coq-8.4pl2/0000750000175000001440000000000012127276563011641 5ustar notinuserscoq-8.4pl2/install.sh0000750000175000001440000000021611330405500013621 0ustar notinusers#! /bin/sh dest="$1" shift for f; do bn=`basename $f` dn=`dirname $f` install -d "$dest/$dn" install -m 644 $f "$dest/$dn/$bn" done coq-8.4pl2/kernel/0000750000175000001440000000000012127276527013121 5ustar notinuserscoq-8.4pl2/kernel/mod_subst.mli0000640000175000001440000001176112010532755015617 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_path -> delta_resolver -> delta_resolver val add_kn_delta_resolver : kernel_name -> kernel_name -> delta_resolver -> delta_resolver val add_inline_delta_resolver : kernel_name -> (int * constr option) -> delta_resolver -> delta_resolver val add_delta_resolver : delta_resolver -> delta_resolver -> delta_resolver (** Effect of a [delta_resolver] on kernel name, constant, inductive, etc *) val kn_of_delta : delta_resolver -> kernel_name -> kernel_name val constant_of_delta_kn : delta_resolver -> kernel_name -> constant val constant_of_delta : delta_resolver -> constant -> constant val mind_of_delta_kn : delta_resolver -> kernel_name -> mutual_inductive val mind_of_delta : delta_resolver -> mutual_inductive -> mutual_inductive val mp_of_delta : delta_resolver -> module_path -> module_path (** Extract the set of inlined constant in the resolver *) val inline_of_delta : int option -> delta_resolver -> (int * kernel_name) list (** Does a [delta_resolver] contains a [mp], a constant, an inductive ? *) val mp_in_delta : module_path -> delta_resolver -> bool val con_in_delta : constant -> delta_resolver -> bool val mind_in_delta : mutual_inductive -> delta_resolver -> bool (** {6 Substitution} *) type substitution val empty_subst : substitution val is_empty_subst : substitution -> bool (** add_* add [arg2/arg1]\{arg3\} to the substitution with no sequential composition *) val add_mbid : mod_bound_id -> module_path -> delta_resolver -> substitution -> substitution val add_mp : module_path -> module_path -> delta_resolver -> substitution -> substitution (** map_* create a new substitution [arg2/arg1]\{arg3\} *) val map_mbid : mod_bound_id -> module_path -> delta_resolver -> substitution val map_mp : module_path -> module_path -> delta_resolver -> substitution (** sequential composition: [substitute (join sub1 sub2) t = substitute sub2 (substitute sub1 t)] *) val join : substitution -> substitution -> substitution (** Apply the substitution on the domain of the resolver *) val subst_dom_delta_resolver : substitution -> delta_resolver -> delta_resolver (** Apply the substitution on the codomain of the resolver *) val subst_codom_delta_resolver : substitution -> delta_resolver -> delta_resolver val subst_dom_codom_delta_resolver : substitution -> delta_resolver -> delta_resolver type 'a substituted val from_val : 'a -> 'a substituted val force : (substitution -> 'a -> 'a) -> 'a substituted -> 'a val subst_substituted : substitution -> 'a substituted -> 'a substituted (**/**) (* debugging *) val debug_string_of_subst : substitution -> string val debug_pr_subst : substitution -> Pp.std_ppcmds val debug_string_of_delta : delta_resolver -> string val debug_pr_delta : delta_resolver -> Pp.std_ppcmds (**/**) (** [subst_mp sub mp] guarantees that whenever the result of the substitution is structutally equal [mp], it is equal by pointers as well [==] *) val subst_mp : substitution -> module_path 
-> module_path val subst_ind : substitution -> mutual_inductive -> mutual_inductive val subst_kn : substitution -> kernel_name -> kernel_name val subst_con : substitution -> constant -> constant * constr (** Here the semantics is completely unclear. What does "Hint Unfold t" means when "t" is a parameter? Does the user mean "Unfold X.t" or does she mean "Unfold y" where X.t is later on instantiated with y? I choose the first interpretation (i.e. an evaluable reference is never expanded). *) val subst_evaluable_reference : substitution -> evaluable_global_reference -> evaluable_global_reference (** [replace_mp_in_con mp mp' con] replaces [mp] with [mp'] in [con] *) val replace_mp_in_kn : module_path -> module_path -> kernel_name -> kernel_name (** [subst_mps sub c] performs the substitution [sub] on all kernel names appearing in [c] *) val subst_mps : substitution -> constr -> constr (** [occur_*id id sub] returns true iff [id] occurs in [sub] on either side *) val occur_mbid : mod_bound_id -> substitution -> bool (** [repr_substituted r] dumps the representation of a substituted: - [None, a] when r is a value - [Some s, a] when r is a delayed substitution [s] applied to [a] *) val repr_substituted : 'a substituted -> substitution list option * 'a coq-8.4pl2/kernel/declarations.ml0000640000175000001440000003151112010532755016112 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* None | Def c -> Some c | OpaqueDef lc -> Some (force_lazy_constr lc) let constant_has_body cb = match cb.const_body with | Undef _ -> false | Def _ | OpaqueDef _ -> true let is_opaque cb = match cb.const_body with | OpaqueDef _ -> true | Undef _ | Def _ -> false (* Substitutions of [constant_body] *) let subst_rel_declaration sub (id,copt,t as x) = let copt' = Option.smartmap (subst_mps sub) copt in let t' = subst_mps sub t in if copt == copt' & t == t' then x else (id,copt',t') let subst_rel_context sub = list_smartmap (subst_rel_declaration sub) (* TODO: these substitution functions could avoid duplicating things when the substitution have preserved all the fields *) let subst_const_type sub arity = if is_empty_subst sub then arity else match arity with | NonPolymorphicType s -> NonPolymorphicType (subst_mps sub s) | PolymorphicArity (ctx,s) -> PolymorphicArity (subst_rel_context sub ctx,s) let subst_const_def sub = function | Undef inl -> Undef inl | Def c -> Def (subst_constr_subst sub c) | OpaqueDef lc -> OpaqueDef (subst_lazy_constr sub lc) let subst_const_body sub cb = { const_hyps = (assert (cb.const_hyps=[]); []); const_body = subst_const_def sub cb.const_body; const_type = subst_const_type sub cb.const_type; const_body_code = Cemitcodes.subst_to_patch_subst sub cb.const_body_code; const_constraints = cb.const_constraints} (* Hash-consing of [constant_body] *) let hcons_rel_decl ((n,oc,t) as d) = let n' = hcons_name n and oc' = Option.smartmap hcons_constr oc and t' = hcons_types t in if n' == n && oc' == oc && t' == t then d else (n',oc',t') let hcons_rel_context l = list_smartmap hcons_rel_decl l let hcons_polyarity ar = { poly_param_levels = list_smartmap (Option.smartmap hcons_univ) ar.poly_param_levels; poly_level = hcons_univ ar.poly_level } let hcons_const_type = function | NonPolymorphicType t -> NonPolymorphicType (hcons_constr t) | PolymorphicArity (ctx,s) -> PolymorphicArity (hcons_rel_context ctx, hcons_polyarity s) let hcons_const_def = function | Undef inl -> Undef inl | Def l_constr -> let 
constr = force l_constr in Def (from_val (hcons_constr constr)) | OpaqueDef lc -> if lazy_constr_is_val lc then let constr = force_opaque lc in OpaqueDef (opaque_from_val (hcons_constr constr)) else OpaqueDef lc let hcons_const_body cb = { cb with const_body = hcons_const_def cb.const_body; const_type = hcons_const_type cb.const_type; const_constraints = hcons_constraints cb.const_constraints } (*s Inductive types (internal representation with redundant information). *) type recarg = | Norec | Mrec of inductive | Imbr of inductive let subst_recarg sub r = match r with | Norec -> r | Mrec (kn,i) -> let kn' = subst_ind sub kn in if kn==kn' then r else Mrec (kn',i) | Imbr (kn,i) -> let kn' = subst_ind sub kn in if kn==kn' then r else Imbr (kn',i) type wf_paths = recarg Rtree.t let mk_norec = Rtree.mk_node Norec [||] let mk_paths r recargs = Rtree.mk_node r (Array.map (fun l -> Rtree.mk_node Norec (Array.of_list l)) recargs) let dest_recarg p = fst (Rtree.dest_node p) (* dest_subterms returns the sizes of each argument of each constructor of an inductive object of size [p]. This should never be done for Norec, because the number of sons does not correspond to the number of constructors. *) let dest_subterms p = let (ra,cstrs) = Rtree.dest_node p in assert (ra<>Norec); Array.map (fun t -> Array.to_list (snd (Rtree.dest_node t))) cstrs let recarg_length p j = let (_,cstrs) = Rtree.dest_node p in Array.length (snd (Rtree.dest_node cstrs.(j-1))) let subst_wf_paths sub p = Rtree.smartmap (subst_recarg sub) p (**********************************************************************) (* Representation of mutual inductive types in the kernel *) (* Inductive I1 (params) : U1 := c11 : T11 | ... | c1p1 : T1p1 ... with In (params) : Un := cn1 : Tn1 | ... | cnpn : Tnpn *) type monomorphic_inductive_arity = { mind_user_arity : constr; mind_sort : sorts; } type inductive_arity = | Monomorphic of monomorphic_inductive_arity | Polymorphic of polymorphic_arity type one_inductive_body = { (* Primitive datas *) (* Name of the type: [Ii] *) mind_typename : identifier; (* Arity context of [Ii] with parameters: [forall params, Ui] *) mind_arity_ctxt : rel_context; (* Arity sort, original user arity, and allowed elim sorts, if monomorphic *) mind_arity : inductive_arity; (* Names of the constructors: [cij] *) mind_consnames : identifier array; (* Types of the constructors with parameters: [forall params, Tij], where the Ik are replaced by de Bruijn index in the context I1:forall params, U1 .. 
In:forall params, Un *) mind_user_lc : types array; (* Derived datas *) (* Number of expected real arguments of the type (no let, no params) *) mind_nrealargs : int; (* Length of realargs context (with let, no params) *) mind_nrealargs_ctxt : int; (* List of allowed elimination sorts *) mind_kelim : sorts_family list; (* Head normalized constructor types so that their conclusion is atomic *) mind_nf_lc : types array; (* Length of the signature of the constructors (with let, w/o params) *) mind_consnrealdecls : int array; (* Signature of recursive arguments in the constructors *) mind_recargs : wf_paths; (* Datas for bytecode compilation *) (* number of constant constructor *) mind_nb_constant : int; (* number of no constant constructor *) mind_nb_args : int; mind_reloc_tbl : Cbytecodes.reloc_table; } type mutual_inductive_body = { (* The component of the mutual inductive block *) mind_packets : one_inductive_body array; (* Whether the inductive type has been declared as a record *) mind_record : bool; (* Whether the type is inductive or coinductive *) mind_finite : bool; (* Number of types in the block *) mind_ntypes : int; (* Section hypotheses on which the block depends *) mind_hyps : section_context; (* Number of expected parameters *) mind_nparams : int; (* Number of recursively uniform (i.e. ordinary) parameters *) mind_nparams_rec : int; (* The context of parameters (includes let-in declaration) *) mind_params_ctxt : rel_context; (* Universes constraints enforced by the inductive declaration *) mind_constraints : constraints; } let subst_indarity sub = function | Monomorphic s -> Monomorphic { mind_user_arity = subst_mps sub s.mind_user_arity; mind_sort = s.mind_sort; } | Polymorphic s as x -> x let subst_mind_packet sub mbp = { mind_consnames = mbp.mind_consnames; mind_consnrealdecls = mbp.mind_consnrealdecls; mind_typename = mbp.mind_typename; mind_nf_lc = array_smartmap (subst_mps sub) mbp.mind_nf_lc; mind_arity_ctxt = subst_rel_context sub mbp.mind_arity_ctxt; mind_arity = subst_indarity sub mbp.mind_arity; mind_user_lc = array_smartmap (subst_mps sub) mbp.mind_user_lc; mind_nrealargs = mbp.mind_nrealargs; mind_nrealargs_ctxt = mbp.mind_nrealargs_ctxt; mind_kelim = mbp.mind_kelim; mind_recargs = subst_wf_paths sub mbp.mind_recargs (*wf_paths*); mind_nb_constant = mbp.mind_nb_constant; mind_nb_args = mbp.mind_nb_args; mind_reloc_tbl = mbp.mind_reloc_tbl } let subst_mind sub mib = { mind_record = mib.mind_record ; mind_finite = mib.mind_finite ; mind_ntypes = mib.mind_ntypes ; mind_hyps = (assert (mib.mind_hyps=[]); []) ; mind_nparams = mib.mind_nparams; mind_nparams_rec = mib.mind_nparams_rec; mind_params_ctxt = map_rel_context (subst_mps sub) mib.mind_params_ctxt; mind_packets = array_smartmap (subst_mind_packet sub) mib.mind_packets ; mind_constraints = mib.mind_constraints } let hcons_indarity = function | Monomorphic a -> Monomorphic { mind_user_arity = hcons_constr a.mind_user_arity; mind_sort = hcons_sorts a.mind_sort } | Polymorphic a -> Polymorphic (hcons_polyarity a) let hcons_mind_packet oib = { oib with mind_typename = hcons_ident oib.mind_typename; mind_arity_ctxt = hcons_rel_context oib.mind_arity_ctxt; mind_arity = hcons_indarity oib.mind_arity; mind_consnames = array_smartmap hcons_ident oib.mind_consnames; mind_user_lc = array_smartmap hcons_types oib.mind_user_lc; mind_nf_lc = array_smartmap hcons_types oib.mind_nf_lc } let hcons_mind mib = { mib with mind_packets = array_smartmap hcons_mind_packet mib.mind_packets; mind_params_ctxt = hcons_rel_context 
mib.mind_params_ctxt; mind_constraints = hcons_constraints mib.mind_constraints } (*s Modules: signature component specifications, module types, and module declarations *) type structure_field_body = | SFBconst of constant_body | SFBmind of mutual_inductive_body | SFBmodule of module_body | SFBmodtype of module_type_body and structure_body = (label * structure_field_body) list and struct_expr_body = | SEBident of module_path | SEBfunctor of mod_bound_id * module_type_body * struct_expr_body | SEBapply of struct_expr_body * struct_expr_body * constraints | SEBstruct of structure_body | SEBwith of struct_expr_body * with_declaration_body and with_declaration_body = With_module_body of identifier list * module_path | With_definition_body of identifier list * constant_body and module_body = { mod_mp : module_path; mod_expr : struct_expr_body option; mod_type : struct_expr_body; mod_type_alg : struct_expr_body option; mod_constraints : constraints; mod_delta : delta_resolver; mod_retroknowledge : Retroknowledge.action list} and module_type_body = { typ_mp : module_path; typ_expr : struct_expr_body; typ_expr_alg : struct_expr_body option ; typ_constraints : constraints; typ_delta :delta_resolver} coq-8.4pl2/kernel/entries.ml0000640000175000001440000000531012010532755015111 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mp | _ -> raise Not_path let rec mp_from_mexpr = function | MSEident mp -> mp | MSEapply (expr,_) -> mp_from_mexpr expr | MSEfunctor (_,_,expr) -> mp_from_mexpr expr | MSEwith (expr,_) -> mp_from_mexpr expr let is_modular = function | SFBmodule _ | SFBmodtype _ -> true | SFBconst _ | SFBmind _ -> false let rec list_split_assoc ((k,m) as km) rev_before = function | [] -> raise Not_found | (k',b)::after when k=k' && is_modular b = m -> rev_before,b,after | h::tail -> list_split_assoc km (h::rev_before) tail let discr_resolver env mtb = match mtb.typ_expr with SEBstruct _ -> mtb.typ_delta | _ -> (*case mp is a functor *) empty_delta_resolver let rec rebuild_mp mp l = match l with []-> mp | i::r -> rebuild_mp (MPdot(mp,i)) r let rec check_with env sign with_decl alg_sign mp equiv = let sign,wd,equiv,cst= match with_decl with | With_Definition (idl,c) -> let sign,cb,cst = check_with_def env sign (idl,c) mp equiv in sign,With_definition_body(idl,cb),equiv,cst | With_Module (idl,mp1) -> let sign,equiv,cst = check_with_mod env sign (idl,mp1) mp equiv in sign,With_module_body(idl,mp1),equiv,cst in if alg_sign = None then sign,None,equiv,cst else sign,Some (SEBwith(Option.get(alg_sign),wd)),equiv,cst and check_with_def env sign (idl,c) mp equiv = let sig_b = match sign with | SEBstruct(sig_b) -> sig_b | _ -> error_signature_expected sign in let id,idl = match idl with | [] -> assert false | id::idl -> id,idl in let l = label_of_id id in try let rev_before,spec,after = list_split_assoc (l,(idl<>[])) [] sig_b in let before = List.rev rev_before in let env' = Modops.add_signature mp before equiv env in if idl = [] then (* Toplevel definition *) let cb = match spec with | SFBconst cb -> cb | _ -> error_not_a_constant l in (* In the spirit of subtyping.check_constant, we accept any implementations of parameters and opaques terms, as long as they have the right type *) let def,cst = match cb.const_body with | Undef _ | OpaqueDef _ -> let (j,cst1) = Typeops.infer env' c in let typ = Typeops.type_of_constant_type env' cb.const_type in let cst2 = Reduction.conv_leq env' j.uj_type typ in let 
cst = union_constraints (union_constraints cb.const_constraints cst1) cst2 in let def = Def (Declarations.from_val j.uj_val) in def,cst | Def cs -> let cst1 = Reduction.conv env' c (Declarations.force cs) in let cst = union_constraints cb.const_constraints cst1 in let def = Def (Declarations.from_val c) in def,cst in let cb' = { cb with const_body = def; const_body_code = Cemitcodes.from_val (compile_constant_body env' def); const_constraints = cst } in SEBstruct(before@(l,SFBconst(cb'))::after),cb',cst else (* Definition inside a sub-module *) let old = match spec with | SFBmodule msb -> msb | _ -> error_not_a_module (string_of_label l) in begin match old.mod_expr with | None -> let sign,cb,cst = check_with_def env' old.mod_type (idl,c) (MPdot(mp,l)) old.mod_delta in let new_spec = SFBmodule({old with mod_type = sign; mod_type_alg = None}) in SEBstruct(before@(l,new_spec)::after),cb,cst | Some msb -> error_generative_module_expected l end with | Not_found -> error_no_such_label l | Reduction.NotConvertible -> error_incorrect_with_constraint l and check_with_mod env sign (idl,mp1) mp equiv = let sig_b = match sign with | SEBstruct(sig_b) ->sig_b | _ -> error_signature_expected sign in let id,idl = match idl with | [] -> assert false | id::idl -> id,idl in let l = label_of_id id in try let rev_before,spec,after = list_split_assoc (l,true) [] sig_b in let before = List.rev rev_before in let env' = Modops.add_signature mp before equiv env in if idl = [] then (* Toplevel module definition *) let old = match spec with SFBmodule msb -> msb | _ -> error_not_a_module (string_of_label l) in let mb_mp1 = (lookup_module mp1 env) in let mtb_mp1 = module_type_of_module None mb_mp1 in let cst = match old.mod_expr with None -> begin try union_constraints (check_subtypes env' mtb_mp1 (module_type_of_module None old)) old.mod_constraints with Failure _ -> error_incorrect_with_constraint (label_of_id id) end | Some (SEBident(mp')) -> check_modpath_equiv env' mp1 mp'; old.mod_constraints | _ -> error_generative_module_expected l in let new_mb = strengthen_and_subst_mb mb_mp1 (MPdot(mp,l)) false in let new_spec = SFBmodule {new_mb with mod_mp = MPdot(mp,l); mod_expr = Some (SEBident mp1); mod_constraints = cst} in (* we propagate the new equality in the rest of the signature with the identity substitution accompagned by the new resolver*) let id_subst = map_mp (MPdot(mp,l)) (MPdot(mp,l)) new_mb.mod_delta in SEBstruct(before@(l,new_spec)::subst_signature id_subst after), add_delta_resolver equiv new_mb.mod_delta,cst else (* Module definition of a sub-module *) let old = match spec with SFBmodule msb -> msb | _ -> error_not_a_module (string_of_label l) in begin match old.mod_expr with None -> let sign,equiv',cst = check_with_mod env' old.mod_type (idl,mp1) (MPdot(mp,l)) old.mod_delta in let new_equiv = add_delta_resolver equiv equiv' in let new_spec = SFBmodule {old with mod_type = sign; mod_type_alg = None; mod_delta = equiv'} in let id_subst = map_mp (MPdot(mp,l)) (MPdot(mp,l)) equiv' in SEBstruct(before@(l,new_spec)::subst_signature id_subst after), new_equiv,cst | Some (SEBident(mp')) -> let mpnew = rebuild_mp mp' (List.map label_of_id idl) in check_modpath_equiv env' mpnew mp; SEBstruct(before@(l,spec)::after) ,equiv,empty_constraint | _ -> error_generative_module_expected l end with Not_found -> error_no_such_label l | Reduction.NotConvertible -> error_incorrect_with_constraint l and translate_module env mp inl me = match me.mod_entry_expr, me.mod_entry_type with | None, None -> anomaly 
"Mod_typing.translate_module: empty type and expr in module entry" | None, Some mte -> let mtb = translate_module_type env mp inl mte in { mod_mp = mp; mod_expr = None; mod_type = mtb.typ_expr; mod_type_alg = mtb.typ_expr_alg; mod_delta = mtb.typ_delta; mod_constraints = mtb.typ_constraints; mod_retroknowledge = []} | Some mexpr, _ -> let sign,alg_implem,resolver,cst1 = translate_struct_module_entry env mp inl mexpr in let sign,alg1,resolver,cst2 = match me.mod_entry_type with | None -> sign,None,resolver,empty_constraint | Some mte -> let mtb = translate_module_type env mp inl mte in let cst = check_subtypes env {typ_mp = mp; typ_expr = sign; typ_expr_alg = None; typ_constraints = empty_constraint; typ_delta = resolver;} mtb in mtb.typ_expr,mtb.typ_expr_alg,mtb.typ_delta,cst in { mod_mp = mp; mod_type = sign; mod_expr = alg_implem; mod_type_alg = alg1; mod_constraints = Univ.union_constraints cst1 cst2; mod_delta = resolver; mod_retroknowledge = []} (* spiwack: not so sure about that. It may cause a bug when closing nested modules. If it does, I don't really know how to fix the bug.*) and translate_apply env inl ftrans mexpr mkalg = let sign,alg,resolver,cst1 = ftrans in let farg_id, farg_b, fbody_b = destr_functor env sign in let mp1 = try path_of_mexpr mexpr with Not_path -> error_application_to_not_path mexpr in let mtb = module_type_of_module None (lookup_module mp1 env) in let cst2 = check_subtypes env mtb farg_b in let mp_delta = discr_resolver env mtb in let mp_delta = inline_delta_resolver env inl mp1 farg_id farg_b mp_delta in let subst = map_mbid farg_id mp1 mp_delta in subst_struct_expr subst fbody_b, mkalg alg mp1 cst2, subst_codom_delta_resolver subst resolver, Univ.union_constraints cst1 cst2 and translate_functor env inl arg_id arg_e trans mkalg = let mtb = translate_module_type env (MPbound arg_id) inl arg_e in let env' = add_module (module_body_of_type (MPbound arg_id) mtb) env in let sign,alg,resolver,cst = trans env' in SEBfunctor (arg_id, mtb, sign), mkalg alg arg_id mtb, resolver, Univ.union_constraints cst mtb.typ_constraints and translate_struct_module_entry env mp inl = function | MSEident mp1 -> let mb = lookup_module mp1 env in let mb' = strengthen_and_subst_mb mb mp false in mb'.mod_type, Some (SEBident mp1), mb'.mod_delta,Univ.empty_constraint | MSEfunctor (arg_id, arg_e, body_expr) -> let trans env' = translate_struct_module_entry env' mp inl body_expr in let mkalg a id m = Option.map (fun a -> SEBfunctor (id,m,a)) a in translate_functor env inl arg_id arg_e trans mkalg | MSEapply (fexpr,mexpr) -> let trans = translate_struct_module_entry env mp inl fexpr in let mkalg a mp c = Option.map (fun a -> SEBapply(a,SEBident mp,c)) a in translate_apply env inl trans mexpr mkalg | MSEwith(mte, with_decl) -> let sign,alg,resolve,cst1 = translate_struct_module_entry env mp inl mte in let sign,alg,resolve,cst2 = check_with env sign with_decl alg mp resolve in sign,alg,resolve,Univ.union_constraints cst1 cst2 and translate_struct_type_entry env inl = function | MSEident mp1 -> let mtb = lookup_modtype mp1 env in mtb.typ_expr,Some (SEBident mp1),mtb.typ_delta,Univ.empty_constraint | MSEfunctor (arg_id, arg_e, body_expr) -> let trans env' = translate_struct_type_entry env' inl body_expr in translate_functor env inl arg_id arg_e trans (fun _ _ _ -> None) | MSEapply (fexpr,mexpr) -> let trans = translate_struct_type_entry env inl fexpr in translate_apply env inl trans mexpr (fun _ _ _ -> None) | MSEwith(mte, with_decl) -> let sign,alg,resolve,cst1 = translate_struct_type_entry 
env inl mte in let sign,alg,resolve,cst2 = check_with env sign with_decl alg (mp_from_mexpr mte) resolve in sign,alg,resolve,Univ.union_constraints cst1 cst2 and translate_module_type env mp inl mte = let mp_from = mp_from_mexpr mte in let sign,alg,resolve,cst = translate_struct_type_entry env inl mte in let mtb = subst_modtype_and_resolver {typ_mp = mp_from; typ_expr = sign; typ_expr_alg = None; typ_constraints = cst; typ_delta = resolve} mp in {mtb with typ_expr_alg = alg} let rec translate_struct_include_module_entry env mp inl = function | MSEident mp1 -> let mb = lookup_module mp1 env in let mb' = strengthen_and_subst_mb mb mp true in let mb_typ = clean_bounded_mod_expr mb'.mod_type in mb_typ,None,mb'.mod_delta,Univ.empty_constraint | MSEapply (fexpr,mexpr) -> let ftrans = translate_struct_include_module_entry env mp inl fexpr in translate_apply env inl ftrans mexpr (fun _ _ _ -> None) | _ -> error ("You cannot Include a high-order structure.") let rec add_struct_expr_constraints env = function | SEBident _ -> env | SEBfunctor (_,mtb,meb) -> add_struct_expr_constraints (add_modtype_constraints env mtb) meb | SEBstruct (structure_body) -> List.fold_left (fun env (_,item) -> add_struct_elem_constraints env item) env structure_body | SEBapply (meb1,meb2,cst) -> Environ.add_constraints cst (add_struct_expr_constraints (add_struct_expr_constraints env meb1) meb2) | SEBwith(meb,With_definition_body(_,cb))-> Environ.add_constraints cb.const_constraints (add_struct_expr_constraints env meb) | SEBwith(meb,With_module_body(_,_))-> add_struct_expr_constraints env meb and add_struct_elem_constraints env = function | SFBconst cb -> Environ.add_constraints cb.const_constraints env | SFBmind mib -> Environ.add_constraints mib.mind_constraints env | SFBmodule mb -> add_module_constraints env mb | SFBmodtype mtb -> add_modtype_constraints env mtb and add_module_constraints env mb = let env = match mb.mod_expr with | None -> env | Some meb -> add_struct_expr_constraints env meb in let env = add_struct_expr_constraints env mb.mod_type in Environ.add_constraints mb.mod_constraints env and add_modtype_constraints env mtb = Environ.add_constraints mtb.typ_constraints (add_struct_expr_constraints env mtb.typ_expr) let rec struct_expr_constraints cst = function | SEBident _ -> cst | SEBfunctor (_,mtb,meb) -> struct_expr_constraints (modtype_constraints cst mtb) meb | SEBstruct (structure_body) -> List.fold_left (fun cst (_,item) -> struct_elem_constraints cst item) cst structure_body | SEBapply (meb1,meb2,cst1) -> struct_expr_constraints (struct_expr_constraints (Univ.union_constraints cst1 cst) meb1) meb2 | SEBwith(meb,With_definition_body(_,cb))-> struct_expr_constraints (Univ.union_constraints cb.const_constraints cst) meb | SEBwith(meb,With_module_body(_,_))-> struct_expr_constraints cst meb and struct_elem_constraints cst = function | SFBconst cb -> cst | SFBmind mib -> cst | SFBmodule mb -> module_constraints cst mb | SFBmodtype mtb -> modtype_constraints cst mtb and module_constraints cst mb = let cst = match mb.mod_expr with | None -> cst | Some meb -> struct_expr_constraints cst meb in let cst = struct_expr_constraints cst mb.mod_type in Univ.union_constraints mb.mod_constraints cst and modtype_constraints cst mtb = struct_expr_constraints (Univ.union_constraints mtb.typ_constraints cst) mtb.typ_expr let struct_expr_constraints = struct_expr_constraints Univ.empty_constraint let module_constraints = module_constraints Univ.empty_constraint 
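(* Illustrative sketch, not part of the original sources: how the
   substitution API declared in mod_subst.mli above is typically used by
   the module-typing code.  [mp_old], [mp_new] and the term [c] are
   hypothetical placeholders.  Recall the specification of [join]:
   [subst_mps (join sub1 sub2) c] coincides with
   [subst_mps sub2 (subst_mps sub1 c)]. *)
let _substitution_sketch mp_old mp_new c =
  (* build the substitution [mp_new/mp_old] carrying an empty resolver *)
  let sub = Mod_subst.map_mp mp_old mp_new Mod_subst.empty_delta_resolver in
  (* apply it to every kernel name occurring in the term [c] *)
  Mod_subst.subst_mps sub c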
coq-8.4pl2/kernel/cemitcodes.ml0000640000175000001440000002711112010532755015562 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = String.length !out_buffer then begin let len = String.length !out_buffer in let new_buffer = String.create (2 * len) in String.blit !out_buffer 0 new_buffer 0 len; out_buffer := new_buffer end; String.unsafe_set !out_buffer p (Char.unsafe_chr b1); String.unsafe_set !out_buffer (p+1) (Char.unsafe_chr b2); String.unsafe_set !out_buffer (p+2) (Char.unsafe_chr b3); String.unsafe_set !out_buffer (p+3) (Char.unsafe_chr b4); out_position := p + 4 *) let out_word b1 b2 b3 b4 = let p = !out_position in if p >= String.length !out_buffer then begin let len = String.length !out_buffer in let new_len = if len <= Sys.max_string_length / 2 then 2 * len else if len = Sys.max_string_length then raise (Invalid_argument "String.create") (* Pas la bonne execption .... *) else Sys.max_string_length in let new_buffer = String.create new_len in String.blit !out_buffer 0 new_buffer 0 len; out_buffer := new_buffer end; String.unsafe_set !out_buffer p (Char.unsafe_chr b1); String.unsafe_set !out_buffer (p+1) (Char.unsafe_chr b2); String.unsafe_set !out_buffer (p+2) (Char.unsafe_chr b3); String.unsafe_set !out_buffer (p+3) (Char.unsafe_chr b4); out_position := p + 4 let out opcode = out_word opcode 0 0 0 let out_int n = out_word n (n asr 8) (n asr 16) (n asr 24) (* Handling of local labels and backpatching *) type label_definition = Label_defined of int | Label_undefined of (int * int) list let label_table = ref ([| |] : label_definition array) (* le ieme element de la table = Label_defined n signifie que l'on a deja rencontrer le label i et qu'il est a l'offset n. = Label_undefined l signifie que l'on a pas encore rencontrer ce label, le premier entier indique ou est l'entier a patcher dans la string, le deuxieme son origine *) let extend_label_table needed = let new_size = ref(Array.length !label_table) in while needed >= !new_size do new_size := 2 * !new_size done; let new_table = Array.create !new_size (Label_undefined []) in Array.blit !label_table 0 new_table 0 (Array.length !label_table); label_table := new_table let backpatch (pos, orig) = let displ = (!out_position - orig) asr 2 in !out_buffer.[pos] <- Char.unsafe_chr displ; !out_buffer.[pos+1] <- Char.unsafe_chr (displ asr 8); !out_buffer.[pos+2] <- Char.unsafe_chr (displ asr 16); !out_buffer.[pos+3] <- Char.unsafe_chr (displ asr 24) let define_label lbl = if lbl >= Array.length !label_table then extend_label_table lbl; match (!label_table).(lbl) with Label_defined _ -> raise(Failure "CEmitcode.define_label") | Label_undefined patchlist -> List.iter backpatch patchlist; (!label_table).(lbl) <- Label_defined !out_position let out_label_with_orig orig lbl = if lbl >= Array.length !label_table then extend_label_table lbl; match (!label_table).(lbl) with Label_defined def -> out_int((def - orig) asr 2) | Label_undefined patchlist -> (* spiwack: patchlist is supposed to be non-empty all the time thus I commented that out. 
If there is no problem I suggest removing it for next release (cur: 8.1) *) (*if patchlist = [] then *) (!label_table).(lbl) <- Label_undefined((!out_position, orig) :: patchlist); out_int 0 let out_label l = out_label_with_orig !out_position l (* Relocation information *) let reloc_info = ref ([] : (reloc_info * int) list) let enter info = reloc_info := (info, !out_position) :: !reloc_info let slot_for_const c = enter (Reloc_const c); out_int 0 and slot_for_annot a = enter (Reloc_annot a); out_int 0 and slot_for_getglobal id = enter (Reloc_getglobal id); out_int 0 (* Emission of one instruction *) let emit_instr = function | Klabel lbl -> define_label lbl | Kacc n -> if n < 8 then out(opACC0 + n) else (out opACC; out_int n) | Kenvacc n -> if n >= 1 && n <= 4 then out(opENVACC1 + n - 1) else (out opENVACC; out_int n) | Koffsetclosure ofs -> if ofs = -2 || ofs = 0 || ofs = 2 then out (opOFFSETCLOSURE0 + ofs / 2) else (out opOFFSETCLOSURE; out_int ofs) | Kpush -> out opPUSH | Kpop n -> out opPOP; out_int n | Kpush_retaddr lbl -> out opPUSH_RETADDR; out_label lbl | Kapply n -> if n < 4 then out(opAPPLY1 + n - 1) else (out opAPPLY; out_int n) | Kappterm(n, sz) -> if n < 4 then (out(opAPPTERM1 + n - 1); out_int sz) else (out opAPPTERM; out_int n; out_int sz) | Kreturn n -> out opRETURN; out_int n | Kjump -> out opRETURN; out_int 0 | Krestart -> out opRESTART | Kgrab n -> out opGRAB; out_int n | Kgrabrec(rec_arg) -> out opGRABREC; out_int rec_arg | Kclosure(lbl, n) -> out opCLOSURE; out_int n; out_label lbl | Kclosurerec(nfv,init,lbl_types,lbl_bodies) -> out opCLOSUREREC;out_int (Array.length lbl_bodies); out_int nfv; out_int init; let org = !out_position in Array.iter (out_label_with_orig org) lbl_types; let org = !out_position in Array.iter (out_label_with_orig org) lbl_bodies | Kclosurecofix(nfv,init,lbl_types,lbl_bodies) -> out opCLOSURECOFIX;out_int (Array.length lbl_bodies); out_int nfv; out_int init; let org = !out_position in Array.iter (out_label_with_orig org) lbl_types; let org = !out_position in Array.iter (out_label_with_orig org) lbl_bodies | Kgetglobal q -> out opGETGLOBAL; slot_for_getglobal q | Kconst((Const_b0 i)) -> if i >= 0 && i <= 3 then out (opCONST0 + i) else (out opCONSTINT; out_int i) | Kconst c -> out opGETGLOBAL; slot_for_const c | Kmakeblock(n, t) -> if n = 0 then raise (Invalid_argument "emit_instr : block size = 0") else if n < 4 then (out(opMAKEBLOCK1 + n - 1); out_int t) else (out opMAKEBLOCK; out_int n; out_int t) | Kmakeprod -> out opMAKEPROD | Kmakeswitchblock(typlbl,swlbl,annot,sz) -> out opMAKESWITCHBLOCK; out_label typlbl; out_label swlbl; slot_for_annot annot;out_int sz | Kswitch (tbl_const, tbl_block) -> out opSWITCH; out_int (Array.length tbl_const + (Array.length tbl_block lsl 16)); let org = !out_position in Array.iter (out_label_with_orig org) tbl_const; Array.iter (out_label_with_orig org) tbl_block | Kpushfields n -> out opPUSHFIELDS;out_int n | Kfield n -> if n <= 1 then out (opGETFIELD0+n) else (out opGETFIELD;out_int n) | Ksetfield n -> if n <= 1 then out (opSETFIELD0+n) else (out opSETFIELD;out_int n) | Ksequence _ -> raise (Invalid_argument "Cemitcodes.emit_instr") (* spiwack *) | Kbranch lbl -> out opBRANCH; out_label lbl | Kaddint31 -> out opADDINT31 | Kaddcint31 -> out opADDCINT31 | Kaddcarrycint31 -> out opADDCARRYCINT31 | Ksubint31 -> out opSUBINT31 | Ksubcint31 -> out opSUBCINT31 | Ksubcarrycint31 -> out opSUBCARRYCINT31 | Kmulint31 -> out opMULINT31 | Kmulcint31 -> out opMULCINT31 | Kdiv21int31 -> out opDIV21INT31 | Kdivint31 -> out 
opDIVINT31 | Kaddmuldivint31 -> out opADDMULDIVINT31 | Kcompareint31 -> out opCOMPAREINT31 | Khead0int31 -> out opHEAD0INT31 | Ktail0int31 -> out opTAIL0INT31 | Kisconst lbl -> out opISCONST; out_label lbl | Kareconst(n,lbl) -> out opARECONST; out_int n; out_label lbl | Kcompint31 -> out opCOMPINT31 | Kdecompint31 -> out opDECOMPINT31 (*/spiwack *) | Kstop -> out opSTOP (* Emission of a list of instructions. Include some peephole optimization. *) let rec emit = function | [] -> () (* Peephole optimizations *) | Kpush :: Kacc n :: c -> if n < 8 then out(opPUSHACC0 + n) else (out opPUSHACC; out_int n); emit c | Kpush :: Kenvacc n :: c -> if n >= 1 && n <= 4 then out(opPUSHENVACC1 + n - 1) else (out opPUSHENVACC; out_int n); emit c | Kpush :: Koffsetclosure ofs :: c -> if ofs = -2 || ofs = 0 || ofs = 2 then out(opPUSHOFFSETCLOSURE0 + ofs / 2) else (out opPUSHOFFSETCLOSURE; out_int ofs); emit c | Kpush :: Kgetglobal id :: c -> out opPUSHGETGLOBAL; slot_for_getglobal id; emit c | Kpush :: Kconst (Const_b0 i) :: c -> if i >= 0 && i <= 3 then out (opPUSHCONST0 + i) else (out opPUSHCONSTINT; out_int i); emit c | Kpush :: Kconst const :: c -> out opPUSHGETGLOBAL; slot_for_const const; emit c | Kpop n :: Kjump :: c -> out opRETURN; out_int n; emit c | Ksequence(c1,c2)::c -> emit c1; emit c2;emit c (* Default case *) | instr :: c -> emit_instr instr; emit c (* Initialization *) let init () = out_position := 0; label_table := Array.create 16 (Label_undefined []); reloc_info := [] type emitcodes = string let copy = String.copy let length = String.length type to_patch = emitcodes * (patch list) * fv (* Substitution *) let rec subst_strcst s sc = match sc with | Const_sorts _ | Const_b0 _ -> sc | Const_bn(tag,args) -> Const_bn(tag,Array.map (subst_strcst s) args) | Const_ind(ind) -> let kn,i = ind in Const_ind((subst_ind s kn, i)) let subst_patch s (ri,pos) = match ri with | Reloc_annot a -> let (kn,i) = a.ci.ci_ind in let ci = {a.ci with ci_ind = (subst_ind s kn,i)} in (Reloc_annot {a with ci = ci},pos) | Reloc_const sc -> (Reloc_const (subst_strcst s sc), pos) | Reloc_getglobal kn -> (Reloc_getglobal (fst (subst_con s kn)), pos) let subst_to_patch s (code,pl,fv) = code,List.rev_map (subst_patch s) pl,fv type body_code = | BCdefined of to_patch | BCallias of constant | BCconstant let subst_body_code s = function | BCdefined tp -> BCdefined (subst_to_patch s tp) | BCallias kn -> BCallias (fst (subst_con s kn)) | BCconstant -> BCconstant type to_patch_substituted = body_code substituted let from_val = from_val let force = force subst_body_code let subst_to_patch_subst = subst_substituted let repr_body_code = repr_substituted let to_memory (init_code, fun_code, fv) = init(); emit init_code; emit fun_code; let code = String.create !out_position in String.unsafe_blit !out_buffer 0 code 0 !out_position; let reloc = List.rev !reloc_info in Array.iter (fun lbl -> (match lbl with Label_defined _ -> assert true | Label_undefined patchlist -> assert (patchlist = []))) !label_table; (code, reloc, fv) coq-8.4pl2/kernel/safe_typing.mli0000640000175000001440000001110612010532755016121 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Environ.env val empty_environment : safe_environment val is_empty : safe_environment -> bool (** Adding and removing local declarations (Local or Variables) *) val push_named_assum : identifier * types -> safe_environment -> Univ.constraints * safe_environment val push_named_def : 
identifier * constr * types option -> safe_environment -> Univ.constraints * safe_environment (** Adding global axioms or definitions *) type global_declaration = | ConstantEntry of constant_entry | GlobalRecipe of Cooking.recipe val add_constant : dir_path -> label -> global_declaration -> safe_environment -> constant * safe_environment (** Adding an inductive type *) val add_mind : dir_path -> label -> mutual_inductive_entry -> safe_environment -> mutual_inductive * safe_environment (** Adding a module *) val add_module : label -> module_entry -> inline -> safe_environment -> module_path * delta_resolver * safe_environment (** Adding a module type *) val add_modtype : label -> module_struct_entry -> inline -> safe_environment -> module_path * safe_environment (** Adding universe constraints *) val add_constraints : Univ.constraints -> safe_environment -> safe_environment (** Settin the strongly constructive or classical logical engagement *) val set_engagement : engagement -> safe_environment -> safe_environment (** {6 Interactive module functions } *) val start_module : label -> safe_environment -> module_path * safe_environment val end_module : label -> (module_struct_entry * inline) option -> safe_environment -> module_path * delta_resolver * safe_environment val add_module_parameter : mod_bound_id -> module_struct_entry -> inline -> safe_environment -> delta_resolver * safe_environment val start_modtype : label -> safe_environment -> module_path * safe_environment val end_modtype : label -> safe_environment -> module_path * safe_environment val add_include : module_struct_entry -> bool -> inline -> safe_environment -> delta_resolver * safe_environment val pack_module : safe_environment -> module_body val current_modpath : safe_environment -> module_path val delta_of_senv : safe_environment -> delta_resolver*delta_resolver (** Loading and saving compilation units *) (** exporting and importing modules *) type compiled_library val start_library : dir_path -> safe_environment -> module_path * safe_environment val export : safe_environment -> dir_path -> module_path * compiled_library val import : compiled_library -> Digest.t -> safe_environment -> module_path * safe_environment (** Remove the body of opaque constants *) module LightenLibrary : sig type table type lightened_compiled_library val save : compiled_library -> lightened_compiled_library * table val load : load_proof:Flags.load_proofs -> table Lazy.t -> lightened_compiled_library -> compiled_library end (** {6 Typing judgments } *) type judgment val j_val : judgment -> constr val j_type : judgment -> constr (** Safe typing of a term returning a typing judgment and universe constraints to be added to the environment for the judgment to hold. 
It is guaranteed that the constraints are satisfiable *) val safe_infer : safe_environment -> constr -> judgment * Univ.constraints val typing : safe_environment -> constr -> judgment (** {7 Query } *) val exists_objlabel : label -> safe_environment -> bool (*spiwack: safe retroknowledge functionalities *) open Retroknowledge val retroknowledge : (retroknowledge-> 'a) -> safe_environment -> 'a val register : safe_environment -> field -> Retroknowledge.entry -> constr -> safe_environment coq-8.4pl2/kernel/entries.mli0000640000175000001440000000526212010532755015270 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_path -> inline -> module_entry -> module_body val translate_module_type : env -> module_path -> inline -> module_struct_entry -> module_type_body val translate_struct_module_entry : env -> module_path -> inline -> module_struct_entry -> struct_expr_body (* Signature *) * struct_expr_body option (* Algebraic expr, in fact never None *) * delta_resolver * Univ.constraints val translate_struct_type_entry : env -> inline -> module_struct_entry -> struct_expr_body * struct_expr_body option * delta_resolver * Univ.constraints val translate_struct_include_module_entry : env -> module_path -> inline -> module_struct_entry -> struct_expr_body * struct_expr_body option (* Algebraic expr, always None *) * delta_resolver * Univ.constraints val add_modtype_constraints : env -> module_type_body -> env val add_module_constraints : env -> module_body -> env val add_struct_expr_constraints : env -> struct_expr_body -> env val struct_expr_constraints : struct_expr_body -> Univ.constraints val module_constraints : module_body -> Univ.constraints coq-8.4pl2/kernel/cooking.ml0000640000175000001440000001064012010532755015073 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* anomaly "dirpath_prefix: empty dirpath" | _::l -> make_dirpath l let pop_mind kn = let (mp,dir,l) = Names.repr_mind kn in Names.make_mind mp (pop_dirpath dir) l let pop_con con = let (mp,dir,l) = Names.repr_con con in Names.make_con mp (pop_dirpath dir) l type my_global_reference = | ConstRef of constant | IndRef of inductive | ConstructRef of constructor let cache = (Hashtbl.create 13 : (my_global_reference, constr) Hashtbl.t) let clear_cooking_sharing () = Hashtbl.clear cache let share r (cstl,knl) = try Hashtbl.find cache r with Not_found -> let f,l = match r with | IndRef (kn,i) -> mkInd (pop_mind kn,i), Mindmap.find kn knl | ConstructRef ((kn,i),j) -> mkConstruct ((pop_mind kn,i),j), Mindmap.find kn knl | ConstRef cst -> mkConst (pop_con cst), Cmap.find cst cstl in let c = mkApp (f, Array.map mkVar l) in Hashtbl.add cache r c; (* has raised Not_found if not in work_list *) c let update_case_info ci modlist = try let ind, n = match kind_of_term (share (IndRef ci.ci_ind) modlist) with | App (f,l) -> (destInd f, Array.length l) | Ind ind -> ind, 0 | _ -> assert false in { ci with ci_ind = ind; ci_npar = ci.ci_npar + n } with Not_found -> ci let empty_modlist = (Cmap.empty, Mindmap.empty) let expmod_constr modlist c = let rec substrec c = match kind_of_term c with | Case (ci,p,t,br) -> map_constr substrec (mkCase (update_case_info ci modlist,p,t,br)) | Ind ind -> (try share (IndRef ind) modlist with | Not_found -> map_constr substrec c) | Construct cstr -> (try share (ConstructRef cstr) modlist with | Not_found -> 
map_constr substrec c) | Const cst -> (try share (ConstRef cst) modlist with | Not_found -> map_constr substrec c) | _ -> map_constr substrec c in if modlist = empty_modlist then c else substrec c let abstract_constant_type = List.fold_left (fun c d -> mkNamedProd_wo_LetIn d c) let abstract_constant_body = List.fold_left (fun c d -> mkNamedLambda_or_LetIn d c) type recipe = { d_from : constant_body; d_abstract : named_context; d_modlist : work_list } let on_body f = function | Undef inl -> Undef inl | Def cs -> Def (Declarations.from_val (f (Declarations.force cs))) | OpaqueDef lc -> OpaqueDef (Declarations.opaque_from_val (f (Declarations.force_opaque lc))) let constr_of_def = function | Undef _ -> assert false | Def cs -> Declarations.force cs | OpaqueDef lc -> Declarations.force_opaque lc let cook_constant env r = let cb = r.d_from in let hyps = Sign.map_named_context (expmod_constr r.d_modlist) r.d_abstract in let body = on_body (fun c -> abstract_constant_body (expmod_constr r.d_modlist c) hyps) cb.const_body in let const_hyps = Sign.fold_named_context (fun (h,_,_) hyps -> List.filter (fun (id,_,_) -> id <> h) hyps) hyps ~init:cb.const_hyps in let typ = match cb.const_type with | NonPolymorphicType t -> let typ = abstract_constant_type (expmod_constr r.d_modlist t) hyps in NonPolymorphicType typ | PolymorphicArity (ctx,s) -> let t = mkArity (ctx,Type s.poly_level) in let typ = abstract_constant_type (expmod_constr r.d_modlist t) hyps in let j = make_judge (constr_of_def body) typ in Typeops.make_polymorphic_if_constant_for_ind env j in (body, typ, cb.const_constraints, const_hyps) coq-8.4pl2/kernel/vm.mli0000640000175000001440000000460311535371772014252 0ustar notinusersopen Names open Term open Cbytecodes open Cemitcodes (** Efficient Virtual Machine *) val set_drawinstr : unit -> unit val transp_values : unit -> bool val set_transp_values : bool -> unit (** Machine code *) type tcode (** Values *) type vprod type vfun type vfix type vcofix type vblock type vswitch type arguments type atom = | Aid of id_key | Aiddef of id_key * values | Aind of inductive (** Zippers *) type zipper = | Zapp of arguments | Zfix of vfix * arguments (** might be empty *) | Zswitch of vswitch type stack = zipper list type to_up type whd = | Vsort of sorts | Vprod of vprod | Vfun of vfun | Vfix of vfix * arguments option | Vcofix of vcofix * to_up * arguments option | Vconstr_const of int | Vconstr_block of vblock | Vatom_stk of atom * stack (** Constructors *) val val_of_str_const : structured_constant -> values val val_of_rel : int -> values val val_of_rel_def : int -> values -> values val val_of_named : identifier -> values val val_of_named_def : identifier -> values -> values val val_of_constant : constant -> values val val_of_constant_def : int -> constant -> values -> values external val_of_annot_switch : annot_switch -> values = "%identity" (** Destructors *) val whd_val : values -> whd (** Arguments *) val nargs : arguments -> int val arg : arguments -> int -> values (** Product *) val dom : vprod -> values val codom : vprod -> vfun (** Function *) val body_of_vfun : int -> vfun -> values val decompose_vfun2 : int -> vfun -> vfun -> int * values * values (** Fix *) val current_fix : vfix -> int val check_fix : vfix -> vfix -> bool val rec_args : vfix -> int array val reduce_fix : int -> vfix -> vfun array * values array (** bodies , types *) (** CoFix *) val current_cofix : vcofix -> int val check_cofix : vcofix -> vcofix -> bool val reduce_cofix : int -> vcofix -> values array * values array (** 
bodies , types *) (** Block *) val btag : vblock -> int val bsize : vblock -> int val bfield : vblock -> int -> values (** Switch *) val check_switch : vswitch -> vswitch -> bool val case_info : vswitch -> case_info val type_of_switch : vswitch -> values val branch_of_switch : int -> vswitch -> (int * values) array (** Evaluation *) val whd_stack : values -> stack -> whd val force_whd : values -> stack -> whd val eta_whd : int -> whd -> values coq-8.4pl2/kernel/csymtable.mli0000640000175000001440000000137012010532755015576 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> values val set_opaque_const : constant -> unit val set_transparent_const : constant -> unit coq-8.4pl2/kernel/pre_env.ml0000640000175000001440000000733712121620060015077 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* raise Not_found let env_of_rel n env = { env with env_rel_context = Util.list_skipn n env.env_rel_context; env_rel_val = Util.list_skipn n env.env_rel_val; env_nb_rel = env.env_nb_rel - n } (* Named context *) let push_named_context_val d (ctxt,vals) = let id,_,_ = d in let rval = ref VKnone in Sign.add_named_decl d ctxt, (id,rval)::vals exception ASSERT of rel_context let push_named d env = (* if not (env.env_rel_context = []) then raise (ASSERT env.env_rel_context); assert (env.env_rel_context = []); *) let id,body,_ = d in let rval = ref VKnone in { env with env_named_context = Sign.add_named_decl d env.env_named_context; env_named_vals = (id,rval):: env.env_named_vals } let lookup_named_val id env = snd(List.find (fun (id',_) -> id = id') env.env_named_vals) (* Warning all the names should be different *) let env_of_named id env = env (* Global constants *) let lookup_constant_key kn env = Cmap_env.find kn env.env_globals.env_constants let lookup_constant kn env = fst (Cmap_env.find kn env.env_globals.env_constants) (* Mutual Inductives *) let lookup_mind kn env = Mindmap_env.find kn env.env_globals.env_inductives coq-8.4pl2/kernel/make-opcodes0000640000175000001440000000014610135466420015403 0ustar notinusers$1=="enum" {n=0; next; } {for (i = 1; i <= NF; i++) {printf("let op%s = %d\n", $i, n++);}} coq-8.4pl2/kernel/cooking.mli0000640000175000001440000000205112010532755015241 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* recipe -> constant_def * constant_type * constraints * Sign.section_context (** {6 Utility functions used in module [Discharge]. } *) val expmod_constr : work_list -> constr -> constr val clear_cooking_sharing : unit -> unit coq-8.4pl2/kernel/inductive.mli0000640000175000001440000001054312010532755015607 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* types -> inductive * constr list val find_inductive : env -> types -> inductive * constr list val find_coinductive : env -> types -> inductive * constr list type mind_specif = mutual_inductive_body * one_inductive_body (** {6 ... } *) (** Fetching information in the environment about an inductive type. Raises [Not_found] if the inductive type is not found. 
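A typical (hypothetical) call is [let mib, oib = lookup_mind_specif env ind in ...], giving access both to the whole mutual block [mib] and to the single body [oib] of the inductive [ind].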
*) val lookup_mind_specif : env -> inductive -> mind_specif (** {6 Functions to build standard types related to inductive } *) val ind_subst : mutual_inductive -> mutual_inductive_body -> constr list val type_of_inductive : env -> mind_specif -> types val elim_sorts : mind_specif -> sorts_family list (** Return type as quoted by the user *) val type_of_constructor : constructor -> mind_specif -> types (** Return constructor types in normal form *) val arities_of_constructors : inductive -> mind_specif -> types array (** Return constructor types in user form *) val type_of_constructors : inductive -> mind_specif -> types array (** Transforms inductive specification into types (in nf) *) val arities_of_specif : mutual_inductive -> mind_specif -> types array val inductive_params : mind_specif -> int (** [type_case_branches env (I,args) (p:A) c] computes useful types about the following Cases expression:

Cases (c :: (I args)) of b1..bn end It computes the type of every branch (pattern variables are introduced by products), the type for the whole expression, and the universe constraints generated. *) val type_case_branches : env -> inductive * constr list -> unsafe_judgment -> constr -> types array * types * constraints val build_branches_type : inductive -> mutual_inductive_body * one_inductive_body -> constr list -> constr -> types array (** Return the arity of an inductive type *) val mind_arity : one_inductive_body -> rel_context * sorts_family val inductive_sort_family : one_inductive_body -> sorts_family (** Check a [case_info] actually correspond to a Case expression on the given inductive type. *) val check_case_info : env -> inductive -> case_info -> unit (** {6 Guard conditions for fix and cofix-points. } *) val check_fix : env -> fixpoint -> unit val check_cofix : env -> cofixpoint -> unit (** {6 Support for sort-polymorphic inductive types } *) (** The "polyprop" optional argument below allows to control the "Prop-polymorphism". By default, it is allowed. But when "polyprop=false", the following exception is raised when a polymorphic singleton inductive type becomes Prop due to parameter instantiation. This is used by the Ocaml extraction, which cannot handle (yet?) Prop-polymorphism. *) exception SingletonInductiveBecomesProp of identifier val type_of_inductive_knowing_parameters : ?polyprop:bool -> env -> one_inductive_body -> types array -> types val max_inductive_sort : sorts array -> universe val instantiate_universes : env -> rel_context -> polymorphic_arity -> types array -> rel_context * sorts (** {6 Debug} *) type size = Large | Strict type subterm_spec = Subterm of (size * wf_paths) | Dead_code | Not_subterm type guard_env = { env : env; (** dB of last fixpoint *) rel_min : int; (** dB of variables denoting subterms *) genv : subterm_spec Lazy.t list; } type stack_element = |SClosure of guard_env*constr |SArg of subterm_spec Lazy.t val subterm_specif : guard_env -> stack_element list -> constr -> subterm_spec coq-8.4pl2/kernel/conv_oracle.mli0000640000175000001440000000274412010532755016113 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a tableKey -> 'a tableKey -> bool (** Priority for the expansion of constant in the conversion test. * Higher levels means that the expansion is less prioritary. * (And Expand stands for -oo, and Opaque +oo.) * The default value (transparent constants) is [Level 0]. *) type level = Expand | Level of int | Opaque val transparent : level val get_strategy : 'a tableKey -> level (** Sets the level of a constant. * Level of RelKey constant cannot be set. 
*) val set_strategy : 'a tableKey -> level -> unit val get_transp_state : unit -> transparent_state (**************************** Summary operations *) type oracle val init : unit -> unit val freeze : unit -> oracle val unfreeze : oracle -> unit coq-8.4pl2/kernel/retroknowledge.mli0000640000175000001440000001341112010532755016645 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* entry -> Cbytecodes.comp_env -> constr array -> int -> Cbytecodes.bytecodes-> Cbytecodes.bytecodes (*Given an identifier id (usually Construct _) and its argument array, returns a function that tries an ad-hoc optimisated compilation (in the case of the 31-bit integers it means compiling them directly into an integer) raises Not_found if id should be compiled as usual, and expectingly CBytecodes.NotClosed if the term is not a closed constructor pattern (a constant for the compiler) *) val get_vm_constant_static_info : retroknowledge -> entry -> constr array -> Cbytecodes.structured_constant (*Given an identifier id (usually Construct _ ) its argument array and a continuation, returns the compiled version of id+args+cont when id has a specific treatment (in the case of 31-bit integers, that would be the dynamic compilation into integers) or raises Not_found if id should be compiled as usual *) val get_vm_constant_dynamic_info : retroknowledge -> entry -> Cbytecodes.comp_env -> Cbytecodes.block array -> int -> Cbytecodes.bytecodes -> Cbytecodes.bytecodes (** Given a type identifier, this function is used before compiling a match over this type. In the case of 31-bit integers for instance, it is used to add the instruction sequence which would perform a dynamic decompilation in case the argument of the match is not in coq representation *) val get_vm_before_match_info : retroknowledge -> entry -> Cbytecodes.bytecodes -> Cbytecodes.bytecodes (** Given a type identifier, this function is used by pretyping/vnorm.ml to recover the elements of that type from their compiled form if it's non standard (it is used (and can be used) only when the compiled form is not a block *) val get_vm_decompile_constant_info : retroknowledge -> entry -> int -> Term.constr (** the following functions are solely used in Pre_env and Environ to implement the functions register and unregister (and mem) of Environ *) val add_field : retroknowledge -> field -> entry -> retroknowledge val mem : retroknowledge -> field -> bool val remove : retroknowledge -> field -> retroknowledge val find : retroknowledge -> field -> entry (** the following function manipulate the reactive information of values they are only used by the functions of Pre_env, and Environ to implement the functions register and unregister of Environ *) val add_vm_compiling_info : retroknowledge-> entry -> (bool -> Cbytecodes.comp_env -> constr array -> int -> Cbytecodes.bytecodes -> Cbytecodes.bytecodes) -> retroknowledge val add_vm_constant_static_info : retroknowledge-> entry -> (bool->constr array-> Cbytecodes.structured_constant) -> retroknowledge val add_vm_constant_dynamic_info : retroknowledge-> entry -> (bool -> Cbytecodes.comp_env -> Cbytecodes.block array -> int -> Cbytecodes.bytecodes -> Cbytecodes.bytecodes) -> retroknowledge val add_vm_before_match_info : retroknowledge -> entry -> (bool->Cbytecodes.bytecodes->Cbytecodes.bytecodes) -> retroknowledge val add_vm_decompile_constant_info : retroknowledge -> entry -> (int -> constr) -> retroknowledge val clear_info : 
retroknowledge-> entry -> retroknowledge coq-8.4pl2/kernel/kernel.mllib0000640000175000001440000000042711651570606015422 0ustar notinusersNames Univ Esubst Term Mod_subst Sign Cbytecodes Copcodes Cemitcodes Declarations Retroknowledge Pre_env Cbytegen Environ Conv_oracle Closure Reduction Type_errors Entries Modops Inductive Typeops Indtypes Cooking Term_typing Subtyping Mod_typing Safe_typing Vm Csymtable Vconv coq-8.4pl2/kernel/safe_typing.ml0000640000175000001440000006720412121620060015750 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* set_engagement eng env | _ -> env type library_info = dir_path * Digest.t type safe_environment = { old : safe_environment; env : env; modinfo : module_info; modlabels : Labset.t; objlabels : Labset.t; revstruct : structure_body; univ : Univ.constraints; engagement : engagement option; imports : library_info list; loads : (module_path * module_body) list; local_retroknowledge : Retroknowledge.action list} let exists_modlabel l senv = Labset.mem l senv.modlabels let exists_objlabel l senv = Labset.mem l senv.objlabels let check_modlabel l senv = if exists_modlabel l senv then error_existing_label l let check_objlabel l senv = if exists_objlabel l senv then error_existing_label l let check_objlabels ls senv = Labset.iter (fun l -> check_objlabel l senv) ls let labels_of_mib mib = let add,get = let labels = ref Labset.empty in (fun id -> labels := Labset.add (label_of_id id) !labels), (fun () -> !labels) in let visit_mip mip = add mip.mind_typename; Array.iter add mip.mind_consnames in Array.iter visit_mip mib.mind_packets; get () (* a small hack to avoid variants and an unused case in all functions *) let rec empty_environment = { old = empty_environment; env = empty_env; modinfo = { modpath = initial_path; label = mk_label "_"; variant = NONE; resolver = empty_delta_resolver; resolver_of_param = empty_delta_resolver}; modlabels = Labset.empty; objlabels = Labset.empty; revstruct = []; univ = Univ.empty_constraint; engagement = None; imports = []; loads = []; local_retroknowledge = [] } let env_of_safe_env senv = senv.env let env_of_senv = env_of_safe_env let add_constraints cst senv = { senv with env = Environ.add_constraints cst senv.env; univ = Univ.union_constraints cst senv.univ } let constraints_of_sfb = function | SFBconst cb -> cb.const_constraints | SFBmind mib -> mib.mind_constraints | SFBmodtype mtb -> mtb.typ_constraints | SFBmodule mb -> mb.mod_constraints (* A generic function for adding a new field in a same environment. It also performs the corresponding [add_constraints]. 
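For instance (editor's illustration drawn from the code below), [add_constant] registers a constant body through [add_field (l, SFBconst cb) (C kn) senv] and [add_mind] registers an inductive block through [add_field (l, SFBmind mib) (I kn) senv]; in both cases the constraints carried by the field are added to the environment before the field itself.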
*) type generic_name = | C of constant | I of mutual_inductive | MT of module_path | M let add_field ((l,sfb) as field) gn senv = let mlabs,olabs = match sfb with | SFBmind mib -> let l = labels_of_mib mib in check_objlabels l senv; (Labset.empty,l) | SFBconst _ -> check_objlabel l senv; (Labset.empty, Labset.singleton l) | SFBmodule _ | SFBmodtype _ -> check_modlabel l senv; (Labset.singleton l, Labset.empty) in let senv = add_constraints (constraints_of_sfb sfb) senv in let env' = match sfb, gn with | SFBconst cb, C con -> Environ.add_constant con cb senv.env | SFBmind mib, I mind -> Environ.add_mind mind mib senv.env | SFBmodtype mtb, MT mp -> Environ.add_modtype mp mtb senv.env | SFBmodule mb, M -> Modops.add_module mb senv.env | _ -> assert false in { senv with env = env'; modlabels = Labset.union mlabs senv.modlabels; objlabels = Labset.union olabs senv.objlabels; revstruct = field :: senv.revstruct } (* Applying a certain function to the resolver of a safe environment *) let update_resolver f senv = let mi = senv.modinfo in { senv with modinfo = { mi with resolver = f mi.resolver }} (* universal lifting, used for the "get" operations mostly *) let retroknowledge f senv = Environ.retroknowledge f (env_of_senv senv) let register senv field value by_clause = (* todo : value closed, by_clause safe, by_clause of the proper type*) (* spiwack : updates the safe_env with the information that the register action has to be performed (again) when the environement is imported *) {senv with env = Environ.register senv.env field value; local_retroknowledge = Retroknowledge.RKRegister (field,value)::senv.local_retroknowledge } (* spiwack : currently unused *) let unregister senv field = (*spiwack: todo: do things properly or delete *) {senv with env = Environ.unregister senv.env field} (* /spiwack *) (* Insertion of section variables. They are now typed before being added to the environment. *) (* Same as push_named, but check that the variable is not already there. Should *not* be done in Environ because tactics add temporary hypothesis many many times, and the check performed here would cost too much. *) let safe_push_named (id,_,_ as d) env = let _ = try let _ = lookup_named id env in error ("Identifier "^string_of_id id^" already defined.") with Not_found -> () in Environ.push_named d env let push_named_def (id,b,topt) senv = let (c,typ,cst) = translate_local_def senv.env (b,topt) in let senv' = add_constraints cst senv in let env'' = safe_push_named (id,Some c,typ) senv'.env in (cst, {senv' with env=env''}) let push_named_assum (id,t) senv = let (t,cst) = translate_local_assum senv.env t in let senv' = add_constraints cst senv in let env'' = safe_push_named (id,None,t) senv'.env in (cst, {senv' with env=env''}) (* Insertion of constants and parameters in environment. *) type global_declaration = | ConstantEntry of constant_entry | GlobalRecipe of Cooking.recipe let add_constant dir l decl senv = let kn = make_con senv.modinfo.modpath dir l in let cb = match decl with | ConstantEntry ce -> translate_constant senv.env kn ce | GlobalRecipe r -> let cb = translate_recipe senv.env kn r in if dir = empty_dirpath then hcons_const_body cb else cb in let senv' = add_field (l,SFBconst cb) (C kn) senv in let senv'' = match cb.const_body with | Undef (Some lev) -> update_resolver (add_inline_delta_resolver (user_con kn) (lev,None)) senv' | _ -> senv' in kn, senv'' (* Insertion of inductive types. 
*) let add_mind dir l mie senv = if mie.mind_entry_inds = [] then anomaly "empty inductive types declaration"; (* this test is repeated by translate_mind *) let id = (List.nth mie.mind_entry_inds 0).mind_entry_typename in if l <> label_of_id id then anomaly ("the label of inductive packet and its first inductive"^ " type do not match"); let kn = make_mind senv.modinfo.modpath dir l in let mib = translate_mind senv.env kn mie in let mib = if mib.mind_hyps <> [] then mib else hcons_mind mib in let senv' = add_field (l,SFBmind mib) (I kn) senv in kn, senv' (* Insertion of module types *) let add_modtype l mte inl senv = let mp = MPdot(senv.modinfo.modpath, l) in let mtb = translate_module_type senv.env mp inl mte in let senv' = add_field (l,SFBmodtype mtb) (MT mp) senv in mp, senv' (* full_add_module adds module with universes and constraints *) let full_add_module mb senv = let senv = add_constraints mb.mod_constraints senv in { senv with env = Modops.add_module mb senv.env } (* Insertion of modules *) let add_module l me inl senv = let mp = MPdot(senv.modinfo.modpath, l) in let mb = translate_module senv.env mp inl me in let senv' = add_field (l,SFBmodule mb) M senv in let senv'' = match mb.mod_type with | SEBstruct _ -> update_resolver (add_delta_resolver mb.mod_delta) senv' | _ -> senv' in mp,mb.mod_delta,senv'' (* Interactive modules *) let start_module l senv = check_modlabel l senv; let mp = MPdot(senv.modinfo.modpath, l) in let modinfo = { modpath = mp; label = l; variant = STRUCT []; resolver = empty_delta_resolver; resolver_of_param = empty_delta_resolver} in mp, { old = senv; env = senv.env; modinfo = modinfo; modlabels = Labset.empty; objlabels = Labset.empty; revstruct = []; univ = Univ.empty_constraint; engagement = None; imports = senv.imports; loads = []; (* spiwack : not sure, but I hope it's correct *) local_retroknowledge = [] } let end_module l restype senv = let oldsenv = senv.old in let modinfo = senv.modinfo in let mp = senv.modinfo.modpath in let restype = Option.map (fun (res,inl) -> translate_module_type senv.env mp inl res) restype in let params,is_functor = match modinfo.variant with | NONE | LIBRARY _ | SIG _ -> error_no_module_to_end () | STRUCT params -> params, (List.length params > 0) in if l <> modinfo.label then error_incompatible_labels l modinfo.label; if not (empty_context senv.env) then error_non_empty_local_context None; let functorize_struct tb = List.fold_left (fun mtb (arg_id,arg_b) -> SEBfunctor(arg_id,arg_b,mtb)) tb params in let auto_tb = SEBstruct (List.rev senv.revstruct) in let mexpr,mod_typ,mod_typ_alg,resolver,cst = match restype with | None -> let mexpr = functorize_struct auto_tb in mexpr,mexpr,None,modinfo.resolver,empty_constraint | Some mtb -> let auto_mtb = { typ_mp = senv.modinfo.modpath; typ_expr = auto_tb; typ_expr_alg = None; typ_constraints = empty_constraint; typ_delta = empty_delta_resolver} in let cst = check_subtypes senv.env auto_mtb mtb in let mod_typ = functorize_struct mtb.typ_expr in let mexpr = functorize_struct auto_tb in let typ_alg = Option.map functorize_struct mtb.typ_expr_alg in mexpr,mod_typ,typ_alg,mtb.typ_delta,cst in let cst = union_constraints cst senv.univ in let mb = { mod_mp = mp; mod_expr = Some mexpr; mod_type = mod_typ; mod_type_alg = mod_typ_alg; mod_constraints = cst; mod_delta = resolver; mod_retroknowledge = senv.local_retroknowledge } in let newenv = oldsenv.env in let newenv = set_engagement_opt senv.engagement newenv in let senv'= {senv with env=newenv} in let senv' = List.fold_left (fun env (_,mb) 
-> full_add_module mb env) senv' (List.rev senv'.loads) in let newenv = Environ.add_constraints cst senv'.env in let newenv = Modops.add_module mb newenv in let modinfo = match mb.mod_type with SEBstruct _ -> { oldsenv.modinfo with resolver = add_delta_resolver resolver oldsenv.modinfo.resolver} | _ -> oldsenv.modinfo in mp,resolver,{ old = oldsenv.old; env = newenv; modinfo = modinfo; modlabels = Labset.add l oldsenv.modlabels; objlabels = oldsenv.objlabels; revstruct = (l,SFBmodule mb)::oldsenv.revstruct; univ = Univ.union_constraints senv'.univ oldsenv.univ; (* engagement is propagated to the upper level *) engagement = senv'.engagement; imports = senv'.imports; loads = senv'.loads@oldsenv.loads; local_retroknowledge = senv'.local_retroknowledge@oldsenv.local_retroknowledge } (* Include for module and module type*) let add_include me is_module inl senv = let sign,cst,resolver = if is_module then let sign,_,resolver,cst = translate_struct_include_module_entry senv.env senv.modinfo.modpath inl me in sign,cst,resolver else let mtb = translate_module_type senv.env senv.modinfo.modpath inl me in mtb.typ_expr,mtb.typ_constraints,mtb.typ_delta in let senv = add_constraints cst senv in let mp_sup = senv.modinfo.modpath in (* Include Self support *) let rec compute_sign sign mb resolver senv = match sign with | SEBfunctor(mbid,mtb,str) -> let cst_sub = check_subtypes senv.env mb mtb in let senv = add_constraints cst_sub senv in let mpsup_delta = inline_delta_resolver senv.env inl mp_sup mbid mtb mb.typ_delta in let subst = map_mbid mbid mp_sup mpsup_delta in let resolver = subst_codom_delta_resolver subst resolver in (compute_sign (subst_struct_expr subst str) mb resolver senv) | str -> resolver,str,senv in let resolver,sign,senv = compute_sign sign {typ_mp = mp_sup; typ_expr = SEBstruct (List.rev senv.revstruct); typ_expr_alg = None; typ_constraints = empty_constraint; typ_delta = senv.modinfo.resolver} resolver senv in let str = match sign with | SEBstruct(str_l) -> str_l | _ -> error ("You cannot Include a higher-order structure.") in let senv = update_resolver (add_delta_resolver resolver) senv in let add senv ((l,elem) as field) = let new_name = match elem with | SFBconst _ -> let kn = make_kn mp_sup empty_dirpath l in C (constant_of_delta_kn resolver kn) | SFBmind _ -> let kn = make_kn mp_sup empty_dirpath l in I (mind_of_delta_kn resolver kn) | SFBmodule _ -> M | SFBmodtype _ -> MT (MPdot(senv.modinfo.modpath, l)) in add_field field new_name senv in resolver,(List.fold_left add senv str) (* Adding parameters to modules or module types *) let add_module_parameter mbid mte inl senv = if senv.revstruct <> [] or senv.loads <> [] then anomaly "Cannot add a module parameter to a non empty module"; let mtb = translate_module_type senv.env (MPbound mbid) inl mte in let senv = full_add_module (module_body_of_type (MPbound mbid) mtb) senv in let new_variant = match senv.modinfo.variant with | STRUCT params -> STRUCT ((mbid,mtb) :: params) | SIG params -> SIG ((mbid,mtb) :: params) | _ -> anomaly "Module parameters can only be added to modules or signatures" in let resolver_of_param = match mtb.typ_expr with SEBstruct _ -> mtb.typ_delta | _ -> empty_delta_resolver in mtb.typ_delta, { old = senv.old; env = senv.env; modinfo = { senv.modinfo with variant = new_variant; resolver_of_param = add_delta_resolver resolver_of_param senv.modinfo.resolver_of_param}; modlabels = senv.modlabels; objlabels = senv.objlabels; revstruct = []; univ = senv.univ; engagement = senv.engagement; imports = senv.imports; 
loads = []; local_retroknowledge = senv.local_retroknowledge } (* Interactive module types *) let start_modtype l senv = check_modlabel l senv; let mp = MPdot(senv.modinfo.modpath, l) in let modinfo = { modpath = mp; label = l; variant = SIG []; resolver = empty_delta_resolver; resolver_of_param = empty_delta_resolver} in mp, { old = senv; env = senv.env; modinfo = modinfo; modlabels = Labset.empty; objlabels = Labset.empty; revstruct = []; univ = Univ.empty_constraint; engagement = None; imports = senv.imports; loads = [] ; (* spiwack: not 100% sure, but I think it should be like that *) local_retroknowledge = []} let end_modtype l senv = let oldsenv = senv.old in let modinfo = senv.modinfo in let params = match modinfo.variant with | LIBRARY _ | NONE | STRUCT _ -> error_no_modtype_to_end () | SIG params -> params in if l <> modinfo.label then error_incompatible_labels l modinfo.label; if not (empty_context senv.env) then error_non_empty_local_context None; let auto_tb = SEBstruct (List.rev senv.revstruct) in let mtb_expr = List.fold_left (fun mtb (arg_id,arg_b) -> SEBfunctor(arg_id,arg_b,mtb)) auto_tb params in let mp = MPdot (oldsenv.modinfo.modpath, l) in let newenv = oldsenv.env in let newenv = Environ.add_constraints senv.univ newenv in let newenv = set_engagement_opt senv.engagement newenv in let senv = {senv with env=newenv} in let senv = List.fold_left (fun env (mp,mb) -> full_add_module mb env) senv (List.rev senv.loads) in let mtb = {typ_mp = mp; typ_expr = mtb_expr; typ_expr_alg = None; typ_constraints = senv.univ; typ_delta = senv.modinfo.resolver} in let newenv = Environ.add_modtype mp mtb senv.env in mp, { old = oldsenv.old; env = newenv; modinfo = oldsenv.modinfo; modlabels = Labset.add l oldsenv.modlabels; objlabels = oldsenv.objlabels; revstruct = (l,SFBmodtype mtb)::oldsenv.revstruct; univ = Univ.union_constraints senv.univ oldsenv.univ; engagement = senv.engagement; imports = senv.imports; loads = senv.loads@oldsenv.loads; (* spiwack : if there is a bug with retroknowledge in nested modules it's likely to come from here *) local_retroknowledge = senv.local_retroknowledge@oldsenv.local_retroknowledge} let current_modpath senv = senv.modinfo.modpath let delta_of_senv senv = senv.modinfo.resolver,senv.modinfo.resolver_of_param (* Check that the engagement expected by a library matches the initial one *) let check_engagement env c = match Environ.engagement env, c with | Some ImpredicativeSet, Some ImpredicativeSet -> () | _, None -> () | _, Some ImpredicativeSet -> error "Needs option -impredicative-set." 
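(* Editor's note (illustration, not in the original source): concretely, [check_engagement] above accepts a library that declares no engagement, accepts a library requiring [ImpredicativeSet] only when the current environment was itself built with it, and fails with "Needs option -impredicative-set." otherwise. *)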
let set_engagement c senv = {senv with env = Environ.set_engagement c senv.env; engagement = Some c } (* Libraries = Compiled modules *) type compiled_library = dir_path * module_body * library_info list * engagement option (* We check that only initial state Require's were performed before [start_library] was called *) let is_empty senv = senv.revstruct = [] && senv.modinfo.modpath = initial_path && senv.modinfo.variant = NONE let start_library dir senv = if not (is_empty senv) then anomaly "Safe_typing.start_library: environment should be empty"; let dir_path,l = match (repr_dirpath dir) with [] -> anomaly "Empty dirpath in Safe_typing.start_library" | hd::tl -> make_dirpath tl, label_of_id hd in let mp = MPfile dir in let modinfo = {modpath = mp; label = l; variant = LIBRARY dir; resolver = empty_delta_resolver; resolver_of_param = empty_delta_resolver} in mp, { old = senv; env = senv.env; modinfo = modinfo; modlabels = Labset.empty; objlabels = Labset.empty; revstruct = []; univ = Univ.empty_constraint; engagement = None; imports = senv.imports; loads = []; local_retroknowledge = [] } let pack_module senv = {mod_mp=senv.modinfo.modpath; mod_expr=None; mod_type= SEBstruct (List.rev senv.revstruct); mod_type_alg=None; mod_constraints=empty_constraint; mod_delta=senv.modinfo.resolver; mod_retroknowledge=[]; } let export senv dir = let modinfo = senv.modinfo in begin match modinfo.variant with | LIBRARY dp -> if dir <> dp then anomaly "We are not exporting the right library!" | _ -> anomaly "We are not exporting the library" end; (*if senv.modinfo.params <> [] || senv.modinfo.restype <> None then (* error_export_simple *) (); *) let str = SEBstruct (List.rev senv.revstruct) in let mp = senv.modinfo.modpath in let mb = { mod_mp = mp; mod_expr = Some str; mod_type = str; mod_type_alg = None; mod_constraints = senv.univ; mod_delta = senv.modinfo.resolver; mod_retroknowledge = senv.local_retroknowledge} in mp, (dir,mb,senv.imports,engagement senv.env) let check_imports senv needed = let imports = senv.imports in let check (id,stamp) = try let actual_stamp = List.assoc id imports in if stamp <> actual_stamp then error ("Inconsistent assumptions over module "^(string_of_dirpath id)^".") with Not_found -> error ("Reference to unknown module "^(string_of_dirpath id)^".") in List.iter check needed (* we have an inefficiency: Since loaded files are added to the environment every time a module is closed, their components are calculated many times. Thic could be avoided in several ways: 1 - for each file create a dummy environment containing only this file's components, merge this environment with the global environment, and store for the future (instead of just its type) 2 - create "persistent modules" environment table in Environ add put loaded by side-effect once and for all (like it is done in OCaml). Would this be correct with respect to undo's and stuff ? *) let import (dp,mb,depends,engmt) digest senv = check_imports senv depends; check_engagement senv.env engmt; let mp = MPfile dp in let env = senv.env in let env = Environ.add_constraints mb.mod_constraints env in let env = Modops.add_module mb env in mp, { senv with env = env; modinfo = {senv.modinfo with resolver = add_delta_resolver mb.mod_delta senv.modinfo.resolver}; imports = (dp,digest)::senv.imports; loads = (mp,mb)::senv.loads } (* Store the body of modules' opaque constants inside a table. This module is used during the serialization and deserialization of vo files. 
By adding an indirection to the opaque constant definitions, we gain the ability not to load them. As these constant definitions are usually big terms, we save a deserialization time as well as some memory space. *) module LightenLibrary : sig type table type lightened_compiled_library val save : compiled_library -> lightened_compiled_library * table val load : load_proof:Flags.load_proofs -> table Lazy.t -> lightened_compiled_library -> compiled_library end = struct (* The table is implemented as an array of [constr_substituted]. Keys are hence integers. To avoid changing the [compiled_library] type, we brutally encode integers into [lazy_constr]. This isn't pretty, but shouldn't be dangerous since the produced structure [lightened_compiled_library] is abstract and only meant for writing to .vo via Marshal (which doesn't care about types). *) type table = constr_substituted array let key_as_lazy_constr (i:int) = (Obj.magic i : lazy_constr) let key_of_lazy_constr (c:lazy_constr) = (Obj.magic c : int) (* To avoid any future misuse of the lightened library that could interpret encoded keys as real [constr_substituted], we hide these kind of values behind an abstract datatype. *) type lightened_compiled_library = compiled_library (* Map a [compiled_library] to another one by just updating the opaque term [t] to [on_opaque_const_body t]. *) let traverse_library on_opaque_const_body = let rec traverse_module mb = match mb.mod_expr with None -> { mb with mod_expr = None; mod_type = traverse_modexpr mb.mod_type; } | Some impl when impl == mb.mod_type-> let mtb = traverse_modexpr mb.mod_type in { mb with mod_expr = Some mtb; mod_type = mtb; } | Some impl -> { mb with mod_expr = Option.map traverse_modexpr mb.mod_expr; mod_type = traverse_modexpr mb.mod_type; } and traverse_struct struc = let traverse_body (l,body) = (l,match body with | SFBconst cb when is_opaque cb -> SFBconst {cb with const_body = on_opaque_const_body cb.const_body} | (SFBconst _ | SFBmind _ ) as x -> x | SFBmodule m -> SFBmodule (traverse_module m) | SFBmodtype m -> SFBmodtype ({m with typ_expr = traverse_modexpr m.typ_expr})) in List.map traverse_body struc and traverse_modexpr = function | SEBfunctor (mbid,mty,mexpr) -> SEBfunctor (mbid, ({mty with typ_expr = traverse_modexpr mty.typ_expr}), traverse_modexpr mexpr) | SEBident mp as x -> x | SEBstruct (struc) -> SEBstruct (traverse_struct struc) | SEBapply (mexpr,marg,u) -> SEBapply (traverse_modexpr mexpr,traverse_modexpr marg,u) | SEBwith (seb,wdcl) -> SEBwith (traverse_modexpr seb,wdcl) in fun (dp,mb,depends,s) -> (dp,traverse_module mb,depends,s) (* To disburden a library from opaque definitions, we simply traverse it and add an indirection between the module body and its reference to a [const_body]. *) let save library = let ((insert : constant_def -> constant_def), (get_table : unit -> table)) = (* We use an integer as a key inside the table. *) let counter = ref (-1) in (* During the traversal, the table is implemented by a list to get constant time insertion. *) let opaque_definitions = ref [] in ((* Insert inside the table. *) (fun def -> let opaque_definition = match def with | OpaqueDef lc -> force_lazy_constr lc | _ -> assert false in incr counter; opaque_definitions := opaque_definition :: !opaque_definitions; OpaqueDef (key_as_lazy_constr !counter)), (* Get the final table representation. 
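For illustration (editor's note): [insert] hands out the keys 0, 1, 2, ... in insertion order and conses each opaque body onto [opaque_definitions], so reversing the accumulated list here yields an array in which index [i] holds exactly the body whose key [i] was embedded in the lightened library.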
*) (fun () -> Array.of_list (List.rev !opaque_definitions))) in let lightened_library = traverse_library insert library in (lightened_library, get_table ()) (* Loading is also a traversal that decodes the embedded keys that are inside the [lightened_library]. If the [load_proof] flag is set, we look up inside the table to graft the [constr_substituted]. Otherwise, we set the [const_body] field to [None]. *) let load ~load_proof (table : table Lazy.t) lightened_library = let decode_key = function | Undef _ | Def _ -> assert false | OpaqueDef k -> let k = key_of_lazy_constr k in let access key = try (Lazy.force table).(key) with e when Errors.noncritical e -> error "Error while retrieving an opaque body" in match load_proof with | Flags.Force -> let lc = Lazy.lazy_from_val (access k) in OpaqueDef (make_lazy_constr lc) | Flags.Lazy -> let lc = lazy (access k) in OpaqueDef (make_lazy_constr lc) | Flags.Dont -> Undef None in traverse_library decode_key lightened_library end type judgment = unsafe_judgment let j_val j = j.uj_val let j_type j = j.uj_type let safe_infer senv = infer (env_of_senv senv) let typing senv = Typeops.typing (env_of_senv senv) coq-8.4pl2/kernel/term.mli0000640000175000001440000005360512010532755014572 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* sorts_family (** {6 Useful types } *) (** {6 Existential variables } *) type existential_key = int (** {6 Meta variables } *) type metavariable = int (** {6 Case annotation } *) type case_style = LetStyle | IfStyle | LetPatternStyle | MatchStyle | RegularStyle (** infer printing form from the number of constructors *) type case_printing = { ind_nargs : int; (** length of the arity of the inductive type *) style : case_style } (** the integer is the number of real args, needed for reduction *) type case_info = { ci_ind : inductive; ci_npar : int; ci_cstr_ndecls : int array; (** number of real args of each constructor *) ci_pp_info : case_printing (** not interpreted by the kernel *) } (** {6 The type of constructions } *) type constr (** [eq_constr a b] is true if [a] equals [b] modulo alpha, casts, and application grouping *) val eq_constr : constr -> constr -> bool (** [types] is the same as [constr] but is intended to be used for documentation to indicate that such or such function specifically works with {e types} (i.e. terms whose type is a sort). (Rem: plural form since [type] is a reserved ML keyword) *) type types = constr (** {5 Functions for dealing with constr terms. } The following functions are intended to simplify and make uniform the manipulation of terms. Some of these functions may overlap with previous ones. *) (** {6 Term constructors. } *) (** Constructs a de Bruijn index (indices begin at 1) *) val mkRel : int -> constr (** Constructs a Variable *) val mkVar : identifier -> constr (** Constructs a patvar named "?n" *) val mkMeta : metavariable -> constr (** Constructs an existential variable *) type existential = existential_key * constr array val mkEvar : existential -> constr (** Constructs a sort *) val mkSort : sorts -> types val mkProp : types val mkSet : types val mkType : Univ.universe -> types (** This defines the strategy to use for verifying a Cast *) type cast_kind = VMcast | DEFAULTcast | REVERTcast (** Constructs the term [t1::t2], i.e. the term t{_ 1} cast with the type t{_ 2} (that means t2 is declared as the type of t1).
*) val mkCast : constr * cast_kind * constr -> constr (** Constructs the product [(x:t1)t2] *) val mkProd : name * types * types -> types val mkNamedProd : identifier -> types -> types -> types (** non-dependent product [t1 -> t2], an alias for [forall (_:t1), t2]. Beware [t_2] is NOT lifted. Eg: in context [A:Prop], [A->A] is built by [(mkArrow (mkRel 0) (mkRel 1))] *) val mkArrow : types -> types -> constr (** Constructs the abstraction \[x:t{_ 1}\]t{_ 2} *) val mkLambda : name * types * constr -> constr val mkNamedLambda : identifier -> types -> constr -> constr (** Constructs the product [let x = t1 : t2 in t3] *) val mkLetIn : name * constr * types * constr -> constr val mkNamedLetIn : identifier -> constr -> types -> constr -> constr (** [mkApp (f,[| t_1; ...; t_n |]] constructs the application {% $(f~t_1~\dots~t_n)$ %}. *) val mkApp : constr * constr array -> constr (** Constructs a constant The array of terms correspond to the variables introduced in the section *) val mkConst : constant -> constr (** Inductive types *) (** Constructs the ith (co)inductive type of the block named kn The array of terms correspond to the variables introduced in the section *) val mkInd : inductive -> constr (** Constructs the jth constructor of the ith (co)inductive type of the block named kn. The array of terms correspond to the variables introduced in the section *) val mkConstruct : constructor -> constr (** Constructs a destructor of inductive type. [mkCase ci p c ac] stand for match [c] as [x] in [I args] return [p] with [ac] presented as describe in [ci]. [p] stucture is [fun args x -> "return clause"] [ac]{^ ith} element is ith constructor case presented as {e lambda construct_args (without params). case_term } *) val mkCase : case_info * constr * constr * constr array -> constr (** If [recindxs = [|i1,...in|]] [funnames = [|f1,.....fn|]] [typarray = [|t1,...tn|]] [bodies = [|b1,.....bn|]] then [mkFix ((recindxs,i), funnames, typarray, bodies) ] constructs the {% $ %}i{% $ %}th function of the block (counting from 0) [Fixpoint f1 [ctx1] = b1 with f2 [ctx2] = b2 ... with fn [ctxn] = bn.] where the length of the {% $ %}j{% $ %}th context is {% $ %}ij{% $ %}. *) type rec_declaration = name array * types array * constr array type fixpoint = (int array * int) * rec_declaration val mkFix : fixpoint -> constr (** If [funnames = [|f1,.....fn|]] [typarray = [|t1,...tn|]] [bodies = [b1,.....bn]] then [mkCoFix (i, (funnames, typarray, bodies))] constructs the ith function of the block [CoFixpoint f1 = b1 with f2 = b2 ... with fn = bn.] *) type cofixpoint = int * rec_declaration val mkCoFix : cofixpoint -> constr (** {6 Concrete type for making pattern-matching. } *) (** [constr array] is an instance matching definitional [named_context] in the same order (i.e. 
last argument first) *) type 'constr pexistential = existential_key * 'constr array type ('constr, 'types) prec_declaration = name array * 'types array * 'constr array type ('constr, 'types) pfixpoint = (int array * int) * ('constr, 'types) prec_declaration type ('constr, 'types) pcofixpoint = int * ('constr, 'types) prec_declaration type ('constr, 'types) kind_of_term = | Rel of int | Var of identifier | Meta of metavariable | Evar of 'constr pexistential | Sort of sorts | Cast of 'constr * cast_kind * 'types | Prod of name * 'types * 'types | Lambda of name * 'types * 'constr | LetIn of name * 'constr * 'types * 'constr | App of 'constr * 'constr array | Const of constant | Ind of inductive | Construct of constructor | Case of case_info * 'constr * 'constr * 'constr array | Fix of ('constr, 'types) pfixpoint | CoFix of ('constr, 'types) pcofixpoint (** User view of [constr]. For [App], it is ensured there is at least one argument and the function is not itself an applicative term *) val kind_of_term : constr -> (constr, types) kind_of_term (** Experimental, used in Presburger contrib *) type ('constr, 'types) kind_of_type = | SortType of sorts | CastType of 'types * 'types | ProdType of name * 'types * 'types | LetInType of name * 'constr * 'types * 'types | AtomicType of 'constr * 'constr array val kind_of_type : types -> (constr, types) kind_of_type (** {6 Simple term case analysis. } *) val isRel : constr -> bool val isRelN : int -> constr -> bool val isVar : constr -> bool val isVarId : identifier -> constr -> bool val isInd : constr -> bool val isEvar : constr -> bool val isMeta : constr -> bool val isMetaOf : metavariable -> constr -> bool val isEvar_or_Meta : constr -> bool val isSort : constr -> bool val isCast : constr -> bool val isApp : constr -> bool val isLambda : constr -> bool val isLetIn : constr -> bool val isProd : constr -> bool val isConst : constr -> bool val isConstruct : constr -> bool val isFix : constr -> bool val isCoFix : constr -> bool val isCase : constr -> bool val is_Prop : constr -> bool val is_Set : constr -> bool val isprop : constr -> bool val is_Type : constr -> bool val iskind : constr -> bool val is_small : sorts -> bool (** {6 Term destructors } *) (** Destructor operations are partial functions and @raise Invalid_argument "dest*" if the term does not have the expected form. *) (** Destructs a de Bruijn index *) val destRel : constr -> int (** Destructs a metavariable *) val destMeta : constr -> metavariable (** Destructs a variable *) val destVar : constr -> identifier (** Destructs a sort. [is_Prop] recognizes the sort {% \textsf{%}Prop{% }%}, whereas [isprop] recognizes both {% \textsf{%}Prop{% }%} and {% \textsf{%}Set{% }%}.
*) val destSort : constr -> sorts (** Destructs a casted term *) val destCast : constr -> constr * cast_kind * constr (** Destructs the product {% $ %}(x:t_1)t_2{% $ %} *) val destProd : types -> name * types * types (** Destructs the abstraction {% $ %}[x:t_1]t_2{% $ %} *) val destLambda : constr -> name * types * constr (** Destructs the let {% $ %}[x:=b:t_1]t_2{% $ %} *) val destLetIn : constr -> name * constr * types * constr (** Destructs an application *) val destApp : constr -> constr * constr array (** Obsolete synonym of destApp *) val destApplication : constr -> constr * constr array (** Decompose any term as an applicative term; the list of args can be empty *) val decompose_app : constr -> constr * constr list (** Destructs a constant *) val destConst : constr -> constant (** Destructs an existential variable *) val destEvar : constr -> existential (** Destructs a (co)inductive type *) val destInd : constr -> inductive (** Destructs a constructor *) val destConstruct : constr -> constructor (** Destructs a [match c as x in I args return P with ... | Ci(...yij...) => ti | ... end] (or [let (..y1i..) := c as x in I args return P in t1], or [if c then t1 else t2]) @return [(info,c,fun args x => P,[|...|fun yij => ti| ...|])] where [info] is pretty-printing information *) val destCase : constr -> case_info * constr * constr * constr array (** Destructs the {% $ %}i{% $ %}th function of the block [Fixpoint f{_ 1} ctx{_ 1} = b{_ 1} with f{_ 2} ctx{_ 2} = b{_ 2} ... with f{_ n} ctx{_ n} = b{_ n}], where the length of the {% $ %}j{% $ %}th context is {% $ %}ij{% $ %}. *) val destFix : constr -> fixpoint val destCoFix : constr -> cofixpoint (** {6 Local } *) (** A {e declaration} has the form [(name,body,type)]. It is either an {e assumption} if [body=None] or a {e definition} if [body=Some actualbody]. 
It is referred by {e name} if [na] is an identifier or by {e relative index} if [na] is not an identifier (in the latter case, [na] is of type [name] but just for printing purpose) *) type named_declaration = identifier * constr option * types type rel_declaration = name * constr option * types val map_named_declaration : (constr -> constr) -> named_declaration -> named_declaration val map_rel_declaration : (constr -> constr) -> rel_declaration -> rel_declaration val fold_named_declaration : (constr -> 'a -> 'a) -> named_declaration -> 'a -> 'a val fold_rel_declaration : (constr -> 'a -> 'a) -> rel_declaration -> 'a -> 'a val exists_named_declaration : (constr -> bool) -> named_declaration -> bool val exists_rel_declaration : (constr -> bool) -> rel_declaration -> bool val for_all_named_declaration : (constr -> bool) -> named_declaration -> bool val for_all_rel_declaration : (constr -> bool) -> rel_declaration -> bool val eq_named_declaration : named_declaration -> named_declaration -> bool val eq_rel_declaration : rel_declaration -> rel_declaration -> bool (** {6 Contexts of declarations referred to by de Bruijn indices } *) (** In [rel_context], more recent declaration is on top *) type rel_context = rel_declaration list val empty_rel_context : rel_context val add_rel_decl : rel_declaration -> rel_context -> rel_context val lookup_rel : int -> rel_context -> rel_declaration val rel_context_length : rel_context -> int val rel_context_nhyps : rel_context -> int (** Constructs either [(x:t)c] or [[x=b:t]c] *) val mkProd_or_LetIn : rel_declaration -> types -> types val mkProd_wo_LetIn : rel_declaration -> types -> types val mkNamedProd_or_LetIn : named_declaration -> types -> types val mkNamedProd_wo_LetIn : named_declaration -> types -> types (** Constructs either [[x:t]c] or [[x=b:t]c] *) val mkLambda_or_LetIn : rel_declaration -> constr -> constr val mkNamedLambda_or_LetIn : named_declaration -> constr -> constr (** {6 Other term constructors. } *) (** [applist (f,args)] and its variants work as [mkApp] *) val applist : constr * constr list -> constr val applistc : constr -> constr list -> constr val appvect : constr * constr array -> constr val appvectc : constr -> constr array -> constr (** [prodn n l b] = [forall (x_1:T_1)...(x_n:T_n), b] where [l] is [(x_n,T_n)...(x_1,T_1)...]. *) val prodn : int -> (name * constr) list -> constr -> constr (** [compose_prod l b] @return [forall (x_1:T_1)...(x_n:T_n), b] where [l] is [(x_n,T_n)...(x_1,T_1)]. Inverse of [decompose_prod]. *) val compose_prod : (name * constr) list -> constr -> constr (** [lamn n l b] @return [fun (x_1:T_1)...(x_n:T_n) => b] where [l] is [(x_n,T_n)...(x_1,T_1)...]. *) val lamn : int -> (name * constr) list -> constr -> constr (** [compose_lam l b] @return [fun (x_1:T_1)...(x_n:T_n) => b] where [l] is [(x_n,T_n)...(x_1,T_1)]. 
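For example (editor's illustration), [compose_lam [(Name y, B); (Name x, A)] b] builds [fun (x:A) (y:B) => b], the binders being listed innermost first.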
Inverse of [it_destLam] *) val compose_lam : (name * constr) list -> constr -> constr (** [to_lambda n l] @return [fun (x_1:T_1)...(x_n:T_n) => T] where [l] is [forall (x_1:T_1)...(x_n:T_n), T] *) val to_lambda : int -> constr -> constr (** [to_prod n l] @return [forall (x_1:T_1)...(x_n:T_n), T] where [l] is [fun (x_1:T_1)...(x_n:T_n) => T] *) val to_prod : int -> constr -> constr (** pseudo-reduction rule *) (** [prod_appvect] [forall (x1:B1;...;xn:Bn), B] [a1...an] @return [B[a1...an]] *) val prod_appvect : constr -> constr array -> constr val prod_applist : constr -> constr list -> constr val it_mkLambda_or_LetIn : constr -> rel_context -> constr val it_mkProd_or_LetIn : types -> rel_context -> types (** {6 Other term destructors. } *) (** Transforms a product term {% $ %}(x_1:T_1)..(x_n:T_n)T{% $ %} into the pair {% $ %}([(x_n,T_n);...;(x_1,T_1)],T){% $ %}, where {% $ %}T{% $ %} is not a product. *) val decompose_prod : constr -> (name*constr) list * constr (** Transforms a lambda term {% $ %}[x_1:T_1]..[x_n:T_n]T{% $ %} into the pair {% $ %}([(x_n,T_n);...;(x_1,T_1)],T){% $ %}, where {% $ %}T{% $ %} is not a lambda. *) val decompose_lam : constr -> (name*constr) list * constr (** Given a positive integer n, transforms a product term {% $ %}(x_1:T_1)..(x_n:T_n)T{% $ %} into the pair {% $ %}([(xn,Tn);...;(x1,T1)],T){% $ %}. *) val decompose_prod_n : int -> constr -> (name * constr) list * constr (** Given a positive integer {% $ %}n{% $ %}, transforms a lambda term {% $ %}[x_1:T_1]..[x_n:T_n]T{% $ %} into the pair {% $ %}([(x_n,T_n);...;(x_1,T_1)],T){% $ %} *) val decompose_lam_n : int -> constr -> (name * constr) list * constr (** Extract the premisses and the conclusion of a term of the form "(xi:Ti) ... (xj:=cj:Tj) ..., T" where T is not a product nor a let *) val decompose_prod_assum : types -> rel_context * types (** Idem with lambda's *) val decompose_lam_assum : constr -> rel_context * constr (** Idem but extract the first [n] premisses *) val decompose_prod_n_assum : int -> types -> rel_context * types val decompose_lam_n_assum : int -> constr -> rel_context * constr (** [nb_lam] {% $ %}[x_1:T_1]...[x_n:T_n]c{% $ %} where {% $ %}c{% $ %} is not an abstraction gives {% $ %}n{% $ %} (casts are ignored) *) val nb_lam : constr -> int (** Similar to [nb_lam], but gives the number of products instead *) val nb_prod : constr -> int (** Returns the premisses/parameters of a type/term (let-in included) *) val prod_assum : types -> rel_context val lam_assum : constr -> rel_context (** Returns the first n-th premisses/parameters of a type/term (let included)*) val prod_n_assum : int -> types -> rel_context val lam_n_assum : int -> constr -> rel_context (** Remove the premisses/parameters of a type/term *) val strip_prod : types -> types val strip_lam : constr -> constr (** Remove the first n-th premisses/parameters of a type/term *) val strip_prod_n : int -> types -> types val strip_lam_n : int -> constr -> constr (** Remove the premisses/parameters of a type/term (including let-in) *) val strip_prod_assum : types -> types val strip_lam_assum : constr -> constr (** flattens application lists *) val collapse_appl : constr -> constr (** Removes recursively the casts around a term i.e. [strip_outer_cast (Cast (Cast ... (Cast c, t) ... ))] is [c]. 
*) val strip_outer_cast : constr -> constr (** Apply a function letting Casted types in place *) val under_casts : (constr -> constr) -> constr -> constr (** Apply a function under components of Cast if any *) val under_outer_cast : (constr -> constr) -> constr -> constr (** {6 ... } *) (** An "arity" is a term of the form [[x1:T1]...[xn:Tn]s] with [s] a sort. Such a term can canonically be seen as the pair of a context of types and of a sort *) type arity = rel_context * sorts (** Build an "arity" from its canonical form *) val mkArity : arity -> types (** Destructs an "arity" into its canonical form *) val destArity : types -> arity (** Tells if a term has the form of an arity *) val isArity : types -> bool (** {6 Occur checks } *) (** [closedn n M] is true iff [M] is a (deBruijn) closed term under n binders *) val closedn : int -> constr -> bool (** [closed0 M] is true iff [M] is a (deBruijn) closed term *) val closed0 : constr -> bool (** [noccurn n M] returns true iff [Rel n] does NOT occur in term [M] *) val noccurn : int -> constr -> bool (** [noccur_between n m M] returns true iff [Rel p] does NOT occur in term [M] for n <= p < n+m *) val noccur_between : int -> int -> constr -> bool (** Checking function for terms containing existential- or meta-variables. The function [noccur_with_meta] does not consider meta-variables applied to some terms (intended to be its local context) (for existential variables, it is necessarily the case) *) val noccur_with_meta : int -> int -> constr -> bool (** {6 Relocation and substitution } *) (** [exliftn el c] lifts [c] with lifting [el] *) val exliftn : Esubst.lift -> constr -> constr (** [liftn n k c] lifts by [n] indexes above or equal to [k] in [c] *) val liftn : int -> int -> constr -> constr (** [lift n c] lifts by [n] the positive indexes in [c] *) val lift : int -> constr -> constr (** [substnl [a1;...;an] k c] substitutes in parallel [a1],...,[an] for respectively [Rel(k+1)],...,[Rel(k+n)] in [c]; it relocates accordingly indexes in [a1],...,[an] *) val substnl : constr list -> int -> constr -> constr val substl : constr list -> constr -> constr val subst1 : constr -> constr -> constr val substnl_decl : constr list -> int -> rel_declaration -> rel_declaration val substl_decl : constr list -> rel_declaration -> rel_declaration val subst1_decl : constr -> rel_declaration -> rel_declaration val subst1_named_decl : constr -> named_declaration -> named_declaration val substl_named_decl : constr list -> named_declaration -> named_declaration val replace_vars : (identifier * constr) list -> constr -> constr val subst_var : identifier -> constr -> constr (** [subst_vars [id1;...;idn] t] substitute [VAR idj] by [Rel j] in [t] if two names are identical, the one of least indice is kept *) val subst_vars : identifier list -> constr -> constr (** [substn_vars n [id1;...;idn] t] substitute [VAR idj] by [Rel j+n-1] in [t] if two names are identical, the one of least indice is kept *) val substn_vars : int -> identifier list -> constr -> constr (** {6 Functionals working on the immediate subterm of a construction } *) (** [fold_constr f acc c] folds [f] on the immediate subterms of [c] starting from [acc] and proceeding from left to right according to the usual representation of the constructions; it is not recursive *) val fold_constr : ('a -> constr -> 'a) -> 'a -> constr -> 'a (** [map_constr f c] maps [f] on the immediate subterms of [c]; it is not recursive and the order with which subterms are processed is not specified *) val map_constr : (constr 
-> constr) -> constr -> constr (** [map_constr_with_binders g f n c] maps [f n] on the immediate subterms of [c]; it carries an extra data [n] (typically a lift index) which is processed by [g] (which typically add 1 to [n]) at each binder traversal; it is not recursive and the order with which subterms are processed is not specified *) val map_constr_with_binders : ('a -> 'a) -> ('a -> constr -> constr) -> 'a -> constr -> constr (** [iter_constr f c] iters [f] on the immediate subterms of [c]; it is not recursive and the order with which subterms are processed is not specified *) val iter_constr : (constr -> unit) -> constr -> unit (** [iter_constr_with_binders g f n c] iters [f n] on the immediate subterms of [c]; it carries an extra data [n] (typically a lift index) which is processed by [g] (which typically add 1 to [n]) at each binder traversal; it is not recursive and the order with which subterms are processed is not specified *) val iter_constr_with_binders : ('a -> 'a) -> ('a -> constr -> unit) -> 'a -> constr -> unit (** [compare_constr f c1 c2] compare [c1] and [c2] using [f] to compare the immediate subterms of [c1] of [c2] if needed; Cast's, binders name and Cases annotations are not taken into account *) val compare_constr : (constr -> constr -> bool) -> constr -> constr -> bool val constr_ord : constr -> constr -> int val hash_constr : constr -> int (*********************************************************************) val hcons_sorts : sorts -> sorts val hcons_constr : constr -> constr val hcons_types : types -> types (**************************************) type values coq-8.4pl2/kernel/univ.ml0000640000175000001440000007505412064106422014432 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | Set, _ -> -1 | _, Set -> 1 | Level (dp1, i1), Level (dp2, i2) -> if i1 < i2 then -1 else if i1 > i2 then 1 else compare dp1 dp2 let to_string = function | Set -> "Set" | Level (d,n) -> Names.string_of_dirpath d^"."^string_of_int n end module UniverseLMap = Map.Make (UniverseLevel) module UniverseLSet = Set.Make (UniverseLevel) type universe_level = UniverseLevel.t let compare_levels = UniverseLevel.compare (* An algebraic universe [universe] is either a universe variable [UniverseLevel.t] or a formal universe known to be greater than some universe variables and strictly greater than some (other) universe variables Universes variables denote universes initially present in the term to type-check and non variable algebraic universes denote the universes inferred while type-checking: it is either the successor of a universe present in the initial term to type-check or the maximum of two algebraic universes *) type universe = | Atom of UniverseLevel.t | Max of UniverseLevel.t list * UniverseLevel.t list let make_universe_level (m,n) = UniverseLevel.Level (m,n) let make_universe l = Atom l let make_univ c = Atom (make_universe_level c) let universe_level = function | Atom l -> Some l | Max _ -> None let pr_uni_level u = str (UniverseLevel.to_string u) let pr_uni = function | Atom u -> pr_uni_level u | Max ([],[u]) -> str "(" ++ pr_uni_level u ++ str ")+1" | Max (gel,gtl) -> str "max(" ++ hov 0 (prlist_with_sep pr_comma pr_uni_level gel ++ (if gel <> [] & gtl <> [] then pr_comma () else mt ()) ++ prlist_with_sep pr_comma (fun x -> str "(" ++ pr_uni_level x ++ str ")+1") gtl) ++ str ")" (* Returns the formal universe that lies juste above the universe variable u. 
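For instance (editor's note), [super (Atom u)] is [Max ([],[u])], which [pr_uni] displays as [(u)+1]; applying [super] to an already formal universe such as [Max _] is reported as an anomaly.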
Used to type the sort u. *) let super = function | Atom u -> Max ([],[u]) | Max _ -> anomaly ("Cannot take the successor of a non variable universe:\n"^ "(maybe a bugged tactic)") (* Returns the formal universe that is greater than the universes u and v. Used to type the products. *) let sup u v = match u,v with | Atom u, Atom v -> if UniverseLevel.compare u v = 0 then Atom u else Max ([u;v],[]) | u, Max ([],[]) -> u | Max ([],[]), v -> v | Atom u, Max (gel,gtl) -> Max (list_add_set u gel,gtl) | Max (gel,gtl), Atom v -> Max (list_add_set v gel,gtl) | Max (gel,gtl), Max (gel',gtl') -> let gel'' = list_union gel gel' in let gtl'' = list_union gtl gtl' in Max (list_subtract gel'' gtl'',gtl'') (* Comparison on this type is pointer equality *) type canonical_arc = { univ: UniverseLevel.t; lt: UniverseLevel.t list; le: UniverseLevel.t list; rank: int } let terminal u = {univ=u; lt=[]; le=[]; rank=0} (* A UniverseLevel.t is either an alias for another one, or a canonical one, for which we know the universes that are above *) type univ_entry = Canonical of canonical_arc | Equiv of UniverseLevel.t type universes = univ_entry UniverseLMap.t let enter_equiv_arc u v g = UniverseLMap.add u (Equiv v) g let enter_arc ca g = UniverseLMap.add ca.univ (Canonical ca) g (* The lower predicative level of the hierarchy that contains (impredicative) Prop and singleton inductive types *) let type0m_univ = Max ([],[]) let is_type0m_univ = function | Max ([],[]) -> true | _ -> false (* The level of predicative Set *) let type0_univ = Atom UniverseLevel.Set let is_type0_univ = function | Atom UniverseLevel.Set -> true | Max ([UniverseLevel.Set], []) -> warning "Non canonical Set"; true | u -> false let is_univ_variable = function | Atom a when a<>UniverseLevel.Set -> true | _ -> false (* When typing [Prop] and [Set], there is no constraint on the level, hence the definition of [type1_univ], the type of [Prop] *) let type1_univ = Max ([], [UniverseLevel.Set]) let initial_universes = UniverseLMap.empty let is_initial_universes = UniverseLMap.is_empty (* Every UniverseLevel.t has a unique canonical arc representative *) (* repr : universes -> UniverseLevel.t -> canonical_arc *) (* canonical representative : we follow the Equiv links *) let repr g u = let rec repr_rec u = let a = try UniverseLMap.find u g with Not_found -> anomalylabstrm "Univ.repr" (str"Universe " ++ pr_uni_level u ++ str" undefined") in match a with | Equiv v -> repr_rec v | Canonical arc -> arc in repr_rec u let can g = List.map (repr g) (* [safe_repr] also search for the canonical representative, but if the graph doesn't contain the searched universe, we add it. 
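Concretely (editor's illustration), looking up a level [u] absent from [g] creates the terminal arc [{univ=u; lt=[]; le=[]; rank=0}], enters it in the graph, and returns the extended graph together with that arc.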
*) let safe_repr g u = let rec safe_repr_rec u = match UniverseLMap.find u g with | Equiv v -> safe_repr_rec v | Canonical arc -> arc in try g, safe_repr_rec u with Not_found -> let can = terminal u in enter_arc can g, can (* reprleq : canonical_arc -> canonical_arc list *) (* All canonical arcv such that arcu<=arcv with arcv#arcu *) let reprleq g arcu = let rec searchrec w = function | [] -> w | v :: vl -> let arcv = repr g v in if List.memq arcv w || arcu==arcv then searchrec w vl else searchrec (arcv :: w) vl in searchrec [] arcu.le (* between : UniverseLevel.t -> canonical_arc -> canonical_arc list *) (* between u v = {w|u<=w<=v, w canonical} *) (* between is the most costly operation *) let between g arcu arcv = (* good are all w | u <= w <= v *) (* bad are all w | u <= w ~<= v *) (* find good and bad nodes in {w | u <= w} *) (* explore b u = (b or "u is good") *) let rec explore ((good, bad, b) as input) arcu = if List.memq arcu good then (good, bad, true) (* b or true *) else if List.memq arcu bad then input (* (good, bad, b or false) *) else let leq = reprleq g arcu in (* is some universe >= u good ? *) let good, bad, b_leq = List.fold_left explore (good, bad, false) leq in if b_leq then arcu::good, bad, true (* b or true *) else good, arcu::bad, b (* b or false *) in let good,_,_ = explore ([arcv],[],false) arcu in good (* We assume compare(u,v) = LE with v canonical (see compare below). In this case List.hd(between g u v) = repr u Otherwise, between g u v = [] *) type order = EQ | LT | LE | NLE (** [compare_neq] : is [arcv] in the transitive upward closure of [arcu] ? In [strict] mode, we fully distinguish between LE and LT, while in non-strict mode, we simply answer LE for both situations. If [arcv] is encountered in a LT part, we could directly answer without visiting unneeded parts of this transitive closure. In [strict] mode, if [arcv] is encountered in a LE part, we could only change the default answer (1st arg [c]) from NLE to LE, since a strict constraint may appear later. During the recursive traversal, [lt_done] and [le_done] are universes we have already visited, they do not contain [arcv]. The 4rd arg is [(lt_todo,le_todo)], two lists of universes not yet considered, known to be above [arcu], strictly or not. We use depth-first search, but the presence of [arcv] in [new_lt] is checked as soon as possible : this seems to be slightly faster on a test. *) let compare_neq strict g arcu arcv = let rec cmp c lt_done le_done = function | [],[] -> c | arc::lt_todo, le_todo -> if List.memq arc lt_done then cmp c lt_done le_done (lt_todo,le_todo) else let lt_new = can g (arc.lt@arc.le) in if List.memq arcv lt_new then if strict then LT else LE else cmp c (arc::lt_done) le_done (lt_new@lt_todo,le_todo) | [], arc::le_todo -> if arc == arcv then (* No need to continue inspecting universes above arc: if arcv is strictly above arc, then we would have a cycle. But we cannot answer LE yet, a stronger constraint may come later from [le_todo]. 
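(For example, if [arcv] is reached from [arcu] through [le] edges only but some universe still pending in [le_todo] reaches it through a [lt] edge, the correct answer in strict mode is LT, which is why the scan continues with LE as the new default answer.)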
*) if strict then cmp LE lt_done le_done ([],le_todo) else LE else if (List.memq arc lt_done) || (List.memq arc le_done) then cmp c lt_done le_done ([],le_todo) else let lt_new = can g arc.lt in if List.memq arcv lt_new then if strict then LT else LE else let le_new = can g arc.le in cmp c lt_done (arc::le_done) (lt_new, le_new@le_todo) in cmp NLE [] [] ([],[arcu]) let compare g arcu arcv = if arcu == arcv then EQ else compare_neq true g arcu arcv let is_leq g arcu arcv = arcu == arcv || (compare_neq false g arcu arcv = LE) let is_lt g arcu arcv = (compare g arcu arcv = LT) (* Invariants : compare(u,v) = EQ <=> compare(v,u) = EQ compare(u,v) = LT or LE => compare(v,u) = NLE compare(u,v) = NLE => compare(v,u) = NLE or LE or LT Adding u>=v is consistent iff compare(v,u) # LT and then it is redundant iff compare(u,v) # NLE Adding u>v is consistent iff compare(v,u) = NLE and then it is redundant iff compare(u,v) = LT *) (** * Universe checks [check_eq] and [check_geq], used in coqchk *) let compare_eq g u v = let g, arcu = safe_repr g u in let _, arcv = safe_repr g v in arcu == arcv type check_function = universes -> universe -> universe -> bool let incl_list cmp l1 l2 = List.for_all (fun x1 -> List.exists (fun x2 -> cmp x1 x2) l2) l1 let compare_list cmp l1 l2 = incl_list cmp l1 l2 && incl_list cmp l2 l1 let rec check_eq g u v = match (u,v) with | Atom ul, Atom vl -> compare_eq g ul vl | Max(ule,ult), Max(vle,vlt) -> (* TODO: remove elements of lt in le! *) compare_list (compare_eq g) ule vle && compare_list (compare_eq g) ult vlt | _ -> anomaly "check_eq" (* not complete! (Atom(u) = Max([u],[]) *) let compare_greater g strict u v = let g, arcu = safe_repr g u in let g, arcv = safe_repr g v in if strict then is_lt g arcv arcu else arcv == snd (safe_repr g UniverseLevel.Set) || is_leq g arcv arcu (* let compare_greater g strict u v = let b = compare_greater g strict u v in ppnl(str (if b then if strict then ">" else ">=" else "NOT >=")); b *) let check_geq g u v = match u, v with | Atom ul, Atom vl -> compare_greater g false ul vl | Atom ul, Max(le,lt) -> List.for_all (fun vl -> compare_greater g false ul vl) le && List.for_all (fun vl -> compare_greater g true ul vl) lt | _ -> anomaly "check_greater" (** Enforcing new constraints : [setlt], [setleq], [merge], [merge_disc] *) (* setlt : UniverseLevel.t -> UniverseLevel.t -> unit *) (* forces u > v *) (* this is normally an update of u in g rather than a creation. *) let setlt g arcu arcv = let arcu' = {arcu with lt=arcv.univ::arcu.lt} in enter_arc arcu' g, arcu' (* checks that non-redundant *) let setlt_if (g,arcu) v = let arcv = repr g v in if is_lt g arcu arcv then g, arcu else setlt g arcu arcv (* setleq : UniverseLevel.t -> UniverseLevel.t -> unit *) (* forces u >= v *) (* this is normally an update of u in g rather than a creation. 
*) let setleq g arcu arcv = let arcu' = {arcu with le=arcv.univ::arcu.le} in enter_arc arcu' g, arcu' (* checks that non-redundant *) let setleq_if (g,arcu) v = let arcv = repr g v in if is_leq g arcu arcv then g, arcu else setleq g arcu arcv (* merge : UniverseLevel.t -> UniverseLevel.t -> unit *) (* we assume compare(u,v) = LE *) (* merge u v forces u ~ v with repr u as canonical repr *) let merge g arcu arcv = (* we find the arc with the biggest rank, and we redirect all others to it *) let arcu, g, v = let best_ranked (max_rank, old_max_rank, best_arc, rest) arc = if arc.rank >= max_rank then (arc.rank, max_rank, arc, best_arc::rest) else (max_rank, old_max_rank, best_arc, arc::rest) in match between g arcu arcv with | [] -> anomaly "Univ.between" | arc::rest -> let (max_rank, old_max_rank, best_arc, rest) = List.fold_left best_ranked (arc.rank, min_int, arc, []) rest in if max_rank > old_max_rank then best_arc, g, rest else begin (* one redirected node also has max_rank *) let arcu = {best_arc with rank = max_rank + 1} in arcu, enter_arc arcu g, rest end in let redirect (g,w,w') arcv = let g' = enter_equiv_arc arcv.univ arcu.univ g in (g',list_unionq arcv.lt w,arcv.le@w') in let (g',w,w') = List.fold_left redirect (g,[],[]) v in let g_arcu = (g',arcu) in let g_arcu = List.fold_left setlt_if g_arcu w in let g_arcu = List.fold_left setleq_if g_arcu w' in fst g_arcu (* merge_disc : UniverseLevel.t -> UniverseLevel.t -> unit *) (* we assume compare(u,v) = compare(v,u) = NLE *) (* merge_disc u v forces u ~ v with repr u as canonical repr *) let merge_disc g arc1 arc2 = let arcu, arcv = if arc1.rank < arc2.rank then arc2, arc1 else arc1, arc2 in let arcu, g = if arc1.rank <> arc2.rank then arcu, g else let arcu = {arcu with rank = succ arcu.rank} in arcu, enter_arc arcu g in let g' = enter_equiv_arc arcv.univ arcu.univ g in let g_arcu = (g',arcu) in let g_arcu = List.fold_left setlt_if g_arcu arcv.lt in let g_arcu = List.fold_left setleq_if g_arcu arcv.le in fst g_arcu (* Universe inconsistency: error raised when trying to enforce a relation that would create a cycle in the graph of universes. *) type constraint_type = Lt | Le | Eq exception UniverseInconsistency of constraint_type * universe * universe let error_inconsistency o u v = raise (UniverseInconsistency (o,Atom u,Atom v)) (* enforce_univ_leq : UniverseLevel.t -> UniverseLevel.t -> unit *) (* enforce_univ_leq u v will force u<=v if possible, will fail otherwise *) let enforce_univ_leq u v g = let g,arcu = safe_repr g u in let g,arcv = safe_repr g v in if is_leq g arcu arcv then g else match compare g arcv arcu with | LT -> error_inconsistency Le u v | LE -> merge g arcv arcu | NLE -> fst (setleq g arcu arcv) | EQ -> anomaly "Univ.compare" (* enforce_univ_eq : UniverseLevel.t -> UniverseLevel.t -> unit *) (* enforce_univ_eq u v will force u=v if possible, will fail otherwise *) let enforce_univ_eq u v g = let g,arcu = safe_repr g u in let g,arcv = safe_repr g v in match compare g arcu arcv with | EQ -> g | LT -> error_inconsistency Eq u v | LE -> merge g arcu arcv | NLE -> (match compare g arcv arcu with | LT -> error_inconsistency Eq u v | LE -> merge g arcv arcu | NLE -> merge_disc g arcu arcv | EQ -> anomaly "Univ.compare") (* enforce_univ_lt u v will force u<v if possible, will fail otherwise *) let enforce_univ_lt u v g = let g,arcu = safe_repr g u in let g,arcv = safe_repr g v in match compare g arcu arcv with | LT -> g | LE -> fst (setlt g arcu arcv) | EQ -> error_inconsistency Lt u v | NLE -> if is_leq g arcv arcu then error_inconsistency Lt u v else fst (setlt g arcu arcv) (* Constraints and sets of constraints.
*) type univ_constraint = UniverseLevel.t * constraint_type * UniverseLevel.t let enforce_constraint cst g = match cst with | (u,Lt,v) -> enforce_univ_lt u v g | (u,Le,v) -> enforce_univ_leq u v g | (u,Eq,v) -> enforce_univ_eq u v g module Constraint = Set.Make( struct type t = univ_constraint let compare (u,c,v) (u',c',v') = let i = Pervasives.compare c c' in if i <> 0 then i else let i' = UniverseLevel.compare u u' in if i' <> 0 then i' else UniverseLevel.compare v v' end) type constraints = Constraint.t let empty_constraint = Constraint.empty let is_empty_constraint = Constraint.is_empty let union_constraints = Constraint.union type constraint_function = universe -> universe -> constraints -> constraints let constraint_add_leq v u c = if v = UniverseLevel.Set then c else Constraint.add (v,Le,u) c let enforce_geq u v c = match u, v with | Atom u, Atom v -> constraint_add_leq v u c | Atom u, Max (gel,gtl) -> let d = List.fold_right (fun v -> constraint_add_leq v u) gel c in List.fold_right (fun v -> Constraint.add (v,Lt,u)) gtl d | _ -> anomaly "A universe bound can only be a variable" let enforce_eq u v c = match (u,v) with | Atom u, Atom v -> Constraint.add (u,Eq,v) c | _ -> anomaly "A universe comparison can only happen between variables" let merge_constraints c g = Constraint.fold enforce_constraint c g (* Normalization *) let lookup_level u g = try Some (UniverseLMap.find u g) with Not_found -> None (** [normalize_universes g] returns a graph where all edges point directly to the canonical representent of their target. The output graph should be equivalent to the input graph from a logical point of view, but optimized. We maintain the invariant that the key of a [Canonical] element is its own name, by keeping [Equiv] edges (see the assertion)... I (Stéphane Glondu) am not sure if this plays a role in the rest of the module. *) let normalize_universes g = let rec visit u arc cache = match lookup_level u cache with | Some x -> x, cache | None -> match Lazy.force arc with | None -> u, UniverseLMap.add u u cache | Some (Canonical {univ=v; lt=_; le=_}) -> v, UniverseLMap.add u v cache | Some (Equiv v) -> let v, cache = visit v (lazy (lookup_level v g)) cache in v, UniverseLMap.add u v cache in let cache = UniverseLMap.fold (fun u arc cache -> snd (visit u (Lazy.lazy_from_val (Some arc)) cache)) g UniverseLMap.empty in let repr x = UniverseLMap.find x cache in let lrepr us = List.fold_left (fun e x -> UniverseLSet.add (repr x) e) UniverseLSet.empty us in let canonicalize u = function | Equiv _ -> Equiv (repr u) | Canonical {univ=v; lt=lt; le=le; rank=rank} -> assert (u == v); (* avoid duplicates and self-loops *) let lt = lrepr lt and le = lrepr le in let le = UniverseLSet.filter (fun x -> x != u && not (UniverseLSet.mem x lt)) le in UniverseLSet.iter (fun x -> assert (x != u)) lt; Canonical { univ = v; lt = UniverseLSet.elements lt; le = UniverseLSet.elements le; rank = rank } in UniverseLMap.mapi canonicalize g (** [check_sorted g sorted]: [g] being a universe graph, [sorted] being a map to levels, checks that all constraints in [g] are satisfied in [sorted]. 
*) let check_sorted g sorted = let get u = try UniverseLMap.find u sorted with | Not_found -> assert false in UniverseLMap.iter (fun u arc -> let lu = get u in match arc with | Equiv v -> assert (lu = get v) | Canonical {univ=u'; lt=lt; le=le} -> assert (u == u'); List.iter (fun v -> assert (lu <= get v)) le; List.iter (fun v -> assert (lu < get v)) lt) g (** Bellman-Ford algorithm with a few customizations: - [weight(eq|le) = 0], [weight(lt) = -1] - a [le] edge is initially added from [bottom] to all other vertices, and [bottom] is used as the source vertex *) let bellman_ford bottom g = assert (lookup_level bottom g = None); let ( << ) a b = match a, b with | _, None -> true | None, _ -> false | Some x, Some y -> x < y and ( ++ ) a y = match a with | None -> None | Some x -> Some (x-y) and push u x m = match x with | None -> m | Some y -> UniverseLMap.add u y m in let relax u v uv distances = let x = lookup_level u distances ++ uv in if x << lookup_level v distances then push v x distances else distances in let init = UniverseLMap.add bottom 0 UniverseLMap.empty in let vertices = UniverseLMap.fold (fun u arc res -> let res = UniverseLSet.add u res in match arc with | Equiv e -> UniverseLSet.add e res | Canonical {univ=univ; lt=lt; le=le} -> assert (u == univ); let add res v = UniverseLSet.add v res in let res = List.fold_left add res le in let res = List.fold_left add res lt in res) g UniverseLSet.empty in let g = let node = Canonical { univ = bottom; lt = []; le = UniverseLSet.elements vertices; rank = 0 } in UniverseLMap.add bottom node g in let rec iter count accu = if count <= 0 then accu else let accu = UniverseLMap.fold (fun u arc res -> match arc with | Equiv e -> relax e u 0 (relax u e 0 res) | Canonical {univ=univ; lt=lt; le=le} -> assert (u == univ); let res = List.fold_left (fun res v -> relax u v 0 res) res le in let res = List.fold_left (fun res v -> relax u v 1 res) res lt in res) g accu in iter (count-1) accu in let distances = iter (UniverseLSet.cardinal vertices) init in let () = UniverseLMap.iter (fun u arc -> let lu = lookup_level u distances in match arc with | Equiv v -> let lv = lookup_level v distances in assert (not (lu << lv) && not (lv << lu)) | Canonical {univ=univ; lt=lt; le=le} -> assert (u == univ); List.iter (fun v -> assert (not (lu ++ 0 << lookup_level v distances))) le; List.iter (fun v -> assert (not (lu ++ 1 << lookup_level v distances))) lt) g in distances (** [sort_universes g] builds a map from universes in [g] to natural numbers. It outputs a graph containing equivalence edges from each level appearing in [g] to [Type.n], and [lt] edges between the [Type.n]s. The output graph should imply the input graph (and the implication will be strict most of the time), but is not necessarily minimal. Note: the result is unspecified if the input graph already contains [Type.n] nodes (calling a module Type is probably a bad idea anyway). 
*) let sort_universes orig = let mp = Names.make_dirpath [Names.id_of_string "Type"] in let rec make_level accu g i = let type0 = UniverseLevel.Level (mp, i) in let distances = bellman_ford type0 g in let accu, continue = UniverseLMap.fold (fun u x (accu, continue) -> let continue = continue || x < 0 in let accu = if x = 0 && u != type0 then UniverseLMap.add u i accu else accu in accu, continue) distances (accu, false) in let filter x = not (UniverseLMap.mem x accu) in let push g u = if UniverseLMap.mem u g then g else UniverseLMap.add u (Equiv u) g in let g = UniverseLMap.fold (fun u arc res -> match arc with | Equiv v as x -> begin match filter u, filter v with | true, true -> UniverseLMap.add u x res | true, false -> push res u | false, true -> push res v | false, false -> res end | Canonical {univ=v; lt=lt; le=le; rank=r} -> assert (u == v); if filter u then let lt = List.filter filter lt in let le = List.filter filter le in UniverseLMap.add u (Canonical {univ=u; lt=lt; le=le; rank=r}) res else let res = List.fold_left (fun g u -> if filter u then push g u else g) res lt in let res = List.fold_left (fun g u -> if filter u then push g u else g) res le in res) g UniverseLMap.empty in if continue then make_level accu g (i+1) else i, accu in let max, levels = make_level UniverseLMap.empty orig 0 in (* defensively check that the result makes sense *) check_sorted orig levels; let types = Array.init (max+1) (fun x -> UniverseLevel.Level (mp, x)) in let g = UniverseLMap.map (fun x -> Equiv types.(x)) levels in let g = let rec aux i g = if i < max then let u = types.(i) in let g = UniverseLMap.add u (Canonical { univ = u; le = []; lt = [types.(i+1)]; rank = 1 }) g in aux (i+1) g else g in aux 0 g in g (**********************************************************************) (* Tools for sort-polymorphic inductive types *) (* Temporary inductive type levels *) let fresh_level = let n = ref 0 in fun () -> incr n; UniverseLevel.Level (Names.make_dirpath [],!n) let fresh_local_univ () = Atom (fresh_level ()) (* Miscellaneous functions to remove or test local univ assumed to occur only in the le constraints *) let make_max = function | ([u],[]) -> Atom u | (le,lt) -> Max (le,lt) let remove_large_constraint u = function | Atom u' as x -> if u = u' then Max ([],[]) else x | Max (le,lt) -> make_max (list_remove u le,lt) let is_direct_constraint u = function | Atom u' -> u = u' | Max (le,lt) -> List.mem u le (* Solve a system of universe constraint of the form u_s11, ..., u_s1p1, w1 <= u1 ... u_sn1, ..., u_snpn, wn <= un where - the ui (1 <= i <= n) are universe variables, - the sjk select subsets of the ui for each equations, - the wi are arbitrary complex universes that do not mention the ui. 
*) let is_direct_sort_constraint s v = match s with | Some u -> is_direct_constraint u v | None -> false let solve_constraints_system levels level_bounds = let levels = Array.map (Option.map (function Atom u -> u | _ -> anomaly "expects Atom")) levels in let v = Array.copy level_bounds in let nind = Array.length v in for i=0 to nind-1 do for j=0 to nind-1 do if i<>j & is_direct_sort_constraint levels.(j) v.(i) then v.(i) <- sup v.(i) level_bounds.(j) done; for j=0 to nind-1 do match levels.(j) with | Some u -> v.(i) <- remove_large_constraint u v.(i) | None -> () done done; v let subst_large_constraint u u' v = match u with | Atom u -> if is_direct_constraint u v then sup u' (remove_large_constraint u v) else v | _ -> anomaly "expect a universe level" let subst_large_constraints = List.fold_right (fun (u,u') -> subst_large_constraint u u') let no_upper_constraints u cst = match u with | Atom u -> Constraint.for_all (fun (u1,_,_) -> u1 <> u) cst | Max _ -> anomaly "no_upper_constraints" (* Is u mentionned in v (or equals to v) ? *) let univ_depends u v = match u, v with | Atom u, Atom v -> u = v | Atom u, Max (gel,gtl) -> List.mem u gel || List.mem u gtl | _ -> anomaly "univ_depends given a non-atomic 1st arg" (* Pretty-printing *) let pr_arc = function | _, Canonical {univ=u; lt=[]; le=[]} -> mt () | _, Canonical {univ=u; lt=lt; le=le} -> pr_uni_level u ++ str " " ++ v 0 (prlist_with_sep pr_spc (fun v -> str "< " ++ pr_uni_level v) lt ++ (if lt <> [] & le <> [] then spc () else mt()) ++ prlist_with_sep pr_spc (fun v -> str "<= " ++ pr_uni_level v) le) ++ fnl () | u, Equiv v -> pr_uni_level u ++ str " = " ++ pr_uni_level v ++ fnl () let pr_universes g = let graph = UniverseLMap.fold (fun u a l -> (u,a)::l) g [] in prlist pr_arc graph let pr_constraints c = Constraint.fold (fun (u1,op,u2) pp_std -> let op_str = match op with | Lt -> " < " | Le -> " <= " | Eq -> " = " in pp_std ++ pr_uni_level u1 ++ str op_str ++ pr_uni_level u2 ++ fnl () ) c (str "") (* Dumping constraints to a file *) let dump_universes output g = let dump_arc u = function | Canonical {univ=u; lt=lt; le=le} -> let u_str = UniverseLevel.to_string u in List.iter (fun v -> output Lt u_str (UniverseLevel.to_string v)) lt; List.iter (fun v -> output Le u_str (UniverseLevel.to_string v)) le | Equiv v -> output Eq (UniverseLevel.to_string u) (UniverseLevel.to_string v) in UniverseLMap.iter dump_arc g (* Hash-consing *) module Hunivlevel = Hashcons.Make( struct type t = universe_level type u = Names.dir_path -> Names.dir_path let hash_sub hdir = function | UniverseLevel.Set -> UniverseLevel.Set | UniverseLevel.Level (d,n) -> UniverseLevel.Level (hdir d,n) let equal l1 l2 = match l1,l2 with | UniverseLevel.Set, UniverseLevel.Set -> true | UniverseLevel.Level (d,n), UniverseLevel.Level (d',n') -> n == n' && d == d' | _ -> false let hash = Hashtbl.hash end) module Huniv = Hashcons.Make( struct type t = universe type u = universe_level -> universe_level let hash_sub hdir = function | Atom u -> Atom (hdir u) | Max (gel,gtl) -> Max (List.map hdir gel, List.map hdir gtl) let equal u v = match u, v with | Atom u, Atom v -> u == v | Max (gel,gtl), Max (gel',gtl') -> (list_for_all2eq (==) gel gel') && (list_for_all2eq (==) gtl gtl') | _ -> false let hash = Hashtbl.hash end) let hcons_univlevel = Hashcons.simple_hcons Hunivlevel.f Names.hcons_dirpath let hcons_univ = Hashcons.simple_hcons Huniv.f hcons_univlevel module Hconstraint = Hashcons.Make( struct type t = univ_constraint type u = universe_level -> universe_level let hash_sub hul 
(l1,k,l2) = (hul l1, k, hul l2) let equal (l1,k,l2) (l1',k',l2') = l1 == l1' && k = k' && l2 == l2' let hash = Hashtbl.hash end) module Hconstraints = Hashcons.Make( struct type t = constraints type u = univ_constraint -> univ_constraint let hash_sub huc s = Constraint.fold (fun x -> Constraint.add (huc x)) s Constraint.empty let equal s s' = list_for_all2eq (==) (Constraint.elements s) (Constraint.elements s') let hash = Hashtbl.hash end) let hcons_constraint = Hashcons.simple_hcons Hconstraint.f hcons_univlevel let hcons_constraints = Hashcons.simple_hcons Hconstraints.f hcons_constraint coq-8.4pl2/kernel/byterun/0000750000175000001440000000000012127276526014610 5ustar notinuserscoq-8.4pl2/kernel/byterun/coq_memory.h0000640000175000001440000000407211447122051017122 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ #ifndef _COQ_MEMORY_ #define _COQ_MEMORY_ #include #include #include #include #include #define Coq_stack_size (4096 * sizeof(value)) #define Coq_stack_threshold (256 * sizeof(value)) #define Coq_global_data_Size (4096 * sizeof(value)) #define Coq_max_stack_size (256 * 1024) #define TRANSP 0 #define BOXED 1 /* stack */ extern value * coq_stack_low; extern value * coq_stack_high; extern value * coq_stack_threshold; /* global_data */ extern value coq_global_data; extern value coq_global_boxed; extern int coq_all_transp; extern value coq_atom_tbl; extern int drawinstr; /* interp state */ extern value * coq_sp; /* Some predefined pointer code */ extern code_t accumulate; /* functions over global environment */ value coq_static_alloc(value size); /* ML */ value init_coq_vm(value unit); /* ML */ value re_init_coq_vm(value unit); /* ML */ void realloc_coq_stack(asize_t required_space); value get_coq_global_data(value unit); /* ML */ value realloc_coq_global_data(value size); /* ML */ value get_coq_global_boxed(value unit); value realloc_coq_global_boxed(value size); /* ML */ value get_coq_atom_tbl(value unit); /* ML */ value realloc_coq_atom_tbl(value size); /* ML */ value coq_set_transp_value(value transp); /* ML */ value get_coq_transp_value(value unit); /* ML */ #endif /* _COQ_MEMORY_ */ value coq_set_drawinstr(value unit); coq-8.4pl2/kernel/byterun/int64_native.h0000640000175000001440000000407411366307247017300 0ustar notinusers/***********************************************************************/ /* */ /* Objective Caml */ /* */ /* Xavier Leroy, projet Cristal, INRIA Rocquencourt */ /* */ /* Copyright 2002 Institut National de Recherche en Informatique et */ /* en Automatique. All rights reserved. This file is distributed */ /* under the terms of the GNU Library General Public License, with */ /* the special exception on linking described in file ../LICENSE. 
*/ /* */ /***********************************************************************/ /* Wrapper macros around native 64-bit integer arithmetic, so that it has the same interface as the software emulation provided in int64_emul.h */ #ifndef CAML_INT64_NATIVE_H #define CAML_INT64_NATIVE_H #define I64_literal(hi,lo) ((int64)(hi) << 32 | (lo)) #define I64_compare(x,y) (((x) > (y)) - ((x) < (y))) #define I64_ult(x,y) ((uint64)(x) < (uint64)(y)) #define I64_neg(x) (-(x)) #define I64_add(x,y) ((x) + (y)) #define I64_sub(x,y) ((x) - (y)) #define I64_mul(x,y) ((x) * (y)) #define I64_is_zero(x) ((x) == 0) #define I64_is_negative(x) ((x) < 0) #define I64_div(x,y) ((x) / (y)) #define I64_mod(x,y) ((x) % (y)) #define I64_udivmod(x,y,quo,rem) \ (*(rem) = (uint64)(x) % (uint64)(y), \ *(quo) = (uint64)(x) / (uint64)(y)) #define I64_and(x,y) ((x) & (y)) #define I64_or(x,y) ((x) | (y)) #define I64_xor(x,y) ((x) ^ (y)) #define I64_lsl(x,y) ((x) << (y)) #define I64_asr(x,y) ((x) >> (y)) #define I64_lsr(x,y) ((uint64)(x) >> (y)) #define I64_to_intnat(x) ((intnat) (x)) #define I64_of_intnat(x) ((intnat) (x)) #define I64_to_int32(x) ((int32) (x)) #define I64_of_int32(x) ((int64) (x)) #define I64_to_double(x) ((double)(x)) #define I64_of_double(x) ((int64)(x)) #endif /* CAML_INT64_NATIVE_H */ coq-8.4pl2/kernel/byterun/coq_gc.h0000640000175000001440000000411511057726532016214 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ #ifndef _COQ_CAML_GC_ #define _COQ_CAML_GC_ #include #include typedef void (*scanning_action) (value, value *); CAMLextern char *young_ptr; CAMLextern char *young_limit; CAMLextern void (*scan_roots_hook) (scanning_action); CAMLextern void minor_collection (void); #define Caml_white (0 << 8) #define Caml_black (3 << 8) #define Make_header(wosize, tag, color) \ (((header_t) (((header_t) (wosize) << 10) \ + (color) \ + (tag_t) (tag))) \ ) #define Alloc_small(result, wosize, tag) do{ \ young_ptr -= Bhsize_wosize (wosize); \ if (young_ptr < young_limit){ \ young_ptr += Bhsize_wosize (wosize); \ Setup_for_gc; \ minor_collection (); \ Restore_after_gc; \ young_ptr -= Bhsize_wosize (wosize); \ } \ Hd_hp (young_ptr) = Make_header ((wosize), (tag), Caml_black); \ (result) = Val_hp (young_ptr); \ }while(0) #endif /*_COQ_CAML_GC_ */ coq-8.4pl2/kernel/byterun/coq_values.c0000640000175000001440000000407510537323613017115 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ #include #include "coq_fix_code.h" #include "coq_instruct.h" #include "coq_memory.h" #include "coq_values.h" #include /* KIND OF VALUES */ #define Setup_for_gc #define Restore_after_gc value coq_kind_of_closure(value v) { opcode_t * c; int res; int is_app = 0; c = Code_val(v); if (Is_instruction(c, GRAB)) return Val_int(0); if (Is_instruction(c, RESTART)) {is_app = 1; c++;} if (Is_instruction(c, GRABREC)) return Val_int(1+is_app); if (Is_instruction(c, MAKEACCU)) return Val_int(3); return Val_int(0); } /* DESTRUCT ACCU */ value coq_closure_arity(value clos) { opcode_t * c = Code_val(clos); if (Is_instruction(c,RESTART)) { c++; if (Is_instruction(c,GRAB)) return Val_int(3 + c[1] - 
Wosize_val(clos)); else { if (Wosize_val(clos) != 2) failwith("Coq Values : coq_closure_arity"); return Val_int(1); } } if (Is_instruction(c,GRAB)) return Val_int(1 + c[1]); return Val_int(1); } /* Fonction sur les fix */ value coq_offset(value v) { if (Tag_val(v) == Closure_tag) return Val_int(0); else return Val_long(-Wsize_bsize(Infix_offset_val(v))); } value coq_offset_closure(value v, value offset){ return (value)&Field(v, Int_val(offset)); } value coq_offset_tcode(value code,value offset){ return((value)((code_t)code + Int_val(offset))); } value coq_int_tcode(value code, value offset) { return Val_int(*((code_t) code + Int_val(offset))); } coq-8.4pl2/kernel/byterun/coq_fix_code.c0000640000175000001440000001417111057726532017401 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ /* Arnaud Spiwack: expanded the virtual machine with operators used for fast computation of bounded (31bits) integers */ #include #include #include #include #include #include #include #include "coq_instruct.h" #include "coq_fix_code.h" #ifdef THREADED_CODE char ** coq_instr_table; char * coq_instr_base; int arity[STOP+1]; void init_arity () { /* instruction with zero operand */ arity[ACC0]=arity[ACC1]=arity[ACC2]=arity[ACC3]=arity[ACC4]=arity[ACC5]= arity[ACC6]=arity[ACC7]=arity[PUSH]=arity[PUSHACC0]=arity[PUSHACC1]= arity[PUSHACC2]=arity[PUSHACC3]=arity[PUSHACC4]=arity[PUSHACC5]= arity[PUSHACC6]=arity[PUSHACC7]=arity[ENVACC1]=arity[ENVACC2]= arity[ENVACC3]=arity[ENVACC4]=arity[PUSHENVACC1]=arity[PUSHENVACC2]= arity[PUSHENVACC3]=arity[PUSHENVACC4]=arity[APPLY1]=arity[APPLY2]= arity[APPLY3]=arity[RESTART]=arity[OFFSETCLOSUREM2]= arity[OFFSETCLOSURE0]=arity[OFFSETCLOSURE2]=arity[PUSHOFFSETCLOSUREM2]= arity[PUSHOFFSETCLOSURE0]=arity[PUSHOFFSETCLOSURE2]= arity[GETFIELD0]=arity[GETFIELD1]=arity[SETFIELD0]=arity[SETFIELD1]= arity[CONST0]=arity[CONST1]=arity[CONST2]=arity[CONST3]= arity[PUSHCONST0]=arity[PUSHCONST1]=arity[PUSHCONST2]=arity[PUSHCONST3]= arity[ACCUMULATE]=arity[STOP]=arity[MAKEPROD]= arity[ADDINT31]=arity[ADDCINT31]=arity[ADDCARRYCINT31]= arity[SUBINT31]=arity[SUBCINT31]=arity[SUBCARRYCINT31]= arity[MULCINT31]=arity[MULINT31]=arity[COMPAREINT31]= arity[DIV21INT31]=arity[DIVINT31]=arity[ADDMULDIVINT31]= arity[HEAD0INT31]=arity[TAIL0INT31]= arity[COMPINT31]=arity[DECOMPINT31]=0; /* instruction with one operand */ arity[ACC]=arity[PUSHACC]=arity[POP]=arity[ENVACC]=arity[PUSHENVACC]= arity[PUSH_RETADDR]=arity[APPLY]=arity[APPTERM1]=arity[APPTERM2]= arity[APPTERM3]=arity[RETURN]=arity[GRAB]=arity[OFFSETCLOSURE]= arity[PUSHOFFSETCLOSURE]=arity[GETGLOBAL]=arity[PUSHGETGLOBAL]= arity[MAKEBLOCK1]=arity[MAKEBLOCK2]=arity[MAKEBLOCK3]=arity[MAKEBLOCK4]= arity[MAKEACCU]=arity[CONSTINT]=arity[PUSHCONSTINT]=arity[GRABREC]= arity[PUSHFIELDS]=arity[GETFIELD]=arity[SETFIELD]=arity[ACCUMULATECOND]= arity[BRANCH]=arity[ISCONST]= 1; /* instruction with two operands */ arity[APPTERM]=arity[MAKEBLOCK]=arity[CLOSURE]= arity[ARECONST]=2; /* instruction with four operands */ arity[MAKESWITCHBLOCK]=4; /* instruction with arbitrary operands */ arity[CLOSUREREC]=arity[CLOSURECOFIX]=arity[SWITCH]=0; } #endif /* THREADED_CODE */ void * coq_stat_alloc (asize_t sz) { void * result = malloc (sz); if (result == NULL) raise_out_of_memory (); return result; } value coq_makeaccu (value i) { code_t q; code_t 
res = coq_stat_alloc(8); q = res; *q++ = VALINSTR(MAKEACCU); *q = (opcode_t)Int_val(i); return (value)res; } value coq_accucond (value i) { code_t q; code_t res = coq_stat_alloc(8); q = res; *q++ = VALINSTR(ACCUMULATECOND); *q = (opcode_t)Int_val(i); return (value)res; } value coq_pushpop (value i) { code_t res; int n; n = Int_val(i); if (n == 0) { res = coq_stat_alloc(4); *res = VALINSTR(STOP); return (value)res; } else { code_t q; res = coq_stat_alloc(12); q = res; *q++ = VALINSTR(POP); *q++ = (opcode_t)n; *q = VALINSTR(STOP); return (value)res; } } value coq_is_accumulate_code(value code){ code_t q; int res; q = (code_t)code; res = Is_instruction(q,ACCUMULATECOND) || Is_instruction(q,ACCUMULATE); return Val_bool(res); } #ifdef ARCH_BIG_ENDIAN #define Reverse_32(dst,src) { \ char * _p, * _q; \ char _a, _b; \ _p = (char *) (src); \ _q = (char *) (dst); \ _a = _p[0]; \ _b = _p[1]; \ _q[0] = _p[3]; \ _q[1] = _p[2]; \ _q[3] = _a; \ _q[2] = _b; \ } #define COPY32(dst,src) Reverse_32(dst,src) #else #define COPY32(dst,src) (*dst=*src) #endif /* ARCH_BIG_ENDIAN */ value coq_tcode_of_code (value code, value size) { code_t p, q, res; asize_t len = (asize_t) Long_val(size); res = coq_stat_alloc(len); q = res; len /= sizeof(opcode_t); for (p = (code_t)code; p < (code_t)code + len; /*nothing*/) { opcode_t instr; COPY32(&instr,p); p++; if (instr < 0 || instr > STOP){ instr = STOP; }; *q++ = VALINSTR(instr); if (instr == SWITCH) { uint32 i, sizes, const_size, block_size; COPY32(q,p); p++; sizes=*q++; const_size = sizes & 0xFFFF; block_size = sizes >> 16; sizes = const_size + block_size; for(i=0; i #include #include "coq_gc.h" #include "coq_instruct.h" #include "coq_fix_code.h" #include "coq_memory.h" #include "coq_values.h" /*spiwack : imports support functions for 64-bit integers */ #include #ifdef ARCH_INT64_TYPE #include "int64_native.h" #else #include "int64_emul.h" #endif /* spiwack: I append here a few macros for value/number manipulation */ #define uint32_of_value(val) (((uint32)val >> 1)) #define value_of_uint32(i) ((value)(((uint32)(i) << 1) | 1)) #define UI64_of_uint32(lo) ((uint64)(I64_literal(0,(uint32)(lo)))) #define UI64_of_value(val) (UI64_of_uint32(uint32_of_value(val))) /* /spiwack */ /* Registers for the abstract machine: pc the code pointer sp the stack pointer (grows downward) accu the accumulator env heap-allocated environment trapsp pointer to the current trap frame extra_args number of extra arguments provided by the caller sp is a local copy of the global variable extern_sp. */ /* Instruction decoding */ #ifdef THREADED_CODE # define Instruct(name) coq_lbl_##name: # if defined(ARCH_SIXTYFOUR) && !defined(ARCH_CODE32) # define coq_Jumptbl_base ((char *) &&coq_lbl_ACC0) # else # define coq_Jumptbl_base ((char *) 0) # define coq_jumptbl_base ((char *) 0) # endif # ifdef DEBUG # define Next goto next_instr # else # define Next goto *(void *)(coq_jumptbl_base + *pc++) # endif #else # define Instruct(name) case name: # define Next break #endif /* #define _COQ_DEBUG_ */ #ifdef _COQ_DEBUG_ # define print_instr(s) /*if (drawinstr)*/ printf("%s\n",s) # define print_int(i) /*if (drawinstr)*/ printf("%d\n",i) # else # define print_instr(s) # define print_int(i) #endif /* GC interface */ #define Setup_for_gc { sp -= 2; sp[0] = accu; sp[1] = coq_env; coq_sp = sp; } #define Restore_after_gc { accu = sp[0]; coq_env = sp[1]; sp += 2; } /* Register optimization. 
Some compilers underestimate the use of the local variables representing the abstract machine registers, and don't put them in hardware registers, which slows down the interpreter considerably. For GCC, Xavier Leroy have hand-assigned hardware registers for several architectures. */ #if defined(__GNUC__) && !defined(DEBUG) #ifdef __mips__ #define PC_REG asm("$16") #define SP_REG asm("$17") #define ACCU_REG asm("$18") #endif #ifdef __sparc__ #define PC_REG asm("%l0") #define SP_REG asm("%l1") #define ACCU_REG asm("%l2") #endif #ifdef __alpha__ #ifdef __CRAY__ #define PC_REG asm("r9") #define SP_REG asm("r10") #define ACCU_REG asm("r11") #define JUMPTBL_BASE_REG asm("r12") #else #define PC_REG asm("$9") #define SP_REG asm("$10") #define ACCU_REG asm("$11") #define JUMPTBL_BASE_REG asm("$12") #endif #endif #ifdef __i386__ #define PC_REG asm("%esi") #define SP_REG asm("%edi") #define ACCU_REG #endif #if defined(PPC) || defined(_POWER) || defined(_IBMR2) #define PC_REG asm("26") #define SP_REG asm("27") #define ACCU_REG asm("28") #endif #ifdef __hppa__ #define PC_REG asm("%r18") #define SP_REG asm("%r17") #define ACCU_REG asm("%r16") #endif #ifdef __mc68000__ #define PC_REG asm("a5") #define SP_REG asm("a4") #define ACCU_REG asm("d7") #endif #if defined(__arm__) && !defined(__thumb2__) #define PC_REG asm("r9") #define SP_REG asm("r8") #define ACCU_REG asm("r7") #endif #ifdef __ia64__ #define PC_REG asm("36") #define SP_REG asm("37") #define ACCU_REG asm("38") #define JUMPTBL_BASE_REG asm("39") #endif #endif /* For signal handling, we hijack some code from the caml runtime */ extern intnat caml_signals_are_pending; extern intnat caml_pending_signals[]; extern void caml_process_pending_signals(void); /* The interpreter itself */ value coq_interprete (code_t coq_pc, value coq_accu, value coq_env, long coq_extra_args) { /*Declaration des variables */ #ifdef PC_REG register code_t pc PC_REG; register value * sp SP_REG; register value accu ACCU_REG; #else register code_t pc; register value * sp; register value accu; #endif #if defined(THREADED_CODE) && defined(ARCH_SIXTYFOUR) && !defined(ARCH_CODE32) #ifdef JUMPTBL_BASE_REG register char * coq_jumptbl_base JUMPTBL_BASE_REG; #else register char * coq_jumptbl_base; #endif #endif #ifdef THREADED_CODE static void * coq_jumptable[] = { # include "coq_jumptbl.h" }; #else opcode_t curr_instr; #endif print_instr("Enter Interpreter"); if (coq_pc == NULL) { /* Interpreter is initializing */ print_instr("Interpreter is initializing"); #ifdef THREADED_CODE coq_instr_table = (char **) coq_jumptable; coq_instr_base = coq_Jumptbl_base; #endif return Val_unit; } #if defined(THREADED_CODE) && defined(ARCH_SIXTYFOUR) && !defined(ARCH_CODE32) coq_jumptbl_base = coq_Jumptbl_base; #endif /* Initialisation */ sp = coq_sp; pc = coq_pc; accu = coq_accu; #ifdef THREADED_CODE goto *(void *)(coq_jumptbl_base + *pc++); /* Jump to the first instruction */ #else while(1) { curr_instr = *pc++; switch(curr_instr) { #endif /* Basic stack operations */ Instruct(ACC0){ print_instr("ACC0"); accu = sp[0]; Next; } Instruct(ACC1){ print_instr("ACC1"); accu = sp[1]; Next; } Instruct(ACC2){ print_instr("ACC2"); accu = sp[2]; Next; } Instruct(ACC3){ print_instr("ACC3"); accu = sp[3]; Next; } Instruct(ACC4){ print_instr("ACC4"); accu = sp[4]; Next; } Instruct(ACC5){ print_instr("ACC5"); accu = sp[5]; Next; } Instruct(ACC6){ print_instr("ACC6"); accu = sp[6]; Next; } Instruct(ACC7){ print_instr("ACC7"); accu = sp[7]; Next; } Instruct(PUSH){ print_instr("PUSH"); *--sp = accu; Next; } 
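/* Illustrative note, a minimal sketch rather than part of the original
   interpreter: the PUSHACC<n> handlers that follow are fused forms of
   PUSH followed by ACC<n>, assuming the register conventions used above
   (accu is the accumulator, sp is the stack pointer and grows downward).
   For example,

       *--sp = accu;    // PUSH : save the accumulator on the stack
       accu  = sp[1];   // ACC1 : load the slot that sat on top before

   is observationally the same as the single PUSHACC1 handler below;
   fusing the pair avoids one extra dispatch through the jump table on
   each occurrence, which is what makes the combined opcodes worthwhile
   in this threaded-code interpreter. */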
Instruct(PUSHACC0) { print_instr("PUSHACC0"); *--sp = accu; Next; } Instruct(PUSHACC1){ print_instr("PUSHACC1"); *--sp = accu; accu = sp[1]; Next; } Instruct(PUSHACC2){ print_instr("PUSHACC2"); *--sp = accu; accu = sp[2]; Next; } Instruct(PUSHACC3){ print_instr("PUSHACC3"); *--sp = accu; accu = sp[3]; Next; } Instruct(PUSHACC4){ print_instr("PUSHACC4"); *--sp = accu; accu = sp[4]; Next; } Instruct(PUSHACC5){ print_instr("PUSHACC5"); *--sp = accu; accu = sp[5]; Next; } Instruct(PUSHACC6){ print_instr("PUSHACC5"); *--sp = accu; accu = sp[6]; Next; } Instruct(PUSHACC7){ print_instr("PUSHACC7"); *--sp = accu; accu = sp[7]; Next; } Instruct(PUSHACC){ print_instr("PUSHACC"); *--sp = accu; } /* Fallthrough */ Instruct(ACC){ print_instr("ACC"); accu = sp[*pc++]; Next; } Instruct(POP){ print_instr("POP"); sp += *pc++; Next; } /* Access in heap-allocated environment */ Instruct(ENVACC1){ print_instr("ENVACC1"); accu = Field(coq_env, 1); Next; } Instruct(ENVACC2){ print_instr("ENVACC2"); accu = Field(coq_env, 2); Next; } Instruct(ENVACC3){ print_instr("ENVACC3"); accu = Field(coq_env, 3); Next; } Instruct(ENVACC4){ print_instr("ENVACC4"); accu = Field(coq_env, 4); Next; } Instruct(PUSHENVACC1){ print_instr("PUSHENVACC1"); *--sp = accu; accu = Field(coq_env, 1); Next; } Instruct(PUSHENVACC2){ print_instr("PUSHENVACC2"); *--sp = accu; accu = Field(coq_env, 2); Next; } Instruct(PUSHENVACC3){ print_instr("PUSHENVACC3"); *--sp = accu; accu = Field(coq_env, 3); Next; } Instruct(PUSHENVACC4){ print_instr("PUSHENVACC4"); *--sp = accu; accu = Field(coq_env, 4); Next; } Instruct(PUSHENVACC){ print_instr("PUSHENVACC"); *--sp = accu; } /* Fallthrough */ Instruct(ENVACC){ print_instr("ENVACC"); accu = Field(coq_env, *pc++); Next; } /* Function application */ Instruct(PUSH_RETADDR) { print_instr("PUSH_RETADDR"); sp -= 3; sp[0] = (value) (pc + *pc); sp[1] = coq_env; sp[2] = Val_long(coq_extra_args); coq_extra_args = 0; pc++; Next; } Instruct(APPLY) { print_instr("APPLY"); coq_extra_args = *pc - 1; pc = Code_val(accu); coq_env = accu; goto check_stacks; } Instruct(APPLY1) { value arg1 = sp[0]; print_instr("APPLY1"); sp -= 3; sp[0] = arg1; sp[1] = (value)pc; sp[2] = coq_env; sp[3] = Val_long(coq_extra_args); pc = Code_val(accu); coq_env = accu; coq_extra_args = 0; goto check_stacks; } Instruct(APPLY2) { value arg1 = sp[0]; value arg2 = sp[1]; print_instr("APPLY2"); sp -= 3; sp[0] = arg1; sp[1] = arg2; sp[2] = (value)pc; sp[3] = coq_env; sp[4] = Val_long(coq_extra_args); pc = Code_val(accu); coq_env = accu; coq_extra_args = 1; goto check_stacks; } Instruct(APPLY3) { value arg1 = sp[0]; value arg2 = sp[1]; value arg3 = sp[2]; print_instr("APPLY3"); sp -= 3; sp[0] = arg1; sp[1] = arg2; sp[2] = arg3; sp[3] = (value)pc; sp[4] = coq_env; sp[5] = Val_long(coq_extra_args); pc = Code_val(accu); coq_env = accu; coq_extra_args = 2; goto check_stacks; } /* Stack checks */ check_stacks: print_instr("check_stacks"); if (sp < coq_stack_threshold) { coq_sp = sp; realloc_coq_stack(Coq_stack_threshold); sp = coq_sp; } /* We also check for signals */ if (caml_signals_are_pending) { /* If there's a Ctrl-C, we reset the vm */ if (caml_pending_signals[SIGINT]) { coq_sp = coq_stack_high; } caml_process_pending_signals(); } Next; Instruct(APPTERM) { int nargs = *pc++; int slotsize = *pc; value * newsp; int i; print_instr("APPTERM"); /* Slide the nargs bottom words of the current frame to the top of the frame, and discard the remainder of the frame */ newsp = sp + slotsize - nargs; for (i = nargs - 1; i >= 0; i--) newsp[i] = sp[i]; sp = 
newsp; pc = Code_val(accu); coq_env = accu; coq_extra_args += nargs - 1; goto check_stacks; } Instruct(APPTERM1) { value arg1 = sp[0]; print_instr("APPTERM1"); sp = sp + *pc - 1; sp[0] = arg1; pc = Code_val(accu); coq_env = accu; goto check_stacks; } Instruct(APPTERM2) { value arg1 = sp[0]; value arg2 = sp[1]; print_instr("APPTERM2"); sp = sp + *pc - 2; sp[0] = arg1; sp[1] = arg2; pc = Code_val(accu); coq_env = accu; coq_extra_args += 1; goto check_stacks; } Instruct(APPTERM3) { value arg1 = sp[0]; value arg2 = sp[1]; value arg3 = sp[2]; print_instr("APPTERM3"); sp = sp + *pc - 3; sp[0] = arg1; sp[1] = arg2; sp[2] = arg3; pc = Code_val(accu); coq_env = accu; coq_extra_args += 2; goto check_stacks; } Instruct(RETURN) { print_instr("RETURN"); print_int(*pc); sp += *pc++; if (coq_extra_args > 0) { coq_extra_args--; pc = Code_val(accu); coq_env = accu; } else { pc = (code_t)(sp[0]); coq_env = sp[1]; coq_extra_args = Long_val(sp[2]); sp += 3; } Next; } Instruct(RESTART) { int num_args = Wosize_val(coq_env) - 2; int i; print_instr("RESTART"); sp -= num_args; for (i = 0; i < num_args; i++) sp[i] = Field(coq_env, i + 2); coq_env = Field(coq_env, 1); coq_extra_args += num_args; Next; } Instruct(GRAB) { int required = *pc++; print_instr("GRAB"); /* printf("GRAB %d\n",required); */ if (coq_extra_args >= required) { coq_extra_args -= required; } else { mlsize_t num_args, i; num_args = 1 + coq_extra_args; /* arg1 + extra args */ Alloc_small(accu, num_args + 2, Closure_tag); Field(accu, 1) = coq_env; for (i = 0; i < num_args; i++) Field(accu, i + 2) = sp[i]; Code_val(accu) = pc - 3; /* Point to the preceding RESTART instr. */ sp += num_args; pc = (code_t)(sp[0]); coq_env = sp[1]; coq_extra_args = Long_val(sp[2]); sp += 3; } Next; } Instruct(GRABREC) { int rec_pos = *pc++; /* commence a zero */ print_instr("GRABREC"); if (rec_pos <= coq_extra_args && !Is_accu(sp[rec_pos])) { pc++;/* On saute le Restart */ } else { if (coq_extra_args < rec_pos) { mlsize_t num_args, i; num_args = 1 + coq_extra_args; /* arg1 + extra args */ Alloc_small(accu, num_args + 2, Closure_tag); Field(accu, 1) = coq_env; for (i = 0; i < num_args; i++) Field(accu, i + 2) = sp[i]; Code_val(accu) = pc - 3; sp += num_args; pc = (code_t)(sp[0]); coq_env = sp[1]; coq_extra_args = Long_val(sp[2]); sp += 3; } else { /* L'argument recursif est un accumulateur */ mlsize_t num_args, i; /* Construction du PF partiellement appliqu */ Alloc_small(accu, rec_pos + 2, Closure_tag); Field(accu, 1) = coq_env; for (i = 0; i < rec_pos; i++) Field(accu, i + 2) = sp[i]; Code_val(accu) = pc; sp += rec_pos; *--sp = accu; /* Construction de l'atom */ Alloc_small(accu, 2, ATOM_FIX_TAG); Field(accu,1) = sp[0]; Field(accu,0) = sp[1]; sp++; sp[0] = accu; /* Construction de l'accumulateur */ num_args = coq_extra_args - rec_pos; Alloc_small(accu, 2+num_args, Accu_tag); Code_val(accu) = accumulate; Field(accu,1) = sp[0]; sp++; for (i = 0; i < num_args;i++)Field(accu, i + 2) = sp[i]; sp += num_args; pc = (code_t)(sp[0]); coq_env = sp[1]; coq_extra_args = Long_val(sp[2]); sp += 3; } } Next; } Instruct(CLOSURE) { int nvars = *pc++; int i; print_instr("CLOSURE"); print_int(nvars); if (nvars > 0) *--sp = accu; Alloc_small(accu, 1 + nvars, Closure_tag); Code_val(accu) = pc + *pc; pc++; for (i = 0; i < nvars; i++) Field(accu, i + 1) = sp[i]; sp += nvars; Next; } Instruct(CLOSUREREC) { int nfuncs = *pc++; int nvars = *pc++; int start = *pc++; int i; value * p; print_instr("CLOSUREREC"); if (nvars > 0) *--sp = accu; /* construction du vecteur de type */ Alloc_small(accu, 
nfuncs, 0); for(i = 0; i < nfuncs; i++) { Field(accu,i) = (value)(pc+pc[i]); } pc += nfuncs; *--sp=accu; Alloc_small(accu, nfuncs * 2 + nvars, Closure_tag); Field(accu, nfuncs * 2 + nvars - 1) = *sp++; /* On remplie la partie pour les variables libres */ p = &Field(accu, nfuncs * 2 - 1); for (i = 0; i < nvars; i++) { *p++ = *sp++; } p = &Field(accu, 0); *p = (value) (pc + pc[0]); p++; for (i = 1; i < nfuncs; i++) { *p = Make_header(i * 2, Infix_tag, Caml_white); p++; /* color irrelevant. */ *p = (value) (pc + pc[i]); p++; } pc += nfuncs; accu = accu + 2 * start * sizeof(value); Next; } Instruct(CLOSURECOFIX){ int nfunc = *pc++; int nvars = *pc++; int start = *pc++; int i, j , size; value * p; print_instr("CLOSURECOFIX"); if (nvars > 0) *--sp = accu; /* construction du vecteur de type */ Alloc_small(accu, nfunc, 0); for(i = 0; i < nfunc; i++) { Field(accu,i) = (value)(pc+pc[i]); } pc += nfunc; *--sp=accu; /* Creation des blocks accumulate */ for(i=0; i < nfunc; i++) { Alloc_small(accu, 2, Accu_tag); Code_val(accu) = accumulate; Field(accu,1) = Val_int(1); *--sp=accu; } /* creation des fonction cofix */ p = sp; size = nfunc + nvars + 2; for (i=0; i < nfunc; i++) { Alloc_small(accu, size, Closure_tag); Code_val(accu) = pc+pc[i]; for (j = 0; j < nfunc; j++) Field(accu, j+1) = p[j]; Field(accu, size - 1) = p[nfunc]; for (j = nfunc+1; j <= nfunc+nvars; j++) Field(accu, j) = p[j]; *--sp = accu; /* creation du block contenant le cofix */ Alloc_small(accu,1, ATOM_COFIX_TAG); Field(accu, 0) = sp[0]; *sp = accu; /* mise a jour du block accumulate */ caml_modify(&Field(p[i], 1),*sp); sp++; } pc += nfunc; accu = p[start]; sp = p + nfunc + 1 + nvars; print_instr("ici4"); Next; } Instruct(PUSHOFFSETCLOSURE) { print_instr("PUSHOFFSETCLOSURE"); *--sp = accu; } /* fallthrough */ Instruct(OFFSETCLOSURE) { print_instr("OFFSETCLOSURE"); accu = coq_env + *pc++ * sizeof(value); Next; } Instruct(PUSHOFFSETCLOSUREM2) { print_instr("PUSHOFFSETCLOSUREM2"); *--sp = accu; } /* fallthrough */ Instruct(OFFSETCLOSUREM2) { print_instr("OFFSETCLOSUREM2"); accu = coq_env - 2 * sizeof(value); Next; } Instruct(PUSHOFFSETCLOSURE0) { print_instr("PUSHOFFSETCLOSURE0"); *--sp = accu; }/* fallthrough */ Instruct(OFFSETCLOSURE0) { print_instr("OFFSETCLOSURE0"); accu = coq_env; Next; } Instruct(PUSHOFFSETCLOSURE2){ print_instr("PUSHOFFSETCLOSURE2"); *--sp = accu; /* fallthrough */ } Instruct(OFFSETCLOSURE2) { print_instr("OFFSETCLOSURE2"); accu = coq_env + 2 * sizeof(value); Next; } /* Access to global variables */ Instruct(PUSHGETGLOBAL) { print_instr("PUSH"); *--sp = accu; } /* Fallthrough */ Instruct(GETGLOBAL){ print_instr("GETGLOBAL"); accu = Field(coq_global_data, *pc); pc++; Next; } /* Allocation of blocks */ Instruct(MAKEBLOCK) { mlsize_t wosize = *pc++; tag_t tag = *pc++; mlsize_t i; value block; print_instr("MAKEBLOCK"); Alloc_small(block, wosize, tag); Field(block, 0) = accu; for (i = 1; i < wosize; i++) Field(block, i) = *sp++; accu = block; Next; } Instruct(MAKEBLOCK1) { tag_t tag = *pc++; value block; print_instr("MAKEBLOCK1"); Alloc_small(block, 1, tag); Field(block, 0) = accu; accu = block; Next; } Instruct(MAKEBLOCK2) { tag_t tag = *pc++; value block; print_instr("MAKEBLOCK2"); Alloc_small(block, 2, tag); Field(block, 0) = accu; Field(block, 1) = sp[0]; sp += 1; accu = block; Next; } Instruct(MAKEBLOCK3) { tag_t tag = *pc++; value block; print_instr("MAKEBLOCK3"); Alloc_small(block, 3, tag); Field(block, 0) = accu; Field(block, 1) = sp[0]; Field(block, 2) = sp[1]; sp += 2; accu = block; Next; } Instruct(MAKEBLOCK4) 
{ tag_t tag = *pc++; value block; print_instr("MAKEBLOCK4"); Alloc_small(block, 4, tag); Field(block, 0) = accu; Field(block, 1) = sp[0]; Field(block, 2) = sp[1]; Field(block, 3) = sp[2]; sp += 3; accu = block; Next; } /* Access to components of blocks */ Instruct(SWITCH) { uint32 sizes = *pc++; print_instr("SWITCH"); print_int(sizes & 0xFFFF); if (Is_block(accu)) { long index = Tag_val(accu); print_instr("block"); print_int(index); pc += pc[(sizes & 0xFFFF) + index]; } else { long index = Long_val(accu); print_instr("constant"); print_int(index); pc += pc[index]; } Next; } Instruct(PUSHFIELDS){ int i; int size = *pc++; print_instr("PUSHFIELDS"); sp -= size; for(i=0;i p = 2v*w */ p = I64_mul (UI64_of_value (accu), UI64_of_uint32 ((*sp++)^1)); if ( I64_is_zero(p) ) { accu = (value)1; } else { /* the output type is supposed to have a constant constructor and a non-constant constructor (in that order), the tag of the non-constant constructor is then 1 */ Alloc_small(accu, 2, 1); /* ( _ , arity, tag ) */ /*unsigned shift*/ Field(accu, 0) = (value)(I64_lsr(p,31)|1) ; /*higher part*/ Field(accu, 1) = (value)(I64_to_int32(p)|1); /*lower part*/ } Next; } Instruct (DIV21INT31) { print_instr("DIV21INT31"); /* spiwack: takes three int31 (the two first ones represent an int62) and performs the euclidian division of the int62 by the int31 */ uint64 bigint; bigint = UI64_of_value(accu); bigint = I64_or(I64_lsl(bigint, 31),UI64_of_value(*sp++)); uint64 divisor; divisor = UI64_of_value(*sp++); Alloc_small(accu, 2, 1); /* ( _ , arity, tag ) */ if (I64_is_zero (divisor)) { Field(accu, 0) = 1; /* 2*0+1 */ Field(accu, 1) = 1; /* 2*0+1 */ } else { uint64 quo, mod; I64_udivmod(bigint, divisor, &quo, &mod); Field(accu, 0) = value_of_uint32(I64_to_int32(quo)); Field(accu, 1) = value_of_uint32(I64_to_int32(mod)); } Next; } Instruct (DIVINT31) { print_instr("DIVINT31"); /* spiwack: a priori no need of the NON_STANDARD_DIV_MOD flag since it probably only concerns negative number. 
needs to be checked at this point */ uint32 divisor; divisor = uint32_of_value(*sp++); if (divisor == 0) { Alloc_small(accu, 2, 1); /* ( _ , arity, tag ) */ Field(accu, 0) = 1; /* 2*0+1 */ Field(accu, 1) = 1; /* 2*0+1 */ } else { uint32 modulus; modulus = uint32_of_value(accu); Alloc_small(accu, 2, 1); /* ( _ , arity, tag ) */ Field(accu, 0) = value_of_uint32(modulus/divisor); Field(accu, 1) = value_of_uint32(modulus%divisor); } Next; } Instruct (ADDMULDIVINT31) { print_instr("ADDMULDIVINT31"); /* higher level shift (does shifts and cycles and such) */ uint32 shiftby; shiftby = uint32_of_value(accu); if (shiftby > 31) { if (shiftby < 62) { *sp++; accu = (value)((((*sp++)^1) << (shiftby - 31)) | 1); } else { accu = (value)(1); } } else{ /* *sp = 2*x+1 --> accu = 2^(shiftby+1)*x */ accu = (value)(((*sp++)^1) << shiftby); /* accu = 2^(shiftby+1)*x --> 2^(shifby+1)*x+2*y/2^(31-shiftby)+1 */ accu = (value)((accu | (((uint32)(*sp++)) >> (31-shiftby)))|1); } Next; } Instruct (COMPAREINT31) { /* returns Eq if equal, Lt if accu is less than *sp, Gt otherwise */ /* assumes Inudctive _ : _ := Eq | Lt | Gt */ print_instr("COMPAREINT31"); if ((uint32)accu == (uint32)*sp) { accu = 1; /* 2*0+1 */ sp++; } else{if ((uint32)accu < (uint32)(*sp++)) { accu = 3; /* 2*1+1 */ } else{ accu = 5; /* 2*2+1 */ }} Next; } Instruct (HEAD0INT31) { int r = 0; uint32 x; print_instr("HEAD0INT31"); x = (uint32) accu; if (!(x & 0xFFFF0000)) { x <<= 16; r += 16; } if (!(x & 0xFF000000)) { x <<= 8; r += 8; } if (!(x & 0xF0000000)) { x <<= 4; r += 4; } if (!(x & 0xC0000000)) { x <<= 2; r += 2; } if (!(x & 0x80000000)) { x <<=1; r += 1; } if (!(x & 0x80000000)) { r += 1; } accu = value_of_uint32(r); Next; } Instruct (TAIL0INT31) { int r = 0; uint32 x; print_instr("TAIL0INT31"); x = (((uint32) accu >> 1) | 0x80000000); if (!(x & 0xFFFF)) { x >>= 16; r += 16; } if (!(x & 0x00FF)) { x >>= 8; r += 8; } if (!(x & 0x000F)) { x >>= 4; r += 4; } if (!(x & 0x0003)) { x >>= 2; r += 2; } if (!(x & 0x0001)) { x >>=1; r += 1; } if (!(x & 0x0001)) { r += 1; } accu = value_of_uint32(r); Next; } Instruct (ISCONST) { /* Branches if the accu does not contain a constant (i.e., a non-block value) */ print_instr("ISCONST"); if ((accu & 1) == 0) /* last bit is 0 -> it is a block */ pc += *pc; else pc++; Next; } Instruct (ARECONST) { /* Branches if the n first values on the stack are not all constansts */ print_instr("ARECONST"); int i, n, ok; ok = 1; n = *pc++; for(i=0; i < n; i++) { if ((sp[i] & 1) == 0) { ok = 0; break; } } if(ok) pc++; else pc += *pc; Next; } Instruct (COMPINT31) { /* makes an 31-bit integer out of the accumulator and the 30 first values of the stack and put it in the accumulator (the accumulator then the topmost get to be the heavier bits) */ print_instr("COMPINT31"); int i; /*accu=accu or accu = (value)((unsigned long)1-accu) if bool is used for the bits */ for(i=0; i < 30; i++) { accu = (value) ((((uint32)accu-1) << 1) | *sp++); /* -1 removes the tag bit, << 1 multiplies the value by 2, | *sp++ pops the last value and add it (no carry involved) not that it reintroduces a tag bit */ /* alternative, if bool is used for the bits : accu = (value) ((((unsigned long)accu) << 1) & !*sp++); */ } Next; } Instruct (DECOMPINT31) { /* builds a block out of a 31-bit integer (from the accumulator), used before cases */ int i; value block; print_instr("DECOMPINT31"); Alloc_small(block, 31, 1); // Alloc_small(*, size, tag) for(i = 30; i >= 0; i--) { Field(block, i) = (value)(accu & 3); /* two last bits of the accumulator */ //Field(block, i) = 
3; accu = (value) ((uint32)accu >> 1) | 1; /* last bit must be a one */ }; accu = block; Next; } /* /spiwack */ /* Debugging and machine control */ Instruct(STOP){ print_instr("STOP"); coq_sp = sp; return accu; } #ifndef THREADED_CODE default: /*fprintf(stderr, "%d\n", *pc);*/ failwith("Coq VM: Fatal error: bad opcode"); } } #endif } value coq_push_ra(value tcode) { print_instr("push_ra"); coq_sp -= 3; coq_sp[0] = (value) tcode; coq_sp[1] = Val_unit; coq_sp[2] = Val_long(0); return Val_unit; } value coq_push_val(value v) { print_instr("push_val"); *--coq_sp = v; return Val_unit; } value coq_push_arguments(value args) { int nargs,i; nargs = Wosize_val(args) - 2; coq_sp -= nargs; print_instr("push_args");print_int(nargs); for(i = 0; i < nargs; i++) coq_sp[i] = Field(args, i+2); return Val_unit; } value coq_push_vstack(value stk) { int len,i; len = Wosize_val(stk); coq_sp -= len; print_instr("push_vstack");print_int(len); for(i = 0; i < len; i++) coq_sp[i] = Field(stk,i); return Val_unit; } value coq_interprete_ml(value tcode, value a, value e, value ea) { print_instr("coq_interprete"); return coq_interprete((code_t)tcode, a, e, Long_val(ea)); print_instr("end coq_interprete"); } value coq_eval_tcode (value tcode, value e) { return coq_interprete_ml(tcode, Val_unit, e, 0); } coq-8.4pl2/kernel/byterun/libcoqrun.clib0000640000175000001440000000006611161000644017423 0ustar notinuserscoq_fix_code.o coq_memory.o coq_values.o coq_interp.o coq-8.4pl2/kernel/byterun/coq_values.h0000640000175000001440000000231511057726532017122 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ #ifndef _COQ_VALUES_ #define _COQ_VALUES_ #include #include #define Default_tag 0 #define Accu_tag 0 #define ATOM_ID_TAG 0 #define ATOM_IDDEF_TAG 1 #define ATOM_INDUCTIVE_TAG 2 #define ATOM_FIX_TAG 3 #define ATOM_SWITCH_TAG 4 #define ATOM_COFIX_TAG 5 #define ATOM_COFIXEVALUATED_TAG 6 /* Les blocs accumulate */ #define Is_accu(v) (Is_block(v) && (Tag_val(v) == Accu_tag)) #define IS_EVALUATED_COFIX(v) (Is_accu(v) && Is_block(Field(v,1)) && (Tag_val(Field(v,1)) == ATOM_COFIXEVALUATED_TAG)) #endif /* _COQ_VALUES_ */ coq-8.4pl2/kernel/byterun/coq_fix_code.h0000640000175000001440000000241311057726532017402 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ #ifndef _COQ_FIX_CODE_ #define _COQ_FIX_CODE_ #include void * coq_stat_alloc (asize_t sz); #ifdef THREADED_CODE extern char ** coq_instr_table; extern char * coq_instr_base; void init_arity(); #define VALINSTR(instr) ((opcode_t)(coq_instr_table[instr] - coq_instr_base)) #else #define VALINSTR(instr) instr #endif /* THREADED_CODE */ #define Is_instruction(pc,instr) (*pc == VALINSTR(instr)) value coq_tcode_of_code(value code, value len); value coq_makeaccu (value i); value coq_pushpop (value i); value coq_accucond (value i); value coq_is_accumulate_code(value code); #endif /* _COQ_FIX_CODE_ */ coq-8.4pl2/kernel/byterun/coq_interp.h0000640000175000001440000000176710537323613017131 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, 
projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ value coq_push_ra(value tcode); value coq_push_val(value v); value coq_push_arguments(value args); value coq_push_vstack(value stk); value coq_interprete_ml(value tcode, value a, value e, value ea); value coq_interprete (code_t coq_pc, value coq_accu, value coq_env, long coq_extra_args); value coq_eval_tcode (value tcode, value e); coq-8.4pl2/kernel/byterun/coq_memory.c0000640000175000001440000001556711447122051017130 0ustar notinusers/***********************************************************************/ /* */ /* Coq Compiler */ /* */ /* Benjamin Gregoire, projets Logical and Cristal */ /* INRIA Rocquencourt */ /* */ /* */ /***********************************************************************/ #include #include #include "coq_gc.h" #include "coq_instruct.h" #include "coq_fix_code.h" #include "coq_memory.h" #include "coq_interp.h" /* stack */ value * coq_stack_low; value * coq_stack_high; value * coq_stack_threshold; asize_t coq_max_stack_size = Coq_max_stack_size; /* global_data */ value coq_global_data; value coq_global_boxed; int coq_all_transp; value coq_atom_tbl; int drawinstr; /* interp state */ long coq_saved_sp_offset; value * coq_sp; /* Some predefined pointer code */ code_t accumulate; /* functions over global environment */ void coq_stat_free (void * blk) { free (blk); } value coq_static_alloc(value size) /* ML */ { return (value) coq_stat_alloc((asize_t) Long_val(size)); } value accumulate_code(value unit) /* ML */ { return (value) accumulate; } static void (*coq_prev_scan_roots_hook) (scanning_action); static void coq_scan_roots(scanning_action action) { register value * i; /* Scan the global variables */ (*action)(coq_global_data, &coq_global_data); (*action)(coq_global_boxed, &coq_global_boxed); (*action)(coq_atom_tbl, &coq_atom_tbl); /* Scan the stack */ for (i = coq_sp; i < coq_stack_high; i++) { (*action) (*i, i); }; /* Hook */ if (coq_prev_scan_roots_hook != NULL) (*coq_prev_scan_roots_hook)(action); } void init_coq_stack() { coq_stack_low = (value *) coq_stat_alloc(Coq_stack_size); coq_stack_high = coq_stack_low + Coq_stack_size / sizeof (value); coq_stack_threshold = coq_stack_low + Coq_stack_threshold / sizeof(value); coq_max_stack_size = Coq_max_stack_size; } void init_coq_global_data(long requested_size) { int i; coq_global_data = alloc_shr(requested_size, 0); for (i = 0; i < requested_size; i++) Field (coq_global_data, i) = Val_unit; } void init_coq_global_boxed(long requested_size) { int i; coq_global_boxed = alloc_shr(requested_size, 0); for (i = 0; i < requested_size; i++) Field (coq_global_boxed, i) = Val_true; } void init_coq_atom_tbl(long requested_size){ int i; coq_atom_tbl = alloc_shr(requested_size, 0); for (i = 0; i < requested_size; i++) Field (coq_atom_tbl, i) = Val_unit; } void init_coq_interpreter() { coq_sp = coq_stack_high; coq_interprete(NULL, Val_unit, Val_unit, 0); } static int coq_vm_initialized = 0; value init_coq_vm(value unit) /* ML */ { int i; if (coq_vm_initialized == 1) { fprintf(stderr,"already open \n");fflush(stderr);} else { drawinstr=0; #ifdef THREADED_CODE init_arity(); #endif /* THREADED_CODE */ /* Allocate the table of global and the stack */ init_coq_stack(); init_coq_global_data(Coq_global_data_Size); init_coq_global_boxed(40); init_coq_atom_tbl(40); /* Initialing the interpreter */ coq_all_transp = 0; init_coq_interpreter(); /* Some predefined pointer code */ accumulate = (code_t) 
coq_stat_alloc(sizeof(opcode_t)); *accumulate = VALINSTR(ACCUMULATE); /* Initialize GC */ if (coq_prev_scan_roots_hook == NULL) coq_prev_scan_roots_hook = scan_roots_hook; scan_roots_hook = coq_scan_roots; coq_vm_initialized = 1; } return Val_unit;; } void realloc_coq_stack(asize_t required_space) { asize_t size; value * new_low, * new_high, * new_sp; value * p; size = coq_stack_high - coq_stack_low; do { size *= 2; } while (size < coq_stack_high - coq_sp + required_space); new_low = (value *) coq_stat_alloc(size * sizeof(value)); new_high = new_low + size; #define shift(ptr) \ ((char *) new_high - ((char *) coq_stack_high - (char *) (ptr))) new_sp = (value *) shift(coq_sp); memmove((char *) new_sp, (char *) coq_sp, (coq_stack_high - coq_sp) * sizeof(value)); coq_stat_free(coq_stack_low); coq_stack_low = new_low; coq_stack_high = new_high; coq_stack_threshold = coq_stack_low + Coq_stack_threshold / sizeof(value); coq_sp = new_sp; #undef shift } value get_coq_global_data(value unit) /* ML */ { return coq_global_data; } value get_coq_atom_tbl(value unit) /* ML */ { return coq_atom_tbl; } value get_coq_global_boxed(value unit) /* ML */ { return coq_global_boxed; } value realloc_coq_global_data(value size) /* ML */ { mlsize_t requested_size, actual_size, i; value new_global_data; requested_size = Long_val(size); actual_size = Wosize_val(coq_global_data); if (requested_size >= actual_size) { requested_size = (requested_size + 0x100) & 0xFFFFFF00; new_global_data = alloc_shr(requested_size, 0); for (i = 0; i < actual_size; i++) initialize(&Field(new_global_data, i), Field(coq_global_data, i)); for (i = actual_size; i < requested_size; i++){ Field (new_global_data, i) = Val_long (0); } coq_global_data = new_global_data; } return Val_unit; } value realloc_coq_global_boxed(value size) /* ML */ { mlsize_t requested_size, actual_size, i; value new_global_boxed; requested_size = Long_val(size); actual_size = Wosize_val(coq_global_boxed); if (requested_size >= actual_size) { requested_size = (requested_size + 0x100) & 0xFFFFFF00; new_global_boxed = alloc_shr(requested_size, 0); for (i = 0; i < actual_size; i++) initialize(&Field(new_global_boxed, i), Field(coq_global_boxed, i)); for (i = actual_size; i < requested_size; i++) Field (new_global_boxed, i) = Val_long (0); coq_global_boxed = new_global_boxed; } return Val_unit; } value realloc_coq_atom_tbl(value size) /* ML */ { mlsize_t requested_size, actual_size, i; value new_atom_tbl; requested_size = Long_val(size); actual_size = Wosize_val(coq_atom_tbl); if (requested_size >= actual_size) { requested_size = (requested_size + 0x100) & 0xFFFFFF00; new_atom_tbl = alloc_shr(requested_size, 0); for (i = 0; i < actual_size; i++) initialize(&Field(new_atom_tbl, i), Field(coq_atom_tbl, i)); for (i = actual_size; i < requested_size; i++) Field (new_atom_tbl, i) = Val_long (0); coq_atom_tbl = new_atom_tbl; } return Val_unit; } value coq_set_transp_value(value transp) { coq_all_transp = (transp == Val_true); return Val_unit; } value get_coq_transp_value(value unit) { return Val_bool(coq_all_transp); } value coq_set_drawinstr(value unit) { drawinstr = 1; return Val_unit; } coq-8.4pl2/kernel/byterun/int64_emul.h0000640000175000001440000001351511366307247016754 0ustar notinusers/***********************************************************************/ /* */ /* Objective Caml */ /* */ /* Xavier Leroy, projet Cristal, INRIA Rocquencourt */ /* */ /* Copyright 2002 Institut National de Recherche en Informatique et */ /* en Automatique. All rights reserved. 
This file is distributed */ /* under the terms of the GNU Library General Public License, with */ /* the special exception on linking described in file ../LICENSE. */ /* */ /***********************************************************************/ /* Software emulation of 64-bit integer arithmetic, for C compilers that do not support it. */ #ifndef CAML_INT64_EMUL_H #define CAML_INT64_EMUL_H #include #ifdef ARCH_BIG_ENDIAN #define I64_literal(hi,lo) { hi, lo } #else #define I64_literal(hi,lo) { lo, hi } #endif /* Unsigned comparison */ static int I64_ucompare(uint64 x, uint64 y) { if (x.h > y.h) return 1; if (x.h < y.h) return -1; if (x.l > y.l) return 1; if (x.l < y.l) return -1; return 0; } #define I64_ult(x, y) (I64_ucompare(x, y) < 0) /* Signed comparison */ static int I64_compare(int64 x, int64 y) { if ((int32)x.h > (int32)y.h) return 1; if ((int32)x.h < (int32)y.h) return -1; if (x.l > y.l) return 1; if (x.l < y.l) return -1; return 0; } /* Negation */ static int64 I64_neg(int64 x) { int64 res; res.l = -x.l; res.h = ~x.h; if (res.l == 0) res.h++; return res; } /* Addition */ static int64 I64_add(int64 x, int64 y) { int64 res; res.l = x.l + y.l; res.h = x.h + y.h; if (res.l < x.l) res.h++; return res; } /* Subtraction */ static int64 I64_sub(int64 x, int64 y) { int64 res; res.l = x.l - y.l; res.h = x.h - y.h; if (x.l < y.l) res.h--; return res; } /* Multiplication */ static int64 I64_mul(int64 x, int64 y) { int64 res; uint32 prod00 = (x.l & 0xFFFF) * (y.l & 0xFFFF); uint32 prod10 = (x.l >> 16) * (y.l & 0xFFFF); uint32 prod01 = (x.l & 0xFFFF) * (y.l >> 16); uint32 prod11 = (x.l >> 16) * (y.l >> 16); res.l = prod00; res.h = prod11 + (prod01 >> 16) + (prod10 >> 16); prod01 = prod01 << 16; res.l += prod01; if (res.l < prod01) res.h++; prod10 = prod10 << 16; res.l += prod10; if (res.l < prod10) res.h++; res.h += x.l * y.h + x.h * y.l; return res; } #define I64_is_zero(x) (((x).l | (x).h) == 0) #define I64_is_negative(x) ((int32) (x).h < 0) /* Bitwise operations */ static int64 I64_and(int64 x, int64 y) { int64 res; res.l = x.l & y.l; res.h = x.h & y.h; return res; } static int64 I64_or(int64 x, int64 y) { int64 res; res.l = x.l | y.l; res.h = x.h | y.h; return res; } static int64 I64_xor(int64 x, int64 y) { int64 res; res.l = x.l ^ y.l; res.h = x.h ^ y.h; return res; } /* Shifts */ static int64 I64_lsl(int64 x, int s) { int64 res; s = s & 63; if (s == 0) return x; if (s < 32) { res.l = x.l << s; res.h = (x.h << s) | (x.l >> (32 - s)); } else { res.l = 0; res.h = x.l << (s - 32); } return res; } static int64 I64_lsr(int64 x, int s) { int64 res; s = s & 63; if (s == 0) return x; if (s < 32) { res.l = (x.l >> s) | (x.h << (32 - s)); res.h = x.h >> s; } else { res.l = x.h >> (s - 32); res.h = 0; } return res; } static int64 I64_asr(int64 x, int s) { int64 res; s = s & 63; if (s == 0) return x; if (s < 32) { res.l = (x.l >> s) | (x.h << (32 - s)); res.h = (int32) x.h >> s; } else { res.l = (int32) x.h >> (s - 32); res.h = (int32) x.h >> 31; } return res; } /* Division and modulus */ #define I64_SHL1(x) x.h = (x.h << 1) | (x.l >> 31); x.l <<= 1 #define I64_SHR1(x) x.l = (x.l >> 1) | (x.h << 31); x.h >>= 1 static void I64_udivmod(uint64 modulus, uint64 divisor, uint64 * quo, uint64 * mod) { int64 quotient, mask; int cmp; quotient.h = 0; quotient.l = 0; mask.h = 0; mask.l = 1; while ((int32) divisor.h >= 0) { cmp = I64_ucompare(divisor, modulus); I64_SHL1(divisor); I64_SHL1(mask); if (cmp >= 0) break; } while (mask.l | mask.h) { if (I64_ucompare(modulus, divisor) >= 0) { quotient.h |= mask.h; 
quotient.l |= mask.l; modulus = I64_sub(modulus, divisor); } I64_SHR1(mask); I64_SHR1(divisor); } *quo = quotient; *mod = modulus; } static int64 I64_div(int64 x, int64 y) { int64 q, r; int32 sign; sign = x.h ^ y.h; if ((int32) x.h < 0) x = I64_neg(x); if ((int32) y.h < 0) y = I64_neg(y); I64_udivmod(x, y, &q, &r); if (sign < 0) q = I64_neg(q); return q; } static int64 I64_mod(int64 x, int64 y) { int64 q, r; int32 sign; sign = x.h; if ((int32) x.h < 0) x = I64_neg(x); if ((int32) y.h < 0) y = I64_neg(y); I64_udivmod(x, y, &q, &r); if (sign < 0) r = I64_neg(r); return r; } /* Coercions */ static int64 I64_of_int32(int32 x) { int64 res; res.l = x; res.h = x >> 31; return res; } #define I64_to_int32(x) ((int32) (x).l) /* Note: we assume sizeof(intnat) = 4 here, which is true otherwise autoconfiguration would have selected native 64-bit integers */ #define I64_of_intnat I64_of_int32 #define I64_to_intnat I64_to_int32 static double I64_to_double(int64 x) { double res; int32 sign = x.h; if (sign < 0) x = I64_neg(x); res = ldexp((double) x.h, 32) + x.l; if (sign < 0) res = -res; return res; } static int64 I64_of_double(double f) { int64 res; double frac, integ; int neg; neg = (f < 0); f = fabs(f); frac = modf(ldexp(f, -32), &integ); res.h = (uint32) integ; res.l = (uint32) ldexp(frac, 32); if (neg) res = I64_neg(res); return res; } #endif /* CAML_INT64_EMUL_H */ coq-8.4pl2/kernel/environ.mli0000640000175000001440000001730712010532755015302 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Pre_env.env val env_of_pre_env : Pre_env.env -> env type named_context_val val eq_named_context_val : named_context_val -> named_context_val -> bool val empty_env : env val universes : env -> Univ.universes val rel_context : env -> rel_context val named_context : env -> named_context val named_context_val : env -> named_context_val val engagement : env -> engagement option (** is the local context empty *) val empty_context : env -> bool (** {5 Context of de Bruijn variables ([rel_context]) } *) val nb_rel : env -> int val push_rel : rel_declaration -> env -> env val push_rel_context : rel_context -> env -> env val push_rec_types : rec_declaration -> env -> env (** Looks up in the context of local vars referred by indice ([rel_context]) raises [Not_found] if the index points out of the context *) val lookup_rel : int -> env -> rel_declaration val evaluable_rel : int -> env -> bool (** {6 Recurrence on [rel_context] } *) val fold_rel_context : (env -> rel_declaration -> 'a -> 'a) -> env -> init:'a -> 'a (** {5 Context of variables (section variables and goal assumptions) } *) val named_context_of_val : named_context_val -> named_context val named_vals_of_val : named_context_val -> Pre_env.named_vals val val_of_named_context : named_context -> named_context_val val empty_named_context_val : named_context_val (** [map_named_val f ctxt] apply [f] to the body and the type of each declarations. 
*** /!\ *** [f t] should be convertible with t *) val map_named_val : (constr -> constr) -> named_context_val -> named_context_val val push_named : named_declaration -> env -> env val push_named_context_val : named_declaration -> named_context_val -> named_context_val (** Looks up in the context of local vars referred by names ([named_context]) raises [Not_found] if the identifier is not found *) val lookup_named : variable -> env -> named_declaration val lookup_named_val : variable -> named_context_val -> named_declaration val evaluable_named : variable -> env -> bool val named_type : variable -> env -> types val named_body : variable -> env -> constr option (** {6 Recurrence on [named_context]: older declarations processed first } *) val fold_named_context : (env -> named_declaration -> 'a -> 'a) -> env -> init:'a -> 'a (** Recurrence on [named_context] starting from younger decl *) val fold_named_context_reverse : ('a -> named_declaration -> 'a) -> init:'a -> env -> 'a (** This forgets named and rel contexts *) val reset_context : env -> env (** This forgets rel context and sets a new named context *) val reset_with_named_context : named_context_val -> env -> env (** {5 Global constants } {6 Add entries to global environment } *) val add_constant : constant -> constant_body -> env -> env (** Looks up in the context of global constant names raises [Not_found] if the required path is not found *) val lookup_constant : constant -> env -> constant_body val evaluable_constant : constant -> env -> bool (** {6 ... } *) (** [constant_value env c] raises [NotEvaluableConst Opaque] if [c] is opaque and [NotEvaluableConst NoBody] if it has no body and [Not_found] if it does not exist in [env] *) type const_evaluation_result = NoBody | Opaque exception NotEvaluableConst of const_evaluation_result val constant_value : env -> constant -> constr val constant_type : env -> constant -> constant_type val constant_opt_value : env -> constant -> constr option (** {5 Inductive types } *) val add_mind : mutual_inductive -> mutual_inductive_body -> env -> env (** Looks up in the context of global inductive names raises [Not_found] if the required path is not found *) val lookup_mind : mutual_inductive -> env -> mutual_inductive_body (** {5 Modules } *) val add_modtype : module_path -> module_type_body -> env -> env (** [shallow_add_module] does not add module components *) val shallow_add_module : module_path -> module_body -> env -> env val lookup_module : module_path -> env -> module_body val lookup_modtype : module_path -> env -> module_type_body (** {5 Universe constraints } *) val add_constraints : Univ.constraints -> env -> env val set_engagement : engagement -> env -> env (** {6 Sets of referred section variables } [global_vars_set env c] returns the list of [id]'s occurring either directly as [Var id] in [c] or indirectly as a section variable dependent in a global reference occurring in [c] *) val global_vars_set : env -> constr -> Idset.t (** the constr must be a global reference *) val vars_of_global : env -> constr -> identifier list val keep_hyps : env -> Idset.t -> section_context (** {5 Unsafe judgments. } We introduce here the pre-type of judgments, which is actually only a datatype to store a term with its type and the type of its type. 
*) type unsafe_judgment = { uj_val : constr; uj_type : types } val make_judge : constr -> types -> unsafe_judgment val j_val : unsafe_judgment -> constr val j_type : unsafe_judgment -> types type unsafe_type_judgment = { utj_val : constr; utj_type : sorts } (** {6 Compilation of global declaration } *) val compile_constant_body : env -> constant_def -> Cemitcodes.body_code exception Hyp_not_found (** [apply_to_hyp sign id f] split [sign] into [tail::(id,_,_)::head] and return [tail::(f head (id,_,_) (rev tail))::head]. the value associated to id should not change *) val apply_to_hyp : named_context_val -> variable -> (named_context -> named_declaration -> named_context -> named_declaration) -> named_context_val (** [apply_to_hyp_and_dependent_on sign id f g] split [sign] into [tail::(id,_,_)::head] and return [(g tail)::(f (id,_,_))::head]. *) val apply_to_hyp_and_dependent_on : named_context_val -> variable -> (named_declaration -> named_context_val -> named_declaration) -> (named_declaration -> named_context_val -> named_declaration) -> named_context_val val insert_after_hyp : named_context_val -> variable -> named_declaration -> (named_context -> unit) -> named_context_val val remove_hyps : identifier list -> (named_declaration -> named_declaration) -> (Pre_env.lazy_val -> Pre_env.lazy_val) -> named_context_val -> named_context_val open Retroknowledge (** functions manipulating the retroknowledge @author spiwack *) val retroknowledge : (retroknowledge->'a) -> env -> 'a val registered : env -> field -> bool val unregister : env -> field -> env val register : env -> field -> Retroknowledge.entry -> env coq-8.4pl2/kernel/univ.mli0000640000175000001440000000724012010532755014576 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 *) val type0m_univ : universe (** image of Prop in the universes hierarchy *) val type0_univ : universe (** image of Set in the universes hierarchy *) val type1_univ : universe (** the universe of the type of Prop/Set *) val make_universe_level : Names.dir_path * int -> universe_level val make_universe : universe_level -> universe val make_univ : Names.dir_path * int -> universe val is_type0_univ : universe -> bool val is_type0m_univ : universe -> bool val is_univ_variable : universe -> bool val universe_level : universe -> universe_level option val compare_levels : universe_level -> universe_level -> int (** The type of a universe *) val super : universe -> universe (** The max of 2 universes *) val sup : universe -> universe -> universe (** {6 Graphs of universes. } *) type universes type check_function = universes -> universe -> universe -> bool val check_geq : check_function val check_eq : check_function (** The empty graph of universes *) val initial_universes : universes val is_initial_universes : universes -> bool (** {6 Constraints. } *) type constraints val empty_constraint : constraints val union_constraints : constraints -> constraints -> constraints val is_empty_constraint : constraints -> bool type constraint_function = universe -> universe -> constraints -> constraints val enforce_geq : constraint_function val enforce_eq : constraint_function (** {6 ... } *) (** Merge of constraints in a universes graph. The function [merge_constraints] merges a set of constraints in a given universes graph. It raises the exception [UniverseInconsistency] if the constraints are not satisfiable. 
*) type constraint_type = Lt | Le | Eq exception UniverseInconsistency of constraint_type * universe * universe val merge_constraints : constraints -> universes -> universes val normalize_universes : universes -> universes val sort_universes : universes -> universes (** {6 Support for sort-polymorphic inductive types } *) val fresh_local_univ : unit -> universe val solve_constraints_system : universe option array -> universe array -> universe array val subst_large_constraint : universe -> universe -> universe -> universe val subst_large_constraints : (universe * universe) list -> universe -> universe val no_upper_constraints : universe -> constraints -> bool (** Is u mentionned in v (or equals to v) ? *) val univ_depends : universe -> universe -> bool (** {6 Pretty-printing of universes. } *) val pr_uni_level : universe_level -> Pp.std_ppcmds val pr_uni : universe -> Pp.std_ppcmds val pr_universes : universes -> Pp.std_ppcmds val pr_constraints : constraints -> Pp.std_ppcmds (** {6 Dumping to a file } *) val dump_universes : (constraint_type -> string -> string -> unit) -> universes -> unit (** {6 Hash-consing } *) val hcons_univlevel : universe_level -> universe_level val hcons_univ : universe -> universe val hcons_constraints : constraints -> constraints coq-8.4pl2/kernel/subtyping.mli0000640000175000001440000000121212010532755015632 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_type_body -> module_type_body -> constraints coq-8.4pl2/kernel/cbytecodes.mli0000640000175000001440000001366212010532755015746 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t val reset_label_counter : unit -> unit end type instruction = | Klabel of Label.t | Kacc of int | Kenvacc of int | Koffsetclosure of int | Kpush | Kpop of int | Kpush_retaddr of Label.t | Kapply of int (** number of arguments *) | Kappterm of int * int (** number of arguments, slot size *) | Kreturn of int (** slot size *) | Kjump | Krestart | Kgrab of int (** number of arguments *) | Kgrabrec of int (** rec arg *) | Kclosure of Label.t * int (** label, number of free variables *) | Kclosurerec of int * int * Label.t array * Label.t array (** nb fv, init, lbl types, lbl bodies *) | Kclosurecofix of int * int * Label.t array * Label.t array (** nb fv, init, lbl types, lbl bodies *) | Kgetglobal of constant | Kconst of structured_constant | Kmakeblock of int * tag (** size, tag *) | Kmakeprod | Kmakeswitchblock of Label.t * Label.t * annot_switch * int | Kswitch of Label.t array * Label.t array (** consts,blocks *) | Kpushfields of int | Kfield of int | Ksetfield of int | Kstop | Ksequence of bytecodes * bytecodes (** spiwack: instructions concerning integers *) | Kbranch of Label.t (** jump to label, is it needed ? *) | Kaddint31 (** adds the int31 in the accu and the one ontop of the stack *) | Kaddcint31 (** makes the sum and keeps the carry *) | Kaddcarrycint31 (** sum +1, keeps the carry *) | Ksubint31 (** subtraction modulo *) | Ksubcint31 (** subtraction, keeps the carry *) | Ksubcarrycint31 (** subtraction -1, keeps the carry *) | Kmulint31 (** multiplication modulo *) | Kmulcint31 (** multiplication, result in two int31, for exact computation *) | Kdiv21int31 (** divides a double size integer (represented by an int31 in the accumulator and one on the top of the stack) by an int31. 
The result is a pair of the quotient and the rest. If the divisor is 0, it returns 0. *) | Kdivint31 (** euclidian division (returns a pair quotient,rest) *) | Kaddmuldivint31 (** generic operation for shifting and cycling. Takes 3 int31 i j and s, and returns x*2^s+y/(2^(31-s) *) | Kcompareint31 (** unsigned comparison of int31 cf COMPAREINT31 in kernel/byterun/coq_interp.c for more info *) | Khead0int31 (** Give the numbers of 0 in head of a in31*) | Ktail0int31 (** Give the numbers of 0 in tail of a in31 ie low bits *) | Kisconst of Label.t (** conditional jump *) | Kareconst of int*Label.t (** conditional jump *) | Kcompint31 (** dynamic compilation of int31 *) | Kdecompint31 (** dynamix decompilation of int31 /spiwack *) and bytecodes = instruction list type fv_elem = FVnamed of identifier | FVrel of int type fv = fv_elem array (** spiwack: this exception is expected to be raised by function expecting closed terms. *) exception NotClosed (*spiwack: both type have been moved from Cbytegen because I needed then for the retroknowledge *) type vm_env = { size : int; (** longueur de la liste [n] *) fv_rev : fv_elem list (** [fvn; ... ;fv1] *) } type comp_env = { nb_stack : int; (** nbre de variables sur la pile *) in_stack : int list; (** position dans la pile *) nb_rec : int; (** nbre de fonctions mutuellement *) (** recursives = nbr *) pos_rec : instruction list; (** instruction d'acces pour les variables *) (** de point fix ou de cofix *) offset : int; in_env : vm_env ref } val draw_instr : bytecodes -> unit (*spiwack: moved this here because I needed it for retroknowledge *) type block = | Bconstr of constr | Bstrconst of structured_constant | Bmakeblock of int * block array | Bconstruct_app of int * int * int * block array (** tag , nparams, arity *) | Bspecial of (comp_env -> block array -> int -> bytecodes -> bytecodes) * block array (** compilation function (see get_vm_constant_dynamic_info in retroknowledge.mli for more info) , argument array *) coq-8.4pl2/kernel/mod_subst.ml0000640000175000001440000004216112121725465015451 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* *) let string_of_hint = function | Inline (_,Some _) -> "inline(Some _)" | Inline _ -> "inline()" | Equiv kn -> string_of_kn kn let debug_string_of_delta resolve = let kn_to_string kn hint l = (string_of_kn kn ^ "=>" ^ string_of_hint hint) :: l in let mp_to_string mp mp' l = (string_of_mp mp ^ "=>" ^ string_of_mp mp') :: l in let l = Deltamap.fold mp_to_string kn_to_string resolve [] in String.concat ", " (List.rev l) let list_contents sub = let one_pair (mp,reso) = (string_of_mp mp,debug_string_of_delta reso) in let mp_one_pair mp0 p l = (string_of_mp mp0, one_pair p)::l in let mbi_one_pair mbi p l = (debug_string_of_mbid mbi, one_pair p)::l in Umap.fold mp_one_pair mbi_one_pair sub [] let debug_string_of_subst sub = let l = List.map (fun (s1,(s2,s3)) -> s1^"|->"^s2^"["^s3^"]") (list_contents sub) in "{" ^ String.concat "; " l ^ "}" let debug_pr_delta resolve = str (debug_string_of_delta resolve) let debug_pr_subst sub = let l = list_contents sub in let f (s1,(s2,s3)) = hov 2 (str s1 ++ spc () ++ str "|-> " ++ str s2 ++ spc () ++ str "[" ++ str s3 ++ str "]") in str "{" ++ hov 2 (prlist_with_sep pr_comma f l) ++ str "}" (* *) (** Extending a [delta_resolver] *) let add_inline_delta_resolver kn (lev,oc) = Deltamap.add_kn kn (Inline (lev,oc)) let add_kn_delta_resolver kn kn' = Deltamap.add_kn kn (Equiv kn') 
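(* Illustrative sketch, not part of the original source: with hypothetical
   kernel names [kn_user] and [kn_canon], a resolver built as
   [add_kn_delta_resolver kn_user kn_canon empty_delta_resolver] makes
   [kn_of_delta] send [kn_user] to [kn_canon] while leaving names whose
   module path is not remapped unchanged. *)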
let add_mp_delta_resolver mp1 mp2 = Deltamap.add_mp mp1 mp2 (** Extending a [substitution *) let add_mbid mbid mp resolve s = Umap.add_mbi mbid (mp,resolve) s let add_mp mp1 mp2 resolve s = Umap.add_mp mp1 (mp2,resolve) s let map_mbid mbid mp resolve = add_mbid mbid mp resolve empty_subst let map_mp mp1 mp2 resolve = add_mp mp1 mp2 resolve empty_subst let mp_in_delta mp = Deltamap.mem_mp mp let kn_in_delta kn resolver = try match Deltamap.find_kn kn resolver with | Equiv _ -> true | Inline _ -> false with Not_found -> false let con_in_delta con resolver = kn_in_delta (user_con con) resolver let mind_in_delta mind resolver = kn_in_delta (user_mind mind) resolver let mp_of_delta resolve mp = try Deltamap.find_mp mp resolve with Not_found -> mp let rec find_prefix resolve mp = let rec sub_mp = function | MPdot(mp,l) as mp_sup -> (try Deltamap.find_mp mp_sup resolve with Not_found -> MPdot(sub_mp mp,l)) | p -> Deltamap.find_mp p resolve in try sub_mp mp with Not_found -> mp exception Change_equiv_to_inline of (int * constr) let solve_delta_kn resolve kn = try match Deltamap.find_kn kn resolve with | Equiv kn1 -> kn1 | Inline (lev, Some c) -> raise (Change_equiv_to_inline (lev,c)) | Inline (_, None) -> raise Not_found with Not_found -> let mp,dir,l = repr_kn kn in let new_mp = find_prefix resolve mp in if mp == new_mp then kn else make_kn new_mp dir l let kn_of_delta resolve kn = try solve_delta_kn resolve kn with e when Errors.noncritical e -> kn let constant_of_delta_kn resolve kn = constant_of_kn_equiv kn (kn_of_delta resolve kn) let gen_of_delta resolve x kn fix_can = try let new_kn = solve_delta_kn resolve kn in if kn == new_kn then x else fix_can new_kn with e when Errors.noncritical e -> x let constant_of_delta resolve con = let kn = user_con con in gen_of_delta resolve con kn (constant_of_kn_equiv kn) let constant_of_delta2 resolve con = let kn, kn' = canonical_con con, user_con con in gen_of_delta resolve con kn (constant_of_kn_equiv kn') let mind_of_delta_kn resolve kn = mind_of_kn_equiv kn (kn_of_delta resolve kn) let mind_of_delta resolve mind = let kn = user_mind mind in gen_of_delta resolve mind kn (mind_of_kn_equiv kn) let mind_of_delta2 resolve mind = let kn, kn' = canonical_mind mind, user_mind mind in gen_of_delta resolve mind kn (mind_of_kn_equiv kn') let inline_of_delta inline resolver = match inline with | None -> [] | Some inl_lev -> let extract kn hint l = match hint with | Inline (lev,_) -> if lev <= inl_lev then (lev,kn)::l else l | _ -> l in Deltamap.fold_kn extract resolver [] let find_inline_of_delta kn resolve = match Deltamap.find_kn kn resolve with | Inline (_,o) -> o | _ -> raise Not_found let constant_of_delta_with_inline resolve con = let kn1,kn2 = canonical_con con,user_con con in try find_inline_of_delta kn2 resolve with Not_found -> if kn1 == kn2 then None else try find_inline_of_delta kn1 resolve with Not_found -> None let subst_mp0 sub mp = (* 's like subst *) let rec aux mp = match mp with | MPfile sid -> Umap.find_mp mp sub | MPbound bid -> begin try Umap.find_mbi bid sub with Not_found -> Umap.find_mp mp sub end | MPdot (mp1,l) as mp2 -> begin try Umap.find_mp mp2 sub with Not_found -> let mp1',resolve = aux mp1 in MPdot (mp1',l),resolve end in try Some (aux mp) with Not_found -> None let subst_mp sub mp = match subst_mp0 sub mp with None -> mp | Some (mp',_) -> mp' let subst_kn_delta sub kn = let mp,dir,l = repr_kn kn in match subst_mp0 sub mp with Some (mp',resolve) -> solve_delta_kn resolve (make_kn mp' dir l) | None -> kn let subst_kn sub kn = let 
mp,dir,l = repr_kn kn in match subst_mp0 sub mp with Some (mp',_) -> (make_kn mp' dir l) | None -> kn exception No_subst type sideconstantsubst = | User | Canonical let gen_subst_mp f sub mp1 mp2 = let o1 = subst_mp0 sub mp1 in let o2 = if mp1 == mp2 then o1 else subst_mp0 sub mp2 in match o1, o2 with | None, None -> raise No_subst | Some (mp',resolve), None -> User, (f mp' mp2), resolve | None, Some (mp',resolve) -> Canonical, (f mp1 mp'), resolve | Some (mp1',_), Some (mp2',resolve2) -> Canonical, (f mp1' mp2'), resolve2 let subst_ind sub mind = let kn1,kn2 = user_mind mind, canonical_mind mind in let mp1,dir,l = repr_kn kn1 in let mp2,_,_ = repr_kn kn2 in let rebuild_mind mp1 mp2 = make_mind_equiv mp1 mp2 dir l in try let side,mind',resolve = gen_subst_mp rebuild_mind sub mp1 mp2 in match side with | User -> mind_of_delta resolve mind' | Canonical -> mind_of_delta2 resolve mind' with No_subst -> mind let subst_con0 sub con = let kn1,kn2 = user_con con,canonical_con con in let mp1,dir,l = repr_kn kn1 in let mp2,_,_ = repr_kn kn2 in let rebuild_con mp1 mp2 = make_con_equiv mp1 mp2 dir l in let dup con = con, mkConst con in let side,con',resolve = gen_subst_mp rebuild_con sub mp1 mp2 in match constant_of_delta_with_inline resolve con' with | Some t -> (* In case of inlining, discard the canonical part (cf #2608) *) constant_of_kn (user_con con'), t | None -> let con'' = match side with | User -> constant_of_delta resolve con' | Canonical -> constant_of_delta2 resolve con' in if con'' == con then raise No_subst else dup con'' let subst_con sub con = try subst_con0 sub con with No_subst -> con, mkConst con (* Here the semantics is completely unclear. What does "Hint Unfold t" means when "t" is a parameter? Does the user mean "Unfold X.t" or does she mean "Unfold y" where X.t is later on instantiated with y? I choose the first interpretation (i.e. an evaluable reference is never expanded). 
*) let subst_evaluable_reference subst = function | EvalVarRef id -> EvalVarRef id | EvalConstRef kn -> EvalConstRef (fst (subst_con subst kn)) let rec map_kn f f' c = let func = map_kn f f' in match kind_of_term c with | Const kn -> (try snd (f' kn) with No_subst -> c) | Ind (kn,i) -> let kn' = f kn in if kn'==kn then c else mkInd (kn',i) | Construct ((kn,i),j) -> let kn' = f kn in if kn'==kn then c else mkConstruct ((kn',i),j) | Case (ci,p,ct,l) -> let ci_ind = let (kn,i) = ci.ci_ind in let kn' = f kn in if kn'==kn then ci.ci_ind else kn',i in let p' = func p in let ct' = func ct in let l' = array_smartmap func l in if (ci.ci_ind==ci_ind && p'==p && l'==l && ct'==ct)then c else mkCase ({ci with ci_ind = ci_ind}, p',ct', l') | Cast (ct,k,t) -> let ct' = func ct in let t'= func t in if (t'==t && ct'==ct) then c else mkCast (ct', k, t') | Prod (na,t,ct) -> let ct' = func ct in let t'= func t in if (t'==t && ct'==ct) then c else mkProd (na, t', ct') | Lambda (na,t,ct) -> let ct' = func ct in let t'= func t in if (t'==t && ct'==ct) then c else mkLambda (na, t', ct') | LetIn (na,b,t,ct) -> let ct' = func ct in let t'= func t in let b'= func b in if (t'==t && ct'==ct && b==b') then c else mkLetIn (na, b', t', ct') | App (ct,l) -> let ct' = func ct in let l' = array_smartmap func l in if (ct'== ct && l'==l) then c else mkApp (ct',l') | Evar (e,l) -> let l' = array_smartmap func l in if (l'==l) then c else mkEvar (e,l') | Fix (ln,(lna,tl,bl)) -> let tl' = array_smartmap func tl in let bl' = array_smartmap func bl in if (bl == bl'&& tl == tl') then c else mkFix (ln,(lna,tl',bl')) | CoFix(ln,(lna,tl,bl)) -> let tl' = array_smartmap func tl in let bl' = array_smartmap func bl in if (bl == bl'&& tl == tl') then c else mkCoFix (ln,(lna,tl',bl')) | _ -> c let subst_mps sub c = if is_empty_subst sub then c else map_kn (subst_ind sub) (subst_con0 sub) c let rec replace_mp_in_mp mpfrom mpto mp = match mp with | _ when mp = mpfrom -> mpto | MPdot (mp1,l) -> let mp1' = replace_mp_in_mp mpfrom mpto mp1 in if mp1==mp1' then mp else MPdot (mp1',l) | _ -> mp let replace_mp_in_kn mpfrom mpto kn = let mp,dir,l = repr_kn kn in let mp'' = replace_mp_in_mp mpfrom mpto mp in if mp==mp'' then kn else make_kn mp'' dir l let rec mp_in_mp mp mp1 = match mp1 with | _ when mp1 = mp -> true | MPdot (mp2,l) -> mp_in_mp mp mp2 | _ -> false let subset_prefixed_by mp resolver = let mp_prefix mkey mequ rslv = if mp_in_mp mp mkey then Deltamap.add_mp mkey mequ rslv else rslv in let kn_prefix kn hint rslv = match hint with | Inline _ -> rslv | Equiv _ -> if mp_in_mp mp (modpath kn) then Deltamap.add_kn kn hint rslv else rslv in Deltamap.fold mp_prefix kn_prefix resolver empty_delta_resolver let subst_dom_delta_resolver subst resolver = let mp_apply_subst mkey mequ rslv = Deltamap.add_mp (subst_mp subst mkey) mequ rslv in let kn_apply_subst kkey hint rslv = Deltamap.add_kn (subst_kn subst kkey) hint rslv in Deltamap.fold mp_apply_subst kn_apply_subst resolver empty_delta_resolver let subst_mp_delta sub mp mkey = match subst_mp0 sub mp with None -> empty_delta_resolver,mp | Some (mp',resolve) -> let mp1 = find_prefix resolve mp' in let resolve1 = subset_prefixed_by mp1 resolve in (subst_dom_delta_resolver (map_mp mp1 mkey empty_delta_resolver) resolve1),mp1 let gen_subst_delta_resolver dom subst resolver = let mp_apply_subst mkey mequ rslv = let mkey' = if dom then subst_mp subst mkey else mkey in let rslv',mequ' = subst_mp_delta subst mequ mkey in Deltamap.join rslv' (Deltamap.add_mp mkey' mequ' rslv) in let kn_apply_subst kkey 
hint rslv = let kkey' = if dom then subst_kn subst kkey else kkey in let hint' = match hint with | Equiv kequ -> (try Equiv (subst_kn_delta subst kequ) with Change_equiv_to_inline (lev,c) -> Inline (lev,Some c)) | Inline (lev,Some t) -> Inline (lev,Some (subst_mps subst t)) | Inline (_,None) -> hint in Deltamap.add_kn kkey' hint' rslv in Deltamap.fold mp_apply_subst kn_apply_subst resolver empty_delta_resolver let subst_codom_delta_resolver = gen_subst_delta_resolver false let subst_dom_codom_delta_resolver = gen_subst_delta_resolver true let update_delta_resolver resolver1 resolver2 = let mp_apply_rslv mkey mequ rslv = if Deltamap.mem_mp mkey resolver2 then rslv else Deltamap.add_mp mkey (find_prefix resolver2 mequ) rslv in let kn_apply_rslv kkey hint rslv = if Deltamap.mem_kn kkey resolver2 then rslv else let hint' = match hint with | Equiv kequ -> (try Equiv (solve_delta_kn resolver2 kequ) with Change_equiv_to_inline (lev,c) -> Inline (lev, Some c)) | _ -> hint in Deltamap.add_kn kkey hint' rslv in Deltamap.fold mp_apply_rslv kn_apply_rslv resolver1 empty_delta_resolver let add_delta_resolver resolver1 resolver2 = if resolver1 == resolver2 then resolver2 else if resolver2 = empty_delta_resolver then resolver1 else Deltamap.join (update_delta_resolver resolver1 resolver2) resolver2 let substition_prefixed_by k mp subst = let mp_prefixmp kmp (mp_to,reso) sub = if mp_in_mp mp kmp && mp <> kmp then let new_key = replace_mp_in_mp mp k kmp in Umap.add_mp new_key (mp_to,reso) sub else sub in let mbi_prefixmp mbi _ sub = sub in Umap.fold mp_prefixmp mbi_prefixmp subst empty_subst let join subst1 subst2 = let apply_subst mpk add (mp,resolve) res = let mp',resolve' = match subst_mp0 subst2 mp with | None -> mp, None | Some (mp',resolve') -> mp', Some resolve' in let resolve'' = match resolve' with | Some res -> add_delta_resolver (subst_dom_codom_delta_resolver subst2 resolve) res | None -> subst_codom_delta_resolver subst2 resolve in let prefixed_subst = substition_prefixed_by mpk mp' subst2 in Umap.join prefixed_subst (add (mp',resolve'') res) in let mp_apply_subst mp = apply_subst mp (Umap.add_mp mp) in let mbi_apply_subst mbi = apply_subst (MPbound mbi) (Umap.add_mbi mbi) in let subst = Umap.fold mp_apply_subst mbi_apply_subst subst1 empty_subst in Umap.join subst2 subst let rec occur_in_path mbi = function | MPbound bid' -> mbi = bid' | MPdot (mp1,_) -> occur_in_path mbi mp1 | _ -> false let occur_mbid mbi sub = let check_one mbi' (mp,_) = if mbi = mbi' || occur_in_path mbi mp then raise Exit in try Umap.iter_mbi check_one sub; false with Exit -> true type 'a lazy_subst = | LSval of 'a | LSlazy of substitution list * 'a type 'a substituted = 'a lazy_subst ref let from_val a = ref (LSval a) let force fsubst r = match !r with | LSval a -> a | LSlazy(s,a) -> let subst = List.fold_left join empty_subst (List.rev s) in let a' = fsubst subst a in r := LSval a'; a' let subst_substituted s r = match !r with | LSval a -> ref (LSlazy([s],a)) | LSlazy(s',a) -> ref (LSlazy(s::s',a)) (* debug *) let repr_substituted r = match !r with | LSval a -> None, a | LSlazy(s,a) -> Some s, a coq-8.4pl2/kernel/vconv.mli0000640000175000001440000000147312010532755014752 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool val set_use_vm : bool -> unit val vconv : conv_pb -> types conversion_function val val_of_constr : env -> constr -> values 
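(* Illustrative sketch, not part of the original interface: a client wanting
   VM-based conversion could run [set_use_vm true] and then
   [ignore (vconv CONV env t1 t2)], where [env], [t1] and [t2] are
   hypothetical; like the other kernel conversion functions, [vconv] is
   expected to raise [NotConvertible] when the two types do not convert. *)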
coq-8.4pl2/kernel/pre_env.mli0000640000175000001440000000433412010532755015254 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int val push_rel : rel_declaration -> env -> env val lookup_rel_val : int -> env -> lazy_val val env_of_rel : int -> env -> env (** Named context *) val push_named_context_val : named_declaration -> named_context_val -> named_context_val val push_named : named_declaration -> env -> env val lookup_named_val : identifier -> env -> lazy_val val env_of_named : identifier -> env -> env (** Global constants *) val lookup_constant_key : constant -> env -> constant_key val lookup_constant : constant -> env -> constant_body (** Mutual Inductives *) val lookup_mind : mutual_inductive -> env -> mutual_inductive_body coq-8.4pl2/kernel/indtypes.mli0000640000175000001440000000262312010532755015454 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mutual_inductive -> mutual_inductive_entry -> mutual_inductive_body coq-8.4pl2/kernel/cbytegen.mli0000640000175000001440000000331611564530317015422 0ustar notinusersopen Names open Cbytecodes open Cemitcodes open Term open Declarations open Pre_env val compile : env -> constr -> bytecodes * bytecodes * fv (** init, fun, fv *) val compile_constant_body : env -> constant_def -> body_code (** Shortcut of the previous function used during module strengthening *) val compile_alias : constant -> body_code (** spiwack: this function contains the information needed to perform the static compilation of int31 (trying and obtaining a 31-bit integer in processor representation at compile time) *) val compile_structured_int31 : bool -> constr array -> structured_constant (** this function contains the information needed to perform the dynamic compilation of int31 (trying and obtaining a 31-bit integer in processor representation at runtime when it failed at compile time *) val dynamic_int31_compilation : bool -> comp_env -> block array -> int -> bytecodes -> bytecodes (*spiwack: template for the compilation n-ary operation, invariant: n>=1. 
works as follow: checks if all the arguments are non-pointers if they are applies the operation (second argument) if not all of them are, returns to a coq definition (third argument) *) val op_compilation : int -> instruction -> constant -> bool -> comp_env -> constr array -> int -> bytecodes-> bytecodes (*spiwack: compiling function to insert dynamic decompilation before matching integers (in case they are in processor representation) *) val int31_escape_before_match : bool -> bytecodes -> bytecodes coq-8.4pl2/kernel/names.mli0000640000175000001440000001716712010532755014731 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string val id_of_string : string -> identifier val id_ord : identifier -> identifier -> int (** Identifiers sets and maps *) module Idset : Set.S with type elt = identifier module Idpred : Predicate.S with type elt = identifier module Idmap : sig include Map.S with type key = identifier val exists : (identifier -> 'a -> bool) -> 'a t -> bool val singleton : key -> 'a -> 'a t end (** {6 Various types based on identifiers } *) type name = Name of identifier | Anonymous type variable = identifier (** {6 Directory paths = section names paths } *) type module_ident = identifier module ModIdmap : Map.S with type key = module_ident type dir_path (** Inner modules idents on top of list (to improve sharing). For instance: A.B.C is ["C";"B";"A"] *) val make_dirpath : module_ident list -> dir_path val repr_dirpath : dir_path -> module_ident list val empty_dirpath : dir_path (** Printing of directory paths as ["coq_root.module.submodule"] *) val string_of_dirpath : dir_path -> string (** {6 Names of structure elements } *) type label val mk_label : string -> label val string_of_label : label -> string val pr_label : label -> Pp.std_ppcmds val label_of_id : identifier -> label val id_of_label : label -> identifier module Labset : Set.S with type elt = label module Labmap : Map.S with type key = label (** {6 Unique names for bound modules } *) type mod_bound_id (** The first argument is a file name - to prevent conflict between different files *) val make_mbid : dir_path -> identifier -> mod_bound_id val repr_mbid : mod_bound_id -> int * identifier * dir_path val id_of_mbid : mod_bound_id -> identifier val debug_string_of_mbid : mod_bound_id -> string val string_of_mbid : mod_bound_id -> string (** {6 The module part of the kernel name } *) type module_path = | MPfile of dir_path | MPbound of mod_bound_id | MPdot of module_path * label val check_bound_mp : module_path -> bool val string_of_mp : module_path -> string module MPset : Set.S with type elt = module_path module MPmap : Map.S with type key = module_path (** Initial "seed" of the unique identifier generator *) val initial_dir : dir_path (** Name of the toplevel structure *) val initial_path : module_path (** [= MPfile initial_dir] *) (** {6 The absolute names of objects seen by kernel } *) type kernel_name (** Constructor and destructor *) val make_kn : module_path -> dir_path -> label -> kernel_name val repr_kn : kernel_name -> module_path * dir_path * label val modpath : kernel_name -> module_path val label : kernel_name -> label val string_of_kn : kernel_name -> string val pr_kn : kernel_name -> Pp.std_ppcmds val kn_ord : kernel_name -> kernel_name -> int module KNset : Set.S with type elt = kernel_name module KNpred : Predicate.S with type elt = kernel_name module KNmap : Map.S with type key = kernel_name (** {6 
Specific paths for declarations } *) type constant type mutual_inductive (** Beware: first inductive has index 0 *) type inductive = mutual_inductive * int (** Beware: first constructor has index 1 *) type constructor = inductive * int (** *_env modules consider an order on user part of names the others consider an order on canonical part of names*) module Cmap : Map.S with type key = constant module Cmap_env : Map.S with type key = constant module Cpred : Predicate.S with type elt = constant module Cset : Set.S with type elt = constant module Cset_env : Set.S with type elt = constant module Mindmap : Map.S with type key = mutual_inductive module Mindmap_env : Map.S with type key = mutual_inductive module Mindset : Set.S with type elt = mutual_inductive module Indmap : Map.S with type key = inductive module Constrmap : Map.S with type key = constructor module Indmap_env : Map.S with type key = inductive module Constrmap_env : Map.S with type key = constructor val constant_of_kn : kernel_name -> constant val constant_of_kn_equiv : kernel_name -> kernel_name -> constant val make_con : module_path -> dir_path -> label -> constant val make_con_equiv : module_path -> module_path -> dir_path -> label -> constant val user_con : constant -> kernel_name val canonical_con : constant -> kernel_name val repr_con : constant -> module_path * dir_path * label val eq_constant : constant -> constant -> bool val con_with_label : constant -> label -> constant val string_of_con : constant -> string val con_label : constant -> label val con_modpath : constant -> module_path val pr_con : constant -> Pp.std_ppcmds val debug_pr_con : constant -> Pp.std_ppcmds val debug_string_of_con : constant -> string val mind_of_kn : kernel_name -> mutual_inductive val mind_of_kn_equiv : kernel_name -> kernel_name -> mutual_inductive val make_mind : module_path -> dir_path -> label -> mutual_inductive val make_mind_equiv : module_path -> module_path -> dir_path -> label -> mutual_inductive val user_mind : mutual_inductive -> kernel_name val canonical_mind : mutual_inductive -> kernel_name val repr_mind : mutual_inductive -> module_path * dir_path * label val eq_mind : mutual_inductive -> mutual_inductive -> bool val string_of_mind : mutual_inductive -> string val mind_label : mutual_inductive -> label val mind_modpath : mutual_inductive -> module_path val pr_mind : mutual_inductive -> Pp.std_ppcmds val debug_pr_mind : mutual_inductive -> Pp.std_ppcmds val debug_string_of_mind : mutual_inductive -> string val ind_modpath : inductive -> module_path val constr_modpath : constructor -> module_path val ith_mutual_inductive : inductive -> int -> inductive val ith_constructor_of_inductive : inductive -> int -> constructor val inductive_of_constructor : constructor -> inductive val index_of_constructor : constructor -> int val eq_ind : inductive -> inductive -> bool val eq_constructor : constructor -> constructor -> bool (** Better to have it here that in Closure, since required in grammar.cma *) type evaluable_global_reference = | EvalVarRef of identifier | EvalConstRef of constant val eq_egr : evaluable_global_reference -> evaluable_global_reference -> bool (** {6 Hash-consing } *) val hcons_string : string -> string val hcons_ident : identifier -> identifier val hcons_name : name -> name val hcons_dirpath : dir_path -> dir_path val hcons_con : constant -> constant val hcons_mind : mutual_inductive -> mutual_inductive val hcons_ind : inductive -> inductive val hcons_construct : constructor -> constructor (******) type 'a tableKey = 
| ConstKey of constant | VarKey of identifier | RelKey of 'a type transparent_state = Idpred.t * Cpred.t val empty_transparent_state : transparent_state val full_transparent_state : transparent_state val var_full_transparent_state : transparent_state val cst_full_transparent_state : transparent_state type inv_rel_key = int (** index in the [rel_context] part of environment starting by the end, {e inverse} of de Bruijn indice *) type id_key = inv_rel_key tableKey val eq_id_key : id_key -> id_key -> bool (*equalities on constant and inductive names for the checker*) val eq_con_chk : constant -> constant -> bool val eq_ind_chk : inductive -> inductive -> bool coq-8.4pl2/kernel/typeops.mli0000640000175000001440000000704212010532755015320 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> unsafe_judgment * constraints val infer_v : env -> constr array -> unsafe_judgment array * constraints val infer_type : env -> types -> unsafe_type_judgment * constraints val infer_local_decls : env -> (identifier * local_entry) list -> env * rel_context * constraints (** {6 Basic operations of the typing machine. } *) (** If [j] is the judgement {% $ %}c:t{% $ %}, then [assumption_of_judgement env j] returns the type {% $ %}c{% $ %}, checking that {% $ %}t{% $ %} is a sort. *) val assumption_of_judgment : env -> unsafe_judgment -> types val type_judgment : env -> unsafe_judgment -> unsafe_type_judgment (** {6 Type of sorts. } *) val judge_of_prop_contents : contents -> unsafe_judgment val judge_of_type : universe -> unsafe_judgment (** {6 Type of a bound variable. } *) val judge_of_relative : env -> int -> unsafe_judgment (** {6 Type of variables } *) val judge_of_variable : env -> variable -> unsafe_judgment (** {6 type of a constant } *) val judge_of_constant : env -> constant -> unsafe_judgment val judge_of_constant_knowing_parameters : env -> constant -> unsafe_judgment array -> unsafe_judgment (** {6 Type of application. } *) val judge_of_apply : env -> unsafe_judgment -> unsafe_judgment array -> unsafe_judgment * constraints (** {6 Type of an abstraction. } *) val judge_of_abstraction : env -> name -> unsafe_type_judgment -> unsafe_judgment -> unsafe_judgment (** {6 Type of a product. } *) val judge_of_product : env -> name -> unsafe_type_judgment -> unsafe_type_judgment -> unsafe_judgment (** s Type of a let in. *) val judge_of_letin : env -> name -> unsafe_judgment -> unsafe_type_judgment -> unsafe_judgment -> unsafe_judgment (** {6 Type of a cast. } *) val judge_of_cast : env -> unsafe_judgment -> cast_kind -> unsafe_type_judgment -> unsafe_judgment * constraints (** {6 Inductive types. } *) val judge_of_inductive : env -> inductive -> unsafe_judgment val judge_of_inductive_knowing_parameters : env -> inductive -> unsafe_judgment array -> unsafe_judgment val judge_of_constructor : env -> constructor -> unsafe_judgment (** {6 Type of Cases. 
} *) val judge_of_case : env -> case_info -> unsafe_judgment -> unsafe_judgment -> unsafe_judgment array -> unsafe_judgment * constraints (** Typecheck general fixpoint (not checking guard conditions) *) val type_fixpoint : env -> name array -> types array -> unsafe_judgment array -> constraints (** Kernel safe typing but applicable to partial proofs *) val typing : env -> constr -> unsafe_judgment val type_of_constant : env -> constant -> types val type_of_constant_type : env -> constant_type -> types val type_of_constant_knowing_parameters : env -> constant_type -> constr array -> types (** Make a type polymorphic if an arity *) val make_polymorphic_if_constant_for_ind : env -> unsafe_judgment -> constant_type coq-8.4pl2/kernel/type_errors.ml0000640000175000001440000000765312010532755016031 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* NonInformativeToInformative | InType, InSet -> StrongEliminationOnNonSmallType (* if Set impredicative *) | _ -> WrongArity coq-8.4pl2/kernel/environ.ml0000640000175000001440000004145112010532755015126 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ -> false let nb_rel env = env.env_nb_rel let push_rel = push_rel let push_rel_context ctxt x = Sign.fold_rel_context push_rel ctxt ~init:x let push_rec_types (lna,typarray,_) env = let ctxt = array_map2_i (fun i na t -> (na, None, lift i t)) lna typarray in Array.fold_left (fun e assum -> push_rel assum e) env ctxt let fold_rel_context f env ~init = let rec fold_right env = match env.env_rel_context with | [] -> init | rd::rc -> let env = { env with env_rel_context = rc; env_rel_val = List.tl env.env_rel_val; env_nb_rel = env.env_nb_rel - 1 } in f env rd (fold_right env) in fold_right env (* Named context *) let named_context_of_val = fst let named_vals_of_val = snd (* [map_named_val f ctxt] apply [f] to the body and the type of each declarations. 
*** /!\ *** [f t] should be convertible with t *) let map_named_val f (ctxt,ctxtv) = let ctxt = List.map (fun (id,body,typ) -> (id, Option.map f body, f typ)) ctxt in (ctxt,ctxtv) let empty_named_context = empty_named_context let push_named = push_named let push_named_context_val = push_named_context_val let val_of_named_context ctxt = List.fold_right push_named_context_val ctxt empty_named_context_val let lookup_named id env = Sign.lookup_named id env.env_named_context let lookup_named_val id (ctxt,_) = Sign.lookup_named id ctxt let eq_named_context_val c1 c2 = c1 == c2 || named_context_equal (named_context_of_val c1) (named_context_of_val c2) (* A local const is evaluable if it is defined *) let named_type id env = let (_,_,t) = lookup_named id env in t let named_body id env = let (_,b,_) = lookup_named id env in b let evaluable_named id env = match named_body id env with | Some _ -> true | _ -> false let reset_with_named_context (ctxt,ctxtv) env = { env with env_named_context = ctxt; env_named_vals = ctxtv; env_rel_context = empty_rel_context; env_rel_val = []; env_nb_rel = 0 } let reset_context = reset_with_named_context empty_named_context_val let fold_named_context f env ~init = let rec fold_right env = match env.env_named_context with | [] -> init | d::ctxt -> let env = reset_with_named_context (ctxt,List.tl env.env_named_vals) env in f env d (fold_right env) in fold_right env let fold_named_context_reverse f ~init env = Sign.fold_named_context_reverse f ~init:init (named_context env) (* Global constants *) let lookup_constant = lookup_constant let add_constant kn cs env = let new_constants = Cmap_env.add kn (cs,ref None) env.env_globals.env_constants in let new_globals = { env.env_globals with env_constants = new_constants } in { env with env_globals = new_globals } (* constant_type gives the type of a constant *) let constant_type env kn = let cb = lookup_constant kn env in cb.const_type type const_evaluation_result = NoBody | Opaque exception NotEvaluableConst of const_evaluation_result let constant_value env kn = let cb = lookup_constant kn env in match cb.const_body with | Def l_body -> Declarations.force l_body | OpaqueDef _ -> raise (NotEvaluableConst Opaque) | Undef _ -> raise (NotEvaluableConst NoBody) let constant_opt_value env cst = try Some (constant_value env cst) with NotEvaluableConst _ -> None (* A global const is evaluable if it is defined and not opaque *) let evaluable_constant cst env = try let _ = constant_value env cst in true with NotEvaluableConst _ -> false (* Mutual Inductives *) let lookup_mind = lookup_mind let add_mind kn mib env = let new_inds = Mindmap_env.add kn mib env.env_globals.env_inductives in let new_globals = { env.env_globals with env_inductives = new_inds } in { env with env_globals = new_globals } (* Universe constraints *) let add_constraints c env = if is_empty_constraint c then env else let s = env.env_stratification in { env with env_stratification = { s with env_universes = merge_constraints c s.env_universes } } let set_engagement c env = (* Unsafe *) { env with env_stratification = { env.env_stratification with env_engagement = Some c } } (* Lookup of section variables *) let lookup_constant_variables c env = let cmap = lookup_constant c env in Sign.vars_of_named_context cmap.const_hyps let lookup_inductive_variables (kn,i) env = let mis = lookup_mind kn env in Sign.vars_of_named_context mis.mind_hyps let lookup_constructor_variables (ind,_) env = lookup_inductive_variables ind env (* Returns the list of global variables in a term *) 
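(* Illustrative note, not part of the original source: for a hypothetical
   constant [c] declared under a section hypothesis [H], [vars_of_global]
   applied to [mkConst c] returns [[H]] (read off [const_hyps]), and
   [global_vars_set] below folds this over every global reference occurring
   in a term to build the needed [Idset.t]. *)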
let vars_of_global env constr = match kind_of_term constr with Var id -> [id] | Const kn -> lookup_constant_variables kn env | Ind ind -> lookup_inductive_variables ind env | Construct cstr -> lookup_constructor_variables cstr env | _ -> raise Not_found let global_vars_set env constr = let rec filtrec acc c = let acc = match kind_of_term c with | Var _ | Const _ | Ind _ | Construct _ -> List.fold_right Idset.add (vars_of_global env c) acc | _ -> acc in fold_constr filtrec acc c in filtrec Idset.empty constr (* [keep_hyps env ids] keeps the part of the section context of [env] which contains the variables of the set [ids], and recursively the variables contained in the types of the needed variables. *) let keep_hyps env needed = let really_needed = Sign.fold_named_context_reverse (fun need (id,copt,t) -> if Idset.mem id need then let globc = match copt with | None -> Idset.empty | Some c -> global_vars_set env c in Idset.union (global_vars_set env t) (Idset.union globc need) else need) ~init:needed (named_context env) in Sign.fold_named_context (fun (id,_,_ as d) nsign -> if Idset.mem id really_needed then add_named_decl d nsign else nsign) (named_context env) ~init:empty_named_context (* Modules *) let add_modtype ln mtb env = let new_modtypes = MPmap.add ln mtb env.env_globals.env_modtypes in let new_globals = { env.env_globals with env_modtypes = new_modtypes } in { env with env_globals = new_globals } let shallow_add_module mp mb env = let new_mods = MPmap.add mp mb env.env_globals.env_modules in let new_globals = { env.env_globals with env_modules = new_mods } in { env with env_globals = new_globals } let lookup_module mp env = MPmap.find mp env.env_globals.env_modules let lookup_modtype mp env = MPmap.find mp env.env_globals.env_modtypes (*s Judgments. 
*) type unsafe_judgment = { uj_val : constr; uj_type : types } let make_judge v tj = { uj_val = v; uj_type = tj } let j_val j = j.uj_val let j_type j = j.uj_type type unsafe_type_judgment = { utj_val : constr; utj_type : sorts } (*s Compilation of global declaration *) let compile_constant_body = Cbytegen.compile_constant_body exception Hyp_not_found let rec apply_to_hyp (ctxt,vals) id f = let rec aux rtail ctxt vals = match ctxt, vals with | (idc,c,ct as d)::ctxt, v::vals -> if idc = id then (f ctxt d rtail)::ctxt, v::vals else let ctxt',vals' = aux (d::rtail) ctxt vals in d::ctxt', v::vals' | [],[] -> raise Hyp_not_found | _, _ -> assert false in aux [] ctxt vals let rec apply_to_hyp_and_dependent_on (ctxt,vals) id f g = let rec aux ctxt vals = match ctxt,vals with | (idc,c,ct as d)::ctxt, v::vals -> if idc = id then let sign = ctxt,vals in push_named_context_val (f d sign) sign else let (ctxt,vals as sign) = aux ctxt vals in push_named_context_val (g d sign) sign | [],[] -> raise Hyp_not_found | _,_ -> assert false in aux ctxt vals let insert_after_hyp (ctxt,vals) id d check = let rec aux ctxt vals = match ctxt, vals with | (idc,c,ct)::ctxt', v::vals' -> if idc = id then begin check ctxt; push_named_context_val d (ctxt,vals) end else let ctxt,vals = aux ctxt vals in d::ctxt, v::vals | [],[] -> raise Hyp_not_found | _, _ -> assert false in aux ctxt vals (* To be used in Logic.clear_hyps *) let remove_hyps ids check_context check_value (ctxt, vals) = List.fold_right2 (fun (id,_,_ as d) (id',v) (ctxt,vals) -> if List.mem id ids then (ctxt,vals) else let nd = check_context d in let nv = check_value v in (nd::ctxt,(id',nv)::vals)) ctxt vals ([],[]) (*spiwack: the following functions assemble the pieces of the retroknowledge note that the "consistent" register function is available in the module Safetyping, Environ only synchronizes the proactive and the reactive parts*) open Retroknowledge (* lifting of the "get" functions works also for "mem"*) let retroknowledge f env = f env.retroknowledge let registered env field = retroknowledge mem env field (* spiwack: this unregistration function is not in operation yet. It should not be used *) (* this unregistration function assumes that no "constr" can hold two different places in the retroknowledge. There is no reason why it shouldn't be true, but in case someone needs it, remember to add special branches to the unregister function *) let unregister env field = match field with | KInt31 (_,Int31Type) -> (*there is only one matching kind due to the fact that Environ.env is abstract, and that the only function which add elements to the retroknowledge is Environ.register which enforces this shape *) (match retroknowledge find env field with | Ind i31t -> let i31c = Construct (i31t, 1) in {env with retroknowledge = remove (retroknowledge clear_info env i31c) field} | _ -> assert false) |_ -> {env with retroknowledge = try remove (retroknowledge clear_info env (retroknowledge find env field)) field with Not_found -> retroknowledge remove env field} (* the Environ.register function syncrhonizes the proactive and reactive retroknowledge. 
*) let register = (* subfunction used for static decompilation of int31 (after a vm_compute, see pretyping/vnorm.ml for more information) *) let constr_of_int31 = let nth_digit_plus_one i n = (* calculates the nth (starting with 0) digit of i and adds 1 to it (nth_digit_plus_one 1 3 = 2) *) if (land) i ((lsl) 1 n) = 0 then 1 else 2 in fun ind -> fun digit_ind -> fun tag -> let array_of_int i = Array.init 31 (fun n -> mkConstruct (digit_ind, nth_digit_plus_one i (30-n))) in mkApp(mkConstruct(ind, 1), array_of_int tag) in (* subfunction which adds the information bound to the constructor of the int31 type to the reactive retroknowledge *) let add_int31c retroknowledge c = let rk = add_vm_constant_static_info retroknowledge c Cbytegen.compile_structured_int31 in add_vm_constant_dynamic_info rk c Cbytegen.dynamic_int31_compilation in (* subfunction which adds the compiling information of an int31 operation which has a specific vm instruction (associates it to the name of the coq definition in the reactive retroknowledge) *) let add_int31_op retroknowledge v n op kn = add_vm_compiling_info retroknowledge v (Cbytegen.op_compilation n op kn) in fun env field value -> (* subfunction which shortens the (very often use) registration of binary operators to the reactive retroknowledge. *) let add_int31_binop_from_const op = match value with | Const kn -> retroknowledge add_int31_op env value 2 op kn | _ -> anomaly "Environ.register: should be a constant" in let add_int31_unop_from_const op = match value with | Const kn -> retroknowledge add_int31_op env value 1 op kn | _ -> anomaly "Environ.register: should be a constant" in (* subfunction which completes the function constr_of_int31 above by performing the actual retroknowledge operations *) let add_int31_decompilation_from_type rk = (* invariant : the type of bits is registered, otherwise the function would raise Not_found. 
The invariant is enforced in safe_typing.ml *) match field with | KInt31 (grp, Int31Type) -> (match Retroknowledge.find rk (KInt31 (grp,Int31Bits)) with | Ind i31bit_type -> (match value with | Ind i31t -> Retroknowledge.add_vm_decompile_constant_info rk value (constr_of_int31 i31t i31bit_type) | _ -> anomaly "Environ.register: should be an inductive type") | _ -> anomaly "Environ.register: Int31Bits should be an inductive type") | _ -> anomaly "Environ.register: add_int31_decompilation_from_type called with an abnormal field" in {env with retroknowledge = let retroknowledge_with_reactive_info = match field with | KInt31 (_, Int31Type) -> let i31c = match value with | Ind i31t -> (Construct (i31t, 1)) | _ -> anomaly "Environ.register: should be an inductive type" in add_int31_decompilation_from_type (add_vm_before_match_info (retroknowledge add_int31c env i31c) value Cbytegen.int31_escape_before_match) | KInt31 (_, Int31Plus) -> add_int31_binop_from_const Cbytecodes.Kaddint31 | KInt31 (_, Int31PlusC) -> add_int31_binop_from_const Cbytecodes.Kaddcint31 | KInt31 (_, Int31PlusCarryC) -> add_int31_binop_from_const Cbytecodes.Kaddcarrycint31 | KInt31 (_, Int31Minus) -> add_int31_binop_from_const Cbytecodes.Ksubint31 | KInt31 (_, Int31MinusC) -> add_int31_binop_from_const Cbytecodes.Ksubcint31 | KInt31 (_, Int31MinusCarryC) -> add_int31_binop_from_const Cbytecodes.Ksubcarrycint31 | KInt31 (_, Int31Times) -> add_int31_binop_from_const Cbytecodes.Kmulint31 | KInt31 (_, Int31TimesC) -> add_int31_binop_from_const Cbytecodes.Kmulcint31 | KInt31 (_, Int31Div21) -> (* this is a ternary operation *) (match value with | Const kn -> retroknowledge add_int31_op env value 3 Cbytecodes.Kdiv21int31 kn | _ -> anomaly "Environ.register: should be a constant") | KInt31 (_, Int31Div) -> add_int31_binop_from_const Cbytecodes.Kdivint31 | KInt31 (_, Int31AddMulDiv) -> (* this is a ternary operation *) (match value with | Const kn -> retroknowledge add_int31_op env value 3 Cbytecodes.Kaddmuldivint31 kn | _ -> anomaly "Environ.register: should be a constant") | KInt31 (_, Int31Compare) -> add_int31_binop_from_const Cbytecodes.Kcompareint31 | KInt31 (_, Int31Head0) -> add_int31_unop_from_const Cbytecodes.Khead0int31 | KInt31 (_, Int31Tail0) -> add_int31_unop_from_const Cbytecodes.Ktail0int31 | _ -> env.retroknowledge in Retroknowledge.add_field retroknowledge_with_reactive_info field value } coq-8.4pl2/kernel/csymtable.ml0000640000175000001440000001451112121620060015414 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int -> tcode = "coq_tcode_of_code" external eval_tcode : tcode -> values array -> values = "coq_eval_tcode" (*******************) (* Linkage du code *) (*******************) (* Table des globaux *) (* [global_data] contient les valeurs des constantes globales (axiomes,definitions), les annotations des switch et les structured constant *) external global_data : unit -> values array = "get_coq_global_data" (* [realloc_global_data n] augmente de n la taille de [global_data] *) external realloc_global_data : int -> unit = "realloc_coq_global_data" let check_global_data n = if n >= Array.length (global_data()) then realloc_global_data n let num_global = ref 0 let set_global v = let n = !num_global in check_global_data n; (global_data()).(n) <- v; incr num_global; n (* [global_transp],[global_boxed] contiennent les valeurs des definitions gelees. Les deux versions sont maintenues en //. 
[global_transp] contient la version transparente. [global_boxed] contient la version gelees. *) external global_boxed : unit -> bool array = "get_coq_global_boxed" (* [realloc_global_data n] augmente de n la taille de [global_data] *) external realloc_global_boxed : int -> unit = "realloc_coq_global_boxed" let check_global_boxed n = if n >= Array.length (global_boxed()) then realloc_global_boxed n let num_boxed = ref 0 let boxed_tbl = Hashtbl.create 53 let cst_opaque = ref Cpred.full let is_opaque kn = Cpred.mem kn !cst_opaque let set_global_boxed kn v = let n = !num_boxed in check_global_boxed n; (global_boxed()).(n) <- (is_opaque kn); Hashtbl.add boxed_tbl kn n ; incr num_boxed; set_global (val_of_constant_def n kn v) (* table pour les structured_constant et les annotations des switchs *) let str_cst_tbl = Hashtbl.create 31 (* (structured_constant * int) Hashtbl.t*) let annot_tbl = Hashtbl.create 31 (* (annot_switch * int) Hashtbl.t *) (*************************************************************) (*** Mise a jour des valeurs des variables et des constantes *) (*************************************************************) exception NotEvaluated open Pp let key rk = match !rk with | Some k -> (*Pp.msgnl (str"found at: "++int k);*) k | _ -> raise NotEvaluated (************************) (* traduction des patch *) (* slot_for_*, calcul la valeur de l'objet, la place dans la table global, rend sa position dans la table *) let slot_for_str_cst key = try Hashtbl.find str_cst_tbl key with Not_found -> let n = set_global (val_of_str_const key) in Hashtbl.add str_cst_tbl key n; n let slot_for_annot key = try Hashtbl.find annot_tbl key with Not_found -> let n = set_global (val_of_annot_switch key) in Hashtbl.add annot_tbl key n; n let rec slot_for_getglobal env kn = let (cb,rk) = lookup_constant_key kn env in try key rk with NotEvaluated -> (* Pp.msgnl(str"not yet evaluated");*) let pos = match Cemitcodes.force cb.const_body_code with | BCdefined(code,pl,fv) -> let v = eval_to_patch env (code,pl,fv) in set_global v | BCallias kn' -> slot_for_getglobal env kn' | BCconstant -> set_global (val_of_constant kn) in (*Pp.msgnl(str"value stored at: "++int pos);*) rk := Some pos; pos and slot_for_fv env fv = match fv with | FVnamed id -> let nv = Pre_env.lookup_named_val id env in begin match !nv with | VKvalue (v,_) -> v | VKnone -> let (_, b, _) = Sign.lookup_named id env.env_named_context in let v,d = match b with | None -> (val_of_named id, Idset.empty) | Some c -> (val_of_constr env c, Environ.global_vars_set (Environ.env_of_pre_env env) c) in nv := VKvalue (v,d); v end | FVrel i -> let rv = Pre_env.lookup_rel_val i env in begin match !rv with | VKvalue (v, _) -> v | VKnone -> let (_, b, _) = lookup_rel i env.env_rel_context in let (v, d) = match b with | None -> (val_of_rel (nb_rel env - i), Idset.empty) | Some c -> let renv = env_of_rel i env in (val_of_constr renv c, Environ.global_vars_set (Environ.env_of_pre_env renv) c) in rv := VKvalue (v,d); v end and eval_to_patch env (buff,pl,fv) = (* copy code *before* patching because of nested evaluations: the code we are patching might be called (and thus "concurrently" patched) and results in wrong results. Side-effects... 
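       For orientation, the overall evaluation pipeline implemented below is,
       roughly (value_of is a hypothetical name; val_of_constr further down is the
       real function):

         let value_of env c =
           let (init_code, fun_code, fv) as ccfv = compile env c in
           eval_to_patch env (to_memory ccfv)

       i.e. compile the constr to bytecode, link it by resolving the relocations of
       the patch list against the global table through the slot_for_* functions,
       and run it with eval_tcode; the copy performed first protects the shared
       code buffer from being patched twice during such nested evaluations.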
*) let buff = Cemitcodes.copy buff in let patch = function | Reloc_annot a, pos -> patch_int buff pos (slot_for_annot a) | Reloc_const sc, pos -> patch_int buff pos (slot_for_str_cst sc) | Reloc_getglobal kn, pos -> (* Pp.msgnl (str"patching global: "++str(debug_string_of_con kn));*) patch_int buff pos (slot_for_getglobal env kn); (* Pp.msgnl (str"patch done: "++str(debug_string_of_con kn))*) in List.iter patch pl; let vm_env = Array.map (slot_for_fv env) fv in let tc = tcode_of_code buff (length buff) in (*Pp.msgnl (str"execute code");*) eval_tcode tc vm_env and val_of_constr env c = let (_,fun_code,_ as ccfv) = try compile env c with reraise -> print_string "can not compile \n";Format.print_flush();raise reraise in eval_to_patch env (to_memory ccfv) let set_transparent_const kn = cst_opaque := Cpred.remove kn !cst_opaque; List.iter (fun n -> (global_boxed()).(n) <- false) (Hashtbl.find_all boxed_tbl kn) let set_opaque_const kn = cst_opaque := Cpred.add kn !cst_opaque; List.iter (fun n -> (global_boxed()).(n) <- true) (Hashtbl.find_all boxed_tbl kn) coq-8.4pl2/kernel/cbytecodes.ml0000640000175000001440000002267012010532755015574 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* fprintf ppf "L%i:" lbl | Kacc n -> fprintf ppf "\tacc %i" n | Kenvacc n -> fprintf ppf "\tenvacc %i" n | Koffsetclosure n -> fprintf ppf "\toffsetclosure %i" n | Kpush -> fprintf ppf "\tpush" | Kpop n -> fprintf ppf "\tpop %i" n | Kpush_retaddr lbl -> fprintf ppf "\tpush_retaddr L%i" lbl | Kapply n -> fprintf ppf "\tapply %i" n | Kappterm(n, m) -> fprintf ppf "\tappterm %i, %i" n m | Kreturn n -> fprintf ppf "\treturn %i" n | Kjump -> fprintf ppf "\tjump" | Krestart -> fprintf ppf "\trestart" | Kgrab n -> fprintf ppf "\tgrab %i" n | Kgrabrec n -> fprintf ppf "\tgrabrec %i" n | Kclosure(lbl, n) -> fprintf ppf "\tclosure L%i, %i" lbl n | Kclosurerec(fv,init,lblt,lblb) -> fprintf ppf "\tclosurerec"; fprintf ppf "%i , %i, " fv init; print_string "types = "; Array.iter (fun lbl -> fprintf ppf " %i" lbl) lblt; print_string " bodies = "; Array.iter (fun lbl -> fprintf ppf " %i" lbl) lblb; | Kclosurecofix (fv,init,lblt,lblb) -> fprintf ppf "\tclosurecofix"; fprintf ppf " %i , %i, " fv init; print_string "types = "; Array.iter (fun lbl -> fprintf ppf " %i" lbl) lblt; print_string " bodies = "; Array.iter (fun lbl -> fprintf ppf " %i" lbl) lblb; | Kgetglobal id -> fprintf ppf "\tgetglobal %s" (Names.string_of_con id) | Kconst cst -> fprintf ppf "\tconst" | Kmakeblock(n, m) -> fprintf ppf "\tmakeblock %i, %i" n m | Kmakeprod -> fprintf ppf "\tmakeprod" | Kmakeswitchblock(lblt,lbls,_,sz) -> fprintf ppf "\tmakeswitchblock %i, %i, %i" lblt lbls sz | Kswitch(lblc,lblb) -> fprintf ppf "\tswitch"; Array.iter (fun lbl -> fprintf ppf " %i" lbl) lblc; Array.iter (fun lbl -> fprintf ppf " %i" lbl) lblb; | Kpushfields n -> fprintf ppf "\tpushfields %i" n | Ksetfield n -> fprintf ppf "\tsetfield %i" n | Kfield n -> fprintf ppf "\tgetfield %i" n | Kstop -> fprintf ppf "\tstop" | Ksequence (c1,c2) -> fprintf ppf "%a@ %a" instruction_list c1 instruction_list c2 (* spiwack *) | Kbranch lbl -> fprintf ppf "\tbranch %i" lbl | Kaddint31 -> fprintf ppf "\taddint31" | Kaddcint31 -> fprintf ppf "\taddcint31" | Kaddcarrycint31 -> fprintf ppf "\taddcarrycint31" | Ksubint31 -> fprintf ppf "\tsubint31" | Ksubcint31 -> fprintf ppf "\tsubcint31" | Ksubcarrycint31 -> fprintf ppf "\tsubcarrycint31" | Kmulint31 -> fprintf ppf "\tmulint31" | Kmulcint31 
-> fprintf ppf "\tmulcint31" | Kdiv21int31 -> fprintf ppf "\tdiv21int31" | Kdivint31 -> fprintf ppf "\tdivint31" | Kcompareint31 -> fprintf ppf "\tcompareint31" | Khead0int31 -> fprintf ppf "\thead0int31" | Ktail0int31 -> fprintf ppf "\ttail0int31" | Kaddmuldivint31 -> fprintf ppf "\taddmuldivint31" | Kisconst lbl -> fprintf ppf "\tisconst %i" lbl | Kareconst(n,lbl) -> fprintf ppf "\tareconst %i %i" n lbl | Kcompint31 -> fprintf ppf "\tcompint31" | Kdecompint31 -> fprintf ppf "\tdecompint" (* /spiwack *) and instruction_list ppf = function [] -> () | Klabel lbl :: il -> fprintf ppf "L%i:%a" lbl instruction_list il | instr :: il -> fprintf ppf "%a@ %a" instruction instr instruction_list il (*spiwack: moved this type in this file because I needed it for retroknowledge which can't depend from cbytegen *) type block = | Bconstr of constr | Bstrconst of structured_constant | Bmakeblock of int * block array | Bconstruct_app of int * int * int * block array (* tag , nparams, arity *) | Bspecial of (comp_env -> block array -> int -> bytecodes -> bytecodes) * block array (* spiwack: compilation given by a function *) (* compilation function (see get_vm_constant_dynamic_info in retroknowledge.mli for more info) , argument array *) let draw_instr c = fprintf std_formatter "@[%a@]" instruction_list c coq-8.4pl2/kernel/subtyping.ml0000640000175000001440000004006012122674544015474 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Labmap.add (label_of_id id) (IndConstr((ip,i+1), mib)) map) oib.mind_consnames map in Labmap.add (label_of_id oib.mind_typename) (IndType (ip, mib)) map in array_fold_right_i add_mip_nameobjects mib.mind_packets map (* creates (namedobject/namedmodule) map for the whole signature *) type labmap = { objs : namedobject Labmap.t; mods : namedmodule Labmap.t } let empty_labmap = { objs = Labmap.empty; mods = Labmap.empty } let get_obj mp map l = try Labmap.find l map.objs with Not_found -> error_no_such_label_sub l (string_of_mp mp) let get_mod mp map l = try Labmap.find l map.mods with Not_found -> error_no_such_label_sub l (string_of_mp mp) let make_labmap mp list = let add_one (l,e) map = match e with | SFBconst cb -> { map with objs = Labmap.add l (Constant cb) map.objs } | SFBmind mib -> { map with objs = add_mib_nameobjects mp l mib map.objs } | SFBmodule mb -> { map with mods = Labmap.add l (Module mb) map.mods } | SFBmodtype mtb -> { map with mods = Labmap.add l (Modtype mtb) map.mods } in List.fold_right add_one list empty_labmap let check_conv_error error why cst f env a1 a2 = try union_constraints cst (f env a1 a2) with NotConvertible -> error why (* for now we do not allow reorderings *) let check_inductive cst env mp1 l info1 mp2 mib2 spec2 subst1 subst2 reso1 reso2= let kn1 = make_mind mp1 empty_dirpath l in let kn2 = make_mind mp2 empty_dirpath l in let error why = error_signature_mismatch l spec2 why in let check_conv why cst f = check_conv_error error why cst f in let mib1 = match info1 with | IndType ((_,0), mib) -> subst_mind subst1 mib | _ -> error (InductiveFieldExpected mib2) in let mib2 = subst_mind subst2 mib2 in let check_inductive_type cst name env t1 t2 = (* Due to sort-polymorphism in inductive types, the conclusions of t1 and t2, if in Type, are generated as the least upper bounds of the types of the constructors. By monotonicity of the infered l.u.b. wrt subtyping (i.e. 
if X:U |- T(X):s and |- M:U' and U'<=U then infer_type(T(M))<=s), each universe in the conclusion of t1 has an bounding universe in the conclusion of t2, so that we don't need to check the subtyping of the conclusions of t1 and t2. Even if we'd like to recheck it, the inference of constraints is not designed to deal with algebraic constraints of the form max-univ(u1..un) <= max-univ(u'1..u'n), so that it is not easy to recheck it (in short, we would need the actual graph of constraints as input while type checking is currently designed to output a set of constraints instead) *) (* So we cheat and replace the subtyping problem on algebraic constraints of the form max-univ(u1..un) <= max-univ(u'1..u'n) (that we know are necessary true) by trivial constraints that the constraint generator knows how to deal with *) let (ctx1,s1) = dest_arity env t1 in let (ctx2,s2) = dest_arity env t2 in let s1,s2 = match s1, s2 with | Type _, Type _ -> (* shortcut here *) prop_sort, prop_sort | (Prop _, Type _) | (Type _,Prop _) -> error (NotConvertibleInductiveField name) | _ -> (s1, s2) in check_conv (NotConvertibleInductiveField name) cst conv_leq env (mkArity (ctx1,s1)) (mkArity (ctx2,s2)) in let check_packet cst p1 p2 = let check f why = if f p1 <> f p2 then error why in check (fun p -> p.mind_consnames) NotSameConstructorNamesField; check (fun p -> p.mind_typename) NotSameInductiveNameInBlockField; (* nf_lc later *) (* nf_arity later *) (* user_lc ignored *) (* user_arity ignored *) check (fun p -> p.mind_nrealargs) (NotConvertibleInductiveField p2.mind_typename); (* How can it fail since the type of inductive are checked below? [HH] *) (* kelim ignored *) (* listrec ignored *) (* finite done *) (* nparams done *) (* params_ctxt done because part of the inductive types *) (* Don't check the sort of the type if polymorphic *) let cst = check_inductive_type cst p2.mind_typename env (type_of_inductive env (mib1,p1)) (type_of_inductive env (mib2,p2)) in cst in let check_cons_types i cst p1 p2 = array_fold_left3 (fun cst id t1 t2 -> check_conv (NotConvertibleConstructorField id) cst conv env t1 t2) cst p2.mind_consnames (arities_of_specif kn1 (mib1,p1)) (arities_of_specif kn1 (mib2,p2)) in let check f why = if f mib1 <> f mib2 then error (why (f mib2)) in check (fun mib -> mib.mind_finite) (fun x -> FiniteInductiveFieldExpected x); check (fun mib -> mib.mind_ntypes) (fun x -> InductiveNumbersFieldExpected x); assert (mib1.mind_hyps=[] && mib2.mind_hyps=[]); assert (Array.length mib1.mind_packets >= 1 && Array.length mib2.mind_packets >= 1); (* Check that the expected numbers of uniform parameters are the same *) (* No need to check the contexts of parameters: it is checked *) (* at the time of checking the inductive arities in check_packet. *) (* Notice that we don't expect the local definitions to match: only *) (* the inductive types and constructors types have to be convertible *) check (fun mib -> mib.mind_nparams) (fun x -> InductiveParamsNumberField x); begin match mind_of_delta reso2 kn2 with | kn2' when kn2=kn2' -> () | kn2' -> if not (eq_mind (mind_of_delta reso1 kn1) (subst_ind subst2 kn2')) then error NotEqualInductiveAliases end; (* we check that records and their field names are preserved. 
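       For instance, for a record declared, say, as
         Record pt (A : Type) := Build_pt { x : A; y : A }.
       the unique constructor type stored in mind_user_lc is a telescope of
       products whose binder names are the parameter names followed by the field
       names; names_prod_letin below collects those binders, list_chop drops the
       first nparamdecls of them, and the remaining names (here x and y) must
       coincide on both sides for the RecordProjectionsExpected check to succeed.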
*) check (fun mib -> mib.mind_record) (fun x -> RecordFieldExpected x); if mib1.mind_record then begin let rec names_prod_letin t = match kind_of_term t with | Prod(n,_,t) -> n::(names_prod_letin t) | LetIn(n,_,_,t) -> n::(names_prod_letin t) | Cast(t,_,_) -> names_prod_letin t | _ -> [] in assert (Array.length mib1.mind_packets = 1); assert (Array.length mib2.mind_packets = 1); assert (Array.length mib1.mind_packets.(0).mind_user_lc = 1); assert (Array.length mib2.mind_packets.(0).mind_user_lc = 1); check (fun mib -> let nparamdecls = List.length mib.mind_params_ctxt in let names = names_prod_letin (mib.mind_packets.(0).mind_user_lc.(0)) in snd (list_chop nparamdecls names)) (fun x -> RecordProjectionsExpected x); end; (* we first check simple things *) let cst = array_fold_left2 check_packet cst mib1.mind_packets mib2.mind_packets in (* and constructor types in the end *) let cst = array_fold_left2_i check_cons_types cst mib1.mind_packets mib2.mind_packets in cst let check_constant cst env mp1 l info1 cb2 spec2 subst1 subst2 = let error why = error_signature_mismatch l spec2 why in let check_conv cst f = check_conv_error error cst f in let check_type cst env t1 t2 = let err = NotConvertibleTypeField (env, t1, t2) in (* If the type of a constant is generated, it may mention non-variable algebraic universes that the general conversion algorithm is not ready to handle. Anyway, generated types of constants are functions of the body of the constant. If the bodies are the same in environments that are subtypes one of the other, the types are subtypes too (i.e. if Gamma <= Gamma', Gamma |- A |> T, Gamma |- A' |> T' and Gamma |- A=A' then T <= T'). Hence they don't have to be checked again *) let t1,t2 = if isArity t2 then let (ctx2,s2) = destArity t2 in match s2 with | Type v when not (is_univ_variable v) -> (* The type in the interface is inferred and is made of algebraic universes *) begin try let (ctx1,s1) = dest_arity env t1 in match s1 with | Type u when not (is_univ_variable u) -> (* Both types are inferred, no need to recheck them. We cheat and collapse the types to Prop *) mkArity (ctx1,prop_sort), mkArity (ctx2,prop_sort) | Prop _ -> (* The type in the interface is inferred, it may be the case that the type in the implementation is smaller because the body is more reduced. We safely collapse the upper type to Prop *) mkArity (ctx1,prop_sort), mkArity (ctx2,prop_sort) | Type _ -> (* The type in the interface is inferred and the type in the implementation is not inferred or is inferred but from a more reduced body so that it is just a variable. Since constraints of the form "univ <= max(...)" are not expressible in the system of algebraic universes: we fail (the user has to use an explicit type in the interface *) error NoTypeConstraintExpected with NotArity -> error err end | _ -> t1,t2 else (t1,t2) in check_conv err cst conv_leq env t1 t2 in match info1 with | Constant cb1 -> assert (cb1.const_hyps=[] && cb2.const_hyps=[]) ; let cb1 = subst_const_body subst1 cb1 in let cb2 = subst_const_body subst2 cb2 in (* Start by checking types*) let typ1 = Typeops.type_of_constant_type env cb1.const_type in let typ2 = Typeops.type_of_constant_type env cb2.const_type in let cst = check_type cst env typ1 typ2 in (* Now we check the bodies: - A transparent constant can only be implemented by a compatible transparent constant. - In the signature, an opaque is handled just as a parameter: anything of the right type can implement it, even if bodies differ. 
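       Summarised as a table (cb1 is the implementation, cb2 the signature field,
       both after substitution):

         cb2 body            cb1 body              outcome
         Undef / OpaqueDef   anything              accepted, only the types above
                                                   were compared
         Def                 Def                   bodies must be convertible
         Def                 Undef / OpaqueDef     NotConvertibleBodyField

       which is exactly the nested match that follows.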
*) (match cb2.const_body with | Undef _ | OpaqueDef _ -> cst | Def lc2 -> (match cb1.const_body with | Undef _ | OpaqueDef _ -> error NotConvertibleBodyField | Def lc1 -> (* NB: cb1 might have been strengthened and appear as transparent. Anyway [check_conv] will handle that afterwards. *) let c1 = Declarations.force lc1 in let c2 = Declarations.force lc2 in check_conv NotConvertibleBodyField cst conv env c1 c2)) | IndType ((kn,i),mind1) -> ignore (Util.error ( "The kernel does not recognize yet that a parameter can be " ^ "instantiated by an inductive type. Hint: you can rename the " ^ "inductive type and give a definition to map the old name to the new " ^ "name.")); assert (mind1.mind_hyps=[] && cb2.const_hyps=[]) ; if constant_has_body cb2 then error DefinitionFieldExpected; let arity1 = type_of_inductive env (mind1,mind1.mind_packets.(i)) in let typ2 = Typeops.type_of_constant_type env cb2.const_type in let error = NotConvertibleTypeField (env, arity1, typ2) in check_conv error cst conv_leq env arity1 typ2 | IndConstr (((kn,i),j) as cstr,mind1) -> ignore (Util.error ( "The kernel does not recognize yet that a parameter can be " ^ "instantiated by a constructor. Hint: you can rename the " ^ "constructor and give a definition to map the old name to the new " ^ "name.")); assert (mind1.mind_hyps=[] && cb2.const_hyps=[]) ; if constant_has_body cb2 then error DefinitionFieldExpected; let ty1 = type_of_constructor cstr (mind1,mind1.mind_packets.(i)) in let ty2 = Typeops.type_of_constant_type env cb2.const_type in let error = NotConvertibleTypeField (env, ty1, ty2) in check_conv error cst conv env ty1 ty2 let rec check_modules cst env msb1 msb2 subst1 subst2 = let mty1 = module_type_of_module None msb1 in let mty2 = module_type_of_module None msb2 in let cst = check_modtypes cst env mty1 mty2 subst1 subst2 false in cst and check_signatures cst env mp1 sig1 mp2 sig2 subst1 subst2 reso1 reso2= let map1 = make_labmap mp1 sig1 in let check_one_body cst (l,spec2) = match spec2 with | SFBconst cb2 -> check_constant cst env mp1 l (get_obj mp1 map1 l) cb2 spec2 subst1 subst2 | SFBmind mib2 -> check_inductive cst env mp1 l (get_obj mp1 map1 l) mp2 mib2 spec2 subst1 subst2 reso1 reso2 | SFBmodule msb2 -> begin match get_mod mp1 map1 l with | Module msb -> check_modules cst env msb msb2 subst1 subst2 | _ -> error_signature_mismatch l spec2 ModuleFieldExpected end | SFBmodtype mtb2 -> let mtb1 = match get_mod mp1 map1 l with | Modtype mtb -> mtb | _ -> error_signature_mismatch l spec2 ModuleTypeFieldExpected in let env = add_module (module_body_of_type mtb2.typ_mp mtb2) (add_module (module_body_of_type mtb1.typ_mp mtb1) env) in check_modtypes cst env mtb1 mtb2 subst1 subst2 true in List.fold_left check_one_body cst sig2 and check_modtypes cst env mtb1 mtb2 subst1 subst2 equiv = if mtb1==mtb2 then cst else let mtb1',mtb2'=mtb1.typ_expr,mtb2.typ_expr in let rec check_structure cst env str1 str2 equiv subst1 subst2 = match str1,str2 with | SEBstruct (list1), SEBstruct (list2) -> if equiv then let subst2 = add_mp mtb2.typ_mp mtb1.typ_mp mtb1.typ_delta subst2 in Univ.union_constraints (check_signatures cst env mtb1.typ_mp list1 mtb2.typ_mp list2 subst1 subst2 mtb1.typ_delta mtb2.typ_delta) (check_signatures cst env mtb2.typ_mp list2 mtb1.typ_mp list1 subst2 subst1 mtb2.typ_delta mtb1.typ_delta) else check_signatures cst env mtb1.typ_mp list1 mtb2.typ_mp list2 subst1 subst2 mtb1.typ_delta mtb2.typ_delta | SEBfunctor (arg_id1,arg_t1,body_t1), SEBfunctor (arg_id2,arg_t2,body_t2) -> let subst1 = (join (map_mbid 
arg_id1 (MPbound arg_id2) arg_t2.typ_delta) subst1) in let cst = check_modtypes cst env arg_t2 arg_t1 subst2 subst1 equiv in (* contravariant *) let env = add_module (module_body_of_type (MPbound arg_id2) arg_t2) env in let env = match body_t1 with SEBstruct str -> add_module {mod_mp = mtb1.typ_mp; mod_expr = None; mod_type = subst_struct_expr subst1 body_t1; mod_type_alg= None; mod_constraints=mtb1.typ_constraints; mod_retroknowledge = []; mod_delta = mtb1.typ_delta} env | _ -> env in check_structure cst env body_t1 body_t2 equiv subst1 subst2 | _ , _ -> error_incompatible_modtypes mtb1 mtb2 in if mtb1'== mtb2' then cst else check_structure cst env mtb1' mtb2' equiv subst1 subst2 let check_subtypes env sup super = let env = add_module (module_body_of_type sup.typ_mp sup) env in check_modtypes empty_constraint env (strengthen sup sup.typ_mp) super empty_subst (map_mp super.typ_mp sup.typ_mp sup.typ_delta) false coq-8.4pl2/kernel/closure.mli0000640000175000001440000001441112010532755015267 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a (** {6 ... } *) (** Delta implies all consts (both global (= by [kernel_name]) and local (= by [Rel] or [Var])), all evars, and letin's. Rem: reduction of a Rel/Var bound to a term is Delta, but reduction of a LetIn expression is Letin reduction *) val all_opaque : transparent_state val all_transparent : transparent_state val is_transparent_variable : transparent_state -> variable -> bool val is_transparent_constant : transparent_state -> constant -> bool (** Sets of reduction kinds. *) module type RedFlagsSig = sig type reds type red_kind (** The different kinds of reduction *) val fBETA : red_kind val fDELTA : red_kind val fIOTA : red_kind val fZETA : red_kind val fCONST : constant -> red_kind val fVAR : identifier -> red_kind (** No reduction at all *) val no_red : reds (** Adds a reduction kind to a set *) val red_add : reds -> red_kind -> reds (** Removes a reduction kind to a set *) val red_sub : reds -> red_kind -> reds (** Adds a reduction kind to a set *) val red_add_transparent : reds -> transparent_state -> reds (** Build a reduction set from scratch = iter [red_add] on [no_red] *) val mkflags : red_kind list -> reds (** Tests if a reduction kind is set *) val red_set : reds -> red_kind -> bool end module RedFlags : RedFlagsSig open RedFlags val beta : reds val betaiota : reds val betadeltaiota : reds val betaiotazeta : reds val betadeltaiotanolet : reds val unfold_side_red : reds val unfold_red : evaluable_global_reference -> reds (***********************************************************************) type table_key = id_key type 'a infos val ref_value_cache: 'a infos -> table_key -> 'a option val info_flags: 'a infos -> reds val create: ('a infos -> constr -> 'a) -> reds -> env -> (existential -> constr option) -> 'a infos val evar_value : 'a infos -> existential -> constr option (*********************************************************************** s Lazy reduction. 
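   A typical use of this interface, as a minimal sketch (whnf is a hypothetical
   wrapper, not part of this module):

     let whnf env (c : constr) : constr =
       let infos = create_clos_infos betadeltaiota env in
       whd_val infos (inject c)

   create_clos_infos fixes the set of allowed reductions and caches constant
   unfoldings, inject freezes the term, and whd_val (resp. norm_val) computes its
   weak head normal form (resp. its full normal form).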
*) (** [fconstr] is the type of frozen constr *) type fconstr (** [fconstr] can be accessed by using the function [fterm_of] and by matching on type [fterm] *) type fterm = | FRel of int | FAtom of constr (** Metas and Sorts *) | FCast of fconstr * cast_kind * fconstr | FFlex of table_key | FInd of inductive | FConstruct of constructor | FApp of fconstr * fconstr array | FFix of fixpoint * fconstr subs | FCoFix of cofixpoint * fconstr subs | FCases of case_info * fconstr * fconstr * fconstr array | FLambda of int * (name * constr) list * constr * fconstr subs | FProd of name * fconstr * fconstr | FLetIn of name * fconstr * fconstr * constr * fconstr subs | FEvar of existential * fconstr subs | FLIFT of int * fconstr | FCLOS of constr * fconstr subs | FLOCKED (*********************************************************************** s A [stack] is a context of arguments, arguments are pushed by [append_stack] one array at a time but popped with [decomp_stack] one by one *) type stack_member = | Zapp of fconstr array | Zcase of case_info * fconstr * fconstr array | Zfix of fconstr * stack | Zshift of int | Zupdate of fconstr and stack = stack_member list val empty_stack : stack val append_stack : fconstr array -> stack -> stack val decomp_stack : stack -> (fconstr * stack) option val array_of_stack : stack -> fconstr array val stack_assign : stack -> int -> fconstr -> stack val stack_args_size : stack -> int val stack_tail : int -> stack -> stack val stack_nth : stack -> int -> fconstr val zip_term : (fconstr -> constr) -> constr -> stack -> constr val eta_expand_stack : stack -> stack (** To lazy reduce a constr, create a [clos_infos] with [create_clos_infos], inject the term to reduce with [inject]; then use a reduction function *) val inject : constr -> fconstr (** mk_atom: prevents a term from being evaluated *) val mk_atom : constr -> fconstr val fterm_of : fconstr -> fterm val term_of_fconstr : fconstr -> constr val destFLambda : (fconstr subs -> constr -> fconstr) -> fconstr -> name * fconstr * fconstr (** Global and local constant cache *) type clos_infos val create_clos_infos : ?evars:(existential->constr option) -> reds -> env -> clos_infos (** Reduction function *) (** [norm_val] is for strong normalization *) val norm_val : clos_infos -> fconstr -> constr (** [whd_val] is for weak head normalization *) val whd_val : clos_infos -> fconstr -> constr (** [whd_stack] performs weak head normalization in a given stack. It stops whenever a reduction is blocked. 
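    For example, on an application whose head is a blocked constant or a variable,
    whd_stack infos (inject c) empty_stack returns the frozen head together with
    the stack of its pending arguments; the pieces can then be turned back into a
    constr using term_of_fconstr and zip_term.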
*) val whd_stack : clos_infos -> fconstr -> stack -> fconstr * stack (** Conversion auxiliary functions to do step by step normalisation *) (** [unfold_reference] unfolds references in a [fconstr] *) val unfold_reference : clos_infos -> table_key -> fconstr option val eq_table_key : table_key -> table_key -> bool (*********************************************************************** i This is for lazy debug *) val lift_fconstr : int -> fconstr -> fconstr val lift_fconstr_vect : int -> fconstr array -> fconstr array val mk_clos : fconstr subs -> constr -> fconstr val mk_clos_vect : fconstr subs -> constr array -> fconstr array val mk_clos_deep : (fconstr subs -> constr -> fconstr) -> fconstr subs -> constr -> fconstr val kni: clos_infos -> fconstr -> stack -> fconstr * stack val knr: clos_infos -> fconstr -> stack -> fconstr * stack val kl : clos_infos -> fconstr -> constr val to_constr : (lift -> fconstr -> constr) -> lift -> fconstr -> constr val optimise_closure : fconstr subs -> constr -> fconstr subs * constr (** End of cbn debug section i*) coq-8.4pl2/kernel/type_errors.mli0000640000175000001440000000704612010532755016176 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int -> 'a val error_unbound_var : env -> variable -> 'a val error_not_type : env -> unsafe_judgment -> 'a val error_assumption : env -> unsafe_judgment -> 'a val error_reference_variables : env -> constr -> 'a val error_elim_arity : env -> inductive -> sorts_family list -> constr -> unsafe_judgment -> (sorts_family * sorts_family * arity_error) option -> 'a val error_case_not_inductive : env -> unsafe_judgment -> 'a val error_number_branches : env -> unsafe_judgment -> int -> 'a val error_ill_formed_branch : env -> constr -> constructor -> constr -> constr -> 'a val error_generalization : env -> name * types -> unsafe_judgment -> 'a val error_actual_type : env -> unsafe_judgment -> types -> 'a val error_cant_apply_not_functional : env -> unsafe_judgment -> unsafe_judgment array -> 'a val error_cant_apply_bad_type : env -> int * constr * constr -> unsafe_judgment -> unsafe_judgment array -> 'a val error_ill_formed_rec_body : env -> guard_error -> name array -> int -> env -> unsafe_judgment array -> 'a val error_ill_typed_rec_body : env -> int -> name array -> unsafe_judgment array -> types array -> 'a val error_elim_explain : sorts_family -> sorts_family -> arity_error coq-8.4pl2/kernel/cemitcodes.mli0000640000175000001440000000176511615523023015740 0ustar notinusersopen Names open Cbytecodes type reloc_info = | Reloc_annot of annot_switch | Reloc_const of structured_constant | Reloc_getglobal of constant type patch = reloc_info * int (* A virer *) val subst_patch : Mod_subst.substitution -> patch -> patch type emitcodes val copy : emitcodes -> emitcodes val length : emitcodes -> int val patch_int : emitcodes -> (*pos*)int -> int -> unit type to_patch = emitcodes * (patch list) * fv val subst_to_patch : Mod_subst.substitution -> to_patch -> to_patch type body_code = | BCdefined of to_patch | BCallias of constant | BCconstant type to_patch_substituted val from_val : body_code -> to_patch_substituted val force : to_patch_substituted -> body_code val subst_to_patch_subst : Mod_subst.substitution -> to_patch_substituted -> to_patch_substituted val repr_body_code : to_patch_substituted -> Mod_subst.substitution list option * body_code val to_memory : bytecodes * bytecodes * fv -> to_patch (** init code, fun code, fv 
*) coq-8.4pl2/kernel/retroknowledge.ml0000640000175000001440000002045212010532755016477 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* continuation -> result *) (bool->Cbytecodes.comp_env->constr array -> int->Cbytecodes.bytecodes->Cbytecodes.bytecodes) option; vm_constant_static : (*fastcomputation flag -> constructor -> args -> result*) (bool->constr array->Cbytecodes.structured_constant) option; vm_constant_dynamic : (*fastcomputation flag -> constructor -> reloc -> args -> sz -> cont -> result *) (bool->Cbytecodes.comp_env->Cbytecodes.block array->int-> Cbytecodes.bytecodes->Cbytecodes.bytecodes) option; (* fastcomputation flag -> cont -> result *) vm_before_match : (bool -> Cbytecodes.bytecodes -> Cbytecodes.bytecodes) option; (* tag (= compiled int for instance) -> result *) vm_decompile_const : (int -> Term.constr) option} and reactive = reactive_end Reactive.t and retroknowledge = {flags : flags; proactive : proactive; reactive : reactive} (* This type represent an atomic action of the retroknowledge. It is stored in the compiled libraries *) (* As per now, there is only the possibility of registering things the possibility of unregistering or changing the flag is under study *) type action = | RKRegister of field*entry (*initialisation*) let initial_flags = {fastcomputation = true;} let initial_proactive = (Proactive.empty:proactive) let initial_reactive = (Reactive.empty:reactive) let initial_retroknowledge = {flags = initial_flags; proactive = initial_proactive; reactive = initial_reactive } let empty_reactive_end = { vm_compiling = None ; vm_constant_static = None; vm_constant_dynamic = None; vm_before_match = None; vm_decompile_const = None } (* acces functions for proactive retroknowledge *) let add_field knowledge field value = {knowledge with proactive = Proactive.add field value knowledge.proactive} let mem knowledge field = Proactive.mem field knowledge.proactive let remove knowledge field = {knowledge with proactive = Proactive.remove field knowledge.proactive} let find knowledge field = Proactive.find field knowledge.proactive (*access functions for reactive retroknowledge*) (* used for compiling of functions (add, mult, etc..) 
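       Most of the get_vm_* accessors below follow the same shape; schematically
       (get_info and project are hypothetical names):

         let get_info knowledge key project =
           match project (Reactive.find key knowledge.reactive) with
           | None -> raise Not_found
           | Some f -> f knowledge.flags.fastcomputation

       so a caller such as Cbytegen.compile_const simply tries the retroknowledge
       first and falls back to the generic compilation scheme when Not_found is
       raised, either because the key is unknown to the reactive table or because
       the corresponding field is None.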
*) let get_vm_compiling_info knowledge key = match (Reactive.find key knowledge.reactive).vm_compiling with | None -> raise Not_found | Some f -> f knowledge.flags.fastcomputation (* used for compilation of fully applied constructors *) let get_vm_constant_static_info knowledge key = match (Reactive.find key knowledge.reactive).vm_constant_static with | None -> raise Not_found | Some f -> f knowledge.flags.fastcomputation (* used for compilation of partially applied constructors *) let get_vm_constant_dynamic_info knowledge key = match (Reactive.find key knowledge.reactive).vm_constant_dynamic with | None -> raise Not_found | Some f -> f knowledge.flags.fastcomputation let get_vm_before_match_info knowledge key = match (Reactive.find key knowledge.reactive).vm_before_match with | None -> raise Not_found | Some f -> f knowledge.flags.fastcomputation let get_vm_decompile_constant_info knowledge key = match (Reactive.find key knowledge.reactive).vm_decompile_const with | None -> raise Not_found | Some f -> f (* functions manipulating reactive knowledge *) let add_vm_compiling_info knowledge value nfo = {knowledge with reactive = try Reactive.add value {(Reactive.find value (knowledge.reactive)) with vm_compiling = Some nfo} knowledge.reactive with Not_found -> Reactive.add value {empty_reactive_end with vm_compiling = Some nfo} knowledge.reactive } let add_vm_constant_static_info knowledge value nfo = {knowledge with reactive = try Reactive.add value {(Reactive.find value (knowledge.reactive)) with vm_constant_static = Some nfo} knowledge.reactive with Not_found -> Reactive.add value {empty_reactive_end with vm_constant_static = Some nfo} knowledge.reactive } let add_vm_constant_dynamic_info knowledge value nfo = {knowledge with reactive = try Reactive.add value {(Reactive.find value (knowledge.reactive)) with vm_constant_dynamic = Some nfo} knowledge.reactive with Not_found -> Reactive.add value {empty_reactive_end with vm_constant_dynamic = Some nfo} knowledge.reactive } let add_vm_before_match_info knowledge value nfo = {knowledge with reactive = try Reactive.add value {(Reactive.find value (knowledge.reactive)) with vm_before_match = Some nfo} knowledge.reactive with Not_found -> Reactive.add value {empty_reactive_end with vm_before_match = Some nfo} knowledge.reactive } let add_vm_decompile_constant_info knowledge value nfo = {knowledge with reactive = try Reactive.add value {(Reactive.find value (knowledge.reactive)) with vm_decompile_const = Some nfo} knowledge.reactive with Not_found -> Reactive.add value {empty_reactive_end with vm_decompile_const = Some nfo} knowledge.reactive } let clear_info knowledge value = {knowledge with reactive = Reactive.remove value knowledge.reactive} coq-8.4pl2/kernel/cbytegen.ml0000640000175000001440000010011412010532755015236 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* *) (* type = [Ct1 | .... | Ctn] *) (* Ci is the code pointer of the i-th body *) (* At runtime, a fixpoint environment (which is the same as the fixpoint *) (* itself) is a pointer to the field holding its code pointer. *) (* In each fixpoint body, de Bruijn [nbr] represents the first fixpoint *) (* and de Bruijn [1] the last one. *) (* Access to these variables is performed by the [Koffsetclosure n] *) (* instruction that shifts the environment pointer of [n] fields. *) (* This allows to represent mutual fixpoints in just one block. *) (* [Ct1 | ... 
| Ctn] is an array holding code pointers of the fixpoint *) (* types. They are used in conversion tests (which requires that *) (* fixpoint types must be convertible). Their environment is the one of *) (* the last fixpoint : *) (* [t1|C1| ... |tc|Cc| ... |t(nbr)|C(nbr)| fv1 | fv2 | .... | fvn | type] *) (* ^ *) (* Representation of mutual cofix : *) (* a1 = [A_t | accumulate | [Cfx_t | fcofix1 ] ] *) (* ... *) (* anbr = [A_t | accumulate | [Cfx_t | fcofixnbr ] ] *) (* *) (* fcofix1 = [clos_t | code1 | a1 |...| anbr | fv1 |...| fvn | type] *) (* ^ *) (* ... *) (* fcofixnbr = [clos_t | codenbr | a1 |...| anbr | fv1 |...| fvn | type] *) (* ^ *) (* The [ai] blocks are functions that accumulate their arguments: *) (* ai arg1 argp ---> *) (* ai' = [A_t | accumulate | [Cfx_t | fcofixi] | arg1 | ... | argp ] *) (* If such a block is matched against, we have to force evaluation, *) (* function [fcofixi] is then applied to [ai'] [arg1] ... [argp] *) (* Once evaluation is completed [ai'] is updated with the result: *) (* ai' <-- *) (* [A_t | accumulate | [Cfxe_t |fcofixi|result] | arg1 | ... | argp ] *) (* This representation is nice because the application of the cofix is *) (* evaluated only once (it simulates a lazy evaluation) *) (* Moreover, when cofix don't have arguments, it is possible to create *) (* a cycle, e.g.: *) (* cofix one := cons 1 one *) (* a1 = [A_t | accumulate | [Cfx_t|fcofix1] ] *) (* fcofix1 = [clos_t | code | a1] *) (* The result of evaluating [a1] is [cons_t | 1 | a1]. *) (* When [a1] is updated : *) (* a1 = [A_t | accumulate | [Cfxe_t | fcofix1 | [cons_t | 1 | a1]] ] *) (* The cycle is created ... *) (* *) (* In Cfxe_t accumulators, we need to store [fcofixi] for testing *) (* conversion of cofixpoints (which is intentional). *) let empty_fv = { size= 0; fv_rev = [] } let fv r = !(r.in_env) let empty_comp_env ()= { nb_stack = 0; in_stack = []; nb_rec = 0; pos_rec = []; offset = 0; in_env = ref empty_fv; } (*i Creation functions for comp_env *) let rec add_param n sz l = if n = 0 then l else add_param (n - 1) sz (n+sz::l) let comp_env_fun arity = { nb_stack = arity; in_stack = add_param arity 0 []; nb_rec = 0; pos_rec = []; offset = 1; in_env = ref empty_fv } let comp_env_fix_type rfv = { nb_stack = 0; in_stack = []; nb_rec = 0; pos_rec = []; offset = 1; in_env = rfv } let comp_env_fix ndef curr_pos arity rfv = let prec = ref [] in for i = ndef downto 1 do prec := Koffsetclosure (2 * (ndef - curr_pos - i)) :: !prec done; { nb_stack = arity; in_stack = add_param arity 0 []; nb_rec = ndef; pos_rec = !prec; offset = 2 * (ndef - curr_pos - 1)+1; in_env = rfv } let comp_env_cofix_type ndef rfv = { nb_stack = 0; in_stack = []; nb_rec = 0; pos_rec = []; offset = 1+ndef; in_env = rfv } let comp_env_cofix ndef arity rfv = let prec = ref [] in for i = 1 to ndef do prec := Kenvacc i :: !prec done; { nb_stack = arity; in_stack = add_param arity 0 []; nb_rec = ndef; pos_rec = !prec; offset = ndef+1; in_env = rfv } (* [push_param ] add function parameters on the stack *) let push_param n sz r = { r with nb_stack = r.nb_stack + n; in_stack = add_param n sz r.in_stack } (* [push_local sz r] add a new variable on the stack at position [sz] *) let push_local sz r = { r with nb_stack = r.nb_stack + 1; in_stack = (sz + 1) :: r.in_stack } (*i Compilation of variables *) let find_at el l = let rec aux n = function | [] -> raise Not_found | hd :: tl -> if hd = el then n else aux (n+1) tl in aux 1 l let pos_named id r = let env = !(r.in_env) in let cid = FVnamed id in try Kenvacc(r.offset + 
env.size - (find_at cid env.fv_rev)) with Not_found -> let pos = env.size in r.in_env := { size = pos+1; fv_rev = cid:: env.fv_rev}; Kenvacc (r.offset + pos) let pos_rel i r sz = if i <= r.nb_stack then Kacc(sz - (List.nth r.in_stack (i-1))) else let i = i - r.nb_stack in if i <= r.nb_rec then try List.nth r.pos_rec (i-1) with (Failure _|Invalid_argument _) -> assert false else let i = i - r.nb_rec in let db = FVrel(i) in let env = !(r.in_env) in try Kenvacc(r.offset + env.size - (find_at db env.fv_rev)) with Not_found -> let pos = env.size in r.in_env := { size = pos+1; fv_rev = db:: env.fv_rev}; Kenvacc(r.offset + pos) (*i Examination of the continuation *) (* Discard all instructions up to the next label. *) (* This function is to be applied to the continuation before adding a *) (* non-terminating instruction (branch, raise, return, appterm) *) (* in front of it. *) let rec discard_dead_code cont = cont (*function [] -> [] | (Klabel _ | Krestart ) :: _ as cont -> cont | _ :: cont -> discard_dead_code cont *) (* Return a label to the beginning of the given continuation. *) (* If the sequence starts with a branch, use the target of that branch *) (* as the label, thus avoiding a jump to a jump. *) let label_code = function | Klabel lbl :: _ as cont -> (lbl, cont) | Kbranch lbl :: _ as cont -> (lbl, cont) | cont -> let lbl = Label.create() in (lbl, Klabel lbl :: cont) (* Return a branch to the continuation. That is, an instruction that, when executed, branches to the continuation or performs what the continuation performs. We avoid generating branches to returns. *) (* spiwack: make_branch was only used once. Changed it back to the ZAM one to match the appropriate semantics (old one avoided the introduction of an unconditional branch operation, which seemed appropriate for the 31-bit integers' code). As a memory, I leave the former version in this comment. let make_branch cont = match cont with | (Kreturn _ as return) :: cont' -> return, cont' | Klabel lbl as b :: _ -> b, cont | _ -> let b = Klabel(Label.create()) in b,b::cont *) let rec make_branch_2 lbl n cont = function Kreturn m :: _ -> (Kreturn (n + m), cont) | Klabel _ :: c -> make_branch_2 lbl n cont c | Kpop m :: c -> make_branch_2 lbl (n + m) cont c | _ -> match lbl with Some lbl -> (Kbranch lbl, cont) | None -> let lbl = Label.create() in (Kbranch lbl, Klabel lbl :: cont) let make_branch cont = match cont with (Kbranch _ as branch) :: _ -> (branch, cont) | (Kreturn _ as return) :: _ -> (return, cont) | Klabel lbl :: _ -> make_branch_2 (Some lbl) 0 cont cont | _ -> make_branch_2 (None) 0 cont cont (* Check if we're in tailcall position *) let rec is_tailcall = function | Kreturn k :: _ -> Some k | Klabel _ :: c -> is_tailcall c | _ -> None (* Extention of the continuation *) (* Add a Kpop n instruction in front of a continuation *) let rec add_pop n = function | Kpop m :: cont -> add_pop (n+m) cont | Kreturn m:: cont -> Kreturn (n+m) ::cont | cont -> if n = 0 then cont else Kpop n :: cont let add_grab arity lbl cont = if arity = 1 then Klabel lbl :: cont else Krestart :: Klabel lbl :: Kgrab (arity - 1) :: cont let add_grabrec rec_arg arity lbl cont = if arity = 1 then Klabel lbl :: Kgrabrec 0 :: Krestart :: cont else Krestart :: Klabel lbl :: Kgrabrec rec_arg :: Krestart :: Kgrab (arity - 1) :: cont (* continuation of a cofix *) let cont_cofix arity = (* accu = res *) (* stk = ai::args::ra::... *) (* ai = [At|accumulate|[Cfx_t|fcofix]|args] *) [ Kpush; Kpush; (* stk = res::res::ai::args::ra::... 
*) Kacc 2; Kfield 1; Kfield 0; Kmakeblock(2, cofix_evaluated_tag); Kpush; (* stk = [Cfxe_t|fcofix|res]::res::ai::args::ra::...*) Kacc 2; Ksetfield 1; (* ai = [At|accumulate|[Cfxe_t|fcofix|res]|args] *) (* stk = res::ai::args::ra::... *) Kacc 0; (* accu = res *) Kreturn (arity+2) ] (*i Global environment *) let global_env = ref empty_env let set_global_env env = global_env := env (* Code of closures *) let fun_code = ref [] let init_fun_code () = fun_code := [] (* Compilation of constructors and inductive types *) (* Inv : nparam + arity > 0 *) let code_construct tag nparams arity cont = let f_cont = add_pop nparams (if arity = 0 then [Kconst (Const_b0 tag); Kreturn 0] else [Kacc 0; Kpop 1; Kmakeblock(arity, tag); Kreturn 0]) in let lbl = Label.create() in fun_code := [Ksequence (add_grab (nparams+arity) lbl f_cont,!fun_code)]; Kclosure(lbl,0) :: cont let get_strcst = function | Bstrconst sc -> sc | _ -> raise Not_found let rec str_const c = match kind_of_term c with | Sort s -> Bstrconst (Const_sorts s) | Cast(c,_,_) -> str_const c | App(f,args) -> begin match kind_of_term f with | Construct((kn,j),i) -> begin let oib = lookup_mind kn !global_env in let oip = oib.mind_packets.(j) in let num,arity = oip.mind_reloc_tbl.(i-1) in let nparams = oib.mind_nparams in if nparams + arity = Array.length args then (* spiwack: *) (* 1/ tries to compile the constructor in an optimal way, it is supposed to work only if the arguments are all fully constructed, fails with Cbytecodes.NotClosed. it can also raise Not_found when there is no special treatment for this constructor for instance: tries to to compile an integer of the form I31 D1 D2 ... D31 to [D1D2...D31] as a processor number (a caml number actually) *) try try Bstrconst (Retroknowledge.get_vm_constant_static_info (!global_env).retroknowledge (kind_of_term f) args) with NotClosed -> (* 2/ if the arguments are not all closed (this is expectingly (and it is currently the case) the only reason why this exception is raised) tries to give a clever, run-time behavior to the constructor. Raises Not_found if there is no special treatment for this integer. this is done in a lazy fashion, using the constructor Bspecial because it needs to know the continuation and such, which can't be done at this time. 
for instance, for int31: if one of the digit is not closed, it's not impossible that the number gets fully instanciated at run-time, thus to ensure uniqueness of the representation in the vm it is necessary to try and build a caml integer during the execution *) let rargs = Array.sub args nparams arity in let b_args = Array.map str_const rargs in Bspecial ((Retroknowledge.get_vm_constant_dynamic_info (!global_env).retroknowledge (kind_of_term f)), b_args) with Not_found -> (* 3/ if no special behavior is available, then the compiler falls back to the normal behavior *) if arity = 0 then Bstrconst(Const_b0 num) else let rargs = Array.sub args nparams arity in let b_args = Array.map str_const rargs in try let sc_args = Array.map get_strcst b_args in Bstrconst(Const_bn(num, sc_args)) with Not_found -> Bmakeblock(num,b_args) else let b_args = Array.map str_const args in (* spiwack: tries first to apply the run-time compilation behavior of the constructor, as in 2/ above *) try Bspecial ((Retroknowledge.get_vm_constant_dynamic_info (!global_env).retroknowledge (kind_of_term f)), b_args) with Not_found -> Bconstruct_app(num, nparams, arity, b_args) end | _ -> Bconstr c end | Ind ind -> Bstrconst (Const_ind ind) | Construct ((kn,j),i) -> begin (* spiwack: tries first to apply the run-time compilation behavior of the constructor, as in 2/ above *) try Bspecial ((Retroknowledge.get_vm_constant_dynamic_info (!global_env).retroknowledge (kind_of_term c)), [| |]) with Not_found -> let oib = lookup_mind kn !global_env in let oip = oib.mind_packets.(j) in let num,arity = oip.mind_reloc_tbl.(i-1) in let nparams = oib.mind_nparams in if nparams + arity = 0 then Bstrconst(Const_b0 num) else Bconstruct_app(num,nparams,arity,[||]) end | _ -> Bconstr c (* compiling application *) let comp_args comp_expr reloc args sz cont = let nargs_m_1 = Array.length args - 1 in let c = ref (comp_expr reloc args.(0) (sz + nargs_m_1) cont) in for i = 1 to nargs_m_1 do c := comp_expr reloc args.(i) (sz + nargs_m_1 - i) (Kpush :: !c) done; !c let comp_app comp_fun comp_arg reloc f args sz cont = let nargs = Array.length args in match is_tailcall cont with | Some k -> comp_args comp_arg reloc args sz (Kpush :: comp_fun reloc f (sz + nargs) (Kappterm(nargs, k + nargs) :: (discard_dead_code cont))) | None -> if nargs < 4 then comp_args comp_arg reloc args sz (Kpush :: (comp_fun reloc f (sz+nargs) (Kapply nargs :: cont))) else let lbl,cont1 = label_code cont in Kpush_retaddr lbl :: (comp_args comp_arg reloc args (sz + 3) (Kpush :: (comp_fun reloc f (sz+3+nargs) (Kapply nargs :: cont1)))) (* Compiling free variables *) let compile_fv_elem reloc fv sz cont = match fv with | FVrel i -> pos_rel i reloc sz :: cont | FVnamed id -> pos_named id reloc :: cont let rec compile_fv reloc l sz cont = match l with | [] -> cont | [fvn] -> compile_fv_elem reloc fvn sz cont | fvn :: tl -> compile_fv_elem reloc fvn sz (Kpush :: compile_fv reloc tl (sz + 1) cont) (* Compiling constants *) let rec get_allias env kn = let tps = (lookup_constant kn env).const_body_code in match Cemitcodes.force tps with | BCallias kn' -> get_allias env kn' | _ -> kn (* Compiling expressions *) let rec compile_constr reloc c sz cont = match kind_of_term c with | Meta _ -> raise (Invalid_argument "Cbytegen.compile_constr : Meta") | Evar _ -> raise (Invalid_argument "Cbytegen.compile_constr : Evar") | Cast(c,_,_) -> compile_constr reloc c sz cont | Rel i -> pos_rel i reloc sz :: cont | Var id -> pos_named id reloc :: cont | Const kn -> compile_const reloc kn [||] sz cont 
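  (* As a worked example of the strategy implemented in str_const above, consider
     the int31 constructor applied to 31 digit constructors:
     - if every digit is a closed constructor, compile_structured_int31 (further
       below) packs the bits into a single caml integer and the application
       becomes a Const_b0 structured constant, computed once at compile time;
     - if some digit is open, dynamic_int31_compilation emits Kisconst/Kareconst
       tests so that the caml integer is still built at run time whenever the
       digits happen to be instantiated, and a regular 31-field block otherwise;
     - constructors with no retroknowledge entry take the generic path and are
       compiled to Const_b0/Const_bn constants or Bmakeblock/Bconstruct_app. *)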
| Sort _ | Ind _ | Construct _ -> compile_str_cst reloc (str_const c) sz cont | LetIn(_,xb,_,body) -> compile_constr reloc xb sz (Kpush :: (compile_constr (push_local sz reloc) body (sz+1) (add_pop 1 cont))) | Prod(id,dom,codom) -> let cont1 = Kpush :: compile_constr reloc dom (sz+1) (Kmakeprod :: cont) in compile_constr reloc (mkLambda(id,dom,codom)) sz cont1 | Lambda _ -> let params, body = decompose_lam c in let arity = List.length params in let r_fun = comp_env_fun arity in let lbl_fun = Label.create() in let cont_fun = compile_constr r_fun body arity [Kreturn arity] in fun_code := [Ksequence(add_grab arity lbl_fun cont_fun,!fun_code)]; let fv = fv r_fun in compile_fv reloc fv.fv_rev sz (Kclosure(lbl_fun,fv.size) :: cont) | App(f,args) -> begin match kind_of_term f with | Construct _ -> compile_str_cst reloc (str_const c) sz cont | Const kn -> compile_const reloc kn args sz cont | _ -> comp_app compile_constr compile_constr reloc f args sz cont end | Fix ((rec_args,init),(_,type_bodies,rec_bodies)) -> let ndef = Array.length type_bodies in let rfv = ref empty_fv in let lbl_types = Array.create ndef Label.no in let lbl_bodies = Array.create ndef Label.no in (* Compilation des types *) let env_type = comp_env_fix_type rfv in for i = 0 to ndef - 1 do let lbl,fcode = label_code (compile_constr env_type type_bodies.(i) 0 [Kstop]) in lbl_types.(i) <- lbl; fun_code := [Ksequence(fcode,!fun_code)] done; (* Compiling bodies *) for i = 0 to ndef - 1 do let params,body = decompose_lam rec_bodies.(i) in let arity = List.length params in let env_body = comp_env_fix ndef i arity rfv in let cont1 = compile_constr env_body body arity [Kreturn arity] in let lbl = Label.create () in lbl_bodies.(i) <- lbl; let fcode = add_grabrec rec_args.(i) arity lbl cont1 in fun_code := [Ksequence(fcode,!fun_code)] done; let fv = !rfv in compile_fv reloc fv.fv_rev sz (Kclosurerec(fv.size,init,lbl_types,lbl_bodies) :: cont) | CoFix(init,(_,type_bodies,rec_bodies)) -> let ndef = Array.length type_bodies in let lbl_types = Array.create ndef Label.no in let lbl_bodies = Array.create ndef Label.no in (* Compiling types *) let rfv = ref empty_fv in let env_type = comp_env_cofix_type ndef rfv in for i = 0 to ndef - 1 do let lbl,fcode = label_code (compile_constr env_type type_bodies.(i) 0 [Kstop]) in lbl_types.(i) <- lbl; fun_code := [Ksequence(fcode,!fun_code)] done; (* Compiling bodies *) for i = 0 to ndef - 1 do let params,body = decompose_lam rec_bodies.(i) in let arity = List.length params in let env_body = comp_env_cofix ndef arity rfv in let lbl = Label.create () in let cont1 = compile_constr env_body body (arity+1) (cont_cofix arity) in let cont2 = add_grab (arity+1) lbl cont1 in lbl_bodies.(i) <- lbl; fun_code := [Ksequence(cont2,!fun_code)]; done; let fv = !rfv in compile_fv reloc fv.fv_rev sz (Kclosurecofix(fv.size, init, lbl_types, lbl_bodies) :: cont) | Case(ci,t,a,branchs) -> let ind = ci.ci_ind in let mib = lookup_mind (fst ind) !global_env in let oib = mib.mind_packets.(snd ind) in let tbl = oib.mind_reloc_tbl in let lbl_consts = Array.create oib.mind_nb_constant Label.no in let lbl_blocks = Array.create (oib.mind_nb_args+1) Label.no in let branch1,cont = make_branch cont in (* Compiling return type *) let lbl_typ,fcode = label_code (compile_constr reloc t sz [Kpop sz; Kstop]) in fun_code := [Ksequence(fcode,!fun_code)]; (* Compiling branches *) let lbl_sw = Label.create () in let sz_b,branch,is_tailcall = match branch1 with | Kreturn k -> assert (k = sz); sz, branch1, true | _ -> sz+3, Kjump, false in let 
annot = {ci = ci; rtbl = tbl; tailcall = is_tailcall} in (* Compiling branch for accumulators *) let lbl_accu, code_accu = label_code(Kmakeswitchblock(lbl_typ,lbl_sw,annot,sz) :: branch::cont) in lbl_blocks.(0) <- lbl_accu; let c = ref code_accu in (* Compiling regular constructor branches *) for i = 0 to Array.length tbl - 1 do let tag, arity = tbl.(i) in if arity = 0 then let lbl_b,code_b = label_code(compile_constr reloc branchs.(i) sz_b (branch :: !c)) in lbl_consts.(tag) <- lbl_b; c := code_b else let args, body = decompose_lam branchs.(i) in let nargs = List.length args in let lbl_b,code_b = label_code( if nargs = arity then Kpushfields arity :: compile_constr (push_param arity sz_b reloc) body (sz_b+arity) (add_pop arity (branch :: !c)) else let sz_appterm = if is_tailcall then sz_b + arity else arity in Kpushfields arity :: compile_constr reloc branchs.(i) (sz_b+arity) (Kappterm(arity,sz_appterm) :: !c)) in lbl_blocks.(tag) <- lbl_b; c := code_b done; c := Klabel lbl_sw :: Kswitch(lbl_consts,lbl_blocks) :: !c; let code_sw = match branch1 with (* spiwack : branch1 can't be a lbl anymore it's a Branch instead | Klabel lbl -> Kpush_retaddr lbl :: !c *) | Kbranch lbl -> Kpush_retaddr lbl :: !c | _ -> !c in compile_constr reloc a sz (try let entry = Term.Ind ind in Retroknowledge.get_vm_before_match_info (!global_env).retroknowledge entry code_sw with Not_found -> code_sw) and compile_str_cst reloc sc sz cont = match sc with | Bconstr c -> compile_constr reloc c sz cont | Bstrconst sc -> Kconst sc :: cont | Bmakeblock(tag,args) -> let nargs = Array.length args in comp_args compile_str_cst reloc args sz (Kmakeblock(nargs,tag) :: cont) | Bconstruct_app(tag,nparams,arity,args) -> if Array.length args = 0 then code_construct tag nparams arity cont else comp_app (fun _ _ _ cont -> code_construct tag nparams arity cont) compile_str_cst reloc () args sz cont | Bspecial (comp_fx, args) -> comp_fx reloc args sz cont (* spiwack : compilation of constants with their arguments. 
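   In short: compile_const first asks the retroknowledge for a dedicated
   compilation scheme and only falls back on Kgetglobal plus an ordinary
   application when Not_found is raised.  For a fully applied binary int31
   primitive, the scheme installed by op_compilation (further below) produces,
   roughly:

     <compile the two arguments>
     Kisconst else_lbl; Kareconst (1, else_lbl)   jump to else_lbl unless both
                                                  arguments are immediate integers
     Kaddint31; <branch to the continuation>      fast path
     Klabel else_lbl; Kpush;
     Kgetglobal kn; Kapply 2                      slow path: call the ordinary
                                                  Coq definition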
Makes a special treatment with 31-bit integer addition *) and compile_const = fun reloc-> fun kn -> fun args -> fun sz -> fun cont -> let nargs = Array.length args in (* spiwack: checks if there is a specific way to compile the constant if there is not, Not_found is raised, and the function falls back on its normal behavior *) try Retroknowledge.get_vm_compiling_info (!global_env).retroknowledge (kind_of_term (mkConst kn)) reloc args sz cont with Not_found -> if nargs = 0 then Kgetglobal (get_allias !global_env kn) :: cont else comp_app (fun _ _ _ cont -> Kgetglobal (get_allias !global_env kn) :: cont) compile_constr reloc () args sz cont let compile env c = set_global_env env; init_fun_code (); Label.reset_label_counter (); let reloc = empty_comp_env () in let init_code = compile_constr reloc c 0 [Kstop] in let fv = List.rev (!(reloc.in_env).fv_rev) in (* draw_instr init_code; draw_instr !fun_code; Format.print_string "fv = "; List.iter (fun v -> match v with | FVnamed id -> Format.print_string ((string_of_id id)^"; ") | FVrel i -> Format.print_string ((string_of_int i)^"; ")) fv; Format .print_string "\n"; Format.print_flush(); *) init_code,!fun_code, Array.of_list fv let compile_constant_body env = function | Undef _ | OpaqueDef _ -> BCconstant | Def sb -> let body = Declarations.force sb in match kind_of_term body with | Const kn' -> (* we use the canonical name of the constant*) let con= constant_of_kn (canonical_con kn') in BCallias (get_allias env con) | _ -> let res = compile env body in let to_patch = to_memory res in BCdefined to_patch (* Shortcut of the previous function used during module strengthening *) let compile_alias kn = BCallias (constant_of_kn (canonical_con kn)) (* spiwack: additional function which allow different part of compilation of the 31-bit integers *) let make_areconst n else_lbl cont = if n <=0 then cont else Kareconst (n, else_lbl)::cont (* try to compile int31 as a const_b0. 
Succeed if all the arguments are closed fails otherwise by raising NotClosed*) let compile_structured_int31 fc args = if not fc then raise Not_found else Const_b0 (Array.fold_left (fun temp_i -> fun t -> match kind_of_term t with | Construct (_,d) -> 2*temp_i+d-1 | _ -> raise NotClosed) 0 args ) (* this function is used for the compilation of the constructor of the int31, it is used when it appears not fully applied, or applied to at least one non-closed digit *) let dynamic_int31_compilation fc reloc args sz cont = if not fc then raise Not_found else let nargs = Array.length args in if nargs = 31 then let (escape,labeled_cont) = make_branch cont in let else_lbl = Label.create() in comp_args compile_str_cst reloc args sz ( Kisconst else_lbl::Kareconst(30,else_lbl)::Kcompint31::escape::Klabel else_lbl::Kmakeblock(31, 1)::labeled_cont) else let code_construct cont = (* spiwack: variant of the global code_construct which handles dynamic compilation of integers *) let f_cont = let else_lbl = Label.create () in [Kacc 0; Kpop 1; Kisconst else_lbl; Kareconst(30,else_lbl); Kcompint31; Kreturn 0; Klabel else_lbl; Kmakeblock(31, 1); Kreturn 0] in let lbl = Label.create() in fun_code := [Ksequence (add_grab 31 lbl f_cont,!fun_code)]; Kclosure(lbl,0) :: cont in if nargs = 0 then code_construct cont else comp_app (fun _ _ _ cont -> code_construct cont) compile_str_cst reloc () args sz cont (*(* template compilation for 2ary operation, it probably possible to make a generic such function with arity abstracted *) let op2_compilation op = let code_construct normal cont = (*kn cont =*) let f_cont = let else_lbl = Label.create () in Kareconst(2, else_lbl):: Kacc 0:: Kpop 1:: op:: Kreturn 0:: Klabel else_lbl:: (* works as comp_app with nargs = 2 and tailcall cont [Kreturn 0]*) (*Kgetglobal (get_allias !global_env kn):: *) normal:: Kappterm(2, 2):: [] (* = discard_dead_code [Kreturn 0] *) in let lbl = Label.create () in fun_code := [Ksequence (add_grab 2 lbl f_cont, !fun_code)]; Kclosure(lbl, 0)::cont in fun normal fc _ reloc args sz cont -> if not fc then raise Not_found else let nargs = Array.length args in if nargs=2 then (*if it is a fully applied addition*) let (escape, labeled_cont) = make_branch cont in let else_lbl = Label.create () in comp_args compile_constr reloc args sz (Kisconst else_lbl::(make_areconst 1 else_lbl (*Kaddint31::escape::Klabel else_lbl::Kpush::*) (op::escape::Klabel else_lbl::Kpush:: (* works as comp_app with nargs = 2 and non-tailcall cont*) (*Kgetglobal (get_allias !global_env kn):: *) normal:: Kapply 2::labeled_cont))) else if nargs=0 then code_construct normal cont else comp_app (fun _ _ _ cont -> code_construct normal cont) compile_constr reloc () args sz cont *) (*template for n-ary operation, invariant: n>=1, the operations does the following : 1/ checks if all the arguments are constants (i.e. 
non-block values) 2/ if they are, uses the "op" instruction to execute 3/ if at least one is not, branches to the normal behavior: Kgetglobal (get_allias !global_env kn) *) let op_compilation n op = let code_construct kn cont = let f_cont = let else_lbl = Label.create () in Kareconst(n, else_lbl):: Kacc 0:: Kpop 1:: op:: Kreturn 0:: Klabel else_lbl:: (* works as comp_app with nargs = n and tailcall cont [Kreturn 0]*) Kgetglobal (get_allias !global_env kn):: Kappterm(n, n):: [] (* = discard_dead_code [Kreturn 0] *) in let lbl = Label.create () in fun_code := [Ksequence (add_grab n lbl f_cont, !fun_code)]; Kclosure(lbl, 0)::cont in fun kn fc reloc args sz cont -> if not fc then raise Not_found else let nargs = Array.length args in if nargs=n then (*if it is a fully applied addition*) let (escape, labeled_cont) = make_branch cont in let else_lbl = Label.create () in comp_args compile_constr reloc args sz (Kisconst else_lbl::(make_areconst (n-1) else_lbl (*Kaddint31::escape::Klabel else_lbl::Kpush::*) (op::escape::Klabel else_lbl::Kpush:: (* works as comp_app with nargs = n and non-tailcall cont*) Kgetglobal (get_allias !global_env kn):: Kapply n::labeled_cont))) else if nargs=0 then code_construct kn cont else comp_app (fun _ _ _ cont -> code_construct kn cont) compile_constr reloc () args sz cont let int31_escape_before_match fc cont = if not fc then raise Not_found else let escape_lbl, labeled_cont = label_code cont in (Kisconst escape_lbl)::Kdecompint31::labeled_cont coq-8.4pl2/kernel/modops.ml0000640000175000001440000005027212122674544014757 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (arg_id,arg_t,body_t) | _ -> error_not_a_functor mtb let is_functor = function | SEBfunctor (arg_id,arg_t,body_t) -> true | _ -> false let module_body_of_type mp mtb = { mod_mp = mp; mod_type = mtb.typ_expr; mod_type_alg = mtb.typ_expr_alg; mod_expr = None; mod_constraints = mtb.typ_constraints; mod_delta = mtb.typ_delta; mod_retroknowledge = []} let check_modpath_equiv env mp1 mp2 = if mp1=mp2 then () else let mb1=lookup_module mp1 env in let mb2=lookup_module mp2 env in if (mp_of_delta mb1.mod_delta mp1)=(mp_of_delta mb2.mod_delta mp2) then () else error_not_equal_modpaths mp1 mp2 let rec subst_with_body sub = function | With_module_body(id,mp) -> With_module_body(id,subst_mp sub mp) | With_definition_body(id,cb) -> With_definition_body( id,subst_const_body sub cb) and subst_modtype sub do_delta mtb= let mp = subst_mp sub mtb.typ_mp in let sub = add_mp mtb.typ_mp mp empty_delta_resolver sub in let typ_expr' = subst_struct_expr sub do_delta mtb.typ_expr in let typ_alg' = Option.smartmap (subst_struct_expr sub (fun x y-> x)) mtb.typ_expr_alg in let mtb_delta = do_delta mtb.typ_delta sub in if typ_expr'==mtb.typ_expr && typ_alg'==mtb.typ_expr_alg && mp==mtb.typ_mp then mtb else {mtb with typ_mp = mp; typ_expr = typ_expr'; typ_expr_alg = typ_alg'; typ_delta = mtb_delta} and subst_structure sub do_delta sign = let subst_body = function SFBconst cb -> SFBconst (subst_const_body sub cb) | SFBmind mib -> SFBmind (subst_mind sub mib) | SFBmodule mb -> SFBmodule (subst_module sub do_delta mb) | SFBmodtype mtb -> SFBmodtype (subst_modtype sub do_delta mtb) in List.map (fun (l,b) -> (l,subst_body b)) sign and subst_module sub do_delta mb = let mp = subst_mp sub mb.mod_mp in let sub = if is_functor mb.mod_type && not(mp=mb.mod_mp) then add_mp mb.mod_mp mp empty_delta_resolver sub else sub in let id_delta = 
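(* identity action on a delta resolver: used below when substituting [mod_expr] and [mod_type_alg], whose resolvers are deliberately left untouched, whereas [mod_type] is substituted with the [do_delta] action *)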
(fun x y-> x) in let mtb',me' = let mtb = subst_struct_expr sub do_delta mb.mod_type in match mb.mod_expr with None -> mtb,None | Some me -> if me==mb.mod_type then mtb,Some mtb else mtb,Option.smartmap (subst_struct_expr sub id_delta) mb.mod_expr in let typ_alg' = Option.smartmap (subst_struct_expr sub id_delta) mb.mod_type_alg in let mb_delta = do_delta mb.mod_delta sub in if mtb'==mb.mod_type && mb.mod_expr == me' && mb_delta == mb.mod_delta && mp == mb.mod_mp then mb else { mb with mod_mp = mp; mod_expr = me'; mod_type_alg = typ_alg'; mod_type=mtb'; mod_delta = mb_delta} and subst_struct_expr sub do_delta = function | SEBident mp -> SEBident (subst_mp sub mp) | SEBfunctor (mbid, mtb, meb') -> SEBfunctor(mbid,subst_modtype sub do_delta mtb ,subst_struct_expr sub do_delta meb') | SEBstruct (str)-> SEBstruct( subst_structure sub do_delta str) | SEBapply (meb1,meb2,cst)-> SEBapply(subst_struct_expr sub do_delta meb1, subst_struct_expr sub do_delta meb2, cst) | SEBwith (meb,wdb)-> SEBwith(subst_struct_expr sub do_delta meb, subst_with_body sub wdb) let subst_signature subst = subst_structure subst (fun resolver subst-> subst_codom_delta_resolver subst resolver) let subst_struct_expr subst = subst_struct_expr subst (fun resolver subst-> subst_codom_delta_resolver subst resolver) (* spiwack: here comes the function which takes care of importing the retroknowledge declared in the library *) (* lclrk : retroknowledge_action list, rkaction : retroknowledge action *) let add_retroknowledge mp = let perform rkaction env = match rkaction with | Retroknowledge.RKRegister (f, e) -> Environ.register env f (match e with | Const kn -> kind_of_term (mkConst kn) | Ind ind -> kind_of_term (mkInd ind) | _ -> anomaly "Modops.add_retroknowledge: had to import an unsupported kind of term") in fun lclrk env -> (* The order of the declaration matters, for instance (and it's at the time this comment is being written, the only relevent instance) the int31 type registration absolutely needs int31 bits to be registered. Since the local_retroknowledge is stored in reverse order (each new registration is added at the top of the list) we need a fold_right for things to go right (the pun is not intented). 
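(Concretely, if the bits registration happened before the type registration, the stored list reads [type-action; bits-action]; fold_right replays the bits action first and only then the type action, as required.)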
So we lose tail recursivity, but the world will have exploded before any module imports 10 000 retroknowledge registration.*) List.fold_right perform lclrk env let rec add_signature mp sign resolver env = let add_one env (l,elem) = let kn = make_kn mp empty_dirpath l in match elem with | SFBconst cb -> Environ.add_constant (constant_of_delta_kn resolver kn) cb env | SFBmind mib -> Environ.add_mind (mind_of_delta_kn resolver kn) mib env | SFBmodule mb -> add_module mb env (* adds components as well *) | SFBmodtype mtb -> Environ.add_modtype mtb.typ_mp mtb env in List.fold_left add_one env sign and add_module mb env = let mp = mb.mod_mp in let env = Environ.shallow_add_module mp mb env in match mb.mod_type with | SEBstruct (sign) -> add_retroknowledge mp mb.mod_retroknowledge (add_signature mp sign mb.mod_delta env) | SEBfunctor _ -> env | _ -> anomaly "Modops:the evaluation of the structure failed " let strengthen_const mp_from l cb resolver = match cb.const_body with | Def _ -> cb | _ -> let kn = make_kn mp_from empty_dirpath l in let con = constant_of_delta_kn resolver kn in { cb with const_body = Def (Declarations.from_val (mkConst con)); const_body_code = Cemitcodes.from_val (Cbytegen.compile_alias con) } let rec strengthen_mod mp_from mp_to mb = if mp_in_delta mb.mod_mp mb.mod_delta then mb else match mb.mod_type with | SEBstruct (sign) -> let resolve_out,sign_out = strengthen_sig mp_from sign mp_to mb.mod_delta in { mb with mod_expr = Some (SEBident mp_to); mod_type = SEBstruct(sign_out); mod_type_alg = mb.mod_type_alg; mod_constraints = mb.mod_constraints; mod_delta = add_mp_delta_resolver mp_from mp_to (add_delta_resolver mb.mod_delta resolve_out); mod_retroknowledge = mb.mod_retroknowledge} | SEBfunctor _ -> mb | _ -> anomaly "Modops:the evaluation of the structure failed " and strengthen_sig mp_from sign mp_to resolver = match sign with | [] -> empty_delta_resolver,[] | (l,SFBconst cb) :: rest -> let item' = l,SFBconst (strengthen_const mp_from l cb resolver) in let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out,item'::rest' | (_,SFBmind _ as item):: rest -> let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out,item::rest' | (l,SFBmodule mb) :: rest -> let mp_from' = MPdot (mp_from,l) in let mp_to' = MPdot(mp_to,l) in let mb_out = strengthen_mod mp_from' mp_to' mb in let item' = l,SFBmodule (mb_out) in let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in add_delta_resolver resolve_out mb.mod_delta, item':: rest' | (l,SFBmodtype mty as item) :: rest -> let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out,item::rest' let strengthen mtb mp = if mp_in_delta mtb.typ_mp mtb.typ_delta then (* in this case mtb has already been strengthened*) mtb else match mtb.typ_expr with | SEBstruct (sign) -> let resolve_out,sign_out = strengthen_sig mtb.typ_mp sign mp mtb.typ_delta in {mtb with typ_expr = SEBstruct(sign_out); typ_delta = add_delta_resolver mtb.typ_delta (add_mp_delta_resolver mtb.typ_mp mp resolve_out)} | SEBfunctor _ -> mtb | _ -> anomaly "Modops:the evaluation of the structure failed " let module_type_of_module mp mb = match mp with Some mp -> strengthen { typ_mp = mp; typ_expr = mb.mod_type; typ_expr_alg = None; typ_constraints = mb.mod_constraints; typ_delta = mb.mod_delta} mp | None -> {typ_mp = mb.mod_mp; typ_expr = mb.mod_type; typ_expr_alg = None; typ_constraints = mb.mod_constraints; typ_delta = mb.mod_delta} let inline_delta_resolver env inl mp mbid mtb delta = let 
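(* for every constant marked as inlineable in [mtb]'s resolver, relocate its kernel name from the functor argument [mbid] to the actual argument path [mp], fetch its body in [env] when it has one, and record that body in [delta] so that it can be inlined when the functor is applied *)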
constants = inline_of_delta inl mtb.typ_delta in let rec make_inline delta = function | [] -> delta | (lev,kn)::r -> let kn = replace_mp_in_kn (MPbound mbid) mp kn in let con = constant_of_delta_kn delta kn in try let constant = lookup_constant con env in let l = make_inline delta r in match constant.const_body with | Undef _ | OpaqueDef _ -> l | Def body -> let constr = Declarations.force body in add_inline_delta_resolver kn (lev, Some constr) l with Not_found -> error_no_such_label_sub (con_label con) (string_of_mp (con_modpath con)) in make_inline delta constants let rec strengthen_and_subst_mod mb subst mp_from mp_to resolver = match mb.mod_type with SEBstruct(str) -> let mb_is_an_alias = mp_in_delta mb.mod_mp mb.mod_delta in if mb_is_an_alias then subst_module subst (fun resolver subst-> subst_dom_delta_resolver subst resolver) mb else let resolver,new_sig = strengthen_and_subst_struct str subst mp_from mp_from mp_to false false mb.mod_delta in {mb with mod_mp = mp_to; mod_expr = Some (SEBident mp_from); mod_type = SEBstruct(new_sig); mod_delta = add_mp_delta_resolver mp_to mp_from resolver} | SEBfunctor(arg_id,arg_b,body) -> let subst = add_mp mb.mod_mp mp_to empty_delta_resolver subst in subst_module subst (fun resolver subst-> subst_dom_codom_delta_resolver subst resolver) mb | _ -> anomaly "Modops:the evaluation of the structure failed " and strengthen_and_subst_struct str subst mp_alias mp_from mp_to alias incl resolver = match str with | [] -> empty_delta_resolver,[] | (l,SFBconst cb) :: rest -> let item' = if alias then (* case alias no strengthening needed*) l,SFBconst (subst_const_body subst cb) else l,SFBconst (strengthen_const mp_from l (subst_const_body subst cb) resolver) in let resolve_out,rest' = strengthen_and_subst_struct rest subst mp_alias mp_from mp_to alias incl resolver in if incl then (* If we are performing an inclusion we need to add the fact that the constant mp_to.l is \Delta-equivalent to resolver(mp_from.l) *) let kn_from = make_kn mp_from empty_dirpath l in let kn_to = make_kn mp_to empty_dirpath l in let old_name = kn_of_delta resolver kn_from in (add_kn_delta_resolver kn_to old_name resolve_out), item'::rest' else (*In this case the fact that the constant mp_to.l is \Delta-equivalent to resolver(mp_from.l) is already known because resolve_out contains mp_to maps to resolver(mp_from)*) resolve_out,item'::rest' | (l,SFBmind mib) :: rest -> (*Same as constant*) let item' = l,SFBmind (subst_mind subst mib) in let resolve_out,rest' = strengthen_and_subst_struct rest subst mp_alias mp_from mp_to alias incl resolver in if incl then let kn_from = make_kn mp_from empty_dirpath l in let kn_to = make_kn mp_to empty_dirpath l in let old_name = kn_of_delta resolver kn_from in (add_kn_delta_resolver kn_to old_name resolve_out), item'::rest' else resolve_out,item'::rest' | (l,SFBmodule mb) :: rest -> let mp_from' = MPdot (mp_from,l) in let mp_to' = MPdot(mp_to,l) in let mb_out = if alias then subst_module subst (fun resolver subst -> subst_dom_delta_resolver subst resolver) mb else strengthen_and_subst_mod mb subst mp_from' mp_to' resolver in let item' = l,SFBmodule (mb_out) in let resolve_out,rest' = strengthen_and_subst_struct rest subst mp_alias mp_from mp_to alias incl resolver in (* if mb is a functor we should not derive new equivalences on names, hence we add the fact that the functor can only be equivalent to itself. 
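(Concretely, the entry added below maps the submodule's new path [mp_to'] to itself, rather than to [mp_from'].)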
If we adopt an applicative semantic for functor this should be changed.*) if is_functor mb_out.mod_type then (add_mp_delta_resolver mp_to' mp_to' resolve_out),item':: rest' else add_delta_resolver resolve_out mb_out.mod_delta, item':: rest' | (l,SFBmodtype mty) :: rest -> let mp_from' = MPdot (mp_from,l) in let mp_to' = MPdot(mp_to,l) in let subst' = add_mp mp_from' mp_to' empty_delta_resolver subst in let mty = subst_modtype subst' (fun resolver subst -> subst_dom_codom_delta_resolver subst' resolver) mty in let resolve_out,rest' = strengthen_and_subst_struct rest subst mp_alias mp_from mp_to alias incl resolver in (add_mp_delta_resolver mp_to' mp_to' resolve_out),(l,SFBmodtype mty)::rest' (* Let P be a module path when we write "Module M:=P." or "Module M. Include P. End M." we need to perform two operations to compute the body of M. The first one is applying the substitution {P <- M} on the type of P and the second one is strenghtening. *) let strengthen_and_subst_mb mb mp include_b = match mb.mod_type with SEBstruct str -> let mb_is_an_alias = mp_in_delta mb.mod_mp mb.mod_delta in (*if mb.mod_mp is an alias then the strengthening is useless (i.e. it is already done)*) let mp_alias = mp_of_delta mb.mod_delta mb.mod_mp in let subst_resolver = map_mp mb.mod_mp mp empty_delta_resolver in let new_resolver = add_mp_delta_resolver mp mp_alias (subst_dom_delta_resolver subst_resolver mb.mod_delta) in let subst = map_mp mb.mod_mp mp new_resolver in let resolver_out,new_sig = strengthen_and_subst_struct str subst mp_alias mb.mod_mp mp mb_is_an_alias include_b mb.mod_delta in {mb with mod_mp = mp; mod_type = SEBstruct(new_sig); mod_expr = Some (SEBident mb.mod_mp); mod_delta = if include_b then resolver_out else add_delta_resolver new_resolver resolver_out} | SEBfunctor(arg_id,argb,body) -> let subst = map_mp mb.mod_mp mp empty_delta_resolver in subst_module subst (fun resolver subst -> subst_dom_codom_delta_resolver subst resolver) mb | _ -> anomaly "Modops:the evaluation of the structure failed " let subst_modtype_and_resolver mtb mp = let subst = (map_mp mtb.typ_mp mp empty_delta_resolver) in let new_delta = subst_dom_codom_delta_resolver subst mtb.typ_delta in let full_subst = (map_mp mtb.typ_mp mp new_delta) in subst_modtype full_subst (fun resolver subst -> subst_dom_codom_delta_resolver subst resolver) mtb let rec is_bounded_expr l = function | SEBident mp -> List.mem mp l | SEBapply (fexpr,mexpr,_) -> is_bounded_expr l mexpr || is_bounded_expr l fexpr | _ -> false let rec clean_struct l = function | (lab,SFBmodule mb) as field -> let clean_typ = clean_expr l mb.mod_type in let clean_impl = begin try if (is_bounded_expr l (Option.get mb.mod_expr)) then Some clean_typ else Some (clean_expr l (Option.get mb.mod_expr)) with Option.IsNone -> None end in if clean_typ==mb.mod_type && clean_impl==mb.mod_expr then field else (lab,SFBmodule {mb with mod_type=clean_typ; mod_expr=clean_impl}) | field -> field and clean_expr l = function | SEBfunctor (mbid,sigt,str) as s-> let str_clean = clean_expr l str in let sig_clean = clean_expr l sigt.typ_expr in if str_clean == str && sig_clean = sigt.typ_expr then s else SEBfunctor (mbid,{sigt with typ_expr=sig_clean},str_clean) | SEBstruct str as s-> let str_clean = Util.list_smartmap (clean_struct l) str in if str_clean == str then s else SEBstruct(str_clean) | str -> str let rec collect_mbid l = function | SEBfunctor (mbid,sigt,str) as s-> let str_clean = collect_mbid ((MPbound mbid)::l) str in if str_clean == str then s else SEBfunctor 
(mbid,sigt,str_clean) | SEBstruct str as s-> let str_clean = Util.list_smartmap (clean_struct l) str in if str_clean == str then s else SEBstruct(str_clean) | _ -> anomaly "Modops:the evaluation of the structure failed " let clean_bounded_mod_expr = function | SEBfunctor _ as str -> let str_clean = collect_mbid [] str in if str_clean == str then str else str_clean | str -> str coq-8.4pl2/kernel/term_typing.ml0000640000175000001440000001237212010532755016007 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* make_polymorphic_if_constant_for_ind env j, cst1 | Some t -> let (tj,cst2) = infer_type env t in let (_,cst3) = judge_of_cast env j DEFAULTcast tj in assert (eq_constr t tj.utj_val); let cstrs = union_constraints (union_constraints cst1 cst2) cst3 in NonPolymorphicType t, cstrs let local_constrain_type env j cst1 = function | None -> j.uj_type, cst1 | Some t -> let (tj,cst2) = infer_type env t in let (_,cst3) = judge_of_cast env j DEFAULTcast tj in assert (eq_constr t tj.utj_val); t, union_constraints (union_constraints cst1 cst2) cst3 let translate_local_def env (b,topt) = let (j,cst) = infer env b in let (typ,cst) = local_constrain_type env j cst topt in (j.uj_val,typ,cst) let translate_local_assum env t = let (j,cst) = infer env t in let t = Typeops.assumption_of_judgment env j in (t,cst) (* (* Same as push_named, but check that the variable is not already there. Should *not* be done in Environ because tactics add temporary hypothesis many many times, and the check performed here would cost too much. *) let safe_push_named (id,_,_ as d) env = let _ = try let _ = lookup_named id env in error ("Identifier "^string_of_id id^" already defined.") with Not_found -> () in push_named d env let push_named_def = push_rel_or_named_def safe_push_named let push_rel_def = push_rel_or_named_def push_rel let push_rel_or_named_assum push (id,t) env = let (j,cst) = safe_infer env t in let t = Typeops.assumption_of_judgment env j in let env' = add_constraints cst env in let env'' = push (id,None,t) env' in (cst,env'') let push_named_assum = push_rel_or_named_assum push_named let push_rel_assum d env = snd (push_rel_or_named_assum push_rel d env) let push_rels_with_univ vars env = List.fold_left (fun env nvar -> push_rel_assum nvar env) env vars *) (* Insertion of constants and parameters in environment. 
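[infer_declaration] below type-checks an entry coming from the upper layers: for a [DefinitionEntry] it infers the type of the body, hash-conses it, checks it against the optional user-provided type, and yields a [Def] (or an [OpaqueDef] when the entry is flagged opaque); for a [ParameterEntry] it only checks the declared type and yields [Undef].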
*) let infer_declaration env dcl = match dcl with | DefinitionEntry c -> let (j,cst) = infer env c.const_entry_body in let j = {uj_val = hcons_constr j.uj_val; uj_type = hcons_constr j.uj_type} in let (typ,cst) = constrain_type env j cst c.const_entry_type in let def = if c.const_entry_opaque then OpaqueDef (Declarations.opaque_from_val j.uj_val) else Def (Declarations.from_val j.uj_val) in def, typ, cst, c.const_entry_secctx | ParameterEntry (ctx,t,nl) -> let (j,cst) = infer env t in let t = hcons_constr (Typeops.assumption_of_judgment env j) in Undef nl, NonPolymorphicType t, cst, ctx let global_vars_set_constant_type env = function | NonPolymorphicType t -> global_vars_set env t | PolymorphicArity (ctx,_) -> Sign.fold_rel_context (fold_rel_declaration (fun t c -> Idset.union (global_vars_set env t) c)) ctx ~init:Idset.empty let build_constant_declaration env kn (def,typ,cst,ctx) = let hyps = let inferred = let ids_typ = global_vars_set_constant_type env typ in let ids_def = match def with | Undef _ -> Idset.empty | Def cs -> global_vars_set env (Declarations.force cs) | OpaqueDef lc -> global_vars_set env (Declarations.force_opaque lc) in keep_hyps env (Idset.union ids_typ ids_def) in let declared = match ctx with | None -> inferred | Some declared -> declared in let mk_set l = List.fold_right Idset.add (List.map pi1 l) Idset.empty in let inferred_set, declared_set = mk_set inferred, mk_set declared in if not (Idset.subset inferred_set declared_set) then error ("The following section variable are used but not declared:\n"^ (String.concat ", " (List.map string_of_id (Idset.elements (Idset.diff inferred_set declared_set))))); declared in let tps = Cemitcodes.from_val (compile_constant_body env def) in { const_hyps = hyps; const_body = def; const_type = typ; const_body_code = tps; const_constraints = cst } (*s Global and local constant declaration. *) let translate_constant env kn ce = build_constant_declaration env kn (infer_declaration env ce) let translate_recipe env kn r = build_constant_declaration env kn (let def,typ,cst,hyps = Cooking.cook_constant env r in def,typ,cst,Some hyps) (* Insertion of inductive types. 
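[translate_mind] simply delegates the whole work (typing of arities and constructors, positivity) to [check_inductive].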
*) let translate_mind env kn mie = check_inductive env kn mie coq-8.4pl2/kernel/esubst.ml0000640000175000001440000001311312010532755014745 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n *) (* i.e under n binders *) let el_id = ELID (* compose a relocation of magnitude n *) let rec el_shft_rec n = function | ELSHFT(el,k) -> el_shft_rec (k+n) el | el -> ELSHFT(el,n) let el_shft n el = if n = 0 then el else el_shft_rec n el (* cross n binders *) let rec el_liftn_rec n = function | ELID -> ELID | ELLFT(k,el) -> el_liftn_rec (n+k) el | el -> ELLFT(n, el) let el_liftn n el = if n = 0 then el else el_liftn_rec n el let el_lift el = el_liftn_rec 1 el (* relocation of de Bruijn n in an explicit lift *) let rec reloc_rel n = function | ELID -> n | ELLFT(k,el) -> if n <= k then n else (reloc_rel (n-k) el) + k | ELSHFT(el,k) -> (reloc_rel (n+k) el) let rec is_lift_id = function | ELID -> true | ELSHFT(e,n) -> n=0 & is_lift_id e | ELLFT (_,e) -> is_lift_id e (*********************) (* Substitutions *) (*********************) (* (bounded) explicit substitutions of type 'a *) type 'a subs = | ESID of int (* ESID(n) = %n END bounded identity *) | CONS of 'a array * 'a subs (* CONS([|t1..tn|],S) = (S.t1...tn) parallel substitution beware of the order *) | SHIFT of int * 'a subs (* SHIFT(n,S) = (^n o S) terms in S are relocated *) (* with n vars *) | LIFT of int * 'a subs (* LIFT(n,S) = (%n S) stands for ((^n o S).n...1) *) (* operations of subs: collapses constructors when possible. * Needn't be recursive if we always use these functions *) let subs_id i = ESID i let subs_cons(x,s) = if Array.length x = 0 then s else CONS(x,s) let subs_liftn n = function | ESID p -> ESID (p+n) (* bounded identity lifted extends by p *) | LIFT (p,lenv) -> LIFT (p+n, lenv) | lenv -> LIFT (n,lenv) let subs_lift a = subs_liftn 1 a let subs_liftn n a = if n = 0 then a else subs_liftn n a let subs_shft = function | (0, s) -> s | (n, SHIFT (k,s1)) -> SHIFT (k+n, s1) | (n, s) -> SHIFT (n,s) let subs_shft (n,a) = if n = 0 then a else subs_shft(n,a) let subs_shift_cons = function (0, s, t) -> CONS(t,s) | (k, SHIFT(n,s1), t) -> CONS(t,SHIFT(k+n, s1)) | (k, s, t) -> CONS(t,SHIFT(k, s));; (* Tests whether a substitution is equal to the identity *) let rec is_subs_id = function ESID _ -> true | LIFT(_,s) -> is_subs_id s | SHIFT(0,s) -> is_subs_id s | CONS(x,s) -> Array.length x = 0 && is_subs_id s | _ -> false (* Expands de Bruijn k in the explicit substitution subs * lams accumulates de shifts to perform when retrieving the i-th value * the rules used are the following: * * [id]k --> k * [S.t]1 --> t * [S.t]k --> [S](k-1) if k > 1 * [^n o S] k --> [^n]([S]k) * [(%n S)] k --> k if k <= n * [(%n S)] k --> [^n]([S](k-n)) * * the result is (Inr (k+lams,p)) when the variable is just relocated * where p is None if the variable points inside subs and Some(k) if the * variable points k bindings beyond subs. 
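* A worked example (illustrative): with subs = LIFT(1, CONS([|t|], ESID 0)),
 *   expand_rel 1 subs = Inr(1,None)    i.e. the index is bound by the lift and merely relocated,
 *   expand_rel 2 subs = Inl(1,t)       i.e. t is returned but must still be lifted across the binder crossed,
 *   expand_rel 3 subs = Inr(2,Some 1)  i.e. the index points one binding beyond the substitution.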
*) let rec exp_rel lams k subs = match subs with | CONS (def,_) when k <= Array.length def -> Inl(lams,def.(Array.length def - k)) | CONS (v,l) -> exp_rel lams (k - Array.length v) l | LIFT (n,_) when k<=n -> Inr(lams+k,None) | LIFT (n,l) -> exp_rel (n+lams) (k-n) l | SHIFT (n,s) -> exp_rel (n+lams) k s | ESID n when k<=n -> Inr(lams+k,None) | ESID n -> Inr(lams+k,Some (k-n)) let expand_rel k subs = exp_rel 0 k subs let rec comp mk_cl s1 s2 = match (s1, s2) with | _, ESID _ -> s1 | ESID _, _ -> s2 | SHIFT(k,s), _ -> subs_shft(k, comp mk_cl s s2) | _, CONS(x,s') -> CONS(Array.map (fun t -> mk_cl(s1,t)) x, comp mk_cl s1 s') | CONS(x,s), SHIFT(k,s') -> let lg = Array.length x in if k == lg then comp mk_cl s s' else if k > lg then comp mk_cl s (SHIFT(k-lg, s')) else comp mk_cl (CONS(Array.sub x 0 (lg-k), s)) s' | CONS(x,s), LIFT(k,s') -> let lg = Array.length x in if k == lg then CONS(x, comp mk_cl s s') else if k > lg then CONS(x, comp mk_cl s (LIFT(k-lg, s'))) else CONS(Array.sub x (lg-k) k, comp mk_cl (CONS(Array.sub x 0 (lg-k),s)) s') | LIFT(k,s), SHIFT(k',s') -> if k if k constr val whd_betadeltaiota : env -> constr -> constr val whd_betadeltaiota_nolet : env -> constr -> constr val whd_betaiota : constr -> constr val nf_betaiota : constr -> constr (*********************************************************************** s conversion functions *) exception NotConvertible exception NotConvertibleVect of int type 'a conversion_function = env -> 'a -> 'a -> Univ.constraints type 'a trans_conversion_function = Names.transparent_state -> env -> 'a -> 'a -> Univ.constraints type conv_pb = CONV | CUMUL val sort_cmp : conv_pb -> sorts -> sorts -> Univ.constraints -> Univ.constraints val conv_sort : sorts conversion_function val conv_sort_leq : sorts conversion_function val trans_conv_cmp : ?l2r:bool -> conv_pb -> constr trans_conversion_function val trans_conv : ?l2r:bool -> ?evars:(existential->constr option) -> constr trans_conversion_function val trans_conv_leq : ?l2r:bool -> ?evars:(existential->constr option) -> types trans_conversion_function val conv_cmp : ?l2r:bool -> conv_pb -> constr conversion_function val conv : ?l2r:bool -> ?evars:(existential->constr option) -> constr conversion_function val conv_leq : ?l2r:bool -> ?evars:(existential->constr option) -> types conversion_function val conv_leq_vecti : ?l2r:bool -> ?evars:(existential->constr option) -> types array conversion_function (** option for conversion *) val set_vm_conv : (conv_pb -> types conversion_function) -> unit val vm_conv : conv_pb -> types conversion_function val set_default_conv : (conv_pb -> ?l2r:bool -> types conversion_function) -> unit val default_conv : conv_pb -> ?l2r:bool -> types conversion_function val default_conv_leq : ?l2r:bool -> types conversion_function (************************************************************************) (** Builds an application node, reducing beta redexes it may produce. *) val beta_appvect : constr -> constr array -> constr (** Builds an application node, reducing the [n] first beta-zeta redexes. 
*) val betazeta_appvect : int -> constr -> constr array -> constr (** Pseudo-reduction rule Prod(x,A,B) a --> B[x\a] *) val hnf_prod_applist : env -> types -> constr list -> types (*********************************************************************** s Recognizing products and arities modulo reduction *) val dest_prod : env -> types -> rel_context * types val dest_prod_assum : env -> types -> rel_context * types exception NotArity val dest_arity : env -> types -> arity (* raises NotArity if not an arity *) val is_arity : env -> types -> bool coq-8.4pl2/kernel/term.ml0000640000175000001440000014000512010532755014410 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* InProp | Prop Pos -> InSet | Type _ -> InType (********************************************************************) (* Constructions as implemented *) (********************************************************************) (* [constr array] is an instance matching definitional [named_context] in the same order (i.e. last argument first) *) type 'constr pexistential = existential_key * 'constr array type ('constr, 'types) prec_declaration = name array * 'types array * 'constr array type ('constr, 'types) pfixpoint = (int array * int) * ('constr, 'types) prec_declaration type ('constr, 'types) pcofixpoint = int * ('constr, 'types) prec_declaration (* [Var] is used for named variables and [Rel] for variables as de Bruijn indices. *) type ('constr, 'types) kind_of_term = | Rel of int | Var of identifier | Meta of metavariable | Evar of 'constr pexistential | Sort of sorts | Cast of 'constr * cast_kind * 'types | Prod of name * 'types * 'types | Lambda of name * 'types * 'constr | LetIn of name * 'constr * 'types * 'constr | App of 'constr * 'constr array | Const of constant | Ind of inductive | Construct of constructor | Case of case_info * 'constr * 'constr * 'constr array | Fix of ('constr, 'types) pfixpoint | CoFix of ('constr, 'types) pcofixpoint (* constr is the fixpoint of the previous type. Requires option -rectypes of the Caml compiler to be set *) type constr = (constr,constr) kind_of_term type existential = existential_key * constr array type rec_declaration = name array * constr array * constr array type fixpoint = (int array * int) * rec_declaration type cofixpoint = int * rec_declaration (*********************) (* Term constructors *) (*********************) (* Constructs a DeBrujin index with number n *) let rels = [|Rel 1;Rel 2;Rel 3;Rel 4;Rel 5;Rel 6;Rel 7; Rel 8; Rel 9;Rel 10;Rel 11;Rel 12;Rel 13;Rel 14;Rel 15; Rel 16|] let mkRel n = if 0 mkProp (* Easy sharing *) | Prop Pos -> mkSet | s -> Sort s (* Constructs the term t1::t2, i.e. the term t1 casted with the type t2 *) (* (that means t2 is declared as the type of t1) *) let mkCast (t1,k2,t2) = match t1 with | Cast (c,k1, _) when k1 = VMcast & k1 = k2 -> Cast (c,k1,t2) | _ -> Cast (t1,k2,t2) (* Constructs the product (x:t1)t2 *) let mkProd (x,t1,t2) = Prod (x,t1,t2) (* Constructs the abstraction [x:t1]t2 *) let mkLambda (x,t1,t2) = Lambda (x,t1,t2) (* Constructs [x=c_1:t]c_2 *) let mkLetIn (x,c1,t,c2) = LetIn (x,c1,t,c2) (* If lt = [t1; ...; tn], constructs the application (t1 ... 
tn) *) (* We ensure applicative terms have at least one argument and the function is not itself an applicative term *) let mkApp (f, a) = if Array.length a = 0 then f else match f with | App (g, cl) -> App (g, Array.append cl a) | _ -> App (f, a) (* Constructs a constant *) let mkConst c = Const c (* Constructs an existential variable *) let mkEvar e = Evar e (* Constructs the ith (co)inductive type of the block named kn *) let mkInd m = Ind m (* Constructs the jth constructor of the ith (co)inductive type of the block named kn. The array of terms correspond to the variables introduced in the section *) let mkConstruct c = Construct c (* Constructs the term

Case c of c1 | c2 .. | cn end *) let mkCase (ci, p, c, ac) = Case (ci, p, c, ac) (* If recindxs = [|i1,...in|] funnames = [|f1,...fn|] typarray = [|t1,...tn|] bodies = [|b1,...bn|] then mkFix ((recindxs,i),(funnames,typarray,bodies)) constructs the ith function of the block Fixpoint f1 [ctx1] : t1 := b1 with f2 [ctx2] : t2 := b2 ... with fn [ctxn] : tn := bn. where the lenght of the jth context is ij. *) let mkFix fix = Fix fix (* If funnames = [|f1,...fn|] typarray = [|t1,...tn|] bodies = [|b1,...bn|] then mkCoFix (i,(funnames,typsarray,bodies)) constructs the ith function of the block CoFixpoint f1 : t1 := b1 with f2 : t2 := b2 ... with fn : tn := bn. *) let mkCoFix cofix= CoFix cofix (* Constructs an existential variable named "?n" *) let mkMeta n = Meta n (* Constructs a Variable named id *) let mkVar id = Var id (************************************************************************) (* kind_of_term = constructions as seen by the user *) (************************************************************************) (* User view of [constr]. For [App], it is ensured there is at least one argument and the function is not itself an applicative term *) let kind_of_term c = c (* Experimental, used in Presburger contrib *) type ('constr, 'types) kind_of_type = | SortType of sorts | CastType of 'types * 'types | ProdType of name * 'types * 'types | LetInType of name * 'constr * 'types * 'types | AtomicType of 'constr * 'constr array let kind_of_type = function | Sort s -> SortType s | Cast (c,_,t) -> CastType (c, t) | Prod (na,t,c) -> ProdType (na, t, c) | LetIn (na,b,t,c) -> LetInType (na, b, t, c) | App (c,l) -> AtomicType (c, l) | (Rel _ | Meta _ | Var _ | Evar _ | Const _ | Case _ | Fix _ | CoFix _ | Ind _ as c) -> AtomicType (c,[||]) | (Lambda _ | Construct _) -> failwith "Not a type" (**********************************************************************) (* Non primitive term destructors *) (**********************************************************************) (* Destructor operations : partial functions Raise invalid_arg "dest*" if the const has not the expected form *) (* Destructs a DeBrujin index *) let destRel c = match kind_of_term c with | Rel n -> n | _ -> invalid_arg "destRel" (* Destructs an existential variable *) let destMeta c = match kind_of_term c with | Meta n -> n | _ -> invalid_arg "destMeta" let isMeta c = match kind_of_term c with Meta _ -> true | _ -> false let isMetaOf mv c = match kind_of_term c with Meta mv' -> mv = mv' | _ -> false (* Destructs a variable *) let destVar c = match kind_of_term c with | Var id -> id | _ -> invalid_arg "destVar" (* Destructs a type *) let isSort c = match kind_of_term c with | Sort s -> true | _ -> false let destSort c = match kind_of_term c with | Sort s -> s | _ -> invalid_arg "destSort" let rec isprop c = match kind_of_term c with | Sort (Prop _) -> true | Cast (c,_,_) -> isprop c | _ -> false let rec is_Prop c = match kind_of_term c with | Sort (Prop Null) -> true | Cast (c,_,_) -> is_Prop c | _ -> false let rec is_Set c = match kind_of_term c with | Sort (Prop Pos) -> true | Cast (c,_,_) -> is_Set c | _ -> false let rec is_Type c = match kind_of_term c with | Sort (Type _) -> true | Cast (c,_,_) -> is_Type c | _ -> false let is_small = function | Prop _ -> true | _ -> false let iskind c = isprop c or is_Type c (* Tests if an evar *) let isEvar c = match kind_of_term c with Evar _ -> true | _ -> false let isEvar_or_Meta c = match kind_of_term c with | Evar _ | Meta _ -> true | _ -> false (* Destructs a casted term *) let 
destCast c = match kind_of_term c with | Cast (t1,k,t2) -> (t1,k,t2) | _ -> invalid_arg "destCast" let isCast c = match kind_of_term c with Cast _ -> true | _ -> false (* Tests if a de Bruijn index *) let isRel c = match kind_of_term c with Rel _ -> true | _ -> false let isRelN n c = match kind_of_term c with Rel n' -> n = n' | _ -> false (* Tests if a variable *) let isVar c = match kind_of_term c with Var _ -> true | _ -> false let isVarId id c = match kind_of_term c with Var id' -> id = id' | _ -> false (* Tests if an inductive *) let isInd c = match kind_of_term c with Ind _ -> true | _ -> false (* Destructs the product (x:t1)t2 *) let destProd c = match kind_of_term c with | Prod (x,t1,t2) -> (x,t1,t2) | _ -> invalid_arg "destProd" let isProd c = match kind_of_term c with | Prod _ -> true | _ -> false (* Destructs the abstraction [x:t1]t2 *) let destLambda c = match kind_of_term c with | Lambda (x,t1,t2) -> (x,t1,t2) | _ -> invalid_arg "destLambda" let isLambda c = match kind_of_term c with | Lambda _ -> true | _ -> false (* Destructs the let [x:=b:t1]t2 *) let destLetIn c = match kind_of_term c with | LetIn (x,b,t1,t2) -> (x,b,t1,t2) | _ -> invalid_arg "destLetIn" let isLetIn c = match kind_of_term c with LetIn _ -> true | _ -> false (* Destructs an application *) let destApp c = match kind_of_term c with | App (f,a) -> (f, a) | _ -> invalid_arg "destApplication" let destApplication = destApp let isApp c = match kind_of_term c with App _ -> true | _ -> false (* Destructs a constant *) let destConst c = match kind_of_term c with | Const kn -> kn | _ -> invalid_arg "destConst" let isConst c = match kind_of_term c with Const _ -> true | _ -> false (* Destructs an existential variable *) let destEvar c = match kind_of_term c with | Evar (kn, a as r) -> r | _ -> invalid_arg "destEvar" (* Destructs a (co)inductive type named kn *) let destInd c = match kind_of_term c with | Ind (kn, a as r) -> r | _ -> invalid_arg "destInd" (* Destructs a constructor *) let destConstruct c = match kind_of_term c with | Construct (kn, a as r) -> r | _ -> invalid_arg "dest" let isConstruct c = match kind_of_term c with Construct _ -> true | _ -> false (* Destructs a term

Case c of lc1 | lc2 .. | lcn end *) let destCase c = match kind_of_term c with | Case (ci,p,c,v) -> (ci,p,c,v) | _ -> anomaly "destCase" let isCase c = match kind_of_term c with Case _ -> true | _ -> false let destFix c = match kind_of_term c with | Fix fix -> fix | _ -> invalid_arg "destFix" let isFix c = match kind_of_term c with Fix _ -> true | _ -> false let destCoFix c = match kind_of_term c with | CoFix cofix -> cofix | _ -> invalid_arg "destCoFix" let isCoFix c = match kind_of_term c with CoFix _ -> true | _ -> false (******************************************************************) (* Cast management *) (******************************************************************) let rec strip_outer_cast c = match kind_of_term c with | Cast (c,_,_) -> strip_outer_cast c | _ -> c (* Fonction spciale qui laisse les cast cls sous les Fix ou les Case *) let under_outer_cast f c = match kind_of_term c with | Cast (b,k,t) -> mkCast (f b, k, f t) | _ -> f c let rec under_casts f c = match kind_of_term c with | Cast (c,k,t) -> mkCast (under_casts f c, k, t) | _ -> f c (******************************************************************) (* Flattening and unflattening of embedded applications and casts *) (******************************************************************) (* flattens application lists throwing casts in-between *) let rec collapse_appl c = match kind_of_term c with | App (f,cl) -> let rec collapse_rec f cl2 = match kind_of_term (strip_outer_cast f) with | App (g,cl1) -> collapse_rec g (Array.append cl1 cl2) | _ -> mkApp (f,cl2) in collapse_rec f cl | _ -> c let decompose_app c = match kind_of_term c with | App (f,cl) -> (f, Array.to_list cl) | _ -> (c,[]) (****************************************************************************) (* Functions to recur through subterms *) (****************************************************************************) (* [fold_constr f acc c] folds [f] on the immediate subterms of [c] starting from [acc] and proceeding from left to right according to the usual representation of the constructions; it is not recursive *) let fold_constr f acc c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> acc | Cast (c,_,t) -> f (f acc c) t | Prod (_,t,c) -> f (f acc t) c | Lambda (_,t,c) -> f (f acc t) c | LetIn (_,b,t,c) -> f (f (f acc b) t) c | App (c,l) -> Array.fold_left f (f acc c) l | Evar (_,l) -> Array.fold_left f acc l | Case (_,p,c,bl) -> Array.fold_left f (f (f acc p) c) bl | Fix (_,(lna,tl,bl)) -> let fd = array_map3 (fun na t b -> (na,t,b)) lna tl bl in Array.fold_left (fun acc (na,t,b) -> f (f acc t) b) acc fd | CoFix (_,(lna,tl,bl)) -> let fd = array_map3 (fun na t b -> (na,t,b)) lna tl bl in Array.fold_left (fun acc (na,t,b) -> f (f acc t) b) acc fd (* [iter_constr f c] iters [f] on the immediate subterms of [c]; it is not recursive and the order with which subterms are processed is not specified *) let iter_constr f c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> () | Cast (c,_,t) -> f c; f t | Prod (_,t,c) -> f t; f c | Lambda (_,t,c) -> f t; f c | LetIn (_,b,t,c) -> f b; f t; f c | App (c,l) -> f c; Array.iter f l | Evar (_,l) -> Array.iter f l | Case (_,p,c,bl) -> f p; f c; Array.iter f bl | Fix (_,(_,tl,bl)) -> Array.iter f tl; Array.iter f bl | CoFix (_,(_,tl,bl)) -> Array.iter f tl; Array.iter f bl (* [iter_constr_with_binders g f n c] iters [f n] on the immediate subterms of [c]; it carries an extra data [n] (typically a lift index) 
which is processed by [g] (which typically add 1 to [n]) at each binder traversal; it is not recursive and the order with which subterms are processed is not specified *) let iter_constr_with_binders g f n c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> () | Cast (c,_,t) -> f n c; f n t | Prod (_,t,c) -> f n t; f (g n) c | Lambda (_,t,c) -> f n t; f (g n) c | LetIn (_,b,t,c) -> f n b; f n t; f (g n) c | App (c,l) -> f n c; Array.iter (f n) l | Evar (_,l) -> Array.iter (f n) l | Case (_,p,c,bl) -> f n p; f n c; Array.iter (f n) bl | Fix (_,(_,tl,bl)) -> Array.iter (f n) tl; Array.iter (f (iterate g (Array.length tl) n)) bl | CoFix (_,(_,tl,bl)) -> Array.iter (f n) tl; Array.iter (f (iterate g (Array.length tl) n)) bl (* [map_constr f c] maps [f] on the immediate subterms of [c]; it is not recursive and the order with which subterms are processed is not specified *) let map_constr f c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> c | Cast (c,k,t) -> mkCast (f c, k, f t) | Prod (na,t,c) -> mkProd (na, f t, f c) | Lambda (na,t,c) -> mkLambda (na, f t, f c) | LetIn (na,b,t,c) -> mkLetIn (na, f b, f t, f c) | App (c,l) -> mkApp (f c, Array.map f l) | Evar (e,l) -> mkEvar (e, Array.map f l) | Case (ci,p,c,bl) -> mkCase (ci, f p, f c, Array.map f bl) | Fix (ln,(lna,tl,bl)) -> mkFix (ln,(lna,Array.map f tl,Array.map f bl)) | CoFix(ln,(lna,tl,bl)) -> mkCoFix (ln,(lna,Array.map f tl,Array.map f bl)) (* [map_constr_with_binders g f n c] maps [f n] on the immediate subterms of [c]; it carries an extra data [n] (typically a lift index) which is processed by [g] (which typically add 1 to [n]) at each binder traversal; it is not recursive and the order with which subterms are processed is not specified *) let map_constr_with_binders g f l c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> c | Cast (c,k,t) -> mkCast (f l c, k, f l t) | Prod (na,t,c) -> mkProd (na, f l t, f (g l) c) | Lambda (na,t,c) -> mkLambda (na, f l t, f (g l) c) | LetIn (na,b,t,c) -> mkLetIn (na, f l b, f l t, f (g l) c) | App (c,al) -> mkApp (f l c, Array.map (f l) al) | Evar (e,al) -> mkEvar (e, Array.map (f l) al) | Case (ci,p,c,bl) -> mkCase (ci, f l p, f l c, Array.map (f l) bl) | Fix (ln,(lna,tl,bl)) -> let l' = iterate g (Array.length tl) l in mkFix (ln,(lna,Array.map (f l) tl,Array.map (f l') bl)) | CoFix(ln,(lna,tl,bl)) -> let l' = iterate g (Array.length tl) l in mkCoFix (ln,(lna,Array.map (f l) tl,Array.map (f l') bl)) (* [compare_constr f c1 c2] compare [c1] and [c2] using [f] to compare the immediate subterms of [c1] of [c2] if needed; Cast's, application associativity, binders name and Cases annotations are not taken into account *) let compare_constr f t1 t2 = match kind_of_term t1, kind_of_term t2 with | Rel n1, Rel n2 -> n1 = n2 | Meta m1, Meta m2 -> m1 = m2 | Var id1, Var id2 -> id1 = id2 | Sort s1, Sort s2 -> s1 = s2 | Cast (c1,_,_), _ -> f c1 t2 | _, Cast (c2,_,_) -> f t1 c2 | Prod (_,t1,c1), Prod (_,t2,c2) -> f t1 t2 & f c1 c2 | Lambda (_,t1,c1), Lambda (_,t2,c2) -> f t1 t2 & f c1 c2 | LetIn (_,b1,t1,c1), LetIn (_,b2,t2,c2) -> f b1 b2 & f t1 t2 & f c1 c2 | App (c1,l1), _ when isCast c1 -> f (mkApp (pi1 (destCast c1),l1)) t2 | _, App (c2,l2) when isCast c2 -> f t1 (mkApp (pi1 (destCast c2),l2)) | App (c1,l1), App (c2,l2) -> Array.length l1 = Array.length l2 && f c1 c2 && array_for_all2 f l1 l2 | Evar (e1,l1), Evar (e2,l2) -> e1 = e2 & array_for_all2 f l1 l2 | 
Const c1, Const c2 -> eq_constant c1 c2 | Ind c1, Ind c2 -> eq_ind c1 c2 | Construct c1, Construct c2 -> eq_constructor c1 c2 | Case (_,p1,c1,bl1), Case (_,p2,c2,bl2) -> f p1 p2 & f c1 c2 & array_for_all2 f bl1 bl2 | Fix (ln1,(_,tl1,bl1)), Fix (ln2,(_,tl2,bl2)) -> ln1 = ln2 & array_for_all2 f tl1 tl2 & array_for_all2 f bl1 bl2 | CoFix(ln1,(_,tl1,bl1)), CoFix(ln2,(_,tl2,bl2)) -> ln1 = ln2 & array_for_all2 f tl1 tl2 & array_for_all2 f bl1 bl2 | _ -> false (*******************************) (* alpha conversion functions *) (*******************************) (* alpha conversion : ignore print names and casts *) let rec eq_constr m n = (m==n) or compare_constr eq_constr m n let eq_constr m n = eq_constr m n (* to avoid tracing a recursive fun *) let constr_ord_int f t1 t2 = let (=?) f g i1 i2 j1 j2= let c=f i1 i2 in if c=0 then g j1 j2 else c in let (==?) fg h i1 i2 j1 j2 k1 k2= let c=fg i1 i2 j1 j2 in if c=0 then h k1 k2 else c in match kind_of_term t1, kind_of_term t2 with | Rel n1, Rel n2 -> n1 - n2 | Meta m1, Meta m2 -> m1 - m2 | Var id1, Var id2 -> id_ord id1 id2 | Sort s1, Sort s2 -> Pervasives.compare s1 s2 | Cast (c1,_,_), _ -> f c1 t2 | _, Cast (c2,_,_) -> f t1 c2 | Prod (_,t1,c1), Prod (_,t2,c2) | Lambda (_,t1,c1), Lambda (_,t2,c2) -> (f =? f) t1 t2 c1 c2 | LetIn (_,b1,t1,c1), LetIn (_,b2,t2,c2) -> ((f =? f) ==? f) b1 b2 t1 t2 c1 c2 | App (c1,l1), _ when isCast c1 -> f (mkApp (pi1 (destCast c1),l1)) t2 | _, App (c2,l2) when isCast c2 -> f t1 (mkApp (pi1 (destCast c2),l2)) | App (c1,l1), App (c2,l2) -> (f =? (array_compare f)) c1 c2 l1 l2 | Evar (e1,l1), Evar (e2,l2) -> ((-) =? (array_compare f)) e1 e2 l1 l2 | Const c1, Const c2 -> kn_ord (canonical_con c1) (canonical_con c2) | Ind (spx, ix), Ind (spy, iy) -> let c = ix - iy in if c = 0 then kn_ord (canonical_mind spx) (canonical_mind spy) else c | Construct ((spx, ix), jx), Construct ((spy, iy), jy) -> let c = jx - jy in if c = 0 then (let c = ix - iy in if c = 0 then kn_ord (canonical_mind spx) (canonical_mind spy) else c) else c | Case (_,p1,c1,bl1), Case (_,p2,c2,bl2) -> ((f =? f) ==? (array_compare f)) p1 p2 c1 c2 bl1 bl2 | Fix (ln1,(_,tl1,bl1)), Fix (ln2,(_,tl2,bl2)) -> ((Pervasives.compare =? (array_compare f)) ==? (array_compare f)) ln1 ln2 tl1 tl2 bl1 bl2 | CoFix(ln1,(_,tl1,bl1)), CoFix(ln2,(_,tl2,bl2)) -> ((Pervasives.compare =? (array_compare f)) ==? 
(array_compare f)) ln1 ln2 tl1 tl2 bl1 bl2 | t1, t2 -> Pervasives.compare t1 t2 let rec constr_ord m n= constr_ord_int constr_ord m n (***************************************************************************) (* Type of assumptions *) (***************************************************************************) type types = constr type strategy = types option type named_declaration = identifier * constr option * types type rel_declaration = name * constr option * types let map_named_declaration f (id, v, ty) = (id, Option.map f v, f ty) let map_rel_declaration = map_named_declaration let fold_named_declaration f (_, v, ty) a = f ty (Option.fold_right f v a) let fold_rel_declaration = fold_named_declaration let exists_named_declaration f (_, v, ty) = Option.cata f false v || f ty let exists_rel_declaration f (_, v, ty) = Option.cata f false v || f ty let for_all_named_declaration f (_, v, ty) = Option.cata f true v && f ty let for_all_rel_declaration f (_, v, ty) = Option.cata f true v && f ty let eq_named_declaration (i1, c1, t1) (i2, c2, t2) = id_ord i1 i2 = 0 && Option.Misc.compare eq_constr c1 c2 && eq_constr t1 t2 let eq_rel_declaration (n1, c1, t1) (n2, c2, t2) = n1 = n2 && Option.Misc.compare eq_constr c1 c2 && eq_constr t1 t2 (***************************************************************************) (* Type of local contexts (telescopes) *) (***************************************************************************) (*s Signatures of ordered optionally named variables, intended to be accessed by de Bruijn indices (to represent bound variables) *) type rel_context = rel_declaration list let empty_rel_context = [] let add_rel_decl d ctxt = d::ctxt let rec lookup_rel n sign = match n, sign with | 1, decl :: _ -> decl | n, _ :: sign -> lookup_rel (n-1) sign | _, [] -> raise Not_found let rel_context_length = List.length let rel_context_nhyps hyps = let rec nhyps acc = function | [] -> acc | (_,None,_)::hyps -> nhyps (1+acc) hyps | (_,Some _,_)::hyps -> nhyps acc hyps in nhyps 0 hyps (****************************************************************************) (* Functions for dealing with constr terms *) (****************************************************************************) (*********************) (* Occurring *) (*********************) exception LocalOccur (* (closedn n M) raises FreeVar if a variable of height greater than n occurs in M, returns () otherwise *) let closedn n c = let rec closed_rec n c = match kind_of_term c with | Rel m -> if m>n then raise LocalOccur | _ -> iter_constr_with_binders succ closed_rec n c in try closed_rec n c; true with LocalOccur -> false (* [closed0 M] is true iff [M] is a (deBruijn) closed term *) let closed0 = closedn 0 (* (noccurn n M) returns true iff (Rel n) does NOT occur in term M *) let noccurn n term = let rec occur_rec n c = match kind_of_term c with | Rel m -> if m = n then raise LocalOccur | _ -> iter_constr_with_binders succ occur_rec n c in try occur_rec n term; true with LocalOccur -> false (* (noccur_between n m M) returns true iff (Rel p) does NOT occur in term M for n <= p < n+m *) let noccur_between n m term = let rec occur_rec n c = match kind_of_term c with | Rel(p) -> if n<=p && p iter_constr_with_binders succ occur_rec n c in try occur_rec n term; true with LocalOccur -> false (* Checking function for terms containing existential variables. 
The function [noccur_with_meta] considers the fact that each existential variable (as well as each isevar) in the term appears applied to its local context, which may contain the CoFix variables. These occurrences of CoFix variables are not considered *) let noccur_with_meta n m term = let rec occur_rec n c = match kind_of_term c with | Rel p -> if n<=p & p (match kind_of_term f with | Cast (c,_,_) when isMeta c -> () | Meta _ -> () | _ -> iter_constr_with_binders succ occur_rec n c) | Evar (_, _) -> () | _ -> iter_constr_with_binders succ occur_rec n c in try (occur_rec n term; true) with LocalOccur -> false (*********************) (* Lifting *) (*********************) (* The generic lifting function *) let rec exliftn el c = match kind_of_term c with | Rel i -> mkRel(reloc_rel i el) | _ -> map_constr_with_binders el_lift exliftn el c (* Lifting the binding depth across k bindings *) let liftn n k = match el_liftn (pred k) (el_shft n el_id) with | ELID -> (fun c -> c) | el -> exliftn el let lift n = liftn n 1 (*********************) (* Substituting *) (*********************) (* (subst1 M c) substitutes M for Rel(1) in c we generalise it to (substl [M1,...,Mn] c) which substitutes in parallel M1,...,Mn for respectively Rel(1),...,Rel(n) in c *) (* 1st : general case *) type info = Closed | Open | Unknown type 'a substituend = { mutable sinfo: info; sit: 'a } let rec lift_substituend depth s = match s.sinfo with | Closed -> s.sit | Open -> lift depth s.sit | Unknown -> s.sinfo <- if closed0 s.sit then Closed else Open; lift_substituend depth s let make_substituend c = { sinfo=Unknown; sit=c } let substn_many lamv n c = let lv = Array.length lamv in if lv = 0 then c else let rec substrec depth c = match kind_of_term c with | Rel k -> if k<=depth then c else if k-depth <= lv then lift_substituend depth lamv.(k-depth-1) else mkRel (k-lv) | _ -> map_constr_with_binders succ substrec depth c in substrec n c (* let substkey = Profile.declare_profile "substn_many";; let substn_many lamv n c = Profile.profile3 substkey substn_many lamv n c;; *) let substnl laml n = substn_many (Array.map make_substituend (Array.of_list laml)) n let substl laml = substnl laml 0 let subst1 lam = substl [lam] let substnl_decl laml k = map_rel_declaration (substnl laml k) let substl_decl laml = substnl_decl laml 0 let subst1_decl lam = substl_decl [lam] let substnl_named laml k = map_named_declaration (substnl laml k) let substl_named_decl = substl_decl let subst1_named_decl = subst1_decl (* (thin_val sigma) removes identity substitutions from sigma *) let rec thin_val = function | [] -> [] | (((id,{ sit = v }) as s)::tl) when isVar v -> if id = destVar v then thin_val tl else s::(thin_val tl) | h::tl -> h::(thin_val tl) (* (replace_vars sigma M) applies substitution sigma to term M *) let replace_vars var_alist = let var_alist = List.map (fun (str,c) -> (str,make_substituend c)) var_alist in let var_alist = thin_val var_alist in let rec substrec n c = match kind_of_term c with | Var x -> (try lift_substituend n (List.assoc x var_alist) with Not_found -> c) | _ -> map_constr_with_binders succ substrec n c in if var_alist = [] then (function x -> x) else substrec 0 (* let repvarkey = Profile.declare_profile "replace_vars";; let replace_vars vl c = Profile.profile2 repvarkey replace_vars vl c ;; *) (* (subst_var str t) substitute (VAR str) by (Rel 1) in t *) let subst_var str = replace_vars [(str, mkRel 1)] (* (subst_vars [id1;...;idn] t) substitute (VAR idj) by (Rel j) in t *) let substn_vars p vars = let _,subst = 
List.fold_left (fun (n,l) var -> ((n+1),(var,mkRel n)::l)) (p,[]) vars in replace_vars (List.rev subst) let subst_vars = substn_vars 1 (***************************) (* Other term constructors *) (***************************) let mkNamedProd id typ c = mkProd (Name id, typ, subst_var id c) let mkNamedLambda id typ c = mkLambda (Name id, typ, subst_var id c) let mkNamedLetIn id c1 t c2 = mkLetIn (Name id, c1, t, subst_var id c2) (* Constructs either [(x:t)c] or [[x=b:t]c] *) let mkProd_or_LetIn (na,body,t) c = match body with | None -> mkProd (na, t, c) | Some b -> mkLetIn (na, b, t, c) let mkNamedProd_or_LetIn (id,body,t) c = match body with | None -> mkNamedProd id t c | Some b -> mkNamedLetIn id b t c (* Constructs either [(x:t)c] or [c] where [x] is replaced by [b] *) let mkProd_wo_LetIn (na,body,t) c = match body with | None -> mkProd (na, t, c) | Some b -> subst1 b c let mkNamedProd_wo_LetIn (id,body,t) c = match body with | None -> mkNamedProd id t c | Some b -> subst1 b (subst_var id c) (* non-dependent product t1 -> t2 *) let mkArrow t1 t2 = mkProd (Anonymous, t1, t2) (* Constructs either [[x:t]c] or [[x=b:t]c] *) let mkLambda_or_LetIn (na,body,t) c = match body with | None -> mkLambda (na, t, c) | Some b -> mkLetIn (na, b, t, c) let mkNamedLambda_or_LetIn (id,body,t) c = match body with | None -> mkNamedLambda id t c | Some b -> mkNamedLetIn id b t c (* prodn n [xn:Tn;..;x1:T1;Gamma] b = (x1:T1)..(xn:Tn)b *) let prodn n env b = let rec prodrec = function | (0, env, b) -> b | (n, ((v,t)::l), b) -> prodrec (n-1, l, mkProd (v,t,b)) | _ -> assert false in prodrec (n,env,b) (* compose_prod [xn:Tn;..;x1:T1] b = (x1:T1)..(xn:Tn)b *) let compose_prod l b = prodn (List.length l) l b (* lamn n [xn:Tn;..;x1:T1;Gamma] b = [x1:T1]..[xn:Tn]b *) let lamn n env b = let rec lamrec = function | (0, env, b) -> b | (n, ((v,t)::l), b) -> lamrec (n-1, l, mkLambda (v,t,b)) | _ -> assert false in lamrec (n,env,b) (* compose_lam [xn:Tn;..;x1:T1] b = [x1:T1]..[xn:Tn]b *) let compose_lam l b = lamn (List.length l) l b let applist (f,l) = mkApp (f, Array.of_list l) let applistc f l = mkApp (f, Array.of_list l) let appvect = mkApp let appvectc f l = mkApp (f,l) (* to_lambda n (x1:T1)...(xn:Tn)T = * [x1:T1]...[xn:Tn]T *) let rec to_lambda n prod = if n = 0 then prod else match kind_of_term prod with | Prod (na,ty,bd) -> mkLambda (na,ty,to_lambda (n-1) bd) | Cast (c,_,_) -> to_lambda n c | _ -> errorlabstrm "to_lambda" (mt ()) let rec to_prod n lam = if n=0 then lam else match kind_of_term lam with | Lambda (na,ty,bd) -> mkProd (na,ty,to_prod (n-1) bd) | Cast (c,_,_) -> to_prod n c | _ -> errorlabstrm "to_prod" (mt ()) (* pseudo-reduction rule: * [prod_app s (Prod(_,B)) N --> B[N] * with an strip_outer_cast on the first argument to produce a product *) let prod_app t n = match kind_of_term (strip_outer_cast t) with | Prod (_,_,b) -> subst1 n b | _ -> errorlabstrm "prod_app" (str"Needed a product, but didn't find one" ++ fnl ()) (* prod_appvect T [| a1 ; ... ; an |] -> (T a1 ... an) *) let prod_appvect t nL = Array.fold_left prod_app t nL (* prod_applist T [ a1 ; ... ; an ] -> (T a1 ... 
an) *) let prod_applist t nL = List.fold_left prod_app t nL let it_mkProd_or_LetIn = List.fold_left (fun c d -> mkProd_or_LetIn d c) let it_mkLambda_or_LetIn = List.fold_left (fun c d -> mkLambda_or_LetIn d c) (*********************************) (* Other term destructors *) (*********************************) (* Transforms a product term (x1:T1)..(xn:Tn)T into the pair ([(xn,Tn);...;(x1,T1)],T), where T is not a product *) let decompose_prod = let rec prodec_rec l c = match kind_of_term c with | Prod (x,t,c) -> prodec_rec ((x,t)::l) c | Cast (c,_,_) -> prodec_rec l c | _ -> l,c in prodec_rec [] (* Transforms a lambda term [x1:T1]..[xn:Tn]T into the pair ([(xn,Tn);...;(x1,T1)],T), where T is not a lambda *) let decompose_lam = let rec lamdec_rec l c = match kind_of_term c with | Lambda (x,t,c) -> lamdec_rec ((x,t)::l) c | Cast (c,_,_) -> lamdec_rec l c | _ -> l,c in lamdec_rec [] (* Given a positive integer n, transforms a product term (x1:T1)..(xn:Tn)T into the pair ([(xn,Tn);...;(x1,T1)],T) *) let decompose_prod_n n = if n < 0 then error "decompose_prod_n: integer parameter must be positive"; let rec prodec_rec l n c = if n=0 then l,c else match kind_of_term c with | Prod (x,t,c) -> prodec_rec ((x,t)::l) (n-1) c | Cast (c,_,_) -> prodec_rec l n c | _ -> error "decompose_prod_n: not enough products" in prodec_rec [] n (* Given a positive integer n, transforms a lambda term [x1:T1]..[xn:Tn]T into the pair ([(xn,Tn);...;(x1,T1)],T) *) let decompose_lam_n n = if n < 0 then error "decompose_lam_n: integer parameter must be positive"; let rec lamdec_rec l n c = if n=0 then l,c else match kind_of_term c with | Lambda (x,t,c) -> lamdec_rec ((x,t)::l) (n-1) c | Cast (c,_,_) -> lamdec_rec l n c | _ -> error "decompose_lam_n: not enough abstractions" in lamdec_rec [] n (* Transforms a product term (x1:T1)..(xn:Tn)T into the pair ([(xn,Tn);...;(x1,T1)],T), where T is not a product *) let decompose_prod_assum = let rec prodec_rec l c = match kind_of_term c with | Prod (x,t,c) -> prodec_rec (add_rel_decl (x,None,t) l) c | LetIn (x,b,t,c) -> prodec_rec (add_rel_decl (x,Some b,t) l) c | Cast (c,_,_) -> prodec_rec l c | _ -> l,c in prodec_rec empty_rel_context (* Transforms a lambda term [x1:T1]..[xn:Tn]T into the pair ([(xn,Tn);...;(x1,T1)],T), where T is not a lambda *) let decompose_lam_assum = let rec lamdec_rec l c = match kind_of_term c with | Lambda (x,t,c) -> lamdec_rec (add_rel_decl (x,None,t) l) c | LetIn (x,b,t,c) -> lamdec_rec (add_rel_decl (x,Some b,t) l) c | Cast (c,_,_) -> lamdec_rec l c | _ -> l,c in lamdec_rec empty_rel_context (* Given a positive integer n, transforms a product term (x1:T1)..(xn:Tn)T into the pair ([(xn,Tn);...;(x1,T1)],T) *) let decompose_prod_n_assum n = if n < 0 then error "decompose_prod_n_assum: integer parameter must be positive"; let rec prodec_rec l n c = if n=0 then l,c else match kind_of_term c with | Prod (x,t,c) -> prodec_rec (add_rel_decl (x,None,t) l) (n-1) c | LetIn (x,b,t,c) -> prodec_rec (add_rel_decl (x,Some b,t) l) (n-1) c | Cast (c,_,_) -> prodec_rec l n c | c -> error "decompose_prod_n_assum: not enough assumptions" in prodec_rec empty_rel_context n (* Given a positive integer n, transforms a lambda term [x1:T1]..[xn:Tn]T into the pair ([(xn,Tn);...;(x1,T1)],T) Lets in between are not expanded but turn into local definitions, but n is the actual number of destructurated lambdas. 
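For instance (schematic term), [decompose_lam_n_assum 2] applied to [fun (x:A) => let y : B := b in fun (z:C) => t] returns the context [(z,None,C); (y,Some b,B); (x,None,A)] paired with [t]: two lambdas are consumed, yet the context has three entries, the let-in being kept as a local definition and not counted in [n].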
*) let decompose_lam_n_assum n = if n < 0 then error "decompose_lam_n_assum: integer parameter must be positive"; let rec lamdec_rec l n c = if n=0 then l,c else match kind_of_term c with | Lambda (x,t,c) -> lamdec_rec (add_rel_decl (x,None,t) l) (n-1) c | LetIn (x,b,t,c) -> lamdec_rec (add_rel_decl (x,Some b,t) l) n c | Cast (c,_,_) -> lamdec_rec l n c | c -> error "decompose_lam_n_assum: not enough abstractions" in lamdec_rec empty_rel_context n (* (nb_lam [na1:T1]...[nan:Tan]c) where c is not an abstraction * gives n (casts are ignored) *) let nb_lam = let rec nbrec n c = match kind_of_term c with | Lambda (_,_,c) -> nbrec (n+1) c | Cast (c,_,_) -> nbrec n c | _ -> n in nbrec 0 (* similar to nb_lam, but gives the number of products instead *) let nb_prod = let rec nbrec n c = match kind_of_term c with | Prod (_,_,c) -> nbrec (n+1) c | Cast (c,_,_) -> nbrec n c | _ -> n in nbrec 0 let prod_assum t = fst (decompose_prod_assum t) let prod_n_assum n t = fst (decompose_prod_n_assum n t) let strip_prod_assum t = snd (decompose_prod_assum t) let strip_prod t = snd (decompose_prod t) let strip_prod_n n t = snd (decompose_prod_n n t) let lam_assum t = fst (decompose_lam_assum t) let lam_n_assum n t = fst (decompose_lam_n_assum n t) let strip_lam_assum t = snd (decompose_lam_assum t) let strip_lam t = snd (decompose_lam t) let strip_lam_n n t = snd (decompose_lam_n n t) (***************************) (* Arities *) (***************************) (* An "arity" is a term of the form [[x1:T1]...[xn:Tn]s] with [s] a sort. Such a term can canonically be seen as the pair of a context of types and of a sort *) type arity = rel_context * sorts let destArity = let rec prodec_rec l c = match kind_of_term c with | Prod (x,t,c) -> prodec_rec ((x,None,t)::l) c | LetIn (x,b,t,c) -> prodec_rec ((x,Some b,t)::l) c | Cast (c,_,_) -> prodec_rec l c | Sort s -> l,s | _ -> anomaly "destArity: not an arity" in prodec_rec [] let mkArity (sign,s) = it_mkProd_or_LetIn (mkSort s) sign let rec isArity c = match kind_of_term c with | Prod (_,_,c) -> isArity c | LetIn (_,b,_,c) -> isArity (subst1 b c) | Cast (c,_,_) -> isArity c | Sort _ -> true | _ -> false (*******************) (* hash-consing *) (*******************) (* Hash-consing of [constr] does not use the module [Hashcons] because [Hashcons] is not efficient on deep tree-like data structures. Indeed, [Hashcons] is based the (very efficient) generic hash function [Hashtbl.hash], which computes the hash key through a depth bounded traversal of the data structure to be hashed. As a consequence, for a deep [constr] like the natural number 1000 (S (S (... (S O)))), the same hash is assigned to all the sub [constr]s greater than the maximal depth handled by [Hashtbl.hash]. This entails a huge number of collisions in the hash table and leads to cubic hash-consing in this worst-case. In order to compute a hash key that is independent of the data structure depth while being constant-time, an incremental hashing function must be devised. A standard implementation creates a cache of the hashing function by decorating each node of the hash-consed data structure with its hash key. In that case, the hash function can deduce the hash key of a toplevel data structure by a local computation based on the cache held on its substructures. Unfortunately, this simple implementation introduces a space overhead that is damageable for the hash-consing of small [constr]s (the most common case). 
One can think of an heterogeneous distribution of caches on smartly chosen nodes, but this is forbidden by the use of generic equality in Coq source code. (Indeed, this forces each [constr] to have a unique canonical representation.) Given that hash-consing proceeds inductively, we can nonetheless computes the hash key incrementally during hash-consing by changing a little the signature of the hash-consing function: it now returns both the hash-consed term and its hash key. This simple solution is implemented in the following code: it does not introduce a space overhead in [constr], that's why the efficiency is unchanged for small [constr]s. Besides, it does handle deep [constr]s without introducing an unreasonable number of collisions in the hash table. Some benchmarks make us think that this implementation of hash-consing is linear in the size of the hash-consed data structure for our daily use of Coq. *) let array_eqeq t1 t2 = t1 == t2 || (Array.length t1 = Array.length t2 && let rec aux i = (i = Array.length t1) || (t1.(i) == t2.(i) && aux (i + 1)) in aux 0) let equals_constr t1 t2 = match t1, t2 with | Rel n1, Rel n2 -> n1 == n2 | Meta m1, Meta m2 -> m1 == m2 | Var id1, Var id2 -> id1 == id2 | Sort s1, Sort s2 -> s1 == s2 | Cast (c1,k1,t1), Cast (c2,k2,t2) -> c1 == c2 & k1 == k2 & t1 == t2 | Prod (n1,t1,c1), Prod (n2,t2,c2) -> n1 == n2 & t1 == t2 & c1 == c2 | Lambda (n1,t1,c1), Lambda (n2,t2,c2) -> n1 == n2 & t1 == t2 & c1 == c2 | LetIn (n1,b1,t1,c1), LetIn (n2,b2,t2,c2) -> n1 == n2 & b1 == b2 & t1 == t2 & c1 == c2 | App (c1,l1), App (c2,l2) -> c1 == c2 & array_eqeq l1 l2 | Evar (e1,l1), Evar (e2,l2) -> e1 = e2 & array_eqeq l1 l2 | Const c1, Const c2 -> c1 == c2 | Ind (sp1,i1), Ind (sp2,i2) -> sp1 == sp2 & i1 = i2 | Construct ((sp1,i1),j1), Construct ((sp2,i2),j2) -> sp1 == sp2 & i1 = i2 & j1 = j2 | Case (ci1,p1,c1,bl1), Case (ci2,p2,c2,bl2) -> ci1 == ci2 & p1 == p2 & c1 == c2 & array_eqeq bl1 bl2 | Fix (ln1,(lna1,tl1,bl1)), Fix (ln2,(lna2,tl2,bl2)) -> ln1 = ln2 & array_eqeq lna1 lna2 & array_eqeq tl1 tl2 & array_eqeq bl1 bl2 | CoFix(ln1,(lna1,tl1,bl1)), CoFix(ln2,(lna2,tl2,bl2)) -> ln1 = ln2 & array_eqeq lna1 lna2 & array_eqeq tl1 tl2 & array_eqeq bl1 bl2 | _ -> false (** Note that the following Make has the side effect of creating once and for all the table we'll use for hash-consing all constr *) module H = Hashtbl_alt.Make(struct type t = constr let equals = equals_constr end) open Hashtbl_alt.Combine (* [hcons_term hash_consing_functions constr] computes an hash-consed representation for [constr] using [hash_consing_functions] on leaves. 
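   For instance (informal reading of the code below), hash-consing an application
   [App (f, args)] first hash-conses [f] and the array [args] in place, obtaining
   their hash keys, and then derives the key of the application itself as
   [combinesmall 7 (combine hl hc)], where [hc] is the key of [f] and [hl] the
   accumulated key of [args]; the key of a node is thus computed in constant time
   from the keys of its subterms instead of re-traversing them.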
*) let hcons_term (sh_sort,sh_ci,sh_construct,sh_ind,sh_con,sh_na,sh_id) = (* Note : we hash-cons constr arrays *in place* *) let rec hash_term_array t = let accu = ref 0 in for i = 0 to Array.length t - 1 do let x, h = sh_rec t.(i) in accu := combine !accu h; t.(i) <- x done; !accu and hash_term t = match t with | Var i -> (Var (sh_id i), combinesmall 1 (Hashtbl.hash i)) | Sort s -> (Sort (sh_sort s), combinesmall 2 (Hashtbl.hash s)) | Cast (c, k, t) -> let c, hc = sh_rec c in let t, ht = sh_rec t in (Cast (c, k, t), combinesmall 3 (combine3 hc (Hashtbl.hash k) ht)) | Prod (na,t,c) -> let t, ht = sh_rec t and c, hc = sh_rec c in (Prod (sh_na na, t, c), combinesmall 4 (combine3 (Hashtbl.hash na) ht hc)) | Lambda (na,t,c) -> let t, ht = sh_rec t and c, hc = sh_rec c in (Lambda (sh_na na, t, c), combinesmall 5 (combine3 (Hashtbl.hash na) ht hc)) | LetIn (na,b,t,c) -> let b, hb = sh_rec b in let t, ht = sh_rec t in let c, hc = sh_rec c in (LetIn (sh_na na, b, t, c), combinesmall 6 (combine4 (Hashtbl.hash na) hb ht hc)) | App (c,l) -> let c, hc = sh_rec c in let hl = hash_term_array l in (App (c, l), combinesmall 7 (combine hl hc)) | Evar (e,l) -> let hl = hash_term_array l in (* since the array have been hashed in place : *) (t, combinesmall 8 (combine (Hashtbl.hash e) hl)) | Const c -> (Const (sh_con c), combinesmall 9 (Hashtbl.hash c)) | Ind ((kn,i) as ind) -> (Ind (sh_ind ind), combinesmall 9 (combine (Hashtbl.hash kn) i)) | Construct (((kn,i),j) as c)-> (Construct (sh_construct c), combinesmall 10 (combine3 (Hashtbl.hash kn) i j)) | Case (ci,p,c,bl) -> let p, hp = sh_rec p and c, hc = sh_rec c in let hbl = hash_term_array bl in let hbl = combine (combine hc hp) hbl in (Case (sh_ci ci, p, c, bl), combinesmall 11 hbl) | Fix (ln,(lna,tl,bl)) -> let hbl = hash_term_array bl in let htl = hash_term_array tl in Array.iteri (fun i x -> lna.(i) <- sh_na x) lna; (* since the three arrays have been hashed in place : *) (t, combinesmall 13 (combine (Hashtbl.hash lna) (combine hbl htl))) | CoFix(ln,(lna,tl,bl)) -> let hbl = hash_term_array bl in let htl = hash_term_array tl in Array.iteri (fun i x -> lna.(i) <- sh_na x) lna; (* since the three arrays have been hashed in place : *) (t, combinesmall 14 (combine (Hashtbl.hash lna) (combine hbl htl))) | Meta n -> (t, combinesmall 15 n) | Rel n -> (t, combinesmall 16 n) and sh_rec t = let (y, h) = hash_term t in (* [h] must be positive. *) let h = h land 0x3FFFFFFF in (H.may_add_and_get h y, h) in (* Make sure our statically allocated Rels (1 to 16) are considered as canonical, and hence hash-consed to themselves *) ignore (hash_term_array rels); fun t -> fst (sh_rec t) (* Exported hashing fonction on constr, used mainly in plugins. Appears to have slight differences from [snd (hash_term t)] above ? 
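   A typical, hypothetical use on the client side is to key a hash table by terms,
   e.g. [module ConstrHashtbl = Hashtbl.Make
     (struct type t = constr let equal = eq_constr let hash = hash_constr end)];
   the point of the function below is that such keys remain cheap to compute and
   reasonably well distributed even on deep terms.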
*) let rec hash_constr t = match kind_of_term t with | Var i -> combinesmall 1 (Hashtbl.hash i) | Sort s -> combinesmall 2 (Hashtbl.hash s) | Cast (c, _, _) -> hash_constr c | Prod (_, t, c) -> combinesmall 4 (combine (hash_constr t) (hash_constr c)) | Lambda (_, t, c) -> combinesmall 5 (combine (hash_constr t) (hash_constr c)) | LetIn (_, b, t, c) -> combinesmall 6 (combine3 (hash_constr b) (hash_constr t) (hash_constr c)) | App (c,l) when isCast c -> hash_constr (mkApp (pi1 (destCast c),l)) | App (c,l) -> combinesmall 7 (combine (hash_term_array l) (hash_constr c)) | Evar (e,l) -> combinesmall 8 (combine (Hashtbl.hash e) (hash_term_array l)) | Const c -> combinesmall 9 (Hashtbl.hash c) (* TODO: proper hash function for constants *) | Ind (kn,i) -> combinesmall 9 (combine (Hashtbl.hash kn) i) | Construct ((kn,i),j) -> combinesmall 10 (combine3 (Hashtbl.hash kn) i j) | Case (_ , p, c, bl) -> combinesmall 11 (combine3 (hash_constr c) (hash_constr p) (hash_term_array bl)) | Fix (ln ,(_, tl, bl)) -> combinesmall 13 (combine (hash_term_array bl) (hash_term_array tl)) | CoFix(ln, (_, tl, bl)) -> combinesmall 14 (combine (hash_term_array bl) (hash_term_array tl)) | Meta n -> combinesmall 15 n | Rel n -> combinesmall 16 n and hash_term_array t = Array.fold_left (fun acc t -> combine (hash_constr t) acc) 0 t module Hsorts = Hashcons.Make( struct type t = sorts type u = universe -> universe let hash_sub huniv = function Prop c -> Prop c | Type u -> Type (huniv u) let equal s1 s2 = match (s1,s2) with (Prop c1, Prop c2) -> c1=c2 | (Type u1, Type u2) -> u1 == u2 |_ -> false let hash = Hashtbl.hash end) module Hcaseinfo = Hashcons.Make( struct type t = case_info type u = inductive -> inductive let hash_sub hind ci = { ci with ci_ind = hind ci.ci_ind } let equal ci ci' = ci.ci_ind == ci'.ci_ind && ci.ci_npar = ci'.ci_npar && ci.ci_cstr_ndecls = ci'.ci_cstr_ndecls && (* we use (=) on purpose *) ci.ci_pp_info = ci'.ci_pp_info (* we use (=) on purpose *) let hash = Hashtbl.hash end) let hcons_sorts = Hashcons.simple_hcons Hsorts.f hcons_univ let hcons_caseinfo = Hashcons.simple_hcons Hcaseinfo.f hcons_ind let hcons_constr = hcons_term (hcons_sorts, hcons_caseinfo, hcons_construct, hcons_ind, hcons_con, hcons_name, hcons_ident) let hcons_types = hcons_constr (*******) (* Type of abstract machine values *) type values coq-8.4pl2/kernel/sign.mli0000640000175000001440000000464212010532755014560 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* named_context -> named_context val vars_of_named_context : named_context -> identifier list val lookup_named : identifier -> named_context -> named_declaration (** number of declarations *) val named_context_length : named_context -> int (** named context equality *) val named_context_equal : named_context -> named_context -> bool (** {6 Recurrence on [named_context]: older declarations processed first } *) val fold_named_context : (named_declaration -> 'a -> 'a) -> named_context -> init:'a -> 'a (** newer declarations first *) val fold_named_context_reverse : ('a -> named_declaration -> 'a) -> init:'a -> named_context -> 'a (** {6 Section-related auxiliary functions } *) val instance_from_named_context : named_context -> constr array (** {6 ... 
} *) (** Signatures of ordered optionally named variables, intended to be accessed by de Bruijn indices *) val push_named_to_rel_context : named_context -> rel_context -> rel_context (** {6 Recurrence on [rel_context]: older declarations processed first } *) val fold_rel_context : (rel_declaration -> 'a -> 'a) -> rel_context -> init:'a -> 'a (** newer declarations first *) val fold_rel_context_reverse : ('a -> rel_declaration -> 'a) -> init:'a -> rel_context -> 'a (** {6 Map function of [rel_context] } *) val map_rel_context : (constr -> constr) -> rel_context -> rel_context (** {6 Map function of [named_context] } *) val map_named_context : (constr -> constr) -> named_context -> named_context (** {6 Map function of [rel_context] } *) val iter_rel_context : (constr -> unit) -> rel_context -> unit (** {6 Map function of [named_context] } *) val iter_named_context : (constr -> unit) -> named_context -> unit coq-8.4pl2/kernel/modops.mli0000640000175000001440000000772412122674544015134 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_type_body -> module_body val module_type_of_module : module_path option -> module_body -> module_type_body val destr_functor : env -> struct_expr_body -> mod_bound_id * module_type_body * struct_expr_body val subst_struct_expr : substitution -> struct_expr_body -> struct_expr_body val subst_signature : substitution -> structure_body -> structure_body val add_signature : module_path -> structure_body -> delta_resolver -> env -> env (** adds a module and its components, but not the constraints *) val add_module : module_body -> env -> env val check_modpath_equiv : env -> module_path -> module_path -> unit val strengthen : module_type_body -> module_path -> module_type_body val inline_delta_resolver : env -> inline -> module_path -> mod_bound_id -> module_type_body -> delta_resolver -> delta_resolver val strengthen_and_subst_mb : module_body -> module_path -> bool -> module_body val subst_modtype_and_resolver : module_type_body -> module_path -> module_type_body val clean_bounded_mod_expr : struct_expr_body -> struct_expr_body (** Errors *) type signature_mismatch_error = | InductiveFieldExpected of mutual_inductive_body | DefinitionFieldExpected | ModuleFieldExpected | ModuleTypeFieldExpected | NotConvertibleInductiveField of identifier | NotConvertibleConstructorField of identifier | NotConvertibleBodyField | NotConvertibleTypeField of env * types * types | NotSameConstructorNamesField | NotSameInductiveNameInBlockField | FiniteInductiveFieldExpected of bool | InductiveNumbersFieldExpected of int | InductiveParamsNumberField of int | RecordFieldExpected of bool | RecordProjectionsExpected of name list | NotEqualInductiveAliases | NoTypeConstraintExpected type module_typing_error = | SignatureMismatch of label * structure_field_body * signature_mismatch_error | LabelAlreadyDeclared of label | ApplicationToNotPath of module_struct_entry | NotAFunctor of struct_expr_body | IncompatibleModuleTypes of module_type_body * module_type_body | NotEqualModulePaths of module_path * module_path | NoSuchLabel of label | IncompatibleLabels of label * label | SignatureExpected of struct_expr_body | NoModuleToEnd | NoModuleTypeToEnd | NotAModule of string | NotAModuleType of string | NotAConstant of label | IncorrectWithConstraint of label | GenerativeModuleExpected of label | NonEmptyLocalContect of label option | LabelMissing of label * string exception 
ModuleTypingError of module_typing_error val error_existing_label : label -> 'a val error_application_to_not_path : module_struct_entry -> 'a val error_incompatible_modtypes : module_type_body -> module_type_body -> 'a val error_signature_mismatch : label -> structure_field_body -> signature_mismatch_error -> 'a val error_incompatible_labels : label -> label -> 'a val error_no_such_label : label -> 'a val error_signature_expected : struct_expr_body -> 'a val error_no_module_to_end : unit -> 'a val error_no_modtype_to_end : unit -> 'a val error_not_a_module : string -> 'a val error_not_a_constant : label -> 'a val error_incorrect_with_constraint : label -> 'a val error_generative_module_expected : label -> 'a val error_non_empty_local_context : label option -> 'a val error_no_such_label_sub : label->string->'a coq-8.4pl2/kernel/term_typing.mli0000640000175000001440000000244612010532755016161 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr * types option -> constr * types * Univ.constraints val translate_local_assum : env -> types -> types * Univ.constraints val infer_declaration : env -> constant_entry -> constant_def * constant_type * constraints * Sign.section_context option val build_constant_declaration : env -> 'a -> constant_def * constant_type * constraints * Sign.section_context option -> constant_body val translate_constant : env -> constant -> constant_entry -> constant_body val translate_mind : env -> mutual_inductive -> mutual_inductive_entry -> mutual_inductive_body val translate_recipe : env -> constant -> Cooking.recipe -> constant_body coq-8.4pl2/kernel/closure.ml0000640000175000001440000010150512010532755015117 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* red_kind val fVAR : identifier -> red_kind val no_red : reds val red_add : reds -> red_kind -> reds val red_sub : reds -> red_kind -> reds val red_add_transparent : reds -> transparent_state -> reds val mkflags : red_kind list -> reds val red_set : reds -> red_kind -> bool end module RedFlags = (struct (* [r_const=(true,cl)] means all constants but those in [cl] *) (* [r_const=(false,cl)] means only those in [cl] *) (* [r_delta=true] just mean [r_const=(true,[])] *) type reds = { r_beta : bool; r_delta : bool; r_const : transparent_state; r_zeta : bool; r_iota : bool } type red_kind = BETA | DELTA | IOTA | ZETA | CONST of constant | VAR of identifier let fBETA = BETA let fDELTA = DELTA let fIOTA = IOTA let fZETA = ZETA let fCONST kn = CONST kn let fVAR id = VAR id let no_red = { r_beta = false; r_delta = false; r_const = all_opaque; r_zeta = false; r_iota = false } let red_add red = function | BETA -> { red with r_beta = true } | DELTA -> { red with r_delta = true; r_const = all_transparent } | CONST kn -> let (l1,l2) = red.r_const in { red with r_const = l1, Cpred.add kn l2 } | IOTA -> { red with r_iota = true } | ZETA -> { red with r_zeta = true } | VAR id -> let (l1,l2) = red.r_const in { red with r_const = Idpred.add id l1, l2 } let red_sub red = function | BETA -> { red with r_beta = false } | DELTA -> { red with r_delta = false } | CONST kn -> let (l1,l2) = red.r_const in { red with r_const = l1, Cpred.remove kn l2 } | IOTA -> { red with r_iota = false } | ZETA -> { red with r_zeta = false } | VAR id -> let (l1,l2) = red.r_const in { red with r_const = Idpred.remove id l1, l2 } let 
red_add_transparent red tr = { red with r_const = tr } let mkflags = List.fold_left red_add no_red let red_set red = function | BETA -> incr_cnt red.r_beta beta | CONST kn -> let (_,l) = red.r_const in let c = Cpred.mem kn l in incr_cnt c delta | VAR id -> (* En attendant d'avoir des kn pour les Var *) let (l,_) = red.r_const in let c = Idpred.mem id l in incr_cnt c delta | ZETA -> incr_cnt red.r_zeta zeta | IOTA -> incr_cnt red.r_iota iota | DELTA -> (* Used for Rel/Var defined in context *) incr_cnt red.r_delta delta end : RedFlagsSig) open RedFlags let betadeltaiota = mkflags [fBETA;fDELTA;fZETA;fIOTA] let betadeltaiotanolet = mkflags [fBETA;fDELTA;fIOTA] let betaiota = mkflags [fBETA;fIOTA] let beta = mkflags [fBETA] let betaiotazeta = mkflags [fBETA;fIOTA;fZETA] (* Removing fZETA for finer behaviour would break many developments *) let unfold_side_flags = [fBETA;fIOTA;fZETA] let unfold_side_red = mkflags [fBETA;fIOTA;fZETA] let unfold_red kn = let flag = match kn with | EvalVarRef id -> fVAR id | EvalConstRef kn -> fCONST kn in mkflags (flag::unfold_side_flags) (* Flags of reduction and cache of constants: 'a is a type that may be * mapped to constr. 'a infos implements a cache for constants and * abstractions, storing a representation (of type 'a) of the body of * this constant or abstraction. * * i_tab is the cache table of the results * * i_repr is the function to get the representation from the current * state of the cache and the body of the constant. The result * is stored in the table. * * i_rels = (4,[(1,c);(3,d)]) means there are 4 free rel variables * and only those with index 1 and 3 have bodies which are c and d resp. * * i_vars is the list of _defined_ named variables. * * ref_value_cache searchs in the tab, otherwise uses i_repr to * compute the result and store it in the table. If the constant can't * be unfolded, returns None, but does not store this failure. * This * doesn't take the RESET into account. You mustn't keep such a table * after a Reset. * This type is not exported. Only its two * instantiations (cbv or lazy) are. 
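 * As an informal example of the flags component: with [RedFlags] opened as above,
 * [mkflags [fBETA; fIOTA]] builds a [reds] value for which [red_set] answers true
 * on [fBETA] and [fIOTA] but false on [fZETA], [fDELTA] or any [fCONST kn];
 * [create] below then packages such flags together with the caches derived from
 * the environment.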
*) type table_key = id_key let eq_table_key = Names.eq_id_key type 'a infos = { i_flags : reds; i_repr : 'a infos -> constr -> 'a; i_env : env; i_sigma : existential -> constr option; i_rels : int * (int * constr) list; i_vars : (identifier * constr) list; i_tab : (table_key, 'a) Hashtbl.t } let info_flags info = info.i_flags let ref_value_cache info ref = try Some (Hashtbl.find info.i_tab ref) with Not_found -> try let body = match ref with | RelKey n -> let (s,l) = info.i_rels in lift n (List.assoc (s-n) l) | VarKey id -> List.assoc id info.i_vars | ConstKey cst -> constant_value info.i_env cst in let v = info.i_repr info body in Hashtbl.add info.i_tab ref v; Some v with | Not_found (* List.assoc *) | NotEvaluableConst _ (* Const *) -> None let evar_value info ev = info.i_sigma ev let defined_vars flags env = (* if red_local_const (snd flags) then*) Sign.fold_named_context (fun (id,b,_) e -> match b with | None -> e | Some body -> (id, body)::e) (named_context env) ~init:[] (* else []*) let defined_rels flags env = (* if red_local_const (snd flags) then*) Sign.fold_rel_context (fun (id,b,t) (i,subs) -> match b with | None -> (i+1, subs) | Some body -> (i+1, (i,body) :: subs)) (rel_context env) ~init:(0,[]) (* else (0,[])*) let create mk_cl flgs env evars = { i_flags = flgs; i_repr = mk_cl; i_env = env; i_sigma = evars; i_rels = defined_rels flgs env; i_vars = defined_vars flgs env; i_tab = Hashtbl.create 17 } (**********************************************************************) (* Lazy reduction: the one used in kernel operations *) (* type of shared terms. fconstr and frterm are mutually recursive. * Clone of the constr structure, but completely mutable, and * annotated with reduction state (reducible or not). * - FLIFT is a delayed shift; allows sharing between 2 lifted copies * of a given term. * - FCLOS is a delayed substitution applied to a constr * - FLOCKED is used to erase the content of a reference that must * be updated. This is to allow the garbage collector to work * before the term is computed. *) (* Norm means the term is fully normalized and cannot create a redex when substituted Cstr means the term is in head normal form and that it can create a redex when substituted (i.e. constructor, fix, lambda) Whnf means we reached the head normal form and that it cannot create a redex when substituted Red is used for terms that might be reduced *) type red_state = Norm | Cstr | Whnf | Red let neutr = function | (Whnf|Norm) -> Whnf | (Red|Cstr) -> Red type fconstr = { mutable norm: red_state; mutable term: fterm } and fterm = | FRel of int | FAtom of constr (* Metas and Sorts *) | FCast of fconstr * cast_kind * fconstr | FFlex of table_key | FInd of inductive | FConstruct of constructor | FApp of fconstr * fconstr array | FFix of fixpoint * fconstr subs | FCoFix of cofixpoint * fconstr subs | FCases of case_info * fconstr * fconstr * fconstr array | FLambda of int * (name * constr) list * constr * fconstr subs | FProd of name * fconstr * fconstr | FLetIn of name * fconstr * fconstr * constr * fconstr subs | FEvar of existential * fconstr subs | FLIFT of int * fconstr | FCLOS of constr * fconstr subs | FLOCKED let fterm_of v = v.term let set_norm v = v.norm <- Norm let is_val v = v.norm = Norm let mk_atom c = {norm=Norm;term=FAtom c} (* Could issue a warning if no is still Red, pointing out that we loose sharing. 
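   Concretely, when sharing is enabled, [update v1 (no,t)] physically overwrites
   the cell [v1] with the more reduced form, so every other part of the term graph
   pointing to [v1] benefits from the work already done; with sharing disabled it
   merely allocates a fresh cell, which is the lost-sharing situation mentioned above.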
*) let update v1 (no,t) = if !share then (v1.norm <- no; v1.term <- t; v1) else {norm=no;term=t} (**********************************************************************) (* The type of (machine) stacks (= lambda-bar-calculus' contexts) *) type stack_member = | Zapp of fconstr array | Zcase of case_info * fconstr * fconstr array | Zfix of fconstr * stack | Zshift of int | Zupdate of fconstr and stack = stack_member list let empty_stack = [] let append_stack v s = if Array.length v = 0 then s else match s with | Zapp l :: s -> Zapp (Array.append v l) :: s | _ -> Zapp v :: s (* Collapse the shifts in the stack *) let zshift n s = match (n,s) with (0,_) -> s | (_,Zshift(k)::s) -> Zshift(n+k)::s | _ -> Zshift(n)::s let rec stack_args_size = function | Zapp v :: s -> Array.length v + stack_args_size s | Zshift(_)::s -> stack_args_size s | Zupdate(_)::s -> stack_args_size s | _ -> 0 (* When used as an argument stack (only Zapp can appear) *) let rec decomp_stack = function | Zapp v :: s -> (match Array.length v with 0 -> decomp_stack s | 1 -> Some (v.(0), s) | _ -> Some (v.(0), (Zapp (Array.sub v 1 (Array.length v - 1)) :: s))) | _ -> None let array_of_stack s = let rec stackrec = function | [] -> [] | Zapp args :: s -> args :: (stackrec s) | _ -> assert false in Array.concat (stackrec s) let rec stack_assign s p c = match s with | Zapp args :: s -> let q = Array.length args in if p >= q then Zapp args :: stack_assign s (p-q) c else (let nargs = Array.copy args in nargs.(p) <- c; Zapp nargs :: s) | _ -> s let rec stack_tail p s = if p = 0 then s else match s with | Zapp args :: s -> let q = Array.length args in if p >= q then stack_tail (p-q) s else Zapp (Array.sub args p (q-p)) :: s | _ -> failwith "stack_tail" let rec stack_nth s p = match s with | Zapp args :: s -> let q = Array.length args in if p >= q then stack_nth s (p-q) else args.(p) | _ -> raise Not_found (* Lifting. Preserves sharing (useful only for cell with norm=Red). lft_fconstr always create a new cell, while lift_fconstr avoids it when the lift is 0. 
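   Informally: [lift_fconstr 0 ft] returns [ft] itself, unchanged; for a non-zero
   lift, heads that cannot mention free rels ([FInd], [FConstruct], [FFlex] on a
   constant or section variable) are also returned as they are, and in the remaining
   cases the shift is either applied directly (for [FRel]), pushed into the stored
   substitution (for [FLambda], [FFix], [FCoFix]) or just recorded in a [FLIFT] node,
   rather than being propagated through the whole term.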
*) let rec lft_fconstr n ft = match ft.term with | (FInd _|FConstruct _|FFlex(ConstKey _|VarKey _)) -> ft | FRel i -> {norm=Norm;term=FRel(i+n)} | FLambda(k,tys,f,e) -> {norm=Cstr; term=FLambda(k,tys,f,subs_shft(n,e))} | FFix(fx,e) -> {norm=Cstr; term=FFix(fx,subs_shft(n,e))} | FCoFix(cfx,e) -> {norm=Cstr; term=FCoFix(cfx,subs_shft(n,e))} | FLIFT(k,m) -> lft_fconstr (n+k) m | FLOCKED -> assert false | _ -> {norm=ft.norm; term=FLIFT(n,ft)} let lift_fconstr k f = if k=0 then f else lft_fconstr k f let lift_fconstr_vect k v = if k=0 then v else Array.map (fun f -> lft_fconstr k f) v let clos_rel e i = match expand_rel i e with | Inl(n,mt) -> lift_fconstr n mt | Inr(k,None) -> {norm=Norm; term= FRel k} | Inr(k,Some p) -> lift_fconstr (k-p) {norm=Red;term=FFlex(RelKey p)} (* since the head may be reducible, we might introduce lifts of 0 *) let compact_stack head stk = let rec strip_rec depth = function | Zshift(k)::s -> strip_rec (depth+k) s | Zupdate(m)::s -> (* Be sure to create a new cell otherwise sharing would be lost by the update operation *) let h' = lft_fconstr depth head in let _ = update m (h'.norm,h'.term) in strip_rec depth s | stk -> zshift depth stk in strip_rec 0 stk (* Put an update mark in the stack, only if needed *) let zupdate m s = if !share & m.norm = Red then let s' = compact_stack m s in let _ = m.term <- FLOCKED in Zupdate(m)::s' else s (* Closure optimization: *) let rec compact_constr (lg, subs as s) c k = match kind_of_term c with Rel i -> if i < k then c,s else (try mkRel (k + lg - list_index (i-k+1) subs), (lg,subs) with Not_found -> mkRel (k+lg), (lg+1, (i-k+1)::subs)) | (Sort _|Var _|Meta _|Ind _|Const _|Construct _) -> c,s | Evar(ev,v) -> let (v',s) = compact_vect s v k in if v==v' then c,s else mkEvar(ev,v'),s | Cast(a,ck,b) -> let (a',s) = compact_constr s a k in let (b',s) = compact_constr s b k in if a==a' && b==b' then c,s else mkCast(a', ck, b'), s | App(f,v) -> let (f',s) = compact_constr s f k in let (v',s) = compact_vect s v k in if f==f' && v==v' then c,s else mkApp(f',v'), s | Lambda(n,a,b) -> let (a',s) = compact_constr s a k in let (b',s) = compact_constr s b (k+1) in if a==a' && b==b' then c,s else mkLambda(n,a',b'), s | Prod(n,a,b) -> let (a',s) = compact_constr s a k in let (b',s) = compact_constr s b (k+1) in if a==a' && b==b' then c,s else mkProd(n,a',b'), s | LetIn(n,a,ty,b) -> let (a',s) = compact_constr s a k in let (ty',s) = compact_constr s ty k in let (b',s) = compact_constr s b (k+1) in if a==a' && ty==ty' && b==b' then c,s else mkLetIn(n,a',ty',b'), s | Fix(fi,(na,ty,bd)) -> let (ty',s) = compact_vect s ty k in let (bd',s) = compact_vect s bd (k+Array.length ty) in if ty==ty' && bd==bd' then c,s else mkFix(fi,(na,ty',bd')), s | CoFix(i,(na,ty,bd)) -> let (ty',s) = compact_vect s ty k in let (bd',s) = compact_vect s bd (k+Array.length ty) in if ty==ty' && bd==bd' then c,s else mkCoFix(i,(na,ty',bd')), s | Case(ci,p,a,br) -> let (p',s) = compact_constr s p k in let (a',s) = compact_constr s a k in let (br',s) = compact_vect s br k in if p==p' && a==a' && br==br' then c,s else mkCase(ci,p',a',br'),s and compact_vect s v k = compact_v [] s v k (Array.length v - 1) and compact_v acc s v k i = if i < 0 then let v' = Array.of_list acc in if array_for_all2 (==) v v' then v,s else v',s else let (a',s') = compact_constr s v.(i) k in compact_v (a'::acc) s' v k (i-1) (* Computes the minimal environment of a closure. Idea: if the subs is not identity, the term will have to be reallocated entirely (to propagate the substitution). 
So, computing the set of free variables does not change the complexity. *) let optimise_closure env c = if is_subs_id env then (env,c) else let (c',(_,s)) = compact_constr (0,[]) c 1 in let env' = Array.map (fun i -> clos_rel env i) (Array.of_list s) in (subs_cons (env', subs_id 0),c') let mk_lambda env t = let (env,t) = optimise_closure env t in let (rvars,t') = decompose_lam t in FLambda(List.length rvars, List.rev rvars, t', env) let destFLambda clos_fun t = match t.term with FLambda(_,[(na,ty)],b,e) -> (na,clos_fun e ty,clos_fun (subs_lift e) b) | FLambda(n,(na,ty)::tys,b,e) -> (na,clos_fun e ty,{norm=Cstr;term=FLambda(n-1,tys,b,subs_lift e)}) | _ -> assert false (* t must be a FLambda and binding list cannot be empty *) (* Optimization: do not enclose variables in a closure. Makes variable access much faster *) let mk_clos e t = match kind_of_term t with | Rel i -> clos_rel e i | Var x -> { norm = Red; term = FFlex (VarKey x) } | Const c -> { norm = Red; term = FFlex (ConstKey c) } | Meta _ | Sort _ -> { norm = Norm; term = FAtom t } | Ind kn -> { norm = Norm; term = FInd kn } | Construct kn -> { norm = Cstr; term = FConstruct kn } | (CoFix _|Lambda _|Fix _|Prod _|Evar _|App _|Case _|Cast _|LetIn _) -> {norm = Red; term = FCLOS(t,e)} let mk_clos_vect env v = Array.map (mk_clos env) v (* Translate the head constructor of t from constr to fconstr. This function is parameterized by the function to apply on the direct subterms. Could be used insted of mk_clos. *) let mk_clos_deep clos_fun env t = match kind_of_term t with | (Rel _|Ind _|Const _|Construct _|Var _|Meta _ | Sort _) -> mk_clos env t | Cast (a,k,b) -> { norm = Red; term = FCast (clos_fun env a, k, clos_fun env b)} | App (f,v) -> { norm = Red; term = FApp (clos_fun env f, Array.map (clos_fun env) v) } | Case (ci,p,c,v) -> { norm = Red; term = FCases (ci, clos_fun env p, clos_fun env c, Array.map (clos_fun env) v) } | Fix fx -> { norm = Cstr; term = FFix (fx, env) } | CoFix cfx -> { norm = Cstr; term = FCoFix(cfx,env) } | Lambda _ -> { norm = Cstr; term = mk_lambda env t } | Prod (n,t,c) -> { norm = Whnf; term = FProd (n, clos_fun env t, clos_fun (subs_lift env) c) } | LetIn (n,b,t,c) -> { norm = Red; term = FLetIn (n, clos_fun env b, clos_fun env t, c, env) } | Evar ev -> { norm = Red; term = FEvar(ev,env) } (* A better mk_clos? 
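   For comparison (informal): [mk_clos e t] inspects only the head of [t], so on an
   application it returns [FCLOS (t,e)] with status Red and delays everything,
   whereas [mk_clos_deep mk_clos e t], i.e. [mk_clos2] below, eagerly translates one
   more layer and builds an [FApp] node whose head and arguments are themselves closures.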
*) let mk_clos2 = mk_clos_deep mk_clos (* The inverse of mk_clos_deep: move back to constr *) let rec to_constr constr_fun lfts v = match v.term with | FRel i -> mkRel (reloc_rel i lfts) | FFlex (RelKey p) -> mkRel (reloc_rel p lfts) | FFlex (VarKey x) -> mkVar x | FAtom c -> exliftn lfts c | FCast (a,k,b) -> mkCast (constr_fun lfts a, k, constr_fun lfts b) | FFlex (ConstKey op) -> mkConst op | FInd op -> mkInd op | FConstruct op -> mkConstruct op | FCases (ci,p,c,ve) -> mkCase (ci, constr_fun lfts p, constr_fun lfts c, Array.map (constr_fun lfts) ve) | FFix ((op,(lna,tys,bds)),e) -> let n = Array.length bds in let ftys = Array.map (mk_clos e) tys in let fbds = Array.map (mk_clos (subs_liftn n e)) bds in let lfts' = el_liftn n lfts in mkFix (op, (lna, Array.map (constr_fun lfts) ftys, Array.map (constr_fun lfts') fbds)) | FCoFix ((op,(lna,tys,bds)),e) -> let n = Array.length bds in let ftys = Array.map (mk_clos e) tys in let fbds = Array.map (mk_clos (subs_liftn n e)) bds in let lfts' = el_liftn (Array.length bds) lfts in mkCoFix (op, (lna, Array.map (constr_fun lfts) ftys, Array.map (constr_fun lfts') fbds)) | FApp (f,ve) -> mkApp (constr_fun lfts f, Array.map (constr_fun lfts) ve) | FLambda _ -> let (na,ty,bd) = destFLambda mk_clos2 v in mkLambda (na, constr_fun lfts ty, constr_fun (el_lift lfts) bd) | FProd (n,t,c) -> mkProd (n, constr_fun lfts t, constr_fun (el_lift lfts) c) | FLetIn (n,b,t,f,e) -> let fc = mk_clos2 (subs_lift e) f in mkLetIn (n, constr_fun lfts b, constr_fun lfts t, constr_fun (el_lift lfts) fc) | FEvar ((ev,args),env) -> mkEvar(ev,Array.map (fun a -> constr_fun lfts (mk_clos2 env a)) args) | FLIFT (k,a) -> to_constr constr_fun (el_shft k lfts) a | FCLOS (t,env) -> let fr = mk_clos2 env t in let unfv = update v (fr.norm,fr.term) in to_constr constr_fun lfts unfv | FLOCKED -> assert false (*mkVar(id_of_string"_LOCK_")*) (* This function defines the correspondance between constr and fconstr. When we find a closure whose substitution is the identity, then we directly return the constr to avoid possibly huge reallocation. *) let term_of_fconstr = let rec term_of_fconstr_lift lfts v = match v.term with | FCLOS(t,env) when is_subs_id env & is_lift_id lfts -> t | FLambda(_,tys,f,e) when is_subs_id e & is_lift_id lfts -> compose_lam (List.rev tys) f | FFix(fx,e) when is_subs_id e & is_lift_id lfts -> mkFix fx | FCoFix(cfx,e) when is_subs_id e & is_lift_id lfts -> mkCoFix cfx | _ -> to_constr term_of_fconstr_lift lfts v in term_of_fconstr_lift el_id (* fstrong applies unfreeze_fun recursively on the (freeze) term and * yields a term. Assumes that the unfreeze_fun never returns a * FCLOS term. let rec fstrong unfreeze_fun lfts v = to_constr (fstrong unfreeze_fun) lfts (unfreeze_fun v) *) let rec zip m stk = match stk with | [] -> m | Zapp args :: s -> zip {norm=neutr m.norm; term=FApp(m, args)} s | Zcase(ci,p,br)::s -> let t = FCases(ci, p, m, br) in zip {norm=neutr m.norm; term=t} s | Zfix(fx,par)::s -> zip fx (par @ append_stack [|m|] s) | Zshift(n)::s -> zip (lift_fconstr n m) s | Zupdate(rf)::s -> zip (update rf (m.norm,m.term)) s let fapp_stack (m,stk) = zip m stk (*********************************************************************) (* The assertions in the functions below are granted because they are called only when m is a constructor, a cofix (strip_update_shift_app), a fix (get_nth_arg) or an abstraction (strip_update_shift, through get_arg). *) (* optimised for the case where there are no shifts... 
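   As an informal reminder of the stack encoding handled here: a constructor
   scrutinised by a match shows up with the constructor as head and a stack of the
   shape [Zapp args :: Zcase (ci,p,br) :: s]; the function below walks the
   applicative prefix of such a stack, collapsing [Zshift] marks and discharging
   [Zupdate] marks, and stops at the first item that is neither an argument block
   nor a shift.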
*) let strip_update_shift_app head stk = assert (head.norm <> Red); let rec strip_rec rstk h depth = function | Zshift(k) as e :: s -> strip_rec (e::rstk) (lift_fconstr k h) (depth+k) s | (Zapp args :: s) -> strip_rec (Zapp args :: rstk) {norm=h.norm;term=FApp(h,args)} depth s | Zupdate(m)::s -> strip_rec rstk (update m (h.norm,h.term)) depth s | stk -> (depth,List.rev rstk, stk) in strip_rec [] head 0 stk let get_nth_arg head n stk = assert (head.norm <> Red); let rec strip_rec rstk h n = function | Zshift(k) as e :: s -> strip_rec (e::rstk) (lift_fconstr k h) n s | Zapp args::s' -> let q = Array.length args in if n >= q then strip_rec (Zapp args::rstk) {norm=h.norm;term=FApp(h,args)} (n-q) s' else let bef = Array.sub args 0 n in let aft = Array.sub args (n+1) (q-n-1) in let stk' = List.rev (if n = 0 then rstk else (Zapp bef :: rstk)) in (Some (stk', args.(n)), append_stack aft s') | Zupdate(m)::s -> strip_rec rstk (update m (h.norm,h.term)) n s | s -> (None, List.rev rstk @ s) in strip_rec [] head n stk (* Beta reduction: look for an applied argument in the stack. Since the encountered update marks are removed, h must be a whnf *) let rec get_args n tys f e stk = match stk with Zupdate r :: s -> let _hd = update r (Cstr,FLambda(n,tys,f,e)) in get_args n tys f e s | Zshift k :: s -> get_args n tys f (subs_shft (k,e)) s | Zapp l :: s -> let na = Array.length l in if n == na then (Inl (subs_cons(l,e)),s) else if n < na then (* more arguments *) let args = Array.sub l 0 n in let eargs = Array.sub l n (na-n) in (Inl (subs_cons(args,e)), Zapp eargs :: s) else (* more lambdas *) let etys = list_skipn na tys in get_args (n-na) etys f (subs_cons(l,e)) s | _ -> (Inr {norm=Cstr;term=FLambda(n,tys,f,e)}, stk) (* Eta expansion: add a reference to implicit surrounding lambda at end of stack *) let rec eta_expand_stack = function | (Zapp _ | Zfix _ | Zcase _ | Zshift _ | Zupdate _ as e) :: s -> e :: eta_expand_stack s | [] -> [Zshift 1; Zapp [|{norm=Norm; term= FRel 1}|]] (* Iota reduction: extract the arguments to be passed to the Case branches *) let rec reloc_rargs_rec depth stk = match stk with Zapp args :: s -> Zapp (lift_fconstr_vect depth args) :: reloc_rargs_rec depth s | Zshift(k)::s -> if k=depth then s else reloc_rargs_rec (depth-k) s | _ -> stk let reloc_rargs depth stk = if depth = 0 then stk else reloc_rargs_rec depth stk let rec drop_parameters depth n argstk = match argstk with Zapp args::s -> let q = Array.length args in if n > q then drop_parameters depth (n-q) s else if n = q then reloc_rargs depth s else let aft = Array.sub args n (q-n) in reloc_rargs depth (append_stack aft s) | Zshift(k)::s -> drop_parameters (depth-k) n s | [] -> (* we know that n < stack_args_size(argstk) (if well-typed term) *) if n=0 then [] else anomaly "ill-typed term: found a match on a partially applied constructor" | _ -> assert false (* strip_update_shift_app only produces Zapp and Zshift items *) (* Iota reduction: expansion of a fixpoint. * Given a fixpoint and a substitution, returns the corresponding * fixpoint body, and the substitution in which it should be * evaluated: its first variables are the fixpoint bodies * * FCLOS(fix Fi {F0 := T0 .. Fn-1 := Tn-1}, S) * -> (S. FCLOS(F0,S) . ... . 
FCLOS(Fn-1,S), Ti) *) (* does not deal with FLIFT *) let contract_fix_vect fix = let (thisbody, make_body, env, nfix) = match fix with | FFix (((reci,i),(_,_,bds as rdcl)),env) -> (bds.(i), (fun j -> { norm = Cstr; term = FFix (((reci,j),rdcl),env) }), env, Array.length bds) | FCoFix ((i,(_,_,bds as rdcl)),env) -> (bds.(i), (fun j -> { norm = Cstr; term = FCoFix ((j,rdcl),env) }), env, Array.length bds) | _ -> assert false in (subs_cons(Array.init nfix make_body, env), thisbody) (*********************************************************************) (* A machine that inspects the head of a term until it finds an atom or a subterm that may produce a redex (abstraction, constructor, cofix, letin, constant), or a neutral term (product, inductive) *) let rec knh m stk = match m.term with | FLIFT(k,a) -> knh a (zshift k stk) | FCLOS(t,e) -> knht e t (zupdate m stk) | FLOCKED -> assert false | FApp(a,b) -> knh a (append_stack b (zupdate m stk)) | FCases(ci,p,t,br) -> knh t (Zcase(ci,p,br)::zupdate m stk) | FFix(((ri,n),(_,_,_)),_) -> (match get_nth_arg m ri.(n) stk with (Some(pars,arg),stk') -> knh arg (Zfix(m,pars)::stk') | (None, stk') -> (m,stk')) | FCast(t,_,_) -> knh t stk (* cases where knh stops *) | (FFlex _|FLetIn _|FConstruct _|FEvar _| FCoFix _|FLambda _|FRel _|FAtom _|FInd _|FProd _) -> (m, stk) (* The same for pure terms *) and knht e t stk = match kind_of_term t with | App(a,b) -> knht e a (append_stack (mk_clos_vect e b) stk) | Case(ci,p,t,br) -> knht e t (Zcase(ci, mk_clos e p, mk_clos_vect e br)::stk) | Fix _ -> knh (mk_clos2 e t) stk | Cast(a,_,_) -> knht e a stk | Rel n -> knh (clos_rel e n) stk | (Lambda _|Prod _|Construct _|CoFix _|Ind _| LetIn _|Const _|Var _|Evar _|Meta _|Sort _) -> (mk_clos2 e t, stk) (************************************************************************) (* Computes a weak head normal form from the result of knh. 
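   For example (informal trace): with [fBETA] enabled, weak-head reducing the
   application of an identity function to [c] goes as follows: [knh]/[knht] push
   the argument as [Zapp [|c|]] and stop on the [FLambda] head, the [FLambda] case
   below then calls [get_args], which pops the argument and extends the
   substitution, and [knit] restarts on the body, so the machine ends up on the
   closure of [c].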
*) let rec knr info m stk = match m.term with | FLambda(n,tys,f,e) when red_set info.i_flags fBETA -> (match get_args n tys f e stk with Inl e', s -> knit info e' f s | Inr lam, s -> (lam,s)) | FFlex(ConstKey kn) when red_set info.i_flags (fCONST kn) -> (match ref_value_cache info (ConstKey kn) with Some v -> kni info v stk | None -> (set_norm m; (m,stk))) | FFlex(VarKey id) when red_set info.i_flags (fVAR id) -> (match ref_value_cache info (VarKey id) with Some v -> kni info v stk | None -> (set_norm m; (m,stk))) | FFlex(RelKey k) when red_set info.i_flags fDELTA -> (match ref_value_cache info (RelKey k) with Some v -> kni info v stk | None -> (set_norm m; (m,stk))) | FConstruct(ind,c) when red_set info.i_flags fIOTA -> (match strip_update_shift_app m stk with (depth, args, Zcase(ci,_,br)::s) -> assert (ci.ci_npar>=0); let rargs = drop_parameters depth ci.ci_npar args in kni info br.(c-1) (rargs@s) | (_, cargs, Zfix(fx,par)::s) -> let rarg = fapp_stack(m,cargs) in let stk' = par @ append_stack [|rarg|] s in let (fxe,fxbd) = contract_fix_vect fx.term in knit info fxe fxbd stk' | (_,args,s) -> (m,args@s)) | FCoFix _ when red_set info.i_flags fIOTA -> (match strip_update_shift_app m stk with (_, args, ((Zcase _::_) as stk')) -> let (fxe,fxbd) = contract_fix_vect m.term in knit info fxe fxbd (args@stk') | (_,args,s) -> (m,args@s)) | FLetIn (_,v,_,bd,e) when red_set info.i_flags fZETA -> knit info (subs_cons([|v|],e)) bd stk | FEvar(ev,env) -> (match evar_value info ev with Some c -> knit info env c stk | None -> (m,stk)) | _ -> (m,stk) (* Computes the weak head normal form of a term *) and kni info m stk = let (hm,s) = knh m stk in knr info hm s and knit info e t stk = let (ht,s) = knht e t stk in knr info ht s let kh info v stk = fapp_stack(kni info v stk) (************************************************************************) let rec zip_term zfun m stk = match stk with | [] -> m | Zapp args :: s -> zip_term zfun (mkApp(m, Array.map zfun args)) s | Zcase(ci,p,br)::s -> let t = mkCase(ci, zfun p, m, Array.map zfun br) in zip_term zfun t s | Zfix(fx,par)::s -> let h = mkApp(zip_term zfun (zfun fx) par,[|m|]) in zip_term zfun h s | Zshift(n)::s -> zip_term zfun (lift n m) s | Zupdate(rf)::s -> zip_term zfun m s (* Computes the strong normal form of a term. 1- Calls kni 2- tries to rebuild the term. If a closure still has to be computed, calls itself recursively. *) let rec kl info m = if is_val m then (incr prune; term_of_fconstr m) else let (nm,s) = kni info m [] in let _ = fapp_stack(nm,s) in (* to unlock Zupdates! *) zip_term (kl info) (norm_head info nm) s (* no redex: go up for atoms and already normalized terms, go down otherwise. 
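   A hypothetical use of the entry points defined below, for orientation:
   [let infos = create_clos_infos betadeltaiota env in norm_val infos (inject t)]
   computes a strong normal form of [t] in [env], while [whd_val infos (inject t)]
   stops at the weak head normal form; [inject] simply wraps a [constr] as a closure
   under the identity substitution.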
*) and norm_head info m = if is_val m then (incr prune; term_of_fconstr m) else match m.term with | FLambda(n,tys,f,e) -> let (e',rvtys) = List.fold_left (fun (e,ctxt) (na,ty) -> (subs_lift e, (na,kl info (mk_clos e ty))::ctxt)) (e,[]) tys in let bd = kl info (mk_clos e' f) in List.fold_left (fun b (na,ty) -> mkLambda(na,ty,b)) bd rvtys | FLetIn(na,a,b,f,e) -> let c = mk_clos (subs_lift e) f in mkLetIn(na, kl info a, kl info b, kl info c) | FProd(na,dom,rng) -> mkProd(na, kl info dom, kl info rng) | FCoFix((n,(na,tys,bds)),e) -> let ftys = Array.map (mk_clos e) tys in let fbds = Array.map (mk_clos (subs_liftn (Array.length na) e)) bds in mkCoFix(n,(na, Array.map (kl info) ftys, Array.map (kl info) fbds)) | FFix((n,(na,tys,bds)),e) -> let ftys = Array.map (mk_clos e) tys in let fbds = Array.map (mk_clos (subs_liftn (Array.length na) e)) bds in mkFix(n,(na, Array.map (kl info) ftys, Array.map (kl info) fbds)) | FEvar((i,args),env) -> mkEvar(i, Array.map (fun a -> kl info (mk_clos env a)) args) | t -> term_of_fconstr m (* Initialization and then normalization *) (* weak reduction *) let whd_val info v = with_stats (lazy (term_of_fconstr (kh info v []))) (* strong reduction *) let norm_val info v = with_stats (lazy (kl info v)) let inject = mk_clos (subs_id 0) let whd_stack infos m stk = let k = kni infos m stk in let _ = fapp_stack k in (* to unlock Zupdates! *) k (* cache of constants: the body is computed only when needed. *) type clos_infos = fconstr infos let create_clos_infos ?(evars=fun _ -> None) flgs env = create (fun _ -> inject) flgs env evars let unfold_reference = ref_value_cache coq-8.4pl2/kernel/esubst.mli0000640000175000001440000000546112010532755015125 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a subs val subs_cons: 'a array * 'a subs -> 'a subs val subs_shft: int * 'a subs -> 'a subs val subs_lift: 'a subs -> 'a subs val subs_liftn: int -> 'a subs -> 'a subs (** [subs_shift_cons(k,s,[|t1..tn|])] builds (^k s).t1..tn *) val subs_shift_cons: int * 'a subs * 'a array -> 'a subs (** [expand_rel k subs] expands de Bruijn [k] in the explicit substitution [subs]. The result is either (Inl(lams,v)) when the variable is substituted by value [v] under lams binders (i.e. v *has* to be shifted by lams), or (Inr (k',p)) when the variable k is just relocated as k'; p is None if the variable points inside subs and Some(k) if the variable points k bindings beyond subs (cf argument of ESID). *) val expand_rel: int -> 'a subs -> (int * 'a, int * int option) Util.union (** Tests whether a substitution behaves like the identity *) val is_subs_id: 'a subs -> bool (** Composition of substitutions: [comp mk_clos s1 s2] computes a substitution equivalent to applying s2 then s1. Argument mk_clos is used when a closure has to be created, i.e. when s1 is applied on an element of s2. *) val comp : ('a subs * 'a -> 'a) -> 'a subs -> 'a subs -> 'a subs (** {6 Compact representation } *) (** Compact representation of explicit relocations - [ELSHFT(l,n)] == lift of [n], then apply [lift l]. - [ELLFT(n,l)] == apply [l] to de Bruijn > [n] i.e under n binders. 
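    For example (informal, derived from the description above): if [el] is a shift
    by 3 placed under 2 binders, i.e. [el_liftn 2 (el_shft 3 el_id)], then
    [reloc_rel 1 el = 1], since index 1 is bound by one of the two protected
    binders, whereas [reloc_rel 3 el = 6], since index 3 points outside them and is
    therefore shifted by 3.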
*) type lift = private | ELID | ELSHFT of lift * int | ELLFT of int * lift val el_id : lift val el_shft : int -> lift -> lift val el_liftn : int -> lift -> lift val el_lift : lift -> lift val reloc_rel : int -> lift -> int val is_lift_id : lift -> bool coq-8.4pl2/kernel/declarations.mli0000640000175000001440000001764612010532755016300 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr_substituted val force : constr_substituted -> constr (** Opaque proof terms are not loaded immediately, but are there in a lazy form. Forcing this lazy may trigger some unmarshal of the necessary structure. *) type lazy_constr val subst_lazy_constr : substitution -> lazy_constr -> lazy_constr val force_lazy_constr : lazy_constr -> constr_substituted val make_lazy_constr : constr_substituted Lazy.t -> lazy_constr val lazy_constr_is_val : lazy_constr -> bool val force_opaque : lazy_constr -> constr val opaque_from_val : constr -> lazy_constr (** Inlining level of parameters at functor applications. None means no inlining *) type inline = int option (** A constant can have no body (axiom/parameter), or a transparent body, or an opaque one *) type constant_def = | Undef of inline | Def of constr_substituted | OpaqueDef of lazy_constr type constant_body = { const_hyps : section_context; (** New: younger hyp at top *) const_body : constant_def; const_type : constant_type; const_body_code : to_patch_substituted; const_constraints : constraints } val subst_const_def : substitution -> constant_def -> constant_def val subst_const_body : substitution -> constant_body -> constant_body (** Is there a actual body in const_body or const_body_opaque ? *) val constant_has_body : constant_body -> bool (** Accessing const_body_opaque or const_body *) val body_of_constant : constant_body -> constr_substituted option val is_opaque : constant_body -> bool (** {6 Representation of mutual inductive types in the kernel } *) type recarg = | Norec | Mrec of inductive | Imbr of inductive val subst_recarg : substitution -> recarg -> recarg type wf_paths = recarg Rtree.t val mk_norec : wf_paths val mk_paths : recarg -> wf_paths list array -> wf_paths val dest_recarg : wf_paths -> recarg val dest_subterms : wf_paths -> wf_paths list array val recarg_length : wf_paths -> int -> int val subst_wf_paths : substitution -> wf_paths -> wf_paths (** {v Inductive I1 (params) : U1 := c11 : T11 | ... | c1p1 : T1p1 ... with In (params) : Un := cn1 : Tn1 | ... | cnpn : Tnpn v} *) type monomorphic_inductive_arity = { mind_user_arity : constr; mind_sort : sorts; } type inductive_arity = | Monomorphic of monomorphic_inductive_arity | Polymorphic of polymorphic_arity type one_inductive_body = { (** {8 Primitive datas } *) mind_typename : identifier; (** Name of the type: [Ii] *) mind_arity_ctxt : rel_context; (** Arity context of [Ii] with parameters: [forall params, Ui] *) mind_arity : inductive_arity; (** Arity sort and original user arity if monomorphic *) mind_consnames : identifier array; (** Names of the constructors: [cij] *) mind_user_lc : types array; (** Types of the constructors with parameters: [forall params, Tij], where the Ik are replaced by de Bruijn index in the context I1:forall params, U1 .. 
In:forall params, Un *) (** {8 Derived datas } *) mind_nrealargs : int; (** Number of expected real arguments of the type (no let, no params) *) mind_nrealargs_ctxt : int; (** Length of realargs context (with let, no params) *) mind_kelim : sorts_family list; (** List of allowed elimination sorts *) mind_nf_lc : types array; (** Head normalized constructor types so that their conclusion is atomic *) mind_consnrealdecls : int array; (** Length of the signature of the constructors (with let, w/o params) (not used in the kernel) *) mind_recargs : wf_paths; (** Signature of recursive arguments in the constructors *) (** {8 Datas for bytecode compilation } *) mind_nb_constant : int; (** number of constant constructor *) mind_nb_args : int; (** number of no constant constructor *) mind_reloc_tbl : Cbytecodes.reloc_table; } type mutual_inductive_body = { mind_packets : one_inductive_body array; (** The component of the mutual inductive block *) mind_record : bool; (** Whether the inductive type has been declared as a record *) mind_finite : bool; (** Whether the type is inductive or coinductive *) mind_ntypes : int; (** Number of types in the block *) mind_hyps : section_context; (** Section hypotheses on which the block depends *) mind_nparams : int; (** Number of expected parameters *) mind_nparams_rec : int; (** Number of recursively uniform (i.e. ordinary) parameters *) mind_params_ctxt : rel_context; (** The context of parameters (includes let-in declaration) *) mind_constraints : constraints; (** Universes constraints enforced by the inductive declaration *) } val subst_mind : substitution -> mutual_inductive_body -> mutual_inductive_body (** {6 Modules: signature component specifications, module types, and module declarations } *) type structure_field_body = | SFBconst of constant_body | SFBmind of mutual_inductive_body | SFBmodule of module_body | SFBmodtype of module_type_body (** NB: we may encounter now (at most) twice the same label in a [structure_body], once for a module ([SFBmodule] or [SFBmodtype]) and once for an object ([SFBconst] or [SFBmind]) *) and structure_body = (label * structure_field_body) list and struct_expr_body = | SEBident of module_path | SEBfunctor of mod_bound_id * module_type_body * struct_expr_body | SEBapply of struct_expr_body * struct_expr_body * constraints | SEBstruct of structure_body | SEBwith of struct_expr_body * with_declaration_body and with_declaration_body = With_module_body of identifier list * module_path | With_definition_body of identifier list * constant_body and module_body = { (** absolute path of the module *) mod_mp : module_path; (** Implementation *) mod_expr : struct_expr_body option; (** Signature *) mod_type : struct_expr_body; (** algebraic structure expression is kept if it's relevant for extraction *) mod_type_alg : struct_expr_body option; (** set of all constraint in the module *) mod_constraints : constraints; (** quotiented set of equivalent constant and inductive name *) mod_delta : delta_resolver; mod_retroknowledge : Retroknowledge.action list} and module_type_body = { (** Path of the module type *) typ_mp : module_path; typ_expr : struct_expr_body; (** algebraic structure expression is kept if it's relevant for extraction *) typ_expr_alg : struct_expr_body option ; typ_constraints : constraints; (** quotiented set of equivalent constant and inductive name *) typ_delta :delta_resolver} (** Hash-consing *) (** Here, strictly speaking, we don't perform true hash-consing of the structure, but simply hash-cons all inner constr 
and other known elements *) val hcons_const_body : constant_body -> constant_body val hcons_mind : mutual_inductive_body -> mutual_inductive_body coq-8.4pl2/kernel/vm.ml0000640000175000001440000004724612010532755014100 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit = "coq_set_drawinstr" (******************************************) (* Utility Functions about Obj ************) (******************************************) external offset_closure : Obj.t -> int -> Obj.t = "coq_offset_closure" external offset : Obj.t -> int = "coq_offset" let accu_tag = 0 (*******************************************) (* Initalization of the abstract machine ***) (*******************************************) external init_vm : unit -> unit = "init_coq_vm" let _ = init_vm () external transp_values : unit -> bool = "get_coq_transp_value" external set_transp_values : bool -> unit = "coq_set_transp_value" (*******************************************) (* Machine code *** ************************) (*******************************************) type tcode let tcode_of_obj v = ((Obj.obj v):tcode) let fun_code v = tcode_of_obj (Obj.field (Obj.repr v) 0) external mkAccuCode : int -> tcode = "coq_makeaccu" external mkPopStopCode : int -> tcode = "coq_pushpop" external mkAccuCond : int -> tcode = "coq_accucond" external offset_tcode : tcode -> int -> tcode = "coq_offset_tcode" external int_tcode : tcode -> int -> int = "coq_int_tcode" external accumulate : unit -> tcode = "accumulate_code" let accumulate = accumulate () external is_accumulate : tcode -> bool = "coq_is_accumulate_code" let popstop_tbl = ref (Array.init 30 mkPopStopCode) let popstop_code i = let len = Array.length !popstop_tbl in if i < len then !popstop_tbl.(i) else begin popstop_tbl := Array.init (i+10) (fun j -> if j < len then !popstop_tbl.(j) else mkPopStopCode j); !popstop_tbl.(i) end let stop = popstop_code 0 (******************************************************) (* Abstract data types and utility functions **********) (******************************************************) (* Values of the abstract machine *) let val_of_obj v = ((Obj.obj v):values) let crazy_val = (val_of_obj (Obj.repr 0)) (* Abstract data *) type vprod type vfun type vfix type vcofix type vblock type arguments type vm_env type vstack = values array type vswitch = { sw_type_code : tcode; sw_code : tcode; sw_annot : annot_switch; sw_stk : vstack; sw_env : vm_env } (* Representation of values *) (* + Products : *) (* - vprod = 0_[ dom | codom] *) (* dom : values, codom : vfun *) (* *) (* + Functions have two representations : *) (* - unapplied fun : vf = Ct_[ C | fv1 | ... | fvn] *) (* C:tcode, fvi : values *) (* Remark : a function and its environment is the same value. *) (* - partially applied fun : Ct_[Restart:C| vf | arg1 | ... argn] *) (* *) (* + Fixpoints : *) (* - Ct_[C1|Infix_t|C2|...|Infix_t|Cn|fv1|...|fvn] *) (* One single block to represent all of the fixpoints, each fixpoint *) (* is the pointer to the field holding the pointer to its code, and *) (* the infix tag is used to know where the block starts. *) (* - Partial application follows the scheme of partially applied *) (* functions. Note: only fixpoints not having been applied to its *) (* recursive argument are coded this way. When the rec. arg. is *) (* applied, either it's a constructor and the fix reduces, or it's *) (* and the fix is coded as an accumulator. 
*) (* *) (* + Cofixpoints : see cbytegen.ml *) (* *) (* + vblock's encode (non constant) constructors as in Ocaml, but *) (* starting from 0 up. tag 0 ( = accu_tag) is reserved for *) (* accumulators. *) (* *) (* + vm_env is the type of the machine environments (i.e. a function or *) (* a fixpoint) *) (* *) (* + Accumulators : At_[accumulate| accu | arg1 | ... | argn ] *) (* - representation of [accu] : tag_[....] *) (* -- tag <= 2 : encoding atom type (sorts, free vars, etc.) *) (* -- 3_[accu|fix_app] : a fixpoint blocked by an accu *) (* -- 4_[accu|vswitch] : a match blocked by an accu *) (* -- 5_[fcofix] : a cofix function *) (* -- 6_[fcofix|val] : a cofix function, val represent the value *) (* of the function applied to arg1 ... argn *) (* The [arguments] type, which is abstracted as an array, represents : *) (* tag[ _ | _ |v1|... | vn] *) (* Generally the first field is a code pointer. *) (* Do not edit this type without editing C code, especially "coq_values.h" *) type atom = | Aid of id_key | Aiddef of id_key * values | Aind of inductive (* Zippers *) type zipper = | Zapp of arguments | Zfix of vfix*arguments (* Possibly empty *) | Zswitch of vswitch type stack = zipper list type to_up = values type whd = | Vsort of sorts | Vprod of vprod | Vfun of vfun | Vfix of vfix * arguments option | Vcofix of vcofix * to_up * arguments option | Vconstr_const of int | Vconstr_block of vblock | Vatom_stk of atom * stack (*************************************************) (* Destructors ***********************************) (*************************************************) let rec whd_accu a stk = let stk = if Obj.size a = 2 then stk else Zapp (Obj.obj a) :: stk in let at = Obj.field a 1 in match Obj.tag at with | i when i <= 2 -> Vatom_stk(Obj.magic at, stk) | 3 (* fix_app tag *) -> let fa = Obj.field at 1 in let zfix = Zfix (Obj.obj (Obj.field fa 1), Obj.obj fa) in whd_accu (Obj.field at 0) (zfix :: stk) | 4 (* switch tag *) -> let zswitch = Zswitch (Obj.obj (Obj.field at 1)) in whd_accu (Obj.field at 0) (zswitch :: stk) | 5 (* cofix_tag *) -> let vcfx = Obj.obj (Obj.field at 0) in let to_up = Obj.obj a in begin match stk with | [] -> Vcofix(vcfx, to_up, None) | [Zapp args] -> Vcofix(vcfx, to_up, Some args) | _ -> assert false end | 6 (* cofix_evaluated_tag *) -> let vcofix = Obj.obj (Obj.field at 0) in let res = Obj.obj a in begin match stk with | [] -> Vcofix(vcofix, res, None) | [Zapp args] -> Vcofix(vcofix, res, Some args) | _ -> assert false end | _ -> assert false external kind_of_closure : Obj.t -> int = "coq_kind_of_closure" let whd_val : values -> whd = fun v -> let o = Obj.repr v in if Obj.is_int o then Vconstr_const (Obj.obj o) else let tag = Obj.tag o in if tag = accu_tag then ( if Obj.size o = 1 then Obj.obj o (* sort *) else if is_accumulate (fun_code o) then whd_accu o [] else (Vprod(Obj.obj o))) else if tag = Obj.closure_tag || tag = Obj.infix_tag then ( match kind_of_closure o with | 0 -> Vfun(Obj.obj o) | 1 -> Vfix(Obj.obj o, None) | 2 -> Vfix(Obj.obj (Obj.field o 1), Some (Obj.obj o)) | 3 -> Vatom_stk(Aid(RelKey(int_tcode (fun_code o) 1)), []) | _ -> Util.anomaly "Vm.whd : kind_of_closure does not work") else Vconstr_block(Obj.obj o) (************************************************) (* Abstrct machine ******************************) (************************************************) (* gestion de la pile *) external push_ra : tcode -> unit = "coq_push_ra" external push_val : values -> unit = "coq_push_val" external push_arguments : arguments -> unit = "coq_push_arguments" 
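(* Added illustrative note (a sketch, not part of the original source):
   these stack primitives, together with [push_vstack] and the
   interpreter entry point [interprete] declared just below, implement
   the calling convention of the abstract machine.  Assuming [vf] is a
   closure and [vargs] a non-empty argument block, an application is
   performed roughly as in [apply_arguments] below:

     push_ra stop;                 (* push the return address *)
     push_arguments vargs;         (* push the actual arguments *)
     interprete (fun_code vf) vf (Obj.magic vf) (nargs vargs - 1)

   i.e. a stop code is pushed as return address, then the arguments,
   and the interpreter is run on the closure's code with the closure
   itself as its environment. *)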
external push_vstack : vstack -> unit = "coq_push_vstack" (* interpreteur *) external interprete : tcode -> values -> vm_env -> int -> values = "coq_interprete_ml" (* Functions over arguments *) let nargs : arguments -> int = fun args -> (Obj.size (Obj.repr args)) - 2 let arg args i = if 0 <= i && i < (nargs args) then val_of_obj (Obj.field (Obj.repr args) (i+2)) else raise (Invalid_argument ("Vm.arg size = "^(string_of_int (nargs args))^ " acces "^(string_of_int i))) let apply_arguments vf vargs = let n = nargs vargs in if n = 0 then vf else begin push_ra stop; push_arguments vargs; interprete (fun_code vf) vf (Obj.magic vf) (n - 1) end let apply_vstack vf vstk = let n = Array.length vstk in if n = 0 then vf else begin push_ra stop; push_vstack vstk; interprete (fun_code vf) vf (Obj.magic vf) (n - 1) end (**********************************************) (* Constructors *******************************) (**********************************************) let obj_of_atom : atom -> Obj.t = fun a -> let res = Obj.new_block accu_tag 2 in Obj.set_field res 0 (Obj.repr accumulate); Obj.set_field res 1 (Obj.repr a); res (* obj_of_str_const : structured_constant -> Obj.t *) let rec obj_of_str_const str = match str with | Const_sorts s -> Obj.repr (Vsort s) | Const_ind ind -> obj_of_atom (Aind ind) | Const_b0 tag -> Obj.repr tag | Const_bn(tag, args) -> let len = Array.length args in let res = Obj.new_block tag len in for i = 0 to len - 1 do Obj.set_field res i (obj_of_str_const args.(i)) done; res let val_of_obj o = ((Obj.obj o) : values) let val_of_str_const str = val_of_obj (obj_of_str_const str) let val_of_atom a = val_of_obj (obj_of_atom a) let idkey_tbl = Hashtbl.create 31 let val_of_idkey key = try Hashtbl.find idkey_tbl key with Not_found -> let v = val_of_atom (Aid key) in Hashtbl.add idkey_tbl key v; v let val_of_rel k = val_of_idkey (RelKey k) let val_of_rel_def k v = val_of_atom(Aiddef(RelKey k, v)) let val_of_named id = val_of_idkey (VarKey id) let val_of_named_def id v = val_of_atom(Aiddef(VarKey id, v)) let val_of_constant c = val_of_idkey (ConstKey c) let val_of_constant_def n c v = let res = Obj.new_block accu_tag 2 in Obj.set_field res 0 (Obj.repr (mkAccuCond n)); Obj.set_field res 1 (Obj.repr (Aiddef(ConstKey c, v))); val_of_obj res external val_of_annot_switch : annot_switch -> values = "%identity" let mkrel_vstack k arity = let max = k + arity - 1 in Array.init arity (fun i -> val_of_rel (max - i)) (*************************************************) (** Operations manipulating data types ***********) (*************************************************) (* Functions over products *) let dom : vprod -> values = fun p -> val_of_obj (Obj.field (Obj.repr p) 0) let codom : vprod -> vfun = fun p -> (Obj.obj (Obj.field (Obj.repr p) 1)) (* Functions over vfun *) external closure_arity : vfun -> int = "coq_closure_arity" let body_of_vfun k vf = let vargs = mkrel_vstack k 1 in apply_vstack (Obj.magic vf) vargs let decompose_vfun2 k vf1 vf2 = let arity = min (closure_arity vf1) (closure_arity vf2) in assert (0 < arity && arity < Sys.max_array_length); let vargs = mkrel_vstack k arity in let v1 = apply_vstack (Obj.magic vf1) vargs in let v2 = apply_vstack (Obj.magic vf2) vargs in arity, v1, v2 (* Functions over fixpoint *) let first o = (offset_closure o (offset o)) let last o = (Obj.field o (Obj.size o - 1)) let current_fix vf = - (offset (Obj.repr vf) / 2) let unsafe_fb_code fb i = tcode_of_obj (Obj.field (Obj.repr fb) (2 * i)) let unsafe_rec_arg fb i = int_tcode (unsafe_fb_code fb i) 1 let 
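(* Added note (a sketch following the representation comment above): a
   mutual fixpoint is one block
     Ct_[ C1 | Infix_t | C2 | ... | Infix_t | Cn | fv1 | ... | fvn ]
   so the code of the i-th body is read at field 2*i of the block
   returned by [first] (see [unsafe_fb_code]), the index of its
   recursive argument is stored inside that code (see [unsafe_rec_arg]),
   and [current_fix] recovers which body a closure denotes from its
   offset in the block.  The function [rec_args] defined here collects
   the recursive-argument index of every body of the block. *)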
rec_args vf = let fb = first (Obj.repr vf) in let size = Obj.size (last fb) in Array.init size (unsafe_rec_arg fb) exception FALSE let check_fix f1 f2 = let i1, i2 = current_fix f1, current_fix f2 in (* Checking starting point *) if i1 = i2 then let fb1,fb2 = first (Obj.repr f1), first (Obj.repr f2) in let n = Obj.size (last fb1) in (* Checking number of definitions *) if n = Obj.size (last fb2) then (* Checking recursive arguments *) try for i = 0 to n - 1 do if unsafe_rec_arg fb1 i <> unsafe_rec_arg fb2 i then raise FALSE done; true with FALSE -> false else false else false (* Functions over vfix *) external atom_rel : unit -> atom array = "get_coq_atom_tbl" external realloc_atom_rel : int -> unit = "realloc_coq_atom_tbl" let relaccu_tbl = let atom_rel = atom_rel() in let len = Array.length atom_rel in for i = 0 to len - 1 do atom_rel.(i) <- Aid (RelKey i) done; ref (Array.init len mkAccuCode) let relaccu_code i = let len = Array.length !relaccu_tbl in if i < len then !relaccu_tbl.(i) else begin realloc_atom_rel i; let atom_rel = atom_rel () in let nl = Array.length atom_rel in for j = len to nl - 1 do atom_rel.(j) <- Aid(RelKey j) done; relaccu_tbl := Array.init nl (fun j -> if j < len then !relaccu_tbl.(j) else mkAccuCode j); !relaccu_tbl.(i) end let reduce_fix k vf = let fb = first (Obj.repr vf) in (* computing types *) let fc_typ = ((Obj.obj (last fb)) : tcode array) in let ndef = Array.length fc_typ in let et = offset_closure fb (2*(ndef - 1)) in let ftyp = Array.map (fun c -> interprete c crazy_val (Obj.magic et) 0) fc_typ in (* Construction of the environment of fix bodies *) let e = Obj.dup fb in for i = 0 to ndef - 1 do Obj.set_field e (2 * i) (Obj.repr (relaccu_code (k + i))) done; let fix_body i = let jump_grabrec c = offset_tcode c 2 in let c = jump_grabrec (unsafe_fb_code fb i) in let res = Obj.new_block Obj.closure_tag 2 in Obj.set_field res 0 (Obj.repr c); Obj.set_field res 1 (offset_closure e (2*i)); ((Obj.obj res) : vfun) in (Array.init ndef fix_body, ftyp) (* Functions over vcofix *) let get_fcofix vcf i = match whd_val (Obj.obj (Obj.field (Obj.repr vcf) (i+1))) with | Vcofix(vcfi, _, _) -> vcfi | _ -> assert false let current_cofix vcf = let ndef = Obj.size (last (Obj.repr vcf)) in let rec find_cofix pos = if pos < ndef then if get_fcofix vcf pos == vcf then pos else find_cofix (pos+1) else raise Not_found in try find_cofix 0 with Not_found -> assert false let check_cofix vcf1 vcf2 = (current_cofix vcf1 = current_cofix vcf2) && (Obj.size (last (Obj.repr vcf1)) = Obj.size (last (Obj.repr vcf2))) let reduce_cofix k vcf = let fc_typ = ((Obj.obj (last (Obj.repr vcf))) : tcode array) in let ndef = Array.length fc_typ in let ftyp = (* Evaluate types *) Array.map (fun c -> interprete c crazy_val (Obj.magic vcf) 0) fc_typ in (* Construction of the environment of cofix bodies *) let e = Obj.dup (Obj.repr vcf) in for i = 0 to ndef - 1 do Obj.set_field e (i+1) (Obj.repr (val_of_rel (k+i))) done; let cofix_body i = let vcfi = get_fcofix vcf i in let c = Obj.field (Obj.repr vcfi) 0 in Obj.set_field e 0 c; let atom = Obj.new_block cofix_tag 1 in let self = Obj.new_block accu_tag 2 in Obj.set_field self 0 (Obj.repr accumulate); Obj.set_field self 1 (Obj.repr atom); apply_vstack (Obj.obj e) [|Obj.obj self|] in (Array.init ndef cofix_body, ftyp) (* Functions over vblock *) let btag : vblock -> int = fun b -> Obj.tag (Obj.repr b) let bsize : vblock -> int = fun b -> Obj.size (Obj.repr b) let bfield b i = if 0 <= i && i < (bsize b) then val_of_obj (Obj.field (Obj.repr b) i) else raise 
(Invalid_argument "Vm.bfield") (* Functions over vswitch *) let check_switch sw1 sw2 = sw1.sw_annot.rtbl = sw2.sw_annot.rtbl let case_info sw = sw.sw_annot.ci let type_of_switch sw = push_vstack sw.sw_stk; interprete sw.sw_type_code crazy_val sw.sw_env 0 let branch_arg k (tag,arity) = if arity = 0 then ((Obj.magic tag):values) else let b = Obj.new_block tag arity in for i = 0 to arity - 1 do Obj.set_field b i (Obj.repr (val_of_rel (k+i))) done; val_of_obj b let apply_switch sw arg = let tc = sw.sw_annot.tailcall in if tc then (push_ra stop;push_vstack sw.sw_stk) else (push_vstack sw.sw_stk; push_ra (popstop_code (Array.length sw.sw_stk))); interprete sw.sw_code arg sw.sw_env 0 let branch_of_switch k sw = let eval_branch (_,arity as ta) = let arg = branch_arg k ta in let v = apply_switch sw arg in (arity, v) in Array.map eval_branch sw.sw_annot.rtbl (* Evaluation *) let rec whd_stack v stk = match stk with | [] -> whd_val v | Zapp args :: stkt -> whd_stack (apply_arguments v args) stkt | Zfix (f,args) :: stkt -> let o = Obj.repr v in if Obj.is_block o && Obj.tag o = accu_tag then whd_accu (Obj.repr v) stk else let v', stkt = match stkt with | Zapp args' :: stkt -> push_ra stop; push_arguments args'; push_val v; push_arguments args; let v' = interprete (fun_code f) (Obj.magic f) (Obj.magic f) (nargs args+ nargs args') in v', stkt | _ -> push_ra stop; push_val v; push_arguments args; let v' = interprete (fun_code f) (Obj.magic f) (Obj.magic f) (nargs args) in v', stkt in whd_stack v' stkt | Zswitch sw :: stkt -> let o = Obj.repr v in if Obj.is_block o && Obj.tag o = accu_tag then if Obj.tag (Obj.field o 1) < cofix_tag then whd_accu (Obj.repr v) stk else let to_up = match whd_accu (Obj.repr v) [] with | Vcofix (_, to_up, _) -> to_up | _ -> assert false in whd_stack (apply_switch sw to_up) stkt else whd_stack (apply_switch sw v) stkt let rec force_whd v stk = match whd_stack v stk with | Vatom_stk(Aiddef(_,v),stk) -> force_whd v stk | res -> res let rec eta_stack a stk v = match stk with | [] -> apply_vstack a [|v|] | Zapp args :: stk -> eta_stack (apply_arguments a args) stk v | Zfix(f,args) :: stk -> let a,stk = match stk with | Zapp args' :: stk -> push_ra stop; push_arguments args'; push_val a; push_arguments args; let a = interprete (fun_code f) (Obj.magic f) (Obj.magic f) (nargs args+ nargs args') in a, stk | _ -> push_ra stop; push_val a; push_arguments args; let a = interprete (fun_code f) (Obj.magic f) (Obj.magic f) (nargs args) in a, stk in eta_stack a stk v | Zswitch sw :: stk -> eta_stack (apply_switch sw a) stk v let eta_whd k whd = let v = val_of_rel k in match whd with | Vsort _ | Vprod _ | Vconstr_const _ | Vconstr_block _ -> assert false | Vfun f -> body_of_vfun k f | Vfix(f, None) -> push_ra stop; push_val v; interprete (fun_code f) (Obj.magic f) (Obj.magic f) 0 | Vfix(f, Some args) -> push_ra stop; push_val v; push_arguments args; interprete (fun_code f) (Obj.magic f) (Obj.magic f) (nargs args) | Vcofix(_,to_up,_) -> push_ra stop; push_val v; interprete (fun_code to_up) (Obj.magic to_up) (Obj.magic to_up) 0 | Vatom_stk(a,stk) -> eta_stack (val_of_atom a) stk v coq-8.4pl2/kernel/inductive.ml0000640000175000001440000010376512010532755015447 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = Array.length mib.mind_packets then error "Inductive.lookup_mind_specif: invalid inductive index"; (mib, mib.mind_packets.(tyi)) let find_rectype env c = let (t, l) = decompose_app 
(whd_betadeltaiota env c) in match kind_of_term t with | Ind ind -> (ind, l) | _ -> raise Not_found let find_inductive env c = let (t, l) = decompose_app (whd_betadeltaiota env c) in match kind_of_term t with | Ind ind when (fst (lookup_mind_specif env ind)).mind_finite -> (ind, l) | _ -> raise Not_found let find_coinductive env c = let (t, l) = decompose_app (whd_betadeltaiota env c) in match kind_of_term t with | Ind ind when not (fst (lookup_mind_specif env ind)).mind_finite -> (ind, l) | _ -> raise Not_found let inductive_params (mib,_) = mib.mind_nparams (************************************************************************) (* Build the substitution that replaces Rels by the appropriate *) (* inductives *) let ind_subst mind mib = let ntypes = mib.mind_ntypes in let make_Ik k = mkInd (mind,ntypes-k-1) in list_tabulate make_Ik ntypes (* Instantiate inductives in constructor type *) let constructor_instantiate mind mib c = let s = ind_subst mind mib in substl s c let instantiate_params full t args sign = let fail () = anomaly "instantiate_params: type, ctxt and args mismatch" in let (rem_args, subs, ty) = Sign.fold_rel_context (fun (_,copt,_) (largs,subs,ty) -> match (copt, largs, kind_of_term ty) with | (None, a::args, Prod(_,_,t)) -> (args, a::subs, t) | (Some b,_,LetIn(_,_,_,t)) -> (largs, (substl subs b)::subs, t) | (_,[],_) -> if full then fail() else ([], subs, ty) | _ -> fail ()) sign ~init:(args,[],t) in if rem_args <> [] then fail(); substl subs ty let full_inductive_instantiate mib params sign = let dummy = prop_sort in let t = mkArity (sign,dummy) in fst (destArity (instantiate_params true t params mib.mind_params_ctxt)) let full_constructor_instantiate ((mind,_),(mib,_),params) = let inst_ind = constructor_instantiate mind mib in (fun t -> instantiate_params true (inst_ind t) params mib.mind_params_ctxt) (************************************************************************) (************************************************************************) (* Functions to build standard types related to inductive *) (* Computing the actual sort of an applied or partially applied inductive type: I_i: forall uniformparams:utyps, forall otherparams:otyps, Type(a) uniformargs : utyps otherargs : otyps I_1:forall ...,s_1;...I_n:forall ...,s_n |- sort(C_kj(uniformargs)) = s_kj s'_k = max(..s_kj..) merge(..s'_k..) = ..s''_k.. 
-------------------------------------------------------------------- Gamma |- I_i uniformargs otherargs : phi(s''_i) where - if p=0, phi() = Prop - if p=1, phi(s) = s - if p<>1, phi(s) = sup(Set,s) Remark: Set (predicative) is encoded as Type(0) *) let sort_as_univ = function | Type u -> u | Prop Null -> type0m_univ | Prop Pos -> type0_univ let cons_subst u su subst = try (u, sup su (List.assoc u subst)) :: List.remove_assoc u subst with Not_found -> (u, su) :: subst let actualize_decl_level env lev t = let sign,s = dest_arity env t in mkArity (sign,lev) let polymorphism_on_non_applied_parameters = false (* Bind expected levels of parameters to actual levels *) (* Propagate the new levels in the signature *) let rec make_subst env = function | (_,Some _,_ as t)::sign, exp, args -> let ctx,subst = make_subst env (sign, exp, args) in t::ctx, subst | d::sign, None::exp, args -> let args = match args with _::args -> args | [] -> [] in let ctx,subst = make_subst env (sign, exp, args) in d::ctx, subst | d::sign, Some u::exp, a::args -> (* We recover the level of the argument, but we don't change the *) (* level in the corresponding type in the arity; this level in the *) (* arity is a global level which, at typing time, will be enforce *) (* to be greater than the level of the argument; this is probably *) (* a useless extra constraint *) let s = sort_as_univ (snd (dest_arity env a)) in let ctx,subst = make_subst env (sign, exp, args) in d::ctx, cons_subst u s subst | (na,None,t as d)::sign, Some u::exp, [] -> (* No more argument here: we instantiate the type with a fresh level *) (* which is first propagated to the corresponding premise in the arity *) (* (actualize_decl_level), then to the conclusion of the arity (via *) (* the substitution) *) let ctx,subst = make_subst env (sign, exp, []) in if polymorphism_on_non_applied_parameters then let s = fresh_local_univ () in let t = actualize_decl_level env (Type s) t in (na,None,t)::ctx, cons_subst u s subst else d::ctx, subst | sign, [], _ -> (* Uniform parameters are exhausted *) sign,[] | [], _, _ -> assert false let instantiate_universes env ctx ar argsorts = let args = Array.to_list argsorts in let ctx,subst = make_subst env (ctx,ar.poly_param_levels,args) in let level = subst_large_constraints subst ar.poly_level in ctx, (* Singleton type not containing types are interpretable in Prop *) if is_type0m_univ level then prop_sort (* Non singleton type not containing types are interpretable in Set *) else if is_type0_univ level then set_sort (* This is a Type with constraints *) else Type level exception SingletonInductiveBecomesProp of identifier let type_of_inductive_knowing_parameters ?(polyprop=true) env mip paramtyps = match mip.mind_arity with | Monomorphic s -> s.mind_user_arity | Polymorphic ar -> let ctx = List.rev mip.mind_arity_ctxt in let ctx,s = instantiate_universes env ctx ar paramtyps in (* The Ocaml extraction cannot handle (yet?) "Prop-polymorphism", i.e. 
the situation where a non-Prop singleton inductive becomes Prop when applied to Prop params *) if not polyprop && not (is_type0m_univ ar.poly_level) && s = prop_sort then raise (SingletonInductiveBecomesProp mip.mind_typename); mkArity (List.rev ctx,s) (* Type of a (non applied) inductive type *) let type_of_inductive env (_,mip) = type_of_inductive_knowing_parameters env mip [||] (* The max of an array of universes *) let cumulate_constructor_univ u = function | Prop Null -> u | Prop Pos -> sup type0_univ u | Type u' -> sup u u' let max_inductive_sort = Array.fold_left cumulate_constructor_univ type0m_univ (************************************************************************) (* Type of a constructor *) let type_of_constructor cstr (mib,mip) = let ind = inductive_of_constructor cstr in let specif = mip.mind_user_lc in let i = index_of_constructor cstr in let nconstr = Array.length mip.mind_consnames in if i > nconstr then error "Not enough constructors in the type."; constructor_instantiate (fst ind) mib specif.(i-1) let arities_of_specif kn (mib,mip) = let specif = mip.mind_nf_lc in Array.map (constructor_instantiate kn mib) specif let arities_of_constructors ind specif = arities_of_specif (fst ind) specif let type_of_constructors ind (mib,mip) = let specif = mip.mind_user_lc in Array.map (constructor_instantiate (fst ind) mib) specif (************************************************************************) (* Type of case predicates *) let local_rels ctxt = let (rels,_) = Sign.fold_rel_context_reverse (fun (rels,n) (_,copt,_) -> match copt with None -> (mkRel n :: rels, n+1) | Some _ -> (rels, n+1)) ~init:([],1) ctxt in rels (* Get type of inductive, with parameters instantiated *) let inductive_sort_family mip = match mip.mind_arity with | Monomorphic s -> family_of_sort s.mind_sort | Polymorphic _ -> InType let mind_arity mip = mip.mind_arity_ctxt, inductive_sort_family mip let get_instantiated_arity (mib,mip) params = let sign, s = mind_arity mip in full_inductive_instantiate mib params sign, s let elim_sorts (_,mip) = mip.mind_kelim let extended_rel_list n hyps = let rec reln l p = function | (_,None,_) :: hyps -> reln (mkRel (n+p) :: l) (p+1) hyps | (_,Some _,_) :: hyps -> reln l (p+1) hyps | [] -> l in reln [] 1 hyps let build_dependent_inductive ind (_,mip) params = let realargs,_ = list_chop mip.mind_nrealargs_ctxt mip.mind_arity_ctxt in applist (mkInd ind, List.map (lift mip.mind_nrealargs_ctxt) params @ extended_rel_list 0 realargs) (* This exception is local *) exception LocalArity of (sorts_family * sorts_family * arity_error) option let check_allowed_sort ksort specif = if not (List.exists ((=) ksort) (elim_sorts specif)) then let s = inductive_sort_family (snd specif) in raise (LocalArity (Some(ksort,s,error_elim_explain ksort s))) let is_correct_arity env c pj ind specif params = let arsign,_ = get_instantiated_arity specif params in let rec srec env pt ar u = let pt' = whd_betadeltaiota env pt in match kind_of_term pt', ar with | Prod (na1,a1,t), (_,None,a1')::ar' -> let univ = try conv env a1 a1' with NotConvertible -> raise (LocalArity None) in srec (push_rel (na1,None,a1) env) t ar' (union_constraints u univ) | Prod (_,a1,a2), [] -> (* whnf of t was not needed here! 
*) let ksort = match kind_of_term (whd_betadeltaiota env a2) with | Sort s -> family_of_sort s | _ -> raise (LocalArity None) in let dep_ind = build_dependent_inductive ind specif params in let univ = try conv env a1 dep_ind with NotConvertible -> raise (LocalArity None) in check_allowed_sort ksort specif; union_constraints u univ | _, (_,Some _,_ as d)::ar' -> srec (push_rel d env) (lift 1 pt') ar' u | _ -> raise (LocalArity None) in try srec env pj.uj_type (List.rev arsign) empty_constraint with LocalArity kinds -> error_elim_arity env ind (elim_sorts specif) c pj kinds (************************************************************************) (* Type of case branches *) (* [p] is the predicate, [i] is the constructor number (starting from 0), and [cty] is the type of the constructor (params not instantiated) *) let build_branches_type ind (_,mip as specif) params p = let build_one_branch i cty = let typi = full_constructor_instantiate (ind,specif,params) cty in let (args,ccl) = decompose_prod_assum typi in let nargs = rel_context_length args in let (_,allargs) = decompose_app ccl in let (lparams,vargs) = list_chop (inductive_params specif) allargs in let cargs = let cstr = ith_constructor_of_inductive ind (i+1) in let dep_cstr = applist (mkConstruct cstr,lparams@(local_rels args)) in vargs @ [dep_cstr] in let base = beta_appvect (lift nargs p) (Array.of_list cargs) in it_mkProd_or_LetIn base args in Array.mapi build_one_branch mip.mind_nf_lc (* [p] is the predicate, [c] is the match object, [realargs] is the list of real args of the inductive type *) let build_case_type n p c realargs = whd_betaiota (betazeta_appvect (n+1) p (Array.of_list (realargs@[c]))) let type_case_branches env (ind,largs) pj c = let specif = lookup_mind_specif env ind in let nparams = inductive_params specif in let (params,realargs) = list_chop nparams largs in let p = pj.uj_val in let univ = is_correct_arity env c pj ind specif params in let lc = build_branches_type ind specif params p in let ty = build_case_type (snd specif).mind_nrealargs_ctxt p c realargs in (lc, ty, univ) (************************************************************************) (* Checking the case annotation is relevent *) let check_case_info env indsp ci = let (mib,mip) = lookup_mind_specif env indsp in if not (eq_ind indsp ci.ci_ind) or (mib.mind_nparams <> ci.ci_npar) or (mip.mind_consnrealdecls <> ci.ci_cstr_ndecls) then raise (TypeError(env,WrongCaseInfo(indsp,ci))) (************************************************************************) (************************************************************************) (* Guard conditions for fix and cofix-points *) (* Check if t is a subterm of Rel n, and gives its specification, assuming lst already gives index of subterms with corresponding specifications of recursive arguments *) (* A powerful notion of subterm *) (* To each inductive definition corresponds an array describing the structure of recursive arguments for each constructor, we call it the recursive spec of the type (it has type recargs vect). For checking the guard, we start from the decreasing argument (Rel n) with its recursive spec. During checking the guardness condition, we collect patterns variables corresponding to subterms of n, each of them with its recursive spec. They are organised in a list lst of type (int * recargs) list which is sorted with respect to the first argument. 
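   Added illustrative example (schematic Coq syntax, not in the original
   text): for
     fix f (n:nat) := match n with O => 0 | S p => f p end
   the decreasing argument n starts with specification
   Subterm(Large, recursive spec of nat); matching n against S p gives p
   the specification Subterm(Strict, recursive spec of nat), so the
   recursive call f p is accepted, while a call f n would be rejected
   because n is only a Large subterm of itself.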
*) (*************************************************************) (* Environment annotated with marks on recursive arguments *) (* tells whether it is a strict or loose subterm *) type size = Large | Strict (* merging information *) let size_glb s1 s2 = match s1,s2 with Strict, Strict -> Strict | _ -> Large (* possible specifications for a term: - Not_subterm: when the size of a term is not related to the recursive argument of the fixpoint - Subterm: when the term is a subterm of the recursive argument the wf_paths argument specifies which subterms are recursive - Dead_code: when the term has been built by elimination over an empty type *) type subterm_spec = Subterm of (size * wf_paths) | Dead_code | Not_subterm let spec_of_tree t = lazy (if Rtree.eq_rtree (=) (Lazy.force t) mk_norec then Not_subterm else Subterm(Strict,Lazy.force t)) let subterm_spec_glb = let glb2 s1 s2 = match s1, s2 with s1, Dead_code -> s1 | Dead_code, s2 -> s2 | Not_subterm, _ -> Not_subterm | _, Not_subterm -> Not_subterm | Subterm (a1,t1), Subterm (a2,t2) -> if Rtree.eq_rtree (=) t1 t2 then Subterm (size_glb a1 a2, t1) (* branches do not return objects with same spec *) else Not_subterm in Array.fold_left glb2 Dead_code type guard_env = { env : env; (* dB of last fixpoint *) rel_min : int; (* dB of variables denoting subterms *) genv : subterm_spec Lazy.t list; } let make_renv env recarg (kn,tyi) = let mib = Environ.lookup_mind kn env in let mind_recvec = Array.map (fun mip -> mip.mind_recargs) mib.mind_packets in { env = env; rel_min = recarg+2; genv = [Lazy.lazy_from_val(Subterm(Large,mind_recvec.(tyi)))] } let push_var renv (x,ty,spec) = { env = push_rel (x,None,ty) renv.env; rel_min = renv.rel_min+1; genv = spec:: renv.genv } let assign_var_spec renv (i,spec) = { renv with genv = list_assign renv.genv (i-1) spec } let push_var_renv renv (x,ty) = push_var renv (x,ty,lazy Not_subterm) (* Fetch recursive information about a variable p *) let subterm_var p renv = try Lazy.force (List.nth renv.genv (p-1)) with Failure _ | Invalid_argument _ -> Not_subterm let push_ctxt_renv renv ctxt = let n = rel_context_length ctxt in { env = push_rel_context ctxt renv.env; rel_min = renv.rel_min+n; genv = iterate (fun ge -> lazy Not_subterm::ge) n renv.genv } let push_fix_renv renv (_,v,_ as recdef) = let n = Array.length v in { env = push_rec_types recdef renv.env; rel_min = renv.rel_min+n; genv = iterate (fun ge -> lazy Not_subterm::ge) n renv.genv } (* Definition and manipulation of the stack *) type stack_element = |SClosure of guard_env*constr |SArg of subterm_spec Lazy.t let push_stack_closures renv l stack = List.fold_right (fun h b -> (SClosure (renv,h))::b) l stack let push_stack_args l stack = List.fold_right (fun h b -> (SArg h)::b) l stack (******************************) (* {6 Computing the recursive subterms of a term (propagation of size information through Cases).} *) let lookup_subterms env ind = let (_,mip) = lookup_mind_specif env ind in mip.mind_recargs let match_inductive ind ra = match ra with | (Mrec i | Imbr i) -> eq_ind ind i | Norec -> false (* In {match c as z in ci y_s return P with |C_i x_s => t end} [branches_specif renv c_spec ci] returns an array of x_s specs knowing c_spec. *) let branches_specif renv c_spec ci = let car = (* We fetch the regular tree associated to the inductive of the match. This is just to get the number of constructors (and constructor arities) that fit the match branches without forcing c_spec. 
Note that c_spec might be more precise than [v] below, because of nested inductive types. *) let (_,mip) = lookup_mind_specif renv.env ci.ci_ind in let v = dest_subterms mip.mind_recargs in Array.map List.length v in Array.mapi (fun i nca -> (* i+1-th cstructor has arity nca *) let lvra = lazy (match Lazy.force c_spec with Subterm (_,t) when match_inductive ci.ci_ind (dest_recarg t) -> let vra = Array.of_list (dest_subterms t).(i) in assert (nca = Array.length vra); Array.map (fun t -> Lazy.force (spec_of_tree (lazy t))) vra | Dead_code -> Array.create nca Dead_code | _ -> Array.create nca Not_subterm) in list_tabulate (fun j -> lazy (Lazy.force lvra).(j)) nca) car (* [subterm_specif renv t] computes the recursive structure of [t] and compare its size with the size of the initial recursive argument of the fixpoint we are checking. [renv] collects such information about variables. *) let rec subterm_specif renv stack t = (* maybe reduction is not always necessary! *) let f,l = decompose_app (whd_betadeltaiota renv.env t) in match kind_of_term f with | Rel k -> subterm_var k renv | Case (ci,_,c,lbr) -> let stack' = push_stack_closures renv l stack in let cases_spec = branches_specif renv (lazy_subterm_specif renv [] c) ci in let stl = Array.mapi (fun i br' -> let stack_br = push_stack_args (cases_spec.(i)) stack' in subterm_specif renv stack_br br') lbr in subterm_spec_glb stl | Fix ((recindxs,i),(_,typarray,bodies as recdef)) -> (* when proving that the fixpoint f(x)=e is less than n, it is enough to prove that e is less than n assuming f is less than n furthermore when f is applied to a term which is strictly less than n, one may assume that x itself is strictly less than n *) let (ctxt,clfix) = dest_prod renv.env typarray.(i) in let oind = let env' = push_rel_context ctxt renv.env in try Some(fst(find_inductive env' clfix)) with Not_found -> None in (match oind with None -> Not_subterm (* happens if fix is polymorphic *) | Some ind -> let nbfix = Array.length typarray in let recargs = lookup_subterms renv.env ind in (* pushing the fixpoints *) let renv' = push_fix_renv renv recdef in let renv' = (* Why Strict here ? To be general, it could also be Large... *) assign_var_spec renv' (nbfix-i, lazy (Subterm(Strict,recargs))) in let decrArg = recindxs.(i) in let theBody = bodies.(i) in let nbOfAbst = decrArg+1 in let sign,strippedBody = decompose_lam_n_assum nbOfAbst theBody in (* pushing the fix parameters *) let stack' = push_stack_closures renv l stack in let renv'' = push_ctxt_renv renv' sign in let renv'' = if List.length stack' < nbOfAbst then renv'' else let decrArg = List.nth stack' decrArg in let arg_spec = stack_element_specif decrArg in assign_var_spec renv'' (1, arg_spec) in subterm_specif renv'' [] strippedBody) | Lambda (x,a,b) -> assert (l=[]); let spec,stack' = extract_stack renv a stack in subterm_specif (push_var renv (x,a,spec)) stack' b (* Metas and evars are considered OK *) | (Meta _|Evar _) -> Dead_code (* Other terms are not subterms *) | _ -> Not_subterm and lazy_subterm_specif renv stack t = lazy (subterm_specif renv stack t) and stack_element_specif = function |SClosure (h_renv,h) -> lazy_subterm_specif h_renv [] h |SArg x -> x and extract_stack renv a = function | [] -> Lazy.lazy_from_val Not_subterm , [] | h::t -> stack_element_specif h, t (* Check term c can be applied to one of the mutual fixpoints. 
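   Added note (sketch): only specifications that are strict subterms of
   the recursive argument, or that come from dead code, are accepted;
   Large subterms and Not_subterm are rejected.  Illustrative calls,
   assuming some wf_paths value t:
     check_is_subterm (lazy (Subterm (Strict, t)))   evaluates to true
     check_is_subterm (lazy (Subterm (Large, t)))    evaluates to false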
*) let check_is_subterm x = match Lazy.force x with Subterm (Strict,_) | Dead_code -> true | _ -> false (************************************************************************) exception FixGuardError of env * guard_error let error_illegal_rec_call renv fx (arg_renv,arg) = let (_,le_vars,lt_vars) = List.fold_left (fun (i,le,lt) sbt -> match Lazy.force sbt with (Subterm(Strict,_) | Dead_code) -> (i+1, le, i::lt) | (Subterm(Large,_)) -> (i+1, i::le, lt) | _ -> (i+1, le ,lt)) (1,[],[]) renv.genv in raise (FixGuardError (renv.env, RecursionOnIllegalTerm(fx,(arg_renv.env, arg), le_vars,lt_vars))) let error_partial_apply renv fx = raise (FixGuardError (renv.env,NotEnoughArgumentsForFixCall fx)) (* Check if [def] is a guarded fixpoint body with decreasing arg. given [recpos], the decreasing arguments of each mutually defined fixpoint. *) let check_one_fix renv recpos def = let nfi = Array.length recpos in (* Checks if [t] only make valid recursive calls [stack] is the list of constructor's argument specification and arguments than will be applied after reduction. example u in t where we have (match .. with |.. => t end) u *) let rec check_rec_call renv stack t = (* if [t] does not make recursive calls, it is guarded: *) if noccur_with_meta renv.rel_min nfi t then () else let (f,l) = decompose_app (whd_betaiotazeta t) in match kind_of_term f with | Rel p -> (* Test if [p] is a fixpoint (recursive call) *) if renv.rel_min <= p & p < renv.rel_min+nfi then begin List.iter (check_rec_call renv []) l; (* the position of the invoked fixpoint: *) let glob = renv.rel_min+nfi-1-p in (* the decreasing arg of the rec call: *) let np = recpos.(glob) in let stack' = push_stack_closures renv l stack in if List.length stack' <= np then error_partial_apply renv glob else (* Check the decreasing arg is smaller *) let z = List.nth stack' np in if not (check_is_subterm (stack_element_specif z)) then begin match z with |SClosure (z,z') -> error_illegal_rec_call renv glob (z,z') |SArg _ -> error_partial_apply renv glob end end else begin match pi2 (lookup_rel p renv.env) with | None -> List.iter (check_rec_call renv []) l | Some c -> try List.iter (check_rec_call renv []) l with FixGuardError _ -> check_rec_call renv stack (applist(lift p c,l)) end | Case (ci,p,c_0,lrest) -> List.iter (check_rec_call renv []) (c_0::p::l); (* compute the recarg information for the arguments of each branch *) let case_spec = branches_specif renv (lazy_subterm_specif renv [] c_0) ci in let stack' = push_stack_closures renv l stack in Array.iteri (fun k br' -> let stack_br = push_stack_args case_spec.(k) stack' in check_rec_call renv stack_br br') lrest (* Enables to traverse Fixpoint definitions in a more intelligent way, ie, the rule : if - g = fix g (y1:T1)...(yp:Tp) {struct yp} := e & - f is guarded with respect to the set of pattern variables S in a1 ... am & - f is guarded with respect to the set of pattern variables S in T1 ... Tp & - ap is a sub-term of the formal argument of f & - f is guarded with respect to the set of pattern variables S+{yp} in e then f is guarded with respect to S in (g a1 ... am). 
Eduardo 7/9/98 *) | Fix ((recindxs,i),(_,typarray,bodies as recdef)) -> List.iter (check_rec_call renv []) l; Array.iter (check_rec_call renv []) typarray; let decrArg = recindxs.(i) in let renv' = push_fix_renv renv recdef in let stack' = push_stack_closures renv l stack in Array.iteri (fun j body -> if i=j && (List.length stack' > decrArg) then let recArg = List.nth stack' decrArg in let arg_sp = stack_element_specif recArg in check_nested_fix_body renv' (decrArg+1) arg_sp body else check_rec_call renv' [] body) bodies | Const kn -> if evaluable_constant kn renv.env then try List.iter (check_rec_call renv []) l with (FixGuardError _ ) -> let value = (applist(constant_value renv.env kn, l)) in check_rec_call renv stack value else List.iter (check_rec_call renv []) l | Lambda (x,a,b) -> assert (l = []); check_rec_call renv [] a ; let spec, stack' = extract_stack renv a stack in check_rec_call (push_var renv (x,a,spec)) stack' b | Prod (x,a,b) -> assert (l = [] && stack = []); check_rec_call renv [] a; check_rec_call (push_var_renv renv (x,a)) [] b | CoFix (i,(_,typarray,bodies as recdef)) -> List.iter (check_rec_call renv []) l; Array.iter (check_rec_call renv []) typarray; let renv' = push_fix_renv renv recdef in Array.iter (check_rec_call renv' []) bodies | (Ind _ | Construct _) -> List.iter (check_rec_call renv []) l | Var id -> begin match pi2 (lookup_named id renv.env) with | None -> List.iter (check_rec_call renv []) l | Some c -> try List.iter (check_rec_call renv []) l with (FixGuardError _) -> check_rec_call renv stack (applist(c,l)) end | Sort _ -> assert (l = []) (* l is not checked because it is considered as the meta's context *) | (Evar _ | Meta _) -> () | (App _ | LetIn _ | Cast _) -> assert false (* beta zeta reduction *) and check_nested_fix_body renv decr recArgsDecrArg body = if decr = 0 then check_rec_call (assign_var_spec renv (1,recArgsDecrArg)) [] body else match kind_of_term body with | Lambda (x,a,b) -> check_rec_call renv [] a; let renv' = push_var_renv renv (x,a) in check_nested_fix_body renv' (decr-1) recArgsDecrArg b | _ -> anomaly "Not enough abstractions in fix body" in check_rec_call renv [] def let judgment_of_fixpoint (_, types, bodies) = array_map2 (fun typ body -> { uj_val = body ; uj_type = typ }) types bodies let inductive_of_mutfix env ((nvect,bodynum),(names,types,bodies as recdef)) = let nbfix = Array.length bodies in if nbfix = 0 or Array.length nvect <> nbfix or Array.length types <> nbfix or Array.length names <> nbfix or bodynum < 0 or bodynum >= nbfix then anomaly "Ill-formed fix term"; let fixenv = push_rec_types recdef env in let vdefj = judgment_of_fixpoint recdef in let raise_err env i err = error_ill_formed_rec_body env err names i fixenv vdefj in (* Check the i-th definition with recarg k *) let find_ind i k def = (* check fi does not appear in the k+1 first abstractions, gives the type of the k+1-eme abstraction (must be an inductive) *) let rec check_occur env n def = match kind_of_term (whd_betadeltaiota env def) with | Lambda (x,a,b) -> if noccur_with_meta n nbfix a then let env' = push_rel (x, None, a) env in if n = k+1 then (* get the inductive type of the fixpoint *) let (mind, _) = try find_inductive env a with Not_found -> raise_err env i (RecursionNotOnInductiveType a) in (mind, (env', b)) else check_occur env' (n+1) b else anomaly "check_one_fix: Bad occurrence of recursive call" | _ -> raise_err env i NotEnoughAbstractionInFixBody in check_occur fixenv 1 def in (* Do it on every fixpoint *) let rv = array_map2_i find_ind nvect 
bodies in (Array.map fst rv, Array.map snd rv) let check_fix env ((nvect,_),(names,_,bodies as recdef) as fix) = let (minds, rdef) = inductive_of_mutfix env fix in for i = 0 to Array.length bodies - 1 do let (fenv,body) = rdef.(i) in let renv = make_renv fenv nvect.(i) minds.(i) in try check_one_fix renv nvect body with FixGuardError (fixenv,err) -> error_ill_formed_rec_body fixenv err names i (push_rec_types recdef env) (judgment_of_fixpoint recdef) done (* let cfkey = Profile.declare_profile "check_fix";; let check_fix env fix = Profile.profile3 cfkey check_fix env fix;; *) (************************************************************************) (* Co-fixpoints. *) exception CoFixGuardError of env * guard_error let anomaly_ill_typed () = anomaly "check_one_cofix: too many arguments applied to constructor" let rec codomain_is_coind env c = let b = whd_betadeltaiota env c in match kind_of_term b with | Prod (x,a,b) -> codomain_is_coind (push_rel (x, None, a) env) b | _ -> (try find_coinductive env b with Not_found -> raise (CoFixGuardError (env, CodomainNotInductiveType b))) let check_one_cofix env nbfix def deftype = let rec check_rec_call env alreadygrd n vlra t = if not (noccur_with_meta n nbfix t) then let c,args = decompose_app (whd_betadeltaiota env t) in match kind_of_term c with | Rel p when n <= p && p < n+nbfix -> (* recursive call: must be guarded and no nested recursive call allowed *) if not alreadygrd then raise (CoFixGuardError (env,UnguardedRecursiveCall t)) else if not(List.for_all (noccur_with_meta n nbfix) args) then raise (CoFixGuardError (env,NestedRecursiveOccurrences)) | Construct (_,i as cstr_kn) -> let lra = vlra.(i-1) in let mI = inductive_of_constructor cstr_kn in let (mib,mip) = lookup_mind_specif env mI in let realargs = list_skipn mib.mind_nparams args in let rec process_args_of_constr = function | (t::lr), (rar::lrar) -> if rar = mk_norec then if noccur_with_meta n nbfix t then process_args_of_constr (lr, lrar) else raise (CoFixGuardError (env,RecCallInNonRecArgOfConstructor t)) else let spec = dest_subterms rar in check_rec_call env true n spec t; process_args_of_constr (lr, lrar) | [],_ -> () | _ -> anomaly_ill_typed () in process_args_of_constr (realargs, lra) | Lambda (x,a,b) -> assert (args = []); if noccur_with_meta n nbfix a then let env' = push_rel (x, None, a) env in check_rec_call env' alreadygrd (n+1) vlra b else raise (CoFixGuardError (env,RecCallInTypeOfAbstraction a)) | CoFix (j,(_,varit,vdefs as recdef)) -> if (List.for_all (noccur_with_meta n nbfix) args) then let nbfix = Array.length vdefs in if (array_for_all (noccur_with_meta n nbfix) varit) then let env' = push_rec_types recdef env in (Array.iter (check_rec_call env' alreadygrd (n+1) vlra) vdefs; List.iter (check_rec_call env alreadygrd n vlra) args) else raise (CoFixGuardError (env,RecCallInTypeOfDef c)) else raise (CoFixGuardError (env,UnguardedRecursiveCall c)) | Case (_,p,tm,vrest) -> if (noccur_with_meta n nbfix p) then if (noccur_with_meta n nbfix tm) then if (List.for_all (noccur_with_meta n nbfix) args) then Array.iter (check_rec_call env alreadygrd n vlra) vrest else raise (CoFixGuardError (env,RecCallInCaseFun c)) else raise (CoFixGuardError (env,RecCallInCaseArg c)) else raise (CoFixGuardError (env,RecCallInCasePred c)) | Meta _ -> () | Evar _ -> List.iter (check_rec_call env alreadygrd n vlra) args | _ -> raise (CoFixGuardError (env,NotGuardedForm t)) in let (mind, _) = codomain_is_coind env deftype in let vlra = lookup_subterms env mind in check_rec_call env false 1 
(dest_subterms vlra) def (* The function which checks that the whole block of definitions satisfies the guarded condition *) let check_cofix env (bodynum,(names,types,bodies as recdef)) = let nbfix = Array.length bodies in for i = 0 to nbfix-1 do let fixenv = push_rec_types recdef env in try check_one_cofix fixenv nbfix bodies.(i) types.(i) with CoFixGuardError (errenv,err) -> error_ill_formed_rec_body errenv err names i fixenv (judgment_of_fixpoint recdef) done coq-8.4pl2/kernel/conv_oracle.ml0000640000175000001440000000455712010532755015746 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (try Idmap.find id !var_opacity with Not_found -> default) | ConstKey c -> (try Cmap.find c !cst_opacity with Not_found -> default) | RelKey _ -> Expand let set_strategy k l = match k with | VarKey id -> var_opacity := if l=default then Idmap.remove id !var_opacity else Idmap.add id l !var_opacity | ConstKey c -> cst_opacity := if l=default then Cmap.remove c !cst_opacity else Cmap.add c l !cst_opacity | RelKey _ -> Util.error "set_strategy: RelKey" let get_transp_state () = (Idmap.fold (fun id l ts -> if l=Opaque then Idpred.remove id ts else ts) !var_opacity Idpred.full, Cmap.fold (fun c l ts -> if l=Opaque then Cpred.remove c ts else ts) !cst_opacity Cpred.full) (* Unfold the first constant only if it is "more transparent" than the second one. In case of tie, expand the second one. *) let oracle_order l2r k1 k2 = match get_strategy k1, get_strategy k2 with | Expand, _ -> true | Level n1, Opaque -> true | Level n1, Level n2 -> n1 < n2 | _ -> l2r (* use recommended default *) (* summary operations *) let init() = (cst_opacity := Cmap.empty; var_opacity := Idmap.empty) let freeze () = (!var_opacity, !cst_opacity) let unfreeze (vo,co) = (cst_opacity := co; var_opacity := vo) coq-8.4pl2/kernel/vconv.ml0000640000175000001440000001731011620555751014605 0ustar notinusersopen Names open Declarations open Term open Environ open Conv_oracle open Reduction open Closure open Vm open Csymtable open Univ let val_of_constr env c = val_of_constr (pre_env env) c (* Test la structure des piles *) let compare_zipper z1 z2 = match z1, z2 with | Zapp args1, Zapp args2 -> nargs args1 = nargs args2 | Zfix(f1,args1), Zfix(f2,args2) -> nargs args1 = nargs args2 | Zswitch _, Zswitch _ -> true | _ , _ -> false let rec compare_stack stk1 stk2 = match stk1, stk2 with | [], [] -> true | z1::stk1, z2::stk2 -> if compare_zipper z1 z2 then compare_stack stk1 stk2 else false | _, _ -> false (* Conversion *) let conv_vect fconv vect1 vect2 cu = let n = Array.length vect1 in if n = Array.length vect2 then let rcu = ref cu in for i = 0 to n - 1 do rcu := fconv vect1.(i) vect2.(i) !rcu done; !rcu else raise NotConvertible let infos = ref (create_clos_infos betaiotazeta Environ.empty_env) let rec conv_val pb k v1 v2 cu = if v1 == v2 then cu else conv_whd pb k (whd_val v1) (whd_val v2) cu and conv_whd pb k whd1 whd2 cu = match whd1, whd2 with | Vsort s1, Vsort s2 -> sort_cmp pb s1 s2 cu | Vprod p1, Vprod p2 -> let cu = conv_val CONV k (dom p1) (dom p2) cu in conv_fun pb k (codom p1) (codom p2) cu | Vfun f1, Vfun f2 -> conv_fun CONV k f1 f2 cu | Vfix (f1,None), Vfix (f2,None) -> conv_fix k f1 f2 cu | Vfix (f1,Some args1), Vfix(f2,Some args2) -> if nargs args1 <> nargs args2 then raise NotConvertible else conv_arguments k args1 args2 (conv_fix k f1 f2 cu) | Vcofix (cf1,_,None), Vcofix (cf2,_,None) -> conv_cofix k cf1 cf2 cu | Vcofix 
(cf1,_,Some args1), Vcofix (cf2,_,Some args2) -> if nargs args1 <> nargs args2 then raise NotConvertible else conv_arguments k args1 args2 (conv_cofix k cf1 cf2 cu) | Vconstr_const i1, Vconstr_const i2 -> if i1 = i2 then cu else raise NotConvertible | Vconstr_block b1, Vconstr_block b2 -> let sz = bsize b1 in if btag b1 = btag b2 && sz = bsize b2 then let rcu = ref cu in for i = 0 to sz - 1 do rcu := conv_val CONV k (bfield b1 i) (bfield b2 i) !rcu done; !rcu else raise NotConvertible | Vatom_stk(a1,stk1), Vatom_stk(a2,stk2) -> conv_atom pb k a1 stk1 a2 stk2 cu | Vfun _, _ | _, Vfun _ -> conv_val CONV (k+1) (eta_whd k whd1) (eta_whd k whd2) cu | _, Vatom_stk(Aiddef(_,v),stk) -> conv_whd pb k whd1 (force_whd v stk) cu | Vatom_stk(Aiddef(_,v),stk), _ -> conv_whd pb k (force_whd v stk) whd2 cu | _, _ -> raise NotConvertible and conv_atom pb k a1 stk1 a2 stk2 cu = match a1, a2 with | Aind (kn1,i1), Aind(kn2,i2) -> if eq_ind (kn1,i1) (kn2,i2) && compare_stack stk1 stk2 then conv_stack k stk1 stk2 cu else raise NotConvertible | Aid ik1, Aid ik2 -> if ik1 = ik2 && compare_stack stk1 stk2 then conv_stack k stk1 stk2 cu else raise NotConvertible | Aiddef(ik1,v1), Aiddef(ik2,v2) -> begin try if eq_table_key ik1 ik2 && compare_stack stk1 stk2 then conv_stack k stk1 stk2 cu else raise NotConvertible with NotConvertible -> if oracle_order false ik1 ik2 then conv_whd pb k (whd_stack v1 stk1) (Vatom_stk(a2,stk2)) cu else conv_whd pb k (Vatom_stk(a1,stk1)) (whd_stack v2 stk2) cu end | Aiddef(ik1,v1), _ -> conv_whd pb k (force_whd v1 stk1) (Vatom_stk(a2,stk2)) cu | _, Aiddef(ik2,v2) -> conv_whd pb k (Vatom_stk(a1,stk1)) (force_whd v2 stk2) cu | _, _ -> raise NotConvertible and conv_stack k stk1 stk2 cu = match stk1, stk2 with | [], [] -> cu | Zapp args1 :: stk1, Zapp args2 :: stk2 -> conv_stack k stk1 stk2 (conv_arguments k args1 args2 cu) | Zfix(f1,args1) :: stk1, Zfix(f2,args2) :: stk2 -> conv_stack k stk1 stk2 (conv_arguments k args1 args2 (conv_fix k f1 f2 cu)) | Zswitch sw1 :: stk1, Zswitch sw2 :: stk2 -> if check_switch sw1 sw2 then let vt1,vt2 = type_of_switch sw1, type_of_switch sw2 in let rcu = ref (conv_val CONV k vt1 vt2 cu) in let b1, b2 = branch_of_switch k sw1, branch_of_switch k sw2 in for i = 0 to Array.length b1 - 1 do rcu := conv_val CONV (k + fst b1.(i)) (snd b1.(i)) (snd b2.(i)) !rcu done; conv_stack k stk1 stk2 !rcu else raise NotConvertible | _, _ -> raise NotConvertible and conv_fun pb k f1 f2 cu = if f1 == f2 then cu else let arity,b1,b2 = decompose_vfun2 k f1 f2 in conv_val pb (k+arity) b1 b2 cu and conv_fix k f1 f2 cu = if f1 == f2 then cu else if check_fix f1 f2 then let bf1, tf1 = reduce_fix k f1 in let bf2, tf2 = reduce_fix k f2 in let cu = conv_vect (conv_val CONV k) tf1 tf2 cu in conv_vect (conv_fun CONV (k + Array.length tf1)) bf1 bf2 cu else raise NotConvertible and conv_cofix k cf1 cf2 cu = if cf1 == cf2 then cu else if check_cofix cf1 cf2 then let bcf1, tcf1 = reduce_cofix k cf1 in let bcf2, tcf2 = reduce_cofix k cf2 in let cu = conv_vect (conv_val CONV k) tcf1 tcf2 cu in conv_vect (conv_val CONV (k + Array.length tcf1)) bcf1 bcf2 cu else raise NotConvertible and conv_arguments k args1 args2 cu = if args1 == args2 then cu else let n = nargs args1 in if n = nargs args2 then let rcu = ref cu in for i = 0 to n - 1 do rcu := conv_val CONV k (arg args1 i) (arg args2 i) !rcu done; !rcu else raise NotConvertible let rec conv_eq pb t1 t2 cu = if t1 == t2 then cu else match kind_of_term t1, kind_of_term t2 with | Rel n1, Rel n2 -> if n1 = n2 then cu else raise NotConvertible | 
Meta m1, Meta m2 -> if m1 = m2 then cu else raise NotConvertible | Var id1, Var id2 -> if id1 = id2 then cu else raise NotConvertible | Sort s1, Sort s2 -> sort_cmp pb s1 s2 cu | Cast (c1,_,_), _ -> conv_eq pb c1 t2 cu | _, Cast (c2,_,_) -> conv_eq pb t1 c2 cu | Prod (_,t1,c1), Prod (_,t2,c2) -> conv_eq pb c1 c2 (conv_eq CONV t1 t2 cu) | Lambda (_,t1,c1), Lambda (_,t2,c2) -> conv_eq CONV c1 c2 cu | LetIn (_,b1,t1,c1), LetIn (_,b2,t2,c2) -> conv_eq pb c1 c2 (conv_eq CONV b1 b2 cu) | App (c1,l1), App (c2,l2) -> conv_eq_vect l1 l2 (conv_eq CONV c1 c2 cu) | Evar (e1,l1), Evar (e2,l2) -> if e1 = e2 then conv_eq_vect l1 l2 cu else raise NotConvertible | Const c1, Const c2 -> if eq_constant c1 c2 then cu else raise NotConvertible | Ind c1, Ind c2 -> if eq_ind c1 c2 then cu else raise NotConvertible | Construct c1, Construct c2 -> if eq_constructor c1 c2 then cu else raise NotConvertible | Case (_,p1,c1,bl1), Case (_,p2,c2,bl2) -> let pcu = conv_eq CONV p1 p2 cu in let ccu = conv_eq CONV c1 c2 pcu in conv_eq_vect bl1 bl2 ccu | Fix (ln1,(_,tl1,bl1)), Fix (ln2,(_,tl2,bl2)) -> if ln1 = ln2 then conv_eq_vect tl1 tl2 (conv_eq_vect bl1 bl2 cu) else raise NotConvertible | CoFix(ln1,(_,tl1,bl1)), CoFix(ln2,(_,tl2,bl2)) -> if ln1 = ln2 then conv_eq_vect tl1 tl2 (conv_eq_vect bl1 bl2 cu) else raise NotConvertible | _ -> raise NotConvertible and conv_eq_vect vt1 vt2 cu = let len = Array.length vt1 in if len = Array.length vt2 then let rcu = ref cu in for i = 0 to len-1 do rcu := conv_eq CONV vt1.(i) vt2.(i) !rcu done; !rcu else raise NotConvertible let vconv pb env t1 t2 = let cu = try conv_eq pb t1 t2 empty_constraint with NotConvertible -> infos := create_clos_infos betaiotazeta env; let v1 = val_of_constr env t1 in let v2 = val_of_constr env t2 in let cu = conv_val pb (nb_rel env) v1 v2 empty_constraint in cu in cu let _ = Reduction.set_vm_conv vconv let use_vm = ref false let set_use_vm b = use_vm := b; if b then Reduction.set_default_conv (fun cv_pb ?(l2r=false) -> vconv cv_pb) else Reduction.set_default_conv (fun cv_pb ?(l2r=false) -> Reduction.conv_cmp cv_pb) let use_vm _ = !use_vm coq-8.4pl2/kernel/reduction.ml0000640000175000001440000004711012010532755015440 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* None | ConstKey cst when not (Cpred.mem cst csts) -> None | _ -> unfold_reference infos k let rec is_empty_stack = function [] -> true | Zupdate _::s -> is_empty_stack s | Zshift _::s -> is_empty_stack s | _ -> false (* Compute the lift to be performed on a term placed in a given stack *) let el_stack el stk = let n = List.fold_left (fun i z -> match z with Zshift n -> i+n | _ -> i) 0 stk in el_shft n el let compare_stack_shape stk1 stk2 = let rec compare_rec bal stk1 stk2 = match (stk1,stk2) with ([],[]) -> bal=0 | ((Zupdate _|Zshift _)::s1, _) -> compare_rec bal s1 stk2 | (_, (Zupdate _|Zshift _)::s2) -> compare_rec bal stk1 s2 | (Zapp l1::s1, _) -> compare_rec (bal+Array.length l1) s1 stk2 | (_, Zapp l2::s2) -> compare_rec (bal-Array.length l2) stk1 s2 | (Zcase(c1,_,_)::s1, Zcase(c2,_,_)::s2) -> bal=0 (* && c1.ci_ind = c2.ci_ind *) && compare_rec 0 s1 s2 | (Zfix(_,a1)::s1, Zfix(_,a2)::s2) -> bal=0 && compare_rec 0 a1 a2 && compare_rec 0 s1 s2 | (_,_) -> false in compare_rec 0 stk1 stk2 type lft_constr_stack_elt = Zlapp of (lift * fconstr) array | Zlfix of (lift * fconstr) * lft_constr_stack | Zlcase of case_info * lift * fconstr * fconstr array and lft_constr_stack = lft_constr_stack_elt 
list let rec zlapp v = function Zlapp v2 :: s -> zlapp (Array.append v v2) s | s -> Zlapp v :: s let pure_stack lfts stk = let rec pure_rec lfts stk = match stk with [] -> (lfts,[]) | zi::s -> (match (zi,pure_rec lfts s) with (Zupdate _,lpstk) -> lpstk | (Zshift n,(l,pstk)) -> (el_shft n l, pstk) | (Zapp a, (l,pstk)) -> (l,zlapp (Array.map (fun t -> (l,t)) a) pstk) | (Zfix(fx,a),(l,pstk)) -> let (lfx,pa) = pure_rec l a in (l, Zlfix((lfx,fx),pa)::pstk) | (Zcase(ci,p,br),(l,pstk)) -> (l,Zlcase(ci,l,p,br)::pstk)) in snd (pure_rec lfts stk) (****************************************************************************) (* Reduction Functions *) (****************************************************************************) let whd_betaiota t = whd_val (create_clos_infos betaiota empty_env) (inject t) let nf_betaiota t = norm_val (create_clos_infos betaiota empty_env) (inject t) let whd_betaiotazeta x = match kind_of_term x with | (Sort _|Var _|Meta _|Evar _|Const _|Ind _|Construct _| Prod _|Lambda _|Fix _|CoFix _) -> x | _ -> whd_val (create_clos_infos betaiotazeta empty_env) (inject x) let whd_betadeltaiota env t = match kind_of_term t with | (Sort _|Meta _|Evar _|Ind _|Construct _| Prod _|Lambda _|Fix _|CoFix _) -> t | _ -> whd_val (create_clos_infos betadeltaiota env) (inject t) let whd_betadeltaiota_nolet env t = match kind_of_term t with | (Sort _|Meta _|Evar _|Ind _|Construct _| Prod _|Lambda _|Fix _|CoFix _|LetIn _) -> t | _ -> whd_val (create_clos_infos betadeltaiotanolet env) (inject t) (* Beta *) let beta_appvect c v = let rec stacklam env t stack = match kind_of_term t, stack with Lambda(_,_,c), arg::stacktl -> stacklam (arg::env) c stacktl | _ -> applist (substl env t, stack) in stacklam [] c (Array.to_list v) let betazeta_appvect n c v = let rec stacklam n env t stack = if n = 0 then applist (substl env t, stack) else match kind_of_term t, stack with Lambda(_,_,c), arg::stacktl -> stacklam (n-1) (arg::env) c stacktl | LetIn(_,b,_,c), _ -> stacklam (n-1) (b::env) c stack | _ -> anomaly "Not enough lambda/let's" in stacklam n [] c (Array.to_list v) (********************************************************************) (* Conversion *) (********************************************************************) (* Conversion utility functions *) type 'a conversion_function = env -> 'a -> 'a -> Univ.constraints type 'a trans_conversion_function = transparent_state -> env -> 'a -> 'a -> Univ.constraints exception NotConvertible exception NotConvertibleVect of int let compare_stacks f fmind lft1 stk1 lft2 stk2 cuniv = let rec cmp_rec pstk1 pstk2 cuniv = match (pstk1,pstk2) with | (z1::s1, z2::s2) -> let cu1 = cmp_rec s1 s2 cuniv in (match (z1,z2) with | (Zlapp a1,Zlapp a2) -> array_fold_right2 f a1 a2 cu1 | (Zlfix(fx1,a1),Zlfix(fx2,a2)) -> let cu2 = f fx1 fx2 cu1 in cmp_rec a1 a2 cu2 | (Zlcase(ci1,l1,p1,br1),Zlcase(ci2,l2,p2,br2)) -> if not (fmind ci1.ci_ind ci2.ci_ind) then raise NotConvertible; let cu2 = f (l1,p1) (l2,p2) cu1 in array_fold_right2 (fun c1 c2 -> f (l1,c1) (l2,c2)) br1 br2 cu2 | _ -> assert false) | _ -> cuniv in if compare_stack_shape stk1 stk2 then cmp_rec (pure_stack lft1 stk1) (pure_stack lft2 stk2) cuniv else raise NotConvertible (* Convertibility of sorts *) (* The sort cumulativity is Prop <= Set <= Type 1 <= ... <= Type i <= ... 
and this holds whatever Set is predicative or impredicative *) type conv_pb = | CONV | CUMUL let sort_cmp pb s0 s1 cuniv = match (s0,s1) with | (Prop c1, Prop c2) when pb = CUMUL -> if c1 = Null or c2 = Pos then cuniv (* Prop <= Set *) else raise NotConvertible | (Prop c1, Prop c2) -> if c1 = c2 then cuniv else raise NotConvertible | (Prop c1, Type u) when pb = CUMUL -> assert (is_univ_variable u); cuniv | (Type u1, Type u2) -> assert (is_univ_variable u2); (match pb with | CONV -> enforce_eq u1 u2 cuniv | CUMUL -> enforce_geq u2 u1 cuniv) | (_, _) -> raise NotConvertible let conv_sort env s0 s1 = sort_cmp CONV s0 s1 empty_constraint let conv_sort_leq env s0 s1 = sort_cmp CUMUL s0 s1 empty_constraint let rec no_arg_available = function | [] -> true | Zupdate _ :: stk -> no_arg_available stk | Zshift _ :: stk -> no_arg_available stk | Zapp v :: stk -> Array.length v = 0 && no_arg_available stk | Zcase _ :: _ -> true | Zfix _ :: _ -> true let rec no_nth_arg_available n = function | [] -> true | Zupdate _ :: stk -> no_nth_arg_available n stk | Zshift _ :: stk -> no_nth_arg_available n stk | Zapp v :: stk -> let k = Array.length v in if n >= k then no_nth_arg_available (n-k) stk else false | Zcase _ :: _ -> true | Zfix _ :: _ -> true let rec no_case_available = function | [] -> true | Zupdate _ :: stk -> no_case_available stk | Zshift _ :: stk -> no_case_available stk | Zapp _ :: stk -> no_case_available stk | Zcase _ :: _ -> false | Zfix _ :: _ -> true let in_whnf (t,stk) = match fterm_of t with | (FLetIn _ | FCases _ | FApp _ | FCLOS _ | FLIFT _ | FCast _) -> false | FLambda _ -> no_arg_available stk | FConstruct _ -> no_case_available stk | FCoFix _ -> no_case_available stk | FFix(((ri,n),(_,_,_)),_) -> no_nth_arg_available ri.(n) stk | (FFlex _ | FProd _ | FEvar _ | FInd _ | FAtom _ | FRel _) -> true | FLOCKED -> assert false (* Conversion between [lft1]term1 and [lft2]term2 *) let rec ccnv cv_pb l2r infos lft1 lft2 term1 term2 cuniv = eqappr cv_pb l2r infos (lft1, (term1,[])) (lft2, (term2,[])) cuniv (* Conversion between [lft1](hd1 v1) and [lft2](hd2 v2) *) and eqappr cv_pb l2r infos (lft1,st1) (lft2,st2) cuniv = Util.check_for_interrupt (); (* First head reduce both terms *) let rec whd_both (t1,stk1) (t2,stk2) = let st1' = whd_stack (snd infos) t1 stk1 in let st2' = whd_stack (snd infos) t2 stk2 in (* Now, whd_stack on term2 might have modified st1 (due to sharing), and st1 might not be in whnf anymore. If so, we iterate ccnv. 
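(* A rough sketch, assuming the in-place update machinery behind
   [whd_stack] (cf. the [Zupdate] marks handled in this file): reducing
   the second state may overwrite a shared closure reachable from the
   first one, so [st1'] can lose its whnf status; hence the loop keeps
   going until [in_whnf st1'] holds. *)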
*) if in_whnf st1' then (st1',st2') else whd_both st1' st2' in let ((hd1,v1),(hd2,v2)) = whd_both st1 st2 in let appr1 = (lft1,(hd1,v1)) and appr2 = (lft2,(hd2,v2)) in (* compute the lifts that apply to the head of the term (hd1 and hd2) *) let el1 = el_stack lft1 v1 in let el2 = el_stack lft2 v2 in match (fterm_of hd1, fterm_of hd2) with (* case of leaves *) | (FAtom a1, FAtom a2) -> (match kind_of_term a1, kind_of_term a2 with | (Sort s1, Sort s2) -> if not (is_empty_stack v1 && is_empty_stack v2) then anomaly "conversion was given ill-typed terms (Sort)"; sort_cmp cv_pb s1 s2 cuniv | (Meta n, Meta m) -> if n=m then convert_stacks l2r infos lft1 lft2 v1 v2 cuniv else raise NotConvertible | _ -> raise NotConvertible) | (FEvar ((ev1,args1),env1), FEvar ((ev2,args2),env2)) -> if ev1=ev2 then let u1 = convert_stacks l2r infos lft1 lft2 v1 v2 cuniv in convert_vect l2r infos el1 el2 (Array.map (mk_clos env1) args1) (Array.map (mk_clos env2) args2) u1 else raise NotConvertible (* 2 index known to be bound to no constant *) | (FRel n, FRel m) -> if reloc_rel n el1 = reloc_rel m el2 then convert_stacks l2r infos lft1 lft2 v1 v2 cuniv else raise NotConvertible (* 2 constants, 2 local defined vars or 2 defined rels *) | (FFlex fl1, FFlex fl2) -> (try (* try first intensional equality *) if eq_table_key fl1 fl2 then convert_stacks l2r infos lft1 lft2 v1 v2 cuniv else raise NotConvertible with NotConvertible -> (* else the oracle tells which constant is to be expanded *) let (app1,app2) = if Conv_oracle.oracle_order l2r fl1 fl2 then match unfold_reference infos fl1 with | Some def1 -> ((lft1, whd_stack (snd infos) def1 v1), appr2) | None -> (match unfold_reference infos fl2 with | Some def2 -> (appr1, (lft2, whd_stack (snd infos) def2 v2)) | None -> raise NotConvertible) else match unfold_reference infos fl2 with | Some def2 -> (appr1, (lft2, whd_stack (snd infos) def2 v2)) | None -> (match unfold_reference infos fl1 with | Some def1 -> ((lft1, whd_stack (snd infos) def1 v1), appr2) | None -> raise NotConvertible) in eqappr cv_pb l2r infos app1 app2 cuniv) (* other constructors *) | (FLambda _, FLambda _) -> (* Inconsistency: we tolerate that v1, v2 contain shift and update but we throw them away *) if not (is_empty_stack v1 && is_empty_stack v2) then anomaly "conversion was given ill-typed terms (FLambda)"; let (_,ty1,bd1) = destFLambda mk_clos hd1 in let (_,ty2,bd2) = destFLambda mk_clos hd2 in let u1 = ccnv CONV l2r infos el1 el2 ty1 ty2 cuniv in ccnv CONV l2r infos (el_lift el1) (el_lift el2) bd1 bd2 u1 | (FProd (_,c1,c2), FProd (_,c'1,c'2)) -> if not (is_empty_stack v1 && is_empty_stack v2) then anomaly "conversion was given ill-typed terms (FProd)"; (* Luo's system *) let u1 = ccnv CONV l2r infos el1 el2 c1 c'1 cuniv in ccnv cv_pb l2r infos (el_lift el1) (el_lift el2) c2 c'2 u1 (* Eta-expansion on the fly *) | (FLambda _, _) -> if v1 <> [] then anomaly "conversion was given unreduced term (FLambda)"; let (_,_ty1,bd1) = destFLambda mk_clos hd1 in eqappr CONV l2r infos (el_lift lft1, (bd1, [])) (el_lift lft2, (hd2, eta_expand_stack v2)) cuniv | (_, FLambda _) -> if v2 <> [] then anomaly "conversion was given unreduced term (FLambda)"; let (_,_ty2,bd2) = destFLambda mk_clos hd2 in eqappr CONV l2r infos (el_lift lft1, (hd1, eta_expand_stack v1)) (el_lift lft2, (bd2, [])) cuniv (* only one constant, defined var or defined rel *) | (FFlex fl1, _) -> (match unfold_reference infos fl1 with | Some def1 -> eqappr cv_pb l2r infos (lft1, whd_stack (snd infos) def1 v1) appr2 cuniv | None -> raise 
NotConvertible) | (_, FFlex fl2) -> (match unfold_reference infos fl2 with | Some def2 -> eqappr cv_pb l2r infos appr1 (lft2, whd_stack (snd infos) def2 v2) cuniv | None -> raise NotConvertible) (* Inductive types: MutInd MutConstruct Fix Cofix *) | (FInd ind1, FInd ind2) -> if eq_ind ind1 ind2 then convert_stacks l2r infos lft1 lft2 v1 v2 cuniv else raise NotConvertible | (FConstruct (ind1,j1), FConstruct (ind2,j2)) -> if j1 = j2 && eq_ind ind1 ind2 then convert_stacks l2r infos lft1 lft2 v1 v2 cuniv else raise NotConvertible | (FFix ((op1,(_,tys1,cl1)),e1), FFix((op2,(_,tys2,cl2)),e2)) -> if op1 = op2 then let n = Array.length cl1 in let fty1 = Array.map (mk_clos e1) tys1 in let fty2 = Array.map (mk_clos e2) tys2 in let fcl1 = Array.map (mk_clos (subs_liftn n e1)) cl1 in let fcl2 = Array.map (mk_clos (subs_liftn n e2)) cl2 in let u1 = convert_vect l2r infos el1 el2 fty1 fty2 cuniv in let u2 = convert_vect l2r infos (el_liftn n el1) (el_liftn n el2) fcl1 fcl2 u1 in convert_stacks l2r infos lft1 lft2 v1 v2 u2 else raise NotConvertible | (FCoFix ((op1,(_,tys1,cl1)),e1), FCoFix((op2,(_,tys2,cl2)),e2)) -> if op1 = op2 then let n = Array.length cl1 in let fty1 = Array.map (mk_clos e1) tys1 in let fty2 = Array.map (mk_clos e2) tys2 in let fcl1 = Array.map (mk_clos (subs_liftn n e1)) cl1 in let fcl2 = Array.map (mk_clos (subs_liftn n e2)) cl2 in let u1 = convert_vect l2r infos el1 el2 fty1 fty2 cuniv in let u2 = convert_vect l2r infos (el_liftn n el1) (el_liftn n el2) fcl1 fcl2 u1 in convert_stacks l2r infos lft1 lft2 v1 v2 u2 else raise NotConvertible (* Should not happen because both (hd1,v1) and (hd2,v2) are in whnf *) | ( (FLetIn _, _) | (FCases _,_) | (FApp _,_) | (FCLOS _,_) | (FLIFT _,_) | (_, FLetIn _) | (_,FCases _) | (_,FApp _) | (_,FCLOS _) | (_,FLIFT _) | (FLOCKED,_) | (_,FLOCKED) ) -> assert false (* In all other cases, terms are not convertible *) | _ -> raise NotConvertible and convert_stacks l2r infos lft1 lft2 stk1 stk2 cuniv = compare_stacks (fun (l1,t1) (l2,t2) c -> ccnv CONV l2r infos l1 l2 t1 t2 c) (eq_ind) lft1 stk1 lft2 stk2 cuniv and convert_vect l2r infos lft1 lft2 v1 v2 cuniv = let lv1 = Array.length v1 in let lv2 = Array.length v2 in if lv1 = lv2 then let rec fold n univ = if n >= lv1 then univ else let u1 = ccnv CONV l2r infos lft1 lft2 v1.(n) v2.(n) univ in fold (n+1) u1 in fold 0 cuniv else raise NotConvertible let clos_fconv trans cv_pb l2r evars env t1 t2 = let infos = trans, create_clos_infos ~evars betaiotazeta env in ccnv cv_pb l2r infos el_id el_id (inject t1) (inject t2) empty_constraint let trans_fconv reds cv_pb l2r evars env t1 t2 = if eq_constr t1 t2 then empty_constraint else clos_fconv reds cv_pb l2r evars env t1 t2 let trans_conv_cmp ?(l2r=false) conv reds = trans_fconv reds conv l2r (fun _->None) let trans_conv ?(l2r=false) ?(evars=fun _->None) reds = trans_fconv reds CONV l2r evars let trans_conv_leq ?(l2r=false) ?(evars=fun _->None) reds = trans_fconv reds CUMUL l2r evars let fconv = trans_fconv (Idpred.full, Cpred.full) let conv_cmp ?(l2r=false) cv_pb = fconv cv_pb l2r (fun _->None) let conv ?(l2r=false) ?(evars=fun _->None) = fconv CONV l2r evars let conv_leq ?(l2r=false) ?(evars=fun _->None) = fconv CUMUL l2r evars let conv_leq_vecti ?(l2r=false) ?(evars=fun _->None) env v1 v2 = array_fold_left2_i (fun i c t1 t2 -> let c' = try conv_leq ~l2r ~evars env t1 t2 with NotConvertible -> raise (NotConvertibleVect i) in union_constraints c c') empty_constraint v1 v2 (* option for conversion *) let vm_conv = ref (fun cv_pb -> fconv cv_pb false (fun 
_->None)) let set_vm_conv f = vm_conv := f let vm_conv cv_pb env t1 t2 = try !vm_conv cv_pb env t1 t2 with Not_found | Invalid_argument _ -> (* If compilation fails, fall-back to closure conversion *) fconv cv_pb false (fun _->None) env t1 t2 let default_conv = ref (fun cv_pb ?(l2r=false) -> fconv cv_pb l2r (fun _->None)) let set_default_conv f = default_conv := f let default_conv cv_pb ?(l2r=false) env t1 t2 = try !default_conv ~l2r cv_pb env t1 t2 with Not_found | Invalid_argument _ -> (* If compilation fails, fall-back to closure conversion *) fconv cv_pb false (fun _->None) env t1 t2 let default_conv_leq = default_conv CUMUL (* let convleqkey = Profile.declare_profile "Kernel_reduction.conv_leq";; let conv_leq env t1 t2 = Profile.profile4 convleqkey conv_leq env t1 t2;; let convkey = Profile.declare_profile "Kernel_reduction.conv";; let conv env t1 t2 = Profile.profile4 convleqkey conv env t1 t2;; *) (********************************************************************) (* Special-Purpose Reduction *) (********************************************************************) (* pseudo-reduction rule: * [hnf_prod_app env s (Prod(_,B)) N --> B[N] * with an HNF on the first argument to produce a product. * if this does not work, then we use the string S as part of our * error message. *) let hnf_prod_app env t n = match kind_of_term (whd_betadeltaiota env t) with | Prod (_,_,b) -> subst1 n b | _ -> anomaly "hnf_prod_app: Need a product" let hnf_prod_applist env t nl = List.fold_left (hnf_prod_app env) t nl (* Dealing with arities *) let dest_prod env = let rec decrec env m c = let t = whd_betadeltaiota env c in match kind_of_term t with | Prod (n,a,c0) -> let d = (n,None,a) in decrec (push_rel d env) (add_rel_decl d m) c0 | _ -> m,t in decrec env empty_rel_context (* The same but preserving lets *) let dest_prod_assum env = let rec prodec_rec env l ty = let rty = whd_betadeltaiota_nolet env ty in match kind_of_term rty with | Prod (x,t,c) -> let d = (x,None,t) in prodec_rec (push_rel d env) (add_rel_decl d l) c | LetIn (x,b,t,c) -> let d = (x,Some b,t) in prodec_rec (push_rel d env) (add_rel_decl d l) c | Cast (c,_,_) -> prodec_rec env l c | _ -> l,rty in prodec_rec env empty_rel_context exception NotArity let dest_arity env c = let l, c = dest_prod_assum env c in match kind_of_term c with | Sort s -> l,s | _ -> raise NotArity let is_arity env c = try let _ = dest_arity env c in true with NotArity -> false coq-8.4pl2/kernel/doc.tex0000640000175000001440000000042506771170177014414 0ustar notinusers \newpage \section*{The Coq kernel} \ocwsection \label{kernel} This chapter describes the \Coq\ kernel, which is a type checker for the \CCI. The modules of the kernel are organized as follows. \bigskip \begin{center}\epsfig{file=kernel.dep.ps,width=\linewidth}\end{center} coq-8.4pl2/kernel/indtypes.ml0000640000175000001440000006123412010532755015306 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* idset | c::cl -> if Idset.mem c idset then raise (InductiveError (SameNamesConstructors c)) else check (Idset.add c idset) cl in check (* [mind_check_names mie] checks the names of an inductive types declaration, and raises the corresponding exceptions when two types or two constructors have the same name. 
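(* Informal illustration only: in a mutual block such as
   [Inductive t := C : t with u := C : u], the second [C] would be
   rejected below with [SameNamesConstructors C], and a duplicated type
   name would be rejected with [SameNamesTypes]. *)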
*) let mind_check_names mie = let rec check indset cstset = function | [] -> () | ind::inds -> let id = ind.mind_entry_typename in let cl = ind.mind_entry_consnames in if Idset.mem id indset then raise (InductiveError (SameNamesTypes id)) else let cstset' = check_constructors_names cstset cl in check (Idset.add id indset) cstset' inds in check Idset.empty Idset.empty mie.mind_entry_inds (* The above verification is not necessary from the kernel point of vue since inductive and constructors are not referred to by their name, but only by the name of the inductive packet and an index. *) (************************************************************************) (************************************************************************) (* Typing the arities and constructor types *) let is_logic_type t = (t.utj_type = prop_sort) (* [infos] is a sequence of pair [islogic,issmall] for each type in the product of a constructor or arity *) let is_small infos = List.for_all (fun (logic,small) -> small) infos let is_logic_constr infos = List.for_all (fun (logic,small) -> logic) infos (* An inductive definition is a "unit" if it has only one constructor and that all arguments expected by this constructor are logical, this is the case for equality, conjunction of logical properties *) let is_unit constrsinfos = match constrsinfos with (* One info = One constructor *) | [constrinfos] -> is_logic_constr constrinfos | [] -> (* type without constructors *) true | _ -> false let rec infos_and_sort env t = let t = whd_betadeltaiota env t in match kind_of_term t with | Prod (name,c1,c2) -> let (varj,_) = infer_type env c1 in let env1 = Environ.push_rel (name,None,varj.utj_val) env in let logic = is_logic_type varj in let small = Term.is_small varj.utj_type in (logic,small) :: (infos_and_sort env1 c2) | _ when is_constructor_head t -> [] | _ -> (* don't fail if not positive, it is tested later *) [] let small_unit constrsinfos = let issmall = List.for_all is_small constrsinfos and isunit = is_unit constrsinfos in issmall, isunit (* Computing the levels of polymorphic inductive types For each inductive type of a block that is of level u_i, we have the constraints that u_i >= v_i where v_i is the type level of the types of the constructors of this inductive type. Each v_i depends of some of the u_i and of an extra (maybe non variable) universe, say w_i that summarize all the other constraints. 
Typically, for three inductive types, we could have u1,u2,u3,w1 <= u1 u1 w2 <= u2 u2,u3,w3 <= u3 From this system of inequations, we shall deduce w1,w2,w3 <= u1 w1,w2 <= u2 w1,w2,w3 <= u3 *) let extract_level (_,_,_,lc,lev) = (* Enforce that the level is not in Prop if more than two constructors *) if Array.length lc >= 2 then sup type0_univ lev else lev let inductive_levels arities inds = let levels = Array.map pi3 arities in let cstrs_levels = Array.map extract_level inds in (* Take the transitive closure of the system of constructors *) (* level constraints and remove the recursive dependencies *) solve_constraints_system levels cstrs_levels (* This (re)computes informations relevant to extraction and the sort of an arity or type constructor; we do not to recompute universes constraints *) let constraint_list_union = List.fold_left union_constraints empty_constraint let infer_constructor_packet env_ar_par params lc = (* type-check the constructors *) let jlc,cstl = List.split (List.map (infer_type env_ar_par) lc) in let cst = constraint_list_union cstl in let jlc = Array.of_list jlc in (* generalize the constructor over the parameters *) let lc'' = Array.map (fun j -> it_mkProd_or_LetIn j.utj_val params) jlc in (* compute the max of the sorts of the products of the constructor type *) let level = max_inductive_sort (Array.map (fun j -> j.utj_type) jlc) in (* compute *) let info = small_unit (List.map (infos_and_sort env_ar_par) lc) in (info,lc'',level,cst) (* Type-check an inductive definition. Does not check positivity conditions. *) let typecheck_inductive env mie = if mie.mind_entry_inds = [] then anomaly "empty inductive types declaration"; (* Check unicity of names *) mind_check_names mie; (* Params are typed-checked here *) let env_params, params, cst1 = infer_local_decls env mie.mind_entry_params in (* We first type arity of each inductive definition *) (* This allows to build the environment of arities and to share *) (* the set of constraints *) let cst, env_arities, rev_arity_list = List.fold_left (fun (cst,env_ar,l) ind -> (* Arities (without params) are typed-checked here *) let arity, cst2 = infer_type env_params ind.mind_entry_arity in (* We do not need to generate the universe of full_arity; if later, after the validation of the inductive definition, full_arity is used as argument or subject to cast, an upper universe will be generated *) let full_arity = it_mkProd_or_LetIn arity.utj_val params in let cst = union_constraints cst cst2 in let id = ind.mind_entry_typename in let env_ar' = push_rel (Name id, None, full_arity) (add_constraints cst2 env_ar) in let lev = (* Decide that if the conclusion is not explicitly Type *) (* then the inductive type is not polymorphic *) match kind_of_term ((strip_prod_assum arity.utj_val)) with | Sort (Type u) -> Some u | _ -> None in (cst,env_ar',(id,full_arity,lev)::l)) (cst1,env,[]) mie.mind_entry_inds in let arity_list = List.rev rev_arity_list in (* builds the typing context "Gamma, I1:A1, ... 
In:An, params" *) let env_ar_par = push_rel_context params (add_constraints cst1 env_arities) in (* Now, we type the constructors (without params) *) let inds,cst = List.fold_right2 (fun ind arity_data (inds,cst) -> let (info,lc',cstrs_univ,cst') = infer_constructor_packet env_ar_par params ind.mind_entry_lc in let consnames = ind.mind_entry_consnames in let ind' = (arity_data,consnames,info,lc',cstrs_univ) in (ind'::inds, union_constraints cst cst')) mie.mind_entry_inds arity_list ([],cst) in let inds = Array.of_list inds in let arities = Array.of_list arity_list in let param_ccls = List.fold_left (fun l (_,b,p) -> if b = None then (* Parameter contributes to polymorphism only if explicit Type *) let c = strip_prod_assum p in (* Add Type levels to the ordered list of parameters contributing to *) (* polymorphism unless there is aliasing (i.e. non distinct levels) *) match kind_of_term c with | Sort (Type u) -> if List.mem (Some u) l then None :: List.map (function Some v when u = v -> None | x -> x) l else Some u :: l | _ -> None :: l else l) [] params in (* Compute/check the sorts of the inductive types *) let ind_min_levels = inductive_levels arities inds in let inds, cst = array_fold_map2' (fun ((id,full_arity,ar_level),cn,info,lc,_) lev cst -> let sign, s = dest_arity env full_arity in let status,cst = match s with | Type u when ar_level <> None (* Explicitly polymorphic *) && no_upper_constraints u cst -> (* The polymorphic level is a function of the level of the *) (* conclusions of the parameters *) (* We enforce [u >= lev] in case [lev] has a strict upper *) (* constraints over [u] *) Inr (param_ccls, lev), enforce_geq u lev cst | Type u (* Not an explicit occurrence of Type *) -> Inl (info,full_arity,s), enforce_geq u lev cst | Prop Pos when engagement env <> Some ImpredicativeSet -> (* Predicative set: check that the content is indeed predicative *) if not (is_type0m_univ lev) & not (is_type0_univ lev) then raise (InductiveError LargeNonPropInductiveNotInType); Inl (info,full_arity,s), cst | Prop _ -> Inl (info,full_arity,s), cst in (id,cn,lc,(sign,status)),cst) inds ind_min_levels cst in (env_arities, params, inds, cst) (************************************************************************) (************************************************************************) (* Positivity *) type ill_formed_ind = | LocalNonPos of int | LocalNotEnoughArgs of int | LocalNotConstructor | LocalNonPar of int * int exception IllFormedInd of ill_formed_ind (* [mind_extract_params mie] extracts the params from an inductive types declaration, and checks that they are all present (and all the same) for all the given types. 
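(* Concretely this is just [decompose_prod_n_assum] (see the definition
   right below): roughly, with [nbpar = 1] and a constructor type
   [forall (A:Type) (x:A), P A x], it returns the context binding [A]
   together with the remainder [forall x:A, P A x]. *)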
*) let mind_extract_params = decompose_prod_n_assum let explain_ind_err id ntyp env0 nbpar c nargs err = let (lpar,c') = mind_extract_params nbpar c in let env = push_rel_context lpar env0 in match err with | LocalNonPos kt -> raise (InductiveError (NonPos (env,c',mkRel (kt+nbpar)))) | LocalNotEnoughArgs kt -> raise (InductiveError (NotEnoughArgs (env,c',mkRel (kt+nbpar)))) | LocalNotConstructor -> raise (InductiveError (NotConstructor (env,id,c',mkRel (ntyp+nbpar),nbpar,nargs))) | LocalNonPar (n,l) -> raise (InductiveError (NonPar (env,c',n,mkRel (nbpar-n+1), mkRel (l+nbpar)))) let failwith_non_pos n ntypes c = for k = n to n + ntypes - 1 do if not (noccurn k c) then raise (IllFormedInd (LocalNonPos (k-n+1))) done let failwith_non_pos_vect n ntypes v = Array.iter (failwith_non_pos n ntypes) v; anomaly "failwith_non_pos_vect: some k in [n;n+ntypes-1] should occur" let failwith_non_pos_list n ntypes l = List.iter (failwith_non_pos n ntypes) l; anomaly "failwith_non_pos_list: some k in [n;n+ntypes-1] should occur" (* Check the inductive type is called with the expected parameters *) let check_correct_par (env,n,ntypes,_) hyps l largs = let nparams = rel_context_nhyps hyps in let largs = Array.of_list largs in if Array.length largs < nparams then raise (IllFormedInd (LocalNotEnoughArgs l)); let (lpar,largs') = array_chop nparams largs in let nhyps = List.length hyps in let rec check k index = function | [] -> () | (_,Some _,_)::hyps -> check k (index+1) hyps | _::hyps -> match kind_of_term (whd_betadeltaiota env lpar.(k)) with | Rel w when w = index -> check (k-1) (index+1) hyps | _ -> raise (IllFormedInd (LocalNonPar (k+1,l))) in check (nparams-1) (n-nhyps) hyps; if not (array_for_all (noccur_between n ntypes) largs') then failwith_non_pos_vect n ntypes largs' (* Computes the maximum number of recursive parameters : the first parameters which are constant in recursive arguments n is the current depth, nmr is the maximum number of possible recursive parameters *) let compute_rec_par (env,n,_,_) hyps nmr largs = if nmr = 0 then 0 else (* start from 0, hyps will be in reverse order *) let (lpar,_) = list_chop nmr largs in let rec find k index = function ([],_) -> nmr | (_,[]) -> assert false (* |hyps|>=nmr *) | (lp,(_,Some _,_)::hyps) -> find k (index-1) (lp,hyps) | (p::lp,_::hyps) -> ( match kind_of_term (whd_betadeltaiota env p) with | Rel w when w = index -> find (k+1) (index-1) (lp,hyps) | _ -> k) in find 0 (n-1) (lpar,List.rev hyps) let lambda_implicit_lift n a = let implicit_sort = mkType (make_univ (make_dirpath [id_of_string "implicit"], 0)) in let lambda_implicit a = mkLambda (Anonymous, implicit_sort, a) in iterate lambda_implicit n (lift n a) (* This removes global parameters of the inductive types in lc (for nested inductive types only ) *) let abstract_mind_lc env ntyps npars lc = if npars = 0 then lc else let make_abs = list_tabulate (function i -> lambda_implicit_lift npars (mkRel (i+1))) ntyps in Array.map (substl make_abs) lc (* [env] is the typing environment [n] is the dB of the last inductive type [ntypes] is the number of inductive types in the definition (i.e. 
range of inductives is [n; n+ntypes-1]) [lra] is the list of recursive tree of each variable *) let ienv_push_var (env, n, ntypes, lra) (x,a,ra) = (push_rel (x,None,a) env, n+1, ntypes, (Norec,ra)::lra) let ienv_push_inductive (env, n, ntypes, ra_env) (mi,lpar) = let auxntyp = 1 in let specif = lookup_mind_specif env mi in let env' = push_rel (Anonymous,None, hnf_prod_applist env (type_of_inductive env specif) lpar) env in let ra_env' = (Imbr mi,(Rtree.mk_rec_calls 1).(0)) :: List.map (fun (r,t) -> (r,Rtree.lift 1 t)) ra_env in (* New index of the inductive types *) let newidx = n + auxntyp in (env', newidx, ntypes, ra_env') let rec ienv_decompose_prod (env,_,_,_ as ienv) n c = if n=0 then (ienv,c) else let c' = whd_betadeltaiota env c in match kind_of_term c' with Prod(na,a,b) -> let ienv' = ienv_push_var ienv (na,a,mk_norec) in ienv_decompose_prod ienv' (n-1) b | _ -> assert false let array_min nmr a = if nmr = 0 then 0 else Array.fold_left (fun k (nmri,_) -> min k nmri) nmr a (* The recursive function that checks positivity and builds the list of recursive arguments *) let check_positivity_one (env,_,ntypes,_ as ienv) hyps (_,i as ind) nargs lcnames indlc = let lparams = rel_context_length hyps in let nmr = rel_context_nhyps hyps in (* Checking the (strict) positivity of a constructor argument type [c] *) let rec check_pos (env, n, ntypes, ra_env as ienv) nmr c = let x,largs = decompose_app (whd_betadeltaiota env c) in match kind_of_term x with | Prod (na,b,d) -> assert (largs = []); (match weaker_noccur_between env n ntypes b with None -> failwith_non_pos_list n ntypes [b] | Some b -> check_pos (ienv_push_var ienv (na, b, mk_norec)) nmr d) | Rel k -> (try let (ra,rarg) = List.nth ra_env (k-1) in let nmr1 = (match ra with Mrec _ -> compute_rec_par ienv hyps nmr largs | _ -> nmr) in if not (List.for_all (noccur_between n ntypes) largs) then failwith_non_pos_list n ntypes largs else (nmr1,rarg) with Failure _ | Invalid_argument _ -> (nmr,mk_norec)) | Ind ind_kn -> (* If the inductive type being defined appears in a parameter, then we have a nested indtype *) if List.for_all (noccur_between n ntypes) largs then (nmr,mk_norec) else check_positive_nested ienv nmr (ind_kn, largs) | err -> if noccur_between n ntypes x && List.for_all (noccur_between n ntypes) largs then (nmr,mk_norec) else failwith_non_pos_list n ntypes (x::largs) (* accesses to the environment are not factorised, but is it worth? *) and check_positive_nested (env,n,ntypes,ra_env as ienv) nmr (mi, largs) = let (mib,mip) = lookup_mind_specif env mi in let auxnpar = mib.mind_nparams_rec in let nonrecpar = mib.mind_nparams - auxnpar in let (lpar,auxlargs) = try list_chop auxnpar largs with Failure _ -> raise (IllFormedInd (LocalNonPos n)) in (* If the inductive appears in the args (non params) then the definition is not positive. 
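(* Informal illustration: [Inductive t := C : list t -> t] passes this
   check, since [t] occurs only as a parameter of [list] and the body of
   [list] is then re-examined with that parameter substituted; an
   occurrence of [t] in a non-parameter argument of the nested inductive
   is exactly what is rejected just below. *)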
*) if not (List.for_all (noccur_between n ntypes) auxlargs) then failwith_non_pos_list n ntypes auxlargs; (* We do not deal with imbricated mutual inductive types *) let auxntyp = mib.mind_ntypes in if auxntyp <> 1 then raise (IllFormedInd (LocalNonPos n)); (* The nested inductive type with parameters removed *) let auxlcvect = abstract_mind_lc env auxntyp auxnpar mip.mind_nf_lc in (* Extends the environment with a variable corresponding to the inductive def *) let (env',_,_,_ as ienv') = ienv_push_inductive ienv (mi,lpar) in (* Parameters expressed in env' *) let lpar' = List.map (lift auxntyp) lpar in let irecargs_nmr = (* fails if the inductive type occurs non positively *) (* with recursive parameters substituted *) Array.map (function c -> let c' = hnf_prod_applist env' c lpar' in (* skip non-recursive parameters *) let (ienv',c') = ienv_decompose_prod ienv' nonrecpar c' in check_constructors ienv' false nmr c') auxlcvect in let irecargs = Array.map snd irecargs_nmr and nmr' = array_min nmr irecargs_nmr in (nmr',(Rtree.mk_rec [|mk_paths (Imbr mi) irecargs|]).(0)) (* check the inductive types occur positively in the products of C, if check_head=true, also check the head corresponds to a constructor of the ith type *) and check_constructors ienv check_head nmr c = let rec check_constr_rec (env,n,ntypes,ra_env as ienv) nmr lrec c = let x,largs = decompose_app (whd_betadeltaiota env c) in match kind_of_term x with | Prod (na,b,d) -> assert (largs = []); let nmr',recarg = check_pos ienv nmr b in let ienv' = ienv_push_var ienv (na,b,mk_norec) in check_constr_rec ienv' nmr' (recarg::lrec) d | hd -> if check_head then if hd = Rel (n+ntypes-i-1) then check_correct_par ienv hyps (ntypes-i) largs else raise (IllFormedInd LocalNotConstructor) else if not (List.for_all (noccur_between n ntypes) largs) then failwith_non_pos_list n ntypes largs; (nmr,List.rev lrec) in check_constr_rec ienv nmr [] c in let irecargs_nmr = array_map2 (fun id c -> let _,rawc = mind_extract_params lparams c in try check_constructors ienv true nmr rawc with IllFormedInd err -> explain_ind_err id (ntypes-i) env lparams c nargs err) (Array.of_list lcnames) indlc in let irecargs = Array.map snd irecargs_nmr and nmr' = array_min nmr irecargs_nmr in (nmr', mk_paths (Mrec ind) irecargs) let check_positivity kn env_ar params inds = let ntypes = Array.length inds in let rc = Array.mapi (fun j t -> (Mrec (kn,j),t)) (Rtree.mk_rec_calls ntypes) in let lra_ind = List.rev (Array.to_list rc) in let lparams = rel_context_length params in let nmr = rel_context_nhyps params in let check_one i (_,lcnames,lc,(sign,_)) = let ra_env = list_tabulate (fun _ -> (Norec,mk_norec)) lparams @ lra_ind in let ienv = (env_ar, 1+lparams, ntypes, ra_env) in let nargs = rel_context_nhyps sign - nmr in check_positivity_one ienv params (kn,i) nargs lcnames lc in let irecargs_nmr = Array.mapi check_one inds in let irecargs = Array.map snd irecargs_nmr and nmr' = array_min nmr irecargs_nmr in (nmr',Rtree.mk_rec irecargs) (************************************************************************) (************************************************************************) (* Build the inductive packet *) (* Allowed eliminations *) let all_sorts = [InProp;InSet;InType] let small_sorts = [InProp;InSet] let logical_sorts = [InProp] let allowed_sorts issmall isunit s = match family_of_sort s with (* Type: all elimination allowed *) | InType -> all_sorts (* Small Set is predicative: all elimination allowed *) | InSet when issmall -> all_sorts (* Large Set is 
necessarily impredicative: forbids large elimination *) | InSet -> small_sorts (* Unitary/empty Prop: elimination to all sorts are realizable *) (* unless the type is large. If it is large, forbids large elimination *) (* which otherwise allows to simulate the inconsistent system Type:Type *) | InProp when isunit -> if issmall then all_sorts else small_sorts (* Other propositions: elimination only to Prop *) | InProp -> logical_sorts let fold_inductive_blocks f = Array.fold_left (fun acc (_,_,lc,(arsign,_)) -> f (Array.fold_left f acc lc) (it_mkProd_or_LetIn (* dummy *) mkSet arsign)) let used_section_variables env inds = let ids = fold_inductive_blocks (fun l c -> Idset.union (Environ.global_vars_set env c) l) Idset.empty inds in keep_hyps env ids let build_inductive env env_ar params isrecord isfinite inds nmr recargs cst = let ntypes = Array.length inds in (* Compute the set of used section variables *) let hyps = used_section_variables env inds in let nparamargs = rel_context_nhyps params in let nparamdecls = rel_context_length params in (* Check one inductive *) let build_one_packet (id,cnames,lc,(ar_sign,ar_kind)) recarg = (* Type of constructors in normal form *) let splayed_lc = Array.map (dest_prod_assum env_ar) lc in let nf_lc = Array.map (fun (d,b) -> it_mkProd_or_LetIn b d) splayed_lc in let consnrealargs = Array.map (fun (d,_) -> rel_context_length d - rel_context_length params) splayed_lc in (* Elimination sorts *) let arkind,kelim = match ar_kind with | Inr (param_levels,lev) -> Polymorphic { poly_param_levels = param_levels; poly_level = lev; }, all_sorts | Inl ((issmall,isunit),ar,s) -> let kelim = allowed_sorts issmall isunit s in Monomorphic { mind_user_arity = ar; mind_sort = s; }, kelim in (* Assigning VM tags to constructors *) let nconst, nblock = ref 0, ref 0 in let transf num = let arity = List.length (dest_subterms recarg).(num) in if arity = 0 then let p = (!nconst, 0) in incr nconst; p else let p = (!nblock + 1, arity) in incr nblock; p (* les tag des constructeur constant commence a 0, les tag des constructeur non constant a 1 (0 => accumulator) *) in let rtbl = Array.init (List.length cnames) transf in (* Build the inductive packet *) { mind_typename = id; mind_arity = arkind; mind_arity_ctxt = ar_sign; mind_nrealargs = rel_context_nhyps ar_sign - nparamargs; mind_nrealargs_ctxt = rel_context_length ar_sign - nparamdecls; mind_kelim = kelim; mind_consnames = Array.of_list cnames; mind_consnrealdecls = consnrealargs; mind_user_lc = lc; mind_nf_lc = nf_lc; mind_recargs = recarg; mind_nb_constant = !nconst; mind_nb_args = !nblock; mind_reloc_tbl = rtbl; } in let packets = array_map2 build_one_packet inds recargs in (* Build the mutual inductive *) { mind_record = isrecord; mind_ntypes = ntypes; mind_finite = isfinite; mind_hyps = hyps; mind_nparams = nparamargs; mind_nparams_rec = nmr; mind_params_ctxt = params; mind_packets = packets; mind_constraints = cst } (************************************************************************) (************************************************************************) let check_inductive env kn mie = (* First type-check the inductive definition *) let (env_ar, params, inds, cst) = typecheck_inductive env mie in (* Then check positivity conditions *) let (nmr,recargs) = check_positivity kn env_ar params inds in (* Build the inductive packets *) build_inductive env env_ar params mie.mind_entry_record mie.mind_entry_finite inds nmr recargs cst coq-8.4pl2/kernel/sign.ml0000640000175000001440000000557212010532755014412 0ustar 
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* decl | _ :: sign -> lookup_named id sign | [] -> raise Not_found let named_context_length = List.length let named_context_equal = list_equal eq_named_declaration let vars_of_named_context = List.map (fun (id,_,_) -> id) let instance_from_named_context sign = let rec inst_rec = function | (id,None,_) :: sign -> mkVar id :: inst_rec sign | _ :: sign -> inst_rec sign | [] -> [] in Array.of_list (inst_rec sign) let fold_named_context f l ~init = List.fold_right f l init let fold_named_context_reverse f ~init l = List.fold_left f init l (*s Signatures of ordered section variables *) type section_context = named_context let fold_rel_context f l ~init:x = List.fold_right f l x let fold_rel_context_reverse f ~init:x l = List.fold_left f x l let map_context f l = let map_decl (n, body_o, typ as decl) = let body_o' = Option.smartmap f body_o in let typ' = f typ in if body_o' == body_o && typ' == typ then decl else (n, body_o', typ') in list_smartmap map_decl l let map_rel_context = map_context let map_named_context = map_context let iter_rel_context f = List.iter (fun (_,b,t) -> f t; Option.iter f b) let iter_named_context f = List.iter (fun (_,b,t) -> f t; Option.iter f b) (* Push named declarations on top of a rel context *) (* Bizarre. Should be avoided. *) let push_named_to_rel_context hyps ctxt = let rec push = function | (id,b,t) :: l -> let s, hyps = push l in let d = (Name id, Option.map (subst_vars s) b, subst_vars s t) in id::s, d::hyps | [] -> [],[] in let s, hyps = push hyps in let rec subst = function | d :: l -> let n, ctxt = subst l in (n+1), (map_rel_declaration (substn_vars n s) d)::ctxt | [] -> 1, hyps in snd (subst ctxt) coq-8.4pl2/kernel/names.ml0000640000175000001440000003337112121725465014560 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* if f a b then raise Finded) m ; false with |Finded -> true let singleton k v = add k v empty end module Idpred = Predicate.Make(IdOrdered) (** {6 Various types based on identifiers } *) type name = Name of identifier | Anonymous type variable = identifier (** {6 Directory paths = section names paths } *) (** Dirpaths are lists of module identifiers. The actual representation is reversed to optimise sharing: Coq.A.B is ["B";"A";"Coq"] *) type module_ident = identifier type dir_path = module_ident list module ModIdmap = Idmap let make_dirpath x = x let repr_dirpath x = x let empty_dirpath = [] (** Printing of directory paths as ["coq_root.module.submodule"] *) let string_of_dirpath = function | [] -> "<>" | sl -> String.concat "." 
(List.map string_of_id (List.rev sl)) (** {6 Unique names for bound modules } *) let u_number = ref 0 type uniq_ident = int * identifier * dir_path let make_uid dir s = incr u_number;(!u_number,s,dir) let debug_string_of_uid (i,s,p) = "<"(*^string_of_dirpath p ^"#"^*) ^ s ^"#"^ string_of_int i^">" let string_of_uid (i,s,p) = string_of_dirpath p ^"."^s module Umap = Map.Make(struct type t = uniq_ident let compare = Pervasives.compare end) type mod_bound_id = uniq_ident let make_mbid = make_uid let repr_mbid (n, id, dp) = (n, id, dp) let debug_string_of_mbid = debug_string_of_uid let string_of_mbid = string_of_uid let id_of_mbid (_,s,_) = s (** {6 Names of structure elements } *) type label = identifier let mk_label = id_of_string let string_of_label = string_of_id let pr_label l = str (string_of_label l) let id_of_label l = l let label_of_id id = id module Labset = Idset module Labmap = Idmap (** {6 The module part of the kernel name } *) type module_path = | MPfile of dir_path | MPbound of mod_bound_id | MPdot of module_path * label let rec check_bound_mp = function | MPbound _ -> true | MPdot(mp,_) ->check_bound_mp mp | _ -> false let rec string_of_mp = function | MPfile sl -> string_of_dirpath sl | MPbound uid -> string_of_uid uid | MPdot (mp,l) -> string_of_mp mp ^ "." ^ string_of_label l (** we compare labels first if both are MPdots *) let rec mp_ord mp1 mp2 = match (mp1,mp2) with MPdot(mp1,l1), MPdot(mp2,l2) -> let c = Pervasives.compare l1 l2 in if c<>0 then c else mp_ord mp1 mp2 | _,_ -> Pervasives.compare mp1 mp2 module MPord = struct type t = module_path let compare = mp_ord end module MPset = Set.Make(MPord) module MPmap = Map.Make(MPord) let default_module_name = "If you see this, it's a bug" let initial_dir = make_dirpath [default_module_name] let initial_path = MPfile initial_dir (** {6 Kernel names } *) type kernel_name = module_path * dir_path * label let make_kn mp dir l = (mp,dir,l) let repr_kn kn = kn let modpath kn = let mp,_,_ = repr_kn kn in mp let label kn = let _,_,l = repr_kn kn in l let string_of_kn (mp,dir,l) = let str_dir = if dir = [] then "." else "#" ^ string_of_dirpath dir ^ "#" in string_of_mp mp ^ str_dir ^ string_of_label l let pr_kn kn = str (string_of_kn kn) let kn_ord kn1 kn2 = let mp1,dir1,l1 = kn1 in let mp2,dir2,l2 = kn2 in let c = Pervasives.compare l1 l2 in if c <> 0 then c else let c = Pervasives.compare dir1 dir2 in if c<>0 then c else MPord.compare mp1 mp2 module KNord = struct type t = kernel_name let compare = kn_ord end module KNmap = Map.Make(KNord) module KNpred = Predicate.Make(KNord) module KNset = Set.Make(KNord) (** {6 Constant names } *) (** a constant name is a kernel name couple (kn1,kn2) where kn1 corresponds to the name used at toplevel (i.e. what the user see) and kn2 corresponds to the canonical kernel name i.e. 
in the environment we have kn1 \rhd_{\delta}^* kn2 \rhd_{\delta} t *) type constant = kernel_name*kernel_name let constant_of_kn kn = (kn,kn) let constant_of_kn_equiv kn1 kn2 = (kn1,kn2) let make_con mp dir l = constant_of_kn (mp,dir,l) let make_con_equiv mp1 mp2 dir l = if mp1 == mp2 then make_con mp1 dir l else ((mp1,dir,l),(mp2,dir,l)) let canonical_con con = snd con let user_con con = fst con let repr_con con = fst con let eq_constant (_,kn1) (_,kn2) = kn1=kn2 let con_label con = label (fst con) let con_modpath con = modpath (fst con) let string_of_con con = string_of_kn (fst con) let pr_con con = str (string_of_con con) let debug_string_of_con con = "(" ^ string_of_kn (fst con) ^ "," ^ string_of_kn (snd con) ^ ")" let debug_pr_con con = str (debug_string_of_con con) let con_with_label ((mp1,dp1,l1),(mp2,dp2,l2) as con) lbl = if lbl = l1 && lbl = l2 then con else ((mp1,dp1,lbl),(mp2,dp2,lbl)) (** For the environment we distinguish constants by their user part*) module User_ord = struct type t = kernel_name*kernel_name let compare x y= kn_ord (fst x) (fst y) end (** For other uses (ex: non-logical things) it is enough to deal with the canonical part *) module Canonical_ord = struct type t = kernel_name*kernel_name let compare x y= kn_ord (snd x) (snd y) end module Cmap = Map.Make(Canonical_ord) module Cmap_env = Map.Make(User_ord) module Cpred = Predicate.Make(Canonical_ord) module Cset = Set.Make(Canonical_ord) module Cset_env = Set.Make(User_ord) (** {6 Names of mutual inductive types } *) (** The same thing is done for mutual inductive names it replaces also the old mind_equiv field of mutual inductive types *) (** Beware: first inductive has index 0 *) (** Beware: first constructor has index 1 *) type mutual_inductive = kernel_name*kernel_name type inductive = mutual_inductive * int type constructor = inductive * int let mind_modpath mind = modpath (fst mind) let ind_modpath ind = mind_modpath (fst ind) let constr_modpath c = ind_modpath (fst c) let mind_of_kn kn = (kn,kn) let mind_of_kn_equiv kn1 kn2 = (kn1,kn2) let make_mind mp dir l = mind_of_kn (mp,dir,l) let make_mind_equiv mp1 mp2 dir l = if mp1 == mp2 then make_mind mp1 dir l else ((mp1,dir,l),(mp2,dir,l)) let canonical_mind mind = snd mind let user_mind mind = fst mind let repr_mind mind = fst mind let mind_label mind= label (fst mind) let eq_mind (_,kn1) (_,kn2) = kn1=kn2 let string_of_mind mind = string_of_kn (fst mind) let pr_mind mind = str (string_of_mind mind) let debug_string_of_mind mind = "(" ^ string_of_kn (fst mind) ^ "," ^ string_of_kn (snd mind) ^ ")" let debug_pr_mind con = str (debug_string_of_mind con) let ith_mutual_inductive (kn,_) i = (kn,i) let ith_constructor_of_inductive ind i = (ind,i) let inductive_of_constructor (ind,i) = ind let index_of_constructor (ind,i) = i let eq_ind (kn1,i1) (kn2,i2) = i1=i2&&eq_mind kn1 kn2 let eq_constructor (kn1,i1) (kn2,i2) = i1=i2&&eq_ind kn1 kn2 module Mindmap = Map.Make(Canonical_ord) module Mindset = Set.Make(Canonical_ord) module Mindmap_env = Map.Make(User_ord) module InductiveOrdered = struct type t = inductive let compare (spx,ix) (spy,iy) = let c = ix - iy in if c = 0 then Canonical_ord.compare spx spy else c end module InductiveOrdered_env = struct type t = inductive let compare (spx,ix) (spy,iy) = let c = ix - iy in if c = 0 then User_ord.compare spx spy else c end module Indmap = Map.Make(InductiveOrdered) module Indmap_env = Map.Make(InductiveOrdered_env) module ConstructorOrdered = struct type t = constructor let compare (indx,ix) (indy,iy) = let c = ix - iy 
in if c = 0 then InductiveOrdered.compare indx indy else c end module ConstructorOrdered_env = struct type t = constructor let compare (indx,ix) (indy,iy) = let c = ix - iy in if c = 0 then InductiveOrdered_env.compare indx indy else c end module Constrmap = Map.Make(ConstructorOrdered) module Constrmap_env = Map.Make(ConstructorOrdered_env) (* Better to have it here that in closure, since used in grammar.cma *) type evaluable_global_reference = | EvalVarRef of identifier | EvalConstRef of constant let eq_egr e1 e2 = match e1,e2 with EvalConstRef con1, EvalConstRef con2 -> eq_constant con1 con2 | _,_ -> e1 = e2 (** {6 Hash-consing of name objects } *) module Hname = Hashcons.Make( struct type t = name type u = identifier -> identifier let hash_sub hident = function | Name id -> Name (hident id) | n -> n let equal n1 n2 = match (n1,n2) with | (Name id1, Name id2) -> id1 == id2 | (Anonymous,Anonymous) -> true | _ -> false let hash = Hashtbl.hash end) module Hdir = Hashcons.Make( struct type t = dir_path type u = identifier -> identifier let hash_sub hident d = list_smartmap hident d let rec equal d1 d2 = match (d1,d2) with | [],[] -> true | id1::d1,id2::d2 -> id1 == id2 & equal d1 d2 | _ -> false let hash = Hashtbl.hash end) module Huniqid = Hashcons.Make( struct type t = uniq_ident type u = (identifier -> identifier) * (dir_path -> dir_path) let hash_sub (hid,hdir) (n,s,dir) = (n,hid s,hdir dir) let equal (n1,s1,dir1) (n2,s2,dir2) = n1 = n2 && s1 == s2 && dir1 == dir2 let hash = Hashtbl.hash end) module Hmod = Hashcons.Make( struct type t = module_path type u = (dir_path -> dir_path) * (uniq_ident -> uniq_ident) * (string -> string) let rec hash_sub (hdir,huniqid,hstr as hfuns) = function | MPfile dir -> MPfile (hdir dir) | MPbound m -> MPbound (huniqid m) | MPdot (md,l) -> MPdot (hash_sub hfuns md, hstr l) let rec equal d1 d2 = match (d1,d2) with | MPfile dir1, MPfile dir2 -> dir1 == dir2 | MPbound m1, MPbound m2 -> m1 == m2 | MPdot (mod1,l1), MPdot (mod2,l2) -> l1 == l2 && equal mod1 mod2 | _ -> false let hash = Hashtbl.hash end) module Hkn = Hashcons.Make( struct type t = kernel_name type u = (module_path -> module_path) * (dir_path -> dir_path) * (string -> string) let hash_sub (hmod,hdir,hstr) (md,dir,l) = (hmod md, hdir dir, hstr l) let equal (mod1,dir1,l1) (mod2,dir2,l2) = mod1 == mod2 && dir1 == dir2 && l1 == l2 let hash = Hashtbl.hash end) (** For [constant] and [mutual_inductive], we discriminate only on the user part : having the same user part implies having the same canonical part (invariant of the system). 
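(* This is why [equal] and [hash] below look only at the user part:
   under that invariant, physical equality of the user kernel names is
   enough to identify the whole pair. *)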
*) module Hcn = Hashcons.Make( struct type t = kernel_name*kernel_name type u = kernel_name -> kernel_name let hash_sub hkn (user,can) = (hkn user, hkn can) let equal (user1,_) (user2,_) = user1 == user2 let hash (user,_) = Hashtbl.hash user end) module Hind = Hashcons.Make( struct type t = inductive type u = mutual_inductive -> mutual_inductive let hash_sub hmind (mind, i) = (hmind mind, i) let equal (mind1,i1) (mind2,i2) = mind1 == mind2 && i1 = i2 let hash = Hashtbl.hash end) module Hconstruct = Hashcons.Make( struct type t = constructor type u = inductive -> inductive let hash_sub hind (ind, j) = (hind ind, j) let equal (ind1,j1) (ind2,j2) = ind1 == ind2 && j1 = j2 let hash = Hashtbl.hash end) let hcons_string = Hashcons.simple_hcons Hashcons.Hstring.f () let hcons_ident = hcons_string let hcons_name = Hashcons.simple_hcons Hname.f hcons_ident let hcons_dirpath = Hashcons.simple_hcons Hdir.f hcons_ident let hcons_uid = Hashcons.simple_hcons Huniqid.f (hcons_ident,hcons_dirpath) let hcons_mp = Hashcons.simple_hcons Hmod.f (hcons_dirpath,hcons_uid,hcons_string) let hcons_kn = Hashcons.simple_hcons Hkn.f (hcons_mp,hcons_dirpath,hcons_string) let hcons_con = Hashcons.simple_hcons Hcn.f hcons_kn let hcons_mind = Hashcons.simple_hcons Hcn.f hcons_kn let hcons_ind = Hashcons.simple_hcons Hind.f hcons_mind let hcons_construct = Hashcons.simple_hcons Hconstruct.f hcons_ind (*******) type transparent_state = Idpred.t * Cpred.t let empty_transparent_state = (Idpred.empty, Cpred.empty) let full_transparent_state = (Idpred.full, Cpred.full) let var_full_transparent_state = (Idpred.full, Cpred.empty) let cst_full_transparent_state = (Idpred.empty, Cpred.full) type 'a tableKey = | ConstKey of constant | VarKey of identifier | RelKey of 'a type inv_rel_key = int (* index in the [rel_context] part of environment starting by the end, {\em inverse} of de Bruijn indice *) type id_key = inv_rel_key tableKey let eq_id_key ik1 ik2 = match ik1,ik2 with ConstKey (_,kn1), ConstKey (_,kn2) -> kn1=kn2 | a,b -> a=b let eq_con_chk (kn1,_) (kn2,_) = kn1=kn2 let eq_mind_chk (kn1,_) (kn2,_) = kn1=kn2 let eq_ind_chk (kn1,i1) (kn2,i2) = i1=i2&&eq_mind_chk kn1 kn2 coq-8.4pl2/kernel/typeops.ml0000640000175000001440000004003012010532755015141 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let c' = try default_conv CUMUL env t1 t2 with NotConvertible -> raise (NotConvertibleVect i) in union_constraints c c') empty_constraint v1 v2 (* This should be a type (a priori without intension to be an assumption) *) let type_judgment env j = match kind_of_term(whd_betadeltaiota env j.uj_type) with | Sort s -> {utj_val = j.uj_val; utj_type = s } | _ -> error_not_type env j (* This should be a type intended to be assumed. The error message is *) (* not as useful as for [type_judgment]. *) let assumption_of_judgment env j = try (type_judgment env j).utj_val with TypeError _ -> error_assumption env j (************************************************) (* Incremental typing rules: builds a typing judgement given the *) (* judgements for the subterms. *) (*s Type of sorts *) (* Prop and Set *) let judge_of_prop = { uj_val = mkProp; uj_type = mkSort type1_sort } let judge_of_set = { uj_val = mkSet; uj_type = mkSort type1_sort } let judge_of_prop_contents = function | Null -> judge_of_prop | Pos -> judge_of_set (* Type of Type(i). 
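(* Sketch: [judge_of_type u] below returns the judgement
   [Type(u) : Type(super u)], i.e. every universe lives in the next
   one. *)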
*) let judge_of_type u = let uu = super u in { uj_val = mkType u; uj_type = mkType uu } (*s Type of a de Bruijn index. *) let judge_of_relative env n = try let (_,_,typ) = lookup_rel n env in { uj_val = mkRel n; uj_type = lift n typ } with Not_found -> error_unbound_rel env n (* Type of variables *) let judge_of_variable env id = try let ty = named_type id env in make_judge (mkVar id) ty with Not_found -> error_unbound_var env id (* Management of context of variables. *) (* Checks if a context of variable can be instantiated by the variables of the current env *) (* TODO: check order? *) let rec check_hyps_inclusion env sign = Sign.fold_named_context (fun (id,_,ty1) () -> let ty2 = named_type id env in if not (eq_constr ty2 ty1) then error "types do not match") sign ~init:() let check_args env c hyps = try check_hyps_inclusion env hyps with UserError _ | Not_found -> error_reference_variables env c (* Checks if the given context of variables [hyps] is included in the current context of [env]. *) (* let check_hyps id env hyps = let hyps' = named_context env in if not (hyps_inclusion env hyps hyps') then error_reference_variables env id *) (* Instantiation of terms on real arguments. *) (* Make a type polymorphic if an arity *) let extract_level env p = let _,c = dest_prod_assum env p in match kind_of_term c with Sort (Type u) -> Some u | _ -> None let extract_context_levels env = List.fold_left (fun l (_,b,p) -> if b=None then extract_level env p::l else l) [] let make_polymorphic_if_constant_for_ind env {uj_val = c; uj_type = t} = let params, ccl = dest_prod_assum env t in match kind_of_term ccl with | Sort (Type u) when isInd (fst (decompose_app (whd_betadeltaiota env c))) -> let param_ccls = extract_context_levels env params in let s = { poly_param_levels = param_ccls; poly_level = u} in PolymorphicArity (params,s) | _ -> NonPolymorphicType t (* Type of constants *) let type_of_constant_knowing_parameters env t paramtyps = match t with | NonPolymorphicType t -> t | PolymorphicArity (sign,ar) -> let ctx = List.rev sign in let ctx,s = instantiate_universes env ctx ar paramtyps in mkArity (List.rev ctx,s) let type_of_constant_type env t = type_of_constant_knowing_parameters env t [||] let type_of_constant env cst = type_of_constant_type env (constant_type env cst) let judge_of_constant_knowing_parameters env cst jl = let c = mkConst cst in let cb = lookup_constant cst env in let _ = check_args env c cb.const_hyps in let paramstyp = Array.map (fun j -> j.uj_type) jl in let t = type_of_constant_knowing_parameters env cb.const_type paramstyp in make_judge c t let judge_of_constant env cst = judge_of_constant_knowing_parameters env cst [||] (* Type of a lambda-abstraction. *) (* [judge_of_abstraction env name var j] implements the rule env, name:typ |- j.uj_val:j.uj_type env, |- (name:typ)j.uj_type : s ----------------------------------------------------------------------- env |- [name:typ]j.uj_val : (name:typ)j.uj_type Since all products are defined in the Calculus of Inductive Constructions and no upper constraint exists on the sort $s$, we don't need to compute $s$ *) let judge_of_abstraction env name var j = { uj_val = mkLambda (name, var.utj_val, j.uj_val); uj_type = mkProd (name, var.utj_val, j.uj_type) } (* Type of let-in. *) let judge_of_letin env name defj typj j = { uj_val = mkLetIn (name, defj.uj_val, typj.utj_val, j.uj_val) ; uj_type = subst1 defj.uj_val j.uj_type } (* Type of an application. 
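(* Rough reading of [judge_of_apply] below: for each argument judgement
   [hj], the whd-reduced type of the function must be a product
   [Prod(_,c1,c2)]; the argument type is checked against [c1] with
   [conv_leq], the resulting constraints are unioned, and typing
   continues with [subst1 hj.uj_val c2]. *)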
*) let judge_of_apply env funj argjv = let rec apply_rec n typ cst = function | [] -> { uj_val = mkApp (j_val funj, Array.map j_val argjv); uj_type = typ }, cst | hj::restjl -> (match kind_of_term (whd_betadeltaiota env typ) with | Prod (_,c1,c2) -> (try let c = conv_leq false env hj.uj_type c1 in let cst' = union_constraints cst c in apply_rec (n+1) (subst1 hj.uj_val c2) cst' restjl with NotConvertible -> error_cant_apply_bad_type env (n,c1, hj.uj_type) funj argjv) | _ -> error_cant_apply_not_functional env funj argjv) in apply_rec 1 funj.uj_type empty_constraint (Array.to_list argjv) (* Type of product *) let sort_of_product env domsort rangsort = match (domsort, rangsort) with (* Product rule (s,Prop,Prop) *) | (_, Prop Null) -> rangsort (* Product rule (Prop/Set,Set,Set) *) | (Prop _, Prop Pos) -> rangsort (* Product rule (Type,Set,?) *) | (Type u1, Prop Pos) -> if engagement env = Some ImpredicativeSet then (* Rule is (Type,Set,Set) in the Set-impredicative calculus *) rangsort else (* Rule is (Type_i,Set,Type_i) in the Set-predicative calculus *) Type (sup u1 type0_univ) (* Product rule (Prop,Type_i,Type_i) *) | (Prop Pos, Type u2) -> Type (sup type0_univ u2) (* Product rule (Prop,Type_i,Type_i) *) | (Prop Null, Type _) -> rangsort (* Product rule (Type_i,Type_i,Type_i) *) | (Type u1, Type u2) -> Type (sup u1 u2) (* [judge_of_product env name (typ1,s1) (typ2,s2)] implements the rule env |- typ1:s1 env, name:typ1 |- typ2 : s2 ------------------------------------------------------------------------- s' >= (s1,s2), env |- (name:typ)j.uj_val : s' where j.uj_type is convertible to a sort s2 *) let judge_of_product env name t1 t2 = let s = sort_of_product env t1.utj_type t2.utj_type in { uj_val = mkProd (name, t1.utj_val, t2.utj_val); uj_type = mkSort s } (* Type of a type cast *) (* [judge_of_cast env (c,typ1) (typ2,s)] implements the rule env |- c:typ1 env |- typ2:s env |- typ1 <= typ2 --------------------------------------------------------------------- env |- c:typ2 *) let judge_of_cast env cj k tj = let expected_type = tj.utj_val in try let c, cst = match k with | VMcast -> mkCast (cj.uj_val, k, expected_type), vm_conv CUMUL env cj.uj_type expected_type | DEFAULTcast -> mkCast (cj.uj_val, k, expected_type), conv_leq false env cj.uj_type expected_type | REVERTcast -> cj.uj_val, conv_leq true env cj.uj_type expected_type in { uj_val = c; uj_type = expected_type }, cst with NotConvertible -> error_actual_type env cj expected_type (* Inductive types. *) (* The type is parametric over the uniform parameters whose conclusion is in Type; to enforce the internal constraints between the parameters and the instances of Type occurring in the type of the constructors, we use the level variables _statically_ assigned to the conclusions of the parameters as mediators: e.g. 
if a parameter has conclusion Type(alpha), static constraints of the form alpha<=v exist between alpha and the Type's occurring in the constructor types; when the parameters is finally instantiated by a term of conclusion Type(u), then the constraints u<=alpha is computed in the App case of execute; from this constraints, the expected dynamic constraints of the form u<=v are enforced *) let judge_of_inductive_knowing_parameters env ind jl = let c = mkInd ind in let (mib,mip) = lookup_mind_specif env ind in check_args env c mib.mind_hyps; let paramstyp = Array.map (fun j -> j.uj_type) jl in let t = Inductive.type_of_inductive_knowing_parameters env mip paramstyp in make_judge c t let judge_of_inductive env ind = judge_of_inductive_knowing_parameters env ind [||] (* Constructors. *) let judge_of_constructor env c = let constr = mkConstruct c in let _ = let ((kn,_),_) = c in let mib = lookup_mind kn env in check_args env constr mib.mind_hyps in let specif = lookup_mind_specif env (inductive_of_constructor c) in make_judge constr (type_of_constructor c specif) (* Case. *) let check_branch_types env ind cj (lfj,explft) = try conv_leq_vecti env (Array.map j_type lfj) explft with NotConvertibleVect i -> error_ill_formed_branch env cj.uj_val (ind,i+1) lfj.(i).uj_type explft.(i) | Invalid_argument _ -> error_number_branches env cj (Array.length explft) let judge_of_case env ci pj cj lfj = let indspec = try find_rectype env cj.uj_type with Not_found -> error_case_not_inductive env cj in let _ = check_case_info env (fst indspec) ci in let (bty,rslty,univ) = type_case_branches env indspec pj cj.uj_val in let univ' = check_branch_types env (fst indspec) cj (lfj,bty) in ({ uj_val = mkCase (ci, (*nf_betaiota*) pj.uj_val, cj.uj_val, Array.map j_val lfj); uj_type = rslty }, union_constraints univ univ') (* Fixpoints. *) (* Checks the type of a general (co)fixpoint, i.e. without checking *) (* the specific guard condition. *) let type_fixpoint env lna lar vdefj = let lt = Array.length vdefj in assert (Array.length lar = lt); try conv_leq_vecti env (Array.map j_type vdefj) (Array.map (fun ty -> lift lt ty) lar) with NotConvertibleVect i -> error_ill_typed_rec_body env i lna vdefj lar (************************************************************************) (************************************************************************) (* This combinator adds the universe constraints both in the local graph and in the universes of the environment. This is to ensure that the infered local graph is satisfiable. *) let univ_combinator (cst,univ) (j,c') = (j,(union_constraints cst c', merge_constraints c' univ)) (* The typing machine. 
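(* Rough shape of the machine: [execute env c cu] returns an unsafe
   judgement for [c] together with an updated (constraints, universes)
   pair; [infer] and [infer_type] at the end of the file seed it with
   [empty_constraint] and [universes env]. *)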
*) (* ATTENTION : faudra faire le typage du contexte des Const, Ind et Constructsi un jour cela devient des constructions arbitraires et non plus des variables *) let rec execute env cstr cu = match kind_of_term cstr with (* Atomic terms *) | Sort (Prop c) -> (judge_of_prop_contents c, cu) | Sort (Type u) -> (judge_of_type u, cu) | Rel n -> (judge_of_relative env n, cu) | Var id -> (judge_of_variable env id, cu) | Const c -> (judge_of_constant env c, cu) (* Lambda calculus operators *) | App (f,args) -> let (jl,cu1) = execute_array env args cu in let (j,cu2) = match kind_of_term f with | Ind ind -> (* Sort-polymorphism of inductive types *) judge_of_inductive_knowing_parameters env ind jl, cu1 | Const cst -> (* Sort-polymorphism of constant *) judge_of_constant_knowing_parameters env cst jl, cu1 | _ -> (* No sort-polymorphism *) execute env f cu1 in univ_combinator cu2 (judge_of_apply env j jl) | Lambda (name,c1,c2) -> let (varj,cu1) = execute_type env c1 cu in let env1 = push_rel (name,None,varj.utj_val) env in let (j',cu2) = execute env1 c2 cu1 in (judge_of_abstraction env name varj j', cu2) | Prod (name,c1,c2) -> let (varj,cu1) = execute_type env c1 cu in let env1 = push_rel (name,None,varj.utj_val) env in let (varj',cu2) = execute_type env1 c2 cu1 in (judge_of_product env name varj varj', cu2) | LetIn (name,c1,c2,c3) -> let (j1,cu1) = execute env c1 cu in let (j2,cu2) = execute_type env c2 cu1 in let (_,cu3) = univ_combinator cu2 (judge_of_cast env j1 DEFAULTcast j2) in let env1 = push_rel (name,Some j1.uj_val,j2.utj_val) env in let (j',cu4) = execute env1 c3 cu3 in (judge_of_letin env name j1 j2 j', cu4) | Cast (c,k, t) -> let (cj,cu1) = execute env c cu in let (tj,cu2) = execute_type env t cu1 in univ_combinator cu2 (judge_of_cast env cj k tj) (* Inductive types *) | Ind ind -> (judge_of_inductive env ind, cu) | Construct c -> (judge_of_constructor env c, cu) | Case (ci,p,c,lf) -> let (cj,cu1) = execute env c cu in let (pj,cu2) = execute env p cu1 in let (lfj,cu3) = execute_array env lf cu2 in univ_combinator cu3 (judge_of_case env ci pj cj lfj) | Fix ((vn,i as vni),recdef) -> let ((fix_ty,recdef'),cu1) = execute_recdef env recdef i cu in let fix = (vni,recdef') in check_fix env fix; (make_judge (mkFix fix) fix_ty, cu1) | CoFix (i,recdef) -> let ((fix_ty,recdef'),cu1) = execute_recdef env recdef i cu in let cofix = (i,recdef') in check_cofix env cofix; (make_judge (mkCoFix cofix) fix_ty, cu1) (* Partial proofs: unsupported by the kernel *) | Meta _ -> anomaly "the kernel does not support metavariables" | Evar _ -> anomaly "the kernel does not support existential variables" and execute_type env constr cu = let (j,cu1) = execute env constr cu in (type_judgment env j, cu1) and execute_recdef env (names,lar,vdef) i cu = let (larj,cu1) = execute_array env lar cu in let lara = Array.map (assumption_of_judgment env) larj in let env1 = push_rec_types (names,lara,vdef) env in let (vdefj,cu2) = execute_array env1 vdef cu1 in let vdefv = Array.map j_val vdefj in let cst = type_fixpoint env1 names lara vdefj in univ_combinator cu2 ((lara.(i),(names,lara,vdefv)),cst) and execute_array env = array_fold_map' (execute env) (* Derived functions *) let infer env constr = let (j,(cst,_)) = execute env constr (empty_constraint, universes env) in assert (eq_constr j.uj_val constr); (j, cst) let infer_type env constr = let (j,(cst,_)) = execute_type env constr (empty_constraint, universes env) in (j, cst) let infer_v env cv = let (jv,(cst,_)) = execute_array env cv (empty_constraint, universes env) in 
(jv, cst) (* Typing of several terms. *) let infer_local_decl env id = function | LocalDef c -> let (j,cst) = infer env c in (Name id, Some j.uj_val, j.uj_type), cst | LocalAssum c -> let (j,cst) = infer env c in (Name id, None, assumption_of_judgment env j), cst let infer_local_decls env decls = let rec inferec env = function | (id, d) :: l -> let env, l, cst1 = inferec env l in let d, cst2 = infer_local_decl env id d in push_rel d env, add_rel_decl d l, union_constraints cst1 cst2 | [] -> env, empty_rel_context, empty_constraint in inferec env decls (* Exported typing functions *) let typing env c = let (j,cst) = infer env c in let _ = add_constraints cst env in j coq-8.4pl2/configure0000750000175000001440000011027112127276167013552 0ustar notinusers#!/bin/sh ################################## # # Configuration script for Coq # ################################## VERSION=8.4pl2 VOMAGIC=08400 STATEMAGIC=58400 DATE=`LC_ALL=C LANG=C date +"%B %Y"` # Create the bin/ directory if non-existent test -d bin || mkdir bin # a local which command for sh which () { IFS=":" # set words separator in PATH to be ':' (it allows spaces in dirnames) for i in $PATH; do if test -z "$i"; then i=.; fi if [ -f "$i/$1" ] ; then IFS=" " echo "$i/$1" break fi done } usage () { printf "Available options for configure are:\n" echo "-help" printf "\tDisplays this help page\n" echo "-prefix

" printf "\tSet installation directory to \n" echo "-local" printf "\tSet installation directory to the current source tree\n" echo "-coqrunbyteflags " printf "\tSet link flags for VM-dependent bytecode (coqtop)\n" echo "-coqtoolsbyteflags " printf "\tSet link flags for VM-independant bytecode (coqdep, coqdoc, ...)\n" echo "-custom" printf "\tGenerate all bytecode executables with -custom (not recommended)\n" echo "-src " printf "\tSpecifies the source directory\n" echo "-bindir " echo "-libdir " echo "-configdir " echo "-datadir " echo "-mandir " echo "-docdir " printf "\tSpecifies where to install bin/lib/config/data/man/doc files resp.\n" echo "-emacslib " printf "\tSpecifies where emacs files are to be installed\n" echo "-coqdocdir " printf "\tSpecifies where Coqdoc style files are to be installed\n" echo "-camldir " printf "\tSpecifies the path to the OCaml library\n" echo "-lablgtkdir " printf "\tSpecifies the path to the Lablgtk library\n" echo "-usecamlp5" printf "\tSpecifies to use camlp5 instead of camlp4\n" echo "-usecamlp4" printf "\tSpecifies to use camlp4 instead of camlp5\n" echo "-camlp5dir " printf "\tSpecifies where to look for the Camlp5 library and tells to use it\n" echo "-arch " printf "\tSpecifies the architecture\n" echo "-opt" printf "\tSpecifies whether or not to use OCaml *.opt optimized compilers\n" echo "-natdynlink (yes|no)" printf "\tSpecifies whether or not to use dynamic loading of native code\n" echo "-coqide (opt|byte|no)" printf "\tSpecifies whether or not to compile Coqide\n" echo "-nomacintegration" printf "\tSpecifies to not try to build coqide mac integration\n" echo "-browser " printf "\tUse to open URL %%s\n" echo "-with-doc (yes|no)" printf "\tSpecifies whether or not to compile the documentation\n" echo "-with-geoproof (yes|no)" printf "\tSpecifies whether or not to use Geoproof binding\n" echo "-byte-only" printf "\tCompiles only bytecode version of Coq\n" echo "-debug" printf "\tAdd debugging information in the Coq executables\n" echo "-profile" printf "\tAdd profiling information in the Coq executables\n" echo "-annotate" printf "\tCompiles Coq with -dtypes option\n" echo "-makecmd " printf "\tName of GNU Make command.\n" } # Default OCaml binaries bytecamlc=ocamlc nativecamlc=ocamlopt ocamlmklibexec=ocamlmklib ocamlexec=ocaml ocamldepexec=ocamldep ocamldocexec=ocamldoc ocamllexexec=ocamllex ocamlyaccexec=ocamlyacc ocamlmktopexec=ocamlmktop camlp4oexec=camlp4o coq_debug_flag= coq_debug_flag_opt= coq_profile_flag= coq_annotate_flag= best_compiler=opt cflags="-fno-defer-pop -Wall -Wno-unused" natdynlink=yes local=false coqrunbyteflags_spec=no coqtoolsbyteflags_spec=no custom_spec=no src_spec=no prefix_spec=no bindir_spec=no libdir_spec=no configdir_spec=no datadir_spec=no mandir_spec=no docdir_spec=no emacslib_spec=no emacs_spec=no camldir_spec=no lablgtkdir_spec=no coqdocdir_spec=no arch_spec=no coqide_spec=no nomacintegration_spec=no browser_spec=no wwwcoq_spec=no with_geoproof=false with_doc=all with_doc_spec=no force_caml_version=no force_caml_version_spec=no usecamlp5=yes COQSRC=`pwd` # Parse command-line arguments while : ; do case "$1" in "") break;; -help|--help) usage exit;; -prefix|--prefix) prefix_spec=yes prefix="$2" shift;; -local|--local) local=true;; -coqrunbyteflags|--coqrunbyteflags) coqrunbyteflags_spec=yes coqrunbyteflags="$2" shift;; -coqtoolsbyteflags|--coqtoolsbyteflags) coqtoolsbyteflags_spec=yes coqtoolsbyteflags="$2" shift;; -custom|--custom) custom_spec=yes;; -src|--src) src_spec=yes COQSRC="$2" shift;; 
-bindir|--bindir) bindir_spec=yes bindir="$2" shift;; -libdir|--libdir) libdir_spec=yes libdir="$2" shift;; -configdir|--configdir) configdir_spec=yes configdir="$2" shift;; -datadir|--datadir) datadir_spec=yes datadir="$2" shift;; -mandir|--mandir) mandir_spec=yes mandir="$2" shift;; -docdir|--docdir) docdir_spec=yes docdir="$2" shift;; -emacslib|--emacslib) emacslib_spec=yes emacslib="$2" shift;; -emacs |--emacs) emacs_spec=yes emacs="$2" printf "Warning: obsolete -emacs option\n" shift;; -coqdocdir|--coqdocdir) coqdocdir_spec=yes coqdocdir="$2" shift;; -camldir|--camldir) camldir_spec=yes camldir="$2" shift;; -lablgtkdir|--lablgtkdir) lablgtkdir_spec=yes lablgtkdir="$2" shift;; -usecamlp5|--usecamlp5) usecamlp5=yes;; -usecamlp4|--usecamlp4) usecamlp5=no;; -camlp5dir|--camlp5dir) usecamlp5=yes camlp5dir="$2" shift;; -arch|--arch) arch_spec=yes arch=$2 shift;; -opt|--opt) bytecamlc=ocamlc.opt camlp4oexec=camlp4o # can't add .opt since dyn load'll be required nativecamlc=ocamlopt.opt;; -natdynlink|--natdynlink) case "$2" in yes) natdynlink=yes;; *) natdynlink=no esac shift;; -coqide|--coqide) coqide_spec=yes case "$2" in byte|opt) COQIDE=$2;; *) COQIDE=no esac shift;; -nomacintegration) nomacintegration_spec=yes shift;; -browser|--browser) browser_spec=yes BROWSER=$2 shift;; -coqwebsite|--coqwebsite) wwwcoq_spec=yes WWWCOQ=$2 shift;; -with-doc|--with-doc) with_doc_spec=yes case "$2" in yes|all) with_doc=all;; *) with_doc=no esac shift;; -with-geoproof|--with-geoproof) case "$2" in yes) with_geoproof=true;; no) with_geoproof=false;; esac shift;; -makecmd|--makecmd) makecmd="$2" shift;; -byte-only|-byteonly|--byteonly|--byte-only) best_compiler=byte;; -debug|--debug) coq_debug_flag=-g;; -profile|--profile) coq_profile_flag=-p;; -annotate|--annotate) coq_annotate_flag=-dtypes;; -force-caml-version|--force-caml-version|-force-ocaml-version|--force-ocaml-version) force_caml_version_spec=yes force_caml_version=yes;; *) echo "Unknown option \"$1\"." 1>&2; usage; exit 2;; esac shift done if [ $prefix_spec = yes -a $local = true ] ; then echo "Options -prefix and -local are incompatible." echo "Configure script failed!" exit 1 fi # compile date DATEPGM=`which date` case $DATEPGM in "") echo "I can't find the program \"date\" in your path." echo "Please give me the current date" read COMPILEDATE;; *) COMPILEDATE=`LC_ALL=C LANG=C date +"%b %d %Y %H:%M:%S"`;; esac # Architecture case $arch_spec in no) # First we test if we are running a Cygwin or Mingw/Msys system if [ `uname -s | cut -c -6` = "CYGWIN" ] ; then ARCH="win32" CYGWIN=yes elif [ `uname -s | cut -c -7` = "MINGW32" ]; then ARCH="win32" else # If not, we determine the architecture if test -x /bin/uname ; then ARCH=`/bin/uname -s` elif test -x /usr/bin/uname ; then ARCH=`/usr/bin/uname -s` elif test -x /bin/arch ; then ARCH=`/bin/arch` elif test -x /usr/bin/arch ; then ARCH=`/usr/bin/arch` elif test -x /usr/ucb/arch ; then ARCH=`/usr/ucb/arch` else echo "I can not automatically find the name of your architecture." printf "%s"\ "Give me a name, please [win32 for Win95, Win98 or WinNT]: " read ARCH fi fi;; yes) ARCH=$arch esac # executable extension case $ARCH in win32) EXE=".exe" DLLEXT=".dll";; *) EXE="" DLLEXT=".so" esac # Is the source tree checked out from a recognised # version control system ? 
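# The detected value is recorded as CHECKEDOUT in config/Makefile
# (see the "Defining REVISION" section near the end of this script).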
if test -e .svn/entries ; then checkedout=svn elif [ -d '{arch}' ]; then checkedout=gnuarch elif [ -z "${GIT_DIR}" ] && [ -d .git ] || [ -d "${GIT_DIR}" ]; then checkedout=git else checkedout=0 fi # make command MAKE=`which ${makecmd:-make}` if [ "$MAKE" != "" ]; then MAKEVERSION=`$MAKE -v | head -1 | cut -d" " -f3` MAKEVERSIONMAJOR=`echo $MAKEVERSION | cut -d. -f1` MAKEVERSIONMINOR=`echo $MAKEVERSION | cut -d. -f2` if [ "$MAKEVERSIONMAJOR" -eq 3 -a "$MAKEVERSIONMINOR" -ge 81 ]; then echo "You have GNU Make $MAKEVERSION. Good!" else OK="no" #Extra support for local installation of make 3.81 #will be useless when make >= 3.81 will be standard if [ -x ./make ]; then MAKEVERSION=`./make -v | head -1` if [ "$MAKEVERSION" = "GNU Make 3.81" ]; then OK="yes"; fi fi if [ $OK = "no" ]; then echo "GNU Make >= 3.81 is needed." echo "Make 3.81 can be downloaded from ftp://ftp.gnu.org/gnu/make/make-3.81.tar.gz" echo "then locally installed on a Unix-style system by issuing:" echo " tar xzvf make-3.81.tar.gz" echo " cd make-3.81" echo " ./configure" echo " make" echo " mv make .." echo " cd .." echo "Restart then the configure script and later use ./make instead of make." exit 1 else echo "You have locally installed GNU Make 3.81. Good!" fi fi else echo "Cannot find GNU Make >= 3.81." fi # Browser command if [ "$browser_spec" = "no" ]; then case $ARCH in win32) BROWSER='start %s' ;; Darwin) BROWSER='open %s' ;; *) BROWSER='firefox -remote "OpenURL(%s,new-tab)" || firefox %s &' ;; esac fi if [ "$wwwcoq_spec" = "no" ]; then WWWCOQ="http://coq.inria.fr/" fi ######################################### # Objective Caml programs case $camldir_spec in no) CAMLC=`which $bytecamlc` case "$CAMLC" in "") echo "$bytecamlc is not present in your path!" echo "Give me manually the path to the $bytecamlc executable [/usr/local/bin by default]: " read CAMLC case "$CAMLC" in "") CAMLC=/usr/local/bin/$bytecamlc;; */ocamlc|*/ocamlc.opt) true;; */) CAMLC="${CAMLC}"$bytecamlc;; *) CAMLC="${CAMLC}"/$bytecamlc;; esac esac CAMLBIN=`dirname "$CAMLC"`;; yes) CAMLC=$camldir/$bytecamlc CAMLBIN=`dirname "$CAMLC"` bytecamlc="$CAMLC" nativecamlc=$CAMLBIN/$nativecamlc ocamlexec=$CAMLBIN/ocaml ocamldepexec=$CAMLBIN/ocamldep ocamldocexec=$CAMLBIN/ocamldoc ocamllexexec=$CAMLBIN/ocamllex ocamlyaccexec=$CAMLBIN/ocamlyacc ocamlmktopexec=$CAMLBIN/ocamlmktop ocamlmklibexec=$CAMLBIN/ocamlmklib camlp4oexec=$CAMLBIN/camlp4o esac if test ! -f "$CAMLC" ; then echo "I can not find the executable '$CAMLC'. Have you installed it?" echo "Configuration script failed!" exit 1 fi # Under Windows, we need to convert from cygwin/mingw paths (/c/Program Files/Ocaml) # to more windows-looking paths (c:/Program Files/Ocaml). Note that / are kept mk_win_path () { case $ARCH,$CYGWIN in win32,yes) cygpath -m "$1" ;; win32*) "$ocamlexec" "tools/mingwpath.ml" "$1" ;; *) echo "$1" ;; esac } case $ARCH,$src_spec in win32,yes) echo "Error: the -src option is currently not supported on Windows" exit 1;; win32) CAMLBIN=`mk_win_path "$CAMLBIN"`;; esac # Beware of the final \r in Win32 CAMLVERSION=`"$CAMLC" -version | tr -d "\r"` CAMLLIB=`"$CAMLC" -where | tr -d "\r"` case $CAMLVERSION in 1.*|2.*|3.0*|3.10*|3.11.[01]) echo "Your version of Objective-Caml is $CAMLVERSION." if [ "$force_caml_version" = "yes" ]; then echo "*Warning* You are compiling Coq with an outdated version of Objective-Caml." else echo " You need Objective-Caml 3.11.2 or later." echo " Configuration script failed!" 
exit 1 fi;; 3.11.2|3.12*|4.*) CAMLP4COMPAT="-loc loc" echo "You have Objective-Caml $CAMLVERSION. Good!";; *) echo "I found the Objective-Caml compiler but cannot find its version number!" echo "Is it installed properly?" echo "Configuration script failed!" exit 1;; esac CAMLTAG=OCAML`echo $CAMLVERSION | sed -e "s/\([1-9]\)\.\([0-9]*\).*/\1\2/g"` # For coqmktop & bytecode compiler if [ "$coq_debug_flag" = "-g" ]; then case $CAMLTAG in OCAML31*|OCAML4*) # Compilation debug flag coq_debug_flag_opt="-g" ;; esac fi # Native dynlink if [ "$natdynlink" = "yes" -a -f "$CAMLLIB"/dynlink.cmxa ]; then HASNATDYNLINK=true else HASNATDYNLINK=false fi case $HASNATDYNLINK,$ARCH,`uname -r`,$CAMLVERSION in true,Darwin,9.*,3.11.*) # ocaml 3.11.0 dynlink on MacOS 10.5 is buggy NATDYNLINKFLAG=os5fixme;; #Possibly a problem on 10.6.0/10.6.1/10.6.2 #May just be a 32 vs 64 problem for all 10.6.* true,Darwin,10.0.*,3.11.*) # Possibly a problem on 10.6.0 NATDYNLINKFLAG=os5fixme;; true,Darwin,10.1.*,3.11.*) # Possibly a problem on 10.6.1 NATDYNLINKFLAG=os5fixme;; true,Darwin,10.2.*,3.11.*) # Possibly a problem on 10.6.2 NATDYNLINKFLAG=os5fixme;; true,Darwin,10.*,3.11.*) if [ `getconf LONG_BIT` = "32" ]; then # Still a problem for x86_32 NATDYNLINKFLAG=os5fixme else # Not a problem for x86_64 NATDYNLINKFLAG=$HASNATDYNLINK fi;; *) NATDYNLINKFLAG=$HASNATDYNLINK;; esac # Camlp4 / Camlp5 configuration # Assume that camlp(4|5) binaries are at the same place as ocaml ones # (this should become configurable some day) CAMLP4BIN=${CAMLBIN} case $usecamlp5 in yes) CAMLP4=camlp5 CAMLP4MOD=gramlib if [ "$camlp5dir" != "" ]; then if [ -f "$camlp5dir/${CAMLP4MOD}.cma" ]; then CAMLP4LIB=$camlp5dir FULLCAMLP4LIB=$camlp5dir else echo "Cannot find camlp5 libraries in $camlp5dir (camlp5.cma not found)." echo "Configuration script failed!" exit 1 fi elif [ -f "${CAMLLIB}/camlp5/${CAMLP4MOD}.cma" ]; then CAMLP4LIB=+camlp5 FULLCAMLP4LIB=${CAMLLIB}/camlp5 elif [ -f "${CAMLLIB}/site-lib/${CAMLP4MOD}.cma" ]; then CAMLP4LIB=+site-lib/camlp5 FULLCAMLP4LIB=${CAMLLIB}/site-lib/camlp5 else echo "No Camlp5 installation found. Looking for Camlp4 instead..." usecamlp5=no fi esac # If we're (still...) going to use Camlp5, let's check its version case $usecamlp5 in yes) camlp4oexec=`echo "$camlp4oexec" | tr 4 5` case `"$camlp4oexec" -v 2>&1` in *"version 4.0"*|*5.00*) echo "Camlp5 version < 5.01 not supported." echo "Configuration script failed!" exit 1;; esac esac # We might now try to use Camlp4, either by explicit choice or # by lack of proper Camlp5 installation case $usecamlp5 in no) CAMLP4=camlp4 CAMLP4MOD=camlp4lib CAMLP4LIB=+camlp4 FULLCAMLP4LIB=${CAMLLIB}/camlp4 if [ ! -f "${FULLCAMLP4LIB}/${CAMLP4MOD}.cma" ]; then echo "No Camlp4 installation found." echo "Configuration script failed!" exit 1 fi camlp4oexec=${camlp4oexec}rf if [ "`"$camlp4oexec" 2>&1`" != "" ]; then echo "Error: $camlp4oexec not found or not executable." echo "Configuration script failed!" exit 1 fi esac # do we have a native compiler: test of ocamlopt and its version if [ "$best_compiler" = "opt" ] ; then if test -e "$nativecamlc" || test -e "`which $nativecamlc`"; then CAMLOPTVERSION=`"$nativecamlc" -v | sed -n -e 's|.*version* *\(.*\)$|\1|p' ` if [ ! -f "${FULLCAMLP4LIB}/${CAMLP4MOD}.cmxa" ]; then best_compiler=byte echo "Cannot find native-code $CAMLP4," echo "only the bytecode version of Coq will be available." else if [ "$CAMLOPTVERSION" != "$CAMLVERSION" ] ; then echo "Native and bytecode compilers do not have the same version!" 
fi echo "You have native-code compilation. Good!" fi else best_compiler=byte echo "You have only bytecode compilation." fi fi # OS dependent libraries OSDEPLIBS="-cclib -lunix" case $ARCH in sun4*) OS=`uname -r` case $OS in 5*) OS="Sun Solaris $OS" OSDEPLIBS="$OSDEPLIBS -cclib -lnsl -cclib -lsocket";; *) OS="Sun OS $OS" esac;; esac # lablgtk2 and CoqIDE IDEARCHFLAGS= IDEARCHFILE= IDEARCHDEF=X11 # -byte-only should imply -coqide byte, unless the user decides otherwise if [ "$best_compiler" = "byte" -a "$coqide_spec" = "no" ]; then coqide_spec=yes COQIDE=byte fi # Which coqide is asked ? which one is possible ? if [ "$coqide_spec" = "yes" -a "$COQIDE" = "no" ]; then echo "CoqIde disabled as requested." else case $lablgtkdir_spec in no) if lablgtkdirtmp=$(ocamlfind query lablgtk2 2> /dev/null); then if [ -f "$lablgtkdirtmp/glib.cmi" -a -f "$lablgtkdirtmp/glib.mli" ]; then lablgtkdirfoundmsg="LabelGtk2 found by ocamlfind" lablgtkdir=$lablgtkdirtmp LABLGTKLIB=$lablgtkdir # Pour le message utilisateur else echo "Headers missings in Lablgtk2 found by ocamlfind (glib.cmi/glib.mli not found)." fi fi if [ "$lablgtkdir" = "" -a -f "${CAMLLIB}/lablgtk2/glib.mli" -a -f "${CAMLLIB}/glib.mli" ]; then lablgtkdirfoundmsg="LablGtk2 found in ocaml lib directory" lablgtkdir=${CAMLLIB}/lablgtk2 LABLGTKLIB=+lablgtk2 # Pour le message utilisateur fi;; yes) if [ ! -d "$lablgtkdir" ]; then echo "$lablgtkdir is not a valid directory." echo "Configuration script failed!" exit 1 elif [ -f "$lablgtkdir/glib.cmi" -a -f "$lablgtkdir/glib.mli" ]; then lablgtkdirfoundmsg="LablGtk2 directory found" LABLGTKLIB=$lablgtkdir # Pour le message utilisateur else echo "Headers missing in LablGtk2 library (glib.cmi/glib.mli not found)." echo "Configuration script failed!" exit 1 fi;; esac if [ "$lablgtkdir" = "" ]; then echo "LablGtk2 not found: CoqIde will not be available." COQIDE=no elif [ -z "`grep -w convert_with_fallback "$lablgtkdir/glib.mli"`" ]; then echo "$lablgtkdirfoundmsg but too old: CoqIde will not be available." COQIDE=no; elif [ "$coqide_spec" = "yes" -a "$COQIDE" = "byte" ]; then echo "$lablgtkdirfoundmsg, bytecode CoqIde will be used as requested." COQIDE=byte elif [ ! -f "${CAMLLIB}/threads/threads.cmxa" ]; then echo "$lablgtkdirfoundmsg, no native threads: bytecode CoqIde will be available." COQIDE=byte else echo "$lablgtkdirfoundmsg, native threads: native CoqIde will be available." COQIDE=opt if [ "$nomacintegration_spec" = "no" ] && lablgtkosxdir=$(ocamlfind query lablgtkosx 2> /dev/null); then IDEARCHFLAGS=lablgtkosx.cmxa IDEARCHDEF=QUARTZ elif [ "$ARCH" = "win32" ]; then IDEARCHFLAGS= IDEARCHFILE=ide/ide_win32_stubs.o IDEARCHDEF=WIN32 fi fi fi case $COQIDE in byte|opt) LABLGTKINCLUDES="-I $LABLGTKLIB";; no) LABLGTKINCLUDES="";; esac [ x$lablgtkosxdir = x ] || LABLGTKINCLUDES="$LABLGTKINCLUDES -I $lablgtkosxdir" # strip command case $ARCH in Darwin) if [ "$HASNATDYNLINK" = "true" ] then STRIPCOMMAND="true" else STRIPCOMMAND="strip" fi;; *) if [ "$coq_profile_flag" = "-p" ] || [ "$coq_debug_flag" = "-g" ] then STRIPCOMMAND="true" else STRIPCOMMAND="strip" fi esac ### Test if documentation can be compiled (latex, hevea) if test "$with_doc" = "all" then for cmd in "latex" "hevea" ; do if test ! -x "`which $cmd`" then echo "$cmd was not found; documentation will not be available" with_doc=no break fi done fi ########################################### # bindir, libdir, mandir, docdir, etc. # OCaml only understand Windows filenames (C:\...) 
case $ARCH in win32) COQSRC=`mk_win_path "$COQSRC"` CAMLBIN=`mk_win_path "$CAMLBIN"` CAMLP4BIN=`mk_win_path "$CAMLP4BIN"` esac case $src_spec in no) COQTOP=${COQSRC} esac case $ARCH$CYGWIN in win32) W32PREF='C:\coq\' bindir_def="${W32PREF}bin" libdir_def="${W32PREF}lib" configdir_def="${W32PREF}config" datadir_def="${W32PREF}share" mandir_def="${W32PREF}man" docdir_def="${W32PREF}doc" emacslib_def="${W32PREF}emacs" coqdocdir_def="${W32PREF}latex";; *) bindir_def=/usr/local/bin libdir_def=/usr/local/lib/coq configdir_def=/etc/xdg/coq datadir_def=/usr/local/share/coq mandir_def=/usr/local/share/man docdir_def=/usr/local/share/doc/coq emacslib_def=/usr/local/share/emacs/site-lisp coqdocdir_def=/usr/local/share/texmf/tex/latex/misc;; esac emacs_def=emacs case $bindir_spec/$prefix_spec/$local in yes/*/*) BINDIR=$bindir ;; */yes/*) BINDIR=$prefix/bin ;; */*/true) BINDIR=$COQTOP/bin ;; *) printf "Where should I install the Coq binaries [%s]? " "$bindir_def" read BINDIR case $BINDIR in "") BINDIR=$bindir_def;; *) true;; esac;; esac case $libdir_spec/$prefix_spec/$local in yes/*/*) LIBDIR=$libdir;; */yes/*) libdir_spec=yes case $ARCH in win32) LIBDIR=$prefix ;; *) LIBDIR=$prefix/lib/coq ;; esac ;; */*/true) LIBDIR=$COQTOP ;; *) printf "Where should I install the Coq library [%s]? " "$libdir_def" read LIBDIR libdir_spec=yes case $LIBDIR in "") LIBDIR=$libdir_def;; *) true;; esac;; esac case $configdir_spec/$prefix_spec/$local in yes/*/*) CONFIGDIR=$configdir;; */yes/*) configdir_spec=yes case $ARCH in win32) CONFIGDIR=$prefix/config;; *) CONFIGDIR=$prefix/etc/xdg/coq;; esac;; */*/true) CONFIGDIR=$COQTOP/ide configdir_spec=yes;; *) printf "Where should I install the Coqide configuration files [%s]? " "$configdir_def" read CONFIGDIR case $CONFIGDIR in "") CONFIGDIR=$configdir_def;; *) configdir_spec=yes;; esac;; esac case $datadir_spec/$prefix_spec/$local in yes/*/*) DATADIR=$datadir;; */yes/*) DATADIR=$prefix/share/coq;; */*/true) DATADIR=$COQTOP/ide datadir_spec=yes;; *) printf "Where should I install the Coqide data files [%s]? " "$datadir_def" read DATADIR case $DATADIR in "") DATADIR=$datadir_def;; *) datadir_spec=yes;; esac;; esac case $mandir_spec/$prefix_spec/$local in yes/*/*) MANDIR=$mandir;; */yes/*) MANDIR=$prefix/share/man ;; */*/true) MANDIR=$COQTOP/man ;; *) printf "Where should I install the Coq man pages [%s]? " "$mandir_def" read MANDIR case $MANDIR in "") MANDIR=$mandir_def;; *) true;; esac;; esac case $docdir_spec/$prefix_spec/$local in yes/*/*) DOCDIR=$docdir;; */yes/*) DOCDIR=$prefix/share/doc/coq;; */*/true) DOCDIR=$COQTOP/doc;; *) printf "Where should I install the Coq documentation [%s]? " "$docdir_def" read DOCDIR case $DOCDIR in "") DOCDIR=$docdir_def;; *) true;; esac;; esac case $emacslib_spec/$prefix_spec/$local in yes/*/*) EMACSLIB=$emacslib;; */yes/*) case $ARCH in win32) EMACSLIB=$prefix/emacs ;; *) EMACSLIB=$prefix/share/emacs/site-lisp ;; esac ;; */*/true) EMACSLIB=$COQTOP/tools/emacs ;; *) printf "Where should I install the Coq Emacs mode [%s]? " "$emacslib_def" read EMACSLIB case $EMACSLIB in "") EMACSLIB=$emacslib_def;; *) true;; esac;; esac case $coqdocdir_spec/$prefix_spec/$local in yes/*/*) COQDOCDIR=$coqdocdir;; */yes/*) case $ARCH in win32) COQDOCDIR=$prefix/latex ;; *) COQDOCDIR=$prefix/share/emacs/site-lisp ;; esac ;; */*/true) COQDOCDIR=$COQTOP/tools/coqdoc ;; *) printf "Where should I install Coqdoc TeX/LaTeX files [%s]? 
" "$coqdocdir_def" read COQDOCDIR case $COQDOCDIR in "") COQDOCDIR=$coqdocdir_def;; *) true;; esac;; esac # Determine if we enable -custom by default (Windows and MacOS) CUSTOM_OS=no if [ "$ARCH" = "win32" ] || [ "$ARCH" = "Darwin" ]; then CUSTOM_OS=yes fi BUILDLDPATH="# you might want to set CAML_LD_LIBRARY_PATH by hand!" case $coqrunbyteflags_spec/$local/$custom_spec/$CUSTOM_OS in yes/*/*/*) COQRUNBYTEFLAGS="$coqrunbyteflags";; */*/yes/*|*/*/*/yes) COQRUNBYTEFLAGS="-custom";; */true/*/*) COQRUNBYTEFLAGS="-dllib -lcoqrun -dllpath '$COQTOP'/kernel/byterun";; *) COQRUNBYTEFLAGS="-dllib -lcoqrun -dllpath '$LIBDIR'" BUILDLDPATH="export CAML_LD_LIBRARY_PATH='$COQTOP'/kernel/byterun";; esac case $coqtoolsbyteflags_spec/$custom_spec/$CUSTOM_OS in yes/*/*) COQTOOLSBYTEFLAGS="$coqtoolsbyteflags";; */yes/*|*/*/yes) COQTOOLSBYTEFLAGS="-custom";; *) COQTOOLSBYTEFLAGS="";; esac # case $emacs_spec in # no) printf "Which Emacs command should I use to compile coq.el [%s]? " "$emacs_def" # read EMACS # case $EMACS in # "") EMACS="$emacs_def";; # *) true;; # esac;; # yes) EMACS="$emacs";; # esac ########################################### # Summary of the configuration echo "" echo " Coq top directory : $COQTOP" echo " Architecture : $ARCH" if test ! -z "$OS" ; then echo " Operating system : $OS" fi echo " Coq VM bytecode link flags : $COQRUNBYTEFLAGS" echo " Coq tools bytecode link flags : $COQTOOLSBYTEFLAGS" echo " OS dependent libraries : $OSDEPLIBS" echo " Objective-Caml/Camlp4 version : $CAMLVERSION" echo " Objective-Caml/Camlp4 binaries in : $CAMLBIN" echo " Objective-Caml library in : $CAMLLIB" echo " Camlp4 library in : $CAMLP4LIB" if test "$best_compiler" = opt ; then echo " Native dynamic link support : $HASNATDYNLINK" fi if test "$COQIDE" != "no"; then echo " Lablgtk2 library in : $LABLGTKLIB" fi if test "$IDEARCHDEF" = "QUARTZ"; then echo " Mac OS integration is on" fi if test "$with_doc" = "all"; then echo " Documentation : All" else echo " Documentation : None" fi echo " CoqIde : $COQIDE" echo " Web browser : $BROWSER" echo " Coq web site : $WWWCOQ" echo "" echo " Paths for true installation:" echo " binaries will be copied in $BINDIR" echo " library will be copied in $LIBDIR" echo " config files will be copied in $CONFIGDIR" echo " data files will be copied in $DATADIR" echo " man pages will be copied in $MANDIR" echo " documentation will be copied in $DOCDIR" echo " emacs mode will be copied in $EMACSLIB" echo "" ################################################## # Building the $COQTOP/dev/ocamldebug-coq file ################################################## OCAMLDEBUGCOQ=$COQSRC/dev/ocamldebug-coq if test "$coq_debug_flag" = "-g" ; then rm -f $OCAMLDEBUGCOQ sed -e "s|COQTOPDIRECTORY|$COQTOP|" \ -e "s|COQLIBDIRECTORY|$LIBDIR|" \ -e "s|CAMLBINDIRECTORY|$CAMLBIN|" \ -e "s|CAMLP4LIBDIRECTORY|$FULLCAMLP4LIB|"\ $OCAMLDEBUGCOQ.template > $OCAMLDEBUGCOQ chmod a-w,a+x $OCAMLDEBUGCOQ fi #################################################### # Fixing lablgtk types (before/after 2.6.0) #################################################### if [ ! 
"$COQIDE" = "no" ]; then if grep "class view " "$lablgtkdir/gText.mli" | grep -q "\[>" ; then if grep -q "?accepts_tab:bool" "$lablgtkdir/gText.mli" ; then cp -f ide/undo_lablgtk_ge212.mli ide/undo.mli else cp -f ide/undo_lablgtk_ge26.mli ide/undo.mli fi else cp -f ide/undo_lablgtk_lt26.mli ide/undo.mli fi fi ############################################## # Creation of configuration files ############################################## mlconfig_file="$COQSRC/config/coq_config.ml" config_file="$COQSRC/config/Makefile" config_template="$COQSRC/config/Makefile.template" ### Warning !! ### After this line, be careful when using variables, ### since some of them (e.g. $COQSRC) will be escaped escape_string () { "$ocamlexec" "tools/escape_string.ml" "$1" } # Escaped version of browser command BROWSER=`escape_string "$BROWSER"` # Under Windows, we now escape the backslashes that will ends in # ocaml strings (coq_config.ml) or in Makefile variables. case $ARCH in win32) COQTOP=`escape_string "$COQTOP"` BINDIR=`escape_string "$BINDIR"` COQSRC=`escape_string "$COQSRC"` LIBDIR=`escape_string "$LIBDIR"` CONFIGDIR=`escape_string "$CONFIGDIR"` DATADIR=`escape_string "$DATADIR"` CAMLBIN=`escape_string "$CAMLBIN"` CAMLLIB=`escape_string "$CAMLLIB"` MANDIR=`escape_string "$MANDIR"` DOCDIR=`escape_string "$DOCDIR"` EMACSLIB=`escape_string "$EMACSLIB"` COQDOCDIR=`escape_string "$COQDOCDIR"` CAMLP4BIN=`escape_string "$CAMLP4BIN"` CAMLP4LIB=`escape_string "$CAMLP4LIB"` LABLGTKINCLUDES=`escape_string "$LABLGTKINCLUDES"` COQRUNBYTEFLAGS=`escape_string "$COQRUNBYTEFLAGS"` COQTOOLSBYTEFLAGS=`escape_string "$COQTOOLSBYTEFLAGS"` BUILDLDPATH=`escape_string "$BUILDLDPATH"` ocamlexec=`escape_string "$ocamlexec"` bytecamlc=`escape_string "$bytecamlc"` nativecamlc=`escape_string "$nativecamlc"` ocamlmklibexec=`escape_string "$ocamlmklibexec"` ocamldepexec=`escape_string "$ocamldepexec"` ocamldocexec=`escape_string "$ocamldocexec"` ocamllexexec=`escape_string "$ocamllexexec"` ocamlyaccexec=`escape_string "$ocamlyaccexec"` camlp4oexec=`escape_string "$camlp4oexec"` ;; esac case $libdir_spec in yes) LIBDIR_OPTION="Some \"$LIBDIR\"";; *) LIBDIR_OPTION="None";; esac case $configdir_spec in yes) CONFIGDIR_OPTION="Some \"$CONFIGDIR\"";; *) CONFIGDIR_OPTION="None";; esac case $datadir_spec in yes) DATADIR_OPTION="Some \"$DATADIR\"";; *) DATADIR_OPTION="None";; esac ##################################################### # Building the $COQTOP/config/coq_config.ml file ##################################################### rm -f "$mlconfig_file" cat << END_OF_COQ_CONFIG > $mlconfig_file (* DO NOT EDIT THIS FILE: automatically generated by ../configure *) let local = $local let coqrunbyteflags = "$COQRUNBYTEFLAGS" let coqlib = $LIBDIR_OPTION let configdir = $CONFIGDIR_OPTION let datadir = $DATADIR_OPTION let docdir = "$DOCDIR" let ocaml = "$ocamlexec" let ocamlc = "$bytecamlc" let ocamlopt = "$nativecamlc" let ocamlmklib = "$ocamlmklibexec" let ocamldep = "$ocamldepexec" let ocamldoc = "$ocamldocexec" let ocamlyacc = "$ocamlyaccexec" let ocamllex = "$ocamllexexec" let camlbin = "$CAMLBIN" let camllib = "$CAMLLIB" let camlp4 = "$CAMLP4" let camlp4o = "$camlp4oexec" let camlp4bin = "$CAMLP4BIN" let camlp4lib = "$CAMLP4LIB" let camlp4compat = "$CAMLP4COMPAT" let coqideincl = "$LABLGTKINCLUDES" let cflags = "$cflags" let best = "$best_compiler" let arch = "$ARCH" let has_coqide = "$COQIDE" let gtk_platform = \`$IDEARCHDEF let has_natdynlink = $HASNATDYNLINK let natdynlinkflag = "$NATDYNLINKFLAG" let osdeplibs = "$OSDEPLIBS" let version = 
"$VERSION" let caml_version = "$CAMLVERSION" let date = "$DATE" let compile_date = "$COMPILEDATE" let vo_magic_number = $VOMAGIC let state_magic_number = $STATEMAGIC let exec_extension = "$EXE" let with_geoproof = ref $with_geoproof let browser = "$BROWSER" let wwwcoq = "$WWWCOQ" let wwwrefman = wwwcoq ^ "distrib/" ^ version ^ "/refman/" let wwwstdlib = wwwcoq ^ "distrib/" ^ version ^ "/stdlib/" let localwwwrefman = "file:/" ^ docdir ^ "html/refman" END_OF_COQ_CONFIG # to be sure printf is found on windows when spaces occur in PATH variable PRINTF=`which printf` # Subdirectories of theories/ added in coq_config.ml subdirs () { (cd $1; find * \( -name .svn -prune \) -o \( -type d -exec $PRINTF "\"%s\";\n" {} \; \) >> "$mlconfig_file") } echo "let theories_dirs = [" >> "$mlconfig_file" subdirs theories echo "]" >> "$mlconfig_file" echo "let plugins_dirs = [" >> "$mlconfig_file" subdirs plugins echo "]" >> "$mlconfig_file" chmod a-w "$mlconfig_file" ############################################### # Building the $COQTOP/config/Makefile file ############################################### rm -f "$config_file" cat << END_OF_MAKEFILE > $config_file ###### config/Makefile : Configuration file for Coq ############## # # # This file is generated by the script "configure" # # DO NOT EDIT IT !! DO NOT EDIT IT !! DO NOT EDIT IT !! # # If something is wrong below, then rerun the script "configure" # # with the good options (see the file INSTALL). # # # ################################################################## #Variable used to detect whether ./configure has run successfully. COQ_CONFIGURED=yes # Local use (no installation) LOCAL=$local # Bytecode link flags for VM ("-custom" or "-dllib -lcoqrun") COQRUNBYTEFLAGS=$COQRUNBYTEFLAGS COQTOOLSBYTEFLAGS=$COQTOOLSBYTEFLAGS $BUILDLDPATH # Paths for true installation # BINDIR=path where coqtop, coqc, coqmktop, coq-tex, coqdep, gallina and # do_Makefile will reside # LIBDIR=path where the Coq library will reside # MANDIR=path where to install manual pages # EMACSDIR=path where to put Coq's Emacs mode (coq.el) BINDIR="$BINDIR" COQLIBINSTALL="$LIBDIR" CONFIGDIR="$CONFIGDIR" DATADIR="$DATADIR" MANDIR="$MANDIR" DOCDIR="$DOCDIR" EMACSLIB="$EMACSLIB" EMACS=$EMACS # Path to Coq distribution COQSRC="$COQSRC" VERSION=$VERSION # Ocaml version number CAMLVERSION=$CAMLTAG # Ocaml libraries CAMLLIB="$CAMLLIB" # Ocaml .h directory CAMLHLIB="$CAMLLIB" # Camlp4 : flavor, binaries, libraries ... 
# NB : CAMLP4BIN can be empty if camlp4 is in the PATH # NB : avoid using CAMLP4LIB (conflict under Windows) CAMLP4BIN="$CAMLP4BIN" CAMLP4=$CAMLP4 CAMLP4O=$camlp4oexec CAMLP4COMPAT=$CAMLP4COMPAT MYCAMLP4LIB="$CAMLP4LIB" # LablGTK COQIDEINCLUDES=$LABLGTKINCLUDES # Objective-Caml compile command OCAML="$ocamlexec" OCAMLC="$bytecamlc" OCAMLMKLIB="$ocamlmklibexec" OCAMLOPT="$nativecamlc" OCAMLDEP="$ocamldepexec" OCAMLDOC="$ocamldocexec" OCAMLLEX="$ocamllexexec" OCAMLYACC="$ocamlyaccexec" # Caml link command and Caml make top command CAMLLINK="$bytecamlc" CAMLOPTLINK="$nativecamlc" CAMLMKTOP="$ocamlmktopexec" # Caml flags CAMLFLAGS=-rectypes $coq_annotate_flag # Compilation debug flags CAMLDEBUG=$coq_debug_flag CAMLDEBUGOPT=$coq_debug_flag_opt # User compilation flag USERFLAGS= # Flags for GCC CFLAGS=$cflags # Compilation profile flag CAMLTIMEPROF=$coq_profile_flag # The best compiler: native (=opt) or bytecode (=byte) if no native compiler BEST=$best_compiler # Your architecture # Can be obtain by UNIX command arch ARCH=$ARCH HASNATDYNLINK=$NATDYNLINKFLAG # Supplementary libs for some systems, currently: # . Sun Solaris: -cclib -lunix -cclib -lnsl -cclib -lsocket # . others : -cclib -lunix OSDEPLIBS=$OSDEPLIBS # executable files extension, currently: # Unix systems: # Win32 systems : .exe EXE=$EXE DLLEXT=$DLLEXT # the command MKDIR (try to replace it with mkdirhier if you have problems) MKDIR=mkdir -p # where to put the coqdoc.sty style file COQDOCDIR="$COQDOCDIR" #the command STRIP # Unix systems and profiling: true # Unix systems and no profiling: strip STRIP=$STRIPCOMMAND # CoqIde (no/byte/opt) HASCOQIDE=$COQIDE IDEOPTFLAGS=$IDEARCHFLAGS IDEOPTDEPS=$IDEARCHFILE IDEOPTINT=$IDEARCHDEF # Defining REVISION CHECKEDOUT=$checkedout # Option to control compilation and installation of the documentation WITHDOC=$with_doc # make or sed are bogus and believe lines not terminating by a return # are inexistent END_OF_MAKEFILE chmod a-w "$config_file" ################################################## # The end #################################################### echo "If anything in the above is wrong, please restart './configure'." echo echo "*Warning* To compile the system for a new architecture" echo " don't forget to do a 'make archclean' before './configure'." coq-8.4pl2/scripts/0000750000175000001440000000000012127276527013330 5ustar notinuserscoq-8.4pl2/scripts/coqmktop.ml0000640000175000001440000002546012121620060015502 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Sys.command ("\""^cmd^"\"") else Sys.command (* Objects to link *) (* 1. Core objects *) let ocamlobjs = ["str.cma";"unix.cma";"nums.cma"] let dynobjs = ["dynlink.cma"] let camlp4objs = if Coq_config.camlp4 = "camlp5" then ["gramlib.cma"] else ["camlp4lib.cma"] let libobjs = ocamlobjs @ camlp4objs let spaces = Str.regexp "[ \t\n]+" let split_list l = Str.split spaces l let copts = split_list Tolink.copts let core_objs = split_list Tolink.core_objs let core_libs = split_list Tolink.core_libs (* 3. Toplevel objects *) let camlp4topobjs = if Coq_config.camlp4 = "camlp5" then ["camlp5_top.cma"; "pa_o.cmo"; "pa_extend.cmo"] else [ "Camlp4Top.cmo"; "Camlp4Parsers/Camlp4OCamlRevisedParser.cmo"; "Camlp4Parsers/Camlp4OCamlParser.cmo"; "Camlp4Parsers/Camlp4GrammarParser.cmo" ] let topobjs = camlp4topobjs let gramobjs = [] let notopobjs = gramobjs (* 4. 
High-level tactics objects *) (* environment *) let opt = ref false let full = ref false let top = ref false let echo = ref false let no_start = ref false let is_ocaml4 = Coq_config.caml_version.[0] <> '3' (* Since the .cma are given with their relative paths (e.g. "lib/clib.cma"), we only need to include directories mentionned in the temp main ml file below (for accessing the corresponding .cmi). *) let src_dirs = [ []; ["lib"]; ["toplevel"]; ["kernel";"byterun"]; ] let includes () = let coqlib = if !Flags.boot then "." else Envars.coqlib () in let mkdir d = "\"" ^ List.fold_left Filename.concat coqlib d ^ "\"" in (List.fold_right (fun d l -> "-I" :: mkdir d :: l) src_dirs []) @ ["-I"; "\"" ^ Envars.camlp4lib () ^ "\""] @ (if is_ocaml4 then ["-I"; "+compiler-libs"] else []) (* Transform bytecode object file names in native object file names *) let native_suffix f = if Filename.check_suffix f ".cmo" then (Filename.chop_suffix f ".cmo") ^ ".cmx" else if Filename.check_suffix f ".cma" then (Filename.chop_suffix f ".cma") ^ ".cmxa" else if Filename.check_suffix f ".a" then f else failwith ("File "^f^" has not extension .cmo, .cma or .a") (* Transforms a file name in the corresponding Caml module name. *) let rem_ext_regexpr = Str.regexp "\\(.*\\)\\.\\(cm..?\\|ml\\)" let module_of_file name = let s = Str.replace_first rem_ext_regexpr "\\1" (Filename.basename name) in String.capitalize s (* Build the list of files to link and the list of modules names *) let files_to_link userfiles = let dyn_objs = if not !opt || Coq_config.has_natdynlink then dynobjs else [] in let toplevel_objs = if !top then topobjs else if !opt then notopobjs else [] in let objs = dyn_objs @ libobjs @ core_objs @ toplevel_objs in let modules = List.map module_of_file (objs @ userfiles) in let libs = dyn_objs @ libobjs @ core_libs @ toplevel_objs in let libstolink = (if !opt then List.map native_suffix libs else libs) @ userfiles in (modules, libstolink) (* Gives the list of all the directories under [dir]. Uses [Unix] (it is hard to do without it). *) let all_subdirs dir = let l = ref [dir] in let add f = l := f :: !l in let rec traverse dir = let dirh = try opendir dir with Unix_error _ -> invalid_arg "all_subdirs" in try while true do let f = readdir dirh in if f <> "." && f <> ".." 
then let file = Filename.concat dir f in if (stat file).st_kind = S_DIR then begin add file; traverse file end done with End_of_file -> closedir dirh in traverse dir; List.rev !l (* usage *) let usage () = prerr_endline "Usage: coqmktop files\ \nFlags are:\ \n -coqlib dir Specify where the Coq object files are\ \n -camlbin dir Specify where the OCaml binaries are\ \n -camlp4bin dir Specify where the Camlp4/5 binaries are\ \n -o exec-file Specify the name of the resulting toplevel\ \n -boot Run in boot mode\ \n -echo Print calls to external commands\ \n -full Link high level tactics\ \n -opt Compile in native code\ \n -top Build Coq on a OCaml toplevel (incompatible with -opt)\ \n -R dir Add recursively dir to OCaml search path\ \n"; exit 1 (* parsing of the command line *) let parse_args () = let rec parse (op,fl) = function | [] -> List.rev op, List.rev fl | "-coqlib" :: d :: rem -> Flags.coqlib_spec := true; Flags.coqlib := d ; parse (op,fl) rem | "-coqlib" :: _ -> usage () | "-camlbin" :: d :: rem -> Flags.camlbin_spec := true; Flags.camlbin := d ; parse (op,fl) rem | "-camlbin" :: _ -> usage () | "-camlp4bin" :: d :: rem -> Flags.camlp4bin_spec := true; Flags.camlp4bin := d ; parse (op,fl) rem | "-camlp4bin" :: _ -> usage () | "-boot" :: rem -> Flags.boot := true; parse (op,fl) rem | "-opt" :: rem -> opt := true ; parse (op,fl) rem | "-full" :: rem -> full := true ; parse (op,fl) rem | "-top" :: rem -> top := true ; parse (op,fl) rem | "-v8" :: rem -> Printf.eprintf "warning: option -v8 deprecated"; parse (op,fl) rem | "-echo" :: rem -> echo := true ; parse (op,fl) rem | ("-cclib"|"-ccopt"|"-I"|"-o"|"-w" as o) :: rem' -> begin match rem' with | a :: rem -> parse (a::o::op,fl) rem | [] -> usage () end | "-R" :: a :: rem -> parse ((List.rev(List.flatten (List.map (fun d -> ["-I";d]) (all_subdirs a))))@op,fl) rem | "-R" :: [] -> usage () | ("-noassert"|"-compact"|"-g"|"-p"|"-thread"|"-dtypes" as o) :: rem -> parse (o::op,fl) rem | ("-h"|"--help") :: _ -> usage () | ("-no-start") :: rem -> no_start:=true; parse (op, fl) rem | f :: rem -> if Filename.check_suffix f ".ml" or Filename.check_suffix f ".cmx" or Filename.check_suffix f ".cmo" or Filename.check_suffix f ".cmxa" or Filename.check_suffix f ".cma" or Filename.check_suffix f ".c" then parse (op,f::fl) rem else begin prerr_endline ("Don't know what to do with " ^ f); exit 1 end in parse ([Coq_config.osdeplibs],[]) (List.tl (Array.to_list Sys.argv)) let clean file = let rm f = if Sys.file_exists f then Sys.remove f in let basename = Filename.chop_suffix file ".ml" in if not !echo then begin rm file; rm (basename ^ ".o"); rm (basename ^ ".cmi"); rm (basename ^ ".cmo"); rm (basename ^ ".cmx") end (* Creates another temporary file for Dynlink if needed *) let tmp_dynlink()= let tmp = Filename.temp_file "coqdynlink" ".ml" in let _ = Sys.command ("echo \"Dynlink.init();;\" > "^tmp) in tmp (* Initializes the kind of loading in the main program *) let declare_loading_string () = if not !top then "Mltop.remove ();;" else "begin try\ \n (* Enable rectypes in the toplevel if it has the directive #rectypes *)\ \n begin match Hashtbl.find Toploop.directive_table \"rectypes\" with\ \n | Toploop.Directive_none f -> f ()\ \n | _ -> ()\ \n end\ \n with\ \n | Not_found -> ()\ \n end;;\ \n\ \n let ppf = Format.std_formatter;;\ \n Mltop.set_top\ \n {Mltop.load_obj=\ \n (fun f -> if not (Topdirs.load_file ppf f) then Util.error (\"Could not load plugin \"^f));\ \n Mltop.use_file=Topdirs.dir_use ppf;\ \n Mltop.add_dir=Topdirs.dir_directory;\ \n 
Mltop.ml_loop=(fun () -> Toploop.loop ppf) };;\ \n" (* create a temporary main file to link *) let create_tmp_main_file modules = let main_name,oc = Filename.open_temp_file "coqmain" ".ml" in try (* Add the pre-linked modules *) output_string oc "List.iter Mltop.add_known_module [\""; output_string oc (String.concat "\";\"" modules); output_string oc "\"];;\n"; (* Initializes the kind of loading *) output_string oc (declare_loading_string()); (* Start the toplevel loop *) if not !no_start then output_string oc "Coqtop.start();;\n"; close_out oc; main_name with reraise -> clean main_name; raise reraise (* main part *) let main () = let (options, userfiles) = parse_args () in (* which ocaml command to invoke *) let camlbin = Envars.camlbin () in let prog = if !opt then begin (* native code *) if !top then failwith "no custom toplevel in native code !"; let ocamloptexec = Filename.quote (Filename.concat camlbin "ocamlopt") in ocamloptexec^" -linkall" end else (* bytecode (we shunt ocamlmktop script which fails on win32) *) let ocamlmktoplib = if is_ocaml4 then " ocamlcommon.cma ocamlbytecomp.cma ocamltoplevel.cma" else " toplevellib.cma" in let ocamlcexec = Filename.quote (Filename.concat camlbin "ocamlc") in let ocamlccustom = Printf.sprintf "%s %s -linkall " ocamlcexec Coq_config.coqrunbyteflags in (if !top then ocamlccustom^ocamlmktoplib else ocamlccustom) in (* files to link *) let (modules, tolink) = files_to_link userfiles in (*file for dynlink *) let dynlink= if not (!opt || !top) then [tmp_dynlink()] else [] in (* the list of the loaded modules *) let main_file = create_tmp_main_file modules in try let args = options @ includes () @ copts @ tolink @ dynlink in let args = args @ [ Filename.quote main_file ] in (* add topstart.cmo explicitly because we shunted ocamlmktop wrapper *) let args = if !top then args @ [ "topstart.cmo" ] else args in (* Now, with the .cma, we MUST use the -linkall option *) let command = String.concat " " (prog::"-rectypes"::args) in if !echo then begin print_endline command; print_endline ("(command length is " ^ (string_of_int (String.length command)) ^ " characters)"); flush Pervasives.stdout end; let retcode = safe_sys_command command in clean main_file; (* command gives the exit code in HSB, and signal in LSB !!! *) if retcode > 255 then retcode lsr 8 else retcode with reraise -> clean main_file; raise reraise let retcode = try Printexc.print main () with any -> 1 let _ = exit retcode coq-8.4pl2/scripts/coqc.ml0000640000175000001440000001443012010532755014577 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ". On essaye au maximum d'utiliser les modules Sys et Filename pour que la portabilit soit maximale, mais il reste encore des appels des fonctions du module Unix. Ceux-ci sont prfixs par "Unix." 
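   (English summary of the note above, with an illustrative example: this
   wrapper mostly uses the Sys and Filename modules for portability, and the
   few remaining calls to the Unix module are written with an explicit
   Unix. prefix.  A call such as   coqc -verbose Foo.v   is turned, via
   make_compilation_args below, into an invocation of the selected coqtop
   binary of roughly the form   coqtop -compile-verbose Foo  .)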
*) (* environment *) let environment = Unix.environment () let best = if Coq_config.arch = "win32" then "" else ("."^Coq_config.best) let binary = ref ("coqtop" ^ best) let image = ref "" (* coqc options *) let verbose = ref false (* Verifies that a string starts by a letter and do not contain others caracters than letters, digits, or `_` *) let check_module_name s = let err c = output_string stderr "Invalid module name: "; output_string stderr s; output_string stderr " character "; if c = '\'' then output_string stderr "\"'\"" else (output_string stderr"'"; output_char stderr c; output_string stderr"'"); output_string stderr " is not allowed in module names\n"; exit 1 in match String.get s 0 with | 'a' .. 'z' | 'A' .. 'Z' -> for i = 1 to (String.length s)-1 do match String.get s i with | 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' -> () | c -> err c done | c -> err c let rec make_compilation_args = function | [] -> [] | file :: fl -> let dirname = Filename.dirname file in let basename = Filename.basename file in let modulename = if Filename.check_suffix basename ".v" then Filename.chop_suffix basename ".v" else basename in check_module_name modulename; let file = Filename.concat dirname modulename in (if !verbose then "-compile-verbose" else "-compile") :: file :: (make_compilation_args fl) (* compilation of files [files] with command [command] and args [args] *) let compile command args files = let args' = command :: args @ (make_compilation_args files) in match Sys.os_type with | "Win32" -> let pid = Unix.create_process_env command (Array.of_list args') environment Unix.stdin Unix.stdout Unix.stderr in let status = snd (Unix.waitpid [] pid) in let errcode = match status with Unix.WEXITED c|Unix.WSTOPPED c|Unix.WSIGNALED c -> c in exit errcode | _ -> Unix.execvpe command (Array.of_list args') environment (* parsing of the command line * * special treatment for -bindir and -i. 
* other options are passed to coqtop *) let usage () = Usage.print_usage_coqc () ; flush stderr ; exit 1 let parse_args () = let rec parse (cfiles,args) = function | [] -> List.rev cfiles, List.rev args | ("-verbose" | "--verbose") :: rem -> verbose := true ; parse (cfiles,args) rem | "-image" :: f :: rem -> image := f; parse (cfiles,args) rem | "-image" :: [] -> usage () | "-byte" :: rem -> binary := "coqtop.byte"; parse (cfiles,args) rem | "-opt" :: rem -> binary := "coqtop.opt"; parse (cfiles,args) rem | "-libdir" :: _ :: rem -> print_string "Warning: option -libdir deprecated and ignored\n"; flush stdout; parse (cfiles,args) rem | ("-db"|"-debugger") :: rem -> print_string "Warning: option -db/-debugger deprecated and ignored\n";flush stdout; parse (cfiles,args) rem | ("-?"|"-h"|"-H"|"-help"|"--help") :: _ -> usage () | ("-outputstate"|"-inputstate"|"-is" |"-load-vernac-source"|"-l"|"-load-vernac-object" |"-load-ml-source"|"-require"|"-load-ml-object" |"-init-file"|"-dump-glob"|"-compat"|"-coqlib" as o) :: rem -> begin match rem with | s :: rem' -> parse (cfiles,s::o::args) rem' | [] -> usage () end | ("-I"|"-include" as o) :: rem -> begin match rem with | s :: "-as" :: t :: rem' -> parse (cfiles,t::"-as"::s::o::args) rem' | s :: "-as" :: [] -> usage () | s :: rem' -> parse (cfiles,s::o::args) rem' | [] -> usage () end | "-R" :: s :: "-as" :: t :: rem -> parse (cfiles,t::"-as"::s::"-R"::args) rem | "-R" :: s :: "-as" :: [] -> usage () | "-R" :: s :: t :: rem -> parse (cfiles,t::s::"-R"::args) rem | ("-notactics"|"-debug"|"-nolib"|"-boot" |"-batch"|"-nois"|"-noglob"|"-no-glob" |"-q"|"-full"|"-profile"|"-just-parsing"|"-echo" |"-unsafe"|"-quiet" |"-silent"|"-m"|"-xml"|"-v7"|"-v8"|"-beautify"|"-strict-implicit" |"-dont-load-proofs"|"-load-proofs"|"-force-load-proofs" |"-impredicative-set"|"-vm" as o) :: rem -> parse (cfiles,o::args) rem | ("-where") :: _ -> (try print_endline (Envars.coqlib ()) with Util.UserError(_,pps) -> Pp.msgerrnl (Pp.hov 0 pps)); exit 0 | ("-config" | "--config") :: _ -> Usage.print_config (); exit 0 | ("-v"|"--version") :: _ -> Usage.version 0 | f :: rem -> if Sys.file_exists f then parse (f::cfiles,args) rem else let fv = f ^ ".v" in if Sys.file_exists fv then parse (fv::cfiles,args) rem else begin prerr_endline ("coqc: "^f^": no such file or directory") ; exit 1 end in parse ([],[]) (List.tl (Array.to_list Sys.argv)) (* main: we parse the command line, define the command to compile files * and then call the compilation on each file *) let main () = let cfiles, args = parse_args () in if cfiles = [] then begin prerr_endline "coqc: too few arguments" ; usage () end; let coqtopname = if !image <> "" then !image else Filename.concat Envars.coqbin (!binary ^ Coq_config.exec_extension) in (* List.iter (compile coqtopname args) cfiles*) Unix.handle_unix_error (compile coqtopname args) cfiles let _ = Printexc.print main () coq-8.4pl2/README.win0000640000175000001440000000376311554053562013322 0ustar notinusersTHE COQ V8 SYSTEM ================= This file contains remarks specific to the windows port of Coq. INSTALLATION. ============= The Coq package for Windows comes with an auto-installer. It will install Coq binaries and libraries under any directory you specify (C:\Program Files\Coq is the default path). It also creates shortcuts in the Windows menus. Alternatively, you can launch Coq using coqide.exe or coqtop.exe in the bin sub-directory of the installation (C:\Program Files\Coq\bin by default). COMPILATION. 
============ If you want to install coq, you had better transfer the precompiled distribution. If you really need to recompile under Windows, here are some indications: 1- Install ocaml for Windows (MinGW port). See: http://caml.inria.fr 2- Install a shell environment with at least: - a C compiler (gcc), - the GNU make utility The Cygwin environment is well suited for compiling Coq (official packages are made using Cygwin) See: http://www.cygwin.com 3- In order to compile Coqide, you will need the LablGTK library See: http://wwwfun.kurims.kyoto-u.ac.jp/soft/olabl/lablgtk.html You also need to install the GTK libraries for Windows (see the installation instruction for LablGTK) 4- In a shell window, type successively ./configure make world make install 5- Though not nescessary, you can find useful: - Windows version of (X)Emacs: it is a powerful environment for developpers with coloured syntax, modes for compilation and debug, and many more. It is free. See: http://www.gnu.org/software. - Windows subversion client (very useful if you have access to the Coq archive). Good luck :-) Alternatively, it is now possible (and even recommended ...) to build Windows executables of coq from Linux thanks to a mingw cross-compiler. If interested, please contact us for more details. The Coq Team. coq-8.4pl2/CREDITS0000640000175000001440000001556311675653503012674 0ustar notinusersThe "Coq proof assistant" was jointly developed by - INRIA Formel, Coq, LogiCal, ProVal, TypiCal, Marelle, pi.r2 projects (starting 1985), - Laboratoire de l'Informatique du Parallelisme (LIP) associated to CNRS and ENS Lyon (Sep. 1989 to Aug. 1997), - Laboratoire de Recherche en Informatique (LRI) associated to CNRS and university Paris Sud (since Sep. 1997), - Laboratoire d'Informatique de l'Ecole Polytechnique (LIX) associated to CNRS and Ecole Polytechnique (since Jan. 2003). - Laboratoire PPS associated to CNRS and university Paris 7 (since Jan. 2009). All files of the "Coq proof assistant" in directories or sub-directories of config dev ide interp kernel lib library parsing pretyping proofs scripts states tactics test-suite theories tools toplevel are distributed under the terms of the GNU Lesser General Public License Version 2.1 (see file LICENSE). These files are COPYRIGHT 1999-2010, The Coq development team, CNRS, INRIA and Universit Paris Sud. Files from the directory doc are distributed as indicated in file doc/LICENCE. The following directories contain independent contributions supported by the Coq development team. All of them are released under the terms of the GNU Lesser General Public License Version 2.1. 
plugins/cc developed by Pierre Corbineau (ENS Cachan, 2001, LRI, 2001-2005, Radboud University at Nijmegen, 2005-2008) plugins/correctness developed by Jean-Christophe Fillitre (LRI, 1999-2001) plugins/dp developed by Nicolas Ayache (LRI, 2005-2006) and Jean-Christophe Fillitre (LRI, 2005-2008) plugins/extraction developed by Pierre Letouzey (LRI, 2000-2004, PPS, 2005-now) plugins/field developed by David Delahaye and Micaela Mayero (INRIA-LogiCal, 2001) plugins/firstorder developed by Pierre Corbineau (LRI, 2003-2008) plugins/fourier developed by Loc Pottier (INRIA-Lemme, 2001) plugins/funind developed by Pierre Courtieu (INRIA-Lemme, 2003-2004, CNAM, 2004-2008), Julien Forest (INRIA-Everest, 2006, CNAM, 2007-2008) and Yves Bertot (INRIA-Marelle, 2005-2006) plugins/omega developed by Pierre Crgut (France Telecom R&D, 1996) plugins/nsatz developed by Loc Pottier (INRIA-Marelle, 2009) plugins/ring developed by Samuel Boutin (INRIA-Coq, 1996) and Patrick Loiseleur (LRI, 1997-1999) plugins/romega developed by Pierre Crgut (France Telecom R&D, 2001-2004) plugins/rtauto developed by Pierre Corbineau (LRI, 2005) plugins/setoid_ring developed by Benjamin Grgoire (INRIA-Everest, 2005-2006), Assia Mahboubi, Laurent Thry (INRIA-Marelle, 2006) and Bruno Barras (INRIA LogiCal, 2005-2006), plugins/subtac developed by Matthieu Sozeau (LRI, 2005-2008) plugins/xml developed by Claudio Sacerdoti (Univ. Bologna, 2000-2005) as part of the HELM and MoWGLI projects; extension by Cezary Kaliszyk as part of the ProofWeb project (Radbout University at Nijmegen, 2008) plugins/micromega developed by Frdric Besson (IRISA/INRIA, 2006-2008), with some extensions by Evgeny Makarov (INRIA, 2007); sum-of-squares solver and interface to the csdp solver uses code from John Harrison (University of Cambridge, 1998) parsing/search.ml mainly developed by Yves Bertot (INRIA-Lemme, 2000-2004) theories/ZArith started by Pierre Crgut (France Telecom R&D, 1996) theories/Strings developed by Laurent Thry (INRIA-Lemme, 2003) theories/Numbers/Cyclic developed by Benjamin Grgoire (INRIA-Everest, 2007), Laurent Thry (INRIA-Marelle, 2007-2008), Arnaud Spiwack (INRIA-LogiCal, 2007) and Pierre Letouzey (PPS, 2008) ide/utils some files come from Maxence Guesdon's Cameleon tool Many discussions within the INRIA teams and labs taking part to the development influenced the design of Coq especially with C. Auger, Y. Bertot, F. Blanqui, J. Courant, P. Courtieu, J. Duprat, S. Glondu, J. Goubault, J.-P. Jouannaud, S. Lescuyer, A. Mahboubi, C. March, A. Miquel, B. Monate, L. Pottier, Y. Rgis-Gianas, P.-Y. Strub, L. Thry, B. Werner The development of Coq also significantly benefited from feedback, suggestions or short contributions from: C. Alvarado, P. Crgut, J.-F. Monin (France Telecom R&D), P. Castran (University Bordeaux 1), the Foundations Group (Radboud University, Nijmegen, The Netherlands), Laboratoire J.-A. Dieudonn (University of Nice-Sophia Antipolis), F. Garillot, G. Gonthier (INRIA-MSR joint lab), INRIA-Gallium project, the CS dept at Yale, the CIS dept at U. 
Penn, the CSE dept at Harvard, the CS dept at Princeton The following people have contributed to the development of different versions of the Coq Proof assistant during the indicated time: Bruno Barras (INRIA, 1995-now) Pierre Boutillier (INRIA-PPS, 2010-now) Jacek Chrzaszcz (LRI, 1998-2003) Thierry Coquand (INRIA, 1985-1989) Pierre Corbineau (LRI, 2003-2005, Nijmegen, 2005-2008, Grenoble 1, 2008-now) Cristina Cornes (INRIA, 1993-1996) Yann Coscoy (INRIA Sophia-Antipolis, 1995-1996) David Delahaye (INRIA, 1997-2002) Daniel de Rauglaudre (INRIA, 1996-1998) Olivier Desmettre (INRIA, 2001-2003) Gilles Dowek (INRIA, 1991-1994) Amy Felty (INRIA, 1993) Jean-Christophe Filliâtre (ENS Lyon, 1994-1997, LRI, 1997-now) Eduardo Giménez (ENS Lyon, 1993-1996, INRIA, 1997-1998) Stéphane Glondu (INRIA-PPS, 2007-now) Benjamin Grégoire (INRIA, 2003-now) Hugo Herbelin (INRIA, 1996-now) Gérard Huet (INRIA, 1985-1997) Pierre Letouzey (LRI, 2000-2004, PPS, 2005-2008, INRIA-PPS, 2009-now) Patrick Loiseleur (Paris Sud, 1997-1999) Evgeny Makarov (INRIA, 2007) Pascal Manoury (INRIA, 1993) Micaela Mayero (INRIA, 1997-2002) Claude Marché (INRIA 2003-2004 & LRI, 2004) Benjamin Monate (LRI, 2003) César Muñoz (INRIA, 1994-1995) Chetan Murthy (INRIA, 1992-1994) Julien Narboux (INRIA, 2005-2006, Strasbourg, 2007-now) Jean-Marc Notin (CNRS, 2006-now) Catherine Parent-Vigouroux (ENS Lyon, 1992-1995) Christine Paulin-Mohring (INRIA, 1985-1989, ENS Lyon, 1989-1997, LRI, 1997-now) Pierre-Marie Pédrot (INRIA-PPS, 2011-now) Matthias Puech (INRIA-Bologna, 2008-now) Yann Régis-Gianas (INRIA-PPS, 2009-now) Clément Renard (INRIA, 2001-2004) Claudio Sacerdoti Coen (INRIA, 2004-2005) Amokrane Saïbi (INRIA, 1993-1998) Vincent Siles (INRIA, 2007) Élie Soubiran (INRIA, 2007-now) Matthieu Sozeau (INRIA, 2005-now) Arnaud Spiwack (INRIA, 2006-now) Enrico Tassi (INRIA, 2011-now) Benjamin Werner (INRIA, 1989-1994) *************************************************************************** INRIA refers to: Institut National de la Recherche en Informatique et Automatique CNRS refers to: Centre National de la Recherche Scientifique LRI refers to: Laboratoire de Recherche en Informatique, UMR 8623 CNRS and Université Paris-Sud ENS Lyon refers to: Ecole Normale Supérieure de Lyon PPS refers to: Laboratoire Preuve, Programmation, Système, UMR 7126, CNRS and Université Paris 7 **************************************************************************** coq-8.4pl2/tools/0000750000175000001440000000000012127276530012773 5ustar notinuserscoq-8.4pl2/tools/coqdep.ml0000640000175000001440000001552012010532755014577 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (Filename.dirname (file_name f d')) then begin eprintf "*** Warning : the file %s is defined twice!\n" (f ^ suf); flush stderr end with Not_found -> () end; Hashtbl.add tab f d in iter check let add_coqlib_known phys_dir log_dir f = match get_extension f [".vo"] with | (basename,".vo") -> let name = log_dir@[basename] in let paths = suffixes name in List.iter (fun f -> Hashtbl.add coqlibKnown f ()) paths | _ -> () let sort () = let seen = Hashtbl.create 97 in let rec loop file = let file = canonize file in if not (Hashtbl.mem seen file) then begin Hashtbl.add seen file (); let cin = open_in (file ^ ".v") in let lb = Lexing.from_channel cin in try while true do match coq_action lb with | Require sl -> List.iter (fun s -> try loop (Hashtbl.find vKnown s) with Not_found -> ()) sl | RequireString s -> loop s | _
-> () done with Fin_fichier -> close_in cin; printf "%s%s " file !suffixe end in List.iter (fun (name,_) -> loop name) !vAccu let (dep_tab : (string,string list) Hashtbl.t) = Hashtbl.create 151 let mL_dep_list b f = try Hashtbl.find dep_tab f with Not_found -> let deja_vu = ref ([] : string list) in try let chan = open_in f in let buf = Lexing.from_channel chan in try while true do let (Use_module str) = caml_action buf in if str = b then begin eprintf "*** Warning : in file %s the" f; eprintf " notation %s. is useless !\n" b; flush stderr end else if not (List.mem str !deja_vu) then addQueue deja_vu str done; [] with Fin_fichier -> begin close_in chan; let rl = List.rev !deja_vu in Hashtbl.add dep_tab f rl; rl end with Sys_error _ -> [] let affiche_Declare f dcl = printf "\n*** In file %s: \n" f; printf "Declare ML Module"; List.iter (fun str -> printf " \"%s\"" str) dcl; printf ".\n"; flush stdout let warning_Declare f dcl = eprintf "*** Warning : in file %s, the ML modules" f; eprintf " declaration should be\n"; eprintf "*** Declare ML Module"; List.iter (fun str -> eprintf " \"%s\"" str) dcl; eprintf ".\n"; flush stderr let traite_Declare f = let decl_list = ref ([] : string list) in let rec treat = function | s :: ll -> let s' = basename_noext s in (match search_ml_known s with | Some mldir when not (List.mem s' !decl_list) -> let fullname = file_name (String.uncapitalize s') mldir in let depl = mL_dep_list s (fullname ^ ".ml") in treat depl; decl_list := s :: !decl_list | _ -> ()); treat ll | [] -> () in try let chan = open_in f in let buf = Lexing.from_channel chan in begin try while true do let tok = coq_action buf in (match tok with | Declare sl -> decl_list := []; treat sl; decl_list := List.rev !decl_list; if !option_D then affiche_Declare f !decl_list else if !decl_list <> sl then warning_Declare f !decl_list | _ -> ()) done with Fin_fichier -> () end; close_in chan with Sys_error _ -> () let declare_dependencies () = List.iter (fun (name,_) -> traite_Declare (name^".v"); flush stdout) (List.rev !vAccu) let usage () = eprintf "[ usage: coqdep [-w] [-I dir] [-R dir coqdir] [-coqlib dir] [-c] [-i] [-D] + ]\n"; flush stderr; exit 1 let rec parse = function | "-c" :: ll -> option_c := true; parse ll | "-D" :: ll -> option_D := true; parse ll | "-w" :: ll -> option_w := true; parse ll | "-boot" :: ll -> Flags.boot := true; parse ll | "-sort" :: ll -> option_sort := true; parse ll | ("-noglob" | "-no-glob") :: ll -> option_noglob := true; parse ll | "-I" :: r :: "-as" :: ln :: ll -> add_dir add_known r [ln]; parse ll | "-I" :: r :: "-as" :: [] -> usage () | "-I" :: r :: ll -> add_dir add_known r []; parse ll | "-I" :: [] -> usage () | "-R" :: r :: "-as" :: ln :: ll -> add_rec_dir add_known r [ln]; parse ll | "-R" :: r :: "-as" :: [] -> usage () | "-R" :: r :: ln :: ll -> add_rec_dir add_known r [ln]; parse ll | "-R" :: ([] | [_]) -> usage () | "-coqlib" :: (r :: ll) -> Flags.coqlib_spec := true; Flags.coqlib := r; parse ll | "-coqlib" :: [] -> usage () | "-suffix" :: (s :: ll) -> suffixe := s ; parse ll | "-suffix" :: [] -> usage () | "-slash" :: ll -> option_slash := true; parse ll | ("-h"|"--help"|"-help") :: _ -> usage () | f :: ll -> treat_file None f; parse ll | [] -> () let coqdep () = if Array.length Sys.argv < 2 then usage (); parse (List.tl (Array.to_list Sys.argv)); if not Coq_config.has_natdynlink then option_natdynlk := false; (* NOTE: These directories are searched from last to first *) if !Flags.boot then begin add_rec_dir add_known "theories" ["Coq"]; add_rec_dir 
add_known "plugins" ["Coq"] end else begin let coqlib = Envars.coqlib () in add_rec_dir add_coqlib_known (coqlib//"theories") ["Coq"]; add_rec_dir add_coqlib_known (coqlib//"plugins") ["Coq"]; let user = coqlib//"user-contrib" in if Sys.file_exists user then add_rec_dir add_coqlib_known user []; List.iter (fun s -> add_rec_dir add_coqlib_known s []) Envars.xdg_dirs; List.iter (fun s -> add_rec_dir add_coqlib_known s []) Envars.coqpath; end; List.iter (fun (f,d) -> add_mli_known f d) !mliAccu; List.iter (fun (f,d) -> add_mllib_known f d) !mllibAccu; List.iter (fun (f,_,d) -> add_ml_known f d) !mlAccu; warning_mult ".mli" iter_mli_known; warning_mult ".ml" iter_ml_known; if !option_sort then begin sort (); exit 0 end; if !option_c && not !option_D then mL_dependencies (); if not !option_D then coq_dependencies (); if !option_w || !option_D then declare_dependencies () let _ = Printexc.catch coqdep () coq-8.4pl2/tools/coq_makefile.ml0000640000175000001440000007336412050530763015756 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* print x | x :: l -> print x; print sep; print_list sep l | [] -> () let list_iter_i f = let rec aux i = function [] -> () | a::l -> f i a; aux (i+1) l in aux 1 let section s = let l = String.length s in let sep = String.make (l+5) '#' and sep2 = String.make (l+5) ' ' in String.set sep (l+4) '\n'; String.set sep2 0 '#'; String.set sep2 (l+3) '#'; String.set sep2 (l+4) '\n'; print sep; print sep2; print "# "; print s; print " #\n"; print sep2; print sep; print "\n" let usage () = output_string stderr "Usage summary: coq_makefile [subdirectory] .... [file.v] ... [file.ml[i4]?] ... [file.mllib] ... [-custom command dependencies file] ... [-I dir] ... [-R physicalpath logicalpath] ... [VARIABLE = value] ... [-arg opt] ... [-opt|-byte] [-no-install] [-f file] [-o file] [-h] [--help] [file.v]: Coq file to be compiled [file.ml[i4]?]: Objective Caml file to be compiled [file.mllib]: ocamlbuild file that describes a Objective Caml library [subdirectory] : subdirectory that should be \"made\" and has a Makefile itself to do so. [-custom command dependencies file]: add target \"file\" with command \"command\" and dependencies \"dependencies\" [-I dir]: look for Objective Caml dependencies in \"dir\" [-R physicalpath logicalpath]: look for Coq dependencies resursively starting from \"physicalpath\". The logical path associated to the physical path is \"logicalpath\". [VARIABLE = value]: Add the variable definition \"VARIABLE=value\" [-byte]: compile with byte-code version of coq [-opt]: compile with native-code version of coq [-arg opt]: send option \"opt\" to coqc [-install opt]: where opt is \"user\" to force install into user directory, \"none\" to build a makefile with no install target or \"global\" to force install in $COQLIB directory [-f file]: take the contents of file as arguments [-o file]: output should go in file file Output file outside the current directory is forbidden. 
[-h]: print this usage summary [--help]: equivalent to [-h]\n"; exit 1 let is_genrule r = let genrule = Str.regexp("%") in Str.string_match genrule r 0 let string_prefix a b = let rec aux i = try if a.[i] = b.[i] then aux (i+1) else i with |Invalid_argument _ -> i in String.sub a 0 (aux 0) let is_prefix dir1 dir2 = let l1 = String.length dir1 in let l2 = String.length dir2 in dir1 = dir2 or (l1 < l2 & String.sub dir2 0 l1 = dir1 & dir2.[l1] = '/') let physical_dir_of_logical_dir ldir = let le = String.length ldir - 1 in let pdir = if ldir.[le] = '.' then String.sub ldir 0 (le - 1) else String.copy ldir in for i = 0 to le - 1 do if pdir.[i] = '.' then pdir.[i] <- '/'; done; pdir let standard opt = print "byte:\n"; print "\t$(MAKE) all \"OPT:=-byte\"\n\n"; print "opt:\n"; if not opt then print "\t@echo \"WARNING: opt is disabled\"\n"; print "\t$(MAKE) all \"OPT:="; print (if opt then "-opt" else "-byte"); print "\"\n\n" let classify_files_by_root var files (inc_i,inc_r) = if not (List.exists (fun (pdir,_,_) -> pdir = ".") inc_r) then begin let absdir_of_files = List.rev_map (fun x -> Minilib.canonical_path_name (Filename.dirname x)) files in (* files in scope of a -I option (assuming they are no overlapping) *) let has_inc_i = List.exists (fun (_,a) -> List.mem a absdir_of_files) inc_i in if has_inc_i then begin printf "%sINC=" var; List.iter (fun (pdir,absdir) -> if List.mem absdir absdir_of_files then printf "$(filter $(wildcard %s/*),$(%s)) " pdir var ) inc_i; printf "\n"; end; (* Files in the scope of a -R option (assuming they are disjoint) *) list_iter_i (fun i (pdir,ldir,abspdir) -> if List.exists (is_prefix abspdir) absdir_of_files then printf "%s%d=$(patsubst %s/%%,%%,$(filter %s/%%,$(%s)))\n" var i pdir pdir var) inc_r; end let install_include_by_root files_var files (inc_i,inc_r) = try (* All files caught by a -R . option (assuming it is the only one) *) let ldir = match inc_r with |[(".",t,_)] -> t |l -> let out = List.assoc "." (List.map (fun (p,l,_) -> (p,l)) inc_r) in let () = prerr_string "Warning: install rule assumes that -R . 
_ is the only -R option" in out in let pdir = physical_dir_of_logical_dir ldir in printf "\tfor i in $(%s); do \\\n" files_var; printf "\t install -d `dirname $(DSTROOT)$(COQLIBINSTALL)/%s/$$i`; \\\n" pdir; printf "\t install -m 0644 $$i $(DSTROOT)$(COQLIBINSTALL)/%s/$$i; \\\n" pdir; printf "\tdone\n" with Not_found -> let absdir_of_files = List.rev_map (fun x -> Minilib.canonical_path_name (Filename.dirname x)) files in let has_inc_i_files = List.exists (fun (_,a) -> List.mem a absdir_of_files) inc_i in let install_inc_i d = printf "\tinstall -d $(DSTROOT)$(COQLIBINSTALL)/%s; \\\n" d; printf "\tfor i in $(%sINC); do \\\n" files_var; printf "\t install -m 0644 $$i $(DSTROOT)$(COQLIBINSTALL)/%s/`basename $$i`; \\\n" d; printf "\tdone\n" in if inc_r = [] then if has_inc_i_files then begin (* Files in the scope of a -I option *) install_inc_i "$(INSTALLDEFAULTROOT)"; end else () else (* Files in the scope of a -R option (assuming they are disjoint) *) list_iter_i (fun i (pdir,ldir,abspdir) -> let has_inc_r_files = List.exists (is_prefix abspdir) absdir_of_files in let pdir' = physical_dir_of_logical_dir ldir in if has_inc_r_files then begin printf "\tcd %s; for i in $(%s%d); do \\\n" pdir files_var i; printf "\t install -d `dirname $(DSTROOT)$(COQLIBINSTALL)/%s/$$i`; \\\n" pdir'; printf "\t install -m 0644 $$i $(DSTROOT)$(COQLIBINSTALL)/%s/$$i; \\\n" pdir'; printf "\tdone\n"; end; if has_inc_i_files then install_inc_i pdir' ) inc_r let install_doc some_vfiles some_mlifiles (_,inc_r) = let install_one_kind kind dir = printf "\tinstall -d $(DSTROOT)$(COQDOCINSTALL)/%s/%s\n" dir kind; printf "\tfor i in %s/*; do \\\n" kind; printf "\t install -m 0644 $$i $(DSTROOT)$(COQDOCINSTALL)/%s/$$i;\\\n" dir; print "\tdone\n" in print "install-doc:\n"; let () = match inc_r with |[] -> if some_vfiles then install_one_kind "html" "$(INSTALLDEFAULTROOT)"; if some_mlifiles then install_one_kind "mlihtml" "$(INSTALLDEFAULTROOT)"; |(_,lp,_)::q -> let pr = List.fold_left (fun a (_,b,_) -> string_prefix a b) lp q in if (pr <> "") && ((List.exists (fun(_,b,_) -> b = pr) inc_r) || pr.[String.length pr - 1] = '.') then begin let rt = physical_dir_of_logical_dir pr in if some_vfiles then install_one_kind "html" rt; if some_mlifiles then install_one_kind "mlihtml" rt; end else begin prerr_string "Warning: -R options don't have a correct common prefix, install-doc will put anything in $INSTALLDEFAULTROOT\n"; if some_vfiles then install_one_kind "html" "$(INSTALLDEFAULTROOT)"; if some_mlifiles then install_one_kind "mlihtml" "$(INSTALLDEFAULTROOT)"; end in print "\n" let install (vfiles,(mlifiles,ml4files,mlfiles,mllibfiles,mlpackfiles),_,sds) inc = function |Project_file.NoInstall -> () |is_install -> let () = if is_install = Project_file.UnspecInstall then print "userinstall:\n\t+$(MAKE) USERINSTALL=true install\n\n" in let not_empty = function |[] -> false |_::_ -> true in let cmofiles = mlpackfiles@mlfiles@ml4files in let cmifiles = mlifiles@cmofiles in let cmxsfiles = cmofiles@mllibfiles in if (not_empty cmxsfiles) then begin print "install-natdynlink:\n"; install_include_by_root "CMXSFILES" cmxsfiles inc; print "\n"; end; print "install:"; if (not_empty cmxsfiles) then print "$(if $(HASNATDYNLINK_OR_EMPTY),install-natdynlink)"; print "\n"; if not_empty vfiles then install_include_by_root "VOFILES" vfiles inc; if (not_empty cmofiles) then install_include_by_root "CMOFILES" cmofiles inc; if (not_empty cmifiles) then install_include_by_root "CMIFILES" cmifiles inc; if (not_empty mllibfiles) then 
install_include_by_root "CMAFILES" mllibfiles inc; List.iter (fun x -> printf "\t(cd %s; $(MAKE) DSTROOT=$(DSTROOT) INSTALLDEFAULTROOT=$(INSTALLDEFAULTROOT)/%s install)\n" x x) sds; print "\n"; install_doc (not_empty vfiles) (not_empty mlifiles) inc let make_makefile sds = if !make_name <> "" then begin printf "%s: %s\n" !makefile_name !make_name; print "\tmv -f $@ $@.bak\n"; print "\t$(COQBIN)coq_makefile -f $< -o $@\n\n"; List.iter (fun x -> print "\t(cd "; print x; print " ; $(MAKE) Makefile)\n") sds; print "\n"; end let clean sds sps = print "clean:\n"; if !some_mlfile || !some_mlifile || !some_ml4file || !some_mllibfile || !some_mlpackfile then begin print "\trm -f $(ALLCMOFILES) $(CMIFILES) $(CMAFILES)\n"; print "\trm -f $(ALLCMOFILES:.cmo=.cmx) $(CMXAFILES) $(CMXSFILES) $(ALLCMOFILES:.cmo=.o) $(CMXAFILES:.cmxa=.a)\n"; print "\trm -f $(addsuffix .d,$(MLFILES) $(MLIFILES) $(ML4FILES) $(MLLIBFILES) $(MLPACKFILES))\n"; end; if !some_vfile then print "\trm -f $(VOFILES) $(VIFILES) $(GFILES) $(VFILES:.v=.v.d) $(VFILES:=.beautified) $(VFILES:=.old)\n"; print "\trm -f all.ps all-gal.ps all.pdf all-gal.pdf all.glob $(VFILES:.v=.glob) $(VFILES:.v=.tex) $(VFILES:.v=.g.tex) all-mli.tex\n"; print "\t- rm -rf html mlihtml\n"; List.iter (fun (file,_,_) -> if not (is_genrule file) then (print "\t- rm -rf "; print file; print "\n")) sps; List.iter (fun x -> print "\t(cd "; print x; print " ; $(MAKE) clean)\n") sds; print "\n"; print "archclean:\n"; print "\trm -f *.cmx *.o\n"; List.iter (fun x -> print "\t(cd "; print x; print " ; $(MAKE) archclean)\n") sds; print "\n"; print "printenv:\n\t@$(COQBIN)coqtop -config\n"; print "\t@echo CAMLC =\t$(CAMLC)\n\t@echo CAMLOPTC =\t$(CAMLOPTC)\n\t@echo PP =\t$(PP)\n\t@echo COQFLAGS =\t$(COQFLAGS)\n"; print "\t@echo COQLIBINSTALL =\t$(COQLIBINSTALL)\n\t@echo COQDOCINSTALL =\t$(COQDOCINSTALL)\n\n" let header_includes () = () let implicit () = section "Implicit rules."; let mli_rules () = print "%.cmi: %.mli\n\t$(CAMLC) $(ZDEBUG) $(ZFLAGS) $<\n\n"; print "%.mli.d: %.mli\n"; print "\t$(OCAMLDEP) -slash $(OCAMLLIBS) \"$<\" > \"$@\" || ( RV=$$?; rm -f \"$@\"; exit $${RV} )\n\n" in let ml4_rules () = print "%.cmo: %.ml4\n\t$(CAMLC) $(ZDEBUG) $(ZFLAGS) $(PP) -impl $<\n\n"; print "%.cmx: %.ml4\n\t$(CAMLOPTC) $(ZDEBUG) $(ZFLAGS) $(PP) -impl $<\n\n"; print "%.ml4.d: %.ml4\n"; print "\t$(COQDEP) -slash $(OCAMLLIBS) \"$<\" > \"$@\" || ( RV=$$?; rm -f \"$@\"; exit $${RV} )\n\n" in let ml_rules () = print "%.cmo: %.ml\n\t$(CAMLC) $(ZDEBUG) $(ZFLAGS) $<\n\n"; print "%.cmx: %.ml\n\t$(CAMLOPTC) $(ZDEBUG) $(ZFLAGS) $<\n\n"; print "%.ml.d: %.ml\n"; print "\t$(OCAMLDEP) -slash $(OCAMLLIBS) \"$<\" > \"$@\" || ( RV=$$?; rm -f \"$@\"; exit $${RV} )\n\n" in let cmxs_rules () = print "%.cmxs: %.cmxa\n\t$(CAMLOPTLINK) $(ZDEBUG) $(ZFLAGS) -linkall -shared -o $@ $<\n\n"; print "%.cmxs: %.cmx\n\t$(CAMLOPTLINK) $(ZDEBUG) $(ZFLAGS) -shared -o $@ $<\n\n" in let mllib_rules () = print "%.cma: | %.mllib\n\t$(CAMLLINK) $(ZDEBUG) $(ZFLAGS) -a -o $@ $^\n\n"; print "%.cmxa: | %.mllib\n\t$(CAMLOPTLINK) $(ZDEBUG) $(ZFLAGS) -a -o $@ $^\n\n"; print "%.mllib.d: %.mllib\n"; print "\t$(COQDEP) -slash $(COQLIBS) -c \"$<\" > \"$@\" || ( RV=$$?; rm -f \"$@\"; exit $${RV} )\n\n" in let mlpack_rules () = print "%.cmo: | %.mlpack\n\t$(CAMLLINK) $(ZDEBUG) $(ZFLAGS) -pack -o $@ $^\n\n"; print "%.cmx: | %.mlpack\n\t$(CAMLOPTLINK) $(ZDEBUG) $(ZFLAGS) -pack -o $@ $^\n\n"; print "%.mlpack.d: %.mlpack\n"; print "\t$(COQDEP) -slash $(COQLIBS) -c \"$<\" > \"$@\" || ( RV=$$?; rm -f \"$@\"; exit $${RV} )\n\n"; in let 
v_rules () = print "%.vo %.glob: %.v\n\t$(COQC) $(COQDEBUG) $(COQFLAGS) $*\n\n"; print "%.vi: %.v\n\t$(COQC) -i $(COQDEBUG) $(COQFLAGS) $*\n\n"; print "%.g: %.v\n\t$(GALLINA) $<\n\n"; print "%.tex: %.v\n\t$(COQDOC) $(COQDOCFLAGS) -latex $< -o $@\n\n"; print "%.html: %.v %.glob\n\t$(COQDOC) $(COQDOCFLAGS) -html $< -o $@\n\n"; print "%.g.tex: %.v\n\t$(COQDOC) $(COQDOCFLAGS) -latex -g $< -o $@\n\n"; print "%.g.html: %.v %.glob\n\t$(COQDOC)$(COQDOCFLAGS) -html -g $< -o $@\n\n"; print "%.v.d: %.v\n"; print "\t$(COQDEP) -slash $(COQLIBS) \"$<\" > \"$@\" || ( RV=$$?; rm -f \"$@\"; exit $${RV} )\n\n"; print "%.v.beautified:\n\t$(COQC) $(COQDEBUG) $(COQFLAGS) -beautify $*\n\n" in if !some_mlifile then mli_rules (); if !some_ml4file then ml4_rules (); if !some_mlfile then ml_rules (); if !some_mlfile || !some_ml4file then cmxs_rules (); if !some_mllibfile then mllib_rules (); if !some_mlpackfile then mlpack_rules (); if !some_vfile then v_rules () let variables is_install opt (args,defs) = let var_aux (v,def) = print v; print "="; print def; print "\n" in section "Variables definitions."; List.iter var_aux defs; print "\n"; if not opt then print "override OPT:=-byte\n" else print "OPT?=\n"; begin match args with |[] -> () |h::t -> print "OTHERFLAGS="; print h; List.iter (fun s -> print " ";print s) t; print "\n"; end; (* Coq executables and relative variables *) if !some_vfile || !some_mlpackfile || !some_mllibfile then print "COQDEP?=$(COQBIN)coqdep -c\n"; if !some_vfile then begin print "COQFLAGS?=-q $(OPT) $(COQLIBS) $(OTHERFLAGS) $(COQ_XML)\n"; print "COQCHKFLAGS?=-silent -o\n"; print "COQDOCFLAGS?=-interpolate -utf8\n"; print "COQC?=$(COQBIN)coqc\n"; print "GALLINA?=$(COQBIN)gallina\n"; print "COQDOC?=$(COQBIN)coqdoc\n"; print "COQCHK?=$(COQBIN)coqchk\n\n"; end; (* Caml executables and relative variables *) if !some_ml4file || !some_mlfile || !some_mlifile then begin print "COQSRCLIBS?=-I $(COQLIB)kernel -I $(COQLIB)lib \\ -I $(COQLIB)library -I $(COQLIB)parsing \\ -I $(COQLIB)pretyping -I $(COQLIB)interp \\ -I $(COQLIB)proofs -I $(COQLIB)tactics \\ -I $(COQLIB)toplevel"; List.iter (fun c -> print " \\ -I $(COQLIB)plugins/"; print c) Coq_config.plugins_dirs; print "\n"; print "ZFLAGS=$(OCAMLLIBS) $(COQSRCLIBS) -I $(CAMLP4LIB)\n\n"; print "CAMLC?=$(OCAMLC) -c -rectypes\n"; print "CAMLOPTC?=$(OCAMLOPT) -c -rectypes\n"; print "CAMLLINK?=$(OCAMLC) -rectypes\n"; print "CAMLOPTLINK?=$(OCAMLOPT) -rectypes\n"; print "GRAMMARS?=grammar.cma\n"; print "CAMLP4EXTEND?=pa_extend.cmo pa_macro.cmo q_MLast.cmo\n"; print "CAMLP4OPTIONS?=-loc loc\n"; print "PP?=-pp \"$(CAMLP4BIN)$(CAMLP4)o -I $(CAMLLIB) -I . 
$(COQSRCLIBS) $(CAMLP4EXTEND) $(GRAMMARS) $(CAMLP4OPTIONS) -impl\"\n\n"; end; match is_install with | Project_file.NoInstall -> () | Project_file.UnspecInstall -> section "Install Paths."; print "ifdef USERINSTALL\n"; print "XDG_DATA_HOME?=$(HOME)/.local/share\n"; print "COQLIBINSTALL=$(XDG_DATA_HOME)/coq\n"; print "COQDOCINSTALL=$(XDG_DATA_HOME)/doc/coq\n"; print "else\n"; print "COQLIBINSTALL=${COQLIB}user-contrib\n"; print "COQDOCINSTALL=${DOCDIR}user-contrib\n"; print "endif\n\n" | Project_file.TraditionalInstall -> section "Install Paths."; print "COQLIBINSTALL=${COQLIB}user-contrib\n"; print "COQDOCINSTALL=${DOCDIR}user-contrib\n"; print "\n" | Project_file.UserInstall -> section "Install Paths."; print "XDG_DATA_HOME?=$(HOME)/.local/share\n"; print "COQLIBINSTALL=$(XDG_DATA_HOME)/coq\n"; print "COQDOCINSTALL=$(XDG_DATA_HOME)/doc/coq\n"; print "\n" let parameters () = print ".DEFAULT_GOAL := all\n\n# \n"; print "# This Makefile may take arguments passed as environment variables:\n"; print "# COQBIN to specify the directory where Coq binaries resides;\n"; print "# ZDEBUG/COQDEBUG to specify debug flags for ocamlc&ocamlopt/coqc;\n"; print "# DSTROOT to specify a prefix to install path.\n\n"; print "# Here is a hack to make $(eval $(shell works:\n"; print "define donewline\n\n\nendef\n"; print "includecmdwithout@ = $(eval $(subst @,$(donewline),$(shell { $(1) | tr -d '\\r' | tr '\\n' '@'; })))\n"; print "$(call includecmdwithout@,$(COQBIN)coqtop -config)\n\n" let include_dirs (inc_i,inc_r) = let parse_includes l = List.map (fun (x,_) -> "-I " ^ x) l in let parse_rec_includes l = List.map (fun (p,l,_) -> let l' = if l = "" then "\"\"" else l in "-R " ^ p ^ " " ^ l') l in let inc_i' = List.filter (fun (_,i) -> not (List.exists (fun (_,_,i') -> is_prefix i' i) inc_r)) inc_i in let str_i = parse_includes inc_i in let str_i' = parse_includes inc_i' in let str_r = parse_rec_includes inc_r in section "Libraries definitions."; if !some_ml4file || !some_mlfile || !some_mlifile then begin print "OCAMLLIBS?="; print_list "\\\n " str_i; print "\n"; end; if !some_vfile || !some_mllibfile || !some_mlpackfile then begin print "COQLIBS?="; print_list "\\\n " str_i'; print " "; print_list "\\\n " str_r; print "\n"; print "COQDOCLIBS?="; print_list "\\\n " str_r; print "\n\n"; end let custom sps = let pr_path (file,dependencies,com) = print file; print ": "; print dependencies; print "\n"; if com <> "" then (print "\t"; print com); print "\n\n" in if sps <> [] then section "Custom targets."; List.iter pr_path sps let subdirs sds = let pr_subdir s = print s; print ":\n\tcd "; print s; print " ; $(MAKE) all\n\n" in if sds <> [] then section "Subdirectories."; List.iter pr_subdir sds let forpacks l = let () = if l <> [] then section "Ad-hoc implicit rules for mlpack." 
in List.iter (fun it -> let h = Filename.chop_extension it in printf "$(addsuffix .cmx,$(filter $(basename $(MLFILES)),$(%s_MLPACK_DEPENDENCIES))): %%.cmx: %%.ml\n" h; printf "\t$(CAMLOPTC) $(ZDEBUG) $(ZFLAGS) -for-pack %s $<\n\n" (String.capitalize (Filename.basename h)); printf "$(addsuffix .cmx,$(filter $(basename $(ML4FILES)),$(%s_MLPACK_DEPENDENCIES))): %%.cmx: %%.ml4\n" h; printf "\t$(CAMLOPTC) $(ZDEBUG) $(ZFLAGS) -for-pack %s $(PP) -impl $<\n\n" (String.capitalize (Filename.basename h)) ) l let main_targets vfiles (mlifiles,ml4files,mlfiles,mllibfiles,mlpackfiles) other_targets inc = let decl_var var = function |[] -> () |l -> printf "%s:=" var; print_list "\\\n " l; print "\n"; printf "\n-include $(addsuffix .d,$(%s))\n.SECONDARY: $(addsuffix .d,$(%s))\n\n" var var in section "Files dispatching."; decl_var "VFILES" vfiles; begin match vfiles with |[] -> () |l -> print "VOFILES:=$(VFILES:.v=.vo)\n"; classify_files_by_root "VOFILES" l inc; print "GLOBFILES:=$(VFILES:.v=.glob)\n"; print "VIFILES:=$(VFILES:.v=.vi)\n"; print "GFILES:=$(VFILES:.v=.g)\n"; print "HTMLFILES:=$(VFILES:.v=.html)\n"; print "GHTMLFILES:=$(VFILES:.v=.g.html)\n" end; decl_var "ML4FILES" ml4files; decl_var "MLFILES" mlfiles; decl_var "MLPACKFILES" mlpackfiles; decl_var "MLLIBFILES" mllibfiles; decl_var "MLIFILES" mlifiles; let mlsfiles = match ml4files,mlfiles,mlpackfiles with |[],[],[] -> [] |[],[],_ -> Printf.eprintf "Mlpack cannot work without ml[4]?"; [] |[],ml,[] -> print "ALLCMOFILES:=$(MLFILES:.ml=.cmo)\n"; ml |ml4,[],[] -> print "ALLCMOFILES:=$(ML4FILES:.ml4=.cmo)\n"; ml4 |ml4,ml,[] -> print "ALLCMOFILES:=$(ML4FILES:.ml4=.cmo) $(MLFILES:.ml=.cmo)\n"; List.rev_append ml ml4 |[],ml,mlpack -> print "ALLCMOFILES:=$(MLFILES:.ml=.cmo) $(MLPACKFILES:.mlpack=.cmo)\n"; List.rev_append mlpack ml |ml4,[],mlpack -> print "ALLCMOFILES:=$(ML4FILES:.ml4=.cmo) $(MLPACKFILES:.mlpack=.cmo)\n"; List.rev_append mlpack ml4 |ml4,ml,mlpack -> print "ALLCMOFILES:=$(ML4FILES:.ml4=.cmo) $(MLFILES:.ml=.cmo) $(MLPACKFILES:.mlpack=.cmo)\n"; List.rev_append mlpack (List.rev_append ml ml4) in begin match mlsfiles with |[] -> () |l -> print "CMOFILES=$(filter-out $(addsuffix .cmo,$(foreach lib,$(MLLIBFILES:.mllib=_MLLIB_DEPENDENCIES) $(MLPACKFILES:.mlpack=_MLPACK_DEPENDENCIES),$($(lib)))),$(ALLCMOFILES))\n"; classify_files_by_root "CMOFILES" l inc; print "CMXFILES=$(CMOFILES:.cmo=.cmx)\n"; print "OFILES=$(CMXFILES:.cmx=.o)\n"; end; begin match mllibfiles with |[] -> () |l -> print "CMAFILES:=$(MLLIBFILES:.mllib=.cma)\n"; classify_files_by_root "CMAFILES" l inc; print "CMXAFILES:=$(CMAFILES:.cma=.cmxa)\n"; end; begin match mlifiles,mlsfiles with |[],[] -> () |l,[] -> print "CMIFILES:=$(MLIFILES:.mli=.cmi)\n"; classify_files_by_root "CMIFILES" l inc; |[],l -> print "CMIFILES=$(ALLCMOFILES:.cmo=.cmi)\n"; classify_files_by_root "CMIFILES" l inc; |l1,l2 -> print "CMIFILES=$(sort $(ALLCMOFILES:.cmo=.cmi) $(MLIFILES:.mli=.cmi))\n"; classify_files_by_root "CMIFILES" (l1@l2) inc; end; begin match mllibfiles,mlsfiles with |[],[] -> () |l,[] -> print "CMXSFILES:=$(CMXAFILES:.cmxa=.cmxs)\n"; classify_files_by_root "CMXSFILES" l inc; |[],l -> print "CMXSFILES=$(CMXFILES:.cmx=.cmxs)\n"; classify_files_by_root "CMXSFILES" l inc; |l1,l2 -> print "CMXSFILES=$(CMXFILES:.cmx=.cmxs) $(CMXAFILES:.cmxa=.cmxs)\n"; classify_files_by_root "CMXSFILES" (l1@l2) inc; end; print "ifeq '$(HASNATDYNLINK)' 'true'\n"; print "HASNATDYNLINK_OR_EMPTY := yes\n"; print "else\n"; print "HASNATDYNLINK_OR_EMPTY :=\n"; print "endif\n\n"; section "Definition of the toplevel 
targets."; print "all: "; if !some_vfile then print "$(VOFILES) "; if !some_mlfile || !some_ml4file || !some_mlpackfile then print "$(CMOFILES) "; if !some_mllibfile then print "$(CMAFILES) "; if !some_mlfile || !some_ml4file || !some_mllibfile || !some_mlpackfile then print "$(if $(HASNATDYNLINK_OR_EMPTY),$(CMXSFILES)) "; print_list "\\\n " other_targets; print "\n\n"; if !some_mlifile then begin print "mlihtml: $(MLIFILES:.mli=.cmi)\n"; print "\t mkdir $@ || rm -rf $@/*\n"; print "\t$(OCAMLDOC) -html -rectypes -d $@ -m A $(ZDEBUG) $(ZFLAGS) $(^:.cmi=.mli)\n\n"; print "all-mli.tex: $(MLIFILES:.mli=.cmi)\n"; print "\t$(OCAMLDOC) -latex -rectypes -o $@ -m A $(ZDEBUG) $(ZFLAGS) $(^:.cmi=.mli)\n\n"; end; if !some_vfile then begin print "spec: $(VIFILES)\n\n"; print "gallina: $(GFILES)\n\n"; print "html: $(GLOBFILES) $(VFILES)\n"; print "\t- mkdir -p html\n"; print "\t$(COQDOC) -toc $(COQDOCFLAGS) -html $(COQDOCLIBS) -d html $(VFILES)\n\n"; print "gallinahtml: $(GLOBFILES) $(VFILES)\n"; print "\t- mkdir -p html\n"; print "\t$(COQDOC) -toc $(COQDOCFLAGS) -html -g $(COQDOCLIBS) -d html $(VFILES)\n\n"; print "all.ps: $(VFILES)\n"; print "\t$(COQDOC) -toc $(COQDOCFLAGS) -ps $(COQDOCLIBS) -o $@ `$(COQDEP) -sort -suffix .v $^`\n\n"; print "all-gal.ps: $(VFILES)\n"; print "\t$(COQDOC) -toc $(COQDOCFLAGS) -ps -g $(COQDOCLIBS) -o $@ `$(COQDEP) -sort -suffix .v $^`\n\n"; print "all.pdf: $(VFILES)\n"; print "\t$(COQDOC) -toc $(COQDOCFLAGS) -pdf $(COQDOCLIBS) -o $@ `$(COQDEP) -sort -suffix .v $^`\n\n"; print "all-gal.pdf: $(VFILES)\n"; print "\t$(COQDOC) -toc $(COQDOCFLAGS) -pdf -g $(COQDOCLIBS) -o $@ `$(COQDEP) -sort -suffix .v $^`\n\n"; print "validate: $(VOFILES)\n"; print "\t$(COQCHK) $(COQCHKFLAGS) $(COQLIBS) $(notdir $(^:.vo=))\n\n"; print "beautify: $(VFILES:=.beautified)\n"; print "\tfor file in $^; do mv $${file%.beautified} $${file%beautified}old && mv $${file} $${file%.beautified}; done\n"; print "\t@echo \'Do not do \"make clean\" until you are sure that everything went well!\'\n"; print "\t@echo \'If there were a problem, execute \"for file in $$(find . 
-name \\*.v.old -print); do mv $${file} $${file%.old}; done\" in your shell/'\n\n" end let all_target (vfiles, (_,_,_,_,mlpackfiles as mlfiles), sps, sds) inc = let special_targets = List.filter (fun (n,_,_) -> not (is_genrule n)) sps in let other_targets = List.map (function x,_,_ -> x) special_targets @ sds in main_targets vfiles mlfiles other_targets inc; print ".PHONY: "; print_list " " ("all" :: "opt" :: "byte" :: "archclean" :: "clean" :: "install" :: "userinstall" :: "depend" :: "html" :: "validate" :: sds); print "\n\n"; custom sps; subdirs sds; forpacks mlpackfiles let banner () = print (Printf.sprintf "############################################################################# ## v # The Coq Proof Assistant ## ## print x; print " ") l let command_line args = print "#\n# This Makefile was generated by the command line :\n"; print "# coq_makefile "; print_list args; print "\n#\n\n" let ensure_root_dir (v,(mli,ml4,ml,mllib,mlpack),_,_) ((i_inc,r_inc) as l) = let here = Sys.getcwd () in let not_tops =List.for_all (fun s -> s <> Filename.basename s) in if List.exists (fun (_,x) -> x = here) i_inc or List.exists (fun (_,_,x) -> is_prefix x here) r_inc or (not_tops v && not_tops mli && not_tops ml4 && not_tops ml && not_tops mllib && not_tops mlpack) then l else ((".",here)::i_inc,r_inc) let warn_install_at_root_directory (vfiles,(mlifiles,ml4files,mlfiles,mllibfiles,mlpackfiles),_,_) (inc_i,inc_r) = let inc_r_top = List.filter (fun (_,ldir,_) -> ldir = "") inc_r in let inc_top = List.map (fun (p,_,_) -> p) inc_r_top in let files = vfiles @ mlifiles @ ml4files @ mlfiles @ mllibfiles @ mlpackfiles in if inc_r = [] || List.exists (fun f -> List.mem (Filename.dirname f) inc_top) files then Printf.eprintf "Warning: install target will copy files at the first level of the coq contributions installation directory; option -R %sis recommended\n" (if inc_r_top = [] then "" else "with non trivial logical root ") let check_overlapping_include (_,inc_r) = let pwd = Sys.getcwd () in let rec aux = function | [] -> () | (pdir,_,abspdir)::l -> if not (is_prefix pwd abspdir) then Printf.eprintf "Warning: in option -R, %s is not a subdirectory of the current directory\n" pdir; List.iter (fun (pdir',_,abspdir') -> if is_prefix abspdir abspdir' or is_prefix abspdir' abspdir then Printf.eprintf "Warning: in options -R, %s and %s overlap\n" pdir pdir') l; in aux inc_r let do_makefile args = let has_file var = function |[] -> var := false |_::_ -> var := true in let (project_file,makefile,is_install,opt),l = try Project_file.process_cmd_line Filename.current_dir_name (None,None,Project_file.UnspecInstall,true) [] args with Project_file.Parsing_error -> usage () in let (v_f,(mli_f,ml4_f,ml_f,mllib_f,mlpack_f),sps,sds as targets), inc, defs = Project_file.split_arguments l in let () = match project_file with |None -> () |Some f -> make_name := f in let () = match makefile with |None -> () |Some f -> makefile_name := f; output_channel := open_out f in has_file some_vfile v_f; has_file some_mlifile mli_f; has_file some_mlfile ml_f; has_file some_ml4file ml4_f; has_file some_mllibfile mllib_f; has_file some_mlpackfile mlpack_f; let check_dep f = if Filename.check_suffix f ".v" then some_vfile := true else if (Filename.check_suffix f ".mli") then some_mlifile := true else if (Filename.check_suffix f ".ml4") then some_ml4file := true else if (Filename.check_suffix f ".ml") then some_mlfile := true else if (Filename.check_suffix f ".mllib") then some_mllibfile := true else if (Filename.check_suffix f ".mlpack") 
then some_mlpackfile := true in List.iter (fun (_,dependencies,_) -> List.iter check_dep (Str.split (Str.regexp "[ \t]+") dependencies)) sps; let inc = ensure_root_dir targets inc in if is_install <> Project_file.NoInstall then warn_install_at_root_directory targets inc; check_overlapping_include inc; banner (); header_includes (); warning (); command_line args; parameters (); include_dirs inc; variables is_install opt defs; all_target targets inc; section "Special targets."; standard opt; install targets inc is_install; clean sds sps; make_makefile sds; implicit (); warning (); if not (makefile = None) then close_out !output_channel; exit 0 let main () = let args = if Array.length Sys.argv = 1 then usage (); List.tl (Array.to_list Sys.argv) in do_makefile args let _ = Printexc.catch main () coq-8.4pl2/tools/win32hack.mllib0000640000175000001440000000002211754414461015602 0ustar notinusersWin32hack_filenamecoq-8.4pl2/tools/gallina_lexer.mll0000640000175000001440000001137712010532755016314 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 then comment lexbuf } | "*)" [' ''\t']*'\n' { (if !comments then print (Lexing.lexeme lexbuf)); comment_depth := pred !comment_depth; if !comment_depth > 0 then comment lexbuf } | eof { raise Fin_fichier } | _ { (if !comments then print (Lexing.lexeme lexbuf)); comment lexbuf } and skip_comment = parse | "(*" { comment_depth := succ !comment_depth; skip_comment lexbuf } | "*)" { comment_depth := pred !comment_depth; if !comment_depth > 0 then skip_comment lexbuf } | eof { raise Fin_fichier } | _ { skip_comment lexbuf } and body_def = parse | [^'.']* ":=" { print (Lexing.lexeme lexbuf); () } | _ { print (Lexing.lexeme lexbuf); body lexbuf } and body = parse | enddot { print ".\n"; skip_proof lexbuf } | ":=" { print ".\n"; skip_proof lexbuf } | "(*" { print "(*"; comment_depth := 1; comment lexbuf; body lexbuf } | eof { raise Fin_fichier } | _ { print (Lexing.lexeme lexbuf); body lexbuf } and body_pgm = parse | enddot { print ".\n"; skip_proof lexbuf } | "(*" { print "(*"; comment_depth := 1; comment lexbuf; body_pgm lexbuf } | eof { raise Fin_fichier } | _ { print (Lexing.lexeme lexbuf); body_pgm lexbuf } and skip_until_point = parse | '.' '\n' { () } | enddot { end_of_line lexbuf } | "(*" { comment_depth := 1; skip_comment lexbuf; skip_until_point lexbuf } | eof { raise Fin_fichier } | _ { skip_until_point lexbuf } and end_of_line = parse | [' ' '\t' ]* { end_of_line lexbuf } | '\n' { () } | eof { raise Fin_fichier } | _ { print (Lexing.lexeme lexbuf) } and skip_proof = parse | "Save." { end_of_line lexbuf } | "Save" space { skip_until_point lexbuf } | "Qed." { end_of_line lexbuf } | "Qed" space { skip_until_point lexbuf } | "Defined." { end_of_line lexbuf } | "Defined" space { skip_until_point lexbuf } | "Abort." { end_of_line lexbuf } | "Abort" space { skip_until_point lexbuf } | "Proof" space { skip_until_point lexbuf } | "Proof" [' ' '\t']* '.' 
{ skip_proof lexbuf } | "(*" { comment_depth := 1; skip_comment lexbuf; skip_proof lexbuf } | eof { raise Fin_fichier } | _ { skip_proof lexbuf } coq-8.4pl2/tools/coqdep_common.ml0000640000175000001440000004100312044760777016160 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (f, "") | s :: _ when Filename.check_suffix f s -> (Filename.chop_suffix f s, s) | _ :: l -> get_extension f l (** [basename_noext] removes both the directory part and the extension (if necessary) of a filename *) let basename_noext filename = let fn = Filename.basename filename in try Filename.chop_extension fn with _ -> fn (** ML Files specified on the command line. In the entries: - the first string is the basename of the file, without extension nor directory part - the second string of [mlAccu] is the extension (either .ml or .ml4) - the [dir] part is the directory, with None used as the current directory *) let mlAccu = ref ([] : (string * string * dir) list) and mliAccu = ref ([] : (string * dir) list) and mllibAccu = ref ([] : (string * dir) list) and mlpackAccu = ref ([] : (string * dir) list) (** Coq files specifies on the command line: - first string is the full filename, with only its extension removed - second string is the absolute version of the previous (via getcwd) *) let vAccu = ref ([] : (string * string) list) (** Queue operations *) let addQueue q v = q := v :: !q let safe_hash_add clq q (k,v) = try let v2 = Hashtbl.find q k in if v<>v2 then let rec add_clash = function (k1,l1)::cltl when k=k1 -> (k1,v::l1)::cltl | cl::cltl -> cl::add_clash cltl | [] -> [(k,[v;v2])] in clq := add_clash !clq; (* overwrite previous bindings, as coqc does *) Hashtbl.add q k v with Not_found -> Hashtbl.add q k v (** Files found in the loadpaths. For the ML files, the string is the basename without extension. To allow ML source filename to be potentially capitalized, we perform a double search. *) let mkknown () = let h = (Hashtbl.create 19 : (string, dir) Hashtbl.t) in let add x s = if Hashtbl.mem h x then () else Hashtbl.add h x s and iter f = Hashtbl.iter f h and search x = try Some (Hashtbl.find h (String.uncapitalize x)) with Not_found -> try Some (Hashtbl.find h (String.capitalize x)) with Not_found -> None in add, iter, search let add_ml_known, iter_ml_known, search_ml_known = mkknown () let add_mli_known, iter_mli_known, search_mli_known = mkknown () let add_mllib_known, _, search_mllib_known = mkknown () let add_mlpack_known, _, search_mlpack_known = mkknown () let vKnown = (Hashtbl.create 19 : (string list, string) Hashtbl.t) let coqlibKnown = (Hashtbl.create 19 : (string list, unit) Hashtbl.t) let clash_v = ref ([]: (string list * string list) list) let error_cannot_parse s (i,j) = Printf.eprintf "File \"%s\", characters %i-%i: Syntax error\n" s i j; exit 1 let warning_module_notfound f s = eprintf "*** Warning: in file %s, library " f; eprintf "%s.v is required and has not been found in loadpath!\n" (String.concat "." 
s); flush stderr let warning_notfound f s = eprintf "*** Warning: in file %s, the file " f; eprintf "%s.v is required and has not been found!\n" s; flush stderr let warning_declare f s = eprintf "*** Warning: in file %s, declared ML module " f; eprintf "%s has not been found!\n" s; flush stderr let warning_clash file dir = match List.assoc dir !clash_v with (f1::f2::fl) -> let f = Filename.basename f1 in let d1 = Filename.dirname f1 in let d2 = Filename.dirname f2 in let dl = List.map Filename.dirname (List.rev fl) in eprintf "*** Warning: in file %s, \n required library %s matches several files in path\n (found %s.v in " file (String.concat "." dir) f; List.iter (fun s -> eprintf "%s, " s) dl; eprintf "%s and %s; used the latter)\n" d2 d1 | _ -> assert false let safe_assoc verbose file k = if verbose && List.mem_assoc k !clash_v then warning_clash file k; Hashtbl.find vKnown k let absolute_dir dir = let current = Sys.getcwd () in Sys.chdir dir; let dir' = Sys.getcwd () in Sys.chdir current; dir' let absolute_file_name basename odir = let dir = match odir with Some dir -> dir | None -> "." in absolute_dir dir // basename let file_name s = function | None -> s | Some "." -> s | Some d -> d // s let depend_ML str = match search_mli_known str, search_ml_known str with | Some mlidir, Some mldir -> let mlifile = file_name str mlidir and mlfile = file_name str mldir in (" "^mlifile^".cmi"," "^mlfile^".cmx") | None, Some mldir -> let mlfile = file_name str mldir in (" "^mlfile^".cmo"," "^mlfile^".cmx") | Some mlidir, None -> let mlifile = file_name str mlidir in (" "^mlifile^".cmi"," "^mlifile^".cmi") | None, None -> "", "" let soustraite_fichier_ML dep md ext = try let chan = open_process_in (dep^" -modules "^md^ext) in let list = ocamldep_parse (Lexing.from_channel chan) in let a_faire = ref "" in let a_faire_opt = ref "" in List.iter (fun str -> let byte,opt = depend_ML str in a_faire := !a_faire ^ byte; a_faire_opt := !a_faire_opt ^ opt) (List.rev list); (!a_faire, !a_faire_opt) with | Sys_error _ -> ("","") | _ -> Printf.eprintf "Coqdep: subprocess %s failed on file %s%s\n" dep md ext; exit 1 let autotraite_fichier_ML md ext = try let chan = open_in (md ^ ext) in let buf = Lexing.from_channel chan in let deja_vu = ref [md] in let a_faire = ref "" in let a_faire_opt = ref "" in begin try while true do let (Use_module str) = caml_action buf in if List.mem str !deja_vu then () else begin addQueue deja_vu str; let byte,opt = depend_ML str in a_faire := !a_faire ^ byte; a_faire_opt := !a_faire_opt ^ opt end done with Fin_fichier -> () end; close_in chan; (!a_faire, !a_faire_opt) with Sys_error _ -> ("","") let traite_fichier_ML md ext = match !option_mldep with | Some dep -> soustraite_fichier_ML dep md ext | None -> autotraite_fichier_ML md ext let traite_fichier_modules md ext = try let chan = open_in (md ^ ext) in let list = mllib_list (Lexing.from_channel chan) in List.fold_left (fun a_faire str -> match search_mlpack_known str with | Some mldir -> let file = file_name str mldir in a_faire^" "^file | None -> match search_ml_known str with | Some mldir -> let file = file_name str mldir in a_faire^" "^file | None -> a_faire) "" list with | Sys_error _ -> "" | Syntax_error (i,j) -> error_cannot_parse (md^ext) (i,j) (* Makefile's escaping rules are awful: $ is escaped by doubling and other special characters are escaped by backslash prefixing while backslashes themselves must be escaped only if part of a sequence followed by a special character (i.e. 
in case of ambiguity with a use of it as escaping character). Moreover (even if not crucial) it is apparently not possible to directly escape ';' and leading '\t'. *) let escape = let s' = Buffer.create 10 in fun s -> Buffer.clear s'; for i = 0 to String.length s - 1 do let c = s.[i] in if c = ' ' or c = '#' or c = ':' (* separators and comments *) or c = '%' (* pattern *) or c = '?' or c = '[' or c = ']' or c = '*' (* expansion in filenames *) or i=0 && c = '~' && (String.length s = 1 || s.[1] = '/' || 'A' <= s.[1] && s.[1] <= 'Z' || 'a' <= s.[1] && s.[1] <= 'z') (* homedir expansion *) then begin let j = ref (i-1) in while !j >= 0 && s.[!j] = '\\' do Buffer.add_char s' '\\'; decr j (* escape all preceding '\' *) done; Buffer.add_char s' '\\'; end; if c = '$' then Buffer.add_char s' '$'; Buffer.add_char s' c done; Buffer.contents s' let canonize f = let f' = absolute_dir (Filename.dirname f) // Filename.basename f in match List.filter (fun (_,full) -> f' = full) !vAccu with | (f,_) :: _ -> escape f | _ -> escape f let rec traite_fichier_Coq verbose f = try let chan = open_in f in let buf = Lexing.from_channel chan in let deja_vu_v = ref ([]: string list list) and deja_vu_ml = ref ([] : string list) in try while true do let tok = coq_action buf in match tok with | Require strl -> List.iter (fun str -> if not (List.mem str !deja_vu_v) then begin addQueue deja_vu_v str; try let file_str = safe_assoc verbose f str in printf " %s%s" (canonize file_str) !suffixe with Not_found -> if verbose && not (Hashtbl.mem coqlibKnown str) then warning_module_notfound f str end) strl | RequireString s -> let str = Filename.basename s in if not (List.mem [str] !deja_vu_v) then begin addQueue deja_vu_v [str]; try let file_str = Hashtbl.find vKnown [str] in printf " %s%s" (canonize file_str) !suffixe with Not_found -> if not (Hashtbl.mem coqlibKnown [str]) then warning_notfound f s end | Declare sl -> let declare suff dir s = let base = file_name s dir in let opt = if !option_natdynlk then " "^base^".cmxs" else "" in printf " %s%s%s" (escape base) suff opt in let decl str = let s = basename_noext str in if not (List.mem s !deja_vu_ml) then begin addQueue deja_vu_ml s; match search_mllib_known s with | Some mldir -> declare ".cma" mldir s | None -> match search_mlpack_known s with | Some mldir -> declare ".cmo" mldir s | None -> match search_ml_known s with | Some mldir -> declare ".cmo" mldir s | None -> warning_declare f str end in List.iter decl sl | Load str -> let str = Filename.basename str in if not (List.mem [str] !deja_vu_v) then begin addQueue deja_vu_v [str]; try let file_str = Hashtbl.find vKnown [str] in let canon = canonize file_str in printf " %s.v" canon; traite_fichier_Coq true (canon ^ ".v") with Not_found -> () end | AddLoadPath _ | AddRecLoadPath _ -> (* TODO *) () done with Fin_fichier -> close_in chan | Syntax_error (i,j) -> close_in chan; error_cannot_parse f (i,j) with Sys_error _ -> () let mL_dependencies () = List.iter (fun (name,ext,dirname) -> let fullname = file_name name dirname in let (dep,dep_opt) = traite_fichier_ML fullname ext in let intf = match search_mli_known name with | None -> "" | Some mldir -> " "^(file_name name mldir)^".cmi" in let efullname = escape fullname in printf "%s.cmo:%s%s\n" efullname dep intf; printf "%s.cmx:%s%s\n" efullname dep_opt intf; flush stdout) (List.rev !mlAccu); List.iter (fun (name,dirname) -> let fullname = file_name name dirname in let (dep,_) = traite_fichier_ML fullname ".mli" in printf "%s.cmi:%s\n" (escape fullname) dep; flush stdout) 
(List.rev !mliAccu); List.iter (fun (name,dirname) -> let fullname = file_name name dirname in let dep = traite_fichier_modules fullname ".mllib" in let efullname = escape fullname in printf "%s_MLLIB_DEPENDENCIES:=%s\n" efullname dep; printf "%s.cma:$(addsuffix .cmo,$(%s_MLLIB_DEPENDENCIES))\n" efullname efullname; printf "%s.cmxa %s.cmxs:$(addsuffix .cmx,$(%s_MLLIB_DEPENDENCIES))\n" efullname efullname efullname; flush stdout) (List.rev !mllibAccu); List.iter (fun (name,dirname) -> let fullname = file_name name dirname in let dep = traite_fichier_modules fullname ".mlpack" in let efullname = escape fullname in printf "%s_MLPACK_DEPENDENCIES:=%s\n" efullname dep; printf "%s.cmo:$(addsuffix .cmo,$(%s_MLPACK_DEPENDENCIES))\n" efullname efullname; printf "%s.cmx %s.cmxs:$(addsuffix .cmx,$(%s_MLPACK_DEPENDENCIES))\n" efullname efullname efullname; flush stdout) (List.rev !mlpackAccu) let coq_dependencies () = List.iter (fun (name,_) -> let ename = escape name in let glob = if !option_noglob then "" else " "^ename^".glob" in printf "%s%s%s %s.v.beautified: %s.v" ename !suffixe glob ename ename; traite_fichier_Coq true (name ^ ".v"); printf "\n"; flush stdout) (List.rev !vAccu) let rec suffixes = function | [] -> assert false | [name] -> [[name]] | dir::suffix as l -> l::suffixes suffix let add_known phys_dir log_dir f = match get_extension f [".v";".ml";".mli";".ml4";".mllib";".mlpack"] with | (basename,".v") -> let name = log_dir@[basename] in let file = phys_dir//basename in let paths = suffixes name in List.iter (fun n -> safe_hash_add clash_v vKnown (n,file)) paths | (basename,(".ml"|".ml4")) -> add_ml_known basename (Some phys_dir) | (basename,".mli") -> add_mli_known basename (Some phys_dir) | (basename,".mllib") -> add_mllib_known basename (Some phys_dir) | (basename,".mlpack") -> add_mlpack_known basename (Some phys_dir) | _ -> () (* Visits all the directories under [dir], including [dir], or just [dir] if [recur=false] *) let rec add_directory recur add_file phys_dir log_dir = let dirh = opendir phys_dir in try while true do let f = readdir dirh in (* we avoid . .. and all hidden files and subdirs (e.g. .svn, _darcs) *) if f.[0] <> '.' && f.[0] <> '_' then let phys_f = if phys_dir = "." then f else phys_dir//f in match try (stat phys_f).st_kind with _ -> S_BLK with | S_DIR when recur -> if List.mem phys_f !norecdir_list then () else add_directory recur add_file phys_f (log_dir@[f]) | S_REG -> add_file phys_dir log_dir f | _ -> () done with End_of_file -> closedir dirh let add_dir add_file phys_dir log_dir = try add_directory false add_file phys_dir log_dir with Unix_error _ -> () let add_rec_dir add_file phys_dir log_dir = handle_unix_error (add_directory true add_file phys_dir) log_dir let rec treat_file old_dirname old_name = let name = Filename.basename old_name and new_dirname = Filename.dirname old_name in let dirname = match (old_dirname,new_dirname) with | (d, ".") -> d | (None,d) -> Some d | (Some d1,d2) -> Some (d1//d2) in let complete_name = file_name name dirname in match try (stat complete_name).st_kind with _ -> S_BLK with | S_DIR -> (if name.[0] <> '.' 
then let dir=opendir complete_name in let newdirname = match dirname with | None -> name | Some d -> d//name in try while true do treat_file (Some newdirname) (readdir dir) done with End_of_file -> closedir dir) | S_REG -> (match get_extension name [".v";".ml";".mli";".ml4";".mllib";".mlpack"] with | (base,".v") -> let name = file_name base dirname and absname = absolute_file_name base dirname in addQueue vAccu (name, absname) | (base,(".ml"|".ml4" as ext)) -> addQueue mlAccu (base,ext,dirname) | (base,".mli") -> addQueue mliAccu (base,dirname) | (base,".mllib") -> addQueue mllibAccu (base,dirname) | (base,".mlpack") -> addQueue mlpackAccu (base,dirname) | _ -> ()) | _ -> () coq-8.4pl2/tools/fake_ide.ml0000640000175000001440000000634612052731210015052 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* exit 1 | _ -> () let commands = [ "INTERPRAWSILENT", (fun s -> eval_call (Ide_intf.interp (true,false,s))); "INTERPRAW", (fun s -> eval_call (Ide_intf.interp (true,true,s))); "INTERPSILENT", (fun s -> eval_call (Ide_intf.interp (false,false,s))); "INTERP", (fun s -> eval_call (Ide_intf.interp (false,true,s))); "REWIND", (fun s -> eval_call (Ide_intf.rewind (int_of_string s))); "GOALS", (fun _ -> eval_call Ide_intf.goals); "HINTS", (fun _ -> eval_call Ide_intf.hints); "GETOPTIONS", (fun _ -> eval_call Ide_intf.get_options); "STATUS", (fun _ -> eval_call Ide_intf.status); "INLOADPATH", (fun s -> eval_call (Ide_intf.inloadpath s)); "MKCASES", (fun s -> eval_call (Ide_intf.mkcases s)); "#", (fun _ -> raise Comment); ] let read_eval_print line = let lline = String.length line in let rec find_cmd = function | [] -> prerr_endline ("Error: Unknown API Command :"^line); exit 1 | (cmd,fn) :: cmds -> let lcmd = String.length cmd in if lline >= lcmd && String.sub line 0 lcmd = cmd then let arg = try String.sub line (lcmd+1) (lline-lcmd-1) with _ -> "" in fn arg else find_cmd cmds in find_cmd commands let usage () = Printf.printf "A fake coqide process talking to a coqtop -ideslave.\n\ Usage: %s []\n\ Input syntax is one API call per line, the keyword coming first,\n\ with the rest of the line as string argument (e.g. INTERP Check plus.)\n\ Supported API keywords are:\n" (Filename.basename Sys.argv.(0)); List.iter (fun (s,_) -> Printf.printf "\t%s\n" s) commands; exit 1 let main = Sys.set_signal Sys.sigpipe (Sys.Signal_handle (fun _ -> prerr_endline "Broken Pipe (coqtop died ?)"; exit 1)); let coqtop_name = match Array.length Sys.argv with | 1 -> "coqtop" | 2 when Sys.argv.(1) <> "-help" -> Sys.argv.(1) | _ -> usage () in coqtop := Unix.open_process (coqtop_name^" -ideslave"); while true do let l = try read_line () with End_of_file -> exit 0 in try read_eval_print l with | Comment -> () | e -> prerr_endline ("Uncaught exception" ^ Printexc.to_string e); exit 1 done coq-8.4pl2/tools/coqwc.mll0000640000175000001440000002175112010532755014617 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* printf " %s" f | _ -> ()); if !percentage then begin let s = sl + pl + dl in let p = if s > 0 then 100 * dl / s else 0 in printf " (%d%%)" p end; print_newline () let print_file fo = print_line !slines !plines !dlines fo let print_totals () = print_line !tslines !tplines !tdlines (Some "total") (*i*)}(*i*) (*s Shortcuts for regular expressions. 
The [rcs] regular expression is used to skip the CVS infos possibly contained in some comments, in order not to consider it as documentation. *) let space = [' ' '\t' '\r'] let character = "'" ([^ '\\' '\''] | '\\' (['\\' '\'' 'n' 't' 'b' 'r'] | ['0'-'9'] ['0'-'9'] ['0'-'9'])) "'" let rcs_keyword = "Author" | "Date" | "Header" | "Id" | "Name" | "Locker" | "Log" | "RCSfile" | "Revision" | "Source" | "State" let rcs = "\036" rcs_keyword [^ '$']* "\036" let stars = "(*" '*'* "*)" let dot = '.' (' ' | '\t' | '\n' | '\r' | eof) let proof_start = "Theorem" | "Lemma" | "Fact" | "Remark" | "Goal" | "Correctness" | "Obligation" | "Next" let proof_end = ("Save" | "Qed" | "Defined" | "Abort" | "Admitted") [^'.']* '.' (*s [spec] scans the specification. *) rule spec = parse | "(*" { comment lexbuf; spec lexbuf } | '"' { let n = string lexbuf in slines := !slines + n; seen_spec := true; spec lexbuf } | '\n' { newline (); spec lexbuf } | space+ | stars { spec lexbuf } | proof_start space { seen_spec := true; spec_to_dot lexbuf; proof lexbuf } | proof_start '\n' { seen_spec := true; newline (); spec_to_dot lexbuf; proof lexbuf } | "Program"? "Definition" space { seen_spec := true; definition lexbuf } | "Program"? "Fixpoint" space { seen_spec := true; definition lexbuf } | character | _ { seen_spec := true; spec lexbuf } | eof { () } (*s [spec_to_dot] scans a spec until a dot is reached and returns. *) and spec_to_dot = parse | "(*" { comment lexbuf; spec_to_dot lexbuf } | '"' { let n = string lexbuf in slines := !slines + n; seen_spec := true; spec_to_dot lexbuf } | '\n' { newline (); spec_to_dot lexbuf } | dot { () } | space+ | stars { spec_to_dot lexbuf } | character | _ { seen_spec := true; spec_to_dot lexbuf } | eof { () } (*s [definition] scans a definition; passes to [proof] is the body is absent, and to [spec] otherwise *) and definition = parse | "(*" { comment lexbuf; definition lexbuf } | '"' { let n = string lexbuf in slines := !slines + n; seen_spec := true; definition lexbuf } | '\n' { newline (); definition lexbuf } | ":=" { seen_spec := true; spec lexbuf } | dot { proof lexbuf } | space+ | stars { definition lexbuf } | character | _ { seen_spec := true; definition lexbuf } | eof { () } (*s Scans a proof, then returns to [spec]. *) and proof = parse | "(*" { comment lexbuf; proof lexbuf } | '"' { let n = string lexbuf in plines := !plines + n; seen_proof := true; proof lexbuf } | space+ | stars { proof lexbuf } | '\n' { newline (); proof lexbuf } | "Proof" space* '.' { seen_proof := true; proof lexbuf } | "Proof" space { proof_term lexbuf } | proof_end { seen_proof := true; spec lexbuf } | character | _ { seen_proof := true; proof lexbuf } | eof { () } and proof_term = parse | "(*" { comment lexbuf; proof_term lexbuf } | '"' { let n = string lexbuf in plines := !plines + n; seen_proof := true; proof_term lexbuf } | space+ | stars { proof_term lexbuf } | '\n' { newline (); proof_term lexbuf } | dot { spec lexbuf } | character | _ { seen_proof := true; proof_term lexbuf } | eof { () } (*s Scans a comment. *) and comment = parse | "(*" { comment lexbuf; comment lexbuf } | "*)" { () } | '"' { let n = string lexbuf in dlines := !dlines + n; seen_comment := true; comment lexbuf } | '\n' { newline (); comment lexbuf } | space+ | stars { comment lexbuf } | character | _ { seen_comment := true; comment lexbuf } | eof { () } (*s The entry [string] reads a string until its end and returns the number of newlines it contains. 
*) and string = parse | '"' { 0 } | '\\' ('\\' | 'n' | '"') { string lexbuf } | '\n' { succ (string lexbuf) } | _ { string lexbuf } | eof { 0 } (*s The following entry [read_header] is used to skip the possible header at the beggining of files (unless option \texttt{-e} is specified). It stops whenever it encounters an empty line or any character outside a comment. In this last case, it correctly resets the lexer position on that character (decreasing [lex_curr_pos] by 1). *) and read_header = parse | "(*" { skip_comment lexbuf; skip_until_nl lexbuf; read_header lexbuf } | "\n" { () } | space+ { read_header lexbuf } | _ { lexbuf.lex_curr_pos <- lexbuf.lex_curr_pos - 1 } | eof { () } and skip_comment = parse | "*)" { () } | "(*" { skip_comment lexbuf; skip_comment lexbuf } | _ { skip_comment lexbuf } | eof { () } and skip_until_nl = parse | '\n' { () } | _ { skip_until_nl lexbuf } | eof { () } (*i*){(*i*) (*s Processing files and channels. *) let process_channel ch = let lb = Lexing.from_channel ch in reset_counters (); if !skip_header then read_header lb; spec lb let process_file f = try let ch = open_in f in process_channel ch; close_in ch; print_file (Some f); update_totals () with | Sys_error "Is a directory" -> flush stdout; eprintf "coqwc: %s: Is a directory\n" f; flush stderr | Sys_error s -> flush stdout; eprintf "coqwc: %s\n" s; flush stderr (*s Parsing of the command line. *) let usage () = prerr_endline "usage: coqwc [options] [files]"; prerr_endline "Options are:"; prerr_endline " -p print percentage of comments"; prerr_endline " -s print only the spec size"; prerr_endline " -r print only the proof size"; prerr_endline " -e (everything) do not skip headers"; exit 1 let rec parse = function | [] -> [] | ("-h" | "-?" | "-help" | "--help") :: _ -> usage () | ("-s" | "--spec-only") :: args -> proof_only := false; spec_only := true; parse args | ("-r" | "--proof-only") :: args -> spec_only := false; proof_only := true; parse args | ("-p" | "--percentage") :: args -> percentage := true; parse args | ("-e" | "--header") :: args -> skip_header := false; parse args | f :: args -> f :: (parse args) (*s Main program. *) let main () = let files = parse (List.tl (Array.to_list Sys.argv)) in if not (!spec_only || !proof_only) then printf " spec proof comments\n"; match files with | [] -> process_channel stdin; print_file None | [f] -> process_file f | _ -> List.iter process_file files; print_totals () let _ = Printexc.catch main () (*i*)}(*i*) coq-8.4pl2/tools/compat5.mlp0000640000175000001440000000170512010532755015054 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ] -> [< '(KEYWORD "EXTEND", loc); my_token_filter s >] | [< 'tokloc; s >] -> [< 'tokloc; my_token_filter s >] | [< >] -> [< >] let _ = Token.Filter.define_filter (Gram.get_filter()) (fun prev strm -> prev (my_token_filter strm)) coq-8.4pl2/tools/win32hack_filename.ml0000640000175000001440000000021111754414461016753 0ustar notinusers(* The mingw32-ocaml cross-compiler currently uses Filename.dir_sep="/". Let's tweak that... 
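   Note that the patch below mutates the string in place:
   Filename.dir_sep is the single-character string "/", so overwriting
   index 0 turns it into "\\".  This relies on the mutable strings of
   the OCaml releases this tree targets; with -safe-string
   (OCaml 4.06 and later) the assignment would be rejected and a
   different workaround would be needed.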
*) let _ = Filename.dir_sep.[0] <- '\\' coq-8.4pl2/tools/compat5b.mlp0000640000175000001440000000172112010532755015214 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ] -> [< 't; '(UIDENT "Gram", Loc.ghost); my_token_filter s >] | [< 'tokloc; s >] -> [< 'tokloc; my_token_filter s >] | [< >] -> [< >] let _ = Token.Filter.define_filter (Gram.get_filter()) (fun prev strm -> prev (my_token_filter strm)) coq-8.4pl2/tools/coqdep_common.mli0000640000175000001440000000417412010532755016323 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string -> string val get_extension : string -> string list -> string * string val basename_noext : string -> string val mlAccu : (string * string * dir) list ref val mliAccu : (string * dir) list ref val mllibAccu : (string * dir) list ref val vAccu : (string * string) list ref val addQueue : 'a list ref -> 'a -> unit val add_ml_known : string -> dir -> unit val iter_ml_known : (string -> dir -> unit) -> unit val search_ml_known : string -> dir option val add_mli_known : string -> dir -> unit val iter_mli_known : (string -> dir -> unit) -> unit val search_mli_known : string -> dir option val add_mllib_known : string -> dir -> unit val search_mllib_known : string -> dir option val vKnown : (string list, string) Hashtbl.t val coqlibKnown : (string list, unit) Hashtbl.t val file_name : string -> string option -> string val escape : string -> string val canonize : string -> string val mL_dependencies : unit -> unit val coq_dependencies : unit -> unit val suffixes : 'a list -> 'a list list val add_known : string -> string list -> string -> unit val add_directory : bool -> (string -> string list -> string -> unit) -> string -> string list -> unit val add_dir : (string -> string list -> string -> unit) -> string -> string list -> unit val add_rec_dir : (string -> string list -> string -> unit) -> string -> string list -> unit val treat_file : dir -> string -> unit coq-8.4pl2/tools/coq-db.el0000640000175000001440000002074111422377320014463 0ustar notinusers;;; coq-db.el --- coq keywords database utility functions ;; ;; Author: Pierre Courtieu ;; License: GPL (GNU GENERAL PUBLIC LICENSE) ;; ;;; We store all information on keywords (tactics or command) in big ;; tables (ex: `coq-tactics-db') From there we get: menus including ;; "smart" commands, completions for command coq-insert-... ;; abbrev tables and font-lock keyword ;;; real value defined below ;;; Commentary: ;; ;;; Code: ;(require 'proof-config) ; for proof-face-specs, a macro ;(require 'holes) (defconst coq-syntax-db nil "Documentation-only variable, for coq keyword databases. Each element of a keyword database contains the definition of a \"form\", of the form: (MENUNAME ABBREV INSERT STATECH KWREG INSERT-FUN HIDE) MENUNAME is the name of form (or form variant) as it should appear in menus or completion lists. ABBREV is the abbreviation for completion via \\[expand-abbrev]. INSERT is the complete text of the form, which may contain holes denoted by \"#\" or \"@{xxx}\". If non-nil the optional STATECH specifies that the command is not state preserving for coq. If non-nil the optional KWREG is the regexp to colorize correponding to the keyword. ex: \"simple\\\\s-+destruct\" (\\\\s-+ meaning \"one or more spaces\"). *WARNING*: A regexp longer than another one should be put FIRST. 
For example: (\"Module Type\" ... ... t \"Module\\s-+Type\") (\"Module\" ... ... t \"Module\") Is ok because the longer regexp is recognized first. If non-nil the optional INSERT-FUN is the function to be called when inserting the form (instead of inserting INSERT, except when using \\[expand-abbrev]). This allows to write functions asking for more information to assist the user. If non-nil the optional HIDE specifies that this form should not appear in the menu but only in interactive completions. Example of what could be in your emacs init file: (defvar coq-user-tactics-db '( (\"mytac\" \"mt\" \"mytac # #\" t \"mytac\") (\"myassert by\" \"massb\" \"myassert ( # : # ) by #\" t \"assert\") )) Explanation of the first line: the tactic menu entry mytac, abbreviated by mt, will insert \"mytac # #\" where #s are holes to fill, and \"mytac\" becomes a new keyword to colorize." ) (defun coq-insert-from-db (db prompt) "Ask for a keyword, with completion on keyword database DB and insert. Insert corresponding string with holes at point. If an insertion function is present for the keyword, call it instead. see `coq-syntax-db' for DB structure." (let* ((tac (completing-read (concat prompt " (tab for completion) : ") db nil nil)) (infos (cddr (assoc tac db))) (s (car infos)) ; completion to insert (f (car-safe (cdr-safe (cdr-safe (cdr infos))))) ; insertion function (pt (point))) (if f (funcall f) ; call f if present (insert (or s tac)) ; insert completion and indent otherwise (holes-replace-string-by-holes-backward-jump pt) (indent-according-to-mode)))) (defun coq-build-regexp-list-from-db (db &optional filter) "Take a keyword database DB and return the list of regexps for font-lock. If non-nil Optional argument FILTER is a function applying to each line of DB. For each line if FILTER returns nil, then the keyword is not added to the regexp. See `coq-syntax-db' for DB structure." (let ((l db) (res ())) (while l (let* ((hd (car l)) (tl (cdr l)) ; hd is the first infos list (e1 (car hd)) (tl1 (cdr hd)) ; e1 = menu entry (e2 (car tl1)) (tl2 (cdr tl1)) ; e2 = abbreviation (e3 (car tl2)) (tl3 (cdr tl2)) ; e3 = completion (e4 (car-safe tl3)) (tl4 (cdr-safe tl3)) ; e4 = state changing (e5 (car-safe tl4)) (tl5 (cdr-safe tl4)) ; e5 = colorization string ) ;; TODO delete doublons (when (and e5 (or (not filter) (funcall filter hd))) (setq res (nconc res (list e5)))) ; careful: nconc destructive! (setq l tl))) res )) ;; Computes the max length of strings in a list (defun max-length-db (db) "Return the length of the longest first element (menu label) of DB. See `coq-syntax-db' for DB structure." (let ((l db) (res 0)) (while l (let ((lgth (length (car (car l))))) (setq res (max lgth res)) (setq l (cdr l)))) res)) (defun coq-build-menu-from-db-internal (db lgth menuwidth) "Take a keyword database DB and return one insertion submenu. Argument LGTH is the max size of the submenu. Argument MENUWIDTH is the width of the largest line in the menu (without abbrev and shortcut specifications). Used by `coq-build-menu-from-db', which you should probably use instead. See `coq-syntax-db' for DB structure." 
(let ((l db) (res ()) (size lgth) (keybind-abbrev (substitute-command-keys " \\[expand-abbrev]"))) (while (and l (> size 0)) (let* ((hd (car l))(tl (cdr l)) ; hd is a list of length 3 or 4 (e1 (car hd)) (tl1 (cdr hd)) ; e1 = menu entry (e2 (car tl1)) (tl2 (cdr tl1)) ; e2 = abbreviation (e3 (car tl2)) (tl3 (cdr tl2)) ; e3 = completion (e4 (car-safe tl3)) (tl4 (cdr-safe tl3)) ; e4 = state changing (e5 (car-safe tl4)) (tl5 (cdr-safe tl4)) ; e5 = colorization string (e6 (car-safe tl5)) ; e6 = function for smart insertion (e7 (car-safe (cdr-safe tl5))) ; e7 = if non-nil : hide in menu (entry-with (max (- menuwidth (length e1)) 0)) (spaces (make-string entry-with ? )) ;;(restofmenu (coq-build-menu-from-db-internal tl (- size 1) menuwidth)) ) (when (not e7) ;; if not hidden (let ((menu-entry (vector ;; menu entry label (concat e1 (if (not e2) "" (concat spaces "(" e2 keybind-abbrev ")"))) ;;insertion function if present otherwise insert completion (if e6 e6 `(holes-insert-and-expand ,e3)) t))) (setq res (nconc res (list menu-entry)))));; append *in place* (setq l tl) (setq size (- size 1)))) res)) (defun coq-build-title-menu (db size) "Build a title for the first submenu of DB, of size SIZE. Return the string made of the first and the SIZE nth first element of DB, separated by \"...\". Used by `coq-build-menu-from-db'. See `coq-syntax-db' for DB structure." (concat (car-safe (car-safe db)) " ... " (car-safe (car-safe (nthcdr (- size 1) db))))) (defun coq-sort-menu-entries (menu) (sort menu '(lambda (x y) (string< (downcase (elt x 0)) (downcase (elt y 0)))))) (defun coq-build-menu-from-db (db &optional size) "Take a keyword database DB and return a list of insertion menus for them. Submenus contain SIZE entries (default 30). See `coq-syntax-db' for DB structure." ;; sort is destructive for the list, so copy list before sorting (let* ((l (coq-sort-menu-entries (copy-list db))) (res ()) (wdth (+ 2 (max-length-db db))) (sz (or size 30)) (lgth (length l))) (while l (if (<= lgth sz) (setq res ;; careful: nconc destructive! (nconc res (list (cons (coq-build-title-menu l lgth) (coq-build-menu-from-db-internal l lgth wdth))))) (setq res ; careful: nconc destructive! (nconc res (list (cons (coq-build-title-menu l sz) (coq-build-menu-from-db-internal l sz wdth)))))) (setq l (nthcdr sz l)) (setq lgth (length l))) res)) (defun coq-build-abbrev-table-from-db (db) "Take a keyword database DB and return an abbrev table. See `coq-syntax-db' for DB structure." (let ((l db) (res ())) (while l (let* ((hd (car l))(tl (cdr l)) ; hd is a list of length 3 or 4 (e1 (car hd)) (tl1 (cdr hd)) ; e1 = menu entry (e2 (car tl1)) (tl2 (cdr tl1)) ; e2 = abbreviation (e3 (car tl2)) (tl3 (cdr tl2)) ; e3 = completion ) ;; careful: nconc destructive! (when e2 (setq res (nconc res (list `(,e2 ,e3 holes-abbrev-complete))))) (setq l tl))) res)) (defun filter-state-preserving (l) ; checkdoc-params: (l) "Not documented." (not (nth 3 l))) ; fourth argument is nil --> state preserving command (defun filter-state-changing (l) ; checkdoc-params: (l) "Not documented." (nth 3 l)) ; fourth argument is nil --> state preserving command (defconst coq-solve-tactics-face 'coq-solve-tactics-face "Expression that evaluates to a face. Required so that 'proof-solve-tactics-face is a proper facename") ;;A new face for tactics which fail when they don't kill the current goal (defface coq-solve-tactics-face '((t (:background "red"))) "Face for names of closing tactics in proof scripts." 
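  ;; This face is what `coq-solve-tactics-face' above evaluates to; it is
  ;; used by the font-lock tables in coq-syntax.el to highlight the closing
  ;; tactics collected in `coq-solve-tactics-db'.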
:group 'proof-faces) (provide 'coq-db) ;;; coq-db.el ends here ;** Local Variables: *** ;** fill-column: 80 *** ;** End: *** coq-8.4pl2/tools/mingwpath.ml0000640000175000001440000000053311757162655015337 0ustar notinusers(** Mingwpath *) (** Converts mingw-encoded filenames such as: /c/Program Files/Ocaml/bin to a more windows-friendly form (but still with / instead of \) : c:/Program Files/Ocaml/bin This nice hack was suggested by Benjamin Monate (cf bug #2526) to mimic the cygwin-specific tool cygpath *) print_string Sys.argv.(1) coq-8.4pl2/tools/coqdep_boot.ml0000640000175000001440000000355612010532755015630 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* option_slash := true; parse ll | "-natdynlink" :: "no" :: ll -> option_natdynlk := false; parse ll | "-c" :: ll -> option_c := true; parse ll | "-boot" :: ll -> parse ll (* We're already in boot mode by default *) | "-mldep" :: ocamldep :: ll -> option_mldep := Some ocamldep; option_c := true; parse ll | "-I" :: r :: ll -> (* To solve conflict (e.g. same filename in kernel and checker) we allow to state an explicit order *) add_dir add_known r []; norecdir_list:=r::!norecdir_list; parse ll | f :: ll -> treat_file None f; parse ll | [] -> () let coqdep_boot () = if Array.length Sys.argv < 2 then exit 1; parse (List.tl (Array.to_list Sys.argv)); if !option_c then add_rec_dir add_known "." [] else begin add_rec_dir add_known "theories" ["Coq"]; add_rec_dir add_known "plugins" ["Coq"]; end; if !option_c then mL_dependencies (); coq_dependencies () let _ = Printexc.catch coqdep_boot () coq-8.4pl2/tools/coq-syntax.el0000640000175000001440000012723211610373016015424 0ustar notinusers;; coq-syntax.el Font lock expressions for Coq ;; Copyright (C) 1997-2007 LFCS Edinburgh. ;; Authors: Thomas Kleymann, Healfdene Goguen, Pierre Courtieu ;; License: GPL (GNU GENERAL PUBLIC LICENSE) ;; Maintainer: Pierre Courtieu ;; coq-syntax.el,v 9.9 2008/07/21 15:14:58 pier Exp ;(require 'proof-syntax) ;(require 'proof-utils) ; proof-locate-executable (require 'coq-db) ;;; keyword databases (defcustom coq-user-tactics-db nil "User defined tactic information. See `coq-syntax-db' for syntax. It is not necessary to add your own tactics here (it is not needed by the synchronizing/backtracking system). You may however do so for the following reasons: 1 your tactics will be colorized by font-lock 2 your tactics will be added to the menu and to completion when calling \\[coq-insert-tactic] 3 you may define an abbreviation for your tactic." :type '(repeat sexp) :group 'coq) (defcustom coq-user-commands-db nil "User defined command information. See `coq-syntax-db' for syntax. It is not necessary to add your own commands here (it is not needed by the synchronizing/backtracking system). You may however do so for the following reasons: 1 your commands will be colorized by font-lock 2 your commands will be added to the menu and to completion when calling \\[coq-insert-command] 3 you may define an abbreviation for your command." :type '(repeat sexp) :group 'coq) (defcustom coq-user-tacticals-db nil "User defined tactical information. See `coq-syntax-db' for syntax. It is not necessary to add your own commands here (it is not needed by the synchronizing/backtracking system). 
You may however do so for the following reasons: 1 your commands will be colorized by font-lock 2 your commands will be added to the menu and to completion when calling \\[coq-insert-command] 3 you may define an abbreviation for your command." :type '(repeat sexp) :group 'coq) (defcustom coq-user-solve-tactics-db nil "User defined closing tactics. See `coq-syntax-db' for syntax. It is not necessary to add your own commands here (it is not needed by the synchronizing/backtracking system). You may however do so for the following reasons: 1 your commands will be colorized by font-lock 2 your commands will be added to the menu and to completion when calling \\[coq-insert-command] 3 you may define an abbreviation for your command." :type '(repeat sexp) :group 'coq) (defcustom coq-user-reserved-db nil "User defined reserved keywords information. See `coq-syntax-db' for syntax. It is not necessary to add your own commands here (it is not needed by the synchronizing/backtracking system). You may however do so for the following reasons: 1 your commands will be colorized by font-lock 2 your commands will be added to the menu and to completion when calling \\[coq-insert-command] 3 you may define an abbreviation for your command." :type '(repeat sexp) :group 'coq) (defvar coq-tactics-db (append coq-user-tactics-db '( ("absurd " "abs" "absurd " t "absurd") ("apply" "ap" "apply " t "apply") ("assert by" "assb" "assert ( # : # ) by #" t "assert") ("assert" "ass" "assert ( # : # )" t) ;; ("assumption" "as" "assumption" t "assumption") ("auto with arith" "awa" "auto with arith" t) ("auto with" "aw" "auto with @{db}" t) ("auto" "a" "auto" t "auto") ("autorewrite with in using" "arwiu" "autorewrite with @{db,db...} in @{hyp} using @{tac}" t) ("autorewrite with in" "arwi" "autorewrite with @{db,db...} in @{hyp}" t) ("autorewrite with using" "arwu" "autorewrite with @{db,db...} using @{tac}" t) ("autorewrite with" "ar" "autorewrite with @{db,db...}" t "autorewrite") ("case" "c" "case " t "case") ("cbv" "cbv" "cbv beta [#] delta iota zeta" t "cbv") ("change in" "chi" "change # in #" t) ("change with in" "chwi" "change # with # in #" t) ("change with" "chw" "change # with" t) ("change" "ch" "change " t "change") ("clear" "cl" "clear" t "clear") ("clearbody" "cl" "clearbody" t "clearbody") ("cofix" "cof" "cofix" t "cofix") ("coinduction" "coind" "coinduction" t "coinduction") ("compare" "cmpa" "compare # #" t "compare") ("compute" "cmpu" "compute" t "compute") ;; ("congruence" "cong" "congruence" t "congruence") ("constructor" "cons" "constructor" t "constructor") ;; ("contradiction" "contr" "contradiction" t "contradiction") ("cut" "cut" "cut" t "cut") ("cutrewrite" "cutr" "cutrewrite -> # = #" t "cutrewrite") ;; ("decide equality" "deg" "decide equality" t "decide\\s-+equality") ("decompose record" "decr" "decompose record #" t "decompose\\s-+record") ("decompose sum" "decs" "decompose sum #" t "decompose\\s-+sum") ("decompose" "dec" "decompose [#] #" t "decompose") ("dependent inversion" "depinv" "dependent inversion" t "dependent\\s-+inversion") ("dependent inversion with" "depinvw" "dependent inversion # with #" t) ("dependent inversion_clear" "depinvc" "dependent inversion_clear" t "dependent\\s-+inversion_clear") ("dependent inversion_clear with" "depinvw" "dependent inversion_clear # with #" t) ("dependent rewrite ->" "depr" "dependent rewrite -> @{id}" t "dependent\\s-+rewrite") ("dependent rewrite <-" "depr<" "dependent rewrite <- @{id}" t) ("destruct as" "desa" "destruct # as #" t) ("destruct using" 
"desu" "destruct # using #" t) ("destruct" "des" "destruct " t "destruct") ;; ("discriminate" "dis" "discriminate" t "discriminate") ("discrR" "discrR" "discrR" t "discrR") ("double induction" "dind" "double induction # #" t "double\\s-+induction") ("eapply" "eap" "eapply #" t "eapply") ("eauto with arith" "eawa" "eauto with arith" t) ("eauto with" "eaw" "eauto with @{db}" t) ("eauto" "ea" "eauto" t "eauto") ("econstructor" "econs" "econstructor" t "econstructor") ("eexists" "eex" "eexists" t "eexists") ("eleft" "eleft" "eleft" t "eleft") ("elim using" "elu" "elim # using #" t) ("elim" "e" "elim #" t "elim") ("elimtype" "elt" "elimtype" "elimtype") ("eright" "erig" "eright" "eright") ("esplit" "esp" "esplit" t "esplit") ;; ("exact" "exa" "exact" t "exact") ("exists" "ex" "exists #" t "exists") ;; ("fail" "fa" "fail" nil) ;; ("field" "field" "field" t "field") ("firstorder" "fsto" "firstorder" t "firstorder") ("firstorder with" "fsto" "firstorder with #" t) ("firstorder with using" "fsto" "firstorder # with #" t) ("fold" "fold" "fold #" t "fold") ;; ("fourier" "four" "fourier" t "fourier") ("functional induction" "fi" "functional induction @{f} @{args}" t "functional\\s-+induction") ("generalize dependent" "gd" "generalize dependent #" t "generalize\\s-+dependent") ("generalize" "g" "generalize #" t "generalize") ("hnf" "hnf" "hnf" t "hnf") ("idtac" "id" "idtac" nil "idtac") ; also in tacticals with abbrev id ("idtac \"" "id\"" "idtac \"#\"") ; also in tacticals ("induction" "ind" "induction #" t "induction") ("induction using" "indu" "induction # using #" t) ("injection" "inj" "injection #" t "injection") ("instantiate" "inst" "instantiate" t "instantiate") ("intro" "i" "intro" t "intro") ("intro after" "ia" "intro # after #" t) ("intros" "is" "intros #" t "intros") ("intros! 
(guess names)" nil "intros #" nil nil coq-insert-intros) ("intros until" "isu" "intros until #" t) ("intuition" "intu" "intuition #" t "intuition") ("inversion" "inv" "inversion #" t "inversion") ("inversion in" "invi" "inversion # in #" t) ("inversion using" "invu" "inversion # using #" t) ("inversion using in" "invui" "inversion # using # in #" t) ("inversion_clear" "invcl" "inversion_clear" t "inversion_clear") ("lapply" "lap" "lapply" t "lapply") ("lazy" "lazy" "lazy beta [#] delta iota zeta" t "lazy") ("left" "left" "left" t "left") ("linear" "lin" "linear" t "linear") ("load" "load" "load" t "load") ("move after" "mov" "move # after #" t "move") ("omega" "o" "omega" t "omega") ("pattern" "pat" "pattern" t "pattern") ("pattern(s)" "pats" "pattern # , #" t) ("pattern at" "pata" "pattern # at #" t) ("pose" "po" "pose ( # := # )" t "pose") ("prolog" "prol" "prolog" t "prolog") ("quote" "quote" "quote" t "quote") ("quote []" "quote2" "quote # [#]" t) ("red" "red" "red" t "red") ("refine" "ref" "refine" t "refine") ;; ("reflexivity" "refl" "reflexivity #" t "reflexivity") ("rename into" "ren" "rename # into #" t "rename") ("replace with" "rep" "replace # with #" t "replace") ("replace with in" "repi" "replace # with # in #" t) ("rewrite <- in" "ri<" "rewrite <- # in #" t) ("rewrite <-" "r<" "rewrite <- #" t) ("rewrite in" "ri" "rewrite # in #" t) ("rewrite" "r" "rewrite #" t "rewrite") ("right" "rig" "right" t "right") ;; ("ring" "ring" "ring #" t "ring") ("set in * |-" "seth" "set ( # := #) in * |-" t) ("set in *" "set*" "set ( # := #) in *" t) ("set in |- *" "setg" "set ( # := #) in |- *" t) ("set in" "seti" "set ( # := #) in #" t) ("set" "set" "set ( # := #)" t "set") ("setoid_replace with" "strep2" "setoid_replace # with #" t "setoid_replace") ("setoid replace with" "strep" "setoid replace # with #" t "setoid\\s-+replace") ("setoid_rewrite" "strew" "setoid_rewrite #" t "setoid_rewrite") ("setoid rewrite" "strew" "setoid rewrite #" t "setoid\\s-+rewrite") ("simpl" "s" "simpl" t "simpl") ("simpl" "sa" "simpl # at #" t) ("simple destruct" "sdes" "simple destruct" t "simple\\s-+destruct") ("simple inversion" "sinv" "simple inversion" t "simple\\s-+inversion") ("simple induction" "sind" "simple induction" t "simple\\s-+induction") ("simplify_eq" "simeq" "simplify_eq @{hyp}" t "simplify_eq") ("specialize" "spec" "specialize" t "specialize") ("split" "sp" "split" t "split") ("split_Rabs" "spra" "splitRabs" t "split_Rabs") ("split_Rmult" "sprm" "splitRmult" t "split_Rmult") ("stepl" "stl" "stepl #" t "stepl") ("stepl by" "stlb" "stepl # by #" t) ("stepr" "str" "stepr #" t "stepr") ("stepr by" "strb" "stepr # by #" t) ("subst" "su" "subst #" t "subst") ("symmetry" "sy" "symmetry" t "symmetry") ("symmetry in" "syi" "symmetry in #" t) ;; ("tauto" "ta" "tauto" t "tauto") ("transitivity" "trans" "transitivity #" t "transitivity") ("trivial" "t" "trivial" t "trivial") ("trivial with" "tw" "trivial with @{db}" t) ("unfold" "u" "unfold #" t "unfold") ("unfold(s)" "us" "unfold # , #" t) ("unfold in" "unfi" "unfold # in #" t) ("unfold at" "unfa" "unfold # at #" t) )) "Coq tactics information list. See `coq-syntax-db' for syntax. 
" ) (defvar coq-solve-tactics-db (append coq-user-solve-tactics-db '( ("assumption" "as" "assumption" t "assumption") ("by" "by" "by #" t "by") ("congruence" "cong" "congruence" t "congruence") ("contradiction" "contr" "contradiction" t "contradiction") ("decide equality" "deg" "decide equality" t "decide\\s-+equality") ("discriminate" "dis" "discriminate" t "discriminate") ("exact" "exa" "exact" t "exact") ("fourier" "four" "fourier" t "fourier") ("fail" "fa" "fail" nil) ("field" "field" "field" t "field") ("omega" "o" "omega" t "omega") ("reflexivity" "refl" "reflexivity #" t "reflexivity") ("ring" "ring" "ring #" t "ring") ("solve" nil "solve [ # | # ]" nil "solve") ("tauto" "ta" "tauto" t "tauto") )) "Coq tactic(al)s that solve a subgoal." ) (defvar coq-tacticals-db (append coq-user-tacticals-db '( ("info" nil "info #" nil "info") ("first" nil "first [ # | # ]" nil "first") ("abstract" nil "abstract @{tac} using @{name}." nil "abstract") ("do" nil "do @{num} @{tac}" nil "do") ("idtac" nil "idtac") ; also in tactics ; ("idtac \"" nil "idtac \"#\"") ; also in tactics ("fail" "fa" "fail" nil "fail") ; ("fail \"" "fa\"" "fail" nil) ; ; ("orelse" nil "orelse #" t "orelse") ("repeat" nil "repeat #" nil "repeat") ("try" nil "try #" nil "try") ("progress" nil "progress #" nil "progress") ("|" nil "[ # | # ]" nil) ("||" nil "# || #" nil) )) "Coq tacticals information list. See `coq-syntax-db' for syntax.") (defvar coq-decl-db '( ("Axiom" "ax" "Axiom # : #" t "Axiom") ("Hint Constructors" "hc" "Hint Constructors # : #." t "Hint\\s-+Constructors") ("Hint Extern" "he" "Hint Extern @{cost} @{pat} => @{tac} : @{db}." t "Hint\\s-+Extern") ("Hint Immediate" "hi" "Hint Immediate # : @{db}." t "Hint\\s-+Immediate") ("Hint Resolve" "hr" "Hint Resolve # : @{db}." t "Hint\\s-+Resolve") ("Hint Rewrite ->" "hrw" "Hint Rewrite -> @{t1,t2...} using @{tac} : @{db}." t "Hint\\s-+Rewrite") ("Hint Rewrite <-" "hrw" "Hint Rewrite <- @{t1,t2...} using @{tac} : @{db}." t ) ("Hint Unfold" "hu" "Hint Unfold # : #." t "Hint\\s-+Unfold") ("Hypothesis" "hyp" "Hypothesis #: #" t "Hypothesis") ("Hypotheses" "hyp" "Hypotheses #: #" t "Hypotheses") ("Parameter" "par" "Parameter #: #" t "Parameter") ("Parameters" "par" "Parameter #: #" t "Parameters") ("Conjecture" "conj" "Conjecture #: #." t "Conjecture") ("Variable" "v" "Variable #: #." t "Variable") ("Variables" "vs" "Variables # , #: #." t "Variables") ("Coercion" "coerc" "Coercion @{id} : @{typ1} >-> @{typ2}." t "Coercion") ) "Coq declaration keywords information list. See `coq-syntax-db' for syntax." ) (defvar coq-defn-db '( ("CoFixpoint" "cfix" "CoFixpoint # (#:#) : # :=\n#." t "CoFixpoint") ("CoInductive" "coindv" "CoInductive # : # :=\n|# : #." t "CoInductive") ("Declare Module : :=" "dm" "Declare Module # : # := #." t "Declare\\s-+Module") ("Declare Module <: :=" "dm2" "Declare Module # <: # := #." t);; careful ("Declare Module Import : :=" "dmi" "Declare Module # : # := #." t) ("Declare Module Import <: :=" "dmi2" "Declare Module # <: # := #." t);; careful ("Declare Module Export : :=" "dme" "Declare Module # : # := #." t) ("Declare Module Export <: :=" "dme2" "Declare Module # <: # := #." t);; careful ("Definition" "def" "Definition #:# := #." t "Definition");; careful ("Definition (2 args)" "def2" "Definition # (# : #) (# : #):# := #." t) ("Definition (3 args)" "def3" "Definition # (# : #) (# : #) (# : #):# := #." t) ("Definition (4 args)" "def4" "Definition # (# : #) (# : #) (# : #) (# : #):# := #." t) ("Program Definition" "pdef" "Program Definition #:# := #." 
t "Program\\s-+Definition");; careful ? ("Program Definition (2 args)" "pdef2" "Program Definition # (# : #) (# : #):# := #." t) ("Program Definition (3 args)" "pdef3" "Program Definition # (# : #) (# : #) (# : #):# := #." t) ("Program Definition (4 args)" "pdef4" "Program Definition # (# : #) (# : #) (# : #) (# : #):# := #." t) ("Derive Inversion" nil "Derive Inversion @{id} with # Sort #." t "Derive\\s-+Inversion") ("Derive Dependent Inversion" nil "Derive Dependent Inversion @{id} with # Sort #." t "Derive\\s-+Dependent\\s-+Inversion") ("Derive Inversion_clear" nil "Derive Inversion_clear @{id} with # Sort #." t) ("Fixpoint" "fix" "Fixpoint # (#:#) {struct @{arg}} : # :=\n#." t "Fixpoint") ("Program Fixpoint" "pfix" "Program Fixpoint # (#:#) {struct @{arg}} : # :=\n#." t "Program\\s-+Fixpoint") ("Program Fixpoint measure" "pfixm" "Program Fixpoint # (#:#) {measure @{arg} @{f}} : # :=\n#." t) ("Program Fixpoint wf" "pfixwf" "Program Fixpoint # (#:#) {wf @{arg} @{f}} : # :=\n#." t) ("Function" "func" "Function # (#:#) {struct @{arg}} : # :=\n#." t "Function") ("Function measure" "funcm" "Function # (#:#) {measure @{f} @{arg}} : # :=\n#." t) ("Function wf" "func wf" "Function # (#:#) {wf @{R} @{arg}} : # :=\n#." t) ("Functional Scheme with" "fsw" "Functional Scheme @{name} := Induction for @{fun} with @{mutfuns}." t ) ("Functional Scheme" "fs" "Functional Scheme @{name} := Induction for @{fun}." t "Functional\\s-+Scheme") ("Inductive" "indv" "Inductive # : # := # : #." t "Inductive") ("Inductive (2 args)" "indv2" "Inductive # : # :=\n| # : #\n| # : #." t ) ("Inductive (3 args)" "indv3" "Inductive # : # :=\n| # : #\n| # : #\n| # : #." t ) ("Inductive (4 args)" "indv4" "Inductive # : # :=\n| # : #\n| # : #\n| # : #\n| # : #." t ) ("Inductive (5 args)" "indv5" "Inductive # : # :=\n| # : #\n| # : #\n| # : #\n| # : #\n| # : #." t ) ("Let" "Let" "Let # : # := #." t "Let") ("Ltac" "ltac" "Ltac # := #" t "Ltac") ("Module :=" "mo" "Module # : # := #." t ) ; careful ("Module <: :=" "mo2" "Module # <: # := #." t ) ; careful ("Module Import :=" "moi" "Module Import # : # := #." t ) ; careful ("Module Import <: :=" "moi2" "Module Import # <: # := #." t ) ; careful ("Module Export :=" "moe" "Module Export # : # := #." t ) ; careful ("Module Export <: :=" "moe2" "Module Export# <: # := #." t ) ; careful ("Record" "rec" "Record # : # := {\n# : #;\n# : # }" t "Record") ("Scheme" "sc" "Scheme @{name} := #." t "Scheme") ("Scheme Induction" "sci" "Scheme @{name} := Induction for # Sort #." t) ("Scheme Minimality" "scm" "Scheme @{name} := Minimality for # Sort #." t) ("Structure" "str" "Structure # : # := {\n# : #;\n# : # }" t "Structure") ) "Coq definition keywords information list. See `coq-syntax-db' for syntax. " ) ;; modules and section are indented like goal starters (defvar coq-goal-starters-db '( ("Add Morphism" "addmor" "Add Morphism @{f} : @{id}" t "Add\\s-+Morphism") ("Chapter" "chp" "Chapter # : #." t "Chapter") ("Corollary" "cor" "Corollary # : #.\nProof.\n#\nQed." t "Corollary") ("Declare Module :" "dmi" "Declare Module # : #.\n#\nEnd #." t) ("Declare Module <:" "dmi2" "Declare Module # <: #.\n#\nEnd #." t) ("Definition goal" "defg" "Definition #:#.\n#\nSave." t);; careful ("Fact" "fct" "Fact # : #." t "Fact") ("Goal" nil "Goal #." t "Goal") ("Lemma" "l" "Lemma # : #.\nProof.\n#\nQed." t "Lemma") ("Program Lemma" "pl" "Program Lemma # : #.\nProof.\n#\nQed." t "Program\\s-+Lemma") ("Module! (interactive)" nil "Module # : #.\n#\nEnd #." 
nil nil coq-insert-section-or-module) ("Module Type" "mti" "Module Type #.\n#\nEnd #." t "Module\\s-+Type") ; careful ("Module :" "moi" "Module # : #.\n#\nEnd #." t "Module") ; careful ("Module <:" "moi2" "Module # <: #.\n#\nEnd #." t ) ; careful ("Remark" "rk" "Remark # : #.\n#\nQed." t "Remark") ("Section" "sec" "Section #." t "Section") ("Theorem" "th" "Theorem # : #.\n#\nQed." t "Theorem") ("Program Theorem" "pth" "Program Theorem # : #.\nProof.\n#\nQed." t "Program\\s-+Theorem") ("Obligation" "obl" "Obligation #.\n#\nQed." t "Obligation") ("Next Obligation" "nobl" "Next Obligation.\n#\nQed." t "Next Obligation") ) "Coq goal starters keywords information list. See `coq-syntax-db' for syntax. " ) ;; command that are not declarations, definition or goal starters (defvar coq-other-commands-db '( ;; ("Abort" nil "Abort." t "Abort" nil nil);don't appear in menu ("About" nil "About #." nil "About") ("Add" nil "Add #." nil "Add" nil t) ("Add Abstract Ring" nil "Add Abstract Ring #." t "Add\\s-+Abstract\\s-+Ring") ("Add Abstract Semi Ring" nil "Add Abstract Semi Ring #." t "Add\\s-+Abstract\\s-+Semi\\s-+Ring") ("Add Field" nil "Add Field #." t "Add\\s-+Field") ("Add LoadPath" nil "Add LoadPath #." nil "Add\\s-+LoadPath") ("Add ML Path" nil "Add ML Path #." nil "Add\\s-+ML\\s-+Path") ("Add Morphism" nil "Add Morphism #." t "Add\\s-+Morphism") ("Add Printing" nil "Add Printing #." t "Add\\s-+Printing") ("Add Printing Constructor" nil "Add Printing Constructor #." t "Add\\s-+Printing\\s-+Constructor") ("Add Printing If" nil "Add Printing If #." t "Add\\s-+Printing\\s-+If") ("Add Printing Let" nil "Add Printing Let #." t "Add\\s-+Printing\\s-+Let") ("Add Printing Record" nil "Add Printing Record #." t "Add\\s-+Printing\\s-+Record") ("Add Rec LoadPath" nil "Add Rec LoadPath #." nil "Add\\s-+Rec\\s-+LoadPath") ("Add Rec ML Path" nil "Add Rec ML Path #." nil "Add\\s-+Rec\\s-+ML\\s-+Path") ("Add Ring" nil "Add Ring #." t "Add\\s-+Ring") ("Add Semi Ring" nil "Add Semi Ring #." t "Add\\s-+Semi\\s-+Ring") ("Add Setoid" nil "Add Setoid #." t "Add\\s-+Setoid") ("Admit Obligations" "oblsadmit" "Admit Obligations." nil "Admit\\s-+Obligations") ("Arguments Scope" "argsc" "Arguments Scope @{id} [ @{_} ]" t "Arguments\\s-+Scope") ("Bind Scope" "bndsc" "Bind Scope @{scope} with @{type}" t "Bind\\s-+Scope") ("Canonical Structure" nil "Canonical Structure #." t "Canonical\\s-+Structure") ("Cd" nil "Cd #." nil "Cd") ("Check" nil "Check" nil "Check") ("Close Local Scope" "cllsc" "Close Local Scope #" t "Close\\s-+Local\\s-+Scope") ("Close Scope" "clsc" "Close Scope #" t "Close\\s-+Scope") ("Comments" nil "Comments #." nil "Comments") ("Delimit Scope" "delsc" "Delimit Scope @{scope} with @{id}." t "Delimit\\s-+Scope" ) ("Eval" nil "Eval #." nil "Eval") ("Export" nil "Export #." t "Export") ("Extract Constant" "extrc" "Extract Constant @{id} => \"@{id}\"." nil "Extract\\s-+Constant") ("Extract Inlined Constant" "extric" "Extract Inlined Constant @{id} => \"@{id}\"." nil "Extract\\s-+Inlined\\s-+Constant") ("Extract Inductive" "extri" "Extract Inductive @{id} => \"@{id}\" [\"@{id}\" \"@{id...}\"]." nil "Extract") ("Extraction" "extr" "Extraction @{id}." nil "Extraction") ("Extraction (in a file)" "extrf" "Extraction \"@{file}\" @{id}." nil) ("Extraction Inline" nil "Extraction Inline #." t "Extraction\\s-+Inline") ("Extraction NoInline" nil "Extraction NoInline #." t "Extraction\\s-+NoInline") ("Extraction Language" "extrlang" "Extraction Language #." 
t "Extraction\\s-+Language") ("Extraction Library" "extrl" "Extraction Library @{id}." nil "Extraction\\s-+Library") ("Focus" nil "Focus #." nil "Focus") ("Identity Coercion" nil "Identity Coercion #." t "Identity\\s-+Coercion") ("Implicit Arguments Off" nil "Implicit Arguments Off." t "Implicit\\s-+Arguments\\s-+Off") ("Implicit Arguments On" nil "Implicit Arguments On." t "Implicit\\s-+Arguments\\s-+On") ("Implicit Arguments" nil "Implicit Arguments # [#]." t "Implicit\\s-+Arguments") ("Import" nil "Import #." t "Import") ("Infix" "inf" "Infix \"#\" := # (at level #) : @{scope}." t "Infix") ("Inspect" nil "Inspect #." nil "Inspect") ("Locate" nil "Locate" nil "Locate") ("Locate File" nil "Locate File \"#\"." nil "Locate\\s-+File") ("Locate Library" nil "Locate Library #." nil "Locate\\s-+Library") ("Notation (assoc)" "notas" "Notation \"#\" := # (at level #, # associativity)." t) ("Notation (at assoc)" "notassc" "Notation \"#\" := # (at level #, # associativity) : @{scope}." t) ("Notation (at at scope)" "notasc" "Notation \"#\" := # (at level #, # at level #) : @{scope}." t) ("Notation (at at)" "nota" "Notation \"#\" := # (at level #, # at level #)." t) ("Notation (only parsing)" "notsp" "Notation # := # (only parsing)." t) ("Notation Local (only parsing)" "notslp" "Notation Local # := # (only parsing)." t) ("Notation Local" "notsl" "Notation Local # := #." t "Notation\\s-+Local") ("Notation (simple)" "nots" "Notation # := #." t "Notation") ("Opaque" nil "Opaque #." nil "Opaque") ("Obligations Tactic" nil "Obligations Tactic := #." t "Obligations\\s-+Tactic") ("Open Local Scope" "oplsc" "Open Local Scope #" t "Open\\s-+Local\\s-+Scope") ("Open Scope" "opsc" "Open Scope #" t "Open\\s-+Scope") ("Print Coercions" nil "Print Coercions." nil "Print\\s-+Coercions") ("Print Hint" nil "Print Hint." nil "Print\\s-+Hint" coq-PrintHint) ("Print" "p" "Print #." nil "Print") ("Qed" nil "Qed." nil "Qed") ("Pwd" nil "Pwd." nil "Pwd") ("Recursive Extraction" "recextr" "Recursive Extraction @{id}." nil "Recursive\\s-+Extraction") ("Recursive Extraction Library" "recextrl" "Recursive Extraction Library @{id}." nil "Recursive\\s-+Extraction\\s-+Library") ("Recursive Extraction Module" "recextrm" "Recursive Extraction Module @{id}." nil "Recursive\\s-+Extraction\\s-+Module") ("Remove LoadPath" nil "Remove LoadPath" nil "Remove\\s-+LoadPath") ("Remove LoadPath" nil "Remove LoadPath" nil "Remove\\s-+LoadPath") ("Remove Printing If" nil "Remove Printing If #." t "Remove\\s-+Printing\\s-+If") ("Remove Printing Let" nil "Remove Printing Let #." t "Remove\\s-+Printing\\s-+Let") ("Require Export" nil "Require Export #." t "Require\\s-+Export") ("Require Import" nil "Require Import #." t "Require\\s-+Import") ("Require" nil "Require #." t "Require") ("Reserved Notation" nil "Reserved Notation" nil "Reserved\\s-+Notation") ("Reset Extraction Inline" nil "Reset Extraction Inline." t "Reset\\s-+Extraction\\s-+Inline") ("Save" nil "Save." 
t "Save") ("Search" nil "Search #" nil "Search") ("SearchAbout" nil "SearchAbout #" nil "SearchAbout") ("SearchPattern" nil "SearchPattern #" nil "SearchPattern") ("SearchRewrite" nil "SearchRewrite #" nil "SearchRewrite") ("Set Extraction AutoInline" nil "Set Extraction AutoInline" t "Set\\s-+Extraction\\s-+AutoInline") ("Set Extraction Optimize" nil "Set Extraction Optimize" t "Set\\s-+Extraction\\s-+Optimize") ("Set Implicit Arguments" nil "Set Implicit Arguments" t "Set\\s-+Implicit\\s-+Arguments") ("Set Strict Implicit" nil "Set Strict Implicit" t "Set\\s-+Strict\\s-+Implicit") ("Set Printing Synth" nil "Set Printing Synth" t "Set\\s-+Printing\\s-+Synth") ("Set Printing Wildcard" nil "Set Printing Wildcard" t "Set\\s-+Printing\\s-+Wildcard") ("Set Printing All" "sprall" "Set Printing All" t "Set\\s-+Printing\\s-+All") ("Set Printing Records" nil "Set Printing Records" t "Set\\s-+Printing\\s-+Records") ("Set Hyps Limit" nil "Set Hyps Limit #." nil "Set\\s-+Hyps\\s-+Limit") ("Set Printing Coercions" nil "Set Printing Coercions." t "Set\\s-+Printing\\s-+Coercions") ("Set Printing Notations" "sprn" "Set Printing Notations" t "Set\\s-+Printing\\s-+Notations") ("Set Undo" nil "Set Undo #." nil "Set\\s-+Undo") ("Show" nil "Show #." nil "Show") ("Solve Obligations" "oblssolve" "Solve Obligations using #." nil "Solve\\s-+Obligations") ("Test" nil "Test" nil "Test" nil t) ("Test Printing Depth" nil "Test Printing Depth." nil "Test\\s-+Printing\\s-+Depth") ("Test Printing If" nil "Test Printing If #." nil "Test\\s-+Printing\\s-+If") ("Test Printing Let" nil "Test Printing Let #." nil "Test\\s-+Printing\\s-+Let") ("Test Printing Synth" nil "Test Printing Synth." nil "Test\\s-+Printing\\s-+Synth") ("Test Printing Width" nil "Test Printing Width." nil "Test\\s-+Printing\\s-+Width") ("Test Printing Wildcard" nil "Test Printing Wildcard." nil "Test\\s-+Printing\\s-+Wildcard") ("Transparent" nil "Transparent #." nil "Transparent") ("Unfocus" nil "Unfocus." nil "Unfocus") ("Unset Extraction AutoInline" nil "Unset Extraction AutoInline" t "Unset\\s-+Extraction\\s-+AutoInline") ("Unset Extraction Optimize" nil "Unset Extraction Optimize" t "Unset\\s-+Extraction\\s-+Optimize") ("Unset Implicit Arguments" nil "Unset Implicit Arguments" t "Unset\\s-+Implicit\\s-+Arguments") ("Unset Strict Implicit" nil "Unset Strict Implicit" t "Unset\\s-+Strict\\s-+Implicit") ("Unset Printing Synth" nil "Unset Printing Synth" t "Unset\\s-+Printing\\s-+Synth") ("Unset Printing Wildcard" nil "Unset Printing Wildcard" t "Unset\\s-+Printing\\s-+Wildcard") ("Unset Hyps Limit" nil "Unset Hyps Limit" nil "Unset\\s-+Hyps\\s-+Limit") ("Unset Printing All" "unsprall" "Unset Printing All" nil "Unset\\s-+Printing\\s-+All") ("Unset Printing Coercion" nil "Unset Printing Coercion #." t "Unset\\s-+Printing\\s-+Coercion") ("Unset Printing Coercions" nil "Unset Printing Coercions." nil "Unset\\s-+Printing\\s-+Coercions") ("Unset Printing Notations" "unsprn" "Unset Printing Notations" nil "Unset\\s-+Printing\\s-+Notations") ("Unset Undo" nil "Unset Undo." nil "Unset\\s-+Undo") ; ("print" "pr" "print #" "print") ) "Command that are not declarations, definition or goal starters." ) (defvar coq-commands-db (append coq-decl-db coq-defn-db coq-goal-starters-db coq-other-commands-db coq-user-commands-db) "Coq all commands keywords information list. See `coq-syntax-db' for syntax. 
" ) (defvar coq-terms-db '( ("fun (1 args)" "f" "fun #:# => #" nil "fun") ("fun (2 args)" "f2" "fun (#:#) (#:#) => #") ("fun (3 args)" "f3" "fun (#:#) (#:#) (#:#) => #") ("fun (4 args)" "f4" "fun (#:#) (#:#) (#:#) (#:#) => #") ("forall" "fo" "forall #:#,#" nil "forall") ("forall (2 args)" "fo2" "forall (#:#) (#:#), #") ("forall (3 args)" "fo3" "forall (#:#) (#:#) (#:#), #") ("forall (4 args)" "fo4" "forall (#:#) (#:#) (#:#) (#:#), #") ("if" "if" "if # then # else #" nil "if") ("let in" "li" "let # := # in #" nil "let") ("match! (from type)" nil "" nil "match" coq-insert-match) ("match with" "m" "match # with\n| # => #\nend") ("match with 2" "m2" "match # with\n| # => #\n| # => #\nend") ("match with 3" "m3" "match # with\n| # => #\n| # => #\n| # => #\nend") ("match with 4" "m4" "match # with\n| # => #\n| # => #\n| # => #\n| # => #\nend") ("match with 5" "m5" "match # with\n| # => #\n| # => #\n| # => #\n| # => #\n| # => #\nend") ) "Coq terms keywords information list. See `coq-syntax-db' for syntax. " ) ;;; Goals (and module/sections) starters detection ;; ----- keywords for font-lock. ;; FIXME da: this one function breaks the nice configuration of Proof General: ;; would like to have proof-goal-regexp instead. ;; Unfortunately Coq allows "Definition" and friends to perhaps have a goal, ;; so it appears more difficult than just a proof-goal-regexp setting. ;; Future improvement may simply to be allow a function value for ;; proof-goal-regexp. ;; FIXME Pierre: the right way IMHO here would be to set a span ;; property 'goalcommand when coq prompt says it (if the name of ;; current proof has changed). ;; excerpt of Jacek Chrzaszcz, implementer of the module system: sorry ;; for the french: ;;*) suivant les suggestions de Chritine, pas de mode preuve dans un type de ;; module (donc pas de Definition truc:machin. Lemma, Theorem ... ) ;; ;; *) la commande Module M [ ( : | <: ) MTYP ] [ := MEXPR ] est valable ;; uniquement hors d'un MT ;; - si :=MEXPR est absent, elle demarre un nouveau module interactif ;; - si :=MEXPR est present, elle definit un module ;; (la fonction vernac_define_module dans toplevel/vernacentries) ;; ;; *) la nouvelle commande Declare Module M [ ( : | <: ) MTYP ] [ := MEXPR ] ;; est valable uniquement dans un MT ;; - si :=MEXPR absent, :MTYP absent, elle demarre un nouveau module ;; interactif ;; - si (:=MEXPR absent, :MTYP present) ;; ou (:=MEXPR present, :MTYP absent) ;; elle declare un module. ;; (la fonction vernac_declare_module dans toplevel/vernacentries) (defun coq-count-match (regexp strg) "Count the number of (maximum, non overlapping) matching substring of STRG matching REGEXP. Empty match are counted once." (let ((nbmatch 0) (str strg)) (while (and (proof-string-match regexp str) (not (string-equal str ""))) (incf nbmatch) (if (= (match-end 0) 0) (setq str (substring str 1)) (setq str (substring str (match-end 0))))) nbmatch)) ;; This function is used for amalgamating a proof into a single ;; goal-save region (proof-goal-command-p used in ;; proof-done-advancing-save in generic/proof-script.el) for coq < ;; 8.0. It is the test when looking backward the start of the proof. ;; It is NOT used for coq > v8.1 ;; (coq-find-and-forget in coq.el uses state numbers, proof numbers and ;; lemma names given in the prompt) ;; compatibility with v8.0, will delete it some day (defun coq-goal-command-str-v80-p (str) "See `coq-goal-command-p'." 
(let* ((match (coq-count-match "\\" str)) (with (coq-count-match "\\" str)) (letwith (+ (coq-count-match "\\" str) (- with match))) (affect (coq-count-match ":=" str))) (and (proof-string-match coq-goal-command-regexp str) (not ; (and (proof-string-match "\\`\\(Local\\|Definition\\|Lemma\\|Module\\)\\>" str) (not (= letwith affect)))) (not (proof-string-match "\\`Declare\\s-+Module\\(\\w\\|\\s-\\|<\\)*:" str)) ) ) ) ;; Module and or section openings are detected syntactically. Module ;; *openings* are difficult to detect because there can be Module ;; ...with X := ... . So we need to count :='s to detect real openings. ;; TODO: have opened section/chapter in the prompt too, and get rid of ;; syntactical tests everywhere (defun coq-module-opening-p (str) "Decide whether STR is a module or section opening or not. Used by `coq-goal-command-p'" (let* ((match (coq-count-match "\\" str)) (with (coq-count-match "\\" str)) (letwith (+ (coq-count-match "\\" str) (- with match))) (affect (coq-count-match ":=" str))) (and (proof-string-match "\\`\\(Module\\)\\>" str) (= letwith affect)) )) (defun coq-section-command-p (str) (proof-string-match "\\`\\(Section\\|Chapter\\)\\>" str)) (defun coq-goal-command-str-v81-p (str) "Decide syntactically whether STR is a goal start or not. Use `coq-goal-command-p-v81' on a span instead if possible." (coq-goal-command-str-v80-p str) ) ;; This is the function that tests if a SPAN is a goal start. All it ;; has to do is look at the 'goalcmd attribute of the span. ;; It also looks if this is not a module start. ;; TODO: have also attributes 'modulecmd and 'sectioncmd. This needs ;; something in the coq prompt telling the name of all opened modules ;; (like for open goals), and use it to set goalcmd --> no more need ;; to look at Modules and section (actually indentation will still ;; need it) (defun coq-goal-command-p-v81 (span) "see `coq-goal-command-p'" (or (span-property span 'goalcmd) ;; module and section starts are detected here (let ((str (or (span-property span 'cmd) ""))) (or (coq-section-command-p str) (coq-module-opening-p str)) ))) ;; In coq > 8.1 This is used only for indentation. (defun coq-goal-command-str-p (str) "Decide whether argument is a goal or not. Use `coq-goal-command-p' on a span instead if posible." (cond (coq-version-is-V8-1 (coq-goal-command-str-v81-p str)) (coq-version-is-V8-0 (coq-goal-command-str-v80-p str)) (t (coq-goal-command-str-v80-p str));; this is temporary )) ;; This is used for backtracking (defun coq-goal-command-p (span) "Decide whether argument is a goal or not." 
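  ;; Version dispatch: for Coq >= 8.1 the decision uses the span's
  ;; 'goalcmd property (set from the prompt) plus the syntactic module /
  ;; section tests; for 8.0 (and, temporarily, as the default) it falls
  ;; back to the purely syntactic test on the command string.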
(cond (coq-version-is-V8-1 (coq-goal-command-p-v81 span)) (coq-version-is-V8-0 (coq-goal-command-str-v80-p (span-property span 'cmd))) (t (coq-goal-command-str-v80-p (span-property span 'cmd)));; this is temporary )) (defvar coq-keywords-save-strict '("Defined" "Save" "Qed" "End" "Admitted" "Abort" )) (defvar coq-keywords-save (append coq-keywords-save-strict '("Proof")) ) (defun coq-save-command-p (span str) "Decide whether argument is a Save command or not" (or (proof-string-match coq-save-command-regexp-strict str) (and (proof-string-match "\\`Proof\\>" str) (not (proof-string-match "Proof\\s-*\\(\\.\\|\\\\)" str))) ) ) (defvar coq-keywords-kill-goal '("Abort")) ;; Following regexps are all state changing (defvar coq-keywords-state-changing-misc-commands (coq-build-regexp-list-from-db coq-commands-db 'filter-state-changing)) (defvar coq-keywords-goal (coq-build-regexp-list-from-db coq-goal-starters-db)) (defvar coq-keywords-decl (coq-build-regexp-list-from-db coq-decl-db)) (defvar coq-keywords-defn (coq-build-regexp-list-from-db coq-defn-db)) (defvar coq-keywords-state-changing-commands (append coq-keywords-state-changing-misc-commands coq-keywords-decl ; all state changing coq-keywords-defn ; idem coq-keywords-goal)) ; idem ;; (defvar coq-keywords-state-preserving-commands (coq-build-regexp-list-from-db coq-commands-db 'filter-state-preserving)) ;; concat this is faster that redoing coq-build-regexp-list-from-db on ;; whole commands-db (defvar coq-keywords-commands (append coq-keywords-state-changing-commands coq-keywords-state-preserving-commands) "All commands keyword.") (defvar coq-solve-tactics (coq-build-regexp-list-from-db coq-solve-tactics-db) "Keywords for closing tactic(al)s.") (defvar coq-tacticals (coq-build-regexp-list-from-db coq-tacticals-db) "Keywords for tacticals in a Coq script.") ;; From JF Monin: (defvar coq-reserved (append coq-user-reserved-db '( "False" "True" "after" "as" "cofix" "fix" "forall" "fun" "match" "return" "struct" "else" "end" "if" "in" "into" "let" "then" "using" "with" "beta" "delta" "iota" "zeta" "after" "until" "at" "Sort" "Time")) "Reserved keywords of Coq.") (defvar coq-state-changing-tactics (coq-build-regexp-list-from-db coq-tactics-db 'filter-state-changing)) (defvar coq-state-preserving-tactics (coq-build-regexp-list-from-db coq-tactics-db 'filter-state-preserving)) (defvar coq-tactics (append coq-state-changing-tactics coq-state-preserving-tactics)) (defvar coq-retractable-instruct (append coq-state-changing-tactics coq-keywords-state-changing-commands)) (defvar coq-non-retractable-instruct (append coq-state-preserving-tactics coq-keywords-state-preserving-commands)) (defvar coq-keywords (append coq-keywords-goal coq-keywords-save coq-keywords-decl coq-keywords-defn coq-keywords-commands) "All keywords in a Coq script.") (defvar coq-symbols '("|" "||" ":" ";" "," "(" ")" "[" "]" "{" "}" ":=" "=>" "->" ".") "Punctuation Symbols used by Coq.") ;; ----- regular expressions (defvar coq-error-regexp "^\\(Error:\\|Discarding pattern\\|Syntax error:\\|System Error:\\|User Error:\\|User error:\\|Anomaly[:.]\\|Toplevel input[,]\\)" "A regexp indicating that the Coq process has identified an error.") (defvar coq-id proof-id) (defvar coq-id-shy "\\(?:\\w\\(?:\\w\\|\\s_\\)*\\)") (defvar coq-ids (proof-ids coq-id " ")) (defun coq-first-abstr-regexp (paren end) (concat paren "\\s-*\\(" coq-ids "\\)\\s-*" end)) (defcustom coq-variable-highlight-enable t "Activates partial bound variable highlighting" :type 'boolean :group 'coq) (defvar 
coq-font-lock-terms (if coq-variable-highlight-enable (list ;; lambda binders (list (coq-first-abstr-regexp "\\" "\\(?:=>\\|:\\)") 1 'font-lock-variable-name-face) ;; forall binder (list (coq-first-abstr-regexp "\\" "\\(?:,\\|:\\)") 1 'font-lock-variable-name-face) ; (list "\\" ; (list 0 font-lock-type-face) ; (list (concat "[^ :]\\s-*\\(" coq-ids "\\)\\s-*") nil nil ; (list 0 font-lock-variable-name-face))) ;; parenthesized binders (list (coq-first-abstr-regexp "(" ":[ a-zA-Z]") 1 'font-lock-variable-name-face) )) "*Font-lock table for Coq terms.") ;; According to Coq, "Definition" is both a declaration and a goal. ;; It is understood here as being a goal. This is important for ;; recognizing global identifiers, see coq-global-p. (defconst coq-save-command-regexp-strict (proof-anchor-regexp (concat "\\(Time\\s-+\\|\\)\\(" (proof-ids-to-regexp coq-keywords-save-strict) "\\)"))) (defconst coq-save-command-regexp (proof-anchor-regexp (concat "\\(Time\\s-+\\|\\)\\(" (proof-ids-to-regexp coq-keywords-save) "\\)"))) (defconst coq-save-with-hole-regexp (concat "\\(Time\\s-+\\|\\)\\(" (proof-ids-to-regexp coq-keywords-save-strict) "\\)\\s-+\\(" coq-id "\\)\\s-*\\.")) (defconst coq-goal-command-regexp (proof-anchor-regexp (proof-ids-to-regexp coq-keywords-goal))) (defconst coq-goal-with-hole-regexp (concat "\\(" (proof-ids-to-regexp coq-keywords-goal) "\\)\\s-+\\(" coq-id "\\)\\s-*:?")) (defconst coq-decl-with-hole-regexp (concat "\\(" (proof-ids-to-regexp coq-keywords-decl) "\\)\\s-+\\(" coq-ids "\\)\\s-*:")) ;; (defconst coq-decl-with-hole-regexp ;; (if coq-variable-highlight-enable coq-decl-with-hole-regexp-1 'nil)) (defconst coq-defn-with-hole-regexp (concat "\\(" (proof-ids-to-regexp coq-keywords-defn) "\\)\\s-+\\(" coq-id "\\)")) ;; must match: ;; "with f x y :" (followed by = or not) ;; "with f x y (z:" (not followed by =) ;; BUT NOT: ;; "with f ... (x:=" ;; "match ... with .. => " (defconst coq-with-with-hole-regexp (concat "\\(with\\)\\s-+\\(" coq-id "\\)\\s-*\\([^=(.]*:\\|[^(]*(\\s-*" coq-id "\\s-*:[^=]\\)")) ;; marche aussi a peu pres ;; (concat "\\(with\\)\\s-+\\(" coq-id "\\)\\s-*\\([^(.]*:\\|.*)[^(.]*:=\\)")) ;;"\\\\|\\\\|\\" (defvar coq-font-lock-keywords-1 (append coq-font-lock-terms (list (cons (proof-ids-to-regexp coq-solve-tactics) 'coq-solve-tactics-face) (cons (proof-ids-to-regexp coq-keywords) 'font-lock-keyword-face) (cons (proof-ids-to-regexp coq-reserved) 'font-lock-type-face) (cons (proof-ids-to-regexp coq-tactics ) 'proof-tactics-name-face) (cons (proof-ids-to-regexp coq-tacticals) 'proof-tacticals-name-face) (cons (proof-ids-to-regexp (list "Set" "Type" "Prop")) 'font-lock-type-face) (cons "============================" 'font-lock-keyword-face) (cons "Subtree proved!" 'font-lock-keyword-face) (cons "subgoal [0-9]+ is:" 'font-lock-keyword-face) (list "^\\([^ \n]+\\) \\(is defined\\)" (list 2 'font-lock-keyword-face t) (list 1 'font-lock-function-name-face t)) (list coq-goal-with-hole-regexp 2 'font-lock-function-name-face)) (if coq-variable-highlight-enable (list (list coq-decl-with-hole-regexp 2 'font-lock-variable-name-face))) (list (list coq-defn-with-hole-regexp 2 'font-lock-function-name-face) (list coq-with-with-hole-regexp 2 'font-lock-function-name-face) (list coq-save-with-hole-regexp 2 'font-lock-function-name-face) ;; Remove spurious variable and function faces on commas. '(proof-zap-commas)))) (defvar coq-font-lock-keywords coq-font-lock-keywords-1) (defun coq-init-syntax-table () "Set appropriate values for syntax table in current buffer." 
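  ;; Coq comments are (* ... *) and may be nested; the entries for ?\(,
  ;; ?\) and ?* below encode this, falling back to non-nested comment
  ;; syntax on Emacsen that do not support the `n' comment flag.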
(modify-syntax-entry ?\$ ".") (modify-syntax-entry ?\/ ".") (modify-syntax-entry ?\\ ".") (modify-syntax-entry ?+ ".") (modify-syntax-entry ?- ".") (modify-syntax-entry ?= ".") (modify-syntax-entry ?% ".") (modify-syntax-entry ?< ".") (modify-syntax-entry ?> ".") (modify-syntax-entry ?\& ".") (modify-syntax-entry ?_ "_") (modify-syntax-entry ?\' "_") (modify-syntax-entry ?\| ".") ;; should maybe be "_" but it makes coq-find-and-forget (in coq.el) bug (modify-syntax-entry ?\. ".") (condition-case nil ;; Try to use Emacs-21's nested comments. (modify-syntax-entry ?\* ". 23n") ;; Revert to non-nested comments if that failed. (error (modify-syntax-entry ?\* ". 23"))) (modify-syntax-entry ?\( "()1") (modify-syntax-entry ?\) ")(4")) (defconst coq-generic-expression (mapcar (lambda (kw) (list (capitalize kw) (concat "\\<" kw "\\>" "\\s-+\\(\\w+\\)\\W" ) 1)) (append coq-keywords-decl coq-keywords-defn coq-keywords-goal))) (provide 'coq-syntax) ;;; coq-syntax.el ends here ; Local Variables: *** ; indent-tabs-mode: nil *** ; End: *** coq-8.4pl2/tools/coqdoc/0000750000175000001440000000000012127276530014243 5ustar notinuserscoq-8.4pl2/tools/coqdoc/coqdoc.css0000640000175000001440000001214011450434621016217 0ustar notinusersbody { padding: 0px 0px; margin: 0px 0px; background-color: white } #page { display: block; padding: 0px; margin: 0px; padding-bottom: 10px; } #header { display: block; position: relative; padding: 0; margin: 0; vertical-align: middle; border-bottom-style: solid; border-width: thin } #header h1 { padding: 0; margin: 0;} /* Contents */ #main{ display: block; padding: 10px; font-family: sans-serif; font-size: 100%; line-height: 100% } #main h1 { line-height: 95% } /* allow for multi-line headers */ #main a.idref:visited {color : #416DFF; text-decoration : none; } #main a.idref:link {color : #416DFF; text-decoration : none; } #main a.idref:hover {text-decoration : none; } #main a.idref:active {text-decoration : none; } #main a.modref:visited {color : #416DFF; text-decoration : none; } #main a.modref:link {color : #416DFF; text-decoration : none; } #main a.modref:hover {text-decoration : none; } #main a.modref:active {text-decoration : none; } #main .keyword { color : #cf1d1d } #main { color: black } .section { background-color: rgb(60%,60%,100%); padding-top: 13px; padding-bottom: 13px; padding-left: 3px; margin-top: 5px; margin-bottom: 5px; font-size : 175% } h2.section { background-color: rgb(80%,80%,100%); padding-left: 3px; padding-top: 12px; padding-bottom: 10px; font-size : 130% } h3.section { background-color: rgb(90%,90%,100%); padding-left: 3px; padding-top: 7px; padding-bottom: 7px; font-size : 115% } h4.section { /* background-color: rgb(80%,80%,80%); max-width: 20em; padding-left: 5px; padding-top: 5px; padding-bottom: 5px; */ background-color: white; padding-left: 0px; padding-top: 0px; padding-bottom: 0px; font-size : 100%; font-style : bold; text-decoration : underline; } #main .doc { margin: 0px; font-family: sans-serif; font-size: 100%; line-height: 125%; max-width: 40em; color: black; padding: 10px; background-color: #90bdff; border-style: plain} .inlinecode { display: inline; /* font-size: 125%; */ color: #666666; font-family: monospace } .doc .inlinecode { display: inline; font-size: 120%; color: rgb(30%,30%,70%); font-family: monospace } .doc .inlinecode .id { color: rgb(30%,30%,70%); } .inlinecodenm { display: inline; color: #444444; } .doc .code { display: inline; font-size: 120%; color: rgb(30%,30%,70%); font-family: monospace } .comment { display: inline; 
font-family: monospace; color: rgb(50%,50%,80%); } .code { display: block; /* padding-left: 15px; */ font-size: 110%; font-family: monospace; } table.infrule { border: 0px; margin-left: 50px; margin-top: 10px; margin-bottom: 10px; } td.infrule { font-family: monospace; text-align: center; /* color: rgb(35%,35%,70%); */ padding: 0px; line-height: 100%; } tr.infrulemiddle hr { margin: 1px 0 1px 0; } .infrulenamecol { color: rgb(60%,60%,60%); font-size: 80%; padding-left: 1em; padding-bottom: 0.1em } /* Pied de page */ #footer { font-size: 65%; font-family: sans-serif; } .id { display: inline; } .id[type="constructor"] { color: rgb(60%,0%,0%); } .id[type="var"] { color: rgb(40%,0%,40%); } .id[type="variable"] { color: rgb(40%,0%,40%); } .id[type="definition"] { color: rgb(0%,40%,0%); } .id[type="abbreviation"] { color: rgb(0%,40%,0%); } .id[type="lemma"] { color: rgb(0%,40%,0%); } .id[type="instance"] { color: rgb(0%,40%,0%); } .id[type="projection"] { color: rgb(0%,40%,0%); } .id[type="method"] { color: rgb(0%,40%,0%); } .id[type="inductive"] { color: rgb(0%,0%,80%); } .id[type="record"] { color: rgb(0%,0%,80%); } .id[type="class"] { color: rgb(0%,0%,80%); } .id[type="keyword"] { color : #cf1d1d; /* color: black; */ } .inlinecode .id { color: rgb(0%,0%,0%); } /* TOC */ #toc h2 { padding: 10px; background-color: rgb(60%,60%,100%); } #toc li { padding-bottom: 8px; } /* Index */ #index { margin: 0; padding: 0; width: 100%; } #index #frontispiece { margin: 1em auto; padding: 1em; width: 60%; } .booktitle { font-size : 140% } .authors { font-size : 90%; line-height: 115%; } .moreauthors { font-size : 60% } #index #entrance { text-align: center; } #index #entrance .spacer { margin: 0 30px 0 30px; } #index #footer { position: absolute; bottom: 0; text-align: bottom; } .paragraph { height: 0.75em; } ul.doclist { margin-top: 0em; margin-bottom: 0em; } coq-8.4pl2/tools/coqdoc/output.mli0000640000175000001440000000604612010532755016310 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val add_printing_token : string -> string option * string option -> unit val remove_printing_token : string -> unit val set_module : coq_module -> string option -> unit val get_module : bool -> string val header : unit -> unit val trailer : unit -> unit val push_in_preamble : string -> unit val start_module : unit -> unit val start_doc : unit -> unit val end_doc : unit -> unit val start_emph : unit -> unit val stop_emph : unit -> unit val start_comment : unit -> unit val end_comment : unit -> unit val start_coq : unit -> unit val end_coq : unit -> unit val start_code : unit -> unit val end_code : unit -> unit val start_inline_coq : unit -> unit val end_inline_coq : unit -> unit val start_inline_coq_block : unit -> unit val end_inline_coq_block : unit -> unit val indentation : int -> unit val line_break : unit -> unit val paragraph : unit -> unit val empty_line_of_code : unit -> unit val section : int -> (unit -> unit) -> unit val item : int -> unit val stop_item : unit -> unit val reach_item_level : int -> unit val rule : unit -> unit val nbsp : unit -> unit val char : char -> unit val keyword : string -> loc -> unit val ident : string -> loc -> unit val sublexer : char -> loc -> unit val initialize : unit -> unit val proofbox : unit -> unit val latex_char : char -> unit val latex_string : string -> unit val html_char : char -> unit val html_string : string -> unit val verbatim_char : bool -> char -> unit val 
hard_verbatim_char : char -> unit val start_latex_math : unit -> unit val stop_latex_math : unit -> unit val start_verbatim : bool -> unit val stop_verbatim : bool -> unit val start_quote : unit -> unit val stop_quote : unit -> unit val url : string -> string option -> unit (* this outputs an inference rule in one go. You pass it the list of assumptions, then the middle line info, then the conclusion (which is allowed to span multiple lines). In each case, the int is the number of spaces before the start of the line's text and the string is the text of the line with the leading trailing space trimmed. For the middle rule, you can also optionally provide a name. We need the space info so that in modes where we aren't doing something smart we can just format the rule verbatim like the user did *) val inf_rule : (int * string) list -> (int * string * (string option)) -> (int * string) list -> unit val make_multi_index : unit -> unit val make_index : unit -> unit val make_toc : unit -> unit coq-8.4pl2/tools/coqdoc/cdglobals.ml0000640000175000001440000000712312010532755016526 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* "" && Filename.is_relative f then if not (Sys.file_exists !output_dir) then (Printf.eprintf "No such directory: %s\n" !output_dir; exit 1) else Filename.concat !output_dir f else f let open_out_file f = out_channel := try open_out (coqdoc_out f) with Sys_error s -> Printf.eprintf "%s\n" s; exit 1 let close_out_file () = close_out !out_channel type glob_source_t = | NoGlob | DotGlob | GlobFile of string let glob_source = ref DotGlob (*s Manipulations of paths and path aliases *) let normalize_path p = (* We use the Unix subsystem to normalize a physical path (relative or absolute) and get rid of symbolic links, relative links (like ./ or ../ in the middle of the path; it's tricky but it works... 
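     For instance, assuming "/usr" and "/usr/lib" exist and are reachable,
     normalize_path "/tmp/../usr/./lib" evaluates to "/usr/lib": we simply
     chdir into the argument and read back Sys.getcwd (), letting the
     operating system resolve ".", ".." and symbolic links for us.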
*) (* Rq: Sys.getcwd () returns paths without '/' at the end *) let orig = Sys.getcwd () in Sys.chdir p; let res = Sys.getcwd () in Sys.chdir orig; res let normalize_filename f = let basename = Filename.basename f in let dirname = Filename.dirname f in normalize_path dirname, basename (** A weaker analog of the function in Envars *) let guess_coqlib () = let file = "states/initial.coq" in match Coq_config.coqlib with | Some coqlib when Sys.file_exists (Filename.concat coqlib file) -> coqlib | Some _ | None -> let coqbin = normalize_path (Filename.dirname Sys.executable_name) in let prefix = Filename.dirname coqbin in let rpath = if Coq_config.local then [] else (if Coq_config.arch = "win32" then ["lib"] else ["lib";"coq"]) in let coqlib = List.fold_left Filename.concat prefix rpath in if Sys.file_exists (Filename.concat coqlib file) then coqlib else prefix let header_trailer = ref true let header_file = ref "" let header_file_spec = ref false let footer_file = ref "" let footer_file_spec = ref false let quiet = ref true let light = ref false let gallina = ref false let short = ref false let index = ref true let multi_index = ref false let index_name = ref "index" let toc = ref false let page_title = ref "" let title = ref "" let externals = ref true let coqlib = ref Coq_config.wwwstdlib let coqlib_path = ref (guess_coqlib ()) let raw_comments = ref false let parse_comments = ref false let plain_comments = ref false let toc_depth = (ref None : int option ref) let lib_name = ref "Library" let lib_subtitles = ref false let interpolate = ref false let inline_notmono = ref false let charset = ref "iso-8859-1" let inputenc = ref "" let latin1 = ref false let utf8 = ref false let set_latin1 () = charset := "iso-8859-1"; inputenc := "latin1"; latin1 := true let set_utf8 () = charset := "utf-8"; inputenc := "utf8x"; utf8 := true (* Parsing options *) type coq_module = string type file = | Vernac_file of string * coq_module | Latex_file of string coq-8.4pl2/tools/coqdoc/tokens.mli0000640000175000001440000000616012010532755016250 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string -> string -> ttree (* Remove a translation from a dictionary: returns an equal dictionary if the word not present *) val ttree_remove : ttree -> string -> ttree (* Translate a string *) val translate : string -> string option (* Sublexer automaton *) (* The sublexer buffers the chars it receives; if after some time, it recognizes that a sequence of chars has a translation in the current dictionary, it replaces the buffer by the translation *) (* Received chars can come with a "tag" (usually made from informations from the globalization file). A sequence of chars can be considered a word only, if all chars have the same "tag". 
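   As a small illustration (a hypothetical client, with a dictionary that
   only translates "->"), one would set up and drive the sublexer roughly
   as follows:

     token_tree := ref (ttree_add empty_ttree "->" "\\ensuremath{\\rightarrow}");
     outfun := (fun _ _ _ s -> print_string s);
     output_tagged_symbol_char None '-';
     output_tagged_symbol_char None '>';
     flush_sublexer ()

   which prints the translation of "->"; interleaving chars carrying
   different tags would instead flush them as separate, untranslated tokens.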
Rules for cutting words are the following: - in a sequence like "**" where * is in the dictionary but not **, "**" is not translated; otherwise said, to be translated, a sequence must not be surrounded by other symbol-like chars - in a sequence like "<>_h*", where <>_h is in the dictionary, the translation is done because the switch from a letter to a symbol char is an acceptable cutting point - in a sequence like "<>_ha", where <>_h is in the dictionary, the translation is not done because it is considered that h and a are not separable (however, if h and a have different tags, and h has the same tags as <, > and _, the translation happens) - in a sequence like "<>_ha", where <> but not <>_h is in the dictionary, the translation is done for <> and _ha is considered independently because the switch from a symbol char to a letter is considered to be an acceptable cutting point - the longest-word rule applies: if both <> and <>_h are in the dictionary, "<>_h" is one word and gets translated *) (* Warning: do not output anything on output channel inbetween a call to [output_tagged_*] and [flush_sublexer]!! *) type out_function = bool (* needs escape *) -> bool (* it is a symbol, not a pure ident *) -> Index.index_entry option (* the index type of the token if any *) -> string -> unit (* This must be initialized before calling the sublexer *) val token_tree : ttree ref ref val outfun : out_function ref (* Process an ident part that might be a symbol part *) val output_tagged_ident_string : string -> unit (* Process a non-ident char (possibly equipped with a tag) *) val output_tagged_symbol_char : Index.index_entry option -> char -> unit (* Flush the buffered content of the lexer using [outfun] *) val flush_sublexer : unit -> unit coq-8.4pl2/tools/coqdoc/alpha.mli0000640000175000001440000000133512010532755016031 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* char -> int val compare_string : string -> string -> int (* Alphabetic normalization. *) val norm_char : char -> char val norm_string : string -> string coq-8.4pl2/tools/coqdoc/coqdoc.sty0000640000175000001440000001256511476414450016267 0ustar notinusers % This is coqdoc.sty, by Jean-Christophe Fillitre % This LaTeX package is used by coqdoc (http://www.lri.fr/~filliatr/coqdoc) % % You can modify the following macros to customize the appearance % of the document. 
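% For instance, a document that prefers upright bold keywords and non-italic
% variable names could, after \usepackage{coqdoc}, say (a hypothetical
% customization, not part of this package):
%   \renewcommand{\coqdockw}[1]{\textbf{#1}}
%   \renewcommand{\coqdocvar}[1]{\textrm{#1}}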
\NeedsTeXFormat{LaTeX2e} \ProvidesPackage{coqdoc}[2002/02/11] % % Headings % \usepackage{fancyhdr} % \newcommand{\coqdocleftpageheader}{\thepage\ -- \today} % \newcommand{\coqdocrightpageheader}{\today\ -- \thepage} % \pagestyle{fancyplain} % %BEGIN LATEX % \headsep 8mm % \renewcommand{\plainheadrulewidth}{0.4pt} % \renewcommand{\plainfootrulewidth}{0pt} % \lhead[\coqdocleftpageheader]{\leftmark} % \rhead[\leftmark]{\coqdocrightpageheader} % \cfoot{} % %END LATEX % Hevea puts to much space with \medskip and \bigskip %HEVEA\renewcommand{\medskip}{} %HEVEA\renewcommand{\bigskip}{} %HEVEA\newcommand{\lnot}{\coqwkw{not}} %HEVEA\newcommand{\lor}{\coqwkw{or}} %HEVEA\newcommand{\land}{\&} % own name \newcommand{\coqdoc}{\textsf{coqdoc}} % pretty underscores (the package fontenc causes ugly underscores) %BEGIN LATEX \def\_{\kern.08em\vbox{\hrule width.35em height.6pt}\kern.08em} %END LATEX % macro for typesetting keywords \newcommand{\coqdockw}[1]{\texttt{#1}} % macro for typesetting variable identifiers \newcommand{\coqdocvar}[1]{\textit{#1}} % macro for typesetting constant identifiers \newcommand{\coqdoccst}[1]{\textsf{#1}} % macro for typesetting module identifiers \newcommand{\coqdocmod}[1]{\textsc{\textsf{#1}}} % macro for typesetting module constant identifiers (e.g. Parameters in % module types) \newcommand{\coqdocax}[1]{\textsl{\textsf{#1}}} % macro for typesetting inductive type identifiers \newcommand{\coqdocind}[1]{\textbf{\textsf{#1}}} % macro for typesetting constructor identifiers \newcommand{\coqdocconstr}[1]{\textsf{#1}} % macro for typesetting tactic identifiers \newcommand{\coqdoctac}[1]{\texttt{#1}} % These are the real macros used by coqdoc, their typesetting is % based on the above macros by default. \newcommand{\coqdoclibrary}[1]{\coqdoccst{#1}} \newcommand{\coqdocinductive}[1]{\coqdocind{#1}} \newcommand{\coqdocdefinition}[1]{\coqdoccst{#1}} \newcommand{\coqdocvariable}[1]{\coqdocvar{#1}} \newcommand{\coqdocconstructor}[1]{\coqdocconstr{#1}} \newcommand{\coqdoclemma}[1]{\coqdoccst{#1}} \newcommand{\coqdocclass}[1]{\coqdocind{#1}} \newcommand{\coqdocinstance}[1]{\coqdoccst{#1}} \newcommand{\coqdocmethod}[1]{\coqdoccst{#1}} \newcommand{\coqdocabbreviation}[1]{\coqdoccst{#1}} \newcommand{\coqdocrecord}[1]{\coqdocind{#1}} \newcommand{\coqdocprojection}[1]{\coqdoccst{#1}} \newcommand{\coqdocnotation}[1]{\coqdockw{#1}} \newcommand{\coqdocsection}[1]{\coqdoccst{#1}} \newcommand{\coqdocaxiom}[1]{\coqdocax{#1}} \newcommand{\coqdocmodule}[1]{\coqdocmod{#1}} % Environment encompassing code fragments % !!! 
CAUTION: This environment may have empty contents \newenvironment{coqdoccode}{}{} % Environment for comments \newenvironment{coqdoccomment}{\tt(*}{*)} % newline and indentation %BEGIN LATEX % Base indentation length \newlength{\coqdocbaseindent} \setlength{\coqdocbaseindent}{0em} % Beginning of a line without any Coq indentation \newcommand{\coqdocnoindent}{\noindent\kern\coqdocbaseindent} % Beginning of a line with a given Coq indentation \newcommand{\coqdocindent}[1]{\noindent\kern\coqdocbaseindent\noindent\kern#1} % End-of-the-line \newcommand{\coqdoceol}{\hspace*{\fill}\setlength\parskip{0pt}\par} % Empty lines (in code only) \newcommand{\coqdocemptyline}{\vskip 0.4em plus 0.1em minus 0.1em} \usepackage{ifpdf} \ifpdf \RequirePackage{hyperref} \hypersetup{raiselinks=true,colorlinks=true,linkcolor=black} % To do indexing, use something like: % \usepackage{multind} % \newcommand{\coqdef}[3]{\hypertarget{coq:#1}{\index{coq}{#1@#2|hyperpage}#3}} \newcommand{\coqdef}[3]{\phantomsection\hypertarget{coq:#1}{#3}} \newcommand{\coqref}[2]{\hyperlink{coq:#1}{#2}} \newcommand{\coqexternalref}[3]{\href{#1.html\##2}{#3}} \newcommand{\identref}[2]{\hyperlink{coq:#1}{\textsf {#2}}} \newcommand{\coqlibrary}[3]{\cleardoublepage\phantomsection \hypertarget{coq:#1}{\chapter{#2\texorpdfstring{\coqdoccst}{}{#3}}}} \else \newcommand{\coqdef}[3]{#3} \newcommand{\coqref}[2]{#2} \newcommand{\coqexternalref}[3]{#3} \newcommand{\texorpdfstring}[2]{#1} \newcommand{\identref}[2]{\textsf{#2}} \newcommand{\coqlibrary}[3]{\cleardoublepage\chapter{#2\coqdoccst{#3}}} \fi \usepackage{xr} \newif\if@coqdoccolors \@coqdoccolorsfalse \DeclareOption{color}{\@coqdoccolorstrue} \ProcessOptions \if@coqdoccolors \RequirePackage{xcolor} \definecolor{varpurple}{rgb}{0.4,0,0.4} \definecolor{constrmaroon}{rgb}{0.6,0,0} \definecolor{defgreen}{rgb}{0,0.4,0} \definecolor{indblue}{rgb}{0,0,0.8} \definecolor{kwred}{rgb}{0.8,0.1,0.1} \def\coqdocvarcolor{varpurple} \def\coqdockwcolor{kwred} \def\coqdoccstcolor{defgreen} \def\coqdocindcolor{indblue} \def\coqdocconstrcolor{constrmaroon} \def\coqdocmodcolor{defgreen} \def\coqdocaxcolor{varpurple} \def\coqdoctaccolor{black} \def\coqdockw#1{{\color{\coqdockwcolor}{\texttt{#1}}}} \def\coqdocvar#1{{\color{\coqdocvarcolor}{\textit{#1}}}} \def\coqdoccst#1{{\color{\coqdoccstcolor}{\textrm{#1}}}} \def\coqdocind#1{{\color{\coqdocindcolor}{\textsf{#1}}}} \def\coqdocconstr#1{{\color{\coqdocconstrcolor}{\textsf{#1}}}} \def\coqdocmod#1{{{\color{\coqdocmodcolor}{\textsc{\textsf{#1}}}}}} \def\coqdocax#1{{{\color{\coqdocaxcolor}{\textsl{\textrm{#1}}}}}} \def\coqdoctac#1{{\color{\coqdoctaccolor}{\texttt{#1}}}} \fi \endinput coq-8.4pl2/tools/coqdoc/index.mli0000640000175000001440000000363712037140010016046 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string type index_entry = | Def of string * entry_type | Ref of coq_module * string * entry_type (* Find what symbol coqtop said is located at loc in the source file *) val find : coq_module -> loc -> index_entry (* Find what data is referred to by some string in some coq module *) val find_string : coq_module -> string -> index_entry val add_module : coq_module -> unit type module_kind = Local | External of coq_module | Unknown val find_module : coq_module -> module_kind val init_coqlib_library : unit -> unit val add_external_library : string -> coq_module -> unit (*s Read globalizations from a file (produced by coqc -dump-glob) *) val read_glob : Digest.t 
option -> string -> unit (*s Indexes *) type 'a index = { idx_name : string; idx_entries : (char * (string * 'a) list) list; idx_size : int } val current_library : string ref val display_letter : char -> string val prepare_entry : string -> entry_type -> string val all_entries : unit -> (coq_module * entry_type) index * (entry_type * coq_module index) list val map : (string -> 'a -> 'b) -> 'a index -> 'b index coq-8.4pl2/tools/coqdoc/output.ml0000640000175000001440000012000712062325704016132 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Hashtbl.add h key ()) l; function s -> try Hashtbl.find h s; true with Not_found -> false let is_keyword = build_table [ "About"; "AddPath"; "Axiom"; "Abort"; "Chapter"; "Check"; "Coercion"; "Compute"; "CoFixpoint"; "CoInductive"; "Corollary"; "Defined"; "Definition"; "End"; "Eval"; "Example"; "Export"; "Fact"; "Fix"; "Fixpoint"; "Function"; "Generalizable"; "Global"; "Grammar"; "Guarded"; "Goal"; "Hint"; "Debug"; "On"; "Hypothesis"; "Hypotheses"; "Resolve"; "Unfold"; "Immediate"; "Extern"; "Constructors"; "Rewrite"; "Implicit"; "Import"; "Inductive"; "Infix"; "Lemma"; "Let"; "Load"; "Local"; "Ltac"; "Module"; "Module Type"; "Declare Module"; "Include"; "Mutual"; "Parameter"; "Parameters"; "Print"; "Printing"; "All"; "Proof"; "Proof with"; "Qed"; "Record"; "Recursive"; "Remark"; "Require"; "Save"; "Scheme"; "Assumptions"; "Axioms"; "Universes"; "Induction"; "for"; "Sort"; "Section"; "Show"; "Structure"; "Syntactic"; "Syntax"; "Tactic"; "Theorem"; "Search"; "SearchAbout"; "SearchRewrite"; "Set"; "Types"; "Undo"; "Unset"; "Variable"; "Variables"; "Context"; "Notation"; "Reserved Notation"; "Tactic Notation"; "Delimit"; "Bind"; "Open"; "Scope"; "Inline"; "Implicit Arguments"; "Add"; "Strict"; "Typeclasses"; "Instance"; "Global Instance"; "Class"; "Instantiation"; "subgoal"; "subgoals"; "vm_compute"; "Opaque"; "Transparent"; "Time"; "Extraction"; "Extract"; (* Program *) "Program Definition"; "Program Example"; "Program Fixpoint"; "Program Lemma"; "Obligation"; "Obligations"; "Solve"; "using"; "Next Obligation"; "Next"; "Program Instance"; "Equations"; "Equations_nocomp"; (*i (* coq terms *) *) "forall"; "match"; "as"; "in"; "return"; "with"; "end"; "let"; "fun"; "if"; "then"; "else"; "Prop"; "Set"; "Type"; ":="; "where"; "struct"; "wf"; "measure"; "fix"; "cofix"; (* Ltac *) "before"; "after"; "constr"; "ltac"; "goal"; "context"; "beta"; "delta"; "iota"; "zeta"; (* Notations *) "level"; "associativity"; "no" ] let is_tactic = build_table [ "intro"; "intros"; "apply"; "rewrite"; "refine"; "case"; "clear"; "injection"; "elimtype"; "progress"; "setoid_rewrite"; "left"; "right"; "constructor"; "econstructor"; "decide equality"; "abstract"; "exists"; "cbv"; "simple destruct"; "info"; "fourier"; "field"; "specialize"; "evar"; "solve"; "instanciate"; "quote"; "eexact"; "autorewrite"; "destruct"; "destruction"; "destruct_call"; "dependent"; "elim"; "extensionality"; "f_equal"; "generalize"; "generalize_eqs"; "generalize_eqs_vars"; "induction"; "rename"; "move"; "omega"; "set"; "assert"; "do"; "repeat"; "cut"; "assumption"; "exact"; "split"; "subst"; "try"; "discriminate"; "simpl"; "unfold"; "red"; "compute"; "at"; "in"; "by"; "reflexivity"; "symmetry"; "transitivity"; "replace"; "setoid_replace"; "inversion"; "inversion_clear"; "pattern"; "intuition"; "congruence"; "fail"; "fresh"; "trivial"; "tauto"; "firstorder"; "ring"; "clapply"; "program_simpl"; 
"program_simplify"; "eapply"; "auto"; "eauto"; "change"; "fold"; "hnf"; "lazy"; "simple"; "eexists"; "debug"; "idtac"; "first"; "type of"; "pose"; "eval"; "instantiate"; "until" ] (*s Current Coq module *) let current_module : (string * string option) ref = ref ("",None) let get_module withsub = let (m,sub) = !current_module in if withsub then match sub with | None -> m | Some sub -> m ^ ": " ^ sub else m let set_module m sub = current_module := (m,sub); page_title := get_module true (*s Common to both LaTeX and HTML *) let item_level = ref 0 let in_doc = ref false (*s Customized and predefined pretty-print *) let initialize_texmacs () = let ensuremath x = sprintf ">" x in List.fold_right (fun (s,t) tt -> Tokens.ttree_add tt s t) [ "*", ensuremath "times"; "->", ensuremath "rightarrow"; "<-", ensuremath "leftarrow"; "<->", ensuremath "leftrightarrow"; "=>", ensuremath "Rightarrow"; "<=", ensuremath "le"; ">=", ensuremath "ge"; "<>", ensuremath "noteq"; "~", ensuremath "lnot"; "/\\", ensuremath "land"; "\\/", ensuremath "lor"; "|-", ensuremath "vdash" ] Tokens.empty_ttree let token_tree_texmacs = ref (initialize_texmacs ()) let token_tree_latex = ref Tokens.empty_ttree let token_tree_html = ref Tokens.empty_ttree let initialize_tex_html () = let if_utf8 = if !Cdglobals.utf8 then fun x -> Some x else fun _ -> None in let (tree_latex, tree_html) = List.fold_right (fun (s,l,l') (tt,tt') -> (Tokens.ttree_add tt s l, match l' with None -> tt' | Some l' -> Tokens.ttree_add tt' s l')) [ "*" , "\\ensuremath{\\times}", if_utf8 "×"; "|", "\\ensuremath{|}", None; "->", "\\ensuremath{\\rightarrow}", if_utf8 "→"; "->~", "\\ensuremath{\\rightarrow\\lnot}", None; "->~~", "\\ensuremath{\\rightarrow\\lnot\\lnot}", None; "<-", "\\ensuremath{\\leftarrow}", None; "<->", "\\ensuremath{\\leftrightarrow}", if_utf8 "↔"; "=>", "\\ensuremath{\\Rightarrow}", if_utf8 "⇒"; "<=", "\\ensuremath{\\le}", if_utf8 "≤"; ">=", "\\ensuremath{\\ge}", if_utf8 "≥"; "<>", "\\ensuremath{\\not=}", if_utf8 "≠"; "~", "\\ensuremath{\\lnot}", if_utf8 "¬"; "/\\", "\\ensuremath{\\land}", if_utf8 "∧"; "\\/", "\\ensuremath{\\lor}", if_utf8 "∨"; "|-", "\\ensuremath{\\vdash}", None; "forall", "\\ensuremath{\\forall}", if_utf8 "∀"; "exists", "\\ensuremath{\\exists}", if_utf8 "∃"; "Π", "\\ensuremath{\\Pi}", if_utf8 "Π"; "λ", "\\ensuremath{\\lambda}", if_utf8 "λ"; (* "fun", "\\ensuremath{\\lambda}" ? 
*) ] (Tokens.empty_ttree,Tokens.empty_ttree) in token_tree_latex := tree_latex; token_tree_html := tree_html let add_printing_token s (t1,t2) = (match t1 with None -> () | Some t1 -> token_tree_latex := Tokens.ttree_add !token_tree_latex s t1); (match t2 with None -> () | Some t2 -> token_tree_html := Tokens.ttree_add !token_tree_html s t2) let remove_printing_token s = token_tree_latex := Tokens.ttree_remove !token_tree_latex s; token_tree_html := Tokens.ttree_remove !token_tree_html s (*s Table of contents *) type toc_entry = | Toc_library of string * string option | Toc_section of int * (unit -> unit) * string let (toc_q : toc_entry Queue.t) = Queue.create () let add_toc_entry e = Queue.add e toc_q let new_label = let r = ref 0 in fun () -> incr r; "lab" ^ string_of_int !r (*s LaTeX output *) module Latex = struct let in_title = ref false (*s Latex preamble *) let (preamble : string Queue.t) = Queue.create () let push_in_preamble s = Queue.add s preamble let utf8x_extra_support () = printf "\n"; printf "%%Warning: tipa declares many non-standard macros used by utf8x to\n"; printf "%%interpret utf8 characters but extra packages might have to be added\n"; printf "%%(e.g. \"textgreek\" for Greek letters not already in tipa).\n"; printf "%%Use coqdoc's option -p to add new packages.\n"; printf "\\usepackage{tipa}\n"; printf "\n" let header () = if !header_trailer then begin printf "\\documentclass[12pt]{report}\n"; if !inputenc != "" then printf "\\usepackage[%s]{inputenc}\n" !inputenc; if !inputenc = "utf8x" then utf8x_extra_support (); printf "\\usepackage[T1]{fontenc}\n"; printf "\\usepackage{fullpage}\n"; printf "\\usepackage{coqdoc}\n"; printf "\\usepackage{amsmath,amssymb}\n"; (match !toc_depth with | None -> () | Some n -> printf "\\setcounter{tocdepth}{%i}\n" n); Queue.iter (fun s -> printf "%s\n" s) preamble; printf "\\begin{document}\n" end; output_string "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; output_string "%% This file has been automatically generated with the command\n"; output_string "%% "; Array.iter (fun s -> printf "%s " s) Sys.argv; printf "\n"; output_string "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n" let trailer () = if !header_trailer then begin printf "\\end{document}\n" end (*s Latex low-level translation *) let nbsp () = output_char '~' let char c = match c with | '\\' -> printf "\\symbol{92}" | '$' | '#' | '%' | '&' | '{' | '}' | '_' -> output_char '\\'; output_char c | '^' | '~' -> output_char '\\'; output_char c; printf "{}" | _ -> output_char c let label_char c = match c with | '_' -> output_char ' ' | '\\' | '$' | '#' | '%' | '&' | '{' | '}' | '^' | '~' -> printf "x%X" (Char.code c) | _ -> if c >= '\x80' then printf "x%X" (Char.code c) else output_char c let label_ident s = for i = 0 to String.length s - 1 do label_char s.[i] done let latex_char = output_char let latex_string = output_string let html_char _ = () let html_string _ = () (*s Latex char escaping *) let escaped = let buff = Buffer.create 5 in fun s -> Buffer.clear buff; for i = 0 to String.length s - 1 do match s.[i] with | '\\' -> Buffer.add_string buff "\\symbol{92}" | '$' | '#' | '%' | '&' | '{' | '}' | '_' as c -> Buffer.add_char buff '\\'; Buffer.add_char buff c | '^' | '~' as c -> Buffer.add_char buff '\\'; Buffer.add_char buff c; Buffer.add_string buff "{}" | '\'' -> if i < String.length s - 1 && s.[i+1] = '\'' then begin Buffer.add_char buff '\''; Buffer.add_char buff '{'; Buffer.add_char buff '}' end else Buffer.add_char buff '\'' | c 
-> Buffer.add_char buff c done; Buffer.contents buff (*s Latex reference and symbol translation *) let start_module () = let ln = !lib_name in if not !short then begin printf "\\coqlibrary{"; label_ident (get_module false); printf "}{"; if ln <> "" then printf "%s " ln; printf "}{%s}\n\n" (escaped (get_module true)) end let start_latex_math () = output_char '$' let stop_latex_math () = output_char '$' let start_quote () = output_char '`'; output_char '`' let stop_quote () = output_char '\''; output_char '\'' let start_verbatim inline = if inline then printf "\\texttt{" else printf "\\begin{verbatim}" let stop_verbatim inline = if inline then printf "}" else printf "\\end{verbatim}\n" let url addr name = printf "%s\\footnote{\\url{%s}}" (match name with | None -> "" | Some n -> n) addr let indentation n = if n == 0 then printf "\\coqdocnoindent\n" else let space = 0.5 *. (float n) in printf "\\coqdocindent{%2.2fem}\n" space let ident_ref m fid typ s = let id = if fid <> "" then (m ^ "." ^ fid) else m in match find_module m with | Local -> if typ = Variable then printf "\\coqdoc%s{%s}" (type_name typ) s else (printf "\\coqref{"; label_ident id; printf "}{\\coqdoc%s{%s}}" (type_name typ) s) | External m when !externals -> printf "\\coqexternalref{"; label_ident fid; printf "}{%s}{\\coqdoc%s{%s}}" (escaped m) (type_name typ) s | External _ | Unknown -> printf "\\coqdoc%s{%s}" (type_name typ) s let defref m id ty s = if ty <> Notation then (printf "\\coqdef{"; label_ident (m ^ "." ^ id); printf "}{%s}{\\coqdoc%s{%s}}" s (type_name ty) s) else (* Glob file still not able to say the exact extent of the definition *) (* so we currently renounce to highlight the notation location *) (printf "\\coqdef{"; label_ident (m ^ "." ^ id); printf "}{%s}{%s}" s s) let reference s = function | Def (fullid,typ) -> defref (get_module false) fullid typ s | Ref (m,fullid,typ) -> ident_ref m fullid typ s (*s The sublexer buffers symbol characters and attached uninterpreted ident and try to apply special translation such as, predefined, translation "->" to "\ensuremath{\rightarrow}" or, virtually, a user-level translation from "=_h" to "\ensuremath{=_{h}}" *) let output_sublexer_string doescape issymbchar tag s = let s = if doescape then escaped s else s in match tag with | Some ref -> reference s ref | None -> if issymbchar then output_string s else printf "\\coqdocvar{%s}" s let last_was_in = ref false let sublexer c loc = if c = '*' && !last_was_in then begin Tokens.flush_sublexer (); output_char '*' end else begin let tag = try Some (Index.find (get_module false) loc) with Not_found -> None in Tokens.output_tagged_symbol_char tag c end; last_was_in := false let initialize () = initialize_tex_html (); Tokens.token_tree := token_tree_latex; Tokens.outfun := output_sublexer_string (*s Interpreting ident with fallback on sublexer if unknown ident *) let translate s = match Tokens.translate s with Some s -> s | None -> escaped s let keyword s loc = printf "\\coqdockw{%s}" (translate s) let ident s loc = last_was_in := s = "in"; try let tag = Index.find (get_module false) loc in reference (translate s) tag with Not_found -> if is_tactic s then printf "\\coqdoctac{%s}" (translate s) else if is_keyword s then printf "\\coqdockw{%s}" (translate s) else if !Cdglobals.interpolate && !in_doc (* always a var otherwise *) then try let tag = Index.find_string (get_module false) s in reference (translate s) tag with _ -> Tokens.output_tagged_ident_string s else Tokens.output_tagged_ident_string s let ident s l = if !in_title 
then ( printf "\\texorpdfstring{\\protect"; ident s l; printf "}{%s}" (translate s)) else ident s l (*s Translating structure *) let proofbox () = printf "\\ensuremath{\\Box}" let rec reach_item_level n = if !item_level < n then begin printf "\n\\begin{itemize}\n\\item "; incr item_level; reach_item_level n end else if !item_level > n then begin printf "\n\\end{itemize}\n"; decr item_level; reach_item_level n end let item n = let old_level = !item_level in reach_item_level n; if n <= old_level then printf "\n\\item " let stop_item () = reach_item_level 0 let start_doc () = in_doc := true let end_doc () = in_doc := false; stop_item () (* This is broken if we are in math mode, but coqdoc currently isn't tracking that *) let start_emph () = printf "\\textit{" let stop_emph () = printf "}" let start_comment () = printf "\\begin{coqdoccomment}\n" let end_comment () = printf "\\end{coqdoccomment}\n" let start_coq () = printf "\\begin{coqdoccode}\n" let end_coq () = printf "\\end{coqdoccode}\n" let start_code () = end_doc (); start_coq () let end_code () = end_coq (); start_doc () let section_kind = function | 1 -> "\\section{" | 2 -> "\\subsection{" | 3 -> "\\subsubsection{" | 4 -> "\\paragraph{" | _ -> assert false let section lev f = stop_item (); output_string (section_kind lev); in_title := true; f (); in_title := false; printf "}\n\n" let rule () = printf "\\par\n\\noindent\\hrulefill\\par\n\\noindent{}" let paragraph () = printf "\n\n" let line_break () = printf "\\coqdoceol\n" let empty_line_of_code () = printf "\\coqdocemptyline\n" let start_inline_coq_block () = line_break (); empty_line_of_code () let end_inline_coq_block () = empty_line_of_code () let start_inline_coq () = () let end_inline_coq () = () let make_multi_index () = () let make_index () = () let make_toc () = printf "\\tableofcontents\n" end (*s HTML output *) module Html = struct let header () = if !header_trailer then if !header_file_spec then let cin = Pervasives.open_in !header_file in try while true do let s = Pervasives.input_line cin in printf "%s\n" s done with End_of_file -> Pervasives.close_in cin else begin printf "\n"; printf "\n\n"; printf "\n" !charset; printf "\n"; printf "%s\n\n\n" !page_title; printf "\n\n
\n\n
\n
\n\n"; printf "
\n\n" end let trailer () = if !header_trailer && !footer_file_spec then let cin = Pervasives.open_in !footer_file in try while true do let s = Pervasives.input_line cin in printf "%s\n" s done with End_of_file -> Pervasives.close_in cin else begin if !index && (get_module false) <> "Index" then printf "
\n\n
\n
Index" !index_name; printf "
This page has been generated by "; printf "coqdoc\n" Coq_config.wwwcoq; printf "
\n\n
\n\n\n" end let start_module () = let ln = !lib_name in if not !short then begin let (m,sub) = !current_module in add_toc_entry (Toc_library (m,sub)); if ln = "" then printf "

%s

\n\n" (get_module true) else printf "

%s %s

\n\n" ln (get_module true) end let indentation n = for i = 1 to n do printf " " done let line_break () = printf "
\n" let empty_line_of_code () = printf "\n
\n" let nbsp () = printf " " let char = function | '<' -> printf "<" | '>' -> printf ">" | '&' -> printf "&" | c -> output_char c let raw_string s = for i = 0 to String.length s - 1 do char s.[i] done let escaped = let buff = Buffer.create 5 in fun s -> Buffer.clear buff; for i = 0 to String.length s - 1 do match s.[i] with | '<' -> Buffer.add_string buff "<" | '>' -> Buffer.add_string buff ">" | '&' -> Buffer.add_string buff "&" | c -> Buffer.add_char buff c done; Buffer.contents buff let latex_char _ = () let latex_string _ = () let html_char = output_char let html_string = output_string let start_latex_math () = () let stop_latex_math () = () let start_quote () = char '"' let stop_quote () = start_quote () let start_verbatim inline = if inline then printf "" else printf "
"

  let stop_verbatim inline = 
    if inline then printf "" 
    else printf "
\n" let url addr name = printf "%s" addr (match name with | Some n -> n | None -> addr) let ident_ref m fid typ s = match find_module m with | Local -> printf "" m fid; printf "%s" typ s | External m when !externals -> printf "" m fid; printf "%s" typ s | External _ | Unknown -> printf "%s" typ s let reference s r = match r with | Def (fullid,ty) -> printf "" fullid; printf "%s" (type_name ty) s | Ref (m,fullid,ty) -> ident_ref m fullid (type_name ty) s let output_sublexer_string doescape issymbchar tag s = let s = if doescape then escaped s else s in match tag with | Some ref -> reference s ref | None -> if issymbchar then output_string s else printf "%s" s let sublexer c loc = let tag = try Some (Index.find (get_module false) loc) with Not_found -> None in Tokens.output_tagged_symbol_char tag c let initialize () = initialize_tex_html(); Tokens.token_tree := token_tree_html; Tokens.outfun := output_sublexer_string let translate s = match Tokens.translate s with Some s -> s | None -> escaped s let keyword s loc = printf "%s" (translate s) let ident s loc = if is_keyword s then begin printf "%s" (translate s) end else begin try reference (translate s) (Index.find (get_module false) loc) with Not_found -> if is_tactic s then printf "%s" (translate s) else if !Cdglobals.interpolate && !in_doc (* always a var otherwise *) then try reference (translate s) (Index.find_string (get_module false) s) with _ -> Tokens.output_tagged_ident_string s else Tokens.output_tagged_ident_string s end let proofbox () = printf "" let rec reach_item_level n = if !item_level < n then begin printf "
    \n
  • "; incr item_level; reach_item_level n end else if !item_level > n then begin printf "\n
  • \n
\n"; decr item_level; reach_item_level n end let item n = let old_level = !item_level in reach_item_level n; if n <= old_level then printf "\n\n
  • " let stop_item () = reach_item_level 0 let start_coq () = if not !raw_comments then printf "
    \n" let end_coq () = if not !raw_comments then printf "
    \n" let start_doc () = in_doc := true; if not !raw_comments then printf "\n
    \n" let end_doc () = in_doc := false; stop_item (); if not !raw_comments then printf "\n
    \n" let start_emph () = printf "" let stop_emph () = printf "" let start_comment () = printf "(*" let end_comment () = printf "*)" let start_code () = end_doc (); start_coq () let end_code () = end_coq (); start_doc () let start_inline_coq () = if !inline_notmono then printf "" else printf "" let end_inline_coq () = printf "" let start_inline_coq_block () = line_break (); start_inline_coq () let end_inline_coq_block () = end_inline_coq () let paragraph () = printf "\n
    \n\n" (* inference rules *) let inf_rule assumptions (_,_,midnm) conclusions = (* this first function replaces any occurance of 3 or more spaces in a row with " "s. We do this to the assumptions so that people can put multiple rules on a line with nice formatting *) let replace_spaces str = let rec copy a n = match n with 0 -> [] | n -> (a :: copy a (n - 1)) in let results = Str.full_split (Str.regexp "[' '][' '][' ']+") str in let strs = List.map (fun r -> match r with | Str.Text s -> [s] | Str.Delim s -> copy " " (String.length s)) results in String.concat "" (List.concat strs) in let start_assumption line = (printf "\n"; printf " %s\n" (replace_spaces line)) in let end_assumption () = (printf " \n"; printf "\n") in let rec print_assumptions hyps = match hyps with | [] -> start_assumption "  " | [(_,hyp)] -> start_assumption hyp | ((_,hyp) :: hyps') -> (start_assumption hyp; end_assumption (); print_assumptions hyps') in printf "
    \n"; print_assumptions assumptions; printf " " | Some s -> printf " %s  \n " s); printf "\n"; printf "\n"; printf " \n"; printf "\n"; print_assumptions conclusions; end_assumption (); printf "
    \n"; (match midnm with | None -> printf "  \n

    " let section lev f = let lab = new_label () in let r = sprintf "%s.html#%s" (get_module false) lab in (match !toc_depth with | None -> add_toc_entry (Toc_section (lev, f, r)) | Some n -> if lev <= n then add_toc_entry (Toc_section (lev, f, r)) else ()); stop_item (); printf "" lab lev; f (); printf "\n" lev let rule () = printf "
    \n" (* make a HTML index from a list of triples (name,text,link) *) let index_ref i c = let idxc = sprintf "%s_%c" i.idx_name c in !index_name ^ (if !multi_index then "_" ^ idxc ^ ".html" else ".html#" ^ idxc) let letter_index category idx (c,l) = if l <> [] then begin let cat = if category && idx <> "global" then "(" ^ idx ^ ")" else "" in printf "

    %s %s

    \n" idx c (display_letter c) cat; List.iter (fun (id,(text,link,t)) -> let id' = prepare_entry id t in printf "%s %s
    \n" link id' text) l; printf "

    " end let all_letters i = List.iter (letter_index false i.idx_name) i.idx_entries (* Construction d'une liste des index (1 index global, puis 1 index par catégorie) *) let format_global_index = Index.map (fun s (m,t) -> if t = Library then let ln = !lib_name in if ln <> "" then "[" ^ String.lowercase ln ^ "]", m ^ ".html", t else "[library]", m ^ ".html", t else sprintf "[%s, in %s]" (type_name t) m m , sprintf "%s.html#%s" m s, t) let format_bytype_index = function | Library, idx -> Index.map (fun id m -> "", m ^ ".html", Library) idx | (t,idx) -> Index.map (fun s m -> let text = sprintf "[in %s]" m m in (text, sprintf "%s.html#%s" m s, t)) idx (* Impression de la table d'index *) let print_index_table_item i = printf "\n%s Index\n" (String.capitalize i.idx_name); List.iter (fun (c,l) -> if l <> [] then printf "%s\n" (index_ref i c) (display_letter c) else printf "%s\n" (display_letter c)) i.idx_entries; let n = i.idx_size in printf "(%d %s)\n" n (if n > 1 then "entries" else "entry"); printf "\n" let print_index_table idxl = printf "\n"; List.iter print_index_table_item idxl; printf "
    \n" let make_one_multi_index prt_tbl i = (* Attn: make_one_multi_index crée un nouveau fichier... *) let idx = i.idx_name in let one_letter ((c,l) as cl) = open_out_file (sprintf "%s_%s_%c.html" !index_name idx c); if (!header_trailer) then header (); prt_tbl (); printf "
    "; letter_index true idx cl; if List.length l > 30 then begin printf "
    "; prt_tbl () end; if (!header_trailer) then trailer (); close_out_file () in List.iter one_letter i.idx_entries let make_multi_index () = let all_index = let glob,bt = Index.all_entries () in (format_global_index glob) :: (List.map format_bytype_index bt) in let print_table () = print_index_table all_index in List.iter (make_one_multi_index print_table) all_index let make_index () = let all_index = let glob,bt = Index.all_entries () in (format_global_index glob) :: (List.map format_bytype_index bt) in let print_table () = print_index_table all_index in let print_one_index i = if i.idx_size > 0 then begin printf "
    \n

    %s Index

    \n" (String.capitalize i.idx_name); all_letters i end in set_module "Index" None; if !title <> "" then printf "

    %s

    \n" !title; print_table (); if not (!multi_index) then begin List.iter print_one_index all_index; printf "
    "; print_table () end let make_toc () = let ln = !lib_name in let make_toc_entry = function | Toc_library (m,sub) -> stop_item (); let ms = match sub with | None -> m | Some s -> m ^ ": " ^ s in if ln = "" then printf "

    %s

    \n" m ms else printf "

    %s %s

    \n" m ln ms | Toc_section (n, f, r) -> item n; printf "" r; f (); printf "\n" in printf "
    \n"; Queue.iter make_toc_entry toc_q; stop_item (); printf "
    \n" end (*s TeXmacs-aware output *) module TeXmacs = struct (*s Latex preamble *) let (preamble : string Queue.t) = in_doc := false; Queue.create () let push_in_preamble s = Queue.add s preamble let header () = output_string "(*i This file has been automatically generated with the command \n"; output_string " "; Array.iter (fun s -> printf "%s " s) Sys.argv; printf " *)\n" let trailer () = () let nbsp () = output_char ' ' let char_true c = match c with | '\\' -> printf "\\\\" | '<' -> printf "\\<" | '|' -> printf "\\|" | '>' -> printf "\\>" | _ -> output_char c let char c = if !in_doc then char_true c else output_char c let latex_char = char_true let latex_string = String.iter latex_char let html_char _ = () let html_string _ = () let raw_ident s = for i = 0 to String.length s - 1 do char s.[i] done let start_module () = () let start_latex_math () = printf "' let start_verbatim inline = in_doc := true; printf "<\\verbatim>" let stop_verbatim inline = in_doc := false; printf "" let url addr name = printf "%s<\\footnote><\\url>%s" addr (match name with | None -> "" | Some n -> n) let start_quote () = output_char '`'; output_char '`' let stop_quote () = output_char '\''; output_char '\'' let indentation n = () let keyword s = printf "" let ident_true s = if is_keyword s then keyword s else raw_ident s let keyword s loc = keyword s let ident s _ = if !in_doc then ident_true s else raw_ident s let output_sublexer_string doescape issymbchar tag s = if doescape then raw_ident s else output_string s let sublexer c l = if !in_doc then Tokens.output_tagged_symbol_char None c else char c let initialize () = Tokens.token_tree := token_tree_texmacs; Tokens.outfun := output_sublexer_string let proofbox () = printf "QED" let rec reach_item_level n = if !item_level < n then begin printf "\n<\\itemize>\n"; incr item_level; reach_item_level n end else if !item_level > n then begin printf "\n"; decr item_level; reach_item_level n end let item n = let old_level = !item_level in reach_item_level n; if n <= old_level then printf "\n\n" let stop_item () = reach_item_level 0 let start_doc () = in_doc := true; printf "(** texmacs: " let end_doc () = stop_item (); in_doc := false; printf " *)" let start_coq () = () let end_coq () = () let start_emph () = printf "" let start_comment () = () let end_comment () = () let start_code () = in_doc := true; printf "<\\code>\n" let end_code () = in_doc := false; printf "\n" let section_kind = function | 1 -> "section" | 2 -> "subsection" | 3 -> "subsubsection" | 4 -> "paragraph" | _ -> assert false let section lev f = stop_item (); printf "<"; output_string (section_kind lev); printf "|"; f (); printf ">\n\n" let rule () = printf "\n\n" let paragraph () = printf "\n\n" let line_break_true () = printf "" let line_break () = printf "\n" let empty_line_of_code () = printf "\n" let start_inline_coq () = printf "" let start_inline_coq_block () = line_break (); start_inline_coq () let end_inline_coq_block () = end_inline_coq () let make_multi_index () = () let make_index () = () let make_toc () = () end (*s Raw output *) module Raw = struct let header () = () let trailer () = () let nbsp () = output_char ' ' let char = output_char let latex_char = output_char let latex_string = output_string let html_char _ = () let html_string _ = () let raw_ident s = for i = 0 to String.length s - 1 do char s.[i] done let start_module () = () let end_module () = () let start_latex_math () = () let stop_latex_math () = () let start_verbatim inline = () let stop_verbatim inline = () let url 
addr name = match name with | Some n -> printf "%s (%s)" n addr | None -> printf "%s" addr let start_quote () = printf "\"" let stop_quote () = printf "\"" let indentation n = for i = 1 to n do printf " " done let keyword s loc = raw_ident s let ident s loc = raw_ident s let sublexer c l = char c let initialize () = Tokens.token_tree := ref Tokens.empty_ttree; Tokens.outfun := (fun _ _ _ _ -> failwith "Useless") let proofbox () = printf "[]" let item n = printf "- " let stop_item () = () let reach_item_level _ = () let start_doc () = printf "(** " let end_doc () = printf " *)\n" let start_emph () = printf "_" let stop_emph () = printf "_" let start_comment () = printf "(*" let end_comment () = printf "*)" let start_coq () = () let end_coq () = () let start_code () = end_doc (); start_coq () let end_code () = end_coq (); start_doc () let section_kind = function | 1 -> "* " | 2 -> "** " | 3 -> "*** " | 4 -> "**** " | _ -> assert false let section lev f = output_string (section_kind lev); f () let rule () = () let paragraph () = printf "\n\n" let line_break () = printf "\n" let empty_line_of_code () = printf "\n" let start_inline_coq () = () let end_inline_coq () = () let start_inline_coq_block () = line_break (); start_inline_coq () let end_inline_coq_block () = end_inline_coq () let make_multi_index () = () let make_index () = () let make_toc () = () end (*s Generic output *) let select f1 f2 f3 f4 x = match !target_language with LaTeX -> f1 x | HTML -> f2 x | TeXmacs -> f3 x | Raw -> f4 x let push_in_preamble = Latex.push_in_preamble let header = select Latex.header Html.header TeXmacs.header Raw.header let trailer = select Latex.trailer Html.trailer TeXmacs.trailer Raw.trailer let start_module = select Latex.start_module Html.start_module TeXmacs.start_module Raw.start_module let start_doc = select Latex.start_doc Html.start_doc TeXmacs.start_doc Raw.start_doc let end_doc = select Latex.end_doc Html.end_doc TeXmacs.end_doc Raw.end_doc let start_comment = select Latex.start_comment Html.start_comment TeXmacs.start_comment Raw.start_comment let end_comment = select Latex.end_comment Html.end_comment TeXmacs.end_comment Raw.end_comment let start_coq = select Latex.start_coq Html.start_coq TeXmacs.start_coq Raw.start_coq let end_coq = select Latex.end_coq Html.end_coq TeXmacs.end_coq Raw.end_coq let start_code = select Latex.start_code Html.start_code TeXmacs.start_code Raw.start_code let end_code = select Latex.end_code Html.end_code TeXmacs.end_code Raw.end_code let start_inline_coq = select Latex.start_inline_coq Html.start_inline_coq TeXmacs.start_inline_coq Raw.start_inline_coq let end_inline_coq = select Latex.end_inline_coq Html.end_inline_coq TeXmacs.end_inline_coq Raw.end_inline_coq let start_inline_coq_block = select Latex.start_inline_coq_block Html.start_inline_coq_block TeXmacs.start_inline_coq_block Raw.start_inline_coq_block let end_inline_coq_block = select Latex.end_inline_coq_block Html.end_inline_coq_block TeXmacs.end_inline_coq_block Raw.end_inline_coq_block let indentation = select Latex.indentation Html.indentation TeXmacs.indentation Raw.indentation let paragraph = select Latex.paragraph Html.paragraph TeXmacs.paragraph Raw.paragraph let line_break = select Latex.line_break Html.line_break TeXmacs.line_break Raw.line_break let empty_line_of_code = select Latex.empty_line_of_code Html.empty_line_of_code TeXmacs.empty_line_of_code Raw.empty_line_of_code let section = select Latex.section Html.section TeXmacs.section Raw.section let item = select Latex.item Html.item 
TeXmacs.item Raw.item let stop_item = select Latex.stop_item Html.stop_item TeXmacs.stop_item Raw.stop_item let reach_item_level = select Latex.reach_item_level Html.reach_item_level TeXmacs.reach_item_level Raw.reach_item_level let rule = select Latex.rule Html.rule TeXmacs.rule Raw.rule let nbsp = select Latex.nbsp Html.nbsp TeXmacs.nbsp Raw.nbsp let char = select Latex.char Html.char TeXmacs.char Raw.char let keyword = select Latex.keyword Html.keyword TeXmacs.keyword Raw.keyword let ident = select Latex.ident Html.ident TeXmacs.ident Raw.ident let sublexer = select Latex.sublexer Html.sublexer TeXmacs.sublexer Raw.sublexer let initialize = select Latex.initialize Html.initialize TeXmacs.initialize Raw.initialize let proofbox = select Latex.proofbox Html.proofbox TeXmacs.proofbox Raw.proofbox let latex_char = select Latex.latex_char Html.latex_char TeXmacs.latex_char Raw.latex_char let latex_string = select Latex.latex_string Html.latex_string TeXmacs.latex_string Raw.latex_string let html_char = select Latex.html_char Html.html_char TeXmacs.html_char Raw.html_char let html_string = select Latex.html_string Html.html_string TeXmacs.html_string Raw.html_string let start_emph = select Latex.start_emph Html.start_emph TeXmacs.start_emph Raw.start_emph let stop_emph = select Latex.stop_emph Html.stop_emph TeXmacs.stop_emph Raw.stop_emph let start_latex_math = select Latex.start_latex_math Html.start_latex_math TeXmacs.start_latex_math Raw.start_latex_math let stop_latex_math = select Latex.stop_latex_math Html.stop_latex_math TeXmacs.stop_latex_math Raw.stop_latex_math let start_verbatim = select Latex.start_verbatim Html.start_verbatim TeXmacs.start_verbatim Raw.start_verbatim let stop_verbatim = select Latex.stop_verbatim Html.stop_verbatim TeXmacs.stop_verbatim Raw.stop_verbatim let verbatim_char inline = select (if inline then Latex.char else output_char) Html.char TeXmacs.char Raw.char let hard_verbatim_char = output_char let url = select Latex.url Html.url TeXmacs.url Raw.url let start_quote = select Latex.start_quote Html.start_quote TeXmacs.start_quote Raw.start_quote let stop_quote = select Latex.stop_quote Html.stop_quote TeXmacs.stop_quote Raw.stop_quote let inf_rule_dumb assumptions (midsp,midln,midnm) conclusions = start_verbatim false; let dumb_line = function (sp,ln) -> (String.iter char ((String.make sp ' ') ^ ln); char '\n') in (List.iter dumb_line assumptions; dumb_line (midsp, midln ^ (match midnm with | Some s -> " " ^ s | None -> "")); List.iter dumb_line conclusions); stop_verbatim false let inf_rule = select inf_rule_dumb Html.inf_rule inf_rule_dumb inf_rule_dumb let make_multi_index = select Latex.make_multi_index Html.make_multi_index TeXmacs.make_multi_index Raw.make_multi_index let make_index = select Latex.make_index Html.make_index TeXmacs.make_index Raw.make_index let make_toc = select Latex.make_toc Html.make_toc TeXmacs.make_toc Raw.make_toc coq-8.4pl2/tools/coqdoc/style.css0000640000175000001440000000201310016666073016112 0ustar notinusersa:visited {color : #416DFF; text-decoration : none; } a:link {color : #416DFF; text-decoration : none; font-weight : bold} a:hover {color : Red; text-decoration : underline; } a:active {color : Red; text-decoration : underline; } .keyword { font-weight : bold ; color : Red } .keywordsign { color : #C04600 } .superscript { font-size : 4 } .subscript { font-size : 4 } .comment { color : Green } .constructor { color : Blue } .string { color : Maroon } .warning { color : Red ; font-weight : bold } .info { margin-left : 3em; 
margin-right : 3em } .title1 { font-size : 20pt ; background-color : #416DFF } .title2 { font-size : 20pt ; background-color : #418DFF } .title3 { font-size : 20pt ; background-color : #41ADFF } .title4 { font-size : 20pt ; background-color : #41CDFF } .title5 { font-size : 20pt ; background-color : #41EDFF } .title6 { font-size : 20pt ; background-color : #41FFFF } body { background-color : White } tr { background-color : White } # .doc { background-color :#aaeeff } .doc { background-color :#66ff66 } coq-8.4pl2/tools/coqdoc/cpretty.mli0000640000175000001440000000122612010532755016435 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Cdglobals.coq_module -> unit val detect_subtitle : string -> Cdglobals.coq_module -> string option coq-8.4pl2/tools/coqdoc/tokens.ml0000640000175000001440000001310312010532755016072 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* None with | Some tt' -> CharMap.add c (insert tt' (i + 1)) (CharMap.remove c tt.branch) | None -> let tt' = {node = None; branch = CharMap.empty} in CharMap.add c (insert tt' (i + 1)) tt.branch in { node = tt.node; branch = br } in insert ttree 0 (* Removes a string from a dictionary: returns an equal dictionary if the word not present. *) let ttree_remove ttree str = let rec remove tt i = if i == String.length str then {node = None; branch = tt.branch} else let c = str.[i] in let br = match try Some (CharMap.find c tt.branch) with Not_found -> None with | Some tt' -> CharMap.add c (remove tt' (i + 1)) (CharMap.remove c tt.branch) | None -> tt.branch in { node = tt.node; branch = br } in remove ttree 0 let ttree_descend ttree c = CharMap.find c ttree.branch let ttree_find ttree str = let rec proc_rec tt i = if i == String.length str then tt else proc_rec (CharMap.find str.[i] tt.branch) (i+1) in proc_rec ttree 0 (*s Parameters of the translation automaton *) type out_function = bool -> bool -> Index.index_entry option -> string -> unit let token_tree = ref (ref empty_ttree) let outfun = ref (fun _ _ _ _ -> failwith "outfun not initialized") (*s Translation automaton *) let buff = Buffer.create 4 let flush_buffer was_symbolchar tag tok = let hastr = String.length tok <> 0 in if hastr then !outfun false was_symbolchar tag tok; if Buffer.length buff <> 0 then !outfun true (if hastr then not was_symbolchar else was_symbolchar) tag (Buffer.contents buff); Buffer.clear buff type sublexer_state = | Neutral | Buffering of bool * Index.index_entry option * string * ttree let translation_state = ref Neutral let buffer_char is_symbolchar ctag c = let rec aux = function | Neutral -> restart_buffering () | Buffering (was_symbolchar,tag,translated,tt) -> if tag <> ctag then (* A strong tag comes from Coq; if different Coq tags *) (* hence, we don't try to see the chars as part of a single token *) let translated = match tt.node with | Some tok -> Buffer.clear buff; tok | None -> translated in flush_buffer was_symbolchar tag translated; restart_buffering () else begin (* If we change the category of characters (symbol vs ident) *) (* we accept this as a possible token cut point and remember the *) (* translated token up to that point *) let translated = if is_symbolchar <> was_symbolchar then match tt.node with | Some tok -> Buffer.clear buff; tok | None -> translated else translated in (* We try to make a significant token from the 
current *) (* buffer and the new character *) try let tt = ttree_descend tt c in Buffer.add_char buff c; Buffering (is_symbolchar,ctag,translated,tt) with Not_found -> (* No existing translation for the given set of chars *) if is_symbolchar <> was_symbolchar then (* If we changed the category of character read, we accept it *) (* as a possible cut point and restart looking for a translation *) (flush_buffer was_symbolchar tag translated; restart_buffering ()) else (* If we did not change the category of character read, we do *) (* not want to cut arbitrarily in the middle of the sequence of *) (* symbol characters or identifier characters *) (Buffer.add_char buff c; Buffering (is_symbolchar,tag,translated,empty_ttree)) end and restart_buffering () = let tt = try ttree_descend !(!token_tree) c with Not_found -> empty_ttree in Buffer.add_char buff c; Buffering (is_symbolchar,ctag,"",tt) in translation_state := aux !translation_state let output_tagged_ident_string s = for i = 0 to String.length s - 1 do buffer_char false None s.[i] done let output_tagged_symbol_char tag c = buffer_char true tag c let flush_sublexer () = match !translation_state with | Neutral -> () | Buffering (was_symbolchar,tag,translated,tt) -> let translated = match tt.node with | Some tok -> Buffer.clear buff; tok | None -> translated in flush_buffer was_symbolchar tag translated; translation_state := Neutral (* Translation not using the automaton *) let translate s = try (ttree_find !(!token_tree) s).node with Not_found -> None coq-8.4pl2/tools/coqdoc/main.ml0000640000175000001440000005032212010532755015517 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* on 9 & 10 Mar 2004: * - handling of absolute filenames (function coq_module) * - coq_module: chop ./// (arbitrary amount of slashes), not only "./" * - function chop_prefix not useful anymore. Deleted. * - correct typo in usage message: "-R" -> "--R" * - shorten the definition of make_path * This notice is made to comply with section 2.a of the GPLv2. * It may be removed or abbreviated as far as I am concerned. *) open Cdglobals open Printf (*s \textbf{Usage.} Printed on error output. 
*) let usage () = prerr_endline ""; prerr_endline "Usage: coqdoc "; prerr_endline " --html produce a HTML document (default)"; prerr_endline " --latex produce a LaTeX document"; prerr_endline " --texmacs produce a TeXmacs document"; prerr_endline " --raw produce a text document"; prerr_endline " --dvi output the DVI"; prerr_endline " --ps output the PostScript"; prerr_endline " --pdf output the Pdf"; prerr_endline " --stdout write output to stdout"; prerr_endline " -o write output in file "; prerr_endline " -d output files into directory "; prerr_endline " -g (gallina) skip proofs"; prerr_endline " -s (short) no titles for files"; prerr_endline " -l light mode (only defs and statements)"; prerr_endline " -t give a title to the document"; prerr_endline " --body-only suppress LaTeX/HTML header and trailer"; prerr_endline " --with-header prepend as html reader"; prerr_endline " --with-footer append as html footer"; prerr_endline " --no-index do not output the index"; prerr_endline " --multi-index index split in multiple files"; prerr_endline " --index set index name (default is index)"; prerr_endline " --toc output a table of contents"; prerr_endline " --vernac consider as a .v file"; prerr_endline " --tex consider as a .tex file"; prerr_endline " -p insert in LaTeX preamble"; prerr_endline " --files-from read file names to process in "; prerr_endline " --glob-from read globalization information from "; prerr_endline " --no-glob don't use any globalization information (no links will be inserted at identifiers)"; prerr_endline " --quiet quiet mode (default)"; prerr_endline " --verbose verbose mode"; prerr_endline " --no-externals no links to Coq standard library"; prerr_endline " --external set URL for external library d"; prerr_endline " --coqlib set URL for Coq standard library"; prerr_endline (" (default is " ^ Coq_config.wwwstdlib ^ ")"); prerr_endline " --boot run in boot mode"; prerr_endline " --coqlib_path set the path where Coq files are installed"; prerr_endline " -R map physical dir to Coq dir"; prerr_endline " --latin1 set ISO-8859-1 input language"; prerr_endline " --utf8 set UTF-8 input language"; prerr_endline " --charset set HTML charset"; prerr_endline " --inputenc set LaTeX input encoding"; prerr_endline " --interpolate try to typeset identifiers in comments using definitions in the same module"; prerr_endline " --parse-comments parse regular comments"; prerr_endline " --plain-comments consider comments as non-literate text"; prerr_endline " --toc-depth don't include TOC entries for sections below level "; prerr_endline " --no-lib-name don't display \"Library\" before library names in the toc"; prerr_endline " --lib-name call top level toc entries instead of \"Library\""; prerr_endline " --lib-subtitles first line comments of the form (** * ModuleName : text *) will be interpreted as subtitles"; prerr_endline " --inline-notmono use a proportional width font for inline code (possibly with a different color)"; prerr_endline ""; exit 1 let obsolete s = eprintf "Warning: option %s is now obsolete; please update your scripts\n" s (*s \textbf{Banner.} Always printed. Notice that it is printed on error output, so that when the output of [coqdoc] is redirected this header is not (unless both standard and error outputs are redirected, of course). 
*) let banner () = eprintf "This is coqdoc version %s, compiled on %s\n" Coq_config.version Coq_config.compile_date; flush stderr let target_full_name f = match !Cdglobals.target_language with | HTML -> f ^ ".html" | Raw -> f ^ ".txt" | _ -> f ^ ".tex" (*s \textbf{Separation of files.} Files given on the command line are separated according to their type, which is determined by their suffix. Coq files have suffixe \verb!.v! or \verb!.g! and \LaTeX\ files have suffix \verb!.tex!. *) let check_if_file_exists f = if not (Sys.file_exists f) then begin eprintf "coqdoc: %s: no such file\n" f; exit 1 end (* [paths] maps a physical path to a name *) let paths = ref [] let add_path dir name = let p = normalize_path dir in paths := (p,name) :: !paths (* turn A/B/C into A.B.C *) let rec name_of_path p name dirname suffix = if p = dirname then String.concat "." (if name = "" then suffix else (name::suffix)) else let subdir = Filename.dirname dirname in if subdir = dirname then raise Not_found else name_of_path p name subdir (Filename.basename dirname::suffix) let coq_module filename = let bfname = Filename.chop_extension filename in let dirname, fname = normalize_filename bfname in let rec change_prefix = function (* Follow coqc: if in scope of -R, substitute logical name *) (* otherwise, keep only base name *) | [] -> fname | (p, name) :: rem -> try name_of_path p name dirname [fname] with Not_found -> change_prefix rem in change_prefix !paths let what_file f = check_if_file_exists f; if Filename.check_suffix f ".v" || Filename.check_suffix f ".g" then Vernac_file (f, coq_module f) else if Filename.check_suffix f ".tex" then Latex_file f else (eprintf "\ncoqdoc: don't know what to do with %s\n" f; exit 1) (*s \textbf{Reading file names from a file.} * File names may be given * in a file instead of being given on the command * line. [(files_from_file f)] returns the list of file names contained * in the file named [f]. These file names must be separated by spaces, * tabulations or newlines. 
*) let files_from_file f = let files_from_channel ch = let buf = Buffer.create 80 in let l = ref [] in try while true do match input_char ch with | ' ' | '\t' | '\n' -> if Buffer.length buf > 0 then l := (Buffer.contents buf) :: !l; Buffer.clear buf | c -> Buffer.add_char buf c done; [] with End_of_file -> List.rev !l in try check_if_file_exists f; let ch = open_in f in let l = files_from_channel ch in close_in ch;l with Sys_error s -> begin eprintf "coqdoc: cannot read from file %s (%s)\n" f s; exit 1 end (*s \textbf{Parsing of the command line.} *) let dvi = ref false let ps = ref false let pdf = ref false let parse () = let files = ref [] in let add_file f = files := f :: !files in let rec parse_rec = function | [] -> () | ("-nopreamble" | "--nopreamble" | "--no-preamble" | "-bodyonly" | "--bodyonly" | "--body-only") :: rem -> header_trailer := false; parse_rec rem | ("-with-header" | "--with-header") :: f ::rem -> header_trailer := true; header_file_spec := true; header_file := f; parse_rec rem | ("-with-header" | "--with-header") :: [] -> usage () | ("-with-footer" | "--with-footer") :: f ::rem -> header_trailer := true; footer_file_spec := true; footer_file := f; parse_rec rem | ("-with-footer" | "--with-footer") :: [] -> usage () | ("-p" | "--preamble") :: s :: rem -> Output.push_in_preamble s; parse_rec rem | ("-p" | "--preamble") :: [] -> usage () | ("-noindex" | "--noindex" | "--no-index") :: rem -> index := false; parse_rec rem | ("-multi-index" | "--multi-index") :: rem -> multi_index := true; parse_rec rem | ("-index" | "--index") :: s :: rem -> Cdglobals.index_name := s; parse_rec rem | ("-index" | "--index") :: [] -> usage () | ("-toc" | "--toc" | "--table-of-contents") :: rem -> toc := true; parse_rec rem | ("-stdout" | "--stdout") :: rem -> out_to := StdOut; parse_rec rem | ("-o" | "--output") :: f :: rem -> out_to := File (Filename.basename f); output_dir := Filename.dirname f; parse_rec rem | ("-o" | "--output") :: [] -> usage () | ("-d" | "--directory") :: dir :: rem -> output_dir := dir; parse_rec rem | ("-d" | "--directory") :: [] -> usage () | ("-s" | "--short") :: rem -> short := true; parse_rec rem | ("-l" | "-light" | "--light") :: rem -> gallina := true; light := true; parse_rec rem | ("-g" | "-gallina" | "--gallina") :: rem -> gallina := true; parse_rec rem | ("-t" | "-title" | "--title") :: s :: rem -> title := s; parse_rec rem | ("-t" | "-title" | "--title") :: [] -> usage () | ("-latex" | "--latex") :: rem -> Cdglobals.target_language := LaTeX; parse_rec rem | ("-pdf" | "--pdf") :: rem -> Cdglobals.target_language := LaTeX; pdf := true; parse_rec rem | ("-dvi" | "--dvi") :: rem -> Cdglobals.target_language := LaTeX; dvi := true; parse_rec rem | ("-ps" | "--ps") :: rem -> Cdglobals.target_language := LaTeX; ps := true; parse_rec rem | ("-html" | "--html") :: rem -> Cdglobals.target_language := HTML; parse_rec rem | ("-texmacs" | "--texmacs") :: rem -> Cdglobals.target_language := TeXmacs; parse_rec rem | ("-raw" | "--raw") :: rem -> Cdglobals.target_language := Raw; parse_rec rem | ("-charset" | "--charset") :: s :: rem -> Cdglobals.charset := s; parse_rec rem | ("-charset" | "--charset") :: [] -> usage () | ("-inputenc" | "--inputenc") :: s :: rem -> Cdglobals.inputenc := s; parse_rec rem | ("-inputenc" | "--inputenc") :: [] -> usage () | ("-raw-comments" | "--raw-comments") :: rem -> Cdglobals.raw_comments := true; parse_rec rem | ("-parse-comments" | "--parse-comments") :: rem -> Cdglobals.parse_comments := true; parse_rec rem | ("-plain-comments" | 
"--plain-comments") :: rem -> Cdglobals.plain_comments := true; parse_rec rem | ("-interpolate" | "--interpolate") :: rem -> Cdglobals.interpolate := true; parse_rec rem | ("-toc-depth" | "--toc-depth") :: [] -> usage () | ("-toc-depth" | "--toc-depth") :: ds :: rem -> let d = try int_of_string ds with Failure _ -> (eprintf "--toc-depth must be followed by an integer\n"; exit 1) in Cdglobals.toc_depth := Some d; parse_rec rem | ("-no-lib-name" | "--no-lib-name") :: rem -> Cdglobals.lib_name := ""; parse_rec rem | ("-lib-name" | "--lib-name") :: ds :: rem -> Cdglobals.lib_name := ds; parse_rec rem | ("-lib-subtitles" | "--lib-subtitles") :: rem -> Cdglobals.lib_subtitles := true; parse_rec rem | ("-inline-notmono" | "--inline-notmono") :: rem -> Cdglobals.inline_notmono := true; parse_rec rem | ("-latin1" | "--latin1") :: rem -> Cdglobals.set_latin1 (); parse_rec rem | ("-utf8" | "--utf8") :: rem -> Cdglobals.set_utf8 (); parse_rec rem | ("-q" | "-quiet" | "--quiet") :: rem -> quiet := true; parse_rec rem | ("-v" | "-verbose" | "--verbose") :: rem -> quiet := false; parse_rec rem | ("-h" | "-help" | "-?" | "--help") :: rem -> banner (); usage () | ("-V" | "-version" | "--version") :: _ -> banner (); exit 0 | ("-vernac-file" | "--vernac-file") :: f :: rem -> check_if_file_exists f; add_file (Vernac_file (f, coq_module f)); parse_rec rem | ("-vernac-file" | "--vernac-file") :: [] -> usage () | ("-tex-file" | "--tex-file") :: f :: rem -> add_file (Latex_file f); parse_rec rem | ("-tex-file" | "--tex-file") :: [] -> usage () | ("-files" | "--files" | "--files-from") :: f :: rem -> List.iter (fun f -> add_file (what_file f)) (files_from_file f); parse_rec rem | ("-files" | "--files") :: [] -> usage () | "-R" :: path :: log :: rem -> add_path path log; parse_rec rem | "-R" :: ([] | [_]) -> usage () | ("-glob-from" | "--glob-from") :: f :: rem -> glob_source := GlobFile f; parse_rec rem | ("-glob-from" | "--glob-from") :: [] -> usage () | ("-no-glob" | "--no-glob") :: rem -> glob_source := NoGlob; parse_rec rem | ("--no-externals" | "-no-externals" | "-noexternals") :: rem -> Cdglobals.externals := false; parse_rec rem | ("--external" | "-external") :: u :: logicalpath :: rem -> Index.add_external_library logicalpath u; parse_rec rem | ("--coqlib" | "-coqlib") :: u :: rem -> Cdglobals.coqlib := u; parse_rec rem | ("--coqlib" | "-coqlib") :: [] -> usage () | ("--boot" | "-boot") :: rem -> Cdglobals.coqlib_path := normalize_path ( Filename.concat (Filename.dirname Sys.executable_name) Filename.parent_dir_name ); parse_rec rem | ("--coqlib_path" | "-coqlib_path") :: d :: rem -> Cdglobals.coqlib_path := d; parse_rec rem | ("--coqlib_path" | "-coqlib_path") :: [] -> usage () | f :: rem -> add_file (what_file f); parse_rec rem in parse_rec (List.tl (Array.to_list Sys.argv)); List.rev !files (*s The following function produces the output. The default output is the \LaTeX\ document: in that case, we just call [Web.produce_document]. If option \verb!-dvi!, \verb!-ps! or \verb!-html! is invoked, then we make calls to \verb!latex! or \verb!dvips! or \verb!pdflatex! accordingly. 
*) let locally dir f x = let cwd = Sys.getcwd () in try Sys.chdir dir; let y = f x in Sys.chdir cwd; y with e -> Sys.chdir cwd; raise e let clean_temp_files basefile = let remove f = try Sys.remove f with _ -> () in remove (basefile ^ ".tex"); remove (basefile ^ ".log"); remove (basefile ^ ".aux"); remove (basefile ^ ".toc"); remove (basefile ^ ".dvi"); remove (basefile ^ ".ps"); remove (basefile ^ ".pdf"); remove (basefile ^ ".haux"); remove (basefile ^ ".html") let clean_and_exit file res = clean_temp_files file; exit res let cat file = let c = open_in file in try while true do print_char (input_char c) done with End_of_file -> close_in c let copy src dst = let cin = open_in src in try let cout = open_out dst in try while true do Pervasives.output_char cout (input_char cin) done with End_of_file -> close_out cout; close_in cin with Sys_error e -> eprintf "%s\n" e; exit 1 (*s Functions for generating output files *) let gen_one_file l = let file = function | Vernac_file (f,m) -> let sub = if !lib_subtitles then Cpretty.detect_subtitle f m else None in Output.set_module m sub; Cpretty.coq_file f m | Latex_file _ -> () in if (!header_trailer) then Output.header (); if !toc then Output.make_toc (); List.iter file l; if !index then Output.make_index(); if (!header_trailer) then Output.trailer () let gen_mult_files l = let file = function | Vernac_file (f,m) -> let sub = if !lib_subtitles then Cpretty.detect_subtitle f m else None in let hf = target_full_name m in Output.set_module m sub; open_out_file hf; if (!header_trailer) then Output.header (); Cpretty.coq_file f m; if (!header_trailer) then Output.trailer (); close_out_file() | Latex_file _ -> () in List.iter file l; if (!index && !target_language=HTML) then begin if (!multi_index) then Output.make_multi_index (); open_out_file (!index_name^".html"); page_title := (if !title <> "" then !title else "Index"); if (!header_trailer) then Output.header (); Output.make_index (); if (!header_trailer) then Output.trailer (); close_out_file() end; if (!toc && !target_language=HTML) then begin open_out_file "toc.html"; page_title := (if !title <> "" then !title else "Table of contents"); if (!header_trailer) then Output.header (); if !title <> "" then printf "

<h1>%s</h1>
    \n" !title; Output.make_toc (); if (!header_trailer) then Output.trailer (); close_out_file() end (* Rq: pour latex et texmacs, une toc ou un index spar n'a pas de sens... *) let read_glob_file vfile f = try Index.read_glob vfile f with Sys_error s -> eprintf "Warning: %s (links will not be available)\n" s let read_glob_file_of = function | Vernac_file (f,_) -> read_glob_file (Some f) (Filename.chop_extension f ^ ".glob") | Latex_file _ -> () let index_module = function | Vernac_file (f,m) -> Index.add_module m | Latex_file _ -> () let copy_style_file file = let src = List.fold_left Filename.concat !Cdglobals.coqlib_path ["tools";"coqdoc";file] in let dst = coqdoc_out file in if Sys.file_exists src then copy src dst else eprintf "Warning: file %s does not exist\n" src let produce_document l = if !target_language=HTML then copy_style_file "coqdoc.css"; if !target_language=LaTeX then copy_style_file "coqdoc.sty"; (match !Cdglobals.glob_source with | NoGlob -> () | DotGlob -> List.iter read_glob_file_of l | GlobFile f -> read_glob_file None f); List.iter index_module l; match !out_to with | StdOut -> Cdglobals.out_channel := stdout; gen_one_file l | File f -> open_out_file f; gen_one_file l; close_out_file() | MultFiles -> gen_mult_files l let produce_output fl = if not (!dvi || !ps || !pdf) then produce_document fl else begin let texfile = Filename.temp_file "coqdoc" ".tex" in let basefile = Filename.chop_suffix texfile ".tex" in let final_out_to = !out_to in out_to := File texfile; output_dir := (Filename.dirname texfile); produce_document fl; let latexexe = if !pdf then "pdflatex" else "latex" in let latexcmd = let file = Filename.basename texfile in let file = if !quiet then sprintf "'\\nonstopmode\\input{%s}'" file else file in sprintf "%s %s && %s %s 1>&2 %s" latexexe file latexexe file (if !quiet then "> /dev/null" else "") in let res = locally (Filename.dirname texfile) Sys.command latexcmd in if res <> 0 then begin eprintf "Couldn't run LaTeX successfully\n"; clean_and_exit basefile res end; let dvifile = basefile ^ ".dvi" in if !dvi then begin match final_out_to with | MultFiles | StdOut -> cat dvifile | File f -> copy dvifile f end; let pdffile = basefile ^ ".pdf" in if !pdf then begin match final_out_to with | MultFiles | StdOut -> cat pdffile | File f -> copy pdffile f end; if !ps then begin let psfile = basefile ^ ".ps" in let command = sprintf "dvips %s -o %s %s" dvifile psfile (if !quiet then "> /dev/null 2>&1" else "") in let res = Sys.command command in if res <> 0 then begin eprintf "Couldn't run dvips successfully\n"; clean_and_exit basefile res end; match final_out_to with | MultFiles | StdOut -> cat psfile | File f -> copy psfile f end; clean_temp_files basefile end (*s \textbf{Main program.} Print the banner, parse the command line, read the files and then call [produce_document] from module [Web]. 
*) let main () = let files = parse () in Index.init_coqlib_library (); if not !quiet then banner (); if files <> [] then produce_output files let _ = Printexc.catch main () coq-8.4pl2/tools/coqdoc/alpha.ml0000640000175000001440000000303012010532755015652 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'A' | '\199' -> 'C' | '\200'..'\203' -> 'E' | '\204'..'\207' -> 'I' | '\209' -> 'N' | '\210'..'\214' -> 'O' | '\217'..'\220' -> 'U' | '\221' -> 'Y' | c -> c let norm_char_utf8 c = Char.uppercase c let norm_char c = if !utf8 then norm_char_utf8 c else if !latin1 then norm_char_latin1 c else Char.uppercase c let norm_string s = let u = String.copy s in for i = 0 to String.length s - 1 do u.[i] <- norm_char s.[i] done; u let compare_char c1 c2 = match norm_char c1, norm_char c2 with | ('A'..'Z' as c1), ('A'..'Z' as c2) -> compare c1 c2 | 'A'..'Z', _ -> -1 | _, 'A'..'Z' -> 1 | '_', _ -> -1 | _, '_' -> 1 | c1, c2 -> compare c1 c2 let compare_string s1 s2 = let n1 = String.length s1 in let n2 = String.length s2 in let rec cmp i = if i == n1 || i == n2 then n1 - n2 else let c = compare_char s1.[i] s2.[i] in if c == 0 then cmp (succ i) else c in cmp 0 coq-8.4pl2/tools/coqdoc/cpretty.mll0000640000175000001440000012016112010532755016440 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [] | (l :: ls) -> l :: (take (n-1) ls) (* count the number of spaces at the beginning of a string *) let count_spaces s = let n = String.length s in let rec count c i = if i == n then c,i else match s.[i] with | '\t' -> count (c + (8 - (c mod 8))) (i + 1) | ' ' -> count (c + 1) (i + 1) | _ -> c,i in count 0 0 let remove_newline s = let n = String.length s in let rec count i = if i == n || s.[i] <> '\n' then i else count (i + 1) in let i = count 0 in i, String.sub s i (n - i) let count_dashes s = let c = ref 0 in for i = 0 to String.length s - 1 do if s.[i] = '-' then incr c done; !c let cut_head_tail_spaces s = let n = String.length s in let rec look_up i = if i == n || s.[i] <> ' ' then i else look_up (i+1) in let rec look_dn i = if i == -1 || s.[i] <> ' ' then i else look_dn (i-1) in let l = look_up 0 in let r = look_dn (n-1) in if l <= r then String.sub s l (r-l+1) else s let sec_title s = let rec count lev i = if s.[i] = '*' then count (succ lev) (succ i) else let t = String.sub s i (String.length s - i) in lev, cut_head_tail_spaces t in count 0 (String.index s '*') let strip_eol s = let eol = s.[String.length s - 1] = '\n' in (eol, if eol then String.sub s 1 (String.length s - 1) else s) let formatted = ref false let brackets = ref 0 let comment_level = ref 0 let in_proof = ref None let in_emph = ref false let in_env start stop = let r = ref false in let start_env () = r := true; start () in let stop_env () = if !r then stop (); r := false in (fun x -> !r), start_env, stop_env let in_emph, start_emph, stop_emph = in_env Output.start_emph Output.stop_emph let in_quote, start_quote, stop_quote = in_env Output.start_quote Output.stop_quote let url_buffer = Buffer.create 40 let url_name_buffer = Buffer.create 40 let backtrack lexbuf = lexbuf.lex_curr_pos <- lexbuf.lex_start_pos; lexbuf.lex_curr_p <- lexbuf.lex_start_p let backtrack_past_newline lexbuf = let buf = lexeme lexbuf in let splits = Str.bounded_split_delim (Str.regexp "['\n']") buf 2 in match splits with | [] -> () | (_ :: []) -> () | (s1 :: rest :: 
_) -> let length_skip = 1 + String.length s1 in lexbuf.lex_curr_pos <- lexbuf.lex_start_pos + length_skip let is_space = function ' ' | '\t' | '\n' | '\r' -> true | _ -> false (* saving/restoring the PP state *) type state = { st_gallina : bool; st_light : bool } let state_stack = Stack.create () let save_state () = Stack.push { st_gallina = !Cdglobals.gallina; st_light = !Cdglobals.light } state_stack let restore_state () = let s = Stack.pop state_stack in Cdglobals.gallina := s.st_gallina; Cdglobals.light := s.st_light let without_ref r f x = save_state (); r := false; f x; restore_state () let without_gallina = without_ref Cdglobals.gallina let without_light = without_ref Cdglobals.light let show_all f = without_gallina (without_light f) let begin_show () = save_state (); Cdglobals.gallina := false; Cdglobals.light := false let end_show () = restore_state () (* Reset the globals *) let reset () = formatted := false; brackets := 0; comment_level := 0 (* erasing of Section/End *) let section_re = Str.regexp "[ \t]*Section" let end_re = Str.regexp "[ \t]*End" let is_section s = Str.string_match section_re s 0 let is_end s = Str.string_match end_re s 0 let sections_to_close = ref 0 let section_or_end s = if is_section s then begin incr sections_to_close; true end else if is_end s then begin if !sections_to_close > 0 then begin decr sections_to_close; true end else false end else true (* for item lists *) type list_compare = | Before | StartLevel of int | InLevel of int * bool (* Before : we're before any levels StartLevel : at the same column as the dash in a level InLevel : after the dash of this level, but before any deeper dashes. bool is true if this is the last level *) let find_level levels cur_indent = match levels with | [] -> Before | (l::ls) -> if cur_indent < l then Before else (* cur_indent will never be less than the head of the list *) let rec findind ls n = match ls with | [] -> InLevel (n,true) | (l :: []) -> if cur_indent = l then StartLevel n else InLevel (n,true) | (l1 :: l2 :: ls) -> if cur_indent = l1 then StartLevel n else if cur_indent < l2 then InLevel (n,false) else findind (l2 :: ls) (n+1) in findind (l::ls) 1 type is_start_list = | Rule | List of int | Neither let check_start_list str = let n_dashes = count_dashes str in let (n_spaces,_) = count_spaces str in if n_dashes >= 4 && not !Cdglobals.plain_comments then Rule else if n_dashes = 1 && not !Cdglobals.plain_comments then List n_spaces else Neither (* examine a string for subtitleness *) let subtitle m s = match Str.split_delim (Str.regexp ":") s with | [] -> false | (name::_) -> if (cut_head_tail_spaces name) = m then true else false (* tokens pretty-print *) let token_buffer = Buffer.create 1024 let token_re = Str.regexp "[ \t]*(\\*\\*[ \t]+printing[ \t]+\\([^ \t]+\\)" let printing_token_re = Str.regexp "[ \t]*\\(\\(%\\([^%]*\\)%\\)\\|\\(\\$[^$]*\\$\\)\\)?[ \t]*\\(#\\(\\(&#\\|[^#]\\)*\\)#\\)?" 
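(* Added illustrative note, not part of the original source: a coqdoc
   "printing" directive typically has the shape
     (** printing -> %\ensuremath{\rightarrow}% #&rarr;# *)
   [token_re] above captures the token being redefined ("->") in group 1,
   while [printing_token_re] captures an optional LaTeX replacement
   (group 3 for %...%, group 4 for $...$) and an optional HTML replacement
   (group 6 for #...#); [add_printing_token] below pairs them up and
   hands them to [Output.add_printing_token]. *)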
let add_printing_token toks pps = try if Str.string_match token_re toks 0 then let tok = Str.matched_group 1 toks in if Str.string_match printing_token_re pps 0 then let pp = (try Some (Str.matched_group 3 pps) with _ -> try Some (Str.matched_group 4 pps) with _ -> None), (try Some (Str.matched_group 6 pps) with _ -> None) in Output.add_printing_token tok pp with _ -> () let remove_token_re = Str.regexp "[ \t]*(\\*\\*[ \t]+remove[ \t]+printing[ \t]+\\([^ \t]+\\)[ \t]*\\*)" let remove_printing_token toks = try if Str.string_match remove_token_re toks 0 then let tok = Str.matched_group 1 toks in Output.remove_printing_token tok with _ -> () let extract_ident_re = Str.regexp "([ \t]*\\([^ \t]+\\)[ \t]*:=" let extract_ident s = assert (String.length s >= 3); if Str.string_match extract_ident_re s 0 then Str.matched_group 1 s else String.sub s 1 (String.length s - 3) let output_indented_keyword s lexbuf = let nbsp,isp = count_spaces s in Output.indentation nbsp; let s = String.sub s isp (String.length s - isp) in Output.keyword s (lexeme_start lexbuf + isp) } (*s Regular expressions *) let space = [' ' '\t'] let space_nl = [' ' '\t' '\n' '\r'] let nl = "\r\n" | '\n' let firstchar = ['A'-'Z' 'a'-'z' '_'] | (* superscript 1 *) '\194' '\185' | (* utf-8 latin 1 supplement *) '\195' ['\128'-'\150'] | '\195' ['\152'-'\182'] | '\195' ['\184'-'\191'] | (* utf-8 letterlike symbols *) (* '\206' ([ '\145' - '\183'] | '\187') | *) (* '\xCF' [ '\x00' - '\xCE' ] | *) (* utf-8 letterlike symbols *) '\206' (['\145'-'\161'] | ['\163'-'\187']) | '\226' ('\130' [ '\128'-'\137' ] (* subscripts *) | '\129' [ '\176'-'\187' ] (* superscripts *) | '\132' ['\128'-'\191'] | '\133' ['\128'-'\143']) let identchar = firstchar | ['\'' '0'-'9' '@' ] let id = firstchar identchar* let pfx_id = (id '.')* let identifier = id | pfx_id id (* This misses unicode stuff, and it adds "[" and "]". It's only an approximation of idents - used for detecting whether an underscore is part of an identifier or meant to indicate emphasis *) let nonidentchar = [^ 'A'-'Z' 'a'-'z' '_' '[' ']' '\'' '0'-'9' '@' ] let printing_token = [^ ' ' '\t']* let thm_token = "Theorem" | "Lemma" | "Fact" | "Remark" | "Corollary" | "Proposition" | "Property" | "Goal" let prf_token = "Next" space+ "Obligation" | "Proof" (space* "." | space+ "with" | space+ "using") let immediate_prf_token = (* Approximation of a proof term, if not in the prf_token case *) (* To be checked after prf_token *) "Proof" space* [^ '.' 'w' 'u'] let def_token = "Definition" | "Let" | "Class" | "SubClass" | "Example" | "Fixpoint" | "Function" | "Boxed" | "CoFixpoint" | "Record" | "Structure" | "Scheme" | "Inductive" | "CoInductive" | "Equations" | "Instance" | "Declare" space+ "Instance" | "Global" space+ "Instance" let decl_token = "Hypothesis" | "Hypotheses" | "Parameter" | "Axiom" 's'? | "Conjecture" let gallina_ext = "Module" | "Include" space+ "Type" | "Include" | "Declare" space+ "Module" | "Transparent" | "Opaque" | "Canonical" | "Coercion" | "Identity" | "Implicit" | "Tactic" space+ "Notation" | "Section" | "Context" | "Variable" 's'? | ("Hypothesis" | "Hypotheses") | "End" let notation_kw = "Notation" | "Infix" | "Reserved" space+ "Notation" let commands = "Pwd" | "Cd" | "Drop" | "ProtectedLoop" | "Quit" | "Restart" | "Load" | "Add" | "Remove" space+ "Loadpath" | "Print" | "Inspect" | "About" | "SearchAbout" | "SearchRewrite" | "Search" | "Locate" | "Eval" | "Reset" | "Check" | "Type" | "Section" | "Chapter" | "Variable" 's'? 
| ("Hypothesis" | "Hypotheses") | "End" let end_kw = immediate_prf_token | "Qed" | "Defined" | "Save" | "Admitted" | "Abort" let extraction = "Extraction" | "Recursive" space+ "Extraction" | "Extract" let gallina_kw = thm_token | def_token | decl_token | gallina_ext | commands | extraction let prog_kw = "Program" space+ gallina_kw | "Obligation" | "Obligations" | "Solve" let hint_kw = "Extern" | "Rewrite" | "Resolve" | "Immediate" | "Transparent" | "Opaque" | "Unfold" | "Constructors" let set_kw = "Printing" space+ ("Coercions" | "Universes" | "All") | "Implicit" space+ "Arguments" let gallina_kw_to_hide = "Implicit" space+ "Arguments" | "Ltac" | "Require" | "Import" | "Export" | "Load" | "Hint" space+ hint_kw | "Open" | "Close" | "Delimit" | "Transparent" | "Opaque" | ("Declare" space+ ("Morphism" | "Step") ) | ("Set" | "Unset") space+ set_kw | "Declare" space+ ("Left" | "Right") space+ "Step" | "Debug" space+ ("On" | "Off") let section = "*" | "**" | "***" | "****" let item_space = " " let begin_hide = "(*" space* "begin" space+ "hide" space* "*)" space* nl let end_hide = "(*" space* "end" space+ "hide" space* "*)" space* nl let begin_show = "(*" space* "begin" space+ "show" space* "*)" space* nl let end_show = "(*" space* "end" space+ "show" space* "*)" space* nl (* let begin_verb = "(*" space* "begin" space+ "verb" space* "*)" let end_verb = "(*" space* "end" space+ "verb" space* "*)" *) (*s Scanning Coq, at beginning of line *) rule coq_bol = parse | space* nl+ { if not (!in_proof <> None && (!Cdglobals.gallina || !Cdglobals.light)) then Output.empty_line_of_code (); coq_bol lexbuf } | space* "(**" space_nl { Output.end_coq (); Output.start_doc (); let eol = doc_bol lexbuf in Output.end_doc (); Output.start_coq (); if eol then coq_bol lexbuf else coq lexbuf } | space* "Comments" space_nl { Output.end_coq (); Output.start_doc (); comments lexbuf; Output.end_doc (); Output.start_coq (); coq lexbuf } | space* begin_hide { skip_hide lexbuf; coq_bol lexbuf } | space* begin_show { begin_show (); coq_bol lexbuf } | space* end_show { end_show (); coq_bol lexbuf } | space* ("Local"|"Global") { in_proof := None; let s = lexeme lexbuf in output_indented_keyword s lexbuf; coq_bol lexbuf } | space* gallina_kw_to_hide { let s = lexeme lexbuf in if !Cdglobals.light && section_or_end s then let eol = skip_to_dot lexbuf in if eol then (coq_bol lexbuf) else coq lexbuf else begin output_indented_keyword s lexbuf; let eol = body lexbuf in if eol then coq_bol lexbuf else coq lexbuf end } | space* thm_token { let s = lexeme lexbuf in output_indented_keyword s lexbuf; let eol = body lexbuf in in_proof := Some eol; if eol then coq_bol lexbuf else coq lexbuf } | space* prf_token { in_proof := Some true; let eol = if not !Cdglobals.gallina then begin backtrack lexbuf; body_bol lexbuf end else let s = lexeme lexbuf in if s.[String.length s - 1] = '.' 
then false else skip_to_dot lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | space* end_kw { let eol = if not (!in_proof <> None && !Cdglobals.gallina) then begin backtrack lexbuf; body_bol lexbuf end else skip_to_dot lexbuf in in_proof := None; if eol then coq_bol lexbuf else coq lexbuf } | space* gallina_kw { in_proof := None; let s = lexeme lexbuf in output_indented_keyword s lexbuf; let eol= body lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | space* prog_kw { in_proof := None; let s = lexeme lexbuf in output_indented_keyword s lexbuf; let eol= body lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | space* notation_kw { let s = lexeme lexbuf in output_indented_keyword s lexbuf; let eol= start_notation_string lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | space* "(**" space+ "printing" space+ printing_token space+ { let tok = lexeme lexbuf in let s = printing_token_body lexbuf in add_printing_token tok s; coq_bol lexbuf } | space* "(**" space+ "printing" space+ { eprintf "warning: bad 'printing' command at character %d\n" (lexeme_start lexbuf); flush stderr; comment_level := 1; ignore (comment lexbuf); coq_bol lexbuf } | space* "(**" space+ "remove" space+ "printing" space+ printing_token space* "*)" { remove_printing_token (lexeme lexbuf); coq_bol lexbuf } | space* "(**" space+ "remove" space+ "printing" space+ { eprintf "warning: bad 'remove printing' command at character %d\n" (lexeme_start lexbuf); flush stderr; comment_level := 1; ignore (comment lexbuf); coq_bol lexbuf } | space* "(*" { comment_level := 1; if !Cdglobals.parse_comments then begin let s = lexeme lexbuf in let nbsp,isp = count_spaces s in Output.indentation nbsp; Output.start_comment (); end; let eol = comment lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | eof { () } | _ { let eol = if not !Cdglobals.gallina then begin backtrack lexbuf; body_bol lexbuf end else skip_to_dot lexbuf in if eol then coq_bol lexbuf else coq lexbuf } (*s Scanning Coq elsewhere *) and coq = parse | nl { if not (!in_proof <> None && !Cdglobals.gallina) then Output.line_break(); coq_bol lexbuf } | "(**" space_nl { Output.end_coq (); Output.start_doc (); let eol = doc_bol lexbuf in Output.end_doc (); Output.start_coq (); if eol then coq_bol lexbuf else coq lexbuf } | "(*" { comment_level := 1; if !Cdglobals.parse_comments then begin let s = lexeme lexbuf in let nbsp,isp = count_spaces s in Output.indentation nbsp; Output.start_comment (); end; let eol = comment lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | nl+ space* "]]" { if not !formatted then begin (* Isn't this an anomaly *) let s = lexeme lexbuf in let nlsp,s = remove_newline s in let nbsp,isp = count_spaces s in Output.indentation nbsp; let loc = lexeme_start lexbuf + isp + nlsp in Output.sublexer ']' loc; Output.sublexer ']' (loc+1); coq lexbuf end } | eof { () } | gallina_kw_to_hide { let s = lexeme lexbuf in if !Cdglobals.light && section_or_end s then begin let eol = skip_to_dot lexbuf in if eol then coq_bol lexbuf else coq lexbuf end else begin Output.ident s (lexeme_start lexbuf); let eol=body lexbuf in if eol then coq_bol lexbuf else coq lexbuf end } | prf_token { let eol = if not !Cdglobals.gallina then begin backtrack lexbuf; body lexbuf end else let s = lexeme lexbuf in let eol = if s.[String.length s - 1] = '.' 
then false else skip_to_dot lexbuf in eol in if eol then coq_bol lexbuf else coq lexbuf } | end_kw { let eol = if not !Cdglobals.gallina then begin backtrack lexbuf; body lexbuf end else let eol = skip_to_dot lexbuf in if !in_proof <> Some true && eol then Output.line_break (); eol in in_proof := None; if eol then coq_bol lexbuf else coq lexbuf } | gallina_kw { let s = lexeme lexbuf in Output.ident s (lexeme_start lexbuf); let eol = body lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | notation_kw { let s = lexeme lexbuf in Output.ident s (lexeme_start lexbuf); let eol= start_notation_string lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | prog_kw { let s = lexeme lexbuf in Output.ident s (lexeme_start lexbuf); let eol = body lexbuf in if eol then coq_bol lexbuf else coq lexbuf } | space+ { Output.char ' '; coq lexbuf } | eof { () } | _ { let eol = if not !Cdglobals.gallina then begin backtrack lexbuf; body lexbuf end else skip_to_dot lexbuf in if eol then coq_bol lexbuf else coq lexbuf} (*s Scanning documentation, at beginning of line *) and doc_bol = parse | space* section space+ ([^'\n' '*'] | '*'+ [^'\n' ')' '*'])* ('*'+ '\n')? { let eol, lex = strip_eol (lexeme lexbuf) in let lev, s = sec_title lex in if (!Cdglobals.lib_subtitles) && (subtitle (Output.get_module false) s) then () else Output.section lev (fun () -> ignore (doc None (from_string s))); if eol then doc_bol lexbuf else doc None lexbuf } | space_nl* '-'+ { let buf' = lexeme lexbuf in let bufs = Str.split_delim (Str.regexp "['\n']") buf' in let lines = (List.length bufs) - 1 in let line = match bufs with | [] -> eprintf "Internal error bad_split1 - please report\n"; exit 1 | _ -> List.nth bufs lines in match check_start_list line with | Neither -> backtrack_past_newline lexbuf; doc None lexbuf | List n -> Output.paragraph (); Output.item 1; doc (Some [n]) lexbuf | Rule -> Output.rule (); doc None lexbuf } | space* nl+ { Output.paragraph (); doc_bol lexbuf } | "<<" space* { Output.start_verbatim false; verbatim false lexbuf; doc_bol lexbuf } | eof { true } | '_' { if !Cdglobals.plain_comments then Output.char '_' else start_emph (); doc None lexbuf } | _ { backtrack lexbuf; doc None lexbuf } (*s Scanning lists - using whitespace *) and doc_list_bol indents = parse | space* '-' { let (n_spaces,_) = count_spaces (lexeme lexbuf) in match find_level indents n_spaces with | Before -> backtrack lexbuf; doc_bol lexbuf | StartLevel n -> Output.item n; doc (Some (take n indents)) lexbuf | InLevel (n,true) -> let items = List.length indents in Output.item (items+1); doc (Some (List.append indents [n_spaces])) lexbuf | InLevel (_,false) -> backtrack lexbuf; doc_bol lexbuf } | "<<" space* { Output.start_verbatim false; verbatim false lexbuf; doc_list_bol indents lexbuf } | "[[" nl { formatted := true; Output.start_inline_coq_block (); ignore(body_bol lexbuf); Output.end_inline_coq_block (); formatted := false; doc_list_bol indents lexbuf } | "[[[" nl { inf_rules (Some indents) lexbuf } | space* nl space* '-' { (* Like in the doc_bol production, these two productions exist only to deal properly with whitespace *) Output.paragraph (); backtrack_past_newline lexbuf; doc_list_bol indents lexbuf } | space* nl space* _ { let buf' = lexeme lexbuf in let buf = let bufs = Str.split_delim (Str.regexp "['\n']") buf' in match bufs with | (_ :: s :: []) -> s | (_ :: _ :: s :: _) -> s | _ -> eprintf "Internal error bad_split2 - please report\n"; exit 1 in let (n_spaces,_) = count_spaces buf in match find_level indents n_spaces with | 
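(* Added worked example, not part of the original source: with
   [indents = [2;6]], [find_level] classifies column 2 as [StartLevel 1],
   column 4 as [InLevel (1,false)], column 6 as [StartLevel 2],
   column 9 as [InLevel (2,true)] and column 0 as [Before]; the arms
   below dispatch on that classification for the line that follows a
   blank line inside an item list. *)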
InLevel _ -> Output.paragraph (); backtrack_past_newline lexbuf; doc_list_bol indents lexbuf | StartLevel n -> if n = 1 then begin Output.stop_item (); backtrack_past_newline lexbuf; doc_bol lexbuf end else begin Output.paragraph (); backtrack_past_newline lexbuf; doc_list_bol indents lexbuf end | Before -> (* Here we were at the beginning of a line, and it was blank. The next line started before any list items. So: insert a paragraph for the empty line, rewind to whatever's just after the newline, then toss over to doc_bol for whatever comes next. *) Output.stop_item (); Output.paragraph (); backtrack_past_newline lexbuf; doc_bol lexbuf } | space* _ { let (n_spaces,_) = count_spaces (lexeme lexbuf) in match find_level indents n_spaces with | Before -> Output.stop_item (); backtrack lexbuf; doc_bol lexbuf | StartLevel n -> (if n = 1 then Output.stop_item () else Output.reach_item_level (n-1)); backtrack lexbuf; doc (Some (take (n-1) indents)) lexbuf | InLevel (n,_) -> Output.reach_item_level n; backtrack lexbuf; doc (Some (take n indents)) lexbuf } (*s Scanning documentation elsewhere *) and doc indents = parse | nl { Output.char '\n'; match indents with | Some ls -> doc_list_bol ls lexbuf | None -> doc_bol lexbuf } | "[[" nl { if !Cdglobals.plain_comments then (Output.char '['; Output.char '['; doc indents lexbuf) else (formatted := true; Output.start_inline_coq_block (); let eol = body_bol lexbuf in Output.end_inline_coq_block (); formatted := false; if eol then match indents with | Some ls -> doc_list_bol ls lexbuf | None -> doc_bol lexbuf else doc indents lexbuf)} | "[[[" nl { inf_rules indents lexbuf } | "[]" { Output.proofbox (); doc indents lexbuf } | "{{" { url lexbuf; doc indents lexbuf } | "[" { if !Cdglobals.plain_comments then Output.char '[' else (brackets := 1; Output.start_inline_coq (); escaped_coq lexbuf; Output.end_inline_coq ()); doc indents lexbuf } | "(*" { backtrack lexbuf ; let bol_parse = match indents with | Some is -> doc_list_bol is | None -> doc_bol in let eol = comment lexbuf in if eol then bol_parse lexbuf else doc indents lexbuf } | '*'* "*)" space_nl* "(**" {(match indents with | Some _ -> Output.stop_item () | None -> ()); (* this says - if there is a blank line between the two comments, insert one in the output too *) let lines = List.length (Str.split_delim (Str.regexp "['\n']") (lexeme lexbuf)) in if lines > 2 then Output.paragraph (); doc_bol lexbuf } | '*'* "*)" space* nl { true } | '*'* "*)" { false } | "$" { if !Cdglobals.plain_comments then Output.char '$' else (Output.start_latex_math (); escaped_math_latex lexbuf); doc indents lexbuf } | "$$" { if !Cdglobals.plain_comments then Output.char '$'; Output.char '$'; doc indents lexbuf } | "%" { if !Cdglobals.plain_comments then Output.char '%' else escaped_latex lexbuf; doc indents lexbuf } | "%%" { if !Cdglobals.plain_comments then Output.char '%'; Output.char '%'; doc indents lexbuf } | "#" { if !Cdglobals.plain_comments then Output.char '#' else escaped_html lexbuf; doc indents lexbuf } | "##" { if !Cdglobals.plain_comments then Output.char '#'; Output.char '#'; doc indents lexbuf } | nonidentchar '_' nonidentchar { List.iter (fun x -> Output.char (lexeme_char lexbuf x)) [0;1;2]; doc indents lexbuf} | nonidentchar '_' { Output.char (lexeme_char lexbuf 0); if !Cdglobals.plain_comments then Output.char '_' else start_emph () ; doc indents lexbuf } | '_' nonidentchar { if !Cdglobals.plain_comments then Output.char '_' else stop_emph () ; Output.char (lexeme_char lexbuf 1); doc indents lexbuf } | "<<" 
space* { Output.start_verbatim true; verbatim true lexbuf; doc_bol lexbuf } | '"' { if !Cdglobals.plain_comments then Output.char '"' else if in_quote () then stop_quote () else start_quote (); doc indents lexbuf } | eof { false } | _ { Output.char (lexeme_char lexbuf 0); doc indents lexbuf } (*s Various escapings *) and escaped_math_latex = parse | "$" { Output.stop_latex_math () } | eof { Output.stop_latex_math () } | _ { Output.latex_char (lexeme_char lexbuf 0); escaped_math_latex lexbuf } and escaped_latex = parse | "%" { () } | eof { () } | _ { Output.latex_char (lexeme_char lexbuf 0); escaped_latex lexbuf } and escaped_html = parse | "#" { () } | "&#" { Output.html_char '&'; Output.html_char '#'; escaped_html lexbuf } | "##" { Output.html_char '#'; escaped_html lexbuf } | eof { () } | _ { Output.html_char (lexeme_char lexbuf 0); escaped_html lexbuf } and verbatim inline = parse | nl ">>" space* nl { Output.verbatim_char inline '\n'; Output.stop_verbatim inline } | nl ">>" { Output.verbatim_char inline '\n'; Output.stop_verbatim inline } | ">>" { Output.stop_verbatim inline } | eof { Output.stop_verbatim inline } | _ { Output.verbatim_char inline (lexeme_char lexbuf 0); verbatim inline lexbuf } and url = parse | "}}" { Output.url (Buffer.contents url_buffer) None; Buffer.clear url_buffer } | "}" { url_name lexbuf } | _ { Buffer.add_char url_buffer (lexeme_char lexbuf 0); url lexbuf } and url_name = parse | "}" { Output.url (Buffer.contents url_buffer) (Some (Buffer.contents url_name_buffer)); Buffer.clear url_buffer; Buffer.clear url_name_buffer } | _ { Buffer.add_char url_name_buffer (lexeme_char lexbuf 0); url_name lexbuf } (*s Coq, inside quotations *) and escaped_coq = parse | "]" { decr brackets; if !brackets > 0 then (Output.sublexer ']' (lexeme_start lexbuf); escaped_coq lexbuf) else Tokens.flush_sublexer () } | "[" { incr brackets; Output.sublexer '[' (lexeme_start lexbuf); escaped_coq lexbuf } | "(*" { Tokens.flush_sublexer (); comment_level := 1; ignore (comment lexbuf); escaped_coq lexbuf } | "*)" { (* likely to be a syntax error: we escape *) backtrack lexbuf } | eof { Tokens.flush_sublexer () } | (identifier '.')* identifier { Tokens.flush_sublexer(); Output.ident (lexeme lexbuf) (lexeme_start lexbuf); escaped_coq lexbuf } | space_nl* { let str = lexeme lexbuf in Tokens.flush_sublexer(); (if !Cdglobals.inline_notmono then () else Output.end_inline_coq ()); String.iter Output.char str; (if !Cdglobals.inline_notmono then () else Output.start_inline_coq ()); escaped_coq lexbuf } | _ { Output.sublexer (lexeme_char lexbuf 0) (lexeme_start lexbuf); escaped_coq lexbuf } (*s Coq "Comments" command. *) and comments = parse | space_nl+ { Output.char ' '; comments lexbuf } | '"' [^ '"']* '"' { let s = lexeme lexbuf in let s = String.sub s 1 (String.length s - 2) in ignore (doc None (from_string s)); comments lexbuf } | ([^ '.' '"'] | '.' [^ ' ' '\t' '\n'])+ { escaped_coq (from_string (lexeme lexbuf)); comments lexbuf } | "." 
(space_nl | eof) { () } | eof { () } | _ { Output.char (lexeme_char lexbuf 0); comments lexbuf } (*s Skip comments *) and comment = parse | "(*" { incr comment_level; if !Cdglobals.parse_comments then Output.start_comment (); comment lexbuf } | "*)" space* nl { if !Cdglobals.parse_comments then (Output.end_comment (); Output.line_break ()); decr comment_level; if !comment_level > 0 then comment lexbuf else true } | "*)" { if !Cdglobals.parse_comments then (Output.end_comment ()); decr comment_level; if !comment_level > 0 then comment lexbuf else false } | "[" { if !Cdglobals.parse_comments then if !Cdglobals.plain_comments then Output.char '[' else (brackets := 1; Output.start_inline_coq (); escaped_coq lexbuf; Output.end_inline_coq ()); comment lexbuf } | "[[" nl { if !Cdglobals.parse_comments then if !Cdglobals.plain_comments then (Output.char '['; Output.char '[') else (formatted := true; Output.start_inline_coq_block (); let _ = body_bol lexbuf in Output.end_inline_coq_block (); formatted := false); comment lexbuf} | "$" { if !Cdglobals.parse_comments then if !Cdglobals.plain_comments then Output.char '$' else (Output.start_latex_math (); escaped_math_latex lexbuf); comment lexbuf } | "$$" { if !Cdglobals.parse_comments then (if !Cdglobals.plain_comments then Output.char '$'; Output.char '$'); doc None lexbuf } | "%" { if !Cdglobals.parse_comments then if !Cdglobals.plain_comments then Output.char '%' else escaped_latex lexbuf; comment lexbuf } | "%%" { if !Cdglobals.parse_comments then (if !Cdglobals.plain_comments then Output.char '%'; Output.char '%'); comment lexbuf } | "#" { if !Cdglobals.parse_comments then if !Cdglobals.plain_comments then Output.char '$' else escaped_html lexbuf; comment lexbuf } | "##" { if !Cdglobals.parse_comments then (if !Cdglobals.plain_comments then Output.char '#'; Output.char '#'); comment lexbuf } | eof { false } | space+ { if !Cdglobals.parse_comments then Output.indentation (fst (count_spaces (lexeme lexbuf))); comment lexbuf } | nl { if !Cdglobals.parse_comments then Output.line_break (); comment lexbuf } | _ { if !Cdglobals.parse_comments then Output.char (lexeme_char lexbuf 0); comment lexbuf } and skip_to_dot = parse | '.' space* nl { true } | eof | '.' space+ { false } | "(*" { comment_level := 1; ignore (comment lexbuf); skip_to_dot lexbuf } | _ { skip_to_dot lexbuf } and body_bol = parse | space+ { Output.indentation (fst (count_spaces (lexeme lexbuf))); body lexbuf } | _ { backtrack lexbuf; Output.indentation 0; body lexbuf } and body = parse | nl {Tokens.flush_sublexer(); Output.line_break(); new_line lexbuf; body_bol lexbuf} | nl+ space* "]]" space* nl { Tokens.flush_sublexer(); if not !formatted then begin let s = lexeme lexbuf in let nlsp,s = remove_newline s in let _,isp = count_spaces s in let loc = lexeme_start lexbuf + nlsp + isp in Output.sublexer ']' loc; Output.sublexer ']' (loc+1); Tokens.flush_sublexer(); body lexbuf end else begin Output.paragraph (); true end } | "]]" space* nl { Tokens.flush_sublexer(); if not !formatted then begin let loc = lexeme_start lexbuf in Output.sublexer ']' loc; Output.sublexer ']' (loc+1); Tokens.flush_sublexer(); Output.line_break(); body lexbuf end else begin Output.paragraph (); true end } | eof { Tokens.flush_sublexer(); false } | '.' space* nl | '.' space* eof { Tokens.flush_sublexer(); Output.char '.'; Output.line_break(); if not !formatted then true else body_bol lexbuf } | '.' 
space* nl "]]" space* nl { Tokens.flush_sublexer(); Output.char '.'; if not !formatted then begin eprintf "Error: stray ]] at %d\n" (lexeme_start lexbuf); flush stderr; exit 1 end else begin Output.paragraph (); true end } | '.' space+ { Tokens.flush_sublexer(); Output.char '.'; Output.char ' '; if not !formatted then false else body lexbuf } | "(**" space_nl { Tokens.flush_sublexer(); Output.end_coq (); Output.start_doc (); let eol = doc_bol lexbuf in Output.end_doc (); Output.start_coq (); if eol then body_bol lexbuf else body lexbuf } | "(*" { Tokens.flush_sublexer(); comment_level := 1; if !Cdglobals.parse_comments then Output.start_comment (); let eol = comment lexbuf in if eol then begin if not !Cdglobals.parse_comments then Output.line_break(); body_bol lexbuf end else body lexbuf } | "where" { Tokens.flush_sublexer(); Output.ident (lexeme lexbuf) (lexeme_start lexbuf); start_notation_string lexbuf } | identifier { Tokens.flush_sublexer(); Output.ident (lexeme lexbuf) (lexeme_start lexbuf); body lexbuf } | ".." { Tokens.flush_sublexer(); Output.char '.'; Output.char '.'; body lexbuf } | '"' { Tokens.flush_sublexer(); Output.char '"'; string lexbuf; body lexbuf } | space { Tokens.flush_sublexer(); Output.char (lexeme_char lexbuf 0); body lexbuf } | _ { let c = lexeme_char lexbuf 0 in Output.sublexer c (lexeme_start lexbuf); body lexbuf } and start_notation_string = parse | space { Tokens.flush_sublexer(); Output.char (lexeme_char lexbuf 0); start_notation_string lexbuf } | '"' (* a true notation *) { Output.sublexer '"' (lexeme_start lexbuf); notation_string lexbuf; body lexbuf } | _ (* an abbreviation *) { backtrack lexbuf; body lexbuf } and notation_string = parse | "\"\"" { Output.char '"'; Output.char '"'; (* Unlikely! *) notation_string lexbuf } | '"' { Tokens.flush_sublexer(); Output.char '"' } | _ { let c = lexeme_char lexbuf 0 in Output.sublexer c (lexeme_start lexbuf); notation_string lexbuf } and string = parse | "\"\"" { Output.char '"'; Output.char '"'; string lexbuf } | '"' { Output.char '"' } | _ { let c = lexeme_char lexbuf 0 in Output.char c; string lexbuf } and skip_hide = parse | eof | end_hide { () } | _ { skip_hide lexbuf } (*s Reading token pretty-print *) and printing_token_body = parse | "*)" nl? | eof { let s = Buffer.contents token_buffer in Buffer.clear token_buffer; s } | _ { Buffer.add_string token_buffer (lexeme lexbuf); printing_token_body lexbuf } (*s These handle inference rules, parsing the body segments of things enclosed in [[[ ]]] brackets *) and inf_rules indents = parse | space* nl (* blank line, before or between definitions *) { inf_rules indents lexbuf } | "]]]" nl (* end of the inference rules block *) { match indents with | Some ls -> doc_list_bol ls lexbuf | None -> doc_bol lexbuf } | _ { backtrack lexbuf; (* anything else must be the first line in a rule *) inf_rules_assumptions indents [] lexbuf} (* The inference rule parsing just collects the inference rule and then calls the output function once, instead of doing things incrementally like the rest of the lexer. If only there were a real parsing phase... 
*) and inf_rules_assumptions indents assumptions = parse | space* "---" '-'* [^ '\n']* nl (* hit the horizontal line *) { let line = lexeme lexbuf in let (spaces,_) = count_spaces line in let dashes_and_name = cut_head_tail_spaces (String.sub line 0 (String.length line - 1)) in let ldn = String.length dashes_and_name in let (dashes,name) = try (let i = String.index dashes_and_name ' ' in let d = String.sub dashes_and_name 0 i in let n = cut_head_tail_spaces (String.sub dashes_and_name (i+1) (ldn-i-1)) in (d, Some n)) with _ -> (dashes_and_name, None) in inf_rules_conclusion indents (List.rev assumptions) (spaces, dashes, name) [] lexbuf } | [^ '\n']* nl (* if it's not the horizontal line, it's an assumption *) { let line = lexeme lexbuf in let (spaces,_) = count_spaces line in let assumption = cut_head_tail_spaces (String.sub line 0 (String.length line - 1)) in inf_rules_assumptions indents ((spaces,assumption)::assumptions) lexbuf } (*s The conclusion is required to come immediately after the horizontal bar. It is allowed to contain multiple lines of text, like the assumptions. The conclusion ends when we spot a blank line or a ']]]'. *) and inf_rules_conclusion indents assumptions middle conclusions = parse | space* nl | space* "]]]" nl (* end of conclusions. *) { backtrack lexbuf; Output.inf_rule assumptions middle (List.rev conclusions); inf_rules indents lexbuf } | space* [^ '\n']+ nl (* this is a line in the conclusion *) { let line = lexeme lexbuf in let (spaces,_) = count_spaces line in let conc = cut_head_tail_spaces (String.sub line 0 (String.length line - 1)) in inf_rules_conclusion indents assumptions middle ((spaces,conc) :: conclusions) lexbuf } (*s A small scanner to support the chapter subtitle feature *) and st_start m = parse | "(*" "*"+ space+ "*" space+ { st_modname m lexbuf } | _ { None } and st_modname m = parse | identifier space* ":" space* { if subtitle m (lexeme lexbuf) then st_subtitle lexbuf else None } | _ { None } and st_subtitle = parse | [^ '\n']* '\n' { let st = lexeme lexbuf in let i = try Str.search_forward (Str.regexp "\\**)") st 0 with Not_found -> (eprintf "unterminated comment at beginning of file\n"; exit 1) in Some (cut_head_tail_spaces (String.sub st 0 i)) } | _ { None } (*s Applying the scanners to files *) { let coq_file f m = reset (); let c = open_in f in let lb = from_channel c in (Index.current_library := m; Output.initialize (); Output.start_module (); Output.start_coq (); coq_bol lb; Output.end_coq (); close_in c) let detect_subtitle f m = let c = open_in f in let lb = from_channel c in let sub = st_start m lb in close_in c; sub } coq-8.4pl2/tools/coqdoc/index.ml0000640000175000001440000002721012037140014015672 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* "<>" then if id <> "<>" then sp ^ "." 
^ id else sp else if id <> "<>" then id else "" let add_def loc1 loc2 ty sp id = let fullid = full_ident sp id in let def = Def (fullid, ty) in for loc = loc1 to loc2 do Hashtbl.add reftable (!current_library, loc) def done; Hashtbl.add deftable !current_library (fullid, ty); Hashtbl.add byidtable id (!current_library, fullid, ty) let add_ref m loc m' sp id ty = let fullid = full_ident sp id in if Hashtbl.mem reftable (m, loc) then () else Hashtbl.add reftable (m, loc) (Ref (m', fullid, ty)); let idx = if id = "<>" then m' else id in if Hashtbl.mem byidtable idx then () else Hashtbl.add byidtable idx (m', fullid, ty) let find m l = Hashtbl.find reftable (m, l) let find_string m s = let (m,s,t) = Hashtbl.find byidtable s in Ref (m,s,t) (*s Manipulating path prefixes *) type stack = string list let rec string_of_stack st = match st with | [] -> "" | x::[] -> x | x::tl -> (string_of_stack tl) ^ "." ^ x let empty_stack = [] let module_stack = ref empty_stack let section_stack = ref empty_stack let init_stack () = module_stack := empty_stack; section_stack := empty_stack let push st p = st := p::!st let pop st = match !st with | [] -> () | _::tl -> st := tl let head st = match st with | [] -> "" | x::_ -> x let begin_module m = push module_stack m let begin_section s = push section_stack s let end_block id = (** determines if it ends a module or a section and pops the stack *) if ((String.compare (head !module_stack) id ) == 0) then pop module_stack else if ((String.compare (head !section_stack) id) == 0) then pop section_stack else () let make_fullid id = (** prepends the current module path to an id *) let path = string_of_stack !module_stack in if String.length path > 0 then path ^ "." ^ id else id (* Coq modules *) let split_sp s = try let i = String.rindex s '.' in String.sub s 0 i, String.sub s (i + 1) (String.length s - i - 1) with Not_found -> "", s let modules = Hashtbl.create 97 let local_modules = Hashtbl.create 97 let add_module m = let _,id = split_sp m in Hashtbl.add modules id m; Hashtbl.add local_modules m () type module_kind = Local | External of string | Unknown let external_libraries = ref [] let add_external_library logicalpath url = external_libraries := (logicalpath,url) :: !external_libraries let find_external_library logicalpath = let rec aux = function | [] -> raise Not_found | (l,u)::rest -> if String.length logicalpath > String.length l & String.sub logicalpath 0 (String.length l + 1) = l ^"." 
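(* Added clarifying comment, not part of the original source: the test
   above holds exactly when [l] is a proper prefix of [logicalpath] at a
   module-path boundary, e.g. "Coq" matches "Coq.Init.Datatypes" but not
   "Coqprime.Z". *)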
then u else aux rest in aux !external_libraries let init_coqlib_library () = add_external_library "Coq" !coqlib let find_module m = if Hashtbl.mem local_modules m then Local else try External (Filename.concat (find_external_library m) m) with Not_found -> Unknown (* Building indexes *) type 'a index = { idx_name : string; idx_entries : (char * (string * 'a) list) list; idx_size : int } let map f i = { i with idx_entries = List.map (fun (c,l) -> (c, List.map (fun (s,x) -> (s,f s x)) l)) i.idx_entries } let compare_entries (s1,_) (s2,_) = Alpha.compare_string s1 s2 let sort_entries el = let t = Hashtbl.create 97 in List.iter (fun c -> Hashtbl.add t c []) ['A'; 'B'; 'C'; 'D'; 'E'; 'F'; 'G'; 'H'; 'I'; 'J'; 'K'; 'L'; 'M'; 'N'; 'O'; 'P'; 'Q'; 'R'; 'S'; 'T'; 'U'; 'V'; 'W'; 'X'; 'Y'; 'Z'; '_'; '*']; List.iter (fun ((s,_) as e) -> let c = Alpha.norm_char s.[0] in let c,l = try c,Hashtbl.find t c with Not_found -> '*',Hashtbl.find t '*' in Hashtbl.replace t c (e :: l)) el; let res = ref [] in Hashtbl.iter (fun c l -> res := (c, List.sort compare_entries l) :: !res) t; List.sort (fun (c1,_) (c2,_) -> Alpha.compare_char c1 c2) !res let display_letter c = if c = '*' then "other" else String.make 1 c let index_size = List.fold_left (fun s (_,l) -> s + List.length l) 0 let hashtbl_elements h = Hashtbl.fold (fun x y l -> (x,y)::l) h [] let type_name = function | Library -> let ln = !lib_name in if ln <> "" then String.lowercase ln else "library" | Module -> "module" | Definition -> "definition" | Inductive -> "inductive" | Constructor -> "constructor" | Lemma -> "lemma" | Record -> "record" | Projection -> "projection" | Instance -> "instance" | Class -> "class" | Method -> "method" | Variable -> "variable" | Axiom -> "axiom" | TacticDefinition -> "tactic" | Abbreviation -> "abbreviation" | Notation -> "notation" | Section -> "section" let prepare_entry s = function | Notation -> (* We decode the encoding done in Dumpglob.cook_notation of coqtop *) (* Encoded notations have the form section:sc:x_'++'_x where: *) (* - the section, if any, ends with a "." 
*) (* - the scope can be empty *) (* - tokens are separated with "_" *) (* - non-terminal symbols are conventionally represented by "x" *) (* - terminals are enclosed within simple quotes *) (* - existing simple quotes (that necessarily are parts of *) (* terminals) are doubled *) (* (as a consequence, when a terminal contains "_" or "x", these *) (* necessarily appear enclosed within non-doubled simple quotes) *) (* - non-printable characters < 32 are left encoded so that they *) (* are human-readable in index files *) (* Example: "x ' %x _% y %'x %'_' z" is encoded as *) (* "x_''''_'%x'_'_%'_x_'%''x'_'%''_'''_x" *) let err () = eprintf "Invalid notation in globalization file\n"; exit 1 in let h = try String.index_from s 0 ':' with _ -> err () in let i = try String.index_from s (h+1) ':' with _ -> err () in let sc = String.sub s (h+1) (i-h-1) in let ntn = String.make (String.length s - i) ' ' in let k = ref 0 in let j = ref (i+1) in let quoted = ref false in let l = String.length s - 1 in while !j <= l do if not !quoted then begin (match s.[!j] with | '_' -> ntn.[!k] <- ' '; incr k | 'x' -> ntn.[!k] <- '_'; incr k | '\'' -> quoted := true | _ -> assert false) end else if s.[!j] = '\'' then if (!j = l || s.[!j+1] = '_') then quoted := false else (incr j; ntn.[!k] <- s.[!j]; incr k) else begin ntn.[!k] <- s.[!j]; incr k end; incr j done; let ntn = String.sub ntn 0 !k in if sc = "" then ntn else ntn ^ " (" ^ sc ^ ")" | _ -> s let all_entries () = let gl = ref [] in let add_g s m t = gl := (s,(m,t)) :: !gl in let bt = Hashtbl.create 11 in let add_bt t s m = let l = try Hashtbl.find bt t with Not_found -> [] in Hashtbl.replace bt t ((s,m) :: l) in let classify m (s,t) = (add_g s m t; add_bt t s m) in Hashtbl.iter classify deftable; Hashtbl.iter (fun id m -> add_g id m Library; add_bt Library id m) modules; { idx_name = "global"; idx_entries = sort_entries !gl; idx_size = List.length !gl }, Hashtbl.fold (fun t e l -> (t, { idx_name = type_name t; idx_entries = sort_entries e; idx_size = List.length e }) :: l) bt [] let type_of_string = function | "def" | "coe" | "subclass" | "canonstruc" | "fix" | "cofix" | "ex" | "scheme" -> Definition | "prf" | "thm" -> Lemma | "ind" | "coind" -> Inductive | "constr" -> Constructor | "rec" | "corec" -> Record | "proj" -> Projection | "class" -> Class | "meth" -> Method | "inst" -> Instance | "var" -> Variable | "defax" | "prfax" | "ax" -> Axiom | "syndef" -> Abbreviation | "not" -> Notation | "lib" -> Library | "mod" | "modtype" -> Module | "tac" -> TacticDefinition | "sec" -> Section | s -> raise (Invalid_argument ("type_of_string:" ^ s)) let ill_formed_glob_file f = eprintf "Warning: ill-formed file %s (links will not be available)\n" f let outdated_glob_file f = eprintf "Warning: %s not consistent with corresponding .v file (links will not be available)\n" f let correct_file vfile f c = let s = input_line c in if String.length s < 7 || String.sub s 0 7 <> "DIGEST " then (ill_formed_glob_file f; false) else let s = String.sub s 7 (String.length s - 7) in match vfile, s with | None, "NO" -> true | Some _, "NO" -> ill_formed_glob_file f; false | None, _ -> ill_formed_glob_file f; false | Some vfile, s -> s = Digest.to_hex (Digest.file vfile) || (outdated_glob_file f; false) let read_glob vfile f = let c = open_in f in if correct_file vfile f c then let cur_mod = ref "" in try while true do let s = input_line c in let n = String.length s in if n > 0 then begin match s.[0] with | 'F' -> cur_mod := String.sub s 1 (n - 1); current_library := !cur_mod | 'R' -> 
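(* An "R" line of the .glob file records a cross-reference; judging from the format string scanned just below, its fields are the character span, the defining library, a section path, the identifier and its entry type. *)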
(try Scanf.sscanf s "R%d:%d %s %s %s %s" (fun loc1 loc2 lib_dp sp id ty -> for loc=loc1 to loc2 do add_ref !cur_mod loc lib_dp sp id (type_of_string ty); (* Also add an entry for each module mentioned in [lib_dp], * to use in interpolation. *) ignore (List.fold_right (fun thisPiece priorPieces -> let newPieces = match priorPieces with | "" -> thisPiece | _ -> thisPiece ^ "." ^ priorPieces in add_ref !cur_mod loc "" "" newPieces Library; newPieces) (Str.split (Str.regexp_string ".") lib_dp) "") done) with _ -> ()) | _ -> try Scanf.sscanf s "not %d %s %s" (fun loc sp id -> add_def loc loc (type_of_string "not") sp id) with Scanf.Scan_failure _ -> try Scanf.sscanf s "%s %d:%d %s %s" (fun ty loc1 loc2 sp id -> add_def loc1 loc2 (type_of_string ty) sp id) with Scanf.Scan_failure _ -> () end done; assert false with End_of_file -> close_in c coq-8.4pl2/tools/coq_tex.ml0000640000175000001440000002322012043513320014753 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* begin close_in chan_in; close_out chan_out end (* Second pass: insert the answers of Coq from [coq_output] into the * TeX file [texfile]. The result goes in file [result]. *) let begin_coq_example = Str.regexp "\\\\begin{coq_\\(example\\|example\\*\\|example\\#\\)}[ \t]*$" let begin_coq_eval = Str.regexp "\\\\begin{coq_eval}[ \t]*$" let end_coq_example = Str.regexp "\\\\end{coq_\\(example\\|example\\*\\|example\\#\\)}[ \t]*$" let end_coq_eval = Str.regexp "\\\\end{coq_eval}[ \t]*$" let dot_end_line = Str.regexp "\\.[ \t]*\\((\\*.*\\*)\\)?[ \t]*$" let has_match r s = try let _ = Str.search_forward r s 0 in true with Not_found -> false let percent = Str.regexp "%" let bang = Str.regexp "!" let expos = Str.regexp "^" let tex_escaped s = let dollar = "\\$" and backslash = "\\\\" and expon = "\\^" in let delims = Str.regexp ("[_{}&%#" ^ dollar ^ backslash ^ expon ^"~ <>]") in let adapt_delim = function | "_" | "{" | "}" | "&" | "%" | "#" | "$" as c -> "\\"^c | "\\" -> "{\\char'134}" | "^" -> "{\\char'136}" | "~" -> "{\\char'176}" | " " -> "~" | "<" -> "{<}" | ">" -> "{>}" | _ -> assert false in let adapt = function | Str.Text s -> s | Str.Delim s -> adapt_delim s in String.concat "" (List.map adapt (Str.full_split delims s)) let encapsule sl c_out s = if sl then Printf.fprintf c_out "\\texttt{\\textit{%s}}\\\\\n" (tex_escaped s) else Printf.fprintf c_out "\\texttt{%s}\\\\\n" (tex_escaped s) let print_block c_out bl = List.iter (fun s -> if s="" then () else encapsule !slanted c_out s) bl let insert texfile coq_output result = let c_tex = open_in texfile in let c_coq = open_in coq_output in let c_out = open_out result in (* next_block k : this function reads the next block of Coq output * removing the k leading prompts. 
* it returns the block as a list of string) *) let last_read = ref "" in let next_block k = if !last_read = "" then last_read := input_line c_coq; (* skip k prompts *) for i = 1 to k do last_read := remove_prompt !last_read; done; (* read and return the following lines until a prompt is found *) let rec read_lines () = let s = input_line c_coq in if Str.string_match any_prompt s 0 then begin last_read := s; [] end else s :: (read_lines ()) in let first = !last_read in first :: (read_lines ()) in (* we are just after \end{coq_...} block *) let rec just_after () = let s = input_line c_tex in if Str.string_match begin_coq_example s 0 then begin inside (Str.matched_group 1 s <> "example*") (Str.matched_group 1 s <> "example#") 0 false end else begin if !hrule then output_string c_out "\\hrulefill\\\\\n"; output_string c_out "\\end{flushleft}\n"; if !small then output_string c_out "\\end{small}\n"; if Str.string_match begin_coq_eval s 0 then eval 0 else begin output_string c_out (s ^ "\n"); outside () end end (* we are outside of a \begin{coq_...} ... \end{coq_...} block *) and outside () = let s = input_line c_tex in if Str.string_match begin_coq_example s 0 then begin if !small then output_string c_out "\\begin{small}\n"; output_string c_out "\\begin{flushleft}\n"; if !hrule then output_string c_out "\\hrulefill\\\\\n"; inside (Str.matched_group 1 s <> "example*") (Str.matched_group 1 s <> "example#") 0 true end else if Str.string_match begin_coq_eval s 0 then eval 0 else begin output_string c_out (s ^ "\n"); outside () end (* we are inside a \begin{coq_example?} ... \end{coq_example?} block * show_answers tells what kind of block it is * k is the number of lines read until now *) and inside show_answers show_questions k first_block = let s = input_line c_tex in if Str.string_match end_coq_example s 0 then begin just_after () end else begin if !verbose then Printf.printf "Coq < %s\n" s; if (not first_block) & k=0 then output_string c_out "\\medskip\n"; if show_questions then encapsule false c_out ("Coq < " ^ s); if has_match dot_end_line s then begin let bl = next_block (succ k) in if !verbose then List.iter print_endline bl; if show_answers then print_block c_out bl; inside show_answers show_questions 0 false end else inside show_answers show_questions (succ k) first_block end (* we are inside a \begin{coq_eval} ... \end{coq_eval} block * k is the number of lines read until now *) and eval k = let s = input_line c_tex in if Str.string_match end_coq_eval s 0 then outside () else begin if !verbose then Printf.printf "Coq < %s\n" s; if has_match dot_end_line s then let bl = next_block (succ k) in if !verbose then List.iter print_endline bl; eval 0 else eval (succ k) end in try let _ = next_block 0 in (* to skip the Coq banner *) let _ = next_block 0 in (* to skip the Coq answer to Set Printing Width *) outside () with End_of_file -> begin close_in c_tex; close_in c_coq; close_out c_out end (* Process of one TeX file *) let rm f = try Sys.remove f with _ -> () let one_file texfile = let inputv = Filename.temp_file "coq_tex" ".v" in let coq_output = Filename.temp_file "coq_tex" ".coq_output"in let result = if !output_specified then !output else if Filename.check_suffix texfile ".tex" then (Filename.chop_suffix texfile ".tex") ^ ".v.tex" else texfile ^ ".v.tex" in try (* 1. extract Coq phrases *) extract texfile inputv; (* 2. run Coq on input *) let _ = Sys.command (Printf.sprintf "%s < %s > %s 2>&1" !image inputv coq_output) in (* 3. 
insert Coq output into original file *) insert texfile coq_output result; (* 4. clean up *) rm inputv; rm coq_output with e -> begin rm inputv; rm coq_output; raise e end (* Parsing of the command line, check of the Coq command and process * of all the files in the command line, one by one *) let files = ref [] let parse_cl () = Arg.parse [ "-o", Arg.String (fun s -> output_specified := true; output := s), "output-file Specifiy the resulting LaTeX file"; "-n", Arg.Int (fun n -> linelen := n), "line-width Set the line width"; "-image", Arg.String (fun s -> image := s), "coq-image Use coq-image as Coq command"; "-w", Arg.Set cut_at_blanks, " Try to cut lines at blanks"; "-v", Arg.Set verbose, " Verbose mode (show Coq answers on stdout)"; "-sl", Arg.Set slanted, " Coq answers in slanted font (only with LaTeX2e)"; "-hrule", Arg.Set hrule, " Coq parts are written between 2 horizontal lines"; "-small", Arg.Set small, " Coq parts are written in small font"; "-boot", Arg.Set boot, " Launch coqtop with the -boot option" ] (fun s -> files := s :: !files) "coq-tex [options] file ..." let find_coqtop () = let prog = Sys.executable_name in try let size = String.length prog in let i = Str.search_backward (Str.regexp_string "coq-tex") prog (size-7) in (String.sub prog 0 i)^"coqtop"^(String.sub prog (i+7) (size-i-7)) with Not_found -> begin Printf.printf "Warning: preprocessing with default image \"coqtop\"\n"; "coqtop" end let main () = parse_cl (); if !image = "" then image := Filename.quote (find_coqtop ()); if !boot then image := !image ^ " -boot"; if Sys.command (!image ^ " -batch -silent") <> 0 then begin Printf.printf "Error: "; let _ = Sys.command (!image ^ " -batch") in exit 1 end else begin Printf.printf "Your version of coqtop seems OK\n"; flush stdout end; List.iter one_file (List.rev !files) let _ = Printexc.catch main () coq-8.4pl2/tools/gallina.ml0000640000175000001440000000350512010532755014733 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* begin flush !chan_out; close_in chan_in; if not !option_stdout then close_out !chan_out end with Sys_error _ -> () let traite_stdin () = try let buf = Lexing.from_channel stdin in try while true do Gallina_lexer.action buf done with Fin_fichier -> flush !chan_out with Sys_error _ -> () let gallina () = let lg_command = Array.length Sys.argv in if lg_command < 2 then begin output_string stderr "Usage: gallina [-] [-stdout] file1 file2 ...\n"; flush stderr; exit 1 end; let treat = function | "-" -> option_moins := true | "-stdout" -> option_stdout := true | "-nocomments" -> comments := false | f -> if Filename.check_suffix f ".v" then vfiles := (Filename.chop_suffix f ".v") :: !vfiles in Array.iter treat Sys.argv; if !option_moins then traite_stdin () else List.iter traite_fichier !vfiles let _ = Printexc.catch gallina () coq-8.4pl2/tools/compat5.ml0000640000175000001440000000125512010532755014674 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* for coq-inferior ; compatibility code for proofgeneral files (require 'coq-font-lock) ; ProofGeneral files. 
remember to remove coq version tests in ; coq-syntax.el (require 'coq-syntax) (defvar coq-mode-map nil "Keymap used in Coq mode.") (if coq-mode-map () (setq coq-mode-map (make-sparse-keymap)) (define-key coq-mode-map "\t" 'coq-indent-command) (define-key coq-mode-map "\M-\t" 'coq-unindent-command) (define-key coq-mode-map "\C-c\C-c" 'compile) ) (defvar coq-mode-syntax-table nil "Syntax table in use in Coq mode buffers.") (if coq-mode-syntax-table () (setq coq-mode-syntax-table (make-syntax-table)) ; ( is first character of comment start (modify-syntax-entry ?\( "()1" coq-mode-syntax-table) ; * is second character of comment start, ; and first character of comment end (modify-syntax-entry ?* ". 23" coq-mode-syntax-table) ; ) is last character of comment end (modify-syntax-entry ?\) ")(4" coq-mode-syntax-table) ; quote is a string-like delimiter (for character literals) (modify-syntax-entry ?' "\"" coq-mode-syntax-table) ; quote is part of words (modify-syntax-entry ?' "w" coq-mode-syntax-table) ) (defvar coq-mode-indentation 2 "*Indentation for each extra tab in Coq mode.") (defun coq-mode-variables () (set-syntax-table coq-mode-syntax-table) (make-local-variable 'paragraph-start) (setq paragraph-start (concat "^$\\|" page-delimiter)) (make-local-variable 'paragraph-separate) (setq paragraph-separate paragraph-start) (make-local-variable 'paragraph-ignore-fill-prefix) (setq paragraph-ignore-fill-prefix t) (make-local-variable 'require-final-newline) (setq require-final-newline t) (make-local-variable 'comment-start) (setq comment-start "(* ") (make-local-variable 'comment-end) (setq comment-end " *)") (make-local-variable 'comment-column) (setq comment-column 40) (make-local-variable 'comment-start-skip) (setq comment-start-skip "(\\*+ *") (make-local-variable 'parse-sexp-ignore-comments) (setq parse-sexp-ignore-comments nil) (make-local-variable 'indent-line-function) (setq indent-line-function 'coq-indent-command) (make-local-variable 'font-lock-keywords) (setq font-lock-defaults '(coq-font-lock-keywords-1))) ;;; The major mode (defun coq-mode () "Major mode for editing Coq code. Tab at the beginning of a line indents this line like the line above. Extra tabs increase the indentation level. \\{coq-mode-map} The variable coq-mode-indentation indicates how many spaces are inserted for each indentation level." (interactive) (kill-all-local-variables) (setq major-mode 'coq-mode) (setq mode-name "coq") (use-local-map coq-mode-map) (coq-mode-variables) (run-hooks 'coq-mode-hook)) ;;; Indentation stuff (defun coq-in-indentation () "Tests whether all characters between beginning of line and point are blanks." (save-excursion (skip-chars-backward " \t") (bolp))) (defun coq-indent-command () "Indent the current line in Coq mode. When the point is at the beginning of an empty line, indent this line like the line above. When the point is at the beginning of an indented line \(i.e. all characters between beginning of line and point are blanks\), increase the indentation by one level. The indentation size is given by the variable coq-mode-indentation. In all other cases, insert a tabulation (using insert-tab)." 
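;; Illustrative note (not in the original source): with the default coq-mode-indentation of 2, each extra TAB pressed inside the leading blanks of a line indents by two further columns (see the docstring above for the other cases).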
(interactive) (let* ((begline (save-excursion (beginning-of-line) (point))) (current-offset (- (point) begline)) (previous-indentation (save-excursion (if (eq (forward-line -1) 0) (current-indentation) 0)))) (cond ((and (bolp) (looking-at "[ \t]*$") (> previous-indentation 0)) (indent-to previous-indentation)) ((coq-in-indentation) (indent-to (+ current-offset coq-mode-indentation))) (t (insert-tab))))) (defun coq-unindent-command () "Decrease indentation by one level in Coq mode. Works only if the point is at the beginning of an indented line \(i.e. all characters between beginning of line and point are blanks\). Does nothing otherwise." (interactive) (let* ((begline (save-excursion (beginning-of-line) (point))) (current-offset (- (point) begline))) (if (and (>= current-offset coq-mode-indentation) (coq-in-indentation)) (backward-delete-char-untabify coq-mode-indentation)))) ;;; coq.el ends here (provide 'coq) coq-8.4pl2/tools/compat5b.ml0000640000175000001440000000125612010532755015037 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* coq_token val caml_action : Lexing.lexbuf -> mL_token val mllib_list : Lexing.lexbuf -> string list val ocamldep_parse : Lexing.lexbuf -> string list coq-8.4pl2/tools/coq-inferior.el0000640000175000001440000002652607456544706015742 0ustar notinusers;;; inferior-coq.el --- Run an inferior Coq process. ;;; ;;; Copyright (C) Marco Maggesi ;;; Time-stamp: "2002-02-28 12:15:04 maggesi" ;; Emacs Lisp Archive Entry ;; Filename: inferior-coq.el ;; Version: 1.0 ;; Keywords: process coq ;; Author: Marco Maggesi ;; Maintainer: Marco Maggesi ;; Description: Run an inferior Coq process. ;; URL: http://www.math.unifi.it/~maggesi/ ;; Compatibility: Emacs20, Emacs21, XEmacs21 ;; This is free software; you can redistribute it and/or modify it under ;; the terms of the GNU General Public License as published by the Free ;; Software Foundation; either version 2, or (at your option) any later ;; version. ;; ;; This is distributed in the hope that it will be useful, but WITHOUT ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ;; for more details. ;; ;; You should have received a copy of the GNU General Public License ;; along with GNU Emacs; see the file COPYING. If not, write to the ;; Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, ;; MA 02111-1307, USA. ;;; Commentary: ;; Coq is a proof assistant (http://coq.inria.fr/). This code run an ;; inferior Coq process and defines functions to send bits of code ;; from other buffers to the inferior process. This is a ;; customisation of comint-mode (see comint.el). For a more complex ;; and full featured Coq interface under Emacs look at Proof General ;; (http://zermelo.dcs.ed.ac.uk/~proofgen/). ;; ;; Written by Marco Maggesi with code heavly ;; borrowed from emacs cmuscheme.el ;; ;; Please send me bug reports, bug fixes, and extensions, so that I can ;; merge them into the master source. ;;; Installation: ;; You need to have coq.el already installed (it comes with the ;; standard Coq distribution) in order to use this code. Put this ;; file somewhere in you load-path and add the following lines in your ;; "~/.emacs": ;; ;; (setq auto-mode-alist (cons '("\\.v$" . coq-mode) auto-mode-alist)) ;; (autoload 'coq-mode "coq" "Major mode for editing Coq vernacular." 
t) ;; (autoload 'run-coq "inferior-coq" "Run an inferior Coq process." t) ;; (autoload 'run-coq-other-window "inferior-coq" ;; "Run an inferior Coq process in a new window." t) ;; (autoload 'run-coq-other-frame "inferior-coq" ;; "Run an inferior Coq process in a new frame." t) ;;; Usage: ;; Call `M-x "run-coq'. ;; ;; Functions and key bindings (Learn more keys with `C-c C-h' or `C-h m'): ;; C-return ('M-x coq-send-line) send the current line. ;; C-c C-r (`M-x coq-send-region') send the current region. ;; C-c C-a (`M-x coq-send-abort') send the command "Abort". ;; C-c C-t (`M-x coq-send-restart') send the command "Restart". ;; C-c C-s (`M-x coq-send-show') send the command "Show". ;; C-c C-u (`M-x coq-send-undo') send the command "Undo". ;; C-c C-v (`M-x coq-check-region') run command "Check" on region. ;; C-c . (`M-x coq-come-here') Restart and send until current point. ;;; Change Log: ;; From -0.0 to 1.0 brought into existence. (require 'coq) (require 'comint) (setq coq-program-name "coqtop") (defgroup inferior-coq nil "Run a coq process in a buffer." :group 'coq) (defcustom inferior-coq-mode-hook nil "*Hook for customising inferior-coq mode." :type 'hook :group 'coq) (defvar inferior-coq-mode-map (let ((m (make-sparse-keymap))) (define-key m "\C-c\C-r" 'coq-send-region) (define-key m "\C-c\C-a" 'coq-send-abort) (define-key m "\C-c\C-t" 'coq-send-restart) (define-key m "\C-c\C-s" 'coq-send-show) (define-key m "\C-c\C-u" 'coq-send-undo) (define-key m "\C-c\C-v" 'coq-check-region) m)) ;; Install the process communication commands in the coq-mode keymap. (define-key coq-mode-map [(control return)] 'coq-send-line) (define-key coq-mode-map "\C-c\C-r" 'coq-send-region) (define-key coq-mode-map "\C-c\C-a" 'coq-send-abort) (define-key coq-mode-map "\C-c\C-t" 'coq-send-restart) (define-key coq-mode-map "\C-c\C-s" 'coq-send-show) (define-key coq-mode-map "\C-c\C-u" 'coq-send-undo) (define-key coq-mode-map "\C-c\C-v" 'coq-check-region) (define-key coq-mode-map "\C-c." 'coq-come-here) (defvar coq-buffer) (define-derived-mode inferior-coq-mode comint-mode "Inferior Coq" "\ Major mode for interacting with an inferior Coq process. The following commands are available: \\{inferior-coq-mode-map} A Coq process can be fired up with M-x run-coq. Customisation: Entry to this mode runs the hooks on comint-mode-hook and inferior-coq-mode-hook (in that order). You can send text to the inferior Coq process from other buffers containing Coq source. Functions and key bindings (Learn more keys with `C-c C-h'): C-return ('M-x coq-send-line) send the current line. C-c C-r (`M-x coq-send-region') send the current region. C-c C-a (`M-x coq-send-abort') send the command \"Abort\". C-c C-t (`M-x coq-send-restart') send the command \"Restart\". C-c C-s (`M-x coq-send-show') send the command \"Show\". C-c C-u (`M-x coq-send-undo') send the command \"Undo\". C-c C-v (`M-x coq-check-region') run command \"Check\" on region. C-c . (`M-x coq-come-here') Restart and send until current point. " ;; Customise in inferior-coq-mode-hook (setq comint-prompt-regexp "^[^<]* < *") (coq-mode-variables) (setq mode-line-process '(":%s")) (setq comint-input-filter (function coq-input-filter)) (setq comint-get-old-input (function coq-get-old-input))) (defcustom inferior-coq-filter-regexp "\\`\\s *\\S ?\\S ?\\s *\\'" "*Input matching this regexp are not saved on the history list. Defaults to a regexp ignoring all inputs of 0, 1, or 2 letters." 
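;; Hypothetical examples of the default value: one- or two-character inputs such as "n" or "S." match and are kept out of the history, while longer phrases like "Show." are recorded.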
:type 'regexp :group 'inferior-coq) (defun coq-input-filter (str) "Don't save anything matching `inferior-coq-filter-regexp'." (not (string-match inferior-coq-filter-regexp str))) (defun coq-get-old-input () "Snarf the sexp ending at point." (save-excursion (let ((end (point))) (backward-sexp) (buffer-substring (point) end)))) (defun coq-args-to-list (string) (let ((where (string-match "[ \t]" string))) (cond ((null where) (list string)) ((not (= where 0)) (cons (substring string 0 where) (coq-args-to-list (substring string (+ 1 where) (length string))))) (t (let ((pos (string-match "[^ \t]" string))) (if (null pos) nil (coq-args-to-list (substring string pos (length string))))))))) ;;;###autoload (defun run-coq (cmd) "Run an inferior Coq process, input and output via buffer *coq*. If there is a process already running in `*coq*', switch to that buffer. With argument, allows you to edit the command line (default is value of `coq-program-name'). Runs the hooks `inferior-coq-mode-hook' \(after the `comint-mode-hook' is run). \(Type \\[describe-mode] in the process buffer for a list of commands.)" (interactive (list (if current-prefix-arg (read-string "Run Coq: " coq-program-name) coq-program-name))) (if (not (comint-check-proc "*coq*")) (let ((cmdlist (coq-args-to-list cmd))) (set-buffer (apply 'make-comint "coq" (car cmdlist) nil (cdr cmdlist))) (inferior-coq-mode))) (setq coq-program-name cmd) (setq coq-buffer "*coq*") (switch-to-buffer "*coq*")) ;;;###autoload (add-hook 'same-window-buffer-names "*coq*") ;;;###autoload (defun run-coq-other-window (cmd) "Run an inferior Coq process, input and output via buffer *coq*. If there is a process already running in `*coq*', switch to that buffer. With argument, allows you to edit the command line (default is value of `coq-program-name'). Runs the hooks `inferior-coq-mode-hook' \(after the `comint-mode-hook' is run). \(Type \\[describe-mode] in the process buffer for a list of commands.)" (interactive (list (if current-prefix-arg (read-string "Run Coq: " coq-program-name) coq-program-name))) (if (not (comint-check-proc "*coq*")) (let ((cmdlist (coq-args-to-list cmd))) (set-buffer (apply 'make-comint "coq" (car cmdlist) nil (cdr cmdlist))) (inferior-coq-mode))) (setq coq-program-name cmd) (setq coq-buffer "*coq*") (pop-to-buffer "*coq*")) ;;;###autoload (add-hook 'same-window-buffer-names "*coq*") (defun run-coq-other-frame (cmd) "Run an inferior Coq process, input and output via buffer *coq*. If there is a process already running in `*coq*', switch to that buffer. With argument, allows you to edit the command line (default is value of `coq-program-name'). Runs the hooks `inferior-coq-mode-hook' \(after the `comint-mode-hook' is run). \(Type \\[describe-mode] in the process buffer for a list of commands.)" (interactive (list (if current-prefix-arg (read-string "Run Coq: " coq-program-name) coq-program-name))) (if (not (comint-check-proc "*coq*")) (let ((cmdlist (coq-args-to-list cmd))) (set-buffer (apply 'make-comint "coq" (car cmdlist) nil (cdr cmdlist))) (inferior-coq-mode))) (setq coq-program-name cmd) (setq coq-buffer "*coq*") (switch-to-buffer-other-frame "*coq*")) (defun switch-to-coq (eob-p) "Switch to the coq process buffer. With argument, position cursor at end of buffer." (interactive "P") (if (get-buffer coq-buffer) (pop-to-buffer coq-buffer) (error "No current process buffer. 
See variable `coq-buffer'")) (cond (eob-p (push-mark) (goto-char (point-max))))) (defun coq-send-region (start end) "Send the current region to the inferior Coq process." (interactive "r") (comint-send-region (coq-proc) start end) (comint-send-string (coq-proc) "\n")) (defun coq-send-line () "Send the current line to the Coq process." (interactive) (save-excursion (end-of-line) (let ((end (point))) (beginning-of-line) (coq-send-region (point) end))) (next-line 1)) (defun coq-send-abort () "Send the command \"Abort.\" to the inferior Coq process." (interactive) (comint-send-string (coq-proc) "Abort.\n")) (defun coq-send-restart () "Send the command \"Restart.\" to the inferior Coq process." (interactive) (comint-send-string (coq-proc) "Restart.\n")) (defun coq-send-undo () "Reset coq to the initial state and send the region between the beginning of file and the point." (interactive) (comint-send-string (coq-proc) "Undo.\n")) (defun coq-check-region (start end) "Run the commmand \"Check\" on the current region." (interactive "r") (comint-proc-query (coq-proc) (concat "Check " (buffer-substring start end) ".\n"))) (defun coq-send-show () "Send the command \"Show.\" to the inferior Coq process." (interactive) (comint-send-string (coq-proc) "Show.\n")) (defun coq-come-here () "Reset coq to the initial state and send the region between the beginning of file and the point." (interactive) (comint-send-string (coq-proc) "Reset Initial.\n") (coq-send-region 1 (point))) (defvar coq-buffer nil "*The current coq process buffer.") (defun coq-proc () "Return the current coq process. See variable `coq-buffer'." (let ((proc (get-buffer-process (if (eq major-mode 'inferior-coq-mode) (current-buffer) coq-buffer)))) (or proc (error "No current process. See variable `coq-buffer'")))) (defcustom inferior-coq-load-hook nil "This hook is run when inferior-coq is loaded in. This is a good place to put keybindings." :type 'hook :group 'inferior-coq) (run-hooks 'inferior-coq-load-hook) (provide 'inferior-coq) coq-8.4pl2/tools/coqdep_lexer.mll0000640000175000001440000002302612010532755016152 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* " | "." | ".." | ".(" | ".[" | ":" | "::" | ":=" | ";" | ";;" | "<-" | "=" | "[" | "[|" | "[<" | "]" | "_" | "{" | "|" | "||" | "|]" | ">]" | "}" | "!=" | "-" | "-." { caml_action lexbuf } | ['!' '?' '~'] ['!' '$' '%' '&' '*' '+' '-' '.' '/' ':' '<' '=' '>' '?' '@' '^' '|' '~'] * { caml_action lexbuf } | ['=' '<' '>' '@' '^' '|' '&' '$'] ['!' '$' '%' '&' '*' '+' '-' '.' '/' ':' '<' '=' '>' '?' '@' '^' '|' '~'] * { caml_action lexbuf } | ['+' '-'] ['!' '$' '%' '&' '*' '+' '-' '.' '/' ':' '<' '=' '>' '?' '@' '^' '|' '~'] * { caml_action lexbuf } | "**" ['!' '$' '%' '&' '*' '+' '-' '.' '/' ':' '<' '=' '>' '?' '@' '^' '|' '~'] * { caml_action lexbuf } | ['*' '/' '%'] ['!' '$' '%' '&' '*' '+' '-' '.' '/' ':' '<' '=' '>' '?' 
'@' '^' '|' '~'] * { caml_action lexbuf } | eof { raise Fin_fichier } | _ { caml_action lexbuf } and comment = parse | "(*" (* "*)" *) { comment_depth := succ !comment_depth; comment lexbuf } | "*)" { comment_depth := pred !comment_depth; if !comment_depth > 0 then comment lexbuf } | "'" [^ '\\' '\''] "'" { comment lexbuf } | "'" '\\' ['\\' '\'' 'n' 't' 'b' 'r'] "'" { comment lexbuf } | "'" '\\' ['0'-'9'] ['0'-'9'] ['0'-'9'] "'" { comment lexbuf } | eof { raise Fin_fichier } | _ { comment lexbuf } and string = parse | '"' (* '"' *) { () } | '\\' ("\010" | "\013" | "\010\013") [' ' '\009'] * { string lexbuf } | '\\' ['\\' '"' 'n' 't' 'b' 'r'] (*'"'*) { string lexbuf } | '\\' ['0'-'9'] ['0'-'9'] ['0'-'9'] { string lexbuf } | eof { raise Fin_fichier } | _ { string lexbuf } and load_file = parse | '"' [^ '"']* '"' (*'"'*) { let s = lexeme lexbuf in parse_dot lexbuf; Load (unquote_vfile_string s) } | coq_ident { let s = lexeme lexbuf in skip_to_dot lexbuf; Load s } | eof { syntax_error lexbuf } | _ { syntax_error lexbuf } and require_file = parse | "(*" { comment_depth := 1; comment lexbuf; require_file lexbuf } | space+ { require_file lexbuf } | coq_ident { module_current_name := [Lexing.lexeme lexbuf]; module_names := [coq_qual_id_tail lexbuf]; let qid = coq_qual_id_list lexbuf in parse_dot lexbuf; Require qid } | '"' [^'"']* '"' (*'"'*) { let s = Lexing.lexeme lexbuf in parse_dot lexbuf; RequireString (unquote_vfile_string s) } | eof { syntax_error lexbuf } | _ { syntax_error lexbuf } and skip_to_dot = parse | dot { () } | eof { syntax_error lexbuf } | _ { skip_to_dot lexbuf } and parse_dot = parse | dot { () } | eof { syntax_error lexbuf } | _ { syntax_error lexbuf } and coq_qual_id = parse | "(*" { comment_depth := 1; comment lexbuf; coq_qual_id lexbuf } | space+ { coq_qual_id lexbuf } | coq_ident { module_current_name := [Lexing.lexeme lexbuf]; coq_qual_id_tail lexbuf } | eof { syntax_error lexbuf } | _ { backtrack lexbuf; let qid = List.rev !module_current_name in module_current_name := []; qid } and coq_qual_id_tail = parse | "(*" { comment_depth := 1; comment lexbuf; coq_qual_id_tail lexbuf } | space+ { coq_qual_id_tail lexbuf } | coq_field { module_current_name := field_name (Lexing.lexeme lexbuf) :: !module_current_name; coq_qual_id_tail lexbuf } | eof { syntax_error lexbuf } | _ { backtrack lexbuf; let qid = List.rev !module_current_name in module_current_name := []; qid } and coq_qual_id_list = parse | "(*" { comment_depth := 1; comment lexbuf; coq_qual_id_list lexbuf } | space+ { coq_qual_id_list lexbuf } | coq_ident { module_current_name := [Lexing.lexeme lexbuf]; module_names := coq_qual_id_tail lexbuf :: !module_names; coq_qual_id_list lexbuf } | eof { syntax_error lexbuf } | _ { backtrack lexbuf; List.rev !module_names } and modules = parse | space+ { modules lexbuf } | "(*" { comment_depth := 1; comment lexbuf; modules lexbuf } | '"' [^'"']* '"' { let lex = (Lexing.lexeme lexbuf) in let str = String.sub lex 1 (String.length lex - 2) in mllist := str :: !mllist; modules lexbuf} | eof { syntax_error lexbuf } | _ { (Declare (List.rev !mllist)) } and qual_id = parse | '.' [^ '.' 
'(' '['] { Use_module (String.uncapitalize !ml_module_name) } | eof { raise Fin_fichier } | _ { caml_action lexbuf } and mllib_list = parse | caml_up_ident { let s = String.uncapitalize (Lexing.lexeme lexbuf) in s :: mllib_list lexbuf } | "*predef*" { mllib_list lexbuf } | space+ { mllib_list lexbuf } | eof { [] } | _ { syntax_error lexbuf } and ocamldep_parse = parse | [^ ':' ]* ':' { mllib_list lexbuf } coq-8.4pl2/tools/escape_string.ml0000640000175000001440000000005311757162655016164 0ustar notinusersprint_string (String.escaped Sys.argv.(1)) coq-8.4pl2/tools/mkwinapp.ml0000640000175000001440000000571011554053543015157 0ustar notinusers(* OCaml-Win32 * mkwinapp.ml * Copyright (c) 2002-2004 by Harry Chomsky * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *) (********************************************************************* * This program alters an .exe file to make it use the "windows subsystem" * instead of the "console subsystem". In other words, when Windows runs * the program, it will not create a console for it. *) (* Pierre Letouzey 23/12/2010 : modification to allow selecting the subsystem to use instead of just setting the windows subsystem *) (* This tool can be run directly via : ocaml unix.cma mkwinapp.ml [-set|-unset] *) exception Invalid_file_format let input_word ic = let lo = input_byte ic in let hi = input_byte ic in (hi lsl 8) + lo let find_pe_header ic = seek_in ic 0x3C; let peheader = input_word ic in seek_in ic peheader; if input_char ic <> 'P' then raise Invalid_file_format; if input_char ic <> 'E' then raise Invalid_file_format; peheader let find_optional_header ic = let peheader = find_pe_header ic in let coffheader = peheader + 4 in seek_in ic (coffheader + 16); let optsize = input_word ic in if optsize < 96 then raise Invalid_file_format; let optheader = coffheader + 20 in seek_in ic optheader; let magic = input_word ic in if magic <> 0x010B && magic <> 0x020B then raise Invalid_file_format; optheader let change flag ic oc = let optheader = find_optional_header ic in seek_out oc (optheader + 64); for i = 1 to 4 do output_byte oc 0 done; output_byte oc (if flag then 2 else 3) let usage () = print_endline "Alters a Win32 executable file to use the Windows subsystem or not."; print_endline "Usage: mkwinapp [-set|-unset] "; print_endline "Giving no option is equivalent to -set"; exit 1 let main () = let n = Array.length Sys.argv - 1 in if not (n = 1 || n = 2) then usage (); let flag = if n = 1 then true else if Sys.argv.(1) = "-set" then true else if Sys.argv.(1) = "-unset" then false else usage () in let filename = Sys.argv.(n) in let f = Unix.openfile filename [Unix.O_RDWR] 0 in let ic = Unix.in_channel_of_descr f and oc = Unix.out_channel_of_descr f in change flag ic oc let _ = main () coq-8.4pl2/tools/check-translate0000750000175000001440000000171710423774771016006 0ustar notinusers#!/bin/sh echo -------------- 
Producing translated files --------------------- rm */*/*.v8 >& /dev/null make COQ_XML=-translate theories || { echo ---- Failed to translate; exit 1; } if [ -e translated ]; then rm -r translated; fi if [ -e successful-translation ]; then rm -r successful-translation; fi if [ -e failed-translation ]; then rm -r failed-translation; fi mv theories translated mkdir theories echo -------------------- Upgrading files -------------------------- cd translated for i in */*.v do mkdir ../theories/`dirname $i` >& /dev/null mv "$i"8 ../theories/$i done cd .. echo --------------- Recompiling translated files ------------------ make theories || { echo ---- Failed to recompile; mv theories failed-translation; mv translated theories; exit 1; } echo ----------------- Recompilation successful -------------------- if [ -e successful-translation ]; then rm -r successful-translation; fi mv theories successful-translation; mv translated theories coq-8.4pl2/tools/coq-font-lock.el0000640000175000001440000001144611207534415015775 0ustar notinusers;; coq-font-lock.el --- Coq syntax highlighting for Emacs - compatibilty code ;; Pierre Courtieu, may 2009 ;; ;; Authors: Pierre Courtieu ;; License: GPL (GNU GENERAL PUBLIC LICENSE) ;; Maintainer: Pierre Courtieu ;; This is copy paste from ProofGeneral by David Aspinall ;; . ProofGeneral is under GPL and Copyright ;; (C) LFCS Edinburgh. ;;; Commentary: ;; This file contains the code necessary to coq-syntax.el and ;; coq-db.el from ProofGeneral. It is also pocked from ProofGeneral. ;;; History: ;; First created from ProofGeneral may 28th 2009 ;;; Code: (setq coq-version-is-V8-1 t) (defun coq-build-regexp-list-from-db (db &optional filter) "Take a keyword database DB and return the list of regexps for font-lock. If non-nil Optional argument FILTER is a function applying to each line of DB. For each line if FILTER returns nil, then the keyword is not added to the regexp. See `coq-syntax-db' for DB structure." (let ((l db) (res ())) (while l (let* ((hd (car l)) (tl (cdr l)) ; hd is the first infos list (e1 (car hd)) (tl1 (cdr hd)) ; e1 = menu entry (e2 (car tl1)) (tl2 (cdr tl1)) ; e2 = abbreviation (e3 (car tl2)) (tl3 (cdr tl2)) ; e3 = completion (e4 (car-safe tl3)) (tl4 (cdr-safe tl3)) ; e4 = state changing (e5 (car-safe tl4)) (tl5 (cdr-safe tl4)) ; e5 = colorization string ) ;; TODO delete doublons (when (and e5 (or (not filter) (funcall filter hd))) (setq res (nconc res (list e5)))) ; careful: nconc destructive! (setq l tl))) res )) (defun filter-state-preserving (l) ; checkdoc-params: (l) "Not documented." (not (nth 3 l))) ; fourth argument is nil --> state preserving command (defun filter-state-changing (l) ; checkdoc-params: (l) "Not documented." (nth 3 l)) ; fourth argument is nil --> state preserving command ;; Generic font-lock (defvar proof-id "\\(\\w\\(\\w\\|\\s_\\)*\\)" "A regular expression for parsing identifiers.") ;; For font-lock, we treat ,-separated identifiers as one identifier ;; and refontify commata using \{proof-zap-commas}. (defun proof-anchor-regexp (e) "Anchor (\\`) and group the regexp E." (concat "\\`\\(" e "\\)")) (defun proof-ids (proof-id &optional sepregexp) "Generate a regular expression for separated lists of identifiers PROOF-ID. Default is comma separated, or SEPREGEXP if set." (concat proof-id "\\(\\s-*" (or sepregexp ",") "\\s-*" proof-id "\\)*")) (defun proof-ids-to-regexp (l) "Maps a non-empty list of tokens `L' to a regexp matching any element." 
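;; Worked example (illustrative only): on GNU Emacs, applying this to '("Lemma" "Theorem") yields the regexp \_<\(?:Lemma\|Theorem\)\_>, so each keyword is matched only as a whole symbol.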
(if (featurep 'xemacs) (mapconcat (lambda (s) (concat "\\_<" s "\\_>")) l "\\|") ;; old version (concat "\\_<\\(?:" (mapconcat 'identity l "\\|") "\\)\\_>"))) ;; TODO: get rid of this list. Does 'default work widely enough ;; by now? (defconst pg-defface-window-systems '(x ;; bog standard mswindows ;; Windows w32 ;; Windows gtk ;; gtk emacs (obsolete?) mac ;; used by Aquamacs carbon ;; used by Carbon XEmacs ns ;; NeXTstep Emacs (Emacs.app) x-toolkit) ;; possible catch all (but probably not) "A list of possible values for variable `window-system'. If you are on a window system and your value of variable `window-system' is not listed here, you may not get the correct syntax colouring behaviour.") (defmacro proof-face-specs (bl bd ow) "Return a spec for `defface' with BL for light bg, BD for dark, OW o/w." `(append (apply 'append (mapcar (lambda (ty) (list (list (list (list 'type ty) '(class color) (list 'background 'light)) (quote ,bl)) (list (list (list 'type ty) '(class color) (list 'background 'dark)) (quote ,bd)))) pg-defface-window-systems)) (list (list t (quote ,ow))))) ;;A new face for tactics (defface coq-solve-tactics-face (proof-face-specs (:foreground "forestgreen" t) ; for light backgrounds (:foreground "forestgreen" t) ; for dark backgrounds ()) ; for black and white "Face for names of closing tactics in proof scripts." :group 'proof-faces) ;;A new face for tactics which fail when they don't kill the current goal (defface coq-solve-tactics-face (proof-face-specs (:foreground "red" t) ; for light backgrounds (:foreground "red" t) ; for dark backgrounds ()) ; for black and white "Face for names of closing tactics in proof scripts." :group 'proof-faces) (defconst coq-solve-tactics-face 'coq-solve-tactics-face "Expression that evaluates to a face. Required so that 'proof-solve-tactics-face is a proper facename") (defconst proof-tactics-name-face 'coq-solve-tactics-face) (defconst proof-tacticals-name-face 'coq-solve-tactics-face) (provide 'coq-font-lock) ;;; coq-font-lock.el ends here coq-8.4pl2/tools/coq-sl.sty0000750000175000001440000000134407024515437014741 0ustar notinusers% COQ style option, for use with the coq-latex filter. \typeout{Document Style option `coq-sl' <7 Apr 92>.} \ifcase\@ptsize \font\sltt = cmsltt10 \or \font\sltt = cmsltt10 \@halfmag \or \font\sltt = cmsltt10 \@magscale1 \fi {\catcode`\^^M=\active % \gdef\@coqinputline#1^^M{\tt Coq < #1\par} % \gdef\@coqoutputline#1^^M{\sltt#1\par} } % \def\@coqblankline{\medskip} \chardef\@coqbackslash="5C \def\coq{ \bgroup \flushleft \parindent 0pt \parskip 0pt \let\do\@makeother\dospecials \catcode`\^^M=\active \catcode`\\=0 \catcode`\ \active \frenchspacing \@vobeyspaces \let\?\@coqinputline \let\:\@coqoutputline \let\;\@coqblankline \let\\\@coqbackslash } \def\endcoq{ \endflushleft \egroup\noindent } coq-8.4pl2/tools/README.coq-tex0000750000175000001440000000057407024515437015245 0ustar notinusersDESCRIPTION. The coq-tex filter extracts Coq phrases embedded in LaTeX files, evaluates them, and inserts the outcome of the evaluation after each phrase. The filter is written in Perl, so you'll need Perl version 4 installed on your machine. USAGE. See the manual page (coq-tex.1). AUTHOR. Jean-Christophe Filliatre (jcfillia@lip.ens-lyon.fr) from caml-tex of Xavier Leroy.
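EXAMPLE. (An illustrative sketch, not part of the original README; the environment names and the .v.tex suffix come from tools/coq_tex.ml above, and "Check O." stands for any Coq phrase.) A LaTeX source, say file.tex, may contain \begin{coq_example} Check O. \end{coq_example} Running coq-tex on it produces file.v.tex, in which the answer of coqtop is inserted after each phrase; \begin{coq_eval} ... \end{coq_eval} blocks are evaluated silently, without appearing in the output.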
coq-8.4pl2/checker/0000750000175000001440000000000012127276531013240 5ustar notinuserscoq-8.4pl2/checker/check_stat.ml0000640000175000001440000000357412010532755015706 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* str "Theory: Set is impredicative" | None -> str "Theory: Set is predicative" let cst_filter f csts = Cmap_env.fold (fun c ce acc -> if f c ce then c::acc else acc) csts [] let is_ax _ cb = not (constant_has_body cb) let pr_ax csts = let axs = cst_filter is_ax csts in if axs = [] then str "Axioms: " else hv 2 (str "Axioms:" ++ fnl() ++ prlist_with_sep fnl Indtypes.prcon axs) let print_context env = if !output_context then begin let {env_globals= {env_constants=csts; env_inductives=inds; env_modules=mods; env_modtypes=mtys}; env_stratification= {env_universes=univ; env_engagement=engt}} = env in msgnl(hov 0 (fnl() ++ str"CONTEXT SUMMARY" ++ fnl() ++ str"===============" ++ fnl() ++ fnl() ++ str "* " ++ hov 0 (pr_engt engt ++ fnl()) ++ fnl() ++ str "* " ++ hov 0 (pr_ax csts) ++ fnl())) end let stats () = print_context (Safe_typing.get_env()); print_memory_stat () coq-8.4pl2/checker/declarations.ml0000640000175000001440000006340411546054263016252 0ustar notinusersopen Util open Names open Term open Validate (* Bytecode *) type values type reloc_table type to_patch_substituted (*Retroknowledge *) type action type retroknowledge type engagement = ImpredicativeSet let val_eng = val_enum "eng" 1 type polymorphic_arity = { poly_param_levels : Univ.universe option list; poly_level : Univ.universe; } let val_pol_arity = val_tuple ~name:"polyorphic_arity"[|val_list(val_opt val_univ);val_univ|] type constant_type = | NonPolymorphicType of constr | PolymorphicArity of rel_context * polymorphic_arity let val_cst_type = val_sum "constant_type" 0 [|[|val_constr|];[|val_rctxt;val_pol_arity|]|] (** Substitutions, code imported from kernel/mod_subst *) type delta_hint = | Inline of int * constr option | Equiv of kernel_name module Deltamap = struct type t = module_path MPmap.t * delta_hint KNmap.t let empty = MPmap.empty, KNmap.empty let add_kn kn hint (mm,km) = (mm,KNmap.add kn hint km) let add_mp mp mp' (mm,km) = (MPmap.add mp mp' mm, km) let remove_mp mp (mm,km) = (MPmap.remove mp mm, km) let find_mp mp map = MPmap.find mp (fst map) let find_kn kn map = KNmap.find kn (snd map) let mem_mp mp map = MPmap.mem mp (fst map) let mem_kn kn map = KNmap.mem kn (snd map) let fold_kn f map i = KNmap.fold f (snd map) i let fold fmp fkn (mm,km) i = MPmap.fold fmp mm (KNmap.fold fkn km i) let join map1 map2 = fold add_mp add_kn map1 map2 end type delta_resolver = Deltamap.t let empty_delta_resolver = Deltamap.empty module MBImap = Map.Make (struct type t = mod_bound_id let compare = Pervasives.compare end) module Umap = struct type 'a t = 'a MPmap.t * 'a MBImap.t let empty = MPmap.empty, MBImap.empty let is_empty (m1,m2) = MPmap.is_empty m1 && MBImap.is_empty m2 let add_mbi mbi x (m1,m2) = (m1,MBImap.add mbi x m2) let add_mp mp x (m1,m2) = (MPmap.add mp x m1, m2) let find_mp mp map = MPmap.find mp (fst map) let find_mbi mbi map = MBImap.find mbi (snd map) let mem_mp mp map = MPmap.mem mp (fst map) let mem_mbi mbi map = MBImap.mem mbi (snd map) let iter_mbi f map = MBImap.iter f (snd map) let fold fmp fmbi (m1,m2) i = MPmap.fold fmp m1 (MBImap.fold fmbi m2 i) let join map1 map2 = fold add_mp add_mbi map1 map2 end type substitution = (module_path * delta_resolver) Umap.t type 'a subst_fun = substitution 
-> 'a -> 'a let empty_subst = Umap.empty let is_empty_subst = Umap.is_empty let val_delta_hint = val_sum "delta_hint" 0 [|[|val_int; val_opt val_constr|];[|val_kn|]|] let val_res = val_tuple ~name:"delta_resolver" [|val_map ~name:"delta_resolver" val_mp val_mp; val_map ~name:"delta_resolver" val_kn val_delta_hint|] let val_mp_res = val_tuple [|val_mp;val_res|] let val_subst = val_tuple ~name:"substitution" [|val_map ~name:"substitution" val_mp val_mp_res; val_map ~name:"substitution" val_uid val_mp_res|] let add_mbid mbid mp = Umap.add_mbi mbid (mp,empty_delta_resolver) let add_mp mp1 mp2 = Umap.add_mp mp1 (mp2,empty_delta_resolver) let map_mbid mbid mp = add_mbid mbid mp empty_subst let map_mp mp1 mp2 = add_mp mp1 mp2 empty_subst let mp_in_delta mp = Deltamap.mem_mp mp let rec find_prefix resolve mp = let rec sub_mp = function | MPdot(mp,l) as mp_sup -> (try Deltamap.find_mp mp_sup resolve with Not_found -> MPdot(sub_mp mp,l)) | p -> Deltamap.find_mp p resolve in try sub_mp mp with Not_found -> mp (** Nota: the following function is slightly different in mod_subst PL: Is it on purpose ? *) let solve_delta_kn resolve kn = try match Deltamap.find_kn kn resolve with | Equiv kn1 -> kn1 | Inline _ -> raise Not_found with Not_found -> let mp,dir,l = repr_kn kn in let new_mp = find_prefix resolve mp in if mp == new_mp then kn else make_kn new_mp dir l let gen_of_delta resolve x kn fix_can = try let new_kn = solve_delta_kn resolve kn in if kn == new_kn then x else fix_can new_kn with _ -> x let constant_of_delta resolve con = let kn = user_con con in gen_of_delta resolve con kn (constant_of_kn_equiv kn) let constant_of_delta2 resolve con = let kn, kn' = canonical_con con, user_con con in gen_of_delta resolve con kn (constant_of_kn_equiv kn') let mind_of_delta resolve mind = let kn = user_mind mind in gen_of_delta resolve mind kn (mind_of_kn_equiv kn) let mind_of_delta2 resolve mind = let kn, kn' = canonical_mind mind, user_mind mind in gen_of_delta resolve mind kn (mind_of_kn_equiv kn') let find_inline_of_delta kn resolve = match Deltamap.find_kn kn resolve with | Inline (_,o) -> o | _ -> raise Not_found let constant_of_delta_with_inline resolve con = let kn1,kn2 = canonical_con con,user_con con in try find_inline_of_delta kn2 resolve with Not_found -> try find_inline_of_delta kn1 resolve with Not_found -> None let subst_mp0 sub mp = (* 's like subst *) let rec aux mp = match mp with | MPfile sid -> Umap.find_mp mp sub | MPbound bid -> begin try Umap.find_mbi bid sub with Not_found -> Umap.find_mp mp sub end | MPdot (mp1,l) as mp2 -> begin try Umap.find_mp mp2 sub with Not_found -> let mp1',resolve = aux mp1 in MPdot (mp1',l),resolve end in try Some (aux mp) with Not_found -> None let subst_mp sub mp = match subst_mp0 sub mp with None -> mp | Some (mp',_) -> mp' let subst_kn_delta sub kn = let mp,dir,l = repr_kn kn in match subst_mp0 sub mp with Some (mp',resolve) -> solve_delta_kn resolve (make_kn mp' dir l) | None -> kn let subst_kn sub kn = let mp,dir,l = repr_kn kn in match subst_mp0 sub mp with Some (mp',_) -> make_kn mp' dir l | None -> kn exception No_subst type sideconstantsubst = | User | Canonical let gen_subst_mp f sub mp1 mp2 = match subst_mp0 sub mp1, subst_mp0 sub mp2 with | None, None -> raise No_subst | Some (mp',resolve), None -> User, (f mp' mp2), resolve | None, Some (mp',resolve) -> Canonical, (f mp1 mp'), resolve | Some (mp1',_), Some (mp2',resolve2) -> Canonical, (f mp1' mp2'), resolve2 let subst_ind sub mind = let kn1,kn2 = user_mind mind, canonical_mind mind in let 
mp1,dir,l = repr_kn kn1 in let mp2,_,_ = repr_kn kn2 in let rebuild_mind mp1 mp2 = make_mind_equiv mp1 mp2 dir l in try let side,mind',resolve = gen_subst_mp rebuild_mind sub mp1 mp2 in match side with | User -> mind_of_delta resolve mind' | Canonical -> mind_of_delta2 resolve mind' with No_subst -> mind let subst_con0 sub con = let kn1,kn2 = user_con con,canonical_con con in let mp1,dir,l = repr_kn kn1 in let mp2,_,_ = repr_kn kn2 in let rebuild_con mp1 mp2 = make_con_equiv mp1 mp2 dir l in let dup con = con, Const con in let side,con',resolve = gen_subst_mp rebuild_con sub mp1 mp2 in match constant_of_delta_with_inline resolve con' with | Some t -> con', t | None -> let con'' = match side with | User -> constant_of_delta resolve con' | Canonical -> constant_of_delta2 resolve con' in if con'' == con then raise No_subst else dup con'' let rec map_kn f f' c = let func = map_kn f f' in match c with | Const kn -> (try snd (f' kn) with No_subst -> c) | Ind (kn,i) -> let kn' = f kn in if kn'==kn then c else Ind (kn',i) | Construct ((kn,i),j) -> let kn' = f kn in if kn'==kn then c else Construct ((kn',i),j) | Case (ci,p,ct,l) -> let ci_ind = let (kn,i) = ci.ci_ind in let kn' = f kn in if kn'==kn then ci.ci_ind else kn',i in let p' = func p in let ct' = func ct in let l' = array_smartmap func l in if (ci.ci_ind==ci_ind && p'==p && l'==l && ct'==ct)then c else Case ({ci with ci_ind = ci_ind}, p',ct', l') | Cast (ct,k,t) -> let ct' = func ct in let t'= func t in if (t'==t && ct'==ct) then c else Cast (ct', k, t') | Prod (na,t,ct) -> let ct' = func ct in let t'= func t in if (t'==t && ct'==ct) then c else Prod (na, t', ct') | Lambda (na,t,ct) -> let ct' = func ct in let t'= func t in if (t'==t && ct'==ct) then c else Lambda (na, t', ct') | LetIn (na,b,t,ct) -> let ct' = func ct in let t'= func t in let b'= func b in if (t'==t && ct'==ct && b==b') then c else LetIn (na, b', t', ct') | App (ct,l) -> let ct' = func ct in let l' = array_smartmap func l in if (ct'== ct && l'==l) then c else App (ct',l') | Evar (e,l) -> let l' = array_smartmap func l in if (l'==l) then c else Evar (e,l') | Fix (ln,(lna,tl,bl)) -> let tl' = array_smartmap func tl in let bl' = array_smartmap func bl in if (bl == bl'&& tl == tl') then c else Fix (ln,(lna,tl',bl')) | CoFix(ln,(lna,tl,bl)) -> let tl' = array_smartmap func tl in let bl' = array_smartmap func bl in if (bl == bl'&& tl == tl') then c else CoFix (ln,(lna,tl',bl')) | _ -> c let subst_mps sub c = if is_empty_subst sub then c else map_kn (subst_ind sub) (subst_con0 sub) c type 'a lazy_subst = | LSval of 'a | LSlazy of substitution list * 'a type 'a substituted = 'a lazy_subst ref let val_substituted val_a = val_ref (val_sum "constr_substituted" 0 [|[|val_a|];[|val_list val_subst;val_a|]|]) let from_val a = ref (LSval a) let rec replace_mp_in_mp mpfrom mpto mp = match mp with | _ when mp = mpfrom -> mpto | MPdot (mp1,l) -> let mp1' = replace_mp_in_mp mpfrom mpto mp1 in if mp1==mp1' then mp else MPdot (mp1',l) | _ -> mp let rec mp_in_mp mp mp1 = match mp1 with | _ when mp1 = mp -> true | MPdot (mp2,l) -> mp_in_mp mp mp2 | _ -> false let subset_prefixed_by mp resolver = let mp_prefix mkey mequ rslv = if mp_in_mp mp mkey then Deltamap.add_mp mkey mequ rslv else rslv in let kn_prefix kn hint rslv = match hint with | Inline _ -> rslv | Equiv _ -> if mp_in_mp mp (modpath kn) then Deltamap.add_kn kn hint rslv else rslv in Deltamap.fold mp_prefix kn_prefix resolver empty_delta_resolver let subst_dom_delta_resolver subst resolver = let mp_apply_subst mkey mequ rslv = 
Deltamap.add_mp (subst_mp subst mkey) mequ rslv in let kn_apply_subst kkey hint rslv = Deltamap.add_kn (subst_kn subst kkey) hint rslv in Deltamap.fold mp_apply_subst kn_apply_subst resolver empty_delta_resolver let subst_mp_delta sub mp mkey = match subst_mp0 sub mp with None -> empty_delta_resolver,mp | Some (mp',resolve) -> let mp1 = find_prefix resolve mp' in let resolve1 = subset_prefixed_by mp1 resolve in (subst_dom_delta_resolver (map_mp mp1 mkey) resolve1),mp1 let gen_subst_delta_resolver dom subst resolver = let mp_apply_subst mkey mequ rslv = let mkey' = if dom then subst_mp subst mkey else mkey in let rslv',mequ' = subst_mp_delta subst mequ mkey in Deltamap.join rslv' (Deltamap.add_mp mkey' mequ' rslv) in let kn_apply_subst kkey hint rslv = let kkey' = if dom then subst_kn subst kkey else kkey in let hint' = match hint with | Equiv kequ -> Equiv (subst_kn_delta subst kequ) | Inline (lev,Some t) -> Inline (lev,Some (subst_mps subst t)) | Inline (_,None) -> hint in Deltamap.add_kn kkey' hint' rslv in Deltamap.fold mp_apply_subst kn_apply_subst resolver empty_delta_resolver let subst_codom_delta_resolver = gen_subst_delta_resolver false let subst_dom_codom_delta_resolver = gen_subst_delta_resolver true let update_delta_resolver resolver1 resolver2 = let mp_apply_rslv mkey mequ rslv = if Deltamap.mem_mp mkey resolver2 then rslv else Deltamap.add_mp mkey (find_prefix resolver2 mequ) rslv in let kn_apply_rslv kkey hint rslv = if Deltamap.mem_kn kkey resolver2 then rslv else let hint' = match hint with | Equiv kequ -> Equiv (solve_delta_kn resolver2 kequ) | _ -> hint in Deltamap.add_kn kkey hint' rslv in Deltamap.fold mp_apply_rslv kn_apply_rslv resolver1 empty_delta_resolver let add_delta_resolver resolver1 resolver2 = if resolver1 == resolver2 then resolver2 else if resolver2 = empty_delta_resolver then resolver1 else Deltamap.join (update_delta_resolver resolver1 resolver2) resolver2 let substition_prefixed_by k mp subst = let mp_prefixmp kmp (mp_to,reso) sub = if mp_in_mp mp kmp && mp <> kmp then let new_key = replace_mp_in_mp mp k kmp in Umap.add_mp new_key (mp_to,reso) sub else sub in let mbi_prefixmp mbi _ sub = sub in Umap.fold mp_prefixmp mbi_prefixmp subst empty_subst let join subst1 subst2 = let apply_subst mpk add (mp,resolve) res = let mp',resolve' = match subst_mp0 subst2 mp with | None -> mp, None | Some (mp',resolve') -> mp', Some resolve' in let resolve'' = match resolve' with | Some res -> add_delta_resolver (subst_dom_codom_delta_resolver subst2 resolve) res | None -> subst_codom_delta_resolver subst2 resolve in let prefixed_subst = substition_prefixed_by mpk mp' subst2 in Umap.join prefixed_subst (add (mp',resolve'') res) in let mp_apply_subst mp = apply_subst mp (Umap.add_mp mp) in let mbi_apply_subst mbi = apply_subst (MPbound mbi) (Umap.add_mbi mbi) in let subst = Umap.fold mp_apply_subst mbi_apply_subst subst1 empty_subst in Umap.join subst2 subst let force fsubst r = match !r with | LSval a -> a | LSlazy(s,a) -> let subst = List.fold_left join empty_subst (List.rev s) in let a' = fsubst subst a in r := LSval a'; a' let subst_substituted s r = match !r with | LSval a -> ref (LSlazy([s],a)) | LSlazy(s',a) -> ref (LSlazy(s::s',a)) let force_constr = force subst_mps type constr_substituted = constr substituted let val_cstr_subst = val_substituted val_constr let subst_constr_subst = subst_substituted (** Beware! In .vo files, lazy_constr are stored as integers used as indexes for a separate table. 
The actual lazy_constr is restored later, by [Safe_typing.LightenLibrary.load]. This allows us to use here a different definition of lazy_constr than coqtop: since the checker will inspect all proofs parts, even opaque ones, no need to use Lazy.t here *) type lazy_constr = constr_substituted let subst_lazy_constr = subst_substituted let force_lazy_constr = force_constr let lazy_constr_from_val c = c let val_lazy_constr = val_cstr_subst (** Inlining level of parameters at functor applications. This is ignored by the checker. *) type inline = int option (** A constant can have no body (axiom/parameter), or a transparent body, or an opaque one *) type constant_def = | Undef of inline | Def of constr_substituted | OpaqueDef of lazy_constr let val_cst_def = val_sum "constant_def" 0 [|[|val_opt val_int|]; [|val_cstr_subst|]; [|val_lazy_constr|]|] let subst_constant_def sub = function | Undef inl -> Undef inl | Def c -> Def (subst_constr_subst sub c) | OpaqueDef lc -> OpaqueDef (subst_lazy_constr sub lc) type constant_body = { const_hyps : section_context; (* New: younger hyp at top *) const_body : constant_def; const_type : constant_type; const_body_code : to_patch_substituted; const_constraints : Univ.constraints } let body_of_constant cb = match cb.const_body with | Undef _ -> None | Def c -> Some c | OpaqueDef c -> Some c let constant_has_body cb = match cb.const_body with | Undef _ -> false | Def _ | OpaqueDef _ -> true let is_opaque cb = match cb.const_body with | OpaqueDef _ -> true | Def _ | Undef _ -> false let val_cb = val_tuple ~name:"constant_body" [|val_nctxt; val_cst_def; val_cst_type; no_val; val_cstrs|] let subst_rel_declaration sub (id,copt,t as x) = let copt' = Option.smartmap (subst_mps sub) copt in let t' = subst_mps sub t in if copt == copt' & t == t' then x else (id,copt',t') let subst_rel_context sub = list_smartmap (subst_rel_declaration sub) type recarg = | Norec | Mrec of inductive | Imbr of inductive let val_recarg = val_sum "recarg" 1 (* Norec *) [|[|val_ind|] (* Mrec *);[|val_ind|] (* Imbr *)|] let subst_recarg sub r = match r with | Norec -> r | (Mrec(kn,i)|Imbr (kn,i)) -> let kn' = subst_ind sub kn in if kn==kn' then r else Imbr (kn',i) type wf_paths = recarg Rtree.t let val_wfp = val_rec_sum "wf_paths" 0 (fun val_wfp -> [|[|val_int;val_int|]; (* Rtree.Param *) [|val_recarg;val_array val_wfp|]; (* Rtree.Node *) [|val_int;val_array val_wfp|] (* Rtree.Rec *) |]) let mk_norec = Rtree.mk_node Norec [||] let mk_paths r recargs = Rtree.mk_node r (Array.map (fun l -> Rtree.mk_node Norec (Array.of_list l)) recargs) let dest_recarg p = fst (Rtree.dest_node p) let dest_subterms p = let (_,cstrs) = Rtree.dest_node p in Array.map (fun t -> Array.to_list (snd (Rtree.dest_node t))) cstrs let subst_wf_paths sub p = Rtree.smartmap (subst_recarg sub) p (**********************************************************************) (* Representation of mutual inductive types in the kernel *) (* Inductive I1 (params) : U1 := c11 : T11 | ... | c1p1 : T1p1 ... with In (params) : Un := cn1 : Tn1 | ... 
| cnpn : Tnpn *) type monomorphic_inductive_arity = { mind_user_arity : constr; mind_sort : sorts; } let val_mono_ind_arity = val_tuple ~name:"monomorphic_inductive_arity"[|val_constr;val_sort|] type inductive_arity = | Monomorphic of monomorphic_inductive_arity | Polymorphic of polymorphic_arity let val_ind_arity = val_sum "inductive_arity" 0 [|[|val_mono_ind_arity|];[|val_pol_arity|]|] type one_inductive_body = { (* Primitive datas *) (* Name of the type: [Ii] *) mind_typename : identifier; (* Arity context of [Ii] with parameters: [forall params, Ui] *) mind_arity_ctxt : rel_context; (* Arity sort, original user arity, and allowed elim sorts, if monomorphic *) mind_arity : inductive_arity; (* Names of the constructors: [cij] *) mind_consnames : identifier array; (* Types of the constructors with parameters: [forall params, Tij], where the Ik are replaced by de Bruijn index in the context I1:forall params, U1 .. In:forall params, Un *) mind_user_lc : constr array; (* Derived datas *) (* Number of expected real arguments of the type (no let, no params) *) mind_nrealargs : int; (* Length of realargs context (with let, no params) *) mind_nrealargs_ctxt : int; (* List of allowed elimination sorts *) mind_kelim : sorts_family list; (* Head normalized constructor types so that their conclusion is atomic *) mind_nf_lc : constr array; (* Length of the signature of the constructors (with let, w/o params) *) mind_consnrealdecls : int array; (* Signature of recursive arguments in the constructors *) mind_recargs : wf_paths; (* Datas for bytecode compilation *) (* number of constant constructor *) mind_nb_constant : int; (* number of no constant constructor *) mind_nb_args : int; mind_reloc_tbl : reloc_table; } let val_one_ind = val_tuple ~name:"one_inductive_body" [|val_id;val_rctxt;val_ind_arity;val_array val_id;val_array val_constr; val_int;val_int;val_list val_sortfam;val_array val_constr;val_array val_int; val_wfp;val_int;val_int;no_val|] type mutual_inductive_body = { (* The component of the mutual inductive block *) mind_packets : one_inductive_body array; (* Whether the inductive type has been declared as a record *) mind_record : bool; (* Whether the type is inductive or coinductive *) mind_finite : bool; (* Number of types in the block *) mind_ntypes : int; (* Section hypotheses on which the block depends *) mind_hyps : section_context; (* Number of expected parameters *) mind_nparams : int; (* Number of recursively uniform (i.e. 
ordinary) parameters *) mind_nparams_rec : int; (* The context of parameters (includes let-in declaration) *) mind_params_ctxt : rel_context; (* Universes constraints enforced by the inductive declaration *) mind_constraints : Univ.constraints; } let val_ind_pack = val_tuple ~name:"mutual_inductive_body" [|val_array val_one_ind;val_bool;val_bool;val_int;val_nctxt; val_int; val_int; val_rctxt;val_cstrs|] let subst_arity sub = function | NonPolymorphicType s -> NonPolymorphicType (subst_mps sub s) | PolymorphicArity (ctx,s) -> PolymorphicArity (subst_rel_context sub ctx,s) (* TODO: should be changed to non-coping after Term.subst_mps *) let subst_const_body sub cb = { const_hyps = (assert (cb.const_hyps=[]); []); const_body = subst_constant_def sub cb.const_body; const_type = subst_arity sub cb.const_type; const_body_code = (*Cemitcodes.subst_to_patch_subst sub*) cb.const_body_code; const_constraints = cb.const_constraints} let subst_arity sub = function | Monomorphic s -> Monomorphic { mind_user_arity = subst_mps sub s.mind_user_arity; mind_sort = s.mind_sort; } | Polymorphic s as x -> x let subst_mind_packet sub mbp = { mind_consnames = mbp.mind_consnames; mind_consnrealdecls = mbp.mind_consnrealdecls; mind_typename = mbp.mind_typename; mind_nf_lc = array_smartmap (subst_mps sub) mbp.mind_nf_lc; mind_arity_ctxt = subst_rel_context sub mbp.mind_arity_ctxt; mind_arity = subst_arity sub mbp.mind_arity; mind_user_lc = array_smartmap (subst_mps sub) mbp.mind_user_lc; mind_nrealargs = mbp.mind_nrealargs; mind_nrealargs_ctxt = mbp.mind_nrealargs_ctxt; mind_kelim = mbp.mind_kelim; mind_recargs = subst_wf_paths sub mbp.mind_recargs (*wf_paths*); mind_nb_constant = mbp.mind_nb_constant; mind_nb_args = mbp.mind_nb_args; mind_reloc_tbl = mbp.mind_reloc_tbl } let subst_mind sub mib = { mind_record = mib.mind_record ; mind_finite = mib.mind_finite ; mind_ntypes = mib.mind_ntypes ; mind_hyps = (assert (mib.mind_hyps=[]); []) ; mind_nparams = mib.mind_nparams; mind_nparams_rec = mib.mind_nparams_rec; mind_params_ctxt = map_rel_context (subst_mps sub) mib.mind_params_ctxt; mind_packets = array_smartmap (subst_mind_packet sub) mib.mind_packets ; mind_constraints = mib.mind_constraints } (* Modules *) (* Whenever you change these types, please do update the validation functions below *) type structure_field_body = | SFBconst of constant_body | SFBmind of mutual_inductive_body | SFBmodule of module_body | SFBmodtype of module_type_body and structure_body = (label * structure_field_body) list and struct_expr_body = | SEBident of module_path | SEBfunctor of mod_bound_id * module_type_body * struct_expr_body | SEBapply of struct_expr_body * struct_expr_body * Univ.constraints | SEBstruct of structure_body | SEBwith of struct_expr_body * with_declaration_body and with_declaration_body = With_module_body of identifier list * module_path | With_definition_body of identifier list * constant_body and module_body = { mod_mp : module_path; mod_expr : struct_expr_body option; mod_type : struct_expr_body; mod_type_alg : struct_expr_body option; mod_constraints : Univ.constraints; mod_delta : delta_resolver; mod_retroknowledge : action list} and module_type_body = { typ_mp : module_path; typ_expr : struct_expr_body; typ_expr_alg : struct_expr_body option ; typ_constraints : Univ.constraints; typ_delta :delta_resolver} (* the validation functions: *) let rec val_sfb o = val_sum "struct_field_body" 0 [|[|val_cb|]; (* SFBconst *) [|val_ind_pack|]; (* SFBmind *) [|val_module|]; (* SFBmodule *) [|val_modtype|] (* SFBmodtype *) 
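(* [Editor's note, not in the original file]  The [val_*] values built in
   this file (via [val_sum], [val_tuple], [val_list], ...) appear to be
   layout descriptors used by the checker to validate the shape of data
   unmarshalled from .vo files before it is trusted; for instance the
   [val_sfb] descriptor being built here presents [structure_field_body] as
   a sum with four constructors whose arguments are checked by [val_cb],
   [val_ind_pack], [val_module] and [val_modtype] respectively.  This
   reading is an editorial inference, not a statement from the original
   sources. *)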
|] o and val_sb o = val_list (val_tuple ~name:"label*sfb"[|val_id;val_sfb|]) o and val_seb o = val_sum "struct_expr_body" 0 [|[|val_mp|]; (* SEBident *) [|val_uid;val_modtype;val_seb|]; (* SEBfunctor *) [|val_seb;val_seb;val_cstrs|]; (* SEBapply *) [|val_sb|]; (* SEBstruct *) [|val_seb;val_with|] (* SEBwith *) |] o and val_with o = val_sum "with_declaration_body" 0 [|[|val_list val_id;val_mp|]; [|val_list val_id;val_cb|]|] o and val_module o = val_tuple ~name:"module_body" [|val_mp;val_opt val_seb;val_seb; val_opt val_seb;val_cstrs;val_res;no_val|] o and val_modtype o = val_tuple ~name:"module_type_body" [|val_mp;val_seb;val_opt val_seb;val_cstrs;val_res|] o let rec subst_with_body sub = function | With_module_body(id,mp) -> With_module_body(id,subst_mp sub mp) | With_definition_body(id,cb) -> With_definition_body( id,subst_const_body sub cb) and subst_modtype sub mtb= let typ_expr' = subst_struct_expr sub mtb.typ_expr in let typ_alg' = Option.smartmap (subst_struct_expr sub) mtb.typ_expr_alg in let mp = subst_mp sub mtb.typ_mp in if typ_expr'==mtb.typ_expr && typ_alg'==mtb.typ_expr_alg && mp==mtb.typ_mp then mtb else {mtb with typ_mp = mp; typ_expr = typ_expr'; typ_expr_alg = typ_alg'} and subst_structure sub sign = let subst_body = function SFBconst cb -> SFBconst (subst_const_body sub cb) | SFBmind mib -> SFBmind (subst_mind sub mib) | SFBmodule mb -> SFBmodule (subst_module sub mb) | SFBmodtype mtb -> SFBmodtype (subst_modtype sub mtb) in List.map (fun (l,b) -> (l,subst_body b)) sign and subst_module sub mb = let mtb' = subst_struct_expr sub mb.mod_type in let typ_alg' = Option.smartmap (subst_struct_expr sub ) mb.mod_type_alg in let me' = Option.smartmap (subst_struct_expr sub) mb.mod_expr in let mp = subst_mp sub mb.mod_mp in if mtb'==mb.mod_type && mb.mod_expr == me' && mp == mb.mod_mp then mb else { mb with mod_mp = mp; mod_expr = me'; mod_type_alg = typ_alg'; mod_type=mtb'} and subst_struct_expr sub = function | SEBident mp -> SEBident (subst_mp sub mp) | SEBfunctor (mbid, mtb, meb') -> SEBfunctor(mbid,subst_modtype sub mtb ,subst_struct_expr sub meb') | SEBstruct (str)-> SEBstruct( subst_structure sub str) | SEBapply (meb1,meb2,cst)-> SEBapply(subst_struct_expr sub meb1, subst_struct_expr sub meb2, cst) | SEBwith (meb,wdb)-> SEBwith(subst_struct_expr sub meb, subst_with_body sub wdb) coq-8.4pl2/checker/indtypes.mli0000640000175000001440000000254412010532755015602 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Pp.std_ppcmds val prcon : constant -> Pp.std_ppcmds (*s The different kinds of errors that may result of a malformed inductive definition. *) (* Errors related to inductive constructions *) type inductive_error = | NonPos of env * constr * constr | NotEnoughArgs of env * constr * constr | NotConstructor of env * constr * constr | NonPar of env * constr * int * constr * constr | SameNamesTypes of identifier | SameNamesConstructors of identifier | SameNamesOverlap of identifier list | NotAnArity of identifier | BadEntry exception InductiveError of inductive_error (*s The following function does checks on inductive declarations. 
*) val check_inductive : env -> mutual_inductive -> mutual_inductive_body -> env coq-8.4pl2/checker/typeops.mli0000640000175000001440000000170712010532755015446 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> constr val infer_type : env -> constr -> sorts val check_ctxt : env -> rel_context -> env val check_named_ctxt : env -> named_context -> env val check_polymorphic_arity : env -> rel_context -> polymorphic_arity -> unit val type_of_constant_type : env -> constant_type -> constr coq-8.4pl2/checker/type_errors.ml0000640000175000001440000000711112010532755016142 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* failwith "path_of_dirpath" | l::dir -> {dirpath=List.map string_of_id dir;basename=string_of_id l} let pr_dirlist dp = prlist_with_sep (fun _ -> str".") str (List.rev dp) let pr_path sp = match sp.dirpath with [] -> str sp.basename | sl -> pr_dirlist sl ++ str"." ++ str sp.basename type library_objects type compilation_unit_name = dir_path type library_disk = { md_name : compilation_unit_name; md_compiled : Safe_typing.LightenLibrary.lightened_compiled_library; md_objects : library_objects; md_deps : (compilation_unit_name * Digest.t) list; md_imports : compilation_unit_name list } (************************************************************************) (*s Modules on disk contain the following informations (after the magic number, and before the digest). *) (*s Modules loaded in memory contain the following informations. They are kept in the global table [libraries_table]. *) type library_t = { library_name : compilation_unit_name; library_filename : System.physical_path; library_compiled : Safe_typing.compiled_library; library_deps : (compilation_unit_name * Digest.t) list; library_digest : Digest.t } module LibraryOrdered = struct type t = dir_path let compare d1 d2 = Pervasives.compare (List.rev (repr_dirpath d1)) (List.rev (repr_dirpath d2)) end module LibrarySet = Set.Make(LibraryOrdered) module LibraryMap = Map.Make(LibraryOrdered) (* This is a map from names to loaded libraries *) let libraries_table = ref LibraryMap.empty (* various requests to the tables *) let find_library dir = LibraryMap.find dir !libraries_table let try_find_library dir = try find_library dir with Not_found -> error ("Unknown library " ^ (string_of_dirpath dir)) let library_full_filename dir = (find_library dir).library_filename (* If a library is loaded several time, then the first occurrence must be performed first, thus the libraries_loaded_list ... *) let register_loaded_library m = libraries_table := LibraryMap.add m.library_name m !libraries_table let check_one_lib admit (dir,m) = let file = m.library_filename in let md = m.library_compiled in let dig = m.library_digest in (* Look up if the library is to be admitted correct. We could also check if it carries a validation certificate (yet to be implemented). *) if LibrarySet.mem dir admit then (Flags.if_verbose msgnl (str "Admitting library: " ++ pr_dirpath dir); Safe_typing.unsafe_import file md dig) else (Flags.if_verbose msgnl (str "Checking library: " ++ pr_dirpath dir); Safe_typing.import file md dig); Flags.if_verbose msg(fnl()); register_loaded_library m (*************************************************************************) (*s Load path. 
Mapping from physical to logical paths etc.*) type logical_path = dir_path let load_paths = ref ([],[] : System.physical_path list * logical_path list) let get_load_paths () = fst !load_paths (* Hints to partially detects if two paths refer to the same repertory *) let rec remove_path_dot p = let curdir = Filename.concat Filename.current_dir_name "" in (* Unix: "./" *) let n = String.length curdir in if String.length p > n && String.sub p 0 n = curdir then remove_path_dot (String.sub p n (String.length p - n)) else p let strip_path p = let cwd = Filename.concat (Sys.getcwd ()) "" in (* Unix: "`pwd`/" *) let n = String.length cwd in if String.length p > n && String.sub p 0 n = cwd then remove_path_dot (String.sub p n (String.length p - n)) else remove_path_dot p let canonical_path_name p = let current = Sys.getcwd () in try Sys.chdir p; let p' = Sys.getcwd () in Sys.chdir current; p' with Sys_error _ -> (* We give up to find a canonical name and just simplify it... *) strip_path p let find_logical_path phys_dir = let phys_dir = canonical_path_name phys_dir in match list_filter2 (fun p d -> p = phys_dir) !load_paths with | _,[dir] -> dir | _,[] -> default_root_prefix | _,l -> anomaly ("Two logical paths are associated to "^phys_dir) let remove_load_path dir = load_paths := list_filter2 (fun p d -> p <> dir) !load_paths let add_load_path (phys_path,coq_path) = if !Flags.debug then msgnl (str "path: " ++ pr_dirpath coq_path ++ str " ->" ++ spc() ++ str phys_path); let phys_path = canonical_path_name phys_path in match list_filter2 (fun p d -> p = phys_path) !load_paths with | _,[dir] -> if coq_path <> dir (* If this is not the default -I . to coqtop *) && not (phys_path = canonical_path_name Filename.current_dir_name && coq_path = default_root_prefix) then begin (* Assume the user is concerned by library naming *) if dir <> default_root_prefix then Flags.if_warn msg_warning (str phys_path ++ strbrk " was previously bound to " ++ pr_dirpath dir ++ strbrk "; it is remapped to " ++ pr_dirpath coq_path); remove_load_path phys_path; load_paths := (phys_path::fst !load_paths, coq_path::snd !load_paths) end | _,[] -> load_paths := (phys_path :: fst !load_paths, coq_path :: snd !load_paths) | _ -> anomaly ("Two logical paths are associated to "^phys_path) let load_paths_of_dir_path dir = fst (list_filter2 (fun p d -> d = dir) !load_paths) (************************************************************************) (*s Locate absolute or partially qualified library names in the path *) exception LibUnmappedDir exception LibNotFound let locate_absolute_library dir = (* Search in loadpath *) let pref, base = split_dirpath dir in let loadpath = load_paths_of_dir_path pref in if loadpath = [] then raise LibUnmappedDir; try let name = string_of_id base^".vo" in let _, file = System.where_in_path ~warn:false loadpath name in (dir, file) with Not_found -> (* Last chance, removed from the file system but still in memory *) try (dir, library_full_filename dir) with Not_found -> raise LibNotFound let locate_qualified_library qid = try let loadpath = (* Search library in loadpath *) if qid.dirpath=[] then get_load_paths () else (* we assume qid is an absolute dirpath *) load_paths_of_dir_path (dir_of_path qid) in if loadpath = [] then raise LibUnmappedDir; let name = qid.basename^".vo" in let path, file = System.where_in_path loadpath name in let dir = extend_dirpath (find_logical_path path) (id_of_string qid.basename) in (* Look if loaded *) try (dir, library_full_filename dir) with Not_found -> (dir, file) with 
Not_found -> raise LibNotFound let explain_locate_library_error qid = function | LibUnmappedDir -> let prefix = qid.dirpath in errorlabstrm "load_absolute_library_from" (str "Cannot load " ++ pr_path qid ++ str ":" ++ spc () ++ str "no physical path bound to" ++ spc () ++ pr_dirlist prefix ++ fnl ()) | LibNotFound -> errorlabstrm "load_absolute_library_from" (str"Cannot find library " ++ pr_path qid ++ str" in loadpath") | e -> raise e let try_locate_absolute_library dir = try locate_absolute_library dir with e -> explain_locate_library_error (path_of_dirpath dir) e let try_locate_qualified_library qid = try locate_qualified_library qid with e -> explain_locate_library_error qid e (************************************************************************) (*s Low-level interning/externing of libraries to files *) (*s Loading from disk to cache (preparation phase) *) let raw_intern_library = snd (System.raw_extern_intern Coq_config.vo_magic_number ".vo") let with_magic_number_check f a = try f a with System.Bad_magic_number fname -> errorlabstrm "with_magic_number_check" (str"file " ++ str fname ++ spc () ++ str"has bad magic number." ++ spc () ++ str"It is corrupted" ++ spc () ++ str"or was compiled with another version of Coq.") (************************************************************************) (* Internalise libraries *) let mk_library md f table digest = { library_name = md.md_name; library_filename = f; library_compiled = Safe_typing.LightenLibrary.load table md.md_compiled; library_deps = md.md_deps; library_digest = digest } let name_clash_message dir mdir f = str ("The file " ^ f ^ " contains library") ++ spc () ++ pr_dirpath mdir ++ spc () ++ str "and not library" ++ spc() ++ pr_dirpath dir (* Dependency graph *) let depgraph = ref LibraryMap.empty let intern_from_file (dir, f) = Flags.if_verbose msg (str"[intern "++str f++str" ..."); let (md,table,digest) = try let ch = with_magic_number_check raw_intern_library f in let (md:library_disk) = System.marshal_in f ch in let digest = System.marshal_in f ch in let table = (System.marshal_in f ch : Safe_typing.LightenLibrary.table) in close_in ch; if dir <> md.md_name then errorlabstrm "load_physical_library" (name_clash_message dir md.md_name f); Flags.if_verbose msgnl(str" done]"); md,table,digest with e -> Flags.if_verbose msgnl(str" failed!]"); raise e in depgraph := LibraryMap.add md.md_name md.md_deps !depgraph; mk_library md f table digest let get_deps (dir, f) = try LibraryMap.find dir !depgraph with Not_found -> let _ = intern_from_file (dir,f) in LibraryMap.find dir !depgraph (* Read a compiled library and all dependencies, in reverse order. Do not include files that are already in the context. 
*) let rec intern_library seen (dir, f) needed = if LibrarySet.mem dir seen then failwith "Recursive dependencies!"; (* Look if in the current logical environment *) try let _ = find_library dir in needed with Not_found -> (* Look if already listed and consequently its dependencies too *) if List.mem_assoc dir needed then needed else (* [dir] is an absolute name which matches [f] which must be in loadpath *) let m = intern_from_file (dir,f) in let seen' = LibrarySet.add dir seen in let deps = List.map (fun (d,_) -> try_locate_absolute_library d) m.library_deps in (dir,m) :: List.fold_right (intern_library seen') deps needed (* Compute the reflexive transitive dependency closure *) let rec fold_deps seen ff (dir,f) (s,acc) = if LibrarySet.mem dir seen then failwith "Recursive dependencies!"; if LibrarySet.mem dir s then (s,acc) else let deps = get_deps (dir,f) in let deps = List.map (fun (d,_) -> try_locate_absolute_library d) deps in let seen' = LibrarySet.add dir seen in let (s',acc') = List.fold_right (fold_deps seen' ff) deps (s,acc) in (LibrarySet.add dir s', ff dir acc') and fold_deps_list seen ff modl needed = List.fold_right (fold_deps seen ff) modl needed let fold_deps_list ff modl acc = snd (fold_deps_list LibrarySet.empty ff modl (LibrarySet.empty,acc)) let recheck_library ~norec ~admit ~check = let ml = List.map try_locate_qualified_library check in let nrl = List.map try_locate_qualified_library norec in let al = List.map try_locate_qualified_library admit in let needed = List.rev (List.fold_right (intern_library LibrarySet.empty) (ml@nrl) []) in (* first compute the closure of norec, remove closure of check, add closure of admit, and finally remove norec and check *) let nochk = fold_deps_list LibrarySet.add nrl LibrarySet.empty in let nochk = fold_deps_list LibrarySet.remove ml nochk in let nochk = fold_deps_list LibrarySet.add al nochk in (* explicitly required modules cannot be skipped... 
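(* [Editor's worked example, not in the original source]  With hypothetical
   libraries A, B and C, where A depends on B and B depends on C, a call
   [recheck_library ~norec:[A] ~admit:[] ~check:[]] gives, at this point,
   [nochk] = closure of A = {A; B; C}; nothing is removed for [~check] nor
   added for [~admit], and A itself is removed just below, so the checker
   fully rechecks A while admitting B and C without rechecking them. *)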
*) let nochk = List.fold_right LibrarySet.remove (List.map fst (nrl@ml)) nochk in (* *) Flags.if_verbose msgnl (fnl()++hv 2 (str "Ordered list:" ++ fnl() ++ prlist (fun (dir,_) -> pr_dirpath dir ++ fnl()) needed)); List.iter (check_one_lib nochk) needed; Flags.if_verbose msgnl(str"Modules were successfully checked") open Printf let mem s = let m = try_find_library s in h 0 (str (sprintf "%dk" (size_kb m))) coq-8.4pl2/checker/environ.ml0000640000175000001440000001413511652031713015250 0ustar notinusersopen Util open Names open Univ open Term open Declarations type globals = { env_constants : constant_body Cmap_env.t; env_inductives : mutual_inductive_body Mindmap_env.t; env_inductives_eq : kernel_name KNmap.t; env_modules : module_body MPmap.t; env_modtypes : module_type_body MPmap.t} type stratification = { env_universes : universes; env_engagement : engagement option } type env = { env_globals : globals; env_named_context : named_context; env_rel_context : rel_context; env_stratification : stratification; env_imports : Digest.t MPmap.t } let empty_env = { env_globals = { env_constants = Cmap_env.empty; env_inductives = Mindmap_env.empty; env_inductives_eq = KNmap.empty; env_modules = MPmap.empty; env_modtypes = MPmap.empty}; env_named_context = []; env_rel_context = []; env_stratification = { env_universes = Univ.initial_universes; env_engagement = None}; env_imports = MPmap.empty } let engagement env = env.env_stratification.env_engagement let universes env = env.env_stratification.env_universes let named_context env = env.env_named_context let rel_context env = env.env_rel_context let set_engagement c env = match env.env_stratification.env_engagement with | Some c' -> if c=c' then env else error "Incompatible engagement" | None -> { env with env_stratification = { env.env_stratification with env_engagement = Some c } } (* Digests *) let add_digest env dp digest = { env with env_imports = MPmap.add (MPfile dp) digest env.env_imports } let lookup_digest env dp = MPmap.find (MPfile dp) env.env_imports (* Rel context *) let lookup_rel n env = let rec lookup_rel n sign = match n, sign with | 1, decl :: _ -> decl | n, _ :: sign -> lookup_rel (n-1) sign | _, [] -> raise Not_found in lookup_rel n env.env_rel_context let push_rel d env = { env with env_rel_context = d :: env.env_rel_context } let push_rel_context ctxt x = fold_rel_context push_rel ctxt ~init:x let push_rec_types (lna,typarray,_) env = let ctxt = array_map2_i (fun i na t -> (na, None, lift i t)) lna typarray in Array.fold_left (fun e assum -> push_rel assum e) env ctxt (* Named context *) let push_named d env = (* if not (env.env_rel_context = []) then raise (ASSERT env.env_rel_context); assert (env.env_rel_context = []); *) { env with env_named_context = d :: env.env_named_context } let lookup_named id env = let rec lookup_named id = function | (id',_,_ as decl) :: _ when id=id' -> decl | _ :: sign -> lookup_named id sign | [] -> raise Not_found in lookup_named id env.env_named_context (* A local const is evaluable if it is defined *) let named_type id env = let (_,_,t) = lookup_named id env in t (* Universe constraints *) let add_constraints c env = if c == empty_constraint then env else let s = env.env_stratification in { env with env_stratification = { s with env_universes = merge_constraints c s.env_universes } } (* Global constants *) let lookup_constant kn env = Cmap_env.find kn env.env_globals.env_constants let add_constant kn cs env = if Cmap_env.mem kn env.env_globals.env_constants then Printf.ksprintf anomaly "Constant 
%s is already defined" (string_of_con kn); let new_constants = Cmap_env.add kn cs env.env_globals.env_constants in let new_globals = { env.env_globals with env_constants = new_constants } in { env with env_globals = new_globals } type const_evaluation_result = NoBody | Opaque exception NotEvaluableConst of const_evaluation_result let constant_value env kn = let cb = lookup_constant kn env in match cb.const_body with | Def l_body -> force_constr l_body | OpaqueDef _ -> raise (NotEvaluableConst Opaque) | Undef _ -> raise (NotEvaluableConst NoBody) (* A global const is evaluable if it is defined and not opaque *) let evaluable_constant cst env = try let _ = constant_value env cst in true with Not_found | NotEvaluableConst _ -> false (* Mutual Inductives *) let scrape_mind env kn= try KNmap.find kn env.env_globals.env_inductives_eq with Not_found -> kn let mind_equiv env (kn1,i1) (kn2,i2) = i1 = i2 && scrape_mind env (user_mind kn1) = scrape_mind env (user_mind kn2) let lookup_mind kn env = Mindmap_env.find kn env.env_globals.env_inductives let add_mind kn mib env = if Mindmap_env.mem kn env.env_globals.env_inductives then Printf.ksprintf anomaly "Inductive %s is already defined" (string_of_mind kn); let new_inds = Mindmap_env.add kn mib env.env_globals.env_inductives in let kn1,kn2 = user_mind kn,canonical_mind kn in let new_inds_eq = if kn1=kn2 then env.env_globals.env_inductives_eq else KNmap.add kn1 kn2 env.env_globals.env_inductives_eq in let new_globals = { env.env_globals with env_inductives = new_inds; env_inductives_eq = new_inds_eq} in { env with env_globals = new_globals } (* Modules *) let add_modtype ln mtb env = if MPmap.mem ln env.env_globals.env_modtypes then Printf.ksprintf anomaly "Module type %s is already defined" (string_of_mp ln); let new_modtypes = MPmap.add ln mtb env.env_globals.env_modtypes in let new_globals = { env.env_globals with env_modtypes = new_modtypes } in { env with env_globals = new_globals } let shallow_add_module mp mb env = if MPmap.mem mp env.env_globals.env_modules then Printf.ksprintf anomaly "Module %s is already defined" (string_of_mp mp); let new_mods = MPmap.add mp mb env.env_globals.env_modules in let new_globals = { env.env_globals with env_modules = new_mods } in { env with env_globals = new_globals } let shallow_remove_module mp env = if not (MPmap.mem mp env.env_globals.env_modules) then Printf.ksprintf anomaly "Module %s is unknown" (string_of_mp mp); let new_mods = MPmap.remove mp env.env_globals.env_modules in let new_globals = { env.env_globals with env_modules = new_mods } in { env with env_globals = new_globals } let lookup_module mp env = MPmap.find mp env.env_globals.env_modules let lookup_modtype ln env = MPmap.find ln env.env_globals.env_modtypes coq-8.4pl2/checker/safe_typing.mli0000640000175000001440000000251312010532755016247 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* env (* exporting and importing modules *) type compiled_library val set_engagement : Declarations.engagement -> unit val import : System.physical_path -> compiled_library -> Digest.t -> unit val unsafe_import : System.physical_path -> compiled_library -> Digest.t -> unit (** Store the body of modules' opaque constants inside a table. This module is used during the serialization and deserialization of vo files. 
*) module LightenLibrary : sig type table type lightened_compiled_library (** [load table lcl] builds a compiled library from a lightened library [lcl] by remplacing every index by its related opaque terms inside [table]. *) val load : table -> lightened_compiled_library -> compiled_library end coq-8.4pl2/checker/mod_checking.mli0000640000175000001440000000114612010532755016352 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Names.module_path -> Declarations.module_body -> unit coq-8.4pl2/checker/subtyping.ml0000640000175000001440000003370512010532755015621 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Labmap.add (label_of_id id) (IndConstr((ip,i+1), mib)) map) oib.mind_consnames map in Labmap.add (label_of_id oib.mind_typename) (IndType (ip, mib)) map in array_fold_right_i add_mip_nameobjects mib.mind_packets map (* creates (namedobject/namedmodule) map for the whole signature *) type labmap = { objs : namedobject Labmap.t; mods : namedmodule Labmap.t } let empty_labmap = { objs = Labmap.empty; mods = Labmap.empty } let get_obj mp map l = try Labmap.find l map.objs with Not_found -> error_no_such_label_sub l mp let get_mod mp map l = try Labmap.find l map.mods with Not_found -> error_no_such_label_sub l mp let make_labmap mp list = let add_one (l,e) map = match e with | SFBconst cb -> { map with objs = Labmap.add l (Constant cb) map.objs } | SFBmind mib -> { map with objs = add_mib_nameobjects mp l mib map.objs } | SFBmodule mb -> { map with mods = Labmap.add l (Module mb) map.mods } | SFBmodtype mtb -> { map with mods = Labmap.add l (Modtype mtb) map.mods } in List.fold_right add_one list empty_labmap let check_conv_error error f env a1 a2 = try f env a1 a2 with NotConvertible -> error () (* for now we do not allow reorderings *) let check_inductive env mp1 l info1 mib2 spec2 subst1 subst2= let kn = make_mind mp1 empty_dirpath l in let error () = error_not_match l spec2 in let check_conv f = check_conv_error error f in let mib1 = match info1 with | IndType ((_,0), mib) -> mib | _ -> error () in let mib2 = subst_mind subst2 mib2 in let check_inductive_type env t1 t2 = (* Due to sort-polymorphism in inductive types, the conclusions of t1 and t2, if in Type, are generated as the least upper bounds of the types of the constructors. By monotonicity of the infered l.u.b. wrt subtyping (i.e. if X:U |- T(X):s and |- M:U' and U'<=U then infer_type(T(M))<=s), each universe in the conclusion of t1 has an bounding universe in the conclusion of t2, so that we don't need to check the subtyping of the conclusions of t1 and t2. 
Even if we'd like to recheck it, the inference of constraints is not designed to deal with algebraic constraints of the form max-univ(u1..un) <= max-univ(u'1..u'n), so that it is not easy to recheck it (in short, we would need the actual graph of constraints as input while type checking is currently designed to output a set of constraints instead) *) (* So we cheat and replace the subtyping problem on algebraic constraints of the form max-univ(u1..un) <= max-univ(u'1..u'n) (that we know are necessary true) by trivial constraints that the constraint generator knows how to deal with *) let (ctx1,s1) = dest_arity env t1 in let (ctx2,s2) = dest_arity env t2 in let s1,s2 = match s1, s2 with | Type _, Type _ -> (* shortcut here *) Prop Null, Prop Null | (Prop _, Type _) | (Type _,Prop _) -> error () | _ -> (s1, s2) in check_conv conv_leq env (mkArity (ctx1,s1)) (mkArity (ctx2,s2)) in let check_packet p1 p2 = let check f = if f p1 <> f p2 then error () in check (fun p -> p.mind_consnames); check (fun p -> p.mind_typename); (* nf_lc later *) (* nf_arity later *) (* user_lc ignored *) (* user_arity ignored *) check (fun p -> p.mind_nrealargs); (* kelim ignored *) (* listrec ignored *) (* finite done *) (* nparams done *) (* params_ctxt done because part of the inductive types *) (* Don't check the sort of the type if polymorphic *) check_inductive_type env (type_of_inductive env (mib1,p1)) (type_of_inductive env (mib2,p2)) in let check_cons_types i p1 p2 = array_iter2 (check_conv conv env) (arities_of_specif kn (mib1,p1)) (arities_of_specif kn (mib2,p2)) in let check f = if f mib1 <> f mib2 then error () in check (fun mib -> mib.mind_finite); check (fun mib -> mib.mind_ntypes); assert (mib1.mind_hyps=[] && mib2.mind_hyps=[]); assert (Array.length mib1.mind_packets >= 1 && Array.length mib2.mind_packets >= 1); (* Check that the expected numbers of uniform parameters are the same *) (* No need to check the contexts of parameters: it is checked *) (* at the time of checking the inductive arities in check_packet. *) (* Notice that we don't expect the local definitions to match: only *) (* the inductive types and constructors types have to be convertible *) check (fun mib -> mib.mind_nparams); (*begin match mib2.mind_equiv with | None -> () | Some kn2' -> let kn2 = scrape_mind env kn2' in let kn1 = match mib1.mind_equiv with None -> kn | Some kn1' -> scrape_mind env kn1' in if kn1 <> kn2 then error () end;*) (* we check that records and their field names are preserved. 
*) check (fun mib -> mib.mind_record); if mib1.mind_record then begin let rec names_prod_letin t = match t with | Prod(n,_,t) -> n::(names_prod_letin t) | LetIn(n,_,_,t) -> n::(names_prod_letin t) | Cast(t,_,_) -> names_prod_letin t | _ -> [] in assert (Array.length mib1.mind_packets = 1); assert (Array.length mib2.mind_packets = 1); assert (Array.length mib1.mind_packets.(0).mind_user_lc = 1); assert (Array.length mib2.mind_packets.(0).mind_user_lc = 1); check (fun mib -> names_prod_letin mib.mind_packets.(0).mind_user_lc.(0)); end; (* we first check simple things *) array_iter2 check_packet mib1.mind_packets mib2.mind_packets; (* and constructor types in the end *) let _ = array_map2_i check_cons_types mib1.mind_packets mib2.mind_packets in () let check_constant env mp1 l info1 cb2 spec2 subst1 subst2 = let error () = error_not_match l spec2 in let check_conv f = check_conv_error error f in let check_type env t1 t2 = (* If the type of a constant is generated, it may mention non-variable algebraic universes that the general conversion algorithm is not ready to handle. Anyway, generated types of constants are functions of the body of the constant. If the bodies are the same in environments that are subtypes one of the other, the types are subtypes too (i.e. if Gamma <= Gamma', Gamma |- A |> T, Gamma |- A' |> T' and Gamma |- A=A' then T <= T'). Hence they don't have to be checked again *) let t1,t2 = if isArity t2 then let (ctx2,s2) = destArity t2 in match s2 with | Type v when not (is_univ_variable v) -> (* The type in the interface is inferred and is made of algebraic universes *) begin try let (ctx1,s1) = dest_arity env t1 in match s1 with | Type u when not (is_univ_variable u) -> (* Both types are inferred, no need to recheck them. We cheat and collapse the types to Prop *) mkArity (ctx1,Prop Null), mkArity (ctx2,Prop Null) | Prop _ -> (* The type in the interface is inferred, it may be the case that the type in the implementation is smaller because the body is more reduced. We safely collapse the upper type to Prop *) mkArity (ctx1,Prop Null), mkArity (ctx2,Prop Null) | Type _ -> (* The type in the interface is inferred and the type in the implementation is not inferred or is inferred but from a more reduced body so that it is just a variable. Since constraints of the form "univ <= max(...)" are not expressible in the system of algebraic universes: we fail (the user has to use an explicit type in the interface *) error () with UserError _ (* "not an arity" *) -> error () end | _ -> t1,t2 else (t1,t2) in check_conv conv_leq env t1 t2 in match info1 with | Constant cb1 -> assert (cb1.const_hyps=[] && cb2.const_hyps=[]) ; let cb1 = subst_const_body subst1 cb1 in let cb2 = subst_const_body subst2 cb2 in (*Start by checking types*) let typ1 = Typeops.type_of_constant_type env cb1.const_type in let typ2 = Typeops.type_of_constant_type env cb2.const_type in check_type env typ1 typ2; (* Now we check the bodies: - A transparent constant can only be implemented by a compatible transparent constant. - In the signature, an opaque is handled just as a parameter: anything of the right type can implement it, even if bodies differ. *) (match cb2.const_body with | Undef _ | OpaqueDef _ -> () | Def lc2 -> (match cb1.const_body with | Undef _ | OpaqueDef _ -> error () | Def lc1 -> (* NB: cb1 might have been strengthened and appear as transparent. Anyway [check_conv] will handle that afterwards. 
*) let c1 = force_constr lc1 in let c2 = force_constr lc2 in check_conv conv env c1 c2)) | IndType ((kn,i),mind1) -> ignore (Util.error ( "The kernel does not recognize yet that a parameter can be " ^ "instantiated by an inductive type. Hint: you can rename the " ^ "inductive type and give a definition to map the old name to the new " ^ "name.")); assert (mind1.mind_hyps=[] && cb2.const_hyps=[]) ; if constant_has_body cb2 then error () ; let arity1 = type_of_inductive env (mind1,mind1.mind_packets.(i)) in let typ2 = Typeops.type_of_constant_type env cb2.const_type in check_conv conv_leq env arity1 typ2 | IndConstr (((kn,i),j) as cstr,mind1) -> ignore (Util.error ( "The kernel does not recognize yet that a parameter can be " ^ "instantiated by a constructor. Hint: you can rename the " ^ "constructor and give a definition to map the old name to the new " ^ "name.")); assert (mind1.mind_hyps=[] && cb2.const_hyps=[]) ; if constant_has_body cb2 then error () ; let ty1 = type_of_constructor cstr (mind1,mind1.mind_packets.(i)) in let ty2 = Typeops.type_of_constant_type env cb2.const_type in check_conv conv env ty1 ty2 let rec check_modules env msb1 msb2 subst1 subst2 = let mty1 = module_type_of_module None msb1 in let mty2 = module_type_of_module None msb2 in check_modtypes env mty1 mty2 subst1 subst2 false; and check_signatures env mp1 sig1 sig2 subst1 subst2 = let map1 = make_labmap mp1 sig1 in let check_one_body (l,spec2) = match spec2 with | SFBconst cb2 -> check_constant env mp1 l (get_obj mp1 map1 l) cb2 spec2 subst1 subst2 | SFBmind mib2 -> check_inductive env mp1 l (get_obj mp1 map1 l) mib2 spec2 subst1 subst2 | SFBmodule msb2 -> begin match get_mod mp1 map1 l with | Module msb -> check_modules env msb msb2 subst1 subst2 | _ -> error_not_match l spec2 end | SFBmodtype mtb2 -> let mtb1 = match get_mod mp1 map1 l with | Modtype mtb -> mtb | _ -> error_not_match l spec2 in let env = add_module (module_body_of_type mtb2.typ_mp mtb2) (add_module (module_body_of_type mtb1.typ_mp mtb1) env) in check_modtypes env mtb1 mtb2 subst1 subst2 true in List.iter check_one_body sig2 and check_modtypes env mtb1 mtb2 subst1 subst2 equiv = if mtb1==mtb2 then () else let mtb1',mtb2'=mtb1.typ_expr,mtb2.typ_expr in let rec check_structure env str1 str2 equiv subst1 subst2 = match str1,str2 with | SEBstruct (list1), SEBstruct (list2) -> check_signatures env mtb1.typ_mp list1 list2 subst1 subst2; if equiv then check_signatures env mtb2.typ_mp list2 list1 subst1 subst2 else () | SEBfunctor (arg_id1,arg_t1,body_t1), SEBfunctor (arg_id2,arg_t2,body_t2) -> check_modtypes env arg_t2 arg_t1 (map_mp arg_t1.typ_mp arg_t2.typ_mp) subst2 equiv ; (* contravariant *) let env = add_module (module_body_of_type (MPbound arg_id2) arg_t2) env in let env = match body_t1 with SEBstruct str -> let env = shallow_remove_module mtb1.typ_mp env in add_module {mod_mp = mtb1.typ_mp; mod_expr = None; mod_type = body_t1; mod_type_alg= None; mod_constraints=mtb1.typ_constraints; mod_retroknowledge = []; mod_delta = mtb1.typ_delta} env | _ -> env in check_structure env body_t1 body_t2 equiv (join (map_mbid arg_id1 (MPbound arg_id2)) subst1) subst2 | _ , _ -> error_incompatible_modtypes mtb1 mtb2 in if mtb1'== mtb2' then () else check_structure env mtb1' mtb2' equiv subst1 subst2 let check_subtypes env sup super = check_modtypes env (strengthen sup sup.typ_mp) super empty_subst (map_mp super.typ_mp sup.typ_mp) false coq-8.4pl2/checker/closure.mli0000640000175000001440000001271212010532755015415 0ustar 
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a (*s Delta implies all consts (both global (= by [kernel_name]) and local (= by [Rel] or [Var])), all evars, and letin's. Rem: reduction of a Rel/Var bound to a term is Delta, but reduction of a LetIn expression is Letin reduction *) type transparent_state = Idpred.t * Cpred.t val all_opaque : transparent_state val all_transparent : transparent_state val is_transparent_variable : transparent_state -> variable -> bool val is_transparent_constant : transparent_state -> constant -> bool (* Sets of reduction kinds. *) module type RedFlagsSig = sig type reds type red_kind (* The different kinds of reduction *) val fBETA : red_kind val fDELTA : red_kind val fIOTA : red_kind val fZETA : red_kind val fCONST : constant -> red_kind val fVAR : identifier -> red_kind (* No reduction at all *) val no_red : reds (* Adds a reduction kind to a set *) val red_add : reds -> red_kind -> reds (* Build a reduction set from scratch = iter [red_add] on [no_red] *) val mkflags : red_kind list -> reds (* Tests if a reduction kind is set *) val red_set : reds -> red_kind -> bool end module RedFlags : RedFlagsSig open RedFlags val betadeltaiota : reds val betaiotazeta : reds val betadeltaiotanolet : reds (***********************************************************************) type table_key = | ConstKey of constant | VarKey of identifier | RelKey of int type 'a infos val ref_value_cache: 'a infos -> table_key -> 'a option val create: ('a infos -> constr -> 'a) -> reds -> env -> 'a infos (************************************************************************) (*s Lazy reduction. *) (* [fconstr] is the type of frozen constr *) type fconstr (* [fconstr] can be accessed by using the function [fterm_of] and by matching on type [fterm] *) type fterm = | FRel of int | FAtom of constr (* Metas and Sorts *) | FCast of fconstr * cast_kind * fconstr | FFlex of table_key | FInd of inductive | FConstruct of constructor | FApp of fconstr * fconstr array | FFix of fixpoint * fconstr subs | FCoFix of cofixpoint * fconstr subs | FCases of case_info * fconstr * fconstr * fconstr array | FLambda of int * (name * constr) list * constr * fconstr subs | FProd of name * fconstr * fconstr | FLetIn of name * fconstr * fconstr * constr * fconstr subs | FEvar of existential_key * fconstr array | FLIFT of int * fconstr | FCLOS of constr * fconstr subs | FLOCKED (************************************************************************) (*s A [stack] is a context of arguments, arguments are pushed by [append_stack] one array at a time but popped with [decomp_stack] one by one *) type stack_member = | Zapp of fconstr array | Zcase of case_info * fconstr * fconstr array | Zfix of fconstr * stack | Zshift of int | Zupdate of fconstr and stack = stack_member list val append_stack : fconstr array -> stack -> stack val eta_expand_stack : stack -> stack (* To lazy reduce a constr, create a [clos_infos] with [create_clos_infos], inject the term to reduce with [inject]; then use a reduction function *) val inject : constr -> fconstr val fterm_of : fconstr -> fterm val term_of_fconstr : fconstr -> constr val destFLambda : (fconstr subs -> constr -> fconstr) -> fconstr -> name * fconstr * fconstr (* Global and local constant cache *) type clos_infos val create_clos_infos : reds -> env -> clos_infos (* Reduction function *) (* [whd_val] is for weak head normalization *) val whd_val : clos_infos -> fconstr 
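(* [Editor's sketch, not part of the original interface]  How an
   implementation is expected to use the [RedFlagsSig] operations declared
   above, assuming [RedFlags] is opened as in this interface;
   [betaiota_only] is a hypothetical name:

     open RedFlags
     let betaiota_only = mkflags [fBETA; fIOTA]
     let b1 = red_set betaiota_only fBETA    (* true *)
     let b2 = red_set betaiota_only fDELTA   (* false *)
*)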
-> constr (* [whd_stack] performs weak head normalization in a given stack. It stops whenever a reduction is blocked. *) val whd_stack : clos_infos -> fconstr -> stack -> fconstr * stack (* Conversion auxiliary functions to do step by step normalisation *) (* [unfold_reference] unfolds references in a [fconstr] *) val unfold_reference : clos_infos -> table_key -> fconstr option (* [mind_equiv] checks whether two inductive types are intentionally equal *) val mind_equiv_infos : clos_infos -> inductive -> inductive -> bool val eq_table_key : table_key -> table_key -> bool (************************************************************************) (*i This is for lazy debug *) val lift_fconstr : int -> fconstr -> fconstr val lift_fconstr_vect : int -> fconstr array -> fconstr array val mk_clos : fconstr subs -> constr -> fconstr val mk_clos_vect : fconstr subs -> constr array -> fconstr array val mk_clos_deep : (fconstr subs -> constr -> fconstr) -> fconstr subs -> constr -> fconstr val kni: clos_infos -> fconstr -> stack -> fconstr * stack val knr: clos_infos -> fconstr -> stack -> fconstr * stack val to_constr : (lift -> fconstr -> constr) -> lift -> fconstr -> constr val optimise_closure : fconstr subs -> constr -> fconstr subs * constr (* End of cbn debug section i*) coq-8.4pl2/checker/type_errors.mli0000640000175000001440000000656612010532755016330 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int -> 'a val error_unbound_var : env -> variable -> 'a val error_not_type : env -> unsafe_judgment -> 'a val error_assumption : env -> unsafe_judgment -> 'a val error_reference_variables : env -> constr -> 'a val error_elim_arity : env -> inductive -> sorts_family list -> constr -> unsafe_judgment -> (sorts_family * sorts_family * arity_error) option -> 'a val error_case_not_inductive : env -> unsafe_judgment -> 'a val error_number_branches : env -> unsafe_judgment -> int -> 'a val error_ill_formed_branch : env -> constr -> int -> constr -> constr -> 'a val error_actual_type : env -> unsafe_judgment -> constr -> 'a val error_cant_apply_not_functional : env -> unsafe_judgment -> unsafe_judgment array -> 'a val error_cant_apply_bad_type : env -> int * constr * constr -> unsafe_judgment -> unsafe_judgment array -> 'a val error_ill_formed_rec_body : env -> guard_error -> name array -> int -> 'a val error_ill_typed_rec_body : env -> int -> name array -> unsafe_judgment array -> constr array -> 'a coq-8.4pl2/checker/modops.ml0000640000175000001440000001350212010532755015067 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (arg_id,arg_t,body_t) | _ -> error_not_a_functor mtb let module_body_of_type mp mtb = { mod_mp = mp; mod_type = mtb.typ_expr; mod_type_alg = mtb.typ_expr_alg; mod_expr = None; mod_constraints = mtb.typ_constraints; mod_delta = mtb.typ_delta; mod_retroknowledge = []} let rec add_signature mp sign resolver env = let add_one env (l,elem) = let kn = make_kn mp empty_dirpath l in let con = constant_of_kn kn in let mind = mind_of_delta resolver (mind_of_kn kn) in match elem with | SFBconst cb -> (* let con = constant_of_delta resolver con in*) Environ.add_constant con cb env | SFBmind mib -> (* let mind = mind_of_delta resolver mind in*) Environ.add_mind mind mib env | SFBmodule mb -> add_module mb env (* adds components as well *) | SFBmodtype mtb -> Environ.add_modtype 
mtb.typ_mp mtb env in List.fold_left add_one env sign and add_module mb env = let mp = mb.mod_mp in let env = Environ.shallow_add_module mp mb env in match mb.mod_type with | SEBstruct (sign) -> add_signature mp sign mb.mod_delta env | SEBfunctor _ -> env | _ -> anomaly "Modops:the evaluation of the structure failed " let strengthen_const mp_from l cb resolver = match cb.const_body with | Def _ -> cb | _ -> let con = make_con mp_from empty_dirpath l in (* let con = constant_of_delta resolver con in*) { cb with const_body = Def (Declarations.from_val (Const con)) } let rec strengthen_mod mp_from mp_to mb = if Declarations.mp_in_delta mb.mod_mp mb.mod_delta then mb else match mb.mod_type with | SEBstruct (sign) -> let resolve_out,sign_out = strengthen_sig mp_from sign mp_to mb.mod_delta in { mb with mod_expr = Some (SEBident mp_to); mod_type = SEBstruct(sign_out); mod_type_alg = mb.mod_type_alg; mod_constraints = mb.mod_constraints; mod_delta = resolve_out(*add_mp_delta_resolver mp_from mp_to (add_delta_resolver mb.mod_delta resolve_out)*); mod_retroknowledge = mb.mod_retroknowledge} | SEBfunctor _ -> mb | _ -> anomaly "Modops:the evaluation of the structure failed " and strengthen_sig mp_from sign mp_to resolver = match sign with | [] -> empty_delta_resolver,[] | (l,SFBconst cb) :: rest -> let item' = l,SFBconst (strengthen_const mp_from l cb resolver) in let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out,item'::rest' | (_,SFBmind _ as item):: rest -> let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out,item::rest' | (l,SFBmodule mb) :: rest -> let mp_from' = MPdot (mp_from,l) in let mp_to' = MPdot(mp_to,l) in let mb_out = strengthen_mod mp_from' mp_to' mb in let item' = l,SFBmodule (mb_out) in let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out (*add_delta_resolver resolve_out mb.mod_delta*), item':: rest' | (l,SFBmodtype mty as item) :: rest -> let resolve_out,rest' = strengthen_sig mp_from rest mp_to resolver in resolve_out,item::rest' let strengthen mtb mp = match mtb.typ_expr with | SEBstruct (sign) -> let resolve_out,sign_out = strengthen_sig mtb.typ_mp sign mp mtb.typ_delta in {mtb with typ_expr = SEBstruct(sign_out); typ_delta = resolve_out(*add_delta_resolver mtb.typ_delta (add_mp_delta_resolver mtb.typ_mp mp resolve_out)*)} | SEBfunctor _ -> mtb | _ -> anomaly "Modops:the evaluation of the structure failed " let subst_and_strengthen mb mp = strengthen_mod mb.mod_mp mp (subst_module (map_mp mb.mod_mp mp) mb) let module_type_of_module mp mb = match mp with Some mp -> strengthen { typ_mp = mp; typ_expr = mb.mod_type; typ_expr_alg = None; typ_constraints = mb.mod_constraints; typ_delta = mb.mod_delta} mp | None -> {typ_mp = mb.mod_mp; typ_expr = mb.mod_type; typ_expr_alg = None; typ_constraints = mb.mod_constraints; typ_delta = mb.mod_delta} coq-8.4pl2/checker/inductive.mli0000640000175000001440000000566312010532755015742 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> inductive * constr list type mind_specif = mutual_inductive_body * one_inductive_body (*s Fetching information in the environment about an inductive type. Raises [Not_found] if the inductive type is not found. 
*) val lookup_mind_specif : env -> inductive -> mind_specif val type_of_inductive : env -> mind_specif -> constr (* Return type as quoted by the user *) val type_of_constructor : constructor -> mind_specif -> constr val arities_of_specif : mutual_inductive -> mind_specif -> constr array (* [type_case_branches env (I,args) (p:A) c] computes useful types about the following Cases expression:

    Cases (c :: (I args)) of b1..bn end It computes the type of every branch (pattern variables are introduced by products) and the type for the whole expression. *) val type_case_branches : env -> inductive * constr list -> constr * constr -> constr -> constr array * constr (* Check a [case_info] actually correspond to a Case expression on the given inductive type. *) val check_case_info : env -> inductive -> case_info -> unit (*s Guard conditions for fix and cofix-points. *) val check_fix : env -> fixpoint -> unit val check_cofix : env -> cofixpoint -> unit (*s Support for sort-polymorphic inductive types *) val type_of_inductive_knowing_parameters : env -> one_inductive_body -> constr array -> constr val max_inductive_sort : sorts array -> Univ.universe val instantiate_universes : env -> rel_context -> polymorphic_arity -> constr array -> rel_context * sorts (***************************************************************) (* Debug *) type size = Large | Strict type subterm_spec = Subterm of (size * wf_paths) | Dead_code | Not_subterm type guard_env = { env : env; (* dB of last fixpoint *) rel_min : int; (* inductive of recarg of each fixpoint *) inds : inductive array; (* the recarg information of inductive family *) recvec : wf_paths array; (* dB of variables denoting subterms *) genv : subterm_spec Lazy.t list; } type stack_element = |SClosure of guard_env*constr |SArg of subterm_spec Lazy.t val subterm_specif : guard_env -> stack_element list -> constr -> subterm_spec val branches_specif : guard_env -> subterm_spec Lazy.t -> case_info -> subterm_spec Lazy.t list array coq-8.4pl2/checker/checker.ml0000640000175000001440000003006612106745424015203 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* =len then dirs else let pos = try String.index_from s n '.' 
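(* [Editor's note, illustrative example not in the original source]  The
   recursion being defined here accumulates the components of a dotted name
   in reverse order, so one expects for instance:

     parse_dir "Coq.Init.Datatypes" = ["Datatypes"; "Init"; "Coq"]
     path_of_string "Coq.Init.Datatypes"
       = { dirpath = ["Init"; "Coq"]; basename = "Datatypes" }

   which appears to match the convention of [make_dirpath] (most local
   component first). *)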
with Not_found -> len in let dir = String.sub s n (pos-n) in decoupe_dirs (dir::dirs) (pos+1) in decoupe_dirs [] 0 let dirpath_of_string s = match parse_dir s with [] -> Check.default_root_prefix | dir -> make_dirpath (List.map id_of_string dir) let path_of_string s = match parse_dir s with [] -> invalid_arg "path_of_string" | l::dir -> {dirpath=dir; basename=l} let (/) = Filename.concat let get_version_date () = try let coqlib = Envars.coqlib () in let ch = open_in (Filename.concat coqlib "revision") in let ver = input_line ch in let rev = input_line ch in (ver,rev) with _ -> (Coq_config.version,Coq_config.date) let print_header () = let (ver,rev) = (get_version_date ()) in Printf.printf "Welcome to Chicken %s (%s)\n" ver rev; flush stdout (* Adding files to Coq loadpath *) let add_path ~unix_path:dir ~coq_root:coq_dirpath = if exists_dir dir then begin Check.add_load_path (dir,coq_dirpath) end else msg_warning (str ("Cannot open " ^ dir)) let convert_string d = try id_of_string d with _ -> if_verbose warning ("Directory "^d^" cannot be used as a Coq identifier (skipped)"); flush_all (); failwith "caught" let add_rec_path ~unix_path ~coq_root = if exists_dir unix_path then let dirs = all_subdirs ~unix_path in let prefix = repr_dirpath coq_root in let convert_dirs (lp,cp) = (lp,make_dirpath (List.map convert_string (List.rev cp)@prefix)) in let dirs = map_succeed convert_dirs dirs in List.iter Check.add_load_path dirs; Check.add_load_path (unix_path, coq_root) else msg_warning (str ("Cannot open " ^ unix_path)) (* By the option -include -I or -R of the command line *) let includes = ref [] let push_include (s, alias) = includes := (s,alias,false) :: !includes let push_rec_include (s, alias) = includes := (s,alias,true) :: !includes let set_default_include d = push_include (d, Check.default_root_prefix) let set_include d p = let p = dirpath_of_string p in push_include (d,p) let set_rec_include d p = let p = dirpath_of_string p in push_rec_include(d,p) (* Initializes the LoadPath *) let init_load_path () = let coqlib = Envars.coqlib () in let user_contrib = coqlib/"user-contrib" in let xdg_dirs = Envars.xdg_dirs in let coqpath = Envars.coqpath in let plugins = coqlib/"plugins" in (* NOTE: These directories are searched from last to first *) (* first standard library *) add_rec_path ~unix_path:(coqlib/"theories") ~coq_root:(Names.make_dirpath[coq_root]); (* then plugins *) add_rec_path ~unix_path:plugins ~coq_root:(Names.make_dirpath [coq_root]); (* then user-contrib *) if Sys.file_exists user_contrib then add_rec_path ~unix_path:user_contrib ~coq_root:Check.default_root_prefix; (* then directories in XDG_DATA_DIRS and XDG_DATA_HOME *) List.iter (fun s -> add_rec_path ~unix_path:s ~coq_root:Check.default_root_prefix) xdg_dirs; (* then directories in COQPATH *) List.iter (fun s -> add_rec_path ~unix_path:s ~coq_root:Check.default_root_prefix) coqpath; (* then current directory *) add_path ~unix_path:"." 
~coq_root:Check.default_root_prefix; (* additional loadpath, given with -I -include -R options *) List.iter (fun (unix_path, coq_root, reci) -> if reci then add_rec_path ~unix_path ~coq_root else add_path ~unix_path ~coq_root) (List.rev !includes); includes := [] let set_debug () = Flags.debug := true let engagement = ref None let set_engagement c = engagement := Some c let engage () = match !engagement with Some c -> Safe_typing.set_engagement c | None -> () let admit_list = ref ([] : section_path list) let add_admit s = admit_list := path_of_string s :: !admit_list let norec_list = ref ([] : section_path list) let add_norec s = norec_list := path_of_string s :: !norec_list let compile_list = ref ([] : section_path list) let add_compile s = compile_list := path_of_string s :: !compile_list (*s Parsing of the command line. We no longer use [Arg.parse], in order to use share [Usage.print_usage] between coqtop and coqc. *) let compile_files () = Check.recheck_library ~norec:(List.rev !norec_list) ~admit:(List.rev !admit_list) ~check:(List.rev !compile_list) let version () = Printf.printf "The Coq Proof Checker, version %s (%s)\n" Coq_config.version Coq_config.date; Printf.printf "compiled on %s\n" Coq_config.compile_date; exit 0 (* print the usage of coqtop (or coqc) on channel co *) let print_usage_channel co command = output_string co command; output_string co "coqchk options are:\n"; output_string co " -I dir -as coqdir map physical dir to logical coqdir\ \n -I dir map directory dir to the empty logical path\ \n -include dir (idem)\ \n -R dir -as coqdir recursively map physical dir to logical coqdir\ \n -R dir coqdir (idem)\ \n\ \n -admit module load module and dependencies without checking\ \n -norec module check module but admit dependencies without checking\ \n\ \n -where print coqchk's standard library location and exit\ \n -v print coqchk version and exit\ \n -boot boot mode\ \n -o, --output-context print the list of assumptions\ \n -m, --memory print the maximum heap size\ \n -silent disable trace of constants being checked\ \n\ \n -impredicative-set set sort Set impredicative\ \n\ \n -h, --help print this list of options\ \n" (* print the usage on standard error *) let print_usage = print_usage_channel stderr let print_usage_coqtop () = print_usage "Usage: coqchk modules\n\n" let usage () = print_usage_coqtop (); flush stderr; exit 1 open Type_errors let anomaly_string () = str "Anomaly: " let report () = (str "." 
++ spc () ++ str "Please report.") let print_loc loc = if loc = dummy_loc then (str"") else let loc = unloc loc in (int (fst loc) ++ str"-" ++ int (snd loc)) let guill s = "\""^s^"\"" let where s = if !Flags.debug then (str"in " ++ str s ++ str":" ++ spc ()) else (mt ()) let rec explain_exn = function | Stream.Failure -> hov 0 (anomaly_string () ++ str "uncaught Stream.Failure.") | Stream.Error txt -> hov 0 (str "Syntax error: " ++ str txt) | Token.Error txt -> hov 0 (str "Syntax error: " ++ str txt) | Sys_error msg -> hov 0 (anomaly_string () ++ str "uncaught exception Sys_error " ++ str (guill msg) ++ report() ) | UserError(s,pps) -> hov 1 (str "User error: " ++ where s ++ pps) | Out_of_memory -> hov 0 (str "Out of memory") | Stack_overflow -> hov 0 (str "Stack overflow") | Anomaly (s,pps) -> hov 1 (anomaly_string () ++ where s ++ pps ++ report ()) | Match_failure(filename,pos1,pos2) -> hov 1 (anomaly_string () ++ str "Match failure in file " ++ str (guill filename) ++ str " at line " ++ int pos1 ++ str " character " ++ int pos2 ++ report ()) | Not_found -> hov 0 (anomaly_string () ++ str "uncaught exception Not_found" ++ report ()) | Failure s -> hov 0 (str "Failure: " ++ str s ++ report ()) | Invalid_argument s -> hov 0 (anomaly_string () ++ str "uncaught exception Invalid_argument " ++ str (guill s) ++ report ()) | Sys.Break -> hov 0 (fnl () ++ str "User interrupt.") | Univ.UniverseInconsistency (o,u,v) -> let msg = if !Flags.debug (*!Constrextern.print_universes*) then spc() ++ str "(cannot enforce" ++ spc() ++ (*Univ.pr_uni u ++*) spc() ++ str (match o with Univ.Lt -> "<" | Univ.Le -> "<=" | Univ.Eq -> "=") ++ spc() ++ (*Univ.pr_uni v ++*) str")" else mt() in hov 0 (str "Error: Universe inconsistency" ++ msg ++ str ".") | TypeError(ctx,te) -> (* hov 0 (str "Error:" ++ spc () ++ Himsg.explain_type_error ctx *) (* te)*) hov 0 (str "Type error") | Indtypes.InductiveError e -> hov 0 (str "Error related to inductive types") (* let ctx = Check.get_env() in hov 0 (str "Error:" ++ spc () ++ Himsg.explain_inductive_error ctx e)*) | Loc.Exc_located (loc,exc) -> hov 0 ((if loc = dummy_loc then (mt ()) else (str"At location " ++ print_loc loc ++ str":" ++ fnl ())) ++ explain_exn exc) | Assert_failure (s,b,e) -> hov 0 (anomaly_string () ++ str "assert failure" ++ spc () ++ (if s = "" then mt () else (str ("(file \"" ^ s ^ "\", line ") ++ int b ++ str ", characters " ++ int e ++ str "-" ++ int (e+6) ++ str ")")) ++ report ()) | reraise -> hov 0 (anomaly_string () ++ str "Uncaught exception " ++ str (Printexc.to_string reraise)++report()) let parse_args argv = let rec parse = function | [] -> () | "-impredicative-set" :: rem -> set_engagement Declarations.ImpredicativeSet; parse rem | "-coqlib" :: s :: rem -> if not (exists_dir s) then (msgnl (str ("Directory '"^s^"' does not exist")); exit 1); Flags.coqlib := s; Flags.coqlib_spec := true; parse rem | ("-I"|"-include") :: d :: "-as" :: p :: rem -> set_include d p; parse rem | ("-I"|"-include") :: d :: "-as" :: [] -> usage () | ("-I"|"-include") :: d :: rem -> set_default_include d; parse rem | ("-I"|"-include") :: [] -> usage () | "-R" :: d :: "-as" :: p :: rem -> set_rec_include d p;parse rem | "-R" :: d :: "-as" :: [] -> usage () | "-R" :: d :: p :: rem -> set_rec_include d p;parse rem | "-R" :: ([] | [_]) -> usage () | "-debug" :: rem -> set_debug (); parse rem | "-where" :: _ -> print_endline (Envars.coqlib ()); exit 0 | ("-?"|"-h"|"-H"|"-help"|"--help") :: _ -> usage () | ("-v"|"--version") :: _ -> version () | "-boot" :: rem -> boot := true; 
parse rem | ("-m" | "--memory") :: rem -> Check_stat.memory_stat := true; parse rem | ("-o" | "--output-context") :: rem -> Check_stat.output_context := true; parse rem | "-no-hash-consing" :: rem -> Flags.hash_cons_proofs := false; parse rem | "-admit" :: s :: rem -> add_admit s; parse rem | "-admit" :: [] -> usage () | "-norec" :: s :: rem -> add_norec s; parse rem | "-norec" :: [] -> usage () | "-silent" :: rem -> Flags.make_silent true; parse rem | s :: _ when s<>"" && s.[0]='-' -> msgnl (str "Unknown option " ++ str s); exit 1 | s :: rem -> add_compile s; parse rem in try parse (List.tl (Array.to_list argv)) with | UserError(_,s) as e -> begin try Stream.empty s; exit 1 with Stream.Failure -> msgnl (explain_exn e); exit 1 end | e -> begin msgnl (explain_exn e); exit 1 end (* To prevent from doing the initialization twice *) let initialized = ref false let init_with_argv argv = if not !initialized then begin initialized := true; Sys.catch_break false; (* Ctrl-C is fatal during the initialisation *) try parse_args argv; if_verbose print_header (); init_load_path (); engage (); with e -> flush_all(); message "Error during initialization :"; msgnl (explain_exn e); exit 1 end let init() = init_with_argv Sys.argv let run () = try compile_files (); flush_all() with e -> (flush_all(); Pp.ppnl(explain_exn e); flush_all(); exit 1) let start () = init(); run(); Check_stat.stats(); exit 0 coq-8.4pl2/checker/reduction.mli0000640000175000001440000000345112010532755015735 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr val whd_betadeltaiota : env -> constr -> constr val whd_betadeltaiota_nolet : env -> constr -> constr (************************************************************************) (*s conversion functions *) exception NotConvertible exception NotConvertibleVect of int type 'a conversion_function = env -> 'a -> 'a -> unit type conv_pb = CONV | CUMUL val conv : constr conversion_function val conv_leq : constr conversion_function val vm_conv : conv_pb -> constr conversion_function (************************************************************************) (* Builds an application node, reducing beta redexes it may produce. *) val beta_appvect : constr -> constr array -> constr (* Pseudo-reduction rule Prod(x,A,B) a --> B[x\a] *) val hnf_prod_applist : env -> constr -> constr list -> constr (************************************************************************) (*s Recognizing products and arities modulo reduction *) val dest_prod : env -> constr -> rel_context * constr val dest_prod_assum : env -> constr -> rel_context * constr val dest_arity : env -> constr -> arity coq-8.4pl2/checker/Makefile0000640000175000001440000000401311010352461014662 0ustar notinusersOCAMLC=ocamlc OCAMLOPT=ocamlopt COQSRC=.. 
MLDIRS=-I $(COQSRC)/config -I $(COQSRC)/lib -I $(COQSRC)/kernel -I +camlp4 BYTEFLAGS=$(MLDIRS) OPTFLAGS=$(MLDIRS) CHECKERNAME=coqchk BINARIES=../bin/$(CHECKERNAME)$(EXE) ../bin/$(CHECKERNAME).opt$(EXE) MCHECKERLOCAL :=\ declarations.cmo environ.cmo \ closure.cmo reduction.cmo \ type_errors.cmo \ modops.cmo \ inductive.cmo typeops.cmo \ indtypes.cmo subtyping.cmo mod_checking.cmo \ validate.cmo \ safe_typing.cmo check.cmo \ check_stat.cmo checker.cmo MCHECKER:=\ $(COQSRC)/config/coq_config.cmo \ $(COQSRC)/lib/pp_control.cmo $(COQSRC)/lib/pp.cmo $(COQSRC)/lib/compat.cmo \ $(COQSRC)/lib/util.cmo $(COQSRC)/lib/option.cmo $(COQSRC)/lib/hashcons.cmo \ $(COQSRC)/lib/system.cmo $(COQSRC)/lib/flags.cmo \ $(COQSRC)/lib/predicate.cmo $(COQSRC)/lib/rtree.cmo \ $(COQSRC)/kernel/names.cmo $(COQSRC)/kernel/univ.cmo \ $(COQSRC)/kernel/esubst.cmo term.cmo \ $(MCHECKERLOCAL) all: $(BINARIES) byte : ../bin/$(CHECKERNAME)$(EXE) opt : ../bin/$(CHECKERNAME).opt$(EXE) check.cma: $(MCHECKERLOCAL) ocamlc $(BYTEFLAGS) -a -o $@ $(MCHECKER) check.cmxa: $(MCHECKERLOCAL:.cmo=.cmx) ocamlopt $(OPTFLAGS) -a -o $@ $(MCHECKER:.cmo=.cmx) ../bin/$(CHECKERNAME)$(EXE): check.cma ocamlc $(BYTEFLAGS) -o $@ unix.cma gramlib.cma check.cma main.ml ../bin/$(CHECKERNAME).opt$(EXE): check.cmxa ocamlopt $(OPTFLAGS) -o $@ unix.cmxa gramlib.cmxa check.cmxa main.ml stats: @echo STRUCTURE @wc names.ml term.ml declarations.ml environ.ml type_errors.ml @echo @echo REDUCTION @-wc esubst.ml closure.ml reduction.ml @echo @echo TYPAGE @wc univ.ml inductive.ml indtypes.ml typeops.ml safe_typing.ml @echo @echo MODULES @wc modops.ml subtyping.ml @echo @echo INTERFACE @wc check*.ml main.ml @echo @echo TOTAL @wc *.ml | tail -1 .SUFFIXES:.ml .mli .cmi .cmo .cmx .ml.cmo: $(OCAMLC) -c $(BYTEFLAGS) $< .ml.cmx: $(OCAMLOPT) -c $(OPTFLAGS) $< .mli.cmi: $(OCAMLC) -c $(BYTEFLAGS) $< depend:: ocamldep *.ml* > .depend clean:: rm -f *.cm* *.o *.a *~ $(BINARIES) -include .depend coq-8.4pl2/checker/.depend0000640000175000001440000000575311010352461014476 0ustar notinuserschecker.cmo: type_errors.cmi term.cmo safe_typing.cmi indtypes.cmi \ declarations.cmi check_stat.cmi check.cmo checker.cmx: type_errors.cmx term.cmx safe_typing.cmx indtypes.cmx \ declarations.cmx check_stat.cmx check.cmx check.cmo: safe_typing.cmi check.cmx: safe_typing.cmx check_stat.cmo: term.cmo safe_typing.cmi indtypes.cmi environ.cmo \ declarations.cmi check_stat.cmi check_stat.cmx: term.cmx safe_typing.cmx indtypes.cmx environ.cmx \ declarations.cmx check_stat.cmi closure.cmo: term.cmo environ.cmo closure.cmi closure.cmx: term.cmx environ.cmx closure.cmi closure.cmi: term.cmo environ.cmo declarations.cmo: term.cmo declarations.cmi declarations.cmx: term.cmx declarations.cmi declarations.cmi: term.cmo environ.cmo: term.cmo declarations.cmi environ.cmx: term.cmx declarations.cmx indtypes.cmo: typeops.cmi term.cmo reduction.cmi inductive.cmi environ.cmo \ declarations.cmi indtypes.cmi indtypes.cmx: typeops.cmx term.cmx reduction.cmx inductive.cmx environ.cmx \ declarations.cmx indtypes.cmi indtypes.cmi: typeops.cmi term.cmo environ.cmo declarations.cmi inductive.cmo: type_errors.cmi term.cmo reduction.cmi environ.cmo \ declarations.cmi inductive.cmi inductive.cmx: type_errors.cmx term.cmx reduction.cmx environ.cmx \ declarations.cmx inductive.cmi inductive.cmi: term.cmo environ.cmo declarations.cmi main.cmo: checker.cmo main.cmx: checker.cmx mod_checking.cmo: typeops.cmi term.cmo subtyping.cmi reduction.cmi modops.cmi \ inductive.cmi indtypes.cmi environ.cmo declarations.cmi mod_checking.cmx: 
typeops.cmx term.cmx subtyping.cmx reduction.cmx modops.cmx \ inductive.cmx indtypes.cmx environ.cmx declarations.cmx modops.cmo: term.cmo environ.cmo declarations.cmi modops.cmi modops.cmx: term.cmx environ.cmx declarations.cmx modops.cmi modops.cmi: term.cmo environ.cmo declarations.cmi reduction.cmo: term.cmo environ.cmo closure.cmi reduction.cmi reduction.cmx: term.cmx environ.cmx closure.cmx reduction.cmi reduction.cmi: term.cmo environ.cmo safe_typing.cmo: validate.cmo modops.cmi mod_checking.cmo environ.cmo \ declarations.cmi safe_typing.cmi safe_typing.cmx: validate.cmx modops.cmx mod_checking.cmx environ.cmx \ declarations.cmx safe_typing.cmi safe_typing.cmi: term.cmo environ.cmo declarations.cmi subtyping.cmo: typeops.cmi term.cmo reduction.cmi modops.cmi inductive.cmi \ environ.cmo declarations.cmi subtyping.cmi subtyping.cmx: typeops.cmx term.cmx reduction.cmx modops.cmx inductive.cmx \ environ.cmx declarations.cmx subtyping.cmi subtyping.cmi: term.cmo environ.cmo declarations.cmi type_errors.cmo: term.cmo environ.cmo type_errors.cmi type_errors.cmx: term.cmx environ.cmx type_errors.cmi type_errors.cmi: term.cmo environ.cmo typeops.cmo: type_errors.cmi term.cmo reduction.cmi inductive.cmi environ.cmo \ declarations.cmi typeops.cmi typeops.cmx: type_errors.cmx term.cmx reduction.cmx inductive.cmx environ.cmx \ declarations.cmx typeops.cmi typeops.cmi: term.cmo environ.cmo declarations.cmi coq-8.4pl2/checker/term.ml0000640000175000001440000003757312010532755014553 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* InProp | Prop Pos -> InSet | Type _ -> InType let val_sort = val_sum "sort" 0 [|[|val_enum "cnt" 2|];[|val_univ|]|] let val_sortfam = val_enum "sorts_family" 3 (********************************************************************) (* Constructions as implemented *) (********************************************************************) (* [constr array] is an instance matching definitional [named_context] in the same order (i.e. 
last argument first) *) type 'constr pexistential = existential_key * 'constr array type 'constr prec_declaration = name array * 'constr array * 'constr array type 'constr pfixpoint = (int array * int) * 'constr prec_declaration type 'constr pcofixpoint = int * 'constr prec_declaration let val_evar f = val_tuple ~name:"pexistential" [|val_int;val_array f|] let val_prec f = val_tuple ~name:"prec_declaration" [|val_array val_name; val_array f; val_array f|] let val_fix f = val_tuple ~name:"pfixpoint" [|val_tuple~name:"fix2"[|val_array val_int;val_int|];val_prec f|] let val_cofix f = val_tuple ~name:"pcofixpoint"[|val_int;val_prec f|] type cast_kind = VMcast | DEFAULTcast let val_cast = val_enum "cast_kind" 2 (*s*******************************************************************) (* The type of constructions *) type constr = | Rel of int | Var of identifier | Meta of metavariable | Evar of constr pexistential | Sort of sorts | Cast of constr * cast_kind * constr | Prod of name * constr * constr | Lambda of name * constr * constr | LetIn of name * constr * constr * constr | App of constr * constr array | Const of constant | Ind of inductive | Construct of constructor | Case of case_info * constr * constr * constr array | Fix of constr pfixpoint | CoFix of constr pcofixpoint let val_constr = val_rec_sum "constr" 0 (fun val_constr -> [| [|val_int|]; (* Rel *) [|val_id|]; (* Var *) [|val_int|]; (* Meta *) [|val_evar val_constr|]; (* Evar *) [|val_sort|]; (* Sort *) [|val_constr;val_cast;val_constr|]; (* Cast *) [|val_name;val_constr;val_constr|]; (* Prod *) [|val_name;val_constr;val_constr|]; (* Lambda *) [|val_name;val_constr;val_constr;val_constr|]; (* LetIn *) [|val_constr;val_array val_constr|]; (* App *) [|val_con|]; (* Const *) [|val_ind|]; (* Ind *) [|val_cstr|]; (* Construct *) [|val_ci;val_constr;val_constr;val_array val_constr|]; (* Case *) [|val_fix val_constr|]; (* Fix *) [|val_cofix val_constr|] (* CoFix *) |]) type existential = constr pexistential type rec_declaration = constr prec_declaration type fixpoint = constr pfixpoint type cofixpoint = constr pcofixpoint let rec strip_outer_cast c = match c with | Cast (c,_,_) -> strip_outer_cast c | _ -> c let rec collapse_appl c = match c with | App (f,cl) -> let rec collapse_rec f cl2 = match (strip_outer_cast f) with | App (g,cl1) -> collapse_rec g (Array.append cl1 cl2) | _ -> App (f,cl2) in collapse_rec f cl | _ -> c let decompose_app c = match collapse_appl c with | App (f,cl) -> (f, Array.to_list cl) | _ -> (c,[]) let applist (f,l) = App (f, Array.of_list l) (****************************************************************************) (* Functions for dealing with constr terms *) (****************************************************************************) (*********************) (* Occurring *) (*********************) let iter_constr_with_binders g f n c = match c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> () | Cast (c,_,t) -> f n c; f n t | Prod (_,t,c) -> f n t; f (g n) c | Lambda (_,t,c) -> f n t; f (g n) c | LetIn (_,b,t,c) -> f n b; f n t; f (g n) c | App (c,l) -> f n c; Array.iter (f n) l | Evar (_,l) -> Array.iter (f n) l | Case (_,p,c,bl) -> f n p; f n c; Array.iter (f n) bl | Fix (_,(_,tl,bl)) -> Array.iter (f n) tl; Array.iter (f (iterate g (Array.length tl) n)) bl | CoFix (_,(_,tl,bl)) -> Array.iter (f n) tl; Array.iter (f (iterate g (Array.length tl) n)) bl exception LocalOccur (* (closedn n M) raises FreeVar if a variable of height greater than n occurs in M, returns () otherwise *) 
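(* Illustrative sketch (terms invented for the example): [closed0 (Lambda (Anonymous, Sort (Prop Null), Rel 1))] is [true], since under one binder [Rel 1] refers to that binder, whereas [closed0 (Rel 1)] is [false] because the index points outside the term. *)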
let closedn n c = let rec closed_rec n c = match c with | Rel m -> if m>n then raise LocalOccur | _ -> iter_constr_with_binders succ closed_rec n c in try closed_rec n c; true with LocalOccur -> false (* [closed0 M] is true iff [M] is a (deBruijn) closed term *) let closed0 = closedn 0 (* (noccurn n M) returns true iff (Rel n) does NOT occur in term M *) let noccurn n term = let rec occur_rec n c = match c with | Rel m -> if m = n then raise LocalOccur | _ -> iter_constr_with_binders succ occur_rec n c in try occur_rec n term; true with LocalOccur -> false (* (noccur_between n m M) returns true iff (Rel p) does NOT occur in term M for n <= p < n+m *) let noccur_between n m term = let rec occur_rec n c = match c with | Rel(p) -> if n<=p && p iter_constr_with_binders succ occur_rec n c in try occur_rec n term; true with LocalOccur -> false (* Checking function for terms containing existential variables. The function [noccur_with_meta] considers the fact that each existential variable (as well as each isevar) in the term appears applied to its local context, which may contain the CoFix variables. These occurrences of CoFix variables are not considered *) let noccur_with_meta n m term = let rec occur_rec n c = match c with | Rel p -> if n<=p & p (match f with | (Cast (Meta _,_,_)| Meta _) -> () | _ -> iter_constr_with_binders succ occur_rec n c) | Evar (_, _) -> () | _ -> iter_constr_with_binders succ occur_rec n c in try (occur_rec n term; true) with LocalOccur -> false (*********************) (* Lifting *) (*********************) let map_constr_with_binders g f l c = match c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> c | Cast (c,k,t) -> Cast (f l c, k, f l t) | Prod (na,t,c) -> Prod (na, f l t, f (g l) c) | Lambda (na,t,c) -> Lambda (na, f l t, f (g l) c) | LetIn (na,b,t,c) -> LetIn (na, f l b, f l t, f (g l) c) | App (c,al) -> App (f l c, Array.map (f l) al) | Evar (e,al) -> Evar (e, Array.map (f l) al) | Case (ci,p,c,bl) -> Case (ci, f l p, f l c, Array.map (f l) bl) | Fix (ln,(lna,tl,bl)) -> let l' = iterate g (Array.length tl) l in Fix (ln,(lna,Array.map (f l) tl,Array.map (f l') bl)) | CoFix(ln,(lna,tl,bl)) -> let l' = iterate g (Array.length tl) l in CoFix (ln,(lna,Array.map (f l) tl,Array.map (f l') bl)) (* The generic lifting function *) let rec exliftn el c = match c with | Rel i -> Rel(reloc_rel i el) | _ -> map_constr_with_binders el_lift exliftn el c (* Lifting the binding depth across k bindings *) let liftn k n = match el_liftn (pred n) (el_shft k el_id) with | ELID -> (fun c -> c) | el -> exliftn el let lift k = liftn k 1 (*********************) (* Substituting *) (*********************) (* (subst1 M c) substitutes M for Rel(1) in c we generalise it to (substl [M1,...,Mn] c) which substitutes in parallel M1,...,Mn for respectively Rel(1),...,Rel(n) in c *) (* 1st : general case *) type info = Closed | Open | Unknown type 'a substituend = { mutable sinfo: info; sit: 'a } let rec lift_substituend depth s = match s.sinfo with | Closed -> s.sit | Open -> lift depth s.sit | Unknown -> s.sinfo <- if closed0 s.sit then Closed else Open; lift_substituend depth s let make_substituend c = { sinfo=Unknown; sit=c } let substn_many lamv n c = let lv = Array.length lamv in if lv = 0 then c else let rec substrec depth c = match c with | Rel k -> if k<=depth then c else if k-depth <= lv then lift_substituend depth lamv.(k-depth-1) else Rel (k-lv) | _ -> map_constr_with_binders succ substrec depth c in substrec n c let substnl laml n = substn_many (Array.map 
make_substituend (Array.of_list laml)) n let substl laml = substnl laml 0 let subst1 lam = substl [lam] (***************************************************************************) (* Type of assumptions and contexts *) (***************************************************************************) let val_ndecl = val_tuple ~name:"named_declaration"[|val_id;val_opt val_constr;val_constr|] let val_rdecl = val_tuple ~name:"rel_declaration"[|val_name;val_opt val_constr;val_constr|] let val_nctxt = val_list val_ndecl let val_rctxt = val_list val_rdecl type named_declaration = identifier * constr option * constr type rel_declaration = name * constr option * constr type named_context = named_declaration list let empty_named_context = [] let fold_named_context f l ~init = List.fold_right f l init type section_context = named_context type rel_context = rel_declaration list let empty_rel_context = [] let rel_context_length = List.length let rel_context_nhyps hyps = let rec nhyps acc = function | [] -> acc | (_,None,_)::hyps -> nhyps (1+acc) hyps | (_,Some _,_)::hyps -> nhyps acc hyps in nhyps 0 hyps let fold_rel_context f l ~init = List.fold_right f l init let map_context f l = let map_decl (n, body_o, typ as decl) = let body_o' = Option.smartmap f body_o in let typ' = f typ in if body_o' == body_o && typ' == typ then decl else (n, body_o', typ') in list_smartmap map_decl l let map_rel_context = map_context let extended_rel_list n hyps = let rec reln l p = function | (_,None,_) :: hyps -> reln (Rel (n+p) :: l) (p+1) hyps | (_,Some _,_) :: hyps -> reln l (p+1) hyps | [] -> l in reln [] 1 hyps (* Iterate lambda abstractions *) (* compose_lam [xn:Tn;..;x1:T1] b = [x1:T1]..[xn:Tn]b *) let compose_lam l b = let rec lamrec = function | ([], b) -> b | ((v,t)::l, b) -> lamrec (l, Lambda (v,t,b)) in lamrec (l,b) (* Transforms a lambda term [x1:T1]..[xn:Tn]T into the pair ([(xn,Tn);...;(x1,T1)],T), where T is not a lambda *) let decompose_lam = let rec lamdec_rec l c = match c with | Lambda (x,t,c) -> lamdec_rec ((x,t)::l) c | Cast (c,_,_) -> lamdec_rec l c | _ -> l,c in lamdec_rec [] (* Decompose lambda abstractions and lets, until finding n abstractions *) let decompose_lam_n_assum n = if n < 0 then error "decompose_lam_n_assum: integer parameter must be positive"; let rec lamdec_rec l n c = if n=0 then l,c else match c with | Lambda (x,t,c) -> lamdec_rec ((x,None,t) :: l) (n-1) c | LetIn (x,b,t,c) -> lamdec_rec ((x,Some b,t) :: l) n c | Cast (c,_,_) -> lamdec_rec l n c | c -> error "decompose_lam_n_assum: not enough abstractions" in lamdec_rec empty_rel_context n (* Iterate products, with or without lets *) (* Constructs either [(x:t)c] or [[x=b:t]c] *) let mkProd_or_LetIn (na,body,t) c = match body with | None -> Prod (na, t, c) | Some b -> LetIn (na, b, t, c) let it_mkProd_or_LetIn = List.fold_left (fun c d -> mkProd_or_LetIn d c) let decompose_prod_assum = let rec prodec_rec l c = match c with | Prod (x,t,c) -> prodec_rec ((x,None,t) :: l) c | LetIn (x,b,t,c) -> prodec_rec ((x,Some b,t) :: l) c | Cast (c,_,_) -> prodec_rec l c | _ -> l,c in prodec_rec empty_rel_context let decompose_prod_n_assum n = if n < 0 then error "decompose_prod_n_assum: integer parameter must be positive"; let rec prodec_rec l n c = if n=0 then l,c else match c with | Prod (x,t,c) -> prodec_rec ((x,None,t) :: l) (n-1) c | LetIn (x,b,t,c) -> prodec_rec ((x,Some b,t) :: l) (n-1) c | Cast (c,_,_) -> prodec_rec l n c | c -> error "decompose_prod_n_assum: not enough assumptions" in prodec_rec empty_rel_context n 
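(* Illustrative sketch (x, y, a, A, B, C are metavariables standing for arbitrary identifiers and terms): [decompose_prod_assum (Prod (Name x, A, LetIn (Name y, a, B, C)))] returns [([(Name y, Some a, B); (Name x, None, A)], C)], i.e. the binders innermost first, and [it_mkProd_or_LetIn C [(Name y, Some a, B); (Name x, None, A)]] rebuilds the original term. *)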
(***************************) (* Other term constructors *) (***************************) type arity = rel_context * sorts let mkArity (sign,s) = it_mkProd_or_LetIn (Sort s) sign let destArity = let rec prodec_rec l c = match c with | Prod (x,t,c) -> prodec_rec ((x,None,t)::l) c | LetIn (x,b,t,c) -> prodec_rec ((x,Some b,t)::l) c | Cast (c,_,_) -> prodec_rec l c | Sort s -> l,s | _ -> anomaly "destArity: not an arity" in prodec_rec [] let rec isArity c = match c with | Prod (_,_,c) -> isArity c | LetIn (_,b,_,c) -> isArity (subst1 b c) | Cast (c,_,_) -> isArity c | Sort _ -> true | _ -> false (*******************************) (* alpha conversion functions *) (*******************************) (* alpha conversion : ignore print names and casts *) let compare_constr f t1 t2 = match t1, t2 with | Rel n1, Rel n2 -> n1 = n2 | Meta m1, Meta m2 -> m1 = m2 | Var id1, Var id2 -> id1 = id2 | Sort s1, Sort s2 -> s1 = s2 | Cast (c1,_,_), _ -> f c1 t2 | _, Cast (c2,_,_) -> f t1 c2 | Prod (_,t1,c1), Prod (_,t2,c2) -> f t1 t2 & f c1 c2 | Lambda (_,t1,c1), Lambda (_,t2,c2) -> f t1 t2 & f c1 c2 | LetIn (_,b1,t1,c1), LetIn (_,b2,t2,c2) -> f b1 b2 & f t1 t2 & f c1 c2 | App (c1,l1), App (c2,l2) -> if Array.length l1 = Array.length l2 then f c1 c2 & array_for_all2 f l1 l2 else let (h1,l1) = decompose_app t1 in let (h2,l2) = decompose_app t2 in if List.length l1 = List.length l2 then f h1 h2 & List.for_all2 f l1 l2 else false | Evar (e1,l1), Evar (e2,l2) -> e1 = e2 & array_for_all2 f l1 l2 | Const c1, Const c2 -> eq_con_chk c1 c2 | Ind c1, Ind c2 -> eq_ind_chk c1 c2 | Construct (c1,i1), Construct (c2,i2) -> i1=i2 && eq_ind_chk c1 c2 | Case (_,p1,c1,bl1), Case (_,p2,c2,bl2) -> f p1 p2 & f c1 c2 & array_for_all2 f bl1 bl2 | Fix (ln1,(_,tl1,bl1)), Fix (ln2,(_,tl2,bl2)) -> ln1 = ln2 & array_for_all2 f tl1 tl2 & array_for_all2 f bl1 bl2 | CoFix(ln1,(_,tl1,bl1)), CoFix(ln2,(_,tl2,bl2)) -> ln1 = ln2 & array_for_all2 f tl1 tl2 & array_for_all2 f bl1 bl2 | _ -> false let rec eq_constr m n = (m==n) or compare_constr eq_constr m n let eq_constr m n = eq_constr m n (* to avoid tracing a recursive fun *) coq-8.4pl2/checker/safe_typing.ml0000640000175000001440000001557312010532755016110 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* () | _, None -> () | _, Some ImpredicativeSet -> error "Needs option -impredicative-set" (* Libraries = Compiled modules *) let report_clash f caller dir = let msg = str "compiled library " ++ str(string_of_dirpath caller) ++ spc() ++ str "makes inconsistent assumptions over library" ++ spc() ++ str(string_of_dirpath dir) ++ fnl() in f msg let check_imports f caller env needed = let check (dp,stamp) = try let actual_stamp = lookup_digest env dp in if stamp <> actual_stamp then report_clash f caller dp with Not_found -> error ("Reference to unknown module " ^ (string_of_dirpath dp)) in List.iter check needed type compiled_library = dir_path * module_body * (dir_path * Digest.t) list * engagement option (* Store the body of modules' opaque constants inside a table. This module is used during the serialization and deserialization of vo files. By adding an indirection to the opaque constant definitions, we gain the ability not to load them. As these constant definitions are usually big terms, we save a deserialization time as well as some memory space. 
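Concretely (an illustrative reading of the interface below, not a specification): a lightened library is a [compiled_library] in which each opaque body has been replaced by an integer key into a [table] of [constr_substituted]; [load table lib] undoes the indirection by looking every key up in [table] and grafting the corresponding body back in.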
*) module LightenLibrary : sig type table type lightened_compiled_library val load : table -> lightened_compiled_library -> compiled_library end = struct (* The table is implemented as an array of [constr_substituted]. Keys are hence integers. To avoid changing the [compiled_library] type, we brutally encode integers into [lazy_constr]. This isn't pretty, but shouldn't be dangerous since the produced structure [lightened_compiled_library] is abstract and only meant for writing to .vo via Marshal (which doesn't care about types). *) type table = constr_substituted array let key_of_lazy_constr (c:lazy_constr) = (Obj.magic c : int) (* To avoid any future misuse of the lightened library that could interpret encoded keys as real [constr_substituted], we hide these kind of values behind an abstract datatype. *) type lightened_compiled_library = compiled_library (* Map a [compiled_library] to another one by just updating the opaque term [t] to [on_opaque_const_body t]. *) let traverse_library on_opaque_const_body = let rec traverse_module mb = match mb.mod_expr with None -> { mb with mod_expr = None; mod_type = traverse_modexpr mb.mod_type; } | Some impl when impl == mb.mod_type-> let mtb = traverse_modexpr mb.mod_type in { mb with mod_expr = Some mtb; mod_type = mtb; } | Some impl -> { mb with mod_expr = Option.map traverse_modexpr mb.mod_expr; mod_type = traverse_modexpr mb.mod_type; } and traverse_struct struc = let traverse_body (l,body) = (l,match body with | (SFBconst cb) when is_opaque cb -> SFBconst {cb with const_body = on_opaque_const_body cb.const_body} | (SFBconst _ | SFBmind _ ) as x -> x | SFBmodule m -> SFBmodule (traverse_module m) | SFBmodtype m -> SFBmodtype ({m with typ_expr = traverse_modexpr m.typ_expr})) in List.map traverse_body struc and traverse_modexpr = function | SEBfunctor (mbid,mty,mexpr) -> SEBfunctor (mbid, ({mty with typ_expr = traverse_modexpr mty.typ_expr}), traverse_modexpr mexpr) | SEBident mp as x -> x | SEBstruct (struc) -> SEBstruct (traverse_struct struc) | SEBapply (mexpr,marg,u) -> SEBapply (traverse_modexpr mexpr,traverse_modexpr marg,u) | SEBwith (seb,wdcl) -> SEBwith (traverse_modexpr seb,wdcl) in fun (dp,mb,depends,s) -> (dp,traverse_module mb,depends,s) (* Loading is also a traversing that decodes the embedded keys that are inside the [lightened_library]. If the [load_proof] flag is set, we lookup inside the table to graft the [constr_substituted]. Otherwise, we set the [const_body] field to [None]. *) let load table lightened_library = let decode_key = function | Undef _ | Def _ -> assert false | OpaqueDef k -> let k = key_of_lazy_constr k in let body = try table.(k) with _ -> error "Error while retrieving an opaque body" in OpaqueDef (lazy_constr_from_val body) in traverse_library decode_key lightened_library end open Validate let val_deps = val_list (val_tuple ~name:"dep"[|val_dp;no_val|]) let val_vo = val_tuple ~name:"vo" [|val_dp;val_module;val_deps;val_opt val_eng|] (* This function should append a certificate to the .vo file. The digest must be part of the certicate to rule out attackers that could change the .vo file between the time it was read and the time the stamp is written. For the moment, .vo are not signed. 
*) let stamp_library file digest = () (* When the module is checked, digests do not need to match, but a warning is issued in case of mismatch *) let import file (dp,mb,depends,engmt as vo) digest = Validate.apply !Flags.debug val_vo vo; Flags.if_verbose msgnl (str "*** vo structure validated ***"); let env = !genv in check_imports msg_warning dp env depends; check_engagement env engmt; Mod_checking.check_module (add_constraints mb.mod_constraints env) mb.mod_mp mb; stamp_library file digest; (* We drop proofs once checked *) (* let mb = lighten_module mb in*) full_add_module dp mb digest (* When the module is admitted, digests *must* match *) let unsafe_import file (dp,mb,depends,engmt as vo) digest = if !Flags.debug then ignore vo; (*Validate.apply !Flags.debug val_vo vo;*) let env = !genv in check_imports (errorlabstrm"unsafe_import") dp env depends; check_engagement env engmt; (* We drop proofs once checked *) (* let mb = lighten_module mb in*) full_add_module dp mb digest coq-8.4pl2/checker/mod_checking.ml0000640000175000001440000003106512106745424016211 0ustar notinusers open Pp open Util open Names open Term open Inductive open Reduction open Typeops open Indtypes open Modops open Subtyping open Declarations open Environ (************************************************************************) (* Checking constants *) let refresh_arity ar = let ctxt, hd = decompose_prod_assum ar in match hd with Sort (Type u) when not (Univ.is_univ_variable u) -> let u' = Univ.fresh_local_univ() in mkArity (ctxt,Type u'), Univ.enforce_geq u' u Univ.empty_constraint | _ -> ar, Univ.empty_constraint let check_constant_declaration env kn cb = Flags.if_verbose msgnl (str " checking cst: " ++ prcon kn); (* let env = add_constraints cb.const_constraints env in*) let env' = check_named_ctxt env cb.const_hyps in (match cb.const_type with NonPolymorphicType ty -> let ty, cu = refresh_arity ty in let envty = add_constraints cu env' in let _ = infer_type envty ty in (match body_of_constant cb with | Some bd -> let j = infer env' (force_constr bd) in conv_leq envty j ty | None -> ()) | PolymorphicArity(ctxt,par) -> let _ = check_ctxt env ctxt in check_polymorphic_arity env ctxt par); add_constant kn cb env (************************************************************************) (* Checking modules *) exception Not_path let path_of_mexpr = function | SEBident mp -> mp | _ -> raise Not_path let is_modular = function | SFBmodule _ | SFBmodtype _ -> true | SFBconst _ | SFBmind _ -> false let rec list_split_assoc ((k,m) as km) rev_before = function | [] -> raise Not_found | (k',b)::after when k=k' && is_modular b = m -> rev_before,b,after | h::tail -> list_split_assoc km (h::rev_before) tail let check_definition_sub env cb1 cb2 = let check_type env t1 t2 = (* If the type of a constant is generated, it may mention non-variable algebraic universes that the general conversion algorithm is not ready to handle. Anyway, generated types of constants are functions of the body of the constant. If the bodies are the same in environments that are subtypes one of the other, the types are subtypes too (i.e. if Gamma <= Gamma', Gamma |- A |> T, Gamma |- A' |> T' and Gamma |- A=A' then T <= T'). 
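Concretely, [check_type] below exploits this: when the sort of the interface type is an algebraic (non-variable) universe and the sort of the implementation type is not a universe variable either, both arities are collapsed to [mkArity (ctx, Prop Null)], so that [conv_leq] only compares the two contexts.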
Hence they don't have to be checked again *) let t1,t2 = if isArity t2 then let (ctx2,s2) = destArity t2 in match s2 with | Type v when not (Univ.is_univ_variable v) -> (* The type in the interface is inferred and is made of algebraic universes *) begin try let (ctx1,s1) = dest_arity env t1 in match s1 with | Type u when not (Univ.is_univ_variable u) -> (* Both types are inferred, no need to recheck them. We cheat and collapse the types to Prop *) mkArity (ctx1,Prop Null), mkArity (ctx2,Prop Null) | Prop _ -> (* The type in the interface is inferred, it may be the case that the type in the implementation is smaller because the body is more reduced. We safely collapse the upper type to Prop *) mkArity (ctx1,Prop Null), mkArity (ctx2,Prop Null) | Type _ -> (* The type in the interface is inferred and the type in the implementation is not inferred or is inferred but from a more reduced body so that it is just a variable. Since constraints of the form "univ <= max(...)" are not expressible in the system of algebraic universes: we fail (the user has to use an explicit type in the interface *) raise Reduction.NotConvertible with UserError _ (* "not an arity" *) -> raise Reduction.NotConvertible end | _ -> t1,t2 else (t1,t2) in Reduction.conv_leq env t1 t2 in assert (cb1.const_hyps=[] && cb2.const_hyps=[]) ; (*Start by checking types*) let typ1 = Typeops.type_of_constant_type env cb1.const_type in let typ2 = Typeops.type_of_constant_type env cb2.const_type in check_type env typ1 typ2; (* In the spirit of subtyping.check_constant, we accept any implementations of parameters and opaques terms, as long as they have the right type *) (match cb2.const_body with | Undef _ | OpaqueDef _ -> () | Def lc2 -> (match cb1.const_body with | Def lc1 -> let c1 = force_constr lc1 in let c2 = force_constr lc2 in Reduction.conv env c1 c2 (* Coq only places transparent cb in With_definition_body *) | _ -> assert false)) let lookup_modtype mp env = try Environ.lookup_modtype mp env with Not_found -> failwith ("Unknown module type: "^string_of_mp mp) let lookup_module mp env = try Environ.lookup_module mp env with Not_found -> failwith ("Unknown module: "^string_of_mp mp) let rec check_with env mtb with_decl mp= match with_decl with | With_definition_body (idl,c) -> check_with_def env mtb (idl,c) mp; mtb | With_module_body (idl,mp1) -> check_with_mod env mtb (idl,mp1) mp; mtb and check_with_def env mtb (idl,c) mp = let sig_b = match mtb with | SEBstruct(sig_b) -> sig_b | _ -> error_signature_expected mtb in let id,idl = match idl with | [] -> assert false | id::idl -> id,idl in let l = label_of_id id in try let rev_before,spec,after = list_split_assoc (l,(idl<>[])) [] sig_b in let before = List.rev rev_before in let env' = Modops.add_signature mp before empty_delta_resolver env in if idl = [] then let cb = match spec with SFBconst cb -> cb | _ -> error_not_a_constant l in check_definition_sub env' c cb else let old = match spec with SFBmodule msb -> msb | _ -> error_not_a_module l in begin match old.mod_expr with | None -> check_with_def env' old.mod_type (idl,c) (MPdot(mp,l)) | Some msb -> error_a_generative_module_expected l end with Not_found -> error_no_such_label l | Reduction.NotConvertible -> error_with_incorrect l and check_with_mod env mtb (idl,mp1) mp = let sig_b = match mtb with | SEBstruct(sig_b) -> sig_b | _ -> error_signature_expected mtb in let id,idl = match idl with | [] -> assert false | id::idl -> id,idl in let l = label_of_id id in try let rev_before,spec,after = list_split_assoc (l,false) [] sig_b 
in let before = List.rev rev_before in let env' = Modops.add_signature mp before empty_delta_resolver env in if idl = [] then let _ = match spec with SFBmodule msb -> msb | _ -> error_not_a_module l in let (_:module_body) = (Environ.lookup_module mp1 env) in () else let old = match spec with SFBmodule msb -> msb | _ -> error_not_a_module l in begin match old.mod_expr with None -> check_with_mod env' old.mod_type (idl,mp1) (MPdot(mp,l)) | Some msb -> error_a_generative_module_expected l end with Not_found -> error_no_such_label l | Reduction.NotConvertible -> error_with_incorrect l and check_module_type env mty = let (_:struct_expr_body) = check_modtype env mty.typ_expr mty.typ_mp mty.typ_delta in () and check_module env mp mb = match mb.mod_expr, mb.mod_type with | None,mtb -> let (_:struct_expr_body) = check_modtype env mtb mb.mod_mp mb.mod_delta in () | Some mexpr, mtb when mtb==mexpr -> let (_:struct_expr_body) = check_modtype env mtb mb.mod_mp mb.mod_delta in () | Some mexpr, _ -> let sign = check_modexpr env mexpr mb.mod_mp mb.mod_delta in let (_:struct_expr_body) = check_modtype env mb.mod_type mb.mod_mp mb.mod_delta in let mtb1 = {typ_mp=mp; typ_expr=sign; typ_expr_alg=None; typ_constraints=Univ.empty_constraint; typ_delta = mb.mod_delta;} and mtb2 = {typ_mp=mp; typ_expr=mb.mod_type; typ_expr_alg=None; typ_constraints=Univ.empty_constraint; typ_delta = mb.mod_delta;} in let env = add_module (module_body_of_type mp mtb1) env in check_subtypes env mtb1 mtb2 and check_structure_field env mp lab res = function | SFBconst cb -> let c = make_con mp empty_dirpath lab in check_constant_declaration env c cb | SFBmind mib -> let kn = make_mind mp empty_dirpath lab in let kn = mind_of_delta res kn in Indtypes.check_inductive env kn mib | SFBmodule msb -> let (_:unit) = check_module env (MPdot(mp,lab)) msb in Modops.add_module msb env | SFBmodtype mty -> check_module_type env mty; add_modtype (MPdot(mp,lab)) mty env and check_modexpr env mse mp_mse res = match mse with | SEBident mp -> let mb = lookup_module mp env in (subst_and_strengthen mb mp_mse).mod_type | SEBfunctor (arg_id, mtb, body) -> check_module_type env mtb ; let env' = add_module (module_body_of_type (MPbound arg_id) mtb) env in let sign = check_modexpr env' body mp_mse res in SEBfunctor (arg_id, mtb, sign) | SEBapply (f,m,cst) -> let sign = check_modexpr env f mp_mse res in let farg_id, farg_b, fbody_b = destr_functor env sign in let mp = try (path_of_mexpr m) with Not_path -> error_application_to_not_path m (* place for nondep_supertype *) in let mtb = module_type_of_module (Some mp) (lookup_module mp env) in check_subtypes env mtb farg_b; (subst_struct_expr (map_mbid farg_id mp) fbody_b) | SEBwith(mte, with_decl) -> let sign = check_modexpr env mte mp_mse res in let sign = check_with env sign with_decl mp_mse in sign | SEBstruct(msb) -> let (_:env) = List.fold_left (fun env (lab,mb) -> check_structure_field env mp_mse lab res mb) env msb in SEBstruct(msb) and check_modtype env mse mp_mse res = match mse with | SEBident mp -> let mtb = lookup_modtype mp env in mtb.typ_expr | SEBfunctor (arg_id, mtb, body) -> check_module_type env mtb; let env' = add_module (module_body_of_type (MPbound arg_id) mtb) env in let body = check_modtype env' body mp_mse res in SEBfunctor(arg_id,mtb,body) | SEBapply (f,m,cst) -> let sign = check_modtype env f mp_mse res in let farg_id, farg_b, fbody_b = destr_functor env sign in let mp = try (path_of_mexpr m) with Not_path -> error_application_to_not_path m (* place for nondep_supertype *) in let mtb = 
module_type_of_module (Some mp) (lookup_module mp env) in check_subtypes env mtb farg_b; subst_struct_expr (map_mbid farg_id mp) fbody_b | SEBwith(mte, with_decl) -> let sign = check_modtype env mte mp_mse res in let sign = check_with env sign with_decl mp_mse in sign | SEBstruct(msb) -> let (_:env) = List.fold_left (fun env (lab,mb) -> check_structure_field env mp_mse lab res mb) env msb in SEBstruct(msb) (* let rec add_struct_expr_constraints env = function | SEBident _ -> env | SEBfunctor (_,mtb,meb) -> add_struct_expr_constraints (add_modtype_constraints env mtb) meb | SEBstruct (_,structure_body) -> List.fold_left (fun env (l,item) -> add_struct_elem_constraints env item) env structure_body | SEBapply (meb1,meb2,cst) -> (* let g = Univ.merge_constraints cst Univ.initial_universes in msgnl(str"ADDING FUNCTOR APPLICATION CONSTRAINTS:"++fnl()++ Univ.pr_universes g++str"============="++fnl()); *) Environ.add_constraints cst (add_struct_expr_constraints (add_struct_expr_constraints env meb1) meb2) | SEBwith(meb,With_definition_body(_,cb))-> Environ.add_constraints cb.const_constraints (add_struct_expr_constraints env meb) | SEBwith(meb,With_module_body(_,_,cst))-> Environ.add_constraints cst (add_struct_expr_constraints env meb) and add_struct_elem_constraints env = function | SFBconst cb -> Environ.add_constraints cb.const_constraints env | SFBmind mib -> Environ.add_constraints mib.mind_constraints env | SFBmodule mb -> add_module_constraints env mb | SFBalias (mp,Some cst) -> Environ.add_constraints cst env | SFBalias (mp,None) -> env | SFBmodtype mtb -> add_modtype_constraints env mtb and add_module_constraints env mb = let env = match mb.mod_expr with | None -> env | Some meb -> add_struct_expr_constraints env meb in let env = match mb.mod_type with | None -> env | Some mtb -> add_struct_expr_constraints env mtb in Environ.add_constraints mb.mod_constraints env and add_modtype_constraints env mtb = add_struct_expr_constraints env mtb.typ_expr *) coq-8.4pl2/checker/modops.mli0000640000175000001440000000344312010532755015243 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_type_body -> module_body val module_type_of_module : module_path option -> module_body -> module_type_body val destr_functor : env -> struct_expr_body -> mod_bound_id * module_type_body * struct_expr_body val add_signature : module_path -> structure_body -> delta_resolver -> env -> env (* adds a module and its components, but not the constraints *) val add_module : module_body -> env -> env val strengthen : module_type_body -> module_path -> module_type_body val subst_and_strengthen : module_body -> module_path -> module_body val error_incompatible_modtypes : module_type_body -> module_type_body -> 'a val error_not_match : label -> structure_field_body -> 'a val error_with_incorrect : label -> 'a val error_no_such_label : label -> 'a val error_no_such_label_sub : label -> module_path -> 'a val error_signature_expected : struct_expr_body -> 'a val error_not_a_constant : label -> 'a val error_not_a_module : label -> 'a val error_a_generative_module_expected : label -> 'a val error_application_to_not_path : struct_expr_body -> 'a coq-8.4pl2/checker/include0000640000175000001440000001126111337465505014613 0ustar notinusers(* -*-tuareg-*- *) (* Caml script to include for debugging the checker. 
Usage: from the checker/ directory launch ocaml toplevel and then type #use"include";; This command loads the relevent modules, defines some pretty printers, and provides functions to interactively check modules (mainly run_l and norec). *) #cd "..";; #directory "lib";; #directory "kernel";; #directory "checker";; #directory "+camlp4";; #directory "+camlp5";; #load "unix.cma";; #load "str.cma";; #load "gramlib.cma";; (*#load "toplevellib.cma";; #directory "/usr/lib/ocaml/compiler-libs/utils";; let _ = Clflags.recursive_types:=true;; *) #load "check.cma";; open Typeops;; open Check;; open Pp;; open Util;; open Names;; open Term;; open Environ;; open Declarations;; open Mod_checking;; let pr_id id = str(string_of_id id) let pr_na = function Name id -> pr_id id | _ -> str"_";; let prdp dp = pp(str(string_of_dirpath dp));; (* let prc c = pp(Himsg.pr_lconstr_env (Check.get_env()) c);; let prcs cs = prc (Declarations.force cs);; let pru u = pp(str(Univ.string_of_universe u));;*) let pru u = pp(Univ.pr_uni u);; let prlab l = pp(str(string_of_label l));; let prid id = pp(pr_id id);; let prcon c = pp(Indtypes.prcon c);; let prkn kn = pp(Indtypes.prkn kn);; let prus g = pp(Univ.pr_universes g);; let prcstrs c = let g = Univ.merge_constraints c Univ.initial_universes in pp(Univ.pr_universes g);; (*let prcstrs c = pp(Univ.pr_constraints c);; *) (* let prenvu e = let u = universes e in let pu = str "UNIVERSES:"++fnl()++str" "++hov 0 (Univ.pr_universes u) ++fnl() in pp pu;; let prenv e = let ctx1 = named_context e in let ctx2 = rel_context e in let pe = hov 1 (str"[" ++ prlist_with_sep spc (fun (na,_,_) -> pr_id na) (List.rev ctx1)++ str"]") ++ spc() ++ hov 1 (str"[" ++ prlist_with_sep spc (fun (na,_,_) -> pr_na na) (List.rev ctx2)++ str"]") in pp pe;; *) (* let prsub s = let string_of_mp mp = let s = string_of_mp mp in (match mp with MPbound _ -> "#bound."|_->"")^s in pp (hv 0 (fold_subst (fun msid mp strm -> str "S " ++ str (debug_string_of_msid msid) ++ str " |-> " ++ str (string_of_mp mp) ++ fnl() ++ strm) (fun mbid mp strm -> str"B " ++ str (debug_string_of_mbid mbid) ++ str " |-> " ++ str (string_of_mp mp) ++ fnl() ++ strm) (fun mp1 mp strm -> str"P " ++ str (string_of_mp mp1) ++ str " |-> " ++ str (string_of_mp mp) ++ fnl() ++ strm) s (mt()))) ;; *) #install_printer prid;; #install_printer prcon;; #install_printer prlab;; #install_printer prdp;; #install_printer prkn;; #install_printer pru;; (* #install_printer prc;; #install_printer prcs;; *) #install_printer prcstrs;; (*#install_printer prus;;*) (*#install_printer prenv;;*) (*#install_printer prenvu;; #install_printer prsub;;*) Checker.init_with_argv [|""|];; Flags.make_silent false;; Flags.debug := true;; Sys.catch_break true;; let module_of_file f = let (_,mb,_,_) = Obj.magic ((intern_from_file f).library_compiled) in (mb:module_body) ;; let mod_access m fld = match m.mod_expr with Some(SEBstruct l) -> List.assoc fld l | _ -> failwith "bad structure type" ;; let parse_dp s = make_dirpath(List.map id_of_string (List.rev (Str.split(Str.regexp"\\.") s))) ;; let parse_sp s = let l = List.rev (Str.split(Str.regexp"\\.") s) in {dirpath=List.tl l; basename=List.hd l};; let parse_kn s = let l = List.rev (Str.split(Str.regexp"\\.") s) in let dp = make_dirpath(List.map id_of_string(List.tl l)) in make_kn(MPfile dp) empty_dirpath (label_of_id (id_of_string (List.hd l))) ;; let parse_con s = let l = List.rev (Str.split(Str.regexp"\\.") s) in let dp = make_dirpath(List.map id_of_string(List.tl l)) in make_con(MPfile dp) empty_dirpath (label_of_id 
(id_of_string (List.hd l))) ;; let get_mod dp = lookup_module dp (Safe_typing.get_env()) ;; let get_mod_type dp = lookup_modtype dp (Safe_typing.get_env()) ;; let get_cst kn = lookup_constant kn (Safe_typing.get_env()) ;; let read_mod s f = let lib = intern_from_file (parse_dp s,f) in ((Obj.magic lib.library_compiled): dir_path * module_body * (dir_path * Digest.t) list * engagement option);; let deref_mod md s = let (Some (SEBstruct l)) = md.mod_expr in List.assoc (label_of_id(id_of_string s)) l ;; let expln f x = try f x with UserError(_,strm) as e -> msgnl strm; raise e let admit_l l = let l = List.map parse_sp l in Check.recheck_library ~admit:l ~check:l;; let run_l l = Check.recheck_library ~admit:[] ~check:(List.map parse_sp l);; let norec q = Check.recheck_library ~norec:[parse_sp q] ~admit:[] ~check:[];; (* admit_l["Bool";"OrderedType";"DecidableType"];; run_l["FSetInterface"];; *) coq-8.4pl2/checker/closure.ml0000640000175000001440000006643712010532755015261 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* red_kind val fVAR : identifier -> red_kind val no_red : reds val red_add : reds -> red_kind -> reds val mkflags : red_kind list -> reds val red_set : reds -> red_kind -> bool end module RedFlags = (struct (* [r_const=(true,cl)] means all constants but those in [cl] *) (* [r_const=(false,cl)] means only those in [cl] *) (* [r_delta=true] just mean [r_const=(true,[])] *) type reds = { r_beta : bool; r_delta : bool; r_const : transparent_state; r_zeta : bool; r_evar : bool; r_iota : bool } type red_kind = BETA | DELTA | IOTA | ZETA | CONST of constant | VAR of identifier let fBETA = BETA let fDELTA = DELTA let fIOTA = IOTA let fZETA = ZETA let fCONST kn = CONST kn let fVAR id = VAR id let no_red = { r_beta = false; r_delta = false; r_const = all_opaque; r_zeta = false; r_evar = false; r_iota = false } let red_add red = function | BETA -> { red with r_beta = true } | DELTA -> { red with r_delta = true; r_const = all_transparent } | CONST kn -> let (l1,l2) = red.r_const in { red with r_const = l1, Cpred.add kn l2 } | IOTA -> { red with r_iota = true } | ZETA -> { red with r_zeta = true } | VAR id -> let (l1,l2) = red.r_const in { red with r_const = Idpred.add id l1, l2 } let mkflags = List.fold_left red_add no_red let red_set red = function | BETA -> incr_cnt red.r_beta beta | CONST kn -> let (_,l) = red.r_const in let c = Cpred.mem kn l in incr_cnt c delta | VAR id -> (* En attendant d'avoir des kn pour les Var *) let (l,_) = red.r_const in let c = Idpred.mem id l in incr_cnt c delta | ZETA -> incr_cnt red.r_zeta zeta | IOTA -> incr_cnt red.r_iota iota | DELTA -> (* Used for Rel/Var defined in context *) incr_cnt red.r_delta delta end : RedFlagsSig) open RedFlags let betadeltaiota = mkflags [fBETA;fDELTA;fZETA;fIOTA] let betadeltaiotanolet = mkflags [fBETA;fDELTA;fIOTA] let betaiotazeta = mkflags [fBETA;fIOTA;fZETA] (* specification of the reduction function *) (* Flags of reduction and cache of constants: 'a is a type that may be * mapped to constr. 'a infos implements a cache for constants and * abstractions, storing a representation (of type 'a) of the body of * this constant or abstraction. * * i_tab is the cache table of the results * * i_repr is the function to get the representation from the current * state of the cache and the body of the constant. The result * is stored in the table. 
* * i_rels = (4,[(1,c);(3,d)]) means there are 4 free rel variables * and only those with index 1 and 3 have bodies which are c and d resp. * * i_vars is the list of _defined_ named variables. * * ref_value_cache searchs in the tab, otherwise uses i_repr to * compute the result and store it in the table. If the constant can't * be unfolded, returns None, but does not store this failure. * This * doesn't take the RESET into account. You mustn't keep such a table * after a Reset. * This type is not exported. Only its two * instantiations (cbv or lazy) are. *) type table_key = | ConstKey of constant | VarKey of identifier | RelKey of int type 'a infos = { i_flags : reds; i_repr : 'a infos -> constr -> 'a; i_env : env; i_rels : int * (int * constr) list; i_vars : (identifier * constr) list; i_tab : (table_key, 'a) Hashtbl.t } let ref_value_cache info ref = try Some (Hashtbl.find info.i_tab ref) with Not_found -> try let body = match ref with | RelKey n -> let (s,l) = info.i_rels in lift n (List.assoc (s-n) l) | VarKey id -> List.assoc id info.i_vars | ConstKey cst -> constant_value info.i_env cst in let v = info.i_repr info body in Hashtbl.add info.i_tab ref v; Some v with | Not_found (* List.assoc *) | NotEvaluableConst _ (* Const *) -> None let defined_vars flags env = (* if red_local_const (snd flags) then*) fold_named_context (fun (id,b,_) e -> match b with | None -> e | Some body -> (id, body)::e) (named_context env) ~init:[] (* else []*) let defined_rels flags env = (* if red_local_const (snd flags) then*) fold_rel_context (fun (id,b,t) (i,subs) -> match b with | None -> (i+1, subs) | Some body -> (i+1, (i,body) :: subs)) (rel_context env) ~init:(0,[]) (* else (0,[])*) let mind_equiv_infos info = mind_equiv info.i_env let eq_table_key k1 k2 = match k1,k2 with | ConstKey con1 ,ConstKey con2 -> eq_con_chk con1 con2 | _,_ -> k1=k2 let create mk_cl flgs env = { i_flags = flgs; i_repr = mk_cl; i_env = env; i_rels = defined_rels flgs env; i_vars = defined_vars flgs env; i_tab = Hashtbl.create 17 } (**********************************************************************) (* Lazy reduction: the one used in kernel operations *) (* type of shared terms. fconstr and frterm are mutually recursive. * Clone of the constr structure, but completely mutable, and * annotated with reduction state (reducible or not). * - FLIFT is a delayed shift; allows sharing between 2 lifted copies * of a given term. * - FCLOS is a delayed substitution applied to a constr * - FLOCKED is used to erase the content of a reference that must * be updated. This is to allow the garbage collector to work * before the term is computed. *) (* Norm means the term is fully normalized and cannot create a redex when substituted Cstr means the term is in head normal form and that it can create a redex when substituted (i.e. 
constructor, fix, lambda) Whnf means we reached the head normal form and that it cannot create a redex when substituted Red is used for terms that might be reduced *) type red_state = Norm | Cstr | Whnf | Red let neutr = function | (Whnf|Norm) -> Whnf | (Red|Cstr) -> Red type fconstr = { mutable norm: red_state; mutable term: fterm } and fterm = | FRel of int | FAtom of constr (* Metas and Sorts *) | FCast of fconstr * cast_kind * fconstr | FFlex of table_key | FInd of inductive | FConstruct of constructor | FApp of fconstr * fconstr array | FFix of fixpoint * fconstr subs | FCoFix of cofixpoint * fconstr subs | FCases of case_info * fconstr * fconstr * fconstr array | FLambda of int * (name * constr) list * constr * fconstr subs | FProd of name * fconstr * fconstr | FLetIn of name * fconstr * fconstr * constr * fconstr subs | FEvar of existential_key * fconstr array | FLIFT of int * fconstr | FCLOS of constr * fconstr subs | FLOCKED let fterm_of v = v.term let set_norm v = v.norm <- Norm (* Could issue a warning if no is still Red, pointing out that we loose sharing. *) let update v1 (no,t) = if !share then (v1.norm <- no; v1.term <- t; v1) else {norm=no;term=t} (**********************************************************************) (* The type of (machine) stacks (= lambda-bar-calculus' contexts) *) type stack_member = | Zapp of fconstr array | Zcase of case_info * fconstr * fconstr array | Zfix of fconstr * stack | Zshift of int | Zupdate of fconstr and stack = stack_member list let append_stack v s = if Array.length v = 0 then s else match s with | Zapp l :: s -> Zapp (Array.append v l) :: s | _ -> Zapp v :: s (* Collapse the shifts in the stack *) let zshift n s = match (n,s) with (0,_) -> s | (_,Zshift(k)::s) -> Zshift(n+k)::s | _ -> Zshift(n)::s (* Lifting. Preserves sharing (useful only for cell with norm=Red). lft_fconstr always create a new cell, while lift_fconstr avoids it when the lift is 0. 
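For instance (illustrative): [lift_fconstr 2 {norm=Norm; term=FRel 1}] builds a fresh cell whose term is [FRel 3]; on an [FLambda] or [FFix] closure the shift is pushed into the stored substitution via [subs_shft]; and lifting an [FLIFT(k,m)] cell by [n] behaves like lifting [m] by [n+k], so successive shifts are merged.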
*) let rec lft_fconstr n ft = match ft.term with | (FInd _|FConstruct _|FFlex(ConstKey _|VarKey _)) -> ft | FRel i -> {norm=Norm;term=FRel(i+n)} | FLambda(k,tys,f,e) -> {norm=Cstr; term=FLambda(k,tys,f,subs_shft(n,e))} | FFix(fx,e) -> {norm=Cstr; term=FFix(fx,subs_shft(n,e))} | FCoFix(cfx,e) -> {norm=Cstr; term=FCoFix(cfx,subs_shft(n,e))} | FLIFT(k,m) -> lft_fconstr (n+k) m | FLOCKED -> assert false | _ -> {norm=ft.norm; term=FLIFT(n,ft)} let lift_fconstr k f = if k=0 then f else lft_fconstr k f let lift_fconstr_vect k v = if k=0 then v else Array.map (fun f -> lft_fconstr k f) v let clos_rel e i = match expand_rel i e with | Inl(n,mt) -> lift_fconstr n mt | Inr(k,None) -> {norm=Norm; term= FRel k} | Inr(k,Some p) -> lift_fconstr (k-p) {norm=Red;term=FFlex(RelKey p)} (* since the head may be reducible, we might introduce lifts of 0 *) let compact_stack head stk = let rec strip_rec depth = function | Zshift(k)::s -> strip_rec (depth+k) s | Zupdate(m)::s -> (* Be sure to create a new cell otherwise sharing would be lost by the update operation *) let h' = lft_fconstr depth head in let _ = update m (h'.norm,h'.term) in strip_rec depth s | stk -> zshift depth stk in strip_rec 0 stk (* Put an update mark in the stack, only if needed *) let zupdate m s = if !share & m.norm = Red then let s' = compact_stack m s in let _ = m.term <- FLOCKED in Zupdate(m)::s' else s (* Closure optimization: *) let rec compact_constr (lg, subs as s) c k = match c with Rel i -> if i < k then c,s else (try Rel (k + lg - list_index (i-k+1) subs), (lg,subs) with Not_found -> Rel (k+lg), (lg+1, (i-k+1)::subs)) | (Sort _|Var _|Meta _|Ind _|Const _|Construct _) -> c,s | Evar(ev,v) -> let (v',s) = compact_vect s v k in if v==v' then c,s else Evar(ev,v'),s | Cast(a,ck,b) -> let (a',s) = compact_constr s a k in let (b',s) = compact_constr s b k in if a==a' && b==b' then c,s else Cast(a', ck, b'), s | App(f,v) -> let (f',s) = compact_constr s f k in let (v',s) = compact_vect s v k in if f==f' && v==v' then c,s else App(f',v'), s | Lambda(n,a,b) -> let (a',s) = compact_constr s a k in let (b',s) = compact_constr s b (k+1) in if a==a' && b==b' then c,s else Lambda(n,a',b'), s | Prod(n,a,b) -> let (a',s) = compact_constr s a k in let (b',s) = compact_constr s b (k+1) in if a==a' && b==b' then c,s else Prod(n,a',b'), s | LetIn(n,a,ty,b) -> let (a',s) = compact_constr s a k in let (ty',s) = compact_constr s ty k in let (b',s) = compact_constr s b (k+1) in if a==a' && ty==ty' && b==b' then c,s else LetIn(n,a',ty',b'), s | Fix(fi,(na,ty,bd)) -> let (ty',s) = compact_vect s ty k in let (bd',s) = compact_vect s bd (k+Array.length ty) in if ty==ty' && bd==bd' then c,s else Fix(fi,(na,ty',bd')), s | CoFix(i,(na,ty,bd)) -> let (ty',s) = compact_vect s ty k in let (bd',s) = compact_vect s bd (k+Array.length ty) in if ty==ty' && bd==bd' then c,s else CoFix(i,(na,ty',bd')), s | Case(ci,p,a,br) -> let (p',s) = compact_constr s p k in let (a',s) = compact_constr s a k in let (br',s) = compact_vect s br k in if p==p' && a==a' && br==br' then c,s else Case(ci,p',a',br'),s and compact_vect s v k = compact_v [] s v k (Array.length v - 1) and compact_v acc s v k i = if i < 0 then let v' = Array.of_list acc in if array_for_all2 (==) v v' then v,s else v',s else let (a',s') = compact_constr s v.(i) k in compact_v (a'::acc) s' v k (i-1) (* Computes the minimal environment of a closure. Idea: if the subs is not identity, the term will have to be reallocated entirely (to propagate the substitution). 
So, computing the set of free variables does not change the complexity. *) let optimise_closure env c = if is_subs_id env then (env,c) else let (c',(_,s)) = compact_constr (0,[]) c 1 in let env' = Array.map (fun i -> clos_rel env i) (Array.of_list s) in (subs_cons (env', subs_id 0),c') let mk_lambda env t = let (env,t) = optimise_closure env t in let (rvars,t') = decompose_lam t in FLambda(List.length rvars, List.rev rvars, t', env) let destFLambda clos_fun t = match t.term with FLambda(_,[(na,ty)],b,e) -> (na,clos_fun e ty,clos_fun (subs_lift e) b) | FLambda(n,(na,ty)::tys,b,e) -> (na,clos_fun e ty,{norm=Cstr;term=FLambda(n-1,tys,b,subs_lift e)}) | _ -> assert false (* Optimization: do not enclose variables in a closure. Makes variable access much faster *) let mk_clos e t = match t with | Rel i -> clos_rel e i | Var x -> { norm = Red; term = FFlex (VarKey x) } | Const c -> { norm = Red; term = FFlex (ConstKey c) } | Meta _ | Sort _ -> { norm = Norm; term = FAtom t } | Ind kn -> { norm = Norm; term = FInd kn } | Construct kn -> { norm = Cstr; term = FConstruct kn } | (CoFix _|Lambda _|Fix _|Prod _|Evar _|App _|Case _|Cast _|LetIn _) -> {norm = Red; term = FCLOS(t,e)} let mk_clos_vect env v = Array.map (mk_clos env) v (* Translate the head constructor of t from constr to fconstr. This function is parameterized by the function to apply on the direct subterms. Could be used insted of mk_clos. *) let mk_clos_deep clos_fun env t = match t with | (Rel _|Ind _|Const _|Construct _|Var _|Meta _ | Sort _) -> mk_clos env t | Cast (a,k,b) -> { norm = Red; term = FCast (clos_fun env a, k, clos_fun env b)} | App (f,v) -> { norm = Red; term = FApp (clos_fun env f, Array.map (clos_fun env) v) } | Case (ci,p,c,v) -> { norm = Red; term = FCases (ci, clos_fun env p, clos_fun env c, Array.map (clos_fun env) v) } | Fix fx -> { norm = Cstr; term = FFix (fx, env) } | CoFix cfx -> { norm = Cstr; term = FCoFix(cfx,env) } | Lambda _ -> { norm = Cstr; term = mk_lambda env t } | Prod (n,t,c) -> { norm = Whnf; term = FProd (n, clos_fun env t, clos_fun (subs_lift env) c) } | LetIn (n,b,t,c) -> { norm = Red; term = FLetIn (n, clos_fun env b, clos_fun env t, c, env) } | Evar(ev,args) -> { norm = Whnf; term = FEvar(ev,Array.map (clos_fun env) args) } (* A better mk_clos? 
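As an illustration (a sketch; [f] and [v] are hypothetical subterms, [subs_id] comes from Esubst): on an application, [mk_clos] merely suspends the term,

     mk_clos (subs_id 0) (App (f, v))
       (* = { norm = Red; term = FCLOS (App (f, v), subs_id 0) } *)

whereas [mk_clos2] below translates the head constructor eagerly,

     mk_clos_deep mk_clos (subs_id 0) (App (f, v))
       (* = { norm = Red;
              term = FApp (mk_clos (subs_id 0) f, Array.map (mk_clos (subs_id 0)) v) } *)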
*) let mk_clos2 = mk_clos_deep mk_clos (* The inverse of mk_clos_deep: move back to constr *) let rec to_constr constr_fun lfts v = match v.term with | FRel i -> Rel (reloc_rel i lfts) | FFlex (RelKey p) -> Rel (reloc_rel p lfts) | FFlex (VarKey x) -> Var x | FAtom c -> exliftn lfts c | FCast (a,k,b) -> Cast (constr_fun lfts a, k, constr_fun lfts b) | FFlex (ConstKey op) -> Const op | FInd op -> Ind op | FConstruct op -> Construct op | FCases (ci,p,c,ve) -> Case (ci, constr_fun lfts p, constr_fun lfts c, Array.map (constr_fun lfts) ve) | FFix ((op,(lna,tys,bds)),e) -> let n = Array.length bds in let ftys = Array.map (mk_clos e) tys in let fbds = Array.map (mk_clos (subs_liftn n e)) bds in let lfts' = el_liftn n lfts in Fix (op, (lna, Array.map (constr_fun lfts) ftys, Array.map (constr_fun lfts') fbds)) | FCoFix ((op,(lna,tys,bds)),e) -> let n = Array.length bds in let ftys = Array.map (mk_clos e) tys in let fbds = Array.map (mk_clos (subs_liftn n e)) bds in let lfts' = el_liftn (Array.length bds) lfts in CoFix (op, (lna, Array.map (constr_fun lfts) ftys, Array.map (constr_fun lfts') fbds)) | FApp (f,ve) -> App (constr_fun lfts f, Array.map (constr_fun lfts) ve) | FLambda _ -> let (na,ty,bd) = destFLambda mk_clos2 v in Lambda (na, constr_fun lfts ty, constr_fun (el_lift lfts) bd) | FProd (n,t,c) -> Prod (n, constr_fun lfts t, constr_fun (el_lift lfts) c) | FLetIn (n,b,t,f,e) -> let fc = mk_clos2 (subs_lift e) f in LetIn (n, constr_fun lfts b, constr_fun lfts t, constr_fun (el_lift lfts) fc) | FEvar (ev,args) -> Evar(ev,Array.map (constr_fun lfts) args) | FLIFT (k,a) -> to_constr constr_fun (el_shft k lfts) a | FCLOS (t,env) -> let fr = mk_clos2 env t in let unfv = update v (fr.norm,fr.term) in to_constr constr_fun lfts unfv | FLOCKED -> assert false (*mkVar(id_of_string"_LOCK_")*) (* This function defines the correspondance between constr and fconstr. When we find a closure whose substitution is the identity, then we directly return the constr to avoid possibly huge reallocation. *) let term_of_fconstr = let rec term_of_fconstr_lift lfts v = match v.term with | FCLOS(t,env) when is_subs_id env & is_lift_id lfts -> t | FLambda(_,tys,f,e) when is_subs_id e & is_lift_id lfts -> compose_lam (List.rev tys) f | FFix(fx,e) when is_subs_id e & is_lift_id lfts -> Fix fx | FCoFix(cfx,e) when is_subs_id e & is_lift_id lfts -> CoFix cfx | _ -> to_constr term_of_fconstr_lift lfts v in term_of_fconstr_lift el_id (* fstrong applies unfreeze_fun recursively on the (freeze) term and * yields a term. Assumes that the unfreeze_fun never returns a * FCLOS term. let rec fstrong unfreeze_fun lfts v = to_constr (fstrong unfreeze_fun) lfts (unfreeze_fun v) *) let rec zip m stk = match stk with | [] -> m | Zapp args :: s -> zip {norm=neutr m.norm; term=FApp(m, args)} s | Zcase(ci,p,br)::s -> let t = FCases(ci, p, m, br) in zip {norm=neutr m.norm; term=t} s | Zfix(fx,par)::s -> zip fx (par @ append_stack [|m|] s) | Zshift(n)::s -> zip (lift_fconstr n m) s | Zupdate(rf)::s -> zip (update rf (m.norm,m.term)) s let fapp_stack (m,stk) = zip m stk (*********************************************************************) (* The assertions in the functions below are granted because they are called only when m is a constructor, a cofix (strip_update_shift_app), a fix (get_nth_arg) or an abstraction (strip_update_shift, through get_arg). *) (* optimised for the case where there are no shifts... 
*) let strip_update_shift_app head stk = assert (head.norm <> Red); let rec strip_rec rstk h depth = function | Zshift(k) as e :: s -> strip_rec (e::rstk) (lift_fconstr k h) (depth+k) s | (Zapp args :: s) -> strip_rec (Zapp args :: rstk) {norm=h.norm;term=FApp(h,args)} depth s | Zupdate(m)::s -> strip_rec rstk (update m (h.norm,h.term)) depth s | stk -> (depth,List.rev rstk, stk) in strip_rec [] head 0 stk let get_nth_arg head n stk = assert (head.norm <> Red); let rec strip_rec rstk h n = function | Zshift(k) as e :: s -> strip_rec (e::rstk) (lift_fconstr k h) n s | Zapp args::s' -> let q = Array.length args in if n >= q then strip_rec (Zapp args::rstk) {norm=h.norm;term=FApp(h,args)} (n-q) s' else let bef = Array.sub args 0 n in let aft = Array.sub args (n+1) (q-n-1) in let stk' = List.rev (if n = 0 then rstk else (Zapp bef :: rstk)) in (Some (stk', args.(n)), append_stack aft s') | Zupdate(m)::s -> strip_rec rstk (update m (h.norm,h.term)) n s | s -> (None, List.rev rstk @ s) in strip_rec [] head n stk (* Beta reduction: look for an applied argument in the stack. Since the encountered update marks are removed, h must be a whnf *) let rec get_args n tys f e stk = match stk with Zupdate r :: s -> let _hd = update r (Cstr,FLambda(n,tys,f,e)) in get_args n tys f e s | Zshift k :: s -> get_args n tys f (subs_shft (k,e)) s | Zapp l :: s -> let na = Array.length l in if n == na then (Inl (subs_cons(l,e)),s) else if n < na then (* more arguments *) let args = Array.sub l 0 n in let eargs = Array.sub l n (na-n) in (Inl (subs_cons(args,e)), Zapp eargs :: s) else (* more lambdas *) let etys = list_skipn na tys in get_args (n-na) etys f (subs_cons(l,e)) s | _ -> (Inr {norm=Cstr;term=FLambda(n,tys,f,e)}, stk) (* Eta expansion: add a reference to implicit surrounding lambda at end of stack *) let rec eta_expand_stack = function | (Zapp _ | Zfix _ | Zcase _ | Zshift _ | Zupdate _ as e) :: s -> e :: eta_expand_stack s | [] -> [Zshift 1; Zapp [|{norm=Norm; term= FRel 1}|]] (* Iota reduction: extract the arguments to be passed to the Case branches *) let rec reloc_rargs_rec depth stk = match stk with Zapp args :: s -> Zapp (lift_fconstr_vect depth args) :: reloc_rargs_rec depth s | Zshift(k)::s -> if k=depth then s else reloc_rargs_rec (depth-k) s | _ -> stk let reloc_rargs depth stk = if depth = 0 then stk else reloc_rargs_rec depth stk let rec drop_parameters depth n stk = match stk with Zapp args::s -> let q = Array.length args in if n > q then drop_parameters depth (n-q) s else if n = q then reloc_rargs depth s else let aft = Array.sub args n (q-n) in reloc_rargs depth (append_stack aft s) | Zshift(k)::s -> drop_parameters (depth-k) n s | [] -> assert (n=0); [] | _ -> assert false (* we know that n < stack_args_size(stk) *) (* Iota reduction: expansion of a fixpoint. * Given a fixpoint and a substitution, returns the corresponding * fixpoint body, and the substitution in which it should be * evaluated: its first variables are the fixpoint bodies * * FCLOS(fix Fi {F0 := T0 .. Fn-1 := Tn-1}, S) * -> (S. FCLOS(F0,S) . ... . 
FCLOS(Fn-1,S), Ti) *) (* does not deal with FLIFT *) let contract_fix_vect fix = let (thisbody, make_body, env, nfix) = match fix with | FFix (((reci,i),(_,_,bds as rdcl)),env) -> (bds.(i), (fun j -> { norm = Cstr; term = FFix (((reci,j),rdcl),env) }), env, Array.length bds) | FCoFix ((i,(_,_,bds as rdcl)),env) -> (bds.(i), (fun j -> { norm = Cstr; term = FCoFix ((j,rdcl),env) }), env, Array.length bds) | _ -> assert false in (subs_cons(Array.init nfix make_body, env), thisbody) (*********************************************************************) (* A machine that inspects the head of a term until it finds an atom or a subterm that may produce a redex (abstraction, constructor, cofix, letin, constant), or a neutral term (product, inductive) *) let rec knh m stk = match m.term with | FLIFT(k,a) -> knh a (zshift k stk) | FCLOS(t,e) -> knht e t (zupdate m stk) | FLOCKED -> assert false | FApp(a,b) -> knh a (append_stack b (zupdate m stk)) | FCases(ci,p,t,br) -> knh t (Zcase(ci,p,br)::zupdate m stk) | FFix(((ri,n),(_,_,_)),_) -> (match get_nth_arg m ri.(n) stk with (Some(pars,arg),stk') -> knh arg (Zfix(m,pars)::stk') | (None, stk') -> (m,stk')) | FCast(t,_,_) -> knh t stk (* cases where knh stops *) | (FFlex _|FLetIn _|FConstruct _|FEvar _| FCoFix _|FLambda _|FRel _|FAtom _|FInd _|FProd _) -> (m, stk) (* The same for pure terms *) and knht e t stk = match t with | App(a,b) -> knht e a (append_stack (mk_clos_vect e b) stk) | Case(ci,p,t,br) -> knht e t (Zcase(ci, mk_clos e p, mk_clos_vect e br)::stk) | Fix _ -> knh (mk_clos2 e t) stk | Cast(a,_,_) -> knht e a stk | Rel n -> knh (clos_rel e n) stk | (Lambda _|Prod _|Construct _|CoFix _|Ind _| LetIn _|Const _|Var _|Evar _|Meta _|Sort _) -> (mk_clos2 e t, stk) (************************************************************************) (* Computes a weak head normal form from the result of knh. 
*) let rec knr info m stk = match m.term with | FLambda(n,tys,f,e) when red_set info.i_flags fBETA -> (match get_args n tys f e stk with Inl e', s -> knit info e' f s | Inr lam, s -> (lam,s)) | FFlex(ConstKey kn) when red_set info.i_flags (fCONST kn) -> (match ref_value_cache info (ConstKey kn) with Some v -> kni info v stk | None -> (set_norm m; (m,stk))) | FFlex(VarKey id) when red_set info.i_flags (fVAR id) -> (match ref_value_cache info (VarKey id) with Some v -> kni info v stk | None -> (set_norm m; (m,stk))) | FFlex(RelKey k) when red_set info.i_flags fDELTA -> (match ref_value_cache info (RelKey k) with Some v -> kni info v stk | None -> (set_norm m; (m,stk))) | FConstruct(ind,c) when red_set info.i_flags fIOTA -> (match strip_update_shift_app m stk with (depth, args, Zcase(ci,_,br)::s) -> assert (ci.ci_npar>=0); let rargs = drop_parameters depth ci.ci_npar args in kni info br.(c-1) (rargs@s) | (_, cargs, Zfix(fx,par)::s) -> let rarg = fapp_stack(m,cargs) in let stk' = par @ append_stack [|rarg|] s in let (fxe,fxbd) = contract_fix_vect fx.term in knit info fxe fxbd stk' | (_,args,s) -> (m,args@s)) | FCoFix _ when red_set info.i_flags fIOTA -> (match strip_update_shift_app m stk with (_, args, ((Zcase _::_) as stk')) -> let (fxe,fxbd) = contract_fix_vect m.term in knit info fxe fxbd (args@stk') | (_,args,s) -> (m,args@s)) | FLetIn (_,v,_,bd,e) when red_set info.i_flags fZETA -> knit info (subs_cons([|v|],e)) bd stk | _ -> (m,stk) (* Computes the weak head normal form of a term *) and kni info m stk = let (hm,s) = knh m stk in knr info hm s and knit info e t stk = let (ht,s) = knht e t stk in knr info ht s let kh info v stk = fapp_stack(kni info v stk) (************************************************************************) (* Initialization and then normalization *) (* weak reduction *) let whd_val info v = with_stats (lazy (term_of_fconstr (kh info v []))) let inject = mk_clos (subs_id 0) let whd_stack infos m stk = let k = kni infos m stk in let _ = fapp_stack k in (* to unlock Zupdates! *) k (* cache of constants: the body is computed only when needed. 
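A typical use of the machinery above (sketch only; [_whnf], [_env] and [_t] are hypothetical, and [betadeltaiota] is assumed to be one of the reduction flag sets defined earlier in this file):

     let _whnf _env _t =
       let infos = create_clos_infos betadeltaiota _env in   (* create_clos_infos is defined just below *)
       whd_val infos (inject _t)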
*) type clos_infos = fconstr infos let create_clos_infos flgs env = create (fun _ -> inject) flgs env let unfold_reference = ref_value_cache coq-8.4pl2/checker/term.mli0000640000175000001440000001011211447132264014704 0ustar notinusersopen Names type existential_key = int type metavariable = int type case_style = LetStyle | IfStyle | LetPatternStyle | MatchStyle | RegularStyle type case_printing = { ind_nargs : int; style : case_style; } type case_info = { ci_ind : inductive; ci_npar : int; ci_cstr_ndecls : int array; ci_pp_info : case_printing; } type contents = Pos | Null type sorts = Prop of contents | Type of Univ.universe type sorts_family = InProp | InSet | InType val family_of_sort : sorts -> sorts_family type 'a pexistential = existential_key * 'a array type 'a prec_declaration = name array * 'a array * 'a array type 'a pfixpoint = (int array * int) * 'a prec_declaration type 'a pcofixpoint = int * 'a prec_declaration type cast_kind = VMcast | DEFAULTcast type constr = Rel of int | Var of identifier | Meta of metavariable | Evar of constr pexistential | Sort of sorts | Cast of constr * cast_kind * constr | Prod of name * constr * constr | Lambda of name * constr * constr | LetIn of name * constr * constr * constr | App of constr * constr array | Const of constant | Ind of inductive | Construct of constructor | Case of case_info * constr * constr * constr array | Fix of constr pfixpoint | CoFix of constr pcofixpoint type existential = constr pexistential type rec_declaration = constr prec_declaration type fixpoint = constr pfixpoint type cofixpoint = constr pcofixpoint val strip_outer_cast : constr -> constr val collapse_appl : constr -> constr val decompose_app : constr -> constr * constr list val applist : constr * constr list -> constr val iter_constr_with_binders : ('a -> 'a) -> ('a -> constr -> unit) -> 'a -> constr -> unit exception LocalOccur val closedn : int -> constr -> bool val closed0 : constr -> bool val noccurn : int -> constr -> bool val noccur_between : int -> int -> constr -> bool val noccur_with_meta : int -> int -> constr -> bool val map_constr_with_binders : ('a -> 'a) -> ('a -> constr -> constr) -> 'a -> constr -> constr val exliftn : Esubst.lift -> constr -> constr val liftn : int -> int -> constr -> constr val lift : int -> constr -> constr type info = Closed | Open | Unknown type 'a substituend = { mutable sinfo : info; sit : 'a; } val lift_substituend : int -> constr substituend -> constr val make_substituend : 'a -> 'a substituend val substn_many : constr substituend array -> int -> constr -> constr val substnl : constr list -> int -> constr -> constr val substl : constr list -> constr -> constr val subst1 : constr -> constr -> constr type named_declaration = identifier * constr option * constr type rel_declaration = name * constr option * constr type named_context = named_declaration list val empty_named_context : named_context val fold_named_context : (named_declaration -> 'a -> 'a) -> named_context -> init:'a -> 'a type section_context = named_context type rel_context = rel_declaration list val empty_rel_context : rel_context val rel_context_length : rel_context -> int val rel_context_nhyps : rel_context -> int val fold_rel_context : (rel_declaration -> 'a -> 'a) -> rel_context -> init:'a -> 'a val map_context : (constr -> constr) -> named_context -> named_context val map_rel_context : (constr -> constr) -> rel_context -> rel_context val extended_rel_list : int -> rel_context -> constr list val compose_lam : (name * constr) list -> constr -> constr val 
decompose_lam : constr -> (name * constr) list * constr val decompose_lam_n_assum : int -> constr -> rel_context * constr val mkProd_or_LetIn : name * constr option * constr -> constr -> constr val it_mkProd_or_LetIn : constr -> rel_context -> constr val decompose_prod_assum : constr -> rel_context * constr val decompose_prod_n_assum : int -> constr -> rel_context * constr type arity = rel_context * sorts val mkArity : arity -> constr val destArity : constr -> arity val isArity : constr -> bool val compare_constr : (constr -> constr -> bool) -> constr -> constr -> bool val eq_constr : constr -> constr -> bool (* Validation *) val val_sortfam : Validate.func val val_sort : Validate.func val val_constr : Validate.func val val_rctxt : Validate.func val val_nctxt : Validate.func coq-8.4pl2/checker/declarations.mli0000640000175000001440000001521411546054263016417 0ustar notinusersopen Util open Names open Term (* Bytecode *) type values type reloc_table type to_patch_substituted (*Retroknowledge *) type action type retroknowledge (* Engagements *) type engagement = ImpredicativeSet (* Constants *) type polymorphic_arity = { poly_param_levels : Univ.universe option list; poly_level : Univ.universe; } type constant_type = | NonPolymorphicType of constr | PolymorphicArity of rel_context * polymorphic_arity type constr_substituted val force_constr : constr_substituted -> constr val from_val : constr -> constr_substituted (** Beware! In .vo files, lazy_constr are stored as integers used as indexes for a separate table. The actual lazy_constr is restored later, by [Safe_typing.LightenLibrary.load]. This allows us to use here a different definition of lazy_constr than coqtop: since the checker will inspect all proofs parts, even opaque ones, no need to use Lazy.t here *) type lazy_constr val force_lazy_constr : lazy_constr -> constr val lazy_constr_from_val : constr_substituted -> lazy_constr (** Inlining level of parameters at functor applications. This is ignored by the checker. 
*) type inline = int option (** A constant can have no body (axiom/parameter), or a transparent body, or an opaque one *) type constant_def = | Undef of inline | Def of constr_substituted | OpaqueDef of lazy_constr type constant_body = { const_hyps : section_context; (* New: younger hyp at top *) const_body : constant_def; const_type : constant_type; const_body_code : to_patch_substituted; const_constraints : Univ.constraints } val body_of_constant : constant_body -> constr_substituted option val constant_has_body : constant_body -> bool val is_opaque : constant_body -> bool (* Mutual inductives *) type recarg = | Norec | Mrec of inductive | Imbr of inductive type wf_paths = recarg Rtree.t val mk_norec : wf_paths val mk_paths : recarg -> wf_paths list array -> wf_paths val dest_recarg : wf_paths -> recarg val dest_subterms : wf_paths -> wf_paths list array type monomorphic_inductive_arity = { mind_user_arity : constr; mind_sort : sorts; } type inductive_arity = | Monomorphic of monomorphic_inductive_arity | Polymorphic of polymorphic_arity type one_inductive_body = { (* Primitive datas *) (* Name of the type: [Ii] *) mind_typename : identifier; (* Arity context of [Ii] with parameters: [forall params, Ui] *) mind_arity_ctxt : rel_context; (* Arity sort, original user arity, and allowed elim sorts, if monomorphic *) mind_arity : inductive_arity; (* Names of the constructors: [cij] *) mind_consnames : identifier array; (* Types of the constructors with parameters: [forall params, Tij], where the Ik are replaced by de Bruijn index in the context I1:forall params, U1 .. In:forall params, Un *) mind_user_lc : constr array; (* Derived datas *) (* Number of expected real arguments of the type (no let, no params) *) mind_nrealargs : int; (* Length of realargs context (with let, no params) *) mind_nrealargs_ctxt : int; (* List of allowed elimination sorts *) mind_kelim : sorts_family list; (* Head normalized constructor types so that their conclusion is atomic *) mind_nf_lc : constr array; (* Length of the signature of the constructors (with let, w/o params) *) mind_consnrealdecls : int array; (* Signature of recursive arguments in the constructors *) mind_recargs : wf_paths; (* Datas for bytecode compilation *) (* number of constant constructor *) mind_nb_constant : int; (* number of no constant constructor *) mind_nb_args : int; mind_reloc_tbl : reloc_table; } type mutual_inductive_body = { (* The component of the mutual inductive block *) mind_packets : one_inductive_body array; (* Whether the inductive type has been declared as a record *) mind_record : bool; (* Whether the type is inductive or coinductive *) mind_finite : bool; (* Number of types in the block *) mind_ntypes : int; (* Section hypotheses on which the block depends *) mind_hyps : section_context; (* Number of expected parameters *) mind_nparams : int; (* Number of recursively uniform (i.e. 
ordinary) parameters *) mind_nparams_rec : int; (* The context of parameters (includes let-in declaration) *) mind_params_ctxt : rel_context; (* Universes constraints enforced by the inductive declaration *) mind_constraints : Univ.constraints; } (* Modules *) type substitution type delta_resolver val empty_delta_resolver : delta_resolver type structure_field_body = | SFBconst of constant_body | SFBmind of mutual_inductive_body | SFBmodule of module_body | SFBmodtype of module_type_body and structure_body = (label * structure_field_body) list and struct_expr_body = | SEBident of module_path | SEBfunctor of mod_bound_id * module_type_body * struct_expr_body | SEBapply of struct_expr_body * struct_expr_body * Univ.constraints | SEBstruct of structure_body | SEBwith of struct_expr_body * with_declaration_body and with_declaration_body = With_module_body of identifier list * module_path | With_definition_body of identifier list * constant_body and module_body = { mod_mp : module_path; mod_expr : struct_expr_body option; mod_type : struct_expr_body; mod_type_alg : struct_expr_body option; mod_constraints : Univ.constraints; mod_delta : delta_resolver; mod_retroknowledge : action list} and module_type_body = { typ_mp : module_path; typ_expr : struct_expr_body; typ_expr_alg : struct_expr_body option ; typ_constraints : Univ.constraints; typ_delta :delta_resolver} (* Substitutions *) type 'a subst_fun = substitution -> 'a -> 'a val empty_subst : substitution val add_mbid : mod_bound_id -> module_path -> substitution -> substitution val add_mp : module_path -> module_path -> substitution -> substitution val map_mbid : mod_bound_id -> module_path -> substitution val map_mp : module_path -> module_path -> substitution val mp_in_delta : module_path -> delta_resolver -> bool val mind_of_delta : delta_resolver -> mutual_inductive -> mutual_inductive val subst_const_body : constant_body subst_fun val subst_mind : mutual_inductive_body subst_fun val subst_modtype : substitution -> module_type_body -> module_type_body val subst_struct_expr : substitution -> struct_expr_body -> struct_expr_body val subst_structure : substitution -> structure_body -> structure_body val subst_module : substitution -> module_body -> module_body val join : substitution -> substitution -> substitution (* Validation *) val val_eng : Validate.func val val_module : Validate.func val val_modtype : Validate.func coq-8.4pl2/checker/check_stat.mli0000640000175000001440000000114112010532755016043 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit coq-8.4pl2/checker/main.ml0000640000175000001440000000003211003143316014474 0ustar notinusers let _ = Checker.start () coq-8.4pl2/checker/validate.ml0000640000175000001440000001461212010532755015362 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Obj.no_scan_tag then if t = Obj.string_tag then Format.print_string ("\""^String.escaped(Obj.magic o)^"\"") else Format.print_string "?" else (let n = Obj.size o in Format.print_string ("#"^string_of_int t^"("); Format.open_hvbox 0; for i = 0 to n-1 do pr_obj_rec (Obj.field o i); if i<>n-1 then (Format.print_string ","; Format.print_cut()) done; Format.close_box(); Format.print_string ")") else Format.print_string "?" 
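(* Illustrative sketch of the output format (hypothetical value): on the pair
   (1, "x") the printer above produces #0(1,"x"), i.e. a block of tag 0 whose
   fields are the int 1 and the string "x". This is the raw shape reported when
   a .vo value fails validation. *)
let _demo_pr_obj () = pr_obj_rec (Obj.repr (1, "x"))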
let pr_obj o = pr_obj_rec o; Format.print_newline() (**************************************************************************) (* Obj low-level validators *) type error_context = string list let mt_ec : error_context = [] let (/) (ctx:error_context) s : error_context = s::ctx let overr (ctx:error_context) f = (fun (_:error_context) -> f ctx) let ext s f (ctx:error_context) = f (ctx/s) exception ValidObjError of string * error_context * Obj.t let fail ctx o s = raise (ValidObjError(s,ctx,o)) type func = error_context -> Obj.t -> unit let apply debug f x = let o = Obj.repr x in try f mt_ec o with ValidObjError(msg,ctx,obj) -> if debug then begin print_endline ("Validation failed: "^msg); print_endline ("Context: "^String.concat"/"(List.rev ctx)); pr_obj obj end; failwith "vo structure validation failed" (* data not validated *) let no_val (c:error_context) (o:Obj.t) = () (* Check that object o is a block with tag t *) let val_tag t ctx o = if Obj.is_block o && Obj.tag o = t then () else fail ctx o ("expected tag "^string_of_int t) let val_block ctx o = if Obj.is_block o then (if Obj.tag o > Obj.no_scan_tag then fail ctx o "block: found no scan tag") else fail ctx o "expected block obj" (* Check that an object is a tuple (or a record). v is an array of validation functions for each field. Its size corresponds to the expected size of the object. *) let val_tuple ?name v ctx o = let ctx = match name with Some n -> ctx/n | _ -> ctx in let n = Array.length v in let val_fld i f = f (ctx/("fld="^string_of_int i)) (Obj.field o i) in val_block ctx o; if Obj.size o = n then Array.iteri val_fld v else fail ctx o ("tuple size: found "^string_of_int (Obj.size o)^ ", expected "^string_of_int n) (* Check that the object is either a constant constructor of tag < cc, or a constructed variant. each element of vv is an array of validation functions to be applied to the constructor arguments. The size of vv corresponds to the number of non-constant constructors, and the size of vv.(i) is the expected arity of the i-th non-constant constructor. *) let val_sum name cc vv ctx o = let ctx = ctx/name in if Obj.is_block o then (val_block (ctx/name) o; let n = Array.length vv in let i = Obj.tag o in let ctx' = if n=1 then ctx else ctx/("tag="^string_of_int i) in if i < n then val_tuple vv.(i) ctx' o else fail ctx' o ("sum: unexpected tag")) else if Obj.is_int o then let (n:int) = Obj.magic o in (if n<0 || n>=cc then fail ctx o ("bad constant constructor "^string_of_int n)) else fail ctx o "not a sum" let val_enum s n = val_sum s n [||] (* Recursive types: avoid looping by eta-expansion *) let rec val_rec_sum name cc f ctx o = val_sum name cc (f (overr (ctx/name) (val_rec_sum name cc f))) ctx o (**************************************************************************) (* Builtin types *) (* Check the o is an array of values satisfying f. 
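More generally, these combinators compose into structural validators for concrete datatypes. Sketch, for a hypothetical type

     type t = A | B of int * string

with one constant constructor and one non-constant constructor of arity 2, using the basic validators defined further below:

     let _val_t : func = val_sum "t" 1 [| [| val_int; val_str |] |]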
*) let val_array ?(pos=false) f ctx o = let upd_ctx = if pos then (fun i -> ctx/string_of_int i) else (fun _ -> ctx) in val_block (ctx/"array") o; for i = 0 to Obj.size o - 1 do (f (upd_ctx i) (Obj.field o i):unit) done (* Integer validator *) let val_int ctx o = if not (Obj.is_int o) then fail ctx o "expected an int" (* String validator *) let val_str ctx o = try val_tag Obj.string_tag ctx o with Failure _ -> fail ctx o "expected a string" (* Booleans *) let val_bool = val_enum "bool" 2 (* Option type *) let val_opt ?(name="option") f = val_sum name 1 [|[|f|]|] (* Lists *) let val_list ?(name="list") f ctx = val_rec_sum name 1 (fun vlist -> [|[|ext "elem" f;vlist|]|]) ctx (* Reference *) let val_ref ?(name="ref") f ctx = val_tuple [|f|] (ctx/name) (**************************************************************************) (* Standard library types *) (* Sets *) let val_set ?(name="Set.t") f = val_rec_sum name 1 (fun vset -> [|[|vset;ext "elem" f; vset;ext "bal" val_int|]|]) (* Maps *) let rec val_map ?(name="Map.t") fk fv = val_rec_sum name 1 (fun vmap -> [|[|vmap; ext "key" fk; ext "value" fv; vmap; ext "bal" val_int|]|]) (**************************************************************************) (* Coq types *) (* names *) let val_id = val_str let val_dp = val_list ~name:"dirpath" val_id let val_name = val_sum "name" 1 [|[|val_id|]|] let val_uid = val_tuple ~name:"uniq_ident" [|val_int;val_str;val_dp|] let val_mp = val_rec_sum "module_path" 0 (fun vmp -> [|[|val_dp|];[|val_uid|];[|vmp;val_id|]|]) let val_kn = val_tuple ~name:"kernel_name" [|val_mp;val_dp;val_id|] let val_con = val_tuple ~name:"constant/mutind" [|val_kn;val_kn|] let val_ind = val_tuple ~name:"inductive"[|val_con;val_int|] let val_cstr = val_tuple ~name:"constructor"[|val_ind;val_int|] (* univ *) let val_level = val_sum "level" 1 [|[|val_dp;val_int|]|] let val_univ = val_sum "univ" 0 [|[|val_level|];[|val_list val_level;val_list val_level|]|] let val_cstrs = val_set ~name:"Univ.constraints" (val_tuple ~name:"univ_constraint" [|val_level;val_enum "order_request" 3;val_level|]) coq-8.4pl2/checker/environ.mli0000640000175000001440000000463111652031713015421 0ustar notinusersopen Names open Term (* Environments *) type globals = { env_constants : Declarations.constant_body Cmap_env.t; env_inductives : Declarations.mutual_inductive_body Mindmap_env.t; env_inductives_eq : kernel_name KNmap.t; env_modules : Declarations.module_body MPmap.t; env_modtypes : Declarations.module_type_body MPmap.t} type stratification = { env_universes : Univ.universes; env_engagement : Declarations.engagement option; } type env = { env_globals : globals; env_named_context : named_context; env_rel_context : rel_context; env_stratification : stratification; env_imports : Digest.t MPmap.t; } val empty_env : env (* Engagement *) val engagement : env -> Declarations.engagement option val set_engagement : Declarations.engagement -> env -> env (* Digests *) val add_digest : env -> dir_path -> Digest.t -> env val lookup_digest : env -> dir_path -> Digest.t (* de Bruijn variables *) val rel_context : env -> rel_context val lookup_rel : int -> env -> rel_declaration val push_rel : rel_declaration -> env -> env val push_rel_context : rel_context -> env -> env val push_rec_types : name array * constr array * 'a -> env -> env (* Named variables *) val named_context : env -> named_context val push_named : named_declaration -> env -> env val lookup_named : identifier -> env -> named_declaration val named_type : identifier -> env -> constr (* Universes *) val 
universes : env -> Univ.universes val add_constraints : Univ.constraints -> env -> env (* Constants *) val lookup_constant : constant -> env -> Declarations.constant_body val add_constant : constant -> Declarations.constant_body -> env -> env type const_evaluation_result = NoBody | Opaque exception NotEvaluableConst of const_evaluation_result val constant_value : env -> constant -> constr val evaluable_constant : constant -> env -> bool (* Inductives *) val mind_equiv : env -> inductive -> inductive -> bool val lookup_mind : mutual_inductive -> env -> Declarations.mutual_inductive_body val add_mind : mutual_inductive -> Declarations.mutual_inductive_body -> env -> env (* Modules *) val add_modtype : module_path -> Declarations.module_type_body -> env -> env val shallow_add_module : module_path -> Declarations.module_body -> env -> env val shallow_remove_module : module_path -> env -> env val lookup_module : module_path -> env -> Declarations.module_body val lookup_modtype : module_path -> env -> Declarations.module_type_body coq-8.4pl2/checker/inductive.ml0000640000175000001440000010237312010532755015565 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = Array.length mib.mind_packets then error "Inductive.lookup_mind_specif: invalid inductive index"; (mib, mib.mind_packets.(tyi)) let find_rectype env c = let (t, l) = decompose_app (whd_betadeltaiota env c) in match t with | Ind ind -> (ind, l) | _ -> raise Not_found let find_inductive env c = let (t, l) = decompose_app (whd_betadeltaiota env c) in match t with | Ind ind when (fst (lookup_mind_specif env ind)).mind_finite -> (ind, l) | _ -> raise Not_found let find_coinductive env c = let (t, l) = decompose_app (whd_betadeltaiota env c) in match t with | Ind ind when not (fst (lookup_mind_specif env ind)).mind_finite -> (ind, l) | _ -> raise Not_found let inductive_params (mib,_) = mib.mind_nparams (************************************************************************) (* Build the substitution that replaces Rels by the appropriate *) (* inductives *) let ind_subst mind mib = let ntypes = mib.mind_ntypes in let make_Ik k = Ind (mind,ntypes-k-1) in list_tabulate make_Ik ntypes (* Instantiate inductives in constructor type *) let constructor_instantiate mind mib c = let s = ind_subst mind mib in substl s c let instantiate_params full t args sign = let fail () = anomaly "instantiate_params: type, ctxt and args mismatch" in let (rem_args, subs, ty) = fold_rel_context (fun (_,copt,_) (largs,subs,ty) -> match (copt, largs, ty) with | (None, a::args, Prod(_,_,t)) -> (args, a::subs, t) | (Some b,_,LetIn(_,_,_,t)) -> (largs, (substl subs b)::subs, t) | (_,[],_) -> if full then fail() else ([], subs, ty) | _ -> fail ()) sign ~init:(args,[],t) in if rem_args <> [] then fail(); substl subs ty let full_inductive_instantiate mib params sign = let dummy = Prop Null in let t = mkArity (sign,dummy) in fst (destArity (instantiate_params true t params mib.mind_params_ctxt)) let full_constructor_instantiate ((mind,_),(mib,_),params) = let inst_ind = constructor_instantiate mind mib in (fun t -> instantiate_params true (inst_ind t) params mib.mind_params_ctxt) (************************************************************************) (************************************************************************) (* Functions to build standard types related to inductive *) (* Computing the actual sort of an applied or partially applied inductive type: I_i: 
forall uniformparams:utyps, forall otherparams:otyps, Type(a) uniformargs : utyps otherargs : otyps I_1:forall ...,s_1;...I_n:forall ...,s_n |- sort(C_kj(uniformargs)) = s_kj s'_k = max(..s_kj..) merge(..s'_k..) = ..s''_k.. -------------------------------------------------------------------- Gamma |- I_i uniformargs otherargs : phi(s''_i) where - if p=0, phi() = Prop - if p=1, phi(s) = s - if p<>1, phi(s) = sup(Set,s) Remark: Set (predicative) is encoded as Type(0) *) let sort_as_univ = function | Type u -> u | Prop Null -> type0m_univ | Prop Pos -> type0_univ let cons_subst u su subst = try (u, sup su (List.assoc u subst)) :: List.remove_assoc u subst with Not_found -> (u, su) :: subst let actualize_decl_level env lev t = let sign,s = dest_arity env t in mkArity (sign,lev) let polymorphism_on_non_applied_parameters = false (* Bind expected levels of parameters to actual levels *) (* Propagate the new levels in the signature *) let rec make_subst env = function | (_,Some _,_ as t)::sign, exp, args -> let ctx,subst = make_subst env (sign, exp, args) in t::ctx, subst | d::sign, None::exp, args -> let args = match args with _::args -> args | [] -> [] in let ctx,subst = make_subst env (sign, exp, args) in d::ctx, subst | d::sign, Some u::exp, a::args -> (* We recover the level of the argument, but we don't change the *) (* level in the corresponding type in the arity; this level in the *) (* arity is a global level which, at typing time, will be enforce *) (* to be greater than the level of the argument; this is probably *) (* a useless extra constraint *) let s = sort_as_univ (snd (dest_arity env a)) in let ctx,subst = make_subst env (sign, exp, args) in d::ctx, cons_subst u s subst | (na,None,t as d)::sign, Some u::exp, [] -> (* No more argument here: we instantiate the type with a fresh level *) (* which is first propagated to the corresponding premise in the arity *) (* (actualize_decl_level), then to the conclusion of the arity (via *) (* the substitution) *) let ctx,subst = make_subst env (sign, exp, []) in if polymorphism_on_non_applied_parameters then let s = fresh_local_univ () in let t = actualize_decl_level env (Type s) t in (na,None,t)::ctx, cons_subst u s subst else d::ctx, subst | sign, [], _ -> (* Uniform parameters are exhausted *) sign,[] | [], _, _ -> assert false let instantiate_universes env ctx ar argsorts = let args = Array.to_list argsorts in let ctx,subst = make_subst env (ctx,ar.poly_param_levels,args) in let level = subst_large_constraints subst ar.poly_level in ctx, if is_type0m_univ level then Prop Null else if is_type0_univ level then Prop Pos else Type level let type_of_inductive_knowing_parameters env mip paramtyps = match mip.mind_arity with | Monomorphic s -> s.mind_user_arity | Polymorphic ar -> let ctx = List.rev mip.mind_arity_ctxt in let ctx,s = instantiate_universes env ctx ar paramtyps in mkArity (List.rev ctx,s) (* Type of a (non applied) inductive type *) let type_of_inductive env (_,mip) = type_of_inductive_knowing_parameters env mip [||] (* The max of an array of universes *) let cumulate_constructor_univ u = function | Prop Null -> u | Prop Pos -> sup type0_univ u | Type u' -> sup u u' let max_inductive_sort = Array.fold_left cumulate_constructor_univ type0m_univ (************************************************************************) (* Type of a constructor *) let type_of_constructor cstr (mib,mip) = let ind = inductive_of_constructor cstr in let specif = mip.mind_user_lc in let i = index_of_constructor cstr in let nconstr = Array.length 
mip.mind_consnames in if i > nconstr then error "Not enough constructors in the type"; constructor_instantiate (fst ind) mib specif.(i-1) let arities_of_specif kn (mib,mip) = let specif = mip.mind_nf_lc in Array.map (constructor_instantiate kn mib) specif (************************************************************************) let error_elim_expln kp ki = match kp,ki with | (InType | InSet), InProp -> NonInformativeToInformative | InType, InSet -> StrongEliminationOnNonSmallType (* if Set impredicative *) | _ -> WrongArity (* Type of case predicates *) (* Get type of inductive, with parameters instantiated *) let inductive_sort_family mip = match mip.mind_arity with | Monomorphic s -> family_of_sort s.mind_sort | Polymorphic _ -> InType let mind_arity mip = mip.mind_arity_ctxt, inductive_sort_family mip let get_instantiated_arity (mib,mip) params = let sign, s = mind_arity mip in full_inductive_instantiate mib params sign, s let elim_sorts (_,mip) = mip.mind_kelim let extended_rel_list n hyps = let rec reln l p = function | (_,None,_) :: hyps -> reln (Rel (n+p) :: l) (p+1) hyps | (_,Some _,_) :: hyps -> reln l (p+1) hyps | [] -> l in reln [] 1 hyps let build_dependent_inductive ind (_,mip) params = let realargs,_ = list_chop mip.mind_nrealargs_ctxt mip.mind_arity_ctxt in applist (Ind ind, List.map (lift mip.mind_nrealargs_ctxt) params @ extended_rel_list 0 realargs) (* This exception is local *) exception LocalArity of (sorts_family * sorts_family * arity_error) option let check_allowed_sort ksort specif = if not (List.exists ((=) ksort) (elim_sorts specif)) then let s = inductive_sort_family (snd specif) in raise (LocalArity (Some(ksort,s,error_elim_expln ksort s))) let is_correct_arity env c (p,pj) ind specif params = let arsign,_ = get_instantiated_arity specif params in let rec srec env pt ar = let pt' = whd_betadeltaiota env pt in match pt', ar with | Prod (na1,a1,t), (_,None,a1')::ar' -> (try conv env a1 a1' with NotConvertible -> raise (LocalArity None)); srec (push_rel (na1,None,a1) env) t ar' | Prod (_,a1,a2), [] -> (* whnf of t was not needed here! 
*) let ksort = match (whd_betadeltaiota env a2) with | Sort s -> family_of_sort s | _ -> raise (LocalArity None) in let dep_ind = build_dependent_inductive ind specif params in (try conv env a1 dep_ind with NotConvertible -> raise (LocalArity None)); check_allowed_sort ksort specif; true | Sort s', [] -> check_allowed_sort (family_of_sort s') specif; false | _ -> raise (LocalArity None) in try srec env pj (List.rev arsign) with LocalArity kinds -> error_elim_arity env ind (elim_sorts specif) c (p,pj) kinds (************************************************************************) (* Type of case branches *) (* [p] is the predicate, [i] is the constructor number (starting from 0), and [cty] is the type of the constructor (params not instantiated) *) let build_branches_type ind (_,mip as specif) params dep p = let build_one_branch i cty = let typi = full_constructor_instantiate (ind,specif,params) cty in let (args,ccl) = decompose_prod_assum typi in let nargs = rel_context_length args in let (_,allargs) = decompose_app ccl in let (lparams,vargs) = list_chop (inductive_params specif) allargs in let cargs = if dep then let cstr = ith_constructor_of_inductive ind (i+1) in let dep_cstr = applist (Construct cstr,lparams@extended_rel_list 0 args) in vargs @ [dep_cstr] else vargs in let base = beta_appvect (lift nargs p) (Array.of_list cargs) in it_mkProd_or_LetIn base args in Array.mapi build_one_branch mip.mind_nf_lc (* [p] is the predicate, [c] is the match object, [realargs] is the list of real args of the inductive type *) let build_case_type dep p c realargs = let args = if dep then realargs@[c] else realargs in beta_appvect p (Array.of_list args) let type_case_branches env (ind,largs) (p,pj) c = let specif = lookup_mind_specif env ind in let nparams = inductive_params specif in let (params,realargs) = list_chop nparams largs in let dep = is_correct_arity env c (p,pj) ind specif params in let lc = build_branches_type ind specif params dep p in let ty = build_case_type dep p c realargs in (lc, ty) (************************************************************************) (* Checking the case annotation is relevant *) let check_case_info env indsp ci = let (mib,mip) = lookup_mind_specif env indsp in if not (eq_ind indsp ci.ci_ind) or (mib.mind_nparams <> ci.ci_npar) or (mip.mind_consnrealdecls <> ci.ci_cstr_ndecls) then raise (TypeError(env,WrongCaseInfo(indsp,ci))) (************************************************************************) (************************************************************************) (* Guard conditions for fix and cofix-points *) (* Check if t is a subterm of Rel n, and gives its specification, assuming lst already gives index of subterms with corresponding specifications of recursive arguments *) (* A powerful notion of subterm *) (* To each inductive definition corresponds an array describing the structure of recursive arguments for each constructor, we call it the recursive spec of the type (it has type recargs vect). For checking the guard, we start from the decreasing argument (Rel n) with its recursive spec. During checking the guardness condition, we collect patterns variables corresponding to subterms of n, each of them with its recursive spec. They are organised in a list lst of type (int * recargs) list which is sorted with respect to the first argument. 
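For instance, in

     Fixpoint f (n:nat) := match n with O => O | S p => f p end

the pattern variable [p] bound by the [S] branch is recorded as a strict subterm of the decreasing argument [n], so the recursive call [f p] is accepted, whereas a call [f n] would be rejected: [n] itself only carries a "large" subterm spec and is not strictly smaller.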
*) (*************************************************************) (* Environment annotated with marks on recursive arguments *) (* tells whether it is a strict or loose subterm *) type size = Large | Strict (* merging information *) let size_glb s1 s2 = match s1,s2 with Strict, Strict -> Strict | _ -> Large (* possible specifications for a term: - Not_subterm: when the size of a term is not related to the recursive argument of the fixpoint - Subterm: when the term is a subterm of the recursive argument the wf_paths argument specifies which subterms are recursive - Dead_code: when the term has been built by elimination over an empty type *) type subterm_spec = Subterm of (size * wf_paths) | Dead_code | Not_subterm let spec_of_tree t = lazy (if Rtree.eq_rtree (=) (Lazy.force t) mk_norec then Not_subterm else Subterm(Strict,Lazy.force t)) let subterm_spec_glb = let glb2 s1 s2 = match s1,s2 with _, Dead_code -> s1 | Dead_code, _ -> s2 | Not_subterm, _ -> Not_subterm | _, Not_subterm -> Not_subterm | Subterm (a1,t1), Subterm (a2,t2) -> if Rtree.eq_rtree (=) t1 t2 then Subterm (size_glb a1 a2, t1) (* branches do not return objects with same spec *) else Not_subterm in Array.fold_left glb2 Dead_code type guard_env = { env : env; (* dB of last fixpoint *) rel_min : int; (* inductive of recarg of each fixpoint *) inds : inductive array; (* the recarg information of inductive family *) recvec : wf_paths array; (* dB of variables denoting subterms *) genv : subterm_spec Lazy.t list; } let make_renv env minds recarg (kn,tyi) = let mib = lookup_mind kn env in let mind_recvec = Array.map (fun mip -> mip.mind_recargs) mib.mind_packets in { env = env; rel_min = recarg+2; inds = minds; recvec = mind_recvec; genv = [Lazy.lazy_from_val(Subterm(Large,mind_recvec.(tyi)))] } let push_var renv (x,ty,spec) = { renv with env = push_rel (x,None,ty) renv.env; rel_min = renv.rel_min+1; genv = spec:: renv.genv } let assign_var_spec renv (i,spec) = { renv with genv = list_assign renv.genv (i-1) spec } let push_var_renv renv (x,ty) = push_var renv (x,ty,Lazy.lazy_from_val Not_subterm) (* Fetch recursive information about a variable p *) let subterm_var p renv = try Lazy.force (List.nth renv.genv (p-1)) with Failure _ | Invalid_argument _ -> Not_subterm let push_ctxt_renv renv ctxt = let n = rel_context_length ctxt in { renv with env = push_rel_context ctxt renv.env; rel_min = renv.rel_min+n; genv = iterate (fun ge -> Lazy.lazy_from_val Not_subterm::ge) n renv.genv } let push_fix_renv renv (_,v,_ as recdef) = let n = Array.length v in { renv with env = push_rec_types recdef renv.env; rel_min = renv.rel_min+n; genv = iterate (fun ge -> Lazy.lazy_from_val Not_subterm::ge) n renv.genv } (* Definition and manipulation of the stack *) type stack_element = |SClosure of guard_env*constr |SArg of subterm_spec Lazy.t let push_stack_closures renv l stack = List.fold_right (fun h b -> (SClosure (renv,h))::b) l stack let push_stack_args l stack = List.fold_right (fun h b -> (SArg h)::b) l stack (******************************) (* Computing the recursive subterms of a term (propagation of size information through Cases). *) (* c is a branch of an inductive definition corresponding to the spec lrec. mind_recvec is the recursive spec of the inductive definition of the decreasing argument n. case_branches_specif renv lrec lc will pass the lambdas of c corresponding to pattern variables and collect possibly new subterms variables and returns the bodies of the branches with the correct envs and decreasing args. 
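Concretely (an illustration; [t_nat] stands for the recursive tree of nat): for a match on [nat] whose scrutinee has spec [Subterm(_,t_nat)], the branch specs computed below have the shape

     [| [] ;                                    (* O: no pattern variable *)
        [ lazy (Subterm (Strict, t_nat)) ] |]   (* S: its argument is a strict subterm *)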
*) let lookup_subterms env ind = let (_,mip) = lookup_mind_specif env ind in mip.mind_recargs let match_inductive ind ra = match ra with | (Mrec i | Imbr i) -> eq_ind ind i | Norec -> false (* In {match c as z in ci y_s return P with |C_i x_s => t end} [branches_specif renv c_spec ci] returns an array of x_s specs knowing c_spec. *) let branches_specif renv c_spec ci = let car = (* We fetch the regular tree associated to the inductive of the match. This is just to get the number of constructors (and constructor arities) that fit the match branches without forcing c_spec. Note that c_spec might be more precise than [v] below, because of nested inductive types. *) let (_,mip) = lookup_mind_specif renv.env ci.ci_ind in let v = dest_subterms mip.mind_recargs in Array.map List.length v in Array.mapi (fun i nca -> (* i+1-th cstructor has arity nca *) let lvra = lazy (match Lazy.force c_spec with Subterm (_,t) when match_inductive ci.ci_ind (dest_recarg t) -> let vra = Array.of_list (dest_subterms t).(i) in assert (nca = Array.length vra); Array.map (fun t -> Lazy.force (spec_of_tree (lazy t))) vra | Dead_code -> Array.create nca Dead_code | _ -> Array.create nca Not_subterm) in list_tabulate (fun j -> lazy (Lazy.force lvra).(j)) nca) car (* [subterm_specif renv t] computes the recursive structure of [t] and compare its size with the size of the initial recursive argument of the fixpoint we are checking. [renv] collects such information about variables. *) let rec subterm_specif renv stack t = (* maybe reduction is not always necessary! *) let f,l = decompose_app (whd_betadeltaiota renv.env t) in match f with | Rel k -> subterm_var k renv | Case (ci,_,c,lbr) -> let stack' = push_stack_closures renv l stack in let cases_spec = branches_specif renv (lazy_subterm_specif renv [] c) ci in let stl = Array.mapi (fun i br' -> let stack_br = push_stack_args (cases_spec.(i)) stack' in subterm_specif renv stack_br br') lbr in subterm_spec_glb stl | Fix ((recindxs,i),(_,typarray,bodies as recdef)) -> (* when proving that the fixpoint f(x)=e is less than n, it is enough to prove that e is less than n assuming f is less than n furthermore when f is applied to a term which is strictly less than n, one may assume that x itself is strictly less than n *) let (ctxt,clfix) = dest_prod renv.env typarray.(i) in let oind = let env' = push_rel_context ctxt renv.env in try Some(fst(find_inductive env' clfix)) with Not_found -> None in (match oind with None -> Not_subterm (* happens if fix is polymorphic *) | Some ind -> let nbfix = Array.length typarray in let recargs = lookup_subterms renv.env ind in (* pushing the fixpoints *) let renv' = push_fix_renv renv recdef in let renv' = (* Why Strict here ? To be general, it could also be Large... 
*) assign_var_spec renv' (nbfix-i, lazy (Subterm(Strict,recargs))) in let decrArg = recindxs.(i) in let theBody = bodies.(i) in let nbOfAbst = decrArg+1 in let sign,strippedBody = decompose_lam_n_assum nbOfAbst theBody in (* pushing the fix parameters *) let stack' = push_stack_closures renv l stack in let renv'' = push_ctxt_renv renv' sign in let renv'' = if List.length stack' < nbOfAbst then renv'' else let decrArg = List.nth stack' decrArg in let arg_spec = stack_element_specif decrArg in assign_var_spec renv'' (1, arg_spec) in subterm_specif renv'' [] strippedBody) | Lambda (x,a,b) -> assert (l=[]); let spec,stack' = extract_stack renv a stack in subterm_specif (push_var renv (x,a,spec)) stack' b (* Metas and evars are considered OK *) | (Meta _|Evar _) -> Dead_code (* Other terms are not subterms *) | _ -> Not_subterm and lazy_subterm_specif renv stack t = lazy (subterm_specif renv stack t) and stack_element_specif = function |SClosure (h_renv,h) -> lazy_subterm_specif h_renv [] h |SArg x -> x and extract_stack renv a = function | [] -> Lazy.lazy_from_val Not_subterm , [] | h::t -> stack_element_specif h, t (* Check size x is a correct size for recursive calls. *) let check_is_subterm x = match Lazy.force x with Subterm (Strict,_) | Dead_code -> true | _ -> false (************************************************************************) exception FixGuardError of env * guard_error let error_illegal_rec_call renv fx (arg_renv,arg) = let (_,le_vars,lt_vars) = List.fold_left (fun (i,le,lt) sbt -> match Lazy.force sbt with (Subterm(Strict,_) | Dead_code) -> (i+1, le, i::lt) | (Subterm(Large,_)) -> (i+1, i::le, lt) | _ -> (i+1, le ,lt)) (1,[],[]) renv.genv in raise (FixGuardError (renv.env, RecursionOnIllegalTerm(fx,(arg_renv.env, arg), le_vars,lt_vars))) let error_partial_apply renv fx = raise (FixGuardError (renv.env,NotEnoughArgumentsForFixCall fx)) (* Check if [def] is a guarded fixpoint body with decreasing arg. given [recpos], the decreasing arguments of each mutually defined fixpoint. 
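Only a strict subterm (or a spec produced by dead code) licenses a recursive call; as a sketch of the acceptance predicate [check_is_subterm] defined above:

     let _ = assert (check_is_subterm (Lazy.lazy_from_val Dead_code))
     let _ = assert (not (check_is_subterm (Lazy.lazy_from_val Not_subterm)))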
*) let check_one_fix renv recpos def = let nfi = Array.length recpos in (* Checks if [t] only make valid recursive calls *) let rec check_rec_call renv stack t = (* if [t] does not make recursive calls, it is guarded: *) if noccur_with_meta renv.rel_min nfi t then () else let (f,l) = decompose_app (whd_betaiotazeta t) in match f with | Rel p -> (* Test if [p] is a fixpoint (recursive call) *) if renv.rel_min <= p & p < renv.rel_min+nfi then begin List.iter (check_rec_call renv []) l; (* the position of the invoked fixpoint: *) let glob = renv.rel_min+nfi-1-p in (* the decreasing arg of the rec call: *) let np = recpos.(glob) in let stack' = push_stack_closures renv l stack in if List.length stack' <= np then error_partial_apply renv glob else (* Check the decreasing arg is smaller *) let z = List.nth stack' np in if not (check_is_subterm (stack_element_specif z)) then begin match z with |SClosure (z,z') -> error_illegal_rec_call renv glob (z,z') |SArg _ -> error_partial_apply renv glob end end else begin match pi2 (lookup_rel p renv.env) with | None -> List.iter (check_rec_call renv []) l | Some c -> try List.iter (check_rec_call renv []) l with FixGuardError _ -> check_rec_call renv stack (applist(lift p c,l)) end | Case (ci,p,c_0,lrest) -> List.iter (check_rec_call renv []) (c_0::p::l); (* compute the recarg information for the arguments of each branch *) let case_spec = branches_specif renv (lazy_subterm_specif renv [] c_0) ci in let stack' = push_stack_closures renv l stack in Array.iteri (fun k br' -> let stack_br = push_stack_args case_spec.(k) stack' in check_rec_call renv stack_br br') lrest (* Enables to traverse Fixpoint definitions in a more intelligent way, ie, the rule : if - g = fix g (y1:T1)...(yp:Tp) {struct yp} := e & - f is guarded with respect to the set of pattern variables S in a1 ... am & - f is guarded with respect to the set of pattern variables S in T1 ... Tp & - ap is a sub-term of the formal argument of f & - f is guarded with respect to the set of pattern variables S+{yp} in e then f is guarded with respect to S in (g a1 ... am). 
Eduardo 7/9/98 *) | Fix ((recindxs,i),(_,typarray,bodies as recdef)) -> List.iter (check_rec_call renv []) l; Array.iter (check_rec_call renv []) typarray; let decrArg = recindxs.(i) in let renv' = push_fix_renv renv recdef in let stack' = push_stack_closures renv l stack in Array.iteri (fun j body -> if i=j && (List.length stack' > decrArg) then let recArg = List.nth stack' decrArg in let arg_sp = stack_element_specif recArg in check_nested_fix_body renv' (decrArg+1) arg_sp body else check_rec_call renv' [] body) bodies | Const kn -> if evaluable_constant kn renv.env then try List.iter (check_rec_call renv []) l with (FixGuardError _ ) -> let value = (applist(constant_value renv.env kn, l)) in check_rec_call renv stack value else List.iter (check_rec_call renv []) l | Lambda (x,a,b) -> assert (l = []); check_rec_call renv [] a ; let spec, stack' = extract_stack renv a stack in check_rec_call (push_var renv (x,a,spec)) stack' b | Prod (x,a,b) -> assert (l = [] && stack = []); check_rec_call renv [] a; check_rec_call (push_var_renv renv (x,a)) [] b | CoFix (i,(_,typarray,bodies as recdef)) -> List.iter (check_rec_call renv []) l; Array.iter (check_rec_call renv []) typarray; let renv' = push_fix_renv renv recdef in Array.iter (check_rec_call renv' []) bodies | (Ind _ | Construct _) -> List.iter (check_rec_call renv []) l | Var id -> begin match pi2 (lookup_named id renv.env) with | None -> List.iter (check_rec_call renv []) l | Some c -> try List.iter (check_rec_call renv []) l with (FixGuardError _) -> check_rec_call renv stack (applist(c,l)) end | Sort _ -> assert (l = []) (* l is not checked because it is considered as the meta's context *) | (Evar _ | Meta _) -> () | (App _ | LetIn _ | Cast _) -> assert false (* beta zeta reduction *) and check_nested_fix_body renv decr recArgsDecrArg body = if decr = 0 then check_rec_call (assign_var_spec renv (1,recArgsDecrArg)) [] body else match body with | Lambda (x,a,b) -> check_rec_call renv [] a; let renv' = push_var_renv renv (x,a) in check_nested_fix_body renv' (decr-1) recArgsDecrArg b | _ -> anomaly "Not enough abstractions in fix body" in check_rec_call renv [] def let inductive_of_mutfix env ((nvect,bodynum),(names,types,bodies as recdef)) = let nbfix = Array.length bodies in if nbfix = 0 or Array.length nvect <> nbfix or Array.length types <> nbfix or Array.length names <> nbfix or bodynum < 0 or bodynum >= nbfix then anomaly "Ill-formed fix term"; let fixenv = push_rec_types recdef env in let raise_err env i err = error_ill_formed_rec_body env err names i in (* Check the i-th definition with recarg k *) let find_ind i k def = (* check fi does not appear in the k+1 first abstractions, gives the type of the k+1-eme abstraction (must be an inductive) *) let rec check_occur env n def = match (whd_betadeltaiota env def) with | Lambda (x,a,b) -> if noccur_with_meta n nbfix a then let env' = push_rel (x, None, a) env in if n = k+1 then (* get the inductive type of the fixpoint *) let (mind, _) = try find_inductive env a with Not_found -> raise_err env i (RecursionNotOnInductiveType a) in (mind, (env', b)) else check_occur env' (n+1) b else anomaly "check_one_fix: Bad occurrence of recursive call" | _ -> raise_err env i NotEnoughAbstractionInFixBody in check_occur fixenv 1 def in (* Do it on every fixpoint *) let rv = array_map2_i find_ind nvect bodies in (Array.map fst rv, Array.map snd rv) let check_fix env ((nvect,_),(names,_,bodies as _recdef) as fix) = let (minds, rdef) = inductive_of_mutfix env fix in for i = 0 to Array.length bodies - 1 
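(* Illustration (hedged, assuming the standard [nat]): the body of
     fix plus (n m : nat) {struct n} :=
       match n with O => m | S p => S (plus p m) end
   passes [check_one_fix] because the recursive call is applied to [p], a
   strict subterm of [n]; replacing that call by [plus n m] in the same
   position would make the check fail with [RecursionOnIllegalTerm]. *)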
do let (fenv,body) = rdef.(i) in let renv = make_renv fenv minds nvect.(i) minds.(i) in try check_one_fix renv nvect body with FixGuardError (fixenv,err) -> error_ill_formed_rec_body fixenv err names i done (* let cfkey = Profile.declare_profile "check_fix";; let check_fix env fix = Profile.profile3 cfkey check_fix env fix;; *) (************************************************************************) (* Co-fixpoints. *) exception CoFixGuardError of env * guard_error let anomaly_ill_typed () = anomaly "check_one_cofix: too many arguments applied to constructor" let rec codomain_is_coind env c = let b = whd_betadeltaiota env c in match b with | Prod (x,a,b) -> codomain_is_coind (push_rel (x, None, a) env) b | _ -> (try find_coinductive env b with Not_found -> raise (CoFixGuardError (env, CodomainNotInductiveType b))) let check_one_cofix env nbfix def deftype = let rec check_rec_call env alreadygrd n vlra t = if not (noccur_with_meta n nbfix t) then let c,args = decompose_app (whd_betadeltaiota env t) in match c with | Rel p when n <= p && p < n+nbfix -> (* recursive call: must be guarded and no nested recursive call allowed *) if not alreadygrd then raise (CoFixGuardError (env,UnguardedRecursiveCall t)) else if not(List.for_all (noccur_with_meta n nbfix) args) then raise (CoFixGuardError (env,NestedRecursiveOccurrences)) | Construct (_,i as cstr_kn) -> let lra = vlra.(i-1) in let mI = inductive_of_constructor cstr_kn in let (mib,mip) = lookup_mind_specif env mI in let realargs = list_skipn mib.mind_nparams args in let rec process_args_of_constr = function | (t::lr), (rar::lrar) -> if rar = mk_norec then if noccur_with_meta n nbfix t then process_args_of_constr (lr, lrar) else raise (CoFixGuardError (env,RecCallInNonRecArgOfConstructor t)) else let spec = dest_subterms rar in check_rec_call env true n spec t; process_args_of_constr (lr, lrar) | [],_ -> () | _ -> anomaly_ill_typed () in process_args_of_constr (realargs, lra) | Lambda (x,a,b) -> assert (args = []); if noccur_with_meta n nbfix a then let env' = push_rel (x, None, a) env in check_rec_call env' alreadygrd (n+1) vlra b else raise (CoFixGuardError (env,RecCallInTypeOfAbstraction a)) | CoFix (j,(_,varit,vdefs as recdef)) -> if (List.for_all (noccur_with_meta n nbfix) args) then let nbfix = Array.length vdefs in if (array_for_all (noccur_with_meta n nbfix) varit) then let env' = push_rec_types recdef env in (Array.iter (check_rec_call env' alreadygrd (n+1) vlra) vdefs; List.iter (check_rec_call env alreadygrd n vlra) args) else raise (CoFixGuardError (env,RecCallInTypeOfDef c)) else raise (CoFixGuardError (env,UnguardedRecursiveCall c)) | Case (_,p,tm,vrest) -> if (noccur_with_meta n nbfix p) then if (noccur_with_meta n nbfix tm) then if (List.for_all (noccur_with_meta n nbfix) args) then Array.iter (check_rec_call env alreadygrd n vlra) vrest else raise (CoFixGuardError (env,RecCallInCaseFun c)) else raise (CoFixGuardError (env,RecCallInCaseArg c)) else raise (CoFixGuardError (env,RecCallInCasePred c)) | Meta _ -> () | Evar _ -> List.iter (check_rec_call env alreadygrd n vlra) args | _ -> raise (CoFixGuardError (env,NotGuardedForm t)) in let (mind, _) = codomain_is_coind env deftype in let vlra = lookup_subterms env mind in check_rec_call env false 1 (dest_subterms vlra) def (* The function which checks that the whole block of definitions satisfies the guarded condition *) let check_cofix env (bodynum,(names,types,bodies as recdef)) = let nbfix = Array.length bodies in for i = 0 to nbfix-1 do let fixenv = push_rec_types recdef env 
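(* Illustration (hedged, assuming the standard [Stream]/[Cons] of the
   library): a productive definition such as
     cofix ones : Stream nat := Cons 1 ones
   is accepted by [check_one_cofix], since the corecursive call occurs
   directly under the constructor [Cons]; the unguarded
     cofix loop : Stream nat := loop
   is rejected with [UnguardedRecursiveCall]. *)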
in try check_one_cofix fixenv nbfix bodies.(i) types.(i) with CoFixGuardError (errenv,err) -> error_ill_formed_rec_body errenv err names i done coq-8.4pl2/checker/reduction.ml0000640000175000001440000003657112010532755015575 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | Zupdate _::s -> is_empty_stack s | Zshift _::s -> is_empty_stack s | _ -> false (* Compute the lift to be performed on a term placed in a given stack *) let el_stack el stk = let n = List.fold_left (fun i z -> match z with Zshift n -> i+n | _ -> i) 0 stk in el_shft n el let compare_stack_shape stk1 stk2 = let rec compare_rec bal stk1 stk2 = match (stk1,stk2) with ([],[]) -> bal=0 | ((Zupdate _|Zshift _)::s1, _) -> compare_rec bal s1 stk2 | (_, (Zupdate _|Zshift _)::s2) -> compare_rec bal stk1 s2 | (Zapp l1::s1, _) -> compare_rec (bal+Array.length l1) s1 stk2 | (_, Zapp l2::s2) -> compare_rec (bal-Array.length l2) stk1 s2 | (Zcase(c1,_,_)::s1, Zcase(c2,_,_)::s2) -> bal=0 (* && c1.ci_ind = c2.ci_ind *) && compare_rec 0 s1 s2 | (Zfix(_,a1)::s1, Zfix(_,a2)::s2) -> bal=0 && compare_rec 0 a1 a2 && compare_rec 0 s1 s2 | (_,_) -> false in compare_rec 0 stk1 stk2 type lft_constr_stack_elt = Zlapp of (lift * fconstr) array | Zlfix of (lift * fconstr) * lft_constr_stack | Zlcase of case_info * lift * fconstr * fconstr array and lft_constr_stack = lft_constr_stack_elt list let rec zlapp v = function Zlapp v2 :: s -> zlapp (Array.append v v2) s | s -> Zlapp v :: s let pure_stack lfts stk = let rec pure_rec lfts stk = match stk with [] -> (lfts,[]) | zi::s -> (match (zi,pure_rec lfts s) with (Zupdate _,lpstk) -> lpstk | (Zshift n,(l,pstk)) -> (el_shft n l, pstk) | (Zapp a, (l,pstk)) -> (l,zlapp (Array.map (fun t -> (l,t)) a) pstk) | (Zfix(fx,a),(l,pstk)) -> let (lfx,pa) = pure_rec l a in (l, Zlfix((lfx,fx),pa)::pstk) | (Zcase(ci,p,br),(l,pstk)) -> (l,Zlcase(ci,l,p,br)::pstk)) in snd (pure_rec lfts stk) (****************************************************************************) (* Reduction Functions *) (****************************************************************************) let whd_betaiotazeta x = match x with | (Sort _|Var _|Meta _|Evar _|Const _|Ind _|Construct _| Prod _|Lambda _|Fix _|CoFix _) -> x | _ -> whd_val (create_clos_infos betaiotazeta empty_env) (inject x) let whd_betadeltaiota env t = match t with | (Sort _|Meta _|Evar _|Ind _|Construct _| Prod _|Lambda _|Fix _|CoFix _) -> t | _ -> whd_val (create_clos_infos betadeltaiota env) (inject t) let whd_betadeltaiota_nolet env t = match t with | (Sort _|Meta _|Evar _|Ind _|Construct _| Prod _|Lambda _|Fix _|CoFix _|LetIn _) -> t | _ -> whd_val (create_clos_infos betadeltaiotanolet env) (inject t) (* Beta *) let beta_appvect c v = let rec stacklam env t stack = match t, stack with Lambda(_,_,c), arg::stacktl -> stacklam (arg::env) c stacktl | _ -> applist (substl env t, stack) in stacklam [] c (Array.to_list v) (********************************************************************) (* Conversion *) (********************************************************************) (* Conversion utility functions *) type 'a conversion_function = env -> 'a -> 'a -> unit exception NotConvertible exception NotConvertibleVect of int let compare_stacks f fmind lft1 stk1 lft2 stk2 = let rec cmp_rec pstk1 pstk2 = match (pstk1,pstk2) with | (z1::s1, z2::s2) -> cmp_rec s1 s2; (match (z1,z2) with | (Zlapp a1,Zlapp a2) -> array_iter2 f a1 a2 | (Zlfix(fx1,a1),Zlfix(fx2,a2)) -> f fx1 
fx2; cmp_rec a1 a2 | (Zlcase(ci1,l1,p1,br1),Zlcase(ci2,l2,p2,br2)) -> if not (fmind ci1.ci_ind ci2.ci_ind) then raise NotConvertible; f (l1,p1) (l2,p2); array_iter2 (fun c1 c2 -> f (l1,c1) (l2,c2)) br1 br2 | _ -> assert false) | _ -> () in if compare_stack_shape stk1 stk2 then cmp_rec (pure_stack lft1 stk1) (pure_stack lft2 stk2) else raise NotConvertible (* Convertibility of sorts *) type conv_pb = | CONV | CUMUL let sort_cmp univ pb s0 s1 = match (s0,s1) with | (Prop c1, Prop c2) when pb = CUMUL -> if c1 = Pos & c2 = Null then raise NotConvertible | (Prop c1, Prop c2) -> if c1 <> c2 then raise NotConvertible | (Prop c1, Type u) -> (match pb with CUMUL -> () | _ -> raise NotConvertible) | (Type u1, Type u2) -> if not (match pb with | CONV -> check_eq univ u1 u2 | CUMUL -> check_geq univ u2 u1) then raise NotConvertible | (_, _) -> raise NotConvertible let rec no_arg_available = function | [] -> true | Zupdate _ :: stk -> no_arg_available stk | Zshift _ :: stk -> no_arg_available stk | Zapp v :: stk -> Array.length v = 0 && no_arg_available stk | Zcase _ :: _ -> true | Zfix _ :: _ -> true let rec no_nth_arg_available n = function | [] -> true | Zupdate _ :: stk -> no_nth_arg_available n stk | Zshift _ :: stk -> no_nth_arg_available n stk | Zapp v :: stk -> let k = Array.length v in if n >= k then no_nth_arg_available (n-k) stk else false | Zcase _ :: _ -> true | Zfix _ :: _ -> true let rec no_case_available = function | [] -> true | Zupdate _ :: stk -> no_case_available stk | Zshift _ :: stk -> no_case_available stk | Zapp _ :: stk -> no_case_available stk | Zcase _ :: _ -> false | Zfix _ :: _ -> true let in_whnf (t,stk) = match fterm_of t with | (FLetIn _ | FCases _ | FApp _ | FCLOS _ | FLIFT _ | FCast _) -> false | FLambda _ -> no_arg_available stk | FConstruct _ -> no_case_available stk | FCoFix _ -> no_case_available stk | FFix(((ri,n),(_,_,_)),_) -> no_nth_arg_available ri.(n) stk | (FFlex _ | FProd _ | FEvar _ | FInd _ | FAtom _ | FRel _) -> true | FLOCKED -> assert false let oracle_order fl1 fl2 = match fl1,fl2 with ConstKey c1, ConstKey c2 -> (*height c1 > height c2*)false | _, ConstKey _ -> true | _ -> false (* Conversion between [lft1]term1 and [lft2]term2 *) let rec ccnv univ cv_pb infos lft1 lft2 term1 term2 = eqappr univ cv_pb infos (lft1, (term1,[])) (lft2, (term2,[])) (* Conversion between [lft1](hd1 v1) and [lft2](hd2 v2) *) and eqappr univ cv_pb infos (lft1,st1) (lft2,st2) = Util.check_for_interrupt (); (* First head reduce both terms *) let rec whd_both (t1,stk1) (t2,stk2) = let st1' = whd_stack infos t1 stk1 in let st2' = whd_stack infos t2 stk2 in (* Now, whd_stack on term2 might have modified st1 (due to sharing), and st1 might not be in whnf anymore. If so, we iterate ccnv. 
*) if in_whnf st1' then (st1',st2') else whd_both st1' st2' in let ((hd1,v1),(hd2,v2)) = whd_both st1 st2 in let appr1 = (lft1,(hd1,v1)) and appr2 = (lft2,(hd2,v2)) in (* compute the lifts that apply to the head of the term (hd1 and hd2) *) let el1 = el_stack lft1 v1 in let el2 = el_stack lft2 v2 in match (fterm_of hd1, fterm_of hd2) with (* case of leaves *) | (FAtom a1, FAtom a2) -> (match a1, a2 with | (Sort s1, Sort s2) -> assert (is_empty_stack v1 && is_empty_stack v2); sort_cmp univ cv_pb s1 s2 | (Meta n, Meta m) -> if n=m then convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible | _ -> raise NotConvertible) | (FEvar (ev1,args1), FEvar (ev2,args2)) -> if ev1=ev2 then (convert_stacks univ infos lft1 lft2 v1 v2; convert_vect univ infos el1 el2 args1 args2) else raise NotConvertible (* 2 index known to be bound to no constant *) | (FRel n, FRel m) -> if reloc_rel n el1 = reloc_rel m el2 then convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible (* 2 constants, 2 local defined vars or 2 defined rels *) | (FFlex fl1, FFlex fl2) -> (try (* try first intensional equality *) if eq_table_key fl1 fl2 then convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible with NotConvertible -> (* else the oracle tells which constant is to be expanded *) let (app1,app2) = if oracle_order fl1 fl2 then match unfold_reference infos fl1 with | Some def1 -> ((lft1, whd_stack infos def1 v1), appr2) | None -> (match unfold_reference infos fl2 with | Some def2 -> (appr1, (lft2, whd_stack infos def2 v2)) | None -> raise NotConvertible) else match unfold_reference infos fl2 with | Some def2 -> (appr1, (lft2, whd_stack infos def2 v2)) | None -> (match unfold_reference infos fl1 with | Some def1 -> ((lft1, whd_stack infos def1 v1), appr2) | None -> raise NotConvertible) in eqappr univ cv_pb infos app1 app2) (* other constructors *) | (FLambda _, FLambda _) -> (* Inconsistency: we tolerate that v1, v2 contain shift and update but we throw them away *) assert (is_empty_stack v1 && is_empty_stack v2); let (_,ty1,bd1) = destFLambda mk_clos hd1 in let (_,ty2,bd2) = destFLambda mk_clos hd2 in ccnv univ CONV infos el1 el2 ty1 ty2; ccnv univ CONV infos (el_lift el1) (el_lift el2) bd1 bd2 | (FProd (_,c1,c2), FProd (_,c'1,c'2)) -> assert (is_empty_stack v1 && is_empty_stack v2); (* Luo's system *) ccnv univ CONV infos el1 el2 c1 c'1; ccnv univ cv_pb infos (el_lift el1) (el_lift el2) c2 c'2 (* Eta-expansion on the fly *) | (FLambda _, _) -> if v1 <> [] then anomaly "conversion was given unreduced term (FLambda)"; let (_,_ty1,bd1) = destFLambda mk_clos hd1 in eqappr univ CONV infos (el_lift lft1,(bd1,[])) (el_lift lft2,(hd2,eta_expand_stack v2)) | (_, FLambda _) -> if v2 <> [] then anomaly "conversion was given unreduced term (FLambda)"; let (_,_ty2,bd2) = destFLambda mk_clos hd2 in eqappr univ CONV infos (el_lift lft1,(hd1,eta_expand_stack v1)) (el_lift lft2,(bd2,[])) (* only one constant, defined var or defined rel *) | (FFlex fl1, _) -> (match unfold_reference infos fl1 with | Some def1 -> eqappr univ cv_pb infos (lft1, whd_stack infos def1 v1) appr2 | None -> raise NotConvertible) | (_, FFlex fl2) -> (match unfold_reference infos fl2 with | Some def2 -> eqappr univ cv_pb infos appr1 (lft2, whd_stack infos def2 v2) | None -> raise NotConvertible) (* Inductive types: MutInd MutConstruct Fix Cofix *) | (FInd ind1, FInd ind2) -> if mind_equiv_infos infos ind1 ind2 then convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible | (FConstruct (ind1,j1), FConstruct (ind2,j2)) -> if 
j1 = j2 && mind_equiv_infos infos ind1 ind2 then convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible | (FFix ((op1,(_,tys1,cl1)),e1), FFix((op2,(_,tys2,cl2)),e2)) -> if op1 = op2 then let n = Array.length cl1 in let fty1 = Array.map (mk_clos e1) tys1 in let fty2 = Array.map (mk_clos e2) tys2 in let fcl1 = Array.map (mk_clos (subs_liftn n e1)) cl1 in let fcl2 = Array.map (mk_clos (subs_liftn n e2)) cl2 in convert_vect univ infos el1 el2 fty1 fty2; convert_vect univ infos (el_liftn n el1) (el_liftn n el2) fcl1 fcl2; convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible | (FCoFix ((op1,(_,tys1,cl1)),e1), FCoFix((op2,(_,tys2,cl2)),e2)) -> if op1 = op2 then let n = Array.length cl1 in let fty1 = Array.map (mk_clos e1) tys1 in let fty2 = Array.map (mk_clos e2) tys2 in let fcl1 = Array.map (mk_clos (subs_liftn n e1)) cl1 in let fcl2 = Array.map (mk_clos (subs_liftn n e2)) cl2 in convert_vect univ infos el1 el2 fty1 fty2; convert_vect univ infos (el_liftn n el1) (el_liftn n el2) fcl1 fcl2; convert_stacks univ infos lft1 lft2 v1 v2 else raise NotConvertible (* Should not happen because both (hd1,v1) and (hd2,v2) are in whnf *) | ( (FLetIn _, _) | (FCases _,_) | (FApp _,_) | (FCLOS _,_) | (FLIFT _,_) | (_, FLetIn _) | (_,FCases _) | (_,FApp _) | (_,FCLOS _) | (_,FLIFT _) | (FLOCKED,_) | (_,FLOCKED) ) -> assert false (* In all other cases, terms are not convertible *) | _ -> raise NotConvertible and convert_stacks univ infos lft1 lft2 stk1 stk2 = compare_stacks (fun (l1,t1) (l2,t2) -> ccnv univ CONV infos l1 l2 t1 t2) (mind_equiv_infos infos) lft1 stk1 lft2 stk2 and convert_vect univ infos lft1 lft2 v1 v2 = array_iter2 (fun t1 t2 -> ccnv univ CONV infos lft1 lft2 t1 t2) v1 v2 let clos_fconv cv_pb env t1 t2 = let infos = create_clos_infos betaiotazeta env in let univ = universes env in ccnv univ cv_pb infos el_id el_id (inject t1) (inject t2) let fconv cv_pb env t1 t2 = if eq_constr t1 t2 then () else clos_fconv cv_pb env t1 t2 let conv = fconv CONV let conv_leq = fconv CUMUL (* option for conversion : no compilation for the checker *) let vm_conv = fconv (********************************************************************) (* Special-Purpose Reduction *) (********************************************************************) (* pseudo-reduction rule: * [hnf_prod_app env s (Prod(_,B)) N --> B[N] * with an HNF on the first argument to produce a product. * if this does not work, then we use the string S as part of our * error message. 
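 * For instance, if [t] head-normalizes to [forall x:nat, P x], then
 * [hnf_prod_app env t a] returns [P a], i.e. the codomain [B] instantiated
 * by the argument [N] in the rule above.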
*) let hnf_prod_app env t n = match whd_betadeltaiota env t with | Prod (_,_,b) -> subst1 n b | _ -> anomaly "hnf_prod_app: Need a product" let hnf_prod_applist env t nl = List.fold_left (hnf_prod_app env) t nl (* Dealing with arities *) let dest_prod env = let rec decrec env m c = let t = whd_betadeltaiota env c in match t with | Prod (n,a,c0) -> let d = (n,None,a) in decrec (push_rel d env) (d::m) c0 | _ -> m,t in decrec env empty_rel_context (* The same but preserving lets *) let dest_prod_assum env = let rec prodec_rec env l ty = let rty = whd_betadeltaiota_nolet env ty in match rty with | Prod (x,t,c) -> let d = (x,None,t) in prodec_rec (push_rel d env) (d::l) c | LetIn (x,b,t,c) -> let d = (x,Some b,t) in prodec_rec (push_rel d env) (d::l) c | Cast (c,_,_) -> prodec_rec env l c | _ -> l,rty in prodec_rec env empty_rel_context let dest_arity env c = let l, c = dest_prod_assum env c in match c with | Sort s -> l,s | _ -> error "not an arity" coq-8.4pl2/checker/subtyping.mli0000640000175000001440000000136212010532755015764 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_type_body -> module_type_body -> unit coq-8.4pl2/checker/indtypes.ml0000640000175000001440000005042612010532755015433 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string_of_dirpath sl | MPbound uid -> "bound("^string_of_mbid uid^")" | MPdot (mp,l) -> debug_string_of_mp mp ^ "." ^ string_of_label l let rec string_of_mp = function | MPfile sl -> string_of_dirpath sl | MPbound uid -> string_of_mbid uid | MPdot (mp,l) -> string_of_mp mp ^ "." ^ string_of_label l let string_of_mp mp = if !Flags.debug then debug_string_of_mp mp else string_of_mp mp let prkn kn = let (mp,_,l) = repr_kn kn in str(string_of_mp mp ^ "." ^ string_of_label l) let prcon c = let ck = canonical_con c in let uk = user_con c in if ck=uk then prkn uk else (prkn uk ++str"(="++prkn ck++str")") (* Same as noccur_between but may perform reductions. Could be refined more... 
*) let weaker_noccur_between env x nvars t = if noccur_between x nvars t then Some t else let t' = whd_betadeltaiota env t in if noccur_between x nvars t' then Some t' else None let is_constructor_head t = match fst(decompose_app t) with | Rel _ -> true | _ -> false let conv_ctxt_prefix env (ctx1:rel_context) ctx2 = let rec chk env rctx1 rctx2 = match rctx1, rctx2 with (_,None,ty1 as d1)::rctx1', (_,None,ty2)::rctx2' -> conv env ty1 ty2; chk (push_rel d1 env) rctx1' rctx2' | (_,Some bd1,ty1 as d1)::rctx1', (_,Some bd2,ty2)::rctx2' -> conv env ty1 ty2; conv env bd1 bd2; chk (push_rel d1 env) rctx1' rctx2' | [],_ -> () | _ -> failwith "non convertible contexts" in chk env (List.rev ctx1) (List.rev ctx2) (************************************************************************) (* Various well-formedness check for inductive declarations *) (* Errors related to inductive constructions *) type inductive_error = | NonPos of env * constr * constr | NotEnoughArgs of env * constr * constr | NotConstructor of env * constr * constr | NonPar of env * constr * int * constr * constr | SameNamesTypes of identifier | SameNamesConstructors of identifier | SameNamesOverlap of identifier list | NotAnArity of identifier | BadEntry exception InductiveError of inductive_error (************************************************************************) (************************************************************************) (* Typing the arities and constructor types *) let rec sorts_of_constr_args env t = let t = whd_betadeltaiota_nolet env t in match t with | Prod (name,c1,c2) -> let varj = infer_type env c1 in let env1 = push_rel (name,None,c1) env in varj :: sorts_of_constr_args env1 c2 | LetIn (name,def,ty,c) -> let env1 = push_rel (name,Some def,ty) env in sorts_of_constr_args env1 c | _ when is_constructor_head t -> [] | _ -> anomaly "infos_and_sort: not a positive constructor" (* Prop and Set are small *) let is_small_sort = function | Prop _ -> true | _ -> false let is_logic_sort s = (s = Prop Null) (* [infos] is a sequence of pair [islogic,issmall] for each type in the product of a constructor or arity *) let is_small_constr infos = List.for_all (fun s -> is_small_sort s) infos let is_logic_constr infos = List.for_all (fun s -> is_logic_sort s) infos (* An inductive definition is a "unit" if it has only one constructor and that all arguments expected by this constructor are logical, this is the case for equality, conjunction of logical properties *) let is_unit constrsinfos = match constrsinfos with (* One info = One constructor *) | [|constrinfos|] -> is_logic_constr constrinfos | [||] -> (* type without constructors *) true | _ -> false let small_unit constrsinfos = let issmall = array_for_all is_small_constr constrsinfos and isunit = is_unit constrsinfos in issmall, isunit (* check information related to inductive arity *) let typecheck_arity env params inds = let nparamargs = rel_context_nhyps params in let nparamdecls = rel_context_length params in let check_arity arctxt = function Monomorphic mar -> let ar = mar.mind_user_arity in let _ = infer_type env ar in conv env (it_mkProd_or_LetIn (Sort mar.mind_sort) arctxt) ar; ar | Polymorphic par -> check_polymorphic_arity env params par; it_mkProd_or_LetIn (Sort(Type par.poly_level)) arctxt in let env_arities = Array.fold_left (fun env_ar ind -> let ar_ctxt = ind.mind_arity_ctxt in let _ = check_ctxt env ar_ctxt in conv_ctxt_prefix env params ar_ctxt; (* Arities (with params) are typed-checked here *) let arity = check_arity ar_ctxt ind.mind_arity in 
(* mind_nrealargs *) let nrealargs = rel_context_nhyps ar_ctxt - nparamargs in if ind.mind_nrealargs <> nrealargs then failwith "bad number of real inductive arguments"; let nrealargs_ctxt = rel_context_length ar_ctxt - nparamdecls in if ind.mind_nrealargs_ctxt <> nrealargs_ctxt then failwith "bad length of real inductive arguments signature"; (* We do not need to generate the universe of full_arity; if later, after the validation of the inductive definition, full_arity is used as argument or subject to cast, an upper universe will be generated *) let id = ind.mind_typename in let env_ar' = push_rel (Name id, None, arity) env_ar in env_ar') env inds in env_arities (* Allowed eliminations *) let check_predicativity env s small level = match s, engagement env with Type u, _ -> let u' = fresh_local_univ () in let cst = merge_constraints (enforce_geq u' u empty_constraint) (universes env) in if not (check_geq cst u' level) then failwith "impredicative Type inductive type" | Prop Pos, Some ImpredicativeSet -> () | Prop Pos, _ -> if not small then failwith "impredicative Set inductive type" | Prop Null,_ -> () let sort_of_ind = function Monomorphic mar -> mar.mind_sort | Polymorphic par -> Type par.poly_level let all_sorts = [InProp;InSet;InType] let small_sorts = [InProp;InSet] let logical_sorts = [InProp] let allowed_sorts issmall isunit s = match family_of_sort s with (* Type: all elimination allowed *) | InType -> all_sorts (* Small Set is predicative: all elimination allowed *) | InSet when issmall -> all_sorts (* Large Set is necessarily impredicative: forbids large elimination *) | InSet -> small_sorts (* Unitary/empty Prop: elimination to all sorts are realizable *) (* unless the type is large. If it is large, forbids large elimination *) (* which otherwise allows to simulate the inconsistent system Type:Type *) | InProp when isunit -> if issmall then all_sorts else small_sorts (* Other propositions: elimination only to Prop *) | InProp -> logical_sorts let compute_elim_sorts env_ar params mib arity lc = let inst = extended_rel_list 0 params in let env_params = push_rel_context params env_ar in let lc = Array.map (fun c -> hnf_prod_applist env_params (lift (rel_context_length params) c) inst) lc in let s = sort_of_ind arity in let infos = Array.map (sorts_of_constr_args env_params) lc in let (small,unit) = small_unit infos in (* We accept recursive unit types... *) let unit = unit && mib.mind_ntypes = 1 in (* compute the max of the sorts of the products of the constructor type *) let level = max_inductive_sort (Array.concat (Array.to_list (Array.map Array.of_list infos))) in check_predicativity env_ar s small level; allowed_sorts small unit s let typecheck_one_inductive env params mib mip = (* mind_typename and mind_consnames not checked *) (* mind_reloc_tbl, mind_nb_constant, mind_nb_args not checked (VM) *) (* mind_arity_ctxt, mind_arity, mind_nrealargs DONE (typecheck_arity) *) (* mind_user_lc *) let _ = Array.map (infer_type env) mip.mind_user_lc in (* mind_nf_lc *) let _ = Array.map (infer_type env) mip.mind_nf_lc in array_iter2 (conv env) mip.mind_nf_lc mip.mind_user_lc; (* mind_consnrealdecls *) let check_cons_args c n = let ctx,_ = decompose_prod_assum c in if n <> rel_context_length ctx - rel_context_length params then failwith "bad number of real constructor arguments" in array_iter2 check_cons_args mip.mind_nf_lc mip.mind_consnrealdecls; (* mind_kelim: checked by positivity criterion ? 
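   (for instance, a small unit proposition such as [eq] may be eliminated
   into any sort, while a proposition with several constructors, e.g. [or],
   may only be eliminated into [Prop]; this is what [allowed_sorts] above
   computes)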
*) let sorts = compute_elim_sorts env params mib mip.mind_arity mip.mind_nf_lc in if List.exists (fun s -> not (List.mem s sorts)) mip.mind_kelim then failwith "elimination not allowed"; (* mind_recargs: checked by positivity criterion *) () (************************************************************************) (************************************************************************) (* Positivity *) type ill_formed_ind = | LocalNonPos of int | LocalNotEnoughArgs of int | LocalNotConstructor | LocalNonPar of int * int exception IllFormedInd of ill_formed_ind (* [mind_extract_params mie] extracts the params from an inductive types declaration, and checks that they are all present (and all the same) for all the given types. *) let mind_extract_params = decompose_prod_n_assum let explain_ind_err ntyp env0 nbpar c err = let (lpar,c') = mind_extract_params nbpar c in let env = push_rel_context lpar env0 in match err with | LocalNonPos kt -> raise (InductiveError (NonPos (env,c',Rel (kt+nbpar)))) | LocalNotEnoughArgs kt -> raise (InductiveError (NotEnoughArgs (env,c',Rel (kt+nbpar)))) | LocalNotConstructor -> raise (InductiveError (NotConstructor (env,c',Rel (ntyp+nbpar)))) | LocalNonPar (n,l) -> raise (InductiveError (NonPar (env,c',n,Rel (nbpar-n+1), Rel (l+nbpar)))) let failwith_non_pos n ntypes c = for k = n to n + ntypes - 1 do if not (noccurn k c) then raise (IllFormedInd (LocalNonPos (k-n+1))) done let failwith_non_pos_vect n ntypes v = Array.iter (failwith_non_pos n ntypes) v; anomaly "failwith_non_pos_vect: some k in [n;n+ntypes-1] should occur" let failwith_non_pos_list n ntypes l = List.iter (failwith_non_pos n ntypes) l; anomaly "failwith_non_pos_list: some k in [n;n+ntypes-1] should occur" (* Conclusion of constructors: check the inductive type is called with the expected parameters *) let check_correct_par (env,n,ntypes,_) hyps l largs = let nparams = rel_context_nhyps hyps in let largs = Array.of_list largs in if Array.length largs < nparams then raise (IllFormedInd (LocalNotEnoughArgs l)); let (lpar,largs') = array_chop nparams largs in let nhyps = List.length hyps in let rec check k index = function | [] -> () | (_,Some _,_)::hyps -> check k (index+1) hyps | _::hyps -> match whd_betadeltaiota env lpar.(k) with | Rel w when w = index -> check (k-1) (index+1) hyps | _ -> raise (IllFormedInd (LocalNonPar (k+1,l))) in check (nparams-1) (n-nhyps) hyps; if not (array_for_all (noccur_between n ntypes) largs') then failwith_non_pos_vect n ntypes largs' (* Arguments of constructor: check the number of recursive parameters nrecp. the first parameters which are constant in recursive arguments n is the current depth, nmr is the maximum number of possible recursive parameters *) let check_rec_par (env,n,_,_) hyps nrecp largs = let (lpar,_) = list_chop nrecp largs in let rec find index = function | ([],_) -> () | (_,[]) -> failwith "number of recursive parameters cannot be greater than the number of parameters." 
| (lp,(_,Some _,_)::hyps) -> find (index-1) (lp,hyps) | (p::lp,_::hyps) -> (match whd_betadeltaiota env p with | Rel w when w = index -> find (index-1) (lp,hyps) | _ -> failwith "bad number of recursive parameters") in find (n-1) (lpar,List.rev hyps) let lambda_implicit_lift n a = let lambda_implicit a = Lambda(Anonymous,Evar(0,[||]),a) in iterate lambda_implicit n (lift n a) (* This removes global parameters of the inductive types in lc (for nested inductive types only ) *) let abstract_mind_lc env ntyps npars lc = if npars = 0 then lc else let make_abs = list_tabulate (function i -> lambda_implicit_lift npars (Rel (i+1))) ntyps in Array.map (substl make_abs) lc (* [env] is the typing environment [n] is the dB of the last inductive type [ntypes] is the number of inductive types in the definition (i.e. range of inductives is [n; n+ntypes-1]) [lra] is the list of recursive tree of each variable *) let ienv_push_var (env, n, ntypes, lra) (x,a,ra) = (push_rel (x,None,a) env, n+1, ntypes, (Norec,ra)::lra) let ienv_push_inductive (env, n, ntypes, ra_env) (mi,lpar) = let auxntyp = 1 in let specif = lookup_mind_specif env mi in let env' = push_rel (Anonymous,None, hnf_prod_applist env (type_of_inductive env specif) lpar) env in let ra_env' = (Imbr mi,(Rtree.mk_rec_calls 1).(0)) :: List.map (fun (r,t) -> (r,Rtree.lift 1 t)) ra_env in (* New index of the inductive types *) let newidx = n + auxntyp in (env', newidx, ntypes, ra_env') let rec ienv_decompose_prod (env,_,_,_ as ienv) n c = if n=0 then (ienv,c) else let c' = whd_betadeltaiota env c in match c' with Prod(na,a,b) -> let ienv' = ienv_push_var ienv (na,a,mk_norec) in ienv_decompose_prod ienv' (n-1) b | _ -> assert false (* The recursive function that checks positivity and builds the list of recursive arguments *) let check_positivity_one (env, _,ntypes,_ as ienv) hyps nrecp (_,i as ind) indlc = let lparams = rel_context_length hyps in (* check the inductive types occur positively in [c] *) let rec check_pos (env, n, ntypes, ra_env as ienv) c = let x,largs = decompose_app (whd_betadeltaiota env c) in match x with | Prod (na,b,d) -> assert (largs = []); (match weaker_noccur_between env n ntypes b with None -> failwith_non_pos_list n ntypes [b] | Some b -> check_pos (ienv_push_var ienv (na, b, mk_norec)) d) | Rel k -> (try let (ra,rarg) = List.nth ra_env (k-1) in (match ra with Mrec _ -> check_rec_par ienv hyps nrecp largs | _ -> ()); if not (List.for_all (noccur_between n ntypes) largs) then failwith_non_pos_list n ntypes largs else rarg with Failure _ | Invalid_argument _ -> mk_norec) | Ind ind_kn -> (* If the inductive type being defined appears in a parameter, then we have an imbricated type *) if List.for_all (noccur_between n ntypes) largs then mk_norec else check_positive_imbr ienv (ind_kn, largs) | err -> if noccur_between n ntypes x && List.for_all (noccur_between n ntypes) largs then mk_norec else failwith_non_pos_list n ntypes (x::largs) (* accesses to the environment are not factorised, but is it worth it? *) and check_positive_imbr (env,n,ntypes,ra_env as ienv) (mi, largs) = let (mib,mip) = lookup_mind_specif env mi in let auxnpar = mib.mind_nparams_rec in let nonrecpar = mib.mind_nparams - auxnpar in let (lpar,auxlargs) = try list_chop auxnpar largs with Failure _ -> raise (IllFormedInd (LocalNonPos n)) in (* If the inductive appears in the args (non params) then the definition is not positive. 
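   For instance, [Inductive t := C : list t -> t] is accepted because [t]
   only occurs as the (uniform) parameter of [list], whereas an occurrence
   of [t] inside one of the remaining, non-parameter arguments of the nested
   inductive is rejected here.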
*) if not (List.for_all (noccur_between n ntypes) auxlargs) then raise (IllFormedInd (LocalNonPos n)); (* We do not deal with imbricated mutual inductive types *) let auxntyp = mib.mind_ntypes in if auxntyp <> 1 then raise (IllFormedInd (LocalNonPos n)); (* The nested inductive type with parameters removed *) let auxlcvect = abstract_mind_lc env auxntyp auxnpar mip.mind_nf_lc in (* Extends the environment with a variable corresponding to the inductive def *) let (env',_,_,_ as ienv') = ienv_push_inductive ienv (mi,lpar) in (* Parameters expressed in env' *) let lpar' = List.map (lift auxntyp) lpar in let irecargs = (* fails if the inductive type occurs non positively *) (* with recursive parameters substituted *) Array.map (function c -> let c' = hnf_prod_applist env' c lpar' in (* skip non-recursive parameters *) let (ienv',c') = ienv_decompose_prod ienv' nonrecpar c' in check_constructors ienv' false c') auxlcvect in (Rtree.mk_rec [|mk_paths (Imbr mi) irecargs|]).(0) (* check the inductive types occur positively in the products of C, if check_head=true, also check the head corresponds to a constructor of the ith type *) and check_constructors ienv check_head c = let rec check_constr_rec (env,n,ntypes,ra_env as ienv) lrec c = let x,largs = decompose_app (whd_betadeltaiota env c) in match x with | Prod (na,b,d) -> assert (largs = []); let recarg = check_pos ienv b in let ienv' = ienv_push_var ienv (na,b,mk_norec) in check_constr_rec ienv' (recarg::lrec) d | hd -> if check_head then if hd = Rel (n+ntypes-i-1) then check_correct_par ienv hyps (ntypes-i) largs else raise (IllFormedInd LocalNotConstructor) else if not (List.for_all (noccur_between n ntypes) largs) then raise (IllFormedInd (LocalNonPos n)); List.rev lrec in check_constr_rec ienv [] c in let irecargs = Array.map (fun c -> let _,rawc = mind_extract_params lparams c in try check_constructors ienv true rawc with IllFormedInd err -> explain_ind_err (ntypes-i) env lparams c err) indlc in mk_paths (Mrec ind) irecargs let check_subtree (t1:'a) (t2:'a) = if not (Rtree.compare_rtree (fun t1 t2 -> let l1 = fst(Rtree.dest_node t1) in let l2 = fst(Rtree.dest_node t2) in if l1 = Norec || l1 = l2 then 0 else -1) t1 t2) then failwith "bad recursive trees" (* if t1=t2 then () else msg_warning (str"TODO: check recursive positions")*) let check_positivity env_ar mind params nrecp inds = let ntypes = Array.length inds in let rc = Array.mapi (fun j t -> (Mrec(mind,j),t)) (Rtree.mk_rec_calls ntypes) in let lra_ind = List.rev (Array.to_list rc) in let lparams = rel_context_length params in let check_one i mip = let ra_env = list_tabulate (fun _ -> (Norec,mk_norec)) lparams @ lra_ind in let ienv = (env_ar, 1+lparams, ntypes, ra_env) in check_positivity_one ienv params nrecp (mind,i) mip.mind_nf_lc in let irecargs = Array.mapi check_one inds in let wfp = Rtree.mk_rec irecargs in array_iter2 (fun ind wfpi -> check_subtree ind.mind_recargs wfpi) inds wfp (************************************************************************) (************************************************************************) let check_inductive env kn mib = Flags.if_verbose msgnl (str " checking ind: " ++ pr_mind kn); (* check mind_constraints: should be consistent with env *) let env = add_constraints mib.mind_constraints env in (* check mind_record : TODO ? check #constructor = 1 ? 
*) (* check mind_finite : always OK *) (* check mind_ntypes *) if Array.length mib.mind_packets <> mib.mind_ntypes then error "not the right number of packets"; (* check mind_hyps: should be empty *) if mib.mind_hyps <> empty_named_context then error "section context not empty"; (* check mind_params_ctxt *) let params = mib.mind_params_ctxt in let _ = check_ctxt env params in (* check mind_nparams *) if rel_context_nhyps params <> mib.mind_nparams then error "number the right number of parameters"; (* mind_packets *) (* - check arities *) let env_ar = typecheck_arity env params mib.mind_packets in (* - check constructor types *) Array.iter (typecheck_one_inductive env_ar params mib) mib.mind_packets; (* check mind_nparams_rec: positivity condition *) check_positivity env_ar kn params mib.mind_nparams_rec mib.mind_packets; (* check mind_equiv... *) (* Now we can add the inductive *) add_mind kn mib env coq-8.4pl2/checker/typeops.ml0000640000175000001440000003124512010532755015275 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (try conv_leq env t1 t2 with NotConvertible -> raise (NotConvertibleVect i)); ()) () v1 v2 (* This should be a type (a priori without intension to be an assumption) *) let type_judgment env (c,ty as j) = match whd_betadeltaiota env ty with | Sort s -> (c,s) | _ -> error_not_type env j (* This should be a type intended to be assumed. The error message is *) (* not as useful as for [type_judgment]. *) let assumption_of_judgment env j = try fst(type_judgment env j) with TypeError _ -> error_assumption env j (************************************************) (* Incremental typing rules: builds a typing judgement given the *) (* judgements for the subterms. *) (*s Type of sorts *) (* Prop and Set *) let judge_of_prop = Sort (Type type1_univ) (* Type of Type(i). *) let judge_of_type u = Sort (Type (super u)) (*s Type of a de Bruijn index. *) let judge_of_relative env n = try let (_,_,typ) = lookup_rel n env in lift n typ with Not_found -> error_unbound_rel env n (* Type of variables *) let judge_of_variable env id = try named_type id env with Not_found -> error_unbound_var env id (* Management of context of variables. *) (* Checks if a context of variable can be instantiated by the variables of the current env *) (* TODO: check order? *) let rec check_hyps_inclusion env sign = fold_named_context (fun (id,_,ty1) () -> let ty2 = named_type id env in if not (eq_constr ty2 ty1) then error "types do not match") sign ~init:() let check_args env c hyps = try check_hyps_inclusion env hyps with UserError _ | Not_found -> error_reference_variables env c (* Type of constants *) let type_of_constant_knowing_parameters env t paramtyps = match t with | NonPolymorphicType t -> t | PolymorphicArity (sign,ar) -> let ctx = List.rev sign in let ctx,s = instantiate_universes env ctx ar paramtyps in mkArity (List.rev ctx,s) let type_of_constant_type env t = type_of_constant_knowing_parameters env t [||] let judge_of_constant_knowing_parameters env cst paramstyp = let c = Const cst in let cb = try lookup_constant cst env with Not_found -> failwith ("Cannot find constant: "^string_of_con cst) in let _ = check_args env c cb.const_hyps in type_of_constant_knowing_parameters env cb.const_type paramstyp let judge_of_constant env cst = judge_of_constant_knowing_parameters env cst [||] (* Type of an application. 
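   Schematically, the rule implemented below is
     env |- f : forall x:A, B    env |- a : A'    A' <= A
     -----------------------------------------------------
     env |- (f a) : B{x:=a}
   iterated over the whole argument vector.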
*) let judge_of_apply env (f,funj) argjv = let rec apply_rec n typ = function | [] -> typ | (h,hj)::restjl -> (match whd_betadeltaiota env typ with | Prod (_,c1,c2) -> (try conv_leq env hj c1 with NotConvertible -> error_cant_apply_bad_type env (n,c1, hj) (f,funj) argjv); apply_rec (n+1) (subst1 h c2) restjl | _ -> error_cant_apply_not_functional env (f,funj) argjv) in apply_rec 1 funj (Array.to_list argjv) (* Type of product *) let sort_of_product env domsort rangsort = match (domsort, rangsort) with (* Product rule (s,Prop,Prop) *) | (_, Prop Null) -> rangsort (* Product rule (Prop/Set,Set,Set) *) | (Prop _, Prop Pos) -> rangsort (* Product rule (Type,Set,?) *) | (Type u1, Prop Pos) -> if engagement env = Some ImpredicativeSet then (* Rule is (Type,Set,Set) in the Set-impredicative calculus *) rangsort else (* Rule is (Type_i,Set,Type_i) in the Set-predicative calculus *) Type (sup u1 type0_univ) (* Product rule (Prop,Type_i,Type_i) *) | (Prop Pos, Type u2) -> Type (sup type0_univ u2) (* Product rule (Prop,Type_i,Type_i) *) | (Prop Null, Type _) -> rangsort (* Product rule (Type_i,Type_i,Type_i) *) | (Type u1, Type u2) -> Type (sup u1 u2) (* Type of a type cast *) (* [judge_of_cast env (c,typ1) (typ2,s)] implements the rule env |- c:typ1 env |- typ2:s env |- typ1 <= typ2 --------------------------------------------------------------------- env |- c:typ2 *) let judge_of_cast env (c,cj) k tj = let conversion = match k with | VMcast -> vm_conv CUMUL | DEFAULTcast -> conv_leq in try conversion env cj tj with NotConvertible -> error_actual_type env (c,cj) tj (* Inductive types. *) (* The type is parametric over the uniform parameters whose conclusion is in Type; to enforce the internal constraints between the parameters and the instances of Type occurring in the type of the constructors, we use the level variables _statically_ assigned to the conclusions of the parameters as mediators: e.g. if a parameter has conclusion Type(alpha), static constraints of the form alpha<=v exist between alpha and the Type's occurring in the constructor types; when the parameters is finally instantiated by a term of conclusion Type(u), then the constraints u<=alpha is computed in the App case of execute; from this constraints, the expected dynamic constraints of the form u<=v are enforced *) let judge_of_inductive_knowing_parameters env ind (paramstyp:constr array) = let c = Ind ind in let (mib,mip) = try lookup_mind_specif env ind with Not_found -> failwith ("Cannot find inductive: "^string_of_mind (fst ind)) in check_args env c mib.mind_hyps; type_of_inductive_knowing_parameters env mip paramstyp let judge_of_inductive env ind = judge_of_inductive_knowing_parameters env ind [||] (* Constructors. *) let judge_of_constructor env c = let constr = Construct c in let _ = let ((kn,_),_) = c in let mib = try lookup_mind kn env with Not_found -> failwith ("Cannot find inductive: "^string_of_mind (fst (fst c))) in check_args env constr mib.mind_hyps in let specif = lookup_mind_specif env (inductive_of_constructor c) in type_of_constructor c specif (* Case. 
*) let check_branch_types env (c,cj) (lfj,explft) = try conv_leq_vecti env lfj explft with NotConvertibleVect i -> error_ill_formed_branch env c i lfj.(i) explft.(i) | Invalid_argument _ -> error_number_branches env (c,cj) (Array.length explft) let judge_of_case env ci pj (c,cj) lfj = let indspec = try find_rectype env cj with Not_found -> error_case_not_inductive env (c,cj) in let _ = check_case_info env (fst indspec) ci in let (bty,rslty) = type_case_branches env indspec pj c in check_branch_types env (c,cj) (lfj,bty); rslty (* Fixpoints. *) (* Checks the type of a general (co)fixpoint, i.e. without checking *) (* the specific guard condition. *) let type_fixpoint env lna lar lbody vdefj = let lt = Array.length vdefj in assert (Array.length lar = lt && Array.length lbody = lt); try conv_leq_vecti env vdefj (Array.map (fun ty -> lift lt ty) lar) with NotConvertibleVect i -> let vdefj = array_map2 (fun b ty -> b,ty) lbody vdefj in error_ill_typed_rec_body env i lna vdefj lar (************************************************************************) (************************************************************************) let refresh_arity env ar = let ctxt, hd = decompose_prod_assum ar in match hd with Sort (Type u) when not (is_univ_variable u) -> let u' = fresh_local_univ() in let env' = add_constraints (enforce_geq u' u empty_constraint) env in env', mkArity (ctxt,Type u') | _ -> env, ar (* The typing machine. *) let rec execute env cstr = match cstr with (* Atomic terms *) | Sort (Prop _) -> judge_of_prop | Sort (Type u) -> judge_of_type u | Rel n -> judge_of_relative env n | Var id -> judge_of_variable env id | Const c -> judge_of_constant env c (* Lambda calculus operators *) | App (App (f,args),args') -> execute env (App(f,Array.append args args')) | App (f,args) -> let jl = execute_array env args in let j = match f with | Ind ind -> (* Sort-polymorphism of inductive types *) judge_of_inductive_knowing_parameters env ind jl | Const cst -> (* Sort-polymorphism of constant *) judge_of_constant_knowing_parameters env cst jl | _ -> (* No sort-polymorphism *) execute env f in let jl = array_map2 (fun c ty -> c,ty) args jl in judge_of_apply env (f,j) jl | Lambda (name,c1,c2) -> let _ = execute_type env c1 in let env1 = push_rel (name,None,c1) env in let j' = execute env1 c2 in Prod(name,c1,j') | Prod (name,c1,c2) -> let varj = execute_type env c1 in let env1 = push_rel (name,None,c1) env in let varj' = execute_type env1 c2 in Sort (sort_of_product env varj varj') | LetIn (name,c1,c2,c3) -> let j1 = execute env c1 in (* /!\ c2 can be an inferred type => refresh (but the pushed type is still c2) *) let _ = let env',c2' = refresh_arity env c2 in let _ = execute_type env' c2' in judge_of_cast env' (c1,j1) DEFAULTcast c2' in let env1 = push_rel (name,Some c1,c2) env in let j' = execute env1 c3 in subst1 c1 j' | Cast (c,k,t) -> let cj = execute env c in let _ = execute_type env t in judge_of_cast env (c,cj) k t; t (* Inductive types *) | Ind ind -> judge_of_inductive env ind | Construct c -> judge_of_constructor env c | Case (ci,p,c,lf) -> let cj = execute env c in let pj = execute env p in let lfj = execute_array env lf in judge_of_case env ci (p,pj) (c,cj) lfj | Fix ((_,i as vni),recdef) -> let fix_ty = execute_recdef env recdef i in let fix = (vni,recdef) in check_fix env fix; fix_ty | CoFix (i,recdef) -> let fix_ty = execute_recdef env recdef i in let cofix = (i,recdef) in check_cofix env cofix; fix_ty (* Partial proofs: unsupported by the kernel *) | Meta _ -> anomaly "the kernel does not 
support metavariables" | Evar _ -> anomaly "the kernel does not support existential variables" and execute_type env constr = let j = execute env constr in snd (type_judgment env (constr,j)) and execute_recdef env (names,lar,vdef) i = let larj = execute_array env lar in let larj = array_map2 (fun c ty -> c,ty) lar larj in let lara = Array.map (assumption_of_judgment env) larj in let env1 = push_rec_types (names,lara,vdef) env in let vdefj = execute_array env1 vdef in type_fixpoint env1 names lara vdef vdefj; lara.(i) and execute_array env = Array.map (execute env) (* Derived functions *) let infer env constr = execute env constr let infer_type env constr = execute_type env constr (* Typing of several terms. *) let check_ctxt env rels = fold_rel_context (fun d env -> match d with (_,None,ty) -> let _ = infer_type env ty in push_rel d env | (_,Some bd,ty) -> let j1 = infer env bd in let _ = infer env ty in conv_leq env j1 ty; push_rel d env) rels ~init:env let check_named_ctxt env ctxt = fold_named_context (fun (id,_,_ as d) env -> let _ = try let _ = lookup_named id env in failwith ("variable "^string_of_id id^" defined twice") with Not_found -> () in match d with (_,None,ty) -> let _ = infer_type env ty in push_named d env | (_,Some bd,ty) -> let j1 = infer env bd in let _ = infer env ty in conv_leq env j1 ty; push_named d env) ctxt ~init:env (* Polymorphic arities utils *) let check_kind env ar u = if snd (dest_prod env ar) = Sort(Type u) then () else failwith "not the correct sort" let check_polymorphic_arity env params par = let pl = par.poly_param_levels in let rec check_p env pl params = match pl, params with Some u::pl, (na,None,ty)::params -> check_kind env ty u; check_p (push_rel (na,None,ty) env) pl params | None::pl,d::params -> check_p (push_rel d env) pl params | [], _ -> () | _ -> failwith "check_poly: not the right number of params" in check_p env pl (List.rev params) coq-8.4pl2/checker/check.mllib0000640000175000001440000000044311321662572015337 0ustar notinusersCoq_config Pp_control Pp Compat Flags Segmenttree Unicodetable Util Option Hashcons System Envars Predicate Rtree Names Univ Esubst Validate Term Declarations Environ Closure Reduction Type_errors Modops Inductive Typeops Indtypes Subtyping Mod_checking Safe_typing Check Check_stat Checker coq-8.4pl2/library/0000750000175000001440000000000012127276531013300 5ustar notinuserscoq-8.4pl2/library/goptionstyp.mli0000640000175000001440000000165412010532755016403 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* safe_environment val env : unit -> Environ.env val env_is_empty : unit -> bool val universes : unit -> universes val named_context_val : unit -> Environ.named_context_val val named_context : unit -> Sign.named_context val env_is_empty : unit -> bool (** {6 Extending env with variables and local definitions } *) val push_named_assum : (identifier * types) -> Univ.constraints val push_named_def : (identifier * constr * types option) -> Univ.constraints (** {6 ... } *) (** Adding constants, inductives, modules and module types. 
All these functions verify that given names match those generated by kernel *) val add_constant : dir_path -> identifier -> global_declaration -> constant val add_mind : dir_path -> identifier -> mutual_inductive_entry -> mutual_inductive val add_module : identifier -> module_entry -> inline -> module_path * delta_resolver val add_modtype : identifier -> module_struct_entry -> inline -> module_path val add_include : module_struct_entry -> bool -> inline -> delta_resolver val add_constraints : constraints -> unit val set_engagement : engagement -> unit (** {6 Interactive modules and module types } Both [start_*] functions take the [dir_path] argument to create a [mod_self_id]. This should be the name of the compilation unit. *) (** [start_*] functions return the [module_path] valid for components of the started module / module type *) val start_module : identifier -> module_path val end_module : Summary.frozen ->identifier -> (module_struct_entry * inline) option -> module_path * delta_resolver val add_module_parameter : mod_bound_id -> module_struct_entry -> inline -> delta_resolver val start_modtype : identifier -> module_path val end_modtype : Summary.frozen -> identifier -> module_path val pack_module : unit -> module_body (** Queries *) val lookup_named : variable -> named_declaration val lookup_constant : constant -> constant_body val lookup_inductive : inductive -> mutual_inductive_body * one_inductive_body val lookup_mind : mutual_inductive -> mutual_inductive_body val lookup_module : module_path -> module_body val lookup_modtype : module_path -> module_type_body val constant_of_delta_kn : kernel_name -> constant val mind_of_delta_kn : kernel_name -> mutual_inductive val exists_objlabel : label -> bool (** Compiled modules *) val start_library : dir_path -> module_path val export : dir_path -> module_path * compiled_library val import : compiled_library -> Digest.t -> module_path (** {6 ... } *) (** Function to get an environment from the constants part of the global * environment and a given context. *) val type_of_global : Libnames.global_reference -> types val env_of_context : Environ.named_context_val -> Environ.env (** spiwack: register/unregister function for retroknowledge *) val register : Retroknowledge.field -> constr -> constr -> unit coq-8.4pl2/library/impargs.mli0000640000175000001440000001315712010532755015447 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val make_strict_implicit_args : bool -> unit val make_strongly_strict_implicit_args : bool -> unit val make_reversible_pattern_implicit_args : bool -> unit val make_contextual_implicit_args : bool -> unit val make_maximal_implicit_args : bool -> unit val is_implicit_args : unit -> bool val is_strict_implicit_args : unit -> bool val is_strongly_strict_implicit_args : unit -> bool val is_reversible_pattern_implicit_args : unit -> bool val is_contextual_implicit_args : unit -> bool val is_maximal_implicit_args : unit -> bool type implicits_flags val with_implicits : implicits_flags -> ('a -> 'b) -> 'a -> 'b (** {6 ... 
} *) (** An [implicits_list] is a list of positions telling which arguments of a reference can be automatically infered *) type argument_position = | Conclusion | Hyp of int (** We remember various information about why an argument is inferable as implicit *) type implicit_explanation = | DepRigid of argument_position (** means that the implicit argument can be found by unification along a rigid path (we do not print the arguments of this kind if there is enough arguments to infer them) *) | DepFlex of argument_position (** means that the implicit argument can be found by unification along a collapsable path only (e.g. as x in (P x) where P is another argument) (we do (defensively) print the arguments of this kind) *) | DepFlexAndRigid of (*flex*) argument_position * (*rig*) argument_position (** means that the least argument from which the implicit argument can be inferred is following a collapsable path but there is a greater argument from where the implicit argument is inferable following a rigid path (useful to know how to print a partial application) *) | Manual (** means the argument has been explicitely set as implicit. *) (** We also consider arguments inferable from the conclusion but it is operational only if [conclusion_matters] is true. *) type maximal_insertion = bool (** true = maximal contextual insertion *) type force_inference = bool (** true = always infer, never turn into evar/subgoal *) type implicit_status = (identifier * implicit_explanation * (maximal_insertion * force_inference)) option (** [None] = Not implicit *) type implicit_side_condition type implicits_list = implicit_side_condition * implicit_status list val is_status_implicit : implicit_status -> bool val is_inferable_implicit : bool -> int -> implicit_status -> bool val name_of_implicit : implicit_status -> identifier val maximal_insertion_of : implicit_status -> bool val force_inference_of : implicit_status -> bool val positions_of_implicits : implicits_list -> int list (** A [manual_explicitation] is a tuple of a positional or named explicitation with maximal insertion, force inference and force usage flags. Forcing usage makes the argument implicit even if the automatic inference considers it not inferable. *) type manual_explicitation = Topconstr.explicitation * (maximal_insertion * force_inference * bool) type manual_implicits = manual_explicitation list val compute_implicits_with_manual : env -> types -> bool -> manual_implicits -> implicit_status list val compute_implicits_names : env -> types -> name list (** {6 Computation of implicits (done using the global environment). } *) val declare_var_implicits : variable -> unit val declare_constant_implicits : constant -> unit val declare_mib_implicits : mutual_inductive -> unit val declare_implicits : bool -> global_reference -> unit (** [declare_manual_implicits local ref enriching l] Manual declaration of which arguments are expected implicit. If not set, we decide if it should enrich by automatically inferd implicits depending on the current state. Unsets implicits if [l] is empty. *) val declare_manual_implicits : bool -> global_reference -> ?enriching:bool -> manual_implicits list -> unit (** If the list is empty, do nothing, otherwise declare the implicits. 
*) val maybe_declare_manual_implicits : bool -> global_reference -> ?enriching:bool -> manual_implicits -> unit val implicits_of_global : global_reference -> implicits_list list val extract_impargs_data : implicits_list list -> ((int * int) option * implicit_status list) list val lift_implicits : int -> manual_implicits -> manual_implicits val make_implicits_list : implicit_status list -> implicits_list list val drop_first_implicits : int -> implicits_list -> implicits_list val select_impargs_size : int -> implicits_list list -> implicit_status list val select_stronger_impargs : implicits_list list -> implicit_status list type implicit_interactive_request type implicit_discharge_request = | ImplLocal | ImplConstant of constant * implicits_flags | ImplMutualInductive of mutual_inductive * implicits_flags | ImplInteractive of global_reference * implicits_flags * implicit_interactive_request coq-8.4pl2/library/library.mli0000640000175000001440000000647212010532755015453 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool option -> unit val require_library_from_dirpath : (dir_path * string) list -> bool option -> unit val require_library_from_file : identifier option -> System.physical_path -> bool option -> unit (** {6 ... } *) (** Open a module (or a library); if the boolean is true then it's also an export otherwise just a simple import *) val import_module : bool -> qualid located -> unit (** {6 Start the compilation of a library } *) val start_library : string -> dir_path * string (** {6 End the compilation of a library and save it to a ".vo" file } *) val save_library_to : dir_path -> string -> unit (** {6 Interrogate the status of libraries } *) (** - Tell if a library is loaded or opened *) val library_is_loaded : dir_path -> bool val library_is_opened : dir_path -> bool (** - Tell which libraries are loaded or imported *) val loaded_libraries : unit -> dir_path list val opened_libraries : unit -> dir_path list (** - Return the full filename of a loaded library. *) val library_full_filename : dir_path -> string (** - Overwrite the filename of all libraries (used when restoring a state) *) val overwrite_library_filenames : string -> unit (** {6 Hook for the xml exportation of libraries } *) val set_xml_require : (dir_path -> unit) -> unit (** {6 ... } *) (** Global load paths: a load path is a physical path in the file system; to each load path is associated a Coq [dir_path] (the "logical" path of the physical path) *) val get_load_paths : unit -> System.physical_path list val get_full_load_paths : unit -> (System.physical_path * dir_path) list val add_load_path : bool -> System.physical_path * dir_path -> unit val remove_load_path : System.physical_path -> unit val find_logical_path : System.physical_path -> dir_path val is_in_load_paths : System.physical_path -> bool (** {6 Locate a library in the load paths } *) exception LibUnmappedDir exception LibNotFound type library_location = LibLoaded | LibInPath val locate_qualified_library : bool -> qualid -> library_location * dir_path * System.physical_path val try_locate_qualified_library : qualid located -> dir_path * string (** {6 Statistics: display the memory use of a library. 
} *) val mem : dir_path -> Pp.std_ppcmds coq-8.4pl2/library/lib.mli0000640000175000001440000001747012010532755014555 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Libnames.object_prefix -> lib_objects -> unit val load_objects : int -> Libnames.object_prefix -> lib_objects -> unit val subst_objects : Mod_subst.substitution -> lib_objects -> lib_objects (*val load_and_subst_objects : int -> Libnames.object_prefix -> Mod_subst.substitution -> lib_objects -> lib_objects*) (** [classify_segment seg] verifies that there are no OpenedThings, clears ClosedSections and FrozenStates and divides Leafs according to their answers to the [classify_object] function in three groups: [Substitute], [Keep], [Anticipate] respectively. The order of each returned list is the same as in the input list. *) val classify_segment : library_segment -> lib_objects * lib_objects * Libobject.obj list (** [segment_of_objects prefix objs] forms a list of Leafs *) val segment_of_objects : Libnames.object_prefix -> lib_objects -> library_segment (** {6 ... } *) (** Adding operations (which call the [cache] method, and getting the current list of operations (most recent ones coming first). *) val add_leaf : Names.identifier -> Libobject.obj -> Libnames.object_name val add_anonymous_leaf : Libobject.obj -> unit (** this operation adds all objects with the same name and calls [load_object] for each of them *) val add_leaves : Names.identifier -> Libobject.obj list -> Libnames.object_name val add_frozen_state : unit -> unit (** {6 ... } *) (** The function [contents_after] returns the current library segment, starting from a given section path. If not given, the entire segment is returned. 
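    For instance, [contents_after None] is the whole current segment, while a
    (hypothetical) caller that kept the [Libnames.object_name] returned by an
    earlier [add_leaf] can restrict itself to what has been declared since,
    with [contents_after (Some oname)].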
*) val contents_after : Libnames.object_name option -> library_segment (** {6 Functions relative to current path } *) (** User-side names *) val cwd : unit -> Names.dir_path val cwd_except_section : unit -> Names.dir_path val current_dirpath : bool -> Names.dir_path (* false = except sections *) val make_path : Names.identifier -> Libnames.full_path val make_path_except_section : Names.identifier -> Libnames.full_path val path_of_include : unit -> Libnames.full_path (** Kernel-side names *) val current_prefix : unit -> Names.module_path * Names.dir_path val make_kn : Names.identifier -> Names.kernel_name val make_con : Names.identifier -> Names.constant (** Are we inside an opened section *) val sections_are_opened : unit -> bool val sections_depth : unit -> int (** Are we inside an opened module type *) val is_module_or_modtype : unit -> bool val is_modtype : unit -> bool val is_module : unit -> bool val current_mod_id : unit -> Names.module_ident (** Returns the opening node of a given name *) val find_opening_node : Names.identifier -> node (** {6 Modules and module types } *) val start_module : export -> Names.module_ident -> Names.module_path -> Summary.frozen -> Libnames.object_prefix val start_modtype : Names.module_ident -> Names.module_path -> Summary.frozen -> Libnames.object_prefix val end_module : unit -> Libnames.object_name * Libnames.object_prefix * Summary.frozen * library_segment val end_modtype : unit -> Libnames.object_name * Libnames.object_prefix * Summary.frozen * library_segment (** [Lib.add_frozen_state] must be called after each of the above functions *) (** {6 Compilation units } *) val start_compilation : Names.dir_path -> Names.module_path -> unit val end_compilation : Names.dir_path -> Libnames.object_prefix * library_segment (** The function [library_dp] returns the [dir_path] of the current compiling library (or [default_library]) *) val library_dp : unit -> Names.dir_path (** Extract the library part of a name even if in a section *) val dp_of_mp : Names.module_path -> Names.dir_path val split_mp : Names.module_path -> Names.dir_path * Names.dir_path val split_modpath : Names.module_path -> Names.dir_path * Names.identifier list val library_part : Libnames.global_reference -> Names.dir_path val remove_section_part : Libnames.global_reference -> Names.dir_path (** {6 Sections } *) val open_section : Names.identifier -> unit val close_section : unit -> unit (** {6 Backtracking } *) (** NB: The next commands are low-level ones, do not use them directly otherwise the command history stack in [Backtrack] will be out-of-sync. Also note that [reset_initial] is now [reset_label first_command_label] *) (** Adds a "dummy" entry in lib_stk with a unique new label number. *) val mark_end_of_command : unit -> unit (** Returns the current label number *) val current_command_label : unit -> int (** The first label number *) val first_command_label : int (** [reset_label n] resets [lib_stk] to the label n registered by [mark_end_of_command()]. It forgets anything registered after this label. The label should be strictly in the past. *) val reset_label : int -> unit (** search the label registered immediately before adding some definition *) val label_before_name : Names.identifier Util.located -> int (** {6 We can get and set the state of the operations (used in [States]). 
} *) type frozen val freeze : unit -> frozen val unfreeze : frozen -> unit val init : unit -> unit (** XML output hooks *) val set_xml_open_section : (Names.identifier -> unit) -> unit val set_xml_close_section : (Names.identifier -> unit) -> unit type binding_kind = Explicit | Implicit (** {6 Section management for discharge } *) type variable_info = Names.identifier * binding_kind * Term.constr option * Term.types type variable_context = variable_info list val instance_from_variable_context : variable_context -> Names.identifier array val named_of_variable_context : variable_context -> Sign.named_context val section_segment_of_constant : Names.constant -> variable_context val section_segment_of_mutual_inductive: Names.mutual_inductive -> variable_context val section_instance : Libnames.global_reference -> Names.identifier array val is_in_section : Libnames.global_reference -> bool val add_section_variable : Names.identifier -> binding_kind -> unit val add_section_constant : Names.constant -> Sign.named_context -> unit val add_section_kn : Names.mutual_inductive -> Sign.named_context -> unit val replacement_context : unit -> (Names.identifier array Names.Cmap.t * Names.identifier array Names.Mindmap.t) (** {6 Discharge: decrease the section level if in the current section } *) val discharge_kn : Names.mutual_inductive -> Names.mutual_inductive val discharge_con : Names.constant -> Names.constant val discharge_global : Libnames.global_reference -> Libnames.global_reference val discharge_inductive : Names.inductive -> Names.inductive coq-8.4pl2/library/declaremods.ml0000640000175000001440000010453512121620060016105 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* add_scope_subst sc1 sc2) scl let subst_scope sc = try Stringmap.find sc !scope_subst with Not_found -> sc let reset_scope_subst () = scope_subst := Stringmap.empty (** Which inline annotations should we honor, either None or the ones whose level is less or equal to the given integer *) type inline = | NoInline | DefaultInline | InlineAt of int let default_inline () = Some (Flags.get_inline_level ()) let inl2intopt = function | NoInline -> None | InlineAt i -> Some i | DefaultInline -> default_inline () type funct_app_annot = { ann_inline : inline; ann_scope_subst : scope_subst } let inline_annot a = inl2intopt a.ann_inline type 'a annotated = ('a * funct_app_annot) (* modules and components *) (* OBSOLETE This type is a functional closure of substitutive lib_objects. The first part is a partial substitution (which will be later applied to lib_objects when completed). The second one is a list of bound identifiers which is nonempty only if the objects are owned by a fuctor The third one is the "self" ident of the signature (or structure), which should be substituted in lib_objects with the real name of the module. The fourth one is the segment itself which can contain references to identifiers in the domain of the substitution or in other two parts. These references are invalid in the current scope and therefore must be substitued with valid names before use. 
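   (The type below is a simplification of this description: it only keeps the
   list of bound functor arguments, the module path the objects currently
   mention, and the objects themselves.)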
*) type substitutive_objects = mod_bound_id list * module_path * lib_objects (* For each module, we store the following things: In modtab_substobjs: substitutive_objects when we will do Module M:=N, the objects of N will be reloaded with M after substitution In modtab_objects: "substituted objects" @ "keep objects" substituted objects - roughly the objects above after the substitution - we need to keep them to call open_object when the module is opened (imported) keep objects - The list of non-substitutive objects - as above, for each of them we will call open_object when the module is opened (Some) Invariants: * If the module is a functor, the two latter lists are empty. * Module objects in substitutive_objects part have empty substituted objects. * Modules which where created with Module M:=mexpr or with Module M:SIG. ... End M. have the keep list empty. *) let modtab_substobjs = ref (MPmap.empty : substitutive_objects MPmap.t) let modtab_objects = ref (MPmap.empty : (object_prefix * lib_objects) MPmap.t) (* currently started interactive module (if any) - its arguments (if it is a functor) and declared output type *) let openmod_info = ref ((MPfile(initial_dir),[],None,[]) : module_path * mod_bound_id list * (module_struct_entry * int option) option * module_type_body list) (* The library_cache here is needed to avoid recalculations of substituted modules object during "reloading" of libraries *) let library_cache = ref Dirmap.empty let _ = Summary.declare_summary "MODULE-INFO" { Summary.freeze_function = (fun () -> !modtab_substobjs, !modtab_objects, !openmod_info, !library_cache); Summary.unfreeze_function = (fun (sobjs,objs,info,libcache) -> modtab_substobjs := sobjs; modtab_objects := objs; openmod_info := info; library_cache := libcache); Summary.init_function = (fun () -> modtab_substobjs := MPmap.empty; modtab_objects := MPmap.empty; openmod_info := ((MPfile(initial_dir), [],None,[])); library_cache := Dirmap.empty) } (* auxiliary functions to transform full_path and kernel_name given by Lib into module_path and dir_path needed for modules *) let mp_of_kn kn = let mp,sec,l = repr_kn kn in if sec=empty_dirpath then MPdot (mp,l) else anomaly ("Non-empty section in module name!" ^ string_of_kn kn) let dir_of_sp sp = let dir,id = repr_path sp in add_dirpath_suffix dir id (* Subtyping checks *) let check_sub mtb sub_mtb_l = (* The constraints are checked and forgot immediately : *) ignore (List.fold_right (fun sub_mtb env -> Environ.add_constraints (Subtyping.check_subtypes env mtb sub_mtb) env) sub_mtb_l (Global.env())) (* This function checks if the type calculated for the module [mp] is a subtype of all signatures in [sub_mtb_l]. Uses only the global environment. 
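   Schematically, for a declaration such as Module M : T <: U1 <: U2 := ...,
   the principal signature T is enforced by the kernel, whereas the extra
   <: U1 <: U2 constraints are the [sub_mtb_l] being checked here.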
*) let check_subtypes mp sub_mtb_l = let mb = try Global.lookup_module mp with Not_found -> assert false in let mtb = Modops.module_type_of_module None mb in check_sub mtb sub_mtb_l (* Same for module type [mp] *) let check_subtypes_mt mp sub_mtb_l = let mtb = try Global.lookup_modtype mp with Not_found -> assert false in check_sub mtb sub_mtb_l (* Create a functor type entry *) let funct_entry args m = List.fold_right (fun (arg_id,(arg_t,_)) mte -> MSEfunctor (arg_id,arg_t,mte)) args m (* Prepare the module type list for check of subtypes *) let build_subtypes interp_modtype mp args mtys = List.map (fun (m,ann) -> let inl = inline_annot ann in let mte = interp_modtype (Global.env()) m in let mtb = Mod_typing.translate_module_type (Global.env()) mp inl mte in let funct_mtb = List.fold_right (fun (arg_id,(arg_t,arg_inl)) mte -> let arg_t = Mod_typing.translate_module_type (Global.env()) (MPbound arg_id) arg_inl arg_t in SEBfunctor(arg_id,arg_t,mte)) args mtb.typ_expr in { mtb with typ_expr = funct_mtb }) mtys (* These functions register the visibility of the module and iterates through its components. They are called by plenty module functions *) let compute_visibility exists what i dir dirinfo = if exists then if try Nametab.locate_dir (qualid_of_dirpath dir) = dirinfo with Not_found -> false then Nametab.Exactly i else errorlabstrm (what^"_module") (pr_dirpath dir ++ str " should already exist!") else if Nametab.exists_dir dir then errorlabstrm (what^"_module") (pr_dirpath dir ++ str " already exists") else Nametab.Until i (* let do_load_and_subst_module i dir mp substobjs keep = let prefix = (dir,(mp,empty_dirpath)) in let dirinfo = DirModule (dir,(mp,empty_dirpath)) in let vis = compute_visibility false "load_and_subst" i dir dirinfo in let objects = compute_subst_objects mp substobjs resolver in Nametab.push_dir vis dir dirinfo; modtab_substobjs := MPmap.add mp substobjs !modtab_substobjs; match objects with | Some (subst,seg) -> let seg = load_and_subst_objects (i+1) prefix subst seg in modtab_objects := MPmap.add mp (prefix,seg) !modtab_objects; load_objects (i+1) prefix keep; Some (seg@keep) | None -> None *) let do_module exists what iter_objects i dir mp substobjs keep= let prefix = (dir,(mp,empty_dirpath)) in let dirinfo = DirModule (dir,(mp,empty_dirpath)) in let vis = compute_visibility exists what i dir dirinfo in Nametab.push_dir vis dir dirinfo; modtab_substobjs := MPmap.add mp substobjs !modtab_substobjs; match substobjs with ([],mp1,objs) -> modtab_objects := MPmap.add mp (prefix,objs@keep) !modtab_objects; iter_objects (i+1) prefix (objs@keep) | (mbids,_,_) -> () let conv_names_do_module exists what iter_objects i (sp,kn) substobjs = let dir,mp = dir_of_sp sp, mp_of_kn kn in do_module exists what iter_objects i dir mp substobjs [] (* Interactive modules and module types cannot be recached! cache_mod* functions can be called only once (and "end_mod*" set the flag to false then) *) let cache_module ((sp,kn),substobjs) = let dir,mp = dir_of_sp sp, mp_of_kn kn in do_module false "cache" load_objects 1 dir mp substobjs [] (* When this function is called the module itself is already in the environment. 
This function loads its objects only *) let load_module i (oname,substobjs) = conv_names_do_module false "load" load_objects i oname substobjs let open_module i (oname,substobjs) = conv_names_do_module true "open" open_objects i oname substobjs let subst_module (subst,(mbids,mp,objs)) = (mbids,subst_mp subst mp, subst_objects subst objs) let classify_module substobjs = Substitute substobjs let (in_module : substitutive_objects -> obj), (out_module : obj -> substitutive_objects) = declare_object_full {(default_object "MODULE") with cache_function = cache_module; load_function = load_module; open_function = open_module; subst_function = subst_module; classify_function = classify_module } let cache_keep _ = anomaly "This module should not be cached!" let load_keep i ((sp,kn),seg) = let mp = mp_of_kn kn in let prefix = dir_of_sp sp, (mp,empty_dirpath) in begin try let prefix',objects = MPmap.find mp !modtab_objects in if prefix' <> prefix then anomaly "Two different modules with the same path!"; modtab_objects := MPmap.add mp (prefix,objects@seg) !modtab_objects; with Not_found -> anomaly "Keep objects before substitutive" end; load_objects i prefix seg let open_keep i ((sp,kn),seg) = let dirpath,mp = dir_of_sp sp, mp_of_kn kn in open_objects i (dirpath,(mp,empty_dirpath)) seg let in_modkeep : lib_objects -> obj = declare_object {(default_object "MODULE KEEP OBJECTS") with cache_function = cache_keep; load_function = load_keep; open_function = open_keep } (* we remember objects for a module type. In case of a declaration: Module M:SIG:=... The module M gets its objects from SIG *) let modtypetab = ref (MPmap.empty : substitutive_objects MPmap.t) (* currently started interactive module type. We remember its arguments if it is a functor type *) let openmodtype_info = ref ([],[] : mod_bound_id list * module_type_body list) let _ = Summary.declare_summary "MODTYPE-INFO" { Summary.freeze_function = (fun () -> !modtypetab,!openmodtype_info); Summary.unfreeze_function = (fun ft -> modtypetab := fst ft; openmodtype_info := snd ft); Summary.init_function = (fun () -> modtypetab := MPmap.empty; openmodtype_info := [],[]) } let cache_modtype ((sp,kn),(entry,modtypeobjs,sub_mty_l)) = let mp = mp_of_kn kn in (* We enrich the global environment *) let _ = match entry with | None -> anomaly "You must not recache interactive module types!" 
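    (* [Some] corresponds to caching the initial declaration of a
       non-interactive module type: perform the kernel-side declaration now
       and check that the name it produces matches the library-side one *)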
| Some (mte,inl) -> if mp <> Global.add_modtype (basename sp) mte inl then anomaly "Kernel and Library names do not match" in (* Using declare_modtype should lead here, where we check that any given subtyping is indeed accurate *) check_subtypes_mt mp sub_mty_l; if Nametab.exists_modtype sp then errorlabstrm "cache_modtype" (pr_path sp ++ str " already exists") ; Nametab.push_modtype (Nametab.Until 1) sp mp; modtypetab := MPmap.add mp modtypeobjs !modtypetab let load_modtype i ((sp,kn),(entry,modtypeobjs,_)) = assert (entry = None); if Nametab.exists_modtype sp then errorlabstrm "cache_modtype" (pr_path sp ++ str " already exists") ; Nametab.push_modtype (Nametab.Until i) sp (mp_of_kn kn); modtypetab := MPmap.add (mp_of_kn kn) modtypeobjs !modtypetab let open_modtype i ((sp,kn),(entry,_,_)) = assert (entry = None); if try Nametab.locate_modtype (qualid_of_path sp) <> (mp_of_kn kn) with Not_found -> true then errorlabstrm ("open_modtype") (pr_path sp ++ str " should already exist!"); Nametab.push_modtype (Nametab.Exactly i) sp (mp_of_kn kn) let subst_modtype (subst,(entry,(mbids,mp,objs),_)) = assert (entry = None); (entry,(mbids,subst_mp subst mp,subst_objects subst objs),[]) let classify_modtype (_,substobjs,_) = Substitute (None,substobjs,[]) type modtype_obj = (module_struct_entry * Entries.inline) option (* will be None in vo *) * substitutive_objects * module_type_body list let in_modtype : modtype_obj -> obj = declare_object {(default_object "MODULE TYPE") with cache_function = cache_modtype; open_function = open_modtype; load_function = load_modtype; subst_function = subst_modtype; classify_function = classify_modtype } let rec replace_module_object idl (mbids,mp,lib_stack) (mbids2,mp2,objs) mp1 = if mbids<>[] then anomaly "Unexpected functor objects"; let rec replace_idl = function | _,[] -> [] | id::idl,(id',obj)::tail when id = id' -> if object_tag obj <> "MODULE" then anomaly "MODULE expected!"; let substobjs = if idl = [] then let mp' = MPdot(mp, label_of_id id) in mbids, mp', subst_objects (map_mp mp1 mp' empty_delta_resolver) objs else replace_module_object idl (out_module obj) (mbids2,mp2,objs) mp in (id, in_module substobjs)::tail | idl,lobj::tail -> lobj::replace_idl (idl,tail) in (mbids, mp, replace_idl (idl,lib_stack)) let discr_resolver mb = match mb.mod_type with | SEBstruct _ -> Some mb.mod_delta | _ -> None (* when mp is a functor *) (* Small function to avoid module typing during substobjs retrivial *) let rec get_objs_modtype_application env = function | MSEident mp -> MPmap.find mp !modtypetab,Environ.lookup_modtype mp env,[] | MSEapply (fexpr, MSEident mp) -> let objs,mtb,mp_l= get_objs_modtype_application env fexpr in objs,mtb,mp::mp_l | MSEapply (_,mexpr) -> Modops.error_application_to_not_path mexpr | _ -> error "Application of a non-functor." let rec compute_subst env mbids sign mp_l inl = match mbids,mp_l with | _,[] -> mbids,empty_subst | [],r -> error "Application of a functor with too few arguments." 
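    (* Main case: destructure one functor argument from the expected signature
       [sign], bind the formal argument [mbid] to the actual path [mp] through
       a resolver that may inline according to [inl], and recurse on the
       remaining arguments *)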
| mbid::mbids,mp::mp_l -> let farg_id, farg_b, fbody_b = Modops.destr_functor env sign in let mb = Environ.lookup_module mp env in let mbid_left,subst = compute_subst env mbids fbody_b mp_l inl in let resolver = match discr_resolver mb with | None -> empty_delta_resolver | Some mp_delta -> Modops.inline_delta_resolver env inl mp farg_id farg_b mp_delta in mbid_left,join (map_mbid mbid mp resolver) subst let rec get_modtype_substobjs env mp_from inline = function MSEident ln -> MPmap.find ln !modtypetab | MSEfunctor (mbid,_,mte) -> let (mbids, mp, objs) = get_modtype_substobjs env mp_from inline mte in (mbid::mbids, mp, objs) | MSEwith (mty, With_Definition _) -> get_modtype_substobjs env mp_from inline mty | MSEwith (mty, With_Module (idl,mp1)) -> let substobjs = get_modtype_substobjs env mp_from inline mty in let modobjs = MPmap.find mp1 !modtab_substobjs in replace_module_object idl substobjs modobjs mp1 | MSEapply (fexpr, MSEident mp) as me -> let (mbids, mp1, objs),mtb_mp1,mp_l = get_objs_modtype_application env me in let mbids_left,subst = compute_subst env mbids mtb_mp1.typ_expr (List.rev mp_l) inline in (mbids_left, mp1,subst_objects subst objs) | MSEapply (_,mexpr) -> Modops.error_application_to_not_path mexpr (* push names of bound modules (and their components) to Nametab *) (* add objects associated to them *) let process_module_bindings argids args = let process_arg id (mbid,(mty,ann)) = let dir = make_dirpath [id] in let mp = MPbound mbid in let (mbids,mp_from,objs) = get_modtype_substobjs (Global.env()) mp (inline_annot ann) mty in let substobjs = (mbids,mp,subst_objects (map_mp mp_from mp empty_delta_resolver) objs)in do_module false "start" load_objects 1 dir mp substobjs [] in List.iter2 process_arg argids args (* Same with module_type_body *) let rec seb2mse = function | SEBident mp -> MSEident mp | SEBapply (s,s',_) -> MSEapply(seb2mse s, seb2mse s') | SEBwith (s,With_module_body (l,mp)) -> MSEwith(seb2mse s,With_Module(l,mp)) | SEBwith (s,With_definition_body(l,cb)) -> (match cb.const_body with | Def c -> MSEwith(seb2mse s,With_Definition(l,Declarations.force c)) | _ -> assert false) | _ -> failwith "seb2mse: received a non-atomic seb" let process_module_seb_binding mbid seb = process_module_bindings [id_of_mbid mbid] [mbid, (seb2mse seb, { ann_inline = DefaultInline; ann_scope_subst = [] })] let intern_args interp_modtype (idl,(arg,ann)) = let inl = inline_annot ann in let lib_dir = Lib.library_dp() in let mbids = List.map (fun (_,id) -> make_mbid lib_dir id) idl in let mty = interp_modtype (Global.env()) arg in let dirs = List.map (fun (_,id) -> make_dirpath [id]) idl in let (mbi,mp_from,objs) = get_modtype_substobjs (Global.env()) (MPbound (List.hd mbids)) inl mty in List.map2 (fun dir mbid -> let resolver = Global.add_module_parameter mbid mty inl in let mp = MPbound mbid in let substobjs = (mbi,mp,subst_objects (map_mp mp_from mp resolver) objs) in do_module false "interp" load_objects 1 dir mp substobjs []; (mbid,(mty,inl))) dirs mbids let start_module_ interp_modtype export id args res fs = let mp = Global.start_module id in let arg_entries = List.concat (List.map (intern_args interp_modtype) args) in let res_entry_o, sub_body_l = match res with | Enforce (res,ann) -> let inl = inline_annot ann in let mte = interp_modtype (Global.env()) res in let _ = Mod_typing.translate_struct_type_entry (Global.env()) inl mte in Some (mte,inl), [] | Check resl -> None, build_subtypes interp_modtype mp arg_entries resl in let mbids = List.map fst arg_entries in 
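  (* Remember the module path, the functor arguments, the enforced result
     signature and the extra subtyping constraints: [end_module] reads this
     state back when the interactive module is closed *)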
openmod_info:=(mp,mbids,res_entry_o,sub_body_l); let prefix = Lib.start_module export id mp fs in Nametab.push_dir (Nametab.Until 1) (fst prefix) (DirOpenModule prefix); Lib.add_frozen_state (); mp let end_module () = let oldoname,oldprefix,fs,lib_stack = Lib.end_module () in let mp,mbids, res_o, sub_l = !openmod_info in let substitute, keep, special = Lib.classify_segment lib_stack in let mp_from,substobjs, keep, special = try match res_o with | None -> (* the module is not sealed *) None,( mbids, mp, substitute), keep, special | Some (MSEident ln as mty, inline) -> let (mbids1,mp1,objs) = get_modtype_substobjs (Global.env()) mp inline mty in Some mp1,(mbids@mbids1,mp1,objs), [], [] | Some (MSEwith _ as mty, inline) -> let (mbids1,mp1,objs) = get_modtype_substobjs (Global.env()) mp inline mty in Some mp1,(mbids@mbids1,mp1,objs), [], [] | Some (MSEfunctor _, _) -> anomaly "Funsig cannot be here..." | Some (MSEapply _ as mty, inline) -> let (mbids1,mp1,objs) = get_modtype_substobjs (Global.env()) mp inline mty in Some mp1,(mbids@mbids1,mp1,objs), [], [] with Not_found -> anomaly "Module objects not found..." in (* must be called after get_modtype_substobjs, because of possible dependencies on functor arguments *) let id = basename (fst oldoname) in let mp,resolver = Global.end_module fs id res_o in check_subtypes mp sub_l; (* we substitute objects if the module is sealed by a signature (ie. mp_from != None *) let substobjs = match mp_from,substobjs with None,_ -> substobjs | Some mp_from,(mbids,_,objs) -> (mbids,mp,subst_objects (map_mp mp_from mp resolver) objs) in let node = in_module substobjs in let objects = if keep = [] || mbids <> [] then special@[node] (* no keep objects or we are defining a functor *) else special@[node;in_modkeep keep] (* otherwise *) in let newoname = Lib.add_leaves id objects in if (fst newoname) <> (fst oldoname) then anomaly "Names generated on start_ and end_module do not match"; if mp_of_kn (snd newoname) <> mp then anomaly "Kernel and Library names do not match"; Lib.add_frozen_state () (* to prevent recaching *); mp let module_objects mp = let prefix,objects = MPmap.find mp !modtab_objects in segment_of_objects prefix objects (************************************************************************) (* libraries *) type library_name = dir_path (* The first two will form substitutive_objects, the last one is keep *) type library_objects = module_path * lib_objects * lib_objects let register_library dir cenv objs digest = let mp = MPfile dir in let substobjs, keep = try ignore(Global.lookup_module mp); (* if it's in the environment, the cached objects should be correct *) Dirmap.find dir !library_cache with Not_found -> if mp <> Global.import cenv digest then anomaly "Unexpected disk module name"; let mp,substitute,keep = objs in let substobjs = [], mp, substitute in let modobjs = substobjs, keep in library_cache := Dirmap.add dir modobjs !library_cache; modobjs in do_module false "register_library" load_objects 1 dir mp substobjs keep let start_library dir = let mp = Global.start_library dir in openmod_info:=mp,[],None,[]; Lib.start_compilation dir mp; Lib.add_frozen_state () let end_library_hook = ref ignore let set_end_library_hook f = end_library_hook := f let end_library dir = !end_library_hook(); let prefix, lib_stack = Lib.end_compilation dir in let mp,cenv = Global.export dir in let substitute, keep, _ = Lib.classify_segment lib_stack in cenv,(mp,substitute,keep) (* implementation of Export M and Import M *) let really_import_module mp = let 
prefix,objects = MPmap.find mp !modtab_objects in open_objects 1 prefix objects let cache_import (_,(_,mp)) = (* for non-substitutive exports: let mp = Nametab.locate_module (qualid_of_dirpath dir) in *) really_import_module mp let classify_import (export,_ as obj) = if export then Substitute obj else Dispose let subst_import (subst,(export,mp as obj)) = let mp' = subst_mp subst mp in if mp'==mp then obj else (export,mp') let in_import = declare_object {(default_object "IMPORT MODULE") with cache_function = cache_import; open_function = (fun i o -> if i=1 then cache_import o); subst_function = subst_import; classify_function = classify_import } let import_module export mp = Lib.add_anonymous_leaf (in_import (export,mp)) (************************************************************************) (* module types *) let start_modtype_ interp_modtype id args mtys fs = let mp = Global.start_modtype id in let arg_entries = List.concat (List.map (intern_args interp_modtype) args) in let sub_mty_l = build_subtypes interp_modtype mp arg_entries mtys in let mbids = List.map fst arg_entries in openmodtype_info := mbids, sub_mty_l; let prefix = Lib.start_modtype id mp fs in Nametab.push_dir (Nametab.Until 1) (fst prefix) (DirOpenModtype prefix); Lib.add_frozen_state (); mp let end_modtype () = let oldoname,prefix,fs,lib_stack = Lib.end_modtype () in let id = basename (fst oldoname) in let substitute, _, special = Lib.classify_segment lib_stack in let mbids, sub_mty_l = !openmodtype_info in let mp = Global.end_modtype fs id in let modtypeobjs = mbids, mp, substitute in check_subtypes_mt mp sub_mty_l; let oname = Lib.add_leaves id (special@[in_modtype (None, modtypeobjs,[])]) in if fst oname <> fst oldoname then anomaly "Section paths generated on start_ and end_modtype do not match"; if (mp_of_kn (snd oname)) <> mp then anomaly "Kernel and Library names do not match"; Lib.add_frozen_state ()(* to prevent recaching *); mp let declare_modtype_ interp_modtype id args mtys (mty,ann) fs = let inl = inline_annot ann in let mmp = Global.start_modtype id in let arg_entries = List.concat (List.map (intern_args interp_modtype) args) in let entry = funct_entry arg_entries (interp_modtype (Global.env()) mty) in (* NB: check of subtyping will be done in cache_modtype *) let sub_mty_l = build_subtypes interp_modtype mmp arg_entries mtys in let (mbids,mp_from,objs) = get_modtype_substobjs (Global.env()) mmp inl entry in (* Undo the simulated interactive building of the module type *) (* and declare the module type as a whole *) register_scope_subst ann.ann_scope_subst; let substobjs = (mbids,mmp, subst_objects (map_mp mp_from mmp empty_delta_resolver) objs) in reset_scope_subst (); Summary.unfreeze_summaries fs; ignore (add_leaf id (in_modtype (Some (entry,inl), substobjs, sub_mty_l))); mmp (* Small function to avoid module typing during substobjs retrivial *) let rec get_objs_module_application env = function | MSEident mp -> MPmap.find mp !modtab_substobjs,Environ.lookup_module mp env,[] | MSEapply (fexpr, MSEident mp) -> let objs,mtb,mp_l= get_objs_module_application env fexpr in objs,mtb,mp::mp_l | MSEapply (_,mexpr) -> Modops.error_application_to_not_path mexpr | _ -> error "Application of a non-functor." 
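(* Illustrative sketch of the usual calling pattern for
   [get_modtype_substobjs] (above) and [get_module_substobjs] (below):
   the returned triple is rebased onto the module path the caller is
   actually defining, along the lines of

     let (mbids, mp_from, objs) = get_module_substobjs env mp inl me in
     let substobjs =
       (mbids, mp, subst_objects (map_mp mp_from mp resolver) objs)

   where [resolver] is whatever delta resolver the caller has at hand,
   often just [empty_delta_resolver]; see e.g. [process_module_bindings]
   and [declare_module_]. *)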
let rec get_module_substobjs env mp_from inl = function | MSEident mp -> MPmap.find mp !modtab_substobjs | MSEfunctor (mbid,mty,mexpr) -> let (mbids, mp, objs) = get_module_substobjs env mp_from inl mexpr in (mbid::mbids, mp, objs) | MSEapply (fexpr, MSEident mp) as me -> let (mbids, mp1, objs),mb_mp1,mp_l = get_objs_module_application env me in let mbids_left,subst = compute_subst env mbids mb_mp1.mod_type (List.rev mp_l) inl in (mbids_left, mp1,subst_objects subst objs) | MSEapply (_,mexpr) -> Modops.error_application_to_not_path mexpr | MSEwith (mty, With_Definition _) -> get_module_substobjs env mp_from inl mty | MSEwith (mty, With_Module (idl,mp)) -> assert false let declare_module_ interp_modtype interp_modexpr id args res mexpr_o fs = let mmp = Global.start_module id in let arg_entries = List.concat (List.map (intern_args interp_modtype) args) in let funct f m = funct_entry arg_entries (f (Global.env ()) m) in let env = Global.env() in let mty_entry_o, subs, inl_res = match res with | Enforce (mty,ann) -> Some (funct interp_modtype mty), [], inline_annot ann | Check mtys -> None, build_subtypes interp_modtype mmp arg_entries mtys, default_inline () in (*let subs = List.map (Mod_typing.translate_module_type env mmp) mty_sub_l in *) let mexpr_entry_o, inl_expr, scl = match mexpr_o with | None -> None, default_inline (), [] | Some (mexpr,ann) -> Some (funct interp_modexpr mexpr), inline_annot ann, ann.ann_scope_subst in let entry = {mod_entry_type = mty_entry_o; mod_entry_expr = mexpr_entry_o } in let substobjs = match entry with | {mod_entry_type = Some mte} -> get_modtype_substobjs env mmp inl_res mte | {mod_entry_expr = Some mexpr} -> get_module_substobjs env mmp inl_expr mexpr | _ -> anomaly "declare_module: No type, no body ..." in let (mbids,mp_from,objs) = substobjs in (* Undo the simulated interactive building of the module *) (* and declare the module as a whole *) Summary.unfreeze_summaries fs; let mp = mp_of_kn (Lib.make_kn id) in let inl = if inl_expr = None then None else inl_res in (*PLTODO *) let mp_env,resolver = Global.add_module id entry inl in if mp_env <> mp then anomaly "Kernel and Library names do not match"; check_subtypes mp subs; register_scope_subst scl; let substobjs = (mbids,mp_env, subst_objects(map_mp mp_from mp_env resolver) objs) in reset_scope_subst (); ignore (add_leaf id (in_module substobjs)); mmp (* Include *) let rec subst_inc_expr subst me = match me with | MSEident mp -> MSEident (subst_mp subst mp) | MSEwith (me,With_Module(idl,mp)) -> MSEwith (subst_inc_expr subst me, With_Module(idl,subst_mp subst mp)) | MSEwith (me,With_Definition(idl,const))-> let const1 = Mod_subst.from_val const in let force = Mod_subst.force subst_mps in MSEwith (subst_inc_expr subst me, With_Definition(idl,force (subst_substituted subst const1))) | MSEapply (me1,me2) -> MSEapply (subst_inc_expr subst me1, subst_inc_expr subst me2) | MSEfunctor(mbid,me1,me2) -> MSEfunctor (mbid, subst_inc_expr subst me1, subst_inc_expr subst me2) let lift_oname (sp,kn) = let mp,_,_ = Names.repr_kn kn in let dir,_ = Libnames.repr_path sp in (dir,mp) let cache_include (oname,(me,(mbis,mp1,objs))) = let dir,mp1 = lift_oname oname in let prefix = (dir,(mp1,empty_dirpath)) in load_objects 1 prefix objs; open_objects 1 prefix objs let load_include i (oname,(me,(mbis,mp1,objs))) = let dir,mp1 = lift_oname oname in let prefix = (dir,(mp1,empty_dirpath)) in load_objects i prefix objs let open_include i (oname,(me,(mbis,mp1,objs))) = let dir,mp1 = lift_oname oname in let prefix = 
(dir,(mp1,empty_dirpath)) in open_objects i prefix objs let subst_include (subst,(me,substobj)) = let (mbids,mp,objs) = substobj in let substobjs = (mbids,subst_mp subst mp,subst_objects subst objs) in (subst_inc_expr subst me,substobjs) let classify_include (me,substobjs) = Substitute (me,substobjs) type include_obj = module_struct_entry * substitutive_objects let (in_include : include_obj -> obj), (out_include : obj -> include_obj) = declare_object_full {(default_object "INCLUDE") with cache_function = cache_include; load_function = load_include; open_function = open_include; subst_function = subst_include; classify_function = classify_include } let rec include_subst env mb mbids sign inline = match mbids with | [] -> empty_subst | mbid::mbids -> let farg_id, farg_b, fbody_b = Modops.destr_functor env sign in let subst = include_subst env mb mbids fbody_b inline in let mp_delta = Modops.inline_delta_resolver env inline mb.mod_mp farg_id farg_b mb.mod_delta in join (map_mbid mbid mb.mod_mp mp_delta) subst exception NothingToDo let get_includeself_substobjs env objs me is_mod inline = try let mb_mp = match me with | MSEident mp -> if is_mod then Environ.lookup_module mp env else Modops.module_body_of_type mp (Environ.lookup_modtype mp env) | MSEapply(fexpr, MSEident p) as mexpr -> let _,mb_mp,mp_l = if is_mod then get_objs_module_application env mexpr else let o,mtb_mp,mp_l = get_objs_modtype_application env mexpr in o,Modops.module_body_of_type mtb_mp.typ_mp mtb_mp,mp_l in List.fold_left (fun mb _ -> match mb.mod_type with | SEBfunctor(_,_,str) -> {mb with mod_type = str} | _ -> error "Application of a functor with too much arguments.") mb_mp mp_l | _ -> raise NothingToDo in let (mbids,mp_self,objects) = objs in let mb = Global.pack_module() in let subst = include_subst env mb mbids mb_mp.mod_type inline in ([],mp_self,subst_objects subst objects) with NothingToDo -> objs let declare_one_include_inner annot (me,is_mod) = let env = Global.env() in let mp1,_ = current_prefix () in let inl = inline_annot annot in let (mbids,mp,objs)= if is_mod then get_module_substobjs env mp1 inl me else get_modtype_substobjs env mp1 inl me in let (mbids,mp,objs) = if mbids <> [] then get_includeself_substobjs env (mbids,mp,objs) me is_mod inl else (mbids,mp,objs) in let id = current_mod_id() in let resolver = Global.add_include me is_mod inl in register_scope_subst annot.ann_scope_subst; let substobjs = (mbids,mp1, subst_objects (map_mp mp mp1 resolver) objs) in reset_scope_subst (); ignore (add_leaf id (in_include (me, substobjs))) let declare_one_include interp_struct (me_ast,annot) = declare_one_include_inner annot (interp_struct (Global.env()) me_ast) let declare_include_ interp_struct me_asts = List.iter (declare_one_include interp_struct) me_asts (** Versions of earlier functions taking care of the freeze/unfreeze of summaries *) let protect_summaries f = let fs = Summary.freeze_summaries () in try f fs with reraise -> (* Something wrong: undo the whole process *) Summary.unfreeze_summaries fs; raise reraise let declare_include interp_struct me_asts = protect_summaries (fun _ -> declare_include_ interp_struct me_asts) let declare_modtype interp_mt interp_mix id args mtys mty_l = let declare_mt fs = match mty_l with | [] -> assert false | [mty] -> declare_modtype_ interp_mt id args mtys mty fs | mty_l -> ignore (start_modtype_ interp_mt id args mtys fs); declare_include_ interp_mix mty_l; end_modtype () in protect_summaries declare_mt let start_modtype interp_modtype id args mtys = protect_summaries 
(start_modtype_ interp_modtype id args mtys) let declare_module interp_mt interp_me interp_mix id args mtys me_l = let declare_me fs = match me_l with | [] -> declare_module_ interp_mt interp_me id args mtys None fs | [me] -> declare_module_ interp_mt interp_me id args mtys (Some me) fs | me_l -> ignore (start_module_ interp_mt None id args mtys fs); declare_include_ interp_mix me_l; end_module () in protect_summaries declare_me let start_module interp_modtype export id args res = protect_summaries (start_module_ interp_modtype export id args res) (*s Iterators. *) let iter_all_segments f = let _ = MPmap.iter (fun _ (prefix,objects) -> let rec apply_obj (id,obj) = match object_tag obj with | "INCLUDE" -> let (_,(_,_,objs)) = out_include obj in List.iter apply_obj objs | _ -> f (make_oname prefix id) obj in List.iter apply_obj objects) !modtab_objects in let rec apply_node = function | sp, Leaf o -> f sp o | _ -> () in List.iter apply_node (Lib.contents_after None) let debug_print_modtab _ = let pr_seg = function | [] -> str "[]" | l -> str ("[." ^ string_of_int (List.length l) ^ ".]") in let pr_modinfo mp (prefix,objects) s = s ++ str (string_of_mp mp) ++ (spc ()) ++ (pr_seg (segment_of_objects prefix objects)) in let modules = MPmap.fold pr_modinfo !modtab_objects (mt ()) in hov 0 modules coq-8.4pl2/library/assumptions.ml0000640000175000001440000002135112010532755016214 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* id_ord i1 i2 | Axiom k1 , Axiom k2 -> cst_ord k1 k2 | Opaque k1 , Opaque k2 -> cst_ord k1 k2 | Variable _ , Axiom _ -> -1 | Axiom _ , Variable _ -> 1 | Opaque _ , _ -> -1 | _, Opaque _ -> 1 end module ContextObjectSet = Set.Make (OrderedContextObject) module ContextObjectMap = Map.Make (OrderedContextObject) (** For a constant c in a module sealed by an interface (M:T and not M<:T), [Global.lookup_constant] may return a [constant_body] without body. We fix this by looking in the implementation of the module *) let modcache = ref (MPmap.empty : structure_body MPmap.t) let rec search_mod_label lab = function | [] -> raise Not_found | (l,SFBmodule mb) :: _ when l = lab -> mb | _ :: fields -> search_mod_label lab fields let rec search_cst_label lab = function | [] -> raise Not_found | (l,SFBconst cb) :: _ when l = lab -> cb | _ :: fields -> search_cst_label lab fields let rec lookup_module_in_impl mp = try Global.lookup_module mp with Not_found -> (* The module we search might not be exported by its englobing module(s). We access the upper layer, and then do a manual search *) match mp with | MPfile _ | MPbound _ -> raise Not_found (* should have been found by [lookup_module] *) | MPdot (mp',lab') -> let fields = memoize_fields_of_mp mp' in search_mod_label lab' fields and memoize_fields_of_mp mp = try MPmap.find mp !modcache with Not_found -> let l = fields_of_mp mp in modcache := MPmap.add mp l !modcache; l and fields_of_mp mp = let mb = lookup_module_in_impl mp in let fields,inner_mp,subs = fields_of_mb empty_subst mb [] in let subs = if inner_mp = mp then subs else add_mp inner_mp mp mb.mod_delta subs in Modops.subst_signature subs fields and fields_of_mb subs mb args = let seb = match mb.mod_expr with | None -> mb.mod_type (* cf. Declare Module *) | Some seb -> seb in fields_of_seb subs mb.mod_mp seb args (* TODO: using [empty_delta_resolver] below in [fields_of_seb] is probably slightly incorrect. 
But: a) I don't see currently what should be used instead b) this shouldn't be critical for Print Assumption. At worse some constants will have a canonical name which is non-canonical, leading to failures in [Global.lookup_constant], but our own [lookup_constant] should work. *) and fields_of_seb subs mp0 seb args = match seb with | SEBstruct l -> assert (args = []); l, mp0, subs | SEBident mp -> let mb = lookup_module_in_impl (subst_mp subs mp) in fields_of_mb subs mb args | SEBapply (seb1,seb2,_) -> (match seb2 with | SEBident mp2 -> fields_of_seb subs mp0 seb1 (mp2::args) | _ -> assert false) (* only legal application is to module names *) | SEBfunctor (mbid,mtb,seb) -> (match args with | [] -> assert false (* we should only encounter applied functors *) | mpa :: args -> let subs = add_mbid mbid mpa empty_delta_resolver subs in fields_of_seb subs mp0 seb args) | SEBwith _ -> assert false (* should not appear in a mod_expr or mod_type field *) let lookup_constant_in_impl cst fallback = try let mp,dp,lab = repr_kn (canonical_con cst) in let fields = memoize_fields_of_mp mp in (* A module found this way is necessarily closed, in particular our constant cannot be in an opened section : *) search_cst_label lab fields with Not_found -> (* Either: - The module part of the constant isn't registered yet : we're still in it, so the [constant_body] found earlier (if any) was a true axiom. - The label has not been found in the structure. This is an error *) match fallback with | Some cb -> cb | None -> anomaly ("Print Assumption: unknown constant "^string_of_con cst) let lookup_constant cst = try let cb = Global.lookup_constant cst in if constant_has_body cb then cb else lookup_constant_in_impl cst (Some cb) with Not_found -> lookup_constant_in_impl cst None let assumptions ?(add_opaque=false) st (* t *) = modcache := MPmap.empty; let (idts,knst) = st in (* Infix definition for chaining function that accumulate on a and a ContextObjectSet, ContextObjectMap. *) let ( ** ) f1 f2 s m = let (s',m') = f1 s m in f2 s' m' in (* This function eases memoization, by checking if an object is already stored before trying and applying a function. If the object is there, the function is not fired (we are in a particular case where memoized object don't need a treatment at all). If the object isn't there, it is stored and the function is fired*) let try_and_go o f s m = if ContextObjectSet.mem o s then (s,m) else f (ContextObjectSet.add o s) m in let identity2 s m = (s,m) in (* Goes recursively into the term to see if it depends on assumptions. The 3 important cases are : - Const _ where we need to first unfold the constant and return the needed assumptions of its body in the environment, - Rel _ which means the term is a variable which has been bound earlier by a Lambda or a Prod (returns [] ), - Var _ which means that the term refers to a section variable or a "Let" definition, in the former it is an assumption of [t], in the latter is must be unfolded like a Const. The other cases are straightforward recursion. Calls to the environment are memoized, thus avoiding to explore the DAG of the environment as if it was a tree (can cause exponential behavior and prevent the algorithm from terminating in reasonable time). 
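     (For instance, without memoization, a chain of definitions of the shape
     c0 := c1 + c1, c1 := c2 + c2, ..., ending in an axiom, would make the
     traversal reach that axiom a number of times exponential in the length
     of the chain.)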
[s] is a set of [context_object], representing the object already visited.*) let rec do_constr t s acc = let rec iter t = match kind_of_term t with | Var id -> do_memoize_id id | Meta _ | Evar _ -> assert false | Cast (e1,_,e2) | Prod (_,e1,e2) | Lambda (_,e1,e2) -> (iter e1)**(iter e2) | LetIn (_,e1,e2,e3) -> (iter e1)**(iter e2)**(iter e3) | App (e1, e_array) -> (iter e1)**(iter_array e_array) | Case (_,e1,e2,e_array) -> (iter e1)**(iter e2)**(iter_array e_array) | Fix (_,(_, e1_array, e2_array)) | CoFix (_,(_,e1_array, e2_array)) -> (iter_array e1_array) ** (iter_array e2_array) | Const kn -> do_memoize_kn kn | _ -> identity2 (* closed atomic types + rel *) and iter_array a = Array.fold_right (fun e f -> (iter e)**f) a identity2 in iter t s acc and add_id id s acc = (* a Var can be either a variable, or a "Let" definition.*) match Global.lookup_named id with | (_,None,t) -> (s,ContextObjectMap.add (Variable id) t acc) | (_,Some bdy,_) -> do_constr bdy s acc and do_memoize_id id = try_and_go (Variable id) (add_id id) and add_kn kn s acc = let cb = lookup_constant kn in let do_type cst = let ctype = match cb.Declarations.const_type with | PolymorphicArity (ctx,a) -> mkArity (ctx, Type a.poly_level) | NonPolymorphicType t -> t in (s,ContextObjectMap.add cst ctype acc) in let (s,acc) = if add_opaque && Declarations.constant_has_body cb && (Declarations.is_opaque cb || not (Cpred.mem kn knst)) then do_type (Opaque kn) else (s,acc) in match Declarations.body_of_constant cb with | None -> do_type (Axiom kn) | Some body -> do_constr (Declarations.force body) s acc and do_memoize_kn kn = try_and_go (Axiom kn) (add_kn kn) in fun t -> snd (do_constr t (ContextObjectSet.empty) (ContextObjectMap.empty)) coq-8.4pl2/library/libnames.ml0000640000175000001440000002424012010532755015421 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ -> false let isConstRef = function ConstRef _ -> true | _ -> false let isIndRef = function IndRef _ -> true | _ -> false let isConstructRef = function ConstructRef _ -> true | _ -> false let eq_gr gr1 gr2 = match gr1,gr2 with | ConstRef con1, ConstRef con2 -> eq_constant con1 con2 | IndRef kn1,IndRef kn2 -> eq_ind kn1 kn2 | ConstructRef kn1,ConstructRef kn2 -> eq_constructor kn1 kn2 | _,_ -> gr1=gr2 let destVarRef = function VarRef ind -> ind | _ -> failwith "destVarRef" let destConstRef = function ConstRef ind -> ind | _ -> failwith "destConstRef" let destIndRef = function IndRef ind -> ind | _ -> failwith "destIndRef" let destConstructRef = function ConstructRef ind -> ind | _ -> failwith "destConstructRef" let subst_constructor subst ((kn,i),j as ref) = let kn' = subst_ind subst kn in if kn==kn' then ref, mkConstruct ref else ((kn',i),j), mkConstruct ((kn',i),j) let subst_global subst ref = match ref with | VarRef var -> ref, mkVar var | ConstRef kn -> let kn',t = subst_con subst kn in if kn==kn' then ref, mkConst kn else ConstRef kn', t | IndRef (kn,i) -> let kn' = subst_ind subst kn in if kn==kn' then ref, mkInd (kn,i) else IndRef(kn',i), mkInd (kn',i) | ConstructRef ((kn,i),j as c) -> let c',t = subst_constructor subst c in if c'==c then ref,t else ConstructRef c', t let canonical_gr = function | ConstRef con -> ConstRef(constant_of_kn(canonical_con con)) | IndRef (kn,i) -> IndRef(mind_of_kn(canonical_mind kn),i) | ConstructRef ((kn,i),j )-> ConstructRef((mind_of_kn(canonical_mind kn),i),j) | VarRef id -> VarRef id let global_of_constr c = match 
kind_of_term c with | Const sp -> ConstRef sp | Ind ind_sp -> IndRef ind_sp | Construct cstr_cp -> ConstructRef cstr_cp | Var id -> VarRef id | _ -> raise Not_found let constr_of_global = function | VarRef id -> mkVar id | ConstRef sp -> mkConst sp | ConstructRef sp -> mkConstruct sp | IndRef sp -> mkInd sp let constr_of_reference = constr_of_global let reference_of_constr = global_of_constr let global_ord_gen fc fmi x y = let ind_ord (indx,ix) (indy,iy) = let c = Pervasives.compare ix iy in if c = 0 then kn_ord (fmi indx) (fmi indy) else c in match x, y with | ConstRef cx, ConstRef cy -> kn_ord (fc cx) (fc cy) | IndRef indx, IndRef indy -> ind_ord indx indy | ConstructRef (indx,jx), ConstructRef (indy,jy) -> let c = Pervasives.compare jx jy in if c = 0 then ind_ord indx indy else c | _, _ -> Pervasives.compare x y let global_ord_can = global_ord_gen canonical_con canonical_mind let global_ord_user = global_ord_gen user_con user_mind (* By default, [global_reference] are ordered on their canonical part *) module RefOrdered = struct type t = global_reference let compare = global_ord_can end module RefOrdered_env = struct type t = global_reference let compare = global_ord_user end module Refset = Set.Make(RefOrdered) module Refmap = Map.Make(RefOrdered) (* Extended global references *) type syndef_name = kernel_name type extended_global_reference = | TrueGlobal of global_reference | SynDef of syndef_name (* We order [extended_global_reference] via their user part (cf. pretty printer) *) module ExtRefOrdered = struct type t = extended_global_reference let compare x y = match x, y with | TrueGlobal rx, TrueGlobal ry -> global_ord_user rx ry | SynDef knx, SynDef kny -> kn_ord knx kny | _, _ -> Pervasives.compare x y end (**********************************************) let pr_dirpath sl = (str (string_of_dirpath sl)) (*s Operations on dirpaths *) (* Pop the last n module idents *) let pop_dirpath_n n dir = make_dirpath (list_skipn n (repr_dirpath dir)) let pop_dirpath p = match repr_dirpath p with | [] -> anomaly "dirpath_prefix: empty dirpath" | _::l -> make_dirpath l let is_dirpath_prefix_of d1 d2 = list_prefix_of (List.rev (repr_dirpath d1)) (List.rev (repr_dirpath d2)) let chop_dirpath n d = let d1,d2 = list_chop n (List.rev (repr_dirpath d)) in make_dirpath (List.rev d1), make_dirpath (List.rev d2) let drop_dirpath_prefix d1 d2 = let d = Util.list_drop_prefix (List.rev (repr_dirpath d1)) (List.rev (repr_dirpath d2)) in make_dirpath (List.rev d) let append_dirpath d1 d2 = make_dirpath (repr_dirpath d2 @ repr_dirpath d1) (* To know how qualified a name should be to be understood in the current env*) let add_dirpath_prefix id d = make_dirpath (repr_dirpath d @ [id]) let split_dirpath d = let l = repr_dirpath d in (make_dirpath (List.tl l), List.hd l) let add_dirpath_suffix p id = make_dirpath (id :: repr_dirpath p) (* parsing *) let parse_dir s = let len = String.length s in let rec decoupe_dirs dirs n = if n = len && n > 0 then error (s ^ " is an invalid path."); if n >= len then dirs else let pos = try String.index_from s n '.' 
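      (* position of the next dot separator, or the length of the string when
         there is none *)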
with Not_found -> len in if pos = n then error (s ^ " is an invalid path."); let dir = String.sub s n (pos-n) in decoupe_dirs ((id_of_string dir)::dirs) (pos+1) in decoupe_dirs [] 0 let dirpath_of_string s = make_dirpath (if s = "" then [] else parse_dir s) let string_of_dirpath = Names.string_of_dirpath module Dirset = Set.Make(struct type t = dir_path let compare = compare end) module Dirmap = Map.Make(struct type t = dir_path let compare = compare end) (*s Section paths are absolute names *) type full_path = { dirpath : dir_path ; basename : identifier } let make_path pa id = { dirpath = pa; basename = id } let repr_path { dirpath = pa; basename = id } = (pa,id) (* parsing and printing of section paths *) let string_of_path sp = let (sl,id) = repr_path sp in if repr_dirpath sl = [] then string_of_id id else (string_of_dirpath sl) ^ "." ^ (string_of_id id) let sp_ord sp1 sp2 = let (p1,id1) = repr_path sp1 and (p2,id2) = repr_path sp2 in let p_bit = compare p1 p2 in if p_bit = 0 then id_ord id1 id2 else p_bit module SpOrdered = struct type t = full_path let compare = sp_ord end module Spmap = Map.Make(SpOrdered) let dirpath sp = let (p,_) = repr_path sp in p let basename sp = let (_,id) = repr_path sp in id let path_of_string s = try let dir, id = split_dirpath (dirpath_of_string s) in make_path dir id with | Invalid_argument _ -> invalid_arg "path_of_string" let pr_path sp = str (string_of_path sp) let restrict_path n sp = let dir, s = repr_path sp in let dir' = list_firstn n (repr_dirpath dir) in make_path (make_dirpath dir') s let encode_mind dir id = make_mind (MPfile dir) empty_dirpath (label_of_id id) let encode_con dir id = make_con (MPfile dir) empty_dirpath (label_of_id id) let decode_mind kn = let rec dir_of_mp = function | MPfile dir -> repr_dirpath dir | MPbound mbid -> let _,_,dp = repr_mbid mbid in let id = id_of_mbid mbid in id::(repr_dirpath dp) | MPdot(mp,l) -> (id_of_label l)::(dir_of_mp mp) in let mp,sec_dir,l = repr_mind kn in if (repr_dirpath sec_dir) = [] then (make_dirpath (dir_of_mp mp)),id_of_label l else anomaly "Section part should be empty!" let decode_con kn = let mp,sec_dir,l = repr_con kn in match mp,(repr_dirpath sec_dir) with MPfile dir,[] -> (dir,id_of_label l) | _ , [] -> anomaly "MPfile expected!" | _ -> anomaly "Section part should be empty!" (*s qualified names *) type qualid = full_path let make_qualid = make_path let repr_qualid = repr_path let string_of_qualid = string_of_path let pr_qualid = pr_path let qualid_of_string = path_of_string let qualid_of_path sp = sp let qualid_of_ident id = make_qualid empty_dirpath id let qualid_of_dirpath dir = let (l,a) = split_dirpath dir in make_qualid l a type object_name = full_path * kernel_name type object_prefix = dir_path * (module_path * dir_path) let make_oname (dirpath,(mp,dir)) id = make_path dirpath id, make_kn mp dir (label_of_id id) (* to this type are mapped dir_path's in the nametab *) type global_dir_reference = | DirOpenModule of object_prefix | DirOpenModtype of object_prefix | DirOpenSection of object_prefix | DirModule of object_prefix | DirClosedSection of dir_path (* this won't last long I hope! 
*) (* | ModRef mp -> let mp' = subst_modpath subst mp in if mp==mp' then ref else ModRef mp' | ModTypeRef kn -> let kn' = subst_kernel_name subst kn in if kn==kn' then ref else ModTypeRef kn' *) type reference = | Qualid of qualid located | Ident of identifier located let qualid_of_reference = function | Qualid (loc,qid) -> loc, qid | Ident (loc,id) -> loc, qualid_of_ident id let string_of_reference = function | Qualid (loc,qid) -> string_of_qualid qid | Ident (loc,id) -> string_of_id id let pr_reference = function | Qualid (_,qid) -> pr_qualid qid | Ident (_,id) -> pr_id id let loc_of_reference = function | Qualid (loc,qid) -> loc | Ident (loc,id) -> loc (* popping one level of section in global names *) let pop_con con = let (mp,dir,l) = repr_con con in Names.make_con mp (pop_dirpath dir) l let pop_kn kn = let (mp,dir,l) = repr_mind kn in Names.make_mind mp (pop_dirpath dir) l let pop_global_reference = function | ConstRef con -> ConstRef (pop_con con) | IndRef (kn,i) -> IndRef (pop_kn kn,i) | ConstructRef ((kn,i),j) -> ConstructRef ((pop_kn kn,i),j) | VarRef id -> anomaly "VarRef not poppable" (* Deprecated synonyms *) let make_short_qualid = qualid_of_ident let qualid_of_sp = qualid_of_path coq-8.4pl2/library/heads.mli0000640000175000001440000000210712010532755015062 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit (** [is_rigid] tells if some term is known to ultimately reduce to a term with a rigid head symbol *) val is_rigid : env -> constr -> bool coq-8.4pl2/library/assumptions.mli0000640000175000001440000000237212010532755016367 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* transparent_state -> constr -> Term.types ContextObjectMap.t coq-8.4pl2/library/decl_kinds.ml0000640000175000001440000000636412010532755015735 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* IsDefinition d | Proof s -> IsProof s let string_of_theorem_kind = function | Theorem -> "Theorem" | Lemma -> "Lemma" | Fact -> "Fact" | Remark -> "Remark" | Property -> "Property" | Proposition -> "Proposition" | Corollary -> "Corollary" let string_of_definition_kind def = match def with | Local, Coercion -> "Coercion Local" | Global, Coercion -> "Coercion" | Local, Definition -> "Let" | Global, Definition -> "Definition" | Local, SubClass -> "Local SubClass" | Global, SubClass -> "SubClass" | Global, CanonicalStructure -> "Canonical Structure" | Global, Example -> "Example" | Local, (CanonicalStructure|Example) -> anomaly "Unsupported local definition kind" | Local, Instance -> "Instance" | Global, Instance -> "Global Instance" | _, (StructureComponent|Scheme|CoFixpoint|Fixpoint|IdentityCoercion|Method) -> anomaly "Internal definition kind" (* Strength *) let strength_of_global = function | VarRef _ -> Local | IndRef _ | ConstructRef _ | ConstRef _ -> Global let string_of_strength = function | Local -> "Local" | Global -> "Global" (* Recursive power *) (* spiwack: this definition might be of use in the kernel, for now I do not push them deeper than needed, though. 
*) type recursivity_kind = | Finite (* = inductive *) | CoFinite (* = coinductive *) | BiFinite (* = non-recursive, like in "Record" definitions *) (* helper, converts to "finiteness flag" booleans *) let recursivity_flag_of_kind = function | Finite | BiFinite -> true | CoFinite -> false coq-8.4pl2/library/declare.ml0000640000175000001440000002560312010532755015232 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ()) let xml_declare_constant = ref (fun (sp:internal_flag * constant)-> ()) let xml_declare_inductive = ref (fun (sp:internal_flag * object_name) -> ()) let if_xml f x = if !Flags.xml_export then f x else () let set_xml_declare_variable f = xml_declare_variable := if_xml f let set_xml_declare_constant f = xml_declare_constant := if_xml f let set_xml_declare_inductive f = xml_declare_inductive := if_xml f let cache_hook = ref ignore let add_cache_hook f = cache_hook := f (** Declaration of section variables and local definitions *) type section_variable_entry = | SectionLocalDef of constr * types option * bool (* opacity *) | SectionLocalAssum of types * bool (* Implicit status *) type variable_declaration = dir_path * section_variable_entry * logical_kind let cache_variable ((sp,_),o) = match o with | Inl cst -> Global.add_constraints cst | Inr (id,(p,d,mk)) -> (* Constr raisonne sur les noms courts *) if variable_exists id then alreadydeclared (pr_id id ++ str " already exists"); let impl,opaq,cst = match d with (* Fails if not well-typed *) | SectionLocalAssum (ty, impl) -> let cst = Global.push_named_assum (id,ty) in let impl = if impl then Lib.Implicit else Lib.Explicit in impl, true, cst | SectionLocalDef (c,t,opaq) -> let cst = Global.push_named_def (id,c,t) in Lib.Explicit, opaq, cst in Nametab.push (Nametab.Until 1) (restrict_path 0 sp) (VarRef id); add_section_variable id impl; Dischargedhypsmap.set_discharged_hyps sp []; add_variable_data id (p,opaq,cst,mk) let discharge_variable (_,o) = match o with | Inr (id,_) -> Some (Inl (variable_constraints id)) | Inl _ -> Some o type variable_obj = (Univ.constraints, identifier * variable_declaration) union let inVariable : variable_obj -> obj = declare_object { (default_object "VARIABLE") with cache_function = cache_variable; discharge_function = discharge_variable; classify_function = (fun _ -> Dispose) } (* for initial declaration *) let declare_variable id obj = let oname = add_leaf id (inVariable (Inr (id,obj))) in declare_var_implicits id; Notation.declare_ref_arguments_scope (VarRef id); Heads.declare_head (EvalVarRef id); !xml_declare_variable oname; oname (** Declaration of constants and parameters *) type constant_declaration = constant_entry * logical_kind (* At load-time, the segment starting from the module name to the discharge *) (* section (if Remark or Fact) is needed to access a construction *) let load_constant i ((sp,kn),(_,_,kind)) = if Nametab.exists_cci sp then alreadydeclared (pr_id (basename sp) ++ str " already exists"); let con = Global.constant_of_delta_kn kn in Nametab.push (Nametab.Until i) sp (ConstRef con); add_constant_kind con kind (* Opening means making the name without its module qualification available *) let open_constant i ((sp,kn),_) = let con = Global.constant_of_delta_kn kn in Nametab.push (Nametab.Exactly i) sp (ConstRef con) let exists_name id = variable_exists id or Global.exists_objlabel (label_of_id id) let check_exists sp = let id = basename sp in if exists_name id then 
alreadydeclared (pr_id id ++ str " already exists") let cache_constant ((sp,kn),(cdt,dhyps,kind)) = let id = basename sp in let _,dir,_ = repr_kn kn in check_exists sp; let kn' = Global.add_constant dir id cdt in assert (kn' = constant_of_kn kn); Nametab.push (Nametab.Until 1) sp (ConstRef (constant_of_kn kn)); add_section_constant kn' (Global.lookup_constant kn').const_hyps; Dischargedhypsmap.set_discharged_hyps sp dhyps; add_constant_kind (constant_of_kn kn) kind; !cache_hook sp let discharged_hyps kn sechyps = let (_,dir,_) = repr_kn kn in let args = Array.to_list (instance_from_variable_context sechyps) in List.rev (List.map (Libnames.make_path dir) args) let discharge_constant ((sp,kn),(cdt,dhyps,kind)) = let con = constant_of_kn kn in let cb = Global.lookup_constant con in let repl = replacement_context () in let sechyps = section_segment_of_constant con in let recipe = { d_from=cb; d_modlist=repl; d_abstract=named_of_variable_context sechyps } in Some (GlobalRecipe recipe,(discharged_hyps kn sechyps)@dhyps,kind) (* Hack to reduce the size of .vo: we keep only what load/open needs *) let dummy_constant_entry = ConstantEntry (ParameterEntry (None,mkProp,None)) let dummy_constant (ce,_,mk) = dummy_constant_entry,[],mk let classify_constant cst = Substitute (dummy_constant cst) type constant_obj = global_declaration * Dischargedhypsmap.discharged_hyps * logical_kind let inConstant : constant_obj -> obj = declare_object { (default_object "CONSTANT") with cache_function = cache_constant; load_function = load_constant; open_function = open_constant; classify_function = classify_constant; subst_function = ident_subst_function; discharge_function = discharge_constant } let declare_constant_common id dhyps (cd,kind) = let (sp,kn) = add_leaf id (inConstant (cd,dhyps,kind)) in let c = Global.constant_of_delta_kn kn in declare_constant_implicits c; Heads.declare_head (EvalConstRef c); Notation.declare_ref_arguments_scope (ConstRef c); c let declare_constant ?(internal = UserVerbose) id (cd,kind) = let kn = declare_constant_common id [] (ConstantEntry cd,kind) in !xml_declare_constant (internal,kn); kn (** Declaration of inductive blocks *) let declare_inductive_argument_scopes kn mie = list_iter_i (fun i {mind_entry_consnames=lc} -> Notation.declare_ref_arguments_scope (IndRef (kn,i)); for j=1 to List.length lc do Notation.declare_ref_arguments_scope (ConstructRef ((kn,i),j)); done) mie.mind_entry_inds let inductive_names sp kn mie = let (dp,_) = repr_path sp in let kn = Global.mind_of_delta_kn kn in let names, _ = List.fold_left (fun (names, n) ind -> let ind_p = (kn,n) in let names, _ = List.fold_left (fun (names, p) l -> let sp = Libnames.make_path dp l in ((sp, ConstructRef (ind_p,p)) :: names, p+1)) (names, 1) ind.mind_entry_consnames in let sp = Libnames.make_path dp ind.mind_entry_typename in ((sp, IndRef ind_p) :: names, n+1)) ([], 0) mie.mind_entry_inds in names let load_inductive i ((sp,kn),(_,mie)) = let names = inductive_names sp kn mie in List.iter (fun (sp, ref) -> Nametab.push (Nametab.Until i) sp ref ) names let open_inductive i ((sp,kn),(_,mie)) = let names = inductive_names sp kn mie in List.iter (fun (sp, ref) -> Nametab.push (Nametab.Exactly i) sp ref) names let cache_inductive ((sp,kn),(dhyps,mie)) = let names = inductive_names sp kn mie in List.iter check_exists (List.map fst names); let id = basename sp in let _,dir,_ = repr_kn kn in let kn' = Global.add_mind dir id mie in assert (kn'= mind_of_kn kn); add_section_kn kn' (Global.lookup_mind kn').mind_hyps; 
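(* Descriptive note: at this point the mutual inductive block has been
   added to the global environment; the remaining steps record its
   discharged section hypotheses, push every inductive and constructor
   name in the nametab and run the cache hooks on those names. *)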
Dischargedhypsmap.set_discharged_hyps sp dhyps; List.iter (fun (sp, ref) -> Nametab.push (Nametab.Until 1) sp ref) names; List.iter (fun (sp,_) -> !cache_hook sp) (inductive_names sp kn mie) let discharge_inductive ((sp,kn),(dhyps,mie)) = let mind = Global.mind_of_delta_kn kn in let mie = Global.lookup_mind mind in let repl = replacement_context () in let sechyps = section_segment_of_mutual_inductive mind in Some (discharged_hyps kn sechyps, Discharge.process_inductive (named_of_variable_context sechyps) repl mie) let dummy_one_inductive_entry mie = { mind_entry_typename = mie.mind_entry_typename; mind_entry_arity = mkProp; mind_entry_consnames = mie.mind_entry_consnames; mind_entry_lc = [] } (* Hack to reduce the size of .vo: we keep only what load/open needs *) let dummy_inductive_entry (_,m) = ([],{ mind_entry_params = []; mind_entry_record = false; mind_entry_finite = true; mind_entry_inds = List.map dummy_one_inductive_entry m.mind_entry_inds }) type inductive_obj = Dischargedhypsmap.discharged_hyps * mutual_inductive_entry let inInductive : inductive_obj -> obj = declare_object {(default_object "INDUCTIVE") with cache_function = cache_inductive; load_function = load_inductive; open_function = open_inductive; classify_function = (fun a -> Substitute (dummy_inductive_entry a)); subst_function = ident_subst_function; discharge_function = discharge_inductive } (* for initial declaration *) let declare_mind isrecord mie = let id = match mie.mind_entry_inds with | ind::_ -> ind.mind_entry_typename | [] -> anomaly "cannot declare an empty list of inductives" in let (sp,kn as oname) = add_leaf id (inInductive ([],mie)) in let mind = Global.mind_of_delta_kn kn in declare_mib_implicits mind; declare_inductive_argument_scopes mind mie; !xml_declare_inductive (isrecord,oname); oname (* Declaration messages *) let pr_rank i = str (ordinal (i+1)) let fixpoint_message indexes l = Flags.if_verbose msgnl (match l with | [] -> anomaly "no recursive definition" | [id] -> pr_id id ++ str " is recursively defined" ++ (match indexes with | Some [|i|] -> str " (decreasing on "++pr_rank i++str " argument)" | _ -> mt ()) | l -> hov 0 (prlist_with_sep pr_comma pr_id l ++ spc () ++ str "are recursively defined" ++ match indexes with | Some a -> spc () ++ str "(decreasing respectively on " ++ prlist_with_sep pr_comma pr_rank (Array.to_list a) ++ str " arguments)" | None -> mt ())) let cofixpoint_message l = Flags.if_verbose msgnl (match l with | [] -> anomaly "No corecursive definition." | [id] -> pr_id id ++ str " is corecursively defined" | l -> hov 0 (prlist_with_sep pr_comma pr_id l ++ spc () ++ str "are corecursively defined")) let recursive_message isfix i l = (if isfix then fixpoint_message i else cofixpoint_message) l let definition_message id = Flags.if_verbose msgnl (pr_id id ++ str " is defined") let assumption_message id = Flags.if_verbose msgnl (pr_id id ++ str " is assumed") coq-8.4pl2/library/dischargedhypsmap.ml0000640000175000001440000000245012010532755017325 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [] (*s Registration as global tables and rollback. 
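(* Sketch of the idiom used below: the module-local table is registered
   in [Summary] through a [freeze]/[unfreeze]/[init] triple, so that it
   is saved and restored together with the rest of the system state. *)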
*) let init () = discharged_hyps_map := Spmap.empty let freeze () = !discharged_hyps_map let unfreeze dhm = discharged_hyps_map := dhm let _ = Summary.declare_summary "discharged_hypothesis" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } coq-8.4pl2/library/global.ml0000640000175000001440000001201212010532755015061 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !global_env); unfreeze_function = (fun fr -> global_env := fr); init_function = (fun () -> global_env := empty_environment) } (* Then we export the functions of [Typing] on that environment. *) let universes () = universes (env()) let named_context () = named_context (env()) let named_context_val () = named_context_val (env()) let push_named_assum a = let (cst,env) = push_named_assum a !global_env in global_env := env; cst let push_named_def d = let (cst,env) = push_named_def d !global_env in global_env := env; cst let add_thing add dir id thing = let kn, newenv = add dir (label_of_id id) thing !global_env in global_env := newenv; kn let add_constant = add_thing add_constant let add_mind = add_thing add_mind let add_modtype x y inl = add_thing (fun _ x y -> add_modtype x y inl) () x y let add_module id me inl = let l = label_of_id id in let mp,resolve,new_env = add_module l me inl !global_env in global_env := new_env; mp,resolve let add_constraints c = global_env := add_constraints c !global_env let set_engagement c = global_env := set_engagement c !global_env let add_include me is_module inl = let resolve,newenv = add_include me is_module inl !global_env in global_env := newenv; resolve let start_module id = let l = label_of_id id in let mp,newenv = start_module l !global_env in global_env := newenv; mp let end_module fs id mtyo = let l = label_of_id id in let mp,resolve,newenv = end_module l mtyo !global_env in Summary.unfreeze_summaries fs; global_env := newenv; mp,resolve let add_module_parameter mbid mte inl = let resolve,newenv = add_module_parameter mbid mte inl !global_env in global_env := newenv; resolve let start_modtype id = let l = label_of_id id in let mp,newenv = start_modtype l !global_env in global_env := newenv; mp let end_modtype fs id = let l = label_of_id id in let kn,newenv = end_modtype l !global_env in Summary.unfreeze_summaries fs; global_env := newenv; kn let pack_module () = pack_module !global_env let lookup_named id = lookup_named id (env()) let lookup_constant kn = lookup_constant kn (env()) let lookup_inductive ind = Inductive.lookup_mind_specif (env()) ind let lookup_mind kn = lookup_mind kn (env()) let lookup_module mp = lookup_module mp (env()) let lookup_modtype kn = lookup_modtype kn (env()) let constant_of_delta_kn kn = let resolver,resolver_param = (delta_of_senv !global_env) in (* TODO : are resolver and resolver_param orthogonal ? the effect of resolver is lost if resolver_param isn't trivial at that spot. 
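(* Illustrative reading of the code below: the kernel name is resolved
   twice, first through [resolver] by [constant_of_delta_kn] and then
   through [resolver_param] by [constant_of_delta]; the TODO above asks
   whether these two resolutions can interfere with each other. *)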
*) Mod_subst.constant_of_delta resolver_param (Mod_subst.constant_of_delta_kn resolver kn) let mind_of_delta_kn kn = let resolver,resolver_param = (delta_of_senv !global_env) in (* TODO idem *) Mod_subst.mind_of_delta resolver_param (Mod_subst.mind_of_delta_kn resolver kn) let exists_objlabel id = exists_objlabel id !global_env let start_library dir = let mp,newenv = start_library dir !global_env in global_env := newenv; mp let export s = export !global_env s let import cenv digest = let mp,newenv = import cenv digest !global_env in global_env := newenv; mp (*s Function to get an environment from the constants part of the global environment and a given context. *) let env_of_context hyps = reset_with_named_context hyps (env()) open Libnames let type_of_reference env = function | VarRef id -> Environ.named_type id env | ConstRef c -> Typeops.type_of_constant env c | IndRef ind -> let specif = Inductive.lookup_mind_specif env ind in Inductive.type_of_inductive env specif | ConstructRef cstr -> let specif = Inductive.lookup_mind_specif env (inductive_of_constructor cstr) in Inductive.type_of_constructor cstr specif let type_of_global t = type_of_reference (env ()) t (* spiwack: register/unregister functions for retroknowledge *) let register field value by_clause = let entry = kind_of_term value in let senv = Safe_typing.register !global_env field entry by_clause in global_env := senv coq-8.4pl2/library/impargs.ml0000640000175000001440000006075712121620060015274 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* begin implicit_args := oflags; raise reraise end let set_maximality imps b = (* Force maximal insertion on ending implicits (compatibility) *) b || List.for_all ((<>) None) imps (*s Computation of implicit arguments *) (* We remember various information about why an argument is inferable as implicit - [DepRigid] means that the implicit argument can be found by unification along a rigid path (we do not print the arguments of this kind if there is enough arguments to infer them) - [DepFlex] means that the implicit argument can be found by unification along a collapsable path only (e.g. as x in (P x) where P is another argument) (we do (defensively) print the arguments of this kind) - [DepFlexAndRigid] means that the least argument from which the implicit argument can be inferred is following a collapsable path but there is a greater argument from where the implicit argument is inferable following a rigid path (useful to know how to print a partial application) - [Manual] means the argument has been explicitely set as implicit. We also consider arguments inferable from the conclusion but it is operational only if [conclusion_matters] is true. 
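(* Worked example (illustrative only): for a constant of type
   [forall (A : Type) (x : A), A], the argument [A] occurs rigidly in
   the type of [x], so it is recorded as [DepRigid (Hyp 2)]: it can be
   found by unification as soon as [x] is provided and therefore need
   not be printed in applications. *)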
*) type argument_position = | Conclusion | Hyp of int type implicit_explanation = | DepRigid of argument_position | DepFlex of argument_position | DepFlexAndRigid of (*flex*) argument_position * (*rig*) argument_position | Manual let argument_less = function | Hyp n, Hyp n' -> n true | Conclusion, _ -> false let update pos rig (na,st) = let e = if rig then match st with | None -> DepRigid pos | Some (DepRigid n as x) -> if argument_less (pos,n) then DepRigid pos else x | Some (DepFlexAndRigid (fpos,rpos) as x) -> if argument_less (pos,fpos) or pos=fpos then DepRigid pos else if argument_less (pos,rpos) then DepFlexAndRigid (fpos,pos) else x | Some (DepFlex fpos) -> if argument_less (pos,fpos) or pos=fpos then DepRigid pos else DepFlexAndRigid (fpos,pos) | Some Manual -> assert false else match st with | None -> DepFlex pos | Some (DepRigid rpos as x) -> if argument_less (pos,rpos) then DepFlexAndRigid (pos,rpos) else x | Some (DepFlexAndRigid (fpos,rpos) as x) -> if argument_less (pos,fpos) then DepFlexAndRigid (pos,rpos) else x | Some (DepFlex fpos as x) -> if argument_less (pos,fpos) then DepFlex pos else x | Some Manual -> assert false in na, Some e (* modified is_rigid_reference with a truncated env *) let is_flexible_reference env bound depth f = match kind_of_term f with | Rel n when n >= bound+depth -> (* inductive type *) false | Rel n when n >= depth -> (* previous argument *) true | Rel n -> (* since local definitions have been expanded *) false | Const kn -> let cb = Environ.lookup_constant kn env in (match cb.const_body with Def _ -> true | _ -> false) | Var id -> let (_,value,_) = Environ.lookup_named id env in value <> None | Ind _ | Construct _ -> false | _ -> true let push_lift d (e,n) = (push_rel d e,n+1) let is_reversible_pattern bound depth f l = isRel f & let n = destRel f in (n < bound+depth) & (n >= depth) & array_for_all (fun c -> isRel c & destRel c < depth) l & array_distinct l (* Precondition: rels in env are for inductive types only *) let add_free_rels_until strict strongly_strict revpat bound env m pos acc = let rec frec rig (env,depth as ed) c = let hd = if strict then whd_betadeltaiota env c else c in let c = if strongly_strict then hd else c in match kind_of_term hd with | Rel n when (n < bound+depth) & (n >= depth) -> let i = bound + depth - n - 1 in acc.(i) <- update pos rig acc.(i) | App (f,l) when revpat & is_reversible_pattern bound depth f l -> let i = bound + depth - destRel f - 1 in acc.(i) <- update pos rig acc.(i) | App (f,_) when rig & is_flexible_reference env bound depth f -> if strict then () else iter_constr_with_full_binders push_lift (frec false) ed c | Case _ when rig -> if strict then () else iter_constr_with_full_binders push_lift (frec false) ed c | Evar _ -> () | _ -> iter_constr_with_full_binders push_lift (frec rig) ed c in frec true (env,1) m; acc let rec is_rigid_head t = match kind_of_term t with | Rel _ | Evar _ -> false | Ind _ | Const _ | Var _ | Sort _ -> true | Case (_,_,f,_) -> is_rigid_head f | App (f,args) -> (match kind_of_term f with | Fix ((fi,i),_) -> is_rigid_head (args.(fi.(i))) | _ -> is_rigid_head f) | Lambda _ | LetIn _ | Construct _ | CoFix _ | Fix _ | Prod _ | Meta _ | Cast _ -> assert false (* calcule la liste des arguments implicites *) let find_displayed_name_in all avoid na (_,b as envnames_b) = let flag = RenamingElsewhereFor envnames_b in if all then compute_and_force_displayed_name_in flag avoid na b else compute_displayed_name_in flag avoid na b let compute_implicits_gen strict strongly_strict revpat 
contextual all env t = let rigid = ref true in let rec aux env avoid n names t = let t = whd_betadeltaiota env t in match kind_of_term t with | Prod (na,a,b) -> let na',avoid' = find_displayed_name_in all avoid na (names,b) in add_free_rels_until strict strongly_strict revpat n env a (Hyp (n+1)) (aux (push_rel (na',None,a) env) avoid' (n+1) (na'::names) b) | _ -> rigid := is_rigid_head t; let names = List.rev names in let v = Array.map (fun na -> na,None) (Array.of_list names) in if contextual then add_free_rels_until strict strongly_strict revpat n env t Conclusion v else v in match kind_of_term (whd_betadeltaiota env t) with | Prod (na,a,b) -> let na',avoid = find_displayed_name_in all [] na ([],b) in let v = aux (push_rel (na',None,a) env) avoid 1 [na'] b in !rigid, Array.to_list v | _ -> true, [] let compute_implicits_flags env f all t = compute_implicits_gen (f.strict or f.strongly_strict) f.strongly_strict f.reversible_pattern f.contextual all env t let compute_auto_implicits env flags enriching t = if enriching then compute_implicits_flags env flags true t else compute_implicits_gen false false false true true env t let compute_implicits_names env t = let _, impls = compute_implicits_gen false false false false true env t in List.map fst impls (* Extra information about implicit arguments *) type maximal_insertion = bool (* true = maximal contextual insertion *) type force_inference = bool (* true = always infer, never turn into evar/subgoal *) type implicit_status = (* None = Not implicit *) (identifier * implicit_explanation * (maximal_insertion * force_inference)) option type implicit_side_condition = DefaultImpArgs | LessArgsThan of int type implicits_list = implicit_side_condition * implicit_status list let is_status_implicit = function | None -> false | _ -> true let name_of_implicit = function | None -> anomaly "Not an implicit argument" | Some (id,_,_) -> id let maximal_insertion_of = function | Some (_,_,(b,_)) -> b | None -> anomaly "Not an implicit argument" let force_inference_of = function | Some (_, _, (_, b)) -> b | None -> anomaly "Not an implicit argument" (* [in_ctx] means we know the expected type, [n] is the index of the argument *) let is_inferable_implicit in_ctx n = function | None -> false | Some (_,DepRigid (Hyp p),_) -> in_ctx or n >= p | Some (_,DepFlex (Hyp p),_) -> false | Some (_,DepFlexAndRigid (_,Hyp q),_) -> in_ctx or n >= q | Some (_,DepRigid Conclusion,_) -> in_ctx | Some (_,DepFlex Conclusion,_) -> false | Some (_,DepFlexAndRigid (_,Conclusion),_) -> in_ctx | Some (_,Manual,_) -> true let positions_of_implicits (_,impls) = let rec aux n = function [] -> [] | Some _ :: l -> n :: aux (n+1) l | None :: l -> aux (n+1) l in aux 1 impls (* Manage user-given implicit arguments *) let rec prepare_implicits f = function | [] -> [] | (Anonymous, Some _)::_ -> anomaly "Unnamed implicit" | (Name id, Some imp)::imps -> let imps' = prepare_implicits f imps in Some (id,imp,(set_maximality imps' f.maximal,true)) :: imps' | _::imps -> None :: prepare_implicits f imps let set_implicit id imp insmax = (id,(match imp with None -> Manual | Some imp -> imp),insmax) let rec assoc_by_pos k = function (ExplByPos (k', x), b) :: tl when k = k' -> (x,b), tl | hd :: tl -> let (x, tl) = assoc_by_pos k tl in x, hd :: tl | [] -> raise Not_found let check_correct_manual_implicits autoimps l = List.iter (function | ExplByName id,(b,fi,forced) -> if not forced then error ("Wrong or non-dependent implicit argument name: "^(string_of_id id)^".") | ExplByPos (i,_id),_t -> if i<1 or 
i>List.length autoimps then error ("Bad implicit argument number: "^(string_of_int i)^".") else errorlabstrm "" (str "Cannot set implicit argument number " ++ int i ++ str ": it has no name.")) l let set_manual_implicits env flags enriching autoimps l = let try_forced k l = try let (id, (b, fi, fo)), l' = assoc_by_pos k l in if fo then let id = match id with Some id -> id | None -> id_of_string ("arg_" ^ string_of_int k) in l', Some (id,Manual,(b,fi)) else l, None with Not_found -> l, None in if not (list_distinct l) then error ("Some parameters are referred more than once."); (* Compare with automatic implicits to recover printing data and names *) let rec merge k l = function | (Name id,imp)::imps -> let l',imp,m = try let (b, fi, fo) = List.assoc (ExplByName id) l in List.remove_assoc (ExplByName id) l, (Some Manual), (Some (b, fi)) with Not_found -> try let (id, (b, fi, fo)), l' = assoc_by_pos k l in l', (Some Manual), (Some (b,fi)) with Not_found -> l,imp, if enriching && imp <> None then Some (flags.maximal,true) else None in let imps' = merge (k+1) l' imps in let m = Option.map (fun (b,f) -> set_maximality imps' b, f) m in Option.map (set_implicit id imp) m :: imps' | (Anonymous,imp)::imps -> let l', forced = try_forced k l in forced :: merge (k+1) l' imps | [] when l = [] -> [] | [] -> check_correct_manual_implicits autoimps l; [] in merge 1 l autoimps let compute_semi_auto_implicits env f manual t = match manual with | [] -> if not f.auto then [DefaultImpArgs, []] else let _,l = compute_implicits_flags env f false t in [DefaultImpArgs, prepare_implicits f l] | _ -> let _,autoimpls = compute_auto_implicits env f f.auto t in [DefaultImpArgs, set_manual_implicits env f f.auto autoimpls manual] let compute_implicits env t = compute_semi_auto_implicits env !implicit_args [] t (*s Constants. *) let compute_constant_implicits flags manual cst = let env = Global.env () in compute_semi_auto_implicits env flags manual (Typeops.type_of_constant env cst) (*s Inductives and constructors. Their implicit arguments are stored in an array, indexed by the inductive number, of pairs $(i,v)$ where $i$ are the implicit arguments of the inductive and $v$ the array of implicit arguments of the constructors. *) let compute_mib_implicits flags manual kn = let env = Global.env () in let mib = lookup_mind kn env in let ar = Array.to_list (Array.map (* No need to lift, arities contain no de Bruijn *) (fun mip -> (Name mip.mind_typename, None, type_of_inductive env (mib,mip))) mib.mind_packets) in let env_ar = push_rel_context ar env in let imps_one_inductive i mip = let ind = (kn,i) in let ar = type_of_inductive env (mib,mip) in ((IndRef ind,compute_semi_auto_implicits env flags manual ar), Array.mapi (fun j c -> (ConstructRef (ind,j+1),compute_semi_auto_implicits env_ar flags manual c)) mip.mind_nf_lc) in Array.mapi imps_one_inductive mib.mind_packets let compute_all_mib_implicits flags manual kn = let imps = compute_mib_implicits flags manual kn in List.flatten (array_map_to_list (fun (ind,cstrs) -> ind::Array.to_list cstrs) imps) (*s Variables. *) let compute_var_implicits flags manual id = let env = Global.env () in let (_,_,ty) = lookup_named id env in compute_semi_auto_implicits env flags manual ty (* Implicits of a global reference. 
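(* Summary (illustrative): the function below dispatches on the kind of
   global reference, reusing [compute_var_implicits],
   [compute_constant_implicits] and [compute_mib_implicits]; for an
   inductive or a constructor, the implicits are looked up in the array
   computed for the whole mutual block. *)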
*) let compute_global_implicits flags manual = function | VarRef id -> compute_var_implicits flags manual id | ConstRef kn -> compute_constant_implicits flags manual kn | IndRef (kn,i) -> let ((_,imps),_) = (compute_mib_implicits flags manual kn).(i) in imps | ConstructRef ((kn,i),j) -> let (_,cimps) = (compute_mib_implicits flags manual kn).(i) in snd cimps.(j-1) (* Merge a manual explicitation with an implicit_status list *) let merge_impls (cond,oldimpls) (_,newimpls) = let oldimpls,usersuffiximpls = list_chop (List.length newimpls) oldimpls in cond, (List.map2 (fun orig ni -> match orig with | Some (_, Manual, _) -> orig | _ -> ni) oldimpls newimpls)@usersuffiximpls (* Caching implicits *) type implicit_interactive_request = | ImplAuto | ImplManual of int type implicit_discharge_request = | ImplLocal | ImplConstant of constant * implicits_flags | ImplMutualInductive of mutual_inductive * implicits_flags | ImplInteractive of global_reference * implicits_flags * implicit_interactive_request let implicits_table = ref Refmap.empty let implicits_of_global ref = try let l = Refmap.find ref !implicits_table in try let rename_l = Arguments_renaming.arguments_names ref in let rename imp name = match imp, name with | Some (_, x,y), Name id -> Some (id, x,y) | _ -> imp in List.map2 (fun (t, il) rl -> t, List.map2 rename il rl) l rename_l with Not_found -> l | Invalid_argument _ -> anomaly "renamings list and implicits list have different lenghts" with Not_found -> [DefaultImpArgs,[]] let cache_implicits_decl (ref,imps) = implicits_table := Refmap.add ref imps !implicits_table let load_implicits _ (_,(_,l)) = List.iter cache_implicits_decl l let cache_implicits o = load_implicits 1 o let subst_implicits_decl subst (r,imps as o) = let r' = fst (subst_global subst r) in if r==r' then o else (r',imps) let subst_implicits (subst,(req,l)) = (ImplLocal,list_smartmap (subst_implicits_decl subst) l) let impls_of_context ctx = List.rev_map (fun (id,impl,_,_) -> if impl = Lib.Implicit then Some (id, Manual, (true,true)) else None) (List.filter (fun (_,_,b,_) -> b = None) ctx) let section_segment_of_reference = function | ConstRef con -> section_segment_of_constant con | IndRef (kn,_) | ConstructRef ((kn,_),_) -> section_segment_of_mutual_inductive kn | _ -> [] let adjust_side_condition p = function | LessArgsThan n -> LessArgsThan (n+p) | DefaultImpArgs -> DefaultImpArgs let add_section_impls vars extra_impls (cond,impls) = let p = List.length vars - List.length extra_impls in adjust_side_condition p cond, extra_impls @ impls let discharge_implicits (_,(req,l)) = match req with | ImplLocal -> None | ImplInteractive (ref,flags,exp) -> (try let vars = section_segment_of_reference ref in let ref' = if isVarRef ref then ref else pop_global_reference ref in let extra_impls = impls_of_context vars in let l' = [ref', List.map (add_section_impls vars extra_impls) (snd (List.hd l))] in Some (ImplInteractive (ref',flags,exp),l') with Not_found -> (* ref not defined in this section *) Some (req,l)) | ImplConstant (con,flags) -> (try let con' = pop_con con in let vars = section_segment_of_constant con in let extra_impls = impls_of_context vars in let l' = [ConstRef con',List.map (add_section_impls vars extra_impls) (snd (List.hd l))] in Some (ImplConstant (con',flags),l') with Not_found -> (* con not defined in this section *) Some (req,l)) | ImplMutualInductive (kn,flags) -> (try let l' = List.map (fun (gr, l) -> let vars = section_segment_of_reference gr in let extra_impls = impls_of_context vars in ((if isVarRef gr 
then gr else pop_global_reference gr), List.map (add_section_impls vars extra_impls) l)) l in Some (ImplMutualInductive (pop_kn kn,flags),l') with Not_found -> (* ref not defined in this section *) Some (req,l)) let rebuild_implicits (req,l) = match req with | ImplLocal -> assert false | ImplConstant (con,flags) -> let oldimpls = snd (List.hd l) in let newimpls = compute_constant_implicits flags [] con in req, [ConstRef con, List.map2 merge_impls oldimpls newimpls] | ImplMutualInductive (kn,flags) -> let newimpls = compute_all_mib_implicits flags [] kn in let rec aux olds news = match olds, news with | (_, oldimpls) :: old, (gr, newimpls) :: tl -> (gr, List.map2 merge_impls oldimpls newimpls) :: aux old tl | [], [] -> [] | _, _ -> assert false in req, aux l newimpls | ImplInteractive (ref,flags,o) -> (if isVarRef ref && is_in_section ref then ImplLocal else req), match o with | ImplAuto -> let oldimpls = snd (List.hd l) in let newimpls = compute_global_implicits flags [] ref in [ref,List.map2 merge_impls oldimpls newimpls] | ImplManual userimplsize -> let oldimpls = snd (List.hd l) in if flags.auto then let newimpls = List.hd (compute_global_implicits flags [] ref) in let p = List.length (snd newimpls) - userimplsize in let newimpls = on_snd (list_firstn p) newimpls in [ref,List.map (fun o -> merge_impls o newimpls) oldimpls] else [ref,oldimpls] let classify_implicits (req,_ as obj) = if req = ImplLocal then Dispose else Substitute obj type implicits_obj = implicit_discharge_request * (global_reference * implicits_list list) list let inImplicits : implicits_obj -> obj = declare_object {(default_object "IMPLICITS") with cache_function = cache_implicits; load_function = load_implicits; subst_function = subst_implicits; classify_function = classify_implicits; discharge_function = discharge_implicits; rebuild_function = rebuild_implicits } let is_local local ref = local || isVarRef ref && is_in_section ref let declare_implicits_gen req flags ref = let imps = compute_global_implicits flags [] ref in add_anonymous_leaf (inImplicits (req,[ref,imps])) let declare_implicits local ref = let flags = { !implicit_args with auto = true } in let req = if is_local local ref then ImplLocal else ImplInteractive(ref,flags,ImplAuto) in declare_implicits_gen req flags ref let declare_var_implicits id = let flags = !implicit_args in declare_implicits_gen ImplLocal flags (VarRef id) let declare_constant_implicits con = let flags = !implicit_args in declare_implicits_gen (ImplConstant (con,flags)) flags (ConstRef con) let declare_mib_implicits kn = let flags = !implicit_args in let imps = array_map_to_list (fun (ind,cstrs) -> ind::(Array.to_list cstrs)) (compute_mib_implicits flags [] kn) in add_anonymous_leaf (inImplicits (ImplMutualInductive (kn,flags),List.flatten imps)) (* Declare manual implicits *) type manual_explicitation = Topconstr.explicitation * (bool * bool * bool) type manual_implicits = manual_explicitation list let compute_implicits_with_manual env typ enriching l = let _,autoimpls = compute_auto_implicits env !implicit_args enriching typ in set_manual_implicits env !implicit_args enriching autoimpls l let check_inclusion l = (* Check strict inclusion *) let rec aux = function | n1::(n2::_ as nl) -> if n1 <= n2 then error "Sequences of implicit arguments must be of different lengths"; aux nl | _ -> () in aux (List.map (fun (imps,_) -> List.length imps) l) let check_rigidity isrigid = if not isrigid then errorlabstrm "" (strbrk "Multiple sequences of implicit arguments available only for references 
that cannot be applied to an arbitrarily large number of arguments.") let declare_manual_implicits local ref ?enriching l = let flags = !implicit_args in let env = Global.env () in let t = Global.type_of_global ref in let enriching = Option.default flags.auto enriching in let isrigid,autoimpls = compute_auto_implicits env flags enriching t in let l' = match l with | [] -> assert false | [l] -> [DefaultImpArgs, set_manual_implicits env flags enriching autoimpls l] | _ -> check_rigidity isrigid; let l = List.map (fun imps -> (imps,List.length imps)) l in let l = Sort.list (fun (_,n1) (_,n2) -> n1 > n2) l in check_inclusion l; let nargs = List.length autoimpls in List.map (fun (imps,n) -> (LessArgsThan (nargs-n), set_manual_implicits env flags enriching autoimpls imps)) l in let req = if is_local local ref then ImplLocal else ImplInteractive(ref,flags,ImplManual (List.length autoimpls)) in add_anonymous_leaf (inImplicits (req,[ref,l'])) let maybe_declare_manual_implicits local ref ?enriching l = if l = [] then () else declare_manual_implicits local ref ?enriching [l] let extract_impargs_data impls = let rec aux p = function | (DefaultImpArgs, imps)::_ -> [None,imps] | (LessArgsThan n, imps)::l -> (Some (p,n),imps) :: aux (n+1) l | [] -> [] in aux 0 impls let lift_implicits n = List.map (fun x -> match fst x with ExplByPos (k, id) -> ExplByPos (k + n, id), snd x | _ -> x) let make_implicits_list l = [DefaultImpArgs, l] let rec drop_first_implicits p l = if p = 0 then l else match l with | _,[] as x -> x | DefaultImpArgs,imp::impls -> drop_first_implicits (p-1) (DefaultImpArgs,impls) | LessArgsThan n,imp::impls -> let n = if is_status_implicit imp then n-1 else n in drop_first_implicits (p-1) (LessArgsThan n,impls) let rec select_impargs_size n = function | [] -> [] (* Tolerance for (DefaultImpArgs,[]) *) | [_, impls] | (DefaultImpArgs, impls)::_ -> impls | (LessArgsThan p, impls)::l -> if n <= p then impls else select_impargs_size n l let rec select_stronger_impargs = function | [] -> [] (* Tolerance for (DefaultImpArgs,[]) *) | (_,impls)::_ -> impls (*s Registration as global tables *) let init () = implicits_table := Refmap.empty let freeze () = !implicits_table let unfreeze t = implicits_table := t let _ = Summary.declare_summary "implicits" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } coq-8.4pl2/library/states.ml0000640000175000001440000000265312010532755015136 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* if !Flags.load_proofs <> Flags.Force then Util.error "Write State only works with option -force-load-proofs"; raw_extern s (freeze())), (fun s -> unfreeze (with_magic_number_check (raw_intern (Library.get_load_paths ())) s); Library.overwrite_library_filenames s) (* Rollback. 
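(* Sketch of the combinators below (hypothetical usage shown):
   [with_state_protection f x] freezes the whole system state, runs
   [f x] and always restores the frozen state, while
   [with_heavy_rollback f h x] restores it only when [f x] raises,
   after mapping the exception through [h]; a typical call would be
   [with_state_protection interp_command cmd], both names being
   hypothetical. *)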
*) let with_heavy_rollback f h x = let st = freeze () in try f x with reraise -> let e = h reraise in (unfreeze st; raise e) let with_state_protection f x = let st = freeze () in try let a = f x in unfreeze st; a with reraise -> (unfreeze st; raise reraise) coq-8.4pl2/library/decls.mli0000640000175000001440000000322612010532755015073 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* variable_data -> unit val variable_path : variable -> dir_path val variable_secpath : variable -> qualid val variable_kind : variable -> logical_kind val variable_opacity : variable -> bool val variable_constraints : variable -> Univ.constraints val variable_exists : variable -> bool (** Registration and access to the table of constants *) val add_constant_kind : constant -> logical_kind -> unit val constant_kind : constant -> logical_kind (* Prepare global named context for proof session: remove proofs of opaque section definitions and remove vm-compiled code *) val initialize_named_context_for_proof : unit -> Environ.named_context_val (** Miscellaneous functions *) val last_section_hyps : dir_path -> identifier list coq-8.4pl2/library/library.ml0000640000175000001440000005556112121620060015273 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* p = phys_dir) !load_paths with | [_,dir,_] -> dir | [] -> Nameops.default_root_prefix | l -> anomaly ("Two logical paths are associated to "^phys_dir) let is_in_load_paths phys_dir = let dir = System.canonical_path_name phys_dir in let lp = get_load_paths () in let check_p = fun p -> (String.compare dir p) == 0 in List.exists check_p lp let remove_load_path dir = load_paths := List.filter (fun (p,d,_) -> p <> dir) !load_paths let add_load_path isroot (phys_path,coq_path) = let phys_path = System.canonical_path_name phys_path in match List.filter (fun (p,d,_) -> p = phys_path) !load_paths with | [_,dir,_] -> if coq_path <> dir (* If this is not the default -I . 
to coqtop *) && not (phys_path = System.canonical_path_name Filename.current_dir_name && coq_path = Nameops.default_root_prefix) then begin (* Assume the user is concerned by library naming *) if dir <> Nameops.default_root_prefix then Flags.if_warn msg_warning (str phys_path ++ strbrk " was previously bound to " ++ pr_dirpath dir ++ strbrk "; it is remapped to " ++ pr_dirpath coq_path); remove_load_path phys_path; load_paths := (phys_path,coq_path,isroot) :: !load_paths; end | [] -> load_paths := (phys_path,coq_path,isroot) :: !load_paths; | _ -> anomaly ("Two logical paths are associated to "^phys_path) let extend_path_with_dirpath p dir = List.fold_left Filename.concat p (List.map string_of_id (List.rev (repr_dirpath dir))) let root_paths_matching_dir_path dir = let rec aux = function | [] -> [] | (p,d,true) :: l when is_dirpath_prefix_of d dir -> let suffix = drop_dirpath_prefix d dir in extend_path_with_dirpath p suffix :: aux l | _ :: l -> aux l in aux !load_paths (* Root p is bound to A.B.C.D and we require file C.D.E.F *) (* We may mean A.B.C.D.E.F, or A.B.C.D.C.D.E.F *) (* Root p is bound to A.B.C.C and we require file C.C.E.F *) (* We may mean A.B.C.C.E.F, or A.B.C.C.C.E.F, or A.B.C.C.C.C.E.F *) let intersections d1 d2 = let rec aux d1 = if d1 = empty_dirpath then [d2] else let rest = aux (snd (chop_dirpath 1 d1)) in if is_dirpath_prefix_of d1 d2 then drop_dirpath_prefix d1 d2 :: rest else rest in aux d1 let loadpaths_matching_dir_path dir = let rec aux = function | [] -> [] | (p,d,true) :: l -> let inters = intersections d dir in List.map (fun tl -> (extend_path_with_dirpath p tl,append_dirpath d tl)) inters @ aux l | (p,d,_) :: l -> (extend_path_with_dirpath p dir,append_dirpath d dir) :: aux l in aux !load_paths let get_full_load_paths () = List.map (fun (a,b,c) -> (a,b)) !load_paths (************************************************************************) (*s Modules on disk contain the following informations (after the magic number, and before the digest). *) type compilation_unit_name = dir_path type library_disk = { md_name : compilation_unit_name; md_compiled : LightenLibrary.lightened_compiled_library; md_objects : Declaremods.library_objects; md_deps : (compilation_unit_name * Digest.t) list; md_imports : compilation_unit_name list } (*s Modules loaded in memory contain the following informations. They are kept in the global table [libraries_table]. 
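(* Illustrative comparison: [library_t] below mirrors the on-disk record
   [library_disk] above, except that the compiled part has been restored
   from its lightened form into a [compiled_library] and that the digest
   of the .vo file is kept alongside, so that dependencies can later be
   checked for consistency. *)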
*) type library_t = { library_name : compilation_unit_name; library_compiled : compiled_library; library_objects : Declaremods.library_objects; library_deps : (compilation_unit_name * Digest.t) list; library_imports : compilation_unit_name list; library_digest : Digest.t } module LibraryOrdered = struct type t = dir_path let compare d1 d2 = Pervasives.compare (List.rev (repr_dirpath d1)) (List.rev (repr_dirpath d2)) end module LibraryMap = Map.Make(LibraryOrdered) module LibraryFilenameMap = Map.Make(LibraryOrdered) (* This is a map from names to loaded libraries *) let libraries_table = ref LibraryMap.empty (* This is the map of loaded libraries filename *) (* (not synchronized so as not to be caught in the states on disk) *) let libraries_filename_table = ref LibraryFilenameMap.empty (* These are the _ordered_ sets of loaded, imported and exported libraries *) let libraries_loaded_list = ref [] let libraries_imports_list = ref [] let libraries_exports_list = ref [] let freeze () = !libraries_table, !libraries_loaded_list, !libraries_imports_list, !libraries_exports_list let unfreeze (mt,mo,mi,me) = libraries_table := mt; libraries_loaded_list := mo; libraries_imports_list := mi; libraries_exports_list := me let init () = libraries_table := LibraryMap.empty; libraries_loaded_list := []; libraries_imports_list := []; libraries_exports_list := [] let _ = Summary.declare_summary "MODULES" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } (* various requests to the tables *) let find_library dir = LibraryMap.find dir !libraries_table let try_find_library dir = try find_library dir with Not_found -> error ("Unknown library " ^ (string_of_dirpath dir)) let register_library_filename dir f = (* Not synchronized: overwrite the previous binding if one existed *) (* from a previous play of the session *) libraries_filename_table := LibraryFilenameMap.add dir f !libraries_filename_table let library_full_filename dir = try LibraryFilenameMap.find dir !libraries_filename_table with Not_found -> "" let overwrite_library_filenames f = let f = if Filename.is_relative f then Filename.concat (Sys.getcwd ()) f else f in LibraryMap.iter (fun dir _ -> register_library_filename dir f) !libraries_table let library_is_loaded dir = try let _ = find_library dir in true with Not_found -> false let library_is_opened dir = List.exists (fun m -> m.library_name = dir) !libraries_imports_list let loaded_libraries () = List.map (fun m -> m.library_name) !libraries_loaded_list let opened_libraries () = List.map (fun m -> m.library_name) !libraries_imports_list (* If a library is loaded several time, then the first occurrence must be performed first, thus the libraries_loaded_list ... *) let register_loaded_library m = let rec aux = function | [] -> [m] | m'::_ as l when m'.library_name = m.library_name -> l | m'::l' -> m' :: aux l' in libraries_loaded_list := aux !libraries_loaded_list; libraries_table := LibraryMap.add m.library_name m !libraries_table (* ... 
while if a library is imported/exported several time, then only the last occurrence is really needed - though the imported list may differ from the exported list (consider the sequence Export A; Export B; Import A which results in A;B for exports but in B;A for imports) *) let rec remember_last_of_each l m = match l with | [] -> [m] | m'::l' when m'.library_name = m.library_name -> remember_last_of_each l' m | m'::l' -> m' :: remember_last_of_each l' m let register_open_library export m = libraries_imports_list := remember_last_of_each !libraries_imports_list m; if export then libraries_exports_list := remember_last_of_each !libraries_exports_list m (************************************************************************) (*s Opening libraries *) (* [open_library export explicit m] opens library [m] if not already opened _or_ if explicitly asked to be (re)opened *) let eq_lib_name m1 m2 = m1.library_name = m2.library_name let open_library export explicit_libs m = if (* Only libraries indirectly to open are not reopen *) (* Libraries explicitly mentionned by the user are always reopen *) List.exists (eq_lib_name m) explicit_libs or not (library_is_opened m.library_name) then begin register_open_library export m; Declaremods.really_import_module (MPfile m.library_name) end else if export then libraries_exports_list := remember_last_of_each !libraries_exports_list m (* open_libraries recursively open a list of libraries but opens only once a library that is re-exported many times *) let open_libraries export modl = let to_open_list = List.fold_left (fun l m -> let subimport = List.fold_left (fun l m -> remember_last_of_each l (try_find_library m)) l m.library_imports in remember_last_of_each subimport m) [] modl in List.iter (open_library export modl) to_open_list (**********************************************************************) (* import and export - synchronous operations*) let open_import i (_,(dir,export)) = if i=1 then (* even if the library is already imported, we re-import it *) (* if not (library_is_opened dir) then *) open_libraries export [try_find_library dir] let cache_import obj = open_import 1 obj let subst_import (_,o) = o let classify_import (_,export as obj) = if export then Substitute obj else Dispose let in_import : dir_path * bool -> obj = declare_object {(default_object "IMPORT LIBRARY") with cache_function = cache_import; open_function = open_import; subst_function = subst_import; classify_function = classify_import } (************************************************************************) (*s Low-level interning/externing of libraries to files *) (*s Loading from disk to cache (preparation phase) *) let (raw_extern_library, raw_intern_library) = System.raw_extern_intern Coq_config.vo_magic_number ".vo" (************************************************************************) (*s Locate absolute or partially qualified library names in the path *) exception LibUnmappedDir exception LibNotFound type library_location = LibLoaded | LibInPath let locate_absolute_library dir = (* Search in loadpath *) let pref, base = split_dirpath dir in let loadpath = root_paths_matching_dir_path pref in if loadpath = [] then raise LibUnmappedDir; try let name = (string_of_id base)^".vo" in let _, file = System.where_in_path ~warn:false loadpath name in (dir, file) with Not_found -> (* Last chance, removed from the file system but still in memory *) if library_is_loaded dir then (dir, library_full_filename dir) else raise LibNotFound let locate_qualified_library warn qid = try (* 
Search library in loadpath *) let dir, base = repr_qualid qid in let loadpath = loadpaths_matching_dir_path dir in if loadpath = [] then raise LibUnmappedDir; let name = string_of_id base ^ ".vo" in let lpath, file = System.where_in_path ~warn (List.map fst loadpath) name in let dir = add_dirpath_suffix (List.assoc lpath loadpath) base in (* Look if loaded *) if library_is_loaded dir then (LibLoaded, dir, library_full_filename dir) (* Otherwise, look for it in the file system *) else (LibInPath, dir, file) with Not_found -> raise LibNotFound let explain_locate_library_error qid = function | LibUnmappedDir -> let prefix, _ = repr_qualid qid in errorlabstrm "load_absolute_library_from" (str "Cannot load " ++ pr_qualid qid ++ str ":" ++ spc () ++ str "no physical path bound to" ++ spc () ++ pr_dirpath prefix ++ fnl ()) | LibNotFound -> errorlabstrm "load_absolute_library_from" (str"Cannot find library " ++ pr_qualid qid ++ str" in loadpath") | e -> raise e let try_locate_absolute_library dir = try locate_absolute_library dir with e when Errors.noncritical e -> explain_locate_library_error (qualid_of_dirpath dir) e let try_locate_qualified_library (loc,qid) = try let (_,dir,f) = locate_qualified_library (Flags.is_verbose()) qid in dir,f with e when Errors.noncritical e -> explain_locate_library_error qid e (************************************************************************) (* Internalise libraries *) let mk_library md table digest = let md_compiled = LightenLibrary.load ~load_proof:!Flags.load_proofs table md.md_compiled in { library_name = md.md_name; library_compiled = md_compiled; library_objects = md.md_objects; library_deps = md.md_deps; library_imports = md.md_imports; library_digest = digest } let fetch_opaque_table (f,pos,digest) = try let ch = System.with_magic_number_check raw_intern_library f in seek_in ch pos; if System.marshal_in f ch <> digest then failwith "File changed!"; let table = (System.marshal_in f ch : LightenLibrary.table) in close_in ch; table with e when Errors.noncritical e -> error ("The file "^f^" is inaccessible or has changed,\n" ^ "cannot load some opaque constant bodies in it.\n") let intern_from_file f = let ch = System.with_magic_number_check raw_intern_library f in let lmd = System.marshal_in f ch in let pos = pos_in ch in let digest = System.marshal_in f ch in let table = lazy (fetch_opaque_table (f,pos,digest)) in register_library_filename lmd.md_name f; let library = mk_library lmd table digest in close_in ch; library let rec intern_library needed (dir, f) = (* Look if in the current logical environment *) try find_library dir, needed with Not_found -> (* Look if already listed and consequently its dependencies too *) try List.assoc dir needed, needed with Not_found -> (* [dir] is an absolute name which matches [f] which must be in loadpath *) let m = intern_from_file f in if dir <> m.library_name then errorlabstrm "load_physical_library" (str ("The file " ^ f ^ " contains library") ++ spc () ++ pr_dirpath m.library_name ++ spc () ++ str "and not library" ++ spc() ++ pr_dirpath dir); m, intern_library_deps needed dir m and intern_library_deps needed dir m = (dir,m)::List.fold_left (intern_mandatory_library dir) needed m.library_deps and intern_mandatory_library caller needed (dir,d) = let m,needed = intern_library needed (try_locate_absolute_library dir) in if d <> m.library_digest then errorlabstrm "" (strbrk ("Compiled library "^(string_of_dirpath caller)^ ".vo makes inconsistent assumptions over library " ^(string_of_dirpath dir))); needed let 
rec_intern_library needed mref = let _,needed = intern_library needed mref in needed let check_library_short_name f dir = function | Some id when id <> snd (split_dirpath dir) -> errorlabstrm "check_library_short_name" (str ("The file " ^ f ^ " contains library") ++ spc () ++ pr_dirpath dir ++ spc () ++ str "and not library" ++ spc () ++ pr_id id) | _ -> () let rec_intern_by_filename_only id f = let m = try intern_from_file f with Sys_error s -> error s in (* Only the base name is expected to match *) check_library_short_name f m.library_name id; (* We check no other file containing same library is loaded *) if library_is_loaded m.library_name then begin Flags.if_warn msg_warning (pr_dirpath m.library_name ++ str " is already loaded from file " ++ str (library_full_filename m.library_name)); m.library_name, [] end else let needed = intern_library_deps [] m.library_name m in m.library_name, needed let rec_intern_library_from_file idopt f = (* A name is specified, we have to check it contains library id *) let paths = get_load_paths () in let _, f = System.find_file_in_path ~warn:(Flags.is_verbose()) paths (f^".vo") in rec_intern_by_filename_only idopt f (**********************************************************************) (*s [require_library] loads and possibly opens a library. This is a synchronized operation. It is performed as follows: preparation phase: (functions require_library* ) the library and its dependencies are read from to disk (using intern_* ) [they are read from disk to ensure that at section/module discharging time, the physical library referred to outside the section/module is the one that was used at type-checking time in the section/module] execution phase: (through add_leaf and cache_require) the library is loaded in the environment and Nametab, the objects are registered etc, using functions from Declaremods (via load_library, which recursively loads its dependencies) *) type library_reference = dir_path list * bool option let register_library m = Declaremods.register_library m.library_name m.library_compiled m.library_objects m.library_digest; register_loaded_library m (* Follow the semantics of Anticipate object: - called at module or module type closing when a Require occurs in the module or module type - not called from a library (i.e. 
a module identified with a file) *) let load_require _ (_,(needed,modl,_)) = List.iter register_library needed let open_require i (_,(_,modl,export)) = Option.iter (fun exp -> open_libraries exp (List.map find_library modl)) export (* [needed] is the ordered list of libraries not already loaded *) let cache_require o = load_require 1 o; open_require 1 o let discharge_require (_,o) = Some o (* open_function is never called from here because an Anticipate object *) type require_obj = library_t list * dir_path list * bool option let in_require : require_obj -> obj = declare_object {(default_object "REQUIRE") with cache_function = cache_require; load_function = load_require; open_function = (fun _ _ -> assert false); discharge_function = discharge_require; classify_function = (fun o -> Anticipate o) } (* Require libraries, import them if [export <> None], mark them for export if [export = Some true] *) let xml_require = ref (fun d -> ()) let set_xml_require f = xml_require := f let require_library_from_dirpath modrefl export = let needed = List.fold_left rec_intern_library [] modrefl in let needed = List.rev_map snd needed in let modrefl = List.map fst modrefl in if Lib.is_module_or_modtype () then begin add_anonymous_leaf (in_require (needed,modrefl,None)); Option.iter (fun exp -> List.iter (fun dir -> add_anonymous_leaf (in_import(dir,exp))) modrefl) export end else add_anonymous_leaf (in_require (needed,modrefl,export)); if !Flags.xml_export then List.iter !xml_require modrefl; add_frozen_state () let require_library qidl export = let modrefl = List.map try_locate_qualified_library qidl in require_library_from_dirpath modrefl export let require_library_from_file idopt file export = let modref,needed = rec_intern_library_from_file idopt file in let needed = List.rev_map snd needed in if Lib.is_module_or_modtype () then begin add_anonymous_leaf (in_require (needed,[modref],None)); Option.iter (fun exp -> add_anonymous_leaf (in_import (modref,exp))) export end else add_anonymous_leaf (in_require (needed,[modref],export)); if !Flags.xml_export then !xml_require modref; add_frozen_state () (* the function called by Vernacentries.vernac_import *) let import_module export (loc,qid) = try match Nametab.locate_module qid with | MPfile dir -> if Lib.is_module_or_modtype () || not export then add_anonymous_leaf (in_import (dir, export)) else add_anonymous_leaf (in_import (dir, export)) | mp -> Declaremods.import_module export mp with Not_found -> user_err_loc (loc,"import_library", str ((string_of_qualid qid)^" is not a module")) (************************************************************************) (*s Initializing the compilation of a library. *) let check_coq_overwriting p id = let l = repr_dirpath p in if not !Flags.boot && l <> [] && string_of_id (list_last l) = "Coq" then errorlabstrm "" (strbrk ("Cannot build module "^string_of_dirpath p^"."^string_of_id id^ ": it starts with prefix \"Coq\" which is reserved for the Coq library.")) let start_library f = let paths = get_load_paths () in let _,longf = System.find_file_in_path ~warn:(Flags.is_verbose()) paths (f^".v") in let ldir0 = find_logical_path (Filename.dirname longf) in let id = id_of_string (Filename.basename f) in check_coq_overwriting ldir0 id; let ldir = add_dirpath_suffix ldir0 id in Declaremods.start_library ldir; ldir,longf (************************************************************************) (*s [save_library dir] ends library [dir] and save it to the disk. 
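(* Reading of [save_library_to] below (inferred from the code): a .vo
   file contains the marshalled [library_disk] record, then the digest
   of the file up to that point, then the table of opaque bodies; the
   digest is written before the table so that the table can be skipped
   when proofs are not loaded. *)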
*) let current_deps () = List.map (fun m -> (m.library_name, m.library_digest)) !libraries_loaded_list let current_reexports () = List.map (fun m -> m.library_name) !libraries_exports_list let error_recursively_dependent_library dir = errorlabstrm "" (strbrk "Unable to use logical name " ++ pr_dirpath dir ++ strbrk " to save current library because" ++ strbrk " it already depends on a library of this name.") (* Security weakness: file might have been changed on disk between writing the content and computing the checksum... *) let save_library_to dir f = let cenv, seg = Declaremods.end_library dir in let cenv, table = LightenLibrary.save cenv in let md = { md_name = dir; md_compiled = cenv; md_objects = seg; md_deps = current_deps (); md_imports = current_reexports () } in if List.mem_assoc dir md.md_deps then error_recursively_dependent_library dir; let (f',ch) = raw_extern_library f in try System.marshal_out ch md; flush ch; (* The loading of the opaque definitions table is optional whereas the digest is loaded all the time. As a consequence, the digest must be serialized before the table (if we want to keep the current simple layout of .vo files). This also entails that the digest does not take opaque terms into account anymore. *) let di = Digest.file f' in System.marshal_out ch di; System.marshal_out ch table; close_out ch with reraise -> warning ("Removed file "^f'); close_out ch; Sys.remove f'; raise reraise (************************************************************************) (*s Display the memory use of a library. *) open Printf let mem s = let m = try_find_library s in h 0 (str (sprintf "%dk (cenv = %dk / seg = %dk)" (size_kb m) (size_kb m.library_compiled) (size_kb m.library_objects))) coq-8.4pl2/library/lib.ml0000640000175000001440000005333512010532755014404 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* f i (make_oname prefix id, obj)) let load_objects = iter_objects load_object let open_objects = iter_objects open_object let subst_objects subst seg = let subst_one = fun (id,obj as node) -> let obj' = subst_object (subst,obj) in if obj' == obj then node else (id, obj') in list_smartmap subst_one seg (*let load_and_subst_objects i prefix subst seg = List.rev (List.fold_left (fun seg (id,obj as node) -> let obj' = subst_object (make_oname prefix id, subst, obj) in let node = if obj == obj' then node else (id, obj') in load_object i (make_oname prefix id, obj'); node :: seg) [] seg) *) let classify_segment seg = let rec clean ((substl,keepl,anticipl) as acc) = function | (_,CompilingLibrary _) :: _ | [] -> acc | ((sp,kn),Leaf o) :: stk -> let id = Names.id_of_label (Names.label kn) in (match classify_object o with | Dispose -> clean acc stk | Keep o' -> clean (substl, (id,o')::keepl, anticipl) stk | Substitute o' -> clean ((id,o')::substl, keepl, anticipl) stk | Anticipate o' -> clean (substl, keepl, o'::anticipl) stk) | (_,ClosedSection _) :: stk -> clean acc stk (* LEM; TODO: Understand what this does and see if what I do is the correct thing for ClosedMod(ule|type) *) | (_,ClosedModule _) :: stk -> clean acc stk | (_,OpenedSection _) :: _ -> error "there are still opened sections" | (_,OpenedModule (ty,_,_,_)) :: _ -> error ("there are still opened " ^ module_kind ty ^"s") | (_,FrozenState _) :: stk -> clean acc stk in clean ([],[],[]) (List.rev seg) let segment_of_objects prefix = List.map (fun (id,obj) -> (make_oname prefix id, Leaf obj)) (* We keep trace of 
operations in the stack [lib_stk]. [path_prefix] is the current path of sections, where sections are stored in ``correct'' order, the oldest coming first in the list. It may seems costly, but in practice there is not so many openings and closings of sections, but on the contrary there are many constructions of section paths based on the library path. *) let initial_prefix = default_library,(Names.initial_path,Names.empty_dirpath) let lib_stk = ref ([] : library_segment) let comp_name = ref None let library_dp () = match !comp_name with Some m -> m | None -> default_library (* [path_prefix] is a pair of absolute dirpath and a pair of current module path and relative section path *) let path_prefix = ref initial_prefix let sections_depth () = List.length (Names.repr_dirpath (snd (snd !path_prefix))) let sections_are_opened () = match Names.repr_dirpath (snd (snd !path_prefix)) with [] -> false | _ -> true let cwd () = fst !path_prefix let cwd_except_section () = Libnames.pop_dirpath_n (sections_depth ()) (cwd ()) let current_dirpath sec = Libnames.drop_dirpath_prefix (library_dp ()) (if sec then cwd () else cwd_except_section ()) let make_path id = Libnames.make_path (cwd ()) id let make_path_except_section id = Libnames.make_path (cwd_except_section ()) id let path_of_include () = let dir = Names.repr_dirpath (cwd ()) in let new_dir = List.tl dir in let id = List.hd dir in Libnames.make_path (Names.make_dirpath new_dir) id let current_prefix () = snd !path_prefix let make_kn id = let mp,dir = current_prefix () in Names.make_kn mp dir (Names.label_of_id id) let make_con id = let mp,dir = current_prefix () in Names.make_con mp dir (Names.label_of_id id) let make_oname id = make_path id, make_kn id let recalc_path_prefix () = let rec recalc = function | (sp, OpenedSection (dir,_)) :: _ -> dir | (sp, OpenedModule (_,_,dir,_)) :: _ -> dir | (sp, CompilingLibrary dir) :: _ -> dir | _::l -> recalc l | [] -> initial_prefix in path_prefix := recalc !lib_stk let pop_path_prefix () = let dir,(mp,sec) = !path_prefix in path_prefix := fst (split_dirpath dir), (mp, fst (split_dirpath sec)) let find_entry_p p = let rec find = function | [] -> raise Not_found | ent::l -> if p ent then ent else find l in find !lib_stk let find_split_p p = let rec find = function | [] -> raise Not_found | ent::l -> if p ent then ent,l else find l in find !lib_stk let split_lib_gen test = let rec collect after equal = function | hd::before when test hd -> collect after (hd::equal) before | before -> after,equal,before in let rec findeq after = function | hd :: before -> if test hd then Some (collect after [hd] before) else (match hd with | (sp,ClosedModule seg) | (sp,ClosedSection seg) -> (match findeq after seg with | None -> findeq (hd::after) before | Some (sub_after,sub_equal,sub_before) -> Some (sub_after, sub_equal, (List.append sub_before before))) | _ -> findeq (hd::after) before) | [] -> None in match findeq [] !lib_stk with | None -> error "no such entry" | Some r -> r let split_lib sp = split_lib_gen (fun x -> fst x = sp) let split_lib_at_opening sp = let is_sp = function | x,(OpenedSection _|OpenedModule _|CompilingLibrary _) -> x = sp | _ -> false in let a,s,b = split_lib_gen is_sp in assert (List.tl s = []); (a,List.hd s,b) (* Adding operations. 
*) let add_entry sp node = lib_stk := (sp,node) :: !lib_stk let anonymous_id = let n = ref 0 in fun () -> incr n; Names.id_of_string ("_" ^ (string_of_int !n)) let add_anonymous_entry node = let id = anonymous_id () in let name = make_oname id in add_entry name node; name let add_leaf id obj = if fst (current_prefix ()) = Names.initial_path then error ("No session module started (use -top dir)"); let oname = make_oname id in cache_object (oname,obj); add_entry oname (Leaf obj); oname let add_discharged_leaf id obj = let oname = make_oname id in let newobj = rebuild_object obj in cache_object (oname,newobj); add_entry oname (Leaf newobj) let add_leaves id objs = let oname = make_oname id in let add_obj obj = add_entry oname (Leaf obj); load_object 1 (oname,obj) in List.iter add_obj objs; oname let add_anonymous_leaf obj = let id = anonymous_id () in let oname = make_oname id in cache_object (oname,obj); add_entry oname (Leaf obj) let add_frozen_state () = let _ = add_anonymous_entry (FrozenState (freeze_summaries())) in () (* Modules. *) let is_opening_node = function | _,(OpenedSection _ | OpenedModule _) -> true | _ -> false let is_opening_node_or_lib = function | _,(CompilingLibrary _ | OpenedSection _ | OpenedModule _) -> true | _ -> false let current_mod_id () = try match find_entry_p is_opening_node_or_lib with | oname,OpenedModule (_,_,_,fs) -> basename (fst oname) | oname,CompilingLibrary _ -> basename (fst oname) | _ -> error "you are not in a module" with Not_found -> error "no opened modules" let start_mod is_type export id mp fs = let dir = add_dirpath_suffix (fst !path_prefix) id in let prefix = dir,(mp,Names.empty_dirpath) in let sp = make_path id in let oname = sp, make_kn id in let exists = if is_type then Nametab.exists_cci sp else Nametab.exists_module dir in if exists then errorlabstrm "open_module" (pr_id id ++ str " already exists"); add_entry oname (OpenedModule (is_type,export,prefix,fs)); path_prefix := prefix; prefix (* add_frozen_state () must be called in declaremods *) let start_module = start_mod false let start_modtype = start_mod true None let error_still_opened string oname = let id = basename (fst oname) in errorlabstrm "" (str ("The "^string^" ") ++ pr_id id ++ str " is still opened.") let end_mod is_type = let oname,fs = try match find_entry_p is_opening_node with | oname,OpenedModule (ty,_,_,fs) -> if ty = is_type then oname,fs else error_still_opened (module_kind ty) oname | oname,OpenedSection _ -> error_still_opened "section" oname | _ -> assert false with Not_found -> error "No opened modules." in let (after,mark,before) = split_lib_at_opening oname in lib_stk := before; add_entry oname (ClosedModule (List.rev (mark::after))); let prefix = !path_prefix in recalc_path_prefix (); (* add_frozen_state must be called after processing the module, because we cannot recache interactive modules *) (oname, prefix, fs, after) let end_module () = end_mod false let end_modtype () = end_mod true let contents_after = function | None -> !lib_stk | Some sp -> let (after,_,_) = split_lib sp in after (* Modules. *) (* TODO: use check_for_module ? 
*) let start_compilation s mp = if !comp_name <> None then error "compilation unit is already started"; if snd (snd (!path_prefix)) <> Names.empty_dirpath then error "some sections are already opened"; let prefix = s, (mp, Names.empty_dirpath) in let _ = add_anonymous_entry (CompilingLibrary prefix) in comp_name := Some s; path_prefix := prefix let end_compilation dir = let _ = try match snd (find_entry_p is_opening_node) with | OpenedSection _ -> error "There are some open sections." | OpenedModule (ty,_,_,_) -> error ("There are some open "^module_kind ty^"s.") | _ -> assert false with Not_found -> () in let is_opening_lib = function _,CompilingLibrary _ -> true | _ -> false in let oname = try match find_entry_p is_opening_lib with | (oname, CompilingLibrary prefix) -> oname | _ -> assert false with Not_found -> anomaly "No module declared" in let _ = match !comp_name with | None -> anomaly "There should be a module name..." | Some m -> if m <> dir then anomaly ("The current open module has name "^ (Names.string_of_dirpath m) ^ " and not " ^ (Names.string_of_dirpath dir)); in let (after,mark,before) = split_lib_at_opening oname in comp_name := None; !path_prefix,after (* Returns true if we are inside an opened module or module type *) let is_module_gen which = let test = function | _, OpenedModule (ty,_,_,_) -> which ty | _ -> false in try let _ = find_entry_p test in true with Not_found -> false let is_module_or_modtype () = is_module_gen (fun _ -> true) let is_modtype () = is_module_gen (fun b -> b) let is_module () = is_module_gen (fun b -> not b) (* Returns the opening node of a given name *) let find_opening_node id = try let oname,entry = find_entry_p is_opening_node in let id' = basename (fst oname) in if id <> id' then error ("Last block to end has name "^(Names.string_of_id id')^"."); entry with Not_found -> error "There is nothing to end."
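(* A minimal sketch of how the bookkeeping above combines; the library
   name [Top], the section [S] and the identifier [x] are placeholders,
   not names used elsewhere in this file. While compiling [Top] with one
   section [S] open, [cwd ()] is [Top.S], so paths built for new
   declarations carry the section part, whereas the "except section"
   variant drops it again, which is the form the name takes once the
   section is discharged.

     let _sketch () =
       let x = Names.id_of_string "x" in
       make_path x,                   (* Top.S.x : full path inside S      *)
       make_path_except_section x,    (* Top.x   : path after discharge    *)
       sections_depth ()              (* 1       : one section is open     *)
*)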
(* Discharge tables *) (* At each level of section, we remember - the list of variables in this section - the list of variables on which each constant depends in this section - the list of variables on which each inductive depends in this section - the list of substitution to do at section closing *) type binding_kind = Explicit | Implicit type variable_info = Names.identifier * binding_kind * Term.constr option * Term.types type variable_context = variable_info list type abstr_list = variable_context Names.Cmap.t * variable_context Names.Mindmap.t let sectab = ref ([] : ((Names.identifier * binding_kind) list * Cooking.work_list * abstr_list) list) let add_section () = sectab := ([],(Names.Cmap.empty,Names.Mindmap.empty),(Names.Cmap.empty,Names.Mindmap.empty)) :: !sectab let add_section_variable id impl = match !sectab with | [] -> () (* because (Co-)Fixpoint temporarily uses local vars *) | (vars,repl,abs)::sl -> sectab := ((id,impl)::vars,repl,abs)::sl let extract_hyps (secs,ohyps) = let rec aux = function | ((id,impl)::idl,(id',b,t)::hyps) when id=id' -> (id',impl,b,t) :: aux (idl,hyps) | (id::idl,hyps) -> aux (idl,hyps) | [], _ -> [] in aux (secs,ohyps) let instance_from_variable_context sign = let rec inst_rec = function | (id,b,None,_) :: sign -> id :: inst_rec sign | _ :: sign -> inst_rec sign | [] -> [] in Array.of_list (inst_rec sign) let named_of_variable_context = List.map (fun (id,_,b,t) -> (id,b,t)) let add_section_replacement f g hyps = match !sectab with | [] -> () | (vars,exps,abs)::sl -> let sechyps = extract_hyps (vars,hyps) in let args = instance_from_variable_context (List.rev sechyps) in sectab := (vars,f args exps,g sechyps abs)::sl let add_section_kn kn = let f x (l1,l2) = (l1,Names.Mindmap.add kn x l2) in add_section_replacement f f let add_section_constant kn = let f x (l1,l2) = (Names.Cmap.add kn x l1,l2) in add_section_replacement f f let replacement_context () = pi2 (List.hd !sectab) let section_segment_of_constant con = Names.Cmap.find con (fst (pi3 (List.hd !sectab))) let section_segment_of_mutual_inductive kn = Names.Mindmap.find kn (snd (pi3 (List.hd !sectab))) let rec list_mem_assoc x = function | [] -> raise Not_found | (a,_)::l -> compare a x = 0 or list_mem_assoc x l let section_instance = function | VarRef id -> if list_mem_assoc id (pi1 (List.hd !sectab)) then [||] else raise Not_found | ConstRef con -> Names.Cmap.find con (fst (pi2 (List.hd !sectab))) | IndRef (kn,_) | ConstructRef ((kn,_),_) -> Names.Mindmap.find kn (snd (pi2 (List.hd !sectab))) let is_in_section ref = try ignore (section_instance ref); true with Not_found -> false let init_sectab () = sectab := [] let freeze_sectab () = !sectab let unfreeze_sectab s = sectab := s let _ = Summary.declare_summary "section-context" { Summary.freeze_function = freeze_sectab; Summary.unfreeze_function = unfreeze_sectab; Summary.init_function = init_sectab } (*************) (* Sections. 
*) (* XML output hooks *) let xml_open_section = ref (fun id -> ()) let xml_close_section = ref (fun id -> ()) let set_xml_open_section f = xml_open_section := f let set_xml_close_section f = xml_close_section := f let open_section id = let olddir,(mp,oldsec) = !path_prefix in let dir = add_dirpath_suffix olddir id in let prefix = dir, (mp, add_dirpath_suffix oldsec id) in let name = make_path id, make_kn id (* this makes little sense however *) in if Nametab.exists_section dir then errorlabstrm "open_section" (pr_id id ++ str " already exists."); let fs = freeze_summaries() in add_entry name (OpenedSection (prefix, fs)); (*Pushed for the lifetime of the section: removed by unfrozing the summary*) Nametab.push_dir (Nametab.Until 1) dir (DirOpenSection prefix); path_prefix := prefix; if !Flags.xml_export then !xml_open_section id; add_section () (* Restore lib_stk and summaries as before the section opening, and add a ClosedSection object. *) let discharge_item ((sp,_ as oname),e) = match e with | Leaf lobj -> Option.map (fun o -> (basename sp,o)) (discharge_object (oname,lobj)) | FrozenState _ -> None | ClosedSection _ | ClosedModule _ -> None | OpenedSection _ | OpenedModule _ | CompilingLibrary _ -> anomaly "discharge_item" let close_section () = let oname,fs = try match find_entry_p is_opening_node with | oname,OpenedSection (_,fs) -> oname,fs | _ -> assert false with Not_found -> error "No opened section." in let (secdecls,mark,before) = split_lib_at_opening oname in lib_stk := before; let full_olddir = fst !path_prefix in pop_path_prefix (); add_entry oname (ClosedSection (List.rev (mark::secdecls))); if !Flags.xml_export then !xml_close_section (basename (fst oname)); let newdecls = List.map discharge_item secdecls in Summary.unfreeze_summaries fs; List.iter (Option.iter (fun (id,o) -> add_discharged_leaf id o)) newdecls; Cooking.clear_cooking_sharing (); Nametab.push_dir (Nametab.Until 1) full_olddir (DirClosedSection full_olddir) (*****************) (* Backtracking. *) let (inLabel : int -> obj), (outLabel : obj -> int) = declare_object_full {(default_object "DOT") with classify_function = (fun _ -> Dispose)} let recache_decl = function | (sp, Leaf o) -> cache_object (sp,o) | (_,OpenedSection _) -> add_section () | _ -> () let recache_context ctx = List.iter recache_decl ctx let is_frozen_state = function (_,FrozenState _) -> true | _ -> false let set_lib_stk new_lib_stk = lib_stk := new_lib_stk; recalc_path_prefix (); let spf = match find_entry_p is_frozen_state with | (sp, FrozenState f) -> unfreeze_summaries f; sp | _ -> assert false in let (after,_,_) = split_lib spf in try recache_context after with | Not_found -> error "Tried to set environment to an incoherent state." let reset_to_gen test = let (_,_,before) = split_lib_gen test in set_lib_stk before let reset_to sp = reset_to_gen (fun x -> fst x = sp) let first_command_label = 1 let mark_end_of_command, current_command_label, reset_command_label = let n = ref (first_command_label-1) in (fun () -> match !lib_stk with (_,Leaf o)::_ when object_tag o = "DOT" -> () | _ -> incr n;add_anonymous_leaf (inLabel !n)), (fun () -> !n), (fun x -> n:=x;add_anonymous_leaf (inLabel x)) let is_label_n n x = match x with | (sp,Leaf o) when object_tag o = "DOT" && n = outLabel o -> true | _ -> false (** Reset the label registered by [mark_end_of_command()] with number n, which should be strictly in the past. 
*) let reset_label n = if n >= current_command_label () then error "Cannot backtrack to the current label or a future one"; reset_to_gen (is_label_n n); (* forget state numbers after n only if reset succeeded *) reset_command_label n (** Search the last label registered before defining [id] *) let label_before_name (loc,id) = let found = ref false in let search = function | (_,Leaf o) when !found && object_tag o = "DOT" -> true | (sp,_) -> (if id = snd (repr_path (fst sp)) then found := true); false in match find_entry_p search with | (_,Leaf o) -> outLabel o | _ -> raise Not_found (* State and initialization. *) type frozen = Names.dir_path option * library_segment let freeze () = (!comp_name, !lib_stk) let unfreeze (mn,stk) = comp_name := mn; lib_stk := stk; recalc_path_prefix () let init () = lib_stk := []; add_frozen_state (); comp_name := None; path_prefix := initial_prefix; init_summaries() (* Misc *) let mp_of_global ref = match ref with | VarRef id -> fst (current_prefix ()) | ConstRef cst -> Names.con_modpath cst | IndRef ind -> Names.ind_modpath ind | ConstructRef constr -> Names.constr_modpath constr let rec dp_of_mp modp = match modp with | Names.MPfile dp -> dp | Names.MPbound _ -> library_dp () | Names.MPdot (mp,_) -> dp_of_mp mp let rec split_mp mp = match mp with | Names.MPfile dp -> dp, Names.empty_dirpath | Names.MPdot (prfx, lbl) -> let mprec, dprec = split_mp prfx in mprec, Names.make_dirpath (Names.id_of_string (Names.string_of_label lbl) :: (Names.repr_dirpath dprec)) | Names.MPbound mbid -> let (_, id, dp) = Names.repr_mbid mbid in library_dp(), Names.make_dirpath [id] let split_modpath mp = let rec aux = function | Names.MPfile dp -> dp, [] | Names.MPbound mbid -> library_dp (), [Names.id_of_mbid mbid] | Names.MPdot (mp,l) -> let (mp', lab) = aux mp in (mp', Names.id_of_label l :: lab) in let (mp, l) = aux mp in mp, l let library_part ref = match ref with | VarRef id -> library_dp () | _ -> dp_of_mp (mp_of_global ref) let remove_section_part ref = let sp = Nametab.path_of_global ref in let dir,_ = repr_path sp in match ref with | VarRef id -> anomaly "remove_section_part not supported on local variables" | _ -> if is_dirpath_prefix_of dir (cwd ()) then (* Not yet (fully) discharged *) pop_dirpath_n (sections_depth ()) (cwd ()) else (* Theorem/Lemma outside its outer section of definition *) dir (************************) (* Discharging names *) let pop_kn kn = let (mp,dir,l) = Names.repr_mind kn in Names.make_mind mp (pop_dirpath dir) l let pop_con con = let (mp,dir,l) = Names.repr_con con in Names.make_con mp (pop_dirpath dir) l let con_defined_in_sec kn = let _,dir,_ = Names.repr_con kn in dir <> Names.empty_dirpath && fst (split_dirpath dir) = snd (current_prefix ()) let defined_in_sec kn = let _,dir,_ = Names.repr_mind kn in dir <> Names.empty_dirpath && fst (split_dirpath dir) = snd (current_prefix ()) let discharge_global = function | ConstRef kn when con_defined_in_sec kn -> ConstRef (pop_con kn) | IndRef (kn,i) when defined_in_sec kn -> IndRef (pop_kn kn,i) | ConstructRef ((kn,i),j) when defined_in_sec kn -> ConstructRef ((pop_kn kn,i),j) | r -> r let discharge_kn kn = if defined_in_sec kn then pop_kn kn else kn let discharge_con cst = if con_defined_in_sec cst then pop_con cst else cst let discharge_inductive (kn,i) = (discharge_kn kn,i) coq-8.4pl2/library/nametab.ml0000640000175000001440000003712112010532755015240 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development 
Team *) (* string val repr : t -> identifier * module_ident list end (* A ['a t] is a map from [user_name] to ['a], with possible lookup by partially qualified names of type [qualid]. The mapping of partially qualified names to ['a] is determined by the [visibility] parameter of [push]. The [shortest_qualid] function given a user_name Coq.A.B.x, tries to find the shortest among x, B.x, A.B.x and Coq.A.B.x that denotes the same object. *) module type NAMETREE = sig type 'a t type user_name val empty : 'a t val push : visibility -> user_name -> 'a -> 'a t -> 'a t val locate : qualid -> 'a t -> 'a val find : user_name -> 'a t -> 'a val exists : user_name -> 'a t -> bool val user_name : qualid -> 'a t -> user_name val shortest_qualid : Idset.t -> user_name -> 'a t -> qualid val find_prefixes : qualid -> 'a t -> 'a list end module Make(U:UserName) : NAMETREE with type user_name = U.t = struct type user_name = U.t type 'a path_status = Nothing | Relative of user_name * 'a | Absolute of user_name * 'a (* Dictionaries of short names *) type 'a nametree = ('a path_status * 'a nametree ModIdmap.t) type 'a t = 'a nametree Idmap.t let empty = Idmap.empty (* [push_until] is used to register [Until vis] visibility and [push_exactly] to [Exactly vis] and [push_tree] chooses the right one*) let rec push_until uname o level (current,dirmap) = function | modid :: path -> let mc = try ModIdmap.find modid dirmap with Not_found -> (Nothing, ModIdmap.empty) in let this = if level <= 0 then match current with | Absolute (n,_) -> (* This is an absolute name, we must keep it otherwise it may become unaccessible forever *) Flags.if_warn msg_warning (str ("Trying to mask the absolute name \"" ^ U.to_string n ^ "\"!")); current | Nothing | Relative _ -> Relative (uname,o) else current in let ptab' = push_until uname o (level-1) mc path in (this, ModIdmap.add modid ptab' dirmap) | [] -> match current with | Absolute (uname',o') -> if o'=o then begin assert (uname=uname'); current, dirmap (* we are putting the same thing for the second time :) *) end else (* This is an absolute name, we must keep it otherwise it may become unaccessible forever *) (* But ours is also absolute! This is an error! *) error ("Cannot mask the absolute name \"" ^ U.to_string uname' ^ "\"!") | Nothing | Relative _ -> Absolute (uname,o), dirmap let rec push_exactly uname o level (current,dirmap) = function | modid :: path -> let mc = try ModIdmap.find modid dirmap with Not_found -> (Nothing, ModIdmap.empty) in if level = 0 then let this = match current with | Absolute (n,_) -> (* This is an absolute name, we must keep it otherwise it may become unaccessible forever *) Flags.if_warn msg_warning (str ("Trying to mask the absolute name \"" ^ U.to_string n ^ "\"!")); current | Nothing | Relative _ -> Relative (uname,o) in (this, dirmap) else (* not right level *) let ptab' = push_exactly uname o (level-1) mc path in (current, ModIdmap.add modid ptab' dirmap) | [] -> anomaly "Prefix longer than path! Impossible!" 
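(* A worked sketch of the visibility levels implemented by [push_until]
   and [push_exactly] above and dispatched by [push] just below; the
   path [Coq.A.B.x] and the "payload" string are placeholders. Once the
   functor is instantiated on full paths, as [SpTab] is below, pushing
   [Coq.A.B.x] with [Until 1] binds the object at every suffix, so [x],
   [B.x], [A.B.x] and [Coq.A.B.x] all resolve to it; [Until 2] skips the
   bare [x], making [B.x] the shortest accepted qualid; [Exactly 2]
   rebinds only the suffix of that precise length, [B.x], which is what
   opening (importing) the enclosing module does.

     let _sketch () =
       let dir = make_dirpath (List.rev_map id_of_string ["Coq"; "A"; "B"]) in
       let sp  = make_path dir (id_of_string "x") in
       let tab = push (Until 1) sp "payload" empty in
       locate (make_qualid (make_dirpath [id_of_string "B"]) (id_of_string "x")) tab
*)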
let push visibility uname o tab = let id,dir = U.repr uname in let ptab = try Idmap.find id tab with Not_found -> (Nothing, ModIdmap.empty) in let ptab' = match visibility with | Until i -> push_until uname o (i-1) ptab dir | Exactly i -> push_exactly uname o (i-1) ptab dir in Idmap.add id ptab' tab let rec search (current,modidtab) = function | modid :: path -> search (ModIdmap.find modid modidtab) path | [] -> current let find_node qid tab = let (dir,id) = repr_qualid qid in search (Idmap.find id tab) (repr_dirpath dir) let locate qid tab = let o = match find_node qid tab with | Absolute (uname,o) | Relative (uname,o) -> o | Nothing -> raise Not_found in o let user_name qid tab = let uname = match find_node qid tab with | Absolute (uname,o) | Relative (uname,o) -> uname | Nothing -> raise Not_found in uname let find uname tab = let id,l = U.repr uname in match search (Idmap.find id tab) l with Absolute (_,o) -> o | _ -> raise Not_found let exists uname tab = try let _ = find uname tab in true with Not_found -> false let shortest_qualid ctx uname tab = let id,dir = U.repr uname in let hidden = Idset.mem id ctx in let rec find_uname pos dir (path,tab) = match path with | Absolute (u,_) | Relative (u,_) when u=uname && not(pos=[] && hidden) -> List.rev pos | _ -> match dir with [] -> raise Not_found | id::dir -> find_uname (id::pos) dir (ModIdmap.find id tab) in let ptab = Idmap.find id tab in let found_dir = find_uname [] dir ptab in make_qualid (make_dirpath found_dir) id let push_node node l = match node with | Absolute (_,o) | Relative (_,o) when not (List.mem o l) -> o::l | _ -> l let rec flatten_idmap tab l = ModIdmap.fold (fun _ (current,idtab) l -> flatten_idmap idtab (push_node current l)) tab l let rec search_prefixes (current,modidtab) = function | modid :: path -> search_prefixes (ModIdmap.find modid modidtab) path | [] -> List.rev (flatten_idmap modidtab (push_node current [])) let find_prefixes qid tab = try let (dir,id) = repr_qualid qid in search_prefixes (Idmap.find id tab) (repr_dirpath dir) with Not_found -> [] end (* Global name tables *************************************************) module SpTab = Make (struct type t = full_path let to_string = string_of_path let repr sp = let dir,id = repr_path sp in id, (repr_dirpath dir) end) type ccitab = extended_global_reference SpTab.t let the_ccitab = ref (SpTab.empty : ccitab) type kntab = kernel_name SpTab.t let the_tactictab = ref (SpTab.empty : kntab) type mptab = module_path SpTab.t let the_modtypetab = ref (SpTab.empty : mptab) module DirTab = Make(struct type t = dir_path let to_string = string_of_dirpath let repr dir = match repr_dirpath dir with | [] -> anomaly "Empty dirpath" | id::l -> (id,l) end) (* If we have a (closed) module M having a submodule N, than N does not have the entry in [the_dirtab]. 
*) type dirtab = global_dir_reference DirTab.t let the_dirtab = ref (DirTab.empty : dirtab) (* Reversed name tables ***************************************************) (* This table translates extended_global_references back to section paths *) module Globrevtab = Map.Make(ExtRefOrdered) type globrevtab = full_path Globrevtab.t let the_globrevtab = ref (Globrevtab.empty : globrevtab) type mprevtab = dir_path MPmap.t let the_modrevtab = ref (MPmap.empty : mprevtab) type mptrevtab = full_path MPmap.t let the_modtyperevtab = ref (MPmap.empty : mptrevtab) type knrevtab = full_path KNmap.t let the_tacticrevtab = ref (KNmap.empty : knrevtab) (* Push functions *********************************************************) (* This is for permanent constructions (never discharged -- but with possibly limited visibility, i.e. Theorem, Lemma, Definition, Axiom, Parameter but also Remark and Fact) *) let push_xref visibility sp xref = match visibility with | Until _ -> the_ccitab := SpTab.push visibility sp xref !the_ccitab; the_globrevtab := Globrevtab.add xref sp !the_globrevtab | _ -> begin if SpTab.exists sp !the_ccitab then match SpTab.find sp !the_ccitab with | TrueGlobal( ConstRef _) | TrueGlobal( IndRef _) | TrueGlobal( ConstructRef _) as xref -> the_ccitab := SpTab.push visibility sp xref !the_ccitab; | _ -> the_ccitab := SpTab.push visibility sp xref !the_ccitab; else the_ccitab := SpTab.push visibility sp xref !the_ccitab; end let push_cci visibility sp ref = push_xref visibility sp (TrueGlobal ref) (* This is for Syntactic Definitions *) let push_syndef visibility sp kn = push_xref visibility sp (SynDef kn) let push = push_cci let push_modtype vis sp kn = the_modtypetab := SpTab.push vis sp kn !the_modtypetab; the_modtyperevtab := MPmap.add kn sp !the_modtyperevtab (* This is for tactic definition names *) let push_tactic vis sp kn = the_tactictab := SpTab.push vis sp kn !the_tactictab; the_tacticrevtab := KNmap.add kn sp !the_tacticrevtab (* This is to remember absolute Section/Module names and to avoid redundancy *) let push_dir vis dir dir_ref = the_dirtab := DirTab.push vis dir dir_ref !the_dirtab; match dir_ref with DirModule (_,(mp,_)) -> the_modrevtab := MPmap.add mp dir !the_modrevtab | _ -> () (* Locate functions *******************************************************) (* This should be used when syntactic definitions are allowed *) let locate_extended qid = SpTab.locate qid !the_ccitab (* This should be used when no syntactic definitions is expected *) let locate qid = match locate_extended qid with | TrueGlobal ref -> ref | SynDef _ -> raise Not_found let full_name_cci qid = SpTab.user_name qid !the_ccitab let locate_syndef qid = match locate_extended qid with | TrueGlobal _ -> raise Not_found | SynDef kn -> kn let locate_modtype qid = SpTab.locate qid !the_modtypetab let full_name_modtype qid = SpTab.user_name qid !the_modtypetab let locate_tactic qid = SpTab.locate qid !the_tactictab let locate_dir qid = DirTab.locate qid !the_dirtab let locate_module qid = match locate_dir qid with | DirModule (_,(mp,_)) -> mp | _ -> raise Not_found let full_name_module qid = match locate_dir qid with | DirModule (dir,_) -> dir | _ -> raise Not_found let locate_section qid = match locate_dir qid with | DirOpenSection (dir, _) | DirClosedSection dir -> dir | _ -> raise Not_found let locate_all qid = List.fold_right (fun a l -> match a with TrueGlobal a -> a::l | _ -> l) (SpTab.find_prefixes qid !the_ccitab) [] let locate_extended_all qid = SpTab.find_prefixes qid !the_ccitab (* Derived functions 
*) let locate_constant qid = match locate_extended qid with | TrueGlobal (ConstRef kn) -> kn | _ -> raise Not_found let global_of_path sp = match SpTab.find sp !the_ccitab with | TrueGlobal ref -> ref | _ -> raise Not_found let extended_global_of_path sp = SpTab.find sp !the_ccitab let global r = let (loc,qid) = qualid_of_reference r in try match locate_extended qid with | TrueGlobal ref -> ref | SynDef _ -> user_err_loc (loc,"global", str "Unexpected reference to a notation: " ++ pr_qualid qid) with Not_found -> error_global_not_found_loc loc qid (* Exists functions ********************************************************) let exists_cci sp = SpTab.exists sp !the_ccitab let exists_dir dir = DirTab.exists dir !the_dirtab let exists_section = exists_dir let exists_module = exists_dir let exists_modtype sp = SpTab.exists sp !the_modtypetab (* Reverse locate functions ***********************************************) let path_of_global ref = match ref with | VarRef id -> make_path empty_dirpath id | _ -> Globrevtab.find (TrueGlobal ref) !the_globrevtab let dirpath_of_global ref = fst (repr_path (path_of_global ref)) let basename_of_global ref = snd (repr_path (path_of_global ref)) let path_of_syndef kn = Globrevtab.find (SynDef kn) !the_globrevtab let dirpath_of_module mp = MPmap.find mp !the_modrevtab let path_of_tactic kn = KNmap.find kn !the_tacticrevtab (* Shortest qualid functions **********************************************) let shortest_qualid_of_global ctx ref = match ref with | VarRef id -> make_qualid empty_dirpath id | _ -> let sp = Globrevtab.find (TrueGlobal ref) !the_globrevtab in SpTab.shortest_qualid ctx sp !the_ccitab let shortest_qualid_of_syndef ctx kn = let sp = path_of_syndef kn in SpTab.shortest_qualid ctx sp !the_ccitab let shortest_qualid_of_module mp = let dir = MPmap.find mp !the_modrevtab in DirTab.shortest_qualid Idset.empty dir !the_dirtab let shortest_qualid_of_modtype kn = let sp = MPmap.find kn !the_modtyperevtab in SpTab.shortest_qualid Idset.empty sp !the_modtypetab let shortest_qualid_of_tactic kn = let sp = KNmap.find kn !the_tacticrevtab in SpTab.shortest_qualid Idset.empty sp !the_tactictab let pr_global_env env ref = (* Il est important de laisser le let-in, car les streams s'valuent paresseusement : il faut forcer l'valuation pour capturer l'ventuelle leve d'une exception (le cas choit dans le debugger) *) let s = string_of_qualid (shortest_qualid_of_global env ref) in (str s) let global_inductive r = match global r with | IndRef ind -> ind | ref -> user_err_loc (loc_of_reference r,"global_inductive", pr_reference r ++ spc () ++ str "is not an inductive type") (********************************************************************) (********************************************************************) (* Registration of tables as a global table and rollback *) type frozen = ccitab * dirtab * kntab * kntab * globrevtab * mprevtab * knrevtab * knrevtab let init () = the_ccitab := SpTab.empty; the_dirtab := DirTab.empty; the_modtypetab := SpTab.empty; the_tactictab := SpTab.empty; the_globrevtab := Globrevtab.empty; the_modrevtab := MPmap.empty; the_modtyperevtab := MPmap.empty; the_tacticrevtab := KNmap.empty let freeze () = !the_ccitab, !the_dirtab, !the_modtypetab, !the_tactictab, !the_globrevtab, !the_modrevtab, !the_modtyperevtab, !the_tacticrevtab let unfreeze (ccit,dirt,mtyt,tact,globr,modr,mtyr,tacr) = the_ccitab := ccit; the_dirtab := dirt; the_modtypetab := mtyt; the_tactictab := tact; the_globrevtab := globr; the_modrevtab := modr; 
the_modtyperevtab := mtyr; the_tacticrevtab := tacr let _ = Summary.declare_summary "names" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } (* Deprecated synonyms *) let extended_locate = locate_extended let absolute_reference = global_of_path coq-8.4pl2/library/dischargedhypsmap.mli0000640000175000001440000000157112010532755017501 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* discharged_hyps -> unit val get_discharged_hyps : full_path -> discharged_hyps coq-8.4pl2/library/declare.mli0000640000175000001440000000603212010532755015376 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* variable_declaration -> object_name (** Declaration of global constructions i.e. Definition/Theorem/Axiom/Parameter/... *) type constant_declaration = constant_entry * logical_kind (** [declare_constant id cd] declares a global declaration (constant/parameter) with name [id] in the current section; it returns the full path of the declaration internal specify if the constant has been created by the kernel or by the user, and in the former case, if its errors should be silent *) type internal_flag = | KernelVerbose | KernelSilent | UserVerbose val declare_constant : ?internal:internal_flag -> identifier -> constant_declaration -> constant (** [declare_mind me] declares a block of inductive types with their constructors in the current section; it returns the path of the whole block (boolean must be true iff it is a record) *) val declare_mind : internal_flag -> mutual_inductive_entry -> object_name (** Hooks for XML output *) val set_xml_declare_variable : (object_name -> unit) -> unit val set_xml_declare_constant : (internal_flag * constant -> unit) -> unit val set_xml_declare_inductive : (internal_flag * object_name -> unit) -> unit (** Hook for the cache function of constants and inductives *) val add_cache_hook : (full_path -> unit) -> unit (** Declaration messages *) val definition_message : identifier -> unit val assumption_message : identifier -> unit val fixpoint_message : int array option -> identifier list -> unit val cofixpoint_message : identifier list -> unit val recursive_message : bool (** true = fixpoint *) -> int array option -> identifier list -> unit val exists_name : identifier -> bool coq-8.4pl2/library/decl_kinds.mli0000640000175000001440000000441412010532755016100 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* logical_kind val string_of_theorem_kind : theorem_kind -> string val string_of_definition_kind : locality * definition_object_kind -> string (** About locality *) val strength_of_global : global_reference -> locality val string_of_strength : locality -> string (** About recursive power of type declarations *) type recursivity_kind = | Finite (** = inductive *) | CoFinite (** = coinductive *) | BiFinite (** = non-recursive, like in "Record" definitions *) (** helper, converts to "finiteness flag" booleans *) val recursivity_flag_of_kind : recursivity_kind -> bool coq-8.4pl2/library/nameops.mli0000640000175000001440000000360212010532755015441 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Pp.std_ppcmds val 
pr_name : name -> Pp.std_ppcmds val make_ident : string -> int option -> identifier val repr_ident : identifier -> string * int option val atompart_of_id : identifier -> string (** remove trailing digits *) val root_of_id : identifier -> identifier (** remove trailing digits, ' and _ *) val add_suffix : identifier -> string -> identifier val add_prefix : string -> identifier -> identifier val has_subscript : identifier -> bool val lift_subscript : identifier -> identifier val forget_subscript : identifier -> identifier val out_name : name -> identifier val name_fold : (identifier -> 'a -> 'a) -> name -> 'a -> 'a val name_iter : (identifier -> unit) -> name -> unit val name_cons : name -> identifier list -> identifier list val name_app : (identifier -> identifier) -> name -> name val name_fold_map : ('a -> identifier -> 'a * identifier) -> 'a -> name -> 'a * name val pr_lab : label -> Pp.std_ppcmds (** some preset paths *) val default_library : dir_path (** This is the root of the standard library of Coq *) val coq_root : module_ident (** This is the default root prefix for developments which doesn't mention a root *) val default_root_prefix : dir_path (** Metavariables *) val pr_meta : Term.metavariable -> Pp.std_ppcmds val string_of_meta : Term.metavariable -> string coq-8.4pl2/library/goptions.mli0000640000175000001440000001373212010532755015646 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool -> std_ppcmds val synchronous : bool end) -> sig val active : string -> bool val elements : unit -> string list end (** The functor [MakeRefTable] declares a new table of objects of type [A.t] practically denoted by [reference]; the encoding function [encode : reference -> A.t] is typically a globalization function, possibly with some restriction checks; the function [member_message] say what to print when invoking the "Test Toto Titi foo." command; at the end [title] is the table name printed when invoking the "Print Toto Titi." command; [active] is roughly the internal version of the vernacular "Test ...": it tells if a given object is in the table. *) module MakeRefTable : functor (A : sig type t val encode : reference -> t val subst : substitution -> t -> t val printer : t -> std_ppcmds val key : option_name val title : string val member_message : t -> bool -> std_ppcmds val synchronous : bool end) -> sig val active : A.t -> bool val elements : unit -> A.t list end (** {6 Options. } *) (** These types and function are for declaring a new option of name [key] and access functions [read] and [write]; the parameter [name] is the option name used when printing the option value (command "Print Toto Titi." *) type 'a option_sig = { optsync : bool; (** whether the option is synchronous w.r.t to the section/module system. *) optdepr : bool; (** whether the option is DEPRECATED *) optname : string; (** a short string describing the option *) optkey : option_name; (** the low-level name of this option *) optread : unit -> 'a; optwrite : 'a -> unit } (** When an option is declared synchronous ([optsync] is [true]), the output is a synchronous write function. 
Otherwise it is [optwrite] *) type 'a write_function = 'a -> unit val declare_int_option : int option option_sig -> int option write_function val declare_bool_option : bool option_sig -> bool write_function val declare_string_option: string option_sig -> string write_function (** {6 Special functions supposed to be used only in vernacentries.ml } *) module OptionMap : Map.S with type key = option_name val get_string_table : option_name -> < add : string -> unit; remove : string -> unit; mem : string -> unit; print : unit > val get_ref_table : option_name -> < add : reference -> unit; remove : reference -> unit; mem : reference -> unit; print : unit > (** The first argument is a locality flag. [Some true] = "Local", [Some false]="Global". *) val set_int_option_value_gen : bool option -> option_name -> int option -> unit val set_bool_option_value_gen : bool option -> option_name -> bool -> unit val set_string_option_value_gen : bool option -> option_name -> string -> unit val unset_option_value_gen : bool option -> option_name -> unit val set_int_option_value : option_name -> int option -> unit val set_bool_option_value : option_name -> bool -> unit val set_string_option_value : option_name -> string -> unit val print_option_value : option_name -> unit val get_tables : unit -> Goptionstyp.option_state OptionMap.t val print_tables : unit -> unit val error_undeclared_key : option_name -> 'a coq-8.4pl2/library/states.mli0000640000175000001440000000254212010532755015304 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val extern_state : string -> unit type state val freeze : unit -> state val unfreeze : state -> unit (** {6 Rollback } *) (** [with_heavy_rollback f x] applies [f] to [x] and restores the state of the whole system as it was before the evaluation if an exception is raised. *) val with_heavy_rollback : ('a -> 'b) -> (exn -> exn) -> 'a -> 'b (** [with_state_protection f x] applies [f] to [x] and restores the state of the whole system as it was before the evaluation of f *) val with_state_protection : ('a -> 'b) -> 'a -> 'b coq-8.4pl2/library/nametab.mli0000640000175000001440000001436312010532755015414 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* full_user_name -> object_reference -> unit] Registers the [object_reference] to be referred to by the [full_user_name] (and its suffixes according to [visibility]). [full_user_name] can either be a [full_path] or a [dir_path]. } {- [exists : full_user_name -> bool] Is the [full_user_name] already atributed as an absolute user name of some object? } {- [locate : qualid -> object_reference] Finds the object referred to by [qualid] or raises [Not_found] } {- [full_name : qualid -> full_user_name] Finds the full user name referred to by [qualid] or raises [Not_found] } {- [shortest_qualid_of : object_reference -> user_name] The [user_name] can be for example the shortest non ambiguous [qualid] or the [full_user_name] or [identifier]. 
Such a function can also have a local context argument.}} *) exception GlobalizationError of qualid exception GlobalizationConstantError of qualid (** Raises a globalization error *) val error_global_not_found_loc : loc -> qualid -> 'a val error_global_not_found : qualid -> 'a val error_global_constant_not_found_loc : loc -> qualid -> 'a (** {6 Register visibility of things } *) (** The visibility can be registered either - for all suffixes not shorter then a given int -- when the object is loaded inside a module -- or - for a precise suffix, when the module containing (the module containing ...) the object is opened (imported) *) type visibility = Until of int | Exactly of int val push : visibility -> full_path -> global_reference -> unit val push_modtype : visibility -> full_path -> module_path -> unit val push_dir : visibility -> dir_path -> global_dir_reference -> unit val push_syndef : visibility -> full_path -> syndef_name -> unit type ltac_constant = kernel_name val push_tactic : visibility -> full_path -> ltac_constant -> unit (** {6 The following functions perform globalization of qualified names } *) (** These functions globalize a (partially) qualified name or fail with [Not_found] *) val locate : qualid -> global_reference val locate_extended : qualid -> extended_global_reference val locate_constant : qualid -> constant val locate_syndef : qualid -> syndef_name val locate_modtype : qualid -> module_path val locate_dir : qualid -> global_dir_reference val locate_module : qualid -> module_path val locate_section : qualid -> dir_path val locate_tactic : qualid -> ltac_constant (** These functions globalize user-level references into global references, like [locate] and co, but raise a nice error message in case of failure *) val global : reference -> global_reference val global_inductive : reference -> inductive (** These functions locate all global references with a given suffix; if [qualid] is valid as such, it comes first in the list *) val locate_all : qualid -> global_reference list val locate_extended_all : qualid -> extended_global_reference list (** Mapping a full path to a global reference *) val global_of_path : full_path -> global_reference val extended_global_of_path : full_path -> extended_global_reference (** {6 These functions tell if the given absolute name is already taken } *) val exists_cci : full_path -> bool val exists_modtype : full_path -> bool val exists_dir : dir_path -> bool val exists_section : dir_path -> bool (** deprecated synonym of [exists_dir] *) val exists_module : dir_path -> bool (** deprecated synonym of [exists_dir] *) (** {6 These functions locate qualids into full user names } *) val full_name_cci : qualid -> full_path val full_name_modtype : qualid -> full_path val full_name_module : qualid -> dir_path (** {6 Reverse lookup } Finding user names corresponding to the given internal name *) (** Returns the full path bound to a global reference or syntactic definition, and the (full) dirpath associated to a module path *) val path_of_syndef : syndef_name -> full_path val path_of_global : global_reference -> full_path val dirpath_of_module : module_path -> dir_path val path_of_tactic : ltac_constant -> full_path (** Returns in particular the dirpath or the basename of the full path associated to global reference *) val dirpath_of_global : global_reference -> dir_path val basename_of_global : global_reference -> identifier (** Printing of global references using names as short as possible *) val pr_global_env : Idset.t -> global_reference -> 
std_ppcmds (** The [shortest_qualid] functions given an object with [user_name] Coq.A.B.x, try to find the shortest among x, B.x, A.B.x and Coq.A.B.x that denotes the same object. *) val shortest_qualid_of_global : Idset.t -> global_reference -> qualid val shortest_qualid_of_syndef : Idset.t -> syndef_name -> qualid val shortest_qualid_of_modtype : module_path -> qualid val shortest_qualid_of_module : module_path -> qualid val shortest_qualid_of_tactic : ltac_constant -> qualid (** Deprecated synonyms *) val extended_locate : qualid -> extended_global_reference (*= locate_extended *) val absolute_reference : full_path -> global_reference (** = global_of_path *) coq-8.4pl2/library/heads.ml0000640000175000001440000001442112101577526014721 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* phi(x))] where [g] is [fun f => g O] does not launch the evaluation of [phi(0)] and the head of [h] is declared unknown). *) type rigid_head_kind = | RigidParameter of constant (* a Const without body *) | RigidVar of variable (* a Var without body *) | RigidType (* an inductive, a product or a sort *) type head_approximation = | RigidHead of rigid_head_kind | ConstructorHead | FlexibleHead of int * int * int * bool (* [true] if a surrounding case *) | NotImmediatelyComputableHead (** Registration as global tables and rollback. *) module Evalreford = struct type t = evaluable_global_reference let compare x y = let make_name = function | EvalConstRef con -> EvalConstRef(constant_of_kn(canonical_con con)) | k -> k in Pervasives.compare (make_name x) (make_name y) end module Evalrefmap = Map.Make (Evalreford) let head_map = ref Evalrefmap.empty let init () = head_map := Evalrefmap.empty let freeze () = !head_map let unfreeze hm = head_map := hm let _ = Summary.declare_summary "Head_decl" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let variable_head id = Evalrefmap.find (EvalVarRef id) !head_map let constant_head cst = Evalrefmap.find (EvalConstRef cst) !head_map let kind_of_head env t = let rec aux k l t b = match kind_of_term (Reduction.whd_betaiotazeta t) with | Rel n when n > k -> NotImmediatelyComputableHead | Rel n -> FlexibleHead (k,k+1-n,List.length l,b) | Var id -> (try on_subterm k l b (variable_head id) with Not_found -> (* a goal variable *) match pi2 (lookup_named id env) with | Some c -> aux k l c b | None -> NotImmediatelyComputableHead) | Const cst -> (try on_subterm k l b (constant_head cst) with Not_found -> assert false) | Construct _ | CoFix _ -> if b then NotImmediatelyComputableHead else ConstructorHead | Sort _ | Ind _ | Prod _ -> RigidHead RigidType | Cast (c,_,_) -> aux k l c b | Lambda (_,_,c) when l = [] -> assert (not b); aux (k+1) [] c b | Lambda (_,_,c) -> aux k (List.tl l) (subst1 (List.hd l) c) b | LetIn _ -> assert false | Meta _ | Evar _ -> NotImmediatelyComputableHead | App (c,al) -> aux k (Array.to_list al @ l) c b | Case (_,_,c,_) -> aux k [] c true | Fix ((i,j),_) -> let n = i.(j) in try aux k [] (List.nth l n) true with Failure _ -> FlexibleHead (k + n + 1, k + n + 1, 0, true) and on_subterm k l with_case = function | FlexibleHead (n,i,q,with_subcase) -> let m = List.length l in let k',rest,a = if n > m then (* eta-expansion *) let a = if i <= m then (* we pick the head in the existing arguments *) lift (n-m) (List.nth l (i-1)) else (* we pick the head in the added arguments *) mkRel (n-i+1) in k+n-m,[],a else 
(* enough arguments to [cst] *) k,list_skipn n l,List.nth l (i-1) in let l' = list_tabulate (fun _ -> mkMeta 0) q @ rest in aux k' l' a (with_subcase or with_case) | ConstructorHead when with_case -> NotImmediatelyComputableHead | x -> x in aux 0 [] t false let compute_head = function | EvalConstRef cst -> (match constant_opt_value (Global.env()) cst with | None -> RigidHead (RigidParameter cst) | Some c -> kind_of_head (Global.env()) c) | EvalVarRef id -> (match pi2 (Global.lookup_named id) with | Some c when not (Decls.variable_opacity id) -> kind_of_head (Global.env()) c | _ -> RigidHead (RigidVar id)) let is_rigid env t = match kind_of_head env t with | RigidHead _ | ConstructorHead -> true | _ -> false (** Registration of heads as an object *) let load_head _ (_,(ref,(k:head_approximation))) = head_map := Evalrefmap.add ref k !head_map let cache_head o = load_head 1 o let subst_head_approximation subst = function | RigidHead (RigidParameter cst) as k -> let cst,c = subst_con subst cst in if isConst c && eq_constant (destConst c) cst then (* A change of the prefix of the constant *) k else (* A substitution of the constant by a functor argument *) kind_of_head (Global.env()) c | x -> x let subst_head (subst,(ref,k)) = (subst_evaluable_reference subst ref, subst_head_approximation subst k) let discharge_head (_,(ref,k)) = match ref with | EvalConstRef cst -> Some (EvalConstRef (pop_con cst), k) | EvalVarRef id -> None let rebuild_head (ref,k) = (ref, compute_head ref) type head_obj = evaluable_global_reference * head_approximation let inHead : head_obj -> obj = declare_object {(default_object "HEAD") with cache_function = cache_head; load_function = load_head; subst_function = subst_head; classify_function = (fun x -> Substitute x); discharge_function = discharge_head; rebuild_function = rebuild_head } let declare_head c = let hd = compute_head c in add_anonymous_leaf (inHead (c,hd)) (** Printing *) let pr_head = function | RigidHead (RigidParameter cst) -> str "rigid constant " ++ pr_con cst | RigidHead (RigidType) -> str "rigid type" | RigidHead (RigidVar id) -> str "rigid variable " ++ pr_id id | ConstructorHead -> str "constructor" | FlexibleHead (k,n,p,b) -> int n ++ str "th of " ++ int k ++ str " binders applied to " ++ int p ++ str " arguments" ++ (if b then str " (with case)" else mt()) | NotImmediatelyComputableHead -> str "unknown" coq-8.4pl2/library/decls.ml0000640000175000001440000000507412010532755014725 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !vartab); Summary.unfreeze_function = (fun ft -> vartab := ft); Summary.init_function = (fun () -> vartab := Idmap.empty) } let add_variable_data id o = vartab := Idmap.add id o !vartab let variable_path id = let (p,_,_,_) = Idmap.find id !vartab in p let variable_opacity id = let (_,opaq,_,_) = Idmap.find id !vartab in opaq let variable_kind id = let (_,_,_,k) = Idmap.find id !vartab in k let variable_constraints id = let (_,_,cst,_) = Idmap.find id !vartab in cst let variable_secpath id = let dir = drop_dirpath_prefix (Lib.library_dp()) (variable_path id) in make_qualid dir id let variable_exists id = Idmap.mem id !vartab (** Datas associated to global parameters and constants *) let csttab = ref (Cmap.empty : logical_kind Cmap.t) let _ = Summary.declare_summary "CONSTANT" { Summary.freeze_function = (fun () -> !csttab); Summary.unfreeze_function = (fun ft -> csttab := ft); Summary.init_function = (fun () -> csttab := 
Cmap.empty) } let add_constant_kind kn k = csttab := Cmap.add kn k !csttab let constant_kind kn = Cmap.find kn !csttab (** Miscellaneous functions. *) let initialize_named_context_for_proof () = let sign = Global.named_context () in List.fold_right (fun (id,c,t as d) signv -> let d = if variable_opacity id then (id,None,t) else d in Environ.push_named_context_val d signv) sign Environ.empty_named_context_val let last_section_hyps dir = fold_named_context (fun (id,_,_) sec_ids -> try if dir=variable_path id then id::sec_ids else sec_ids with Not_found -> sec_ids) (Environ.named_context (Global.env())) ~init:[] coq-8.4pl2/library/declaremods.mli0000640000175000001440000001140512010532755016261 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string (** Which inline annotations should we honor, either None or the ones whose level is less or equal to the given integer *) type inline = | NoInline | DefaultInline | InlineAt of int (** The type of annotations for functor applications *) type funct_app_annot = { ann_inline : inline; ann_scope_subst : scope_subst } type 'a annotated = ('a * funct_app_annot) (** {6 Modules } *) (** [declare_module interp_modtype interp_modexpr id fargs typ expr] declares module [id], with type constructed by [interp_modtype] from functor arguments [fargs] and [typ] and with module body constructed by [interp_modtype] from functor arguments [fargs] and by [interp_modexpr] from [expr]. At least one of [typ], [expr] must be non-empty. The [bool] in [typ] tells if the module must be abstracted [true] with respect to the module type or merely matched without any restriction [false]. *) val declare_module : (env -> 'modast -> module_struct_entry) -> (env -> 'modast -> module_struct_entry) -> (env -> 'modast -> module_struct_entry * bool) -> identifier -> (identifier located list * ('modast annotated)) list -> ('modast annotated) module_signature -> ('modast annotated) list -> module_path val start_module : (env -> 'modast -> module_struct_entry) -> bool option -> identifier -> (identifier located list * ('modast annotated)) list -> ('modast annotated) module_signature -> module_path val end_module : unit -> module_path (** {6 Module types } *) val declare_modtype : (env -> 'modast -> module_struct_entry) -> (env -> 'modast -> module_struct_entry * bool) -> identifier -> (identifier located list * ('modast annotated)) list -> ('modast annotated) list -> ('modast annotated) list -> module_path val start_modtype : (env -> 'modast -> module_struct_entry) -> identifier -> (identifier located list * ('modast annotated)) list -> ('modast annotated) list -> module_path val end_modtype : unit -> module_path (** {6 ... } *) (** Objects of a module. They come in two lists: the substitutive ones and the other *) val module_objects : module_path -> library_segment (** {6 Libraries i.e. modules on disk } *) type library_name = dir_path type library_objects val register_library : library_name -> Safe_typing.compiled_library -> library_objects -> Digest.t -> unit val start_library : library_name -> unit val end_library : library_name -> Safe_typing.compiled_library * library_objects (** set a function to be executed at end_library *) val set_end_library_hook : (unit -> unit) -> unit (** [really_import_module mp] opens the module [mp] (in a Caml sense). It modifies Nametab and performs the [open_object] function for every object of the module. 
*) val really_import_module : module_path -> unit (** [import_module export mp] is a synchronous version of [really_import_module]. If [export] is [true], the module is also opened every time the module containing it is. *) val import_module : bool -> module_path -> unit (** Include *) val declare_include : (env -> 'struct_expr -> module_struct_entry * bool) -> ('struct_expr annotated) list -> unit (** {6 ... } *) (** [iter_all_segments] iterate over all segments, the modules' segments first and then the current segment. Modules are presented in an arbitrary order. The given function is applied to all leaves (together with their section path). *) val iter_all_segments : (object_name -> obj -> unit) -> unit val debug_print_modtab : unit -> Pp.std_ppcmds (*i val debug_print_modtypetab : unit -> Pp.std_ppcmds i*) (** For translator *) val process_module_bindings : module_ident list -> (mod_bound_id * (module_struct_entry annotated)) list -> unit (** For Printer *) val process_module_seb_binding : mod_bound_id -> Declarations.struct_expr_body -> unit coq-8.4pl2/library/library.mllib0000640000175000001440000000021611651570606015766 0ustar notinusersNameops Libnames Libobject Summary Nametab Global Lib Declaremods Library States Decl_kinds Dischargedhypsmap Goptions Decls Heads Assumptionscoq-8.4pl2/library/libobject.ml0000640000175000001440000001443412010532755015570 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit; load_function : int -> object_name * 'a -> unit; open_function : int -> object_name * 'a -> unit; classify_function : 'a -> 'a substitutivity; subst_function : substitution * 'a -> 'a; discharge_function : object_name * 'a -> 'a option; rebuild_function : 'a -> 'a } let yell s = anomaly s let default_object s = { object_name = s; cache_function = (fun _ -> ()); load_function = (fun _ _ -> ()); open_function = (fun _ _ -> ()); subst_function = (fun _ -> yell ("The object "^s^" does not know how to substitute!")); classify_function = (fun obj -> Keep obj); discharge_function = (fun _ -> None); rebuild_function = (fun x -> x)} (* The suggested object declaration is the following: declare_object { (default_object "MY OBJECT") with cache_function = fun (sp,a) -> Mytbl.add sp a} and the listed functions are only those which definitions actually differ from the default. This helps introducing new functions in objects. 
*) let ident_subst_function (_,a) = a type obj = Dyn.t (* persistent dynamic objects *) type dynamic_object_declaration = { dyn_cache_function : object_name * obj -> unit; dyn_load_function : int -> object_name * obj -> unit; dyn_open_function : int -> object_name * obj -> unit; dyn_subst_function : substitution * obj -> obj; dyn_classify_function : obj -> obj substitutivity; dyn_discharge_function : object_name * obj -> obj option; dyn_rebuild_function : obj -> obj } let object_tag lobj = Dyn.tag lobj let cache_tab = (Hashtbl.create 17 : (string,dynamic_object_declaration) Hashtbl.t) let declare_object_full odecl = let na = odecl.object_name in let (infun,outfun) = Dyn.create na in let cacher (oname,lobj) = if Dyn.tag lobj = na then odecl.cache_function (oname,outfun lobj) else anomaly "somehow we got the wrong dynamic object in the cachefun" and loader i (oname,lobj) = if Dyn.tag lobj = na then odecl.load_function i (oname,outfun lobj) else anomaly "somehow we got the wrong dynamic object in the loadfun" and opener i (oname,lobj) = if Dyn.tag lobj = na then odecl.open_function i (oname,outfun lobj) else anomaly "somehow we got the wrong dynamic object in the openfun" and substituter (sub,lobj) = if Dyn.tag lobj = na then infun (odecl.subst_function (sub,outfun lobj)) else anomaly "somehow we got the wrong dynamic object in the substfun" and classifier lobj = if Dyn.tag lobj = na then match odecl.classify_function (outfun lobj) with | Dispose -> Dispose | Substitute obj -> Substitute (infun obj) | Keep obj -> Keep (infun obj) | Anticipate (obj) -> Anticipate (infun obj) else anomaly "somehow we got the wrong dynamic object in the classifyfun" and discharge (oname,lobj) = if Dyn.tag lobj = na then Option.map infun (odecl.discharge_function (oname,outfun lobj)) else anomaly "somehow we got the wrong dynamic object in the dischargefun" and rebuild lobj = if Dyn.tag lobj = na then infun (odecl.rebuild_function (outfun lobj)) else anomaly "somehow we got the wrong dynamic object in the rebuildfun" in Hashtbl.add cache_tab na { dyn_cache_function = cacher; dyn_load_function = loader; dyn_open_function = opener; dyn_subst_function = substituter; dyn_classify_function = classifier; dyn_discharge_function = discharge; dyn_rebuild_function = rebuild }; (infun,outfun) let declare_object odecl = fst (declare_object_full odecl) let missing_tab = (Hashtbl.create 17 : (string, unit) Hashtbl.t) (* this function describes how the cache, load, open, and export functions are triggered. In relaxed mode, this function just return a meaningless value instead of raising an exception when they fail. 
*) let apply_dyn_fun deflt f lobj = let tag = object_tag lobj in try let dodecl = try Hashtbl.find cache_tab tag with Not_found -> failwith "local to_apply_dyn_fun" in f dodecl with Failure "local to_apply_dyn_fun" -> if not (!relax_flag || Hashtbl.mem missing_tab tag) then begin Pp.msg_warning (Pp.str ("Cannot find library functions for an object with tag " ^ tag ^ " (a plugin may be missing)")); Hashtbl.add missing_tab tag () end; deflt let cache_object ((_,lobj) as node) = apply_dyn_fun () (fun d -> d.dyn_cache_function node) lobj let load_object i ((_,lobj) as node) = apply_dyn_fun () (fun d -> d.dyn_load_function i node) lobj let open_object i ((_,lobj) as node) = apply_dyn_fun () (fun d -> d.dyn_open_function i node) lobj let subst_object ((_,lobj) as node) = apply_dyn_fun lobj (fun d -> d.dyn_subst_function node) lobj let classify_object lobj = apply_dyn_fun Dispose (fun d -> d.dyn_classify_function lobj) lobj let discharge_object ((_,lobj) as node) = apply_dyn_fun None (fun d -> d.dyn_discharge_function node) lobj let rebuild_object lobj = apply_dyn_fun lobj (fun d -> d.dyn_rebuild_function lobj) lobj coq-8.4pl2/library/doc.tex0000640000175000001440000000100006771170177014566 0ustar notinusers \newpage \section*{The Coq library} \ocwsection \label{library} This chapter describes the \Coq\ library, which is made of two parts: \begin{itemize} \item a general mechanism to keep a trace of all operations and of the state of the system, with backtrack capabilities; \item a global environment for the CCI, with functions to export and import compiled modules. \end{itemize} The modules of the library are organized as follows. \bigskip \begin{center}\epsfig{file=library.dep.ps}\end{center} coq-8.4pl2/library/summary.ml0000640000175000001440000000352612010532755015330 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a; unfreeze_function : 'a -> unit; init_function : unit -> unit } let summaries = (Hashtbl.create 17 : (string, Dyn.t summary_declaration) Hashtbl.t) let internal_declare_summary sumname sdecl = let (infun,outfun) = Dyn.create sumname in let dyn_freeze () = infun (sdecl.freeze_function()) and dyn_unfreeze sum = sdecl.unfreeze_function (outfun sum) and dyn_init = sdecl.init_function in let ddecl = { freeze_function = dyn_freeze; unfreeze_function = dyn_unfreeze; init_function = dyn_init } in if Hashtbl.mem summaries sumname then anomalylabstrm "Summary.declare_summary" (str "Cannot declare a summary twice: " ++ str sumname); Hashtbl.add summaries sumname ddecl let declare_summary sumname decl = internal_declare_summary (sumname^"-SUMMARY") decl type frozen = Dyn.t Stringmap.t let freeze_summaries () = let m = ref Stringmap.empty in Hashtbl.iter (fun id decl -> m := Stringmap.add id (decl.freeze_function()) !m) summaries; !m let unfreeze_summaries fs = Hashtbl.iter (fun id decl -> try decl.unfreeze_function (Stringmap.find id fs) with Not_found -> decl.init_function()) summaries let init_summaries () = Hashtbl.iter (fun _ decl -> decl.init_function()) summaries coq-8.4pl2/library/nameops.ml0000640000175000001440000001001312010532755015262 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* str "_" | Name id -> pr_id id (* Utilities *) let code_of_0 = Char.code '0' let code_of_9 = Char.code '9' let cut_ident skip_quote s = let s = string_of_id s in let slen = String.length s 
in (* [n'] is the position of the first non nullary digit *) let rec numpart n n' = if n = 0 then (* ident made of _ and digits only [and ' if skip_quote]: don't cut it *) slen else let c = Char.code (String.get s (n-1)) in if c = code_of_0 && n <> slen then numpart (n-1) n' else if code_of_0 <= c && c <= code_of_9 then numpart (n-1) (n-1) else if skip_quote & (c = Char.code '\'' || c = Char.code '_') then numpart (n-1) (n-1) else n' in numpart slen slen let repr_ident s = let numstart = cut_ident false s in let s = string_of_id s in let slen = String.length s in if numstart = slen then (s, None) else (String.sub s 0 numstart, Some (int_of_string (String.sub s numstart (slen - numstart)))) let make_ident sa = function | Some n -> let c = Char.code (String.get sa (String.length sa -1)) in let s = if c < code_of_0 or c > code_of_9 then sa ^ (string_of_int n) else sa ^ "_" ^ (string_of_int n) in id_of_string s | None -> id_of_string (String.copy sa) let root_of_id id = let suffixstart = cut_ident true id in id_of_string (String.sub (string_of_id id) 0 suffixstart) (* Rem: semantics is a bit different, if an ident starts with toto00 then after successive renamings it comes to toto09, then it goes on with toto10 *) let lift_subscript id = let id = string_of_id id in let len = String.length id in let rec add carrypos = let c = id.[carrypos] in if is_digit c then if c = '9' then begin assert (carrypos>0); add (carrypos-1) end else begin let newid = String.copy id in String.fill newid (carrypos+1) (len-1-carrypos) '0'; newid.[carrypos] <- Char.chr (Char.code c + 1); newid end else begin let newid = id^"0" in if carrypos < len-1 then begin String.fill newid (carrypos+1) (len-1-carrypos) '0'; newid.[carrypos+1] <- '1' end; newid end in id_of_string (add (len-1)) let has_subscript id = let id = string_of_id id in is_digit (id.[String.length id - 1]) let forget_subscript id = let numstart = cut_ident false id in let newid = String.make (numstart+1) '0' in String.blit (string_of_id id) 0 newid 0 numstart; (id_of_string newid) let add_suffix id s = id_of_string (string_of_id id ^ s) let add_prefix s id = id_of_string (s ^ string_of_id id) let atompart_of_id id = fst (repr_ident id) (* Names *) let out_name = function | Name id -> id | Anonymous -> failwith "out_name: expects a defined name" let name_fold f na a = match na with | Name id -> f id a | Anonymous -> a let name_iter f na = name_fold (fun x () -> f x) na () let name_cons na l = match na with | Anonymous -> l | Name id -> id::l let name_app f = function | Name id -> Name (f id) | Anonymous -> Anonymous let name_fold_map f e = function | Name id -> let (e,id) = f e id in (e,Name id) | Anonymous -> e,Anonymous let pr_lab l = str (string_of_label l) let default_library = Names.initial_dir (* = ["Top"] *) (*s Roots of the space of absolute names *) let coq_root = id_of_string "Coq" let default_root_prefix = make_dirpath [] (* Metavariables *) let pr_meta = Pp.int let string_of_meta = string_of_int coq-8.4pl2/library/goptions.ml0000640000175000001440000003017012010532755015470 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit method remove : 'a -> unit method mem : 'a -> unit method print : unit end module MakeTable = functor (A : sig type t type key val table : (string * key table_of_A) list ref val encode : key -> t val subst : substitution -> t -> t val printer : t -> std_ppcmds val key : option_name val title : string val member_message 
: t -> bool -> std_ppcmds val synchronous : bool end) -> struct type option_mark = | GOadd | GOrmv let nick = nickname A.key let _ = if List.mem_assoc nick !A.table then error "Sorry, this table name is already used." module MySet = Set.Make (struct type t = A.t let compare = compare end) let t = ref (MySet.empty : MySet.t) let _ = if A.synchronous then let freeze () = !t in let unfreeze c = t := c in let init () = t := MySet.empty in Summary.declare_summary nick { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let (add_option,remove_option) = if A.synchronous then let cache_options (_,(f,p)) = match f with | GOadd -> t := MySet.add p !t | GOrmv -> t := MySet.remove p !t in let load_options i o = if i=1 then cache_options o in let subst_options (subst,(f,p as obj)) = let p' = A.subst subst p in if p' == p then obj else (f,p') in let inGo : option_mark * A.t -> obj = Libobject.declare_object {(Libobject.default_object nick) with Libobject.load_function = load_options; Libobject.open_function = load_options; Libobject.cache_function = cache_options; Libobject.subst_function = subst_options; Libobject.classify_function = (fun x -> Substitute x)} in ((fun c -> Lib.add_anonymous_leaf (inGo (GOadd, c))), (fun c -> Lib.add_anonymous_leaf (inGo (GOrmv, c)))) else ((fun c -> t := MySet.add c !t), (fun c -> t := MySet.remove c !t)) let print_table table_name printer table = msg (str table_name ++ (hov 0 (if MySet.is_empty table then str "None" ++ fnl () else MySet.fold (fun a b -> printer a ++ spc () ++ b) table (mt ()) ++ fnl ()))) class table_of_A () = object method add x = add_option (A.encode x) method remove x = remove_option (A.encode x) method mem x = let y = A.encode x in let answer = MySet.mem y !t in msg (A.member_message y answer ++ fnl ()) method print = print_table A.title A.printer !t end let _ = A.table := (nick,new table_of_A ())::!A.table let active c = MySet.mem c !t let elements () = MySet.elements !t end let string_table = ref [] let get_string_table k = List.assoc (nickname k) !string_table module type StringConvertArg = sig val key : option_name val title : string val member_message : string -> bool -> std_ppcmds val synchronous : bool end module StringConvert = functor (A : StringConvertArg) -> struct type t = string type key = string let table = string_table let encode x = x let subst _ x = x let printer = str let key = A.key let title = A.title let member_message = A.member_message let synchronous = A.synchronous end module MakeStringTable = functor (A : StringConvertArg) -> MakeTable (StringConvert(A)) let ref_table = ref [] let get_ref_table k = List.assoc (nickname k) !ref_table module type RefConvertArg = sig type t val encode : reference -> t val subst : substitution -> t -> t val printer : t -> std_ppcmds val key : option_name val title : string val member_message : t -> bool -> std_ppcmds val synchronous : bool end module RefConvert = functor (A : RefConvertArg) -> struct type t = A.t type key = reference let table = ref_table let encode = A.encode let subst = A.subst let printer = A.printer let key = A.key let title = A.title let member_message = A.member_message let synchronous = A.synchronous end module MakeRefTable = functor (A : RefConvertArg) -> MakeTable (RefConvert(A)) (****************************************************************************) (* 2- Flags. 
*) type 'a option_sig = { optsync : bool; optdepr : bool; optname : string; optkey : option_name; optread : unit -> 'a; optwrite : 'a -> unit } type option_type = bool * (unit -> option_value) -> (option_value -> unit) module OptionMap = Map.Make (struct type t = option_name let compare = compare end) let value_tab = ref OptionMap.empty (* This raises Not_found if option of key [key] is unknown *) let get_option key = OptionMap.find key !value_tab let check_key key = try let _ = get_option key in error "Sorry, this option name is already used." with Not_found -> if List.mem_assoc (nickname key) !string_table or List.mem_assoc (nickname key) !ref_table then error "Sorry, this option name is already used." open Summary open Libobject open Lib let declare_option cast uncast { optsync=sync; optdepr=depr; optname=name; optkey=key; optread=read; optwrite=write } = check_key key; let default = read() in (* spiwack: I use two spaces in the nicknames of "local" and "global" objects. That way I shouldn't collide with [nickname key] for any [key]. As [key]-s are lists of strings *without* spaces. *) let (write,lwrite,gwrite) = if sync then let ldecl_obj = (* "Local": doesn't survive section or modules. *) declare_object {(default_object ("L "^nickname key)) with cache_function = (fun (_,v) -> write v); classify_function = (fun _ -> Dispose)} in let decl_obj = (* default locality: survives sections but not modules. *) declare_object {(default_object (nickname key)) with cache_function = (fun (_,v) -> write v); classify_function = (fun _ -> Dispose); discharge_function = (fun (_,v) -> Some v)} in let gdecl_obj = (* "Global": survives section and modules. *) declare_object {(default_object ("G "^nickname key)) with cache_function = (fun (_,v) -> write v); classify_function = (fun v -> Substitute v); subst_function = (fun (_,v) -> v); discharge_function = (fun (_,v) -> Some v); load_function = (fun _ (_,v) -> write v)} in let _ = declare_summary (nickname key) { freeze_function = read; unfreeze_function = write; init_function = (fun () -> write default) } in begin fun v -> add_anonymous_leaf (decl_obj v) end , begin fun v -> add_anonymous_leaf (ldecl_obj v) end , begin fun v -> add_anonymous_leaf (gdecl_obj v) end else write,write,write in let cread () = cast (read ()) in let cwrite v = write (uncast v) in let clwrite v = lwrite (uncast v) in let cgwrite v = gwrite (uncast v) in value_tab := OptionMap.add key (name, depr, (sync,cread,cwrite,clwrite,cgwrite)) !value_tab; write type 'a write_function = 'a -> unit let declare_int_option = declare_option (fun v -> IntValue v) (function IntValue v -> v | _ -> anomaly "async_option") let declare_bool_option = declare_option (fun v -> BoolValue v) (function BoolValue v -> v | _ -> anomaly "async_option") let declare_string_option = declare_option (fun v -> StringValue v) (function StringValue v -> v | _ -> anomaly "async_option") (* 3- User accessible commands *) (* Setting values of options *) let set_option_value locality check_and_cast key v = let (name, depr, (_,read,write,lwrite,gwrite)) = try get_option key with Not_found -> error ("There is no option "^(nickname key)^".") in let write = match locality with | None -> write | Some true -> lwrite | Some false -> gwrite in write (check_and_cast v (read ())) let bad_type_error () = error "Bad type of value for this option." 
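(* The check_* functions below verify that the value provided by the user
   matches the type of the declared option, raising [bad_type_error]
   otherwise; [check_unset_value] computes the value installed by [Unset]. *)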
let check_int_value v = function | IntValue _ -> IntValue v | _ -> bad_type_error () let check_bool_value v = function | BoolValue _ -> BoolValue v | _ -> bad_type_error () let check_string_value v = function | StringValue _ -> StringValue v | _ -> bad_type_error () let check_unset_value v = function | BoolValue _ -> BoolValue false | IntValue _ -> IntValue None | _ -> bad_type_error () (* Nota: For compatibility reasons, some errors are treated as warning. This allows a script to refer to an option that doesn't exist anymore *) let set_int_option_value_gen locality = set_option_value locality check_int_value let set_bool_option_value_gen locality key v = try set_option_value locality check_bool_value key v with UserError (_,s) -> Flags.if_warn msg_warning s let set_string_option_value_gen locality = set_option_value locality check_string_value let unset_option_value_gen locality key = try set_option_value locality check_unset_value key () with UserError (_,s) -> Flags.if_warn msg_warning s let set_int_option_value = set_int_option_value_gen None let set_bool_option_value = set_bool_option_value_gen None let set_string_option_value = set_string_option_value_gen None (* Printing options/tables *) let msg_option_value (name,v) = match v with | BoolValue true -> str "true" | BoolValue false -> str "false" | IntValue (Some n) -> int n | IntValue None -> str "undefined" | StringValue s -> str s (* | IdentValue r -> pr_global_env Idset.empty r *) let print_option_value key = let (name, depr, (_,read,_,_,_)) = get_option key in let s = read () in match s with | BoolValue b -> msg (str ("The "^name^" mode is "^(if b then "on" else "off")) ++ fnl ()) | _ -> msg (str ("Current value of "^name^" is ") ++ msg_option_value (name,s) ++ fnl ()) let get_tables () = let tables = !value_tab in let fold key (name, depr, (sync,read,_,_,_)) accu = let state = { opt_sync = sync; opt_name = name; opt_depr = depr; opt_value = read (); } in OptionMap.add key state accu in OptionMap.fold fold tables OptionMap.empty let print_tables () = let print_option key name value depr = let msg = str (" "^(nickname key)^": ") ++ msg_option_value (name, value) in if depr then msg ++ str " [DEPRECATED]" ++ fnl () else msg ++ fnl () in msg (str "Synchronous options:" ++ fnl () ++ OptionMap.fold (fun key (name, depr, (sync,read,_,_,_)) p -> if sync then p ++ print_option key name (read ()) depr else p) !value_tab (mt ()) ++ str "Asynchronous options:" ++ fnl () ++ OptionMap.fold (fun key (name, depr, (sync,read,_,_,_)) p -> if sync then p else p ++ print_option key name (read ()) depr) !value_tab (mt ()) ++ str "Tables:" ++ fnl () ++ List.fold_right (fun (nickkey,_) p -> p ++ str (" "^nickkey) ++ fnl ()) !string_table (mt ()) ++ List.fold_right (fun (nickkey,_) p -> p ++ str (" "^nickkey) ++ fnl ()) !ref_table (mt ()) ++ fnl () ) coq-8.4pl2/library/libnames.mli0000640000175000001440000001450312010532755015573 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool val isConstRef : global_reference -> bool val isIndRef : global_reference -> bool val isConstructRef : global_reference -> bool val eq_gr : global_reference -> global_reference -> bool val canonical_gr : global_reference -> global_reference val destVarRef : global_reference -> variable val destConstRef : global_reference -> constant val destIndRef : global_reference -> inductive val destConstructRef : global_reference -> constructor val subst_constructor : 
substitution -> constructor -> constructor * constr val subst_global : substitution -> global_reference -> global_reference * constr (** Turn a global reference into a construction *) val constr_of_global : global_reference -> constr (** Turn a construction denoting a global reference into a global reference; raise [Not_found] if not a global reference *) val global_of_constr : constr -> global_reference (** Obsolete synonyms for constr_of_global and global_of_constr *) val constr_of_reference : global_reference -> constr val reference_of_constr : constr -> global_reference module RefOrdered : sig type t = global_reference val compare : global_reference -> global_reference -> int end module RefOrdered_env : sig type t = global_reference val compare : global_reference -> global_reference -> int end module Refset : Set.S with type elt = global_reference module Refmap : Map.S with type key = global_reference (** {6 Extended global references } *) type syndef_name = kernel_name type extended_global_reference = | TrueGlobal of global_reference | SynDef of syndef_name module ExtRefOrdered : sig type t = extended_global_reference val compare : t -> t -> int end (** {6 Dirpaths } *) val pr_dirpath : dir_path -> Pp.std_ppcmds val dirpath_of_string : string -> dir_path val string_of_dirpath : dir_path -> string (** Pop the suffix of a [dir_path] *) val pop_dirpath : dir_path -> dir_path (** Pop the suffix n times *) val pop_dirpath_n : int -> dir_path -> dir_path (** Give the immediate prefix and basename of a [dir_path] *) val split_dirpath : dir_path -> dir_path * identifier val add_dirpath_suffix : dir_path -> module_ident -> dir_path val add_dirpath_prefix : module_ident -> dir_path -> dir_path val chop_dirpath : int -> dir_path -> dir_path * dir_path val append_dirpath : dir_path -> dir_path -> dir_path val drop_dirpath_prefix : dir_path -> dir_path -> dir_path val is_dirpath_prefix_of : dir_path -> dir_path -> bool module Dirset : Set.S with type elt = dir_path module Dirmap : Map.S with type key = dir_path (** {6 Full paths are {e absolute} paths of declarations } *) type full_path (** Constructors of [full_path] *) val make_path : dir_path -> identifier -> full_path (** Destructors of [full_path] *) val repr_path : full_path -> dir_path * identifier val dirpath : full_path -> dir_path val basename : full_path -> identifier (** Parsing and printing of section path as ["coq_root.module.id"] *) val path_of_string : string -> full_path val string_of_path : full_path -> string val pr_path : full_path -> std_ppcmds module Spmap : Map.S with type key = full_path val restrict_path : int -> full_path -> full_path (** {6 Temporary function to brutally form kernel names from section paths } *) val encode_mind : dir_path -> identifier -> mutual_inductive val decode_mind : mutual_inductive -> dir_path * identifier val encode_con : dir_path -> identifier -> constant val decode_con : constant -> dir_path * identifier (** {6 ... } *) (** A [qualid] is a partially qualified ident; it includes fully qualified names (= absolute names) and all intermediate partial qualifications of absolute names, including single identifiers. The [qualid] are used to access the name table. 
*) type qualid val make_qualid : dir_path -> identifier -> qualid val repr_qualid : qualid -> dir_path * identifier val pr_qualid : qualid -> std_ppcmds val string_of_qualid : qualid -> string val qualid_of_string : string -> qualid (** Turns an absolute name, a dirpath, or an identifier into a qualified name denoting the same name *) val qualid_of_path : full_path -> qualid val qualid_of_dirpath : dir_path -> qualid val qualid_of_ident : identifier -> qualid (** Both names are passed to objects: a "semantic" [kernel_name], which can be substituted and a "syntactic" [full_path] which can be printed *) type object_name = full_path * kernel_name type object_prefix = dir_path * (module_path * dir_path) val make_oname : object_prefix -> identifier -> object_name (** to this type are mapped [dir_path]'s in the nametab *) type global_dir_reference = | DirOpenModule of object_prefix | DirOpenModtype of object_prefix | DirOpenSection of object_prefix | DirModule of object_prefix | DirClosedSection of dir_path (** this won't last long I hope! *) (** {6 ... } *) (** A [reference] is the user-level notion of name. It denotes either a global name (referred either by a qualified name or by a single name) or a variable *) type reference = | Qualid of qualid located | Ident of identifier located val qualid_of_reference : reference -> qualid located val string_of_reference : reference -> string val pr_reference : reference -> std_ppcmds val loc_of_reference : reference -> loc (** {6 Popping one level of section in global names } *) val pop_con : constant -> constant val pop_kn : mutual_inductive-> mutual_inductive val pop_global_reference : global_reference -> global_reference (** Deprecated synonyms *) val make_short_qualid : identifier -> qualid (** = qualid_of_ident *) val qualid_of_sp : full_path -> qualid (** = qualid_of_path *) coq-8.4pl2/library/libobject.mli0000640000175000001440000001017012010532755015732 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit; load_function : int -> object_name * 'a -> unit; open_function : int -> object_name * 'a -> unit; classify_function : 'a -> 'a substitutivity; subst_function : substitution * 'a -> 'a; discharge_function : object_name * 'a -> 'a option; rebuild_function : 'a -> 'a } (** The default object is a "Keep" object with empty methods. Object creators are advised to use the construction [{(default_object "MY_OBJECT") with cache_function = ... }] and specify only these functions which are not empty/meaningless *) val default_object : string -> 'a object_declaration (** the identity substitution function *) val ident_subst_function : substitution * 'a -> 'a (** {6 ... } *) (** Given an object declaration, the function [declare_object_full] will hand back two functions, the "injection" and "projection" functions for dynamically typed library-objects. 
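A minimal usage sketch, following the pattern suggested in
    [libobject.ml] (the object name and the table [MyTbl] are examples only):
    {[
      let (inMyObj, outMyObj) =
        declare_object_full
          { (default_object "MY_OBJECT") with
            cache_function = (fun (sp,a) -> MyTbl.add sp a) }
    ]}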
*) type obj val declare_object_full : 'a object_declaration -> ('a -> obj) * (obj -> 'a) val declare_object : 'a object_declaration -> ('a -> obj) val object_tag : obj -> string val cache_object : object_name * obj -> unit val load_object : int -> object_name * obj -> unit val open_object : int -> object_name * obj -> unit val subst_object : substitution * obj -> obj val classify_object : obj -> obj substitutivity val discharge_object : object_name * obj -> obj option val rebuild_object : obj -> obj val relax : bool -> unit coq-8.4pl2/library/summary.mli0000640000175000001440000000234712010532755015501 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a; unfreeze_function : 'a -> unit; init_function : unit -> unit } val declare_summary : string -> 'a summary_declaration -> unit type frozen val freeze_summaries : unit -> frozen val unfreeze_summaries : frozen -> unit val init_summaries : unit -> unit (** Beware: if some code is dynamically loaded via dynlink after the initialization of Coq, the init functions of any summary declared by this code may not be run. It is hence the responsability of plugins to initialize themselves properly. *) coq-8.4pl2/doc/0000750000175000001440000000000012127276533012403 5ustar notinuserscoq-8.4pl2/doc/tools/0000750000175000001440000000000012127276531013541 5ustar notinuserscoq-8.4pl2/doc/tools/latex_filter0000750000175000001440000000152011135674403016145 0ustar notinusers#!/bin/sh # First argument is the number of lines to treat # Second argument is optional and, if it is "no", overfull are not displayed i=$1 nooverfull=$2 error=0 verbose=0 chapter="" file="" while : ; do read -r line; case $line in "! "*) echo $line $file; error=1 verbose=1 ;; "LaTeX Font Info"*|"LaTeX Info"*|"Underfull "*) verbose=0 ;; "Overfull "*) verbose=0 if [ "$nooverfull" != "no" ]; then echo $line $file; fi ;; "LaTeX "*) verbose=0 echo $line $chapter ;; "["*|"Chapter "*) verbose=0 ;; "(./"*) file="(file `echo $line | cut -b 4- | cut -d' ' -f 1`)" verbose=0 ;; *) if [ $verbose = 1 ]; then echo $line; fi esac; if [ "$i" = "0" ]; then break; else i=`expr $i - 1`; fi; done exit $error coq-8.4pl2/doc/tools/show_latex_messages0000750000175000001440000000024211445362132017524 0ustar notinusers#!/bin/sh if [ "$1" = "-no-overfull" ]; then cat $2 | ../tools/latex_filter `cat $2 | wc -l` no else cat $1 | ../tools/latex_filter `cat $1 | wc -l` yes fi coq-8.4pl2/doc/tools/Translator.tex0000640000175000001440000007561010377337562016435 0ustar notinusers\ifx\pdfoutput\undefined % si on est pas en pdflatex \documentclass[11pt,a4paper]{article} \else \documentclass[11pt,a4paper,pdftex]{article} \fi \usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{pslatex} \usepackage{url} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \usepackage{array} \usepackage{fullpage} \title{Translation from Coq V7 to V8} \author{The Coq Development Team} %% Macros etc. 
\catcode`\_=13 \let\subscr=_ \def_{\ifmmode\sb\else\subscr\fi} \def\NT#1{\langle\textit{#1}\rangle} \def\NTL#1#2{\langle\textit{#1}\rangle_{#2}} %\def\TERM#1{\textsf{\bf #1}} \def\TERM#1{\texttt{#1}} \newenvironment{transbox} {\begin{center}\tt\begin{tabular}{l|ll} \hfil\textrm{V7} & \hfil\textrm{V8} \\ \hline} {\end{tabular}\end{center}} \def\TRANS#1#2 {\begin{tabular}[t]{@{}l@{}}#1\end{tabular} & \begin{tabular}[t]{@{}l@{}}#2\end{tabular} \\} \def\TRANSCOM#1#2#3 {\begin{tabular}[t]{@{}l@{}}#1\end{tabular} & \begin{tabular}[t]{@{}l@{}}#2\end{tabular} & #3 \\} %% %% %% \begin{document} \maketitle \section{Introduction} Coq version 8.0 is a major version and carries major changes: the concrete syntax was redesigned almost from scratch, and many notions of the libraries were renamed for uniformisation purposes. We felt that these changes could discourage users with large theories from switching to the new version. The goal of this document is to introduce these changes on simple examples (mainly the syntactic changes), and describe the automated tools to help moving to V8.0. Essentially, it consists of a translator that takes as input a Coq source file in old syntax and produces a file in new syntax and adapted to the new standard library. The main extra features of this translator is that it keeps comments, even those within expressions\footnote{The position of those comment might differ slightly since there is no exact matching of positions between old and new syntax.}. The document is organised as follows: first section describes the new syntax on simple examples. It is very translation-oriented. This should give users of older versions the flavour of the new syntax, and allow them to make translation manually on small examples. Section~\ref{Translation} explains how the translation process can be automatised for the most part (the boring one: applying similar changes over thousands of lines of code). We strongly advise users to follow these indications, in order to avoid many potential complications of the translation process. \section{The new syntax on examples} The goal of this section is to introduce to the new syntax of Coq on simple examples, rather than just giving the new grammar. It is strongly recommended to read first the definition of the new syntax (in the reference manual), but this document should also be useful for the eager user who wants to start with the new syntax quickly. The toplevel has an option {\tt -translate} which allows to interactively translate commands. This toplevel translator accepts a command, prints the translation on standard output (after a % \verb+New syntax:+ balise), executes the command, and waits for another command. The only requirements is that they should be syntactically correct, but they do not have to be well-typed. This interactive translator proved to be useful in two main usages. First as a ``debugger'' of the translation. Before the translation, it may help in spotting possible conflicts between the new syntax and user notations. Or when the translation fails for some reason, it makes it easy to find the exact reason why it failed and make attempts in fixing the problem. The second usage of the translator is when trying to make the first proofs in new syntax. Well trained users will automatically think their scripts in old syntax and might waste much time (and the intuition of the proof) if they have to search the translation in a document. Running a translator in the background will allow the user to instantly have the answer. 
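For instance, assuming {\tt coqtop} is in the path, the interactive
translator is started from a shell by:
\begin{verbatim}
coqtop -translate
\end{verbatim}
Each command entered in old syntax at the resulting prompt is then printed
in new syntax after the \verb+New syntax:+ balise before being executed as
usual.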
The rest of this section is a description of all the aspects of the syntax that changed and how they were translated. All the examples below can be tested by entering the V7 commands in the toplevel translator. %% \subsection{Changes in lexical conventions w.r.t. V7} \subsubsection{Identifiers} The lexical conventions changed: \TERM{_} is not a regular identifier anymore. It is used in terms as a placeholder for subterms to be inferred at type-checking, and in patterns as a non-binding variable. Furthermore, only letters (Unicode letters), digits, single quotes and _ are allowed after the first character. \subsubsection{Quoted string} Quoted strings are used typically to give a filename (which may not be a regular identifier). As before they are written between double quotes ("). Unlike for V7, there is no escape character: characters are written normally except the double quote which is doubled. \begin{transbox} \TRANS{"abcd$\backslash\backslash$efg"}{"abcd$\backslash$efg"} \TRANS{"abcd$\backslash$"efg"}{"abcd""efg"} \end{transbox} \subsection{Main changes in terms w.r.t. V7} \subsubsection{Precedence of application} In the new syntax, parentheses are not really part of the syntax of application. The precedence of application (10) is tighter than all prefix and infix notations. It makes it possible to remove parentheses in many contexts. \begin{transbox} \TRANS{(A x)->(f x)=(g y)}{A x -> f x = g y} \TRANS{(f [x]x)}{f (fun x => x)} \end{transbox} \subsubsection{Arithmetics and scopes} The specialized notation for \TERM{Z} and \TERM{R} (introduced by symbols \TERM{`} and \TERM{``}) have disappeared. They have been replaced by the general notion of scope. \begin{center} \begin{tabular}{l|l|l} type & scope name & delimiter \\ \hline types & type_scope & \TERM{type} \\ \TERM{bool} & bool_scope & \\ \TERM{nat} & nat_scope & \TERM{nat} \\ \TERM{Z} & Z_scope & \TERM{Z} \\ \TERM{R} & R_scope & \TERM{R} \\ \TERM{positive} & positive_scope & \TERM{P} \end{tabular} \end{center} In order to use notations of arithmetics on \TERM{Z}, its scope must be opened with command \verb+Open Scope Z_scope.+ Another possibility is using the scope change notation (\TERM{\%}). The latter notation is to be used when notations of several scopes appear in the same expression. In examples below, scope changes are not needed if the appropriate scope has been opened. Scope \verb|nat_scope| is opened in the initial state of Coq. \begin{transbox} \TRANSCOM{`0+x=x+0`}{0+x=x+0}{\textrm{Z_scope}} \TRANSCOM{``0 + [if b then ``1`` else ``2``]``}{0 + if b then 1 else 2}{\textrm{R_scope}} \TRANSCOM{(0)}{0}{\textrm{nat_scope}} \end{transbox} Below is a table that tells which notation is available in which scope. The relative precedences and associativity of operators is the same as in usual mathematics. See the reference manual for more details. However, it is important to remember that unlike V7, the type operators for product and sum are left-associative, in order not to clash with arithmetic operators. \begin{center} \begin{tabular}{l|l} scope & notations \\ \hline nat_scope & \texttt{+ - * < <= > >=} \\ Z_scope & \texttt{+ - * / mod < <= > >= ?=} \\ R_scope & \texttt{+ - * / < <= > >=} \\ type_scope & \texttt{* +} \\ bool_scope & \texttt{\&\& || -} \\ list_scope & \texttt{:: ++} \end{tabular} \end{center} \subsubsection{Notation for implicit arguments} The explicitation of arguments is closer to the \emph{bindings} notation in tactics. Argument positions follow the argument names of the head constant. 
The example below assumes \verb+f+ is a function with two implicit dependent arguments named \verb+x+ and \verb+y+. \begin{transbox} \TRANS{f 1!t1 2!t2 t3}{f (x:=t1) (y:=t2) t3} \TRANS{!f t1 t2}{@f t1 t2} \end{transbox} \subsubsection{Inferred subterms} Subterms that can be automatically inferred by the type-checker is now written {\tt _} \begin{transbox} \TRANS{?}{_} \end{transbox} \subsubsection{Universal quantification} The universal quantification and dependent product types are now introduced by the \texttt{forall} keyword before the binders and a comma after the binders. The syntax of binders also changed significantly. A binder can simply be a name when its type can be inferred. In other cases, the name and the type of the variable are put between parentheses. When several consecutive variables have the same type, they can be grouped. Finally, if all variables have the same type, parentheses can be omitted. \begin{transbox} \TRANS{(x:A)B}{forall (x:~A), B ~~\textrm{or}~~ forall x:~A, B} \TRANS{(x,y:nat)P}{forall (x y :~nat), P ~~\textrm{or}~~ forall x y :~nat, P} \TRANS{(x,y:nat;z:A)P}{forall (x y :~nat) (z:A), P} \TRANS{(x,y,z,t:?)P}{forall x y z t, P} \TRANS{(x,y:nat;z:?)P}{forall (x y :~nat) z, P} \end{transbox} \subsubsection{Abstraction} The notation for $\lambda$-abstraction follows that of universal quantification. The binders are surrounded by keyword \texttt{fun} and \verb+=>+. \begin{transbox} \TRANS{[x,y:nat; z](f a b c)}{fun (x y:nat) z => f a b c} \end{transbox} \subsubsection{Pattern-matching} Beside the usage of the keyword pair \TERM{match}/\TERM{with} instead of \TERM{Cases}/\TERM{of}, the main change is the notation for the type of branches and return type. It is no longer written between \TERM{$<$ $>$} before the \TERM{Cases} keyword, but interleaved with the destructured objects. The idea is that for each destructured object, one may specify a variable name (after the \TERM{as} keyword) to tell how the branches types depend on this destructured objects (case of a dependent elimination), and also how they depend on the value of the arguments of the inductive type of the destructured objects (after the \TERM{in} keyword). The type of branches is then given after the keyword \TERM{return}, unless it can be inferred. Moreover, when the destructured object is a variable, one may use this variable in the return type. \begin{transbox} \TRANS{Cases n of\\~~ O => O \\| (S k) => (1) end}{match n with\\~~ 0 => 0 \\| S k => 1 end} \TRANS{Cases m n of \\~~0 0 => t \\| ... end}{match m, n with \\~~0, 0 => t \\| ... end} \TRANS{<[n:nat](P n)>Cases T of ... end}{match T as n return P n with ... end} \TRANS{<[n:nat][p:(even n)]\~{}(odd n)>Cases p of\\~~ ... \\end}{match p in even n return \~{} odd n with\\~~ ...\\end} \end{transbox} The annotations of the special pattern-matching operators (\TERM{if}/\TERM{then}/\TERM{else}) and \TERM{let()} also changed. The only restriction is that the destructuring \TERM{let} does not allow dependent case analysis. \begin{transbox} \TRANS{ \begin{tabular}{@{}l} <[n:nat;x:(I n)](P n x)>if t then t1 \\ else t2 \end{tabular}}% {\begin{tabular}{@{}l} if t as x in I n return P n x then t1 \\ else t2 \end{tabular}} \TRANS{<[n:nat](P n)>let (p,q) = t1 in t2}% {let (p,q) in I n return P n := t1 in t2} \end{transbox} \subsubsection{Fixpoints and cofixpoints} An simpler syntax for non-mutual fixpoints is provided, making it very close to the usual notation for non-recursive functions. 
The decreasing argument is now indicated by an annotation between curly braces, regardless of the binders grouping. The annotation can be omitted if the binders introduce only one variable. The type of the result can be omitted if inferable. \begin{transbox} \TRANS{Fix plus\{plus [n:nat] : nat -> nat :=\\~~ [m]...\}}{fix plus (n m:nat) \{struct n\}: nat := ...} \TRANS{Fix fact\{fact [n:nat]: nat :=\\ ~~Cases n of\\~~~~ O => (1) \\~~| (S k) => (mult n (fact k)) end\}}{fix fact (n:nat) :=\\ ~~match n with \\~~~~0 => 1 \\~~| (S k) => n * fact k end} \end{transbox} There is a syntactic sugar for single fixpoints (defining one variable) associated to a local definition: \begin{transbox} \TRANS{let f := Fix f \{f [x:A] : T := M\} in\\(g (f y))}{let fix f (x:A) : T := M in\\g (f x)} \end{transbox} The same applies to cofixpoints, annotations are not allowed in that case. \subsubsection{Notation for type cast} \begin{transbox} \TRANS{O :: nat}{0 : nat} \end{transbox} \subsection{Main changes in tactics w.r.t. V7} The main change is that all tactic names are lowercase. This also holds for Ltac keywords. \subsubsection{Renaming of induction tactics} \begin{transbox} \TRANS{NewDestruct}{destruct} \TRANS{NewInduction}{induction} \TRANS{Induction}{simple induction} \TRANS{Destruct}{simple destruct} \end{transbox} \subsubsection{Ltac} Definitions of macros are introduced by \TERM{Ltac} instead of \TERM{Tactic Definition}, \TERM{Meta Definition} or \TERM{Recursive Definition}. They are considered recursive by default. \begin{transbox} \TRANS{Meta Definition my_tac t1 t2 := t1; t2.}% {Ltac my_tac t1 t2 := t1; t2.} \end{transbox} Rules of a match command are not between square brackets anymore. Context (understand a term with a placeholder) instantiation \TERM{inst} became \TERM{context}. Syntax is unified with subterm matching. \begin{transbox} \TRANS{Match t With [C[x=y]] -> Inst C[y=x]}% {match t with context C[x=y] => context C[y=x] end} \end{transbox} Arguments of macros use the term syntax. If a general Ltac expression is to be passed, it must be prefixed with ``{\tt ltac :}''. In other cases, when a \'{} was necessary, it is replaced by ``{\tt constr :}'' \begin{transbox} \TRANS{my_tac '(S x)}{my_tac (S x)} \TRANS{my_tac (Let x=tac In x)}{my_tac ltac:(let x:=tac in x)} \TRANS{Let x = '[x](S (S x)) In Apply x}% {let x := constr:(fun x => S (S x)) in apply x} \end{transbox} {\tt Match Context With} is now called {\tt match goal with}. Its argument is an Ltac expression by default. \subsubsection{Named arguments of theorems ({\em bindings})} \begin{transbox} \TRANS{Apply thm with x:=t 1:=u}{apply thm with (x:=t) (1:=u)} \end{transbox} \subsubsection{Occurrences} To avoid ambiguity between a numeric literal and the optional occurrence numbers of this term, the occurrence numbers are put after the term itself and after keyword \TERM{as}. \begin{transbox} \TRANS{Pattern 1 2 (f x) 3 4 d y z}{pattern f x at 1 2, d at 3 4, y, z} \end{transbox} \subsubsection{{\tt LetTac} and {\tt Pose}} Tactic {\tt LetTac} was renamed into {\tt set}, and tactic {\tt Pose} was a particular case of {\tt LetTac} where the abbreviation is folded in the conclusion\footnote{There is a tactic called {\tt pose} in V8, but its behaviour is not to fold the abbreviation at all.}. 
\begin{transbox} \TRANS{LetTac x = t in H}{set (x := t) in H} \TRANS{Pose x := t}{set (x := t)} \end{transbox} {\tt LetTac} could be followed by a specification (called a clause) of the places where the abbreviation had to be folded (hypothese and/or conclusion). Clauses are the syntactic notion to denote in which parts of a goal a given transformation shold occur. Its basic notation is either \TERM{*} (meaning everywhere), or {\tt\textrm{\em hyps} |- \textrm{\em concl}} where {\em hyps} is either \TERM{*} (to denote all the hypotheses), or a comma-separated list of either hypothesis name, or {\tt (value of $H$)} or {\tt (type of $H$)}. Moreover, occurrences can be specified after every hypothesis after the {\TERM{at}} keyword. {\em concl} is either empty or \TERM{*}, and can be followed by occurences. \begin{transbox} \TRANS{in Goal}{in |- *} \TRANS{in H H1}{in H1, H2 |-} \TRANS{in H H1 ...}{in * |-} \TRANS{in H H1 Goal}{in H1, H2 |- *} \TRANS{in H H1 H2 ... Goal}{in *} \TRANS{in 1 2 H 3 4 H0 1 3 Goal}{in H at 1 2, H0 at 3 4 |- * at 1 3} \end{transbox} \subsection{Main changes in vernacular commands w.r.t. V7} \subsubsection{Require} The default behaviour of {\tt Require} is not to open the loaded module. \begin{transbox} \TRANS{Require Arith}{Require Import Arith} \end{transbox} \subsubsection{Binders} The binders of vernacular commands changed in the same way as those of fixpoints. This also holds for parameters of inductive definitions. \begin{transbox} \TRANS{Definition x [a:A] : T := M}{Definition x (a:A) : T := M} \TRANS{Inductive and [A,B:Prop]: Prop := \\~~conj : A->B->(and A B)}% {Inductive and (A B:Prop): Prop := \\~~conj : A -> B -> and A B} \end{transbox} \subsubsection{Hints} Both {\tt Hints} and {\tt Hint} commands are beginning with {\tt Hint}. Command {\tt HintDestruct} has disappeared. The syntax of \emph{Extern} hints changed: the pattern and the tactic to be applied are separated by a {\tt =>}. \begin{transbox} \TRANS{Hint name := Resolve (f ? x)}% {Hint Resolve (f _ x)} \TRANS{Hint name := Extern 4 (toto ?) Apply lemma}% {Hint Extern 4 (toto _) => apply lemma} \TRANS{Hints Resolve x y z}{Hint Resolve x y z} \TRANS{Hints Resolve f : db1 db2}{Hint Resolve f : db1 db2} \TRANS{Hints Immediate x y z}{Hint Immediate x y z} \TRANS{Hints Unfold x y z}{Hint Unfold x y z} %% \TRANS{\begin{tabular}{@{}l} %% HintDestruct Local Conclusion \\ %% ~~name (f ? ?) 3 [Apply thm] %% \end{tabular}}% %% {\begin{tabular}{@{}l} %% Hint Local Destuct name := \\ %% ~~3 Conclusion (f _ _) => apply thm %% \end{tabular}} \end{transbox} \subsubsection{Implicit arguments} {\tt Set Implicit Arguments} changed its meaning in V8: the default is to turn implicit only the arguments that are {\em strictly} implicit (or rigid), i.e. that remains inferable whatever the other arguments are. For instance {\tt x} inferable from {\tt P x} is not strictly inferable since it can disappears if {\tt P} is instanciated by a term which erases {\tt x}. \begin{transbox} \TRANS{Set Implicit Arguments}% {\begin{tabular}{l} Set Implicit Arguments. \\ Unset Strict Implicits. \end{tabular}} \end{transbox} However, you may wish to adopt the new semantics of {\tt Set Implicit Arguments} (for instance because you think that the choice of arguments it sets implicit is more ``natural'' for you). \subsection{Changes in standard library} Many lemmas had their named changed to improve uniformity. The user generally do not have to care since the translators performs the renaming. 
Type {\tt entier} from fast_integer.v is renamed into {\tt N} by the translator. As a consequence, user-defined objects of same name {\tt N} are systematically qualified even tough it may not be necessary. The following table lists the main names with which the same problem arises: \begin{transbox} \TRANS{IF}{IF_then_else} \TRANS{ZERO}{Z0} \TRANS{POS}{Zpos} \TRANS{NEG}{Zneg} \TRANS{SUPERIEUR}{Gt} \TRANS{EGAL}{Eq} \TRANS{INFERIEUR}{Lt} \TRANS{add}{Pplus} \TRANS{true_sub}{Pminus} \TRANS{entier}{N} \TRANS{Un_suivi_de}{Ndouble_plus_one} \TRANS{Zero_suivi_de}{Ndouble} \TRANS{Nul}{N0} \TRANS{Pos}{Npos} \end{transbox} \subsubsection{Implicit arguments} %% Hugo: Main definitions of standard library have now implicit arguments. These arguments are dropped in the translated files. This can exceptionally be a source of incompatibilities which has to be solved by hand (it typically happens for polymorphic functions applied to {\tt nil} or {\tt None}). %% preciser: avant ou apres trad ? \subsubsection{Logic about {\tt Type}} Many notations that applied to {\tt Set} have been extended to {\tt Type}, so several definitions in {\tt Type} are superseded by them. \begin{transbox} \TRANS{x==y}{x=y} \TRANS{(EXT x:Prop | Q)}{exists x:Prop, Q} \TRANS{identityT}{identity} \end{transbox} %% Doc of the translator \section{A guide to translation} \label{Translation} %%\subsection{Overview of the translation process} Here is a short description of the tools involved in the translation process: \begin{description} \item{\tt coqc -translate} is the automatic translator. It is a parser/pretty-printer. This means that the translation is made by parsing every command using a parser of old syntax, which is printed using the new syntax. Many efforts were made to preserve as much as possible of the quality of the presentation: it avoids expansion of syntax extensions, comments are not discarded and placed at the same place. \item{\tt translate-v8} (in the translation package) is a small shell-script that will help translate developments that compile with a Makefile with minimum requirements. \end{description} \subsection{Preparation to translation} This step is very important because most of work shall be done before translation. If a problem occurs during translation, it often means that you will have to modify the original source and restart the translation process. This also means that it is recommended not to edit the output of the translator since it would be overwritten if the translation has to be restarted. \subsubsection{Compilation with {\tt coqc -v7}} First of all, it is mandatory that files compile with the current version of Coq (8.0) with option {\tt -v7}. Translation is a complicated task that involves the full compilation of the development. If your development was compiled with older versions, first upgrade to Coq V8.0 with option {\tt -v7}. If you use a Makefile similar to those produced by {\tt coq\_makefile}, you probably just have to do {\tt make OPT="-opt -v7"} ~~~or~~~ {\tt make OPT="-byte -v7"} When the development compiles successfully, there are several changes that might be necessary for the translation. Essentially, this is about syntax extensions (see section below dedicated to porting syntax extensions). If you do not use such features, then you are ready to try and make the translation. 
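Before moving on, and assuming {\tt coqc} is in the path, a single file with
no particular dependencies can also be checked directly in old syntax with
(the file name is only an example):
\begin{verbatim}
coqc -v7 MyDevelopment.v
\end{verbatim}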
\subsection{Translation} \subsubsection{The general case} The preferred way is to use script {\tt translate-v8} if your development is compiled by a Makefile with the following constraints: \begin{itemize} \item compilation is achieved by invoking make without specifying a target \item options are passed to Coq with make variable COQFLAGS that includes variables OPT, COQLIBS, OTHERFLAGS and COQ_XML. \end{itemize} These constraints are met by the makefiles produced by {\tt coq\_makefile} Otherwise, modify your build program so as to pass option {\tt -translate} to program {\tt coqc}. The effect of this option is to ouptut the translated source of any {\tt .v} file in a file with extension {\tt .v8} located in the same directory than the original file. \subsubsection{What may happen during the translation} This section describes events that may happen during the translation and measures to adopt. These are the warnings that may arise during the translation, but they generally do not require any modification for the user: Warnings: \begin{itemize} \item {\tt Unable to detect if $id$ denotes a local definition}\\ This is due to a semantic change in clauses. In a command such as {\tt simpl in H}, the old semantics were to perform simplification in the type of {\tt H}, or in its body if it is defined. With the new semantics, it is performed both in the type and the body (if any). It might lead to incompatibilities \item {\tt Forgetting obsolete module}\\ Some modules have disappeared in V8.0 (new syntax). The user does not need to worry about it, since the translator deals with it. \item {\tt Replacing obsolete module}\\ Same as before but with the module that were renamed. Here again, the translator deals with it. \end{itemize} \subsection{Verification of the translation} The shell-script {\tt translate-v8} also renames {\tt .v8} files into {\tt .v} files (older {\tt .v} files are put in a subdirectory called {\tt v7}) and tries to recompile them. To do so it invokes {\tt make} without option (which should cause the compilation using {\tt coqc} without particular option). If compilation fails at this stage, you should refrain from repairing errors manually on the new syntax, but rather modify the old syntax script and restart the translation. We insist on that because the problem encountered can show up in many instances (especially if the problem comes from a syntactic extension), and fixing the original sources (for instance the {\tt V8only} parts of notations) once will solve all occurrences of the problem. %%\subsubsection{Errors occurring after translation} %%Equality in {\tt Z} or {\tt R}... \subsection{Particular cases} \subsubsection{Lexical conventions} The definition of identifiers changed. Most of those changes are handled by the translator. They include: \begin{itemize} \item {\tt \_} is not an identifier anymore: it is tranlated to {\tt x\_} \item avoid clash with new keywords by adding a trailing {\tt \_} \end{itemize} If the choices made by translation is not satisfactory or in the following cases: \begin{itemize} \item use of latin letters \item use of iso-latin characters in notations \end{itemize} the user should change his development prior to translation. \subsubsection{{\tt Case} and {\tt Match}} These very low-level case analysis are no longer supported. 
The translator tries hard to translate them into a user-friendly one, but it might lack type information to do so\footnote{The translator tries to typecheck terms before printing them, but it is not always possible to determine the context in which terms appearing in tactics live.}. If this happens, it is preferable to transform it manually before translation. \subsubsection{Syntax extensions with {\tt Grammar} and {\tt Syntax}} {\tt Grammar} and {\tt Syntax} are no longer supported. They should be replaced by an equivalent {\tt Notation} command and be processed as described above. Before attempting translation, users should verify that compilation with option {\tt -v7} succeeds. In the cases where {\tt Grammar} and {\tt Syntax} cannot be emulated by {\tt Notation}, users have to change manually they development as they wish to avoid the use of {\tt Grammar}. If this is not done, the translator will simply expand the notations and the output of the translator will use the regular Coq syntax. \subsubsection{Syntax extensions with {\tt Notation} and {\tt Infix}} These commands do not necessarily need to be changed. Some work will have to be done manually if the notation conflicts with the new syntax (for instance, using keywords like {\tt fun} or {\tt exists}, overloading of symbols of the old syntax, etc.) or if the precedences are not right. Precedence levels are now from 0 to 200. In V8, the precedence and associativity of an operator cannot be redefined. Typical level are (refer to the chapter on notations in the Reference Manual for the full list): \begin{center} \begin{tabular}{|cll|} \hline Notation & Precedence & Associativity \\ \hline \verb!_ <-> _! & 95 & no \\ \verb!_ \/ _! & 85 & right \\ \verb!_ /\ _! & 80 & right \\ \verb!~ _! & 75 & right \\ \verb!_ = _!, \verb!_ <> _!, \verb!_ < _!, \verb!_ > _!, \verb!_ <= _!, \verb!_ >= _! & 70 & no \\ \verb!_ + _!, \verb!_ - _! & 50 & left \\ \verb!_ * _!, \verb!_ / _! & 40 & left \\ \verb!- _! & 35 & right \\ \verb!_ ^ _! & 30 & left \\ \hline \end{tabular} \end{center} By default, the translator keeps the associativity given in V7 while the levels are mapped according to the following table: \begin{center} \begin{tabular}{l|l|l} V7 level & mapped to & associativity \\ \hline 0 & 0 & no \\ 1 & 20 & left \\ 2 & 30 & right \\ 3 & 40 & left \\ 4 & 50 & left \\ 5 & 70 & no \\ 6 & 80 & right \\ 7 & 85 & right \\ 8 & 90 & right \\ 9 & 95 & no \\ 10 & 100 & left \end{tabular} \end{center} If this is OK, just simply apply the translator. \paragraph{Associativity conflict} Since the associativity of the levels obtained by translating a V7 level (as shown on table above) cannot be changed, you have to choose another level with a compatible associativity. You can choose any level between 0 and 200, knowing that the standard operators are already set at the levels shown on the list above. Assume you have a notation \begin{verbatim} Infix NONA 2 "=_S" my_setoid_eq. \end{verbatim} By default, the translator moves it to level 30 which is right associative, hence a conflict with the expected no associativity. To solve the problem, just add the "V8only" modifier to reset the level and enforce the associativity as follows: \begin{verbatim} Infix NONA 2 "=_S" my_setoid_eq V8only (at level 70, no associativity). \end{verbatim} The translator now knows that it has to translate "=_S" at level 70 with no associativity. 
Remark: 70 is the "natural" level for relations, hence the choice of 70 here, but any other level accepting no associativity would have been OK.

Second example: assume you have a notation
\begin{verbatim}
Infix RIGHTA 1 "o" my_comp.
\end{verbatim}
By default, the translator moves it to level 20, which is left associative, hence a conflict with the expected right associativity. To solve the problem, just add the "V8only" modifier to reset the level and enforce the associativity as follows:
\begin{verbatim}
Infix RIGHTA 1 "o" my_comp V8only (at level 20, right associativity).
\end{verbatim}
The translator now knows that it has to translate "o" at level 20, which has the correct right associativity.

Remark: we assumed here that the user wants a strong precedence for composition, in such a way, say, that "f o g + h" is parsed as "(f o g) + h". To get "o" binding less tightly than the arithmetical operators, an appropriate level would have been close to, and below, 70, e.g. 65.

\paragraph{Conflict: notation hides another notation}

Remark: use {\tt Print Grammar constr} in V8 to diagnose the overlap, and see the section on factorization in the chapter on notations of the Reference Manual for hints on how to factorize.

Example:
\begin{verbatim}
Notation "{ x }" := (my_embedding x) (at level 1).
\end{verbatim}
overlaps in V8 with the notation \verb#{ x : A & P }# at level 0 and with x at level 99. The conflicts can be solved by left-factorizing the notation as follows:
\begin{verbatim}
Notation "{ x }" := (my_embedding x) (at level 1) V8only (at level 0, x at level 99).
\end{verbatim}

\paragraph{Conflict: a notation conflicts with the V8 grammar}

Again, use the {\tt V8only} modifier to tell the translator to automatically handle the new syntax.

Example:
\begin{verbatim}
Infix 3 "@" app.
\end{verbatim}
Since {\tt @} is used in the new syntax for deactivating implicit arguments, another symbol has to be used, e.g. {\tt @@}. This is done via the {\tt V8only} option as follows:
\begin{verbatim}
Infix 3 "@" app V8only "@@" (at level 40, left associativity).
\end{verbatim}
or, alternatively, by
\begin{verbatim}
Notation "x @ y" := (app x y) (at level 3, left associativity) V8only "x @@ y" (at level 40, left associativity).
\end{verbatim}

\paragraph{Conflict: my notation is already defined at another level (or with another associativity)}

In V8, the level and associativity of a given notation can no longer be changed. Then, either you adopt the standard reserved level and associativity for this notation (as given in the list above), or you change your notation.
\begin{itemize}
\item To change the notation, follow the directions in the previous paragraph.
\item To adopt the standard level, just use {\tt V8only} without any argument.
\end{itemize}

Example:
\begin{verbatim}
Infix 6 "*" my_mult.
\end{verbatim}
is not accepted as such in V8. Write
\begin{verbatim}
Infix 6 "*" my_mult V8only.
\end{verbatim}
to tell the translator to use {\tt *} at the reserved level (i.e. 40 with left associativity). Even better, use interpretation scopes (see the Reference Manual).

\subsubsection{Strict implicit arguments}

If you want to adopt the new semantics of {\tt Set Implicit Arguments} (only setting rigid arguments as implicit), add the option {\tt -strict-implicit} to the translator.

Warning: changing the number of implicit arguments can break notations. In that case, use the {\tt V8only} modifier of {\tt Notation}.
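Finally, here is a recap of the basic translation workflow described in this chapter, applied by hand to a single file (a minimal sketch only: {\tt MyTheory.v} is a hypothetical file name, and the {\tt translate-v8} script automates these steps when your Makefile meets the constraints listed earlier):
\begin{verbatim}
# emit the translated source MyTheory.v8 next to MyTheory.v
coqc -translate MyTheory.v
# keep the old-syntax file in a v7/ subdirectory, promote the translation
mkdir -p v7
mv MyTheory.v v7/MyTheory.v
mv MyTheory.v8 MyTheory.v
# recompile the whole development in the new syntax
make
\end{verbatim}
If this recompilation fails, remember to fix the old-syntax sources and re-run the translation rather than patching the generated files.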
\end{document} coq-8.4pl2/doc/Makefile.rt0000640000175000001440000000337210406335323014464 0ustar notinusers# Makefile for building Coq Technical Reports # if coqc,coqtop,coq-tex are not in your PATH, you need the environment # variable COQBIN to be correctly set # (COQTOP is autodetected) # (some files are preprocessed using Coq and some part of the documentation # is automatically built from the theories sources) # To compile documentation, you need the following tools: # Dvi: latex (latex2e), bibtex, makeindex, dviselect (package RPM dviutils) # Ps: dvips, psutils (ftp://ftp.dcs.ed.ac.uk/pub/ajcd/psutils.tar.gz) # Pdf: pdflatex # Html: # - hevea: http://para.inria.fr/~maranget/hevea/ # - htmlSplit: http://coq.inria.fr/~delahaye # INRIA technical reports: dviselect, rrkit (by Michel Mauny) include ./Makefile ################### # RT ################### # Building an INRIA technical report (uses rrkit by Michel Mauny) rt/Reference-Manual-RT.dvi: refman/Reference-Manual.dvi rt/RefMan-cover.tex dviselect -i refman/Reference-Manual.dvi -o rt/RefMan-body.dvi 3: (cd rt; $(LATEX) RefMan-cover.tex) set a=`tail -1 refman/Reference-Manual.log`;\ set a=expr \("$$a" : '.*(\(.*\) pages.*'\) % 2;\ (cd rt; if $(TEST) "$$a = 0";\ then rrkit RefMan-cover.dvi RefMan-body.dvi Reference-Manual-RT.dvi;\ else rrkit -odd RefMan-cover.dvi RefMan-body.dvi Reference-Manual-RT.dvi;\ fi) # Building an INRIA technical report (uses rrkit by Michel Mauny) rt/Tutorial-RT.dvi : tutorial/Tutorial.v.dvi rt/Tutorial-cover.tex dviselect -i rt/Tutorial.v.dvi -o rt/Tutorial-body.dvi 3: (cd rt; $(LATEX) Tutorial-cover.tex) set a=`tail -1 tutorial/Tutorial.v.log`;\ set a=expr \("$$a" : '.*(\(.*\) pages.*'\) % 2;\ (cd rt; if $(TEST) "$$a = 0";\ then rrkit Tutorial-cover.dvi Tutorial-body.dvi Tutorial-RT.dvi;\ else rrkit -odd Tutorial-cover.dvi Tutorial-body.dvi Tutorial-RT.dvi;\ fi) coq-8.4pl2/doc/stdlib/0000750000175000001440000000000012127276531013662 5ustar notinuserscoq-8.4pl2/doc/stdlib/make-library-files0000750000175000001440000000234111464021670017262 0ustar notinusers#!/bin/sh # Needs COQSRC and GALLINA set # Keep the list of all *.v files, with dates, in library.files.ls # If it has changed since last time, or if library.files does not exist, # build the .g files (if needed) and the list library.files in # the ls -tr order of the *.vo files # The latter sorts the files in reverse order of their creation date # Assuming make does its job, this amounts to a topological sort of the # dependency graph LIBDIRS="Arith PArith NArith ZArith Reals Logic Bool Lists Relations Sets Sorting Wellfounded Setoids Program Classes Numbers" rm -f library.files.ls.tmp (cd $COQSRC/theories; find $LIBDIR -name "*.v" -ls) > library.files.ls.tmp if ! test -e library.files || ! cmp library.files.ls library.files.ls.tmp; then mv -f library.files.ls.tmp library.files.ls rm -f library.files; touch library.files ABSOLUTE=`pwd`/library.files cd $COQSRC/theories echo $LIBDIRS for rep in $LIBDIRS ; do (cd $rep echo $rep/intro.tex >> $ABSOLUTE VOFILES=`ls -tr *.vo` for file in $VOFILES ; do VF=`basename $file \.vo` if [ \( ! 
-e $VF.g \) -o \( $VF.v -nt $VF.g \) ] ; then $GALLINA $VF.v fi echo $rep/$VF.g >> $ABSOLUTE done ) done fi coq-8.4pl2/doc/stdlib/Library.tex0000750000175000001440000000420111450413560016001 0ustar notinusers\documentclass[11pt]{report} \usepackage[utf8x]{inputenc} \usepackage[T1]{fontenc} \usepackage{fullpage} \usepackage{amsfonts} \usepackage[color]{../../coqdoc} \input{../common/version} \input{../common/title} \input{../common/macros} \begin{document} \coverpage{The standard library}% {\ } {This material is distributed under the terms of the GNU Lesser General Public License Version 2.1.} \tableofcontents \newpage % \section*{The \Coq\ standard library} This document is a short description of the \Coq\ standard library. This library comes with the system as a complement of the core library (the {\bf Init} library ; see the Reference Manual for a description of this library). It provides a set of modules directly available through the \verb!Require! command. The standard library is composed of the following subdirectories: \begin{description} \item[Logic] Classical logic and dependent equality \item[Bool] Booleans (basic functions and results) \item[Arith] Basic Peano arithmetic \item[ZArith] Basic integer arithmetic \item[Reals] Classical Real Numbers and Analysis \item[Lists] Monomorphic and polymorphic lists (basic functions and results), Streams (infinite sequences defined with co-inductive types) \item[Sets] Sets (classical, constructive, finite, infinite, power set, etc.) \item[Relations] Relations (definitions and basic results). \item[Sorting] Sorted list (basic definitions and heapsort correctness). \item[Wellfounded] Well-founded relations (basic results). \item[Program] Tactics to deal with dependently-typed programs and their proofs. \item[Classes] Standard type class instances on relations and Coq part of the setoid rewriting tactic. \end{description} Each of these subdirectories contains a set of modules, whose specifications (\gallina{} files) have been roughly, and automatically, pasted in the following pages. There is also a version of this document in HTML format on the WWW, which you can access from the \Coq\ home page at \texttt{http://coq.inria.fr/library}. \input{Library.coqdoc} \end{document} coq-8.4pl2/doc/stdlib/make-library-index0000750000175000001440000000360211742311620017264 0ustar notinusers#!/bin/sh # Instantiate links to library files in index template FILE=$1 HIDDEN=$2 cp -f $FILE.template tmp echo -n Building file index-list.prehtml ... #LIBDIRS="Init Logic Structures Bool Arith PArith NArith ZArith QArith Relations Sets Classes Setoids Lists Vectors Sorting Wellfounded MSets FSets Reals Program Numbers Numbers/Natural/Abstract Numbers/Natural/Peano Numbers/Natural/Binary Numbers/Natural/BigN Numbers/Natural/SpecViaZ Numbers/Integer/Abstract Numbers/Integer/NatPairs Numbers/Integer/Binary Numbers/Integer/SpecViaZ Numbers/Integer/BigZ Numbers/NatInt Numbers/Cyclic/Abstract Numbers/Cyclic/Int31 Numbers/Cyclic/ZModulo Numbers/Cyclic/DoubleCyclic Numbers/Rational/BigQ Numbers/Rational/SpecViaQ Strings" LIBDIRS=`find theories/* -type d | sed -e "s:^theories/::"` for k in $LIBDIRS; do i=theories/$k echo $i d=`basename $i` if [ "$d" != "CVS" ]; then ls $i | grep -q \.v'$' if [ $? = 0 ]; then for j in $i/*.v; do b=`basename $j .v` rm -f tmp2 grep -q theories/$k/$b.v tmp a=$? grep -q theories/$k/$b.v $HIDDEN h=$? 
if [ $a = 0 ]; then if [ $h = 0 ]; then echo Error: $FILE and $HIDDEN both mention theories/$k/$b.v; exit 1 else p=`echo $k | sed 's:/:.:g'` sed -e "s:theories/$k/$b.v:$b:g" tmp > tmp2 mv -f tmp2 tmp fi else if [ $h = 0 ]; then echo Error: theories/$k/$b.v is missing in the template file exit 1 else echo Error: none of $FILE and $HIDDEN mention theories/$k/$b.v exit 1 fi fi done fi fi rm -f tmp2 sed -e "s/#$d#//" tmp > tmp2 mv -f tmp2 tmp done a=`grep theories tmp` if [ $? = 0 ]; then echo Error: extra files:; echo $a; exit 1; fi mv tmp $FILE echo Done coq-8.4pl2/doc/stdlib/hidden-files0000640000175000001440000000000011745225403016124 0ustar notinuserscoq-8.4pl2/doc/stdlib/index-list.html.template0000640000175000001440000004225511777052043020453 0ustar notinusers

    The Coq Standard Library

    Here is a short description of the Coq standard library, which is distributed with the system. It provides a set of modules directly available through the Require Import command.

    The standard library is composed of the following subdirectories:

    Init: The core library (automatically loaded when starting Coq)
    theories/Init/Notations.v theories/Init/Datatypes.v theories/Init/Logic.v theories/Init/Logic_Type.v theories/Init/Peano.v theories/Init/Specif.v theories/Init/Tactics.v theories/Init/Wf.v (theories/Init/Prelude.v)
    Logic: Classical logic and dependent equality
    theories/Logic/SetIsType.v theories/Logic/Classical_Pred_Set.v theories/Logic/Classical_Pred_Type.v theories/Logic/Classical_Prop.v theories/Logic/Classical_Type.v (theories/Logic/Classical.v) theories/Logic/ClassicalFacts.v theories/Logic/Decidable.v theories/Logic/Eqdep_dec.v theories/Logic/EqdepFacts.v theories/Logic/Eqdep.v theories/Logic/JMeq.v theories/Logic/ChoiceFacts.v theories/Logic/RelationalChoice.v theories/Logic/ClassicalChoice.v theories/Logic/ClassicalDescription.v theories/Logic/ClassicalEpsilon.v theories/Logic/ClassicalUniqueChoice.v theories/Logic/Berardi.v theories/Logic/Diaconescu.v theories/Logic/Hurkens.v theories/Logic/ProofIrrelevance.v theories/Logic/ProofIrrelevanceFacts.v theories/Logic/ConstructiveEpsilon.v theories/Logic/Description.v theories/Logic/Epsilon.v theories/Logic/IndefiniteDescription.v theories/Logic/FunctionalExtensionality.v theories/Logic/ExtensionalityFacts.v
    Structures: Algebraic structures (types with equality, with order, ...). DecidableType* and OrderedType* are there only for compatibility.
    theories/Structures/Equalities.v theories/Structures/EqualitiesFacts.v theories/Structures/Orders.v theories/Structures/OrdersTac.v theories/Structures/OrdersAlt.v theories/Structures/OrdersEx.v theories/Structures/OrdersFacts.v theories/Structures/OrdersLists.v theories/Structures/GenericMinMax.v theories/Structures/DecidableType.v theories/Structures/DecidableTypeEx.v theories/Structures/OrderedType.v theories/Structures/OrderedTypeAlt.v theories/Structures/OrderedTypeEx.v
    Bool: Booleans (basic functions and results)
    theories/Bool/Bool.v theories/Bool/BoolEq.v theories/Bool/DecBool.v theories/Bool/IfProp.v theories/Bool/Sumbool.v theories/Bool/Zerob.v theories/Bool/Bvector.v
    Arith: Basic Peano arithmetic
    theories/Arith/Arith_base.v theories/Arith/Le.v theories/Arith/Lt.v theories/Arith/Plus.v theories/Arith/Minus.v theories/Arith/Mult.v theories/Arith/Gt.v theories/Arith/Between.v theories/Arith/Peano_dec.v theories/Arith/Compare_dec.v (theories/Arith/Arith.v) theories/Arith/Min.v theories/Arith/Max.v theories/Arith/Compare.v theories/Arith/Div2.v theories/Arith/EqNat.v theories/Arith/Euclid.v theories/Arith/Even.v theories/Arith/Bool_nat.v theories/Arith/Factorial.v theories/Arith/Wf_nat.v
    PArith: Binary positive integers
    theories/PArith/BinPosDef.v theories/PArith/BinPos.v theories/PArith/Pnat.v theories/PArith/POrderedType.v (theories/PArith/PArith.v)
    NArith: Binary natural numbers
    theories/NArith/BinNatDef.v theories/NArith/BinNat.v theories/NArith/Nnat.v theories/NArith/Ndigits.v theories/NArith/Ndist.v theories/NArith/Ndec.v theories/NArith/Ndiv_def.v theories/NArith/Ngcd_def.v theories/NArith/Nsqrt_def.v (theories/NArith/NArith.v)
    ZArith: Binary integers
    theories/ZArith/BinIntDef.v theories/ZArith/BinInt.v theories/ZArith/Zorder.v theories/ZArith/Zcompare.v theories/ZArith/Znat.v theories/ZArith/Zmin.v theories/ZArith/Zmax.v theories/ZArith/Zminmax.v theories/ZArith/Zabs.v theories/ZArith/Zeven.v theories/ZArith/auxiliary.v theories/ZArith/ZArith_dec.v theories/ZArith/Zbool.v theories/ZArith/Zmisc.v theories/ZArith/Wf_Z.v theories/ZArith/Zhints.v (theories/ZArith/ZArith_base.v) theories/ZArith/Zcomplements.v theories/ZArith/Zsqrt_compat.v theories/ZArith/Zpow_def.v theories/ZArith/Zpow_alt.v theories/ZArith/Zpower.v theories/ZArith/ZOdiv_def.v theories/ZArith/ZOdiv.v theories/ZArith/Zdiv.v theories/ZArith/Zquot.v theories/ZArith/Zeuclid.v theories/ZArith/Zlogarithm.v (theories/ZArith/ZArith.v) theories/ZArith/Zgcd_alt.v theories/ZArith/Zwf.v theories/ZArith/Znumtheory.v theories/ZArith/Int.v theories/ZArith/Zpow_facts.v theories/ZArith/Zdigits.v
    QArith: Rational numbers
    theories/QArith/QArith_base.v theories/QArith/Qabs.v theories/QArith/Qpower.v theories/QArith/Qreduction.v theories/QArith/Qring.v theories/QArith/Qfield.v (theories/QArith/QArith.v) theories/QArith/Qreals.v theories/QArith/Qcanon.v theories/QArith/Qround.v theories/QArith/QOrderedType.v theories/QArith/Qminmax.v
    Numbers: An experimental modular architecture for arithmetic
      Prelude:
    theories/Numbers/BinNums.v theories/Numbers/NumPrelude.v theories/Numbers/BigNumPrelude.v theories/Numbers/NaryFunctions.v
      NatInt: Abstract mixed natural/integer/cyclic arithmetic
    theories/Numbers/NatInt/NZAdd.v theories/Numbers/NatInt/NZAddOrder.v theories/Numbers/NatInt/NZAxioms.v theories/Numbers/NatInt/NZBase.v theories/Numbers/NatInt/NZMul.v theories/Numbers/NatInt/NZDiv.v theories/Numbers/NatInt/NZMulOrder.v theories/Numbers/NatInt/NZOrder.v theories/Numbers/NatInt/NZDomain.v theories/Numbers/NatInt/NZProperties.v theories/Numbers/NatInt/NZParity.v theories/Numbers/NatInt/NZPow.v theories/Numbers/NatInt/NZSqrt.v theories/Numbers/NatInt/NZLog.v theories/Numbers/NatInt/NZGcd.v theories/Numbers/NatInt/NZBits.v
      Cyclic: Abstract and 31-bits-based cyclic arithmetic
    theories/Numbers/Cyclic/Abstract/CyclicAxioms.v theories/Numbers/Cyclic/Abstract/NZCyclic.v theories/Numbers/Cyclic/DoubleCyclic/DoubleAdd.v theories/Numbers/Cyclic/DoubleCyclic/DoubleBase.v theories/Numbers/Cyclic/DoubleCyclic/DoubleCyclic.v theories/Numbers/Cyclic/DoubleCyclic/DoubleDiv.v theories/Numbers/Cyclic/DoubleCyclic/DoubleDivn1.v theories/Numbers/Cyclic/DoubleCyclic/DoubleLift.v theories/Numbers/Cyclic/DoubleCyclic/DoubleMul.v theories/Numbers/Cyclic/DoubleCyclic/DoubleSqrt.v theories/Numbers/Cyclic/DoubleCyclic/DoubleSub.v theories/Numbers/Cyclic/DoubleCyclic/DoubleType.v theories/Numbers/Cyclic/Int31/Cyclic31.v theories/Numbers/Cyclic/Int31/Ring31.v theories/Numbers/Cyclic/Int31/Int31.v theories/Numbers/Cyclic/ZModulo/ZModulo.v
      Natural: Abstract and 31-bits-words-based natural arithmetic
    theories/Numbers/Natural/Abstract/NAdd.v theories/Numbers/Natural/Abstract/NAddOrder.v theories/Numbers/Natural/Abstract/NAxioms.v theories/Numbers/Natural/Abstract/NBase.v theories/Numbers/Natural/Abstract/NDefOps.v theories/Numbers/Natural/Abstract/NIso.v theories/Numbers/Natural/Abstract/NMulOrder.v theories/Numbers/Natural/Abstract/NOrder.v theories/Numbers/Natural/Abstract/NStrongRec.v theories/Numbers/Natural/Abstract/NSub.v theories/Numbers/Natural/Abstract/NDiv.v theories/Numbers/Natural/Abstract/NMaxMin.v theories/Numbers/Natural/Abstract/NParity.v theories/Numbers/Natural/Abstract/NPow.v theories/Numbers/Natural/Abstract/NSqrt.v theories/Numbers/Natural/Abstract/NLog.v theories/Numbers/Natural/Abstract/NGcd.v theories/Numbers/Natural/Abstract/NLcm.v theories/Numbers/Natural/Abstract/NBits.v theories/Numbers/Natural/Abstract/NProperties.v theories/Numbers/Natural/Binary/NBinary.v theories/Numbers/Natural/Peano/NPeano.v theories/Numbers/Natural/SpecViaZ/NSig.v theories/Numbers/Natural/SpecViaZ/NSigNAxioms.v theories/Numbers/Natural/BigN/BigN.v theories/Numbers/Natural/BigN/Nbasic.v theories/Numbers/Natural/BigN/NMake.v theories/Numbers/Natural/BigN/NMake_gen.v
      Integer: Abstract and concrete (especially 31-bits-words-based) integer arithmetic
    theories/Numbers/Integer/Abstract/ZAdd.v theories/Numbers/Integer/Abstract/ZAddOrder.v theories/Numbers/Integer/Abstract/ZAxioms.v theories/Numbers/Integer/Abstract/ZBase.v theories/Numbers/Integer/Abstract/ZLt.v theories/Numbers/Integer/Abstract/ZMul.v theories/Numbers/Integer/Abstract/ZMulOrder.v theories/Numbers/Integer/Abstract/ZSgnAbs.v theories/Numbers/Integer/Abstract/ZMaxMin.v theories/Numbers/Integer/Abstract/ZParity.v theories/Numbers/Integer/Abstract/ZPow.v theories/Numbers/Integer/Abstract/ZGcd.v theories/Numbers/Integer/Abstract/ZLcm.v theories/Numbers/Integer/Abstract/ZBits.v theories/Numbers/Integer/Abstract/ZProperties.v theories/Numbers/Integer/Abstract/ZDivEucl.v theories/Numbers/Integer/Abstract/ZDivFloor.v theories/Numbers/Integer/Abstract/ZDivTrunc.v theories/Numbers/Integer/Binary/ZBinary.v theories/Numbers/Integer/NatPairs/ZNatPairs.v theories/Numbers/Integer/SpecViaZ/ZSig.v theories/Numbers/Integer/SpecViaZ/ZSigZAxioms.v theories/Numbers/Integer/BigZ/BigZ.v theories/Numbers/Integer/BigZ/ZMake.v
      Rational: Abstract and 31-bits-words-based rational arithmetic
    theories/Numbers/Rational/SpecViaQ/QSig.v theories/Numbers/Rational/BigQ/BigQ.v theories/Numbers/Rational/BigQ/QMake.v
    Relations: Relations (definitions and basic results)
    theories/Relations/Relation_Definitions.v theories/Relations/Relation_Operators.v theories/Relations/Relations.v theories/Relations/Operators_Properties.v
    Sets: Sets (classical, constructive, finite, infinite, powerset, etc.)
    theories/Sets/Classical_sets.v theories/Sets/Constructive_sets.v theories/Sets/Cpo.v theories/Sets/Ensembles.v theories/Sets/Finite_sets_facts.v theories/Sets/Finite_sets.v theories/Sets/Image.v theories/Sets/Infinite_sets.v theories/Sets/Integers.v theories/Sets/Multiset.v theories/Sets/Partial_Order.v theories/Sets/Permut.v theories/Sets/Powerset_Classical_facts.v theories/Sets/Powerset_facts.v theories/Sets/Powerset.v theories/Sets/Relations_1_facts.v theories/Sets/Relations_1.v theories/Sets/Relations_2_facts.v theories/Sets/Relations_2.v theories/Sets/Relations_3_facts.v theories/Sets/Relations_3.v theories/Sets/Uniset.v
    Classes:
    theories/Classes/Init.v theories/Classes/RelationClasses.v theories/Classes/Morphisms.v theories/Classes/Morphisms_Prop.v theories/Classes/Morphisms_Relations.v theories/Classes/Equivalence.v theories/Classes/EquivDec.v theories/Classes/SetoidTactics.v theories/Classes/SetoidClass.v theories/Classes/SetoidDec.v theories/Classes/RelationPairs.v
    Setoids:
    theories/Setoids/Setoid.v
    Lists: Polymorphic lists, Streams (infinite sequences)
    theories/Lists/List.v theories/Lists/ListSet.v theories/Lists/SetoidList.v theories/Lists/SetoidPermutation.v theories/Lists/Streams.v theories/Lists/StreamMemo.v theories/Lists/ListTactics.v
    Vectors: Dependent datastructures storing their length
    theories/Vectors/Fin.v theories/Vectors/VectorDef.v theories/Vectors/VectorSpec.v (theories/Vectors/Vector.v)
    Sorting: Axiomatizations of sorts
    theories/Sorting/Heap.v theories/Sorting/Permutation.v theories/Sorting/Sorting.v theories/Sorting/PermutEq.v theories/Sorting/PermutSetoid.v theories/Sorting/Mergesort.v theories/Sorting/Sorted.v
    Wellfounded: Well-founded Relations
    theories/Wellfounded/Disjoint_Union.v theories/Wellfounded/Inclusion.v theories/Wellfounded/Inverse_Image.v theories/Wellfounded/Lexicographic_Exponentiation.v theories/Wellfounded/Lexicographic_Product.v theories/Wellfounded/Transitive_Closure.v theories/Wellfounded/Union.v theories/Wellfounded/Wellfounded.v theories/Wellfounded/Well_Ordering.v
    MSets: Modular implementation of finite sets using lists or efficient trees. This is a modernization of FSets.
    theories/MSets/MSetInterface.v theories/MSets/MSetFacts.v theories/MSets/MSetDecide.v theories/MSets/MSetProperties.v theories/MSets/MSetEqProperties.v theories/MSets/MSetWeakList.v theories/MSets/MSetList.v theories/MSets/MSetGenTree.v theories/MSets/MSetAVL.v theories/MSets/MSetRBT.v theories/MSets/MSetPositive.v theories/MSets/MSetToFiniteSet.v (theories/MSets/MSets.v)
    FSets: Modular implementation of finite sets/maps using lists or efficient trees. For sets, please consider the more modern MSets.
    theories/FSets/FSetInterface.v theories/FSets/FSetBridge.v theories/FSets/FSetFacts.v theories/FSets/FSetDecide.v theories/FSets/FSetProperties.v theories/FSets/FSetEqProperties.v theories/FSets/FSetList.v theories/FSets/FSetWeakList.v theories/FSets/FSetCompat.v theories/FSets/FSetAVL.v theories/FSets/FSetPositive.v (theories/FSets/FSets.v) theories/FSets/FSetToFiniteSet.v theories/FSets/FMapInterface.v theories/FSets/FMapWeakList.v theories/FSets/FMapList.v theories/FSets/FMapPositive.v theories/FSets/FMapFacts.v (theories/FSets/FMaps.v) theories/FSets/FMapAVL.v theories/FSets/FMapFullAVL.v
    Strings: Implementation of strings as lists of ASCII characters
    theories/Strings/Ascii.v theories/Strings/String.v
    Reals: Formalization of real numbers
    theories/Reals/Rdefinitions.v theories/Reals/Raxioms.v theories/Reals/RIneq.v theories/Reals/DiscrR.v theories/Reals/ROrderedType.v theories/Reals/Rminmax.v (theories/Reals/Rbase.v) theories/Reals/RList.v theories/Reals/Ranalysis.v theories/Reals/Rbasic_fun.v theories/Reals/Rderiv.v theories/Reals/Rfunctions.v theories/Reals/Rgeom.v theories/Reals/R_Ifp.v theories/Reals/Rlimit.v theories/Reals/Rseries.v theories/Reals/Rsigma.v theories/Reals/R_sqr.v theories/Reals/Rtrigo_fun.v theories/Reals/Rtrigo1.v theories/Reals/Rtrigo.v theories/Reals/Ratan.v theories/Reals/Machin.v theories/Reals/SplitAbsolu.v theories/Reals/SplitRmult.v theories/Reals/Alembert.v theories/Reals/AltSeries.v theories/Reals/ArithProp.v theories/Reals/Binomial.v theories/Reals/Cauchy_prod.v theories/Reals/Cos_plus.v theories/Reals/Cos_rel.v theories/Reals/Exp_prop.v theories/Reals/Integration.v theories/Reals/MVT.v theories/Reals/NewtonInt.v theories/Reals/PSeries_reg.v theories/Reals/PartSum.v theories/Reals/R_sqrt.v theories/Reals/Ranalysis1.v theories/Reals/Ranalysis2.v theories/Reals/Ranalysis3.v theories/Reals/Ranalysis4.v theories/Reals/Ranalysis5.v theories/Reals/Ranalysis_reg.v theories/Reals/Rcomplete.v theories/Reals/RiemannInt.v theories/Reals/RiemannInt_SF.v theories/Reals/Rpow_def.v theories/Reals/Rpower.v theories/Reals/Rprod.v theories/Reals/Rsqrt_def.v theories/Reals/Rtopology.v theories/Reals/Rtrigo_alt.v theories/Reals/Rtrigo_calc.v theories/Reals/Rtrigo_def.v theories/Reals/Rtrigo_reg.v theories/Reals/SeqProp.v theories/Reals/SeqSeries.v theories/Reals/Sqrt_reg.v theories/Reals/Rlogic.v theories/Reals/LegacyRfield.v (theories/Reals/Reals.v)
    Program: Support for dependently-typed programming.
    theories/Program/Basics.v theories/Program/Wf.v theories/Program/Subset.v theories/Program/Equality.v theories/Program/Tactics.v theories/Program/Utils.v theories/Program/Syntax.v theories/Program/Program.v theories/Program/Combinators.v
    Unicode: Unicode-based notations
    theories/Unicode/Utf8_core.v theories/Unicode/Utf8.v
    coq-8.4pl2/doc/LICENSE0000640000175000001440000007724111046342166013417 0ustar notinusersThe Coq Reference Manual is a collective work from the Coq Development Team whose members are listed in the file CREDITS of the Coq source package. All related documents (the LaTeX and BibTeX sources, the embedded png files, and the PostScript, PDF and html outputs) are copyright (c) INRIA 1999-2006. The material connected to the Reference Manual may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at http://www.opencontent.org/openpub/). Options A and B are *not* elected. The Coq Tutorial is a work by Grard Huet, Gilles Kahn and Christine Paulin-Mohring. All documents (the LaTeX source and the PostScript, PDF and html outputs) are copyright (c) INRIA 1999-2006. The material connected to the Coq Tutorial may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at http://www.opencontent.org/openpub/). Options A and B are *not* elected. The Coq Standard Library is a collective work from the Coq Development Team whose members are listed in the file CREDITS of the Coq source package. All related documents (the Coq vernacular source files and the PostScript, PDF and html outputs) are copyright (c) INRIA 1999-2006. The material connected to the Standard Library is distributed under the terms of the Lesser General Public License version 2.1 or later. The FAQ (Coq for the Clueless) is a work by Pierre Castran, Hugo Herbelin, Florent Kirchner, Benjamin Monate, and Julien Narboux. All documents (the LaTeX source and the PostScript, PDF and html outputs) are copyright (c) INRIA 2004-2006. The material connected to the FAQ (Coq for the Clueless) may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at http://www.opencontent.org/openpub/). Options A and B are *not* elected. The Tutorial on [Co-]Inductive Types in Coq is a work by Pierre Castran and Eduardo Gimenez. All related documents (the LaTeX and BibTeX sources and the PostScript, PDF and html outputs) are copyright (c) INRIA 1997-2006. The material connected to the Tutorial on [Co-]Inductive Types in Coq may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at http://www.opencontent.org/openpub/). Options A and B are *not* elected. ---------------------------------------------------------------------- *Open Publication License* v1.0, 8 June 1999 *I. REQUIREMENTS ON BOTH UNMODIFIED AND MODIFIED VERSIONS* The Open Publication works may be reproduced and distributed in whole or in part, in any medium physical or electronic, provided that the terms of this license are adhered to, and that this license or an incorporation of it by reference (with any options elected by the author(s) and/or publisher) is displayed in the reproduction. Proper form for an incorporation by reference is as follows: Copyright (c) by . This material may be distributed only subject to the terms and conditions set forth in the Open Publication License, vX.Y or later (the latest version is presently available at http://www.opencontent.org/openpub/). 
The reference must be immediately followed with any options elected by the author(s) and/or publisher of the document (see section VI). Commercial redistribution of Open Publication-licensed material is permitted. Any publication in standard (paper) book form shall require the citation of the original publisher and author. The publisher and author's names shall appear on all outer surfaces of the book. On all outer surfaces of the book the original publisher's name shall be as large as the title of the work and cited as possessive with respect to the title. *II. COPYRIGHT* The copyright to each Open Publication is owned by its author(s) or designee. *III. SCOPE OF LICENSE* The following license terms apply to all Open Publication works, unless otherwise explicitly stated in the document. Mere aggregation of Open Publication works or a portion of an Open Publication work with other works or programs on the same media shall not cause this license to apply to those other works. The aggregate work shall contain a notice specifying the inclusion of the Open Publication material and appropriate copyright notice. SEVERABILITY. If any part of this license is found to be unenforceable in any jurisdiction, the remaining portions of the license remain in force. NO WARRANTY. Open Publication works are licensed and provided "as is" without warranty of any kind, express or implied, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose or a warranty of non-infringement. *IV. REQUIREMENTS ON MODIFIED WORKS* All modified versions of documents covered by this license, including translations, anthologies, compilations and partial documents, must meet the following requirements: 1. The modified version must be labeled as such. 2. The person making the modifications must be identified and the modifications dated. 3. Acknowledgement of the original author and publisher if applicable must be retained according to normal academic citation practices. 4. The location of the original unmodified document must be identified. 5. The original author's (or authors') name(s) may not be used to assert or imply endorsement of the resulting document without the original author's (or authors') permission. *V. GOOD-PRACTICE RECOMMENDATIONS * In addition to the requirements of this license, it is requested from and strongly recommended of redistributors that: 1. If you are distributing Open Publication works on hardcopy or CD-ROM, you provide email notification to the authors of your intent to redistribute at least thirty days before your manuscript or media freeze, to give the authors time to provide updated documents. This notification should describe modifications, if any, made to the document. 2. All substantive modifications (including deletions) be either clearly marked up in the document or else described in an attachment to the document. 3. Finally, while it is not mandatory under this license, it is considered good form to offer a free copy of any hardcopy and CD-ROM expression of an Open Publication-licensed work to its author(s). *VI. LICENSE OPTIONS* The author(s) and/or publisher of an Open Publication-licensed document may elect certain options by appending language to the reference to or copy of the license. These options are considered part of the license instance and must be included with the license (or its incorporation by reference) in derived works. A. 
To prohibit distribution of substantively modified versions without the explicit permission of the author(s). "Substantive modification" is defined as a change to the semantic content of the document, and excludes mere changes in format or typographical corrections. To accomplish this, add the phrase `Distribution of substantively modified versions of this document is prohibited without the explicit permission of the copyright holder.' to the license reference or copy. B. To prohibit any publication of this work or derivative works in whole or in part in standard (paper) book form for commercial purposes is prohibited unless prior permission is obtained from the copyright holder. To accomplish this, add the phrase 'Distribution of the work or derivative of the work in any standard (paper) book form is prohibited unless prior permission is obtained from the copyright holder.' to the license reference or copy. ---------------------------------------------------------------------- GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. 
To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. 
Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. 
When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. 
For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS coq-8.4pl2/doc/tutorial/0000750000175000001440000000000012127276532014245 5ustar notinuserscoq-8.4pl2/doc/tutorial/Tutorial.tex0000750000175000001440000016031711366307247016607 0ustar notinusers\documentclass[11pt,a4paper]{book} \usepackage[T1]{fontenc} \usepackage[latin1]{inputenc} \usepackage{pslatex} \input{../common/version.tex} \input{../common/macros.tex} \input{../common/title.tex} %\makeindex \begin{document} \coverpage{A Tutorial}{Grard Huet, Gilles Kahn and Christine Paulin-Mohring}{} %\tableofcontents \chapter*{Getting started} \Coq\ is a Proof Assistant for a Logical Framework known as the Calculus of Inductive Constructions. It allows the interactive construction of formal proofs, and also the manipulation of functional programs consistently with their specifications. It runs as a computer program on many architectures. %, and mainly on Unix machines. It is available with a variety of user interfaces. The present document does not attempt to present a comprehensive view of all the possibilities of \Coq, but rather to present in the most elementary manner a tutorial on the basic specification language, called Gallina, in which formal axiomatisations may be developed, and on the main proof tools. For more advanced information, the reader could refer to the \Coq{} Reference Manual or the \textit{Coq'Art}, a new book by Y. Bertot and P. Castran on practical uses of the \Coq{} system. Coq can be used from a standard teletype-like shell window but preferably through the graphical user interface CoqIde\footnote{Alternative graphical interfaces exist: Proof General and Pcoq.}. Instructions on installation procedures, as well as more comprehensive documentation, may be found in the standard distribution of \Coq, which may be obtained from \Coq{} web site \texttt{http://coq.inria.fr}. In the following, we assume that \Coq~ is called from a standard teletype-like shell window. All examples preceded by the prompting sequence \verb:Coq < : represent user input, terminated by a period. The following lines usually show \Coq's answer as it appears on the users screen. When used from a graphical user interface such as CoqIde, the prompt is not displayed: user input is given in one window and \Coq's answers are displayed in a different window. The sequence of such examples is a valid \Coq~ session, unless otherwise specified. This version of the tutorial has been prepared on a PC workstation running Linux. The standard invocation of \Coq\ delivers a message such as: \begin{small} \begin{flushleft} \begin{verbatim} unix:~> coqtop Welcome to Coq 8.2 (January 2009) Coq < \end{verbatim} \end{flushleft} \end{small} The first line gives a banner stating the precise version of \Coq~ used. 
You should always return this banner when you report an anomaly to our bug-tracking system \verb|http://logical.futurs.inria.fr/coq-bugs| \chapter{Basic Predicate Calculus} \section{An overview of the specification language Gallina} A formal development in Gallina consists in a sequence of {\sl declarations} and {\sl definitions}. You may also send \Coq~ {\sl commands} which are not really part of the formal development, but correspond to information requests, or service routine invocations. For instance, the command: \begin{verbatim} Coq < Quit. \end{verbatim} terminates the current session. \subsection{Declarations} A declaration associates a {\sl name} with a {\sl specification}. A name corresponds roughly to an identifier in a programming language, i.e. to a string of letters, digits, and a few ASCII symbols like underscore (\verb"_") and prime (\verb"'"), starting with a letter. We use case distinction, so that the names \verb"A" and \verb"a" are distinct. Certain strings are reserved as key-words of \Coq, and thus are forbidden as user identifiers. A specification is a formal expression which classifies the notion which is being declared. There are basically three kinds of specifications: {\sl logical propositions}, {\sl mathematical collections}, and {\sl abstract types}. They are classified by the three basic sorts of the system, called respectively \verb:Prop:, \verb:Set:, and \verb:Type:, which are themselves atomic abstract types. Every valid expression $e$ in Gallina is associated with a specification, itself a valid expression, called its {\sl type} $\tau(E)$. We write $e:\tau(E)$ for the judgment that $e$ is of type $E$. You may request \Coq~ to return to you the type of a valid expression by using the command \verb:Check:: \begin{coq_eval} Set Printing Width 60. \end{coq_eval} \begin{coq_example} Check O. \end{coq_example} Thus we know that the identifier \verb:O: (the name `O', not to be confused with the numeral `0' which is not a proper identifier!) is known in the current context, and that its type is the specification \verb:nat:. This specification is itself classified as a mathematical collection, as we may readily check: \begin{coq_example} Check nat. \end{coq_example} The specification \verb:Set: is an abstract type, one of the basic sorts of the Gallina language, whereas the notions $nat$ and $O$ are notions which are defined in the arithmetic prelude, automatically loaded when running the \Coq\ system. We start by introducing a so-called section name. The role of sections is to structure the modelisation by limiting the scope of parameters, hypotheses and definitions. It will also give a convenient way to reset part of the development. \begin{coq_example} Section Declaration. \end{coq_example} With what we already know, we may now enter in the system a declaration, corresponding to the informal mathematics {\sl let n be a natural number}. \begin{coq_example} Variable n : nat. \end{coq_example} If we want to translate a more precise statement, such as {\sl let n be a positive natural number}, we have to add another declaration, which will declare explicitly the hypothesis \verb:Pos_n:, with specification the proper logical proposition: \begin{coq_example} Hypothesis Pos_n : (gt n 0). \end{coq_example} Indeed we may check that the relation \verb:gt: is known with the right type in the current context: \begin{coq_example} Check gt. 
\end{coq_example} which tells us that \verb:gt: is a function expecting two arguments of type \verb:nat: in order to build a logical proposition. What happens here is similar to what we are used to in a functional programming language: we may compose the (specification) type \verb:nat: with the (abstract) type \verb:Prop: of logical propositions through the arrow function constructor, in order to get a functional type \verb:nat->Prop:: \begin{coq_example} Check (nat -> Prop). \end{coq_example} which may be composed one more times with \verb:nat: in order to obtain the type \verb:nat->nat->Prop: of binary relations over natural numbers. Actually the type \verb:nat->nat->Prop: is an abbreviation for \verb:nat->(nat->Prop):. Functional notions may be composed in the usual way. An expression $f$ of type $A\ra B$ may be applied to an expression $e$ of type $A$ in order to form the expression $(f~e)$ of type $B$. Here we get that the expression \verb:(gt n): is well-formed of type \verb:nat->Prop:, and thus that the expression \verb:(gt n O):, which abbreviates \verb:((gt n) O):, is a well-formed proposition. \begin{coq_example} Check gt n O. \end{coq_example} \subsection{Definitions} The initial prelude contains a few arithmetic definitions: \verb:nat: is defined as a mathematical collection (type \verb:Set:), constants \verb:O:, \verb:S:, \verb:plus:, are defined as objects of types respectively \verb:nat:, \verb:nat->nat:, and \verb:nat->nat->nat:. You may introduce new definitions, which link a name to a well-typed value. For instance, we may introduce the constant \verb:one: as being defined to be equal to the successor of zero: \begin{coq_example} Definition one := (S O). \end{coq_example} We may optionally indicate the required type: \begin{coq_example} Definition two : nat := S one. \end{coq_example} Actually \Coq~ allows several possible syntaxes: \begin{coq_example} Definition three : nat := S two. \end{coq_example} Here is a way to define the doubling function, which expects an argument \verb:m: of type \verb:nat: in order to build its result as \verb:(plus m m):: \begin{coq_example} Definition double (m:nat) := plus m m. \end{coq_example} This introduces the constant \texttt{double} defined as the expression \texttt{fun m:nat => plus m m}. The abstraction introduced by \texttt{fun} is explained as follows. The expression \verb+fun x:A => e+ is well formed of type \verb+A->B+ in a context whenever the expression \verb+e+ is well-formed of type \verb+B+ in the given context to which we add the declaration that \verb+x+ is of type \verb+A+. Here \verb+x+ is a bound, or dummy variable in the expression \verb+fun x:A => e+. For instance we could as well have defined \verb:double: as \verb+fun n:nat => (plus n n)+. Bound (local) variables and free (global) variables may be mixed. For instance, we may define the function which adds the constant \verb:n: to its argument as \begin{coq_example} Definition add_n (m:nat) := plus m n. \end{coq_example} However, note that here we may not rename the formal argument $m$ into $n$ without capturing the free occurrence of $n$, and thus changing the meaning of the defined notion. Binding operations are well known for instance in logic, where they are called quantifiers. Thus we may universally quantify a proposition such as $m>0$ in order to get a universal proposition $\forall m\cdot m>0$. Indeed this operator is available in \Coq, with the following syntax: \verb+forall m:nat, gt m O+. 
Similarly to the case of the functional abstraction binding, we are obliged to declare explicitly the type of the quantified variable. We check: \begin{coq_example} Check (forall m:nat, gt m 0). \end{coq_example} We may clean-up the development by removing the contents of the current section: \begin{coq_example} Reset Declaration. \end{coq_example} \section{Introduction to the proof engine: Minimal Logic} In the following, we are going to consider various propositions, built from atomic propositions $A, B, C$. This may be done easily, by introducing these atoms as global variables declared of type \verb:Prop:. It is easy to declare several names with the same specification: \begin{coq_example} Section Minimal_Logic. Variables A B C : Prop. \end{coq_example} We shall consider simple implications, such as $A\ra B$, read as ``$A$ implies $B$''. Remark that we overload the arrow symbol, which has been used above as the functionality type constructor, and which may be used as well as propositional connective: \begin{coq_example} Check (A -> B). \end{coq_example} Let us now embark on a simple proof. We want to prove the easy tautology $((A\ra (B\ra C))\ra (A\ra B)\ra (A\ra C)$. We enter the proof engine by the command \verb:Goal:, followed by the conjecture we want to verify: \begin{coq_example} Goal (A -> B -> C) -> (A -> B) -> A -> C. \end{coq_example} The system displays the current goal below a double line, local hypotheses (there are none initially) being displayed above the line. We call the combination of local hypotheses with a goal a {\sl judgment}. We are now in an inner loop of the system, in proof mode. New commands are available in this mode, such as {\sl tactics}, which are proof combining primitives. A tactic operates on the current goal by attempting to construct a proof of the corresponding judgment, possibly from proofs of some hypothetical judgments, which are then added to the current list of conjectured judgments. For instance, the \verb:intro: tactic is applicable to any judgment whose goal is an implication, by moving the proposition to the left of the application to the list of local hypotheses: \begin{coq_example} intro H. \end{coq_example} Several introductions may be done in one step: \begin{coq_example} intros H' HA. \end{coq_example} We notice that $C$, the current goal, may be obtained from hypothesis \verb:H:, provided the truth of $A$ and $B$ are established. The tactic \verb:apply: implements this piece of reasoning: \begin{coq_example} apply H. \end{coq_example} We are now in the situation where we have two judgments as conjectures that remain to be proved. Only the first is listed in full, for the others the system displays only the corresponding subgoal, without its local hypotheses list. Remark that \verb:apply: has kept the local hypotheses of its father judgment, which are still available for the judgments it generated. In order to solve the current goal, we just have to notice that it is exactly available as hypothesis $HA$: \begin{coq_example} exact HA. \end{coq_example} Now $H'$ applies: \begin{coq_example} apply H'. \end{coq_example} And we may now conclude the proof as before, with \verb:exact HA.: Actually, we may not bother with the name \verb:HA:, and just state that the current goal is solvable from the current local assumptions: \begin{coq_example} assumption. \end{coq_example} The proof is now finished. 
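Collecting the elementary steps used above, the complete script of this proof reads as follows (reproduced here for reference only; it is not re-entered in the running session):
\begin{verbatim}
Goal (A -> B -> C) -> (A -> B) -> A -> C.
intro H.
intros H' HA.
apply H.
exact HA.
apply H'.
assumption.
\end{verbatim}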
We may either discard it, by using the command \verb:Abort: which returns to the standard \Coq~ toplevel loop without further ado, or else save it as a lemma in the current context, under name say \verb:trivial_lemma:: \begin{coq_example} Save trivial_lemma. \end{coq_example} As a comment, the system shows the proof script listing all tactic commands used in the proof. Let us redo the same proof with a few variations. First of all we may name the initial goal as a conjectured lemma: \begin{coq_example} Lemma distr_impl : (A -> B -> C) -> (A -> B) -> A -> C. \end{coq_example} Next, we may omit the names of local assumptions created by the introduction tactics, they can be automatically created by the proof engine as new non-clashing names. \begin{coq_example} intros. \end{coq_example} The \verb:intros: tactic, with no arguments, effects as many individual applications of \verb:intro: as is legal. Then, we may compose several tactics together in sequence, or in parallel, through {\sl tacticals}, that is tactic combinators. The main constructions are the following: \begin{itemize} \item $T_1 ; T_2$ (read $T_1$ then $T_2$) applies tactic $T_1$ to the current goal, and then tactic $T_2$ to all the subgoals generated by $T_1$. \item $T; [T_1 | T_2 | ... | T_n]$ applies tactic $T$ to the current goal, and then tactic $T_1$ to the first newly generated subgoal, ..., $T_n$ to the nth. \end{itemize} We may thus complete the proof of \verb:distr_impl: with one composite tactic: \begin{coq_example} apply H; [ assumption | apply H0; assumption ]. \end{coq_example} Let us now save lemma \verb:distr_impl:: \begin{coq_example} Save. \end{coq_example} Here \verb:Save: needs no argument, since we gave the name \verb:distr_impl: in advance; it is however possible to override the given name by giving a different argument to command \verb:Save:. Actually, such an easy combination of tactics \verb:intro:, \verb:apply: and \verb:assumption: may be found completely automatically by an automatic tactic, called \verb:auto:, without user guidance: \begin{coq_example} Lemma distr_imp : (A -> B -> C) -> (A -> B) -> A -> C. auto. \end{coq_example} This time, we do not save the proof, we just discard it with the \verb:Abort: command: \begin{coq_example} Abort. \end{coq_example} At any point during a proof, we may use \verb:Abort: to exit the proof mode and go back to Coq's main loop. We may also use \verb:Restart: to restart from scratch the proof of the same lemma. We may also use \verb:Undo: to backtrack one step, and more generally \verb:Undo n: to backtrack n steps. We end this section by showing a useful command, \verb:Inspect n.:, which inspects the global \Coq~ environment, showing the last \verb:n: declared notions: \begin{coq_example} Inspect 3. \end{coq_example} The declarations, whether global parameters or axioms, are shown preceded by \verb:***:; definitions and lemmas are stated with their specification, but their value (or proof-term) is omitted. \section{Propositional Calculus} \subsection{Conjunction} We have seen how \verb:intro: and \verb:apply: tactics could be combined in order to prove implicational statements. More generally, \Coq~ favors a style of reasoning, called {\sl Natural Deduction}, which decomposes reasoning into so called {\sl introduction rules}, which tell how to prove a goal whose main operator is a given propositional connective, and {\sl elimination rules}, which tell how to use an hypothesis whose main operator is the propositional connective. 
Let us show how to use these ideas for the propositional connectives \verb:/\: and \verb:\/:. \begin{coq_example} Lemma and_commutative : A /\ B -> B /\ A. intro. \end{coq_example} We make use of the conjunctive hypothesis \verb:H: with the \verb:elim: tactic, which breaks it into its components: \begin{coq_example} elim H. \end{coq_example} We now use the conjunction introduction tactic \verb:split:, which splits the conjunctive goal into the two subgoals: \begin{coq_example} split. \end{coq_example} and the proof is now trivial. Indeed, the whole proof is obtainable as follows: \begin{coq_example} Restart. intro H; elim H; auto. Qed. \end{coq_example} The tactic \verb:auto: succeeded here because it knows as a hint the conjunction introduction operator \verb+conj+ \begin{coq_example} Check conj. \end{coq_example} Actually, the tactic \verb+Split+ is just an abbreviation for \verb+apply conj.+ What we have just seen is that the \verb:auto: tactic is more powerful than just a simple application of local hypotheses; it tries to apply as well lemmas which have been specified as hints. A \verb:Hint Resolve: command registers a lemma as a hint to be used from now on by the \verb:auto: tactic, whose power may thus be incrementally augmented. \subsection{Disjunction} In a similar fashion, let us consider disjunction: \begin{coq_example} Lemma or_commutative : A \/ B -> B \/ A. intro H; elim H. \end{coq_example} Let us prove the first subgoal in detail. We use \verb:intro: in order to be left to prove \verb:B\/A: from \verb:A:: \begin{coq_example} intro HA. \end{coq_example} Here the hypothesis \verb:H: is not needed anymore. We could choose to actually erase it with the tactic \verb:clear:; in this simple proof it does not really matter, but in bigger proof developments it is useful to clear away unnecessary hypotheses which may clutter your screen. \begin{coq_example} clear H. \end{coq_example} The disjunction connective has two introduction rules, since \verb:P\/Q: may be obtained from \verb:P: or from \verb:Q:; the two corresponding proof constructors are called respectively \verb:or_introl: and \verb:or_intror:; they are applied to the current goal by tactics \verb:left: and \verb:right: respectively. For instance: \begin{coq_example} right. trivial. \end{coq_example} The tactic \verb:trivial: works like \verb:auto: with the hints database, but it only tries those tactics that can solve the goal in one step. As before, all these tedious elementary steps may be performed automatically, as shown for the second symmetric case: \begin{coq_example} auto. \end{coq_example} However, \verb:auto: alone does not succeed in proving the full lemma, because it does not try any elimination step. It is a bit disappointing that \verb:auto: is not able to prove automatically such a simple tautology. The reason is that we want to keep \verb:auto: efficient, so that it is always effective to use. \subsection{Tauto} A complete tactic for propositional tautologies is indeed available in \Coq~ as the \verb:tauto: tactic. \begin{coq_example} Restart. tauto. Qed. \end{coq_example} It is possible to inspect the actual proof tree constructed by \verb:tauto:, using a standard command of the system, which prints the value of any notion currently defined in the context: \begin{coq_example} Print or_commutative. \end{coq_example} It is not easy to understand the notation for proof terms without a few explanations. 
The \texttt{fun} prefix, such as \verb+fun H:A\/B =>+, corresponds to \verb:intro H:, whereas a subterm such as \verb:(or_intror: \verb:B H0): corresponds to the sequence of tactics \verb:apply or_intror; exact H0:. The generic combinator \verb:or_intror: needs to be instantiated by the two properties \verb:B: and \verb:A:. Because \verb:A: can be deduced from the type of \verb:H0:, only \verb:B: is printed. The two instantiations are effected automatically by the tactic \verb:apply: when pattern-matching a goal. The specialist will of course recognize our proof term as a $\lambda$-term, used as notation for the natural deduction proof term through the Curry-Howard isomorphism. The naive user of \Coq~ may safely ignore these formal details. Let us exercise the \verb:tauto: tactic on a more complex example: \begin{coq_example} Lemma distr_and : A -> B /\ C -> (A -> B) /\ (A -> C). tauto. Qed. \end{coq_example} \subsection{Classical reasoning} The tactic \verb:tauto: always comes back with an answer. Here is an example where it fails: \begin{coq_example} Lemma Peirce : ((A -> B) -> A) -> A. try tauto. \end{coq_example} Note the use of the \verb:Try: tactical, which does nothing if its tactic argument fails. This may come as a surprise to someone familiar with classical reasoning. Peirce's lemma is true in Boolean logic, i.e. it evaluates to \verb:true: for every truth-assignment to \verb:A: and \verb:B:. Indeed the double negation of Peirce's law may be proved in \Coq~ using \verb:tauto:: \begin{coq_example} Abort. Lemma NNPeirce : ~ ~ (((A -> B) -> A) -> A). tauto. Qed. \end{coq_example} In classical logic, the double negation of a proposition is equivalent to this proposition, but in the constructive logic of \Coq~ this is not so. If you want to use classical logic in \Coq, you have to import explicitly the \verb:Classical: module, which will declare the axiom \verb:classic: of excluded middle, and classical tautologies such as de Morgan's laws. The \verb:Require: command is used to import a module from \Coq's library: \begin{coq_example} Require Import Classical. Check NNPP. \end{coq_example} and it is now easy (although admittedly not the most direct way) to prove a classical law such as Peirce's: \begin{coq_example} Lemma Peirce : ((A -> B) -> A) -> A. apply NNPP; tauto. Qed. \end{coq_example} Here is one more example of propositional reasoning, in the shape of a Scottish puzzle. A private club has the following rules: \begin{enumerate} \item Every non-scottish member wears red socks \item Every member wears a kilt or doesn't wear red socks \item The married members don't go out on Sunday \item A member goes out on Sunday if and only if he is Scottish \item Every member who wears a kilt is Scottish and married \item Every scottish member wears a kilt \end{enumerate} Now, we show that these rules are so strict that no one can be accepted. \begin{coq_example} Section club. Variables Scottish RedSocks WearKilt Married GoOutSunday : Prop. Hypothesis rule1 : ~ Scottish -> RedSocks. Hypothesis rule2 : WearKilt \/ ~ RedSocks. Hypothesis rule3 : Married -> ~ GoOutSunday. Hypothesis rule4 : GoOutSunday <-> Scottish. Hypothesis rule5 : WearKilt -> Scottish /\ Married. Hypothesis rule6 : Scottish -> WearKilt. Lemma NoMember : False. tauto. Qed. \end{coq_example} At that point \verb:NoMember: is a proof of the absurdity depending on hypotheses. We may end the section, in that case, the variables and hypotheses will be discharged, and the type of \verb:NoMember: will be generalised. 
\begin{coq_example} End club. Check NoMember. \end{coq_example} \section{Predicate Calculus} Let us now move into predicate logic, and first of all into first-order predicate calculus. The essence of predicate calculus is to try to prove theorems in the most abstract possible way, without using the definitions of the mathematical notions, but by formal manipulations of uninterpreted function and predicate symbols. \subsection{Sections and signatures} Usually one works in some domain of discourse, over which range the individual variables and function symbols. In \Coq~ we speak in a language with a rich variety of types, so we may mix several domains of discourse, in our multi-sorted language. For the moment, we just do a few exercises, over a domain of discourse \verb:D: axiomatised as a \verb:Set:, and we consider two predicate symbols \verb:P: and \verb:R: over \verb:D:, of arities respectively 1 and 2. Such abstract entities may be entered in the context as global variables. But we must be careful about the pollution of our global environment by such declarations. For instance, we have already polluted our \Coq~ session by declaring the variables \verb:n:, \verb:Pos_n:, \verb:A:, \verb:B:, and \verb:C:. If we want to revert to the clean state of our initial session, we may use the \Coq~ \verb:Reset: command, which returns to the state just prior to the given global notion as we did before to remove a section, or we may return to the initial state using~: \begin{coq_example} Reset Initial. \end{coq_example} \begin{coq_eval} Set Printing Width 60. \end{coq_eval} We shall now declare a new \verb:Section:, which will allow us to define notions local to a well-delimited scope. We start by assuming a domain of discourse \verb:D:, and a binary relation \verb:R: over \verb:D:: \begin{coq_example} Section Predicate_calculus. Variable D : Set. Variable R : D -> D -> Prop. \end{coq_example} As a simple example of predicate calculus reasoning, let us assume that relation \verb:R: is symmetric and transitive, and let us show that \verb:R: is reflexive in any point \verb:x: which has an \verb:R: successor. Since we do not want to make the assumptions about \verb:R: global axioms of a theory, but rather local hypotheses to a theorem, we open a specific section to this effect. \begin{coq_example} Section R_sym_trans. Hypothesis R_symmetric : forall x y:D, R x y -> R y x. Hypothesis R_transitive : forall x y z:D, R x y -> R y z -> R x z. \end{coq_example} Remark the syntax \verb+forall x:D,+ which stands for universal quantification $\forall x : D$. \subsection{Existential quantification} We now state our lemma, and enter proof mode. \begin{coq_example} Lemma refl_if : forall x:D, (exists y, R x y) -> R x x. \end{coq_example} Remark that the hypotheses which are local to the currently opened sections are listed as local hypotheses to the current goals. The rationale is that these hypotheses are going to be discharged, as we shall see, when we close the corresponding sections. Note the functional syntax for existential quantification. The existential quantifier is built from the operator \verb:ex:, which expects a predicate as argument: \begin{coq_example} Check ex. \end{coq_example} and the notation \verb+(exists x:D, P x)+ is just concrete syntax for the expression \verb+(ex D (fun x:D => P x))+.
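As a purely illustrative aside (not part of the running session), the two expressions below denote the same proposition, and \verb:Check: classifies both of them in \verb:Prop::
\begin{verbatim}
Check (exists n:nat, gt n 0).
Check (ex (fun n:nat => gt n 0)).
\end{verbatim}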
Existential quantification is handled in \Coq~ in a similar fashion to the connectives \verb:/\: and \verb:\/: : it is introduced by the proof combinator \verb:ex_intro:, which is invoked by the specific tactic \verb:Exists:, and its elimination provides a witness \verb+a:D+ to \verb:P:, together with an assumption \verb+h:(P a)+ that indeed \verb+a+ verifies \verb:P:. Let us see how this works on this simple example. \begin{coq_example} intros x x_Rlinked. \end{coq_example} Remark that \verb:intros: treats universal quantification in the same way as the premises of implications. Renaming of bound variables occurs when it is needed; for instance, had we started with \verb:intro y:, we would have obtained the goal: \begin{coq_eval} Undo. \end{coq_eval} \begin{coq_example} intro y. \end{coq_example} \begin{coq_eval} Undo. intros x x_Rlinked. \end{coq_eval} Let us now use the existential hypothesis \verb:x_Rlinked: to exhibit an R-successor y of x. This is done in two steps, first with \verb:elim:, then with \verb:intros: \begin{coq_example} elim x_Rlinked. intros y Rxy. \end{coq_example} Now we want to use \verb:R_transitive:. The \verb:apply: tactic will know how to match \verb:x: with \verb:x:, and \verb:z: with \verb:x:, but needs help on how to instantiate \verb:y:, which appear in the hypotheses of \verb:R_transitive:, but not in its conclusion. We give the proper hint to \verb:apply: in a \verb:with: clause, as follows: \begin{coq_example} apply R_transitive with y. \end{coq_example} The rest of the proof is routine: \begin{coq_example} assumption. apply R_symmetric; assumption. \end{coq_example} \begin{coq_example*} Qed. \end{coq_example*} Let us now close the current section. \begin{coq_example} End R_sym_trans. \end{coq_example} Here \Coq's printout is a warning that all local hypotheses have been discharged in the statement of \verb:refl_if:, which now becomes a general theorem in the first-order language declared in section \verb:Predicate_calculus:. In this particular example, the use of section \verb:R_sym_trans: has not been really significant, since we could have instead stated theorem \verb:refl_if: in its general form, and done basically the same proof, obtaining \verb:R_symmetric: and \verb:R_transitive: as local hypotheses by initial \verb:intros: rather than as global hypotheses in the context. But if we had pursued the theory by proving more theorems about relation \verb:R:, we would have obtained all general statements at the closing of the section, with minimal dependencies on the hypotheses of symmetry and transitivity. \subsection{Paradoxes of classical predicate calculus} Let us illustrate this feature by pursuing our \verb:Predicate_calculus: section with an enrichment of our language: we declare a unary predicate \verb:P: and a constant \verb:d:: \begin{coq_example} Variable P : D -> Prop. Variable d : D. \end{coq_example} We shall now prove a well-known fact from first-order logic: a universal predicate is non-empty, or in other terms existential quantification follows from universal quantification. \begin{coq_example} Lemma weird : (forall x:D, P x) -> exists a, P a. intro UnivP. \end{coq_example} First of all, notice the pair of parentheses around \verb+forall x:D, P x+ in the statement of lemma \verb:weird:. If we had omitted them, \Coq's parser would have interpreted the statement as a truly trivial fact, since we would postulate an \verb:x: verifying \verb:(P x):. Here the situation is indeed more problematic. 
If we have some element in \verb:Set: \verb:D:, we may apply \verb:UnivP: to it and conclude, otherwise we are stuck. Indeed such an element \verb:d: exists, but this is just by virtue of our new signature. This points out a subtle difference between standard predicate calculus and \Coq. In standard first-order logic, the equivalent of lemma \verb:weird: always holds, because such a rule is wired in the inference rules for quantifiers, the semantic justification being that the interpretation domain is assumed to be non-empty. Whereas in \Coq, where types are not assumed to be systematically inhabited, lemma \verb:weird: only holds in signatures which allow the explicit construction of an element in the domain of the predicate. Let us conclude the proof, in order to show the use of the \verb:Exists: tactic: \begin{coq_example} exists d; trivial. Qed. \end{coq_example} Another fact which illustrates the sometimes disconcerting rules of classical predicate calculus is Smullyan's drinkers' paradox: ``In any non-empty bar, there is a person such that if she drinks, then everyone drinks''. We modelize the bar by Set \verb:D:, drinking by predicate \verb:P:. We shall need classical reasoning. Instead of loading the \verb:Classical: module as we did above, we just state the law of excluded middle as a local hypothesis schema at this point: \begin{coq_example} Hypothesis EM : forall A:Prop, A \/ ~ A. Lemma drinker : exists x:D, P x -> forall x:D, P x. \end{coq_example} The proof goes by cases on whether or not there is someone who does not drink. Such reasoning by cases proceeds by invoking the excluded middle principle, via \verb:elim: of the proper instance of \verb:EM:: \begin{coq_example} elim (EM (exists x, ~ P x)). \end{coq_example} We first look at the first case. Let Tom be the non-drinker: \begin{coq_example} intro Non_drinker; elim Non_drinker; intros Tom Tom_does_not_drink. \end{coq_example} We conclude in that case by considering Tom, since his drinking leads to a contradiction: \begin{coq_example} exists Tom; intro Tom_drinks. \end{coq_example} There are several ways in which we may eliminate a contradictory case; a simple one is to use the \verb:absurd: tactic as follows: \begin{coq_example} absurd (P Tom); trivial. \end{coq_example} We now proceed with the second case, in which actually any person will do; such a John Doe is given by the non-emptiness witness \verb:d:: \begin{coq_example} intro No_nondrinker; exists d; intro d_drinks. \end{coq_example} Now we consider any Dick in the bar, and reason by cases according to its drinking or not: \begin{coq_example} intro Dick; elim (EM (P Dick)); trivial. \end{coq_example} The only non-trivial case is again treated by contradiction: \begin{coq_example} intro Dick_does_not_drink; absurd (exists x, ~ P x); trivial. exists Dick; trivial. Qed. \end{coq_example} Now, let us close the main section and look at the complete statements we proved: \begin{coq_example} End Predicate_calculus. Check refl_if. Check weird. Check drinker. \end{coq_example} Remark how the three theorems are completely generic in the most general fashion; the domain \verb:D: is discharged in all of them, \verb:R: is discharged in \verb:refl_if: only, \verb:P: is discharged only in \verb:weird: and \verb:drinker:, along with the hypothesis that \verb:D: is inhabited. Finally, the excluded middle hypothesis is discharged only in \verb:drinker:. 
Note that the name \verb:d: has vanished as well from the statements of \verb:weird: and \verb:drinker:, since \Coq's pretty-printer replaces systematically a quantification such as \verb+forall d:D, E+, where \verb:d: does not occur in \verb:E:, by the functional notation \verb:D->E:. Similarly the name \verb:EM: does not appear in \verb:drinker:. Actually, universal quantification, implication, as well as function formation, are all special cases of one general construct of type theory called {\sl dependent product}. This is the mathematical construction corresponding to an indexed family of functions. A function $f\in \Pi x:D\cdot Cx$ maps an element $x$ of its domain $D$ to its (indexed) codomain $Cx$. Thus a proof of $\forall x:D\cdot Px$ is a function mapping an element $x$ of $D$ to a proof of proposition $Px$. \subsection{Flexible use of local assumptions} Very often during the course of a proof we want to retrieve a local assumption and reintroduce it explicitly in the goal, for instance in order to get a more general induction hypothesis. The tactic \verb:generalize: is what is needed here: \begin{coq_example} Section Predicate_Calculus. Variables P Q : nat -> Prop. Variable R : nat -> nat -> Prop. Lemma PQR : forall x y:nat, (R x x -> P x -> Q x) -> P x -> R x y -> Q x. intros. generalize H0. \end{coq_example} Sometimes it may be convenient to use a lemma, although we do not have a direct way to appeal to such an already proven fact. The tactic \verb:cut: permits to use the lemma at this point, keeping the corresponding proof obligation as a new subgoal: \begin{coq_example} cut (R x x); trivial. \end{coq_example} We clean the goal by doing an \verb:Abort: command. \begin{coq_example*} Abort. \end{coq_example*} \subsection{Equality} The basic equality provided in \Coq~ is Leibniz equality, noted infix like \verb+x=y+, when \verb:x: and \verb:y: are two expressions of type the same Set. The replacement of \verb:x: by \verb:y: in any term is effected by a variety of tactics, such as \verb:rewrite: and \verb:replace:. Let us give a few examples of equality replacement. Let us assume that some arithmetic function \verb:f: is null in zero: \begin{coq_example} Variable f : nat -> nat. Hypothesis foo : f 0 = 0. \end{coq_example} We want to prove the following conditional equality: \begin{coq_example*} Lemma L1 : forall k:nat, k = 0 -> f k = k. \end{coq_example*} As usual, we first get rid of local assumptions with \verb:intro:: \begin{coq_example} intros k E. \end{coq_example} Let us now use equation \verb:E: as a left-to-right rewriting: \begin{coq_example} rewrite E. \end{coq_example} This replaced both occurrences of \verb:k: by \verb:O:. Now \verb:apply foo: will finish the proof: \begin{coq_example} apply foo. Qed. \end{coq_example} When one wants to rewrite an equality in a right to left fashion, we should use \verb:rewrite <- E: rather than \verb:rewrite E: or the equivalent \verb:rewrite -> E:. Let us now illustrate the tactic \verb:replace:. \begin{coq_example} Hypothesis f10 : f 1 = f 0. Lemma L2 : f (f 1) = 0. replace (f 1) with 0. \end{coq_example} What happened here is that the replacement left the first subgoal to be proved, but another proof obligation was generated by the \verb:replace: tactic, as the second subgoal. The first subgoal is solved immediately by applying lemma \verb:foo:; the second one transitivity and then symmetry of equality, for instance with tactics \verb:transitivity: and \verb:symmetry:: \begin{coq_example} apply foo. 
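(* At this point only the subgoal generated by replace, namely 0 = f 1, remains to be proved. *)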
transitivity (f 0); symmetry; trivial. \end{coq_example} In case the equality $t=u$ generated by \verb:replace: $u$ \verb:with: $t$ is an assumption (possibly modulo symmetry), it will be automatically proved and the corresponding goal will not appear. For instance: \begin{coq_example} Restart. replace (f 0) with 0. rewrite f10; rewrite foo; trivial. Qed. \end{coq_example} \section{Using definitions} The development of mathematics does not simply proceed by logical argumentation from first principles: definitions are used in an essential way. A formal development proceeds by a dual process of abstraction, where one proves abstract statements in predicate calculus, and use of definitions, which in the contrary one instantiates general statements with particular notions in order to use the structure of mathematical values for the proof of more specialised properties. \subsection{Unfolding definitions} Assume that we want to develop the theory of sets represented as characteristic predicates over some universe \verb:U:. For instance: \begin{coq_example} Variable U : Type. Definition set := U -> Prop. Definition element (x:U) (S:set) := S x. Definition subset (A B:set) := forall x:U, element x A -> element x B. \end{coq_example} Now, assume that we have loaded a module of general properties about relations over some abstract type \verb:T:, such as transitivity: \begin{coq_example} Definition transitive (T:Type) (R:T -> T -> Prop) := forall x y z:T, R x y -> R y z -> R x z. \end{coq_example} Now, assume that we want to prove that \verb:subset: is a \verb:transitive: relation. \begin{coq_example} Lemma subset_transitive : transitive set subset. \end{coq_example} In order to make any progress, one needs to use the definition of \verb:transitive:. The \verb:unfold: tactic, which replaces all occurrences of a defined notion by its definition in the current goal, may be used here. \begin{coq_example} unfold transitive. \end{coq_example} Now, we must unfold \verb:subset:: \begin{coq_example} unfold subset. \end{coq_example} Now, unfolding \verb:element: would be a mistake, because indeed a simple proof can be found by \verb:auto:, keeping \verb:element: an abstract predicate: \begin{coq_example} auto. \end{coq_example} Many variations on \verb:unfold: are provided in \Coq. For instance, we may selectively unfold one designated occurrence: \begin{coq_example} Undo 2. unfold subset at 2. \end{coq_example} One may also unfold a definition in a given local hypothesis, using the \verb:in: notation: \begin{coq_example} intros. unfold subset in H. \end{coq_example} Finally, the tactic \verb:red: does only unfolding of the head occurrence of the current goal: \begin{coq_example} red. auto. Qed. \end{coq_example} \subsection{Principle of proof irrelevance} Even though in principle the proof term associated with a verified lemma corresponds to a defined value of the corresponding specification, such definitions cannot be unfolded in \Coq: a lemma is considered an {\sl opaque} definition. This conforms to the mathematical tradition of {\sl proof irrelevance}: the proof of a logical proposition does not matter, and the mathematical justification of a logical development relies only on {\sl provability} of the lemmas used in the formal proof. Conversely, ordinary mathematical definitions can be unfolded at will, they are {\sl transparent}. \chapter{Induction} \section{Data Types as Inductively Defined Mathematical Collections} All the notions which were studied until now pertain to traditional mathematical logic. 
Specifications of objects were abstract properties used in reasoning more or less constructively; we are now entering the realm of inductive types, which specify the existence of concrete mathematical constructions. \subsection{Booleans} Let us start with the collection of booleans, as they are specified in the \Coq's \verb:Prelude: module: \begin{coq_example} Inductive bool : Set := true | false. \end{coq_example} Such a declaration defines several objects at once. First, a new \verb:Set: is declared, with name \verb:bool:. Then the {\sl constructors} of this \verb:Set: are declared, called \verb:true: and \verb:false:. Those are analogous to introduction rules of the new Set \verb:bool:. Finally, a specific elimination rule for \verb:bool: is now available, which permits to reason by cases on \verb:bool: values. Three instances are indeed defined as new combinators in the global context: \verb:bool_ind:, a proof combinator corresponding to reasoning by cases, \verb:bool_rec:, an if-then-else programming construct, and \verb:bool_rect:, a similar combinator at the level of types. Indeed: \begin{coq_example} Check bool_ind. Check bool_rec. Check bool_rect. \end{coq_example} Let us for instance prove that every Boolean is true or false. \begin{coq_example} Lemma duality : forall b:bool, b = true \/ b = false. intro b. \end{coq_example} We use the knowledge that \verb:b: is a \verb:bool: by calling tactic \verb:elim:, which is this case will appeal to combinator \verb:bool_ind: in order to split the proof according to the two cases: \begin{coq_example} elim b. \end{coq_example} It is easy to conclude in each case: \begin{coq_example} left; trivial. right; trivial. \end{coq_example} Indeed, the whole proof can be done with the combination of the \verb:simple: \verb:induction:, which combines \verb:intro: and \verb:elim:, with good old \verb:auto:: \begin{coq_example} Restart. simple induction b; auto. Qed. \end{coq_example} \subsection{Natural numbers} Similarly to Booleans, natural numbers are defined in the \verb:Prelude: module with constructors \verb:S: and \verb:O:: \begin{coq_example} Inductive nat : Set := | O : nat | S : nat -> nat. \end{coq_example} The elimination principles which are automatically generated are Peano's induction principle, and a recursion operator: \begin{coq_example} Check nat_ind. Check nat_rec. \end{coq_example} Let us start by showing how to program the standard primitive recursion operator \verb:prim_rec: from the more general \verb:nat_rec:: \begin{coq_example} Definition prim_rec := nat_rec (fun i:nat => nat). \end{coq_example} That is, instead of computing for natural \verb:i: an element of the indexed \verb:Set: \verb:(P i):, \verb:prim_rec: computes uniformly an element of \verb:nat:. Let us check the type of \verb:prim_rec:: \begin{coq_example} Check prim_rec. \end{coq_example} Oops! Instead of the expected type \verb+nat->(nat->nat->nat)->nat->nat+ we get an apparently more complicated expression. Indeed the type of \verb:prim_rec: is equivalent by rule $\beta$ to its expected type; this may be checked in \Coq~ by command \verb:Eval Cbv Beta:, which $\beta$-reduces an expression to its {\sl normal form}: \begin{coq_example} Eval cbv beta in ((fun _:nat => nat) O -> (forall y:nat, (fun _:nat => nat) y -> (fun _:nat => nat) (S y)) -> forall n:nat, (fun _:nat => nat) n). \end{coq_example} Let us now show how to program addition with primitive recursion: \begin{coq_example} Definition addition (n m:nat) := prim_rec m (fun p rec:nat => S rec) n. 
\end{coq_example} That is, we specify that \verb+(addition n m)+ computes by cases on \verb:n: according to its main constructor; when \verb:n = O:, we get \verb:m:; when \verb:n = S p:, we get \verb:(S rec):, where \verb:rec: is the result of the recursive computation \verb+(addition p m)+. Let us verify it by asking \Coq~to compute for us say $2+3$: \begin{coq_example} Eval compute in (addition (S (S O)) (S (S (S O)))). \end{coq_example} Actually, we do not have to do all explicitly. {\Coq} provides a special syntax {\tt Fixpoint/match} for generic primitive recursion, and we could thus have defined directly addition as: \begin{coq_example} Fixpoint plus (n m:nat) {struct n} : nat := match n with | O => m | S p => S (plus p m) end. \end{coq_example} For the rest of the session, we shall clean up what we did so far with types \verb:bool: and \verb:nat:, in order to use the initial definitions given in \Coq's \verb:Prelude: module, and not to get confusing error messages due to our redefinitions. We thus revert to the state before our definition of \verb:bool: with the \verb:Reset: command: \begin{coq_example} Reset bool. \end{coq_example} \subsection{Simple proofs by induction} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_eval} Set Printing Width 60. \end{coq_eval} Let us now show how to do proofs by structural induction. We start with easy properties of the \verb:plus: function we just defined. Let us first show that $n=n+0$. \begin{coq_example} Lemma plus_n_O : forall n:nat, n = n + 0. intro n; elim n. \end{coq_example} What happened was that \verb:elim n:, in order to construct a \verb:Prop: (the initial goal) from a \verb:nat: (i.e. \verb:n:), appealed to the corresponding induction principle \verb:nat_ind: which we saw was indeed exactly Peano's induction scheme. Pattern-matching instantiated the corresponding predicate \verb:P: to \verb+fun n:nat => n = n + 0+, and we get as subgoals the corresponding instantiations of the base case \verb:(P O): , and of the inductive step \verb+forall y:nat, P y -> P (S y)+. In each case we get an instance of function \verb:plus: in which its second argument starts with a constructor, and is thus amenable to simplification by primitive recursion. The \Coq~tactic \verb:simpl: can be used for this purpose: \begin{coq_example} simpl. auto. \end{coq_example} We proceed in the same way for the base step: \begin{coq_example} simpl; auto. Qed. \end{coq_example} Here \verb:auto: succeeded, because it used as a hint lemma \verb:eq_S:, which say that successor preserves equality: \begin{coq_example} Check eq_S. \end{coq_example} Actually, let us see how to declare our lemma \verb:plus_n_O: as a hint to be used by \verb:auto:: \begin{coq_example} Hint Resolve plus_n_O . \end{coq_example} We now proceed to the similar property concerning the other constructor \verb:S:: \begin{coq_example} Lemma plus_n_S : forall n m:nat, S (n + m) = n + S m. \end{coq_example} We now go faster, remembering that tactic \verb:simple induction: does the necessary \verb:intros: before applying \verb:elim:. Factoring simplification and automation in both cases thanks to tactic composition, we prove this lemma in one line: \begin{coq_example} simple induction n; simpl; auto. Qed. Hint Resolve plus_n_S . \end{coq_example} Let us end this exercise with the commutativity of \verb:plus:: \begin{coq_example} Lemma plus_com : forall n m:nat, n + m = m + n. \end{coq_example} Here we have a choice on doing an induction on \verb:n: or on \verb:m:, the situation being symmetric. 
For instance: \begin{coq_example} simple induction m; simpl; auto. \end{coq_example} Here \verb:auto: succeeded on the base case, thanks to our hint \verb:plus_n_O:, but the induction step requires rewriting, which \verb:auto: does not handle: \begin{coq_example} intros m' E; rewrite <- E; auto. Qed. \end{coq_example} \subsection{Discriminate} It is also possible to define new propositions by primitive recursion. Let us for instance define the predicate which discriminates between the constructors \verb:O: and \verb:S:: it computes to \verb:False: when its argument is \verb:O:, and to \verb:True: when its argument is of the form \verb:(S n):: \begin{coq_example} Definition Is_S (n:nat) := match n with | O => False | S p => True end. \end{coq_example} Now we may use the computational power of \verb:Is_S: in order to prove trivially that \verb:(Is_S (S n)):: \begin{coq_example} Lemma S_Is_S : forall n:nat, Is_S (S n). simpl; trivial. Qed. \end{coq_example} But we may also use it to transform a \verb:False: goal into \verb:(Is_S O):. Let us show a particularly important use of this feature; we want to prove that \verb:O: and \verb:S: construct different values, one of Peano's axioms: \begin{coq_example} Lemma no_confusion : forall n:nat, 0 <> S n. \end{coq_example} First of all, we replace negation by its definition, by reducing the goal with tactic \verb:red:; then we get contradiction by successive \verb:intros:: \begin{coq_example} red; intros n H. \end{coq_example} Now we use our trick: \begin{coq_example} change (Is_S 0). \end{coq_example} Now we use equality in order to get a subgoal which computes out to \verb:True:, which finishes the proof: \begin{coq_example} rewrite H; trivial. simpl; trivial. \end{coq_example} Actually, a specific tactic \verb:discriminate: is provided to produce mechanically such proofs, without the need for the user to define explicitly the relevant discrimination predicates: \begin{coq_example} Restart. intro n; discriminate. Qed. \end{coq_example} \section{Logic programming} In the same way as we defined standard data-types above, we may define inductive families, and for instance inductive predicates. Here is the definition of predicate $\le$ over type \verb:nat:, as given in \Coq's \verb:Prelude: module: \begin{coq_example*} Inductive le (n:nat) : nat -> Prop := | le_n : le n n | le_S : forall m:nat, le n m -> le n (S m). \end{coq_example*} This definition introduces a new predicate \verb+le:nat->nat->Prop+, and the two constructors \verb:le_n: and \verb:le_S:, which are the defining clauses of \verb:le:. That is, we get not only the ``axioms'' \verb:le_n: and \verb:le_S:, but also the converse property, that \verb:(le n m): if and only if this statement can be obtained as a consequence of these defining clauses; that is, \verb:le: is the minimal predicate verifying clauses \verb:le_n: and \verb:le_S:. This is insured, as in the case of inductive data types, by an elimination principle, which here amounts to an induction principle \verb:le_ind:, stating this minimality property: \begin{coq_example} Check le. Check le_ind. \end{coq_example} Let us show how proofs may be conducted with this principle. First we show that $n\le m \Rightarrow n+1\le m+1$: \begin{coq_example} Lemma le_n_S : forall n m:nat, le n m -> le (S n) (S m). intros n m n_le_m. elim n_le_m. 
\end{coq_example} What happens here is similar to the behaviour of \verb:elim: on natural numbers: it appeals to the relevant induction principle, here \verb:le_ind:, which generates the two subgoals, which may then be solved easily with the help of the defining clauses of \verb:le:. \begin{coq_example} apply le_n; trivial. intros; apply le_S; trivial. \end{coq_example} Now we know that it is a good idea to give the defining clauses as hints, so that the proof may proceed with a simple combination of \verb:induction: and \verb:auto:. \begin{coq_example} Restart. Hint Resolve le_n le_S . \end{coq_example} We have a slight problem however. We want to say ``Do an induction on hypothesis \verb:(le n m):'', but we have no explicit name for it. What we do in this case is to say ``Do an induction on the first unnamed hypothesis'', as follows. \begin{coq_example} simple induction 1; auto. Qed. \end{coq_example} Here is a more tricky problem. Assume we want to show that $n\le 0 \Rightarrow n=0$. This reasoning ought to follow simply from the fact that only the first defining clause of \verb:le: applies. \begin{coq_example} Lemma tricky : forall n:nat, le n 0 -> n = 0. \end{coq_example} However, here trying something like \verb:induction 1: would lead nowhere (try it and see what happens). An induction on \verb:n: would not be convenient either. What we must do here is analyse the definition of \verb"le" in order to match hypothesis \verb:(le n O): with the defining clauses, to find that only \verb:le_n: applies, whence the result. This analysis may be performed by the ``inversion'' tactic \verb:inversion_clear: as follows: \begin{coq_example} intros n H; inversion_clear H. trivial. Qed. \end{coq_example} \chapter{Modules} \section{Opening library modules} When you start \Coq~ without further requirements in the command line, you get a bare system with few libraries loaded. As we saw, a standard prelude module provides the standard logic connectives, and a few arithmetic notions. If you want to load and open other modules from the library, you have to use the \verb"Require" command, as we saw for classical logic above. For instance, if you want more arithmetic constructions, you should request: \begin{coq_example*} Require Import Arith. \end{coq_example*} Such a command looks for a (compiled) module file \verb:Arith.vo: in the libraries registered by \Coq. Libraries inherit the structure of the file system of the operating system and are registered with the command \verb:Add LoadPath:. Physical directories are mapped to logical directories. Especially the standard library of \Coq~ is pre-registered as a library of name \verb=Coq=. Modules have absolute unique names denoting their place in \Coq~ libraries. An absolute name is a sequence of single identifiers separated by dots. E.g. the module \verb=Arith= has full name \verb=Coq.Arith.Arith= and because it resides in eponym subdirectory \verb=Arith= of the standard library, it can be as well required by the command \begin{coq_example*} Require Import Coq.Arith.Arith. \end{coq_example*} This may be useful to avoid ambiguities if somewhere, in another branch of the libraries known by Coq, another module is also called \verb=Arith=. Notice that by default, when a library is registered, all its contents, and all the contents of its subdirectories recursively are visible and accessible by a short (relative) name as \verb=Arith=. Notice also that modules or definitions not explicitly registered in a library are put in a default library called \verb=Top=. 
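To make the registration mechanism concrete, suppose (purely as an illustration; the directory and module names below are made up) that a directory \verb=/home/user/mydevs= contains a compiled module \verb=Lists.vo=. It could then be registered under the logical name \verb=MyDevs= and loaded as follows:
\begin{verbatim}
Add LoadPath "/home/user/mydevs" as MyDevs.
Require Import MyDevs.Lists.
\end{verbatim}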
The loading of a compiled file is quick, because the corresponding development is not type-checked again. \section{Creating your own modules} You may create your own module files, by writing {\Coq} commands in a file, say \verb:my_module.v:. Such a module may be simply loaded in the current context, with command \verb:Load my_module:. It may also be compiled, in ``batch'' mode, using the UNIX command \verb:coqc:. Compiling the module \verb:my_module.v: creates a file \verb:my_module.vo:{} that can be reloaded with command \verb:Require: \verb:Import: \verb:my_module:. If a required module depends on other modules then the latter are automatically required beforehand. However, their contents are not automatically visible. If you want a module \verb=M= required in a module \verb=N= to be automatically visible when \verb=N= is required, you should use \verb:Require Export M: in your module \verb:N:. \section{Managing the context} It is often difficult to remember the names of all lemmas and definitions available in the current context, especially if large libraries have been loaded. A convenient \verb:SearchAbout: command is available to look up all known facts concerning a given predicate. For instance, if you want to know all the known lemmas about the less or equal relation, just ask: \begin{coq_example} SearchAbout le. \end{coq_example} Another command \verb:Search: displays only lemmas where the searched predicate appears at the head position in the conclusion. \begin{coq_example} Search le. \end{coq_example} A new and more convenient search tool is \textsf{SearchPattern}, developed by Yves Bertot. It allows one to find the theorems with a conclusion matching a given pattern, where \verb:\_: can be used in place of an arbitrary term. We remark in this example that \Coq{} provides usual infix notations for arithmetic operators. \begin{coq_example} SearchPattern (_ + _ = _). \end{coq_example} \section{Now you are on your own} This tutorial is necessarily incomplete. If you wish to pursue serious proving in \Coq, you should now get your hands on \Coq's Reference Manual, which contains a complete description of all the tactics we saw, plus many more. You should also look in the library of developed theories which is distributed with \Coq, in order to acquaint yourself with various proof techniques. \end{document} coq-8.4pl2/doc/rt/0000750000175000001440000000000012127276532013027 5ustar notinuserscoq-8.4pl2/doc/rt/RefMan-cover.tex0000640000175000001440000000310510377337562016043 0ustar notinusers\documentstyle[RRcover]{book} % L'utilisation du style `french' force le r\'esum\'e fran\c{c}ais % \`a appara\^{\i}tre en premier. \RRtitle{Manuel de r\'ef\'erence du syst\`eme Coq \\ version V7.1} \RRetitle{The Coq Proof Assistant \\ Reference Manual \\ Version 7.1 \thanks {This research was partly supported by ESPRIT Basic Research Action ``Types'' and by the GDR ``Programmation'' co-financed by MRE-PRC and CNRS.} } \RRauthor{Bruno Barras, Samuel Boutin, Cristina Cornes, Judica\"el Courant, Jean-Christophe Filli\^atre, Eduardo Gim\'enez, Hugo Herbelin, G\'erard Huet, C\'esar Mu\~noz, Chetan Murthy, Catherine Parent, Christine Paulin-Mohring, Amokrane Sa{\"\i}bi, Benjamin Werner} \authorhead{} \titlehead{Coq V7.1 Reference Manual} \RRtheme{2} \RRprojet{Coq} \RRNo{0123456789} \RRdate{May 1997} %\RRpages{} \URRocq \RRresume{Coq est un syst\`eme permettant le d\'eveloppement et la v\'erification de preuves formelles dans une logique d'ordre sup\'erieure incluant un riche langage de d\'efinitions de fonctions.
Ce document constitue le manuel de r\'ef\'erence de la version V7.1 qui est distribu\'ee par ftp anonyme l'adresse \url{ftp://ftp.inria.fr/INRIA/coq/}} \RRmotcle{Coq, Syst\`eme d'aide \`a la preuve, Preuves formelles, Calcul des Constructions Inductives} \RRabstract{Coq is a proof assistant based on a higher-order logic allowing powerful definitions of functions. Coq V7.1 is available by anonymous ftp at \url{ftp://ftp.inria.fr/INRIA/coq/}} \RRkeyword{Coq, Proof Assistant, Formal Proofs, Calculus of Inductives Constructions} \begin{document} \makeRT \end{document} coq-8.4pl2/doc/rt/Tutorial-cover.tex0000640000175000001440000000313110377337562016475 0ustar notinusers\documentstyle[RRcover]{book} % L'utilisation du style `french' force le rsum franais % apparatre en premier. \RRetitle{ The Coq Proof Assistant \\ A Tutorial \\ Version 7.1 \thanks{This research was partly supported by ESPRIT Basic Research Action ``Types'' and by the GDR ``Programmation'' co-financed by MRE-PRC and CNRS.} } \RRtitle{Coq \\ Une introduction \\ V7.1 } \RRauthor{G\'erard Huet, Gilles Kahn and Christine Paulin-Mohring} \RRtheme{2} \RRprojet{{Coq \\[15pt] {INRIA Rocquencourt} {\hskip -5.25pt} ~~{\bf ---}~~ \def\thefootnote{\arabic{footnote}\hss} {CNRS - ENS Lyon} \footnote[1]{LIP URA 1398 du CNRS, 46 All\'ee d'Italie, 69364 Lyon CEDEX 07, France.} {\hskip -14pt}}} %\RRNo{0123456789} \RRNo{0204} \RRdate{Ao\^ut 1997} \URRocq \RRresume{Coq est un syst\`eme permettant le d\'eveloppement et la v\'erification de preuves formelles dans une logique d'ordre sup\'erieure incluant un riche langage de d\'efinitions de fonctions. Ce document constitue une introduction pratique \`a l'utilisation de la version V7.1 qui est distribu\'ee par ftp anonyme l'adresse \url{ftp://ftp.inria.fr/INRIA/coq/}} \RRmotcle{Coq, Syst\`eme d'aide \`a la preuve, Preuves formelles, Calcul des Constructions Inductives} \RRabstract{Coq is a proof assistant based on a higher-order logic allowing powerful definitions of functions. This document is a tutorial for the version V7.1 of Coq. 
This version is available by anonymous ftp at \url{ftp://ftp.inria.fr/INRIA/coq/}} \RRkeyword{Coq, Proof Assistant, Formal Proofs, Calculus of Inductives Constructions} \begin{document} \makeRT \end{document} coq-8.4pl2/doc/faq/0000750000175000001440000000000012127276532013151 5ustar notinuserscoq-8.4pl2/doc/faq/axioms.png0000640000175000001440000002353310406335323015156 0ustar notinusersPNG  IHDR# pHYs M MέNtEXtSoftwareGNU Ghostscript 7.07" IDATxњa3-PEY j{2=iDD]AϾ O}|>o!}K_yJܺmh|q V8dۋ;OO#͟׽w0ޱS\*ƾz=kI?w; X^s;<(FmAg+6'?l&:Yg[{v1.$7qH) fyXmWyF:ryEmfYLޢjwTɆٍOMuv᪍Qd/[Zlr_ZbxEjejOV9~f)Ӵvcs1f=YVU:ORjjL7R2SOݢ9Pky_;gvŋES@ZN1ށF#,| t#L~Qk' Y௭o&Qᨵw׹egT_wml`Qss/u/,Wgq1~sCr '3v7HVNO9Ϡõtk!zb>DJw@+wv;5,odq'_Т|M,.lk]ݧǀ6.7DZmR~sPaEK>٘ZKkſXWL CP4Y os {Ѫ/[`r^J9ĺ\gC`̇ky s `5́90@^ky s `5́90@^ky s `5́90@^kH#Z]wF+͹]V,`Yp~{{{fH^U^QU,OdP9ƶ]0$g+u5|HwdKuwh(g\~6"wwn߁uݝ&~smCQf :Uk1 ~5vuz̛*Dj"ggH*+Qłr^SIx[bZjYԚ.+zJ.mک(f4V Gݶ|t6N7V$oڢgQ"}T 4vA-YV1mEYis(0r|+# q9=fcnZ0 SNzGfdS=gVՒ+p6okYtԝV[bB ?%VuZ"I}xvOej:"mNoyId_֖۽TZd(ޑo&XQyS/@`xޖ_h[~yGzra{(]=nlڗS;v!-PƯ68OægA^,?Ik~ u 5Q'w s:uxu`__!{ND̰f뺔!Ḣ3k<+ 98"7ްhЛAL"܍!Z{`Gyzi`j{B^ cxkc@}Abzy ,=A^n &Ȁ=Xo8 1:.NDy[}:u51 y˾/9<˜k܇ɐfP/A^ż\pbVIq k5>=ְf^GɷnʴgTO@k_(z^wCд i^UԨAGyu>%?8HIKE<ԎJMRku[g׹\%҃PIJyVI+2VW>O1arj+_?njo\C٨yly6a|K8KNǪ&8qfE_^145!}!s9Ɩ]|/w^I^F/WIꊌ2E1Q[%EybZdE/sU3xx1jk\=!:ky{ÀMk\UMi!015p9BLc,xۅjy\a@} k,b ~{N5[B5A^^tˏ T5pyD@^c>  y iD^ac +]5!ԘyӘK輞DB50yG^c}4@^c= b@%X1pw"1Ƴ'xc@W!1bD^c&{'7C\}"16@FD{y@P덼yO$b $=?'KR~p+/ C:}'lvPkT%5ן;^}ț9O|4͉)篿'7W)*#L镼\m "Eau-E傩%@BYɆv S~QLir-[B[~b}1 \!2ƞiBdc:iοv=cRϷA2k53s3Ƃ[6qWeTj-" byڂJ ~>|)O)֞g1{p%8p:~G|E@7rp: |ŝ! 7y s `5zp ܏c90@^ǘ90@^ X y s `5́׆({th̏PooC^?ȾZFh!ya,utrDN^ʩ5|W}a>nF_|>Rj:S@̇LelـcϯQpx!5sz{:95Wż0#:|fJ3rØnKc[ۭeMMU-yp|7 ޙ>ްƱ<of> -+?U~1ԪM-n,[0Z>oc)x TY_u⨅2VI~g0,#|Kql=VZ{ZmI,Di϶j NA(#?A=Ss=ͫy?(2%#O!u Ot=\.kuמO'1J-XjrVٷiFaDhC7czW5 TmC0aɯ`^O!1y[>o_|1" `53qG^kD@^ky 1L^7!q3s)5́9@Db,D,)\^p!0$*5TxI'_\{+ƒkJ <9Dk0p;2E ƛ_N* :C^DenN0?KLw#勑 /D߾n]œu^ќKk{ycָ=Y{9<32Y}895UU.qpyxq'bF_kog\o-+yE۫[iunTݧO1L6fksxY(E4_!`~Y\4=W wWgțeZQފVK-DS86?*W6-sܮ?iGlB]Qp:E,gcPvs'Fg{5g97_R_Ov9Lh|kY2Os@Jdm'z<;(=ZuXvkkT3KW6%T/da]NVbw\x @Y\[.-yIϯv}3veY;en\dQkLkb?`Os դY kyԞ 2v_EH<,Ɣ?/!1=n:AFvc5Ą C^c}L` 5^A7F^tc:5ގO)1 &tL ®׻gW?dTqˎ500&XBd`2}&Ÿg{{mqt /mL17?1<~'1U^ӯPˇ Cz6g,\N.WYaIJ%SeS#׸J#Z:vLozS wqŪk(_&fI֒!q"@ɇjyVW nuBY}*xfGa0;q8TvdobR!Dp_b/9[e6}^L?\*qn&>,VT@<Ť ;0:h1t[uC* UP o&jɝYE^v}t͋5ctF,cAK'sϪkFF0OR ]/i:SyuQ~o7&L3whhHS'I-PC΍Zts\p{8I)k?2,CcQn3_8v8aB߀Ц}1@?Ip?U8 Wgy#1&a"`̇ky s `5Ь >ѐsjZzqb{V]å)AqUGUGm-Y8K\|A!7<^:V|,GPvy1Ro ^FqK^b=W,MʻCJ?_,6O_BM}%P+ 69*5څ*R'k.ERu-EbNò)s+5d<fPKs6_oiT^WVQEgCmm_Ԗ|ѓ1n}M^3H|a‡4(VAӨߟnoՀ{)ƉojJj˞lN;ɞ EL[Î5xƱ6y=O&1s=VSEz?vmZou;sګ3֢Ntm S``ͣjgh@@fMKXc\:Z.NL#@^j/2רb ¥(́5́90@^x|k~i_g3qh'a D^kKqgHmFJ<ԻB%_ДlzQy tc|=}S8lZUղV]뙤OZG17M5y=|HkK?41"fiHZNq)}Wp!! 
p*: u0a :":uMwN`5́90@^ϡXŰwSZig:6|ԟoX}i_S;yXK{CY֣0ؓS+Z6Dla ?ӈx |'"6=Q-.R+ͳm963yj'WY @-OWЊu8s[>OY,ʧ2vmF2GޤӉZ_T"l4C`'=@ [dשCZώVW'1yԝ]-PvtyZZCNHF}T&sF ègtzN\1T[j8ECG]ew#j:==ԥZG^Grαu&QI~=Ū/ysؒjeVm$"C!_t!C`5́90@^ϡvZ[լ~UM?銍 ,7  !=pŊk0#nrco|*^#:y~'H*,˨q\J4٨u0/i,_ Ygj: oodMQε~ g檭4p,=Z^vMṪu,kcOx?m;|9\{g<5eok]Ѵ%^K?ԱZ5ot&T{ M06M*iz{0:_AXBu9 kwU׾!+?lu*1^k FUP%q4tV5׎Zmu ^u80@"gcl y:HK|HD$5CXP1/*8@^ky s `>:T9-~Kk͜y=Faמ_7UogxúƊqZy=N!w6݅agQ#ë_j> +oR+囸tmlv7(Hڙi;_&nbu} Pgzy&{5BTR~ .(klFڕ_5Y'jս|xḃ}^uL{V tF5rG=MHr6xo?wu@̙#uCi}'>,=Ӫin#ַR܏B=gImړ+[(S)mԭFm ;ra1nM;Cx!4ܰ[o dt6^u8v nS4IE4E뿺+YouaRuH$!_7b| s `5́90zmH=U믪ucuw/nߪ{Vw37]{چMG|/m=\LY޴|:"/{ށyO#|gQvCQ X, ۯ6vSno^Z޳֩%29"y=;MíhK^$'Vmg*_o+j1#Vy&Uךu,_(7?/oڥjeȽVԠ@O[!ѳvx:M -UW}-j4AuLqv̐v\Υ1Hm|3'V rw [k)MwBq/VyAi: =kqZR\c`vky11n~?c`9 X2IENDB`PNG  IHDR# pHYs M MέNtEXtSoftwareGNU Ghostscript 7.07"IDATx w>V ʚ8 ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ iR7 `IENDB`coq-8.4pl2/doc/faq/axioms.fig0000640000175000001440000001277410700725025015143 0ustar notinusers#FIG 3.2 Landscape Center Inches Letter 100.00 Single -2 1200 2 5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 0 14032.500 7222.500 4725 3825 4425 4800 4200 6000 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 1 3600.000 8925.000 3600 9075 3450 8925 3600 8775 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 1 3600.000 8625.000 3600 8775 3450 8625 3600 8475 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 8325.000 3600 8475 3450 8325 3600 8175 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 8625.000 3600 8775 3450 8625 3600 8475 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 8925.000 3600 9075 3450 8925 3600 8775 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 9225.000 3600 9375 3450 9225 3600 9075 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 0 6309.515 5767.724 4200 3825 3450 5550 3825 7200 1 1 1.00 60.00 120.00 6 2025 300 7725 525 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 7725 525 2025 525 4 0 0 50 -1 0 12 0.0000 4 180 5700 2025 450 The dependency graph of axioms in the Calculus of Inductive Constructions\001 -6 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 4200 6225 4200 7200 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 7725 3900 7200 6000 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 7200 6225 7200 7050 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 5550 5625 5550 6000 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 4575 6000 6450 6000 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 3375 3225 3375 3600 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3373 1950 3376 2250 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 3375 2625 3375 3000 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 2175 3600 3750 3600 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3075 2475 2475 2475 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3374 1125 3377 1425 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1049 1950 1052 
2250 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1048 1125 1051 1425 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3075 975 1575 975 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3075 1725 2025 1725 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4 8025 5925 8250 5925 9000 4950 9150 4950 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 8625 5400 8250 3900 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 7050 7350 4575 7950 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 4200 7500 4200 7950 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 4714 6255 7039 7080 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1500 3900 2175 6000 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 1139 2771 1364 3521 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 4425 4875 7350 3825 3 0 0 1 0 7 50 -1 -1 0.000 0 0 0 4 6450 7050 4050 6675 3750 6825 3750 7050 0.000 1.000 1.000 0.000 4 0 0 50 -1 0 12 0.0000 4 180 1830 6900 3750 Predicate extensionality\001 4 0 0 50 -1 0 12 0.0000 4 135 1800 3825 3750 Relational choice axiom\001 4 0 0 50 -1 0 12 0.0000 4 180 2100 6150 6150 Propositional extensionality\001 4 0 0 50 -1 0 12 0.0000 4 180 1005 450 1050 Operator iota\001 4 0 0 50 -1 2 12 0.0000 4 135 1020 450 1650 Constructive\001 4 0 0 50 -1 2 12 0.0000 4 180 1530 450 1875 definite description\001 4 0 0 50 -1 2 12 0.0000 4 180 2010 9000 5175 Functional extensionality\001 4 0 0 50 -1 0 12 0.0000 4 180 4740 150 10050 Statements in boldface are the most "interesting" ones for Coq\001 4 0 0 50 -1 0 12 0.0000 4 180 4800 3600 9375 Invariance by substitution of reflexivity proofs for equality on A\001 4 0 0 50 -1 0 12 0.0000 4 180 3735 3600 8475 Uniqueness of reflexivity proofs for equality on A\001 4 0 0 50 -1 0 12 0.0000 4 180 2670 3600 8775 Uniqueness of equality proofs on A\001 4 0 0 50 -1 0 12 0.0000 4 135 1080 3600 8175 Axiom K on A\001 4 0 0 50 -1 2 12 0.0000 4 135 1455 6450 7275 Proof-irrelevance\001 4 0 0 50 -1 2 12 0.0000 4 180 3360 3600 9075 Injectivity of equality on Sigma-types on A\001 4 0 0 50 -1 0 12 0.0000 4 180 2175 4950 6525 (needs Prop-impredicativity)\001 4 0 0 50 -1 0 12 0.0000 4 180 705 6000 6750 (Berardi)\001 4 0 0 50 -1 2 12 0.0000 4 135 1290 3675 6225 Excluded-middle\001 4 0 0 50 -1 0 12 0.0000 4 180 1905 4950 5550 Propositional degeneracy\001 4 0 0 50 -1 0 12 0.0000 4 180 1050 3750 5250 (Diaconescu)\001 4 0 0 50 -1 0 12 0.0000 4 180 2475 3375 7425 Decidability of equality on any A\001 4 0 0 50 -1 0 12 0.0000 4 180 1620 1275 5025 (if Set impredicative)\001 4 0 0 50 -1 0 12 0.0000 4 135 1560 1575 6225 Not excluded-middle\001 4 0 0 50 -1 0 12 0.0000 4 180 1770 450 2625 in propositional context\001 4 0 0 50 -1 2 12 0.0000 4 135 1020 3150 1650 Constructive\001 4 0 0 50 -1 2 12 0.0000 4 180 1665 3150 1875 indefinite description\001 4 0 0 50 -1 0 12 0.0000 4 180 2610 3150 2400 Constructive indefinite description\001 4 0 0 50 -1 0 12 0.0000 4 180 1770 3150 2625 in propositional context\001 4 0 0 50 -1 2 12 0.0000 4 135 1935 3150 3150 Functional choice axiom\001 4 0 0 50 -1 0 12 0.0000 4 135 2100 450 2400 Constructive definite descr.\001 4 0 0 50 -1 2 12 0.0000 4 180 1845 450 3750 Axiom of unique choice\001 4 0 0 50 -1 2 12 0.0000 4 180 1365 3150 1050 Operator epsilon\001 coq-8.4pl2/doc/faq/FAQ.tex0000640000175000001440000024503112010532576014302 0ustar notinusers\RequirePackage{ifpdf} \ifpdf % si on est en pdflatex \documentclass[a4paper,pdftex]{article} \else 
\documentclass[a4paper]{article} \fi \pagestyle{plain} % yay les symboles \usepackage{stmaryrd} \usepackage{amssymb} \usepackage{url} %\usepackage{multicol} \usepackage{hevea} \usepackage{fullpage} \usepackage[latin1]{inputenc} \usepackage[english]{babel} \ifpdf % si on est en pdflatex \usepackage[pdftex]{graphicx} \else \usepackage[dvips]{graphicx} \fi %\input{../macros.tex} % Making hevea happy %HEVEA \renewcommand{\textbar}{|} %HEVEA \renewcommand{\textunderscore}{\_} \def\Question#1{\stepcounter{question}\subsubsection{#1}} % version et date \def\faqversion{0.1} % les macros d'amour \def\Coq{\textsc{Coq}} \def\Why{\textsc{Why}} \def\Caduceus{\textsc{Caduceus}} \def\Krakatoa{\textsc{Krakatoa}} \def\Ltac{\textsc{Ltac}} \def\CoqIde{\textsc{CoqIde}} \newcommand{\coqtt}[1]{{\tt #1}} \newcommand{\coqimp}{{\mbox{\tt ->}}} \newcommand{\coqequiv}{{\mbox{\tt <->}}} % macro pour les tactics \def\split{{\tt split}} \def\assumption{{\tt assumption}} \def\auto{{\tt auto}} \def\trivial{{\tt trivial}} \def\tauto{{\tt tauto}} \def\left{{\tt left}} \def\right{{\tt right}} \def\decompose{{\tt decompose}} \def\intro{{\tt intro}} \def\intros{{\tt intros}} \def\field{{\tt field}} \def\ring{{\tt ring}} \def\apply{{\tt apply}} \def\exact{{\tt exact}} \def\cut{{\tt cut}} \def\assert{{\tt assert}} \def\solve{{\tt solve}} \def\idtac{{\tt idtac}} \def\fail{{\tt fail}} \def\existstac{{\tt exists}} \def\firstorder{{\tt firstorder}} \def\congruence{{\tt congruence}} \def\gb{{\tt gb}} \def\generalize{{\tt generalize}} \def\abstracttac{{\tt abstract}} \def\eapply{{\tt eapply}} \def\unfold{{\tt unfold}} \def\rewrite{{\tt rewrite}} \def\replace{{\tt replace}} \def\simpl{{\tt simpl}} \def\elim{{\tt elim}} \def\set{{\tt set}} \def\pose{{\tt pose}} \def\case{{\tt case}} \def\destruct{{\tt destruct}} \def\reflexivity{{\tt reflexivity}} \def\transitivity{{\tt transitivity}} \def\symmetry{{\tt symmetry}} \def\Focus{{\tt Focus}} \def\discriminate{{\tt discriminate}} \def\contradiction{{\tt contradiction}} \def\intuition{{\tt intuition}} \def\try{{\tt try}} \def\repeat{{\tt repeat}} \def\eauto{{\tt eauto}} \def\subst{{\tt subst}} \def\symmetryin{{\tt symmetryin}} \def\instantiate{{\tt instantiate}} \def\inversion{{\tt inversion}} \def\specialize{{\tt specialize}} \def\Defined{{\tt Defined}} \def\Qed{{\tt Qed}} \def\pattern{{\tt pattern}} \def\Type{{\tt Type}} \def\Prop{{\tt Prop}} \def\Set{{\tt Set}} \newcommand\vfile[2]{\ahref{#1}{\tt {#2}.v}} \urldef{\InitWf}\url {http://coq.inria.fr/library/Coq.Init.Wf.html} \urldef{\LogicBerardi}\url {http://coq.inria.fr/library/Coq.Logic.Berardi.html} \urldef{\LogicClassical}\url {http://coq.inria.fr/library/Coq.Logic.Classical.html} \urldef{\LogicClassicalFacts}\url {http://coq.inria.fr/library/Coq.Logic.ClassicalFacts.html} \urldef{\LogicClassicalDescription}\url {http://coq.inria.fr/library/Coq.Logic.ClassicalDescription.html} \urldef{\LogicProofIrrelevance}\url {http://coq.inria.fr/library/Coq.Logic.ProofIrrelevance.html} \urldef{\LogicEqdep}\url {http://coq.inria.fr/library/Coq.Logic.Eqdep.html} \urldef{\LogicEqdepDec}\url {http://coq.inria.fr/library/Coq.Logic.Eqdep_dec.html} \begin{document} \bibliographystyle{plain} \newcounter{question} \renewcommand{\thesubsubsection}{\arabic{question}} %%%%%%% Coq pour les nuls %%%%%%% \title{Coq Version 8.4 for the Clueless\\ \large(\protect\ref{lastquestion} \ Hints) } \author{Pierre Castran \and Hugo Herbelin \and Florent Kirchner \and Benjamin Monate \and Julien Narboux} \maketitle %%%%%%% \begin{abstract} This note intends to provide an 
easy way to get acquainted with the {\Coq} theorem prover. It tries to formulate appropriate answers to some of the questions any newcomers will face, and to give pointers to other references when possible. \end{abstract} %%%%%%% %\begin{multicols}{2} \tableofcontents %\end{multicols} %%%%%%% \newpage \section{Introduction} This FAQ is the sum of the questions that came to mind as we developed proofs in \Coq. Since we are singularly short-minded, we wrote the answers we found on bits of papers to have them at hand whenever the situation occurs again. This is pretty much the result of that: a collection of tips one can refer to when proofs become intricate. Yes, it means we won't take the blame for the shortcomings of this FAQ. But if you want to contribute and send in your own question and answers, feel free to write to us\ldots %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Presentation} \Question{What is {\Coq}?}\label{whatiscoq} The {\Coq} tool is a formal proof management system: a proof done with {\Coq} is mechanically checked by the machine. In particular, {\Coq} allows: \begin{itemize} \item the definition of mathematical objects and programming objects, \item to state mathematical theorems and software specifications, \item to interactively develop formal proofs of these theorems, \item to check these proofs by a small certification ``kernel''. \end{itemize} {\Coq} is based on a logical framework called ``Calculus of Inductive Constructions'' extended by a modular development system for theories. \Question{Did you really need to name it like that?} Some French computer scientists have a tradition of naming their software as animal species: Caml, Elan, Foc or Phox are examples of this tacit convention. In French, ``coq'' means rooster, and it sounds like the initials of the Calculus of Constructions CoC on which it is based. \Question{Is {\Coq} a theorem prover?} {\Coq} comes with decision and semi-decision procedures ( propositional calculus, Presburger's arithmetic, ring and field simplification, resolution, ...) but the main style for proving theorems is interactively by using LCF-style tactics. \Question{What are the other theorem provers?} Many other theorem provers are available for use nowadays. Isabelle, HOL, HOL Light, Lego, Nuprl, PVS are examples of provers that are fairly similar to {\Coq} by the way they interact with the user. Other relatives of {\Coq} are ACL2, Agda/Alfa, Twelf, Kiv, Mizar, NqThm, \begin{htmlonly}% Omega\ldots \end{htmlonly} \begin{latexonly}% {$\Omega$}mega\ldots \end{latexonly} \Question{What do I have to trust when I see a proof checked by Coq?} You have to trust: \begin{description} \item[The theory behind Coq] The theory of {\Coq} version 8.0 is generally admitted to be consistent wrt Zermelo-Fraenkel set theory + inaccessible cardinals. Proofs of consistency of subsystems of the theory of Coq can be found in the literature. \item[The Coq kernel implementation] You have to trust that the implementation of the {\Coq} kernel mirrors the theory behind {\Coq}. The kernel is intentionally small to limit the risk of conceptual or accidental implementation bugs. 
\item[The Objective Caml compiler] The {\Coq} kernel is written using the Objective Caml language but it uses only the most standard features (no object, no label ...), so that it is highly unprobable that an Objective Caml bug breaks the consistency of {\Coq} without breaking all other kinds of features of {\Coq} or of other software compiled with Objective Caml. \item[Your hardware] In theory, if your hardware does not work properly, it can accidentally be the case that False becomes provable. But it is more likely the case that the whole {\Coq} system will be unusable. You can check your proof using different computers if you feel the need to. \item[Your axioms] Your axioms must be consistent with the theory behind {\Coq}. \end{description} \Question{Where can I find information about the theory behind {\Coq}?} \begin{description} \item[The Calculus of Inductive Constructions] The \ahref{http://coq.inria.fr/doc/Reference-Manual006.html}{corresponding} chapter and the chapter on \ahref{http://coq.inria.fr/doc/Reference-Manual007.html}{modules} in the {\Coq} Reference Manual. \item[Type theory] A book~\cite{ProofsTypes} or some lecture notes~\cite{Types:Dowek}. \item[Inductive types] Christine Paulin-Mohring's habilitation thesis~\cite{Pau96b}. \item[Co-Inductive types] Eduardo Gimnez' thesis~\cite{EGThese}. \item[Miscellaneous] A \ahref{http://coq.inria.fr/doc/biblio.html}{bibliography} about Coq \end{description} \Question{How can I use {\Coq} to prove programs?} You can either extract a program from a proof by using the extraction mechanism or use dedicated tools, such as \ahref{http://why.lri.fr}{\Why}, \ahref{http://krakatoa.lri.fr}{\Krakatoa}, \ahref{http://why.lri.fr/caduceus/index.en.html}{\Caduceus}, to prove annotated programs written in other languages. %\Question{How many {\Coq} users are there?} % %An estimation is about 100 regular users. \Question{How old is {\Coq}?} The first implementation is from 1985 (it was named {\sf CoC} which is the acronym of the name of the logic it implemented: the Calculus of Constructions). The first official release of {\Coq} (version 4.10) was distributed in 1989. \Question{What are the \Coq-related tools?} There are graphical user interfaces: \begin{description} \item[Coqide] A GTK based GUI for \Coq. \item[Pcoq] A GUI for {\Coq} with proof by pointing and pretty printing. \item[coqwc] A tool similar to {\tt wc} to count lines in {\Coq} files. \item[Proof General] A emacs mode for {\Coq} and many other proof assistants. \item[ProofWeb] The ProofWeb online web interface for {\Coq} (and other proof assistants), with a focus on teaching. \item[ProverEditor] is an experimental Eclipse plugin with support for {\Coq}. \end{description} There are documentation and browsing tools: \begin{description} \item[Helm/Mowgli] A rendering, searching and publishing tool. \item[coq-tex] A tool to insert {\Coq} examples within .tex files. \item[coqdoc] A documentation tool for \Coq. \item[coqgraph] A tool to generate a dependency graph from {\Coq} sources. \end{description} There are front-ends for specific languages: \begin{description} \item[Why] A back-end generator of verification conditions. \item[Krakatoa] A Java code certification tool that uses both {\Coq} and {\Why} to verify the soundness of implementations with regards to the specifications. \item[Caduceus] A C code certification tool that uses both {\Coq} and \Why. \item[Zenon] A first-order theorem prover. 
\item[Focal] The \ahref{http://focal.inria.fr}{Focal} project aims at building an environment to develop certified computer algebra libraries. \item[Concoqtion] is a dependently-typed extension of Objective Caml (and of MetaOCaml) with specifications expressed and proved in Coq. \item[Ynot] is an extension of Coq providing a "Hoare Type Theory" for specifying higher-order, imperative and concurrent programs. \item[Ott]is a tool to translate the descriptions of the syntax and semantics of programming languages to the syntax of Coq, or of other provers. \end{description} \Question{What are the high-level tactics of \Coq} \begin{itemize} \item Decision of quantifier-free Presburger's Arithmetic \item Simplification of expressions on rings and fields \item Decision of closed systems of equations \item Semi-decision of first-order logic \item Prolog-style proof search, possibly involving equalities \end{itemize} \Question{What are the main libraries available for \Coq} \begin{itemize} \item Basic Peano's arithmetic, binary integer numbers, rational numbers, \item Real analysis, \item Libraries for lists, boolean, maps, floating-point numbers, \item Libraries for relations, sets and constructive algebra, \item Geometry \end{itemize} \Question{What are the mathematical applications for {\Coq}?} {\Coq} is used for formalizing mathematical theories, for teaching, and for proving properties of algorithms or programs libraries. The largest mathematical formalization has been done at the University of Nijmegen (see the \ahref{http://c-corn.cs.ru.nl}{Constructive Coq Repository at Nijmegen}). A symbolic step has also been obtained by formalizing in full a proof of the Four Color Theorem. \Question{What are the industrial applications for {\Coq}?} {\Coq} is used e.g. to prove properties of the JavaCard system (especially by Schlumberger and Trusted Logic). It has also been used to formalize the semantics of the Lucid-Synchrone data-flow synchronous calculus used by Esterel-Technologies. \iffalse todo christine compilo lustre? \fi %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Documentation} \Question{Where can I find documentation about {\Coq}?} All the documentation about \Coq, from the reference manual~\cite{Coq:manual} to friendly tutorials~\cite{Coq:Tutorial} and documentation of the standard library, is available \ahref{http://coq.inria.fr/doc-eng.html}{online}. All these documents are viewable either in browsable HTML, or as downloadable postscripts. \Question{Where can I find this FAQ on the web?} This FAQ is available online at \ahref{http://coq.inria.fr/doc/faq.html}{\url{http://coq.inria.fr/doc/faq.html}}. \Question{How can I submit suggestions / improvements / additions for this FAQ?} This FAQ is unfinished (in the sense that there are some obvious sections that are missing). Please send contributions to Coq-Club. \Question{Is there any mailing list about {\Coq}?} The main {\Coq} mailing list is \url{coq-club@inria.fr}, which broadcasts questions and suggestions about the implementation, the logical formalism or proof developments. See \ahref{http://coq.inria.fr/mailman/listinfo/coq-club}{\url{http://sympa-roc.inria.fr/wws/info/coq-club}} for subscription. For bugs reports see question \ref{coqbug}. \Question{Where can I find an archive of the list?} The archives of the {\Coq} mailing list are available at \ahref{http://pauillac.inria.fr/pipermail/coq-club}{\url{http://sympa-roc.inria.fr/wws/arc/coq-club}}. 
\Question{How can I be kept informed of new releases of {\Coq}?} New versions of {\Coq} are announced on the coq-club mailing list. If you only want to receive information about new releases, you can subscribe to {\Coq} on \ahref{http://freshmeat.net/projects/coq/}{\url{http://freshmeat.net/projects/coq/}}. \Question{Is there any book about {\Coq}?} The first book on \Coq, Yves Bertot and Pierre Castran's Coq'Art has been published by Springer-Verlag in 2004: \begin{quote} ``This book provides a pragmatic introduction to the development of proofs and certified programs using \Coq. With its large collection of examples and exercises it is an invaluable tool for researchers, students, and engineers interested in formal methods and the development of zero-default software.'' \end{quote} \Question{Where can I find some {\Coq} examples?} There are examples in the manual~\cite{Coq:manual} and in the Coq'Art~\cite{Coq:coqart} exercises \ahref{\url{http://www.labri.fr/Perso/~casteran/CoqArt/index.html}}{\url{http://www.labri.fr/Perso/~casteran/CoqArt/index.html}}. You can also find large developments using {\Coq} in the {\Coq} user contributions: \ahref{http://coq.inria.fr/contribs}{\url{http://coq.inria.fr/contribs}}. \Question{How can I report a bug?}\label{coqbug} You can use the web interface accessible at \ahref{http://coq.inria.fr}{\url{http://coq.inria.fr}}, link ``contacts''. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Installation} \Question{What is the license of {\Coq}?} {\Coq} is distributed under the GNU Lesser General License (LGPL). \Question{Where can I find the sources of {\Coq}?} The sources of {\Coq} can be found online in the tar.gz'ed packages (\ahref{http://coq.inria.fr}{\url{http://coq.inria.fr}}, link ``download''). Development sources can be accessed at \ahref{http://coq.gforge.inria.fr/}{\url{http://coq.gforge.inria.fr/}} \Question{On which platform is {\Coq} available?} Compiled binaries are available for Linux, MacOS X, and Windows. The sources can be easily compiled on all platforms supporting Objective Caml. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{The logic of {\Coq}} \subsection{General} \Question{What is the logic of \Coq?} {\Coq} is based on an axiom-free type theory called the Calculus of Inductive Constructions (see Coquand \cite{CoHu86}, Luo~\cite{Luo90} and Coquand--Paulin-Mohring \cite{CoPa89}). It includes higher-order functions and predicates, inductive and co-inductive datatypes and predicates, and a stratified hierarchy of sets. \Question{Is \Coq's logic intuitionistic or classical?} {\Coq}'s logic is modular. The core logic is intuitionistic (i.e. excluded-middle $A\vee\neg A$ is not granted by default). It can be extended to classical logic on demand by requiring an optional module stating $A\vee\neg A$. \Question{Can I define non-terminating programs in \Coq?} All programs in {\Coq} are terminating. Especially, loops must come with an evidence of their termination. Non-terminating programs can be simulated by passing around a bound on how long the program is allowed to run before dying. \Question{How is equational reasoning working in {\Coq}?} {\Coq} comes with an internal notion of computation called {\em conversion} (e.g. $(x+1)+y$ is internally equivalent to $(x+y)+1$; similarly applying argument $a$ to a function mapping $x$ to some expression $t$ converts to the expression $t$ where $x$ is replaced by $a$). 
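For instance, the following goal (a small illustrative example; see also the question about \coqtt{0+n=n} below) is closed by {\reflexivity} alone, because {\tt 0+n} computes to {\tt n} by conversion:

\begin{verbatim}
Goal forall n:nat, 0 + n = n.
Proof.
  intro n.
  reflexivity.   (* 0+n and n are convertible, no rewriting is needed *)
Qed.
\end{verbatim}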
This notion of conversion (which is decidable because {\Coq} programs are terminating) covers a certain part of equational reasoning but is limited to sequential evaluation of expressions of (not necessarily closed) programs. Besides conversion, equations have to be treated by hand or using specialised tactics. \subsection{Axioms} \Question{What axioms can be safely added to {\Coq}?} There are a few typical useful axioms that are independent from the Calculus of Inductive Constructions and that are considered consistent with the theory of {\Coq}. Most of these axioms are stated in the directory {\tt Logic} of the standard library of {\Coq}. The most interesting ones are \begin{itemize} \item Excluded-middle: $\forall A:Prop, A \vee \neg A$ \item Proof-irrelevance: $\forall A:Prop \forall p_1 p_2:A, p_1=p_2$ \item Unicity of equality proofs (or equivalently Streicher's axiom $K$): $\forall A \forall x y:A \forall p_1 p_2:x=y, p_1=p_2$ \item Hilbert's $\epsilon$ operator: if $A \neq \emptyset$, then there is $\epsilon_P$ such that $\exists x P(x) \rightarrow P(\epsilon_P)$ \item Church's $\iota$ operator: if $A \neq \emptyset$, then there is $\iota_P$ such that $\exists! x P(x) \rightarrow P(\iota_P)$ \item The axiom of unique choice: $\forall x \exists! y R(x,y) \rightarrow \exists f \forall x R(x,f(x))$ \item The functional axiom of choice: $\forall x \exists y R(x,y) \rightarrow \exists f \forall x R(x,f(x))$ \item Extensionality of predicates: $\forall P Q:A\rightarrow Prop, (\forall x, P(x) \leftrightarrow Q(x)) \rightarrow P=Q$ \item Extensionality of functions: $\forall f g:A\rightarrow B, (\forall x, f(x)=g(x)) \rightarrow f=g$ \end{itemize} Here is a summary of the relative strength of these axioms, most proofs can be found in directory {\tt Logic} of the standard library. The justification of their validity relies on the interpretability in set theory. %HEVEA\imgsrc{axioms.png} %BEGIN LATEX \ifpdf % si on est en pdflatex \includegraphics[width=1.0\textwidth]{axioms.png} \else \includegraphics[width=1.0\textwidth]{axioms.eps} \fi %END LATEX \Question{What standard axioms are inconsistent with {\Coq}?} The axiom of unique choice together with classical logic (e.g. excluded-middle) are inconsistent in the variant of the Calculus of Inductive Constructions where {\Set} is impredicative. As a consequence, the functional form of the axiom of choice and excluded-middle, or any form of the axiom of choice together with predicate extensionality are inconsistent in the {\Set}-impredicative version of the Calculus of Inductive Constructions. The main purpose of the \Set-predicative restriction of the Calculus of Inductive Constructions is precisely to accommodate these axioms which are quite standard in mathematical usage. The $\Set$-predicative system is commonly considered consistent by interpreting it in a standard set-theoretic boolean model, even with classical logic, axiom of choice and predicate extensionality added. \Question{What is Streicher's axiom $K$} \label{Streicher} Streicher's axiom $K$~\cite{HofStr98} is an axiom that asserts dependent elimination of reflexive equality proofs. \begin{coq_example*} Axiom Streicher_K : forall (A:Type) (x:A) (P: x=x -> Prop), P (eq_refl x) -> forall p: x=x, P p. \end{coq_example*} In the general case, axiom $K$ is an independent statement of the Calculus of Inductive Constructions. However, it is true on decidable domains (see file \vfile{\LogicEqdepDec}{Eqdep\_dec}). 
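For instance, on the decidable domain {\tt nat}, uniqueness of equality proofs is provable without any axiom. A possible script (relying on the standard library lemmas \coqtt{UIP\_dec} from \coqtt{Eqdep\_dec} and \coqtt{eq\_nat\_dec} from \coqtt{Peano\_dec}) is:

\begin{verbatim}
Require Import Eqdep_dec Peano_dec.

Theorem UIP_nat : forall (x y:nat) (p1 p2 : x = y), p1 = p2.
Proof.
  intros x y p1 p2.
  apply UIP_dec.     (* reduces the goal to decidability of equality on nat *)
  exact eq_nat_dec.
Qed.
\end{verbatim}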
It is also trivially a consequence of proof-irrelevance (see \ref{proof-irrelevance}) hence of classical logic. Axiom $K$ is equivalent to {\em Uniqueness of Identity Proofs} \cite{HofStr98} \begin{coq_example*} Axiom UIP : forall (A:Set) (x y:A) (p1 p2: x=y), p1 = p2. \end{coq_example*} Axiom $K$ is also equivalent to {\em Uniqueness of Reflexive Identity Proofs} \cite{HofStr98} \begin{coq_example*} Axiom UIP_refl : forall (A:Set) (x:A) (p: x=x), p = eq_refl x. \end{coq_example*} Axiom $K$ is also equivalent to \begin{coq_example*} Axiom eq_rec_eq : forall (A:Set) (x:A) (P: A->Set) (p:P x) (h: x=x), p = eq_rect x P p x h. \end{coq_example*} It is also equivalent to the injectivity of dependent equality (dependent equality is itself equivalent to equality of dependent pairs). \begin{coq_example*} Inductive eq_dep (U:Set) (P:U -> Set) (p:U) (x:P p) : forall q:U, P q -> Prop := eq_dep_intro : eq_dep U P p x p x. Axiom eq_dep_eq : forall (U:Set) (u:U) (P:U -> Set) (p1 p2:P u), eq_dep U P u p1 u p2 -> p1 = p2. \end{coq_example*} \Question{What is proof-irrelevance} \label{proof-irrelevance} A specificity of the Calculus of Inductive Constructions is to permit statements about proofs. This leads to the question of comparing two proofs of the same proposition. Identifying all proofs of the same proposition is called {\em proof-irrelevance}: $$ \forall A:\Prop, \forall p q:A, p=q $$ Proof-irrelevance (in {\Prop}) can be assumed without contradiction in {\Coq}. It expresses that only provability matters, whatever the exact form of the proof is. This is in harmony with the common purely logical interpretation of {\Prop}. Contrastingly, proof-irrelevance is inconsistent in {\Set} since there are types in {\Set}, such as the type of booleans, that provably have at least two distinct elements. Proof-irrelevance (in {\Prop}) is a consequence of classical logic (see proofs in file \vfile{\LogicClassical}{Classical} and \vfile{\LogicBerardi}{Berardi}). Proof-irrelevance is also a consequence of propositional extensionality (i.e. \coqtt{(A {\coqequiv} B) {\coqimp} A=B}, see the proof in file \vfile{\LogicClassicalFacts}{ClassicalFacts}). Proof-irrelevance directly implies Streicher's axiom $K$. \Question{What about functional extensionality?} Extensionality of functions is admittedly consistent with the Set-predicative Calculus of Inductive Constructions. %\begin{coq_example*} % Axiom extensionality : (A,B:Set)(f,g:(A->B))(x:A)(f x)=(g x)->f=g. %\end{coq_example*} Let {\tt A}, {\tt B} be types. To deal with extensionality on \verb=A->B= without relying on a general extensionality axiom, a possible approach is to define one's own extensional equality on \verb=A->B=. \begin{coq_eval} Variables A B : Set. \end{coq_eval} \begin{coq_example*} Definition ext_eq (f g: A->B) := forall x:A, f x = g x. \end{coq_example*} and to reason on \verb=A->B= as a setoid (see the Chapter on Setoids in the Reference Manual). \Question{Is {\Prop} impredicative?} Yes, the sort {\Prop} of propositions is {\em impredicative}. Otherwise said, a statement of the form $\forall A:Prop, P(A)$ can be instantiated by itself: if $\forall A:\Prop, P(A)$ is provable, then $P(\forall A:\Prop, P(A))$ is. \Question{Is {\Set} impredicative?} No, the sort {\Set} lying at the bottom of the hierarchy of computational types is {\em predicative} in the basic {\Coq} system. This means that a family of types in {\Set}, e.g. $\forall A:\Set, A \rightarrow A$, is not a type in {\Set} and it cannot be applied on itself. 
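This can be observed directly in the system: in a predicative {\Coq}, the product lands in {\Type} (the comment below shows the expected answer):

\begin{verbatim}
Check (forall A:Set, A -> A).
(*  forall A : Set, A -> A
         : Type             *)
\end{verbatim}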
However, the sort {\Set} was impredicative in the original versions of {\Coq}. For backward compatibility, or for experiments by knowledgeable users, the logic of {\Coq} can be set impredicative for {\Set} by calling {\Coq} with the option {\tt -impredicative-set}. {\Set} has been made predicative from version 8.0 of {\Coq}. The main reason is to interact smoothly with a classical mathematical world where both excluded-middle and the axiom of description are valid (see file \vfile{\LogicClassicalDescription}{ClassicalDescription} for a proof that excluded-middle and description implies the double negation of excluded-middle in {\Set} and file {\tt Hurkens\_Set.v} from the user contribution {\tt Paradoxes} at \ahref{http://coq.inria.fr/contribs}{\url{http://coq.inria.fr/contribs}} for a proof that impredicativity of {\Set} implies the simple negation of excluded-middle in {\Set}). \Question{Is {\Type} impredicative?} No, {\Type} is stratified. This is hidden for the user, but {\Coq} internally maintains a set of constraints ensuring stratification. If {\Type} were impredicative then it would be possible to encode Girard's systems $U-$ and $U$ in {\Coq} and it is known from Girard, Coquand, Hurkens and Miquel that systems $U-$ and $U$ are inconsistent [Girard 1972, Coquand 1991, Hurkens 1993, Miquel 2001]. This encoding can be found in file {\tt Logic/Hurkens.v} of {\Coq} standard library. For instance, when the user see {\tt $\forall$ X:Type, X->X : Type}, each occurrence of {\Type} is implicitly bound to a different level, say $\alpha$ and $\beta$ and the actual statement is {\tt forall X:Type($\alpha$), X->X : Type($\beta$)} with the constraint $\alpha<\beta$. When a statement violates a constraint, the message {\tt Universe inconsistency} appears. Example: {\tt fun (x:Type) (y:$\forall$ X:Type, X {\coqimp} X) => y x x}. \Question{I have two proofs of the same proposition. Can I prove they are equal?} In the base {\Coq} system, the answer is generally no. However, if classical logic is set, the answer is yes for propositions in {\Prop}. The answer is also yes if proof irrelevance holds (see question \ref{proof-irrelevance}). There are also ``simple enough'' propositions for which you can prove the equality without requiring any extra axioms. This is typically the case for propositions defined deterministically as a first-order inductive predicate on decidable sets. See for instance in question \ref{le-uniqueness} an axiom-free proof of the unicity of the proofs of the proposition {\tt le m n} (less or equal on {\tt nat}). % It is an ongoing work of research to natively include proof % irrelevance in {\Coq}. \Question{I have two proofs of an equality statement. Can I prove they are equal?} Yes, if equality is decidable on the domain considered (which is the case for {\tt nat}, {\tt bool}, etc): see {\Coq} file \verb=Eqdep_dec.v=). No otherwise, unless assuming Streicher's axiom $K$ (see \cite{HofStr98}) or a more general assumption such as proof-irrelevance (see \ref{proof-irrelevance}) or classical logic. All of these statements can be found in file \vfile{\LogicEqdep}{Eqdep}. \Question{Can I prove that the second components of equal dependent pairs are equal?} The answer is the same as for proofs of equality statements. It is provable if equality on the domain of the first component is decidable (look at \verb=inj_right_pair= from file \vfile{\LogicEqdepDec}{Eqdep\_dec}), but not provable in the general case. 
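For dependent pairs built with \coqtt{existT}, the statement at stake can be phrased as follows (an illustrative formulation only; see \coqtt{inj\_pair2} mentioned below for the exact one of the standard library):

\begin{verbatim}
forall (U:Type) (P:U -> Type) (p:U) (x y:P p),
  existT P p x = existT P p y -> x = y.
\end{verbatim}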
However, it is consistent (with the Calculus of Constructions) to assume it is true. The file \vfile{\LogicEqdep}{Eqdep} actually provides an axiom (equivalent to Streicher's axiom $K$) which entails the result (look at \verb=inj_pair2= in \vfile{\LogicEqdep}{Eqdep}). \subsection{Impredicativity} \Question{Why {\tt injection} does not work on impredicative {\tt Set}?} E.g. in this case (this occurs only in the {\tt Set}-impredicative variant of \Coq): \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Inductive I : Type := intro : forall k:Set, k -> I. Lemma eq_jdef : forall x y:nat, intro _ x = intro _ y -> x = y. Proof. intros x y H; injection H. \end{coq_example*} Injectivity of constructors is restricted to predicative types. If injectivity on large inductive types were not restricted, we would be allowed to derive an inconsistency (e.g. following the lines of Burali-Forti paradox). The question remains open whether injectivity is consistent on some large inductive types not expressive enough to encode known paradoxes (such as type I above). \Question{What is a ``large inductive definition''?} An inductive definition in {\Prop} or {\Set} is called large if its constructors embed sets or propositions. As an example, here is a large inductive type: \begin{coq_example*} Inductive sigST (P:Set -> Set) : Type := existST : forall X:Set, P X -> sigST P. \end{coq_example*} In the {\tt Set} impredicative variant of {\Coq}, large inductive definitions in {\tt Set} have restricted elimination schemes to prevent inconsistencies. Especially, projecting the set or the proposition content of a large inductive definition is forbidden. If it were allowed, it would be possible to encode e.g. Burali-Forti paradox \cite{Gir70,Coq85}. \Question{Is Coq's logic conservative over Coquand's Calculus of Constructions?} Yes for the non Set-impredicative version of the Calculus of Inductive Constructions. Indeed, the impredicative sort of the Calculus of Constructions can only be interpreted as the sort {\Prop} since {\Set} is predicative. But {\Prop} can be %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Talkin' with the Rooster} %%%%%%% \subsection{My goal is ..., how can I prove it?} \Question{My goal is a conjunction, how can I prove it?} Use some theorem or assumption or use the {\split} tactic. \begin{coq_example} Goal forall A B:Prop, A->B-> A/\B. intros. split. assumption. assumption. Qed. \end{coq_example} \Question{My goal contains a conjunction as an hypothesis, how can I use it?} If you want to decompose your hypothesis into other hypothesis you can use the {\decompose} tactic: \begin{coq_example} Goal forall A B:Prop, A/\B-> B. intros. decompose [and] H. assumption. Qed. \end{coq_example} \Question{My goal is a disjunction, how can I prove it?} You can prove the left part or the right part of the disjunction using {\left} or {\right} tactics. If you want to do a classical reasoning step, use the {\tt classic} axiom to prove the right part with the assumption that the left part of the disjunction is false. \begin{coq_example} Goal forall A B:Prop, A-> A\/B. intros. left. assumption. Qed. \end{coq_example} An example using classical reasoning: \begin{coq_example} Require Import Classical. Ltac classical_right := match goal with | _:_ |-?X1 \/ _ => (elim (classic X1);intro;[left;trivial|right]) end. Ltac classical_left := match goal with | _:_ |- _ \/?X1 => (elim (classic X1);intro;[right;trivial|left]) end. Goal forall A B:Prop, (~A -> B) -> A\/B. 
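(* classical_right performs a case analysis on (classic A): if A holds, the
   left disjunct is closed by trivial; otherwise ~A enters the context and
   only B remains to be proved, here by auto from the hypothesis ~A -> B. *)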
intros. classical_right. auto. Qed. \end{coq_example} \Question{My goal is an universally quantified statement, how can I prove it?} Use some theorem or assumption or introduce the quantified variable in the context using the {\intro} tactic. If there are several variables you can use the {\intros} tactic. A good habit is to provide names for these variables: {\Coq} will do it anyway, but such automatic naming decreases legibility and robustness. \Question{My goal contains an universally quantified statement, how can I use it?} If the universally quantified assumption matches the goal you can use the {\apply} tactic. If it is an equation you can use the {\rewrite} tactic. Otherwise you can use the {\specialize} tactic to instantiate the quantified variables with terms. The variant {\tt assert(Ht := H t)} makes a copy of assumption {\tt H} before instantiating it. \Question{My goal is an existential, how can I prove it?} Use some theorem or assumption or exhibit the witness using the {\existstac} tactic. \begin{coq_example} Goal exists x:nat, forall y, x+y=y. exists 0. intros. auto. Qed. \end{coq_example} \Question{My goal is solvable by some lemma, how can I prove it?} Just use the {\apply} tactic. \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Lemma mylemma : forall x, x+0 = x. auto. Qed. Goal 3+0 = 3. apply mylemma. Qed. \end{coq_example} \Question{My goal contains False as an hypothesis, how can I prove it?} You can use the {\contradiction} or {\intuition} tactics. \Question{My goal is an equality of two convertible terms, how can I prove it?} Just use the {\reflexivity} tactic. \begin{coq_example} Goal forall x, 0+x = x. intros. reflexivity. Qed. \end{coq_example} \Question{My goal is a {\tt let x := a in ...}, how can I prove it?} Just use the {\intro} tactic. \Question{My goal is a {\tt let (a, ..., b) := c in}, how can I prove it?} Just use the {\destruct} c as (a,...,b) tactic. \Question{My goal contains some existential hypotheses, how can I use it?} You can use the tactic {\elim} with you hypotheses as an argument. \Question{My goal contains some existential hypotheses, how can I use it and decompose my knowledge about this new thing into different hypotheses?} \begin{verbatim} Ltac DecompEx H P := elim H;intro P;intro TO;decompose [and] TO;clear TO;clear H. \end{verbatim} \Question{My goal is an equality, how can I swap the left and right hand terms?} Just use the {\symmetry} tactic. \begin{coq_example} Goal forall x y : nat, x=y -> y=x. intros. symmetry. assumption. Qed. \end{coq_example} \Question{My hypothesis is an equality, how can I swap the left and right hand terms?} Just use the {\symmetryin} tactic. \begin{coq_example} Goal forall x y : nat, x=y -> y=x. intros. symmetry in H. assumption. Qed. \end{coq_example} \Question{My goal is an equality, how can I prove it by transitivity?} Just use the {\transitivity} tactic. \begin{coq_example} Goal forall x y z : nat, x=y -> y=z -> x=z. intros. transitivity y. assumption. assumption. Qed. \end{coq_example} \Question{My goal would be solvable using {\tt apply;assumption} if it would not create meta-variables, how can I prove it?} You can use {\tt eapply yourtheorem;eauto} but it won't work in all cases ! (for example if more than one hypothesis match one of the subgoals generated by \eapply) so you should rather use {\tt try solve [eapply yourtheorem;eauto]}, otherwise some metavariables may be incorrectly instantiated. \begin{coq_example} Lemma trans : forall x y z : nat, x=y -> y=z -> x=z. intros. 
transitivity y;assumption. Qed. Goal forall x y z : nat, x=y -> y=z -> x=z. intros. eapply trans;eauto. Qed. Goal forall x y z t : nat, x=y -> x=t -> y=z -> x=z. intros. eapply trans;eauto. Undo. eapply trans. apply H. auto. Qed. Goal forall x y z t : nat, x=y -> x=t -> y=z -> x=z. intros. eapply trans;eauto. Undo. try solve [eapply trans;eauto]. eapply trans. apply H. auto. Qed. \end{coq_example} \Question{My goal is solvable by some lemma within a set of lemmas and I don't want to remember which one, how can I prove it?} You can use a what is called a hints' base. \begin{coq_example} Require Import ZArith. Require Ring. Local Open Scope Z_scope. Lemma toto1 : 1+1 = 2. ring. Qed. Lemma toto2 : 2+2 = 4. ring. Qed. Lemma toto3 : 2+1 = 3. ring. Qed. Hint Resolve toto1 toto2 toto3 : mybase. Goal 2+(1+1)=4. auto with mybase. Qed. \end{coq_example} \Question{My goal is one of the hypotheses, how can I prove it?} Use the {\assumption} tactic. \begin{coq_example} Goal 1=1 -> 1=1. intro. assumption. Qed. \end{coq_example} \Question{My goal appears twice in the hypotheses and I want to choose which one is used, how can I do it?} Use the {\exact} tactic. \begin{coq_example} Goal 1=1 -> 1=1 -> 1=1. intros. exact H0. Qed. \end{coq_example} \Question{What can be the difference between applying one hypothesis or another in the context of the last question?} From a proof point of view it is equivalent but if you want to extract a program from your proof, the two hypotheses can lead to different programs. \Question{My goal is a propositional tautology, how can I prove it?} Just use the {\tauto} tactic. \begin{coq_example} Goal forall A B:Prop, A-> (A\/B) /\ A. intros. tauto. Qed. \end{coq_example} \Question{My goal is a first order formula, how can I prove it?} Just use the semi-decision tactic: \firstorder. \iffalse todo: demander un exemple Pierre \fi \Question{My goal is solvable by a sequence of rewrites, how can I prove it?} Just use the {\congruence} tactic. \begin{coq_example} Goal forall a b c d e, a=d -> b=e -> c+b=d -> c+e=a. intros. congruence. Qed. \end{coq_example} \Question{My goal is a disequality solvable by a sequence of rewrites, how can I prove it?} Just use the {\congruence} tactic. \begin{coq_example} Goal forall a b c d, a<>d -> b=a -> d=c+b -> b<>c+b. intros. congruence. Qed. \end{coq_example} \Question{My goal is an equality on some ring (e.g. natural numbers), how can I prove it?} Just use the {\ring} tactic. \begin{coq_example} Require Import ZArith. Require Ring. Local Open Scope Z_scope. Goal forall a b : Z, (a+b)*(a+b) = a*a + 2*a*b + b*b. intros. ring. Qed. \end{coq_example} \Question{My goal is an equality on some field (e.g. real numbers), how can I prove it?} Just use the {\field} tactic. \begin{coq_example} Require Import Reals. Require Ring. Local Open Scope R_scope. Goal forall a b : R, b*a<>0 -> (a/b) * (b/a) = 1. intros. field. cut (b*a <>0 -> a<>0). cut (b*a <>0 -> b<>0). auto. auto with real. auto with real. Qed. \end{coq_example} \Question{My goal is an inequality on integers in Presburger's arithmetic (an expression build from +,-,constants and variables), how can I prove it?} \begin{coq_example} Require Import ZArith. Require Omega. Local Open Scope Z_scope. Goal forall a : Z, a>0 -> a+a > a. intros. omega. Qed. \end{coq_example} \Question{My goal is an equation solvable using equational hypothesis on some ring (e.g. natural numbers), how can I prove it?} You need the {\gb} tactic (see Loc Pottier's homepage). 
\subsection{Tactics usage} \Question{I want to state a fact that I will use later as an hypothesis, how can I do it?} If you want to use forward reasoning (first proving the fact and then using it) you just need to use the {\assert} tactic. If you want to use backward reasoning (proving your goal using an assumption and then proving the assumption) use the {\cut} tactic. \begin{coq_example} Goal forall A B C D : Prop, (A -> B) -> (B->C) -> A -> C. intros. assert (A->C). intro;apply H0;apply H;assumption. apply H2. assumption. Qed. Goal forall A B C D : Prop, (A -> B) -> (B->C) -> A -> C. intros. cut (A->C). intro. apply H2;assumption. intro;apply H0;apply H;assumption. Qed. \end{coq_example} \Question{I want to state a fact that I will use later as an hypothesis and prove it later, how can I do it?} You can use {\cut} followed by {\intro} or you can use the following {\Ltac} command: \begin{verbatim} Ltac assert_later t := cut t;[intro|idtac]. \end{verbatim} \Question{What is the difference between {\Qed} and {\Defined}?} These two commands perform type checking, but when {\Defined} is used the new definition is set as transparent, otherwise it is defined as opaque (see \ref{opaque}). \Question{How can I know what a tactic does?} You can use the {\tt info} command. \Question{Why {\auto} does not work? How can I fix it?} You can increase the depth of the proof search or add some lemmas in the base of hints. Perhaps you may need to use \eauto. \Question{What is {\eauto}?} This is the same tactic as \auto, but it relies on {\eapply} instead of \apply. \Question{How can I speed up {\auto}?} You can use \texttt{info }{\auto} to replace {\auto} by the tactics it generates. You can split your hint bases into smaller ones. \Question{What is the equivalent of {\tauto} for classical logic?} Currently there are no equivalent tactic for classical logic. You can use Gdel's ``not not'' translation. \Question{I want to replace some term with another in the goal, how can I do it?} If one of your hypothesis (say {\tt H}) states that the terms are equal you can use the {\rewrite} tactic. Otherwise you can use the {\replace} {\tt with} tactic. \Question{I want to replace some term with another in an hypothesis, how can I do it?} You can use the {\rewrite} {\tt in} tactic. \Question{I want to replace some symbol with its definition, how can I do it?} You can use the {\unfold} tactic. \Question{How can I reduce some term?} You can use the {\simpl} tactic. \Question{How can I declare a shortcut for some term?} You can use the {\set} or {\pose} tactics. \Question{How can I perform case analysis?} You can use the {\case} or {\destruct} tactics. \Question{How can I prevent the case tactic from losing information ?} You may want to use the (now standard) {\tt case\_eq} tactic. See the Coq'Art page 159. \Question{Why should I name my intros?} When you use the {\intro} tactic you don't have to give a name to your hypothesis. If you do so the name will be generated by {\Coq} but your scripts may be less robust. If you add some hypothesis to your theorem (or change their order), you will have to change your proof to adapt to the new names. \Question{How can I automatize the naming?} You can use the {\tt Show Intro.} or {\tt Show Intros.} commands to generate the names and use your editor to generate a fully named {\intro} tactic. This can be automatized within {\tt xemacs}. \begin{coq_example} Goal forall A B C : Prop, A -> B -> C -> A/\B/\C. Show Intros. (* A B C H H0 H1 *) intros A B C H H0 H1. 
repeat split;assumption. Qed. \end{coq_example} \Question{I want to automatize the use of some tactic, how can I do it?} You need to use the {\tt proof with T} command and add {\ldots} at the end of your sentences. For instance: \begin{coq_example} Goal forall A B C : Prop, A -> B/\C -> A/\B/\C. Proof with assumption. intros. split... Qed. \end{coq_example} \Question{I want to execute the {\texttt proof with} tactic only if it solves the goal, how can I do it?} You need to use the {\try} and {\solve} tactics. For instance: \begin{coq_example} Require Import ZArith. Require Ring. Local Open Scope Z_scope. Goal forall a b c : Z, a+b=b+a. Proof with try solve [ring]. intros... Qed. \end{coq_example} \Question{How can I do the opposite of the {\intro} tactic?} You can use the {\generalize} tactic. \begin{coq_example} Goal forall A B : Prop, A->B-> A/\B. intros. generalize H. intro. auto. Qed. \end{coq_example} \Question{One of the hypothesis is an equality between a variable and some term, I want to get rid of this variable, how can I do it?} You can use the {\subst} tactic. This will rewrite the equality everywhere and clear the assumption. \Question{What can I do if I get ``{\tt generated subgoal term has metavariables in it }''?} You should use the {\eapply} tactic, this will generate some goals containing metavariables. \Question{How can I instantiate some metavariable?} Just use the {\instantiate} tactic. \Question{What is the use of the {\pattern} tactic?} The {\pattern} tactic transforms the current goal, performing beta-expansion on all the applications featuring this tactic's argument. For instance, if the current goal includes a subterm {\tt phi(t)}, then {\tt pattern t} transforms the subterm into {\tt (fun x:A => phi(x)) t}. This can be useful when {\apply} fails on matching, to abstract the appropriate terms. \Question{What is the difference between assert, cut and generalize?} PS: Notice for people that are interested in proof rendering that \assert and {\pose} (and \cut) are not rendered the same as {\generalize} (see the HELM experimental rendering tool at \ahref{http://helm.cs.unibo.it/library.html}{\url{http://helm.cs.unibo.it}}, link HELM, link COQ Online). Indeed {\generalize} builds a beta-expanded term while \assert, {\pose} and {\cut} uses a let-in. \begin{verbatim} (* Goal is T *) generalize (H1 H2). (* Goal is A->T *) ... a proof of A->T ... \end{verbatim} is rendered into something like \begin{verbatim} (h) ... the proof of A->T ... we proved A->T (h0) by (H1 H2) we proved A by (h h0) we proved T \end{verbatim} while \begin{verbatim} (* Goal is T *) assert q := (H1 H2). (* Goal is A *) ... a proof of A ... (* Goal is A |- T *) ... a proof of T ... \end{verbatim} is rendered into something like \begin{verbatim} (q) ... the proof of A ... we proved A ... the proof of T ... we proved T \end{verbatim} Otherwise said, {\generalize} is not rendered in a forward-reasoning way, while {\assert} is. \Question{What can I do if \Coq can not infer some implicit argument ?} You can state explicitely what this implicit argument is. See \ref{implicit}. \Question{How can I explicit some implicit argument ?}\label{implicit} Just use \texttt{A:=term} where \texttt{A} is the argument. For instance if you want to use the existence of ``nil'' on nat*nat lists: \begin{verbatim} exists (nil (A:=(nat*nat))). 
\end{verbatim} \iffalse \Question{Is there anyway to do pattern matching with dependent types?} todo \fi \subsection{Proof management} \Question{How can I change the order of the subgoals?} You can use the {\Focus} command to concentrate on some goal. When the goal is proved you will see the remaining goals. \Question{How can I change the order of the hypothesis?} You can use the {\tt Move ... after} command. \Question{How can I change the name of an hypothesis?} You can use the {\tt Rename ... into} command. \Question{How can I delete some hypothesis?} You can use the {\tt Clear} command. \Question{How can use a proof which is not finished?} You can use the {\tt Admitted} command to state your current proof as an axiom. You can use the {\tt admit} tactic to omit a portion of a proof. \Question{How can I state a conjecture?} You can use the {\tt Admitted} command to state your current proof as an axiom. \Question{What is the difference between a lemma, a fact and a theorem?} From {\Coq} point of view there are no difference. But some tools can have a different behavior when you use a lemma rather than a theorem. For instance {\tt coqdoc} will not generate documentation for the lemmas within your development. \Question{How can I organize my proofs?} You can organize your proofs using the section mechanism of \Coq. Have a look at the manual for further information. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Inductive and Co-inductive types} \subsection{General} \Question{How can I prove that two constructors are different?} You can use the {\discriminate} tactic. \begin{coq_example} Inductive toto : Set := | C1 : toto | C2 : toto. Goal C1 <> C2. discriminate. Qed. \end{coq_example} \Question{During an inductive proof, how to get rid of impossible cases of an inductive definition?} Use the {\inversion} tactic. \Question{How can I prove that 2 terms in an inductive set are equal? Or different?} Have a look at \coqtt{decide equality} and \coqtt{discriminate} in the \ahref{http://coq.inria.fr/doc/main.html}{Reference Manual}. \Question{Why is the proof of \coqtt{0+n=n} on natural numbers trivial but the proof of \coqtt{n+0=n} is not?} Since \coqtt{+} (\coqtt{plus}) on natural numbers is defined by analysis on its first argument \begin{coq_example} Print plus. \end{coq_example} {\noindent} The expression \coqtt{0+n} evaluates to \coqtt{n}. As {\Coq} reasons modulo evaluation of expressions, \coqtt{0+n} and \coqtt{n} are considered equal and the theorem \coqtt{0+n=n} is an instance of the reflexivity of equality. On the other side, \coqtt{n+0} does not evaluate to \coqtt{n} and a proof by induction on \coqtt{n} is necessary to trigger the evaluation of \coqtt{+}. \Question{Why is dependent elimination in Prop not available by default?} This is just because most of the time it is not needed. To derive a dependent elimination principle in {\tt Prop}, use the command {\tt Scheme} and apply the elimination scheme using the \verb=using= option of \verb=elim=, \verb=destruct= or \verb=induction=. \Question{Argh! I cannot write expressions like ``~{\tt if n <= p then p else n}~'', as in any programming language} \label{minmax} The short answer : You should use {\texttt le\_lt\_dec n p} instead.\\ The long answer: That's right, you can't. If you type for instance the following ``definition'': \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Definition max (n p : nat) := if n <= p then p else n. 
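(* This definition is rejected: as explained below, n <= p is a
   proposition (it lives in Prop), not a boolean test. *)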
\end{coq_example}
As \Coq~ says, the term ``~\texttt{n <= p}~'' is a proposition, i.e. a statement
that belongs to the mathematical world. There are many ways to prove such a
proposition, either by some computation, or using some already proven theorems.
For instance, proving $3-2 \leq 2^{45503}$ is very easy, using some theorems on
arithmetical operations. If you compute both numbers before comparing them, you
may waste a lot of time and space. On the contrary, a function for computing the
greatest of two natural numbers is an algorithm which, called on two natural
numbers $n$ and $p$, determines whether $n\leq p$ or $p < n$. Such a function is
a \emph{decision procedure} for the inequality on \texttt{nat}. The possibility
of writing such a procedure comes directly from the decidability of the order
$\leq$ on natural numbers.

When you write a piece of code like ``~\texttt{if n <= p then \dots{} else
\dots}~'' in a programming language like \emph{ML} or \emph{Java}, a call to
such a decision procedure is generated. The decision procedure is in general a
primitive function, written in a low-level language, whose correctness you
simply have to trust. The standard library of the system \emph{Coq} contains a
(constructive) proof of decidability of the order $\leq$ on \texttt{nat}: the
function \texttt{le\_lt\_dec} of the module \texttt{Compare\_dec} of library
\texttt{Arith}. The following code shows how to correctly define \texttt{min}
and \texttt{max}, and prove some properties of these functions.
\begin{coq_example}
Require Import Compare_dec.

Definition max (n p : nat) := if le_lt_dec n p then p else n.
Definition min (n p : nat) := if le_lt_dec n p then n else p.

Eval compute in (min 4 7).

Theorem min_plus_max : forall n p, min n p + max n p = n + p.
Proof.
 intros n p; unfold min, max; case (le_lt_dec n p); simpl; auto with arith.
Qed.

Theorem max_equiv : forall n p, max n p = p <-> n <= p.
Proof.
 unfold max; intros n p; case (le_lt_dec n p); simpl; auto.
 intuition auto with arith.
 split.
 intro e; rewrite e; auto with arith.
 intro H; absurd (p < p); eauto with arith.
Qed.
\end{coq_example}

\Question{I wrote my own decision procedure for $\leq$, which is much faster
than yours, but proving such theorems as \texttt{max\_equiv} seems to be quite
difficult}

Your code is probably the following one:
\begin{coq_example}
Fixpoint my_le_lt_dec (n p :nat) {struct n}: bool :=
 match n, p with
 | 0, _ => true
 | S n', S p' => my_le_lt_dec n' p'
 | _ , _ => false
 end.

Definition my_max (n p:nat) := if my_le_lt_dec n p then p else n.
Definition my_min (n p:nat) := if my_le_lt_dec n p then n else p.
\end{coq_example}
For instance, the computation of \texttt{my\_max 567 321} is almost immediate,
whereas one can't wait for the result of \texttt{max 56 32}, using \emph{Coq's}
\texttt{le\_lt\_dec}.

This is normal. Your definition is a simple recursive function which returns a
boolean value. Coq's \texttt{le\_lt\_dec} is a \emph{certified function}, i.e.
a complex object, able not only to tell whether $n\leq p$ or $p < n$, but also
to build a proof of whichever statement holds. In order to prove theorems such
as \texttt{max\_equiv} about your own functions, you first have to relate the
boolean result of \texttt{my\_le\_lt\_dec} to the corresponding propositions,
for instance by proving the following two lemmas:
\begin{coq_example*}
Theorem my_le_lt_dec_true :
 forall n p, my_le_lt_dec n p = true <-> n <= p.
Theorem my_le_lt_dec_false :
 forall n p, my_le_lt_dec n p = false <-> p < n.
\end{coq_example*}

\subsection{Recursion}

\Question{Why can't I define a non-terminating program?}

Because otherwise the decidability of the type-checking algorithm (which
involves evaluation of programs) is not ensured. Moreover, if non-terminating
proofs were allowed, we could get a proof of {\tt False}:

\begin{coq_example*}
(* This is fortunately not allowed!
*) Fixpoint InfiniteProof (n:nat) : False := InfiniteProof n. Theorem Paradox : False. Proof (InfiniteProof O). \end{coq_example*} \Question{Why only structurally well-founded loops are allowed?} The structural order on inductive types is a simple and powerful notion of termination. The consistency of the Calculus of Inductive Constructions relies on it and another consistency proof would have to be made for stronger termination arguments (such as the termination of the evaluation of CIC programs themselves!). In spite of this, all non-pathological termination orders can be mapped to a structural order. Tools to do this are provided in the file \vfile{\InitWf}{Wf} of the standard library of {\Coq}. \Question{How to define loops based on non structurally smaller recursive calls?} The procedure is as follows (we consider the definition of {\tt mergesort} as an example). \begin{itemize} \item Define the termination order, say {\tt R} on the type {\tt A} of the arguments of the loop. \begin{coq_eval} Open Scope R_scope. Require Import List. \end{coq_eval} \begin{coq_example*} Definition R (a b:list nat) := length a < length b. \end{coq_example*} \item Prove that this order is well-founded (in fact that all elements in {\tt A} are accessible along {\tt R}). \begin{coq_example*} Lemma Rwf : well_founded R. \end{coq_example*} \item Define the step function (which needs proofs that recursive calls are on smaller arguments). \begin{verbatim} Definition split (l : list nat) : {l1: list nat | R l1 l} * {l2 : list nat | R l2 l} := (* ... *) . Definition concat (l1 l2 : list nat) : list nat := (* ... *) . Definition merge_step (l : list nat) (f: forall l':list nat, R l' l -> list nat) := let (lH1,lH2) := (split l) in let (l1,H1) := lH1 in let (l2,H2) := lH2 in concat (f l1 H1) (f l2 H2). \end{verbatim} \item Define the recursive function by fixpoint on the step function. \begin{coq_example*} Definition merge := Fix Rwf (fun _ => list nat) merge_step. \end{coq_example*} \end{itemize} \Question{What is behind the accessibility and well-foundedness proofs?} Well-foundedness of some relation {\tt R} on some type {\tt A} is defined as the accessibility of all elements of {\tt A} along {\tt R}. \begin{coq_example} Print well_founded. Print Acc. \end{coq_example} The structure of the accessibility predicate is a well-founded tree branching at each node {\tt x} in {\tt A} along all the nodes {\tt x'} less than {\tt x} along {\tt R}. Any sequence of elements of {\tt A} decreasing along the order {\tt R} are branches in the accessibility tree. Hence any decreasing along {\tt R} is mapped into a structural decreasing in the accessibility tree of {\tt R}. This is emphasised in the definition of {\tt fix} which recurs not on its argument {\tt x:A} but on the accessibility of this argument along {\tt R}. See file \vfile{\InitWf}{Wf}. \Question{How to perform simultaneous double induction?} In general a (simultaneous) double induction is simply solved by an induction on the first hypothesis followed by an inversion over the second hypothesis. Here is an example \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Inductive even : nat -> Prop := | even_O : even 0 | even_S : forall n:nat, even n -> even (S (S n)). Inductive odd : nat -> Prop := | odd_SO : odd 1 | odd_S : forall n:nat, odd n -> odd (S (S n)). Lemma not_even_and_odd : forall n:nat, even n -> odd n -> False. induction 1. inversion 1. inversion 1. apply IHeven; trivial. \end{coq_example} \begin{coq_eval} Qed. 
\end{coq_eval}
In case the type of the second induction hypothesis is not dependent,
{\tt inversion} can just be replaced by {\tt destruct}.

\Question{How to define a function by simultaneous double recursion?}

The same trick applies; you can even let the pattern-matching compilation
algorithm do the work for you. Here is an example:
\begin{coq_example}
Fixpoint minus (n m:nat) {struct n} : nat :=
 match n, m with
 | O, _ => 0
 | S k, O => S k
 | S k, S l => minus k l
 end.
Print minus.
\end{coq_example}
In case of dependencies in the types of the induction objects $t_1$ and $t_2$,
an extra argument stating $t_1=t_2$ must be given to the fixpoint definition.

\Question{How to perform nested and double induction?}

To reason by nested (i.e. lexicographic) induction, just reason by induction on
the successive components.

\smallskip

Double induction (or induction on pairs) is a restriction of lexicographic
induction. Here is an example of double induction.

\begin{coq_example}
Lemma nat_double_ind :
 forall P : nat -> nat -> Prop, P 0 0 ->
   (forall m n, P m n -> P m (S n)) ->
   (forall m n, P m n -> P (S m) n) -> forall m n, P m n.
intros P H00 HmS HSn; induction m.
(* case 0 *)
induction n; [assumption | apply HmS; apply IHn].
(* case Sm *)
intro n; apply HSn; apply IHm.
\end{coq_example}
\begin{coq_eval}
Qed.
\end{coq_eval}

\Question{How to define a function by nested recursion?}

The same trick applies. Here is the example of the Ackermann function.

\begin{coq_example}
Fixpoint ack (n:nat) : nat -> nat :=
 match n with
 | O => S
 | S n' =>
    (fix ack' (m:nat) : nat :=
       match m with
       | O => ack n' 1
       | S m' => ack n' (ack' m')
       end)
 end.
\end{coq_example}

\subsection{Co-inductive types}

\Question{I have a cofixpoint $t:=F(t)$ and I want to prove $t=F(t)$. How to do it?}

Just case-expand $F({\tt t})$, then conclude by a trivial case analysis.
Here is what it gives, e.g., on the type of streams of natural numbers:

\begin{coq_eval}
Set Implicit Arguments.
\end{coq_eval}
\begin{coq_example}
CoInductive Stream (A:Set) : Set :=
  Cons : A -> Stream A -> Stream A.
CoFixpoint nats (n:nat) : Stream nat := Cons n (nats (S n)).
Lemma Stream_unfold :
 forall n:nat, nats n = Cons n (nats (S n)).
Proof.
 intro;
  change (nats n = match nats n with
                   | Cons x s => Cons x s
                   end).
 case (nats n); reflexivity.
Qed.
\end{coq_example}

\section{Syntax and notations}

\Question{I do not want to type ``forall'' because it is too long, what can I do?}

You can define your own notation for forall:
\begin{verbatim}
Notation "fa x : t, P" := (forall x:t, P) (at level 200, x ident).
\end{verbatim}
or, if you are using {\CoqIde}, you can define a pretty symbol for forall and
an input method (see \ref{forallcoqide}).

\Question{How can I define a notation for square?}

You can use for instance:
\begin{verbatim}
Notation "x ^2" := (Rmult x x) (at level 20).
\end{verbatim}
Note that you cannot use:
\begin{tt}
Notation "x ²" := (Rmult x x) (at level 20).
\end{tt}
because ``²'' is an iso-latin character. If you really want this kind of
notation you should use UTF-8.

\Question{Why do ``no associativity'' and ``left associativity'' at the same level not work?}

Because we rely on Camlp4 for syntactic analysis, and Camlp4 does not really
implement non-associativity. By default, non-associative operators are treated
as right-associative.

\Question{How can I know the associativity associated with a level?}

You can run ``Print Grammar constr'' and decode the Camlp4 output. Good luck!
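Rather than decoding the grammar output, it is often simpler to give the level
and associativity explicitly when declaring a notation. Here is a minimal
sketch (the operator \verb#+++# is only for illustration):
\begin{verbatim}
Notation "x +++ y" := (x + y + y) (at level 50, left associativity).
\end{verbatim}
With this declaration, \verb#a +++ b +++ c# is parsed as \verb#(a +++ b) +++ c#.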
\section{Modules} %%%%%%% \section{\Ltac} \Question{What is {\Ltac}?} {\Ltac} is the tactic language for \Coq. It provides the user with a high-level ``toolbox'' for tactic creation. \Question{Is there any printing command in {\Ltac}?} You can use the {\idtac} tactic with a string argument. This string will be printed out. The same applies to the {\fail} tactic \Question{What is the syntax for let in {\Ltac}?} If $x_i$ are identifiers and $e_i$ and $expr$ are tactic expressions, then let reads: \begin{center} {\tt let $x_1$:=$e_1$ with $x_2$:=$e_2$\ldots with $x_n$:=$e_n$ in $expr$}. \end{center} Beware that if $expr$ is complex (i.e. features at least a sequence) parenthesis should be added around it. For example: \begin{coq_example} Ltac twoIntro := let x:=intro in (x;x). \end{coq_example} \Question{What is the syntax for pattern matching in {\Ltac}?} Pattern matching on a term $expr$ (non-linear first order unification) with patterns $p_i$ and tactic expressions $e_i$ reads: \begin{center} \hspace{10ex} {\tt match $expr$ with \hspace*{2ex}$p_1$ => $e_1$ \hspace*{1ex}\textbar$p_2$ => $e_2$ \hspace*{1ex}\ldots \hspace*{1ex}\textbar$p_n$ => $e_n$ \hspace*{1ex}\textbar\ \textunderscore\ => $e_{n+1}$ end. } \end{center} Underscore matches all terms. \Question{What is the semantics for ``match goal''?} The semantics of {\tt match goal} depends on whether it returns tactics or not. The {\tt match goal} expression matches the current goal against a series of patterns: {$hyp_1 {\ldots} hyp_n$ \textbar- $ccl$}. It uses a first-order unification algorithm and in case of success, if the right-hand-side is an expression, it tries to type it while if the right-hand-side is a tactic, it tries to apply it. If the typing or the tactic application fails, the {\tt match goal} tries all the possible combinations of $hyp_i$ before dropping the branch and moving to the next one. Underscore matches all terms. \Question{Why can't I use a ``match goal'' returning a tactic in a non tail-recursive position?} This is precisely because the semantics of {\tt match goal} is to apply the tactic on the right as soon as a pattern unifies what is meaningful only in tail-recursive uses. The semantics in non tail-recursive call could have been the one used for terms (i.e. fail if the tactic expression is not typable, but don't try to apply it). For uniformity of semantics though, this has been rejected. \Question{How can I generate a new name?} You can use the following syntax: {\tt let id:=fresh in \ldots}\\ For example: \begin{coq_example} Ltac introIdGen := let id:=fresh in intro id. \end{coq_example} \iffalse \Question{How can I access the type of a term?} You can use typeof. todo \fi \iffalse \Question{How can I define static and dynamic code?} \fi \section{Tactics written in OCaml} \Question{Can you show me an example of a tactic written in OCaml?} Have a look at the skeleton ``Hello World'' tactic from the next question. You also have some examples of tactics written in OCaml in the ``plugins'' directory of {\Coq} sources. \Question{Is there a skeleton of OCaml tactic I can reuse somewhere?} The following steps describe how to write a simplistic ``Hello world'' OCaml tactic. This takes the form of a dynamically loadable OCaml module, which will be invoked from the Coq toplevel. \begin{enumerate} \item In the \verb+plugins+ directory of the Coq source location, create a directory \verb+hello+. 
Proceed to create a grammar and OCaml file, respectively \verb+plugins/hello/g_hello.ml4+ and \verb+plugins/hello/coq_hello.ml+, containing: \begin{itemize} \item in \verb+g_hello.ml4+: \begin{verbatim} (*i camlp4deps: "parsing/grammar.cma" i*) TACTIC EXTEND Hello | [ "hello" ] -> [ Coq_hello.printHello ] END \end{verbatim} \item in \verb+coq_hello.ml+: \begin{verbatim} let printHello gl = Tacticals.tclIDTAC_MESSAGE (Pp.str "Hello world") gl \end{verbatim} \end{itemize} \item Create a file \verb+plugins/hello/hello_plugin.mllib+, containing the names of the OCaml modules bundled in the dynamic library: \begin{verbatim} Coq_hello G_hello \end{verbatim} \item Append the following lines in \verb+plugins/plugins{byte,opt}.itarget+: \begin{itemize} \item in \verb+pluginsopt.itarget+: \begin{verbatim} hello/hello_plugin.cmxa \end{verbatim} \item in \verb+pluginsbyte.itarget+: \begin{verbatim} hello/hello_plugin.cma \end{verbatim} \end{itemize} \item In the root directory of the Coq source location, modify the file \verb+Makefile.common+: \begin{itemize} \item add \verb+hello+ to the \verb+SRCDIR+ definition (second argument of the \verb+addprefix+ function); \item in the section ``Object and Source files'', add \verb+HELLOCMA:=plugins/hello/hello_plugin.cma+; \item add \verb+$(HELLOCMA)+ to the \verb+PLUGINSCMA+ definition. \end{itemize} \item Modify the file \verb+Makefile.build+, adding in section ``3) plugins'' the line: \begin{verbatim} hello: $(HELLOCMA) \end{verbatim} \item From the command line, run \verb+make hello+, then \verb+make plugins/hello/hello_plugin.cmxs+. \end{enumerate} The call to the tactic \verb+hello+ from a Coq script has to be preceded by \verb+Declare ML Module "hello_plugin"+, which will load the dynamic object \verb+hello_plugin.cmxs+. For instance: \begin{verbatim} Declare ML Module "hello_plugin". Variable A:Prop. Goal A-> A. Proof. hello. auto. Qed. \end{verbatim} \section{Case studies} \iffalse \Question{How can I define vectors or lists of size n?} \fi \Question{How to prove that 2 sets are different?} You need to find a property true on one set and false on the other one. As an example we show how to prove that {\tt bool} and {\tt nat} are discriminable. As discrimination property we take the property to have no more than 2 elements. \begin{coq_example*} Theorem nat_bool_discr : bool <> nat. Proof. pose (discr := fun X:Set => ~ (forall a b:X, ~ (forall x:X, x <> a -> x <> b -> False))). intro Heq; assert (H: discr bool). intro H; apply (H true false); destruct x; auto. rewrite Heq in H; apply H; clear H. destruct a; destruct b as [|n]; intro H0; eauto. destruct n; [ apply (H0 2); discriminate | eauto ]. Qed. \end{coq_example*} \Question{Is there an axiom-free proof of Streicher's axiom $K$ for the equality on {\tt nat}?} \label{K-nat} Yes, because equality is decidable on {\tt nat}. Here is the proof. \begin{coq_example*} Require Import Eqdep_dec. Require Import Peano_dec. Theorem K_nat : forall (x:nat) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. Proof. intros; apply K_dec_set with (p := p). apply eq_nat_dec. assumption. Qed. \end{coq_example*} Similarly, we have \begin{coq_example*} Theorem eq_rect_eq_nat : forall (p:nat) (Q:nat->Type) (x:Q p) (h:p=p), x = eq_rect p Q x p h. Proof. intros; apply K_nat with (p := h); reflexivity. Qed. \end{coq_example*} \Question{How to prove that two proofs of {\tt n<=m} on {\tt nat} are equal?} \label{le-uniqueness} This is provable without requiring any axiom because axiom $K$ directly holds on {\tt nat}. 
Here is a proof using question \ref{K-nat}. \begin{coq_example*} Require Import Arith. Scheme le_ind' := Induction for le Sort Prop. Theorem le_uniqueness_proof : forall (n m : nat) (p q : n <= m), p = q. Proof. induction p using le_ind'; intro q. replace (le_n n) with (eq_rect _ (fun n0 => n <= n0) (le_n n) _ eq_refl). 2:reflexivity. generalize (eq_refl n). pattern n at 2 4 6 10, q; case q; [intro | intros m l e]. rewrite <- eq_rect_eq_nat; trivial. contradiction (le_Sn_n m); rewrite <- e; assumption. replace (le_S n m p) with (eq_rect _ (fun n0 => n <= n0) (le_S n m p) _ eq_refl). 2:reflexivity. generalize (eq_refl (S m)). pattern (S m) at 1 3 4 6, q; case q; [intro Heq | intros m0 l HeqS]. contradiction (le_Sn_n m); rewrite Heq; assumption. injection HeqS; intro Heq; generalize l HeqS. rewrite <- Heq; intros; rewrite <- eq_rect_eq_nat. rewrite (IHp l0); reflexivity. Qed. \end{coq_example*} \Question{How to exploit equalities on sets} To extract information from an equality on sets, you need to find a predicate of sets satisfied by the elements of the sets. As an example, let's consider the following theorem. \begin{coq_example*} Theorem interval_discr : forall m n:nat, {x : nat | x <= m} = {x : nat | x <= n} -> m = n. \end{coq_example*} We have a proof requiring the axiom of proof-irrelevance. We conjecture that proof-irrelevance can be circumvented by introducing a primitive definition of discrimination of the proofs of \verb!{x : nat | x <= m}!. \begin{latexonly}% The proof can be found in file {\tt interval$\_$discr.v} in this directory. %Here is the proof %\begin{small} %\begin{flushleft} %\begin{texttt} %\def_{\ifmmode\sb\else\subscr\fi} %\include{interval_discr.v} %%% WARNING semantics of \_ has changed ! %\end{texttt} %$a\_b\_c$ %\end{flushleft} %\end{small} \end{latexonly}% \begin{htmlonly}% \ahref{./interval_discr.v}{Here} is the proof. \end{htmlonly} \Question{I have a problem of dependent elimination on proofs, how to solve it?} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Inductive Def1 : Set := c1 : Def1. Inductive DefProp : Def1 -> Prop := c2 : forall d:Def1, DefProp d. Inductive Comb : Set := c3 : forall d:Def1, DefProp d -> Comb. Lemma eq_comb : forall (d1 d1':Def1) (d2:DefProp d1) (d2':DefProp d1'), d1 = d1' -> c3 d1 d2 = c3 d1' d2'. \end{coq_example*} You need to derive the dependent elimination scheme for DefProp by hand using {\coqtt Scheme}. \begin{coq_eval} Abort. \end{coq_eval} \begin{coq_example*} Scheme DefProp_elim := Induction for DefProp Sort Prop. Lemma eq_comb : forall d1 d1':Def1, d1 = d1' -> forall (d2:DefProp d1) (d2':DefProp d1'), c3 d1 d2 = c3 d1' d2'. intros. destruct H. destruct d2 using DefProp_elim. destruct d2' using DefProp_elim. reflexivity. Qed. \end{coq_example*} \Question{And what if I want to prove the following?} \begin{coq_example*} Inductive natProp : nat -> Prop := | p0 : natProp 0 | pS : forall n:nat, natProp n -> natProp (S n). Inductive package : Set := pack : forall n:nat, natProp n -> package. Lemma eq_pack : forall n n':nat, n = n' -> forall (np:natProp n) (np':natProp n'), pack n np = pack n' np'. \end{coq_example*} \begin{coq_eval} Abort. \end{coq_eval} \begin{coq_example*} Scheme natProp_elim := Induction for natProp Sort Prop. Definition pack_S : package -> package. destruct 1. apply (pack (S n)). apply pS; assumption. Defined. Lemma eq_pack : forall n n':nat, n = n' -> forall (np:natProp n) (np':natProp n'), pack n np = pack n' np'. intros n n' Heq np np'. generalize dependent n'. 
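(* n' (together with the hypotheses Heq and np' that mention it) has just
   been generalized, so that the induction on np below yields an induction
   hypothesis usable for any n'. *)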
induction np using natProp_elim. induction np' using natProp_elim; intros; auto. discriminate Heq. induction np' using natProp_elim; intros; auto. discriminate Heq. change (pack_S (pack n np) = pack_S (pack n0 np')). apply (f_equal (A:=package)). apply IHnp. auto. Qed. \end{coq_example*} \section{Publishing tools} \Question{How can I generate some latex from my development?} You can use {\tt coqdoc}. \Question{How can I generate some HTML from my development?} You can use {\tt coqdoc}. \Question{How can I generate some dependency graph from my development?} You can use the tool \verb|coqgraph| developped by Philippe Audebaud in 2002. This tool transforms dependancies generated by \verb|coqdep| into 'dot' files which can be visualized using the Graphviz software (http://www.graphviz.org/). \Question{How can I cite some {\Coq} in my latex document?} You can use {\tt coq\_tex}. \Question{How can I cite the {\Coq} reference manual?} You can use this bibtex entry: \begin{verbatim} @Manual{Coq:manual, title = {The Coq proof assistant reference manual}, author = {\mbox{The Coq development team}}, organization = {LogiCal Project}, note = {Version 8.2}, year = {2009}, url = "http://coq.inria.fr" } \end{verbatim} \Question{Where can I publish my developments in {\Coq}?} You can submit your developments as a user contribution to the {\Coq} development team. This ensures its liveness along the evolution and possible changes of {\Coq}. You can also submit your developments to the HELM/MoWGLI repository at the University of Bologna (see \ahref{http://mowgli.cs.unibo.it}{\url{http://mowgli.cs.unibo.it}}). For developments submitted in this database, it is possible to visualize the developments in natural language and execute various retrieving requests. \Question{How can I read my proof in natural language?} You can submit your proof to the HELM/MoWGLI repository and use the rendering tool provided by the server (see \ahref{http://mowgli.cs.unibo.it}{\url{http://mowgli.cs.unibo.it}}). \section{\CoqIde} \Question{What is {\CoqIde}?} {\CoqIde} is a gtk based GUI for \Coq. \Question{How to enable Emacs keybindings?} Depending on your configuration, use either one of these two methods \begin{itemize} \item Insert \texttt{gtk-key-theme-name = "Emacs"} in your \texttt{coqide-gtk2rc} file. It should be in \verb#$XDG_CONFIG_DIRS/coq# dir. This is done by default. \item If in Gnome, run the gnome configuration editor (\texttt{gconf-editor}) and set key \texttt{gtk-key-theme} to \texttt{Emacs} in the category \texttt{desktop/gnome/interface}. \end{itemize} %$ juste pour que la coloration emacs marche \Question{How to enable antialiased fonts?} Set the \verb#GDK_USE_XFT# variable to \verb#1#. This is by default with \verb#Gtk >= 2.2#. If some of your fonts are not available, set \verb#GDK_USE_XFT# to \verb#0#. \Question{How to use those Forall and Exists pretty symbols?}\label{forallcoqide} Thanks to the notation features in \Coq, you just need to insert these lines in your {\Coq} buffer:\\ \begin{tt} Notation "$\forall$ x : t, P" := (forall x:t, P) (at level 200, x ident). \end{tt}\\ \begin{tt} Notation "$\exists$ x : t, P" := (exists x:t, P) (at level 200, x ident). \end{tt} Copy/Paste of these lines from this file will not work outside of \CoqIde. You need to load a file containing these lines or to enter the $\forall$ using an input method (see \ref{inputmeth}). To try it just use \verb#Require Import utf8# from inside \CoqIde. 
To enable these notations automatically, start coqide with
\begin{verbatim}
coqide -l utf8
\end{verbatim}
In the ide subdirectory of the {\Coq} library, you will find a sample utf8.v
with some pretty simple notations.

\Question{How to define an input method for non-ASCII symbols?}\label{inputmeth}

\begin{itemize}
\item First solution: type \verb#2200# to enter a forall in the script window.
2200 is the hexadecimal code for forall in the Unicode charts and is encoded as
the bytes \texttt{E2 88 80} in UTF-8. 2203 is for exists. See
\ahref{http://www.unicode.org}{\url{http://www.unicode.org}} for more codes.
\item Second solution: rebind \verb#a# to forall and \verb#e# to exists.
Under X11, one can add the following lines to the file \verb=~/.xmodmaprc=:
\begin{verbatim}
! forall
keycode 24 = a A a A U2200 NoSymbol U2200 NoSymbol
! exists
keycode 26 = e E e E U2203 NoSymbol U2203 NoSymbol
\end{verbatim}
and then run \verb=xmodmap ~/.xmodmaprc=. Alternatively, if your version of
\verb=xmodmap= does not support unicode, you need to use something like
\begin{verbatim}
xmodmap -e "keycode 24 = a A F13 F13"
xmodmap -e "keycode 26 = e E F14 F14"
\end{verbatim}
and then add
\verb=bind "F13" {"insert-at-cursor" ("=$\forall$\verb=")}=\\
\verb=bind "F14" {"insert-at-cursor" ("=$\exists$\verb=")}=
to the \verb#binding "text"# section of your \verb#coqide-gtk2rc# file. The
last arguments to {\tt bind}, between quotes, are the UTF-8 encodings for
0x2200 and 0x2203. You can compute these encodings using the lablgtk2 toplevel
with
\begin{verbatim}
Glib.Utf8.from_unichar 0x2200;;
\end{verbatim}
Further symbols can be bound to higher Fxx keys or even to other keys you do
not need.
\end{itemize}

\Question{How to customize the shortcuts for menus?}

Two solutions are offered:
\begin{itemize}
\item Edit \verb+$XDG_CONFIG_HOME/coq/coqide.keys+ (which is usually
\verb+$HOME/.config/coq/coqide.keys+) by hand, or
\item Add "gtk-can-change-accels = 1" in your coqide-gtk2rc file. Then, from
\CoqIde, you may select a menu entry and press the desired shortcut.
\end{itemize}

\Question{What encoding should I use? What is this $\backslash$x\{iiii\} in my file?}

The encoding option is related to the way files are saved. Keep it as UTF-8
until it becomes important for you to exchange files with non-UTF-8-aware
applications. If you choose something other than UTF-8, then missing characters
will be encoded by $\backslash$x\{....\} or $\backslash$x\{........\} where each
dot is a hexadecimal digit. The number between braces is the hexadecimal Unicode
index of the missing character.

\Question{How to get rid of annoying unwanted automatic templates?}

Some users may experience problems with unwanted automatic templates while using
Coqide. This is due to a change in the modifier keys available through GTK. The
simplest way to get rid of the problem is to edit your coqiderc by hand (either
\verb|/home//.config/coq/coqiderc| under Linux, or \\
\verb|C:\Documents and Settings\\.config\coq\coqiderc| under Windows) and
replace any occurrence of \texttt{MOD4} by \texttt{MOD1}.

\section{Extraction}

\Question{What is program extraction?}

Program extraction consists in generating a program from a constructive proof.

\Question{Which languages can I extract to?}

You can extract your programs to Objective Caml and Haskell.

\Question{How can I extract an incomplete proof?}

You can provide programs for your axioms.

%%%%%%%
\section{Glossary}

\Question{Can you explain what an evaluable constant is?}

An evaluable constant is a constant which is unfoldable.

\Question{What is a goal?}

The goal is the statement to be proved.
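For instance, the command
\begin{verbatim}
Goal forall n:nat, n + 0 = n.
\end{verbatim}
starts an interactive proof whose goal is \coqtt{forall n:nat, n + 0 = n}.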
\Question{What is a meta variable?}

A meta variable in {\Coq} represents a ``hole'', i.e. a part of a proof that is
still unknown.

\Question{What is Gallina?}

Gallina is the specification language of \Coq. Complete documentation of this
language can be found in the Reference Manual.

\Question{What is The Vernacular?}

It is the language of commands of Gallina, i.e. definitions, lemmas, {\ldots}

\Question{What is a dependent type?}

A dependent type is a type which depends on some term. For instance, ``vector
of size $n$'' is a dependent type representing all the vectors of size $n$;
this type depends on the term $n$.

\Question{What is a proof by reflection?}

This is a proof generated by some computation which is done using the internal
reduction of {\Coq} (not using the tactic language of {\Coq} (\Ltac) nor the
implementation language of \Coq). An example of a tactic using the reflection
mechanism is the {\ring} tactic. The reflection method consists in reflecting a
subset of the {\Coq} language (for example the arithmetical expressions) into
an object of the {\Coq} language itself (in this case an inductive type
denoting arithmetical expressions). For more information
see~\cite{howe,harrison,boutin} and the last chapter of the Coq'Art.

\Question{What is intuitionistic logic?}

This is any logic which does not assume that ``A or not A'' holds.

\Question{What is proof-irrelevance?}

See question \ref{proof-irrelevance}.

\Question{What is the difference between opaque and transparent?}{\label{opaque}}

Opaque definitions cannot be unfolded, but transparent ones can.

\section{Troubleshooting}

\Question{What can I do when {\tt Qed.} is slow?}

Sometimes you can use the {\abstracttac} tactic, which behaves as if you had
stated some local lemma; this speeds up type-checking.

\Question{Why does \texttt{Reset Initial.} not work when using \texttt{coqc}?}

The initial state corresponds to the state of \texttt{coqtop} when the
interactive session began. It does not make sense in files meant to be
compiled.

\Question{What can I do if I get ``No more subgoals but non-instantiated
existential variables''?}

This means that {\eauto} or {\eapply} did not instantiate an existential
variable which eventually got erased by some computation. You may backtrack to
the faulty occurrence of {\eauto} or {\eapply} and give the missing argument an
explicit value. Alternatively, you can use the commands
\texttt{Show Existentials.} and \texttt{Existential.} to display and
instantiate the remaining existential variables.

\begin{coq_example}
Lemma example_show_existentials :
 forall a b c:nat, a=b -> b=c -> a=c.
Proof.
intros.
eapply eq_trans.
Show Existentials.
eassumption.
assumption.
Qed.
\end{coq_example}

\Question{What can I do if I get ``Cannot solve a second-order unification
problem''?}

You can help {\Coq} using the {\pattern} tactic.

\Question{Why does {\Coq} tell me that \texttt{\{x:A|(P x)\}} is not convertible
with \texttt{(sig A P)}?}

This is because \texttt{\{x:A|P x\}} is a notation for
\texttt{sig (fun x:A => P x)}. Since {\Coq} does not reason up to
$\eta$-conversion, this is different from \texttt{sig P}.

\Question{I copy-paste a term and {\Coq} says it is not convertible to the
original term. Sometimes it even says the copied term is not well-typed.}

This is probably due to invisible implicit information (implicit arguments,
coercions and Cases annotations) in the printed term, which is not
re-synthesised from the copy-pasted term in the same way as it is in the
original term. Consider for instance {\tt (@eq Type True True)}.
This term is printed as {\tt True=True} and re-parsed as {\tt (@eq Prop True True)}. The two terms are not convertible (hence they fool tactics like {\tt pattern}). There is currently no satisfactory answer to the problem. However, the command {\tt Set Printing All} is useful for diagnosing the problem. Due to coercions, one may even face type-checking errors. In some rare cases, the criterion to hide coercions is a bit too loose, which may result in a typing error message if the parser is not able to find again the missing coercion. \section{Conclusion and Farewell.} \label{ccl} \Question{What if my question isn't answered here?} \label{lastquestion} Don't panic \verb+:-)+. You can try the {\Coq} manual~\cite{Coq:manual} for a technical description of the prover. The Coq'Art~\cite{Coq:coqart} is the first book written on {\Coq} and provides a comprehensive review of the theorem prover as well as a number of example and exercises. Finally, the tutorial~\cite{Coq:Tutorial} provides a smooth introduction to theorem proving in \Coq. %%%%%%% \newpage \nocite{LaTeX:intro} \nocite{LaTeX:symb} \bibliography{fk} %%%%%%% \typeout{*********************************************} \typeout{********* That makes {\thequestion} questions **********} \typeout{*********************************************} \end{document} coq-8.4pl2/doc/faq/axioms.eps0000640000175000001440000002776510406335323015174 0ustar notinusers%!PS-Adobe-2.0 EPSF-2.0 %%Title: axioms.fig %%Creator: fig2dev Version 3.2 Patchlevel 4 %%CreationDate: Wed May 5 18:30:03 2004 %%For: herbelin@limoux.polytechnique.fr (Hugo Herbelin) %%BoundingBox: 0 0 437 372 %%Magnification: 1.0000 %%EndComments /$F2psDict 200 dict def $F2psDict begin $F2psDict /mtrx matrix put /col-1 {0 setgray} bind def /col0 {0.000 0.000 0.000 srgb} bind def /col1 {0.000 0.000 1.000 srgb} bind def /col2 {0.000 1.000 0.000 srgb} bind def /col3 {0.000 1.000 1.000 srgb} bind def /col4 {1.000 0.000 0.000 srgb} bind def /col5 {1.000 0.000 1.000 srgb} bind def /col6 {1.000 1.000 0.000 srgb} bind def /col7 {1.000 1.000 1.000 srgb} bind def /col8 {0.000 0.000 0.560 srgb} bind def /col9 {0.000 0.000 0.690 srgb} bind def /col10 {0.000 0.000 0.820 srgb} bind def /col11 {0.530 0.810 1.000 srgb} bind def /col12 {0.000 0.560 0.000 srgb} bind def /col13 {0.000 0.690 0.000 srgb} bind def /col14 {0.000 0.820 0.000 srgb} bind def /col15 {0.000 0.560 0.560 srgb} bind def /col16 {0.000 0.690 0.690 srgb} bind def /col17 {0.000 0.820 0.820 srgb} bind def /col18 {0.560 0.000 0.000 srgb} bind def /col19 {0.690 0.000 0.000 srgb} bind def /col20 {0.820 0.000 0.000 srgb} bind def /col21 {0.560 0.000 0.560 srgb} bind def /col22 {0.690 0.000 0.690 srgb} bind def /col23 {0.820 0.000 0.820 srgb} bind def /col24 {0.500 0.190 0.000 srgb} bind def /col25 {0.630 0.250 0.000 srgb} bind def /col26 {0.750 0.380 0.000 srgb} bind def /col27 {1.000 0.500 0.500 srgb} bind def /col28 {1.000 0.630 0.630 srgb} bind def /col29 {1.000 0.750 0.750 srgb} bind def /col30 {1.000 0.880 0.880 srgb} bind def /col31 {1.000 0.840 0.000 srgb} bind def end save newpath 0 372 moveto 0 0 lineto 437 0 lineto 437 372 lineto closepath clip newpath -90.0 435.2 translate 1 -1 scale /cp {closepath} bind def /ef {eofill} bind def /gr {grestore} bind def /gs {gsave} bind def /sa {save} bind def /rs {restore} bind def /l {lineto} bind def /m {moveto} bind def /rm {rmoveto} bind def /n {newpath} bind def /s {stroke} bind def /sh {show} bind def /slc {setlinecap} bind def /slj {setlinejoin} bind def /slw {setlinewidth} bind def /srgb 
{setrgbcolor} bind def /rot {rotate} bind def /sc {scale} bind def /sd {setdash} bind def /ff {findfont} bind def /sf {setfont} bind def /scf {scalefont} bind def /sw {stringwidth} bind def /tr {translate} bind def /tnt {dup dup currentrgbcolor 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb} bind def /shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul 4 -2 roll mul srgb} bind def /reencdict 12 dict def /ReEncode { reencdict begin /newcodesandnames exch def /newfontname exch def /basefontname exch def /basefontdict basefontname findfont def /newfont basefontdict maxlength dict def basefontdict { exch dup /FID ne { dup /Encoding eq { exch dup length array copy newfont 3 1 roll put } { exch newfont 3 1 roll put } ifelse } { pop pop } ifelse } forall newfont /FontName newfontname put newcodesandnames aload pop 128 1 255 { newfont /Encoding get exch /.notdef put } for newcodesandnames length 2 idiv { newfont /Encoding get 3 1 roll put } repeat newfontname newfont definefont pop end } def /isovec [ 8#055 /minus 8#200 /grave 8#201 /acute 8#202 /circumflex 8#203 /tilde 8#204 /macron 8#205 /breve 8#206 /dotaccent 8#207 /dieresis 8#210 /ring 8#211 /cedilla 8#212 /hungarumlaut 8#213 /ogonek 8#214 /caron 8#220 /dotlessi 8#230 /oe 8#231 /OE 8#240 /space 8#241 /exclamdown 8#242 /cent 8#243 /sterling 8#244 /currency 8#245 /yen 8#246 /brokenbar 8#247 /section 8#250 /dieresis 8#251 /copyright 8#252 /ordfeminine 8#253 /guillemotleft 8#254 /logicalnot 8#255 /hyphen 8#256 /registered 8#257 /macron 8#260 /degree 8#261 /plusminus 8#262 /twosuperior 8#263 /threesuperior 8#264 /acute 8#265 /mu 8#266 /paragraph 8#267 /periodcentered 8#270 /cedilla 8#271 /onesuperior 8#272 /ordmasculine 8#273 /guillemotright 8#274 /onequarter 8#275 /onehalf 8#276 /threequarters 8#277 /questiondown 8#300 /Agrave 8#301 /Aacute 8#302 /Acircumflex 8#303 /Atilde 8#304 /Adieresis 8#305 /Aring 8#306 /AE 8#307 /Ccedilla 8#310 /Egrave 8#311 /Eacute 8#312 /Ecircumflex 8#313 /Edieresis 8#314 /Igrave 8#315 /Iacute 8#316 /Icircumflex 8#317 /Idieresis 8#320 /Eth 8#321 /Ntilde 8#322 /Ograve 8#323 /Oacute 8#324 /Ocircumflex 8#325 /Otilde 8#326 /Odieresis 8#327 /multiply 8#330 /Oslash 8#331 /Ugrave 8#332 /Uacute 8#333 /Ucircumflex 8#334 /Udieresis 8#335 /Yacute 8#336 /Thorn 8#337 /germandbls 8#340 /agrave 8#341 /aacute 8#342 /acircumflex 8#343 /atilde 8#344 /adieresis 8#345 /aring 8#346 /ae 8#347 /ccedilla 8#350 /egrave 8#351 /eacute 8#352 /ecircumflex 8#353 /edieresis 8#354 /igrave 8#355 /iacute 8#356 /icircumflex 8#357 /idieresis 8#360 /eth 8#361 /ntilde 8#362 /ograve 8#363 /oacute 8#364 /ocircumflex 8#365 /otilde 8#366 /odieresis 8#367 /divide 8#370 /oslash 8#371 /ugrave 8#372 /uacute 8#373 /ucircumflex 8#374 /udieresis 8#375 /yacute 8#376 /thorn 8#377 /ydieresis] def /Times-Roman /Times-Roman-iso isovec ReEncode /$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def /$F2psEnd {$F2psEnteredState restore end} def $F2psBegin 10 setmiterlimit 0 slj 0 slc 0.06000 0.06000 sc % % Fig objects follow % % % here starts figure with depth 50 % Arc 7.500 slw gs clippath 3599 6933 m 3626 6879 l 3492 6812 l 3586 6893 l 3465 6865 l cp eoclip n 3600.0 6750.0 150.0 90.0 -90.0 arc gs col0 s gr gr % arrowhead n 3465 6865 m 3586 6893 l 3492 6812 l 3465 6865 l cp gs 0.00 setgray ef gr col0 s % Arc gs clippath 3599 6633 m 3626 6579 l 3492 6512 l 3586 6593 l 3465 6565 l cp eoclip n 3600.0 6450.0 150.0 90.0 -90.0 arc gs col0 s gr gr % arrowhead n 3465 6565 m 3586 6593 l 
3492 6512 l 3465 6565 l cp gs 0.00 setgray ef gr col0 s % Arc gs clippath 3626 6020 m 3599 5966 l 3465 6034 l 3586 6007 l 3492 6087 l cp 3599 6333 m 3626 6279 l 3492 6212 l 3586 6293 l 3465 6265 l cp eoclip n 3600.0 6150.0 150.0 90.0 -90.0 arc gs col0 s gr gr % arrowhead n 3492 6087 m 3586 6007 l 3465 6034 l 3492 6087 l cp gs 0.00 setgray ef gr col0 s % arrowhead n 3465 6265 m 3586 6293 l 3492 6212 l 3465 6265 l cp gs 0.00 setgray ef gr col0 s % Arc gs clippath 3626 6320 m 3599 6266 l 3465 6334 l 3586 6307 l 3492 6387 l cp 3599 6633 m 3626 6579 l 3492 6512 l 3586 6593 l 3465 6565 l cp eoclip n 3600.0 6450.0 150.0 90.0 -90.0 arc gs col0 s gr gr % arrowhead n 3492 6387 m 3586 6307 l 3465 6334 l 3492 6387 l cp gs 0.00 setgray ef gr col0 s % arrowhead n 3465 6565 m 3586 6593 l 3492 6512 l 3465 6565 l cp gs 0.00 setgray ef gr col0 s % Arc gs clippath 3626 6620 m 3599 6566 l 3465 6634 l 3586 6607 l 3492 6687 l cp 3599 6933 m 3626 6879 l 3492 6812 l 3586 6893 l 3465 6865 l cp eoclip n 3600.0 6750.0 150.0 90.0 -90.0 arc gs col0 s gr gr % arrowhead n 3492 6687 m 3586 6607 l 3465 6634 l 3492 6687 l cp gs 0.00 setgray ef gr col0 s % arrowhead n 3465 6865 m 3586 6893 l 3492 6812 l 3465 6865 l cp gs 0.00 setgray ef gr col0 s % Arc gs clippath 3626 6920 m 3599 6866 l 3465 6934 l 3586 6907 l 3492 6987 l cp 3599 7233 m 3626 7179 l 3492 7112 l 3586 7193 l 3465 7165 l cp eoclip n 3600.0 7050.0 150.0 90.0 -90.0 arc gs col0 s gr gr % arrowhead n 3492 6987 m 3586 6907 l 3465 6934 l 3492 6987 l cp gs 0.00 setgray ef gr col0 s % arrowhead n 3465 7165 m 3586 7193 l 3492 7112 l 3465 7165 l cp gs 0.00 setgray ef gr col0 s % Arc gs clippath 4168 4060 m 4227 4068 l 4247 3919 l 4202 4034 l 4188 3911 l cp eoclip n 14032.5 5272.5 9908.2 -159.9 -172.9 arcn gs col0 s gr gr % arrowhead n 4188 3911 m 4202 4034 l 4247 3919 l 4188 3911 l cp gs 0.00 setgray ef gr col0 s % Polyline gs clippath 4170 5790 m 4230 5790 l 4230 5639 l 4200 5759 l 4170 5639 l cp eoclip n 4200 5175 m 4200 5775 l gs col0 s gr gr % arrowhead n 4170 5639 m 4200 5759 l 4230 5639 l 4170 5639 l cp gs 0.00 setgray ef gr col0 s % Polyline gs clippath 4553 5749 m 4567 5807 l 4714 5771 l 4591 5771 l 4700 5713 l cp eoclip n 7050 5175 m 4575 5775 l gs col0 s gr gr % arrowhead n 4700 5713 m 4591 5771 l 4714 5771 l 4700 5713 l cp gs 0.00 setgray ef gr col0 s % Polyline gs clippath 4170 4890 m 4230 4890 l 4230 4739 l 4200 4859 l 4170 4739 l cp eoclip n 4200 4275 m 4200 4875 l gs col0 s gr gr % arrowhead n 4170 4739 m 4200 4859 l 4230 4739 l 4170 4739 l cp gs 0.00 setgray ef gr col0 s % Polyline gs clippath 7131 4907 m 7147 4850 l 7001 4810 l 7109 4871 l 6985 4868 l cp eoclip n 4950 4275 m 7125 4875 l gs col0 s gr gr % arrowhead n 6985 4868 m 7109 4871 l 7001 4810 l 6985 4868 l cp gs 0.00 setgray ef gr col0 s % Polyline gs clippath 7167 4057 m 7225 4071 l 7262 3924 l 7204 4034 l 7204 3910 l cp eoclip n 7725 1950 m 7200 4050 l gs col0 s gr gr % arrowhead n 7204 3910 m 7204 4034 l 7262 3924 l 7204 3910 l cp gs 0.00 setgray ef gr col0 s % Polyline n 4350 3075 m 7350 1950 l gs col0 s gr % Polyline gs clippath 7170 4890 m 7230 4890 l 7230 4739 l 7200 4859 l 7170 4739 l cp eoclip n 7200 4275 m 7200 4875 l gs col0 s gr gr % arrowhead n 7170 4739 m 7200 4859 l 7230 4739 l 7170 4739 l cp gs 0.00 setgray ef gr col0 s % Polyline n 3075 1875 m 3975 1875 l gs col0 s gr % Polyline gs clippath 5520 4065 m 5580 4065 l 5580 3914 l 5550 4034 l 5520 3914 l cp 5580 3660 m 5520 3660 l 5520 3811 l 5550 3691 l 5580 3811 l cp eoclip n 5550 3675 m 5550 4050 l gs col0 s gr gr % arrowhead n 
5580 3811 m 5550 3691 l 5520 3811 l 5580 3811 l cp gs 0.00 setgray ef gr col0 s % arrowhead n 5520 3914 m 5550 4034 l 5580 3914 l 5520 3914 l cp gs 0.00 setgray ef gr col0 s % Polyline n 4575 4050 m 6450 4050 l gs col0 s gr % Polyline gs clippath 3495 2265 m 3555 2265 l 3555 2114 l 3525 2234 l 3495 2114 l cp 3555 1860 m 3495 1860 l 3495 2011 l 3525 1891 l 3555 2011 l cp eoclip n 3525 1875 m 3525 2250 l gs col0 s gr gr % arrowhead n 3555 2011 m 3525 1891 l 3495 2011 l 3555 2011 l cp gs 0.00 setgray ef gr col0 s % arrowhead n 3495 2114 m 3525 2234 l 3555 2114 l 3495 2114 l cp gs 0.00 setgray ef gr col0 s % Polyline gs clippath 2219 3988 m 2279 3991 l 2285 3840 l 2251 3959 l 2225 3838 l cp eoclip n 2325 1875 m 2250 3975 l gs col0 s gr gr % arrowhead n 2225 3838 m 2251 3959 l 2285 3840 l 2225 3838 l cp gs 0.00 setgray ef gr col0 s % Polyline n 7800 1275 m 2100 1275 l gs col0 s gr /Times-Roman-iso ff 180.00 scf sf 6600 5100 m gs 1 -1 sc (Proof-irrelevance) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3675 4200 m gs 1 -1 sc (Excluded-middle) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 6900 1800 m gs 1 -1 sc (Predicate extensionality) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3375 3525 m gs 1 -1 sc (\(Diaconescu\)) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 4650 3600 m gs 1 -1 sc (Propositional degeneracy) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3825 1800 m gs 1 -1 sc (Relational choice axiom) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 1725 1800 m gs 1 -1 sc (Description principle) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 2550 2400 m gs 1 -1 sc (Functional choice axiom) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3600 5100 m gs 1 -1 sc (Decidability of equality on $A$) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 4425 4575 m gs 1 -1 sc (\(needs Prop-impredicativity\)) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 5025 4725 m gs 1 -1 sc (\(Berardi\)) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 1500 3075 m gs 1 -1 sc (\(if Set impredicative\)) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 1500 4200 m gs 1 -1 sc (Not excluded-middle) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3600 6000 m gs 1 -1 sc (Axiom K on A) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3600 7200 m gs 1 -1 sc (Invariance by substitution of reflexivity proofs for equality on A) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 6150 4200 m gs 1 -1 sc (Propositional extensionality) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 2100 1200 m gs 1 -1 sc (The dependency graph of axioms in the Calculus of Inductive Constructions) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3600 6900 m gs 1 -1 sc (Injectivity of equality on sigma-types on A) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3600 6300 m gs 1 -1 sc (Uniqueness of reflexivity proofs for equality on A) col0 sh gr /Times-Roman-iso ff 180.00 scf sf 3600 6600 m gs 1 -1 sc (Uniqueness of equality proofs on A) col0 sh gr % here ends figure; $F2psEnd rs showpage coq-8.4pl2/doc/faq/hevea.sty0000640000175000001440000000517510406335323015003 0ustar notinusers% hevea : hevea.sty % This is a very basic style file for latex document to be processed % with hevea. It contains definitions of LaTeX environment which are % processed in a special way by the translator. % Mostly : % - latexonly, not processed by hevea, processed by latex. % - htmlonly , the reverse. % - rawhtml, to include raw HTML in hevea output. % - toimage, to send text to the image file. % The package also provides hevea logos, html related commands (ahref % etc.), void cutting and image commands. 
\NeedsTeXFormat{LaTeX2e} \ProvidesPackage{hevea}[2002/01/11] \RequirePackage{comment} \newif\ifhevea\heveafalse \@ifundefined{ifimagen}{\newif\ifimagen\imagenfalse} \makeatletter% \newcommand{\heveasmup}[2]{% \raise #1\hbox{$\m@th$% \csname S@\f@size\endcsname \fontsize\sf@size 0% \math@fontsfalse\selectfont #2% }}% \DeclareRobustCommand{\hevea}{H\kern-.15em\heveasmup{.2ex}{E}\kern-.15emV\kern-.15em\heveasmup{.2ex}{E}\kern-.15emA}% \DeclareRobustCommand{\hacha}{H\kern-.15em\heveasmup{.2ex}{A}\kern-.15emC\kern-.1em\heveasmup{.2ex}{H}\kern-.15emA}% \DeclareRobustCommand{\html}{\protect\heveasmup{0.ex}{HTML}} %%%%%%%%% Hyperlinks hevea style \newcommand{\ahref}[2]{{#2}} \newcommand{\ahrefloc}[2]{{#2}} \newcommand{\aname}[2]{{#2}} \newcommand{\ahrefurl}[1]{\texttt{#1}} \newcommand{\footahref}[2]{#2\footnote{\texttt{#1}}} \newcommand{\mailto}[1]{\texttt{#1}} \newcommand{\imgsrc}[2][]{} \newcommand{\home}[1]{\protect\raisebox{-.75ex}{\char126}#1} \AtBeginDocument {\@ifundefined{url} {%url package is not loaded \let\url\ahref\let\oneurl\ahrefurl\let\footurl\footahref} {}} %% Void cutting instructions \newcounter{cuttingdepth} \newcommand{\tocnumber}{} \newcommand{\notocnumber}{} \newcommand{\cuttingunit}{} \newcommand{\cutdef}[2][]{} \newcommand{\cuthere}[2]{} \newcommand{\cutend}{} \newcommand{\htmlhead}[1]{} \newcommand{\htmlfoot}[1]{} \newcommand{\htmlprefix}[1]{} \newenvironment{cutflow}[1]{}{} \newcommand{\cutname}[1]{} \newcommand{\toplinks}[3]{} %%%% Html only \excludecomment{rawhtml} \newcommand{\rawhtmlinput}[1]{} \excludecomment{htmlonly} %%%% Latex only \newenvironment{latexonly}{}{} \newenvironment{verblatex}{}{} %%%% Image file stuff \def\toimage{\endgroup} \def\endtoimage{\begingroup\def\@currenvir{toimage}} \def\verbimage{\endgroup} \def\endverbimage{\begingroup\def\@currenvir{verbimage}} \newcommand{\imageflush}[1][]{} %%% Bgcolor definition \newsavebox{\@bgcolorbin} \newenvironment{bgcolor}[2][] {\newcommand{\@mycolor}{#2}\begin{lrbox}{\@bgcolorbin}\vbox\bgroup} {\egroup\end{lrbox}% \begin{flushleft}% \colorbox{\@mycolor}{\usebox{\@bgcolorbin}}% \end{flushleft}} %%% Postlude \makeatother coq-8.4pl2/doc/faq/fk.bib0000640000175000001440000023160210413272241014221 0ustar notinusers%%%%%%% FAQ %%%%%%% @book{ProofsTypes, Author="Girard, Jean-Yves and Yves Lafont and Paul Taylor", Title="Proofs and Types", Publisher="Cambrige Tracts in Theoretical Computer Science, Cambridge University Press", Year="1989" } @misc{Types:Dowek, author = "Gilles Dowek", title = "Th{\'e}orie des types", year = 2002, howpublished = "Lecture notes", url= "http://www.lix.polytechnique.fr/~dowek/Cours/theories_des_types.ps.gz" } @PHDTHESIS{EGThese, author = {Eduardo Gimnez}, title = {Un Calcul de Constructions Infinies et son application a la vrification de systmes communicants}, type = {thse d'Universit}, school = {Ecole Normale Suprieure de Lyon}, month = {December}, year = {1996}, } %%%%%%% Semantique %%%%%%% @misc{Sem:cours, author = "Franois Pottier", title = "{Typage et Programmation}", year = "2002", howpublished = "Lecture notes", note = "DEA PSPL" } @inproceedings{Sem:Dubois, author = {Catherine Dubois}, editor = {Mark Aagaard and John Harrison}, title = "{Proving ML Type Soundness Within Coq}", pages = {126-144}, booktitle = {TPHOLs}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {1869}, year = {2000}, isbn = {3-540-67863-8}, bibsource = {DBLP, http://dblp.uni-trier.de} } @techreport{Sem:Plotkin, author = {Gordon D. 
Plotkin}, institution = {Aarhus University}, number = {{DAIMI FN-19}}, title = {{A structural approach to operational semantics}}, year = {1981} } @article{Sem:RemyV98, author = "Didier R{\'e}my and J{\'e}r{\^o}me Vouillon", title = "Objective {ML}: An effective object-oriented extension to {ML}", journal = "Theory And Practice of Object Systems", year = 1998, volume = "4", number = "1", pages = "27--50", note = {A preliminary version appeared in the proceedings of the 24th ACM Conference on Principles of Programming Languages, 1997} } @book{Sem:Winskel, AUTHOR = {Winskel, Glynn}, TITLE = {The Formal Semantics of Programming Languages}, NOTE = {WIN g2 93:1 P-Ex}, YEAR = {1993}, PUBLISHER = {The MIT Press}, SERIES = {Foundations of Computing}, } @Article{Sem:WrightFelleisen, refkey = "C1210", title = "A Syntactic Approach to Type Soundness", author = "Andrew K. Wright and Matthias Felleisen", pages = "38--94", journal = "Information and Computation", month = "15~" # nov, year = "1994", volume = "115", number = "1" } @inproceedings{Sem:Nipkow-MOD, author={Tobias Nipkow}, title={Jinja: Towards a Comprehensive Formal Semantics for a {J}ava-like Language}, booktitle={Proc.\ Marktobderdorf Summer School 2003}, publisher={IOS Press},editor={H. Schwichtenberg and K. Spies}, year=2003, note={To appear} } %%%%%%% Coq %%%%%%% @book{Coq:coqart, title = "Interactive Theorem Proving and Program Development, Coq'Art: The Calculus of Inductive Constructions", author = "Yves Bertot and Pierre Castran", publisher = "Springer Verlag", series = "Texts in Theoretical Computer Science. An EATCS series", year = 2004 } @phdthesis{Coq:Del01, AUTHOR = "David Delahaye", TITLE = "Conception de langages pour dcrire les preuves et les automatisations dans les outils d'aide la preuve", SCHOOL = {Universit\'e Paris~6}, YEAR = "2001", Type = {Th\`ese de Doctorat} } @techreport{Coq:gimenez-tut, author = "Eduardo Gim\'enez", title = "A Tutorial on Recursive Types in Coq", number = "RT-0221", pages = "42 p.", url = "citeseer.nj.nec.com/gimenez98tutorial.html" } @phdthesis{Coq:Mun97, AUTHOR = "Csar Mu{\~{n}}oz", TITLE = "Un calcul de substitutions pour la repr\'esentation de preuves partielles en th\'eorie de types", SCHOOL = {Universit\'e Paris~7}, Number = {Unit\'e de recherche INRIA-Rocquencourt, TU-0488}, YEAR = "1997", Note = {English version available as INRIA research report RR-3309}, Type = {Th\`ese de Doctorat} } @PHDTHESIS{Coq:Filliatre99, AUTHOR = {J.-C. Filli\^atre}, TITLE = {{Preuve de programmes imp\'eratifs en th\'eorie des types}}, TYPE = {Th{\`e}se de Doctorat}, SCHOOL = {Universit\'e Paris-Sud}, YEAR = 1999, MONTH = {July}, } @manual{Coq:Tutorial, AUTHOR = {G\'erard Huet and Gilles Kahn and Christine Paulin-Mohring}, TITLE = {{The Coq Proof Assistant A Tutorial}}, YEAR = 2004 } %%%%%%% PVS %%%%%%% @manual{PVS:prover, title = "{PVS} Prover Guide", author = "N. Shankar and S. Owre and J. M. Rushby and D. W. J. Stringer-Calvert", month = sep, year = "1999", organization = "Computer Science Laboratory, SRI International", address = "Menlo Park, CA", } @techreport{PVS-Semantics:TR, TITLE = {The Formal Semantics of {PVS}}, AUTHOR = {Sam Owre and Natarajan Shankar}, NUMBER = {CR-1999-209321}, INSTITUTION = {Computer Science Laboratory, SRI International}, ADDRESS = {Menlo Park, CA}, MONTH = may, YEAR = 1999, } @techreport{PVS-Tactics:DiVito, TITLE = {A {PVS} Prover Strategy Package for Common Manipulations}, AUTHOR = {Ben L. 
Di Vito}, NUMBER = {TM-2002-211647}, INSTITUTION = {Langley Research Center}, ADDRESS = {Hampton, VA}, MONTH = apr, YEAR = 2002, } @misc{PVS-Tactics:cours, author = "Csar Muoz", title = "Strategies in {PVS}", howpublished = "Lecture notes", note = "National Institute of Aerospace", year = 2002 } @techreport{PVS-Tactics:field, author = "C. Mu{\~n}oz and M. Mayero", title = "Real Automation in the Field", institution = "ICASE-NASA Langley", number = "NASA/CR-2001-211271 Interim ICASE Report No. 39", month = "dec", year = "2001" } %%%%%%% Autres Prouveurs %%%%%%% @misc{ACL2:repNuPrl, author = "James L. Caldwell and John Cowles", title = "{Representing Nuprl Proof Objects in ACL2: toward a proof checker for Nuprl}", url = "http://www.cs.uwyo.edu/~jlc/papers/proof_checking.ps" } @inproceedings{Elan:ckl-strat, author = {H. Cirstea and C. Kirchner and L. Liquori}, title = "{Rewrite Strategies in the Rewriting Calculus}", booktitle = {WRLA'02}, publisher = "{Elsevier Science B.V.}", series = {Electronic Notes in Theoretical Computer Science}, volume = {71}, year = {2003}, } @book{LCF:GMW, author = {M. Gordon and R. Milner and C. Wadsworth}, publisher = {sv}, series = {lncs}, volume = 78, title = {Edinburgh {LCF}: A Mechanized Logic of Computation}, year = 1979 } %%%%%%% LaTeX %%%%%%% @manual{LaTeX:symb, title = "The Great, Big List of \LaTeX\ Symbols", author = "David Carlisle and Scott Pakin and Alexander Holt", month = feb, year = 2001, } @manual{LaTeX:intro, title = "The Not So Short Introduction to \LaTeX2e", author = "Tobias Oetiker", month = jan, year = 1999, } @MANUAL{CoqManualV7, AUTHOR = {{The {Coq} Development Team}}, TITLE = {{The Coq Proof Assistant Reference Manual -- Version V7.1}}, YEAR = {2001}, MONTH = OCT, NOTE = {http://coq.inria.fr} } @MANUAL{CoqManual96, TITLE = {The {Coq Proof Assistant Reference Manual} Version 6.1}, AUTHOR = {B. Barras and S. Boutin and C. Cornes and J. Courant and J.-C. Filli\^atre and H. Herbelin and G. Huet and P. Manoury and C. Mu{\~{n}}oz and C. Murthy and C. Parent and C. Paulin-Mohring and A. Sa{\"\i}bi and B. Werner}, ORGANIZATION = {{INRIA-Rocquencourt}-{CNRS-ENS Lyon}}, URL = {ftp://ftp.inria.fr/INRIA/coq/V6.1/doc/Reference-Manual.dvi.gz}, YEAR = 1996, MONTH = DEC } @MANUAL{CoqTutorial99, AUTHOR = {G.~Huet and G.~Kahn and Ch.~Paulin-Mohring}, TITLE = {The {\sf Coq} Proof Assistant - A tutorial - Version 6.3}, MONTH = JUL, YEAR = {1999}, ABSTRACT = {http://coq.inria.fr/doc/tutorial.html} } @MANUAL{CoqTutorialV7, AUTHOR = {G.~Huet and G.~Kahn and Ch.~Paulin-Mohring}, TITLE = {The {\sf Coq} Proof Assistant - A tutorial - Version 7.1}, MONTH = OCT, YEAR = {2001}, NOTE = {http://coq.inria.fr} } @TECHREPORT{modelpa2000, AUTHOR = {B. Brard and P. Castran and E. Fleury and L. Fribourg and J.-F. Monin and C. Paulin and A. Petit and D. Rouillard}, TITLE = {Automates temporiss CALIFE}, INSTITUTION = {Calife}, YEAR = 2000, URL = {http://www.loria.fr/projets/calife/WebCalifePublic/FOURNITURES/F1.1.ps.gz}, TYPE = {Fourniture {F1.1}} } @TECHREPORT{CaFrPaRo2000, AUTHOR = {P. Castran and E. Freund and C. Paulin and D. Rouillard}, TITLE = {Bibliothques Coq et Isabelle-HOL pour les systmes de transitions et les p-automates}, INSTITUTION = {Calife}, YEAR = 2000, URL = {http://www.loria.fr/projets/calife/WebCalifePublic/FOURNITURES/F5.4.ps.gz}, TYPE = {Fourniture {F5.4}} } @PROCEEDINGS{TPHOLs99, TITLE = {International Conference on Theorem Proving in Higher Order Logics (TPHOLs'99)}, YEAR = 1999, EDITOR = {Y. Bertot and G. Dowek and C. Paulin-Mohring and L. 
Th{\'e}ry}, SERIES = {Lecture Notes in Computer Science}, MONTH = SEP, PUBLISHER = {{Sprin\-ger-Verlag}}, ADDRESS = {Nice}, TYPE_PUBLI = {editeur} } @INPROCEEDINGS{Pau01, AUTHOR = {Christine Paulin-Mohring}, TITLE = {Modelisation of Timed Automata in {Coq}}, BOOKTITLE = {Theoretical Aspects of Computer Software (TACS'2001)}, PAGES = {298--315}, YEAR = 2001, EDITOR = {N. Kobayashi and B. Pierce}, VOLUME = 2215, SERIES = {Lecture Notes in Computer Science}, PUBLISHER = {Springer-Verlag} } @PHDTHESIS{Moh89b, AUTHOR = {C. Paulin-Mohring}, MONTH = JAN, SCHOOL = {{Paris 7}}, TITLE = {Extraction de programmes dans le {Calcul des Constructions}}, TYPE = {Thse d'universit}, YEAR = {1989}, URL = {http://www.lri.fr/~paulin/these.ps.gz} } @ARTICLE{HuMo92, AUTHOR = {G. Huet and C. Paulin-Mohring}, EDITION = {INRIA}, JOURNAL = {Courrier du CNRS - Informatique}, TITLE = {Preuves et Construction de Programmes}, YEAR = {1992}, CATEGORY = {national} } @INPROCEEDINGS{LePa94, AUTHOR = {F. Leclerc and C. Paulin-Mohring}, TITLE = {Programming with Streams in {Coq}. A case study : The Sieve of Eratosthenes}, EDITOR = {H. Barendregt and T. Nipkow}, VOLUME = 806, SERIES = {Lecture Notes in Computer Science}, BOOKTITLE = {{Types for Proofs and Programs, Types' 93}}, YEAR = 1994, PUBLISHER = {Springer-Verlag} } @INPROCEEDINGS{Moh86, AUTHOR = {C. Mohring}, ADDRESS = {Cambridge, MA}, BOOKTITLE = {Symposium on Logic in Computer Science}, PUBLISHER = {IEEE Computer Society Press}, TITLE = {Algorithm Development in the {Calculus of Constructions}}, YEAR = {1986} } @INPROCEEDINGS{Moh89a, AUTHOR = {C. Paulin-Mohring}, ADDRESS = {Austin}, BOOKTITLE = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, MONTH = JAN, PUBLISHER = {ACM}, TITLE = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, YEAR = {1989} } @INCOLLECTION{Moh89c, AUTHOR = {C. Paulin-Mohring}, TITLE = {{R\'ealisabilit\'e et extraction de programmes}}, BOOKTITLE = {Logique et Informatique : une introduction}, PUBLISHER = {INRIA}, YEAR = 1991, EDITOR = {B. Courcelle}, VOLUME = 8, SERIES = {Collection Didactique}, PAGES = {163-180}, CATEGORY = {national} } @INPROCEEDINGS{Moh93, AUTHOR = {C. Paulin-Mohring}, BOOKTITLE = {Proceedings of the conference Typed Lambda Calculi a nd Applications}, EDITOR = {M. Bezem and J.-F. Groote}, INSTITUTION = {LIP-ENS Lyon}, NOTE = {LIP research report 92-49}, NUMBER = 664, SERIES = {Lecture Notes in Computer Science}, TITLE = {{Inductive Definitions in the System {Coq} - Rules and Properties}}, TYPE = {research report}, YEAR = 1993 } @ARTICLE{PaWe92, AUTHOR = {C. Paulin-Mohring and B. Werner}, JOURNAL = {Journal of Symbolic Computation}, TITLE = {{Synthesis of ML programs in the system Coq}}, VOLUME = {15}, YEAR = {1993}, PAGES = {607--640} } @INPROCEEDINGS{Pau96, AUTHOR = {C. Paulin-Mohring}, TITLE = {Circuits as streams in {Coq} : Verification of a sequential multiplier}, BOOKTITLE = {Types for Proofs and Programs, TYPES'95}, EDITOR = {S. Berardi and M. Coppo}, SERIES = {Lecture Notes in Computer Science}, YEAR = 1996, VOLUME = 1158 } @PHDTHESIS{Pau96b, AUTHOR = {Christine Paulin-Mohring}, TITLE = {Dfinitions Inductives en Thorie des Types d'Ordre Suprieur}, SCHOOL = {Universit Claude Bernard Lyon I}, YEAR = 1996, MONTH = DEC, TYPE = {Habilitation diriger les recherches}, URL = {http://www.lri.fr/~paulin/habilitation.ps.gz} } @INPROCEEDINGS{PfPa89, AUTHOR = {F. Pfenning and C. 
Paulin-Mohring}, BOOKTITLE = {Proceedings of Mathematical Foundations of Programming Semantics}, NOTE = {technical report CMU-CS-89-209}, PUBLISHER = {Springer-Verlag}, SERIES = {Lecture Notes in Computer Science}, VOLUME = 442, TITLE = {Inductively defined types in the {Calculus of Constructions}}, YEAR = {1990} } @MISC{krakatoa02, AUTHOR = {Claude March\'e and Christine Paulin and Xavier Urbain}, TITLE = {The \textsc{Krakatoa} proof tool}, YEAR = 2002, NOTE = {\url{http://krakatoa.lri.fr/}} } @ARTICLE{marche03jlap, AUTHOR = {Claude March{\'e} and Christine Paulin-Mohring and Xavier Urbain}, TITLE = {The \textsc{Krakatoa} Tool for Certification of \textsc{Java/JavaCard} Programs annotated in \textsc{JML}}, JOURNAL = {Journal of Logic and Algebraic Programming}, YEAR = 2003, NOTE = {To appear}, URL = {http://krakatoa.lri.fr}, TOPICS = {team} } @ARTICLE{marche04jlap, AUTHOR = {Claude March{\'e} and Christine Paulin-Mohring and Xavier Urbain}, TITLE = {The \textsc{Krakatoa} Tool for Certification of \textsc{Java/JavaCard} Programs annotated in \textsc{JML}}, JOURNAL = {Journal of Logic and Algebraic Programming}, YEAR = 2004, VOLUME = 58, NUMBER = {1--2}, PAGES = {89--106}, URL = {http://krakatoa.lri.fr}, TOPICS = {team} } @TECHREPORT{catano03deliv, AUTHOR = {N{\'e}stor Cata{\~n}o and Marek Gawkowski and Marieke Huisman and Bart Jacobs and Claude March{\'e} and Christine Paulin and Erik Poll and Nicole Rauch and Xavier Urbain}, TITLE = {Logical Techniques for Applet Verification}, INSTITUTION = {VerifiCard Project}, YEAR = 2003, TYPE = {Deliverable}, NUMBER = {5.2}, TOPICS = {team}, NOTE = {Available from \url{http://www.verificard.org}} } @TECHREPORT{kmu2002rr, AUTHOR = {Keiichirou Kusakari and Claude March and Xavier Urbain}, TITLE = {Termination of Associative-Commutative Rewriting using Dependency Pairs Criteria}, INSTITUTION = {LRI}, YEAR = 2002, TYPE = {Research Report}, NUMBER = 1304, TYPE_PUBLI = {interne}, TOPICS = {team}, NOTE = {\url{http://www.lri.fr/~urbain/textes/rr1304.ps.gz}}, URL = {http://www.lri.fr/~urbain/textes/rr1304.ps.gz} } @ARTICLE{marche2004jsc, AUTHOR = {Claude March\'e and Xavier Urbain}, TITLE = {Modular {\&} Incremental Proofs of {AC}-Termination}, JOURNAL = {Journal of Symbolic Computation}, YEAR = 2004, TOPICS = {team} } @INPROCEEDINGS{contejean03wst, AUTHOR = {Evelyne Contejean and Claude March and Benjamin Monate and Xavier Urbain}, TITLE = {{Proving Termination of Rewriting with {\sc C\textit{i}ME}}}, CROSSREF = {wst03}, PAGES = {71--73}, NOTE = {\url{http://cime.lri.fr/}}, URL = {http://cime.lri.fr/}, YEAR = 2003, TYPE_PUBLI = {icolcomlec}, TOPICS = {team} } @TECHREPORT{contejean04rr, AUTHOR = {Evelyne Contejean and Claude March{\'e} and Ana-Paula Tom{\'a}s and Xavier Urbain}, TITLE = {Mechanically proving termination using polynomial interpretations}, INSTITUTION = {LRI}, YEAR = {2004}, TYPE = {Research Report}, NUMBER = {1382}, TYPE_PUBLI = {interne}, TOPICS = {team}, URL = {http://www.lri.fr/~urbain/textes/rr1382.ps.gz} } @UNPUBLISHED{duran_sub, AUTHOR = {Francisco Duran and Salvador Lucas and Claude {March\'e} and {Jos\'e} Meseguer and Xavier Urbain}, TITLE = {Termination of Membership Equational Programs}, NOTE = {Submitted} } @PROCEEDINGS{comon95lncs, TITLE = {Term Rewriting}, BOOKTITLE = {Term Rewriting}, TOPICS = {team, cclserver}, YEAR = 1995, EDITOR = {Hubert Comon and Jean-Pierre Jouannaud}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {909}, PUBLISHER = {{Sprin\-ger-Verlag}}, ORGANIZATION = {French Spring School of Theoretical 
Computer Science}, TYPE_PUBLI = {editeur}, CLEF_LABO = {CJ95} } @PROCEEDINGS{lics94, TITLE = {Proceedings of the Ninth Annual IEEE Symposium on Logic in Computer Science}, BOOKTITLE = {Proceedings of the Ninth Annual IEEE Symposium on Logic in Computer Science}, YEAR = 1994, MONTH = JUL, ADDRESS = {Paris, France}, ORGANIZATION = {{IEEE} Comp. Soc. Press} } @PROCEEDINGS{rta91, TITLE = {4th International Conference on Rewriting Techniques and Applications}, BOOKTITLE = {4th International Conference on Rewriting Techniques and Applications}, EDITOR = {Ronald. V. Book}, YEAR = 1991, MONTH = APR, ADDRESS = {Como, Italy}, PUBLISHER = {{Sprin\-ger-Verlag}}, SERIES = {Lecture Notes in Computer Science}, VOLUME = 488 } @PROCEEDINGS{rta96, TITLE = {7th International Conference on Rewriting Techniques and Applications}, BOOKTITLE = {7th International Conference on Rewriting Techniques and Applications}, EDITOR = {Harald Ganzinger}, PUBLISHER = {{Sprin\-ger-Verlag}}, YEAR = 1996, MONTH = JUL, ADDRESS = {New Brunswick, NJ, USA}, SERIES = {Lecture Notes in Computer Science}, VOLUME = 1103 } @PROCEEDINGS{rta97, TITLE = {8th International Conference on Rewriting Techniques and Applications}, BOOKTITLE = {8th International Conference on Rewriting Techniques and Applications}, EDITOR = {Hubert Comon}, PUBLISHER = {{Sprin\-ger-Verlag}}, YEAR = 1997, MONTH = JUN, ADDRESS = {Barcelona, Spain}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {1232} } @PROCEEDINGS{rta98, TITLE = {9th International Conference on Rewriting Techniques and Applications}, BOOKTITLE = {9th International Conference on Rewriting Techniques and Applications}, EDITOR = {Tobias Nipkow}, PUBLISHER = {{Sprin\-ger-Verlag}}, YEAR = 1998, MONTH = APR, ADDRESS = {Tsukuba, Japan}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {1379} } @PROCEEDINGS{rta00, TITLE = {11th International Conference on Rewriting Techniques and Applications}, BOOKTITLE = {11th International Conference on Rewriting Techniques and Applications}, EDITOR = {Leo Bachmair}, PUBLISHER = {{Sprin\-ger-Verlag}}, SERIES = {Lecture Notes in Computer Science}, VOLUME = 1833, MONTH = JUL, YEAR = 2000, ADDRESS = {Norwich, UK} } @PROCEEDINGS{srt95, TITLE = {Proceedings of the Conference on Symbolic Rewriting Techniques}, BOOKTITLE = {Proceedings of the Conference on Symbolic Rewriting Techniques}, YEAR = 1995, EDITOR = {Manuel Bronstein and Volker Weispfenning}, ADDRESS = {Monte Verita, Switzerland} } @BOOK{comon01cclbook, BOOKTITLE = {Constraints in Computational Logics}, TITLE = {Constraints in Computational Logics}, EDITOR = {Hubert Comon and Claude March{\'e} and Ralf Treinen}, YEAR = 2001, PUBLISHER = {{Sprin\-ger-Verlag}}, SERIES = {Lecture Notes in Computer Science}, VOLUME = 2002, TOPICS = {team}, TYPE_PUBLI = {editeur} } @PROCEEDINGS{wst03, BOOKTITLE = {{Extended Abstracts of the 6th International Workshop on Termination, WST'03}}, TITLE = {{Extended Abstracts of the 6th International Workshop on Termination, WST'03}}, YEAR = {2003}, EDITOR = {Albert Rubio}, MONTH = JUN, NOTE = {Technical Report DSIC II/15/03, Universidad Politcnica de Valencia, Spain} } @INPROCEEDINGS{FilliatreLetouzey03, AUTHOR = {J.-C. Filli\^atre and P. Letouzey}, TITLE = {{Functors for Proofs and Programs}}, BOOKTITLE = {Proceedings of The European Symposium on Programming}, YEAR = 2004, ADDRESS = {Barcelona, Spain}, MONTH = {March 29-April 2}, NOTE = {To appear}, URL = {http://www.lri.fr/~filliatr/ftp/publis/fpp.ps.gz} } @TECHREPORT{Filliatre03, AUTHOR = {J.-C. 
Filli\^atre}, TITLE = {{Why: a multi-language multi-prover verification tool}}, INSTITUTION = {{LRI, Universit\'e Paris Sud}}, TYPE = {{Research Report}}, NUMBER = {1366}, MONTH = {March}, YEAR = 2003, URL = {http://www.lri.fr/~filliatr/ftp/publis/why-tool.ps.gz} } @ARTICLE{FilliatrePottier02, AUTHOR = {J.-C. Filli{\^a}tre and F. Pottier}, TITLE = {{Producing All Ideals of a Forest, Functionally}}, JOURNAL = {Journal of Functional Programming}, VOLUME = 13, NUMBER = 5, PAGES = {945--956}, MONTH = {September}, YEAR = 2003, URL = {http://www.lri.fr/~filliatr/ftp/publis/kr-fp.ps.gz}, ABSTRACT = { We present a functional implementation of Koda and Ruskey's algorithm for generating all ideals of a forest poset as a Gray code. Using a continuation-based approach, we give an extremely concise formulation of the algorithm's core. Then, in a number of steps, we derive a first-order version whose efficiency is comparable to a C implementation given by Knuth.} } @UNPUBLISHED{FORS01, AUTHOR = {J.-C. Filli{\^a}tre and S. Owre and H. Rue{\ss} and N. Shankar}, TITLE = {Deciding Propositional Combinations of Equalities and Inequalities}, NOTE = {Unpublished}, MONTH = OCT, YEAR = 2001, URL = {http://www.lri.fr/~filliatr/ftp/publis/ics.ps}, ABSTRACT = { We address the problem of combining individual decision procedures into a single decision procedure. Our combination approach is based on using the canonizer obtained from Shostak's combination algorithm for equality. We illustrate our approach with a combination algorithm for equality, disequality, arithmetic inequality, and propositional logic. Unlike the Nelson--Oppen combination where the processing of equalities is distributed across different closed decision procedures, our combination involves the centralized processing of equalities in a single procedure. The termination argument for the combination is based on that for Shostak's algorithm. We also give soundness and completeness arguments.} } @INPROCEEDINGS{ICS, AUTHOR = {J.-C. Filli{\^a}tre and S. Owre and H. Rue{\ss} and N. Shankar}, TITLE = {{ICS: Integrated Canonization and Solving (Tool presentation)}}, BOOKTITLE = {Proceedings of CAV'2001}, EDITOR = {G. Berry and H. Comon and A. Finkel}, PUBLISHER = {Springer-Verlag}, SERIES = {Lecture Notes in Computer Science}, VOLUME = 2102, PAGES = {246--249}, YEAR = 2001 } @INPROCEEDINGS{Filliatre01a, AUTHOR = {J.-C. Filli\^atre}, TITLE = {La supériorité de l'ordre supérieur}, BOOKTITLE = {Journées Francophones des Langages Applicatifs}, PAGES = {15--26}, MONTH = {Janvier}, YEAR = 2002, ADDRESS = {Anglet, France}, URL = {http://www.lri.fr/~filliatr/ftp/publis/sos.ps.gz}, CODE = {http://www.lri.fr/~filliatr/ftp/ocaml/misc/koda-ruskey.ps}, ABSTRACT = { Nous présentons ici une écriture fonctionnelle de l'algorithme de Koda-Ruskey, un algorithme pour engendrer une large famille de codes de Gray. En s'inspirant de techniques de programmation par continuation, nous aboutissons à un code de neuf lignes seulement, bien plus élégant que les implantations purement impératives proposées jusqu'ici, notamment par Knuth. Dans un second temps, nous montrons comment notre code peut être légèrement modifié pour aboutir à une version de complexité optimale. Notre implantation en Objective Caml rivalise d'efficacité avec les meilleurs codes C. Nous détaillons les calculs de complexité, un exercice intéressant en présence d'ordre supérieur et d'effets de bord combinés.} } @TECHREPORT{Filliatre00c, AUTHOR = {J.-C.
Filli\^atre}, TITLE = {{Design of a proof assistant: Coq version 7}}, INSTITUTION = {{LRI, Universit\'e Paris Sud}}, TYPE = {{Research Report}}, NUMBER = {1369}, MONTH = {October}, YEAR = 2000, URL = {http://www.lri.fr/~filliatr/ftp/publis/coqv7.ps.gz}, ABSTRACT = { We present the design and implementation of the new version of the Coq proof assistant. The main novelty is the isolation of the critical part of the system, which consists in a type checker for the Calculus of Inductive Constructions. This kernel is now completely independent of the rest of the system and has been rewritten in a purely functional way. This leads to greater clarity and safety, without compromising efficiency. It also opens the way to the ``bootstrap'' of the Coq system, where the kernel will be certified using Coq itself.} } @TECHREPORT{Filliatre00b, AUTHOR = {J.-C. Filli\^atre}, TITLE = {{Hash consing in an ML framework}}, INSTITUTION = {{LRI, Universit\'e Paris Sud}}, TYPE = {{Research Report}}, NUMBER = {1368}, MONTH = {September}, YEAR = 2000, URL = {http://www.lri.fr/~filliatr/ftp/publis/hash-consing.ps.gz}, ABSTRACT = { Hash consing is a technique to share values that are structurally equal. Beyond the obvious advantage of saving memory blocks, hash consing may also be used to gain speed in several operations (like equality test) and data structures (like sets or maps) when sharing is maximal. However, physical adresses cannot be used directly for this purpose when the garbage collector is likely to move blocks underneath. We present an easy solution in such a framework, with many practical benefits.} } @MISC{ocamlweb, AUTHOR = {J.-C. Filli\^atre and C. March\'e}, TITLE = {{ocamlweb, a literate programming tool for Objective Caml}}, NOTE = {Available at \url{http://www.lri.fr/~filliatr/ocamlweb/}}, URL = {http://www.lri.fr/~filliatr/ocamlweb/} } @ARTICLE{Filliatre00a, AUTHOR = {J.-C. Filli\^atre}, TITLE = {{Verification of Non-Functional Programs using Interpretations in Type Theory}}, JOURNAL = {Journal of Functional Programming}, VOLUME = 13, NUMBER = 4, PAGES = {709--745}, MONTH = {July}, YEAR = 2003, NOTE = {English translation of~\cite{Filliatre99}.}, URL = {http://www.lri.fr/~filliatr/ftp/publis/jphd.ps.gz}, ABSTRACT = {We study the problem of certifying programs combining imperative and functional features within the general framework of type theory. Type theory constitutes a powerful specification language, which is naturally suited for the proof of purely functional programs. To deal with imperative programs, we propose a logical interpretation of an annotated program as a partial proof of its specification. The construction of the corresponding partial proof term is based on a static analysis of the effects of the program, and on the use of monads. The usual notion of monads is refined in order to account for the notion of effect. The missing subterms in the partial proof term are seen as proof obligations, whose actual proofs are left to the user. We show that the validity of those proof obligations implies the total correctness of the program. We also establish a result of partial completeness. This work has been implemented in the Coq proof assistant. It appears as a tactic taking an annotated program as argument and generating a set of proof obligations. Several nontrivial algorithms have been certified using this tactic.} } @ARTICLE{Filliatre99c, AUTHOR = {J.-C. 
Filli\^atre}, TITLE = {{Formal Proof of a Program: Find}}, JOURNAL = {Science of Computer Programming}, YEAR = 2001, NOTE = {To appear}, URL = {http://www.lri.fr/~filliatr/ftp/publis/find.ps.gz}, ABSTRACT = {In 1971, C.~A.~R.~Hoare gave the proof of correctness and termination of a rather complex algorithm, in a paper entitled \emph{Proof of a program: Find}. It is a hand-made proof, where the program is given together with its formal specification and where each step is fully justified by a mathematical reasoning. We present here a formal proof of the same program in the system Coq, using the recent tactic of the system developed to establishing the total correctness of imperative programs. We follow Hoare's paper as close as possible, keeping the same program and the same specification. We show that we get exactly the same proof obligations, which are proved in a straightforward way, following the original paper. We also explain how more informal reasonings of Hoare's proof are formalized in the system Coq. This demonstrates the adequacy of the system Coq in the process of certifying imperative programs.} } @TECHREPORT{Filliatre99b, AUTHOR = {J.-C. Filli\^atre}, TITLE = {{A theory of monads parameterized by effects}}, INSTITUTION = {{LRI, Universit\'e Paris Sud}}, TYPE = {{Research Report}}, NUMBER = {1367}, MONTH = {November}, YEAR = 1999, URL = {http://www.lri.fr/~filliatr/ftp/publis/monads.ps.gz}, ABSTRACT = {Monads were introduced in computer science to express the semantics of programs with computational effects, while type and effect inference was introduced to mark out those effects. In this article, we propose a combination of the notions of effects and monads, where the monadic operators are parameterized by effects. We establish some relationships between those generalized monads and the classical ones. Then we use a generalized monad to translate imperative programs into purely functional ones. We establish the correctness of that translation. This work has been put into practice in the Coq proof assistant to establish the correctness of imperative programs.} } @PHDTHESIS{Filliatre99, AUTHOR = {J.-C. Filli\^atre}, TITLE = {{Preuve de programmes imp\'eratifs en th\'eorie des types}}, TYPE = {Th{\`e}se de Doctorat}, SCHOOL = {Universit\'e Paris-Sud}, YEAR = 1999, MONTH = {July}, URL = {http://www.lri.fr/~filliatr/ftp/publis/these.ps.gz}, ABSTRACT = {Nous tudions le problme de la certification de programmes mlant traits impratifs et fonctionnels dans le cadre de la thorie des types. La thorie des types constitue un puissant langage de spcification, naturellement adapt la preuve de programmes purement fonctionnels. Pour y certifier galement des programmes impratifs, nous commenons par exprimer leur smantique de manire purement fonctionnelle. Cette traduction repose sur une analyse statique des effets de bord des programmes, et sur l'utilisation de la notion de monade, notion que nous raffinons en l'associant la notion d'effet de manire gnrale. Nous montrons que cette traduction est smantiquement correcte. Puis, partir d'un programme annot, nous construisons une preuve de sa spcification, traduite de manire fonctionnelle. Cette preuve est btie sur la traduction fonctionnelle prcdemment introduite. Elle est presque toujours incomplte, les parties manquantes tant autant d'obligations de preuve qui seront laisses la charge de l'utilisateur. Nous montrons que la validit de ces obligations entrane la correction totale du programme. 
Nous avons implant notre travail dans l'assistant de preuve Coq, avec lequel il est ds prsent distribu. Cette implantation se prsente sous la forme d'une tactique prenant en argument un programme annot et engendrant les obligations de preuve. Plusieurs algorithmes non triviaux ont t certifis l'aide de cet outil (Find, Quicksort, Heapsort, algorithme de Knuth-Morris-Pratt).} } @INPROCEEDINGS{FilliatreMagaud99, AUTHOR = {J.-C. Filli\^atre and N. Magaud}, TITLE = {{Certification of sorting algorithms in the system Coq}}, BOOKTITLE = {Theorem Proving in Higher Order Logics: Emerging Trends}, YEAR = 1999, ABSTRACT = {We present the formal proofs of total correctness of three sorting algorithms in the system Coq, namely \textit{insertion sort}, \textit{quicksort} and \textit{heapsort}. The implementations are imperative programs working in-place on a given array. Those developments demonstrate the usefulness of inductive types and higher-order logic in the process of software certification. They also show that the proof of rather complex algorithms may be done in a small amount of time --- only a few days for each development --- and without great difficulty.}, URL = {http://www.lri.fr/~filliatr/ftp/publis/Filliatre-Magaud.ps.gz} } @INPROCEEDINGS{Filliatre98, AUTHOR = {J.-C. Filli\^atre}, TITLE = {{Proof of Imperative Programs in Type Theory}}, BOOKTITLE = {International Workshop, TYPES '98, Kloster Irsee, Germany}, PUBLISHER = {Springer-Verlag}, VOLUME = 1657, SERIES = {Lecture Notes in Computer Science}, MONTH = MAR, YEAR = {1998}, ABSTRACT = {We present a new approach to certifying imperative programs, in the context of Type Theory. The key is a functional translation of imperative programs, which is made possible by an analysis of their effects. On sequential imperative programs, we get the same proof obligations as those given by Floyd-Hoare logic, but our approach also includes functional constructions. As a side-effect, we propose a way to eradicate the use of auxiliary variables in specifications. This work has been implemented in the Coq Proof Assistant and applied on non-trivial examples.}, URL = {http://www.lri.fr/~filliatr/ftp/publis/types98.ps.gz} } @TECHREPORT{Filliatre97, AUTHOR = {J.-C. Filli\^atre}, INSTITUTION = {LIP - ENS Lyon}, NUMBER = {97--04}, TITLE = {{Finite Automata Theory in Coq: A constructive proof of Kleene's theorem}}, TYPE = {Research Report}, MONTH = {February}, YEAR = {1997}, ABSTRACT = {We describe here a development in the system Coq of a piece of Finite Automata Theory. The main result is the Kleene's theorem, expressing that regular expressions and finite automata define the same languages. From a constructive proof of this result, we automatically obtain a functional program that compiles any regular expression into a finite automata, which constitutes the main part of the implementation of {\tt grep}-like programs. This functional program is obtained by the automatic method of {\em extraction} which removes the logical parts of the proof to keep only its informative contents. Starting with an idea of what we would have written in ML, we write the specification and do the proofs in such a way that we obtain the expected program, which is therefore efficient.}, URL = {ftp://ftp.ens-lyon.fr/pub/LIP/Rapports/RR/RR97/RR97-04.ps.Z} } @TECHREPORT{Filliatre95, AUTHOR = {J.-C. 
Filli\^atre}, INSTITUTION = {LIP - ENS Lyon}, NUMBER = {96--25}, TITLE = {{A decision procedure for Direct Predicate Calculus: study and implementation in the Coq system}}, TYPE = {Research Report}, MONTH = {February}, YEAR = {1995}, ABSTRACT = {The paper of J. Ketonen and R. Weyhrauch \emph{A decidable fragment of Predicate Calculus} defines a decidable fragment of first-order predicate logic - Direct Predicate Calculus - as the subset which is provable in Gentzen sequent calculus without the contraction rule, and gives an effective decision procedure for it. This report is a detailed study of this procedure. We extend the decidability to non-prenex formulas. We prove that the intuitionnistic fragment is still decidable, with a refinement of the same procedure. An intuitionnistic version has been implemented in the Coq system using a translation into natural deduction.}, URL = {ftp://ftp.ens-lyon.fr/pub/LIP/Rapports/RR/RR96/RR96-25.ps.Z} } @TECHREPORT{Filliatre94, AUTHOR = {J.-C. Filli\^atre}, MONTH = {Juillet}, INSTITUTION = {Ecole Normale Sup\'erieure}, TITLE = {{Une proc\'edure de d\'ecision pour le Calcul des Pr\'edicats Direct~: \'etude et impl\'ementation dans le syst\`eme Coq}}, TYPE = {Rapport de {DEA}}, YEAR = {1994}, URL = {ftp://ftp.lri.fr/LRI/articles/filliatr/memoire.dvi.gz} } @TECHREPORT{CourantFilliatre93, AUTHOR = {J. Courant et J.-C. Filli\^atre}, MONTH = {Septembre}, INSTITUTION = {Ecole Normale Sup\'erieure}, TITLE = {{Formalisation de la th\'eorie des langages formels en Coq}}, TYPE = {Rapport de ma\^{\i}trise}, YEAR = {1993}, URL = {http://www.ens-lyon.fr/~jcourant/stage_maitrise.dvi.gz}, URL2 = {http://www.ens-lyon.fr/~jcourant/stage_maitrise.ps.gz} } @INPROCEEDINGS{tphols2000-Letouzey, crossref = "tphols2000", title = "Formalizing {S}t{\aa}lmarck's algorithm in {C}oq", author = "Pierre Letouzey and Laurent Th{\'e}ry", pages = "387--404"} @PROCEEDINGS{tphols2000, editor = "J. Harrison and M. Aagaard", booktitle = "Theorem Proving in Higher Order Logics: 13th International Conference, TPHOLs 2000", series = "Lecture Notes in Computer Science", volume = 1869, year = 2000, publisher = "Springer-Verlag"} @InCollection{howe, author = {Doug Howe}, title = {Computation Meta theory in Nuprl}, booktitle = {The Proceedings of the Ninth International Conference of Autom ated Deduction}, volume = {310}, editor = {E. Lusk and R. Overbeek}, publisher = {Springer-Verlag}, pages = {238--257}, year = {1988} } @TechReport{harrison, author = {John Harrison}, title = {Meta theory and Reflection in Theorem Proving:a Survey and Cri tique}, institution = {SRI International Cambridge Computer Science Research Center}, year = {1995}, number = {CRC-053} } @InCollection{cc, author = {Thierry Coquand and Grard Huet}, title = {The Calculus of Constructions}, booktitle = {Information and Computation}, year = {1988}, volume = {76}, number = {2/3} } @InProceedings{coquandcci, author = {Thierry Coquand and Christine Paulin-Mohring}, title = {Inductively defined types}, booktitle = {Proceedings of Colog'88}, year = {1990}, editor = {P. Martin-Lf and G. Mints}, volume = {417}, series = {LNCS}, publisher = {Springer-Verlag} } @InProceedings{boutin, author = {Samuel Boutin}, title = {Using reflection to build efficient and certified decision pro cedures.}, booktitle = {Proceedings of TACS'97}, year = {1997}, editor = {M. Abadi and T. 
Ito}, volume = {1281}, series = {LNCS}, publisher = {Springer-Verlag} } @Manual{Coq:manual, title = {The Coq proof assistant reference manual}, author = {\mbox{The Coq development team}}, organization = {LogiCal Project}, note = {Version 8.0}, year = {2004}, url = "http://coq.inria.fr" } @string{jfp = "Journal of Functional Programming"} @STRING{lncs="Lecture Notes in Computer Science"} @STRING{lnai="Lecture Notes in Artificial Intelligence"} @string{SV = "{Sprin\-ger-Verlag}"} @INPROCEEDINGS{Aud91, AUTHOR = {Ph. Audebaud}, BOOKTITLE = {Proceedings of the sixth Conf. on Logic in Computer Science.}, PUBLISHER = {IEEE}, TITLE = {Partial {Objects} in the {Calculus of Constructions}}, YEAR = {1991} } @PHDTHESIS{Aud92, AUTHOR = {Ph. Audebaud}, SCHOOL = {{Universit\'e} Bordeaux I}, TITLE = {Extension du Calcul des Constructions par Points fixes}, YEAR = {1992} } @INPROCEEDINGS{Audebaud92b, AUTHOR = {Ph. Audebaud}, BOOKTITLE = {{Proceedings of the 1992 Workshop on Types for Proofs and Programs}}, EDITOR = {{B. Nordstr\"om and K. Petersson and G. Plotkin}}, NOTE = {Also Research Report LIP-ENS-Lyon}, PAGES = {pp 21--34}, TITLE = {{CC+ : an extension of the Calculus of Constructions with fixpoints}}, YEAR = {1992} } @INPROCEEDINGS{Augustsson85, AUTHOR = {L. Augustsson}, TITLE = {{Compiling Pattern Matching}}, BOOKTITLE = {Conference Functional Programming and Computer Architecture}, YEAR = {1985} } @ARTICLE{BaCo85, AUTHOR = {J.L. Bates and R.L. Constable}, JOURNAL = {ACM transactions on Programming Languages and Systems}, TITLE = {Proofs as {Programs}}, VOLUME = {7}, YEAR = {1985} } @BOOK{Bar81, AUTHOR = {H.P. Barendregt}, PUBLISHER = {North-Holland}, TITLE = {The Lambda Calculus its Syntax and Semantics}, YEAR = {1981} } @TECHREPORT{Bar91, AUTHOR = {H. Barendregt}, INSTITUTION = {Catholic University Nijmegen}, NOTE = {In Handbook of Logic in Computer Science, Vol II}, NUMBER = {91-19}, TITLE = {Lambda {Calculi with Types}}, YEAR = {1991} } @ARTICLE{BeKe92, AUTHOR = {G. Bellin and J. Ketonen}, JOURNAL = {Theoretical Computer Science}, PAGES = {115--142}, TITLE = {A decision procedure revisited : Notes on direct logic, linear logic and its implementation}, VOLUME = {95}, YEAR = {1992} } @BOOK{Bee85, AUTHOR = {M.J. Beeson}, PUBLISHER = SV, TITLE = {Foundations of Constructive Mathematics, Metamathematical Studies}, YEAR = {1985} } @BOOK{Bis67, AUTHOR = {E. Bishop}, PUBLISHER = {McGraw-Hill}, TITLE = {Foundations of Constructive Analysis}, YEAR = {1967} } @BOOK{BoMo79, AUTHOR = {R.S. Boyer and J.S. Moore}, KEY = {BoMo79}, PUBLISHER = {Academic Press}, SERIES = {ACM Monograph}, TITLE = {A computational logic}, YEAR = {1979} } @MASTERSTHESIS{Bou92, AUTHOR = {S. Boutin}, MONTH = sep, SCHOOL = {{Universit\'e Paris 7}}, TITLE = {Certification d'un compilateur {ML en Coq}}, YEAR = {1992} } @inproceedings{Bou97, title = {Using reflection to build efficient and certified decision procedure s}, author = {S. Boutin}, booktitle = {TACS'97}, editor = {Martin Abadi and Takahashi Ito}, publisher = SV, series = lncs, volume=1281, PS={http://pauillac.inria.fr/~boutin/public_w/submitTACS97.ps.gz}, year = {1997} } @PhdThesis{Bou97These, author = {S. Boutin}, title = {R\'eflexions sur les quotients}, school = {Paris 7}, year = 1997, type = {th\`ese d'Universit\'e}, month = apr } @ARTICLE{Bru72, AUTHOR = {N.J. de Bruijn}, JOURNAL = {Indag. 
Math.}, TITLE = {{Lambda-Calculus Notation with Nameless Dummies, a Tool for Automatic Formula Manipulation, with Application to the Church-Rosser Theorem}}, VOLUME = {34}, YEAR = {1972} } @INCOLLECTION{Bru80, AUTHOR = {N.J. de Bruijn}, BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, EDITOR = {J.P. Seldin and J.R. Hindley}, PUBLISHER = {Academic Press}, TITLE = {A survey of the project {Automath}}, YEAR = {1980} } @TECHREPORT{COQ93, AUTHOR = {G. Dowek and A. Felty and H. Herbelin and G. Huet and C. Murthy and C. Parent and C. Paulin-Mohring and B. Werner}, INSTITUTION = {INRIA}, MONTH = may, NUMBER = {154}, TITLE = {{The Coq Proof Assistant User's Guide Version 5.8}}, YEAR = {1993} } @TECHREPORT{CPar93, AUTHOR = {C. Parent}, INSTITUTION = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, MONTH = oct, NOTE = {Also in~\cite{Nijmegen93}}, NUMBER = {93-29}, TITLE = {Developing certified programs in the system {Coq}- {The} {Program} tactic}, YEAR = {1993} } @PHDTHESIS{CPar95, AUTHOR = {C. Parent}, SCHOOL = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, TITLE = {{Synth\`ese de preuves de programmes dans le Calcul des Constructions Inductives}}, YEAR = {1995} } @BOOK{Caml, AUTHOR = {P. Weis and X. Leroy}, PUBLISHER = {InterEditions}, TITLE = {Le langage Caml}, YEAR = {1993} } @INPROCEEDINGS{ChiPotSimp03, AUTHOR = {Laurent Chicli and Lo\"{\i}c Pottier and Carlos Simpson}, ADDRESS = {Berg en Dal, The Netherlands}, TITLE = {Mathematical Quotients and Quotient Types in Coq}, BOOKTITLE = {TYPES'02}, PUBLISHER = SV, SERIES = LNCS, VOLUME = {2646}, YEAR = {2003} } @TECHREPORT{CoC89, AUTHOR = {Projet Formel}, INSTITUTION = {INRIA}, NUMBER = {110}, TITLE = {{The Calculus of Constructions. Documentation and user's guide, Version 4.10}}, YEAR = {1989} } @INPROCEEDINGS{CoHu85a, AUTHOR = {Thierry Coquand and Grard Huet}, ADDRESS = {Linz}, BOOKTITLE = {EUROCAL'85}, PUBLISHER = SV, SERIES = LNCS, TITLE = {{Constructions : A Higher Order Proof System for Mechanizing Mathematics}}, VOLUME = {203}, YEAR = {1985} } @INPROCEEDINGS{CoHu85b, AUTHOR = {Thierry Coquand and Grard Huet}, BOOKTITLE = {Logic Colloquium'85}, EDITOR = {The Paris Logic Group}, PUBLISHER = {North-Holland}, TITLE = {{Concepts Math\'ematiques et Informatiques formalis\'es dans le Calcul des Constructions}}, YEAR = {1987} } @ARTICLE{CoHu86, AUTHOR = {Thierry Coquand and Grard Huet}, JOURNAL = {Information and Computation}, NUMBER = {2/3}, TITLE = {The {Calculus of Constructions}}, VOLUME = {76}, YEAR = {1988} } @INPROCEEDINGS{CoPa89, AUTHOR = {Thierry Coquand and Christine Paulin-Mohring}, BOOKTITLE = {Proceedings of Colog'88}, EDITOR = {P. Martin-L\"of and G. Mints}, PUBLISHER = SV, SERIES = LNCS, TITLE = {Inductively defined types}, VOLUME = {417}, YEAR = {1990} } @BOOK{Con86, AUTHOR = {R.L. {Constable et al.}}, PUBLISHER = {Prentice-Hall}, TITLE = {{Implementing Mathematics with the Nuprl Proof Development System}}, YEAR = {1986} } @PHDTHESIS{Coq85, AUTHOR = {Thierry Coquand}, MONTH = jan, SCHOOL = {Universit\'e Paris~7}, TITLE = {Une Th\'eorie des Constructions}, YEAR = {1985} } @INPROCEEDINGS{Coq86, AUTHOR = {Thierry Coquand}, ADDRESS = {Cambridge, MA}, BOOKTITLE = {Symposium on Logic in Computer Science}, PUBLISHER = {IEEE Computer Society Press}, TITLE = {{An Analysis of Girard's Paradox}}, YEAR = {1986} } @INPROCEEDINGS{Coq90, AUTHOR = {Thierry Coquand}, BOOKTITLE = {Logic and Computer Science}, EDITOR = {P. 
Oddifredi}, NOTE = {INRIA Research Report 1088, also in~\cite{CoC89}}, PUBLISHER = {Academic Press}, TITLE = {{Metamathematical Investigations of a Calculus of Constructions}}, YEAR = {1990} } @INPROCEEDINGS{Coq91, AUTHOR = {Thierry Coquand}, BOOKTITLE = {Proceedings 9th Int. Congress of Logic, Methodology and Philosophy of Science}, TITLE = {{A New Paradox in Type Theory}}, MONTH = {August}, YEAR = {1991} } @INPROCEEDINGS{Coq92, AUTHOR = {Thierry Coquand}, TITLE = {{Pattern Matching with Dependent Types}}, YEAR = {1992}, crossref = {Bastad92} } @INPROCEEDINGS{Coquand93, AUTHOR = {Thierry Coquand}, TITLE = {{Infinite Objects in Type Theory}}, YEAR = {1993}, crossref = {Nijmegen93} } @MASTERSTHESIS{Cou94a, AUTHOR = {J. Courant}, MONTH = sep, SCHOOL = {DEA d'Informatique, ENS Lyon}, TITLE = {Explicitation de preuves par r\'ecurrence implicite}, YEAR = {1994} } @INPROCEEDINGS{Del99, author = "Delahaye, D.", title = "Information Retrieval in a Coq Proof Library using Type Isomorphisms", booktitle = {Proceedings of TYPES'99, L\"okeberg}, publisher = SV, series = lncs, year = "1999", url = "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# "{\sf TYPES99-SIsos.ps.gz}" } @INPROCEEDINGS{Del00, author = "Delahaye, D.", title = "A {T}actic {L}anguage for the {S}ystem {{\sf Coq}}", booktitle = "Proceedings of Logic for Programming and Automated Reasoning (LPAR), Reunion Island", publisher = SV, series = LNCS, volume = "1955", pages = "85--95", month = "November", year = "2000", url = "{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# "{\sf LPAR2000-ltac.ps.gz}" } @INPROCEEDINGS{DelMay01, author = "Delahaye, D. and Mayero, M.", title = {{\tt Field}: une proc\'edure de d\'ecision pour les nombres r\'eels en {\Coq}}, booktitle = "Journ\'ees Francophones des Langages Applicatifs, Pontarlier", publisher = "INRIA", month = "Janvier", year = "2001", url = "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# "{\sf JFLA2000-Field.ps.gz}" } @TECHREPORT{Dow90, AUTHOR = {G. Dowek}, INSTITUTION = {INRIA}, NUMBER = {1283}, TITLE = {Naming and Scoping in a Mathematical Vernacular}, TYPE = {Research Report}, YEAR = {1990} } @ARTICLE{Dow91a, AUTHOR = {G. Dowek}, JOURNAL = {Compte-Rendus de l'Acad\'emie des Sciences}, NOTE = {The undecidability of Third Order Pattern Matching in Calculi with Dependent Types or Type Constructors}, NUMBER = {12}, PAGES = {951--956}, TITLE = {L'Ind\'ecidabilit\'e du Filtrage du Troisi\`eme Ordre dans les Calculs avec Types D\'ependants ou Constructeurs de Types}, VOLUME = {I, 312}, YEAR = {1991} } @INPROCEEDINGS{Dow91b, AUTHOR = {G. Dowek}, BOOKTITLE = {Proceedings of Mathematical Foundation of Computer Science}, NOTE = {Also INRIA Research Report}, PAGES = {151--160}, PUBLISHER = SV, SERIES = LNCS, TITLE = {A Second Order Pattern Matching Algorithm in the Cube of Typed $\lambda$-calculi}, VOLUME = {520}, YEAR = {1991} } @PHDTHESIS{Dow91c, AUTHOR = {G. Dowek}, MONTH = dec, SCHOOL = {Universit\'e Paris 7}, TITLE = {D\'emonstration automatique dans le Calcul des Constructions}, YEAR = {1991} } @article{Dow92a, AUTHOR = {G. Dowek}, TITLE = {The Undecidability of Pattern Matching in Calculi where Primitive Recursive Functions are Representable}, YEAR = 1993, journal = tcs, volume = 107, number = 2, pages = {349-356} } @ARTICLE{Dow94a, AUTHOR = {G. Dowek}, JOURNAL = {Annals of Pure and Applied Logic}, VOLUME = {69}, PAGES = {135--155}, TITLE = {Third order matching is decidable}, YEAR = {1994} } @INPROCEEDINGS{Dow94b, AUTHOR = {G. 
Dowek}, BOOKTITLE = {Proceedings of the second international conference on typed lambda calculus and applications}, TITLE = {Lambda-calculus, Combinators and the Comprehension Schema}, YEAR = {1995} } @INPROCEEDINGS{Dyb91, AUTHOR = {P. Dybjer}, BOOKTITLE = {Logical Frameworks}, EDITOR = {G. Huet and G. Plotkin}, PAGES = {59--79}, PUBLISHER = {Cambridge University Press}, TITLE = {Inductive sets and families in {Martin-L{\"o}f's} Type Theory and their set-theoretic semantics: An inversion principle for {Martin-L\"of's} type theory}, VOLUME = {14}, YEAR = {1991} } @ARTICLE{Dyc92, AUTHOR = {Roy Dyckhoff}, JOURNAL = {The Journal of Symbolic Logic}, MONTH = sep, NUMBER = {3}, TITLE = {Contraction-free sequent calculi for intuitionistic logic}, VOLUME = {57}, YEAR = {1992} } @MASTERSTHESIS{Fil94, AUTHOR = {J.-C. Filli\^atre}, MONTH = sep, SCHOOL = {DEA d'Informatique, ENS Lyon}, TITLE = {Une proc\'edure de d\'ecision pour le Calcul des Pr\'edicats Direct. {\'E}tude et impl\'ementation dans le syst\`eme {\Coq}}, YEAR = {1994} } @TECHREPORT{Filliatre95, AUTHOR = {J.-C. Filli\^atre}, INSTITUTION = {LIP-ENS-Lyon}, TITLE = {A decision procedure for Direct Predicate Calculus}, TYPE = {Research report}, NUMBER = {96--25}, YEAR = {1995} } @Article{Filliatre03jfp, author = {J.-C. Filli{\^a}tre}, title = {Verification of Non-Functional Programs using Interpretations in Type Theory}, journal = jfp, volume = 13, number = 4, pages = {709--745}, month = jul, year = 2003, note = {[English translation of \cite{Filliatre99}]}, url = {http://www.lri.fr/~filliatr/ftp/publis/jphd.ps.gz}, topics = "team, lri", type_publi = "irevcomlec" } @PhdThesis{Filliatre99, author = {J.-C. Filli\^atre}, title = {Preuve de programmes imp\'eratifs en th\'eorie des types}, type = {Th{\`e}se de Doctorat}, school = {Universit\'e Paris-Sud}, year = 1999, month = {July}, url = {\url{http://www.lri.fr/~filliatr/ftp/publis/these.ps.gz}} } @Unpublished{Filliatre99c, author = {J.-C. Filli\^atre}, title = {{Formal Proof of a Program: Find}}, month = {January}, year = 2000, note = {Submitted to \emph{Science of Computer Programming}}, url = {\url{http://www.lri.fr/~filliatr/ftp/publis/find.ps.gz}} } @InProceedings{FilliatreMagaud99, author = {J.-C. Filli\^atre and N. Magaud}, title = {Certification of sorting algorithms in the system {\Coq}}, booktitle = {Theorem Proving in Higher Order Logics: Emerging Trends}, year = 1999, url = {\url{http://www.lri.fr/~filliatr/ftp/publis/Filliatre-Magaud.ps.gz}} } @UNPUBLISHED{Fle90, AUTHOR = {E. Fleury}, MONTH = jul, NOTE = {Rapport de Stage}, TITLE = {Implantation des algorithmes de {Floyd et de Dijkstra} dans le {Calcul des Constructions}}, YEAR = {1990} } @BOOK{Fourier, AUTHOR = {Jean-Baptiste-Joseph Fourier}, PUBLISHER = {Gauthier-Villars}, TITLE = {Fourier's method to solve linear inequations/equations systems.}, YEAR = {1890} } @INPROCEEDINGS{Gim94, AUTHOR = {Eduardo Gim\'enez}, BOOKTITLE = {Types'94 : Types for Proofs and Programs}, NOTE = {Extended version in LIP research report 95-07, ENS Lyon}, PUBLISHER = SV, SERIES = LNCS, TITLE = {Codifying guarded definitions with recursive schemes}, VOLUME = {996}, YEAR = {1994} } @TechReport{Gim98, author = {E. Gim\'enez}, title = {A Tutorial on Recursive Types in Coq}, institution = {INRIA}, year = 1998, month = mar } @INPROCEEDINGS{Gimenez95b, AUTHOR = {E. 
Gim\'enez}, BOOKTITLE = {Workshop on Types for Proofs and Programs}, SERIES = LNCS, NUMBER = {1158}, PAGES = {135-152}, TITLE = {An application of co-Inductive types in Coq: verification of the Alternating Bit Protocol}, EDITORS = {S. Berardi and M. Coppo}, PUBLISHER = SV, YEAR = {1995} } @INPROCEEDINGS{Gir70, AUTHOR = {Jean-Yves Girard}, BOOKTITLE = {Proceedings of the 2nd Scandinavian Logic Symposium}, PUBLISHER = {North-Holland}, TITLE = {Une extension de l'interpr\'etation de {G\"odel} \`a l'analyse, et son application \`a l'\'elimination des coupures dans l'analyse et la th\'eorie des types}, YEAR = {1970} } @PHDTHESIS{Gir72, AUTHOR = {Jean-Yves Girard}, SCHOOL = {Universit\'e Paris~7}, TITLE = {Interpr\'etation fonctionnelle et \'elimination des coupures de l'arithm\'etique d'ordre sup\'erieur}, YEAR = {1972} } @BOOK{Gir89, AUTHOR = {Jean-Yves Girard and Yves Lafont and Paul Taylor}, PUBLISHER = {Cambridge University Press}, SERIES = {Cambridge Tracts in Theoretical Computer Science 7}, TITLE = {Proofs and Types}, YEAR = {1989} } @TechReport{Har95, author = {John Harrison}, title = {Metatheory and Reflection in Theorem Proving: A Survey and Critique}, institution = {SRI International Cambridge Computer Science Research Centre,}, year = 1995, type = {Technical Report}, number = {CRC-053}, abstract = {http://www.cl.cam.ac.uk/users/jrh/papers.html} } @MASTERSTHESIS{Hir94, AUTHOR = {Daniel Hirschkoff}, MONTH = sep, SCHOOL = {DEA IARFA, Ecole des Ponts et Chauss\'ees, Paris}, TITLE = {{\'E}criture d'une tactique arithm\'etique pour le syst\`eme {\Coq}}, YEAR = {1994} } @INPROCEEDINGS{HofStr98, AUTHOR = {Martin Hofmann and Thomas Streicher}, TITLE = {The groupoid interpretation of type theory}, BOOKTITLE = {Proceedings of the meeting Twenty-five years of constructive type theory}, PUBLISHER = {Oxford University Press}, YEAR = {1998} } @INCOLLECTION{How80, AUTHOR = {W.A. Howard}, BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, EDITOR = {J.P. Seldin and J.R. Hindley}, NOTE = {Unpublished 1969 Manuscript}, PUBLISHER = {Academic Press}, TITLE = {The Formulae-as-Types Notion of Constructions}, YEAR = {1980} } @InProceedings{Hue87tapsoft, author = {G. Huet}, title = {Programming of Future Generation Computers}, booktitle = {Proceedings of TAPSOFT87}, series = LNCS, volume = 249, pages = {276--286}, year = 1987, publisher = SV } @INPROCEEDINGS{Hue87, AUTHOR = {G. Huet}, BOOKTITLE = {Programming of Future Generation Computers}, EDITOR = {K. Fuchi and M. Nivat}, NOTE = {Also in \cite{Hue87tapsoft}}, PUBLISHER = {Elsevier Science}, TITLE = {Induction Principles Formalized in the {Calculus of Constructions}}, YEAR = {1988} } @INPROCEEDINGS{Hue88, AUTHOR = {G. Huet}, BOOKTITLE = {A perspective in Theoretical Computer Science. Commemorative Volume for Gift Siromoney}, EDITOR = {R. Narasimhan}, NOTE = {Also in~\cite{CoC89}}, PUBLISHER = {World Scientific Publishing}, TITLE = {{The Constructive Engine}}, YEAR = {1989} } @BOOK{Hue89, EDITOR = {G. Huet}, PUBLISHER = {Addison-Wesley}, SERIES = {The UT Year of Programming Series}, TITLE = {Logical Foundations of Functional Programming}, YEAR = {1989} } @INPROCEEDINGS{Hue92, AUTHOR = {G. Huet}, BOOKTITLE = {Proceedings of 12th FST/TCS Conference, New Delhi}, PAGES = {229--240}, PUBLISHER = SV, SERIES = LNCS, TITLE = {The Gallina Specification Language : A case study}, VOLUME = {652}, YEAR = {1992} } @ARTICLE{Hue94, AUTHOR = {G. Huet}, JOURNAL = {J. 
Functional Programming}, PAGES = {371--394}, PUBLISHER = {Cambridge University Press}, TITLE = {Residual theory in $\lambda$-calculus: a formal development}, VOLUME = {4,3}, YEAR = {1994} } @INCOLLECTION{HuetLevy79, AUTHOR = {G. Huet and J.-J. L\'{e}vy}, TITLE = {Call by Need Computations in Non-Ambigous Linear Term Rewriting Systems}, NOTE = {Also research report 359, INRIA, 1979}, BOOKTITLE = {Computational Logic, Essays in Honor of Alan Robinson}, EDITOR = {J.-L. Lassez and G. Plotkin}, PUBLISHER = {The MIT press}, YEAR = {1991} } @ARTICLE{KeWe84, AUTHOR = {J. Ketonen and R. Weyhrauch}, JOURNAL = {Theoretical Computer Science}, PAGES = {297--307}, TITLE = {A decidable fragment of {P}redicate {C}alculus}, VOLUME = {32}, YEAR = {1984} } @BOOK{Kle52, AUTHOR = {S.C. Kleene}, PUBLISHER = {North-Holland}, SERIES = {Bibliotheca Mathematica}, TITLE = {Introduction to Metamathematics}, YEAR = {1952} } @BOOK{Kri90, AUTHOR = {J.-L. Krivine}, PUBLISHER = {Masson}, SERIES = {Etudes et recherche en informatique}, TITLE = {Lambda-calcul {types et mod\`eles}}, YEAR = {1990} } @BOOK{LE92, EDITOR = {G. Huet and G. Plotkin}, PUBLISHER = {Cambridge University Press}, TITLE = {Logical Environments}, YEAR = {1992} } @BOOK{LF91, EDITOR = {G. Huet and G. Plotkin}, PUBLISHER = {Cambridge University Press}, TITLE = {Logical Frameworks}, YEAR = {1991} } @ARTICLE{Laville91, AUTHOR = {A. Laville}, TITLE = {Comparison of Priority Rules in Pattern Matching and Term Rewriting}, JOURNAL = {Journal of Symbolic Computation}, VOLUME = {11}, PAGES = {321--347}, YEAR = {1991} } @INPROCEEDINGS{LePa94, AUTHOR = {F. Leclerc and C. Paulin-Mohring}, BOOKTITLE = {{Types for Proofs and Programs, Types' 93}}, EDITOR = {H. Barendregt and T. Nipkow}, PUBLISHER = SV, SERIES = {LNCS}, TITLE = {{Programming with Streams in Coq. A case study : The Sieve of Eratosthenes}}, VOLUME = {806}, YEAR = {1994} } @TECHREPORT{Leroy90, AUTHOR = {X. Leroy}, TITLE = {The {ZINC} experiment: an economical implementation of the {ML} language}, INSTITUTION = {INRIA}, NUMBER = {117}, YEAR = {1990} } @INPROCEEDINGS{Let02, author = {P. Letouzey}, title = {A New Extraction for Coq}, booktitle = {Proceedings of the TYPES'2002 workshop}, year = 2002, note = {to appear}, url = {draft at \url{http://www.lri.fr/~letouzey/download/extraction2002.ps.gz}} } @BOOK{MaL84, AUTHOR = {{P. Martin-L\"of}}, PUBLISHER = {Bibliopolis}, SERIES = {Studies in Proof Theory}, TITLE = {Intuitionistic Type Theory}, YEAR = {1984} } @ARTICLE{MaSi94, AUTHOR = {P. Manoury and M. Simonot}, JOURNAL = {TCS}, TITLE = {Automatizing termination proof of recursively defined function}, YEAR = {To appear} } @INPROCEEDINGS{Moh89a, AUTHOR = {Christine Paulin-Mohring}, ADDRESS = {Austin}, BOOKTITLE = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, MONTH = jan, PUBLISHER = {ACM}, TITLE = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, YEAR = {1989} } @PHDTHESIS{Moh89b, AUTHOR = {Christine Paulin-Mohring}, MONTH = jan, SCHOOL = {{Universit\'e Paris 7}}, TITLE = {Extraction de programmes dans le {Calcul des Constructions}}, YEAR = {1989} } @INPROCEEDINGS{Moh93, AUTHOR = {Christine Paulin-Mohring}, BOOKTITLE = {Proceedings of the conference Typed Lambda Calculi and Applications}, EDITOR = {M. Bezem and J.-F. 
Groote}, NOTE = {Also LIP research report 92-49, ENS Lyon}, NUMBER = {664}, PUBLISHER = SV, SERIES = {LNCS}, TITLE = {{Inductive Definitions in the System Coq - Rules and Properties}}, YEAR = {1993} } @BOOK{Moh97, AUTHOR = {Christine Paulin-Mohring}, MONTH = jan, PUBLISHER = {{ENS Lyon}}, TITLE = {{Le syst\`eme Coq. \mbox{Th\`ese d'habilitation}}}, YEAR = {1997} } @MASTERSTHESIS{Mun94, AUTHOR = {C. Mu{\~n}oz}, MONTH = sep, SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, TITLE = {D\'emonstration automatique dans la logique propositionnelle intuitionniste}, YEAR = {1994} } @PHDTHESIS{Mun97d, AUTHOR = "C. Mu{\~{n}}oz", TITLE = "Un calcul de substitutions pour la repr\'esentation de preuves partielles en th\'eorie de types", SCHOOL = {Universit\'e Paris 7}, YEAR = "1997", Note = {Version en anglais disponible comme rapport de recherche INRIA RR-3309}, Type = {Th\`ese de Doctorat} } @BOOK{NoPS90, AUTHOR = {B. {Nordstr\"om} and K. Peterson and J. Smith}, BOOKTITLE = {Information Processing 83}, PUBLISHER = {Oxford Science Publications}, SERIES = {International Series of Monographs on Computer Science}, TITLE = {Programming in {Martin-L\"of's} Type Theory}, YEAR = {1990} } @ARTICLE{Nor88, AUTHOR = {B. {Nordstr\"om}}, JOURNAL = {BIT}, TITLE = {Terminating General Recursion}, VOLUME = {28}, YEAR = {1988} } @BOOK{Odi90, EDITOR = {P. Odifreddi}, PUBLISHER = {Academic Press}, TITLE = {Logic and Computer Science}, YEAR = {1990} } @INPROCEEDINGS{PaMS92, AUTHOR = {M. Parigot and P. Manoury and M. Simonot}, ADDRESS = {St. Petersburg, Russia}, BOOKTITLE = {Logic Programming and automated reasoning}, EDITOR = {A. Voronkov}, MONTH = jul, NUMBER = {624}, PUBLISHER = SV, SERIES = {LNCS}, TITLE = {{ProPre : A Programming language with proofs}}, YEAR = {1992} } @ARTICLE{PaWe92, AUTHOR = {Christine Paulin-Mohring and Benjamin Werner}, JOURNAL = {Journal of Symbolic Computation}, PAGES = {607--640}, TITLE = {{Synthesis of ML programs in the system Coq}}, VOLUME = {15}, YEAR = {1993} } @ARTICLE{Par92, AUTHOR = {M. Parigot}, JOURNAL = {Theoretical Computer Science}, NUMBER = {2}, PAGES = {335--356}, TITLE = {{Recursive Programming with Proofs}}, VOLUME = {94}, YEAR = {1992} } @INPROCEEDINGS{Parent95b, AUTHOR = {C. Parent}, BOOKTITLE = {{Mathematics of Program Construction'95}}, PUBLISHER = SV, SERIES = {LNCS}, TITLE = {{Synthesizing proofs from programs in the Calculus of Inductive Constructions}}, VOLUME = {947}, YEAR = {1995} } @INPROCEEDINGS{Prasad93, AUTHOR = {K.V. Prasad}, BOOKTITLE = {{Proceedings of CONCUR'93}}, PUBLISHER = SV, SERIES = {LNCS}, TITLE = {{Programming with broadcasts}}, VOLUME = {715}, YEAR = {1993} } @BOOK{RC95, author = "di~Cosmo, R.", title = "Isomorphisms of Types: from $\lambda$-calculus to information retrieval and language design", series = "Progress in Theoretical Computer Science", publisher = "Birkhauser", year = "1995", note = "ISBN-0-8176-3763-X" } @TECHREPORT{Rou92, AUTHOR = {J. Rouyer}, INSTITUTION = {INRIA}, MONTH = nov, NUMBER = {1795}, TITLE = {{D{\'e}veloppement de l'Algorithme d'Unification dans le Calcul des Constructions}}, YEAR = {1992} } @TECHREPORT{Saibi94, AUTHOR = {A. Sa\"{\i}bi}, INSTITUTION = {INRIA}, MONTH = dec, NUMBER = {2345}, TITLE = {{Axiomatization of a lambda-calculus with explicit-substitutions in the Coq System}}, YEAR = {1994} } @MASTERSTHESIS{Ter92, AUTHOR = {D. Terrasse}, MONTH = sep, SCHOOL = {IARFA}, TITLE = {{Traduction de TYPOL en COQ. Application \`a Mini ML}}, YEAR = {1992} } @TECHREPORT{ThBeKa92, AUTHOR = {L. Th\'ery and Y. 
Bertot and G. Kahn}, INSTITUTION = {INRIA Sophia}, MONTH = may, NUMBER = {1684}, TITLE = {Real theorem provers deserve real user-interfaces}, TYPE = {Research Report}, YEAR = {1992} } @BOOK{TrDa89, AUTHOR = {A.S. Troelstra and D. van Dalen}, PUBLISHER = {North-Holland}, SERIES = {Studies in Logic and the foundations of Mathematics, volumes 121 and 123}, TITLE = {Constructivism in Mathematics, an introduction}, YEAR = {1988} } @PHDTHESIS{Wer94, AUTHOR = {B. Werner}, SCHOOL = {Universit\'e Paris 7}, TITLE = {Une th\'eorie des constructions inductives}, TYPE = {Th\`ese de Doctorat}, YEAR = {1994} } @PHDTHESIS{Bar99, AUTHOR = {B. Barras}, SCHOOL = {Universit\'e Paris 7}, TITLE = {Auto-validation d'un système de preuves avec familles inductives}, TYPE = {Th\`ese de Doctorat}, YEAR = {1999} } @UNPUBLISHED{ddr98, AUTHOR = {D. de Rauglaudre}, TITLE = {Camlp4 version 1.07.2}, YEAR = {1998}, NOTE = {In Camlp4 distribution} } @ARTICLE{dowek93, AUTHOR = {G. Dowek}, TITLE = {{A Complete Proof Synthesis Method for the Cube of Type Systems}}, JOURNAL = {Journal Logic Computation}, VOLUME = {3}, NUMBER = {3}, PAGES = {287--315}, MONTH = {June}, YEAR = {1993} } @INPROCEEDINGS{manoury94, AUTHOR = {P. Manoury}, TITLE = {{A User's Friendly Syntax to Define Recursive Functions as Typed $\lambda-$Terms}}, BOOKTITLE = {{Types for Proofs and Programs, TYPES'94}}, SERIES = {LNCS}, VOLUME = {996}, MONTH = jun, YEAR = {1994} } @TECHREPORT{maranget94, AUTHOR = {L. Maranget}, INSTITUTION = {INRIA}, NUMBER = {2385}, TITLE = {{Two Techniques for Compiling Lazy Pattern Matching}}, YEAR = {1994} } @INPROCEEDINGS{puel-suarez90, AUTHOR = {L.Puel and A. Su\'arez}, BOOKTITLE = {{Conference Lisp and Functional Programming}}, SERIES = {ACM}, PUBLISHER = SV, TITLE = {{Compiling Pattern Matching by Term Decomposition}}, YEAR = {1990} } @MASTERSTHESIS{saidi94, AUTHOR = {H. Saidi}, MONTH = sep, SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, TITLE = {R\'esolution d'\'equations dans le syst\`eme T de G\"odel}, YEAR = {1994} } @misc{streicher93semantical, author = "T. Streicher", title = "Semantical Investigations into Intensional Type Theory", note = "Habilitationsschrift, LMU München.", year = "1993" } @Misc{Pcoq, author = {Lemme Team}, title = {Pcoq a graphical user-interface for {Coq}}, note = {\url{http://www-sop.inria.fr/lemme/pcoq/}} } @Misc{ProofGeneral, author = {David Aspinall}, title = {Proof General}, note = {\url{http://proofgeneral.inf.ed.ac.uk/}} } @Book{CoqArt, author = {Yves Bertot and Pierre Castéran}, title = {Coq'Art}, publisher = {Springer-Verlag}, year = 2004, note = {To appear} } @INCOLLECTION{wadler87, AUTHOR = {P. Wadler}, TITLE = {Efficient Compilation of Pattern Matching}, BOOKTITLE = {The Implementation of Functional Programming Languages}, EDITOR = {S.L. Peyton Jones}, PUBLISHER = {Prentice-Hall}, YEAR = {1987} } @COMMENT{cross-references, must be at end} @BOOK{Bastad92, EDITOR = {B. Nordstr\"om and K. Petersson and G. Plotkin}, PUBLISHER = {Available by ftp at site ftp.inria.fr}, TITLE = {Proceedings of the 1992 Workshop on Types for Proofs and Programs}, YEAR = {1992} } @BOOK{Nijmegen93, EDITOR = {H. Barendregt and T. Nipkow}, PUBLISHER = SV, SERIES = LNCS, TITLE = {Types for Proofs and Programs}, VOLUME = {806}, YEAR = {1994} } @PHDTHESIS{Luo90, AUTHOR = {Z.
Luo}, TITLE = {An Extended Calculus of Constructions}, SCHOOL = {University of Edinburgh}, YEAR = {1990} } coq-8.4pl2/doc/faq/interval_discr.v0000640000175000001440000002404711776416511016362 0ustar notinusers(** Sketch of the proof of {p:nat|p<=n} = {p:nat|p<=m} -> n=m - preliminary results on the irrelevance of boundedness proofs - introduce the notion of finite cardinal |A| - prove that |{p:nat|p<=n}| = n - prove that |A| = n /\ |A| = m -> n = m if equality is decidable on A - prove that equality is decidable on A - conclude *) (** * Preliminary results on [nat] and [le] *) (** Proving axiom K on [nat] *) Require Import Eqdep_dec. Require Import Arith. Theorem eq_rect_eq_nat : forall (p:nat) (Q:nat->Type) (x:Q p) (h:p=p), x = eq_rect p Q x p h. Proof. intros. apply K_dec_set with (p := h). apply eq_nat_dec. reflexivity. Qed. (** Proving unicity of proofs of [(n<=m)%nat] *) Scheme le_ind' := Induction for le Sort Prop. Theorem le_uniqueness_proof : forall (n m : nat) (p q : n <= m), p = q. Proof. induction p using le_ind'; intro q. replace (le_n n) with (eq_rect _ (fun n0 => n <= n0) (le_n n) _ eq_refl). 2:reflexivity. generalize (eq_refl n). pattern n at 2 4 6 10, q; case q; [intro | intros m l e]. rewrite <- eq_rect_eq_nat; trivial. contradiction (le_Sn_n m); rewrite <- e; assumption. replace (le_S n m p) with (eq_rect _ (fun n0 => n <= n0) (le_S n m p) _ eq_refl). 2:reflexivity. generalize (eq_refl (S m)). pattern (S m) at 1 3 4 6, q; case q; [intro Heq | intros m0 l HeqS]. contradiction (le_Sn_n m); rewrite Heq; assumption. injection HeqS; intro Heq; generalize l HeqS. rewrite <- Heq; intros; rewrite <- eq_rect_eq_nat. rewrite (IHp l0); reflexivity. Qed. (** Proving irrelevance of boundedness proofs while building elements of interval *) Lemma dep_pair_intro : forall (n x y:nat) (Hx : x<=n) (Hy : y<=n), x=y -> exist (fun x => x <= n) x Hx = exist (fun x => x <= n) y Hy. Proof. intros n x y Hx Hy Heq. generalize Hy. rewrite <- Heq. intros. rewrite (le_uniqueness_proof x n Hx Hy0). reflexivity. Qed. (** * Proving that {p:nat|p<=n} = {p:nat|p<=m} -> n=m *) (** Definition of having finite cardinality [n+1] for a set [A] *) Definition card (A:Set) n := exists f, (forall x:A, f x <= n) /\ (forall x y:A, f x = f y -> x = y) /\ (forall m, m <= n -> exists x:A, f x = m). Require Import Arith. (** Showing that the interval [0;n] has cardinality [n+1] *) Theorem card_interval : forall n, card {x:nat|x<=n} n. Proof. intro n. exists (fun x:{x:nat|x<=n} => proj1_sig x). split. (* bounded *) intro x; apply (proj2_sig x). split. (* injectivity *) intros (p,Hp) (q,Hq). simpl. intro Hpq. apply dep_pair_intro; assumption. (* surjectivity *) intros m Hmn. exists (exist (fun x : nat => x <= n) m Hmn). reflexivity. Qed. (** Showing that equality on the interval [0;n] is decidable *) Lemma interval_dec : forall n (x y : {m:nat|m<=n}), {x=y}+{x<>y}. Proof. intros n (p,Hp). induction p; intros ([|q],Hq). left. apply dep_pair_intro. reflexivity. right. intro H; discriminate H. right. intro H; discriminate H. assert (Hp' : p <= n). apply le_Sn_le; assumption. assert (Hq' : q <= n). apply le_Sn_le; assumption. destruct (IHp Hp' (exist (fun m => m <= n) q Hq')) as [Heq|Hneq]. left. injection Heq; intro Heq'. apply dep_pair_intro. apply eq_S. assumption. right. intro HeqS. injection HeqS; intro Heq. apply Hneq. apply dep_pair_intro. assumption. Qed. 
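(** Usage sketch (added illustration, not part of the original development):
    specializing [interval_dec] to a concrete bound, here 5, shows the
    decidable-equality principle it provides on that interval. *)
Check (interval_dec 5 : forall x y : {m:nat | m <= 5}, {x=y} + {x<>y}).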
(** Showing that the cardinality relation is functional on decidable sets *) Lemma card_inj_aux : forall (A:Type) f g n, (forall x:A, f x <= 0) -> (forall x y:A, f x = f y -> x = y) -> (forall m, m <= S n -> exists x:A, g x = m) -> False. Proof. intros A f g n Hfbound Hfinj Hgsurj. destruct (Hgsurj (S n) (le_n _)) as (x,Hx). destruct (Hgsurj n (le_S _ _ (le_n _))) as (x',Hx'). assert (Hfx : 0 = f x). apply le_n_O_eq. apply Hfbound. assert (Hfx' : 0 = f x'). apply le_n_O_eq. apply Hfbound. assert (x=x'). apply Hfinj. rewrite <- Hfx. rewrite <- Hfx'. reflexivity. rewrite H in Hx. rewrite Hx' in Hx. apply (n_Sn _ Hx). Qed. (** For [dec_restrict], we use a lemma on the negation of equality that requires proof-irrelevance. It should be possible to avoid this lemma by generalizing over a first-order definition of [x<>y], say [neq] such that [{x=y}+{neq x y}] and [~(x=y /\ neq x y)]; for such [neq], unicity of proofs could be proven *) Require Import Classical. Lemma neq_dep_intro : forall (A:Set) (z x y:A) (p:x<>z) (q:y<>z), x=y -> exist (fun x => x <> z) x p = exist (fun x => x <> z) y q. Proof. intros A z x y p q Heq. generalize q; clear q; rewrite <- Heq; intro q. rewrite (proof_irrelevance _ p q); reflexivity. Qed. Lemma dec_restrict : forall (A:Set), (forall x y :A, {x=y}+{x<>y}) -> forall z (x y :{a:A|a<>z}), {x=y}+{x<>y}. Proof. intros A Hdec z (x,Hx) (y,Hy). destruct (Hdec x y) as [Heq|Hneq]. left; apply neq_dep_intro; assumption. right; intro Heq; injection Heq; exact Hneq. Qed. Lemma pred_inj : forall n m, 0 <> n -> 0 <> m -> pred m = pred n -> m = n. Proof. destruct n. intros m H; destruct H; reflexivity. destruct m. intros _ H; destruct H; reflexivity. simpl; intros _ _ H. rewrite H. reflexivity. Qed. Lemma le_neq_lt : forall n m, n <= m -> n<>m -> n < m. Proof. intros n m Hle Hneq. destruct (le_lt_eq_dec n m Hle). assumption. contradiction. Qed. Lemma inj_restrict : forall (A:Set) (f:A->nat) x y z, (forall x y : A, f x = f y -> x = y) -> x <> z -> f y < f z -> f z <= f x -> pred (f x) = f y -> False. (* Search error sans le type de f !! *) Proof. intros A f x y z Hfinj Hneqx Hfy Hfx Heq. assert (f z <> f x). apply not_eq_sym. intro Heqf. apply Hneqx. apply Hfinj. assumption. assert (f x = S (f y)). assert (0 < f x). apply le_lt_trans with (f z). apply le_O_n. apply le_neq_lt; assumption. apply pred_inj. apply O_S. apply lt_O_neq; assumption. exact Heq. assert (f z <= f y). destruct (le_lt_or_eq _ _ Hfx). apply lt_n_Sm_le. rewrite <- H0. assumption. contradiction Hneqx. symmetry. apply Hfinj. assumption. contradiction (lt_not_le (f y) (f z)). Qed. Theorem card_inj : forall m n (A:Set), (forall x y :A, {x=y}+{x<>y}) -> card A m -> card A n -> m = n. Proof. induction m; destruct n; intros A Hdec (f,(Hfbound,(Hfinj,Hfsurj))) (g,(Hgbound,(Hginj,Hgsurj))). (* 0/0 *) reflexivity. (* 0/Sm *) destruct (card_inj_aux _ _ _ _ Hfbound Hfinj Hgsurj). (* Sn/0 *) destruct (card_inj_aux _ _ _ _ Hgbound Hginj Hfsurj). (* Sn/Sm *) destruct (Hgsurj (S n) (le_n _)) as (xSn,HSnx). rewrite IHm with (n:=n) (A := {x:A|x<>xSn}). reflexivity. (* decidability of eq on {x:A|x<>xSm} *) apply dec_restrict. assumption. (* cardinality of {x:A|x<>xSn} is m *) pose (f' := fun x' : {x:A|x<>xSn} => let (x,Hneq) := x' in if le_lt_dec (f xSn) (f x) then pred (f x) else f x). exists f'. split. (* f' is bounded *) unfold f'. intros (x,_). destruct (le_lt_dec (f xSn) (f x)) as [Hle|Hge]. change m with (pred (S m)). apply le_pred. apply Hfbound. apply le_S_n. apply le_trans with (f xSn). exact Hge. apply Hfbound. split. 
(* f' is injective *) unfold f'. intros (x,Hneqx) (y,Hneqy) Heqf'. destruct (le_lt_dec (f xSn) (f x)) as [Hlefx|Hgefx]; destruct (le_lt_dec (f xSn) (f y)) as [Hlefy|Hgefy]. (* f xSn <= f x et f xSn <= f y *) assert (Heq : x = y). apply Hfinj. assert (f xSn <> f y). apply not_eq_sym. intro Heqf. apply Hneqy. apply Hfinj. assumption. assert (0 < f y). apply le_lt_trans with (f xSn). apply le_O_n. apply le_neq_lt; assumption. assert (f xSn <> f x). apply not_eq_sym. intro Heqf. apply Hneqx. apply Hfinj. assumption. assert (0 < f x). apply le_lt_trans with (f xSn). apply le_O_n. apply le_neq_lt; assumption. apply pred_inj. apply lt_O_neq; assumption. apply lt_O_neq; assumption. assumption. apply neq_dep_intro; assumption. (* f y < f xSn <= f x *) destruct (inj_restrict A f x y xSn); assumption. (* f x < f xSn <= f y *) symmetry in Heqf'. destruct (inj_restrict A f y x xSn); assumption. (* f x < f xSn et f y < f xSn *) assert (Heq : x=y). apply Hfinj; assumption. apply neq_dep_intro; assumption. (* f' is surjective *) intros p Hlep. destruct (le_lt_dec (f xSn) p) as [Hle|Hlt]. (* case f xSn <= p *) destruct (Hfsurj (S p) (le_n_S _ _ Hlep)) as (x,Hx). assert (Hneq : x <> xSn). intro Heqx. rewrite Heqx in Hx. rewrite Hx in Hle. apply le_Sn_n with p; assumption. exists (exist (fun a => a<>xSn) x Hneq). unfold f'. destruct (le_lt_dec (f xSn) (f x)) as [Hle'|Hlt']. rewrite Hx; reflexivity. rewrite Hx in Hlt'. contradiction (le_not_lt (f xSn) p). apply lt_trans with (S p). apply lt_n_Sn. assumption. (* case p < f xSn *) destruct (Hfsurj p (le_S _ _ Hlep)) as (x,Hx). assert (Hneq : x <> xSn). intro Heqx. rewrite Heqx in Hx. rewrite Hx in Hlt. apply (lt_irrefl p). assumption. exists (exist (fun a => a<>xSn) x Hneq). unfold f'. destruct (le_lt_dec (f xSn) (f x)) as [Hle'|Hlt']. rewrite Hx in Hle'. contradiction (lt_irrefl p). apply lt_le_trans with (f xSn); assumption. assumption. (* cardinality of {x:A|x<>xSn} is n *) pose (g' := fun x' : {x:A|x<>xSn} => let (x,Hneq) := x' in if Hdec x xSn then 0 else g x). exists g'. split. (* g is bounded *) unfold g'. intros (x,_). destruct (Hdec x xSn) as [_|Hneq]. apply le_O_n. assert (Hle_gx:=Hgbound x). destruct (le_lt_or_eq _ _ Hle_gx). apply lt_n_Sm_le. assumption. contradiction Hneq. apply Hginj. rewrite HSnx. assumption. split. (* g is injective *) unfold g'. intros (x,Hneqx) (y,Hneqy) Heqg'. destruct (Hdec x xSn) as [Heqx|_]. contradiction Hneqx. destruct (Hdec y xSn) as [Heqy|_]. contradiction Hneqy. assert (Heq : x=y). apply Hginj; assumption. apply neq_dep_intro; assumption. (* g is surjective *) intros p Hlep. destruct (Hgsurj p (le_S _ _ Hlep)) as (x,Hx). assert (Hneq : x<>xSn). intro Heq. rewrite Heq in Hx. rewrite Hx in HSnx. rewrite HSnx in Hlep. contradiction (le_Sn_n _ Hlep). exists (exist (fun a => a<>xSn) x Hneq). simpl. destruct (Hdec x xSn) as [Heqx|_]. contradiction Hneq. assumption. Qed. (** Conclusion *) Theorem interval_discr : forall n m, {p:nat|p<=n} = {p:nat|p<=m} -> n=m. Proof. intros n m Heq. apply card_inj with (A := {p:nat|p<=n}). apply interval_dec. apply card_interval. rewrite Heq. apply card_interval. Qed. 
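(** An immediate corollary, added here only to illustrate how
    [interval_discr] is used (it is not part of the original sketch):
    distinct bounds yield distinct interval types. *)

Corollary interval_discr_contra :
  forall n m : nat, n <> m -> {p:nat | p <= n} <> {p:nat | p <= m}.
Proof.
  intros n m Hneq Heq.
  apply Hneq.
  apply interval_discr.
  assumption.
Qed.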
coq-8.4pl2/doc/common/0000750000175000001440000000000012127276532013672 5ustar notinuserscoq-8.4pl2/doc/common/title.tex0000750000175000001440000000272111756406457015552 0ustar notinusers%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % File title.tex % Page formatting commands % Macro \coverpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %\setlength{\marginparwidth}{0pt} %\setlength{\oddsidemargin}{0pt} %\setlength{\evensidemargin}{0pt} %\setlength{\marginparsep}{0pt} %\setlength{\topmargin}{0pt} %\setlength{\textwidth}{16.9cm} %\setlength{\textheight}{22cm} %\usepackage{fullpage} %\newcommand{\printingdate}{\today} %\newcommand{\isdraft}{\Large\bf\today\\[20pt]} %\newcommand{\isdraft}{\vspace{20pt}} \newcommand{\coverpage}[3]{ \thispagestyle{empty} \begin{center} \bfseries % for the rest of this page, until \end{center} \Huge The Coq Proof Assistant\\[12pt] #1\\[20pt] \Large\today\\[20pt] Version \coqversion\footnote[1]{This research was partly supported by IST working group ``Types''} \vspace{0pt plus .5fill} #2 \par\vfill TypiCal Project (formerly LogiCal) \vspace*{15pt} \end{center} \newpage \thispagestyle{empty} \hbox{}\vfill % without \hbox \vfill does not work at the top of the page \begin{flushleft} %BEGIN LATEX V\coqversion, \today \par\vspace{20pt} %END LATEX \copyright INRIA 1999-2004 ({\Coq} versions 7.x) \copyright INRIA 2004-2012 ({\Coq} versions 8.x) #3 \end{flushleft} } % end of \coverpage definition % \newcommand{\shorttitle}[1]{ % \begin{center} % \begin{huge} % \begin{bf} % The Coq Proof Assistant\\ % \vspace{10pt} % #1\\ % \end{bf} % \end{huge} % \end{center} % \vspace{5pt} % } % Local Variables: % mode: LaTeX % TeX-master: "" % End: coq-8.4pl2/doc/common/macros.tex0000750000175000001440000004566412010532550015704 0ustar notinusers%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % MACROS FOR THE REFERENCE MANUAL OF COQ % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % For commentaries (define \com as {} for the release manual) %\newcommand{\com}[1]{{\it(* #1 *)}} %\newcommand{\com}[1]{} %%OPTIONS for HACHA %\renewcommand{\cuttingunit}{section} %BEGIN LATEX \newenvironment{centerframe}% {\bgroup \dimen0=\textwidth \advance\dimen0 by -2\fboxrule \advance\dimen0 by -2\fboxsep \setbox0=\hbox\bgroup \begin{minipage}{\dimen0}% \begin{center}}% {\end{center}% \end{minipage}\egroup \centerline{\fbox{\box0}}\egroup } %END LATEX %HEVEA \newenvironment{centerframe}{\begin{center}}{\end{center}} %HEVEA \renewcommand{\vec}[1]{\mathbf{#1}} %\renewcommand{\ominus}{-} % Hevea does a good job translating these commands %\renewcommand{\oplus}{+} %\renewcommand{\otimes}{\times} %\newcommand{\land}{\wedge} %\newcommand{\lor}{\vee} %HEVEA \renewcommand{\k}[1]{#1} % \k{a} is supposed to produce a with a little stroke %HEVEA \newcommand{\phantom}[1]{\qquad} %%%%%%%%%%%%%%%%%%%%%%% % Formatting commands % %%%%%%%%%%%%%%%%%%%%%%% \newcommand{\ErrMsg}{\medskip \noindent {\bf Error message: }} \newcommand{\ErrMsgx}{\medskip \noindent {\bf Error messages: }} \newcommand{\variant}{\medskip \noindent {\bf Variant: }} \newcommand{\variants}{\medskip \noindent {\bf Variants: }} \newcommand{\SeeAlso}{\medskip \noindent {\bf See also: }} \newcommand{\Rem}{\medskip \noindent {\bf Remark: }} \newcommand{\Rems}{\medskip \noindent {\bf Remarks: }} \newcommand{\Example}{\medskip \noindent {\bf Example: }} \newcommand{\examples}{\medskip \noindent {\bf Examples: }} \newcommand{\Warning}{\medskip \noindent {\bf Warning: }} \newcommand{\Warns}{\medskip \noindent {\bf Warnings: }} \newcounter{ex} \newcommand{\firstexample}{\setcounter{ex}{1}} 
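% Illustrative usage of the example counter (comment added for clarity, not
% part of the original macros): \firstexample above resets the counter, and
% each call to \example below prints a numbered heading and increments it:
%   \firstexample
%   \example{A first example}    % typesets ``Example 1: A first example''
%   \example{A second example}   % typesets ``Example 2: A second example''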
\newcommand{\example}[1]{ \medskip \noindent \textbf{Example \arabic{ex}: }\textit{#1} \addtocounter{ex}{1}} \newenvironment{Variant}{\variant\begin{enumerate}}{\end{enumerate}} \newenvironment{Variants}{\variants\begin{enumerate}}{\end{enumerate}} \newenvironment{ErrMsgs}{\ErrMsgx\begin{enumerate}}{\end{enumerate}} \newenvironment{Remarks}{\Rems\begin{enumerate}}{\end{enumerate}} \newenvironment{Warnings}{\Warns\begin{enumerate}}{\end{enumerate}} \newenvironment{Examples}{\medskip\noindent{\bf Examples:} \begin{enumerate}}{\end{enumerate}} %\newcommand{\bd}{\noindent\bf} %\newcommand{\sbd}{\vspace{8pt}\noindent\bf} %\newcommand{\sdoll}[1]{\begin{small}$ #1~ $\end{small}} %\newcommand{\sdollnb}[1]{\begin{small}$ #1 $\end{small}} \newcommand{\kw}[1]{\textsf{#1}} %\newcommand{\spec}[1]{\{\,#1\,\}} % Building regular expressions \newcommand{\zeroone}[1]{\mbox{\sl [}#1\mbox{\sl ]}} %\newcommand{\zeroonemany}[1]{$\{$#1$\}$*} %\newcommand{\onemany}[1]{$\{$#1$\}$+} \newcommand{\nelist}[2]{{#1} {\tt #2}~{\ldots}~{\tt #2} {#1}} \newcommand{\sequence}[2]{{\sl [}{#1} {\tt #2}~{\ldots}~{\tt #2} {#1}{\sl ]}} \newcommand{\nelistwithoutblank}[2]{#1{\tt #2}\ldots{\tt #2}#1} \newcommand{\sequencewithoutblank}[2]{$[$#1{\tt #2}\ldots{\tt #2}#1$]$} % Used for RefMan-gal %\newcommand{\ml}[1]{\hbox{\tt{#1}}} %\newcommand{\op}{\,|\,} %%%%%%%%%%%%%%%%%%%%%%%% % Trademarks and so on % %%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\Coq}{\textsc{Coq}} \newcommand{\gallina}{\textsc{Gallina}} \newcommand{\Gallina}{\textsc{Gallina}} \newcommand{\CoqIDE}{\textsc{CoqIDE}} \newcommand{\ocaml}{\textsc{Objective Caml}} \newcommand{\camlpppp}{\textsc{Camlp4}} \newcommand{\emacs}{\textsc{GNU Emacs}} \newcommand{\CIC}{\pCIC} \newcommand{\pCIC}{p\textsc{Cic}} \newcommand{\iCIC}{\textsc{Cic}} \newcommand{\FW}{\ensuremath{F_{\omega}}} \newcommand{\Program}{\textsc{Program}} \newcommand{\Russell}{\textsc{Russell}} \newcommand{\PVS}{\textsc{PVS}} %\newcommand{\bn}{{\sf BNF}} %%%%%%%%%%%%%%%%%%% % Name of tactics % %%%%%%%%%%%%%%%%%%% %\newcommand{\Natural}{\mbox{\tt Natural}} %%%%%%%%%%%%%%%%% % \rm\sl series % %%%%%%%%%%%%%%%%% \newcommand{\nterm}[1]{\textrm{\textsl{#1}}} \newcommand{\qstring}{\nterm{string}} %% New syntax specific entries \newcommand{\annotation}{\nterm{annotation}} \newcommand{\assums}{\nterm{assums}} % vernac \newcommand{\simpleassums}{\nterm{simple\_assums}} % assumptions \newcommand{\binder}{\nterm{binder}} \newcommand{\binders}{\nterm{binders}} \newcommand{\caseitems}{\nterm{match\_items}} \newcommand{\caseitem}{\nterm{match\_item}} \newcommand{\eqn}{\nterm{equation}} \newcommand{\ifitem}{\nterm{dep\_ret\_type}} \newcommand{\convclause}{\nterm{conversion\_clause}} \newcommand{\occclause}{\nterm{occurrence\_clause}} \newcommand{\occgoalset}{\nterm{goal\_occurrences}} \newcommand{\atoccurrences}{\nterm{at\_occurrences}} \newcommand{\occlist}{\nterm{occurrences}} \newcommand{\params}{\nterm{params}} % vernac \newcommand{\returntype}{\nterm{return\_type}} \newcommand{\idparams}{\nterm{ident\_with\_params}} \newcommand{\statkwd}{\nterm{assertion\_keyword}} % vernac \newcommand{\termarg}{\nterm{arg}} \newcommand{\typecstr}{\zeroone{{\tt :}~{\term}}} \newcommand{\typecstrwithoutblank}{\zeroone{{\tt :}{\term}}} \newcommand{\Fwterm}{\nterm{Fwterm}} \newcommand{\Index}{\nterm{index}} \newcommand{\abbrev}{\nterm{abbreviation}} \newcommand{\atomictac}{\nterm{atomic\_tactic}} \newcommand{\bindinglist}{\nterm{bindings\_list}} \newcommand{\cast}{\nterm{cast}} \newcommand{\cofixpointbodies}{\nterm{cofix\_bodies}} 
\newcommand{\cofixpointbody}{\nterm{cofix\_body}} \newcommand{\commandtac}{\nterm{tactic\_invocation}} \newcommand{\constructor}{\nterm{constructor}} \newcommand{\convtactic}{\nterm{conv\_tactic}} \newcommand{\assumptionkeyword}{\nterm{assumption\_keyword}} \newcommand{\assumption}{\nterm{assumption}} \newcommand{\definition}{\nterm{definition}} \newcommand{\digit}{\nterm{digit}} \newcommand{\exteqn}{\nterm{ext\_eqn}} \newcommand{\field}{\nterm{field}} \newcommand{\firstletter}{\nterm{first\_letter}} \newcommand{\fixpg}{\nterm{fix\_pgm}} \newcommand{\fixpointbodies}{\nterm{fix\_bodies}} \newcommand{\fixpointbody}{\nterm{fix\_body}} \newcommand{\fixpoint}{\nterm{fixpoint}} \newcommand{\flag}{\nterm{flag}} \newcommand{\form}{\nterm{form}} \newcommand{\entry}{\nterm{entry}} \newcommand{\proditem}{\nterm{production\_item}} \newcommand{\taclevel}{\nterm{tactic\_level}} \newcommand{\tacargtype}{\nterm{tactic\_argument\_type}} \newcommand{\scope}{\nterm{scope}} \newcommand{\delimkey}{\nterm{key}} \newcommand{\optscope}{\nterm{opt\_scope}} \newcommand{\declnotation}{\nterm{decl\_notation}} \newcommand{\symbolentry}{\nterm{symbol}} \newcommand{\modifiers}{\nterm{modifiers}} \newcommand{\localdef}{\nterm{local\_def}} \newcommand{\localdecls}{\nterm{local\_decls}} \newcommand{\ident}{\nterm{ident}} \newcommand{\accessident}{\nterm{access\_ident}} \newcommand{\possiblybracketedident}{\nterm{possibly\_bracketed\_ident}} \newcommand{\inductivebody}{\nterm{ind\_body}} \newcommand{\inductive}{\nterm{inductive}} \newcommand{\naturalnumber}{\nterm{natural}} \newcommand{\integer}{\nterm{integer}} \newcommand{\multpattern}{\nterm{mult\_pattern}} \newcommand{\mutualcoinductive}{\nterm{mutual\_coinductive}} \newcommand{\mutualinductive}{\nterm{mutual\_inductive}} \newcommand{\nestedpattern}{\nterm{nested\_pattern}} \newcommand{\name}{\nterm{name}} \newcommand{\num}{\nterm{num}} \newcommand{\pattern}{\nterm{pattern}} % pattern for pattern-matching \newcommand{\orpattern}{\nterm{or\_pattern}} \newcommand{\intropattern}{\nterm{intro\_pattern}} \newcommand{\disjconjintropattern}{\nterm{disj\_conj\_intro\_pattern}} \newcommand{\namingintropattern}{\nterm{naming\_intro\_pattern}} \newcommand{\termpattern}{\nterm{term\_pattern}} % term with holes \newcommand{\pat}{\nterm{pat}} \newcommand{\pgs}{\nterm{pgms}} \newcommand{\pg}{\nterm{pgm}} %BEGIN LATEX \newcommand{\proof}{\nterm{proof}} %END LATEX %HEVEA \renewcommand{\proof}{\nterm{proof}} \newcommand{\record}{\nterm{record}} \newcommand{\recordkw}{\nterm{record\_keyword}} \newcommand{\rewrule}{\nterm{rewriting\_rule}} \newcommand{\sentence}{\nterm{sentence}} \newcommand{\simplepattern}{\nterm{simple\_pattern}} \newcommand{\sort}{\nterm{sort}} \newcommand{\specif}{\nterm{specif}} \newcommand{\assertion}{\nterm{assertion}} \newcommand{\str}{\nterm{string}} \newcommand{\subsequentletter}{\nterm{subsequent\_letter}} \newcommand{\switch}{\nterm{switch}} \newcommand{\messagetoken}{\nterm{message\_token}} \newcommand{\tac}{\nterm{tactic}} \newcommand{\terms}{\nterm{terms}} \newcommand{\term}{\nterm{term}} \newcommand{\module}{\nterm{module}} \newcommand{\modexpr}{\nterm{module\_expression}} \newcommand{\modtype}{\nterm{module\_type}} \newcommand{\onemodbinding}{\nterm{module\_binding}} \newcommand{\modbindings}{\nterm{module\_bindings}} \newcommand{\qualid}{\nterm{qualid}} \newcommand{\qualidorstring}{\nterm{qualid\_or\_string}} \newcommand{\class}{\nterm{class}} \newcommand{\dirpath}{\nterm{dirpath}} \newcommand{\typedidents}{\nterm{typed\_idents}} 
\newcommand{\type}{\nterm{type}} \newcommand{\vref}{\nterm{ref}} \newcommand{\zarithformula}{\nterm{zarith\_formula}} \newcommand{\zarith}{\nterm{zarith}} \newcommand{\ltac}{\mbox{${\cal L}_{tac}$}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % \mbox{\sf } series for roman text in maths formulas % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\alors}{\mbox{\textsf{then}}} \newcommand{\alter}{\mbox{\textsf{alter}}} \newcommand{\bool}{\mbox{\textsf{bool}}} \newcommand{\conc}{\mbox{\textsf{conc}}} \newcommand{\cons}{\mbox{\textsf{cons}}} \newcommand{\consf}{\mbox{\textsf{consf}}} \newcommand{\emptyf}{\mbox{\textsf{emptyf}}} \newcommand{\EqSt}{\mbox{\textsf{EqSt}}} \newcommand{\false}{\mbox{\textsf{false}}} \newcommand{\filter}{\mbox{\textsf{filter}}} \newcommand{\forest}{\mbox{\textsf{forest}}} \newcommand{\from}{\mbox{\textsf{from}}} \newcommand{\hd}{\mbox{\textsf{hd}}} \newcommand{\Length}{\mbox{\textsf{Length}}} \newcommand{\length}{\mbox{\textsf{length}}} \newcommand{\LengthA}{\mbox {\textsf{Length\_A}}} \newcommand{\List}{\mbox{\textsf{List}}} \newcommand{\ListA}{\mbox{\textsf{List\_A}}} \newcommand{\LNil}{\mbox{\textsf{Lnil}}} \newcommand{\LCons}{\mbox{\textsf{Lcons}}} \newcommand{\nat}{\mbox{\textsf{nat}}} \newcommand{\nO}{\mbox{\textsf{O}}} \newcommand{\nS}{\mbox{\textsf{S}}} \newcommand{\node}{\mbox{\textsf{node}}} \newcommand{\Nil}{\mbox{\textsf{nil}}} \newcommand{\Prop}{\mbox{\textsf{Prop}}} \newcommand{\Set}{\mbox{\textsf{Set}}} \newcommand{\si}{\mbox{\textsf{if}}} \newcommand{\sinon}{\mbox{\textsf{else}}} \newcommand{\Str}{\mbox{\textsf{Stream}}} \newcommand{\tl}{\mbox{\textsf{tl}}} \newcommand{\tree}{\mbox{\textsf{tree}}} \newcommand{\true}{\mbox{\textsf{true}}} \newcommand{\Type}{\mbox{\textsf{Type}}} \newcommand{\unfold}{\mbox{\textsf{unfold}}} \newcommand{\zeros}{\mbox{\textsf{zeros}}} %%%%%%%%% % Misc. 
% %%%%%%%%% \newcommand{\T}{\texttt{T}} \newcommand{\U}{\texttt{U}} \newcommand{\real}{\textsf{Real}} \newcommand{\Data}{\textit{Data}} \newcommand{\In} {{\textbf{in }}} \newcommand{\AND} {{\textbf{and}}} \newcommand{\If}{{\textbf{if }}} \newcommand{\Else}{{\textbf{else }}} \newcommand{\Then} {{\textbf{then }}} %\newcommand{\Let}{{\textbf{let }}} % looks like this is never used \newcommand{\Where}{{\textbf{where rec }}} \newcommand{\Function}{{\textbf{function }}} \newcommand{\Rec}{{\textbf{rec }}} %\newcommand{\cn}{\centering} \newcommand{\nth}{\mbox{$^{\mbox{\scriptsize th}}$}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Math commands and symbols % %%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\la}{\leftarrow} \newcommand{\ra}{\rightarrow} \newcommand{\Ra}{\Rightarrow} \newcommand{\rt}{\Rightarrow} \newcommand{\lla}{\longleftarrow} \newcommand{\lra}{\longrightarrow} \newcommand{\Llra}{\Longleftrightarrow} \newcommand{\mt}{\mapsto} \newcommand{\ov}{\overrightarrow} \newcommand{\wh}{\widehat} \newcommand{\up}{\uparrow} \newcommand{\dw}{\downarrow} \newcommand{\nr}{\nearrow} \newcommand{\se}{\searrow} \newcommand{\sw}{\swarrow} \newcommand{\nw}{\nwarrow} \newcommand{\mto}{.\;} \newcommand{\vm}[1]{\vspace{#1em}} \newcommand{\vx}[1]{\vspace{#1ex}} \newcommand{\hm}[1]{\hspace{#1em}} \newcommand{\hx}[1]{\hspace{#1ex}} \newcommand{\sm}{\mbox{ }} \newcommand{\mx}{\mbox} %\newcommand{\nq}{\neq} %\newcommand{\eq}{\equiv} \newcommand{\fa}{\forall} %\newcommand{\ex}{\exists} \newcommand{\impl}{\rightarrow} %\newcommand{\Or}{\vee} %\newcommand{\And}{\wedge} \newcommand{\ms}{\models} \newcommand{\bw}{\bigwedge} \newcommand{\ts}{\times} \newcommand{\cc}{\circ} %\newcommand{\es}{\emptyset} %\newcommand{\bs}{\backslash} \newcommand{\vd}{\vdash} %\newcommand{\lan}{{\langle }} %\newcommand{\ran}{{\rangle }} %\newcommand{\al}{\alpha} \newcommand{\bt}{\beta} %\newcommand{\io}{\iota} \newcommand{\lb}{\lambda} %\newcommand{\sg}{\sigma} %\newcommand{\sa}{\Sigma} %\newcommand{\om}{\Omega} %\newcommand{\tu}{\tau} %%%%%%%%%%%%%%%%%%%%%%%%% % Custom maths commands % %%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\sumbool}[2]{\{#1\}+\{#2\}} \newcommand{\myifthenelse}[3]{\kw{if} ~ #1 ~\kw{then} ~ #2 ~ \kw{else} ~ #3} \newcommand{\fun}[2]{\item[]{\tt {#1}}. \quad\\ #2} \newcommand{\WF}[2]{\ensuremath{{\cal W\!F}(#1)[#2]}} \newcommand{\WFE}[1]{\WF{E}{#1}} \newcommand{\WT}[4]{\ensuremath{#1[#2] \vdash #3 : #4}} \newcommand{\WTE}[3]{\WT{E}{#1}{#2}{#3}} \newcommand{\WTEG}[2]{\WTE{\Gamma}{#1}{#2}} \newcommand{\WTM}[3]{\WT{#1}{}{#2}{#3}} \newcommand{\WFT}[2]{\ensuremath{#1[] \vdash {\cal W\!F}(#2)}} \newcommand{\WS}[3]{\ensuremath{#1[] \vdash #2 <: #3}} \newcommand{\WSE}[2]{\WS{E}{#1}{#2}} \newcommand{\WEV}[3]{\mbox{$#1[] \vdash #2 \lra #3$}} \newcommand{\WEVT}[3]{\mbox{$#1[] \vdash #2 \lra$}\\ \mbox{$ #3$}} \newcommand{\WTRED}[5]{\mbox{$#1[#2] \vdash #3 #4 #5$}} \newcommand{\WTERED}[4]{\mbox{$E[#1] \vdash #2 #3 #4$}} \newcommand{\WTELECONV}[3]{\WTERED{#1}{#2}{\leconvert}{#3}} \newcommand{\WTEGRED}[3]{\WTERED{\Gamma}{#1}{#2}{#3}} \newcommand{\WTECONV}[3]{\WTERED{#1}{#2}{\convert}{#3}} \newcommand{\WTEGCONV}[2]{\WTERED{\Gamma}{#1}{\convert}{#2}} \newcommand{\WTEGLECONV}[2]{\WTERED{\Gamma}{#1}{\leconvert}{#2}} \newcommand{\lab}[1]{\mathit{labels}(#1)} \newcommand{\dom}[1]{\mathit{dom}(#1)} \newcommand{\CI}[2]{\mbox{$\{#1\}^{#2}$}} \newcommand{\CIP}[3]{\mbox{$\{#1\}_{#2}^{#3}$}} \newcommand{\CIPV}[1]{\CIP{#1}{I_1.. I_k}{P_1.. P_k}} \newcommand{\CIPI}[1]{\CIP{#1}{I}{P}} \newcommand{\CIF}[1]{\mbox{$\{#1\}_{f_1.. 
f_n}$}} %BEGIN LATEX \newcommand{\NInd}[3]{\mbox{{\sf Ind}$(#1)(\begin{array}[t]{@{}l}#2:=#3 \,)\end{array}$}} \newcommand{\Ind}[4]{\mbox{{\sf Ind}$(#1)[#2](\begin{array}[t]{@{}l@{}}#3:=#4 \,)\end{array}$}} %END LATEX %HEVEA \newcommand{\NInd}[3]{\mbox{{\sf Ind}$(#1)(#2:=#3\,)$}} %HEVEA \newcommand{\Ind}[4]{\mbox{{\sf Ind}$(#1)[#2](#3:=#4\,)$}} \newcommand{\Indp}[5]{\mbox{{\sf Ind}$_{#5}(#1)[#2](\begin{array}[t]{@{}l}#3:=#4 \,)\end{array}$}} \newcommand{\Indpstr}[6]{\mbox{{\sf Ind}$_{#5}(#1)[#2](\begin{array}[t]{@{}l}#3:=#4 \,)/{#6}\end{array}$}} \newcommand{\Def}[4]{\mbox{{\sf Def}$(#1)(#2:=#3:#4)$}} \newcommand{\Assum}[3]{\mbox{{\sf Assum}$(#1)(#2:#3)$}} \newcommand{\Match}[3]{\mbox{$<\!#1\!>\!{\mbox{\tt Match}}~#2~{\mbox{\tt with}}~#3~{\mbox{\tt end}}$}} \newcommand{\Case}[3]{\mbox{$\kw{case}(#2,#1,#3)$}} \newcommand{\match}[3]{\mbox{$\kw{match}~ #2 ~\kw{with}~ #3 ~\kw{end}$}} \newcommand{\Fix}[2]{\mbox{\tt Fix}~#1\{#2\}} \newcommand{\CoFix}[2]{\mbox{\tt CoFix}~#1\{#2\}} \newcommand{\With}[2]{\mbox{\tt ~with~}} \newcommand{\subst}[3]{#1\{#2/#3\}} \newcommand{\substs}[4]{#1\{(#2/#3)_{#4}\}} \newcommand{\Sort}{\mbox{$\cal S$}} \newcommand{\convert}{=_{\beta\delta\iota\zeta\eta}} \newcommand{\leconvert}{\leq_{\beta\delta\iota\zeta\eta}} \newcommand{\NN}{\mathbb{N}} \newcommand{\inference}[1]{$${#1}$$} \newcommand{\compat}[2]{\mbox{$[#1|#2]$}} \newcommand{\tristackrel}[3]{\mathrel{\mathop{#2}\limits_{#3}^{#1}}} \newcommand{\Impl}{{\it Impl}} \newcommand{\elem}{{\it e}} \newcommand{\Mod}[3]{{\sf Mod}({#1}:{#2}\,\zeroone{:={#3}})} \newcommand{\ModS}[2]{{\sf Mod}({#1}:{#2})} \newcommand{\ModType}[2]{{\sf ModType}({#1}:={#2})} \newcommand{\ModA}[2]{{\sf ModA}({#1}=={#2})} \newcommand{\functor}[3]{\ensuremath{{\sf Functor}(#1:#2)\;#3}} \newcommand{\funsig}[3]{\ensuremath{{\sf Funsig}(#1:#2)\;#3}} \newcommand{\sig}[1]{\ensuremath{{\sf Sig}~#1~{\sf End}}} \newcommand{\struct}[1]{\ensuremath{{\sf Struct}~#1~{\sf End}}} \newcommand{\structe}[1]{\ensuremath{ {\sf Struct}~\elem_1;\ldots;\elem_i;#1;\elem_{i+2};\ldots ;\elem_n~{\sf End}}} \newcommand{\structes}[2]{\ensuremath{ {\sf Struct}~\elem_1;\ldots;\elem_i;#1;\elem_{i+2}\{#2\} ;\ldots;\elem_n\{#2\}~{\sf End}}} \newcommand{\with}[3]{\ensuremath{#1~{\sf with}~#2 := #3}} \newcommand{\Spec}{{\it Spec}} \newcommand{\ModSEq}[3]{{\sf Mod}({#1}:{#2}:={#3})} %\newbox\tempa %\newbox\tempb %\newdimen\tempc %\newcommand{\mud}[1]{\hfil $\displaystyle{\mathstrut #1}$\hfil} %\newcommand{\rig}[1]{\hfil $\displaystyle{#1}$} % \newcommand{\irulehelp}[3]{\setbox\tempa=\hbox{$\displaystyle{\mathstrut #2}$}% % \setbox\tempb=\vbox{\halign{##\cr % \mud{#1}\cr % \noalign{\vskip\the\lineskip} % \noalign{\hrule height 0pt} % \rig{\vbox to 0pt{\vss\hbox to 0pt{${\; #3}$\hss}\vss}}\cr % \noalign{\hrule} % \noalign{\vskip\the\lineskip} % \mud{\copy\tempa}\cr}} % \tempc=\wd\tempb % \advance\tempc by \wd\tempa % \divide\tempc by 2 } % \newcommand{\irule}[3]{{\irulehelp{#1}{#2}{#3} % \hbox to \wd\tempa{\hss \box\tempb \hss}}} \newcommand{\sverb}[1]{{\tt #1}} \newcommand{\mover}[2]{{#1\over #2}} \newcommand{\jd}[2]{#1 \vdash #2} \newcommand{\mathline}[1]{\[#1\]} \newcommand{\zrule}[2]{#2: #1} \newcommand{\orule}[3]{#3: {\mover{#1}{#2}}} \newcommand{\trule}[4]{#4: \mover{#1 \qquad #2} {#3}} \newcommand{\thrule}[5]{#5: {\mover{#1 \qquad #2 \qquad #3}{#4}}} % placement of figures %BEGIN LATEX \renewcommand{\topfraction}{.99} \renewcommand{\bottomfraction}{.99} \renewcommand{\textfraction}{.01} \renewcommand{\floatpagefraction}{.9} %END LATEX % Macros Bruno pour description de la 
syntaxe \def\bfbar{\ensuremath{|\hskip -0.22em{}|\hskip -0.24em{}|}} \def\TERMbar{\bfbar} \def\TERMbarbar{\bfbar\bfbar} %% Macros pour les grammaires \def\GR#1{\text{\large(}#1\text{\large)}} \def\NT#1{\langle\textit{#1}\rangle} \def\NTL#1#2{\langle\textit{#1}\rangle_{#2}} \def\TERM#1{{\bf\textrm{\bf #1}}} %\def\TERM#1{{\bf\textsf{#1}}} \def\KWD#1{\TERM{#1}} \def\ETERM#1{\TERM{#1}} \def\CHAR#1{\TERM{#1}} \def\STAR#1{#1*} \def\STARGR#1{\GR{#1}*} \def\PLUS#1{#1+} \def\PLUSGR#1{\GR{#1}+} \def\OPT#1{#1?} \def\OPTGR#1{\GR{#1}?} %% Tableaux de definition de non-terminaux \newenvironment{cadre} {\begin{array}{|c|}\hline\\} {\\\\\hline\end{array}} \newenvironment{rulebox} {$$\begin{cadre}\begin{array}{r@{~}c@{~}l@{}l@{}r}} {\end{array}\end{cadre}$$} \def\DEFNT#1{\NT{#1} & ::= &} \def\EXTNT#1{\NT{#1} & ::= & ... \\&|&} \def\RNAME#1{(\textsc{#1})} \def\SEPDEF{\\\\} \def\nlsep{\\&|&} \def\nlcont{\\&&} \newenvironment{rules} {\begin{center}\begin{rulebox}} {\end{rulebox}\end{center}} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/common/styles/0000750000175000001440000000000012127276532015215 5ustar notinuserscoq-8.4pl2/doc/common/styles/html/0000750000175000001440000000000012127276532016161 5ustar notinuserscoq-8.4pl2/doc/common/styles/html/simple/0000750000175000001440000000000012127276532017452 5ustar notinuserscoq-8.4pl2/doc/common/styles/html/simple/hevea.css0000640000175000001440000000344611410351032021243 0ustar notinusers .li-itemize{margin:1ex 0ex;} .li-enumerate{margin:1ex 0ex;} .dd-description{margin:0ex 0ex 1ex 4ex;} .dt-description{margin:0ex;} .toc{list-style:none;} .thefootnotes{text-align:left;margin:0ex;} .dt-thefootnotes{margin:0em;} .dd-thefootnotes{margin:0em 0em 0em 2em;} .footnoterule{margin:1em auto 1em 0px;width:50%;} .caption{padding-left:2ex; padding-right:2ex; margin-left:auto; margin-right:auto} .title{margin:2ex auto;text-align:center} .center{text-align:center;margin-left:auto;margin-right:auto;} .flushleft{text-align:left;margin-left:0ex;margin-right:auto;} .flushright{text-align:right;margin-left:auto;margin-right:0ex;} DIV TABLE{margin-left:inherit;margin-right:inherit;} PRE{text-align:left;margin-left:0ex;margin-right:auto;} BLOCKQUOTE{margin-left:4ex;margin-right:4ex;text-align:left;} TD P{margin:0px;} .boxed{border:1px solid black} .textboxed{border:1px solid black} .vbar{border:none;width:2px;background-color:black;} .hbar{border:none;height:2px;width:100%;background-color:black;} .hfill{border:none;height:1px;width:200%;background-color:black;} .vdisplay{border-collapse:separate;border-spacing:2px;width:auto; empty-cells:show; border:2px solid red;} .vdcell{white-space:nowrap;padding:0px;width:auto; border:2px solid green;} .display{border-collapse:separate;border-spacing:2px;width:auto; border:none;} .dcell{white-space:nowrap;padding:0px;width:auto; border:none;} .dcenter{margin:0ex auto;} .vdcenter{border:solid #FF8000 2px; margin:0ex auto;} .minipage{text-align:left; margin-left:0em; margin-right:auto;} .marginpar{border:solid thin black; width:20%; text-align:left;} .marginparleft{float:left; margin-left:0ex; margin-right:1ex;} .marginparright{float:right; margin-left:1ex; margin-right:0ex;} .theorem{text-align:left;margin:1ex auto 1ex 0ex;} .part{margin:2ex auto;text-align:center} coq-8.4pl2/doc/common/styles/html/simple/styles.hva0000640000175000001440000000162511667717030021503 0ustar notinusers\renewcommand{\@meta}{ \begin{rawhtml} \end{rawhtml}} % for HeVeA \htmlhead{\begin{rawhtml}
    \end{rawhtml}} coq-8.4pl2/doc/common/styles/html/simple/cover.html0000640000175000001440000000436511756406457021477 0ustar notinusers Reference Manual | The Coq Proof Assistant




    Reference Manual

    Version 8.4







    The Coq Development Team







    • V7.x INRIA 1999-2004
    • V8.0 INRIA 2004-2008
    • V8.1 INRIA 2006-2011
    • V8.2 INRIA 2008-2011
    • V8.3 INRIA 2010-2011
    • V8.4 INRIA 2012

    This material may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at http://www.opencontent.org/openpub). Options A and B are not elected.



    1
    This research was partly supported by IST working group ``Types''
    coq-8.4pl2/doc/common/styles/html/simple/footer.html0000640000175000001440000000002011704064021021613 0ustar notinusers coq-8.4pl2/doc/common/styles/html/simple/style.css0000640000175000001440000000030511144561417021320 0ustar notinusers#footer { border-top: solid black 1pt; text-align: center; text-indent: 0pt; } .menu { } .menu li { display: inline; margin: 0pt; padding: .5ex 1em; list-style: none }coq-8.4pl2/doc/common/styles/html/simple/header.html0000640000175000001440000000060311704064021021554 0ustar notinusers The Coq Standard Library coq-8.4pl2/doc/common/styles/html/coqremote/0000750000175000001440000000000012127276532020157 5ustar notinuserscoq-8.4pl2/doc/common/styles/html/coqremote/hevea.css0000640000175000001440000000344611410351032021750 0ustar notinusers .li-itemize{margin:1ex 0ex;} .li-enumerate{margin:1ex 0ex;} .dd-description{margin:0ex 0ex 1ex 4ex;} .dt-description{margin:0ex;} .toc{list-style:none;} .thefootnotes{text-align:left;margin:0ex;} .dt-thefootnotes{margin:0em;} .dd-thefootnotes{margin:0em 0em 0em 2em;} .footnoterule{margin:1em auto 1em 0px;width:50%;} .caption{padding-left:2ex; padding-right:2ex; margin-left:auto; margin-right:auto} .title{margin:2ex auto;text-align:center} .center{text-align:center;margin-left:auto;margin-right:auto;} .flushleft{text-align:left;margin-left:0ex;margin-right:auto;} .flushright{text-align:right;margin-left:auto;margin-right:0ex;} DIV TABLE{margin-left:inherit;margin-right:inherit;} PRE{text-align:left;margin-left:0ex;margin-right:auto;} BLOCKQUOTE{margin-left:4ex;margin-right:4ex;text-align:left;} TD P{margin:0px;} .boxed{border:1px solid black} .textboxed{border:1px solid black} .vbar{border:none;width:2px;background-color:black;} .hbar{border:none;height:2px;width:100%;background-color:black;} .hfill{border:none;height:1px;width:200%;background-color:black;} .vdisplay{border-collapse:separate;border-spacing:2px;width:auto; empty-cells:show; border:2px solid red;} .vdcell{white-space:nowrap;padding:0px;width:auto; border:2px solid green;} .display{border-collapse:separate;border-spacing:2px;width:auto; border:none;} .dcell{white-space:nowrap;padding:0px;width:auto; border:none;} .dcenter{margin:0ex auto;} .vdcenter{border:solid #FF8000 2px; margin:0ex auto;} .minipage{text-align:left; margin-left:0em; margin-right:auto;} .marginpar{border:solid thin black; width:20%; text-align:left;} .marginparleft{float:left; margin-left:0ex; margin-right:1ex;} .marginparright{float:right; margin-left:1ex; margin-right:0ex;} .theorem{text-align:left;margin:1ex auto 1ex 0ex;} .part{margin:2ex auto;text-align:center} coq-8.4pl2/doc/common/styles/html/coqremote/styles.hva0000640000175000001440000000546012062325704022202 0ustar notinusers\renewcommand{\@meta}{ \begin{rawhtml} \end{rawhtml}} % for HeVeA \htmlhead{\begin{rawhtml} \end{rawhtml}} coq-8.4pl2/doc/common/styles/html/coqremote/cover.html0000640000175000001440000000752011756406457022200 0ustar notinusers Cover Page Reference Manual | The Coq Proof Assistant




    Reference Manual

    Version trunk





    The Coq Development Team





    • V7.x INRIA 1999-2004
    • V8.0 INRIA 2004-2008
    • V8.1 INRIA 2006-2011
    • V8.2 INRIA 2008-2011
    • V8.3 INRIA 2010-2011
    • V8.4 INRIA 2012

    This material may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at http://www.opencontent.org/openpub). Options A and B are not elected.



    1
    This research was partly supported by IST working group ``Types''

    coq-8.4pl2/doc/common/styles/html/coqremote/footer.html0000640000175000001440000000163312062325704022341 0ustar notinusers
    coq-8.4pl2/doc/common/styles/html/coqremote/header.html0000640000175000001440000000352212062325704022272 0ustar notinusers Standard Library | The Coq Proof Assistant
    coq-8.4pl2/doc/refman/0000750000175000001440000000000012127276533013653 5ustar notinuserscoq-8.4pl2/doc/refman/RefMan-ind.tex0000640000175000001440000004430311366307247016323 0ustar notinusers %\documentstyle[11pt]{article} %\input{title} %\include{macros} %\makeindex %\begin{document} %\coverpage{The module {\tt Equality}}{Cristina CORNES} %\tableofcontents \chapter[Tactics for inductive types and families]{Tactics for inductive types and families\label{Addoc-equality}} This chapter details a few special tactics useful for inferring facts from inductive hypotheses. They can be considered as tools that macro-generate complicated uses of the basic elimination tactics for inductive types. Sections \ref{inversion_introduction} to \ref{inversion_using} present inversion tactics and Section~\ref{scheme} describes a command {\tt Scheme} for automatic generation of induction schemes for mutual inductive types. %\end{document} %\documentstyle[11pt]{article} %\input{title} %\begin{document} %\coverpage{Module Inv: Inversion Tactics}{Cristina CORNES} \section[Generalities about inversion]{Generalities about inversion\label{inversion_introduction}} When working with (co)inductive predicates, we are very often faced to some of these situations: \begin{itemize} \item we have an inconsistent instance of an inductive predicate in the local context of hypotheses. Thus, the current goal can be trivially proved by absurdity. \item we have a hypothesis that is an instance of an inductive predicate, and the instance has some variables whose constraints we would like to derive. \end{itemize} The inversion tactics are very useful to simplify the work in these cases. Inversion tools can be classified in three groups: \begin{enumerate} \item tactics for inverting an instance without stocking the inversion lemma in the context: (\texttt{Dependent}) \texttt{Inversion} and (\texttt{Dependent}) \texttt{Inversion\_clear}. \item commands for generating and stocking in the context the inversion lemma corresponding to an instance: \texttt{Derive} (\texttt{Dependent}) \texttt{Inversion}, \texttt{Derive} (\texttt{Dependent}) \texttt{Inversion\_clear}. \item tactics for inverting an instance using an already defined inversion lemma: \texttt{Inversion \ldots using}. \end{enumerate} These tactics work for inductive types of arity $(\vec{x}:\vec{T})s$ where $s \in \{Prop,Set,Type\}$. Sections \ref{inversion_primitive}, \ref{inversion_derivation} and \ref{inversion_using} describe respectively each group of tools. As inversion proofs may be large in size, we recommend the user to stock the lemmas whenever the same instance needs to be inverted several times.\\ Let's consider the relation \texttt{Le} over natural numbers and the following variables: \begin{coq_eval} Restore State "Initial". \end{coq_eval} \begin{coq_example*} Inductive Le : nat -> nat -> Set := | LeO : forall n:nat, Le 0%N n | LeS : forall n m:nat, Le n m -> Le (S n) (S m). Variable P : nat -> nat -> Prop. Variable Q : forall n m:nat, Le n m -> Prop. \end{coq_example*} For example purposes we defined \verb+Le: nat->nat->Set+ but we may have defined it \texttt{Le} of type \verb+nat->nat->Prop+ or \verb+nat->nat->Type+. \section[Inverting an instance]{Inverting an instance\label{inversion_primitive}} \subsection{The non dependent case} \begin{itemize} \item \texttt{Inversion\_clear} \ident~\\ \index{Inversion-clear@{\tt Inversion\_clear}} Let the type of \ident~ in the local context be $(I~\vec{t})$, where $I$ is a (co)inductive predicate. 
Then, \texttt{Inversion} applied to \ident~ derives for each possible constructor $c_i$ of $(I~\vec{t})$, {\bf all} the necessary conditions that should hold for the instance $(I~\vec{t})$ to be proved by $c_i$. Finally it erases \ident~ from the context. For example, consider the goal: \begin{coq_eval} Lemma ex : forall n m:nat, Le (S n) m -> P n m. intros. \end{coq_eval} \begin{coq_example} Show. \end{coq_example} To prove the goal we may need to reason by cases on \texttt{H} and to derive that \texttt{m} is necessarily of the form $(S~m_0)$ for certain $m_0$ and that $(Le~n~m_0)$. Deriving these conditions corresponds to prove that the only possible constructor of \texttt{(Le (S n) m)} is \texttt{LeS} and that we can invert the \texttt{->} in the type of \texttt{LeS}. This inversion is possible because \texttt{Le} is the smallest set closed by the constructors \texttt{LeO} and \texttt{LeS}. \begin{coq_example} inversion_clear H. \end{coq_example} Note that \texttt{m} has been substituted in the goal for \texttt{(S m0)} and that the hypothesis \texttt{(Le n m0)} has been added to the context. \item \texttt{Inversion} \ident~\\ \index{Inversion@{\tt Inversion}} This tactic differs from {\tt Inversion\_clear} in the fact that it adds the equality constraints in the context and it does not erase the hypothesis \ident. In the previous example, {\tt Inversion\_clear} has substituted \texttt{m} by \texttt{(S m0)}. Sometimes it is interesting to have the equality \texttt{m=(S m0)} in the context to use it after. In that case we can use \texttt{Inversion} that does not clear the equalities: \begin{coq_example*} Undo. \end{coq_example*} \begin{coq_example} inversion H. \end{coq_example} \begin{coq_eval} Undo. \end{coq_eval} Note that the hypothesis \texttt{(S m0)=m} has been deduced and \texttt{H} has not been cleared from the context. \end{itemize} \begin{Variants} \item \texttt{Inversion\_clear } \ident~ \texttt{in} \ident$_1$ \ldots \ident$_n$\\ \index{Inversion_clear...in@{\tt Inversion\_clear...in}} Let \ident$_1$ \ldots \ident$_n$, be identifiers in the local context. This tactic behaves as generalizing \ident$_1$ \ldots \ident$_n$, and then performing {\tt Inversion\_clear}. \item \texttt{Inversion } \ident~ \texttt{in} \ident$_1$ \ldots \ident$_n$\\ \index{Inversion ... in@{\tt Inversion ... in}} Let \ident$_1$ \ldots \ident$_n$, be identifiers in the local context. This tactic behaves as generalizing \ident$_1$ \ldots \ident$_n$, and then performing \texttt{Inversion}. \item \texttt{Simple Inversion} \ident~ \\ \index{Simple Inversion@{\tt Simple Inversion}} It is a very primitive inversion tactic that derives all the necessary equalities but it does not simplify the constraints as \texttt{Inversion} and {\tt Inversion\_clear} do. \end{Variants} \subsection{The dependent case} \begin{itemize} \item \texttt{Dependent Inversion\_clear} \ident~\\ \index{Dependent Inversion-clear@{\tt Dependent Inversion\_clear}} Let the type of \ident~ in the local context be $(I~\vec{t})$, where $I$ is a (co)inductive predicate, and let the goal depend both on $\vec{t}$ and \ident. Then, \texttt{Dependent Inversion\_clear} applied to \ident~ derives for each possible constructor $c_i$ of $(I~\vec{t})$, {\bf all} the necessary conditions that should hold for the instance $(I~\vec{t})$ to be proved by $c_i$. It also substitutes \ident~ for the corresponding term in the goal and it erases \ident~ from the context. 
For example, consider the goal: \begin{coq_eval} Lemma ex_dep : forall (n m:nat) (H:Le (S n) m), Q (S n) m H. intros. \end{coq_eval} \begin{coq_example} Show. \end{coq_example} As \texttt{H} occurs in the goal, we may want to reason by cases on its structure and so, we would like inversion tactics to substitute \texttt{H} by the corresponding term in constructor form. Neither \texttt{Inversion} nor {\tt Inversion\_clear} make such a substitution. To have such a behavior we use the dependent inversion tactics: \begin{coq_example} dependent inversion_clear H. \end{coq_example} Note that \texttt{H} has been substituted by \texttt{(LeS n m0 l)} and \texttt{m} by \texttt{(S m0)}. \end{itemize} \begin{Variants} \item \texttt{Dependent Inversion\_clear } \ident~ \texttt{ with } \term\\ \index{Dependent Inversion_clear...with@{\tt Dependent Inversion\_clear...with}} \noindent Behaves as \texttt{Dependent Inversion\_clear} but allows to give explicitly the good generalization of the goal. It is useful when the system fails to generalize the goal automatically. If \ident~ has type $(I~\vec{t})$ and $I$ has type $(\vec{x}:\vec{T})s$, then \term~ must be of type $I:(\vec{x}:\vec{T})(I~\vec{x})\rightarrow s'$ where $s'$ is the type of the goal. \item \texttt{Dependent Inversion} \ident~\\ \index{Dependent Inversion@{\tt Dependent Inversion}} This tactic differs from \texttt{Dependent Inversion\_clear} in the fact that it also adds the equality constraints in the context and it does not erase the hypothesis \ident~. \item \texttt{Dependent Inversion } \ident~ \texttt{ with } \term \\ \index{Dependent Inversion...with@{\tt Dependent Inversion...with}} Analogous to \texttt{Dependent Inversion\_clear .. with..} above. \end{Variants} \section[Deriving the inversion lemmas]{Deriving the inversion lemmas\label{inversion_derivation}} \subsection{The non dependent case} The tactics (\texttt{Dependent}) \texttt{Inversion} and (\texttt{Dependent}) {\tt Inversion\_clear} work on a certain instance $(I~\vec{t})$ of an inductive predicate. At each application, they inspect the given instance and derive the corresponding inversion lemma. If we have to invert the same instance several times it is recommended to stock the lemma in the context and to reuse it whenever we need it. The families of commands \texttt{Derive Inversion}, \texttt{Derive Dependent Inversion}, \texttt{Derive} \\ {\tt Inversion\_clear} and \texttt{Derive Dependent Inversion\_clear} allow to generate inversion lemmas for given instances and sorts. Next section describes the tactic \texttt{Inversion}$\ldots$\texttt{using} that refines the goal with a specified inversion lemma. \begin{itemize} \item \texttt{Derive Inversion\_clear} \ident~ \texttt{with} $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ \index{Derive Inversion_clear...with@{\tt Derive Inversion\_clear...with}} Let $I$ be an inductive predicate and $\vec{x}$ the variables occurring in $\vec{t}$. This command generates and stocks the inversion lemma for the sort \sort~ corresponding to the instance $(\vec{x}:\vec{T})(I~\vec{t})$ with the name \ident~ in the {\bf global} environment. When applied it is equivalent to have inverted the instance with the tactic {\tt Inversion\_clear}. For example, to generate the inversion lemma for the instance \texttt{(Le (S n) m)} and the sort \texttt{Prop} we do: \begin{coq_example} Derive Inversion_clear leminv with (forall n m:nat, Le (S n) m) Sort Prop. 
\end{coq_example} Let us inspect the type of the generated lemma: \begin{coq_example} Check leminv. \end{coq_example} \end{itemize} %\variants %\begin{enumerate} %\item \verb+Derive Inversion_clear+ \ident$_1$ \ident$_2$ \\ %\index{Derive Inversion_clear@{\tt Derive Inversion\_clear}} % Let \ident$_1$ have type $(I~\vec{t})$ in the local context ($I$ % an inductive predicate). Then, this command has the same semantics % as \verb+Derive Inversion_clear+ \ident$_2$~ \verb+with+ % $(\vec{x}:\vec{T})(I~\vec{t})$ \verb+Sort Prop+ where $\vec{x}$ are the free % variables of $(I~\vec{t})$ declared in the local context (variables % of the global context are considered as constants). %\item \verb+Derive Inversion+ \ident$_1$~ \ident$_2$~\\ %\index{Derive Inversion@{\tt Derive Inversion}} % Analogous to the previous command. %\item \verb+Derive Inversion+ $num$ \ident~ \ident~ \\ %\index{Derive Inversion@{\tt Derive Inversion}} % This command behaves as \verb+Derive Inversion+ \ident~ {\it % namehyp} performed on the goal number $num$. % %\item \verb+Derive Inversion_clear+ $num$ \ident~ \ident~ \\ %\index{Derive Inversion_clear@{\tt Derive Inversion\_clear}} % This command behaves as \verb+Derive Inversion_clear+ \ident~ % \ident~ performed on the goal number $num$. %\end{enumerate} A derived inversion lemma is adequate for inverting the instance with which it was generated, \texttt{Derive} applied to different instances yields different lemmas. In general, if we generate the inversion lemma with an instance $(\vec{x}:\vec{T})(I~\vec{t})$ and a sort $s$, the inversion lemma will expect a predicate of type $(\vec{x}:\vec{T})s$ as first argument. \\ \begin{Variant} \item \texttt{Derive Inversion} \ident~ \texttt{with} $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort\\ \index{Derive Inversion...with@{\tt Derive Inversion...with}} Analogous of \texttt{Derive Inversion\_clear .. with ..} but when applied it is equivalent to having inverted the instance with the tactic \texttt{Inversion}. \end{Variant} \subsection{The dependent case} \begin{itemize} \item \texttt{Derive Dependent Inversion\_clear} \ident~ \texttt{with} $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ \index{Derive Dependent Inversion\_clear...with@{\tt Derive Dependent Inversion\_clear...with}} Let $I$ be an inductive predicate. This command generates and stocks the dependent inversion lemma for the sort \sort~ corresponding to the instance $(\vec{x}:\vec{T})(I~\vec{t})$ with the name \ident~ in the {\bf global} environment. When applied it is equivalent to having inverted the instance with the tactic \texttt{Dependent Inversion\_clear}. \end{itemize} \begin{coq_example} Derive Dependent Inversion_clear leminv_dep with (forall n m:nat, Le (S n) m) Sort Prop. \end{coq_example} \begin{coq_example} Check leminv_dep. \end{coq_example} \begin{Variants} \item \texttt{Derive Dependent Inversion} \ident~ \texttt{with} $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ \index{Derive Dependent Inversion...with@{\tt Derive Dependent Inversion...with}} Analogous to \texttt{Derive Dependent Inversion\_clear}, but when applied it is equivalent to having inverted the instance with the tactic \texttt{Dependent Inversion}. 
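For instance, mirroring the {\tt Derive Dependent Inversion\_clear} example
above (the lemma name {\tt leminv\_dep'} is chosen here purely for
illustration):
\begin{coq_example*}
Derive Dependent Inversion leminv_dep' with
 (forall n m:nat, Le (S n) m) Sort Prop.
\end{coq_example*}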
\end{Variants} \section[Using already defined inversion lemmas]{Using already defined inversion lemmas\label{inversion_using}} \begin{itemize} \item \texttt{Inversion} \ident \texttt{ using} \ident$'$ \\ \index{Inversion...using@{\tt Inversion...using}} Let \ident~ have type $(I~\vec{t})$ ($I$ an inductive predicate) in the local context, and \ident$'$ be a (dependent) inversion lemma. Then, this tactic refines the current goal with the specified lemma. \begin{coq_eval} Abort. \end{coq_eval} \begin{coq_example} Show. \end{coq_example} \begin{coq_example} inversion H using leminv. \end{coq_example} \end{itemize} \variant \begin{enumerate} \item \texttt{Inversion} \ident~ \texttt{using} \ident$'$ \texttt{in} \ident$_1$\ldots \ident$_n$\\ \index{Inversion...using...in@{\tt Inversion...using...in}} This tactic behaves as generalizing \ident$_1$\ldots \ident$_n$, then doing \texttt{Use Inversion} \ident~\ident$'$. \end{enumerate} \section[\tt Scheme ...]{\tt Scheme ...\index{Scheme@{\tt Scheme}}\label{Scheme} \label{scheme}} The {\tt Scheme} command is a high-level tool for generating automatically (possibly mutual) induction principles for given types and sorts. Its syntax follows the schema : \noindent {\tt Scheme {\ident$_1$} := Induction for \term$_1$ Sort {\sort$_1$} \\ with\\ \mbox{}\hspace{0.1cm} .. \\ with {\ident$_m$} := Induction for {\term$_m$} Sort {\sort$_m$}}\\ \term$_1$ \ldots \term$_m$ are different inductive types belonging to the same package of mutual inductive definitions. This command generates {\ident$_1$}\ldots{\ident$_m$} to be mutually recursive definitions. Each term {\ident$_i$} proves a general principle of mutual induction for objects in type {\term$_i$}. \Example The definition of principle of mutual induction for {\tt tree} and {\tt forest} over the sort {\tt Set} is defined by the command: \begin{coq_eval} Restore State "Initial". Variables A B : Set. Inductive tree : Set := node : A -> forest -> tree with forest : Set := | leaf : B -> forest | cons : tree -> forest -> forest. \end{coq_eval} \begin{coq_example*} Scheme tree_forest_rec := Induction for tree Sort Set with forest_tree_rec := Induction for forest Sort Set. \end{coq_example*} You may now look at the type of {\tt tree\_forest\_rec} : \begin{coq_example} Check tree_forest_rec. \end{coq_example} This principle involves two different predicates for {\tt trees} and {\tt forests}; it also has three premises each one corresponding to a constructor of one of the inductive definitions. The principle {\tt tree\_forest\_rec} shares exactly the same premises, only the conclusion now refers to the property of forests. \begin{coq_example} Check forest_tree_rec. \end{coq_example} \begin{Variant} \item {\tt Scheme {\ident$_1$} := Minimality for \term$_1$ Sort {\sort$_1$} \\ with\\ \mbox{}\hspace{0.1cm} .. \\ with {\ident$_m$} := Minimality for {\term$_m$} Sort {\sort$_m$}}\\ Same as before but defines a non-dependent elimination principle more natural in case of inductively defined relations. \end{Variant} \Example With the predicates {\tt odd} and {\tt even} inductively defined as: % \begin{coq_eval} % Restore State "Initial". % \end{coq_eval} \begin{coq_example*} Inductive odd : nat -> Prop := oddS : forall n:nat, even n -> odd (S n) with even : nat -> Prop := | evenO : even 0%N | evenS : forall n:nat, odd n -> even (S n). 
\end{coq_example*} The following command generates a powerful elimination principle: \begin{coq_example*} Scheme odd_even := Minimality for odd Sort Prop with even_odd := Minimality for even Sort Prop. \end{coq_example*} The type of {\tt odd\_even} for instance will be: \begin{coq_example} Check odd_even. \end{coq_example} The type of {\tt even\_odd} shares the same premises but the conclusion is {\tt (n:nat)(even n)->(Q n)}. \subsection[\tt Combined Scheme ...]{\tt Combined Scheme ...\index{CombinedScheme@{\tt Combined Scheme}}\label{CombinedScheme} \label{combinedscheme}} The {\tt Combined Scheme} command is a tool for combining induction principles generated by the {\tt Scheme} command. Its syntax follows the schema : \noindent {\tt Combined Scheme {\ident$_0$} from {\ident$_1$}, .., {\ident$_n$}}\\ \ident$_1$ \ldots \ident$_n$ are different inductive principles that must belong to the same package of mutual inductive principle definitions. This command generates {\ident$_0$} to be the conjunction of the principles: it is build from the common premises of the principles and concluded by the conjunction of their conclusions. For exemple, we can combine the induction principles for trees and forests: \begin{coq_example*} Combined Scheme tree_forest_mutind from tree_ind, forest_ind. Check tree_forest_mutind. \end{coq_example*} %\end{document} coq-8.4pl2/doc/refman/RefMan-tus.tex0000640000175000001440000024161011274252012016347 0ustar notinusers%\documentclass[11pt]{article} %\usepackage{fullpage,euler} %\usepackage[latin1]{inputenc} %\begin{document} %\title{Writing ad-hoc Tactics in Coq} %\author{} %\date{} %\maketitle %\tableofcontents %\clearpage \chapter[Writing ad-hoc Tactics in Coq]{Writing ad-hoc Tactics in Coq\label{WritingTactics}} \section{Introduction} \Coq\ is an open proof environment, in the sense that the collection of proof strategies offered by the system can be extended by the user. This feature has two important advantages. First, the user can develop his/her own ad-hoc proof procedures, customizing the system for a particular domain of application. Second, the repetitive and tedious aspects of the proofs can be abstracted away implementing new tactics for dealing with them. For example, this may be useful when a theorem needs several lemmas which are all proven in a similar but not exactly the same way. Let us illustrate this with an example. Consider the problem of deciding the equality of two booleans. The theorem establishing that this is always possible is state by the following theorem: \begin{coq_example*} Theorem decideBool : (x,y:bool){x=y}+{~x=y}. \end{coq_example*} The proof proceeds by case analysis on both $x$ and $y$. This yields four cases to solve. The cases $x=y=\textsl{true}$ and $x=y=\textsl{false}$ are immediate by the reflexivity of equality. The other two cases follow by discrimination. The following script describes the proof: \begin{coq_example*} Destruct x. Destruct y. Left ; Reflexivity. Right; Discriminate. Destruct y. Right; Discriminate. Left ; Reflexivity. \end{coq_example*} \begin{coq_eval} Abort. \end{coq_eval} Now, consider the theorem stating the same property but for the following enumerated type: \begin{coq_example*} Inductive Set Color := Blue:Color | White:Color | Red:Color. Theorem decideColor : (c1,c2:Color){c1=c2}+{~c1=c2}. \end{coq_example*} This theorem can be proven in a very similar way, reasoning by case analysis on $c_1$ and $c_2$. 
Once more, each of the (now six) cases is solved either by reflexivity or by discrimination: \begin{coq_example*} Destruct c1. Destruct c2. Left ; Reflexivity. Right ; Discriminate. Right ; Discriminate. Destruct c2. Right ; Discriminate. Left ; Reflexivity. Right ; Discriminate. Destruct c2. Right ; Discriminate. Right ; Discriminate. Left ; Reflexivity. \end{coq_example*} \begin{coq_eval} Abort. \end{coq_eval} If we face the same theorem for an enumerated datatype corresponding to the days of the week, it would still follow a similar pattern. In general, the general pattern for proving the property $(x,y:R)\{x=y\}+\{\neg x =y\}$ for an enumerated type $R$ proceeds as follow: \begin{enumerate} \item Analyze the cases for $x$. \item For each of the sub-goals generated by the first step, analyze the cases for $y$. \item The remaining subgoals follow either by reflexivity or by discrimination. \end{enumerate} Let us describe how this general proof procedure can be introduced in \Coq. \section{Tactic Macros} The simplest way to introduce it is to define it as new a \textsl{tactic macro}, as follows: \begin{coq_example*} Tactic Definition DecideEq [$a $b] := [<:tactic:>]. \end{coq_example*} The general pattern of the proof is abstracted away using the tacticals ``\texttt{;}'' and \texttt{Orelse}, and introducing two parameters for the names of the arguments to be analyzed. Once defined, this tactic can be called like any other tactic, just supplying the list of terms corresponding to its real arguments. Let us revisit the proof of the former theorems using the new tactic \texttt{DecideEq}: \begin{coq_example*} Theorem decideBool : (x,y:bool){x=y}+{~x=y}. DecideEq x y. Defined. \end{coq_example*} \begin{coq_example*} Theorem decideColor : (c1,c2:Color){c1=c2}+{~c1=c2}. DecideEq c1 c2. Defined. \end{coq_example*} In general, the command \texttt{Tactic Definition} associates a name to a parameterized tactic expression, built up from the tactics and tacticals that are already available. The general syntax rule for this command is the following: \begin{tabbing} \texttt{Tactic Definition} \textit{tactic-name} \= \texttt{[}\$$id_1\ldots \$id_n$\texttt{]}\\ \> := \texttt{[<:tactic:<} \textit{tactic-expression} \verb+>>]+ \end{tabbing} This command provides a quick but also very primitive mechanism for introducing new tactics. It does not support recursive definitions, and the arguments of a tactic macro are restricted to term expressions. Moreover, there is no static checking of the definition other than the syntactical one. Any error in the definition of the tactic ---for instance, a call to an undefined tactic--- will not be noticed until the tactic is called. %This command provides a very primitive mechanism for introducing new %tactics. The arguments of a tactic macro are restricted to term %expressions. Hence, it is not possible to define higher order tactics %with this command. Also, there is no static checking of the definition %other than syntactical. If the tactic contain errors in its definition %--for instance, a call to an undefined tactic-- this will be noticed %during the tactic call. Let us illustrate the weakness of this way of introducing new tactics trying to extend our proof procedure to work on a larger class of inductive types. Consider for example the decidability of equality for pairs of booleans and colors: \begin{coq_example*} Theorem decideBoolXColor : (p1,p2:bool*Color){p1=p2}+{~p1=p2}. 
\end{coq_example*} The proof still proceeds by a double case analysis, but now the constructors of the type take two arguments. Therefore, the sub-goals that can not be solved by discrimination need further considerations about the equality of such arguments: \begin{coq_example} Destruct p1; Destruct p2; Try (Right;Discriminate);Intros. \end{coq_example} The half of the disjunction to be chosen depends on whether or not $b=b_0$ and $c=c_0$. These equalities can be decided automatically using the previous lemmas about booleans and colors. If both equalities are satisfied, then it is sufficient to rewrite $b$ into $b_0$ and $c$ into $c_0$, so that the left half of the goal follows by reflexivity. Otherwise, the right half follows by first contraposing the disequality, and then applying the invectiveness of the pairing constructor. As the cases associated to each argument of the pair are very similar, a tactic macro can be introduced to abstract this part of the proof: \begin{coq_example*} Hints Resolve decideBool decideColor. Tactic Definition SolveArg [$t1 $t2] := [<:tactic:< ElimType {$t1=$t2}+{~$t1=$t2}; [(Intro equality;Rewrite equality;Clear equality) | (Intro diseq; Right; Red; Intro absurd; Apply diseq;Injection absurd;Trivial) | Auto]>>]. \end{coq_example*} This tactic is applied to each corresponding pair of arguments of the arguments, until the goal can be solved by reflexivity: \begin{coq_example*} SolveArg b b0; SolveArg c c0; Left; Reflexivity. Defined. \end{coq_example*} Therefore, a more general strategy for deciding the property $(x,y:R)\{x=y\}+\{\neg x =y\}$ on $R$ can be sketched as follows: \begin{enumerate} \item Eliminate $x$ and then $y$. \item Try discrimination to solve those goals where $x$ and $y$ has been introduced by different constructors. \item If $x$ and $y$ have been introduced by the same constructor, then iterate the tactic \textsl{SolveArg} for each pair of arguments. \item Finally, solve the left half of the goal by reflexivity. \end{enumerate} The implementation of this stronger proof strategy needs to perform a term decomposition, in order to extract the list of arguments of each constructor. It also requires the introduction of recursively defined tactics, so that the \textsl{SolveArg} can be iterated on the lists of arguments. These features are not supported by the \texttt{Tactic Definition} command. One possibility could be extended this command in order to introduce recursion, general parameter passing, pattern-matching, etc, but this would quickly lead us to introduce the whole \ocaml{} into \Coq\footnote{This is historically true. In fact, \ocaml{} is a direct descendent of ML, a functional programming language conceived language for programming the tactics of the theorem prover LCF.}. Instead of doing this, we prefer to give to the user the possibility of writing his/her own tactics directly in \ocaml{}, and then to link them dynamically with \Coq's code. This requires a minimal knowledge about \Coq's implementation. The next section provides an overview of \Coq's architecture. %It is important to point out that the introduction of a new tactic %never endangers the correction of the theorems proven in the extended %system. In order to understand why, let us introduce briefly the system %architecture. \section{An Overview of \Coq's Architecture} The implementation of \Coq\ is based on eight \textsl{logical modules}. By ``module'' we mean here a logical piece of code having a conceptual unity, that may concern several \ocaml{} files. 
For the sake of organization, all the \ocaml{} files concerning a logical module are grouped together in the same sub-directory. The eight modules are:
\begin{tabular}{lll}
1. & The logical framework & (directory \texttt{src/generic})\\
2. & The language of constructions & (directory \texttt{src/constr})\\
3. & The type-checker & (directory \texttt{src/typing})\\
4. & The proof engine & (directory \texttt{src/proofs})\\
5. & The language of basic tactics & (directory \texttt{src/tactics})\\
6. & The vernacular interpreter & (directory \texttt{src/env})\\
7. & The parser and the pretty-printer & (directory \texttt{src/parsing})\\
8. & The standard library & (directory \texttt{src/lib})
\end{tabular}
\vspace{1em}
The following sections briefly present each of the modules above. This presentation is not intended to be a complete description of \Coq's implementation, but rather a guideline to be read before taking a look at the sources. For each of the modules, we also present some of its most important functions, which are sufficient to implement a large class of tactics.
\subsection[The Logical Framework]{The Logical Framework\label{LogicalFramework}}
At the very heart of \Coq there is a generic untyped language for expressing abstractions, applications and global constants. This language is used as a meta-language for expressing the terms of the Calculus of Inductive Constructions. General operations on terms like collecting the free variables of an expression, substituting a term for a free variable, etc, are expressed in this language.
The meta-language \texttt{'op term} of terms has seven main constructors:
\begin{itemize}
\item $(\texttt{VAR}\;id)$, a reference to a global identifier called $id$;
\item $(\texttt{Rel}\;n)$, a bound variable, whose binder is the $nth$ binder up in the term;
\item $\texttt{DLAM}\;(x,t)$, a deBruijn's binder on the term $t$;
\item $\texttt{DLAMV}\;(x,vt)$, a deBruijn's binder on all the terms of the vector $vt$;
\item $(\texttt{DOP0}\;op)$, an operator $op$ applied to no arguments;
\item $\texttt{DOP2}\;(op,t_1,t_2)$, the application of a binary operator $op$ to the terms $t_1$ and $t_2$;
\item $\texttt{DOPN}\;(op,vt)$, the application of an n-ary operator $op$ to the vector of terms $vt$.
\end{itemize}
In this meta-language, bound variables are represented using the so-called deBruijn's indexes. In this representation, an occurrence of a bound variable is denoted by an integer, meaning the number of binders that must be traversed to reach its own binder\footnote{Actually, $(\texttt{Rel}\;n)$ means that $(n-1)$ binders have to be traversed, since indexes are represented by strictly positive integers.}. On the other hand, constants are referred to by their names, as usual.
For example, if $A$ is a variable of the current section, then the lambda abstraction $[x:A]x$ of the Calculus of Constructions is represented in the meta-language by the term:
\begin{displaymath}
DOP2\;(Lambda,(VAR\;A),DLAM\;(x,(Rel\;1)))
\end{displaymath}
In this term, $Lambda$ is a binary operator. Its first argument corresponds to the type $A$ of the bound variable, while the second is the body of the abstraction, where $x$ is bound. The name $x$ is just kept to pretty-print the occurrences of the bound variable.
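To make this representation concrete, here is a minimal sketch written directly with the constructors listed above; the \texttt{Lambda} operator and the exact argument types of the constructors are assumptions based on the description in this section, and the identifier and name arguments are supposed to be built with the \texttt{Names} operations presented below:
\begin{verbatim}
(* A sketch: the meta-language representation of the abstraction [x:A]x,
   built by hand from a global identifier a (standing for A) and a name x. *)
let identity_on_a a x =
  DOP2 (Lambda,
        VAR a,                  (* the type of the bound variable        *)
        DLAM (Name x, Rel 1))   (* the body, where x is denoted by Rel 1 *)
\end{verbatim}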
%Similarly, the product %$(A:Prop)A$ of the Calculus of Constructions is represented by the %term: %\begin{displaumath} %DOP2 (Prod, DOP0 (Sort (Prop Null)), DLAM (Name \#A, Rel 1)) %\end{displaymath} The following functions perform some of the most frequent operations on the terms of the meta-language: \begin{description} \fun{val Generic.subst1 : 'op term -> 'op term -> 'op term} {$(\texttt{subst1}\;t_1\;t_2)$ substitutes $t_1$ for $\texttt{(Rel}\;1)$ in $t_2$.} \fun{val Generic.occur\_var : identifier -> 'op term -> bool} {Returns true when the given identifier appears in the term, and false otherwise.} \fun{val Generic.eq\_term : 'op term -> 'op term -> bool} {Implements $\alpha$-equality for terms.} \fun{val Generic.dependent : 'op term -> 'op term -> bool} {Returns true if the first term is a sub-term of the second.} %\fun{val Generic.subst\_var : identifier -> 'op term -> 'op term} % { $(\texttt{subst\_var}\;id\;t)$ substitutes the deBruijn's index % associated to $id$ to every occurrence of the term % $(\texttt{VAR}\;id)$ in $t$.} \end{description} \subsubsection{Identifiers, names and sections paths.} Three different kinds of names are used in the meta-language. They are all defined in the \ocaml{} file \texttt{Names}. \paragraph{Identifiers.} The simplest kind of names are \textsl{identifiers}. An identifier is a string possibly indexed by an integer. They are used to represent names that are not unique, like for example the name of a variable in the scope of a section. The following operations can be used for handling identifiers: \begin{description} \fun{val Names.make\_ident : string -> int -> identifier} {The value $(\texttt{make\_ident}\;x\;i)$ creates the identifier $x_i$. If $i=-1$, then the identifier has is created with no index at all.} \fun{val Names.repr\_ident : identifier -> string * int} {The inverse operation of \texttt{make\_ident}: it yields the string and the index of the identifier.} \fun{val Names.lift\_ident : identifier -> identifier} {Increases the index of the identifier by one.} \fun{val Names.next\_ident\_away : \\ \qquad identifier -> identifier list -> identifier} {\\ Generates a new identifier with the same root string than the given one, but with a new index, different from all the indexes of a given list of identifiers.} \fun{val Names.id\_of\_string : string -> identifier} {Creates an identifier from a string.} \fun{val Names.string\_of\_id : identifier -> string} {The inverse operation: transforms an identifier into a string} \end{description} \paragraph{Names.} A \textsl{name} is either an identifier or the special name \texttt{Anonymous}. Names are used as arguments of binders, in order to pretty print bound variables. The following operations can be used for handling names: \begin{description} \fun{val Names.Name: identifier -> Name} {Constructs a name from an identifier.} \fun{val Names.Anonymous : Name} {Constructs a special, anonymous identifier, like the variable abstracted in the term $[\_:A]0$.} \fun{val Names.next\_name\_away\_with\_default : \\ \qquad string->name->identifier list->identifier} {\\ If the name is not anonymous, then this function generates a new identifier different from all the ones in a given list. Otherwise, it generates an identifier from the given string.} \end{description} \paragraph[Section paths.]{Section paths.\label{SectionPaths}} A \textsl{section-path} is a global name to refer to an object without ambiguity. It can be seen as a sort of filename, where open sections play the role of directories. 
Each section path is formed by three components: a \textsl{directory} (the list of open sections); a \textsl{basename} (the identifier for the object); and a \textsl{kind} (either CCI for the terms of the Calculus of Constructions, FW for the terms of $F_\omega$, or OBJ for other objects). For example, the name of the following constant:
\begin{verbatim}
Section A.
Section B.
Section C.
Definition zero := O.
\end{verbatim}
is internally represented by the section path:
$$\underbrace{\mathtt{\#A\#B\#C}}_{\mbox{dirpath}} \underbrace{\mathtt{\tt \#zero}}_{\mbox{basename}} \underbrace{\mathtt{\tt .cci}_{\;}}_{\mbox{kind}}$$
When one of the sections is closed, a new constant is created with an updated section path, and the old one is no longer reachable. In our example, after closing the section \texttt{C}, the new section path for the constant {\tt zero} becomes:
\begin{center}
\texttt{ \#A\#B\#zero.cci}
\end{center}
The following operations can be used to handle section paths:
\begin{description}
\fun{val Names.string\_of\_path : section\_path -> string} {Transforms the section path into a string.}
\fun{val Names.path\_of\_string : string -> section\_path} {Parses a string and returns the corresponding section path.}
\fun{val Names.basename : section\_path -> identifier} {Provides the basename of a section path.}
\fun{val Names.dirpath : section\_path -> string list} {Provides the directory of a section path.}
\fun{val Names.kind\_of\_path : section\_path -> path\_kind} {Provides the kind of a section path.}
\end{description}
\subsubsection{Signatures}
A \textsl{signature} is a mapping associating various pieces of information to identifiers (for example, their type, their definition, etc.). The following operations could be useful for working with signatures:
\begin{description}
\fun{val Names.ids\_of\_sign : 'a signature -> identifier list} {Gets the list of identifiers of the signature.}
\fun{val Names.vals\_of\_sign : 'a signature -> 'a list} {Gets the list of values associated to the identifiers of the signature.}
\fun{val Names.lookup\_glob1 : \\ \qquad identifier -> 'a signature -> (identifier * 'a)} {\\ Gets the value associated to a given identifier of the signature.}
\end{description}
\subsection{The Terms of the Calculus of Constructions}
The language of the Calculus of Inductive Constructions described in Chapter \ref{Cic} is implemented on top of the logical framework, instantiating the parameter $op$ of the meta-language with a particular set of operators. In the implementation this language is called \texttt{constr}, the language of constructions.
% The only difference
%with respect to the one described in Section \ref{} is that the terms
%of \texttt{constr} may contain \textsl{existential variables}. An
%existential variable is a place holder representing a part of the term
%that is still to be constructed. Such ``open terms'' are necessary
%when building proofs interactively.
\subsubsection{Building Constructions}
The user does not need to know the choices made to represent \texttt{constr} in the meta-language. They are abstracted away by the following constructor functions:
\begin{description}
\fun{val Term.mkRel : int -> constr} {$(\texttt{mkRel}\;n)$ represents deBruijn's index $n$.}
\fun{val Term.mkVar : identifier -> constr} {$(\texttt{mkVar}\;id)$ represents a global identifier named $id$, like a variable inside the scope of a section, or a hypothesis in a proof}.
\fun{val Term.mkExistential : constr} {\texttt{mkExistential} represents an implicit sub-term, like the question marks in the term \texttt{(pair ? ? O true)}.} %\fun{val Term.mkMeta : int -> constr} % {$(\texttt{mkMeta}\;n)$ represents an existential variable, whose % name is the integer $n$.} \fun{val Term.mkProp : constr} {$\texttt{mkProp}$ represents the sort \textsl{Prop}.} \fun{val Term.mkSet : constr} {$\texttt{mkSet}$ represents the sort \textsl{Set}.} \fun{val Term.mkType : Impuniv.universe -> constr} {$(\texttt{mkType}\;u)$ represents the term $\textsl{Type}(u)$. The universe $u$ is represented as a section path indexed by an integer. } \fun{val Term.mkConst : section\_path -> constr array -> constr} {$(\texttt{mkConst}\;c\;v)$ represents a constant whose name is $c$. The body of the constant is stored in a global table, accessible through the name of the constant. The array of terms $v$ corresponds to the variables of the environment appearing in the body of the constant when it was defined. For instance, a constant defined in the section \textsl{Foo} containing the variable $A$, and whose body is $[x:Prop\ra Prop](x\;A)$ is represented inside the scope of the section by $(\texttt{mkConst}\;\texttt{\#foo\#f.cci}\;[| \texttt{mkVAR}\;A |])$. Once the section is closed, the constant is represented by the term $(\texttt{mkConst}\;\#f.cci\;[| |])$, and its body becomes $[A:Prop][x:Prop\ra Prop](x\;A)$}. \fun{val Term.mkMutInd : section\_path -> int -> constr array ->constr} {$(\texttt{mkMutInd}\;c\;i)$ represents the $ith$ type (starting from zero) of the block of mutually dependent (co)inductive types, whose first type is $c$. Similarly to the case of constants, the array of terms represents the current environment of the (co)inductive type. The definition of the type (its arity, its constructors, whether it is inductive or co-inductive, etc.) is stored in a global hash table, accessible through the name of the type.} \fun{val Term.mkMutConstruct : \\ \qquad section\_path -> int -> int -> constr array ->constr} {\\ $(\texttt{mkMutConstruct}\;c\;i\;j)$ represents the $jth$ constructor of the $ith$ type of the block of mutually dependent (co)inductive types whose first type is $c$. The array of terms represents the current environment of the (co)inductive type.} \fun{val Term.mkCast : constr -> constr -> constr} {$(\texttt{mkCast}\;t\;T)$ represents the annotated term $t::T$ in \Coq's syntax.} \fun{val Term.mkProd : name ->constr ->constr -> constr} {$(\texttt{mkProd}\;x\;A\;B)$ represents the product $(x:A)B$. The free ocurrences of $x$ in $B$ are represented by deBrujin's indexes.} \fun{val Term.mkNamedProd : identifier -> constr -> constr -> constr} {$(\texttt{produit}\;x\;A\;B)$ represents the product $(x:A)B$, but the bound occurrences of $x$ in $B$ are denoted by the identifier $(\texttt{mkVar}\;x)$. The function automatically changes each occurrences of this identifier into the corresponding deBrujin's index.} \fun{val Term.mkArrow : constr -> constr -> constr} {$(\texttt{arrow}\;A\;B)$ represents the type $(A\rightarrow B)$.} \fun{val Term.mkLambda : name -> constr -> constr -> constr} {$(\texttt{mkLambda}\;x\;A\;b)$ represents the lambda abstraction $[x:A]b$. The free ocurrences of $x$ in $B$ are represented by deBrujin's indexes.} \fun{val Term.mkNamedLambda : identifier -> constr -> constr -> constr} {$(\texttt{lambda}\;x\;A\;b)$ represents the lambda abstraction $[x:A]b$, but the bound occurrences of $x$ in $B$ are denoted by the identifier $(\texttt{mkVar}\;x)$. 
} \fun{val Term.mkAppLA : constr array -> constr} {$(\texttt{mkAppLA}\;t\;[|t_1\ldots t_n|])$ represents the application $(t\;t_1\;\ldots t_n)$.} \fun{val Term.mkMutCaseA : \\ \qquad case\_info -> constr ->constr ->constr array -> constr} {\\ $(\texttt{mkMutCaseA}\;r\;P\;m\;[|f_1\ldots f_n|])$ represents the term \Case{P}{m}{f_1\ldots f_n}. The first argument $r$ is either \texttt{None} or $\texttt{Some}\;(c,i)$, where the pair $(c,i)$ refers to the inductive type that $m$ belongs to.} \fun{val Term.mkFix : \\ \qquad int array->int->constr array->name list->constr array->constr} {\\ $(\texttt{mkFix}\;[|k_1\ldots k_n |]\;i\;[|A_1\ldots A_n|]\;[|f_1\ldots f_n|]\;[|t_1\ldots t_n|])$ represents the term $\Fix{f_i}{f_1/k_1:A_1:=t_1 \ldots f_n/k_n:A_n:=t_n}$} \fun{val Term.mkCoFix : \\ \qquad int -> constr array -> name list -> constr array -> constr} {\\ $(\texttt{mkCoFix}\;i\;[|A_1\ldots A_n|]\;[|f_1\ldots f_n|]\;[|t_1\ldots t_n|])$ represents the term $\CoFix{f_i}{f_1:A_1:=t_1 \ldots f_n:A_n:=t_n}$. There are no decreasing indexes in this case.} \end{description} \subsubsection{Decomposing Constructions} Each of the construction functions above has its corresponding (partial) destruction function, whose name is obtained changing the prefix \texttt{mk} by \texttt{dest}. In addition to these functions, a concrete datatype \texttt{kindOfTerm} can be used to do pattern matching on terms without dealing with their internal representation in the meta-language. This concrete datatype is described in the \ocaml{} file \texttt{term.mli}. The following function transforms a construction into an element of type \texttt{kindOfTerm}: \begin{description} \fun{val Term.kind\_of\_term : constr -> kindOfTerm} {Destructs a term of the language \texttt{constr}, yielding the direct components of the term. Hence, in order to do pattern matching on an object $c$ of \texttt{constr}, it is sufficient to do pattern matching on the value $(\texttt{kind\_of\_term}\;c)$.} \end{description} Part of the information associated to the constants is stored in global tables. The following functions give access to such information: \begin{description} \fun{val Termenv.constant\_value : constr -> constr} {If the term denotes a constant, projects the body of a constant} \fun{Termenv.constant\_type : constr -> constr} {If the term denotes a constant, projects the type of the constant} \fun{val mind\_arity : constr -> constr} {If the term denotes an inductive type, projects its arity (i.e., the type of the inductive type).} \fun{val Termenv.mis\_is\_finite : mind\_specif -> bool} {Determines whether a recursive type is inductive or co-inductive.} \fun{val Termenv.mind\_nparams : constr -> int} {If the term denotes an inductive type, projects the number of its general parameters.} \fun{val Termenv.mind\_is\_recursive : constr -> bool} {If the term denotes an inductive type, determines if the type has at least one recursive constructor. } \fun{val Termenv.mind\_recargs : constr -> recarg list array array} {If the term denotes an inductive type, returns an array $v$ such that the nth element of $v.(i).(j)$ is \texttt{Mrec} if the $nth$ argument of the $jth$ constructor of the $ith$ type is recursive, and \texttt{Norec} if it is not.}. \end{description} \subsection[The Type Checker]{The Type Checker\label{TypeChecker}} The third logical module is the type checker. It concentrates two main tasks concerning the language of constructions. On one hand, it contains the type inference and type-checking functions. 
The type inference function takes a term $a$ and a signature $\Gamma$, and yields a term $A$ such that $\Gamma \vdash a:A$. The type-checking function takes two terms $a$ and $A$ and a signature $\Gamma$, and determines whether or not $\Gamma \vdash a:A$. On the other hand, this module is in charge of the compilation of \Coq's abstract syntax trees into the language \texttt{constr} of constructions. This compilation seeks to eliminate all the ambiguities contained in \Coq's abstract syntax, restoring the information necessary to type-check it. It concerns at least the following steps: \begin{enumerate} \item Compiling the pattern-matching expressions containing constructor patterns, wild-cards, etc, into terms that only use the primitive \textsl{Case} described in Chapter \ref{Cic} \item Restoring type coercions and synthesizing the implicit arguments (the one denoted by question marks in {\Coq} syntax: see Section~\ref{Coercions}). \item Transforming the named bound variables into deBrujin's indexes. \item Classifying the global names into the different classes of constants (defined constants, constructors, inductive types, etc). \end{enumerate} \subsection{The Proof Engine} The fourth stage of \Coq's implementation is the \textsl{proof engine}: the interactive machine for constructing proofs. The aim of the proof engine is to construct a top-down derivation or \textsl{proof tree}, by the application of \textsl{tactics}. A proof tree has the following general structure:\\ \begin{displaymath} \frac{\Gamma \vdash ? = t(?_1,\ldots?_n) : G} {\hspace{3ex}\frac{\displaystyle \Gamma_1 \vdash ?_1 = t_1(\ldots) : G_1} {\stackrel{\vdots}{\displaystyle {\Gamma_{i_1} \vdash ?_{i_1} : G_{i_1}}}}(tac_1) \;\;\;\;\;\;\;\;\; \frac{\displaystyle \Gamma_n \vdash ?_n = t_n(\ldots) : G_n} {\displaystyle \stackrel{\vdots}{\displaystyle {\Gamma_{i_m} \vdash ?_{i_m} : G_{i_m}}}}(tac_n)} (tac) \end{displaymath} \noindent Each node of the tree is called a \textsl{goal}. A goal is a record type containing the following three fields: \begin{enumerate} \item the conclusion $G$ to be proven; \item a typing signature $\Gamma$ for the free variables in $G$; \item if the goal is an internal node of the proof tree, the definition $t(?_1,\ldots?_n)$ of an \textsl{existential variable} (i.e. a possible undefined constant) $?$ of type $G$ in terms of the existential variables of the children sub-goals. If the node is a leaf, the existential variable maybe still undefined. \end{enumerate} Once all the existential variables have been defined the derivation is completed, and a construction can be generated from the proof tree, replacing each of the existential variables by its definition. This is exactly what happens when one of the commands \texttt{Qed}, \texttt{Save} or \texttt{Defined} is invoked (see Section~\ref{Qed}). The saved theorem becomes a defined constant, whose body is the proof object generated. \paragraph{Important:} Before being added to the context, the proof object is type-checked, in order to verify that it is actually an object of the expected type $G$. Hence, the correctness of the proof actually does not depend on the tactics applied to generate it or the machinery of the proof engine, but only on the type-checker. In other words, extending the system with a potentially bugged new tactic never endangers the consistency of the system. \subsubsection[What is a Tactic?]{What is a Tactic?\label{WhatIsATactic}} %Let us now explain what is a tactic, and how the user can introduce %new ones. 
From an operational point of view, the current state of the proof engine is given by the mapping $emap$ from existential variables into goals, plus a pointer to one of the leaf goals $g$. Such a pointer indicates where the proof tree will be refined by the application of a \textsl{tactic}. A tactic is a function from the current state $(g,emap)$ of the proof engine into a pair $(l,val)$. The first component of this pair is the list of children sub-goals $g_1,\ldots g_n$ of $g$ to be yielded by the tactic. The second one is a \textsl{validation function}. Once the proof trees $\pi_1,\ldots \pi_n$ for $g_1,\ldots g_n$ have been completed, this validation function must yield a proof tree $(val\;\pi_1,\ldots \pi_n)$ deriving $g$. Tactics can be classified into \textsl{primitive} ones and \textsl{defined} ones. Primitive tactics correspond to the five basic operations of the proof engine: \begin{enumerate} \item Introducing a universally quantified variable into the local context of the goal. \item Defining an undefined existential variable \item Changing the conclusion of the goal for another --definitionally equal-- term. \item Changing the type of a variable in the local context for another definitionally equal term. \item Erasing a variable from the local context. \end{enumerate} \textsl{Defined} tactics are tactics constructed by combining these primitive operations. Defined tactics are registered in a hash table, so that they can be introduced dynamically. In order to define such a tactic table, it is necessary to fix what a \textsl{possible argument} of a tactic may be. The type \texttt{tactic\_arg} of the possible arguments for tactics is a union type including: \begin{itemize} \item quoted strings; \item integers; \item identifiers; \item lists of identifiers; \item plain terms, represented by its abstract syntax tree; \item well-typed terms, represented by a construction; \item a substitution for bound variables, like the substitution in the tactic \\$\texttt{Apply}\;t\;\texttt{with}\;x:=t_1\ldots x_n:=t_n$, (see Section~\ref{apply}); \item a reduction expression, denoting the reduction strategy to be followed. \end{itemize} Therefore, for each function $tac:a \rightarrow tactic$ implementing a defined tactic, an associated dynamic tactic $tacargs\_tac: \texttt{tactic\_arg}\;list \rightarrow tactic$ calling $tac$ must be written. The aim of the auxiliary function $tacargs\_tac$ is to inject the arguments of the tactic $tac$ into the type of possible arguments for a tactic. The following function can be used for registering and calling a defined tactic: \begin{description} \fun{val Tacmach.add\_tactic : \\ \qquad string -> (tactic\_arg list ->tactic) -> unit} {\\ Registers a dynamic tactic with the given string as access index.} \fun{val Tacinterp.vernac\_tactic : string*tactic\_arg list -> tactic} {Interprets a defined tactic given by its entry in the tactics table with a particular list of possible arguments.} \fun{val Tacinterp.vernac\_interp : CoqAst.t -> tactic} {Interprets a tactic expression formed combining \Coq's tactics and tacticals, and described by its abstract syntax tree.} \end{description} When programming a new tactic that calls an already defined tactic $tac$, we have the choice between using the \ocaml{} function implementing $tac$, or calling the tactic interpreter with the name and arguments for interpreting $tac$. In the first case, a tactic call will left the trace of the whole implementation of $tac$ in the proof tree. 
In the second, the implementation of $tac$ will be hidden, and only an invocation of $tac$ will be recalled (cf. the example of Section \ref{ACompleteExample}. The following combinators can be used to hide the implementation of a tactic: \begin{verbatim} type 'a hiding_combinator = string -> ('a -> tactic) -> ('a -> tactic) val Tacmach.hide_atomic_tactic : string -> tactic -> tactic val Tacmach.hide_constr_tactic : constr hiding_combinator val Tacmach.hide_constrl_tactic : (constr list) hiding_combinator val Tacmach.hide_numarg_tactic : int hiding_combinator val Tacmach.hide_ident_tactic : identifier hiding_combinator val Tacmach.hide_identl_tactic : identifier hiding_combinator val Tacmach.hide_string_tactic : string hiding_combinator val Tacmach.hide_bindl_tactic : substitution hiding_combinator val Tacmach.hide_cbindl_tactic : (constr * substitution) hiding_combinator \end{verbatim} These functions first register the tactic by a side effect, and then yield a function calling the interpreter with the registered name and the right injection into the type of possible arguments. \subsection{Tactics and Tacticals Provided by \Coq} The fifth logical module is the library of tacticals and basic tactics provided by \Coq. This library is distributed into the directories \texttt{tactics} and \texttt{src/tactics}. The former contains those basic tactics that make use of the types contained in the basic state of \Coq. For example, inversion or rewriting tactics are in the directory \texttt{tactics}, since they make use of the propositional equality type. Those tactics which are independent from the context --like for example \texttt{Cut}, \texttt{Intros}, etc-- are defined in the directory \texttt{src/tactics}. This latter directory also contains some useful tools for programming new tactics, referred in Section \ref{SomeUsefulToolsforWrittingTactics}. In practice, it is very unusual that the list of sub-goals and the validation function of the tactic must be explicitly constructed by the user. In most of the cases, the implementation of a new tactic consists in supplying the appropriate arguments to the basic tactics and tacticals. \subsubsection{Basic Tactics} The file \texttt{Tactics} contain the implementation of the basic tactics provided by \Coq. The following tactics are some of the most used ones: \begin{verbatim} val Tactics.intro : tactic val Tactics.assumption : tactic val Tactics.clear : identifier list -> tactic val Tactics.apply : constr -> constr substitution -> tactic val Tactics.one_constructor : int -> constr substitution -> tactic val Tactics.simplest_elim : constr -> tactic val Tactics.elimType : constr -> tactic val Tactics.simplest_case : constr -> tactic val Tactics.caseType : constr -> tactic val Tactics.cut : constr -> tactic val Tactics.reduce : redexpr -> tactic val Tactics.exact : constr -> tactic val Auto.auto : int option -> tactic val Auto.trivial : tactic \end{verbatim} The functions hiding the implementation of these tactics are defined in the module \texttt{Hiddentac}. Their names are prefixed by ``h\_''. 
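As an illustration, here is a minimal sketch combining two of the basic tactics above by means of the tacticals presented in the next section; the name \texttt{intro\_then\_assumption} is of course hypothetical:
\begin{verbatim}
(* A sketch: introduce the first quantified variable of the goal, then
   try to close the goal with a hypothesis of the local context, and do
   nothing more if this fails. *)
let intro_then_assumption =
  Tacticals.tclTHEN Tactics.intro
    (Tacticals.tclORELSE Tactics.assumption Tacticals.tclIDTAC)
\end{verbatim}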
\subsubsection[Tacticals]{Tacticals\label{OcamlTacticals}} The following tacticals can be used to combine already existing tactics: \begin{description} \fun{val Tacticals.tclIDTAC : tactic} {The identity tactic: it leaves the goal as it is.} \fun{val Tacticals.tclORELSE : tactic -> tactic -> tactic} {Tries the first tactic and in case of failure applies the second one.} \fun{val Tacticals.tclTHEN : tactic -> tactic -> tactic} {Applies the first tactic and then the second one to each generated subgoal.} \fun{val Tacticals.tclTHENS : tactic -> tactic list -> tactic} {Applies a tactic, and then applies each tactic of the tactic list to the corresponding generated subgoal.} \fun{val Tacticals.tclTHENL : tactic -> tactic -> tactic} {Applies the first tactic, and then applies the second one to the last generated subgoal.} \fun{val Tacticals.tclREPEAT : tactic -> tactic} {If the given tactic succeeds in producing a subgoal, then it is recursively applied to each generated subgoal, and so on until it fails. } \fun{val Tacticals.tclFIRST : tactic list -> tactic} {Tries the tactics of the given list one by one, until one of them succeeds.} \fun{val Tacticals.tclTRY : tactic -> tactic} {Tries the given tactic and in case of failure applies the {\tt tclIDTAC} tactical to the original goal.} \fun{val Tacticals.tclDO : int -> tactic -> tactic} {Applies the tactic a given number of times.} \fun{val Tacticals.tclFAIL : tactic} {The always failing tactic: it raises a {\tt UserError} exception.} \fun{val Tacticals.tclPROGRESS : tactic -> tactic} {Applies the given tactic to the current goal and fails if the tactic leaves the goal unchanged} \fun{val Tacticals.tclNTH\_HYP : int -> (constr -> tactic) -> tactic} {Applies a tactic to the nth hypothesis of the local context. The last hypothesis introduced correspond to the integer 1.} \fun{val Tacticals.tclLAST\_HYP : (constr -> tactic) -> tactic} {Applies a tactic to the last hypothesis introduced.} \fun{val Tacticals.tclCOMPLETE : tactic -> tactic} {Applies a tactic and fails if the tactic did not solve completely the goal} \fun{val Tacticals.tclMAP : ('a -> tactic) -> 'a list -> tactic} {Applied to the function \texttt{f} and the list \texttt{[x\_1; ... ; x\_n]}, this tactical applies the tactic \texttt{tclTHEN (f x1) (tclTHEN (f x2) ... ))))}} \fun{val Tacicals.tclIF : (goal sigma -> bool) -> tactic -> tactic -> tactic} {If the condition holds, apply the first tactic; otherwise, apply the second one} \end{description} \subsection{The Vernacular Interpreter} The sixth logical module of the implementation corresponds to the interpreter of the vernacular phrases of \Coq. These phrases may be expressions from the \gallina{} language (definitions), general directives (setting commands) or tactics to be applied by the proof engine. \subsection[The Parser and the Pretty-Printer]{The Parser and the Pretty-Printer\label{PrettyPrinter}} The last logical module is the parser and pretty printer of \Coq, which is the interface between the vernacular interpreter and the user. They translate the chains of characters entered at the input into abstract syntax trees, and vice versa. Abstract syntax trees are represented by labeled n-ary trees, and its type is called \texttt{CoqAst.t}. 
For instance, the abstract syntax tree associated to the term $[x:A]x$ is: \begin{displaymath} \texttt{Node} ((0,6), "LAMBDA", [\texttt{Nvar}~((3, 4),"A");~\texttt{Slam}~((0,6),~Some~"x",~\texttt{Nvar}~((5,6),"x"))]) \end{displaymath} The numbers correspond to \textsl{locations}, used to point to some input line and character positions in the error messages. As it was already explained in Section \ref{TypeChecker}, this term is then translated into a construction term in order to be typed. The parser of \Coq\ is implemented using \camlpppp. The lexer and the data used by \camlpppp\ to generate the parser lay in the directory \texttt{src/parsing}. This directory also contains \Coq's pretty-printer. The printing rules lay in the directory \texttt{src/syntax}. The different entries of the grammar are described in the module \texttt{Pcoq.Entry}. Let us present here two important functions of this logical module: \begin{description} \fun{val Pcoq.parse\_string : 'a Grammar.Entry.e -> string -> 'a} {Parses a given string, trying to recognize a phrase corresponding to some entry in the grammar. If it succeeds, it yields a value associated to the grammar entry. For example, applied to the entry \texttt{Pcoq.Command.command}, this function parses a term of \Coq's language, and yields a value of type \texttt{CoqAst.t}. When applied to the entry \texttt{Pcoq.Vernac.vernac}, it parses a vernacular command and returns the corresponding Ast.} \fun{val gentermpr : \\ \qquad path\_kind -> constr assumptions -> constr -> std\_ppcmds} {\\ Pretty-prints a well-typed term of certain kind (cf. Section \ref{SectionPaths}) under its context of typing assumption.} \fun{val gentacpr : CoqAst.t -> std\_ppcmds} {Pretty-prints a given abstract syntax tree representing a tactic expression.} \end{description} \subsection{The General Library} In addition to the ones laying in the standard library of \ocaml{}, several useful modules about lists, arrays, sets, mappings, balanced trees, and other frequently used data structures can be found in the directory \texttt{lib}. Before writing a new one, check if it is not already there! \subsubsection{The module \texttt{Std}} This module in the directory \texttt{src/lib/util} is opened by almost all modules of \Coq{}. Among other things, it contains a definition of the different kinds of errors used in \Coq{} : \begin{description} \fun{exception UserError of string * std\_ppcmds} {This is the class of ``users exceptions''. Such errors arise when the user attempts to do something illegal, for example \texttt{Intro} when the current goal conclusion is not a product.} \fun{val Std.error : string -> 'a} {For simple error messages} \fun{val Std.errorlabstrm : string -> std\_ppcmds -> 'a} {See Section~\ref{PrettyPrinter} : this can be used if the user want to display a term or build a complex error message} \fun{exception Anomaly of string * std\_ppcmds} {This for reporting bugs or things that should not happen. The tacticals \texttt{tclTRY} and \texttt{tclTRY} described in Section~\ref{OcamlTacticals} catch the exceptions of type \texttt{UserError}, but they don't catch the anomalies. So, in your code, don't raise any anomaly, unless you know what you are doing. We also recommend to avoid constructs such as \texttt{try ... 
with \_ -> ...} : such constructs can trap an anomaly and make the debugging process harder.}
\fun{val Std.anomaly : string -> 'a}{}
\fun{val Std.anomalylabstrm : string -> std\_ppcmds -> 'a}{}
\end{description}
\section{The tactic writer mini-HOWTO}
\subsection{How to add a vernacular command}
The command to register a vernacular command can be found in module \texttt{Vernacinterp}:
\begin{verbatim}
val vinterp_add : string * (vernac_arg list -> unit -> unit) -> unit;;
\end{verbatim}
The first argument is the name, the second argument is a function that parses the arguments and returns a function of type \texttt{unit}$\rightarrow$\texttt{unit} that does the job.
In this section we will show how to add a vernacular command \texttt{CheckCheck} that prints the type of a term and the type of its type. File \texttt{dcheck.ml}:
\begin{verbatim}
open Vernacinterp;;
open Trad;;
let _ =
  vinterp_add
    ("DblCheck",
     function [VARG_COMMAND com] ->
       (fun () ->
          let evmap = Evd.mt_evd ()
          and sign = Termenv.initial_sign () in
          let {vAL=c;tYP=t;kIND=k} =
            fconstruct_with_univ evmap sign com in
          Pp.mSGNL [< Printer.prterm c; 'sTR ":";
                      Printer.prterm t; 'sTR ":";
                      Printer.prterm k >] )
     | _ -> bad_vernac_args "DblCheck")
;;
\end{verbatim}
As for a new tactic, a new syntax entry must be created. File \texttt{DCheck.v}:
\begin{verbatim}
Declare ML Module "dcheck.ml".

Grammar vernac vernac :=
  dblcheck [ "CheckCheck" comarg($c) ] -> [(DblCheck $c)].
\end{verbatim}
We are now able to test our new command:
\begin{verbatim}
Coq < Require DCheck.
Coq < CheckCheck O.
O:nat:Set
\end{verbatim}
Most Coq vernacular commands are registered in the module \verb+src/env/vernacentries.ml+. One can find more examples there.
\subsection{How to keep a hashtable synchronous with the reset mechanism}
This is far more tricky. Some vernacular commands modify some sort of state (for example by adding something in a hashtable). One wants \texttt{Reset} to have the expected behavior with these commands.
\Coq{} provides a general mechanism to do that. \Coq{} environments contain objects of three kinds: CCI, FW and OBJ. CCI and FW are for constants of the calculus. OBJ is a dynamically extensible datatype that contains sections, tactic definitions, hints for auto, and so on.
The simplest example of use of such a mechanism is in file \verb+src/proofs/macros.ml+ (which implements the \texttt{Tactic Definition} command). Tactic macros are stored in the imperative hashtable \texttt{mactab}. There are two functions freeze and unfreeze to make a copy of the table and to restore the state of the table from the copy. Then this table is declared using \texttt{Library.declare\_summary}.
What does \Coq{} do with that? \Coq{} defines synchronization points. At each synchronization point, the declared tables are frozen (that is, a copy of these tables is stored). When \texttt{Reset} $i$ is called, \Coq{} goes back to the first synchronization point that is above $i$ and ``replays'' all objects between that point and $i$. It will re-declare constants, re-open sections, etc.
So we need to declare a new type of objects, TACTIC-MACRO-DATA. To ``replay'' an object of that type is to add the corresponding tactic macro to \texttt{mactab}. So, now, we can say that \texttt{mactab} is synchronous with the Reset mechanism$^{\mathrm{TM}}$.
Notice that this works for hash tables but also for a single integer (the Undo stack size, modified by the \texttt{Set Undo} command, for example).
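For instance, here is a minimal sketch of such a freeze/unfreeze pair for a hypothetical string-indexed table \texttt{mytab}, using only the standard \ocaml{} \texttt{Hashtbl} module; how the pair is then registered with \texttt{Library.declare\_summary} is not shown here:
\begin{verbatim}
(* A sketch: the copy/restore functions needed to keep the table mytab
   synchronous with the Reset mechanism. *)
let mytab : (string, int) Hashtbl.t = Hashtbl.create 17

let freeze () = Hashtbl.copy mytab

let unfreeze frozen =
  Hashtbl.clear mytab;
  Hashtbl.iter (fun key value -> Hashtbl.add mytab key value) frozen
\end{verbatim}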
\subsection{The right way to access Coq constants from your ML code}
Coq constants are referred to internally by their long names, made of:
\begin{itemize}
\item a section path
\item an identifier
\end{itemize}
The identifier is exactly the identifier that is used in \Coq{} to denote the constant; the section path can be known using the \texttt{Locate} command:
\begin{coq_example}
Locate S.
Locate nat.
Locate eq.
\end{coq_example}
Now it is easy to get a constant by its name and section path:
\begin{verbatim}
let constant sp id =
  Machops.global_reference (Names.gLOB (Termenv.initial_sign ()))
    (Names.path_of_string sp) (Names.id_of_string id);;
\end{verbatim}
The only issue is that one cannot put:
\begin{verbatim}
let coq_S = constant "#Datatypes#nat.cci" "S";;
\end{verbatim}
directly in the tactic's code. That is because this expression is evaluated \emph{before} the module \texttt{Datatypes} is loaded. The solution is to use the lazy evaluation of \ocaml{}:
\begin{verbatim}
let coq_S = lazy (constant "#Datatypes#nat.cci" "S");;

... (Lazy.force coq_S) ...
\end{verbatim}
Be sure to always call \texttt{Lazy.force} behind a closure -- i.e. inside a function body or behind the \texttt{lazy} keyword.
One can see examples of that technique in the source code of \Coq{}, for example \verb+plugins/omega/coq_omega.ml+.
\section[Some Useful Tools for Writing Tactics]{Some Useful Tools for Writing Tactics\label{SomeUsefulToolsforWrittingTactics}}
When the implementation of a tactic is not a straightforward combination of tactics and tacticals, the module \texttt{Tacmach} provides several useful functions for handling goals, calling the type-checker, parsing terms, etc. This module is intended to be the interface of the proof engine for the user.
\begin{description}
\fun{val Tacmach.pf\_hyps : goal sigma -> constr signature} {Projects the local typing context $\Gamma$ from a given goal $\Gamma\vdash ?:G$.}
\fun{val pf\_concl : goal sigma -> constr} {Projects the conclusion $G$ from a given goal $\Gamma\vdash ?:G$.}
\fun{val Tacmach.pf\_nth\_hyp : goal sigma -> int -> identifier * constr} {Projects the $ith$ typing constraint $x_i:A_i$ from the local context of the given goal.}
\fun{val Tacmach.pf\_fexecute : goal sigma -> constr -> judgement} {Given a goal whose local context is $\Gamma$ and a term $a$, this function infers a type $A$ and a kind $K$ such that the judgement $a:A:K$ is valid under $\Gamma$, or raises an exception if there is no such judgement. A judgement is just a record type containing the three terms $a$, $A$ and $K$.}
\fun{val Tacmach.pf\_infexecute : \\ \qquad goal sigma -> constr -> judgement * information} {\\ In addition to the typing judgement, this function also extracts the $F_{\omega}$ program underlying the term.}
\fun{val Tacmach.pf\_type\_of : goal sigma -> constr -> constr} {Infers a term $A$ such that $\Gamma\vdash a:A$ for a given term $a$, where $\Gamma$ is the local typing context of the goal.}
\fun{val Tacmach.pf\_check\_type : goal sigma -> constr -> constr -> bool} {This function yields a type $A$ if the two given terms $a$ and $A$ verify $\Gamma\vdash a:A$ in the local typing context $\Gamma$ of the goal. Otherwise, it raises an exception.}
\fun{val Tacmach.pf\_constr\_of\_com : goal sigma -> CoqAst.t -> constr} {Transforms an abstract syntax tree into a well-typed term of the language of constructions.
Raises an exception if the term cannot be typed.}
\fun{val Tacmach.pf\_constr\_of\_com\_sort : goal sigma -> CoqAst.t -> constr} {Transforms an abstract syntax tree representing a type into a well-typed term of the language of constructions. Raises an exception if the term cannot be typed.}
\fun{val Tacmach.pf\_parse\_const : goal sigma -> string -> constr} {Constructs the constant whose name is the given string.}
\fun{val Tacmach.pf\_reduction\_of\_redexp : \\ \qquad goal sigma -> red\_expr -> constr -> constr} {\\ Applies a certain kind of reduction function, specified by an element of the type red\_expr.}
\fun{val Tacmach.pf\_conv\_x : goal sigma -> constr -> constr -> bool} {Tests whether two given terms are definitionally equal.}
\end{description}
\subsection[Patterns]{Patterns\label{Patterns}}
The \ocaml{} file \texttt{Pattern} provides a quick way for describing a term pattern and performing second-order, binding-preserving, matching on it. Patterns are described using an extension of \Coq's concrete syntax, where the second-order meta-variables of the pattern are denoted by indexed question marks.
Patterns may depend on constants, and therefore only make sense when certain theories have been loaded. For this reason, they are stored with a \textsl{module-marker}, telling us which modules have to be open in order to use the pattern. The following functions can be used to store and retrieve patterns from the pattern table:
\begin{description}
\fun{val Pattern.make\_module\_marker : string list -> module\_mark} {Constructs a module marker from a list of module names.}
\fun{val Pattern.put\_pat : module\_mark -> string -> marked\_term} {Constructs a pattern from a parseable string containing holes and a module marker.}
\fun{val Pattern.somatches : constr -> marked\_term -> bool} {Tests if a term matches a pattern.}
\fun{val dest\_somatch : constr -> marked\_term -> constr list} {If the term matches the pattern, yields the list of sub-terms matching the occurrences of the pattern variables (ordered from left to right). Raises a \texttt{UserError} exception if the term does not match the pattern.}
\fun{val Pattern.soinstance : marked\_term -> constr list -> constr} {Substitutes each hole in the pattern by the corresponding term of the given list.}
\end{description}
\paragraph{Warning:} Sometimes, a \Coq\ term may have invisible sub-terms that the matching functions are nevertheless sensitive to. For example, the \Coq\ term $(?_1,?_2)$ is actually a shorthand for the expression $(\texttt{pair}\;?\;?\;?_1\;?_2)$. Hence, matching this pattern with the term $(\texttt{true},\texttt{O})$ actually yields the list $[?;?;\texttt{true};\texttt{O}]$ as result (and \textbf{not} $[\texttt{true};\texttt{O}]$, as could be expected).
\subsection{Patterns on Inductive Definitions}
The module \texttt{Pattern} also includes some functions for testing if the definition of an inductive type satisfies certain properties. Such functions may be used to perform pattern matching independently from the name given to the inductive type and the universe it inhabits. They yield the value $(\texttt{Some}\;r::l)$ if the input term reduces into an application of an inductive type $r$ to a list of terms $l$, and the definition of $r$ satisfies certain conditions. Otherwise, they yield the value \texttt{None}.
\begin{description} \fun{val Pattern.match\_with\_non\_recursive\_type : constr list option} {Tests if the inductive type $r$ has no recursive constructors} \fun{val Pattern.match\_with\_disjunction : constr list option} {Tests if the inductive type $r$ is a non-recursive type such that all its constructors have a single argument.} \fun{val Pattern.match\_with\_conjunction : constr list option} {Tests if the inductive type $r$ is a non-recursive type with a unique constructor.} \fun{val Pattern.match\_with\_empty\_type : constr list option} {Tests if the inductive type $r$ has no constructors at all} \fun{val Pattern.match\_with\_equation : constr list option} {Tests if the inductive type $r$ has a single constructor expressing the property of reflexivity for some type. For example, the types $a=b$, $A\mbox{==}B$ and $A\mbox{===}B$ satisfy this predicate.} \end{description} \subsection{Elimination Tacticals} It is frequently the case that the subgoals generated by an elimination can all be solved in a similar way, possibly parametrized on some information about each case, like for example: \begin{itemize} \item the inductive type of the object being eliminated; \item its arguments (if it is an inductive predicate); \item the branch number; \item the predicate to be proven; \item the number of assumptions to be introduced by the case \item the signature of the branch, i.e., for each argument of the branch whether it is recursive or not. \end{itemize} The following tacticals can be useful to deal with such situations. They \begin{description} \fun{val Elim.simple\_elimination\_then : \\ \qquad (branch\_args -> tactic) -> constr -> tactic} {\\ Performs the default elimination on the last argument, and then tries to solve the generated subgoals using a given parametrized tactic. The type branch\_args is a record type containing all information mentioned above.} \fun{val Elim.simple\_case\_then : \\ \qquad (branch\_args -> tactic) -> constr -> tactic} {\\ Similarly, but it performs case analysis instead of induction.} \end{description} \section[A Complete Example]{A Complete Example\label{ACompleteExample}} In order to illustrate the implementation of a new tactic, let us come back to the problem of deciding the equality of two elements of an inductive type. \subsection{Preliminaries} Let us call \texttt{newtactic} the directory that will contain the implementation of the new tactic. In this directory will lay two files: a file \texttt{eqdecide.ml}, containing the \ocaml{} sources that implements the tactic, and a \Coq\ file \texttt{Eqdecide.v}, containing its associated grammar rules and the commands to generate a module that can be loaded dynamically from \Coq's toplevel. To compile our project, we will create a \texttt{Makefile} with the command \texttt{do\_Makefile} (see Section~\ref{Makefile}) : \begin{quotation} \texttt{do\_Makefile eqdecide.ml EqDecide.v > Makefile}\\ \texttt{touch .depend}\\ \texttt{make depend} \end{quotation} We must have kept the sources of \Coq{} somewhere and to set an environment variable \texttt{COQTOP} that points to that directory. \subsection{Implementing the Tactic} The file \texttt{eqdecide.ml} contains the implementation of the tactic in \ocaml{}. Let us recall the main steps of the proof strategy for deciding the proposition $(x,y:R)\{x=y\}+\{\neg x=y\}$ on the inductive type $R$: \begin{enumerate} \item Eliminate $x$ and then $y$. \item Try discrimination to solve those goals where $x$ and $y$ has been introduced by different constructors. 
\item If $x$ and $y$ have been introduced by the same constructor, then analyze one by one the corresponding pairs of arguments. If they are equal, rewrite one into the other. If they are not, derive a contradiction from the injectivity of the constructor.
\item Once all the arguments have been rewritten, solve the left half of the goal by reflexivity.
\end{enumerate}
In the sequel we implement these steps one by one. We start by opening the modules necessary for the implementation of the tactic:
\begin{verbatim}
open Names
open Term
open Tactics
open Tacticals
open Hiddentac
open Equality
open Auto
open Pattern
open Names
open Termenv
open Std
open Proof_trees
open Tacmach
\end{verbatim}
The first step of the procedure can be straightforwardly implemented as follows:
\begin{verbatim}
let clear_last = (tclLAST_HYP (fun c -> (clear_one (destVar c))));;
\end{verbatim}
\begin{verbatim}
let mkBranches =
  (tclTHEN intro
    (tclTHEN (tclLAST_HYP h_simplest_elim)
      (tclTHEN clear_last
        (tclTHEN intros
          (tclTHEN (tclLAST_HYP h_simplest_case)
            (tclTHEN clear_last intros))))));;
\end{verbatim}
Notice the use of the tactical \texttt{tclLAST\_HYP}, which avoids giving a (potentially clashing) name to the quantified variables of the goal when they are introduced.
The second step of the procedure is implemented by the following tactic:
\begin{verbatim}
let solveRightBranch = (tclTHEN simplest_right discrConcl);;
\end{verbatim}
In order to illustrate how the implementation of a tactic can be hidden, let us do it with the tactic above:
\begin{verbatim}
let h_solveRightBranch =
  hide_atomic_tactic "solveRightBranch" solveRightBranch
;;
\end{verbatim}
As was already mentioned in Section \ref{WhatIsATactic}, the combinator \texttt{hide\_atomic\_tactic} first registers the tactic \texttt{solveRightBranch} in the table, and returns a tactic which calls the interpreter with the name used to register it. Hence, when the tactical \texttt{Info} is used, our tactic will just report that \texttt{solveRightBranch} was applied, omitting all the details corresponding to \texttt{simplest\_right} and \texttt{discrConcl}.
The third step requires some auxiliary functions for constructing the type $\{c_1=c_2\}+\{\neg c_1=c_2\}$ for a given inductive type $R$ and two constructions $c_1$ and $c_2$, and for generalizing this type over $c_1$ and $c_2$:
\begin{verbatim}
let mmk = make_module_marker ["#Logic.obj";"#Specif.obj"];;
let eqpat = put_pat mmk "eq";;
let sumboolpat = put_pat mmk "sumbool";;
let notpat = put_pat mmk "not";;
let eq = get_pat eqpat;;
let sumbool = get_pat sumboolpat;;
let not = get_pat notpat;;

let mkDecideEqGoal rectype c1 c2 g =
  let equality = mkAppL [eq;rectype;c1;c2] in
  let disequality = mkAppL [not;equality] in
  mkAppL [sumbool;equality;disequality]
;;

let mkGenDecideEqGoal rectype g =
  let hypnames = ids_of_sign (pf_hyps g) in
  let xname = next_ident_away (id_of_string "x") hypnames
  and yname = next_ident_away (id_of_string "y") hypnames in
  (mkNamedProd xname rectype
     (mkNamedProd yname rectype
        (mkDecideEqGoal rectype (mkVar xname) (mkVar yname) g)))
;;
\end{verbatim}
The tactic will depend on the \Coq modules \texttt{Logic} and \texttt{Specif}, since we use the constants corresponding to propositional equality (\texttt{eq}), computational disjunction (\texttt{sumbool}), and logical negation (\texttt{not}), defined in those modules. This is specified by creating the module marker \texttt{mmk} (see Section~\ref{Patterns}).
The third step of the procedure can be divided into three sub-steps.
Assume that both $x$ and $y$ have been introduced by the same constructor. For each corresponding pair of arguments of that constructor, we have to consider whether they are equal or not. If they are equal, the following tactic is applied to rewrite one into the other:
\begin{verbatim}
let eqCase tac =
  (tclTHEN intro
    (tclTHEN (tclLAST_HYP h_rewriteLR)
      (tclTHEN clear_last tac)))
;;
\end{verbatim}
If they are not equal, then the goal is contraposed and a contradiction is reached from the injectivity of the constructor:
\begin{verbatim}
let diseqCase =
  let diseq = (id_of_string "diseq") in
  let absurd = (id_of_string "absurd") in
  (tclTHEN (intro_using diseq)
    (tclTHEN h_simplest_right
      (tclTHEN red_in_concl
        (tclTHEN (intro_using absurd)
          (tclTHEN (h_simplest_apply (mkVar diseq))
            (tclTHEN (h_injHyp absurd) trivial))))))
;;
\end{verbatim}
In the tactic above we have chosen to name the hypotheses because they have to be applied later on. This introduces a potential risk of name clashing if the context already contains other hypotheses also named ``diseq'' or ``absurd''.
We are now ready to implement the tactic \textsl{SolveArg}. Given the two arguments $a_1$ and $a_2$ of the constructor, this tactic cuts the goal with the proposition $\{a_1=a_2\}+\{\neg a_1=a_2\}$, and then applies the tactics above to each of the generated cases. If the disjunction cannot be solved automatically, it remains as a sub-goal to be proven.
\begin{verbatim}
let solveArg a1 a2 tac g =
  let rectype = pf_type_of g a1 in
  let decide = mkDecideEqGoal rectype a1 a2 g in
  (tclTHENS (h_elimType decide)
    [(eqCase tac);diseqCase;default_auto]) g
;;
\end{verbatim}
The following tactic implements the third and fourth steps of the proof procedure:
\begin{verbatim}
let conclpatt = put_pat mmk "{?2=?3}+{?4}"
;;
let solveLeftBranch rectype g =
  let (_::(lhs::(rhs::_))) =
    try (dest_somatch (pf_concl g) conclpatt)
    with UserError ("somatch",_) -> error "Unexpected conclusion!" in
  let nparams = mind_nparams rectype in
  let getargs l = snd (chop_list nparams (snd (decomp_app l))) in
  let rargs = getargs rhs
  and largs = getargs lhs in
  List.fold_right2 solveArg largs rargs
    (tclTHEN h_simplest_left h_reflexivity) g
;;
\end{verbatim}
Notice the use of a pattern to decompose the goal and obtain the inductive type and the left and right hand sides of the equality. A certain number of arguments correspond to the general parameters of the type, and must be skipped over. Once the corresponding lists of arguments \texttt{rargs} and \texttt{largs} have been obtained, the tactic \texttt{solveArg} is iterated on them, leaving a disjunction whose left half can be solved by reflexivity.
The following tactic joins together the three steps of the proof procedure:
\begin{verbatim}
let initialpatt = put_pat mmk "(x,y:?1){x=y}+{~(x=y)}"
;;
let decideGralEquality g =
  let (typ::_) =
    try (dest_somatch (pf_concl g) initialpatt)
    with UserError ("somatch",_) ->
      error "The goal does not have the expected form" in
  let headtyp = hd_app (pf_compute g typ) in
  let rectype =
    match (kind_of_term headtyp) with
      IsMutInd _ -> headtyp
    | _ -> error ("This decision procedure only" ^
                  " works for inductive objects") in
  (tclTHEN mkBranches
     (tclORELSE h_solveRightBranch (solveLeftBranch rectype))) g
;;
\end{verbatim}
The tactic above can be specialized in two different ways: either to decide a particular instance $\{c_1=c_2\}+\{\neg c_1=c_2\}$ of the universal quantification; or to eliminate this property and obtain two subgoals containing the hypotheses $c_1=c_2$ and $\neg c_1=c_2$ respectively.
\begin{verbatim}
let decideGralEquality =
  (tclTHEN mkBranches (tclORELSE h_solveRightBranch solveLeftBranch))
;;
let decideEquality c1 c2 g =
  let rectype = pf_type_of g c1 in
  let decide = mkGenDecideEqGoal rectype g in
  (tclTHENS (cut decide) [default_auto;decideGralEquality]) g
;;
let compare c1 c2 g =
  let rectype = pf_type_of g c1 in
  let decide = mkDecideEqGoal rectype c1 c2 g in
  (tclTHENS (cut decide)
    [(tclTHEN intro
       (tclTHEN (tclLAST_HYP simplest_case) clear_last));
     decideEquality c1 c2]) g
;;
\end{verbatim}
Next, for each of the tactics that will have an entry in the grammar we construct the associated dynamic one to be registered in the table of tactics. Such a dynamic tactic can be used to overload a tactic name with several similar tactics. For example, the tactic proving the general decidability property and the one proving a particular instance for two terms can be grouped together with the following convention: if the user provides two terms as arguments, then the specialized tactic is used; if no argument is provided then the general tactic is invoked.
\begin{verbatim}
let dyn_decideEquality args g =
  match args with
    [(COMMAND com1);(COMMAND com2)] ->
      let c1 = pf_constr_of_com g com1
      and c2 = pf_constr_of_com g com2 in
      decideEquality c1 c2 g
  | [] -> decideGralEquality g
  | _ -> error "Invalid arguments for dynamic tactic"
;;
add_tactic "DecideEquality" dyn_decideEquality
;;

let dyn_compare args g =
  match args with
    [(COMMAND com1);(COMMAND com2)] ->
      let c1 = pf_constr_of_com g com1
      and c2 = pf_constr_of_com g com2 in
      compare c1 c2 g
  | _ -> error "Invalid arguments for dynamic tactic"
;;
add_tactic "Compare" dyn_compare
;;
\end{verbatim}
This completes the implementation of the tactic. We turn now to the \Coq file \texttt{Eqdecide.v}.
\subsection{The Grammar Rules}
Associated with the implementation of the tactic there is a \Coq\ file containing the grammar and pretty-printing rules for the new tactic, and the commands to generate an object module that can then be loaded dynamically during a \Coq\ session. In order to generate an ML module, the \Coq\ file must contain a \texttt{Declare ML Module} command for all the \ocaml{} files concerning the implementation of the tactic --in our case there is only one file, the file \texttt{eqdecide.ml}:
\begin{verbatim}
Declare ML Module "eqdecide".
\end{verbatim}
The following grammar and pretty-printing rules are self-explanatory.
We refer the reader to the Section \ref{Grammar} for the details: \begin{verbatim} Grammar tactic simple_tactic := EqDecideRuleG1 [ "Decide" "Equality" comarg($com1) comarg($com2)] -> [(DecideEquality $com1 $com2)] | EqDecideRuleG2 [ "Decide" "Equality" ] -> [(DecideEquality)] | CompareRule [ "Compare" comarg($com1) comarg($com2)] -> [(Compare $com1 $com2)]. Syntax tactic level 0: EqDecideRulePP1 [(DecideEquality)] -> ["Decide" "Equality"] | EqDecideRulePP2 [(DecideEquality $com1 $com2)] -> ["Decide" "Equality" $com1 $com2] | ComparePP [(Compare $com1 $com2)] -> ["Compare" $com1 $com2]. \end{verbatim} \paragraph{Important:} The names used to label the abstract syntax tree in the grammar rules ---in this case ``DecideEquality'' and ``Compare''--- must be the same as the name used to register the tactic in the tactics table. This is what makes the links between the input entered by the user and the tactic executed by the interpreter. \subsection{Loading the Tactic} Once the module \texttt{EqDecide.v} has been compiled, the tactic can be dynamically loaded using the \texttt{Require} command. \begin{coq_example} Require EqDecide. Goal (x,y:nat){x=y}+{~x=y}. Decide Equality. \end{coq_example} The implementation of the tactic can be accessed through the tactical \texttt{Info}: \begin{coq_example} Undo. Info Decide Equality. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Remark that the task performed by the tactic \texttt{solveRightBranch} is not displayed, since we have chosen to hide its implementation. \section[Testing and Debugging your Tactic]{Testing and Debugging your Tactic\label{test-and-debug}} When your tactic does not behave as expected, it is possible to trace it dynamically from \Coq. In order to do this, you have first to leave the toplevel of \Coq, and come back to the \ocaml{} interpreter. This can be done using the command \texttt{Drop} (see Section~\ref{Drop}). Once in the \ocaml{} toplevel, load the file \texttt{tactics/include.ml}. This file installs several pretty printers for proof trees, goals, terms, abstract syntax trees, names, etc. It also contains the function \texttt{go:unit -> unit} that enables to go back to \Coq's toplevel. The modules \texttt{Tacmach} and \texttt{Pfedit} contain some basic functions for extracting information from the state of the proof engine. Such functions can be used to debug your tactic if necessary. Let us mention here some of them: \begin{description} \fun{val get\_pftreestate : unit -> pftreestate} {Projects the current state of the proof engine.} \fun{val proof\_of\_pftreestate : pftreestate -> proof} {Projects the current state of the proof tree. A pretty-printer displays it in a readable form. 
}
\fun{val top\_goal\_of\_pftreestate : pftreestate -> goal sigma}
{Projects the goal and the existential variables mapping from the current state of the proof engine.}
\fun{val nth\_goal\_of\_pftreestate : int -> pftreestate -> goal sigma}
{Projects the goal and the mapping corresponding to the $n$-th subgoal that remains to be proven.}
\fun{val traverse : int -> pftreestate -> pftreestate}
{Yields the children of the node that the current state of the proof engine points to.}
\fun{val solve\_nth\_pftreestate : \\ \qquad int -> tactic -> pftreestate -> pftreestate}
{\\ Provides the new state of the proof engine obtained by applying a given tactic to some unproven sub-goal.}
\end{description}
Finally, the traditional \ocaml{} debugging tools like the directives \texttt{trace} and \texttt{untrace} can be used to follow the execution of your functions. Frequently, a better solution is to use the \ocaml{} debugger, see Chapter \ref{Utilities}.
\section[Concrete syntax for ML tactic and vernacular command]{Concrete syntax for ML tactic and vernacular command\label{Notations-for-ML-command}}
\subsection{The general case}
The standard way to bind an ML-written tactic or vernacular command to a concrete {\Coq} syntax is to use the \verb=TACTIC EXTEND= and \verb=VERNAC COMMAND EXTEND= macros. These macros can be used in any {\ocaml} file defining a (new) ML tactic or vernacular command. They are expanded into pure {\ocaml} code by the {\camlpppp} preprocessor of {\ocaml}. Concretely, files that use these macros need to be compiled by giving to {\tt ocamlc} the option
\verb=-pp "camlp4o -I $(COQTOP)/parsing grammar.cma pa_extend.cmo"=
\noindent which is the default for every file compiled by means of a Makefile generated by {\tt coq\_makefile} (see Chapter~\ref{Addoc-coqc}). So, just do \verb=make= in this latter case.
The syntax of the macros is given on Figure~\ref{EXTEND-syntax}. They can be used at any place of an {\ocaml} file where an ML sentence (called \verb=str_item= in the {\tt ocamlc} parser) is expected. For each rule, the left-hand side describes the grammar production and the right-hand side its interpretation, which must be an {\ocaml} expression. Each grammar production starts with the concrete name of the tactic or command in {\Coq} and is followed by arguments, possibly separated by terminal symbols or words.
Here is an example: \begin{verbatim} TACTIC EXTEND Replace [ "replace" constr(c1) "with" constr(c2) ] -> [ replace c1 c2 ] END \end{verbatim} \newcommand{\grule}{\textrm{\textsl{rule}}} \newcommand{\stritem}{\textrm{\textsl{ocaml\_str\_item}}} \newcommand{\camlexpr}{\textrm{\textsl{ocaml\_expr}}} \newcommand{\arginfo}{\textrm{\textsl{argument\_infos}}} \newcommand{\lident}{\textrm{\textsl{lower\_ident}}} \newcommand{\argument}{\textrm{\textsl{argument}}} \newcommand{\entry}{\textrm{\textsl{entry}}} \newcommand{\argtype}{\textrm{\textsl{argtype}}} \begin{figure} \begin{tabular}{|lcll|} \hline {\stritem} & ::= & \multicolumn{2}{l|}{{\tt TACTIC EXTEND} {\ident} \nelist{\grule}{$|$} {\tt END}}\\ & $|$ & \multicolumn{2}{l|}{{\tt VERNAC COMMAND EXTEND} {\ident} \nelist{\grule}{$|$} {\tt END}}\\ &&\multicolumn{2}{l|}{}\\ {\grule} & ::= & \multicolumn{2}{l|}{{\tt [} {\str} \sequence{\argument}{} {\tt ] -> [} {\camlexpr} {\tt ]}}\\ &&\multicolumn{2}{l|}{}\\ {\argument} & ::= & {\str} &\mbox{(terminal)}\\ & $|$ & {\entry} {\tt (} {\lident} {\tt )} &\mbox{(non-terminal)}\\ &&\multicolumn{2}{l|}{}\\ {\entry} & ::= & {\tt string} & (a string)\\ & $|$ & {\tt preident} & (an identifier typed as a {\tt string})\\ & $|$ & {\tt ident} & (an identifier of type {\tt identifier})\\ & $|$ & {\tt global} & (a qualified identifier)\\ & $|$ & {\tt constr} & (a {\Coq} term)\\ & $|$ & {\tt openconstr} & (a {\Coq} term with holes)\\ & $|$ & {\tt sort} & (a {\Coq} sort)\\ & $|$ & {\tt tactic} & (an ${\cal L}_{tac}$ expression)\\ & $|$ & {\tt constr\_with\_bindings} & (a {\Coq} term with a list of bindings\footnote{as for the tactics {\tt apply} and {\tt elim}})\\ & $|$ & {\tt int\_or\_var} & (an integer or an identifier denoting an integer)\\ & $|$ & {\tt quantified\_hypothesis} & (a quantified hypothesis\footnote{as for the tactics {\tt intros until}})\\ & $|$ & {\tt {\entry}\_opt} & (an optional {\entry} )\\ & $|$ & {\tt ne\_{\entry}\_list} & (a non empty list of {\entry})\\ & $|$ & {\tt {\entry}\_list} & (a list of {\entry})\\ & $|$ & {\tt bool} & (a boolean: no grammar rule, just for typing)\\ & $|$ & {\lident} & (a user-defined entry)\\ \hline \end{tabular} \caption{Syntax of the macros binding {\ocaml} tactics or commands to a {\Coq} syntax} \label{EXTEND-syntax} \end{figure} There is a set of predefined non-terminal entries which are automatically translated into an {\ocaml} object of a given type. The type is not the same for tactics and for vernacular commands. 
It is given in the following table:
\begin{small}
\noindent
\begin{tabular}{|l|l|l|}
\hline
{\entry} & {\it type for tactics} & {\it type for commands} \\
\hline
{\tt string} & {\tt string} & {\tt string}\\
{\tt preident} & {\tt string} & {\tt string}\\
{\tt ident} & {\tt identifier} & {\tt identifier}\\
{\tt global} & {\tt global\_reference} & {\tt qualid}\\
{\tt constr} & {\tt constr} & {\tt constr\_expr}\\
{\tt openconstr} & {\tt open\_constr} & {\tt constr\_expr}\\
{\tt sort} & {\tt sorts} & {\tt rawsort}\\
{\tt tactic} & {\tt glob\_tactic\_expr * tactic} & {\tt raw\_tactic\_expr}\\
{\tt constr\_with\_bindings} & {\tt constr with\_bindings} & {\tt constr\_expr with\_bindings}\\
{\tt int\_or\_var} & {\tt int or\_var} & {\tt int or\_var}\\
{\tt quantified\_hypothesis} & {\tt quantified\_hypothesis} & {\tt quantified\_hypothesis}\\
{\tt {\entry}\_opt} & {\it the type of entry} {\tt option} & {\it the type of entry} {\tt option}\\
{\tt ne\_{\entry}\_list} & {\it the type of entry} {\tt list} & {\it the type of entry} {\tt list}\\
{\tt {\entry}\_list} & {\it the type of entry} {\tt list} & {\it the type of entry} {\tt list}\\
{\tt bool} & {\tt bool} & {\tt bool}\\
{\lident} & {user-provided, cf.\ next section} & {user-provided, cf.\ next section}\\
\hline
\end{tabular}
\end{small}
\bigskip
Notice that {\entry} consists of a single identifier and that the {\tt \_opt}, {\tt \_list}, ... modifiers are part of the identifier. Here is another example: a tactic which either takes a non-empty list of identifiers and executes the {\ocaml} function {\tt subst}, or takes no arguments and executes the {\ocaml} function {\tt subst\_all}.
\begin{verbatim}
TACTIC EXTEND Subst
| [ "subst" ne_ident_list(l) ] -> [ subst l ]
| [ "subst" ] -> [ subst_all ]
END
\end{verbatim}
\subsection{Adding grammar entries for tactic or command arguments}
In case parsing the arguments of the tactic or the vernacular command involves grammar entries other than the predefined entries listed above, you have to declare a new entry using the macros \verb=ARGUMENT EXTEND= or \verb=VERNAC ARGUMENT EXTEND=. The syntax is given on Figure~\ref{ARGUMENT-EXTEND-syntax}. Notice that arguments declared by \verb=ARGUMENT EXTEND= can be used for arguments of both tactics and vernacular commands while arguments declared by \verb=VERNAC ARGUMENT EXTEND= can only be used by vernacular commands.
For \verb=VERNAC ARGUMENT EXTEND=, the identifier is the name of the entry and it must be a valid {\ocaml} identifier (in particular, it must be lowercase). The grammar rules work as before, except that they do not have to start with a terminal symbol or word. As an example, here is how the {\Coq} {\tt Extraction Language {\it language}} command parses its argument:
\begin{verbatim}
VERNAC ARGUMENT EXTEND language
| [ "Ocaml" ] -> [ Ocaml ]
| [ "Haskell" ] -> [ Haskell ]
| [ "Scheme" ] -> [ Scheme ]
END
\end{verbatim}
For tactic arguments, and especially for \verb=ARGUMENT EXTEND=, the procedure is more subtle because tactics are objects of the {\Coq} environment which can be printed and interpreted. The syntax thus requires extra information providing a printer and a type telling how the argument behaves. Here is an example of entry parsing a pair of optional {\Coq} terms.
\begin{verbatim} let pp_minus_div_arg pr_constr pr_tactic (omin,odiv) = if omin=None && odiv=None then mt() else spc() ++ str "with" ++ pr_opt (fun c -> str "minus := " ++ pr_constr c) omin ++ pr_opt (fun c -> str "div := " ++ pr_constr c) odiv ARGUMENT EXTEND minus_div_arg TYPED AS constr_opt * constr_opt PRINTED BY pp_minus_div_arg | [ "with" minusarg(m) divarg_opt(d) ] -> [ Some m, d ] | [ "with" divarg(d) minusarg_opt(m) ] -> [ m, Some d ] | [ ] -> [ None, None ] END \end{verbatim} Notice that the type {\tt constr\_opt * constr\_opt} tells that the object behaves as a pair of optional {\Coq} terms, i.e. as an object of {\ocaml} type {\tt constr option * constr option} if in a \verb=TACTIC EXTEND= macro and of type {\tt constr\_expr option * constr\_expr option} if in a \verb=VERNAC COMMAND EXTEND= macro. As for the printer, it must be a function expecting a printer for terms, a printer for tactics and returning a printer for the created argument. Especially, each sub-{\term} and each sub-{\tac} in the argument must be typed by the corresponding printers. Otherwise, the {\ocaml} code will not be well-typed. \Rem The entry {\tt bool} is bound to no syntax but it can be used to give the type of an argument as in the following example: \begin{verbatim} let pr_orient _prc _prt = function | true -> mt () | false -> str " <-" ARGUMENT EXTEND orient TYPED AS bool PRINTED BY pr_orient | [ "->" ] -> [ true ] | [ "<-" ] -> [ false ] | [ ] -> [ true ] END \end{verbatim} \begin{figure} \begin{tabular}{|lcl|} \hline {\stritem} & ::= & {\tt ARGUMENT EXTEND} {\ident} {\arginfo} {\nelist{\grule}{$|$}} {\tt END}\\ & $|$ & {\tt VERNAC ARGUMENT EXTEND} {\ident} {\nelist{\grule}{$|$}} {\tt END}\\ \\ {\arginfo} & ::= & {\tt TYPED AS} {\argtype} \\ && {\tt PRINTED BY} {\lident} \\ %&& \zeroone{{\tt INTERPRETED BY} {\lident}}\\ %&& \zeroone{{\tt GLOBALIZED BY} {\lident}}\\ %&& \zeroone{{\tt SUBSTITUTED BY} {\lident}}\\ %&& \zeroone{{\tt RAW\_TYPED AS} {\lident} {\tt RAW\_PRINTED BY} {\lident}}\\ %&& \zeroone{{\tt GLOB\_TYPED AS} {\lident} {\tt GLOB\_PRINTED BY} {\lident}}\\ \\ {\argtype} & ::= & {\argtype} {\tt *} {\argtype} \\ & $|$ & {\entry} \\ \hline \end{tabular} \caption{Syntax of the macros binding {\ocaml} tactics or commands to a {\Coq} syntax} \label{ARGUMENT-EXTEND-syntax} \end{figure} %\end{document} coq-8.4pl2/doc/refman/Setoid.tex0000640000175000001440000010104211776416511015624 0ustar notinusers\newtheorem{cscexample}{Example} \achapter{\protect{User defined equalities and relations}} \aauthor{Matthieu Sozeau} \tacindex{setoid\_replace} \label{setoid_replace} This chapter presents the extension of several equality related tactics to work over user-defined structures (called setoids) that are equipped with ad-hoc equivalence relations meant to behave as equalities. Actually, the tactics have also been generalized to relations weaker then equivalences (e.g. rewriting systems). This documentation is adapted from the previous setoid documentation by Claudio Sacerdoti Coen (based on previous work by Cl\'ement Renard). The new implementation is a drop-in replacement for the old one \footnote{Nicolas Tabareau helped with the gluing}, hence most of the documentation still applies. The work is a complete rewrite of the previous implementation, based on the type class infrastructure. It also improves on and generalizes the previous implementation in several ways: \begin{itemize} \item User-extensible algorithm. 
The algorithm is separated in two parts: generations of the rewriting constraints (done in ML) and solving of these constraints using type class resolution. As type class resolution is extensible using tactics, this allows users to define general ways to solve morphism constraints. \item Sub-relations. An example extension to the base algorithm is the ability to define one relation as a subrelation of another so that morphism declarations on one relation can be used automatically for the other. This is done purely using tactics and type class search. \item Rewriting under binders. It is possible to rewrite under binders in the new implementation, if one provides the proper morphisms. Again, most of the work is handled in the tactics. \item First-class morphisms and signatures. Signatures and morphisms are ordinary Coq terms, hence they can be manipulated inside Coq, put inside structures and lemmas about them can be proved inside the system. Higher-order morphisms are also allowed. \item Performance. The implementation is based on a depth-first search for the first solution to a set of constraints which can be as fast as linear in the size of the term, and the size of the proof term is linear in the size of the original term. Besides, the extensibility allows the user to customize the proof-search if necessary. \end{itemize} \asection{Relations and morphisms} A parametric \emph{relation} \texttt{R} is any term of type \texttt{forall ($x_1$:$T_1$) \ldots ($x_n$:$T_n$), relation $A$}. The expression $A$, which depends on $x_1$ \ldots $x_n$, is called the \emph{carrier} of the relation and \texttt{R} is said to be a relation over \texttt{A}; the list $x_1,\ldots,x_n$ is the (possibly empty) list of parameters of the relation. \firstexample \begin{cscexample}[Parametric relation] It is possible to implement finite sets of elements of type \texttt{A} as unordered list of elements of type \texttt{A}. The function \texttt{set\_eq: forall (A: Type), relation (list A)} satisfied by two lists with the same elements is a parametric relation over \texttt{(list A)} with one parameter \texttt{A}. The type of \texttt{set\_eq} is convertible with \texttt{forall (A: Type), list A -> list A -> Prop}. \end{cscexample} An \emph{instance} of a parametric relation \texttt{R} with $n$ parameters is any term \texttt{(R $t_1$ \ldots $t_n$)}. Let \texttt{R} be a relation over \texttt{A} with $n$ parameters. A term is a parametric proof of reflexivity for \texttt{R} if it has type \texttt{forall ($x_1$:$T_1$) \ldots ($x_n$:$T_n$), reflexive (R $x_1$ \ldots $x_n$)}. Similar definitions are given for parametric proofs of symmetry and transitivity. \begin{cscexample}[Parametric relation (cont.)] The \texttt{set\_eq} relation of the previous example can be proved to be reflexive, symmetric and transitive. \end{cscexample} A parametric unary function $f$ of type \texttt{forall ($x_1$:$T_1$) \ldots ($x_n$:$T_n$), $A_1$ -> $A_2$} covariantly respects two parametric relation instances $R_1$ and $R_2$ if, whenever $x, y$ satisfy $R_1~x~y$, their images $(f~x)$ and $(f~y)$ satisfy $R_2~(f~x)~(f~y)$ . An $f$ that respects its input and output relations will be called a unary covariant \emph{morphism}. We can also say that $f$ is a monotone function with respect to $R_1$ and $R_2$. The sequence $x_1,\ldots x_n$ represents the parameters of the morphism. Let $R_1$ and $R_2$ be two parametric relations. 
The \emph{signature} of a parametric morphism of type \texttt{forall ($x_1$:$T_1$) \ldots ($x_n$:$T_n$), $A_1$ -> $A_2$} that covariantly respects two instances $I_{R_1}$ and $I_{R_2}$ of $R_1$ and $R_2$ is written $I_{R_1} \texttt{++>} I_{R_2}$. Notice that the special arrow \texttt{++>}, which reminds the reader of covariance, is placed between the two relation instances, not between the two carriers. The signature relation instances and morphism will be typed in a context introducing variables for the parameters. The previous definitions are extended straightforwardly to $n$-ary morphisms, that are required to be simultaneously monotone on every argument. Morphisms can also be contravariant in one or more of their arguments. A morphism is contravariant on an argument associated to the relation instance $R$ if it is covariant on the same argument when the inverse relation $R^{-1}$ (\texttt{inverse R} in Coq) is considered. The special arrow \texttt{-{}->} is used in signatures for contravariant morphisms. Functions having arguments related by symmetric relations instances are both covariant and contravariant in those arguments. The special arrow \texttt{==>} is used in signatures for morphisms that are both covariant and contravariant. An instance of a parametric morphism $f$ with $n$ parameters is any term \texttt{f $t_1$ \ldots $t_n$}. \begin{cscexample}[Morphisms] Continuing the previous example, let \texttt{union: forall (A: Type), list A -> list A -> list A} perform the union of two sets by appending one list to the other. \texttt{union} is a binary morphism parametric over \texttt{A} that respects the relation instance \texttt{(set\_eq A)}. The latter condition is proved by showing \texttt{forall (A: Type) (S1 S1' S2 S2': list A), set\_eq A S1 S1' -> set\_eq A S2 S2' -> set\_eq A (union A S1 S2) (union A S1' S2')}. The signature of the function \texttt{union A} is \texttt{set\_eq A ==> set\_eq A ==> set\_eq A} for all \texttt{A}. \end{cscexample} \begin{cscexample}[Contravariant morphism] The division function \texttt{Rdiv: R -> R -> R} is a morphism of signature \texttt{le ++> le -{}-> le} where \texttt{le} is the usual order relation over real numbers. Notice that division is covariant in its first argument and contravariant in its second argument. \end{cscexample} Leibniz equality is a relation and every function is a morphism that respects Leibniz equality. Unfortunately, Leibniz equality is not always the intended equality for a given structure. In the next section we will describe the commands to register terms as parametric relations and morphisms. Several tactics that deal with equality in \Coq\ can also work with the registered relations. The exact list of tactic will be given in Sect.~\ref{setoidtactics}. For instance, the tactic \texttt{reflexivity} can be used to close a goal $R~n~n$ whenever $R$ is an instance of a registered reflexive relation. However, the tactics that replace in a context $C[]$ one term with another one related by $R$ must verify that $C[]$ is a morphism that respects the intended relation. Currently the verification consists in checking whether $C[]$ is a syntactic composition of morphism instances that respects some obvious compatibility constraints. \begin{cscexample}[Rewriting] Continuing the previous examples, suppose that the user must prove \texttt{set\_eq int (union int (union int S1 S2) S2) (f S1 S2)} under the hypothesis \texttt{H: set\_eq int S2 (nil int)}. 
It is possible to use the \texttt{rewrite} tactic to replace the first two occurrences of \texttt{S2} with \texttt{nil int} in the goal since the context \texttt{set\_eq int (union int (union int S1 nil) nil) (f S1 S2)}, being a composition of morphism instances, is a morphism. However, the tactic will fail to replace the third occurrence of \texttt{S2} unless \texttt{f} has also been declared as a morphism.
\end{cscexample}
\asection{Adding new relations and morphisms}
A parametric relation \textit{Aeq}\texttt{: forall ($y_1 : \beta_1$ \ldots $y_m : \beta_m$), relation (A $t_1$ \ldots $t_n$)} over \textit{(A : $\alpha_1$ -> \ldots $\alpha_n$ -> }\texttt{Type}) can be declared with the following command:
\comindex{Add Parametric Relation}
\begin{quote}
\texttt{Add Parametric Relation} ($x_1 : T_1$) \ldots ($x_n : T_k$) : \textit{(A $t_1$ \ldots $t_n$) (Aeq $t'_1$ \ldots $t'_m$)}\\
~\zeroone{\texttt{reflexivity proved by} \textit{refl}}\\
~\zeroone{\texttt{symmetry proved by} \textit{sym}}\\
~\zeroone{\texttt{transitivity proved by} \textit{trans}}\\
\texttt{~as} \textit{id}.
\end{quote}
after having required the \texttt{Setoid} module with the \texttt{Require Setoid} command. The identifier \textit{id} gives a unique name to the morphism and it is used by the command to generate fresh names for automatically provided lemmas used internally.
Notice that the carrier and relation parameters may refer to the context of variables introduced at the beginning of the declaration, but the instances need not be made only of variables. Also notice that \textit{A} is \emph{not} required to be a term having the same parameters as \textit{Aeq}, although that is often the case in practice (this departs from the previous implementation).
\comindex{Add Relation}
In case the carrier and relations are not parametric, one can use the command \texttt{Add Relation} instead, whose syntax is the same except there is no local context.
The proofs of reflexivity, symmetry and transitivity can be omitted if the relation is not an equivalence relation. The proofs must be instances of the corresponding relation definitions: e.g. the proof of reflexivity must have a type convertible to \texttt{reflexive (A $t_1$ \ldots $t_n$) (Aeq $t'_1$ \ldots $t'_n$)}. Each proof may refer to the introduced variables as well.
\begin{cscexample}[Parametric relation]
For Leibniz equality, we may declare:
\texttt{Add Parametric Relation (A : Type) :} \texttt{A (@eq A)}\\
~\zeroone{\texttt{reflexivity proved by} \texttt{@refl\_equal A}}\\
\ldots
\end{cscexample}
Some tactics (\texttt{reflexivity}, \texttt{symmetry}, \texttt{transitivity}) work only on relations that respect the expected properties. The remaining tactics (\texttt{replace}, \texttt{rewrite} and derived tactics such as \texttt{autorewrite}) do not require any properties over the relation. However, they are able to replace terms with related ones only in contexts that are syntactic compositions of parametric morphism instances declared with the following command.
\comindex{Add Parametric Morphism}
\begin{quote}
\texttt{Add Parametric Morphism} ($x_1 : T_1$) \ldots ($x_k : T_k$)\\
(\textit{f $t_1$ \ldots $t_n$})\\
\texttt{~with signature} \textit{sig}\\
\texttt{~as id}.\\
\texttt{Proof}\\
~\ldots\\
\texttt{Qed}
\end{quote}
The command declares \textit{f} as a parametric morphism of signature \textit{sig}.
The identifier \textit{id} gives a unique name to the morphism and it is used as the base name of the type class instance definition and as the name of the lemma that proves the well-definedness of the morphism. The parameters of the morphism as well as the signature may refer to the context of variables. The command asks the user to prove interactively that \textit{f} respects the relations identified from the signature. \begin{cscexample} We start the example by assuming a small theory over homogeneous sets and we declare set equality as a parametric equivalence relation and union of two sets as a parametric morphism. \begin{coq_example*} Require Export Setoid. Require Export Relation_Definitions. Set Implicit Arguments. Parameter set: Type -> Type. Parameter empty: forall A, set A. Parameter eq_set: forall A, set A -> set A -> Prop. Parameter union: forall A, set A -> set A -> set A. Axiom eq_set_refl: forall A, reflexive _ (eq_set (A:=A)). Axiom eq_set_sym: forall A, symmetric _ (eq_set (A:=A)). Axiom eq_set_trans: forall A, transitive _ (eq_set (A:=A)). Axiom empty_neutral: forall A (S: set A), eq_set (union S (empty A)) S. Axiom union_compat: forall (A : Type), forall x x' : set A, eq_set x x' -> forall y y' : set A, eq_set y y' -> eq_set (union x y) (union x' y'). Add Parametric Relation A : (set A) (@eq_set A) reflexivity proved by (eq_set_refl (A:=A)) symmetry proved by (eq_set_sym (A:=A)) transitivity proved by (eq_set_trans (A:=A)) as eq_set_rel. Add Parametric Morphism A : (@union A) with signature (@eq_set A) ==> (@eq_set A) ==> (@eq_set A) as union_mor. Proof. exact (@union_compat A). Qed. \end{coq_example*} \end{cscexample} Is is possible to reduce the burden of specifying parameters using (maximally inserted) implicit arguments. If \texttt{A} is always set as maximally implicit in the previous example, one can write: \begin{coq_eval} Reset Initial. Require Export Setoid. Require Export Relation_Definitions. Parameter set: Type -> Type. Parameter empty: forall {A}, set A. Parameter eq_set: forall {A}, set A -> set A -> Prop. Parameter union: forall {A}, set A -> set A -> set A. Axiom eq_set_refl: forall {A}, reflexive (set A) eq_set. Axiom eq_set_sym: forall {A}, symmetric (set A) eq_set. Axiom eq_set_trans: forall {A}, transitive (set A) eq_set. Axiom empty_neutral: forall A (S: set A), eq_set (union S empty) S. Axiom union_compat: forall (A : Type), forall x x' : set A, eq_set x x' -> forall y y' : set A, eq_set y y' -> eq_set (union x y) (union x' y'). \end{coq_eval} \begin{coq_example*} Add Parametric Relation A : (set A) eq_set reflexivity proved by eq_set_refl symmetry proved by eq_set_sym transitivity proved by eq_set_trans as eq_set_rel. Add Parametric Morphism A : (@union A) with signature eq_set ==> eq_set ==> eq_set as union_mor. Proof. exact (@union_compat A). Qed. \end{coq_example*} We proceed now by proving a simple lemma performing a rewrite step and then applying reflexivity, as we would do working with Leibniz equality. Both tactic applications are accepted since the required properties over \texttt{eq\_set} and \texttt{union} can be established from the two declarations above. \begin{coq_example*} Goal forall (S: set nat), eq_set (union (union S empty) S) (union S S). Proof. intros. rewrite empty_neutral. reflexivity. Qed. \end{coq_example*} The tables of relations and morphisms are managed by the type class instance mechanism. 
The behavior on section close is to generalize the instances by the variables of the section (and possibly hypotheses used in the proofs of instance declarations) but not to export them in the rest of the development for proof search. One can use the \texttt{Existing Instance} command to do so outside the section, using the name of the declared morphism suffixed by \texttt{\_Morphism}, or use the \texttt{Global} modifier for the corresponding class instance declaration (see \S\ref{setoid:first-class}) at definition time. When loading a compiled file or importing a module, all the declarations of this module will be loaded. \asection{Rewriting and non reflexive relations} To replace only one argument of an n-ary morphism it is necessary to prove that all the other arguments are related to themselves by the respective relation instances. \begin{cscexample} To replace \texttt{(union S empty)} with \texttt{S} in \texttt{(union (union S empty) S) (union S S)} the rewrite tactic must exploit the monotony of \texttt{union} (axiom \texttt{union\_compat} in the previous example). Applying \texttt{union\_compat} by hand we are left with the goal \texttt{eq\_set (union S S) (union S S)}. \end{cscexample} When the relations associated to some arguments are not reflexive, the tactic cannot automatically prove the reflexivity goals, that are left to the user. Setoids whose relation are partial equivalence relations (PER) are useful to deal with partial functions. Let \texttt{R} be a PER. We say that an element \texttt{x} is defined if \texttt{R x x}. A partial function whose domain comprises all the defined elements only is declared as a morphism that respects \texttt{R}. Every time a rewriting step is performed the user must prove that the argument of the morphism is defined. \begin{cscexample} Let \texttt{eqO} be \texttt{fun x y => x = y $\land$ ~x$\neq$ 0} (the smaller PER over non zero elements). Division can be declared as a morphism of signature \texttt{eq ==> eq0 ==> eq}. Replace \texttt{x} with \texttt{y} in \texttt{div x n = div y n} opens the additional goal \texttt{eq0 n n} that is equivalent to \texttt{n=n $\land$ n$\neq$0}. \end{cscexample} \asection{Rewriting and non symmetric relations} When the user works up to relations that are not symmetric, it is no longer the case that any covariant morphism argument is also contravariant. As a result it is no longer possible to replace a term with a related one in every context, since the obtained goal implies the previous one if and only if the replacement has been performed in a contravariant position. In a similar way, replacement in an hypothesis can be performed only if the replaced term occurs in a covariant position. \begin{cscexample}[Covariance and contravariance] Suppose that division over real numbers has been defined as a morphism of signature \texttt{Z.div: Z.lt ++> Z.lt -{}-> Z.lt} (i.e. \texttt{Z.div} is increasing in its first argument, but decreasing on the second one). Let \texttt{<} denotes \texttt{Z.lt}. Under the hypothesis \texttt{H: x < y} we have \texttt{k < x / y -> k < x / x}, but not \texttt{k < y / x -> k < x / x}. Dually, under the same hypothesis \texttt{k < x / y -> k < y / y} holds, but \texttt{k < y / x -> k < y / y} does not. Thus, if the current goal is \texttt{k < x / x}, it is possible to replace only the second occurrence of \texttt{x} (in contravariant position) with \texttt{y} since the obtained goal must imply the current one. 
On the contrary, if \texttt{k < x / x} is an hypothesis, it is possible to replace only the first occurrence of \texttt{x} (in covariant position) with \texttt{y} since the current hypothesis must imply the obtained one. \end{cscexample} Contrary to the previous implementation, no specific error message will be raised when trying to replace a term that occurs in the wrong position. It will only fail because the rewriting constraints are not satisfiable. However it is possible to use the \texttt{at} modifier to specify which occurrences should be rewritten. As expected, composing morphisms together propagates the variance annotations by switching the variance every time a contravariant position is traversed. \begin{cscexample} Let us continue the previous example and let us consider the goal \texttt{x / (x / x) < k}. The first and third occurrences of \texttt{x} are in a contravariant position, while the second one is in covariant position. More in detail, the second occurrence of \texttt{x} occurs covariantly in \texttt{(x / x)} (since division is covariant in its first argument), and thus contravariantly in \texttt{x / (x / x)} (since division is contravariant in its second argument), and finally covariantly in \texttt{x / (x / x) < k} (since \texttt{<}, as every transitive relation, is contravariant in its first argument with respect to the relation itself). \end{cscexample} \asection{Rewriting in ambiguous setoid contexts} One function can respect several different relations and thus it can be declared as a morphism having multiple signatures. \begin{cscexample} Union over homogeneous lists can be given all the following signatures: \texttt{eq ==> eq ==> eq} (\texttt{eq} being the equality over ordered lists) \texttt{set\_eq ==> set\_eq ==> set\_eq} (\texttt{set\_eq} being the equality over unordered lists up to duplicates), \texttt{multiset\_eq ==> multiset\_eq ==> multiset\_eq} (\texttt{multiset\_eq} being the equality over unordered lists). \end{cscexample} To declare multiple signatures for a morphism, repeat the \texttt{Add Morphism} command. When morphisms have multiple signatures it can be the case that a rewrite request is ambiguous, since it is unclear what relations should be used to perform the rewriting. Contrary to the previous implementation, the tactic will always choose the first possible solution to the set of constraints generated by a rewrite and will not try to find \emph{all} possible solutions to warn the user about. \asection{First class setoids and morphisms} \label{setoid:first-class} The implementation is based on a first-class representation of properties of relations and morphisms as type classes. That is, the various combinations of properties on relations and morphisms are represented as records and instances of theses classes are put in a hint database. For example, the declaration: \begin{quote} \texttt{Add Parametric Relation} ($x_1 : T_1$) \ldots ($x_n : T_k$) : \textit{(A $t_1$ \ldots $t_n$) (Aeq $t'_1$ \ldots $t'_m$)}\\ ~\zeroone{\texttt{reflexivity proved by} \textit{refl}}\\ ~\zeroone{\texttt{symmetry proved by} \textit{sym}}\\ ~\zeroone{\texttt{transitivity proved by} \textit{trans}}\\ \texttt{~as} \textit{id}. 
\end{quote} is equivalent to an instance declaration: \begin{quote} \texttt{Instance} ($x_1 : T_1$) \ldots ($x_n : T_k$) \texttt{=>} \textit{id} : \texttt{@Equivalence} \textit{(A $t_1$ \ldots $t_n$) (Aeq $t'_1$ \ldots $t'_m$)} :=\\ ~\zeroone{\texttt{Equivalence\_Reflexive :=} \textit{refl}}\\ ~\zeroone{\texttt{Equivalence\_Symmetric :=} \textit{sym}}\\ ~\zeroone{\texttt{Equivalence\_Transitive :=} \textit{trans}}. \end{quote} The declaration itself amounts to the definition of an object of the record type \texttt{Coq.Classes.RelationClasses.Equivalence} and a hint added to the \texttt{typeclass\_instances} hint database. Morphism declarations are also instances of a type class defined in \texttt{Classes.Morphisms}. See the documentation on type classes \ref{typeclasses} and the theories files in \texttt{Classes} for further explanations. One can inform the rewrite tactic about morphisms and relations just by using the typeclass mechanism to declare them using \texttt{Instance} and \texttt{Context} vernacular commands. Any object of type \texttt{Proper} (the type of morphism declarations) in the local context will also be automatically used by the rewriting tactic to solve constraints. Other representations of first class setoids and morphisms can also be handled by encoding them as records. In the following example, the projections of the setoid relation and of the morphism function can be registered as parametric relations and morphisms. \begin{cscexample}[First class setoids] \begin{coq_example*} Require Import Relation_Definitions Setoid. Record Setoid: Type := { car:Type; eq:car->car->Prop; refl: reflexive _ eq; sym: symmetric _ eq; trans: transitive _ eq }. Add Parametric Relation (s : Setoid) : (@car s) (@eq s) reflexivity proved by (refl s) symmetry proved by (sym s) transitivity proved by (trans s) as eq_rel. Record Morphism (S1 S2:Setoid): Type := { f:car S1 ->car S2; compat: forall (x1 x2: car S1), eq S1 x1 x2 -> eq S2 (f x1) (f x2) }. Add Parametric Morphism (S1 S2 : Setoid) (M : Morphism S1 S2) : (@f S1 S2 M) with signature (@eq S1 ==> @eq S2) as apply_mor. Proof. apply (compat S1 S2 M). Qed. Lemma test: forall (S1 S2:Setoid) (m: Morphism S1 S2) (x y: car S1), eq S1 x y -> eq S2 (f _ _ m x) (f _ _ m y). Proof. intros. rewrite H. reflexivity. Qed. \end{coq_example*} \end{cscexample} \asection{Tactics enabled on user provided relations} \label{setoidtactics} The following tactics, all prefixed by \texttt{setoid\_}, deal with arbitrary registered relations and morphisms. Moreover, all the corresponding unprefixed tactics (i.e. \texttt{reflexivity, symmetry, transitivity, replace, rewrite}) have been extended to fall back to their prefixed counterparts when the relation involved is not Leibniz equality. Notice, however, that using the prefixed tactics it is possible to pass additional arguments such as \texttt{using relation}. 
\medskip \comindex{setoid\_reflexivity} \texttt{setoid\_reflexivity} \comindex{setoid\_symmetry} \texttt{setoid\_symmetry} \zeroone{\texttt{in} \textit{ident}} \comindex{setoid\_transitivity} \texttt{setoid\_transitivity} \comindex{setoid\_rewrite} \texttt{setoid\_rewrite} \zeroone{\textit{orientation}} \textit{term} ~\zeroone{\texttt{at} \textit{occs}} ~\zeroone{\texttt{in} \textit{ident}} \comindex{setoid\_replace} \texttt{setoid\_replace} \textit{term} \texttt{with} \textit{term} ~\zeroone{\texttt{in} \textit{ident}} ~\zeroone{\texttt{using relation} \textit{term}} ~\zeroone{\texttt{by} \textit{tactic}} \medskip The \texttt{using relation} arguments cannot be passed to the unprefixed form. The latter argument tells the tactic what parametric relation should be used to replace the first tactic argument with the second one. If omitted, it defaults to the \texttt{DefaultRelation} instance on the type of the objects. By default, it means the most recent \texttt{Equivalence} instance in the environment, but it can be customized by declaring new \texttt{DefaultRelation} instances. As Leibniz equality is a declared equivalence, it will fall back to it if no other relation is declared on a given type. Every derived tactic that is based on the unprefixed forms of the tactics considered above will also work up to user defined relations. For instance, it is possible to register hints for \texttt{autorewrite} that are not proof of Leibniz equalities. In particular it is possible to exploit \texttt{autorewrite} to simulate normalization in a term rewriting system up to user defined equalities. \asection{Printing relations and morphisms} The \texttt{Print Instances} command can be used to show the list of currently registered \texttt{Reflexive} (using \texttt{Print Instances Reflexive}), \texttt{Symmetric} or \texttt{Transitive} relations, \texttt{Equivalence}s, \texttt{PreOrder}s, \texttt{PER}s, and Morphisms (implemented as \texttt{Proper} instances). When the rewriting tactics refuse to replace a term in a context because the latter is not a composition of morphisms, the \texttt{Print Instances} commands can be useful to understand what additional morphisms should be registered. \asection{Deprecated syntax and backward incompatibilities} Due to backward compatibility reasons, the following syntax for the declaration of setoids and morphisms is also accepted. \comindex{Add Setoid} \begin{quote} \texttt{Add Setoid} \textit{A Aeq ST} \texttt{as} \textit{ident} \end{quote} where \textit{Aeq} is a congruence relation without parameters, \textit{A} is its carrier and \textit{ST} is an object of type \texttt{(Setoid\_Theory A Aeq)} (i.e. a record packing together the reflexivity, symmetry and transitivity lemmas). Notice that the syntax is not completely backward compatible since the identifier was not required. \comindex{Add Morphism} \begin{quote} \texttt{Add Morphism} \textit{f}:\textit{ident}.\\ Proof.\\ \ldots\\ Qed. \end{quote} The latter command also is restricted to the declaration of morphisms without parameters. It is not fully backward compatible since the property the user is asked to prove is slightly different: for $n$-ary morphisms the hypotheses of the property are permuted; moreover, when the morphism returns a proposition, the property is now stated using a bi-implication in place of a simple implication. In practice, porting an old development to the new semantics is usually quite simple. Notice that several limitations of the old implementation have been lifted. 
In particular, it is now possible to declare several relations with the same carrier and several signatures for the same morphism. Moreover, it is now also possible to declare several morphisms having the same signature. Finally, the replace and rewrite tactics can be used to replace terms in contexts that were refused by the old implementation. As discussed in the next section, the semantics of the new \texttt{setoid\_rewrite} command differs slightly from the old one and \texttt{rewrite}. \asection{Rewriting under binders} \textbf{Warning}: Due to compatibility issues, this feature is enabled only when calling the \texttt{setoid\_rewrite} tactics directly and not \texttt{rewrite}. To be able to rewrite under binding constructs, one must declare morphisms with respect to pointwise (setoid) equivalence of functions. Example of such morphisms are the standard \texttt{all} and \texttt{ex} combinators for universal and existential quantification respectively. They are declared as morphisms in the \texttt{Classes.Morphisms\_Prop} module. For example, to declare that universal quantification is a morphism for logical equivalence: \begin{coq_eval} Reset Initial. Require Import Setoid Morphisms. \end{coq_eval} \begin{coq_example} Instance all_iff_morphism (A : Type) : Proper (pointwise_relation A iff ==> iff) (@all A). Proof. simpl_relation. \end{coq_example} \begin{coq_eval} Admitted. \end{coq_eval} One then has to show that if two predicates are equivalent at every point, their universal quantifications are equivalent. Once we have declared such a morphism, it will be used by the setoid rewriting tactic each time we try to rewrite under an \texttt{all} application (products in \Prop{} are implicitly translated to such applications). Indeed, when rewriting under a lambda, binding variable $x$, say from $P~x$ to $Q~x$ using the relation \texttt{iff}, the tactic will generate a proof of \texttt{pointwise\_relation A iff (fun x => P x) (fun x => Q x)} from the proof of \texttt{iff (P x) (Q x)} and a constraint of the form \texttt{Proper (pointwise\_relation A iff ==> ?) m} will be generated for the surrounding morphism \texttt{m}. Hence, one can add higher-order combinators as morphisms by providing signatures using pointwise extension for the relations on the functional arguments (or whatever subrelation of the pointwise extension). For example, one could declare the \texttt{map} combinator on lists as a morphism: \begin{coq_eval} Require Import List. Set Implicit Arguments. Inductive list_equiv {A:Type} (eqA : relation A) : relation (list A) := | eq_nil : list_equiv eqA nil nil | eq_cons : forall x y, eqA x y -> forall l l', list_equiv eqA l l' -> list_equiv eqA (x :: l) (y :: l'). \end{coq_eval} \begin{coq_example*} Instance map_morphism `{Equivalence A eqA, Equivalence B eqB} : Proper ((eqA ==> eqB) ==> list_equiv eqA ==> list_equiv eqB) (@map A B). \end{coq_example*} where \texttt{list\_equiv} implements an equivalence on lists parameterized by an equivalence on the elements. Note that when one does rewriting with a lemma under a binder using \texttt{setoid\_rewrite}, the application of the lemma may capture the bound variable, as the semantics are different from rewrite where the lemma is first matched on the whole term. With the new \texttt{setoid\_rewrite}, matching is done on each subterm separately and in its local environment, and all matches are rewritten \emph{simultaneously} by default. 
The semantics of the previous \texttt{setoid\_rewrite} implementation can almost be recovered using the \texttt{at 1} modifier.
\asection{Sub-relations}
Sub-relations can be used to specify that one relation is included in another, so that morphism signatures for one can be used for the other. If a signature mentions a relation $R$ on the left of an arrow \texttt{==>}, then the signature also applies for any relation $S$ that is smaller than $R$, and the inverse applies on the right of an arrow. One can then declare only a few morphism instances that generate the complete set of signatures for a particular constant.
By default, the only declared subrelation is \texttt{iff}, which is a subrelation of \texttt{impl} and \texttt{inverse impl} (the dual of implication). That's why we can declare only two morphisms for conjunction: \texttt{Proper (impl ==> impl ==> impl) and} and \texttt{Proper (iff ==> iff ==> iff) and}. This is sufficient to satisfy any rewriting constraints arising from a rewrite using \texttt{iff}, \texttt{impl} or \texttt{inverse impl} through \texttt{and}.
Sub-relations are implemented in \texttt{Classes.Morphisms} and are a prime example of a mostly user-space extension of the algorithm.
\asection{Constant unfolding}
The resolution tactic is based on type classes and hence regards user-defined constants as transparent by default. This may slow down the resolution due to a lot of unifications (all the declared \texttt{Proper} instances are tried at each node of the search tree). To speed it up, declare your constant as rigid for proof search using the command \texttt{Typeclasses Opaque} (see \S \ref{TypeclassesTransparency}).
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
coq-8.4pl2/doc/refman/Omega.tex0000640000175000001440000001525711776416511015435 0ustar notinusers\achapter{Omega: a solver of quantifier-free problems in Presburger Arithmetic}
\aauthor{Pierre Cr\'egut}
\label{OmegaChapter}
\asection{Description of {\tt omega}}
\tacindex{omega}
\label{description}
{\tt omega} solves a goal in Presburger arithmetic, i.e. a universally quantified formula made of equations and inequations. Equations may be specified either on the type \verb=nat= of natural numbers or on the type \verb=Z= of binary-encoded integer numbers. Formulas on \verb=nat= are automatically injected into \verb=Z=. The procedure may use any hypothesis of the current proof session to solve the goal.
Multiplication is handled by {\tt omega} but only goals where at least one of the two multiplicands of products is a constant are solvable. This is the restriction meant by ``Presburger arithmetic''.
If the tactic cannot solve the goal, it fails with an error message. In any case, the computation eventually stops.
\asubsection{Arithmetical goals recognized by {\tt omega}}
{\tt omega} applies only to quantifier-free formulas built from the connectors
\begin{quote}
\verb=/\, \/, ~, ->=
\end{quote}
on atomic formulas. Atomic formulas are built from the predicates
\begin{quote}
\verb!=, le, lt, gt, ge!
\end{quote}
on \verb=nat= or from the predicates
\begin{quote}
\verb!=, <, <=, >, >=!
\end{quote}
on \verb=Z=. In expressions of type \verb=nat=, {\tt omega} recognizes
\begin{quote}
\verb!plus, minus, mult, pred, S, O!
\end{quote}
and in expressions of type \verb=Z=, {\tt omega} recognizes
\begin{quote}
\verb!+, -, *, Z.succ!, and constants.
\end{quote}
All expressions of type \verb=nat= or \verb=Z= not built on these operators are considered abstractly as if they were arbitrary variables of type \verb=nat= or \verb=Z=.
\asubsection{Messages from {\tt omega}}
\label{errors}
When {\tt omega} does not solve the goal, one of the following errors is generated:
\begin{ErrMsgs}
\item \errindex{omega can't solve this system}
This may happen if your goal is not quantifier-free (if it is universally quantified, try {\tt intros} first; if it contains existential quantifiers too, {\tt omega} is not strong enough to solve your goal). This may also happen if your goal contains arithmetical operators unknown to {\tt omega}. Finally, your goal may be really wrong!
\item \errindex{omega: Not a quantifier-free goal}
If your goal is universally quantified, you should first apply {\tt intro} as many times as needed.
\item \errindex{omega: Unrecognized predicate or connective: {\sl ident}}
\item \errindex{omega: Unrecognized atomic proposition: {\sl prop}}
\item \errindex{omega: Can't solve a goal with proposition variables}
\item \errindex{omega: Unrecognized proposition}
\item \errindex{omega: Can't solve a goal with non-linear products}
\item \errindex{omega: Can't solve a goal with equality on {\sl type}}
\end{ErrMsgs}
%% This code is disabled for the moment
% \asubsection{Control over the output}
% There are some flags that can be set to get more information on the procedure
% \begin{itemize}
% \item \verb=Time= to get the time used by the procedure
% \item \verb=System= to visualize the normalized systems.
% \item \verb=Action= to visualize the actions performed by the OMEGA
% procedure (see \ref{technical}).
% \end{itemize}
% \comindex{Set omega Time}
% \comindex{UnSet omega Time}
% \comindex{Switch omega Time}
% \comindex{Set omega System}
% \comindex{UnSet omega System}
% \comindex{Switch omega System}
% \comindex{Set omega Action}
% \comindex{UnSet omega Action}
% \comindex{Switch omega Action}
% Use {\tt Set omega {\rm\sl flag}} to set the flag
% {\rm\sl flag}. Use {\tt Unset omega {\rm\sl flag}} to unset it and
% {\tt Switch omega {\rm\sl flag}} to toggle it.
\section{Using {\tt omega}}
The {\tt omega} tactic does not belong to the core system. It should be loaded by
\begin{coq_example*}
Require Import Omega.
Open Scope Z_scope.
\end{coq_example*}
\example{}
\begin{coq_example}
Goal forall m n:Z, 1 + 2 * m <> 2 * n.
intros; omega.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}
\example{}
\begin{coq_example}
Goal forall z:Z, z > 0 -> 2 * z + 1 > z.
intro; omega.
\end{coq_example}
% Other examples can be found in \verb+$COQLIB/theories/DEMOS/OMEGA+.
\asection{Technical data}
\label{technical}
\asubsection{Overview of the tactic}
\begin{itemize}
\item The goal is negated twice and the first negation is introduced as an hypothesis.
\item Hypotheses are decomposed into simple equations or inequations. Multiple goals may result from this phase.
\item Equations and inequations over \verb=nat= are translated into \verb=Z=; multiple goals may result from the translation of subtraction.
\item Equations and inequations are normalized.
\item Goals are solved by the {\it OMEGA} decision procedure.
\item The script of the solution is replayed.
\end{itemize} \asubsection{Overview of the {\it OMEGA} decision procedure} The {\it OMEGA} decision procedure involved in the {\tt omega} tactic uses a small subset of the decision procedure presented in \begin{quote} "The Omega Test: a fast and practical integer programming algorithm for dependence analysis", William Pugh, Communication of the ACM , 1992, p 102-114. \end{quote} Here is an overview, look at the original paper for more information. \begin{itemize} \item Equations and inequations are normalized by division by the GCD of their coefficients. \item Equations are eliminated, using the Banerjee test to get a coefficient equal to one. \item Note that each inequation defines a half space in the space of real value of the variables. \item Inequations are solved by projecting on the hyperspace defined by cancelling one of the variable. They are partitioned according to the sign of the coefficient of the eliminated variable. Pairs of inequations from different classes define a new edge in the projection. \item Redundant inequations are eliminated or merged in new equations that can be eliminated by the Banerjee test. \item The last two steps are iterated until a contradiction is reached (success) or there is no more variable to eliminate (failure). \end{itemize} It may happen that there is a real solution and no integer one. The last steps of the Omega procedure (dark shadow) are not implemented, so the decision procedure is only partial. \asection{Bugs} \begin{itemize} \item The simplification procedure is very dumb and this results in many redundant cases to explore. \item Much too slow. \item Certainly other bugs! You can report them to \begin{quote} \url{Pierre.Cregut@cnet.francetelecom.fr} \end{quote} \end{itemize} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-ltac.tex0000640000175000001440000013345311776416511016501 0ustar notinusers\chapter[The tactic language]{The tactic language\label{TacticLanguage}} %\geometry{a4paper,body={5in,8in}} This chapter gives a compact documentation of Ltac, the tactic language available in {\Coq}. We start by giving the syntax, and next, we present the informal semantics. If you want to know more regarding this language and especially about its foundations, you can refer to~\cite{Del00}. Chapter~\ref{Tactics-examples} is devoted to giving examples of use of this language on small but also with non-trivial problems. \section{Syntax} \def\tacexpr{\textrm{\textsl{expr}}} \def\tacexprlow{\textrm{\textsl{tacexpr$_1$}}} \def\tacexprinf{\textrm{\textsl{tacexpr$_2$}}} \def\tacexprpref{\textrm{\textsl{tacexpr$_3$}}} \def\atom{\textrm{\textsl{atom}}} %%\def\recclause{\textrm{\textsl{rec\_clause}}} \def\letclause{\textrm{\textsl{let\_clause}}} \def\matchrule{\textrm{\textsl{match\_rule}}} \def\contextrule{\textrm{\textsl{context\_rule}}} \def\contexthyp{\textrm{\textsl{context\_hyp}}} \def\tacarg{\nterm{tacarg}} \def\cpattern{\nterm{cpattern}} The syntax of the tactic language is given Figures~\ref{ltac} and~\ref{ltac_aux}. See Chapter~\ref{BNF-syntax} for a description of the BNF metasyntax used in these grammar rules. Various already defined entries will be used in this chapter: entries {\naturalnumber}, {\integer}, {\ident}, {\qualid}, {\term}, {\cpattern} and {\atomictac} represent respectively the natural and integer numbers, the authorized identificators and qualified names, {\Coq}'s terms and patterns and all the atomic tactics described in Chapter~\ref{Tactics}. 
The syntax of {\cpattern} is the same as that of terms, but it is extended with pattern matching metavariables. In {\cpattern}, a pattern-matching metavariable is represented with the syntax {\tt ?id} where {\tt id} is an {\ident}. The notation {\tt \_} can also be used to denote metavariable whose instance is irrelevant. In the notation {\tt ?id}, the identifier allows us to keep instantiations and to make constraints whereas {\tt \_} shows that we are not interested in what will be matched. On the right hand side of pattern-matching clauses, the named metavariable are used without the question mark prefix. There is also a special notation for second-order pattern-matching problems: in an applicative pattern of the form {\tt @?id id$_1$ \ldots id$_n$}, the variable {\tt id} matches any complex expression with (possible) dependencies in the variables {\tt id$_1$ \ldots id$_n$} and returns a functional term of the form {\tt fun id$_1$ \ldots id$_n$ => {\term}}. The main entry of the grammar is {\tacexpr}. This language is used in proof mode but it can also be used in toplevel definitions as shown in Figure~\ref{ltactop}. \begin{Remarks} \item The infix tacticals ``\dots\ {\tt ||} \dots'' and ``\dots\ {\tt ;} \dots'' are associative. \item In {\tacarg}, there is an overlap between {\qualid} as a direct tactic argument and {\qualid} as a particular case of {\term}. The resolution is done by first looking for a reference of the tactic language and if it fails, for a reference to a term. To force the resolution as a reference of the tactic language, use the form {\tt ltac :} {\qualid}. To force the resolution as a reference to a term, use the syntax {\tt ({\qualid})}. \item As shown by the figure, tactical {\tt ||} binds more than the prefix tacticals {\tt try}, {\tt repeat}, {\tt do}, {\tt info} and {\tt abstract} which themselves bind more than the postfix tactical ``{\tt \dots\ ;[ \dots\ ]}'' which binds more than ``\dots\ {\tt ;} \dots''. 
For instance \begin{quote} {\tt try repeat \tac$_1$ || \tac$_2$;\tac$_3$;[\tac$_{31}$|\dots|\tac$_{3n}$];\tac$_4$.} \end{quote} is understood as \begin{quote} {\tt (try (repeat (\tac$_1$ || \tac$_2$)));} \\ {\tt ((\tac$_3$;[\tac$_{31}$|\dots|\tac$_{3n}$]);\tac$_4$).} \end{quote} \end{Remarks} \begin{figure}[htbp] \begin{centerframe} \begin{tabular}{lcl} {\tacexpr} & ::= & {\tacexpr} {\tt ;} {\tacexpr}\\ & | & {\tacexpr} {\tt ; [} \nelist{\tacexpr}{|} {\tt ]}\\ & | & {\tacexprpref}\\ \\ {\tacexprpref} & ::= & {\tt do} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} {\tacexprpref}\\ & | & {\tt info} {\tacexprpref}\\ & | & {\tt progress} {\tacexprpref}\\ & | & {\tt repeat} {\tacexprpref}\\ & | & {\tt try} {\tacexprpref}\\ & | & {\tt timeout} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} {\tacexprpref}\\ & | & {\tacexprinf} \\ \\ {\tacexprinf} & ::= & {\tacexprlow} {\tt ||} {\tacexprpref}\\ & | & {\tacexprlow}\\ \\ {\tacexprlow} & ::= & {\tt fun} \nelist{\name}{} {\tt =>} {\atom}\\ & | & {\tt let} \zeroone{\tt rec} \nelist{\letclause}{\tt with} {\tt in} {\atom}\\ & | & {\tt match goal with} \nelist{\contextrule}{\tt |} {\tt end}\\ & | & {\tt match reverse goal with} \nelist{\contextrule}{\tt |} {\tt end}\\ & | & {\tt match} {\tacexpr} {\tt with} \nelist{\matchrule}{\tt |} {\tt end}\\ & | & {\tt lazymatch goal with} \nelist{\contextrule}{\tt |} {\tt end}\\ & | & {\tt lazymatch reverse goal with} \nelist{\contextrule}{\tt |} {\tt end}\\ & | & {\tt lazymatch} {\tacexpr} {\tt with} \nelist{\matchrule}{\tt |} {\tt end}\\ & | & {\tt abstract} {\atom}\\ & | & {\tt abstract} {\atom} {\tt using} {\ident} \\ & | & {\tt first [} \nelist{\tacexpr}{\tt |} {\tt ]}\\ & | & {\tt solve [} \nelist{\tacexpr}{\tt |} {\tt ]}\\ & | & {\tt idtac} \sequence{\messagetoken}{}\\ & | & {\tt fail} \zeroone{\naturalnumber} \sequence{\messagetoken}{}\\ & | & {\tt fresh} ~|~ {\tt fresh} {\qstring}\\ & | & {\tt context} {\ident} {\tt [} {\term} {\tt ]}\\ & | & {\tt eval} {\nterm{redexpr}} {\tt in} {\term}\\ & | & {\tt type of} {\term}\\ & | & {\tt external} {\qstring} {\qstring} \nelist{\tacarg}{}\\ & | & {\tt constr :} {\term}\\ & | & \atomictac\\ & | & {\qualid} \nelist{\tacarg}{}\\ & | & {\atom}\\ \\ {\atom} & ::= & {\qualid} \\ & | & ()\\ & | & {\integer}\\ & | & {\tt (} {\tacexpr} {\tt )}\\ \\ {\messagetoken}\!\!\!\!\!\! 
& ::= & {\qstring} ~|~ {\ident} ~|~ {\integer} \\ \end{tabular} \end{centerframe} \caption{Syntax of the tactic language} \label{ltac} \end{figure} \begin{figure}[htbp] \begin{centerframe} \begin{tabular}{lcl} \tacarg & ::= & {\qualid}\\ & $|$ & {\tt ()} \\ & $|$ & {\tt ltac :} {\atom}\\ & $|$ & {\term}\\ \\ \letclause & ::= & {\ident} \sequence{\name}{} {\tt :=} {\tacexpr}\\ \\ \contextrule & ::= & \nelist{\contexthyp}{\tt ,} {\tt |-}{\cpattern} {\tt =>} {\tacexpr}\\ & $|$ & {\tt |-} {\cpattern} {\tt =>} {\tacexpr}\\ & $|$ & {\tt \_ =>} {\tacexpr}\\ \\ \contexthyp & ::= & {\name} {\tt :} {\cpattern}\\ & $|$ & {\name} {\tt :=} {\cpattern} \zeroone{{\tt :} {\cpattern}}\\ \\ \matchrule & ::= & {\cpattern} {\tt =>} {\tacexpr}\\ & $|$ & {\tt context} {\zeroone{\ident}} {\tt [} {\cpattern} {\tt ]} {\tt =>} {\tacexpr}\\ & $|$ & {\tt appcontext} {\zeroone{\ident}} {\tt [} {\cpattern} {\tt ]} {\tt =>} {\tacexpr}\\ & $|$ & {\tt \_ =>} {\tacexpr}\\ \end{tabular} \end{centerframe} \caption{Syntax of the tactic language (continued)} \label{ltac_aux} \end{figure} \begin{figure}[ht] \begin{centerframe} \begin{tabular}{lcl} \nterm{top} & ::= & \zeroone{\tt Local} {\tt Ltac} \nelist{\nterm{ltac\_def}} {\tt with} \\ \\ \nterm{ltac\_def} & ::= & {\ident} \sequence{\ident}{} {\tt :=} {\tacexpr}\\ & $|$ &{\qualid} \sequence{\ident}{} {\tt ::=}{\tacexpr} \end{tabular} \end{centerframe} \caption{Tactic toplevel definitions} \label{ltactop} \end{figure} %% %% Semantics %% \section{Semantics} %\index[tactic]{Tacticals} \index{Tacticals} %\label{Tacticals} Tactic expressions can only be applied in the context of a goal. The evaluation yields either a term, an integer or a tactic. Intermediary results can be terms or integers but the final result must be a tactic which is then applied to the current goal. There is a special case for {\tt match goal} expressions of which the clauses evaluate to tactics. Such expressions can only be used as end result of a tactic expression (never as argument of a non recursive local definition or of an application). The rest of this section explains the semantics of every construction of Ltac. %% \subsection{Values} %% Values are given by Figure~\ref{ltacval}. All these values are tactic values, %% i.e. to be applied to a goal, except {\tt Fun}, {\tt Rec} and $arg$ values. 
%% \begin{figure}[ht] %% \noindent{}\framebox[6in][l] %% {\parbox{6in} %% {\begin{center} %% \begin{tabular}{lp{0.1in}l} %% $vexpr$ & ::= & $vexpr$ {\tt ;} $vexpr$\\ %% & | & $vexpr$ {\tt ; [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt %% ]}\\ %% & | & $vatom$\\ %% \\ %% $vatom$ & ::= & {\tt Fun} \nelist{\inputfun}{} {\tt ->} {\tacexpr}\\ %% %& | & {\tt Rec} \recclause\\ %% & | & %% {\tt Rec} \nelist{\recclause}{\tt And} {\tt In} %% {\tacexpr}\\ %% & | & %% {\tt Match Context With} {\it (}$context\_rule$ {\tt |}{\it )}$^*$ %% $context\_rule$\\ %% & | & {\tt (} $vexpr$ {\tt )}\\ %% & | & $vatom$ {\tt Orelse} $vatom$\\ %% & | & {\tt Do} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} $vatom$\\ %% & | & {\tt Repeat} $vatom$\\ %% & | & {\tt Try} $vatom$\\ %% & | & {\tt First [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt ]}\\ %% & | & {\tt Solve [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt ]}\\ %% & | & {\tt Idtac}\\ %% & | & {\tt Fail}\\ %% & | & {\primitivetactic}\\ %% & | & $arg$ %% \end{tabular} %% \end{center}}} %% \caption{Values of ${\cal L}_{tac}$} %% \label{ltacval} %% \end{figure} %% \subsection{Evaluation}
\subsubsection[Sequence]{Sequence\tacindex{;} \index{Tacticals!;@{\tt {\tac$_1$};\tac$_2$}}} A sequence is an expression of the following form: \begin{quote} {\tacexpr}$_1$ {\tt ;} {\tacexpr}$_2$ \end{quote} The expressions {\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated to $v_1$ and $v_2$, which have to be tactic values. The tactic $v_1$ is then applied and $v_2$ is applied to every subgoal generated by the application of $v_1$. Sequence is left-associative. \subsubsection[General sequence]{General sequence\tacindex{;[\ldots$\mid$\ldots$\mid$\ldots]}}
%\tacindex{; [ | ]}
%\index{; [ | ]@{\tt ;[\ldots$\mid$\ldots$\mid$\ldots]}}
\index{Tacticals!; [ \mid ]@{\tt {\tac$_0$};[{\tac$_1$}$\mid$\ldots$\mid$\tac$_n$]}} A general sequence has the following form: \begin{quote} {\tacexpr}$_0$ {\tt ; [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} \end{quote} The expressions {\tacexpr}$_i$ are evaluated to $v_i$, for $i=0,...,n$, and all have to be tactic values. The tactic $v_0$ is applied and $v_i$ is applied to the $i$-th subgoal generated by the application of $v_0$, for $i=1,...,n$. It fails if the application of $v_0$ does not generate exactly $n$ subgoals. \begin{Variants} \item If no tactic is given for the $i$-th generated subgoal, it behaves as if the tactic {\tt idtac} were given. For instance, {\tt split ; [ | auto ]} is a shortcut for {\tt split ; [ idtac | auto ]}. \item {\tacexpr}$_0$ {\tt ; [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_i$ {\tt |} {\tt ..} {\tt |} {\tacexpr}$_{i+1+j}$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} In this variant, {\tt idtac} is used for the subgoals numbered from $i+1$ to $i+j$ (assuming $n$ is the number of subgoals). Note that {\tt ..} is part of the syntax, while $...$ is the meta-symbol used to describe a list of {\tacexpr} of arbitrary length. \item {\tacexpr}$_0$ {\tt ; [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_i$ {\tt |} {\tacexpr} {\tt ..} {\tt |} {\tacexpr}$_{i+1+j}$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} In this variant, {\tacexpr} is used instead of {\tt idtac} for the subgoals numbered from $i+1$ to $i+j$. \end{Variants} \subsubsection[For loop]{For loop\tacindex{do} \index{Tacticals!do@{\tt do}}} There is a for loop that repeats a tactic {\num} times: \begin{quote} {\tt do} {\num} {\tacexpr} \end{quote} {\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is applied {\num} times.
Supposing ${\num}>1$, after the first application of $v$, $v$ is applied, at least once, to the generated subgoals and so on. It fails if the application of $v$ fails before the {\num} applications have been completed. \subsubsection[Repeat loop]{Repeat loop\tacindex{repeat} \index{Tacticals!repeat@{\tt repeat}}} We have a repeat loop with: \begin{quote} {\tt repeat} {\tacexpr} \end{quote} {\tacexpr} is evaluated to $v$. If $v$ denotes a tactic, this tactic is applied to the goal. If the application succeeds, the tactic is applied recursively to all the generated subgoals until it eventually fails. The recursion stops in a subgoal when the tactic has failed. The tactic {\tt repeat {\tacexpr}} itself never fails. \subsubsection[Error catching]{Error catching\tacindex{try} \index{Tacticals!try@{\tt try}}} We can catch tactic errors with: \begin{quote} {\tt try} {\tacexpr} \end{quote} {\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is applied. If the application of $v$ fails, it catches the error and leaves the goal unchanged. If the level of the exception is positive, then the exception is re-raised with its level decremented. \subsubsection[Detecting progress]{Detecting progress\tacindex{progress}} We can check if a tactic made progress with: \begin{quote} {\tt progress} {\tacexpr} \end{quote} {\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is applied. If the application of $v$ produced one subgoal equal to the initial goal (up to syntactical equality), then an error of level 0 is raised. \ErrMsg \errindex{Failed to progress} \subsubsection[Branching]{Branching\tacindex{$\mid\mid$} \index{Tacticals!orelse@{\tt $\mid\mid$}}} We can easily branch with the following structure: \begin{quote} {\tacexpr}$_1$ {\tt ||} {\tacexpr}$_2$ \end{quote} {\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated to $v_1$ and $v_2$. $v_1$ and $v_2$ must be tactic values. $v_1$ is applied and if it fails to progress then $v_2$ is applied. Branching is left-associative. \subsubsection[First tactic to work]{First tactic to work\tacindex{first} \index{Tacticals!first@{\tt first}}} We may consider the first tactic to work (i.e. which does not fail) among a panel of tactics: \begin{quote} {\tt first [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} \end{quote} {\tacexpr}$_i$ are evaluated to $v_i$ and $v_i$ must be tactic values, for $i=1,...,n$. Supposing $n>1$, it applies $v_1$; if it succeeds, it stops, otherwise it tries to apply $v_2$, and so on. It fails when there is no applicable tactic. \ErrMsg \errindex{No applicable tactic} \subsubsection[Solving]{Solving\tacindex{solve} \index{Tacticals!solve@{\tt solve}}} We may consider the first tactic to solve the goal (i.e. which generates no subgoal) among a panel of tactics: \begin{quote} {\tt solve [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} \end{quote} {\tacexpr}$_i$ are evaluated to $v_i$ and $v_i$ must be tactic values, for $i=1,...,n$. Supposing $n>1$, it applies $v_1$; if it solves the goal, it stops, otherwise it tries to apply $v_2$, and so on. It fails if there is no solving tactic. \ErrMsg \errindex{Cannot solve the goal} \subsubsection[Identity]{Identity\tacindex{idtac} \index{Tacticals!idtac@{\tt idtac}}} The constant {\tt idtac} is the identity tactic: it leaves any goal unchanged but it appears in the proof script. \variant {\tt idtac \nelist{\messagetoken}{}} This prints the given tokens. Strings and integers are printed literally. If a (term) variable is given, its contents are printed.
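As a small illustration of the tacticals above (the lemma names below are arbitrary), the following scripts combine them on trivial goals: {\tt repeat} applies {\tt split} as long as it succeeds, the sequence then solves every generated subgoal, {\tt try} turns a failure into a success, and {\tt ||} falls back on its right argument when its left argument fails:
\begin{coq_example}
Lemma tacticals_ex1 : True /\ (True /\ True).
Proof.
repeat split; exact I.
Qed.
Lemma tacticals_ex2 : True.
Proof.
try reflexivity. (* reflexivity fails on True; try catches the error *)
reflexivity || exact I. (* the left branch fails, so exact I is applied *)
Qed.
\end{coq_example}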
\subsubsection[Failing]{Failing\tacindex{fail} \index{Tacticals!fail@{\tt fail}}} The tactic {\tt fail} is the always-failing tactic: it does not solve any goal. It is useful for defining other tacticals since it can be caught by {\tt try} or {\tt match goal}. \begin{Variants} \item {\tt fail $n$}\\ The number $n$ is the failure level. If no level is specified, it defaults to $0$. The level is used by {\tt try} and {\tt match goal}. If $0$, it makes {\tt match goal} consider the next clause (backtracking). If non-zero, the current {\tt match goal} block or {\tt try} command is aborted and the level is decremented. \item {\tt fail \nelist{\messagetoken}{}}\\ The given tokens are used for printing the failure message. \item {\tt fail $n$ \nelist{\messagetoken}{}}\\ This is a combination of the previous variants. \end{Variants} \ErrMsg \errindex{Tactic Failure {\it message} (level $n$)}. \subsubsection[Timeout]{Timeout\tacindex{timeout} \index{Tacticals!timeout@{\tt timeout}}} We can force a tactic to stop if it has not finished after a certain amount of time: \begin{quote} {\tt timeout} {\num} {\tacexpr} \end{quote} {\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is normally applied, except that it is interrupted after ${\num}$ seconds if it is still running. In this case the outcome is a failure. Warning: For the moment, {\tt timeout} is based on elapsed time in seconds, which is very machine-dependent: a script that works on a quick machine may fail on a slow one. The converse is even possible if you combine a {\tt timeout} with some other tacticals. This tactical is hence proposed only for convenience during debugging or other development phases; we strongly advise you not to leave any {\tt timeout} in final scripts. Note also that this tactical isn't available on the native Windows port of Coq. \subsubsection[Local definitions]{Local definitions\index{Ltac!let} \index{Ltac!let rec} \index{let!in Ltac} \index{let rec!in Ltac}} Local definitions can be done as follows: \begin{quote} {\tt let} {\ident}$_1$ {\tt :=} {\tacexpr}$_1$\\ {\tt with} {\ident}$_2$ {\tt :=} {\tacexpr}$_2$\\ ...\\ {\tt with} {\ident}$_n$ {\tt :=} {\tacexpr}$_n$ {\tt in}\\ {\tacexpr} \end{quote} Each {\tacexpr}$_i$ is evaluated to $v_i$; then {\tacexpr} is evaluated by substituting $v_i$ for each occurrence of {\ident}$_i$, for $i=1,...,n$. There are no dependencies between the {\tacexpr}$_i$ and the {\ident}$_i$. Local definitions can be recursive by using {\tt let rec} instead of {\tt let}. In this latter case, the definitions are evaluated lazily so that the {\tt rec} keyword can also be used in non-recursive cases so as to avoid the eager evaluation of local definitions. \subsubsection{Application} An application is an expression of the following form: \begin{quote} {\qualid} {\tacarg}$_1$ ... {\tacarg}$_n$ \end{quote} The reference {\qualid} must be bound to a defined tactic expecting at least $n$ arguments. The expressions {\tacarg}$_i$ are evaluated to $v_i$, for $i=1,...,n$.
%If {\tacexpr} is a {\tt Fun} or {\tt Rec} value then the body is evaluated by %substituting $v_i$ to the formal parameters, for $i=1,...,n$. For recursive %clauses, the bodies are lazily substituted (when an identifier to be evaluated %is the name of a recursive clause).
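As an illustration (the names {\tt solve\_true} and {\tt split\_then} below are arbitrary), an {\ltac} function can be applied to a previously defined tactic passed as argument, and a local definition can bind a tactic value that is then used in tactic position:
\begin{coq_example}
Ltac solve_true := exact I.
Ltac split_then tac := split; tac.
Lemma application_ex : True /\ True.
Proof.
split_then solve_true. (* split_then applied to the argument solve_true *)
Qed.
Lemma local_def_ex : True /\ True.
Proof.
let finish := solve_true in (split; finish).
Qed.
\end{coq_example}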
%\subsection{Application of tactic values} \subsubsection[Function construction]{Function construction\index{fun!in Ltac} \index{Ltac!fun}} A parameterized tactic can be built anonymously (without resorting to local definitions) with: \begin{quote} {\tt fun} {\ident${}_1$} ... {\ident${}_n$} {\tt =>} {\tacexpr} \end{quote} Indeed, local definitions of functions are a syntactic sugar for binding a {\tt fun} tactic to an identifier. \subsubsection[Pattern matching on terms]{Pattern matching on terms\index{Ltac!match} \index{match!in Ltac}} We can carry out pattern matching on terms with: \begin{quote} {\tt match} {\tacexpr} {\tt with}\\ ~~~{\cpattern}$_1$ {\tt =>} {\tacexpr}$_1$\\ ~{\tt |} {\cpattern}$_2$ {\tt =>} {\tacexpr}$_2$\\ ~...\\ ~{\tt |} {\cpattern}$_n$ {\tt =>} {\tacexpr}$_n$\\ ~{\tt |} {\tt \_} {\tt =>} {\tacexpr}$_{n+1}$\\ {\tt end} \end{quote} The expression {\tacexpr} is evaluated and should yield a term which is matched against {\cpattern}$_1$. The matching is non-linear: if a metavariable occurs more than once, it should match the same expression every time. It is first-order except on the variables of the form {\tt @?id} that occur in head position of an application. For these variables, the matching is second-order and returns a functional term. If the matching with {\cpattern}$_1$ succeeds, then {\tacexpr}$_1$ is evaluated into some value by substituting the pattern matching instantiations to the metavariables. If {\tacexpr}$_1$ evaluates to a tactic and the {\tt match} expression is in position to be applied to a goal (e.g. it is not bound to a variable by a {\tt let in}), then this tactic is applied. If the tactic succeeds, the list of resulting subgoals is the result of the {\tt match} expression. If {\tacexpr}$_1$ does not evaluate to a tactic or if the {\tt match} expression is not in position to be applied to a goal, then the result of the evaluation of {\tacexpr}$_1$ is the result of the {\tt match} expression. If the matching with {\cpattern}$_1$ fails, or if it succeeds but the evaluation of {\tacexpr}$_1$ fails, or if the evaluation of {\tacexpr}$_1$ succeeds but returns a tactic in execution position whose execution fails, then {\cpattern}$_2$ is used and so on. The pattern {\_} matches any term and shunts all remaining patterns if any. If all clauses fail (in particular, there is no pattern {\_}) then a no-matching-clause error is raised. \begin{ErrMsgs} \item \errindex{No matching clauses for match} No pattern can be used and, in particular, there is no {\tt \_} pattern. \item \errindex{Argument of match does not evaluate to a term} This happens when {\tacexpr} does not denote a term. \end{ErrMsgs} \begin{Variants} \item \index{lazymatch!in Ltac} \index{Ltac!lazymatch} Using {\tt lazymatch} instead of {\tt match} has an effect if the right-hand-side of a clause returns a tactic. With {\tt match}, the tactic is applied to the current goal (and the next clause is tried if it fails). With {\tt lazymatch}, the tactic is directly returned as the result of the whole {\tt lazymatch} block without being first tried to be applied to the goal. Typically, if the {\tt lazymatch} block is bound to some variable $x$ in a {\tt let in}, then tactic expression gets bound to the variable $x$. \item \index{context!in pattern} There is a special form of patterns to match a subterm against the pattern: \begin{quote} {\tt context} {\ident} {\tt [} {\cpattern} {\tt ]} \end{quote} It matches any term with a subterm matching {\cpattern}. 
If there is a match, the optional {\ident} is assigned the ``matched context'', i.e. the initial term where the matched subterm is replaced by a hole. The example below will show how to use such term contexts. If the evaluation of the right-hand-side of a valid match fails, the next matching subterm is tried. If no further subterm matches, the next clause is tried. Matching subterms are considered from top to bottom and from left to right (with respect to the raw printing obtained by setting option {\tt Printing All}, see Section~\ref{SetPrintingAll}). \begin{coq_example} Ltac f x := match x with context f [S ?X] => idtac X; (* To display the evaluation order *) assert (p := eq_refl 1 : X=1); (* To filter the case X=1 *) let x:= context f[O] in assert (x=O) (* To observe the context *) end. Goal True. f (3+4). \end{coq_example} \item \index{appcontext!in pattern} For historical reasons, {\tt context} considers $n$-ary applications such as {\tt (f 1 2)} as a whole, and not as a sequence of unary applications {\tt ((f 1) 2)}. Hence {\tt context [f ?x]} will fail to find a matching subterm in {\tt (f 1 2)}: if the pattern is a partial application, the matched subterms will necessarily be applications with exactly the same number of arguments. Alternatively, one may now use the following variant of {\tt context}: \begin{quote} {\tt appcontext} {\ident} {\tt [} {\cpattern} {\tt ]} \end{quote} The behavior of {\tt appcontext} is the same as that of {\tt context}, except that a matching subterm may be a partial application embedded in a longer application. For instance, in {\tt (f 1 2)}, an {\tt appcontext [f ?x]} will find the matching subterm {\tt (f 1)}. \end{Variants} \subsubsection[Pattern matching on goals]{Pattern matching on goals\index{Ltac!match goal} \index{Ltac!match reverse goal} \index{match goal!in Ltac} \index{match reverse goal!in Ltac}} We can perform pattern matching on goals using the following expression: \begin{quote} \begin{tabbing} {\tt match goal with}\\ ~~\={\tt |} $hyp_{1,1}${\tt ,}...{\tt ,}$hyp_{1,m_1}$ ~~{\tt |-}{\cpattern}$_1${\tt =>} {\tacexpr}$_1$\\ \>{\tt |} $hyp_{2,1}${\tt ,}...{\tt ,}$hyp_{2,m_2}$ ~~{\tt |-}{\cpattern}$_2${\tt =>} {\tacexpr}$_2$\\ ~~...\\ \>{\tt |} $hyp_{n,1}${\tt ,}...{\tt ,}$hyp_{n,m_n}$ ~~{\tt |-}{\cpattern}$_n${\tt =>} {\tacexpr}$_n$\\ \>{\tt |\_}~~~~{\tt =>} {\tacexpr}$_{n+1}$\\ {\tt end} \end{tabbing} \end{quote} If each hypothesis pattern $hyp_{1,i}$, with $i=1,...,m_1$, is matched (non-linear first-order unification) by an hypothesis of the goal and if {\cpattern}$_1$ is matched by the conclusion of the goal, then {\tacexpr}$_1$ is evaluated to $v_1$ by substituting the pattern-matching instantiations for the metavariables and binding the actual hypothesis names to the hypothesis names occurring in the hypothesis patterns. If $v_1$ is a tactic value, then it is applied to the goal. If this application fails, then another combination of hypotheses is tried with the same proof context pattern. If there is no other combination of hypotheses, then the second proof context pattern is tried and so on. If the next-to-last proof context pattern fails, then {\tacexpr}$_{n+1}$ is evaluated to $v_{n+1}$ and $v_{n+1}$ is applied. Note also that matching against subterms (using the {\tt context} {\ident} {\tt [} {\cpattern} {\tt ]}) is available and may itself induce extra backtracking. \ErrMsg \errindex{No matching clauses for match goal} No clause succeeds, i.e. all matching patterns, if any, fail at the application of the right-hand-side.
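As a small example of pattern matching on goals (the tactic and lemma names below are arbitrary), the following tactic looks for an equality hypothesis and rewrites with it:
\begin{coq_example}
Ltac rewrite_first_eq :=
  match goal with
  | H : ?x = ?y |- _ => rewrite H
  end.
Lemma match_goal_ex : forall n m : nat, n = m -> n + 0 = m + 0.
Proof.
intros n m Heq.
rewrite_first_eq. (* the hypothesis Heq : n = m is found and used *)
reflexivity.
Qed.
\end{coq_example}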
\medskip It is important to know that each hypothesis of the goal can be matched by at most one hypothesis pattern. The order of matching is the following: hypothesis patterns are examined from right to left (i.e. $hyp_{i,m_i}$ before $hyp_{i,1}$). For each hypothesis pattern, the goal hypotheses are matched in order (fresher hypotheses first), but it is possible to reverse this order (older first) with the {\tt match reverse goal with} variant. \variant \index{lazymatch goal!in Ltac} \index{Ltac!lazymatch goal} \index{lazymatch reverse goal!in Ltac} \index{Ltac!lazymatch reverse goal} Using {\tt lazymatch} instead of {\tt match} has an effect if the right-hand-side of a clause returns a tactic. With {\tt match}, the tactic is applied to the current goal (and the next clause is tried if it fails). With {\tt lazymatch}, the tactic is directly returned as the result of the whole {\tt lazymatch} block without first being applied to the goal. Typically, if the {\tt lazymatch} block is bound to some variable $x$ in a {\tt let in}, then the tactic expression gets bound to the variable $x$. \begin{coq_example} Ltac test_lazy := lazymatch goal with | _ => idtac "here"; fail | _ => idtac "wasn't lazy"; trivial end. Ltac test_eager := match goal with | _ => idtac "here"; fail | _ => idtac "wasn't lazy"; trivial end. Goal True. test_lazy || idtac "was lazy". test_eager || idtac "was lazy". \end{coq_example} \subsubsection[Filling a term context]{Filling a term context\index{context!in expression}} The following expression is not a tactic in the sense that it does not produce subgoals but generates a term to be used in tactic expressions: \begin{quote} {\tt context} {\ident} {\tt [} {\tacexpr} {\tt ]} \end{quote} {\ident} must denote a context variable bound by a {\tt context} pattern of a {\tt match} expression. This expression evaluates to the term obtained by filling the hole of the value of {\ident} with the value of {\tacexpr}. \ErrMsg \errindex{not a context variable} \subsubsection[Generating fresh hypothesis names]{Generating fresh hypothesis names\index{Ltac!fresh} \index{fresh!in Ltac}} Tactics sometimes have to generate new names for hypotheses. Letting the system decide a name with the {\tt intro} tactic is not so good since it is very awkward to retrieve the name the system gave. The following expression returns an identifier: \begin{quote} {\tt fresh} \nelist{\textrm{\textsl{component}}}{} \end{quote} It evaluates to an identifier unbound in the goal. This fresh identifier is obtained by concatenating the values of the \textrm{\textsl{component}}s (each of them is either an {\ident}, which has to refer to a name, or directly a name denoted by a {\qstring}). If the resulting name is already used, it is padded with a number so that it becomes fresh. If no component is given, the name is a fresh derivative of the name {\tt H}. \subsubsection[Computing in a constr]{Computing in a constr\index{Ltac!eval} \index{eval!in Ltac}} Evaluation of a term can be performed with: \begin{quote} {\tt eval} {\nterm{redexpr}} {\tt in} {\term} \end{quote} where \nterm{redexpr} is a reduction tactic among {\tt red}, {\tt hnf}, {\tt compute}, {\tt simpl}, {\tt cbv}, {\tt lazy}, {\tt unfold}, {\tt fold}, {\tt pattern}.
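As an illustration combining {\tt fresh} and {\tt eval} (the name {\tt add\_double} below is arbitrary), the following tactic computes a sum and asserts the result under a freshly generated hypothesis name:
\begin{coq_example}
Ltac add_double n :=
  let m := eval compute in (n + n) in
  let H := fresh "double" in
  assert (H : n + n = m) by reflexivity.
Lemma fresh_eval_ex : True.
Proof.
add_double 3. (* asserts double : 3 + 3 = 6 *)
add_double 5. (* the generated name is fresh, for instance double0 *)
exact I.
Qed.
\end{coq_example}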
\subsubsection{Type-checking a term}
%\tacindex{type of}
\index{Ltac!type of} \index{type of!in Ltac} The following returns the type of {\term}: \begin{quote} {\tt type of} {\term} \end{quote} \subsubsection[Accessing tactic decomposition]{Accessing tactic decomposition\tacindex{info} \index{Tacticals!info@{\tt info}}} The tactical ``{\tt info} {\tacexpr}'' is not really a tactical. For elementary tactics, this is equivalent to \tacexpr. For complex tactics like \texttt{auto}, it displays the operations performed by the tactic. \subsubsection[Proving a subgoal as a separate lemma]{Proving a subgoal as a separate lemma\tacindex{abstract} \index{Tacticals!abstract@{\tt abstract}}} From the outside, ``\texttt{abstract \tacexpr}'' is the same as {\tt solve \tacexpr}. Internally, it saves an auxiliary lemma called {\ident}\texttt{\_subproof}\textit{n} where {\ident} is the name of the current goal and \textit{n} is chosen so that this is a fresh name. This tactical is useful with tactics such as \texttt{omega} or \texttt{discriminate} that generate huge proof terms. With that tool the user can avoid the explosion at the time of the \texttt{Save} command without having to manually cut the proof into smaller lemmas. \begin{Variants} \item \texttt{abstract {\tacexpr} using {\ident}}.\\ Explicitly give the name of the auxiliary lemma. \end{Variants} \ErrMsg \errindex{Proof is not complete} \subsubsection[Calling an external tactic]{Calling an external tactic\index{Ltac!external}} The tactic {\tt external} makes it possible to run an executable outside the {\Coq} executable. The communication is done via an XML encoding of constructions. The syntax of the command is \begin{quote} {\tt external} "\textsl{command}" "\textsl{request}" \nelist{\tacarg}{} \end{quote} The string \textsl{command}, to be interpreted in the default execution path of the operating system, is the name of the external command. The string \textsl{request} is the name of a request to be sent to the external command. Finally, the tactic arguments have to evaluate to terms. An XML tree of the following form is sent to the standard input of the external command. \medskip \begin{tabular}{l} \texttt{}\\ the XML tree of the first argument\\ {\ldots}\\ the XML tree of the last argument\\ \texttt{}\\ \end{tabular} \medskip Conversely, the external command must send on its standard output an XML tree of the following forms: \medskip \begin{tabular}{l} \texttt{}\\ the XML tree of a term\\ \texttt{}\\ \end{tabular} \medskip \noindent or \medskip \begin{tabular}{l} \texttt{}\\ the XML tree of a first argument\\ {\ldots}\\ the XML tree of a last argument\\ \texttt{}\\ \end{tabular} \medskip \noindent where \textsl{ltac\_qualified\_ident} is the name of a defined {\ltac} function and each subsequent XML tree is recursively a \texttt{CALL} or a \texttt{TERM} node. The Document Type Definition (DTD) for terms of the Calculus of Inductive Constructions is the one developed as part of the MoWGLI European project. It can be found in the file {\tt dev/doc/cic.dtd} of the {\Coq} source archive. An example of a parser for this DTD, written in the Objective Caml - Camlp4 language, can be found in the file {\tt parsing/g\_xml.ml4} of the {\Coq} source archive. \section[Tactic toplevel definitions]{Tactic toplevel definitions\comindex{Ltac}} \subsection{Defining {\ltac} functions} Basically, {\ltac} toplevel definitions are made as follows:
%{\tt Tactic Definition} {\ident} {\tt :=} {\tacexpr}\\
%
%{\tacexpr} is evaluated to $v$ and $v$ is associated to {\ident}.
Next, every %script is evaluated by substituting $v$ to {\ident}. % %We can define functional definitions by:\\ \begin{quote} {\tt Ltac} {\ident} {\ident}$_1$ ... {\ident}$_n$ {\tt :=} {\tacexpr} \end{quote} This defines a new {\ltac} function that can be used in any tactic script or new {\ltac} toplevel definition. \Rem The preceding definition can equivalently be written: \begin{quote} {\tt Ltac} {\ident} {\tt := fun} {\ident}$_1$ ... {\ident}$_n$ {\tt =>} {\tacexpr} \end{quote} Recursive and mutual recursive function definitions are also possible with the syntax: \begin{quote} {\tt Ltac} {\ident}$_1$ {\ident}$_{1,1}$ ... {\ident}$_{1,m_1}$~~{\tt :=} {\tacexpr}$_1$\\ {\tt with} {\ident}$_2$ {\ident}$_{2,1}$ ... {\ident}$_{2,m_2}$~~{\tt :=} {\tacexpr}$_2$\\ ...\\ {\tt with} {\ident}$_n$ {\ident}$_{n,1}$ ... {\ident}$_{n,m_n}$~~{\tt :=} {\tacexpr}$_n$ \end{quote} \medskip It is also possible to \emph{redefine} an existing user-defined tactic using the syntax: \begin{quote} {\tt Ltac} {\qualid} {\ident}$_1$ ... {\ident}$_n$ {\tt ::=} {\tacexpr} \end{quote} A previous definition of \qualid must exist in the environment. The new definition will always be used instead of the old one and it goes accross module boundaries. If preceded by the keyword {\tt Local} the tactic definition will not be exported outside the current module. \subsection[Printing {\ltac} tactics]{Printing {\ltac} tactics\comindex{Print Ltac}} Defined {\ltac} functions can be displayed using the command \begin{quote} {\tt Print Ltac {\qualid}.} \end{quote} \section[Debugging {\ltac} tactics]{Debugging {\ltac} tactics\comindex{Set Ltac Debug} \comindex{Unset Ltac Debug} \comindex{Test Ltac Debug}} The {\ltac} interpreter comes with a step-by-step debugger. The debugger can be activated using the command \begin{quote} {\tt Set Ltac Debug.} \end{quote} \noindent and deactivated using the command \begin{quote} {\tt Unset Ltac Debug.} \end{quote} To know if the debugger is on, use the command \texttt{Test Ltac Debug}. When the debugger is activated, it stops at every step of the evaluation of the current {\ltac} expression and it prints information on what it is doing. The debugger stops, prompting for a command which can be one of the following: \medskip \begin{tabular}{ll} simple newline: & go to the next step\\ h: & get help\\ x: & exit current evaluation\\ s: & continue current evaluation without stopping\\ r $n$: & advance $n$ steps further\\ r {\qstring}: & advance up to the next call to ``{\tt idtac} {\qstring}''\\ \end{tabular} \endinput \subsection{Permutation on closed lists} \begin{figure}[b] \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example*} Require Import List. Section Sort. Variable A : Set. Inductive permut : list A -> list A -> Prop := | permut_refl : forall l, permut l l | permut_cons : forall a l0 l1, permut l0 l1 -> permut (a :: l0) (a :: l1) | permut_append : forall a l, permut (a :: l) (l ++ a :: nil) | permut_trans : forall l0 l1 l2, permut l0 l1 -> permut l1 l2 -> permut l0 l2. End Sort. \end{coq_example*} \end{center} \caption{Definition of the permutation predicate} \label{permutpred} \end{figure} Another more complex example is the problem of permutation on closed lists. The aim is to show that a closed list is a permutation of another one. First, we define the permutation predicate as shown on Figure~\ref{permutpred}. 
\begin{figure}[p] \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example} Ltac Permut n := match goal with | |- (permut _ ?l ?l) => apply permut_refl | |- (permut _ (?a :: ?l1) (?a :: ?l2)) => let newn := eval compute in (length l1) in (apply permut_cons; Permut newn) | |- (permut ?A (?a :: ?l1) ?l2) => match eval compute in n with | 1 => fail | _ => let l1' := constr:(l1 ++ a :: nil) in (apply (permut_trans A (a :: l1) l1' l2); [ apply permut_append | compute; Permut (pred n) ]) end end. Ltac PermutProve := match goal with | |- (permut _ ?l1 ?l2) => match eval compute in (length l1 = length l2) with | (?n = ?n) => Permut n end end. \end{coq_example} \end{minipage}} \end{center} \caption{Permutation tactic} \label{permutltac} \end{figure} \begin{figure}[p] \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example*} Lemma permut_ex1 : permut nat (1 :: 2 :: 3 :: nil) (3 :: 2 :: 1 :: nil). Proof. PermutProve. Qed. Lemma permut_ex2 : permut nat (0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 :: nil) (0 :: 2 :: 4 :: 6 :: 8 :: 9 :: 7 :: 5 :: 3 :: 1 :: nil). Proof. PermutProve. Qed. \end{coq_example*} \end{minipage}} \end{center} \caption{Examples of {\tt PermutProve} use} \label{permutlem} \end{figure} Next, we can write naturally the tactic and the result can be seen on Figure~\ref{permutltac}. We can notice that we use two toplevel definitions {\tt PermutProve} and {\tt Permut}. The function to be called is {\tt PermutProve} which computes the lengths of the two lists and calls {\tt Permut} with the length if the two lists have the same length. {\tt Permut} works as expected. If the two lists are equal, it concludes. Otherwise, if the lists have identical first elements, it applies {\tt Permut} on the tail of the lists. Finally, if the lists have different first elements, it puts the first element of one of the lists (here the second one which appears in the {\tt permut} predicate) at the end if that is possible, i.e., if the new first element has been at this place previously. To verify that all rotations have been done for a list, we use the length of the list as an argument for {\tt Permut} and this length is decremented for each rotation down to, but not including, 1 because for a list of length $n$, we can make exactly $n-1$ rotations to generate at most $n$ distinct lists. Here, it must be noticed that we use the natural numbers of {\Coq} for the rotation counter. On Figure~\ref{ltac}, we can see that it is possible to use usual natural numbers but they are only used as arguments for primitive tactics and they cannot be handled, in particular, we cannot make computations with them. So, a natural choice is to use {\Coq} data structures so that {\Coq} makes the computations (reductions) by {\tt eval compute in} and we can get the terms back by {\tt match}. With {\tt PermutProve}, we can now prove lemmas such those shown on Figure~\ref{permutlem}. \subsection{Deciding intuitionistic propositional logic} \begin{figure}[tbp] \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example} Ltac Axioms := match goal with | |- True => trivial | _:False |- _ => elimtype False; assumption | _:?A |- ?A => auto end. 
Ltac DSimplif := repeat (intros; match goal with | id:(~ _) |- _ => red in id | id:(_ /\ _) |- _ => elim id; do 2 intro; clear id | id:(_ \/ _) |- _ => elim id; intro; clear id | id:(?A /\ ?B -> ?C) |- _ => cut (A -> B -> C); [ intro | intros; apply id; split; assumption ] | id:(?A \/ ?B -> ?C) |- _ => cut (B -> C); [ cut (A -> C); [ intros; clear id | intro; apply id; left; assumption ] | intro; apply id; right; assumption ] | id0:(?A -> ?B),id1:?A |- _ => cut B; [ intro; clear id0 | apply id0; assumption ] | |- (_ /\ _) => split | |- (~ _) => red end). \end{coq_example} \end{minipage}} \end{center} \caption{Deciding intuitionistic propositions (1)} \label{tautoltaca} \end{figure} \begin{figure} \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example} Ltac TautoProp := DSimplif; Axioms || match goal with | id:((?A -> ?B) -> ?C) |- _ => cut (B -> C); [ intro; cut (A -> B); [ intro; cut C; [ intro; clear id | apply id; assumption ] | clear id ] | intro; apply id; intro; assumption ]; TautoProp | id:(~ ?A -> ?B) |- _ => cut (False -> B); [ intro; cut (A -> False); [ intro; cut B; [ intro; clear id | apply id; assumption ] | clear id ] | intro; apply id; red; intro; assumption ]; TautoProp | |- (_ \/ _) => (left; TautoProp) || (right; TautoProp) end. \end{coq_example} \end{minipage}} \end{center} \caption{Deciding intuitionistic propositions (2)} \label{tautoltacb} \end{figure} The pattern matching on goals allows a complete and so a powerful backtracking when returning tactic values. An interesting application is the problem of deciding intuitionistic propositional logic. Considering the contraction-free sequent calculi {\tt LJT*} of Roy~Dyckhoff (\cite{Dyc92}), it is quite natural to code such a tactic using the tactic language. On Figure~\ref{tautoltaca}, the tactic {\tt Axioms} tries to conclude using usual axioms. The {\tt DSimplif} tactic applies all the reversible rules of Dyckhoff's system. Finally, on Figure~\ref{tautoltacb}, the {\tt TautoProp} tactic (the main tactic to be called) simplifies with {\tt DSimplif}, tries to conclude with {\tt Axioms} and tries several paths using the backtracking rules (one of the four Dyckhoff's rules for the left implication to get rid of the contraction and the right or). \begin{figure}[tb] \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example*} Lemma tauto_ex1 : forall A B:Prop, A /\ B -> A \/ B. Proof. TautoProp. Qed. Lemma tauto_ex2 : forall A B:Prop, (~ ~ B -> B) -> (A -> B) -> ~ ~ A -> B. Proof. TautoProp. Qed. \end{coq_example*} \end{minipage}} \end{center} \caption{Proofs of tautologies with {\tt TautoProp}} \label{tautolem} \end{figure} For example, with {\tt TautoProp}, we can prove tautologies like those of Figure~\ref{tautolem}. \subsection{Deciding type isomorphisms} A more tricky problem is to decide equalities between types and modulo isomorphisms. Here, we choose to use the isomorphisms of the simply typed $\lb{}$-calculus with Cartesian product and $unit$ type (see, for example, \cite{RC95}). The axioms of this $\lb{}$-calculus are given by Figure~\ref{isosax}. \begin{figure} \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Open Scope type_scope. Section Iso_axioms. Variables A B C : Set. Axiom Com : A * B = B * A. Axiom Ass : A * (B * C) = A * B * C. Axiom Cur : (A * B -> C) = (A -> B -> C). Axiom Dis : (A -> B * C) = (A -> B) * (A -> C). Axiom P_unit : A * unit = A. Axiom AR_unit : (A -> unit) = unit. 
Axiom AL_unit : (unit -> A) = A. Lemma Cons : B = C -> A * B = A * C. Proof. intro Heq; rewrite Heq; reflexivity. Qed. End Iso_axioms. \end{coq_example*} \end{minipage}} \end{center} \caption{Type isomorphism axioms} \label{isosax} \end{figure} The tactic to judge equalities modulo this axiomatization can be written as shown on Figures~\ref{isosltac1} and~\ref{isosltac2}. The algorithm is quite simple. Types are reduced using axioms that can be oriented (this done by {\tt MainSimplif}). The normal forms are sequences of Cartesian products without Cartesian product in the left component. These normal forms are then compared modulo permutation of the components (this is done by {\tt CompareStruct}). The main tactic to be called and realizing this algorithm is {\tt IsoProve}. \begin{figure} \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example} Ltac DSimplif trm := match trm with | (?A * ?B * ?C) => rewrite <- (Ass A B C); try MainSimplif | (?A * ?B -> ?C) => rewrite (Cur A B C); try MainSimplif | (?A -> ?B * ?C) => rewrite (Dis A B C); try MainSimplif | (?A * unit) => rewrite (P_unit A); try MainSimplif | (unit * ?B) => rewrite (Com unit B); try MainSimplif | (?A -> unit) => rewrite (AR_unit A); try MainSimplif | (unit -> ?B) => rewrite (AL_unit B); try MainSimplif | (?A * ?B) => (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) | (?A -> ?B) => (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) end with MainSimplif := match goal with | |- (?A = ?B) => try DSimplif A; try DSimplif B end. Ltac Length trm := match trm with | (_ * ?B) => let succ := Length B in constr:(S succ) | _ => constr:1 end. Ltac assoc := repeat rewrite <- Ass. \end{coq_example} \end{minipage}} \end{center} \caption{Type isomorphism tactic (1)} \label{isosltac1} \end{figure} \begin{figure} \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example} Ltac DoCompare n := match goal with | [ |- (?A = ?A) ] => reflexivity | [ |- (?A * ?B = ?A * ?C) ] => apply Cons; let newn := Length B in DoCompare newn | [ |- (?A * ?B = ?C) ] => match eval compute in n with | 1 => fail | _ => pattern (A * B) at 1; rewrite Com; assoc; DoCompare (pred n) end end. Ltac CompareStruct := match goal with | [ |- (?A = ?B) ] => let l1 := Length A with l2 := Length B in match eval compute in (l1 = l2) with | (?n = ?n) => DoCompare n end end. Ltac IsoProve := MainSimplif; CompareStruct. \end{coq_example} \end{minipage}} \end{center} \caption{Type isomorphism tactic (2)} \label{isosltac2} \end{figure} Figure~\ref{isoslem} gives examples of what can be solved by {\tt IsoProve}. \begin{figure} \begin{center} \fbox{\begin{minipage}{0.95\textwidth} \begin{coq_example*} Lemma isos_ex1 : forall A B:Set, A * unit * B = B * (unit * A). Proof. intros; IsoProve. Qed. Lemma isos_ex2 : forall A B C:Set, (A * unit -> B * (C * unit)) = (A * unit -> (C -> unit) * C) * (unit -> A -> B). Proof. intros; IsoProve. Qed. \end{coq_example*} \end{minipage}} \end{center} \caption{Type equalities solved by {\tt IsoProve}} \label{isoslem} \end{figure} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/headers.sty0000640000175000001440000000617211404410343016020 0ustar notinusers%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % File headers.sty % Commands for pretty headers, multiple indexes, and the appendix. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{fancyhdr} \setlength{\headheight}{14pt} \pagestyle{fancyplain} \newcommand{\coqfooter}{\tiny Coq Reference Manual, V\coqversion{}, \today} \cfoot{} \lfoot[{\coqfooter}]{} \rfoot[]{{\coqfooter}} \newcommand{\setheaders}[1]{\rhead[\fancyplain{}{\textbf{#1}}]{\fancyplain{}{\thepage}}\lhead[\fancyplain{}{\thepage}]{\fancyplain{}{\textbf{#1}}}} \newcommand{\defaultheaders}{\rhead[\fancyplain{}{\leftmark}]{\fancyplain{}{\thepage}}\lhead[\fancyplain{}{\thepage}]{\fancyplain{}{\rightmark}}} \renewcommand{\chaptermark}[1]{\markboth{{\bf \thechapter~#1}}{}} \renewcommand{\sectionmark}[1]{\markright{\thesection~#1}} \renewcommand{\contentsname}{% \protect\setheaders{Table of contents}Table of contents} \renewcommand{\bibname}{\protect\setheaders{Bibliography}% \protect\RefManCutCommand{BEGINBIBLIO=\thepage}% \protect\addcontentsline{toc}{chapter}{Bibliography}Bibliography} %%%%%%%%%%%%%%%%%%%%%%%%%%%% % Commands for indexes %%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{index} \makeindex \newindex{tactic}{tacidx}{tacind}{% \protect\setheaders{Tactics Index}% \protect\addcontentsline{toc}{chapter}{Tactics Index}Tactics Index} \newindex{command}{comidx}{comind}{% \protect\setheaders{Vernacular Commands Index}% \protect\addcontentsline{toc}{chapter}{Vernacular Commands Index}% Vernacular Commands Index} \newindex{error}{erridx}{errind}{% \protect\setheaders{Index of Error Messages}% \protect\addcontentsline{toc}{chapter}{Index of Error Messages}Index of Error Messages} \renewindex{default}{idx}{ind}{% \protect\addcontentsline{toc}{chapter}{Global Index}% \protect\setheaders{Global Index}Global Index} \newcommand{\tacindex}[1]{% \index{#1@\texttt{#1}}\index[tactic]{#1@\texttt{#1}}} \newcommand{\comindex}[1]{% \index{#1@\texttt{#1}}\index[command]{#1@\texttt{#1}}} \newcommand{\errindex}[1]{\texttt{#1}\index[error]{#1}} \newcommand{\errindexbis}[2]{\texttt{#1}\index[error]{#2}} \newcommand{\ttindex}[1]{\index{#1@\texttt{#1}}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % For the Addendum table of contents %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\aauthor}[1]{{\LARGE \bf #1} \bigskip \bigskip \bigskip} \newcommand{\atableofcontents}{\section*{Contents}\@starttoc{atoc}} \newcommand{\achapter}[1]{ \chapter{#1}\addcontentsline{atoc}{chapter}{#1}} \newcommand{\asection}[1]{ \section{#1}\addcontentsline{atoc}{section}{#1}} \newcommand{\asubsection}[1]{ \subsection{#1}\addcontentsline{atoc}{subsection}{#1}} \newcommand{\asubsubsection}[1]{ \subsubsection{#1}\addcontentsline{atoc}{subsubsection}{#1}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Reference-Manual.sh is generated to cut the Postscript %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %\@starttoc{sh} \newwrite\RefManCut@out% \immediate\openout\RefManCut@out\jobname.sh \newcommand{\RefManCutCommand}[1]{% \immediate\write\RefManCut@out{#1}} \newcommand{\RefManCutClose}{% \immediate\closeout\RefManCut@out} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-pre.tex0000640000175000001440000013122112010315427016315 0ustar notinusers%BEGIN LATEX \setheaders{Credits} %END LATEX \chapter*{Credits} %\addcontentsline{toc}{section}{Credits} \Coq{}~ is a proof assistant for higher-order logic, allowing the development of computer programs consistent with their formal specification. It is the result of about ten years of research of the Coq project. 
We shall briefly survey here three main aspects: the \emph{logical language} in which we write our axiomatizations and specifications, the \emph{proof assistant} which allows the development of verified mathematical proofs, and the \emph{program extractor} which synthesizes computer programs obeying their formal specifications, written as logical assertions in the language. The logical language used by {\Coq} is a variety of type theory, called the \emph{Calculus of Inductive Constructions}. Without going back to Leibniz and Boole, we can date the creation of what is now called mathematical logic to the work of Frege and Peano at the turn of the century. The discovery of antinomies in the free use of predicates or comprehension principles prompted Russell to restrict predicate calculus with a stratification of \emph{types}. This effort culminated with \emph{Principia Mathematica}, the first systematic attempt at a formal foundation of mathematics. A simplification of this system along the lines of simply typed $\lambda$-calculus occurred with Church's \emph{Simple Theory of Types}. The $\lambda$-calculus notation, originally used for expressing functionality, could also be used as an encoding of natural deduction proofs. This Curry-Howard isomorphism was used by N. de Bruijn in the \emph{Automath} project, the first full-scale attempt to develop and mechanically verify mathematical proofs. This effort culminated with Jutting's verification of Landau's \emph{Grundlagen} in the 1970's. Exploiting this Curry-Howard isomorphism, notable achievements in proof theory saw the emergence of two type-theoretic frameworks; the first one, Martin-L\"of's \emph{Intuitionistic Theory of Types}, attempts a new foundation of mathematics on constructive principles. The second one, Girard's polymorphic $\lambda$-calculus $F_\omega$, is a very strong functional system in which we may represent higher-order logic proof structures. Combining both systems in a higher-order extension of the Automath languages, T. Coquand presented in 1985 the first version of the \emph{Calculus of Constructions}, CoC. This strong logical system allowed powerful axiomatizations, but direct inductive definitions were not possible, and inductive notions had to be defined indirectly through functional encodings, which introduced inefficiencies and awkwardness. The formalism was extended in 1989 by T. Coquand and C. Paulin with primitive inductive definitions, leading to the current \emph{Calculus of Inductive Constructions}. This extended formalism is not rigorously defined here. Rather, numerous concrete examples are discussed. We refer the interested reader to relevant research papers for more information about the formalism, its meta-theoretic properties, and semantics. However, it should not be necessary to understand this theoretical material in order to write specifications. It is possible to understand the Calculus of Inductive Constructions at a higher level, as a mixture of predicate calculus, inductive predicate definitions presented as typed PROLOG, and recursive function definitions close to the language ML. Automated theorem-proving was pioneered in the 1960's by Davis and Putnam in propositional calculus. A complete mechanization (in the sense of a semi-decision procedure) of classical first-order logic was proposed in 1965 by J.A. Robinson, with a single uniform inference rule called \emph{resolution}. Resolution relies on solving equations in free algebras (i.e. 
term structures), using the \emph{unification algorithm}. Many refinements of resolution were studied in the 1970's, but few convincing implementations were realized, except of course that PROLOG is in some sense issued from this effort. A less ambitious approach to proof development is computer-aided proof-checking. The most notable proof-checkers developed in the 1970's were LCF, designed by R. Milner and his colleagues at U. Edinburgh, specialized in proving properties about denotational semantics recursion equations, and the Boyer and Moore theorem-prover, an automation of primitive recursion over inductive data types. While the Boyer-Moore theorem-prover attempted to synthesize proofs by a combination of automated methods, LCF constructed its proofs through the programming of \emph{tactics}, written in a high-level functional meta-language, ML. The salient feature which clearly distinguishes our proof assistant from say LCF or Boyer and Moore's, is its possibility to extract programs from the constructive contents of proofs. This computational interpretation of proof objects, in the tradition of Bishop's constructive mathematics, is based on a realizability interpretation, in the sense of Kleene, due to C. Paulin. The user must just mark his intention by separating in the logical statements the assertions stating the existence of a computational object from the logical assertions which specify its properties, but which may be considered as just comments in the corresponding program. Given this information, the system automatically extracts a functional term from a consistency proof of its specifications. This functional term may be in turn compiled into an actual computer program. This methodology of extracting programs from proofs is a revolutionary paradigm for software engineering. Program synthesis has long been a theme of research in artificial intelligence, pioneered by R. Waldinger. The Tablog system of Z. Manna and R. Waldinger allows the deductive synthesis of functional programs from proofs in tableau form of their specifications, written in a variety of first-order logic. Development of a systematic \emph{programming logic}, based on extensions of Martin-L\"of's type theory, was undertaken at Cornell U. by the Nuprl team, headed by R. Constable. The first actual program extractor, PX, was designed and implemented around 1985 by S. Hayashi from Kyoto University. It allows the extraction of a LISP program from a proof in a logical system inspired by the logical formalisms of S. Feferman. Interest in this methodology is growing in the theoretical computer science community. We can foresee the day when actual computer systems used in applications will contain certified modules, automatically generated from a consistency proof of their formal specifications. We are however still far from being able to use this methodology in a smooth interaction with the standard tools from software engineering, i.e. compilers, linkers, run-time systems taking advantage of special hardware, debuggers, and the like. We hope that {\Coq} can be of use to researchers interested in experimenting with this new methodology. A first implementation of CoC was started in 1984 by G. Huet and T. Coquand. Its implementation language was CAML, a functional programming language from the ML family designed at INRIA in Rocquencourt. The core of this system was a proof-checker for CoC seen as a typed $\lambda$-calculus, called the \emph{Constructive Engine}. 
This engine was operated through a high-level notation permitting the declaration of axioms and parameters, the definition of mathematical types and objects, and the explicit construction of proof objects encoded as $\lambda$-terms. A section mechanism, designed and implemented by G. Dowek, allowed hierarchical developments of mathematical theories. This high-level language was called the \emph{Mathematical Vernacular}. Furthermore, an interactive \emph{Theorem Prover} permitted the incremental construction of proof trees in a top-down manner, subgoaling recursively and backtracking from dead-alleys. The theorem prover executed tactics written in CAML, in the LCF fashion. A basic set of tactics was predefined, which the user could extend by his own specific tactics. This system (Version 4.10) was released in 1989. Then, the system was extended to deal with the new calculus with inductive types by C. Paulin, with corresponding new tactics for proofs by induction. A new standard set of tactics was streamlined, and the vernacular extended for tactics execution. A package to compile programs extracted from proofs to actual computer programs in CAML or some other functional language was designed and implemented by B. Werner. A new user-interface, relying on a CAML-X interface by D. de Rauglaudre, was designed and implemented by A. Felty. It allowed operation of the theorem-prover through the manipulation of windows, menus, mouse-sensitive buttons, and other widgets. This system (Version 5.6) was released in 1991. \Coq{} was ported to the new implementation Caml-light of X. Leroy and D. Doligez by D. de Rauglaudre (Version 5.7) in 1992. A new version of \Coq{} was then coordinated by C. Murthy, with new tools designed by C. Parent to prove properties of ML programs (this methodology is dual to program extraction) and a new user-interaction loop. This system (Version 5.8) was released in May 1993. A Centaur interface \textsc{CTCoq} was then developed by Y. Bertot from the Croap project from INRIA-Sophia-Antipolis. In parallel, G. Dowek and H. Herbelin developed a new proof engine, allowing the general manipulation of existential variables consistently with dependent types in an experimental version of \Coq{} (V5.9). The version V5.10 of \Coq{} is based on a generic system for manipulating terms with binding operators due to Chet Murthy. A new proof engine allows the parallel development of partial proofs for independent subgoals. The structure of these proof trees is a mixed representation of derivation trees for the Calculus of Inductive Constructions with abstract syntax trees for the tactics scripts, allowing the navigation in a proof at various levels of details. The proof engine allows generic environment items managed in an object-oriented way. This new architecture, due to C. Murthy, supports several new facilities which make the system easier to extend and to scale up: \begin{itemize} \item User-programmable tactics are allowed \item It is possible to separately verify development modules, and to load their compiled images without verifying them again - a quick relocation process allows their fast loading \item A generic parsing scheme allows user-definable notations, with a symmetric table-driven pretty-printer \item Syntactic definitions allow convenient abbreviations \item A limited facility of meta-variables allows the automatic synthesis of certain type expressions, allowing generic notations for e.g. equality, pairing, and existential quantification. 
\end{itemize} In the Fall of 1994, C. Paulin-Mohring replaced the structure of inductively defined types and families by a new structure, allowing mutually recursive definitions. P. Manoury implemented a translation of recursive definitions into the primitive recursive style imposed by the internal recursion operators, in the style of the ProPre system. C. Mu{\~n}oz implemented a decision procedure for intuitionistic propositional logic, based on results of R. Dyckhoff. J.C. Filli{\^a}tre implemented a decision procedure for first-order logic without contraction, based on results of J. Ketonen and R. Weyhrauch. Finally C. Murthy implemented a library of inversion tactics, relieving the user from tedious definitions of ``inversion predicates''. \begin{flushright} Rocquencourt, Feb. 1st 1995\\ G\'erard Huet \end{flushright} \section*{Credits: addendum for version 6.1}
%\addcontentsline{toc}{section}{Credits: addendum for version V6.1}
The present version 6.1 of \Coq{} is based on the V5.10 architecture. It was ported to the new language Objective Caml by Bruno Barras. The underlying framework has slightly changed and allows more conversions between sorts. The new version provides powerful tools for easier developments. Cristina Cornes designed an extension of the \Coq{} syntax to allow definition of terms using a powerful pattern-matching analysis in the style of ML programs. Amokrane Sa\"{\i}bi wrote a mechanism to simulate inheritance between type families extending a proposal by Peter Aczel. He also developed a mechanism to automatically compute which arguments of a constant may be inferred by the system and consequently do not need to be explicitly written. Yann Coscoy designed a command which explains a proof term using natural language. Pierre Cr{\'e}gut built a new tactic which solves problems in quantifier-free Presburger Arithmetic. Both functionalities have been integrated into the \Coq{} system by Hugo Herbelin. Samuel Boutin designed a tactic for simplification of commutative rings using a canonical set of rewriting rules and equality modulo associativity and commutativity. Finally the organisation of the \Coq{} distribution has been supervised by Jean-Christophe Filli\^atre with the help of Judica\"el Courant and Bruno Barras. \begin{flushright} Lyon, Nov. 18th 1996\\ Christine Paulin \end{flushright} \section*{Credits: addendum for version 6.2}
%\addcontentsline{toc}{section}{Credits: addendum for version V6.2}
In version 6.2 of \Coq{}, the parsing is done using camlp4, a preprocessor and pretty-printer for CAML designed by Daniel de Rauglaudre at INRIA. Daniel de Rauglaudre made the first adaptation of \Coq{} for camlp4; this work was continued by Bruno Barras who also changed the structure of \Coq{} abstract syntax trees and the primitives to manipulate them. The result of these changes is a faster parsing procedure with greatly improved syntax-error messages. The user-interface to introduce grammar or pretty-printing rules has also changed. Eduardo Gim\'enez redesigned the internal tactic libraries, giving uniform names to Caml functions corresponding to \Coq{} tactic names. Bruno Barras wrote new, more efficient reduction functions. Hugo Herbelin introduced more uniform notations in the \Coq{} specification language: the definitions by fixpoints and pattern-matching have a more readable syntax. Patrick Loiseleur introduced user-friendly notations for arithmetic expressions.
New tactics were introduced: Eduardo Gim\'enez improved a mechanism to introduce macros for tactics, and designed special tactics for (co)inductive definitions; Patrick Loiseleur designed a tactic to simplify polynomial expressions in an arbitrary commutative ring, which generalizes the previous tactic implemented by Samuel Boutin. Jean-Christophe Filli\^atre introduced a tactic for refining a goal, using a proof term with holes as a proof scheme. David Delahaye designed the \textsf{SearchIsos} tool to search an object in the library given its type (up to isomorphism). Henri Laulh\`ere produced the \Coq{} distribution for the Windows environment. Finally, Hugo Herbelin was the main coordinator of the \Coq{} documentation with principal contributions by Bruno Barras, David Delahaye, Jean-Christophe Filli\^atre, Eduardo Gim\'enez, Hugo Herbelin and Patrick Loiseleur. \begin{flushright} Orsay, May 4th 1998\\ Christine Paulin \end{flushright} \section*{Credits: addendum for version 6.3} The main changes in version V6.3 were the introduction of a few new tactics and the extension of the guard condition for fixpoint definitions. B. Barras extended the unification algorithm to complete partial terms and solved various tricky bugs related to universes.\\ D. Delahaye developed the \texttt{AutoRewrite} tactic. He also designed the new behavior of \texttt{Intro} and provided the tacticals \texttt{First} and \texttt{Solve}.\\ J.-C. Filli\^atre developed the \texttt{Correctness} tactic.\\ E. Gim\'enez extended the guard condition in fixpoints.\\ H. Herbelin designed the new syntax for definitions and extended the \texttt{Induction} tactic.\\ P. Loiseleur developed the \texttt{Quote} tactic and the new design of the \texttt{Auto} tactic; he also introduced the index of errors in the documentation.\\ C. Paulin wrote the \texttt{Focus} command and introduced the reduction functions in definitions; this last feature was proposed by J.-F. Monin from CNET Lannion. \begin{flushright} Orsay, Dec. 1999\\ Christine Paulin \end{flushright} %\newpage \section*{Credits: versions 7} The version V7 is a new implementation started in September 1999 by Jean-Christophe Filli\^atre. This is a major revision with respect to the internal architecture of the system. The \Coq{} version 7.0 was distributed in March 2001, version 7.1 in September 2001, version 7.2 in January 2002, version 7.3 in May 2002 and version 7.4 in February 2003. Jean-Christophe Filli\^atre designed the architecture of the new system; he introduced a new representation for environments and wrote a new kernel for type-checking terms. His approach was to use functional data structures in order to get more sharing, to prepare the addition of modules and also to get closer to a certified kernel. Hugo Herbelin introduced a new structure of terms with local definitions. He introduced ``qualified'' names, wrote a new pattern-matching compilation algorithm and designed a more compact algorithm for checking the logical consistency of universes. He contributed to the simplification of {\Coq} internal structures and the optimisation of the system. He added basic tactics for forward reasoning and coercions in patterns. David Delahaye introduced a new language for tactics. General tactics using pattern-matching on goals and context can directly be written from the {\Coq} toplevel. He also provided primitives for the design of user-defined tactics in \textsc{Caml}. Micaela Mayero contributed the library on real numbers.
Olivier Desmettre extended this library with axiomatic trigonometric functions, squares, square roots, finite sums, Chasles' property and basic plane geometry. Jean-Christophe Filli\^atre and Pierre Letouzey designed a new extraction procedure from \Coq{} terms to \textsc{Caml} or \textsc{Haskell} programs. This new extraction procedure, unlike the one implemented in previous versions of \Coq{}, is able to handle all terms in the Calculus of Inductive Constructions, even those involving universes and strong elimination. P. Letouzey adapted user contributions to extract ML programs when it was sensible. Jean-Christophe Filli\^atre wrote \verb=coqdoc=, a documentation tool for {\Coq} libraries usable from version 7.2. Bruno Barras improved the efficiency of the reduction algorithms and the confidence level in the correctness of the critical {\Coq} type-checking algorithm. Yves Bertot designed the \texttt{SearchPattern} and \texttt{SearchRewrite} tools and the support for the \textsc{pcoq} interface (\url{http://www-sop.inria.fr/lemme/pcoq/}). Micaela Mayero and David Delahaye introduced {\tt Field}, a decision tactic for commutative fields. Christine Paulin changed the elimination rules for empty and singleton propositional inductive types. Lo\"{\i}c Pottier developed {\tt Fourier}, a tactic solving linear inequalities on real numbers. Pierre Cr\'egut developed a new, reflection-based version of the {\tt Omega} decision tactic. Claudio Sacerdoti Coen designed an XML output for the {\Coq} modules to be used in the Hypertextual Electronic Library of Mathematics (HELM, cf. \url{http://www.cs.unibo.it/helm}). A library for efficient representation of finite maps using binary trees, contributed by Jean Goubault, was integrated into the basic theories. Pierre Courtieu developed a command and a tactic to reason on the inductive structure of recursively defined functions. Jacek Chrz\k{a}szcz designed and implemented the module system of {\Coq}, whose foundations are in Judica\"el Courant's PhD thesis. \bigskip The development was coordinated by C. Paulin. Many discussions within the D\'emons team and the LogiCal project significantly influenced the design of {\Coq}, especially with %J. Chrz\k{a}szcz, P. Courtieu, J. Courant, J. Duprat, J. Goubault, A. Miquel, C. March\'e, B. Monate and B. Werner. Intensive users suggested improvements of the system: Y. Bertot, L. Pottier, L. Th\'ery, P. Zimmerman from INRIA, C. Alvarado, P. Cr\'egut, J.-F. Monin from France Telecom R \& D. \begin{flushright} Orsay, May 2002\\ Hugo Herbelin \& Christine Paulin \end{flushright} \section*{Credits: version 8.0} {\Coq} version 8 is a major revision of the {\Coq} proof assistant. First, the underlying logic is slightly different. The so-called {\em impredicativity} of the sort {\tt Set} has been dropped. The main reason is that it is inconsistent with the principle of description, which is quite a useful principle for formalizing %classical mathematics within classical logic. Moreover, even in a constructive setting, the impredicativity of {\tt Set} does not add so much in practice and is even a subject of criticism from a large part of the intuitionistic mathematics community. Nevertheless, the impredicativity of {\tt Set} remains optional for users interested in investigating mathematical developments which rely on it. Secondly, the concrete syntax of terms has been completely revised. The main motivations were \begin{itemize} \item a more uniform, purified style: all constructions are now lowercase, with a functional programming flavour (e.g.
abstraction is now written {\tt fun}), and more directly accessible to the novice (e.g. the dependent product is now written {\tt forall} and allows omission of types). Also, parentheses are no longer mandatory for function application. \item extensibility: some standard notations (e.g. ``<'' and ``>'') were incompatible with the previous syntax. Now all standard arithmetic notations (=, +, *, /, <, <=, ... and more) are directly part of the syntax. \end{itemize} Together with the revision of the concrete syntax, a new mechanism of {\em interpretation scopes} makes it possible to reuse the same symbols (typically +, -, *, /, <, <=) in various mathematical theories without any ambiguity for {\Coq}, leading to a largely improved readability of {\Coq} scripts. New commands to easily add new symbols are also provided. Coming with the new syntax of terms, a slight reform of the tactic language and of the language of commands has been carried out. The purpose here is better uniformity, making the tactics and commands easier to use and to remember. Thirdly, a restructuring and uniformisation of the standard library of {\Coq} has been performed. There is now just one Leibniz equality, usable for all the different kinds of {\Coq} objects. Also, the set of real numbers now lies at the same level as the sets of natural and integer numbers. Finally, the names of the standard properties of numbers now follow a standard pattern, and so do the symbolic notations for the standard definitions. The fourth point is the release of \CoqIDE{}, a new graphical gtk2-based interface fully integrated with {\Coq}. Close in style to the Proof General Emacs interface, it is faster and its integration with {\Coq} makes interactive developments more friendly. All mathematical Unicode symbols are usable within \CoqIDE{}. Finally, the module system of {\Coq} completes the picture of {\Coq} version 8.0. Though released with an experimental status in the previous version 7.4, it should be considered as a salient feature of the new version. Besides, {\Coq} comes with its load of novelties and improvements: new or improved tactics (including a new tactic for solving first-order statements), new management commands, extended libraries. \bigskip Bruno Barras and Hugo Herbelin have been the main contributors to the design and the implementation of the new syntax. The smart automatic translator from old to new syntax released with {\Coq} is also their work, with contributions by Olivier Desmettre. Hugo Herbelin is the main designer and implementor of the notion of interpretation scopes and of the commands for easily adding new notations. Hugo Herbelin is the main implementor of the restructuring of the standard library. Pierre Corbineau is the main designer and implementor of the new tactic for solving first-order statements in the presence of inductive types. He is also the maintainer of the non-domain-specific automation tactics. Benjamin Monate is the developer of the \CoqIDE{} graphical interface, with contributions by Jean-Christophe Filli\^atre, Pierre Letouzey, Claude March\'e and Bruno Barras. Claude March\'e coordinated the edition of the Reference Manual for \Coq{} V8.0. Pierre Letouzey and Jacek Chrz\k{a}szcz respectively maintained the extraction tool and module system of {\Coq}. Jean-Christophe Filli\^atre, Pierre Letouzey, Hugo Herbelin and contributors from Sophia-Antipolis and Nijmegen participated in the extension of the library. Julien Narboux built an NSIS-based automatic {\Coq} installation tool for the Windows platform.
Hugo Herbelin and Christine Paulin coordinated the development, which was under the responsibility of Christine Paulin. \begin{flushright} Palaiseau \& Orsay, Apr. 2004\\ Hugo Herbelin \& Christine Paulin\\ (updated Apr. 2006) \end{flushright} \section*{Credits: version 8.1} {\Coq} version 8.1 adds various new functionalities. Benjamin Gr\'egoire implemented an alternative algorithm to check the convertibility of terms in the {\Coq} type-checker. This alternative algorithm works by compilation to an efficient bytecode that is interpreted in an abstract machine similar to Xavier Leroy's ZINC machine. Convertibility is checked by comparing the normal forms. This alternative algorithm is especially interesting for proofs by reflection. More generally, it is convenient in the case of intensive computations. Christine Paulin implemented an extension of inductive types allowing recursively non-uniform parameters. Hugo Herbelin implemented sort-polymorphism for inductive types. Claudio Sacerdoti Coen improved the tactics for rewriting on arbitrary compatible equivalence relations. He also generalized rewriting to arbitrary transition systems. Claudio Sacerdoti Coen added new features to the module system. Benjamin Gr\'egoire, Assia Mahboubi and Bruno Barras developed a new, more efficient and more general simplification algorithm for rings and semi-rings. Laurent Th\'ery and Bruno Barras developed a new, significantly more efficient simplification algorithm for fields. Hugo Herbelin, Pierre Letouzey, Julien Forest, Julien Narboux and Claudio Sacerdoti Coen added new tactic features. Hugo Herbelin implemented matching on disjunctive patterns. New mechanisms made communication between {\Coq} and external provers easier. Nicolas Ayache and Jean-Christophe Filli\^atre implemented connections with the provers {\sc cvcl}, {\sc Simplify} and {\sc zenon}. Hugo Herbelin implemented an experimental protocol for calling external tools from the tactic language. Matthieu Sozeau developed \textsc{Russell}, an experimental language to specify the behavior of programs with subtypes. A mechanism to automatically use some specific tactic to solve unresolved implicit arguments has been implemented by Hugo Herbelin. Laurent Th\'ery's contribution on strings and Pierre Letouzey and Jean-Christophe Filli\^atre's contribution on finite maps have been integrated into the {\Coq} standard library. Pierre Letouzey developed a library about finite sets ``\`a la Objective Caml''. With Jean-Marc Notin, he extended the library on lists. Pierre Letouzey's contribution on rational numbers has been integrated and extended. Pierre Corbineau extended his tactic for solving first-order statements. He wrote a reflection-based intuitionistic tautology solver. Pierre Courtieu, Julien Forest and Yves Bertot added extra support for reasoning on the inductive structure of recursively defined functions. Jean-Marc Notin significantly contributed to the general maintenance of the system. He also took care of {\textsf{coqdoc}}. Pierre Cast\'eran contributed to the documentation of (co-)inductive types and suggested improvements to the libraries. Pierre Corbineau implemented a declarative mathematical proof language, usable in combination with the tactic-based style of proof. Finally, many users suggested improvements of the system through the Coq-Club mailing list and bug-tracker systems, especially user groups from INRIA Rocquencourt, Radboud University, the University of Pennsylvania and Yale University.
\enlargethispage{\baselineskip} \begin{flushright} Palaiseau, July 2006\\ Hugo Herbelin \end{flushright} \section*{Credits: version 8.2} {\Coq} version 8.2 adds new features, new libraries and improves on many aspects. Regarding the language of Coq, the main novelty is the introduction by Matthieu Sozeau of a package of commands providing Haskell-style type classes. Type classes, which come with a few convenient features such as type-based resolution of implicit arguments, play a new landmark role in the architecture of Coq with respect to automation. For instance, thanks to type class support, Matthieu Sozeau could implement a new resolution-based version of the tactics dedicated to rewriting on arbitrary transitive relations. Another major improvement of Coq 8.2 is the evolution of the arithmetic libraries and of the tools associated with them. Benjamin Gr\'egoire and Laurent Th\'ery contributed a modular library for building arbitrarily large integers from bounded integers, while Evgeny Makarov contributed a modular library of abstract natural and integer arithmetic together with a few convenient tactics. On his side, Pierre Letouzey made numerous extensions to the arithmetic libraries on $\mathbb{Z}$ and $\mathbb{Q}$, including extra support for automation in the presence of various number-theory concepts. Fr\'ed\'eric Besson contributed a reflexive tactic based on the Krivine-Stengle Positivstellensatz (the easy way) for validating provability of systems of inequalities. The platform is flexible enough to support the validation of any algorithm able to produce a ``certificate'' for the Positivstellensatz, and this covers the case of Fourier-Motzkin (for linear systems in $\mathbb{Q}$ and $\mathbb{R}$), Fourier-Motzkin with cutting planes (for linear systems in $\mathbb{Z}$) and sum-of-squares (for non-linear systems). Evgeny Makarov made the platform generic over arbitrary ordered rings. Arnaud Spiwack developed a library of 31-bit machine integers and, relying on Benjamin Gr\'egoire and Laurent Th\'ery's library, delivered a library of unbounded integers in base $2^{31}$. As importantly, he developed a notion of ``retro-knowledge'' so as to safely extend the kernel-located bytecode-based efficient evaluation algorithm of Coq version 8.1 to use 31-bit machine arithmetic for efficiently computing with the library of integers he developed. Besides the libraries, various improvements contributed to provide a more comfortable end-user language and a more expressive tactic language. Hugo Herbelin and Matthieu Sozeau improved the pattern-matching compilation algorithm (detection of impossible clauses in pattern-matching, automatic inference of the return type). Hugo Herbelin, Pierre Letouzey and Matthieu Sozeau contributed various new convenient syntactic constructs and new tactics or tactic features: more inference of redundant information, better unification, better support for proof or definition by fixpoint, more expressive rewriting tactics, better support for meta-variables, more convenient notations, ... \'Elie Soubiran improved the module system, adding new features (such as an ``include'' command) and making it more flexible and more general. He and Pierre Letouzey improved the support for modules in the extraction mechanism.
Matthieu Sozeau extended the \textsc{Russell} language, resulting in a convenient way to write programs of given specifications, Pierre Corbineau extended the Mathematical Proof Language and the automation tools that accompany it, Pierre Letouzey supervised and extended various parts of the standard library, St\'ephane Glondu contributed a few tactics and improvements, Jean-Marc Notin provided help in debugging, general maintenance and {\tt coqdoc} support, Vincent Siles contributed extensions of the {\tt Scheme} command and of {\tt injection}. Bruno Barras implemented the {\tt coqchk} tool: this is a stand-alone type-checker that can be used to certify {\tt .vo} files. In particular, as this verifier runs in a separate process, it is guaranteed not to be ``hijacked'' by potentially malicious extensions added to {\Coq}. Yves Bertot, Jean-Christophe Filli\^atre, Pierre Courtieu and Julien Forest acted as maintainers of features they implemented in previous versions of Coq. Julien Narboux contributed to {\CoqIDE}. Nicolas Tabareau made the adaptation of the interface of the old ``setoid rewrite'' tactic to the new version. Lionel Mamane worked on the interaction between Coq and its external interfaces. With Samuel Mimram, he also helped make Coq compatible with recent software tools. Russell O'Connor, Cezary Kaliszyk and Milad Niqui contributed to improving the libraries of integer, rational, and real numbers. We also thank many users and partners for suggestions and feedback, in particular Pierre Cast\'eran and Arthur Chargu\'eraud, the INRIA Marelle team, Georges Gonthier and the INRIA-Microsoft Mathematical Components team, the Foundations group at Radboud University in Nijmegen, reporters of bugs and participants in the Coq-Club mailing list. \begin{flushright} Palaiseau, June 2008\\ Hugo Herbelin\\ \end{flushright} \section*{Credits: version 8.3} {\Coq} version 8.3 is above all a transition version, with refinements or extensions of the existing features and libraries, and a new tactic {\tt nsatz} based on Hilbert's Nullstellensatz for deciding systems of equations over rings. With respect to libraries, the main evolutions are due to Pierre Letouzey, with a rewriting of the library of finite sets {\tt FSets} and a new round of evolutions in the modular development of arithmetic (library {\tt Numbers}). The reason for making {\tt FSets} evolve is that the computational and logical contents were quite intertwined in the original implementation, leading in some cases to longer computations than expected; this problem is solved in the new {\tt MSets} implementation. As for the modular arithmetic library, it only dealt with the basic arithmetic operators in the former version, and its current extension adds the standard theory of the division, min and max functions, all made available for free to any implementation of $\mathbb{N}$, $\mathbb{Z}$ or $\mathbb{Z}/n\mathbb{Z}$. The main other evolutions of the library are due to Hugo Herbelin, who made a revision of the sorting library (including a certified merge-sort), and to Guillaume Melquiond, who slightly revised and cleaned up the library of reals. The module system evolved significantly. Besides the resolution of some efficiency issues and a more flexible construction of module types, \'Elie Soubiran brought a new model of name equivalence, the $\Delta$-equivalence, which respects as much as possible the names given by the users. He also designed, with Pierre Letouzey, a new convenient operator \verb!<+!
for nesting functor application, which provides a light notation for inheriting the properties of cascading modules. The new tactic {\tt nsatz} is due to Lo\"{\i}c Pottier. It works by computing Gr\"obner bases. Regarding the existing tactics, various improvements have been made by Matthieu Sozeau, Hugo Herbelin and Pierre Letouzey. Matthieu Sozeau extended and refined the type classes and {\tt Program} features (the {\sc Russell} language). Pierre Letouzey maintained and improved the extraction mechanism. Bruno Barras and \'Elie Soubiran maintained the Coq checker, Julien Forest maintained the {\tt Function} mechanism for reasoning over recursively defined functions. Matthieu Sozeau, Hugo Herbelin and Jean-Marc Notin maintained {\tt coqdoc}. Fr\'ed\'eric Besson maintained the {\sc Micromega} platform for deciding systems of inequalities. Pierre Courtieu maintained the support for the Proof General Emacs interface. Claude March\'e maintained the plugin for calling external provers ({\tt dp}). Yves Bertot made some improvements to the libraries of lists and integers. Matthias Puech improved the search functions. Guillaume Melquiond usefully contributed here and there. Yann R\'egis-Gianas grounded the support for Unicode on a more standard and more robust basis. Though invisible from outside, Arnaud Spiwack improved the general process of management of existential variables. Pierre Letouzey and St\'ephane Glondu improved the compilation scheme of the Coq archive. Vincent Gross provided support for {\CoqIDE}. Jean-Marc Notin provided support for benchmarking and archiving. Many users helped by reporting problems, providing patches, suggesting improvements or making useful comments, either on the bug tracker or on the Coq-club mailing list. This includes, non-exhaustively, C\'edric Auger, Arthur Chargu\'eraud, Fran\c{c}ois Garillot, Georges Gonthier, Robin Green, St\'ephane Lescuyer, Eelis van der Weegen,~... Though not directly related to the implementation, special thanks go to Yves Bertot, Pierre Cast\'eran, Adam Chlipala, and Benjamin Pierce for the excellent teaching materials they provided. \begin{flushright} Paris, April 2010\\ Hugo Herbelin\\ \end{flushright} \section*{Credits: version 8.4} {\Coq} version 8.4 contains the result of three long-term projects: a new modular library of arithmetic by Pierre Letouzey, a new proof engine by Arnaud Spiwack and a new communication protocol for {\CoqIDE} by Vincent Gross. The new modular library of arithmetic extends, generalizes and unifies the existing libraries on Peano arithmetic (types {\tt nat}, {\tt N} and {\tt BigN}), positive arithmetic (type {\tt positive}), integer arithmetic ({\tt Z} and {\tt BigZ}) and machine word arithmetic (type {\tt Int31}). It provides unified notations (e.g. systematic use of {\tt add} and {\tt mul} for denoting the addition and multiplication operators), and a systematic and generic development of operators and properties of these operators for all the types mentioned above, including gcd, lcm, power, square root, base 2 logarithm, division, modulo, bitwise operations, logical shifts, comparisons, iterators, ... The most visible feature of the new proof engine is the support for structured scripts (bullets and proof brackets) but, even if not yet available to users, the new engine also provides the basis for refining existential variables using tactics, for applying tactics to several goals simultaneously, and for reordering goals, all features which are planned for the next release.
The new proof engine forced {\tt info} and {\tt Show Script} to be reimplemented differently, which was done by Pierre Letouzey. Before version 8.4, {\CoqIDE} was linked to {\Coq} with the graphical interface living in a separate thread. From version 8.4, {\CoqIDE} is a separate process communicating with {\Coq} through a textual channel. This allows for a more robust interfacing, the ability to interrupt {\Coq} without interrupting the interface, and the ability to manage several sessions in parallel. Relying on the infrastructure work done by Vincent Gross, Pierre Letouzey, Pierre Boutillier and Pierre-Marie P\'edrot contributed various refinements to {\CoqIDE}. {\Coq} 8.4 also comes with many smaller-scale changes and improvements regarding the different components of the system. The underlying logic has been extended with $\eta$-conversion thanks to Hugo Herbelin, St\'ephane Glondu and Benjamin Gr\'egoire. The addition of $\eta$-conversion is justified by the confidence that the formulation of the Calculus of Inductive Constructions based on typed equality (such as the one considered in Lee and Werner to build a set-theoretic model of CIC~\cite{LeeWerner11}) is applicable to the concrete implementation of {\Coq}. The underlying logic also benefited from a refinement of the guard condition for fixpoints by Pierre Boutillier, the point being that it is safe to propagate the information about structurally smaller arguments through $\beta$-redexes that are blocked by the ``match'' construction (blocked commutative cuts). Relying on the added permissiveness of the guard condition, Hugo Herbelin could extend the pattern-matching compilation algorithm so that matching over a sequence of terms involving dependencies of a term or of the indices of the type of a term in the type of other terms is systematically supported. Regarding the high-level specification language, Pierre Boutillier introduced the ability to give implicit arguments to anonymous functions, Hugo Herbelin introduced the ability to define notations with several binders (e.g. \verb=exists x y z, P=), Matthieu Sozeau made the type class inference mechanism more robust and predictable, and Enrico Tassi introduced a command {\tt Arguments} that generalizes {\tt Implicit Arguments} and {\tt Arguments Scope} for assigning various properties to arguments of constants. Various improvements in the type inference algorithm were provided by Matthieu Sozeau and Hugo Herbelin with contributions from Enrico Tassi. Regarding tactics, Hugo Herbelin introduced support for referring to expressions occurring in the goal by pattern in tactics such as {\tt set} or {\tt destruct}. Hugo Herbelin also relied on ideas from Chung-Kil Hur's {\tt Heq} plugin to introduce automatic computation of occurrences to generalize when using {\tt destruct} and {\tt induction} on types with indices. St\'ephane Glondu introduced new tactics {\tt constr\_eq}, {\tt is\_evar} and {\tt has\_evar} to be used when writing complex tactics. Enrico Tassi added support for fine-tuning the behavior of {\tt simpl}. Enrico Tassi added the ability to specify over which variables of a section a lemma has to be exactly generalized. Pierre Letouzey added a tactic {\tt timeout} and the interruptibility of {\tt vm\_compute}. Bug fixes and miscellaneous improvements of the tactic language came from Hugo Herbelin, Pierre Letouzey and Matthieu Sozeau.
Regarding decision tactics, Lo\"ic Pottier maintained {\tt Nsatz}, moving in particular to a type-class based reification of goals, while Fr\'ed\'eric Besson maintained {\tt Micromega}, adding in particular support for division. Regarding vernacular commands, St\'ephane Glondu provided new commands to analyze the structure of type universes. Regarding libraries, a new library about lists of a given length (called vectors) has been provided by Pierre Boutillier. A new instance of finite sets based on Red-Black trees and provided by Andrew Appel has been adapted for the standard library by Pierre Letouzey. In the library of real analysis, Yves Bertot changed the definition of $\pi$ and provided a proof of a long-standing fact that had remained unproved in this library, namely that $\sin \frac{\pi}{2} = 1$. Pierre Corbineau maintained the Mathematical Proof Language (C-zar). Bruno Barras and Benjamin Gr\'egoire maintained the call-by-value reduction machines. The extraction mechanism benefited from several improvements provided by Pierre Letouzey. Pierre Letouzey maintained the module system, with contributions from \'Elie Soubiran. Julien Forest maintained the {\tt Function} command. Matthieu Sozeau maintained the setoid rewriting mechanism. {\Coq}-related tools have been upgraded too. In particular, {\tt coq\_makefile} has been largely revised by Pierre Boutillier. Also, patches from Adam Chlipala for {\tt coqdoc} have been integrated by Pierre Boutillier. Bruno Barras and Pierre Letouzey maintained the {\tt coqchk} checker. Pierre Courtieu and Arnaud Spiwack contributed new features for using {\Coq} through Proof General. The {\tt Dp} plugin has been removed. Use the plugin provided with {\tt Why 3} instead (\url{http://why3.lri.fr}). Under the hood, the {\Coq} architecture benefited from improvements in terms of efficiency and robustness, especially regarding universe management and existential variable management, thanks to Pierre Letouzey and Yann R\'egis-Gianas with contributions from St\'ephane Glondu and Matthias Puech. The build system is maintained by Pierre Letouzey with contributions from St\'ephane Glondu and Pierre Boutillier. A new backtracking mechanism simplifying the task of external interfaces has been designed by Pierre Letouzey. The general maintenance was done by Pierre Letouzey, Hugo Herbelin, Pierre Boutillier, Matthieu Sozeau and St\'ephane Glondu, with specific contributions from Guillaume Melquiond, Julien Narboux and Pierre-Marie P\'edrot. Packaging tools were provided by Pierre Letouzey (Windows), Pierre Boutillier (MacOS) and St\'ephane Glondu (Debian). Releasing, testing and benchmarking support was provided by Jean-Marc Notin. Many suggestions for improvements were motivated by feedback from users, on either the bug tracker or the coq-club mailing list. Special thanks go to the users who contributed patches, starting with Tom Prince. Other patch contributors include C\'edric Auger, David Baelde, Dan Grayson, Paolo Herms, Robbert Krebbers, Marc Lasson, Hendrik Tews and Eelis van der Weegen. \begin{flushright} Paris, December 2011\\ Hugo Herbelin\\ \end{flushright} %new Makefile %\newpage % Integration of ZArith lemmas from Sophia and Nijmegen.
% $Id: RefMan-pre.tex 15698 2012-08-07 22:47:51Z herbelin $ %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/biblio.bib0000640000175000001440000011417411675042573015604 0ustar notinusers@String{jfp = "Journal of Functional Programming"} @String{lncs = "Lecture Notes in Computer Science"} @String{lnai = "Lecture Notes in Artificial Intelligence"} @String{SV = "{Springer-Verlag}"} @InProceedings{Aud91, author = {Ph. Audebaud}, booktitle = {Proceedings of the sixth Conf. on Logic in Computer Science.}, publisher = {IEEE}, title = {Partial {Objects} in the {Calculus of Constructions}}, year = {1991} } @PhDThesis{Aud92, author = {Ph. Audebaud}, school = {{Universit\'e} Bordeaux I}, title = {Extension du Calcul des Constructions par Points fixes}, year = {1992} } @InProceedings{Audebaud92b, author = {Ph. Audebaud}, booktitle = {{Proceedings of the 1992 Workshop on Types for Proofs and Programs}}, editor = {{B. Nordstr\"om and K. Petersson and G. Plotkin}}, note = {Also Research Report LIP-ENS-Lyon}, pages = {21--34}, title = {{CC+ : an extension of the Calculus of Constructions with fixpoints}}, year = {1992} } @InProceedings{Augustsson85, author = {L. Augustsson}, title = {{Compiling Pattern Matching}}, booktitle = {Conference Functional Programming and Computer Architecture}, year = {1985} } @Article{BaCo85, author = {J.L. Bates and R.L. Constable}, journal = {ACM transactions on Programming Languages and Systems}, title = {Proofs as {Programs}}, volume = {7}, year = {1985} } @Book{Bar81, author = {H.P. Barendregt}, publisher = {North-Holland}, title = {The Lambda Calculus its Syntax and Semantics}, year = {1981} } @TechReport{Bar91, author = {H. Barendregt}, institution = {Catholic University Nijmegen}, note = {In Handbook of Logic in Computer Science, Vol II}, number = {91-19}, title = {Lambda {Calculi with Types}}, year = {1991} } @Article{BeKe92, author = {G. Bellin and J. Ketonen}, journal = {Theoretical Computer Science}, pages = {115--142}, title = {A decision procedure revisited : Notes on direct logic, linear logic and its implementation}, volume = {95}, year = {1992} } @Book{Bee85, author = {M.J. Beeson}, publisher = SV, title = {Foundations of Constructive Mathematics, Metamathematical Studies}, year = {1985} } @Book{Bis67, author = {E. Bishop}, publisher = {McGraw-Hill}, title = {Foundations of Constructive Analysis}, year = {1967} } @Book{BoMo79, author = {R.S. Boyer and J.S. Moore}, key = {BoMo79}, publisher = {Academic Press}, series = {ACM Monograph}, title = {A computational logic}, year = {1979} } @MastersThesis{Bou92, author = {S. Boutin}, month = sep, school = {{Universit\'e Paris 7}}, title = {Certification d'un compilateur {ML en Coq}}, year = {1992} } @InProceedings{Bou97, title = {Using reflection to build efficient and certified decision procedure s}, author = {S. Boutin}, booktitle = {TACS'97}, editor = {Martin Abadi and Takahashi Ito}, publisher = SV, series = lncs, volume = 1281, year = {1997} } @PhDThesis{Bou97These, author = {S. Boutin}, title = {R\'eflexions sur les quotients}, school = {Paris 7}, year = 1997, type = {th\`ese d'Universit\'e}, month = apr } @Article{Bru72, author = {N.J. de Bruijn}, journal = {Indag. Math.}, title = {{Lambda-Calculus Notation with Nameless Dummies, a Tool for Automatic Formula Manipulation, with Application to the Church-Rosser Theorem}}, volume = {34}, year = {1972} } @InCollection{Bru80, author = {N.J. de Bruijn}, booktitle = {to H.B. 
Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, editor = {J.P. Seldin and J.R. Hindley}, publisher = {Academic Press}, title = {A survey of the project {Automath}}, year = {1980} } @TechReport{COQ93, author = {G. Dowek and A. Felty and H. Herbelin and G. Huet and C. Murthy and C. Parent and C. Paulin-Mohring and B. Werner}, institution = {INRIA}, month = may, number = {154}, title = {{The Coq Proof Assistant User's Guide Version 5.8}}, year = {1993} } @TechReport{COQ02, author = {The Coq Development Team}, institution = {INRIA}, month = Feb, number = {255}, title = {{The Coq Proof Assistant Reference Manual Version 7.2}}, year = {2002} } @TechReport{CPar93, author = {C. Parent}, institution = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, month = oct, note = {Also in~\cite{Nijmegen93}}, number = {93-29}, title = {Developing certified programs in the system {Coq}- {The} {Program} tactic}, year = {1993} } @PhDThesis{CPar95, author = {C. Parent}, school = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, title = {{Synth\`ese de preuves de programmes dans le Calcul des Constructions Inductives}}, year = {1995} } @Book{Caml, author = {P. Weis and X. Leroy}, publisher = {InterEditions}, title = {Le langage Caml}, year = {1993} } @InProceedings{ChiPotSimp03, author = {Laurent Chicli and Lo\"{\i}c Pottier and Carlos Simpson}, title = {Mathematical Quotients and Quotient Types in Coq}, booktitle = {TYPES}, crossref = {DBLP:conf/types/2002}, year = {2002} } @TechReport{CoC89, author = {Projet Formel}, institution = {INRIA}, number = {110}, title = {{The Calculus of Constructions. Documentation and user's guide, Version 4.10}}, year = {1989} } @InProceedings{CoHu85a, author = {Th. Coquand and G. Huet}, address = {Linz}, booktitle = {EUROCAL'85}, publisher = SV, series = LNCS, title = {{Constructions : A Higher Order Proof System for Mechanizing Mathematics}}, volume = {203}, year = {1985} } @InProceedings{CoHu85b, author = {Th. Coquand and G. Huet}, booktitle = {Logic Colloquium'85}, editor = {The Paris Logic Group}, publisher = {North-Holland}, title = {{Concepts Math\'ematiques et Informatiques formalis\'es dans le Calcul des Constructions}}, year = {1987} } @Article{CoHu86, author = {Th. Coquand and G. Huet}, journal = {Information and Computation}, number = {2/3}, title = {The {Calculus of Constructions}}, volume = {76}, year = {1988} } @InProceedings{CoPa89, author = {Th. Coquand and C. Paulin-Mohring}, booktitle = {Proceedings of Colog'88}, editor = {P. Martin-L\"of and G. Mints}, publisher = SV, series = LNCS, title = {Inductively defined types}, volume = {417}, year = {1990} } @Book{Con86, author = {R.L. {Constable et al.}}, publisher = {Prentice-Hall}, title = {{Implementing Mathematics with the Nuprl Proof Development System}}, year = {1986} } @PhDThesis{Coq85, author = {Th. Coquand}, month = jan, school = {Universit\'e Paris~7}, title = {Une Th\'eorie des Constructions}, year = {1985} } @InProceedings{Coq86, author = {Th. Coquand}, address = {Cambridge, MA}, booktitle = {Symposium on Logic in Computer Science}, publisher = {IEEE Computer Society Press}, title = {{An Analysis of Girard's Paradox}}, year = {1986} } @InProceedings{Coq90, author = {Th. Coquand}, booktitle = {Logic and Computer Science}, editor = {P. Oddifredi}, note = {INRIA Research Report 1088, also in~\cite{CoC89}}, publisher = {Academic Press}, title = {{Metamathematical Investigations of a Calculus of Constructions}}, year = {1990} } @InProceedings{Coq91, author = {Th. 
Coquand}, booktitle = {Proceedings 9th Int. Congress of Logic, Methodology and Philosophy of Science}, title = {{A New Paradox in Type Theory}}, month = {August}, year = {1991} } @InProceedings{Coq92, author = {Th. Coquand}, title = {{Pattern Matching with Dependent Types}}, year = {1992}, crossref = {Bastad92} } @InProceedings{Coquand93, author = {Th. Coquand}, title = {{Infinite Objects in Type Theory}}, year = {1993}, crossref = {Nijmegen93} } @inproceedings{Corbineau08types, author = {P. Corbineau}, title = {A Declarative Language for the Coq Proof Assistant}, editor = {M. Miculan and I. Scagnetto and F. Honsell}, booktitle = {TYPES '07, Cividale del Friuli, Revised Selected Papers}, publisher = {Springer}, series = LNCS, volume = {4941}, year = {2007}, pages = {69-84}, ee = {http://dx.doi.org/10.1007/978-3-540-68103-8_5}, } @PhDThesis{Cor97, author = {C. Cornes}, month = nov, school = {{Universit\'e Paris 7}}, title = {Conception d'un langage de haut niveau de reprsentation de preuves}, type = {Th\`ese de Doctorat}, year = {1997} } @MastersThesis{Cou94a, author = {J. Courant}, month = sep, school = {DEA d'Informatique, ENS Lyon}, title = {Explicitation de preuves par r\'ecurrence implicite}, year = {1994} } @InProceedings{Del99, author = {Delahaye, D.}, title = {Information Retrieval in a Coq Proof Library using Type Isomorphisms}, booktitle = {Proceedings of TYPES '99, L\"okeberg}, publisher = SV, series = lncs, year = {1999}, url = "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# "{\sf TYPES99-SIsos.ps.gz}" } @InProceedings{Del00, author = {Delahaye, D.}, title = {A {T}actic {L}anguage for the {S}ystem {{\sf Coq}}}, booktitle = {Proceedings of Logic for Programming and Automated Reasoning (LPAR), Reunion Island}, publisher = SV, series = LNCS, volume = {1955}, pages = {85--95}, month = {November}, year = {2000}, url = "{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# "{\sf LPAR2000-ltac.ps.gz}" } @InProceedings{DelMay01, author = {Delahaye, D. and Mayero, M.}, title = {{\tt Field}: une proc\'edure de d\'ecision pour les nombres r\'eels en {\Coq}}, booktitle = {Journ\'ees Francophones des Langages Applicatifs, Pontarlier}, publisher = {INRIA}, month = {Janvier}, year = {2001}, url = "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# "{\sf JFLA2000-Field.ps.gz}" } @TechReport{Dow90, author = {G. Dowek}, institution = {INRIA}, number = {1283}, title = {Naming and Scoping in a Mathematical Vernacular}, type = {Research Report}, year = {1990} } @Article{Dow91a, author = {G. Dowek}, journal = {Compte-Rendus de l'Acad\'emie des Sciences}, note = {The undecidability of Third Order Pattern Matching in Calculi with Dependent Types or Type Constructors}, number = {12}, pages = {951--956}, title = {L'Ind\'ecidabilit\'e du Filtrage du Troisi\`eme Ordre dans les Calculs avec Types D\'ependants ou Constructeurs de Types}, volume = {I, 312}, year = {1991} } @InProceedings{Dow91b, author = {G. Dowek}, booktitle = {Proceedings of Mathematical Foundation of Computer Science}, note = {Also INRIA Research Report}, pages = {151--160}, publisher = SV, series = LNCS, title = {A Second Order Pattern Matching Algorithm in the Cube of Typed $\lambda$-calculi}, volume = {520}, year = {1991} } @PhDThesis{Dow91c, author = {G. Dowek}, month = dec, school = {Universit\'e Paris 7}, title = {D\'emonstration automatique dans le Calcul des Constructions}, year = {1991} } @Article{Dow92a, author = {G. 
Dowek}, title = {The Undecidability of Pattern Matching in Calculi where Primitive Recursive Functions are Representable}, year = 1993, journal = tcs, volume = 107, number = 2, pages = {349-356} } @Article{Dow94a, author = {G. Dowek}, journal = {Annals of Pure and Applied Logic}, volume = {69}, pages = {135--155}, title = {Third order matching is decidable}, year = {1994} } @InProceedings{Dow94b, author = {G. Dowek}, booktitle = {Proceedings of the second international conference on typed lambda calculus and applications}, title = {Lambda-calculus, Combinators and the Comprehension Schema}, year = {1995} } @InProceedings{Dyb91, author = {P. Dybjer}, booktitle = {Logical Frameworks}, editor = {G. Huet and G. Plotkin}, pages = {59--79}, publisher = {Cambridge University Press}, title = {Inductive sets and families in {Martin-Lf's} Type Theory and their set-theoretic semantics: An inversion principle for {Martin-L\"of's} type theory}, volume = {14}, year = {1991} } @Article{Dyc92, author = {Roy Dyckhoff}, journal = {The Journal of Symbolic Logic}, month = sep, number = {3}, title = {Contraction-free sequent calculi for intuitionistic logic}, volume = {57}, year = {1992} } @MastersThesis{Fil94, author = {J.-C. Filli\^atre}, month = sep, school = {DEA d'Informatique, ENS Lyon}, title = {Une proc\'edure de d\'ecision pour le Calcul des Pr\'edicats Direct. tude et impl\'ementation dans le syst\`eme {\Coq}}, year = {1994} } @TechReport{Filliatre95, author = {J.-C. Filli\^atre}, institution = {LIP-ENS-Lyon}, title = {A decision procedure for Direct Predicate Calculus}, type = {Research report}, number = {96--25}, year = {1995} } @Article{Filliatre03jfp, author = {J.-C. Fillitre}, title = {Verification of Non-Functional Programs using Interpretations in Type Theory}, journal = jfp, volume = 13, number = 4, pages = {709--745}, month = jul, year = 2003, note = {[English translation of \cite{Filliatre99}]}, url = {http://www.lri.fr/~filliatr/ftp/publis/jphd.ps.gz}, topics = {team, lri}, type_publi = {irevcomlec} } @PhDThesis{Filliatre99, author = {J.-C. Filli\^atre}, title = {Preuve de programmes imp\'eratifs en th\'eorie des types}, type = {Thse de Doctorat}, school = {Universit\'e Paris-Sud}, year = 1999, month = {July}, url = {\url{http://www.lri.fr/~filliatr/ftp/publis/these.ps.gz}} } @Unpublished{Filliatre99c, author = {J.-C. Filli\^atre}, title = {{Formal Proof of a Program: Find}}, month = {January}, year = 2000, note = {Submitted to \emph{Science of Computer Programming}}, url = {\url{http://www.lri.fr/~filliatr/ftp/publis/find.ps.gz}} } @InProceedings{FilliatreMagaud99, author = {J.-C. Filli\^atre and N. Magaud}, title = {Certification of sorting algorithms in the system {\Coq}}, booktitle = {Theorem Proving in Higher Order Logics: Emerging Trends}, year = 1999, url = {\url{http://www.lri.fr/~filliatr/ftp/publis/Filliatre-Magaud.ps.gz}} } @Unpublished{Fle90, author = {E. Fleury}, month = jul, note = {Rapport de Stage}, title = {Implantation des algorithmes de {Floyd et de Dijkstra} dans le {Calcul des Constructions}}, year = {1990} } @Book{Fourier, author = {Jean-Baptiste-Joseph Fourier}, publisher = {Gauthier-Villars}, title = {Fourier's method to solve linear inequations/equations systems.}, year = {1890} } @InProceedings{Gim94, author = {E. 
Gim\'enez}, booktitle = {Types'94 : Types for Proofs and Programs}, note = {Extended version in LIP research report 95-07, ENS Lyon}, publisher = SV, series = LNCS, title = {Codifying guarded definitions with recursive schemes}, volume = {996}, year = {1994} } @TechReport{Gim98, author = {E. Gim\'enez}, title = {A Tutorial on Recursive Types in Coq}, institution = {INRIA}, year = 1998, month = mar } @Unpublished{GimCas05, author = {E. Gim\'enez and P. Cast\'eran}, title = {A Tutorial on [Co-]Inductive Types in Coq}, institution = {INRIA}, year = 2005, month = jan, note = {available at \url{http://coq.inria.fr/doc}} } @InProceedings{Gimenez95b, author = {E. Gim\'enez}, booktitle = {Workshop on Types for Proofs and Programs}, series = LNCS, number = {1158}, pages = {135-152}, title = {An application of co-Inductive types in Coq: verification of the Alternating Bit Protocol}, editorS = {S. Berardi and M. Coppo}, publisher = SV, year = {1995} } @InProceedings{Gir70, author = {J.-Y. Girard}, booktitle = {Proceedings of the 2nd Scandinavian Logic Symposium}, publisher = {North-Holland}, title = {Une extension de l'interpr\'etation de {G\"odel} \`a l'analyse, et son application \`a l'\'elimination des coupures dans l'analyse et la th\'eorie des types}, year = {1970} } @PhDThesis{Gir72, author = {J.-Y. Girard}, school = {Universit\'e Paris~7}, title = {Interpr\'etation fonctionnelle et \'elimination des coupures de l'arithm\'etique d'ordre sup\'erieur}, year = {1972} } @Book{Gir89, author = {J.-Y. Girard and Y. Lafont and P. Taylor}, publisher = {Cambridge University Press}, series = {Cambridge Tracts in Theoretical Computer Science 7}, title = {Proofs and Types}, year = {1989} } @TechReport{Har95, author = {John Harrison}, title = {Metatheory and Reflection in Theorem Proving: A Survey and Critique}, institution = {SRI International Cambridge Computer Science Research Centre,}, year = 1995, type = {Technical Report}, number = {CRC-053}, abstract = {http://www.cl.cam.ac.uk/users/jrh/papers.html} } @MastersThesis{Hir94, author = {D. Hirschkoff}, month = sep, school = {DEA IARFA, Ecole des Ponts et Chauss\'ees, Paris}, title = {criture d'une tactique arithm\'etique pour le syst\`eme {\Coq}}, year = {1994} } @InProceedings{HofStr98, author = {Martin Hofmann and Thomas Streicher}, title = {The groupoid interpretation of type theory}, booktitle = {Proceedings of the meeting Twenty-five years of constructive type theory}, publisher = {Oxford University Press}, year = {1998} } @InCollection{How80, author = {W.A. Howard}, booktitle = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, editor = {J.P. Seldin and J.R. Hindley}, note = {Unpublished 1969 Manuscript}, publisher = {Academic Press}, title = {The Formulae-as-Types Notion of Constructions}, year = {1980} } @InProceedings{Hue87tapsoft, author = {G. Huet}, title = {Programming of Future Generation Computers}, booktitle = {Proceedings of TAPSOFT87}, series = LNCS, volume = 249, pages = {276--286}, year = 1987, publisher = SV } @InProceedings{Hue87, author = {G. Huet}, booktitle = {Programming of Future Generation Computers}, editor = {K. Fuchi and M. Nivat}, note = {Also in \cite{Hue87tapsoft}}, publisher = {Elsevier Science}, title = {Induction Principles Formalized in the {Calculus of Constructions}}, year = {1988} } @InProceedings{Hue88, author = {G. Huet}, booktitle = {A perspective in Theoretical Computer Science. Commemorative Volume for Gift Siromoney}, editor = {R. 
Narasimhan}, note = {Also in~\cite{CoC89}}, publisher = {World Scientific Publishing}, title = {{The Constructive Engine}}, year = {1989} } @Book{Hue89, editor = {G. Huet}, publisher = {Addison-Wesley}, series = {The UT Year of Programming Series}, title = {Logical Foundations of Functional Programming}, year = {1989} } @InProceedings{Hue92, author = {G. Huet}, booktitle = {Proceedings of 12th FST/TCS Conference, New Delhi}, pages = {229--240}, publisher = SV, series = LNCS, title = {The Gallina Specification Language : A case study}, volume = {652}, year = {1992} } @Article{Hue94, author = {G. Huet}, journal = {J. Functional Programming}, pages = {371--394}, publisher = {Cambridge University Press}, title = {Residual theory in $\lambda$-calculus: a formal development}, volume = {4,3}, year = {1994} } @InCollection{HuetLevy79, author = {G. Huet and J.-J. L\'{e}vy}, title = {Call by Need Computations in Non-Ambigous Linear Term Rewriting Systems}, note = {Also research report 359, INRIA, 1979}, booktitle = {Computational Logic, Essays in Honor of Alan Robinson}, editor = {J.-L. Lassez and G. Plotkin}, publisher = {The MIT press}, year = {1991} } @Article{KeWe84, author = {J. Ketonen and R. Weyhrauch}, journal = {Theoretical Computer Science}, pages = {297--307}, title = {A decidable fragment of {P}redicate {C}alculus}, volume = {32}, year = {1984} } @Book{Kle52, author = {S.C. Kleene}, publisher = {North-Holland}, series = {Bibliotheca Mathematica}, title = {Introduction to Metamathematics}, year = {1952} } @Book{Kri90, author = {J.-L. Krivine}, publisher = {Masson}, series = {Etudes et recherche en informatique}, title = {Lambda-calcul {types et mod\`eles}}, year = {1990} } @Book{LE92, editor = {G. Huet and G. Plotkin}, publisher = {Cambridge University Press}, title = {Logical Environments}, year = {1992} } @Book{LF91, editor = {G. Huet and G. Plotkin}, publisher = {Cambridge University Press}, title = {Logical Frameworks}, year = {1991} } @Article{Laville91, author = {A. Laville}, title = {Comparison of Priority Rules in Pattern Matching and Term Rewriting}, journal = {Journal of Symbolic Computation}, volume = {11}, pages = {321--347}, year = {1991} } @InProceedings{LePa94, author = {F. Leclerc and C. Paulin-Mohring}, booktitle = {{Types for Proofs and Programs, Types' 93}}, editor = {H. Barendregt and T. Nipkow}, publisher = SV, series = {LNCS}, title = {{Programming with Streams in Coq. A case study : The Sieve of Eratosthenes}}, volume = {806}, year = {1994} } @TechReport{Leroy90, author = {X. Leroy}, title = {The {ZINC} experiment: an economical implementation of the {ML} language}, institution = {INRIA}, number = {117}, year = {1990} } @InProceedings{Let02, author = {P. Letouzey}, title = {A New Extraction for Coq}, booktitle = {TYPES}, year = 2002, crossref = {DBLP:conf/types/2002}, url = {draft at \url{http://www.pps.jussieu.fr/~letouzey/download/extraction2002.ps.gz}} } @PhDThesis{Luo90, author = {Z. Luo}, title = {An Extended Calculus of Constructions}, school = {University of Edinburgh}, year = {1990} } @Book{MaL84, author = {{P. Martin-L\"of}}, publisher = {Bibliopolis}, series = {Studies in Proof Theory}, title = {Intuitionistic Type Theory}, year = {1984} } @Article{MaSi94, author = {P. Manoury and M. Simonot}, title = {Automatizing Termination Proofs of Recursively Defined Functions.}, journal = {TCS}, volume = {135}, number = {2}, year = {1994}, pages = {319-343}, } @InProceedings{Miquel00, author = {A. 
Miquel}, title = {A Model for Impredicative Type Systems with Universes, Intersection Types and Subtyping}, booktitle = {{Proceedings of the 15th Annual IEEE Symposium on Logic in Computer Science (LICS'00)}}, publisher = {IEEE Computer Society Press}, year = {2000} } @PhDThesis{Miquel01a, author = {A. Miquel}, title = {Le Calcul des Constructions implicite: syntaxe et s\'emantique}, month = {dec}, school = {{Universit\'e Paris 7}}, year = {2001} } @InProceedings{Miquel01b, author = {A. Miquel}, title = {The Implicit Calculus of Constructions: Extending Pure Type Systems with an Intersection Type Binder and Subtyping}, booktitle = {{Proceedings of the fifth International Conference on Typed Lambda Calculi and Applications (TLCA01), Krakow, Poland}}, publisher = SV, series = {LNCS}, number = 2044, year = {2001} } @InProceedings{MiWer02, author = {A. Miquel and B. Werner}, title = {The Not So Simple Proof-Irrelevant Model of CC}, booktitle = {TYPES}, year = {2002}, pages = {240-258}, ee = {http://link.springer.de/link/service/series/0558/bibs/2646/26460240.htm}, crossref = {DBLP:conf/types/2002}, bibsource = {DBLP, http://dblp.uni-trier.de} } @proceedings{DBLP:conf/types/2002, editor = {H. Geuvers and F. Wiedijk}, title = {Types for Proofs and Programs, Second International Workshop, TYPES 2002, Berg en Dal, The Netherlands, April 24-28, 2002, Selected Papers}, booktitle = {TYPES}, publisher = SV, series = LNCS, volume = {2646}, year = {2003}, isbn = {3-540-14031-X}, bibsource = {DBLP, http://dblp.uni-trier.de} } @InProceedings{Moh89a, author = {C. Paulin-Mohring}, address = {Austin}, booktitle = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, month = jan, publisher = {ACM}, title = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, year = {1989} } @PhDThesis{Moh89b, author = {C. Paulin-Mohring}, month = jan, school = {{Universit\'e Paris 7}}, title = {Extraction de programmes dans le {Calcul des Constructions}}, year = {1989} } @InProceedings{Moh93, author = {C. Paulin-Mohring}, booktitle = {Proceedings of the conference Typed Lambda Calculi and Applications}, editor = {M. Bezem and J.-F. Groote}, note = {Also LIP research report 92-49, ENS Lyon}, number = {664}, publisher = SV, series = {LNCS}, title = {{Inductive Definitions in the System Coq - Rules and Properties}}, year = {1993} } @Book{Moh97, author = {C. Paulin-Mohring}, month = jan, publisher = {{ENS Lyon}}, title = {{Le syst\`eme Coq. \mbox{Th\`ese d'habilitation}}}, year = {1997} } @MastersThesis{Mun94, author = {C. Muoz}, month = sep, school = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, title = {D\'emonstration automatique dans la logique propositionnelle intuitionniste}, year = {1994} } @PhDThesis{Mun97d, author = {C. Mu{\~{n}}oz}, title = {Un calcul de substitutions pour la repr\'esentation de preuves partielles en th\'eorie de types}, school = {Universit\'e Paris 7}, year = {1997}, note = {Version en anglais disponible comme rapport de recherche INRIA RR-3309}, type = {Th\`ese de Doctorat} } @Book{NoPS90, author = {B. {Nordstr\"om} and K. Peterson and J. Smith}, booktitle = {Information Processing 83}, publisher = {Oxford Science Publications}, series = {International Series of Monographs on Computer Science}, title = {Programming in {Martin-L\"of's} Type Theory}, year = {1990} } @Article{Nor88, author = {B. {Nordstr\"om}}, journal = {BIT}, title = {Terminating General Recursion}, volume = {28}, year = {1988} } @Book{Odi90, editor = {P. 
Odifreddi}, publisher = {Academic Press}, title = {Logic and Computer Science}, year = {1990} } @InProceedings{PaMS92, author = {M. Parigot and P. Manoury and M. Simonot}, address = {St. Petersburg, Russia}, booktitle = {Logic Programming and automated reasoning}, editor = {A. Voronkov}, month = jul, number = {624}, publisher = SV, series = {LNCS}, title = {{ProPre : A Programming language with proofs}}, year = {1992} } @Article{PaWe92, author = {C. Paulin-Mohring and B. Werner}, journal = {Journal of Symbolic Computation}, pages = {607--640}, title = {{Synthesis of ML programs in the system Coq}}, volume = {15}, year = {1993} } @Article{Par92, author = {M. Parigot}, journal = {Theoretical Computer Science}, number = {2}, pages = {335--356}, title = {{Recursive Programming with Proofs}}, volume = {94}, year = {1992} } @InProceedings{Parent95b, author = {C. Parent}, booktitle = {{Mathematics of Program Construction'95}}, publisher = SV, series = {LNCS}, title = {{Synthesizing proofs from programs in the Calculus of Inductive Constructions}}, volume = {947}, year = {1995} } @InProceedings{Prasad93, author = {K.V. Prasad}, booktitle = {{Proceedings of CONCUR'93}}, publisher = SV, series = {LNCS}, title = {{Programming with broadcasts}}, volume = {715}, year = {1993} } @Book{RC95, author = {di~Cosmo, R.}, title = {Isomorphisms of Types: from $\lambda$-calculus to information retrieval and language design}, series = {Progress in Theoretical Computer Science}, publisher = {Birkhauser}, year = {1995}, note = {ISBN-0-8176-3763-X} } @TechReport{Rou92, author = {J. Rouyer}, institution = {INRIA}, month = nov, number = {1795}, title = {{Dveloppement de l'Algorithme d'Unification dans le Calcul des Constructions}}, year = {1992} } @Article{Rushby98, title = {Subtypes for Specifications: Predicate Subtyping in {PVS}}, author = {John Rushby and Sam Owre and N. Shankar}, journal = {IEEE Transactions on Software Engineering}, pages = {709--720}, volume = 24, number = 9, month = sep, year = 1998 } @TechReport{Saibi94, author = {A. Sa\"{\i}bi}, institution = {INRIA}, month = dec, number = {2345}, title = {{Axiomatization of a lambda-calculus with explicit-substitutions in the Coq System}}, year = {1994} } @MastersThesis{Ter92, author = {D. Terrasse}, month = sep, school = {IARFA}, title = {{Traduction de TYPOL en COQ. Application \`a Mini ML}}, year = {1992} } @TechReport{ThBeKa92, author = {L. Th\'ery and Y. Bertot and G. Kahn}, institution = {INRIA Sophia}, month = may, number = {1684}, title = {Real theorem provers deserve real user-interfaces}, type = {Research Report}, year = {1992} } @Book{TrDa89, author = {A.S. Troelstra and D. van Dalen}, publisher = {North-Holland}, series = {Studies in Logic and the foundations of Mathematics, volumes 121 and 123}, title = {Constructivism in Mathematics, an introduction}, year = {1988} } @PhDThesis{Wer94, author = {B. Werner}, school = {Universit\'e Paris 7}, title = {Une th\'eorie des constructions inductives}, type = {Th\`ese de Doctorat}, year = {1994} } @PhDThesis{Bar99, author = {B. Barras}, school = {Universit\'e Paris 7}, title = {Auto-validation d'un systme de preuves avec familles inductives}, type = {Th\`ese de Doctorat}, year = {1999} } @Unpublished{ddr98, author = {D. de Rauglaudre}, title = {Camlp4 version 1.07.2}, year = {1998}, note = {In Camlp4 distribution} } @Article{dowek93, author = {G. 
Dowek}, title = {{A Complete Proof Synthesis Method for the Cube of Type Systems}}, journal = {Journal Logic Computation}, volume = {3}, number = {3}, pages = {287--315}, month = {June}, year = {1993} } @InProceedings{manoury94, author = {P. Manoury}, title = {{A User's Friendly Syntax to Define Recursive Functions as Typed $\lambda-$Terms}}, booktitle = {{Types for Proofs and Programs, TYPES'94}}, series = {LNCS}, volume = {996}, month = jun, year = {1994} } @TechReport{maranget94, author = {L. Maranget}, institution = {INRIA}, number = {2385}, title = {{Two Techniques for Compiling Lazy Pattern Matching}}, year = {1994} } @InProceedings{puel-suarez90, author = {L.Puel and A. Su\'arez}, booktitle = {{Conference Lisp and Functional Programming}}, series = {ACM}, publisher = SV, title = {{Compiling Pattern Matching by Term Decomposition}}, year = {1990} } @MastersThesis{saidi94, author = {H. Saidi}, month = sep, school = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, title = {R\'esolution d'\'equations dans le syst\`eme T de G\"odel}, year = {1994} } @inproceedings{sozeau06, author = {Matthieu Sozeau}, title = {Subset Coercions in {C}oq}, year = {2007}, booktitle = {TYPES'06}, pages = {237-252}, volume = {4502}, publisher = "Springer", series = {LNCS} } @inproceedings{sozeau08, Author = {Matthieu Sozeau and Nicolas Oury}, booktitle = {TPHOLs'08}, Pdf = {http://www.lri.fr/~sozeau/research/publications/drafts/classes.pdf}, Title = {{F}irst-{C}lass {T}ype {C}lasses}, Year = {2008}, } @Misc{streicher93semantical, author = {T. Streicher}, title = {Semantical Investigations into Intensional Type Theory}, note = {Habilitationsschrift, LMU Munchen.}, year = {1993} } @Misc{Pcoq, author = {Lemme Team}, title = {Pcoq a graphical user-interface for {Coq}}, note = {\url{http://www-sop.inria.fr/lemme/pcoq/}} } @Misc{ProofGeneral, author = {David Aspinall}, title = {Proof General}, note = {\url{http://proofgeneral.inf.ed.ac.uk/}} } @Book{CoqArt, title = {Interactive Theorem Proving and Program Development. Coq'Art: The Calculus of Inductive Constructions}, author = {Yves Bertot and Pierre Castran}, publisher = {Springer Verlag}, series = {Texts in Theoretical Computer Science. An EATCS series}, year = 2004 } @InCollection{wadler87, author = {P. Wadler}, title = {Efficient Compilation of Pattern Matching}, booktitle = {The Implementation of Functional Programming Languages}, editor = {S.L. 
Peyton Jones}, publisher = {Prentice-Hall}, year = {1987} } @inproceedings{DBLP:conf/types/CornesT95, author = {Cristina Cornes and Delphine Terrasse}, title = {Automating Inversion of Inductive Predicates in Coq}, booktitle = {TYPES}, year = {1995}, pages = {85-104}, crossref = {DBLP:conf/types/1995}, bibsource = {DBLP, http://dblp.uni-trier.de} } @proceedings{DBLP:conf/types/1995, editor = {Stefano Berardi and Mario Coppo}, title = {Types for Proofs and Programs, International Workshop TYPES'95, Torino, Italy, June 5-8, 1995, Selected Papers}, booktitle = {TYPES}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {1158}, year = {1996}, isbn = {3-540-61780-9}, bibsource = {DBLP, http://dblp.uni-trier.de} } @inproceedings{DBLP:conf/types/McBride00, author = {Conor McBride}, title = {Elimination with a Motive}, booktitle = {TYPES}, year = {2000}, pages = {197-216}, ee = {http://link.springer.de/link/service/series/0558/bibs/2277/22770197.htm}, crossref = {DBLP:conf/types/2000}, bibsource = {DBLP, http://dblp.uni-trier.de} } @proceedings{DBLP:conf/types/2000, editor = {Paul Callaghan and Zhaohui Luo and James McKinna and Robert Pollack}, title = {Types for Proofs and Programs, International Workshop, TYPES 2000, Durham, UK, December 8-12, 2000, Selected Papers}, booktitle = {TYPES}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {2277}, year = {2002}, isbn = {3-540-43287-6}, bibsource = {DBLP, http://dblp.uni-trier.de} } @INPROCEEDINGS{sugar, author = {Alessandro Giovini and Teo Mora and Gianfranco Niesi and Lorenzo Robbiano and Carlo Traverso}, title = {"One sugar cube, please" or Selection strategies in the Buchberger algorithm}, booktitle = { Proceedings of the ISSAC'91, ACM Press}, year = {1991}, pages = {5--4}, publisher = {} } @article{LeeWerner11, author = {Gyesik Lee and Benjamin Werner}, title = {Proof-irrelevant model of {CC} with predicative induction and judgmental equality}, journal = {Logical Methods in Computer Science}, volume = {7}, number = {4}, year = {2011}, ee = {http://dx.doi.org/10.2168/LMCS-7(4:5)2011}, bibsource = {DBLP, http://dblp.uni-trier.de} } @Comment{cross-references, must be at end} @Book{Bastad92, editor = {B. Nordstr\"om and K. Petersson and G. Plotkin}, publisher = {Available by ftp at site ftp.inria.fr}, title = {Proceedings of the 1992 Workshop on Types for Proofs and Programs}, year = {1992} } @Book{Nijmegen93, editor = {H. Barendregt and T. Nipkow}, publisher = SV, series = LNCS, title = {Types for Proofs and Programs}, volume = {806}, year = {1994} } @article{ TheOmegaPaper, author = "W. Pugh", title = "The Omega test: a fast and practical integer programming algorithm for dependence analysis", journal = "Communication of the ACM", pages = "102--114", year = "1992", } coq-8.4pl2/doc/refman/Cases.tex0000640000175000001440000006113011742055044015427 0ustar notinusers\achapter{Extended pattern-matching} %BEGIN LATEX \defaultheaders %END LATEX \aauthor{Cristina Cornes and Hugo Herbelin} \label{Mult-match-full} \ttindex{Cases} \index{ML-like patterns} This section describes the full form of pattern-matching in {\Coq} terms. \asection{Patterns}\label{implementation} The full syntax of {\tt match} is presented in Figures~\ref{term-syntax} and~\ref{term-syntax-aux}. Identifiers in patterns are either constructor names or variables. Any identifier that is not the constructor of an inductive or co-inductive type is considered to be a variable. 
A variable name cannot occur more than once in a given pattern. It is recommended to start variable names with a lowercase letter. If a pattern has the form $(c~\vec{x})$ where $c$ is a constructor symbol and $\vec{x}$ is a linear vector of (distinct) variables, it is called {\em simple}: it is the kind of pattern recognized by the basic version of {\tt match}. Conversely, if it is a variable $x$ or has the form $(c~\vec{p})$ with $\vec{p}$ not made only of variables, the pattern is called {\em nested}. A variable pattern matches any value, and the identifier is bound to that value. The pattern ``\texttt{\_}'' (called ``don't care'' or ``wildcard'' symbol) also matches any value, but does not bind anything. It may occur an arbitrary number of times in a pattern. Alias patterns written \texttt{(}{\sl pattern} \texttt{as} {\sl identifier}\texttt{)} are also accepted. This pattern matches the same values as {\sl pattern} does and {\sl identifier} is bound to the matched value. A pattern of the form {\pattern}{\tt |}{\pattern} is called disjunctive. A list of patterns separated with commas is also considered as a pattern and is called a {\em multiple pattern}. However, multiple patterns can only occur at the root of pattern-matching equations. Disjunctions of {\em multiple patterns} are allowed though. Since extended {\tt match} expressions are compiled into the primitive ones, the expressiveness of the theory remains the same. Once the parsing stage has finished, only simple patterns remain. Re-nesting of patterns is performed at printing time. An easy way to see the result of the expansion is to toggle off the nesting performed at printing (use here {\tt Set Printing Matching}), and then to print the term with \texttt{Print} if the term is a constant, or to use the command \texttt{Check}. The extended \texttt{match} still accepts an optional {\em elimination predicate} given after the keyword \texttt{return}. Given a pattern matching expression, if all the right-hand-sides of \texttt{=>} ({\em rhs} in short) have the same type, then this type can sometimes be synthesized, and so we can omit the \texttt{return} part. Otherwise the predicate after \texttt{return} has to be provided, like for the basic \texttt{match}. Let us illustrate through examples the different aspects of extended pattern matching. Consider for example the function that computes the maximum of two natural numbers. We can write it in primitive syntax by: \begin{coq_example} Fixpoint max (n m:nat) {struct m} : nat := match n with | O => m | S n' => match m with | O => S n' | S m' => S (max n' m') end end. \end{coq_example} \paragraph{Multiple patterns} Using multiple patterns in the definition of {\tt max} makes it possible to write: \begin{coq_example} Reset max. Fixpoint max (n m:nat) {struct m} : nat := match n, m with | O, _ => m | S n', O => S n' | S n', S m' => S (max n' m') end. \end{coq_example} which will be compiled into the previous form. The pattern-matching compilation strategy examines patterns from left to right. A \texttt{match} expression is generated {\bf only} when there is at least one constructor in the column of patterns. E.g. the following example does not build a \texttt{match} expression. \begin{coq_example} Check (fun x:nat => match x return nat with | y => y end). \end{coq_example} \paragraph{Aliasing subpatterns} We can also use ``\texttt{as} {\ident}'' to associate a name to a sub-pattern: \begin{coq_example} Reset max.
Fixpoint max (n m:nat) {struct n} : nat := match n, m with | O, _ => m | S n' as p, O => p | S n', S m' => S (max n' m') end. \end{coq_example} \paragraph{Nested patterns} Here is now an example of nested patterns: \begin{coq_example} Fixpoint even (n:nat) : bool := match n with | O => true | S O => false | S (S n') => even n' end. \end{coq_example} This is compiled into: \begin{coq_example} Print even. \end{coq_example} In the previous examples the patterns do not conflict with each other, but it is sometimes convenient to write patterns that admit a non-trivial superposition. Consider the boolean function \texttt{lef} that given two natural numbers yields \texttt{true} if the first one is less than or equal to the second one and \texttt{false} otherwise. We can write it as follows: \begin{coq_example} Fixpoint lef (n m:nat) {struct m} : bool := match n, m with | O, x => true | x, O => false | S n, S m => lef n m end. \end{coq_example} Note that the first and the second multiple patterns superpose because the pair of values \texttt{O O} matches both. Thus, what is the result of the function on those values? To eliminate ambiguity we use the {\em textual priority rule}: we consider patterns ordered from top to bottom, then a value is matched by the pattern at the $i$th row if and only if it is not matched by some pattern of a previous row. Thus in the example, \texttt{O O} is matched by the first pattern, and so \texttt{(lef O O)} yields \texttt{true}. Another way to write this function is: \begin{coq_example} Reset lef. Fixpoint lef (n m:nat) {struct m} : bool := match n, m with | O, x => true | S n, S m => lef n m | _, _ => false end. \end{coq_example} Here the last pattern superposes with the first two. Because of the priority rule, the last pattern will be used only for values that match neither the first nor the second one. Terms with useless patterns are not accepted by the system. Here is an example: % Test failure \begin{coq_eval} Set Printing Depth 50. (********** The following is not correct and should produce **********) (**************** Error: This clause is redundant ********************) \end{coq_eval} \begin{coq_example} Check (fun x:nat => match x with | O => true | S _ => false | x => true end). \end{coq_example} \paragraph{Disjunctive patterns} Multiple patterns that share the same right-hand-side can be factorized using the notation \nelist{\multpattern}{\tt |}. For instance, {\tt max} can be rewritten as follows: \begin{coq_eval} Reset max. \end{coq_eval} \begin{coq_example} Fixpoint max (n m:nat) {struct m} : nat := match n, m with | S n', S m' => S (max n' m') | 0, p | p, 0 => p end. \end{coq_example} Similarly, factorization of (not necessarily multiple) patterns that share the same variables is possible by using the notation \nelist{\pattern}{\tt |}. Here is an example: \begin{coq_example} Definition filter_2_4 (n:nat) : nat := match n with | 2 as m | 4 as m => m | _ => 0 end. \end{coq_example} Here is another example using disjunctive subpatterns. \begin{coq_example} Definition filter_some_square_corners (p:nat*nat) : nat*nat := match p with | ((2 as m | 4 as m), (3 as n | 5 as n)) => (m,n) | _ => (0,0) end. \end{coq_example} \asection{About patterns of parametric types} When matching objects of a parametric type, constructors in patterns {\em do not expect} the parameter arguments. Their value is deduced during expansion. Consider for example the type of polymorphic lists: \begin{coq_example} Inductive List (A:Set) : Set := | nil : List A | cons : A -> List A -> List A.
\end{coq_example} We can check the function {\em tail}: \begin{coq_example} Check (fun l:List nat => match l with | nil => nil nat | cons _ l' => l' end). \end{coq_example} When we use parameters in patterns there is an error message: % Test failure \begin{coq_eval} Set Printing Depth 50. (********** The following is not correct and should produce **********) (******** Error: The constructor cons expects 2 arguments ************) \end{coq_eval} \begin{coq_example} Check (fun l:List nat => match l with | nil A => nil nat | cons A _ l' => l' end). \end{coq_example} \asection{Matching objects of dependent types} The previous examples illustrate pattern matching on objects of non-dependent types, but we can also use the expansion strategy to destructure objects of dependent type. Consider the type \texttt{listn} of lists of a certain length: \label{listn} \begin{coq_example} Inductive listn : nat -> Set := | niln : listn 0 | consn : forall n:nat, nat -> listn n -> listn (S n). \end{coq_example} \asubsection{Understanding dependencies in patterns} We can define the function \texttt{length} over \texttt{listn} by: \begin{coq_example} Definition length (n:nat) (l:listn n) := n. \end{coq_example} Just for illustrating pattern matching, we can define it by case analysis: \begin{coq_example} Reset length. Definition length (n:nat) (l:listn n) := match l with | niln => 0 | consn n _ _ => S n end. \end{coq_example} We can understand the meaning of this definition using the same notions of usual pattern matching. % % Constraining of dependencies is not longer valid in V7 % \iffalse Now suppose we split the second pattern of \texttt{length} into two cases so to give an alternative definition using nested patterns: \begin{coq_example} Definition length1 (n:nat) (l:listn n) := match l with | niln => 0 | consn n _ niln => S n | consn n _ (consn _ _ _) => S n end. \end{coq_example} It is obvious that \texttt{length1} is another version of \texttt{length}. We can also give the following definition: \begin{coq_example} Definition length2 (n:nat) (l:listn n) := match l with | niln => 0 | consn n _ niln => 1 | consn n _ (consn m _ _) => S (S m) end. \end{coq_example} If we forget that \texttt{listn} is a dependent type and we read these definitions using the usual semantics of pattern matching, we can conclude that \texttt{length1} and \texttt{length2} are different functions. In fact, they are equivalent because the pattern \texttt{niln} implies that \texttt{n} can only match the value $0$ and analogously the pattern \texttt{consn} determines that \texttt{n} can only match values of the form $(S~v)$ where $v$ is the value matched by \texttt{m}. The converse is also true. If we destructure the length value with the pattern \texttt{O} then the list value should be $niln$. Thus, the following term \texttt{length3} corresponds to the function \texttt{length} but this time defined by case analysis on the dependencies instead of on the list: \begin{coq_example} Definition length3 (n:nat) (l:listn n) := match l with | niln => 0 | consn O _ _ => 1 | consn (S n) _ _ => S (S n) end. \end{coq_example} When we have nested patterns of dependent types, the semantics of pattern matching becomes a little more difficult because the set of values that are matched by a sub-pattern may be conditioned by the values matched by another sub-pattern. Dependent nested patterns are somehow constrained patterns. 
In the examples, the expansion of \texttt{length1} and \texttt{length2} yields exactly the same term but the expansion of \texttt{length3} is completely different. \texttt{length1} and \texttt{length2} are expanded into two nested case analysis on \texttt{listn} while \texttt{length3} is expanded into a case analysis on \texttt{listn} containing a case analysis on natural numbers inside. In practice the user can think about the patterns as independent and it is the expansion algorithm that cares to relate them. \\ \fi % % % \asubsection{When the elimination predicate must be provided} The examples given so far do not need an explicit elimination predicate because all the rhs have the same type and the strategy succeeds to synthesize it. Unfortunately when dealing with dependent patterns it often happens that we need to write cases where the type of the rhs are different instances of the elimination predicate. The function \texttt{concat} for \texttt{listn} is an example where the branches have different type and we need to provide the elimination predicate: \begin{coq_example} Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : listn (n + m) := match l in listn n return listn (n + m) with | niln => l' | consn n' a y => consn (n' + m) a (concat n' y m l') end. \end{coq_example} The elimination predicate is {\tt fun (n:nat) (l:listn n) => listn~(n+m)}. In general if $m$ has type $(I~q_1\ldots q_r~t_1\ldots t_s)$ where $q_1\ldots q_r$ are parameters, the elimination predicate should be of the form~: {\tt fun $y_1$\ldots $y_s$ $x$:($I$~$q_1$\ldots $q_r$~$y_1$\ldots $y_s$) => Q}. In the concrete syntax, it should be written~: \[ \kw{match}~m~\kw{as}~x~\kw{in}~(I~\_\ldots \_~y_1\ldots y_s)~\kw{return}~Q~\kw{with}~\ldots~\kw{end}\] The variables which appear in the \kw{in} and \kw{as} clause are new and bounded in the property $Q$ in the \kw{return} clause. The parameters of the inductive definitions should not be mentioned and are replaced by \kw{\_}. Recall that a list of patterns is also a pattern. So, when we destructure several terms at the same time and the branches have different type we need to provide the elimination predicate for this multiple pattern. It is done using the same scheme, each term may be associated to an \kw{as} and \kw{in} clause in order to introduce a dependent product. For example, an equivalent definition for \texttt{concat} (even though the matching on the second term is trivial) would have been: \begin{coq_example} Reset concat. Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : listn (n + m) := match l in listn n, l' return listn (n + m) with | niln, x => x | consn n' a y, x => consn (n' + m) a (concat n' y m x) end. \end{coq_example} % Notice that this time, the predicate \texttt{[n,\_:nat](listn (plus n % m))} is binary because we % destructure both \texttt{l} and \texttt{l'} whose types have arity one. % In general, if we destructure the terms $e_1\ldots e_n$ % the predicate will be of arity $m$ where $m$ is the sum of the % number of dependencies of the type of $e_1, e_2,\ldots e_n$ % (the $\lambda$-abstractions % should correspond from left to right to each dependent argument of the % type of $e_1\ldots e_n$). When the arity of the predicate (i.e. number of abstractions) is not correct Coq raises an error message. For example: % Test failure \begin{coq_eval} Reset concat. Set Printing Depth 50. 
(********** The following is not correct and should produce ***********) (** Error: the term l' has type listn m while it is expected to have **) (** type listn (?31 + ?32) **) \end{coq_eval} \begin{coq_example} Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : listn (n + m) := match l, l' with | niln, x => x | consn n' a y, x => consn (n' + m) a (concat n' y m x) end. \end{coq_example} \asection{Using pattern matching to write proofs} In all the previous examples the elimination predicate does not depend on the object(s) matched. But it may depend and the typical case is when we write a proof by induction or a function that yields an object of dependent type. An example of proof using \texttt{match} in given in Section~\ref{refine-example}. For example, we can write the function \texttt{buildlist} that given a natural number $n$ builds a list of length $n$ containing zeros as follows: \begin{coq_example} Fixpoint buildlist (n:nat) : listn n := match n return listn n with | O => niln | S n => consn n 0 (buildlist n) end. \end{coq_example} We can also use multiple patterns. Consider the following definition of the predicate less-equal \texttt{Le}: \begin{coq_example} Inductive LE : nat -> nat -> Prop := | LEO : forall n:nat, LE 0 n | LES : forall n m:nat, LE n m -> LE (S n) (S m). \end{coq_example} We can use multiple patterns to write the proof of the lemma \texttt{forall (n m:nat), (LE n m)}\verb=\/=\texttt{(LE m n)}: \begin{coq_example} Fixpoint dec (n m:nat) {struct n} : LE n m \/ LE m n := match n, m return LE n m \/ LE m n with | O, x => or_introl (LE x 0) (LEO x) | x, O => or_intror (LE x 0) (LEO x) | S n as n', S m as m' => match dec n m with | or_introl h => or_introl (LE m' n') (LES n m h) | or_intror h => or_intror (LE n' m') (LES m n h) end end. \end{coq_example} In the example of \texttt{dec}, the first \texttt{match} is dependent while the second is not. % In general, consider the terms $e_1\ldots e_n$, % where the type of $e_i$ is an instance of a family type % $\lb (\vec{d_i}:\vec{D_i}) \mto T_i$ ($1\leq i % \leq n$). Then, in expression \texttt{match} $e_1,\ldots, % e_n$ \texttt{of} \ldots \texttt{end}, the % elimination predicate ${\cal P}$ should be of the form: % $[\vec{d_1}:\vec{D_1}][x_1:T_1]\ldots [\vec{d_n}:\vec{D_n}][x_n:T_n]Q.$ The user can also use \texttt{match} in combination with the tactic \texttt{refine} (see Section~\ref{refine}) to build incomplete proofs beginning with a \texttt{match} construction. \asection{Pattern-matching on inductive objects involving local definitions} If local definitions occur in the type of a constructor, then there are two ways to match on this constructor. Either the local definitions are skipped and matching is done only on the true arguments of the constructors, or the bindings for local definitions can also be caught in the matching. Example. \begin{coq_eval} Reset Initial. Require Import Arith. \end{coq_eval} \begin{coq_example*} Inductive list : nat -> Set := | nil : list 0 | cons : forall n:nat, let m := (2 * n) in list m -> list (S (S m)). \end{coq_example*} In the next example, the local definition is not caught. \begin{coq_example} Fixpoint length n (l:list n) {struct l} : nat := match l with | nil => 0 | cons n l0 => S (length (2 * n) l0) end. \end{coq_example} But in this example, it is. \begin{coq_example} Fixpoint length' n (l:list n) {struct l} : nat := match l with | nil => 0 | cons _ m l0 => S (length' m l0) end. 
\end{coq_example} \Rem for a given matching clause, either none of the local definitions or all of them can be caught. \asection{Pattern-matching and coercions} If a mismatch occurs between the expected type of a pattern and its actual type, a coercion made from constructors is sought. If such a coercion can be found, it is automatically inserted around the pattern. Example: \begin{coq_example} Inductive I : Set := | C1 : nat -> I | C2 : I -> I. Coercion C1 : nat >-> I. Check (fun x => match x with | C2 O => 0 | _ => 0 end). \end{coq_example} \asection{When does the expansion strategy fail ?}\label{limitations} The strategy works very like in ML languages when treating patterns of non-dependent type. But there are new cases of failure that are due to the presence of dependencies. The error messages of the current implementation may be sometimes confusing. When the tactic fails because patterns are somehow incorrect then error messages refer to the initial expression. But the strategy may succeed to build an expression whose sub-expressions are well typed when the whole expression is not. In this situation the message makes reference to the expanded expression. We encourage users, when they have patterns with the same outer constructor in different equations, to name the variable patterns in the same positions with the same name. E.g. to write {\small\texttt{(cons n O x) => e1}} and {\small\texttt{(cons n \_ x) => e2}} instead of {\small\texttt{(cons n O x) => e1}} and {\small\texttt{(cons n' \_ x') => e2}}. This helps to maintain certain name correspondence between the generated expression and the original. Here is a summary of the error messages corresponding to each situation: \begin{ErrMsgs} \item \sverb{The constructor } {\sl ident} \sverb{ expects } {\sl num} \sverb{ arguments} \sverb{The variable } {\sl ident} \sverb{ is bound several times in pattern } {\sl term} \sverb{Found a constructor of inductive type } {\term} \sverb{ while a constructor of } {\term} \sverb{ is expected} Patterns are incorrect (because constructors are not applied to the correct number of the arguments, because they are not linear or they are wrongly typed). \item \errindex{Non exhaustive pattern-matching} The pattern matching is not exhaustive. \item \sverb{The elimination predicate } {\sl term} \sverb{ should be of arity } {\sl num} \sverb{ (for non dependent case) or } {\sl num} \sverb{ (for dependent case)} The elimination predicate provided to \texttt{match} has not the expected arity. %\item the whole expression is wrongly typed % CADUC ? % , or the synthesis of % implicit arguments fails (for example to find the elimination % predicate or to resolve implicit arguments in the rhs). % There are {\em nested patterns of dependent type}, the elimination % predicate corresponds to non-dependent case and has the form % $[x_1:T_1]...[x_n:T_n]T$ and {\bf some} $x_i$ occurs {\bf free} in % $T$. Then, the strategy may fail to find out a correct elimination % predicate during some step of compilation. In this situation we % recommend the user to rewrite the nested dependent patterns into % several \texttt{match} with {\em simple patterns}. \item {\tt Unable to infer a match predicate\\ Either there is a type incompatiblity or the problem involves\\ dependencies} There is a type mismatch between the different branches. The user should provide an elimination predicate. % Obsolete ? 
% \item because of nested patterns, it may happen that even though all % the rhs have the same type, the strategy needs dependent elimination % and so an elimination predicate must be provided. The system warns % about this situation, trying to compile anyway with the % non-dependent strategy. The risen message is: % \begin{itemize} % \item {\tt Warning: This pattern matching may need dependent % elimination to be compiled. I will try, but if fails try again % giving dependent elimination predicate.} % \end{itemize} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LA PROPAGATION DES CONTRAINTES ARRIERE N'EST PAS FAITE DANS LA V7 % TODO % \item there are {\em nested patterns of dependent type} and the % strategy builds a term that is well typed but recursive calls in fix % point are reported as illegal: % \begin{itemize} % \item {\tt Error: Recursive call applied to an illegal term ...} % \end{itemize} % This is because the strategy generates a term that is correct w.r.t. % the initial term but which does not pass the guard condition. In % this situation we recommend the user to transform the nested dependent % patterns into {\em several \texttt{match} of simple patterns}. Let us % explain this with an example. Consider the following definition of a % function that yields the last element of a list and \texttt{O} if it is % empty: % \begin{coq_example} % Fixpoint last [n:nat; l:(listn n)] : nat := % match l of % (consn _ a niln) => a % | (consn m _ x) => (last m x) | niln => O % end. % \end{coq_example} % It fails because of the priority between patterns, we know that this % definition is equivalent to the following more explicit one (which % fails too): % \begin{coq_example*} % Fixpoint last [n:nat; l:(listn n)] : nat := % match l of % (consn _ a niln) => a % | (consn n _ (consn m b x)) => (last n (consn m b x)) % | niln => O % end. % \end{coq_example*} % Note that the recursive call {\tt (last n (consn m b x))} is not % guarded. When treating with patterns of dependent types the strategy % interprets the first definition of \texttt{last} as the second % one\footnote{In languages of the ML family the first definition would % be translated into a term where the variable \texttt{x} is shared in % the expression. When patterns are of non-dependent types, Coq % compiles as in ML languages using sharing. When patterns are of % dependent types the compilation reconstructs the term as in the % second definition of \texttt{last} so to ensure the result of % expansion is well typed.}. Thus it generates a term where the % recursive call is rejected by the guard condition. % You can get rid of this problem by writing the definition with % \emph{simple patterns}: % \begin{coq_example} % Fixpoint last [n:nat; l:(listn n)] : nat := % <[_:nat]nat>match l of % (consn m a x) => Cases x of niln => a | _ => (last m x) end % | niln => O % end. % \end{coq_example} \end{ErrMsgs} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/Nsatz.tex0000640000175000001440000000764211576413633015507 0ustar notinusers\achapter{Nsatz: tactics for proving equalities in integral domains} \aauthor{Loc Pottier} The tactic \texttt{nsatz} proves goals of the form \[ \begin{array}{l} \forall X_1,\ldots,X_n \in A,\\ P_1(X_1,\ldots,X_n) = Q_1(X_1,\ldots,X_n) , \ldots , P_s(X_1,\ldots,X_n) =Q_s(X_1,\ldots,X_n)\\ \vdash P(X_1,\ldots,X_n) = Q(X_1,\ldots,X_n)\\ \end{array} \] where $P,Q, P_1,Q_1,\ldots,P_s,Q_s$ are polynomials and A is an integral domain, i.e. 
a commutative ring with no zero divisor. For example, A can be $\mathbb{R}$, $\mathbb{Z}$, or $\mathbb{Q}$. Note that the equality $=$ used in these goals can be any setoid equality (see \ref{setoidtactics}), not only Leibniz equality. It also proves formulas \[ \begin{array}{l} \forall X_1,\ldots,X_n \in A,\\ P_1(X_1,\ldots,X_n) = Q_1(X_1,\ldots,X_n) \wedge \ldots \wedge P_s(X_1,\ldots,X_n) =Q_s(X_1,\ldots,X_n)\\ \rightarrow P(X_1,\ldots,X_n) = Q(X_1,\ldots,X_n)\\ \end{array} \] performing the introductions automatically. \asection{Using the basic tactic \texttt{nsatz}} \tacindex{nsatz} Load the \texttt{Nsatz} module: \texttt{Require Import Nsatz}.\\ and use the tactic \texttt{nsatz}. \asection{More about \texttt{nsatz}} Hilbert's Nullstellensatz theorem shows how to reduce proofs of equalities on polynomials over a commutative ring A with no zero divisor to algebraic computations: it is easy to see that if a polynomial $P$ in $A[X_1,\ldots,X_n]$ satisfies $c P^r = \sum_{i=1}^{s} S_i P_i$, with $c \in A$, $c \not = 0$, $r$ a positive integer, and the $S_i$s in $A[X_1,\ldots,X_n]$, then $P$ is zero whenever the polynomials $P_1,...,P_s$ are zero (the converse is also true when A is an algebraically closed field: the method is complete). So, proving our initial problem reduces to finding $S_1,\ldots,S_s$, $c$ and $r$ such that $c (P-Q)^r = \sum_{i} S_i (P_i-Q_i)$, which will be proved by the tactic \texttt{ring}. This is achieved by the computation of a Groebner basis of the ideal generated by $P_1-Q_1,...,P_s-Q_s$, with an adapted version of the Buchberger algorithm. This computation is done after a step of {\em reification}, which is performed using {\em Type Classes} (see \ref{typeclasses}). The \texttt{Nsatz} module defines the tactic \texttt{nsatz}, which can be used without arguments: \\ \vspace*{3mm} \texttt{nsatz}\\ or with the syntax: \\ \vspace*{3mm} \texttt{nsatz with radicalmax:={\em number}\%N strategy:={\em number}\%Z parameters:={\em list of variables} variables:={\em list of variables}}\\ where: \begin{itemize} \item \texttt{radicalmax} is a bound used when searching for $r$ such that $c (P-Q)^r = \sum_{i=1..s} S_i (P_i - Q_i)$ \item \texttt{strategy} gives the order on variables $X_1,...X_n$ and the strategy used in the Buchberger algorithm (see \cite{sugar} for details): \begin{itemize} \item strategy = 0: reverse lexicographic order and newest s-polynomial. \item strategy = 1: reverse lexicographic order and sugar strategy. \item strategy = 2: pure lexicographic order and newest s-polynomial. \item strategy = 3: pure lexicographic order and sugar strategy. \end{itemize} \item \texttt{parameters} is the list of variables $X_{i_1},\ldots,X_{i_k}$ among $X_1,...,X_n$ which are considered as parameters: computation will be performed with rational fractions in these variables, i.e. polynomials are considered with coefficients in $R(X_{i_1},\ldots,X_{i_k})$. In this case, the coefficient $c$ can be a non-constant polynomial in $X_{i_1},\ldots,X_{i_k}$, and the tactic produces a goal which states that $c$ is not zero. \item \texttt{variables} is the list of the variables in the decreasing order in which they will be used in the Buchberger algorithm. If \texttt{variables} = {(@nil R)}, then it is replaced by all the variables which are not in \texttt{parameters}. \end{itemize} See file \texttt{Nsatz.v} for many examples, especially in geometry.
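For instance, here is a small illustrative session over $\mathbb{R}$, in the spirit of the examples of \texttt{Nsatz.v} (the statement and script below are only meant as a sketch of a typical use of the tactic):
\begin{coq_example*}
Require Import Reals Nsatz.
Open Scope R_scope.
(* x, y and z are the roots of t^3 - e1 t^2 + e2 t - e3, so when the
   elementary symmetric polynomials e1, e2, e3 all vanish, x^3 = 0. *)
Goal forall x y z : R,
  x + y + z = 0 -> x * y + y * z + z * x = 0 -> x * y * z = 0 ->
  x * x * x = 0.
Proof.
  intros; nsatz.
Qed.
\end{coq_example*}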
%%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-oth.tex0000640000175000001440000013547412006757263016354 0ustar notinusers\chapter[Vernacular commands]{Vernacular commands\label{Vernacular-commands} \label{Other-commands}} \section{Displaying} \subsection[\tt Print {\qualid}.]{\tt Print {\qualid}.\comindex{Print}} This command displays on the screen informations about the declared or defined object referred by {\qualid}. \begin{ErrMsgs} \item {\qualid} \errindex{not a defined object} \end{ErrMsgs} \begin{Variants} \item {\tt Print Term {\qualid}.} \comindex{Print Term}\\ This is a synonym to {\tt Print {\qualid}} when {\qualid} denotes a global constant. \item {\tt About {\qualid}.} \label{About} \comindex{About}\\ This displays various informations about the object denoted by {\qualid}: its kind (module, constant, assumption, inductive, constructor, abbreviation\ldots), long name, type, implicit arguments and argument scopes. It does not print the body of definitions or proofs. %\item {\tt Print Proof {\qualid}.}\comindex{Print Proof}\\ %In case \qualid\ denotes an opaque theorem defined in a section, %it is stored on a special unprintable form and displayed as %{\tt }. {\tt Print Proof} forces the printable form of \qualid\ %to be computed and displays it. \end{Variants} \subsection[\tt Print All.]{\tt Print All.\comindex{Print All}} This command displays informations about the current state of the environment, including sections and modules. \begin{Variants} \item {\tt Inspect \num.}\comindex{Inspect}\\ This command displays the {\num} last objects of the current environment, including sections and modules. \item {\tt Print Section {\ident}.}\comindex{Print Section}\\ should correspond to a currently open section, this command displays the objects defined since the beginning of this section. % Discontinued %% \item {\tt Print.}\comindex{Print}\\ %% This command displays the axioms and variables declarations in the %% environment as well as the constants defined since the last variable %% was introduced. \end{Variants} \section{Options and Flags} \subsection[\tt Set {\rm\sl option} {\rm\sl value}.]{\tt Set {\rm\sl option} {\rm\sl value}.\comindex{Set}} This command sets {\rm\sl option} to {\rm\sl value}. The original value of {\rm\sl option} is restored when the current module ends. \begin{Variants} \item {\tt Set {\rm\sl flag}.}\\ This command switches {\rm\sl flag} on. The original state of {\rm\sl flag} is restored when the current module ends. \item {\tt Local Set {\rm\sl option} {\rm\sl value}.\comindex{Local Set}} This command sets {\rm\sl option} to {\rm\sl value}. The original value of {\rm\sl option} is restored when the current \emph{section} ends. \item {\tt Local Set {\rm\sl flag}.}\\ This command switches {\rm\sl flag} on. The original state of {\rm\sl flag} is restored when the current \emph{section} ends. \item {\tt Global Set {\rm\sl option} {\rm\sl value}.\comindex{Global Set}} This command sets {\rm\sl option} to {\rm\sl value}. The original value of {\rm\sl option} is \emph{not} restored at the end of the module. Additionally, if set in a file, {\rm\sl option} is set to {\rm\sl value} when the file is {\tt Require}-d. \item {\tt Global Set {\rm\sl flag}.}\\ This command switches {\rm\sl flag} on. The original state of {\rm\sl flag} is \emph{not} restored at the end of the module. Additionally, if set in a file, {\rm\sl flag} is switched on when the file is {\tt Require}-d. 
\end{Variants} \subsection[\tt Unset {\rm\sl flag}.]{\tt Unset {\rm\sl flag}.\comindex{Unset}} This command switches {\rm\sl flag} off. The original state of {\rm\sl flag} is restored when the current module ends. \begin{Variants} \item {\tt Local Unset {\rm\sl flag}.\comindex{Local Unset}}\\ This command switches {\rm\sl flag} off. The original state of {\rm\sl flag} is restored when the current \emph{section} ends. \item {\tt Global Unset {\rm\sl flag}.\comindex{Global Unset}}\\ This command switches {\rm\sl flag} off. The original state of {\rm\sl flag} is \emph{not} restored at the end of the module. Additionally, if unset in a file, {\rm\sl flag} is switched off when the file is {\tt Require}-d. \end{Variants} \subsection[\tt Test {\rm\sl option}.]{\tt Test {\rm\sl option}.\comindex{Test}} This command prints the current value of {\rm\sl option}. \begin{Variants} \item {\tt Test {\rm\sl flag}.}\\ This command prints whether {\rm\sl flag} is on or off. \end{Variants} \section{Requests to the environment} \subsection[\tt Check {\term}.]{\tt Check {\term}.\label{Check} \comindex{Check}} This command displays the type of {\term}. When called in proof mode, the term is checked in the local context of the current subgoal. \subsection[\tt Eval {\rm\sl convtactic} in {\term}.]{\tt Eval {\rm\sl convtactic} in {\term}.\comindex{Eval}} This command performs the specified reduction on {\term}, and displays the resulting term with its type. The term to be reduced may depend on hypotheses introduced in the first subgoal (if a proof is in progress). \SeeAlso Section~\ref{Conversion-tactics}. \subsection[\tt Compute {\term}.]{\tt Compute {\term}.\comindex{Compute}} This command performs a call-by-value evaluation of {\term} by using the bytecode-based virtual machine. It is a shortcut for {\tt Eval vm\_compute in {\term}}. \SeeAlso Section~\ref{Conversion-tactics}. \subsection[\tt Extraction \term.]{\tt Extraction \term.\label{ExtractionTerm} \comindex{Extraction}} This command displays the term extracted from {\term}. The extraction is processed according to the distinction between {\Set} and {\Prop}; that is to say, between logical and computational content (see Section~\ref{Sorts}). The extracted term is displayed in Objective Caml syntax, where global identifiers are still displayed as in \Coq\ terms. \begin{Variants} \item \texttt{Recursive Extraction {\qualid$_1$} \ldots{} {\qualid$_n$}.}\\ Recursively extracts all the material needed for the extraction of globals {\qualid$_1$} \ldots{} {\qualid$_n$}. \end{Variants} \SeeAlso Chapter~\ref{Extraction}. \subsection[\tt Print Assumptions {\qualid}.]{\tt Print Assumptions {\qualid}.\comindex{Print Assumptions}} \label{PrintAssumptions} This command displays all the assumptions (axioms, parameters and variables) a theorem or definition depends on. In particular, it tells on which assumptions the validity of the theorem relies. \begin{Variants} \item \texttt{\tt Print Opaque Dependencies {\qualid}. \comindex{Print Opaque Dependencies}}\\ Displays the set of opaque constants {\qualid} relies on in addition to the assumptions. \end{Variants} \subsection[\tt Search {\term}.]{\tt Search {\term}.\comindex{Search}} This command displays the name and type of all theorems of the current context whose statement's conclusion has the form {\tt ({\term} t1 .. tn)}. This command is useful to remind the user of the name of library lemmas. \begin{coq_example} Search le. Search (@eq bool).
\end{coq_example} \begin{Variants} \item {\tt Search {\term} inside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions defined in modules {\module$_1$} \ldots{} {\module$_n$}. \item {\tt Search {\term} outside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions not defined in modules {\module$_1$} \ldots{} {\module$_n$}. \begin{ErrMsgs} \item \errindex{Module/section \module{} not found} No module \module{} has been required (see Section~\ref{Require}). \end{ErrMsgs} \end{Variants} \subsection[\tt SearchAbout {\qualid}.]{\tt SearchAbout {\qualid}.\comindex{SearchAbout}} This command displays the name and type of all objects (theorems, axioms, etc) of the current context whose statement contains \qualid. This command is useful to remind the user of the name of library lemmas. \begin{ErrMsgs} \item \errindex{The reference \qualid\ was not found in the current environment}\\ There is no constant in the environment named \qualid. \end{ErrMsgs} \newcommand{\termpatternorstr}{{\termpattern}\textrm{\textsl{-}}{\str}} \begin{Variants} \item {\tt SearchAbout {\str}.} If {\str} is a valid identifier, this command displays the name and type of all objects (theorems, axioms, etc) of the current context whose name contains {\str}. If {\str} is a notation's string denoting some reference {\qualid} (referred to by its main symbol as in \verb="+"= or by its notation's string as in \verb="_ + _"= or \verb="_ 'U' _"=, see Section~\ref{Notation}), the command works like {\tt SearchAbout {\qualid}}. \item {\tt SearchAbout {\str}\%{\delimkey}.} The string {\str} must be a notation or the main symbol of a notation which is then interpreted in the scope bound to the delimiting key {\delimkey} (see Section~\ref{scopechange}). \item {\tt SearchAbout {\termpattern}.} This searches for all statements or types of definition that contains a subterm that matches the pattern {\termpattern} (holes of the pattern are either denoted by ``{\texttt \_}'' or by ``{\texttt ?{\ident}}'' when non linear patterns are expected). \item {\tt SearchAbout \nelist{\zeroone{-}{\termpatternorstr}}{}.}\\ \noindent where {\termpatternorstr} is a {\termpattern} or a {\str}, or a {\str} followed by a scope delimiting key {\tt \%{\delimkey}}. This generalization of {\tt SearchAbout} searches for all objects whose statement or type contains a subterm matching {\termpattern} (or {\qualid} if {\str} is the notation for a reference {\qualid}) and whose name contains all {\str} of the request that correspond to valid identifiers. If a {\termpattern} or a {\str} is prefixed by ``-'', the search excludes the objects that mention that {\termpattern} or that {\str}. \item {\tt SearchAbout \nelist{{\termpatternorstr}}{} inside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions defined in modules {\module$_1$} \ldots{} {\module$_n$}. \item {\tt SearchAbout \nelist{{\termpatternorstr}}{} outside {\module$_1$}...{\module$_n$}.} This restricts the search to constructions not defined in modules {\module$_1$} \ldots{} {\module$_n$}. \item {\tt SearchAbout [ ... ]. } For compatibility with older versions, the list of objects to search may be enclosed by optional {\tt [ ]} delimiters. \end{Variants} \examples \begin{coq_example*} Require Import ZArith. \end{coq_example*} \begin{coq_example} SearchAbout Z.mul Z.add "distr". SearchAbout "+"%Z "*"%Z "distr" -positive -Prop. SearchAbout (?x * _ + ?x * _)%Z outside OmegaLemmas. 
\end{coq_example} \subsection[\tt SearchPattern {\termpattern}.]{\tt SearchPattern {\term}.\comindex{SearchPattern}} This command displays the name and type of all theorems of the current context whose statement's conclusion or last hypothesis and conclusion matches the expression {\term} where holes in the latter are denoted by ``{\texttt \_}''. It is a variant of {\tt SearchAbout {\termpattern}} that does not look for subterms but searches for statements whose conclusion has exactly the expected form, or whose statement finishes by the given series of hypothesis/conclusion. \begin{coq_example} Require Import Arith. SearchPattern (_ + _ = _ + _). SearchPattern (nat -> bool). SearchPattern (forall l : list _, _ l l). \end{coq_example} Patterns need not be linear: you can express that the same expression must occur in two places by using pattern variables `{\texttt ?{\ident}}''. \begin{coq_example} Require Import Arith. SearchPattern (?X1 + _ = _ + ?X1). \end{coq_example} \begin{Variants} \item {\tt SearchPattern {\term} inside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions defined in modules {\module$_1$} \ldots{} {\module$_n$}. \item {\tt SearchPattern {\term} outside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions not defined in modules {\module$_1$} \ldots{} {\module$_n$}. \end{Variants} \subsection[\tt SearchRewrite {\term}.]{\tt SearchRewrite {\term}.\comindex{SearchRewrite}} This command displays the name and type of all theorems of the current context whose statement's conclusion is an equality of which one side matches the expression {\term}. Holes in {\term} are denoted by ``{\texttt \_}''. \begin{coq_example} Require Import Arith. SearchRewrite (_ + _ + _). \end{coq_example} \begin{Variants} \item {\tt SearchRewrite {\term} inside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions defined in modules {\module$_1$} \ldots{} {\module$_n$}. \item {\tt SearchRewrite {\term} outside {\module$_1$} \ldots{} {\module$_n$}.} This restricts the search to constructions not defined in modules {\module$_1$} \ldots{} {\module$_n$}. \end{Variants} \subsubsection{Nota Bene:} For the {\tt Search}, {\tt SearchAbout}, {\tt SearchPattern} and {\tt SearchRewrite} queries, it is possible to globally filter the search results via the command {\tt Add Search Blacklist "substring1"}. A lemma whose fully-qualified name contains any of the declared substrings will be removed from the search results. The default blacklisted substrings are {\tt "\_admitted" "\_subproof" "Private\_"}. The command {\tt Remove Search Blacklist ...} allows to expunge this blacklist. % \begin{tabbing} % \ \ \ \ \=11.\ \=\kill % \>1.\>$A=B\mx{ if }A\stackrel{\bt{}\io{}}{\lra{}}B$\\ % \>2.\>$\sa{}x:A.B=\sa{}y:A.B[x\la{}y]\mx{ if }y\not\in{}FV(\sa{}x:A.B)$\\ % \>3.\>$\Pi{}x:A.B=\Pi{}y:A.B[x\la{}y]\mx{ if }y\not\in{}FV(\Pi{}x:A.B)$\\ % \>4.\>$\sa{}x:A.B=\sa{}x:B.A\mx{ if }x\not\in{}FV(A,B)$\\ % \>5.\>$\sa{}x:(\sa{}y:A.B).C=\sa{}x:A.\sa{}y:B[y\la{}x].C[x\la{}(x,y)]$\\ % \>6.\>$\Pi{}x:(\sa{}y:A.B).C=\Pi{}x:A.\Pi{}y:B[y\la{}x].C[x\la{}(x,y)]$\\ % \>7.\>$\Pi{}x:A.\sa{}y:B.C=\sa{}y:(\Pi{}x:A.B).(\Pi{}x:A.C[y\la{}(y\sm{}x)]$\\ % \>8.\>$\sa{}x:A.unit=A$\\ % \>9.\>$\sa{}x:unit.A=A[x\la{}tt]$\\ % \>10.\>$\Pi{}x:A.unit=unit$\\ % \>11.\>$\Pi{}x:unit.A=A[x\la{}tt]$ % \end{tabbing} % For more informations about the exact working of this command, see % \cite{Del97}. 
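For instance, the following commands (using an arbitrary, purely illustrative substring) first extend the blacklist and then restore it:
\begin{coq_example*}
Add Search Blacklist "internal_".
Remove Search Blacklist "internal_".
\end{coq_example*}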
\subsection[\tt Locate {\qualid}.]{\tt Locate {\qualid}.\comindex{Locate} \label{Locate}} This command displays the full name of the qualified identifier {\qualid} and consequently the \Coq\ module in which it is defined. \begin{coq_eval} (*************** The last line should produce **************************) (*********** Error: I.Dont.Exist not a defined object ******************) \end{coq_eval} \begin{coq_eval} Set Printing Depth 50. \end{coq_eval} \begin{coq_example} Locate nat. Locate Datatypes.O. Locate Init.Datatypes.O. Locate Coq.Init.Datatypes.O. Locate I.Dont.Exist. \end{coq_example} \SeeAlso Section \ref{LocateSymbol} \subsection{The {\sc Whelp} searching tool \label{Whelp}} {\sc Whelp} is an experimental searching and browsing tool for the whole {\Coq} library and the whole set of {\Coq} user contributions. {\sc Whelp} requires a browser to work. {\sc Whelp} has been developed at the University of Bologna as part of the HELM\footnote{Hypertextual Electronic Library of Mathematics} and MoWGLI\footnote{Mathematics on the Web, Get it by Logics and Interfaces} projects. It can be invoked directly from the {\Coq} toplevel or from {\CoqIDE}, assuming a graphical environment is also running. The browser to use can be selected by setting the environment variable {\tt COQREMOTEBROWSER}. If not explicitly set, it defaults to \verb!firefox -remote \"OpenURL(%s,new-tab)\" || firefox %s &"! or \verb!C:\\PROGRA~1\\INTERN~1\\IEXPLORE %s!, depending on the underlying operating system (in the command, the string \verb!%s! serves as metavariable for the url to open). The Whelp tool relies on a dedicated Whelp server and on another server called Getter that retrieves formal documents. The default Whelp server name can be obtained using the command {\tt Test Whelp Server} \comindex{Test Whelp Server} and the default Getter can be obtained using the command: {\tt Test Whelp Getter} \comindex{Test Whelp Getter}. The Whelp server name can be changed using the command: \smallskip \noindent {\tt Set Whelp Server {\str}}.\\ where {\str} is a URL (e.g. {\tt http://mowgli.cs.unibo.it:58080}). \comindex{Set Whelp Server} \smallskip \noindent The Getter can be changed using the command: \smallskip \noindent {\tt Set Whelp Getter {\str}}.\\ where {\str} is a URL (e.g. {\tt http://mowgli.cs.unibo.it:58081}). \comindex{Set Whelp Getter} \bigskip The {\sc Whelp} commands are: \subsubsection{\tt Whelp Locate "{\sl reg\_expr}". \comindex{Whelp Locate}} This command opens a browser window and displays the result of seeking for all names that match the regular expression {\sl reg\_expr} in the {\Coq} library and user contributions. The regular expression can contain the special operators are * and ? that respectively stand for an arbitrary substring and for exactly one character. \variant {\tt Whelp Locate {\ident}.}\\ This is equivalent to {\tt Whelp Locate "{\ident}"}. \subsubsection{\tt Whelp Match {\pattern}. \comindex{Whelp Match}} This command opens a browser window and displays the result of seeking for all statements that match the pattern {\pattern}. Holes in the pattern are represented by the wildcard character ``\_''. \subsubsection[\tt Whelp Instance {\pattern}.]{\tt Whelp Instance {\pattern}.\comindex{Whelp Instance}} This command opens a browser window and displays the result of seeking for all statements that are instances of the pattern {\pattern}. The pattern is here assumed to be an universally quantified expression. 
\subsubsection[\tt Whelp Elim {\qualid}.]{\tt Whelp Elim {\qualid}.\comindex{Whelp Elim}} This command opens a browser window and displays the result of seeking for all statements that have the ``form'' of an elimination scheme over the type denoted by {\qualid}. \subsubsection[\tt Whelp Hint {\term}.]{\tt Whelp Hint {\term}.\comindex{Whelp Hint}} This command opens a browser window and displays the result of seeking for all statements that can be instantiated so that to prove the statement {\term}. \variant {\tt Whelp Hint.}\\ This is equivalent to {\tt Whelp Hint {\sl goal}} where {\sl goal} is the current goal to prove. Notice that {\Coq} does not send the local environment of definitions to the {\sc Whelp} tool so that it only works on requests strictly based on, only, definitions of the standard library and user contributions. \section{Loading files} \Coq\ offers the possibility of loading different parts of a whole development stored in separate files. Their contents will be loaded as if they were entered from the keyboard. This means that the loaded files are ASCII files containing sequences of commands for \Coq's toplevel. This kind of file is called a {\em script} for \Coq\index{Script file}. The standard (and default) extension of \Coq's script files is {\tt .v}. \subsection[\tt Load {\ident}.]{\tt Load {\ident}.\comindex{Load}\label{Load}} This command loads the file named {\ident}{\tt .v}, searching successively in each of the directories specified in the {\em loadpath}. (see Section~\ref{loadpath}) \begin{Variants} \item {\tt Load {\str}.}\label{Load-str}\\ Loads the file denoted by the string {\str}, where {\str} is any complete filename. Then the \verb.~. and {\tt ..} abbreviations are allowed as well as shell variables. If no extension is specified, \Coq\ will use the default extension {\tt .v} \item {\tt Load Verbose {\ident}.}, {\tt Load Verbose {\str}}\\ \comindex{Load Verbose} Display, while loading, the answers of \Coq\ to each command (including tactics) contained in the loaded file \SeeAlso Section~\ref{Begin-Silent} \end{Variants} \begin{ErrMsgs} \item \errindex{Can't find file {\ident} on loadpath} \end{ErrMsgs} \section[Compiled files]{Compiled files\label{compiled}\index{Compiled files}} This section describes the commands used to load compiled files (see Chapter~\ref{Addoc-coqc} for documentation on how to compile a file). A compiled file is a particular case of module called {\em library file}. %%%%%%%%%%%% % Import and Export described in RefMan-mod.tex % the minor difference (to avoid multiple Exporting of libraries) in % the treatment of normal modules and libraries by Export omitted \subsection[\tt Require {\qualid}.]{\tt Require {\qualid}.\label{Require} \comindex{Require}} This command looks in the loadpath for a file containing module {\qualid} and adds the corresponding module to the environment of {\Coq}. As library files have dependencies in other library files, the command {\tt Require {\qualid}} recursively requires all library files the module {\qualid} depends on and adds the corresponding modules to the environment of {\Coq} too. {\Coq} assumes that the compiled files have been produced by a valid {\Coq} compiler and their contents are then not replayed nor rechecked. 
To locate the file in the file system, {\qualid} is decomposed under the form {\dirpath}{\tt .}{\textsl{ident}} and the file {\ident}{\tt .vo} is searched in the physical directory of the file system that is mapped in {\Coq} loadpath to the logical path {\dirpath} (see Section~\ref{loadpath}). The mapping between physical directories and logical names at the time of requiring the file must be consistent with the mapping used to compile the file. \begin{Variants} \item {\tt Require Import {\qualid}.} \comindex{Require} This loads and declares the module {\qualid} and its dependencies then imports the contents of {\qualid} as described in Section~\ref{Import}. It does not import the modules on which {\qualid} depends unless these modules were itself required in module {\qualid} using {\tt Require Export}, as described below, or recursively required through a sequence of {\tt Require Export}. If the module required has already been loaded, {\tt Require Import {\qualid}} simply imports it, as {\tt Import {\qualid}} would. \item {\tt Require Export {\qualid}.} \comindex{Require Export} This command acts as {\tt Require Import} {\qualid}, but if a further module, say {\it A}, contains a command {\tt Require Export} {\it B}, then the command {\tt Require Import} {\it A} also imports the module {\it B}. \item {\tt Require \zeroone{Import {\sl |} Export} {\qualid}$_1$ \ldots {\qualid}$_n$.} This loads the modules {\qualid}$_1$, \ldots, {\qualid}$_n$ and their recursive dependencies. If {\tt Import} or {\tt Export} is given, it also imports {\qualid}$_1$, \ldots, {\qualid}$_n$ and all the recursive dependencies that were marked or transitively marked as {\tt Export}. \item {\tt Require \zeroone{Import {\sl |} Export} {\str}.} This shortcuts the resolution of the qualified name into a library file name by directly requiring the module to be found in file {\str}.vo. \end{Variants} \begin{ErrMsgs} \item \errindex{Cannot load {\qualid}: no physical path bound to {\dirpath}} \item \errindex{Cannot find library foo in loadpath} The command did not find the file {\tt foo.vo}. Either {\tt foo.v} exists but is not compiled or {\tt foo.vo} is in a directory which is not in your {\tt LoadPath} (see Section~\ref{loadpath}). \item \errindex{Compiled library {\ident}.vo makes inconsistent assumptions over library {\qualid}} The command tried to load library file {\ident}.vo that depends on some specific version of library {\qualid} which is not the one already loaded in the current {\Coq} session. Probably {\ident}.v was not properly recompiled with the last version of the file containing module {\qualid}. \item \errindex{Bad magic number} \index{Bad-magic-number@{\tt Bad Magic Number}} The file {\tt{\ident}.vo} was found but either it is not a \Coq\ compiled module, or it was compiled with an older and incompatible version of \Coq. \item \errindex{The file {\ident}.vo contains library {\dirpath} and not library {\dirpath'}} The library file {\dirpath'} is indirectly required by the {\tt Require} command but it is bound in the current loadpath to the file {\ident}.vo which was bound to a different library name {\dirpath} at the time it was compiled. \end{ErrMsgs} \SeeAlso Chapter~\ref{Addoc-coqc} \subsection[\tt Print Libraries.]{\tt Print Libraries.\comindex{Print Libraries}} This command displays the list of library files loaded in the current {\Coq} session. For each of these libraries, it also tells if it is imported. \subsection[\tt Declare ML Module {\str$_1$} .. 
{\str$_n$}.]{\tt Declare ML Module {\str$_1$} .. {\str$_n$}.\comindex{Declare ML Module}}
This command loads the Objective Caml compiled files {\str$_1$} {\dots} {\str$_n$} (dynamic link). It is mainly used to load tactics dynamically.
% (see Chapter~\ref{WritingTactics}).
The files are searched in the current Objective Caml loadpath (see the command {\tt Add ML Path} in Section~\ref{loadpath}). Loading of Objective Caml files is only possible under the bytecode version of {\tt coqtop} (i.e. {\tt coqtop} called with option {\tt -byte}, see Chapter~\ref{Addoc-coqc}), or when Coq has been compiled with a version of Objective Caml that supports native {\tt Dynlink} ($\ge$ 3.11).
\begin{Variants}
\item {\tt Local Declare ML Module {\str$_1$} .. {\str$_n$}.}\\ This variant is not exported to the modules that import the module where it occurs, even if outside a section.
\end{Variants}
\begin{ErrMsgs}
\item \errindex{File not found on loadpath : \str}
\item \errindex{Loading of ML object file forbidden in a native Coq}
\end{ErrMsgs}
\subsection[\tt Print ML Modules.]{\tt Print ML Modules.\comindex{Print ML Modules}}
This prints the names of all \ocaml{} modules loaded with \texttt{Declare ML Module}. To know where these modules were loaded from, the user should use the command \texttt{Locate File} (see Section~\ref{Locate File}).
\section[Loadpath]{Loadpath\label{loadpath}\index{Loadpath}}
There are currently two loadpaths in \Coq: one used to look for {\Coq} files (extensions {\tt .v}, {\tt .vo} or {\tt .vi}) and one used to look for Objective Caml files. The default loadpath contains the directory ``\texttt{.}'' denoting the current directory and mapped to the empty logical path (see Section~\ref{LongNames}).
\subsection[\tt Pwd.]{\tt Pwd.\comindex{Pwd}\label{Pwd}}
This command displays the current working directory.
\subsection[\tt Cd {\str}.]{\tt Cd {\str}.\comindex{Cd}}
This command changes the current directory according to {\str} which can be any valid path.
\begin{Variants}
\item {\tt Cd.}\\ Is equivalent to {\tt Pwd.}
\end{Variants}
\subsection[\tt Add LoadPath {\str} as {\dirpath}.]{\tt Add LoadPath {\str} as {\dirpath}.\comindex{Add LoadPath}\label{AddLoadPath}}
This command adds the physical directory {\str} to the current {\Coq} loadpath and maps it to the logical directory {\dirpath}, which means that every file \textrm{\textsl{dirname}}/\textrm{\textsl{basename.v}} physically lying in subdirectory {\str}/\textrm{\textsl{dirname}} becomes accessible in {\Coq} through the absolute logical name {\dirpath}{\tt .}\textrm{\textsl{dirname}}{\tt .}\textrm{\textsl{basename}}.
\Rem {\tt Add LoadPath} also adds {\str} to the current ML loadpath.
\begin{Variants}
\item {\tt Add LoadPath {\str}.}\\ Acts as {\tt Add LoadPath {\str} as {\dirpath}} but for the empty logical directory path.
\end{Variants}
\subsection[\tt Add Rec LoadPath {\str} as {\dirpath}.]{\tt Add Rec LoadPath {\str} as {\dirpath}.\comindex{Add Rec LoadPath}\label{AddRecLoadPath}}
This command adds the physical directory {\str} and all its subdirectories to the current \Coq\ loadpath. The top directory {\str} is mapped to the logical directory {\dirpath} and any subdirectory {\textsl{pdir}} of it is mapped to the logical name {\dirpath}{\tt .}\textsl{pdir}, and so on recursively. Subdirectories corresponding to invalid {\Coq} identifiers are skipped, and, by convention, subdirectories named {\tt CVS} or {\tt \_darcs} are skipped too.
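For instance, assuming a development is stored under a hypothetical directory {\tt /home/user/dev} containing a subdirectory {\tt Lists}, the declaration
\begin{verbatim}
Add Rec LoadPath "/home/user/dev" as MyDev.
\end{verbatim}
makes a file {\tt Lists/Sort.v} lying there accessible through the logical name {\tt MyDev.Lists.Sort} (the directory and file names are purely illustrative).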
In other words, {\tt Add Rec LoadPath {\str} as {\dirpath}} behaves as {\tt Add LoadPath {\str} as {\dirpath}} except that files lying in validly named subdirectories of {\str} need not be qualified to be found. In case of files with identical base names, files lying in the most recently declared {\dirpath} are found first and explicit qualification is required to refer to the other files with the same base name. If several files with identical base names are present in different subdirectories of a recursive loadpath declared via a single instance of {\tt Add Rec LoadPath}, which of these files is found first is system-dependent and explicit qualification is recommended.
\Rem {\tt Add Rec LoadPath} also recursively adds {\str} to the current ML loadpath.
\begin{Variants}
\item {\tt Add Rec LoadPath {\str}.}\\ Works as {\tt Add Rec LoadPath {\str} as {\dirpath}} but for the empty logical directory path.
\end{Variants}
\subsection[\tt Remove LoadPath {\str}.]{\tt Remove LoadPath {\str}.\comindex{Remove LoadPath}}
This command removes the path {\str} from the current \Coq\ loadpath.
\subsection[\tt Print LoadPath.]{\tt Print LoadPath.\comindex{Print LoadPath}}
This command displays the current \Coq\ loadpath.
\begin{Variants}
\item {\tt Print LoadPath {\dirpath}.}\\ Works as {\tt Print LoadPath} but displays only the paths that extend the {\dirpath} prefix.
\end{Variants}
\subsection[\tt Add ML Path {\str}.]{\tt Add ML Path {\str}.\comindex{Add ML Path}}
This command adds the path {\str} to the current Objective Caml loadpath (see the command {\tt Declare ML Module} in Section~\ref{compiled}).
\Rem This command is implied by {\tt Add LoadPath {\str} as {\dirpath}}.
\subsection[\tt Add Rec ML Path {\str}.]{\tt Add Rec ML Path {\str}.\comindex{Add Rec ML Path}}
This command adds the directory {\str} and all its subdirectories to the current Objective Caml loadpath (see the command {\tt Declare ML Module} in Section~\ref{compiled}).
\Rem This command is implied by {\tt Add Rec LoadPath {\str} as {\dirpath}}.
\subsection[\tt Print ML Path {\str}.]{\tt Print ML Path {\str}.\comindex{Print ML Path}}
This command displays the current Objective Caml loadpath. This command makes sense only under the bytecode version of {\tt coqtop}, i.e. using option {\tt -byte} (see the command {\tt Declare ML Module} in Section~\ref{compiled}).
\subsection[\tt Locate File {\str}.]{\tt Locate File {\str}.\comindex{Locate File}\label{Locate File}}
This command displays the location of file {\str} in the current loadpath. Typically, {\str} is a \texttt{.cmo} or \texttt{.vo} or \texttt{.v} file.
\subsection[\tt Locate Library {\dirpath}.]{\tt Locate Library {\dirpath}.\comindex{Locate Library}\label{Locate Library}}
This command gives the status of the \Coq\ module {\dirpath}. It tells if the module is loaded and, if not, searches the loadpath for a module of logical name {\dirpath}.
\section{Backtracking}
The backtracking commands described in this section can only be used interactively; they cannot be part of a vernacular file loaded via {\tt Load} or compiled by {\tt coqc}.
\subsection[\tt Reset \ident.]{\tt Reset \ident.\comindex{Reset}}
This command removes all the objects in the environment since \ident\ was introduced, including \ident. \ident\ may be the name of a defined or declared object as well as the name of a section. One cannot reset over the name of a module or of an object inside a module.
\begin{ErrMsgs}
\item \ident: \errindex{no such entry}
\end{ErrMsgs}
\begin{Variants}
\item {\tt Reset Initial.}\comindex{Reset Initial}\\ Goes back to the initial state, just after the start of the interactive session.
\end{Variants}
\subsection[\tt Back.]{\tt Back.\comindex{Back}}
This command undoes all the effects of the last vernacular command. Commands read from a vernacular file via a {\tt Load} are considered as a single command. Proof management commands are also handled by this command (see Chapter~\ref{Proof-handling}). For that, {\tt Back} may have to undo more than one command in order to reach a state where the proof management information is available. For instance, when the last command is a {\tt Qed}, the management information about the closed proof has been discarded. In this case, {\tt Back} will then undo all the proof steps up to the statement of this proof.
\begin{Variants}
\item {\tt Back $n$} \\ Undoes $n$ vernacular commands. As for {\tt Back}, some extra commands may be undone in order to reach an adequate state. For instance {\tt Back $n$} will not re-enter a closed proof, but rather go just before that proof.
\end{Variants}
\begin{ErrMsgs}
\item \errindex{Invalid backtrack} \\ The user tried to undo more commands than are available in the history.
\end{ErrMsgs}
\subsection[\tt BackTo $\num$.]{\tt BackTo $\num$.\comindex{BackTo}}
\label{sec:statenums}
This command brings back the system to the state labelled $\num$, forgetting the effect of all commands executed after this state. The state label is an integer which grows after each successful command. It is displayed in the prompt when in \texttt{-emacs} mode. Just as {\tt Back} (see above), the {\tt BackTo} command now handles proof states. For that, it may have to undo some extra commands and end on a state $\num' \leq \num$ if necessary.
\begin{Variants}
\item {\tt Backtrack $\num_1$ $\num_2$ $\num_3$}.\comindex{Backtrack}\\ {\tt Backtrack} is a \emph{deprecated} form of {\tt BackTo} which allows explicitly manipulating the proof environment. The three numbers $\num_1$, $\num_2$ and $\num_3$ represent the following:
\begin{itemize}
\item $\num_3$: Number of \texttt{Abort} to perform, i.e. the number of currently opened nested proofs that must be canceled (see Chapter~\ref{Proof-handling}).
\item $\num_2$: \emph{Proof state number} to unbury once aborts have been done. Coq will compute the number of \texttt{Undo} to perform (see Chapter~\ref{Proof-handling}).
\item $\num_1$: State label to reach, as for {\tt BackTo}.
\end{itemize}
\end{Variants}
\begin{ErrMsgs}
\item \errindex{Invalid backtrack} \\ The destination state label is unknown.
\end{ErrMsgs}
\section{State files}
\subsection[\tt Write State \str.]{\tt Write State \str.\comindex{Write State}}
Writes the current state into a file \str{} for use in a later session. This file can be given as the {\tt inputstate} argument of the commands {\tt coqtop} and {\tt coqc}.
\begin{Variants}
\item {\tt Write State \ident}\\ Equivalent to {\tt Write State "}{\ident}{\tt .coq"}. The state is saved in the current directory (see Section~\ref{Pwd}).
\end{Variants}
\subsection[\tt Restore State \str.]{\tt Restore State \str.\comindex{Restore State}}
Restores the state contained in the file \str.
\begin{Variants}
\item {\tt Restore State \ident}\\ Equivalent to {\tt Restore State "}{\ident}{\tt .coq"}.
\end{Variants}
\section{Quitting and debugging}
\subsection[\tt Quit.]{\tt Quit.\comindex{Quit}}
This command allows quitting \Coq.
\subsection[\tt Drop.]{\tt Drop.\comindex{Drop}\label{Drop}}
This is used mostly as a debug facility by \Coq's implementors and does not concern the casual user. This command allows leaving {\Coq} temporarily and entering the Objective Caml toplevel. The Objective Caml command:
\begin{flushleft}
\begin{verbatim}
#use "include";;
\end{verbatim}
\end{flushleft}
\noindent adds the right loadpaths and loads some toplevel printers for all abstract types of \Coq - section\_path, identifiers, terms, judgments, \dots. You can also use the file \texttt{base\_include} instead, which loads only the pretty-printers for section\_paths and identifiers.
% See Section~\ref{test-and-debug} more information on the
% usage of the toplevel.
You can return to \Coq{} with the command:
\begin{flushleft}
\begin{verbatim}
go();;
\end{verbatim}
\end{flushleft}
\begin{Warnings}
\item It only works with the bytecode version of {\Coq} (i.e. {\tt coqtop} called with option {\tt -byte}, see the contents of Section~\ref{binary-images}).
\item You must have compiled {\Coq} from the source package and set the environment variable \texttt{COQTOP} to the root of your copy of the sources (see Section~\ref{EnvVariables}).
\end{Warnings}
\subsection[\tt Time \textrm{\textsl{command}}.]{\tt Time \textrm{\textsl{command}}.\comindex{Time} \label{time}}
This command executes the vernacular command \textrm{\textsl{command}} and displays the time needed to execute it.
\subsection[\tt Timeout \textrm{\textsl{int}} \textrm{\textsl{command}}.]{\tt Timeout \textrm{\textsl{int}} \textrm{\textsl{command}}.\comindex{Timeout} \label{timeout}}
This command executes the vernacular command \textrm{\textsl{command}}. If the command has not terminated after the time specified by the integer (time expressed in seconds), then it is interrupted and an error message is displayed.
\subsection[\tt Set Default Timeout \textrm{\textsl{int}}.]{\tt Set Default Timeout \textrm{\textsl{int}}.\comindex{Set Default Timeout}}
After using this command, all subsequent commands behave as if they were passed to a {\tt Timeout} command. Commands already starting with a {\tt Timeout} are unaffected.
\subsection[\tt Unset Default Timeout.]{\tt Unset Default Timeout.\comindex{Unset Default Timeout}}
This command turns off the use of a default timeout.
\subsection[\tt Test Default Timeout.]{\tt Test Default Timeout.\comindex{Test Default Timeout}}
This command displays whether some default timeout has been set or not.
\section{Controlling display}
\subsection[\tt Set Silent.]{\tt Set Silent.\comindex{Set Silent} \label{Begin-Silent} \index{Silent mode}}
This command turns off the normal display.
\subsection[\tt Unset Silent.]{\tt Unset Silent.\comindex{Unset Silent}}
This command turns the normal display on.
\subsection[\tt Set Printing Width {\integer}.]{\tt Set Printing Width {\integer}.\comindex{Set Printing Width}}
This command sets the width of the left-aligned part of the screen that is used for display.
\subsection[\tt Unset Printing Width.]{\tt Unset Printing Width.\comindex{Unset Printing Width}}
This command resets the width of the screen used for display to its default value (which is 78 at the time of writing this documentation).
\subsection[\tt Test Printing Width.]{\tt Test Printing Width.\comindex{Test Printing Width}}
This command displays the current screen width used for display.
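For instance, assuming a wide terminal, one might widen the display and check the setting with the following commands:
\begin{verbatim}
Set Printing Width 120.
Test Printing Width.
Unset Printing Width.
\end{verbatim}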
\subsection[\tt Set Printing Depth {\integer}.]{\tt Set Printing Depth {\integer}.\comindex{Set Printing Depth}}
This command sets the nesting depth of the formatter used for pretty-printing. Beyond this depth, display of subterms is replaced by dots.
\subsection[\tt Unset Printing Depth.]{\tt Unset Printing Depth.\comindex{Unset Printing Depth}}
This command resets the nesting depth of the formatter used for pretty-printing to its default value (at the time of writing this documentation, the default value is 50).
\subsection[\tt Test Printing Depth.]{\tt Test Printing Depth.\comindex{Test Printing Depth}}
This command displays the current nesting depth used for display.
%\subsection{\tt Abstraction ...}
%Not yet documented.
\section{Controlling the reduction strategies and the conversion algorithm}
\label{Controlling reduction strategy}
{\Coq} provides reduction strategies that the tactics can invoke and two different algorithms to check the convertibility of types. The first conversion algorithm lazily compares applicative terms while the other is a brute-force but efficient algorithm that first normalizes the terms before comparing them. The second algorithm is based on a bytecode representation of terms similar to the bytecode representation used in the ZINC virtual machine~\cite{Leroy90}. It is especially useful for intensive computation of algebraic values, such as numbers, and for reflection-based tactics. The commands to fine-tune the reduction strategies and the lazy conversion algorithm are described first.
\subsection[\tt Opaque \qualid$_1$ {\dots} \qualid$_n$.]{\tt Opaque \qualid$_1$ {\dots} \qualid$_n$.\comindex{Opaque}\label{Opaque}}
This command has an effect on unfoldable constants, i.e. on constants defined by {\tt Definition} or {\tt Let} (with an explicit body), or by a command assimilated to a definition such as {\tt Fixpoint}, {\tt Program Definition}, etc., or by a proof ended by {\tt Defined}. The command tells {\Coq} not to unfold the constants {\qualid$_1$} {\dots} {\qualid$_n$} in tactics using $\delta$-conversion (unfolding a constant means replacing it by its definition). {\tt Opaque} also has an effect on the conversion algorithm of {\Coq}, telling it to delay the unfolding of a constant as late as possible in case {\Coq} has to check the conversion (see Section~\ref{conv-rules}) of two distinct applied constants. The scope of {\tt Opaque} is limited to the current section, or current file, unless the variant {\tt Global Opaque \qualid$_1$ {\dots} \qualid$_n$} is used.
\SeeAlso sections \ref{Conversion-tactics}, \ref{Automatizing}, \ref{Theorem}
\begin{ErrMsgs}
\item \errindex{The reference \qualid\ was not found in the current environment}\\ There is no constant referred to by {\qualid} in the environment. Nevertheless, if you asked \texttt{Opaque foo bar} and if \texttt{bar} does not exist, \texttt{foo} is set opaque.
\end{ErrMsgs}
\subsection[\tt Transparent \qualid$_1$ {\dots} \qualid$_n$.]{\tt Transparent \qualid$_1$ {\dots} \qualid$_n$.\comindex{Transparent}\label{Transparent}}
This command is the converse of {\tt Opaque} and it applies to unfoldable constants to restore their unfoldability after an {\tt Opaque} command. Note in particular that constants defined by a proof ended by {\tt Qed} are not unfoldable and {\tt Transparent} has no effect on them. This is in keeping with the usual mathematical practice of {\em proof irrelevance}: what matters in a mathematical development is the sequence of lemma statements, not their actual proofs.
This distinguishes lemmas from the usual defined constants, whose actual values are of course relevant in general. The scope of {\tt Transparent} is limited to the current section, or current file, unless the variant {\tt Global Transparent \qualid$_1$ \dots \qualid$_n$} is used.
\begin{ErrMsgs}
% \item \errindex{Can not set transparent.}\\
% It is a constant from a required module or a parameter.
\item \errindex{The reference \qualid\ was not found in the current environment}\\ There is no constant referred to by {\qualid} in the environment.
\end{ErrMsgs}
\SeeAlso sections \ref{Conversion-tactics}, \ref{Automatizing}, \ref{Theorem}
\subsection{\tt Strategy {\it level} [ \qualid$_1$ {\dots} \qualid$_n$ ].\comindex{Strategy}\comindex{Local Strategy}\label{Strategy}}
This command generalizes the behavior of the {\tt Opaque} and {\tt Transparent} commands. It is used to fine-tune the strategy for unfolding constants, both at the tactic level and at the kernel level. This command associates a level to \qualid$_1$ {\dots} \qualid$_n$. Whenever two expressions with two distinct head constants are compared (for instance, this comparison can be triggered by a type cast), the one with the lower level is expanded first. In case of a tie, the second one (appearing in the cast type) is expanded. Levels can be one of the following (higher to lower):
\begin{description}
\item[opaque]: level of opaque constants. They cannot be expanded by tactics (behaves like $+\infty$, see next item).
\item[\num]: levels indexed by an integer. Level $0$ corresponds to the default behavior, which corresponds to transparent constants. This level can also be referred to as {\bf transparent}. Negative levels correspond to constants to be expanded before normal transparent constants, while positive levels correspond to constants to be expanded after normal transparent constants.
\item[expand]: level of constants that should be expanded first (behaves like $-\infty$)
\end{description}
These directives survive section and module closure, unless the command is prefixed by {\tt Local}. In the latter case, the behavior regarding sections and modules is the same as for the {\tt Transparent} and {\tt Opaque} commands.
\subsection{\tt Declare Reduction \ident\ := {\rm\sl convtactic}.}
This command allows giving a short name to a reduction expression, for instance {\tt lazy beta delta [foo bar]}. This short name can then be used in {\tt Eval \ident\ in ...} or {\tt eval} directives. This command accepts the {\tt Local} modifier, for discarding this reduction name at the end of the file or module. For the moment the name cannot be qualified. In particular, declaring the same name in several modules or in several functor applications will be refused if these declarations are not local. The name \ident\ cannot be used directly as an Ltac tactic, but nothing prevents the user from also defining a tactic {\tt Ltac \ident\ := {\rm\sl convtactic}}.
\SeeAlso section~\ref{Conversion-tactics}
\subsection{\tt Set Virtual Machine \label{SetVirtualMachine} \comindex{Set Virtual Machine}}
This activates the bytecode-based conversion algorithm.
\subsection{\tt Unset Virtual Machine \comindex{Unset Virtual Machine}}
This deactivates the bytecode-based conversion algorithm.
\subsection{\tt Test Virtual Machine \comindex{Test Virtual Machine}}
This tells if the bytecode-based conversion algorithm is activated. The default behavior is to have the bytecode-based conversion algorithm deactivated.
\SeeAlso sections~\ref{vmcompute} and~\ref{vmoption}.
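As a brief illustration of the {\tt Opaque}, {\tt Transparent} and {\tt Strategy} commands described above, consider the following script, where {\tt double} is a constant introduced only for the sake of the example:
\begin{verbatim}
Definition double (n : nat) := n + n.
Opaque double.             (* tactics no longer unfold double *)
Transparent double.        (* restore the default behavior *)
Strategy expand [double].  (* ask for double to be expanded first *)
\end{verbatim}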
\section{Controlling the locality of commands} \subsection{{\tt Local}, {\tt Global} \comindex{Local} \comindex{Global} } Some commands support a {\tt Local} or {\tt Global} prefix modifier to control the scope of their effect. There are four kinds of commands: \begin{itemize} \item Commands whose default is to extend their effect both outside the section and the module or library file they occur in. For these commands, the {\tt Local} modifier limits the effect of the command to the current section or module it occurs in. As an example, the {\tt Coercion} (see Section~\ref{Coercions}) and {\tt Strategy} (see Section~\ref{Strategy}) commands belong to this category. \item Commands whose default behavior is to stop their effect at the end of the section they occur in but to extent their effect outside the module or library file they occur in. For these commands, the {\tt Local} modifier limits the effect of the command to the current module if the command does not occur in a section and the {\tt Global} modifier extends the effect outside the current sections and current module if the command occurs in a section. As an example, the {\tt Implicit Arguments} (see Section~\ref{Implicit Arguments}), {\tt Ltac} (see Chapter~\ref{TacticLanguage}) or {\tt Notation} (see Section~\ref{Notation}) commands belong to this category. Notice that a subclass of these commands do not support extension of their scope outside sections at all and the {\tt Global} is not applicable to them. \item Commands whose default behavior is to stop their effect at the end of the section or module they occur in. For these commands, the {\tt Global} modifier extends their effect outside the sections and modules they occurs in. The {\tt Transparent} and {\tt Opaque} (see Section~\ref{Controlling reduction strategy}) commands belong to this category. \item Commands whose default behavior is to extend their effect outside sections but not outside modules when they occur in a section and to extend their effect outside the module or library file they occur in when no section contains them. For these commands, the {\tt Local} modifier limits the effect to the current section or module while the {\tt Global} modifier extends the effect outside the module even when the command occurs in a section. The {\tt Set} and {\tt Unset} commands belong to this category. \end{itemize} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-int.tex0000640000175000001440000001505611366307247016346 0ustar notinusers%BEGIN LATEX \setheaders{Introduction} %END LATEX \chapter*{Introduction} This document is the Reference Manual of version \coqversion{} of the \Coq\ proof assistant. A companion volume, the \Coq\ Tutorial, is provided for the beginners. It is advised to read the Tutorial first. A book~\cite{CoqArt} on practical uses of the \Coq{} system was published in 2004 and is a good support for both the beginner and the advanced user. %The system \Coq\ is designed to develop mathematical proofs. It can be %used by mathematicians to develop mathematical theories and by %computer scientists to write formal specifications, The \Coq{} system is designed to develop mathematical proofs, and especially to write formal specifications, programs and to verify that programs are correct with respect to their specification. It provides a specification language named \gallina. Terms of \gallina\ can represent programs as well as properties of these programs and proofs of these properties. 
Using the so-called \textit{Curry-Howard isomorphism}, programs, properties and proofs are formalized in the same language called \textit{Calculus of Inductive Constructions}, that is a $\lambda$-calculus with a rich type system. All logical judgments in \Coq\ are typing judgments. The very heart of the Coq system is the type-checking algorithm that checks the correctness of proofs, in other words that checks that a program complies to its specification. \Coq\ also provides an interactive proof assistant to build proofs using specific programs called \textit{tactics}. All services of the \Coq\ proof assistant are accessible by interpretation of a command language called \textit{the vernacular}. \Coq\ has an interactive mode in which commands are interpreted as the user types them in from the keyboard and a compiled mode where commands are processed from a file. \begin{itemize} \item The interactive mode may be used as a debugging mode in which the user can develop his theories and proofs step by step, backtracking if needed and so on. The interactive mode is run with the {\tt coqtop} command from the operating system (which we shall assume to be some variety of UNIX in the rest of this document). \item The compiled mode acts as a proof checker taking a file containing a whole development in order to ensure its correctness. Moreover, \Coq's compiler provides an output file containing a compact representation of its input. The compiled mode is run with the {\tt coqc} command from the operating system. \end{itemize} These two modes are documented in Chapter~\ref{Addoc-coqc}. Other modes of interaction with \Coq{} are possible: through an emacs shell window, an emacs generic user-interface for proof assistant (ProofGeneral~\cite{ProofGeneral}) or through a customized interface (PCoq~\cite{Pcoq}). These facilities are not documented here. There is also a \Coq{} Integrated Development Environment described in Chapter~\ref{Addoc-coqide}. \section*{How to read this book} This is a Reference Manual, not a User Manual, then it is not made for a continuous reading. However, it has some structure that is explained below. \begin{itemize} \item The first part describes the specification language, Gallina. Chapters~\ref{Gallina} and~\ref{Gallina-extension} describe the concrete syntax as well as the meaning of programs, theorems and proofs in the Calculus of Inductive Constructions. Chapter~\ref{Theories} describes the standard library of \Coq. Chapter~\ref{Cic} is a mathematical description of the formalism. Chapter~\ref{chapter:Modules} describes the module system. \item The second part describes the proof engine. It is divided in five chapters. Chapter~\ref{Vernacular-commands} presents all commands (we call them \emph{vernacular commands}) that are not directly related to interactive proving: requests to the environment, complete or partial evaluation, loading and compiling files. How to start and stop proofs, do multiple proofs in parallel is explained in Chapter~\ref{Proof-handling}. In Chapter~\ref{Tactics}, all commands that realize one or more steps of the proof are presented: we call them \emph{tactics}. The language to combine these tactics into complex proof strategies is given in Chapter~\ref{TacticLanguage}. Examples of tactics are described in Chapter~\ref{Tactics-examples}. %\item The third part describes how to extend the system in two ways: % adding parsing and pretty-printing rules % (Chapter~\ref{Addoc-syntax}) and writing new tactics % (Chapter~\ref{TacticLanguage}). 
\item The third part describes how to extend the syntax of \Coq. It corresponds to Chapter~\ref{Addoc-syntax}.
\item In the fourth part, more practical tools are documented. First, in Chapter~\ref{Addoc-coqc}, the usage of \texttt{coqc} (batch mode) and \texttt{coqtop} (interactive mode) with their options is described. Then, in Chapter~\ref{Utilities}, various utilities that come with the \Coq\ distribution are presented. Finally, Chapter~\ref{Addoc-coqide} describes the \Coq{} integrated development environment.
\end{itemize}
At the end of the document, after the global index, the user can find specific indexes for tactics, vernacular commands, and error messages.
\section*{List of additional documentation}
This manual does not contain all the documentation the user may need about \Coq{}. Various pieces of information can be found in the following documents:
\begin{description}
\item[Tutorial] A companion volume to this reference manual, the \Coq{} Tutorial, is aimed at gently introducing new users to developing proofs in \Coq{} without assuming prior knowledge of type theory. In a second step, the user can also read the tutorial on recursive types (document {\tt RecTutorial.ps}).
\item[Addendum] The fifth part (the Addendum) of the Reference Manual is distributed as a separate document. It contains more detailed documentation and examples about some specific aspects of the system that may interest only certain users. It shares the indexes, the page numbers and the bibliography with the Reference Manual. If you see in one of the indexes a page number that is outside the Reference Manual, it refers to the Addendum.
\item[Installation] A text file INSTALL that comes with the sources explains how to install \Coq{}.
\item[The \Coq{} standard library] A commented version of sources of the \Coq{} standard library (including only the specifications; the proofs are removed) is given in the additional document {\tt Library.ps}.
\end{description}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
coq-8.4pl2/doc/refman/Coercion.tex0000640000175000001440000004522611423134007016133 0ustar notinusers\achapter{Implicit Coercions}
\aauthor{Amokrane Saïbi}
\label{Coercions-full}
\index{Coercions!presentation}
\asection{General Presentation}
This section describes the inheritance mechanism of {\Coq}. In {\Coq} with inheritance, we are not interested in adding any expressive power to our theory, but only convenience. Given a term, possibly not typable, we are interested in the problem of determining if it can be well typed modulo insertion of appropriate coercions. We allow writing:
\begin{itemize}
\item $f~a$ where $f:forall~ x:A, B$ and $a:A'$ when $A'$ can be seen in some sense as a subtype of $A$.
\item $x:A$ when $A$ is not a type, but can be seen in a certain sense as a type: set, group, category etc.
\item $f~a$ when $f$ is not a function, but can be seen in a certain sense as a function: bijection, functor, any structure morphism etc.
\end{itemize}
\asection{Classes}
\index{Coercions!classes}
A class with $n$ parameters is any defined name with a type $forall~ (x_1:A_1)..(x_n:A_n), s$ where $s$ is a sort. Thus a class with parameters is considered as a single class and not as a family of classes. An object of a class $C$ is any term of type $C~t_1 .. t_n$. In addition to these user-classes, we have two abstract classes:
\begin{itemize}
\item {\tt Sortclass}, the class of sorts; its objects are the terms whose type is a sort.
\item {\tt Funclass}, the class of functions; its objects are all the terms with a functional type, i.e. of form $forall~ x:A, B$. \end{itemize} Formally, the syntax of a classes is defined on Figure~\ref{fig:classes}. \begin{figure} \begin{centerframe} \begin{tabular}{lcl} {\class} & ::= & {\qualid} \\ & $|$ & {\tt Sortclass} \\ & $|$ & {\tt Funclass} \end{tabular} \end{centerframe} \caption{Syntax of classes} \label{fig:classes} \end{figure} \asection{Coercions} \index{Coercions!Funclass} \index{Coercions!Sortclass} A name $f$ can be declared as a coercion between a source user-class $C$ with $n$ parameters and a target class $D$ if one of these conditions holds: \newcommand{\oftype}{\!:\!} \begin{itemize} \item $D$ is a user-class, then the type of $f$ must have the form $forall~ (x_1 \oftype A_1)..(x_n \oftype A_n)(y\oftype C~x_1..x_n), D~u_1..u_m$ where $m$ is the number of parameters of $D$. \item $D$ is {\tt Funclass}, then the type of $f$ must have the form $forall~ (x_1\oftype A_1)..(x_n\oftype A_n)(y\oftype C~x_1..x_n)(x:A), B$. \item $D$ is {\tt Sortclass}, then the type of $f$ must have the form $forall~ (x_1\oftype A_1)..(x_n\oftype A_n)(y\oftype C~x_1..x_n), s$ with $s$ a sort. \end{itemize} We then write $f:C \mbox{\texttt{>->}} D$. The restriction on the type of coercions is called {\em the uniform inheritance condition}. Remark that the abstract classes {\tt Funclass} and {\tt Sortclass} cannot be source classes. To coerce an object $t:C~t_1..t_n$ of $C$ towards $D$, we have to apply the coercion $f$ to it; the obtained term $f~t_1..t_n~t$ is then an object of $D$. \asection{Identity Coercions} \index{Coercions!identity} Identity coercions are special cases of coercions used to go around the uniform inheritance condition. Let $C$ and $D$ be two classes with respectively $n$ and $m$ parameters and $f:forall~(x_1:T_1)..(x_k:T_k)(y:C~u_1..u_n), D~v_1..v_m$ a function which does not verify the uniform inheritance condition. To declare $f$ as coercion, one has first to declare a subclass $C'$ of $C$: $$C' := fun~ (x_1:T_1)..(x_k:T_k) => C~u_1..u_n$$ \noindent We then define an {\em identity coercion} between $C'$ and $C$: \begin{eqnarray*} Id\_C'\_C & := & fun~ (x_1:T_1)..(x_k:T_k)(y:C'~x_1..x_k) => (y:C~u_1..u_n)\\ \end{eqnarray*} We can now declare $f$ as coercion from $C'$ to $D$, since we can ``cast'' its type as $forall~ (x_1:T_1)..(x_k:T_k)(y:C'~x_1..x_k),D~v_1..v_m$.\\ The identity coercions have a special status: to coerce an object $t:C'~t_1..t_k$ of $C'$ towards $C$, we does not have to insert explicitly $Id\_C'\_C$ since $Id\_C'\_C~t_1..t_k~t$ is convertible with $t$. However we ``rewrite'' the type of $t$ to become an object of $C$; in this case, it becomes $C~u_1^*..u_k^*$ where each $u_i^*$ is the result of the substitution in $u_i$ of the variables $x_j$ by $t_j$. \asection{Inheritance Graph} \index{Coercions!inheritance graph} Coercions form an inheritance graph with classes as nodes. We call {\em coercion path} an ordered list of coercions between two nodes of the graph. A class $C$ is said to be a subclass of $D$ if there is a coercion path in the graph from $C$ to $D$; we also say that $C$ inherits from $D$. Our mechanism supports multiple inheritance since a class may inherit from several classes, contrary to simple inheritance where a class inherits from at most one class. However there must be at most one path between two classes. If this is not the case, only the {\em oldest} one is valid and the others are ignored. 
So the order of declaration of coercions is important. We extend notations for coercions to coercion paths. For instance $[f_1;..;f_k]:C \mbox{\texttt{>->}} D$ is the coercion path composed by the coercions $f_1..f_k$. The application of a coercion path to a term consists of the successive application of its coercions. \asection{Declaration of Coercions} %%%%% "Class" is useless, since classes are implicitely defined via coercions. % \asubsection{\tt Class {\qualid}.}\comindex{Class} % Declares {\qualid} as a new class. % \begin{ErrMsgs} % \item {\qualid} \errindex{not declared} % \item {\qualid} \errindex{is already a class} % \item \errindex{Type of {\qualid} does not end with a sort} % \end{ErrMsgs} % \begin{Variant} % \item {\tt Class Local {\qualid}.} \\ % Declares the construction denoted by {\qualid} as a new local class to % the current section. % \end{Variant} % END "Class" is useless \asubsection{\tt Coercion {\qualid} : {\class$_1$} >-> {\class$_2$}.} \comindex{Coercion} Declares the construction denoted by {\qualid} as a coercion between {\class$_1$} and {\class$_2$}. % Useless information % The classes {\class$_1$} and {\class$_2$} are first declared if necessary. \begin{ErrMsgs} \item {\qualid} \errindex{not declared} \item {\qualid} \errindex{is already a coercion} \item \errindex{Funclass cannot be a source class} \item \errindex{Sortclass cannot be a source class} \item {\qualid} \errindex{is not a function} \item \errindex{Cannot find the source class of {\qualid}} \item \errindex{Cannot recognize {\class$_1$} as a source class of {\qualid}} \item {\qualid} \errindex{does not respect the uniform inheritance condition} \item \errindex{Found target class {\class} instead of {\class$_2$}} \end{ErrMsgs} When the coercion {\qualid} is added to the inheritance graph, non valid coercion paths are ignored; they are signaled by a warning. \\[0.3cm] \noindent {\bf Warning :} \begin{enumerate} \item \begin{tabbing} {\tt Ambiguous paths: }\= $[f_1^1;..;f_{n_1}^1] : C_1\mbox{\tt >->}D_1$\\ \> ... \\ \>$[f_1^m;..;f_{n_m}^m] : C_m\mbox{\tt >->}D_m$ \end{tabbing} \end{enumerate} \begin{Variants} \item {\tt Local Coercion {\qualid} : {\class$_1$} >-> {\class$_2$}.} \comindex{Local Coercion}\\ Declares the construction denoted by {\qualid} as a coercion local to the current section. \item {\tt Coercion {\ident} := {\term}}\comindex{Coercion}\\ This defines {\ident} just like \texttt{Definition {\ident} := {\term}}, and then declares {\ident} as a coercion between it source and its target. \item {\tt Coercion {\ident} := {\term} : {\type}}\\ This defines {\ident} just like \texttt{Definition {\ident} : {\type} := {\term}}, and then declares {\ident} as a coercion between it source and its target. \item {\tt Local Coercion {\ident} := {\term}}\comindex{Local Coercion}\\ This defines {\ident} just like \texttt{Let {\ident} := {\term}}, and then declares {\ident} as a coercion between it source and its target. \item Assumptions can be declared as coercions at declaration time. 
This extends the grammar of assumptions from Figure~\ref{sentences-syntax} as follows: \comindex{Variable \mbox{\rm (and coercions)}} \comindex{Axiom \mbox{\rm (and coercions)}} \comindex{Parameter \mbox{\rm (and coercions)}} \comindex{Hypothesis \mbox{\rm (and coercions)}} \begin{tabular}{lcl} %% Declarations {\assumption} & ::= & {\assumptionkeyword} {\assums} {\tt .} \\ &&\\ {\assums} & ::= & {\simpleassums} \\ & $|$ & \nelist{{\tt (} \simpleassums {\tt )}}{} \\ &&\\ {\simpleassums} & ::= & \nelist{\ident}{} {\tt :}\zeroone{{\tt >}} {\term}\\ \end{tabular} If the extra {\tt >} is present before the type of some assumptions, these assumptions are declared as coercions. \item Constructors of inductive types can be declared as coercions at definition time of the inductive type. This extends and modifies the grammar of inductive types from Figure \ref{sentences-syntax} as follows: \comindex{Inductive \mbox{\rm (and coercions)}} \comindex{CoInductive \mbox{\rm (and coercions)}} \begin{center} \begin{tabular}{lcl} %% Inductives {\inductive} & ::= & {\tt Inductive} \nelist{\inductivebody}{with} {\tt .} \\ & $|$ & {\tt CoInductive} \nelist{\inductivebody}{with} {\tt .} \\ & & \\ {\inductivebody} & ::= & {\ident} \zeroone{\binders} {\tt :} {\term} {\tt :=} \\ && ~~~\zeroone{\zeroone{\tt |} \nelist{\constructor}{|}} \\ & & \\ {\constructor} & ::= & {\ident} \zeroone{\binders} \zeroone{{\tt :}\zeroone{\tt >} {\term}} \\ \end{tabular} \end{center} Especially, if the extra {\tt >} is present in a constructor declaration, this constructor is declared as a coercion. \end{Variants} \asubsection{\tt Identity Coercion {\ident}:{\class$_1$} >-> {\class$_2$}.} \comindex{Identity Coercion} We check that {\class$_1$} is a constant with a value of the form $fun~ (x_1:T_1)..(x_n:T_n) => (\mbox{\class}_2~t_1..t_m)$ where $m$ is the number of parameters of \class$_2$. Then we define an identity function with the type $forall~ (x_1:T_1)..(x_n:T_n)(y:\mbox{\class}_1~x_1..x_n), {\mbox{\class}_2}~t_1..t_m$, and we declare it as an identity coercion between {\class$_1$} and {\class$_2$}. \begin{ErrMsgs} \item {\class$_1$} \errindex{must be a transparent constant} \end{ErrMsgs} \begin{Variants} \item {\tt Local Identity Coercion {\ident}:{\ident$_1$} >-> {\ident$_2$}.} \\ Idem but locally to the current section. \item {\tt SubClass {\ident} := {\type}.} \\ \comindex{SubClass} If {\type} is a class {\ident'} applied to some arguments then {\ident} is defined and an identity coercion of name {\tt Id\_{\ident}\_{\ident'}} is declared. Otherwise said, this is an abbreviation for {\tt Definition {\ident} := {\type}.} followed by {\tt Identity Coercion Id\_{\ident}\_{\ident'}:{\ident} >-> {\ident'}}. \item {\tt Local SubClass {\ident} := {\type}.} \\ Same as before but locally to the current section. \end{Variants} \asection{Displaying Available Coercions} \asubsection{\tt Print Classes.} \comindex{Print Classes} Print the list of declared classes in the current context. \asubsection{\tt Print Coercions.} \comindex{Print Coercions} Print the list of declared coercions in the current context. \asubsection{\tt Print Graph.} \comindex{Print Graph} Print the list of valid coercion paths in the current context. \asubsection{\tt Print Coercion Paths {\class$_1$} {\class$_2$}.} \comindex{Print Coercion Paths} Print the list of valid coercion paths from {\class$_1$} to {\class$_2$}. 
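For instance, after declaring a coercion {\tt inj} from a class {\tt A} to a class {\tt B} (the names are purely illustrative), these commands can be used to inspect the declared coercions and the inheritance graph:
\begin{verbatim}
Parameter A B : Set.
Parameter inj : A -> B.
Coercion inj : A >-> B.
Print Coercions.
Print Graph.
\end{verbatim}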
\asection{Activating the Printing of Coercions} \asubsection{\tt Set Printing Coercions.} \comindex{Set Printing Coercions} \comindex{Unset Printing Coercions} This command forces all the coercions to be printed. Conversely, to skip the printing of coercions, use {\tt Unset Printing Coercions}. By default, coercions are not printed. \asubsection{\tt Set Printing Coercion {\qualid}.} \comindex{Set Printing Coercion} \comindex{Unset Printing Coercion} This command forces coercion denoted by {\qualid} to be printed. To skip the printing of coercion {\qualid}, use {\tt Unset Printing Coercion {\qualid}}. By default, a coercion is never printed. \asection{Classes as Records} \label{Coercions-and-records} \index{Coercions!and records} We allow the definition of {\em Structures with Inheritance} (or classes as records) by extending the existing {\tt Record} macro (see Section~\ref{Record}). Its new syntax is: \begin{center} \begin{tabular}{l} {\tt Record \zeroone{>}~{\ident} \zeroone{\binders} : {\sort} := \zeroone{\ident$_0$} \verb+{+} \\ ~~~~\begin{tabular}{l} {\tt \ident$_1$ $[$:$|$:>$]$ \term$_1$ ;} \\ ... \\ {\tt \ident$_n$ $[$:$|$:>$]$ \term$_n$ \verb+}+. } \end{tabular} \end{tabular} \end{center} The identifier {\ident} is the name of the defined record and {\sort} is its type. The identifier {\ident$_0$} is the name of its constructor. The identifiers {\ident$_1$}, .., {\ident$_n$} are the names of its fields and {\term$_1$}, .., {\term$_n$} their respective types. The alternative {\tt $[$:$|$:>$]$} is ``{\tt :}'' or ``{\tt :>}''. If {\tt {\ident$_i$}:>{\term$_i$}}, then {\ident$_i$} is automatically declared as coercion from {\ident} to the class of {\term$_i$}. Remark that {\ident$_i$} always verifies the uniform inheritance condition. If the optional ``{\tt >}'' before {\ident} is present, then {\ident$_0$} (or the default name {\tt Build\_{\ident}} if {\ident$_0$} is omitted) is automatically declared as a coercion from the class of {\term$_n$} to {\ident} (this may fail if the uniform inheritance condition is not satisfied). \Rem The keyword {\tt Structure}\comindex{Structure} is a synonym of {\tt Record}. \asection{Coercions and Sections} \index{Coercions!and sections} The inheritance mechanism is compatible with the section mechanism. The global classes and coercions defined inside a section are redefined after its closing, using their new value and new type. The classes and coercions which are local to the section are simply forgotten. Coercions with a local source class or a local target class, and coercions which do not verify the uniform inheritance condition any longer are also forgotten. \asection{Coercions and Modules} \index{Coercions!and modules} From Coq version 8.3, the coercions present in a module are activated only when the module is explicitly imported. Formerly, the coercions were activated as soon as the module was required, whatever it was imported or not. To recover the behavior of the versions of Coq prior to 8.3, use the following command: \comindex{Set Automatic Coercions Import} \comindex{Unset Automatic Coercions Import} \begin{verbatim} Set Automatic Coercions Import. \end{verbatim} To cancel the effect of the option, use instead: \begin{verbatim} Unset Automatic Coercions Import. \end{verbatim} \asection{Examples} There are three situations: \begin{itemize} \item $f~a$ is ill-typed where $f:forall~x:A,B$ and $a:A'$. 
If there is a coercion path between $A'$ and $A$, $f~a$ is transformed into $f~a'$ where $a'$ is the result of the application of this coercion path to $a$. We first give an example of coercion between atomic inductive types %\begin{\small} \begin{coq_example} Definition bool_in_nat (b:bool) := if b then 0 else 1. Coercion bool_in_nat : bool >-> nat. Check (0 = true). Set Printing Coercions. Check (0 = true). \end{coq_example} %\end{small} \begin{coq_eval} Unset Printing Coercions. \end{coq_eval} \Warning ``\verb|Check true=O.|'' fails. This is ``normal'' behaviour of coercions. To validate \verb|true=O|, the coercion is searched from \verb=nat= to \verb=bool=. There is none. We give an example of coercion between classes with parameters. %\begin{\small} \begin{coq_example} Parameters (C : nat -> Set) (D : nat -> bool -> Set) (E : bool -> Set). Parameter f : forall n:nat, C n -> D (S n) true. Coercion f : C >-> D. Parameter g : forall (n:nat) (b:bool), D n b -> E b. Coercion g : D >-> E. Parameter c : C 0. Parameter T : E true -> nat. Check (T c). Set Printing Coercions. Check (T c). \end{coq_example} %\end{small} \begin{coq_eval} Unset Printing Coercions. \end{coq_eval} We give now an example using identity coercions. %\begin{small} \begin{coq_example} Definition D' (b:bool) := D 1 b. Identity Coercion IdD'D : D' >-> D. Print IdD'D. Parameter d' : D' true. Check (T d'). Set Printing Coercions. Check (T d'). \end{coq_example} %\end{small} \begin{coq_eval} Unset Printing Coercions. \end{coq_eval} In the case of functional arguments, we use the monotonic rule of sub-typing. Approximatively, to coerce $t:forall~x:A, B$ towards $forall~x:A',B'$, one have to coerce $A'$ towards $A$ and $B$ towards $B'$. An example is given below: %\begin{small} \begin{coq_example} Parameters (A B : Set) (h : A -> B). Coercion h : A >-> B. Parameter U : (A -> E true) -> nat. Parameter t : B -> C 0. Check (U t). Set Printing Coercions. Check (U t). \end{coq_example} %\end{small} \begin{coq_eval} Unset Printing Coercions. \end{coq_eval} Remark the changes in the result following the modification of the previous example. %\begin{small} \begin{coq_example} Parameter U' : (C 0 -> B) -> nat. Parameter t' : E true -> A. Check (U' t'). Set Printing Coercions. Check (U' t'). \end{coq_example} %\end{small} \begin{coq_eval} Unset Printing Coercions. \end{coq_eval} \item An assumption $x:A$ when $A$ is not a type, is ill-typed. It is replaced by $x:A'$ where $A'$ is the result of the application to $A$ of the coercion path between the class of $A$ and {\tt Sortclass} if it exists. This case occurs in the abstraction $fun~ x:A => t$, universal quantification $forall~x:A, B$, global variables and parameters of (co-)inductive definitions and functions. In $forall~x:A, B$, such a coercion path may be applied to $B$ also if necessary. %\begin{small} \begin{coq_example} Parameter Graph : Type. Parameter Node : Graph -> Type. Coercion Node : Graph >-> Sortclass. Parameter G : Graph. Parameter Arrows : G -> G -> Type. Check Arrows. Parameter fg : G -> G. Check fg. Set Printing Coercions. Check fg. \end{coq_example} %\end{small} \begin{coq_eval} Unset Printing Coercions. \end{coq_eval} \item $f~a$ is ill-typed because $f:A$ is not a function. The term $f$ is replaced by the term obtained by applying to $f$ the coercion path between $A$ and {\tt Funclass} if it exists. %\begin{small} \begin{coq_example} Parameter bij : Set -> Set -> Set. Parameter ap : forall A B:Set, bij A B -> A -> B. Coercion ap : bij >-> Funclass. 
Parameter b : bij nat nat.
Check (b 0).
Set Printing Coercions.
Check (b 0).
\end{coq_example}
%\end{small}
\begin{coq_eval}
Unset Printing Coercions.
\end{coq_eval}
Let us see the resulting graph of this session.
%\begin{small}
\begin{coq_example}
Print Graph.
\end{coq_example}
%\end{small}
\end{itemize}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
coq-8.4pl2/doc/refman/Micromega.tex0000640000175000001440000002460711602654707016302 0ustar notinusers\achapter{Micromega : tactics for solving arithmetic goals over ordered rings}
\aauthor{Frédéric Besson and Evgeny Makarov}
\newtheorem{theorem}{Theorem}
\asection{Short description of the tactics}
\tacindex{psatz} \tacindex{lra}
\label{sec:psatz-hurry}
The {\tt Psatz} module ({\tt Require Psatz.}) gives access to several tactics for solving arithmetic goals over {\tt Z}\footnote{Support for {\tt nat} and {\tt N} is obtained by pre-processing the goal with the {\tt zify} tactic.}, {\tt Q} and {\tt R}:
\begin{itemize}
\item {\tt lia} is a decision procedure for linear integer arithmetic (see Section~\ref{sec:lia});
\item {\tt nia} is an incomplete proof procedure for integer non-linear arithmetic (see Section~\ref{sec:nia});
\item {\tt lra} is a decision procedure for linear (real or rational) arithmetic goals (see Section~\ref{sec:lra});
\item {\tt psatz D n}, where {\tt D} is {\tt Z}, {\tt Q} or {\tt R} and {\tt n} is an optional integer limiting the proof search depth, is an incomplete proof procedure for non-linear arithmetic. It is based on John Harrison's HOL Light driver to the external prover {\tt csdp}\footnote{Sources and binaries can be found at \url{https://projects.coin-or.org/Csdp}}. Note that the {\tt csdp} driver generates a \emph{proof cache}, thus making it possible to rerun scripts even without {\tt csdp} (see Section~\ref{sec:psatz}).
\end{itemize}
The tactics solve propositional formulas parameterised by atomic arithmetic expressions interpreted over a domain $D \in \{\mathbb{Z}, \mathbb{Q}, \mathbb{R} \}$. The syntax of the formulas is the following:
\[
\begin{array}{lcl}
F &::=& A \mid P \mid \mathit{True} \mid \mathit{False} \mid F_1 \land F_2 \mid F_1 \lor F_2 \mid F_1 \leftrightarrow F_2 \mid F_1 \to F_2 \mid \sim F\\
A &::=& p_1 = p_2 \mid p_1 > p_2 \mid p_1 < p_2 \mid p_1 \ge p_2 \mid p_1 \le p_2 \\
p &::=& c \mid x \mid {-}p \mid p_1 - p_2 \mid p_1 + p_2 \mid p_1 \times p_2 \mid p \verb!^! n
\end{array}
\]
where $c$ is a numeric constant, $x\in D$ is a numeric variable, the operators $-$, $+$, $\times$ are respectively subtraction, addition and product, $p \verb!^!n $ is exponentiation by a constant $n$, and $P$ is an arbitrary proposition.
%
For {\tt Q}, equality is not Leibniz equality {\tt =} but the equality of rationals {\tt ==}.
For {\tt Z} (resp. {\tt Q}), $c$ ranges over integer constants (resp. rational constants).
%% The following table details for each domain $D \in \{\mathbb{Z},\mathbb{Q},\mathbb{R}\}$ the range of constants $c$ and exponent $n$.
%% \[
%% \begin{array}{|c|c|c|c|}
%% \hline
%% &\mathbb{Z} & \mathbb{Q} & \mathbb{R} \\
%% \hline
%% c &\mathtt{Z} & \mathtt{Q} & (see below) \\
%% \hline
%% n &\mathtt{Z} & \mathtt{Z} & \mathtt{nat}\\
%% \hline
%% \end{array}
%% \]
For {\tt R}, the tactic recognises as real constants the following expressions:
\begin{verbatim}
c ::= R0 | R1 | Rmul(c,c) | Rplus(c,c) | Rminus(c,c) | IZR z | IQR q | Rdiv(c,c) | Rinv c
\end{verbatim}
where ${\tt z}$ is a constant in {\tt Z} and {\tt q} is a constant in {\tt Q}.
This includes integer constants written using the decimal notation \emph{i.e.,} {\tt c\%R}. \asection{\emph{Positivstellensatz} refutations} \label{sec:psatz-back} The name {\tt psatz} is an abbreviation for \emph{positivstellensatz} -- literally positivity theorem -- which generalises Hilbert's \emph{nullstellensatz}. % It relies on the notion of $\mathit{Cone}$. Given a (finite) set of polynomials $S$, $Cone(S)$ is inductively defined as the smallest set of polynomials closed under the following rules: \[ \begin{array}{l} \dfrac{p \in S}{p \in Cone(S)} \quad \dfrac{}{p^2 \in Cone(S)} \quad \dfrac{p_1 \in Cone(S) \quad p_2 \in Cone(S) \quad \Join \in \{+,*\}} {p_1 \Join p_2 \in Cone(S)}\\ \end{array} \] The following theorem provides a proof principle for checking that a set of polynomial inequalities do not have solutions\footnote{Variants deal with equalities and strict inequalities.}: \begin{theorem} \label{thm:psatz} Let $S$ be a set of polynomials.\\ If ${-}1$ belongs to $Cone(S)$ then the conjunction $\bigwedge_{p \in S} p\ge 0$ is unsatisfiable. \end{theorem} A proof based on this theorem is called a \emph{positivstellensatz} refutation. % The tactics work as follows. Formulas are normalised into conjonctive normal form $\bigwedge_i C_i$ where $C_i$ has the general form $(\bigwedge_{j\in S_i} p_j \Join 0) \to \mathit{False})$ and $\Join \in \{>,\ge,=\}$ for $D\in \{\mathbb{Q},\mathbb{R}\}$ and $\Join \in \{\ge, =\}$ for $\mathbb{Z}$. % For each conjunct $C_i$, the tactic calls a oracle which searches for $-1$ within the cone. % Upon success, the oracle returns a \emph{cone expression} that is normalised by the {\tt ring} tactic (see chapter~\ref{ring}) and checked to be $-1$. \asection{{\tt lra} : a decision procedure for linear real and rational arithmetic} \label{sec:lra} The {\tt lra} tactic is searching for \emph{linear} refutations using Fourier elimination\footnote{More efficient linear programming techniques could equally be employed}. As a result, this tactic explores a subset of the $Cone$ defined as: \[ LinCone(S) =\left\{ \left. \sum_{p \in S} \alpha_p \times p\ \right|\ \alpha_p \mbox{ are positive constants} \right\} \] The deductive power of {\tt lra} is the combined deductive power of {\tt ring\_simplify} and {\tt fourier}. % There is also an overlap with the {\tt field} tactic {\emph e.g.}, {\tt x = 10 * x / 10} is solved by {\tt lra}. \asection{ {\tt psatz} : a proof procedure for non-linear arithmetic} \label{sec:psatz} The {\tt psatz} tactic explores the $Cone$ by increasing degrees -- hence the depth parameter $n$. In theory, such a proof search is complete -- if the goal is provable the search eventually stops. Unfortunately, the external oracle is using numeric (approximate) optimisation techniques that might miss a refutation. To illustrate the working of the tactic, consider we wish to prove the following Coq goal.\\ \begin{coq_eval} Require Import ZArith Psatz. Open Scope Z_scope. \end{coq_eval} \begin{coq_example*} Goal forall x, -x^2 >= 0 -> x - 1 >= 0 -> False. \end{coq_example*} \begin{coq_eval} intro x; psatz Z 2. \end{coq_eval} Such a goal is solved by {\tt intro x; psatz Z 2}. The oracle returns the cone expression $2 \times (\mathbf{x-1}) + \mathbf{x-1}\times\mathbf{x-1} + \mathbf{-x^2}$ (polynomial hypotheses are printed in bold). By construction, this expression belongs to $Cone(\{-x^2, x -1\})$. Moreover, by running {\tt ring} we obtain $-1$. By Theorem~\ref{thm:psatz}, the goal is valid. 
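The linear goal mentioned in Section~\ref{sec:lra} can be discharged by {\tt lra} alone; a minimal script, assuming a standard {\Coq} setting with the {\tt Reals} and {\tt Psatz} libraries available, might look as follows:
\begin{verbatim}
Require Import Reals Psatz.
Open Scope R_scope.

Goal forall x : R, x = 10 * x / 10.
Proof. intro x; lra. Qed.
\end{verbatim}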
%
%% \paragraph{The {\tt sos} tactic} -- where {\tt sos} stands for \emph{sum of squares} -- tries to prove that a
%% single polynomial $p$ is positive by expressing it as a sum of squares \emph{i.e.,} $\sum_{i\in S} p_i^2$.
%% This amounts to searching for $p$ in the cone without generators \emph{i.e.}, $Cone(\{\})$.
%
\asection{ {\tt lia} : a tactic for linear integer arithmetic }
\tacindex{lia}
\label{sec:lia}
The tactic {\tt lia} offers an alternative to the {\tt omega} and {\tt romega} tactics (see Chapter~\ref{OmegaChapter}).
%
Roughly speaking, the deductive power of {\tt lia} is the combined deductive power of {\tt ring\_simplify} and {\tt omega}.
%
However, it solves linear goals that {\tt omega} and {\tt romega} do not solve, such as the following so-called \emph{omega nightmare}~\cite{TheOmegaPaper}.
\begin{coq_example*}
Goal forall x y, 27 <= 11 * x + 13 * y <= 45 -> -10 <= 7 * x - 9 * y <= 4 -> False.
\end{coq_example*}
\begin{coq_eval}
intros x y; lia.
\end{coq_eval}
The estimation of the relative efficiency of {\tt lia} \emph{vs} {\tt omega} and {\tt romega} is under evaluation.
\paragraph{High level view of {\tt lia}.}
Over $\mathbb{R}$, \emph{positivstellensatz} refutations are a complete proof principle\footnote{In practice, the oracle might fail to produce such a refutation.}.
%
However, this is not the case over $\mathbb{Z}$.
%
Actually, \emph{positivstellensatz} refutations are not even sufficient to decide linear \emph{integer} arithmetic.
%
The canonical example is {\tt 2 * x = 1 -> False} which is a theorem of $\mathbb{Z}$ but not a theorem of $\mathbb{R}$.
%
To remedy this weakness, the {\tt lia} tactic recursively uses a combination of:
%
\begin{itemize}
\item linear \emph{positivstellensatz} refutations;
\item cutting plane proofs;
\item case split.
\end{itemize}
\paragraph{Cutting plane proofs} are a way to take into account the discreteness of $\mathbb{Z}$ by rounding up (rational) constants to the closest integer.
%
\begin{theorem}
Let $p$ be an integer and $c$ a rational constant.
\[
p \ge c \Rightarrow p \ge \lceil c \rceil
\]
\end{theorem}
For instance, from $2 * x = 1$ we can deduce
\begin{itemize}
\item $x \ge 1/2$ whose cut plane is $ x \ge \lceil 1/2 \rceil = 1$;
\item $ x \le 1/2$ whose cut plane is $ x \le \lfloor 1/2 \rfloor = 0$.
\end{itemize}
By combining these two facts (in normal form) $x - 1 \ge 0$ and $-x \ge 0$, we conclude by exhibiting a \emph{positivstellensatz} refutation ($-1 \equiv \mathbf{x-1} + \mathbf{-x} \in Cone(\{x-1,x\})$).
Cutting plane proofs and linear \emph{positivstellensatz} refutations are a complete proof principle for integer linear arithmetic.
\paragraph{Case split} allows enumerating the possible values of an expression.
\begin{theorem}
Let $p$ be an integer and $c_1$ and $c_2$ integer constants.
\[
c_1 \le p \le c_2 \Rightarrow \bigvee_{x \in [c_1,c_2]} p = x
\]
\end{theorem}
Our current oracle tries to find an expression $e$ with a small range $[c_1,c_2]$.
%
We generate $c_2 - c_1$ subgoals whose contexts are enriched with an equation $e = i$ for $i \in [c_1,c_2]$ and recursively search for a proof.
\asection{ {\tt nia} : a proof procedure for non-linear integer arithmetic}
\tacindex{nia}
\label{sec:nia}
The {\tt nia} tactic is an \emph{experimental} proof procedure for non-linear integer arithmetic.
%
The tactic performs a limited amount of non-linear reasoning before running the linear prover of {\tt lia}.
This pre-processing does the following: \begin{itemize} \item If the context contains an arithmetic expression of the form $e[x^2]$ where $x$ is a monomial, the context is enriched with $x^2\ge 0$; \item For all pairs of hypotheses $e_1\ge 0$, $e_2 \ge 0$, the context is enriched with $e_1 \times e_2 \ge 0$. \end{itemize} After pre-processing, the linear prover of {\tt lia} is searching for a proof by abstracting monomials by variables. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-cic.tex0000640000175000001440000022747412010532550016303 0ustar notinusers\chapter[Calculus of Inductive Constructions]{Calculus of Inductive Constructions \label{Cic} \index{Cic@\textsc{CIC}} \index{pCic@p\textsc{CIC}} \index{Calculus of (Co)Inductive Constructions}} The underlying formal language of {\Coq} is a {\em Calculus of Constructions} with {\em Inductive Definitions}. It is presented in this chapter. For {\Coq} version V7, this Calculus was known as the {\em Calculus of (Co)Inductive Constructions}\index{Calculus of (Co)Inductive Constructions} (\iCIC\ in short). The underlying calculus of {\Coq} version V8.0 and up is a weaker calculus where the sort \Set{} satisfies predicative rules. We call this calculus the {\em Predicative Calculus of (Co)Inductive Constructions}\index{Predicative Calculus of (Co)Inductive Constructions} (\pCIC\ in short). In Section~\ref{impredicativity} we give the extra-rules for \iCIC. A compiling option of \Coq{} allows to type-check theories in this extended system. In \CIC\, all objects have a {\em type}. There are types for functions (or programs), there are atomic types (especially datatypes)... but also types for proofs and types for the types themselves. Especially, any object handled in the formalism must belong to a type. For instance, the statement {\it ``for all x, P''} is not allowed in type theory; you must say instead: {\it ``for all x belonging to T, P''}. The expression {\it ``x belonging to T''} is written {\it ``x:T''}. One also says: {\it ``x has type T''}. The terms of {\CIC} are detailed in Section~\ref{Terms}. In \CIC\, there is an internal reduction mechanism. In particular, it allows to decide if two programs are {\em intentionally} equal (one says {\em convertible}). Convertibility is presented in section \ref{convertibility}. The remaining sections are concerned with the type-checking of terms. The beginner can skip them. The reader seeking a background on the Calculus of Inductive Constructions may read several papers. Gimnez and Castran~\cite{GimCas05} provide an introduction to inductive and co-inductive definitions in Coq. In their book~\cite{CoqArt}, Bertot and Castran give a precise description of the \CIC{} based on numerous practical examples. Barras~\cite{Bar99}, Werner~\cite{Wer94} and Paulin-Mohring~\cite{Moh97} are the most recent theses dealing with Inductive Definitions. Coquand-Huet~\cite{CoHu85a,CoHu85b,CoHu86} introduces the Calculus of Constructions. Coquand-Paulin~\cite{CoPa89} extended this calculus to inductive definitions. The {\CIC} is a formulation of type theory including the possibility of inductive constructions, Barendregt~\cite{Bar91} studies the modern form of type theory. \section[The terms]{The terms\label{Terms}} In most type theories, one usually makes a syntactic distinction between types and terms. This is not the case for \CIC\ which defines both types and terms in the same syntactical structure. 
This is because the type-theory itself forces terms and types to be defined in a mutual recursive way and also because similar constructions can be applied to both terms and types and consequently can share the same syntactic structure. Consider for instance the $\ra$ constructor and assume \nat\ is the type of natural numbers. Then $\ra$ is used both to denote $\nat\ra\nat$ which is the type of functions from \nat\ to \nat, and to denote $\nat \ra \Prop$ which is the type of unary predicates over the natural numbers. Consider abstraction which builds functions. It serves to build ``ordinary'' functions as $\kw{fun}~x:\nat \Ra ({\tt mult} ~x~x)$ (assuming {\tt mult} is already defined) but may build also predicates over the natural numbers. For instance $\kw{fun}~x:\nat \Ra (x=x)$ will represent a predicate $P$, informally written in mathematics $P(x)\equiv x=x$. If $P$ has type $\nat \ra \Prop$, $(P~x)$ is a proposition, furthermore $\kw{forall}~x:\nat,(P~x)$ will represent the type of functions which associate to each natural number $n$ an object of type $(P~n)$ and consequently represent proofs of the formula ``$\forall x.P(x)$''. \subsection[Sorts]{Sorts\label{Sorts} \index{Sorts}} When manipulated as terms, types have themselves a type which is called a sort. There is an infinite well-founded typing hierarchy of sorts whose base sorts are {\Prop} and {\Set}. The sort {\Prop} intends to be the type of logical propositions. If $M$ is a logical proposition then it denotes the class of terms representing proofs of $M$. An object $m$ belonging to $M$ witnesses the fact that $M$ is provable. An object of type {\Prop} is called a proposition. The sort {\Set} intends to be the type of small sets. This includes data types such as booleans and naturals, but also products, subsets, and function types over these data types. {\Prop} and {\Set} themselves can be manipulated as ordinary terms. Consequently they also have a type. Because assuming simply that {\Set} has type {\Set} leads to an inconsistent theory, the language of {\CIC} has infinitely many sorts. There are, in addition to {\Set} and {\Prop} a hierarchy of universes {\Type$(i)$} for any integer $i$. Like {\Set}, all of the sorts {\Type$(i)$} contain small sets such as booleans, natural numbers, as well as products, subsets and function types over small sets. But, unlike {\Set}, they also contain large sets, namely the sorts {\Set} and {\Type$(j)$} for $j Forest A -> Tree A % with Forest (A : Set) : Set := % Empty : Forest A % | Cons : Tree A -> Forest A -> Forest A % \end{coq_example*} % will correspond in our formalism to: % \[\NInd{}{{\tt Tree}:\Set\ra\Set;{\tt Forest}:\Set\ra \Set} % {{\tt Node} : \forall A:\Set, A \ra {\tt Forest}~A \ra {\tt Tree}~A, % {\tt Empty} : \forall A:\Set, {\tt Forest}~A, % {\tt Cons} : \forall A:\Set, {\tt Tree}~A \ra {\tt Forest}~A \ra % {\tt Forest}~A}\] We keep track in the syntax of the number of parameters. Formally the representation of an inductive declaration will be \Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C} for an inductive definition valid in a context $\Gamma$ with $p$ parameters, a context of definitions $\Gamma_I$ and a context of constructors $\Gamma_C$. The definition \Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C} will be well-formed exactly when \NInd{\Gamma}{\Gamma_I}{\Gamma_C} is and when $p$ is (less or equal than) the number of parameters in \NInd{\Gamma}{\Gamma_I}{\Gamma_C}. 
\paragraph{Examples} The declaration for parameterized lists is: \[\Ind{}{1}{\List:\Set\ra\Set}{\Nil:(\forall A:\Set,\List~A),\cons : (\forall A:\Set, A \ra \List~A \ra \List~A)}\] The declaration for the length of lists is: \[\Ind{}{1}{\Length:\forall A:\Set, (\List~A)\ra \nat\ra\Prop} {\LNil:\forall A:\Set, \Length~A~(\Nil~A)~\nO,\\ \LCons :\forall A:\Set,\forall a:A, \forall l:(\List~A),\forall n:\nat, (\Length~A~l~n)\ra (\Length~A~(\cons~A~a~l)~(\nS~n))}\] The declaration for a mutual inductive definition of forests and trees is: \[\NInd{}{\tree:\Set,\forest:\Set} {\\~~\node:\forest \ra \tree, \emptyf:\forest,\consf:\tree \ra \forest \ra \forest\-}\] These representations are the ones obtained as the result of the \Coq\ declaration: \begin{coq_example*} Inductive nat : Set := | O : nat | S : nat -> nat. Inductive list (A:Set) : Set := | nil : list A | cons : A -> list A -> list A. \end{coq_example*} \begin{coq_example*} Inductive Length (A:Set) : list A -> nat -> Prop := | Lnil : Length A (nil A) O | Lcons : forall (a:A) (l:list A) (n:nat), Length A l n -> Length A (cons A a l) (S n). Inductive tree : Set := node : forest -> tree with forest : Set := | emptyf : forest | consf : tree -> forest -> forest. \end{coq_example*} % The inductive declaration in \Coq\ is slightly different from the one % we described theoretically. The difference is that in the type of % constructors the inductive definition is explicitly applied to the % parameters variables. The \Coq\ type-checker verifies that all parameters are applied in the correct manner in the conclusion of the type of each constructors~: In particular, the following definition will not be accepted because there is an occurrence of \List\ which is not applied to the parameter variable in the conclusion of the type of {\tt cons'}: \begin{coq_eval} Set Printing Depth 50. (********** The following is not correct and should produce **********) (********* Error: The 1st argument of list' must be A in ... *********) \end{coq_eval} \begin{coq_example} Inductive list' (A:Set) : Set := | nil' : list' A | cons' : A -> list' A -> list' (A*A). \end{coq_example} Since \Coq{} version 8.1, there is no restriction about parameters in the types of arguments of constructors. The following definition is valid: \begin{coq_example} Inductive list' (A:Set) : Set := | nil' : list' A | cons' : A -> list' (A->A) -> list' A. \end{coq_example} \subsection{Types of inductive objects} We have to give the type of constants in an environment $E$ which contains an inductive declaration. \begin{description} \item[Ind-Const] Assuming $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is $[c_1:C_1;\ldots;c_n:C_n]$, \inference{\frac{\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C} \in E ~~j=1\ldots k}{(I_j:A_j) \in E}} \inference{\frac{\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C} \in E ~~~~i=1.. n} {(c_i:C_i) \in E}} \end{description} \paragraph{Example.} We have $(\List:\Set \ra \Set), (\cons:\forall~A:\Set,A\ra(\List~A)\ra (\List~A))$, \\ $(\Length:\forall~A:\Set, (\List~A)\ra\nat\ra\Prop)$, $\tree:\Set$ and $\forest:\Set$. From now on, we write $\ListA$ instead of $(\List~A)$ and $\LengthA$ for $(\Length~A)$. 
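These typings can be observed directly in {\Coq}; the following queries, given only as an
illustration, display the types assigned to some of the constants declared above.
\begin{coq_example}
Check cons.
Check Length.
Check consf.
\end{coq_example}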
%\paragraph{Parameters.} %%The parameters introduce a distortion between the inside specification %%of the inductive declaration where parameters are supposed to be %%instantiated (this representation is appropriate for checking the %%correctness or deriving the destructor principle) and the outside %%typing rules where the inductive objects are seen as objects %%abstracted with respect to the parameters. %In the definition of \List\ or \Length\, $A$ is a parameter because %what is effectively inductively defined is $\ListA$ or $\LengthA$ for %a given $A$ which is constant in the type of constructors. But when %we define $(\LengthA~l~n)$, $l$ and $n$ are not parameters because the %constructors manipulate different instances of this family. \subsection{Well-formed inductive definitions} We cannot accept any inductive declaration because some of them lead to inconsistent systems. We restrict ourselves to definitions which satisfy a syntactic criterion of positivity. Before giving the formal rules, we need a few definitions: \paragraph[Definitions]{Definitions\index{Positivity}\label{Positivity}} A type $T$ is an {\em arity of sort $s$}\index{Arity} if it converts to the sort $s$ or to a product $\forall~x:T,U$ with $U$ an arity of sort $s$. (For instance $A\ra \Set$ or $\forall~A:\Prop,A\ra \Prop$ are arities of sort respectively \Set\ and \Prop). A {\em type of constructor of $I$}\index{Type of constructor} is either a term $(I~t_1\ldots ~t_n)$ or $\fa x:T,C$ with $C$ recursively a {\em type of constructor of $I$}. \smallskip The type of constructor $T$ will be said to {\em satisfy the positivity condition} for a constant $X$ in the following cases: \begin{itemize} \item $T=(X~t_1\ldots ~t_n)$ and $X$ does not occur free in any $t_i$ \item $T=\forall~x:U,V$ and $X$ occurs only strictly positively in $U$ and the type $V$ satisfies the positivity condition for $X$ \end{itemize} The constant $X$ {\em occurs strictly positively} in $T$ in the following cases: \begin{itemize} \item $X$ does not occur in $T$ \item $T$ converts to $(X~t_1 \ldots ~t_n)$ and $X$ does not occur in any of $t_i$ \item $T$ converts to $\forall~x:U,V$ and $X$ does not occur in type $U$ but occurs strictly positively in type $V$ \item $T$ converts to $(I~a_1 \ldots ~a_m ~ t_1 \ldots ~t_p)$ where $I$ is the name of an inductive declaration of the form $\Ind{\Gamma}{m}{I:A}{c_1:\forall p_1:P_1,\ldots \forall p_m:P_m,C_1;\ldots;c_n:\forall p_1:P_1,\ldots \forall p_m:P_m,C_n}$ (in particular, it is not mutually defined and it has $m$ parameters) and $X$ does not occur in any of the $t_i$, and the (instantiated) types of constructor $C_i\{p_j/a_j\}_{j=1\ldots m}$ of $I$ satisfy the nested positivity condition for $X$ %\item more generally, when $T$ is not a type, $X$ occurs strictly %positively in $T[x:U]u$ if $X$ does not occur in $U$ but occurs %strictly positively in $u$ \end{itemize} The type of constructor $T$ of $I$ {\em satisfies the nested positivity condition} for a constant $X$ in the following cases: \begin{itemize} \item $T=(I~b_1\ldots b_m~u_1\ldots ~u_{p})$, $I$ is an inductive definition with $m$ parameters and $X$ does not occur in any $u_i$ \item $T=\forall~x:U,V$ and $X$ occurs only strictly positively in $U$ and the type $V$ satisfies the nested positivity condition for $X$ \end{itemize} \paragraph{Example} $X$ occurs strictly positively in $A\ra X$ or $X*A$ or $({\tt list}~ X)$ but not in $X \ra A$ or $(X \ra A)\ra A$ nor $({\tt neg}~X)$ assuming the notion of product and lists were already defined and 
{\tt neg} is an inductive definition with declaration \Ind{}{A:\Set}{{\tt neg}:\Set}{{\tt neg}:(A\ra{\tt False}) \ra {\tt neg}}. Assuming $X$ has arity ${\tt nat \ra Prop}$ and {\tt ex} is the inductively defined existential quantifier, the occurrence of $X$ in ${\tt (ex~ nat~ \lb n:nat\mto (X~ n))}$ is also strictly positive. \paragraph{Correctness rules.} We shall now describe the rules allowing the introduction of a new inductive definition. \begin{description} \item[W-Ind] Let $E$ be an environment and $\Gamma,\Gamma_P,\Gamma_I,\Gamma_C$ are contexts such that $\Gamma_I$ is $[I_1:\forall \Gamma_P,A_1;\ldots;I_k:\forall \Gamma_P,A_k]$ and $\Gamma_C$ is $[c_1:\forall \Gamma_P,C_1;\ldots;c_n:\forall \Gamma_P,C_n]$. \inference{ \frac{ (\WTE{\Gamma;\Gamma_P}{A_j}{s'_j})_{j=1\ldots k} ~~ (\WTE{\Gamma;\Gamma_I;\Gamma_P}{C_i}{s_{q_i}})_{i=1\ldots n} } {\WF{E;\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C}}{\Gamma}}} provided that the following side conditions hold: \begin{itemize} \item $k>0$ and all of $I_j$ and $c_i$ are distinct names for $j=1\ldots k$ and $i=1\ldots n$, \item $p$ is the number of parameters of \NInd{\Gamma}{\Gamma_I}{\Gamma_C} and $\Gamma_P$ is the context of parameters, \item for $j=1\ldots k$ we have that $A_j$ is an arity of sort $s_j$ and $I_j \notin \Gamma \cup E$, \item for $i=1\ldots n$ we have that $C_i$ is a type of constructor of $I_{q_i}$ which satisfies the positivity condition for $I_1 \ldots I_k$ and $c_i \notin \Gamma \cup E$. \end{itemize} \end{description} One can remark that there is a constraint between the sort of the arity of the inductive type and the sort of the type of its constructors which will always be satisfied for the impredicative sort (\Prop) but may fail to define inductive definition on sort \Set{} and generate constraints between universes for inductive definitions in the {\Type} hierarchy. \paragraph{Examples.} It is well known that existential quantifier can be encoded as an inductive definition. The following declaration introduces the second-order existential quantifier $\exists X.P(X)$. \begin{coq_example*} Inductive exProp (P:Prop->Prop) : Prop := exP_intro : forall X:Prop, P X -> exProp P. \end{coq_example*} The same definition on \Set{} is not allowed and fails~: \begin{coq_eval} (********** The following is not correct and should produce **********) (*** Error: Large non-propositional inductive types must be in Type***) \end{coq_eval} \begin{coq_example} Inductive exSet (P:Set->Prop) : Set := exS_intro : forall X:Set, P X -> exSet P. \end{coq_example} It is possible to declare the same inductive definition in the universe \Type. The \texttt{exType} inductive definition has type $(\Type_i \ra\Prop)\ra \Type_j$ with the constraint that the parameter \texttt{X} of \texttt{exT\_intro} has type $\Type_k$ with $kProp) : Type := exT_intro : forall X:Type, P X -> exType P. \end{coq_example*} %We shall assume for the following definitions that, if necessary, we %annotated the type of constructors such that we know if the argument %is recursive or not. We shall write the type $(x:_R T)C$ if it is %a recursive argument and $(x:_P T)C$ if the argument is not recursive. \paragraph[Sort-polymorphism of inductive families.]{Sort-polymorphism of inductive families.\index{Sort-polymorphism of inductive families}} From {\Coq} version 8.1, inductive families declared in {\Type} are polymorphic over their arguments in {\Type}. If $A$ is an arity and $s$ a sort, we write $A_{/s}$ for the arity obtained from $A$ by replacing its sort with $s$. 
Especially, if $A$ is well-typed in some environment and context, then $A_{/s}$ is typable by typability of all products in the Calculus of Inductive Constructions. The following typing rule is added to the theory. \begin{description} \item[Ind-Family] Let $\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C}$ be an inductive definition. Let $\Gamma_P = [p_1:P_1;\ldots;p_{p}:P_{p}]$ be its context of parameters, $\Gamma_I = [I_1:\forall \Gamma_P,A_1;\ldots;I_k:\forall \Gamma_P,A_k]$ its context of definitions and $\Gamma_C = [c_1:\forall \Gamma_P,C_1;\ldots;c_n:\forall \Gamma_P,C_n]$ its context of constructors, with $c_i$ a constructor of $I_{q_i}$. Let $m \leq p$ be the length of the longest prefix of parameters such that the $m$ first arguments of all occurrences of all $I_j$ in all $C_k$ (even the occurrences in the hypotheses of $C_k$) are exactly applied to $p_1~\ldots~p_m$ ($m$ is the number of {\em recursively uniform parameters} and the $p-m$ remaining parameters are the {\em recursively non-uniform parameters}). Let $q_1$, \ldots, $q_r$, with $0\leq r\leq m$, be a (possibly) partial instantiation of the recursively uniform parameters of $\Gamma_P$. We have: \inference{\frac {\left\{\begin{array}{l} \Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C} \in E\\ (E[\Gamma] \vdash q_l : P'_l)_{l=1\ldots r}\\ (\WTEGLECONV{P'_l}{\subst{P_l}{p_u}{q_u}_{u=1\ldots l-1}})_{l=1\ldots r}\\ 1 \leq j \leq k \end{array} \right.} {E[\Gamma] \vdash (I_j\,q_1\,\ldots\,q_r:\forall [p_{r+1}:P_{r+1};\ldots;p_{p}:P_{p}], (A_j)_{/s_j})} } provided that the following side conditions hold: \begin{itemize} \item $\Gamma_{P'}$ is the context obtained from $\Gamma_P$ by replacing each $P_l$ that is an arity with $P'_l$ for $1\leq l \leq r$ (notice that $P_l$ arity implies $P'_l$ arity since $\WTEGLECONV{P'_l}{ \subst{P_l}{p_u}{q_u}_{u=1\ldots l-1}}$); \item there are sorts $s_i$, for $1 \leq i \leq k$ such that, for $\Gamma_{I'} = [I_1:\forall \Gamma_{P'},(A_1)_{/s_1};\ldots;I_k:\forall \Gamma_{P'},(A_k)_{/s_k}]$ we have $(\WTE{\Gamma;\Gamma_{I'};\Gamma_{P'}}{C_i}{s_{q_i}})_{i=1\ldots n}$; \item the sorts are such that all eliminations, to {\Prop}, {\Set} and $\Type(j)$, are allowed (see section~\ref{elimdep}). \end{itemize} \end{description} Notice that if $I_j\,q_1\,\ldots\,q_r$ is typable using the rules {\bf Ind-Const} and {\bf App}, then it is typable using the rule {\bf Ind-Family}. Conversely, the extended theory is not stronger than the theory without {\bf Ind-Family}. We get an equiconsistency result by mapping each $\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C}$ occurring into a given derivation into as many different inductive types and constructors as the number of different (partial) replacements of sorts, needed for this derivation, in the parameters that are arities (this is possible because $\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C}$ well-formed implies that $\Ind{\Gamma}{p}{\Gamma_{I'}}{\Gamma_{C'}}$ is well-formed and has the same allowed eliminations, where $\Gamma_{I'}$ is defined as above and $\Gamma_{C'} = [c_1:\forall \Gamma_{P'},C_1;\ldots;c_n:\forall \Gamma_{P'},C_n]$). That is, the changes in the types of each partial instance $q_1\,\ldots\,q_r$ can be characterized by the ordered sets of arity sorts among the types of parameters, and to each signature is associated a new inductive definition with fresh names. Conversion is preserved as any (partial) instance $I_j\,q_1\,\ldots\,q_r$ or $C_i\,q_1\,\ldots\,q_r$ is mapped to the names chosen in the specific instance of $\Ind{\Gamma}{p}{\Gamma_I}{\Gamma_C}$. 
\newcommand{\Single}{\mbox{\textsf{Set}}} In practice, the rule {\bf Ind-Family} is used by {\Coq} only when all the inductive types of the inductive definition are declared with an arity whose sort is in the $\Type$ hierarchy. Then, the polymorphism is over the parameters whose type is an arity of sort in the {\Type} hierarchy. The sort $s_j$ are chosen canonically so that each $s_j$ is minimal with respect to the hierarchy ${\Prop}\subset{\Set_p}\subset\Type$ where $\Set_p$ is predicative {\Set}. %and ${\Prop_u}$ is the sort of small singleton %inductive types (i.e. of inductive types with one single constructor %and that contains either proofs or inhabitants of singleton types %only). More precisely, an empty or small singleton inductive definition (i.e. an inductive definition of which all inductive types are singleton -- see paragraph~\ref{singleton}) is set in {\Prop}, a small non-singleton inductive family is set in {\Set} (even in case {\Set} is impredicative -- see Section~\ref{impredicativity}), and otherwise in the {\Type} hierarchy. % TODO: clarify the case of a partial application ?? Note that the side-condition about allowed elimination sorts in the rule~{\bf Ind-Family} is just to avoid to recompute the allowed elimination sorts at each instance of a pattern-matching (see section~\ref{elimdep}). As an example, let us consider the following definition: \begin{coq_example*} Inductive option (A:Type) : Type := | None : option A | Some : A -> option A. \end{coq_example*} As the definition is set in the {\Type} hierarchy, it is used polymorphically over its parameters whose types are arities of a sort in the {\Type} hierarchy. Here, the parameter $A$ has this property, hence, if \texttt{option} is applied to a type in {\Set}, the result is in {\Set}. Note that if \texttt{option} is applied to a type in {\Prop}, then, the result is not set in \texttt{Prop} but in \texttt{Set} still. This is because \texttt{option} is not a singleton type (see section~\ref{singleton}) and it would loose the elimination to {\Set} and {\Type} if set in {\Prop}. \begin{coq_example} Check (fun A:Set => option A). Check (fun A:Prop => option A). \end{coq_example} Here is another example. \begin{coq_example*} Inductive prod (A B:Type) : Type := pair : A -> B -> prod A B. \end{coq_example*} As \texttt{prod} is a singleton type, it will be in {\Prop} if applied twice to propositions, in {\Set} if applied twice to at least one type in {\Set} and none in {\Type}, and in {\Type} otherwise. In all cases, the three kind of eliminations schemes are allowed. \begin{coq_example} Check (fun A:Set => prod A). Check (fun A:Prop => prod A A). Check (fun (A:Prop) (B:Set) => prod A B). Check (fun (A:Type) (B:Prop) => prod A B). \end{coq_example} \subsection{Destructors} The specification of inductive definitions with arities and constructors is quite natural. But we still have to say how to use an object in an inductive type. This problem is rather delicate. There are actually several different ways to do that. Some of them are logically equivalent but not always equivalent from the computational point of view or from the user point of view. From the computational point of view, we want to be able to define a function whose domain is an inductively defined type by using a combination of case analysis over the possible constructors of the object and recursion. Because we need to keep a consistent theory and also we prefer to keep a strongly normalizing reduction, we cannot accept any sort of recursion (even terminating). 
So the basic idea is to restrict ourselves to primitive recursive functions and functionals. For instance, assuming a parameter $A:\Set$ exists in the context, we want to build a function \length\ of type $\ListA\ra \nat$ which computes the length of the list, so such that $(\length~(\Nil~A)) = \nO$ and $(\length~(\cons~A~a~l)) = (\nS~(\length~l))$. We want these equalities to be recognized implicitly and taken into account in the conversion rule. From the logical point of view, we have built a type family by giving a set of constructors. We want to capture the fact that we do not have any other way to build an object in this type. So when trying to prove a property $(P~m)$ for $m$ in an inductive definition it is enough to enumerate all the cases where $m$ starts with a different constructor. In case the inductive definition is effectively a recursive one, we want to capture the extra property that we have built the smallest fixed point of this recursive equation. This says that we are only manipulating finite objects. This analysis provides induction principles. For instance, in order to prove $\forall l:\ListA,(\LengthA~l~(\length~l))$ it is enough to prove: \noindent $(\LengthA~(\Nil~A)~(\length~(\Nil~A)))$ and \smallskip $\forall a:A, \forall l:\ListA, (\LengthA~l~(\length~l)) \ra (\LengthA~(\cons~A~a~l)~(\length~(\cons~A~a~l)))$. \smallskip \noindent which given the conversion equalities satisfied by \length\ is the same as proving: $(\LengthA~(\Nil~A)~\nO)$ and $\forall a:A, \forall l:\ListA, (\LengthA~l~(\length~l)) \ra (\LengthA~(\cons~A~a~l)~(\nS~(\length~l)))$. One conceptually simple way to do that, following the basic scheme proposed by Martin-L\"of in his Intuitionistic Type Theory, is to introduce for each inductive definition an elimination operator. At the logical level it is a proof of the usual induction principle and at the computational level it implements a generic operator for doing primitive recursion over the structure. But this operator is rather tedious to implement and use. We choose in this version of Coq to factorize the operator for primitive recursion into two more primitive operations as was first suggested by Th. Coquand in~\cite{Coq92}. One is the definition by pattern-matching. The second one is a definition by guarded fixpoints. \subsubsection[The {\tt match\ldots with \ldots end} construction.]{The {\tt match\ldots with \ldots end} construction.\label{Caseexpr} \index{match@{\tt match\ldots with\ldots end}}} The basic idea of this destructor operation is that we have an object $m$ in an inductive type $I$ and we want to prove a property $(P~m)$ which in general depends on $m$. For this, it is enough to prove the property for $m = (c_i~u_1\ldots u_{p_i})$ for each constructor of $I$. The \Coq{} term for this proof will be written~: \[\kw{match}~m~\kw{with}~ (c_1~x_{11}~...~x_{1p_1}) \Ra f_1 ~|~\ldots~|~ (c_n~x_{n1}...x_{np_n}) \Ra f_n~ \kw{end}\] In this expression, if $m$ is a term built from a constructor $(c_i~u_1\ldots u_{p_i})$ then the expression will behave as it is specified with $i$-th branch and will reduce to $f_i$ where the $x_{i1}$\ldots $x_{ip_i}$ are replaced by the $u_1\ldots u_p$ according to the $\iota$-reduction. Actually, for type-checking a \kw{match\ldots with\ldots end} expression we also need to know the predicate $P$ to be proved by case analysis. 
In the general case where $I$ is an inductively defined $n$-ary relation, $P$ is a $n+1$-ary relation: the $n$ first arguments correspond to the arguments of $I$ (parameters excluded), and the last one corresponds to object $m$. \Coq{} can sometimes infer this predicate but sometimes not. The concrete syntax for describing this predicate uses the \kw{as\ldots in\ldots return} construction. For instance, let us assume that $I$ is an unary predicate with one parameter. The predicate is made explicit using the syntax~: \[\kw{match}~m~\kw{as}~ x~ \kw{in}~ I~\verb!_!~a~ \kw{return}~ (P~ x) ~\kw{with}~ (c_1~x_{11}~...~x_{1p_1}) \Ra f_1 ~|~\ldots~|~ (c_n~x_{n1}...x_{np_n}) \Ra f_n \kw{end}\] The \kw{as} part can be omitted if either the result type does not depend on $m$ (non-dependent elimination) or $m$ is a variable (in this case, the result type can depend on $m$). The \kw{in} part can be omitted if the result type does not depend on the arguments of $I$. Note that the arguments of $I$ corresponding to parameters \emph{must} be \verb!_!, because the result type is not generalized to all possible values of the parameters. The expression after \kw{in} must be seen as an \emph{inductive type pattern}. As a final remark, expansion of implicit arguments and notations apply to this pattern. For the purpose of presenting the inference rules, we use a more compact notation~: \[ \Case{(\lb a x \mto P)}{m}{ \lb x_{11}~...~x_{1p_1} \mto f_1 ~|~\ldots~|~ \lb x_{n1}...x_{np_n} \mto f_n}\] %% CP 06/06 Obsolete avec la nouvelle syntaxe et incompatible avec la %% presentation theorique qui suit % \paragraph{Non-dependent elimination.} % % When defining a function of codomain $C$ by case analysis over an % object in an inductive type $I$, we build an object of type $I % \ra C$. The minimality principle on an inductively defined logical % predicate $I$ of type $A \ra \Prop$ is often used to prove a property % $\forall x:A,(I~x)\ra (C~x)$. These are particular cases of the dependent % principle that we stated before with a predicate which does not depend % explicitly on the object in the inductive definition. % For instance, a function testing whether a list is empty % can be % defined as: % \[\kw{fun} l:\ListA \Ra \kw{match}~l~\kw{with}~ \Nil \Ra \true~ % |~(\cons~a~m) \Ra \false \kw{end}\] % represented by % \[\lb l:\ListA \mto\Case{\bool}{l}{\true~ |~ \lb a~m,~\false}\] %\noindent {\bf Remark. } % In the system \Coq\ the expression above, can be % written without mentioning % the dummy abstraction: % \Case{\bool}{l}{\Nil~ \mbox{\tt =>}~\true~ |~ (\cons~a~m)~ % \mbox{\tt =>}~ \false} \paragraph[Allowed elimination sorts.]{Allowed elimination sorts.\index{Elimination sorts}} An important question for building the typing rule for \kw{match} is what can be the type of $P$ with respect to the type of the inductive definitions. We define now a relation \compat{I:A}{B} between an inductive definition $I$ of type $A$ and an arity $B$. This relation states that an object in the inductive definition $I$ can be eliminated for proving a property $P$ of type $B$. The case of inductive definitions in sorts \Set\ or \Type{} is simple. There is no restriction on the sort of the predicate to be eliminated. \paragraph{Notations.} The \compat{I:A}{B} is defined as the smallest relation satisfying the following rules: We write \compat{I}{B} for \compat{I:A}{B} where $A$ is the type of $I$. 
\begin{description} \item[Prod] \inference{\frac{\compat{(I~x):A'}{B'}} {\compat{I:\forall x:A, A'}{\forall x:A, B'}}} \item[{\Set} \& \Type] \inference{\frac{ s_1 \in \{\Set,\Type(j)\}, s_2 \in \Sort}{\compat{I:s_1}{I\ra s_2}}} \end{description} The case of Inductive definitions of sort \Prop{} is a bit more complicated, because of our interpretation of this sort. The only harmless allowed elimination, is the one when predicate $P$ is also of sort \Prop. \begin{description} \item[\Prop] \inference{\compat{I:\Prop}{I\ra\Prop}} \end{description} \Prop{} is the type of logical propositions, the proofs of properties $P$ in \Prop{} could not be used for computation and are consequently ignored by the extraction mechanism. Assume $A$ and $B$ are two propositions, and the logical disjunction $A\vee B$ is defined inductively by~: \begin{coq_example*} Inductive or (A B:Prop) : Prop := lintro : A -> or A B | rintro : B -> or A B. \end{coq_example*} The following definition which computes a boolean value by case over the proof of \texttt{or A B} is not accepted~: \begin{coq_eval} (***************************************************************) (*** This example should fail with ``Incorrect elimination'' ***) \end{coq_eval} \begin{coq_example} Definition choice (A B: Prop) (x:or A B) := match x with lintro a => true | rintro b => false end. \end{coq_example} From the computational point of view, the structure of the proof of \texttt{(or A B)} in this term is needed for computing the boolean value. In general, if $I$ has type \Prop\ then $P$ cannot have type $I\ra \Set$, because it will mean to build an informative proof of type $(P~m)$ doing a case analysis over a non-computational object that will disappear in the extracted program. But the other way is safe with respect to our interpretation we can have $I$ a computational object and $P$ a non-computational one, it just corresponds to proving a logical property of a computational object. % Also if $I$ is in one of the sorts \{\Prop, \Set\}, one cannot in % general allow an elimination over a bigger sort such as \Type. But % this operation is safe whenever $I$ is a {\em small inductive} type, % which means that all the types of constructors of % $I$ are small with the following definition:\\ % $(I~t_1\ldots t_s)$ is a {\em small type of constructor} and % $\forall~x:T,C$ is a small type of constructor if $C$ is and if $T$ % has type \Prop\ or \Set. \index{Small inductive type} % We call this particular elimination which gives the possibility to % compute a type by induction on the structure of a term, a {\em strong % elimination}\index{Strong elimination}. In the same spirit, elimination on $P$ of type $I\ra \Type$ cannot be allowed because it trivially implies the elimination on $P$ of type $I\ra \Set$ by cumulativity. It also implies that there is two proofs of the same property which are provably different, contradicting the proof-irrelevance property which is sometimes a useful axiom~: \begin{coq_example} Axiom proof_irrelevance : forall (P : Prop) (x y : P), x=y. \end{coq_example} \begin{coq_eval} Reset proof_irrelevance. \end{coq_eval} The elimination of an inductive definition of type \Prop\ on a predicate $P$ of type $I\ra \Type$ leads to a paradox when applied to impredicative inductive definition like the second-order existential quantifier \texttt{exProp} defined above, because it give access to the two projections on this type. 
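By contrast, the elimination of a proof of \texttt{or A B} towards a predicate of sort {\Prop} is
allowed. As a small illustration (the name \texttt{or\_swap} is arbitrary), the following
definition, which uses the constructors \texttt{lintro} and \texttt{rintro} declared above, is
accepted:
\begin{coq_example*}
Definition or_swap (A B:Prop) (x:or A B) : or B A :=
  match x with
  | lintro a => rintro B A a
  | rintro b => lintro B A b
  end.
\end{coq_example*}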
%\paragraph{Warning: strong elimination} %\index{Elimination!Strong elimination} %In previous versions of Coq, for a small inductive definition, only the %non-informative strong elimination on \Type\ was allowed, because %strong elimination on \Typeset\ was not compatible with the current %extraction procedure. In this version, strong elimination on \Typeset\ %is accepted but a dummy element is extracted from it and may generate %problems if extracted terms are explicitly used such as in the %{\tt Program} tactic or when extracting ML programs. \paragraph[Empty and singleton elimination]{Empty and singleton elimination\label{singleton} \index{Elimination!Singleton elimination} \index{Elimination!Empty elimination}} There are special inductive definitions in \Prop\ for which more eliminations are allowed. \begin{description} \item[\Prop-extended] \inference{ \frac{I \mbox{~is an empty or singleton definition}~~~s \in \Sort}{\compat{I:\Prop}{I\ra s}} } \end{description} % A {\em singleton definition} has always an informative content, % even if it is a proposition. A {\em singleton definition} has only one constructor and all the arguments of this constructor have type \Prop. In that case, there is a canonical way to interpret the informative extraction on an object in that type, such that the elimination on any sort $s$ is legal. Typical examples are the conjunction of non-informative propositions and the equality. If there is an hypothesis $h:a=b$ in the context, it can be used for rewriting not only in logical propositions but also in any type. % In that case, the term \verb!eq_rec! which was defined as an axiom, is % now a term of the calculus. \begin{coq_example} Print eq_rec. Extraction eq_rec. \end{coq_example} An empty definition has no constructors, in that case also, elimination on any sort is allowed. \paragraph{Type of branches.} Let $c$ be a term of type $C$, we assume $C$ is a type of constructor for an inductive definition $I$. Let $P$ be a term that represents the property to be proved. We assume $r$ is the number of parameters. We define a new type \CI{c:C}{P} which represents the type of the branch corresponding to the $c:C$ constructor. \[ \begin{array}{ll} \CI{c:(I_i~p_1\ldots p_r\ t_1 \ldots t_p)}{P} &\equiv (P~t_1\ldots ~t_p~c) \\[2mm] \CI{c:\forall~x:T,C}{P} &\equiv \forall~x:T,\CI{(c~x):C}{P} \end{array} \] We write \CI{c}{P} for \CI{c:C}{P} with $C$ the type of $c$. \paragraph{Examples.} For $\ListA$ the type of $P$ will be $\ListA\ra s$ for $s \in \Sort$. \\ $ \CI{(\cons~A)}{P} \equiv \forall a:A, \forall l:\ListA,(P~(\cons~A~a~l))$. For $\LengthA$, the type of $P$ will be $\forall l:\ListA,\forall n:\nat, (\LengthA~l~n)\ra \Prop$ and the expression \CI{(\LCons~A)}{P} is defined as:\\ $\forall a:A, \forall l:\ListA, \forall n:\nat, \forall h:(\LengthA~l~n), (P~(\cons~A~a~l)~(\nS~n)~(\LCons~A~a~l~n~l))$.\\ If $P$ does not depend on its third argument, we find the more natural expression:\\ $\forall a:A, \forall l:\ListA, \forall n:\nat, (\LengthA~l~n)\ra(P~(\cons~A~a~l)~(\nS~n))$. 
\paragraph{Typing rule.} Our very general destructor for inductive definition enjoys the following typing rule % , where we write % \[ % \Case{P}{c}{[x_{11}:T_{11}]\ldots[x_{1p_1}:T_{1p_1}]g_1\ldots % [x_{n1}:T_{n1}]\ldots[x_{np_n}:T_{np_n}]g_n} % \] % for % \[ % \Case{P}{c}{(c_1~x_{11}~...~x_{1p_1}) \Ra g_1 ~|~\ldots~|~ % (c_n~x_{n1}...x_{np_n}) \Ra g_n } % \] \begin{description} \item[match] \label{elimdep} \index{Typing rules!match} \inference{ \frac{\WTEG{c}{(I~q_1\ldots q_r~t_1\ldots t_s)}~~ \WTEG{P}{B}~~\compat{(I~q_1\ldots q_r)}{B} ~~ (\WTEG{f_i}{\CI{(c_{p_i}~q_1\ldots q_r)}{P}})_{i=1\ldots l}} {\WTEG{\Case{P}{c}{f_1|\ldots |f_l}}{(P\ t_1\ldots t_s\ c)}}}%\\[3mm] provided $I$ is an inductive type in a declaration \Ind{\Delta}{r}{\Gamma_I}{\Gamma_C} with $\Gamma_C = [c_1:C_1;\ldots;c_n:C_n]$ and $c_{p_1}\ldots c_{p_l}$ are the only constructors of $I$. \end{description} \paragraph{Example.} For \List\ and \Length\ the typing rules for the {\tt match} expression are (writing just $t:M$ instead of \WTEG{t}{M}, the environment and context being the same in all the judgments). \[\frac{l:\ListA~~P:\ListA\ra s~~~f_1:(P~(\Nil~A))~~ f_2:\forall a:A, \forall l:\ListA, (P~(\cons~A~a~l))} {\Case{P}{l}{f_1~|~f_2}:(P~l)}\] \[\frac{ \begin{array}[b]{@{}c@{}} H:(\LengthA~L~N) \\ P:\forall l:\ListA, \forall n:\nat, (\LengthA~l~n)\ra \Prop\\ f_1:(P~(\Nil~A)~\nO~\LNil) \\ f_2:\forall a:A, \forall l:\ListA, \forall n:\nat, \forall h:(\LengthA~l~n), (P~(\cons~A~a~n)~(\nS~n)~(\LCons~A~a~l~n~h)) \end{array}} {\Case{P}{H}{f_1~|~f_2}:(P~L~N~H)}\] \paragraph[Definition of $\iota$-reduction.]{Definition of $\iota$-reduction.\label{iotared} \index{iota-reduction@$\iota$-reduction}} We still have to define the $\iota$-reduction in the general case. A $\iota$-redex is a term of the following form: \[\Case{P}{(c_{p_i}~q_1\ldots q_r~a_1\ldots a_m)}{f_1|\ldots | f_l}\] with $c_{p_i}$ the $i$-th constructor of the inductive type $I$ with $r$ parameters. The $\iota$-contraction of this term is $(f_i~a_1\ldots a_m)$ leading to the general reduction rule: \[ \Case{P}{(c_{p_i}~q_1\ldots q_r~a_1\ldots a_m)}{f_1|\ldots | f_n} \triangleright_{\iota} (f_i~a_1\ldots a_m) \] \subsection[Fixpoint definitions]{Fixpoint definitions\label{Fix-term} \index{Fix@{\tt Fix}}} The second operator for elimination is fixpoint definition. This fixpoint may involve several mutually recursive definitions. The basic concrete syntax for a recursive set of mutually recursive declarations is (with $\Gamma_i$ contexts)~: \[\kw{fix}~f_1 (\Gamma_1) :A_1:=t_1~\kw{with} \ldots \kw{with}~ f_n (\Gamma_n) :A_n:=t_n\] The terms are obtained by projections from this set of declarations and are written \[\kw{fix}~f_1 (\Gamma_1) :A_1:=t_1~\kw{with} \ldots \kw{with}~ f_n (\Gamma_n) :A_n:=t_n~\kw{for}~f_i\] In the inference rules, we represent such a term by \[\Fix{f_i}{f_1:A_1':=t_1' \ldots f_n:A_n':=t_n'}\] with $t_i'$ (resp. $A_i'$) representing the term $t_i$ abstracted (resp. generalized) with respect to the bindings in the context $\Gamma_i$, namely $t_i'=\lb \Gamma_i \mto t_i$ and $A_i'=\forall \Gamma_i, A_i$. \subsubsection{Typing rule} The typing rule is the expected one for a fixpoint. \begin{description} \item[Fix] \index{Typing rules!Fix} \inference{\frac{(\WTEG{A_i}{s_i})_{i=1\ldots n}~~~~ (\WTE{\Gamma,f_1:A_1,\ldots,f_n:A_n}{t_i}{A_i})_{i=1\ldots n}} {\WTEG{\Fix{f_i}{f_1:A_1:=t_1 \ldots f_n:A_n:=t_n}}{A_i}}} \end{description} Any fixpoint definition cannot be accepted because non-normalizing terms will lead to proofs of absurdity. 
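For instance, the following circular definition, were it accepted, would provide a closed proof of
\texttt{False}; it is rejected by the guard condition described below (the exact error message may
differ between versions):
\begin{coq_eval}
(********** The following is not correct and should produce **********)
(********** an ill-formed recursive definition error        **********)
\end{coq_eval}
\begin{coq_example}
Fixpoint loop (n:nat) : False := loop n.
\end{coq_example}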
The basic scheme of recursion that should be allowed is the one needed for defining primitive recursive functionals. In that case the fixpoint enjoys a special syntactic restriction, namely one of the arguments belongs to an inductive type, the function starts with a case analysis and recursive calls are done on variables coming from patterns and representing subterms. For instance in the case of natural numbers, a proof of the induction principle of type \[\forall P:\nat\ra\Prop, (P~\nO)\ra(\forall n:\nat, (P~n)\ra(P~(\nS~n)))\ra \forall n:\nat, (P~n)\] can be represented by the term: \[\begin{array}{l} \lb P:\nat\ra\Prop\mto\lb f:(P~\nO)\mto \lb g:(\forall n:\nat, (P~n)\ra(P~(\nS~n))) \mto\\ \Fix{h}{h:\forall n:\nat, (P~n):=\lb n:\nat\mto \Case{P}{n}{f~|~\lb p:\nat\mto (g~p~(h~p))}} \end{array} \] Before accepting a fixpoint definition as being correctly typed, we check that the definition is ``guarded''. A precise analysis of this notion can be found in~\cite{Gim94}. The first stage is to precise on which argument the fixpoint will be decreasing. The type of this argument should be an inductive definition. For doing this the syntax of fixpoints is extended and becomes \[\Fix{f_i}{f_1/k_1:A_1:=t_1 \ldots f_n/k_n:A_n:=t_n}\] where $k_i$ are positive integers. Each $A_i$ should be a type (reducible to a term) starting with at least $k_i$ products $\forall y_1:B_1,\ldots \forall y_{k_i}:B_{k_i}, A'_i$ and $B_{k_i}$ being an instance of an inductive definition. Now in the definition $t_i$, if $f_j$ occurs then it should be applied to at least $k_j$ arguments and the $k_j$-th argument should be syntactically recognized as structurally smaller than $y_{k_i}$ The definition of being structurally smaller is a bit technical. One needs first to define the notion of {\em recursive arguments of a constructor}\index{Recursive arguments}. For an inductive definition \Ind{\Gamma}{r}{\Gamma_I}{\Gamma_C}, the type of a constructor $c$ has the form $\forall p_1:P_1,\ldots \forall p_r:P_r, \forall x_1:T_1, \ldots \forall x_r:T_r, (I_j~p_1\ldots p_r~t_1\ldots t_s)$ the recursive arguments will correspond to $T_i$ in which one of the $I_l$ occurs. The main rules for being structurally smaller are the following:\\ Given a variable $y$ of type an inductive definition in a declaration \Ind{\Gamma}{r}{\Gamma_I}{\Gamma_C} where $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is $[c_1:C_1;\ldots;c_n:C_n]$. The terms structurally smaller than $y$ are: \begin{itemize} \item $(t~u), \lb x:u \mto t$ when $t$ is structurally smaller than $y$ . \item \Case{P}{c}{f_1\ldots f_n} when each $f_i$ is structurally smaller than $y$. \\ If $c$ is $y$ or is structurally smaller than $y$, its type is an inductive definition $I_p$ part of the inductive declaration corresponding to $y$. Each $f_i$ corresponds to a type of constructor $C_q \equiv \forall p_1:P_1,\ldots,\forall p_r:P_r, \forall y_1:B_1, \ldots \forall y_k:B_k, (I~a_1\ldots a_k)$ and can consequently be written $\lb y_1:B'_1\mto \ldots \lb y_k:B'_k\mto g_i$. ($B'_i$ is obtained from $B_i$ by substituting parameters variables) the variables $y_j$ occurring in $g_i$ corresponding to recursive arguments $B_i$ (the ones in which one of the $I_l$ occurs) are structurally smaller than $y$. \end{itemize} The following definitions are correct, we enter them using the {\tt Fixpoint} command as described in Section~\ref{Fixpoint} and show the internal representation. 
\begin{coq_example} Fixpoint plus (n m:nat) {struct n} : nat := match n with | O => m | S p => S (plus p m) end. Print plus. Fixpoint lgth (A:Set) (l:list A) {struct l} : nat := match l with | nil => O | cons a l' => S (lgth A l') end. Print lgth. Fixpoint sizet (t:tree) : nat := let (f) := t in S (sizef f) with sizef (f:forest) : nat := match f with | emptyf => O | consf t f => plus (sizet t) (sizef f) end. Print sizet. \end{coq_example} \subsubsection[Reduction rule]{Reduction rule\index{iota-reduction@$\iota$-reduction}} Let $F$ be the set of declarations: $f_1/k_1:A_1:=t_1 \ldots f_n/k_n:A_n:=t_n$. The reduction for fixpoints is: \[ (\Fix{f_i}{F}~a_1\ldots a_{k_i}) \triangleright_{\iota} \substs{t_i}{f_k}{\Fix{f_k}{F}}{k=1\ldots n} ~a_1\ldots a_{k_i}\] when $a_{k_i}$ starts with a constructor. This last restriction is needed in order to keep strong normalization and corresponds to the reduction for primitive recursive operators. We can illustrate this behavior on examples. \begin{coq_example} Goal forall n m:nat, plus (S n) m = S (plus n m). reflexivity. Abort. Goal forall f:forest, sizet (node f) = S (sizef f). reflexivity. Abort. \end{coq_example} But assuming the definition of a son function from \tree\ to \forest: \begin{coq_example} Definition sont (t:tree) : forest := let (f) := t in f. \end{coq_example} The following is not a conversion but can be proved after a case analysis. \begin{coq_eval} (******************************************************************) (** Error: Impossible to unify .... **) \end{coq_eval} \begin{coq_example} Goal forall t:tree, sizet t = S (sizef (sont t)). reflexivity. (** this one fails **) destruct t. reflexivity. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} % La disparition de Program devrait rendre la construction Match obsolete % \subsubsection{The {\tt Match \ldots with \ldots end} expression} % \label{Matchexpr} % %\paragraph{A unary {\tt Match\ldots with \ldots end}.} % \index{Match...with...end@{\tt Match \ldots with \ldots end}} % The {\tt Match} operator which was a primitive notion in older % presentations of the Calculus of Inductive Constructions is now just a % macro definition which generates the good combination of {\tt Case} % and {\tt Fix} operators in order to generate an operator for primitive % recursive definitions. It always considers an inductive definition as % a single inductive definition. % The following examples illustrates this feature. % \begin{coq_example} % Definition nat_pr : (C:Set)C->(nat->C->C)->nat->C % :=[C,x,g,n]Match n with x g end. % Print nat_pr. % \end{coq_example} % \begin{coq_example} % Definition forest_pr % : (C:Set)C->(tree->forest->C->C)->forest->C % := [C,x,g,n]Match n with x g end. % \end{coq_example} % Cet exemple faisait error (HH le 12/12/96), j'ai change pour une % version plus simple %\begin{coq_example} %Definition forest_pr % : (P:forest->Set)(P emptyf)->((t:tree)(f:forest)(P f)->(P (consf t f))) % ->(f:forest)(P f) % := [C,x,g,n]Match n with x g end. %\end{coq_example} \subsubsection{Mutual induction} The principles of mutual induction can be automatically generated using the {\tt Scheme} command described in Section~\ref{Scheme}. \section{Co-inductive types} The implementation contains also co-inductive definitions, which are types inhabited by infinite objects. More information on co-inductive definitions can be found in~\cite{Gimenez95b,Gim98,GimCas05}. %They are described in Chapter~\ref{Co-inductives}. 
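As a brief illustration only (the typing and reduction rules for co-inductive types are not
detailed here), one may declare a type of infinite streams and a guarded co-fixpoint inhabiting
it:
\begin{coq_example*}
CoInductive Stream (A:Set) : Set :=
  Scons : A -> Stream A -> Stream A.
CoFixpoint zeroes : Stream nat := Scons nat O zeroes.
\end{coq_example*}
The co-fixpoint is accepted because every recursive call is guarded by the constructor
\texttt{Scons}: dually to the structural decrease required of fixpoints, co-fixpoints must be
productive.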
\section[\iCIC : the Calculus of Inductive Construction with impredicative \Set]{\iCIC : the Calculus of Inductive Construction with impredicative \Set\label{impredicativity}} \Coq{} can be used as a type-checker for \iCIC{}, the original Calculus of Inductive Constructions with an impredicative sort \Set{} by using the compiler option \texttt{-impredicative-set}. For example, using the ordinary \texttt{coqtop} command, the following is rejected. \begin{coq_eval} (** This example should fail ******************************* Error: The term forall X:Set, X -> X has type Type while it is expected to have type Set ***) \end{coq_eval} \begin{coq_example} Definition id: Set := forall X:Set,X->X. \end{coq_example} while it will type-check, if one use instead the \texttt{coqtop -impredicative-set} command. The major change in the theory concerns the rule for product formation in the sort \Set, which is extended to a domain in any sort~: \begin{description} \item [Prod] \index{Typing rules!Prod (impredicative Set)} \inference{\frac{\WTEG{T}{s}~~~~s \in \Sort~~~~~~ \WTE{\Gamma::(x:T)}{U}{\Set}} { \WTEG{\forall~x:T,U}{\Set}}} \end{description} This extension has consequences on the inductive definitions which are allowed. In the impredicative system, one can build so-called {\em large inductive definitions} like the example of second-order existential quantifier (\texttt{exSet}). There should be restrictions on the eliminations which can be performed on such definitions. The eliminations rules in the impredicative system for sort \Set{} become~: \begin{description} \item[\Set] \inference{\frac{s \in \{\Prop, \Set\}}{\compat{I:\Set}{I\ra s}} ~~~~\frac{I \mbox{~is a small inductive definition}~~~~s \in \{\Type(i)\}} {\compat{I:\Set}{I\ra s}}} \end{description} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/AddRefMan-pre.tex0000640000175000001440000000440211001657640016733 0ustar notinusers%\coverpage{Addendum to the Reference Manual}{\ } %\addcontentsline{toc}{part}{Additional documentation} %BEGIN LATEX \setheaders{Presentation of the Addendum} %END LATEX \chapter*{Presentation of the Addendum} Here you will find several pieces of additional documentation for the \Coq\ Reference Manual. Each of this chapters is concentrated on a particular topic, that should interest only a fraction of the \Coq\ users: that's the reason why they are apart from the Reference Manual. \begin{description} \item[Extended pattern-matching] This chapter details the use of generalized pattern-matching. It is contributed by Cristina Cornes and Hugo Herbelin. \item[Implicit coercions] This chapter details the use of the coercion mechanism. It is contributed by Amokrane Sabi. %\item[Proof of imperative programs] This chapter explains how to % prove properties of annotated programs with imperative features. % It is contributed by Jean-Christophe Fillitre \item[Program extraction] This chapter explains how to extract in practice ML files from $\FW$ terms. It is contributed by Jean-Christophe Fillitre and Pierre Letouzey. \item[Program] This chapter explains the use of the \texttt{Program} vernacular which allows the development of certified programs in \Coq. It is contributed by Matthieu Sozeau and replaces the previous \texttt{Program} tactic by Catherine Parent. %\item[Natural] This chapter is due to Yann Coscoy. It is the user % manual of the tools he wrote for printing proofs in natural % language. At this time, French and English languages are supported. 
\item[omega] \texttt{omega}, written by Pierre Crgut, solves a whole class of arithmetic problems. \item[The {\tt ring} tactic] This is a tactic to do AC rewriting. This chapter explains how to use it and how it works. The chapter is contributed by Patrick Loiseleur. \item[The {\tt Setoid\_replace} tactic] This is a tactic to do rewriting on types equipped with specific (only partially substitutive) equality. The chapter is contributed by Clment Renard. \item[Calling external provers] This chapter describes several tactics which call external provers. \end{description} \atableofcontents %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/Natural.tex0000640000175000001440000002720311776416511016011 0ustar notinusers\achapter{\texttt{Natural} : proofs in natural language} \aauthor{Yann Coscoy} \asection{Introduction} \Natural~ is a package allowing the writing of proofs in natural language. For instance, the proof in \Coq~of the induction principle on pairs of natural numbers looks like this: \begin{coq_example*} Require Natural. \end{coq_example*} \begin{coq_example} Print nat_double_ind. \end{coq_example} Piping it through the \Natural~pretty-printer gives: \comindex{Print Natural} \begin{coq_example} Print Natural nat_double_ind. \end{coq_example} \asection{Activating \Natural} To enable the printing of proofs in natural language, you should type under \texttt{coqtop} or \texttt{coqtop -full} the command \begin{coq_example*} Require Natural. \end{coq_example*} By default, proofs are transcripted in english. If you wish to print them in French, set the French option by \comindex{Set Natural} \begin{coq_example*} Set Natural French. \end{coq_example*} If you want to go back to English, type in \begin{coq_example*} Set Natural English. \end{coq_example*} Currently, only \verb=French= and \verb=English= are available. You may see for example the natural transcription of the proof of the induction principle on pairs of natural numbers: \begin{coq_example*} Print Natural nat_double_ind. \end{coq_example*} You may also show in natural language the current proof in progress: \comindex{Show Natural} \begin{coq_example} Goal (n:nat)(le O n). Induction n. Show Natural Proof. \end{coq_example} \subsection*{Restrictions} For \Natural, a proof is an object of type a proposition (i.e. an object of type something of type {\tt Prop}). Only proofs are written in natural language when typing {\tt Print Natural \ident}. All other objects (the objects of type something which is of type {\tt Set} or {\tt Type}) are written as usual $\lambda$-terms. \asection{Customizing \Natural} The transcription of proofs in natural language is mainly a paraphrase of the formal proofs, but some specific hints in the transcription can be given. Three kinds of customization are available. \asubsection{Implicit proof steps} \subsubsection*{Implicit lemmas} Applying a given lemma or theorem \verb=lem1= of statement, say $A \Rightarrow B$, to an hypothesis, say $H$ (assuming $A$) produces the following kind of output translation: \begin{verbatim} ... Using lem1 with H we get B. ... \end{verbatim} But sometimes, you may prefer not to see the explicit invocation to the lemma. You may prefer to see: \begin{verbatim} ... With H we have A. ... \end{verbatim} This is possible by declaring the lemma as implicit. You should type: \comindex{Add Natural} \begin{coq_example*} Add Natural Implicit lem1. 
\end{coq_example*} By default, the lemmas \verb=proj1=, \verb=proj2=, \verb=sym_equal= and \verb=sym_eqT= are declared implicit. To remove a lemma or a theorem previously declared as implicit, say \verb=lem1=, use the command \comindex{Remove Natural} \begin{coq_example*} Remove Natural Implicit lem1. \end{coq_example*} To test if the lemma or theorem \verb=lem1= is, or is not, declared as implicit, type \comindex{Test Natural} \begin{coq_example*} Test Natural Implicit for lem1. \end{coq_example*} \subsubsection*{Implicit proof constructors} Let \verb=constr1= be a proof constructor of a given inductive proposition (or predicate) \verb=Q= (of type \verb=Prop=). Assume \verb=constr1= proves \verb=(x:A)(P x)->(Q x)=. Then, applying \verb=constr1= to an hypothesis, say \verb=H= (assuming \verb=(P a)=) produces the following kind of output: \begin{verbatim} ... By the definition of Q, with H we have (Q a). ... \end{verbatim} But sometimes, you may prefer not to see the explicit invocation to this constructor. You may prefer to see: \begin{verbatim} ... With H we have (Q a). ... \end{verbatim} This is possible by declaring the constructor as implicit. You should type, as before: \comindex{Add Natural Implicit} \begin{coq_example*} Add Natural Implicit constr1. \end{coq_example*} By default, the proposition (or predicate) constructors \verb=conj=, \verb=or_introl=, \verb=or_intror=, \verb=ex_intro=, \verb=eq_refl= and \verb=exist= \noindent are declared implicit. Note that declaring implicit the constructor of a datatype (i.e. an inductive type of type \verb=Set=) has no effect. As above, you can remove or test a constant declared implicit. \subsubsection*{Implicit inductive constants} Let \verb=Ind= be an inductive type (either a proposition (or a predicate) -- on \verb=Prop= --, or a datatype -- on \verb=Set=). Suppose the proof proceeds by induction on an hypothesis \verb=h= proving \verb=Ind= (or more generally \verb=(Ind A1 ... An)=). The following kind of output is produced: \begin{verbatim} ... With H, we will prove A by induction on the definition of Ind. Case 1. ... Case 2. ... ... \end{verbatim} But sometimes, you may prefer not to see the explicit invocation to \verb=Ind=. You may prefer to see: \begin{verbatim} ... We will prove A by induction on H. Case 1. ... Case 2. ... ... \end{verbatim} This is possible by declaring the inductive type as implicit. You should type, as before: \comindex{Add Natural Implicit} \begin{coq_example*} Add Natural Implicit Ind. \end{coq_example*} This kind of parameterization works for any inductively defined proposition (or predicate) or datatype. Especially, it works whatever the definition is recursive or purely by cases. By default, the data type \verb=nat= and the inductive connectives \verb=and=, \verb=or=, \verb=sig=, \verb=False=, \verb=eq=, \verb=eqT=, \verb=ex= and \verb=exT= are declared implicit. As above, you can remove or test a constant declared implicit. Use {\tt Remove Natural Contractible $id$} or {\tt Test Natural Contractible for $id$}. \asubsection{Contractible proof steps} \subsubsection*{Contractible lemmas or constructors} Some lemmas, theorems or proof constructors of inductive predicates are often applied in a row and you obtain an output of this kind: \begin{verbatim} ... Using T with H1 and H2 we get P. * By H3 we have Q. Using T with theses results we get R. ... 
\end{verbatim} where \verb=T=, \verb=H1=, \verb=H2= and \verb=H3= prove statements of the form \verb=(X,Y:Prop)X->Y->(L X Y)=, \verb=A=, \verb=B= and \verb=C= respectively (and thus \verb=R= is \verb=(L (L A B) C)=). You may obtain a condensed output of the form \begin{verbatim} ... Using T with H1, H2, and H3 we get R. ... \end{verbatim} by declaring \verb=T= as contractible: \comindex{Add Natural Contractible} \begin{coq_example*} Add Natural Contractible T. \end{coq_example*} By default, the lemmas \verb=proj1=, \verb=proj2= and the proof constructors \verb=conj=, \verb=or_introl=, \verb=or_intror= are declared contractible. As for implicit notions, you can remove or test a lemma or constructor declared contractible. \subsubsection*{Contractible induction steps} Let \verb=Ind= be an inductive type. When the proof proceeds by induction in a row, you may obtain an output of this kind: \begin{verbatim} ... We have (Ind A (Ind B C)). We use definition of Ind in a study in two cases. Case 1: We have A. Case 2: We have (Ind B C). We use definition of Ind in a study of two cases. Case 2.1: We have B. Case 2.2: We have C. ... \end{verbatim} You may prefer to see \begin{verbatim} ... We have (Ind A (Ind B C)). We use definition of Ind in a study in three cases. Case 1: We have A. Case 2: We have B. Case 3: We have C. ... \end{verbatim} This is possible by declaring \verb=Ind= as contractible: \begin{coq_example*} Add Natural Contractible T. \end{coq_example*} By default, only \verb=or= is declared as a contractible inductive constant. As for implicit notions, you can remove or test an inductive notion declared contractible. \asubsection{Transparent definitions} ``Normal'' definitions are all constructions except proofs and proof constructors. \subsubsection*{Transparent non inductive normal definitions} When using the definition of a non inductive constant, say \verb=D=, the following kind of output is produced: \begin{verbatim} ... We have proved C which is equivalent to D. ... \end{verbatim} But you may prefer to hide that D comes from the definition of C as follows: \begin{verbatim} ... We have prove D. ... \end{verbatim} This is possible by declaring \verb=C= as transparent: \comindex{Add Natural Transparent} \begin{coq_example*} Add Natural Transparent D. \end{coq_example*} By default, only \verb=not= (normally written \verb=~=) is declared as a non inductive transparent definition. As for implicit and contractible definitions, you can remove or test a non inductive definition declared transparent. Use \texttt{Remove Natural Transparent} \ident or \texttt{Test Natural Transparent for} \ident. \subsubsection*{Transparent inductive definitions} Let \verb=Ind= be an inductive proposition (more generally: a predicate \verb=(Ind x1 ... xn)=). Suppose the definition of \verb=Ind= is non recursive and built with just one constructor proving something like \verb=A -> B -> Ind=. When coming back to the definition of \verb=Ind= the following kind of output is produced: \begin{verbatim} ... Assume Ind (H). We use H with definition of Ind. We have A and B. ... \end{verbatim} When \verb=H= is not used a second time in the proof, you may prefer to hide that \verb=A= and \verb=B= comes from the definition of \verb=Ind=. You may prefer to get directly: \begin{verbatim} ... Assume A and B. ... \end{verbatim} This is possible by declaring \verb=Ind= as transparent: \begin{coq_example*} Add Natural Transparent Ind. 
\end{coq_example*} By default, \verb=and=, \verb=or=, \verb=ex=, \verb=exT=, \verb=sig= are declared as inductive transparent constants. As for implicit and contractible constants, you can remove or test an inductive constant declared transparent. As for implicit and contractible constants, you can remove or test an inductive constant declared transparent. \asubsection{Extending the maximal depth of nested text} The depth of nested text is limited. To know the current depth, do: \comindex{Set Natural Depth} \begin{coq_example} Set Natural Depth. \end{coq_example} To change the maximal depth of nested text (for instance to 125) do: \begin{coq_example} Set Natural Depth 125. \end{coq_example} \asubsection{Restoring the default parameterization} The command \verb=Set Natural Default= sets back the parameterization tables of \Natural~ to their default values, as listed in the above sections. Moreover, the language is set back to English and the max depth of nested text is set back to its initial value. \asubsection{Printing the current parameterization} The commands {\tt Print Natural Implicit}, {\tt Print Natural Contractible} and {\tt Print \\ Natural Transparent} print the list of constructions declared {\tt Implicit}, {\tt Contractible}, {\tt Transparent} respectively. \asubsection{Interferences with \texttt{Reset}} The customization of \texttt{Natural} is dependent of the \texttt{Reset} command. If you reset the environment back to a point preceding an \verb=Add Natural ...= command, the effect of the command will be erased. Similarly, a reset back to a point before a \verb=Remove Natural ... = command invalidates the removal. \asection{Error messages} An error occurs when trying to \verb=Print=, to \verb=Add=, to \verb=Test=, or to \verb=remove= an undefined ident. Similarly, an error occurs when trying to set a language unknown from \Natural. Errors may also occur when trying to parameterize the printing of proofs: some parameterization are effectively forbidden. Note that to \verb=Remove= an ident absent from a table or to \verb=Add= to a table an already present ident does not lead to an error. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-ext.tex0000640000175000001440000020723312111446715016345 0ustar notinusers\chapter[Extensions of \Gallina{}]{Extensions of \Gallina{}\label{Gallina-extension}\index{Gallina}} {\gallina} is the kernel language of {\Coq}. We describe here extensions of the Gallina's syntax. \section{Record types \comindex{Record} \comindex{Inductive} \comindex{CoInductive} \label{Record}} The \verb+Record+ construction is a macro allowing the definition of records as is done in many programming languages. Its syntax is described on Figure~\ref{record-syntax}. In fact, the \verb+Record+ macro is more general than the usual record types, since it allows also for ``manifest'' expressions. In this sense, the \verb+Record+ construction allows to define ``signatures''. \begin{figure}[h] \begin{centerframe} \begin{tabular}{lcl} {\sentence} & ++= & {\record}\\ & & \\ {\record} & ::= & {\recordkw} {\ident} \zeroone{\binders} \zeroone{{\tt :} {\sort}} \verb.:=. \\ && ~~~~\zeroone{\ident} \verb!{! \zeroone{\nelist{\field}{;}} \verb!}! 
\verb:.:\\ & & \\ {\recordkw} & ::= & {\tt Record} $|$ {\tt Inductive} $|$ {\tt CoInductive}\\ & & \\ {\field} & ::= & {\name} \zeroone{\binders} : {\type} [ {\tt where} {\it notation} ] \\ & $|$ & {\name} \zeroone{\binders} {\typecstr} := {\term} \end{tabular} \end{centerframe} \caption{Syntax for the definition of {\tt Record}} \label{record-syntax} \end{figure} \noindent In the expression \smallskip {\tt Record} {\ident} {\params} \texttt{:} {\sort} := {\ident$_0$} \verb+{+ {\ident$_1$} \binders$_1$ \texttt{:} {\term$_1$}; \dots {\ident$_n$} \binders$_n$ \texttt{:} {\term$_n$} \verb+}+. \smallskip \noindent the identifier {\ident} is the name of the defined record and {\sort} is its type. The identifier {\ident$_0$} is the name of its constructor. If {\ident$_0$} is omitted, the default name {\tt Build\_{\ident}} is used. If {\sort} is omitted, the default sort is ``{\Type}''. The identifiers {\ident$_1$}, .., {\ident$_n$} are the names of fields and {\tt forall} \binders$_1${\tt ,} {\term$_1$}, ..., {\tt forall} \binders$_n${\tt ,} {\term$_n$} their respective types. Remark that the type of {\ident$_i$} may depend on the previous {\ident$_j$} (for $j bottom; Rat_irred_cond : forall x y z:nat, (x * y) = top /\ (x * z) = bottom -> x = 1}. \end{coq_example} Remark here that the field \verb+Rat_cond+ depends on the field \verb+bottom+. %Let us now see the work done by the {\tt Record} macro. %First the macro generates an inductive definition %with just one constructor: % %\medskip %\noindent %{\tt Inductive {\ident} \zeroone{\binders} : {\sort} := \\ %\mbox{}\hspace{0.4cm} {\ident$_0$} : forall ({\ident$_1$}:{\term$_1$}) .. %({\ident$_n$}:{\term$_n$}), {\ident} {\rm\sl params}.} %\medskip Let us now see the work done by the {\tt Record} macro. First the macro generates an inductive definition with just one constructor: \begin{quote} {\tt Inductive {\ident} {\params} :{\sort} :=} \\ \qquad {\tt {\ident$_0$} ({\ident$_1$}:{\term$_1$}) .. ({\ident$_n$}:{\term$_n$}).} \end{quote} To build an object of type {\ident}, one should provide the constructor {\ident$_0$} with $n$ terms filling the fields of the record. As an example, let us define the rational $1/2$: \begin{coq_example*} Require Import Arith. Theorem one_two_irred : forall x y z:nat, x * y = 1 /\ x * z = 2 -> x = 1. \end{coq_example*} \begin{coq_eval} Lemma mult_m_n_eq_m_1 : forall m n:nat, m * n = 1 -> m = 1. destruct m; trivial. intros; apply f_equal with (f := S). destruct m; trivial. destruct n; simpl in H. rewrite <- mult_n_O in H. discriminate. rewrite <- plus_n_Sm in H. discriminate. Qed. intros x y z [H1 H2]. apply mult_m_n_eq_m_1 with (n := y); trivial. \end{coq_eval} \ldots \begin{coq_example*} Qed. \end{coq_example*} \begin{coq_example} Definition half := mkRat true 1 2 (O_S 1) one_two_irred. \end{coq_example} \begin{coq_example} Check half. \end{coq_example} The macro generates also, when it is possible, the projection functions for destructuring an object of type {\ident}. These projection functions have the same name that the corresponding fields. If a field is named ``\verb=_='' then no projection is built for it. In our example: \begin{coq_example} Eval compute in half.(top). Eval compute in half.(bottom). Eval compute in half.(Rat_bottom_cond). \end{coq_example} \begin{coq_eval} Reset Initial. \end{coq_eval} Records defined with the {\tt Record} keyword are not allowed to be recursive (references to the record's name in the type of its field raises an error). 
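For instance, a declaration such as the following one is rejected, because the record name {\tt tree} occurs in the type of its field {\tt son} (a small sketch, using the {\tt Fail} command to turn the error into a checked failure; the corresponding error message is listed below):
\begin{verbatim}
Fail Record tree := { root : nat; son : tree }.
\end{verbatim}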
To define recursive records, one can use the {\tt Inductive} and {\tt CoInductive} keywords, resulting in an inductive or co-inductive record. A \emph{caveat}, however, is that records cannot appear in mutually inductive (or co-inductive) definitions. \begin{Warnings} \item {\tt Warning: {\ident$_i$} cannot be defined.} It can happen that the definition of a projection is impossible. This message is followed by an explanation of this impossibility. There may be three reasons: \begin{enumerate} \item The name {\ident$_i$} already exists in the environment (see Section~\ref{Axiom}). \item The body of {\ident$_i$} uses an incorrect elimination for {\ident} (see Sections~\ref{Fixpoint} and~\ref{Caseexpr}). \item The type of the projections {\ident$_i$} depends on previous projections which themselves could not be defined. \end{enumerate} \end{Warnings} \begin{ErrMsgs} \item \errindex{Records declared with the keyword Record or Structure cannot be recursive.} The record name {\ident} appears in the type of its fields, but uses the keyword {\tt Record}. Use the keyword {\tt Inductive} or {\tt CoInductive} instead. \item \errindex{Cannot handle mutually (co)inductive records.} Records cannot be defined as part of mutually inductive (or co-inductive) definitions, whether with records only or mixed with standard definitions. \item During the definition of the one-constructor inductive definition, all the errors of inductive definitions, as described in Section~\ref{gal_Inductive_Definitions}, may also occur. \end{ErrMsgs} \SeeAlso Coercions and records in Section~\ref{Coercions-and-records} of the chapter devoted to coercions. \Rem {\tt Structure} is a synonym of the keyword {\tt Record}. \Rem Creation of an object of record type can be done by calling {\ident$_0$} and passing arguments in the correct order. \begin{coq_example} Record point := { x : nat; y : nat }. Definition a := Build_point 5 3. \end{coq_example} The following syntax allows to create objects by using named fields. The fields do not have to be in any particular order, nor do they have to be all present if the missing ones can be inferred or prompted for (see Section~\ref{Program}). \begin{coq_example} Definition b := {| x := 5; y := 3 |}. Definition c := {| y := 3; x := 5 |}. \end{coq_example} This syntax can be disabled globally for printing by \begin{quote} {\tt Unset Printing Records.} \end{quote} For a given type, one can override this using either \begin{quote} {\tt Add Printing Record {\ident}.} \end{quote} to get record syntax or \begin{quote} {\tt Add Printing Constructor {\ident}.} \end{quote} to get constructor syntax. This syntax can also be used for pattern matching. \begin{coq_example} Eval compute in ( match b with | {| y := S n |} => n | _ => 0 end). \end{coq_example} \begin{coq_eval} Reset Initial. \end{coq_eval} \Rem An experimental syntax for projections based on a dot notation is available. The command to activate it is \begin{quote} {\tt Set Printing Projections.} \end{quote} \begin{figure}[t] \begin{centerframe} \begin{tabular}{lcl} {\term} & ++= & {\term} {\tt .(} {\qualid} {\tt )}\\ & $|$ & {\term} {\tt .(} {\qualid} \nelist{\termarg}{} {\tt )}\\ & $|$ & {\term} {\tt .(} {@}{\qualid} \nelist{\term}{} {\tt )} \end{tabular} \end{centerframe} \caption{Syntax of \texttt{Record} projections} \label{fig:projsyntax} \end{figure} The corresponding grammar rules are given Figure~\ref{fig:projsyntax}. 
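For instance, with the record {\tt point} and the object {\tt a} defined earlier in this section, the projection {\tt x} can be applied using the dot notation (a small sketch; the general equivalences are spelled out just below):
\begin{verbatim}
Check a.(x).    (* the same term as: Check (x a). *)
\end{verbatim}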
When {\qualid} denotes a projection, the syntax {\tt {\term}.({\qualid})} is equivalent to {\qualid~\term}, the syntax {\tt {\term}.({\qualid}~{\termarg}$_1$~ \ldots~ {\termarg}$_n$)} to {\qualid~{\termarg}$_1$ \ldots {\termarg}$_n$~\term}, and the syntax {\tt {\term}.(@{\qualid}~{\term}$_1$~\ldots~{\term}$_n$)} to {@\qualid~{\term}$_1$ \ldots {\term}$_n$~\term}. In each case, {\term} is the object projected and the other arguments are the parameters of the inductive type. To deactivate the printing of projections, use {\tt Unset Printing Projections}. \section{Variants and extensions of {\mbox{\tt match}} \label{Extensions-of-match} \index{match@{\tt match\ldots with\ldots end}}} \subsection{Multiple and nested pattern-matching \index{ML-like patterns} \label{Mult-match}} The basic version of \verb+match+ allows pattern-matching on simple patterns. As an extension, multiple nested patterns or disjunction of patterns are allowed, as in ML-like languages. The extension just acts as a macro that is expanded during parsing into a sequence of {\tt match} on simple patterns. Especially, a construction defined using the extended {\tt match} is generally printed under its expanded form (see~\texttt{Set Printing Matching} in section~\ref{SetPrintingMatching}). \SeeAlso Chapter~\ref{Mult-match-full}. \subsection{Pattern-matching on boolean values: the {\tt if} expression \label{if-then-else} \index{if@{\tt if ... then ... else}}} For inductive types with exactly two constructors and for pattern-matchings expressions which do not depend on the arguments of the constructors, it is possible to use a {\tt if ... then ... else} notation. For instance, the definition \begin{coq_example} Definition not (b:bool) := match b with | true => false | false => true end. \end{coq_example} \noindent can be alternatively written \begin{coq_eval} Reset not. \end{coq_eval} \begin{coq_example} Definition not (b:bool) := if b then false else true. \end{coq_example} More generally, for an inductive type with constructors {\tt C$_1$} and {\tt C$_2$}, we have the following equivalence \smallskip {\tt if {\term} \zeroone{\ifitem} then {\term}$_1$ else {\term}$_2$} $\equiv$ \begin{tabular}[c]{l} {\tt match {\term} \zeroone{\ifitem} with}\\ {\tt \verb!|! C$_1$ \_ {\ldots} \_ \verb!=>! {\term}$_1$} \\ {\tt \verb!|! C$_2$ \_ {\ldots} \_ \verb!=>! {\term}$_2$} \\ {\tt end} \end{tabular} Here is an example. \begin{coq_example} Check (fun x (H:{x=0}+{x<>0}) => match H with | left _ => true | right _ => false end). \end{coq_example} Notice that the printing uses the {\tt if} syntax because {\tt sumbool} is declared as such (see Section~\ref{printing-options}). \subsection{Irrefutable patterns: the destructuring {\tt let} variants \index{let in@{\tt let ... in}} \label{Letin}} Pattern-matching on terms inhabiting inductive type having only one constructor can be alternatively written using {\tt let ... in ...} constructions. There are two variants of them. \subsubsection{First destructuring {\tt let} syntax} The expression {\tt let (}~{\ident$_1$},\ldots,{\ident$_n$}~{\tt ) :=}~{\term$_0$}~{\tt in}~{\term$_1$} performs case analysis on a {\term$_0$} which must be in an inductive type with one constructor having itself $n$ arguments. Variables {\ident$_1$}\ldots{\ident$_n$} are bound to the $n$ arguments of the constructor in expression {\term$_1$}. For instance, the definition \begin{coq_example} Definition fst (A B:Set) (H:A * B) := match H with | pair x y => x end. 
\end{coq_example} can be alternatively written \begin{coq_eval} Reset fst. \end{coq_eval} \begin{coq_example} Definition fst (A B:Set) (p:A * B) := let (x, _) := p in x. \end{coq_example} Notice that reduction is different from regular {\tt let ... in ...} construction since it happens only if {\term$_0$} is in constructor form. Otherwise, the reduction is blocked. The pretty-printing of a definition by matching on a irrefutable pattern can either be done using {\tt match} or the {\tt let} construction (see Section~\ref{printing-options}). If {\term} inhabits an inductive type with one constructor {\tt C}, we have an equivalence between {\tt let ({\ident}$_1$,\ldots,{\ident}$_n$) \zeroone{\ifitem} := {\term} in {\term}'} \noindent and {\tt match {\term} \zeroone{\ifitem} with C {\ident}$_1$ {\ldots} {\ident}$_n$ \verb!=>! {\term}' end} \subsubsection{Second destructuring {\tt let} syntax\index{let '... in}} Another destructuring {\tt let} syntax is available for inductive types with one constructor by giving an arbitrary pattern instead of just a tuple for all the arguments. For example, the preceding example can be written: \begin{coq_eval} Reset fst. \end{coq_eval} \begin{coq_example} Definition fst (A B:Set) (p:A*B) := let 'pair x _ := p in x. \end{coq_example} This is useful to match deeper inside tuples and also to use notations for the pattern, as the syntax {\tt let 'p := t in b} allows arbitrary patterns to do the deconstruction. For example: \begin{coq_example} Definition deep_tuple (A:Set) (x:(A*A)*(A*A)) : A*A*A*A := let '((a,b), (c, d)) := x in (a,b,c,d). Notation " x 'with' p " := (exist _ x p) (at level 20). Definition proj1_sig' (A:Set) (P:A->Prop) (t:{ x:A | P x }) : A := let 'x with p := t in x. \end{coq_example} When printing definitions which are written using this construct it takes precedence over {\tt let} printing directives for the datatype under consideration (see Section~\ref{printing-options}). \subsection{Controlling pretty-printing of {\tt match} expressions \label{printing-options}} The following commands give some control over the pretty-printing of {\tt match} expressions. \subsubsection{Printing nested patterns \label{SetPrintingMatching} \comindex{Set Printing Matching} \comindex{Unset Printing Matching} \comindex{Test Printing Matching}} The Calculus of Inductive Constructions knows pattern-matching only over simple patterns. It is however convenient to re-factorize nested pattern-matching into a single pattern-matching over a nested pattern. {\Coq}'s printer try to do such limited re-factorization. \begin{quote} {\tt Set Printing Matching.} \end{quote} This tells {\Coq} to try to use nested patterns. This is the default behavior. \begin{quote} {\tt Unset Printing Matching.} \end{quote} This tells {\Coq} to print only simple pattern-matching problems in the same way as the {\Coq} kernel handles them. \begin{quote} {\tt Test Printing Matching.} \end{quote} This tells if the printing matching mode is on or off. The default is on. \subsubsection{Printing of wildcard pattern \comindex{Set Printing Wildcard} \comindex{Unset Printing Wildcard} \comindex{Test Printing Wildcard}} Some variables in a pattern may not occur in the right-hand side of the pattern-matching clause. There are options to control the display of these variables. \begin{quote} {\tt Set Printing Wildcard.} \end{quote} The variables having no occurrences in the right-hand side of the pattern-matching clause are just printed using the wildcard symbol ``{\tt \_}''. 
\begin{quote} {\tt Unset Printing Wildcard.} \end{quote} The variables, even useless, are printed using their usual name. But some non dependent variables have no name. These ones are still printed using a ``{\tt \_}''. \begin{quote} {\tt Test Printing Wildcard.} \end{quote} This tells if the wildcard printing mode is on or off. The default is to print wildcard for useless variables. \subsubsection{Printing of the elimination predicate \comindex{Set Printing Synth} \comindex{Unset Printing Synth} \comindex{Test Printing Synth}} In most of the cases, the type of the result of a matched term is mechanically synthesizable. Especially, if the result type does not depend of the matched term. \begin{quote} {\tt Set Printing Synth.} \end{quote} The result type is not printed when {\Coq} knows that it can re-synthesize it. \begin{quote} {\tt Unset Printing Synth.} \end{quote} This forces the result type to be always printed. \begin{quote} {\tt Test Printing Synth.} \end{quote} This tells if the non-printing of synthesizable types is on or off. The default is to not print synthesizable types. \subsubsection{Printing matching on irrefutable pattern \comindex{Add Printing Let {\ident}} \comindex{Remove Printing Let {\ident}} \comindex{Test Printing Let for {\ident}} \comindex{Print Table Printing Let}} If an inductive type has just one constructor, pattern-matching can be written using {\tt let} ... {\tt :=} ... {\tt in}~... \begin{quote} {\tt Add Printing Let {\ident}.} \end{quote} This adds {\ident} to the list of inductive types for which pattern-matching is written using a {\tt let} expression. \begin{quote} {\tt Remove Printing Let {\ident}.} \end{quote} This removes {\ident} from this list. \begin{quote} {\tt Test Printing Let for {\ident}.} \end{quote} This tells if {\ident} belongs to the list. \begin{quote} {\tt Print Table Printing Let.} \end{quote} This prints the list of inductive types for which pattern-matching is written using a {\tt let} expression. The list of inductive types for which pattern-matching is written using a {\tt let} expression is managed synchronously. This means that it is sensible to the command {\tt Reset}. \subsubsection{Printing matching on booleans \comindex{Add Printing If {\ident}} \comindex{Remove Printing If {\ident}} \comindex{Test Printing If for {\ident}} \comindex{Print Table Printing If}} If an inductive type is isomorphic to the boolean type, pattern-matching can be written using {\tt if} ... {\tt then} ... {\tt else} ... \begin{quote} {\tt Add Printing If {\ident}.} \end{quote} This adds {\ident} to the list of inductive types for which pattern-matching is written using an {\tt if} expression. \begin{quote} {\tt Remove Printing If {\ident}.} \end{quote} This removes {\ident} from this list. \begin{quote} {\tt Test Printing If for {\ident}.} \end{quote} This tells if {\ident} belongs to the list. \begin{quote} {\tt Print Table Printing If.} \end{quote} This prints the list of inductive types for which pattern-matching is written using an {\tt if} expression. The list of inductive types for which pattern-matching is written using an {\tt if} expression is managed synchronously. This means that it is sensible to the command {\tt Reset}. \subsubsection{Example} This example emphasizes what the printing options offer. \begin{coq_example} Test Printing Let for prod. Print fst. Remove Printing Let prod. Unset Printing Synth. Unset Printing Wildcard. Print fst. 
\end{coq_example} % \subsection{Still not dead old notations} % The following variant of {\tt match} is inherited from older version % of {\Coq}. % \medskip % \begin{tabular}{lcl} % {\term} & ::= & {\annotation} {\tt Match} {\term} {\tt with} {\terms} {\tt end}\\ % \end{tabular} % \medskip % This syntax is a macro generating a combination of {\tt match} with {\tt % Fix} implementing a combinator for primitive recursion equivalent to % the {\tt Match} construction of \Coq\ V5.8. It is provided only for % sake of compatibility with \Coq\ V5.8. It is recommended to avoid it. % (see Section~\ref{Matchexpr}). % There is also a notation \texttt{Case} that is the % ancestor of \texttt{match}. Again, it is still in the code for % compatibility with old versions but the user should not use it. % Explained in RefMan-gal.tex %% \section{Forced type} %% In some cases, one may wish to assign a particular type to a term. The %% syntax to force the type of a term is the following: %% \medskip %% \begin{tabular}{lcl} %% {\term} & ++= & {\term} {\tt :} {\term}\\ %% \end{tabular} %% \medskip %% It forces the first term to be of type the second term. The %% type must be compatible with %% the term. More precisely it must be either a type convertible to %% the automatically inferred type (see Chapter~\ref{Cic}) or a type %% coercible to it, (see \ref{Coercions}). When the type of a %% whole expression is forced, it is usually not necessary to give the types of %% the variables involved in the term. %% Example: %% \begin{coq_example} %% Definition ID := forall X:Set, X -> X. %% Definition id := (fun X x => x):ID. %% Check id. %% \end{coq_example} \section{Advanced recursive functions} The \emph{experimental} command \begin{center} \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} \{decrease\_annot\} : type$_0$ := \term$_0$} \comindex{Function} \label{Function} \end{center} can be seen as a generalization of {\tt Fixpoint}. It is actually a wrapper for several ways of defining a function \emph{and other useful related objects}, namely: an induction principle that reflects the recursive structure of the function (see \ref{FunInduction}), and its fixpoint equality. The meaning of this declaration is to define a function {\it ident}, similarly to {\tt Fixpoint}. Like in {\tt Fixpoint}, the decreasing argument must be given (unless the function is not recursive), but it must not necessary be \emph{structurally} decreasing. The point of the {\tt \{\}} annotation is to name the decreasing argument \emph{and} to describe which kind of decreasing criteria must be used to ensure termination of recursive calls. The {\tt Function} construction enjoys also the {\tt with} extension to define mutually recursive definitions. However, this feature does not work for non structural recursive functions. % VRAI?? See the documentation of {\tt functional induction} (see Section~\ref{FunInduction}) and {\tt Functional Scheme} (see Section~\ref{FunScheme} and \ref{FunScheme-examples}) for how to use the induction principle to easily reason about the function. \noindent {\bf Remark: } To obtain the right principle, it is better to put rigid parameters of the function as first arguments. For example it is better to define plus like this: \begin{coq_example*} Function plus (m n : nat) {struct n} : nat := match n with | 0 => m | S p => S (plus m p) end. \end{coq_example*} \noindent than like this: \begin{coq_eval} Reset plus. 
\end{coq_eval} \begin{coq_example*} Function plus (n m : nat) {struct n} : nat := match n with | 0 => m | S p => S (plus p m) end. \end{coq_example*} \paragraph[Limitations]{Limitations\label{sec:Function-limitations}} \term$_0$ must be build as a \emph{pure pattern-matching tree} (\texttt{match...with}) with applications only \emph{at the end} of each branch. Function does not support partial application of the function being defined. Thus, the following example cannot be accepted due to the presence of partial application of \ident{wrong} into the body of \ident{wrong}~: \begin{coq_example*} Function wrong (C:nat) {\ldots} : nat := List.hd(List.map wrong (C::nil)). \end{coq_example*} For now dependent cases are not treated for non structurally terminating functions. \begin{ErrMsgs} \item \errindex{The recursive argument must be specified} \item \errindex{No argument name \ident} \item \errindex{Cannot use mutual definition with well-founded recursion or measure} \item \errindex{Cannot define graph for \ident\dots} (warning) The generation of the graph relation \texttt{(R\_\ident)} used to compute the induction scheme of \ident\ raised a typing error. Only the ident is defined, the induction scheme will not be generated. This error happens generally when: \begin{itemize} \item the definition uses pattern matching on dependent types, which \texttt{Function} cannot deal with yet. \item the definition is not a \emph{pattern-matching tree} as explained above. \end{itemize} \item \errindex{Cannot define principle(s) for \ident\dots} (warning) The generation of the graph relation \texttt{(R\_\ident)} succeeded but the induction principle could not be built. Only the ident is defined. Please report. \item \errindex{Cannot build functional inversion principle} (warning) \texttt{functional inversion} will not be available for the function. \end{ErrMsgs} \SeeAlso{\ref{FunScheme}, \ref{FunScheme-examples}, \ref{FunInduction}} Depending on the {\tt \{$\ldots$\}} annotation, different definition mechanisms are used by {\tt Function}. More precise description given below. \begin{Variants} \item \texttt{ Function {\ident} {\binder$_1$}\ldots{\binder$_n$} : type$_0$ := \term$_0$} Defines the not recursive function \ident\ as if declared with \texttt{Definition}. Moreover the following are defined: \begin{itemize} \item {\tt\ident\_rect}, {\tt\ident\_rec} and {\tt\ident\_ind}, which reflect the pattern matching structure of \term$_0$ (see the documentation of {\tt Inductive} \ref{Inductive}); \item The inductive \texttt{R\_\ident} corresponding to the graph of \ident\ (silently); \item \texttt{\ident\_complete} and \texttt{\ident\_correct} which are inversion information linking the function and its graph. \end{itemize} \item \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt \{}{\tt struct} \ident$_0${\tt\}} : type$_0$ := \term$_0$} Defines the structural recursive function \ident\ as if declared with \texttt{Fixpoint}. Moreover the following are defined: \begin{itemize} \item The same objects as above; \item The fixpoint equation of \ident: \texttt{\ident\_equation}. \end{itemize} \item \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt \{}{\tt measure \term$_1$} \ident$_0${\tt\}} : type$_0$ := \term$_0$} \item \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt \{}{\tt wf \term$_1$} \ident$_0${\tt\}} : type$_0$ := \term$_0$} Defines a recursive function by well founded recursion. 
\textbf{The module \texttt{Recdef} of the standard library must be loaded for this feature}. The {\tt \{\}} annotation is mandatory and must be one of the following: \begin{itemize} \item {\tt \{measure} \term$_1$ \ident$_0${\tt\}} with \ident$_0$ being the decreasing argument and \term$_1$ being a function from type of \ident$_0$ to \texttt{nat} for which value on the decreasing argument decreases (for the {\tt lt} order on {\tt nat}) at each recursive call of \term$_0$, parameters of the function are bound in \term$_0$; \item {\tt \{wf} \term$_1$ \ident$_0${\tt\}} with \ident$_0$ being the decreasing argument and \term$_1$ an ordering relation on the type of \ident$_0$ (i.e. of type T$_{\ident_0}$ $\to$ T$_{\ident_0}$ $\to$ {\tt Prop}) for which the decreasing argument decreases at each recursive call of \term$_0$. The order must be well founded. parameters of the function are bound in \term$_0$. \end{itemize} Depending on the annotation, the user is left with some proof obligations that will be used to define the function. These proofs are: proofs that each recursive call is actually decreasing with respect to the given criteria, and (if the criteria is \texttt{wf}) a proof that the ordering relation is well founded. %Completer sur measure et wf Once proof obligations are discharged, the following objects are defined: \begin{itemize} \item The same objects as with the \texttt{struct}; \item The lemma \texttt{\ident\_tcc} which collects all proof obligations in one property; \item The lemmas \texttt{\ident\_terminate} and \texttt{\ident\_F} which is needed to be inlined during extraction of \ident. \end{itemize} %Complete!! The way this recursive function is defined is the subject of several papers by Yves Bertot and Antonia Balaa on the one hand, and Gilles Barthe, Julien Forest, David Pichardie, and Vlad Rusu on the other hand. %Exemples ok ici \bigskip \noindent {\bf Remark: } Proof obligations are presented as several subgoals belonging to a Lemma {\ident}{\tt\_tcc}. % These subgoals are independent which means that in order to % abort them you will have to abort each separately. %The decreasing argument cannot be dependent of another?? %Exemples faux ici \end{Variants} \section{Section mechanism \index{Sections} \label{Section}} The sectioning mechanism allows to organize a proof in structured sections. Then local declarations become available (see Section~\ref{Basic-definitions}). \subsection{\tt Section {\ident}\comindex{Section}} This command is used to open a section named {\ident}. %% Discontinued ? %% \begin{Variants} %% \comindex{Chapter} %% \item{\tt Chapter {\ident}}\\ %% Same as {\tt Section {\ident}} %% \end{Variants} \subsection{\tt End {\ident} \comindex{End}} This command closes the section named {\ident}. After closing of the section, the local declarations (variables and local definitions) get {\em discharged}, meaning that they stop being visible and that all global objects defined in the section are generalized with respect to the variables and local definitions they each depended on in the section. Here is an example : \begin{coq_example} Section s1. Variables x y : nat. Let y' := y. Definition x' := S x. Definition x'' := x' + y'. Print x'. End s1. Print x'. Print x''. \end{coq_example} Notice the difference between the value of {\tt x'} and {\tt x''} inside section {\tt s1} and outside. \begin{ErrMsgs} \item \errindex{This is not the last opened section} \end{ErrMsgs} \begin{Remarks} \item Most commands, like {\tt Hint}, {\tt Notation}, option management, ... 
which appear inside a section are canceled when the section is closed. % see Section~\ref{LongNames} %\item Usually all identifiers must be distinct. %However, a name already used in a closed section (see \ref{Section}) %can be reused. In this case, the old name is no longer accessible. % Obsolte %\item A module implicitly open a section. Be careful not to name a %module with an identifier already used in the module (see \ref{compiled}). \end{Remarks} \input{RefMan-mod.v} \section{Libraries and qualified names} \subsection{Names of libraries and files \label{Libraries} \index{Libraries} \index{Physical paths} \index{Logical paths}} \paragraph{Libraries} The theories developed in {\Coq} are stored in {\em library files} which are hierarchically classified into {\em libraries} and {\em sublibraries}. To express this hierarchy, library names are represented by qualified identifiers {\qualid}, i.e. as list of identifiers separated by dots (see Section~\ref{qualid}). For instance, the library file {\tt Mult} of the standard {\Coq} library {\tt Arith} has name {\tt Coq.Arith.Mult}. The identifier that starts the name of a library is called a {\em library root}. All library files of the standard library of {\Coq} have reserved root {\tt Coq} but library file names based on other roots can be obtained by using {\tt coqc} options {\tt -I} or {\tt -R} (see Section~\ref{coqoptions}). Also, when an interactive {\Coq} session starts, a library of root {\tt Top} is started, unless option {\tt -top} or {\tt -notop} is set (see Section~\ref{coqoptions}). As library files are stored on the file system of the underlying operating system, a translation from file-system names to {\Coq} names is needed. In this translation, names in the file system are called {\em physical} paths while {\Coq} names are contrastingly called {\em logical} names. Logical names are mapped to physical paths using the commands {\tt Add LoadPath} or {\tt Add Rec LoadPath} (see Sections~\ref{AddLoadPath} and~\ref{AddRecLoadPath}). \subsection{Qualified names \label{LongNames} \index{Qualified identifiers} \index{Absolute names}} Library files are modules which possibly contain submodules which eventually contain constructions (axioms, parameters, definitions, lemmas, theorems, remarks or facts). The {\em absolute name}, or {\em full name}, of a construction in some library file is a qualified identifier starting with the logical name of the library file, followed by the sequence of submodules names encapsulating the construction and ended by the proper name of the construction. Typically, the absolute name {\tt Coq.Init.Logic.eq} denotes Leibniz' equality defined in the module {\tt Logic} in the sublibrary {\tt Init} of the standard library of \Coq. The proper name that ends the name of a construction is the {\it short name} (or sometimes {\it base name}) of the construction (for instance, the short name of {\tt Coq.Init.Logic.eq} is {\tt eq}). Any partial suffix of the absolute name is a {\em partially qualified name} (e.g. {\tt Logic.eq} is a partially qualified name for {\tt Coq.Init.Logic.eq}). Especially, the short name of a construction is its shortest partially qualified name. {\Coq} does not accept two constructions (definition, theorem, ...) with the same absolute name but different constructions can have the same short name (or even same partially qualified names as soon as the full names are different). Notice that the notion of absolute, partially qualified and short names also applies to library file names. 
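Coming back to constructions, Leibniz' equality can for instance be denoted by its absolute name or by any of its partially qualified names, down to its short name (a small illustration, assuming the standard prelude is loaded):
\begin{coq_example}
Check Coq.Init.Logic.eq.
Check Logic.eq.
Check eq.
\end{coq_example}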
\paragraph{Visibility} {\Coq} maintains a table called {\it name table} which maps partially qualified names of constructions to absolute names. This table is updated by the commands {\tt Require} (see \ref{Require}), {\tt Import} and {\tt Export} (see \ref{Import}) and also each time a new declaration is added to the context. An absolute name is called {\it visible} from a given short or partially qualified name when this latter name is enough to denote it. This means that the short or partially qualified name is mapped to the absolute name in {\Coq} name table. A similar table exists for library file names. It is updated by the vernacular commands {\tt Add LoadPath} and {\tt Add Rec LoadPath} (or their equivalent as options of the {\Coq} executables, {\tt -I} and {\tt -R}). It may happen that a visible name is hidden by the short name or a qualified name of another construction. In this case, the name that has been hidden must be referred to using one more level of qualification. To ensure that a construction always remains accessible, absolute names can never be hidden. Examples: \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Check 0. Definition nat := bool. Check 0. Check Datatypes.nat. Locate nat. \end{coq_example} \SeeAlso Command {\tt Locate} in Section~\ref{Locate} and {\tt Locate Library} in Section~\ref{Locate Library}. %% \paragraph{The special case of remarks and facts} %% %% In contrast with definitions, lemmas, theorems, axioms and parameters, %% the absolute name of remarks includes the segment of sections in which %% it is defined. Concretely, if a remark {\tt R} is defined in %% subsection {\tt S2} of section {\tt S1} in module {\tt M}, then its %% absolute name is {\tt M.S1.S2.R}. The same for facts, except that the %% name of the innermost section is dropped from the full name. Then, if %% a fact {\tt F} is defined in subsection {\tt S2} of section {\tt S1} %% in module {\tt M}, then its absolute name is {\tt M.S1.F}. \section{Implicit arguments \index{Implicit arguments} \label{Implicit Arguments}} An implicit argument of a function is an argument which can be inferred from contextual knowledge. There are different kinds of implicit arguments that can be considered implicit in different ways. There are also various commands to control the setting or the inference of implicit arguments. \subsection{The different kinds of implicit arguments} \subsubsection{Implicit arguments inferable from the knowledge of other arguments of a function} The first kind of implicit arguments covers the arguments that are inferable from the knowledge of the type of other arguments of the function, or of the type of the surrounding context of the application. Especially, such implicit arguments correspond to parameters dependent in the type of the function. Typical implicit arguments are the type arguments in polymorphic functions. There are several kinds of such implicit arguments. \paragraph{Strict Implicit Arguments.} An implicit argument can be either strict or non strict. An implicit argument is said {\em strict} if, whatever the other arguments of the function are, it is still inferable from the type of some other argument. 
Technically, an implicit argument is strict if it corresponds to a parameter which is not applied to a variable which itself is another parameter of the function (since this parameter may erase its arguments), not in the body of a {\tt match}, and not itself applied or matched against patterns (since the original form of the argument can be lost by reduction). For instance, the first argument of \begin{quote} \verb|cons: forall A:Set, A -> list A -> list A| \end{quote} in module {\tt List.v} is strict because {\tt list} is an inductive type and {\tt A} will always be inferable from the type {\tt list A} of the third argument of {\tt cons}. On the contrary, the second argument of a term of type \begin{quote} \verb|forall P:nat->Prop, forall n:nat, P n -> ex nat P| \end{quote} is implicit but not strict, since it can only be inferred from the type {\tt P n} of the third argument and if {\tt P} is, e.g., {\tt fun \_ => True}, it reduces to an expression where {\tt n} does not occur any longer. The first argument {\tt P} is implicit but not strict either because it can only be inferred from {\tt P n} and {\tt P} is not canonically inferable from an arbitrary {\tt n} and the normal form of {\tt P n} (consider e.g. that {\tt n} is {\tt 0} and the third argument has type {\tt True}, then any {\tt P} of the form {\tt fun n => match n with 0 => True | \_ => \mbox{\em anything} end} would be a solution of the inference problem). \paragraph{Contextual Implicit Arguments.} An implicit argument can be {\em contextual} or not. An implicit argument is said {\em contextual} if it can be inferred only from the knowledge of the type of the context of the current expression. For instance, the only argument of \begin{quote} \verb|nil : forall A:Set, list A| \end{quote} is contextual. Similarly, both arguments of a term of type \begin{quote} \verb|forall P:nat->Prop, forall n:nat, P n \/ n = 0| \end{quote} are contextual (moreover, {\tt n} is strict and {\tt P} is not). \paragraph{Reversible-Pattern Implicit Arguments.} There is another class of implicit arguments that can be reinferred unambiguously if all the types of the remaining arguments are known. This is the class of implicit arguments occurring in the type of another argument in position of reversible pattern, which means it is at the head of an application but applied only to uninstantiated distinct variables. Such an implicit argument is called {\em reversible-pattern implicit argument}. A typical example is the argument {\tt P} of {\tt nat\_rec} in \begin{quote} {\tt nat\_rec : forall P : nat -> Set, P 0 -> (forall n : nat, P n -> P (S n)) -> forall x : nat, P x}. \end{quote} ({\tt P} is reinferable by abstracting over {\tt n} in the type {\tt P n}). See Section~\ref{SetReversiblePatternImplicit} for the automatic declaration of reversible-pattern implicit arguments. \subsubsection{Implicit arguments inferable by resolution} This corresponds to a class of non dependent implicit arguments that are solved based on the structure of their type only. \subsection{Maximal or non maximal insertion of implicit arguments} In case a function is partially applied, and the next argument to be applied is an implicit argument, two disciplines are applicable. In the first case, the function is considered to have no arguments furtherly: one says that the implicit argument is not maximally inserted. 
In the second case, the function is considered to be implicitly applied to the implicit arguments it is waiting for: one says that the implicit argument is maximally inserted. Each implicit argument can be declared to have to be inserted maximally or non maximally. This can be governed argument per argument by the command {\tt Implicit Arguments} (see~\ref{ImplicitArguments}) or globally by the command {\tt Set Maximal Implicit Insertion} (see~\ref{SetMaximalImplicitInsertion}). See also Section~\ref{PrintImplicit}. \subsection{Casual use of implicit arguments} In a given expression, if it is clear that some argument of a function can be inferred from the type of the other arguments, the user can force the given argument to be guessed by replacing it by ``{\tt \_}''. If possible, the correct argument will be automatically generated. \begin{ErrMsgs} \item \errindex{Cannot infer a term for this placeholder} {\Coq} was not able to deduce an instantiation of a ``{\tt \_}''. \end{ErrMsgs} \subsection{Declaration of implicit arguments for a constant \comindex{Arguments}} \label{ImplicitArguments} In case one wants that some arguments of a given object (constant, inductive types, constructors, assumptions, local or not) are always inferred by Coq, one may declare once and for all which are the expected implicit arguments of this object. There are two ways to do this, a priori and a posteriori. \subsubsection{Implicit Argument Binders} In the first setting, one wants to explicitly give the implicit arguments of a constant as part of its definition. To do this, one has to surround the bindings of implicit arguments by curly braces: \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Definition id {A : Type} (x : A) : A := x. \end{coq_example} This automatically declares the argument {\tt A} of {\tt id} as a maximally inserted implicit argument. One can then do as-if the argument was absent in every situation but still be able to specify it if needed: \begin{coq_example} Definition compose {A B C} (g : B -> C) (f : A -> B) := fun x => g (f x). Goal forall A, compose id id = id (A:=A). \end{coq_example} The syntax is supported in all top-level definitions: {\tt Definition}, {\tt Fixpoint}, {\tt Lemma} and so on. For (co-)inductive datatype declarations, the semantics are the following: an inductive parameter declared as an implicit argument need not be repeated in the inductive definition but will become implicit for the constructors of the inductive only, not the inductive type itself. For example: \begin{coq_example} Inductive list {A : Type} : Type := | nil : list | cons : A -> list -> list. Print list. \end{coq_example} One can always specify the parameter if it is not uniform using the usual implicit arguments disambiguation syntax. \subsubsection{Declaring Implicit Arguments} To set implicit arguments for a constant a posteriori, one can use the command: \begin{quote} \tt Arguments {\qualid} \nelist{\possiblybracketedident}{} \end{quote} where the list of {\possiblybracketedident} is the list of all arguments of {\qualid} where the ones to be declared implicit are surrounded by square brackets and the ones to be declared as maximally inserted implicits are surrounded by curly braces. After the above declaration is issued, implicit arguments can just (and have to) be skipped in any expression involving an application of {\qualid}. 
\begin{Variants} \item {\tt Global Arguments {\qualid} \nelist{\possiblybracketedident}{} \comindex{Global Arguments}} Tell to recompute the implicit arguments of {\qualid} after ending of the current section if any, enforcing the implicit arguments known from inside the section to be the ones declared by the command. \item {\tt Local Arguments {\qualid} \nelist{\possiblybracketedident}{} \comindex{Local Arguments}} When in a module, tell not to activate the implicit arguments of {\qualid} declared by this command to contexts that require the module. \item {\tt \zeroone{Global {\sl |} Local} Arguments {\qualid} \sequence{\nelist{\possiblybracketedident}{}}{,}} For names of constants, inductive types, constructors, lemmas which can only be applied to a fixed number of arguments (this excludes for instance constants whose type is polymorphic), multiple implicit arguments decflarations can be given. Depending on the number of arguments {\qualid} is applied to in practice, the longest applicable list of implicit arguments is used to select which implicit arguments are inserted. For printing, the omitted arguments are the ones of the longest list of implicit arguments of the sequence. \end{Variants} \Example \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Inductive list (A:Type) : Type := | nil : list A | cons : A -> list A -> list A. \end{coq_example*} \begin{coq_example} Check (cons nat 3 (nil nat)). Arguments cons [A] _ _. Arguments nil [A]. Check (cons 3 nil). Fixpoint map (A B:Type) (f:A->B) (l:list A) : list B := match l with nil => nil | cons a t => cons (f a) (map A B f t) end. Fixpoint length (A:Type) (l:list A) : nat := match l with nil => 0 | cons _ m => S (length A m) end. Arguments map [A B] f l. Arguments length {A} l. (* A has to be maximally inserted *) Check (fun l:list (list nat) => map length l). Arguments map [A B] f l, [A] B f l, A B f l. Check (fun l => map length l = map (list nat) nat length l). \end{coq_example} \Rem To know which are the implicit arguments of an object, use the command {\tt Print Implicit} (see \ref{PrintImplicit}). \Rem If the list of arguments is empty, the command removes the implicit arguments of {\qualid}. \subsection{Automatic declaration of implicit arguments for a constant} {\Coq} can also automatically detect what are the implicit arguments of a defined object. The command is just \begin{quote} {\tt Arguments {\qualid} : default implicits \comindex{Arguments}} \end{quote} The auto-detection is governed by options telling if strict, contextual, or reversible-pattern implicit arguments must be considered or not (see Sections~\ref{SetStrictImplicit},~\ref{SetContextualImplicit},~\ref{SetReversiblePatternImplicit} and also~\ref{SetMaximalImplicitInsertion}). \begin{Variants} \item {\tt Global Arguments {\qualid} : default implicits \comindex{Global Arguments}} Tell to recompute the implicit arguments of {\qualid} after ending of the current section if any. \item {\tt Local Arguments {\qualid} : default implicits \comindex{Local Arguments}} When in a module, tell not to activate the implicit arguments of {\qualid} computed by this declaration to contexts that requires the module. \end{Variants} \Example \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Inductive list (A:Set) : Set := | nil : list A | cons : A -> list A -> list A. \end{coq_example*} \begin{coq_example} Arguments cons : default implicits. Print Implicit cons. Arguments nil : default implicits. Print Implicit nil. Set Contextual Implicit. 
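(* With Contextual Implicit set, the contextual argument A of nil
   is now detected as implicit by the following declaration. *)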
Arguments nil : default implicits. Print Implicit nil. \end{coq_example} The computation of implicit arguments takes account of the unfolding of constants. For instance, the variable {\tt p} below has type {\tt (Transitivity R)} which is reducible to {\tt forall x,y:U, R x y -> forall z:U, R y z -> R x z}. As the variables {\tt x}, {\tt y} and {\tt z} appear strictly in body of the type, they are implicit. \begin{coq_example*} Variable X : Type. Definition Relation := X -> X -> Prop. Definition Transitivity (R:Relation) := forall x y:X, R x y -> forall z:X, R y z -> R x z. Variables (R : Relation) (p : Transitivity R). Arguments p : default implicits. \end{coq_example*} \begin{coq_example} Print p. Print Implicit p. \end{coq_example} \begin{coq_example*} Variables (a b c : X) (r1 : R a b) (r2 : R b c). \end{coq_example*} \begin{coq_example} Check (p r1 r2). \end{coq_example} Implicit arguments can be cleared with the following syntax: \begin{quote} {\tt Arguments {\qualid} : clear implicits \comindex{Arguments}} \end{quote} In the following example implict arguments declarations for the {\tt nil} constant are cleared: \begin{coq_example} Arguments cons : clear implicits. Print Implicit cons. \end{coq_example} \subsection{Mode for automatic declaration of implicit arguments \label{Auto-implicit} \comindex{Set Implicit Arguments} \comindex{Unset Implicit Arguments}} In case one wants to systematically declare implicit the arguments detectable as such, one may switch to the automatic declaration of implicit arguments mode by using the command \begin{quote} \tt Set Implicit Arguments. \end{quote} Conversely, one may unset the mode by using {\tt Unset Implicit Arguments}. The mode is off by default. Auto-detection of implicit arguments is governed by options controlling whether strict and contextual implicit arguments have to be considered or not. \subsection{Controlling strict implicit arguments \comindex{Set Strict Implicit} \comindex{Unset Strict Implicit} \label{SetStrictImplicit}} When the mode for automatic declaration of implicit arguments is on, the default is to automatically set implicit only the strict implicit arguments plus, for historical reasons, a small subset of the non strict implicit arguments. To relax this constraint and to set implicit all non strict implicit arguments by default, use the command \begin{quote} \tt Unset Strict Implicit. \end{quote} Conversely, use the command {\tt Set Strict Implicit} to restore the original mode that declares implicit only the strict implicit arguments plus a small subset of the non strict implicit arguments. In the other way round, to capture exactly the strict implicit arguments and no more than the strict implicit arguments, use the command: \comindex{Set Strongly Strict Implicit} \comindex{Unset Strongly Strict Implicit} \begin{quote} \tt Set Strongly Strict Implicit. \end{quote} Conversely, use the command {\tt Unset Strongly Strict Implicit} to let the option ``{\tt Strict Implicit}'' decide what to do. \Rem In versions of {\Coq} prior to version 8.0, the default was to declare the strict implicit arguments as implicit. \subsection{Controlling contextual implicit arguments \comindex{Set Contextual Implicit} \comindex{Unset Contextual Implicit} \label{SetContextualImplicit}} By default, {\Coq} does not automatically set implicit the contextual implicit arguments. To tell {\Coq} to infer also contextual implicit argument, use command \begin{quote} \tt Set Contextual Implicit. 
\end{quote} Conversely, use command {\tt Unset Contextual Implicit} to unset the contextual implicit mode. \subsection{Controlling reversible-pattern implicit arguments \comindex{Set Reversible Pattern Implicit} \comindex{Unset Reversible Pattern Implicit} \label{SetReversiblePatternImplicit}} By default, {\Coq} does not automatically set implicit the reversible-pattern implicit arguments. To tell {\Coq} to infer also reversible-pattern implicit argument, use command \begin{quote} \tt Set Reversible Pattern Implicit. \end{quote} Conversely, use command {\tt Unset Reversible Pattern Implicit} to unset the reversible-pattern implicit mode. \subsection{Controlling the insertion of implicit arguments not followed by explicit arguments \comindex{Set Maximal Implicit Insertion} \comindex{Unset Maximal Implicit Insertion} \label{SetMaximalImplicitInsertion}} Implicit arguments can be declared to be automatically inserted when a function is partially applied and the next argument of the function is an implicit one. In case the implicit arguments are automatically declared (with the command {\tt Set Implicit Arguments}), the command \begin{quote} \tt Set Maximal Implicit Insertion. \end{quote} is used to tell to declare the implicit arguments with a maximal insertion status. By default, automatically declared implicit arguments are not declared to be insertable maximally. To restore the default mode for maximal insertion, use command {\tt Unset Maximal Implicit Insertion}. \subsection{Explicit applications \index{Explicitly given implicit arguments} \label{Implicits-explicitation} \index{qualid@{\qualid}} \index{\symbol{64}}} In presence of non strict or contextual argument, or in presence of partial applications, the synthesis of implicit arguments may fail, so one may have to give explicitly certain implicit arguments of an application. The syntax for this is {\tt (\ident:=\term)} where {\ident} is the name of the implicit argument and {\term} is its corresponding explicit term. Alternatively, one can locally deactivate the hiding of implicit arguments of a function by using the notation {\tt @{\qualid}~{\term}$_1$..{\term}$_n$}. This syntax extension is given Figure~\ref{fig:explicitations}. \begin{figure} \begin{centerframe} \begin{tabular}{lcl} {\term} & ++= & @ {\qualid} \nelist{\term}{}\\ & $|$ & @ {\qualid}\\ & $|$ & {\qualid} \nelist{\textrm{\textsl{argument}}}{}\\ \\ {\textrm{\textsl{argument}}} & ::= & {\term} \\ & $|$ & {\tt ({\ident}:={\term})}\\ \end{tabular} \end{centerframe} \caption{Syntax for explicitly giving implicit arguments} \label{fig:explicitations} \end{figure} \noindent {\bf Example (continued): } \begin{coq_example} Check (p r1 (z:=c)). Check (p (x:=a) (y:=b) r1 (z:=c) r2). \end{coq_example} \subsection{Renaming implicit arguments \comindex{Arguments} } Implicit arguments names can be redefined using the following syntax: \begin{quote} {\tt Arguments {\qualid} \nelist{\name}{} : rename} \end{quote} Without the {\tt rename} flag, {\tt Arguments} can be used to assert that a given constant has the expected number of arguments and that these arguments are named as expected. \noindent {\bf Example (continued): } \begin{coq_example} Arguments p [s t] _ [u] _: rename. Check (p r1 (u:=c)). Check (p (s:=a) (t:=b) r1 (u:=c) r2). Fail Arguments p [s t] _ [w] _. 
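(* The last declaration is rejected: without the rename flag, the
   given names must coincide with the declared ones, and p has no
   argument named w. *)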
\end{coq_example} \subsection{Deprecated Implicit Arguments command \comindex{Implicit Arguments}} Before version 8.4 the command to specify the implicit arguments of a constant was {\tt Implicit Arguments} followed by a bracketed list of identifiers. Starting with Coq 8.4 the {\tt Argument} command must be used (see \ref{ImplicitArguments}). \begin{coq_example} Require Import List. Implicit Arguments map [A B]. Implicit Arguments length [[A]]. \end{coq_example} The first example declares the arguments of {\tt map} named {\tt A} and {\tt B} implicit. The second one declares the argument named {\tt A} implicit and maximally inserted. \subsection{Displaying what the implicit arguments are \comindex{Print Implicit} \label{PrintImplicit}} To display the implicit arguments associated to an object, and to know if each of them is to be used maximally or not, use the command \begin{quote} \tt Print Implicit {\qualid}. \end{quote} \subsection{Explicit displaying of implicit arguments for pretty-printing \comindex{Set Printing Implicit} \comindex{Unset Printing Implicit} \comindex{Set Printing Implicit Defensive} \comindex{Unset Printing Implicit Defensive}} By default the basic pretty-printing rules hide the inferable implicit arguments of an application. To force printing all implicit arguments, use command \begin{quote} {\tt Set Printing Implicit.} \end{quote} Conversely, to restore the hiding of implicit arguments, use command \begin{quote} {\tt Unset Printing Implicit.} \end{quote} By default the basic pretty-printing rules display the implicit arguments that are not detected as strict implicit arguments. This ``defensive'' mode can quickly make the display cumbersome so this can be deactivated by using the command \begin{quote} {\tt Unset Printing Implicit Defensive.} \end{quote} Conversely, to force the display of non strict arguments, use command \begin{quote} {\tt Set Printing Implicit Defensive.} \end{quote} \SeeAlso {\tt Set Printing All} in Section~\ref{SetPrintingAll}. \subsection{Interaction with subtyping} When an implicit argument can be inferred from the type of more than one of the other arguments, then only the type of the first of these arguments is taken into account, and not an upper type of all of them. As a consequence, the inference of the implicit argument of ``='' fails in \begin{coq_example*} Check nat = Prop. \end{coq_example*} but succeeds in \begin{coq_example*} Check Prop = nat. \end{coq_example*} \subsection{Deactivation of implicit arguments for parsing} \comindex{Set Parsing Explicit} \comindex{Unset Parsing Explicit} Use of implicit arguments can be deactivated by issuing the command: \begin{quote} {\tt Set Parsing Explicit.} \end{quote} In this case, all arguments of constants, inductive types, constructors, etc, including the arguments declared as implicit, have to be given as if none arguments were implicit. By symmetry, this also affects printing. To restore parsing and normal printing of implicit arguments, use: \begin{quote} {\tt Set Parsing Explicit.} \end{quote} \subsection{Canonical structures \comindex{Canonical Structure}} A canonical structure is an instance of a record/structure type that can be used to solve equations involving implicit arguments. Assume that {\qualid} denotes an object $(Build\_struc~ c_1~ \ldots~ c_n)$ in the structure {\em struct} of which the fields are $x_1$, ..., $x_n$. 
Assume that {\qualid} is declared as a canonical structure using the command \begin{quote} {\tt Canonical Structure {\qualid}.} \end{quote} Then, each time an equation of the form $(x_i~ \_)=_{\beta\delta\iota\zeta}c_i$ has to be solved during the type-checking process, {\qualid} is used as a solution. Otherwise said, {\qualid} is canonically used to extend the field $c_i$ into a complete structure built on $c_i$. Canonical structures are particularly useful when mixed with coercions and strict implicit arguments. Here is an example. \begin{coq_example*} Require Import Relations. Require Import EqNat. Set Implicit Arguments. Unset Strict Implicit. Structure Setoid : Type := {Carrier :> Set; Equal : relation Carrier; Prf_equiv : equivalence Carrier Equal}. Definition is_law (A B:Setoid) (f:A -> B) := forall x y:A, Equal x y -> Equal (f x) (f y). Axiom eq_nat_equiv : equivalence nat eq_nat. Definition nat_setoid : Setoid := Build_Setoid eq_nat_equiv. Canonical Structure nat_setoid. \end{coq_example*} Thanks to \texttt{nat\_setoid} declared as canonical, the implicit arguments {\tt A} and {\tt B} can be synthesized in the next statement. \begin{coq_example} Lemma is_law_S : is_law S. \end{coq_example} \Rem If a same field occurs in several canonical structure, then only the structure declared first as canonical is considered. \begin{Variants} \item {\tt Canonical Structure {\ident} := {\term} : {\type}.}\\ {\tt Canonical Structure {\ident} := {\term}.}\\ {\tt Canonical Structure {\ident} : {\type} := {\term}.} These are equivalent to a regular definition of {\ident} followed by the declaration {\tt Canonical Structure {\ident}}. \end{Variants} \SeeAlso more examples in user contribution \texttt{category} (\texttt{Rocq/ALGEBRA}). \subsubsection{Print Canonical Projections. \comindex{Print Canonical Projections}} This displays the list of global names that are components of some canonical structure. For each of them, the canonical structure of which it is a projection is indicated. For instance, the above example gives the following output: \begin{coq_example} Print Canonical Projections. \end{coq_example} \subsection{Implicit types of variables} \comindex{Implicit Types} It is possible to bind variable names to a given type (e.g. in a development using arithmetic, it may be convenient to bind the names {\tt n} or {\tt m} to the type {\tt nat} of natural numbers). The command for that is \begin{quote} \tt Implicit Types \nelist{\ident}{} : {\type} \end{quote} The effect of the command is to automatically set the type of bound variables starting with {\ident} (either {\ident} itself or {\ident} followed by one or more single quotes, underscore or digits) to be {\type} (unless the bound variable is already declared with an explicit type in which case, this latter type is considered). \Example \begin{coq_example} Require Import List. Implicit Types m n : nat. Lemma cons_inj_nat : forall m n l, n :: l = m :: l -> n = m. intros m n. Lemma cons_inj_bool : forall (m n:bool) l, n :: l = m :: l -> n = m. \end{coq_example} \begin{Variants} \item {\tt Implicit Type {\ident} : {\type}}\\ This is useful for declaring the implicit type of a single variable. \item {\tt Implicit Types\,% (\,{\ident$_{1,1}$}\ldots{\ident$_{1,k_1}$}\,{\tt :}\,{\term$_1$} {\tt )}\,% \ldots\,{\tt (}\,{\ident$_{n,1}$}\ldots{\ident$_{n,k_n}$}\,{\tt :}\,% {\term$_n$} {\tt )}.}\\ Adds $n$ blocks of implicit types with different specifications. 
\end{Variants} \subsection{Implicit generalization \label{implicit-generalization} \comindex{Generalizable Variables}} Implicit generalization is an automatic elaboration of a statement with free variables into a closed statement where these variables are quantified explicitly. Implicit generalization is done inside binders starting with a \verb|`| and terms delimited by \verb|`{ }| and \verb|`( )|, always introducing maximally inserted implicit arguments for the generalized variables. Inside implicit generalization delimiters, free variables in the current context are automatically quantified using a product or a lambda abstraction to generate a closed term. In the following statement for example, the variables \texttt{n} and \texttt{m} are autamatically generalized and become explicit arguments of the lemma as we are using \verb|`( )|: \begin{coq_example} Generalizable All Variables. Lemma nat_comm : `(n = n + 0). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} One can control the set of generalizable identifiers with the \texttt{Generalizable} vernacular command to avoid unexpected generalizations when mistyping identifiers. There are three variants of the command: \begin{quote} {\tt Generalizable (All|No) Variable(s)? ({\ident$_1$ \ident$_n$})?.} \end{quote} \begin{Variants} \item {\tt Generalizable All Variables.} All variables are candidate for generalization if they appear free in the context under a generalization delimiter. This may result in confusing errors in case of typos. In such cases, the context will probably contain some unexpected generalized variable. \item {\tt Generalizable No Variables.} Disable implicit generalization entirely. This is the default behavior. \item {\tt Generalizable Variable(s)? {\ident$_1$ \ident$_n$}.} Allow generalization of the given identifiers only. Calling this command multiple times adds to the allowed identifiers. \item {\tt Global Generalizable} Allows to export the choice of generalizable variables. \end{Variants} One can also use implicit generalization for binders, in which case the generalized variables are added as binders and set maximally implicit. \begin{coq_example*} Definition id `(x : A) : A := x. \end{coq_example*} \begin{coq_example} Print id. \end{coq_example} The generalizing binders \verb|`{ }| and \verb|`( )| work similarly to their explicit counterparts, only binding the generalized variables implicitly, as maximally-inserted arguments. In these binders, the binding name for the bound object is optional, whereas the type is mandatory, dually to regular binders. \section{Coercions \label{Coercions} \index{Coercions}} Coercions can be used to implicitly inject terms from one {\em class} in which they reside into another one. A {\em class} is either a sort (denoted by the keyword {\tt Sortclass}), a product type (denoted by the keyword {\tt Funclass}), or a type constructor (denoted by its name), e.g. an inductive type or any constant with a type of the form \texttt{forall} $(x_1:A_1) .. (x_n:A_n),~s$ where $s$ is a sort. Then the user is able to apply an object that is not a function, but can be coerced to a function, and more generally to consider that a term of type A is of type B provided that there is a declared coercion between A and B. The main command is \comindex{Coercion} \begin{quote} \tt Coercion {\qualid} : {\class$_1$} >-> {\class$_2$}. \end{quote} which declares the construction denoted by {\qualid} as a coercion between {\class$_1$} and {\class$_2$}. 
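For instance (a small illustrative coercion, where the function {\tt bool\_in\_nat} is introduced only for the sake of the example), one may declare an injection of booleans into natural numbers and let {\Coq} insert it silently:
\begin{coq_example*}
Definition bool_in_nat (b:bool) := if b then 0 else 1.
Coercion bool_in_nat : bool >-> nat.
\end{coq_example*}
\begin{coq_example}
Check (S true).
\end{coq_example}
Here {\tt true}, of type {\tt bool}, is accepted where a natural number is
expected because the coercion {\tt bool\_in\_nat} is inserted automatically;
the inserted coercion can be displayed using {\tt Set Printing Coercions}.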
More details and examples, and a description of the commands related to coercions are provided in Chapter~\ref{Coercions-full}. \section[Printing constructions in full]{Printing constructions in full\label{SetPrintingAll} \comindex{Set Printing All} \comindex{Unset Printing All}} Coercions, implicit arguments, the type of pattern-matching, but also notations (see Chapter~\ref{Addoc-syntax}) can obfuscate the behavior of some tactics (typically the tactics applying to occurrences of subterms are sensitive to the implicit arguments). The command \begin{quote} {\tt Set Printing All.} \end{quote} deactivates all high-level printing features such as coercions, implicit arguments, returned type of pattern-matching, notations and various syntactic sugar for pattern-matching or record projections. Otherwise said, {\tt Set Printing All} includes the effects of the commands {\tt Set Printing Implicit}, {\tt Set Printing Coercions}, {\tt Set Printing Synth}, {\tt Unset Printing Projections} and {\tt Unset Printing Notations}. To reactivate the high-level printing features, use the command \begin{quote} {\tt Unset Printing All.} \end{quote} \section[Printing universes]{Printing universes\label{PrintingUniverses} \comindex{Set Printing Universes} \comindex{Unset Printing Universes}} The following command: \begin{quote} {\tt Set Printing Universes} \end{quote} activates the display of the actual level of each occurrence of {\Type}. See Section~\ref{Sorts} for details. This wizard option, in combination with \texttt{Set Printing All} (see section~\ref{SetPrintingAll}) can help to diagnose failures to unify terms apparently identical but internally different in the Calculus of Inductive Constructions. To reactivate the display of the actual level of the occurrences of {\Type}, use \begin{quote} {\tt Unset Printing Universes.} \end{quote} \comindex{Print Universes} \comindex{Print Sorted Universes} The constraints on the internal level of the occurrences of {\Type} (see Section~\ref{Sorts}) can be printed using the command \begin{quote} {\tt Print \zeroone{Sorted} Universes.} \end{quote} If the optional {\tt Sorted} option is given, each universe will be made equivalent to a numbered label reflecting its level (with a linear ordering) in the universe hierarchy. This command also accepts an optional output filename: \begin{quote} \tt Print \zeroone{Sorted} Universes {\str}. \end{quote} If {\str} ends in \texttt{.dot} or \texttt{.gv}, the constraints are printed in the DOT language, and can be processed by Graphviz tools. The format is unspecified if {\str} doesn't end in \texttt{.dot} or \texttt{.gv}. \section[Existential variables]{Existential variables\label{ExistentialVariables}} Coq terms can include existential variables. An existential variable is a placeholder intended to eventually be replaced by an actual subterm though which subterm it will be replaced by is still unknown. Existential variables are generated in place of unsolvable implicit arguments when using commands such as \texttt{Check} (see Section~\ref{Check}) or in place of unsolvable instances when using tactics such as \texttt{eapply} (see Section~\ref{eapply}). They can only appear as the result of a command displaying a term and they are represented by ``?'' followed by a number. They cannot be entered by the user (though they can be generated from ``\_'' when the corresponding implicit argument is unsolvable). A given existential variable name can occur several times in a term meaning the corresponding expected instance is shared. 
Each existential variable is relative to a context, as shown by {\tt Show Existential} when in the process of proving a goal (see Section~\ref{ShowExistentials}). Henceforth, each occurrence of an existential variable in a term is subject to an instance of the variables of its context of definition which is specific to this occurrence. \subsection{Explicit displaying of existential instances for pretty-printing \comindex{Set Printing Existential Instances} \comindex{Unset Printing Existential Instances}} The command: \begin{quote} {\tt Set Printing Existential Instances} \end{quote} activates the display of how the context of an existential variable is instantiated on each of its occurrences. To deactivate the display of the instances of existential variables, use \begin{quote} {\tt Unset Printing Existential Instances.} \end{quote} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-pro.tex0000640000175000001440000004030012111446715016333 0ustar notinusers\chapter[Proof handling]{Proof handling\index{Proof editing} \label{Proof-handling}} In \Coq's proof editing mode all top-level commands documented in Chapter~\ref{Vernacular-commands} remain available and the user has access to specialized commands dealing with proof development pragmas documented in this section. He can also use some other specialized commands called {\em tactics}. They are the very tools allowing the user to deal with logical reasoning. They are documented in Chapter~\ref{Tactics}.\\ When switching in editing proof mode, the prompt \index{Prompt} {\tt Coq <} is changed into {\tt {\ident} <} where {\ident} is the declared name of the theorem currently edited. At each stage of a proof development, one has a list of goals to prove. Initially, the list consists only in the theorem itself. After having applied some tactics, the list of goals contains the subgoals generated by the tactics. To each subgoal is associated a number of hypotheses called the {\em \index*{local context}} of the goal. Initially, the local context contains the local variables and hypotheses of the current section (see Section~\ref{Variable}) and the local variables and hypotheses of the theorem statement. It is enriched by the use of certain tactics (see e.g. {\tt intro} in Section~\ref{intro}). When a proof is completed, the message {\tt Proof completed} is displayed. One can then register this proof as a defined constant in the environment. Because there exists a correspondence between proofs and terms of $\lambda$-calculus, known as the {\em Curry-Howard isomorphism} \cite{How80,Bar91,Gir89,Hue89}, \Coq~ stores proofs as terms of {\sc Cic}. Those terms are called {\em proof terms}\index{Proof term}. \ErrMsg When one attempts to use a proof editing command out of the proof editing mode, \Coq~ raises the error message : \errindex{No focused proof}. \section{Switching on/off the proof editing mode} The proof editing mode is entered by asserting a statement, which typically is the assertion of a theorem: \begin{quote} {\tt Theorem {\ident} \zeroone{\binders} : {\form}.\comindex{Theorem} \label{Theorem}} \end{quote} The list of assertion commands is given in Section~\ref{Assertions}. The command {\tt Goal} can also be used. \subsection[Goal {\form}.]{\tt Goal {\form}.\comindex{Goal}\label{Goal}} This is intended for quick assertion of statements, without knowing in advance which name to give to the assertion, typically for quick testing of the provability of a statement. 
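For instance, one may state a goal anonymously as follows (a minimal illustration; the proof is not pursued here):
\begin{coq_example}
Goal forall n:nat, n = n.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}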
If the proof of the statement is eventually completed and validated, the statement is then bound to the name {\tt Unnamed\_thm} (or a variant of this name not already used for another statement). \subsection[\tt Qed.]{\tt Qed.\comindex{Qed}\label{Qed}} This command is available in interactive editing proof mode when the proof is completed. Then {\tt Qed} extracts a proof term from the proof script, switches back to {\Coq} top-level and attaches the extracted proof term to the declared name of the original goal. This name is added to the environment as an {\tt Opaque} constant. \begin{ErrMsgs} \item \errindex{Attempt to save an incomplete proof} %\item \ident\ \errindex{already exists}\\ % The implicit name is already defined. You have then to provide % explicitly a new name (see variant 3 below). \item Sometimes an error occurs when building the proof term, because tactics do not enforce completely the term construction constraints. The user should also be aware of the fact that since the proof term is completely rechecked at this point, one may have to wait a while when the proof is large. In some exceptional cases one may even incur a memory overflow. \end{ErrMsgs} \begin{Variants} \item {\tt Defined.} \comindex{Defined} \label{Defined} Defines the proved term as a transparent constant. \item {\tt Save.} \comindex{Save} This is a deprecated equivalent to {\tt Qed}. \item {\tt Save {\ident}.} Forces the name of the original goal to be {\ident}. This command (and the following ones) can only be used if the original goal has been opened using the {\tt Goal} command. \item {\tt Save Theorem {\ident}.} \\ {\tt Save Lemma {\ident}.} \\ {\tt Save Remark {\ident}.}\\ {\tt Save Fact {\ident}.} {\tt Save Corollary {\ident}.} {\tt Save Proposition {\ident}.} Are equivalent to {\tt Save {\ident}.} \end{Variants} \subsection[\tt Admitted.]{\tt Admitted.\comindex{Admitted}\label{Admitted}} This command is available in interactive editing proof mode to give up the current proof and declare the initial goal as an axiom. \subsection[\tt Proof {\term}.]{\tt Proof {\term}.\comindex{Proof} \label{BeginProof}} This command applies in proof editing mode. It is equivalent to {\tt exact {\term}; Save.} That is, you have to give the full proof in one gulp, as a proof term (see Section~\ref{exact}). \variant {\tt Proof.} Is a noop which is useful to delimit the sequence of tactic commands which start a proof, after a {\tt Theorem} command. It is a good practice to use {\tt Proof.} as an opening parenthesis, closed in the script with a closing {\tt Qed.} \SeeAlso {\tt Proof with {\tac}.} in Section~\ref{ProofWith}. \subsection[\tt Proof using {\ident$_1$ \dots {\ident$_n$}}.] {\tt Proof using {\ident$_1$ \dots {\ident$_n$}}. \comindex{Proof using} \label{ProofUsing}} This command applies in proof editing mode. It declares the set of section variables (see~\ref{Variable}) used by the proof. At {\tt Qed} time, the system will assert that the set of section variables actually used in the proof is a subset of the declared one. The set of declared variables is closed under type dependency. For example if {\tt T} is variable and {\tt a} is a variable of type {\tt T}, the commands {\tt Proof using a} and {\tt Proof using T a} are actually equivalent. \variant {\tt Proof using {\ident$_1$ \dots {\ident$_n$}} with {\tac}.} in Section~\ref{ProofWith}. 
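As a complete illustration (a minimal sketch, where the section and variable names are chosen arbitrarily for the example), the following proof declares that it may use the section variable {\tt A} but not {\tt B}:
\begin{coq_example*}
Section ProofUsingExample.
Variable A : Type.
Variable B : Type.
Lemma identity_on_A : A -> A.
Proof using A.
exact (fun a => a).
Qed.
End ProofUsingExample.
\end{coq_example*}
Here {\tt Qed} succeeds because the proof term mentions no section variable
other than {\tt A}.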
\subsection[\tt Abort.]{\tt Abort.\comindex{Abort}} This command cancels the current proof development, switching back to the previous proof development, or to the \Coq\ toplevel if no other proof was edited. \begin{ErrMsgs} \item \errindex{No focused proof (No proof-editing in progress)} \end{ErrMsgs} \begin{Variants} \item {\tt Abort {\ident}.} Aborts the editing of the proof named {\ident}. \item {\tt Abort All.} Aborts all current goals, switching back to the \Coq\ toplevel. \end{Variants} %%%% \subsection[\tt Existential {\num} := {\term}.]{\tt Existential {\num} := {\term}.\comindex{Existential} \label{Existential}} This command allows to instantiate an existential variable. {\tt \num} is an index in the list of uninstantiated existential variables displayed by {\tt Show Existentials.} (described in Section~\ref{Show}) This command is intended to be used to instantiate existential variables when the proof is completed but some uninstantiated existential variables remain. To instantiate existential variables during proof edition, you should use the tactic {\tt instantiate}. \SeeAlso {\tt instantiate (\num:= \term).} in Section~\ref{instantiate}. \SeeAlso {\tt Grab Existential Variables.} below. \subsection[\tt Grab Existential Variables.]{\tt Grab Existential Variables.\comindex{Grab Existential Variables} \label{GrabEvars}} This command can be run when a proof has no more goal to be solved but has remaining uninstantiated existential variables. It takes every uninstantiated existential variable and turns it into a goal. %%%%%%%% \section{Navigation in the proof tree} %%%%%%%% \subsection[\tt Undo.]{\tt Undo.\comindex{Undo}} This command cancels the effect of the last tactic command. Thus, it backtracks one step. \begin{ErrMsgs} \item \errindex{No focused proof (No proof-editing in progress)} \end{ErrMsgs} \begin{Variants} \item {\tt Undo {\num}.} Repeats {\tt Undo} {\num} times. \end{Variants} \subsection[\tt Restart.]{\tt Restart.\comindex{Restart}} This command restores the proof editing process to the original goal. \begin{ErrMsgs} \item \errindex{No focused proof to restart} \end{ErrMsgs} \subsection[\tt Focus.]{\tt Focus.\comindex{Focus}} This focuses the attention on the first subgoal to prove and the printing of the other subgoals is suspended until the focused subgoal is solved or unfocused. This is useful when there are many current subgoals which clutter your screen. \begin{Variant} \item {\tt Focus {\num}.}\\ This focuses the attention on the $\num^{th}$ subgoal to prove. \end{Variant} \subsection[\tt Unfocus.]{\tt Unfocus.\comindex{Unfocus}} This command restores to focus the goal that were suspended by the last {\tt Focus} command. \subsection[\tt Unfocused.]{\tt Unfocused.\comindex{Unfocused}} Succeeds in the proof is fully unfocused, fails is there are some goals out of focus. \subsection[\tt \{ \textrm{and} \}]{\tt \{ \textrm{and} \}\comindex{\{}\comindex{\}}} The command {\tt \{} (without a terminating period) focuses on the first goal, much like {\tt Focus.} does, however, the subproof can only be unfocused when it has been fully solved (\emph{i.e.} when there is no focused goal left). Unfocusing is then handled by {\tt \}} (again, without a terminating period). See also example in next section. \begin{ErrMsgs} \item \errindex{Error: This proof is focused, but cannot be unfocused this way} You are trying to use a bullet that is already in use or a {\tt \}} but the current subproof has not been fully solved. 
\end{ErrMsgs} \subsection[Bullets]{Bullets\comindex{+ (command)}\comindex{- (command)}\comindex{* (command)}\index{Bullets}} Alternatively to {\tt \{} and {\tt \}}, proofs can be structured with bullets. The use of a bullet $b$ for the first time focuses on the first goal $g$, the same bullet cannot be used again until the proof of $g$ is completed, then it is mandatory to focus the next goal with $b$. The consequence is that $g$ and all goals present when $g$ was focused are focused with the same bullet $b$. See the example below. Different bullets can be used to nest levels. The scope of bullet does not go beyond enclosing {\tt \{} and {\tt \}}, so bullets can be reused as further nesting levels provided they are delimited by these. Available bullets are {\tt -}, {\tt +} and {\tt *} (without a terminating period). The following example script illustrates all these features: \begin{coq_example*} Goal (((True/\True)/\True)/\True)/\True. Proof. split. - split. + split. * { split. - trivial. - trivial. } * trivial. + trivial. - assert True. { trivial. } assumption. \end{coq_example*} Remark: In ProofGeneral (emacs interface to coq), you must use bullets with this priority ordering to have a correct indentation: {\tt -}, {\tt +}, {\tt *}. That is {\tt -} must be the outer bullet and {\tt *} the inner one, like in the example above. \begin{ErrMsgs} \item \errindex{Error: No such unproven subgoal} there is no proof under focus (because it has just been solved), so the command you are trying to use cannot be applied. You need to first focus the next proof by using the bullet corresponding to the right level (using an incorrect bullet also generates this message). \item \errindex{Error: This proof is focused, but cannot be unfocused this way} You are trying to use a bullet that is already in use or a {\tt \}} but the current subproof has not been fully solved. \end{ErrMsgs} \section{Requesting information} \subsection[\tt Show.]{\tt Show.\comindex{Show}\label{Show}} This command displays the current goals. \begin{Variants} \item {\tt Show {\num}.}\\ Displays only the {\num}-th subgoal.\\ \begin{ErrMsgs} \item \errindex{No such goal} \item \errindex{No focused proof} \end{ErrMsgs} \item {\tt Show Implicits.}\comindex{Show Implicits}\\ Displays the current goals, printing the implicit arguments of constants. \item {\tt Show Implicits {\num}.}\\ Same as above, only displaying the {\num}-th subgoal. \item {\tt Show Script.}\comindex{Show Script}\\ Displays the whole list of tactics applied from the beginning of the current proof. This tactics script may contain some holes (subgoals not yet proved). They are printed under the form \verb!!. \item {\tt Show Tree.}\comindex{Show Tree}\\ This command can be seen as a more structured way of displaying the state of the proof than that provided by {\tt Show Script}. Instead of just giving the list of tactics that have been applied, it shows the derivation tree constructed by then. Each node of the tree contains the conclusion of the corresponding sub-derivation (i.e. a goal with its corresponding local context) and the tactic that has generated all the sub-derivations. The leaves of this tree are the goals which still remain to be proved. %\item {\tt Show Node}\comindex{Show Node}\\ % Not yet documented \item {\tt Show Proof.}\comindex{Show Proof}\\ It displays the proof term generated by the tactics that have been applied. If the proof is not completed, this term contain holes, which correspond to the sub-terms which are still to be constructed. 
These holes appear as a question mark indexed by an integer, and applied to the list of variables in the context, since it may depend on them. The types obtained by abstracting away the context from the type of each hole-placer are also printed. \item {\tt Show Conjectures.}\comindex{Show Conjectures}\\ It prints the list of the names of all the theorems that are currently being proved. As it is possible to start proving a previous lemma during the proof of a theorem, this list may contain several names. \item{\tt Show Intro.}\comindex{Show Intro}\\ If the current goal begins by at least one product, this command prints the name of the first product, as it would be generated by an anonymous {\tt Intro}. The aim of this command is to ease the writing of more robust scripts. For example, with an appropriate Proof General macro, it is possible to transform any anonymous {\tt Intro} into a qualified one such as {\tt Intro y13}. In the case of a non-product goal, it prints nothing. \item{\tt Show Intros.}\comindex{Show Intros}\\ This command is similar to the previous one, it simulates the naming process of an {\tt Intros}. \item{\tt Show Existentials\label{ShowExistentials}}\comindex{Show Existentials} \\ It displays the set of all uninstantiated existential variables in the current proof tree, along with the type and the context of each variable. \end{Variants} \subsection[\tt Guarded.]{\tt Guarded.\comindex{Guarded}\label{Guarded}} Some tactics (e.g. refine \ref{refine}) allow to build proofs using fixpoint or co-fixpoint constructions. Due to the incremental nature of interactive proof construction, the check of the termination (or guardedness) of the recursive calls in the fixpoint or cofixpoint constructions is postponed to the time of the completion of the proof. The command \verb!Guarded! allows to verify if the guard condition for fixpoint and cofixpoint is violated at some time of the construction of the proof without having to wait the completion of the proof." \section{Controlling the effect of proof editing commands} \subsection[\tt Set Hyps Limit {\num}.]{\tt Set Hyps Limit {\num}.\comindex{Set Hyps Limit}} This command sets the maximum number of hypotheses displayed in goals after the application of a tactic. All the hypotheses remains usable in the proof development. \subsection[\tt Unset Hyps Limit.]{\tt Unset Hyps Limit.\comindex{Unset Hyps Limit}} This command goes back to the default mode which is to print all available hypotheses. \subsection[\tt Set Automatic Introduction.]{\tt Set Automatic Introduction.\comindex{Set Automatic Introduction}\comindex{Unset Automatic Introduction}\label{Set Automatic Introduction}} The option {\tt Automatic Introduction} controls the way binders are handled in assertion commands such as {\tt Theorem {\ident} \zeroone{\binders} : {\form}}. When the option is set, which is the default, {\binders} are automatically put in the local context of the goal to prove. The option can be unset by issuing {\tt Unset Automatic Introduction}. When the option is unset, {\binders} are discharged on the statement to be proved and a tactic such as {\tt intro} (see Section~\ref{intro}) has to be used to move the assumptions to the local context. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/hevea.sty0000640000175000001440000000517510406335323015504 0ustar notinusers% hevea : hevea.sty % This is a very basic style file for latex document to be processed % with hevea. 
It contains definitions of LaTeX environment which are % processed in a special way by the translator. % Mostly : % - latexonly, not processed by hevea, processed by latex. % - htmlonly , the reverse. % - rawhtml, to include raw HTML in hevea output. % - toimage, to send text to the image file. % The package also provides hevea logos, html related commands (ahref % etc.), void cutting and image commands. \NeedsTeXFormat{LaTeX2e} \ProvidesPackage{hevea}[2002/01/11] \RequirePackage{comment} \newif\ifhevea\heveafalse \@ifundefined{ifimagen}{\newif\ifimagen\imagenfalse} \makeatletter% \newcommand{\heveasmup}[2]{% \raise #1\hbox{$\m@th$% \csname S@\f@size\endcsname \fontsize\sf@size 0% \math@fontsfalse\selectfont #2% }}% \DeclareRobustCommand{\hevea}{H\kern-.15em\heveasmup{.2ex}{E}\kern-.15emV\kern-.15em\heveasmup{.2ex}{E}\kern-.15emA}% \DeclareRobustCommand{\hacha}{H\kern-.15em\heveasmup{.2ex}{A}\kern-.15emC\kern-.1em\heveasmup{.2ex}{H}\kern-.15emA}% \DeclareRobustCommand{\html}{\protect\heveasmup{0.ex}{HTML}} %%%%%%%%% Hyperlinks hevea style \newcommand{\ahref}[2]{{#2}} \newcommand{\ahrefloc}[2]{{#2}} \newcommand{\aname}[2]{{#2}} \newcommand{\ahrefurl}[1]{\texttt{#1}} \newcommand{\footahref}[2]{#2\footnote{\texttt{#1}}} \newcommand{\mailto}[1]{\texttt{#1}} \newcommand{\imgsrc}[2][]{} \newcommand{\home}[1]{\protect\raisebox{-.75ex}{\char126}#1} \AtBeginDocument {\@ifundefined{url} {%url package is not loaded \let\url\ahref\let\oneurl\ahrefurl\let\footurl\footahref} {}} %% Void cutting instructions \newcounter{cuttingdepth} \newcommand{\tocnumber}{} \newcommand{\notocnumber}{} \newcommand{\cuttingunit}{} \newcommand{\cutdef}[2][]{} \newcommand{\cuthere}[2]{} \newcommand{\cutend}{} \newcommand{\htmlhead}[1]{} \newcommand{\htmlfoot}[1]{} \newcommand{\htmlprefix}[1]{} \newenvironment{cutflow}[1]{}{} \newcommand{\cutname}[1]{} \newcommand{\toplinks}[3]{} %%%% Html only \excludecomment{rawhtml} \newcommand{\rawhtmlinput}[1]{} \excludecomment{htmlonly} %%%% Latex only \newenvironment{latexonly}{}{} \newenvironment{verblatex}{}{} %%%% Image file stuff \def\toimage{\endgroup} \def\endtoimage{\begingroup\def\@currenvir{toimage}} \def\verbimage{\endgroup} \def\endverbimage{\begingroup\def\@currenvir{verbimage}} \newcommand{\imageflush}[1][]{} %%% Bgcolor definition \newsavebox{\@bgcolorbin} \newenvironment{bgcolor}[2][] {\newcommand{\@mycolor}{#2}\begin{lrbox}{\@bgcolorbin}\vbox\bgroup} {\egroup\end{lrbox}% \begin{flushleft}% \colorbox{\@mycolor}{\usebox{\@bgcolorbin}}% \end{flushleft}} %%% Postlude \makeatother coq-8.4pl2/doc/refman/RefMan-uti.tex0000640000175000001440000002256011743503527016351 0ustar notinusers\chapter[Utilities]{Utilities\label{Utilities}} The distribution provides utilities to simplify some tedious works beside proof development, tactics writing or documentation. \section[Building a toplevel extended with user tactics]{Building a toplevel extended with user tactics\label{Coqmktop}\index{Coqmktop@{\tt coqmktop}}} The native-code version of \Coq\ cannot dynamically load user tactics using Objective Caml code. It is possible to build a toplevel of \Coq, with Objective Caml code statically linked, with the tool {\tt coqmktop}. For example, one can build a native-code \Coq\ toplevel extended with a tactic which source is in {\tt tactic.ml} with the command \begin{verbatim} % coqmktop -opt -o mytop.out tactic.cmx \end{verbatim} where {\tt tactic.ml} has been compiled with the native-code compiler {\tt ocamlopt}. This command generates an executable called {\tt mytop.out}. 
To use this executable to compile your \Coq\ files, use {\tt coqc -image mytop.out}. A basic example is the native-code version of \Coq\ ({\tt coqtop.opt}), which can be generated by {\tt coqmktop -opt -o coqopt.opt}. \paragraph[Application: how to use the Objective Caml debugger with Coq.]{Application: how to use the Objective Caml debugger with Coq.\index{Debugger}} One useful application of \texttt{coqmktop} is to build a \Coq\ toplevel in order to debug your tactics with the Objective Caml debugger. You need to have configured and compiled \Coq\ for debugging (see the file \texttt{INSTALL} included in the distribution). Then, you must compile the Caml modules of your tactic with the option \texttt{-g} (with the bytecode compiler) and build a stand-alone bytecode toplevel with the following command: \begin{quotation} \texttt{\% coqmktop -g -o coq-debug}~\emph{} \end{quotation} To launch the \ocaml\ debugger with the image you need to execute it in an environment which correctly sets the \texttt{COQLIB} variable. Moreover, you have to indicate the directories in which \texttt{ocamldebug} should search for Caml modules. A possible solution is to use a wrapper around \texttt{ocamldebug} which detects the executables containing the word \texttt{coq}. In this case, the debugger is called with the required additional arguments. In other cases, the debugger is simply called without additional arguments. Such a wrapper can be found in the \texttt{dev/} subdirectory of the sources. \section[Modules dependencies]{Modules dependencies\label{Dependencies}\index{Dependencies} \index{Coqdep@{\tt coqdep}}} In order to compute modules dependencies (so to use {\tt make}), \Coq\ comes with an appropriate tool, {\tt coqdep}. {\tt coqdep} computes inter-module dependencies for \Coq\ and \ocaml\ programs, and prints the dependencies on the standard output in a format readable by make. When a directory is given as argument, it is recursively looked at. Dependencies of \Coq\ modules are computed by looking at {\tt Require} commands ({\tt Require}, {\tt Requi\-re Export}, {\tt Require Import}, but also at the command {\tt Declare ML Module}. Dependencies of \ocaml\ modules are computed by looking at \verb!open! commands and the dot notation {\em module.value}. However, this is done approximatively and you are advised to use {\tt ocamldep} instead for the \ocaml\ modules dependencies. See the man page of {\tt coqdep} for more details and options. \section[Creating a {\tt Makefile} for \Coq\ modules]{Creating a {\tt Makefile} for \Coq\ modules\label{Makefile} \index{Makefile@{\tt Makefile}} \index{CoqMakefile@{\tt coq\_Makefile}}} When a proof development becomes large, is split into several files or contains Ocaml plugins, it becomes crucial to use a tool like {\tt make} to compile \Coq\ modules. The writing of a generic and complete {\tt Makefile} may be a tedious work and that's why \Coq\ provides a tool to automate its creation, {\tt coq\_makefile}. You can get a description of the arguments by the command \texttt{\% coq\_makefile --help}. Arguments can be directly written on the command line interface but it is recommended to write them in a file ({\tt \_CoqProject} by default) and then call {\tt coq\_makefile -f \_CoqProject -o Makefile}. That means options are read from {\tt \_CoqProject} and written in {\tt Makefile}. This way, {\tt Makefile} will be automagically regenerated when something changes in {\tt \_CoqProject}. 
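For reference, such a file simply lists the options and the {\Coq} files of the development; for instance (a schematic example, where the library name and the file names are arbitrary):
\begin{verbatim}
-R . MyFancyLib
Foo.v
Bar.v
\end{verbatim}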
The first time you use this tool, you may be happy with: \begin{quotation} \texttt{\% \{ echo '-R .} {\em MyFancyLib} \texttt{' ; find -name '*.v' -print \} > \_CoqProject \&\& coq\_makefile -f \_CoqProject -o Makefile} \end{quotation} To customize things further, remember the following: \begin{itemize} \item \Coq files must end in {\tt .v}, \ocaml modules in {\tt .ml4} if they require camlp preproccessing (and in {\tt .ml} otherwise), and \ocaml module signatures in {\tt .mli}. \item Whenever a directory is passed as argument, any inner {\tt Makefile} will be recursively called. \item {\tt -R} option is for \Coq, {\tt -I} for \ocaml. The same directory can be ``included'' by both. Using {\tt -R} option gives a correct logical path and a correct installation emplacement to your coq files. \item If your files depend on an external library, never use {\tt -R \dots} to include it in the path, the {\em make clean} command would erase it! Take advantage of the \verb:COQPATH: variable (see \ref{envars}) instead if necessary. \end{itemize} Under normal circumstances, the only other variable that you may use is \verb:$COQBIN: to specify the directory where the binaries are. \section[Documenting \Coq\ files with coqdoc]{Documenting \Coq\ files with coqdoc\label{coqdoc} \index{Coqdoc@{\sf coqdoc}}} \input{./coqdoc} \section{Exporting \Coq\ theories to XML} \input{./Helm} \section[Embedded \Coq\ phrases inside \LaTeX\ documents]{Embedded \Coq\ phrases inside \LaTeX\ documents\label{Latex} \index{Coqtex@{\tt coq-tex}}\index{Latex@{\LaTeX}}} When writing a documentation about a proof development, one may want to insert \Coq\ phrases inside a \LaTeX\ document, possibly together with the corresponding answers of the system. We provide a mechanical way to process such \Coq\ phrases embedded in \LaTeX\ files: the {\tt coq-tex} filter. This filter extracts Coq phrases embedded in LaTeX files, evaluates them, and insert the outcome of the evaluation after each phrase. Starting with a file {\em file}{\tt.tex} containing \Coq\ phrases, the {\tt coq-tex} filter produces a file named {\em file}{\tt.v.tex} with the \Coq\ outcome. There are options to produce the \Coq\ parts in smaller font, italic, between horizontal rules, etc. See the man page of {\tt coq-tex} for more details. \medskip\noindent {\bf Remark.} This Reference Manual and the Tutorial have been completely produced with {\tt coq-tex}. \section[\Coq\ and \emacs]{\Coq\ and \emacs\label{Emacs}\index{Emacs}} \subsection{The \Coq\ Emacs mode} \Coq\ comes with a Major mode for \emacs, {\tt coq.el}. This mode provides syntax highlighting and also a rudimentary indentation facility in the style of the Caml \emacs\ mode. Add the following lines to your \verb!.emacs! file: \begin{verbatim} (setq auto-mode-alist (cons '("\\.v$" . coq-mode) auto-mode-alist)) (autoload 'coq-mode "coq" "Major mode for editing Coq vernacular." t) \end{verbatim} The \Coq\ major mode is triggered by visiting a file with extension {\tt .v}, or manually with the command \verb!M-x coq-mode!. It gives you the correct syntax table for the \Coq\ language, and also a rudimentary indentation facility: \begin{itemize} \item pressing {\sc Tab} at the beginning of a line indents the line like the line above; \item extra {\sc Tab}s increase the indentation level (by 2 spaces by default); \item M-{\sc Tab} decreases the indentation level. \end{itemize} An inferior mode to run \Coq\ under Emacs, by Marco Maggesi, is also included in the distribution, in file \texttt{coq-inferior.el}. 
Instructions to use it are contained in this file. \subsection[Proof General]{Proof General\index{Proof General}} Proof General is a generic interface for proof assistants based on Emacs. The main idea is that the \Coq\ commands you are editing are sent to a \Coq\ toplevel running behind Emacs and the answers of the system automatically inserted into other Emacs buffers. Thus you don't need to copy-paste the \Coq\ material from your files to the \Coq\ toplevel or conversely from the \Coq\ toplevel to some files. Proof General is developped and distributed independently of the system \Coq. It is freely available at \verb!proofgeneral.inf.ed.ac.uk!. \section[Module specification]{Module specification\label{gallina}\index{Gallina@{\tt gallina}}} Given a \Coq\ vernacular file, the {\tt gallina} filter extracts its specification (inductive types declarations, definitions, type of lemmas and theorems), removing the proofs parts of the file. The \Coq\ file {\em file}{\tt.v} gives birth to the specification file {\em file}{\tt.g} (where the suffix {\tt.g} stands for \gallina). See the man page of {\tt gallina} for more details and options. \section[Man pages]{Man pages\label{ManPages}\index{Man pages}} There are man pages for the commands {\tt coqdep}, {\tt gallina} and {\tt coq-tex}. Man pages are installed at installation time (see installation instructions in file {\tt INSTALL}, step 6). %BEGIN LATEX \RefManCutCommand{ENDREFMAN=\thepage} %END LATEX %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: coq-8.4pl2/doc/refman/RefMan-modr.tex0000640000175000001440000003467411366307247016524 0ustar notinusers\chapter[The Module System]{The Module System\label{chapter:Modules}} The module system extends the Calculus of Inductive Constructions providing a convenient way to structure large developments as well as a mean of massive abstraction. %It is described in details in Judicael's thesis and Jacek's thesis \section{Modules and module types} \paragraph{Access path.} It is denoted by $p$, it can be either a module variable $X$ or, if $p'$ is an access path and $id$ an identifier, then $p'.id$ is an access path. \paragraph{Structure element.} It is denoted by \elem\ and is either a definition of a constant, an assumption, a definition of an inductive, a definition of a module, an alias of module or a module type abbreviation. \paragraph{Structure expression.} It is denoted by $S$ and can be: \begin{itemize} \item an access path $p$ \item a plain structure $\struct{\nelist{\elem}{;}}$ \item a functor $\functor{X}{S}{S'}$, where $X$ is a module variable, $S$ and $S'$ are structure expression \item an application $S\,p$, where $S$ is a structure expression and $p$ an access path \item a refined structure $\with{S}{p}{p'}$ or $\with{S}{p}{t:T}$ where $S$ is a structure expression, $p$ and $p'$ are access paths, $t$ is a term and $T$ is the type of $t$. \end{itemize} \paragraph{Module definition,} is written $\Mod{X}{S}{S'}$ and consists of a module variable $X$, a module type $S$ which can be any structure expression and optionally a module implementation $S'$ which can be any structure expression except a refined structure. \paragraph{Module alias,} is written $\ModA{X}{p}$ and consists of a module variable $X$ and a module path $p$. \paragraph{Module type abbreviation,} is written $\ModType{Y}{S}$, where $Y$ is an identifier and $S$ is any structure expression . 
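For orientation, here is a small example written in the concrete syntax of {\Coq} that illustrates these notions (an informal sketch only; the names {\tt SIG}, {\tt M} and {\tt N} are arbitrary):
\begin{verbatim}
Module Type SIG.            (* a module type, given by a plain structure *)
  Parameter t : Type.
End SIG.
Module M : SIG.             (* a module definition of module type SIG *)
  Definition t := nat.
End M.
Module N := M.              (* a module alias *)
Module Type SIG' := SIG.    (* a module type abbreviation *)
\end{verbatim}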
\section{Typing Modules} In order to introduce the typing system we first slightly extend the syntactic class of terms and environments given in section~\ref{Terms}. The environments, apart from definitions of constants and inductive types now also hold any other structure elements. Terms, apart from variables, constants and complex terms, include also access paths. We also need additional typing judgments: \begin{itemize} \item \WFT{E}{S}, denoting that a structure $S$ is well-formed, \item \WTM{E}{p}{S}, denoting that the module pointed by $p$ has type $S$ in environment $E$. \item \WEV{E}{S}{\overline{S}}, denoting that a structure $S$ is evaluated to a structure $\overline{S}$ in weak head normal form. \item \WS{E}{S_1}{S_2}, denoting that a structure $S_1$ is a subtype of a structure $S_2$. \item \WS{E}{\elem_1}{\elem_2}, denoting that a structure element $\elem_1$ is more precise that a structure element $\elem_2$. \end{itemize} The rules for forming structures are the following: \begin{description} \item[WF-STR] \inference{% \frac{ \WF{E;E'}{} }{%%%%%%%%%%%%%%%%%%%%% \WFT{E}{\struct{E'}} } } \item[WF-FUN] \inference{% \frac{ \WFT{E;\ModS{X}{S}}{\overline{S'}} }{%%%%%%%%%%%%%%%%%%%%%%%%%% \WFT{E}{\functor{X}{S}{S'}} } } \end{description} Evaluation of structures to weak head normal form: \begin{description} \item[WEVAL-APP] \inference{% \frac{ \begin{array}{c} \WEV{E}{S}{\functor{X}{S_1}{S_2}}~~~~~\WEV{E}{S_1}{\overline{S_1}}\\ \WTM{E}{p}{S_3}\qquad \WS{E}{S_3}{\overline{S_1}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{S\,p}{S_2\{p/X,t_1/p_1.c_1,\ldots,t_n/p_n.c_n\}} } } \end{description} In the last rule, $\{t_1/p_1.c_1,\ldots,t_n/p_n.c_n\}$ is the resulting substitution from the inlining mechanism. We substitute in $S$ the inlined fields $p_i.c_i$ form $\ModS{X}{S_1}$ by the corresponding delta-reduced term $t_i$ in $p$. 
\begin{description} \item[WEVAL-WITH-MOD] \inference{% \frac{ \begin{array}{c} \WEV{E}{S}{\structe{\ModS{X}{S_1}}}~~~~~\WEV{E;\elem_1;\ldots;\elem_i}{S_1}{\overline{S_1}}\\ \WTM{E}{p}{S_2}\qquad \WS{E;\elem_1;\ldots;\elem_i}{S_2}{\overline{S_1}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{array}{c} \WEVT{E}{\with{S}{x}{p}}{\structes{\ModA{X}{p}}{p/X}} \end{array} } } \item[WEVAL-WITH-MOD-REC] \inference{% \frac{ \begin{array}{c} \WEV{E}{S}{\structe{\ModS{X_1}{S_1}}}\\ \WEV{E;\elem_1;\ldots;\elem_i}{\with{S_1}{p}{p_1}}{\overline{S_2}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{array}{c} \WEVT{E}{\with{S}{X_1.p}{p_1}}{\structes{\ModS{X}{\overline{S_2}}}{p_1/X_1.p}} \end{array} } } \item[WEVAL-WITH-DEF] \inference{% \frac{ \begin{array}{c} \WEV{E}{S}{\structe{\Assum{}{c}{T_1}}}\\ \WS{E;\elem_1;\ldots;\elem_i}{\Def{}{c}{t}{T}}{\Assum{}{c}{T_1}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{array}{c} \WEVT{E}{\with{S}{c}{t:T}}{\structe{\Def{}{c}{t}{T}}} \end{array} } } \item[WEVAL-WITH-DEF-REC] \inference{% \frac{ \begin{array}{c} \WEV{E}{S}{\structe{\ModS{X_1}{S_1}}}\\ \WEV{E;\elem_1;\ldots;\elem_i}{\with{S_1}{p}{p_1}}{\overline{S_2}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{array}{c} \WEVT{E}{\with{S}{X_1.p}{t:T}}{\structe{\ModS{X}{\overline{S_2}}}} \end{array} } } \item[WEVAL-PATH-MOD] \inference{% \frac{ \begin{array}{c} \WEV{E}{p}{\structe{ \Mod{X}{S}{S_1}}}\\ \WEV{E;\elem_1;\ldots;\elem_i}{S}{\overline{S}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{p.X}{\overline{S}} } } \inference{% \frac{ \begin{array}{c} \WF{E}{}~~~~~~\Mod{X}{S}{S_1}\in E\\ \WEV{E}{S}{\overline{S}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{X}{\overline{S}} } } \item[WEVAL-PATH-ALIAS] \inference{% \frac{ \begin{array}{c} \WEV{E}{p}{\structe{\ModA{X}{p_1}}}\\ \WEV{E;\elem_1;\ldots;\elem_i}{p_1}{\overline{S}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{p.X}{\overline{S}} } } \inference{% \frac{ \begin{array}{c} \WF{E}{}~~~~~~~\ModA{X}{p_1}\in E\\ \WEV{E}{p_1}{\overline{S}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{X}{\overline{S}} } } \item[WEVAL-PATH-TYPE] \inference{% \frac{ \begin{array}{c} \WEV{E}{p}{\structe{\ModType{Y}{S}}}\\ \WEV{E;\elem_1;\ldots;\elem_i}{S}{\overline{S}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{p.Y}{\overline{S}} } } \item[WEVAL-PATH-TYPE] \inference{% \frac{ \begin{array}{c} \WF{E}{}~~~~~~~\ModType{Y}{S}\in E\\ \WEV{E}{S}{\overline{S}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WEV{E}{Y}{\overline{S}} } } \end{description} Rules for typing module: \begin{description} \item[MT-EVAL] \inference{% \frac{ \WEV{E}{p}{\overline{S}} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WTM{E}{p}{\overline{S}} } } \item[MT-STR] \inference{% \frac{ \WTM{E}{p}{S} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WTM{E}{p}{S/p} } } \end{description} The last rule, called strengthening is used to make all module fields manifestly equal to themselves. 
The notation $S/p$ has the following meaning: \begin{itemize} \item if $S\lra\struct{\elem_1;\dots;\elem_n}$ then $S/p=\struct{\elem_1/p;\dots;\elem_n/p}$ where $\elem/p$ is defined as follows: \begin{itemize} \item $\Def{}{c}{t}{T}/p\footnote{Opaque definitions are processed as assumptions.} ~=~ \Def{}{c}{t}{T}$ \item $\Assum{}{c}{U}/p ~=~ \Def{}{c}{p.c}{U}$ \item $\ModS{X}{S}/p ~=~ \ModA{X}{p.X}$ \item $\ModA{X}{p'}/p ~=~ \ModA{X}{p'}$ \item $\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}/p ~=~ \Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ \item $\Indpstr{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'}{p} ~=~ \Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'}$ \end{itemize} \item if $S\lra\functor{X}{S'}{S''}$ then $S/p=S$ \end{itemize} The notation $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ denotes an inductive definition that is definitionally equal to the inductive definition in the module denoted by the path $p$. All rules which have $\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}$ as premises are also valid for $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$. We give the formation rule for $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ below as well as the equality rules on inductive types and constructors. \\ The module subtyping rules: \begin{description} \item[MSUB-STR] \inference{% \frac{ \begin{array}{c} \WS{E;\elem_1;\dots;\elem_n}{\elem_{\sigma(i)}}{\elem'_i} \textrm{ \ for } i=1..m \\ \sigma : \{1\dots m\} \ra \{1\dots n\} \textrm{ \ injective} \end{array} }{ \WS{E}{\struct{\elem_1;\dots;\elem_n}}{\struct{\elem'_1;\dots;\elem'_m}} } } \item[MSUB-FUN] \inference{% T_1 -> T_2 <: T_1' -> T_2' \frac{ \WS{E}{\overline{S_1'}}{\overline{S_1}}~~~~~~~~~~\WS{E;\ModS{X}{S_1'}}{\overline{S_2}}{\overline{S_2'}} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WS{E}{\functor{X}{S_1}{S_2}}{\functor{X}{S_1'}{S_2'}} } } % these are derived rules % \item[MSUB-EQ] % \inference{% % \frac{ % \WS{E}{T_1}{T_2}~~~~~~~~~~\WTERED{}{T_1}{=}{T_1'}~~~~~~~~~~\WTERED{}{T_2}{=}{T_2'} % }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % \WS{E}{T_1'}{T_2'} % } % } % \item[MSUB-REFL] % \inference{% % \frac{ % \WFT{E}{T} % }{ % \WS{E}{T}{T} % } % } \end{description} Structure element subtyping rules: \begin{description} \item[ASSUM-ASSUM] \inference{% \frac{ \WTELECONV{}{T_1}{T_2} }{ \WSE{\Assum{}{c}{T_1}}{\Assum{}{c}{T_2}} } } \item[DEF-ASSUM] \inference{% \frac{ \WTELECONV{}{T_1}{T_2} }{ \WSE{\Def{}{c}{t}{T_1}}{\Assum{}{c}{T_2}} } } \item[ASSUM-DEF] \inference{% \frac{ \WTELECONV{}{T_1}{T_2}~~~~~~~~\WTECONV{}{c}{t_2} }{ \WSE{\Assum{}{c}{T_1}}{\Def{}{c}{t_2}{T_2}} } } \item[DEF-DEF] \inference{% \frac{ \WTELECONV{}{T_1}{T_2}~~~~~~~~\WTECONV{}{t_1}{t_2} }{ \WSE{\Def{}{c}{t_1}{T_1}}{\Def{}{c}{t_2}{T_2}} } } \item[IND-IND] \inference{% \frac{ \WTECONV{}{\Gamma_P}{\Gamma_P'}% ~~~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% ~~~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% }{ \WSE{\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}}% {\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}} } } \item[INDP-IND] \inference{% \frac{ \WTECONV{}{\Gamma_P}{\Gamma_P'}% ~~~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% ~~~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% }{ \WSE{\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}% {\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}} } } \item[INDP-INDP] \inference{% \frac{ \WTECONV{}{\Gamma_P}{\Gamma_P'}% ~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% ~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% ~~~~~~\WTECONV{}{p}{p'} }{ \WSE{\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}% {\Indp{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}{p'}} } } 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \item[MOD-MOD] \inference{% \frac{ \WSE{S_1}{S_2} }{ \WSE{\ModS{X}{S_1}}{\ModS{X}{S_2}} } } \item[ALIAS-MOD] \inference{% \frac{ \WTM{E}{p}{S_1}~~~~~~~~\WSE{S_1}{S_2} }{ \WSE{\ModA{X}{p}}{\ModS{X}{S_2}} } } \item[MOD-ALIAS] \inference{% \frac{ \WTM{E}{p}{S_2}~~~~~~~~ \WSE{S_1}{S_2}~~~~~~~~\WTECONV{}{X}{p} }{ \WSE{\ModS{X}{S_1}}{\ModA{X}{p}} } } \item[ALIAS-ALIAS] \inference{% \frac{ \WTECONV{}{p_1}{p_2} }{ \WSE{\ModA{X}{p_1}}{\ModA{X}{p_2}} } } \item[MODTYPE-MODTYPE] \inference{% \frac{ \WSE{S_1}{S_2}~~~~~~~~\WSE{S_2}{S_1} }{ \WSE{\ModType{Y}{S_1}}{\ModType{Y}{S_2}} } } \end{description} New environment formation rules \begin{description} \item[WF-MOD] \inference{% \frac{ \WF{E}{}~~~~~~~~\WFT{E}{S} }{ \WF{E;\ModS{X}{S}}{} } } \item[WF-MOD] \inference{% \frac{ \begin{array}{c} \WS{E}{S_2}{S_1}\\ \WF{E}{}~~~~~\WFT{E}{S_1}~~~~~\WFT{E}{S_2} \end{array} }{ \WF{E;\Mod{X}{S_1}{S_2}}{} } } \item[WF-ALIAS] \inference{% \frac{ \WF{E}{}~~~~~~~~~~~\WTE{}{p}{S} }{ \WF{E,\ModA{X}{p}}{} } } \item[WF-MODTYPE] \inference{% \frac{ \WF{E}{}~~~~~~~~~~~\WFT{E}{S} }{ \WF{E,\ModType{Y}{S}}{} } } \item[WF-IND] \inference{% \frac{ \begin{array}{c} \WF{E;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}}{}\\ \WT{E}{}{p:\struct{\elem_1;\dots;\elem_n;\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'};\dots}}\\ \WS{E}{\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}}{\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}} \end{array} }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \WF{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{} } } \end{description} Component access rules \begin{description} \item[ACC-TYPE] \inference{% \frac{ \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Assum{}{c}{T};\dots}} }{ \WTEG{p.c}{T} } } \\ \inference{% \frac{ \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Def{}{c}{t}{T};\dots}} }{ \WTEG{p.c}{T} } } \item[ACC-DELTA] Notice that the following rule extends the delta rule defined in section~\ref{delta} \inference{% \frac{ \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Def{}{c}{t}{U};\dots}} }{ \WTEGRED{p.c}{\triangleright_\delta}{t} } } \\ In the rules below we assume $\Gamma_P$ is $[p_1:P_1;\ldots;p_r:P_r]$, $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is $[c_1:C_1;\ldots;c_n:C_n]$ \item[ACC-IND] \inference{% \frac{ \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I};\dots}} }{ \WTEG{p.I_j}{(p_1:P_1)\ldots(p_r:P_r)A_j} } } \inference{% \frac{ \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I};\dots}} }{ \WTEG{p.c_m}{(p_1:P_1)\ldots(p_r:P_r){C_m}{I_j}{(I_j~p_1\ldots p_r)}_{j=1\ldots k}} } } \item[ACC-INDP] \inference{% \frac{ \WT{E}{}{p}{\struct{\elem_1;\dots;\elem_i;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'};\dots}} }{ \WTRED{E}{}{p.I_i}{\triangleright_\delta}{p'.I_i} } } \inference{% \frac{ \WT{E}{}{p}{\struct{\elem_1;\dots;\elem_i;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'};\dots}} }{ \WTRED{E}{}{p.c_i}{\triangleright_\delta}{p'.c_i} } } \end{description} % %%% replaced by \triangle_\delta % Module path equality is a transitive and reflexive closure of the % relation generated by ACC-MODEQ and ENV-MODEQ. 
% \begin{itemize} % \item []MP-EQ-REFL % \inference{% % \frac{ % \WTEG{p}{T} % }{ % \WTEG{p}{p} % } % } % \item []MP-EQ-TRANS % \inference{% % \frac{ % \WTEGRED{p}{=}{p'}~~~~~~\WTEGRED{p'}{=}{p''} % }{ % \WTEGRED{p'}{=}{p''} % } % } % \end{itemize} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/Reference-Manual.tex0000640000175000001440000000757211752753306017523 0ustar notinusers%\RequirePackage{ifpdf} %\ifpdf % \documentclass[11pt,a4paper,pdftex]{book} %\else \documentclass[11pt,a4paper]{book} %\fi \usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{times} \usepackage{url} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \usepackage{alltt} \usepackage{hevea} \usepackage{ifpdf} \usepackage[headings]{fullpage} \usepackage{headers} % in this directory \usepackage{multicol} \usepackage{xspace} % for coqide \ifpdf % si on est pas en pdflatex \usepackage[pdftex]{graphicx} \else \usepackage[dvips]{graphicx} \fi %\includeonly{Setoid} \input{../common/version.tex} \input{../common/macros.tex}% extension .tex pour htmlgen \input{../common/title.tex}% extension .tex pour htmlgen %\input{headers} \usepackage[linktocpage,colorlinks]{hyperref} % The manual advises to load hyperref package last to be able to redefine % necessary commands. % The above should work for both latex and pdflatex. Even if PDF is produced % through DVI and PS using dvips and ps2pdf, hyperlinks should still work. % linktocpage option makes page numbers, not section names, to be links in % the table of contents. % colorlinks option colors the links instead of using boxes. % The command \tocnumber was added to HEVEA in version 1.06-6. % It instructs HEVEA to put chapter numbers into the table of % content entries. The table of content is produced by HACHA using % the options -tocbis -o toc.html. HEVEA produces a warning when % a command is not recognized, so versions earlier than 1.06-6 can % still be used. %HEVEA\tocnumber \begin{document} %BEGIN LATEX \sloppy\hbadness=5000 %END LATEX %BEGIN LATEX \coverpage{Reference Manual} {The Coq Development Team} {This material may be distributed only subject to the terms and conditions set forth in the Open Publication License, v1.0 or later (the latest version is presently available at \url{http://www.opencontent.org/openpub}). 
Options A and B of the licence are {\em not} elected.} %END LATEX %\defaultheaders \include{RefMan-int}% Introduction \include{RefMan-pre}% Credits %BEGIN LATEX \tableofcontents %END LATEX \part{The language} %BEGIN LATEX \defaultheaders %END LATEX \include{RefMan-gal.v}% Gallina \include{RefMan-ext.v}% Gallina extensions \include{RefMan-lib.v}% The coq library \include{RefMan-cic.v}% The Calculus of Constructions \include{RefMan-modr}% The module system \part{The proof engine} \include{RefMan-oth.v}% Vernacular commands \include{RefMan-pro.v}% Proof handling \include{RefMan-tac.v}% Tactics and tacticals \include{RefMan-ltac.v}% Writing tactics \include{RefMan-tacex.v}% Detailed Examples of tactics \include{RefMan-decl.v}% The mathematical proof language \part{User extensions} \include{RefMan-syn.v}% The Syntax and the Grammar commands %%SUPPRIME \include{RefMan-tus.v}% Writing tactics \include{RefMan-sch.v}% The Scheme commands \part{Practical tools} \include{RefMan-com}% The coq commands (coqc coqtop) \include{RefMan-uti}% utilities (gallina, do_Makefile, etc) \include{RefMan-ide}% Coq IDE %BEGIN LATEX \RefManCutCommand{BEGINADDENDUM=\thepage} %END LATEX \part{Addendum to the Reference Manual} \include{AddRefMan-pre}% \include{Cases.v}% \include{Coercion.v}% \include{Classes.v}% %%SUPPRIME \include{Natural.v}% \include{Omega.v}% \include{Micromega.v} %%SUPPRIME \include{Correctness.v}% = preuve de pgms imperatifs \include{Extraction.v}% \include{Program.v}% \include{Polynom.v}% = Ring \include{Nsatz.v}% \include{Setoid.v}% Tactique pour les setoides %BEGIN LATEX \RefManCutCommand{ENDADDENDUM=\thepage} %END LATEX \nocite{*} \bibliographystyle{plain} \bibliography{biblio} \cutname{biblio.html} \printindex \cutname{general-index.html} \printindex[tactic] \cutname{tactic-index.html} \printindex[command] \cutname{command-index.html} \printindex[error] \cutname{error-index.html} %BEGIN LATEX \listoffigures \addcontentsline{toc}{chapter}{\listfigurename} %END LATEX \end{document} coq-8.4pl2/doc/refman/RefMan-gal.tex0000640000175000001440000017316712124102005016301 0ustar notinusers\chapter{The \gallina{} specification language \label{Gallina}\index{Gallina}} \label{BNF-syntax} % Used referred to as a chapter label This chapter describes \gallina, the specification language of {\Coq}. It allows to develop mathematical theories and to prove specifications of programs. The theories are built from axioms, hypotheses, parameters, lemmas, theorems and definitions of constants, functions, predicates and sets. The syntax of logical objects involved in theories is described in Section~\ref{term}. The language of commands, called {\em The Vernacular} is described in section \ref{Vernacular}. In {\Coq}, logical objects are typed to ensure their logical correctness. The rules implemented by the typing algorithm are described in Chapter \ref{Cic}. \subsection*{About the grammars in the manual \index{BNF metasyntax}} Grammars are presented in Backus-Naur form (BNF). Terminal symbols are set in {\tt typewriter font}. In addition, there are special notations for regular expressions. An expression enclosed in square brackets \zeroone{\ldots} means at most one occurrence of this expression (this corresponds to an optional component). 
The notation ``\nelist{\entry}{sep}'' stands for a non empty sequence of expressions parsed by {\entry} and separated by the literal ``{\tt sep}''\footnote{This is similar to the expression ``{\entry} $\{$ {\tt sep} {\entry} $\}$'' in standard BNF, or ``{\entry}~{$($} {\tt sep} {\entry} {$)$*}'' in the syntax of regular expressions.}. Similarly, the notation ``\nelist{\entry}{}'' stands for a non empty sequence of expressions parsed by the ``{\entry}'' entry, without any separator between. At the end, the notation ``\sequence{\entry}{\tt sep}'' stands for a possibly empty sequence of expressions parsed by the ``{\entry}'' entry, separated by the literal ``{\tt sep}''. \section{Lexical conventions \label{lexical}\index{Lexical conventions}} \paragraph{Blanks} Space, newline and horizontal tabulation are considered as blanks. Blanks are ignored but they separate tokens. \paragraph{Comments} Comments in {\Coq} are enclosed between {\tt (*} and {\tt *)}\index{Comments}, and can be nested. They can contain any character. However, string literals must be correctly closed. Comments are treated as blanks. \paragraph{Identifiers and access identifiers} Identifiers, written {\ident}, are sequences of letters, digits, \verb!_! and \verb!'!, that do not start with a digit or \verb!'!. That is, they are recognized by the following lexical class: \index{ident@\ident} \begin{center} \begin{tabular}{rcl} {\firstletter} & ::= & {\tt a..z} $\mid$ {\tt A..Z} $\mid$ {\tt \_} $\mid$ {\tt unicode-letter} \\ {\subsequentletter} & ::= & {\tt a..z} $\mid$ {\tt A..Z} $\mid$ {\tt 0..9} $\mid$ {\tt \_} % $\mid$ {\tt \$} $\mid$ {\tt '} $\mid$ {\tt unicode-letter} $\mid$ {\tt unicode-id-part} \\ {\ident} & ::= & {\firstletter} \sequencewithoutblank{\subsequentletter}{} \end{tabular} \end{center} All characters are meaningful. In particular, identifiers are case-sensitive. The entry {\tt unicode-letter} non-exhaustively includes Latin, Greek, Gothic, Cyrillic, Arabic, Hebrew, Georgian, Hangul, Hiragana and Katakana characters, CJK ideographs, mathematical letter-like symbols, hyphens, non-breaking space, {\ldots} The entry {\tt unicode-id-part} non-exhaustively includes symbols for prime letters and subscripts. Access identifiers, written {\accessident}, are identifiers prefixed by \verb!.! (dot) without blank. They are used in the syntax of qualified identifiers. \paragraph{Natural numbers and integers} Numerals are sequences of digits. Integers are numerals optionally preceded by a minus sign. \index{num@{\num}} \index{integer@{\integer}} \begin{center} \begin{tabular}{r@{\quad::=\quad}l} {\digit} & {\tt 0..9} \\ {\num} & \nelistwithoutblank{\digit}{} \\ {\integer} & \zeroone{\tt -}{\num} \\ \end{tabular} \end{center} \paragraph[Strings]{Strings\label{strings} \index{string@{\qstring}}} Strings are delimited by \verb!"! (double quote), and enclose a sequence of any characters different from \verb!"! or the sequence \verb!""! to denote the double quote character. In grammars, the entry for quoted strings is {\qstring}. \paragraph{Keywords} The following identifiers are reserved keywords, and cannot be employed otherwise: \begin{center} \begin{tabular}{llllll} \verb!_! & \verb!as! & \verb!at! & \verb!cofix! & \verb!else! & \verb!end! \\ % \verb!exists! & \verb!exists2! & \verb!fix! & \verb!for! & \verb!forall! & \verb!fun! \\ % \verb!if! & \verb!IF! & \verb!in! & \verb!let! & \verb!match! & \verb!mod! \\ % \verb!Prop! & \verb!return! & \verb!Set! & \verb!then! & \verb!Type! & \verb!using! \\ % \verb!where! & \verb!with! 
& \end{tabular} \end{center} \paragraph{Special tokens} The following sequences of characters are special tokens: \begin{center} \begin{tabular}{lllllll} \verb/!/ & \verb!%! & \verb!&! & \verb!&&! & \verb!(! & \verb!()! & \verb!)! \\ % \verb!*! & \verb!+! & \verb!++! & \verb!,! & \verb!-! & \verb!->! & \verb!.! \\ % \verb!.(! & \verb!..! & \verb!/! & \verb!/\! & \verb!:! & \verb!::! & \verb!:! & \verb!;! & \verb!! & \verb!<:! \\ % \verb!<=! & \verb!<>! & \verb!=! & \verb!=>! & \verb!=_D! & \verb!>! & \verb!>->! \\ % \verb!>=! & \verb!?! & \verb!?=! & \verb!@! & \verb![! & \verb!\/! & \verb!]! \\ % \verb!^! & \verb!{! & \verb!|! & \verb!|-! & \verb!||! & \verb!}! & \verb!~! \\ \end{tabular} \end{center} Lexical ambiguities are resolved according to the ``longest match'' rule: when a sequence of non alphanumerical characters can be decomposed into several different ways, then the first token is the longest possible one (among all tokens defined at this moment), and so on. \section{Terms \label{term}\index{Terms}} \subsection{Syntax of terms} Figures \ref{term-syntax} and \ref{term-syntax-aux} describe the basic syntax of the terms of the {\em Calculus of Inductive Constructions} (also called \CIC). The formal presentation of {\CIC} is given in Chapter \ref{Cic}. Extensions of this syntax are given in chapter \ref{Gallina-extension}. How to customize the syntax is described in Chapter \ref{Addoc-syntax}. \begin{figure}[htbp] \begin{centerframe} \begin{tabular}{lcl@{\quad~}r} % warning: page width exceeded with \qquad {\term} & ::= & {\tt forall} {\binders} {\tt ,} {\term} &(\ref{products})\\ & $|$ & {\tt fun} {\binders} {\tt =>} {\term} &(\ref{abstractions})\\ & $|$ & {\tt fix} {\fixpointbodies} &(\ref{fixpoints})\\ & $|$ & {\tt cofix} {\cofixpointbodies} &(\ref{fixpoints})\\ & $|$ & {\tt let} {\ident} \zeroone{\binders} {\typecstr} {\tt :=} {\term} {\tt in} {\term} &(\ref{let-in})\\ & $|$ & {\tt let fix} {\fixpointbody} {\tt in} {\term} &(\ref{fixpoints})\\ & $|$ & {\tt let cofix} {\cofixpointbody} {\tt in} {\term} &(\ref{fixpoints})\\ & $|$ & {\tt let} {\tt (} \sequence{\name}{,} {\tt )} \zeroone{\ifitem} {\tt :=} {\term} {\tt in} {\term} &(\ref{caseanalysis}, \ref{Mult-match})\\ & $|$ & {\tt let '} {\pattern} \zeroone{{\tt in} {\term}} {\tt :=} {\term} \zeroone{\returntype} {\tt in} {\term} & (\ref{caseanalysis}, \ref{Mult-match})\\ & $|$ & {\tt if} {\term} \zeroone{\ifitem} {\tt then} {\term} {\tt else} {\term} &(\ref{caseanalysis}, \ref{Mult-match})\\ & $|$ & {\term} {\tt :} {\term} &(\ref{typecast})\\ & $|$ & {\term} {\tt <:} {\term} &(\ref{typecast})\\ & $|$ & {\term} {\tt :>} &(\ref{ProgramSyntax})\\ & $|$ & {\term} {\tt ->} {\term} &(\ref{products})\\ & $|$ & {\term} \nelist{\termarg}{}&(\ref{applications})\\ & $|$ & {\tt @} {\qualid} \sequence{\term}{} &(\ref{Implicits-explicitation})\\ & $|$ & {\term} {\tt \%} {\ident} &(\ref{scopechange})\\ & $|$ & {\tt match} \nelist{\caseitem}{\tt ,} \zeroone{\returntype} {\tt with} &\\ && ~~~\zeroone{\zeroone{\tt |} \nelist{\eqn}{|}} {\tt end} &(\ref{caseanalysis})\\ & $|$ & {\qualid} &(\ref{qualid})\\ & $|$ & {\sort} &(\ref{Gallina-sorts})\\ & $|$ & {\num} &(\ref{numerals})\\ & $|$ & {\_} &(\ref{hole})\\ & $|$ & {\tt (} {\term} {\tt )} & \\ & & &\\ {\termarg} & ::= & {\term} &\\ & $|$ & {\tt (} {\ident} {\tt :=} {\term} {\tt )} &(\ref{Implicits-explicitation})\\ %% & $|$ & {\tt (} {\num} {\tt :=} {\term} {\tt )} %% &(\ref{Implicits-explicitation})\\ &&&\\ {\binders} & ::= & \nelist{\binder}{} \\ &&&\\ {\binder} & ::= & {\name} & 
(\ref{Binders}) \\ & $|$ & {\tt (} \nelist{\name}{} {\tt :} {\term} {\tt )} &\\ & $|$ & {\tt (} {\name} {\typecstr} {\tt :=} {\term} {\tt )} &\\ & & &\\ {\name} & ::= & {\ident} &\\ & $|$ & {\tt \_} &\\ &&&\\ {\qualid} & ::= & {\ident} & \\ & $|$ & {\qualid} {\accessident} &\\ & & &\\ {\sort} & ::= & {\tt Prop} ~$|$~ {\tt Set} ~$|$~ {\tt Type} & \end{tabular} \end{centerframe} \caption{Syntax of terms} \label{term-syntax} \index{term@{\term}} \index{sort@{\sort}} \end{figure} \begin{figure}[htb] \begin{centerframe} \begin{tabular}{lcl} {\fixpointbodies} & ::= & {\fixpointbody} \\ & $|$ & {\fixpointbody} {\tt with} \nelist{\fixpointbody}{{\tt with}} {\tt for} {\ident} \\ {\cofixpointbodies} & ::= & {\cofixpointbody} \\ & $|$ & {\cofixpointbody} {\tt with} \nelist{\cofixpointbody}{{\tt with}} {\tt for} {\ident} \\ &&\\ {\fixpointbody} & ::= & {\ident} {\binders} \zeroone{\annotation} {\typecstr} {\tt :=} {\term} \\ {\cofixpointbody} & ::= & {\ident} \zeroone{\binders} {\typecstr} {\tt :=} {\term} \\ & &\\ {\annotation} & ::= & {\tt \{ struct} {\ident} {\tt \}} \\ &&\\ {\caseitem} & ::= & {\term} \zeroone{{\tt as} \name} \zeroone{{\tt in} \term} \\ &&\\ {\ifitem} & ::= & \zeroone{{\tt as} {\name}} {\returntype} \\ &&\\ {\returntype} & ::= & {\tt return} {\term} \\ &&\\ {\eqn} & ::= & \nelist{\multpattern}{\tt |} {\tt =>} {\term}\\ &&\\ {\multpattern} & ::= & \nelist{\pattern}{\tt ,}\\ &&\\ {\pattern} & ::= & {\qualid} \nelist{\pattern}{} \\ & $|$ & {\pattern} {\tt as} {\ident} \\ & $|$ & {\pattern} {\tt \%} {\ident} \\ & $|$ & {\qualid} \\ & $|$ & {\tt \_} \\ & $|$ & {\num} \\ & $|$ & {\tt (} \nelist{\orpattern}{,} {\tt )} \\ \\ {\orpattern} & ::= & \nelist{\pattern}{\tt |}\\ \end{tabular} \end{centerframe} \caption{Syntax of terms (continued)} \label{term-syntax-aux} \end{figure} %%%%%%% \subsection{Types} {\Coq} terms are typed. {\Coq} types are recognized by the same syntactic class as {\term}. We denote by {\type} the semantic subclass of types inside the syntactic class {\term}. \index{type@{\type}} \subsection{Qualified identifiers and simple identifiers \label{qualid} \label{ident}} {\em Qualified identifiers} ({\qualid}) denote {\em global constants} (definitions, lemmas, theorems, remarks or facts), {\em global variables} (parameters or axioms), {\em inductive types} or {\em constructors of inductive types}. {\em Simple identifiers} (or shortly {\ident}) are a syntactic subset of qualified identifiers. Identifiers may also denote local {\em variables}, what qualified identifiers do not. \subsection{Numerals \label{numerals}} Numerals have no definite semantics in the calculus. They are mere notations that can be bound to objects through the notation mechanism (see Chapter~\ref{Addoc-syntax} for details). Initially, numerals are bound to Peano's representation of natural numbers (see~\ref{libnats}). Note: negative integers are not at the same level as {\num}, for this would make precedence unnatural. \subsection{Sorts \index{Sorts} \index{Type@{\Type}} \index{Set@{\Set}} \index{Prop@{\Prop}} \index{Sorts} \label{Gallina-sorts}} There are three sorts \Set, \Prop\ and \Type. \begin{itemize} \item \Prop\ is the universe of {\em logical propositions}. The logical propositions themselves are typing the proofs. We denote propositions by {\form}. This constitutes a semantic subclass of the syntactic class {\term}. \index{form@{\form}} \item \Set\ is is the universe of {\em program types} or {\em specifications}. The specifications themselves are typing the programs. 
We denote specifications by {\specif}. This constitutes a semantic subclass of the syntactic class {\term}. \index{specif@{\specif}} \item {\Type} is the type of {\Set} and {\Prop} \end{itemize} \noindent More on sorts can be found in Section~\ref{Sorts}. \bigskip {\Coq} terms are typed. {\Coq} types are recognized by the same syntactic class as {\term}. We denote by {\type} the semantic subclass of types inside the syntactic class {\term}. \index{type@{\type}} \subsection{Binders \label{Binders} \index{binders}} Various constructions such as {\tt fun}, {\tt forall}, {\tt fix} and {\tt cofix} {\em bind} variables. A binding is represented by an identifier. If the binding variable is not used in the expression, the identifier can be replaced by the symbol {\tt \_}. When the type of a bound variable cannot be synthesized by the system, it can be specified with the notation {\tt (}\,{\ident}\,{\tt :}\,{\type}\,{\tt )}. There is also a notation for a sequence of binding variables sharing the same type: {\tt (}\,{\ident$_1$}\ldots{\ident$_n$}\,{\tt :}\,{\type}\,{\tt )}. Some constructions allow the binding of a variable to value. This is called a ``let-binder''. The entry {\binder} of the grammar accepts either an assumption binder as defined above or a let-binder. The notation in the latter case is {\tt (}\,{\ident}\,{\tt :=}\,{\term}\,{\tt )}. In a let-binder, only one variable can be introduced at the same time. It is also possible to give the type of the variable as follows: {\tt (}\,{\ident}\,{\tt :}\,{\term}\,{\tt :=}\,{\term}\,{\tt )}. Lists of {\binder} are allowed. In the case of {\tt fun} and {\tt forall}, it is intended that at least one binder of the list is an assumption otherwise {\tt fun} and {\tt forall} gets identical. Moreover, parentheses can be omitted in the case of a single sequence of bindings sharing the same type (e.g.: {\tt fun~(x~y~z~:~A)~=>~t} can be shortened in {\tt fun~x~y~z~:~A~=>~t}). \subsection{Abstractions \label{abstractions} \index{abstractions}} The expression ``{\tt fun} {\ident} {\tt :} {\type} {\tt =>}~{\term}'' defines the {\em abstraction} of the variable {\ident}, of type {\type}, over the term {\term}. It denotes a function of the variable {\ident} that evaluates to the expression {\term} (e.g. {\tt fun x:$A$ => x} denotes the identity function on type $A$). % The variable {\ident} is called the {\em parameter} of the function % (we sometimes say the {\em formal parameter}). The keyword {\tt fun} can be followed by several binders as given in Section~\ref{Binders}. Functions over several variables are equivalent to an iteration of one-variable functions. For instance the expression ``{\tt fun}~{\ident$_{1}$}~{\ldots}~{\ident$_{n}$}~{\tt :}~\type~{\tt =>}~{\term}'' denotes the same function as ``{\tt fun}~{\ident$_{1}$}~{\tt :}~\type~{\tt =>}~{\ldots}~{\tt fun}~{\ident$_{n}$}~{\tt :}~\type~{\tt =>}~{\term}''. If a let-binder occurs in the list of binders, it is expanded to a local definition (see Section~\ref{let-in}). \subsection{Products \label{products} \index{products}} The expression ``{\tt forall}~{\ident}~{\tt :}~{\type}{\tt ,}~{\term}'' denotes the {\em product} of the variable {\ident} of type {\type}, over the term {\term}. As for abstractions, {\tt forall} is followed by a binder list, and products over several variables are equivalent to an iteration of one-variable products. Note that {\term} is intended to be a type. If the variable {\ident} occurs in {\term}, the product is called {\em dependent product}. 
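As a concrete illustration (relying on the order {\tt <=} over natural numbers from the standard library, see Section~\ref{libnats}), the first product below is dependent, because its body mentions the bound variable {\tt n}, while the second one is not; note that {\Coq} displays the latter with the arrow notation introduced below:
\begin{coq_example}
Check (forall n:nat, 0 <= n).
Check (forall n:nat, nat).
\end{coq_example}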
The intention behind a dependent product {\tt forall}~$x$~{\tt :}~{$A$}{\tt ,}~{$B$} is twofold. It denotes either the universal quantification of the variable $x$ of type $A$ in the proposition $B$ or the functional dependent product from $A$ to $B$ (a construction usually written $\Pi_{x:A}.B$ in set theory). Non dependent product types have a special notation: ``$A$ {\tt ->} $B$'' stands for ``{\tt forall \_:}$A${\tt ,}~$B$''. The non dependent product is used both to denote the propositional implication and function types. \subsection{Applications \label{applications} \index{applications}} The expression \term$_0$ \term$_1$ denotes the application of \term$_0$ to \term$_1$. The expression {\tt }\term$_0$ \term$_1$ ... \term$_n${\tt} denotes the application of the term \term$_0$ to the arguments \term$_1$ ... then \term$_n$. It is equivalent to {\tt (} {\ldots} {\tt (} {\term$_0$} {\term$_1$} {\tt )} {\ldots} {\tt )} {\term$_n$} {\tt }: associativity is to the left. The notation {\tt (}\,{\ident}\,{\tt :=}\,{\term}\,{\tt )} for arguments is used for making explicit the value of implicit arguments (see Section~\ref{Implicits-explicitation}). \subsection{Type cast \label{typecast} \index{Cast}} The expression ``{\term}~{\tt :}~{\type}'' is a type cast expression. It enforces the type of {\term} to be {\type}. ``{\term}~{\tt <:}~{\type}'' locally sets up the virtual machine (as if option {\tt Virtual Machine} were on, see \ref{SetVirtualMachine}) for checking that {\term} has type {\type}. \subsection{Inferable subterms \label{hole} \index{\_}} Expressions often contain redundant pieces of information. Subterms that can be automatically inferred by {\Coq} can be replaced by the symbol ``\_'' and {\Coq} will guess the missing piece of information. \subsection{Local definitions (let-in) \label{let-in} \index{Local definitions} \index{let-in}} {\tt let}~{\ident}~{\tt :=}~{\term$_1$}~{\tt in}~{\term$_2$} denotes the local binding of \term$_1$ to the variable $\ident$ in \term$_2$. There is a syntactic sugar for local definition of functions: {\tt let} {\ident} {\binder$_1$} {\ldots} {\binder$_n$} {\tt :=} {\term$_1$} {\tt in} {\term$_2$} stands for {\tt let} {\ident} {\tt := fun} {\binder$_1$} {\ldots} {\binder$_n$} {\tt =>} {\term$_1$} {\tt in} {\term$_2$}. \subsection{Definition by case analysis \label{caseanalysis} \index{match@{\tt match\ldots with\ldots end}}} Objects of inductive types can be destructurated by a case-analysis construction called {\em pattern-matching} expression. A pattern-matching expression is used to analyze the structure of an inductive objects and to apply specific treatments accordingly. This paragraph describes the basic form of pattern-matching. See Section~\ref{Mult-match} and Chapter~\ref{Mult-match-full} for the description of the general form. The basic form of pattern-matching is characterized by a single {\caseitem} expression, a {\multpattern} restricted to a single {\pattern} and {\pattern} restricted to the form {\qualid} \nelist{\ident}{}. The expression {\tt match} {\term$_0$} {\returntype} {\tt with} {\pattern$_1$} {\tt =>} {\term$_1$} {\tt $|$} {\ldots} {\tt $|$} {\pattern$_n$} {\tt =>} {\term$_n$} {\tt end}, denotes a {\em pattern-matching} over the term {\term$_0$} (expected to be of an inductive type $I$). The terms {\term$_1$}\ldots{\term$_n$} are the {\em branches} of the pattern-matching expression. Each of {\pattern$_i$} has a form \qualid~\nelist{\ident}{} where {\qualid} must denote a constructor. 
There should be exactly one branch for every constructor of $I$. The {\returntype} expresses the type returned by the whole {\tt match} expression. There are several cases. In the {\em non dependent} case, all branches have the same type, and the {\returntype} is the common type of branches. In this case, {\returntype} can usually be omitted as it can be inferred from the type of the branches\footnote{Except if the inductive type is empty in which case there is no equation that can be used to infer the return type.}. In the {\em dependent} case, there are three subcases. In the first subcase, the type in each branch may depend on the exact value being matched in the branch. In this case, the whole pattern-matching itself depends on the term being matched. This dependency of the term being matched in the return type is expressed with an ``{\tt as {\ident}}'' clause where {\ident} is dependent in the return type. For instance, in the following example: \begin{coq_example*} Inductive bool : Type := true : bool | false : bool. Inductive eq (A:Type) (x:A) : A -> Prop := eq_refl : eq A x x. Inductive or (A:Prop) (B:Prop) : Prop := | or_introl : A -> or A B | or_intror : B -> or A B. Definition bool_case (b:bool) : or (eq bool b true) (eq bool b false) := match b as x return or (eq bool x true) (eq bool x false) with | true => or_introl (eq bool true true) (eq bool true false) (eq_refl bool true) | false => or_intror (eq bool false true) (eq bool false false) (eq_refl bool false) end. \end{coq_example*} the branches have respective types {\tt or (eq bool true true) (eq bool true false)} and {\tt or (eq bool false true) (eq bool false false)} while the whole pattern-matching expression has type {\tt or (eq bool b true) (eq bool b false)}, the identifier {\tt x} being used to represent the dependency. Remark that when the term being matched is a variable, the {\tt as} clause can be omitted and the term being matched can serve itself as binding name in the return type. For instance, the following alternative definition is accepted and has the same meaning as the previous one. \begin{coq_example*} Definition bool_case (b:bool) : or (eq bool b true) (eq bool b false) := match b return or (eq bool b true) (eq bool b false) with | true => or_introl (eq bool true true) (eq bool true false) (eq_refl bool true) | false => or_intror (eq bool false true) (eq bool false false) (eq_refl bool false) end. \end{coq_example*} The second subcase is only relevant for annotated inductive types such as the equality predicate (see Section~\ref{Equality}), the order predicate on natural numbers % (see Section~\ref{le}) % undefined reference or the type of lists of a given length (see Section~\ref{listn}). In this configuration, the type of each branch can depend on the type dependencies specific to the branch and the whole pattern-matching expression has a type determined by the specific dependencies in the type of the term being matched. This dependency of the return type in the annotations of the inductive type is expressed using a {\tt ``in~I~\_~$\ldots$~\_~\ident$_1$~$\ldots$~\ident$_n$}'' clause, where \begin{itemize} \item $I$ is the inductive type of the term being matched; \item the names \ident$_i$'s correspond to the arguments of the inductive type that carry the annotations: the return type is dependent on them; \item the {\_}'s denote the family parameters of the inductive type: the return type is not dependent on them. 
\end{itemize} For instance, in the following example: \begin{coq_example*} Definition eq_sym (A:Type) (x y:A) (H:eq A x y) : eq A y x := match H in eq _ _ z return eq A z x with | eq_refl => eq_refl A x end. \end{coq_example*} the type of the branch has type {\tt eq~A~x~x} because the third argument of {\tt eq} is {\tt x} in the type of the pattern {\tt refl\_equal}. On the contrary, the type of the whole pattern-matching expression has type {\tt eq~A~y~x} because the third argument of {\tt eq} is {\tt y} in the type of {\tt H}. This dependency of the case analysis in the third argument of {\tt eq} is expressed by the identifier {\tt z} in the return type. Finally, the third subcase is a combination of the first and second subcase. In particular, it only applies to pattern-matching on terms in a type with annotations. For this third subcase, both the clauses {\tt as} and {\tt in} are available. There are specific notations for case analysis on types with one or two constructors: ``{\tt if {\ldots} then {\ldots} else {\ldots}}'' and ``{\tt let (}\nelist{\ldots}{,}{\tt ) := } {\ldots} {\tt in} {\ldots}'' (see Sections~\ref{if-then-else} and~\ref{Letin}). %\SeeAlso Section~\ref{Mult-match} for convenient extensions of pattern-matching. \subsection{Recursive functions \label{fixpoints} \index{fix@{fix \ident$_i$\{\dots\}}}} The expression ``{\tt fix} \ident$_1$ \binder$_1$ {\tt :} {\type$_1$} \texttt{:=} \term$_1$ {\tt with} {\ldots} {\tt with} \ident$_n$ \binder$_n$~{\tt :} {\type$_n$} \texttt{:=} \term$_n$ {\tt for} {\ident$_i$}'' denotes the $i$\nth component of a block of functions defined by mutual well-founded recursion. It is the local counterpart of the {\tt Fixpoint} command. See Section~\ref{Fixpoint} for more details. When $n=1$, the ``{\tt for}~{\ident$_i$}'' clause is omitted. The expression ``{\tt cofix} \ident$_1$~\binder$_1$ {\tt :} {\type$_1$} {\tt with} {\ldots} {\tt with} \ident$_n$ \binder$_n$ {\tt :} {\type$_n$}~{\tt for} {\ident$_i$}'' denotes the $i$\nth component of a block of terms defined by a mutual guarded co-recursion. It is the local counterpart of the {\tt CoFixpoint} command. See Section~\ref{CoFixpoint} for more details. When $n=1$, the ``{\tt for}~{\ident$_i$}'' clause is omitted. The association of a single fixpoint and a local definition have a special syntax: ``{\tt let fix}~$f$~{\ldots}~{\tt :=}~{\ldots}~{\tt in}~{\ldots}'' stands for ``{\tt let}~$f$~{\tt := fix}~$f$~\ldots~{\tt :=}~{\ldots}~{\tt in}~{\ldots}''. The same applies for co-fixpoints. 
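As an illustration, the addition of natural numbers can be written directly as a {\tt fix} term (a minimal example; the name {\tt add} is bound locally by the {\tt fix} expression):
\begin{coq_example}
Check (fix add (n m:nat) {struct n} : nat :=
         match n with
         | O => m
         | S p => S (add p m)
         end).
\end{coq_example}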
\section{The Vernacular \label{Vernacular}} \begin{figure}[tbp] \begin{centerframe} \begin{tabular}{lcl} {\sentence} & ::= & {\assumption} \\ & $|$ & {\definition} \\ & $|$ & {\inductive} \\ & $|$ & {\fixpoint} \\ & $|$ & {\assertion} {\proof} \\ &&\\ %% Assumptions {\assumption} & ::= & {\assumptionkeyword} {\assums} {\tt .} \\ &&\\ {\assumptionkeyword} & $\!\!$ ::= & {\tt Axiom} $|$ {\tt Conjecture} \\ & $|$ & {\tt Parameter} $|$ {\tt Parameters} \\ & $|$ & {\tt Variable} $|$ {\tt Variables} \\ & $|$ & {\tt Hypothesis} $|$ {\tt Hypotheses}\\ &&\\ {\assums} & ::= & \nelist{\ident}{} {\tt :} {\term} \\ & $|$ & \nelist{{\tt (} \nelist{\ident}{} {\tt :} {\term} {\tt )}}{} \\ &&\\ %% Definitions {\definition} & ::= & {\tt Definition} {\ident} \zeroone{\binders} {\typecstr} {\tt :=} {\term} {\tt .} \\ & $|$ & {\tt Let} {\ident} \zeroone{\binders} {\typecstr} {\tt :=} {\term} {\tt .} \\ &&\\ %% Inductives {\inductive} & ::= & {\tt Inductive} \nelist{\inductivebody}{with} {\tt .} \\ & $|$ & {\tt CoInductive} \nelist{\inductivebody}{with} {\tt .} \\ & & \\ {\inductivebody} & ::= & {\ident} \zeroone{\binders} {\tt :} {\term} {\tt :=} \\ && ~~\zeroone{\zeroone{\tt |} \nelist{$\!${\ident}$\!$ \zeroone{\binders} {\typecstrwithoutblank}}{|}} \\ & & \\ %% TODO: where ... %% Fixpoints {\fixpoint} & ::= & {\tt Fixpoint} \nelist{\fixpointbody}{with} {\tt .} \\ & $|$ & {\tt CoFixpoint} \nelist{\cofixpointbody}{with} {\tt .} \\ &&\\ %% Lemmas & proofs {\assertion} & ::= & {\statkwd} {\ident} \zeroone{\binders} {\tt :} {\term} {\tt .} \\ &&\\ {\statkwd} & ::= & {\tt Theorem} $|$ {\tt Lemma} \\ & $|$ & {\tt Remark} $|$ {\tt Fact}\\ & $|$ & {\tt Corollary} $|$ {\tt Proposition} \\ & $|$ & {\tt Definition} $|$ {\tt Example} \\\\ &&\\ {\proof} & ::= & {\tt Proof} {\tt .} {\dots} {\tt Qed} {\tt .}\\ & $|$ & {\tt Proof} {\tt .} {\dots} {\tt Defined} {\tt .}\\ & $|$ & {\tt Proof} {\tt .} {\dots} {\tt Admitted} {\tt .}\\ \end{tabular} \end{centerframe} \caption{Syntax of sentences} \label{sentences-syntax} \end{figure} Figure \ref{sentences-syntax} describes {\em The Vernacular} which is the language of commands of \gallina. A sentence of the vernacular language, like in many natural languages, begins with a capital letter and ends with a dot. The different kinds of command are described hereafter. They all suppose that the terms occurring in the sentences are well-typed. %% %% Axioms and Parameters %% \subsection{Assumptions \index{Declarations} \label{Declarations}} Assumptions extend the environment\index{Environment} with axioms, parameters, hypotheses or variables. An assumption binds an {\ident} to a {\type}. It is accepted by {\Coq} if and only if this {\type} is a correct type in the environment preexisting the declaration and if {\ident} was not previously defined in the same module. This {\type} is considered to be the type (or specification, or statement) assumed by {\ident} and we say that {\ident} has type {\type}. \subsubsection{{\tt Axiom {\ident} :{\term} .} \comindex{Axiom} \label{Axiom}} This command links {\term} to the name {\ident} as its specification in the global context. The fact asserted by {\term} is thus assumed as a postulate. 
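For instance, the classical principle of excluded middle, which is not provable in the intuitionistic logic of {\Coq}, can be postulated as follows (an illustrative example; the name {\tt classic} is arbitrary):
\begin{coq_example*}
Axiom classic : forall P:Prop, P \/ ~ P.
\end{coq_example*}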
\begin{ErrMsgs} \item \errindex{{\ident} already exists} \end{ErrMsgs} \begin{Variants} \item \comindex{Parameter}\comindex{Parameters} {\tt Parameter {\ident} :{\term}.} \\ Is equivalent to {\tt Axiom {\ident} : {\term}} \item {\tt Parameter {\ident$_1$}\ldots{\ident$_n$} {\tt :}{\term}.}\\ Adds $n$ parameters with specification {\term} \item {\tt Parameter\,% (\,{\ident$_{1,1}$}\ldots{\ident$_{1,k_1}$}\,{\tt :}\,{\term$_1$} {\tt )}\,% \ldots\,{\tt (}\,{\ident$_{n,1}$}\ldots{\ident$_{n,k_n}$}\,{\tt :}\,% {\term$_n$} {\tt )}.}\\ Adds $n$ blocks of parameters with different specifications. \item \comindex{Conjecture} {\tt Conjecture {\ident} :{\term}.}\\ Is equivalent to {\tt Axiom {\ident} : {\term}}. \end{Variants} \noindent {\bf Remark: } It is possible to replace {\tt Parameter} by {\tt Parameters}. \subsubsection{{\tt Variable {\ident} :{\term}}. \comindex{Variable} \comindex{Variables} \label{Variable}} This command links {\term} to the name {\ident} in the context of the current section (see Section~\ref{Section} for a description of the section mechanism). When the current section is closed, name {\ident} will be unknown and every object using this variable will be explicitly parametrized (the variable is {\em discharged}). Using the {\tt Variable} command out of any section is equivalent to using {\tt Parameter}. \begin{ErrMsgs} \item \errindex{{\ident} already exists} \end{ErrMsgs} \begin{Variants} \item {\tt Variable {\ident$_1$}\ldots{\ident$_n$} {\tt :}{\term}.}\\ Links {\term} to names {\ident$_1$}\ldots{\ident$_n$}. \item {\tt Variable\,% (\,{\ident$_{1,1}$}\ldots{\ident$_{1,k_1}$}\,{\tt :}\,{\term$_1$} {\tt )}\,% \ldots\,{\tt (}\,{\ident$_{n,1}$}\ldots{\ident$_{n,k_n}$}\,{\tt :}\,% {\term$_n$} {\tt )}.}\\ Adds $n$ blocks of variables with different specifications. \item \comindex{Hypothesis} \comindex{Hypotheses} {\tt Hypothesis {\ident} {\tt :}{\term}.} \\ \texttt{Hypothesis} is a synonymous of \texttt{Variable} \end{Variants} \noindent {\bf Remark: } It is possible to replace {\tt Variable} by {\tt Variables} and {\tt Hypothesis} by {\tt Hypotheses}. It is advised to use the keywords \verb:Axiom: and \verb:Hypothesis: for logical postulates (i.e. when the assertion {\term} is of sort \verb:Prop:), and to use the keywords \verb:Parameter: and \verb:Variable: in other cases (corresponding to the declaration of an abstract mathematical entity). %% %% Definitions %% \subsection{Definitions \index{Definitions} \label{Basic-definitions}} Definitions extend the environment\index{Environment} with associations of names to terms. A definition can be seen as a way to give a meaning to a name or as a way to abbreviate a term. In any case, the name can later be replaced at any time by its definition. The operation of unfolding a name into its definition is called $\delta$-conversion\index{delta-reduction@$\delta$-reduction} (see Section~\ref{delta}). A definition is accepted by the system if and only if the defined term is well-typed in the current context of the definition and if the name is not already used. The name defined by the definition is called a {\em constant}\index{Constant} and the term it refers to is its {\em body}. A definition has a type which is the type of its body. A formal presentation of constants and environments is given in Section~\ref{Typed-terms}. \subsubsection{\tt Definition {\ident} := {\term}. \comindex{Definition}} This command binds {\term} to the name {\ident} in the environment, provided that {\term} is well-typed. 
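For instance (the names below are arbitrary):
\begin{coq_example*}
Definition three := S (S (S O)).
Definition double_succ (n:nat) : nat := S (S n).
\end{coq_example*}
The first definition uses the basic form above; the second one uses binders and an explicit type, as described in the variants below.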
\begin{ErrMsgs} \item \errindex{{\ident} already exists} \end{ErrMsgs} \begin{Variants} \item {\tt Definition {\ident} {\tt :}{\term$_1$} := {\term$_2$}.}\\ It checks that the type of {\term$_2$} is definitionally equal to {\term$_1$}, and registers {\ident} as being of type {\term$_1$}, and bound to value {\term$_2$}. \item {\tt Definition {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt :}\term$_1$ {\tt :=} {\term$_2$}.}\\ This is equivalent to \\ {\tt Definition\,{\ident}\,{\tt :\,forall}\,% {\binder$_1$}\ldots{\binder$_n$}{\tt ,}\,\term$_1$\,{\tt :=}}\,% {\tt fun}\,{\binder$_1$}\ldots{\binder$_n$}\,{\tt =>}\,{\term$_2$}\,% {\tt .} \item {\tt Example {\ident} := {\term}.}\\ {\tt Example {\ident} {\tt :}{\term$_1$} := {\term$_2$}.}\\ {\tt Example {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt :}\term$_1$ {\tt :=} {\term$_2$}.}\\ \comindex{Example} These are synonyms of the {\tt Definition} forms. \end{Variants} \begin{ErrMsgs} \item \errindex{Error: The term {\term} has type {\type} while it is expected to have type {\type}} \end{ErrMsgs} \SeeAlso Sections \ref{Opaque}, \ref{Transparent}, \ref{unfold}. \subsubsection{\tt Let {\ident} := {\term}. \comindex{Let}} This command binds the value {\term} to the name {\ident} in the environment of the current section. The name {\ident} disappears when the current section is eventually closed, and, all persistent objects (such as theorems) defined within the section and depending on {\ident} are prefixed by the local definition {\tt let {\ident} := {\term} in}. \begin{ErrMsgs} \item \errindex{{\ident} already exists} \end{ErrMsgs} \begin{Variants} \item {\tt Let {\ident} : {\term$_1$} := {\term$_2$}.} \end{Variants} \SeeAlso Sections \ref{Section} (section mechanism), \ref{Opaque}, \ref{Transparent} (opaque/transparent constants), \ref{unfold} (tactic {\tt unfold}). %% %% Inductive Types %% \subsection{Inductive definitions \index{Inductive definitions} \label{gal_Inductive_Definitions} \comindex{Inductive} \label{Inductive}} We gradually explain simple inductive types, simple annotated inductive types, simple parametric inductive types, mutually inductive types. We explain also co-inductive types. \subsubsection{Simple inductive types} The definition of a simple inductive type has the following form: \medskip {\tt \begin{tabular}{l} Inductive {\ident} : {\sort} := \\ \begin{tabular}{clcl} & {\ident$_1$} &:& {\type$_1$} \\ | & {\ldots} && \\ | & {\ident$_n$} &:& {\type$_n$} \end{tabular} \end{tabular} } \medskip The name {\ident} is the name of the inductively defined type and {\sort} is the universes where it lives. The names {\ident$_1$}, {\ldots}, {\ident$_n$} are the names of its constructors and {\type$_1$}, {\ldots}, {\type$_n$} their respective types. The types of the constructors have to satisfy a {\em positivity condition} (see Section~\ref{Positivity}) for {\ident}. This condition ensures the soundness of the inductive definition. If this is the case, the constants {\ident}, {\ident$_1$}, {\ldots}, {\ident$_n$} are added to the environment with their respective types. Accordingly to the universe where the inductive type lives ({\it e.g.} its type {\sort}), {\Coq} provides a number of destructors for {\ident}. Destructors are named {\ident}{\tt\_ind}, {\ident}{\tt \_rec} or {\ident}{\tt \_rect} which respectively correspond to elimination principles on {\tt Prop}, {\tt Set} and {\tt Type}. The type of the destructors expresses structural induction/recursion principles over objects of {\ident}. 
We give below two examples of the use of the {\tt Inductive} definitions. The set of natural numbers is defined as: \begin{coq_example} Inductive nat : Set := | O : nat | S : nat -> nat. \end{coq_example} The type {\tt nat} is defined as the least \verb:Set: containing {\tt O} and closed by the {\tt S} constructor. The constants {\tt nat}, {\tt O} and {\tt S} are added to the environment. Now let us have a look at the elimination principles. They are three of them: {\tt nat\_ind}, {\tt nat\_rec} and {\tt nat\_rect}. The type of {\tt nat\_ind} is: \begin{coq_example} Check nat_ind. \end{coq_example} This is the well known structural induction principle over natural numbers, i.e. the second-order form of Peano's induction principle. It allows to prove some universal property of natural numbers ({\tt forall n:nat, P n}) by induction on {\tt n}. The types of {\tt nat\_rec} and {\tt nat\_rect} are similar, except that they pertain to {\tt (P:nat->Set)} and {\tt (P:nat->Type)} respectively . They correspond to primitive induction principles (allowing dependent types) respectively over sorts \verb:Set: and \verb:Type:. The constant {\ident}{\tt \_ind} is always provided, whereas {\ident}{\tt \_rec} and {\ident}{\tt \_rect} can be impossible to derive (for example, when {\ident} is a proposition). \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{Variants} \item \begin{coq_example*} Inductive nat : Set := O | S (_:nat). \end{coq_example*} In the case where inductive types have no annotations (next section gives an example of such annotations), %the positivity condition implies that a constructor can be defined by only giving the type of its arguments. \end{Variants} \subsubsection{Simple annotated inductive types} In an annotated inductive types, the universe where the inductive type is defined is no longer a simple sort, but what is called an arity, which is a type whose conclusion is a sort. As an example of annotated inductive types, let us define the $even$ predicate: \begin{coq_example} Inductive even : nat -> Prop := | even_0 : even O | even_SS : forall n:nat, even n -> even (S (S n)). \end{coq_example} The type {\tt nat->Prop} means that {\tt even} is a unary predicate (inductively defined) over natural numbers. The type of its two constructors are the defining clauses of the predicate {\tt even}. The type of {\tt even\_ind} is: \begin{coq_example} Check even_ind. \end{coq_example} From a mathematical point of view it asserts that the natural numbers satisfying the predicate {\tt even} are exactly in the smallest set of naturals satisfying the clauses {\tt even\_0} or {\tt even\_SS}. This is why, when we want to prove any predicate {\tt P} over elements of {\tt even}, it is enough to prove it for {\tt O} and to prove that if any natural number {\tt n} satisfies {\tt P} its double successor {\tt (S (S n))} satisfies also {\tt P}. This is indeed analogous to the structural induction principle we got for {\tt nat}. \begin{ErrMsgs} \item \errindex{Non strictly positive occurrence of {\ident} in {\type}} \item \errindex{The conclusion of {\type} is not valid; it must be built from {\ident}} \end{ErrMsgs} \subsubsection{Parametrized inductive types} In the previous example, each constructor introduces a different instance of the predicate {\tt even}. In some cases, all the constructors introduces the same generic instance of the inductive definition, in which case, instead of an annotation, we use a context of parameters which are binders shared by all the constructors of the definition. 
% Inductive types may be parameterized. Parameters differ from inductive % type annotations in the fact that recursive invokations of inductive % types must always be done with the same values of parameters as its % specification. The general scheme is: \begin{center} {\tt Inductive} {\ident} {\binder$_1$}\ldots{\binder$_k$} : {\term} := {\ident$_1$}: {\term$_1$} | {\ldots} | {\ident$_n$}: \term$_n$ {\tt .} \end{center} Parameters differ from inductive type annotations in the fact that the conclusion of each type of constructor {\term$_i$} invoke the inductive type with the same values of parameters as its specification. A typical example is the definition of polymorphic lists: \begin{coq_example*} Inductive list (A:Set) : Set := | nil : list A | cons : A -> list A -> list A. \end{coq_example*} Note that in the type of {\tt nil} and {\tt cons}, we write {\tt (list A)} and not just {\tt list}.\\ The constants {\tt nil} and {\tt cons} will have respectively types: \begin{coq_example} Check nil. Check cons. \end{coq_example} Types of destructors are also quantified with {\tt (A:Set)}. \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{Variants} \item \begin{coq_example*} Inductive list (A:Set) : Set := nil | cons (_:A) (_:list A). \end{coq_example*} This is an alternative definition of lists where we specify the arguments of the constructors rather than their full type. \end{Variants} \begin{ErrMsgs} \item \errindex{The {\num}th argument of {\ident} must be {\ident'} in {\type}} \end{ErrMsgs} \paragraph{New from \Coq{} V8.1} The condition on parameters for inductive definitions has been relaxed since \Coq{} V8.1. It is now possible in the type of a constructor, to invoke recursively the inductive definition on an argument which is not the parameter itself. One can define~: \begin{coq_example} Inductive list2 (A:Set) : Set := | nil2 : list2 A | cons2 : A -> list2 (A*A) -> list2 A. \end{coq_example} \begin{coq_eval} Reset list2. \end{coq_eval} that can also be written by specifying only the type of the arguments: \begin{coq_example*} Inductive list2 (A:Set) : Set := nil2 | cons2 (_:A) (_:list2 (A*A)). \end{coq_example*} But the following definition will give an error: \begin{coq_example} Inductive listw (A:Set) : Set := | nilw : listw (A*A) | consw : A -> listw (A*A) -> listw (A*A). \end{coq_example} Because the conclusion of the type of constructors should be {\tt listw A} in both cases. A parametrized inductive definition can be defined using annotations instead of parameters but it will sometimes give a different (bigger) sort for the inductive definition and will produce a less convenient rule for case elimination. \SeeAlso Sections~\ref{Cic-inductive-definitions} and~\ref{Tac-induction}. \subsubsection{Mutually defined inductive types \comindex{Inductive} \label{Mutual-Inductive}} The definition of a block of mutually inductive types has the form: \medskip {\tt \begin{tabular}{l} Inductive {\ident$_1$} : {\type$_1$} := \\ \begin{tabular}{clcl} & {\ident$_1^1$} &:& {\type$_1^1$} \\ | & {\ldots} && \\ | & {\ident$_{n_1}^1$} &:& {\type$_{n_1}^1$} \end{tabular} \\ with\\ ~{\ldots} \\ with {\ident$_m$} : {\type$_m$} := \\ \begin{tabular}{clcl} & {\ident$_1^m$} &:& {\type$_1^m$} \\ | & {\ldots} \\ | & {\ident$_{n_m}^m$} &:& {\type$_{n_m}^m$}. \end{tabular} \end{tabular} } \medskip \noindent It has the same semantics as the above {\tt Inductive} definition for each \ident$_1$, {\ldots}, \ident$_m$. 
All names \ident$_1$, {\ldots}, \ident$_m$ and \ident$_1^1$, \dots, \ident$_{n_m}^m$ are simultaneously added to the environment. Then well-typing of constructors can be checked. Each one of the \ident$_1$, {\ldots}, \ident$_m$ can be used on its own. It is also possible to parametrize these inductive definitions. However, parameters correspond to a local context in which the whole set of inductive declarations is done. For this reason, the parameters must be strictly the same for each inductive types The extended syntax is: \medskip {\tt \begin{tabular}{l} Inductive {\ident$_1$} {\params} : {\type$_1$} := \\ \begin{tabular}{clcl} & {\ident$_1^1$} &:& {\type$_1^1$} \\ | & {\ldots} && \\ | & {\ident$_{n_1}^1$} &:& {\type$_{n_1}^1$} \end{tabular} \\ with\\ ~{\ldots} \\ with {\ident$_m$} {\params} : {\type$_m$} := \\ \begin{tabular}{clcl} & {\ident$_1^m$} &:& {\type$_1^m$} \\ | & {\ldots} \\ | & {\ident$_{n_m}^m$} &:& {\type$_{n_m}^m$}. \end{tabular} \end{tabular} } \medskip \Example The typical example of a mutual inductive data type is the one for trees and forests. We assume given two types $A$ and $B$ as variables. It can be declared the following way. \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Variables A B : Set. Inductive tree : Set := node : A -> forest -> tree with forest : Set := | leaf : B -> forest | cons : tree -> forest -> forest. \end{coq_example*} This declaration generates automatically six induction principles. They are respectively called {\tt tree\_rec}, {\tt tree\_ind}, {\tt tree\_rect}, {\tt forest\_rec}, {\tt forest\_ind}, {\tt forest\_rect}. These ones are not the most general ones but are just the induction principles corresponding to each inductive part seen as a single inductive definition. To illustrate this point on our example, we give the types of {\tt tree\_rec} and {\tt forest\_rec}. \begin{coq_example} Check tree_rec. Check forest_rec. \end{coq_example} Assume we want to parametrize our mutual inductive definitions with the two type variables $A$ and $B$, the declaration should be done the following way: \begin{coq_eval} Reset tree. \end{coq_eval} \begin{coq_example*} Inductive tree (A B:Set) : Set := node : A -> forest A B -> tree A B with forest (A B:Set) : Set := | leaf : B -> forest A B | cons : tree A B -> forest A B -> forest A B. \end{coq_example*} Assume we define an inductive definition inside a section. When the section is closed, the variables declared in the section and occurring free in the declaration are added as parameters to the inductive definition. \SeeAlso Section~\ref{Section}. \subsubsection{Co-inductive types \label{CoInductiveTypes} \comindex{CoInductive}} The objects of an inductive type are well-founded with respect to the constructors of the type. In other words, such objects contain only a {\it finite} number of constructors. Co-inductive types arise from relaxing this condition, and admitting types whose objects contain an infinity of constructors. Infinite objects are introduced by a non-ending (but effective) process of construction, defined in terms of the constructors of the type. An example of a co-inductive type is the type of infinite sequences of natural numbers, usually called streams. It can be introduced in \Coq\ using the \texttt{CoInductive} command: \begin{coq_example} CoInductive Stream : Set := Seq : nat -> Stream -> Stream. \end{coq_example} The syntax of this command is the same as the command \texttt{Inductive} (see Section~\ref{gal_Inductive_Definitions}). 
Notice that no principle of induction is derived from the definition of a co-inductive type, since such principles only make sense for inductive ones. For co-inductive ones, the only elimination principle is case analysis. For example, the usual destructors on streams \texttt{hd:Stream->nat} and \texttt{tl:Str->Str} can be defined as follows: \begin{coq_example} Definition hd (x:Stream) := let (a,s) := x in a. Definition tl (x:Stream) := let (a,s) := x in s. \end{coq_example} Definition of co-inductive predicates and blocks of mutually co-inductive definitions are also allowed. An example of a co-inductive predicate is the extensional equality on streams: \begin{coq_example} CoInductive EqSt : Stream -> Stream -> Prop := eqst : forall s1 s2:Stream, hd s1 = hd s2 -> EqSt (tl s1) (tl s2) -> EqSt s1 s2. \end{coq_example} In order to prove the extensionally equality of two streams $s_1$ and $s_2$ we have to construct an infinite proof of equality, that is, an infinite object of type $(\texttt{EqSt}\;s_1\;s_2)$. We will see how to introduce infinite objects in Section~\ref{CoFixpoint}. %% %% (Co-)Fixpoints %% \subsection{Definition of recursive functions} \subsubsection{Definition of functions by recursion over inductive objects} This section describes the primitive form of definition by recursion over inductive objects. See Section~\ref{Function} for more advanced constructions. The command: \begin{center} \texttt{Fixpoint {\ident} {\params} {\tt \{struct} \ident$_0$ {\tt \}} : type$_0$ := \term$_0$ \comindex{Fixpoint}\label{Fixpoint}} \end{center} allows to define functions by pattern-matching over inductive objects using a fixed point construction. The meaning of this declaration is to define {\it ident} a recursive function with arguments specified by the binders in {\params} such that {\it ident} applied to arguments corresponding to these binders has type \type$_0$, and is equivalent to the expression \term$_0$. The type of the {\ident} is consequently {\tt forall {\params} {\tt,} \type$_0$} and the value is equivalent to {\tt fun {\params} {\tt =>} \term$_0$}. To be accepted, a {\tt Fixpoint} definition has to satisfy some syntactical constraints on a special argument called the decreasing argument. They are needed to ensure that the {\tt Fixpoint} definition always terminates. The point of the {\tt \{struct \ident {\tt \}}} annotation is to let the user tell the system which argument decreases along the recursive calls. For instance, one can define the addition function as : \begin{coq_example} Fixpoint add (n m:nat) {struct n} : nat := match n with | O => m | S p => S (add p m) end. \end{coq_example} The {\tt \{struct \ident {\tt \}}} annotation may be left implicit, in this case the system try successively arguments from left to right until it finds one that satisfies the decreasing condition. Note that some fixpoints may have several arguments that fit as decreasing arguments, and this choice influences the reduction of the fixpoint. Hence an explicit annotation must be used if the leftmost decreasing argument is not the desired one. Writing explicit annotations can also speed up type-checking of large mutual fixpoints. The {\tt match} operator matches a value (here \verb:n:) with the various constructors of its (inductive) type. The remaining arguments give the respective values to be returned, as functions of the parameters of the corresponding constructor. 
Thus here when \verb:n: equals \verb:O: we return \verb:m:, and when \verb:n: equals \verb:(S p): we return \verb:(S (add p m)):. The {\tt match} operator is formally described in detail in Section~\ref{Caseexpr}. The system recognizes that in the inductive call {\tt (add p m)} the first argument actually decreases because it is a {\em pattern variable} coming from {\tt match n with}. \Example The following definition is not correct and generates an error message: \begin{coq_eval} Set Printing Depth 50. (********** The following is not correct and should produce **********) (********* Error: Recursive call to wrongplus ... **********) \end{coq_eval} \begin{coq_example} Fixpoint wrongplus (n m:nat) {struct n} : nat := match m with | O => n | S p => S (wrongplus n p) end. \end{coq_example} because the declared decreasing argument {\tt n} actually does not decrease in the recursive call. The function computing the addition over the second argument should rather be written: \begin{coq_example*} Fixpoint plus (n m:nat) {struct m} : nat := match m with | O => n | S p => S (plus n p) end. \end{coq_example*} The ordinary match operation on natural numbers can be mimicked in the following way. \begin{coq_example*} Fixpoint nat_match (C:Set) (f0:C) (fS:nat -> C -> C) (n:nat) {struct n} : C := match n with | O => f0 | S p => fS p (nat_match C f0 fS p) end. \end{coq_example*} The recursive call may not only be on direct subterms of the recursive variable {\tt n} but also on a deeper subterm and we can directly write the function {\tt mod2} which gives the remainder modulo 2 of a natural number. \begin{coq_example*} Fixpoint mod2 (n:nat) : nat := match n with | O => O | S p => match p with | O => S O | S q => mod2 q end end. \end{coq_example*} In order to keep the strong normalization property, the fixed point reduction will only be performed when the argument in position of the decreasing argument (which type should be in an inductive definition) starts with a constructor. The {\tt Fixpoint} construction enjoys also the {\tt with} extension to define functions over mutually defined inductive types or more generally any mutually recursive definitions. \begin{Variants} \item {\tt Fixpoint {\ident$_1$} {\params$_1$} :{\type$_1$} := {\term$_1$}\\ with {\ldots} \\ with {\ident$_m$} {\params$_m$} :{\type$_m$} := {\term$_m$}}\\ Allows to define simultaneously {\ident$_1$}, {\ldots}, {\ident$_m$}. \end{Variants} \Example The size of trees and forests can be defined the following way: \begin{coq_eval} Reset Initial. Variables A B : Set. Inductive tree : Set := node : A -> forest -> tree with forest : Set := | leaf : B -> forest | cons : tree -> forest -> forest. \end{coq_eval} \begin{coq_example*} Fixpoint tree_size (t:tree) : nat := match t with | node a f => S (forest_size f) end with forest_size (f:forest) : nat := match f with | leaf b => 1 | cons t f' => (tree_size t + forest_size f') end. \end{coq_example*} A generic command {\tt Scheme} is useful to build automatically various mutual induction principles. It is described in Section~\ref{Scheme}. \subsubsection{Definitions of recursive objects in co-inductive types} The command: \begin{center} \texttt{CoFixpoint {\ident} : \type$_0$ := \term$_0$} \comindex{CoFixpoint}\label{CoFixpoint} \end{center} introduces a method for constructing an infinite object of a coinduc\-tive type. 
For example, the stream containing all natural numbers can be introduced applying the following method to the number \texttt{O} (see Section~\ref{CoInductiveTypes} for the definition of {\tt Stream}, {\tt hd} and {\tt tl}): \begin{coq_eval} Reset Initial. CoInductive Stream : Set := Seq : nat -> Stream -> Stream. Definition hd (x:Stream) := match x with | Seq a s => a end. Definition tl (x:Stream) := match x with | Seq a s => s end. \end{coq_eval} \begin{coq_example} CoFixpoint from (n:nat) : Stream := Seq n (from (S n)). \end{coq_example} Oppositely to recursive ones, there is no decreasing argument in a co-recursive definition. To be admissible, a method of construction must provide at least one extra constructor of the infinite object for each iteration. A syntactical guard condition is imposed on co-recursive definitions in order to ensure this: each recursive call in the definition must be protected by at least one constructor, and only by constructors. That is the case in the former definition, where the single recursive call of \texttt{from} is guarded by an application of \texttt{Seq}. On the contrary, the following recursive function does not satisfy the guard condition: \begin{coq_eval} Set Printing Depth 50. (********** The following is not correct and should produce **********) (***************** Error: Unguarded recursive call *******************) \end{coq_eval} \begin{coq_example} CoFixpoint filter (p:nat -> bool) (s:Stream) : Stream := if p (hd s) then Seq (hd s) (filter p (tl s)) else filter p (tl s). \end{coq_example} The elimination of co-recursive definition is done lazily, i.e. the definition is expanded only when it occurs at the head of an application which is the argument of a case analysis expression. In any other context, it is considered as a canonical expression which is completely evaluated. We can test this using the command \texttt{Eval}, which computes the normal forms of a term: \begin{coq_example} Eval compute in (from 0). Eval compute in (hd (from 0)). Eval compute in (tl (from 0)). \end{coq_example} \begin{Variants} \item{\tt CoFixpoint {\ident$_1$} {\params} :{\type$_1$} := {\term$_1$}}\\ As for most constructions, arguments of co-fixpoints expressions can be introduced before the {\tt :=} sign. \item{\tt CoFixpoint {\ident$_1$} :{\type$_1$} := {\term$_1$}\\ with\\ \mbox{}\hspace{0.1cm} $\ldots$ \\ with {\ident$_m$} : {\type$_m$} := {\term$_m$}}\\ As in the \texttt{Fixpoint} command (see Section~\ref{Fixpoint}), it is possible to introduce a block of mutually dependent methods. \end{Variants} %% %% Theorems & Lemmas %% \subsection{Assertions and proofs} \label{Assertions} An assertion states a proposition (or a type) of which the proof (or an inhabitant of the type) is interactively built using tactics. The interactive proof mode is described in Chapter~\ref{Proof-handling} and the tactics in Chapter~\ref{Tactics}. The basic assertion command is: \subsubsection{\tt Theorem {\ident} \zeroone{\binders} : {\type}. \comindex{Theorem}} After the statement is asserted, {\Coq} needs a proof. Once a proof of {\type} under the assumptions represented by {\binders} is given and validated, the proof is generalized into a proof of {\tt forall \zeroone{\binders}, {\type}} and the theorem is bound to the name {\ident} in the environment. \begin{ErrMsgs} \item \errindex{The term {\form} has type {\ldots} which should be Set, Prop or Type} \item \errindexbis{{\ident} already exists}{already exists} The name you provided is already defined. 
You then have to choose another name.
\end{ErrMsgs}
\begin{Variants}
\item {\tt Lemma {\ident} \zeroone{\binders} : {\type}.}\comindex{Lemma}\\
{\tt Remark {\ident} \zeroone{\binders} : {\type}.}\comindex{Remark}\\
{\tt Fact {\ident} \zeroone{\binders} : {\type}.}\comindex{Fact}\\
{\tt Corollary {\ident} \zeroone{\binders} : {\type}.}\comindex{Corollary}\\
{\tt Proposition {\ident} \zeroone{\binders} : {\type}.}\comindex{Proposition}
These commands are synonyms of \texttt{Theorem {\ident} \zeroone{\binders} : {\type}}.
\item {\tt Theorem \nelist{{\ident} \zeroone{\binders}: {\type}}{with}.}
This command is useful for theorems that are proved by simultaneous induction over a mutually inductive assumption, or that assert mutually dependent statements in some mutual co-inductive type. It is equivalent to {\tt Fixpoint} or {\tt CoFixpoint} (see Section~\ref{CoFixpoint}) but using tactics to build the proof of the statements (or the body of the specification, depending on the point of view). The inductive or co-inductive types on which the induction or coinduction has to be done are assumed to be non-ambiguous and are guessed by the system. As in a {\tt Fixpoint} or {\tt CoFixpoint} definition, the induction hypotheses have to be used on {\em structurally smaller} arguments (for a {\tt Fixpoint}) or be {\em guarded by a constructor} (for a {\tt CoFixpoint}). The verification that recursive proof arguments are correct is done only at the time of registering the lemma in the environment. To check whether the use of induction hypotheses is correct at some point of the interactive development of a proof, use the command {\tt Guarded} (see Section~\ref{Guarded}). The command can also be used with {\tt Lemma}, {\tt Remark}, etc. instead of {\tt Theorem}.
\item {\tt Definition {\ident} \zeroone{\binders} : {\type}.}
This allows defining a term of type {\type} using the proof editing mode. It behaves like {\tt Theorem} but is intended to be used in conjunction with {\tt Defined} (see \ref{Defined}) in order to define a constant whose computational behavior is relevant. The command can also be used with {\tt Example} instead of {\tt Definition}.
\SeeAlso Sections~\ref{Opaque} and~\ref{Transparent} ({\tt Opaque} and {\tt Transparent}) and~\ref{unfold} (tactic {\tt unfold}).
\item {\tt Let {\ident} \zeroone{\binders} : {\type}.}
Like {\tt Definition {\ident} \zeroone{\binders} : {\type}.} except that the definition is turned into a local definition generalized over the declarations depending on it after closing the current section.
\item {\tt Fixpoint \nelist{{\ident} {\binders} \zeroone{\annotation} {\typecstr} \zeroone{{\tt :=} {\term}}}{with}.} \comindex{Fixpoint}
This generalizes the syntax of {\tt Fixpoint} so that one or more bodies can be defined interactively using the proof editing mode (when a body is omitted, its type is mandatory in the syntax). When the block of proofs is completed, it is intended to be ended by {\tt Defined}.
\item {\tt CoFixpoint \nelist{{\ident} \zeroone{\binders} {\typecstr} \zeroone{{\tt :=} {\term}}}{with}.} \comindex{CoFixpoint}
This generalizes the syntax of {\tt CoFixpoint} so that one or more bodies can be defined interactively using the proof editing mode.
\end{Variants}
\subsubsection{{\tt Proof.} {\dots} {\tt Qed.} \comindex{Proof} \comindex{Qed}}
A proof starts with the keyword {\tt Proof}. Then {\Coq} enters the proof editing mode until the proof is completed. The proof editing mode essentially contains tactics that are described in Chapter~\ref{Tactics}.
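For instance, the following script (where the name {\tt plus\_zero} is arbitrary and assumed not to be already defined) states a simple proposition, proves it with a few tactics and registers it with {\tt Qed}:
\begin{verbatim}
Lemma plus_zero : forall n:nat, n + 0 = n.
Proof.
  intro n; induction n; simpl.
  reflexivity.
  rewrite IHn; reflexivity.
Qed.
\end{verbatim}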
Besides tactics, there are commands to manage the proof editing mode. They are described in Chapter~\ref{Proof-handling}. When the proof is completed it should be validated and put in the environment using the keyword {\tt Qed}. \medskip \ErrMsg \begin{enumerate} \item \errindex{{\ident} already exists} \end{enumerate} \begin{Remarks} \item Several statements can be simultaneously asserted. \item Not only other assertions but any vernacular command can be given while in the process of proving a given assertion. In this case, the command is understood as if it would have been given before the statements still to be proved. \item {\tt Proof} is recommended but can currently be omitted. On the opposite side, {\tt Qed} (or {\tt Defined}, see below) is mandatory to validate a proof. \item Proofs ended by {\tt Qed} are declared opaque. Their content cannot be unfolded (see \ref{Conversion-tactics}), thus realizing some form of {\em proof-irrelevance}. To be able to unfold a proof, the proof should be ended by {\tt Defined} (see below). \end{Remarks} \begin{Variants} \item \comindex{Defined} {\tt Proof.} {\dots} {\tt Defined.}\\ Same as {\tt Proof.} {\dots} {\tt Qed.} but the proof is then declared transparent, which means that its content can be explicitly used for type-checking and that it can be unfolded in conversion tactics (see \ref{Conversion-tactics}, \ref{Opaque}, \ref{Transparent}). %Not claimed to be part of Gallina... %\item {\tt Proof.} {\dots} {\tt Save.}\\ % Same as {\tt Proof.} {\dots} {\tt Qed.} %\item {\tt Goal} \type {\dots} {\tt Save} \ident \\ % Same as {\tt Lemma} \ident {\tt :} \type \dots {\tt Save.} % This is intended to be used in the interactive mode. \item \comindex{Admitted} {\tt Proof.} {\dots} {\tt Admitted.}\\ Turns the current asserted statement into an axiom and exits the proof mode. \end{Variants} % Local Variables: % mode: LaTeX % TeX-master: "Reference-Manual" % End: coq-8.4pl2/doc/refman/RefMan-tacex.tex0000640000175000001440000007262311776416511016663 0ustar notinusers\chapter[Detailed examples of tactics]{Detailed examples of tactics\label{Tactics-examples}} This chapter presents detailed examples of certain tactics, to illustrate their behavior. \section[\tt dependent induction]{\tt dependent induction\label{dependent-induction-example}} \def\depind{{\tt dependent induction}~} \def\depdestr{{\tt dependent destruction}~} The tactics \depind and \depdestr are another solution for inverting inductive predicate instances and potentially doing induction at the same time. It is based on the \texttt{BasicElim} tactic of Conor McBride which works by abstracting each argument of an inductive instance by a variable and constraining it by equalities afterwards. This way, the usual {\tt induction} and {\tt destruct} tactics can be applied to the abstracted instance and after simplification of the equalities we get the expected goals. The abstracting tactic is called {\tt generalize\_eqs} and it takes as argument an hypothesis to generalize. It uses the {\tt JMeq} datatype defined in {\tt Coq.Logic.JMeq}, hence we need to require it before. For example, revisiting the first example of the inversion documentation above: \begin{coq_example*} Require Import Coq.Logic.JMeq. \end{coq_example*} \begin{coq_eval} Require Import Coq.Program.Equality. \end{coq_eval} \begin{coq_eval} Inductive Le : nat -> nat -> Set := | LeO : forall n:nat, Le 0 n | LeS : forall n m:nat, Le n m -> Le (S n) (S m). Variable P : nat -> nat -> Prop. Variable Q : forall n m:nat, Le n m -> Prop. 
\end{coq_eval}
\begin{coq_example*}
Goal forall n m:nat, Le (S n) m -> P n m.
intros n m H.
\end{coq_example*}
\begin{coq_example}
generalize_eqs H.
\end{coq_example}
The index {\tt S n} gets abstracted by a variable here, but a corresponding equality is added under the abstract instance so that no information is actually lost. The goal is now almost amenable to induction or case analysis. One should indeed first move {\tt n} into the goal to strengthen it before doing induction, or {\tt n} will be fixed in the inductive hypotheses (this does not matter for case analysis). As a rule of thumb, all the variables that appear inside constructors in the indices of the hypothesis should be generalized. This is exactly what the \texttt{generalize\_eqs\_vars} variant does:
\begin{coq_eval}
Undo 1.
\end{coq_eval}
\begin{coq_example}
generalize_eqs_vars H.
induction H.
\end{coq_example}
As the hypothesis itself did not appear in the goal, we did not need to use a heterogeneous equality to relate the new hypothesis to the old one (which just disappeared here). However, the tactic works just as well in this case, e.g.:
\begin{coq_eval}
Admitted.
\end{coq_eval}
\begin{coq_example}
Goal forall n m (p : Le (S n) m), Q (S n) m p.
intros n m p ; generalize_eqs_vars p.
\end{coq_example}
One drawback of this approach is that in the branches one will have to substitute the equalities back into the instance to get the right assumptions. Sometimes injection of constructors will also be needed to recover the needed equalities. Also, some subgoals should be directly solved because of inconsistent contexts arising from the constraints on indices. The nice thing is that we can make a tactic based on discriminate, injection and variants of substitution to automatically do such simplifications (which may involve the K axiom). This is what the {\tt simplify\_dep\_elim} tactic from {\tt Coq.Program.Equality} does. For example, we might simplify the previous goals considerably:
% \begin{coq_eval}
% Abort.
% Goal forall n m:nat, Le (S n) m -> P n m.
% intros n m H ; generalize_eqs_vars H.
% \end{coq_eval}
\begin{coq_example}
induction p ; simplify_dep_elim.
\end{coq_example}
The higher-order tactic {\tt do\_depind} defined in {\tt Coq.Program.Equality} takes a tactic and combines the building blocks we have seen with it: generalizing by equalities, calling the given tactic with the generalized induction hypothesis as argument, and cleaning the subgoals with respect to equalities. Its most important instantiations are \depind and \depdestr that do induction or simply case analysis on the generalized hypothesis. For example, we can redo what we've done manually with \depdestr:
\begin{coq_eval}
Abort.
\end{coq_eval}
\begin{coq_example*}
Require Import Coq.Program.Equality.
Lemma ex : forall n m:nat, Le (S n) m -> P n m.
intros n m H.
\end{coq_example*}
\begin{coq_example}
dependent destruction H.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}
This gives essentially the same result as inversion. Now if the destructed hypothesis actually appeared in the goal, the tactic would still be able to invert it, contrary to {\tt dependent inversion}. Consider the following example on vectors:
\begin{coq_example*}
Require Import Coq.Program.Equality.
Set Implicit Arguments.
Variable A : Set.
Inductive vector : nat -> Type :=
| vnil : vector 0
| vcons : A -> forall n, vector n -> vector (S n).
Goal forall n, forall v : vector (S n),
  exists v' : vector n, exists a : A, v = vcons a v'.
intros n v.
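(* The goal is now:
   exists v' : vector n, exists a : A, v = vcons a v' *)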
\end{coq_example*} \begin{coq_example} dependent destruction v. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} In this case, the {\tt v} variable can be replaced in the goal by the generalized hypothesis only when it has a type of the form {\tt vector (S n)}, that is only in the second case of the {\tt destruct}. The first one is dismissed because {\tt S n <> 0}. \subsection{A larger example} Let's see how the technique works with {\tt induction} on inductive predicates on a real example. We will develop an example application to the theory of simply-typed lambda-calculus formalized in a dependently-typed style: \begin{coq_example*} Inductive type : Type := | base : type | arrow : type -> type -> type. Notation " t --> t' " := (arrow t t') (at level 20, t' at next level). Inductive ctx : Type := | empty : ctx | snoc : ctx -> type -> ctx. Notation " G , tau " := (snoc G tau) (at level 20, t at next level). Fixpoint conc (G D : ctx) : ctx := match D with | empty => G | snoc D' x => snoc (conc G D') x end. Notation " G ; D " := (conc G D) (at level 20). Inductive term : ctx -> type -> Type := | ax : forall G tau, term (G, tau) tau | weak : forall G tau, term G tau -> forall tau', term (G, tau') tau | abs : forall G tau tau', term (G , tau) tau' -> term G (tau --> tau') | app : forall G tau tau', term G (tau --> tau') -> term G tau -> term G tau'. \end{coq_example*} We have defined types and contexts which are snoc-lists of types. We also have a {\tt conc} operation that concatenates two contexts. The {\tt term} datatype represents in fact the possible typing derivations of the calculus, which are isomorphic to the well-typed terms, hence the name. A term is either an application of: \begin{itemize} \item the axiom rule to type a reference to the first variable in a context, \item the weakening rule to type an object in a larger context \item the abstraction or lambda rule to type a function \item the application to type an application of a function to an argument \end{itemize} Once we have this datatype we want to do proofs on it, like weakening: \begin{coq_example*} Lemma weakening : forall G D tau, term (G ; D) tau -> forall tau', term (G , tau' ; D) tau. \end{coq_example*} \begin{coq_eval} Abort. \end{coq_eval} The problem here is that we can't just use {\tt induction} on the typing derivation because it will forget about the {\tt G ; D} constraint appearing in the instance. A solution would be to rewrite the goal as: \begin{coq_example*} Lemma weakening' : forall G' tau, term G' tau -> forall G D, (G ; D) = G' -> forall tau', term (G, tau' ; D) tau. \end{coq_example*} \begin{coq_eval} Abort. \end{coq_eval} With this proper separation of the index from the instance and the right induction loading (putting {\tt G} and {\tt D} after the inducted-on hypothesis), the proof will go through, but it is a very tedious process. One is also forced to make a wrapper lemma to get back the more natural statement. The \depind tactic alleviates this trouble by doing all of this plumbing of generalizing and substituting back automatically. Indeed we can simply write: \begin{coq_example*} Require Import Coq.Program.Tactics. Lemma weakening : forall G D tau, term (G ; D) tau -> forall tau', term (G , tau' ; D) tau. Proof with simpl in * ; simpl_depind ; auto. intros G D tau H. dependent induction H generalizing G D ; intros. 
\end{coq_example*}
This call to \depind has an additional argument, which is a list of variables appearing in the instance that should be generalized in the goal, so that they can vary in the induction hypotheses. By default, all variables appearing inside constructors (except in a parameter position) of the instantiated hypothesis will be generalized automatically, but one can always give the list explicitly.
\begin{coq_example}
Show.
\end{coq_example}
The {\tt simpl\_depind} tactic includes an automatic tactic that tries to simplify equalities appearing at the beginning of induction hypotheses, generally using trivial applications of reflexivity. In cases where the equality is not between constructor forms though, one must help the automation by giving some arguments, using the {\tt specialize} tactic.
\begin{coq_example*}
destruct D... apply weak ; apply ax.
apply ax.
destruct D...
\end{coq_example*}
\begin{coq_example}
Show.
\end{coq_example}
\begin{coq_example}
specialize (IHterm G empty).
\end{coq_example}
Then the automation can find the needed equality {\tt G = G} to narrow the induction hypothesis further. This concludes our example.
\begin{coq_example}
simpl_depind.
\end{coq_example}
\SeeAlso The induction \ref{elim}, case \ref{case} and inversion \ref{inversion} tactics.
\section[\tt autorewrite]{\tt autorewrite\label{autorewrite-example}}
Here are two examples of the use of {\tt autorewrite}. The first one ({\em Ackermann function}) actually shows a quite basic use where there is no conditional rewriting. The second one ({\em Mac Carthy function}) involves conditional rewritings and shows how to deal with them using the optional tactic of the {\tt Hint~Rewrite} command.
\firstexample
\example{Ackermann function}
%Here is a basic use of {\tt AutoRewrite} with the Ackermann function:
\begin{coq_example*}
Reset Initial.
Require Import Arith.
Variable Ack : nat -> nat -> nat.
Axiom Ack0 : forall m:nat, Ack 0 m = S m.
Axiom Ack1 : forall n:nat, Ack (S n) 0 = Ack n 1.
Axiom Ack2 : forall n m:nat, Ack (S n) (S m) = Ack n (Ack (S n) m).
\end{coq_example*}
\begin{coq_example}
Hint Rewrite Ack0 Ack1 Ack2 : base0.
Lemma ResAck0 : Ack 3 2 = 29.
autorewrite with base0 using try reflexivity.
\end{coq_example}
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\example{Mac Carthy function}
%The Mac Carthy function shows a more complex case:
\begin{coq_example*}
Require Import Omega.
Variable g : nat -> nat -> nat.
Axiom g0 : forall m:nat, g 0 m = m.
Axiom g1 : forall n m:nat, (n > 0) -> (m > 100) -> g n m = g (pred n) (m - 10).
Axiom g2 : forall n m:nat, (n > 0) -> (m <= 100) -> g n m = g (S n) (m + 11).
\end{coq_example*}
\begin{coq_example}
Hint Rewrite g0 g1 g2 using omega : base1.
Lemma Resg0 : g 1 110 = 100.
autorewrite with base1 using reflexivity || simpl.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}
\begin{coq_example}
Lemma Resg1 : g 1 95 = 91.
autorewrite with base1 using reflexivity || simpl.
\end{coq_example}
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\section[\tt quote]{\tt quote\tacindex{quote} \label{quote-examples}}
The tactic \texttt{quote} allows one to use Barendregt's so-called 2-level approach without writing any ML code. Suppose you have a language \texttt{L} of 'abstract terms' and a type \texttt{A} of 'concrete terms', and a function \texttt{f : L -> A}. If \texttt{L} is a simple inductive datatype and \texttt{f} a simple fixpoint, \texttt{quote f} will replace the head of the current goal by a convertible term of the form \texttt{(f t)}.
\texttt{L} must have a constructor of type \texttt{A -> L}. Here is an example:
\begin{coq_example}
Require Import Quote.
Parameters A B C : Prop.
Inductive formula : Type :=
  | f_and : formula -> formula -> formula (* binary constructor *)
  | f_or : formula -> formula -> formula
  | f_not : formula -> formula (* unary constructor *)
  | f_true : formula (* 0-ary constructor *)
  | f_const : Prop -> formula (* constructor for constants *).
Fixpoint interp_f (f: formula) : Prop :=
  match f with
  | f_and f1 f2 => interp_f f1 /\ interp_f f2
  | f_or f1 f2 => interp_f f1 \/ interp_f f2
  | f_not f1 => ~ interp_f f1
  | f_true => True
  | f_const c => c
  end.
Goal A /\ (A \/ True) /\ ~ B /\ (A <-> A).
quote interp_f.
\end{coq_example}
The algorithm to perform this inversion is: try to match the term with the right-hand side expressions of \texttt{f}. If there is a match, apply the corresponding left-hand side and call the algorithm recursively on sub-terms. If there is no match, we are at a leaf: return the corresponding constructor (here \texttt{f\_const}) applied to the term.
\begin{ErrMsgs}
\item \errindex{quote: not a simple fixpoint} \\
Happens when \texttt{quote} is not able to perform inversion properly.
\end{ErrMsgs}
\subsection{Introducing a variables map}
The normal use of \texttt{quote} is to make proofs by reflection: one defines a function \texttt{simplify : formula -> formula} and proves a theorem \texttt{simplify\_ok: (f:formula)(interp\_f (simplify f)) -> (interp\_f f)}. Then, one can simplify formulas by doing:
\begin{verbatim}
quote interp_f.
apply simplify_ok.
compute.
\end{verbatim}
But there is a problem with leaves: in the example above one cannot write a function that implements, for example, the logical simplifications $A \land A \ra A$ or $A \land \lnot A \ra \texttt{False}$. This is because \Prop{} is impredicative. It is better to use the following type of formulas:
\begin{coq_eval}
Reset formula.
\end{coq_eval}
\begin{coq_example}
Inductive formula : Set :=
  | f_and : formula -> formula -> formula
  | f_or : formula -> formula -> formula
  | f_not : formula -> formula
  | f_true : formula
  | f_atom : index -> formula.
\end{coq_example}
\texttt{index} is defined in module \texttt{quote}. Equality on that type is decidable so we are able to simplify $A \land A$ into $A$ at the abstract level. When there are variables, there are bindings, and \texttt{quote} also provides a type \texttt{(varmap A)} of bindings from \texttt{index} to any set \texttt{A}, and a function \texttt{varmap\_find} to search in such maps. The interpretation function now has another argument, a variables map:
\begin{coq_example}
Fixpoint interp_f (vm: varmap Prop) (f:formula) {struct f} : Prop :=
  match f with
  | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2
  | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2
  | f_not f1 => ~ interp_f vm f1
  | f_true => True
  | f_atom i => varmap_find True i vm
  end.
\end{coq_example}
\noindent\texttt{quote} handles this second case properly:
\begin{coq_example}
Goal A /\ (B \/ A) /\ (A \/ ~ B).
quote interp_f.
\end{coq_example}
It builds \texttt{vm} and \texttt{t} such that \texttt{(f vm t)} is convertible with the conclusion of the current goal.
\subsection{Combining variables and constants}
One can have both variables and constants in abstract terms; that is the case, for example, for the \texttt{ring} tactic (Chapter~\ref{ring}). Then one must provide to \texttt{quote} a list of \emph{constructors of constants}.
For example, if the list is \texttt{[O S]} then closed natural numbers will be considered as constants and other terms as variables. Example:
\begin{coq_eval}
Reset formula.
\end{coq_eval}
\begin{coq_example*}
Inductive formula : Type :=
  | f_and : formula -> formula -> formula
  | f_or : formula -> formula -> formula
  | f_not : formula -> formula
  | f_true : formula
  | f_const : Prop -> formula (* constructor for constants *)
  | f_atom : index -> formula (* constructor for variables *).
Fixpoint interp_f (vm: varmap Prop) (f:formula) {struct f} : Prop :=
  match f with
  | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2
  | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2
  | f_not f1 => ~ interp_f vm f1
  | f_true => True
  | f_const c => c
  | f_atom i => varmap_find True i vm
  end.
Goal A /\ (A \/ True) /\ ~ B /\ (C <-> C).
\end{coq_example*}
\begin{coq_example}
quote interp_f [ A B ].
Undo.
quote interp_f [ B C iff ].
\end{coq_example}
\Warning Since function inversion is undecidable in the general case, don't expect miracles from it!
\begin{Variants}
\item {\tt quote {\ident} in {\term} using {\tac}}
\tac\ must be a functional tactic (starting with {\tt fun x =>}) and will be called with the quoted version of \term\ according to \ident.
\item {\tt quote {\ident} [ \ident$_1$ \dots\ \ident$_n$ ] in {\term} using {\tac}}
Same as above, but will use \ident$_1$, \dots, \ident$_n$ to choose which subterms are constants (see above).
\end{Variants}
% \SeeAlso file \texttt{theories/DEMOS/DemoQuote.v}
\SeeAlso comments of source file \texttt{plugins/quote/quote.ml}
\SeeAlso the \texttt{ring} tactic (Chapter~\ref{ring})
\section{Using the tactical language}
\subsection{About the cardinality of the set of natural numbers}
A first example which shows how to use pattern matching over the proof contexts is the proof that natural numbers have more than two elements. The proof of such a lemma can be done as
%shown on Figure~\ref{cnatltac}.
follows:
%\begin{figure}
%\begin{centerframe}
\begin{coq_eval}
Reset Initial.
Require Import Arith.
Require Import List.
\end{coq_eval}
\begin{coq_example*}
Lemma card_nat :
 ~ (exists x : nat, exists y : nat, forall z:nat, x = z \/ y = z).
Proof.
red; intros (x, (y, Hy)).
elim (Hy 0); elim (Hy 1); elim (Hy 2); intros;
 match goal with
 | [_:(?a = ?b),_:(?a = ?c) |- _ ] =>
     cut (b = c); [ discriminate | transitivity a; auto ]
 end.
Qed.
\end{coq_example*}
%\end{centerframe}
%\caption{A proof on cardinality of natural numbers}
%\label{cnatltac}
%\end{figure}
We can notice that all the (very similar) cases coming from the three eliminations (with three distinct natural numbers) are successfully solved by a {\tt match goal} structure and, in particular, with only one pattern (use of non-linear matching).
\subsection{Permutation on closed lists}
\begin{figure}
\begin{centerframe}
\begin{coq_example*}
Section Sort.
Variable A : Set.
Inductive permut : list A -> list A -> Prop :=
  | permut_refl : forall l, permut l l
  | permut_cons :
      forall a l0 l1, permut l0 l1 -> permut (a :: l0) (a :: l1)
  | permut_append : forall a l, permut (a :: l) (l ++ a :: nil)
  | permut_trans :
      forall l0 l1 l2, permut l0 l1 -> permut l1 l2 -> permut l0 l2.
End Sort.
\end{coq_example*} \end{centerframe} \caption{Definition of the permutation predicate} \label{permutpred} \end{figure} A more complex example is the problem of permutation on closed lists. The aim is to show that a closed list is a permutation of another one. First, we define the permutation predicate as shown on Figure~\ref{permutpred}. \begin{figure} \begin{centerframe} \begin{coq_example} Ltac Permut n := match goal with | |- (permut _ ?l ?l) => apply permut_refl | |- (permut _ (?a :: ?l1) (?a :: ?l2)) => let newn := eval compute in (length l1) in (apply permut_cons; Permut newn) | |- (permut ?A (?a :: ?l1) ?l2) => match eval compute in n with | 1 => fail | _ => let l1' := constr:(l1 ++ a :: nil) in (apply (permut_trans A (a :: l1) l1' l2); [ apply permut_append | compute; Permut (pred n) ]) end end. Ltac PermutProve := match goal with | |- (permut _ ?l1 ?l2) => match eval compute in (length l1 = length l2) with | (?n = ?n) => Permut n end end. \end{coq_example} \end{centerframe} \caption{Permutation tactic} \label{permutltac} \end{figure} Next, we can write naturally the tactic and the result can be seen on Figure~\ref{permutltac}. We can notice that we use two toplevel definitions {\tt PermutProve} and {\tt Permut}. The function to be called is {\tt PermutProve} which computes the lengths of the two lists and calls {\tt Permut} with the length if the two lists have the same length. {\tt Permut} works as expected. If the two lists are equal, it concludes. Otherwise, if the lists have identical first elements, it applies {\tt Permut} on the tail of the lists. Finally, if the lists have different first elements, it puts the first element of one of the lists (here the second one which appears in the {\tt permut} predicate) at the end if that is possible, i.e., if the new first element has been at this place previously. To verify that all rotations have been done for a list, we use the length of the list as an argument for {\tt Permut} and this length is decremented for each rotation down to, but not including, 1 because for a list of length $n$, we can make exactly $n-1$ rotations to generate at most $n$ distinct lists. Here, it must be noticed that we use the natural numbers of {\Coq} for the rotation counter. On Figure~\ref{ltac}, we can see that it is possible to use usual natural numbers but they are only used as arguments for primitive tactics and they cannot be handled, in particular, we cannot make computations with them. So, a natural choice is to use {\Coq} data structures so that {\Coq} makes the computations (reductions) by {\tt eval compute in} and we can get the terms back by {\tt match}. With {\tt PermutProve}, we can now prove lemmas as % shown on Figure~\ref{permutlem}. follows: %\begin{figure} %\begin{centerframe} \begin{coq_example*} Lemma permut_ex1 : permut nat (1 :: 2 :: 3 :: nil) (3 :: 2 :: 1 :: nil). Proof. PermutProve. Qed. Lemma permut_ex2 : permut nat (0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 :: nil) (0 :: 2 :: 4 :: 6 :: 8 :: 9 :: 7 :: 5 :: 3 :: 1 :: nil). Proof. PermutProve. Qed. \end{coq_example*} %\end{centerframe} %\caption{Examples of {\tt PermutProve} use} %\label{permutlem} %\end{figure} \subsection{Deciding intuitionistic propositional logic} \begin{figure}[b] \begin{centerframe} \begin{coq_example} Ltac Axioms := match goal with | |- True => trivial | _:False |- _ => elimtype False; assumption | _:?A |- ?A => auto end. 
\end{coq_example} \end{centerframe} \caption{Deciding intuitionistic propositions (1)} \label{tautoltaca} \end{figure} \begin{figure} \begin{centerframe} \begin{coq_example} Ltac DSimplif := repeat (intros; match goal with | id:(~ _) |- _ => red in id | id:(_ /\ _) |- _ => elim id; do 2 intro; clear id | id:(_ \/ _) |- _ => elim id; intro; clear id | id:(?A /\ ?B -> ?C) |- _ => cut (A -> B -> C); [ intro | intros; apply id; split; assumption ] | id:(?A \/ ?B -> ?C) |- _ => cut (B -> C); [ cut (A -> C); [ intros; clear id | intro; apply id; left; assumption ] | intro; apply id; right; assumption ] | id0:(?A -> ?B),id1:?A |- _ => cut B; [ intro; clear id0 | apply id0; assumption ] | |- (_ /\ _) => split | |- (~ _) => red end). Ltac TautoProp := DSimplif; Axioms || match goal with | id:((?A -> ?B) -> ?C) |- _ => cut (B -> C); [ intro; cut (A -> B); [ intro; cut C; [ intro; clear id | apply id; assumption ] | clear id ] | intro; apply id; intro; assumption ]; TautoProp | id:(~ ?A -> ?B) |- _ => cut (False -> B); [ intro; cut (A -> False); [ intro; cut B; [ intro; clear id | apply id; assumption ] | clear id ] | intro; apply id; red; intro; assumption ]; TautoProp | |- (_ \/ _) => (left; TautoProp) || (right; TautoProp) end. \end{coq_example} \end{centerframe} \caption{Deciding intuitionistic propositions (2)} \label{tautoltacb} \end{figure} The pattern matching on goals allows a complete and so a powerful backtracking when returning tactic values. An interesting application is the problem of deciding intuitionistic propositional logic. Considering the contraction-free sequent calculi {\tt LJT*} of Roy~Dyckhoff (\cite{Dyc92}), it is quite natural to code such a tactic using the tactic language as shown on Figures~\ref{tautoltaca} and~\ref{tautoltacb}. The tactic {\tt Axioms} tries to conclude using usual axioms. The tactic {\tt DSimplif} applies all the reversible rules of Dyckhoff's system. Finally, the tactic {\tt TautoProp} (the main tactic to be called) simplifies with {\tt DSimplif}, tries to conclude with {\tt Axioms} and tries several paths using the backtracking rules (one of the four Dyckhoff's rules for the left implication to get rid of the contraction and the right or). For example, with {\tt TautoProp}, we can prove tautologies like those: % on Figure~\ref{tautolem}. %\begin{figure}[tbp] %\begin{centerframe} \begin{coq_example*} Lemma tauto_ex1 : forall A B:Prop, A /\ B -> A \/ B. Proof. TautoProp. Qed. Lemma tauto_ex2 : forall A B:Prop, (~ ~ B -> B) -> (A -> B) -> ~ ~ A -> B. Proof. TautoProp. Qed. \end{coq_example*} %\end{centerframe} %\caption{Proofs of tautologies with {\tt TautoProp}} %\label{tautolem} %\end{figure} \subsection{Deciding type isomorphisms} A more tricky problem is to decide equalities between types and modulo isomorphisms. Here, we choose to use the isomorphisms of the simply typed $\lb{}$-calculus with Cartesian product and $unit$ type (see, for example, \cite{RC95}). The axioms of this $\lb{}$-calculus are given by table~\ref{isosax}. \begin{figure} \begin{centerframe} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Open Scope type_scope. Section Iso_axioms. Variables A B C : Set. Axiom Com : A * B = B * A. Axiom Ass : A * (B * C) = A * B * C. Axiom Cur : (A * B -> C) = (A -> B -> C). Axiom Dis : (A -> B * C) = (A -> B) * (A -> C). Axiom P_unit : A * unit = A. Axiom AR_unit : (A -> unit) = unit. Axiom AL_unit : (unit -> A) = A. Lemma Cons : B = C -> A * B = A * C. Proof. intro Heq; rewrite Heq; reflexivity. Qed. End Iso_axioms. 
\end{coq_example*} \end{centerframe} \caption{Type isomorphism axioms} \label{isosax} \end{figure} A more tricky problem is to decide equalities between types and modulo isomorphisms. Here, we choose to use the isomorphisms of the simply typed $\lb{}$-calculus with Cartesian product and $unit$ type (see, for example, \cite{RC95}). The axioms of this $\lb{}$-calculus are given on Figure~\ref{isosax}. \begin{figure}[ht] \begin{centerframe} \begin{coq_example} Ltac DSimplif trm := match trm with | (?A * ?B * ?C) => rewrite <- (Ass A B C); try MainSimplif | (?A * ?B -> ?C) => rewrite (Cur A B C); try MainSimplif | (?A -> ?B * ?C) => rewrite (Dis A B C); try MainSimplif | (?A * unit) => rewrite (P_unit A); try MainSimplif | (unit * ?B) => rewrite (Com unit B); try MainSimplif | (?A -> unit) => rewrite (AR_unit A); try MainSimplif | (unit -> ?B) => rewrite (AL_unit B); try MainSimplif | (?A * ?B) => (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) | (?A -> ?B) => (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) end with MainSimplif := match goal with | |- (?A = ?B) => try DSimplif A; try DSimplif B end. Ltac Length trm := match trm with | (_ * ?B) => let succ := Length B in constr:(S succ) | _ => constr:1 end. Ltac assoc := repeat rewrite <- Ass. \end{coq_example} \end{centerframe} \caption{Type isomorphism tactic (1)} \label{isosltac1} \end{figure} \begin{figure}[ht] \begin{centerframe} \begin{coq_example} Ltac DoCompare n := match goal with | [ |- (?A = ?A) ] => reflexivity | [ |- (?A * ?B = ?A * ?C) ] => apply Cons; let newn := Length B in DoCompare newn | [ |- (?A * ?B = ?C) ] => match eval compute in n with | 1 => fail | _ => pattern (A * B) at 1; rewrite Com; assoc; DoCompare (pred n) end end. Ltac CompareStruct := match goal with | [ |- (?A = ?B) ] => let l1 := Length A with l2 := Length B in match eval compute in (l1 = l2) with | (?n = ?n) => DoCompare n end end. Ltac IsoProve := MainSimplif; CompareStruct. \end{coq_example} \end{centerframe} \caption{Type isomorphism tactic (2)} \label{isosltac2} \end{figure} The tactic to judge equalities modulo this axiomatization can be written as shown on Figures~\ref{isosltac1} and~\ref{isosltac2}. The algorithm is quite simple. Types are reduced using axioms that can be oriented (this done by {\tt MainSimplif}). The normal forms are sequences of Cartesian products without Cartesian product in the left component. These normal forms are then compared modulo permutation of the components (this is done by {\tt CompareStruct}). The main tactic to be called and realizing this algorithm is {\tt IsoProve}. % Figure~\ref{isoslem} gives Here are examples of what can be solved by {\tt IsoProve}. %\begin{figure}[ht] %\begin{centerframe} \begin{coq_example*} Lemma isos_ex1 : forall A B:Set, A * unit * B = B * (unit * A). Proof. intros; IsoProve. Qed. Lemma isos_ex2 : forall A B C:Set, (A * unit -> B * (C * unit)) = (A * unit -> (C -> unit) * C) * (unit -> A -> B). Proof. intros; IsoProve. Qed. \end{coq_example*} %\end{centerframe} %\caption{Type equalities solved by {\tt IsoProve}} %\label{isoslem} %\end{figure} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-lib.tex0000640000175000001440000007716711776416511016335 0ustar notinusers\chapter[The {\Coq} library]{The {\Coq} library\index{Theories}\label{Theories}} The \Coq\ library is structured into two parts: \begin{description} \item[The initial library:] it contains elementary logical notions and data-types. 
It constitutes the basic state of the system directly available when running \Coq; \item[The standard library:] general-purpose libraries containing various developments of \Coq\ axiomatizations about sets, lists, sorting, arithmetic, etc. This library comes with the system and its modules are directly accessible through the \verb!Require! command (see Section~\ref{Require}); \end{description} In addition, user-provided libraries or developments are provided by \Coq\ users' community. These libraries and developments are available for download at \texttt{http://coq.inria.fr} (see Section~\ref{Contributions}). The chapter briefly reviews the \Coq\ libraries. \section[The basic library]{The basic library\label{Prelude}} This section lists the basic notions and results which are directly available in the standard \Coq\ system\footnote{Most of these constructions are defined in the {\tt Prelude} module in directory {\tt theories/Init} at the {\Coq} root directory; this includes the modules {\tt Notations}, {\tt Logic}, {\tt Datatypes}, {\tt Specif}, {\tt Peano}, {\tt Wf} and {\tt Tactics}. Module {\tt Logic\_Type} also makes it in the initial state}. \subsection[Notations]{Notations\label{Notations}} This module defines the parsing and pretty-printing of many symbols (infixes, prefixes, etc.). However, it does not assign a meaning to these notations. The purpose of this is to define and fix once for all the precedence and associativity of very common notations. The main notations fixed in the initial state are listed on Figure~\ref{init-notations}. \begin{figure} \begin{center} \begin{tabular}{|cll|} \hline Notation & Precedence & Associativity \\ \hline \verb!_ <-> _! & 95 & no \\ \verb!_ \/ _! & 85 & right \\ \verb!_ /\ _! & 80 & right \\ \verb!~ _! & 75 & right \\ \verb!_ = _! & 70 & no \\ \verb!_ = _ = _! & 70 & no \\ \verb!_ = _ :> _! & 70 & no \\ \verb!_ <> _! & 70 & no \\ \verb!_ <> _ :> _! & 70 & no \\ \verb!_ < _! & 70 & no \\ \verb!_ > _! & 70 & no \\ \verb!_ <= _! & 70 & no \\ \verb!_ >= _! & 70 & no \\ \verb!_ < _ < _! & 70 & no \\ \verb!_ < _ <= _! & 70 & no \\ \verb!_ <= _ < _! & 70 & no \\ \verb!_ <= _ <= _! & 70 & no \\ \verb!_ + _! & 50 & left \\ \verb!_ || _! & 50 & left \\ \verb!_ - _! & 50 & left \\ \verb!_ * _! & 40 & left \\ \verb!_ && _! & 40 & left \\ \verb!_ / _! & 40 & left \\ \verb!- _! & 35 & right \\ \verb!/ _! & 35 & right \\ \verb!_ ^ _! 
& 30 & right \\ \hline \end{tabular} \end{center} \caption{Notations in the initial state} \label{init-notations} \end{figure} \subsection[Logic]{Logic\label{Logic}} \begin{figure} \begin{centerframe} \begin{tabular}{lclr} {\form} & ::= & {\tt True} & ({\tt True})\\ & $|$ & {\tt False} & ({\tt False})\\ & $|$ & {\tt\char'176} {\form} & ({\tt not})\\ & $|$ & {\form} {\tt /$\backslash$} {\form} & ({\tt and})\\ & $|$ & {\form} {\tt $\backslash$/} {\form} & ({\tt or})\\ & $|$ & {\form} {\tt ->} {\form} & (\em{primitive implication})\\ & $|$ & {\form} {\tt <->} {\form} & ({\tt iff})\\ & $|$ & {\tt forall} {\ident} {\tt :} {\type} {\tt ,} {\form} & (\em{primitive for all})\\ & $|$ & {\tt exists} {\ident} \zeroone{{\tt :} {\specif}} {\tt ,} {\form} & ({\tt ex})\\ & $|$ & {\tt exists2} {\ident} \zeroone{{\tt :} {\specif}} {\tt ,} {\form} {\tt \&} {\form} & ({\tt ex2})\\ & $|$ & {\term} {\tt =} {\term} & ({\tt eq})\\ & $|$ & {\term} {\tt =} {\term} {\tt :>} {\specif} & ({\tt eq}) \end{tabular} \end{centerframe} \caption{Syntax of formulas} \label{formulas-syntax} \end{figure} The basic library of {\Coq} comes with the definitions of standard (intuitionistic) logical connectives (they are defined as inductive constructions). They are equipped with an appealing syntax enriching the (subclass {\form}) of the syntactic class {\term}. The syntax extension is shown on Figure~\ref{formulas-syntax}. % The basic library of {\Coq} comes with the definitions of standard % (intuitionistic) logical connectives (they are defined as inductive % constructions). They are equipped with an appealing syntax enriching % the (subclass {\form}) of the syntactic class {\term}. The syntax % extension \footnote{This syntax is defined in module {\tt % LogicSyntax}} is shown on Figure~\ref{formulas-syntax}. \Rem Implication is not defined but primitive (it is a non-dependent product of a proposition over another proposition). There is also a primitive universal quantification (it is a dependent product over a proposition). The primitive universal quantification allows both first-order and higher-order quantification. \subsubsection[Propositional Connectives]{Propositional Connectives\label{Connectives} \index{Connectives}} First, we find propositional calculus connectives: \ttindex{True} \ttindex{I} \ttindex{False} \ttindex{not} \ttindex{and} \ttindex{conj} \ttindex{proj1} \ttindex{proj2} \begin{coq_eval} Set Printing Depth 50. \end{coq_eval} \begin{coq_example*} Inductive True : Prop := I. Inductive False : Prop := . Definition not (A: Prop) := A -> False. Inductive and (A B:Prop) : Prop := conj (_:A) (_:B). Section Projections. Variables A B : Prop. Theorem proj1 : A /\ B -> A. Theorem proj2 : A /\ B -> B. End Projections. \end{coq_example*} \begin{coq_eval} Abort All. \end{coq_eval} \ttindex{or} \ttindex{or\_introl} \ttindex{or\_intror} \ttindex{iff} \ttindex{IF\_then\_else} \begin{coq_example*} Inductive or (A B:Prop) : Prop := | or_introl (_:A) | or_intror (_:B). Definition iff (P Q:Prop) := (P -> Q) /\ (Q -> P). Definition IF_then_else (P Q R:Prop) := P /\ Q \/ ~ P /\ R. \end{coq_example*} \subsubsection[Quantifiers]{Quantifiers\label{Quantifiers} \index{Quantifiers}} Then we find first-order quantifiers: \ttindex{all} \ttindex{ex} \ttindex{exists} \ttindex{ex\_intro} \ttindex{ex2} \ttindex{exists2} \ttindex{ex\_intro2} \begin{coq_example*} Definition all (A:Set) (P:A -> Prop) := forall x:A, P x. Inductive ex (A: Set) (P:A -> Prop) : Prop := ex_intro (x:A) (_:P x). 
Inductive ex2 (A:Set) (P Q:A -> Prop) : Prop :=
  ex_intro2 (x:A) (_:P x) (_:Q x).
\end{coq_example*}
The following abbreviations are allowed:
\begin{center}
\begin{tabular}[h]{|l|l|}
\hline
\verb+exists x:A, P+ & \verb+ex A (fun x:A => P)+ \\
\verb+exists x, P+ & \verb+ex _ (fun x => P)+ \\
\verb+exists2 x:A, P & Q+ & \verb+ex2 A (fun x:A => P) (fun x:A => Q)+ \\
\verb+exists2 x, P & Q+ & \verb+ex2 _ (fun x => P) (fun x => Q)+ \\
\hline
\end{tabular}
\end{center}
The type annotation ``\texttt{:A}'' can be omitted when \texttt{A} can be synthesized by the system.
\subsubsection[Equality]{Equality\label{Equality} \index{Equality}}
Then, we find equality, defined as an inductive relation. That is, given a type \verb:A: and an \verb:x: of type \verb:A:, the predicate \verb:(eq A x): is the smallest one which contains \verb:x:. This definition, due to Christine Paulin-Mohring, is equivalent to defining \verb:eq: as the smallest reflexive relation, and it is also equivalent to Leibniz' equality.
\ttindex{eq} \ttindex{eq\_refl}
\begin{coq_example*}
Inductive eq (A:Type) (x:A) : A -> Prop :=
  eq_refl : eq A x x.
\end{coq_example*}
\subsubsection[Lemmas]{Lemmas\label{PreludeLemmas}}
Finally, a few easy lemmas are provided.
\ttindex{absurd}
\begin{coq_example*}
Theorem absurd : forall A C:Prop, A -> ~ A -> C.
\end{coq_example*}
\begin{coq_eval}
Abort.
\end{coq_eval}
\ttindex{eq\_sym} \ttindex{eq\_trans} \ttindex{f\_equal} \ttindex{sym\_not\_eq}
\begin{coq_example*}
Section equality.
Variables A B : Type.
Variable f : A -> B.
Variables x y z : A.
Theorem eq_sym : x = y -> y = x.
Theorem eq_trans : x = y -> y = z -> x = z.
Theorem f_equal : x = y -> f x = f y.
Theorem not_eq_sym : x <> y -> y <> x.
\end{coq_example*}
\begin{coq_eval}
Abort.
Abort.
Abort.
Abort.
\end{coq_eval}
\ttindex{eq\_ind\_r} \ttindex{eq\_rec\_r} \ttindex{eq\_rect} \ttindex{eq\_rect\_r}
%Definition eq_rect: (A:Set)(x:A)(P:A->Type)(P x)->(y:A)(x=y)->(P y).
\begin{coq_example*}
End equality.
Definition eq_ind_r :
  forall (A:Type) (x:A) (P:A->Prop), P x -> forall y:A, y = x -> P y.
Definition eq_rec_r :
  forall (A:Type) (x:A) (P:A->Set), P x -> forall y:A, y = x -> P y.
Definition eq_rect_r :
  forall (A:Type) (x:A) (P:A->Type), P x -> forall y:A, y = x -> P y.
\end{coq_example*}
\begin{coq_eval}
Abort.
Abort.
Abort.
\end{coq_eval}
%Abort (for now predefined eq_rect)
\begin{coq_example*}
Hint Immediate eq_sym not_eq_sym : core.
\end{coq_example*}
\ttindex{f\_equal$i$}
The theorem {\tt f\_equal} is extended to functions with two to five arguments. These theorems are named {\tt f\_equal2}, {\tt f\_equal3}, {\tt f\_equal4} and {\tt f\_equal5}. For instance, {\tt f\_equal3} is defined in the following way.
\begin{coq_example*}
Theorem f_equal3 :
 forall (A1 A2 A3 B:Type) (f:A1 -> A2 -> A3 -> B) (x1 y1:A1)
   (x2 y2:A2) (x3 y3:A3),
   x1 = y1 -> x2 = y2 -> x3 = y3 -> f x1 x2 x3 = f y1 y2 y3.
\end{coq_example*}
\begin{coq_eval}
Abort.
\end{coq_eval} \subsection[Datatypes]{Datatypes\label{Datatypes} \index{Datatypes}} \begin{figure} \begin{centerframe} \begin{tabular}{rclr} {\specif} & ::= & {\specif} {\tt *} {\specif} & ({\tt prod})\\ & $|$ & {\specif} {\tt +} {\specif} & ({\tt sum})\\ & $|$ & {\specif} {\tt + \{} {\specif} {\tt \}} & ({\tt sumor})\\ & $|$ & {\tt \{} {\specif} {\tt \} + \{} {\specif} {\tt \}} & ({\tt sumbool})\\ & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt |} {\form} {\tt \}} & ({\tt sig})\\ & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt |} {\form} {\tt \&} {\form} {\tt \}} & ({\tt sig2})\\ & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt \&} {\specif} {\tt \}} & ({\tt sigT})\\ & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt \&} {\specif} {\tt \&} {\specif} {\tt \}} & ({\tt sigT2})\\ & & & \\ {\term} & ::= & {\tt (} {\term} {\tt ,} {\term} {\tt )} & ({\tt pair}) \end{tabular} \end{centerframe} \caption{Syntax of data-types and specifications} \label{specif-syntax} \end{figure} In the basic library, we find the definition\footnote{They are in {\tt Datatypes.v}} of the basic data-types of programming, again defined as inductive constructions over the sort \verb:Set:. Some of them come with a special syntax shown on Figure~\ref{specif-syntax}. \subsubsection[Programming]{Programming\label{Programming} \index{Programming} \label{libnats} \ttindex{unit} \ttindex{tt} \ttindex{bool} \ttindex{true} \ttindex{false} \ttindex{nat} \ttindex{O} \ttindex{S} \ttindex{option} \ttindex{Some} \ttindex{None} \ttindex{identity} \ttindex{refl\_identity}} \begin{coq_example*} Inductive unit : Set := tt. Inductive bool : Set := true | false. Inductive nat : Set := O | S (n:nat). Inductive option (A:Set) : Set := Some (_:A) | None. Inductive identity (A:Type) (a:A) : A -> Type := refl_identity : identity A a a. \end{coq_example*} Note that zero is the letter \verb:O:, and {\sl not} the numeral \verb:0:. The predicate {\tt identity} is logically equivalent to equality but it lives in sort {\tt Type}. It is mainly maintained for compatibility. We then define the disjoint sum of \verb:A+B: of two sets \verb:A: and \verb:B:, and their product \verb:A*B:. \ttindex{sum} \ttindex{A+B} \ttindex{+} \ttindex{inl} \ttindex{inr} \ttindex{prod} \ttindex{A*B} \ttindex{*} \ttindex{pair} \ttindex{fst} \ttindex{snd} \begin{coq_example*} Inductive sum (A B:Set) : Set := inl (_:A) | inr (_:B). Inductive prod (A B:Set) : Set := pair (_:A) (_:B). Section projections. Variables A B : Set. Definition fst (H: prod A B) := match H with | pair x y => x end. Definition snd (H: prod A B) := match H with | pair x y => y end. End projections. \end{coq_example*} Some operations on {\tt bool} are also provided: {\tt andb} (with infix notation {\tt \&\&}), {\tt orb} (with infix notation {\tt ||}), {\tt xorb}, {\tt implb} and {\tt negb}. \subsection{Specification} The following notions\footnote{They are defined in module {\tt Specif.v}} allow to build new data-types and specifications. They are available with the syntax shown on Figure~\ref{specif-syntax}. For instance, given \verb|A:Type| and \verb|P:A->Prop|, the construct \verb+{x:A | P x}+ (in abstract syntax \verb+(sig A P)+) is a \verb:Type:. We may build elements of this set as \verb:(exist x p): whenever we have a witness \verb|x:A| with its justification \verb|p:P x|. From such a \verb:(exist x p): we may in turn extract its witness \verb|x:A| (using an elimination construct such as \verb:match:) but {\sl not} its justification, which stays hidden, like in an abstract data-type. 
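For instance, with the arbitrary names {\tt one\_pos} and {\tt witness}, one may build such an element and project out its witness by pattern-matching:
\begin{verbatim}
Definition one_pos : {n : nat | 0 < n} := exist _ 1 (le_n 1).

Definition witness (s : {n : nat | 0 < n}) : nat :=
  match s with
  | exist x _ => x
  end.
\end{verbatim}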
In technical terms, one says that \verb:sig: is a ``weak (dependent) sum''. A variant \verb:sig2: with two predicates is also provided.
\ttindex{\{x:A $\mid$ (P x)\}} \ttindex{sig} \ttindex{exist} \ttindex{sig2} \ttindex{exist2}
\begin{coq_example*}
Inductive sig (A:Set) (P:A -> Prop) : Set := exist (x:A) (_:P x).
Inductive sig2 (A:Set) (P Q:A -> Prop) : Set :=
  exist2 (x:A) (_:P x) (_:Q x).
\end{coq_example*}
A ``strong (dependent) sum'' \verb+{x:A & P x}+ may also be defined, when the predicate \verb:P: is now defined as a constructor of types in \verb:Type:.
\ttindex{\{x:A \& (P x)\}} \ttindex{\&} \ttindex{sigT} \ttindex{existT} \ttindex{projT1} \ttindex{projT2} \ttindex{sigT2} \ttindex{existT2}
\begin{coq_example*}
Inductive sigT (A:Type) (P:A -> Type) : Type := existT (x:A) (_:P x).
Section Projections.
Variable A : Type.
Variable P : A -> Type.
Definition projT1 (H:sigT A P) := let (x, h) := H in x.
Definition projT2 (H:sigT A P) :=
  match H return P (projT1 H) with existT x h => h end.
End Projections.
Inductive sigT2 (A: Type) (P Q:A -> Type) : Type :=
  existT2 (x:A) (_:P x) (_:Q x).
\end{coq_example*}
A related non-dependent construct is the constructive sum \verb"{A}+{B}" of two propositions \verb:A: and \verb:B:.
\label{sumbool}
\ttindex{sumbool} \ttindex{left} \ttindex{right} \ttindex{\{A\}+\{B\}}
\begin{coq_example*}
Inductive sumbool (A B:Prop) : Set := left (_:A) | right (_:B).
\end{coq_example*}
This \verb"sumbool" construct may be used as a kind of indexed boolean data-type. An intermediate between \verb"sumbool" and \verb"sum" is the mixed \verb"sumor" which combines \verb"A:Set" and \verb"B:Prop" in the \verb"Set" \verb"A+{B}".
\ttindex{sumor} \ttindex{inleft} \ttindex{inright} \ttindex{A+\{B\}}
\begin{coq_example*}
Inductive sumor (A:Set) (B:Prop) : Set :=
| inleft (_:A)
| inright (_:B).
\end{coq_example*}
We may define variants of the axiom of choice, like in Martin-L\"of's Intuitionistic Type Theory.
\ttindex{Choice} \ttindex{Choice2} \ttindex{bool\_choice}
\begin{coq_example*}
Lemma Choice :
 forall (S S':Set) (R:S -> S' -> Prop),
   (forall x:S, {y : S' | R x y}) ->
   {f : S -> S' | forall z:S, R z (f z)}.
Lemma Choice2 :
 forall (S S':Set) (R:S -> S' -> Set),
   (forall x:S, {y : S' & R x y}) ->
   {f : S -> S' & forall z:S, R z (f z)}.
Lemma bool_choice :
 forall (S:Set) (R1 R2:S -> Prop),
   (forall x:S, {R1 x} + {R2 x}) ->
   {f : S -> bool | forall x:S, f x = true /\ R1 x \/ f x = false /\ R2 x}.
\end{coq_example*}
\begin{coq_eval}
Abort.
Abort.
Abort.
\end{coq_eval}
The next construct builds a sum between a data-type \verb|A:Type| and an exceptional value encoding errors:
\ttindex{Exc} \ttindex{value} \ttindex{error}
\begin{coq_example*}
Definition Exc := option.
Definition value := Some.
Definition error := None.
\end{coq_example*}
This module ends with theorems relating the sorts \verb:Set: or \verb:Type: and \verb:Prop: in a way which is consistent with the realizability interpretation.
\ttindex{False\_rect} \ttindex{False\_rec} \ttindex{eq\_rect} \ttindex{absurd\_set} \ttindex{and\_rect}
\begin{coq_example*}
Definition except := False_rec.
Theorem absurd_set : forall (A:Prop) (C:Set), A -> ~ A -> C.
Theorem and_rect :
  forall (A B:Prop) (P:Type), (A -> B -> P) -> A /\ B -> P.
\end{coq_example*}
%\begin{coq_eval}
%Abort.
%Abort.
%\end{coq_eval}
\subsection{Basic Arithmetics}
The basic library includes a few elementary properties of natural numbers, together with the definitions of predecessor, addition and multiplication\footnote{This is in module {\tt Peano.v}}.
It also provides a scope {\tt nat\_scope} gathering standard notations for common operations (+, *) and a decimal notation for numbers. That is, one can write \texttt{3} for \texttt{(S (S (S O)))}. This also works on the left-hand side of a \texttt{match} expression (see for example Section~\ref{refine-example}). This scope is opened by default.
%Remove the redefinition of nat
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
The following example is not part of the standard library, but it shows the usage of the notations:
\begin{coq_example*}
Fixpoint even (n:nat) : bool :=
  match n with
  | 0 => true
  | 1 => false
  | S (S n) => even n
  end.
\end{coq_example*}
\ttindex{eq\_S} \ttindex{pred} \ttindex{pred\_Sn} \ttindex{eq\_add\_S} \ttindex{not\_eq\_S} \ttindex{IsSucc} \ttindex{O\_S} \ttindex{n\_Sn} \ttindex{plus} \ttindex{plus\_n\_O} \ttindex{plus\_n\_Sm} \ttindex{mult} \ttindex{mult\_n\_O} \ttindex{mult\_n\_Sm}
\begin{coq_example*}
Theorem eq_S : forall x y:nat, x = y -> S x = S y.
\end{coq_example*}
\begin{coq_eval}
Abort.
\end{coq_eval}
\begin{coq_example*}
Definition pred (n:nat) : nat :=
  match n with
  | 0 => 0
  | S u => u
  end.
Theorem pred_Sn : forall m:nat, m = pred (S m).
Theorem eq_add_S : forall n m:nat, S n = S m -> n = m.
Hint Immediate eq_add_S : core.
Theorem not_eq_S : forall n m:nat, n <> m -> S n <> S m.
\end{coq_example*}
\begin{coq_eval}
Abort All.
\end{coq_eval}
\begin{coq_example*}
Definition IsSucc (n:nat) : Prop :=
  match n with
  | 0 => False
  | S p => True
  end.
Theorem O_S : forall n:nat, 0 <> S n.
Theorem n_Sn : forall n:nat, n <> S n.
\end{coq_example*}
\begin{coq_eval}
Abort All.
\end{coq_eval}
\begin{coq_example*}
Fixpoint plus (n m:nat) {struct n} : nat :=
  match n with
  | 0 => m
  | S p => S (p + m)
  end.
where "n + m" := (plus n m) : nat_scope.
Lemma plus_n_O : forall n:nat, n = n + 0.
Lemma plus_n_Sm : forall n m:nat, S (n + m) = n + S m.
\end{coq_example*}
\begin{coq_eval}
Abort All.
\end{coq_eval}
\begin{coq_example*}
Fixpoint mult (n m:nat) {struct n} : nat :=
  match n with
  | 0 => 0
  | S p => m + p * m
  end.
where "n * m" := (mult n m) : nat_scope.
Lemma mult_n_O : forall n:nat, 0 = n * 0.
Lemma mult_n_Sm : forall n m:nat, n * m + n = n * (S m).
\end{coq_example*}
\begin{coq_eval}
Abort All.
\end{coq_eval}
It then gives the definition of the usual orderings \verb:le:, \verb:lt:, \verb:ge:, and \verb:gt:.
\ttindex{le} \ttindex{le\_n} \ttindex{le\_S} \ttindex{lt} \ttindex{ge} \ttindex{gt}
\begin{coq_example*}
Inductive le (n:nat) : nat -> Prop :=
  | le_n : le n n
  | le_S : forall m:nat, n <= m -> n <= (S m).
where "n <= m" := (le n m) : nat_scope.
Definition lt (n m:nat) := S n <= m.
Definition ge (n m:nat) := m <= n.
Definition gt (n m:nat) := m < n.
\end{coq_example*}
Properties of these relations are not initially known, but may be required by the user from modules \verb:Le: and \verb:Lt:. Finally, \verb:Peano: gives some lemmas allowing pattern-matching, and a double induction principle.
\ttindex{nat\_case} \ttindex{nat\_double\_ind}
\begin{coq_example*}
Theorem nat_case :
 forall (n:nat) (P:nat -> Prop), P 0 -> (forall m:nat, P (S m)) -> P n.
\end{coq_example*}
\begin{coq_eval}
Abort All.
\end{coq_eval}
\begin{coq_example*}
Theorem nat_double_ind :
 forall R:nat -> nat -> Prop,
   (forall n:nat, R 0 n) ->
   (forall n:nat, R (S n) 0) ->
   (forall n m:nat, R n m -> R (S n) (S m)) -> forall n m:nat, R n m.
\end{coq_example*}
\begin{coq_eval}
Abort All.
\end{coq_eval} \subsection{Well-founded recursion} The basic library contains the basics of well-founded recursion and well-founded induction\footnote{This is defined in module {\tt Wf.v}}. \index{Well foundedness} \index{Recursion} \index{Well founded induction} \ttindex{Acc} \ttindex{Acc\_inv} \ttindex{Acc\_rect} \ttindex{well\_founded} \begin{coq_example*} Section Well_founded. Variable A : Type. Variable R : A -> A -> Prop. Inductive Acc (x:A) : Prop := Acc_intro : (forall y:A, R y x -> Acc y) -> Acc x. Lemma Acc_inv : Acc x -> forall y:A, R y x -> Acc y. \end{coq_example*} \begin{coq_eval} destruct 1; trivial. Defined. \end{coq_eval} %% Acc_rect now primitively defined %% Section AccRec. %% Variable P : A -> Set. %% Variable F : %% forall x:A, %% (forall y:A, R y x -> Acc y) -> (forall y:A, R y x -> P y) -> P x. %% Fixpoint Acc_rec (x:A) (a:Acc x) {struct a} : P x := %% F x (Acc_inv x a) %% (fun (y:A) (h:R y x) => Acc_rec y (Acc_inv x a y h)). %% End AccRec. \begin{coq_example*} Definition well_founded := forall a:A, Acc a. Hypothesis Rwf : well_founded. Theorem well_founded_induction : forall P:A -> Set, (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. Theorem well_founded_ind : forall P:A -> Prop, (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. \end{coq_example*} \begin{coq_eval} Abort All. \end{coq_eval} The automatically generated scheme {\tt Acc\_rect} can be used to define functions by fixpoints using well-founded relations to justify termination. Assuming extensionality of the functional used for the recursive call, the fixpoint equation can be proved. \ttindex{Fix\_F} \ttindex{fix\_eq} \ttindex{Fix\_F\_inv} \ttindex{Fix\_F\_eq} \begin{coq_example*} Section FixPoint. Variable P : A -> Type. Variable F : forall x:A, (forall y:A, R y x -> P y) -> P x. Fixpoint Fix_F (x:A) (r:Acc x) {struct r} : P x := F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)). Definition Fix (x:A) := Fix_F x (Rwf x). Hypothesis F_ext : forall (x:A) (f g:forall y:A, R y x -> P y), (forall (y:A) (p:R y x), f y p = g y p) -> F x f = F x g. Lemma Fix_F_eq : forall (x:A) (r:Acc x), F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)) = Fix_F x r. Lemma Fix_F_inv : forall (x:A) (r s:Acc x), Fix_F x r = Fix_F x s. Lemma fix_eq : forall x:A, Fix x = F x (fun (y:A) (p:R y x) => Fix y). \end{coq_example*} \begin{coq_eval} Abort All. \end{coq_eval} \begin{coq_example*} End FixPoint. End Well_founded. \end{coq_example*} \subsection{Accessing the {\Type} level} The basic library includes the definitions\footnote{This is in module {\tt Logic\_Type.v}} of the counterparts of some data-types and logical quantifiers at the \verb:Type: level: negation, pair, and properties of {\tt identity}. \ttindex{notT} \ttindex{prodT} \ttindex{pairT} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Definition notT (A:Type) := A -> False. Inductive prodT (A B:Type) : Type := pairT (_:A) (_:B). \end{coq_example*} At the end, it defines data-types at the {\Type} level. \subsection{Tactics} A few tactics defined at the user level are provided in the initial state\footnote{This is in module {\tt Tactics.v}}. 
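For instance, assuming the tactics {\tt easy} and {\tt now} are among them, a simple statement (here under the arbitrary name {\tt triv}) can be discharged at once:
\begin{verbatim}
Lemma triv : forall n:nat, n = n.
Proof. now intro. Qed.
\end{verbatim}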
\section{The standard library} \subsection{Survey} The rest of the standard library is structured into the following subdirectories: \begin{tabular}{lp{12cm}} {\bf Logic} & Classical logic and dependent equality \\ {\bf Arith} & Basic Peano arithmetic \\ {\bf PArith} & Basic positive integer arithmetic \\ {\bf NArith} & Basic binary natural number arithmetic \\ {\bf ZArith} & Basic relative integer arithmetic \\ {\bf Numbers} & Various approaches to natural, integer and cyclic numbers (currently axiomatically and on top of 2$^{31}$ binary words) \\ {\bf Bool} & Booleans (basic functions and results) \\ {\bf Lists} & Monomorphic and polymorphic lists (basic functions and results), Streams (infinite sequences defined with co-inductive types) \\ {\bf Sets} & Sets (classical, constructive, finite, infinite, power set, etc.) \\ {\bf FSets} & Specification and implementations of finite sets and finite maps (by lists and by AVL trees)\\ {\bf Reals} & Axiomatization of real numbers (classical, basic functions, integer part, fractional part, limit, derivative, Cauchy series, power series and results,...)\\ {\bf Relations} & Relations (definitions and basic results) \\ {\bf Sorting} & Sorted list (basic definitions and heapsort correctness) \\ {\bf Strings} & 8-bits characters and strings\\ {\bf Wellfounded} & Well-founded relations (basic results) \\ \end{tabular} \medskip These directories belong to the initial load path of the system, and the modules they provide are compiled at installation time. So they are directly accessible with the command \verb!Require! (see Chapter~\ref{Other-commands}). The different modules of the \Coq\ standard library are described in the additional document \verb!Library.dvi!. They are also accessible on the WWW through the \Coq\ homepage \footnote{\texttt{http://coq.inria.fr}}. \subsection[Notations for integer arithmetics]{Notations for integer arithmetics\index{Arithmetical notations}} On Figure~\ref{zarith-syntax} is described the syntax of expressions for integer arithmetics. It is provided by requiring and opening the module {\tt ZArith} and opening scope {\tt Z\_scope}. \ttindex{+} \ttindex{*} \ttindex{-} \ttindex{/} \ttindex{<=} \ttindex{>=} \ttindex{<} \ttindex{>} \ttindex{?=} \ttindex{mod} \begin{figure} \begin{center} \begin{tabular}{l|l|l|l} Notation & Interpretation & Precedence & Associativity\\ \hline \verb!_ < _! & {\tt Z.lt} &&\\ \verb!x <= y! & {\tt Z.le} &&\\ \verb!_ > _! & {\tt Z.gt} &&\\ \verb!x >= y! & {\tt Z.ge} &&\\ \verb!x < y < z! & {\tt x < y \verb!/\! y < z} &&\\ \verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} &&\\ \verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} &&\\ \verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} &&\\ \verb!_ ?= _! & {\tt Z.compare} & 70 & no\\ \verb!_ + _! & {\tt Z.add} &&\\ \verb!_ - _! & {\tt Z.sub} &&\\ \verb!_ * _! & {\tt Z.mul} &&\\ \verb!_ / _! & {\tt Z.div} &&\\ \verb!_ mod _! & {\tt Z.modulo} & 40 & no \\ \verb!- _! & {\tt Z.opp} &&\\ \verb!_ ^ _! & {\tt Z.pow} &&\\ \end{tabular} \end{center} \caption{Definition of the scope for integer arithmetics ({\tt Z\_scope})} \label{zarith-syntax} \end{figure} Figure~\ref{zarith-syntax} shows the notations provided by {\tt Z\_scope}. It specifies how notations are interpreted and, when not already reserved, the precedence and associativity. \begin{coq_example} Require Import ZArith. Check (2 + 3)%Z. Open Scope Z_scope. Check 2 + 3. 
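(* Further illustrative checks (not part of the original example): the comparison and modulo notations of Z_scope. *)
Check (5 mod 3).
Check (2 ?= 3).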
\end{coq_example}

\subsection[Peano's arithmetic (\texttt{nat})]{Peano's arithmetic (\texttt{nat})\index{Peano's arithmetic}
\ttindex{nat\_scope}}

While many operations and predicates of Peano's arithmetic are defined in the
initial state, further operations and results belong to other modules. For
instance, the decidability of the basic predicates is defined there. These
are provided by requiring the module {\tt Arith}.

Figure~\ref{nat-syntax} describes the notations available in scope
{\tt nat\_scope}.

\begin{figure}
\begin{center}
\begin{tabular}{l|l}
Notation & Interpretation \\
\hline
\verb!_ < _! & {\tt lt} \\
\verb!x <= y! & {\tt le} \\
\verb!_ > _! & {\tt gt} \\
\verb!x >= y! & {\tt ge} \\
\verb!x < y < z! & {\tt x < y \verb!/\! y < z} \\
\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} \\
\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} \\
\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} \\
\verb!_ + _! & {\tt plus} \\
\verb!_ - _! & {\tt minus} \\
\verb!_ * _! & {\tt mult} \\
\end{tabular}
\end{center}
\caption{Definition of the scope for natural numbers ({\tt nat\_scope})}
\label{nat-syntax}
\end{figure}

\subsection{Real numbers library}
\subsubsection[Notations for real numbers]{Notations for real numbers\index{Notations for real numbers}}

This is provided by requiring and opening the module {\tt Reals} and opening
scope {\tt R\_scope}. This set of notations is very similar to the one for
integer arithmetics; the main addition is the inverse function.

\begin{figure}
\begin{center}
\begin{tabular}{l|l}
Notation & Interpretation \\
\hline
\verb!_ < _! & {\tt Rlt} \\
\verb!x <= y! & {\tt Rle} \\
\verb!_ > _! & {\tt Rgt} \\
\verb!x >= y! & {\tt Rge} \\
\verb!x < y < z! & {\tt x < y \verb!/\! y < z} \\
\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} \\
\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} \\
\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} \\
\verb!_ + _! & {\tt Rplus} \\
\verb!_ - _! & {\tt Rminus} \\
\verb!_ * _! & {\tt Rmult} \\
\verb!_ / _! & {\tt Rdiv} \\
\verb!- _! & {\tt Ropp} \\
\verb!/ _! & {\tt Rinv} \\
\verb!_ ^ _! & {\tt pow} \\
\end{tabular}
\end{center}
\caption{Definition of the scope for real arithmetics ({\tt R\_scope})}
\label{reals-syntax}
\end{figure}

\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\begin{coq_example}
Require Import Reals.
Check (2 + 3)%R.
Open Scope R_scope.
Check 2 + 3.
\end{coq_example}

\subsubsection{Some tactics}

In addition to the \verb|ring|, \verb|field| and \verb|fourier| tactics (see
Chapter~\ref{Tactics}), there are:
\begin{itemize}
\item {\tt discrR} \tacindex{discrR}

  Proves that a real integer constant $c_1$ is different from another real
  integer constant $c_2$.

\begin{coq_example*}
Require Import DiscrR.
Goal 5 <> 0.
\end{coq_example*}
\begin{coq_example}
discrR.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}

\item {\tt split\_Rabs} allows to unfold the {\tt Rabs} constant and to split
  the corresponding conjunctions. \tacindex{split\_Rabs}

\begin{coq_example*}
Require Import SplitAbsolu.
Goal forall x:R, x <= Rabs x.
\end{coq_example*}
\begin{coq_example}
intro; split_Rabs.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}

\item {\tt split\_Rmult} allows to split a condition that a product is non
  null into subgoals corresponding to the condition on each operand of the
  product. \tacindex{split\_Rmult}

\begin{coq_example*}
Require Import SplitRmult.
Goal forall x y z:R, x * y * z <> 0.
\end{coq_example*}
\begin{coq_example}
intros; split_Rmult.
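(* Illustrative comment: the product is split recursively, leaving one non-nullity subgoal for each factor (x <> 0, y <> 0 and z <> 0). *)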
\end{coq_example}
\end{itemize}

All these tactics have been written in the tactic language Ltac described in
Chapter~\ref{TacticLanguage}.

\begin{coq_eval}
Reset Initial.
\end{coq_eval}

\subsection[List library]{List library\index{Notations for lists}
\ttindex{length}
\ttindex{head}
\ttindex{tail}
\ttindex{app}
\ttindex{rev}
\ttindex{nth}
\ttindex{map}
\ttindex{flat\_map}
\ttindex{fold\_left}
\ttindex{fold\_right}}

Some elementary operations on polymorphic lists are defined here. They can be
accessed by requiring module {\tt List}.

It defines the following notions:
\begin{center}
\begin{tabular}{l|l}
\hline
{\tt length} & length \\
{\tt head} & first element (with default) \\
{\tt tail} & all but first element \\
{\tt app} & concatenation \\
{\tt rev} & reverse \\
{\tt nth} & accessing $n$-th element (with default) \\
{\tt map} & applying a function \\
{\tt flat\_map} & applying a function returning lists \\
{\tt fold\_left} & iterator (from head to tail) \\
{\tt fold\_right} & iterator (from tail to head) \\
\hline
\end{tabular}
\end{center}

Figure~\ref{list-syntax} shows the notations available when opening scope
{\tt list\_scope}.

\begin{figure}
\begin{center}
\begin{tabular}{l|l|l|l}
Notation & Interpretation & Precedence & Associativity\\
\hline
\verb!_ ++ _! & {\tt app} & 60 & right \\
\verb!_ :: _! & {\tt cons} & 60 & right \\
\end{tabular}
\end{center}
\caption{Definition of the scope for lists ({\tt list\_scope})}
\label{list-syntax}
\end{figure}

\section[Users' contributions]{Users' contributions\index{Contributions}
\label{Contributions}}

Numerous users' contributions have been collected and are available at URL
\url{http://coq.inria.fr/contribs/}. On this web page, you have a list of all
contributions with information (author, institution, quick description, etc.)
and the possibility to download them one by one. You will also find
information on how to submit a new contribution.

% TODO: unify the use of \form and \type to mean a type
% or use \form specifically for a type of type Prop

\chapter{Tactics
\index{Tactics}
\label{Tactics}}

A deduction rule is a link between some (unique) formula, that we call the
{\em conclusion}, and (several) formulas that we call the {\em premises}. A
deduction rule can be read in two ways. The first one says: {\it ``if I know
this and this then I can deduce this''}. For instance, if I have a proof of
$A$ and a proof of $B$ then I have a proof of $A \land B$. This is forward
reasoning from premises to conclusion. The other way says: {\it ``to prove
this I have to prove this and this''}. For instance, to prove $A \land B$, I
have to prove $A$ and I have to prove $B$. This is backward reasoning from
conclusion to premises. We say that the conclusion is the
{\em goal}\index{goal} to prove and the premises are the
{\em subgoals}\index{subgoal}. The tactics implement {\em backward
reasoning}. When applied to a goal, a tactic replaces this goal with the
subgoals it generates. We say that a tactic reduces a goal to its subgoal(s).

Each (sub)goal is denoted with a number. The current goal is numbered 1. By
default, a tactic is applied to the current goal, but one can address a
particular goal in the list by writing {\sl n:\tac} which means {\it ``apply
tactic {\tac} to goal number {\sl n}''}. We can show the list of subgoals by
typing {\tt Show} (see Section~\ref{Show}).
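For instance, the following small script (given here only as an illustration,
it is not part of the original text) reduces a conjunctive goal to two
subgoals and addresses the second one explicitly with the {\sl n:\tac}
notation mentioned above:
\begin{coq_example}
Goal True /\ True.
split.
2: exact I.
exact I.
Qed.
\end{coq_example}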
Since not every rule applies to a given statement, every tactic cannot be used to reduce any goal. In other words, before applying a tactic to a given goal, the system checks that some {\em preconditions} are satisfied. If it is not the case, the tactic raises an error message. Tactics are built from atomic tactics and tactic expressions (which extends the folklore notion of tactical) to combine those atomic tactics. This chapter is devoted to atomic tactics. The tactic language will be described in Chapter~\ref{TacticLanguage}. \section{Invocation of tactics \label{tactic-syntax} \index{tactic@{\tac}}} A tactic is applied as an ordinary command. If the tactic is not meant to address the first subgoal, the command may be preceded by the wished subgoal number as shown below: \begin{tabular}{lcl} {\commandtac} & ::= & {\num} {\tt :} {\tac} {\tt .}\\ & $|$ & {\tac} {\tt .} \end{tabular} \subsection{Bindings list \index{Binding list} \label{Binding-list}} Tactics that take a term as argument may also support a bindings list, so as to instantiate some parameters of the term by name or position. The general form of a term equipped with a bindings list is {\tt {\term} with {\bindinglist}} where {\bindinglist} may be of two different forms: \begin{itemize} \item In a bindings list of the form {\tt (\vref$_1$ := \term$_1$) \dots\ (\vref$_n$ := \term$_n$)}, {\vref} is either an {\ident} or a {\num}. The references are determined according to the type of {\term}. If \vref$_i$ is an identifier, this identifier has to be bound in the type of {\term} and the binding provides the tactic with an instance for the parameter of this name. If \vref$_i$ is some number $n$, this number denotes the $n$-th non dependent premise of the {\term}, as determined by the type of {\term}. \ErrMsg \errindex{No such binder} \item A bindings list can also be a simple list of terms {\tt \term$_1$ \dots\ \term$_n$}. In that case the references to which these terms correspond are determined by the tactic. In case of {\tt induction}, {\tt destruct}, {\tt elim} and {\tt case} (see Section~\ref{elim}) the terms have to provide instances for all the dependent products in the type of \term\ while in the case of {\tt apply}, or of {\tt constructor} and its variants, only instances for the dependent products that are not bound in the conclusion of the type are required. \ErrMsg \errindex{Not the right number of missing arguments} \end{itemize} \subsection{Occurrences sets and occurrences clauses} \label{Occurrences clauses} \index{Occurrences clauses} An occurrences clause is a modifier to some tactics that obeys the following syntax: \begin{tabular}{lcl} {\occclause} & ::= & {\tt in} {\occgoalset} \\ {\occgoalset} & ::= & \zeroone{{\ident$_1$} \zeroone{\atoccurrences} {\tt ,} \\ & & {\dots} {\tt ,}\\ & & {\ident$_m$} \zeroone{\atoccurrences}}\\ & & \zeroone{{\tt |-} \zeroone{{\tt *} \zeroone{\atoccurrences}}}\\ & | & {\tt *} {\tt |-} \zeroone{{\tt *} \zeroone{\atoccurrences}}\\ & | & {\tt *}\\ {\atoccurrences} & ::= & {\tt at} {\occlist}\\ {\occlist} & ::= & \zeroone{{\tt -}} {\num$_1$} \dots\ {\num$_n$} \end{tabular} The role of an occurrence clause is to select a set of occurrences of a {\term} in a goal. In the first case, the {{\ident$_i$} \zeroone{{\tt at} {\num$_1^i$} \dots\ {\num$_{n_i}^i$}}} parts indicate that occurrences have to be selected in the hypotheses named {\ident$_i$}. If no numbers are given for hypothesis {\ident$_i$}, then all the occurrences of {\term} in the hypothesis are selected. 
If numbers are given, they refer to occurrences of {\term} when the term is
printed using option {\tt Set Printing All} (see
Section~\ref{SetPrintingAll}), counting from left to right. In particular,
occurrences of {\term} in implicit arguments (see Section~\ref{Implicit
Arguments}) or coercions (see Section~\ref{Coercions}) are counted.

If a minus sign is given between {\tt at} and the list of occurrences, it
negates the condition so that the clause denotes all the occurrences except
the ones explicitly mentioned after the minus sign.

As an exception to the left-to-right order, the occurrences in the {\tt
return} subexpression of a {\tt match} are considered {\em before} the
occurrences in the matched term.

In the second case, the {\tt *} on the left of {\tt |-} means that all
occurrences of {\term} are selected in every hypothesis.

In the first and second case, if {\tt *} is mentioned on the right of
{\tt |-}, the occurrences of {\term} in the conclusion of the goal are
selected. If some numbers are given, then only the occurrences denoted by
these numbers are selected. If no numbers are given, all occurrences of
{\term} in the goal are selected.

Finally, the last notation is an abbreviation for {\tt * |- *}. Note also
that {\tt |-} is optional in the first case when no {\tt *} is given.

Here are some tactics that understand occurrences clauses: {\tt set}, {\tt
remember}, {\tt induction}, {\tt destruct}.

\SeeAlso~Sections~\ref{tactic:set}, \ref{Tac-induction}, \ref{SetPrintingAll}.

\section{Applying theorems}

\subsection{\tt exact \term}
\tacindex{exact}
\label{exact}

This tactic applies to any goal. It gives directly the exact proof term of
the goal. Let {\tt T} be our goal and let {\tt p} be a term of type {\tt U};
then {\tt exact p} succeeds iff {\tt T} and {\tt U} are convertible (see
Section~\ref{conv-rules}).

\begin{ErrMsgs}
\item \errindex{Not an exact proof}
\end{ErrMsgs}

\begin{Variants}
\item \texttt{eexact \term}\tacindex{eexact}

  This tactic behaves like \texttt{exact} but is able to handle terms and
  goals with meta-variables.

\end{Variants}

\subsection{\tt assumption}
\tacindex{assumption}

This tactic looks in the local context for a hypothesis whose type is equal
to the goal. If it is the case, the subgoal is proved. Otherwise, it fails.

\begin{ErrMsgs}
\item \errindex{No such assumption}
\end{ErrMsgs}

\begin{Variants}
\tacindex{eassumption}
\item \texttt{eassumption}

  This tactic behaves like \texttt{assumption} but is able to handle goals
  with meta-variables.

\end{Variants}

\subsection{\tt refine \term}
\tacindex{refine}
\label{refine}
\label{refine-example}
\index{?@{\texttt{?}}}

This tactic applies to any goal. It behaves like {\tt exact} with a big
difference: the user can leave some holes (denoted by \texttt{\_} or {\tt
(\_:\type)}) in the term. {\tt refine} will generate as many subgoals as
there are holes in the term. The type of the holes must be either synthesized
by the system or declared by an explicit cast like \verb|(_:nat->Prop)|. This
low-level tactic can be useful to advanced users.

\Example
\begin{coq_example*}
Inductive Option : Set :=
  | Fail : Option
  | Ok : bool -> Option.
\end{coq_example*}
\end{coq_example*} \begin{ErrMsgs} \item \errindex{invalid argument}: the tactic \texttt{refine} does not know what to do with the term you gave. \item \texttt{Refine passed ill-formed term}: the term you gave is not a valid proof (not easy to debug in general). This message may also occur in higher-level tactics that call \texttt{refine} internally. \item \errindex{Cannot infer a term for this placeholder}: there is a hole in the term you gave which type cannot be inferred. Put a cast around it. \end{ErrMsgs} \subsection{\tt apply \term} \tacindex{apply} \label{apply} This tactic applies to any goal. The argument {\term} is a term well-formed in the local context. The tactic {\tt apply} tries to match the current goal against the conclusion of the type of {\term}. If it succeeds, then the tactic returns as many subgoals as the number of non-dependent premises of the type of {\term}. If the conclusion of the type of {\term} does not match the goal {\em and} the conclusion is an inductive type isomorphic to a tuple type, then each component of the tuple is recursively matched to the goal in the left-to-right order. The tactic {\tt apply} relies on first-order unification with dependent types unless the conclusion of the type of {\term} is of the form {\tt ($P$ $t_1$ \dots\ $t_n$)} with $P$ to be instantiated. In the latter case, the behavior depends on the form of the goal. If the goal is of the form {\tt (fun $x$ => $Q$)~$u_1$~\ldots~$u_n$} and the $t_i$ and $u_i$ unifies, then $P$ is taken to be {\tt (fun $x$ => $Q$)}. Otherwise, {\tt apply} tries to define $P$ by abstracting over $t_1$~\ldots ~$t_n$ in the goal. See {\tt pattern} in Section~\ref{pattern} to transform the goal so that it gets the form {\tt (fun $x$ => $Q$)~$u_1$~\ldots~$u_n$}. \begin{ErrMsgs} \item \errindex{Impossible to unify \dots\ with \dots} The {\tt apply} tactic failed to match the conclusion of {\term} and the current goal. You can help the {\tt apply} tactic by transforming your goal with the {\tt change} or {\tt pattern} tactics (see sections~\ref{pattern},~\ref{change}). \item \errindex{Unable to find an instance for the variables {\ident} \dots\ {\ident}} This occurs when some instantiations of the premises of {\term} are not deducible from the unification. This is the case, for instance, when you want to apply a transitivity property. In this case, you have to use one of the variants below: \end{ErrMsgs} \begin{Variants} \item{\tt apply {\term} with {\term$_1$} \dots\ {\term$_n$}} \tacindex{apply \dots\ with} Provides {\tt apply} with explicit instantiations for all dependent premises of the type of {\term} which do not occur in the conclusion and consequently cannot be found by unification. Notice that {\term$_1$} \dots\ {\term$_n$} must be given according to the order of these dependent premises of the type of {\term}. \ErrMsg \errindex{Not the right number of missing arguments} \item{\tt apply {\term} with ({\vref$_1$} := {\term$_1$}) \dots\ ({\vref$_n$} := {\term$_n$})} This also provides {\tt apply} with values for instantiating premises. Here, variables are referred by names and non-dependent products by increasing numbers (see syntax in Section~\ref{Binding-list}). \item {\tt apply} {\term$_1$} {\tt ,} \ldots {\tt ,} {\term$_n$} This is a shortcut for {\tt apply} {\term$_1$} {\tt ; [ ..~|} \ldots~{\tt ; [ ..~| {\tt apply} {\term$_n$} ]} \ldots~{\tt ]}, i.e. 
for the successive applications of {\term$_{i+1}$} on the last subgoal generated by {\tt apply} {\term$_i$}, starting from the application of {\term$_1$}. \item {\tt eapply \term}\tacindex{eapply}\label{eapply} The tactic {\tt eapply} behaves like {\tt apply} but it does not fail when no instantiations are deducible for some variables in the premises. Rather, it turns these variables into so-called existential variables which are variables still to instantiate. An existential variable is identified by a name of the form {\tt ?$n$} where $n$ is a number. The instantiation is intended to be found later in the proof. \item {\tt simple apply {\term}} \tacindex{simple apply} This behaves like {\tt apply} but it reasons modulo conversion only on subterms that contain no variables to instantiate. For instance, the following example does not succeed because it would require the conversion of {\tt id ?1234} and {\tt O}. \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Definition id (x : nat) := x. Hypothesis H : forall y, id y = y. Goal O = O. \end{coq_example*} \begin{coq_example} simple apply H. \end{coq_example} Because it reasons modulo a limited amount of conversion, {\tt simple apply} fails quicker than {\tt apply} and it is then well-suited for uses in used-defined tactics that backtrack often. Moreover, it does not traverse tuples as {\tt apply} does. \item \zeroone{{\tt simple}} {\tt apply} {\term$_1$} \zeroone{{\tt with} {\bindinglist$_1$}} {\tt ,} \ldots {\tt ,} {\term$_n$} \zeroone{{\tt with} {\bindinglist$_n$}}\\ \zeroone{{\tt simple}} {\tt eapply} {\term$_1$} \zeroone{{\tt with} {\bindinglist$_1$}} {\tt ,} \ldots {\tt ,} {\term$_n$} \zeroone{{\tt with} {\bindinglist$_n$}} This summarizes the different syntaxes for {\tt apply} and {\tt eapply}. \item {\tt lapply {\term}} \tacindex{lapply} This tactic applies to any goal, say {\tt G}. The argument {\term} has to be well-formed in the current context, its type being reducible to a non-dependent product {\tt A -> B} with {\tt B} possibly containing products. Then it generates two subgoals {\tt B->G} and {\tt A}. Applying {\tt lapply H} (where {\tt H} has type {\tt A->B} and {\tt B} does not start with a product) does the same as giving the sequence {\tt cut B. 2:apply H.} where {\tt cut} is described below. \Warning When {\term} contains more than one non dependent product the tactic {\tt lapply} only takes into account the first product. \end{Variants} \Example Assume we have a transitive relation {\tt R} on {\tt nat}: \label{eapply-example} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Variable R : nat -> nat -> Prop. Hypothesis Rtrans : forall x y z:nat, R x y -> R y z -> R x z. Variables n m p : nat. Hypothesis Rnm : R n m. Hypothesis Rmp : R m p. \end{coq_example*} Consider the goal {\tt (R n p)} provable using the transitivity of {\tt R}: \begin{coq_example*} Goal R n p. \end{coq_example*} The direct application of {\tt Rtrans} with {\tt apply} fails because no value for {\tt y} in {\tt Rtrans} is found by {\tt apply}: %\begin{coq_eval} %Set Printing Depth 50. %(********** The following is not correct and should produce **********) %(**** Error: generated subgoal (R n ?17) has metavariables in it *****) %\end{coq_eval} \begin{coq_example} apply Rtrans. \end{coq_example} A solution is to apply {\tt (Rtrans n m p)} or {\tt (Rtrans n m)}. \begin{coq_example} apply (Rtrans n m p). \end{coq_example} \begin{coq_eval} Undo. 
\end{coq_eval} Note that {\tt n} can be inferred from the goal, so the following would work too. \begin{coq_example*} apply (Rtrans _ m). \end{coq_example*} \begin{coq_eval} Undo. \end{coq_eval} More elegantly, {\tt apply Rtrans with (y:=m)} allows to only mention the unknown {\tt m}: \begin{coq_example*} apply Rtrans with (y := m). \end{coq_example*} \begin{coq_eval} Undo. \end{coq_eval} Another solution is to mention the proof of {\tt (R x y)} in {\tt Rtrans} \ldots \begin{coq_example} apply Rtrans with (1 := Rnm). \end{coq_example} \begin{coq_eval} Undo. \end{coq_eval} \ldots or the proof of {\tt (R y z)}. \begin{coq_example} apply Rtrans with (2 := Rmp). \end{coq_example} \begin{coq_eval} Undo. \end{coq_eval} On the opposite, one can use {\tt eapply} which postpone the problem of finding {\tt m}. Then one can apply the hypotheses {\tt Rnm} and {\tt Rmp}. This instantiates the existential variable and completes the proof. \begin{coq_example} eapply Rtrans. apply Rnm. apply Rmp. \end{coq_example} \begin{coq_eval} Reset R. \end{coq_eval} \subsection{\tt apply {\term} in {\ident}} \tacindex{apply \dots\ in} This tactic applies to any goal. The argument {\term} is a term well-formed in the local context and the argument {\ident} is an hypothesis of the context. The tactic {\tt apply {\term} in {\ident}} tries to match the conclusion of the type of {\ident} against a non-dependent premise of the type of {\term}, trying them from right to left. If it succeeds, the statement of hypothesis {\ident} is replaced by the conclusion of the type of {\term}. The tactic also returns as many subgoals as the number of other non-dependent premises in the type of {\term} and of the non-dependent premises of the type of {\ident}. If the conclusion of the type of {\term} does not match the goal {\em and} the conclusion is an inductive type isomorphic to a tuple type, then the tuple is (recursively) decomposed and the first component of the tuple of which a non-dependent premise matches the conclusion of the type of {\ident}. Tuples are decomposed in a width-first left-to-right order (for instance if the type of {\tt H1} is a \verb=A <-> B= statement, and the type of {\tt H2} is \verb=A= then {\tt apply H1 in H2} transforms the type of {\tt H2} into {\tt B}). The tactic {\tt apply} relies on first-order pattern-matching with dependent types. \begin{ErrMsgs} \item \errindex{Statement without assumptions} This happens if the type of {\term} has no non dependent premise. \item \errindex{Unable to apply} This happens if the conclusion of {\ident} does not match any of the non dependent premises of the type of {\term}. \end{ErrMsgs} \begin{Variants} \item {\tt apply \nelist{\term}{,} in {\ident}} This applies each of {\term} in sequence in {\ident}. \item {\tt apply \nelist{{\term} with {\bindinglist}}{,} in {\ident}} This does the same but uses the bindings in each {\bindinglist} to instantiate the parameters of the corresponding type of {\term} (see syntax of bindings in Section~\ref{Binding-list}). \item {\tt eapply \nelist{{\term} with {\bindinglist}}{,} in {\ident}} \tacindex{eapply \dots\ in} This works as {\tt apply \nelist{{\term} with {\bindinglist}}{,} in {\ident}} but turns unresolved bindings into existential variables, if any, instead of failing. 
\item {\tt apply \nelist{{\term}{,} with {\bindinglist}}{,} in {\ident} as {\disjconjintropattern}} This works as {\tt apply \nelist{{\term}{,} with {\bindinglist}}{,} in {\ident}} then destructs the hypothesis {\ident} along {\disjconjintropattern} as {\tt destruct {\ident} as {\disjconjintropattern}} would. \item {\tt eapply \nelist{{\term}{,} with {\bindinglist}}{,} in {\ident} as {\disjconjintropattern}} This works as {\tt apply \nelist{{\term}{,} with {\bindinglist}}{,} in {\ident} as {\disjconjintropattern}} but using {\tt eapply}. \item {\tt simple apply {\term} in {\ident}} \tacindex{simple apply \dots\ in} \tacindex{simple eapply \dots\ in} This behaves like {\tt apply {\term} in {\ident}} but it reasons modulo conversion only on subterms that contain no variables to instantiate. For instance, if {\tt id := fun x:nat => x} and {\tt H : forall y, id y = y -> True} and {\tt H0 :\ O = O} then {\tt simple apply H in H0} does not succeed because it would require the conversion of {\tt id ?1234} and {\tt O} where {\tt ?1234} is a variable to instantiate. Tactic {\tt simple apply {\term} in {\ident}} does not either traverse tuples as {\tt apply {\term} in {\ident}} does. \item {\tt \zeroone{simple} apply \nelist{{\term} \zeroone{with {\bindinglist}}}{,} in {\ident} \zeroone{as {\disjconjintropattern}}}\\ {\tt \zeroone{simple} eapply \nelist{{\term} \zeroone{with {\bindinglist}}}{,} in {\ident} \zeroone{as {\disjconjintropattern}}} This summarizes the different syntactic variants of {\tt apply {\term} in {\ident}} and {\tt eapply {\term} in {\ident}}. \end{Variants} \subsection{\tt constructor \num} \label{constructor} \tacindex{constructor} This tactic applies to a goal such that its conclusion is an inductive type (say {\tt I}). The argument {\num} must be less or equal to the numbers of constructor(s) of {\tt I}. Let {\tt ci} be the {\tt i}-th constructor of {\tt I}, then {\tt constructor i} is equivalent to {\tt intros; apply ci}. \begin{ErrMsgs} \item \errindex{Not an inductive product} \item \errindex{Not enough constructors} \end{ErrMsgs} \begin{Variants} \item \texttt{constructor} This tries \texttt{constructor 1} then \texttt{constructor 2}, \dots\ , then \texttt{constructor} \textit{n} where \textit{n} is the number of constructors of the head of the goal. \item {\tt constructor \num~with} {\bindinglist} Let {\tt ci} be the {\tt i}-th constructor of {\tt I}, then {\tt constructor i with \bindinglist} is equivalent to {\tt intros; apply ci with \bindinglist}. \Warning the terms in the \bindinglist\ are checked in the context where {\tt constructor} is executed and not in the context where {\tt apply} is executed (the introductions are not taken into account). % To document? % \item {\tt constructor {\tactic}} \item {\tt split}\tacindex{split} This applies only if {\tt I} has a single constructor. It is then equivalent to {\tt constructor 1}. It is typically used in the case of a conjunction $A\land B$. \ErrMsg \errindex{Not an inductive goal with 1 constructor} \item {\tt exists {\bindinglist}}\tacindex{exists} This applies only if {\tt I} has a single constructor. It is then equivalent to {\tt intros; constructor 1 with \bindinglist}. It is typically used in the case of an existential quantification $\exists x, P(x)$. \ErrMsg \errindex{Not an inductive goal with 1 constructor} \item {\tt exists \nelist{\bindinglist}{,}} This iteratively applies {\tt exists {\bindinglist}}. 
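For instance, in the following illustrative script (the goal is arbitrary and
not part of the original text), a single call provides witnesses for two
nested existential quantifiers:
\begin{coq_example}
Goal exists x : nat, exists y : nat, x + y = 1.
exists 0, 1.
reflexivity.
Qed.
\end{coq_example}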
\item {\tt left}\tacindex{left}\\ {\tt right}\tacindex{right} These tactics apply only if {\tt I} has two constructors, for instance in the case of a disjunction $A\lor B$. Then, they are respectively equivalent to {\tt constructor 1} and {\tt constructor 2}. \ErrMsg \errindex{Not an inductive goal with 2 constructors} \item {\tt left with \bindinglist}\\ {\tt right with \bindinglist}\\ {\tt split with \bindinglist} As soon as the inductive type has the right number of constructors, these expressions are equivalent to calling {\tt constructor $i$ with \bindinglist} for the appropriate $i$. \item \texttt{econstructor}\tacindex{econstructor}\\ \texttt{eexists}\tacindex{eexists}\\ \texttt{esplit}\tacindex{esplit}\\ \texttt{eleft}\tacindex{eleft}\\ \texttt{eright}\tacindex{eright} These tactics and their variants behave like \texttt{constructor}, \texttt{exists}, \texttt{split}, \texttt{left}, \texttt{right} and their variants but they introduce existential variables instead of failing when the instantiation of a variable cannot be found (cf \texttt{eapply} and Section~\ref{eapply-example}). \end{Variants} \section{Managing the local context} \subsection{\tt intro} \tacindex{intro} \label{intro} This tactic applies to a goal that is either a product or starts with a let binder. If the goal is a product, the tactic implements the ``Lam''\index{Typing rules!Lam} rule given in Section~\ref{Typed-terms}\footnote{Actually, only the second subgoal will be generated since the other one can be automatically checked.}. If the goal starts with a let binder, then the tactic implements a mix of the ``Let''\index{Typing rules!Let} and ``Conv''\index{Typing rules!Conv}. If the current goal is a dependent product $\forall x:T,~U$ (resp {\tt let $x$:=$t$ in $U$}) then {\tt intro} puts {\tt $x$:$T$} (resp {\tt $x$:=$t$}) in the local context. % Obsolete (quantified names already avoid hypotheses names): % Otherwise, it puts % {\tt x}{\it n}{\tt :T} where {\it n} is such that {\tt x}{\it n} is a %fresh name. The new subgoal is $U$. % If the {\tt x} has been renamed {\tt x}{\it n} then it is replaced % by {\tt x}{\it n} in {\tt U}. If the goal is a non-dependent product $T \to U$, then it puts in the local context either {\tt H}{\it n}{\tt :$T$} (if $T$ is of type {\tt Set} or {\tt Prop}) or {\tt X}{\it n}{\tt :$T$} (if the type of $T$ is {\tt Type}). The optional index {\it n} is such that {\tt H}{\it n} or {\tt X}{\it n} is a fresh identifier. In both cases, the new subgoal is $U$. If the goal is neither a product nor starting with a let definition, the tactic {\tt intro} applies the tactic {\tt red} until the tactic {\tt intro} can be applied or the goal is not reducible. \begin{ErrMsgs} \item \errindex{No product even after head-reduction} \item \errindexbis{{\ident} is already used}{is already used} \end{ErrMsgs} \begin{Variants} \item {\tt intros}\tacindex{intros} This repeats {\tt intro} until it meets the head-constant. It never reduces head-constants and it never fails. \item {\tt intro {\ident}} This applies {\tt intro} but forces {\ident} to be the name of the introduced hypothesis. \ErrMsg \errindex{name {\ident} is already used} \Rem If a name used by {\tt intro} hides the base name of a global constant then the latter can still be referred to by a qualified name (see \ref{LongNames}). \item {\tt intros \ident$_1$ \dots\ \ident$_n$} This is equivalent to the composed tactic {\tt intro \ident$_1$; \dots\ ; intro \ident$_n$}. 
More generally, the \texttt{intros} tactic takes a pattern as argument in order to introduce names for components of an inductive definition or to clear introduced hypotheses. This is explained in~\ref{intros-pattern}. \item {\tt intros until {\ident}} \tacindex{intros until} This repeats {\tt intro} until it meets a premise of the goal having form {\tt (} {\ident}~{\tt :}~{\term} {\tt )} and discharges the variable named {\ident} of the current goal. \ErrMsg \errindex{No such hypothesis in current goal} \item {\tt intros until {\num}} \tacindex{intros until} This repeats {\tt intro} until the {\num}-th non-dependent product. For instance, on the subgoal % \verb+forall x y:nat, x=y -> y=x+ the tactic \texttt{intros until 1} is equivalent to \texttt{intros x y H}, as \verb+x=y -> y=x+ is the first non-dependent product. And on the subgoal % \verb+forall x y z:nat, x=y -> y=x+ the tactic \texttt{intros until 1} is equivalent to \texttt{intros x y z} as the product on \texttt{z} can be rewritten as a non-dependent product: % \verb+forall x y:nat, nat -> x=y -> y=x+ \ErrMsg \errindex{No such hypothesis in current goal} This happens when {\num} is 0 or is greater than the number of non-dependent products of the goal. \item {\tt intro after \ident} \tacindex{intro after}\\ {\tt intro before \ident} \tacindex{intro before}\\ {\tt intro at top} \tacindex{intro at top}\\ {\tt intro at bottom} \tacindex{intro at bottom} These tactics apply {\tt intro} and move the freshly introduced hypothesis respectively after the hypothesis \ident{}, before the hypothesis \ident{}, at the top of the local context, or at the bottom of the local context. All hypotheses on which the new hypothesis depends are moved too so as to respect the order of dependencies between hypotheses. Note that {\tt intro at bottom} is a synonym for {\tt intro} with no argument. \ErrMsg \errindex{No such hypothesis} : {\ident} \item {\tt intro \ident$_1$ after \ident$_2$}\\ {\tt intro \ident$_1$ before \ident$_2$}\\ {\tt intro \ident$_1$ at top}\\ {\tt intro \ident$_1$ at bottom} These tactics behave as previously but naming the introduced hypothesis \ident$_1$. It is equivalent to {\tt intro \ident$_1$} followed by the appropriate call to {\tt move}~(see Section~\ref{move}). \end{Variants} \subsection{\tt intros {\intropattern} {\ldots} {\intropattern}} \label{intros-pattern} \tacindex{intros \intropattern} \index{Introduction patterns} \index{Naming introduction patterns} \index{Disjunctive/conjunctive introduction patterns} This extension of the tactic {\tt intros} combines introduction of variables or hypotheses and case analysis. An {\em introduction pattern} is either: \begin{itemize} \item a {\em naming introduction pattern}, i.e. either one of: \begin{itemize} \item the pattern \texttt{?} \item the pattern \texttt{?\ident} \item an identifier \end{itemize} \item a {\em disjunctive/conjunctive introduction pattern}, i.e. 
either one of: \begin{itemize} \item a disjunction of lists of patterns: {\tt [$p_{11}$ \dots\ $p_{1m_1}$ | \dots\ | $p_{11}$ \dots\ $p_{nm_n}$]} \item a conjunction of patterns: {\tt ($p_1$ , \dots\ , $p_n$)} \item a list of patterns {\tt ($p_1$ \&\ \dots\ \&\ $p_n$)} for sequence of right-associative binary constructs \end{itemize} \item the wildcard: {\tt \_} \item the rewriting orientations: {\tt ->} or {\tt <-} \end{itemize} Assuming a goal of type $Q \to P$ (non-dependent product), or of type $\forall x:T,~P$ (dependent product), the behavior of {\tt intros $p$} is defined inductively over the structure of the introduction pattern~$p$: \begin{itemize} \item introduction on \texttt{?} performs the introduction, and lets {\Coq} choose a fresh name for the variable; \item introduction on \texttt{?\ident} performs the introduction, and lets {\Coq} choose a fresh name for the variable based on {\ident}; \item introduction on \texttt{\ident} behaves as described in Section~\ref{intro}; \item introduction over a disjunction of list of patterns {\tt [$p_{11}$ \dots\ $p_{1m_1}$ | \dots\ | $p_{11}$ \dots\ $p_{nm_n}$]} expects the product to be over an inductive type whose number of constructors is $n$ (or more generally over a type of conclusion an inductive type built from $n$ constructors, e.g. {\tt C -> A\textbackslash/B if $n=2$}): it destructs the introduced hypothesis as {\tt destruct} (see Section~\ref{destruct}) would and applies on each generated subgoal the corresponding tactic; \texttt{intros}~$p_{i1}$ {\ldots} $p_{im_i}$; if the disjunctive pattern is part of a sequence of patterns and is not the last pattern of the sequence, then {\Coq} completes the pattern so that all the argument of the constructors of the inductive type are introduced (for instance, the list of patterns {\tt [$\;$|$\;$] H} applied on goal {\tt forall x:nat, x=0 -> 0=x} behaves the same as the list of patterns {\tt [$\,$|$\,$?$\,$] H}); \item introduction over a conjunction of patterns {\tt ($p_1$, \ldots, $p_n$)} expects the goal to be a product over an inductive type $I$ with a single constructor that itself has at least $n$ arguments: it performs a case analysis over the hypothesis, as {\tt destruct} would, and applies the patterns $p_1$~\ldots~$p_n$ to the arguments of the constructor of $I$ (observe that {\tt ($p_1$, {\ldots}, $p_n$)} is an alternative notation for {\tt [$p_1$ {\ldots} $p_n$]}); \item introduction via {\tt ($p_1$ \& \dots\ \& $p_n$)} is a shortcut for introduction via {\tt ($p_1$,(\ldots,(\dots,$p_n$)\ldots))}; it expects the hypothesis to be a sequence of right-associative binary inductive constructors such as {\tt conj} or {\tt ex\_intro}; for instance, an hypothesis with type {\tt A\verb|/\|exists x, B\verb|/\|C\verb|/\|D} can be introduced via pattern {\tt (a \& x \& b \& c \& d)}; \item introduction on the wildcard depends on whether the product is dependent or not: in the non-dependent case, it erases the corresponding hypothesis (i.e. 
it behaves as an {\tt intro} followed by a {\tt clear}, cf Section~\ref{clear}) while in the dependent case, it succeeds and erases the variable only if the wildcard is part of a more complex list of introduction patterns that also erases the hypotheses depending on this variable; \item introduction over {\tt ->} (respectively {\tt <-}) expects the hypothesis to be an equality and the right-hand-side (respectively the left-hand-side) is replaced by the left-hand-side (respectively the right-hand-side) in both the conclusion and the context of the goal; if moreover the term to substitute is a variable, the hypothesis is removed. \end{itemize} \Example \begin{coq_example} Goal forall A B C:Prop, A \/ B /\ C -> (A -> C) -> C. intros A B C [a| [_ c]] f. apply (f a). exact c. Qed. \end{coq_example} \Rem {\tt intros $p_1~\ldots~p_n$} is not equivalent to \texttt{intros $p_1$;\ldots; intros $p_n$} for the following reasons: \begin{itemize} \item A wildcard pattern never succeeds when applied isolated on a dependent product, while it succeeds as part of a list of introduction patterns if the hypotheses that depends on it are erased too. \item A disjunctive or conjunctive pattern followed by an introduction pattern forces the introduction in the context of all arguments of the constructors before applying the next pattern while a terminal disjunctive or conjunctive pattern does not. Here is an example \begin{coq_example} Goal forall n:nat, n = 0 -> n = 0. intros [ | ] H. Show 2. Undo. intros [ | ]; intros H. Show 2. \end{coq_example} \end{itemize} \subsection{\tt clear \ident} \tacindex{clear} \label{clear} This tactic erases the hypothesis named {\ident} in the local context of the current goal. As a consequence, {\ident} is no more displayed and no more usable in the proof development. \begin{ErrMsgs} \item \errindex{No such hypothesis} \item \errindexbis{{\ident} is used in the conclusion}{is used in the conclusion} \item \errindexbis{{\ident} is used in the hypothesis {\ident'}}{is used in the hypothesis} \end{ErrMsgs} \begin{Variants} \item {\tt clear {\ident$_1$} \dots\ {\ident$_n$}} This is equivalent to {\tt clear {\ident$_1$}. {\ldots} clear {\ident$_n$}.} \item {\tt clearbody {\ident}}\tacindex{clearbody} This tactic expects {\ident} to be a local definition then clears its body. Otherwise said, this tactic turns a definition into an assumption. \ErrMsg \errindexbis{{\ident} is not a local definition}{is not a local definition} \item \texttt{clear - {\ident$_1$} \dots\ {\ident$_n$}} This tactic clears all the hypotheses except the ones depending in the hypotheses named {\ident$_1$} {\ldots} {\ident$_n$} and in the goal. \item \texttt{clear} This tactic clears all the hypotheses except the ones the goal depends on. \item {\tt clear dependent \ident \tacindex{clear dependent}} This clears the hypothesis \ident\ and all the hypotheses that depend on it. \end{Variants} \subsection{\tt revert \ident$_1$ \dots\ \ident$_n$} \tacindex{revert} \label{revert} This applies to any goal with variables \ident$_1$ \dots\ \ident$_n$. It moves the hypotheses (possibly defined) to the goal, if this respects dependencies. This tactic is the inverse of {\tt intro}. \begin{ErrMsgs} \item \errindex{No such hypothesis} \item \errindexbis{{\ident} is used in the hypothesis {\ident'}}{is used in the hypothesis} \end{ErrMsgs} \begin{Variants} \item {\tt revert dependent \ident \tacindex{revert dependent}} This moves to the goal the hypothesis \ident\ and all hypotheses which depend on it. 
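For instance, in the following illustrative script (the goal and the names
are arbitrary, not part of the original text), reverting {\tt m} also sends
the hypothesis {\tt H}, which depends on it, back into the goal:
\begin{coq_example}
Goal forall n m : nat, n = m -> m = n.
intros n m H.
revert dependent m.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}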
\end{Variants} \subsection{\tt move {\ident$_1$} after {\ident$_2$}} \tacindex{move} \label{move} This moves the hypothesis named {\ident$_1$} in the local context after the hypothesis named {\ident$_2$}. The proof term is not changed. If {\ident$_1$} comes before {\ident$_2$} in the order of dependences, then all hypotheses between {\ident$_1$} and {\ident$_2$} that (possibly indirectly) depend on {\ident$_1$} are moved also. If {\ident$_1$} comes after {\ident$_2$} in the order of dependences, then all hypotheses between {\ident$_1$} and {\ident$_2$} that (possibly indirectly) occur in {\ident$_1$} are moved also. \begin{Variants} \item {\tt move {\ident$_1$} before {\ident$_2$}} This moves {\ident$_1$} towards and just before the hypothesis named {\ident$_2$}. \item {\tt move {\ident} at top} This moves {\ident} at the top of the local context (at the beginning of the context). \item {\tt move {\ident} at bottom} This moves {\ident} at the bottom of the local context (at the end of the context). \end{Variants} \begin{ErrMsgs} \item \errindex{No such hypothesis} \item \errindex{Cannot move {\ident$_1$} after {\ident$_2$}: it occurs in {\ident$_2$}} \item \errindex{Cannot move {\ident$_1$} after {\ident$_2$}: it depends on {\ident$_2$}} \end{ErrMsgs} \subsection{\tt rename {\ident$_1$} into {\ident$_2$}} \tacindex{rename} This renames hypothesis {\ident$_1$} into {\ident$_2$} in the current context. The name of the hypothesis in the proof-term, however, is left unchanged. \begin{Variants} \item {\tt rename {\ident$_1$} into {\ident$_2$}, \ldots, {\ident$_{2k-1}$} into {\ident$_{2k}$}} This is equivalent to the sequence of the corresponding atomic {\tt rename}. \end{Variants} \begin{ErrMsgs} \item \errindex{No such hypothesis} \item \errindexbis{{\ident$_2$} is already used}{is already used} \end{ErrMsgs} \subsection{\tt set ( {\ident} := {\term} )} \label{tactic:set} \tacindex{set} This replaces {\term} by {\ident} in the conclusion of the current goal and adds the new definition {\tt {\ident} := \term} to the local context. If {\term} has holes (i.e. subexpressions of the form ``\_''), the tactic first checks that all subterms matching the pattern are compatible before doing the replacement using the leftmost subterm matching the pattern. \begin{ErrMsgs} \item \errindex{The variable {\ident} is already defined} \end{ErrMsgs} \begin{Variants} \item {\tt set ( {\ident} := {\term} ) in {\occgoalset}} This notation allows to specify which occurrences of {\term} have to be substituted in the context. The {\tt in {\occgoalset}} clause is an occurrence clause whose syntax and behavior are described in Section~\ref{Occurrences clauses}. \item {\tt set ( {\ident} \nelist{\binder}{} := {\term} )} This is equivalent to {\tt set ( {\ident} := fun \nelist{\binder}{} => {\term} )}. \item {\tt set \term} This behaves as {\tt set (} {\ident} := {\term} {\tt )} but {\ident} is generated by {\Coq}. This variant also supports an occurrence clause. \item {\tt set ( {\ident$_0$} \nelist{\binder}{} := {\term} ) in {\occgoalset}}\\ {\tt set {\term} in {\occgoalset}} These are the general forms which combine the previous possibilities. \item {\tt remember {\term} as {\ident}}\tacindex{remember} This behaves as {\tt set ( {\ident} := {\term} ) in *} and using a logical (Leibniz's) equality instead of a local definition. \item {\tt remember {\term} as {\ident} eqn:{\ident}} This behaves as {\tt remember {\term} as {\ident}}, except that the name of the generated equality is also given. 
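For instance, in the following illustrative script (the goal and the names
{\tt m} and {\tt Hm} are arbitrary, not part of the original text), the
generated equality receives the name given after {\tt eqn:}:
\begin{coq_example}
Goal forall n : nat, n + 0 = n.
intro n.
remember (n + 0) as m eqn:Hm.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}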
\item {\tt remember {\term} as {\ident} in {\occgoalset}} This is a more general form of {\tt remember} that remembers the occurrences of {\term} specified by an occurrences set. \item {\tt pose ( {\ident} := {\term} )}\tacindex{pose} This adds the local definition {\ident} := {\term} to the current context without performing any replacement in the goal or in the hypotheses. It is equivalent to {\tt set ( {\ident} {\tt :=} {\term} {\tt ) in |-}}. \item {\tt pose ( {\ident} \nelist{\binder}{} := {\term} )} This is equivalent to {\tt pose (} {\ident} {\tt :=} {\tt fun} \nelist{\binder}{} {\tt =>} {\term} {\tt )}. \item{\tt pose {\term}} This behaves as {\tt pose ( {\ident} := {\term} )} but {\ident} is generated by {\Coq}. \end{Variants} \subsection{\tt decompose [ {\qualid$_1$} \dots\ {\qualid$_n$} ] \term} \label{decompose} \tacindex{decompose} This tactic allows to recursively decompose a complex proposition in order to obtain atomic ones. \Example \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Goal forall A B C:Prop, A /\ B /\ C \/ B /\ C \/ C /\ A -> C. intros A B C H; decompose [and or] H; assumption. \end{coq_example} \begin{coq_example*} Qed. \end{coq_example*} {\tt decompose} does not work on right-hand sides of implications or products. \begin{Variants} \item {\tt decompose sum \term}\tacindex{decompose sum} This decomposes sum types (like \texttt{or}). \item {\tt decompose record \term}\tacindex{decompose record} This decomposes record types (inductive types with one constructor, like \texttt{and} and \texttt{exists} and those defined with the \texttt{Record} macro, see Section~\ref{Record}). \end{Variants} \section{Controlling the proof flow} \subsection{\tt assert ( {\ident} :\ {\form} )} \tacindex{assert} This tactic applies to any goal. {\tt assert (H : U)} adds a new hypothesis of name \texttt{H} asserting \texttt{U} to the current goal and opens a new subgoal \texttt{U}\footnote{This corresponds to the cut rule of sequent calculus.}. The subgoal {\texttt U} comes first in the list of subgoals remaining to prove. \begin{ErrMsgs} \item \errindex{Not a proposition or a type} Arises when the argument {\form} is neither of type {\tt Prop}, {\tt Set} nor {\tt Type}. \end{ErrMsgs} \begin{Variants} \item{\tt assert {\form}} This behaves as {\tt assert ( {\ident} :\ {\form} )} but {\ident} is generated by {\Coq}. \item{\tt assert ( {\ident} := {\term} )} This behaves as {\tt assert ({\ident} :\ {\type});[exact {\term}|idtac]} where {\type} is the type of {\term}. \ErrMsg \errindex{Variable {\ident} is already declared} \item {\tt cut {\form}}\tacindex{cut} This tactic applies to any goal. It implements the non-dependent case of the ``App''\index{Typing rules!App} rule given in Section~\ref{Typed-terms}. (This is Modus Ponens inference rule.) {\tt cut U} transforms the current goal \texttt{T} into the two following subgoals: {\tt U -> T} and \texttt{U}. The subgoal {\tt U -> T} comes first in the list of remaining subgoal to prove. \item \texttt{assert {\form} by {\tac}}\tacindex{assert by} This tactic behaves like \texttt{assert} but applies {\tac} to solve the subgoals generated by \texttt{assert}. \ErrMsg \errindex{Proof is not complete} \item \texttt{assert {\form} as {\intropattern}\tacindex{assert as}} If {\intropattern} is a naming introduction pattern (see Section~\ref{intros-pattern}), the hypothesis is named after this introduction pattern (in particular, if {\intropattern} is {\ident}, the tactic behaves like \texttt{assert ({\ident} :\ {\form})}). 
If {\intropattern} is a disjunctive/conjunctive introduction pattern, the tactic behaves like \texttt{assert {\form}} then destructing the resulting hypothesis using the given introduction pattern. \item \texttt{assert {\form} as {\intropattern} by {\tac}} This combines the two previous variants of {\tt assert}. \item \texttt{pose proof {\term} as {\intropattern}\tacindex{pose proof}} This tactic behaves like \texttt{assert T as {\intropattern} by exact {\term}} where \texttt{T} is the type of {\term}. In particular, \texttt{pose proof {\term} as {\ident}} behaves as \texttt{assert ({\ident} := {\term})} and \texttt{pose proof {\term} as {\disjconjintropattern}\tacindex{pose proof}} behaves like \texttt{destruct {\term} as {\disjconjintropattern}}. \item {\tt specialize ({\ident} \term$_1$ \dots\ \term$_n$)\tacindex{specialize}} \\ {\tt specialize {\ident} with \bindinglist} The tactic {\tt specialize} works on local hypothesis \ident. The premises of this hypothesis (either universal quantifications or non-dependent implications) are instantiated by concrete terms coming either from arguments \term$_1$ $\ldots$ \term$_n$ or from a bindings list (see Section~\ref{Binding-list} for more about bindings lists). In the second form, all instantiation elements must be given, whereas in the first form the application to \term$_1$ {\ldots} \term$_n$ can be partial. The first form is equivalent to {\tt assert (\ident' := {\ident} {\term$_1$} \dots\ \term$_n$); clear \ident; rename \ident' into \ident}. The name {\ident} can also refer to a global lemma or hypothesis. In this case, for compatibility reasons, the behavior of {\tt specialize} is close to that of {\tt generalize}: the instantiated statement becomes an additional premise of the goal. \begin{ErrMsgs} \item \errindexbis{{\ident} is used in hypothesis \ident'}{is used in hypothesis} \item \errindexbis{{\ident} is used in conclusion}{is used in conclusion} \end{ErrMsgs} %% Moreover, the old syntax allows the use of a number after {\tt specialize} %% for controlling the number of premises to instantiate. Giving this %% number should not be mandatory anymore (automatic detection of how %% many premises can be eaten without leaving meta-variables). Hence %% no documentation for this integer optional argument of specialize \end{Variants} \subsection{\tt generalize \term} \tacindex{generalize} \label{generalize} This tactic applies to any goal. It generalizes the conclusion with respect to one of its subterms. \Example \begin{coq_eval} Goal forall x y:nat, (0 <= x + y + y). intros. \end{coq_eval} \begin{coq_example} Show. generalize (x + y + y). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} If the goal is $G$ and $t$ is a subterm of type $T$ in the goal, then {\tt generalize} \textit{t} replaces the goal by {\tt forall (x:$T$), $G'$} where $G'$ is obtained from $G$ by replacing all occurrences of $t$ by {\tt x}. The name of the variable (here {\tt n}) is chosen based on $T$. \begin{Variants} \item {\tt generalize {\term$_1$ , \dots\ , \term$_n$}} This is equivalent to {\tt generalize \term$_n$; \dots\ ; generalize \term$_1$}. Note that the sequence of \term$_i$'s are processed from $n$ to $1$. \item {\tt generalize {\term} at {\num$_1$ \dots\ \num$_i$}} This is equivalent to {\tt generalize \term} but it generalizes only over the specified occurrences of {\term} (counting from left to right on the expression printed using option {\tt Set Printing All}). 
\item {\tt generalize {\term} as {\ident}} This is equivalent to {\tt generalize \term} but it uses {\ident} to name the generalized hypothesis. \item {\tt generalize {\term$_1$} at {\num$_{11}$ \dots\ \num$_{1i_1}$} as {\ident$_1$} , {\ldots} , {\term$_n$} at {\num$_{n1}$ \dots\ \num$_{ni_n}$} as {\ident$_2$}} This is the most general form of {\tt generalize} that combines the previous behaviors. \item {\tt generalize dependent \term} \tacindex{generalize dependent} This generalizes {\term} but also {\em all} hypotheses that depend on {\term}. It clears the generalized hypotheses. \end{Variants} \subsection{\tt evar ( {\ident} :\ {\term} )} \tacindex{evar} \label{evar} The {\tt evar} tactic creates a new local definition named \ident\ with type \term\ in the context. The body of this binding is a fresh existential variable. \subsection{\tt instantiate ( {\num} := {\term} )} \tacindex{instantiate} \label{instantiate} The {\tt instantiate} tactic allows to refine (see Section~\ref{refine}) an existential variable with the term \term. The \num\ argument is the position of the existential variable from right to left in the conclusion. This cannot be the number of the existential variable since this number is different in every session. \begin{Variants} \item {\tt instantiate ( {\num} := {\term} ) in \ident} \item {\tt instantiate ( {\num} := {\term} ) in ( Value of {\ident} )} \item {\tt instantiate ( {\num} := {\term} ) in ( Type of {\ident} )} These allow to refer respectively to existential variables occurring in a hypothesis or in the body or the type of a local definition. \item {\tt instantiate} Without argument, the {\tt instantiate} tactic tries to solve as many existential variables as possible, using information gathered from other tactics in the same tactical. This is automatically done after each complete tactic (i.e. after a dot in proof mode), but not, for example, between each tactic when they are sequenced by semicolons. \end{Variants} \subsection{\tt admit} \tacindex{admit} \label{admit} The {\tt admit} tactic ``solves'' the current subgoal by an axiom. This typically allows to temporarily skip a subgoal so as to progress further in the rest of the proof. To know if some proof still relies on unproved subgoals, one can use the command {\tt Print Assumptions} (see Section~\ref{PrintAssumptions}). Admitted subgoals have names of the form {\ident}\texttt{\_admitted} possibly followed by a number. \subsection{\tt absurd \term} \tacindex{absurd} \label{absurd} This tactic applies to any goal. The argument {\term} is any proposition {\tt P} of type {\tt Prop}. This tactic applies {\tt False} elimination, that is it deduces the current goal from {\tt False}, and generates as subgoals {\tt $\sim$P} and {\tt P}. It is very useful in proofs by cases, where some cases are impossible. In most cases, \texttt{P} or $\sim$\texttt{P} is one of the hypotheses of the local context. \subsection{\tt contradiction} \label{contradiction} \tacindex{contradiction} This tactic applies to any goal. The {\tt contradiction} tactic attempts to find in the current context (after all {\tt intros}) one hypothesis that is equivalent to {\tt False}. It permits to prune irrelevant cases. This tactic is a macro for the tactics sequence {\tt intros; elimtype False; assumption}. \begin{ErrMsgs} \item \errindex{No such assumption} \end{ErrMsgs} \begin{Variants} \item {\tt contradiction \ident} The proof of {\tt False} is searched in the hypothesis named \ident. 
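As a small illustration of this tactic (here in its basic form, without
argument; the goal is arbitrary and not part of the original text), a goal
whose context contains both {\tt P} and {\tt $\sim$P} is closed immediately:
\begin{coq_example}
Goal forall P : Prop, P -> ~ P -> 0 = 1.
intros P p np.
contradiction.
Qed.
\end{coq_example}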
\end{Variants}

\subsection{\tt contradict \ident}
\label{contradict}
\tacindex{contradict}

This tactic allows one to manipulate negated hypotheses and goals. The
name \ident\ should correspond to a hypothesis. With {\tt contradict H},
the current goal and context are transformed in the following way:
\begin{itemize}
\item {\tt H:$\neg$A $\vd$ B} \ becomes \ {\tt $\vd$ A}
\item {\tt H:$\neg$A $\vd$ $\neg$B} \ becomes \ {\tt H: B $\vd$ A }
\item {\tt H: A $\vd$ B} \ becomes \ {\tt $\vd$ $\neg$A}
\item {\tt H: A $\vd$ $\neg$B} \ becomes \ {\tt H: B $\vd$ $\neg$A}
\end{itemize}

\subsection{\tt exfalso}
\label{exfalso}
\tacindex{exfalso}

This tactic implements the ``ex falso quodlibet'' logical principle: an
elimination of {\tt False} is performed on the current goal, and the user
is then required to prove that {\tt False} is indeed provable in the
current context. This tactic is a macro for {\tt elimtype False}.

\section{Case analysis and induction}

The tactics presented in this section implement induction or case analysis
on inductive or co-inductive objects (see
Section~\ref{Cic-inductive-definitions}).

\subsection{\tt destruct \term}
\tacindex{destruct}
\label{destruct}

This tactic applies to any goal. The argument {\term} must be of inductive
or co-inductive type and the tactic generates subgoals, one for each
possible form of {\term}, i.e. one for each constructor of the inductive or
co-inductive type. Unlike {\tt induction}, no induction hypothesis is
generated by {\tt destruct}.

If the argument is dependent in either the conclusion or some hypotheses of
the goal, the argument is replaced by the appropriate constructor form in
each of the resulting subgoals, thus performing case analysis. If
non-dependent, the tactic simply exposes the inductive or co-inductive
structure of the argument.

There are special cases:

\begin{itemize}

\item If {\term} is an identifier {\ident} denoting a quantified variable
of the conclusion of the goal, then {\tt destruct {\ident}} behaves as
{\tt intros until {\ident}; destruct {\ident}}.

\item If {\term} is a {\num}, then {\tt destruct {\num}} behaves as
{\tt intros until {\num}} followed by {\tt destruct} applied to the last
introduced hypothesis. Remark: For destruction of a numeral, use the syntax
{\tt destruct ({\num})} (not very interesting anyway).

\item The argument {\term} can also be a pattern whose holes are denoted by
``\_''. In this case, the tactic checks that all subterms matching the
pattern in the conclusion and the hypotheses are compatible and performs
case analysis using this subterm.

\end{itemize}

\begin{Variants}
\item{\tt destruct \term$_1$, \ldots, \term$_n$}

  This is a shortcut for {\tt destruct \term$_1$; \ldots; destruct \term$_n$}.

\item{\tt destruct {\term} as {\disjconjintropattern}}

  This behaves as {\tt destruct {\term}} but uses the names in
  {\disjconjintropattern} to name the variables introduced in the context.
  The {\disjconjintropattern} must have the form {\tt [} $p_{11}$ \ldots
  $p_{1n_1}$ {\tt |} {\ldots} {\tt |} $p_{m1}$ \ldots $p_{mn_m}$ {\tt ]}
  with $m$ being the number of constructors of the type of {\term}. Each
  variable introduced by {\tt destruct} in the context of the $i^{th}$ goal
  gets its name from the list $p_{i1}$ \ldots $p_{in_i}$ in order. If there
  are not enough names, {\tt destruct} invents names for the remaining
  variables to introduce. More generally, the $p_{ij}$ can be any
  disjunctive/conjunctive introduction pattern (see
  Section~\ref{intros-pattern}). This provides a concise notation for
  nested destruction.
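  For instance, destructing a (hypothetical) conjunction hypothesis with
  explicit names for its two components:

\begin{coq_eval}
Goal forall A B:Prop, A /\ B -> B /\ A.
intros A B H.
\end{coq_eval}
\begin{coq_example}
destruct H as [HA HB].
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}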
% It is recommended to use this variant of {\tt destruct} for % robust proof scripts. \item{\tt destruct {\term} eqn:{\namingintropattern}} This behaves as {\tt destruct {\term}} but adds an equation between {\term} and the value that {\term} takes in each of the possible cases. The name of the equation is specified by {\namingintropattern} (see Section~\ref{intros-pattern}), in particular {\tt ?} can be used to let Coq generate a fresh name. \item{\tt destruct {\term} with \bindinglist} This behaves like \texttt{destruct {\term}} providing explicit instances for the dependent premises of the type of {\term} (see syntax of bindings in Section~\ref{Binding-list}). \item{\tt edestruct {\term}\tacindex{edestruct}} This tactic behaves like \texttt{destruct {\term}} except that it does not fail if the instance of a dependent premises of the type of {\term} is not inferable. Instead, the unresolved instances are left as existential variables to be inferred later, in the same way as {\tt eapply} does (see Section~\ref{eapply-example}). \item{\tt destruct {\term$_1$} using {\term$_2$}}\\ {\tt destruct {\term$_1$} using {\term$_2$} with {\bindinglist}} These are synonyms of {\tt induction {\term$_1$} using {\term$_2$}} and {\tt induction {\term$_1$} using {\term$_2$} with {\bindinglist}}. \item \texttt{destruct {\term} in {\occgoalset}} This syntax is used for selecting which occurrences of {\term} the case analysis has to be done on. The {\tt in {\occgoalset}} clause is an occurrence clause whose syntax and behavior is described in Section~\ref{Occurrences clauses}. \item{\tt destruct {\term$_1$} with {\bindinglist$_1$} as {\disjconjintropattern} eqn:{\namingintropattern} using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}}\\ {\tt edestruct {\term$_1$} with {\bindinglist$_1$} as {\disjconjintropattern} eqn:{\namingintropattern} using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}} These are the general forms of {\tt destruct} and {\tt edestruct}. They combine the effects of the {\tt with}, {\tt as}, {\tt eqn:}, {\tt using}, and {\tt in} clauses. \item{\tt case \term}\label{case}\tacindex{case} The tactic {\tt case} is a more basic tactic to perform case analysis without recursion. It behaves as {\tt elim \term} but using a case-analysis elimination principle and not a recursive one. \item {\tt case {\term} with {\bindinglist}} Analogous to {\tt elim {\term} with {\bindinglist}} above. \item{\tt ecase {\term}\tacindex{ecase}}\\ {\tt ecase {\term} with {\bindinglist}} In case the type of {\term} has dependent premises, or dependent premises whose values are not inferable from the {\tt with {\bindinglist}} clause, {\tt ecase} turns them into existential variables to be resolved later on. \item {\tt simple destruct \ident}\tacindex{simple destruct} This tactic behaves as {\tt intros until {\ident}; case {\tt {\ident}}} when {\ident} is a quantified variable of the goal. \item {\tt simple destruct {\num}} This tactic behaves as {\tt intros until {\num}; case {\tt {\ident}}} where {\ident} is the name given by {\tt intros until {\num}} to the {\num}-th non-dependent premise of the goal. \item{\tt case\_eq \term}\label{case_eq}\tacindex{case\_eq} The tactic {\tt case\_eq} is a variant of the {\tt case} tactic that allow to perform case analysis on a term without completely forgetting its original form. This is done by generating equalities between the original form of the term and the outcomes of the case analysis. 
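  For instance, on a small (hypothetical) goal depending on a boolean
  {\tt b}, the equation about {\tt b} is kept in each generated subgoal:

\begin{coq_eval}
(* illustrative goal only *)
Goal forall b:bool, (if b then 0 else 0) = 0.
intro b.
\end{coq_eval}
\begin{coq_example}
case_eq b.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}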
% The effect of this tactic is similar to the effect of {\tt % destruct {\term} in |- *} with the exception that no new hypotheses % are introduced in the context. \end{Variants} \subsection{\tt induction \term} \tacindex{induction} \label{Tac-induction} This tactic applies to any goal. The argument {\term} must be of inductive type and the tactic {\tt induction} generates subgoals, one for each possible form of {\term}, i.e. one for each constructor of the inductive type. If the argument is dependent in either the conclusion or some hypotheses of the goal, the argument is replaced by the appropriate constructor form in each of the resulting subgoals and induction hypotheses are added to the local context using names whose prefix is {\tt IH}. There are particular cases: \begin{itemize} \item If {\term} is an identifier {\ident} denoting a quantified variable of the conclusion of the goal, then {\tt induction {\ident}} behaves as {\tt intros until {\ident}; induction {\ident}}. \item If {\term} is a {\num}, then {\tt induction {\num}} behaves as {\tt intros until {\num}} followed by {\tt induction} applied to the last introduced hypothesis. Remark: For simple induction on a numeral, use syntax {\tt induction ({\num})} (not very interesting anyway). \item The argument {\term} can also be a pattern of which holes are denoted by ``\_''. In this case, the tactic checks that all subterms matching the pattern in the conclusion and the hypotheses are compatible and performs induction using this subterm. \end{itemize} \Example \begin{coq_example} Lemma induction_test : forall n:nat, n = n -> n <= n. intros n H. induction n. \end{coq_example} \begin{ErrMsgs} \item \errindex{Not an inductive product} \item \errindex{Unable to find an instance for the variables {\ident} \ldots {\ident}} Use in this case the variant {\tt elim \dots\ with \dots} below. \end{ErrMsgs} \begin{Variants} \item{\tt induction {\term} as {\disjconjintropattern}} This behaves as {\tt induction {\term}} but uses the names in {\disjconjintropattern} to name the variables introduced in the context. The {\disjconjintropattern} must typically be of the form {\tt [} $p_{11}$ \ldots $p_{1n_1}$ {\tt |} {\ldots} {\tt |} $p_{m1}$ \ldots $p_{mn_m}$ {\tt ]} with $m$ being the number of constructors of the type of {\term}. Each variable introduced by {\tt induction} in the context of the $i^{th}$ goal gets its name from the list $p_{i1}$ \ldots $p_{in_i}$ in order. If there are not enough names, {\tt induction} invents names for the remaining variables to introduce. More generally, the $p_{ij}$ can be any disjunctive/conjunctive introduction pattern (see Section~\ref{intros-pattern}). For instance, for an inductive type with one constructor, the pattern notation {\tt ($p_{1}$,\ldots,$p_{n}$)} can be used instead of {\tt [} $p_{1}$ \ldots $p_{n}$ {\tt ]}. \item{\tt induction {\term} eqn:{\namingintropattern}} This behaves as {\tt induction {\term}} but adds an equation between {\term} and the value that {\term} takes in each of the induction case. The name of the equation is built according to {\namingintropattern} which can be an identifier, a ``?'', etc, as indicated in Section~\ref{intros-pattern}. \item{\tt induction {\term} as {\disjconjintropattern} eqn:{\namingintropattern}} This combines the two previous forms. \item{\tt induction {\term} with \bindinglist} This behaves like \texttt{induction {\term}} providing explicit instances for the premises of the type of {\term} (see the syntax of bindings in Section~\ref{Binding-list}). 
\item{\tt einduction {\term}\tacindex{einduction}}

  This tactic behaves like \texttt{induction {\term}} except that it does
  not fail if some dependent premise of the type of {\term} is not
  inferable. Instead, the unresolved premises are left as existential
  variables to be inferred later, in the same way as {\tt eapply} does (see
  Section~\ref{eapply-example}).

\item {\tt induction {\term$_1$} using {\term$_2$}}

  This behaves as {\tt induction {\term$_1$}} but using {\term$_2$} as
  induction scheme. It does not expect the conclusion of the type of
  {\term$_1$} to be inductive.

\item {\tt induction {\term$_1$} using {\term$_2$} with {\bindinglist}}

  This behaves as {\tt induction {\term$_1$} using {\term$_2$}} but also
  provides instances for the premises of the type of {\term$_2$}.

\item \texttt{induction {\term}$_1$, $\ldots$, {\term}$_n$ using {\qualid}}

  This syntax is used when {\qualid} denotes an induction principle with
  complex predicates, such as the induction principles generated by
  {\tt Function} or {\tt Functional Scheme}.

\item \texttt{induction {\term} in {\occgoalset}}

  This syntax is used for selecting which occurrences of {\term} the
  induction has to be carried out on. The {\tt in \occgoalset} clause is an
  occurrence clause whose syntax and behavior are described in
  Section~\ref{Occurrences clauses}.

\item {\tt induction {\term$_1$} with {\bindinglist$_1$} as {\disjconjintropattern} eqn:{\namingintropattern} using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}}\\
 {\tt einduction {\term$_1$} with {\bindinglist$_1$} as {\disjconjintropattern} eqn:{\namingintropattern} using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}}

  These are the most general forms of {\tt induction} and {\tt einduction}.
  They combine the effects of the {\tt with}, {\tt as}, {\tt eqn:},
  {\tt using}, and {\tt in} clauses.

\item {\tt elim \term}\label{elim}

  This is a more basic induction tactic. Again, the type of the argument
  {\term} must be an inductive type. Then, according to the type of the
  goal, the tactic {\tt elim} chooses the appropriate destructor and
  applies it as the tactic {\tt apply} would do. For instance, if the proof
  context contains {\tt n:nat} and the current goal is {\tt T} of type
  {\tt Prop}, then {\tt elim n} is equivalent to {\tt apply nat\_ind with
  (n:=n)}. The tactic {\tt elim} does not modify the context of the goal,
  nor does it introduce the induction hypotheses into the context: they
  remain as premises of the generated subgoals.

  More generally, {\tt elim \term} also works when the type of {\term} is a
  statement with premises and whose conclusion is inductive. In that case
  the tactic performs induction on the conclusion of the type of {\term}
  and leaves the non-dependent premises of the type as subgoals. In the
  case of dependent products, the tactic tries to find an instance for
  which the elimination lemma applies and fails otherwise.

\item {\tt elim {\term} with {\bindinglist}}

  Allows one to give explicit instances to the premises of the type of
  {\term} (see Section~\ref{Binding-list}).

\item{\tt eelim {\term}\tacindex{eelim}}

  In case the type of {\term} has dependent premises, this turns them into
  existential variables to be resolved later on.

\item{\tt elim {\term$_1$} using {\term$_2$}}\\
     {\tt elim {\term$_1$} using {\term$_2$} with {\bindinglist}\tacindex{elim \dots\ using}}

  Allows the user to explicitly give an elimination predicate {\term$_2$}
  which is not the standard one for the underlying inductive type of
  {\term$_1$}. The {\bindinglist} clause allows one to instantiate premises
  of the type of {\term$_2$}.
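  As an illustration of the {\tt using} clause, well-founded induction can
  be performed with a principle such as {\tt lt\_wf\_ind} from the
  {\tt Wf\_nat} library (a minimal, hypothetical example):

\begin{coq_eval}
(* illustrative example; clear any pending proof first *)
Reset Initial.
\end{coq_eval}
\begin{coq_example*}
Require Import Wf_nat.
\end{coq_example*}
\begin{coq_eval}
Goal forall n:nat, 0 <= n.
intro n.
\end{coq_eval}
\begin{coq_example}
induction n using lt_wf_ind.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}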
\item{\tt elim {\term$_1$} with {\bindinglist$_1$} using {\term$_2$} with {\bindinglist$_2$}}\\ {\tt eelim {\term$_1$} with {\bindinglist$_1$} using {\term$_2$} with {\bindinglist$_2$}} These are the most general forms of {\tt elim} and {\tt eelim}. It combines the effects of the {\tt using} clause and of the two uses of the {\tt with} clause. \item {\tt elimtype \form}\tacindex{elimtype} The argument {\form} must be inductively defined. {\tt elimtype I} is equivalent to {\tt cut I. intro H{\rm\sl n}; elim H{\rm\sl n}; clear H{\rm\sl n}}. Therefore the hypothesis {\tt H{\rm\sl n}} will not appear in the context(s) of the subgoal(s). Conversely, if {\tt t} is a term of (inductive) type {\tt I} and which does not occur in the goal then {\tt elim t} is equivalent to {\tt elimtype I; 2: exact t.} \item {\tt simple induction \ident}\tacindex{simple induction} This tactic behaves as {\tt intros until {\ident}; elim {\tt {\ident}}} when {\ident} is a quantified variable of the goal. \item {\tt simple induction {\num}} This tactic behaves as {\tt intros until {\num}; elim {\tt {\ident}}} where {\ident} is the name given by {\tt intros until {\num}} to the {\num}-th non-dependent premise of the goal. %% \item {\tt simple induction {\term}}\tacindex{simple induction} %% If {\term} is an {\ident} corresponding to a quantified variable of %% the goal then the tactic behaves as {\tt intros until {\ident}; elim %% {\tt {\ident}}}. If {\term} is a {\num} then the tactic behaves as %% {\tt intros until {\ident}; elim {\tt {\ident}}}. Otherwise, it is %% a synonym for {\tt elim {\term}}. %% \Rem For simple induction on a numeral, use syntax {\tt simple %% induction ({\num})}. \end{Variants} %\subsection[\tt FixPoint \dots]{\tt FixPoint \dots\tacindex{Fixpoint}} %Not yet documented. \subsection{\tt double induction \ident$_1$ \ident$_2$} \tacindex{double induction} This tactic is deprecated and should be replaced by {\tt induction \ident$_1$; induction \ident$_2$} (or {\tt induction \ident$_1$; destruct \ident$_2$} depending on the exact needs). %% This tactic applies to any goal. If the variables {\ident$_1$} and %% {\ident$_2$} of the goal have an inductive type, then this tactic %% performs double induction on these variables. For instance, if the %% current goal is \verb+forall n m:nat, P n m+ then, {\tt double induction n %% m} yields the four cases with their respective inductive hypotheses. %% In particular, for proving \verb+(P (S n) (S m))+, the generated induction %% hypotheses are \verb+(P (S n) m)+ and \verb+(m:nat)(P n m)+ (of the latter, %% \verb+(P n m)+ and \verb+(P n (S m))+ are derivable). %% \Rem When the induction hypothesis \verb+(P (S n) m)+ is not %% needed, {\tt induction \ident$_1$; destruct \ident$_2$} produces %% more concise subgoals. \begin{Variant} \item {\tt double induction \num$_1$ \num$_2$} This tactic is deprecated and should be replaced by {\tt induction \num$_1$; induction \num$_3$} where \num$_3$ is the result of \num$_2$-\num$_1$. %% This tactic applies to any goal. If the variables {\ident$_1$} and %% This applies double induction on the \num$_1^{th}$ and \num$_2^{th}$ {\it %% non dependent} premises of the goal. More generally, any combination of an %% {\ident} and a {\num} is valid. \end{Variant} \subsection{\tt dependent induction \ident} \tacindex{dependent induction} \label{DepInduction} The \emph{experimental} tactic \texttt{dependent induction} performs induction-inversion on an instantiated inductive predicate. 
One needs to first require the {\tt Coq.Program.Equality} module to use this tactic. The tactic is based on the BasicElim tactic by Conor McBride \cite{DBLP:conf/types/McBride00} and the work of Cristina Cornes around inversion \cite{DBLP:conf/types/CornesT95}. From an instantiated inductive predicate and a goal, it generates an equivalent goal where the hypothesis has been generalized over its indexes which are then constrained by equalities to be the right instances. This permits to state lemmas without resorting to manually adding these equalities and still get enough information in the proofs. \Example \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Lemma le_minus : forall n:nat, n < 1 -> n = 0. intros n H ; induction H. \end{coq_example} Here we did not get any information on the indexes to help fulfill this proof. The problem is that, when we use the \texttt{induction} tactic, we lose information on the hypothesis instance, notably that the second argument is \texttt{1} here. Dependent induction solves this problem by adding the corresponding equality to the context. \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Require Import Coq.Program.Equality. Lemma le_minus : forall n:nat, n < 1 -> n = 0. intros n H ; dependent induction H. \end{coq_example} The subgoal is cleaned up as the tactic tries to automatically simplify the subgoals with respect to the generated equalities. In this enriched context, it becomes possible to solve this subgoal. \begin{coq_example} reflexivity. \end{coq_example} Now we are in a contradictory context and the proof can be solved. \begin{coq_example} inversion H. \end{coq_example} This technique works with any inductive predicate. In fact, the \texttt{dependent induction} tactic is just a wrapper around the \texttt{induction} tactic. One can make its own variant by just writing a new tactic based on the definition found in \texttt{Coq.Program.Equality}. \begin{Variants} \item {\tt dependent induction {\ident} generalizing {\ident$_1$} \dots {\ident$_n$}}\tacindex{dependent induction \dots\ generalizing} This performs dependent induction on the hypothesis {\ident} but first generalizes the goal by the given variables so that they are universally quantified in the goal. This is generally what one wants to do with the variables that are inside some constructors in the induction hypothesis. The other ones need not be further generalized. \item {\tt dependent destruction {\ident}}\tacindex{dependent destruction} This performs the generalization of the instance {\ident} but uses {\tt destruct} instead of {\tt induction} on the generalized hypothesis. This gives results equivalent to {\tt inversion} or {\tt dependent inversion} if the hypothesis is dependent. \end{Variants} \SeeAlso \ref{dependent-induction-example} for a larger example of dependent induction and an explanation of the underlying technique. \subsection{\tt functional induction (\qualid\ \term$_1$ \dots\ \term$_n$)} \tacindex{functional induction} \label{FunInduction} The \emph{experimental} tactic \texttt{functional induction} performs case analysis and induction following the definition of a function. It makes use of a principle generated by \texttt{Function} (see Section~\ref{Function}) or \texttt{Functional Scheme} (see Section~\ref{FunScheme}). \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Functional Scheme minus_ind := Induction for minus Sort Prop. Check minus_ind. Lemma le_minus (n m:nat) : n - m <= n. 
functional induction (minus n m); simpl; auto. \end{coq_example} \begin{coq_example*} Qed. \end{coq_example*} \Rem \texttt{(\qualid\ \term$_1$ \dots\ \term$_n$)} must be a correct full application of \qualid. In particular, the rules for implicit arguments are the same as usual. For example use \texttt{@\qualid} if you want to write implicit arguments explicitly. \Rem Parentheses over \qualid \dots \term$_n$ are mandatory. \Rem \texttt{functional induction (f x1 x2 x3)} is actually a wrapper for \texttt{induction x1 x2 x3 (f x1 x2 x3) using \qualid} followed by a cleaning phase, where {\qualid} is the induction principle registered for $f$ (by the \texttt{Function} (see Section~\ref{Function}) or \texttt{Functional Scheme} (see Section~\ref{FunScheme}) command) corresponding to the sort of the goal. Therefore \texttt{functional induction} may fail if the induction scheme {\qualid} is not defined. See also Section~\ref{Function} for the function terms accepted by \texttt{Function}. \Rem There is a difference between obtaining an induction scheme for a function by using \texttt{Function} (see Section~\ref{Function}) and by using \texttt{Functional Scheme} after a normal definition using \texttt{Fixpoint} or \texttt{Definition}. See \ref{Function} for details. \SeeAlso{\ref{Function},\ref{FunScheme},\ref{FunScheme-examples}, \ref{sec:functional-inversion}} \begin{ErrMsgs} \item \errindex{Cannot find induction information on \qualid} \item \errindex{Not the right number of induction arguments} \end{ErrMsgs} \begin{Variants} \item {\tt functional induction (\qualid\ \term$_1$ \dots\ \term$_n$) as {\disjconjintropattern} using \term$_{m+1}$ with \bindinglist} Similarly to \texttt{Induction} and \texttt{elim} (see Section~\ref{Tac-induction}), this allows to give explicitly the name of the introduced variables, the induction principle, and the values of dependent premises of the elimination scheme, including \emph{predicates} for mutual induction when {\qualid} is part of a mutually recursive definition. \end{Variants} \subsection{\tt discriminate \term} \label{discriminate} \tacindex{discriminate} This tactic proves any goal from an assumption stating that two structurally different terms of an inductive set are equal. For example, from {\tt (S (S O))=(S O)} we can derive by absurdity any proposition. The argument {\term} is assumed to be a proof of a statement of conclusion {\tt{\term$_1$} = {\term$_2$}} with {\term$_1$} and {\term$_2$} being elements of an inductive set. To build the proof, the tactic traverses the normal forms\footnote{Reminder: opaque constants will not be expanded by $\delta$ reductions.} of {\term$_1$} and {\term$_2$} looking for a couple of subterms {\tt u} and {\tt w} ({\tt u} subterm of the normal form of {\term$_1$} and {\tt w} subterm of the normal form of {\term$_2$}), placed at the same positions and whose head symbols are two different constructors. If such a couple of subterms exists, then the proof of the current goal is completed, otherwise the tactic fails. \Rem The syntax {\tt discriminate {\ident}} can be used to refer to a hypothesis quantified in the goal. In this case, the quantified hypothesis whose name is {\ident} is first introduced in the local context using \texttt{intros until \ident}. 
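A minimal (illustrative) example:

\begin{coq_eval}
Goal forall n:nat, S n = 0 -> False.
intros n H.
\end{coq_eval}
\begin{coq_example}
discriminate H.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}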
\begin{ErrMsgs} \item \errindex{No primitive equality found} \item \errindex{Not a discriminable equality} \end{ErrMsgs} \begin{Variants} \item \texttt{discriminate \num} This does the same thing as \texttt{intros until \num} followed by \texttt{discriminate \ident} where {\ident} is the identifier for the last introduced hypothesis. \item \texttt{discriminate {\term} with \bindinglist} This does the same thing as \texttt{discriminate {\term}} but using the given bindings to instantiate parameters or hypotheses of {\term}. \item \texttt{ediscriminate \num}\tacindex{ediscriminate}\\ \texttt{ediscriminate {\term} \zeroone{with \bindinglist}} This works the same as {\tt discriminate} but if the type of {\term}, or the type of the hypothesis referred to by {\num}, has uninstantiated parameters, these parameters are left as existential variables. \item \texttt{discriminate} This behaves like {\tt discriminate {\ident}} if {\ident} is the name of an hypothesis to which {\tt discriminate} is applicable; if the current goal is of the form {\term$_1$} {\tt <>} {\term$_2$}, this behaves as {\tt intro {\ident}; injection {\ident}}. \ErrMsg \errindex{No discriminable equalities} \end{Variants} \subsection{\tt injection \term} \label{injection} \tacindex{injection} The {\tt injection} tactic is based on the fact that constructors of inductive sets are injections. That means that if $c$ is a constructor of an inductive set, and if $(c~\vec{t_1})$ and $(c~\vec{t_2})$ are two terms that are equal then $~\vec{t_1}$ and $~\vec{t_2}$ are equal too. If {\term} is a proof of a statement of conclusion {\tt {\term$_1$} = {\term$_2$}}, then {\tt injection} applies injectivity as deep as possible to derive the equality of all the subterms of {\term$_1$} and {\term$_2$} placed in the same positions. For example, from {\tt (S (S n))=(S (S (S m)))} we may derive {\tt n=(S m)}. To use this tactic {\term$_1$} and {\term$_2$} should be elements of an inductive set and they should be neither explicitly equal, nor structurally different. We mean by this that, if {\tt n$_1$} and {\tt n$_2$} are their respective normal forms, then: \begin{itemize} \item {\tt n$_1$} and {\tt n$_2$} should not be syntactically equal, \item there must not exist any pair of subterms {\tt u} and {\tt w}, {\tt u} subterm of {\tt n$_1$} and {\tt w} subterm of {\tt n$_2$} , placed in the same positions and having different constructors as head symbols. \end{itemize} If these conditions are satisfied, then, the tactic derives the equality of all the subterms of {\term$_1$} and {\term$_2$} placed in the same positions and puts them as antecedents of the current goal. \Example Consider the following goal: \begin{coq_example*} Inductive list : Set := | nil : list | cons : nat -> list -> list. Variable P : list -> Prop. \end{coq_example*} \begin{coq_eval} Lemma ex : forall (l:list) (n:nat), P nil -> cons n l = cons 0 nil -> P l. intros l n H H0. \end{coq_eval} \begin{coq_example} Show. injection H0. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Beware that \texttt{injection} yields always an equality in a sigma type whenever the injected object has a dependent type. \Rem There is a special case for dependent pairs. If we have a decidable equality over the type of the first argument, then it is safe to do the projection on the second one, and so {\tt injection} will work fine. To define such an equality, you have to use the {\tt Scheme} command (see \ref{Scheme}). 
\Rem If some quantified hypothesis of the goal is named {\ident}, then {\tt injection {\ident}} first introduces the hypothesis in the local context using \texttt{intros until \ident}. \begin{ErrMsgs} \item \errindex{Not a projectable equality but a discriminable one} \item \errindex{Nothing to do, it is an equality between convertible terms} \item \errindex{Not a primitive equality} \end{ErrMsgs} \begin{Variants} \item \texttt{injection \num} This does the same thing as \texttt{intros until \num} followed by \texttt{injection \ident} where {\ident} is the identifier for the last introduced hypothesis. \item \texttt{injection {\term} with \bindinglist} This does the same as \texttt{injection {\term}} but using the given bindings to instantiate parameters or hypotheses of {\term}. \item \texttt{einjection \num}\tacindex{einjection}\\ \texttt{einjection {\term} \zeroone{with \bindinglist}} This works the same as {\tt injection} but if the type of {\term}, or the type of the hypothesis referred to by {\num}, has uninstantiated parameters, these parameters are left as existential variables. \item{\tt injection} If the current goal is of the form {\term$_1$} {\tt <>} {\term$_2$}, this behaves as {\tt intro {\ident}; injection {\ident}}. \ErrMsg \errindex{goal does not satisfy the expected preconditions} \item \texttt{injection {\term} \zeroone{with \bindinglist} as \nelist{\intropattern}{}}\\ \texttt{injection {\num} as {\intropattern} \dots\ \intropattern}\\ \texttt{injection as {\intropattern} \dots\ \intropattern}\\ \texttt{einjection {\term} \zeroone{with \bindinglist} as \nelist{\intropattern}{}}\\ \texttt{einjection {\num} as {\intropattern} \dots\ \intropattern}\\ \texttt{einjection as {\intropattern} \dots\ \intropattern} \tacindex{injection \dots\ as} These variants apply \texttt{intros} \nelist{\intropattern}{} after the call to \texttt{injection} or \texttt{einjection}. \end{Variants} \subsection{\tt inversion \ident} \tacindex{inversion} Let the type of {\ident} in the local context be $(I~\vec{t})$, where $I$ is a (co)inductive predicate. Then, \texttt{inversion} applied to \ident~ derives for each possible constructor $c_i$ of $(I~\vec{t})$, {\bf all} the necessary conditions that should hold for the instance $(I~\vec{t})$ to be proved by $c_i$. \Rem If {\ident} does not denote a hypothesis in the local context but refers to a hypothesis quantified in the goal, then the latter is first introduced in the local context using \texttt{intros until \ident}. \Rem As inversion proofs may be large in size, we recommend the user to stock the lemmas whenever the same instance needs to be inverted several times. See Section~\ref{Derive-Inversion}. \begin{Variants} \item \texttt{inversion \num} This does the same thing as \texttt{intros until \num} then \texttt{inversion \ident} where {\ident} is the identifier for the last introduced hypothesis. \item \tacindex{inversion\_clear} \texttt{inversion\_clear \ident} This behaves as \texttt{inversion} and then erases \ident~ from the context. \item \tacindex{inversion \dots\ as} \texttt{inversion {\ident} as \intropattern} This behaves as \texttt{inversion} but using names in {\intropattern} for naming hypotheses. The {\intropattern} must have the form {\tt [} $p_{11}$ \ldots $p_{1n_1}$ {\tt |} {\ldots} {\tt |} $p_{m1}$ \ldots $p_{mn_m}$ {\tt ]} with $m$ being the number of constructors of the type of {\ident}. 
Be careful that the list must be of length $m$ even if {\tt inversion} discards some cases (which is precisely one of its roles): for the discarded cases, just use an empty list (i.e. $n_i=0$). The arguments of the $i^{th}$ constructor and the equalities that {\tt inversion} introduces in the context of the goal corresponding to the $i^{th}$ constructor, if it exists, get their names from the list $p_{i1}$ \ldots $p_{in_i}$ in order. If there are not enough names, {\tt induction} invents names for the remaining variables to introduce. In case an equation splits into several equations (because {\tt inversion} applies {\tt injection} on the equalities it generates), the corresponding name $p_{ij}$ in the list must be replaced by a sublist of the form {\tt [$p_{ij1}$ \ldots $p_{ijq}$]} (or, equivalently, {\tt ($p_{ij1}$, \ldots, $p_{ijq}$)}) where $q$ is the number of subequalities obtained from splitting the original equation. Here is an example. \begin{coq_eval} Require Import List. \end{coq_eval} \begin{coq_example} Inductive contains0 : list nat -> Prop := | in_hd : forall l, contains0 (0 :: l) | in_tl : forall l b, contains0 l -> contains0 (b :: l). Goal forall l:list nat, contains0 (1 :: l) -> contains0 l. intros l H; inversion H as [ | l' p Hl' [Heqp Heql'] ]. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \item \texttt{inversion {\num} as \intropattern} This allows to name the hypotheses introduced by \texttt{inversion \num} in the context. \item \tacindex{inversion\_clear \dots\ as} \texttt{inversion\_clear {\ident} as \intropattern} This allows to name the hypotheses introduced by \texttt{inversion\_clear} in the context. \item \tacindex{inversion \dots\ in} \texttt{inversion {\ident} in \ident$_1$ \dots\ \ident$_n$} Let \ident$_1$ \dots\ \ident$_n$, be identifiers in the local context. This tactic behaves as generalizing \ident$_1$ \dots\ \ident$_n$, and then performing \texttt{inversion}. \item \tacindex{inversion \dots\ as \dots\ in} \texttt{inversion {\ident} as {\intropattern} in \ident$_1$ \dots\ \ident$_n$} This allows to name the hypotheses introduced in the context by \texttt{inversion {\ident} in \ident$_1$ \dots\ \ident$_n$}. \item \tacindex{inversion\_clear \dots\ in} \texttt{inversion\_clear {\ident} in \ident$_1$ \dots\ \ident$_n$} Let \ident$_1$ \dots\ \ident$_n$, be identifiers in the local context. This tactic behaves as generalizing \ident$_1$ \dots\ \ident$_n$, and then performing {\tt inversion\_clear}. \item \tacindex{inversion\_clear \dots\ as \dots\ in} \texttt{inversion\_clear {\ident} as {\intropattern} in \ident$_1$ \dots\ \ident$_n$} This allows to name the hypotheses introduced in the context by \texttt{inversion\_clear {\ident} in \ident$_1$ \dots\ \ident$_n$}. \item \tacindex{dependent inversion} \texttt{dependent inversion \ident} That must be used when \ident\ appears in the current goal. It acts like \texttt{inversion} and then substitutes \ident\ for the corresponding term in the goal. \item \tacindex{dependent inversion \dots\ as } \texttt{dependent inversion {\ident} as \intropattern} This allows to name the hypotheses introduced in the context by \texttt{dependent inversion} {\ident}. \item \tacindex{dependent inversion\_clear} \texttt{dependent inversion\_clear \ident} Like \texttt{dependent inversion}, except that {\ident} is cleared from the local context. 
\item \tacindex{dependent inversion\_clear \dots\ as} \texttt{dependent inversion\_clear {\ident} as \intropattern} This allows to name the hypotheses introduced in the context by \texttt{dependent inversion\_clear} {\ident}. \item \tacindex{dependent inversion \dots\ with} \texttt{dependent inversion {\ident} with \term} This variant allows you to specify the generalization of the goal. It is useful when the system fails to generalize the goal automatically. If {\ident} has type $(I~\vec{t})$ and $I$ has type $\forall (\vec{x}:\vec{T}), s$, then \term~ must be of type $I:\forall (\vec{x}:\vec{T}), I~\vec{x}\to s'$ where $s'$ is the type of the goal. \item \tacindex{dependent inversion \dots\ as \dots\ with} \texttt{dependent inversion {\ident} as {\intropattern} with \term} This allows to name the hypotheses introduced in the context by \texttt{dependent inversion {\ident} with \term}. \item \tacindex{dependent inversion\_clear \dots\ with} \texttt{dependent inversion\_clear {\ident} with \term} Like \texttt{dependent inversion \dots\ with} but clears {\ident} from the local context. \item \tacindex{dependent inversion\_clear \dots\ as \dots\ with} \texttt{dependent inversion\_clear {\ident} as {\intropattern} with \term} This allows to name the hypotheses introduced in the context by \texttt{dependent inversion\_clear {\ident} with \term}. \item \tacindex{simple inversion} \texttt{simple inversion \ident} It is a very primitive inversion tactic that derives all the necessary equalities but it does not simplify the constraints as \texttt{inversion} does. \item \tacindex{simple inversion \dots\ as} \texttt{simple inversion {\ident} as \intropattern} This allows to name the hypotheses introduced in the context by \texttt{simple inversion}. \item \tacindex{inversion \dots\ using} \texttt{inversion {\ident} using \ident$'$} Let {\ident} have type $(I~\vec{t})$ ($I$ an inductive predicate) in the local context, and \ident$'$ be a (dependent) inversion lemma. Then, this tactic refines the current goal with the specified lemma. \item \tacindex{inversion \dots\ using \dots\ in} \texttt{inversion {\ident} using \ident$'$ in \ident$_1$\dots\ \ident$_n$} This tactic behaves as generalizing \ident$_1$\dots\ \ident$_n$, then doing \texttt{inversion {\ident} using \ident$'$}. \end{Variants} \firstexample \example{Non-dependent inversion} \label{inversion-examples} Let us consider the relation \texttt{Le} over natural numbers and the following variables: \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Inductive Le : nat -> nat -> Set := | LeO : forall n:nat, Le 0 n | LeS : forall n m:nat, Le n m -> Le (S n) (S m). Variable P : nat -> nat -> Prop. Variable Q : forall n m:nat, Le n m -> Prop. \end{coq_example*} Let us consider the following goal: \begin{coq_eval} Lemma ex : forall n m:nat, Le (S n) m -> P n m. intros. \end{coq_eval} \begin{coq_example} Show. \end{coq_example} To prove the goal, we may need to reason by cases on \texttt{H} and to derive that \texttt{m} is necessarily of the form $(S~m_0)$ for certain $m_0$ and that $(Le~n~m_0)$. Deriving these conditions corresponds to prove that the only possible constructor of \texttt{(Le (S n) m)} is \texttt{LeS} and that we can invert the \texttt{->} in the type of \texttt{LeS}. This inversion is possible because \texttt{Le} is the smallest set closed by the constructors \texttt{LeO} and \texttt{LeS}. \begin{coq_example} inversion_clear H. 
\end{coq_example}

Note that \texttt{m} has been substituted in the goal for \texttt{(S m0)}
and that the hypothesis \texttt{(Le n m0)} has been added to the context.

Sometimes it is interesting to have the equality \texttt{m=(S m0)} in the
context to use it later. In that case we can use \texttt{inversion} that
does not clear the equalities:

\begin{coq_eval}
Undo.
\end{coq_eval}
\begin{coq_example}
inversion H.
\end{coq_example}

\begin{coq_eval}
Abort.
\end{coq_eval}

\example{Dependent inversion}

Let us consider the following goal:

\begin{coq_eval}
Lemma ex_dep : forall (n m:nat) (H:Le (S n) m), Q (S n) m H.
intros.
\end{coq_eval}
\begin{coq_example}
Show.
\end{coq_example}

As \texttt{H} occurs in the goal, we may want to reason by cases on its
structure and so, we would like inversion tactics to substitute \texttt{H}
by the corresponding term in constructor form. Neither \texttt{inversion}
nor {\tt inversion\_clear} makes such a substitution. To have such a
behavior we use the dependent inversion tactics:

\begin{coq_example}
dependent inversion_clear H.
\end{coq_example}

Note that \texttt{H} has been substituted by \texttt{(LeS n m0 l)} and
\texttt{m} by \texttt{(S m0)}.

\subsection{\tt fix {\ident} {\num}}
\tacindex{fix}
\label{tactic:fix}

This tactic is a primitive tactic to start a proof by induction. In
general, it is easier to rely on higher-level induction tactics such as the
ones described in Section~\ref{Tac-induction}.

In the syntax of the tactic, the identifier {\ident} is the name given to
the induction hypothesis. The natural number {\num} indicates on which
premise of the current goal the induction acts, starting from 1 and
counting both dependent and non dependent products. In particular, the
current lemma must be composed of at least {\num} products.

Like in a {\tt fix} expression, the induction hypotheses have to be used on
structurally smaller arguments. The verification that inductive proof
arguments are correct is done only at the time of registering the lemma in
the environment. To know if the use of induction hypotheses is correct at
some time of the interactive development of a proof, use the command
{\tt Guarded} (see Section~\ref{Guarded}).

\begin{Variants}
  \item {\tt fix \ident$_1$ {\num} with ( \ident$_2$ \nelist{\binder$_2$}{} \zeroone{\{ struct \ident$'_2$ \}} :~\type$_2$ ) \dots\ ( \ident$_n$ \nelist{\binder$_n$}{} \zeroone{\{ struct \ident$'_n$ \}} :~\type$_n$ )}

    This starts a proof by mutual induction. The statements to be
    simultaneously proved are respectively {\tt forall}
    \nelist{{\binder}$_2$}{}{\tt ,} {\type}$_2$, {\ldots}, {\tt forall}
    \nelist{{\binder}$_n$}{}{\tt ,} {\type}$_n$. The identifiers
    {\ident}$_1$ {\ldots} {\ident}$_n$ are the names of the induction
    hypotheses. The identifiers {\ident}$'_2$ {\ldots} {\ident}$'_n$ are
    the respective names of the premises on which the induction is
    performed in the statements to be simultaneously proved (if not given,
    the system tries to guess them itself).
\end{Variants}

\subsection{\tt cofix \ident}
\tacindex{cofix}
\label{tactic:cofix}

This tactic starts a proof by coinduction. The identifier {\ident} is the
name given to the coinduction hypothesis. Like in a {\tt cofix} expression,
every use of the coinduction hypotheses has to be guarded by a constructor.
The verification that the use of coinduction hypotheses is correct is done
only at the time of registering the lemma in the environment.
To know if the use of coinduction hypotheses is correct at some time of the interactive development of a proof, use the command {\tt Guarded} (see Section~\ref{Guarded}). \begin{Variants} \item {\tt cofix \ident$_1$ with ( \ident$_2$ \nelist{\binder$_2$}{} :~\type$_2$ ) \dots\ ( \ident$_n$ \nelist{\binder$_n$}{} :~\type$_n$ )} This starts a proof by mutual coinduction. The statements to be simultaneously proved are respectively {\tt forall} \nelist{{\binder}$_2$}{}{\tt ,} {\type}$_2$, {\ldots}, {\tt forall} \nelist{{\binder}$_n$}{}{\tt ,} {\type}$_n$. The identifiers {\ident}$_1$ {\ldots} {\ident}$_n$ are the names of the coinduction hypotheses. \end{Variants} \section{Rewriting expressions} These tactics use the equality {\tt eq:forall A:Type, A->A->Prop} defined in file {\tt Logic.v} (see Section~\ref{Equality}). The notation for {\tt eq}~$T~t~u$ is simply {\tt $t$=$u$} dropping the implicit type of $t$ and $u$. \subsection{\tt rewrite \term \label{rewrite} \tacindex{rewrite}} This tactic applies to any goal. The type of {\term} must have the form \texttt{forall (x$_1$:A$_1$) \dots\ (x$_n$:A$_n$)}\texttt{eq} \term$_1$ \term$_2$. \noindent where \texttt{eq} is the Leibniz equality or a registered setoid equality. \noindent Then {\tt rewrite \term} finds the first subterm matching \term$_1$ in the goal, resulting in instances \term$_1'$ and \term$_2'$ and then replaces every occurrence of \term$_1'$ by \term$_2'$. Hence, some of the variables x$_i$ are solved by unification, and some of the types \texttt{A}$_1$, \dots, \texttt{A}$_n$ become new subgoals. % \Rem In case the type of % \term$_1$ contains occurrences of variables bound in the % type of \term, the tactic tries first to find a subterm of the goal % which matches this term in order to find a closed instance \term$'_1$ % of \term$_1$, and then all instances of \term$'_1$ will be replaced. \begin{ErrMsgs} \item \errindex{The term provided does not end with an equation} \item \errindex{Tactic generated a subgoal identical to the original goal}\\ This happens if \term$_1$ does not occur in the goal. \end{ErrMsgs} \begin{Variants} \item {\tt rewrite -> {\term}}\tacindex{rewrite ->}\\ Is equivalent to {\tt rewrite \term} \item {\tt rewrite <- {\term}}\tacindex{rewrite <-}\\ Uses the equality \term$_1${\tt=}\term$_2$ from right to left \item {\tt rewrite {\term} in \textit{clause}} \tacindex{rewrite \dots\ in}\\ Analogous to {\tt rewrite {\term}} but rewriting is done following \textit{clause} (similarly to \ref{Conversion-tactics}). For instance: \begin{itemize} \item \texttt{rewrite H in H1} will rewrite \texttt{H} in the hypothesis \texttt{H1} instead of the current goal. \item \texttt{rewrite H in H1 at 1, H2 at - 2 |- *} means \texttt{rewrite H; rewrite H in H1 at 1; rewrite H in H2 at - 2}. In particular a failure will happen if any of these three simpler tactics fails. \item \texttt{rewrite H in * |- } will do \texttt{rewrite H in H$_i$} for all hypothesis \texttt{H$_i$ <> H}. A success will happen as soon as at least one of these simpler tactics succeeds. \item \texttt{rewrite H in *} is a combination of \texttt{rewrite H} and \texttt{rewrite H in * |-} that succeeds if at least one of these two tactics succeeds. \end{itemize} Orientation {\tt ->} or {\tt <-} can be inserted before the term to rewrite. \item {\tt rewrite {\term} at {\occlist}} \tacindex{rewrite \dots\ at} Rewrite only the given occurrences of \term$_1'$. Occurrences are specified from left to right as for \texttt{pattern} (\S \ref{pattern}). 
The rewrite is always performed using setoid rewriting, even for Leibniz's equality, so one has to \texttt{Import Setoid} to use this variant. \item {\tt rewrite {\term} by {\tac}} \tacindex{rewrite \dots\ by} Use {\tac} to completely solve the side-conditions arising from the rewrite. \item {\tt rewrite $\term_1$, \ldots, $\term_n$}\\ Is equivalent to the $n$ successive tactics {\tt rewrite $\term_1$} up to {\tt rewrite $\term_n$}, each one working on the first subgoal generated by the previous one. Orientation {\tt ->} or {\tt <-} can be inserted before each term to rewrite. One unique \textit{clause} can be added at the end after the keyword {\tt in}; it will then affect all rewrite operations. \item In all forms of {\tt rewrite} described above, a term to rewrite can be immediately prefixed by one of the following modifiers: \begin{itemize} \item {\tt ?} : the tactic {\tt rewrite ?$\term$} performs the rewrite of $\term$ as many times as possible (perhaps zero time). This form never fails. \item {\tt $n$?} : works similarly, except that it will do at most $n$ rewrites. \item {\tt !} : works as {\tt ?}, except that at least one rewrite should succeed, otherwise the tactic fails. \item {\tt $n$!} (or simply {\tt $n$}) : precisely $n$ rewrites of $\term$ will be done, leading to failure if these $n$ rewrites are not possible. \end{itemize} \item {\tt erewrite {\term}\tacindex{erewrite}} This tactic works as {\tt rewrite {\term}} but turning unresolved bindings into existential variables, if any, instead of failing. It has the same variants as {\tt rewrite} has. \end{Variants} \subsection{\tt cutrewrite -> \term$_1$ = \term$_2$ \label{cutrewrite} \tacindex{cutrewrite}} This tactic acts like {\tt replace {\term$_1$} with {\term$_2$}} (see below). \subsection{\tt replace {\term$_1$} with {\term$_2$} \label{tactic:replace} \tacindex{replace \dots\ with}} This tactic applies to any goal. It replaces all free occurrences of {\term$_1$} in the current goal with {\term$_2$} and generates the equality {\term$_2$}{\tt =}{\term$_1$} as a subgoal. This equality is automatically solved if it occurs amongst the assumption, or if its symmetric form occurs. It is equivalent to {\tt cut \term$_2$=\term$_1$; [intro H{\sl n}; rewrite <- H{\sl n}; clear H{\sl n}| assumption || symmetry; try assumption]}. \begin{ErrMsgs} \item \errindex{terms do not have convertible types} \end{ErrMsgs} \begin{Variants} \item {\tt replace {\term$_1$} with {\term$_2$} by \tac}\\ This acts as {\tt replace {\term$_1$} with {\term$_2$}} but applies {\tt \tac} to solve the generated subgoal {\tt \term$_2$=\term$_1$}. \item {\tt replace {\term}}\\ Replace {\term} with {\term'} using the first assumption whose type has the form {\tt \term=\term'} or {\tt \term'=\term} \item {\tt replace -> {\term}}\\ Replace {\term} with {\term'} using the first assumption whose type has the form {\tt \term=\term'} \item {\tt replace <- {\term}}\\ Replace {\term} with {\term'} using the first assumption whose type has the form {\tt \term'=\term} \item {\tt replace {\term$_1$} with {\term$_2$} \textit{clause} }\\ {\tt replace {\term$_1$} with {\term$_2$} \textit{clause} by \tac }\\ {\tt replace {\term} \textit{clause}}\\ {\tt replace -> {\term} \textit{clause}}\\ {\tt replace <- {\term} \textit{clause}}\\ Act as before but the replacements take place in \textit{clause}~(see Section~\ref{Conversion-tactics}) and not only in the conclusion of the goal.\\ The \textit{clause} argument must not contain any \texttt{type of} nor \texttt{value of}. 
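  As an illustration of the behavior described above (a small hypothetical
  goal), replacing {\tt n + 0} with {\tt n} leaves the goal {\tt n = n}
  together with the equality subgoal {\tt n = n + 0}:

\begin{coq_eval}
Goal forall n:nat, n + 0 = n.
intro n.
\end{coq_eval}
\begin{coq_example}
replace (n + 0) with n.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}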
\end{Variants} \subsection{\tt reflexivity \label{reflexivity} \tacindex{reflexivity}} This tactic applies to a goal which has the form {\tt t=u}. It checks that {\tt t} and {\tt u} are convertible and then solves the goal. It is equivalent to {\tt apply refl\_equal}. \begin{ErrMsgs} \item \errindex{The conclusion is not a substitutive equation} \item \errindex{Impossible to unify \dots\ with \dots.} \end{ErrMsgs} \subsection{\tt symmetry \tacindex{symmetry} \tacindex{symmetry in}} This tactic applies to a goal which has the form {\tt t=u} and changes it into {\tt u=t}. \variant {\tt symmetry in {\ident}}\\ If the statement of the hypothesis {\ident} has the form {\tt t=u}, the tactic changes it to {\tt u=t}. \subsection{\tt transitivity \term \tacindex{transitivity}} This tactic applies to a goal which has the form {\tt t=u} and transforms it into the two subgoals {\tt t={\term}} and {\tt {\term}=u}. \subsection{\tt subst {\ident} \tacindex{subst}} This tactic applies to a goal which has \ident\ in its context and (at least) one hypothesis, say {\tt H}, of type {\tt \ident=t} or {\tt t=\ident}. Then it replaces \ident\ by {\tt t} everywhere in the goal (in the hypotheses and in the conclusion) and clears \ident\ and {\tt H} from the context. \Rem When several hypotheses have the form {\tt \ident=t} or {\tt t=\ident}, the first one is used. \begin{Variants} \item {\tt subst \ident$_1$ \dots \ident$_n$} \\ Is equivalent to {\tt subst \ident$_1$; \dots; subst \ident$_n$}. \item {\tt subst} \\ Applies {\tt subst} repeatedly to all identifiers from the context for which an equality exists. \end{Variants} \subsection[{\tt stepl {\term}}]{{\tt stepl {\term}}\tacindex{stepl}} This tactic is for chaining rewriting steps. It assumes a goal of the form ``$R$ {\term}$_1$ {\term}$_2$'' where $R$ is a binary relation and relies on a database of lemmas of the form {\tt forall} $x$ $y$ $z$, $R$ $x$ $y$ {\tt ->} $eq$ $x$ $z$ {\tt ->} $R$ $z$ $y$ where $eq$ is typically a setoid equality. The application of {\tt stepl {\term}} then replaces the goal by ``$R$ {\term} {\term}$_2$'' and adds a new goal stating ``$eq$ {\term} {\term}$_1$''. Lemmas are added to the database using the command \comindex{Declare Left Step} \begin{quote} {\tt Declare Left Step {\term}.} \end{quote} The tactic is especially useful for parametric setoids which are not accepted as regular setoids for {\tt rewrite} and {\tt setoid\_replace} (see Chapter~\ref{setoid_replace}). \tacindex{stepr} \comindex{Declare Right Step} \begin{Variants} \item{\tt stepl {\term} by {\tac}}\\ This applies {\tt stepl {\term}} then applies {\tac} to the second goal. \item{\tt stepr {\term}}\\ {\tt stepr {\term} by {\tac}}\\ This behaves as {\tt stepl} but on the right-hand-side of the binary relation. Lemmas are expected to be of the form ``{\tt forall} $x$ $y$ $z$, $R$ $x$ $y$ {\tt ->} $eq$ $y$ $z$ {\tt ->} $R$ $x$ $z$'' and are registered using the command \begin{quote} {\tt Declare Right Step {\term}.} \end{quote} \end{Variants} \subsection{\tt change \term \tacindex{change} \label{change}} This tactic applies to any goal. It implements the rule ``Conv''\index{Typing rules!Conv} given in Section~\ref{Conv}. {\tt change U} replaces the current goal \T\ with \U\ providing that \U\ is well-formed and that \T\ and \U\ are convertible. 
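For instance, since {\tt 2 + 2} and {\tt 4} are convertible, the following
(illustrative) change is accepted:

\begin{coq_eval}
Goal 2 + 2 = 4.
\end{coq_eval}
\begin{coq_example}
change (4 = 4).
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}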
\begin{ErrMsgs} \item \errindex{Not convertible} \end{ErrMsgs} \tacindex{change \dots\ in} \begin{Variants} \item {\tt change \term$_1$ with \term$_2$} This replaces the occurrences of \term$_1$ by \term$_2$ in the current goal. The terms \term$_1$ and \term$_2$ must be convertible. \item {\tt change \term$_1$ at \num$_1$ \dots\ \num$_i$ with \term$_2$} This replaces the occurrences numbered \num$_1$ \dots\ \num$_i$ of \term$_1$ by \term$_2$ in the current goal. The terms \term$_1$ and \term$_2$ must be convertible. \ErrMsg {\tt Too few occurrences} \item {\tt change {\term} in {\ident}} \item {\tt change \term$_1$ with \term$_2$ in {\ident}} \item {\tt change \term$_1$ at \num$_1$ \dots\ \num$_i$ with \term$_2$ in {\ident}} This applies the {\tt change} tactic not to the goal but to the hypothesis {\ident}. \end{Variants} \SeeAlso \ref{Conversion-tactics} \section{Performing computations \index{Conversion tactics} \label{Conversion-tactics}} This set of tactics implements different specialized usages of the tactic \texttt{change}. All conversion tactics (including \texttt{change}) can be parameterized by the parts of the goal where the conversion can occur. This is done using \emph{goal clauses} which consists in a list of hypotheses and, optionally, of a reference to the conclusion of the goal. For defined hypothesis it is possible to specify if the conversion should occur on the type part, the body part or both (default). \index{Clauses} \index{Goal clauses} Goal clauses are written after a conversion tactic (tactics \texttt{set}~\ref{tactic:set}, \texttt{rewrite}~\ref{rewrite}, \texttt{replace}~\ref{tactic:replace} and \texttt{autorewrite}~\ref{tactic:autorewrite} also use goal clauses) and are introduced by the keyword \texttt{in}. If no goal clause is provided, the default is to perform the conversion only in the conclusion. The syntax and description of the various goal clauses is the following: \begin{description} \item[]\texttt{in {\ident}$_1$ $\ldots$ {\ident}$_n$ |- } only in hypotheses {\ident}$_1$ \ldots {\ident}$_n$ \item[]\texttt{in {\ident}$_1$ $\ldots$ {\ident}$_n$ |- *} in hypotheses {\ident}$_1$ \ldots {\ident}$_n$ and in the conclusion \item[]\texttt{in * |-} in every hypothesis \item[]\texttt{in *} (equivalent to \texttt{in * |- *}) everywhere \item[]\texttt{in (type of {\ident}$_1$) (value of {\ident}$_2$) $\ldots$ |-} in type part of {\ident}$_1$, in the value part of {\ident}$_2$, etc. \end{description} For backward compatibility, the notation \texttt{in}~{\ident}$_1$\ldots {\ident}$_n$ performs the conversion in hypotheses {\ident}$_1$\ldots {\ident}$_n$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %voir reduction__conv_x : histoires d'univers. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection[{\tt cbv \flag$_1$ \dots\ \flag$_n$}, {\tt lazy \flag$_1$ \dots\ \flag$_n$} and {\tt compute}] {{\tt cbv \flag$_1$ \dots\ \flag$_n$}, {\tt lazy \flag$_1$ \dots\ \flag$_n$} and {\tt compute} \tacindex{cbv} \tacindex{lazy} \tacindex{compute} \tacindex{vm\_compute}\label{vmcompute}} These parameterized reduction tactics apply to any goal and perform the normalization of the goal according to the specified flags. 
In correspondence with the kinds of reduction considered in \Coq, namely
$\beta$ (reduction of functional application), $\delta$ (unfolding of
transparent constants, see \ref{Transparent}), $\iota$ (reduction of
pattern-matching over a constructed term, and unfolding of {\tt fix} and
{\tt cofix} expressions) and $\zeta$ (contraction of local definitions),
the flags are {\tt beta}, {\tt delta}, {\tt iota} and {\tt zeta}. The
{\tt delta} flag itself can be refined into
{\tt delta [\qualid$_1$\ldots\qualid$_k$]} or
{\tt delta -[\qualid$_1$\ldots\qualid$_k$]}, restricting, in the first
case, the constants to unfold to the constants listed and, in the second
case, the constants to unfold to all but the ones explicitly mentioned.
Notice that the {\tt delta} flag does not apply to variables bound by a
let-in construction inside the term itself (use the {\tt zeta} flag for
that). In any case, opaque constants are not unfolded (see
Section~\ref{Opaque}).

The goal may be normalized with two strategies: {\em lazy} ({\tt lazy}
tactic) or {\em call-by-value} ({\tt cbv} tactic). The lazy strategy is a
call-by-need strategy, with sharing of reductions: the arguments of a
function call are partially evaluated only when necessary, and if an
argument is used several times then it is computed only once. This
reduction is efficient for reducing expressions with dead code. For
instance, a proof of a proposition {\tt exists~$x$. $P(x)$} reduces to a
pair of a witness $t$ and a proof that $t$ satisfies the predicate $P$.
Most of the time, $t$ may be computed without computing the proof of
$P(t)$, thanks to the lazy strategy.

The call-by-value strategy is the one used in ML languages: the arguments
of a function call are evaluated first, using a weak reduction (no
reduction under the $\lambda$-abstractions). Although the lazy strategy
always performs fewer reductions than the call-by-value strategy, the
latter is generally more efficient for evaluating purely computational
expressions (i.e. with little dead code).

\begin{Variants}
\item {\tt compute} \tacindex{compute}\\
      {\tt cbv}

  These are synonyms for {\tt cbv beta delta iota zeta}.

\item {\tt lazy}

  This is a synonym for {\tt lazy beta delta iota zeta}.

\item {\tt compute [\qualid$_1$\ldots\qualid$_k$]}\\
      {\tt cbv [\qualid$_1$\ldots\qualid$_k$]}

  These are synonyms of
  {\tt cbv beta delta [\qualid$_1$\ldots\qualid$_k$] iota zeta}.

\item {\tt compute -[\qualid$_1$\ldots\qualid$_k$]}\\
      {\tt cbv -[\qualid$_1$\ldots\qualid$_k$]}

  These are synonyms of
  {\tt cbv beta delta -[\qualid$_1$\ldots\qualid$_k$] iota zeta}.

\item {\tt lazy [\qualid$_1$\ldots\qualid$_k$]}\\
      {\tt lazy -[\qualid$_1$\ldots\qualid$_k$]}

  These are respectively synonyms of
  {\tt lazy beta delta [\qualid$_1$\ldots\qualid$_k$] iota zeta} and
  {\tt lazy beta delta -[\qualid$_1$\ldots\qualid$_k$] iota zeta}.

\item {\tt vm\_compute} \tacindex{vm\_compute}

  This tactic evaluates the goal using an optimized bytecode-based virtual
  machine performing call-by-value evaluation. This algorithm is
  dramatically more efficient than the algorithm used for the {\tt cbv}
  tactic, but it cannot be fine-tuned. It is especially interesting for
  full evaluation of algebraic objects. This includes the case of
  reflection-based tactics.
\end{Variants}

% Obsolete? Anyway not very important message
%\begin{ErrMsgs}
%\item \errindex{Delta must be specified before}
%
%  A list of constants appeared before the {\tt delta} flag.
%\end{ErrMsgs} \subsection{{\tt red} \tacindex{red}} This tactic applies to a goal which has the form {\tt forall (x:T1)\dots(xk:Tk), c t1 \dots\ tn} where {\tt c} is a constant. If {\tt c} is transparent then it replaces {\tt c} with its definition (say {\tt t}) and then reduces {\tt (t t1 \dots\ tn)} according to $\beta\iota\zeta$-reduction rules. \begin{ErrMsgs} \item \errindex{Not reducible} \end{ErrMsgs} \subsection{{\tt hnf} \tacindex{hnf}} This tactic applies to any goal. It replaces the current goal with its head normal form according to the $\beta\delta\iota\zeta$-reduction rules, i.e. it reduces the head of the goal until it becomes a product or an irreducible term. \Example The term \verb+forall n:nat, (plus (S n) (S n))+ is not reduced by {\tt hnf}. \Rem The $\delta$ rule only applies to transparent constants (see Section~\ref{Opaque} on transparency and opacity). \subsection{\tt simpl \tacindex{simpl}} This tactic applies to any goal. The tactic {\tt simpl} first applies $\beta\iota$-reduction rule. Then it expands transparent constants and tries to reduce {\tt T'} according, once more, to $\beta\iota$ rules. But when the $\iota$ rule is not applicable then possible $\delta$-reductions are not applied. For instance trying to use {\tt simpl} on {\tt (plus n O)=n} changes nothing. Notice that only transparent constants whose name can be reused as such in the recursive calls are possibly unfolded. For instance a constant defined by {\tt plus' := plus} is possibly unfolded and reused in the recursive calls, but a constant such as {\tt succ := plus (S O)} is never unfolded. The behavior of {\tt simpl} can be tuned using the {\tt Arguments} vernacular command as follows: \comindex{Arguments} \begin{itemize} \item A constant can be marked to be never unfolded by {\tt simpl}: \begin{coq_example*} Arguments minus x y : simpl never \end{coq_example*} After that command an expression like {\tt (minus (S x) y)} is left untouched by the {\tt simpl} tactic. \item A constant can be marked to be unfolded only if applied to enough arguments. The number of arguments required can be specified using the {\tt /} symbol in the arguments list of the {\tt Arguments} vernacular command. \begin{coq_example*} Definition fcomp A B C f (g : A -> B) (x : A) : C := f (g x). Notation "f \o g" := (fcomp f g) (at level 50). Arguments fcomp {A B C} f g x /. \end{coq_example*} After that command the expression {\tt (f \verb+\+o g)} is left untouched by {\tt simpl} while {\tt ((f \verb+\+o g) t)} is reduced to {\tt (f (g t))}. The same mechanism can be used to make a constant volatile, i.e. always unfolded by {\tt simpl}. \begin{coq_example*} Definition volatile := fun x : nat => x. Arguments volatile / x. \end{coq_example*} \item A constant can be marked to be unfolded only if an entire set of arguments evaluates to a constructor. The {\tt !} symbol can be used to mark such arguments. \begin{coq_example*} Arguments minus !x !y. \end{coq_example*} After that command, the expression {\tt (minus (S x) y)} is left untouched by {\tt simpl}, while {\tt (minus (S x) (S y))} is reduced to {\tt (minus x y)}. \item A special heuristic to determine if a constant has to be unfolded can be activated with the following command: \begin{coq_example*} Arguments minus x y : simpl nomatch \end{coq_example*} The heuristic avoids to perform a simplification step that would expose a {\tt match} construct in head position. 
For example the expression {\tt (minus (S (S x)) (S y))} is simplified to {\tt (minus (S x) y)} even if an extra simplification is possible. \end{itemize} \tacindex{simpl \dots\ in} \begin{Variants} \item {\tt simpl {\term}} This applies {\tt simpl} only to the occurrences of {\term} in the current goal. \item {\tt simpl {\term} at \num$_1$ \dots\ \num$_i$} This applies {\tt simpl} only to the \num$_1$, \dots, \num$_i$ occurrences of {\term} in the current goal. \ErrMsg {\tt Too few occurrences} \item {\tt simpl {\ident}} This applies {\tt simpl} only to the applicative subterms whose head occurrence is {\ident}. \item {\tt simpl {\ident} at \num$_1$ \dots\ \num$_i$} This applies {\tt simpl} only to the \num$_1$, \dots, \num$_i$ applicative subterms whose head occurrence is {\ident}. \end{Variants} \subsection{\tt unfold \qualid \tacindex{unfold} \label{unfold}} This tactic applies to any goal. The argument {\qualid} must denote a defined transparent constant or local definition (see Sections~\ref{Basic-definitions} and~\ref{Transparent}). The tactic {\tt unfold} applies the $\delta$ rule to each occurrence of the constant to which {\qualid} refers in the current goal and then replaces it with its $\beta\iota$-normal form. \begin{ErrMsgs} \item {\qualid} \errindex{does not denote an evaluable constant} \end{ErrMsgs} \begin{Variants} \item {\tt unfold {\qualid}$_1$, \dots, \qualid$_n$} \tacindex{unfold \dots\ in} Replaces {\em simultaneously} {\qualid}$_1$, \dots, {\qualid}$_n$ with their definitions and replaces the current goal with its $\beta\iota$ normal form. \item {\tt unfold {\qualid}$_1$ at \num$_1^1$, \dots, \num$_i^1$, \dots,\ \qualid$_n$ at \num$_1^n$ \dots\ \num$_j^n$} The lists \num$_1^1$, \dots, \num$_i^1$ and \num$_1^n$, \dots, \num$_j^n$ specify the occurrences of {\qualid}$_1$, \dots, \qualid$_n$ to be unfolded. Occurrences are located from left to right. \ErrMsg {\tt bad occurrence number of {\qualid}$_i$} \ErrMsg {\qualid}$_i$ {\tt does not occur} \item {\tt unfold {\qstring}} If {\qstring} denotes the discriminating symbol of a notation (e.g. {\tt "+"}) or an expression defining a notation (e.g. \verb!"_ + _"!), and this notation refers to an unfoldable constant, then the tactic unfolds it. \item {\tt unfold {\qstring}\%{\delimkey}} This is variant of {\tt unfold {\qstring}} where {\qstring} gets its interpretation from the scope bound to the delimiting key {\delimkey} instead of its default interpretation (see Section~\ref{scopechange}). \item {\tt unfold \qualidorstring$_1$ at \num$_1^1$, \dots, \num$_i^1$, \dots,\ \qualidorstring$_n$ at \num$_1^n$ \dots\ \num$_j^n$} This is the most general form, where {\qualidorstring} is either a {\qualid} or a {\qstring} referring to a notation. \end{Variants} \subsection{{\tt fold} \term \tacindex{fold}} This tactic applies to any goal. The term \term\ is reduced using the {\tt red} tactic. Every occurrence of the resulting term in the goal is then replaced by \term. \begin{Variants} \item {\tt fold} \term$_1$ \dots\ \term$_n$ Equivalent to {\tt fold} \term$_1${\tt;}\ldots{\tt; fold} \term$_n$. \end{Variants} \subsection{{\tt pattern {\term}} \tacindex{pattern} \label{pattern}} This command applies to any goal. The argument {\term} must be a free subterm of the current goal. 
The command {\tt pattern} performs $\beta$-expansion (the inverse of $\bt$-reduction) of the current goal (say \T) by \begin{enumerate} \item replacing all occurrences of {\term} in {\T} with a fresh variable \item abstracting this variable \item applying the abstracted goal to {\term} \end{enumerate} For instance, if the current goal $T$ is expressible has $\phi(t)$ where the notation captures all the instances of $t$ in $\phi(t)$, then {\tt pattern $t$} transforms it into {\tt (fun x:$A$ => $\phi(${\tt x}$)$) $t$}. This command can be used, for instance, when the tactic {\tt apply} fails on matching. \begin{Variants} \item {\tt pattern {\term} at {\num$_1$} \dots\ {\num$_n$}} Only the occurrences {\num$_1$} \dots\ {\num$_n$} of {\term} are considered for $\beta$-expansion. Occurrences are located from left to right. \item {\tt pattern {\term} at - {\num$_1$} \dots\ {\num$_n$}} All occurrences except the occurrences of indexes {\num$_1$} \dots\ {\num$_n$} of {\term} are considered for $\beta$-expansion. Occurrences are located from left to right. \item {\tt pattern {\term$_1$}, \dots, {\term$_m$}} Starting from a goal $\phi(t_1 \dots\ t_m)$, the tactic {\tt pattern $t_1$, \dots,\ $t_m$} generates the equivalent goal {\tt (fun (x$_1$:$A_1$) \dots\ (x$_m$:$A_m$) => $\phi(${\tt x$_1$\dots\ x$_m$}$)$) $t_1$ \dots\ $t_m$}.\\ If $t_i$ occurs in one of the generated types $A_j$ these occurrences will also be considered and possibly abstracted. \item {\tt pattern {\term$_1$} at {\num$_1^1$} \dots\ {\num$_{n_1}^1$}, \dots, {\term$_m$} at {\num$_1^m$} \dots\ {\num$_{n_m}^m$}} This behaves as above but processing only the occurrences \num$_1^1$, \dots, \num$_i^1$ of \term$_1$, \dots, \num$_1^m$, \dots, \num$_j^m$ of \term$_m$ starting from \term$_m$. \item {\tt pattern} {\term$_1$} \zeroone{{\tt at \zeroone{-}} {\num$_1^1$} \dots\ {\num$_{n_1}^1$}} {\tt ,} \dots {\tt ,} {\term$_m$} \zeroone{{\tt at \zeroone{-}} {\num$_1^m$} \dots\ {\num$_{n_m}^m$}} This is the most general syntax that combines the different variants. \end{Variants} \subsection{Conversion tactics applied to hypotheses} {\convtactic} {\tt in} \ident$_1$ \dots\ \ident$_n$ Applies the conversion tactic {\convtactic} to the hypotheses \ident$_1$, \ldots, \ident$_n$. The tactic {\convtactic} is any of the conversion tactics listed in this section. If \ident$_i$ is a local definition, then \ident$_i$ can be replaced by (Type of \ident$_i$) to address not the body but the type of the local definition. Example: {\tt unfold not in (Type of H1) (Type of H3).} \begin{ErrMsgs} \item \errindex{No such hypothesis} : {\ident}. \end{ErrMsgs} \section{Automation} \subsection{\tt auto \label{auto} \tacindex{auto}} This tactic implements a Prolog-like resolution procedure to solve the current goal. It first tries to solve the goal using the {\tt assumption} tactic, then it reduces the goal to an atomic one using {\tt intros} and introducing the newly generated hypotheses as hints. Then it looks at the list of tactics associated to the head symbol of the goal and tries to apply one of them (starting from the tactics with lower cost). This process is recursively applied to the generated subgoals. By default, \texttt{auto} only uses the hypotheses of the current goal and the hints of the database named {\tt core}. \begin{Variants} \item {\tt auto \num} Forces the search depth to be \num. The maximal search depth is 5 by default. 
\item {\tt auto with \ident$_1$ \dots\ \ident$_n$}

Uses the hint databases $\ident_1$ \dots\ $\ident_n$ in addition to the database {\tt core}. See Section~\ref{Hints-databases} for the list of pre-defined databases and the way to create or extend a database. This option can be combined with the previous one.

\item {\tt auto with *}

Uses all existing hint databases, minus the special database {\tt v62}. See Section~\ref{Hints-databases}.

\item \texttt{auto using \nterm{lemma}$_1$ , \ldots , \nterm{lemma}$_n$}

Uses \nterm{lemma}$_1$, \ldots, \nterm{lemma}$_n$ in addition to hints (can be combined with the \texttt{with \ident} option). If $lemma_i$ is an inductive type, it is the collection of its constructors which is added as hints.

\item \texttt{auto using \nterm{lemma}$_1$ , \ldots , \nterm{lemma}$_n$ with \ident$_1$ \dots\ \ident$_n$}

This combines the effects of the {\tt using} and {\tt with} options.

\item {\tt trivial}\tacindex{trivial}

This tactic is a restriction of {\tt auto} that is not recursive and tries only hints which cost 0. Typically it solves trivial equalities like $X=X$.

\item \texttt{trivial with \ident$_1$ \dots\ \ident$_n$}

\item \texttt{trivial with *}

\end{Variants}

\Rem {\tt auto} either solves completely the goal or else leaves it intact. \texttt{auto} and \texttt{trivial} never fail.

\SeeAlso Section~\ref{Hints-databases}

\subsection{\tt eauto
\tacindex{eauto}
\label{eauto}}

This tactic generalizes {\tt auto}. In contrast with the latter, {\tt eauto} uses unification of the goal against the hints rather than pattern-matching (in other words, it uses {\tt eapply} instead of {\tt apply}). As a consequence, {\tt eauto} can solve a goal such as the following:

\begin{coq_example}
Hint Resolve ex_intro.
Goal forall P:nat -> Prop, P 0 -> exists n, P n.
eauto.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}

Note that {\tt ex\_intro} should be declared as a hint.

\SeeAlso Section~\ref{Hints-databases}

\subsection{\tt autounfold with \ident$_1$ \dots\ \ident$_n$
\tacindex{autounfold}
\label{autounfold}}

This tactic unfolds constants that were declared through a {\tt Hint Unfold} in the given databases.

\begin{Variants}
\item {\tt autounfold with \ident$_1$ \dots\ \ident$_n$ in \textit{clause}}

Performs the unfolding in the given clause.

\item {\tt autounfold with *}

Uses the unfold hints declared in all the hint databases.
\end{Variants}

\subsection{\tt autorewrite with \ident$_1$ \dots \ident$_n$.
\label{tactic:autorewrite}
\tacindex{autorewrite}}

This tactic \footnote{The behavior of this tactic has changed much compared to the versions available in the previous distributions (V6). This may cause significant changes in your theories to obtain the same result. As a drawback of the re-engineering of the code, this tactic has also been completely revised to get a very compact and readable version.} carries out rewritings according to the rewriting rule bases {\tt \ident$_1$ \dots \ident$_n$}.

Each rewriting rule of a base \ident$_i$ is applied to the main subgoal until it fails. Once all the rules have been processed, if the main subgoal has progressed (e.g., if it is distinct from the initial main goal) then the rules of this base are processed again. If the main subgoal has not progressed then the next base is processed. For the bases, the behavior is exactly analogous to the processing of the rewriting rules.

The rewriting rule bases are built with the {\tt Hint~Rewrite} vernacular command.

\Warning{} This tactic may loop if you build non-terminating rewriting systems.
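As a minimal illustration (the rewriting base name {\tt simple\_arith} below is arbitrary; the lemmas {\tt plus\_0\_r} and {\tt plus\_assoc} come from the standard {\tt Arith} library), one may fill a base with {\tt Hint~Rewrite} and then normalize a goal with it:

\begin{coq_example}
Require Import Arith.
(* The base name simple_arith is arbitrary. *)
Hint Rewrite plus_0_r plus_assoc : simple_arith.
Goal forall n m p : nat, n + 0 + (m + p) = n + m + p.
intros; autorewrite with simple_arith; reflexivity.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}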
\begin{Variant} \item {\tt autorewrite with \ident$_1$ \dots \ident$_n$ using \tac}\\ Performs, in the same way, all the rewritings of the bases {\tt \ident$_1$ $...$ \ident$_n$} applying {\tt \tac} to the main subgoal after each rewriting step. \item \texttt{autorewrite with {\ident$_1$} \dots \ident$_n$ in {\qualid}} Performs all the rewritings in hypothesis {\qualid}. \item \texttt{autorewrite with {\ident$_1$} \dots \ident$_n$ in {\qualid} using \tac} Performs all the rewritings in hypothesis {\qualid} applying {\tt \tac} to the main subgoal after each rewriting step. \item \texttt{autorewrite with {\ident$_1$} \dots \ident$_n$ in \textit{clause}} Performs all the rewritings in the clause \textit{clause}. \\ The \textit{clause} argument must not contain any \texttt{type of} nor \texttt{value of}. \end{Variant} \SeeAlso Section~\ref{HintRewrite} for feeding the database of lemmas used by {\tt autorewrite}. \SeeAlso Section~\ref{autorewrite-example} for examples showing the use of this tactic. % En attente d'un moyen de valoriser les fichiers de demos %\SeeAlso file \texttt{contrib/Rocq/DEMOS/Demo\_AutoRewrite.v} \section{Controlling automation} \subsection{The hints databases for {\tt auto} and {\tt eauto} \index{Hints databases} \label{Hints-databases} \comindex{Hint}} The hints for \texttt{auto} and \texttt{eauto} are stored in databases. Each database maps head symbols to a list of hints. One can use the command \texttt{Print Hint \ident} to display the hints associated to the head symbol \ident{} (see \ref{PrintHint}). Each hint has a cost that is an nonnegative integer, and an optional pattern. The hints with lower cost are tried first. A hint is tried by \texttt{auto} when the conclusion of the current goal matches its pattern or when it has no pattern. \subsubsection*{Creating Hint databases \label{CreateHintDb}\comindex{CreateHintDb}} One can optionally declare a hint database using the command \texttt{Create HintDb}. If a hint is added to an unknown database, it will be automatically created. \medskip \texttt{Create HintDb} {\ident} [\texttt{discriminated}] \medskip This command creates a new database named \ident. The database is implemented by a Discrimination Tree (DT) that serves as an index of all the lemmas. The DT can use transparency information to decide if a constant should be indexed or not (c.f. \ref{HintTransparency}), making the retrieval more efficient. The legacy implementation (the default one for new databases) uses the DT only on goals without existentials (i.e., auto goals), for non-Immediate hints and do not make use of transparency hints, putting more work on the unification that is run after retrieval (it keeps a list of the lemmas in case the DT is not used). The new implementation enabled by the {\tt discriminated} option makes use of DTs in all cases and takes transparency information into account. However, the order in which hints are retrieved from the DT may differ from the order in which they were inserted, making this implementation observationally different from the legacy one. \begin{Variants} \item\texttt{Local Hint} \textsl{hint\_definition} \texttt{:} \ident$_1$ \ldots\ \ident$_n$ This is used to declare a hint database that must not be exported to the other modules that require and import the current module. Inside a section, the option {\tt Local} is useless since hints do not survive anyway to the closure of sections. 
\end{Variants} The general command to add a hint to some database \ident$_1$, \dots, \ident$_n$ is: \begin{tabbing} \texttt{Hint} \textsl{hint\_definition} \texttt{:} \ident$_1$ \ldots\ \ident$_n$ \end{tabbing} where {\sl hint\_definition} is one of the following expressions: \begin{itemize} \item \texttt{Resolve} {\term} \comindex{Hint Resolve} This command adds {\tt apply {\term}} to the hint list with the head symbol of the type of \term. The cost of that hint is the number of subgoals generated by {\tt apply {\term}}. In case the inferred type of \term\ does not start with a product the tactic added in the hint list is {\tt exact {\term}}. In case this type can be reduced to a type starting with a product, the tactic {\tt apply {\term}} is also stored in the hints list. If the inferred type of \term\ contains a dependent quantification on a predicate, it is added to the hint list of {\tt eapply} instead of the hint list of {\tt apply}. In this case, a warning is printed since the hint is only used by the tactic {\tt eauto} (see \ref{eauto}). A typical example of a hint that is used only by \texttt{eauto} is a transitivity lemma. \begin{ErrMsgs} \item \errindex{Bound head variable} The head symbol of the type of {\term} is a bound variable such that this tactic cannot be associated to a constant. \item \term\ \errindex{cannot be used as a hint} The type of \term\ contains products over variables which do not appear in the conclusion. A typical example is a transitivity axiom. In that case the {\tt apply} tactic fails, and thus is useless. \end{ErrMsgs} \begin{Variants} \item \texttt{Resolve} {\term$_1$} \dots {\term$_m$} Adds each \texttt{Resolve} {\term$_i$}. \end{Variants} \item \texttt{Immediate {\term}} \comindex{Hint Immediate} This command adds {\tt apply {\term}; trivial} to the hint list associated with the head symbol of the type of {\ident} in the given database. This tactic will fail if all the subgoals generated by {\tt apply {\term}} are not solved immediately by the {\tt trivial} tactic (which only tries tactics with cost $0$). This command is useful for theorems such as the symmetry of equality or $n+1=m+1 \to n=m$ that we may like to introduce with a limited use in order to avoid useless proof-search. The cost of this tactic (which never generates subgoals) is always 1, so that it is not used by {\tt trivial} itself. \begin{ErrMsgs} \item \errindex{Bound head variable} \item \term\ \errindex{cannot be used as a hint} \end{ErrMsgs} \begin{Variants} \item \texttt{Immediate} {\term$_1$} \dots {\term$_m$} Adds each \texttt{Immediate} {\term$_i$}. \end{Variants} \item \texttt{Constructors} {\ident} \comindex{Hint Constructors} If {\ident} is an inductive type, this command adds all its constructors as hints of type \texttt{Resolve}. Then, when the conclusion of current goal has the form \texttt{({\ident} \dots)}, \texttt{auto} will try to apply each constructor. \begin{ErrMsgs} \item {\ident} \errindex{is not an inductive type} \item {\ident} \errindex{not declared} \end{ErrMsgs} \begin{Variants} \item \texttt{Constructors} {\ident$_1$} \dots {\ident$_m$} Adds each \texttt{Constructors} {\ident$_i$}. \end{Variants} \item \texttt{Unfold} {\qualid} \comindex{Hint Unfold} This adds the tactic {\tt unfold {\qualid}} to the hint list that will only be used when the head constant of the goal is \ident. Its cost is 4. \begin{Variants} \item \texttt{Unfold} {\ident$_1$} \dots {\ident$_m$} Adds each \texttt{Unfold} {\ident$_i$}. 
\end{Variants} \item \texttt{Transparent}, \texttt{Opaque} {\qualid} \label{HintTransparency} \comindex{Hint Transparent} \comindex{Hint Opaque} This adds a transparency hint to the database, making {\tt {\qualid}} a transparent or opaque constant during resolution. This information is used during unification of the goal with any lemma in the database and inside the discrimination network to relax or constrain it in the case of \texttt{discriminated} databases. \begin{Variants} \item \texttt{Transparent}, \texttt{Opaque} {\ident$_1$} \dots {\ident$_m$} Declares each {\ident$_i$} as a transparent or opaque constant. \end{Variants} \item \texttt{Extern \num\ [\pattern]\ => }\textsl{tactic} \comindex{Hint Extern} This hint type is to extend \texttt{auto} with tactics other than \texttt{apply} and \texttt{unfold}. For that, we must specify a cost, an optional pattern and a tactic to execute. Here is an example: \begin{quotation} \begin{verbatim} Hint Extern 4 (~(_ = _)) => discriminate. \end{verbatim} \end{quotation} Now, when the head of the goal is a disequality, \texttt{auto} will try \texttt{discriminate} if it does not manage to solve the goal with hints with a cost less than 4. One can even use some sub-patterns of the pattern in the tactic script. A sub-pattern is a question mark followed by an ident, like \texttt{?X1} or \texttt{?X2}. Here is an example: % Require EqDecide. \begin{coq_example*} Require Import List. \end{coq_example*} \begin{coq_example} Hint Extern 5 ({?X1 = ?X2} + {?X1 <> ?X2}) => generalize X1, X2; decide equality : eqdec. Goal forall a b:list (nat * nat), {a = b} + {a <> b}. info auto with eqdec. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \end{itemize} \Rem One can use an \texttt{Extern} hint with no pattern to do pattern-matching on hypotheses using \texttt{match goal with} inside the tactic. \begin{Variants} \item \texttt{Hint} \textsl{hint\_definition} No database name is given: the hint is registered in the {\tt core} database. \item\texttt{Hint Local} \textsl{hint\_definition} \texttt{:} \ident$_1$ \ldots\ \ident$_n$ This is used to declare hints that must not be exported to the other modules that require and import the current module. Inside a section, the option {\tt Local} is useless since hints do not survive anyway to the closure of sections. \item\texttt{Hint Local} \textsl{hint\_definition} Idem for the {\tt core} database. \end{Variants} % There are shortcuts that allow to define several goal at once: % \begin{itemize} % \item \comindex{Hints Resolve}\texttt{Hints Resolve \ident$_1$ \dots\ \ident$_n$ : \ident.}\\ % This command is a shortcut for the following ones: % \begin{quotation} % \noindent\texttt{Hint \ident$_1$ : \ident\ := Resolve \ident$_1$}\\ % \dots\\ % \texttt{Hint \ident$_1$ : \ident := Resolve \ident$_1$} % \end{quotation} % Notice that the hint name is the same that the theorem given as % hint. % \item \comindex{Hints Immediate}\texttt{Hints Immediate \ident$_1$ \dots\ \ident$_n$ : \ident.}\\ % \item \comindex{Hints Unfold}\texttt{Hints Unfold \qualid$_1$ \dots\ \qualid$_n$ : \ident.}\\ % \end{itemize} %\begin{Warnings} % \item \texttt{Overriding hint named \dots\ in database \dots} %\end{Warnings} \subsection{Hint databases defined in the \Coq\ standard library} Several hint databases are defined in the \Coq\ standard library. The actual content of a database is the collection of the hints declared to belong to this database in each of the various modules currently loaded. 
Especially, requiring new modules potentially extend a database. At {\Coq} startup, only the {\tt core} and {\tt v62} databases are non empty and can be used. \begin{description} \item[\tt core] This special database is automatically used by \texttt{auto}, except when pseudo-database \texttt{nocore} is given to \texttt{auto}. The \texttt{core} database contains only basic lemmas about negation, conjunction, and so on from. Most of the hints in this database come from the \texttt{Init} and \texttt{Logic} directories. \item[\tt arith] This database contains all lemmas about Peano's arithmetic proved in the directories \texttt{Init} and \texttt{Arith} \item[\tt zarith] contains lemmas about binary signed integers from the directories \texttt{theories/ZArith}. When required, the module {\tt Omega} also extends the database {\tt zarith} with a high-cost hint that calls {\tt omega} on equations and inequalities in {\tt nat} or {\tt Z}. \item[\tt bool] contains lemmas about booleans, mostly from directory \texttt{theories/Bool}. \item[\tt datatypes] is for lemmas about lists, streams and so on that are mainly proved in the \texttt{Lists} subdirectory. \item[\tt sets] contains lemmas about sets and relations from the directories \texttt{Sets} and \texttt{Relations}. \item[\tt typeclass\_instances] contains all the type class instances declared in the environment, including those used for \texttt{setoid\_rewrite}, from the \texttt{Classes} directory. \end{description} There is also a special database called {\tt v62}. It collects all hints that were declared in the versions of {\Coq} prior to version 6.2.4 when the databases {\tt core}, {\tt arith}, and so on were introduced. The purpose of the database {\tt v62} is to ensure compatibility with further versions of {\Coq} for developments done in versions prior to 6.2.4 ({\tt auto} being replaced by {\tt auto with v62}). The database {\tt v62} is intended not to be extended (!). It is not included in the hint databases list used in the {\tt auto with *} tactic. Furthermore, you are advised not to put your own hints in the {\tt core} database, but use one or several databases specific to your development. \subsection{\tt Print Hint \label{PrintHint} \comindex{Print Hint}} This command displays all hints that apply to the current goal. It fails if no proof is being edited, while the two variants can be used at every moment. \begin{Variants} \item {\tt Print Hint {\ident} } This command displays only tactics associated with \ident\ in the hints list. This is independent of the goal being edited, so this command will not fail if no goal is being edited. \item {\tt Print Hint *} This command displays all declared hints. \item {\tt Print HintDb {\ident} } \label{PrintHintDb} \comindex{Print HintDb} This command displays all hints from database \ident. \end{Variants} \subsection{\tt Hint Rewrite \term$_1$ \dots \term$_n$ : \ident \label{HintRewrite} \comindex{Hint Rewrite}} This vernacular command adds the terms {\tt \term$_1$ \dots \term$_n$} (their types must be equalities) in the rewriting base {\tt \ident} with the default orientation (left to right). Notice that the rewriting bases are distinct from the {\tt auto} hint bases and that {\tt auto} does not take them into account. This command is synchronous with the section mechanism (see \ref{Section}): when closing a section, all aliases created by \texttt{Hint Rewrite} in that section are lost. 
Conversely, when loading a module, all \texttt{Hint Rewrite} declarations at the global level of that module are loaded. \begin{Variants} \item {\tt Hint Rewrite -> \term$_1$ \dots \term$_n$ : \ident}\\ This is strictly equivalent to the command above (we only make explicit the orientation which otherwise defaults to {\tt ->}). \item {\tt Hint Rewrite <- \term$_1$ \dots \term$_n$ : \ident}\\ Adds the rewriting rules {\tt \term$_1$ \dots \term$_n$} with a right-to-left orientation in the base {\tt \ident}. \item {\tt Hint Rewrite \term$_1$ \dots \term$_n$ using {\tac} : {\ident}}\\ When the rewriting rules {\tt \term$_1$ \dots \term$_n$} in {\tt \ident} will be used, the tactic {\tt \tac} will be applied to the generated subgoals, the main subgoal excluded. %% \item %% {\tt Hint Rewrite [ \term$_1$ \dots \term$_n$ ] in \ident}\\ %% {\tt Hint Rewrite [ \term$_1$ \dots \term$_n$ ] in {\ident} using {\tac}}\\ %% These are deprecated syntactic variants for %% {\tt Hint Rewrite \term$_1$ \dots \term$_n$ : \ident} and %% {\tt Hint Rewrite \term$_1$ \dots \term$_n$ using {\tac} : {\ident}}. \item \texttt{Print Rewrite HintDb {\ident}} This command displays all rewrite hints contained in {\ident}. \end{Variants} \subsection{Hints and sections \label{Hint-and-Section}} Hints provided by the \texttt{Hint} commands are erased when closing a section. Conversely, all hints of a module \texttt{A} that are not defined inside a section (and not defined with option {\tt Local}) become available when the module {\tt A} is imported (using e.g. \texttt{Require Import A.}). \subsection{Setting implicit automation tactics} \subsubsection[\tt Proof with {\tac}.]{\tt Proof with {\tac}.\label{ProofWith} \comindex{Proof with}} This command may be used to start a proof. It defines a default tactic to be used each time a tactic command {\tac$_1$} is ended by ``\verb#...#''. In this case the tactic command typed by the user is equivalent to \tac$_1$;{\tac}. \SeeAlso {\tt Proof.} in Section~\ref{BeginProof}. \begin{Variants} \item {\tt Proof with {\tac} using {\ident$_1$ \dots {\ident$_n$}}} Combines in a single line {\tt Proof with} and {\tt Proof using}, see~\ref{ProofUsing} \item {\tt Proof using {\ident$_1$ \dots {\ident$_n$}} with {\tac}} Combines in a single line {\tt Proof with} and {\tt Proof using}, see~\ref{ProofUsing} \end{Variants} \subsubsection[\tt Declare Implicit Tactic {\tac}.]{\tt Declare Implicit Tactic {\tac}.\comindex{Declare Implicit Tactic}} This command declares a tactic to be used to solve implicit arguments that {\Coq} does not know how to solve by unification. It is used every time the term argument of a tactic has one of its holes not fully resolved. Here is an example: \begin{coq_example} Parameter quo : nat -> forall n:nat, n<>0 -> nat. Notation "x // y" := (quo x y _) (at level 40). Declare Implicit Tactic assumption. Goal forall n m, m<>0 -> { q:nat & { r | q * m + r = n } }. intros. exists (n // m). \end{coq_example} The tactic {\tt exists (n // m)} did not fail. The hole was solved by {\tt assumption} so that it behaved as {\tt exists (quo n m H)}. \section{Decision procedures} \subsection{\tt tauto \tacindex{tauto} \label{tauto}} This tactic implements a decision procedure for intuitionistic propositional calculus based on the contraction-free sequent calculi LJT* of Roy Dyckhoff \cite{Dyc92}. Note that {\tt tauto} succeeds on any instance of an intuitionistic tautological proposition. {\tt tauto} unfolds negations and logical equivalence but does not unfold any other definition. 
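For instance, since negations and {\tt <->} are unfolded, a statement such as the following (an arbitrary example) is solved directly:

\begin{coq_example}
(* An arbitrary propositional statement involving <-> and ~. *)
Goal forall A B : Prop, (A <-> B) -> ~ B -> ~ A.
tauto.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}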
The following goal can be proved by {\tt tauto} whereas {\tt auto} would fail: \begin{coq_example} Goal forall (x:nat) (P:nat -> Prop), x = 0 \/ P x -> x <> 0 -> P x. intros. tauto. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Moreover, if it has nothing else to do, {\tt tauto} performs introductions. Therefore, the use of {\tt intros} in the previous proof is unnecessary. {\tt tauto} can for instance prove the following: \begin{coq_example} (* auto would fail *) Goal forall (A:Prop) (P:nat -> Prop), A \/ (forall x:nat, ~ A -> P x) -> forall x:nat, ~ A -> P x. tauto. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \Rem In contrast, {\tt tauto} cannot solve the following goal \begin{coq_example*} Goal forall (A:Prop) (P:nat -> Prop), A \/ (forall x:nat, ~ A -> P x) -> forall x:nat, ~ ~ (A \/ P x). \end{coq_example*} \begin{coq_eval} Abort. \end{coq_eval} because \verb=(forall x:nat, ~ A -> P x)= cannot be treated as atomic and an instantiation of \verb=x= is necessary. \subsection{\tt intuition {\tac} \tacindex{intuition} \label{intuition}} The tactic \texttt{intuition} takes advantage of the search-tree built by the decision procedure involved in the tactic {\tt tauto}. It uses this information to generate a set of subgoals equivalent to the original one (but simpler than it) and applies the tactic {\tac} to them \cite{Mun94}. If this tactic fails on some goals then {\tt intuition} fails. In fact, {\tt tauto} is simply {\tt intuition fail}. For instance, the tactic {\tt intuition auto} applied to the goal \begin{verbatim} (forall (x:nat), P x)/\B -> (forall (y:nat),P y)/\ P O \/B/\ P O \end{verbatim} internally replaces it by the equivalent one: \begin{verbatim} (forall (x:nat), P x), B |- P O \end{verbatim} and then uses {\tt auto} which completes the proof. Originally due to C{\'e}sar~Mu{\~n}oz, these tactics ({\tt tauto} and {\tt intuition}) have been completely re-engineered by David~Delahaye using mainly the tactic language (see Chapter~\ref{TacticLanguage}). The code is now much shorter and a significant increase in performance has been noticed. The general behavior with respect to dependent types, unfolding and introductions has slightly changed to get clearer semantics. This may lead to some incompatibilities. \begin{Variants} \item {\tt intuition}\\ Is equivalent to {\tt intuition auto with *}. \end{Variants} % En attente d'un moyen de valoriser les fichiers de demos %\SeeAlso file \texttt{contrib/Rocq/DEMOS/Demo\_tauto.v} \subsection{\tt rtauto \tacindex{rtauto} \label{rtauto}} The {\tt rtauto} tactic solves propositional tautologies similarly to what {\tt tauto} does. The main difference is that the proof term is built using a reflection scheme applied to a sequent calculus proof of the goal. The search procedure is also implemented using a different technique. Users should be aware that this difference may result in faster proof-search but slower proof-checking, and {\tt rtauto} might not solve goals that {\tt tauto} would be able to solve (e.g. goals involving universal quantifiers). \subsection{{\tt firstorder} \tacindex{firstorder} \label{firstorder}} The tactic \texttt{firstorder} is an {\it experimental} extension of \texttt{tauto} to first-order reasoning, written by Pierre Corbineau. It is not restricted to usual logical connectives but instead may reason about any first-order class inductive definition. \begin{Variants} \item {\tt firstorder {\tac}} \tacindex{firstorder {\tac}} Tries to solve the goal with {\tac} when no logical rule may apply. 
\item {\tt firstorder with \ident$_1$ \dots\ \ident$_n$ } \tacindex{firstorder with} Adds lemmas \ident$_1$ \dots\ \ident$_n$ to the proof-search environment. \item {\tt firstorder using {\qualid}$_1$ , \dots\ , {\qualid}$_n$ } \tacindex{firstorder using} Adds lemmas in {\tt auto} hints bases {\qualid}$_1$ \dots\ {\qualid}$_n$ to the proof-search environment. If {\qualid}$_i$ refers to an inductive type, it is the collection of its constructors which is added as hints. \item \texttt{firstorder using {\qualid}$_1$ , \dots\ , {\qualid}$_n$ with \ident$_1$ \dots\ \ident$_n$} This combines the effects of the {\tt using} and {\tt with} options. \end{Variants} Proof-search is bounded by a depth parameter which can be set by typing the {\nobreak \tt Set Firstorder Depth $n$} \comindex{Set Firstorder Depth} vernacular command. \subsection{\tt congruence \tacindex{congruence} \label{congruence}} The tactic {\tt congruence}, by Pierre Corbineau, implements the standard Nelson and Oppen congruence closure algorithm, which is a decision procedure for ground equalities with uninterpreted symbols. It also include the constructor theory (see \ref{injection} and \ref{discriminate}). If the goal is a non-quantified equality, {\tt congruence} tries to prove it with non-quantified equalities in the context. Otherwise it tries to infer a discriminable equality from those in the context. Alternatively, congruence tries to prove that a hypothesis is equal to the goal or to the negation of another hypothesis. {\tt congruence} is also able to take advantage of hypotheses stating quantified equalities, you have to provide a bound for the number of extra equalities generated that way. Please note that one of the members of the equality must contain all the quantified variables in order for {\tt congruence} to match against it. \begin{coq_eval} Reset Initial. Variable A:Set. Variables a b:A. Variable f:A->A. Variable g:A->A->A. \end{coq_eval} \begin{coq_example} Theorem T: a=(f a) -> (g b (f a))=(f (f a)) -> (g a b)=(f (g b a)) -> (g a b)=a. intros. congruence. \end{coq_example} \begin{coq_eval} Reset Initial. Variable A:Set. Variables a c d:A. Variable f:A->A*A. \end{coq_eval} \begin{coq_example} Theorem inj : f = pair a -> Some (f c) = Some (f d) -> c=d. intros. congruence. \end{coq_example} \begin{Variants} \item {\tt congruence {\sl n}}\\ Tries to add at most {\tt \sl n} instances of hypotheses stating quantified equalities to the problem in order to solve it. A bigger value of {\tt \sl n} does not make success slower, only failure. You might consider adding some lemmas as hypotheses using {\tt assert} in order for congruence to use them. \end{Variants} \begin{Variants} \item {\tt congruence with \term$_1$ \dots\ \term$_n$}\\ Adds {\tt \term$_1$ \dots\ \term$_n$} to the pool of terms used by {\tt congruence}. This helps in case you have partially applied constructors in your goal. \end{Variants} \begin{ErrMsgs} \item \errindex{I don't know how to handle dependent equality} \\ The decision procedure managed to find a proof of the goal or of a discriminable equality but this proof couldn't be built in {\Coq} because of dependently-typed functions. \item \errindex{I couldn't solve goal} \\ The decision procedure didn't find any way to solve the goal. \item \errindex{Goal is solvable by congruence but some arguments are missing. 
Try "congruence with \dots", replacing metavariables by arbitrary terms.} \\ The decision procedure could solve the goal with the provision that additional arguments are supplied for some partially applied constructors. Any term of an appropriate type will allow the tactic to successfully solve the goal. Those additional arguments can be given to {\tt congruence} by filling in the holes in the terms given in the error message, using the {\tt with} variant described above. \end{ErrMsgs} \section{Things that do not fit other sections} \section{Everything after this point has yet to be sorted} \subsection{\tt constr\_eq \term$_1$ \term$_2$ \tacindex{constr\_eq} \label{constreq}} This tactic applies to any goal. It checks whether its arguments are equal modulo alpha conversion and casts. \ErrMsg \errindex{Not equal} \subsection{\tt unify \term$_1$ \term$_2$ \tacindex{unify} \label{unify}} This tactic applies to any goal. It checks whether its arguments are unifiable, potentially instantiating existential variables. \ErrMsg \errindex{Not unifiable} \begin{Variants} \item {\tt unify \term$_1$ \term$_2$ with \ident} Unification takes the transparency information defined in the hint database {\tt \ident} into account (see Section~\ref{HintTransparency}). \end{Variants} \subsection{\tt is\_evar \term \tacindex{is\_evar} \label{isevar}} This tactic applies to any goal. It checks whether its argument is an existential variable. Existential variables are uninstantiated variables generated by e.g. {\tt eapply} (see Section~\ref{apply}). \ErrMsg \errindex{Not an evar} \subsection{\tt has\_evar \term \tacindex{has\_evar} \label{hasevar}} This tactic applies to any goal. It checks whether its argument has an existential variable as a subterm. Unlike {\tt context} patterns combined with {\tt is\_evar}, this tactic scans all subterms, including those under binders. \ErrMsg \errindex{No evars} \subsection{\tt is\_var \term \tacindex{is\_var} \label{isvar}} This tactic applies to any goal. It checks whether its argument is a variable or hypothesis in the current goal context or in the opened sections. \ErrMsg \errindex{Not a variable or hypothesis} \section{Equality} \subsection{\tt f\_equal \label{f-equal} \tacindex{f\_equal}} This tactic applies to a goal of the form $f\ a_1\ \ldots\ a_n = f'\ a'_1\ \ldots\ a'_n$. Using {\tt f\_equal} on such a goal leads to subgoals $f=f'$ and $a_1=a'_1$ and so on up to $a_n=a'_n$. Amongst these subgoals, the simple ones (e.g. provable by reflexivity or congruence) are automatically solved by {\tt f\_equal}. \section{Equality and inductive sets} We describe in this section some special purpose tactics dealing with equality and inductive sets or types. These tactics use the equality {\tt eq:forall (A:Type), A->A->Prop}, simply written with the infix symbol {\tt =}. \subsection{\tt decide equality \label{decideequality} \tacindex{decide equality}} This tactic solves a goal of the form {\tt forall $x$ $y$:$R$, \{$x$=$y$\}+\{\verb|~|$x$=$y$\}}, where $R$ is an inductive type such that its constructors do not take proofs or functions as arguments, nor objects in dependent types. It solves goals of the form {\tt \{$x$=$y$\}+\{\verb|~|$x$=$y$\}} as well. \subsection{\tt compare \term$_1$ \term$_2$ \tacindex{compare}} This tactic compares two given objects \term$_1$ and \term$_2$ of an inductive datatype. If $G$ is the current goal, it leaves the sub-goals \term$_1${\tt =}\term$_2$ {\tt ->} $G$ and \verb|~|\term$_1${\tt =}\term$_2$ {\tt ->} $G$. 
The type of \term$_1$ and \term$_2$ must satisfy the same restrictions as in the tactic \texttt{decide equality}. \subsection{\tt simplify\_eq {\term} \tacindex{simplify\_eq} \tacindex{esimplify\_eq} \label{simplify-eq}} Let {\term} be the proof of a statement of conclusion {\tt {\term$_1$}={\term$_2$}}. If {\term$_1$} and {\term$_2$} are structurally different (in the sense described for the tactic {\tt discriminate}), then the tactic {\tt simplify\_eq} behaves as {\tt discriminate {\term}}, otherwise it behaves as {\tt injection {\term}}. \Rem If some quantified hypothesis of the goal is named {\ident}, then {\tt simplify\_eq {\ident}} first introduces the hypothesis in the local context using \texttt{intros until \ident}. \begin{Variants} \item \texttt{simplify\_eq} \num This does the same thing as \texttt{intros until \num} then \texttt{simplify\_eq \ident} where {\ident} is the identifier for the last introduced hypothesis. \item \texttt{simplify\_eq} \term{} {\tt with} {\bindinglist} This does the same as \texttt{simplify\_eq {\term}} but using the given bindings to instantiate parameters or hypotheses of {\term}. \item \texttt{esimplify\_eq} \num\\ \texttt{esimplify\_eq} \term{} \zeroone{{\tt with} {\bindinglist}} This works the same as {\tt simplify\_eq} but if the type of {\term}, or the type of the hypothesis referred to by {\num}, has uninstantiated parameters, these parameters are left as existential variables. \item{\tt simplify\_eq} If the current goal has form $t_1\verb=<>=t_2$, it behaves as \texttt{intro {\ident}; simplify\_eq {\ident}}. \end{Variants} \subsection{\tt dependent rewrite -> {\ident} \tacindex{dependent rewrite ->} \label{dependent-rewrite}} This tactic applies to any goal. If \ident\ has type \verb+(existT B a b)=(existT B a' b')+ in the local context (i.e. each term of the equality has a sigma type $\{ a:A~ \&~(B~a)\}$) this tactic rewrites \verb+a+ into \verb+a'+ and \verb+b+ into \verb+b'+ in the current goal. This tactic works even if $B$ is also a sigma type. This kind of equalities between dependent pairs may be derived by the injection and inversion tactics. \begin{Variants} \item{\tt dependent rewrite <- {\ident}} \tacindex{dependent rewrite <-} \\ Analogous to {\tt dependent rewrite ->} but uses the equality from right to left. \end{Variants} \section{Inversion \label{inversion}} \subsection[\tt functional inversion \ident]{\tt functional inversion \ident\label{sec:functional-inversion}} \texttt{functional inversion} is a \emph{highly} experimental tactic which performs inversion on hypothesis \ident\ of the form \texttt{\qualid\ \term$_1$\dots\term$_n$\ = \term} or \texttt{\term\ = \qualid\ \term$_1$\dots\term$_n$} where \qualid\ must have been defined using \texttt{Function} (see Section~\ref{Function}). \begin{ErrMsgs} \item \errindex{Hypothesis {\ident} must contain at least one Function} \item \errindex{Cannot find inversion information for hypothesis \ident} This error may be raised when some inversion lemma failed to be generated by Function. \end{ErrMsgs} \begin{Variants} \item {\tt functional inversion \num} This does the same thing as \texttt{intros until \num} then \texttt{functional inversion \ident} where {\ident} is the identifier for the last introduced hypothesis. 
\item {\tt functional inversion \ident\ \qualid}\\
{\tt functional inversion \num\ \qualid}

In case the hypothesis {\ident} (or {\num}) has a type of the form \texttt{\qualid$_1$\ \term$_1$\dots\term$_n$\ =\ \qualid$_2$\ \term$_{n+1}$\dots\term$_{n+m}$} where \qualid$_1$ and \qualid$_2$ are valid candidates to functional inversion, this variant allows one to choose which of the two must be inverted.

\end{Variants}

\subsection{\tt quote \ident
\tacindex{quote}
\index{2-level approach}}

This kind of inversion has nothing to do with the tactic \texttt{inversion} above. This tactic does \texttt{change (\ident\ t)}, where \texttt{t} is a term built in order to ensure convertibility. In other words, it does inversion of the function \ident. This function must be a fixpoint on a simple recursive datatype: see~\ref{quote-examples} for the full details.

\begin{ErrMsgs}
\item \errindex{quote: not a simple fixpoint}\\
Happens when \texttt{quote} is not able to perform inversion properly.
\end{ErrMsgs}

\begin{Variants}
\item \texttt{quote {\ident} [ \ident$_1$ \dots \ident$_n$ ]}\\
All terms that are built only with \ident$_1$ \dots \ident$_n$ will be considered by \texttt{quote} as constants rather than variables.
\end{Variants}

% Waiting for a way to showcase the demo files
% \SeeAlso file \texttt{theories/DEMOS/DemoQuote.v} in the distribution

\section[Classical tactics]{Classical tactics\label{ClassicalTactics}}

In order to ease the proving process, a few more tactics are available when the {\tt Classical} module is loaded. Make sure to load the module using the \texttt{Require Import} command.

\subsection{{\tt classical\_left, classical\_right} \tacindex{classical\_left} \tacindex{classical\_right}}

The tactics \texttt{classical\_left} and \texttt{classical\_right} are the analogues of \texttt{left} and \texttt{right}, but using classical logic. They can only be used for disjunctions. Use \texttt{classical\_left} to prove the left part of the disjunction with the assumption that the negation of the right part holds. Use \texttt{classical\_right} to prove the right part of the disjunction with the assumption that the negation of the left part holds.

\section{Automatizing
\label{Automatizing}}

% DOES THIS STILL EXIST?
%
% \subsection{\tt Prolog [ \term$_1$ \dots\ \term$_n$ ] \num}
% \tacindex{Prolog}\label{Prolog}
% This tactic, implemented by Chet Murthy, is based upon the concept of
% existential variables of Gilles Dowek, stating that resolution is a
% kind of unification. It tries to solve the current goal using the {\tt
% Assumption} tactic, the {\tt intro} tactic, and applying hypotheses
% of the local context and terms of the given list {\tt [ \term$_1$
% \dots\ \term$_n$\ ]}. It is more powerful than {\tt auto} since it
% may apply to any theorem, even those of the form {\tt (x:A)(P x) -> Q}
% where {\tt x} does not appear free in {\tt Q}. The maximal search
% depth is {\tt \num}.

% \begin{ErrMsgs}
% \item \errindex{Prolog failed}\\
%   The Prolog tactic was not able to prove the subgoal.
% \end{ErrMsgs}

%% \subsection{{\tt jp} {\em (Jprover)}
%% \tacindex{jp}
%% \label{jprover}}

%% The tactic \texttt{jp}, due to Huang Guan-Shieng, is an experimental
%% port of the {\em Jprover}\cite{SLKN01} semi-decision procedure for
%% first-order intuitionistic logic implemented in {\em
%% NuPRL}\cite{Kre02}.
%% The tactic \texttt{jp}, due to Huang Guan-Shieng, is an {\it %% experimental} port of the {\em Jprover}\cite{SLKN01} semi-decision %% procedure for first-order intuitionistic logic implemented in {\em %% NuPRL}\cite{Kre02}. %% Search may optionnaly be bounded by a multiplicity parameter %% indicating how many (at most) copies of a formula may be used in %% the proof process, its absence may lead to non-termination of the tactic. %% %\begin{coq_eval} %% %Variable S:Set. %% %Variables P Q:S->Prop. %% %Variable f:S->S. %% %\end{coq_eval} %% %\begin{coq_example*} %% %Lemma example: (exists x |P x\/Q x)->(exists x |P x)\/(exists x |Q x). %% %jp. %% %Qed. %% %Lemma example2: (forall x ,P x->P (f x))->forall x,P x->P (f(f x)). %% %jp. %% %Qed. %% %\end{coq_example*} %% \begin{Variants} %% \item {\tt jp $n$}\\ %% \tacindex{jp $n$} %% Tries the {\em Jprover} procedure with multiplicities up to $n$, %% starting from 1. %% \item {\tt jp}\\ %% Tries the {\em Jprover} procedure without multiplicity bound, %% possibly running forever. %% \end{Variants} %% \begin{ErrMsgs} %% \item \errindex{multiplicity limit reached}\\ %% The procedure tried all multiplicities below the limit and %% failed. Goal might be solved by increasing the multiplicity limit. %% \item \errindex{formula is not provable}\\ %% The procedure determined that goal was not provable in %% intuitionistic first-order logic, no matter how big the %% multiplicity is. %% \end{ErrMsgs} % \subsection[\tt Linear]{\tt Linear\tacindex{Linear}\label{Linear}} % The tactic \texttt{Linear}, due to Jean-Christophe Filli{\^a}atre % \cite{Fil94}, implements a decision procedure for {\em Direct % Predicate Calculus}, that is first-order Gentzen's Sequent Calculus % without contraction rules \cite{KeWe84,BeKe92}. Intuitively, a % first-order goal is provable in Direct Predicate Calculus if it can be % proved using each hypothesis at most once. % Unlike the previous tactics, the \texttt{Linear} tactic does not belong % to the initial state of the system, and it must be loaded explicitly % with the command % \begin{coq_example*} % Require Linear. % \end{coq_example*} % For instance, assuming that \texttt{even} and \texttt{odd} are two % predicates on natural numbers, and \texttt{a} of type \texttt{nat}, the % tactic \texttt{Linear} solves the following goal % \begin{coq_eval} % Variables even,odd : nat -> Prop. % Variable a:nat. % \end{coq_eval} % \begin{coq_example*} % Lemma example : (even a) % -> ((x:nat)((even x)->(odd (S x)))) % -> (EX y | (odd y)). % \end{coq_example*} % You can find examples of the use of \texttt{Linear} in % \texttt{theories/DEMOS/DemoLinear.v}. % \begin{coq_eval} % Abort. % \end{coq_eval} % \begin{Variants} % \item {\tt Linear with \ident$_1$ \dots\ \ident$_n$}\\ % \tacindex{Linear with} % Is equivalent to apply first {\tt generalize \ident$_1$ \dots % \ident$_n$} (see Section~\ref{generalize}) then the \texttt{Linear} % tactic. So one can use axioms, lemmas or hypotheses of the local % context with \texttt{Linear} in this way. % \end{Variants} % \begin{ErrMsgs} % \item \errindex{Not provable in Direct Predicate Calculus} % \item \errindex{Found $n$ classical proof(s) but no intuitionistic one}\\ % The decision procedure looks actually for classical proofs of the % goals, and then checks that they are intuitionistic. In that case, % classical proofs have been found, which do not correspond to % intuitionistic ones. 
% \end{ErrMsgs} \subsection{\tt omega \tacindex{omega} \label{omega}} The tactic \texttt{omega}, due to Pierre Cr{\'e}gut, is an automatic decision procedure for Presburger arithmetic. It solves quantifier-free formulas built with \verb|~|, \verb|\/|, \verb|/\|, \verb|->| on top of equalities, inequalities and disequalities on both the type \texttt{nat} of natural numbers and \texttt{Z} of binary integers. This tactic must be loaded by the command \texttt{Require Import Omega}. See the additional documentation about \texttt{omega} (see Chapter~\ref{OmegaChapter}). \subsection{{\tt ring} and {\tt ring\_simplify \term$_1$ \dots\ \term$_n$} \tacindex{ring} \tacindex{ring\_simplify} \comindex{Add Ring}} The {\tt ring} tactic solves equations upon polynomial expressions of a ring (or semi-ring) structure. It proceeds by normalizing both hand sides of the equation (w.r.t. associativity, commutativity and distributivity, constant propagation) and comparing syntactically the results. {\tt ring\_simplify} applies the normalization procedure described above to the terms given. The tactic then replaces all occurrences of the terms given in the conclusion of the goal by their normal forms. If no term is given, then the conclusion should be an equation and both hand sides are normalized. See Chapter~\ref{ring} for more information on the tactic and how to declare new ring structures. \subsection{{\tt field}, {\tt field\_simplify \term$_1$\dots\ \term$_n$} and {\tt field\_simplify\_eq} \tacindex{field} \tacindex{field\_simplify} \tacindex{field\_simplify\_eq} \comindex{Add Field}} The {\tt field} tactic is built on the same ideas as {\tt ring}: this is a reflexive tactic that solves or simplifies equations in a field structure. The main idea is to reduce a field expression (which is an extension of ring expressions with the inverse and division operations) to a fraction made of two polynomial expressions. Tactic {\tt field} is used to solve subgoals, whereas {\tt field\_simplify \term$_1$\dots\term$_n$} replaces the provided terms by their reduced fraction. {\tt field\_simplify\_eq} applies when the conclusion is an equation: it simplifies both hand sides and multiplies so as to cancel denominators. So it produces an equation without division nor inverse. All of these 3 tactics may generate a subgoal in order to prove that denominators are different from zero. See Chapter~\ref{ring} for more information on the tactic and how to declare new field structures. \Example \begin{coq_example*} Require Import Reals. Goal forall x y:R, (x * y > 0)%R -> (x * (1 / x + x / (x + y)))%R = ((- 1 / y) * y * (- x * (x / (x + y)) - 1))%R. \end{coq_example*} \begin{coq_example} intros; field. \end{coq_example} \begin{coq_eval} Reset Initial. \end{coq_eval} \SeeAlso file {\tt plugins/setoid\_ring/RealField.v} for an example of instantiation,\\ \phantom{\SeeAlso}theory {\tt theories/Reals} for many examples of use of {\tt field}. \subsection{\tt fourier \tacindex{fourier}} This tactic written by Lo{\"\i}c Pottier solves linear inequalities on real numbers using Fourier's method~\cite{Fourier}. This tactic must be loaded by {\tt Require Import Fourier}. \Example \begin{coq_example*} Require Import Reals. Require Import Fourier. Goal forall x y:R, (x < y)%R -> (y + 1 >= x - 1)%R. \end{coq_example*} \begin{coq_example} intros; fourier. \end{coq_example} \begin{coq_eval} Reset Initial. 
\end{coq_eval}

\section{Simple tactic macros
\index{Tactic macros}
\comindex{Tactic Definition}
\label{TacticDefinition}}

A simple example has more value than a long explanation:

\begin{coq_example}
Ltac Solve := simpl; intros; auto.
Ltac ElimBoolRewrite b H1 H2 :=
  elim b; [ intros; rewrite H1; eauto | intros; rewrite H2; eauto ].
\end{coq_example}

Tactic macros are synchronous with the \Coq\ section mechanism: a tactic definition is deleted from the current environment when you close the section (see also \ref{Section}) where it was defined. If you want a tactic macro defined in a module to be usable in the modules that require it, you should put it outside of any section.

Chapter~\ref{TacticLanguage} gives examples of more complex user-defined tactics.

%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:

%\documentstyle[11pt,../tools/coq-tex/coq]{article}
%\input{title}
%\include{macros}
%\begin{document}
%\coverpage{Co-inductive types in Coq}{Eduardo Gim\'enez}

\chapter[Co-inductive types in Coq]{Co-inductive types in Coq\label{Co-inductives}}

%\begin{abstract}
{\it Co-inductive} types are types whose elements may not be well-founded. A formal study of the Calculus of Constructions extended by co-inductive types has been presented in \cite{Gim94}. It is based on the notion of {\it guarded definitions} introduced by Th. Coquand in \cite{Coquand93}. The implementation is by E. Gim\'enez.
%\end{abstract}

\section{A short introduction to co-inductive types}

We assume that the reader is rather familiar with inductive types. These types are characterized by their {\it constructors}, which can be regarded as the basic methods from which the elements of the type can be built up. It is implicit in the definition of an inductive type that its elements are the result of a {\it finite} number of applications of its constructors.

Co-inductive types arise from relaxing this implicit condition and admitting that an element of the type can also be introduced by a non-ending (but effective) process of construction defined in terms of the basic methods which characterize the type. So we could think of the wider notion of types defined by constructors (let us call them {\it recursive types}) and classify them into inductive and co-inductive ones, depending on whether or not we consider non-ending methods as admissible for constructing elements of the type. Note that in both cases we obtain a ``closed type'', all of whose elements are pre-determined in advance (by the constructors).

When we know that $a$ is an element of a recursive type (no matter whether it is inductive or co-inductive) what we know is that it is the result of applying one of the basic forms of construction allowed for the type. So the most primitive way of eliminating an element of a recursive type is by case analysis, i.e. by considering through which constructor it could have been introduced. In the case of inductive sets, the additional knowledge that constructors can be applied only a finite number of times provides us with a more powerful way of eliminating their elements, namely, the principle of induction. This principle is obviously not valid for co-inductive types, since it is just the expression of this extra knowledge attached to inductive types.
An example of a co-inductive type is the type of infinite sequences formed with elements of type $A$, or streams for shorter. In Coq, it can be introduced using the \verb!CoInductive! command~: \begin{coq_example} CoInductive Stream (A:Set) : Set := cons : A -> Stream A -> Stream A. \end{coq_example} The syntax of this command is the same as the command \verb!Inductive! (cf. section \ref{gal_Inductive_Definitions}). Definition of mutually co-inductive types are possible. As was already said, there are not principles of induction for co-inductive sets, the only way of eliminating these elements is by case analysis. In the example of streams, this elimination principle can be used for instance to define the well known destructors on streams $\hd : (\Str\;A)\rightarrow A$ and $\tl: (\Str\;A)\rightarrow (\Str\;A)$ : \begin{coq_example} Section Destructors. Variable A : Set. Definition hd (x:Stream A) := match x with | cons a s => a end. Definition tl (x:Stream A) := match x with | cons a s => s end. \end{coq_example} \begin{coq_example*} End Destructors. \end{coq_example*} \subsection{Non-ending methods of construction} At this point the reader should have realized that we have left unexplained what is a ``non-ending but effective process of construction'' of a stream. In the widest sense, a method is a non-ending process of construction if we can eliminate the stream that it introduces, in other words, if we can reduce any case analysis on it. In this sense, the following ways of introducing a stream are not acceptable. \begin{center} $\zeros = (\cons\;\nat\;\nO\;(\tl\;\zeros))\;\;:\;\;(\Str\;\nat)$\\[12pt] $\filter\;(\cons\;A\;a\;s) = \si\;\;(P\;a)\;\;\alors\;\;(\cons\;A\;a\;(\filter\;s))\;\;\sinon\;\;(\filter\;s) )\;\;:\;\;(\Str\;A)$ \end{center} \noindent The former it is not valid since the stream can not be eliminated to obtain its tail. In the latter, a stream is naively defined as the result of erasing from another (arbitrary) stream all the elements which does not verify a certain property $P$. This does not always makes sense, for example it does not when all the elements of the stream verify $P$, in which case we can not eliminate it to obtain its head\footnote{Note that there is no notion of ``the empty stream'', a stream is always infinite and build by a \texttt{cons}.}. On the contrary, the following definitions are acceptable methods for constructing a stream~: \begin{center} $\zeros = (\cons\;\nat\;\nO\;\zeros)\;\;:\;\;(\Str\;\nat)\;\;\;(*)$\\[12pt] $(\from\;n) = (\cons\;\nat\;n\;(\from\;(\nS\;n)))\;:\;(\Str\;\nat)$\\[12pt] $\alter = (\cons\;\bool\;\true\;(\cons\;\bool\;\false\;\alter))\;:\;(\Str\;\bool)$. \end{center} \noindent The first one introduces a stream containing all the natural numbers greater than a given one, and the second the stream which infinitely alternates the booleans true and false. In general it is not evident to realise when a definition can be accepted or not. However, there is a class of definitions that can be easily recognised as being valid : those where (1) all the recursive calls of the method are done after having explicitly mentioned which is (at least) the first constructor to start building the element, and (2) no other functions apart from constructors are applied to recursive calls. This class of definitions is usually referred as {\it guarded-by-constructors} definitions \cite{Coquand93,Gim94}. The methods $\from$ and $\alter$ are examples of definitions which are guarded by constructors. 
The definition of the function $\filter$ is not, because there is no
constructor to guard the recursive call in the {\it else} branch.
Neither is the one of $\zeros$, since there is a function applied to
the recursive call which is not a constructor. However, there is a
difference between the definitions of $\zeros$ and $\filter$. The
former may be seen as a wrong way of characterising an object which
makes sense, and it can be reformulated in an admissible way using
the equation (*). On the contrary, the definition of $\filter$ cannot
be patched, since it is the very idea of traversing an infinite
construction in search of an element whose existence is not ensured
which does not make sense.

Guarded definitions are exactly the kind of non-ending process of
construction which are allowed in Coq. The way of introducing a
guarded definition in Coq is using the special command
{\tt CoFixpoint}. This command verifies that the definition introduces an
element of a co-inductive type, and checks whether it is guarded by
constructors. If we try to introduce the definitions above, $\from$
and $\alter$ will be accepted, while $\zeros$ and $\filter$ will be
rejected with some explanation of why.

\begin{coq_example}
CoFixpoint zeros : Stream nat := cons nat 0%nat (tl nat zeros).
CoFixpoint zeros : Stream nat := cons nat 0%nat zeros.
CoFixpoint from (n:nat) : Stream nat := cons nat n (from (S n)).
\end{coq_example}

As in the \verb!Fixpoint! command (see Section~\ref{Fixpoint}), it is
possible to introduce a block of mutually dependent methods. The
general syntax for this case is:

{\tt CoFixpoint {\ident$_1$} : {\term$_1$} := {\term$_1'$}\\
with\\
\mbox{}\hspace{0.1cm} $\ldots$ \\
with {\ident$_m$} : {\term$_m$} := {\term$_m'$}}

\subsection{Non-ending methods and reduction}

The elimination of a stream introduced by a \verb!CoFixpoint!
definition is done lazily, i.e. its definition can be expanded only
when it occurs at the head of an application which is the argument of
a case expression. In isolation it is considered as a canonical
expression which is completely evaluated. We can test this using the
command \verb!compute! to calculate the normal forms of some terms~:

\begin{coq_example}
Eval compute in (from 0).
Eval compute in (hd nat (from 0)).
Eval compute in (tl nat (from 0)).
\end{coq_example}

\noindent
Thus, the equality $(\from\;n)\equiv(\cons\;\nat\;n\;(\from \; (\S\;n)))$
does not hold as a definitional one. Nevertheless, it can be proved
as a propositional equality, in the sense of Leibniz's equality. The
version {\it \`a la Leibniz} of the equality above follows from a
general lemma stating that eliminating and then re-introducing a
stream yields the same stream.

\begin{coq_example}
Lemma unfold_Stream :
   forall x:Stream nat, x = match x with
                            | cons a s => cons nat a s
                            end.
\end{coq_example}

\noindent
The proof is immediate from the analysis of the possible cases for
$x$, which transforms the equality into a trivial one.

\begin{coq_example}
destruct x.
trivial.
\end{coq_example}
\begin{coq_eval}
Qed.
\end{coq_eval}

The application of this lemma to $(\from\;n)$ puts this constant at
the head of an application which is an argument of a case analysis,
forcing its expansion. We can test the type of this application using
Coq's command \verb!Check!, which infers the type of a given term.

\begin{coq_example}
Check (fun n:nat => unfold_Stream (from n)).
\end{coq_example} \noindent Actually, The elimination of $(\from\;n)$ has actually no effect, because it is followed by a re-introduction, so the type of this application is in fact definitionally equal to the desired proposition. We can test this computing the normal form of the application above to see its type. \begin{coq_example} Transparent unfold_Stream. Eval compute in (fun n:nat => unfold_Stream (from n)). \end{coq_example} \section{Reasoning about infinite objects} At a first sight, it might seem that case analysis does not provide a very powerful way of reasoning about infinite objects. In fact, what we can prove about an infinite object using only case analysis is just what we can prove unfolding its method of construction a finite number of times, which is not always enough. Consider for example the following method for appending two streams~: \begin{coq_example} Variable A : Set. CoFixpoint conc (s1 s2:Stream A) : Stream A := cons A (hd A s1) (conc (tl A s1) s2). \end{coq_example} Informally speaking, we expect that for all pair of streams $s_1$ and $s_2$, $(\conc\;s_1\;s_2)$ defines the ``the same'' stream as $s_1$, in the sense that if we would be able to unfold the definition ``up to the infinite'', we would obtain definitionally equal normal forms. However, no finite unfolding of the definitions gives definitionally equal terms. Their equality can not be proved just using case analysis. The weakness of the elimination principle proposed for infinite objects contrast with the power provided by the inductive elimination principles, but it is not actually surprising. It just means that we can not expect to prove very interesting things about infinite objects doing finite proofs. To take advantage of infinite objects we have to consider infinite proofs as well. For example, if we want to catch up the equality between $(\conc\;s_1\;s_2)$ and $s_1$ we have to introduce first the type of the infinite proofs of equality between streams. This is a co-inductive type, whose elements are build up from a unique constructor, requiring a proof of the equality of the heads of the streams, and an (infinite) proof of the equality of their tails. \begin{coq_example} CoInductive EqSt : Stream A -> Stream A -> Prop := eqst : forall s1 s2:Stream A, hd A s1 = hd A s2 -> EqSt (tl A s1) (tl A s2) -> EqSt s1 s2. \end{coq_example} \noindent Now the equality of both streams can be proved introducing an infinite object of type \noindent $(\EqSt\;s_1\;(\conc\;s_1\;s_2))$ by a \verb!CoFixpoint! definition. \begin{coq_example} CoFixpoint eqproof (s1 s2:Stream A) : EqSt s1 (conc s1 s2) := eqst s1 (conc s1 s2) (eq_refl (hd A (conc s1 s2))) (eqproof (tl A s1) s2). \end{coq_example} \begin{coq_eval} Reset eqproof. \end{coq_eval} \noindent Instead of giving an explicit definition, we can use the proof editor of Coq to help us in the construction of the proof. A tactic \verb!Cofix! allows to place a \verb!CoFixpoint! definition inside a proof. This tactic introduces a variable in the context which has the same type as the current goal, and its application stands for a recursive call in the construction of the proof. If no name is specified for this variable, the name of the lemma is chosen by default. %\pagebreak \begin{coq_example} Lemma eqproof : forall s1 s2:Stream A, EqSt s1 (conc s1 s2). cofix. \end{coq_example} \noindent An easy (and wrong!) way of finishing the proof is just to apply the variable \verb!eqproof!, which has the same type as the goal. \begin{coq_example} intros. apply eqproof. 
\end{coq_example} \noindent The ``proof'' constructed in this way would correspond to the \verb!CoFixpoint! definition \begin{coq_example*} CoFixpoint eqproof : forall s1 s2:Stream A, EqSt s1 (conc s1 s2) := eqproof. \end{coq_example*} \noindent which is obviously non-guarded. This means that we can use the proof editor to define a method of construction which does not make sense. However, the system will never accept to include it as part of the theory, because the guard condition is always verified before saving the proof. \begin{coq_example} Qed. \end{coq_example} \noindent Thus, the user must be careful in the construction of infinite proofs with the tactic \verb!Cofix!. Remark that once it has been used the application of tactics performing automatic proof search in the environment (like for example \verb!Auto!) could introduce unguarded recursive calls in the proof. The command \verb!Guarded! allows to verify if the guarded condition has been violated during the construction of the proof. This command can be applied even if the proof term is not complete. \begin{coq_example} Restart. cofix. auto. Guarded. Undo. Guarded. \end{coq_example} \noindent To finish with this example, let us restart from the beginning and show how to construct an admissible proof~: \begin{coq_example} Restart. cofix. \end{coq_example} %\pagebreak \begin{coq_example} intros. apply eqst. trivial. simpl. apply eqproof. Qed. \end{coq_example} \section{Experiments with co-inductive types} Some examples involving co-inductive types are available with the distributed system, in the theories library and in the contributions of the Lyon site. Here we present a short description of their contents~: \begin{itemize} \item Directory \verb!theories/LISTS! : \begin{itemize} \item File \verb!Streams.v! : The type of streams and the extensional equality between streams. \end{itemize} \item Directory \verb!contrib/Lyon/COINDUCTIVES! : \begin{itemize} \item Directory \verb!ARITH! : An arithmetic where $\infty$ is an explicit constant of the language instead of a metatheoretical notion. \item Directory \verb!STREAM! : \begin{itemize} \item File \verb!Examples! : Several examples of guarded definitions, as well as of frequent errors in the introduction of a stream. A different way of defining the extensional equality of two streams, and the proofs showing that it is equivalent to the one in \verb!theories!. \item File \verb!Alter.v! : An example showing how an infinite proof introduced by a guarded definition can be also described using an operator of co-recursion \cite{Gimenez95b}. \end{itemize} \item Directory \verb!PROCESSES! : A proof of the alternating bit protocol based on Pra\-sad's Calculus of Broadcasting Systems \cite{Prasad93}, and the verification of an interpreter for this calculus. See \cite{Gimenez95b} for a complete description about this development. \end{itemize} \end{itemize} %\end{document} coq-8.4pl2/doc/refman/Extraction.tex0000640000175000001440000005411012011455307016505 0ustar notinusers\achapter{Extraction of programs in Objective Caml and Haskell} \label{Extraction} \aauthor{Jean-Christophe Fillitre and Pierre Letouzey} \index{Extraction} We present here the \Coq\ extraction commands, used to build certified and relatively efficient functional programs, extracting them from either \Coq\ functions or \Coq\ proofs of specifications. The functional languages available as output are currently \ocaml{}, \textsc{Haskell} and \textsc{Scheme}. 
In the following, ``ML'' will be used (abusively) to refer to any of the three. \paragraph{Differences with old versions.} The current extraction mechanism is new for version 7.0 of {\Coq}. In particular, the \FW\ toplevel used as an intermediate step between \Coq\ and ML has been withdrawn. It is also not possible any more to import ML objects in this \FW\ toplevel. The current mechanism also differs from the one in previous versions of \Coq: there is no more an explicit toplevel for the language (formerly called \textsc{Fml}). \asection{Generating ML code} \comindex{Extraction} \comindex{Recursive Extraction} \comindex{Separate Extraction} \comindex{Extraction Library} \comindex{Recursive Extraction Library} The next two commands are meant to be used for rapid preview of extraction. They both display extracted term(s) inside \Coq. \begin{description} \item {\tt Extraction \qualid.} ~\par Extracts one constant or module in the \Coq\ toplevel. \item {\tt Recursive Extraction \qualid$_1$ \dots\ \qualid$_n$.} ~\par Recursive extraction of all the globals (or modules) \qualid$_1$ \dots\ \qualid$_n$ and all their dependencies in the \Coq\ toplevel. \end{description} %% TODO error messages All the following commands produce real ML files. User can choose to produce one monolithic file or one file per \Coq\ library. \begin{description} \item {\tt Extraction "{\em file}"} \qualid$_1$ \dots\ \qualid$_n$. ~\par Recursive extraction of all the globals (or modules) \qualid$_1$ \dots\ \qualid$_n$ and all their dependencies in one monolithic file {\em file}. Global and local identifiers are renamed according to the chosen ML language to fulfill its syntactic conventions, keeping original names as much as possible. \item {\tt Extraction Library} \ident. ~\par Extraction of the whole \Coq\ library {\tt\ident.v} to an ML module {\tt\ident.ml}. In case of name clash, identifiers are here renamed using prefixes \verb!coq_! or \verb!Coq_! to ensure a session-independent renaming. \item {\tt Recursive Extraction Library} \ident. ~\par Extraction of the \Coq\ library {\tt\ident.v} and all other modules {\tt\ident.v} depends on. \item {\tt Separate Extraction} \qualid$_1$ \dots\ \qualid$_n$. ~\par Recursive extraction of all the globals (or modules) \qualid$_1$ \dots\ \qualid$_n$ and all their dependencies, just as {\tt Extraction "{\em file}"}, but instead of producing one monolithic file, this command splits the produced code in separate ML files, one per corresponding Coq {\tt .v} file. This command is hence quite similar to {\tt Recursive Extraction Library}, except that only the needed parts of Coq libraries are extracted instead of the whole. The naming convention in case of name clash is the same one as {\tt Extraction Library} : identifiers are here renamed using prefixes \verb!coq_! or \verb!Coq_!. \end{description} The list of globals \qualid$_i$ does not need to be exhaustive: it is automatically completed into a complete and minimal environment. \asection{Extraction options} \asubsection{Setting the target language} \comindex{Extraction Language} The ability to fix target language is the first and more important of the extraction options. Default is Ocaml. \begin{description} \item {\tt Extraction Language Ocaml}. \item {\tt Extraction Language Haskell}. \item {\tt Extraction Language Scheme}. 
\end{description}

\asubsection{Inlining and optimizations}

Since Objective Caml is a strict language, the extracted code has to
be optimized in order to be efficient (for instance, when using
induction principles we do not want to compute all the recursive
calls but only the needed ones). So the extraction mechanism provides
an automatic optimization routine that will be called each time the
user wants to generate Ocaml programs. Essentially, it performs
constant inlining and reductions. Therefore some constants may not
appear in the resulting monolithic Ocaml program. In the case of
modular extraction, even if some inlining is done, the inlined
constants are nevertheless printed, to ensure session-independent
programs. Concerning Haskell, such optimizations are less useful
because of laziness. We still make some optimizations, for example in
order to produce more readable code.

All these optimizations are controlled by the following \Coq\ options:

\begin{description}

\item \comindex{Set Extraction Optimize} {\tt Set Extraction Optimize.}

\item \comindex{Unset Extraction Optimize} {\tt Unset Extraction Optimize.}

Default is Set. This controls all optimizations made on the ML terms
(mostly reduction of dummy beta/iota redexes, but also simplifications
on Cases, etc). Unset this option if you want an ML term as close as
possible to the Coq term.

\item \comindex{Set Extraction KeepSingleton} {\tt Set Extraction KeepSingleton.}

\item \comindex{Unset Extraction KeepSingleton} {\tt Unset Extraction KeepSingleton.}

Default is Unset. Normally, when the extraction of an inductive type
produces a singleton type (i.e. a type with only one constructor, and
only one argument to this constructor), the inductive structure is
removed and this type is seen as an alias to the inner type.
The typical example is {\tt sig}. This option allows one to disable
this optimization when one wishes to preserve the inductive structure
of types.

\item \comindex{Set Extraction AutoInline} {\tt Set Extraction AutoInline.}

\item \comindex{Unset Extraction AutoInline} {\tt Unset Extraction AutoInline.}

Default is Set. By default, the extraction mechanism feels free to
inline the bodies of some defined constants, according to some
heuristics like the size of bodies, the usefulness of some arguments,
etc. Those heuristics are not always perfect; you may want to disable
this feature by unsetting the option.

\item \comindex{Extraction Inline} {\tt Extraction Inline} \qualid$_1$ \dots\ \qualid$_n$.

\item \comindex{Extraction NoInline} {\tt Extraction NoInline} \qualid$_1$ \dots\ \qualid$_n$.

In addition to the automatic inline feature, you can tell precisely
which additional constants to inline with the {\tt Extraction Inline}
command. Conversely, you can forbid the automatic inlining of some
specific constants with the {\tt Extraction NoInline} command. These
two commands enable precise control over what is inlined and what is
not.

\item \comindex{Print Extraction Inline} {\tt Print Extraction Inline}.

Prints the current state of the table recording the custom inlinings
declared by the two previous commands.

\item \comindex{Reset Extraction Inline} {\tt Reset Extraction Inline}.

Puts the table recording the custom inlinings back to empty.

\end{description}

\paragraph{Inlining and printing of a constant declaration.}

A user can explicitly ask for a constant to be extracted by two means:
\begin{itemize}
\item by mentioning it on the extraction command line
\item by extracting the whole \Coq\ module of this constant.
\end{itemize} In both cases, the declaration of this constant will be present in the produced file. But this same constant may or may not be inlined in the following terms, depending on the automatic/custom inlining mechanism. For the constants non-explicitly required but needed for dependency reasons, there are two cases: \begin{itemize} \item If an inlining decision is taken, whether automatically or not, all occurrences of this constant are replaced by its extracted body, and this constant is not declared in the generated file. \item If no inlining decision is taken, the constant is normally declared in the produced file. \end{itemize} \asubsection{Extra elimination of useless arguments} \begin{description} \item \comindex{Extraction Implicit} {\tt Extraction Implicit} \qualid\ [ \ident$_1$ \dots\ \ident$_n$ ]. This experimental command allows to declare some arguments of \qualid\ as implicit, i.e. useless in extracted code and hence to be removed by extraction. Here \qualid\ can be any function or inductive constructor, and \ident$_i$ are the names of the concerned arguments. In fact, an argument can also be referred by a number indicating its position, starting from 1. When an actual extraction takes place, an error is raised if the {\tt Extraction Implicit} declarations cannot be honored, that is if any of the implicited variables still occurs in the final code. This declaration of useless arguments is independent but complementary to the main elimination principles of extraction (logical parts and types). \end{description} \asubsection{Realizing axioms}\label{extraction:axioms} Extraction will fail if it encounters an informative axiom not realized (see Section~\ref{extraction:axioms}). A warning will be issued if it encounters an logical axiom, to remind user that inconsistent logical axioms may lead to incorrect or non-terminating extracted terms. It is possible to assume some axioms while developing a proof. Since these axioms can be any kind of proposition or object or type, they may perfectly well have some computational content. But a program must be a closed term, and of course the system cannot guess the program which realizes an axiom. Therefore, it is possible to tell the system what ML term corresponds to a given axiom. \comindex{Extract Constant} \begin{description} \item{\tt Extract Constant \qualid\ => \str.} ~\par Give an ML extraction for the given constant. The \str\ may be an identifier or a quoted string. \item{\tt Extract Inlined Constant \qualid\ => \str.} ~\par Same as the previous one, except that the given ML terms will be inlined everywhere instead of being declared via a let. \end{description} Note that the {\tt Extract Inlined Constant} command is sugar for an {\tt Extract Constant} followed by a {\tt Extraction Inline}. Hence a {\tt Reset Extraction Inline} will have an effect on the realized and inlined axiom. Of course, it is the responsibility of the user to ensure that the ML terms given to realize the axioms do have the expected types. In fact, the strings containing realizing code are just copied in the extracted files. The extraction recognizes whether the realized axiom should become a ML type constant or a ML object declaration. \Example \begin{coq_example} Axiom X:Set. Axiom x:X. Extract Constant X => "int". Extract Constant x => "0". \end{coq_example} Notice that in the case of type scheme axiom (i.e. whose type is an arity, that is a sequence of product finished by a sort), then some type variables has to be given. 
The syntax is then: \begin{description} \item{\tt Extract Constant \qualid\ \str$_1$ \ldots \str$_n$ => \str.} ~\par \end{description} The number of type variables is checked by the system. \Example \begin{coq_example} Axiom Y : Set -> Set -> Set. Extract Constant Y "'a" "'b" => " 'a*'b ". \end{coq_example} Realizing an axiom via {\tt Extract Constant} is only useful in the case of an informative axiom (of sort Type or Set). A logical axiom have no computational content and hence will not appears in extracted terms. But a warning is nonetheless issued if extraction encounters a logical axiom. This warning reminds user that inconsistent logical axioms may lead to incorrect or non-terminating extracted terms. If an informative axiom has not been realized before an extraction, a warning is also issued and the definition of the axiom is filled with an exception labeled {\tt AXIOM TO BE REALIZED}. The user must then search these exceptions inside the extracted file and replace them by real code. \comindex{Extract Inductive} The system also provides a mechanism to specify ML terms for inductive types and constructors. For instance, the user may want to use the ML native boolean type instead of \Coq\ one. The syntax is the following: \begin{description} \item{\tt Extract Inductive \qualid\ => \str\ [ \str\ \dots \str\ ]\ {\it optstring}.} ~\par Give an ML extraction for the given inductive type. You must specify extractions for the type itself (first \str) and all its constructors (between square brackets). If given, the final optional string should contain a function emulating pattern-matching over this inductive type. If this optional string is not given, the ML extraction must be an ML inductive datatype, and the native pattern-matching of the language will be used. \end{description} For an inductive type with $k$ constructor, the function used to emulate the match should expect $(k+1)$ arguments, first the $k$ branches in functional form, and then the inductive element to destruct. For instance, the match branch \verb$| S n => foo$ gives the functional form \verb$(fun n -> foo)$. Note that a constructor with no argument is considered to have one unit argument, in order to block early evaluation of the branch: \verb$| O => bar$ leads to the functional form \verb$(fun () -> bar)$. For instance, when extracting {\tt nat} into {\tt int}, the code to provide has type: {\tt (unit->'a)->(int->'a)->int->'a}. As for {\tt Extract Inductive}, this command should be used with care: \begin{itemize} \item The ML code provided by the user is currently \emph{not} checked at all by extraction, even for syntax errors. \item Extracting an inductive type to a pre-existing ML inductive type is quite sound. But extracting to a general type (by providing an ad-hoc pattern-matching) will often \emph{not} be fully rigorously correct. For instance, when extracting {\tt nat} to Ocaml's {\tt int}, it is theoretically possible to build {\tt nat} values that are larger than Ocaml's {\tt max\_int}. It is the user's responsability to be sure that no overflow or other bad events occur in practice. \item Translating an inductive type to an ML type does \emph{not} magically improve the asymptotic complexity of functions, even if the ML type is an efficient representation. For instance, when extracting {\tt nat} to Ocaml's {\tt int}, the function {\tt mult} stays quadratic. It might be interesting to associate this translation with some specific {\tt Extract Constant} when primitive counterparts exist. 
\end{itemize} \Example Typical examples are the following: \begin{coq_example} Extract Inductive unit => "unit" [ "()" ]. Extract Inductive bool => "bool" [ "true" "false" ]. Extract Inductive sumbool => "bool" [ "true" "false" ]. \end{coq_example} If an inductive constructor or type has arity 2 and the corresponding string is enclosed by parenthesis, then the rest of the string is used as infix constructor or type. \begin{coq_example} Extract Inductive list => "list" [ "[]" "(::)" ]. Extract Inductive prod => "(*)" [ "(,)" ]. \end{coq_example} As an example of translation to a non-inductive datatype, let's turn {\tt nat} into Ocaml's {\tt int} (see caveat above): \begin{coq_example} Extract Inductive nat => int [ "0" "succ" ] "(fun fO fS n -> if n=0 then fO () else fS (n-1))". \end{coq_example} \asubsection{Avoiding conflicts with existing filenames} \comindex{Extraction Blacklist} When using {\tt Extraction Library}, the names of the extracted files directly depends from the names of the \Coq\ files. It may happen that these filenames are in conflict with already existing files, either in the standard library of the target language or in other code that is meant to be linked with the extracted code. For instance the module {\tt List} exists both in \Coq\ and in Ocaml. It is possible to instruct the extraction not to use particular filenames. \begin{description} \item{\tt Extraction Blacklist \ident \ldots \ident.} ~\par Instruct the extraction to avoid using these names as filenames for extracted code. \item{\tt Print Extraction Blacklist.} ~\par Show the current list of filenames the extraction should avoid. \item{\tt Reset Extraction Blacklist.} ~\par Allow the extraction to use any filename. \end{description} For Ocaml, a typical use of these commands is {\tt Extraction Blacklist String List}. \asection{Differences between \Coq\ and ML type systems} Due to differences between \Coq\ and ML type systems, some extracted programs are not directly typable in ML. We now solve this problem (at least in Ocaml) by adding when needed some unsafe casting {\tt Obj.magic}, which give a generic type {\tt 'a} to any term. For example, here are two kinds of problem that can occur: \begin{itemize} \item If some part of the program is {\em very} polymorphic, there may be no ML type for it. In that case the extraction to ML works all right but the generated code may be refused by the ML type-checker. A very well known example is the {\em distr-pair} function: \begin{verbatim} Definition dp := fun (A B:Set)(x:A)(y:B)(f:forall C:Set, C->C) => (f A x, f B y). \end{verbatim} In Ocaml, for instance, the direct extracted term would be: \begin{verbatim} let dp x y f = Pair((f () x),(f () y)) \end{verbatim} and would have type: \begin{verbatim} dp : 'a -> 'a -> (unit -> 'a -> 'b) -> ('b,'b) prod \end{verbatim} which is not its original type, but a restriction. We now produce the following correct version: \begin{verbatim} let dp x y f = Pair ((Obj.magic f () x), (Obj.magic f () y)) \end{verbatim} \item Some definitions of \Coq\ may have no counterpart in ML. This happens when there is a quantification over types inside the type of a constructor; for example: \begin{verbatim} Inductive anything : Set := dummy : forall A:Set, A -> anything. \end{verbatim} which corresponds to the definition of an ML dynamic type. In Ocaml, we must cast any argument of the constructor dummy. \end{itemize} Even with those unsafe castings, you should never get error like ``segmentation fault''. 
In fact even if your program may seem ill-typed to the Ocaml type-checker, it can't go wrong: it comes from a Coq well-typed terms, so for example inductives will always have the correct number of arguments, etc. More details about the correctness of the extracted programs can be found in \cite{Let02}. We have to say, though, that in most ``realistic'' programs, these problems do not occur. For example all the programs of Coq library are accepted by Caml type-checker without any {\tt Obj.magic} (see examples below). \asection{Some examples} We present here two examples of extractions, taken from the \Coq\ Standard Library. We choose \ocaml\ as target language, but all can be done in the other dialects with slight modifications. We then indicate where to find other examples and tests of Extraction. \asubsection{A detailed example: Euclidean division} The file {\tt Euclid} contains the proof of Euclidean division (theorem {\tt eucl\_dev}). The natural numbers defined in the example files are unary integers defined by two constructors $O$ and $S$: \begin{coq_example*} Inductive nat : Set := | O : nat | S : nat -> nat. \end{coq_example*} This module contains a theorem {\tt eucl\_dev}, whose type is: \begin{verbatim} forall b:nat, b > 0 -> forall a:nat, diveucl a b \end{verbatim} where {\tt diveucl} is a type for the pair of the quotient and the modulo, plus some logical assertions that disappear during extraction. We can now extract this program to \ocaml: \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Require Import Euclid Wf_nat. Extraction Inline gt_wf_rec lt_wf_rec induction_ltof2. Recursive Extraction eucl_dev. \end{coq_example} The inlining of {\tt gt\_wf\_rec} and others is not mandatory. It only enhances readability of extracted code. You can then copy-paste the output to a file {\tt euclid.ml} or let \Coq\ do it for you with the following command: \begin{verbatim} Extraction "euclid" eucl_dev. \end{verbatim} Let us play the resulting program: \begin{verbatim} # #use "euclid.ml";; type nat = O | S of nat type sumbool = Left | Right val minus : nat -> nat -> nat = val le_lt_dec : nat -> nat -> sumbool = val le_gt_dec : nat -> nat -> sumbool = type diveucl = Divex of nat * nat val eucl_dev : nat -> nat -> diveucl = # eucl_dev (S (S O)) (S (S (S (S (S O)))));; - : diveucl = Divex (S (S O), S O) \end{verbatim} It is easier to test on \ocaml\ integers: \begin{verbatim} # let rec nat_of_int = function 0 -> O | n -> S (nat_of_int (n-1));; val i2n : int -> nat = # let rec int_of_nat = function O -> 0 | S p -> 1+(int_of_nat p);; val n2i : nat -> int = # let div a b = let Divex (q,r) = eucl_dev (nat_of_int b) (nat_of_int a) in (int_of_nat q, int_of_nat r);; val div : int -> int -> int * int = # div 173 15;; - : int * int = (11, 8) \end{verbatim} Note that these {\tt nat\_of\_int} and {\tt int\_of\_nat} are now available via a mere {\tt Require Import ExtrOcamlIntConv} and then adding these functions to the list of functions to extract. This file {\tt ExtrOcamlIntConv.v} and some others in {\tt plugins/extraction/} are meant to help building concrete program via extraction. \asubsection{Extraction's horror museum} Some pathological examples of extraction are grouped in the file {\tt test-suite/success/extraction.v} of the sources of \Coq. \asubsection{Users' Contributions} Several of the \Coq\ Users' Contributions use extraction to produce certified programs. 
In particular the following ones have an automatic extraction test (just run {\tt make} in those directories): \begin{itemize} \item Bordeaux/Additions \item Bordeaux/EXCEPTIONS \item Bordeaux/SearchTrees \item Dyade/BDDS \item Lannion \item Lyon/CIRCUITS \item Lyon/FIRING-SQUAD \item Marseille/CIRCUITS \item Muenchen/Higman \item Nancy/FOUnify \item Rocq/ARITH/Chinese \item Rocq/COC \item Rocq/GRAPHS \item Rocq/HIGMAN \item Sophia-Antipolis/Stalmarck \item Suresnes/BDD \end{itemize} Lannion, Rocq/HIGMAN and Lyon/CIRCUITS are a bit particular. They are examples of developments where {\tt Obj.magic} are needed. This is probably due to an heavy use of impredicativity. After compilation those two examples run nonetheless, thanks to the correction of the extraction~\cite{Let02}. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-sch.tex0000640000175000001440000003033611750246737016332 0ustar notinusers\chapter{Proof schemes} \section{Generation of induction principles with {\tt Scheme}} \label{Scheme} \index{Schemes} \comindex{Scheme} The {\tt Scheme} command is a high-level tool for generating automatically (possibly mutual) induction principles for given types and sorts. Its syntax follows the schema: \begin{quote} {\tt Scheme {\ident$_1$} := Induction for \ident'$_1$ Sort {\sort$_1$} \\ with\\ \mbox{}\hspace{0.1cm} \dots\\ with {\ident$_m$} := Induction for {\ident'$_m$} Sort {\sort$_m$}} \end{quote} where \ident'$_1$ \dots\ \ident'$_m$ are different inductive type identifiers belonging to the same package of mutual inductive definitions. This command generates {\ident$_1$}\dots{} {\ident$_m$} to be mutually recursive definitions. Each term {\ident$_i$} proves a general principle of mutual induction for objects in type {\term$_i$}. \begin{Variants} \item {\tt Scheme {\ident$_1$} := Minimality for \ident'$_1$ Sort {\sort$_1$} \\ with\\ \mbox{}\hspace{0.1cm} \dots\ \\ with {\ident$_m$} := Minimality for {\ident'$_m$} Sort {\sort$_m$}} Same as before but defines a non-dependent elimination principle more natural in case of inductively defined relations. \item {\tt Scheme Equality for \ident$_1$\comindex{Scheme Equality}} Tries to generate a boolean equality and a proof of the decidability of the usual equality. If \ident$_i$ involves some other inductive types, their equality has to be defined first. \item {\tt Scheme Induction for \ident$_1$ Sort {\sort$_1$} \\ with\\ \mbox{}\hspace{0.1cm} \dots\\ with Induction for {\ident$_m$} Sort {\sort$_m$}} If you do not provide the name of the schemes, they will be automatically computed from the sorts involved (works also with Minimality). \end{Variants} \label{Scheme-examples} \firstexample \example{Induction scheme for \texttt{tree} and \texttt{forest}} The definition of principle of mutual induction for {\tt tree} and {\tt forest} over the sort {\tt Set} is defined by the command: \begin{coq_eval} Reset Initial. Variables A B : Set. \end{coq_eval} \begin{coq_example*} Inductive tree : Set := node : A -> forest -> tree with forest : Set := | leaf : B -> forest | cons : tree -> forest -> forest. Scheme tree_forest_rec := Induction for tree Sort Set with forest_tree_rec := Induction for forest Sort Set. \end{coq_example*} You may now look at the type of {\tt tree\_forest\_rec}: \begin{coq_example} Check tree_forest_rec. 
\end{coq_example}

This principle involves two different predicates for {\tt trees} and
{\tt forests}; it also has three premises, each one corresponding to a
constructor of one of the inductive definitions.

The principle {\tt forest\_tree\_rec} shares exactly the same
premises, only the conclusion now refers to the property of forests.

\begin{coq_example}
Check forest_tree_rec.
\end{coq_example}

\example{Predicates {\tt odd} and {\tt even} on naturals}

Let {\tt odd} and {\tt even} be inductively defined as:

% Reset Initial.
\begin{coq_eval}
Open Scope nat_scope.
\end{coq_eval}

\begin{coq_example*}
Inductive odd : nat -> Prop :=
    oddS : forall n:nat, even n -> odd (S n)
with even : nat -> Prop :=
  | evenO : even 0
  | evenS : forall n:nat, odd n -> even (S n).
\end{coq_example*}

The following command generates a powerful elimination principle:

\begin{coq_example}
Scheme odd_even := Minimality for odd Sort Prop
with even_odd := Minimality for even Sort Prop.
\end{coq_example}

The type of {\tt odd\_even} for instance will be:

\begin{coq_example}
Check odd_even.
\end{coq_example}

The type of {\tt even\_odd} shares the same premises but the
conclusion is {\tt (n:nat)(even n)->(Q n)}.

\subsection{Automatic declaration of schemes}
\comindex{Set Equality Schemes}
\comindex{Set Elimination Schemes}

It is possible to deactivate the automatic declaration of the
induction principles when defining a new inductive type with the
{\tt Unset Elimination Schemes} command. It may be reactivated at any
time with {\tt Set Elimination Schemes}. \\
You can also activate the automatic declaration of the boolean
equalities (see the second variant of {\tt Scheme}) with the
{\tt Set Equality Schemes} command. However, you have to be careful
with this option since \Coq\ may now reject well-defined inductive
types because it cannot compute a boolean equality for them.

\subsection{\tt Combined Scheme}
\label{CombinedScheme}
\comindex{Combined Scheme}

The {\tt Combined Scheme} command is a tool for combining induction
principles generated by the {\tt Scheme} command.
Its syntax follows the schema:
\begin{quote}
{\tt Combined Scheme {\ident$_0$} from {\ident$_1$}, .., {\ident$_n$}}
\end{quote}
where \ident$_1$ \ldots \ident$_n$ are different inductive principles
that must belong to the same package of mutual inductive principle
definitions. This command generates {\ident$_0$} to be the
conjunction of the principles: it is built from the common premises
of the principles and concluded by the conjunction of their
conclusions.

\Example
We can define the induction principles for trees and forests using:
\begin{coq_example}
Scheme tree_forest_ind := Induction for tree Sort Prop
with forest_tree_ind := Induction for forest Sort Prop.
\end{coq_example}

Then we can build the combined induction principle which gives the
conjunction of the conclusions of each individual principle:
\begin{coq_example}
Combined Scheme tree_forest_mutind from tree_forest_ind, forest_tree_ind.
\end{coq_example}

The type of {\tt tree\_forest\_mutind} will be:
\begin{coq_example}
Check tree_forest_mutind.
\end{coq_example}

\section{Generation of induction principles with {\tt Functional Scheme}}
\label{FunScheme}
\comindex{Functional Scheme}

The {\tt Functional Scheme} command is a high-level experimental
tool for automatically generating induction principles corresponding
to (possibly mutually recursive) functions.
Its syntax follows the schema: \begin{quote} {\tt Functional Scheme {\ident$_1$} := Induction for \ident'$_1$ Sort {\sort$_1$} \\ with\\ \mbox{}\hspace{0.1cm} \dots\ \\ with {\ident$_m$} := Induction for {\ident'$_m$} Sort {\sort$_m$}} \end{quote} where \ident'$_1$ \dots\ \ident'$_m$ are different mutually defined function names (they must be in the same order as when they were defined). This command generates the induction principles \ident$_1$\dots\ident$_m$, following the recursive structure and case analyses of the functions \ident'$_1$ \dots\ \ident'$_m$. \Rem There is a difference between obtaining an induction scheme by using \texttt{Functional Scheme} on a function defined by \texttt{Function} or not. Indeed \texttt{Function} generally produces smaller principles, closer to the definition written by the user. \firstexample \example{Induction scheme for \texttt{div2}} \label{FunScheme-examples} We define the function \texttt{div2} as follows: \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Require Import Arith. Fixpoint div2 (n:nat) : nat := match n with | O => 0 | S O => 0 | S (S n') => S (div2 n') end. \end{coq_example*} The definition of a principle of induction corresponding to the recursive structure of \texttt{div2} is defined by the command: \begin{coq_example} Functional Scheme div2_ind := Induction for div2 Sort Prop. \end{coq_example} You may now look at the type of {\tt div2\_ind}: \begin{coq_example} Check div2_ind. \end{coq_example} We can now prove the following lemma using this principle: \begin{coq_example*} Lemma div2_le' : forall n:nat, div2 n <= n. intro n. pattern n , (div2 n). \end{coq_example*} \begin{coq_example} apply div2_ind; intros. \end{coq_example} \begin{coq_example*} auto with arith. auto with arith. simpl; auto with arith. Qed. \end{coq_example*} We can use directly the \texttt{functional induction} (\ref{FunInduction}) tactic instead of the pattern/apply trick: \tacindex{functional induction} \begin{coq_example*} Reset div2_le'. Lemma div2_le : forall n:nat, div2 n <= n. intro n. \end{coq_example*} \begin{coq_example} functional induction (div2 n). \end{coq_example} \begin{coq_example*} auto with arith. auto with arith. auto with arith. Qed. \end{coq_example*} \Rem There is a difference between obtaining an induction scheme for a function by using \texttt{Function} (see Section~\ref{Function}) and by using \texttt{Functional Scheme} after a normal definition using \texttt{Fixpoint} or \texttt{Definition}. See \ref{Function} for details. \example{Induction scheme for \texttt{tree\_size}} \begin{coq_eval} Reset Initial. \end{coq_eval} We define trees by the following mutual inductive type: \begin{coq_example*} Variable A : Set. Inductive tree : Set := node : A -> forest -> tree with forest : Set := | empty : forest | cons : tree -> forest -> forest. \end{coq_example*} We define the function \texttt{tree\_size} that computes the size of a tree or a forest. Note that we use \texttt{Function} which generally produces better principles. \begin{coq_example*} Function tree_size (t:tree) : nat := match t with | node A f => S (forest_size f) end with forest_size (f:forest) : nat := match f with | empty => 0 | cons t f' => (tree_size t + forest_size f') end. \end{coq_example*} \Rem \texttt{Function} generates itself non mutual induction principles {\tt tree\_size\_ind} and {\tt forest\_size\_ind}: \begin{coq_example} Check tree_size_ind. 
\end{coq_example} The definition of mutual induction principles following the recursive structure of \texttt{tree\_size} and \texttt{forest\_size} is defined by the command: \begin{coq_example*} Functional Scheme tree_size_ind2 := Induction for tree_size Sort Prop with forest_size_ind2 := Induction for forest_size Sort Prop. \end{coq_example*} You may now look at the type of {\tt tree\_size\_ind2}: \begin{coq_example} Check tree_size_ind2. \end{coq_example} \section{Generation of inversion principles with \tt Derive Inversion} \label{Derive-Inversion} \comindex{Derive Inversion} The syntax of {\tt Derive Inversion} follows the schema: \begin{quote} {\tt Derive Inversion {\ident} with forall $(\vec{x} : \vec{T})$, $I~\vec{t}$ Sort \sort} \end{quote} This command generates an inversion principle for the \texttt{inversion \dots\ using} tactic. \tacindex{inversion \dots\ using} Let $I$ be an inductive predicate and $\vec{x}$ the variables occurring in $\vec{t}$. This command generates and stocks the inversion lemma for the sort \sort~ corresponding to the instance $\forall (\vec{x}:\vec{T}), I~\vec{t}$ with the name {\ident} in the {\bf global} environment. When applied, it is equivalent to having inverted the instance with the tactic {\tt inversion}. \begin{Variants} \item \texttt{Derive Inversion\_clear {\ident} with forall $(\vec{x}:\vec{T})$, $I~\vec{t}$ Sort \sort}\\ \comindex{Derive Inversion\_clear} When applied, it is equivalent to having inverted the instance with the tactic \texttt{inversion} replaced by the tactic \texttt{inversion\_clear}. \item \texttt{Derive Dependent Inversion {\ident} with forall $(\vec{x}:\vec{T})$, $I~\vec{t}$ Sort \sort}\\ \comindex{Derive Dependent Inversion} When applied, it is equivalent to having inverted the instance with the tactic \texttt{dependent inversion}. \item \texttt{Derive Dependent Inversion\_clear {\ident} with forall $(\vec{x}:\vec{T})$, $I~\vec{t}$ Sort \sort}\\ \comindex{Derive Dependent Inversion\_clear} When applied, it is equivalent to having inverted the instance with the tactic \texttt{dependent inversion\_clear}. \end{Variants} \Example Let us consider the relation \texttt{Le} over natural numbers and the following variable: \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example*} Inductive Le : nat -> nat -> Set := | LeO : forall n:nat, Le 0 n | LeS : forall n m:nat, Le n m -> Le (S n) (S m). Variable P : nat -> nat -> Prop. \end{coq_example*} To generate the inversion lemma for the instance \texttt{(Le (S n) m)} and the sort \texttt{Prop}, we do: \begin{coq_example*} Derive Inversion_clear leminv with (forall n m:nat, Le (S n) m) Sort Prop. \end{coq_example*} \begin{coq_example} Check leminv. \end{coq_example} Then we can use the proven inversion lemma: \begin{coq_eval} Lemma ex : forall n m:nat, Le (S n) m -> P n m. intros. \end{coq_eval} \begin{coq_example} Show. \end{coq_example} \begin{coq_example} inversion H using leminv. \end{coq_example} coq-8.4pl2/doc/refman/RefMan-com.tex0000640000175000001440000003250711743503527016330 0ustar notinusers\chapter[The \Coq~commands]{The \Coq~commands\label{Addoc-coqc} \ttindex{coqtop} \ttindex{coqc}} There are three \Coq~commands: \begin{itemize} \item {\tt coqtop}: The \Coq\ toplevel (interactive mode) ; \item {\tt coqc} : The \Coq\ compiler (batch compilation). \item {\tt coqchk} : The \Coq\ checker (validation of compiled libraries) \end{itemize} The options are (basically) the same for the first two commands, and roughly described below. You can also look at the \verb!man! 
pages of \verb!coqtop! and \verb!coqc! for more details.

\section{Interactive use ({\tt coqtop})}

In the interactive mode, also known as the \Coq~toplevel, the user
can develop his theories and proofs step by step. The \Coq~toplevel
is run by the command {\tt coqtop}.

\index{byte-code}
\index{native code}
\label{binary-images}

There are two different binary images of \Coq: the byte-code one and
the native-code one (if Objective Caml provides a native-code
compiler for your platform, which is assumed in the following). When
invoking \verb!coqtop! or \verb!coqc!, the native-code version of the
system is used. The command-line options \verb!-byte! and \verb!-opt!
explicitly select the byte-code and the native-code versions,
respectively.

The byte-code toplevel is based on a Caml toplevel (to allow the
dynamic linking of tactics). You can switch to the Caml toplevel with
the command \verb!Drop.!, and come back to the \Coq~toplevel with the
command \verb!Toplevel.loop();;!.

\section{Batch compilation ({\tt coqc})}

The {\tt coqc} command takes a name {\em file} as argument. Then it
looks for a vernacular file named {\em file}{\tt .v}, and tries to
compile it into a {\em file}{\tt .vo} file (see~\ref{compiled}).

\Warning The name {\em file} must be a regular {\Coq} identifier, as
defined in Section~\ref{lexical}. It must only contain letters,
digits or underscores (\_). Thus it can be \verb+/bar/foo/toto.v+ but
cannot be \verb+/bar/foo/to-to.v+.

Notice that the \verb!-byte! and \verb!-opt! options are still
available with \verb!coqc! and allow you to select the byte-code or
native-code versions of the system.

\section[Customization]{Customization at launch time}

\subsection{By resource file\index{Resource file}}

When \Coq\ is launched, with either {\tt coqtop} or {\tt coqc}, the
resource file \verb:$XDG_CONFIG_HOME/coq/coqrc.xxx: is loaded, where
\verb:$XDG_CONFIG_HOME: is the configuration directory of the user
(by default \verb!~/.config! in the user's home directory) and
\verb:xxx: is the version number (e.g. 8.3). If this file is not
found, then the file \verb:$XDG_CONFIG_HOME/coqrc: is searched. You
can also specify an arbitrary name for the resource file (see option
\verb:-init-file: below).

This file may contain, for instance, \verb:Add LoadPath: commands to
add directories to the load path of \Coq. It is possible to skip the
loading of the resource file with the option \verb:-q:.

\subsection{By environment variables\label{EnvVariables}
\index{Environment variables}\label{envars}}

Load path can be specified to the \Coq\ system by setting the
\verb:$COQPATH: environment variable. It is a list of directories
separated by \verb|:| (\verb|;| on Windows).

\Coq\ will also honour \verb:$XDG_DATA_HOME: and \verb:$XDG_DATA_DIRS:
(see \url{http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html}).
\Coq\ adds \verb:${XDG_DATA_HOME}/coq: and \verb:${XDG_DATA_DIRS}/coq:
to its search path.

\subsection{By command line options\index{Options of the command line}
\label{vmoption}
\label{coqoptions}}

The following command-line options are recognized by the commands
{\tt coqc} and {\tt coqtop}, unless stated otherwise:

\begin{description}
\item[{\tt -I} {\em directory}, {\tt -include} {\em directory} [{\tt -as} {\em dirpath}]]\ 
  Add physical path {\em directory} to the list of directories in
  which to look for a file and bind it to the empty logical
  directory/the logical directory {\em dirpath}.
The sub-directory structure of {\em directory} is recursively available from {\Coq} using absolute names (extending the {\dirpath} prefix) (see Section~\ref{LongNames}). \SeeAlso {\tt Add LoadPath} in Section~\ref{AddLoadPath} and logical paths in Section~\ref{Libraries}. \item[{\tt -R} {\em directory} {\dirpath}, {\tt -R} {\em directory} [{\tt -as} {\dirpath}]]\ Do as {\tt -I} {\em directory} {\tt -as} {\dirpath} but make the sub-directory structure of {\em directory} recursively visible so that the recursive contents of physical {\em directory} is available from {\Coq} using short or partially qualified names. \SeeAlso {\tt Add Rec LoadPath} in Section~\ref{AddRecLoadPath} and logical paths in Section~\ref{Libraries}. \item[{\tt -top} {\dirpath}, {\tt -notop}]\ This sets the toplevel module name to {\dirpath}/the empty logical path instead of {\tt Top}. Not valid for {\tt coqc}. \item[{\tt -exclude-dir} {\em subdirectory}]\ This tells to exclude any sub-directory named {\em subdirectory} while processing option {\tt -R}. Without this option only the conventional version control management sub-directories named {\tt CVS} and {\tt \_darcs} are excluded. \item[{\tt -is} {\em file}, {\tt -inputstate} {\em file}, {\tt -outputstate} {\em file}]\ Load at the beginning/Dump at the end a \Coq{} state from the file {\em file}. Incompatible with some not purely functional aspect of the code \item[{\tt -nois}]\ Cause \Coq~to begin with an empty state. \item[{\tt -init-file} {\em file}, {\tt -q}]\ Take {\em file} as the resource file. / Cause \Coq~not to load the resource file. \item[{\tt -load-ml-source} {\em file}]\ Load the Caml source file {\em file}. \item[{\tt -load-ml-object} {\em file}]\ Load the Caml object file {\em file}. \item[{\tt -l[v]} {\em file}, {\tt -load-vernac-source[-verbose]} {\em file}]\ Load \Coq~file {\em file}{\tt .v} optionally with copy it contents on the standard input. \item[{\tt -load-vernac-object} {\em file}]\ Load \Coq~compiled file {\em file}{\tt .vo} \item[{\tt -require} {\em file}]\ Load \Coq~compiled file {\em file}{\tt .vo} and import it ({\tt Require} {\em file}). \item[{\tt -compile} {\em file},{\tt -compile-verbose} {\em file}, {\tt -batch}]\ {\tt coqtop} options only used internally by {\tt coqc}. This compiles file {\em file}{\tt .v} into {\em file}{\tt .vo} without/with a copy of the contents of the file on standard input. This option implies options {\tt -batch} (exit just after arguments parsing). It is only available for {\tt coqtop}. \item[{\tt -verbose}]\ This option is only for {\tt coqc}. It tells to compile the file with a copy of its contents on standard input. %Mostly unused in the code %\item[{\tt -debug}]\ % % Switch on the debug flag. \item[{\tt -xml}]\ This option is for use with {\tt coqc}. It tells \Coq\ to export on the standard output the content of the compiled file into XML format. \item[{\tt -with-geoproof} (yes|no)]\ Activate or not special functions for Geoproof within Coqide (default is yes). \item[{\tt -beautify}]\ While compiling {\em file}, pretty prints each command just after having parsing it in {\em file}{\tt .beautified} in order to get old-fashion syntax/definitions/notations. \item[{\tt -quality}] Improve the legibility of the proof terms produced by some tactics. \item[{\tt -emacs}, {\tt -ide-slave}]\ Start a special main loop to communicate with ide. 
\item[{\tt -impredicative-set}]\ Change the logical theory of {\Coq} by declaring the sort {\tt Set} impredicative; warning: this is known to be inconsistent with some standard axioms of classical mathematics such as the functional axiom of choice or the principle of description \item[{\tt -compat} {\em version}] \null Attempt to maintain some of the incompatible changes in their {\em version} behavior. \item[{\tt -dump-glob} {\em file}]\ This dumps references for global names in file {\em file} (to be used by coqdoc, see~\ref{coqdoc}) \item[{\tt -dont-load-proofs}]\ Warning: this is an unsafe mode. Instead of loading in memory the proofs of opaque theorems, they are treated as axioms. This results in smaller memory requirement and faster compilation, but the behavior of the system might slightly change (for instance during module subtyping), and some features won't be available (for example {\tt Print Assumptions}). \item[{\tt -lazy-load-proofs}]\ This is the default behavior. Proofs of opaque theorems aren't loaded immediately in memory, but only when necessary, for instance during some module subtyping or {\tt Print Assumptions}. This should be almost as fast and efficient as {\tt -dont-load-proofs}, with none of its drawbacks. \item[{\tt -force-load-proofs}]\ Proofs of opaque theorems are loaded in memory as soon as the corresponding {\tt Require} is done. This used to be Coq's default behavior. \item[{\tt -no-hash-consing}] \null \item[{\tt -vm}]\ This activates the use of the bytecode-based conversion algorithm for the current session (see Section~\ref{SetVirtualMachine}). \item[{\tt -image} {\em file}]\ This option sets the binary image to be used by {\tt coqc} to be {\em file} instead of the standard one. Not of general use. \item[{\tt -bindir} {\em directory}]\ Set for {\tt coqc} the directory containing \Coq\ binaries. It is equivalent to do \texttt{export COQBIN=}{\em directory} before launching {\tt coqc}. \item[{\tt -where}, {\tt -config}, {\tt -filteropts}]\ Print the \Coq's standard library location or \Coq's binaries, dependencies, libraries locations or the list of command line arguments that {\tt coqtop} has recognize as options and exit. \item[{\tt -v}]\ Print the \Coq's version and exit. \item[{\tt -h}, {\tt --help}]\ Print a short usage and exit. \end{description} \section{Compiled libraries checker ({\tt coqchk})} The {\tt coqchk} command takes a list of library paths as argument. The corresponding compiled libraries (.vo files) are searched in the path, recursively processing the libraries they depend on. The content of all these libraries is then type-checked. The effect of {\tt coqchk} is only to return with normal exit code in case of success, and with positive exit code if an error has been found. Error messages are not deemed to help the user understand what is wrong. In the current version, it does not modify the compiled libraries to mark them as successfully checked. Note that non-logical information is not checked. By logical information, we mean the type and optional body associated to names. It excludes for instance anything related to the concrete syntax of objects (customized syntax rules, association between short and long names), implicit arguments, etc. This tool can be used for several purposes. 
One is to check that a compiled library provided by a third party has not
been forged and that loading it cannot introduce
inconsistencies.\footnote{Ill-formed non-logical information might for
instance bind {\tt Coq.Init.Logic.True} to the short name {\tt False}, so
that {\tt False} apparently becomes inhabited; but, using fully qualified
names, {\tt Coq.Init.Logic.False} will always refer to the absurd
proposition. What we guarantee is that there is no proof of this latter
constant.} Another point is to get an even higher level of security. Since
{\tt coqtop} can be extended with custom tactics, possibly ill-typed code,
it cannot be guaranteed that the produced compiled libraries are correct.
{\tt coqchk} is a standalone verifier, and thus it cannot be tainted by
such malicious code.

Command-line options {\tt -I}, {\tt -R}, {\tt -where} and
{\tt -impredicative-set} are supported by {\tt coqchk} and have the same
meaning as for {\tt coqtop}. Extra options are:
\begin{description}
\item[{\tt -norec} $module$]\
  Check $module$ but do not force the checking of its dependencies.
\item[{\tt -admit} $module$] \
  Do not check $module$ nor any of its dependencies, unless explicitly
  required.
\item[{\tt -o}]\
  At exit, print a summary about the context. List the names of all
  assumptions and variables (constants without body).
\item[{\tt -silent}]\
  Do not write progress information on the standard output.
\end{description}

Environment variable \verb:$COQLIB: can be set to override the location of
the standard library.

The algorithm for deciding which modules are checked or admitted is the
following: assume that {\tt coqchk} is called with argument $M$, option
{\tt -norec} $N$, and {\tt -admit} $A$. Let us write $\overline{S}$ for the
set of reflexive transitive dependencies of a set $S$. Then:
\begin{itemize}
\item Modules $C=\overline{M}\backslash\overline{A}\cup M\cup N$ are loaded
  and type-checked before being added to the context.
\item And $\overline{M}\cup\overline{N}\backslash C$ is the set of modules
  that are loaded and added to the context without type-checking. Basic
  integrity checks (checksums) are nonetheless performed.
\end{itemize}

As a rule of thumb, the {\tt -admit} option can be used to state that some
libraries have already been checked. So {\tt coqchk A B} can be split into
{\tt coqchk A \&\& coqchk B -admit A} without type-checking any definition
twice. Of course, the latter is slightly slower since it makes more disk
accesses. It is also less secure since an attacker might have replaced the
compiled library $A$ after it has been read by the first command, but
before it has been read by the second command.

%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
coq-8.4pl2/doc/refman/RefMan-mod.tex0000640000175000001440000002634411403446003016317 0ustar notinusers\section{Module system \index{Modules} \label{section:Modules}}

The module system provides a way of packaging related elements together,
as well as a means of massive abstraction.
\begin{figure}[t]
\begin{centerframe}
\begin{tabular}{rcl}
{\modtype} & ::= & {\qualid} \\
 & $|$ & {\modtype} \texttt{ with Definition }{\qualid} := {\term} \\
 & $|$ & {\modtype} \texttt{ with Module }{\qualid} := {\qualid} \\
 & $|$ & {\qualid} \nelist{\qualid}{}\\
 & $|$ & $!${\qualid} \nelist{\qualid}{}\\
 &&\\
{\onemodbinding} & ::= & {\tt ( [Import|Export] \nelist{\ident}{} : {\modtype} )}\\
 &&\\
{\modbindings} & ::= & \nelist{\onemodbinding}{}\\
 &&\\
{\modexpr} & ::= & \nelist{\qualid}{} \\
 & $|$ & $!$\nelist{\qualid}{}
\end{tabular}
\end{centerframe}
\caption{Syntax of modules}
\end{figure}

In the syntax of module application, the $!$ prefix indicates that any
{\tt Inline} directive in the type of the functor arguments will be ignored
(see \ref{Inline} below).

\subsection{\tt Module {\ident} \comindex{Module}}

This command is used to start an interactive module named {\ident}.

\begin{Variants}

\item{\tt Module {\ident} {\modbindings}}

  Starts an interactive functor with parameters given by {\modbindings}.

\item{\tt Module {\ident} \verb.:. {\modtype}}

  Starts an interactive module specifying its module type.

\item{\tt Module {\ident} {\modbindings} \verb.:. {\modtype}}

  Starts an interactive functor with parameters given by {\modbindings},
  and output module type {\modtype}.

\item{\tt Module {\ident} \verb.<:. {\modtype$_1$} \verb.<:. $\ldots$ \verb.<:.{ \modtype$_n$}}

  Starts an interactive module satisfying each {\modtype$_i$}.

\item{\tt Module {\ident} {\modbindings} \verb.<:. {\modtype$_1$} \verb.<:. $\ldots$ \verb.<:. {\modtype$_n$}}

  Starts an interactive functor with parameters given by {\modbindings}.
  The output module type is verified against each module type
  {\modtype$_i$}.

\item\texttt{Module [Import|Export]}

  Behaves like \texttt{Module}, but automatically imports or exports the
  module.

\end{Variants}

\subsubsection{Reserved commands inside an interactive module: \comindex{Include}}
\begin{enumerate}
\item {\tt Include {\module}}

  Includes the content of {\module} in the current interactive module.
  Here {\module} can be a module expression or a module type expression.
  If {\module} is a higher-order module or module type expression, then
  the system tries to instantiate {\module} with the current interactive
  module.

\item {\tt Include {\module$_1$} \verb.<+. $\ldots$ \verb.<+. {\module$_n$}}

  is a shortcut for {\tt Include {\module$_1$}} $\ldots$
  {\tt Include {\module$_n$}}.
\end{enumerate}

\subsection{\tt End {\ident} \comindex{End}}

This command closes the interactive module {\ident}. If a module type was
given, the content of the module is matched against it, and an error is
signaled if the matching fails. If the module is basic (i.e. not a
functor), its components (constants, inductive types, submodules, etc.) are
now available through the dot notation.

\begin{ErrMsgs}
\item \errindex{No such label {\ident}}
\item \errindex{Signature components for label {\ident} do not match}
\item \errindex{This is not the last opened module}
\end{ErrMsgs}

\subsection{\tt Module {\ident} := {\modexpr} \comindex{Module}}

This command defines the module identifier {\ident} to be equal to
{\modexpr}.

\begin{Variants}
\item{\tt Module {\ident} {\modbindings} := {\modexpr}}

  Defines a functor with parameters given by {\modbindings} and body
  {\modexpr}.

% Particular cases of the next 2 items
%\item{\tt Module {\ident} \verb.:. {\modtype} := {\modexpr}}
%
%  Defines a module with body {\modexpr} and interface {\modtype}.
%\item{\tt Module {\ident} \verb.<:. {\modtype} := {\modexpr}}
%
%  Defines a module with body {\modexpr}, satisfying {\modtype}.

\item{\tt Module {\ident} {\modbindings} \verb.:. {\modtype} := {\modexpr}}

  Defines a functor with parameters given by {\modbindings} (possibly
  none), and output module type {\modtype}, with body {\modexpr}.

\item{\tt Module {\ident} {\modbindings} \verb.<:. {\modtype$_1$} \verb.<:. $\ldots$ \verb.<:. {\modtype$_n$} := {\modexpr}}

  Defines a functor with parameters given by {\modbindings} (possibly
  none) with body {\modexpr}. The body is checked against each
  {\modtype$_i$}.

\item{\tt Module {\ident} {\modbindings} := {\modexpr$_1$} \verb.<+. $\ldots$ \verb.<+. {\modexpr$_n$}}

  is equivalent to an interactive module where each {\modexpr$_i$} is
  included.

\end{Variants}

\subsection{\tt Module Type {\ident} \comindex{Module Type}}

This command is used to start an interactive module type {\ident}.

\begin{Variants}

\item{\tt Module Type {\ident} {\modbindings}}

  Starts an interactive functor type with parameters given by
  {\modbindings}.

\end{Variants}

\subsubsection{Reserved commands inside an interactive module type: \comindex{Include}\comindex{Inline}}
\label{Inline}
\begin{enumerate}
\item {\tt Include {\module}}

  Same as {\tt Include} inside a module.

\item {\tt Include {\module$_1$} \verb.<+. $\ldots$ \verb.<+. {\module$_n$}}

  is a shortcut for {\tt Include {\module$_1$}} $\ldots$
  {\tt Include {\module$_n$}}.

\item {\tt {\assumptionkeyword} Inline {\assums} }

  The instance of this assumption will be automatically expanded at functor
  application, except when this functor application is prefixed by a $!$
  annotation.
\end{enumerate}

\subsection{\tt End {\ident} \comindex{End}}

This command closes the interactive module type {\ident}.

\begin{ErrMsgs}
\item \errindex{This is not the last opened module type}
\end{ErrMsgs}

\subsection{\tt Module Type {\ident} := {\modtype}}

Defines a module type {\ident} equal to {\modtype}.

\begin{Variants}
\item {\tt Module Type {\ident} {\modbindings} := {\modtype}}

  Defines a functor type {\ident} specifying functors taking arguments
  {\modbindings} and returning {\modtype}.

\item{\tt Module Type {\ident} {\modbindings} := {\modtype$_1$} \verb.<+. $\ldots$ \verb.<+. {\modtype$_n$}}

  is equivalent to an interactive module type where each {\modtype$_i$} is
  included.

\end{Variants}

\subsection{\tt Declare Module {\ident} : {\modtype}}

Declares a module {\ident} of type {\modtype}.

\begin{Variants}

\item{\tt Declare Module {\ident} {\modbindings} \verb.:. {\modtype}}

  Declares a functor with parameters {\modbindings} and output module type
  {\modtype}.

\end{Variants}

\subsubsection{Example}

Let us define a simple module.
\begin{coq_example}
Module M.
Definition T := nat.
Definition x := 0.
Definition y : bool.
exact true.
Defined.
End M.
\end{coq_example}

Inside a module, one can define constants, prove theorems and do anything
else that can be done in the toplevel. Components of a closed module can
be accessed using the dot notation:
\begin{coq_example}
Print M.x.
\end{coq_example}

A simple module type:
\begin{coq_example}
Module Type SIG.
Parameter T : Set.
Parameter x : T.
End SIG.
\end{coq_example}

Now we can create a new module from \texttt{M}, giving it a less precise
specification: the \texttt{y} component is dropped as well as the body of
\texttt{x}.

\begin{coq_eval}
Set Printing Depth 50.
(********** The following is not correct and should produce **********)
(***************** Error: N.y not a defined object *******************)
\end{coq_eval}
\begin{coq_example}
Module N : SIG with Definition T := nat := M.
Print N.T.
Print N.x.
Print N.y.
\end{coq_example}
\begin{coq_eval}
Reset N.
\end{coq_eval}

\noindent
The definition of \texttt{N} using the module type expression
\texttt{SIG with Definition T:=nat} is equivalent to the following one:

\begin{coq_example*}
Module Type SIG'.
Definition T : Set := nat.
Parameter x : T.
End SIG'.
Module N : SIG' := M.
\end{coq_example*}

If we just want to be sure that our implementation satisfies a given
module type without restricting the interface, we can use a transparent
constraint:

\begin{coq_example}
Module P <: SIG := M.
Print P.y.
\end{coq_example}

Now let us create a functor, i.e. a parametric module:

\begin{coq_example}
Module Two (X Y: SIG).
\end{coq_example}
\begin{coq_example*}
Definition T := (X.T * Y.T)%type.
Definition x := (X.x, Y.x).
\end{coq_example*}
\begin{coq_example}
End Two.
\end{coq_example}

and apply it to our modules and do some computations:

\begin{coq_example}
Module Q := Two M N.
Eval compute in (fst Q.x + snd Q.x).
\end{coq_example}

In the end, let us define a module type with two sub-modules, sharing
some of the fields, and give one of its possible implementations:

\begin{coq_example}
Module Type SIG2.
Declare Module M1 : SIG.
Module M2 <: SIG.
Definition T := M1.T.
Parameter x : T.
End M2.
End SIG2.
\end{coq_example}
\begin{coq_example*}
Module Mod <: SIG2.
Module M1.
Definition T := nat.
Definition x := 1.
End M1.
Module M2 := M.
\end{coq_example*}
\begin{coq_example}
End Mod.
\end{coq_example}

Notice that \texttt{M} is a correct body for the component \texttt{M2}
since its \texttt{T} component is equal to \texttt{nat}, and hence to
\texttt{M1.T}, as specified.

\begin{coq_eval}
Reset Initial.
\end{coq_eval}

\begin{Remarks}
\item Modules and module types can be nested components of each other.
\item One can have sections inside a module or a module type, but not a
  module or a module type inside a section.
\item Commands like \texttt{Hint} or \texttt{Notation} can also appear
  inside modules and module types. Note that in case of a module
  definition like:

  \smallskip
  \noindent
  {\tt Module N : SIG := M.}
  \smallskip

  or

  \smallskip
  {\tt Module N : SIG.\\
    \ \ \dots\\
    End N.}
  \smallskip

  hints and the like valid for \texttt{N} are not those defined in
  \texttt{M} (or the module body) but the ones defined in \texttt{SIG}.
\end{Remarks}

\subsection{\tt Import {\qualid} \comindex{Import} \label{Import}}

If {\qualid} denotes a valid basic module (i.e. its module type is a
signature), this command makes its components available by their short
names.

Example:
\begin{coq_example}
Module Mod.
\end{coq_example}
\begin{coq_example}
Definition T:=nat.
Check T.
\end{coq_example}
\begin{coq_example}
End Mod.
Check Mod.T.
Check T. (* Incorrect ! *)
Import Mod.
Check T. (* Now correct *)
\end{coq_example}
\begin{coq_eval}
Reset Mod.
\end{coq_eval}

Some features defined in modules are activated only when a module is
imported. This is for instance the case of notations (see
Section~\ref{Notation}).

\begin{Variants}
\item{\tt Export {\qualid}}\comindex{Export}

  When the module containing the command {\tt Export {\qualid}} is
  imported, {\qualid} is imported as well.
\end{Variants}

\begin{ErrMsgs}
\item \errindexbis{{\qualid} is not a module}{is not a module}
% this error is impossible in the import command
% \item \errindex{Cannot mask the absolute name {\qualid} !}
\end{ErrMsgs}

\begin{Warnings}
\item Warning: Trying to mask the absolute name {\qualid} !
\end{Warnings}

\subsection{\tt Print Module {\ident} \comindex{Print Module}}

Prints the module type and (optionally) the body of the module {\ident}.

\subsection{\tt Print Module Type {\ident} \comindex{Print Module Type}}

Prints the module type corresponding to {\ident}.

\subsection{\tt Locate Module {\qualid} \comindex{Locate Module}}

Prints the full name of the module {\qualid}.

%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
coq-8.4pl2/doc/refman/Helm.tex0000640000175000001440000003301211270632660015255 0ustar notinusers\label{Helm}
\index{XML exportation}
\index{Proof rendering}

This section describes the exportation of {\Coq} theories to XML, which
has been contributed by Claudio Sacerdoti Coen. Currently, the main
applications are the rendering and searching tools developed within the
HELM\footnote{Hypertextual Electronic Library of Mathematics} and
MoWGLI\footnote{Mathematics on the Web, Get it by Logic and Interfaces}
projects, mainly at the University of Bologna and partly at INRIA-Sophia
Antipolis.

\subsection{Practical use of the XML exportation tool}

The basic way to export the logical content of a file into XML format is
to use {\tt coqc} with option {\tt -xml}. When the {\tt -xml} flag is set,
every definition or declaration is immediately exported to XML once
concluded. The system environment variable {\tt COQ\_XML\_LIBRARY\_ROOT}
must be set beforehand to a directory in which the logical structure of
the exported objects is reflected.

For {\tt Makefile} files generated by \verb+coq_makefile+ (see section
\ref{Makefile}), it is sufficient to compile the files using
\begin{quotation}
\verb+make COQ_XML=-xml+
\end{quotation}

To export a development to XML, the suggested procedure is then:
\begin{enumerate}
\item add to your own contribution a valid \verb+Make+ file and use
  \verb+coq_makefile+ to generate the \verb+Makefile+ from the \verb+Make+
  file.

  \Warning Since logical names are used to structure the XML hierarchy,
  always add to the \verb+Make+ file at least one \verb+"-R"+ option to map
  physical file names to logical module paths.

\item set the \verb+COQ_XML_LIBRARY_ROOT+ environment variable to the
  directory where the XML file hierarchy must be physically rooted.

\item compile your contribution with \verb+"make COQ_XML=-xml"+.
\end{enumerate}

\Rem In case the system variable {\tt COQ\_XML\_LIBRARY\_ROOT} is not set,
the output is done on the standard output. Also, the files are compressed
using {\tt gzip} after creation. This is to save disk space since the XML
format is very verbose.

\subsection{Reflection of the logical structure into the file system}

For each {\Coq} logical object, several independent files associated with
this object are created. The structure of the long name of the object is
reflected in the directory structure of the file system. E.g. an object of
long name {\tt {\ident$_1$}.{\ldots}.{\ident$_n$}.{\ident}} is exported to
files in the subdirectory {{\ident$_1$}/{\ldots}/{\ident$_n$}} of the
directory bound to the environment variable {\tt COQ\_XML\_LIBRARY\_ROOT}.

\subsection{What is exported?}

The XML exportation tool exports the logical content of {\Coq} theories.
This covers global definitions (including lemmas, theorems, ...), global
assumptions (parameters and axioms), local assumptions or definitions, and
inductive definitions.

Vernacular files are exported to {\tt .theory.xml} files.
%Variables,
%definitions, theorems, axioms and proofs are exported to individual
%files whose suffixes range from {\tt .var.xml}, {\tt .con.xml}, {\tt
%.con.body.xml}, {\tt .con.types.xml} to {\tt .con.proof_tree.xml}.
Comments are pre-processed with {\sf coqdoc} (see section \ref{coqdoc}).
In particular, they have to be enclosed within {\tt (**} and {\tt *)} to be
exported.

For each inductive definition of name
{\ident$_1$}.{\ldots}.{\ident$_n$}.{\ident}, a file named
{\tt {\ident}.ind.xml} is created in the subdirectory
{\tt {\ident$_1$}/{\ldots}/{\ident$_n$}} of the XML library root
directory. It contains the arities and constructors of the type. For
mutual inductive definitions, the file is named after the name of the
first inductive type of the block.

For each global definition of base name
{\tt {\ident$_1$}.{\ldots}.{\ident$_n$}.{\ident}}, files named
{\tt {\ident}.con.body.xml} and {\tt {\ident}.con.xml} are created in the
subdirectory {\tt {\ident$_1$}/{\ldots}/{\ident$_n$}}. They respectively
contain the body and the type of the definition.

For each global assumption of base name
{\tt {\ident$_1$}.{\ident$_2$}.{\ldots}.{\ident$_n$}.{\ident}}, a file
named {\tt {\ident}.con.xml} is created in the subdirectory
{\tt {\ident$_1$}/{\ldots}/{\ident$_n$}}. It contains the type of the
global assumption.

For each local assumption or definition of base name {\ident} located in
sections {\ident$'_1$}, {\ldots}, {\ident$'_p$} of the module
{\tt {\ident$_1$}.{\ident$_2$}.{\ldots}.{\ident$_n$}.{\ident}}, a file
named {\tt {\ident}.var.xml} is created in the subdirectory
{\tt {\ident$_1$}/{\ldots}/{\ident$_n$}/{\ident$'_1$}/\ldots/{\ident$'_p$}}.
It contains its type and, if a definition, its body.

In order to do proof-rendering (for example in natural language), some
redundant typing information is required, i.e. the type of at least some
of the subterms of the bodies and types of the CIC objects. These types
are called inner types and are exported to files of suffix
{\tt .types.xml} by the exportation tool.

% Deactivated
%% \subsection{Proof trees}
%% For each definition or theorem that has been built with tactics, an
%% extra file of suffix {\tt proof\_tree.xml} is created. It contains the
%% proof scripts and is used for rendering the proof.

\subsection[Inner types]{Inner types\label{inner-types}}

The type of a subterm of a construction is called an {\em inner type} if
it respects the following conditions.

\begin{enumerate}
\item Its sort is \verb+Prop+\footnote{or {\tt CProp} which is the
  "sort"-like definition used in C-CoRN (see
  \url{http://vacuumcleaner.cs.kun.nl/c-corn}) to type computationally
  relevant predicative propositions.}.
\item It is not a type cast nor an atomic term (variable, constructor or
  constant).
\item If its root is an abstraction, then the root's parent node is not an
  abstraction, i.e. only the type of the outer abstraction of a block of
  nested abstractions is printed.
\end{enumerate}

The rationale for the 3$^{rd}$ condition is that the type of the inner
abstractions could be easily computed starting from the type of the outer
ones; moreover, the types of the inner abstractions require a lot of
disk/memory space: removing the 3$^{rd}$ condition leads to XML files that
are twice as big as the ones exported when applying the 3$^{rd}$ condition.
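As a purely illustrative sketch of the naming scheme described in the
previous subsections (all names below are hypothetical, and the exact set
of files may differ; in particular the files may additionally be
compressed by {\tt gzip}, as noted above): assume a file \texttt{arith.v},
mapped by a \verb+"-R"+ option to the logical path \texttt{MyLib.arith},
that contains a global definition \texttt{double}, a theorem
\texttt{double\_le} and an inductive type \texttt{even}. After compiling
it with {\tt coqc -xml}, one may expect to find, below
\verb+$COQ_XML_LIBRARY_ROOT+, files such as:

\begin{verbatim}
MyLib/arith.theory.xml            the vernacular file itself
MyLib/arith/double.con.xml        type of the definition double
MyLib/arith/double.con.body.xml   body of the definition double
MyLib/arith/double.con.types.xml  inner types associated with double
MyLib/arith/double_le.con.xml     type of the theorem double_le
MyLib/arith/even.ind.xml          arity and constructors of even
\end{verbatim}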
\subsection{Interactive exportation commands}

There are also commands to be used interactively in {\tt coqtop}.

\subsubsection[\tt Print XML {\qualid}]{\tt Print XML {\qualid}\comindex{Print XML}}

If the variable {\tt COQ\_XML\_LIBRARY\_ROOT} is set, this command creates
files containing the logical content of {\qualid} in XML format. If the
variable is not set, the result is displayed on the standard output.

\begin{Variants}
\item {\tt Print XML File {\str} {\qualid}}\\
  This writes the logical content of {\qualid} in XML format to files
  whose prefix is {\str}.
\end{Variants}

\subsubsection[{\tt Show XML Proof}]{{\tt Show XML Proof}\comindex{Show XML Proof}}

If the variable {\tt COQ\_XML\_LIBRARY\_ROOT} is set, this command creates
files containing the current proof in progress in XML format. It also
writes an XML file made of the inner types. If the variable is not set,
the result is displayed on the standard output.

\begin{Variants}
\item {\tt Show XML File {\str} Proof}\\
  This writes the current proof in progress in XML format to files whose
  prefix is {\str}.
\end{Variants}

\subsection{Applications: rendering, searching and publishing}

The HELM team at the University of Bologna has developed tools exploiting
the XML exportation of {\Coq} libraries. This covers rendering, searching
and publishing tools. All these tools require a running http server and,
if possible, a MathML compliant browser. The procedure to install the
suite of tools ultimately allowing rendering and searching can be found on
the HELM web site \url{http://helm.cs.unibo.it/library.html}.

It may be easier though to upload your developments on the HELM http
server and to re-use the infrastructure running on it. This requires
publishing your development. To this aim, follow the instructions on
\url{http://mowgli.cs.unibo.it}. Notice that the HELM server already hosts
a copy of the standard library of {\Coq} and of the {\Coq} user
contributions.

\subsection{Technical information}

\subsubsection{CIC with Explicit Named Substitutions}

The exported files are an XML encoding of the lambda-terms used by the
\Coq\ system. The implementation details of the \Coq\ system are hidden as
much as possible, so that the XML DTD is a straightforward encoding of the
Calculus of (Co)Inductive Constructions.

Nevertheless, there is a feature of the \Coq\ system that cannot be hidden
in a completely satisfactory way: discharging (see Sect.\ref{Section}). In
\Coq\ it is possible to open a section, declare variables and use them in
the rest of the section as if they were axiom declarations. Once the
section is closed, every definition and theorem in the section is
discharged by abstracting it over the section variables. Variable
declarations as well as section declarations are entirely dropped. Since
we are interested in an XML encoding of definitions and theorems as close
as possible to those directly provided by the user, we do not want to
export discharged forms. Exporting non-discharged theorems and definitions
together with theorems that rely on the discharged forms obliges the tools
that work on the XML encoding to implement discharging to achieve logical
consistency. Moreover, the rendering of the files can be misleading, since
hyperlinks can be shown between occurrences of the discharged form of a
definition and the non-discharged definition, which are different objects.

To overcome the previous limitations, Claudio Sacerdoti Coen developed in
his Ph.D.
thesis an extension of CIC, called Calculus of (Co)Inductive Constructions
with Explicit Named Substitutions, that is a slight extension of CIC where
discharging is not necessary. The DTD of the exported XML files describes
constants, inductive types and variables of the Calculus of (Co)Inductive
Constructions with Explicit Named Substitutions. The conversion to the new
calculus is performed during the exportation phase.

The following example shows a very small \Coq\ development together with
its version in CIC with Explicit Named Substitutions.

\begin{verbatim}
# CIC version: #
Section S.
 Variable A : Prop.
 Definition impl := A -> A.
 Theorem t : impl.           (* uses the undischarged form of impl *)
 Proof.
  exact (fun (a:A) => a).
 Qed.
End S.
Theorem t' : (impl False).   (* uses the discharged form of impl *)
Proof.
 exact (t False).            (* uses the discharged form of t *)
Qed.
\end{verbatim}

\begin{verbatim}
# Corresponding CIC with Explicit Named Substitutions version: #
Section S.
 Variable A : Prop.
 Definition impl(A) := A -> A.   (* theorems and definitions are explicitly
                                    abstracted over the variables. The name
                                    is sufficient to completely describe
                                    the abstraction *)
 Theorem t(A) : impl.            (* impl where A is not instantiated *)
 Proof.
  exact (fun (a:A) => a).
 Qed.
End S.
Theorem t'() : impl{False/A}.    (* impl where A is instantiated with False
                                    Notice that t' does not depend on A *)
Proof.
 exact t{False/A}.               (* t where A is instantiated with False *)
Qed.
\end{verbatim}

Further details on the typing and reduction rules of the calculus can be
found in Claudio Sacerdoti Coen's Ph.D. dissertation, where the consistency
of the calculus is also proved.

\subsubsection{The CIC with Explicit Named Substitutions XML DTD}

A copy of the DTD can be found in the file ``\verb+cic.dtd+'' in the
\verb+plugins/xml+ source directory of \Coq. The following is a very brief
overview of the elements described in the DTD.

\begin{description}
\item[]\texttt{} is the root element of the files that correspond to
  constant types.
\item[]\texttt{} is the root element of the files that correspond to
  constant bodies. It is used only for closed definitions and theorems
  (i.e. when no metavariable occurs in the body or type of the constant).
\item[]\texttt{} is the root element of the file that corresponds to the
  body of a constant that depends on metavariables (e.g. unfinished
  proofs).
\item[]\texttt{} is the root element of the files that correspond to
  variables.
\item[]\texttt{} is the root element of the files that correspond to
  blocks of mutually defined inductive definitions.
\end{description}

The elements \verb++, \verb++, \verb++, \verb++, \verb++, \verb++,
\verb++, \verb++, \verb++, \verb++, \verb++, \verb++, \verb++, \verb++,
\verb++ and \verb++ are used to encode the constructors of CIC. The
\verb+sort+ or \verb+type+ attribute of the element, if present, is
respectively the sort or the type of the term, which is a sort because of
the typing rules of CIC.

The element \verb++ corresponds to the application of an explicit named
substitution to its first argument, which is a reference to a definition
or declaration in the environment.

All the other elements are just syntactic sugar.
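As a small, hypothetical illustration of the interactive commands described
earlier in this chapter, one might export the logical content of the
standard library constant \texttt{plus} as follows (the constant and the
prefix \verb+./xml/plus+ are arbitrary choices made for this example;
{\tt Print XML} itself writes below {\tt COQ\_XML\_LIBRARY\_ROOT} when that
variable is set, and to the standard output otherwise):

\begin{verbatim}
Print XML plus.
Print XML File "./xml/plus" plus.
\end{verbatim}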
%%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/coqide.png0000640000175000001440000005073110406335323015623 0ustar notinusersPNG  IHDRV/sBIT|d IDATxyw}w14˶$_o6%A 1cv7~KpBېMu$@8B >$#Hmٖ%KF}QcrTgWuwu[:﷾eXe P$ھq@SZn2գ/I$VDb1X564:@$RYn$(f۟$mǹY?[tVj$cm}f0=,wܶ%+7Pܶ׬8E)jRcWoIPx8gk'8{b]z}Ʃ4ZuF_ Pf)->hxؿ Őx*8f쫜 ۺ~5KKO]9=%e52<|T=3O_Ύ<ʲ,%=УѱҬ m%s_қoؠщ k0r;o~n|lK:i³tsf49YdY]3gjŪZl~zbNp:;H3$U?c[vYN9c46HJRRlj i¬<_|W(膷mH$?6O;ߪuk~~u`_}/U.[-ʲ,Ydem{IP oFOg=z|u.P˗-ݫ_K6diO=C,_8bon+wޡÃ}_U,7p8HаzN՚sW>QlJ2* l.6S_:}iC4Y[,z5omТά}2Ee,J%-C匥ih.<[qWJr*CCzpK/~]۟K{ έI׹G~;hV_vaesDZrHOeE~NpA_Ԧ%kkcڻi޷u_o;e@!˲46>,4I*NuźuUoM25VHaXGGWT*46>v,XYk;OS6+fFڱ4dҵŽh'׬4-堎mZҮqtFJɼeCe#cF^QRVEue-=qX?U/[sɞ8xddɐ$Ø:Ci9Z*ꚵP풤Lf*af3ǯ!˒$K=hQ͜%*xqnP(;vpl`zrU}Ǵ :dvhLY:r _s2= %м!Ą&B\!U2%JwGㇵ铛7'I2 )hVLs蟾 W}KR&DfXPy^14o<}n+g{;2:c~zw몫We_^9(ӌC>SWg3fОGmC?Emj˷\{[[UlU*k"T*5Ki4׺J٬|>ڳ2Rv:4Y(Ъ86=9zL{~R[>e R{FvFg:t`Z}'{%TҒvKx1ZyXAZN:zj~ѡ^T1ty+%}7U-в, ѱʷ_kA$C뚕]qY톺gvuvٯQ˖-_~~F[xL׾- j?EbI\uY"J%=)=3Q̒&S j^]wZSu/\6H,eYT@Y̢%˔L\0}֔u` ZpQ#22Q(h|Ǐt`ݛ tm._PʅZ}=cRlӜ^Ozx?ݟkz*އ?/8Ko* Z9&'t@,Y?/esj~nd2SeR-FeN <9,+'Q6тT(W4Y(JۯOSW4w }T.՛Tl.a(i%kjyo<_RQzj&Ѳ3K~}~WgZYwXRY,ekfLs*i, [@V^{@kbeKj+OJfIbIFP*ʿ|JzW*9W^~NfٔZB+r_-7׿#ITe[/e}[ߓ CoB:Zs*'~Gcczn4YU6eJ%SGt+i*KrFض=N˒dZ*[ cj={߰@\[,뵮lmyE%9LwǕ2tlLӔaSCK]zl6.7ޭcRٖ g2.L6OiEJ4;,]ć&u ϼ~9ZGܣ{uoou\l*i*]ЭGx[UY;ڧssLi|fHcWV7\N ^S4n46YT,M,)H/-Bq-Xq&}j۫e7D *cfJiidi2&#bcRG moja}dl-a(hOWs̟}Bfv)mѡΎ׺52-Sf|lF35Sjk S_fUݡ=tvi쌮wTVTVTe_˪,(Ckۻ@ӻ4::Riw_շkTg-UoTWRIҡW?7ޡᓺpu[Od2C石Z咥ׯ) C@\uY9lY ZMƆidxH<Y4uuV2L/6ZZ>]т:&Ƥ]h>ı>)?V߮)oҜӹ.Rd iymoտ\W_~YY}ޫ+NQggGb]vnߥ?)%J%EYBbe2㉚iY_C-]6l>uj˩4Q +߬+.[eK32ͩ=OurQТ-?g;>{ F?#|{rc9e9)ӦLƘ !\3C˗/ՋQ;.5C3;TTǻÕ'Mc/dU4R(!Sk߬+N3ڵlq~֛WG=4OkyQjWN#*'͟+3a]seS\V[ֿIW^qnmDc2MKmy͙3[]jkoKlO|^}d/}M_w6={>?ϟN\Oò VFNX+L <3|>Loy YδLHґXx_U"v-7W~7gwjт1#LF ՜9e'״|> ֬]*2MSlF\.3J墆588c*LU.u}(.p&J'N>pQݯX !MLMNNP(k|Ftdd&uf>|N6\fޟmΧSo$`TWhɚt5uuWV\/ꞥMM9B$ǭŦTy=Ͽ/?دɉ I#oJfIa?~-eahȰ]h _w]pJ;v?O?|pn]jok*X|>+/X+N]})c/Y:utڋSef2SCSϋwbeNlyοsֵW_Ž ݷ;yZah4[-OVyBwjj8"7ò,k B#GwRQ%˲408W^99?'VG_wy]&35eK|ْZԍaӭ9ݍMxbC72hZeYݞzm[xjHzT@ V]` @-YK^q$VhZ׮"2kmm[!FCje'wBbݫ'xA:uչXZZto?ZJTh@utƪ1h$0dYVc8*qI IՉ יlFݳg%PY 4SXbQ22$+d2?0ə dd{E7eF,+5U4$rb5<S_Fl3ޕT{4: JKI"%VoOO=K+.X_{[17Wާnt>wтZ 2-S㲌)(IRFT]f֔UՖnitt\GUUV\n.HH]ܒ3gk}{~]9eX ~jѲE*'2fjsc_Kj{S{$}_hp$RSe’%*kβ^/\VU墬%V4w:7'o6ݔfvW7ͭ0+u@rB%V#|_朌q۴Yxq\F2,͘{_MZ70^6|_8)Y |u..$eHRY?ﻬi:t萲Ţ] Qm\6+9rO.?qڙZ0WYdqBlNaddf=Q IDATLe;F}hkb奒l+nӦ+c9eJeijŊ˿Kzaʥ Ud+L{vS;)h^-RiǽeP?_Yʕ+4cf͘bl6'SSؒUid%j)]O~+Nŋ$RmW8z^Yaq.^emK[l~ L퇍cdJfY\N}}}˲4<<,YR^VUZ'gcmOmk^:9˭[KW04[d&VBQO=[&e39M&?zDqilbT҄fY%eJ4w^ؽO#kb]m8˰W~8Z/Zpk[\QqͯFweǽ6Ldљ콟S\{ eK9R6֭B̥gXAg*pvaJ"1]*4+IR]: V^;|w[.L9~˄)>+qKCJJ{fFLJ4Y.htbD?ZU d}^&kPFϞP[kP JV-PnvU}(KBYE.f42gtm TR.c4MYeS#Euwhv|EIJ,p0]R Brܒ^^`bf4w=;[lV NhlrD##,He+7Kg/SoR=gv,FjUF 45xԷ[׮;_/QGGLȰFF{'h`hVZR)vvk$Xǰ@@}C&hٳtv?{EG508QO8YQ243׭%sNUoRƊzfs醷hi խk z嶂! lTՊMoԿ}#ݕ׼Y3te,T˘2 '&V'iUY>W9β{mm@X_! 
۶ՓO\u>z,>;8vL^-]X]35w}Uy"mX^_'tފyo>oZٛ_tnܶa[v.]z'L )zN{ztEoh#/ݩޡ4sFǴ'T<_}(3sRb9gI< PPRDYaz EN$KtMޗ_~vP=*Yp[:;uͿY߽_Py#?XhvHSGtkW7s>fKJ/[˖hŊSi1Po.򮷩P(\._0k@ k!a' VI% 0U}yV$X@L$VꮕZ$=V:W:Q8ƺs<5eIwSY!>QãWv.*L9Qʫ5a&{WCTD)*%γLZ%M-/L<պEϭ墲Ͱ1;]ki^۪Kk#)~ez^QcZ?d|"'V+g%YIwγzU9&iJjZݦ'yyqəPT`Ts帕܏j_K^1W^ٷ/[ۭ;W;5 I]Ԭס_ôk{3P@s =xE*nE]%zIzmoZze].(yHb;TmBiIrSH➤k,lqׂ~UK+C'7WsR~'E9UnO sۖޫ׮׮[zzw]7| yA#bTxUM~Xwlf׋2]5x L߯ "eǠ”E5FR­(ׯu%jسq߽wInܶKinIJK-`uH]۷{'0V"تeG3oHǰvҘXY,ƭE,tXխ+ Z[5$+xHxvϫ] >xV/7TZO1Wcj)򨀵Ҫ]qjݥh5iV*(p;@4eb71h.A O5eb$ Mr(^;Ae~۾)ǹno Z.2%$wWSfO.2nܖ`8%n9uܦ%Xn9>$W@u-AWR%cp ( 9nIZHUbtT+sG<9Ĩn^xSvn%Q&کKbU{Z]-p.u[YADIB*n۴ok SعmUyzzھU׮}ޥzdcF'޻$Iwyvnr֥W\4k}DW@h$+)UNWt-Vu@k{Ufr%0Ht]"\> |r~6hAϬJ'<*N9ZWӵX%/Qr&B (U5ɑ="P1m(-VuHM*JrDBOS%V$mT@+}Z%>Ikd h=MXI͗4[iX@=@iʮc4:4ƞ + TQʨK$:Y C+'Z':^'sQѹQKZnͫyu3&a$u}ZT{(ܶ[\GM+y?jY-Cz9+Yq6=h[^F9I$1_ӵXUdQ׉ZѮJ(lyQ{VMRTCRк.ܻ5S`I&UIW/a[ؗf:.hKV٭j%ђQǫ^jДX5Hv{^Ѽc uWYIKq@5]ܨx^ $JDYΘE{ₒ0V-/I0Fkc^MX YvJ[=XvJު$lk kʮ&uO5ػq =khOZњ>F=V^L8)5r>dP-n] trTeZ2bKZQݺub>ϸPuo򺧪2~4Qzp^ɏF) &+ɷ+ƞ-VDb1X@L$VDb1X@L$VDb1X@L$VSo=Wp=@h-biF 8S}1fӉobiTE [8'n͍em5 ʭY,f3i-P{XUT*~Io_֫[y(+0r&'HԜz{aw,֚e^q;cr{FRXk#g@+Xq8٧+AY8v[. p5D:ϸѸeEwJ_[ asl{c&u%hVUZ*fQbWL^Fw 7|xU,A}Uym7jYx]"-VI%mʯUYիŪ^I']j#RbUWM'Gnk 6b?*jA2Ia1׊/Mׂ30o@ uJZZ4wjXݏuR{F+f~m Ws`iIp{jSU:aʲmyIzmعmUyzzھU׮}ޥzPSA-L/αس1h.Iwޭ۶4uן0 ھn;*MZ *NH0y }?T+`hq :Z &+ b"H &+ b"H &+ b"H {S8Ђ6llt@]nx&meS@Sm4ͺ+ubt|j-RL+^ɔ]%t&)`XeO&ZCq&$^ 3 *>߾ߵA%fch|iT9iXyUg>2aֱ'eWʨLsG_ac:^ @n=VJ&+/(As+kj 6e56o Z]$yQD= OwUĪ)d0Pu -h~Ԥ$h[xĉeM#c-VkV^^{n^v7"a-T38#v٠vZj1*ƞU v{$;m[N_޺O@RvmzoPP +I!4ZPbUЪH c4:-nfаjMu2ר!2)a?W<4t Tm|¿F FLjh:4|J[HJS%VQE6ZR𠖹Ԍ9/֚0?@-70Rۭ-jjjʏW W#Jj 紹؄.LĤbl@M5*UVvߣZ^^ĉ*1כ6Ɠ9MJRFZ~t*s@Xm[ʋkkZv. [f0a SAv{^$uLS\OIӨ0JIWRUk5 1f)ڹ\{6{$qڹmIV.]۷{'U1'-]jk͂ &AUC[h~4cfm y^+^\h&i9*ƞf6Zokv:4Ċ{ ,0r&/Nb6@ Xm5!)tjU-krnY2I4J"UUK:% 0pQ kH-Vaт`: X4Dzgf6ݤMs7U.9V^#-㶜[T2nҚB=*e*+JseZu  b G@ƵXԿ4HKRX%o˖Uۄ0!K* J&+O&\blrKVUV%O2~)˃ںe@O*I5+kru;2{JBe ɔ5tJn -Ѩ6aH۩l˯+(9sHJ4H$V Iܺυjkw'] P kk%V:k7>Ѓ=9r^]گ0 *I=Yrxݮ[f݆Fϱ ;k0N"LQQ]&ʾnu` I m+ZL! عmKCĊ|bUk$omp$L ²& &< tHinT@R+iBDbh:׮?>i@bhJgH+@! W!xIDATW҄+ t6$V}V4!4*@0x)1x MhHM+iCbhJgH+@! WW҄+ t6$V}V4!4*@0x)1x MhHM+iCbhJgH+@! 
WW҄+ b"H &+ b"H &+ b"H &+ b"H &+ b"H &+ b"H &+ b"H &+)]۷6:4B@[vV]itEbvmߪ56b0X49* ܶ%5Ӏ ВAH Ð$YHWهfޗKbF*@8i|z&,]k)h-]&\{s;qUgz$y.z^ku=XZV Eәyo%Oh $Vi%ĭHe+4/if0ۈnb SNxTs Z(1G9~^Ӽʪ{Rm>=L7-՞S2QEؘ {%u=' 0mT*s/Uewe*Qb v߃^=faʉOjaԘ6xݺRyu++{ 5G9_Aq^{)jr9-ϣ0e=_rݒsRxUxqYƫn~UunǏ_~ԶD<jcNB4hy?Iu;mE=_lM۩zQ?j-xu= ~/iIV$!I[N FTp :HP?$Vi]ZY=UڒfwԞ[wjiDs+ 1fEoiV3S-0qx% WǸIJ8]c!{=[_۪q в*_~_nxL.=aI};"7;U։])mo)'N^qiWeU@DY&~\- c蕜Tssg[tA)S˛GI Il:D%m H;+R&M$mOp|h @TWeUSN[ʒ/7 `_~atxָ&|zpzՓϗn%ϖn= VGfZ/ZjfZ_֮` җUyv`Qi櫯] NS,n>+֬ nmf}s}{v56IĪH -v^o~SSPms8wÛ;ԩ=M>k=5S>CaUZ1Ǣ\-҆Wv}smX#C_IߨICCulwۏ;UΘו ^c6|(wqjr\U?_I}OG7SOS7<~mlaI͒9@D+J R*g vs_2R6WϾ6]ųi>Q7}1>} VCSJv7qU5?눤Ʀ+)+U-YDZDOm3m?okFi;+1S>%KFJ_GrTC;$w-9XIi̛t[{9Gƴ\{cm.KOKJuKLڪ9}׾v>KmǬOrh3&|US1Z22eL},U_rKvY}'>CwmԷPی-ڹ.CwVJNxD^^]ZR*953[Rl>jo+xT!j>_UNsJO%U%u*1nc2wvkΝKI8w}g͓ϳϝoc;첬YHK/X '>e+B՚rS Oj* 妴=7h]R չTmYإKS^Bw*\#2h[g(Tծ}T78Ǵᒎvj( ?6-]R342_,(YvSreD=^o9}* UW7wEZ㩎N졟kvW֘cQ-VP߽}ڍ0ӸhƄiO^PKX`Ԯn ^gaÿٿM UFLj@MT(#`* VcB6VSBv\6MӜNTxy=LǞw1|IENDB`coq-8.4pl2/doc/refman/headers.hva0000640000175000001440000000271710774445200015771 0ustar notinusers%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % File headers.hva % Hevea version of headers.sty %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%% % Commands for indexes %%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage{index} \makeindex \newindex{tactic}{tacidx}{tacind}{% \protect\addcontentsline{toc}{chapter}{Tactics Index}Tactics Index} \newindex{command}{comidx}{comind}{% \protect\addcontentsline{toc}{chapter}{Vernacular Commands Index}% Vernacular Commands Index} \newindex{error}{erridx}{errind}{% \protect\addcontentsline{toc}{chapter}{Index of Error Messages}Index of Error Messages} \renewindex{default}{idx}{ind}{% \protect\addcontentsline{toc}{chapter}{Global Index}% Global Index} \newcommand{\tacindex}[1]{% \index{#1@\texttt{#1}}\index[tactic]{#1@\texttt{#1}}} \newcommand{\comindex}[1]{% \index{#1@\texttt{#1}}\index[command]{#1@\texttt{#1}}} \newcommand{\errindex}[1]{\texttt{#1}\index[error]{#1}} \newcommand{\errindexbis}[2]{\texttt{#1}\index[error]{#2}} \newcommand{\ttindex}[1]{\index{#1@\texttt{#1}}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % For the Addendum table of contents %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\aauthor}[1]{{\LARGE \bf #1} \bigskip} % 3 \bigskip's that were here originally % may be good for LaTeX but too much for HTML \newcommand{\atableofcontents}{} \newcommand{\achapter}[1]{\chapter{#1}} \newcommand{\asection}{\section} \newcommand{\asubsection}{\subsection} \newcommand{\asubsubsection}{\subsubsection} coq-8.4pl2/doc/refman/coqide.eps0000640000175000001440000046601511441213141015625 0ustar notinusers%!PS-Adobe-3.0 EPSF-3.0 %%Creator: GIMP PostScript file plugin V 1,17 by Peter Kirchgessner %%Title: coqide1.eps %%CreationDate: Mon Aug 30 15:31:11 2010 %%DocumentData: Clean7Bit %%LanguageLevel: 2 %%Pages: 1 %%BoundingBox: 14 14 869 550 %%EndComments %%BeginProlog % Use own dictionary to avoid conflicts 10 dict begin %%EndProlog %%Page: 1 1 % Translate for offset 14.173228346456694 14.173228346456694 translate % Translate to begin of first scanline 0 534.99605377196701 translate 853.99370078740151 -534.99605377196701 scale % Image geometry 854 535 8 % Transformation matrix [ 854 0 0 535 0 0 ] % Strings to hold RGB-samples per scanline /rstr 854 string def /gstr 854 string def /bstr 854 string def {currentfile /ASCII85Decode filter /RunLengthDecode filter rstr readstring pop} {currentfile /ASCII85Decode filter /RunLengthDecode filter gstr readstring pop} {currentfile /ASCII85Decode filter /RunLengthDecode 
filter bstr readstring pop} true 3 %%BeginData: 157701 ASCII Bytes colorimage JcC<$JcC<$JcC<$X8d\~> JcC<$JcC<$JcC<$X8d\~> JcC<$JcC<$JcC<$X8d\~> !<7W$J_#D'J_#D'J_$sS!!%N~> !<7VpJ^&bjJ^&bjJ^(=A!!%N~> !<7V^J\-KFJ\-KFJ\/%r!!%N~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'XO[@OJ_#D'K[p0s!.Y~> !<7VpJ^&bjXN^_FJ^&bjKZsO]!.Y~> !<7V^J\-KFXLeH4J\-KFKY%80!.Y~> !<;H;!!)oIrW(O#rrDrI!!(!h!!(m,!!&)2!!'FXrW(!ir;ba+!!'@V!!)oIrW(R$!!)K=rrBjc rW%N#hpqh$!.Y~> !<;H2!!)o@rW(NorrDr@!!(!_!!(m#!!&))!!'FOrW(!`r;ba"!!'@M!!)o@rW(Qp!!)K4rrBjZ rW%Mohou1c!.Y~> !<;Gu!!)o.rW(N]rrDr.!!(!M!!(lf!!&(l!!'F=rW(!Nr;b`e!!'@;!!)o.rW(Q^!!)K"rrBjH rW%M]hn&o6!.Y~> !<;lGq>gHE!!)iG!!)'1q>gBC!!)oI!!)oI!!)?9rW)uLrW)67!!)97!!)cE!!(a(q>fU-q>fm5 !!)cE!!(a(q>fL*!!)T@!!([&!!)uK!!'IYrVuuM!9)K/!;Y1G!9D`1!<1RK!<(IK!:JD !<;l>q>gH!!)'(q>gB:!!)o@!!)o@!!)?0rW)uCrW)6.!!)9.!!)cfU$q>fm, !!)cfL!!!)T7!!(Zr!!)uB!!'IPrVuuD!9)0&!;Xk>!9DE(!<17B!<(.B!:J)3!7]9m !<:=C!;"G8!.iDPblO".J,~> !<;l,q>gH*!!)i,!!)&kq>gB(!!)o.!!)o.!!)>srW)u1rW)5q!!)8q!!)c*!!(`bq>fTgq>flo !!)c*!!(`bq>fKd!!)T%!!(Z`!!)u0!!'I>rVuu2!9(Ni!;X5,!9Cck!<0V0!<'M0!:IH!!7\X[ !<9\1!;!f&!.hc>])d-VJ,~> !<;iF!!)uK!!)WA!!)$0!!)uK!!)lH!!)ZB!!)<8rrE&L!!(Bs!!(?r"p!6*!7o^$jOOM8f%'iP ec>'f!!(?r"p!6*!7o^$iRRu/o%!d@g"$-'qpkiLec5^Lf)GaJf)>[Jf)GaMf)GaKf)>[Gf)>[I f),O8ec5^Lf)Ga&ec5^1ec5^Jec5^6ec5^!ec5^Kec5^@ec5]$f&,$ZTE'K~> !<;i=!!)uB!!)W8!!)$'!!)uB!!)l?!!)Z9!!)c2I_@ c27S/bl@bCc2Rdrbl@b(bl@bAbl@b-bl@ambl@bBbl@b7bl@`pc/6bHP5p+~> !<;i+!!)u0!!)W&!!)#j!!)u0!!)l-!!)Z'!!);rrrE&1!!(BX!!(?W"ou9I!5#iCjLYTr]=S'T ])_30!!(?W"ou9I!5#iCiO]'io"+l%ft.4aqmuq1])Vj1]Dhm/]D_g/]Dhm2]Dhm0]D_g,]D_g. 
]DMZr])Vj1]Dhl`])Vik])Vj/])Vip])Vi[])Vj0])Vj%])Vh^]AL4$GQ<6~> !<;iF!!)uK!!*#LrW)cF!!)lHrW)<9!!)uK!!*#LrVuuM!<(LJ!<(LG!:A>@!7h/$ec>["r;ciJ rW)uLrW)rKrW)iHrW!#N!!*#Lr;ciJqZ-QFrW)iHr;ciJrr<&MrW)?:&-1;4!7o^$f%'g$f%'g$ f%0g#s47/Lp=9EJf%'iPec>^#r;c`GrW!#N!7q#Ir7:iIqUYWG!S.8Mf),O9ecl-*ec=:P!<(LJ !<1RL!"%3V!7h,Mec5^$rW)fG!!)oIr;ciJqZ-NErW)lIquH3:!!)oI!s$p'!<(IK!<(IK!<1OL !<(IP!7h/$ec>X!!!)oI!!*#L!!*#L!!)uK!!)H[I f)>[Fec5^Hf)>[:ecl-*ec=:P!<1RK!<(LK!!(UL!;tFI!!1XNrRUoIrmq&Krmq&Ks47)Jma_@< rRLrKr7:iIqUPWHrRUuK!7q,LJ_'):!MBFC~> !<;i=!!)uB!!*#CrW)c=!!)l?rW)<0!!)uB!!*#CrVuuD!<(1A!<(1>!:A#7!6kMgblI^er;ciA rW)uCrW)rBrW)i?rW!#E!!*#Cr;ciAqZ-Q=rW)i?r;ciArr<&DrW)?1&-0u"!6ragc-64gc-64g c-?4fs3:NCp<rW!#E!6tB@r6>3@qT]!>!R1WDc27S0bm"0mblH#5!<(1A !<17C!"$mM!6kKDbl@agrW)f>!!)o@r;ciAqZ-N3@qTT!?rQY?B!6tKCJ^*H(!L!M6~> !<;i+!!)u0!!*#1rW)c+!!)l-rW);s!!)u0!!*#1rVuu2!<'P/!<'P,!:@B%!4r6C])_fAr;ci/ rW)u1rW)r0rW)i-rW!#3!!*#1r;ci/qZ-Q+rW)i-r;ci/rr<&2rW)>t&-0>S!5#iC]=S%C]=S%C ]=\%Bs1A71p:CM/]=S'T])_iBr;c`,rW!#3!5&+.r4Dq.qRc_,!P8@2]DMZs]*88I])]IT!<'P/ !<0V1!"$7;!4r42])ViCrW)f,!!)o.r;ci/qZ-N*rW)l.quH2t!!)o.!s#sF!<'M0!<'M0!<0S1 !<'M5!4r6C])_c@!!)o.!!*#1!!*#1!!)u0!!)H!!!)r/!!*#1r;ci/rr !<;iF!s$p'!;b7H!;Y1G!;k=I!<1OL!:A>>!7o^$rRLrKrmq)Lqpk`Iqpk`IlIH+=ec=:P!<1OL !<(IK!<1OL!<(IK!;k=I!;tCJ!<1RL!<1OL!<(IK!<(IK!;G%E!;tCJ!<(IK!<1RL!<1OL!:/29 !;k=Q!7h/$ec=:P!<(IK!:n\@!;tCJ!<(IK!<(IK!<1RL!<1OL!;G%E!;k=I!<1RL!<:UM!<(IK !:/29!;k=I!<1OL!<1RL!!q-U!7h/$ec5^Lec5^Iec5^Jec5^Kec5^Kec5^Gec5^Lec5^Lec5^K ec5^=ec5^IecPp'ec>["!!*#L!!)rJ!!*#LrrDiF!!)rJ!!)rJ!s$p'!9r&7!;Y1G!<(IK!<1RL !!q-U!7h/$ec5^Lec5^Iec5^Gec5^Iec5^Lec5^ !<;i=!s$Tj!;aq?!;Xk>!;k"@!<14C!:A#5!6ragrQPbl@bCbl@bCbl@bB bl@b4bl@b@bl[sjblI^e!!*#C!!)rA!!*#CrrDi=!!)rA!!)rA!s$Tj!9q`.!;Xk>!<(.B!<17C !!pgL!6kMgbl@bCbl@b@bl@b>bl@b@bl@bCbl@b3bm"0mblH#5!;t(A!;t+A!<14C!<14C!<17C !<14C!<(.B!<14C!;t(D!6ragrQP6ArlkECJ^*K)!L!M6~> !<;i+!s#sF!;a;-!;X5,!;jA.!<0S1!:@B#!5#iCrOW%0rk&11qmuh.qmuh.lFR3"])]IT!<0S1 !<'M0!<0S1!<'M0!;jA.!;sG/!<0V1!<0S1!<'M0!<'M0!;F)*!;sG/!<'M0!<0V1!<0S1!:.5s !;jA6!4r6C])]IT!<'M0!:m`%!;sG/!<'M0!<'M0!<0V1!<0S1!;F)*!;jA.!<0V1!<9Y2!<'M0 !:.5s!;jA.!<0S1!<0V1!!p1:!4r6C])Vj1])Vj.])Vj/])Vj0])Vj0])Vj,])Vj1])Vj1])Vj0 ])Vj"])Vj.])r&F])_fA!!*#1!!)r/!!*#1rrDi+!!)r/!!)r/!s#sF!9q)q!;X5,!<'M0!<0V1 !!p1:!4r6C])Vj1])Vj.])Vj,])Vj.])Vj1])Vj!]*88I])]IT!;sG/!;sJ/!<0S1!<0S1!<0V1 !<0S1!<'M0!<0S1!;sG2!5#iCrOW%0m^iH!rOW%0rOW%0rjr.1qmuh.r4Dt/rjr.1J\13Z!I+Tp~> !<;iFr;c]F!!)iG!!)rJ!!)rJ!!)H["!!)?9!!)oIrrDrI!!)uK!!)T@ !!)cE!!*#L!!)rJ!!*#L!!)cE!!)rJ!!)rJ!s$p'!9Vi4!;tCJ!;tCV!7o^$f%'iPec=:P!;tCJ !;tCJ!;G%E!<(IK!;b7H!;tCM!7o^$lIGq8qpkiLf%'j"ec5^Lf(oCGec5^Eec5^Jf(oCGf)5U9 ec5^Hec5^IedMQ0ec=:P!7o^$f%'j!ec5^Jec5^Gec5^Jec5^Jec5^=ecl-*!7h/$!;tCJ!;tCJ !<(IN!7o^$r71rMf%'iuedMQ0ec=:P!7o^$f%'icf)#IHec5^Jec5^Jec5^Jec5^Jec5]$f'Clf TE'K~> !<;i=r;c]=!!)i>!!)rA!!)rA!!)H3r;ciA!!)rA!!)o@!!)o@!!)bl@bCbl@bB bl@b@bl@bBbl@bAbl@b>bl@bBbl@bbl@bc2@Y0 bl@b?bl@b@bmXTsblH#5!6ragc-67dbl@bAbl@b>bl@bAbl@bAbl@b4bm"0m!6kMg!;t(A!;t(A !<(.E!6ragr65 !<;i+r;c]+!!)i,!!)r/!!)r/!!)H!r;ci/!!)r/!!)o.!!)o.!!);r"TZ0H!4r7,])Vj1])Vj0 ])Vj.])Vj0])Vj/])Vj,])Vj0])Vj*])Vj0])Vj.])r&F])_fA!!)>s!!)o.rrDr.!!)u0!!)T% !!)c*!!*#1!!)r/!!*#1!!)c*!!)r/!!)r/!s#sF!9Uln!;sG/!;sG;!5#iC]=S'T])]IT!;sG/ !;sG/!;F)*!<'M0!;a;-!;sG2!5#iClFR#rqmuq1]=S(A])Vj1]D;O,])Vj*])Vj/]D;O,]DV`s ])Vj-])Vj.]*n\O])]IT!5#iC]=S(@])Vj/])Vj,])Vj/])Vj/])Vj"]*88I!4r6C!;sG/!;sG/ !<'M3!5#iCr4<%2]=S(?]*n\O])]IT!5#iC]=S(-]DDU-])Vj/])Vj/])Vj/])Vj/])Vh^]Bd'0 GQ<6~> !<;iF!s$p'!;b7H!;Y1G!;tFE!:JD?!7o^$rmh&Lr71iJqpk`Iqpk`IlIH+=f%'iP!<(LH!<(IN !7o^$qUPWHrRLrKr71iJrRUlHrRLrKpXT6k 
!!)oIquH`I!!)fF!!)cE!!)rJ!!)fFr;c-6!!)rJq>gQH#lrQ-!7o^$f%'j!ec5^Jec5^If),OH ec5^Hf(oCGf)5U:ec5^IecPp'ec>["!!*#L!!)fF!!)cE!!)rJ!!)ZB!!)HX!!!)rJ!!)iG!!)rJq>g!8"p!3Sec=8$r71iJr71iJrRM&Nf%'j!ecPp'ec>Tu #lrQ-!7o^$f%'j#f)5U9ec5^Kec5^Lf(oCEec5^Jec5^Jec5]$f'ClfTE'K~> !<;i=!s$Tj!;aq?!;Xk>!;t+bl[sjblI:Y !!)o@quH`@!!)f=!!)cgQ?#lr5p!6ragc-67dbl@bAbl@b@c27S? bl@b?c2%G>c2@Y1bl@b@bl[sjblI^e!!*#C!!)f=!!)c!!)rAq>g!/"oumJblGugr653Ar653ArQPEEc-67dbl[sjblIXc #lr5p!6ragc-67fc2@Y0bl@bBbl@bCc2%G !<;i+!s#sF!;a;-!;X5,!;sJ*!:IH$!5#iCrjr.1r4;q/qmuh.qmuh.lFR3"]=S'T!<'P-!<'M3 !5#iCqRZ_-rOW%0r4;q/rO_t-rOW%0pU^D*rOW%0qmuq1]=S(A])Vis])Vj.])Vj,])r&F])_B5 !!)o.quH`.!!)f+!!)c*!!)r/!!)f+r;c,p!!)r/q>gQ-#lqTL!5#iC]=S(@])Vj/])Vj.]DM[- ])Vj-]D;O,]DV`t])Vj.])r&F])_fA!!*#1!!)f+!!)c*!!)r/!!)Z'!!)H!!!)l-!!)o.$imoO !5#iC]=S'T])_c@!!)r/!!)i,!!)r/q>fur"ou78])]GCr4;q/r4;q/rOW.3]=S(@])r&F])_`? #lqTL!5#iC]=S(B]DV`s])Vj0])Vj1]D;O*])Vj/])Vj/])Vh^]Bd'0GQ<6~> !<;iF!!)cE!!)iG!!)rJ!!)66!!)uK!W^dOr71iJqpk`Iqpk`IlIGq8rmq)Lrmh&LrRLrKrRM&N f%'itec5^Kec5^Jec5^Lec5^Kec5^Kec5^Eec5^Kec5^IecPp'ec>["!!)?9!!)oI!!)iG!s$p' !:eV?!;tCJ!<(IK!<1OL!;P+F!;G%E!;tCJ!;+hB!:/29!;tCJ!;Y1P!7o^$f%'iPec>X!!!)rJ !!)rJ!!)uK!!)uK!!)lH!!)ZB!!)H !<;i=!!)c!!)rA!!)6-!!)uB!W^IFr653Aqoo*@qoo*@lHK;/rltHCrlkECrQP!s$Tj !:e;6!;t(A!<(.B!<14C!;Oe=!;F_!;t(A!9hZ-!<14C!;k"@ !;t(A!<(.E!6ragr65bl@b3bl@bBbl@bCbl@b;bl@bAbl@bAbl@`p c0NUTP5p+~> !<;i+!!)c*!!)i,!!)r/!!)5p!!)u0!W]h4r4;q/qmuh.qmuh.lFR#rrk&11rjr.1rOW%0rOW.3 ]=S(>])Vj0])Vj/])Vj1])Vj0])Vj0])Vj*])Vj0])Vj.])r&F])_fA!!)>s!!)o.!!)i,!s#sF !:dZ$!;sG/!<'M0!<0S1!;O/+!;F)*!;sG/!;*l'!:.5s!;sG/!;X55!5#iC]=S'T])_c@!!)r/ !!)r/!!)u0!!)u0!!)l-!!)Z'!!)H!!!)u0!!*#1!!*#1rrE#0!!)u0!!*#1!!)c*!!)o.!!)u0 !s#sF!<'M0!:IH!!;a;-!;jA:!5#iC]=S'T])]IT!;sG/!;sG/!;X5,!;sG/!9h#p!<0S1!;jA. !;sG/!<'M3!5#iCr4<%2]=S(?])Vj1]*&,G!4r7,])Vj!])Vj0])Vj1])Vj)])Vj/])Vj/])Vh^ ]Bd'0GQ<6~> !<;iF!!)cE!!)iG!!)oI!!)uK!!)H!;tCJ!<(IK!<(IK!<(IK!<1OL!<1OL!;k=I!;k=I!<(IN!7o^$rRLrKldc%9 qpk`IrRMDXf%'iPec=:P!7o^$!<1OL!;k=I!;tCJ!<(IK!<(IK!<1OL!<(IK!<(IN!7o^$rRLrK mFM49r7:iI!nI>OrRUoIrmpuIr7:cGr7:fHrmpuIm+).:rRM&Nf%'j"ec5^Led;E.ec=:P!7o^$ !<1OL!;k=I!;Y1G!;k=I!<(IK!:JD !<;i=!!)c!!)o@!!)uB!!)H3!!)uB!s$Tj!<17C!;k"@!;k"@!<14C!:J)3!<17C!<14C !<(.B!;t+A!;Xk>!;t(A!<17C!<14C!<(.B!<(.B!<14C!;k"@!;t(A!<(.B!<14C!<(.B!:.l0 !;k"@!;Oh=!:\55!;t(A!<(.B!<(.B!<(.B!<14C!<14C!;k"@!;k"@!<(.E!6ragrQP3@!mL]FrQY9@rlt?@r6>->r6>0?rlt?@m*,M1rQPEEc-67ebl@bCbmFHqblH#5!6rag !<14C!;k"@!;Xk>!;k"@!<(.B!:J)3!<14C!;k"@!;t(A!<(.B!<14C!<17C!<14C!<(.B!<(1B !!:CF!<14C!<(.B!:J)3!<(.B!<(.B!<(.B!;t(A!;t+A!<14C!.iD[blO".J,~> !<;i+!!)c*!!)i,!!)o.!!)u0!!)H!!!)u0!s#sF!<0V1!;jA.!;jA.!<0S1!:IH!!<0V1!<0S1 !<'M0!;sJ/!;X5,!;sG/!<0V1!<0S1!<'M0!<'M0!<0S1!;jA.!;sG/!<'M0!<0S1!<'M0!:.5s !;jA.!;O2+!:[T#!;sG/!<'M0!<'M0!<'M0!<0S1!<0S1!;jA.!;jA.!<'M3!5#iCrOW%0lam,s qmuh.rOWL=]=S'T])]IT!5#iC!<0S1!;jA.!;sG/!<'M0!<'M0!<0S1!<'M0!<'M3!5#iCrOW%0 mCW;sr4Dq.!kSF4rO`".rk&(.r4Dk,r4Dn-rk&(.m(35trOW.3]=S(A])Vj1]*\PM])]IT!5#iC !<0S1!;jA.!;X5,!;jA.!<'M0!:IH!!<0S1!;jA.!;sG/!<'M0!<0S1!<0V1!<0S1!<'M0!<'P0 !!9b4!<0S1!<'M0!:IH!!<'M0!<'M0!<'M0!;sG/!;sJ/!<0S1!.hcI])d-VJ,~> !<;lGr;c`GquH]HquHZGr;c9:q>gNGrW!#N!!*#LquHZGrW)B;rW)rK!!)uKr;ZlL!<(LK!;k@F !<(LJ!!1XNrRUoI!S.8Lf)>[If),OHf)5UJf)>[Lf)>[;f),OIf),OEec5^@f),OHf)5RLec>[" r;ccHrW)lIquHZGr;ciJquH*7quHZGr;clKrW!5T!!(RM!7h/$rW)lIquH]Hr;ZlL!<(LJ!;k@G !<1RI!:8;:!<1OL!4UPX!<(LI!<1RK!!q-U!7h,Mec=;#!;tFG!<(LH!;tFH!:A>;!<1OL!<(LH !<:XL!<:XL!<1RK!!:^O!<1RJ!;tCJ!<1OL!<1RI!:JG;!<:XL!<(LI!<(LH!<1OM!7q,LJ_'): !MBFC~> !<;l>r;c`>quH]?quHZ>r;c91q>gN>rW!#E!!*#CquHZ>rW)B2rW)rB!!)uBr;ZlC!<(1B!;k%= 
!<(1A!!1=ErQY9@!R1WCc2I_@c27S?c2@YAc2I_Cc2I_2c27S@c27Sr;ciAquH*.quHZ>r;clBrW!5K!!(7D!6kMgrW)l@quH]?r;ZlC!<(1A!;k%> !<17@!:7u1!<14C!4U5O!<(1@!<17B!!pgL!6kKDblH#f!;t+>!<(1?!;t+?!:A#2!<14C!<(1? !<:=C!<:=C!<17B!!:CF!<17A!;t(A!<14C!<17@!:J,2!<:=C!<(1@!<(1?!<14D!6tKCJ^*H( !L!M6~> !<;l,r;c`,quH]-quHZ,r;c8tq>gN,rW!#3!!*#1quHZ,rW)AurW)r0!!)u0r;Zl1!<'P0!;jD+ !<'P/!!0\3rO`".!P8@1]D_g.]DM[-]DVa/]D_g1]D_fu]DM[.]DM[*])Vj%]DM[-]DV^1])_fA r;cc-rW)l.quHZ,r;ci/quH)qquHZ,r;cl0rW!59!!'V2!4r6CrW)l.quH]-r;Zl1!<'P/!;jD, !<0V.!:7>t!<0S1!4TT=!<'P.!<0V0!!p1:!4r42])]JB!;sJ,!<'P-!;sJ-!:@Au!<0S1!<'P- !<9\1!<9\1!<0V0!!9b4!<0V/!;sG/!<0S1!<0V.!:IJu!<9\1!<'P.!<'P-!<0S2!5&41J\10Y !I+Tp~> !<7W$kgf_6Xk!IPLt2P+][d)`f)5T>ec5]$f&YB^!.i__ecDEDJ,~> !<7Vpkfj)-Xj$hGLs5o"]ZgHWc2@X5bl@`pc/d+L!.iDVblO".J,~> !<7V^kdpfpXh+Q5Lq !<;oHp]/mrp]/mrp]0:(!!'jdp]1! !<;o?p]/mip]/mip]09t!!'j[p]1!3!!&5-o`0Qfm`kM,m`b_3ccu1a]Zp3Om`b_3J^*<$!L!M6~> !<;o-p]/mWp]/mWp]09b!!'jIp]1!!!!&4po`0QTm^r5om^iH!cb&oO]Y!q=m^iH!J\1$U!I+Tp~> !<7W$mFM49Xk*FNMUq_+^Xi/[T[s&AJ_'#8r;_E"kLK[,!.Y~> !<7VpmEPS0Xj-eEMTu)"^WlNRT[!E8J^*B&r;_DnkKO$k!.Y~> !<7V^mCW;sXh4N3MS&fe^Us7@TY(.&J\1*Wr;_D\kIUb>!.Y~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !WU`%TRibeTRibeTRic=TE"tB~> !WUDqPC\pKPC\pKPC\q#P5kT5~> !WTc_G_(*jG_(*jG_(+BGQ7_o~> !<7TMJH16$JH16$JH2bOJ,~> !<7TMJH16$JH16$JH2bOJ,~> !<7TMJH16$JH16$JH2bOJ,~> JcC<$JcC<$JcC<$X8d\~> JcC<$JcC<$JcC<$X8d\~> JcC<$JcC<$JcC<$X8d\~> !<7W$J_#D'J_#D'J_$sS!!%N~> !<7VpJ^&bjJ^&bjJ^(=A!!%N~> !<7V^J\-KFJ\-KFJ\/%r!!%N~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<7W$J_#D'J_#D'J_%!T!MBFC~> !<7VpJ^&bjJ^&bjJ^(@B!L!M6~> !<7V^J\-KFJ\-KFJ\/(s!I+Tp~> !<;iF$.R51XKAV+\BNI(ec=7&ed9mN] !<;i=$-U8mUS=KXYf"YcblGtibmDV3[&TstVn:!nJ^&bjJ^&bjJ^*l4!L!M6~> !<;i+$+dI;Q'.;qTsD82])]FE]*Z'SVP'HIR] !<;oH%Frn7OD@p,@8gWnYL(hEecMJje`t]dS^dYi!7gGe!4DH]!3#A;!7(G'!57dN$e2i"eE#]C ]T]/6eUc;&eq)D'eq)EGecDEDJ,~> !<;o?%EuqqI7c+G0.K;6VT6p*blX !<;o-"hM16Cc373(^)R-QaXKM])mo!]'?m.M7W\u!4qO/!1rh4!0cl`!4;TP!2](p$bE1?g$S:j `fQ:e\q.JE]7ISF]7ITf])d-VJ,~> !<;rI&(T$lKos4'f@%dBIs6jte^)eLUKM`bea(cj(Q6Yjf%&= !<;r@&'W(R>=`"2A7/J'/kU`9bg4N:S6p9Ubj3LX(PToRc-4E!blVZOCAhcF>5V.s5st+c#du!q AQ_r.-A1a2%Eu0#oChY:j5/COPJbUqc%4,jc%4,jc1fH`P5p+~> !<;r.&%]3"5pRs7/Lr"T$8Zk5]$ItkOB6)%]'Hs4'md="]=PT@])lG4@f9U5:&I?Z2*LE?#c8h[ >YdmZ+*d4c%D&C[q"j^UnaG\1LU"FA]7ISF]7ISF]D&o !<;uJ&_56gK#?R+h:^6"`P/0kL !<;uA&^8:J7Ug4E>#J*g7moBtI)jJ$blGEQ9)tLIbm'2aZTN*EbjN^\bf^i9J]G3Qr_!9>Z16'? 
CQ-[>blO`"rJ:Pf6gr8*[Ro2rUlC-pKC6tn^;T^@p[@G0hqQe7VT=>5J^&bjJ^&bjpWW`&!.Y~> !<;u/&\>Do-SRA&)%dDi%LiddE4*:I])\r/6i`84]* !<;uJ&^m"tc/e!Kj5/D6aMGHeEKAQ !<;uA&]p&%A8YXT=B/3m8OYs-4,:rJbmCO1TpKt;B/P+\m*,g(GLXWS3H<'_o$%0?r@89p`F<@: ?=Jk/=Y>l\o?@b\)C['_.4?8T(`=/-&4>p2#e!:R`L2!m2hUSD&'VEHp\FRSmH !<;u/&\!?>0/FCW&ePZc%LrpV+a^]g]*Y)_N.5/-9-nVim(3O]Dn_oE.qc6#o"+pt./kS&'95Xh 1)V/i+rD\]]( !<<#K'@aIF^Z";-^sh5qbdY*o[$c'AeFV5ue[%BFWchTJ.=fn7edANfbI=9]<@N+5eaq?)Z9cXU dFQ=B886]BoBY#,5*F=cec=+rb65qh0:'`Yb5flWed/uX^o)?Z<[`gnedp3Io(W%Vp$qG3gXXf# ][VM`J_#D'J_#D'psoJ=!.Y~> !<<#B'?dO`?Y`kI:fCCn:.duB4uQ*6bO`scbc=+T`L(pm2gWX6bmL=[iRQ#&Cbp+Jbk''lW^YRn hWF(P=&6*Ed`8VB/r[mDblGi`_Z[fQ09F!J_Z7aGbm:aKe\mo%F#J[>bn%q4o(W%Vp$qG3hV6b2 [*+$FJ^&bjJ^&bjprri'!.Y~> !<<#0'=sc'/h[nQ(D7;l&f22k$k#"_](=Ylq:u.YFr'YlM3#]*P9$^8,[C9-\]M]+;Bdq"jpbq=jUSmcs.c Uq2,hJ\-KFJ\-KFpq$QO!.Y~> !<<#K'@)`!gu7.k[C40'[(X5f[&e>'bk'BmeZq;U _R$eia2G;ZNpZcBqrNc:[++Ws#1KGcM^Fb;ldc?U6+j.>>#m&.n(%Tdbjb<)p^$Tbm,-@BJ@3jW eq)D'eq)D'f(desTE'K~> !<<#B'?,GgCKjt]Rs.h1D4u\_5 !<<#0'=;[$1`7$*NE7#499X0;%1ESTZLb[8\skddUi0%%,]C0[]*sU)d'^ !<<#K'=r(mg"tT)rq"=CbOk,E[&f6m[.DiUeZq?GVfl6E."Kb7eddsYcFLEnUhX498LeW'eb.K, YW&)1O-GusQB*nmq#:6a^1EDQo%")rVIH#m%AE,^c]o@=GtaR/Qk q"a^Xlep=AI^RXUeq)D'eq)D'f(desTE'K~> !<<#B'=(PA>ZG%(roAuW^\#Nd5W^VaXn0dEbc4(V`L;'n2Lr?B:bk93o WBRbkYd_3S]!Ot]hrWY;PXpYOo$%H`T4>%0>rIV^nBDNdQYUAD7*W]>H#kl3CMS[Eo?I !<<#0';IiI*t]ERrnV?b](EC(%gr[tT(BQ$\skgfUi0%%,B('[]+0F)c+19kUL@1t6RH3J](NZK RPKbBGC4jfI<6Vs?rp6"1)t_No",1=O^+ZG6S*_rn@K7@MIL$r4L(`%:J2or@UaYjo=GO?aS#<$ q>:'bnaPf.K:]W)]7ISF]7ISF]D/u=GQ<6~> !<<#K'sgK7e+)!#e"S?]DMPEpG?S*:0e3`qd.>g! d^SH,n)a',lJgOHdVukKJ_#D'J_#D'J_'VI!MBFC~> !<<#B';RSFk&7bfd"ScGc5tF#JW*lY`N0J\-KFJ\-KFJ\1]h!I+Tp~> !<<#K'<><_cdg^ual`U%pu0B-Z)O'XV"W@NeV#J),sAW6G?S#q)_jU.A,/\edg0-1kDd3WcqWM=A0Qo ddks!eZ@E_j5/kVfZ_joZuj?>eq)D'eq)D'f([_rTE'K~> !<<#B';7!b9MeVfIG=c#lD&)f4ZP86RJ+l:b^M&m,=KeqRX&s4)Cd7Ebk''j[[2tu8_hb5JSHT1 K8?#6bP'0oW\]cfJV&N.Ik>3AiTf@IX@mECo?@ZT4M/VDXEJS`/XXcPbmqpj32O^;`gV0tG%DhT an![dbbs.Yl/Lphh:1WO]l1Z:c%4,jc%4,jc1fH`P5p+~> !<<#0'9X1^%hK,`B8(]+2B8/paS"W,PO.:-&r9 \+7-@\t.mLn)X$/k1]%`a_%],]7ISF]7ISF]D&o !<<#K'^`T7$tptNQkY,7+?OB6U"&V`%+Vm=H).E`>$4HD OjigN%E,!sg;V[pc-k%^B!9U5eq)D'eq)D'f([_rTE'K~> !<<#B';6s^8PDlWGM)oom$CL44#\`(RJ+l:bIpVj]#;\0RX'*q@o"M_b4Ejhbf@W$8(l;-IVC0E ZbODubkB9pW?>\W?=JG*/QJq'jPnn0O>_'ip!!uiXujbB\[JS_F)Y5'R`k6G%*Yt0R`!+5H$3[L Mp(V7%D8@nk0N#DeD]ZDAuE_$c%4,jc%4,jc1fH`P5p+~> !<<#0'9X1^%M'*c;:GODk'8!W$O[4AMtXao\u\A'S#E?bE)05"5:IhU\F[tc5H Up%&3](W`LRLh/%.4mtJ'.-4F'\%BQ8hlHe\Xe)]`MAsp,Z]7ISF]7ISF]D&o !<<#K' !<<#B';R3[7S$'gchZkirp5hA3Ai6#SG(2=bf7.jEkTN5RX'.":,oDpbjipdbd0gld(k'D-uHT> bk0-nZTU9,2\0LH4+kK(g==EPED`";p<=/lT.=o6O1;X"G&'i<57iptnBD9?8WUZUG[ !<<#0':'Ua%Ls%Ganb#Vro\JH$jm4FNqU'r]!SD/;4=PaE)08%.P7:!](*B@\thO%Z(,&m*Fl.\ ](ETJUc:.c1'fTF'ei3U;,9kQ+>&DRp:CmHOXOshI\*$f<_YP+/e*HBn@K!s3GY:,:,XJK\*pp< \t,DQd_sDGgtH_!Z+>LuJ\-KFJ\-KFpU^HN!.Y~> !<<#K'>&.[]tgAZrq>1"o`"$dVNu!s[Ir)\eBYg2<1'Y/>$!^OVomNXedB)R\Z_H2;BBhreFV6) e&oG;\d^AX4.bL%oB4Z/87B$Tp=9f&R:g/:32`7+?;Vb0B4lqDnC@lpV,;)J9/MXTmFDZp\!k=U b0Ie*S]TE5eq)D'eq)D'f(RYqTE'K~> !<<#B'=1V-6Ua87roT]2jSm2.3&2[?Y4^$LbJgqlD7RWSH#ubMTYen@bmLjJeBGRPBI@Z0bO`sl bJL`pZ4B)[/pk:Rbe^N@2I3Q-p<=/iP%/!(5J=J+G%W0e@q1&.nBD6^T3$l5A3,LdmEH$^YEcr@ a2kemQGLdrc%4,jc%4,jc1]B_P5p+~> !<<#0';S#:%1O#9rni0Dh>XrI$jm.mT(T]*]!SA0:R.hs:J*)rP-MUe]*bAt\?;3%9,qHC](+(bA3N9h@]-)-*B=p:CmFKj%YV32W.&<_=>l>$?'Un@Jt;O%3kr3[]9_mCNb;To$(g ]X+,)M6ORB]7ISF]7ISF]Cri;GQ<6~> !<<#K'@)_h\@JZEh:KNZd,WEXTS-0obk0HpeBYO/>b-%@3^][ae*bco]eYh-E`>^TEQ$Auee?T_ d*KuSL88KLVod#kUdJ(Yajfn4#eiL+RZ1WN*f946#1K;57jX8DldcBk^@1@Fe?3;te:H2%eq)D' 
rrDusrrDZj$3:,+!<<'!s8N*!rW)iqrW)TjrrE*!rrDusrW)osqZ-QorW)osrrE*!rrDusrW)rt rr<'!rW)iqrW!!!!<)rs!:g'h!<)rt!!*&u!;ulr!!WB&s8N'!rr;uur;Z`r!ri9#qZ-QorW)iq r;cisrr<'!rW)WkrW)rtrW)uur;ccqquHHjrr<'!rW)lrr;cisqZ-9gqZ-Tpr;cisrrE&urrE*! rr<'!rW)lrrW!!!!;6Bk])hqVrk![~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,L!s&B$!;uj#!<3'!rrE&u#QXl)s8N*!rrE#t!!)ut "T\Q&s8N)ts8N)grr<&ss8N)urr<&trr<&urr<&krriE&!<<'!rr2rurr2rur;Q`sqYpNqrr2ru rVm$$rrE*!!<2uu!<2uu!<3#u!<2uu!<)ot!<3#u!<2uu!<2uu!:p-i!;uls!<2uu!<2uu!<3#u !<2uu!<)ot!<)ot!<3#u!<2uu!;HKn!;uis!<)ot!<3#u!<2uu!;6?l!;uj!!<<'!rVlitrr2ru rVlitp&G$lrr2rurr2rurVlitrVlitn,E@fqu6WrrVlitrr2rurVlitrr;uurr2rurr2rurr;uu p&G$B!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKC!s&B$!;uj#!<3'!rrE&u#QXl)s8N*!rrE#t!!)ut "T\Q&s8N)ts8N)grr<&ss8N)urr<&trr<&urr<&krriE&!<<'!rr2rurr2rur;Q`sqYpNqrr2ru rVm$$rrE*!!<2uu!<2uu!<3#u!<2uu!<)ot!<3#u!<2uu!<2uu!:p-i!;uls!<2uu!<2uu!<3#u !<2uu!<)ot!<)ot!<3#u!<2uu!;HKn!;uis!<)ot!<3#u!<2uu!;6?l!;uj!!<<'!rVlitrr2ru rVlitp&G$lrr2rurr2rurVlitrVlitn,E@fqu6WrrVlitrr2rurVlitrr;uurr2rurr2rurr;uu p&G$9!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41!s&B$!;uj#!<3'!rrE&u#QXl)s8N*!rrE#t!!)ut "T\Q&s8N)ts8N)grr<&ss8N)urr<&trr<&urr<&krriE&!<<'!rr2rurr2rur;Q`sqYpNqrr2ru rVm$$rrE*!!<2uu!<2uu!<3#u!<2uu!<)ot!<3#u!<2uu!<2uu!:p-i!;uls!<2uu!<2uu!<3#u !<2uu!<)ot!<)ot!<3#u!<2uu!;HKn!;uis!<)ot!<3#u!<2uu!;6?l!;uj!!<<'!rVlitrr2ru rVlitp&G$lrr2rurr2rurVlitrVlitn,E@fqu6WrrVlitrr2rurVlitrr;uurr2rurr2rurr;uu p&G$'!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,Lr;ccqrrDoqrrDus!!)or!!*#urrD3]!!)rs!!)ut !!*#u!!)rs!!)]lrrDrr!!)rs!!)ut!!)or!!)rs!!*#urrDrr!!)rs!s&B$!<)ot!<2uu!;uj! !<<'!r;Q`soD\djr;Q`srVls"s8N)srr<&urr<&trr<&urr<&srr<&urr<&nrr<&trr<&rrrW9$ rrE#t!!)]l"p"]'!<<'!qYpNqrr2runG`IgrVls"s8N)rrr<&urr<&frr<&srr<&rrrW9$rrE#t !!*#u!!)ut!s&B$!;uis!;6BkecGfDrmlT~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKCr;ccqrrDoqrrDus!!)or!!*#urrD3]!!)rs!!)ut !!*#u!!)rs!!)]lrrDrr!!)rs!!)ut!!)or!!)rs!!*#urrDrr!!)rs!s&B$!<)ot!<2uu!;uj! !<<'!r;Q`soD\djr;Q`srVls"s8N)srr<&urr<&trr<&urr<&srr<&urr<&nrr<&trr<&rrrW9$ rrE#t!!)]l"p"]'!<<'!qYpNqrr2runG`IgrVls"s8N)rrr<&urr<&frr<&srr<&rrrW9$rrE#t !!*#u!!)ut!s&B$!;uis!;6BkblRj.rlor~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41r;ccqrrDoqrrDus!!)or!!*#urrD3]!!)rs!!)ut !!*#u!!)rs!!)]lrrDrr!!)rs!!)ut!!)or!!)rs!!*#urrDrr!!)rs!s&B$!<)ot!<2uu!;uj! 
!<<'!r;Q`soD\djr;Q`srVls"s8N)srr<&urr<&trr<&urr<&srr<&urr<&nrr<&trr<&rrrW9$ rrE#t!!)]l"p"]'!<<'!qYpNqrr2runG`IgrVls"s8N)rrr<&urr<&frr<&srr<&rrrW9$rrE#t !!*#u!!)ut!s&B$!;uis!;6Bk])hqVrk![~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,L!s&B$!;uis!;ZWp!;lcr!;lcr!<2uu!9O4\!;uis !<)ot!<3#p!;6?l!;c`l!<)ot!;lfm!<2uu!;c`l!<<'!!<)ot!<2uu!;ZZk!;$3j!;uis!<)p" !<<'!r;Q`srr2rurVlitrr2ruq#: rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKC!s&B$!;uis!;ZWp!;lcr!;lcr!<2uu!9O4\!;uis !<)ot!<3#p!;6?l!;c`l!<)ot!;lfm!<2uu!;c`l!<<'!!<)ot!<2uu!;ZZk!;$3j!;uis!<)p" !<<'!r;Q`srr2rurVlitrr2ruq#: rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41!s&B$!;uis!;ZWp!;lcr!;lcr!<2uu!9O4\!;uis !<)ot!<3#p!;6?l!;c`l!<)ot!;lfm!<2uu!;c`l!<<'!!<)ot!<2uu!;ZZk!;$3j!;uis!<)p" !<<'!r;Q`srr2rurVlitrr2ruq#: rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,L!!)ut!!*#u!!)ip!!)or!!)or!!*#u!!)-\!!)rs !!)ut!!*#u!!)Kf!!)lq!!)cn!!)or!!)fo!!)lq!!)ip!!)ut!!*#u!!)ip!!)Ed!!)rs!!)ut !s&B$!;uis!<2uu!<)ot!<2uu!;QQo!;HKn!<)ot!;lcu!<<'!rVlito`#$orrE'!rr2rurVlit q>UEpp&>!lrVls"s8N)rrr<&urr<&frr<&srr<&rrrW9$rrE#t!!*#u!!)ut!s&B$!;uis!;6Bk ecGfDrmlT~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKC!!)ut!!*#u!!)ip!!)or!!)or!!*#u!!)-\!!)rs !!)ut!!*#u!!)Kf!!)lq!!)cn!!)or!!)fo!!)lq!!)ip!!)ut!!*#u!!)ip!!)Ed!!)rs!!)ut !s&B$!;uis!<2uu!<)ot!<2uu!;QQo!;HKn!<)ot!;lcu!<<'!rVlito`#$orrE'!rr2rurVlit q>UEpp&>!lrVls"s8N)rrr<&urr<&frr<&srr<&rrrW9$rrE#t!!*#u!!)ut!s&B$!;uis!;6Bk blRj.rlor~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41!!)ut!!*#u!!)ip!!)or!!)or!!*#u!!)-\!!)rs !!)ut!!*#u!!)Kf!!)lq!!)cn!!)or!!)fo!!)lq!!)ip!!)ut!!*#u!!)ip!!)Ed!!)rs!!)ut !s&B$!;uis!<2uu!<)ot!<2uu!;QQo!;HKn!<)ot!;lcu!<<'!rVlito`#$orrE'!rr2rurVlit q>UEpp&>!lrVls"s8N)rrr<&urr<&frr<&srr<&rrrW9$rrE#t!!*#u!!)ut!s&B$!;uis!;6Bk ])hqVrk![~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,L!!)ut!!*#u!!)ip!!)lq!!)ut!!)ut!!)forrDQg !!)rs!!)ut!!)ut!!)ut!!)]l!!)ip!!)ut!!)ut!!)lq!!)ut!!*#u!!)ip!!)ut!s&B$!<)ot !<)ot!<)ot!<2uu!<)ot!;$3j!;uis!<)ot!<2uu!<3#u!<2uu!<3#u!<)ot!<)ot!<2uu!<2uu !;lcr!;uis!<)ot!<2uu!<)ot!;- rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKC!!)ut!!*#u!!)ip!!)lq!!)ut!!)ut!!)forrDQg !!)rs!!)ut!!)ut!!)ut!!)]l!!)ip!!)ut!!)ut!!)lq!!)ut!!*#u!!)ip!!)ut!s&B$!<)ot !<)ot!<)ot!<2uu!<)ot!;$3j!;uis!<)ot!<2uu!<3#u!<2uu!<3#u!<)ot!<)ot!<2uu!<2uu !;lcr!;uis!<)ot!<2uu!<)ot!;- rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41!!)ut!!*#u!!)ip!!)lq!!)ut!!)ut!!)forrDQg !!)rs!!)ut!!)ut!!)ut!!)]l!!)ip!!)ut!!)ut!!)lq!!)ut!!*#u!!)ip!!)ut!s&B$!<)ot !<)ot!<)ot!<2uu!<)ot!;$3j!;uis!<)ot!<2uu!<3#u!<2uu!<3#u!<)ot!<)ot!<2uu!<2uu !;lcr!;uis!<)ot!<2uu!<)ot!;- rmh2Of)MCos+13$s5!_TTE"uiecGfDrmh)Lq>gQqquH]qquHZpr;cfrquHWorrDWiquHcsrW)uu rW)osr;cNjquHWor;cisquHWor;cisquHWor;cltrW)uurW)osr;ccqr;cKiquHcsrW)uurW)rt rW!$"!!*#urW!$"!!)utr;ccqrW)lrquH]qr;cisrW)uurW)Wk!!*#u!!)utr;Zs"!<<)s!;6Bk !<<)u!<3#s!;lfq!;- rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlkHCq>gQqquH]qquHZpr;cfrquHWorrDWiquHcsrW)uu rW)osr;cNjquHWor;cisquHWor;cisquHWor;cltrW)uurW)osr;ccqr;cKiquHcsrW)uurW)rt rW!$"!!*#urW!$"!!)utr;ccqrW)lrquH]qr;cisrW)uurW)Wk!!*#u!!)utr;Zs"!<<)s!;6Bk !<<)u!<3#s!;lfq!;- rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrjr11q>gQqquH]qquHZpr;cfrquHWorrDWiquHcsrW)uu rW)osr;cNjquHWor;cisquHWor;cisquHWor;cltrW)uurW)osr;ccqr;cKiquHcsrW)uurW)rt rW!$"!!*#urW!$"!!)utr;ccqrW)lrquH]qr;cisrW)uurW)Wk!!*#u!!)utr;Zs"!<<)s!;6Bk !<<)u!<3#s!;lfq!;- rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> 
rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)$Yrr@WM!!%TMP5kO4!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)$Yrr@WM!!%TMP5kO+!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)$Yrr@WM!!%TMP5kNn!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)9`!!)lq!!'8'!!(LJ!!(" rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)9`!!)lq!!'8'!!(LJ!!(" rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)9`!!)lq!!'8'!!(LJ!!(" rmh2Of)MCos+13$s5!_TTE"uiecGfDrmp01!!)lq!!'8'!!&>b!!%TMci=!q!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlsO(!!)lq!!'8'!!&>b!!%TMci=!h!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk%7k!!)lq!!'8'!!&>b!!%TMci=!V!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,LrW)osrr<'!rW)WkqZ-Tp! rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKCrW)osrr<'!rW)WkqZ-Tp! rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41rW)osrr<'!rW)WkqZ-Tp! rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)rsrrE&u!!)Zk!!)lqrrE&u!!)ut!!*#u!!)Zk !!*#urrE*!!!)ut!!)ut"T\Q&s8N)urriE&!<<'!rr2rurr2rurr;uurr2rurVlitnG`Igrr2ru rr;uurr2rurr2rurVlitqu6Wrqu6p%rrE*!!<<'!rVlitrr;uurr2rurr;uu"TJH%rrE&u!!*#u !!*#urrE&u!!)ut!!%TMci=!q!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)rsrrE&u!!)Zk!!)lqrrE&u!!)ut!!*#u!!)Zk !!*#urrE*!!!)ut!!)ut"T\Q&s8N)urriE&!<<'!rr2rurr2rurr;uurr2rurVlitnG`Igrr2ru rr;uurr2rurr2rurVlitqu6Wrqu6p%rrE*!!<<'!rVlitrr;uurr2rurr;uu"TJH%rrE&u!!*#u !!*#urrE&u!!)ut!!%TMci=!h!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)rsrrE&u!!)Zk!!)lqrrE&u!!)ut!!*#u!!)Zk !!*#urrE*!!!)ut!!)ut"T\Q&s8N)urriE&!<<'!rr2rurr2rurr;uurr2rurVlitnG`Igrr2ru rr;uurr2rurr2rurVlitqu6Wrqu6p%rrE*!!<<'!rVlitrr;uurr2rurr;uu"TJH%rrE&u!!*#u !!*#urrE&u!!)ut!!%TMci=!V!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#u!!)rs!!)`m !!)rs!s&B$!<)ot!<)rt!;c`q!;lcr!;uj!!<<'!rVlitrVlitnc&Rhr;Qj!s8N)trr<&urr<&t rr<&rrr<&rs8N)srr<&rrrW9$rrE#t!!*#u#lt#*!<<'!s8N)srrW9$rrE#t!!)ut!!%TMci=!q !WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#u!!)rs!!)`m !!)rs!s&B$!<)ot!<)rt!;c`q!;lcr!;uj!!<<'!rVlitrVlitnc&Rhr;Qj!s8N)trr<&urr<&t rr<&rrr<&rs8N)srr<&rrrW9$rrE#t!!*#u#lt#*!<<'!s8N)srrW9$rrE#t!!)ut!!%TMci=!h !WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#u!!)rs!!)`m !!)rs!s&B$!<)ot!<)rt!;c`q!;lcr!;uj!!<<'!rVlitrVlitnc&Rhr;Qj!s8N)trr<&urr<&t rr<&rrr<&rs8N)srr<&rrrW9$rrE#t!!*#u#lt#*!<<'!s8N)srrW9$rrE#t!!)ut!!%TMci=!V !WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#uq>g6h!!)ip !!)ut!!)ut!!)ip!!)lqq>gQq!!)ut!!)ut!!)Qhq>gQq!!)ut!!)ut!s&B$!;c]q!;lcr!;lcr !;lcu!<<'!rVlitrr36(s8N*!rrE*!q>gQq!!)ut!!)ut!!%TMci=!q!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#uq>g6h!!)ip !!)ut!!)ut!!)ip!!)lqq>gQq!!)ut!!)ut!!)Qhq>gQq!!)ut!!)ut!s&B$!;c]q!;lcr!;lcr !;lcu!<<'!rVlitrr36(s8N*!rrE*!q>gQq!!)ut!!)ut!!%TMci=!h!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#uq>g6h!!)ip !!)ut!!)ut!!)ip!!)lqq>gQq!!)ut!!)ut!!)Qhq>gQq!!)ut!!)ut!s&B$!;c]q!;lcr!;lcr !;lcu!<<'!rVlitrr36(s8N*!rrE*!q>gQq!!)ut!!)ut!!%TMci=!V!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#u!!)Ng!!)ip !!)ut!!)ut!!)ip!!)lq!!)ip!!)ut!!)ut!!)Qh!!)ip!!)ut!!)ut!s&B$!;c]q!;lcr!;lcr !;lcu!<<'!rVlitrr39)s8N*!rrE*!!;ZWp!<)ot!<)ot!.k0rs8LRMs.KABJ,~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#u!!)Ng!!)ip 
!!)ut!!)ut!!)ip!!)lq!!)ip!!)ut!!)ut!!)Qh!!)ip!!)ut!!)ut!s&B$!;c]q!;lcr!;lcr !;lcu!<<'!rVlitrr39)s8N*!rrE*!!;ZWp!<)ot!<)ot!.k0rs8L7Ds-*H,J,~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)rs!!)ut!!)Zk!!)lq!!)ut!!*#u!!)Ng!!)ip !!)ut!!)ut!!)ip!!)lq!!)ip!!)ut!!)ut!!)Qh!!)ip!!)ut!!)ut!s&B$!;c]q!;lcr!;lcr !;lcu!<<'!rVlitrr39)s8N*!rrE*!!;ZWp!<)ot!<)ot!.k0rs8KV2s*4OTJ,~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq&J!!)rs!!)ut!!)Zk!!*#u!!*#u!!)ut!!)ut!!)ut !!)]l!!)ut!s&B$!<3#u!<)ot!;ZWp!;ZWp!<)p"!<<'!rVlitrVlitrr2ruo`"mkrVls"s8N)t rr<&ss8N)prr<&rrr<&qrr<&trr<&urr<&trr<&urrrK'rrE*!!<2uu!<)p"!<<'!rVlitrVlit rr2ruJcF4!rmh,MT`3Mm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltEA!!)rs!!)ut!!)Zk!!*#u!!*#u!!)ut!!)ut!!)ut !!)]l!!)ut!s&B$!<3#u!<)ot!;ZWp!;ZWp!<)p"!<<'!rVlitrVlitrr2ruo`"mkrVls"s8N)t rr<&ss8N)prr<&rrr<&qrr<&trr<&urr<&trr<&urrrK'rrE*!!<2uu!<)p"!<<'!rVlitrVlit rr2ruJcF4!rlkKDPQ&gW~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&./!!)rs!!)ut!!)Zk!!*#u!!*#u!!)ut!!)ut!!)ut !!)]l!!)ut!s&B$!<3#u!<)ot!;ZWp!;ZWp!<)p"!<<'!rVlitrVlitrr2ruo`"mkrVls"s8N)t rr<&ss8N)prr<&rrr<&qrr<&trr<&urr<&trr<&urrrK'rrE*!!<2uu!<)p"!<<'!rVlitrVlit rr2ruJcF4!rjr42GlG=*~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmq,LquHcsrW)uurW)TjrW)rtrW)uurW)osr;cHhr;cfr rW!*$!!*'!quH]qquHWor;cltrW)uurW)osrW)Nhr;cltrW)uurW)osrrDrrquH`rquHZpr;cis rW)uurVururW!-%!!*$!!<3#s!<<)u!<<)u!<)rs!.k0us8LRMs.KABJ,~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rltKCquHcsrW)uurW)TjrW)rtrW)uurW)osr;cHhr;cfr rW!*$!!*'!quH]qquHWor;cltrW)uurW)osrW)Nhr;cltrW)uurW)osrrDrrquH`rquHZpr;cis rW)uurVururW!-%!!*$!!<3#s!<<)u!<<)u!<)rs!.k0us8L7Ds-*H,J,~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk&41quHcsrW)uurW)TjrW)rtrW)uurW)osr;cHhr;cfr rW!*$!!*'!quH]qquHWor;cltrW)uurW)osrW)Nhr;cltrW)uurW)osrrDrrquH`rquHZpr;cis rW)uurVururW!-%!!*$!!<3#s!<<)u!<<)u!<)rs!.k0us8KV2s*4OTJ,~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> 
rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> 
rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> 
rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> 
rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> 
rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> 
rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCos+13$s5!_TTE"uiecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkQFc2WuYs+13$s5!_TP5kU\blRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr:4]Dm,,s+13$s5!_TGQ7aA])hqVrk!a]JcC<$li6sr!WRED]79~> rmh2Of)MCoeq)D'f&"s[s.H"br71oKT`3Mns+13$s6K^aecGfDrmlT~> rlkQFc2WuYc%4,jc/-\Is-&WHr659BPQ&gXs+13$s6K^ablRj.rlor~> rjr:4]Dm,,]7ISF]AC.%s*/bgr4<"0GlG=+s+13$s6K^a])hqVrk![~> rmh2Of)MCoeq)D'f&"s[s.H"br71oKT`3Mns+13$s6K^aecGfDrmlT~> rlkQFc2WuYc%4,jc/-\Is-&WHr659BPQ&gXs+13$s6K^ablRj.rlor~> rjr:4]Dm,,]7ISF]AC.%s*/bgr4<"0GlG=+s+13$s6K^a])hqVrk![~> rmh2Of)MCoeq)D'f&"s[s.H"br71oKT`3Mns+13$s6K^aecGfDrmlT~> rlkQFc2WuYc%4,jc/-\Is-&WHr659BPQ&gXs+13$s6K^ablRj.rlor~> rjr:4]Dm,,]7ISF]AC.%s*/bgr4<"0GlG=+s+13$s6K^a])hqVrk![~> rmh2Of)MCoIt<*#J)5YWs.H"br71oKT`3Mns+13$s6K^aecGfDrmlT~> rlkQFc2WuYH%C6lH/ rjr:4]Dm,,COp8PCYih/s*/bgr4<"0GlG=+s+13$s6K^a])hqVrk![~> rmh5Pf)MCmJcC<$JcGTHos42h"Q9 rlkTGc2WuQJcC<$JcGTHorI]a"Pj$@P5tO*!WSA_c%5nGs+149s8L7Ds-*H,J,~> rjr=5]Dm+kJcC<$JcGTHopkXR"OmBqGQ@ZR!WRED]7L!5s+149s8KV2s*4OTJ,~> rmh8Qf)MCms7OqD!<7W$J_#_0!!)\t!<;`C"T[K\TV);_ecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkWHc2WuQs7OV;!<7VpJ^'(s!!)\m!<;`:"T[?XPEQ"EblRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr@6]Dm+ks7Nu)!<7V^J\-fO!!)\^!<;`("T[$OG^'5d])hqVrk!a]JcC<$li6sr!WRED]79~> rmh8Qf)MCms7Y"FTE,"Ceq)D1ecDEDos+Stp=9G@!9!mMTE+o@!WShleq*jPs+149s8LRMs.KAB J,~> rlkWHc2WuQs7X\=P5tW6c%4,tblO".orA)mp< rjr@6]Dm+ks7X&+GQ@bp]7ISP])d-Vopc$^p:CNR!7UsmGQ@ZR!WRED]7L!5s+149s8KV2s*4OT J,~> rmh8Qf)MCms81@I!<(IMTE,"Ceq)D1ecDEDos+StrRLrKqpktE!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs81%@!<(.DP5tW6c%4,tblO".orA)mrQP/!8RU rjr@6]Dm+ks80D.!<'M2GQ@bp]7ISP])d-Vopc$^rOW%0qn!&W!7UsmGQ@ZR!WRED]7L!5s+149 s8KV2s*4OTJ,~> rmh8Qf)MCms8:IJ!<(IMTE,"Ceq)D1ecDEDos+StrRUuKr72(F!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs8:.A!<(.DP5tW6c%4,tblO".orA)mrQY?Br65G0!8RU rjr@6]Dm+ks89M/!<'M2GQ@bp]7ISP])d-Vopc$^rO`(0r4 rmh8Qf)MCms8COJ!<(IMTE,"Ceq)D1ecDEDos+StrRUrJrRM1G!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs8C4A!<(.DP5tW6c%4,tblO".orA)mrQY rjr@6]Dm+ks8BS/!<'M2GQ@bp]7ISP])d-Vopc$^rO`%/rOW8Y!7UsmGQ@ZR!WRED]7L!5s+149 s8KV2s*4OTJ,~> rmh8Qf)MCms8LUJ!<(IMTE,"Ceq)D1ecDEDos+StrRUoIrmh:H!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs8L:A!<(.DP5tW6c%4,tblO".orA)mrQY9@rlkY2!8RU rjr@6]Dm+ks8KY/!<'M2GQ@bp]7ISP])d-Vopc$^rO`".rjrAZ!7UsmGQ@ZR!WRED]7L!5s+149 s8KV2s*4OTJ,~> 
rmh8Qf)MCms8COJ!<(IMTE,"Ceq)D1ecDEDos+StrRUrJrRM1G!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs8C4A!<(.DP5tW6c%4,tblO".orA)mrQY rjr@6]Dm+ks8BS/!<'M2GQ@bp]7ISP])d-Vopc$^rO`%/rOW8Y!7UsmGQ@ZR!WRED]7L!5s+149 s8KV2s*4OTJ,~> rmh8Qf)MCms8:IJ!<(IMTE,"Ceq)D1ecDEDos+StrRUuKr72(F!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs8:.A!<(.DP5tW6c%4,tblO".orA)mrQY?Br65G0!8RU rjr@6]Dm+ks89M/!<'M2GQ@bp]7ISP])d-Vopc$^rO`(0r4 rmh8Qf)MCms81@I!<(IMTE,"Ceq)D1ecDEDos+StrRLrKqpktE!9!mMTE+o@!WShleq*jPs+149 s8LRMs.KABJ,~> rlkWHc2WuQs81%@!<(.DP5tW6c%4,tblO".orA)mrQP/!8RU rjr@6]Dm+ks80D.!<'M2GQ@bp]7ISP])d-Vopc$^rOW%0qn!&W!7UsmGQ@ZR!WRED]7L!5s+149 s8KV2s*4OTJ,~> rmh8Qf)MCms7Y"FTE,"Ceq)D1ecDEDos+Stp=9G@!9!mMTE+o@!WShleq*jPs+149s8LRMs.KAB J,~> rlkWHc2WuQs7X\=P5tW6c%4,tblO".orA)mp< rjr@6]Dm+ks7X&+GQ@bp]7ISP])d-Vopc$^p:CNR!7UsmGQ@ZR!WRED]7L!5s+149s8KV2s*4OT J,~> rmh8Qf)MCms7Y"FTE,"Ceq)D1ecDEDos+Stp=9G@!9!mMTE+o@!WShleq*jPs+149s8LRMs.KAB J,~> rlkWHc2WuQs7X\=P5tW6c%4,tblO".orA)mp< rjr@6]Dm+ks7X&+GQ@bp]7ISP])d-Vopc$^p:CNR!7UsmGQ@ZR!WRED]7L!5s+149s8KV2s*4OT J,~> rmh;Rf)MCms46n:!W`8OJY7ReMkC%Mos+Vuf(Pa?!9!mMTE+o@!WShleq*jPs+149s8LRMs.KAB J,~> rlkZIc2WuQs3:8$!W`8FJWkYKMj",@orA,nc1[>)!8RU rjrC7]Dm+ks1@uL!W`84JTu`jMg,4%opc'_]CpIQ!7UsmGQ@ZR!WRED]7L!5s+149s8KV2s*4OT J,~> rmh8Qf)MCms7?9ks+(0$!/U^KZ2jps!!M6[TV);_ecGfDrmlZ#JcC<$li6t8!WShlepm~> rlkWHc2WuQs7?9ks+(0$!/U^KWrW1l!!M*WPEQ"EblRj.rlp#oJcC<$li6t/!WSA_c%#~> rjr@6]Dm+ks7?9ks+(0$!/U^KS,iT]!!LdNG^'5d])hqVrk!a]JcC<$li6sr!WRED]79~> rmh5Pf)MCmJ`)+;J`,GD"95$e!;tCKs.FqoJ_#D'jjjJ4T`3Mm~> rlkTGc2WuQJ_Yh3J_]/<"94RK!;t(Bs-&#YJ^&bjjimi+PQ&gW~> rjr=5]Dm+kJ^]2!J^`N*"93Uj!;sG0s*0+,J\-KFjgtQnGlG=*~> rmh,Meq*jPs+14)s8S_l!;tCKs.FqoJ_#D'jjjJ4T`3Mm~> rlkKDc%5nGs+14)s8S8_!;t(Bs-&#YJ^&bjjimi+PQ&gW~> rjr42]7L!5s+14)s8R rmh,Meq'KFTRichTE"uiecGfDJ_#D'J_&r6!WShlepm~> rlkKDc%2(0PC\qNP5kU\blRj.J^&bjJ^*<$!WSA_c%#~> rjr42]7G3XG_(+mGQ7aA])hqVJ\-KFJ\1$U!WRED]79~> rmh)LJY7ReJY:Yg!!)rJJcC<$JcFa0!20>BJ,~> rlkHCJWkYKJWn`M!!)rAJcC<$JcFa0!0dE,J,~> rjr11JTu`jJU#gl!!)r/JcC<$JcFa0!-nLTJ,~> rmlW#JH16$ec>X!JY7ReJY:tprmlT~> rlouoJH16$ec>WmJWkYKJWo&Vrlor~> rk!^]JH16$ec>W[JTu`jJU$-urk![~> J_#D'J_#D'J_#D'X4De~> J^&bjJ^&bjJ^&bjX3H.~> J\-KFJ\-KFJ\-KFX1Nl~> J_#D'J_#D'J_#D'X4De~> J^&bjJ^&bjJ^&bjX3H.~> J\-KFJ\-KFJ\-KFX1Nl~> J_#D'J_#D'J_#D'X4De~> J^&bjJ^&bjJ^&bjX3H.~> J\-KFJ\-KFJ\-KFX1Nl~> JY7ReJY7Rerh+hn`7BL4l_&b~> JWkYKJWkYKrf_oX`6Ejsl]Zi~> JTu`jJTu`jrcj"+`4LSFlZdp~> !2+lCJH16$JH,]Ns+/b>ec;@of'V#gs*t~> !0_s6JH16$JH,]Ns+/G5blErYc0`aUs*t~> !-j%pJH16$JH,]Ns+.f#])[),]C!31s*t~> !MBFDeq)D'eq)BPs+/b>ec;@of'V#gs*t~> !L!M7c%4,jc%4+>s+/G5blErYc0`aUs*t~> !I+Tq]7ISF]7IQos+.f#])[),]C!31s*t~> !MBGOf)G`_ec5]$eq)D'f'1`cs+/b>ec;@of'V#gs*t~> !L!NBc2RdVbl@`pc%4,jc0 !I+V']DhlD])Vh^]7ISF]BQp-s+.f#])[),]C!31s*t~> !20/gk10M4][d&_iR[i*g=?6(J_#D'\CLZ[J_%cj!2+nomFD:;J,~> !0d6Zk03l+]ZgEViQ_3!g !-n>?k.:Tn]Xn.DiOepdg:I=bJ\-KF\@Vb@J\/k4!-j(,mCNAuJ,~> "/#YF!<(IK!9_o5!1MI=!<(IK!8?!(!.i^Pf!s92s+/b>ec;@of'V#gs*t~> "-W`0!<(.B!9_T,!1M.4!<(.B!8>Zt!.iCGc+)!us+/G5blErYc0`aUs*t~> "*agX!<'M0!9^ro!1LM"!<'M0!8>$b!.hb5]=>HQs+.f#])[),]C!31s*t~> "/#YF!<(IK!;tFI!;tFH!;tFI!!CdPf%0g#s47/LmahCOp!s*CrRLrKrRUrJrRUuKs472Ms472M"kEYRec5^Lf)5UJf)#Gu eq)Daec>`$f#6,>T[)-g!4(2U!;"bAs*t~> "-W`0!<(.B!;t+@!;t+?!;t+@!!CIGc-?4fs3:NCm`kb3!6tKCrQY?Bs3:QDrQY9@rltEBs3:NC rltEBrQY?B!6tKCr6>3@!mL]Fp!!I:rQP "*agX!<'M0!;sJ.!;sJ-!;sJ.!!Bh5]=\%Bs1A71m^rK!!5&41rO`(0s1A:2rO`".rk&.0s1A71 rk&.0rO`(0!5&41r4Dq.!kSF4ot(2(rOW%0rO`%/rO`(0s1A:2s1A:2"hOa7])Vj1]DVa/]DDSZ ]7IT+])_k^]>V;]Gg "/#YF!<(IK!<(IK!<1OL!<1OL!<(IK!<1OL!<1RL!<1OL!<(IK!:8;:!<1OL!<(IS!7h/$ec=:P 
!<(IK!<1OL!<(IK!;k=I!;tFJ!<1OL!<1OL!<1RL!;+hE!7o^$r71iJrmh&LrRMP\ec=:P!7o^$ !7h,Mec=:P!<(IK!<(IK!.i^Pf"'B3s7FkB!5$e^!;Y4F!:8;:!5[4d!;tCJT_-h7!!1XNm+2(7 q:5NGimn)0kLKV5m+).:o@ "-W`0!<(.B!<(.B!<14C!<14C!<(.B!<14C!<17C!<14C!<(.B!:7u1!<14C!<(.J!6kMgblH#5 !<(.B!<14C!<(.B!;k"@!;t+A!<14C!<14C!<17C!;+MilqH'kKNu,m*,M1o?@:8J,~> "*agX!<'M0!<'M0!<0S1!<0S1!<'M0!<0S1!<0V1!<0S1!<'M0!:7>t!<0S1!<'M8!4r6C])]IT !<'M0!<0S1!<'M0!;jA.!;sJ/!<0S1!<0S1!<0V1!;*l*!5#iCr4;q/rjr.1rOWXA])]IT!5#iC !4r42])]IT!<'M0!<'M0!.hb5]=GQRs7Eo'!5#iC!;X8+!:7>t!5Z8I!;sG/GkAWI!!0\3m( "/#YF!<1OL!<(IK!;tCJ!;b7K!7o^$r71iJrmh&LrRLrKm+).:r71iJrmq)Lr71iJqpkiLf%'j" ec5^Iec5^Jec5^KecPp'ec>X!!!)ZBr;cfI!!)rJ!!*#LrrDrI"p!6*!7o^$qUPWHrRLrKJ_#D' ]%-r_T`=oiq:5NG][m)_qUPWHrmh&Lo@Em@"P*S(ec "-W`0!<14C!<(.B!;t(A!;aqB!6ragr653ArlkECrQP]ZpHVqTT!?rlkECo?I77"O-qkblGW*rrDuA!0d*#!!*#CrrD?/!!)c "*agX!<0S1!<'M0!;sG/!;a;0!5#iCr4;q/rjr.1rOW%0m(35tr4;q/rk&11r4;q/qmuq1]=S(A ])Vj.])Vj/])Vj0])r&F])_c@!!)Z'r;cf.!!)r/!!*#1rrDr."ou9I!5#iCqRZ_-rOW%0J\-KF ]"8%DGlR[Aq7?V,]Y"1DqRZ_-rjr.1o=Ou%"M4ZG])]^[rrDu/!-n1K!!*#1rrD>r!!)c*!!)#j !!)2o!!)At!!)W&!<7Q~> !h]PEr;ccHq>gNGquHcJ!!)rJ!!)uK!s$p'!:/29!;tCJ!<1OL!;k=I!;k=I!<1OO!7o^$qUPWH r71iJrRM&Nf%'j!ec5^BecPp'ec>["q>gNG!!)lH"p!6*!7o^$rmpuIrRLrKJ_#D']@I2dTV2>9 ec<#,!W^dOp=93Do[X!Brmq)Ls4./M_q"khec>X!!20&:!!)rJ!!*#Lr;ccHrW!#N!!)rJ!!)oI rVuuM!;tFI!;"e>!<:XI!;tFH!<(LK!<:XM!<:XI!;k@H!;tFI!!1XNo@ !ggN>quHcA!!)rA!!)uB!s$Tj!:.l0!;t(A!<14C!;k"@!;k"@!<14F!6ragqTT!? r653ArQPEEc-67dbl@b9bl[sjblI^eq>gN>!!)l?"ouom!6ragrlt?@rQP !dF^Wr;cc-q>gN,quHc/!!)r/!!)u0!s#sF!:.5s!;sG/!<0S1!;jA.!;jA.!<0S4!5#iCqRZ_- r4;q/rOW.3]=S(@])Vj'])r&F])_fAq>gN,!!)l-"ou9I!5#iCrk&(.rOW%0J\-KF]=S:IG^08# ])].K!W]h4p:C;)oXb)'rk&11s1872_n,sM])_c@!-n4L!!)r/!!*#1r;cc-rW!#3!!)r/!!)o. rVuu2!;sJ.!;!i#!<9\.!;sJ-!<'P0!<9\2!<9\.!;jD-!;sJ.!!0\3o=G#&J,~> "/#YF!<1OL!<(IK!;Y1G!<(IN!7o^$r71iJrRM&Nf%'ieec5^Jec5^Lec5^Iec5^Iec5^LecPp' ec>Qt!!)rJ!!)uK!s$p'!;tCJ!;+hB!;b7H!;P+F!;b7Q!7o^$f%'iPec>["!!)uK!!%T$J_%Kb #QLHif)O=P!;Y4F!<(LK!!(UL!;k@H!;k@I!7o^$!;=tD!;4nC!;tCN!7o^$f)>[If)5UIf)GaM f)GaJf)Ga-ec5^Jec;B:ec5^Gec5^Kec5^Lec5^Lf)GaIec5^Jec5^Lf)GaKec5^Lec5^Cec5^K ec5^Kec5^Hec5^Kec5^Kecc')!7o^$rmh&Lq:5NGrmh&Lrmh&Lrmq)Lo@ "-W`0!<14C!<(.B!;Xk>!<(.E!6ragr653ArQPEEc-67Sbl@bAbl@bCbl@b@bl@b@bl@bCbl[sj blIUb!!)rA!!)uB!s$Tj!;t(A!;+M9!;aq?!;Oe=!;aqH!6ragc-675blI^e!!)uB!!%SpJ^(jP #QL!Oc2Z&5!;Xn=!<(1B!!(:C!;k%?!;k%@!7oBp!;=Y;!;4S:!;t(E!6ragc2I_@c2@Y@c2ReD c2ReAc2Re$bl@bAblEt$bl@b>bl@bBbl@bCbl@bCc2Re@bl@bAbl@bCc2ReBbl@bCbl@b:bl@bB bl@bBbl@b?bl@bBbl@bBbln*l!6ragrlkECq98m>rlkECrlkECrltHCo?@:8J,~> "*agX!<0S1!<'M0!;X5,!<'M3!5#iCr4;q/rOW.3]=S(/])Vj/])Vj1])Vj.])Vj.])Vj1])r&F ])_]>!!)r/!!)u0!s#sF!;sG/!;*l'!;a;-!;O/+!;a;6!5#iC]=S'T])_fA!!)u0!!%S^J\/S, #QK$n]DoLT!;X8+!<'P0!!'Y1!;jD-!;jD.!7na^!;=#)!;3r(!;sG3!5#iC]D_g.]DVa.]Dhm2 ]Dhm/]Dhlg])Vj/])[*L])Vj,])Vj0])Vj1])Vj1]Dhm.])Vj/])Vj1]Dhm0])Vj1])Vj(])Vj0 ])Vj0])Vj-])Vj0])Vj0]*/2H!5#iCrjr.1q7?V,rjr.1rjr.1rk&11o=G#&J,~> "/#YF!<(IK!<(IK!<(IN!7o^$rRLrKrmh&Lrmq)Lr7:lJqUYZHo@EpArmh&LrRLrKqUPWHrRLrK r7:lJq:5NGr71iJrRLrKrmh&Lrmq)Lo[X!Bq:5NGrRLrKrmh&LqUPrQf%'iPec=:P!<(IK!<(IK !<1OL!.i^Pf"ooCs.H%9f)O=P!;G%E!;tFJ!<1OL!<(IK!<1OL!;tFJ!7o^$!;P.F!;+hB!;Y4G !<1OL!<1OL!<(IK!<(IP!7h/$ec>["rrCs-!!)rJ!20&:!!)lH!!)oI!W^dOr71iJqpk`IrRLrK r71iJrmh&Lr71iJp=93DpsoEFp!s*CrRUuKqUPWHqUPWHr71oLec>X!!!)WA!<7Q~> "-W`0!<(.B!<(.B!<(.E!6ragrQP6AqT]$?o?I:8rlkECrQP6Aq98m>r653ArQPrQP !<14C!<14C!<(.B!<(.G!6kMgblI^errCs$!!)rA!0d-$!!)l?!!)o@!W^IFr653Aqoo*@rQP "*agX!<'M0!<'M0!<'M3!5#iCrOW%0rjr.1rk&11r4Dt/qRcb-o=P#&rjr.1rOW%0qRZ_-rOW%0 
r4Dt/q7?V,r4;q/rOW%0rjr.1rk&11oXb)'q7?V,rOW%0rjr.1qR[%6]=S'T])]IT!<'M0!<'M0 !<0S1!.hb5]>;)bs*/e#]DoLT!;F)*!;sJ/!<0S1!<'M0!<0S1!;sJ/!7na^!;O2+!;*l'!;X8, !<0S1!<0S1!<'M0!<'M5!4r6C])_fArrCrg!!)r/!-n4L!!)l-!!)o.!W]h4r4;q/qmuh.rOW%0 r4;q/rjr.1r4;q/p:C;)pq$M+ot(2(rO`(0qRZ_-qRZ_-r4<"1])_c@!!)W&!<7Q~> !208jrRLrKrRUoIrRUoI!S.8Mf)>XNec5^Jec5^Hf)GaAec>d%rW)oJquHZGr;c`GrrDrIquHcJ rW)uLrW)rKrVuuM!;4qA!;b:F!<1RI!<1RK!!h'T!7h,Mf%0d"!S.8Lf)>Z#eq)Dged;ALT[q]o s45a$pXTX!!!)oI!!)uK!!)rJ!!*#Lq>g3>r;ccH!!)iGquH]H!!)iG!!)lHq>^QI !;tCJ!;"bAs*t~> !0d?]rQPr;c`>rrDr@quHcA rW)uCrW)rBrVuuD!;4V8!;at=!<17@!<17B!!gaK!6kKDc-?1e!R1WCc2I]oc%4-UbmFE6PKhAP s38dgpWW[rQPrQY?Be]e'or655* p<g35r;cc?!!)i>quH]?!!)i>!!)l?q>^Q@ !;t(A!;"G8s*t~> !-nGBrOW%0rO`".rO`".!P8@2]D_d3])Vj/])Vj-]Dhm&])_oDrW)o/quHZ,r;c`,rrDr.quHc/ rW)u1rW)r0rVuu2!;3u&!;a>+!<0V.!<0V0!!g+9!4r42]=\"A!P8@1]D_e]]7IT1]*\L^Ge:Tf s1>lCpU^D*r4;q/rOW%0rjr.1r4;q/d(98Xp:C;)ot(2(q7?V,rOW%0q7?V,rO`(0e[ke]r4;rR p:C;)qRZ_-qmun0])_c@!!)o.!!)u0!!)r/!!*#1q>g3#r;cc-!!)i,quH]-!!)i,!!)l-q>^Q. !;sG/!;!f&s*t~> !MBGHec5^Hf)Ga@ec5]jec5]$eq)D*edDGMT[q]oT``#~> !L!N;bl@b?c2Re7bl@aabl@`pc%4,mbmOK7PKhAPPQ/nP!<(.B!;t(A!;t(A!<(.B!<17>!78sj !;=Y;!;4S:!;Xk>!<(.B!<(1?!<(.B!7]6n!;t(APP36$!;aq?!;k"B!6kNAbl@b@bl@bBbl@bA bl@bCbl@b0bl@bBbl@b?bl@bBbl@bBbl@b>bl@b?bl@b?bl@bAbl@b8blIco~> !I+Uu])Vj-]Dhm%])ViO])Vh^]7ISI]*eR_Ge:TfGlPCf!<'M0!;sG/!;sG/!<'M0!<0V,!78=X !;=#)!;3r(!;X5,!<'M0!<'P-!<'M0!7\U\!;sG/GkS`L!;a;-!;jA0!4r7/])Vj.])Vj0])Vj/ ])Vj1])Vis])Vj0])Vj-])Vj0])Vj0])Vj,])Vj-])Vj-])Vj/])Vj&])_k]~> !MBGHec5^Hec5^?ec5]kec5]$eq)D*edMMNT[q]oTV2>9ec>["!!)rJ!!)rJ!!)uK!!*#L!!(0m !!)`D!!)]C!!)iG!!)uK!!*#L!!)uK!!)uK!!(O"!!)rJ!20#9!!)uK!s$p'!<(IK!<1OL!<1RL !;k=I!;tCJ!<1RL!<(IK!<(IK!;=tD!<(IK!<(IK!<1OL!<1OL!<(IK!<(IK!;Y1G!<1OL!<(IK !<(IN!7o^$rmq)Lo@ !L!N;bl@b?bl@b6bl@abbl@`pc%4,mbmXQ8PKhAPPEZ$kblI^e!!)rA!!)rA!!)uB!!*#C!!(0d !!)`;!!)]:!!)i>!!)uB!!*#C!!)uB!!)uB!!(Nn!!)rA!0d*#!!)uB!s$Tj!<(.B!<14C!<17C !;k"@!;t(A!<17C!<(.B!<(.B!;=Y;!<(.B!<(.B!<14C!<14C!<(.B!<(.B!;Xk>!<14C!<(.B !<(.E!6ragrltHCo?@:8J,~> !I+Uu])Vj-])Vj$])ViP])Vh^]7ISI]*nX`Ge:TfG^08#])_fA!!)r/!!)r/!!)u0!!*#1!!(0R !!)`)!!)](!!)i,!!)u0!!*#1!!)u0!!)u0!!(N\!!)r/!-n1K!!)u0!s#sF!<'M0!<0S1!<0V1 !;jA.!;sG/!<0V1!<'M0!<'M0!;=#)!<'M0!<'M0!<0S1!<0S1!<'M0!<'M0!;X5,!<0S1!<'M0 !<'M3!5#iCrk&11o=G#&J,~> !MBGJf)5U7f)5Tof)5T"eq)D*edVSOT[q]oTV0m9f%'j"ec5^Jec5^Jec5^Kec5^Kec5^Kec5^K f)Ga$ec5^Iec5^Kec5^Bec5^KecPp'ec>["!!*#L!!)uK!!)uK!!)fFrrCs-!!)rJ!2/u8r;cfI r;ccHrVuuM!<(LH!<(LJ!!:^O!<(LI!;4q@!;k@H!;tFH!!CdPf%0a!qpt`Hqpt]GrRUrJ!nI>O o[X$BJ,~> !L!N=c2@Y.c2@Xfc2@Wnc%4,mbmaW9PKhAPPEX8kc-67ebl@bAbl@bAbl@bBbl@bBbl@bBbl@bB c2Rdpbl@b@bl@bBbl@b9bl@bBbl[sjblI^e!!*#C!!)uB!!)uB!!)f=rrCs$!!)rA!0d'"r;cf@ r;cc?rVuuD!<(1?!<(1A!!:CF!<(1@!;4V7!;k%?!;t+?!!CIGc-?.dqp#*?qp#'>rQY !I+V"]DV`q]DV`T]DV_\]7ISI]+"^aGe:TfG^-k#]=S(A])Vj/])Vj/])Vj0])Vj0])Vj0])Vj0 ]Dhl^])Vj.])Vj0])Vj'])Vj0])r&F])_fA!!*#1!!)u0!!)u0!!)f+rrCrg!!)r/!-n.Jr;cf. 
r;cc-rVuu2!<'P-!<'P/!!9b4!<'P.!;3u%!;jD-!;sJ-!!Bh5]=[t@qn)h-qn)e,rO`%/!kSF4 oXb,'J,~> !MBFDeq)D'er/)ds.H%9f)MD9f%0ls!<1RI!<:XL!<:XL!<(LI!;tFJ!8,m#!;tFH!:n_>!<:XL !<:XL!<1RJ!!CdPf%0a!qptcIiR[o,rmh(Bhpqc-OOaF3J,~> !L!M7c%4,jc&9gRs-&Ykc2Wukc-?:a!<17@!<:=C!<:=C!<(1@!;t+A!8,Qo!;t+?!:nD5!<:=C !<:=C!<17A!!CIGc-?.dqp#-@iQ_9#rlkG,hou-$ONde*J,~> !I+Tq]7ISF]8O9.s*/e#]Dm,#]=\+=!<0V.!<9\1!<9\1!<'P.!;sJ/!8+p]!;sJ-!:mc#!<9\1 !<9\1!<0V/!!Bh5]=[t@qn)k.iOf!frjr/Thn&jgOLkMmJ,~> !MBFDeq)D'er8/fs.H%9f)MD9f%0m$J_%cj!2/6#!!&,3!<7Q~> !L!M7c%4,jc&BmTs-&Ykc2Wukc-?:gJ^)-X!0c !I+Tq]7ISF]8X?0s*/e#]Dm,#]=\+CJ\/k4!-mD5!!&+m!<7Q~> !MBFDeq)D'erA5hs.H%9f)MD9f%0kos+/b>ec;B%f)5T2ec>`#~> !L!M7c%4,jc&KsVs-&Ykc2Wukc-?9Ps+/G5blEsdc2@X)blIco~> !I+Tq]7ISF]8aE2s*/e#]Dm,#]=\)fs+.f#])[*7]DV_l])_k]~> !MBFDeq)D'erJ;js.H%9f)MD9f%0koT`9So`7=p]J_'5>!<7Q~> !L!M7c%4,jc&U$Xs-&Ykc2Wukc-?9PPQ-3Y`6A:GJ^*T,!<7Q~> !I+Tq]7ISF]8jK4s*/e#]Dm,#]=\)fGlN?,`4H"oJ\1<]!<7Q~> !MBFDeq)D'erSAls.H%9f)MD9f%0koT[s-of#6,>TRk\]ec>`#~> !L!M7c%4,jc&^*Zs-&Ykc2Wukc-?9PPKj,Yc,@j,PC_!GblIco~> !I+Tq]7ISF]8sQ6s*/e#]Dm,#]=\)fGe=!,]>V;]G_*Ko])_k]~> !MBFDeq)D'er\Gns.H%9f)MD9f%0koT[q]oJ_%cj!2+nomFD:;J,~> !L!M7c%4,jc&g0\s-&Ykc2Wukc-?9PPKhAPJ^)-X!0_uYmEGY2J,~> !I+Tq]7ISF]9'W8s*/e#]Dm,#]=\)fGe:TfJ\/k4!-j(,mCNAuJ,~> !MBFDeq)D'ereMls.H%9f)MD9f%0koT`3LBs+/b>ec;@of'V#gs*t~> !L!M7c%4,jc&p6Zs-&Ykc2Wukc-?9PPQ&f,s+/G5blErYc0`aUs*t~> !I+Tq]7ISF]90]6s*/e#]Dm,#]=\)fGlG;Ts+.f#])[),]C!31s*t~> !MBFDeq)D'ernSms.H%9f)MD9f%0koT`*FAs+/b>ec;@of'V#gs*t~> !L!M7c%4,jc'$<[s-&Ykc2Wukc-?9PPPr`+s+/G5blErYc0`aUs*t~> !I+Tq]7ISF]99c7s*/e#]Dm,#]=\)fGl>5Ss+.f#])[),]C!31s*t~> !2+oCJcC<$JcGcMJ_%cj!2+oCm/MS~> !0`!6JcC<$JcGcMJ^)-X!0`!6m/MS~> !-j(pJcC<$JcGcMJ\/k4!-j(pm/MS~> %%EndData showpage %%Trailer end %%EOF coq-8.4pl2/doc/refman/Classes.tex0000640000175000001440000003620311776416511016000 0ustar notinusers\def\Haskell{\textsc{Haskell}\xspace} \def\eol{\setlength\parskip{0pt}\par} \def\indent#1{\noindent\kern#1} \def\cst#1{\textsf{#1}} \newcommand\tele[1]{\overrightarrow{#1}} \achapter{\protect{Type Classes}} \aauthor{Matthieu Sozeau} \label{typeclasses} \begin{flushleft} \em The status of Type Classes is (extremely) experimental. \end{flushleft} This chapter presents a quick reference of the commands related to type classes. For an actual introduction to type classes, there is a description of the system \cite{sozeau08} and the literature on type classes in \Haskell which also applies. \asection{Class and Instance declarations} \label{ClassesInstances} The syntax for class and instance declarations is the same as record syntax of \Coq: \def\kw{\texttt} \def\classid{\texttt} \begin{center} \[\begin{array}{l} \kw{Class}~\classid{Id}~(\alpha_1 : \tau_1) \cdots (\alpha_n : \tau_n) [: \sort] := \{\\ \begin{array}{p{0em}lcl} & \cst{f}_1 & : & \type_1 ; \\ & \vdots & & \\ & \cst{f}_m & : & \type_m \}. \end{array}\end{array}\] \end{center} \begin{center} \[\begin{array}{l} \kw{Instance}~\ident~:~\classid{Id}~\term_1 \cdots \term_n := \{\\ \begin{array}{p{0em}lcl} & \cst{f}_1 & := & \term_{f_1} ; \\ & \vdots & & \\ & \cst{f}_m & := & \term_{f_m} \}. \end{array}\end{array}\] \end{center} \begin{coq_eval} Reset Initial. Generalizable All Variables. \end{coq_eval} The $\tele{\alpha_i : \tau_i}$ variables are called the \emph{parameters} of the class and the $\tele{f_k : \type_k}$ are called the \emph{methods}. Each class definition gives rise to a corresponding record declaration and each instance is a regular definition whose name is given by $\ident$ and type is an instantiation of the record type. 
We'll use the following example class in the rest of the chapter:

\begin{coq_example*}
Class EqDec (A : Type) :=
  { eqb : A -> A -> bool ;
    eqb_leibniz : forall x y, eqb x y = true -> x = y }.
\end{coq_example*}

This class implements a boolean equality test which is compatible with
Leibniz equality on some type. An example implementation is:

\begin{coq_example*}
Instance unit_EqDec : EqDec unit :=
  { eqb x y := true ;
    eqb_leibniz x y H :=
      match x, y return x = y with tt, tt => eq_refl tt end }.
\end{coq_example*}

If one does not give all the members in the \texttt{Instance}
declaration, Coq enters proof mode and the user is asked to build
inhabitants of the remaining fields, e.g.:

\begin{coq_example*}
Instance eq_bool : EqDec bool :=
  { eqb x y := if x then y else negb y }.
\end{coq_example*}
\begin{coq_example}
Proof. intros x y H.
  destruct x ; destruct y ; (discriminate || reflexivity).
Defined.
\end{coq_example}

One has to take care that the transparency of every field is determined
by the transparency of the \texttt{Instance} proof. One can
alternatively use the \texttt{Program} \texttt{Instance}
\comindex{Program Instance} variant which has richer facilities for
dealing with obligations.

\asection{Binding classes}

Once a type class is declared, one can use it in class binders:
\begin{coq_example}
Definition neqb {A} {eqa : EqDec A} (x y : A) := negb (eqb x y).
\end{coq_example}

When one calls a class method, a constraint is generated that is
satisfied only in contexts where the appropriate instances can be found.
In the example above, a constraint \texttt{EqDec A} is generated and
satisfied by \texttt{\{eqa : EqDec A\}}. In case no satisfying
constraint can be found, an error is raised:

\begin{coq_example}
Definition neqb' (A : Type) (x y : A) := negb (eqb x y).
\end{coq_example}

The algorithm used to solve constraints is a variant of the
\texttt{eauto} tactic that does proof search with a set of lemmas (the
instances). It will use local hypotheses as well as declared lemmas in
the \texttt{typeclass\_instances} database. Hence the example can also
be written:

\begin{coq_example}
Definition neqb' A (eqa : EqDec A) (x y : A) := negb (eqb x y).
\end{coq_example}

However, the generalizing binders should be used instead as they have
particular support for type classes:
\begin{itemize}
\item They automatically set the maximally implicit status for type
  class arguments, making derived functions as easy to use as class
  methods. In the example above, \texttt{A} and \texttt{eqa} should be
  set maximally implicit.
\item They support implicit quantification on partially applied type
  classes (\S \ref{implicit-generalization}). Any argument not given as
  part of a type class binder will be automatically generalized.
\item They also support implicit quantification on superclasses
  (\S \ref{classes:superclasses}).
\end{itemize}

Following the previous example, one can write:
\begin{coq_example}
Definition neqb_impl `{eqa : EqDec A} (x y : A) := negb (eqb x y).
\end{coq_example}

Here \texttt{A} is implicitly generalized, and the resulting function is
equivalent to the one above.

\asection{Parameterized Instances}

One can declare parameterized instances as in \Haskell simply by giving
the constraints as a binding context before the instance, e.g.:

\begin{coq_example}
Instance prod_eqb `(EA : EqDec A, EB : EqDec B) : EqDec (A * B) :=
  { eqb x y := match x, y with
               | (la, ra), (lb, rb) => andb (eqb la lb) (eqb ra rb)
               end }.
\end{coq_example}
\begin{coq_eval}
Admitted.
\end{coq_eval} These instances are used just as well as lemmas in the instance hint database. \asection{Sections and contexts} \label{SectionContext} To ease the parametrization of developments by type classes, we provide a new way to introduce variables into section contexts, compatible with the implicit argument mechanism. The new command works similarly to the \texttt{Variables} vernacular (see \ref{Variable}), except it accepts any binding context as argument. For example: \begin{coq_example} Section EqDec_defs. Context `{EA : EqDec A}. \end{coq_example} \begin{coq_example*} Global Instance option_eqb : EqDec (option A) := { eqb x y := match x, y with | Some x, Some y => eqb x y | None, None => true | _, _ => false end }. \end{coq_example*} \begin{coq_eval} Proof. intros x y ; destruct x ; destruct y ; intros H ; try discriminate ; try apply eqb_leibniz in H ; subst ; auto. Defined. \end{coq_eval} \begin{coq_example} End EqDec_defs. About option_eqb. \end{coq_example} Here the \texttt{Global} modifier redeclares the instance at the end of the section, once it has been generalized by the context variables it uses. \asection{Building hierarchies} \subsection{Superclasses} \label{classes:superclasses} One can also parameterize classes by other classes, generating a hierarchy of classes and superclasses. In the same way, we give the superclasses as a binding context: \begin{coq_example*} Class Ord `(E : EqDec A) := { le : A -> A -> bool }. \end{coq_example*} Contrary to \Haskell, we have no special syntax for superclasses, but this declaration is morally equivalent to: \begin{verbatim} Class `(E : EqDec A) => Ord A := { le : A -> A -> bool }. \end{verbatim} This declaration means that any instance of the \texttt{Ord} class must have an instance of \texttt{EqDec}. The parameters of the subclass contain at least all the parameters of its superclasses in their order of appearance (here \texttt{A} is the only one). As we have seen, \texttt{Ord} is encoded as a record type with two parameters: a type \texttt{A} and an \texttt{E} of type \texttt{EqDec A}. However, one can still use it as if it had a single parameter inside generalizing binders: the generalization of superclasses will be done automatically. \begin{coq_example*} Definition le_eqb `{Ord A} (x y : A) := andb (le x y) (le y x). \end{coq_example*} In some cases, to be able to specify sharing of structures, one may want to give explicitly the superclasses. It is is possible to do it directly in regular binders, and using the \texttt{!} modifier in class binders. For example: \begin{coq_example*} Definition lt `{eqa : EqDec A, ! Ord eqa} (x y : A) := andb (le x y) (neqb x y). \end{coq_example*} The \texttt{!} modifier switches the way a binder is parsed back to the regular interpretation of Coq. In particular, it uses the implicit arguments mechanism if available, as shown in the example. \subsection{Substructures} Substructures are components of a class which are instances of a class themselves. They often arise when using classes for logical properties, e.g.: \begin{coq_eval} Require Import Relations. \end{coq_eval} \begin{coq_example*} Class Reflexive (A : Type) (R : relation A) := reflexivity : forall x, R x x. Class Transitive (A : Type) (R : relation A) := transitivity : forall x y z, R x y -> R y z -> R x z. \end{coq_example*} This declares singleton classes for reflexive and transitive relations, (see \ref{SingletonClass} for an explanation). 
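Since these are definitional classes, an instance is simply a term of the
corresponding statement; for example, Leibniz equality could be registered as a
reflexive and transitive relation as follows (the instance names are arbitrary and
given only for illustration):
\begin{coq_example*}
Instance eq_Reflexive (A : Type) : Reflexive A (@eq A) := @eq_refl A.
Instance eq_Transitive (A : Type) : Transitive A (@eq A) := @eq_trans A.
\end{coq_example*}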
These may be used as part of other classes: \begin{coq_example*} Class PreOrder (A : Type) (R : relation A) := { PreOrder_Reflexive :> Reflexive A R ; PreOrder_Transitive :> Transitive A R }. \end{coq_example*} The syntax \texttt{:>} indicates that each \texttt{PreOrder} can be seen as a \texttt{Reflexive} relation. So each time a reflexive relation is needed, a preorder can be used instead. This is very similar to the coercion mechanism of \texttt{Structure} declarations. The implementation simply declares each projection as an instance. One can also declare existing objects or structure projections using the \texttt{Existing Instance} command to achieve the same effect. \section{Summary of the commands \label{TypeClassCommands}} \subsection{\tt Class {\ident} {\binder$_1$ \ldots~\binder$_n$} : \sort := \{ field$_1$ ; \ldots ; field$_k$ \}.} \comindex{Class} \label{Class} The \texttt{Class} command is used to declare a type class with parameters {\binder$_1$} to {\binder$_n$} and fields {\tt field$_1$} to {\tt field$_k$}. \begin{Variants} \item \label{SingletonClass} {\tt Class {\ident} {\binder$_1$ \ldots \binder$_n$} : \sort := \ident$_1$ : \type$_1$.} This variant declares a \emph{singleton} class whose only method is {\tt \ident$_1$}. This singleton class is a so-called definitional class, represented simply as a definition {\tt {\ident} \binder$_1$ \ldots \binder$_n$ := \type$_1$} and whose instances are themselves objects of this type. Definitional classes are not wrapped inside records, and the trivial projection of an instance of such a class is convertible to the instance itself. This can be useful to make instances of existing objects easily and to reduce proof size by not inserting useless projections. The class constant itself is declared rigid during resolution so that the class abstraction is maintained. \item \label{ExistingClass} {\tt Existing Class {\ident}.\comindex{Existing Class}} This variant declares a class a posteriori from a constant or inductive definition. No methods or instances are defined. \end{Variants} \subsection{\tt Instance {\ident} {\binder$_1$ \ldots \binder$_n$} : {Class} {t$_1$ \ldots t$_n$} [| \textit{priority}] := \{ field$_1$ := b$_1$ ; \ldots ; field$_i$ := b$_i$ \}} \comindex{Instance} \label{Instance} The \texttt{Instance} command is used to declare a type class instance named {\ident} of the class \emph{Class} with parameters {t$_1$} to {t$_n$} and fields {\tt b$_1$} to {\tt b$_i$}, where each field must be a declared field of the class. Missing fields must be filled in interactive proof mode. An arbitrary context of the form {\tt \binder$_1$ \ldots \binder$_n$} can be put after the name of the instance and before the colon to declare a parameterized instance. An optional \textit{priority} can be declared, 0 being the highest priority as for auto hints. \begin{Variants} \item {\tt Instance {\ident} {\binder$_1$ \ldots \binder$_n$} : forall {\binder$_{n+1}$ \ldots \binder$_m$}, {Class} {t$_1$ \ldots t$_n$} [| \textit{priority}] := \term} This syntax is used for declaration of singleton class instances or for directly giving an explicit term of type {\tt forall {\binder$_{n+1}$ \ldots \binder$_m$}, {Class} {t$_1$ \ldots t$_n$}}. One need not even mention the unique field name for singleton classes. \item {\tt Global Instance} One can use the \texttt{Global} modifier on instances declared in a section so that their generalization is automatically redeclared after the section is closed. 
\item {\tt Program Instance} \comindex{Program Instance} Switches the type-checking to \Program~(chapter \ref{Program}) and uses the obligation mechanism to manage missing fields. \item {\tt Declare Instance} \comindex{Declare Instance} In a {\tt Module Type}, this command states that a corresponding concrete instance should exist in any implementation of this {\tt Module Type}. This is similar to the distinction between {\tt Parameter} vs. {\tt Definition}, or between {\tt Declare Module} and {\tt Module}. \end{Variants} Besides the {\tt Class} and {\tt Instance} vernacular commands, there are a few other commands related to type classes. \subsection{\tt Existing Instance {\ident}} \comindex{Existing Instance} \label{ExistingInstance} This commands adds an arbitrary constant whose type ends with an applied type class to the instance database. It can be used for redeclaring instances at the end of sections, or declaring structure projections as instances. This is almost equivalent to {\tt Hint Resolve {\ident} : typeclass\_instances}. \begin{Variants} \item {\tt Existing Instances {\ident}$_1$ \ldots {\ident}$_n$} \comindex{Existing Instances} With this command, several existing instances can be declared at once. \end{Variants} \subsection{\tt Context {\binder$_1$ \ldots \binder$_n$}} \comindex{Context} \label{Context} Declares variables according to the given binding context, which might use implicit generalization (see \ref{SectionContext}). \subsection{\tt Typeclasses Transparent, Opaque {\ident$_1$ \ldots \ident$_n$}} \comindex{Typeclasses Transparent} \comindex{Typeclasses Opaque} \label{TypeclassesTransparency} This commands defines the transparency of {\ident$_1$ \ldots \ident$_n$} during type class resolution. It is useful when some constants prevent some unifications and make resolution fail. It is also useful to declare constants which should never be unfolded during proof-search, like fixpoints or anything which does not look like an abbreviation. This can additionally speed up proof search as the typeclass map can be indexed by such rigid constants (see \ref{HintTransparency}). By default, all constants and local variables are considered transparent. One should take care not to make opaque any constant that is used to abbreviate a type, like {\tt relation A := A -> A -> Prop}. This is equivalent to {\tt Hint Transparent,Opaque} {\ident} {\tt: typeclass\_instances}. \subsection{\tt Typeclasses eauto := [debug] [dfs | bfs] [\emph{depth}]} \comindex{Typeclasses eauto} \label{TypeclassesEauto} This commands allows to customize the type class resolution tactic, based on a variant of eauto. The flags semantics are: \begin{itemize} \item {\tt debug} In debug mode, the trace of successfully applied tactics is printed. \item {\tt dfs, bfs} This sets the search strategy to depth-first search (the default) or breadth-first search. \item {\emph{depth}} This sets the depth of the search (the default is 100). \end{itemize} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/Polynom.tex0000640000175000001440000011724411776416511016045 0ustar notinusers\achapter{The \texttt{ring} and \texttt{field} tactic families} \aauthor{Bruno Barras, Benjamin Gr\'egoire, Assia Mahboubi, Laurent Th\'ery\footnote{based on previous work from Patrick Loiseleur and Samuel Boutin}} \label{ring} \tacindex{ring} This chapter presents the tactics dedicated to deal with ring and field equations. 
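For instance, assuming the \texttt{ZArith} library has been loaded (it declares the
ring structure of the integers, see below), an identity such as the following one is
proved by a single call to the \texttt{ring} tactic; this small example is only meant
as a first illustration, concrete usage is described in the following sections:
\begin{coq_eval}
Reset Initial.
Require Import ZArith.
Open Scope Z_scope.
\end{coq_eval}
\begin{coq_example}
Goal forall a b : Z, (a + b) * (a - b) = a * a - b * b.
intros; ring.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}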
\asection{What does this tactic do?}
\texttt{ring} does associative-commutative rewriting in ring and semi-ring structures.
Assume you have two binary functions $\oplus$ and $\otimes$ that are associative and
commutative, with $\otimes$ distributive over $\oplus$, and two constants 0 and 1 that
are unities for $\oplus$ and $\otimes$ respectively. A \textit{polynomial} is an
expression built on variables $V_0, V_1, \dots$ and constants by application of
$\oplus$ and $\otimes$. Let an {\it ordered product} be a product of variables
$V_{i_1} \otimes \ldots \otimes V_{i_n}$ satisfying $i_1 \le i_2 \le \dots \le i_n$.
Let a \textit{monomial} be the product of a constant and an ordered product. We can
order the monomials by the lexicographic order on products of variables. Let a
\textit{canonical sum} be an ordered sum of monomials that are all different, i.e.
each monomial in the sum is strictly less than the following monomial according to
the lexicographic order. It is easy to show that every polynomial is equivalent
(modulo the ring properties) to exactly one canonical sum. This canonical sum is
called the \textit{normal form} of the polynomial. In fact, the actual representation
shares monomials with the same prefixes. So what does \texttt{ring} do? It normalizes
polynomials over any ring or semi-ring structure. The basic use of \texttt{ring} is
to simplify ring expressions, so that the user does not have to deal manually with
the theorems of associativity and commutativity.
\begin{Examples}
\item In the ring of integers, the normal form of
$x (3 + yx + 25(1 - z)) + zx$ is $28x + (-24)xz + xxy$.
\end{Examples}
\texttt{ring} is also able to compute a normal form modulo monomial equalities. For
example, under the hypothesis that $2x^2 = yz+1$, the normal form of
$2(x + 1)x - x - zy$ is $x+1$.
\asection{The variables map}
It is frequent to have an expression built with $+$ and $\times$, but rarely built
from variables only. Let us associate a number with each subterm of a ring expression
in the \gallina\ language. For example in the ring \texttt{nat}, consider the
expression:
\begin{quotation}
\begin{verbatim}
(plus (mult (plus (f (5)) x) x) (mult (if b then (4) else (f (3))) (2)))
\end{verbatim}
\end{quotation}
\noindent As a ring expression, it has 3 subterms. Give each subterm a number in an
arbitrary order:
\begin{tabular}{ccl}
0 & $\mapsto$ & \verb|if b then (4) else (f (3))| \\
1 & $\mapsto$ & \verb|(f (5))| \\
2 & $\mapsto$ & \verb|x| \\
\end{tabular}
\noindent Then normalize the ``abstract'' polynomial
$$((V_1 \otimes V_2) \oplus V_2) \oplus (V_0 \otimes 2)$$
\noindent In our example the normal form is:
$$(2 \otimes V_0) \oplus (V_1 \otimes V_2) \oplus (V_2 \otimes V_2)$$
\noindent Then substitute the variables by their values in the variables map to get
the concrete normal polynomial:
\begin{quotation}
\begin{verbatim}
(plus (mult (2) (if b then (4) else (f (3)))) (plus (mult (f (5)) x) (mult x x)))
\end{verbatim}
\end{quotation}
\asection{Is it automatic?}
Yes, building the variables map and doing the substitution after normalizing is
automatically done by the tactic. So you can just forget this paragraph and use the
tactic according to your intuition.
\asection{Concrete usage in \Coq \tacindex{ring} \tacindex{ring\_simplify}}
The {\tt ring} tactic solves equations over polynomial expressions of a ring (or
semi-ring) structure. It proceeds by normalizing both sides of the equation (w.r.t.
associativity, commutativity and distributivity, constant propagation, rewriting of monomials) and comparing syntactically the results. {\tt ring\_simplify} applies the normalization procedure described above to the terms given. The tactic then replaces all occurrences of the terms given in the conclusion of the goal by their normal forms. If no term is given, then the conclusion should be an equation and both hand sides are normalized. The tactic can also be applied in a hypothesis. The tactic must be loaded by \texttt{Require Import Ring}. The ring structures must be declared with the \texttt{Add Ring} command (see below). The ring of booleans is predefined; if one wants to use the tactic on \texttt{nat} one must first require the module \texttt{ArithRing} (exported by \texttt{Arith}); for \texttt{Z}, do \texttt{Require Import ZArithRing} or simply \texttt{Require Import ZArith}; for \texttt{N}, do \texttt{Require Import NArithRing} or \texttt{Require Import NArith}. \Example \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{coq_example} Require Import ZArith. Open Scope Z_scope. Goal forall a b c:Z, (a + b + c)^2 = a * a + b^2 + c * c + 2 * a * b + 2 * a * c + 2 * b * c. \end{coq_example} \begin{coq_example} intros; ring. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \begin{coq_example} Goal forall a b:Z, 2*a*b = 30 -> (a+b)^2 = a^2 + b^2 + 30. \end{coq_example} \begin{coq_example} intros a b H; ring [H]. \end{coq_example} \begin{coq_eval} Reset Initial. \end{coq_eval} \begin{Variants} \item {\tt ring [\term$_1$ {\ldots} \term$_n$]} decides the equality of two terms modulo ring operations and rewriting of the equalities defined by \term$_1$ {\ldots} \term$_n$. Each of \term$_1$ {\ldots} \term$_n$ has to be a proof of some equality $m = p$, where $m$ is a monomial (after ``abstraction''), $p$ a polynomial and $=$ the corresponding equality of the ring structure. \item {\tt ring\_simplify [\term$_1$ {\ldots} \term$_n$] $t_1 \ldots t_m$ in {\ident}} performs the simplification in the hypothesis named {\tt ident}. \end{Variants} \Warning \texttt{ring\_simplify \term$_1$; ring\_simplify \term$_2$} is not equivalent to \texttt{ring\_simplify \term$_1$ \term$_2$}. In the latter case the variables map is shared between the two terms, and common subterm $t$ of \term$_1$ and \term$_2$ will have the same associated variable number. So the first alternative should be avoided for terms belonging to the same ring theory. \begin{ErrMsgs} \item \errindex{not a valid ring equation} The conclusion of the goal is not provable in the corresponding ring theory. \item \errindex{arguments of ring\_simplify do not have all the same type} {\tt ring\_simplify} cannot simplify terms of several rings at the same time. Invoke the tactic once per ring structure. \item \errindex{cannot find a declared ring structure over {\tt term}} No ring has been declared for the type of the terms to be simplified. Use {\tt Add Ring} first. \item \errindex{cannot find a declared ring structure for equality {\tt term}} Same as above is the case of the {\tt ring} tactic. \end{ErrMsgs} \asection{Adding a ring structure \comindex{Add Ring}} Declaring a new ring consists in proving that a ring signature (a carrier set, an equality, and ring operations: {\tt Ring\_theory.ring\_theory} and {\tt Ring\_theory.semi\_ring\_theory}) satisfies the ring axioms. Semi-rings (rings without $+$ inverse) are also supported. The equality can be either Leibniz equality, or any relation declared as a setoid (see~\ref{setoidtactics}). 
The definition of ring and semi-rings (see module {\tt Ring\_theory}) is: \begin{verbatim} Record ring_theory : Prop := mk_rt { Radd_0_l : forall x, 0 + x == x; Radd_sym : forall x y, x + y == y + x; Radd_assoc : forall x y z, x + (y + z) == (x + y) + z; Rmul_1_l : forall x, 1 * x == x; Rmul_sym : forall x y, x * y == y * x; Rmul_assoc : forall x y z, x * (y * z) == (x * y) * z; Rdistr_l : forall x y z, (x + y) * z == (x * z) + (y * z); Rsub_def : forall x y, x - y == x + -y; Ropp_def : forall x, x + (- x) == 0 }. Record semi_ring_theory : Prop := mk_srt { SRadd_0_l : forall n, 0 + n == n; SRadd_sym : forall n m, n + m == m + n ; SRadd_assoc : forall n m p, n + (m + p) == (n + m) + p; SRmul_1_l : forall n, 1*n == n; SRmul_0_l : forall n, 0*n == 0; SRmul_sym : forall n m, n*m == m*n; SRmul_assoc : forall n m p, n*(m*p) == (n*m)*p; SRdistr_l : forall n m p, (n + m)*p == n*p + m*p }. \end{verbatim} This implementation of {\tt ring} also features a notion of constant that can be parameterized. This can be used to improve the handling of closed expressions when operations are effective. It consists in introducing a type of \emph{coefficients} and an implementation of the ring operations, and a morphism from the coefficient type to the ring carrier type. The morphism needs not be injective, nor surjective. As an example, one can consider the real numbers. The set of coefficients could be the rational numbers, upon which the ring operations can be implemented. The fact that there exists a morphism is defined by the following properties: \begin{verbatim} Record ring_morph : Prop := mkmorph { morph0 : [cO] == 0; morph1 : [cI] == 1; morph_add : forall x y, [x +! y] == [x]+[y]; morph_sub : forall x y, [x -! y] == [x]-[y]; morph_mul : forall x y, [x *! y] == [x]*[y]; morph_opp : forall x, [-!x] == -[x]; morph_eq : forall x y, x?=!y = true -> [x] == [y] }. Record semi_morph : Prop := mkRmorph { Smorph0 : [cO] == 0; Smorph1 : [cI] == 1; Smorph_add : forall x y, [x +! y] == [x]+[y]; Smorph_mul : forall x y, [x *! y] == [x]*[y]; Smorph_eq : forall x y, x?=!y = true -> [x] == [y] }. \end{verbatim} where {\tt c0} and {\tt cI} denote the 0 and 1 of the coefficient set, {\tt +!}, {\tt *!}, {\tt -!} are the implementations of the ring operations, {\tt ==} is the equality of the coefficients, {\tt ?+!} is an implementation of this equality, and {\tt [x]} is a notation for the image of {\tt x} by the ring morphism. Since {\tt Z} is an initial ring (and {\tt N} is an initial semi-ring), it can always be considered as a set of coefficients. There are basically three kinds of (semi-)rings: \begin{description} \item[abstract rings] to be used when operations are not effective. The set of coefficients is {\tt Z} (or {\tt N} for semi-rings). \item[computational rings] to be used when operations are effective. The set of coefficients is the ring itself. The user only has to provide an implementation for the equality. \item[customized ring] for other cases. The user has to provide the coefficient set and the morphism. \end{description} This implementation of ring can also recognize simple power expressions as ring expressions. A power function is specified by the following property: \begin{verbatim} Section POWER. Variable Cpow : Set. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Record power_theory : Prop := mkpow_th { rpow_pow_N : forall r n, req (rpow r (Cp_phi n)) (pow_N rI rmul r n) }. End POWER. 
\end{verbatim} The syntax for adding a new ring is {\tt Add Ring $name$ : $ring$ ($mod_1$,\dots,$mod_2$)}. The name is not relevent. It is just used for error messages. The term $ring$ is a proof that the ring signature satisfies the (semi-)ring axioms. The optional list of modifiers is used to tailor the behavior of the tactic. The following list describes their syntax and effects: \begin{description} \item[abstract] declares the ring as abstract. This is the default. \item[decidable \term] declares the ring as computational. The expression \term{} is the correctness proof of an equality test {\tt ?=!} (which should be evaluable). Its type should be of the form {\tt forall x y, x?=!y = true $\rightarrow$ x == y}. \item[morphism \term] declares the ring as a customized one. The expression \term{} is a proof that there exists a morphism between a set of coefficient and the ring carrier (see {\tt Ring\_theory.ring\_morph} and {\tt Ring\_theory.semi\_morph}). \item[setoid \term$_1$ \term$_2$] forces the use of given setoid. The expression \term$_1$ is a proof that the equality is indeed a setoid (see {\tt Setoid.Setoid\_Theory}), and \term$_2$ a proof that the ring operations are morphisms (see {\tt Ring\_theory.ring\_eq\_ext} and {\tt Ring\_theory.sring\_eq\_ext}). This modifier needs not be used if the setoid and morphisms have been declared. \item[constants [\ltac]] specifies a tactic expression that, given a term, returns either an object of the coefficient set that is mapped to the expression via the morphism, or returns {\tt InitialRing.NotConstant}. The default behaviour is to map only 0 and 1 to their counterpart in the coefficient set. This is generally not desirable for non trivial computational rings. \item[preprocess [\ltac]] specifies a tactic that is applied as a preliminary step for {\tt ring} and {\tt ring\_simplify}. It can be used to transform a goal so that it is better recognized. For instance, {\tt S n} can be changed to {\tt plus 1 n}. \item[postprocess [\ltac]] specifies a tactic that is applied as a final step for {\tt ring\_simplify}. For instance, it can be used to undo modifications of the preprocessor. \item[power\_tac {\term} [\ltac]] allows {\tt ring} and {\tt ring\_simplify} to recognize power expressions with a constant positive integer exponent (example: $x^2$). The term {\term} is a proof that a given power function satisfies the specification of a power function ({\term} has to be a proof of {\tt Ring\_theory.power\_theory}) and {\ltac} specifies a tactic expression that, given a term, ``abstracts'' it into an object of type {\tt N} whose interpretation via {\tt Cp\_phi} (the evaluation function of power coefficient) is the original term, or returns {\tt InitialRing.NotConstant} if not a constant coefficient (i.e. {\ltac} is the inverse function of {\tt Cp\_phi}). See files {\tt plugins/setoid\_ring/ZArithRing.v} and {\tt plugins/setoid\_ring/RealField.v} for examples. By default the tactic does not recognize power expressions as ring expressions. \item[sign {\term}] allows {\tt ring\_simplify} to use a minus operation when outputing its normal form, i.e writing $x - y$ instead of $x + (-y)$. The term {\term} is a proof that a given sign function indicates expressions that are signed ({\term} has to be a proof of {\tt Ring\_theory.get\_sign}). See {\tt plugins/setoid\_ring/IntialRing.v} for examples of sign function. \item[div {\term}] allows {\tt ring} and {\tt ring\_simplify} to use moniomals with coefficient other than 1 in the rewriting. 
The term {\term} is a proof that a given division function satisfies the specification of an euclidean division function ({\term} has to be a proof of {\tt Ring\_theory.div\_theory}). For example, this function is called when trying to rewrite $7x$ by $2x = z$ to tell that $7 = 3 * 2 + 1$. See {\tt plugins/setoid\_ring/IntialRing.v} for examples of div function. \end{description} \begin{ErrMsgs} \item \errindex{bad ring structure} The proof of the ring structure provided is not of the expected type. \item \errindex{bad lemma for decidability of equality} The equality function provided in the case of a computational ring has not the expected type. \item \errindex{ring {\it operation} should be declared as a morphism} A setoid associated to the carrier of the ring structure as been found, but the ring operation should be declared as morphism. See~\ref{setoidtactics}. \end{ErrMsgs} \asection{How does it work?} The code of \texttt{ring} is a good example of tactic written using \textit{reflection}. What is reflection? Basically, it is writing \Coq{} tactics in \Coq, rather than in \ocaml. From the philosophical point of view, it is using the ability of the Calculus of Constructions to speak and reason about itself. For the \texttt{ring} tactic we used \Coq\ as a programming language and also as a proof environment to build a tactic and to prove it correctness. The interested reader is strongly advised to have a look at the file \texttt{Ring\_polynom.v}. Here a type for polynomials is defined: \begin{small} \begin{flushleft} \begin{verbatim} Inductive PExpr : Type := | PEc : C -> PExpr | PEX : positive -> PExpr | PEadd : PExpr -> PExpr -> PExpr | PEsub : PExpr -> PExpr -> PExpr | PEmul : PExpr -> PExpr -> PExpr | PEopp : PExpr -> PExpr | PEpow : PExpr -> N -> PExpr. \end{verbatim} \end{flushleft} \end{small} Polynomials in normal form are defined as: \begin{small} \begin{flushleft} \begin{verbatim} Inductive Pol : Type := | Pc : C -> Pol | Pinj : positive -> Pol -> Pol | PX : Pol -> positive -> Pol -> Pol. \end{verbatim} \end{flushleft} \end{small} where {\tt Pinj n P} denotes $P$ in which $V_i$ is replaced by $V_{i+n}$, and {\tt PX P n Q} denotes $P \otimes V_1^{n} \oplus Q'$, $Q'$ being $Q$ where $V_i$ is replaced by $V_{i+1}$. Variables maps are represented by list of ring elements, and two interpretation functions, one that maps a variables map and a polynomial to an element of the concrete ring, and the second one that does the same for normal forms: \begin{small} \begin{flushleft} \begin{verbatim} Definition PEeval : list R -> PExpr -> R := [...]. Definition Pphi_dev : list R -> Pol -> R := [...]. \end{verbatim} \end{flushleft} \end{small} A function to normalize polynomials is defined, and the big theorem is its correctness w.r.t interpretation, that is: \begin{small} \begin{flushleft} \begin{verbatim} Definition norm : PExpr -> Pol := [...]. Lemma Pphi_dev_ok : forall l pe npe, norm pe = npe -> PEeval l pe == Pphi_dev l npe. \end{verbatim} \end{flushleft} \end{small} So now, what is the scheme for a normalization proof? Let \texttt{p} be the polynomial expression that the user wants to normalize. First a little piece of ML code guesses the type of \texttt{p}, the ring theory \texttt{T} to use, an abstract polynomial \texttt{ap} and a variables map \texttt{v} such that \texttt{p} is $\beta\delta\iota$-equivalent to \verb|(PEeval v ap)|. 
Then we replace it by \verb|(Pphi_dev v (norm ap))|, using the main correctness
theorem, and we reduce it to a concrete expression \texttt{p'}, which is the concrete
normal form of \texttt{p}. This is summarized in the following diagram:
\begin{center}
\begin{tabular}{rcl}
\texttt{p} & $\rightarrow_{\beta\delta\iota}$ & \texttt{(PEeval v ap)} \\
 & & $=_{\mathrm{(by\ the\ main\ correctness\ theorem)}}$ \\
\texttt{p'} & $\leftarrow_{\beta\delta\iota}$ & \texttt{(Pphi\_dev v (norm ap))}
\end{tabular}
\end{center}
The user does not see the right-hand part of the diagram. From the outside, the
tactic behaves like a $\beta\delta\iota$ simplification extended with AC rewriting
rules. Basically, the proof is only the application of the main correctness theorem
to well-chosen arguments.
\asection{Dealing with fields
\tacindex{field} \tacindex{field\_simplify} \tacindex{field\_simplify\_eq}}
The {\tt field} tactic is an extension of the {\tt ring} tactic that deals with
rational expressions. Given a rational expression $F=0$, it first reduces the
expression $F$ to a common denominator $N/D = 0$ where $N$ and $D$ are two ring
expressions. For example, if we take $F = (1 - 1/x) x - x + 1$, this gives
$N = (x - 1) x - x^2 + x$ and $D = x$. It then calls {\tt ring} to solve $N=0$. Note
that {\tt field} also generates non-zero conditions for all the denominators it
encounters in the reduction. In our example, it generates the condition $x \neq 0$.
These conditions appear as one subgoal, which is a conjunction if there are several
denominators. Non-zero conditions are {\it always} polynomial expressions. For
example, when reducing the expression $1/(1 + 1/x)$, two side conditions are
generated: $x\neq 0$ and $x + 1 \neq 0$. Factorized expressions are broken up since a
field is an integral domain, and when the equality test on coefficients is complete
w.r.t. the equality of the target field, constants can be proven different from zero
automatically.
The tactic must be loaded by \texttt{Require Import Field}. New field structures can
be declared to the system with the \texttt{Add Field} command (see below). The field
of real numbers is defined in module \texttt{RealField} (in
\texttt{plugins/setoid\_ring}). It is exported by module \texttt{Rbase}, so that
requiring \texttt{Rbase} or \texttt{Reals} is enough to use the field tactics on real
numbers. Rational numbers in canonical form are also declared as a field in module
\texttt{Qcanon}.
\Example
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\begin{coq_example}
Require Import Reals.
Open Scope R_scope.
Goal forall x, x <> 0 -> (1 - 1/x) * x - x + 1 = 0.
\end{coq_example}
\begin{coq_example}
intros; field; auto.
\end{coq_example}
\begin{coq_eval}
Abort.
\end{coq_eval}
\begin{coq_example}
Goal forall x y, y <> 0 -> y = x -> x/y = 1.
\end{coq_example}
\begin{coq_example}
intros x y H H1; field [H1]; auto.
\end{coq_example}
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\begin{Variants}
\item {\tt field [\term$_1$ {\ldots} \term$_n$]} decides the equality of two terms
modulo field operations and rewriting of the equalities defined by \term$_1$ {\ldots}
\term$_n$. Each of \term$_1$ {\ldots} \term$_n$ has to be a proof of some equality
$m = p$, where $m$ is a monomial (after ``abstraction''), $p$ a polynomial and $=$
the corresponding equality of the field structure. Beware that rewriting works with
the equality $m=p$ only if $p$ is a polynomial, since rewriting is handled by the
underlying {\tt ring} tactic.
\item {\tt field\_simplify} performs the simplification in the conclusion of the goal, $F_1 = F_2$ becomes $N_1/D_1 = N_2/D_2$. A normalization step (the same as the one for rings) is then applied to $N_1$, $D_1$, $N_2$ and $D_2$. This way, polynomials remain in factorized form during the fraction simplifications. This yields smaller expressions when reducing to the same denominator since common factors can be cancelled. \item {\tt field\_simplify [\term$_1$ {\ldots} \term$_n$]} performs the simplification in the conclusion of the goal using the equalities defined by \term$_1$ {\ldots} \term$_n$. \item {\tt field\_simplify [\term$_1$ {\ldots} \term$_n$] $t_1$ \ldots $t_m$} performs the simplification in the terms $t_1$ \ldots $t_m$ of the conclusion of the goal using the equalities defined by \term$_1$ {\ldots} \term$_n$. \item {\tt field\_simplify in $H$} performs the simplification in the assumption $H$. \item {\tt field\_simplify [\term$_1$ {\ldots} \term$_n$] in $H$} performs the simplification in the assumption $H$ using the equalities defined by \term$_1$ {\ldots} \term$_n$. \item {\tt field\_simplify [\term$_1$ {\ldots} \term$_n$] $t_1$ \ldots $t_m$ in $H$} performs the simplification in the terms $t_1$ \ldots $t_n$ of the assumption $H$ using the equalities defined by \term$_1$ {\ldots} \term$_m$. \item {\tt field\_simplify\_eq} performs the simplification in the conclusion of the goal removing the denominator. $F_1 = F_2$ becomes $N_1 D_2 = N_2 D_1$. \item {\tt field\_simplify\_eq [\term$_1$ {\ldots} \term$_n$]} performs the simplification in the conclusion of the goal using the equalities defined by \term$_1$ {\ldots} \term$_n$. \item {\tt field\_simplify\_eq} in $H$ performs the simplification in the assumption $H$. \item {\tt field\_simplify\_eq [\term$_1$ {\ldots} \term$_n$] in $H$} performs the simplification in the assumption $H$ using the equalities defined by \term$_1$ {\ldots} \term$_n$. \end{Variants} \asection{Adding a new field structure \comindex{Add Field}} Declaring a new field consists in proving that a field signature (a carrier set, an equality, and field operations: {\tt Field\_theory.field\_theory} and {\tt Field\_theory.semi\_field\_theory}) satisfies the field axioms. Semi-fields (fields without $+$ inverse) are also supported. The equality can be either Leibniz equality, or any relation declared as a setoid (see~\ref{setoidtactics}). The definition of fields and semi-fields is: \begin{verbatim} Record field_theory : Prop := mk_field { F_R : ring_theory rO rI radd rmul rsub ropp req; F_1_neq_0 : ~ 1 == 0; Fdiv_def : forall p q, p / q == p * / q; Finv_l : forall p, ~ p == 0 -> / p * p == 1 }. Record semi_field_theory : Prop := mk_sfield { SF_SR : semi_ring_theory rO rI radd rmul req; SF_1_neq_0 : ~ 1 == 0; SFdiv_def : forall p q, p / q == p * / q; SFinv_l : forall p, ~ p == 0 -> / p * p == 1 }. \end{verbatim} The result of the normalization process is a fraction represented by the following type: \begin{verbatim} Record linear : Type := mk_linear { num : PExpr C; denum : PExpr C; condition : list (PExpr C) }. \end{verbatim} where {\tt num} and {\tt denum} are the numerator and denominator; {\tt condition} is a list of expressions that have appeared as a denominator during the normalization process. These expressions must be proven different from zero for the correctness of the algorithm. The syntax for adding a new field is {\tt Add Field $name$ : $field$ ($mod_1$,\dots,$mod_2$)}. The name is not relevent. It is just used for error messages. 
$field$ is a proof that the field signature satisfies the (semi-)field axioms. The optional list of modifiers is used to tailor the behaviour of the tactic. Since field tactics are built upon ring tactics, all mofifiers of the {\tt Add Ring} apply. There is only one specific modifier: \begin{description} \item[completeness \term] allows the field tactic to prove automatically that the image of non-zero coefficients are mapped to non-zero elements of the field. \term is a proof of {\tt forall x y, [x] == [y] -> x?=!y = true}, which is the completeness of equality on coefficients w.r.t. the field equality. \end{description} \asection{Legacy implementation} \Warning This tactic is the {\tt ring} tactic of previous versions of \Coq{} and it should be considered as deprecated. It will probably be removed in future releases. It has been kept only for compatibility reasons and in order to help moving existing code to the newer implementation described above. For more details, please refer to the Coq Reference Manual, version 8.0. \subsection{\tt legacy ring \term$_1$ \dots\ \term$_n$ \tacindex{legacy ring} \comindex{Add Legacy Ring} \comindex{Add Legacy Semi Ring}} This tactic, written by Samuel Boutin and Patrick Loiseleur, applies associative commutative rewriting on every ring. The tactic must be loaded by \texttt{Require Import LegacyRing}. The ring must be declared in the \texttt{Add Ring} command. The ring of booleans (with \texttt{andb} as multiplication and \texttt{xorb} as addition) is predefined; if one wants to use the tactic on \texttt{nat} one must first require the module \texttt{LegacyArithRing}; for \texttt{Z}, do \texttt{Require Import LegacyZArithRing}; for \texttt{N}, do \texttt{Require Import LegacyNArithRing}. The terms \term$_1$, \dots, \term$_n$ must be subterms of the goal conclusion. The tactic \texttt{ring} normalizes these terms w.r.t. associativity and commutativity and replace them by their normal form. \begin{Variants} \item \texttt{legacy ring} When the goal is an equality $t_1=t_2$, it acts like \texttt{ring\_simplify} $t_1$ $t_2$ and then solves the equality by reflexivity. \item \texttt{ring\_nat} is a tactic macro for \texttt{repeat rewrite S\_to\_plus\_one; ring}. The theorem \texttt{S\_to\_plus\_one} is a proof that \texttt{forall (n:nat), S n = plus (S O) n}. \end{Variants} You can have a look at the files \texttt{LegacyRing.v}, \texttt{ArithRing.v}, \texttt{ZArithRing.v} to see examples of the \texttt{Add Ring} command. \subsection{Add a ring structure} It can be done in the \Coq toplevel (No ML file to edit and to link with \Coq). First, \texttt{ring} can handle two kinds of structure: rings and semi-rings. Semi-rings are like rings without an opposite to addition. Their precise specification (in \gallina) can be found in the file \begin{quotation} \begin{verbatim} plugins/ring/Ring_theory.v \end{verbatim} \end{quotation} The typical example of ring is \texttt{Z}, the typical example of semi-ring is \texttt{nat}. The specification of a ring is divided in two parts: first the record of constants ($\oplus$, $\otimes$, 1, 0, $\ominus$) and then the theorems (associativity, commutativity, etc.). \begin{small} \begin{flushleft} \begin{verbatim} Section Theory_of_semi_rings. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. (* There is also a "weakly decidable" equality on A. That means that if (A_eq x y)=true then x=y but x=y can arise when (A_eq x y)=false. 
On an abstract ring the function [x,y:A]false is a good choice. The proof of A_eq_prop is in this case easy. *) Variable Aeq : A -> A -> bool. Record Semi_Ring_Theory : Prop := { SR_plus_sym : (n,m:A)[| n + m == m + n |]; SR_plus_assoc : (n,m,p:A)[| n + (m + p) == (n + m) + p |]; SR_mult_sym : (n,m:A)[| n*m == m*n |]; SR_mult_assoc : (n,m,p:A)[| n*(m*p) == (n*m)*p |]; SR_plus_zero_left :(n:A)[| 0 + n == n|]; SR_mult_one_left : (n:A)[| 1*n == n |]; SR_mult_zero_left : (n:A)[| 0*n == 0 |]; SR_distr_left : (n,m,p:A) [| (n + m)*p == n*p + m*p |]; SR_plus_reg_left : (n,m,p:A)[| n + m == n + p |] -> m==p; SR_eq_prop : (x,y:A) (Is_true (Aeq x y)) -> x==y }. \end{verbatim} \end{flushleft} \end{small} \begin{small} \begin{flushleft} \begin{verbatim} Section Theory_of_rings. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Record Ring_Theory : Prop := { Th_plus_sym : (n,m:A)[| n + m == m + n |]; Th_plus_assoc : (n,m,p:A)[| n + (m + p) == (n + m) + p |]; Th_mult_sym : (n,m:A)[| n*m == m*n |]; Th_mult_assoc : (n,m,p:A)[| n*(m*p) == (n*m)*p |]; Th_plus_zero_left :(n:A)[| 0 + n == n|]; Th_mult_one_left : (n:A)[| 1*n == n |]; Th_opp_def : (n:A) [| n + (-n) == 0 |]; Th_distr_left : (n,m,p:A) [| (n + m)*p == n*p + m*p |]; Th_eq_prop : (x,y:A) (Is_true (Aeq x y)) -> x==y }. \end{verbatim} \end{flushleft} \end{small} To define a ring structure on A, you must provide an addition, a multiplication, an opposite function and two unities 0 and 1. You must then prove all theorems that make (A,Aplus,Amult,Aone,Azero,Aeq) a ring structure, and pack them with the \verb|Build_Ring_Theory| constructor. Finally to register a ring the syntax is: \comindex{Add Legacy Ring} \begin{quotation} \texttt{Add Legacy Ring} \textit{A Aplus Amult Aone Azero Ainv Aeq T} \texttt{[} \textit{c1 \dots cn} \texttt{].} \end{quotation} \noindent where \textit{A} is a term of type \texttt{Set}, \textit{Aplus} is a term of type \texttt{A->A->A}, \textit{Amult} is a term of type \texttt{A->A->A}, \textit{Aone} is a term of type \texttt{A}, \textit{Azero} is a term of type \texttt{A}, \textit{Ainv} is a term of type \texttt{A->A}, \textit{Aeq} is a term of type \texttt{A->bool}, \textit{T} is a term of type \texttt{(Ring\_Theory }\textit{A Aplus Amult Aone Azero Ainv Aeq}\texttt{)}. The arguments \textit{c1 \dots cn}, are the names of constructors which define closed terms: a subterm will be considered as a constant if it is either one of the terms \textit{c1 \dots cn} or the application of one of these terms to closed terms. For \texttt{nat}, the given constructors are \texttt{S} and \texttt{O}, and the closed terms are \texttt{O}, \texttt{(S O)}, \texttt{(S (S O))}, \ldots \begin{Variants} \item \texttt{Add Legacy Semi Ring} \textit{A Aplus Amult Aone Azero Aeq T} \texttt{[} \textit{c1 \dots\ cn} \texttt{].}\comindex{Add Legacy Semi Ring} There are two differences with the \texttt{Add Ring} command: there is no inverse function and the term $T$ must be of type \texttt{(Semi\_Ring\_Theory }\textit{A Aplus Amult Aone Azero Aeq}\texttt{)}. \item \texttt{Add Legacy Abstract Ring} \textit{A Aplus Amult Aone Azero Ainv Aeq T}\texttt{.}\comindex{Add Legacy Abstract Ring} This command should be used for when the operations of rings are not computable; for example the real numbers of \texttt{theories/REALS/}. Here $0+1$ is not beta-reduced to $1$ but you still may want to \textit{rewrite} it to $1$ using the ring axioms. 
The argument \texttt{Aeq} is not used; a good choice for that function is \verb+[x:A]false+. \item \texttt{Add Legacy Abstract Semi Ring} \textit{A Aplus Amult Aone Azero Aeq T}\texttt{.}\comindex{Add Legacy Abstract Semi Ring} \end{Variants} \begin{ErrMsgs} \item \errindex{Not a valid (semi)ring theory}. That happens when the typing condition does not hold. \end{ErrMsgs} Currently, the hypothesis is made than no more than one ring structure may be declared for a given type in \texttt{Set} or \texttt{Type}. This allows automatic detection of the theory used to achieve the normalization. On popular demand, we can change that and allow several ring structures on the same set. The table of ring theories is compatible with the \Coq\ sectioning mechanism. If you declare a ring inside a section, the declaration will be thrown away when closing the section. And when you load a compiled file, all the \texttt{Add Ring} commands of this file that are not inside a section will be loaded. The typical example of ring is \texttt{Z}, and the typical example of semi-ring is \texttt{nat}. Another ring structure is defined on the booleans. \Warning Only the ring of booleans is loaded by default with the \texttt{Ring} module. To load the ring structure for \texttt{nat}, load the module \texttt{ArithRing}, and for \texttt{Z}, load the module \texttt{ZArithRing}. \subsection{\tt legacy field \tacindex{legacy field}} This tactic written by David~Delahaye and Micaela~Mayero solves equalities using commutative field theory. Denominators have to be non equal to zero and, as this is not decidable in general, this tactic may generate side conditions requiring some expressions to be non equal to zero. This tactic must be loaded by {\tt Require Import LegacyField}. Field theories are declared (as for {\tt legacy ring}) with the {\tt Add Legacy Field} command. \subsection{\tt Add Legacy Field \comindex{Add Legacy Field}} This vernacular command adds a commutative field theory to the database for the tactic {\tt field}. You must provide this theory as follows: \begin{flushleft} {\tt Add Legacy Field {\it A} {\it Aplus} {\it Amult} {\it Aone} {\it Azero} {\it Aopp} {\it Aeq} {\it Ainv} {\it Rth} {\it Tinvl}} \end{flushleft} where {\tt {\it A}} is a term of type {\tt Type}, {\tt {\it Aplus}} is a term of type {\tt A->A->A}, {\tt {\it Amult}} is a term of type {\tt A->A->A}, {\tt {\it Aone}} is a term of type {\tt A}, {\tt {\it Azero}} is a term of type {\tt A}, {\tt {\it Aopp}} is a term of type {\tt A->A}, {\tt {\it Aeq}} is a term of type {\tt A->bool}, {\tt {\it Ainv}} is a term of type {\tt A->A}, {\tt {\it Rth}} is a term of type {\tt (Ring\_Theory {\it A Aplus Amult Aone Azero Ainv Aeq})}, and {\tt {\it Tinvl}} is a term of type {\tt forall n:{\it A}, {\~{}}(n={\it Azero})->({\it Amult} ({\it Ainv} n) n)={\it Aone}}. To build a ring theory, refer to Chapter~\ref{ring} for more details. This command adds also an entry in the ring theory table if this theory is not already declared. So, it is useless to keep, for a given type, the {\tt Add Ring} command if you declare a theory with {\tt Add Field}, except if you plan to use specific features of {\tt ring} (see Chapter~\ref{ring}). However, the module {\tt ring} is not loaded by {\tt Add Field} and you have to make a {\tt Require Import Ring} if you want to call the {\tt ring} tactic. 
\begin{Variants}
\item {\tt Add Legacy Field {\it A} {\it Aplus} {\it Amult} {\it Aone} {\it Azero}
{\it Aopp} {\it Aeq} {\it Ainv} {\it Rth} {\it Tinvl}}\\
{\tt \phantom{Add Field }with minus:={\it Aminus}}
Adds also the term {\it Aminus} which must be a constant expressed by means of
{\it Aopp}.
\item {\tt Add Legacy Field {\it A} {\it Aplus} {\it Amult} {\it Aone} {\it Azero}
{\it Aopp} {\it Aeq} {\it Ainv} {\it Rth} {\it Tinvl}}\\
{\tt \phantom{Add Legacy Field }with div:={\it Adiv}}
Adds also the term {\it Adiv} which must be a constant expressed by means of
{\it Ainv}.
\end{Variants}
\SeeAlso \cite{DelMay01} for more details regarding the implementation of
{\tt legacy field}.
\asection{History of \texttt{ring}}
First Samuel Boutin designed the tactic \texttt{ACDSimpl}. This tactic did a lot of
rewriting. But the proof terms generated by rewriting were too big for \Coq's
type-checker. Let us see why:
\begin{coq_eval}
Require Import ZArith.
Open Scope Z_scope.
\end{coq_eval}
\begin{coq_example}
Goal forall x y z:Z, x + 3 + y + y * z = x + 3 + y + z * y.
\end{coq_example}
\begin{coq_example*}
intros; rewrite (Z.mul_comm y z); reflexivity.
Save toto.
\end{coq_example*}
\begin{coq_example}
Print toto.
\end{coq_example}
At each step of rewriting, the whole context is duplicated in the proof term. Thus, a
tactic that does hundreds of rewriting steps generates huge proof terms. Since
\texttt{ACDSimpl} was too slow, Samuel Boutin rewrote it using reflection (see his
article in TACS'97 \cite{Bou97}). Later, the tactic was rewritten by Patrick
Loiseleur: the new tactic no longer requires \texttt{ACDSimpl} to compile, and it
makes use of $\beta\delta\iota$-reduction not only to replace the rewriting steps,
but also to achieve the interleaving of computation and reasoning (see
\ref{DiscussReflection}). He also wrote some ML code for the \texttt{Add Ring}
command, which allows new rings to be registered dynamically.
Proof terms generated by \texttt{ring} are quite small: they are linear in the number
of $\oplus$ and $\otimes$ operations in the normalized terms. Type-checking those
terms requires some time because it makes heavy use of the conversion rule, but
memory requirements are much smaller.
\asection{Discussion}
\label{DiscussReflection}
Efficiency is not the only motivation to use reflection here. \texttt{ring} also
deals with constants: for example, it rewrites the expression $34 + 2*x - x + 12$ to
the expected result $x + 46$. For the tactic \texttt{ACDSimpl}, the only constants
were 0 and 1. So the expression $34 + 2*(x - 1) + 12$ is interpreted as
$V_0 \oplus V_1 \otimes (V_2 \ominus 1) \oplus V_3$, with the variables mapping
$\{V_0 \mt 34; V_1 \mt 2; V_2 \mt x; V_3 \mt 12 \}$. Then it is rewritten to
$34 - x + 2*x + 12$, very far from the expected result. Here rewriting is not
sufficient: you have to do some kind of reduction (some kind of
\textit{computation}) to achieve the normalization.
The tactic \texttt{ring} is not only faster than a classical one: using reflection,
we get for free an integration of computation and reasoning that would be very
complex to implement in the classic fashion.
Is it the ultimate way to write tactics? The answer is: yes and no. The \texttt{ring}
tactic makes intensive use of the conversion rule of \CIC, that is, it replaces
proofs by computation as much as possible. It can be useful in all situations where a
classical tactic generates huge proof terms. Symbolic processing and tautology
checking are examples of such situations.
But there are also tactics like \texttt{auto} or \texttt{linear} that do many complex computations, using side-effects and backtracking, and generate a small proof term. Clearly, it would be significantly less efficient to replace them by tactics using reflection. Another idea suggested by Benjamin Werner: reflection could be used to couple an external tool (a rewriting program or a model checker) with \Coq. We define (in \Coq) a type of terms, a type of \emph{traces}, and prove a correction theorem that states that \emph{replaying traces} is safe w.r.t some interpretation. Then we let the external tool do every computation (using side-effects, backtracking, exception, or others features that are not available in pure lambda calculus) to produce the trace: now we can check in Coq{} that the trace has the expected semantic by applying the correction lemma. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/RefMan-add.tex0000640000175000001440000000502411366307247016276 0ustar notinusers\chapter[List of additional documentation]{List of additional documentation\label{Addoc}} \section[Tutorials]{Tutorials\label{Tutorial}} A companion volume to this reference manual, the \Coq\ Tutorial, is aimed at gently introducing new users to developing proofs in \Coq\ without assuming prior knowledge of type theory. In a second step, the user can read also the tutorial on recursive types (document {\tt RecTutorial.ps}). \section[The \Coq\ standard library]{The \Coq\ standard library\label{Addoc-library}} A brief description of the \Coq\ standard library is given in the additional document {\tt Library.dvi}. \section[Installation and un-installation procedures]{Installation and un-installation procedures\label{Addoc-install}} A \verb!INSTALL! file in the distribution explains how to install \Coq. \section[{\tt Extraction} of programs]{{\tt Extraction} of programs\label{Addoc-extract}} {\tt Extraction} is a package offering some special facilities to extract ML program files. It is described in the separate document {\tt Extraction.dvi} \index{Extraction of programs} \section[{\tt Program}]{A tool for {\tt Program}-ing\label{Addoc-program}} {\tt Program} is a package offering some special facilities to extract ML program files. It is described in the separate document {\tt Program.dvi} \index{Program-ing} \section[Proof printing in {\tt Natural} language]{Proof printing in {\tt Natural} language\label{Addoc-natural}} {\tt Natural} is a tool to print proofs in natural language. It is described in the separate document {\tt Natural.dvi}. \index{Natural@{\tt Print Natural}} \index{Printing in natural language} \section[The {\tt Omega} decision tactic]{The {\tt Omega} decision tactic\label{Addoc-omega}} {\bf Omega} is a tactic to automatically solve arithmetical goals in Presburger arithmetic (i.e. arithmetic without multiplication). It is described in the separate document {\tt Omega.dvi}. \index{Omega@{\tt Omega}} \section[Simplification on rings]{Simplification on rings\label{Addoc-polynom}} A documentation of the package {\tt polynom} (simplification on rings) can be found in the document {\tt Polynom.dvi} \index{Polynom@{\tt Polynom}} \index{Simplification on rings} %\section[Anomalies]{Anomalies\label{Addoc-anomalies}} %The separate document {\tt Anomalies.*} gives a list of known %anomalies and bugs of the system. Before communicating us an %anomalous behavior, please check first whether it has been already %reported in this document. 
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
coq-8.4pl2/doc/refman/coqide-queries.png0000640000175000001440000006526410406335323017305 0ustar notinusers
[binary PNG image data omitted]
_qƕ"JT^n.nJ:ji 8<@B>Eg^e8D rz|2!؊+ʊ= ((k@0(7.U( b̞wo^V1zyPM00ɺ9B\.\/o"""""KbNNDDDDD8;ΩKTDDDDDD61"""""ʄ$2&)cʴ Q\:صi)JI#MDDg]lQ"BDDDь)d?LDDݗ* V)Oa7O4yFDDDDDdKVtRzPtWh5u.Hdt;$`t[U:_`k:}MNDttv?g|΄p%""""ԉȠ`Mۃ7#:DILJϱ!E_`FX)JYVxO1 t RL'Q<"Y^f=TVEd8F}e}Ф=Rg'Y)+G_(""oXK+e~dOH?h=1w@ hrXA*z⩳UN1""""dh#V$3 pN1tN>VxYg6lh}pΉ䌀D4@ȰhS5~VeMyEY9FO:G+ö:3ai+3aav`3!,T*atF볉uNb%`ݟeT'"LomEmoNY :'7/}z;Fx|ȼy:Fۍ9Kaa>|&8!!fCLD"Y^vl7dmUXUvYDD̨1kM72gL%R+Zg-[$Ft fuXZ묗:yKgB2E &0Z O8$Zp2+K:[m_ce;ED<4dR(P>eBpgH=z#':q:;}='Zg{9XIkuY:;C d;yXMs2iȘQ${ɶ` 4ؕiuδXD3%82m:Q1D?N Er/ZVnv˶w2G|Y'ܯb%]>$TDD4eRCy)tR6D ^?CL=7rifxcN2k۩O'ꛌ:>P7pJޱl$|1zY7}|-7eVM95뙈Po8fBoD>8u )yghHd\wlǀF$}C?F~dGc#$2dNSYH;Dv0"""5ՙYvy bHׯ>dޯI2%ҋX%V h)~btȄS}|+uNŵ1"rL&4&F5ˣ?eȔQ=2nPLW4XHTWRG l0  VJw6zRs?z"k֤=L޿5mk>\G!"""J>޼.:*h&6#8ɩƽr"tt:XπToo`#XqxGӺ}˜tՈ?#Sh8Vzv9??o}W|=zJw-/Xc'֖K`ŕ9.Ӎ "x?ֽc+ݑۧRCz bGHF_Ndxz̢ b%Xu5j-KpCp7>X-uNQ^S+DDP=SC֭ÖEں}7V^5]/Yk<~)Ӌ<~O~pqڍ޽nvD<[N3 ҽ}X"k}ch4b5E+`&Z>ݨkYE;?NXQ;ys`WիmM5ypI*pojrsT}>ƍ; ~-gȉ\(0J~+~V0Kc'dhsNCTK]0d}ה{p{3J" ae_w~Pp6nrgQ6nҋۆl7B\vU_>--xoλo~C˜9k6LXk͞3+ƍ/GkK3^zyЕo$V~_Xs<\ܼ<<zhaZT|_$I8t ^x44_""d  o?z^4GYy-KK=TFCCaSɼnLogL;`6lW`!?1ڨfދsۖSᆛWٿ<Ç`jxJ<Spڣ1k<ܺ JtFè6pt۾g܏)[(2j6u2;sÿ@ GII[/}O=8vNjۿt~}%""J`0 >{j-`,]➔¬ʽ6`)6|NɬоC^/X@nvX|A<݁p/*. X,;oEcC|>/7;oŒe'\d~s-yq~Xvʉ|}['?vo>sՃ.^z)~s-ky܈.^=i8[;oE((mGo%/Alg6ٵ"""JPD՜۽탸ק  {iÁTdѶO[PE2kR otDX {9DKx-}VNOP}oh?_]+ ڝ.Ԩ|MӐP/L+* ɫK_>b y!ij2C)Sׇ}tC?x ?sekNfy[~Wga^ˬ}fTNqفPt?R)VQ<ۛR֏)mb$܃C7hVfU>}фDq0ֱgJcw9qN3eͯ=5U5yW]'^x7˯ ǏGG{k̺[!E%"<`]CP ˁ-%E=4×7KUըjb Z3#tH ym!{&L|BAo|)S*姂I)"+f=>VDKg7Dω+o9J9u>6L~!ϥ~0~o Kk,p2ߨ֖FTOO+&\#r%x}9Pdy0J!u0"x' {Ԣ_u!Ic 'O6m'6 /eep+gM(Ub Njg{V8+;ec!~etvv~|իW'[J̈́!Pw5g]cU*"- VL: j\ζZQU=uGONe:a:Dno9UӇ߮UQ_wx0MqX[x&ֿqZ'}JǎU~.[)hi/4DD"{zyuV7 b q% Cx9]1^ƀjpN;ngkqa՘8 `7>'UXooa>558Kqn6AQdx}W^2زapۆc0{(*YsaΏE@47Wcx "_EBfۮAYp$IDg{3r 0kn jJ4MÔhmn$I(,i3*Jy1BScEƘ 4RQFpbDғKO5Z. hȄ@R߫鑁ΜR2,TO<`4%I0}D -8'9)>;3f7? GmYLr͍G1q=Ta9; `u0z`:%؇CP=c>|9yC?6tx߶UUۅC1}qoJ^wblT͘Qi*""S s.#=>,ѲtP[roAV=Tͻax/ns!A_4`<2 =ɐeA~C;N !`/|#{p`a[[~,/ya@EDDfwL>UCEaZW/?7 .8sכw,dPLQǺwzTa@E^o6VeLY:e݁ݑۧ*"ygܖj_ !UIZP%C}@DDD4*/<TDI{tWhT[<壅ĔFDDDDD7rPĀ&TDDDDDD61"""""" =}1|>[h;[n烢(WV},; yyyEѲ(/x K(# #?/7reYFkk;=[w*'g UUő#G>fTOŌiU5PDDDDƀh8Z[Uя~aiv_vnǟ;Vx|>2eY`Oh2::P]]pNlA1Ÿj|ajkM8؀o>wgPX% *Q"//yyy004z*ywUHwyMͭxWq%8Sa玽6ld7:;* pKP]]SNύ;xKw"""dP=`ADI&"D<4 "@Q[o/~[߸3J%̙瞻/*]AhF466cLY oBqQ5 A1P^uN""""G9Cu >lm˴[6 F `0v466,Yz\Ⓒ&Ů]QV5ȡ Ϙb  ~?P% ñcՃGiQp@bf4* ʬYU:pִ>GU`J @JSOTMq@}@qQ&WNYgj:&_69 'UP^ʘP>B{{3A?\=}m{ŀ2^!O1 (L :2>#h kZ̭Yn<9CBUUhjDPi0l+_ 7p=:::Ј;W^õ8fa\C840l8Ǝ-GI! AEooZZǎ5P}'"""JG,} >N,kYYYˣ''6Flcwb Z`hY,Orr"+w` TEr0@hPUKBAArrrx w 2TUx ;qy^|9`$QBߏDSS{،>E""""'[ֳֿNDiؘ>2x'( |:>FuLE띊0ӛvִ1 М'qlVbǎxM=8tp/jkxc[qQ4oASs֭>C͢I9DDDDNC~X=!VzC̖ R9,0e'd'NϔY~JNN;*TE wCi EQ ! Q,U> IDATQTXwY s#7]xW#Gt :fBL: 3gN%Ipd"""l[+avݓEJ<9-rx]2C,3 uҴ!CrI1FϋhnjBI(-q#? N{0~v r%%"""J~M4qcPe]V37(Ke"JQU( tP=T L^ x]$A$LXޞnTL@G{<^J KIc</ed-Hͨgg=+D7lb 'hR8C*,APUe0 OZNQH_W|9^xQT<`(8e}}CPPQ*+b3YQ'{ܯx')T>f_fpPkZ70Mo,`r)Y RxT$ă~UEB=Moo~~z4M?@kk; Q |޸@DDDl)G#WeLߏ^)ux/:=TBl\xY3{:c[=APHi:W_q)NɓPX< I (3ʀ*^lCr}ۅiS0zJzx0{kte#v鷳}0:vJ}809vo^Ө']"""}&hRPp@ˍ }ވ~}8tVȺG([o+{r|"˲ݜӖbnrNLDDDiF܃}$F 1:SvcDDD4Zd=Tɜ,9-cz"{dA#"""""eL@OoU=RfE*SDKd]a߉QJ*+h#ngHlEN*O}*}`d(E;>3HY^G<"pnkghSmhfi3>hh40{o*|P1p{HvmZ;_&,""""2* 4A>>]Lc@-7U c#_f0mGDDDDe? ~"L+1 zeB"""l*ihL b s*aDDDD$(݌\fkEDDD䌬rJ!AU1f&/3sOcdw%*ݿ萌__ZHouSKp``/6@|CNt(vJd| ~ ?[N 94$O*2pv8ȖukdLj)/H3 KfqC( 07e"gnrMk\. ˔F:85FQ^Jql8FJg#[3{:%"N)L8ڴvXLUd;rUr!Mp蹊>/ .-/Y4Æa"E62(SͭY>,2Zm.;QFcƬ(SĚ#mǛvÿ7 օ8t9j˟/7+FY딈({N)L n?D4mX`Nۻ^;`{}WJ pT.@ HXYF&ryЀ$˸2^/RTB$HopRK.3 xٙݽ̟M5*X.\i8)X7S]A@58ǐ^2Jc4<J}0~`ͽ? 
tဇ>ۉ64PYB8Z\Qh@[y u‘>=_͐3XFnjc`Rˡ炦,~ sjgatyzSb(uww=}?)zh=t@GV,m(Mq`d[{%4 jhՀ}@,@+͡鹠).p?`KT@ mcP[tPQ^|uoyeU~S?7,.1j[*[XeJ,C5 R '?oͺy!nŶ75Oƒ.j4}޼^kss|wwr$ADoXTmyFnM{gK@+aJ=Z~upVـ*006(xp_cTpse \yǞXaTRz-Jw^ùeߝ4 ,Pza^5C 幍JzDn)X4V,{45!WJEnh\IZ݇SWSբl]!U% 7R TS9 ZS))Xҍi)sE>n% $=T?%W[[/7%= `E S1 ܊{ 2_#5ɽT ҡ}ko+UsZP b.\ɐUR%yMUsXySm[!Du.-WzDŽ9o]z6E]w? א:vChX 9˝ߚ)rgʵk0EͰӚtK%N[52 9Tp@&*8~5K#,O@Pɐ?XA쁱ױ+|mh @{ـU8a~Su?F4P=Iu]w~㒼[=kk)%ݓ#`Kـo wF%Z55 /(E9|o /R`! =T~5?& n|hOܹtaީc čcLR\C3k->o1 w5Y6}.ا5msS0ʕTw/W߯6}^>0ɫ{`su?9}`Z/\bhEyg,J@;=ToFuMZy: ?-˿["cgsJ`&8ϩJ @[Z}2);G&'H\- [=jl~INn̔`j\9e^J<]p~]j\ʒudҀcMn+Y/uakN7W.-k} Q8-F]7pATږ.m.K,T8쐿ԷcVHeTkVн{djP5WKX8ԺG:%eWn0HsSҎ3#:c_+uc+U) _Ӫ:]sgzPO@[;6r]>8z&cY1֥7إ Ot ZQ;T?|V9Оt:8 6r!>VG}+-ʵ/&jp} %u==TpRg];in lUZ{ԺHbAJ(9fE ZԶ)bVsG9gcl`axWu)]*؈:tT8 @%sba5_Z3P WR[8ܪ0 `A O#Z1N(̧{YUDyNN,u*X>9rԔsز狥K.XoޏgTpBS =wJ1'|]OޙX/Az>4-l>yܿLEi>]*90=i0{k159פ\"O)CqIO-pOm/MF|zj0Ǝ۪^eWr,=kPh_T<6_'l엤)9^M>-nU=}w KZ9Tk.^BcsU.[X5Ze^1(O7LVLzKIͩ :֚,ϔGO,L,Jr}GЯzIt(J ږ †{jnQ** LƎ*%c*)sxUs8a`t*`K=' z)݃ϾOp˲qp?|uV7^|~=w~>˭IyfVn%5wQ2 z?뺮O/_Yu˿ٟ^xo?zުs8\QyP@pF*& _K ɦP8y[XȔH M ¥_*\IPN1+Lӯ܆ 'Uߐݺ?ܣ4}G[r|er C\IS/Th@zƆمX5֣Uv|J37):Rz_0;{7JGa0ѧg-Xک yox[zòl=dso偮K{3P}?@?ʷ{멈p)u9Ky}9[c=6V oS0Ě̩nЈ^Lu&5` ˰ULu]9[ಷ@׵Y2۪<Քwx\V}o`рjJcwn>a\OcѪb.}Ӓǚ'0l )sa^s뵕'9J뾷k0nPO8T#6XS׵ǭ k95-ʳ=(uP 6L6^\`JUVe+V 0bo@\=.P=EdtۯJ ZU-PR]vUu8CTO>z70tG^oe+-O_:-{UZ_ d0`:x˶u9ojZ10m S3s꽒֚Z`OCO|pM O9.V&ܮ =|?u(sT GҰ+L[&!HKC\w쐿@$bcih_ >0*5 Wl +y ]Ո GO'y*pWi^*XTVsrM,]_"7t$U=u\_~e e4Si,6)6)7)Pɲ,O#8+(M鵊 PVog D}p{a,Ն}+ 0Y*_VV󻻢 g(2P{~~w5 {bWZU0v{'xJ7Ux 9^|hreG@/%~EW;Lڲq뚅 ?Jn˧YN>QS-P\ܜ*8ԃc~6lL .uRE.R1#HE*P բ!;ǚp\4ս~;Y-j1_ܶ¶uc2 ؅Я-LP G FѰܚ=׹-!7· {jp UZh0UE)sxʯ4Q>ޔG;TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT>غݛ׷mÒ]ko*TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT>r".g_l] 7_o] pwwuX  TЃgz4;ٯ7(1oa^i*h_O`(@%@%@%s]wGlS;s? Y/}uϟ}?z=X9TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT>m|Z gPP'ʇ-\&T|pdUC|ЂzQ s3lCj,?*>(MXzO|pNEU풶|p^w698->^|d@_YÉTϟ}v9罀ʤaƨ N@uwwU9Qlᄎm]#`_J*JX?EUi~_l]`E$_$҃ޮw_mY<|qr(`-|ɧ] b>S[;C;M@4 w_%rf8gd9q 5>5H?પ ұKkƎYk8ܰLayƂ)*-˔k1LT6*sy^*tXHd>߱2mױ|+pM-s<=}S9 NW7g⸿i%f/IO`),%)ckcłc;oK>r)0s0݃Ͼ?ꔜᗇ>_Rv<;',\a^[֫D,~*9|L3 7(WkHiyv Ɔib,^p~{`Z4`s, 44sǶe~AK~iHퟚ+;uLS0o]wjy^xKs=`-s{;*ҖL̥9Whs=拥?535~rpрT/7k8Xs3E0{\[oJ uKm EA/W}s\M,(EN(6To/Ul<5KX;ϡJ)٧gITIJL=KkuM>/SNsx9GT0gwF{ yg8P_2\0%A`.2Cr1s8q?&1Ă)k jQ5e>[M_|\=uZ3gLꦴj^PysX;z^le^/_~_J~QW/_lPJη|Y JcC<$JcC<$JcC<$XoEn~> JcC<$JcC<$JcC<$XoEn~> !<7W$J_#D'J_#D'J_%$U!!%N~> !<7VpJ^&bjJ^&bjJ^(CC!!%N~> !<7V^J\-KFJ\-KFJ\/+t!!%N~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'XO[@OJ_#D'L=QBu!.Y~> !<7VpJ^&bjXN^_FJ^&bjL !<7V^J\-KFXLeH4J\-KFL:[J2!.Y~> !<;H;!!)oIrW(O#rrDrI!!(!h!!(m,!!&)2!!'FXrW(!ir;ba+!!'@V!!)oIrW(R$!!)K=rrBjc rW%N#iRS%&!.Y~> !<;H2!!)o@rW(NorrDr@!!(!_!!(m#!!&))!!'FOrW(!`r;ba"!!'@M!!)o@rW(Qp!!)K4rrBjZ rW%MoiQVCe!.Y~> !<;Gu!!)o.rW(N]rrDr.!!(!M!!(lf!!&(l!!'F=rW(!Nr;b`e!!'@;!!)o.rW(Q^!!)K"rrBjH rW%M]iO],8!.Y~> !<;lGq>gHE!!)iG!!)'1q>gBC!!)oI!!)oI!!)?9rW)uLrW)67!!)97!!)cE!!(a(q>fU-q>fm5 !!)cE!!(a(q>fL*!!)T@!!([&!!)uK!!'IYrVuuM!9)K/!;Y1G!9D`1!<1RK!<(IK!:JD !<;l>q>gH!!)'(q>gB:!!)o@!!)o@!!)?0rW)uCrW)6.!!)9.!!)cfU$q>fm, !!)cfL!!!)T7!!(Zr!!)uB!!'IPrVuuD!9)0&!;Xk>!9DE(!<17B!<(.B!:J)3!7]9m !<:=C!;"G8!.iDRblO".J,~> !<;l,q>gH*!!)i,!!)&kq>gB(!!)o.!!)o.!!)>srW)u1rW)5q!!)8q!!)c*!!(`bq>fTgq>flo 
!!)c*!!(`bq>fKd!!)T%!!(Z`!!)u0!!'I>rVuu2!9(Ni!;X5,!9Cck!<0V0!<'M0!:IH!!7\X[ !<9\1!;!f&!.hc@])d-VJ,~> !<;iF!!)uK!!)WA!!)$0!!)uK!!)lH!!)ZB!!)<8rrE&L!!(Bs!!(?r"p!6*!7o^$jOOM8f%'iP ec>'f!!(?r"p!6*!7o^$iRRu/o%!d@g"$-'qpkiLec5^Lf)GaJf)>[Jf)GaMf)GaKf)>[Gf)>[I f),O8ec5^Lf)Ga&ec5^1ec5^Jec5^6ec5^!ec5^Kec5^@ec5]$f&>0\TE'K~> !<;i=!!)uB!!)W8!!)$'!!)uB!!)l?!!)Z9!!)c2I_@ c27S/bl@bCc2Rdrbl@b(bl@bAbl@b-bl@ambl@bBbl@b7bl@`pc/HnJP5p+~> !<;i+!!)u0!!)W&!!)#j!!)u0!!)l-!!)Z'!!);rrrE&1!!(BX!!(?W"ou9I!5#iCjLYTr]=S'T ])_30!!(?W"ou9I!5#iCiO]'io"+l%ft.4aqmuq1])Vj1]Dhm/]D_g/]Dhm2]Dhm0]D_g,]D_g. ]DMZr])Vj1]Dhl`])Vik])Vj/])Vip])Vi[])Vj0])Vj%])Vh^]A^@&GQ<6~> !<;iF!!)uK!!*#LrW)cF!!)lHrW)<9!!)uK!!*#LrVuuM!<(LJ!<(LG!:A>@!7h/$ec>["r;ciJ rW)uLrW)rKrW)iHrW!#N!!*#Lr;ciJqZ-QFrW)iHr;ciJrr<&MrW)?:&-1;4!7o^$f%'g$f%'g$ f%0g#s47/Lp=9EJf%'iPec>^#r;c`GrW!#N!7q#Ir7:iIqUYWG!S.8Mf),O9ecl-*ec=:P!<(LJ !<1RL!"%3V!7h,Mec5^$rW)fG!!)oIr;ciJqZ-NErW)lIquH3:!!)oI!s$p'!<(IK!<(IK!<1OL !<(IP!7h/$ec>X!!!)oI!!*#L!!*#L!!)uK!!)H[I f)>[Fec5^Hf)>[:ecl-*ec=:P!<1RK!<(LK!!(UL!;tFI!!1XNrRUoIrmq&Krmq&Ks47)Jma_@< rRLrKr7:iIqUPWHrRUuK!7q,LJ_'/ !<;i=!!)uB!!*#CrW)c=!!)l?rW)<0!!)uB!!*#CrVuuD!<(1A!<(1>!:A#7!6kMgblI^er;ciA rW)uCrW)rBrW)i?rW!#E!!*#Cr;ciAqZ-Q=rW)i?r;ciArr<&DrW)?1&-0u"!6ragc-64gc-64g c-?4fs3:NCp<rW!#E!6tB@r6>3@qT]!>!R1WDc27S0bm"0mblH#5!<(1A !<17C!"$mM!6kKDbl@agrW)f>!!)o@r;ciAqZ-N3@qTT!?rQY?B!6tKCJ^*N*!L!M6~> !<;i+!!)u0!!*#1rW)c+!!)l-rW);s!!)u0!!*#1rVuu2!<'P/!<'P,!:@B%!4r6C])_fAr;ci/ rW)u1rW)r0rW)i-rW!#3!!*#1r;ci/qZ-Q+rW)i-r;ci/rr<&2rW)>t&-0>S!5#iC]=S%C]=S%C ]=\%Bs1A71p:CM/]=S'T])_iBr;c`,rW!#3!5&+.r4Dq.qRc_,!P8@2]DMZs]*88I])]IT!<'P/ !<0V1!"$7;!4r42])ViCrW)f,!!)o.r;ci/qZ-N*rW)l.quH2t!!)o.!s#sF!<'M0!<'M0!<0S1 !<'M5!4r6C])_c@!!)o.!!*#1!!*#1!!)u0!!)H!!!)r/!!*#1r;ci/rr !<;iF!s$p'!;b7H!;Y1G!;k=I!<1OL!:A>>!7o^$rRLrKrmq)Lqpk`Iqpk`IlIH+=ec=:P!<1OL !<(IK!<1OL!<(IK!;k=I!;tCJ!<1RL!<1OL!<(IK!<(IK!;G%E!;tCJ!<(IK!<1RL!<1OL!:/29 !;k=Q!7h/$ec=:P!<(IK!:n\@!;tCJ!<(IK!<(IK!<1RL!<1OL!;G%E!;k=I!<1RL!<:UM!<(IK !:/29!;k=I!<1OL!<1RL!!q-U!7h/$ec5^Lec5^Iec5^Jec5^Kec5^Kec5^Gec5^Lec5^Lec5^K ec5^=ec5^IecPp'ec>["!!*#L!!)rJ!!*#LrrDiF!!)rJ!!)rJ!s$p'!9r&7!;Y1G!<(IK!<1RL !!q-U!7h/$ec5^Lec5^Iec5^Gec5^Iec5^Lec5^ !<;i=!s$Tj!;aq?!;Xk>!;k"@!<14C!:A#5!6ragrQPbl@bCbl@bCbl@bB bl@b4bl@b@bl[sjblI^e!!*#C!!)rA!!*#CrrDi=!!)rA!!)rA!s$Tj!9q`.!;Xk>!<(.B!<17C !!pgL!6kMgbl@bCbl@b@bl@b>bl@b@bl@bCbl@b3bm"0mblH#5!;t(A!;t+A!<14C!<14C!<17C !<14C!<(.B!<14C!;t(D!6ragrQP6ArlkECJ^*Q+!L!M6~> !<;i+!s#sF!;a;-!;X5,!;jA.!<0S1!:@B#!5#iCrOW%0rk&11qmuh.qmuh.lFR3"])]IT!<0S1 !<'M0!<0S1!<'M0!;jA.!;sG/!<0V1!<0S1!<'M0!<'M0!;F)*!;sG/!<'M0!<0V1!<0S1!:.5s !;jA6!4r6C])]IT!<'M0!:m`%!;sG/!<'M0!<'M0!<0V1!<0S1!;F)*!;jA.!<0V1!<9Y2!<'M0 !:.5s!;jA.!<0S1!<0V1!!p1:!4r6C])Vj1])Vj.])Vj/])Vj0])Vj0])Vj,])Vj1])Vj1])Vj0 ])Vj"])Vj.])r&F])_fA!!*#1!!)r/!!*#1rrDi+!!)r/!!)r/!s#sF!9q)q!;X5,!<'M0!<0V1 !!p1:!4r6C])Vj1])Vj.])Vj,])Vj.])Vj1])Vj!]*88I])]IT!;sG/!;sJ/!<0S1!<0S1!<0V1 !<0S1!<'M0!<0S1!;sG2!5#iCrOW%0m^iH!rOW%0rOW%0rjr.1qmuh.r4Dt/rjr.1J\19\!I+Tp~> !<;iFr;c]F!!)iG!!)rJ!!)rJ!!)H["!!)?9!!)oIrrDrI!!)uK!!)T@ !!)cE!!*#L!!)rJ!!*#L!!)cE!!)rJ!!)rJ!s$p'!9Vi4!;tCJ!;tCV!7o^$f%'iPec=:P!;tCJ !;tCJ!;G%E!<(IK!;b7H!;tCM!7o^$lIGq8qpkiLf%'j"ec5^Lf(oCGec5^Eec5^Jf(oCGf)5U9 ec5^Hec5^IedMQ0ec=:P!7o^$f%'j!ec5^Jec5^Gec5^Jec5^Jec5^=ecl-*!7h/$!;tCJ!;tCJ !<(IN!7o^$r71rMf%'iuedMQ0ec=:P!7o^$f%'icf)#IHec5^Jec5^Jec5^Jec5^Jec5]$f'V#h TE'K~> !<;i=r;c]=!!)i>!!)rA!!)rA!!)H3r;ciA!!)rA!!)o@!!)o@!!)bl@bCbl@bB bl@b@bl@bBbl@bAbl@b>bl@bBbl@bbl@bc2@Y0 bl@b?bl@b@bmXTsblH#5!6ragc-67dbl@bAbl@b>bl@bAbl@bAbl@b4bm"0m!6kMg!;t(A!;t(A !<(.E!6ragr65 
!<;i+r;c]+!!)i,!!)r/!!)r/!!)H!r;ci/!!)r/!!)o.!!)o.!!);r"TZ0H!4r7,])Vj1])Vj0 ])Vj.])Vj0])Vj/])Vj,])Vj0])Vj*])Vj0])Vj.])r&F])_fA!!)>s!!)o.rrDr.!!)u0!!)T% !!)c*!!*#1!!)r/!!*#1!!)c*!!)r/!!)r/!s#sF!9Uln!;sG/!;sG;!5#iC]=S'T])]IT!;sG/ !;sG/!;F)*!<'M0!;a;-!;sG2!5#iClFR#rqmuq1]=S(A])Vj1]D;O,])Vj*])Vj/]D;O,]DV`s ])Vj-])Vj.]*n\O])]IT!5#iC]=S(@])Vj/])Vj,])Vj/])Vj/])Vj"]*88I!4r6C!;sG/!;sG/ !<'M3!5#iCr4<%2]=S(?]*n\O])]IT!5#iC]=S(-]DDU-])Vj/])Vj/])Vj/])Vj/])Vh^]C!32 GQ<6~> !<;iF!s$p'!;b7H!;Y1G!;tFE!:JD?!7o^$rmh&Lr71iJqpk`Iqpk`IlIH+=f%'iP!<(LH!<(IN !7o^$qUPWHrRLrKr71iJrRUlHrRLrKpXT6k !!)oIquH`I!!)fF!!)cE!!)rJ!!)fFr;c-6!!)rJq>gQH#lrQ-!7o^$f%'j!ec5^Jec5^If),OH ec5^Hf(oCGf)5U:ec5^IecPp'ec>["!!*#L!!)fF!!)cE!!)rJ!!)ZB!!)HX!!!)rJ!!)iG!!)rJq>g!8"p!3Sec=8$r71iJr71iJrRM&Nf%'j!ecPp'ec>Tu #lrQ-!7o^$f%'j#f)5U9ec5^Kec5^Lf(oCEec5^Jec5^Jec5]$f'V#hTE'K~> !<;i=!s$Tj!;aq?!;Xk>!;t+bl[sjblI:Y !!)o@quH`@!!)f=!!)cgQ?#lr5p!6ragc-67dbl@bAbl@b@c27S? bl@b?c2%G>c2@Y1bl@b@bl[sjblI^e!!*#C!!)f=!!)c!!)rAq>g!/"oumJblGugr653Ar653ArQPEEc-67dbl[sjblIXc #lr5p!6ragc-67fc2@Y0bl@bBbl@bCc2%G !<;i+!s#sF!;a;-!;X5,!;sJ*!:IH$!5#iCrjr.1r4;q/qmuh.qmuh.lFR3"]=S'T!<'P-!<'M3 !5#iCqRZ_-rOW%0r4;q/rO_t-rOW%0pU^D*rOW%0qmuq1]=S(A])Vis])Vj.])Vj,])r&F])_B5 !!)o.quH`.!!)f+!!)c*!!)r/!!)f+r;c,p!!)r/q>gQ-#lqTL!5#iC]=S(@])Vj/])Vj.]DM[- ])Vj-]D;O,]DV`t])Vj.])r&F])_fA!!*#1!!)f+!!)c*!!)r/!!)Z'!!)H!!!)l-!!)o.$imoO !5#iC]=S'T])_c@!!)r/!!)i,!!)r/q>fur"ou78])]GCr4;q/r4;q/rOW.3]=S(@])r&F])_`? #lqTL!5#iC]=S(B]DV`s])Vj0])Vj1]D;O*])Vj/])Vj/])Vh^]C!32GQ<6~> !<;iF!!)cE!!)iG!!)rJ!!)66!!)uK!W^dOr71iJqpk`Iqpk`IlIGq8rmq)Lrmh&LrRLrKrRM&N f%'itec5^Kec5^Jec5^Lec5^Kec5^Kec5^Eec5^Kec5^IecPp'ec>["!!)?9!!)oI!!)iG!s$p' !:eV?!;tCJ!<(IK!<1OL!;P+F!;G%E!;tCJ!;+hB!:/29!;tCJ!;Y1P!7o^$f%'iPec>X!!!)rJ !!)rJ!!)uK!!)uK!!)lH!!)ZB!!)H !<;i=!!)c!!)rA!!)6-!!)uB!W^IFr653Aqoo*@qoo*@lHK;/rltHCrlkECrQP!s$Tj !:e;6!;t(A!<(.B!<14C!;Oe=!;F_!;t(A!9hZ-!<14C!;k"@ !;t(A!<(.E!6ragr65bl@b3bl@bBbl@bCbl@b;bl@bAbl@bAbl@`p c0`aVP5p+~> !<;i+!!)c*!!)i,!!)r/!!)5p!!)u0!W]h4r4;q/qmuh.qmuh.lFR#rrk&11rjr.1rOW%0rOW.3 ]=S(>])Vj0])Vj/])Vj1])Vj0])Vj0])Vj*])Vj0])Vj.])r&F])_fA!!)>s!!)o.!!)i,!s#sF !:dZ$!;sG/!<'M0!<0S1!;O/+!;F)*!;sG/!;*l'!:.5s!;sG/!;X55!5#iC]=S'T])_c@!!)r/ !!)r/!!)u0!!)u0!!)l-!!)Z'!!)H!!!)u0!!*#1!!*#1rrE#0!!)u0!!*#1!!)c*!!)o.!!)u0 !s#sF!<'M0!:IH!!;a;-!;jA:!5#iC]=S'T])]IT!;sG/!;sG/!;X5,!;sG/!9h#p!<0S1!;jA. 
!;sG/!<'M3!5#iCr4<%2]=S(?])Vj1]*&,G!4r7,])Vj!])Vj0])Vj1])Vj)])Vj/])Vj/])Vh^ ]C!32GQ<6~> !<;iF!!)cE!!)iG!!)oI!!)uK!!)H!;tCJ!<(IK!<(IK!<(IK!<1OL!<1OL!;k=I!;k=I!<(IN!7o^$rRLrKldc%9 qpk`IrRMDXf%'iPec=:P!7o^$!<1OL!;k=I!;tCJ!<(IK!<(IK!<1OL!<(IK!<(IN!7o^$rRLrK mFM49r7:iI!nI>OrRUoIrmpuIr7:cGr7:fHrmpuIm+).:rRM&Nf%'j"ec5^Led;E.ec=:P!7o^$ !<1OL!;k=I!;Y1G!;k=I!<(IK!:JD !<;i=!!)c!!)o@!!)uB!!)H3!!)uB!s$Tj!<17C!;k"@!;k"@!<14C!:J)3!<17C!<14C !<(.B!;t+A!;Xk>!;t(A!<17C!<14C!<(.B!<(.B!<14C!;k"@!;t(A!<(.B!<14C!<(.B!:.l0 !;k"@!;Oh=!:\55!;t(A!<(.B!<(.B!<(.B!<14C!<14C!;k"@!;k"@!<(.E!6ragrQP3@!mL]FrQY9@rlt?@r6>->r6>0?rlt?@m*,M1rQPEEc-67ebl@bCbmFHqblH#5!6rag !<14C!;k"@!;Xk>!;k"@!<(.B!:J)3!<14C!;k"@!;t(A!<(.B!<14C!<17C!<14C!<(.B!<(1B !!:CF!<14C!<(.B!:J)3!<(.B!<(.B!<(.B!;t(A!;t+A!<14C!.iD]blO".J,~> !<;i+!!)c*!!)i,!!)o.!!)u0!!)H!!!)u0!s#sF!<0V1!;jA.!;jA.!<0S1!:IH!!<0V1!<0S1 !<'M0!;sJ/!;X5,!;sG/!<0V1!<0S1!<'M0!<'M0!<0S1!;jA.!;sG/!<'M0!<0S1!<'M0!:.5s !;jA.!;O2+!:[T#!;sG/!<'M0!<'M0!<'M0!<0S1!<0S1!;jA.!;jA.!<'M3!5#iCrOW%0lam,s qmuh.rOWL=]=S'T])]IT!5#iC!<0S1!;jA.!;sG/!<'M0!<'M0!<0S1!<'M0!<'M3!5#iCrOW%0 mCW;sr4Dq.!kSF4rO`".rk&(.r4Dk,r4Dn-rk&(.m(35trOW.3]=S(A])Vj1]*\PM])]IT!5#iC !<0S1!;jA.!;X5,!;jA.!<'M0!:IH!!<0S1!;jA.!;sG/!<'M0!<0S1!<0V1!<0S1!<'M0!<'P0 !!9b4!<0S1!<'M0!:IH!!<'M0!<'M0!<'M0!;sG/!;sJ/!<0S1!.hcK])d-VJ,~> !<;lGr;c`GquH]HquHZGr;c9:q>gNGrW!#N!!*#LquHZGrW)B;rW)rK!!)uKr;ZlL!<(LK!;k@F !<(LJ!!1XNrRUoI!S.8Lf)>[If),OHf)5UJf)>[Lf)>[;f),OIf),OEec5^@f),OHf)5RLec>[" r;ccHrW)lIquHZGr;ciJquH*7quHZGr;clKrW!5T!!(RM!7h/$rW)lIquH]Hr;ZlL!<(LJ!;k@G !<1RI!:8;:!<1OL!4UPX!<(LI!<1RK!!q-U!7h,Mec=;#!;tFG!<(LH!;tFH!:A>;!<1OL!<(LH !<:XL!<:XL!<1RK!!:^O!<1RJ!;tCJ!<1OL!<1RI!:JG;!<:XL!<(LI!<(LH!<1OM!7q,LJ_'/< !MBFC~> !<;l>r;c`>quH]?quHZ>r;c91q>gN>rW!#E!!*#CquHZ>rW)B2rW)rB!!)uBr;ZlC!<(1B!;k%= !<(1A!!1=ErQY9@!R1WCc2I_@c27S?c2@YAc2I_Cc2I_2c27S@c27Sr;ciAquH*.quHZ>r;clBrW!5K!!(7D!6kMgrW)l@quH]?r;ZlC!<(1A!;k%> !<17@!:7u1!<14C!4U5O!<(1@!<17B!!pgL!6kKDblH#f!;t+>!<(1?!;t+?!:A#2!<14C!<(1? !<:=C!<:=C!<17B!!:CF!<17A!;t(A!<14C!<17@!:J,2!<:=C!<(1@!<(1?!<14D!6tKCJ^*N* !L!M6~> !<;l,r;c`,quH]-quHZ,r;c8tq>gN,rW!#3!!*#1quHZ,rW)AurW)r0!!)u0r;Zl1!<'P0!;jD+ !<'P/!!0\3rO`".!P8@1]D_g.]DM[-]DVa/]D_g1]D_fu]DM[.]DM[*])Vj%]DM[-]DV^1])_fA r;cc-rW)l.quHZ,r;ci/quH)qquHZ,r;cl0rW!59!!'V2!4r6CrW)l.quH]-r;Zl1!<'P/!;jD, !<0V.!:7>t!<0S1!4TT=!<'P.!<0V0!!p1:!4r42])]JB!;sJ,!<'P-!;sJ-!:@Au!<0S1!<'P- !<9\1!<9\1!<0V0!!9b4!<0V/!;sG/!<0S1!<0V.!:IJu!<9\1!<'P.!<'P-!<0S2!5&41J\16[ !I+Tp~> !<7W$kgf_6Xk!IPLt2P+][d)`f)5T>ec5]$f&YB^!.i_aecDEDJ,~> !<7Vpkfj)-Xj$hGLs5o"]ZgHWc2@X5bl@`pc/d+L!.iDXblO".J,~> !<7V^kdpfpXh+Q5Lq !<;oHp]/mrp]/mrp]0:(!!'jdp]1! 
!<;o?p]/mip]/mip]09t!!'j[p]1!3!!&5-o`0Qfm`kM,m`b_3ccu1a]Zp3Om`b_3J^*B&!L!M6~> !<;o-p]/mWp]/mWp]09b!!'jIp]1!!!!&4po`0QTm^r5om^iH!cb&oO]Y!q=m^iH!J\1*W!I+Tp~> !<7W$mFM49Xk*FNMUq_+^Xi/[T[s&AJ_'#8r;_E"l.,m.!.Y~> !<7VpmEPS0Xj-eEMTu)"^WlNRT[!E8J^*B&r;_Dnl-06m!.Y~> !<7V^mCW;sXh4N3MS&fe^Us7@TY(.&J\1*Wr;_D\l+6t@!.Y~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !WU`%TRibeTRibeTRic?TE"tB~> !WUDqPC\pKPC\pKPC\q%P5kT5~> !WTc_G_(*jG_(*jG_(+DGQ7_o~> !<7TMJH16$JH16$JH2hQJ,~> !<7TMJH16$JH16$JH2hQJ,~> !<7TMJH16$JH16$JH2hQJ,~> JcC<$JcC<$JcC<$XoEn~> JcC<$JcC<$JcC<$XoEn~> JcC<$JcC<$JcC<$XoEn~> !<7W$J_#D'J_#D'J_%$U!!%N~> !<7VpJ^&bjJ^&bjJ^(CC!!%N~> !<7V^J\-KFJ\-KFJ\/+t!!%N~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !<9OZ!7e4&$.R51XKAV+\BNIied9mN] !<9OQ!6hRi$-U8mUS=KXYf"ZObmDV3[&TstVn:!nJ^&bjJ^&bjJ^*r6!L!M6~> !<9O?!4o;E$+dI;Q'.;qTsD8s]*Z'SVP'HIR] !<:X$!h[iilIH!,eGe#!ea1ie[IfR]W:/:;cMEY'^$aG\e^CjeCKFtR;dtZgd+.X:$e2i"eE#]C ]T]/6eUc;&eq)D'eq)EIecDEDJ,~> !<:Wp!h%*QlHK?qbPo`dbj !<:W^!fOLulFR(R])K;A]'R$/T(IL4POH*`[/,7PV=(r!]!n5H2B2W4">OVW[^tt?$bE1?g$S:j `fQ:e\q.JE]7ISF]7ITh])d-VJ,~> !<:[%"J]+BcIM73"rE3Ee^i:$mFD>X87VU*!)NM^!BEX !<:Zq"Irh=`m!bn"rE!6bg"AUmEG]J88A*9!*]:r!C9*Abm:a'D.QjK:aUh`bn&%=NE\qYAn5:N 5;#Q[a3=,!%Eu0#oChY:j5/COPJbUqc%4,jc%4,jc2#TbP5p+~> !<:Z_"HZ\t[(>S8"r2Hm]">PbmCNF/87MO(!)(73?dC]+;K]J3bG20J+e!% LP"8[CYq@%D&C[q"j^UnaG\1LU"FA]7ISF]7ISF]D9&>GQ<6~> !<;iF!6"_/!2Ji1#*jk3131[jma_Wne.u/@cbGD;:'#pVn^[b\LV3Sn!/UPk!E2JVed/uZb-["H DDR.=ee-HZNe51-iSE28bJh2lFb\5mo@Er]$haAjn*B)lg"(bWd=Kl"eq)D'eq)EJecDEDJ,~> !<;i=!5//$!1`?!#*+MG4Di*^m`c!\b8=lCa0p^$9*'FFn]_,NN7%UK!N1hRM#h6Vn')-H8_V@` NJBcOnBDQha+HijD.?aI:.n)F3]MN-bk''g^;T^@p[@G0hqQe7VT=>5J^&bjJ^&bjq98r(!.Y~> !<;i+!3H#b!06?V#(hAn/nD?#m^rM1"VOLO[AJ9L6ih;$n[ej3LV!Gi!J>"_AH>Fen%/k&7EDff C1T&Cn@K:D[WX#P1aEqN&.T0Z$QX2o]( !<;iF$+;OLKOe%[.>-.:ed.mf^S>!dFjAP.ecE?6r[\HoV,_4iJs*4*I5%&Zf$qB^:#ei[AW-2?F.>dQF&^m"tc/e!Kj5/D6aMGHeEKAQ?edp !<;i=$*H.ZQZm.V21<[7bm9_]eAcSjEQ65nblG'J.h!?8@UqE(@Z5)*/s=ELbmnM-.4?Sn-m'9C )&O2%6gr8*[ReoeR<`j4KCI+tac8f#DHp=H !<;i+$(il0IpPfA-$$T`]*O=5^7eLVB=,8@])eZTr[/*fN_*J2)':XY(ak$Ko=GKA)'^=L,9dm6% hK6^$9dji#c9u&VJf7',^Agj&\!?>0/FCW&ePZc%LrpV+a^]j]+;KQnbW.Zp\+4Kmd&MBYI]:sJ \-KFJ\-KFqRZcQ!.Y~> !<;lG$IuIF[&S:R="/[KmFDWH@EZ-UBiQU*ajp"6'!agGb0ee[Yt/T\m.9PpW'6Blo[X#irQ>>c XY6!^rQ>2en(%cY7)5^D>#m&.n^\B'[YDQUjk7Gui7cDa\@],9Fg9Hhedp3Io(W%Vp$qG3gXXf# ][VM`J_#D'J_#D'qUP\?!.Y~> !<;l>$I#PnNgmEH!8B&V:]KPWGb_9DMq&unCQg"tlUe5oGNbL4G9IkH]$oZ[BWrPJcS VD"%HrPJWUn')-H8CbYIH#klrn]_`jY$>=L>?=X"<`;ac7RB9s9W?5.bn%q4o(W%Vp$qG3hV6b2 [*+$FJ^&bjJ^&bjqTT&)!.Y~> !<;l,$G)`gZ)MY<9I+PdmCN^m@*5pQ?qqt[Ye'G<&t;&!^W+?tR6;N[A8"q//KfSQoXb+4rNQL0 Qn!`lrNQ@1n%/k&6b]@7:J2pWn[fIGSjY1a'GM>r&J5Wl'+PEY2k%>N]+;Bdq"jpbq=jUSmcs.c Uq2,hJ\-KFJ\-KFqRZcQ!.Y~> !<;lG$IuFE[&8(N<[iRIma_f=@F;NmV/'O>-#UlUo[X\\6c-1?_T0U$:6+`>r;5T7 !<;l>$I#M;c,H,sF#SEem`c0.B'@apabG.V0P\JKoZ\&L8D;PHjm2-\?@p_iiRPYX9WcM.bm)@` Cn*#;aQq(V[RS]aRX&s4KCR2"`IC6j>?Z!8G>qLXTnZK14",3gp!!TYb4,*'p^$TbmGd!VJ?I(H c%4,jc%4,jc2,ZcP5p+~> 
!<;l,$G)]gZ(u;69-eGbm^imc?dQ3hULID#,%%UdoXbd+5dHoSWirLm5tlBEA5PiA31d_O]*>g. Ar7db\*LX3VEh\gDbj"NGk&BZZu7*E)&6#. !<;lG$IuFF[AS1O<[`LHn(%u6B[4/t\"mbF?]MQ:WiodI>]ud[kDs@m.5uG?S#q7rWF+cLoa%\o,+4hVQ6No?4Yeo]s`.XGpMmp!sZ!aR/Qk q"a^Xlep=AI^RXUeq)D'eq)D'f)!quTE'K~> !<;l>$I#M !<;l,$G)]hZD;D79-\Aan%0']B$Ilp[\I839g^78[(>qB':L\6FE`+]I=62\5@@Pj9JRT9[dq$9 \rg6mPX@!O\+%!@[u+X(=$3aaE)0+O5%7uI[.V$EUHpRU&K6f2h.ocsoX.@'$OJMYot(aAaS#<$ q>:'bnaPf.K:]W)]7ISF]7ISF]DB,?GQ<6~> !<;oH%+Wk44gh53>#m&$dFId<&="7u7;]ZEUhEg_)]B!&cdha?' !<;o?%*Znp7*`c?H#klhaNX2!&<@bs9RGFAaFe@m+sI;0a3=8%'<*dMJV/Z2LQ.*LIG!iDdAfWU _=GhWY$&!JY'!`(Nlpn:&B^Ik;h1=Y`gV0r>=(@]MT>D7';RSFt,; agC="nE'3/lf@-hheogEJ^&bjJ^&bjJ^+&9!L!M6~> !<;o-s1/Q,4L1f%:J2pN\%;+A&;(i_7;TQBU0pbI)&NTh[CZ(D':L7V7n?6F927nm0N2%P:-TS/ Yk#C4Sl;GiN`A4qK"1?_&@d`H5\u.UVf5F+4=_H]I^\p]':'^m'+kpTci2nNo\Y-c%LWUNNqL!p \$GSsp#u/Do'>W*lY`N0J\-KFJ\-KFJ\1cj!I+Tp~> !<;uJ&ClFb/1!HdW-;EG-lX]gA(C2%&AOg$I6k]VULmFH$+H8Oj`mQ'<><_cdg^ual`U%pu0B-Z)O'X V"N:KeZ@E_j5/kVfZ_joZuj?>eq)D'eq)D'f(mktTE'K~> !<;uA&BoSR.jII,`L;'n2B"%q?d/,d&@RpbGY-]=`IVbP;1j[o_TVYt'<3jNJ:W<)Kmt[b\`)i3 c`BcV]^sAU]0*^F\ZBso<@sT^n]_QeTfAmiaMsZoG]mXAMokV9';7!b9MeVfIG=c#lD&)f4ZP86 RJ"f7bbs.Yl/Lphh:1WO]l1Z:c%4,jc%4,jc2#TbP5p+~> !<;u/&@up1-QkOVV/T4',8V[S=1\"8&>k>9D*btDTO(>58pG`DZ+9S?':U=W7n6*@8O!n<78m8d 9gKe0XRj%3W]75fTSuoL4X>rsn[f:AP:h0^XJg)6:JNq^J%,'^'9X1^%hKGQ<6~> !<;uJ&CeZr@%OF#WcqWL;bB,#^sho+&Co+MYs5q/Tk..DDnF*6d+.j@'G?<^Q$)kkdOedU*>.s[qiG?S-A-'?[Jee>,fbK8&XdDb2GrqP6_[B?Bb 3hu>u%E,!sg;V[pc-k%^B!9U5eq)D'eq)D'f(mktTE'K~> !<;uA&Bha]Ek931`gV0sDduQk\B=Ef&Br24W^kai_gZ;IC9t[ma3=8%'<**a?=%)kE\1:ne`>`D _2pQo]CjDYbHY.Ua;Kpdmrp5tI5 !<;u/&A"r4=IZ7hVf5F,83\OJW4;H2&A#9SRm4TmT3Y,1@AL*3[^u1E':KId.4Hi;3"mW/:K:@k 8MUihX7a(7\t1goXe2,B<`;j\(lc,s]*u9E-$,iZE)01h)h5Ei]+^>E.Llt#'iEHZroSJI%1
      GQ<6~> !<;uJ&Co(A<`YfnWHVNL=#r:Kc.2@8%+VsR]X"/>;]g"te(+*A'>$Lk3B7I:(J:CgrVcQ_fUZNA anF9se#"^3G^l?UNa>[J1,9B+dI>]nZU&W,G?IBlOOE^N'!.Y~> !<;uA&Br/(;KBIk`gV0tFA=fu`6@br%*Z%Jf[.E^Bddi1bKTV''=0h]2`D7Z,s-atf\4m"Z#NX+ _=l+bbF^;)K9*ONXEJAS5X,gAaRIF\X%AAfRWr."MTPG6';R3[7S$'gchZkirp5hA3Ai6#SFk&8 bcI2`ccO\WgsL#"_p!\VJ^&bjJ^&bjq98r(!.Y~> !<;u/&A#6I92V.YW,PO.8h)EbZb#\=%(`5u]LuJ\-KFJ\-KFq7?ZP!.Y~> !<;rI%b/e==B(ZeG?S'.6`8)Vn(%iZ4h7bb@T"7e`74D0'@Y+=]=JG+5!,@rnFPhnYt/<5d.Z$% d@"W[><&OMNa>R(6"ahacLBBje>6#ZDbDpHd.,[#]5FO[^nS^Fp>+?mro^qkQ>9I-o@= !<;r@%a2i#=)GLeRX'!L7\IWAn')3I7b#YpHY=dD]Z]ok'?e8%Za(*)4"blnaiqT*MDebEa7dah aHC%C=?jL,XEJ5(5@\2R`UM+XbF_jmP@Q_0a77CfZVs@]76mUHk*qFmrl8G,1*Si`o?@ZdYEcr@ a2kemQGLdrc%4,jc%4,jc1oNaP5p+~> !<;r.%_B$F9i%%PE)0(U30LPXn%/q'4h.Y^=\B`EXLe#8'=kBGUn\8D2Adu992.uF1)X)c[e@@McNFi3EfU/[.([5\rT?pB0%De[dgsCUdQ44%2>'+hgTP#rjb-"$3`/Uo=GCATo$(g ]X+,)M6ORB]7ISF]7ISF]D/u=GQ<6~> !<;oH%+NS3?!NqD=ZSpFaj]\/$GA3ZLN6?f+E>?Lo@=T*e'QFd(l"i"6DqrH\th\lVodHZed/uR YF:GW;'(S[ecsTt6:W>`d.#U"cB3Rc\Z;sjf>>>Mj3G!(KMd?Fo%",r^@1@Fe?3;te:H2%eq)D' eq)EHecDEDJ,~> !<;o?%*QVn@!&uKG>:Vt_9;8k$FN%%UQ9.t.;c`@o?@rmb/_QA(P//g1k([dNHm6TTY\hBbm:aF `3cUhB.&>tbm)=[8RO--a7.=e`I^0@5sTM#P#k_A]T6#L.NHbCRohc%4,j c%4.6blO".J,~> !<;o-%(`g=;,s$&9e&,_Yds26$DfJ>KlBaT*b2:_o=G[I\@/fP'm5UB)BL_"0c^p!P-DOg]*P8t YF1;L8fW*A]*>d*3]J(-[d^mBZu[<(%1l[?FXS_TXEH3("pS\Fo",4=S%,npMe5ne\q.JE]7ISF ]7ITg])d-VJ,~> !<;lG$J!D+=B''&2lFdHmFDWX#T=Hk%h8hb`74D0rRDD,Vng?iM-_MT+]EE_aOB\1#eiX6RuUfP +,T:6"Oj:uOj`^L'@aC9Tsh>8_7[.^\\#2FP\4F-e+2&reA(MAjOUJF`79FiJ_#D'J_#D'pXTA< !.Y~> !<;l>$I$Gh=`'4&32"+2mEH!G$R$K,'+tUg]Z]okrQGbpTX_bJK2`^1(Ir>=^WQ)k#e!4E[B4Ub ."gg0"O!JhMokG4'?dFW7n5d9A5PTZ !<;l,$G*R59hf(M/X!?KmCN_%#T=Hk%1EGOXLe#8rONKLP,PO_GWZ3="u'PQYIO,7#c9qsRYt3= *I-k]"M'^=J%+mY'=s]$+;YJ-4 !<;iF#h@21:Gs`1b1#_.!n4lgr2]t0ajfb0%+ESPe^2:EXfJbEc-u15#ei[8RuUfP*f9@:!d,M. rZVCe'cHrj!?"W'ee-;nEi-.GYHG"-W2-#JCfcSBn^\#r^@(.:bcbWseUc;&eq)D'eq)EHecDED J,~> !<;i=#gC5m9h-b:_TV;j!mA$Pr1sJ!_9;8k%*HW6bf@E#VPL6'`67Yp#e!7G[B4Ub-\Lm4!c]2* rZ_Ig(EEAo!?+T%bn8$T4%Mq+4Zked3B&fK+YA>en]_B`[H$)e]qJiHb^n#ic%4,jc%4.6blO". 
J,~> !<;i+#eI@:73s4NZ+955!kG1tr0@DXYe'87%(NaV]!e]5Q^4,?ZaoS;#c9tuRZ(9>*-gqa!bWGq rZD7a',^Zd!>\&o]+MN$,9Ip9%1EUQ%1EUM"rhfqn[f+=S\;_,M/?4n\q.JE]7ISF]7ITg])d-V J,~> !<;fE#1Ur5=LHB&lIH!bcMYofcI;"."kD&Hbg+]*ed/uSWJl4,:*>D^ecKfP`r3UOQi)C8AOc7A &C?>QQDpdZV5'WFIUZN7cdhR:$.ZKIi6]?1B#iGQeq)D'eq)D'f([_rTE'K~> !<;f<#0Xup<3F-^lHK@P`r*aU`lmSj"jG*-_oBjebm:aG]r%39@k!'!blV[DfDX/!ZC(TW!I.!& bn.pI4[M=l3&``O0IIkoI)jJ$bmDUtgt' !<;f*#.h1>9qJZ%lFR),[/@62[(5D4"hM1MZF%.0]*P8tW/>jt803$E])lH'`;R=LP'[(P!F7kZ ]*,Qd-6@J\-KFJ\-KFJ\1Zg!I+Tp~> !<;cD"Ot`:o%"?%d$AllKT_LN I:Q,fNR-/Eed0g*Ohe/V7[),Keq)D'eq)D'f(RYqTE'K~> !<;c;"O"g"`67>gr643G!6jfS#dtjS6p*1W'nbu"%VnIHB4tpj;*I3.5!LPbo$%]ha,j4I.k`S+ -ls !<;c)"M1tDZb#>3r4:q#!4qO/#c8S32(^$s%s[QT%Uq\-='/O*4"2U,-n--!o",FD[Y$dO#RC_A #6Y5gH*t6Q]*Q!2C5d"+/V(3Z]7ISF]7ISF]Cri;GQ<6~> !<;`C!nGcMXk!KtqM>3`n(%KDoSERBn^\0"d^P$C5!1VV>D9aMYJ_#D'J_#D' J_'PG!MBFC~> !<;`:!mJj3Xj$jbqL\dRn'(j4oRd.5n]_Neafft\*#&r"6?]J^&bjJ^&bj J^*o5!L!M6~> !<;`(!kPqRXh+S?qKN"8n%/RhoQU@rn[f7A\"IIs%0HY>2/&De]'d08\Y37u5EW1]J\-KFJ\-KF J\1Wf!I+Tp~> !<7W$imnFccFKaFUSPBDe*GQge]"Q#cddTtJ_#D'J_#D'p!s/:!.Y~> !<7VpilqeQ`Nc#'R@UP"bNmCVbe9aWa39+ZJ^&bjJ^&bjp!!N$!.Y~> !<7V^ik#N.[%WS>MiXO;\a-j2\u_-l[CUq$J\-KFJ\-KFot(6L!.Y~> !<7W$J_#D'J_#D'J_%'V!MBFC~> !<7VpJ^&bjJ^&bjJ^(FD!L!M6~> !<7V^J\-KFJ\-KFJ\/.u!I+Tp~> !WU`%TRibeTRibeTRic?TE"tB~> !WUDqPC\pKPC\pKPC\q%P5kT5~> !WTc_G_(*jG_(*jG_(+DGQ7_o~> !<7TMJH16$JH16$JH2hQJ,~> !<7TMJH16$JH16$JH2hQJ,~> !<7TMJH16$JH16$JH2hQJ,~> J_#D'J_#D'J_#D'Xk&"~> J^&bjJ^&bjJ^&bjXj)@~> J\-KFJ\-KFJ\-KFXh0)~> J_#D'J_#D'J_#D'Xk&"~> J^&bjJ^&bjJ^&bjXj)@~> J\-KFJ\-KFJ\-KFXh0)~> J_#D'J_#D'J_#D'Xk&"~> J^&bjJ^&bjJ^&bjXj)@~> J\-KFJ\-KFJ\-KFXh0)~> J_&6"Sc=8lj48e]JcC<$j8T+PrmlT~> J^)TeSc=8cj3 J\0=ASc=8Qj1BmBJcC<$j8T+(rk![~> J_&9#!<8V@!!%T$jOO>2JY7ReJY;%rrmlT~> J^)Wf!<8V7!!%SpjNR])JWkYKJWo,Xrlor~> J\0@B!<8V%!!%S^jLYElJTu`jJU$4"rk![~> rRQPXf&-Q.T%3q8!.i_^ecGfDJ_#D'J_&u7!WShlepm~> rQToKf%^9*T$7;"!.iDUblRj.J^&bjJ^*?%!WSA_c%#~> rO[X0f$aX!T">#J!.hcC])hqVJ\-KFJ\1'V!WRED]79~> rmh)-J[3k4!<8YA!MBFDf&YB_s.FqoJ_#D'k10S5T`3Mm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-&#YJ^&bjk03r,PQ&gW~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*0+,J\-KFk.:ZoGlG=*~> rmh)-J[3k4!<8YA!MBFDf&YB_s.FqoJ_#D'k10S5T`3Mm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-&#YJ^&bjk03r,PQ&gW~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*0+,J\-KFk.:ZoGlG=*~> rmh)-J[3k4!<8YA!MBFDf&YB_s.KABJcC<$JcG*:rmh,MT`3Mm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-*H,JcC<$JcG*:rlkKDPQ&gW~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*4OTJcC<$JcG*:rjr42GlG=*~> rmh)-J[3k4!<<#K!Re@IT`M+:Zdo/M!.i_^ecGfDrmq&JrrD*ZrW(jUr;_EKJcE:\rmh,MT`3Mm~> rlkGuJZIA&!<<#B!QhG8RK9,#ZcrN7!.iDUblRj.rltEArrD*ZrW(jUr;_EKJcE:\rlkKDPQ&gW~> rjr0ZJXk;]!<<#0!OnWlN<,6LZb$6_!.hcC])hqVrk&./rrD*ZrW(jUr;_EKJcE:\rjr42GlG=*~> rmh)-pTadsrh0=eJ[4CC!<<#K#ddQ17!!EiEcM%r"B.ugMUSr`!MBFDf&YB_s.KABs8W#tirB#Y h>dKTJcC<$\c;Z[!WShlepm~> rlkGupT":ergEhXJZIn5!<<#B#cq':6!mu&>?h)."^+Ds=+n`'blO".J^*9#!WSA_c2[hB!94%Y !8[\T!.k0$s1/1/blRj.rlor~> rjr0ZpRD5Hrf$o=JXkhl!<<#0#b>.;6<[Pm=',B""]\-#??iue])d-VJ\1!T!WRED]Dqp0!94%Y !8[\T!.k0$s1/1/])hqVrk![~> rmh)-q6C:&QT,?^#oo;*J[4IE!<<#K&YWC!JE,\JhqQf5eBOG9 rlkGuq5XdmOZ rjr0Zq4%_PKeWlA#8i,RJXknn!<<#0&WLe\GDLm"J:`2oH#di'IT3YaOSl]kGQ<<,jLYHmGlG>R s8N)Ys8N)Ts8N(Ms+13\s8KV2s*4OTJ,~> rmh)-qQ^I%9HQN(A5+Zg2O]3&kct3grRM+F=Hn#e'ZOdRXK!;$6f!!*&u!<<)u!<<*!!!*&u!<3#t!!*&u!<3#s!<)rq!;lfr!.k0$s1/1/ecGfDrmlT~> rlkGuqPssm9dj(nP'BY@30\upkc4^`rQPqCA[?KGPa%JmOcG6MG`Gp/_7B"3blO".J^*9#!WSA_ c2I\B!;$6f!!*&u!<<)u!<<*!!!*&u!<3#t!!*&u!<3#s!<)rq!;lfr!.k0$s1/1/blRj.rlor~> rjr0ZqO@nP6lS*^ rmh)-qm$X+?:01,O*G5966JCT]!D>9i3N:]r3H8$i3E@_rRM(E=-AX>rWTl]6!MHmq>f62!!)8F 
r71nA!.i_^ecGfDrmq&JrrD]krrE&urrE*!rrE&urrE*!rW)uurr<3%!!*'!rW)rtrrE*!rrDoq rrDusrr@WMJcE:\rmh,MT`3Mm~> rlkGuql:-s?WET)ZD*Vf7;!!)9O r658+!.iDUblRj.rltEArrD]krrE&urrE*!rrE&urrE*!rW)uurr<3%!!*'!rW)rtrrE*!rrDoq rrDusrr@WMJcE:\rlkKDPQ&gW~> rjr0Zqj\(V<'GD^K5=RW2&J0%\t&d#i10`Gr1*]ci1'fIrOW/kDTS(:rWTm.Al,[mq>f62!!)8F r4;uS!.hcC])hqVrk&./rrD]krrE&urrE*!rrE&urrE*!rW)uurr<3%!!*'!rW)rtrrE*!rrDoq rrDusrr@WMJcE:\rjr42GlG=*~> rmh)-r3?g/;G`O`[\SXj<(]:o2jOKMtg!<%uZ!8XK!;- rlkGur2U=!;IZfQc,$NKL3mVn3fj3>!!)qtrW)u!rW(3D!!)_nqZ-2f!!)hq!!)2_!!)bo!<<#B ".Ef)M>>j2fV4U-=MX@m!<) rjr0Zr1"7Y84n`>Y*sZ=6olq8/qX"k!!)qerW)tgrW(35!!)__qZ-2W!!)hb!!)2P!!)b`!<<#0 "-%3?MYYs3fV53N:p5tB!<%uZ!8 rmh)-r3?g"+b-uFb--\7:e3i+&U+)X!!)o%!!)u'!!(6K!!)\t!!)u'!!)Vr!!)i#!!)2f!!)c! !<<#K"//eWL%tNM\:?SDacMSarK%!ZrK.!YrK.$Zs,d6\s,d6\"cr]aO8o:[OSo1YOS]%NOT#7[ OT#4[OSmZ1TE'PojOOA3T`3O@s8N)js82lrs8N)us8N*!s8N)us8N'#rr<&us8N*!s8N)ts8N*! s8)fos8N(Ms+13\s8LRMs.KABJ,~> rlkGur2U rjr0Zr1"7N*d4a-`27ua5WCJD$YTC-!!)nd!!)tf!!(65!!)\^!!)tf!!)V\!!)hb!!)2P!!)b` !<<#0"-%3Rs8N)js82lrs8N)us8N*!s8N)us8N'#rr<&us8N*!s8N)ts8N*! s8)fos8N(Ms+13\s8KV2s*4OTJ,~> rmh)-rN['63_5s_g!m<`@n]?V2^'c>ZEjB.rNZD'rNZS,ZEaH1ZMjk'ZMsn)ZMjk%ZMae&ZMsn. Z2am1!!)u'rW)l%rVuu)!;3Vt!<'2+!4&m1!<0;(!<0;$!<0;$!;s/%!<'5'!<9A)!<0;%!;Wo# s8CLOTiI#.k5+0IjK!ep>KMtg!0E9Br/^mYrf@*[rK%TkO8tB(!0E9B!0@0\O8tB(!<%uZ!<%uZ !:c-N!<%u\!0EXK!:Tsf!<<*!!<3#u!<<*!!<3#u!!<0#!<3#u!<<*! !<)rt!!<0#!<3#u!;uls!.k0$s1/1/ecGfDrmlT~> rlkGurMpR(3aTSQjPe:dQA0j1Bg=G:X/l-urMonurMp)%X/c4#X8W+uX8`/"X8W+sX8N%tX8`/' WrN.#!!)turW)ksrVuu"!;3Am!<&r$!3<.#!<0&!!<0%r!<0%r!;rns!<&tu!<9,"!<0%s!;WYq s8C1FRUN7Tk5+0IjK"M>=MX@m!:K7Tr9aObrpBadrU(6tmJu\C!:K7T!:BgemJu\C!<) rjr0ZrL=L`1-h2>e^1=C<&uif-Pe%_S=KKWrL rmh)-rN[!.%;M]Oa1n9r>tI@F2_PVtrNZD'qm$2%rNZD'rj)P(riuM(rj)P(riuM(riuM(rNZD' rj)P("L8"-Z2jm0!!*#(!!*#(!!*#(rrD_t!!)u'!!*#(!!)u'!!)r&!!)i#!!)l$!!*#(!!)u' "TYh.ZEaK/Z2an!Z2jq'ec_[FNK!B&mfp%G6XK!;- rlkGurMpKu&rXTRg!md8P(\4(C02@-rMonuql9\srMonuri?&!ri6#!ri?&!ri6#!ri6#!rMonu ri?&!"KMM&WrW."!!*#!!!*#!!!*#!rrD_m!!)tu!!*#!!!)tu!!)qt!!)hq!!)kr!!*#!!!)tu "TYS'X/c7!WrN.oWrW1ubljJFXFTg9mfp%[9N]*umem.amJm7bmJm7dmf*:amKN[ZmJu\C!;c*` !<) rjr0ZrL=FY$X]C2_75bL9K+XU-R&kBrL rmh)-rN[!.&9X;6UmZpO;FNi778"PVriuS*Z2jg.!!)u'!!*#(!!)u'!!*#(!!)u'!!)i#!!*#( #lq9:!4&m1ZEaK.Z2t$3!;s,&!;3Yq!<'2'!<'2'!;s,&!;Wo#!;j&%!;s,&!<0;(!;`u&!3uV" Z2jq'eeb#WL3nArH@'d[I!g6^J2(9MOH9I(O9#6@q>gMV!!)kW"osaH!0E9BrfI$XrK%!Zn;m_P OH9JAOSmZ1TE'PojOOA3T`3OBs8)fis8)fps8Duus8)crs8E#ts8E!"rr<&ts8;rrs8Duus8E#u s8)eIs+13^s8LRMs.KABJ,~> rlkGurMpKu(7r"N^q-V;Mh$+oF)r[^ri6)#WrW'u!!)tu!!*#!!!)tu!!*#!!!)tu!!)hq!!*#! #lq$,!3<.#X/c6uWr`:%!;rkt!;3Dj!<&qu!<&qu!;rkt!;WYq!;ies!;rkt!<0&!!;`_t!36+p WrW1ubljJEVi6<.JId3ZJV&E"QU4fimd:)CmK!4Rq>gN_!!)l`"p")Z!:K7TrpK[arU'XcnEpAY md:)Smek<1P5p0YjNR`*PQ&i,s8)fis8)fps8Duus8)crs8E#ts8E!"rr<&ts8;rrs8Duus8E#u s8)eIs+13^s8L7Ds-*H,J,~> rjr0ZrL=FY%V^ljRZD\t5r1,E2Eqt&rgX#iS,iJW!!)tf!!*"g!!)tf!!*"g!!)tf!!)hb!!*"g #lpKc!1]PZS=BTWS,r\\!;r>e!;2l[!<&Df!<&Df!;r>e!;W,b!;i8d!;r>e!gMV!!)kW"osaH!0E9BrfI$XrK%!Zn;m_P OH9JAOSl]kGQ<<,jLYHmGlG>Ts8)fis8)fps8Duus8)crs8E#ts8E!"rr<&ts8;rrs8Duus8E#u s8)eIs+13^s8KV2s*4OTJ,~> rmh)-rN[*1$t+aHKR%<783]:/@UU76ZEaK0Z2an'Z2an'Z2an(Z2an'Z2an(Z2an'Z2an'ZMX_% Z3UH9Z2h29!4&p,!!0A*r3?;&os+PtrNZD'riuM(rNZD'r3?;&q6Bu#qm-%uriuM(qQ^)$riuM( q6C##rRM[V;1Wq/@VKarDf';uDiQGSacMSaqN([WplGIUqN)!`OH9I(O8tB(!<%uZ!<%uZ!:Z'P !0E9BrfI'/!MBFDf&YB_s.KABci="FJcC<$T`>#B!WShlepm~> rlkGurMpU#&r!?gX.u,BK6h]dLOUY)X/c7"WrN.uWrN.uWrN/!WrN.uWrN/!WrN.uWrN.uX8Dts WsA^+WrT3$!3<0s!!0,#r2TetorA&mrMonuri6#!rMonur2Tetq5XJqqlBPnri6#!qPsSrri6#! 
q5XMqrQQ%F?_Nd@Fa\spK7SMsLT!@<_!^UjqX+=`q!J+^qX+Ximd:)CmJu\C!<)#'!WRED]79~> rmh)-rN[!0*^"@]CLC185sIqMH;u2Op9FYurNZD'riuM(rNZD'riuM(rNZD'riuM(rNZD'riuh1 ZEaJ9Z2h29!;`u$!;s,&!;3Vt!<'2'!<08(!<'2'!;s,&!;Wo#!;j&%!;Ni"!:@&ls8CL_Ti$;Q BVW/V_7$A):/I*+>KMtg!;V]V!<%uZ! rlkGurMpL#+BOnuQ]R&TH?X[kQYS%.p8\/nrMonuri6#!rMonuri6#!rMonuri6#!rMonuri6>* X/c6$WrT3$!;`_r!;rkt!;3Am!<&qu!<0#!!<&qu!;rkt!;WYq!;ies!;NSp!:?fes8C1VRU;\@ E21(b`42n/?#$K)=MX@m!;Z$_!<) rjr0ZrL=F[)Dbi6>u=$N0J,:cED@X+p7)*_rLe!;2i^!<&Df!e!;W,b!;i8d!;N&a!:?9Vs8BPDNGQ.q FJ?Fg`OMq+B7Rog:p5tB!;V]V!<%uZ! rmh)-r3?g!'iSLE<_Z"L94N9i/U-ln!!)u'!!*#(!!)u'!!*#(!!)u'!!*#(!!)u'!!*#("ots7 !4&m1riuM(rNZM*ZEaK0ZMsptZ2an'Z2an(Z2an(ZMsq&Z2an#Z2an$Z2an'Z2an(Z2amlZ2jq' ee4ZRHt/#mD-4*_]uWlAQ7Q7^OSo1UOSo1YOSf+XOT#4bO8o9B!!&+Br;Zk[!<&#Y!;hlX!;_fW !<&#WecDEDJ_&o5!WShlf$jZs!.k0$s.B>jecGfDrmlT~> rlkGur2UjblRj.rlor~> rjr0Zr1"7M&k>hp7QiU`4BQT>.;/=D!!)tf!!*"g!!)tf!!*"g!!)tf!!*"g!!)tf!!*"g"ot0` !1]PZrgWrgrL[?6F(A50/=ZGC%$_E^r$OSo1UOSo1YOSf+XOT#4bO8o9B!!&+Br;Zk[!<&#Y!;hlX!;_fW !<&#W])d-VJ\1!T!WRED]@6fX!.k0$s.B>j])hqVrk![~> rmh)-r3?g+2^h?s9hIlNBRP%h16(q3r;ci&rW)u(rVur(rW)u(rW)r'r;Zo)!4)S("gS+.Z2an( ZMae%ZMjh*Z2an!ZMOY"ZMjh,Z2am1ZMX_$ZMX_"ZMae&ZMX^lZ2jq'ee+TOH";it?VdQ8bM>uh OtKt\\WHa?!MBFDf&YB_s.KABJcC<$JcG*:rmh,MT`3Mm~> rlkGur2Uth$,aP1!7 XZ'@t\aKC?!L!M7c/d+Ms-*H,JcC<$JcG*:rlkKDPQ&gW~> rjr0Zr1"7V0HiYK5W^nj>]agD/:@)[r;cherW)tgrVuqgrW)tgrW)qfr;Znh!1a#g"e5PmS,`Qg SG`HdSGiKiS,`Q`SGN rmh)-qm$X!5:&Ha:KV=XE^)UBJ[4OG!<<#K&YVC/>_Ca09VB\jk"JC'0O)Z0OSmZ1TE'PojOOA3 T`3Mns+13$s6TdbecGfDrmlT~> rlkGuql:-i5s&ImFaT(4Mc)cmJZJ%9!<<#B&Xl:NFbJCD8=[l^j]Q,l2-@Z)mek<1P5p0YjNR`* PQ&gXs+13$s6TdbblRj.rlor~> rjr0Zqj\(M2BFY;6VLj-C,n(qJXktp!<<#0&WK\_M1sGN6']IHiaI#E3DcoiOSl]kGQ<<,jLYHm GlG=+s+13$s6Tdb])hqVrk![~> rmh)-qQ^Hm3#45BAnOsk0oCOfkct3grRMS(DEC9oPE;91['Z;]3Gp?_agmN2ecDEDJ_&o5!WShl eq*jPs.98j!;ZZp!;lfr!9=+YecGfDrmlT~> rlkGuqPss`4![C4Jq7Po22$O]kc4^`rQPqlCIM02OGoU&ZEora9T(T:_7B"3blO".J^*9#!WSA_ c%5nGs.98j!;ZZp!;lfr!9=+YblRj.rlor~> rjr0ZqO@nE0bPp&?!^,O.sZ]9kaVYQrOWZH@nU*:MM@@fZ*BQa>+IHTYdokn])d-VJ\1!T!WRED ]7L!5s.98j!;ZZp!;lfr!9=+Y])hqVrk![~> rmh)-q6C9kFtsjX*[5ffJ[4IE!<;uJ"Nc!s>[7)/")2PYcEu.'TE'PojOOA3T`3Mns+13Js8N)s s8;rns8N)ts8;rqs8N)as8LRMs.KABJ,~> rlkGuq5Xd^EA\gg-7 rjr0Zq4%_BB.=T<)]iX>JXknn!<;u/"KuW>;,R-c"(kl2['[FGGQ<<,jLYHmGlG=+s+13Js8N)s s8;rns8N)ts8;rqs8N)as8KV2s*4OTJ,~> rmh)-pTadcre^];J[4CC!<;uJ!nG`CouR6[e$R[,TE'PojOOA3T`3Mns+13Ks8N)ps8N)qs8N)q s8N)rs8N)bs8LRMs.KABJ,~> rlkGupT":Wre1?1JZIn5!<;uA!mJd(p:paG`lka6!L!M7c/d+Ms-*H,JcC<$W;lktq>^HpqZ$Qq qZ$Qqqu?Zrli6t/!WSA_c%#~> rjr0ZpRD5;rcnKnJXkhl!<;u/!kPkHp9+P%[(3QU!I+Tq]B$R)s*4OTJcC<$W;lktq>^HpqZ$Qq qZ$Qqqu?Zrli6sr!WRED]79~> rmh)-J[3k4!<8YA!MBFDf&YB_s.KABJcC<$W;lktq>^HpqZ$QqqZ$Qqqu?Zrli6t8!WShlepm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-*H,JcC<$W;lktq>^HpqZ$QqqZ$Qqqu?Zrli6t/!WSA_c%#~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*4OTJcC<$W;lktq>^HpqZ$QqqZ$Qqqu?Zrli6sr!WRED]79~> rmh)-J[3k4!<8YA!MBFDf&YB_s.KABJcC<$WW2tuq#C?oqu?Zrq>^HpqZ$Qqm/R(9!WShlepm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-*H,JcC<$WW2tuq#C?oqu?Zrq>^HpqZ$Qqm/R(0!WSA_c%#~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*4OTJcC<$WW2tuq#C?oqu?Zrq>^HpqZ$Qqm/R's!WRED]79~> rmh)-J[3k4!<8YA!MBFDf&YB_s.KABJcC<$WW2tuq#C?oqu?Zrq>^HpqZ$Qqm/R(9!WShlepm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-*H,JcC<$WW2tuq#C?oqu?Zrq>^HpqZ$Qqm/R(0!WSA_c%#~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*4OTJcC<$WW2tuq#C?oqu?Zrq>^HpqZ$Qqm/R's!WRED]79~> rmh)-J[3k4!<8YA!MBFDf&YB_s.KABJcC<$WW2tuq#C?or;Zcsq#C?oqZ$Qqm/R(9!WShlepm~> rlkGuJZIA&!<8Y8!L!M7c/d+Ms-*H,JcC<$WW2tuq#C?or;Zcsq#C?oqZ$Qqm/R(0!WSA_c%#~> rjr0ZJXk;]!<8Y&!I+Tq]B$R)s*4OTJcC<$WW2tuq#C?or;Zcsq#C?oqZ$Qqm/R's!WRED]79~> 
rmlZ#ec2,iJcG$8!!)rJ!WShleq*jPs/H%u!;QTo!;uls!;QTo!;c`q!:9abecGfDrmlT~> rlp#oec2,`JcG$8!!)rA!WSA_c%5nGs/H%u!;QTo!;uls!;QTo!;c`q!:9abblRj.rlor~> rk!a]ec2,NJcG$8!!)r/!WRED]7L!5s/H%u!;QTo!;uls!;QTo!;c`q!:9ab])hqVrk![~> rmh)LJ_#D'J_&N*!!)rJ!WShleq*jPs/H%u!;QTo!<)rt!;HNn!;c`q!:9abecGfDrmlT~> rlkHCJ^&bjJ^)lm!!)rA!WSA_c%5nGs/H%u!;QTo!<)rt!;HNn!;c`q!:9abblRj.rlor~> rjr11J\-KFJ\0UI!!)r/!WRED]7L!5s/H%u!;QTo!<)rt!;HNn!;c`q!:9ab])hqVrk![~> rmh,Meq*jPs+14*s8S_l!;tCKs.KABJcC<$W;lktqu?Nns8W*!q>^ rlkKDc%5nGs+14*s8S8_!;t(Bs-*H,JcC<$W;lktqu?Nns8W*!q>^ rjr42]7L!5s+14*s8R^ rmh/Nf)L;oJY7RegRnNHr71oKT`3Mns+13Ks8N)ks8N)es8N)bs8LRMs.KABJ,~> rlkNEc2W?YJWkYKgQMU;r659BPQ&gXs+13Ks8N)ks8N)es8N)bs8L7Ds-*H,J,~> rjr73]DmG,JTu`jgNW\ur4<"0GlG=+s+13Ks8N)ks8N)es8N)bs8KV2s*4OTJ,~> rmh2Of)MCqOFdF_s4mYSTE"uiecGfDrmlW#JH2_NrVultp&G$ln,NCflMpk7!WShlepm~> rlkQFc2Wu[mXbChs4mYSP5kU\blRj.rlouoJH2_NrVultp&G$ln,NCflMpk.!WSA_c%#~> rjr:4]Dm,.OFdF_s4mYSGQ7aA])hqVrk!^]JH2_NrVultp&G$ln,NCflMpjq!WRED]79~> rmh2Of)MDsOT,=#OSo1FOFdF_s4mYSTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2X!]mf*:,mem.OmXbChs4mYSP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm-0OT5@#OT#4FOFdF_s4mYSGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)ME>!9'%>!90+:!:#[G!:,_ts+14*s8S_l!;tCKs.KABJcC<$JcG*:rmh,MT`3Mm~> rlkQFc2X"(!9*AG!93GC!:'"P!:0'(s+14*s8S8_!;t(Bs-*H,JcC<$JcG*:rlkKDPQ&gW~> rjr:4]Dm-Ps5B(>s5K.:s6>^Gs6Gbts+14*s8R rmh;Rf)MD)!!*"[rrBirrrE%[rrDAHrrDAHJcC<$h#IDI!!)rJ!WShleq*jPs+14:s8LRMs.KAB J,~> rlkZIc2X!q!!*#drrBk&rrE&drrDBQrrDBQJcC<$h#ID rjrC7]Dm,;s8W([s8Tlrs8W([s8VDHs8VDHJcC<$h#ID!!!)r/!WRED]7L!5s+14:s8KV2s*4OT J,~> rmh;Rf)MD)!!*"[rrE%[r;ceXrVuq[rVuq[rW)t[rW)t[r;cbWrVuq[rW)qZr;cJOrrE%[rrE%[ r;ceXquHVUrrDqXqZ-APJcC<$h#IDI!!)rJ!WShlf)>XI!9F1Y!;ulq!.k1&s8E#)s8E"fs8LRM s.KABJ,~> rlkZIc2X!q!!*#drrE&dr;cfarVurdrVurdrW)udrW)udr;cc`rVurdrW)rcr;cKXrrE&drrE&d r;cfaquHW^rrDraqZ-BYJcC<$h#ID rjrC7]Dm,;s8W([s8W([rVuhXrr2t[rr2t[rr<"[rr<"[rVueWrr2t[rr;tZrVuMOs8W([s8W([ rVuhXr;ZYUs8VtXqu?DPJcC<$h#ID!!!)r/!WRED]D_d.!9F1Y!;ulq!.k1&s8E#)s8E"fs8KV2 s*4OTJ,~> rmh;Rf)MD)!!*"[rrE(\rrE(\rrE(\rrE(\rW)qZrrE%[rrE"ZrrDqXrW)kXrrE(\rrD_RrrE%[ rrE(\rrE(\rrDnWrrDtYrrDtYrrE%[rrDeTJcC<$h#IDI!!)rJ!WShlf)G^L!8mhV!;c`q!1 rlkZIc2X!q!!*#drrE)errE)errE)errE)erW)rcrrE&drrE#crrDrarW)larrE)errD`[rrE&d rrE)errE)errDo`rrDubrrDubrrE&drrDf]JcC<$h#ID rjrC7]Dm,;s8W([s8W+\s8W+\s8W+\s8W+\rr;tZs8W([s8W%Zs8VtXrr;nXs8W+\s8VbRs8W([ s8W+\s8W+\s8VqWs8W"Ys8W"Ys8W([s8VhTJcC<$h#ID!!!)r/!WRED]Dhj1!8mhV!;c`q!1 rmhSZf)MD)!!&+B!!&+B!!)tZrr<+^!!*"[rrE%[rrE%[rrE"ZrrDqXrrDtYrrE"ZrrDbSrrE(\ rrE(\rrE"ZrrDqXrrDtYrrDtYrrDVOJcC<$h#IDI!!)rJ!WShlf)G^L!8mhV!;c`q!1 rlkrQc2X!q!!)HT!!)HT!!)ucrr<,g!!*#drrE&drrE&drrE#crrDrarrDubrrE#crrDc\rrE)e rrE)errE#crrDrarrDubrrDubrrDWXJcC<$h#ID rjr[?]Dm,;s8S1Bs8S1Bs8W%Zs8N.^s8W([s8W([s8W([s8W%Zs8VtXs8W"Ys8W%Zs8VeSs8W+\ s8W+\s8W%Zs8VtXs8W"Ys8W"Ys8VYOJcC<$h#ID!!!)r/!WRED]Dhj1!8mhV!;c`q!1 rmh5Pf)MD)quH_Xq#CJX!!*"[rrE%[rrE%[rrE"ZrrDqXrrDtYq#L,MquH_Xq#LGVqZ-PUrrDqX quHGPJcC<$h#IDI!!)rJ!WShlec>aI!<)rr!<)rs!!*&u!<<)s!;lfr!;c`q!;- rlkTGc2X!qquH`aq#CKa!!*#drrE&drrE&drrE#crrDrarrDubq#L-VquH`aq#LH_qZ-Q^rrDra quHHYJcC<$h#ID rjr=5]Dm,;r;ZbXq>UMXs8W([s8W([s8W([s8W%Zs8VtXs8W"Yq>^/Mr;ZbXq>^JVqu?SUs8VtX r;ZJPJcC<$h#ID!!!)r/!WRED])_m.!<)rr!<)rs!!*&u!<<)s!;lfr!;c`q!;- rmhSZf)MD)!!&+B!!&+B!!)kWrrE%[rrE%[rrE%[rrE"ZrrDqXrrDtYrrDPMrrE(\rrE(\rrDnW rrE%[rrDtYrrDeTrrDeTJcC<$h#IDI!!)rJ!WShlf)G^L!;uls!<<*!!<)rs!;QTo!;uls!;c`q !;$6j!<<*!!;6Bl!<3#u!;?Em!<3#u!;6Bk!<<*!!<)rt!;ulr!<<*!!;lfr!<)rt!9sO`!<<*! 
!:g*g!;?Hm!<3#u!9sO_!;?Hm!<3#u!:p0h!;?Hm!<3#u!9sO_!;?Hm!<<*!!;-9k!<2uu!;6Bk !<<*!!:KmdecGfDrmlT~> rlkrQc2X!q!!)HT!!)HT!!)l`rrE&drrE&drrE&drrE#crrDrarrDubrrDQVrrE)errE)errDo` rrE&drrDubrrDf]rrDf]JcC<$h#ID rjr[?]Dm,;s8S1Bs8S1Bs8VqWs8W([s8W([s8W([s8W%Zs8VtXs8W"Ys8VSMs8W+\s8W+\s8VqW s8W([s8W"Ys8VhTs8VhTJcC<$h#ID!!!)r/!WRED]Dhj1!;uls!<<*!!<)rs!;QTo!;uls!;c`q !;$6j!<<*!!;6Bl!<3#u!;?Em!<3#u!;6Bk!<<*!!<)rt!;ulr!<<*!!;lfr!<)rt!9sO`!<<*! !:g*g!;?Hm!<3#u!9sO_!;?Hm!<3#u!:p0h!;?Hm!<3#u!9sO_!;?Hm!<<*!!;-9k!<2uu!;6Bk !<<*!!:Kmd])hqVrk![~> rmh;Rf)MD)!!*"[rrE(\rrE(\rW!(_!!&+BrW)qZrrE(\rW)nYrrDqXrrDqXrrE(\rW)YRrrE%[ rrE(\rrE(\rW!(_!!&+BrW)kXrrDtYrrE%[rrE"ZrrE"ZJcC<$h#IDI!!)rJ!WShlf)G^L!<)rt !<)rt!<3#u!;HNn!;uls!;c`q!:p0g!;- rlkZIc2X!q!!*#drrE)errE)erW!)h!!)HTrW)rcrrE)erW)obrrDrarrDrarrE)erW)Z[rrE&d rrE)errE)erW!)h!!)HTrW)larrDubrrE&drrE#crrE#cJcC<$h#ID rjrC7]Dm,;s8W([s8W+\s8W+\rr3+_s8S1Brr;tZs8W+\rr;qYs8VtXs8VtXs8W+\rr;\Rs8W([ s8W+\s8W+\rr3+_s8S1Brr;nXs8W"Ys8W([s8W%ZrrE"ZJcC<$h#ID!!!)r/!WRED]Dhj1!<)rt !<)rt!<3#u!;HNn!;uls!;c`q!:p0g!;- rmh2Of)MEA!<8/[!<8/Y! rlkQFc2X"+!<;Kd!<;Kb!<2Ec!!;Qg!<)?b!!)Hd!!)Ha!<2E`!<)?`!;>jZ!<;Kd!<;Kb!<2Ec !!)Hd!<;Ka!<2E`!;u9b!<)>:s+14*s8S8_!;t(Bs-*H,rr;uurVultrVultrr;uuqu?Nnr;Zcs qZ$Qqnc/Uho)J^i!ri6#oDegjoDegjrr;uupAb-mrr;uus8VrrrVultkPtP^n,N@epAb-mrr;uu l2U__p&G$l!ri6#nc/RgpAb-mrr;uul2U__o`+pkiW&oXrr;uumf3:2!WSA_c%#~> rjr:4]Dm-Ss8S2[s8S2Ys8J,ZrrS;^s8A&YrrA/[rrA/Xs8J,Ws8A&Ws7VQQs8S2[s8S2Ys8J,Z rrA/[s8S2Xs8J,Ws87uY!<&"1s+14*s8R rmh2Of)ME.OT, rlkQFc2X!mmf*9WmXbChs4mYSP5kU\blRj.rltHBrrE#trrE#trrE&urrDusrrE&urrDusrrDoq rrDWir;cHhrr<-#!!)ZkrrDWirrE&urrDcmrrE&urr<-#!!*#urrE#trrD9_r;cEgrW)TjrrE&u rrDBbrW)Qirr<-#!!)WjrW)TjrrE&urrDBbrW)Qir;bjWrrE&urrDKerlkKDPQ&gW~> rjr:4]Dm-@OT5?NOFdF_s4mYSGQ7aA])hqVrk&10rrE#trrE#trrE&urrDusrrE&urrDusrrDoq rrDWir;cHhrr<-#!!)ZkrrDWirrE&urrDcmrrE&urr<-#!!*#urrE#trrD9_r;cEgrW)TjrrE&u rrDBbrW)Qirr<-#!!)WjrW)TjrrE&urrDBbrW)Qir;bjWrrE&urrDKerjr42GlG=*~> rmh2Of)ME.OT, rlkQFc2X!mmf*9WmXbChs4mYSP5kU\blRj.rltHBrrDusrrE*!rrE#trrDusrrE*!rW)lrrrDoq rrDZjrrE*!rrDZjrW)TjrrE&u!!)]lrrE&urrE#trrDusrrE&urr<3%!!*'!rW)osrrE*!rrE#t rrDZjrrE*!rrD`lrW)Kgrr<-#!!)BcrW)HfrW)TjrW)Kgrr<-#!!)BcrW)NhrrE*!rrD*ZrrE&u rrDKerlkKDPQ&gW~> rjr:4]Dm-@OT5?NOFdF_s4mYSGQ7aA])hqVrk&10rrDusrrE*!rrE#trrDusrrE*!rW)lrrrDoq rrDZjrrE*!rrDZjrW)TjrrE&u!!)]lrrE&urrE#trrDusrrE&urr<3%!!*'!rW)osrrE*!rrE#t rrDZjrrE*!rrD`lrW)Kgrr<-#!!)BcrW)HfrW)TjrW)Kgrr<-#!!)BcrW)NhrrE*!rrD*ZrrE&u rrDKerjr42GlG=*~> rmh2Of)ME/OSo0MOFdF_s4mYSTE"uiecGfDrmh)LqZ-Tpr;cfrqZ-WqrVururW)uuqZ-WqqZ-?i rW)uurW)TjrW)TjqZ-?ir;Zitr;cisrrE#tr;Zitr;ZitrVururW)osr;ccqrrD]krW)uurW(sX rW(RMrW(mVrW([PrW)uurW)'[r;Zitr;c rlkQFc2X!nmem-VmXbChs4mYSP5kU\blRj.rlkHCqZ-Tpr;cfrqZ-WqrVururW)uuqZ-WqqZ-?i rW)uurW)TjrW)TjqZ-?ir;Zitr;cisrrE#tr;Zitr;ZitrVururW)osr;ccqrrD]krW)uurW(sX rW(RMrW(mVrW([PrW)uurW)'[r;Zitr;c rjr:4]Dm-AOT#3MOFdF_s4mYSGQ7aA])hqVrjr11qZ-Tpr;cfrqZ-WqrVururW)uuqZ-WqqZ-?i rW)uurW)TjrW)TjqZ-?ir;Zitr;cisrrE#tr;Zitr;ZitrVururW)osr;ccqrrD]krW)uurW(sX rW(RMrW(mVrW([PrW)uurW)'[r;Zitr;c rmh2Of)MCqOFdF_s4mYSTE"uiecGfDrmn4OrrBD*rrA\krr@WMpAb-C!WShlepm~> rlkQFc2Wu[mXbChs4mYSP5kU\blRj.rlqSFrrBD*rrA\krr@WMpAb-:!WSA_c%#~> rjr:4]Dm,.OFdF_s4mYSGQ7aA])hqVrk#<4rrBD*rrA\krr@WMpAb-(!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmn4OrrBD*!!&Vjrr@WMpAb-C!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlqSFrrBD*!!&Vjrr@WMpAb-:!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk#<4rrBD*!!&Vjrr@WMpAb-(!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmn:QquD rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlqYHquD rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk#B6quD rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> 
rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#VuQ\qJcEmmrmh,MT`3Mm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oVuQ\qJcEmmrlkKDPQ&gW~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]VuQ\qJcEmmrjr42GlG=*~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmod&!!'V1!!&GerrE*!rr@WMbQ%Rm!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rls-r!!'V1!!&GerrE*!rr@WMbQ%Rd!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk$k`!!'V1!!&GerrE*!rr@WMbQ%RR!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmq&J!!(pVrW'V2rW(XOrW(aRrW)?crrE*!rr@WMbQ%Rm !WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rltEA!!(pVrW'V2rW(XOrW(aRrW)?crrE*!rr@WMbQ%Rd !WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk&./!!(pVrW'V2rW(XOrW(aRrW)?crrE*!rr@WMbQ%RR !WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmq&J!!)ZkrW)rtrW)Zlrr<-#!!)`mrVururW)!YqZ-9g rr<-#!!)`mrVururW)6`rW)WkrVururW)QirW)-]rr@WMbQ%Rm!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rltEA!!)ZkrW)rtrW)Zlrr<-#!!)`mrVururW)!YqZ-9g rr<-#!!)`mrVururW)6`rW)WkrVururW)QirW)-]rr@WMbQ%Rd!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk&./!!)ZkrW)rtrW)Zlrr<-#!!)`mrVururW)!YqZ-9g rr<-#!!)`mrVururW)6`rW)WkrVururW)QirW)-]rr@WMbQ%RR!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmq&J!!)WjrrE&urrD`l!!*#u!!)]lrW)uurrDcmq>g6h !!*#urrD]k!!*#u!!)]lrW)uurrD<`rW)ZlrW)uurrDcmrW)iqq>g*drr@WMbQ%Rm!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rltEA!!)WjrrE&urrD`l!!*#u!!)]lrW)uurrDcmq>g6h !!*#urrD]k!!*#u!!)]lrW)uurrD<`rW)ZlrW)uurrDcmrW)iqq>g*drr@WMbQ%Rd!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk&./!!)WjrrE&urrD`l!!*#u!!)]lrW)uurrDcmq>g6h !!*#urrD]k!!*#u!!)]lrW)uurrD<`rW)ZlrW)uurrDcmrW)iqq>g*drr@WMbQ%RR!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmh)Lq>g6hrrE&urrD-[rrE&urrD$XrrD'YrrE&urrDcm qZ-EkrrDfnrrE&urrDfnrrD-[rr@WMb5_Il!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlkHCq>g6hrrE&urrD-[rrE&urrD$XrrD'YrrE&urrDcm qZ-EkrrDfnrrE&urrDfnrrD-[rr@WMb5_Ic!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrjr11q>g6hrrE&urrD-[rrE&urrD$XrrD'YrrE&urrDcm qZ-EkrrDfnrrE&urrDfnrrD-[rr@WMb5_IQ!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmq&J!!)Tirr<-#!!)'ZrrE&urrD'YrrD$XrrE&urrD<` rW)ZlrrE&urrDcmrW)-]rr@WMaoD@k!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rltEA!!)Tirr<-#!!)'ZrrE&urrD'YrrD$XrrE&urrD<` rW)ZlrrE&urrDcmrW)-]rr@WMaoD@b!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk&./!!)Tirr<-#!!)'ZrrE&urrD'YrrD$XrrE&urrD<` rW)ZlrrE&urrDcmrW)-]rr@WMaoD@P!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmq&J!!)Tirr<-#!!)'ZrrE&urrDcmq>g3grrD!WrrE&u rrDBbrW)TjrrE&urrD]krW)osq>g3grr@WMaT)7j!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rltEA!!)Tirr<-#!!)'ZrrE&urrDcmq>g3grrD!WrrE&u rrDBbrW)TjrrE&urrD]krW)osq>g3grr@WMaT)7a!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk&./!!)Tirr<-#!!)'ZrrE&urrDcmq>g3grrD!WrrE&u rrDBbrW)TjrrE&urrD]krW)osq>g3grr@WMaT)7O!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmq&J!!)QhrW(sXrrE&urrD-[rrE&u!!)'ZrrE&urrDHd rW)NhrrE&urrDWirW)?crr@WMa8c.i!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rltEA!!)QhrW(sXrrE&urrD-[rrE&u!!)'ZrrE&urrDHd rW)NhrrE&urrDWirW)?crr@WMa8c.`!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk&./!!)QhrW(sXrrE&urrD-[rrE&u!!)'ZrrE&urrDHd rW)NhrrE&urrDWirW)?crr@WMa8c.N!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmpH9rW)!Yr;Zitr;bsZqZ,^Wr;Zitr;b[Rr;Zitr;bXQ qZ)3IbQ%Rm!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlsg0rW)!Yr;Zitr;bsZqZ,^Wr;Zitr;b[Rr;Zitr;bXQ qZ)3IbQ%Rd!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk%OsrW)!Yr;Zitr;bsZqZ,^Wr;Zitr;b[Rr;Zitr;bXQ qZ)3IbQ%RR!WRED]79~> 
rmh2Of)MCos0_lX!.b-$!.b- rlkQFc2WuYs0_lX!.b-$!.b- rjr:4]Dm,,s0_lX!.b-$!.b- rmh2Of)MCoOLC7%!.b-$!.b-o!7(TFCB3o)rmh,MT`3Mm~> rlkQFc2WuYm^DP7!.b-$!.b-o!7(TFB)qK%rlkKDPQ&gW~> rjr:4]Dm,,OLC7%!.b-$!.b-o!7(TF?3'Nqrjr42GlG=*~> rmh2Of)MDmOT#7NOT#7(OT,:^O8o:8OT#7LOT,:^O8o:NOT,<3$\8\:$\8]3$NL1PecBOdnc/U> !WShlepm~> rlkQFc2X!Wmf!4Wmf!41mf*7gmJm7Amf!4Umf*7gmJm7Wmf*9<$\8\:$\8]3$NL1PblMGWnc/U5 !WSA_c%#~> rjr:4]Dm-*OT#7NOT#7(OT,:^O8o:8OT#7LOT,:^O8o:NOT,<3$@rP8$@rQ1$31(O])c4 rmh8Qf)MD)OSo.[O9"$srrDnWquH\WrrD5DrrDSNr;cbWrr<+^!!)qYrVut\!9B7A!:Z*M!!85^ !<&#X!;V`V!.br;(P*BR(XElu!7p_H!<;Vq!<;Vq"TR'(CB3o)rmh,MT`3Mm~> rlkWHc2X!qmem+dmJu#0rrDo`quH]`rrD6MrrDTWr;cc`rr<,g!!)rbrVuue!9ESJ!:]FV!!;Qg !<)?a!;Z'_!.bo:(4d6P(=*`s!6t)8!<;Vj!<;Vj"TQ`kB)qK%rlkKDPQ&gW~> rjr@6]Dm,;OSo.[O9"$srrDnWquH\WrrD5DrrDSNr;cbWrr<+^!!)qYrVut\!9B7A!:Z*M!!85^ !<&#X!;V`V!.bi8'S-sL'[IHo!5%fl!<;V[!<;V["TQ*G?3'Nqrjr42GlG=*~> rmhATf)MD)!!&+BrW(]7rrDhUrW)nYrrD5DrrDSNrrDnWrr<+^!!)tZrrE(\rrD,ArrDPMrr<+^ !!)nXrrDkVrrE$C!.b%Kp`fb=lQZU)R`cDL!7poi!7oLA!7l]GJKo^jcm&Slf!=m_!WTIs CB=I@o4nL/f%+U:nc/U>!WShlepm~> rlk`Kc2X!q!!)HTrW(^@rrDi^rW)obrrD6MrrDTWrrDo`rr<,g!!)ucrrE)errD-JrrDQVrr<,g !!)oarrDl_rrE$B!.b%Kp`]\ rjrI9]Dm,;!!&+BrW(]7rrDhUrW)nYrrD5DrrDSNrrDnWrr<+^!!)tZrrE(\rrD,ArrDPMrr<+^ !!)nXrrDkVrrE$?!.b%Kp`BJ9lQ6="R)[UlN\fuC!7poe!7oL=!7l]CJKKFbclW;h]:AJ!!WS\] ?31(ro3MS"]=VALnc/U#!WRED]79~> rmh;Rf)MD)!!*"[rrE%[r;ceXquH\WrVuq[rW)qZr;[%`!0E9B!0I0[rK.$Z!g!B^rfI-[!0I0[ rK-sXrfI*Zs,d3[s,d*XoT9(Qq2b[YOH9JAOT,=[OT,=\OSf+XOT#7[OT#7ZOSo.`O8tB(!!&,[ !<8/[! rlkZIc2X!q!!*#drrE&dr;cfaquH]`rVurdrW)rcr;[&i!:K7T!:KgdrU0[c!q$$grpKdd!:Kgd rU0UarpKacs6fjds6faao^;_ZqtBE-6$B)qK%rlkKDPQ&gW~> rjrC7]Dm,;!!*"[rrE%[r;ceXquH\WrVuq[rW)qZr;[%`!0E9B!0I0[rK.$Z!g!B^rfI-[!0I0[ rK-sXrfI*Zs,d3[s,d*XoT9(Qq2b[YOH9JAOT,=[OT,=\OSf+XOT#7[OT#7ZOSo.`O8tB(!!&,[ !<8/[!e?N7XL?3'Nqrjr42GlG=*~> rmh5Pf)MD)rW)hWrrE(\rrDnWrrE"ZrW)kXrrE(\rW)t[rW)t[rrE%[rr<+^!!*"[rW)t[rrE(\ rrE(\rrE%[rrE%[rrE%[rrDSNrrDVOrrDbSrrE%[rrE%[rrE(\rrE(\rW)t[rW)t[rrE(\rrE%[ rrDYPrrDkVrrDsYrVuqKr&XoZJGD;,!;nnY!;nn[9L7kQS8-s0?G?pZMgeC]Du:CB3o)rmh,MT`3Mm~> rlkTGc2X!qrW)i`rrE)errDo`rrE#crW)larrE)erW)udrW)udrrE&drr<,g!!*#drW)udrrE)e rrE)errE&drrE&drrE&drrDTWrrDWXrrDc\rrE&drrE&drrE)errE)erW)udrW)udrrE)errE&d rrDZYrrDl_rrDsVrVuqKr&=]WJGD2)!;neV!;neX;oM?+3!"`3lo5!rpc&9)JN/3?JN05\"9?]I BE._e"fl;9X(VJ,j9;Sss/Tf4pZ)OZBE-6$B)qK%rlkKDPQ&gW~> rjr=5]Dm,;rW)hWrrE(\rrDnWrrE"ZrW)kXrrE(\rW)t[rW)t[rrE%[rr<+^!!*"[rW)t[rrE(\ rrE(\rrE%[rrE%[rrE%[rrDSNrrDVOrrDbSrrE%[rrE%[rrE(\rrE(\rW)t[rW)t[rrE(\rrE%[ rrDYPrrDkVrrDsQrVuqKr%e?RJGD#$!;nVQ!;nVS:W,[!1BE**ln\XmpbMp$JMVj5JMWlR"9?'( ?N9cT"e8p!S6l6ig]a3Ss.!EqpY5tC?N7XL?3'Nqrjr42GlG=*~> rmh8Qf)MD)OSf+YOT,=ZOT,=XOT,=ZOT,=YOT,=ZOT,=\OT,=[OT,=[O8o:[O8o:[OT,=[OT,:^ O8o:ZOT,=\OT,=[OT,=[OT,=NOT,=OOT,=SOT,=[OT,=[OT,:^O8o:ZOT,=\OT,=[OT,=\OT,=[ OT,=POT,=VOT,=Y7f\)=!<#Ff!;K%c!<3&u8,iSe7ff6!WShlepm~> rlkWHc2X!qmed(bmf*:cmf*:amf*:cmf*:bmf*:cmf*:emf*:dmf*:dmJm7dmJm7dmf*:dmf*7g mJm7cmf*:emf*:dmf*:dmf*:Wmf*:Xmf*:\mf*:dmf*:dmf*7gmJm7cmf*:emf*:dmf*:emf*:d mf*:Ymf*:_mf*:b6i_c:!<#=c!;Jq`!<3&u7/m8b6iij5qa:HZIJuV;E4s`6E;`0.E4aT6E7Y"V !7q*:"`(Zt`.?M=6j.A=d_9\q6jRbDe%]kQ6q#W(6ibg;6j.A=d_9]"6jD4oaO@@]f)7u>D7e61 E;i6/EmrUbY$HTHJOG&Wi^ rjr@6]Dm,;OSf+YOT,=ZOT,=XOT,=ZOT,=YOT,=ZOT,=\OT,=[OT,=[O8o:[O8o:[OT,=[OT,:^ O8o:ZOT,=\OT,=[OT,=[OT,=NOT,=OOT,=SOT,=[OT,=[OT,:^O8o:ZOT,=\OT,=[OT,=\OT,=[ OT,=POT,=VOT,=Y4og-4!<#+]!;J_Z!<3&u55tW\4oq.*q`Y$OH2]u1CqS3/D#HO$CqA*0CtASL !7q*4"_P6m_gU,84p5Q1d_0Jl4pYu9e%TYL5!IQq4oj154p5Q1d_0Jr4pKSeaO@:Uf)7c8BY)X* D#QU%DU[1^XB:$;JNeWKi][JF]:<4-pXKJ rmh2Of)ME@OT#4[OSJnVOS]%VOT,=YOT,=VOT,=[OT,=\OT,=[OT,=\OT,=[OT,:^O8o:ZOT,=\ 
OT,=[OT,=[OT,=NOT,=OOT,=WOS]%WOT,=[OT,:^O8o:VOT,=[OT,=[OT,:^O8o:OOT,=VOT,=X ;ZM@I!<5suJ,o=B!W`9#rW!!!!;oar`G#!E%s,m;=*D`,;c@\WFB ;fKp)=*)H%qGR]?aE1L4cIPomf)/GZel,LdbBR*0aDb'Vf"5HuDH#-c;\!ciGZSt-E)T]bI8a:( ^k#F8;h7m,;Zm*OZ>9L7g]k'"s0?G?h"^pTgp1_qZ>9KqrW)uRrW!2YZ>9KkenY9Us8LRMs.KAB J,~> rlkQFc2X"*mf!1dmeHk_me["_mf*:bmf*:_mf*:dmf*:emf*:dmf*:emf*:dmf*7gmJm7cmf*:e mf*:dmf*:dmf*:Wmf*:Xmf*:`me["`mf*:dmf*7gmJm7_mf*:dmf*:dmf*7gmJm7Xmf*:_mf*:a :B5qE!<5gqJ,o=>!W`9#rW!!!!;oUn`+A[=%rf[5;fg*":JYlLEDq5/f)AGFf)AGUCW:?mG0ST6 :Mn9t;fKcoqG.E8aDk.*c.5Zdf)/;Vek](YbB6a&a)+UJf",9nCJWO\:C_6bF]38"D,3sWH;@Op ^O]13:Ou=$:BU[BX(VJ,fESBhs/Tf4f_GLPfW/ZbX(VIbrW)uNrW!2UX(VIWc"@%Hs8L7Ds-*H, J,~> rjr:4]Dm-ROT#4[OSJnVOS]%VOT,=YOT,=VOT,=[OT,=\OT,=[OT,=\OT,=[OT,:^O8o:ZOT,=\ OT,=[OT,=[OT,=NOT,=OOT,=WOS]%WOT,=[OT,:^O8o:VOT,=[OT,=[OT,:^O8o:OOT,=VOT,=X 8-"2>!<5RjJ,o=7!W`9#rW!!!!;o@g_dE./%r9:*9Q%se84[I8CeT&tf)A2?f)A2NA]8IZER!'* 88$(a9P_[^qFCp,a)"Fnc.5EVf)/&Oejr>Eb&C$ia(S"7f!np_AOtDO8.K:TE(k&fBLk\CF\#;Z ^4Ah+8:a=k8-Aq)S6l6icN]nGs.!EqchRPGc^\>AS6l6ArW)uErW!2LS6l6-]3YL-s8KV2s*4OT J,~> rmh;Rf)MD)!!*"[rr<+^!!)kWrrE%[rrE"ZrrDtYrrDkVrrE%[rrE(\q>gPWrrE%[rr<+^!!)tZ rrE(\rrE%[rrE%[rrDSNrrDVOrrDqXrrE%[rrE%[rrE%[rr<+^!!)hVrrE%[rrE%[rr<+^!!)SO rrDkVrrDn&rW)tK!!)q(qu6m$ZN%61ZMsq&?NJYepKms.N;d/fN3lr!Z&/He!Ku%5?NA[V?NI9b ra,hhemDdhN;d/bLp?e`!O7L\?NcPC?XMC^?Neqe?a5Rh?Nl7j?XMB4rEfQUra,]2N;d/fLp:;o eU rlkZIc2X!q!!*#drr<,g!!)l`rrE&drrE#crrDubrrDl_rrE&drrE)eq>gQ`rrE&drr<,g!!)uc rrE)errE&drrE&drrDTWrrDWXrrDrarrE&drrE&drrE&drr<,g!!)i_rrE&drrE&drr<,g!!)TX rrDl_rrDn"rW)tK!!)q$qu6m$X8f7#X8`1t>635]pKI[(M>g]_M6gDjY_W3^!Kbn3>6*7R>61g[ r`]Pbelu@_MZ-f\Ks:DX!O.@Y>6L&=>?f\V>6NM]>Hj"a>6T_b>?f[)rEB9Qr`]E-MZ-f`Ks4cc eTudNJQm\0hHL*_c)YLWrmL`FrmD(QBE,#Fs3^lIrR1]G#h#bds/Tf4daJ!ms3_+PBE-6$B)qK% rlkKDPQ&gW~> rjrC7]Dm,;!!*"[rr<+^!!)kWrrE%[rrE"ZrrDtYrrDkVrrE%[rrE(\q>gPWrrE%[rr<+^!!)tZ rrE(\rrE%[rrE%[rrDSNrrDVOrrDqXrrE%[rrE%[rrE%[rr<+^!!)hVrrE%[rrE%[rr<+^!!)SO rrDkVrrDmorW)tK!!)pqqu6m$SH#,ZSGrTe;ZYBMpJV*sK`4mRKX"HSXb-ON!K5P.;ZPDJ;ZWkJ r_iuUel,MML&P!OJ?AZH!NgtQ;Zr$0;cD9F;ZtZM;m(iR;[%]R;cD7grDN^Ir_ij#L&P!SJ?;aK eTHFAJQ%+uhGXOW]:<4-rlG$A8?N6O%s2Y0?rQ,!=#frNBs.!EqaN45Ys2YD7?N7XL?3'Nq rjr42GlG=*~> rmh5Pf)MD)rW)t[rrE(\rrE(\rW!(_!!&+BrW)nYrrDqXrrE%[rrE(\rrE%[rrE(\rrE%[rrE(\ rW)t[rrE(\rrE(\rrE%[rrE(\rW)qZrrE(\rrD_RrrDSNrrE(\rr<1`!!&+BrW)qZrrE(\rW)t[ rrE%[rrE(\rrE%[rrE"ZrW)JMrrDqXr;cY.rr<(7!;gO.Z3LB8s0D[1rrDt3!Rku]C&uV$rb;UX c>%+=F8fnTFk6P,!7q-a!R,T`C'Ab2C22"5rb;IRc2)/Uc#`l!"Ei:1C=`;t"P.=&c#N_t"`_e% C=[>%C&liaC&uS!rb;^Yc"_!lRZiDAJSKaNJSO.Y"9@#YC]FC]r;ciE#dS7GZ>9Kf!!)uFrr<8N Z>9KGC]FC]r;ciE#-r%Ef%+U:nc/U>!WShlepm~> rlkTGc2X!qrW)udrrE)errE)erW!)h!!)HTrW)obrrDrarrE&drrE)errE&drrE)errE&drrE)e rW)udrrE)errE)errE&drrE)erW)rcrrE)errD`[rrDTWrrE)err<2i!!)HTrW)rcrrE)erW)ud rrE&drrE)errE&drrE#crW)KVrrDrar;cY*rr<(3!;gC*Ws8X*s/Z1#rrDt/!RkiYAc^1rral=Q c=U\4DuO>LEn:5%!7q-]!R,H\Ad*2*AnK8(ral1Jc2)#Qb]!Jm"EN(*B%Hll"P.0sb\d>k"`;Lr B%CbrAcUE]Ac^+mralFQc":RcR#ll5JS'IFJS*kQ"9?]IBE.tTr;ci@#chV rjr=5]Dm,;rW)t[rrE(\rrE(\rW!(_!!&+BrW)nYrrDqXrrE%[rrE(\rrE%[rrE(\rrE%[rrE(\ rW)t[rrE(\rrE(\rrE%[rrE(\rW)qZrrE(\rrD_RrrDSNrrE(\rr<1`!!&+BrW)qZrrE(\rW)t[ rrE%[rrE(\rrE%[rrE"ZrW)JMrrDqXr;cY!rr<(*!;g(!S-K%as.'+ZrrDt&!RkQQ>li2_r`o\? cm4rn?!YZcr`oP9bkbTGb\-o\"DZLn?.SpZ"P-jab[pcZ"_Gn` ?.NNa>l`IT>li/\r`oe@b["MLPDXZoJR*h4JR.5?"9?'(?N:#Ar;ci6#b56$S6l64!!)u7rr<8? 
S6l5c?N:#Ar;ci6#+T$"]=VALnc/U#!WRED]79~> rmh;Rf)MD)!0I-ZrK-pWrfI*Z!0I0[s,d*XrK-pWs,d0Z!0I!V!0I$W!0I0[rK-sXr/gmX!0I0[ rfI'YoT9(Qn G5;4=Z3105!3uV&FofX9KGC]DQX!<(+@!!^WPC]C\Us8L4?!<1.GZ>9KkenY9Us8LRMs.KABJ,~> rlkZIc2X!q!:KdcrU0R`rpKac!:Kgds6faarU0R`s6fgc!:KX_!:K[`!:KgdrU0Uar9jOa!:Kgd rpK^bo^;_ZnF$5TrpKac!:KgdrpKac!:Kgds6fdbs6fgc!:KdcrpKacna?DWqsOC_q/HK:E<$"9 EV]\8WrrF'!36+tE<4(2pMg5`F8g._F4>2LeT:_0!HQcjE<+ShE<4R1rc&'^eo>]Zr71ktqJcPd F8g.^En55Mf)0Lieo>]Zr71u"EH0S-E<+ShE<+Mded0cFEHSM/f$T)#EIi+\ET1s@!6qQWs8L%: !<0tDX(VI5BE,sO!<'q;!!^HDBE,#Fs8L%:!<0tBX(VIWc"@%Hs8L7Ds-*H,J,~> rjrC7]Dm,;!0I-ZrK-pWrfI*Z!0I0[s,d*XrK-pWs,d0Z!0I!V!0I$W!0I0[rK-sXr/gmX!0I0[ rfI'YoT9(Qn rmh2Of)MD,OT,.IJ !f`qpJV&H)JV)m5"9@#YC]FCRr;ci:#dS7GZ>9K[!!)u;rr<>EZ>9KGC]DDoqZ-Z9#-r%Ef%+U: nc/U>!WShlepm~> rlkQFc2Wukmf*:)mf*:Vmf*:_mf*:`I0'D!JGoQBI/s>KqZ-PB!OAR!I0G(iI=9Irrd=s(d!^>f K`6B#K@^$M!7q-t!R?AtI0G8SI=9IoomHkdK`6B&K@YI!f):"#JC]-sd";+B!7q'r!7q-t!RQGm I0,7sa+8YiI=Zh+I09PpX(VJ3_#494^^-uRs/Tf4^]4B5_#FB?^oM,JX(VIJ_#"-3^]piPs38f$ !:g*gblRj.rlor~> rjr:4]Dm,>OT,EcTNOol:)XHN%pfH.I"Vf)9UmFk1SRd!5D-!7q'g!7q-i!RQ)c EWU`X`IW&\Ee/8jEWcBSS6l6p[J^+)[0W:/s.!Eq[/^4*[Jp44[@CS'S6l6'[JKt([0E.-s1>mL !:g*g])hqVrk![~> rmh2Of)MD,OSo0uOT,=OOSo1TOT,=XNWT93NfO)=!.ag(rW)qXrrDtW!h7K"r/LrYZ]:'_WW%n] WksdV]W1sk!N4NJNW@Z0NWHhmrf./[erGd-W;_eXT#phh!PGH3NWc92NfM]hNWl*,NfLuTrf.,Q ac;D4r/Ld.rf.#rW;_eXT#phh!7l^]JW>;Aif4-Af!9)kpV7!=C]C\Us1nF1#JdW\s0?G?_#=?6 _#=<=^p7bUf%+U:nc/U>!WShlepm~> rlkQFc2Wukmem.)mf*:Xmem.]mf*:aL][X-Lku$1!.ag"rW)qRrrDtQ!g_)lr.kNPZ&"4OV>c8S VSS(E\u,I_!Me6FL]H$*L]P)`reL`Qeqf-tV#H/NR`P>]!P50-L]jL(LksX\L]s="LkrjBreL]G aG>Z'r.k@(reLTjV#H/NR`P>]!7l^WJV\l5ieR^;c)YLWpUUR0BE,#Fs18"+#J-sKs/Tf4])D^0 ])D[7\uTKDc-9l$nc/U5!WSA_c%#~> rjr:4]Dm,>OSo0uOT,=OOSo1TOT,=XHijA!I!knn!.afkrW)qFrrDtE!fY?Vr-S[HiVasHi^[Ird4m=epMkYSGmm:ON.-E!O\R!Hj#qiI!jNDHj,_bI!iVtrd4j1 `dE0br-SLqrd4a[SGmm:ON.-E!7l^KJUE#rid:k/]:<4-pTFdk?N6O%s0)4u#HsY(s.!EqYPnP% YPnM,YFJr!]=VALnc/U#!WRED]79~> rmh2Of)MCoOO]Hj!9]ID!;r8d!.b$:rVuqKpmV3_r0dTcqjJG3cEEh/U:A:7R[U5'Wgg$9WLB9M cE*M8dFS;c!7q.<LPS#OccR[U5'X.6$1YPO\,U<]j0WmTCdRanO/SZC&eR[U5'X.6$1YF:p2 r0dW:rgF=rdAre6bcdJ(bHmb.XjqukJXV.YigKuMf!9)kpUUR7C]C\Us18"+$+dEXs0?G?]".n? \d8AJ]!?,Of%+U:nc/U>!WShlepm~> rlkQFc2WuYma^b'!9`eM!;r&^!.b$4rVuqKpltdYr0.0]qii#*c)[4sS[HM(Pa&#lVO"(*V3I48 cDQu)d+82\!7q.6&"_+FQDMsZPa&#lVj=%"X88&"S^+.!V9meXPguatQ`&*ZPa&#lVj=%"X-Ak" r0.34rfdnhdAN;(bH$klb-..rWRZQaJWt_MifjQGc)YLWpTt.*BE,#Fs0VS%$+-aGs/Tf4['Ti3 Zj?`>[&[j>c-9l$nc/U5!WSA_c%#~> rjr:4]Dm,,OO]Hj!9]ID!;qTQ!.b$'rVuqKpkSkLr.b7PqhH)kbG'rOP-;f]LPMAGS;Ec^Rtlc^ basZ]cdr)N!7q.)&!P;1M4;fALPMAGSr&cTU\]`bP0BMSSBocALXi&QMOr#BLPMAGSr&cTUPFWU r.b:'reCuQd%6)[aeOWHaJXrOT[eUKJVSf3ieIX:]:<4-pS\:d?N6O%s/>_n$)jA#s.!EqW2K^p W!NI&W172o]=VALnc/U#!WRED]79~> rmh2Of)MCoOLC8N!<&bqJ,oME!.b%Kl)"Zmb1"qg^Sq+."LcY,cF*VJ"g?)!dCe'0V?#3GV?O3I e^1hWrh]h+c.14QpneP)b14nPeu#=Pe^i6mZI]#P"LZM)dCT=S!7q(F!7q+G"LZM)dCT=S"g6"t f%%0ZVLbUqVW+HU!7nGks7`o)Z>9KGC]F.:"gVqDZ>9L7[0EpPs45b:!:g*gecGfDrmlT~> rlkQFc2WuYm^DQ`!<&MjJ,oM>!.b%Kl(80aaOA\c]:o1q"L68&c*7/="f]Smd(%[+T)dI@T*;:< e^(VRrgs=tbgk(Kpn&%qaOJSHet8S=e^i6lXOdBC"L-,#d'iqG!7q(?!7q+@"L-,#d'iqG"fTMl f%%!UT7NVcTAlIG!6qQWs7`Z"X(VI5BE._/"fl;9X(VJ,Xp1q>s38f$!:g*gblRj.rlor~> rjr:4]Dm,,OLC8N!<&#\J,oM0!.b%Kl&c1J`R<;][?UWP"K0GnbGP6#"eN]_c`u(#OT\b14e?plQ&X`RN5=erc)me^i6iU=T=+"K'>lcE@//!7q(1!7q+2"K'>lcE@//"eEW] f%$[LOb&XGOlDK+!5"b-s7`5kS6l5c?N9bo"e8p!S6l6iU'@,os1>mL!:g*g])hqVrk![~> rmh2Of)MCos0ht-!<08)J,oGO!.b%KdBWc'h6I%3J[0j4J[2\h!s$oXC\=t8C]C\Uos+[8s0?G5 Z3>%Df%+U:nc/U>!WShlepm~> rlkQFc2WuYs0ht-! rjr:4]Dm,,s0ht-! 
rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcEmmrW%NLdJs1GJcCr6s.BJpCY!8$nc/U>!WShlepm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcEmmrW%NLdJs1GJcCr6s-!QcB?b&hnc/U5!WSA_c%#~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcEmmrW%NLdJs1GJcCr6s*+YH?FsMDnc/U#!WRED]79~> rmh2Of)MCos0hq5!<:Vcs474Cs8E#:rr<%Xrr<&_rr<%trr<&8rr<%Ms-*K_TEG71enY9Us8LRM s.KABJ,~> rlkQFc2WuYs0hq5!<:;Vs3:S-s8E#:rr<%Xrr<&_rr<%trr<&8rr<%Ms-*K_P6:kuc"@%Hs8L7D s-*H,J,~> rjr:4]Dm,,s0hq5!<9Z;s1A;Us8E#:rr<%Xrr<&_rr<%trr<&8rr<%Ms-*K_GQ\"Q]3YL-s8KV2 s*4OTJ,~> rmh2Of)MCos0hq4!<:Vcs474Crr2ru`;]f;N;ikXkl1V_W;cht_>aK8JcCr6s.BJpCY!8$nc/U> !WShlepm~> 
rlkQFc2WuYs0hq4!<:;Vs3:S-rr2ru`;]f;N;ikXkl1V_W;cht_>aK8JcCr6s-!QcB?b&hnc/U5 !WSA_c%#~> rjr:4]Dm,,s0hq4!<9Z;s1A;Urr2ru`;]f;N;ikXkl1V_W;cht_>aK8JcCr6s*+YH?FsMDnc/U# !WRED]79~> rmh2Of)MCos0hq9!<:Vcs474Cs8N*!!;uls!<<*!!<)rs!!WB&!<3$!rr;uus8W*!"oeQ&rr<&t s8E#ts8N'!s8E#ts8)ffs82lps8E!!rrE&ur;cisrr<'!rW)iqrW)lrquHBhr;cfrrrE*!rrDus rW)Nh!!)rsrr<'!rW)lrr;cisqZ-6fquHZprW!!!!<3#s!<3#u!!*&u!;lfq!;6Bl!!*&u!;ulq !<3#q!;$6g!;ulr!!3*"rr;osrr;uu!<<#uqu?WqqYpNqJcCr6s.BJpCY!8$nc/U>!WShlepm~> rlkQFc2WuYs0hq9!<:;Vs3:S-s8N*!!;uls!<<*!!<)rs!!WB&!<3$!rr;uus8W*!"oeQ&rr<&t s8E#ts8N'!s8E#ts8)ffs82lps8E!!rrE&ur;cisrr<'!rW)iqrW)lrquHBhr;cfrrrE*!rrDus rW)Nh!!)rsrr<'!rW)lrr;cisqZ-6fquHZprW!!!!<3#s!<3#u!!*&u!;lfq!;6Bl!!*&u!;ulq !<3#q!;$6g!;ulr!!3*"rr;osrr;uu!<<#uqu?WqqYpNqJcCr6s-!QcB?b&hnc/U5!WSA_c%#~> rjr:4]Dm,,s0hq9!<9Z;s1A;Us8N*!!;uls!<<*!!<)rs!!WB&!<3$!rr;uus8W*!"oeQ&rr<&t s8E#ts8N'!s8E#ts8)ffs82lps8E!!rrE&ur;cisrr<'!rW)iqrW)lrquHBhr;cfrrrE*!rrDus rW)Nh!!)rsrr<'!rW)lrr;cisqZ-6fquHZprW!!!!<3#s!<3#u!!*&u!;lfq!;6Bl!!*&u!;ulq !<3#q!;$6g!;ulr!!3*"rr;osrr;uu!<<#uqu?WqqYpNqJcCr6s*+YH?FsMDnc/U#!WRED]79~> rmh2Of)MCos0hq9!<:Vcs474Cs8N*!!;ld%!<3'!rrE*!!<3#u!<2uu!<)ot!<3#u!!N<%!<3&u rr<&urr<&us8N)urr<&trr<&hrr<&trr<&trr<&us8N*!rr<&trr<&us8N)urr<&trr<&urr<&u rr<&trr<&lrr<&trr<&trriE&!<<'!rr2rurr2ruoD\djqu?Zrrr2rurr2rurVlitrVlitnc&Rh rVlitrVlitrr;uus8N'!rVlitrr;uurr2rurVlitrr2rup&G$lrr2rurr2rurVlitrVlitnc&Rh rVlitrVlitrr;uus8N'!rVlitrr;uurr2rurVlitrr2ruqu6WrJcCr6s.BJpCY!8$nc/U>!WShl epm~> rlkQFc2WuYs0hq9!<:;Vs3:S-s8N*!!;ld%!<3'!rrE*!!<3#u!<2uu!<)ot!<3#u!!N<%!<3&u rr<&urr<&us8N)urr<&trr<&hrr<&trr<&trr<&us8N*!rr<&trr<&us8N)urr<&trr<&urr<&u rr<&trr<&lrr<&trr<&trriE&!<<'!rr2rurr2ruoD\djqu?Zrrr2rurr2rurVlitrVlitnc&Rh rVlitrVlitrr;uus8N'!rVlitrr;uurr2rurVlitrr2rup&G$lrr2rurr2rurVlitrVlitnc&Rh rVlitrVlitrr;uus8N'!rVlitrr;uurr2rurVlitrr2ruqu6WrJcCr6s-!QcB?b&hnc/U5!WSA_ c%#~> rjr:4]Dm,,s0hq9!<9Z;s1A;Us8N*!!;ld%!<3'!rrE*!!<3#u!<2uu!<)ot!<3#u!!N<%!<3&u rr<&urr<&us8N)urr<&trr<&hrr<&trr<&trr<&us8N*!rr<&trr<&us8N)urr<&trr<&urr<&u rr<&trr<&lrr<&trr<&trriE&!<<'!rr2rurr2ruoD\djqu?Zrrr2rurr2rurVlitrVlitnc&Rh rVlitrVlitrr;uus8N'!rVlitrr;uurr2rurVlitrr2rup&G$lrr2rurr2rurVlitrVlitnc&Rh rVlitrVlitrr;uus8N'!rVlitrr;uurr2rurVlitrr2ruqu6WrJcCr6s*+YH?FsMDnc/U#!WRED ]79~> rmh2Of)MCos0hq6!<:Vcs474CrrE&u!!)rsrrDus!!)rs!!*#u!!)ut!!*#u#lt#*!<<'!s8N)s rrW9$rrE#t!!)ut!!)Qh!!)ip!!)rs!W`6#qu6`us8N)srrW9$rrDus!s&B$!:0Xb!<)rt!;lcr !;uis!;-9k!;lcr!<)ot!;ZWp!<)ot!:g'h!;ZWp!;uiu!<3&rrrW9$rrDus!s&B$!;uis!;?Em !<)ot!;ZWp!<)ot!:g'h!;ZWp!;uiu!<3&rrrW9$rrDus!s&B$!;uis!;uis!.k06s8S_p!,L/$ !:g*gecGfDrmlT~> rlkQFc2WuYs0hq6!<:;Vs3:S-rrE&u!!)rsrrDus!!)rs!!*#u!!)ut!!*#u#lt#*!<<'!s8N)s rrW9$rrE#t!!)ut!!)Qh!!)ip!!)rs!W`6#qu6`us8N)srrW9$rrDus!s&B$!:0Xb!<)rt!;lcr !;uis!;-9k!;lcr!<)ot!;ZWp!<)ot!:g'h!;ZWp!;uiu!<3&rrrW9$rrDus!s&B$!;uis!;?Em !<)ot!;ZWp!<)ot!:g'h!;ZWp!;uiu!<3&rrrW9$rrDus!s&B$!;uis!;uis!.k06s8S8c!,'Ph !:g*gblRj.rlor~> rjr:4]Dm,,s0hq6!<9Z;s1A;UrrE&u!!)rsrrDus!!)rs!!*#u!!)ut!!*#u#lt#*!<<'!s8N)s rrW9$rrE#t!!)ut!!)Qh!!)ip!!)rs!W`6#qu6`us8N)srrW9$rrDus!s&B$!:0Xb!<)rt!;lcr !;uis!;-9k!;lcr!<)ot!;ZWp!<)ot!:g'h!;ZWp!;uiu!<3&rrrW9$rrDus!s&B$!;uis!;?Em !<)ot!;ZWp!<)ot!:g'h!;ZWp!;uiu!<3&rrrW9$rrDus!s&B$!;uis!;uis!.k06s8R rmh2Of)MCos0hq5!<:Vcs474Cs82lprr<&rrr<&srr<&urr<&trr<&urs/W)rrE*!!<<)q!<<'! !<)ot!<)ot!:^$e!<)ot!;c]q!;lcu!<<'!r;Qfus8Voqrr;osoDe^grVlitqZ$Blo`"mkqu6Wr rVlitrVucqrVlitnGiFerVlitqYpNqqu6`us8N)srrN3#s7u`hrr<&trr<&ts82lqrr<&gs8;rr rr<&qrr<&rrrW9$rrDus!W`9#q>gHn!!%TMPQ1ZU"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq5!<:;Vs3:S-s82lprr<&rrr<&srr<&urr<&trr<&urs/W)rrE*!!<<)q!<<'! 
!<)ot!<)ot!:^$e!<)ot!;c]q!;lcu!<<'!r;Qfus8Voqrr;osoDe^grVlitqZ$Blo`"mkqu6Wr rVlitrVucqrVlitnGiFerVlitqYpNqqu6`us8N)srrN3#s7u`hrr<&trr<&ts82lqrr<&gs8;rr rr<&qrr<&rrrW9$rrDus!W`9#q>gHn!!%TMPQ1ZH"9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq5!<9Z;s1A;Us82lprr<&rrr<&srr<&urr<&trr<&urs/W)rrE*!!<<)q!<<'! !<)ot!<)ot!:^$e!<)ot!;c]q!;lcu!<<'!r;Qfus8Voqrr;osoDe^grVlitqZ$Blo`"mkqu6Wr rVlitrVucqrVlitnGiFerVlitqYpNqqu6`us8N)srrN3#s7u`hrr<&trr<&ts82lqrr<&gs8;rr rr<&qrr<&rrrW9$rrDus!W`9#q>gHn!!%TMPQ1Z-"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq5!<:Vcs474C!;uis!<)ot!;lcr!;uis!<2uu!<)ot!<3!)!<<'!s8N*!rrDlp !!)ut!!)ut!!)Bc!!*#u!!)lq!!)or!s&B$!;uj!!<<'!o`"mkp&>!lrVlitrVlitqYpNqmf*7e qu6WrrVlitrr2rurVlitrVlitm/I%crr2ruqYpNqqu6`us8N)srrW9$rrDQg!!)ut!!*#u!!)ut !!)ut!!)Bc!!*#u!!)lq!!)or!s&B$!;uj!!<<'!pAY*mJcCr6s.BJpCY!8$nc/U>!WShlepm~> rlkQFc2WuYs0hq5!<:;Vs3:S-!;uis!<)ot!;lcr!;uis!<2uu!<)ot!<3!)!<<'!s8N*!rrDlp !!)ut!!)ut!!)Bc!!*#u!!)lq!!)or!s&B$!;uj!!<<'!o`"mkp&>!lrVlitrVlitqYpNqmf*7e qu6WrrVlitrr2rurVlitrVlitm/I%crr2ruqYpNqqu6`us8N)srrW9$rrDQg!!)ut!!*#u!!)ut !!)ut!!)Bc!!*#u!!)lq!!)or!s&B$!;uj!!<<'!pAY*mJcCr6s-!QcB?b&hnc/U5!WSA_c%#~> rjr:4]Dm,,s0hq5!<9Z;s1A;U!;uis!<)ot!;lcr!;uis!<2uu!<)ot!<3!)!<<'!s8N*!rrDlp !!)ut!!)ut!!)Bc!!*#u!!)lq!!)or!s&B$!;uj!!<<'!o`"mkp&>!lrVlitrVlitqYpNqmf*7e qu6WrrVlitrr2rurVlitrVlitm/I%crr2ruqYpNqqu6`us8N)srrW9$rrDQg!!)ut!!*#u!!)ut !!)ut!!)Bc!!*#u!!)lq!!)or!s&B$!;uj!!<<'!pAY*mJcCr6s*+YH?FsMDnc/U#!WRED]79~> rmh2Of)MCos0hq5!<:Vcs474C!;uis!<)ot!;c]q!<3#u!<2uu!<3#u!<3!&!<<'!s8N)urr<&t rrW9$rrE#t!!)ut!!*#u!!)]l!!)ut!!)ut!!)ut!s&B$!<)ot!<3#u!<2uu!<)ot!<)p"!<<'! rVlitp&>!lrVlitrVlitq>UEprVlito`"mkqu6WrrVlitrr2rurVlitrVlitrr2rup&>!lrVlit rVlitrVls"s8N)trr<&us8N)urr<&trr<&trr<&mrr<&trr<&urr<&trr<&trr<&urr<&lrr<&t rr<&trr<&trrW9$rrE#t!!*#urrE&u!!)ut!!)ut!!)rs!!%TMPQ1ZU"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq5!<:;Vs3:S-!;uis!<)ot!;c]q!<3#u!<2uu!<3#u!<3!&!<<'!s8N)urr<&t rrW9$rrE#t!!)ut!!*#u!!)]l!!)ut!!)ut!!)ut!s&B$!<)ot!<3#u!<2uu!<)ot!<)p"!<<'! rVlitp&>!lrVlitrVlitq>UEprVlito`"mkqu6WrrVlitrr2rurVlitrVlitrr2rup&>!lrVlit rVlitrVls"s8N)trr<&us8N)urr<&trr<&trr<&mrr<&trr<&urr<&trr<&trr<&urr<&lrr<&t rr<&trr<&trrW9$rrE#t!!*#urrE&u!!)ut!!)ut!!)rs!!%TMPQ1ZH"9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq5!<9Z;s1A;U!;uis!<)ot!;c]q!<3#u!<2uu!<3#u!<3!&!<<'!s8N)urr<&t rrW9$rrE#t!!)ut!!*#u!!)]l!!)ut!!)ut!!)ut!s&B$!<)ot!<3#u!<2uu!<)ot!<)p"!<<'! rVlitp&>!lrVlitrVlitq>UEprVlito`"mkqu6WrrVlitrr2rurVlitrVlitrr2rup&>!lrVlit rVlitrVls"s8N)trr<&us8N)urr<&trr<&trr<&mrr<&trr<&urr<&trr<&trr<&urr<&lrr<&t rr<&trr<&trrW9$rrE#t!!*#urrE&u!!)ut!!)ut!!)rs!!%TMPQ1Z-"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CrVurur;cltquHZprW!!!!<)rs!!E6$!<<#u"oeQ&rr<&us8;rt s8E#us8E#ss8E#js82los8;rrs8;rrrrE-"rW)iqr;cisquH?gr;Zs"!<<)s!;lfp!;$3j!;ulr !<<)u!<3#s!!3*"rVuiso`+ghqu?TprVufrrVllus8E#qs8;rks8E#us8E#ts8;ourrE#trW)Tj quHWor;cfrr;cfr! rlkQFc2WuYs0hq4!<:;Vs3:S-rVurur;cltquHZprW!!!!<)rs!!E6$!<<#u"oeQ&rr<&us8;rt s8E#us8E#ss8E#js82los8;rrs8;rrrrE-"rW)iqr;cisquH?gr;Zs"!<<)s!;lfp!;$3j!;ulr !<<)u!<3#s!!3*"rVuiso`+ghqu?TprVufrrVllus8E#qs8;rks8E#us8E#ts8;ourrE#trW)Tj quHWor;cfrr;cfr! rjr:4]Dm,,s0hq4!<9Z;s1A;UrVurur;cltquHZprW!!!!<)rs!!E6$!<<#u"oeQ&rr<&us8;rt s8E#us8E#ss8E#js82los8;rrs8;rrrrE-"rW)iqr;cisquH?gr;Zs"!<<)s!;lfp!;$3j!;ulr !<<)u!<3#s!!3*"rVuiso`+ghqu?TprVufrrVllus8E#qs8;rks8E#us8E#ts8;ourrE#trW)Tj quHWor;cfrr;cfr! 
rmh2Of)MCos0hq4!<:Vcs474Cl2L_`XoAA$\GlO/]`.s3W;chtmJd.dJcCr6s.BJpCY!8$nc/U> !WShlepm~> rlkQFc2WuYs0hq4!<:;Vs3:S-l2L_`XoAA$\GlO/]`.s3W;chtmJd.dJcCr6s-!QcB?b&hnc/U5 !WSA_c%#~> rjr:4]Dm,,s0hq4!<9Z;s1A;Ul2L_`XoAA$\GlO/]`.s3W;chtmJd.dJcCr6s*+YH?FsMDnc/U# !WRED]79~> rmh2Of)MCos0hq4!<:Vcs474ClMghaXT&8#\GuO.^Ae05W;chtn,N@eJcCr6s.BJpCY!8$nc/U> !WShlepm~> rlkQFc2WuYs0hq4!<:;Vs3:S-lMghaXT&8#\GuO.^Ae05W;chtn,N@eJcCr6s-!QcB?b&hnc/U5 !WSA_c%#~> rjr:4]Dm,,s0hq4!<9Z;s1A;UlMghaXT&8#\GuO.^Ae05W;chtn,N@eJcCr6s*+YH?FsMDnc/U# !WRED]79~> rmh2Of)MCos0hq4!<:Vcs474Cmf34cXT/5!RK*$^k5YA[ci rlkQFc2WuYs0hq4!<:;Vs3:S-mf34cXT/5!RK*$^k5YA[ci rjr:4]Dm,,s0hq4!<9Z;s1A;Umf34cXT/5!RK*$^k5YA[ci rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> 
rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> 
rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq4!<:Vcs474CJcC<$JcC<$VZ6[h"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq4!<:;Vs3:S-JcC<$JcC<$VZ6[["9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq4!<9Z;s1A;UJcC<$JcC<$VZ6[@"9;c??3'Nqrjr42GlG=*~> rmh2Of)MCos0hq2!<:Vcs42c$JcC<$JcDPGs.BJpCY!8$nc/U>!WShlepm~> rlkQFc2WuYs0hq2!<:;Vs36,pJcC<$JcDPGs-!QcB?b&hnc/U5!WSA_c%#~> rjr:4]Dm,,s0hq2!<9Z;s1 rmh2Of)MCos0hq2!<:Vcs42aoJY7ReJY8a1"9<5gCB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq2!<:;Vs36+YJWkYKJWlgl"9<)ZB)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq2!<9Z;s1 rmh2Of)MCos0hq1!<:Vcs+-hoTRibeTV8"]!,L/$!:g*gecGfDrmlT~> rlkQFc2WuYs0hq1!<:;Vs+-AbPC\pKPG+0C!,'Ph!:g*gblRj.rlor~> rjr:4]Dm,,s0hq1!<9Z;s+,EGG_(*jGbK?b!+*9D!:g*g])hqVrk![~> rmh2Of)MCos0hq0!<:VcJH16$JH16$T)\t/enY9Us8LRMs.KABJ,~> rlkQFc2WuYs0hq0!<:;VJH16$JH16$T)\t+c"@%Hs8L7Ds-*H,J,~> rjr:4]Dm,,s0hq0!<9Z;JH16$JH16$T)\t"]3YL-s8KV2s*4OTJ,~> rmh2Of)MCos0hq/!<:W$COp8PCOp8lCBE(&!:g*gecGfDrmlT~> rlkQFc2WuYs0hq/!<:;pB7X]HB7X]dB*-=j!:g*gblRj.rlor~> rjr:4]Dm,,s0hq/!<9Z^?@cF6?@cFR?37`F!:g*g])hqVrk![~> rmh2Of)MCos0hq-!:KjeCOuM:s+13$s2+d9C[V3'CB3o)rmh,MT`3Mm~> rlkQFc2WuYs0hq-!:KjeB7^)6s+13$s2+d9BC>d#B)qK%rlkKDPQ&gW~> rjr:4]Dm,,s0hq-!:Kje?@i--s+13$s2+d9?LIgo?3'Nqrjr42GlG=*~> rmh2Of)MCos0hq.!<;Mn!Gh^dZ@T<4Z@T=!Z2nb@n$2t0!:g*gecGfDrmlT~> rlkQFc2WuYs0hq.!<;Mg!GDF`X+@=&X+@=hWrZl5n#HJ%!:g*gblRj.rlor~> rjr:4]Dm,,s0hq.!<;MX!FGeWS:R2]S:R3JS,lsrn!jDb!:g*g])hqVrk![~> rmh2Of)MCos0hq.!<;Mn!Gh^dZ@T<4Z@T=!Z2nb@n$2t0!:g*gecGfDrmlT~> rlkQFc2WuYs0hq.!<;Mg!GDF`X+@=&X+@=hWrZl5n#HJ%!:g*gblRj.rlor~> rjr:4]Dm,,s0hq.!<;MX!FGeWS:R2]S:R3JS,lsrn!jDb!:g*g])hqVrk![~> rmh2Of)MCos0hq.!<;Mn!Gh^dZ@T<4Z@T=!Z2nb@n$2t0!:g*gecGfDrmlT~> rlkQFc2WuYs0hq.!<;Mg!GDF`X+@=&X+@=hWrZl5n#HJ%!:g*gblRj.rlor~> rjr:4]Dm,,s0hq.!<;MX!FGeWS:R2]S:R3JS,lsrn!jDb!:g*g])hqVrk![~> rmh2Of)MCos0hq.!<;Mn!Gh^dZ@T<4Z@T=!Z2nb@n$2t0!:g*gecGfDrmlT~> rlkQFc2WuYs0hq.!<;Mg!GDF`X+@=&X+@=hWrZl5n#HJ%!:g*gblRj.rlor~> rjr:4]Dm,,s0hq.!<;MX!FGeWS:R2]S:R3JS,lsrn!jDb!:g*g])hqVrk![~> rmh2Of)MCos0hq-!.ep:COp8PCRJr rlkQFc2WuYs0hq-!.ed6B7X]HB:3B4!:g*gblRj.rlor~> rjr:4]Dm,,s0hq-!.eI-?@cF6?C>+"!:g*g])hqVrk![~> rmh2Of)MCos0_lX!.b-$!.b- rlkQFc2WuYs0_lX!.b-$!.b- rjr:4]Dm,,s0_lX!.b-$!.b- rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> 
rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> 
rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCos+13$s5*eUTE"uiecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkQFc2WuYs+13$s5*eUP5kU\blRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr:4]Dm,,s+13$s5*eUGQ7aA])hqVrk!a]JcC<$m/R's!WRED]79~> rmh2Of)MCoeq)D'f&,$\s.H"br71oKT`3Mns+13$s6TdbecGfDrmlT~> rlkQFc2WuYc%4,jc/6bJs-&WHr659BPQ&gXs+13$s6TdbblRj.rlor~> rjr:4]Dm,,]7ISF]AL4&s*/bgr4<"0GlG=+s+13$s6Tdb])hqVrk![~> rmh2Of)MCoeq)D'f&,$\s.H"br71oKT`3Mns+13$s6TdbecGfDrmlT~> rlkQFc2WuYc%4,jc/6bJs-&WHr659BPQ&gXs+13$s6TdbblRj.rlor~> rjr:4]Dm,,]7ISF]AL4&s*/bgr4<"0GlG=+s+13$s6Tdb])hqVrk![~> rmh2Of)MCoeq)D'f&,$\s.H"br71oKT`3Mns+13$s6TdbecGfDrmlT~> rlkQFc2WuYc%4,jc/6bJs-&WHr659BPQ&gXs+13$s6TdbblRj.rlor~> rjr:4]Dm,,]7ISF]AL4&s*/bgr4<"0GlG=+s+13$s6Tdb])hqVrk![~> rmh2Of)MCoIt<*#J)>_Xs.H"br71oKT`3Mns+13$s6TdbecGfDrmlT~> rlkQFc2WuYH%C6lH/ElLs-&WHr659BPQ&gXs+13$s6TdbblRj.rlor~> rjr:4]Dm,,COp8PCYrn0s*/bgr4<"0GlG=+s+13$s6Tdb])hqVrk![~> rmh5Pf)MCmJcC<$VZ4fEoD\sOs.H"br71oKT`3Mns+13$s6TdbecGfDrmlT~> rlkTGc2WuQJcC<$VZ4f>oD\sKs-&WHr659BPQ&gXs+13$s6TdbblRj.rlor~> rjr=5]Dm+kJcC<$VZ4f/oD\sBs*/bgr4<"0GlG=+s+13$s6Tdb])hqVrk![~> rmh8Qf)MCms7OqD!<7W$J_%9\!!(0I!<;`C"T[K\TV);_ecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkWHc2WuQs7OV;!<7VpJ^(XJ!!(0B!<;`:"T[?XPEQ"EblRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr@6]Dm+ks7Nu)!<7V^J\/A&!!(03!<;`("T[$OG^'5d])hqVrk!a]JcC<$m/R's!WRED]79~> rmh8Qf)MCms7Y"FTE,"Ceq)D]ecDEDb-D$Ip=9G@!9!mMTE+o@!WShleq*jPs+14:s8LRMs.KAB J,~> rlkWHc2WuQs7X\=P5tW6c%4-KblO".b,YOBp< 
rjr@6]Dm+ks7X&+GQ@bp]7IT'])d-Vb+&J3p:CNR!7UsmGQ@ZR!WRED]7L!5s+14:s8KV2s*4OT J,~> rmh8Qf)MCms81@I!<(IMTE,"Ceq)D]ecDEDb-D$IrRLrKqpktE!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs81%@!<(.DP5tW6c%4-KblO".b,YOBrQP/!8RU rjr@6]Dm+ks80D.!<'M2GQ@bp]7IT'])d-Vb+&J3rOW%0qn!&W!7UsmGQ@ZR!WRED]7L!5s+14: s8KV2s*4OTJ,~> rmh8Qf)MCms8:IJ!<(IMTE,"Ceq)D]ecDEDb-D$IrRUuKr72(F!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs8:.A!<(.DP5tW6c%4-KblO".b,YOBrQY?Br65G0!8RU rjr@6]Dm+ks89M/!<'M2GQ@bp]7IT'])d-Vb+&J3rO`(0r4 rmh8Qf)MCms8COJ!<(IMTE,"Ceq)D]ecDEDb-D$IrRUrJrRM1G!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs8C4A!<(.DP5tW6c%4-KblO".b,YOBrQY rjr@6]Dm+ks8BS/!<'M2GQ@bp]7IT'])d-Vb+&J3rO`%/rOW8Y!7UsmGQ@ZR!WRED]7L!5s+14: s8KV2s*4OTJ,~> rmh8Qf)MCms8LUJ!<(IMTE,"Ceq)D]ecDEDb-D$IrRUoIrmh:H!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs8L:A!<(.DP5tW6c%4-KblO".b,YOBrQY9@rlkY2!8RU rjr@6]Dm+ks8KY/!<'M2GQ@bp]7IT'])d-Vb+&J3rO`".rjrAZ!7UsmGQ@ZR!WRED]7L!5s+14: s8KV2s*4OTJ,~> rmh8Qf)MCms8COJ!<(IMTE,"Ceq)D]ecDEDb-D$IrRUrJrRM1G!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs8C4A!<(.DP5tW6c%4-KblO".b,YOBrQY rjr@6]Dm+ks8BS/!<'M2GQ@bp]7IT'])d-Vb+&J3rO`%/rOW8Y!7UsmGQ@ZR!WRED]7L!5s+14: s8KV2s*4OTJ,~> rmh8Qf)MCms8:IJ!<(IMTE,"Ceq)D]ecDEDb-D$IrRUuKr72(F!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs8:.A!<(.DP5tW6c%4-KblO".b,YOBrQY?Br65G0!8RU rjr@6]Dm+ks89M/!<'M2GQ@bp]7IT'])d-Vb+&J3rO`(0r4 rmh8Qf)MCms81@I!<(IMTE,"Ceq)D]ecDEDb-D$IrRLrKqpktE!9!mMTE+o@!WShleq*jPs+14: s8LRMs.KABJ,~> rlkWHc2WuQs81%@!<(.DP5tW6c%4-KblO".b,YOBrQP/!8RU rjr@6]Dm+ks80D.!<'M2GQ@bp]7IT'])d-Vb+&J3rOW%0qn!&W!7UsmGQ@ZR!WRED]7L!5s+14: s8KV2s*4OTJ,~> rmh8Qf)MCms7Y"FTE,"Ceq)D]ecDEDb-D$Ip=9G@!9!mMTE+o@!WShleq*jPs+14:s8LRMs.KAB J,~> rlkWHc2WuQs7X\=P5tW6c%4-KblO".b,YOBp< rjr@6]Dm+ks7X&+GQ@bp]7IT'])d-Vb+&J3p:CNR!7UsmGQ@ZR!WRED]7L!5s+14:s8KV2s*4OT J,~> rmh8Qf)MCms7Y"FTE,"Ceq)D]ecDEDb-D$Ip=9G@!9!mMTE+o@!WShleq*jPs+14:s8LRMs.KAB J,~> rlkWHc2WuQs7X\=P5tW6c%4-KblO".b,YOBp< rjr@6]Dm+ks7X&+GQ@bp]7IT'])d-Vb+&J3p:CNR!7UsmGQ@ZR!WRED]7L!5s+14:s8KV2s*4OT J,~> rmh;Rf)MCms46n:!W`8OJY7Re\"E^$b-D'Jf(Pa?!9!mMTE+o@!WShleq*jPs+14:s8LRMs.KAB J,~> rlkZIc2WuQs3:8$!W`8FJWkYK\!$dlb,YRCc1[>)!8RU rjrC7]Dm+ks1@uL!W`84JTu`j[s.lQb+&M4]CpIQ!7UsmGQ@ZR!WRED]7L!5s+14:s8KV2s*4OT J,~> rmh8Qf)MCms7?9ks+(0$!4;gLZ2jps!!M6[TV);_ecGfDrmlZ#JcC<$m/R(9!WShlepm~> rlkWHc2WuQs7?9ks+(0$!4;gLWrW1l!!M*WPEQ"EblRj.rlp#oJcC<$m/R(0!WSA_c%#~> rjr@6]Dm+ks7?9ks+(0$!4;gLS,iT]!!LdNG^'5d])hqVrk!a]JcC<$m/R's!WRED]79~> rmh5Pf)MCmJ`)+;J`,JE"95$e!;tCKs.FqoJ_#D'k10S5T`3Mm~> rlkTGc2WuQJ_Yh3J_]2="94RK!;t(Bs-&#YJ^&bjk03r,PQ&gW~> rjr=5]Dm+kJ^]2!J^`Q+"93Uj!;sG0s*0+,J\-KFk.:ZoGlG=*~> rmh,Meq*jPs+14*s8S_l!;tCKs.FqoJ_#D'k10S5T`3Mm~> rlkKDc%5nGs+14*s8S8_!;t(Bs-&#YJ^&bjk03r,PQ&gW~> rjr42]7L!5s+14*s8R rmh,Meq'KFTRiciTE"uiecGfDJ_#D'J_&u7!WShlepm~> rlkKDc%2(0PC\qOP5kU\blRj.J^&bjJ^*?%!WSA_c%#~> rjr42]7G3XG_(+nGQ7aA])hqVJ\-KFJ\1'V!WRED]79~> rmh)LJY7ReJY:\h!!)rJJcC<$JcFd1!20>BJ,~> rlkHCJWkYKJWncN!!)rAJcC<$JcFd1!0dE,J,~> rjr11JTu`jJU#jm!!)r/JcC<$JcFd1!-nLTJ,~> rmlW#JH16$f)Ya"JY7ReJY;"qrmlT~> rlouoJH16$f)Y`nJWkYKJWo)Wrlor~> rk!^]JH16$f)Y`\JTu`jJU$1!rk![~> J_#D'J_#D'J_#D'Xk&"~> J^&bjJ^&bjJ^&bjXj)@~> J\-KFJ\-KFJ\-KFXh0)~> J_#D'J_#D'J_#D'Xk&"~> J^&bjJ^&bjJ^&bjXj)@~> J\-KFJ\-KFJ\-KFXh0)~> J_#D'J_#D'J_#D'Xk&"~> J^&bjJ^&bjJ^&bjXj)@~> J\-KFJ\-KFJ\-KFXh0)~> JY7ReJY7Re!2+no`7BL4l_&b~> JWkYKJWkYK!0_uY`6Ejsl]Zi~> JTu`jJTu`j!-j(,`4LSFlZdp~> !2+lCJH16$JcGfNJ_%cj!2+nomFD:;J,~> !0_s6JH16$JcGfNJ^)-X!0_uYmEGY2J,~> !-j%pJH16$JcGfNJ\/k4!-j(,mCNAuJ,~> !MBFDeq)D'eq;NRs+/b>ec;ACf&Qeiec>`#~> !L!M7c%4,jc%F7@s+/G5blEs-c/\i`blIco~> 
!I+Tq]7ISF]7[]qs+.f#])[)U]ArqN])_k]~> !MBGOf)G`_ec5]$eq)D'f'Cles+/b>ec;ACec>a3ec5^0ec>`#~> !L!NBc2RdVbl@`pc%4,jc0NUSs+/G5blEs-blIe*bl@b'blIco~> !I+V']DhlD])Vh^]7ISF]Bd'/s+.f#])[)U])_lm])Vij])_k]~> !20/gk10M4][d&_iR[i*g=?6(J_#D']%-l]J_%cj!2-@C!<;34!MBGPec>`#~> !0d6Zk03l+]ZgEViQ_3!g !-n>?k.:Tn]Xn.DiOepdg:I=bJ\-KF]"7tBJ\/k4!-kNU!<;2n!I+V(])_k]~> "/#YF!<(IK!9_o5!1MI=!<(IK!8?!(!.i^Pf"0E4s+/b>ec;ACec>a4ecDEDimn,0J,~> "-W`0!<(.B!9_T,!1M.4!<(.B!8>Zt!.iCGc+;."s+/G5blEs-blIe+blO".ilqK'J,~> "*agX!<'M0!9^ro!1LM"!<'M0!8>$b!.hb5]=PTSs+.f#])[)U])_ln])d-Vik#3jJ,~> "/#YF!<(IK!;tFI!;tFH!;tFI!!CdPf%0g#s47/LmahCOp!s*CrRLrKrRUrJrRUuKs472Ms472M"kEYRec5^Lf)5UJf)#Gu eq)Dcec>`$f#6,>TW6QCs5ql5TE+!&!<7Q~> "-W`0!<(.B!;t+@!;t+?!;t+@!!CIGc-?4fs3:NCm`kb3!6tKCrQY?Bs3:QDrQY9@rltEBs3:NC rltEBrQY?B!6tKCr6>3@!mL]Fp!!I:rQP "*agX!<'M0!;sJ.!;sJ-!;sJ.!!Bh5]=\%Bs1A71m^rK!!5&41rO`(0s1A:2rO`".rk&.0s1A71 rk&.0rO`(0!5&41r4Dq.!kSF4ot(2(rOW%0rO`%/rO`(0s1A:2s1A:2"hOa7])Vj1]DVa/]DDSZ ]7IT-])_k^]>V;]GcJ@Us5pooGQ?a8!<7Q~> "/#YF!<(IK!<(IK!<1OL!<1OL!<(IK!<1OL!<1RL!<1OL!<(IK!:8;:!<1OL!<(IS!7h/$ec=:P !<(IK!<1OL!<(IK!;k=I!;tFJ!<1OL!<1OL!<1RL!;+hE!7o^$r71iJrmh&LrRMP\ec=:P!7o^$ !7h,Mec=:P!<(IK!<(IK!.i^Pf"9N5s7FkB!5$e^!;Y4F!:8;:!5d=b!<:UMTW6QCs5ql5TE+!& !<7Q~> "-W`0!<(.B!<(.B!<14C!<14C!<(.B!<14C!<17C!<14C!<(.B!:7u1!<14C!<(.J!6kMgblH#5 !<(.B!<14C!<(.B!;k"@!;t+A!<14C!<14C!<17C!;+M "*agX!<'M0!<'M0!<0S1!<0S1!<'M0!<0S1!<0V1!<0S1!<'M0!:7>t!<0S1!<'M8!4r6C])]IT !<'M0!<0S1!<'M0!;jA.!;sJ/!<0S1!<0S1!<0V1!;*l*!5#iCr4;q/rjr.1rOWXA])]IT!5#iC !4r42])]IT!<'M0!<'M0!.hb5]=Y]Ts7Eo'!5#iC!;X8+!:7>t!5cAG!<9Y2GcJ@Us5pooGQ?a8 !<7Q~> "/#YF!<1OL!<(IK!;tCJ!;b7K!7o^$r71iJrmh&LrRLrKm+).:r71iJrmq)Lr71iJqpkiLf%'j" ec5^Iec5^Jec5^KecPp'ec>X!!!)ZBr;cfI!!)rJ!!*#LrrDrI"p!6*!7o^$qUPWHrRLrKJ_#D' ][d/aT`=oiq:5NG][m)_qUPWHn(.I<"P*S(ec`#~> "-W`0!<14C!<(.B!;t(A!;aqB!6ragr653ArlkECrQP]ZpHVqTT!?n'1h3"O-qkblGW*!!)o@!0aG-!<;3+!L!NCblIco~> "*agX!<0S1!<'M0!;sG/!;a;0!5#iCr4;q/rjr.1rOW%0m(35tr4;q/rk&11r4;q/qmuq1]=S(A ])Vj.])Vj/])Vj0])r&F])_c@!!)Z'r;cf.!!)r/!!*#1rrDr."ou9I!5#iCqRZ_-rOW%0J\-KF ]Xn7FGlR[Aq7?V,]Y"1DqRZ_-n%8Q!"M4ZG])]^[!!)o.!-kNU!<;2n!I+V(])_k]~> !h]PEr;ccHq>gNGquHcJ!!)rJ!!)uK!s$p'!:/29!;tCJ!<1OL!;k=I!;k=I!<1OO!7o^$qUPWH r71iJrRM&Nf%'j!ec5^BecPp'ec>["q>gNG!!)lH"p!6*!7o^$rmpuIrRLrKJ_#D'^"*DfTV2>9 ec<#,!W^dOqpk`In(%I=rmq)Ls4./M_U\\eqpkb?Wn%1Mk10R+!92Q0s*t~> !ggN>quHcA!!)rA!!)uB!s$Tj!:.l0!;t(A!<14C!;k"@!;k"@!<14F!6ragqTT!? r653ArQPEEc-67dbl@b9bl[sjblI^eq>gN>!!)l?"ouom!6ragrlt?@rQP !dF^Wr;cc-q>gN,quHc/!!)r/!!)u0!s#sF!:.5s!;sG/!<0S1!;jA.!;jA.!<0S4!5#iCqRZ_- r4;q/rOW.3]=S(@])Vj'])r&F])_fAq>gN,!!)l-"ou9I!5#iCrk&(.rOW%0J\-KF]t4LKG^08# ])].K!W]h4qmuh.n%/Q"rk&11s1872_RfdJqmuiQWk/92k.:Y=!91Tjs*t~> "/#YF!<1OL!<(IK!;Y1G!<(IN!7o^$r71iJrRM&Nf%'ieec5^Jec5^Lec5^Iec5^Iec5^LecPp' ec>Qt!!)rJ!!)uK!s$p'!;tCJ!;+hB!;b7H!;P+F!;b7Q!7o^$f%'iPec>["!!)uK!!%T$J_%Qd #QLHif)O=P!;Y4F!<(LK!!(UL!;k@H!;k@I!7o^$!;k=I!:\P>!;tCN!7o^$f)>[If)5UIf)GaM f)GaJf)Ga.ec5^Iec;ACec>a4ecDEDimn,0J,~> "-W`0!<14C!<(.B!;Xk>!<(.E!6ragr653ArQPEEc-67Sbl@bAbl@bCbl@b@bl@b@bl@bCbl[sj blIUb!!)rA!!)uB!s$Tj!;t(A!;+M9!;aq?!;Oe=!;aqH!6ragc-675blI^e!!)uB!!%SpJ^(pR #QL!Oc2Z&5!;Xn=!<(1B!!(:C!;k%?!;k%@!7oBp!;k"@!:\55!;t(E!6ragc2I_@c2@Y@c2ReD c2ReAc2Re%bl@b@blEs-blIe+blO".ilqK'J,~> "*agX!<0S1!<'M0!;X5,!<'M3!5#iCr4;q/rOW.3]=S(/])Vj/])Vj1])Vj.])Vj.])Vj1])r&F ])_]>!!)r/!!)u0!s#sF!;sG/!;*l'!;a;-!;O/+!;a;6!5#iC]=S'T])_fA!!)u0!!%S^J\/Y. 
#QK$n]DoLT!;X8+!<'P0!!'Y1!;jD-!;jD.!7na^!;jA.!:[T#!;sG3!5#iC]D_g.]DVa.]Dhm2 ]Dhm/]Dhlh])Vj.])[)U])_ln])d-Vik#3jJ,~> "/#YF!<(IK!<(IK!<(IN!7o^$rRLrKrmh&Lrmq)Lr7:lJqUYZHo@EpArmh&LrRLrKqUPWHrRLrK r7:lJq:5NGr71iJrRLrKrmh&Lrmq)Lo[X!Bq:5NGrRLrKrmh&LqUPrQf%'iPec=:P!<(IK!<(IK !<1OL!.i^Pf#-&Es.H%9f)O=P!;G%E!;tFJ!<1OL!<(IK!<1OL!;tFJ!7o^$!;k=J!7q,Lo[X!B q:>QGrmh&Lrmh&LrRLrKrRM,Pec=:P!<(LK!8uH,!<1OLTW6QCs5ql5TE+!&!<7Q~> "-W`0!<(.B!<(.B!<(.E!6ragrQP6AqT]$?o?I:8rlkECrQP6Aq98m>r653ArQPrQPrlkECrlkECrQP "*agX!<'M0!<'M0!<'M3!5#iCrOW%0rjr.1rk&11r4Dt/qRcb-o=P#&rjr.1rOW%0qRZ_-rOW%0 r4Dt/q7?V,r4;q/rOW%0rjr.1rk&11oXb)'q7?V,rOW%0rjr.1qR[%6]=S'T])]IT!<'M0!<'M0 !<0S1!.hb5]>M5ds*/e#]DoLT!;F)*!;sJ/!<0S1!<'M0!<0S1!;sJ/!7na^!;jA/!5&41oXb)' q7HY,rjr.1rjr.1rOW%0rOW45])]IT!<'P0!8tKf!<0S1GcJ@Us5pooGQ?a8!<7Q~> !208jrRLrKrRUoIrRUoI!S.8Mf)>XNec5^Jec5^Hf)GaAec>d%rW)oJquHZGr;c`GrrDrIquHcJ rW)uLrW)rKrVuuM!;4qA!;b:F!<1RI!<1RK!!h'T!7h,Mf%0d"!S.8Lf)>Z#eq)Died;ALT[q]o s45a$pXTa4ecDEDimn,0J,~> !0d?]rQPr;c`>rrDr@quHcA rW)uCrW)rBrVuuD!;4V8!;at=!<17@!<17B!!gaK!6kKDc-?1e!R1WCc2I]oc%4-WbmFE6PKhAP s38dgpWW[rQPrQY?Bd`hjo c-;HPblIe+blO".ilqK'J,~> !-nGBrOW%0rO`".rO`".!P8@2]D_d3])Vj/])Vj-]Dhm&])_oDrW)o/quHZ,r;c`,rrDr.quHc/ rW)u1rW)r0rVuu2!;3u&!;a>+!<0V.!<0V0!!g+9!4r42]=\"A!P8@1]D_e]]7IT3]*\L^Ge:Tf s1>lCpU^D*r4;q/rOW%0rjr.1r4;q/d(98Xqn)k.rjr.1ot(2(q7?V,rOW%0q7?V,rO`(0d^oS] ]=W !MBGHec5^Hf)Ga@ec5]jec5]$eq)D,edDGMT[q]oT` !L!N;bl@b?c2Re7bl@aabl@`pc%4,obmOK7PKhAPPQ/nP!<(.B!;t(A!;t(A!<(.B!<17>!78sj !;k"@!<(.B!;4S:!;Xk>!<(.B!<(1?!<(.B!7B$n!6rcPWm(PDk03pj!926's*t~> !I+Uu])Vj-]Dhm%])ViO])Vh^]7ISK]*eR_Ge:TfGlPCf!<'M0!;sG/!;sG/!<'M0!<0V,!78=X !;jA.!<'M0!;3r(!;X5,!<'M0!<'P-!<'M0!7AC\!5#jfWk/92k.:Y=!91Tjs*t~> !MBGHec5^Hec5^?ec5]kec5]$eq)D,edMMNT[q]oTV2>9ec>["!!)rJ!!)rJ!!)uK!!*#L!!(0m !!)oI!!)uK!!)]C!!)iG!!)uK!!*#L!!)uK!!)uK!!(Et!s$p'TW6QCs5ql5TE+!&!<7Q~> !L!N;bl@b?bl@b6bl@abbl@`pc%4,obmXQ8PKhAPPEZ$kblI^e!!)rA!!)rA!!)uB!!*#C!!(0d !!)o@!!)uB!!)]:!!)i>!!)uB!!*#C!!)uB!!)uB!!(Ek!s$TjPH)k-s5qQ,P5sUe!<7Q~> !I+Uu])Vj-])Vj$])ViP])Vh^]7ISK]*nX`Ge:TfG^08#])_fA!!)r/!!)r/!!)u0!!*#1!!(0R !!)o.!!)u0!!)](!!)i,!!)u0!!*#1!!)u0!!)u0!!(EY!s#sFGcJ@Us5pooGQ?a8!<7Q~> !MBGJf)5U7f)5Tof)5T"eq)D,edVSOT[q]oTV0m9f%'j"ec5^Jec5^Jec5^Kec5^Kec5^Kec5^K f)Ga$ec5^Hec5^Lec5^Bec5^KecPp'ec>["!!*#L!!)uK!!)uK!!)fFrrD$/!!)uK!s$p'TW6QC s5ql5TE+!&!<7Q~> !L!N=c2@Y.c2@Xfc2@Wnc%4,obmaW9PKhAPPEX8kc-67ebl@bAbl@bAbl@bBbl@bBbl@bBbl@bB c2Rdpbl@b?bl@bCbl@b9bl@bBbl[sjblI^e!!*#C!!)uB!!)uB!!)f=rrD$&!!)uB!s$TjPH)k- s5qQ,P5sUe!<7Q~> !I+V"]DV`q]DV`T]DV_\]7ISK]+"^aGe:TfG^-k#]=S(A])Vj/])Vj/])Vj0])Vj0])Vj0])Vj0 ]Dhl^])Vj-])Vj1])Vj'])Vj0])r&F])_fA!!*#1!!)u0!!)u0!!)f+rrD#i!!)u0!s#sFGcJ@U s5pooGQ?a8!<7Q~> !MBFDeq)D'erA5fs.H%9f)MD9f%0ls!<1RI!<:XL!<:XL!<(LI!;tFJ!8,m#!;k@H!:n_>!<:XL !<:XL!<1RJ!!CdPf%0a!qptcIi7@i,rmh(BWn%1Mk10R+!92Q0s*t~> !L!M7c%4,jc&KsTs-&Ykc2Wukc-?:a!<17@!<:=C!<:=C!<(1@!;t+A!8,Qo!;k%?!:nD5!<:=C !<:=C!<17A!!CIGc-?.dqp#-@i6D3#rlkG,Wm(PDk03pj!926's*t~> !I+Tq]7ISF]8aE0s*/e#]Dm,#]=\+=!<0V.!<9\1!<9\1!<'P.!;sJ/!8+p]!;jD-!:mc#!<9\1 !<9\1!<0V/!!Bh5]=[t@qn)k.i4Jpfrjr/TWk/92k.:Y=!91Tjs*t~> !MBFDeq)D'erJ;hs.H%9f)MD9f%0m$J_%cj!2-@C!<;34!MBGPec>`#~> !L!M7c%4,jc&U$Vs-&Ykc2Wukc-?:gJ^)-X!0aG-!<;3+!L!NCblIco~> !I+Tq]7ISF]8jK2s*/e#]Dm,#]=\+CJ\/k4!-kNU!<;2n!I+V(])_k]~> !MBFDeq)D'erSAjs.H%9f)MD9f%0kos+/b>ec;ACec>a4ecDEDimn,0J,~> !L!M7c%4,jc&^*Xs-&Ykc2Wukc-?9Ps+/G5blEs-blIe+blO".ilqK'J,~> !I+Tq]7ISF]8sQ4s*/e#]Dm,#]=\)fs+.f#])[)U])_ln])d-Vik#3jJ,~> !MBFDeq)D'er\Gls.H%9f)MD9f%0koT`9So`7=p]Wn%1Mk10R+!92Q0s*t~> !L!M7c%4,jc&g0Zs-&Ykc2Wukc-?9PPQ-3Y`6A:GWm(PDk03pj!926's*t~> !I+Tq]7ISF]9'W6s*/e#]Dm,#]=\)fGlN?,`4H"oWk/92k.:Y=!91Tjs*t~> 
%%EndData showpage %%Trailer end %%EOF coq-8.4pl2/doc/refman/RefMan-ide.tex0000640000175000001440000003266211552015162016304 0ustar notinusers\chapter[\Coq{} Integrated Development Environment]{\Coq{} Integrated Development Environment\label{Addoc-coqide} \ttindex{coqide}}
The \Coq{} Integrated Development Environment is a graphical tool, to be used as a user-friendly replacement for \texttt{coqtop}. Its main purpose is to allow the user to navigate forward and backward through a \Coq{} vernacular file, executing the corresponding commands or undoing them respectively.
% CREDITS ? Proof general, lablgtk, ...
\CoqIDE{} is run by typing the command \verb|coqide| on the command line. Without arguments, the main screen is displayed with an ``unnamed buffer''; with a file name as argument, another buffer is opened, displaying the contents of that file. Additionally, \verb|coqide| accepts the same options as \verb|coqtop|, given in Chapter~\ref{Addoc-coqc}, the ones having obviously no meaning for \CoqIDE{} being ignored. \verb|coqide| also accepts the option \verb|-enable-geoproof| to enable the support for \emph{GeoProof} \footnote{\emph{GeoProof} is dynamic geometry software which can be used in conjunction with \CoqIDE{} to interactively build a Coq statement corresponding to a geometric figure. More information about \emph{GeoProof} can be found here: \url{http://home.gna.org/geoproof/} }.
\begin{figure}[t]
\begin{center}
%HEVEA\imgsrc{coqide.png}
%BEGIN LATEX
\ifpdf % if running pdflatex
\includegraphics[width=1.0\textwidth]{coqide.png}
\else
\includegraphics[width=1.0\textwidth]{coqide.eps}
\fi
%END LATEX
\end{center}
\caption{\CoqIDE{} main screen}
\label{fig:coqide}
\end{figure}
A sample \CoqIDE{} main screen, while navigating into a file \verb|Fermat.v|, is shown on Figure~\ref{fig:coqide}. At the top is a menu bar, and a tool bar below it. The large window on the left displays the various \emph{script buffers}. The upper right window is the \emph{goal window}, where goals to prove are displayed. The lower right window is the \emph{message window}, where various messages resulting from commands are displayed. At the bottom is the status bar.
\section{Managing files and buffers, basic editing}
In the script window, you may open arbitrarily many buffers to edit. The \emph{File} menu allows you to open files or create some, save them, print or export them into various formats. Among all these buffers, there is always one which is the current \emph{running buffer}, whose name is displayed on a green background, and which is the one where Coq commands are currently executed.
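Note that buffers can also be opened directly when launching \CoqIDE{} from the command line, as described at the beginning of this chapter. The following is only an illustrative sketch; the file name is arbitrary:
\begin{verbatim}
$ coqide                    # start with an unnamed buffer
$ coqide Fermat.v           # open Fermat.v in its own buffer
$ coqide -enable-geoproof   # additionally enable GeoProof support
\end{verbatim}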
Buffers may be edited as in any text editor, and classical basic editing commands (Copy/Paste, \ldots) are available in the \emph{Edit} menu. \CoqIDE{} offers only basic editing commands, so if you need more complex editing commands, you may launch your favorite text editor on the current buffer, using the \emph{Edit/External Editor} menu.
\section{Interactive navigation into \Coq{} scripts}
The running buffer is the one where navigation takes place. The toolbar proposes five basic commands for this. The first one, represented by a down arrow icon, is for going forward, executing one command. If that command is successful, the part of the script that has been executed is displayed on a green background. If that command fails, the error message is displayed in the message window, and the location of the error is emphasized by a red underline. On Figure~\ref{fig:coqide}, the running buffer is \verb|Fermat.v|, all commands until the \verb|Theorem| have already been executed, and the user tried to go forward executing \verb|Induction n|. That command failed because no such tactic exists (tactics are now in lowercase\ldots), and the wrong word is underlined. Notice that the green part of the running buffer is not editable. If you ever want to modify something, you have to go backward using the up arrow tool, or even better, put the cursor where you want to go back and use the \textsf{goto} button. Unlike with \verb|coqtop|, you should never use \verb|Undo| to go backward. Two additional tool buttons exist, one to go directly to the end and one to go back to the beginning. If you try to go to the end, or in general to run several commands using the \textsf{goto} button, the execution will stop whenever an error is found. If you ever try to execute a command which happens to run for a long time, and would like to abort it before its termination, you may use the interrupt button (the white cross on a red circle). Finally, notice that these navigation buttons are also available in the menu, where their keyboard shortcuts are given.
\section[Try tactics automatically]{Try tactics automatically\label{sec:trytactics}}
The menu \texttt{Try Tactics} provides some features for automatically trying to solve the current goal using simple tactics. If such a tactic succeeds in solving the goal, then its text is automatically inserted into the script. Finally, there is a combination of these tactics, called the \emph{proof wizard}, which will try each of them in turn. This wizard is also available as a tool button (the light bulb). The set of tactics tried by the wizard is customizable in the preferences. These tactics are general ones; in particular, they do not refer to particular hypotheses. You may also try specific tactics related to the goal or one of the hypotheses, by clicking with the right mouse button on the goal or the considered hypothesis. This is the ``contextual menu on goals'' feature, which may be disabled in the preferences if undesirable.
\section{Proof folding}
As your script grows bigger and bigger, it might be useful to hide the proofs of your theorems and lemmas. This feature is toggled via the \texttt{Hide} entry of the \texttt{Navigation} menu. The proof must be enclosed between \texttt{Proof.} and \texttt{Qed.}, both with their final dots. The proof to be hidden or revealed is the first one whose opening statement (such as \texttt{Theorem}) precedes the insertion cursor.
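For instance, a script shaped like the following can be folded down to its \texttt{Theorem} line (this is only an illustrative sketch; the statement and the tactics used are arbitrary examples, not something \CoqIDE{} requires):
\begin{verbatim}
(* an arbitrary example lemma; any Proof. ... Qed. block can be folded *)
Theorem example_plus_0 : forall n : nat, n + 0 = n.
Proof.
  induction n; simpl; try rewrite IHn; reflexivity.
Qed.
\end{verbatim}
With the insertion cursor placed after the \texttt{Theorem} line, the \texttt{Hide} entry then hides the part between \texttt{Proof.} and \texttt{Qed.}.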
\section{Vernacular commands, templates}
The \texttt{Templates} menu allows you to use shortcuts to insert vernacular commands. This is a nice way to proceed if you are not sure of the spelling of the command you want. Moreover, this menu offers some \emph{templates} which will automatically insert a complex command like Fixpoint with a convenient shape for its arguments.
\section{Queries}
\begin{figure}[t]
\begin{center}
%HEVEA\imgsrc{coqide-queries.png}
%BEGIN LATEX
\ifpdf % if running pdflatex
\includegraphics[width=1.0\textwidth]{coqide-queries.png}
\else
\includegraphics[width=1.0\textwidth]{coqide-queries.eps}
\fi
%END LATEX
\end{center}
\caption{\CoqIDE{}: the query window}
\label{fig:querywindow}
\end{figure}
We call \emph{query} any vernacular command that does not change the current state, such as \verb|Check|, \verb|SearchAbout|, etc. Those commands are of course useless during compilation of a file, hence should not be included in scripts. To run such commands without writing them in the script, \CoqIDE{} offers another input window called the \emph{query window}. This window can be displayed on demand, either by using the \texttt{Window} menu, or directly using shortcuts given in the \texttt{Queries} menu. Indeed, with \CoqIDE{} the simplest way to perform a \texttt{SearchAbout} on some identifier is to select it using the mouse, and to press \verb|F2|. This will both make the query window appear and run the \texttt{SearchAbout} in it, displaying the result. Shortcuts \verb|F3| and \verb|F4| are for \verb|Check| and \verb|Print| respectively. Figure~\ref{fig:querywindow} displays the query window after selection of the word ``mult'' in the script window, and pressing \verb|F4| to print its definition.
\section{Compilation}
The \verb|Compile| menu offers direct commands to:
\begin{itemize}
\item compile the current buffer
\item run a compilation using \verb|make|
\item go to the last compilation error
\item create a \verb|makefile| using \verb|coq_makefile|.
\end{itemize}
\section{Customizations}
You may customize your environment using the \texttt{Edit/Preferences} menu. A new window will be displayed, with several customization sections presented as a notebook. The first section is for selecting the text font used for scripts, goal and message windows. The second section is devoted to file management: you may configure automatic saving of files, by periodically saving the contents into files named \verb|#f#| for each opened file \verb|f|. You may also activate the \emph{revert} feature: in case an opened file is modified on the disk by a third party, \CoqIDE{} may read it again for you. Note that in the case you edited that same file, you will be prompted to choose to either discard your changes or not. The \texttt{File charset encoding} choice is described below in Section~\ref{sec:coqidecharencoding}. The \verb|Externals| section allows you to customize the external commands for compilation, printing, web browsing. In the browser command, you may use \verb|%s| to denote the URL to open, for example: \verb|mozilla -remote "OpenURL(%s)"|. The \verb|Tactics Wizard| section allows you to define the set of tactics that should be tried, in sequence, to solve the current goal. The last section is for miscellaneous boolean settings, such as the ``contextual menu on goals'' feature presented in Section~\ref{sec:trytactics}. Notice that these settings are saved in the file \verb|.coqiderc| of your home directory. A gtk2 accelerator keymap is saved under the name \verb|.coqide.keys|.
This file should not be edited manually: to modify a given menu shortcut, go to the corresponding menu item without releasing the mouse button, press the key you want for the new shortcut, and release the mouse button afterwards. For experts: it is also possible to set up a specific gtk resource file, under the name \verb|.coqide-gtk2rc|, following the gtk2 resources syntax \url{http://developer.gnome.org/doc/API/2.0/gtk/gtk-Resource-Files.html}. Such a default resource file can be found in the subdirectory \verb=lib/coq/ide= of the root installation directory of \Coq{} (alternatively, it can be found in the subdirectory \verb=ide= of the source archive of \Coq{}). You may copy this file into your home directory, and edit it using any text editor, \CoqIDE{} itself for example.
\section{Using unicode symbols}
\CoqIDE{} supports unicode character encoding in its text windows; consequently, a large set of symbols is available for notations.
\subsection{Displaying unicode symbols}
You just need to define suitable notations as described in Chapter~\ref{Addoc-syntax}. For example, to use the mathematical symbols $\forall$ and $\exists$, you may define
\begin{quote}\tt
Notation "$\forall$ x : t, P" := \\
\qquad (forall x:t, P) (at level 200, x ident).\\
Notation "$\exists$ x : t, P" := \\
\qquad (exists x:t, P) (at level 200, x ident).
\end{quote}
There exists a small set of such notations already defined, in the file \verb|utf8.v| of the \Coq{} library, so you may enable them just by \verb|Require utf8| inside \CoqIDE{}, or equivalently, by starting \CoqIDE{} with \verb|coqide -l utf8|. However, there are some issues when using such unicode symbols: you of course need to use a character font which supports them. In the Fonts section of the preferences, the Preview line displays some unicode symbols, so you can check whether the selected font is suitable. Related to this, one thing you may need to do is choose whether Gtk should use antialiased fonts or not, by setting the environment variable \verb|GDK_USE_XFT| to 1 or 0 respectively.
\subsection{Defining an input method for non-ASCII symbols}
To input a Unicode symbol, a general method is to press both the CONTROL and the SHIFT keys, and type the hexadecimal code of the symbol required, for example \verb|2200| for the $\forall$ symbol. A list of symbol codes is available at \url{http://www.unicode.org}. This method obviously doesn't scale, which is why the preferred alternative is to use an Input Method Editor. On POSIX systems (Linux distros, BSD variants and MacOS X), you can use \texttt{uim} version 1.6 or later, which provides a \LaTeX{}-style input method. To configure \texttt{uim}, execute \texttt{uim-pref-gtk} as your regular user. In the "Global Settings" group set the default Input Method to "ELatin" (don't forget to tick the checkbox "Specify default IM"). In the "ELatin" group set the layout to "TeX", and remember the content of the "[ELatin] on" field (by default "\textbackslash"). You can now execute CoqIDE with the following commands (assuming you use a Bourne-style shell):
\begin{verbatim}
$ export GTK_IM_MODULE=uim
$ coqide
\end{verbatim}
Activate the ELatin Input Method with Ctrl-\textbackslash, then type the sequence "\verb=\Gamma=". You will see the sequence being replaced by $\Gamma$ as soon as you type the second "a".
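Once such an input method is active, Unicode symbols can be typed directly inside notation declarations. As a further illustration (a sketch in the spirit of the notations provided by \verb|utf8.v|, not a verbatim excerpt of that file; the level and associativity are just one reasonable choice), one may declare:
\begin{quote}\tt
Notation "x $\leq$ y" := (le x y) (at level 70, no associativity).
\end{quote}
After such a declaration, statements about \texttt{le} can be entered and displayed using the $\leq$ symbol.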
\subsection[Character encoding for saved files]{Character encoding for saved files\label{sec:coqidecharencoding}} In the \texttt{Files} section of the preferences, the encoding option is related to the way files are saved. If you have no need to exchange files with non UTF-8 aware applications, it is better to choose the UTF-8 encoding, since it guarantees that your files will be read again without problems. (This is because when \CoqIDE{} reads a file, it tries to automatically detect its character encoding.) If you choose something else than UTF-8, then missing characters will be written encoded by \verb|\x{....}| or \verb|\x{........}| where each dot is an hexadecimal digit: the number between braces is the hexadecimal UNICODE index for the missing character. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/refman/Program.tex0000640000175000001440000003115711776416511016015 0ustar notinusers\achapter{\Program{}} \label{Program} \aauthor{Matthieu Sozeau} \index{Program} We present here the \Program\ tactic commands, used to build certified \Coq\ programs, elaborating them from their algorithmic skeleton and a rich specification \cite{Sozeau06}. It can be sought of as a dual of extraction (see Chapter~\ref{Extraction}). The goal of \Program~is to program as in a regular functional programming language whilst using as rich a specification as desired and proving that the code meets the specification using the whole \Coq{} proof apparatus. This is done using a technique originating from the ``Predicate subtyping'' mechanism of \PVS \cite{Rushby98}, which generates type-checking conditions while typing a term constrained to a particular type. Here we insert existential variables in the term, which must be filled with proofs to get a complete \Coq\ term. \Program\ replaces the \Program\ tactic by Catherine Parent \cite{Parent95b} which had a similar goal but is no longer maintained. The languages available as input are currently restricted to \Coq's term language, but may be extended to \ocaml{}, \textsc{Haskell} and others in the future. We use the same syntax as \Coq\ and permit to use implicit arguments and the existing coercion mechanism. Input terms and types are typed in an extended system (\Russell) and interpreted into \Coq\ terms. The interpretation process may produce some proof obligations which need to be resolved to create the final term. \asection{Elaborating programs} The main difference from \Coq\ is that an object in a type $T : \Set$ can be considered as an object of type $\{ x : T~|~P\}$ for any wellformed $P : \Prop$. If we go from $T$ to the subset of $T$ verifying property $P$, we must prove that the object under consideration verifies it. \Russell\ will generate an obligation for every such coercion. In the other direction, \Russell\ will automatically insert a projection. Another distinction is the treatment of pattern-matching. Apart from the following differences, it is equivalent to the standard {\tt match} operation (see Section~\ref{Caseexpr}). \begin{itemize} \item Generation of equalities. A {\tt match} expression is always generalized by the corresponding equality. As an example, the expression: \begin{coq_example*} match x with | 0 => t | S n => u end. \end{coq_example*} will be first rewrote to: \begin{coq_example*} (match x as y return (x = y -> _) with | 0 => fun H : x = 0 -> t | S n => fun H : x = S n -> u end) (eq_refl n). 
\end{coq_example*} This permits to get the proper equalities in the context of proof obligations inside clauses, without which reasoning is very limited. \item Generation of inequalities. If a pattern intersects with a previous one, an inequality is added in the context of the second branch. See for example the definition of {\tt div2} below, where the second branch is typed in a context where $\forall p, \_ <> S (S p)$. \item Coercion. If the object being matched is coercible to an inductive type, the corresponding coercion will be automatically inserted. This also works with the previous mechanism. \end{itemize} \subsection{Syntactic control over equalities} \label{ProgramSyntax} To give more control over the generation of equalities, the typechecker will fall back directly to \Coq's usual typing of dependent pattern-matching if a {\tt return} or {\tt in} clause is specified. Likewise, the {\tt if} construct is not treated specially by \Program{} so boolean tests in the code are not automatically reflected in the obligations. One can use the {\tt dec} combinator to get the correct hypotheses as in: \begin{coq_eval} Require Import Program Arith. \end{coq_eval} \begin{coq_example} Program Definition id (n : nat) : { x : nat | x = n } := if dec (leb n 0) then 0 else S (pred n). \end{coq_example} The let tupling construct {\tt let (x1, ..., xn) := t in b} does not produce an equality, contrary to the let pattern construct {\tt let '(x1, ..., xn) := t in b}. Also, {\tt {\term}:>} explicitly asks the system to coerce {\tt \term} to its support type. It can be useful in notations, for example: \begin{coq_example} Notation " x `= y " := (@eq _ (x :>) (y :>)) (only parsing). \end{coq_example} This notation denotes equality on subset types using equality on their support types, avoiding uses of proof-irrelevance that would come up when reasoning with equality on the subset types themselves. The next two commands are similar to their standard counterparts Definition (see Section~\ref{Basic-definitions}) and Fixpoint (see Section~\ref{Fixpoint}) in that they define constants. However, they may require the user to prove some goals to construct the final definitions. \subsection{\tt Program Definition {\ident} := {\term}. \comindex{Program Definition}\label{ProgramDefinition}} This command types the value {\term} in \Russell\ and generate proof obligations. Once solved using the commands shown below, it binds the final \Coq\ term to the name {\ident} in the environment. \begin{ErrMsgs} \item \errindex{{\ident} already exists} \end{ErrMsgs} \begin{Variants} \item {\tt Program Definition {\ident} {\tt :}{\term$_1$} := {\term$_2$}.}\\ It interprets the type {\term$_1$}, potentially generating proof obligations to be resolved. Once done with them, we have a \Coq\ type {\term$_1'$}. It then checks that the type of the interpretation of {\term$_2$} is coercible to {\term$_1'$}, and registers {\ident} as being of type {\term$_1'$} once the set of obligations generated during the interpretation of {\term$_2$} and the aforementioned coercion derivation are solved. 
\item {\tt Program Definition {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt :}\term$_1$ {\tt :=} {\term$_2$}.}\\ This is equivalent to \\ {\tt Program Definition\,{\ident}\,{\tt :\,forall} % {\binder$_1$}\ldots{\binder$_n$}{\tt ,}\,\term$_1$\,{\tt :=}} \\ \qquad {\tt fun}\,{\binder$_1$}\ldots{\binder$_n$}\,{\tt =>}\,{\term$_2$}\,% {\tt .} \end{Variants} \begin{ErrMsgs} \item \errindex{In environment {\dots} the term: {\term$_2$} does not have type {\term$_1$}}.\\ \texttt{Actually, it has type {\term$_3$}}. \end{ErrMsgs} \SeeAlso Sections \ref{Opaque}, \ref{Transparent}, \ref{unfold} \subsection{\tt Program Fixpoint {\ident} {\params} {\tt \{order\}} : type := \term \comindex{Program Fixpoint} \label{ProgramFixpoint}} The structural fixpoint operator behaves just like the one of Coq (see Section~\ref{Fixpoint}), except it may also generate obligations. It works with mutually recursive definitions too. \begin{coq_eval} Admit Obligations. \end{coq_eval} \begin{coq_example} Program Fixpoint div2 (n : nat) : { x : nat | n = 2 * x \/ n = 2 * x + 1 } := match n with | S (S p) => S (div2 p) | _ => O end. \end{coq_example} Here we have one obligation for each branch (branches for \verb:0: and \verb:(S 0): are automatically generated by the pattern-matching compilation algorithm). \begin{coq_example} Obligation 1. \end{coq_example} One can use a well-founded order or a measure as termination orders using the syntax: \begin{coq_eval} Reset Initial. Require Import Arith. Require Import Program. \end{coq_eval} \begin{coq_example*} Program Fixpoint div2 (n : nat) {measure n} : { x : nat | n = 2 * x \/ n = 2 * x + 1 } := match n with | S (S p) => S (div2 p) | _ => O end. \end{coq_example*} The order annotation can be either: \begin{itemize} \item {\tt measure f (R)?} where {\tt f} is a value of type {\tt X} computed on any subset of the arguments and the optional (parenthesised) term {\tt (R)} is a relation on {\tt X}. By default {\tt X} defaults to {\tt nat} and {\tt R} to {\tt lt}. \item {\tt wf R x} which is equivalent to {\tt measure x (R)}. \end{itemize} \paragraph{Caution} When defining structurally recursive functions, the generated obligations should have the prototype of the currently defined functional in their context. In this case, the obligations should be transparent (e.g. defined using {\tt Defined}) so that the guardedness condition on recursive calls can be checked by the kernel's type-checker. There is an optimization in the generation of obligations which gets rid of the hypothesis corresponding to the functionnal when it is not necessary, so that the obligation can be declared opaque (e.g. using {\tt Qed}). However, as soon as it appears in the context, the proof of the obligation is \emph{required} to be declared transparent. No such problems arise when using measures or well-founded recursion. \subsection{\tt Program Lemma {\ident} : type. \comindex{Program Lemma} \label{ProgramLemma}} The \Russell\ language can also be used to type statements of logical properties. It will generate obligations, try to solve them automatically and fail if some unsolved obligations remain. In this case, one can first define the lemma's statement using {\tt Program Definition} and use it as the goal afterwards. Otherwise the proof will be started with the elobarted version as a goal. The {\tt Program} prefix can similarly be used as a prefix for {\tt Variable}, {\tt Hypothesis}, {\tt Axiom} etc... \section{Solving obligations} The following commands are available to manipulate obligations. 
The optional identifier is used when multiple functions have unsolved obligations (e.g. when defining mutually recursive blocks). The optional tactic is replaced by the default one if not specified. \begin{itemize} \item {\tt [Local|Global] Obligation Tactic := \tacexpr}\comindex{Obligation Tactic} Sets the default obligation solving tactic applied to all obligations automatically, whether to solve them or when starting to prove one, e.g. using {\tt Next}. Local makes the setting last only for the current module. Inside sections, local is the default. \item {\tt Show Obligation Tactic}\comindex{Show Obligation Tactic} Displays the current default tactic. \item {\tt Obligations [of \ident]}\comindex{Obligations} Displays all remaining obligations. \item {\tt Obligation num [of \ident]}\comindex{Obligation} Start the proof of obligation {\tt num}. \item {\tt Next Obligation [of \ident]}\comindex{Next Obligation} Start the proof of the next unsolved obligation. \item {\tt Solve Obligations [of \ident] [using \tacexpr]}\comindex{Solve Obligations} Tries to solve each obligation of \ident using the given tactic or the default one. \item {\tt Solve All Obligations [using \tacexpr]} Tries to solve each obligation of every program using the given tactic or the default one (useful for mutually recursive definitions). \item {\tt Admit Obligations [of \ident]}\comindex{Admit Obligations} Admits all obligations (does not work with structurally recursive programs). \item {\tt Preterm [of \ident]}\comindex{Preterm} Shows the term that will be fed to the kernel once the obligations are solved. Useful for debugging. \item {\tt Set Transparent Obligations}\comindex{Set Transparent Obligations} Control whether all obligations should be declared as transparent (the default), or if the system should infer which obligations can be declared opaque. \end{itemize} The module {\tt Coq.Program.Tactics} defines the default tactic for solving obligations called {\tt program\_simpl}. Importing {\tt Coq.Program.Program} also adds some useful notations, as documented in the file itself. \section{Frequently Asked Questions \label{ProgramFAQ}} \begin{itemize} \item {Ill-formed recursive definitions} This error can happen when one tries to define a function by structural recursion on a subset object, which means the Coq function looks like: \verb$Program Fixpoint f (x : A | P) := match x with A b => f b end.$ Supposing $b : A$, the argument at the recursive call to f is not a direct subterm of x as b is wrapped inside an {\tt exist} constructor to build an object of type \verb${x : A | P}$. Hence the definition is rejected by the guardedness condition checker. However one can use wellfounded recursion on subset objects like this: \begin{verbatim} Program Fixpoint f (x : A | P) { measure (size x) } := match x with A b => f b end. \end{verbatim} One will then just have to prove that the measure decreases at each recursive call. There are three drawbacks though: \begin{enumerate} \item A measure function has to be defined; \item The reduction is a little more involved, although it works well using lazy evaluation; \item Mutual recursion on the underlying inductive type isn't possible anymore, but nested mutual recursion is always possible. \end{enumerate} \end{itemize} %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% compile-command: "BIBINPUTS=\".\" make QUICK=1 -C ../.. 
doc/refman/Reference-Manual.pdf" %%% End: coq-8.4pl2/doc/refman/coqdoc.tex0000640000175000001440000004263411676124706015661 0ustar notinusers %\newcommand{\Coq}{\textsf{Coq}} \newcommand{\javadoc}{\textsf{javadoc}} \newcommand{\ocamldoc}{\textsf{ocamldoc}} \newcommand{\coqdoc}{\textsf{coqdoc}} \newcommand{\texmacs}{\TeX{}macs} \newcommand{\monurl}[1]{#1} %HEVEA\renewcommand{\monurl}[1]{\ahref{#1}{#1}} %\newcommand{\lnot}{not} % Hevea handles these symbols nicely %\newcommand{\lor}{or} %\newcommand{\land}{\&} %%% attention : -- dans un argument de \texttt est affich comme un %%% seul - d'o l'utilisation de la macro suivante \newcommand{\mm}{\symbol{45}\symbol{45}} \coqdoc\ is a documentation tool for the proof assistant \Coq, similar to \javadoc\ or \ocamldoc. The task of \coqdoc\ is \begin{enumerate} \item to produce a nice \LaTeX\ and/or HTML document from the \Coq\ sources, readable for a human and not only for the proof assistant; \item to help the user navigating in his own (or third-party) sources. \end{enumerate} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Principles} Documentation is inserted into \Coq\ files as \emph{special comments}. Thus your files will compile as usual, whether you use \coqdoc\ or not. \coqdoc\ presupposes that the given \Coq\ files are well-formed (at least lexically). Documentation starts with \texttt{(**}, followed by a space, and ends with the pending \texttt{*)}. The documentation format is inspired by Todd~A.~Coram's \emph{Almost Free Text (AFT)} tool: it is mainly ASCII text with some syntax-light controls, described below. \coqdoc\ is robust: it shouldn't fail, whatever the input is. But remember: ``garbage in, garbage out''. \paragraph{\Coq\ material inside documentation.} \Coq\ material is quoted between the delimiters \texttt{[} and \texttt{]}. Square brackets may be nested, the inner ones being understood as being part of the quoted code (thus you can quote a term like \texttt{fun x => u} by writing \texttt{[fun x => u]}). Inside quotations, the code is pretty-printed in the same way as it is in code parts. Pre-formatted vernacular is enclosed by \texttt{[[} and \texttt{]]}. The former must be followed by a newline and the latter must follow a newline. \paragraph{Pretty-printing.} \coqdoc\ uses different faces for identifiers and keywords. The pretty-printing of \Coq\ tokens (identifiers or symbols) can be controlled using one of the following commands: \begin{alltt} (** printing \emph{token} %...\LaTeX...% #...HTML...# *) \end{alltt} or \begin{alltt} (** printing \emph{token} $...\LaTeX\ math...$ #...HTML...# *) \end{alltt} It gives the \LaTeX\ and HTML texts to be produced for the given \Coq\ token. One of the \LaTeX\ or HTML text may be omitted, causing the default pretty-printing to be used for this token. The printing for one token can be removed with \begin{alltt} (** remove printing \emph{token} *) \end{alltt} Initially, the pretty-printing table contains the following mapping: \begin{center} \begin{tabular}{ll@{\qquad\qquad}ll@{\qquad\qquad}ll@{\qquad\qquad}} \verb!->! & $\rightarrow$ & \verb!<-! & $\leftarrow$ & \verb|*| & $\times$ \\ \verb|<=| & $\le$ & \verb|>=| & $\ge$ & \verb|=>| & $\Rightarrow$ \\ \verb|<>| & $\not=$ & \verb|<->| & $\leftrightarrow$ & \verb!|-! & $\vdash$ \\ \verb|\/| & $\lor$ & \verb|/\| & $\land$ & \verb|~| & $\lnot$ \end{tabular} \end{center} Any of these can be overwritten or suppressed using the \texttt{printing} commands. 
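For instance, the following special comments (a sketch of the syntax
described above; the chosen renderings are only illustrative) override
the rendering of \verb|*| and suppress the predefined rule for
\verb|->|:
\begin{verbatim}
(** printing * %\ensuremath{\cdot}% #&middot;# *)
(** remove printing -> *)
\end{verbatim}
After the first declaration, every occurrence of \verb|*| in a \Coq{}
code fragment is typeset as a middle dot; after the second, \verb|->| is
no longer replaced by $\rightarrow$.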
Important note: the recognition of tokens is done by a (ocaml)lex automaton and thus applies the longest-match rule. For instance, \verb!->~! is recognized as a single token, where \Coq\ sees two tokens. It is the responsibility of the user to insert space between tokens \emph{or} to give pretty-printing rules for the possible combinations, e.g. \begin{verbatim} (** printing ->~ %\ensuremath{\rightarrow\lnot}% *) \end{verbatim} \paragraph{Sections.} Sections are introduced by 1 to 4 leading stars (i.e. at the beginning of the line) followed by a space. One star is a section, two stars a sub-section, etc. The section title is given on the remaining of the line. Example: \begin{verbatim} (** * Well-founded relations In this section, we introduce... *) \end{verbatim} %TODO \paragraph{Fonts.} \paragraph{Lists.} List items are introduced by a leading dash. \coqdoc\ uses whitespace to determine the depth of a new list item and which text belongs in which list items. A list ends when a line of text starts at or before the level of indenting of the list's dash. A list item's dash must always be the first non-space character on its line (so, in particular, a list can not begin on the first line of a comment - start it on the second line instead). Example: \begin{verbatim} We go by induction on [n]: - If [n] is 0... - If [n] is [S n'] we require... two paragraphs of reasoning, and two subcases: - In the first case... - In the second case... So the theorem holds. \end{verbatim} \paragraph{Rules.} More than 4 leading dashes produce an horizontal rule. \paragraph{Emphasis.} Text can be italicized by placing it in underscores. A non-identifier character must precede the leading underscore and follow the trailing underscore, so that uses of underscores in names aren't mistaken for emphasis. Usually, these are spaces or punctuation. \begin{verbatim} This sentence contains some _emphasized text_. \end{verbatim} \paragraph{Escaping to \LaTeX\ and HTML.} Pure \LaTeX\ or HTML material can be inserted using the following escape sequences: \begin{itemize} \item \verb+$...LaTeX stuff...$+ inserts some \LaTeX\ material in math mode. Simply discarded in HTML output. \item \verb+%...LaTeX stuff...%+ inserts some \LaTeX\ material. Simply discarded in HTML output. \item \verb+#...HTML stuff...#+ inserts some HTML material. Simply discarded in \LaTeX\ output. \end{itemize} Note: to simply output the characters \verb+$+, \verb+%+ and \verb+#+ and escaping their escaping role, these characters must be doubled. \paragraph{Verbatim.} Verbatim material is introduced by a leading \verb+<<+ and closed by \verb+>>+ at the beginning of a line. Example: \begin{verbatim} Here is the corresponding caml code: << let rec fact n = if n <= 1 then 1 else n * fact (n-1) >> \end{verbatim} \paragraph{Hyperlinks.} Hyperlinks can be inserted into the HTML output, so that any identifier is linked to the place of its definition. \texttt{coqc \emph{file}.v} automatically dumps localization information in \texttt{\emph{file}.glob} or appends it to a file specified using option \texttt{\mm{}dump-glob \emph{file}}. Take care of erasing this global file, if any, when starting the whole compilation process. Then invoke \texttt{coqdoc} or \texttt{coqdoc \mm{}glob-from \emph{file}} to tell \coqdoc\ to look for name resolutions into the file \texttt{\emph{file}} (it will look in \texttt{\emph{file}.glob} by default). Identifiers from the \Coq\ standard library are linked to the \Coq\ web site at \url{http://coq.inria.fr/library/}. 
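A typical two-step invocation therefore looks like this (a sketch;
\texttt{Foo.v} is a placeholder file name):
\begin{verbatim}
coqc Foo.v                                # also produces Foo.glob
coqdoc --html --glob-from Foo.glob Foo.v
\end{verbatim}
In the generated HTML, identifiers defined in \texttt{Foo.v} are
hyperlinked to their definitions, while identifiers from the standard
library keep pointing to the \Coq{} web site.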
This behavior can be changed using command line options \url{--no-externals} and \url{--coqlib}; see below. \paragraph{Hiding / Showing parts of the source.} Some parts of the source can be hidden using command line options \texttt{-g} and \texttt{-l} (see below), or using such comments: \begin{alltt} (* begin hide *) \emph{some Coq material} (* end hide *) \end{alltt} Conversely, some parts of the source which would be hidden can be shown using such comments: \begin{alltt} (* begin show *) \emph{some Coq material} (* end show *) \end{alltt} The latter cannot be used around some inner parts of a proof, but can be used around a whole proof. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Usage} \coqdoc\ is invoked on a shell command line as follows: \begin{displaymath} \texttt{coqdoc }<\textit{options and files}> \end{displaymath} Any command line argument which is not an option is considered to be a file (even if it starts with a \verb!-!). \Coq\ files are identified by the suffixes \verb!.v! and \verb!.g! and \LaTeX\ files by the suffix \verb!.tex!. \begin{description} \item[HTML output] ~\par This is the default output. One HTML file is created for each \Coq\ file given on the command line, together with a file \texttt{index.html} (unless option \texttt{-no-index} is passed). The HTML pages use a style sheet named \texttt{style.css}. Such a file is distributed with \coqdoc. \item[\LaTeX\ output] ~\par A single \LaTeX\ file is created, on standard output. It can be redirected to a file with option \texttt{-o}. The order of files on the command line is kept in the final document. \LaTeX\ files given on the command line are copied `as is' in the final document . DVI and PostScript can be produced directly with the options \texttt{-dvi} and \texttt{-ps} respectively. \item[\texmacs\ output] ~\par To translate the input files to \texmacs\ format, to be used by the \texmacs\ Coq interface (see \url{http://www-sop.inria.fr/lemme/Philippe.Audebaud/tmcoq/}). \end{description} \subsubsection*{Command line options} \paragraph{Overall options} \begin{description} \item[\texttt{\mm{}html}] ~\par Select a HTML output. \item[\texttt{\mm{}latex}] ~\par Select a \LaTeX\ output. \item[\texttt{\mm{}dvi}] ~\par Select a DVI output. \item[\texttt{\mm{}ps}] ~\par Select a PostScript output. \item[\texttt{\mm{}texmacs}] ~\par Select a \texmacs\ output. \item[\texttt{--stdout}] ~\par Write output to stdout. \item[\texttt{-o }\textit{file}, \texttt{\mm{}output }\textit{file}] ~\par Redirect the output into the file `\textit{file}' (meaningless with \texttt{-html}). \item[\texttt{-d }\textit{dir}, \texttt{\mm{}directory }\textit{dir}] ~\par Output files into directory `\textit{dir}' instead of current directory (option \texttt{-d} does not change the filename specified with option \texttt{-o}, if any). \item[\texttt{\mm{}body-only}] ~\par Suppress the header and trailer of the final document. Thus, you can insert the resulting document into a larger one. \item[\texttt{-p} \textit{string}, \texttt{\mm{}preamble} \textit{string}]~\par Insert some material in the \LaTeX\ preamble, right before \verb!\begin{document}! (meaningless with \texttt{-html}). \item[\texttt{\mm{}vernac-file }\textit{file}, \texttt{\mm{}tex-file }\textit{file}] ~\par Considers the file `\textit{file}' respectively as a \verb!.v! (or \verb!.g!) file or a \verb!.tex! file. \item[\texttt{\mm{}files-from }\textit{file}] ~\par Read file names to process in file `\textit{file}' as if they were given on the command line. 
Useful for program sources split up into several directories. \item[\texttt{-q}, \texttt{\mm{}quiet}] ~\par Be quiet. Do not print anything except errors. \item[\texttt{-h}, \texttt{\mm{}help}] ~\par Give a short summary of the options and exit. \item[\texttt{-v}, \texttt{\mm{}version}] ~\par Print the version and exit. \end{description} \paragraph{Index options} Default behavior is to build an index, for the HTML output only, into \texttt{index.html}. \begin{description} \item[\texttt{\mm{}no-index}] ~\par Do not output the index. \item[\texttt{\mm{}multi-index}] ~\par Generate one page for each category and each letter in the index, together with a top page \texttt{index.html}. \item[\texttt{\mm{}index }\textit{string}] ~\par Make the filename of the index \textit{string} instead of ``index''. Useful since ``index.html'' is special. \end{description} \paragraph{Table of contents option} \begin{description} \item[\texttt{-toc}, \texttt{\mm{}table-of-contents}] ~\par Insert a table of contents. For a \LaTeX\ output, it inserts a \verb!\tableofcontents! at the beginning of the document. For a HTML output, it builds a table of contents into \texttt{toc.html}. \item[\texttt{\mm{}toc-depth }\textit{int}] ~\par Only include headers up to depth \textit{int} in the table of contents. \end{description} \paragraph{Hyperlinks options} \begin{description} \item[\texttt{\mm{}glob-from }\textit{file}] ~\par Make references using \Coq\ globalizations from file \textit{file}. (Such globalizations are obtained with \Coq\ option \texttt{-dump-glob}). \item[\texttt{\mm{}no-externals}] ~\par Do not insert links to the \Coq\ standard library. \item[\texttt{\mm{}external }\textit{url}~\textit{coqdir}] ~\par Use given URL for linking references whose name starts with prefix \textit{coqdir}. \item[\texttt{\mm{}coqlib }\textit{url}] ~\par Set base URL for the \Coq\ standard library (default is \url{http://coq.inria.fr/library/}). This is equivalent to \texttt{\mm{}external }\textit{url}~\texttt{Coq}. \item[\texttt{-R }\textit{dir }\textit{coqdir}] ~\par Map physical directory \textit{dir} to \Coq\ logical directory \textit{coqdir} (similarly to \Coq\ option \texttt{-R}). Note: option \texttt{-R} only has effect on the files \emph{following} it on the command line, so you will probably need to put this option first. \end{description} \paragraph{Title options} \begin{description} \item[\texttt{-s }, \texttt{\mm{}short}] ~\par Do not insert titles for the files. The default behavior is to insert a title like ``Library Foo'' for each file. \item[\texttt{\mm{}lib-name }\textit{string}] ~\par Print ``\textit{string} Foo'' instead of ``Library Foo'' in titles. For example ``Chapter'' and ``Module'' are reasonable choices. \item[\texttt{\mm{}no-lib-name}] ~\par Print just ``Foo'' instead of ``Library Foo'' in titles. \item[\texttt{\mm{}lib-subtitles}] ~\par Look for library subtitles. When enabled, the beginning of each file is checked for a comment of the form: \begin{alltt} (** * ModuleName : text *) \end{alltt} where \texttt{ModuleName} must be the name of the file. If it is present, the \texttt{text} is used as a subtitle for the module in appropriate places. \item[\texttt{-t }\textit{string}, \texttt{\mm{}title }\textit{string}] ~\par Set the document title. \end{description} \paragraph{Contents options} \begin{description} \item[\texttt{-g}, \texttt{\mm{}gallina}] ~\par Do not print proofs. \item[\texttt{-l}, \texttt{\mm{}light}] ~\par Light mode. 
Suppress proofs (as with \texttt{-g}) and the following commands: \begin{itemize} \item {}[\texttt{Recursive}] \texttt{Tactic Definition} \item \texttt{Hint / Hints} \item \texttt{Require} \item \texttt{Transparent / Opaque} \item \texttt{Implicit Argument / Implicits} \item \texttt{Section / Variable / Hypothesis / End} \end{itemize} \end{description} The behavior of options \texttt{-g} and \texttt{-l} can be locally overridden using the \texttt{(* begin show *)} \dots\ \texttt{(* end show *)} environment (see above). There are a few options to drive the parsing of comments: \begin{description} \item[\texttt{\mm{}parse-comments}] ~\par Parses regular comments delimited by \texttt{(*} and \texttt{*)} as well. They are typeset inline. \item[\texttt{\mm{}plain-comments}] ~\par Do not interpret comments, simply copy them as plain-text. \item[\texttt{\mm{}interpolate}] ~\par Use the globalization information to typeset identifiers appearing in \Coq{} escapings inside comments. \end{description} \paragraph{Language options} Default behavior is to assume ASCII 7 bits input files. \begin{description} \item[\texttt{-latin1}, \texttt{\mm{}latin1}] ~\par Select ISO-8859-1 input files. It is equivalent to \texttt{--inputenc latin1 --charset iso-8859-1}. \item[\texttt{-utf8}, \texttt{\mm{}utf8}] ~\par Select UTF-8 (Unicode) input files. It is equivalent to \texttt{--inputenc utf8 --charset utf-8}. \LaTeX\ UTF-8 support can be found at \url{http://www.ctan.org/tex-archive/macros/latex/contrib/supported/unicode/}. \item[\texttt{\mm{}inputenc} \textit{string}] ~\par Give a \LaTeX\ input encoding, as an option to \LaTeX\ package \texttt{inputenc}. \item[\texttt{\mm{}charset} \textit{string}] ~\par Specify the HTML character set, to be inserted in the HTML header. \end{description} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection[The coqdoc \LaTeX{} style file]{The coqdoc \LaTeX{} style file\label{section:coqdoc.sty}} In case you choose to produce a document without the default \LaTeX{} preamble (by using option \verb|--no-preamble|), then you must insert into your own preamble the command \begin{quote} \verb|\usepackage{coqdoc}| \end{quote} The package optionally takes the argument \verb|[color]| to typeset identifiers with colors (this requires the \verb|xcolor| package). Then you may alter the rendering of the document by redefining some macros: \begin{description} \item[\texttt{coqdockw}, \texttt{coqdocid}, \ldots] ~ The one-argument macros for typesetting keywords and identifiers. Defaults are sans-serif for keywords and italic for identifiers. For example, if you would like a slanted font for keywords, you may insert \begin{verbatim} \renewcommand{\coqdockw}[1]{\textsl{#1}} \end{verbatim} anywhere between \verb|\usepackage{coqdoc}| and \verb|\begin{document}|. \item[\texttt{coqdocmodule}] ~ One-argument macro for typesetting the title of a \verb|.v| file. Default is \begin{verbatim} \newcommand{\coqdocmodule}[1]{\section*{Module #1}} \end{verbatim} and you may redefine it using \verb|\renewcommand|. \end{description} coq-8.4pl2/doc/refman/menu.html0000640000175000001440000000150510443545033015500 0ustar notinusers
      Cover page
      Table of contents
      Bibliography
      Global Index
      Tactics Index
      Vernacular Commands Index
      Index of Error Messages
      coq-8.4pl2/doc/refman/index.html0000640000175000001440000000027310774445200015646 0ustar notinusers The Coq Proof Assistant Reference Manual coq-8.4pl2/doc/refman/RefMan-decl.tex0000640000175000001440000005504711205007222016445 0ustar notinusers\newcommand{\DPL}{Mathematical Proof Language} \chapter{The \DPL\label{DPL}\index{DPL}} \section{Introduction} \subsection{Foreword} In this chapter, we describe an alternative language that may be used to do proofs using the Coq proof assistant. The language described here uses the same objects (proof-terms) as Coq, but it differs in the way proofs are described. This language was created by Pierre Corbineau at the Radboud University of Nijmegen, The Netherlands. The intent is to provide language where proofs are less formalism-{} and implementation-{}sensitive, and in the process to ease a bit the learning of computer-{}aided proof verification. \subsection{What is a declarative proof ?{}} In vanilla Coq, proofs are written in the imperative style: the user issues commands that transform a so called proof state until it reaches a state where the proof is completed. In the process, the user mostly described the transitions of this system rather than the intermediate states it goes through. The purpose of a declarative proof language is to take the opposite approach where intermediate states are always given by the user, but the transitions of the system are automated as much as possible. \subsection{Well-formedness and Completeness} The \DPL{} introduces a notion of well-formed proofs which are weaker than correct (and complete) proofs. Well-formed proofs are actually proof script where only the reasoning is incomplete. All the other aspects of the proof are correct: \begin{itemize} \item All objects referred to exist where they are used \item Conclusion steps actually prove something related to the conclusion of the theorem (the {\tt thesis}. \item Hypothesis introduction steps are done when the goal is an implication with a corresponding assumption. \item Sub-objects in the elimination steps for tuples are correct sub-objects of the tuple being decomposed. \item Patterns in case analysis are type-correct, and induction is well guarded. \end{itemize} \subsection{Note for tactics users} This section explain what differences the casual Coq user will experience using the \DPL . \begin{enumerate} \item The focusing mechanism is constrained so that only one goal at a time is visible. \item Giving a statement that Coq cannot prove does not produce an error, only a warning: this allows to go on with the proof and fill the gap later. \item Tactics can still be used for justifications and after {\texttt{escape}}. \end{enumerate} \subsection{Compatibility} The \DPL{} is available for all Coq interfaces that use text-based interaction, including: \begin{itemize} \item the command-{}line toplevel {\texttt{coqtop}} \item the native GUI {\texttt{coqide}} \item the Proof-{}General emacs mode \item Cezary Kaliszyk'{}s Web interface \item L.E. Mamane'{}s tmEgg TeXmacs plugin \end{itemize} However it is not supported by structured editors such as PCoq. \section{Syntax} Here is a complete formal description of the syntax for DPL commands. 
\begin{figure}[htbp] \begin{centerframe} \begin{tabular}{lcl@{\qquad}r} instruction & ::= & {\tt proof} \\ & $|$ & {\tt assume } \nelist{statement}{\tt and} \zeroone{[{\tt and } \{{\tt we have}\}-clause]} \\ & $|$ & \{{\tt let},{\tt be}\}-clause \\ & $|$ & \{{\tt given}\}-clause \\ & $|$ & \{{\tt consider}\}-clause {\tt from} term \\ & $|$ & ({\tt have} $|$ {\tt then} $|$ {\tt thus} $|$ {\tt hence}]) statement justification \\ & $|$ & \zeroone{\tt thus} ($\sim${\tt =}|{\tt =}$\sim$) \zeroone{\ident{\tt :}}\term\relax justification \\ & $|$ & {\tt suffices} (\{{\tt to have}\}-clause $|$ \nelist{statement}{\tt and } \zeroone{{\tt and} \{{\tt to have}\}-clause})\\ & & {\tt to show} statement justification \\ & $|$ & ({\tt claim} $|$ {\tt focus on}) statement \\ & $|$ & {\tt take} \term \\ & $|$ & {\tt define} \ident \sequence{var}{,} {\tt as} \term\\ & $|$ & {\tt reconsider} (\ident $|$ {\tt thesis}) {\tt as} type\\ & $|$ & {\tt per} ({\tt cases}$|${\tt induction}) {\tt on} \term \\ & $|$ & {\tt per cases of} type justification \\ & $|$ & {\tt suppose} \zeroone{\nelist{ident}{,} {\tt and}}~ {\tt it is }pattern\\ & & \zeroone{{\tt such that} \nelist{statement} {\tt and} \zeroone{{\tt and} \{{\tt we have}\}-clause}} \\ & $|$ & {\tt end} ({\tt proof} $|$ {\tt claim} $|$ {\tt focus} $|$ {\tt cases} $|$ {\tt induction}) \\ & $|$ & {\tt escape} \\ & $|$ & {\tt return} \medskip \\ \{$\alpha,\beta$\}-clause & ::=& $\alpha$ \nelist{var}{,}~ $\beta$ {\tt such that} \nelist{statement}{\tt and } \\ & & \zeroone{{\tt and } \{$\alpha,\beta$\}-clause} \medskip\\ statement & ::= & \zeroone{\ident {\tt :}} type \\ & $|$ & {\tt thesis} \\ & $|$ & {\tt thesis for} \ident \medskip \\ var & ::= & \ident \zeroone{{\tt :} type} \medskip \\ justification & ::= & \zeroone{{\tt by} ({\tt *} | \nelist{\term}{,})} ~\zeroone{{\tt using} tactic} \\ \end{tabular} \end{centerframe} \caption{Syntax of mathematical proof commands} \end{figure} The lexical conventions used here follows those of section \ref{lexical}. Conventions:\begin{itemize} \item {\texttt{<{}tactic>{}}} stands for an Coq tactic. \end{itemize} \subsection{Temporary names} In proof commands where an optional name is asked for, omitting the name will trigger the creation of a fresh temporary name (e.g. for a hypothesis). Temporary names always start with an undescore '\_' character (e.g. {\tt \_hyp0}). Temporary names have a lifespan of one command: they get erased after the next command. They can however be safely in the step after their creation. \section{Language description} \subsection{Starting and Ending a mathematical proof} The standard way to use the \DPL is to first state a {\texttt{Lemma/Theorem/Definition}} and then use the {\texttt{proof}} command to switch the current subgoal to mathematical mode. After the proof is completed, the {\texttt{end proof}} command will close the mathematical proof. If any subgoal remains to be proved, they will be displayed using the usual Coq display. \begin{coq_example} Theorem this_is_trivial: True. proof. thus thesis. end proof. Qed. \end{coq_example} The {\texttt{proof}} command only applies to \emph{one subgoal}, thus if several sub-goals are already present, the {\texttt{proof .. end proof}} sequence has to be used several times. \begin{coq_eval} Theorem T: (True /\ True) /\ True. split. split. \end{coq_eval} \begin{coq_example} Show. proof. (* first subgoal *) thus thesis. end proof. trivial. (* second subgoal *) proof. (* third subgoal *) thus thesis. end proof. \end{coq_example} \begin{coq_eval} Abort. 
\end{coq_eval} As with all other block structures, the {\texttt{end proof}} command assumes that your proof is complete. If not, executing it will be equivalent to admitting that the statement is proved: A warning will be issued and you will not be able to run the {\texttt{Qed}} command. Instead, you can run {\texttt{Admitted}} if you wish to start another theorem and come back later. \begin{coq_example} Theorem this_is_not_so_trivial: False. proof. end proof. (* here a warning is issued *) Qed. (* fails : the proof in incomplete *) Admitted. (* Oops! *) \end{coq_example} \begin{coq_eval} Reset this_is_not_so_trivial. \end{coq_eval} \subsection{Switching modes} When writing a mathematical proof, you may wish to use procedural tactics at some point. One way to do so is to write a using-{}phrase in a deduction step (see section~\ref{justifications}). The other way is to use an {\texttt{escape...return}} block. \begin{coq_eval} Theorem T: True. proof. \end{coq_eval} \begin{coq_example} Show. escape. auto. return. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} The return statement expects all subgoals to be closed, otherwise a warning is issued and the proof cannot be saved anymore. It is possible to use the {\texttt{proof}} command inside an {\texttt{escape...return}} block, thus nesting a mathematical proof inside a procedural proof inside a mathematical proof ... \subsection{Computation steps} The {\tt reconsider ... as} command allows to change the type of a hypothesis or of {\tt thesis} to a convertible one. \begin{coq_eval} Theorem T: let a:=false in let b:= true in ( if a then True else False -> if b then True else False). intros a b. proof. assume H:(if a then True else False). \end{coq_eval} \begin{coq_example} Show. reconsider H as False. reconsider thesis as True. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Deduction steps} The most common instruction in a mathematical proof is the deduction step: it asserts a new statement (a formula/type of the \CIC) and tries to prove it using a user-provided indication : the justification. The asserted statement is then added as a hypothesis to the proof context. \begin{coq_eval} Theorem T: forall x, x=2 -> 2+x=4. proof. let x be such that H:(x=2). \end{coq_eval} \begin{coq_example} Show. have H':(2+x=2+2) by H. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} It is very often the case that the justifications uses the last hypothesis introduced in the context, so the {\tt then} keyword can be used as a shortcut, e.g. if we want to do the same as the last example : \begin{coq_eval} Theorem T: forall x, x=2 -> 2+x=4. proof. let x be such that H:(x=2). \end{coq_eval} \begin{coq_example} Show. then (2+x=2+2). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} In this example, you can also see the creation of a temporary name {\tt \_fact}. \subsection{Iterated equalities} A common proof pattern when doing a chain of deductions, is to do multiple rewriting steps over the same term, thus proving the corresponding equalities. The iterated equalities are a syntactic support for this kind of reasoning: \begin{coq_eval} Theorem T: forall x, x=2 -> x + x = x * x. proof. let x be such that H:(x=2). \end{coq_eval} \begin{coq_example} Show. have (4 = 4). ~= (2 * 2). ~= (x * x) by H. =~ (2 + 2). =~ H':(x + x) by H. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Notice that here we use temporary names heavily. 
\subsection{Subproofs} When an intermediate step in a proof gets too complicated or involves a well contained set of intermediate deductions, it can be useful to insert its proof as a subproof of the current proof. this is done by using the {\tt claim ... end claim} pair of commands. \begin{coq_eval} Theorem T: forall x, x + x = x * x -> x = 0 \/ x = 2. proof. let x be such that H:(x + x = x * x). \end{coq_eval} \begin{coq_example} Show. claim H':((x - 2) * x = 0). \end{coq_example} A few steps later ... \begin{coq_example} thus thesis. end claim. \end{coq_example} Now the rest of the proof can happen. \begin{coq_eval} Abort. \end{coq_eval} \subsection{Conclusion steps} The commands described above have a conclusion counterpart, where the new hypothesis is used to refine the conclusion. \begin{figure}[b] \centering \begin{tabular}{c|c|c|c|c|} X & \,simple\, & \,with previous step\, & \,opens sub-proof\, & \,iterated equality\, \\ \hline intermediate step & {\tt have} & {\tt then} & {\tt claim} & {\tt $\sim$=/=$\sim$}\\ conclusion step & {\tt thus} & {\tt hence} & {\tt focus on} & {\tt thus $\sim$=/=$\sim$}\\ \hline \end{tabular} \caption{Correspondence between basic forward steps and conclusion steps} \end{figure} Let us begin with simple examples : \begin{coq_eval} Theorem T: forall (A B:Prop), A -> B -> A /\ B. intros A B HA HB. proof. \end{coq_eval} \begin{coq_example} Show. hence B. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} In the next example, we have to use {\tt thus} because {\tt HB} is no longer the last hypothesis. \begin{coq_eval} Theorem T: forall (A B C:Prop), A -> B -> C -> A /\ B /\ C. intros A B C HA HB HC. proof. \end{coq_eval} \begin{coq_example} Show. thus B by HB. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} The command fails the refinement process cannot find a place to fit the object in a proof of the conclusion. \begin{coq_eval} Theorem T: forall (A B C:Prop), A -> B -> C -> A /\ B. intros A B C HA HB HC. proof. \end{coq_eval} \begin{coq_example} Show. hence C. (* fails *) \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} The refinement process may induce non reversible choices, e.g. when proving a disjunction it may {\it choose} one side of the disjunction. \begin{coq_eval} Theorem T: forall (A B:Prop), B -> A \/ B. intros A B HB. proof. \end{coq_eval} \begin{coq_example} Show. hence B. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} In this example you can see that the right branch was chosen since {\tt D} remains to be proved. \begin{coq_eval} Theorem T: forall (A B C D:Prop), C -> D -> (A /\ B) \/ (C /\ D). intros A B C D HC HD. proof. \end{coq_eval} \begin{coq_example} Show. thus C by HC. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Now for existential statements, we can use the {\tt take} command to choose {\tt 2} as an explicit witness of existence. \begin{coq_eval} Theorem T: forall (P:nat -> Prop), P 2 -> exists x,P x. intros P HP. proof. \end{coq_eval} \begin{coq_example} Show. take 2. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} It is also possible to prove the existence directly. \begin{coq_eval} Theorem T: forall (P:nat -> Prop), P 2 -> exists x,P x. intros P HP. proof. \end{coq_eval} \begin{coq_example} Show. hence (P 2). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Here a more involved example where the choice of {\tt P 2} propagates the choice of {\tt 2} to another part of the formula. 
\begin{coq_eval} Theorem T: forall (P:nat -> Prop) (R:nat -> nat -> Prop), P 2 -> R 0 2 -> exists x, exists y, P y /\ R x y. intros P R HP HR. proof. \end{coq_eval} \begin{coq_example} Show. thus (P 2) by HP. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Now, an example with the {\tt suffices} command. {\tt suffices} is a sort of dual for {\tt have}: it allows to replace the conclusion (or part of it) by a sufficient condition. \begin{coq_eval} Theorem T: forall (A B:Prop) (P:nat -> Prop), (forall x, P x -> B) -> A -> A /\ B. intros A B P HP HA. proof. \end{coq_eval} \begin{coq_example} Show. suffices to have x such that HP':(P x) to show B by HP,HP'. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Finally, an example where {\tt focus} is handy : local assumptions. \begin{coq_eval} Theorem T: forall (A:Prop) (P:nat -> Prop), P 2 -> A -> A /\ (forall x, x = 2 -> P x). intros A P HP HA. proof. \end{coq_eval} \begin{coq_example} Show. focus on (forall x, x = 2 -> P x). let x be such that (x = 2). hence thesis by HP. end focus. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Declaring an Abbreviation} In order to shorten long expressions, it is possible to use the {\tt define ... as ...} command to give a name to recurring expressions. \begin{coq_eval} Theorem T: forall x, x = 0 -> x + x = x * x . proof. let x be such that H:(x = 0). \end{coq_eval} \begin{coq_example} Show. define sqr x as (x * x). reconsider thesis as (x + x = sqr x). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Introduction steps} When the {\tt thesis} consists of a hypothetical formula (implication or universal quantification (e.g. \verb+A -> B+) , it is possible to assume the hypothetical part {\tt A} and then prove {\tt B}. In the \DPL{}, this comes in two syntactic flavors that are semantically equivalent : {\tt let} and {\tt assume}. Their syntax is designed so that {\tt let} works better for universal quantifiers and {\tt assume} for implications. \begin{coq_eval} Theorem T: forall (P:nat -> Prop), forall x, P x -> P x. proof. let P:(nat -> Prop). \end{coq_eval} \begin{coq_example} Show. let x:nat. assume HP:(P x). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} In the {\tt let} variant, the type of the assumed object is optional provided it can be deduced from the command. The objects introduced by let can be followed by assumptions using {\tt such that}. \begin{coq_eval} Theorem T: forall (P:nat -> Prop), forall x, P x -> P x. proof. let P:(nat -> Prop). \end{coq_eval} \begin{coq_example} Show. let x. (* fails because x's type is not clear *) let x be such that HP:(P x). (* here x's type is inferred from (P x) *) \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} In the {\tt assume } variant, the type of the assumed object is mandatory but the name is optional : \begin{coq_eval} Theorem T: forall (P:nat -> Prop), forall x, P x -> P x -> P x. proof. let P:(nat -> Prop). let x:nat. \end{coq_eval} \begin{coq_example} Show. assume (P x). (* temporary name created *) \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} After {\tt such that}, it is also the case : \begin{coq_eval} Theorem T: forall (P:nat -> Prop), forall x, P x -> P x. proof. let P:(nat -> Prop). \end{coq_eval} \begin{coq_example} Show. let x be such that (P x). (* temporary name created *) \end{coq_example} \begin{coq_eval} Abort. 
\end{coq_eval} \subsection{Tuple elimination steps} In the \CIC, many objects dealt with in simple proofs are tuples : pairs , records, existentially quantified formulas. These are so common that the \DPL{} provides a mechanism to extract members of those tuples, and also objects in tuples within tuples within tuples... \begin{coq_eval} Theorem T: forall (P:nat -> Prop) (A:Prop), (exists x, (P x /\ A)) -> A. proof. let P:(nat -> Prop),A:Prop be such that H:(exists x, P x /\ A) . \end{coq_eval} \begin{coq_example} Show. consider x such that HP:(P x) and HA:A from H. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} Here is an example with pairs: \begin{coq_eval} Theorem T: forall p:(nat * nat)%type, (fst p >= snd p) \/ (fst p < snd p). proof. let p:(nat * nat)%type. \end{coq_eval} \begin{coq_example} Show. consider x:nat,y:nat from p. reconsider thesis as (x >= y \/ x < y). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} It is sometimes desirable to combine assumption and tuple decomposition. This can be done using the {\tt given} command. \begin{coq_eval} Theorem T: forall P:(nat -> Prop), (forall n , P n -> P (n - 1)) -> (exists m, P m) -> P 0. proof. let P:(nat -> Prop) be such that HP:(forall n , P n -> P (n - 1)). \end{coq_eval} \begin{coq_example} Show. given m such that Hm:(P m). \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Disjunctive reasoning} In some proofs (most of them usually) one has to consider several cases and prove that the {\tt thesis} holds in all the cases. This is done by first specifying which object will be subject to case distinction (usually a disjunction) using {\tt per cases}, and then specifying which case is being proved by using {\tt suppose}. \begin{coq_eval} Theorem T: forall (A B C:Prop), (A -> C) -> (B -> C) -> (A \/ B) -> C. proof. let A:Prop,B:Prop,C:Prop be such that HAC:(A -> C) and HBC:(B -> C). assume HAB:(A \/ B). \end{coq_eval} \begin{coq_example} per cases on HAB. suppose A. hence thesis by HAC. suppose HB:B. thus thesis by HB,HBC. end cases. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} The proof is well formed (but incomplete) even if you type {\tt end cases} or the next {\tt suppose} before the previous case is proved. If the disjunction is derived from a more general principle, e.g. the excluded middle axiom), it is desirable to just specify which instance of it is being used : \begin{coq_eval} Section Coq. \end{coq_eval} \begin{coq_example} Hypothesis EM : forall P:Prop, P \/ ~ P. \end{coq_example} \begin{coq_eval} Theorem T: forall (A C:Prop), (A -> C) -> (~A -> C) -> C. proof. let A:Prop,C:Prop be such that HAC:(A -> C) and HNAC:(~A -> C). \end{coq_eval} \begin{coq_example} per cases of (A \/ ~A) by EM. suppose (~A). hence thesis by HNAC. suppose A. hence thesis by HAC. end cases. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Proofs per cases} If the case analysis is to be made on a particular object, the script is very similar: it starts with {\tt per cases on }\emph{object} instead. \begin{coq_eval} Theorem T: forall (A C:Prop), (A -> C) -> (~A -> C) -> C. proof. let A:Prop,C:Prop be such that HAC:(A -> C) and HNAC:(~A -> C). \end{coq_eval} \begin{coq_example} per cases on (EM A). suppose (~A). \end{coq_example} \begin{coq_eval} Abort. End Coq. \end{coq_eval} If the object on which a case analysis occurs in the statement to be proved, the command {\tt suppose it is }\emph{pattern} is better suited than {\tt suppose}. 
\emph{pattern} may contain nested patterns with {\tt as} clauses. A detailed description of patterns is to be found in figure \ref{term-syntax-aux}. here is an example. \begin{coq_eval} Theorem T: forall (A B:Prop) (x:bool), (if x then A else B) -> A \/ B. proof. let A:Prop,B:Prop,x:bool. \end{coq_eval} \begin{coq_example} per cases on x. suppose it is true. assume A. hence A. suppose it is false. assume B. hence B. end cases. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Proofs by induction} Proofs by induction are very similar to proofs per cases: they start with {\tt per induction on }{\tt object} and proceed with {\tt suppose it is }\emph{pattern}{\tt and }\emph{induction hypothesis}. The induction hypothesis can be given explicitly or identified by the sub-object $m$ it refers to using {\tt thesis for }\emph{m}. \begin{coq_eval} Theorem T: forall (n:nat), n + 0 = n. proof. let n:nat. \end{coq_eval} \begin{coq_example} per induction on n. suppose it is 0. thus (0 + 0 = 0). suppose it is (S m) and H:thesis for m. then (S (m + 0) = S m). thus =~ (S m + 0). end induction. \end{coq_example} \begin{coq_eval} Abort. \end{coq_eval} \subsection{Justifications}\label{justifications} Intuitively, justifications are hints for the system to understand how to prove the statements the user types in. In the case of this language justifications are made of two components: Justification objects : {\texttt{by}} followed by a comma-{}separated list of objects that will be used by a selected tactic to prove the statement. This defaults to the empty list (the statement should then be tautological). The * wildcard provides the usual tactics behavior: use all statements in local context. However, this wildcard should be avoided since it reduces the robustness of the script. Justification tactic : {\texttt{using}} followed by a Coq tactic that is executed to prove the statement. The default is a solver for (intuitionistic) first-{}order with equality. \section{More details and Formal Semantics} I suggest the users looking for more information have a look at the paper \cite{corbineau08types}. They will find in that paper a formal semantics of the proof state transition induces by mathematical commands. coq-8.4pl2/doc/refman/RefMan-syn.tex0000640000175000001440000013276111742055044016361 0ustar notinusers\chapter[Syntax extensions and interpretation scopes]{Syntax extensions and interpretation scopes\label{Addoc-syntax}} In this chapter, we introduce advanced commands to modify the way {\Coq} parses and prints objects, i.e. the translations between the concrete and internal representations of terms and commands. The main commands are {\tt Notation} and {\tt Infix} which are described in section \ref{Notation}. It also happens that the same symbolic notation is expected in different contexts. To achieve this form of overloading, {\Coq} offers a notion of interpretation scope. This is described in Section~\ref{scopes}. \Rem The commands {\tt Grammar}, {\tt Syntax} and {\tt Distfix} which were present for a while in {\Coq} are no longer available from {\Coq} version 8.0. The underlying AST structure is also no longer available. The functionalities of the command {\tt Syntactic Definition} are still available, see Section~\ref{Abbreviations}. \section[Notations]{Notations\label{Notation} \comindex{Notation}} \subsection{Basic notations} A {\em notation} is a symbolic abbreviation denoting some term or term pattern. 
A typical notation is the use of the infix symbol \verb=/\= to denote the logical conjunction (\texttt{and}). Such a notation is declared by \begin{coq_example*} Notation "A /\ B" := (and A B). \end{coq_example*} The expression \texttt{(and A B)} is the abbreviated term and the string \verb="A /\ B"= (called a {\em notation}) tells how it is symbolically written. A notation is always surrounded by double quotes (excepted when the abbreviation is a single identifier, see \ref{Abbreviations}). The notation is composed of {\em tokens} separated by spaces. Identifiers in the string (such as \texttt{A} and \texttt{B}) are the {\em parameters} of the notation. They must occur at least once each in the denoted term. The other elements of the string (such as \verb=/\=) are the {\em symbols}. An identifier can be used as a symbol but it must be surrounded by simple quotes to avoid the confusion with a parameter. Similarly, every symbol of at least 3 characters and starting with a simple quote must be quoted (then it starts by two single quotes). Here is an example. \begin{coq_example*} Notation "'IF' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3). \end{coq_example*} %TODO quote the identifier when not in front, not a keyword, as in "x 'U' y" ? A notation binds a syntactic expression to a term. Unless the parser and pretty-printer of {\Coq} already know how to deal with the syntactic expression (see \ref{ReservedNotation}), explicit precedences and associativity rules have to be given. \subsection[Precedences and associativity]{Precedences and associativity\index{Precedences} \index{Associativity}} Mixing different symbolic notations in a same text may cause serious parsing ambiguity. To deal with the ambiguity of notations, {\Coq} uses precedence levels ranging from 0 to 100 (plus one extra level numbered 200) and associativity rules. Consider for example the new notation \begin{coq_example*} Notation "A \/ B" := (or A B). \end{coq_example*} Clearly, an expression such as {\tt forall A:Prop, True \verb=/\= A \verb=\/= A \verb=\/= False} is ambiguous. To tell the {\Coq} parser how to interpret the expression, a priority between the symbols \verb=/\= and \verb=\/= has to be given. Assume for instance that we want conjunction to bind more than disjunction. This is expressed by assigning a precedence level to each notation, knowing that a lower level binds more than a higher level. Hence the level for disjunction must be higher than the level for conjunction. Since connectives are the less tight articulation points of a text, it is reasonable to choose levels not so far from the higher level which is 100, for example 85 for disjunction and 80 for conjunction\footnote{which are the levels effectively chosen in the current implementation of {\Coq}}. Similarly, an associativity is needed to decide whether {\tt True \verb=/\= False \verb=/\= False} defaults to {\tt True \verb=/\= (False \verb=/\= False)} (right associativity) or to {\tt (True \verb=/\= False) \verb=/\= False} (left associativity). We may even consider that the expression is not well-formed and that parentheses are mandatory (this is a ``no associativity'')\footnote{ {\Coq} accepts notations declared as no associative but the parser on which {\Coq} is built, namely {\camlpppp}, currently does not implement the no-associativity and replace it by a left associativity; hence it is the same for {\Coq}: no-associativity is in fact left associativity}. 
We don't know of a special convention of the associativity of disjunction and conjunction, let's apply for instance a right associativity (which is the choice of {\Coq}). Precedence levels and associativity rules of notations have to be given between parentheses in a list of modifiers that the \texttt{Notation} command understands. Here is how the previous examples refine. \begin{coq_example*} Notation "A /\ B" := (and A B) (at level 80, right associativity). Notation "A \/ B" := (or A B) (at level 85, right associativity). \end{coq_example*} By default, a notation is considered non associative, but the precedence level is mandatory (except for special cases whose level is canonical). The level is either a number or the mention {\tt next level} whose meaning is obvious. The list of levels already assigned is on Figure~\ref{init-notations}. \subsection{Complex notations} Notations can be made from arbitraly complex symbols. One can for instance define prefix notations. \begin{coq_example*} Notation "~ x" := (not x) (at level 75, right associativity). \end{coq_example*} One can also define notations for incomplete terms, with the hole expected to be inferred at typing time. \begin{coq_example*} Notation "x = y" := (@eq _ x y) (at level 70, no associativity). \end{coq_example*} One can define {\em closed} notations whose both sides are symbols. In this case, the default precedence level for inner subexpression is 200. \begin{coq_eval} Set Printing Depth 50. (********** The following is correct but produces **********) (**** an incompatibility with the reserved notation ********) \end{coq_eval} \begin{coq_example*} Notation "( x , y )" := (@pair _ _ x y) (at level 0). \end{coq_example*} One can also define notations for binders. \begin{coq_eval} Set Printing Depth 50. (********** The following is correct but produces **********) (**** an incompatibility with the reserved notation ********) \end{coq_eval} \begin{coq_example*} Notation "{ x : A | P }" := (sig A (fun x => P)) (at level 0). \end{coq_example*} In the last case though, there is a conflict with the notation for type casts. This last notation, as shown by the command {\tt Print Grammar constr} is at level 100. To avoid \verb=x : A= being parsed as a type cast, it is necessary to put {\tt x} at a level below 100, typically 99. Hence, a correct definition is \begin{coq_example*} Notation "{ x : A | P }" := (sig A (fun x => P)) (at level 0, x at level 99). \end{coq_example*} %This change has retrospectively an effect on the notation for notation %{\tt "{ A } + { B }"}. For the sake of factorization, {\tt A} must be %put at level 99 too, which gives % %\begin{coq_example*} %Notation "{ A } + { B }" := (sumbool A B) (at level 0, A at level 99). %\end{coq_example*} See the next section for more about factorization. \subsection{Simple factorization rules} {\Coq} extensible parsing is performed by Camlp5 which is essentially a LL1 parser. Hence, some care has to be taken not to hide already existing rules by new rules. Some simple left factorization work has to be done. Here is an example. \begin{coq_eval} (********** The next rule for notation _ < _ < _ produces **********) (*** Error: Notation _ < _ < _ is already defined at level 70 ... ***) \end{coq_eval} \begin{coq_example*} Notation "x < y" := (lt x y) (at level 70). Notation "x < y < z" := (x < y /\ y < z) (at level 70). \end{coq_example*} In order to factorize the left part of the rules, the subexpression referred by {\tt y} has to be at the same level in both rules. 
However the default behavior puts {\tt y} at the next level below 70 in the first rule (no associativity is the default), and at the level 200 in the second rule (level 200 is the default for inner expressions). To fix this, we need to force the parsing level of {\tt y}, as follows. \begin{coq_example*} Notation "x < y" := (lt x y) (at level 70). Notation "x < y < z" := (x < y /\ y < z) (at level 70, y at next level). \end{coq_example*} For the sake of factorization with {\Coq} predefined rules, simple rules have to be observed for notations starting with a symbol: e.g. rules starting with ``\{'' or ``('' should be put at level 0. The list of {\Coq} predefined notations can be found in Chapter~\ref{Theories}. The command to display the current state of the {\Coq} term parser is \comindex{Print Grammar constr} \begin{quote} \tt Print Grammar constr. \end{quote} \variant \comindex{Print Grammar pattern} {\tt Print Grammar pattern.}\\ This displays the state of the subparser of patterns (the parser used in the grammar of the {\tt match} {\tt with} constructions). \subsection{Displaying symbolic notations} The command \texttt{Notation} has an effect both on the {\Coq} parser and on the {\Coq} printer. For example: \begin{coq_example} Check (and True True). \end{coq_example} However, printing, especially pretty-printing, requires more care than parsing. We may want specific indentations, line breaks, alignment if on several lines, etc. The default printing of notations is very rudimentary. For printing a notation, a {\em formatting box} is opened in such a way that if the notation and its arguments cannot fit on a single line, a line break is inserted before the symbols of the notation and the arguments on the next lines are aligned with the argument on the first line. A first, simple control that a user can have on the printing of a notation is the insertion of spaces at some places of the notation. This is performed by adding extra spaces between the symbols and parameters: each extra space (other than the single space needed to separate the components) is interpreted as a space to be inserted by the printer. Here is an example showing how to add spaces around the bar of the notation. \begin{coq_example} Notation "{{ x : A | P }}" := (sig (fun x : A => P)) (at level 0, x at level 99). Check (sig (fun x : nat => x=x)). \end{coq_example} The second, more powerful control on printing is by using the {\tt format} modifier. Here is an example \begin{small} \begin{coq_example} Notation "'If' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3) (at level 200, right associativity, format "'[v ' 'If' c1 '/' '[' 'then' c2 ']' '/' '[' 'else' c3 ']' ']'"). 
\end{coq_example} \end{small} A {\em format} is an extension of the string denoting the notation with the possible following elements delimited by single quotes: \begin{itemize} \item extra spaces are translated into simple spaces \item tokens of the form \verb='/ '= are translated into breaking point, in case a line break occurs, an indentation of the number of spaces after the ``\verb=/='' is applied (2 spaces in the given example) \item token of the form \verb='//'= force writing on a new line \item well-bracketed pairs of tokens of the form \verb='[ '= and \verb=']'= are translated into printing boxes; in case a line break occurs, an extra indentation of the number of spaces given after the ``\verb=[='' is applied (4 spaces in the example) \item well-bracketed pairs of tokens of the form \verb='[hv '= and \verb=']'= are translated into horizontal-orelse-vertical printing boxes; if the content of the box does not fit on a single line, then every breaking point forces a newline and an extra indentation of the number of spaces given after the ``\verb=[='' is applied at the beginning of each newline (3 spaces in the example) \item well-bracketed pairs of tokens of the form \verb='[v '= and \verb=']'= are translated into vertical printing boxes; every breaking point forces a newline, even if the line is large enough to display the whole content of the box, and an extra indentation of the number of spaces given after the ``\verb=[='' is applied at the beginning of each newline \end{itemize} Thus, for the previous example, we get %\footnote{The ``@'' is here to shunt %the notation "'IF' A 'then' B 'else' C" which is defined in {\Coq} %initial state}: Notations do not survive the end of sections. No typing of the denoted expression is performed at definition time. Type-checking is done only at the time of use of the notation. \begin{coq_example} Check (IF_then_else (IF_then_else True False True) (IF_then_else True False True) (IF_then_else True False True)). \end{coq_example} \Rem Sometimes, a notation is expected only for the parser. %(e.g. because %the underlying parser of {\Coq}, namely {\camlpppp}, is LL1 and some extra %rules are needed to circumvent the absence of factorization). To do so, the option {\em only parsing} is allowed in the list of modifiers of \texttt{Notation}. \subsection{The \texttt{Infix} command \comindex{Infix}} The \texttt{Infix} command is a shortening for declaring notations of infix symbols. Its syntax is \begin{quote} \noindent\texttt{Infix "{\symbolentry}" :=} {\qualid} {\tt (} \nelist{\em modifier}{,} {\tt )}. \end{quote} and it is equivalent to \begin{quote} \noindent\texttt{Notation "x {\symbolentry} y" := ({\qualid} x y) (} \nelist{\em modifier}{,} {\tt )}. \end{quote} where {\tt x} and {\tt y} are fresh names distinct from {\qualid}. Here is an example. \begin{coq_example*} Infix "/\" := and (at level 80, right associativity). \end{coq_example*} \subsection{Reserving notations \label{ReservedNotation} \comindex{Reserved Notation}} A given notation may be used in different contexts. {\Coq} expects all uses of the notation to be defined at the same precedence and with the same associativity. To avoid giving the precedence and associativity every time, it is possible to declare a parsing rule in advance without giving its interpretation. Here is an example from the initial state of {\Coq}. \begin{coq_example} Reserved Notation "x = y" (at level 70, no associativity). 
\end{coq_example} Reserving a notation is also useful for simultaneously defined an inductive type or a recursive constant and a notation for it. \Rem The notations mentioned on Figure~\ref{init-notations} are reserved. Hence their precedence and associativity cannot be changed. \subsection{Simultaneous definition of terms and notations \comindex{Fixpoint {\ldots} where {\ldots}} \comindex{CoFixpoint {\ldots} where {\ldots}} \comindex{Inductive {\ldots} where {\ldots}}} Thanks to reserved notations, the inductive, co-inductive, recursive and corecursive definitions can benefit of customized notations. To do this, insert a {\tt where} notation clause after the definition of the (co)inductive type or (co)recursive term (or after the definition of each of them in case of mutual definitions). The exact syntax is given on Figure~\ref{notation-syntax}. Here are examples: \begin{coq_eval} Set Printing Depth 50. (********** The following is correct but produces an error **********) (********** because the symbol /\ is already bound **********) (**** Error: The conclusion of A -> B -> A /\ B is not valid *****) \end{coq_eval} \begin{coq_example*} Inductive and (A B:Prop) : Prop := conj : A -> B -> A /\ B where "A /\ B" := (and A B). \end{coq_example*} \begin{coq_eval} Set Printing Depth 50. (********** The following is correct but produces an error **********) (********** because the symbol + is already bound **********) (**** Error: no recursive definition *****) \end{coq_eval} \begin{coq_example*} Fixpoint plus (n m:nat) {struct n} : nat := match n with | O => m | S p => S (p+m) end where "n + m" := (plus n m). \end{coq_example*} \subsection{Displaying informations about notations \comindex{Set Printing Notations} \comindex{Unset Printing Notations}} To deactivate the printing of all notations, use the command \begin{quote} \tt Unset Printing Notations. \end{quote} To reactivate it, use the command \begin{quote} \tt Set Printing Notations. \end{quote} The default is to use notations for printing terms wherever possible. \SeeAlso {\tt Set Printing All} in Section~\ref{SetPrintingAll}. \subsection{Locating notations \comindex{Locate} \label{LocateSymbol}} To know to which notations a given symbol belongs to, use the command \begin{quote} \tt Locate {\symbolentry} \end{quote} where symbol is any (composite) symbol surrounded by quotes. To locate a particular notation, use a string where the variables of the notation are replaced by ``\_''. \Example \begin{coq_example} Locate "exists". Locate "'exists' _ , _". \end{coq_example} \SeeAlso Section \ref{Locate}. \begin{figure} \begin{small} \begin{centerframe} \begin{tabular}{lcl} {\sentence} & ::= & \zeroone{\tt Local} \texttt{Notation} {\str} \texttt{:=} {\term} \zeroone{\modifiers} \zeroone{:{\scope}} .\\ & $|$ & \zeroone{\tt Local} \texttt{Infix} {\str} \texttt{:=} {\qualid} \zeroone{\modifiers} \zeroone{:{\scope}} .\\ & $|$ & \zeroone{\tt Local} \texttt{Reserved Notation} {\str} \zeroone{\modifiers} .\\ & $|$ & {\tt Inductive} \nelist{{\inductivebody} \zeroone{\declnotation}}{with}{\tt .}\\ & $|$ & {\tt CoInductive} \nelist{{\inductivebody} \zeroone{\declnotation}}{with}{\tt .}\\ & $|$ & {\tt Fixpoint} \nelist{{\fixpointbody} \zeroone{\declnotation}}{with} {\tt .} \\ & $|$ & {\tt CoFixpoint} \nelist{{\cofixpointbody} \zeroone{\declnotation}}{with} {\tt .} \\ \\ {\declnotation} & ::= & \zeroone{{\tt where} \nelist{{\str} {\tt :=} {\term} \zeroone{:{\scope}}}{\tt and}}. 
\\ \\ {\modifiers} & ::= & \nelist{\ident}{,} {\tt at level} {\naturalnumber} \\ & $|$ & \nelist{\ident}{,} {\tt at next level} \\ & $|$ & {\tt at level} {\naturalnumber} \\ & $|$ & {\tt left associativity} \\ & $|$ & {\tt right associativity} \\ & $|$ & {\tt no associativity} \\ & $|$ & {\ident} {\tt ident} \\ & $|$ & {\ident} {\tt binder} \\ & $|$ & {\ident} {\tt closed binder} \\ & $|$ & {\ident} {\tt global} \\ & $|$ & {\ident} {\tt bigint} \\ & $|$ & {\tt only parsing} \\ & $|$ & {\tt format} {\str} \end{tabular} \end{centerframe} \end{small} \caption{Syntax of the variants of {\tt Notation}} \label{notation-syntax} \end{figure} \subsection{Notations and simple binders} Notations can be defined for binders as in the example: \begin{coq_eval} Set Printing Depth 50. (********** The following is correct but produces **********) (**** an incompatibility with the reserved notation ********) \end{coq_eval} \begin{coq_example*} Notation "{ x : A | P }" := (sig (fun x : A => P)) (at level 0). \end{coq_example*} The binding variables in the left-hand-side that occur as a parameter of the notation naturally bind all their occurrences appearing in their respective scope after instantiation of the parameters of the notation. Contrastingly, the binding variables that are not a parameter of the notation do not capture the variables of same name that could appear in their scope after instantiation of the notation. E.g., for the notation \begin{coq_example*} Notation "'exists_different' n" := (exists p:nat, p<>n) (at level 200). \end{coq_example*} the next command fails because {\tt p} does not bind in the instance of {\tt n}. \begin{coq_eval} Set Printing Depth 50. (********** The following produces **********) (**** The reference p was not found in the current environment ********) \end{coq_eval} \begin{coq_example} Check (exists_different p). \end{coq_example} \Rem Binding variables must not necessarily be parsed using the {\tt ident} entry. For factorization purposes, they can be said to be parsed at another level (e.g. {\tt x} in \verb="{ x : A | P }"= must be parsed at level 99 to be factorized with the notation \verb="{ A } + { B }"= for which {\tt A} can be any term). However, even if parsed as a term, this term must at the end be effectively a single identifier. \subsection{Notations with recursive patterns} \label{RecursiveNotations} A mechanism is provided for declaring elementary notations with recursive patterns. The basic example is: \begin{coq_example*} Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) ..). \end{coq_example*} On the right-hand side, an extra construction of the form {\tt ..} $t$ {\tt ..} can be used. Notice that {\tt ..} is part of the {\Coq} syntax and it must not be confused with the three-dots notation $\ldots$ used in this manual to denote a sequence of arbitrary size. On the left-hand side, the part ``$x$ $s$ {\tt ..} $s$ $y$'' of the notation parses any number of time (but at least one time) a sequence of expressions separated by the sequence of tokens $s$ (in the example, $s$ is just ``{\tt ;}''). In the right-hand side, the term enclosed within {\tt ..} must be a pattern with two holes of the form $\phi([~]_E,[~]_I)$ where the first hole is occupied either by $x$ or by $y$ and the second hole is occupied by an arbitrary term $t$ called the {\it terminating} expression of the recursive notation. 
The subterm {\tt ..} $\phi(x,t)$ {\tt ..} (or {\tt ..} $\phi(y,t)$ {\tt ..}) must itself occur at second position of the same pattern where the first hole is occupied by the other variable, $y$ or $x$. Otherwise said, the right-hand side must contain a subterm of the form either $\phi(x,${\tt ..} $\phi(y,t)$ {\tt ..}$)$ or $\phi(y,${\tt ..} $\phi(x,t)$ {\tt ..}$)$. The pattern $\phi$ is the {\em iterator} of the recursive notation and, of course, the name $x$ and $y$ can be chosen arbitrarily. The parsing phase produces a list of expressions which are used to fill in order the first hole of the iterating pattern which is repeatedly nested as many times as the length of the list, the second hole being the nesting point. In the innermost occurrence of the nested iterating pattern, the second hole is finally filled with the terminating expression. In the example above, the iterator $\phi([~]_E,[~]_I)$ is {\tt cons $[~]_E$ $[~]_I$} and the terminating expression is {\tt nil}. Here are other examples: \begin{coq_example*} Notation "( x , y , .. , z )" := (pair .. (pair x y) .. z) (at level 0). Notation "[| t * ( x , y , .. , z ) ; ( a , b , .. , c ) * u |]" := (pair (pair .. (pair (pair t x) (pair t y)) .. (pair t z)) (pair .. (pair (pair a u) (pair b u)) .. (pair c u))) (t at level 39). \end{coq_example*} Notations with recursive patterns can be reserved like standard notations, they can also be declared within interpretation scopes (see section \ref{scopes}). \subsection{Notations with recursive patterns involving binders} Recursive notations can also be used with binders. The basic example is: \begin{coq_example*} Notation "'exists' x .. y , p" := (ex (fun x => .. (ex (fun y => p)) ..)) (at level 200, x binder, y binder, right associativity). \end{coq_example*} The principle is the same as in Section~\ref{RecursiveNotations} except that in the iterator $\phi([~]_E,[~]_I)$, the first hole is a placeholder occurring at the position of the binding variable of a {\tt fun} or a {\tt forall}. To specify that the part ``$x$ {\tt ..} $y$'' of the notation parses a sequence of binders, $x$ and $y$ must be marked as {\tt binder} in the list of modifiers of the notation. Then, the list of binders produced at the parsing phase are used to fill in the first hole of the iterating pattern which is repeatedly nested as many times as the number of binders generated. If ever the generalization operator {\tt `} (see Section~\ref{implicit-generalization}) is used in the binding list, the added binders are taken into account too. Binders parsing exist in two flavors. If $x$ and $y$ are marked as {\tt binder}, then a sequence such as {\tt a b c : T} will be accepted and interpreted as the sequence of binders {\tt (a:T) (b:T) (c:T)}. For instance, in the notation above, the syntax {\tt exists a b : nat, a = b} is provided. The variables $x$ and $y$ can also be marked as {\tt closed binder} in which case only well-bracketed binders of the form {\tt (a b c:T)} or {\tt \{a b c:T\}} etc. are accepted. With closed binders, the recursive sequence in the left-hand side can be of the general form $x$ $s$ {\tt ..} $s$ $y$ where $s$ is an arbitrary sequence of tokens. With open binders though, $s$ has to be empty. Here is an example of recursive notation with closed binders: \begin{coq_example*} Notation "'mylet' f x .. y := t 'in' u":= (let f := fun x => .. (fun y => t) .. in u) (x closed binder, y closed binder, at level 200, right associativity). 
\end{coq_example*} \subsection{Summary} \paragraph{Syntax of notations} The different syntactic variants of the command \texttt{Notation} are given on Figure~\ref{notation-syntax}. The optional {\tt :{\scope}} is described in the Section~\ref{scopes}. \Rem No typing of the denoted expression is performed at definition time. Type-checking is done only at the time of use of the notation. \Rem Many examples of {\tt Notation} may be found in the files composing the initial state of {\Coq} (see directory {\tt \$COQLIB/theories/Init}). \Rem The notation \verb="{ x }"= has a special status in such a way that complex notations of the form \verb="x + { y }"= or \verb="x * { y }"= can be nested with correct precedences. Especially, every notation involving a pattern of the form \verb="{ x }"= is parsed as a notation where the pattern \verb="{ x }"= has been simply replaced by \verb="x"= and the curly brackets are parsed separately. E.g. \verb="y + { z }"= is not parsed as a term of the given form but as a term of the form \verb="y + z"= where \verb=z= has been parsed using the rule parsing \verb="{ x }"=. Especially, level and precedences for a rule including patterns of the form \verb="{ x }"= are relative not to the textual notation but to the notation where the curly brackets have been removed (e.g. the level and the associativity given to some notation, say \verb="{ y } & { z }"= in fact applies to the underlying \verb="{ x }"=-free rule which is \verb="y & z"=). \paragraph{Persistence of notations} Notations do not survive the end of sections. They survive modules unless the command {\tt Local Notation} is used instead of {\tt Notation}. \section[Interpretation scopes]{Interpretation scopes\index{Interpretation scopes} \label{scopes}} % Introduction An {\em interpretation scope} is a set of notations for terms with their interpretation. Interpretation scopes provides with a weak, purely syntactical form of notations overloading: a same notation, for instance the infix symbol \verb=+= can be used to denote distinct definitions of an additive operator. Depending on which interpretation scopes is currently open, the interpretation is different. Interpretation scopes can include an interpretation for numerals and strings. However, this is only made possible at the {\ocaml} level. See Figure \ref{notation-syntax} for the syntax of notations including the possibility to declare them in a given scope. Here is a typical example which declares the notation for conjunction in the scope {\tt type\_scope}. \begin{verbatim} Notation "A /\ B" := (and A B) : type_scope. \end{verbatim} \Rem A notation not defined in a scope is called a {\em lonely} notation. \subsection{Global interpretation rules for notations} At any time, the interpretation of a notation for term is done within a {\em stack} of interpretation scopes and lonely notations. In case a notation has several interpretations, the actual interpretation is the one defined by (or in) the more recently declared (or open) lonely notation (or interpretation scope) which defines this notation. 
Typically if a given notation is defined in some scope {\scope} but has also an interpretation not assigned to a scope, then, if {\scope} is open before the lonely interpretation is declared, then the lonely interpretation is used (and this is the case even if the interpretation of the notation in {\scope} is given after the lonely interpretation: otherwise said, only the order of lonely interpretations and opening of scopes matters, and not the declaration of interpretations within a scope). The initial state of {\Coq} declares three interpretation scopes and no lonely notations. These scopes, in opening order, are {\tt core\_scope}, {\tt type\_scope} and {\tt nat\_scope}. The command to add a scope to the interpretation scope stack is \comindex{Open Scope} \comindex{Close Scope} \begin{quote} {\tt Open Scope} {\scope}. \end{quote} It is also possible to remove a scope from the interpretation scope stack by using the command \begin{quote} {\tt Close Scope} {\scope}. \end{quote} Notice that this command does not only cancel the last {\tt Open Scope {\scope}} but all the invocation of it. \Rem {\tt Open Scope} and {\tt Close Scope} do not survive the end of sections where they occur. When defined outside of a section, they are exported to the modules that import the module where they occur. \begin{Variants} \item {\tt Local Open Scope} {\scope}. \item {\tt Local Close Scope} {\scope}. These variants are not exported to the modules that import the module where they occur, even if outside a section. \item {\tt Global Open Scope} {\scope}. \item {\tt Global Close Scope} {\scope}. These variants survive sections. They behave as if {\tt Global} were absent when not inside a section. \end{Variants} \subsection{Local interpretation rules for notations} In addition to the global rules of interpretation of notations, some ways to change the interpretation of subterms are available. \subsubsection{Local opening of an interpretation scope \label{scopechange} \index{\%} \comindex{Delimit Scope}} It is possible to locally extend the interpretation scope stack using the syntax ({\term})\%{\delimkey} (or simply {\term}\%{\delimkey} for atomic terms), where {\delimkey} is a special identifier called {\em delimiting key} and bound to a given scope. In such a situation, the term {\term}, and all its subterms, are interpreted in the scope stack extended with the scope bound to {\delimkey}. To bind a delimiting key to a scope, use the command \begin{quote} \texttt{Delimit Scope} {\scope} \texttt{with} {\ident} \end{quote} \subsubsection{Binding arguments of a constant to an interpretation scope \comindex{Arguments}} It is possible to set in advance that some arguments of a given constant have to be interpreted in a given scope. The command is \begin{quote} {\tt Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} \end{quote} where the list is the list of the arguments of {\qualid} eventually annotated with their {\scope}. Grouping round parentheses can be used to decorate multiple arguments with the same scope. {\scope} can be either a scope name or its delimiting key. For example the following command puts the first two arguments of {\tt plus\_fct} in the scope delimited by the key {\tt F} ({\tt Rfun\_scope}) and the last argument in the scope delimited by the key {\tt R} ({\tt R\_scope}). \begin{coq_example*} Arguments plus_fct (f1 f2)%F x%R. \end{coq_example*} The {\tt Arguments} command accepts scopes decoration to all grouping parentheses. 
In the following example arguments {\tt A} and {\tt B} are marked as maximally inserted implicit arguments and are put into the {\tt type\_scope} scope. \begin{coq_example*} Arguments respectful {A B}%type (R R')%signature _ _. \end{coq_example*} When interpreting a term, if some of the arguments of {\qualid} are built from a notation, then this notation is interpreted in the scope stack extended by the scopes bound (if any) to these arguments. Arguments scopes can be cleared with the following command: \begin{quote} {\tt Arguments {\qualid} : clear scopes} \end{quote} \begin{Variants} \item {\tt Global Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} This behaves like {\tt Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} but survives when a section is closed instead of stopping working at section closing. Without the {\tt Global} modifier, the effect of the command stops when the section it belongs to ends. \item {\tt Local Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} This behaves like {\tt Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} but does not survive modules and files. Without the {\tt Local} modifier, the effect of the command is visible from within other modules or files. \end{Variants} \SeeAlso The command to show the scopes bound to the arguments of a function is described in Section~\ref{About}. \subsubsection{Binding types of arguments to an interpretation scope} When an interpretation scope is naturally associated to a type (e.g. the scope of operations on the natural numbers), it may be convenient to bind it to this type. The effect of this is that any argument of a function that syntactically expects a parameter of this type is interpreted using scope. More precisely, it applies only if this argument is built from a notation, and if so, this notation is interpreted in the scope stack extended by this particular scope. It does not apply to the subterms of this notation (unless the interpretation of the notation itself expects arguments of the same type that would trigger the same scope). \comindex{Bind Scope} More generally, any {\class} (see Chapter~\ref{Coercions-full}) can be bound to an interpretation scope. The command to do it is \begin{quote} {\tt Bind Scope} {\scope} \texttt{with} {\class} \end{quote} \Example \begin{coq_example} Parameter U : Set. Bind Scope U_scope with U. Parameter Uplus : U -> U -> U. Parameter P : forall T:Set, T -> U -> Prop. Parameter f : forall T:Set, T -> U. Infix "+" := Uplus : U_scope. Unset Printing Notations. Open Scope nat_scope. (* Define + on the nat as the default for + *) Check (fun x y1 y2 z t => P _ (x + t) ((f _ (y1 + y2) + z))). \end{coq_example} \Rem The scope {\tt type\_scope} has also a local effect on interpretation. See the next section. \SeeAlso The command to show the scopes bound to the arguments of a function is described in Section~\ref{About}. \subsection[The {\tt type\_scope} interpretation scope]{The {\tt type\_scope} interpretation scope\index{type\_scope}} The scope {\tt type\_scope} has a special status. It is a primitive interpretation scope which is temporarily activated each time a subterm of an expression is expected to be a type. This includes goals and statements, types of binders, domain and codomain of implication, codomain of products, and more generally any type argument of a declared or defined constant. \subsection{Interpretation scopes used in the standard library of {\Coq}} We give an overview of the scopes used in the standard library of {\Coq}. 
For a complete list of notations in each scope, use the commands {\tt
Print Scopes} or {\tt Print Scope {\scope}}.

\subsubsection{\tt type\_scope}

This includes infix {\tt *} for product types and infix {\tt +} for
sum types. It is delimited by key {\tt type}.

\subsubsection{\tt nat\_scope}

This includes the standard arithmetical operators and relations on
type {\tt nat}. Positive numerals in this scope are mapped to their
canonical representatives built from {\tt O} and {\tt S}. The scope
is delimited by key {\tt nat}.

\subsubsection{\tt N\_scope}

This includes the standard arithmetical operators and relations on
type {\tt N} (binary natural numbers). It is delimited by key {\tt N}
and comes with an interpretation for numerals as closed terms of type
{\tt N}.

\subsubsection{\tt Z\_scope}

This includes the standard arithmetical operators and relations on
type {\tt Z} (binary integer numbers). It is delimited by key {\tt Z}
and comes with an interpretation for numerals as closed terms of type
{\tt Z}.

\subsubsection{\tt positive\_scope}

This includes the standard arithmetical operators and relations on
type {\tt positive} (binary strictly positive numbers). It is
delimited by key {\tt positive} and comes with an interpretation for
numerals as closed terms of type {\tt positive}.

\subsubsection{\tt Q\_scope}

This includes the standard arithmetical operators and relations on
type {\tt Q} (rational numbers defined as fractions of an integer and
a strictly positive integer modulo the equality of the
numerator-denominator cross-product). As for numerals, only $0$ and
$1$ have an interpretation in scope {\tt Q\_scope} (their
interpretations are $\frac{0}{1}$ and $\frac{1}{1}$ respectively).

\subsubsection{\tt Qc\_scope}

This includes the standard arithmetical operators and relations on
the type {\tt Qc} of rational numbers defined as the type of
irreducible fractions of an integer and a strictly positive integer.

\subsubsection{\tt real\_scope}

This includes the standard arithmetical operators and relations on
type {\tt R} (axiomatic real numbers). It is delimited by key
{\tt R} and comes with an interpretation for numerals as terms of
type {\tt R}. The interpretation is based on the binary
decomposition. The numeral 2 is represented by $1+1$. The
interpretation $\phi(n)$ of an odd positive numeral $n$ greater than
or equal to 3 is {\tt 1+(1+1)*$\phi((n-1)/2)$}. The interpretation
$\phi(n)$ of an even positive numeral $n$ greater than or equal to 4
is {\tt (1+1)*$\phi(n/2)$}. Negative numerals are represented as the
opposite of the interpretation of their absolute value. E.g. the
syntactic object {\tt -11} is interpreted as
{\tt -(1+(1+1)*(1+(1+1)*(1+1)))} where the unit $1$ and all the
operations are those of {\tt R}.

\subsubsection{\tt bool\_scope}

This includes notations for the boolean operators. It is delimited by
key {\tt bool}.

\subsubsection{\tt list\_scope}

This includes notations for the list operators. It is delimited by
key {\tt list}.

\subsubsection{\tt core\_scope}

This includes the notation for pairs. It is delimited by key
{\tt core}.

\subsubsection{\tt string\_scope}

This includes notation for strings as elements of the type
{\tt string}. Special characters and escaping follow {\Coq}
conventions on strings (see Section~\ref{strings}). In particular,
there is no convention for visualizing non-printable characters of a
string. The file {\tt String.v} shows an example that contains
quotes, a newline and a beep (i.e. the ascii character of code 7).
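For instance, assuming the {\tt String} library has been loaded, the
delimiting key {\tt string} can be used to force this interpretation
of a quoted string locally (see Section~\ref{scopechange}):
\begin{coq_eval}
Require Import String.
\end{coq_eval}
\begin{coq_example}
Check "hello"%string.
\end{coq_example}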
\subsubsection{\tt char\_scope} This includes interpretation for all strings of the form \verb!"!$c$\verb!"! where $c$ is an ascii character, or of the form \verb!"!$nnn$\verb!"! where $nnn$ is a three-digits number (possibly with leading 0's), or of the form \verb!""""!. Their respective denotations are the ascii code of $c$, the decimal ascii code $nnn$, or the ascii code of the character \verb!"! (i.e. the ascii code 34), all of them being represented in the type {\tt ascii}. \subsection{Displaying informations about scopes} \subsubsection{\tt Print Visibility\comindex{Print Visibility}} This displays the current stack of notations in scopes and lonely notations that is used to interpret a notation. The top of the stack is displayed last. Notations in scopes whose interpretation is hidden by the same notation in a more recently open scope are not displayed. Hence each notation is displayed only once. \variant {\tt Print Visibility {\scope}}\\ This displays the current stack of notations in scopes and lonely notations assuming that {\scope} is pushed on top of the stack. This is useful to know how a subterm locally occurring in the scope of {\scope} is interpreted. \subsubsection{\tt Print Scope {\scope}\comindex{Print Scope}} This displays all the notations defined in interpretation scope {\scope}. It also displays the delimiting key if any and the class to which the scope is bound, if any. \subsubsection{\tt Print Scopes\comindex{Print Scopes}} This displays all the notations, delimiting keys and corresponding class of all the existing interpretation scopes. It also displays the lonely notations. \section[Abbreviations]{Abbreviations\index{Abbreviations} \label{Abbreviations} \comindex{Notation}} An {\em abbreviation} is a name, possibly applied to arguments, that denotes a (presumably) more complex expression. Here are examples: \begin{coq_eval} Require Import List. Require Import Relations. Set Printing Notations. \end{coq_eval} \begin{coq_example} Notation Nlist := (list nat). Check 1 :: 2 :: 3 :: nil. Notation reflexive R := (forall x, R x x). Check forall A:Prop, A <-> A. Check reflexive iff. \end{coq_example} An abbreviation expects no precedence nor associativity, since it follows the usual syntax of application. Abbreviations are used as much as possible by the {\Coq} printers unless the modifier \verb=(only parsing)= is given. Abbreviations are bound to an absolute name as an ordinary definition is, and they can be referred by qualified names too. Abbreviations are syntactic in the sense that they are bound to expressions which are not typed at the time of the definition of the abbreviation but at the time it is used. Especially, abbreviations can be bound to terms with holes (i.e. with ``\_''). The general syntax for abbreviations is \begin{quote} \zeroone{{\tt Local}} \texttt{Notation} {\ident} \sequence{\ident} {\ident} \texttt{:=} {\term} \zeroone{{\tt (only parsing)}}~\verb=.= \end{quote} \Example \begin{coq_eval} Set Strict Implicit. Reset Initial. \end{coq_eval} \begin{coq_example} Definition explicit_id (A:Set) (a:A) := a. Notation id := (explicit_id _). Check (id 0). \end{coq_example} Abbreviations do not survive the end of sections. No typing of the denoted expression is performed at definition time. Type-checking is done only at the time of use of the abbreviation. 
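Since the expansion is purely syntactic, the same abbreviation may
even be used at several unrelated types, each use being type-checked
independently. For instance (the name {\tt apply2} below is
arbitrary):
\begin{coq_example}
Notation apply2 f x := (f (f x)).
Check (apply2 S 0).
Check (apply2 negb true).
\end{coq_example}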
%\Rem \index{Syntactic Definition} % %Abbreviations are similar to the {\em syntactic %definitions} available in versions of {\Coq} prior to version 8.0, %except that abbreviations are used for printing (unless the modifier %\verb=(only parsing)= is given) while syntactic definitions were not. \section{Tactic Notations \comindex{Tactic Notation}} Tactic notations allow to customize the syntax of the tactics of the tactic language\footnote{Tactic notations are just a simplification of the {\tt Grammar tactic simple\_tactic} command that existed in versions prior to version 8.0.}. Tactic notations obey the following syntax \medskip \noindent \begin{tabular}{lcl} {\sentence} & ::= & \texttt{Tactic Notation} \zeroone{\taclevel} \nelist{\proditem}{} \\ & & \texttt{:= {\tac} .}\\ {\proditem} & ::= & {\str} $|$ {\tacargtype}{\tt ({\ident})} \\ {\taclevel} & ::= & {\tt (at level} {\naturalnumber}{\tt )} \\ {\tacargtype} & ::= & %{\tt preident} $|$ {\tt ident} $|$ {\tt simple\_intropattern} $|$ {\tt reference} \\ & $|$ & {\tt hyp} $|$ {\tt hyp\_list} $|$ {\tt ne\_hyp\_list} \\ & $|$ & % {\tt quantified\_hypothesis} \\ & $|$ & {\tt constr} $|$ {\tt constr\_list} $|$ {\tt ne\_constr\_list} \\ & $|$ & %{\tt castedopenconstr} $|$ {\tt integer} $|$ {\tt integer\_list} $|$ {\tt ne\_integer\_list} \\ & $|$ & {\tt int\_or\_var} $|$ {\tt int\_or\_var\_list} $|$ {\tt ne\_int\_or\_var\_list} \\ & $|$ & {\tt tactic} $|$ {\tt tactic$n$} \qquad\mbox{(for $0\leq n\leq 5$)} \end{tabular} \medskip A tactic notation {\tt Tactic Notation {\taclevel} {\sequence{\proditem}{}} := {\tac}} extends the parser and pretty-printer of tactics with a new rule made of the list of production items. It then evaluates into the tactic expression {\tac}. For simple tactics, it is recommended to use a terminal symbol, i.e. a {\str}, for the first production item. The tactic level indicates the parsing precedence of the tactic notation. This information is particularly relevant for notations of tacticals. Levels 0 to 5 are available (default is 0). To know the parsing precedences of the existing tacticals, use the command {\tt Print Grammar tactic.} Each type of tactic argument has a specific semantic regarding how it is parsed and how it is interpreted. The semantic is described in the following table. The last command gives examples of tactics which use the corresponding kind of argument. \medskip \noindent \begin{tabular}{l|l|l|l} Tactic argument type & parsed as & interpreted as & as in tactic \\ \hline & & & \\ {\tt\small ident} & identifier & a user-given name & {\tt intro} \\ {\tt\small simple\_intropattern} & intro\_pattern & an intro\_pattern & {\tt intros}\\ {\tt\small hyp} & identifier & an hypothesis defined in context & {\tt clear}\\ %% quantified_hypothesis actually not supported %%{\tt\small quantified\_hypothesis} & identifier or integer & a named or non dep. hyp. of the goal & {\tt intros until}\\ {\tt\small reference} & qualified identifier & a global reference of term & {\tt unfold}\\ {\tt\small constr} & term & a term & {\tt exact} \\ %% castedopenconstr actually not supported %%{\tt\small castedopenconstr} & term & a term with its sign. of exist. var. 
& {\tt refine}\\ {\tt\small integer} & integer & an integer & \\ {\tt\small int\_or\_var} & identifier or integer & an integer & {\tt do} \\ {\tt\small tactic} & tactic at level 5 & a tactic & \\ {\tt\small tactic$n$} & tactic at level $n$ & a tactic & \\ {\tt\small {\nterm{entry}}\_list} & list of {\nterm{entry}} & a list of how {\nterm{entry}} is interpreted & \\ {\tt\small ne\_{\nterm{entry}}\_list} & non-empty list of {\nterm{entry}} & a list of how {\nterm{entry}} is interpreted& \\ \end{tabular} \Rem In order to be bound in tactic definitions, each syntactic entry for argument type must include the case of simple {\ltac} identifier as part of what it parses. This is naturally the case for {\tt ident}, {\tt simple\_intropattern}, {\tt reference}, {\tt constr}, ... but not for {\tt integer}. This is the reason for introducing a special entry {\tt int\_or\_var} which evaluates to integers only but which syntactically includes identifiers in order to be usable in tactic definitions. \Rem The {\tt {\nterm{entry}}\_list} and {\tt ne\_{\nterm{entry}}\_list} entries can be used in primitive tactics or in other notations at places where a list of the underlying entry can be used: {\nterm{entry}} is either {\tt\small constr}, {\tt\small hyp}, {\tt\small integer} or {\tt\small int\_or\_var}. %%% Local Variables: %%% mode: latex %%% TeX-master: "Reference-Manual" %%% End: coq-8.4pl2/doc/RecTutorial/0000750000175000001440000000000012127276533014640 5ustar notinuserscoq-8.4pl2/doc/RecTutorial/RecTutorial.v0000640000175000001440000006130611776416511017274 0ustar notinusersCheck (forall A:Type, (exists x:A, forall (y:A), x <> y) -> 2 = 3). Inductive nat : Set := | O : nat | S : nat->nat. Check nat. Check O. Check S. Reset nat. Print nat. Print le. Theorem zero_leq_three: 0 <= 3. Proof. constructor 2. constructor 2. constructor 2. constructor 1. Qed. Print zero_leq_three. Lemma zero_leq_three': 0 <= 3. repeat constructor. Qed. Lemma zero_lt_three : 0 < 3. Proof. repeat constructor. Qed. Print zero_lt_three. Inductive le'(n:nat):nat -> Prop := | le'_n : le' n n | le'_S : forall p, le' (S n) p -> le' n p. Hint Constructors le'. Require Import List. Print list. Check list. Check (nil (A:=nat)). Check (nil (A:= nat -> nat)). Check (fun A: Type => (cons (A:=A))). Check (cons 3 (cons 2 nil)). Check (nat :: bool ::nil). Check ((3<=4) :: True ::nil). Check (Prop::Set::nil). Require Import Bvector. Print vector. Check (Vnil nat). Check (fun (A:Type)(a:A)=> Vcons _ a _ (Vnil _)). Check (Vcons _ 5 _ (Vcons _ 3 _ (Vnil _))). Lemma eq_3_3 : 2 + 1 = 3. Proof. reflexivity. Qed. Print eq_3_3. Lemma eq_proof_proof : eq_refl (2*6) = eq_refl (3*4). Proof. reflexivity. Qed. Print eq_proof_proof. Lemma eq_lt_le : ( 2 < 4) = (3 <= 4). Proof. reflexivity. Qed. Lemma eq_nat_nat : nat = nat. Proof. reflexivity. Qed. Lemma eq_Set_Set : Set = Set. Proof. reflexivity. Qed. Lemma eq_Type_Type : Type = Type. Proof. reflexivity. Qed. Check (2 + 1 = 3). Check (Type = Type). Goal Type = Type. reflexivity. Qed. Print or. Print and. Print sumbool. Print ex. Require Import ZArith. Require Import Compare_dec. Check le_lt_dec. Definition max (n p :nat) := match le_lt_dec n p with | left _ => p | right _ => n end. Theorem le_max : forall n p, n <= p -> max n p = p. Proof. intros n p ; unfold max ; case (le_lt_dec n p); simpl. trivial. intros; absurd (p < p); eauto with arith. Qed. Extraction max. 
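(* A small additional check: since le_lt_dec is an informative (computable)
   test, max can also be evaluated inside Coq on closed values; the two
   evaluations below should reduce to 2 and 1 respectively. *)
Eval compute in (max (S (S O)) (S O)).
Eval compute in (max O (S O)).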
Inductive tree(A:Type) : Type := node : A -> forest A -> tree A with forest (A: Type) : Type := nochild : forest A | addchild : tree A -> forest A -> forest A. Inductive even : nat->Prop := evenO : even O | evenS : forall n, odd n -> even (S n) with odd : nat->Prop := oddS : forall n, even n -> odd (S n). Lemma odd_49 : odd (7 * 7). simpl; repeat constructor. Qed. Definition nat_case := fun (Q : Type)(g0 : Q)(g1 : nat -> Q)(n:nat) => match n return Q with | 0 => g0 | S p => g1 p end. Eval simpl in (nat_case nat 0 (fun p => p) 34). Eval simpl in (fun g0 g1 => nat_case nat g0 g1 34). Eval simpl in (fun g0 g1 => nat_case nat g0 g1 0). Definition pred (n:nat) := match n with O => O | S m => m end. Eval simpl in pred 56. Eval simpl in pred 0. Eval simpl in fun p => pred (S p). Definition xorb (b1 b2:bool) := match b1, b2 with | false, true => true | true, false => true | _ , _ => false end. Definition pred_spec (n:nat) := {m:nat | n=0 /\ m=0 \/ n = S m}. Definition predecessor : forall n:nat, pred_spec n. intro n;case n. unfold pred_spec;exists 0;auto. unfold pred_spec; intro n0;exists n0; auto. Defined. Print predecessor. Extraction predecessor. Theorem nat_expand : forall n:nat, n = match n with 0 => 0 | S p => S p end. intro n;case n;simpl;auto. Qed. Check (fun p:False => match p return 2=3 with end). Theorem fromFalse : False -> 0=1. intro absurd. contradiction. Qed. Section equality_elimination. Variables (A: Type) (a b : A) (p : a = b) (Q : A -> Type). Check (fun H : Q a => match p in (eq _ y) return Q y with eq_refl => H end). End equality_elimination. Theorem trans : forall n m p:nat, n=m -> m=p -> n=p. Proof. intros n m p eqnm. case eqnm. trivial. Qed. Lemma Rw : forall x y: nat, y = y * x -> y * x * x = y. intros x y e; do 2 rewrite <- e. reflexivity. Qed. Require Import Arith. Check mult_1_l. (* mult_1_l : forall n : nat, 1 * n = n *) Check mult_plus_distr_r. (* mult_plus_distr_r : forall n m p : nat, (n + m) * p = n * p + m * p *) Lemma mult_distr_S : forall n p : nat, n * p + p = (S n)* p. simpl;auto with arith. Qed. Lemma four_n : forall n:nat, n+n+n+n = 4*n. intro n;rewrite <- (mult_1_l n). Undo. intro n; pattern n at 1. rewrite <- mult_1_l. repeat rewrite mult_distr_S. trivial. Qed. Section Le_case_analysis. Variables (n p : nat) (H : n <= p) (Q : nat -> Prop) (H0 : Q n) (HS : forall m, n <= m -> Q (S m)). Check ( match H in (_ <= q) return (Q q) with | le_n => H0 | le_S m Hm => HS m Hm end ). End Le_case_analysis. Lemma predecessor_of_positive : forall n, 1 <= n -> exists p:nat, n = S p. Proof. intros n H; case H. exists 0; trivial. intros m Hm; exists m;trivial. Qed. Definition Vtail_total (A : Type) (n : nat) (v : vector A n) : vector A (pred n):= match v in (vector _ n0) return (vector A (pred n0)) with | Vnil => Vnil A | Vcons _ n0 v0 => v0 end. Definition Vtail' (A:Type)(n:nat)(v:vector A n) : vector A (pred n). intros A n v; case v. simpl. exact (Vnil A). simpl. auto. Defined. (* Inductive Lambda : Set := lambda : (Lambda -> False) -> Lambda. Error: Non strictly positive occurrence of "Lambda" in "(Lambda -> False) -> Lambda" *) Section Paradox. Variable Lambda : Set. Variable lambda : (Lambda -> False) ->Lambda. Variable matchL : Lambda -> forall Q:Prop, ((Lambda ->False) -> Q) -> Q. (* understand matchL Q l (fun h : Lambda -> False => t) as match l return Q with lambda h => t end *) Definition application (f x: Lambda) :False := matchL f False (fun h => h x). Definition Delta : Lambda := lambda (fun x : Lambda => application x x). 
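(* Note: if matchL had its intended computational behavior, application
   Delta Delta would reduce to itself, mimicking the non-terminating
   untyped term (fun x => x x) (fun x => x x). Even without computation,
   its type alone yields an inhabitant of False below, which is why Coq's
   positivity condition rejects the declaration of Lambda shown in the
   comment above. *)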
Definition loop : False := application Delta Delta. Theorem two_is_three : 2 = 3. Proof. elim loop. Qed. End Paradox. Require Import ZArith. Inductive itree : Set := | ileaf : itree | inode : Z-> (nat -> itree) -> itree. Definition isingle l := inode l (fun i => ileaf). Definition t1 := inode 0 (fun n => isingle (Z.of_nat (2*n))). Definition t2 := inode 0 (fun n : nat => inode (Z.of_nat n) (fun p => isingle (Z.of_nat (n*p)))). Inductive itree_le : itree-> itree -> Prop := | le_leaf : forall t, itree_le ileaf t | le_node : forall l l' s s', Z.le l l' -> (forall i, exists j:nat, itree_le (s i) (s' j)) -> itree_le (inode l s) (inode l' s'). Theorem itree_le_trans : forall t t', itree_le t t' -> forall t'', itree_le t' t'' -> itree_le t t''. induction t. constructor 1. intros t'; case t'. inversion 1. intros z0 i0 H0. intro t'';case t''. inversion 1. intros. inversion_clear H1. constructor 2. inversion_clear H0;eauto with zarith. inversion_clear H0. intro i2; case (H4 i2). intros. generalize (H i2 _ H0). intros. case (H3 x);intros. generalize (H5 _ H6). exists x0;auto. Qed. Inductive itree_le' : itree-> itree -> Prop := | le_leaf' : forall t, itree_le' ileaf t | le_node' : forall l l' s s' g, Z.le l l' -> (forall i, itree_le' (s i) (s' (g i))) -> itree_le' (inode l s) (inode l' s'). Lemma t1_le_t2 : itree_le t1 t2. unfold t1, t2. constructor. auto with zarith. intro i; exists (2 * i). unfold isingle. constructor. auto with zarith. exists i;constructor. Qed. Lemma t1_le'_t2 : itree_le' t1 t2. unfold t1, t2. constructor 2 with (fun i : nat => 2 * i). auto with zarith. unfold isingle; intro i ; constructor 2 with (fun i :nat => i). auto with zarith. constructor . Qed. Require Import List. Inductive ltree (A:Set) : Set := lnode : A -> list (ltree A) -> ltree A. Inductive prop : Prop := prop_intro : Prop -> prop. Check (prop_intro prop). Inductive ex_Prop (P : Prop -> Prop) : Prop := exP_intro : forall X : Prop, P X -> ex_Prop P. Lemma ex_Prop_inhabitant : ex_Prop (fun P => P -> P). Proof. exists (ex_Prop (fun P => P -> P)). trivial. Qed. (* Check (fun (P:Prop->Prop)(p: ex_Prop P) => match p with exP_intro X HX => X end). Error: Incorrect elimination of "p" in the inductive type "ex_Prop", the return type has sort "Type" while it should be "Prop" Elimination of an inductive object of sort "Prop" is not allowed on a predicate in sort "Type" because proofs can be eliminated only to build proofs *) Inductive typ : Type := typ_intro : Type -> typ. Definition typ_inject: typ. split. exact typ. (* Defined. Error: Universe Inconsistency. *) Abort. (* Inductive aSet : Set := aSet_intro: Set -> aSet. User error: Large non-propositional inductive types must be in Type *) Inductive ex_Set (P : Set -> Prop) : Type := exS_intro : forall X : Set, P X -> ex_Set P. Inductive comes_from_the_left (P Q:Prop): P \/ Q -> Prop := c1 : forall p, comes_from_the_left P Q (or_introl (A:=P) Q p). Goal (comes_from_the_left _ _ (or_introl True I)). split. Qed. Goal ~(comes_from_the_left _ _ (or_intror True I)). red;inversion 1. (* discriminate H0. *) Abort. Reset comes_from_the_left. (* Definition comes_from_the_left (P Q:Prop)(H:P \/ Q): Prop := match H with | or_introl p => True | or_intror q => False end. 
Error: Incorrect elimination of "H" in the inductive type "or", the return type has sort "Type" while it should be "Prop" Elimination of an inductive object of sort "Prop" is not allowed on a predicate in sort "Type" because proofs can be eliminated only to build proofs *) Definition comes_from_the_left_sumbool (P Q:Prop)(x:{P}+{Q}): Prop := match x with | left p => True | right q => False end. Close Scope Z_scope. Theorem S_is_not_O : forall n, S n <> 0. Definition Is_zero (x:nat):= match x with | 0 => True | _ => False end. Lemma O_is_zero : forall m, m = 0 -> Is_zero m. Proof. intros m H; subst m. (* ============================ Is_zero 0 *) simpl;trivial. Qed. red; intros n Hn. apply O_is_zero with (m := S n). assumption. Qed. Theorem disc2 : forall n, S (S n) <> 1. Proof. intros n Hn; discriminate. Qed. Theorem disc3 : forall n, S (S n) = 0 -> forall Q:Prop, Q. Proof. intros n Hn Q. discriminate. Qed. Theorem inj_succ : forall n m, S n = S m -> n = m. Proof. Lemma inj_pred : forall n m, n = m -> pred n = pred m. Proof. intros n m eq_n_m. rewrite eq_n_m. trivial. Qed. intros n m eq_Sn_Sm. apply inj_pred with (n:= S n) (m := S m); assumption. Qed. Lemma list_inject : forall (A:Type)(a b :A)(l l':list A), a :: b :: l = b :: a :: l' -> a = b /\ l = l'. Proof. intros A a b l l' e. injection e. auto. Qed. Theorem not_le_Sn_0 : forall n:nat, ~ (S n <= 0). Proof. red; intros n H. case H. Undo. Lemma not_le_Sn_0_with_constraints : forall n p , S n <= p -> p = 0 -> False. Proof. intros n p H; case H ; intros; discriminate. Qed. eapply not_le_Sn_0_with_constraints; eauto. Qed. Theorem not_le_Sn_0' : forall n:nat, ~ (S n <= 0). Proof. red; intros n H ; inversion H. Qed. Derive Inversion le_Sn_0_inv with (forall n :nat, S n <= 0). Check le_Sn_0_inv. Theorem le_Sn_0'' : forall n p : nat, ~ S n <= 0 . Proof. intros n p H; inversion H using le_Sn_0_inv. Qed. Derive Inversion_clear le_Sn_0_inv' with (forall n :nat, S n <= 0). Check le_Sn_0_inv'. Theorem le_reverse_rules : forall n m:nat, n <= m -> n = m \/ exists p, n <= p /\ m = S p. Proof. intros n m H; inversion H. left;trivial. right; exists m0; split; trivial. Restart. intros n m H; inversion_clear H. left;trivial. right; exists m0; split; trivial. Qed. Inductive ArithExp : Set := Zero : ArithExp | Succ : ArithExp -> ArithExp | Plus : ArithExp -> ArithExp -> ArithExp. Inductive RewriteRel : ArithExp -> ArithExp -> Prop := RewSucc : forall e1 e2 :ArithExp, RewriteRel e1 e2 -> RewriteRel (Succ e1) (Succ e2) | RewPlus0 : forall e:ArithExp, RewriteRel (Plus Zero e) e | RewPlusS : forall e1 e2:ArithExp, RewriteRel e1 e2 -> RewriteRel (Plus (Succ e1) e2) (Succ (Plus e1 e2)). Fixpoint plus (n p:nat) {struct n} : nat := match n with | 0 => p | S m => S (plus m p) end. Fixpoint plus' (n p:nat) {struct p} : nat := match p with | 0 => n | S q => S (plus' n q) end. Fixpoint plus'' (n p:nat) {struct n} : nat := match n with | 0 => p | S m => plus'' m (S p) end. Fixpoint even_test (n:nat) : bool := match n with 0 => true | 1 => false | S (S p) => even_test p end. Reset even_test. Fixpoint even_test (n:nat) : bool := match n with | 0 => true | S p => odd_test p end with odd_test (n:nat) : bool := match n with | 0 => false | S p => even_test p end. Eval simpl in even_test. Eval simpl in (fun x : nat => even_test x). Eval simpl in (fun x : nat => plus 5 x). Eval simpl in (fun x : nat => even_test (plus 5 x)). Eval simpl in (fun x : nat => even_test (plus x 5)). Section Principle_of_Induction. Variable P : nat -> Prop. Hypothesis base_case : P 0. 
Hypothesis inductive_step : forall n:nat, P n -> P (S n). Fixpoint nat_ind (n:nat) : (P n) := match n return P n with | 0 => base_case | S m => inductive_step m (nat_ind m) end. End Principle_of_Induction. Scheme Even_induction := Minimality for even Sort Prop with Odd_induction := Minimality for odd Sort Prop. Theorem even_plus_four : forall n:nat, even n -> even (4+n). Proof. intros n H. elim H using Even_induction with (P0 := fun n => odd (4+n)); simpl;repeat constructor;assumption. Qed. Section Principle_of_Double_Induction. Variable P : nat -> nat ->Prop. Hypothesis base_case1 : forall x:nat, P 0 x. Hypothesis base_case2 : forall x:nat, P (S x) 0. Hypothesis inductive_step : forall n m:nat, P n m -> P (S n) (S m). Fixpoint nat_double_ind (n m:nat){struct n} : P n m := match n, m return P n m with | 0 , x => base_case1 x | (S x), 0 => base_case2 x | (S x), (S y) => inductive_step x y (nat_double_ind x y) end. End Principle_of_Double_Induction. Section Principle_of_Double_Recursion. Variable P : nat -> nat -> Type. Hypothesis base_case1 : forall x:nat, P 0 x. Hypothesis base_case2 : forall x:nat, P (S x) 0. Hypothesis inductive_step : forall n m:nat, P n m -> P (S n) (S m). Fixpoint nat_double_rect (n m:nat){struct n} : P n m := match n, m return P n m with | 0 , x => base_case1 x | (S x), 0 => base_case2 x | (S x), (S y) => inductive_step x y (nat_double_rect x y) end. End Principle_of_Double_Recursion. Definition min : nat -> nat -> nat := nat_double_rect (fun (x y:nat) => nat) (fun (x:nat) => 0) (fun (y:nat) => 0) (fun (x y r:nat) => S r). Eval compute in (min 5 8). Eval compute in (min 8 5). Lemma not_circular : forall n:nat, n <> S n. Proof. intro n. apply nat_ind with (P:= fun n => n <> S n). discriminate. red; intros n0 Hn0 eqn0Sn0;injection eqn0Sn0;trivial. Qed. Definition eq_nat_dec : forall n p:nat , {n=p}+{n <> p}. Proof. intros n p. apply nat_double_rect with (P:= fun (n q:nat) => {q=p}+{q <> p}). Undo. pattern p,n. elim n using nat_double_rect. destruct x; auto. destruct x; auto. intros n0 m H; case H. intro eq; rewrite eq ; auto. intro neg; right; red ; injection 1; auto. Defined. Definition eq_nat_dec' : forall n p:nat, {n=p}+{n <> p}. decide equality. Defined. Require Import Le. Lemma le'_le : forall n p, le' n p -> n <= p. Proof. induction 1;auto with arith. Qed. Lemma le'_n_Sp : forall n p, le' n p -> le' n (S p). Proof. induction 1;auto. Qed. Hint Resolve le'_n_Sp. Lemma le_le' : forall n p, n<=p -> le' n p. Proof. induction 1;auto with arith. Qed. Print Acc. Require Import Minus. (* Fixpoint div (x y:nat){struct x}: nat := if eq_nat_dec x 0 then 0 else if eq_nat_dec y 0 then x else S (div (x-y) y). Error: Recursive definition of div is ill-formed. In environment div : nat -> nat -> nat x : nat y : nat _ : x <> 0 _ : y <> 0 Recursive call to div has principal argument equal to "x - y" instead of a subterm of x *) Lemma minus_smaller_S: forall x y:nat, x - y < S x. Proof. intros x y; pattern y, x; elim x using nat_double_ind. destruct x0; auto with arith. simpl; auto with arith. simpl; auto with arith. Qed. Lemma minus_smaller_positive : forall x y:nat, x <>0 -> y <> 0 -> x - y < x. Proof. destruct x; destruct y; ( simpl;intros; apply minus_smaller_S || intros; absurd (0=0); auto). Qed. Definition minus_decrease : forall x y:nat, Acc lt x -> x <> 0 -> y <> 0 -> Acc lt (x-y). Proof. intros x y H; case H. intros Hz posz posy. apply Hz; apply minus_smaller_positive; assumption. Defined. Print minus_decrease. Definition div_aux (x y:nat)(H: Acc lt x):nat. fix 3. intros. 
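(* Note: "fix 3" builds a fixpoint whose third argument, the accessibility
   proof H, is the structurally decreasing one; the recursive call in the
   refine below is accepted because minus_decrease produces the new
   accessibility proof by case analysis on H. *)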
refine (if eq_nat_dec x 0 then 0 else if eq_nat_dec y 0 then y else div_aux (x-y) y _). apply (minus_decrease x y H);assumption. Defined. Print div_aux. (* div_aux = (fix div_aux (x y : nat) (H : Acc lt x) {struct H} : nat := match eq_nat_dec x 0 with | left _ => 0 | right _ => match eq_nat_dec y 0 with | left _ => y | right _0 => div_aux (x - y) y (minus_decrease x y H _ _0) end end) : forall x : nat, nat -> Acc lt x -> nat *) Require Import Wf_nat. Definition div x y := div_aux x y (lt_wf x). Extraction div. (* let div x y = div_aux x y *) Extraction div_aux. (* let rec div_aux x y = match eq_nat_dec x O with | Left -> O | Right -> (match eq_nat_dec y O with | Left -> y | Right -> div_aux (minus x y) y) *) Lemma vector0_is_vnil : forall (A:Type)(v:vector A 0), v = Vnil A. Proof. intros A v;inversion v. Abort. (* Lemma vector0_is_vnil_aux : forall (A:Type)(n:nat)(v:vector A n), n= 0 -> v = Vnil A. Toplevel input, characters 40281-40287 > Lemma vector0_is_vnil_aux : forall (A:Set)(n:nat)(v:vector A n), n= 0 -> v = Vnil A. > ^^^^^^ Error: In environment A : Set n : nat v : vector A n e : n = 0 The term "Vnil A" has type "vector A 0" while it is expected to have type "vector A n" *) Require Import JMeq. (* On devrait changer Set en Type ? *) Lemma vector0_is_vnil_aux : forall (A:Type)(n:nat)(v:vector A n), n= 0 -> JMeq v (Vnil A). Proof. destruct v. auto. intro; discriminate. Qed. Lemma vector0_is_vnil : forall (A:Type)(v:vector A 0), v = Vnil A. Proof. intros a v;apply JMeq_eq. apply vector0_is_vnil_aux. trivial. Qed. Implicit Arguments Vcons [A n]. Implicit Arguments Vnil [A]. Implicit Arguments Vhead [A n]. Implicit Arguments Vtail [A n]. Definition Vid : forall (A : Type)(n:nat), vector A n -> vector A n. Proof. destruct n; intro v. exact Vnil. exact (Vcons (Vhead v) (Vtail v)). Defined. Eval simpl in (fun (A:Type)(v:vector A 0) => (Vid _ _ v)). Eval simpl in (fun (A:Type)(v:vector A 0) => v). Lemma Vid_eq : forall (n:nat) (A:Type)(v:vector A n), v=(Vid _ n v). Proof. destruct v. reflexivity. reflexivity. Defined. Theorem zero_nil : forall A (v:vector A 0), v = Vnil. Proof. intros. change (Vnil (A:=A)) with (Vid _ 0 v). apply Vid_eq. Defined. Theorem decomp : forall (A : Type) (n : nat) (v : vector A (S n)), v = Vcons (Vhead v) (Vtail v). Proof. intros. change (Vcons (Vhead v) (Vtail v)) with (Vid _ (S n) v). apply Vid_eq. Defined. Definition vector_double_rect : forall (A:Type) (P: forall (n:nat),(vector A n)->(vector A n) -> Type), P 0 Vnil Vnil -> (forall n (v1 v2 : vector A n) a b, P n v1 v2 -> P (S n) (Vcons a v1) (Vcons b v2)) -> forall n (v1 v2 : vector A n), P n v1 v2. induction n. intros; rewrite (zero_nil _ v1); rewrite (zero_nil _ v2). auto. intros v1 v2; rewrite (decomp _ _ v1);rewrite (decomp _ _ v2). apply X0; auto. Defined. Require Import Bool. Definition bitwise_or n v1 v2 : vector bool n := vector_double_rect bool (fun n v1 v2 => vector bool n) Vnil (fun n v1 v2 a b r => Vcons (orb a b) r) n v1 v2. Fixpoint vector_nth (A:Type)(n:nat)(p:nat)(v:vector A p){struct v} : option A := match n,v with _ , Vnil => None | 0 , Vcons b _ _ => Some b | S n', Vcons _ p' v' => vector_nth A n' p' v' end. Implicit Arguments vector_nth [A p]. Lemma nth_bitwise : forall (n:nat) (v1 v2: vector bool n) i a b, vector_nth i v1 = Some a -> vector_nth i v2 = Some b -> vector_nth i (bitwise_or _ v1 v2) = Some (orb a b). Proof. intros n v1 v2; pattern n,v1,v2. apply vector_double_rect. simpl. destruct i; discriminate 1. destruct i; simpl;auto. 
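(* Comment added for guidance (not in the original proof script): after
   the previous line only the head case (i = 0) of the step remains;
   injection on the two "Some ... = Some ..." premises identifies the
   heads with a and b, and auto closes the goal. *)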
injection 1; injection 2;intros; subst a; subst b; auto. Qed. Set Implicit Arguments. CoInductive Stream (A:Type) : Type := | Cons : A -> Stream A -> Stream A. CoInductive LList (A: Type) : Type := | LNil : LList A | LCons : A -> LList A -> LList A. Definition head (A:Type)(s : Stream A) := match s with Cons a s' => a end. Definition tail (A : Type)(s : Stream A) := match s with Cons a s' => s' end. CoFixpoint repeat (A:Type)(a:A) : Stream A := Cons a (repeat a). CoFixpoint iterate (A: Type)(f: A -> A)(a : A) : Stream A:= Cons a (iterate f (f a)). CoFixpoint map (A B:Type)(f: A -> B)(s : Stream A) : Stream B:= match s with Cons a tl => Cons (f a) (map f tl) end. Eval simpl in (fun (A:Type)(a:A) => repeat a). Eval simpl in (fun (A:Type)(a:A) => head (repeat a)). CoInductive EqSt (A: Type) : Stream A -> Stream A -> Prop := eqst : forall s1 s2: Stream A, head s1 = head s2 -> EqSt (tail s1) (tail s2) -> EqSt s1 s2. Section Parks_Principle. Variable A : Type. Variable R : Stream A -> Stream A -> Prop. Hypothesis bisim1 : forall s1 s2:Stream A, R s1 s2 -> head s1 = head s2. Hypothesis bisim2 : forall s1 s2:Stream A, R s1 s2 -> R (tail s1) (tail s2). CoFixpoint park_ppl : forall s1 s2:Stream A, R s1 s2 -> EqSt s1 s2 := fun s1 s2 (p : R s1 s2) => eqst s1 s2 (bisim1 p) (park_ppl (bisim2 p)). End Parks_Principle. Theorem map_iterate : forall (A:Type)(f:A->A)(x:A), EqSt (iterate f (f x)) (map f (iterate f x)). Proof. intros A f x. apply park_ppl with (R:= fun s1 s2 => exists x: A, s1 = iterate f (f x) /\ s2 = map f (iterate f x)). intros s1 s2 (x0,(eqs1,eqs2));rewrite eqs1;rewrite eqs2;reflexivity. intros s1 s2 (x0,(eqs1,eqs2)). exists (f x0);split;[rewrite eqs1|rewrite eqs2]; reflexivity. exists x;split; reflexivity. Qed. Ltac infiniteproof f := cofix f; constructor; [clear f| simpl; try (apply f; clear f)]. Theorem map_iterate' : forall (A:Type)(f:A->A)(x:A), EqSt (iterate f (f x)) (map f (iterate f x)). infiniteproof map_iterate'. reflexivity. Qed. Implicit Arguments LNil [A]. Lemma Lnil_not_Lcons : forall (A:Type)(a:A)(l:LList A), LNil <> (LCons a l). intros;discriminate. Qed. Lemma injection_demo : forall (A:Type)(a b : A)(l l': LList A), LCons a (LCons b l) = LCons b (LCons a l') -> a = b /\ l = l'. Proof. intros A a b l l' e; injection e; auto. Qed. Inductive Finite (A:Type) : LList A -> Prop := | Lnil_fin : Finite (LNil (A:=A)) | Lcons_fin : forall a l, Finite l -> Finite (LCons a l). CoInductive Infinite (A:Type) : LList A -> Prop := | LCons_inf : forall a l, Infinite l -> Infinite (LCons a l). Lemma LNil_not_Infinite : forall (A:Type), ~ Infinite (LNil (A:=A)). Proof. intros A H;inversion H. Qed. Lemma Finite_not_Infinite : forall (A:Type)(l:LList A), Finite l -> ~ Infinite l. Proof. intros A l H; elim H. apply LNil_not_Infinite. intros a l0 F0 I0' I1. case I0'; inversion_clear I1. trivial. Qed. Lemma Not_Finite_Infinite : forall (A:Type)(l:LList A), ~ Finite l -> Infinite l. Proof. cofix H. destruct l. intro; absurd (Finite (LNil (A:=A)));[auto|constructor]. constructor. apply H. red; intro H1;case H0. constructor. trivial. Qed. 
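(* A small additional sketch (not part of the original script): the
   names lrepeat, LList_decomp and LList_decompose below are ours.  The
   decomposition lemma forces a cofixpoint to unfold one step, which is
   what lets the cofix tactic prove that a corecursively defined LList
   is Infinite. *)
CoFixpoint lrepeat (A:Type)(a:A) : LList A := LCons a (lrepeat a).

Definition LList_decomp (A:Type)(l:LList A) : LList A :=
 match l with
 | LNil => LNil (A:=A)
 | LCons a l' => LCons a l'
 end.

Lemma LList_decompose : forall (A:Type)(l:LList A), l = LList_decomp l.
Proof.
 intros A l; case l; reflexivity.
Qed.

Lemma lrepeat_Infinite : forall (A:Type)(a:A), Infinite (lrepeat a).
Proof.
 cofix H.
 intros A a.
 rewrite (LList_decompose (lrepeat a)); simpl.
 constructor; apply H.
Qed.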
coq-8.4pl2/doc/RecTutorial/manbiblio.bib0000640000175000001440000010053711644266723017264 0ustar notinusers @STRING{toappear="To appear"} @STRING{lncs="Lecture Notes in Computer Science"} @TECHREPORT{RefManCoq, AUTHOR = {Bruno~Barras, Samuel~Boutin, Cristina~Cornes, Judical~Courant, Yann~Coscoy, David~Delahaye, Daniel~de~Rauglaudre, Jean-Christophe~Fillitre, Eduardo~Gimnez, Hugo~Herbelin, Grard~Huet, Henri~Laulhre, Csar~Muoz, Chetan~Murthy, Catherine~Parent-Vigouroux, Patrick~Loiseleur, Christine~Paulin-Mohring, Amokrane~Sabi, Benjamin~Werner}, INSTITUTION = {INRIA}, TITLE = {{The Coq Proof Assistant Reference Manual -- Version V6.2}}, YEAR = {1998} } @INPROCEEDINGS{Aud91, AUTHOR = {Ph. Audebaud}, BOOKTITLE = {Proceedings of the sixth Conf. on Logic in Computer Science.}, PUBLISHER = {IEEE}, TITLE = {Partial {Objects} in the {Calculus of Constructions}}, YEAR = {1991} } @PHDTHESIS{Aud92, AUTHOR = {Ph. Audebaud}, SCHOOL = {{Universit\'e} Bordeaux I}, TITLE = {Extension du Calcul des Constructions par Points fixes}, YEAR = {1992} } @INPROCEEDINGS{Audebaud92b, AUTHOR = {Ph. Audebaud}, BOOKTITLE = {{Proceedings of the 1992 Workshop on Types for Proofs and Programs}}, EDITOR = {{B. Nordstr\"om and K. Petersson and G. Plotkin}}, NOTE = {Also Research Report LIP-ENS-Lyon}, PAGES = {pp 21--34}, TITLE = {{CC+ : an extension of the Calculus of Constructions with fixpoints}}, YEAR = {1992} } @INPROCEEDINGS{Augustsson85, AUTHOR = {L. Augustsson}, TITLE = {{Compiling Pattern Matching}}, BOOKTITLE = {Conference Functional Programming and Computer Architecture}, YEAR = {1985} } @INPROCEEDINGS{EG94a, AUTHOR = {E. Gim\'enez}, EDITORS = {P. Dybjer and B. Nordstr\"om and J. Smith}, BOOKTITLE = {Workshop on Types for Proofs and Programs}, PAGES = {39-59}, SERIES = {LNCS}, NUMBER = {996}, TITLE = {{Codifying guarded definitions with recursive schemes}}, YEAR = {1994}, PUBLISHER = {Springer-Verlag}, } @INPROCEEDINGS{EG95a, AUTHOR = {E. Gim\'enez}, BOOKTITLE = {Workshop on Types for Proofs and Programs}, SERIES = {LNCS}, NUMBER = {1158}, PAGES = {135-152}, TITLE = {An application of co-Inductive types in Coq: verification of the Alternating Bit Protocol}, EDITORS = {S. Berardi and M. Coppo}, PUBLISHER = {Springer-Verlag}, YEAR = {1995} } @PhdThesis{EG96, author = {E. Gim\'enez}, title = {A Calculus of Infinite Constructions and its application to the verification of communicating systems}, school = {Ecole Normale Sup\'erieure de Lyon}, year = {1996} } @ARTICLE{BaCo85, AUTHOR = {J.L. Bates and R.L. Constable}, JOURNAL = {ACM transactions on Programming Languages and Systems}, TITLE = {Proofs as {Programs}}, VOLUME = {7}, YEAR = {1985} } @BOOK{Bar81, AUTHOR = {H.P. Barendregt}, PUBLISHER = {North-Holland}, TITLE = {The Lambda Calculus its Syntax and Semantics}, YEAR = {1981} } @TECHREPORT{Bar91, AUTHOR = {H. Barendregt}, INSTITUTION = {Catholic University Nijmegen}, NOTE = {In Handbook of Logic in Computer Science, Vol II}, NUMBER = {91-19}, TITLE = {Lambda {Calculi with Types}}, YEAR = {1991} } @BOOK{Bastad92, EDITOR = {B. Nordstr\"om and K. Petersson and G. Plotkin}, PUBLISHER = {Available by ftp at site ftp.inria.fr}, TITLE = {Proceedings of the 1992 Workshop on Types for Proofs and Programs}, YEAR = {1992} } @BOOK{Bee85, AUTHOR = {M.J. Beeson}, PUBLISHER = {Springer-Verlag}, TITLE = {Foundations of Constructive Mathematics, Metamathematical Studies}, YEAR = {1985} } @ARTICLE{BeKe92, AUTHOR = {G. Bellin and J. 
Ketonen}, JOURNAL = {Theoretical Computer Science}, PAGES = {115--142}, TITLE = {A decision procedure revisited : Notes on direct logic, linear logic and its implementation}, VOLUME = {95}, YEAR = {1992} } @BOOK{Bis67, AUTHOR = {E. Bishop}, PUBLISHER = {McGraw-Hill}, TITLE = {Foundations of Constructive Analysis}, YEAR = {1967} } @BOOK{BoMo79, AUTHOR = {R.S. Boyer and J.S. Moore}, KEY = {BoMo79}, PUBLISHER = {Academic Press}, SERIES = {ACM Monograph}, TITLE = {A computational logic}, YEAR = {1979} } @MASTERSTHESIS{Bou92, AUTHOR = {S. Boutin}, MONTH = sep, SCHOOL = {{Universit\'e Paris 7}}, TITLE = {Certification d'un compilateur {ML en Coq}}, YEAR = {1992} } @ARTICLE{Bru72, AUTHOR = {N.J. de Bruijn}, JOURNAL = {Indag. Math.}, TITLE = {{Lambda-Calculus Notation with Nameless Dummies, a Tool for Automatic Formula Manipulation, with Application to the Church-Rosser Theorem}}, VOLUME = {34}, YEAR = {1972} } @INCOLLECTION{Bru80, AUTHOR = {N.J. de Bruijn}, BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, EDITOR = {J.P. Seldin and J.R. Hindley}, PUBLISHER = {Academic Press}, TITLE = {A survey of the project {Automath}}, YEAR = {1980} } @TECHREPORT{Leroy90, AUTHOR = {X. Leroy}, TITLE = {The {ZINC} experiment: an economical implementation of the {ML} language}, INSTITUTION = {INRIA}, NUMBER = {117}, YEAR = {1990} } @BOOK{Caml, AUTHOR = {P. Weis and X. Leroy}, PUBLISHER = {InterEditions}, TITLE = {Le langage Caml}, YEAR = {1993} } @TECHREPORT{CoC89, AUTHOR = {Projet Formel}, INSTITUTION = {INRIA}, NUMBER = {110}, TITLE = {{The Calculus of Constructions. Documentation and user's guide, Version 4.10}}, YEAR = {1989} } @INPROCEEDINGS{CoHu85a, AUTHOR = {Th. Coquand and G. Huet}, ADDRESS = {Linz}, BOOKTITLE = {EUROCAL'85}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{Constructions : A Higher Order Proof System for Mechanizing Mathematics}}, VOLUME = {203}, YEAR = {1985} } @Misc{Bar98, author = {B. Barras}, title = {A formalisation of \uppercase{B}urali-\uppercase{F}orti's paradox in Coq}, howpublished = {Distributed within the bunch of contribution to the Coq system}, year = {1998}, month = {March}, note = {\texttt{http://pauillac.inria.fr/coq}} } @INPROCEEDINGS{CoHu85b, AUTHOR = {Th. Coquand and G. Huet}, BOOKTITLE = {Logic Colloquium'85}, EDITOR = {The Paris Logic Group}, PUBLISHER = {North-Holland}, TITLE = {{Concepts Math\'ematiques et Informatiques formalis\'es dans le Calcul des Constructions}}, YEAR = {1987} } @ARTICLE{CoHu86, AUTHOR = {Th. Coquand and G. Huet}, JOURNAL = {Information and Computation}, NUMBER = {2/3}, TITLE = {The {Calculus of Constructions}}, VOLUME = {76}, YEAR = {1988} } @BOOK{Con86, AUTHOR = {R.L. {Constable et al.}}, PUBLISHER = {Prentice-Hall}, TITLE = {{Implementing Mathematics with the Nuprl Proof Development System}}, YEAR = {1986} } @INPROCEEDINGS{CoPa89, AUTHOR = {Th. Coquand and C. Paulin-Mohring}, BOOKTITLE = {Proceedings of Colog'88}, EDITOR = {P. Martin-L{\"o}f and G. Mints}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {Inductively defined types}, VOLUME = {417}, YEAR = {1990} } @PHDTHESIS{Coq85, AUTHOR = {Th. Coquand}, MONTH = jan, SCHOOL = {Universit\'e Paris~7}, TITLE = {Une Th\'eorie des Constructions}, YEAR = {1985} } @INPROCEEDINGS{Coq86, AUTHOR = {Th. Coquand}, ADDRESS = {Cambridge, MA}, BOOKTITLE = {Symposium on Logic in Computer Science}, PUBLISHER = {IEEE Computer Society Press}, TITLE = {{An Analysis of Girard's Paradox}}, YEAR = {1986} } @INPROCEEDINGS{Coq90, AUTHOR = {Th. 
Coquand}, BOOKTITLE = {Logic and Computer Science}, EDITOR = {P. Oddifredi}, NOTE = {INRIA Research Report 1088, also in~\cite{CoC89}}, PUBLISHER = {Academic Press}, TITLE = {{Metamathematical Investigations of a Calculus of Constructions}}, YEAR = {1990} } @INPROCEEDINGS{Coq92, AUTHOR = {Th. Coquand}, BOOKTITLE = {in \cite{Bastad92}}, TITLE = {{Pattern Matching with Dependent Types}}, YEAR = {1992}, crossref = {Bastad92} } @TECHREPORT{COQ93, AUTHOR = {G. Dowek and A. Felty and H. Herbelin and G. Huet and C. Murthy and C. Parent and C. Paulin-Mohring and B. Werner}, INSTITUTION = {INRIA}, MONTH = may, NUMBER = {154}, TITLE = {{The Coq Proof Assistant User's Guide Version 5.8}}, YEAR = {1993} } @INPROCEEDINGS{Coquand93, AUTHOR = {Th. Coquand}, BOOKTITLE = {in \cite{Nijmegen93}}, TITLE = {{Infinite Objects in Type Theory}}, YEAR = {1993}, crossref = {Nijmegen93} } @MASTERSTHESIS{Cou94a, AUTHOR = {J. Courant}, MONTH = sep, SCHOOL = {DEA d'Informatique, ENS Lyon}, TITLE = {Explicitation de preuves par r\'ecurrence implicite}, YEAR = {1994} } @TECHREPORT{CPar93, AUTHOR = {C. Parent}, INSTITUTION = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, MONTH = oct, NOTE = {Also in~\cite{Nijmegen93}}, NUMBER = {93-29}, TITLE = {Developing certified programs in the system {Coq}- {The} {Program} tactic}, YEAR = {1993} } @PHDTHESIS{CPar95, AUTHOR = {C. Parent}, SCHOOL = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, TITLE = {{Synth\`ese de preuves de programmes dans le Calcul des Constructions Inductives}}, YEAR = {1995} } @TECHREPORT{Dow90, AUTHOR = {G. Dowek}, INSTITUTION = {INRIA}, NUMBER = {1283}, TITLE = {{Naming and Scoping in a Mathematical Vernacular}}, TYPE = {Research Report}, YEAR = {1990} } @ARTICLE{Dow91a, AUTHOR = {G. Dowek}, JOURNAL = {{Compte Rendu de l'Acad\'emie des Sciences}}, NOTE = {(The undecidability of Third Order Pattern Matching in Calculi with Dependent Types or Type Constructors)}, NUMBER = {12}, PAGES = {951--956}, TITLE = {{L'Ind\'ecidabilit\'e du Filtrage du Troisi\`eme Ordre dans les Calculs avec Types D\'ependants ou Constructeurs de Types}}, VOLUME = {I, 312}, YEAR = {1991} } @INPROCEEDINGS{Dow91b, AUTHOR = {G. Dowek}, BOOKTITLE = {Proceedings of Mathematical Foundation of Computer Science}, NOTE = {Also INRIA Research Report}, PAGES = {151--160}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{A Second Order Pattern Matching Algorithm in the Cube of Typed {$\lambda$}-calculi}}, VOLUME = {520}, YEAR = {1991} } @PHDTHESIS{Dow91c, AUTHOR = {G. Dowek}, MONTH = dec, SCHOOL = {{Universit\'e Paris 7}}, TITLE = {{D\'emonstration automatique dans le Calcul des Constructions}}, YEAR = {1991} } @ARTICLE{dowek93, AUTHOR = {G. Dowek}, TITLE = {{A Complete Proof Synthesis Method for the Cube of Type Systems}}, JOURNAL = {Journal Logic Computation}, VOLUME = {3}, NUMBER = {3}, PAGES = {287--315}, MONTH = {June}, YEAR = {1993} } @UNPUBLISHED{Dow92a, AUTHOR = {G. Dowek}, NOTE = {To appear in Theoretical Computer Science}, TITLE = {{The Undecidability of Pattern Matching in Calculi where Primitive Recursive Functions are Representable}}, YEAR = {1992} } @ARTICLE{Dow94a, AUTHOR = {G. Dowek}, JOURNAL = {Annals of Pure and Applied Logic}, VOLUME = {69}, PAGES = {135--155}, TITLE = {Third order matching is decidable}, YEAR = {1994} } @INPROCEEDINGS{Dow94b, AUTHOR = {G. 
Dowek}, BOOKTITLE = {Proceedings of the second international conference on typed lambda calculus and applications}, TITLE = {{Lambda-calculus, Combinators and the Comprehension Schema}}, YEAR = {1995} } @INPROCEEDINGS{Dyb91, AUTHOR = {P. Dybjer}, BOOKTITLE = {Logical Frameworks}, EDITOR = {G. Huet and G. Plotkin}, PAGES = {59--79}, PUBLISHER = {Cambridge University Press}, TITLE = {{Inductive sets and families in {Martin-L{\"o}f's Type Theory} and their set-theoretic semantics : An inversion principle for {Martin-L\"of's} type theory}}, VOLUME = {14}, YEAR = {1991} } @ARTICLE{Dyc92, AUTHOR = {Roy Dyckhoff}, JOURNAL = {The Journal of Symbolic Logic}, MONTH = sep, NUMBER = {3}, TITLE = {Contraction-free sequent calculi for intuitionistic logic}, VOLUME = {57}, YEAR = {1992} } @MASTERSTHESIS{Fil94, AUTHOR = {J.-C. Filli\^atre}, MONTH = sep, SCHOOL = {DEA d'Informatique, ENS Lyon}, TITLE = {Une proc\'edure de d\'ecision pour le {C}alcul des {P}r\'edicats {D}irect. {E}tude et impl\'ementation dans le syst\`eme {C}oq}, YEAR = {1994} } @TECHREPORT{Filliatre95, AUTHOR = {J.-C. Filli\^atre}, INSTITUTION = {LIP-ENS-Lyon}, TITLE = {{A decision procedure for Direct Predicate Calculus}}, TYPE = {Research report}, NUMBER = {96--25}, YEAR = {1995} } @UNPUBLISHED{Fle90, AUTHOR = {E. Fleury}, MONTH = jul, NOTE = {Rapport de Stage}, TITLE = {Implantation des algorithmes de {Floyd et de Dijkstra} dans le {Calcul des Constructions}}, YEAR = {1990} } @TechReport{Gim98, author = {E. Gim\'nez}, title = {A Tutorial on Recursive Types in Coq}, institution = {INRIA}, year = {1998} } @TECHREPORT{HKP97, author = {G. Huet and G. Kahn and Ch. Paulin-Mohring}, title = {The {Coq} Proof Assistant - A tutorial, Version 6.1}, institution = {INRIA}, type = {rapport technique}, month = {Aot}, year = {1997}, note = {Version rvise distribue avec {Coq}}, number = {204}, } @INPROCEEDINGS{Gir70, AUTHOR = {J.-Y. Girard}, BOOKTITLE = {Proceedings of the 2nd Scandinavian Logic Symposium}, PUBLISHER = {North-Holland}, TITLE = {Une extension de l'interpr\'etation de {G\"odel} \`a l'analyse, et son application \`a l'\'elimination des coupures dans l'analyse et la th\'eorie des types}, YEAR = {1970} } @PHDTHESIS{Gir72, AUTHOR = {J.-Y. Girard}, SCHOOL = {Universit\'e Paris~7}, TITLE = {Interpr\'etation fonctionnelle et \'elimination des coupures de l'arithm\'etique d'ordre sup\'erieur}, YEAR = {1972} } @BOOK{Gir89, AUTHOR = {J.-Y. Girard and Y. Lafont and P. Taylor}, PUBLISHER = {Cambridge University Press}, SERIES = {Cambridge Tracts in Theoretical Computer Science 7}, TITLE = {Proofs and Types}, YEAR = {1989} } @MASTERSTHESIS{Hir94, AUTHOR = {D. Hirschkoff}, MONTH = sep, SCHOOL = {DEA IARFA, Ecole des Ponts et Chauss\'ees, Paris}, TITLE = {{Ecriture d'une tactique arithm\'etique pour le syst\`eme Coq}}, YEAR = {1994} } @INCOLLECTION{How80, AUTHOR = {W.A. Howard}, BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, EDITOR = {J.P. Seldin and J.R. Hindley}, NOTE = {Unpublished 1969 Manuscript}, PUBLISHER = {Academic Press}, TITLE = {The Formulae-as-Types Notion of Constructions}, YEAR = {1980} } @INCOLLECTION{HuetLevy79, AUTHOR = {G. Huet and J.-J. L\'{e}vy}, TITLE = {Call by Need Computations in Non-Ambigous Linear Term Rewriting Systems}, NOTE = {Also research report 359, INRIA, 1979}, BOOKTITLE = {Computational Logic, Essays in Honor of Alan Robinson}, EDITOR = {J.-L. Lassez and G. Plotkin}, PUBLISHER = {The MIT press}, YEAR = {1991} } @INPROCEEDINGS{Hue87, AUTHOR = {G. 
Huet}, BOOKTITLE = {Programming of Future Generation Computers}, EDITOR = {K. Fuchi and M. Nivat}, NOTE = {Also in Proceedings of TAPSOFT87, LNCS 249, Springer-Verlag, 1987, pp 276--286}, PUBLISHER = {Elsevier Science}, TITLE = {Induction Principles Formalized in the {Calculus of Constructions}}, YEAR = {1988} } @INPROCEEDINGS{Hue88, AUTHOR = {G. Huet}, BOOKTITLE = {A perspective in Theoretical Computer Science. Commemorative Volume for Gift Siromoney}, EDITOR = {R. Narasimhan}, NOTE = {Also in~\cite{CoC89}}, PUBLISHER = {World Scientific Publishing}, TITLE = {{The Constructive Engine}}, YEAR = {1989} } @BOOK{Hue89, EDITOR = {G. Huet}, PUBLISHER = {Addison-Wesley}, SERIES = {The UT Year of Programming Series}, TITLE = {Logical Foundations of Functional Programming}, YEAR = {1989} } @INPROCEEDINGS{Hue92, AUTHOR = {G. Huet}, BOOKTITLE = {Proceedings of 12th FST/TCS Conference, New Delhi}, PAGES = {229--240}, PUBLISHER = {Springer Verlag}, SERIES = {LNCS}, TITLE = {{The Gallina Specification Language : A case study}}, VOLUME = {652}, YEAR = {1992} } @ARTICLE{Hue94, AUTHOR = {G. Huet}, JOURNAL = {J. Functional Programming}, PAGES = {371--394}, PUBLISHER = {Cambridge University Press}, TITLE = {Residual theory in $\lambda$-calculus: a formal development}, VOLUME = {4,3}, YEAR = {1994} } @ARTICLE{KeWe84, AUTHOR = {J. Ketonen and R. Weyhrauch}, JOURNAL = {Theoretical Computer Science}, PAGES = {297--307}, TITLE = {A decidable fragment of {P}redicate {C}alculus}, VOLUME = {32}, YEAR = {1984} } @BOOK{Kle52, AUTHOR = {S.C. Kleene}, PUBLISHER = {North-Holland}, SERIES = {Bibliotheca Mathematica}, TITLE = {Introduction to Metamathematics}, YEAR = {1952} } @BOOK{Kri90, AUTHOR = {J.-L. Krivine}, PUBLISHER = {Masson}, SERIES = {Etudes et recherche en informatique}, TITLE = {Lambda-calcul {types et mod\`eles}}, YEAR = {1990} } @ARTICLE{Laville91, AUTHOR = {A. Laville}, TITLE = {Comparison of Priority Rules in Pattern Matching and Term Rewriting}, JOURNAL = {Journal of Symbolic Computation}, VOLUME = {11}, PAGES = {321--347}, YEAR = {1991} } @BOOK{LE92, EDITOR = {G. Huet and G. Plotkin}, PUBLISHER = {Cambridge University Press}, TITLE = {Logical Environments}, YEAR = {1992} } @INPROCEEDINGS{LePa94, AUTHOR = {F. Leclerc and C. Paulin-Mohring}, BOOKTITLE = {{Types for Proofs and Programs, Types' 93}}, EDITOR = {H. Barendregt and T. Nipkow}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{Programming with Streams in Coq. A case study : The Sieve of Eratosthenes}}, VOLUME = {806}, YEAR = {1994} } @BOOK{LF91, EDITOR = {G. Huet and G. Plotkin}, PUBLISHER = {Cambridge University Press}, TITLE = {Logical Frameworks}, YEAR = {1991} } @BOOK{MaL84, AUTHOR = {{P. Martin-L\"of}}, PUBLISHER = {Bibliopolis}, SERIES = {Studies in Proof Theory}, TITLE = {Intuitionistic Type Theory}, YEAR = {1984} } @INPROCEEDINGS{manoury94, AUTHOR = {P. Manoury}, TITLE = {{A User's Friendly Syntax to Define Recursive Functions as Typed $\lambda-$Terms}}, BOOKTITLE = {{Types for Proofs and Programs, TYPES'94}}, SERIES = {LNCS}, VOLUME = {996}, MONTH = jun, YEAR = {1994} } @ARTICLE{MaSi94, AUTHOR = {P. Manoury and M. Simonot}, JOURNAL = {TCS}, TITLE = {Automatizing termination proof of recursively defined function}, YEAR = {To appear} } @TECHREPORT{maranget94, AUTHOR = {L. Maranget}, INSTITUTION = {INRIA}, NUMBER = {2385}, TITLE = {{Two Techniques for Compiling Lazy Pattern Matching}}, YEAR = {1994} } @INPROCEEDINGS{Moh89a, AUTHOR = {C. 
Paulin-Mohring}, ADDRESS = {Austin}, BOOKTITLE = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, MONTH = jan, PUBLISHER = {ACM}, TITLE = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, YEAR = {1989} } @PHDTHESIS{Moh89b, AUTHOR = {C. Paulin-Mohring}, MONTH = jan, SCHOOL = {{Universit\'e Paris 7}}, TITLE = {Extraction de programmes dans le {Calcul des Constructions}}, YEAR = {1989} } @INPROCEEDINGS{Moh93, AUTHOR = {C. Paulin-Mohring}, BOOKTITLE = {Proceedings of the conference Typed Lambda Calculi and Applications}, EDITOR = {M. Bezem and J.-F. Groote}, NOTE = {Also LIP research report 92-49, ENS Lyon}, NUMBER = {664}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{Inductive Definitions in the System Coq - Rules and Properties}}, YEAR = {1993} } @MASTERSTHESIS{Mun94, AUTHOR = {C. Mu\~noz}, MONTH = sep, SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, TITLE = {D\'emonstration automatique dans la logique propositionnelle intuitionniste}, YEAR = {1994} } @BOOK{Nijmegen93, EDITOR = {H. Barendregt and T. Nipkow}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {Types for Proofs and Programs}, VOLUME = {806}, YEAR = {1994} } @BOOK{NoPS90, AUTHOR = {B. {Nordstr\"om} and K. Peterson and J. Smith}, BOOKTITLE = {Information Processing 83}, PUBLISHER = {Oxford Science Publications}, SERIES = {International Series of Monographs on Computer Science}, TITLE = {Programming in {Martin-L\"of's} Type Theory}, YEAR = {1990} } @ARTICLE{Nor88, AUTHOR = {B. {Nordstr\"om}}, JOURNAL = {BIT}, TITLE = {Terminating General Recursion}, VOLUME = {28}, YEAR = {1988} } @BOOK{Odi90, EDITOR = {P. Odifreddi}, PUBLISHER = {Academic Press}, TITLE = {Logic and Computer Science}, YEAR = {1990} } @INPROCEEDINGS{PaMS92, AUTHOR = {M. Parigot and P. Manoury and M. Simonot}, ADDRESS = {St. Petersburg, Russia}, BOOKTITLE = {Logic Programming and automated reasoning}, EDITOR = {A. Voronkov}, MONTH = jul, NUMBER = {624}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{ProPre : A Programming language with proofs}}, YEAR = {1992} } @ARTICLE{Par92, AUTHOR = {M. Parigot}, JOURNAL = {Theoretical Computer Science}, NUMBER = {2}, PAGES = {335--356}, TITLE = {{Recursive Programming with Proofs}}, VOLUME = {94}, YEAR = {1992} } @INPROCEEDINGS{Parent95b, AUTHOR = {C. Parent}, BOOKTITLE = {{Mathematics of Program Construction'95}}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{Synthesizing proofs from programs in the Calculus of Inductive Constructions}}, VOLUME = {947}, YEAR = {1995} } @ARTICLE{PaWe92, AUTHOR = {C. Paulin-Mohring and B. Werner}, JOURNAL = {Journal of Symbolic Computation}, PAGES = {607--640}, TITLE = {{Synthesis of ML programs in the system Coq}}, VOLUME = {15}, YEAR = {1993} } @INPROCEEDINGS{Prasad93, AUTHOR = {K.V. Prasad}, BOOKTITLE = {{Proceedings of CONCUR'93}}, PUBLISHER = {Springer-Verlag}, SERIES = {LNCS}, TITLE = {{Programming with broadcasts}}, VOLUME = {715}, YEAR = {1993} } @INPROCEEDINGS{puel-suarez90, AUTHOR = {L.Puel and A. Su\'arez}, BOOKTITLE = {{Conference Lisp and Functional Programming}}, SERIES = {ACM}, PUBLISHER = {Springer-Verlag}, TITLE = {{Compiling Pattern Matching by Term Decomposition}}, YEAR = {1990} } @UNPUBLISHED{Rou92, AUTHOR = {J. Rouyer}, MONTH = aug, NOTE = {To appear as a technical report}, TITLE = {{D\'eveloppement de l'Algorithme d'Unification dans le Calcul des Constructions}}, YEAR = {1992} } @TECHREPORT{Saibi94, AUTHOR = {A. 
Sa\"{\i}bi}, INSTITUTION = {INRIA}, MONTH = dec, NUMBER = {2345}, TITLE = {{Axiomatization of a lambda-calculus with explicit-substitutions in the Coq System}}, YEAR = {1994} } @MASTERSTHESIS{saidi94, AUTHOR = {H. Saidi}, MONTH = sep, SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, TITLE = {R\'esolution d'\'equations dans le syst\`eme T de G\"odel}, YEAR = {1994} } @MASTERSTHESIS{Ter92, AUTHOR = {D. Terrasse}, MONTH = sep, SCHOOL = {IARFA}, TITLE = {{Traduction de TYPOL en COQ. Application \`a Mini ML}}, YEAR = {1992} } @TECHREPORT{ThBeKa92, AUTHOR = {L. Th\'ery and Y. Bertot and G. Kahn}, INSTITUTION = {INRIA Sophia}, MONTH = may, NUMBER = {1684}, TITLE = {Real theorem provers deserve real user-interfaces}, TYPE = {Research Report}, YEAR = {1992} } @BOOK{TrDa89, AUTHOR = {A.S. Troelstra and D. van Dalen}, PUBLISHER = {North-Holland}, SERIES = {Studies in Logic and the foundations of Mathematics, volumes 121 and 123}, TITLE = {Constructivism in Mathematics, an introduction}, YEAR = {1988} } @INCOLLECTION{wadler87, AUTHOR = {P. Wadler}, TITLE = {Efficient Compilation of Pattern Matching}, BOOKTITLE = {The Implementation of Functional Programming Languages}, EDITOR = {S.L. Peyton Jones}, PUBLISHER = {Prentice-Hall}, YEAR = {1987} } @PHDTHESIS{Wer94, AUTHOR = {B. Werner}, SCHOOL = {Universit\'e Paris 7}, TITLE = {Une th\'eorie des constructions inductives}, TYPE = {Th\`ese de Doctorat}, YEAR = {1994} } coq-8.4pl2/doc/RecTutorial/morebib.bib0000640000175000001440000000300110406335323016717 0ustar notinusers@book{coqart, title = "Interactive Theorem Proving and Program Development. Coq'Art: The Calculus of Inductive Constructions", author = "Yves Bertot and Pierre Castran", publisher = "Springer Verlag", series = "Texts in Theoretical Computer Science. An EATCS series", year = 2004 } @Article{Coquand:Huet, author = {Thierry Coquand and Grard Huet}, title = {The Calculus of Constructions}, journal = {Information and Computation}, year = {1988}, volume = {76}, } @INcollection{Coquand:metamathematical, author = "Thierry Coquand", title = "Metamathematical Investigations on a Calculus of Constructions", booktitle="Logic and Computer Science", year = {1990}, editor="P. Odifreddi", publisher = "Academic Press", } @Misc{coqrefman, title = {The {C}oq reference manual}, author={{C}oq {D}evelopment Team}, note= {LogiCal Project, \texttt{http://coq.inria.fr/}} } @Misc{coqsite, author= {{C}oq {D}evelopment Team}, title = {The \emph{Coq} proof assistant}, note = {Documentation, system download. 
{C}ontact: \texttt{http://coq.inria.fr/}} } @Misc{Booksite, author = {Yves Bertot and Pierre Cast\'eran}, title = {Coq'{A}rt: examples and exercises}, note = {\url{http://www.labri.fr/Perso/~casteran/CoqArt}} } @InProceedings{conor:motive, author ="Conor McBride", title = "Elimination with a motive", booktitle = "Types for Proofs and Programs'2000", volume = 2277, pages = "197-217", year = "2002", } coq-8.4pl2/doc/RecTutorial/RecTutorial.tex0000640000175000001440000035213011776416511017625 0ustar notinusers\documentclass[11pt]{article} \title{A Tutorial on [Co-]Inductive Types in Coq} \author{Eduardo Gim\'enez\thanks{Eduardo.Gimenez@inria.fr}, Pierre Cast\'eran\thanks{Pierre.Casteran@labri.fr}} \date{May 1998 --- \today} \usepackage{multirow} % \usepackage{aeguill} % \externaldocument{RefMan-gal.v} % \externaldocument{RefMan-ext.v} % \externaldocument{RefMan-tac.v} % \externaldocument{RefMan-oth} % \externaldocument{RefMan-tus.v} % \externaldocument{RefMan-syn.v} % \externaldocument{Extraction.v} \input{recmacros} \input{coqartmacros} \newcommand{\refmancite}[1]{{}} % \newcommand{\refmancite}[1]{\cite{coqrefman}} % \newcommand{\refmancite}[1]{\cite[#1] {]{coqrefman}} \usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{makeidx} % \usepackage{multind} \usepackage{alltt} \usepackage{verbatim} \usepackage{amssymb} \usepackage{amsmath} \usepackage{theorem} \usepackage[dvips]{epsfig} \usepackage{epic} \usepackage{eepic} % \usepackage{ecltree} \usepackage{moreverb} \usepackage{color} \usepackage{pifont} \usepackage{xr} \usepackage{url} \usepackage{alltt} \renewcommand{\familydefault}{ptm} \renewcommand{\seriesdefault}{m} \renewcommand{\shapedefault}{n} \newtheorem{exercise}{Exercise}[section] \makeindex \begin{document} \maketitle \begin{abstract} This document\footnote{The first versions of this document were entirely written by Eduardo Gimenez. Pierre Cast\'eran wrote the 2004 and 2006 revisions.} is an introduction to the definition and use of inductive and co-inductive types in the {\coq} proof environment. It explains how types like natural numbers and infinite streams are defined in {\coq}, and the kind of proof techniques that can be used to reason about them (case analysis, induction, inversion of predicates, co-induction, etc). Each technique is illustrated through an executable and self-contained {\coq} script. \end{abstract} %\RRkeyword{Proof environments, recursive types.} %\makeRT \addtocontents{toc}{\protect \thispagestyle{empty}} \pagenumbering{arabic} \cleardoublepage \tableofcontents \clearpage \section{About this document} This document is an introduction to the definition and use of inductive and co-inductive types in the {\coq} proof environment. It was born from the notes written for the course about the version V5.10 of {\coq}, given by Eduardo Gimenez at the Ecole Normale Sup\'erieure de Lyon in March 1996. This article is a revised and improved version of these notes for the version V8.0 of the system. We assume that the reader has some familiarity with the proofs-as-programs paradigm of Logic \cite{Coquand:metamathematical} and the generalities of the {\coq} system \cite{coqrefman}. You would take a greater advantage of this document if you first read the general tutorial about {\coq} and {\coq}'s FAQ, both available on \cite{coqsite}. A text book \cite{coqart}, accompanied with a lot of examples and exercises \cite{Booksite}, presents a detailed description of the {\coq} system and its underlying formalism: the Calculus of Inductive Construction. 
Finally, the complete description of {\coq} is given in the reference manual \cite{coqrefman}. Most of the tactics and commands we describe have several options, which we do not present exhaustively. If some script herein uses a non described feature, please refer to the Reference Manual. If you are familiar with other proof environments based on type theory and the LCF style ---like PVS, LEGO, Isabelle, etc--- then you will find not difficulty to guess the unexplained details. The better way to read this document is to start up the {\coq} system, type by yourself the examples and exercises, and observe the behavior of the system. All the examples proposed in this tutorial can be downloaded from the same site as the present document. The tutorial is organised as follows. The next section describes how inductive types are defined in {\coq}, and introduces some useful ones, like natural numbers, the empty type, the propositional equality type, and the logical connectives. Section \ref{CaseAnalysis} explains definitions by pattern-matching and their connection with the principle of case analysis. This principle is the most basic elimination rule associated with inductive or co-inductive types and follows a general scheme that we illustrate for some of the types introduced in Section \ref{Introduction}. Section \ref{CaseTechniques} illustrates the pragmatics of this principle, showing different proof techniques based on it. Section \ref{StructuralInduction} introduces definitions by structural recursion and proofs by induction. Section~\ref{CaseStudy} presents some elaborate techniques about dependent case analysis. Finally, Section \ref{CoInduction} is a brief introduction to co-inductive types --i.e., types containing infinite objects-- and the principle of co-induction. Thanks to Bruno Barras, Yves Bertot, Hugo Herbelin, Jean-Fran\c{c}ois Monin and Michel L\'evy for their help. \subsection*{Lexical conventions} The \texttt{typewriter} font is used to represent text input by the user, while the \textit{italic} font is used to represent the text output by the system as answers. Moreover, the mathematical symbols \coqle{}, \coqdiff, \(\exists\), \(\forall\), \arrow{}, $\rightarrow{}$ \coqor{}, \coqand{}, and \funarrow{} stand for the character strings \citecoq{<=}, \citecoq{<>}, \citecoq{exists}, \citecoq{forall}, \citecoq{->}, \citecoq{<-}, \texttt{\char'134/}, \texttt{/\char'134}, and \citecoq{=>}, respectively. For instance, the \coq{} statement %V8 A prendre % inclusion numero 1 % traduction numero 1 \begin{alltt} \hide{Open Scope nat_scope. Check (}forall A:Type,(exists x : A, forall (y:A), x <> y) -> 2 = 3\hide{).} \end{alltt} is written as follows in this tutorial: %V8 A prendre % inclusion numero 2 % traduction numero 2 \begin{alltt} \hide{Check (}{\prodsym}A:Type,(\exsym{}x:A, {\prodsym}y:A, x {\coqdiff} y) \arrow{} 2 = 3\hide{).} \end{alltt} When a fragment of \coq{} input text appears in the middle of regular text, we often place this fragment between double quotes ``\dots.'' These double quotes do not belong to the \coq{} syntax. Finally, any string enclosed between \texttt{(*} and \texttt{*)} is a comment and is ignored by the \coq{} system. \section{Introducing Inductive Types} \label{Introduction} Inductive types are types closed with respect to their introduction rules. These rules explain the most basic or \textsl{canonical} ways of constructing an element of the type. In this sense, they characterize the recursive type. 
Different rules must be considered as introducing different objects. In order to fix ideas, let us introduce in {\coq} the most well-known example of a recursive type: the type of natural numbers. %V8 A prendre \begin{alltt} Inductive nat : Set := | O : nat | S : nat\arrow{}nat. \end{alltt} The definition of a recursive type has two main parts. First, we establish what kind of recursive type we will characterize (a set, in this case). Second, we present the introduction rules that define the type ({\Z} and {\SUCC}), also called its {\sl constructors}. The constructors {\Z} and {\SUCC} determine all the elements of this type. In other words, if $n\mbox{:}\nat$, then $n$ must have been introduced either by the rule {\Z} or by an application of the rule {\SUCC} to a previously constructed natural number. In this sense, we can say that {\nat} is \emph{closed}. On the contrary, the type $\Set$ is an {\it open} type, since we do not know {\it a priori} all the possible ways of introducing an object of type \texttt{Set}. After entering this command, the constants {\nat}, {\Z} and {\SUCC} are available in the current context. We can see their types using the \texttt{Check} command \refmancite{Section \ref{Check}}: %V8 A prendre \begin{alltt} Check nat. \it{}nat : Set \tt{}Check O. \it{}O : nat \tt{}Check S. \it{}S : nat {\arrow} nat \end{alltt} Moreover, {\coq} adds to the context three constants named $\natind$, $\natrec$ and $\natrect$, which correspond to different principles of structural induction on natural numbers that {\coq} infers automatically from the definition. We will come back to them in Section \ref{StructuralInduction}. In fact, the type of natural numbers as well as several useful theorems about them are already defined in the basic library of {\coq}, so there is no need to introduce them. Therefore, let us throw away our (re)definition of {\nat}, using the command \texttt{Reset}. %V8 A prendre \begin{alltt} Reset nat. Print nat. \it{}Inductive nat : Set := O : nat | S : nat \arrow{} nat For S: Argument scope is [nat_scope] \end{alltt} Notice that \coq{}'s \emph{interpretation scope} for natural numbers (called \texttt{nat\_scope}) allows us to read and write natural numbers in decimal form (see \cite{coqrefman}). For instance, the constructor \texttt{O} can be read or written as the digit $0$, and the term ``~\texttt{S (S (S O))}~'' as $3$. %V8 A prendre \begin{alltt} Check O. \it 0 : nat. \tt Check (S (S (S O))). \it 3 : nat \end{alltt} Let us now take a look to some other recursive types contained in the standard library of {\coq}. \subsection{Lists} Lists are defined in library \citecoq{List}\footnote{Notice that in versions of {\coq} prior to 8.1, the parameter $A$ had sort \citecoq{Set} instead of \citecoq{Type}; the constant \citecoq{list} was thus of type \citecoq{Set\arrow{} Set}.} \begin{alltt} Require Import List. Print list. \it Inductive list (A : Type) : Type:= nil : list A | cons : A {\arrow} list A {\arrow} list A For nil: Argument A is implicit For cons: Argument A is implicit For list: Argument scope is [type_scope] For nil: Argument scope is [type_scope] For cons: Argument scopes are [type_scope _ _] \end{alltt} In this definition, \citecoq{A} is a \emph{general parameter}, global to both constructors. This kind of definition allows us to build a whole family of inductive types, indexed over the sort \citecoq{Type}. This can be observed if we consider the type of identifiers \citecoq{list}, \citecoq{cons} and \citecoq{nil}. 
Notice the notation \citecoq{(A := \dots)} which must be used when {\coq}'s type inference algorithm cannot infer the implicit parameter \citecoq{A}. \begin{alltt} Check list. \it list : Type {\arrow} Type \tt Check (nil (A:=nat)). \it nil : list nat \tt Check (nil (A:= nat {\arrow} nat)). \it nil : list (nat {\arrow} nat) \tt Check (fun A: Type {\funarrow} (cons (A:=A))). \it fun A : Type {\funarrow} cons (A:=A) : {\prodsym} A : Type, A {\arrow} list A {\arrow} list A \tt Check (cons 3 (cons 2 nil)). \it 3 :: 2 :: nil : list nat \tt Check (nat :: bool ::nil). \it nat :: bool :: nil : list Set \tt Check ((3<=4) :: True ::nil). \it (3<=4) :: True :: nil : list Prop \tt Check (Prop::Set::nil). \it Prop::Set::nil : list Type \end{alltt} \subsection{Vectors.} \label{vectors} Like \texttt{list}, \citecoq{vector} is a polymorphic type: if $A$ is a type, and $n$ a natural number, ``~\citecoq{vector $A$ $n$}~'' is the type of vectors of elements of $A$ and size $n$. \begin{alltt} Require Import Bvector. Print vector. \it Inductive vector (A : Type) : nat {\arrow} Type := Vnil : vector A 0 | Vcons : A {\arrow} {\prodsym} n : nat, vector A n {\arrow} vector A (S n) For vector: Argument scopes are [type_scope nat_scope] For Vnil: Argument scope is [type_scope] For Vcons: Argument scopes are [type_scope _ nat_scope _] \end{alltt} Remark the difference between the two parameters $A$ and $n$: The first one is a \textsl{general parameter}, global to all the introduction rules,while the second one is an \textsl{index}, which is instantiated differently in the introduction rules. Such types parameterized by regular values are called \emph{dependent types}. \begin{alltt} Check (Vnil nat). \it Vnil nat : vector nat 0 \tt Check (fun (A:Type)(a:A){\funarrow} Vcons _ a _ (Vnil _)). \it fun (A : Type) (a : A) {\funarrow} Vcons A a 0 (Vnil A) : {\prodsym} A : Type, A {\arrow} vector A 1 \tt Check (Vcons _ 5 _ (Vcons _ 3 _ (Vnil _))). \it Vcons nat 5 1 (Vcons nat 3 0 (Vnil nat)) : vector nat 2 \end{alltt} \subsection{The contradictory proposition.} Another example of an inductive type is the contradictory proposition. This type inhabits the universe of propositions, and has no element at all. %V8 A prendre \begin{alltt} Print False. \it{} Inductive False : Prop := \end{alltt} \noindent Notice that no constructor is given in this definition. \subsection{The tautological proposition.} Similarly, the tautological proposition {\True} is defined as an inductive type with only one element {\I}: %V8 A prendre \begin{alltt} Print True. \it{}Inductive True : Prop := I : True \end{alltt} \subsection{Relations as inductive types.} Some relations can also be introduced in a smart way as an inductive family of propositions. Let us take as example the order $n \leq m$ on natural numbers, called \citecoq{le} in {\coq}. This relation is introduced through the following definition, quoted from the standard library\footnote{In the interpretation scope for Peano arithmetic: \citecoq{nat\_scope}, ``~\citecoq{n <= m}~'' is equivalent to ``~\citecoq{le n m}~'' .}: %V8 A prendre \begin{alltt} Print le. \it Inductive le (n:nat) : nat\arrow{}Prop := | le_n: n {\coqle} n | le_S: {\prodsym} m, n {\coqle} m \arrow{} n {\coqle} S m. \end{alltt} Notice that in this definition $n$ is a general parameter, while the second argument of \citecoq{le} is an index (see section ~\ref{vectors}). 
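For instance (the following check is an illustration we add here, it is
not part of the original text), the types of the two constructors make
this difference visible: both build proofs about the same parameter $n$,
while the index --the second argument of \citecoq{le}-- varies.
\begin{alltt}
Check le_n.
\it le_n : {\prodsym} n : nat, n {\coqle} n
\tt Check le_S.
\it le_S : {\prodsym} n m : nat, n {\coqle} m {\arrow} n {\coqle} S m
\end{alltt}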
This definition introduces the binary relation $n {\leq} m$ as the family of unary predicates ``\textsl{to be greater or equal than a given $n$}'', parameterized by $n$. The introduction rules of this type can be seen as a sort of Prolog rules for proving that a given integer $n$ is less or equal than another one. In fact, an object of type $n{\leq} m$ is nothing but a proof built up using the constructors \textsl{le\_n} and \textsl{le\_S} of this type. As an example, let us construct a proof that zero is less or equal than three using {\coq}'s interactive proof mode. Such an object can be obtained applying three times the second introduction rule of \citecoq{le}, to a proof that zero is less or equal than itself, which is provided by the first constructor of \citecoq{le}: %V8 A prendre \begin{alltt} Theorem zero_leq_three: 0 {\coqle} 3. Proof. \it{} 1 subgoal ============================ 0 {\coqle} 3 \tt{}Proof. constructor 2. \it{} 1 subgoal ============================ 0 {\coqle} 2 \tt{} constructor 2. \it{} 1 subgoal ============================ 0 {\coqle} 1 \tt{} constructor 2 \it{} 1 subgoal ============================ 0 {\coqle} 0 \tt{} constructor 1. \it{}Proof completed \tt{}Qed. \end{alltt} \noindent When the current goal is an inductive type, the tactic ``~\citecoq{constructor $i$}~'' \refmancite{Section \ref{constructor}} applies the $i$-th constructor in the definition of the type. We can take a look at the proof constructed using the command \texttt{Print}: %V8 A prendre \begin{alltt} Print Print zero_leq_three. \it{}zero_leq_three = zero_leq_three = le_S 0 2 (le_S 0 1 (le_S 0 0 (le_n 0))) : 0 {\coqle} 3 \end{alltt} When the parameter $i$ is not supplied, the tactic \texttt{constructor} tries to apply ``~\texttt{constructor $1$}~'', ``~\texttt{constructor $2$}~'',\dots, ``~\texttt{constructor $n$}~'' where $n$ is the number of constructors of the inductive type (2 in our example) of the conclusion of the goal. Our little proof can thus be obtained iterating the tactic \texttt{constructor} until it fails: %V8 A prendre \begin{alltt} Lemma zero_leq_three': 0 {\coqle} 3. repeat constructor. Qed. \end{alltt} Notice that the strict order on \texttt{nat}, called \citecoq{lt} is not inductively defined: the proposition $n Prop := | le'_n : le' n n | le'_S : forall p, le' (S n) p -> le' n p. Hint Constructors le'. \end{alltt} We notice that the type of the second constructor of \citecoq{le'} has an argument whose type is \citecoq{le' (S n) p}. This constrasts with earlier versions of {\coq}, in which a general parameter $a$ of an inductive type $I$ had to appear only in applications of the form $I\,\dots\,a$. Since version $8.1$, if $a$ is a general parameter of an inductive type $I$, the type of an argument of a constructor of $I$ may be of the form $I\,\dots\,t_a$ , where $t_a$ is any term. Notice that the final type of the constructors must be of the form $I\,\dots\,a$, since these constructors describe how to form inhabitants of type $I\,\dots\,a$ (this is the role of parameter $a$). Another example of this new feature is {\coq}'s definition of accessibility (see Section~\ref{WellFoundedRecursion}), which has a general parameter $x$; the constructor for the predicate ``$x$ is accessible'' takes an argument of type ``$y$ is accessible''. In earlier versions of {\coq}, a relation like \citecoq{le'} would have to be defined without $n$ being a general parameter. \begin{alltt} Reset le'. 
Inductive le': nat-> nat -> Prop := | le'_n : forall n, le' n n | le'_S : forall n p, le' (S n) p -> le' n p. \end{alltt} \subsection{The propositional equality type.} \label{equality} In {\coq}, the propositional equality between two inhabitants $a$ and $b$ of the same type $A$ , noted $a=b$, is introduced as a family of recursive predicates ``~\textsl{to be equal to $a$}~'', parameterised by both $a$ and its type $A$. This family of types has only one introduction rule, which corresponds to reflexivity. Notice that the syntax ``\citecoq{$a$ = $b$}~'' is an abbreviation for ``\citecoq{eq $a$ $b$}~'', and that the parameter $A$ is \emph{implicit}, as it can be infered from $a$. %V8 A prendre \begin{alltt} Print eq. \it{} Inductive eq (A : Type) (x : A) : A \arrow{} Prop := eq_refl : x = x For eq: Argument A is implicit For eq_refl: Argument A is implicit For eq: Argument scopes are [type_scope _ _] For eq_refl: Argument scopes are [type_scope _] \end{alltt} Notice also that the first parameter $A$ of \texttt{eq} has type \texttt{Type}. The type system of {\coq} allows us to consider equality between various kinds of terms: elements of a set, proofs, propositions, types, and so on. Look at \cite{coqrefman, coqart} to get more details on {\coq}'s type system, as well as implicit arguments and argument scopes. \begin{alltt} Lemma eq_3_3 : 2 + 1 = 3. Proof. reflexivity. Qed. Lemma eq_proof_proof : eq_refl (2*6) = eq_refl (3*4). Proof. reflexivity. Qed. Print eq_proof_proof. \it eq_proof_proof = eq_refl (eq_refl (3 * 4)) : eq_refl (2 * 6) = eq_refl (3 * 4) \tt Lemma eq_lt_le : ( 2 < 4) = (3 {\coqle} 4). Proof. reflexivity. Qed. Lemma eq_nat_nat : nat = nat. Proof. reflexivity. Qed. Lemma eq_Set_Set : Set = Set. Proof. reflexivity. Qed. \end{alltt} \subsection{Logical connectives.} \label{LogicalConnectives} The conjunction and disjunction of two propositions are also examples of recursive types: \begin{alltt} Inductive or (A B : Prop) : Prop := or_introl : A \arrow{} A {\coqor} B | or_intror : B \arrow{} A {\coqor} B Inductive and (A B : Prop) : Prop := conj : A \arrow{} B \arrow{} A {\coqand} B \end{alltt} The propositions $A$ and $B$ are general parameters of these connectives. Choosing different universes for $A$ and $B$ and for the inductive type itself gives rise to different type constructors. For example, the type \textsl{sumbool} is a disjunction but with computational contents. \begin{alltt} Inductive sumbool (A B : Prop) : Set := left : A \arrow{} \{A\} + \{B\} | right : B \arrow{} \{A\} + \{B\} \end{alltt} This type --noted \texttt{\{$A$\}+\{$B$\}} in {\coq}-- can be used in {\coq} programs as a sort of boolean type, to check whether it is $A$ or $B$ that is true. The values ``~\citecoq{left $p$}~'' and ``~\citecoq{right $q$}~'' replace the boolean values \textsl{true} and \textsl{false}, respectively. The advantage of this type over \textsl{bool} is that it makes available the proofs $p$ of $A$ or $q$ of $B$, which could be necessary to construct a verification proof about the program. For instance, let us consider the certified program \citecoq{le\_lt\_dec} of the Standard Library. \begin{alltt} Require Import Compare_dec. Check le_lt_dec. \it le_lt_dec : {\prodsym} n m : nat, \{n {\coqle} m\} + \{m < n\} \end{alltt} We use \citecoq{le\_lt\_dec} to build a function for computing the max of two natural numbers: \begin{alltt} Definition max (n p :nat) := match le_lt_dec n p with | left _ {\funarrow} p | right _ {\funarrow} n end. 
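(* A small usage check, added here as an illustration (it is not part
   of the original tutorial text): *)
Eval compute in (max 2 3).
\it   = 3 : nat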
\end{alltt} In the following proof, the case analysis on the term ``~\citecoq{le\_lt\_dec n p}~'' gives us an access to proofs of $n\leq p$ in the first case, $pFrom these constants, it is possible to define application by case analysis. Then, through auto-application, the well-known looping term $(\lambda x.(x\;x)\;\lambda x.(x\;x))$ provides a proof of falsehood. \begin{alltt} Definition application (f x: Lambda) :False := matchL f False (fun h {\funarrow} h x). Definition Delta : Lambda := lambda (fun x : Lambda {\funarrow} application x x). Definition loop : False := application Delta Delta. Theorem two_is_three : 2 = 3. Proof. elim loop. Qed. End Paradox. \end{alltt} \noindent This example can be seen as a formulation of Russell's paradox in type theory associating $(\textsl{application}\;x\;x)$ to the formula $x\not\in x$, and \textsl{Delta} to the set $\{ x \mid x\not\in x\}$. If \texttt{matchL} would satisfy the reduction rule associated to case analysis, that is, $$ \citecoq{matchL (lambda $f$) $Q$ $h$} \Longrightarrow h\;f$$ then the term \texttt{loop} would compute into itself. This is not actually surprising, since the proof of the logical soundness of {\coq} strongly lays on the property that any well-typed term must terminate. Hence, non-termination is usually a synonymous of inconsistency. %\paragraph{} In this case, the construction of a non-terminating %program comes from the so-called \textsl{negative occurrence} of %$\Lambda$ in the type of the constructor $\lambda$. In order to be %admissible for {\coq}, all the occurrences of the recursive type in its %own introduction rules must be positive, in the sense on the following %definition: % %\begin{enumerate} %\item $R$ is positive in $(R\;\vec{t})$; %\item $R$ is positive in $(x: A)C$ if it does not %occur in $A$ and $R$ is positive in $C$; %\item if $P\equiv (\vec{x}:\vec{T})Q$, then $R$ is positive in $(P %\rightarrow C)$ if $R$ does not occur in $\vec{T}$, $R$ is positive %in $C$, and either %\begin{enumerate} %\item $Q\equiv (R\;\vec{q})$ or %\item $Q\equiv (J\;\vec{t})$, \label{relax} % where $J$ is a recursive type, and for any term $t_i$ either : % \begin{enumerate} % \item $R$ does not occur in $t_i$, or % \item $t_i\equiv (z:\vec{Z})(R\;\vec{q})$, $R$ does not occur % in $\vec{Z}$, $t_i$ instantiates a general % parameter of $J$, and this parameter is positive in the % arguments of the constructors of $J$. % \end{enumerate} %\end{enumerate} %\end{enumerate} %\noindent Those types obtained by erasing option (\ref{relax}) in the %definition above are called \textsl{strictly positive} types. \subsubsection*{Remark} In this case, the construction of a non-terminating program comes from the so-called \textsl{negative occurrence} of \texttt{Lambda} in the argument of the constructor \texttt{lambda}. The reader will find in the Reference Manual a complete formal definition of the notions of \emph{positivity condition} and \emph{strict positivity} that an inductive definition must satisfy. %In order to be %admissible for {\coq}, the type $R$ must be positive in the types of the %arguments of its own introduction rules, in the sense on the following %definition: %\textbf{La dfinition du manuel de rfrence est plus complexe: %la recopier ou donner seulement des exemples? 
%} %\begin{enumerate} %\item $R$ is positive in $T$ if $R$ does not occur in $T$; %\item $R$ is positive in $(R\;\vec{t})$ if $R$ does not occur in $\vec{t}$; %\item $R$ is positive in $(x:A)C$ if it does not % occur in $A$ and $R$ is positive in $C$; %\item $R$ is positive in $(J\;\vec{t})$, \label{relax} % if $J$ is a recursive type, and for any term $t_i$ either : % \begin{enumerate} % \item $R$ does not occur in $t_i$, or % \item $R$ is positive in $t_i$, $t_i$ instantiates a general % parameter of $J$, and this parameter is positive in the % arguments of the constructors of $J$. % \end{enumerate} %\end{enumerate} %\noindent When we can show that $R$ is positive without using the item %(\ref{relax}) of the definition above, then we say that $R$ is %\textsl{strictly positive}. %\textbf{Changer le discours sur les ordinaux} Notice that the positivity condition does not forbid us to put functional recursive arguments in the constructors. For instance, let us consider the type of infinitely branching trees, with labels in \texttt{Z}. \begin{alltt} Require Import ZArith. Inductive itree : Set := | ileaf : itree | inode : Z {\arrow} (nat {\arrow} itree) {\arrow} itree. \end{alltt} In this representation, the $i$-th child of a tree represented by ``~\texttt{inode $z$ $s$}~'' is obtained by applying the function $s$ to $i$. The following definitions show how to construct a tree with a single node, a tree of height 1 and a tree of height 2: \begin{alltt} Definition isingle l := inode l (fun i {\funarrow} ileaf). Definition t1 := inode 0 (fun n {\funarrow} isingle (Z.of_nat n)). Definition t2 := inode 0 (fun n : nat {\funarrow} inode (Z.of_nat n) (fun p {\funarrow} isingle (Z.of_nat (n*p)))). \end{alltt} Let us define a preorder on infinitely branching trees. In order to compare two non-leaf trees, it is necessary to compare each of their children without taking care of the order in which they appear: \begin{alltt} Inductive itree_le : itree{\arrow} itree {\arrow} Prop := | le_leaf : {\prodsym} t, itree_le ileaf t | le_node : {\prodsym} l l' s s', Z.le l l' {\arrow} ({\prodsym} i, {\exsym} j:nat, itree_le (s i) (s' j)){\arrow} itree_le (inode l s) (inode l' s'). \end{alltt} Notice that a call to the predicate \texttt{itree\_le} appears as a general parameter of the inductive type \texttt{ex} (see Sect.\ref{ex-def}). This kind of definition is accepted by {\coq}, but may lead to some difficulties, since the induction principle automatically generated by the system is not the most appropriate (see chapter 14 of~\cite{coqart} for a detailed explanation). The following definition, obtained by skolemising the proposition \linebreak $\forall\, i,\exists\, j,(\texttt{itree\_le}\;(s\;i)\;(s'\;j))$ in the type of \texttt{itree\_le}, does not present this problem: \begin{alltt} Inductive itree_le' : itree{\arrow} itree {\arrow} Prop := | le_leaf' : {\prodsym} t, itree_le' ileaf t | le_node' : {\prodsym} l l' s s' g, Z.le l l' {\arrow} ({\prodsym} i, itree_le' (s i) (s' (g i))) {\arrow} itree_le' (inode l s) (inode l' s'). \end{alltt} \iffalse \begin{alltt} Lemma t1_le'_t2 : itree_le' t1 t2. Proof. unfold t1, t2. constructor 2 with (fun i : nat {\funarrow} 2 * i). auto with zarith. unfold isingle; intro i ; constructor 2 with (fun i :nat {\funarrow} i). auto with zarith. constructor . Qed. \end{alltt} \fi %In general, strictly positive definitions are preferable to only %positive ones. The reason is that it is sometimes difficult to derive %structural induction combinators for the latter ones. 
Such combinators %are automatically generated for strictly positive types, but not for %the only positive ones. Nevertheless, sometimes non-strictly positive %definitions provide a smarter or shorter way of declaring a recursive %type. Another example is the type of trees of unbounded width, in which a recursive subterm \texttt{(ltree A)} instantiates the type of polymorphic lists: \begin{alltt} Require Import List. Inductive ltree (A:Set) : Set := lnode : A {\arrow} list (ltree A) {\arrow} ltree A. \end{alltt} This declaration can be transformed adding an extra type to the definition, as was done in Section \ref{MutuallyDependent}. \subsubsection{Impredicative Inductive Types} An inductive type $I$ inhabiting a universe $U$ is \textsl{predicative} if the introduction rules of $I$ do not make a universal quantification on a universe containing $U$. All the recursive types previously introduced are examples of predicative types. An example of an impredicative one is the following type: %\textsl{exT}, the dependent product %of a certain set (or proposition) $x$, and a proof of a property $P$ %about $x$. %\begin{alltt} %Print exT. %\end{alltt} %\textbf{ttention, EXT c'est ex!} %\begin{alltt} %Check (exists P:Prop, P {\arrow} not P). %\end{alltt} %This type is useful for expressing existential quantification over %types, like ``there exists a proposition $x$ such that $(P\;x)$'' %---written $(\textsl{EXT}\; x:Prop \mid (P\;x))$ in {\coq}. However, \begin{alltt} Inductive prop : Prop := prop_intro : Prop {\arrow} prop. \end{alltt} Notice that the constructor of this type can be used to inject any proposition --even itself!-- into the type. \begin{alltt} Check (prop_intro prop).\it prop_intro prop : prop \end{alltt} A careless use of such a self-contained objects may lead to a variant of Burali-Forti's paradox. The construction of Burali-Forti's paradox is more complicated than Russel's one, so we will not describe it here, and point the interested reader to \cite{Bar98,Coq86}. Another example is the second order existential quantifier for propositions: \begin{alltt} Inductive ex_Prop (P : Prop {\arrow} Prop) : Prop := exP_intro : {\prodsym} X : Prop, P X {\arrow} ex_Prop P. \end{alltt} %\begin{alltt} %(* %Check (match prop_inject with (prop_intro p _) {\funarrow} p end). %Error: Incorrect elimination of "prop_inject" in the inductive type % ex %The elimination predicate ""fun _ : prop {\funarrow} Prop" has type % "prop {\arrow} Type" %It should be one of : % "Prop" %Elimination of an inductive object of sort : "Prop" %is not allowed on a predicate in sort : "Type" %because non-informative objects may not construct informative ones. %*) %Print prop_inject. %(* %prop_inject = %prop_inject = prop_intro prop (fun H : prop {\funarrow} H) % : prop %*) %\end{alltt} % \textbf{Et par a? %} Notice that predicativity on sort \citecoq{Set} forbids us to build the following definitions. \begin{alltt} Inductive aSet : Set := aSet_intro: Set {\arrow} aSet. \it{}User error: Large non-propositional inductive types must be in Type \tt Inductive ex_Set (P : Set {\arrow} Prop) : Set := exS_intro : {\prodsym} X : Set, P X {\arrow} ex_Set P. \it{}User error: Large non-propositional inductive types must be in Type \end{alltt} Nevertheless, one can define types like \citecoq{aSet} and \citecoq{ex\_Set}, as inhabitants of \citecoq{Type}. \begin{alltt} Inductive ex_Set (P : Set {\arrow} Prop) : Type := exS_intro : {\prodsym} X : Set, P X {\arrow} ex_Set P. 
\end{alltt} In the following example, the inductive type \texttt{typ} can be defined, but the term associated with the interactive Definition of \citecoq{typ\_inject} is incompatible with {\coq}'s hierarchy of universes: \begin{alltt} Inductive typ : Type := typ_intro : Type {\arrow} typ. Definition typ_inject: typ. split; exact typ. \it Proof completed \tt{}Defined. \it Error: Universe Inconsistency. \tt Abort. \end{alltt} One possible way of avoiding this new source of paradoxes is to restrict the kind of eliminations by case analysis that can be done on impredicative types. In particular, projections on those universes equal or bigger than the one inhabited by the impredicative type must be forbidden \cite{Coq86}. A consequence of this restriction is that it is not possible to define the first projection of the type ``~\citecoq{ex\_Prop $P$}~'': \begin{alltt} Check (fun (P:Prop{\arrow}Prop)(p: ex_Prop P) {\funarrow} match p with exP_intro X HX {\funarrow} X end). \it Error: Incorrect elimination of "p" in the inductive type "ex_Prop", the return type has sort "Type" while it should be "Prop" Elimination of an inductive object of sort "Prop" is not allowed on a predicate in sort "Type" because proofs can be eliminated only to build proofs. \end{alltt} %In order to explain why, let us consider for example the following %impredicative type \texttt{ALambda}. %\begin{alltt} %Inductive ALambda : Set := % alambda : (A:Set)(A\arrow{}False)\arrow{}ALambda. % %Definition Lambda : Set := ALambda. %Definition lambda : (ALambda\arrow{}False)\arrow{}ALambda := (alambda ALambda). %Lemma CaseAL : (Q:Prop)ALambda\arrow{}((ALambda\arrow{}False)\arrow{}Q)\arrow{}Q. %\end{alltt} % %This type contains all the elements of the dangerous type $\Lambda$ %described at the beginning of this section. Try to construct the %non-ending term $(\Delta\;\Delta)$ as an object of %\texttt{ALambda}. Why is it not possible? \subsubsection{Extraction Constraints} There is a final constraint on case analysis that is not motivated by the potential introduction of paradoxes, but for compatibility reasons with {\coq}'s extraction mechanism \refmancite{Appendix \ref{CamlHaskellExtraction}}. This mechanism is based on the classification of basic types into the universe $\Set$ of sets and the universe $\Prop$ of propositions. The objects of a type in the universe $\Set$ are considered as relevant for computation purposes. The objects of a type in $\Prop$ are considered just as formalised comments, not necessary for execution. The extraction mechanism consists in erasing such formal comments in order to obtain an executable program. Hence, in general, it is not possible to define an object in a set (that should be kept by the extraction mechanism) by case analysis of a proof (which will be thrown away). Nevertheless, this general rule has an exception which is important in practice: if the definition proceeds by case analysis on a proof of a \textsl{singleton proposition} or an empty type (\emph{e.g.} \texttt{False}), then it is allowed. A singleton proposition is a non-recursive proposition with a single constructor $c$, all whose arguments are proofs. For example, the propositional equality and the conjunction of two propositions are examples of singleton propositions. %From the point of view of the extraction %mechanism, such types are isomorphic to a type containing a single %object $c$, so a definition $\Case{x}{c \Rightarrow b}$ is %directly replaced by $b$ as an extra optimisation. 
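The following definitions --a small sketch of ours, with names chosen freely-- illustrate this exception: a proof of the empty proposition \texttt{False} may be eliminated in order to inhabit any set, and a proof of a propositional equality --a singleton proposition-- may be used to transport an element of a set along that equality:
\begin{alltt}
Definition set_from_False (A:Set) (H:False) : A :=
  match H return A with end.

Definition transport (A:Set) (P:A {\arrow} Set) (x y:A)
                      (e : x = y) (px : P x) : P y :=
  match e in (_ = z) return P z with
  | eq_refl {\funarrow} px
  end.
\end{alltt}
Both definitions are accepted, although the term analysed by the \texttt{match} construct lives in $\Prop$ while the result lives in $\Set$.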
\subsubsection{Strong Case Analysis on Proofs} One could consider allowing to define a proposition $Q$ by case analysis on the proofs of another recursive proposition $R$. As we will see in Section \ref{Discrimination}, this would enable one to prove that different introduction rules of $R$ construct different objects. However, this property would be in contradiction with the principle of excluded middle of classical logic, because this principle entails that the proofs of a proposition cannot be distinguished. This principle is not provable in {\coq}, but it is frequently introduced by the users as an axiom, for reasoning in classical logic. For this reason, the definition of propositions by case analysis on proofs is not allowed in {\coq}. \begin{alltt} Definition comes_from_the_left (P Q:Prop)(H:P{\coqor}Q): Prop := match H with | or_introl p {\funarrow} True | or_intror q {\funarrow} False end. \it Error: Incorrect elimination of "H" in the inductive type "or", the return type has sort "Type" while it should be "Prop" Elimination of an inductive object of sort "Prop" is not allowed on a predicate in sort "Type" because proofs can be eliminated only to build proofs. \end{alltt} On the other hand, if we replace the proposition $P {\coqor} Q$ with the informative type $\{P\}+\{Q\}$, the elimination is accepted: \begin{alltt} Definition comes_from_the_left_sumbool (P Q:Prop)(x:\{P\} + \{Q\}): Prop := match x with | left p {\funarrow} True | right q {\funarrow} False end. \end{alltt} \subsubsection{Summary of Constraints} To end with this section, the following table summarizes which universe $U_1$ may inhabit an object of type $Q$ defined by case analysis on $x:R$, depending on the universe $U_2$ inhabited by the inductive types $R$.\footnote{In the box indexed by $U_1=\citecoq{Type}$ and $U_2=\citecoq{Set}$, the answer ``yes'' takes into account the predicativity of sort \citecoq{Set}. If you are working with the option ``impredicative-set'', you must put in this box the condition ``if $R$ is predicative''.} \begin{center} %%% displease hevea less by using * in multirow rather than \LL \renewcommand{\multirowsetup}{\centering} %\newlength{\LL} %\settowidth{\LL}{$x : R : U_2$} \begin{tabular}{|c|c|c|c|c|} \hline \multirow{5}*{$x : R : U_2$} & \multicolumn{4}{|c|}{$Q : U_1$}\\ \hline & &\textsl{Set} & \textsl{Prop} & \textsl{Type}\\ \cline{2-5} &\textsl{Set} & yes & yes & yes\\ \cline{2-5} &\textsl{Prop} & if $R$ singleton & yes & no\\ \cline{2-5} &\textsl{Type} & yes & yes & yes\\ \hline \end{tabular} \end{center} \section{Some Proof Techniques Based on Case Analysis} \label{CaseTechniques} In this section we illustrate the use of case analysis as a proof principle, explaining the proof techniques behind three very useful {\coq} tactics, called \texttt{discriminate}, \texttt{injection} and \texttt{inversion}. \subsection{Discrimination of introduction rules} \label{Discrimination} In the informal semantics of recursive types described in Section \ref{Introduction} it was said that each of the introduction rules of a recursive type is considered as being different from all the others. It is possible to capture this fact inside the logical system using the propositional equality. We take as example the following theorem, stating that \textsl{O} constructs a natural number different from any of those constructed with \texttt{S}. \begin{alltt} Theorem S_is_not_O : {\prodsym} n, S n {\coqdiff} 0. 
\end{alltt} In order to prove this theorem, we first define a proposition by case analysis on natural numbers, so that the proposition is true for {\Z} and false for any natural number constructed with {\SUCC}. This uses the empty and singleton type introduced in Sections \ref{Introduction}. \begin{alltt} Definition Is_zero (x:nat):= match x with | 0 {\funarrow} True | _ {\funarrow} False end. \end{alltt} \noindent Then, we prove the following lemma: \begin{alltt} Lemma O_is_zero : {\prodsym} m, m = 0 {\arrow} Is_zero m. Proof. intros m H; subst m. \it{} ================ Is_zero 0 \tt{} simpl;trivial. Qed. \end{alltt} \noindent Finally, the proof of \texttt{S\_is\_not\_O} follows by the application of the previous lemma to $S\;n$. \begin{alltt} red; intros n Hn. \it{} n : nat Hn : S n = 0 ============================ False \tt apply O_is_zero with (m := S n). assumption. Qed. \end{alltt} The tactic \texttt{discriminate} \refmancite{Section \ref{Discriminate}} is a special-purpose tactic for proving disequalities between two elements of a recursive type introduced by different constructors. It generalizes the proof method described here for natural numbers to any [co]-inductive type. This tactic is also capable of proving disequalities where the difference is not in the constructors at the head of the terms, but deeper inside them. For example, it can be used to prove the following theorem: \begin{alltt} Theorem disc2 : {\prodsym} n, S (S n) {\coqdiff} 1. Proof. intros n Hn; discriminate. Qed. \end{alltt} When there is an assumption $H$ in the context stating a false equality $t_1=t_2$, \texttt{discriminate} solves the goal by first proving $(t_1\not =t_2)$ and then reasoning by absurdity with respect to $H$: \begin{alltt} Theorem disc3 : {\prodsym} n, S (S n) = 0 {\arrow} {\prodsym} Q:Prop, Q. Proof. intros n Hn Q. discriminate. Qed. \end{alltt} \noindent In this case, the proof proceeds by absurdity with respect to the false equality assumed, whose negation is proved by discrimination. \subsection{Injectiveness of introduction rules} Another useful property about recursive types is the \textsl{injectiveness} of introduction rules, i.e., that whenever two objects were built using the same introduction rule, then this rule should have been applied to the same element. This can be stated formally using the propositional equality: \begin{alltt} Theorem inj : {\prodsym} n m, S n = S m {\arrow} n = m. Proof. \end{alltt} \noindent This theorem is just a corollary of a lemma about the predecessor function: \begin{alltt} Lemma inj_pred : {\prodsym} n m, n = m {\arrow} pred n = pred m. Proof. intros n m eq_n_m. rewrite eq_n_m. trivial. Qed. \end{alltt} \noindent Once this lemma is proven, the theorem follows directly from it: \begin{alltt} intros n m eq_Sn_Sm. apply inj_pred with (n:= S n) (m := S m); assumption. Qed. \end{alltt} This proof method is implemented by the tactic \texttt{injection} \refmancite{Section \ref{injection}}. This tactic is applied to a term $t$ of type ``~$c\;{t_1}\;\dots\;t_n = c\;t'_1\;\dots\;t'_n$~'', where $c$ is some constructor of an inductive type. The tactic \texttt{injection} is applied as deep as possible to derive the equality of all pairs of subterms of $t_i$ and $t'_i$ placed in the same position. All these equalities are put as antecedents of the current goal. Like \texttt{discriminate}, the tactic \citecoq{injection} can be also applied if $x$ does not occur in a direct sub-term, but somewhere deeper inside it. 
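For instance, in the following variant (ours), a single call to \texttt{injection} traverses two occurrences of the constructor {\SUCC} at once:
\begin{alltt}
Lemma inj2 : {\prodsym} n m, S (S n) = S (S m) {\arrow} n = m.
Proof.
 intros n m H; injection H; auto.
Qed.
\end{alltt}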
Its application may leave some trivial goals that can be easily solved using the tactic \texttt{trivial}. \begin{alltt} Lemma list_inject : {\prodsym} (A:Type)(a b :A)(l l':list A), a :: b :: l = b :: a :: l' {\arrow} a = b {\coqand} l = l'. Proof. intros A a b l l' e. \it e : a :: b :: l = b :: a :: l' ============================ a = b {\coqand} l = l' \tt injection e. \it ============================ l = l' {\arrow} b = a {\arrow} a = b {\arrow} a = b {\coqand} l = l' \tt{} auto. Qed. \end{alltt} \subsection{Inversion Techniques}\label{inversion} In section \ref{DependentCase}, we motivated the rule of dependent case analysis as a way of internalizing the informal equalities $n=O$ and $n=\SUCC\;p$ associated to each case. This internalisation consisted in instantiating $n$ with the corresponding term in the type of each branch. However, sometimes it could be better to internalise these equalities as extra hypotheses --for example, in order to use the tactics \texttt{rewrite}, \texttt{discriminate} or \texttt{injection} presented in the previous sections. This is frequently the case when the element analysed is denoted by a term which is not a variable, or when it is an object of a particular instance of a recursive family of types. Consider for example the following theorem: \begin{alltt} Theorem not_le_Sn_0 : {\prodsym} n:nat, ~ (S n {\coqle} 0). \end{alltt} \noindent Intuitively, this theorem should follow by case analysis on the hypothesis $H:(S\;n\;\leq\;\Z)$, because no introduction rule allows to instantiate the arguments of \citecoq{le} with respectively a successor and zero. However, there is no way of capturing this with the typing rule for case analysis presented in section \ref{Introduction}, because it does not take into account what particular instance of the family the type of $H$ is. Let us try it: \begin{alltt} Proof. red; intros n H; case H. \it 2 subgoals n : nat H : S n {\coqle} 0 ============================ False subgoal 2 is: {\prodsym} m : nat, S n {\coqle} m {\arrow} False \tt Undo. \end{alltt} \noindent What is necessary here is to make available the equalities ``~$\SUCC\;n = \Z$~'' and ``~$\SUCC\;m = \Z$~'' as extra hypotheses of the branches, so that the goal can be solved using the \texttt{Discriminate} tactic. In order to obtain the desired equalities as hypotheses, let us prove an auxiliary lemma, that our theorem is a corollary of: \begin{alltt} Lemma not_le_Sn_0_with_constraints : {\prodsym} n p , S n {\coqle} p {\arrow} p = 0 {\arrow} False. Proof. intros n p H; case H . \it 2 subgoals n : nat p : nat H : S n {\coqle} p ============================ S n = 0 {\arrow} False subgoal 2 is: {\prodsym} m : nat, S n {\coqle} m {\arrow} S m = 0 {\arrow} False \tt intros;discriminate. intros;discriminate. Qed. \end{alltt} \noindent Our main theorem can now be solved by an application of this lemma: \begin{alltt} Show. \it 2 subgoals n : nat p : nat H : S n {\coqle} p ============================ S n = 0 {\arrow} False subgoal 2 is: {\prodsym} m : nat, S n {\coqle} m {\arrow} S m = 0 {\arrow} False \tt eapply not_le_Sn_0_with_constraints; eauto. Qed. \end{alltt} The general method to address such situations consists in changing the goal to be proven into an implication, introducing as preconditions the equalities needed to eliminate the cases that make no sense. This proof technique is implemented by the tactic \texttt{inversion} \refmancite{Section \ref{Inversion}}. 
In order to prove a goal $G\;\vec{q}$ from an object of type $R\;\vec{t}$, this tactic automatically generates a lemma $\forall, \vec{x}. (R\;\vec{x}) \rightarrow \vec{x}=\vec{t}\rightarrow \vec{B}\rightarrow (G\;\vec{q})$, where the list of propositions $\vec{B}$ correspond to the subgoals that cannot be directly proven using \texttt{discriminate}. This lemma can either be saved for later use, or generated interactively. In this latter case, the subgoals yielded by the tactic are the hypotheses $\vec{B}$ of the lemma. If the lemma has been stored, then the tactic \linebreak ``~\citecoq{inversion \dots using \dots}~'' can be used to apply it. Let us show both techniques on our previous example: \subsubsection{Interactive mode} \begin{alltt} Theorem not_le_Sn_0' : {\prodsym} n:nat, ~ (S n {\coqle} 0). Proof. red; intros n H ; inversion H. Qed. \end{alltt} \subsubsection{Static mode} \begin{alltt} Derive Inversion le_Sn_0_inv with ({\prodsym} n :nat, S n {\coqle} 0). Theorem le_Sn_0'' : {\prodsym} n p : nat, ~ S n {\coqle} 0 . Proof. intros n p H; inversion H using le_Sn_0_inv. Qed. \end{alltt} In the example above, all the cases are solved using discriminate, so there remains no subgoal to be proven (i.e. the list $\vec{B}$ is empty). Let us present a second example, where this list is not empty: \begin{alltt} TTheorem le_reverse_rules : {\prodsym} n m:nat, n {\coqle} m {\arrow} n = m {\coqor} {\exsym} p, n {\coqle} p {\coqand} m = S p. Proof. intros n m H; inversion H. \it 2 subgoals n : nat m : nat H : n {\coqle} m H0 : n = m ============================ m = m {\coqor} ({\exsym} p : nat, m {\coqle} p {\coqand} m = S p) subgoal 2 is: n = S m0 {\coqor} ({\exsym} p : nat, n {\coqle} p {\coqand} S m0 = S p) \tt left;trivial. right; exists m0; split; trivial. \it Proof completed \end{alltt} This example shows how this tactic can be used to ``reverse'' the introduction rules of a recursive type, deriving the possible premises that could lead to prove a given instance of the predicate. This is why these tactics are called \texttt{inversion} tactics: they go back from conclusions to premises. The hypotheses corresponding to the propositional equalities are not needed in this example, since the tactic does the necessary rewriting to solve the subgoals. When the equalities are no longer needed after the inversion, it is better to use the tactic \texttt{Inversion\_clear}. This variant of the tactic clears from the context all the equalities introduced. \begin{alltt} Restart. intros n m H; inversion_clear H. \it \it n : nat m : nat ============================ m = m {\coqor} ({\exsym} p : nat, m {\coqle} p {\coqand} m = S p) \tt left;trivial. \it n : nat m : nat m0 : nat H0 : n {\coqle} m0 ============================ n = S m0 {\coqor} ({\exsym} p : nat, n {\coqle} p {\coqand} S m0 = S p) \tt right; exists m0; split; trivial. Qed. \end{alltt} %This proof technique works in most of the cases, but not always. In %particular, it could not if the list $\vec{t}$ contains a term $t_j$ %whose type $T$ depends on a previous term $t_i$, with $i % Cases p of % lt_intro1 {\funarrow} (lt_intro1 (S n)) % | (lt_intro2 m1 p2) {\funarrow} (lt_intro2 (S n) (S m1) (lt_n_S n m1 p2)) % end. %\end{alltt} %The guardedness condition must be satisfied only by the last argument %of the enclosed list. For example, the following declaration is an %alternative way of defining addition: %\begin{alltt} %Reset add. 
%Fixpoint add [n:nat] : nat\arrow{}nat := % Cases n of % O {\funarrow} [x:nat]x % | (S m) {\funarrow} [x:nat](add m (S x)) % end. %\end{alltt} In the following definition of addition, the second argument of {\tt plus{'}{'}} grows at each recursive call. However, as the first one always decreases, the definition is sound. \begin{alltt} Fixpoint plus'' (n p:nat) \{struct n\} : nat := match n with | 0 {\funarrow} p | S m {\funarrow} plus'' m (S p) end. \end{alltt} Moreover, the argument in the recursive call could be a deeper component of $n$. This is the case in the following definition of a boolean function determining whether a number is even or odd: \begin{alltt} Fixpoint even_test (n:nat) : bool := match n with 0 {\funarrow} true | 1 {\funarrow} false | S (S p) {\funarrow} even_test p end. \end{alltt} Mutually dependent definitions by structural induction are also allowed. For example, the previous function \textsl{even} could alternatively be defined using an auxiliary function \textsl{odd}: \begin{alltt} Reset even_test. Fixpoint even_test (n:nat) : bool := match n with | 0 {\funarrow} true | S p {\funarrow} odd_test p end with odd_test (n:nat) : bool := match n with | 0 {\funarrow} false | S p {\funarrow} even_test p end. \end{alltt} %\begin{exercise} %Define a function by structural induction that computes the number of %nodes of a tree structure defined in page \pageref{Forest}. %\end{exercise} Definitions by structural induction are computed only when they are applied, and the decreasing argument is a term having a constructor at the head. We can check this using the \texttt{Eval} command, which computes the normal form of a well typed term. \begin{alltt} Eval simpl in even_test. \it = even_test : nat {\arrow} bool \tt Eval simpl in (fun x : nat {\funarrow} even x). \it = fun x : nat {\funarrow} even x : nat {\arrow} Prop \tt Eval simpl in (fun x : nat => plus 5 x). \it = fun x : nat {\funarrow} S (S (S (S (S x)))) \tt Eval simpl in (fun x : nat {\funarrow} even_test (plus 5 x)). \it = fun x : nat {\funarrow} odd_test x : nat {\arrow} bool \tt Eval simpl in (fun x : nat {\funarrow} even_test (plus x 5)). \it = fun x : nat {\funarrow} even_test (x + 5) : nat {\arrow} bool \end{alltt} %\begin{exercise} %Prove that the second definition of even satisfies the following %theorem: %\begin{verbatim} %Theorem unfold_even : % (x:nat) % (even x)= (Cases x of % O {\funarrow} true % | (S O) {\funarrow} false % | (S (S m)) {\funarrow} (even m) % end). %\end{verbatim} %\end{exercise} \subsection{Proofs by Structural Induction} The principle of structural induction can be also used in order to define proofs, that is, to prove theorems. Let us call an \textsl{elimination combinator} any function that, given a predicate $P$, defines a proof of ``~$P\;x$~'' by structural induction on $x$. In {\coq}, the principle of proof by induction on natural numbers is a particular case of an elimination combinator. The definition of this combinator depends on three general parameters: the predicate to be proven, the base case, and the inductive step: \begin{alltt} Section Principle_of_Induction. Variable P : nat {\arrow} Prop. Hypothesis base_case : P 0. Hypothesis inductive_step : {\prodsym} n:nat, P n {\arrow} P (S n). Fixpoint nat_ind (n:nat) : (P n) := match n return P n with | 0 {\funarrow} base_case | S m {\funarrow} inductive_step m (nat_ind m) end. End Principle_of_Induction. \end{alltt} As this proof principle is used very often, {\coq} automatically generates it when an inductive type is introduced. 
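Asking for the type of \citecoq{nat\_ind} with the \texttt{Check} command should display a statement convertible with the one we have just built:
\begin{alltt}
Check nat_ind.
\it nat_ind
     : {\prodsym} P : nat {\arrow} Prop,
       P 0 {\arrow} ({\prodsym} n : nat, P n {\arrow} P (S n)) {\arrow} {\prodsym} n : nat, P n
\end{alltt}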
Similar principles \texttt{nat\_rec} and \texttt{nat\_rect} for defining objects in the universes $\Set$ and $\Type$ are also automatically generated \footnote{In fact, whenever possible, {\coq} generates the principle \texttt{$I$\_rect}, then derives from it the weaker principles \texttt{$I$\_ind} and \texttt{$I$\_rec}. If some principle has to be defined by hand, the user may try to build \texttt{$I$\_rect} (if possible). Thanks to {\coq}'s conversion rule, this principle can be used directly to build proofs and/or programs.}. The command \texttt{Scheme} \refmancite{Section \ref{Scheme}} can be used to generate an elimination combinator from certain parameters, like the universe that the defined objects must inhabit, whether the case analysis in the definitions must be dependent or not, etc. For example, it can be used to generate an elimination combinator for reasoning on even natural numbers from the mutually dependent predicates introduced in page \pageref{Even}. We do not display the combinators here by lack of space, but you can see them using the \texttt{Print} command. \begin{alltt} Scheme Even_induction := Minimality for even Sort Prop with Odd_induction := Minimality for odd Sort Prop. \end{alltt} \begin{alltt} Theorem even_plus_four : {\prodsym} n:nat, even n {\arrow} even (4+n). Proof. intros n H. elim H using Even_induction with (P0 := fun n {\funarrow} odd (4+n)); simpl;repeat constructor;assumption. Qed. \end{alltt} Another example of an elimination combinator is the principle of double induction on natural numbers, introduced by the following definition: \begin{alltt} Section Principle_of_Double_Induction. Variable P : nat {\arrow} nat {\arrow}Prop. Hypothesis base_case1 : {\prodsym} m:nat, P 0 m. Hypothesis base_case2 : {\prodsym} n:nat, P (S n) 0. Hypothesis inductive_step : {\prodsym} n m:nat, P n m {\arrow} \,\, P (S n) (S m). Fixpoint nat_double_ind (n m:nat)\{struct n\} : P n m := match n, m return P n m with | 0 , x {\funarrow} base_case1 x | (S x), 0 {\funarrow} base_case2 x | (S x), (S y) {\funarrow} inductive_step x y (nat_double_ind x y) end. End Principle_of_Double_Induction. \end{alltt} Changing the type of $P$ into $\nat\rightarrow\nat\rightarrow\Type$, another combinator for constructing (certified) programs, \texttt{nat\_double\_rect}, can be defined in exactly the same way. This definition is left as an exercise.\label{natdoublerect} \iffalse \begin{alltt} Section Principle_of_Double_Recursion. Variable P : nat {\arrow} nat {\arrow} Type. Hypothesis base_case1 : {\prodsym} x:nat, P 0 x. Hypothesis base_case2 : {\prodsym} x:nat, P (S x) 0. Hypothesis inductive_step : {\prodsym} n m:nat, P n m {\arrow} P (S n) (S m). Fixpoint nat_double_rect (n m:nat)\{struct n\} : P n m := match n, m return P n m with 0 , x {\funarrow} base_case1 x | (S x), 0 {\funarrow} base_case2 x | (S x), (S y) {\funarrow} inductive_step x y (nat_double_rect x y) end. End Principle_of_Double_Recursion. \end{alltt} \fi For instance the function computing the minimum of two natural numbers can be defined in the following way: \begin{alltt} Definition min : nat {\arrow} nat {\arrow} nat := nat_double_rect (fun (x y:nat) {\funarrow} nat) (fun (x:nat) {\funarrow} 0) (fun (y:nat) {\funarrow} 0) (fun (x y r:nat) {\funarrow} S r). Eval compute in (min 5 8). \it = 5 : nat \end{alltt} %\begin{exercise} % %Define the combinator \texttt{nat\_double\_rec}, and apply it %to give another definition of \citecoq{le\_lt\_dec} (using the theorems %of the \texttt{Arith} library). 
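Truncated subtraction can be obtained from the same combinator (a sketch of ours, assuming that \citecoq{nat\_double\_rect} has been defined as suggested above):
\begin{alltt}
Definition minus' : nat {\arrow} nat {\arrow} nat :=
  nat_double_rect (fun (x y:nat) {\funarrow} nat)
                  (fun (x:nat) {\funarrow} 0)
                  (fun (y:nat) {\funarrow} S y)
                  (fun (x y r:nat) {\funarrow} r).
Eval compute in (minus' 8 5).
\it = 3 : nat
\end{alltt}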
%\end{exercise} \subsection{Using Elimination Combinators.} The tactic \texttt{apply} can be used to apply one of these proof principles during the development of a proof. \begin{alltt} Lemma not_circular : {\prodsym} n:nat, n {\coqdiff} S n. Proof. intro n. apply nat_ind with (P:= fun n {\funarrow} n {\coqdiff} S n). \it 2 subgoals n : nat ============================ 0 {\coqdiff} 1 subgoal 2 is: {\prodsym} n0 : nat, n0 {\coqdiff} S n0 {\arrow} S n0 {\coqdiff} S (S n0) \tt discriminate. red; intros n0 Hn0 eqn0Sn0;injection eqn0Sn0;trivial. Qed. \end{alltt} The tactic \texttt{elim} \refmancite{Section \ref{Elim}} is a refinement of \texttt{apply}, specially designed for the application of elimination combinators. If $t$ is an object of an inductive type $I$, then ``~\citecoq{elim $t$}~'' tries to find an abstraction $P$ of the current goal $G$ such that $(P\;t)\equiv G$. Then it solves the goal applying ``~$I\texttt{\_ind}\;P$~'', where $I$\texttt{\_ind} is the combinator associated to $I$. The different cases of the induction then appear as subgoals that remain to be solved. In the previous proof, the tactic call ``~\citecoq{apply nat\_ind with (P:= fun n {\funarrow} n {\coqdiff} S n)}~'' can simply be replaced with ``~\citecoq{elim n}~''. The option ``~\citecoq{\texttt{elim} $t$ \texttt{using} $C$}~'' allows to use a derived combinator $C$ instead of the default one. Consider the following theorem, stating that equality is decidable on natural numbers: \label{iseqpage} \begin{alltt} Lemma eq_nat_dec : {\prodsym} n p:nat, \{n=p\}+\{n {\coqdiff} p\}. Proof. intros n p. \end{alltt} Let us prove this theorem using the combinator \texttt{nat\_double\_rect} of section~\ref{natdoublerect}. The example also illustrates how \texttt{elim} may sometimes fail in finding a suitable abstraction $P$ of the goal. Note that if ``~\texttt{elim n}~'' is used directly on the goal, the result is not the expected one. \vspace{12pt} %\pagebreak \begin{alltt} elim n using nat_double_rect. \it 4 subgoals n : nat p : nat ============================ {\prodsym} x : nat, \{x = p\} + \{x {\coqdiff} p\} subgoal 2 is: nat {\arrow} \{0 = p\} + \{0 {\coqdiff} p\} subgoal 3 is: nat {\arrow} {\prodsym} m : nat, \{m = p\} + \{m {\coqdiff} p\} {\arrow} \{S m = p\} + \{S m {\coqdiff} p\} subgoal 4 is: nat \end{alltt} The four sub-goals obtained do not correspond to the premises that would be expected for the principle \texttt{nat\_double\_rec}. The problem comes from the fact that this principle for eliminating $n$ has a universally quantified formula as conclusion, which confuses \texttt{elim} about the right way of abstracting the goal. %In effect, let us consider the type of the goal before the call to %\citecoq{elim}: ``~\citecoq{\{n = p\} + \{n {\coqdiff} p\}}~''. %Among all the abstractions that can be built by ``~\citecoq{elim n}~'' %let us consider this one %$P=$\citecoq{fun n :nat {\funarrow} fun q : nat {\funarrow} {\{q= p\} + \{q {\coqdiff} p\}}}. %It is easy to verify that %$P$ has type \citecoq{nat {\arrow} nat {\arrow} Set}, and that, if some %$q:\citecoq{nat}$ is given, then $P\;q\;$ matches the current goal. %Then applying \citecoq{nat\_double\_rec} with $P$ generates %four goals, corresponding to Therefore, in this case the abstraction must be explicited using the \texttt{pattern} tactic. Once the right abstraction is provided, the rest of the proof is immediate: \begin{alltt} Undo. pattern p,n. 
\it n : nat p : nat ============================ (fun n0 n1 : nat {\funarrow} \{n1 = n0\} + \{n1 {\coqdiff} n0\}) p n \tt elim n using nat_double_rec. \it 3 subgoals n : nat p : nat ============================ {\prodsym} x : nat, \{x = 0\} + \{x {\coqdiff} 0\} subgoal 2 is: {\prodsym} x : nat, \{0 = S x\} + \{0 {\coqdiff} S x\} subgoal 3 is: {\prodsym} n0 m : nat, \{m = n0\} + \{m {\coqdiff} n0\} {\arrow} \{S m = S n0\} + \{S m {\coqdiff} S n0\} \tt destruct x; auto. destruct x; auto. intros n0 m H; case H. intro eq; rewrite eq ; auto. intro neg; right; red ; injection 1; auto. Defined. \end{alltt} Notice that the tactic ``~\texttt{decide equality}~'' \refmancite{Section\ref{DecideEquality}} generalises the proof above to a large class of inductive types. It can be used for proving a proposition of the form $\forall\,(x,y:R),\{x=y\}+\{x{\coqdiff}y\}$, where $R$ is an inductive datatype all whose constructors take informative arguments ---like for example the type {\nat}: \begin{alltt} Definition eq_nat_dec' : {\prodsym} n p:nat, \{n=p\} + \{n{\coqdiff}p\}. decide equality. Defined. \end{alltt} \begin{exercise} \begin{enumerate} \item Define a recursive function of name \emph{nat2itree} that maps any natural number $n$ into an infinitely branching tree of height $n$. \item Provide an elimination combinator for these trees. \item Prove that the relation \citecoq{itree\_le} is a preorder (i.e. reflexive and transitive). \end{enumerate} \end{exercise} \begin{exercise} \label{zeroton} Define the type of lists, and a predicate ``being an ordered list'' using an inductive family. Then, define the function $(from\;n)=0::1\;\ldots\; n::\texttt{nil}$ and prove that it always generates an ordered list. \end{exercise} \begin{exercise} Prove that \citecoq{le' n p} and \citecoq{n $\leq$ p} are logically equivalent for all n and p. (\citecoq{le'} is defined in section \ref{parameterstuff}). \end{exercise} \subsection{Well-founded Recursion} \label{WellFoundedRecursion} Structural induction is a strong elimination rule for inductive types. This method can be used to define any function whose termination is a consequence of the well-foundedness of a certain order relation $R$ decreasing at each recursive call. What makes this principle so strong is the possibility of reasoning by structural induction on the proof that certain $R$ is well-founded. In order to illustrate this we have first to introduce the predicate of accessibility. \begin{alltt} Print Acc. \it Inductive Acc (A : Type) (R : A {\arrow} A {\arrow} Prop) (x:A) : Prop := Acc_intro : ({\prodsym} y : A, R y x {\arrow} Acc R y) {\arrow} Acc R x For Acc: Argument A is implicit For Acc_intro: Arguments A, R are implicit \dots \end{alltt} \noindent This inductive predicate characterizes those elements $x$ of $A$ such that any descending $R$-chain $\ldots x_2\;R\;x_1\;R\;x$ starting from $x$ is finite. A well-founded relation is a relation such that all the elements of $A$ are accessible. \emph{Notice the use of parameter $x$ (see Section~\ref{parameterstuff}, page \pageref{parameterstuff}).} Consider now the problem of representing in {\coq} the following ML function $\textsl{div}(x,y)$ on natural numbers, which computes $\lceil\frac{x}{y}\rceil$ if $y>0$ and yields $x$ otherwise. \begin{verbatim} let rec div x y = if x = 0 then 0 else if y = 0 then x else (div (x-y) y)+1;; \end{verbatim} The equality test on natural numbers can be implemented using the function \textsl{eq\_nat\_dec} that is defined page \pageref{iseqpage}. 
Giving $x$ and $y$, this function yields either the value $(\textsl{left}\;p)$ if there exists a proof $p:x=y$, or the value $(\textsl{right}\;q)$ if there exists $q:a\not = b$. The subtraction function is already defined in the library \citecoq{Minus}. Hence, direct translation of the ML function \textsl{div} would be: \begin{alltt} Require Import Minus. Fixpoint div (x y:nat)\{struct x\}: nat := if eq_nat_dec x 0 then 0 else if eq_nat_dec y 0 then x else S (div (x-y) y). \it Error: Recursive definition of div is ill-formed. In environment div : nat {\arrow} nat {\arrow} nat x : nat y : nat _ : x {\coqdiff} 0 _ : y {\coqdiff} 0 Recursive call to div has principal argument equal to "x - y" instead of a subterm of x \end{alltt} The program \texttt{div} is rejected by {\coq} because it does not verify the syntactical condition to ensure termination. In particular, the argument of the recursive call is not a pattern variable issued from a case analysis on $x$. We would have the same problem if we had the directive ``~\citecoq{\{struct y\}}~'' instead of ``~\citecoq{\{struct x\}}~''. However, we know that this program always stops. One way to justify its termination is to define it by structural induction on a proof that $x$ is accessible trough the relation $<$. Notice that any natural number $x$ is accessible for this relation. In order to do this, it is first necessary to prove some auxiliary lemmas, justifying that the first argument of \texttt{div} decreases at each recursive call. \begin{alltt} Lemma minus_smaller_S : {\prodsym} x y:nat, x - y < S x. Proof. intros x y; pattern y, x; elim x using nat_double_ind. destruct x0; auto with arith. simpl; auto with arith. simpl; auto with arith. Qed. Lemma minus_smaller_positive : {\prodsym} x y:nat, x {\coqdiff}0 {\arrow} y {\coqdiff} 0 {\arrow} x - y < x. Proof. destruct x; destruct y; ( simpl;intros; apply minus_smaller || intros; absurd (0=0); auto). Qed. \end{alltt} \noindent The last two lemmas are necessary to prove that for any pair of positive natural numbers $x$ and $y$, if $x$ is accessible with respect to \citecoq{lt}, then so is $x-y$. \begin{alltt} Definition minus_decrease : {\prodsym} x y:nat, Acc lt x {\arrow} x {\coqdiff} 0 {\arrow} y {\coqdiff} 0 {\arrow} Acc lt (x-y). Proof. intros x y H; case H. intros Hz posz posy. apply Hz; apply minus_smaller_positive; assumption. Defined. \end{alltt} Let us take a look at the proof of the lemma \textsl{minus\_decrease}, since the way in which it has been proven is crucial for what follows. \begin{alltt} Print minus_decrease. \it minus_decrease = fun (x y : nat) (H : Acc lt x) {\funarrow} match H in (Acc _ y0) return (y0 {\coqdiff} 0 {\arrow} y {\coqdiff} 0 {\arrow} Acc lt (y0 - y)) with | Acc_intro z Hz {\funarrow} fun (posz : z {\coqdiff} 0) (posy : y {\coqdiff} 0) {\funarrow} Hz (z - y) (minus_smaller_positive z y posz posy) end : {\prodsym} x y : nat, Acc lt x {\arrow} x {\coqdiff} 0 {\arrow} y {\coqdiff} 0 {\arrow} Acc lt (x - y) \end{alltt} \noindent Notice that the function call $(\texttt{minus\_decrease}\;n\;m\;H)$ indeed yields an accessibility proof that is \textsl{structurally smaller} than its argument $H$, because it is (an application of) its recursive component $Hz$. This enables to justify the following definition of \textsl{div\_aux}: \begin{alltt} Definition div_aux (x y:nat)(H: Acc lt x):nat. fix 3. intros. refine (if eq_nat_dec x 0 then 0 else if eq_nat_dec y 0 then y else div_aux (x-y) y _). 
\it div_aux : {\prodsym} x : nat, nat {\arrow} Acc lt x {\arrow} nat x : nat y : nat H : Acc lt x _ : x {\coqdiff} 0 _0 : y {\coqdiff} 0 ============================ Acc lt (x - y) \tt apply (minus_decrease x y H);auto. Defined. \end{alltt} The main division function is easily defined, using the theorem \citecoq{lt\_wf} of the library \citecoq{Wf\_nat}. This theorem asserts that \citecoq{nat} is well founded w.r.t. \citecoq{lt}, thus any natural number is accessible. \begin{alltt} Definition div x y := div_aux x y (lt_wf x). \end{alltt} Let us explain the proof above. In the definition of \citecoq{div\_aux}, what decreases is not $x$ but the \textsl{proof} of the accessibility of $x$. The tactic ``~\texttt{fix 3}~'' is used to indicate that the proof proceeds by structural induction on the third argument of the theorem --that is, on the accessibility proof. It also introduces a new hypothesis in the context, named as the current theorem, and with the same type as the goal. Then, the proof is refined with an incomplete proof term, containing a hole \texttt{\_}. This hole corresponds to the proof of accessibility for $x-y$, and is filled up with the (smaller!) accessibility proof provided by the function \texttt{minus\_decrease}. \noindent Let us take a look to the term \textsl{div\_aux} defined: \pagebreak \begin{alltt} Print div_aux. \it div_aux = (fix div_aux (x y : nat) (H : Acc lt x) \{struct H\} : nat := match eq_nat_dec x 0 with | left _ {\funarrow} 0 | right _ {\funarrow} match eq_nat_dec y 0 with | left _ {\funarrow} y | right _0 {\funarrow} div_aux (x - y) y (minus_decrease x y H _ _0) end end) : {\prodsym} x : nat, nat {\arrow} Acc lt x {\arrow} nat \end{alltt} If the non-informative parts from this proof --that is, the accessibility proof-- are erased, then we obtain exactly the program that we were looking for. \begin{alltt} Extraction div. \it let div x y = div_aux x y \tt Extraction div_aux. \it let rec div_aux x y = match eq_nat_dec x O with | Left {\arrow} O | Right {\arrow} (match eq_nat_dec y O with | Left {\arrow} y | Right {\arrow} div_aux (minus x y) y) \end{alltt} This methodology enables the representation of any program whose termination can be proved in {\coq}. Once the expected properties from this program have been verified, the justification of its termination can be thrown away, keeping just the desired computational behavior for it. \section{A case study in dependent elimination}\label{CaseStudy} Dependent types are very expressive, but ignoring some useful techniques can cause some problems to the beginner. Let us consider again the type of vectors (see section~\ref{vectors}). We want to prove a quite trivial property: the only value of type ``~\citecoq{vector A 0}~'' is ``~\citecoq{Vnil $A$}~''. Our first naive attempt leads to a \emph{cul-de-sac}. \begin{alltt} Lemma vector0_is_vnil : {\prodsym} (A:Type)(v:vector A 0), v = Vnil A. Proof. intros A v;inversion v. \it 1 subgoal A : Set v : vector A 0 ============================ v = Vnil A \tt Abort. \end{alltt} Another attempt is to do a case analysis on a vector of any length $n$, under an explicit hypothesis $n=0$. The tactic \texttt{discriminate} will help us to get rid of the case $n=\texttt{S $p$}$. Unfortunately, even the statement of our lemma is refused! \begin{alltt} Lemma vector0_is_vnil_aux : {\prodsym} (A:Type)(n:nat)(v:vector A n), n = 0 {\arrow} v = Vnil A. 
\it Error: In environment A : Type n : nat v : vector A n e : n = 0 The term "Vnil A" has type "vector A 0" while it is expected to have type "vector A n" \end{alltt} In effect, the equality ``~\citecoq{v = Vnil A}~'' is ill-typed and this is because the type ``~\citecoq{vector A n}~'' is not \emph{convertible} with ``~\citecoq{vector A 0}~''. This problem can be solved if we consider the heterogeneous equality \citecoq{JMeq} \cite{conor:motive} which allows us to consider terms of different types, even if this equality can only be proven for terms in the same type. The axiom \citecoq{JMeq\_eq}, from the library \citecoq{JMeq} allows us to convert a heterogeneous equality to a standard one. \begin{alltt} Lemma vector0_is_vnil_aux : {\prodsym} (A:Type)(n:nat)(v:vector A n), n= 0 {\arrow} JMeq v (Vnil A). Proof. destruct v. auto. intro; discriminate. Qed. \end{alltt} Our property of vectors of null length can be easily proven: \begin{alltt} Lemma vector0_is_vnil : {\prodsym} (A:Type)(v:vector A 0), v = Vnil A. intros a v;apply JMeq_eq. apply vector0_is_vnil_aux. trivial. Qed. \end{alltt} It is interesting to look at another proof of \citecoq{vector0\_is\_vnil}, which illustrates a technique developed and used by various people (consult in the \emph{Coq-club} mailing list archive the contributions by Yves Bertot, Pierre Letouzey, Laurent Thry, Jean Duprat, and Nicolas Magaud, Venanzio Capretta and Conor McBride). This technique is also used for unfolding infinite list definitions (see chapter13 of~\cite{coqart}). Notice that this definition does not rely on any axiom (\emph{e.g.} \texttt{JMeq\_eq}). We first give a new definition of the identity on vectors. Before that, we make the use of constructors and selectors lighter thanks to the implicit arguments feature: \begin{alltt} Implicit Arguments Vcons [A n]. Implicit Arguments Vnil [A]. Implicit Arguments Vhead [A n]. Implicit Arguments Vtail [A n]. Definition Vid : {\prodsym} (A : Type)(n:nat), vector A n {\arrow} vector A n. Proof. destruct n; intro v. exact Vnil. exact (Vcons (Vhead v) (Vtail v)). Defined. \end{alltt} Then we prove that \citecoq{Vid} is the identity on vectors: \begin{alltt} Lemma Vid_eq : {\prodsym} (n:nat) (A:Type)(v:vector A n), v=(Vid _ n v). Proof. destruct v. \it A : Type ============================ Vnil = Vid A 0 Vnil subgoal 2 is: Vcons a v = Vid A (S n) (Vcons a v) \tt reflexivity. reflexivity. Defined. \end{alltt} Why defining a new identity function on vectors? The following dialogue shows that \citecoq{Vid} has some interesting computational properties: \begin{alltt} Eval simpl in (fun (A:Type)(v:vector A 0) {\funarrow} (Vid _ _ v)). \it = fun (A : Type) (_ : vector A 0) {\funarrow} Vnil : {\prodsym} A : Type, vector A 0 {\arrow} vector A 0 \end{alltt} Notice that the plain identity on vectors doesn't convert \citecoq{v} into \citecoq{Vnil}. \begin{alltt} Eval simpl in (fun (A:Type)(v:vector A 0) {\funarrow} v). \it = fun (A : Type) (v : vector A 0) {\funarrow} v : {\prodsym} A : Type, vector A 0 {\arrow} vector A 0 \end{alltt} Then we prove easily that any vector of length 0 is \citecoq{Vnil}: \begin{alltt} Theorem zero_nil : {\prodsym} A (v:vector A 0), v = Vnil. Proof. intros. change (Vnil (A:=A)) with (Vid _ 0 v). \it 1 subgoal A : Type v : vector A 0 ============================ v = Vid A 0 v \tt apply Vid_eq. Defined. \end{alltt} A similar result can be proven about vectors of strictly positive length\footnote{As for \citecoq{Vid} and \citecoq{Vid\_eq}, this definition is from Jean Duprat.}. 
\begin{alltt} Theorem decomp : {\prodsym} (A : Type) (n : nat) (v : vector A (S n)), v = Vcons (Vhead v) (Vtail v). Proof. intros. change (Vcons (Vhead v) (Vtail v)) with (Vid _ (S n) v). \it 1 subgoal A : Type n : nat v : vector A (S n) ============================ v = Vid A (S n) v \tt{} apply Vid_eq. Defined. \end{alltt} Both lemmas: \citecoq{zero\_nil} and \citecoq{decomp}, can be used to easily derive a double recursion principle on vectors of same length: \begin{alltt} Definition vector_double_rect : {\prodsym} (A:Type) (P: {\prodsym} (n:nat),(vector A n){\arrow}(vector A n) {\arrow} Type), P 0 Vnil Vnil {\arrow} ({\prodsym} n (v1 v2 : vector A n) a b, P n v1 v2 {\arrow} P (S n) (Vcons a v1) (Vcons b v2)) {\arrow} {\prodsym} n (v1 v2 : vector A n), P n v1 v2. induction n. intros; rewrite (zero_nil _ v1); rewrite (zero_nil _ v2). auto. intros v1 v2; rewrite (decomp _ _ v1);rewrite (decomp _ _ v2). apply X0; auto. Defined. \end{alltt} Notice that, due to the conversion rule of {\coq}'s type system, this function can be used directly with \citecoq{Prop} or \citecoq{Type} instead of type (thus it is useless to build \citecoq{vector\_double\_ind} and \citecoq{vector\_double\_rec}) from scratch. We finish this example with showing how to define the bitwise \emph{or} on boolean vectors of the same length, and proving a little property about this operation. \begin{alltt} Definition bitwise_or n v1 v2 : vector bool n := vector_double_rect bool (fun n v1 v2 {\funarrow} vector bool n) Vnil (fun n v1 v2 a b r {\funarrow} Vcons (orb a b) r) n v1 v2. \end{alltt} Let us define recursively the $n$-th element of a vector. Notice that it must be a partial function, in case $n$ is greater or equal than the length of the vector. Since {\coq} only considers total functions, the function returns a value in an \emph{option} type. \begin{alltt} Fixpoint vector_nth (A:Type)(n:nat)(p:nat)(v:vector A p) \{struct v\} : option A := match n,v with _ , Vnil {\funarrow} None | 0 , Vcons b _ _ {\funarrow} Some b | S n', Vcons _ p' v' {\funarrow} vector_nth A n' p' v' end. Implicit Arguments vector_nth [A p]. \end{alltt} We can now prove --- using the double induction combinator --- a simple property relying \citecoq{vector\_nth} and \citecoq{bitwise\_or}: \begin{alltt} Lemma nth_bitwise : {\prodsym} (n:nat) (v1 v2: vector bool n) i a b, vector_nth i v1 = Some a {\arrow} vector_nth i v2 = Some b {\arrow} vector_nth i (bitwise_or _ v1 v2) = Some (orb a b). Proof. intros n v1 v2; pattern n,v1,v2. apply vector_double_rect. simpl. destruct i; discriminate 1. destruct i; simpl;auto. injection 1; injection 2;intros; subst a; subst b; auto. Qed. \end{alltt} \section{Co-inductive Types and Non-ending Constructions} \label{CoInduction} The objects of an inductive type are well-founded with respect to the constructors of the type. In other words, these objects are built by applying \emph{a finite number of times} the constructors of the type. Co-inductive types are obtained by relaxing this condition, and may contain non-well-founded objects \cite{EG96,EG95a}. An example of a co-inductive type is the type of infinite sequences formed with elements of type $A$, also called streams. This type can be introduced through the following definition: \begin{alltt} CoInductive Stream (A: Type) :Type := | Cons : A\arrow{}Stream A\arrow{}Stream A. 
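(* note that Stream has no "nil"-like constructor:
   every inhabitant of (Stream A) is an infinite sequence *)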
\end{alltt} If we are interested in finite or infinite sequences, we consider the type of \emph{lazy lists}: \begin{alltt} CoInductive LList (A: Type) : Type := | LNil : LList A | LCons : A {\arrow} LList A {\arrow} LList A. \end{alltt} It is also possible to define co-inductive types for the trees with infinitely-many branches (see Chapter 13 of~\cite{coqart}). Structural induction is the way of expressing that inductive types only contain well-founded objects. Hence, this elimination principle is not valid for co-inductive types, and the only elimination rule for streams is case analysis. This principle can be used, for example, to define the destructors \textsl{head} and \textsl{tail}. \begin{alltt} Definition head (A:Type)(s : Stream A) := match s with Cons a s' {\funarrow} a end. Definition tail (A : Type)(s : Stream A) := match s with Cons a s' {\funarrow} s' end. \end{alltt} Infinite objects are defined by means of (non-ending) methods of construction, like in lazy functional programming languages. Such methods can be defined using the \texttt{CoFixpoint} command \refmancite{Section \ref{CoFixpoint}}. For example, the following definition introduces the infinite list $[a,a,a,\ldots]$: \begin{alltt} CoFixpoint repeat (A:Type)(a:A) : Stream A := Cons a (repeat a). \end{alltt} However, not every co-recursive definition is an admissible method of construction. Similarly to the case of structural induction, the definition must verify a \textsl{guardedness} condition to be accepted. This condition states that any recursive call in the definition must be protected --i.e, be an argument of-- some constructor, and only an argument of constructors \cite{EG94a}. The following definitions are examples of valid methods of construction: \begin{alltt} CoFixpoint iterate (A: Type)(f: A {\arrow} A)(a : A) : Stream A:= Cons a (iterate f (f a)). CoFixpoint map (A B:Type)(f: A {\arrow} B)(s : Stream A) : Stream B:= match s with Cons a tl {\funarrow} Cons (f a) (map f tl) end. \end{alltt} \begin{exercise} Define two different methods for constructing the stream which infinitely alternates the values \citecoq{true} and \citecoq{false}. \end{exercise} \begin{exercise} Using the destructors \texttt{head} and \texttt{tail}, define a function which takes the n-th element of an infinite stream. \end{exercise} A non-ending method of construction is computed lazily. This means that its definition is unfolded only when the object that it introduces is eliminated, that is, when it appears as the argument of a case expression. We can check this using the command \texttt{Eval}. \begin{alltt} Eval simpl in (fun (A:Type)(a:A) {\funarrow} repeat a). \it = fun (A : Type) (a : A) {\funarrow} repeat a : {\prodsym} A : Type, A {\arrow} Stream A \tt Eval simpl in (fun (A:Type)(a:A) {\funarrow} head (repeat a)). \it = fun (A : Type) (a : A) {\funarrow} a : {\prodsym} A : Type, A {\arrow} A \end{alltt} %\begin{exercise} %Prove the following theorem: %\begin{verbatim} %Theorem expand_repeat : (a:A)(repeat a)=(Cons a (repeat a)). %\end{verbatim} %Hint: Prove first the streams version of the lemma in exercise %\ref{expand}. %\end{exercise} \subsection{Extensional Properties} Case analysis is also a valid proof principle for infinite objects. However, this principle is not sufficient to prove \textsl{extensional} properties, that is, properties concerning the whole infinite object \cite{EG95a}. A typical example of an extensional property is the predicate expressing that two streams have the same elements. 
In many cases, the minimal reflexive relation $a=b$ that is used as equality for inductive types is too small to capture equality between streams. Consider for example the streams $\texttt{iterate}\;f\;(f\;x)$ and $(\texttt{map}\;f\;(\texttt{iterate}\;f\;x))$. Even though these two streams have the same elements, no finite expansion of their definitions lead to equal terms. In other words, in order to deal with extensional properties, it is necessary to construct infinite proofs. The type of infinite proofs of equality can be introduced as a co-inductive predicate, as follows: \begin{alltt} CoInductive EqSt (A: Type) : Stream A {\arrow} Stream A {\arrow} Prop := eqst : {\prodsym} s1 s2: Stream A, head s1 = head s2 {\arrow} EqSt (tail s1) (tail s2) {\arrow} EqSt s1 s2. \end{alltt} It is possible to introduce proof principles for reasoning about infinite objects as combinators defined through \texttt{CoFixpoint}. However, oppositely to the case of inductive types, proof principles associated to co-inductive types are not elimination but \textsl{introduction} combinators. An example of such a combinator is Park's principle for proving the equality of two streams, usually called the \textsl{principle of co-induction}. It states that two streams are equal if they satisfy a \textit{bisimulation}. A bisimulation is a binary relation $R$ such that any pair of streams $s_1$ ad $s_2$ satisfying $R$ have equal heads, and tails also satisfying $R$. This principle is in fact a method for constructing an infinite proof: \begin{alltt} Section Parks_Principle. Variable A : Type. Variable R : Stream A {\arrow} Stream A {\arrow} Prop. Hypothesis bisim1 : {\prodsym} s1 s2:Stream A, R s1 s2 {\arrow} head s1 = head s2. Hypothesis bisim2 : {\prodsym} s1 s2:Stream A, R s1 s2 {\arrow} R (tail s1) (tail s2). CoFixpoint park_ppl : {\prodsym} s1 s2:Stream A, R s1 s2 {\arrow} EqSt s1 s2 := fun s1 s2 (p : R s1 s2) {\funarrow} eqst s1 s2 (bisim1 s1 s2 p) (park_ppl (tail s1) (tail s2) (bisim2 s1 s2 p)). End Parks_Principle. \end{alltt} Let us use the principle of co-induction to prove the extensional equality mentioned above. \begin{alltt} Theorem map_iterate : {\prodsym} (A:Type)(f:A{\arrow}A)(x:A), EqSt (iterate f (f x)) (map f (iterate f x)). Proof. intros A f x. apply park_ppl with (R:= fun s1 s2 {\funarrow} {\exsym} x: A, s1 = iterate f (f x) {\coqand} s2 = map f (iterate f x)). intros s1 s2 (x0,(eqs1,eqs2)); rewrite eqs1; rewrite eqs2; reflexivity. intros s1 s2 (x0,(eqs1,eqs2)). exists (f x0);split; [rewrite eqs1|rewrite eqs2]; reflexivity. exists x;split; reflexivity. Qed. \end{alltt} The use of Park's principle is sometimes annoying, because it requires to find an invariant relation and prove that it is indeed a bisimulation. In many cases, a shorter proof can be obtained trying to construct an ad-hoc infinite proof, defined by a guarded declaration. The tactic ``~``\texttt{Cofix $f$}~'' can be used to do that. Similarly to the tactic \texttt{fix} indicated in Section \ref{WellFoundedRecursion}, this tactic introduces an extra hypothesis $f$ into the context, whose type is the same as the current goal. Note that the applications of $f$ in the proof \textsl{must be guarded}. In order to prevent us from doing unguarded calls, we can define a tactic that always apply a constructor before using $f$ \refmancite{Chapter \ref{WritingTactics}} : \begin{alltt} Ltac infiniteproof f := cofix f; constructor; [clear f| simpl; try (apply f; clear f)]. 
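(* the constructor is applied before any use of f, so every appeal to the
   co-induction hypothesis f is guarded; f is cleared in the first
   subgoal, where it must not be used *)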
\end{alltt} In the example above, this tactic produces a much simpler proof that the former one: \begin{alltt} Theorem map_iterate' : {\prodsym} ((A:Type)f:A{\arrow}A)(x:A), EqSt (iterate f (f x)) (map f (iterate f x)). Proof. infiniteproof map_iterate'. reflexivity. Qed. \end{alltt} \begin{exercise} Define a co-inductive type of name $Nat$ that contains non-standard natural numbers --this is, verifying $$\exists m \in \mbox{\texttt{Nat}}, \forall\, n \in \mbox{\texttt{Nat}}, n Infinite l A : Type a : A l : LList A H0 : ~ Finite (LCons a l) ============================ Infinite l \end{alltt} At this point, one must not apply \citecoq{H}! . It would be possible to solve the current goal by an inversion of ``~\citecoq{Finite (LCons a l)}~'', but, since the guard condition would be violated, the user would get an error message after typing \citecoq{Qed}. In order to satisfy the guard condition, we apply the constructor of \citecoq{Infinite}, \emph{then} apply \citecoq{H}. \begin{alltt} constructor. apply H. red; intro H1;case H0. constructor. trivial. Qed. \end{alltt} The reader is invited to replay this proof and understand each of its steps. \bibliographystyle{abbrv} \bibliography{manbiblio,morebib} \end{document} coq-8.4pl2/doc/RecTutorial/coqartmacros.tex0000640000175000001440000001432610406335323020056 0ustar notinusers\usepackage{url} \newcommand{\variantspringer}[1]{#1} \newcommand{\marginok}[1]{\marginpar{\raggedright OK:#1}} \newcommand{\tab}{{\null\hskip1cm}} \newcommand{\Ltac}{\mbox{\emph{$\cal L$}tac}} \newcommand{\coq}{\mbox{\emph{Coq}}} \newcommand{\lcf}{\mbox{\emph{LCF}}} \newcommand{\hol}{\mbox{\emph{HOL}}} \newcommand{\pvs}{\mbox{\emph{PVS}}} \newcommand{\isabelle}{\mbox{\emph{Isabelle}}} \newcommand{\prolog}{\mbox{\emph{Prolog}}} \newcommand{\goalbar}{\tt{}============================\it} \newcommand{\gallina}{\mbox{\emph{Gallina}}} \newcommand{\joker}{\texttt{\_}} \newcommand{\eprime}{\(\e^{\prime}\)} \newcommand{\Ztype}{\citecoq{Z}} \newcommand{\propsort}{\citecoq{Prop}} \newcommand{\setsort}{\citecoq{Set}} \newcommand{\typesort}{\citecoq{Type}} \newcommand{\ocaml}{\mbox{\emph{OCAML}}} \newcommand{\haskell}{\mbox{\emph{Haskell}}} \newcommand{\why}{\mbox{\emph{Why}}} \newcommand{\Pascal}{\mbox{\emph{Pascal}}} \newcommand{\ml}{\mbox{\emph{ML}}} \newcommand{\scheme}{\mbox{\emph{Scheme}}} \newcommand{\lisp}{\mbox{\emph{Lisp}}} \newcommand{\implarrow}{\mbox{$\Rightarrow$}} \newcommand{\metavar}[1]{?#1} \newcommand{\notincoq}[1]{#1} \newcommand{\coqscope}[1]{\%#1} \newcommand{\arrow}{\mbox{$\rightarrow$}} \newcommand{\fleche}{\arrow} \newcommand{\funarrow}{\mbox{$\Rightarrow$}} \newcommand{\ltacarrow}{\funarrow} \newcommand{\coqand}{\mbox{\(\wedge\)}} \newcommand{\coqor}{\mbox{\(\vee\)}} \newcommand{\coqnot}{\mbox{\(\neg\)}} \newcommand{\hide}[1]{} \newcommand{\hidedots}[1]{...} \newcommand{\sig}[3]{\texttt{\{}#1\texttt{:}#2 \texttt{|} #3\texttt{\}}} \renewcommand{\neg}{\sim} \renewcommand{\marginpar}[1]{} \addtocounter{secnumdepth}{1} \providecommand{\og}{} \providecommand{\fg}{} \newcommand{\hard}{\mbox{\small *}} \newcommand{\xhard}{\mbox{\small **}} \newcommand{\xxhard}{\mbox{\small ***}} %%% Operateurs, etc. 
\newcommand{\impl}{\mbox{$\rightarrow$}} \newcommand{\appli}[2]{\mbox{\tt{#1 #2}}} \newcommand{\applis}[1]{\mbox{\texttt{#1}}} \newcommand{\abst}[3]{\mbox{\tt{fun #1:#2 \funarrow #3}}} \newcommand{\coqle}{\mbox{$\leq$}} \newcommand{\coqge}{\mbox{$\geq$}} \newcommand{\coqdiff}{\mbox{$\neq$}} \newcommand{\coqiff}{\mbox{$\leftrightarrow$}} \newcommand{\prodsym}{\mbox{\(\forall\,\)}} \newcommand{\exsym}{\mbox{\(\exists\,\)}} \newcommand{\substsign}{/} \newcommand{\subst}[3]{\mbox{#1\{#2\substsign{}#3\}}} \newcommand{\anoabst}[2]{\mbox{\tt[#1]#2}} \newcommand{\letin}[3]{\mbox{\tt let #1:=#2 in #3}} \newcommand{\prodep}[3]{\mbox{\tt \(\forall\,\)#1:#2,$\,$#3}} \newcommand{\prodplus}[2]{\mbox{\tt\(\forall\,\)$\,$#1,$\,$#2}} \newcommand{\dom}[1]{\textrm{dom}(#1)} % domaine d'un contexte (log function) \newcommand{\norm}[1]{\textrm{n}(#1)} % forme normale (log function) \newcommand{\coqZ}[1]{\mbox{\tt{`#1`}}} \newcommand{\coqnat}[1]{\mbox{\tt{#1}}} \newcommand{\coqcart}[2]{\mbox{\tt{#1*#2}}} \newcommand{\alphacong}{\mbox{$\,\cong_{\alpha}\,$}} % alpha-congruence \newcommand{\betareduc}{\mbox{$\,\rightsquigarrow_{\!\beta}$}\,} % beta reduction %\newcommand{\betastar}{\mbox{$\,\Rightarrow_{\!\beta}^{*}\,$}} % beta reduction \newcommand{\deltareduc}{\mbox{$\,\rightsquigarrow_{\!\delta}$}\,} % delta reduction \newcommand{\dbreduc}{\mbox{$\,\rightsquigarrow_{\!\delta\beta}$}\,} % delta,beta reduction \newcommand{\ireduc}{\mbox{$\,\rightsquigarrow_{\!\iota}$}\,} % delta,beta reduction % jugement de typage \newcommand{\these}{\boldsymbol{\large \vdash}} \newcommand{\disj}{\mbox{$\backslash/$}} \newcommand{\conj}{\mbox{$/\backslash$}} %\newcommand{\juge}[3]{\mbox{$#1 \boldsymbol{\vdash} #2 : #3 $}} \newcommand{\juge}[4]{\mbox{$#1,#2 \these #3 \boldsymbol{:} #4 $}} \newcommand{\smalljuge}[3]{\mbox{$#1 \these #2 \boldsymbol{:} #3 $}} \newcommand{\goal}[3]{\mbox{$#1,#2 \these^{\!\!\!?} #3 $}} \newcommand{\sgoal}[2]{\mbox{$#1\these^{\!\!\!\!?} #2 $}} \newcommand{\reduc}[5]{\mbox{$#1,#2 \these #3 \rhd_{#4}#5 $}} \newcommand{\convert}[5]{\mbox{$#1,#2 \these #3 =_{#4}#5 $}} \newcommand{\convorder}[5]{\mbox{$#1,#2 \these #3\leq _{#4}#5 $}} \newcommand{\wouff}[2]{\mbox{$\emph{WF}(#1)[#2]$}} %\newcommand{\mthese}{\underset{M}{\vdash}} \newcommand{\mthese}{\boldsymbol{\vdash}_{\!\!M}} \newcommand{\type}{\boldsymbol{:}} % jugement absolu %\newcommand{\ajuge}[2]{\mbox{$ \boldsymbol{\vdash} #1 : #2 $}} \newcommand{\ajuge}[2]{\mbox{$\these #1 \boldsymbol{:} #2 $}} %%% logique minimale \newcommand{\propzero}{\mbox{$P_0$}} % types de Fzero %%% logique propositionnelle classique \newcommand {\ff}{\boldsymbol{f}} % faux \newcommand {\vv}{\boldsymbol{t}} % vrai \newcommand{\verite}{\mbox{$\cal{B}$}} % {\ff,\vv} \newcommand{\sequ}[2]{\mbox{$#1 \vdash #2 $}} % sequent \newcommand{\strip}[1]{#1^o} % enlever les variables d'un contexte %%% tactiques \newcommand{\decomp}{\delta} % decomposition \newcommand{\recomp}{\rho} % recomposition %%% divers \newcommand{\cqfd}{\mbox{\textbf{cqfd}}} \newcommand{\fail}{\mbox{\textbf{F}}} \newcommand{\succes}{\mbox{$\blacksquare$}} %%% Environnements %% Fzero \newcommand{\con}{\mbox{$\cal C$}} \newcommand{\var}{\mbox{$\cal V$}} \newcommand{\atomzero}{\mbox{${\cal A}_0$}} % types de base de Fzero \newcommand{\typezero}{\mbox{${\cal T}_0$}} % types de Fzero \newcommand{\termzero}{\mbox{$\Lambda_0$}} % termes de Fzero \newcommand{\conzero}{\mbox{$\cal C_0$}} % contextes de Fzero \newcommand{\buts}{\mbox{$\cal B$}} % buts %%% for drawing terms % abstraction [x:t]e 
\newcommand{\PicAbst}[3]{\begin{bundle}{\bf abst}\chunk{#1}\chunk{#2}\chunk{#3}% \end{bundle}} % the same in DeBruijn form \newcommand{\PicDbj}[2]{\begin{bundle}{\bf abst}\chunk{#1}\chunk{#2} \end{bundle}} % applications \newcommand{\PicAppl}[2]{\begin{bundle}{\bf appl}\chunk{#1}\chunk{#2}% \end{bundle}} % variables \newcommand{\PicVar}[1]{\begin{bundle}{\bf var}\chunk{#1} \end{bundle}} % constantes \newcommand{\PicCon}[1]{\begin{bundle}{\bf const}\chunk{#1}\end{bundle}} % arrows \newcommand{\PicImpl}[2]{\begin{bundle}{\impl}\chunk{#1}\chunk{#2}% \end{bundle}} %%%% scripts coq \newcommand{\prompt}{\mbox{\sl Coq $<\;$}} \newcommand{\natquicksort}{\texttt{nat\_quicksort}} \newcommand{\citecoq}[1]{\mbox{\texttt{#1}}} \newcommand{\safeit}{\it} \newtheorem{remarque}{Remark}[section] %\newtheorem{definition}{Definition}[chapter] coq-8.4pl2/doc/RecTutorial/recmacros.tex0000640000175000001440000000350310406335323017331 0ustar notinusers%=================================== % Style of the document %=================================== %\newtheorem{example}{Example}[section] %\newtheorem{exercise}{Exercise}[section] \newcommand{\comentario}[1]{\texttt{#1}} %=================================== % Keywords %=================================== \newcommand{\Prop}{\texttt{Prop}} \newcommand{\Set}{\texttt{Set}} \newcommand{\Type}{\texttt{Type}} \newcommand{\true}{\texttt{true}} \newcommand{\false}{\texttt{false}} \newcommand{\Lth}{\texttt{Lth}} \newcommand{\Nat}{\texttt{nat}} \newcommand{\nat}{\texttt{nat}} \newcommand{\Z} {\texttt{O}} \newcommand{\SUCC}{\texttt{S}} \newcommand{\pred}{\texttt{pred}} \newcommand{\False}{\texttt{False}} \newcommand{\True}{\texttt{True}} \newcommand{\I}{\texttt{I}} \newcommand{\natind}{\texttt{nat\_ind}} \newcommand{\natrec}{\texttt{nat\_rec}} \newcommand{\natrect}{\texttt{nat\_rect}} \newcommand{\eqT}{\texttt{eqT}} \newcommand{\identityT}{\texttt{identityT}} \newcommand{\map}{\texttt{map}} \newcommand{\iterates}{\texttt{iterates}} %=================================== % Numbering %=================================== \newtheorem{definition}{Definition}[section] \newtheorem{example}{Example}[section] %=================================== % Judgements %=================================== \newcommand{\JM}[2]{\ensuremath{#1 : #2}} %=================================== % Expressions %=================================== \newcommand{\Case}[3][]{\ensuremath{#1\textsf{Case}~#2~\textsf of}~#3~\textsf{end}} %======================================= \newcommand{\snreglados} [3] {\begin{tabular}{c} \ensuremath{#1} \\[2pt] \ensuremath{#2}\\ \hline \ensuremath{#3} \end{tabular}} \newcommand{\snregla} [2] {\begin{tabular}{c} \ensuremath{#1}\\ \hline \ensuremath{#2} \end{tabular}} %======================================= coq-8.4pl2/INSTALL0000640000175000001440000003074212037562607012676 0ustar notinusers INSTALLATION PROCEDURES FOR THE COQ V8.4 SYSTEM ----------------------------------------------- WHAT DO YOU NEED ? ================== Coq is designed to work on computers equipped with a POSIX (Unix or a clone) operating system. It also works under Microsoft Windows (see INSTALL.win); for a precompiled MacOS X package, see INSTALL.macosx. Coq is known to be actively used under GNU/Linux (i386, amd64 and ppc) and FreeBSD. Automated tests are run under many, many different architectures under GNU/Linux. Naturally, Coq will run faster on an architecture where OCaml can compile to native code, rather than only bytecode. 
At time of writing, that is IA32, PowerPC, AMD64, Alpha, Sparc, Mips, IA64, HPPA and StrongArm. See http://caml.inria.fr/ocaml/portability.en.html for details. Your OS may already contain Coq under the form of a precompiled package or ready-to-compile port. In this case, and if the supplied version suits you, follow the usual procedure for your OS to install it. E.g.: - Debian GNU/Linux (or Debian GNU/k*BSD or ...): aptitude install coq - Gentoo GNU/Linux: emerge sci-mathematics/coq - Mandriva GNU/Linux: urpmi coq Should you need or prefer to compile Coq V8.4 yourself, you need: - Objective Caml version 3.11.2 or later (available at http://caml.inria.fr/) - Camlp5 (version <= 4.08, or 5.* transitional) - GNU Make version 3.81 or later ( available at http://www.gnu.org/software/make/, but also a standard or optional add-on part to most Unices and Unix clones, sometimes under the name "gmake". If a new enough version is not included in your system, nor easily available as an add-on, this should get you a working make: #Download it (wget is an example, use your favourite FTP or HTTP client) wget http://ftp.gnu.org/pub/gnu/make/make-3.81.tar.bz2 bzip2 -cd make-3.81.tar.bz2 | tar x #If you don't have bzip2, you can download the gzipped version instead. cd make-3.81 ./configure --prefix=${HOME} make install Then, make sure that ${HOME}/bin is first in your $PATH. ) - a C compiler - for Coqide, the Lablgtk development files, and the GTK libraries, see INSTALL.ide for more details By FTP, Coq comes as a single compressed tar-file. You have probably already decompressed it if you are reading this document. QUICK INSTALLATION PROCEDURE. ============================= 1. ./configure 2. make world 3. make install (you may need superuser rights) 4. make clean INSTALLATION PROCEDURE IN DETAILS (NORMAL USERS). ================================================= 1- Check that you have the Objective Caml compiler version 3.11.2 (or later) installed on your computer and that "ocamlmktop" and "ocamlc" (or its native code version "ocamlc.opt") lie in a directory which is present in your $PATH environment variable. To get Coq in native-code, (it runs 4 to 10 times faster than bytecode, but it takes more time to get compiled and the binary is bigger), you will also need the "ocamlopt" (or its native code version "ocamlopt.opt") command. 2- Check that you have Camlp4 installed on your computer and that the command "camlp4" lies in a directory which is present in your $PATH environment variable path. (You need Camlp4 in both bytecode and native versions if your platform supports it). Note: in the latest ocaml distributions, camlp4 comes with ocaml so you do not have to check this point anymore. 3- The uncompression and un-tarring of the distribution file gave birth to a directory named "coq-8.xx". You can rename this directory and put it wherever you want. Just keep in mind that you will need some spare space during the compilation (reckon on about 50 Mb of disk space for the whole system in native-code compilation). Once installed, the binaries take about 14 Mb, and the library about 9 Mb. 4- First you need to configure the system. It is done automatically with the command: ./configure The "configure" script will ask you for directories where to put the Coq binaries, standard library, man pages, etc. It will propose you some default values. For a list of options accepted by the "configure" script, run "./configure -help". 
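
   For example, a typical native-code build installed under /usr/local could
   be configured (using two of the options described below) with:

        ./configure -prefix /usr/local -opt
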
The main options accepted are: -prefix Binaries, library, man pages and Emacs mode will be respectively installed in /bin, /lib/coq, /man and /lib/emacs/site-lisp -bindir (default: /usr/local/bin) Directory where the binaries will be installed -libdir (default: /usr/local/lib/coq) Directory where the Coq standard library will be installed -mandir (default: /usr/local/man) Directory where the Coq manual pages will be installed -emacslib (default: /usr/local/lib/emacs/site-lisp) Directory where the Coq Emacs mode will be installed -arch (default is the result of the command "arch") An arbitrary architecture name for your machine (useful when compiling Coq on two different architectures for which the result of "arch" is the same, e.g. Sun OS and Solaris) -local Compile Coq to run in its source directory. The installation (step 6) is not necessary in that case. -opt Use the ocamlc.opt compiler instead of ocamlc (and ocamlopt.opt compiler instead of ocamlopt). Makes compilation faster (recommended). -browser Use to open an URL in a browser. %s must appear in , and will be replaced by the URL. 5- Still in the root directory, do make world to compile Coq in Objective Caml bytecode (and native-code if supported). This will compile the entire system. This phase can take more or less time, depending on your architecture and is fairly verbose. 6- You can now install the Coq system. Executables, libraries, manual pages and emacs mode are copied in some standard places of your system, defined at configuration time (step 3). Just do umask 022 make install Of course, you may need superuser rights to do that. To use the Coq emacs mode you also need to put the following lines in you .emacs file: (setq auto-mode-alist (cons '("\\.v$" . coq-mode) auto-mode-alist)) (autoload 'coq-mode "coq" "Major mode for editing Coq vernacular." t) 7- You can now clean all the sources. (You can even erase them.) make clean INSTALLATION PROCEDURE FOR ADVANCED USERS. ========================================== If you wish to write tactics (and that really means that you belong to advanced users!) you *must* keep the Coq sources, without cleaning them. Therefore, to avoid a duplication of binaries and library, it is not necessary to do the installation step (6- above). You just have to tell it at configuration step (4- above) with the option -local : ./configure -local Then compile the sources as described in step 5 above. The resulting binaries will reside in the subdirectory bin/. If you want to compile the sources for debugging (i.e. with the option -g of the Caml compiler) then add the -debug option at configuration step : ./configure -debug and then compile the sources (step 5). Then you must make a Coq toplevel including your own tactics, which must be compiled with -g, with coqmktop. See the chapter 16 of the Coq Reference Manual for details about how to use coqmktop and the Objective Caml debugger with Coq. THE AVAILABLE COMMANDS. ======================= There are two Coq commands: coqtop The Coq toplevel coqc The Coq compiler There are actually two binaries for the interactive system, coqtop.byte and coqtop.opt (respectively bytecode and native code versions of Coq). coqtop is a link to the fastest version, i.e. coqtop.opt if any, and coqtop.byte otherwise. coqc also invokes the fastest version of Coq. Options -opt and -byte to coqtop and coqc selects a particular binary. * `coqtop' launches Coq in the interactive mode. 
The default state (see the "-inputstate" option) is `initial.coq', which contains some basic logical definitions, the associated parsing and printing rules, and the following tactic modules: Equality, Tauto, Inv, EAuto and Refine. * `coqc' allows compilation of Coq files directly from the command line. To compile a file foo.v, do: coqc foo.v It will produce a file foo.vo, that you can now load through the Coq command "Require". A detailed description of these commands and of their options is given in the Reference Manual (which you can get by FTP, in the doc/ directory, or read online on http://coq.inria.fr/doc/) and in the corresponding manual pages. There is also a tutorial and a FAQ; see http://coq.inria.fr/doc1-eng.html COMMON PROBLEMS. ================ * On some sites, when running `./configure', `pwd' returned a path which is not valid from another machine (it may look like "/tmp_mnt/foo/...") and, as a consequence, you won't be able to run coqtop or coqc. The solution is to give the correct value, with ./configure -src * The `make install' procedure uses mkdirhier, a program that may not be present on certain systems. To fix that, try to replace mkdirhier with mkdir -p * See also section on dynamically loaded libraries. COMPILING FOR DIFFERENT ARCHITECTURES. ====================================== This section explains how to compile Coq for several architecture, sharing the same sources. The important fact is that some files are architecture dependent (.cmx, .o and executable files for instance) but others are not (.cmo and .vo). Consequently, you can : o save some time during compilation by not cleaning the architecture independent files; o save some space during installation by sharing the Coq standard library (which is fully architecture independent). So, in order to compile Coq for a new architecture, proceed as follows: * Omit step 7 above and clean only the architecture dependent files: it is done automatically with the command make archclean * Configure the system for the new architecture: ./configure You can specify the same directory for the standard library but you MUST specify a different directory for the binaries (of course). * Compile and install the system as described in steps 5 and 6 above. MOVING BINARIES OR LIBRARY. =========================== If you move the binaries or the library, Coq will be "lost". Running "coqtop" would then return an error message of the kind: Error during initialization : Error: Can't find file initial.coq on loadpath If you really have (or want) to move the binaries or the library, then you have to indicate their new places to Coq, using the options -bindir (for the binaries directory) and -libdir (for the standard library directory) : coqtop -bindir -libdir See also next section. DYNAMICALLY LOADED LIBRARIES FOR BYTECODE EXECUTABLES. ====================================================== Some bytecode executables of Coq use the OCaml runtime, which dynamically loads a shared library (.so or .dll). When it is not installed properly, you can get an error message of this kind: Fatal error: cannot load shared library dllcoqrun Reason: dllcoqrun.so: cannot open shared object file: No such file or directory In this case, you need either: - to set the CAML_LD_LIBRARY_PATH environment variable to point to the directory where dllcoqrun.so is; this is suitable when you want to run the command a limited number of times in a controlled environment (e.g. 
during compilation of binary packages); - install dllcoqrun.so in a location listed in the file ld.conf that is in the directory of the standard library of OCaml; - recompile your bytecode executables after reconfiguring the location of of the shared library: ./configure -coqrunbyteflags "-dllib -lcoqrun -dllpath " ... where is the directory where the dllcoqrun.so is installed; - (not recommended) compile bytecode executables with a custom OCaml runtime by using: ./configure -custom ... be aware that stripping executables generated this way, or performing other executable-specific operations, will make them useless. coq-8.4pl2/lib/0000750000175000001440000000000012127276534012405 5ustar notinuserscoq-8.4pl2/lib/xml_lexer.mli0000640000175000001440000000256111663475433015117 0ustar notinusers(* * Xml Light, an small Xml parser/printer with DTD support. * Copyright (C) 2003 Nicolas Cannasse (ncannasse@motion-twin.com) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) type error = | EUnterminatedComment | EUnterminatedString | EIdentExpected | ECloseExpected | ENodeExpected | EAttributeNameExpected | EAttributeValueExpected | EUnterminatedEntity exception Error of error type token = | Tag of string * (string * string) list * bool | PCData of string | Endtag of string | Eof type pos = int * int * int * int val init : Lexing.lexbuf -> unit val close : Lexing.lexbuf -> unit val token : Lexing.lexbuf -> token val pos : Lexing.lexbuf -> pos val restore : pos -> unitcoq-8.4pl2/lib/predicate.mli0000640000175000001440000000534711366307247015062 0ustar notinusers (** Module [Pred]: sets over infinite ordered types with complement. *) (** This module implements the set data structure, given a total ordering function over the set elements. All operations over sets are purely applicative (no side-effects). The implementation uses the Set library. *) module type OrderedType = sig type t val compare: t -> t -> int end (** The input signature of the functor [Pred.Make]. [t] is the type of the set elements. [compare] is a total ordering function over the set elements. This is a two-argument function [f] such that [f e1 e2] is zero if the elements [e1] and [e2] are equal, [f e1 e2] is strictly negative if [e1] is smaller than [e2], and [f e1 e2] is strictly positive if [e1] is greater than [e2]. Example: a suitable ordering function is the generic structural comparison function [compare]. *) module type S = sig type elt (** The type of the set elements. *) type t (** The type of sets. *) val empty: t (** The empty set. *) val full: t (** The whole type. *) val is_empty: t -> bool (** Test whether a set is empty or not. *) val is_full: t -> bool (** Test whether a set contains the whole type or not. *) val mem: elt -> t -> bool (** [mem x s] tests whether [x] belongs to the set [s]. 
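    For instance, with a hypothetical instantiation
    [module P = Make(struct type t = int let compare = compare end)],
    [P.mem 1 (P.complement (P.singleton 1))] is [false] while
    [P.mem 2 (P.complement (P.singleton 1))] is [true].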
*) val singleton: elt -> t (** [singleton x] returns the one-element set containing only [x]. *) val add: elt -> t -> t (** [add x s] returns a set containing all elements of [s], plus [x]. If [x] was already in [s], [s] is returned unchanged. *) val remove: elt -> t -> t (** [remove x s] returns a set containing all elements of [s], except [x]. If [x] was not in [s], [s] is returned unchanged. *) val union: t -> t -> t val inter: t -> t -> t val diff: t -> t -> t val complement: t -> t (** Union, intersection, difference and set complement. *) val equal: t -> t -> bool (** [equal s1 s2] tests whether the sets [s1] and [s2] are equal, that is, contain equal elements. *) val subset: t -> t -> bool (** [subset s1 s2] tests whether the set [s1] is a subset of the set [s2]. *) val elements: t -> bool * elt list (** Gives a finite representation of the predicate: if the boolean is false, then the predicate is given in extension. if it is true, then the complement is given *) end module Make(Ord: OrderedType): (S with type elt = Ord.t) (** Functor building an implementation of the set structure given a totally ordered type. *) coq-8.4pl2/lib/fset.mli0000640000175000001440000000111711271301462014036 0ustar notinusersmodule Make : functor (X : Set.OrderedType) -> sig type elt = X.t type t val empty : t val is_empty : t -> bool val mem : elt -> t -> bool val add : elt -> t -> t val singleton : elt -> t val remove : elt -> t -> t val union : t -> t -> t val inter : t -> t -> t val diff : t -> t -> t val compare : t -> t -> int val equal : t -> t -> bool val subset : t -> t -> bool val iter : ( elt -> unit) -> t -> unit val fold : (elt -> 'b -> 'b) -> t -> 'b -> 'b val cardinal : t -> int val elements : t -> elt list val min_elt : t -> elt val max_elt : t -> elt val choose : t -> elt end coq-8.4pl2/lib/xml_lexer.mll0000640000175000001440000001376011770660731015121 0ustar notinusers{(* * Xml Light, an small Xml parser/printer with DTD support. * Copyright (C) 2003 Nicolas Cannasse (ncannasse@motion-twin.com) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) open Lexing type error = | EUnterminatedComment | EUnterminatedString | EIdentExpected | ECloseExpected | ENodeExpected | EAttributeNameExpected | EAttributeValueExpected | EUnterminatedEntity exception Error of error type pos = int * int * int * int type token = | Tag of string * (string * string) list * bool | PCData of string | Endtag of string | Eof let last_pos = ref 0 and current_line = ref 0 and current_line_start = ref 0 let tmp = Buffer.create 200 let idents = Hashtbl.create 0 let _ = begin Hashtbl.add idents "gt;" ">"; Hashtbl.add idents "lt;" "<"; Hashtbl.add idents "amp;" "&"; Hashtbl.add idents "apos;" "'"; Hashtbl.add idents "quot;" "\""; end let init lexbuf = current_line := 1; current_line_start := lexeme_start lexbuf; last_pos := !current_line_start let close lexbuf = Buffer.reset tmp let pos lexbuf = !current_line , !current_line_start , !last_pos , lexeme_start lexbuf let restore (cl,cls,lp,_) = current_line := cl; current_line_start := cls; last_pos := lp let newline lexbuf = incr current_line; last_pos := lexeme_end lexbuf; current_line_start := !last_pos let error lexbuf e = last_pos := lexeme_start lexbuf; raise (Error e) } let newline = ['\n'] let break = ['\r'] let space = [' ' '\t'] let identchar = ['A'-'Z' 'a'-'z' '_' '0'-'9' ':' '-'] let entitychar = ['A'-'Z' 'a'-'z'] let pcchar = [^ '\r' '\n' '<' '>' '&'] rule token = parse | newline | (newline break) | break { newline lexbuf; PCData "\n" } | "" { () } | eof { raise (Error EUnterminatedComment) } | _ { comment lexbuf } and header = parse | newline | (newline break) | break { newline lexbuf; header lexbuf } | "?>" { () } | eof { error lexbuf ECloseExpected } | _ { header lexbuf } and pcdata = parse | newline | (newline break) | break { Buffer.add_char tmp '\n'; newline lexbuf; pcdata lexbuf } | pcchar+ { Buffer.add_string tmp (lexeme lexbuf); pcdata lexbuf } | "&#" { Buffer.add_string tmp (lexeme lexbuf); pcdata lexbuf; } | '&' { Buffer.add_string tmp (entity lexbuf); pcdata lexbuf } | "" { Buffer.contents tmp } and entity = parse | entitychar+ ';' { let ident = lexeme lexbuf in try Hashtbl.find idents (String.lowercase ident) with Not_found -> "&" ^ ident } | _ | eof { raise (Error EUnterminatedEntity) } and ident_name = parse | identchar+ { lexeme lexbuf } | _ | eof { error lexbuf EIdentExpected } and close_tag = parse | '>' { () } | _ | eof { error lexbuf ECloseExpected } and attributes = parse | '>' { [], false } | "/>" { [], true } | "" (* do not read a char ! 
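       The empty pattern matches without consuming any input, so the
       attribute name is read by the [attribute] entry point called in
       the action below.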
*) { let key = attribute lexbuf in let data = attribute_data lexbuf in ignore_spaces lexbuf; let others, closed = attributes lexbuf in (key, data) :: others, closed } and attribute = parse | identchar+ { lexeme lexbuf } | _ | eof { error lexbuf EAttributeNameExpected } and attribute_data = parse | space* '=' space* '"' { Buffer.reset tmp; last_pos := lexeme_end lexbuf; dq_string lexbuf } | space* '=' space* '\'' { Buffer.reset tmp; last_pos := lexeme_end lexbuf; q_string lexbuf } | _ | eof { error lexbuf EAttributeValueExpected } and dq_string = parse | '"' { Buffer.contents tmp } | '\\' [ '"' '\\' ] { Buffer.add_char tmp (lexeme_char lexbuf 1); dq_string lexbuf } | eof { raise (Error EUnterminatedString) } | _ { Buffer.add_char tmp (lexeme_char lexbuf 0); dq_string lexbuf } and q_string = parse | '\'' { Buffer.contents tmp } | '\\' [ '\'' '\\' ] { Buffer.add_char tmp (lexeme_char lexbuf 1); q_string lexbuf } | eof { raise (Error EUnterminatedString) } | _ { Buffer.add_char tmp (lexeme_char lexbuf 0); q_string lexbuf } coq-8.4pl2/lib/errors.ml0000640000175000001440000000544412121620060014240 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bottom e | h::stk' -> try h e with | Unhandled -> print_gen bottom stk' e | any -> print_gen bottom stk' any (** Only anomalies should reach the bottom of the handler stack. In usual situation, the [handle_stack] is treated as it if was always non-empty with [print_anomaly] as its bottom handler. *) let where s = if !Flags.debug then str ("in "^s^":") ++ spc () else mt () let raw_anomaly e = match e with | Util.Anomaly (s,pps) -> where s ++ pps ++ str "." | Assert_failure _ | Match_failure _ -> str (Printexc.to_string e ^ ".") | _ -> str ("Uncaught exception " ^ Printexc.to_string e ^ ".") let print_anomaly askreport e = if askreport then hov 0 (str "Anomaly: " ++ raw_anomaly e ++ spc () ++ str "Please report.") else hov 0 (raw_anomaly e) (** The standard exception printer *) let print e = print_gen (print_anomaly true) !handle_stack e (** Same as [print], except that the "Please report" part of an anomaly isn't printed (used in Ltac debugging). *) let print_no_report e = print_gen (print_anomaly false) !handle_stack e (** Same as [print], except that anomalies are not printed but re-raised (used for the Fail command) *) let print_no_anomaly e = print_gen (fun e -> raise e) !handle_stack e (** Predefined handlers **) let _ = register_handler begin function | Util.UserError(s,pps) -> hov 0 (str "Error: " ++ where s ++ pps) | _ -> raise Unhandled end (** Critical exceptions shouldn't be catched and ignored by mistake by inner functions during a [vernacinterp]. They should be handled only at the very end of interp, to be displayed to the user. *) (** NB: in the 8.4 branch, for maximal compatibility, anomalies are considered non-critical *) let noncritical = function | Sys.Break | Out_of_memory | Stack_overflow -> false | _ -> true coq-8.4pl2/lib/explore.ml0000640000175000001440000000511312010532755014405 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* state list val success : state -> bool val pp : state -> std_ppcmds end module Make = functor(S : SearchProblem) -> struct type position = int list let msg_with_position p pp = let rec pp_rec = function | [] -> mt () | [i] -> int i | i :: l -> pp_rec l ++ str "." 
++ int i in msg_debug (h 0 (pp_rec p) ++ pp) (*s Depth first search. *) let rec depth_first s = if S.success s then s else depth_first_many (S.branching s) and depth_first_many = function | [] -> raise Not_found | [s] -> depth_first s | s :: l -> try depth_first s with Not_found -> depth_first_many l let debug_depth_first s = let rec explore p s = msg_with_position p (S.pp s); if S.success s then s else explore_many 1 p (S.branching s) and explore_many i p = function | [] -> raise Not_found | [s] -> explore (i::p) s | s :: l -> try explore (i::p) s with Not_found -> explore_many (succ i) p l in explore [1] s (*s Breadth first search. We use functional FIFOS la Okasaki. *) type 'a queue = 'a list * 'a list exception Empty let empty = [],[] let push x (h,t) = (x::h,t) let pop = function | h, x::t -> x, (h,t) | h, [] -> match List.rev h with x::t -> x, ([],t) | [] -> raise Empty let breadth_first s = let rec explore q = let (s, q') = try pop q with Empty -> raise Not_found in enqueue q' (S.branching s) and enqueue q = function | [] -> explore q | s :: l -> if S.success s then s else enqueue (push s q) l in enqueue empty [s] let debug_breadth_first s = let rec explore q = let ((p,s), q') = try pop q with Empty -> raise Not_found in enqueue 1 p q' (S.branching s) and enqueue i p q = function | [] -> explore q | s :: l -> let ps = i::p in msg_with_position ps (S.pp s); if S.success s then s else enqueue (succ i) p (push (ps,s) q) l in enqueue 1 [] empty [s] end coq-8.4pl2/lib/hashcons.mli0000640000175000001440000000315612010532755014713 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> t val equal : t -> t -> bool val hash : t -> int end module type S = sig type t type u val f : unit -> (u -> t -> t) end module Make(X:Comp) : (S with type t = X.t and type u = X.u) val simple_hcons : (unit -> 'u -> 't -> 't) -> ('u -> 't -> 't) val recursive_hcons : (unit -> ('t -> 't) * 'u -> 't -> 't) -> ('u -> 't -> 't) val recursive_loop_hcons : (unit -> ('t -> 't) * 'u -> 't -> 't) -> ('u -> 't -> 't) val recursive2_hcons : (unit -> ('t1 -> 't1) * ('t2 -> 't2) * 'u1 -> 't1 -> 't1) -> (unit -> ('t1 -> 't1) * ('t2 -> 't2) * 'u2 -> 't2 -> 't2) -> 'u1 -> 'u2 -> ('t1 -> 't1) * ('t2 -> 't2) (** Declaring and reinitializing global hash-consing functions *) val init : unit -> unit val register_hcons : ('u -> 't -> 't) -> ('u -> 't -> 't) module Hstring : (S with type t = string and type u = unit) module Hobj : (S with type t = Obj.t and type u = (Obj.t -> Obj.t) * unit) val string : string -> string val obj : Obj.t -> Obj.t val magic_hash : 'a -> 'a coq-8.4pl2/lib/pp_control.ml0000640000175000001440000000514012010532755015106 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* pp_global_params -> unit * set the parameters of a formatter *) let set_gp ft gp = Format.pp_set_margin ft gp.margin ; Format.pp_set_max_indent ft gp.max_indent ; Format.pp_set_max_boxes ft gp.max_depth ; Format.pp_set_ellipsis_text ft gp.ellipsis let set_dflt_gp ft = set_gp ft dflt_gp let get_gp ft = { margin = Format.pp_get_margin ft (); max_indent = Format.pp_get_max_indent ft (); max_depth = Format.pp_get_max_boxes ft (); ellipsis = Format.pp_get_ellipsis_text ft () } (* with_fp : 'a pp_formatter_params -> Format.formatter * returns of formatter for given formatter functions *) let with_fp chan out_function flush_function = let 
ft = Format.make_formatter out_function flush_function in Format.pp_set_formatter_out_channel ft chan; ft (* Output on a channel ch *) let with_output_to ch = let ft = with_fp ch (output ch) (fun () -> flush ch) in set_gp ft deep_gp; ft let std_ft = ref Format.std_formatter let _ = set_dflt_gp !std_ft let err_ft = ref Format.err_formatter let _ = set_gp !err_ft deep_gp let deep_ft = ref (with_output_to stdout) let _ = set_gp !deep_ft deep_gp (* For parametrization through vernacular *) let default = Format.pp_get_max_boxes !std_ft () let default_margin = Format.pp_get_margin !std_ft () let get_depth_boxes () = Some (Format.pp_get_max_boxes !std_ft ()) let set_depth_boxes v = Format.pp_set_max_boxes !std_ft (match v with None -> default | Some v -> v) let get_margin () = Some (Format.pp_get_margin !std_ft ()) let set_margin v = let v = match v with None -> default_margin | Some v -> v in Format.pp_set_margin !std_ft v; Format.pp_set_margin !deep_ft v coq-8.4pl2/lib/option.mli0000640000175000001440000001032312010532755014407 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool exception IsNone (** [get x] returns [y] where [x] is [Some y]. It raises IsNone if [x] equals [None]. *) val get : 'a option -> 'a (** [make x] returns [Some x]. *) val make : 'a -> 'a option (** [init b x] returns [Some x] if [b] is [true] and [None] otherwise. *) val init : bool -> 'a -> 'a option (** [flatten x] is [Some y] if [x] is [Some (Some y)] and [None] otherwise. *) val flatten : 'a option option -> 'a option (** {6 "Iterators"} ***) (** [iter f x] executes [f y] if [x] equals [Some y]. It does nothing otherwise. *) val iter : ('a -> unit) -> 'a option -> unit exception Heterogeneous (** [iter2 f x y] executes [f z w] if [x] equals [Some z] and [y] equals [Some w]. It does nothing if both [x] and [y] are [None]. And raises [Heterogeneous] otherwise. *) val iter2 : ('a -> 'b -> unit) -> 'a option -> 'b option -> unit (** [map f x] is [None] if [x] is [None] and [Some (f y)] if [x] is [Some y]. *) val map : ('a -> 'b) -> 'a option -> 'b option (** [smartmap f x] does the same as [map f x] except that it tries to share some memory. *) val smartmap : ('a -> 'a) -> 'a option -> 'a option (** [fold_left f a x] is [f a y] if [x] is [Some y], and [a] otherwise. *) val fold_left : ('b -> 'a -> 'b) -> 'b -> 'a option -> 'b (** [fold_left2 f a x y] is [f z w] if [x] is [Some z] and [y] is [Some w]. It is [a] if both [x] and [y] are [None]. Otherwise it raises [Heterogeneous]. *) val fold_left2 : ('a -> 'b -> 'c -> 'a) -> 'a -> 'b option -> 'c option -> 'a (** [fold_right f x a] is [f y a] if [x] is [Some y], and [a] otherwise. *) val fold_right : ('a -> 'b -> 'b) -> 'a option -> 'b -> 'b (** [fold_map f a x] is [a, f y] if [x] is [Some y], and [a] otherwise. *) val fold_map : ('a -> 'b -> 'a * 'c) -> 'a -> 'b option -> 'a * 'c option (** [cata e f x] is [e] if [x] is [None] and [f a] if [x] is [Some a] *) val cata : ('a -> 'b) -> 'b -> 'a option -> 'b (** {6 More Specific Operations} ***) (** [default a x] is [y] if [x] is [Some y] and [a] otherwise. *) val default : 'a -> 'a option -> 'a (** [lift] is the same as {!map}. *) val lift : ('a -> 'b) -> 'a option -> 'b option (** [lift_right f a x] is [Some (f a y)] if [x] is [Some y], and [None] otherwise. *) val lift_right : ('a -> 'b -> 'c) -> 'a -> 'b option -> 'c option (** [lift_left f x a] is [Some (f y a)] if [x] is [Some y], and [None] otherwise. 
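    For instance, [lift_left (+) (Some 1) 2] is [Some 3] and
    [lift_left (+) None 2] is [None].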
*) val lift_left : ('a -> 'b -> 'c) -> 'a option -> 'b -> 'c option (** [lift2 f x y] is [Some (f z w)] if [x] equals [Some z] and [y] equals [Some w]. It is [None] otherwise. *) val lift2 : ('a -> 'b -> 'c) -> 'a option -> 'b option -> 'c option (** {6 Operations with Lists} *) module List : sig (** [List.cons x l] equals [y::l] if [x] is [Some y] and [l] otherwise. *) val cons : 'a option -> 'a list -> 'a list (** [List.flatten l] is the list of all the [y]s such that [l] contains [Some y] (in the same order). *) val flatten : 'a option list -> 'a list end (** {6 Miscelaneous Primitives} *) module Misc : sig (** [Misc.compare f x y] lifts the equality predicate [f] to option types. That is, if both [x] and [y] are [None] then it returns [true], if they are bothe [Some _] then [f] is called. Otherwise it returns [false]. *) val compare : ('a -> 'a -> bool) -> 'a option -> 'a option -> bool end coq-8.4pl2/lib/segmenttree.ml0000640000175000001440000001050111447122051015243 0ustar notinusers(** This module is a very simple implementation of "segment trees". A segment tree of type ['a t] represents a mapping from a union of disjoint segments to some values of type 'a. *) (** Misc. functions. *) let list_iteri f l = let rec loop i = function | [] -> () | x :: xs -> f i x; loop (i + 1) xs in loop 0 l let log2 x = log x /. log 2. let log2n x = int_of_float (ceil (log2 (float_of_int x))) (** We focus on integers but this module can be generalized. *) type elt = int (** A value of type [domain] is interpreted differently given its position in the tree. On internal nodes, a domain represents the set of integers which are _not_ in the set of keys handled by the tree. On leaves, a domain represents the st of integers which are in the set of keys. *) type domain = (** On internal nodes, a domain [Interval (a, b)] represents the interval [a + 1; b - 1]. On leaves, it represents [a; b]. We always have [a] <= [b]. *) | Interval of elt * elt (** On internal node or root, a domain [Universe] represents all the integers. When the tree is not a trivial root, [Universe] has no interpretation on leaves. (The lookup function should never reach the leaves.) *) | Universe (** We use an array to store the almost complete tree. This array contains at least one element. *) type 'a t = (domain * 'a option) array (** The root is the first item of the array. *) (** Standard layout for left child. *) let left_child i = 2 * i + 1 (** Standard layout for right child. *) let right_child i = 2 * i + 2 (** Extract the annotation of a node, be it internal or a leaf. *) let value_of i t = match t.(i) with (_, Some x) -> x | _ -> raise Not_found (** Initialize the array to store [n] leaves. *) let create n init = Array.make (1 lsl (log2n n + 1) - 1) init (** Make a complete interval tree from a list of disjoint segments. Precondition : the segments must be sorted. *) let make segments = let nsegments = List.length segments in let tree = create nsegments (Universe, None) in let leaves_offset = (1 lsl (log2n nsegments)) - 1 in (** The algorithm proceeds in two steps using an intermediate tree to store minimum and maximum of each subtree as annotation of the node. *) (** We start from leaves: the last level of the tree is initialized with the given segments... *) list_iteri (fun i ((start, stop), value) -> let k = leaves_offset + i in let i = Interval (start, stop) in tree.(k) <- (i, Some i)) segments; (** ... the remaining leaves are initialized with neutral information. 
*) for k = leaves_offset + nsegments to Array.length tree -1 do tree.(k) <- (Universe, Some Universe) done; (** We traverse the tree bottom-up and compute the interval and annotation associated to each node from the annotations of its children. *) for k = leaves_offset - 1 downto 0 do let node, annotation = match value_of (left_child k) tree, value_of (right_child k) tree with | Interval (left_min, left_max), Interval (right_min, right_max) -> (Interval (left_max, right_min), Interval (left_min, right_max)) | Interval (min, max), Universe -> (Interval (max, max), Interval (min, max)) | Universe, Universe -> Universe, Universe | Universe, _ -> assert false in tree.(k) <- (node, Some annotation) done; (** Finally, annotation are replaced with the image related to each leaf. *) let final_tree = Array.mapi (fun i (segment, value) -> (segment, None)) tree in list_iteri (fun i ((start, stop), value) -> final_tree.(leaves_offset + i) <- (Interval (start, stop), Some value)) segments; final_tree (** [lookup k t] looks for an image for key [k] in the interval tree [t]. Raise [Not_found] if it fails. *) let lookup k t = let i = ref 0 in while (snd t.(!i) = None) do match fst t.(!i) with | Interval (start, stop) -> if k <= start then i := left_child !i else if k >= stop then i:= right_child !i else raise Not_found | Universe -> raise Not_found done; match fst t.(!i) with | Interval (start, stop) -> if k >= start && k <= stop then match snd t.(!i) with | Some v -> v | None -> assert false else raise Not_found | Universe -> assert false coq-8.4pl2/lib/system.ml0000640000175000001440000002574212121620060014253 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* warning ("Environment variable "^var^" not found: using '"^def^"' ."); flush_all (); def let getenv_else s dft = try Sys.getenv s with Not_found -> dft (* On win32, the home directory is probably not in $HOME, but in some other environment variable *) let home = try Sys.getenv "HOME" with Not_found -> try (Sys.getenv "HOMEDRIVE")^(Sys.getenv "HOMEPATH") with Not_found -> try Sys.getenv "USERPROFILE" with Not_found -> warning ("Cannot determine user home directory, using '.' ."); flush_all (); Filename.current_dir_name let safe_getenv n = safe_getenv_def n ("$"^n) let rec expand_atom s i = let l = String.length s in if i let n = expand_atom s (i+1) in let v = safe_getenv (String.sub s (i+1) (n-i-1)) in let s = (String.sub s 0 i)^v^(String.sub s n (l-n)) in expand_macros s (i + String.length v) | '~' when i = 0 -> let n = expand_atom s (i+1) in let v = if n=i+1 then home else (getpwnam (String.sub s (i+1) (n-i-1))).pw_dir in let s = v^(String.sub s n (l-n)) in expand_macros s (String.length v) | c -> expand_macros s (i+1) let expand_path_macros s = expand_macros s 0 (* Files and load path. *) type physical_path = string type load_path = physical_path list let physical_path_of_string s = s let string_of_physical_path p = p (* * Split a path into a list of directories. A one-liner with Str, but Coq * doesn't seem to use this library at all, so here is a slighly longer version. 
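   For example, [lpath_from_path "/usr/bin:/bin" ':'] evaluates to
   [["/usr/bin"; "/bin"]].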
*) let lpath_from_path path path_separator = let n = String.length path in let rec aux i l = if i < n then let j = try String.index_from path i path_separator with Not_found -> n in let dir = String.sub path i (j-i) in aux (j+1) (dir::l) else l in List.rev (aux 0 []) (* Hints to partially detects if two paths refer to the same repertory *) let rec remove_path_dot p = let curdir = Filename.concat Filename.current_dir_name "" in (* Unix: "./" *) let n = String.length curdir in let l = String.length p in if l > n && String.sub p 0 n = curdir then let n' = let sl = String.length Filename.dir_sep in let i = ref n in while !i <= l - sl && String.sub p !i sl = Filename.dir_sep do i := !i + sl done; !i in remove_path_dot (String.sub p n' (l - n')) else p let strip_path p = let cwd = Filename.concat (Sys.getcwd ()) "" in (* Unix: "`pwd`/" *) let n = String.length cwd in let l = String.length p in if l > n && String.sub p 0 n = cwd then let n' = let sl = String.length Filename.dir_sep in let i = ref n in while !i <= l - sl && String.sub p !i sl = Filename.dir_sep do i := !i + sl done; !i in remove_path_dot (String.sub p n' (l - n')) else remove_path_dot p let canonical_path_name p = let current = Sys.getcwd () in try Sys.chdir p; let p' = Sys.getcwd () in Sys.chdir current; p' with Sys_error _ -> (* We give up to find a canonical name and just simplify it... *) strip_path p (* All subdirectories, recursively *) let exists_dir dir = try let _ = closedir (opendir dir) in true with Unix_error _ -> false let skipped_dirnames = ref ["CVS"; "_darcs"] let exclude_search_in_dirname f = skipped_dirnames := f :: !skipped_dirnames let ok_dirname f = f <> "" && f.[0] <> '.' && not (List.mem f !skipped_dirnames) && try ignore (check_ident f); true with e when e <> Sys.Break -> false let all_subdirs ~unix_path:root = let l = ref [] in let add f rel = l := (f, rel) :: !l in let rec traverse dir rel = let dirh = opendir dir in try while true do let f = readdir dirh in if ok_dirname f then let file = Filename.concat dir f in try if (stat file).st_kind = S_DIR then begin let newrel = rel@[f] in add file newrel; traverse file newrel end with Unix_error (e,s1,s2) -> () done with End_of_file -> closedir dirh in if exists_dir root then traverse root []; List.rev !l let where_in_path ?(warn=true) path filename = let rec search = function | lpe :: rem -> let f = Filename.concat lpe filename in if Sys.file_exists f then (lpe,f) :: search rem else search rem | [] -> [] in let rec check_and_warn l = match l with | [] -> raise Not_found | (lpe, f) :: l' -> if warn & l' <> [] then msg_warning (str filename ++ str " has been found in" ++ spc () ++ hov 0 (str "[ " ++ hv 0 (prlist_with_sep (fun () -> str " " ++ pr_semicolon()) (fun (lpe,_) -> str lpe) l) ++ str " ];") ++ fnl () ++ str "loading " ++ str f); (lpe, f) in check_and_warn (search path) let find_file_in_path ?(warn=true) paths filename = if not (Filename.is_implicit filename) then if Sys.file_exists filename then let root = Filename.dirname filename in root, filename else errorlabstrm "System.find_file_in_path" (hov 0 (str "Can't find file" ++ spc () ++ str filename)) else try where_in_path ~warn paths filename with Not_found -> errorlabstrm "System.find_file_in_path" (hov 0 (str "Can't find file" ++ spc () ++ str filename ++ spc () ++ str "on loadpath")) let is_in_path lpath filename = try ignore (where_in_path ~warn:false lpath filename); true with Not_found -> false let path_separator = if Sys.os_type = "Unix" then ':' else ';' let is_in_system_path filename = let 
path = try Sys.getenv "PATH" with Not_found -> error "system variable PATH not found" in let lpath = lpath_from_path path path_separator in is_in_path lpath filename let make_suffix name suffix = if Filename.check_suffix name suffix then name else (name ^ suffix) let file_readable_p name = try access name [R_OK];true with Unix_error (_, _, _) -> false let open_trapping_failure name = try open_out_bin name with e when e <> Sys.Break -> error ("Can't open " ^ name) let try_remove filename = try Sys.remove filename with e when e <> Sys.Break -> msgnl (str"Warning: " ++ str"Could not remove file " ++ str filename ++ str" which is corrupted!" ) let marshal_out ch v = Marshal.to_channel ch v [] let marshal_in filename ch = try Marshal.from_channel ch with | End_of_file -> error "corrupted file: reached end of file" | Failure _ (* e.g. "truncated object" *) -> error (filename ^ " is corrupted, try to rebuild it.") exception Bad_magic_number of string let raw_extern_intern magic suffix = let extern_state name = let filename = make_suffix name suffix in let channel = open_trapping_failure filename in output_binary_int channel magic; filename,channel and intern_state filename = let channel = open_in_bin filename in if input_binary_int channel <> magic then raise (Bad_magic_number filename); channel in (extern_state,intern_state) let extern_intern ?(warn=true) magic suffix = let (raw_extern,raw_intern) = raw_extern_intern magic suffix in let extern_state name val_0 = try let (filename,channel) = raw_extern name in try marshal_out channel val_0; close_out channel with reraise -> begin try_remove filename; raise reraise end with Sys_error s -> error ("System error: " ^ s) and intern_state paths name = try let _,filename = find_file_in_path ~warn paths (make_suffix name suffix) in let channel = raw_intern filename in let v = marshal_in filename channel in close_in channel; v with Sys_error s -> error("System error: " ^ s) in (extern_state,intern_state) let with_magic_number_check f a = try f a with Bad_magic_number fname -> errorlabstrm "with_magic_number_check" (str"File " ++ str fname ++ strbrk" has bad magic number." 
++ spc () ++ strbrk "It is corrupted or was compiled with another version of Coq.") (* Communication through files with another executable *) let connect writefun readfun com = let name = Filename.basename com in let tmp_to = Filename.temp_file ("coq-"^name^"-in") ".xml" in let tmp_from = Filename.temp_file ("coq-"^name^"-out") ".xml" in let ch_to_in,ch_to_out = try open_in tmp_to, open_out tmp_to with Sys_error s -> error ("Cannot set connection to "^com^"("^s^")") in let ch_from_in,ch_from_out = try open_in tmp_from, open_out tmp_from with Sys_error s -> close_out ch_to_out; close_in ch_to_in; error ("Cannot set connection from "^com^"("^s^")") in writefun ch_to_out; close_out ch_to_out; let pid = let ch_to' = Unix.descr_of_in_channel ch_to_in in let ch_from' = Unix.descr_of_out_channel ch_from_out in try Unix.create_process com [|com|] ch_to' ch_from' Unix.stdout with Unix.Unix_error (err,_,_) -> close_in ch_to_in; close_in ch_from_in; close_out ch_from_out; unlink tmp_from; unlink tmp_to; error ("Cannot execute "^com^"("^(Unix.error_message err)^")") in close_in ch_to_in; close_out ch_from_out; (match snd (Unix.waitpid [] pid) with | Unix.WEXITED 127 -> error (com^": cannot execute") | Unix.WEXITED 0 -> () | _ -> error (com^" exited abnormally")); let a = readfun ch_from_in in close_in ch_from_in; unlink tmp_from; unlink tmp_to; a let run_command converter f c = let result = Buffer.create 127 in let cin,cout,cerr = Unix.open_process_full c (Unix.environment ()) in let buff = String.make 127 ' ' in let buffe = String.make 127 ' ' in let n = ref 0 in let ne = ref 0 in while n:= input cin buff 0 127 ; ne := input cerr buffe 0 127 ; !n+ !ne <> 0 do let r = converter (String.sub buff 0 !n) in f r; Buffer.add_string result r; let r = converter (String.sub buffe 0 !ne) in f r; Buffer.add_string result r done; (Unix.close_process_full (cin,cout,cerr), Buffer.contents result) (* Time stamps. *) type time = float * float * float let get_time () = let t = times () in (time(), t.tms_utime, t.tms_stime) let time_difference (t1,_,_) (t2,_,_) = t2 -. t1 let fmt_time_difference (startreal,ustart,sstart) (stopreal,ustop,sstop) = real (stopreal -. startreal) ++ str " secs " ++ str "(" ++ real ((-.) ustop ustart) ++ str "u" ++ str "," ++ real ((-.) sstop sstart) ++ str "s" ++ str ")" coq-8.4pl2/lib/bigint.mli0000640000175000001440000000260512010532755014357 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bigint val to_string : bigint -> string val of_int : int -> bigint val to_int : bigint -> int (** May raise a Failure on oversized numbers *) val zero : bigint val one : bigint val two : bigint val div2_with_rest : bigint -> bigint * bool (** true=odd; false=even *) val add_1 : bigint -> bigint val sub_1 : bigint -> bigint val mult_2 : bigint -> bigint val add : bigint -> bigint -> bigint val sub : bigint -> bigint -> bigint val mult : bigint -> bigint -> bigint val euclid : bigint -> bigint -> bigint * bigint val less_than : bigint -> bigint -> bool val equal : bigint -> bigint -> bool val is_strictly_pos : bigint -> bool val is_strictly_neg : bigint -> bool val is_pos_or_zero : bigint -> bool val is_neg_or_zero : bigint -> bool val neg : bigint -> bigint val pow : bigint -> int -> bigint coq-8.4pl2/lib/xml_parser.mli0000640000175000001440000000667312050203174015262 0ustar notinusers(* * Xml Light, an small Xml parser/printer with DTD support. 
* Copyright (C) 2003 Nicolas Cannasse (ncannasse@motion-twin.com) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) (** Xml Light Parser While basic parsing functions can be used in the {!Xml} module, this module is providing a way to create, configure and run an Xml parser. *) (** An Xml node is either [Element (tag-name, attributes, children)] or [PCData text] *) type xml = | Element of (string * (string * string) list * xml list) | PCData of string (** Abstract type for an Xml parser. *) type t (** {6:exc Xml Exceptions} *) (** Several exceptions can be raised when parsing an Xml document : {ul {li {!Xml.Error} is raised when an xml parsing error occurs. the {!Xml.error_msg} tells you which error occured during parsing and the {!Xml.error_pos} can be used to retreive the document location where the error occured at.} {li {!Xml.File_not_found} is raised when and error occured while opening a file with the {!Xml.parse_file} function.} } *) type error_pos type error_msg = | UnterminatedComment | UnterminatedString | UnterminatedEntity | IdentExpected | CloseExpected | NodeExpected | AttributeNameExpected | AttributeValueExpected | EndOfTagExpected of string | EOFExpected | Empty type error = error_msg * error_pos exception Error of error exception File_not_found of string (** Get a full error message from an Xml error. *) val error : error -> string (** Get the Xml error message as a string. *) val error_msg : error_msg -> string (** Get the line the error occured at. *) val line : error_pos -> int (** Get the relative character range (in current line) the error occured at.*) val range : error_pos -> int * int (** Get the absolute character range the error occured at. *) val abs_range : error_pos -> int * int val pos : Lexing.lexbuf -> error_pos (** Several kind of resources can contain Xml documents. *) type source = | SFile of string | SChannel of in_channel | SString of string | SLexbuf of Lexing.lexbuf (** This function returns a new parser with default options. *) val make : unit -> t (** When a Xml document is parsed, the parser will check that the end of the document is reached, so for example parsing [""] will fail instead of returning only the A element. You can turn off this check by setting [check_eof] to [false] {i (by default, check_eof is true)}. *) val check_eof : t -> bool -> unit (** Once the parser is configurated, you can run the parser on a any kind of xml document source to parse its contents into an Xml data structure. 
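    A minimal usage sketch (with the default options set by {!Xml.make}):
    {[
      let p = make () in
      parse p (SString "<a>hello</a>")
    ]}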
*) val parse : t -> source -> xml coq-8.4pl2/lib/tries.ml0000640000175000001440000000455112010532755014062 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* functor (Y : Map.OrderedType) -> struct module T_dom = Fset.Make(X) module T_codom = Fmap.Make(Y) type t = Node of T_dom.t * t T_codom.t let codom_to_list m = T_codom.fold (fun x y l -> (x,y)::l) m [] let codom_rng m = T_codom.fold (fun _ y acc -> y::acc) m [] let codom_dom m = T_codom.fold (fun x _ acc -> x::acc) m [] let empty = Node (T_dom.empty, T_codom.empty) let map (Node (_,m)) lbl = T_codom.find lbl m let xtract (Node (hereset,_)) = T_dom.elements hereset let dom (Node (_,m)) = codom_dom m let in_dom (Node (_,m)) lbl = T_codom.mem lbl m let is_empty_node (Node(a,b)) = (T_dom.elements a = []) & (codom_to_list b = []) let assure_arc m lbl = if T_codom.mem lbl m then m else T_codom.add lbl (Node (T_dom.empty,T_codom.empty)) m let cleanse_arcs (Node (hereset,m)) = let l = codom_rng m in Node(hereset, if List.for_all is_empty_node l then T_codom.empty else m) let rec at_path f (Node (hereset,m)) = function | [] -> cleanse_arcs (Node(f hereset,m)) | h::t -> let m = assure_arc m h in cleanse_arcs (Node(hereset, T_codom.add h (at_path f (T_codom.find h m) t) m)) let add tm (path,v) = at_path (fun hereset -> T_dom.add v hereset) tm path let rmv tm (path,v) = at_path (fun hereset -> T_dom.remove v hereset) tm path let app f tlm = let rec apprec pfx (Node(hereset,m)) = let path = List.rev pfx in T_dom.iter (fun v -> f(path,v)) hereset; T_codom.iter (fun l tm -> apprec (l::pfx) tm) m in apprec [] tlm let to_list tlm = let rec torec pfx (Node(hereset,m)) = let path = List.rev pfx in List.flatten((List.map (fun v -> (path,v)) (T_dom.elements hereset)):: (List.map (fun (l,tm) -> torec (l::pfx) tm) (codom_to_list m))) in torec [] tlm end coq-8.4pl2/lib/pp.mli0000640000175000001440000000732112010532755013522 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val make_pp_nonemacs:unit -> unit (** Pretty-printers. *) type ppcmd type std_ppcmds = ppcmd Stream.t (** {6 Formatting commands. } *) val str : string -> std_ppcmds val stras : int * string -> std_ppcmds val brk : int * int -> std_ppcmds val tbrk : int * int -> std_ppcmds val tab : unit -> std_ppcmds val fnl : unit -> std_ppcmds val pifb : unit -> std_ppcmds val ws : int -> std_ppcmds val mt : unit -> std_ppcmds val ismt : std_ppcmds -> bool val comment : int -> std_ppcmds val comments : ((int * int) * string) list ref (** {6 Concatenation. } *) val (++) : std_ppcmds -> std_ppcmds -> std_ppcmds (** {6 Evaluation. } *) val eval_ppcmds : std_ppcmds -> std_ppcmds (** {6 Derived commands. } *) val spc : unit -> std_ppcmds val cut : unit -> std_ppcmds val align : unit -> std_ppcmds val int : int -> std_ppcmds val real : float -> std_ppcmds val bool : bool -> std_ppcmds val qstring : string -> std_ppcmds val qs : string -> std_ppcmds val quote : std_ppcmds -> std_ppcmds val strbrk : string -> std_ppcmds val xmlescape : ppcmd -> ppcmd (** {6 Boxing commands. } *) val h : int -> std_ppcmds -> std_ppcmds val v : int -> std_ppcmds -> std_ppcmds val hv : int -> std_ppcmds -> std_ppcmds val hov : int -> std_ppcmds -> std_ppcmds val t : std_ppcmds -> std_ppcmds (** {6 Opening and closing of boxes. 
} *) val hb : int -> std_ppcmds val vb : int -> std_ppcmds val hvb : int -> std_ppcmds val hovb : int -> std_ppcmds val tb : unit -> std_ppcmds val close : unit -> std_ppcmds val tclose : unit -> std_ppcmds (** {6 Pretty-printing functions {% \emph{%}without flush{% }%}. } *) val pp_with : Format.formatter -> std_ppcmds -> unit val ppnl_with : Format.formatter -> std_ppcmds -> unit val warning_with : Format.formatter -> string -> unit val warn_with : Format.formatter -> std_ppcmds -> unit val pp_flush_with : Format.formatter -> unit -> unit val set_warning_function : (Format.formatter -> std_ppcmds -> unit) -> unit (** {6 Pretty-printing functions {% \emph{%}with flush{% }%}. } *) val msg_with : Format.formatter -> std_ppcmds -> unit val msgnl_with : Format.formatter -> std_ppcmds -> unit (** {6 ... } *) (** The following functions are instances of the previous ones on [std_ft] and [err_ft]. *) (** {6 Pretty-printing functions {% \emph{%}without flush{% }%} on [stdout] and [stderr]. } *) val pp : std_ppcmds -> unit val ppnl : std_ppcmds -> unit val pperr : std_ppcmds -> unit val pperrnl : std_ppcmds -> unit val message : string -> unit (** = pPNL *) val warning : string -> unit val warn : std_ppcmds -> unit val pp_flush : unit -> unit val flush_all: unit -> unit (** {6 Pretty-printing functions {% \emph{%}with flush{% }%} on [stdout] and [stderr]. } *) val msg : std_ppcmds -> unit val msgnl : std_ppcmds -> unit val msgerr : std_ppcmds -> unit val msgerrnl : std_ppcmds -> unit val msg_warning : std_ppcmds -> unit (** Same specific display in emacs as warning, but without the "Warning:" **) val msg_debug : std_ppcmds -> unit val string_of_ppcmds : std_ppcmds -> string coq-8.4pl2/lib/pp_control.mli0000640000175000001440000000242412010532755015261 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* pp_global_params -> unit val set_dflt_gp : Format.formatter -> unit val get_gp : Format.formatter -> pp_global_params (** {6 Output functions of pretty-printing. } *) val with_output_to : out_channel -> Format.formatter val std_ft : Format.formatter ref val err_ft : Format.formatter ref val deep_ft : Format.formatter ref (** {6 For parametrization through vernacular. } *) val set_depth_boxes : int option -> unit val get_depth_boxes : unit -> int option val set_margin : int option -> unit val get_margin : unit -> int option coq-8.4pl2/lib/xml_utils.mli0000640000175000001440000000664411663475433015146 0ustar notinusers(* * Xml Light, an small Xml parser/printer with DTD support. * Copyright (C) 2003 Nicolas Cannasse (ncannasse@motion-twin.com) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) (** Xml Light Xml Light is a minimal Xml parser & printer for OCaml. 
It provides a few functions to parse a basic Xml document into an OCaml data structure and to print back the data structures to an Xml document. Xml Light also has support for {b DTD} (Document Type Definition). {i (c)Copyright 2002-2003 Nicolas Cannasse} *) open Xml_parser (** {6 Xml Functions} *) exception Not_element of xml exception Not_pcdata of xml exception No_attribute of string (** [tag xdata] returns the tag value of the xml node. Raise {!Xml.Not_element} if the xml is not an element *) val tag : xml -> string (** [pcdata xdata] returns the PCData value of the xml node. Raise {!Xml.Not_pcdata} if the xml is not a PCData *) val pcdata : xml -> string (** [attribs xdata] returns the attribute list of the xml node. The first string is the attribute name, the second string is the attribute value. Raise {!Xml.Not_element} if the xml is not an element *) val attribs : xml -> (string * string) list (** [attrib xdata "href"] returns the value of the ["href"] attribute of the xml node (attribute matching is case-insensitive). Raise {!Xml.No_attribute} if the attribute does not exist in the node's attribute list. Raise {!Xml.Not_element} if the xml is not an element *) val attrib : xml -> string -> string (** [children xdata] returns the children list of the xml node. Raise {!Xml.Not_element} if the xml is not an element *) val children : xml -> xml list (*** [enum xdata] returns the children enumeration of the xml node. Raise {!Xml.Not_element} if the xml is not an element *) (* val enum : xml -> xml Enum.t *) (** [iter f xdata] calls f on all children of the xml node. Raise {!Xml.Not_element} if the xml is not an element *) val iter : (xml -> unit) -> xml -> unit (** [map f xdata] is equivalent to [List.map f (Xml.children xdata)] Raise {!Xml.Not_element} if the xml is not an element *) val map : (xml -> 'a) -> xml -> 'a list (** [fold f init xdata] is equivalent to [List.fold_left f init (Xml.children xdata)] Raise {!Xml.Not_element} if the xml is not an element *) val fold : ('a -> xml -> 'a) -> 'a -> xml -> 'a (** {6 Xml Printing} *) (** Print the xml data structure to a channel into a compact xml string (without any user-readable formatting). *) val print_xml : out_channel -> xml -> unit (** Print the xml data structure into a compact xml string (without any user-readable formatting). *) val to_string : xml -> string (** Print the xml data structure into a user-readable string with tabs and line breaks between different nodes. *) val to_string_fmt : xml -> string coq-8.4pl2/lib/unionfind.ml0000640000175000001440000000612212010532755014721 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t (** Add (in place) an element in the partition, or do nothing if the element is already in the partition. *) val add : elt -> t -> unit (** Find the canonical representative of an element. Raise [Not_found] if the element isn't known yet. *) val find : elt -> t -> elt (** Merge (in place) the equivalence classes of two elements. This will add the elements in the partition if necessary. *) val union : elt -> elt -> t -> unit (** Merge (in place) the equivalence classes of many elements.
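For illustration only, a minimal sketch of how this partition structure can be used, assuming integer elements and the functor [Make] defined below (the module names are made up for the example; [create] is taken from the implementation that follows):
{[
module ISet = Set.Make (struct type t = int let compare = compare end)
module IMap = Map.Make (struct type t = int let compare = compare end)
module IUF = Make (ISet) (IMap)

let () =
  let p = IUF.create () in
  IUF.union 1 2 p;
  IUF.union 2 3 p;
  assert (IUF.find 1 p = IUF.find 3 p)
]}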
*) val union_set : set -> t -> unit (** Listing the different components of the partition *) val partition : t -> set list end module Make (S:Set.S)(M:Map.S with type key = S.elt) = struct type elt = S.elt type set = S.t type node = | Canon of set | Equiv of elt type t = node ref M.t ref let create () = ref (M.empty : node ref M.t) let fresh x p = let node = ref (Canon (S.singleton x)) in p := M.add x node !p; x, node let rec lookup x p = let node = M.find x !p in match !node with | Canon _ -> x, node | Equiv y -> let ((z,_) as res) = lookup y p in if not (z == y) then node := Equiv z; res let add x p = if not (M.mem x !p) then ignore (fresh x p) let find x p = fst (lookup x p) let canonical x p = try lookup x p with Not_found -> fresh x p let union x y p = let ((x,_) as xcan) = canonical x p in let ((y,_) as ycan) = canonical y p in if x = y then () else let xcan, ycan = if x < y then xcan, ycan else ycan, xcan in let x,xnode = xcan and y,ynode = ycan in match !xnode, !ynode with | Canon lx, Canon ly -> xnode := Canon (S.union lx ly); ynode := Equiv x; | _ -> assert false let union_set s p = try let x = S.min_elt s in S.iter (fun y -> union x y p) s with Not_found -> () let partition p = List.rev (M.fold (fun x node acc -> match !node with | Equiv _ -> acc | Canon lx -> lx::acc) !p []) end coq-8.4pl2/lib/dnet.ml0000640000175000001440000002311712010532755013665 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'b) -> 'a t -> 'b t val map2 : ('a -> 'b -> 'c) -> 'a t -> 'b t -> 'c t val fold : ('a -> 'b -> 'a) -> 'a -> 'b t -> 'a val fold2 : ('a -> 'b -> 'c -> 'a) -> 'a -> 'b t -> 'c t -> 'a val compare : unit t -> unit t -> int val terminal : 'a t -> bool val choose : ('a -> 'b) -> 'a t -> 'b end module type S = sig type t type ident type meta type 'a structure module Idset : Set.S with type elt=ident type 'a pattern = | Term of 'a | Meta of meta type term_pattern = ('a structure) pattern as 'a val empty : t val add : t -> term_pattern -> ident -> t val find_all : t -> Idset.t val fold_pattern : ('a -> (Idset.t * meta * t) -> 'a) -> 'a -> term_pattern -> t -> Idset.t option * 'a val find_match : term_pattern -> t -> Idset.t val inter : t -> t -> t val union : t -> t -> t val map : (ident -> ident) -> (unit structure -> unit structure) -> t -> t end module Make = functor (T:Datatype) -> functor (Ident:Set.OrderedType) -> functor (Meta:Set.OrderedType) -> struct type ident = Ident.t type meta = Meta.t type 'a pattern = | Term of 'a | Meta of meta type 'a structure = 'a T.t module Idset = Set.Make(Ident) module Mmap = Map.Make(Meta) module Tmap = Map.Make(struct type t = unit structure let compare = T.compare end) type term_pattern = term_pattern structure pattern type idset = Idset.t (* we store identifiers at the leaf of the dnet *) type node = | Node of t structure | Terminal of t structure * idset (* at each node, we have a bunch of nodes (actually a map between the bare node and a subnet) and a bunch of metavariables *) and t = Nodes of node Tmap.t * idset Mmap.t let empty : t = Nodes (Tmap.empty, Mmap.empty) (* the head of a data is of type unit structure *) let head w = T.map (fun c -> ()) w (* given a node of the net and a word, returns the subnet with the same head as the word (with the rest of the nodes) *) let split l (w:'a structure) : node * node Tmap.t = let elt : node = Tmap.find (head w) l in (elt, Tmap.remove (head w) l) let select l w = Tmap.find (head w) l let rec add 
(Nodes (t,m):t) (w:term_pattern) (id:ident) : t = match w with Term w -> ( try let (n,tl) = split t w in let new_node = match n with | Terminal (e,is) -> Terminal (e,Idset.add id is) | Node e -> Node (T.map2 (fun t p -> add t p id) e w) in Nodes ((Tmap.add (head w) new_node tl), m) with Not_found -> let new_content = T.map (fun p -> add empty p id) w in let new_node = if T.terminal w then Terminal (new_content, Idset.singleton id) else Node new_content in Nodes ((Tmap.add (head w) new_node t), m) ) | Meta i -> let m = try Mmap.add i (Idset.add id (Mmap.find i m)) m with Not_found -> Mmap.add i (Idset.singleton id) m in Nodes (t, m) let add t w id = add t w id let rec find_all (Nodes (t,m)) : idset = Idset.union (Mmap.fold (fun _ -> Idset.union) m Idset.empty) (Tmap.fold ( fun _ n acc -> let s2 = match n with | Terminal (_,is) -> is | Node e -> T.choose find_all e in Idset.union acc s2 ) t Idset.empty) (* (\* optimization hack: Not_found is catched in fold_pattern *\) *) (* let fast_inter s1 s2 = *) (* if Idset.is_empty s1 || Idset.is_empty s2 then raise Not_found *) (* else Idset.inter s1 s2 *) (* let option_any2 f s1 s2 = match s1,s2 with *) (* | Some s1, Some s2 -> f s1 s2 *) (* | (Some s, _ | _, Some s) -> s *) (* | _ -> raise Not_found *) (* let fold_pattern ?(complete=true) f acc pat dn = *) (* let deferred = ref [] in *) (* let leafs,metas = ref None, ref None in *) (* let leaf s = leafs := match !leafs with *) (* | None -> Some s *) (* | Some s' -> Some (fast_inter s s') in *) (* let meta s = metas := match !metas with *) (* | None -> Some s *) (* | Some s' -> Some (Idset.union s s') in *) (* let defer c = deferred := c::!deferred in *) (* let rec fp_rec (p:term_pattern) (Nodes(t,m) as dn:t) = *) (* Mmap.iter (fun _ -> meta) m; (\* TODO: gérer patterns nonlin ici *\) *) (* match p with *) (* | Meta m -> defer (m,dn) *) (* | Term w -> *) (* try match select t w with *) (* | Terminal (_,is) -> leaf is *) (* | Node e -> *) (* if complete then T.fold2 (fun _ -> fp_rec) () w e else *) (* if T.fold2 *) (* (fun b p dn -> match p with *) (* | Term _ -> fp_rec p dn; false *) (* | Meta _ -> b *) (* ) true w e *) (* then T.choose (T.choose fp_rec w) e *) (* with Not_found -> *) (* if Mmap.is_empty m then raise Not_found else () *) (* in try *) (* fp_rec pat dn; *) (* (try Some (option_any2 Idset.union !leafs !metas) with Not_found -> None), *) (* List.fold_left (fun acc (m,dn) -> f m dn acc) acc !deferred *) (* with Not_found -> None,acc *) (* Sets with a neutral element for inter *) module OSet (S:Set.S) = struct type t = S.t option let union s1 s2 = match s1,s2 with | (None, _ | _, None) -> None | Some a, Some b -> Some (S.union a b) let inter s1 s2 = match s1,s2 with | (None, a | a, None) -> a | Some a, Some b -> Some (S.inter a b) let is_empty = function | None -> false | Some s -> S.is_empty s (* optimization hack: Not_found is catched in fold_pattern *) let fast_inter s1 s2 = if is_empty s1 || is_empty s2 then raise Not_found else let r = inter s1 s2 in if is_empty r then raise Not_found else r let full = None let empty = Some S.empty end module OIdset = OSet(Idset) let fold_pattern ?(complete=true) f acc pat dn = let deferred = ref [] in let defer c = deferred := c::!deferred in let rec fp_rec metas p (Nodes(t,m) as dn:t) = (* TODO gérer les dnets non-linéaires *) let metas = Mmap.fold (fun _ -> Idset.union) m metas in match p with | Meta m -> defer (metas,m,dn); OIdset.full | Term w -> let curm = Mmap.fold (fun _ -> Idset.union) m Idset.empty in try match select t w with | Terminal 
(_,is) -> Some (Idset.union curm is) | Node e -> let ids = if complete then T.fold2 (fun acc w e -> OIdset.fast_inter acc (fp_rec metas w e) ) OIdset.full w e else let (all_metas, res) = T.fold2 (fun (b,acc) w e -> match w with | Term _ -> false, OIdset.fast_inter acc (fp_rec metas w e) | Meta _ -> b, acc ) (true,OIdset.full) w e in if all_metas then T.choose (T.choose (fp_rec metas) w) e else res in OIdset.union ids (Some curm) with Not_found -> if Idset.is_empty metas then raise Not_found else Some curm in let cand = try fp_rec Idset.empty pat dn with Not_found -> OIdset.empty in let res = List.fold_left f acc !deferred in cand, res (* intersection of two dnets. keep only the common pairs *) let rec inter (t1:t) (t2:t) : t = let inter_map f (Nodes (t1,m1):t) (Nodes (t2,m2):t) : t = Nodes (Tmap.fold ( fun k e acc -> try Tmap.add k (f e (Tmap.find k t2)) acc with Not_found -> acc ) t1 Tmap.empty, Mmap.fold ( fun m s acc -> try Mmap.add m (Idset.inter s (Mmap.find m m2)) acc with Not_found -> acc ) m1 Mmap.empty ) in inter_map (fun n1 n2 -> match n1,n2 with | Terminal (e1,s1), Terminal (_,s2) -> Terminal (e1,Idset.inter s1 s2) | Node e1, Node e2 -> Node (T.map2 inter e1 e2) | _ -> assert false ) t1 t2 let rec union (t1:t) (t2:t) : t = let union_map f (Nodes (t1,m1):t) (Nodes (t2,m2):t) : t = Nodes (Tmap.fold ( fun k e acc -> try Tmap.add k (f e (Tmap.find k acc)) acc with Not_found -> Tmap.add k e acc ) t1 t2, Mmap.fold ( fun m s acc -> try Mmap.add m (Idset.inter s (Mmap.find m acc)) acc with Not_found -> Mmap.add m s acc ) m1 m2 ) in union_map (fun n1 n2 -> match n1,n2 with | Terminal (e1,s1), Terminal (_,s2) -> Terminal (e1,Idset.union s1 s2) | Node e1, Node e2 -> Node (T.map2 union e1 e2) | _ -> assert false ) t1 t2 let find_match (p:term_pattern) (t:t) : idset = let metas = ref Mmap.empty in let (mset,lset) = fold_pattern ~complete:false (fun acc (mset,m,t) -> let all = OIdset.fast_inter acc (Some(let t = try inter t (Mmap.find m !metas) with Not_found -> t in metas := Mmap.add m t !metas; find_all t)) in OIdset.union (Some mset) all ) None p t in Option.get (OIdset.inter mset lset) let fold_pattern f acc p dn = fold_pattern ~complete:true f acc p dn let idset_map f is = Idset.fold (fun e acc -> Idset.add (f e) acc) is Idset.empty let tmap_map f g m = Tmap.fold (fun k e acc -> Tmap.add (f k) (g e) acc) m Tmap.empty let rec map sidset sterm (Nodes (t,m)) : t = let snode = function | Terminal (e,is) -> Terminal (e,idset_map sidset is) | Node e -> Node (T.map (map sidset sterm) e) in Nodes (tmap_map sterm snode t, Mmap.map (idset_map sidset) m) end coq-8.4pl2/lib/gmap.mli0000640000175000001440000000226412010532755014030 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool val add : 'a -> 'b -> ('a,'b) t -> ('a,'b) t val find : 'a -> ('a,'b) t -> 'b val remove : 'a -> ('a,'b) t -> ('a,'b) t val mem : 'a -> ('a,'b) t -> bool val iter : ('a -> 'b -> unit) -> ('a,'b) t -> unit val map : ('b -> 'c) -> ('a,'b) t -> ('a,'c) t val fold : ('a -> 'b -> 'c -> 'c) -> ('a,'b) t -> 'c -> 'c (** Additions with respect to ocaml standard library. *) val dom : ('a,'b) t -> 'a list val rng : ('a,'b) t -> 'b list val to_list : ('a,'b) t -> ('a * 'b) list coq-8.4pl2/lib/unicodetable.ml0000640000175000001440000016631411321662572015405 0ustar notinusers (** Unicode tables generated from Camomile. 
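Each table below is a list of inclusive code-point ranges [(first, last)]. For illustration only, membership of a code point [c] in such a table can be tested with a simple scan (this helper is not part of the file):
{[
let in_table c table =
  List.exists (fun (lo, hi) -> lo <= c && c <= hi) table
]}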
*) (* Letter, Uppercase *) let lu = [ (0x00041,0x0005A); (0x000C0,0x000D6); (0x000D8,0x000DE); (0x00100,0x00100); (0x00102,0x00102); (0x00104,0x00104); (0x00106,0x00106); (0x00108,0x00108); (0x0010A,0x0010A); (0x0010C,0x0010C); (0x0010E,0x0010E); (0x00110,0x00110); (0x00112,0x00112); (0x00114,0x00114); (0x00116,0x00116); (0x00118,0x00118); (0x0011A,0x0011A); (0x0011C,0x0011C); (0x0011E,0x0011E); (0x00120,0x00120); (0x00122,0x00122); (0x00124,0x00124); (0x00126,0x00126); (0x00128,0x00128); (0x0012A,0x0012A); (0x0012C,0x0012C); (0x0012E,0x0012E); (0x00130,0x00130); (0x00132,0x00132); (0x00134,0x00134); (0x00136,0x00136); (0x00139,0x00139); (0x0013B,0x0013B); (0x0013D,0x0013D); (0x0013F,0x0013F); (0x00141,0x00141); (0x00143,0x00143); (0x00145,0x00145); (0x00147,0x00147); (0x0014A,0x0014A); (0x0014C,0x0014C); (0x0014E,0x0014E); (0x00150,0x00150); (0x00152,0x00152); (0x00154,0x00154); (0x00156,0x00156); (0x00158,0x00158); (0x0015A,0x0015A); (0x0015C,0x0015C); (0x0015E,0x0015E); (0x00160,0x00160); (0x00162,0x00162); (0x00164,0x00164); (0x00166,0x00166); (0x00168,0x00168); (0x0016A,0x0016A); (0x0016C,0x0016C); (0x0016E,0x0016E); (0x00170,0x00170); (0x00172,0x00172); (0x00174,0x00174); (0x00176,0x00176); (0x00178,0x00179); (0x0017B,0x0017B); (0x0017D,0x0017D); (0x00181,0x00182); (0x00184,0x00184); (0x00186,0x00187); (0x00189,0x0018B); (0x0018E,0x00191); (0x00193,0x00194); (0x00196,0x00198); (0x0019C,0x0019D); (0x0019F,0x001A0); (0x001A2,0x001A2); (0x001A4,0x001A4); (0x001A6,0x001A7); (0x001A9,0x001A9); (0x001AC,0x001AC); (0x001AE,0x001AF); (0x001B1,0x001B3); (0x001B5,0x001B5); (0x001B7,0x001B8); (0x001BC,0x001BC); (0x001C4,0x001C4); (0x001C7,0x001C7); (0x001CA,0x001CA); (0x001CD,0x001CD); (0x001CF,0x001CF); (0x001D1,0x001D1); (0x001D3,0x001D3); (0x001D5,0x001D5); (0x001D7,0x001D7); (0x001D9,0x001D9); (0x001DB,0x001DB); (0x001DE,0x001DE); (0x001E0,0x001E0); (0x001E2,0x001E2); (0x001E4,0x001E4); (0x001E6,0x001E6); (0x001E8,0x001E8); (0x001EA,0x001EA); (0x001EC,0x001EC); (0x001EE,0x001EE); (0x001F1,0x001F1); (0x001F4,0x001F4); (0x001F6,0x001F8); (0x001FA,0x001FA); (0x001FC,0x001FC); (0x001FE,0x001FE); (0x00200,0x00200); (0x00202,0x00202); (0x00204,0x00204); (0x00206,0x00206); (0x00208,0x00208); (0x0020A,0x0020A); (0x0020C,0x0020C); (0x0020E,0x0020E); (0x00210,0x00210); (0x00212,0x00212); (0x00214,0x00214); (0x00216,0x00216); (0x00218,0x00218); (0x0021A,0x0021A); (0x0021C,0x0021C); (0x0021E,0x0021E); (0x00220,0x00220); (0x00222,0x00222); (0x00224,0x00224); (0x00226,0x00226); (0x00228,0x00228); (0x0022A,0x0022A); (0x0022C,0x0022C); (0x0022E,0x0022E); (0x00230,0x00230); (0x00232,0x00232); (0x00386,0x00386); (0x00388,0x0038A); (0x0038C,0x0038C); (0x0038E,0x0038F); (0x00391,0x003A1); (0x003A3,0x003AB); (0x003D2,0x003D4); (0x003D8,0x003D8); (0x003DA,0x003DA); (0x003DC,0x003DC); (0x003DE,0x003DE); (0x003E0,0x003E0); (0x003E2,0x003E2); (0x003E4,0x003E4); (0x003E6,0x003E6); (0x003E8,0x003E8); (0x003EA,0x003EA); (0x003EC,0x003EC); (0x003EE,0x003EE); (0x003F4,0x003F4); (0x00400,0x0042F); (0x00460,0x00460); (0x00462,0x00462); (0x00464,0x00464); (0x00466,0x00466); (0x00468,0x00468); (0x0046A,0x0046A); (0x0046C,0x0046C); (0x0046E,0x0046E); (0x00470,0x00470); (0x00472,0x00472); (0x00474,0x00474); (0x00476,0x00476); (0x00478,0x00478); (0x0047A,0x0047A); (0x0047C,0x0047C); (0x0047E,0x0047E); (0x00480,0x00480); (0x0048A,0x0048A); (0x0048C,0x0048C); (0x0048E,0x0048E); (0x00490,0x00490); (0x00492,0x00492); (0x00494,0x00494); (0x00496,0x00496); (0x00498,0x00498); (0x0049A,0x0049A); (0x0049C,0x0049C); (0x0049E,0x0049E); 
(0x004A0,0x004A0); (0x004A2,0x004A2); (0x004A4,0x004A4); (0x004A6,0x004A6); (0x004A8,0x004A8); (0x004AA,0x004AA); (0x004AC,0x004AC); (0x004AE,0x004AE); (0x004B0,0x004B0); (0x004B2,0x004B2); (0x004B4,0x004B4); (0x004B6,0x004B6); (0x004B8,0x004B8); (0x004BA,0x004BA); (0x004BC,0x004BC); (0x004BE,0x004BE); (0x004C0,0x004C1); (0x004C3,0x004C3); (0x004C5,0x004C5); (0x004C7,0x004C7); (0x004C9,0x004C9); (0x004CB,0x004CB); (0x004CD,0x004CD); (0x004D0,0x004D0); (0x004D2,0x004D2); (0x004D4,0x004D4); (0x004D6,0x004D6); (0x004D8,0x004D8); (0x004DA,0x004DA); (0x004DC,0x004DC); (0x004DE,0x004DE); (0x004E0,0x004E0); (0x004E2,0x004E2); (0x004E4,0x004E4); (0x004E6,0x004E6); (0x004E8,0x004E8); (0x004EA,0x004EA); (0x004EC,0x004EC); (0x004EE,0x004EE); (0x004F0,0x004F0); (0x004F2,0x004F2); (0x004F4,0x004F4); (0x004F8,0x004F8); (0x00500,0x00500); (0x00502,0x00502); (0x00504,0x00504); (0x00506,0x00506); (0x00508,0x00508); (0x0050A,0x0050A); (0x0050C,0x0050C); (0x0050E,0x0050E); (0x00531,0x00556); (0x010A0,0x010C5); (0x01E00,0x01E00); (0x01E02,0x01E02); (0x01E04,0x01E04); (0x01E06,0x01E06); (0x01E08,0x01E08); (0x01E0A,0x01E0A); (0x01E0C,0x01E0C); (0x01E0E,0x01E0E); (0x01E10,0x01E10); (0x01E12,0x01E12); (0x01E14,0x01E14); (0x01E16,0x01E16); (0x01E18,0x01E18); (0x01E1A,0x01E1A); (0x01E1C,0x01E1C); (0x01E1E,0x01E1E); (0x01E20,0x01E20); (0x01E22,0x01E22); (0x01E24,0x01E24); (0x01E26,0x01E26); (0x01E28,0x01E28); (0x01E2A,0x01E2A); (0x01E2C,0x01E2C); (0x01E2E,0x01E2E); (0x01E30,0x01E30); (0x01E32,0x01E32); (0x01E34,0x01E34); (0x01E36,0x01E36); (0x01E38,0x01E38); (0x01E3A,0x01E3A); (0x01E3C,0x01E3C); (0x01E3E,0x01E3E); (0x01E40,0x01E40); (0x01E42,0x01E42); (0x01E44,0x01E44); (0x01E46,0x01E46); (0x01E48,0x01E48); (0x01E4A,0x01E4A); (0x01E4C,0x01E4C); (0x01E4E,0x01E4E); (0x01E50,0x01E50); (0x01E52,0x01E52); (0x01E54,0x01E54); (0x01E56,0x01E56); (0x01E58,0x01E58); (0x01E5A,0x01E5A); (0x01E5C,0x01E5C); (0x01E5E,0x01E5E); (0x01E60,0x01E60); (0x01E62,0x01E62); (0x01E64,0x01E64); (0x01E66,0x01E66); (0x01E68,0x01E68); (0x01E6A,0x01E6A); (0x01E6C,0x01E6C); (0x01E6E,0x01E6E); (0x01E70,0x01E70); (0x01E72,0x01E72); (0x01E74,0x01E74); (0x01E76,0x01E76); (0x01E78,0x01E78); (0x01E7A,0x01E7A); (0x01E7C,0x01E7C); (0x01E7E,0x01E7E); (0x01E80,0x01E80); (0x01E82,0x01E82); (0x01E84,0x01E84); (0x01E86,0x01E86); (0x01E88,0x01E88); (0x01E8A,0x01E8A); (0x01E8C,0x01E8C); (0x01E8E,0x01E8E); (0x01E90,0x01E90); (0x01E92,0x01E92); (0x01E94,0x01E94); (0x01EA0,0x01EA0); (0x01EA2,0x01EA2); (0x01EA4,0x01EA4); (0x01EA6,0x01EA6); (0x01EA8,0x01EA8); (0x01EAA,0x01EAA); (0x01EAC,0x01EAC); (0x01EAE,0x01EAE); (0x01EB0,0x01EB0); (0x01EB2,0x01EB2); (0x01EB4,0x01EB4); (0x01EB6,0x01EB6); (0x01EB8,0x01EB8); (0x01EBA,0x01EBA); (0x01EBC,0x01EBC); (0x01EBE,0x01EBE); (0x01EC0,0x01EC0); (0x01EC2,0x01EC2); (0x01EC4,0x01EC4); (0x01EC6,0x01EC6); (0x01EC8,0x01EC8); (0x01ECA,0x01ECA); (0x01ECC,0x01ECC); (0x01ECE,0x01ECE); (0x01ED0,0x01ED0); (0x01ED2,0x01ED2); (0x01ED4,0x01ED4); (0x01ED6,0x01ED6); (0x01ED8,0x01ED8); (0x01EDA,0x01EDA); (0x01EDC,0x01EDC); (0x01EDE,0x01EDE); (0x01EE0,0x01EE0); (0x01EE2,0x01EE2); (0x01EE4,0x01EE4); (0x01EE6,0x01EE6); (0x01EE8,0x01EE8); (0x01EEA,0x01EEA); (0x01EEC,0x01EEC); (0x01EEE,0x01EEE); (0x01EF0,0x01EF0); (0x01EF2,0x01EF2); (0x01EF4,0x01EF4); (0x01EF6,0x01EF6); (0x01EF8,0x01EF8); (0x01F08,0x01F0F); (0x01F18,0x01F1D); (0x01F28,0x01F2F); (0x01F38,0x01F3F); (0x01F48,0x01F4D); (0x01F59,0x01F59); (0x01F5B,0x01F5B); (0x01F5D,0x01F5D); (0x01F5F,0x01F5F); (0x01F68,0x01F6F); (0x01FB8,0x01FBB); (0x01FC8,0x01FCB); (0x01FD8,0x01FDB); (0x01FE8,0x01FEC); 
(0x01FF8,0x01FFB); (0x02102,0x02102); (0x02107,0x02107); (0x0210B,0x0210D); (0x02110,0x02112); (0x02115,0x02115); (0x02119,0x0211D); (0x02124,0x02124); (0x02126,0x02126); (0x02128,0x02128); (0x0212A,0x0212D); (0x02130,0x02131); (0x02133,0x02133); (0x0213E,0x0213F); (0x02145,0x02145); (0x0FF21,0x0FF3A); (0x10400,0x10425); (0x1D400,0x1D419); (0x1D434,0x1D44D); (0x1D468,0x1D481); (0x1D49C,0x1D49C); (0x1D49E,0x1D49F); (0x1D4A2,0x1D4A2); (0x1D4A5,0x1D4A6); (0x1D4A9,0x1D4AC); (0x1D4AE,0x1D4B5); (0x1D4D0,0x1D4E9); (0x1D504,0x1D505); (0x1D507,0x1D50A); (0x1D50D,0x1D514); (0x1D516,0x1D51C); (0x1D538,0x1D539); (0x1D53B,0x1D53E); (0x1D540,0x1D544); (0x1D546,0x1D546); (0x1D54A,0x1D550); (0x1D56C,0x1D585); (0x1D5A0,0x1D5B9); (0x1D5D4,0x1D5ED); (0x1D608,0x1D621); (0x1D63C,0x1D655); (0x1D670,0x1D689); (0x1D6A8,0x1D6C0); (0x1D6E2,0x1D6FA); (0x1D71C,0x1D734); (0x1D756,0x1D76E); (0x1D790,0x1D7A8) ] (* Letter, Lowercase *) let ll = [ (0x00061,0x0007A); (0x000AA,0x000AA); (0x000B5,0x000B5); (0x000BA,0x000BA); (0x000DF,0x000F6); (0x000F8,0x000FF); (0x00101,0x00101); (0x00103,0x00103); (0x00105,0x00105); (0x00107,0x00107); (0x00109,0x00109); (0x0010B,0x0010B); (0x0010D,0x0010D); (0x0010F,0x0010F); (0x00111,0x00111); (0x00113,0x00113); (0x00115,0x00115); (0x00117,0x00117); (0x00119,0x00119); (0x0011B,0x0011B); (0x0011D,0x0011D); (0x0011F,0x0011F); (0x00121,0x00121); (0x00123,0x00123); (0x00125,0x00125); (0x00127,0x00127); (0x00129,0x00129); (0x0012B,0x0012B); (0x0012D,0x0012D); (0x0012F,0x0012F); (0x00131,0x00131); (0x00133,0x00133); (0x00135,0x00135); (0x00137,0x00138); (0x0013A,0x0013A); (0x0013C,0x0013C); (0x0013E,0x0013E); (0x00140,0x00140); (0x00142,0x00142); (0x00144,0x00144); (0x00146,0x00146); (0x00148,0x00149); (0x0014B,0x0014B); (0x0014D,0x0014D); (0x0014F,0x0014F); (0x00151,0x00151); (0x00153,0x00153); (0x00155,0x00155); (0x00157,0x00157); (0x00159,0x00159); (0x0015B,0x0015B); (0x0015D,0x0015D); (0x0015F,0x0015F); (0x00161,0x00161); (0x00163,0x00163); (0x00165,0x00165); (0x00167,0x00167); (0x00169,0x00169); (0x0016B,0x0016B); (0x0016D,0x0016D); (0x0016F,0x0016F); (0x00171,0x00171); (0x00173,0x00173); (0x00175,0x00175); (0x00177,0x00177); (0x0017A,0x0017A); (0x0017C,0x0017C); (0x0017E,0x00180); (0x00183,0x00183); (0x00185,0x00185); (0x00188,0x00188); (0x0018C,0x0018D); (0x00192,0x00192); (0x00195,0x00195); (0x00199,0x0019B); (0x0019E,0x0019E); (0x001A1,0x001A1); (0x001A3,0x001A3); (0x001A5,0x001A5); (0x001A8,0x001A8); (0x001AA,0x001AB); (0x001AD,0x001AD); (0x001B0,0x001B0); (0x001B4,0x001B4); (0x001B6,0x001B6); (0x001B9,0x001BA); (0x001BD,0x001BF); (0x001C6,0x001C6); (0x001C9,0x001C9); (0x001CC,0x001CC); (0x001CE,0x001CE); (0x001D0,0x001D0); (0x001D2,0x001D2); (0x001D4,0x001D4); (0x001D6,0x001D6); (0x001D8,0x001D8); (0x001DA,0x001DA); (0x001DC,0x001DD); (0x001DF,0x001DF); (0x001E1,0x001E1); (0x001E3,0x001E3); (0x001E5,0x001E5); (0x001E7,0x001E7); (0x001E9,0x001E9); (0x001EB,0x001EB); (0x001ED,0x001ED); (0x001EF,0x001F0); (0x001F3,0x001F3); (0x001F5,0x001F5); (0x001F9,0x001F9); (0x001FB,0x001FB); (0x001FD,0x001FD); (0x001FF,0x001FF); (0x00201,0x00201); (0x00203,0x00203); (0x00205,0x00205); (0x00207,0x00207); (0x00209,0x00209); (0x0020B,0x0020B); (0x0020D,0x0020D); (0x0020F,0x0020F); (0x00211,0x00211); (0x00213,0x00213); (0x00215,0x00215); (0x00217,0x00217); (0x00219,0x00219); (0x0021B,0x0021B); (0x0021D,0x0021D); (0x0021F,0x0021F); (0x00223,0x00223); (0x00225,0x00225); (0x00227,0x00227); (0x00229,0x00229); (0x0022B,0x0022B); (0x0022D,0x0022D); (0x0022F,0x0022F); (0x00231,0x00231); (0x00233,0x00233); 
(0x00250,0x002AD); (0x00390,0x00390); (0x003AC,0x003CE); (0x003D0,0x003D1); (0x003D5,0x003D7); (0x003D9,0x003D9); (0x003DB,0x003DB); (0x003DD,0x003DD); (0x003DF,0x003DF); (0x003E1,0x003E1); (0x003E3,0x003E3); (0x003E5,0x003E5); (0x003E7,0x003E7); (0x003E9,0x003E9); (0x003EB,0x003EB); (0x003ED,0x003ED); (0x003EF,0x003F3); (0x003F5,0x003F5); (0x00430,0x0045F); (0x00461,0x00461); (0x00463,0x00463); (0x00465,0x00465); (0x00467,0x00467); (0x00469,0x00469); (0x0046B,0x0046B); (0x0046D,0x0046D); (0x0046F,0x0046F); (0x00471,0x00471); (0x00473,0x00473); (0x00475,0x00475); (0x00477,0x00477); (0x00479,0x00479); (0x0047B,0x0047B); (0x0047D,0x0047D); (0x0047F,0x0047F); (0x00481,0x00481); (0x0048B,0x0048B); (0x0048D,0x0048D); (0x0048F,0x0048F); (0x00491,0x00491); (0x00493,0x00493); (0x00495,0x00495); (0x00497,0x00497); (0x00499,0x00499); (0x0049B,0x0049B); (0x0049D,0x0049D); (0x0049F,0x0049F); (0x004A1,0x004A1); (0x004A3,0x004A3); (0x004A5,0x004A5); (0x004A7,0x004A7); (0x004A9,0x004A9); (0x004AB,0x004AB); (0x004AD,0x004AD); (0x004AF,0x004AF); (0x004B1,0x004B1); (0x004B3,0x004B3); (0x004B5,0x004B5); (0x004B7,0x004B7); (0x004B9,0x004B9); (0x004BB,0x004BB); (0x004BD,0x004BD); (0x004BF,0x004BF); (0x004C2,0x004C2); (0x004C4,0x004C4); (0x004C6,0x004C6); (0x004C8,0x004C8); (0x004CA,0x004CA); (0x004CC,0x004CC); (0x004CE,0x004CE); (0x004D1,0x004D1); (0x004D3,0x004D3); (0x004D5,0x004D5); (0x004D7,0x004D7); (0x004D9,0x004D9); (0x004DB,0x004DB); (0x004DD,0x004DD); (0x004DF,0x004DF); (0x004E1,0x004E1); (0x004E3,0x004E3); (0x004E5,0x004E5); (0x004E7,0x004E7); (0x004E9,0x004E9); (0x004EB,0x004EB); (0x004ED,0x004ED); (0x004EF,0x004EF); (0x004F1,0x004F1); (0x004F3,0x004F3); (0x004F5,0x004F5); (0x004F9,0x004F9); (0x00501,0x00501); (0x00503,0x00503); (0x00505,0x00505); (0x00507,0x00507); (0x00509,0x00509); (0x0050B,0x0050B); (0x0050D,0x0050D); (0x0050F,0x0050F); (0x00561,0x00587); (0x01E01,0x01E01); (0x01E03,0x01E03); (0x01E05,0x01E05); (0x01E07,0x01E07); (0x01E09,0x01E09); (0x01E0B,0x01E0B); (0x01E0D,0x01E0D); (0x01E0F,0x01E0F); (0x01E11,0x01E11); (0x01E13,0x01E13); (0x01E15,0x01E15); (0x01E17,0x01E17); (0x01E19,0x01E19); (0x01E1B,0x01E1B); (0x01E1D,0x01E1D); (0x01E1F,0x01E1F); (0x01E21,0x01E21); (0x01E23,0x01E23); (0x01E25,0x01E25); (0x01E27,0x01E27); (0x01E29,0x01E29); (0x01E2B,0x01E2B); (0x01E2D,0x01E2D); (0x01E2F,0x01E2F); (0x01E31,0x01E31); (0x01E33,0x01E33); (0x01E35,0x01E35); (0x01E37,0x01E37); (0x01E39,0x01E39); (0x01E3B,0x01E3B); (0x01E3D,0x01E3D); (0x01E3F,0x01E3F); (0x01E41,0x01E41); (0x01E43,0x01E43); (0x01E45,0x01E45); (0x01E47,0x01E47); (0x01E49,0x01E49); (0x01E4B,0x01E4B); (0x01E4D,0x01E4D); (0x01E4F,0x01E4F); (0x01E51,0x01E51); (0x01E53,0x01E53); (0x01E55,0x01E55); (0x01E57,0x01E57); (0x01E59,0x01E59); (0x01E5B,0x01E5B); (0x01E5D,0x01E5D); (0x01E5F,0x01E5F); (0x01E61,0x01E61); (0x01E63,0x01E63); (0x01E65,0x01E65); (0x01E67,0x01E67); (0x01E69,0x01E69); (0x01E6B,0x01E6B); (0x01E6D,0x01E6D); (0x01E6F,0x01E6F); (0x01E71,0x01E71); (0x01E73,0x01E73); (0x01E75,0x01E75); (0x01E77,0x01E77); (0x01E79,0x01E79); (0x01E7B,0x01E7B); (0x01E7D,0x01E7D); (0x01E7F,0x01E7F); (0x01E81,0x01E81); (0x01E83,0x01E83); (0x01E85,0x01E85); (0x01E87,0x01E87); (0x01E89,0x01E89); (0x01E8B,0x01E8B); (0x01E8D,0x01E8D); (0x01E8F,0x01E8F); (0x01E91,0x01E91); (0x01E93,0x01E93); (0x01E95,0x01E9B); (0x01EA1,0x01EA1); (0x01EA3,0x01EA3); (0x01EA5,0x01EA5); (0x01EA7,0x01EA7); (0x01EA9,0x01EA9); (0x01EAB,0x01EAB); (0x01EAD,0x01EAD); (0x01EAF,0x01EAF); (0x01EB1,0x01EB1); (0x01EB3,0x01EB3); (0x01EB5,0x01EB5); (0x01EB7,0x01EB7); (0x01EB9,0x01EB9); 
(0x01EBB,0x01EBB); (0x01EBD,0x01EBD); (0x01EBF,0x01EBF); (0x01EC1,0x01EC1); (0x01EC3,0x01EC3); (0x01EC5,0x01EC5); (0x01EC7,0x01EC7); (0x01EC9,0x01EC9); (0x01ECB,0x01ECB); (0x01ECD,0x01ECD); (0x01ECF,0x01ECF); (0x01ED1,0x01ED1); (0x01ED3,0x01ED3); (0x01ED5,0x01ED5); (0x01ED7,0x01ED7); (0x01ED9,0x01ED9); (0x01EDB,0x01EDB); (0x01EDD,0x01EDD); (0x01EDF,0x01EDF); (0x01EE1,0x01EE1); (0x01EE3,0x01EE3); (0x01EE5,0x01EE5); (0x01EE7,0x01EE7); (0x01EE9,0x01EE9); (0x01EEB,0x01EEB); (0x01EED,0x01EED); (0x01EEF,0x01EEF); (0x01EF1,0x01EF1); (0x01EF3,0x01EF3); (0x01EF5,0x01EF5); (0x01EF7,0x01EF7); (0x01EF9,0x01EF9); (0x01F00,0x01F07); (0x01F10,0x01F15); (0x01F20,0x01F27); (0x01F30,0x01F37); (0x01F40,0x01F45); (0x01F50,0x01F57); (0x01F60,0x01F67); (0x01F70,0x01F7D); (0x01F80,0x01F87); (0x01F90,0x01F97); (0x01FA0,0x01FA7); (0x01FB0,0x01FB4); (0x01FB6,0x01FB7); (0x01FBE,0x01FBE); (0x01FC2,0x01FC4); (0x01FC6,0x01FC7); (0x01FD0,0x01FD3); (0x01FD6,0x01FD7); (0x01FE0,0x01FE7); (0x01FF2,0x01FF4); (0x01FF6,0x01FF7); (0x02071,0x02071); (0x0207F,0x0207F); (0x0210A,0x0210A); (0x0210E,0x0210F); (0x02113,0x02113); (0x0212F,0x0212F); (0x02134,0x02134); (0x02139,0x02139); (0x0213D,0x0213D); (0x02146,0x02149); (0x0FB00,0x0FB06); (0x0FB13,0x0FB17); (0x0FF41,0x0FF5A); (0x10428,0x1044D); (0x1D41A,0x1D433); (0x1D44E,0x1D454); (0x1D456,0x1D467); (0x1D482,0x1D49B); (0x1D4B6,0x1D4B9); (0x1D4BB,0x1D4BB); (0x1D4BD,0x1D4C0); (0x1D4C2,0x1D4C3); (0x1D4C5,0x1D4CF); (0x1D4EA,0x1D503); (0x1D51E,0x1D537); (0x1D552,0x1D56B); (0x1D586,0x1D59F); (0x1D5BA,0x1D5D3); (0x1D5EE,0x1D607); (0x1D622,0x1D63B); (0x1D656,0x1D66F); (0x1D68A,0x1D6A3); (0x1D6C2,0x1D6DA); (0x1D6DC,0x1D6E1); (0x1D6FC,0x1D714); (0x1D716,0x1D71B); (0x1D736,0x1D74E); (0x1D750,0x1D755); (0x1D770,0x1D788); (0x1D78A,0x1D78F); (0x1D7AA,0x1D7C2); (0x1D7C4,0x1D7C9) ] (* Letter, Titlecase *) let lt = [ (0x001C5,0x001C5); (0x001C8,0x001C8); (0x001CB,0x001CB); (0x001F2,0x001F2); (0x01F88,0x01F8F); (0x01F98,0x01F9F); (0x01FA8,0x01FAF); (0x01FBC,0x01FBC); (0x01FCC,0x01FCC); (0x01FFC,0x01FFC) ] (* Mark, Non-Spacing *) let mn = [ (0x00300,0x0034F); (0x00360,0x0036F); (0x00483,0x00486); (0x00591,0x005A1); (0x005A3,0x005B9); (0x005BB,0x005BD); (0x005BF,0x005BF); (0x005C1,0x005C2); (0x005C4,0x005C4); (0x0064B,0x00655); (0x00670,0x00670); (0x006D6,0x006DC); (0x006DF,0x006E4); (0x006E7,0x006E8); (0x006EA,0x006ED); (0x00711,0x00711); (0x00730,0x0074A); (0x007A6,0x007B0); (0x00901,0x00902); (0x0093C,0x0093C); (0x00941,0x00948); (0x0094D,0x0094D); (0x00951,0x00954); (0x00962,0x00963); (0x00981,0x00981); (0x009BC,0x009BC); (0x009C1,0x009C4); (0x009CD,0x009CD); (0x009E2,0x009E3); (0x00A02,0x00A02); (0x00A3C,0x00A3C); (0x00A41,0x00A42); (0x00A47,0x00A48); (0x00A4B,0x00A4D); (0x00A70,0x00A71); (0x00A81,0x00A82); (0x00ABC,0x00ABC); (0x00AC1,0x00AC5); (0x00AC7,0x00AC8); (0x00ACD,0x00ACD); (0x00B01,0x00B01); (0x00B3C,0x00B3C); (0x00B3F,0x00B3F); (0x00B41,0x00B43); (0x00B4D,0x00B4D); (0x00B56,0x00B56); (0x00B82,0x00B82); (0x00BC0,0x00BC0); (0x00BCD,0x00BCD); (0x00C3E,0x00C40); (0x00C46,0x00C48); (0x00C4A,0x00C4D); (0x00C55,0x00C56); (0x00CBF,0x00CBF); (0x00CC6,0x00CC6); (0x00CCC,0x00CCD); (0x00D41,0x00D43); (0x00D4D,0x00D4D); (0x00DCA,0x00DCA); (0x00DD2,0x00DD4); (0x00DD6,0x00DD6); (0x00E31,0x00E31); (0x00E34,0x00E3A); (0x00E47,0x00E4E); (0x00EB1,0x00EB1); (0x00EB4,0x00EB9); (0x00EBB,0x00EBC); (0x00EC8,0x00ECD); (0x00F18,0x00F19); (0x00F35,0x00F35); (0x00F37,0x00F37); (0x00F39,0x00F39); (0x00F71,0x00F7E); (0x00F80,0x00F84); (0x00F86,0x00F87); (0x00F90,0x00F97); (0x00F99,0x00FBC); (0x00FC6,0x00FC6); 
(0x0102D,0x01030); (0x01032,0x01032); (0x01036,0x01037); (0x01039,0x01039); (0x01058,0x01059); (0x01712,0x01714); (0x01732,0x01734); (0x01752,0x01753); (0x01772,0x01773); (0x017B7,0x017BD); (0x017C6,0x017C6); (0x017C9,0x017D3); (0x0180B,0x0180D); (0x018A9,0x018A9); (0x020D0,0x020DC); (0x020E1,0x020E1); (0x020E5,0x020EA); (0x0302A,0x0302F); (0x03099,0x0309A); (0x0FB1E,0x0FB1E); (0x0FE00,0x0FE0F); (0x0FE20,0x0FE23); (0x1D167,0x1D169); (0x1D17B,0x1D182); (0x1D185,0x1D18B); (0x1D1AA,0x1D1AD) ] (* Mark, Spacing Combining *) let mc = [ (0x00903,0x00903); (0x0093E,0x00940); (0x00949,0x0094C); (0x00982,0x00983); (0x009BE,0x009C0); (0x009C7,0x009C8); (0x009CB,0x009CC); (0x009D7,0x009D7); (0x00A3E,0x00A40); (0x00A83,0x00A83); (0x00ABE,0x00AC0); (0x00AC9,0x00AC9); (0x00ACB,0x00ACC); (0x00B02,0x00B03); (0x00B3E,0x00B3E); (0x00B40,0x00B40); (0x00B47,0x00B48); (0x00B4B,0x00B4C); (0x00B57,0x00B57); (0x00BBE,0x00BBF); (0x00BC1,0x00BC2); (0x00BC6,0x00BC8); (0x00BCA,0x00BCC); (0x00BD7,0x00BD7); (0x00C01,0x00C03); (0x00C41,0x00C44); (0x00C82,0x00C83); (0x00CBE,0x00CBE); (0x00CC0,0x00CC4); (0x00CC7,0x00CC8); (0x00CCA,0x00CCB); (0x00CD5,0x00CD6); (0x00D02,0x00D03); (0x00D3E,0x00D40); (0x00D46,0x00D48); (0x00D4A,0x00D4C); (0x00D57,0x00D57); (0x00D82,0x00D83); (0x00DCF,0x00DD1); (0x00DD8,0x00DDF); (0x00DF2,0x00DF3); (0x00F3E,0x00F3F); (0x00F7F,0x00F7F); (0x0102C,0x0102C); (0x01031,0x01031); (0x01038,0x01038); (0x01056,0x01057); (0x017B4,0x017B6); (0x017BE,0x017C5); (0x017C7,0x017C8); (0x1D165,0x1D166); (0x1D16D,0x1D172) ] (* Mark, Enclosing *) let me = [ (0x00488,0x00489); (0x006DE,0x006DE); (0x020DD,0x020E0); (0x020E2,0x020E4) ] (* Number, Decimal Digit *) let nd = [ (0x00030,0x00039); (0x00660,0x00669); (0x006F0,0x006F9); (0x00966,0x0096F); (0x009E6,0x009EF); (0x00A66,0x00A6F); (0x00AE6,0x00AEF); (0x00B66,0x00B6F); (0x00BE7,0x00BEF); (0x00C66,0x00C6F); (0x00CE6,0x00CEF); (0x00D66,0x00D6F); (0x00E50,0x00E59); (0x00ED0,0x00ED9); (0x00F20,0x00F29); (0x01040,0x01049); (0x01369,0x01371); (0x017E0,0x017E9); (0x01810,0x01819); (0x0FF10,0x0FF19); (0x1D7CE,0x1D7FF) ] (* Number, Letter *) let nl = [ (0x016EE,0x016F0); (0x02160,0x02183); (0x03007,0x03007); (0x03021,0x03029); (0x03038,0x0303A); (0x1034A,0x1034A) ] (* Number, Other *) let no = [ (0x000B2,0x000B3); (0x000B9,0x000B9); (0x000BC,0x000BE); (0x009F4,0x009F9); (0x00BF0,0x00BF2); (0x00F2A,0x00F33); (0x01372,0x0137C); (0x02070,0x02070); (0x02074,0x02079); (0x02080,0x02089); (0x02153,0x0215F); (0x02460,0x0249B); (0x024EA,0x024FE); (0x02776,0x02793); (0x03192,0x03195); (0x03220,0x03229); (0x03251,0x0325F); (0x03280,0x03289); (0x032B1,0x032BF); (0x10320,0x10323) ] (* Separator, Space *) let zs = [ (0x00020,0x00020); (0x000A0,0x000A0); (0x01680,0x01680); (0x02000,0x0200B); (0x0202F,0x0202F); (0x0205F,0x0205F); (0x03000,0x03000) ] (* Separator, Line *) let zl = [ (0x02028,0x02028) ] (* Separator, Paragraph *) let zp = [ (0x02029,0x02029) ] (* Other, Control *) let cc = [ (0x00000,0x0001F); (0x0007F,0x0009F) ] (* Other, Format *) let cf = [ (0x006DD,0x006DD); (0x0070F,0x0070F); (0x0180E,0x0180E); (0x0200C,0x0200F); (0x0202A,0x0202E); (0x02060,0x02063); (0x0206A,0x0206F); (0x0FEFF,0x0FEFF); (0x0FFF9,0x0FFFB); (0x1D173,0x1D17A); (0xE0001,0xE0001); (0xE0020,0xE007F) ] (* Other, Surrogate *) let cs = [ (0x0D800,0x0DEFE); (0x0DFFF,0x0DFFF) ] (* Other, Private Use *) let co = [ (0x0E000,0x0F8FF) ] (* Other, Not Assigned *) let cn = [ (0x00221,0x00221); (0x00234,0x0024F); (0x002AE,0x002AF); (0x002EF,0x002FF); (0x00350,0x0035F); (0x00370,0x00373); (0x00376,0x00379); 
(0x0037B,0x0037D); (0x0037F,0x00383); (0x0038B,0x0038B); (0x0038D,0x0038D); (0x003A2,0x003A2); (0x003CF,0x003CF); (0x003F7,0x003FF); (0x00487,0x00487); (0x004CF,0x004CF); (0x004F6,0x004F7); (0x004FA,0x004FF); (0x00510,0x00530); (0x00557,0x00558); (0x00560,0x00560); (0x00588,0x00588); (0x0058B,0x00590); (0x005A2,0x005A2); (0x005BA,0x005BA); (0x005C5,0x005CF); (0x005EB,0x005EF); (0x005F5,0x0060B); (0x0060D,0x0061A); (0x0061C,0x0061E); (0x00620,0x00620); (0x0063B,0x0063F); (0x00656,0x0065F); (0x006EE,0x006EF); (0x006FF,0x006FF); (0x0070E,0x0070E); (0x0072D,0x0072F); (0x0074B,0x0077F); (0x007B2,0x00900); (0x00904,0x00904); (0x0093A,0x0093B); (0x0094E,0x0094F); (0x00955,0x00957); (0x00971,0x00980); (0x00984,0x00984); (0x0098D,0x0098E); (0x00991,0x00992); (0x009A9,0x009A9); (0x009B1,0x009B1); (0x009B3,0x009B5); (0x009BA,0x009BB); (0x009BD,0x009BD); (0x009C5,0x009C6); (0x009C9,0x009CA); (0x009CE,0x009D6); (0x009D8,0x009DB); (0x009DE,0x009DE); (0x009E4,0x009E5); (0x009FB,0x00A01); (0x00A03,0x00A04); (0x00A0B,0x00A0E); (0x00A11,0x00A12); (0x00A29,0x00A29); (0x00A31,0x00A31); (0x00A34,0x00A34); (0x00A37,0x00A37); (0x00A3A,0x00A3B); (0x00A3D,0x00A3D); (0x00A43,0x00A46); (0x00A49,0x00A4A); (0x00A4E,0x00A58); (0x00A5D,0x00A5D); (0x00A5F,0x00A65); (0x00A75,0x00A80); (0x00A84,0x00A84); (0x00A8C,0x00A8C); (0x00A8E,0x00A8E); (0x00A92,0x00A92); (0x00AA9,0x00AA9); (0x00AB1,0x00AB1); (0x00AB4,0x00AB4); (0x00ABA,0x00ABB); (0x00AC6,0x00AC6); (0x00ACA,0x00ACA); (0x00ACE,0x00ACF); (0x00AD1,0x00ADF); (0x00AE1,0x00AE5); (0x00AF0,0x00B00); (0x00B04,0x00B04); (0x00B0D,0x00B0E); (0x00B11,0x00B12); (0x00B29,0x00B29); (0x00B31,0x00B31); (0x00B34,0x00B35); (0x00B3A,0x00B3B); (0x00B44,0x00B46); (0x00B49,0x00B4A); (0x00B4E,0x00B55); (0x00B58,0x00B5B); (0x00B5E,0x00B5E); (0x00B62,0x00B65); (0x00B71,0x00B81); (0x00B84,0x00B84); (0x00B8B,0x00B8D); (0x00B91,0x00B91); (0x00B96,0x00B98); (0x00B9B,0x00B9B); (0x00B9D,0x00B9D); (0x00BA0,0x00BA2); (0x00BA5,0x00BA7); (0x00BAB,0x00BAD); (0x00BB6,0x00BB6); (0x00BBA,0x00BBD); (0x00BC3,0x00BC5); (0x00BC9,0x00BC9); (0x00BCE,0x00BD6); (0x00BD8,0x00BE6); (0x00BF3,0x00C00); (0x00C04,0x00C04); (0x00C0D,0x00C0D); (0x00C11,0x00C11); (0x00C29,0x00C29); (0x00C34,0x00C34); (0x00C3A,0x00C3D); (0x00C45,0x00C45); (0x00C49,0x00C49); (0x00C4E,0x00C54); (0x00C57,0x00C5F); (0x00C62,0x00C65); (0x00C70,0x00C81); (0x00C84,0x00C84); (0x00C8D,0x00C8D); (0x00C91,0x00C91); (0x00CA9,0x00CA9); (0x00CB4,0x00CB4); (0x00CBA,0x00CBD); (0x00CC5,0x00CC5); (0x00CC9,0x00CC9); (0x00CCE,0x00CD4); (0x00CD7,0x00CDD); (0x00CDF,0x00CDF); (0x00CE2,0x00CE5); (0x00CF0,0x00D01); (0x00D04,0x00D04); (0x00D0D,0x00D0D); (0x00D11,0x00D11); (0x00D29,0x00D29); (0x00D3A,0x00D3D); (0x00D44,0x00D45); (0x00D49,0x00D49); (0x00D4E,0x00D56); (0x00D58,0x00D5F); (0x00D62,0x00D65); (0x00D70,0x00D81); (0x00D84,0x00D84); (0x00D97,0x00D99); (0x00DB2,0x00DB2); (0x00DBC,0x00DBC); (0x00DBE,0x00DBF); (0x00DC7,0x00DC9); (0x00DCB,0x00DCE); (0x00DD5,0x00DD5); (0x00DD7,0x00DD7); (0x00DE0,0x00DF1); (0x00DF5,0x00E00); (0x00E3B,0x00E3E); (0x00E5C,0x00E80); (0x00E83,0x00E83); (0x00E85,0x00E86); (0x00E89,0x00E89); (0x00E8B,0x00E8C); (0x00E8E,0x00E93); (0x00E98,0x00E98); (0x00EA0,0x00EA0); (0x00EA4,0x00EA4); (0x00EA6,0x00EA6); (0x00EA8,0x00EA9); (0x00EAC,0x00EAC); (0x00EBA,0x00EBA); (0x00EBE,0x00EBF); (0x00EC5,0x00EC5); (0x00EC7,0x00EC7); (0x00ECE,0x00ECF); (0x00EDA,0x00EDB); (0x00EDE,0x00EFF); (0x00F48,0x00F48); (0x00F6B,0x00F70); (0x00F8C,0x00F8F); (0x00F98,0x00F98); (0x00FBD,0x00FBD); (0x00FCD,0x00FCE); (0x00FD0,0x00FFF); (0x01022,0x01022); (0x01028,0x01028); 
(0x0102B,0x0102B); (0x01033,0x01035); (0x0103A,0x0103F); (0x0105A,0x0109F); (0x010C6,0x010CF); (0x010F9,0x010FA); (0x010FC,0x010FF); (0x0115A,0x0115E); (0x011A3,0x011A7); (0x011FA,0x011FF); (0x01207,0x01207); (0x01247,0x01247); (0x01249,0x01249); (0x0124E,0x0124F); (0x01257,0x01257); (0x01259,0x01259); (0x0125E,0x0125F); (0x01287,0x01287); (0x01289,0x01289); (0x0128E,0x0128F); (0x012AF,0x012AF); (0x012B1,0x012B1); (0x012B6,0x012B7); (0x012BF,0x012BF); (0x012C1,0x012C1); (0x012C6,0x012C7); (0x012CF,0x012CF); (0x012D7,0x012D7); (0x012EF,0x012EF); (0x0130F,0x0130F); (0x01311,0x01311); (0x01316,0x01317); (0x0131F,0x0131F); (0x01347,0x01347); (0x0135B,0x01360); (0x0137D,0x0139F); (0x013F5,0x01400); (0x01677,0x0167F); (0x0169D,0x0169F); (0x016F1,0x016FF); (0x0170D,0x0170D); (0x01715,0x0171F); (0x01737,0x0173F); (0x01754,0x0175F); (0x0176D,0x0176D); (0x01771,0x01771); (0x01774,0x0177F); (0x017DD,0x017DF); (0x017EA,0x017FF); (0x0180F,0x0180F); (0x0181A,0x0181F); (0x01878,0x0187F); (0x018AA,0x01DFF); (0x01E9C,0x01E9F); (0x01EFA,0x01EFF); (0x01F16,0x01F17); (0x01F1E,0x01F1F); (0x01F46,0x01F47); (0x01F4E,0x01F4F); (0x01F58,0x01F58); (0x01F5A,0x01F5A); (0x01F5C,0x01F5C); (0x01F5E,0x01F5E); (0x01F7E,0x01F7F); (0x01FB5,0x01FB5); (0x01FC5,0x01FC5); (0x01FD4,0x01FD5); (0x01FDC,0x01FDC); (0x01FF0,0x01FF1); (0x01FF5,0x01FF5); (0x01FFF,0x01FFF); (0x02053,0x02056); (0x02058,0x0205E); (0x02064,0x02069); (0x02072,0x02073); (0x0208F,0x0209F); (0x020B2,0x020CF); (0x020EB,0x020FF); (0x0213B,0x0213C); (0x0214C,0x02152); (0x02184,0x0218F); (0x023CF,0x023FF); (0x02427,0x0243F); (0x0244B,0x0245F); (0x024FF,0x024FF); (0x02614,0x02615); (0x02618,0x02618); (0x0267E,0x0267F); (0x0268A,0x02700); (0x02705,0x02705); (0x0270A,0x0270B); (0x02728,0x02728); (0x0274C,0x0274C); (0x0274E,0x0274E); (0x02753,0x02755); (0x02757,0x02757); (0x0275F,0x02760); (0x02795,0x02797); (0x027B0,0x027B0); (0x027BF,0x027CF); (0x027EC,0x027EF); (0x02B00,0x02E7F); (0x02E9A,0x02E9A); (0x02EF4,0x02EFF); (0x02FD6,0x02FEF); (0x02FFC,0x02FFF); (0x03040,0x03040); (0x03097,0x03098); (0x03100,0x03104); (0x0312D,0x03130); (0x0318F,0x0318F); (0x031B8,0x031EF); (0x0321D,0x0321F); (0x03244,0x03250); (0x0327C,0x0327E); (0x032CC,0x032CF); (0x032FF,0x032FF); (0x03377,0x0337A); (0x033DE,0x033DF); (0x033FF,0x033FF); (0x04DB6,0x04DFF); (0x09FA6,0x09FFF); (0x0A48D,0x0A48F); (0x0A4C7,0x0ABFF); (0x0D7A4,0x0D7FF); (0x0DEFF,0x0DFFE); (0x0FA2E,0x0FA2F); (0x0FA6B,0x0FAFF); (0x0FB07,0x0FB12); (0x0FB18,0x0FB1C); (0x0FB37,0x0FB37); (0x0FB3D,0x0FB3D); (0x0FB3F,0x0FB3F); (0x0FB42,0x0FB42); (0x0FB45,0x0FB45); (0x0FBB2,0x0FBD2); (0x0FD40,0x0FD4F); (0x0FD90,0x0FD91); (0x0FDC8,0x0FDEF); (0x0FDFD,0x0FDFF); (0x0FE10,0x0FE1F); (0x0FE24,0x0FE2F); (0x0FE47,0x0FE48); (0x0FE53,0x0FE53); (0x0FE67,0x0FE67); (0x0FE6C,0x0FE6F); (0x0FE75,0x0FE75); (0x0FEFD,0x0FEFE); (0x0FF00,0x0FF00); (0x0FFBF,0x0FFC1); (0x0FFC8,0x0FFC9); (0x0FFD0,0x0FFD1); (0x0FFD8,0x0FFD9); (0x0FFDD,0x0FFDF); (0x0FFE7,0x0FFE7); (0x0FFEF,0x0FFF8); (0x0FFFE,0x102FF); (0x1031F,0x1031F); (0x10324,0x1032F); (0x1034B,0x103FF); (0x10426,0x10427); (0x1044E,0x1CFFF); (0x1D0F6,0x1D0FF); (0x1D127,0x1D129); (0x1D1DE,0x1D3FF); (0x1D455,0x1D455); (0x1D49D,0x1D49D); (0x1D4A0,0x1D4A1); (0x1D4A3,0x1D4A4); (0x1D4A7,0x1D4A8); (0x1D4AD,0x1D4AD); (0x1D4BA,0x1D4BA); (0x1D4BC,0x1D4BC); (0x1D4C1,0x1D4C1); (0x1D4C4,0x1D4C4); (0x1D506,0x1D506); (0x1D50B,0x1D50C); (0x1D515,0x1D515); (0x1D51D,0x1D51D); (0x1D53A,0x1D53A); (0x1D53F,0x1D53F); (0x1D545,0x1D545); (0x1D547,0x1D549); (0x1D551,0x1D551); (0x1D6A4,0x1D6A7); (0x1D7CA,0x1D7CD); (0x1D800,0x1FFFF); 
(0x2A6D7,0x2F7FF); (0x2FA1E,0xE0000); (0xE0002,0xE001F); (0xE0080,0x7FFFFFFF) ] (* Letter, Modifier *) let lm = [ (0x002B0,0x002B8); (0x002BB,0x002C1); (0x002D0,0x002D1); (0x002E0,0x002E4); (0x002EE,0x002EE); (0x0037A,0x0037A); (0x00559,0x00559); (0x00640,0x00640); (0x006E5,0x006E6); (0x00E46,0x00E46); (0x00EC6,0x00EC6); (0x017D7,0x017D7); (0x01843,0x01843); (0x03005,0x03005); (0x03031,0x03035); (0x0303B,0x0303B); (0x0309D,0x0309E); (0x030FC,0x030FE); (0x0FF70,0x0FF70); (0x0FF9E,0x0FF9F) ] (* Letter, Other *) let lo = [ (0x001BB,0x001BB); (0x001C0,0x001C3); (0x005D0,0x005EA); (0x005F0,0x005F2); (0x00621,0x0063A); (0x00641,0x0064A); (0x0066E,0x0066F); (0x00671,0x006D3); (0x006D5,0x006D5); (0x006FA,0x006FC); (0x00710,0x00710); (0x00712,0x0072C); (0x00780,0x007A5); (0x007B1,0x007B1); (0x00905,0x00939); (0x0093D,0x0093D); (0x00950,0x00950); (0x00958,0x00961); (0x00985,0x0098C); (0x0098F,0x00990); (0x00993,0x009A8); (0x009AA,0x009B0); (0x009B2,0x009B2); (0x009B6,0x009B9); (0x009DC,0x009DD); (0x009DF,0x009E1); (0x009F0,0x009F1); (0x00A05,0x00A0A); (0x00A0F,0x00A10); (0x00A13,0x00A28); (0x00A2A,0x00A30); (0x00A32,0x00A33); (0x00A35,0x00A36); (0x00A38,0x00A39); (0x00A59,0x00A5C); (0x00A5E,0x00A5E); (0x00A72,0x00A74); (0x00A85,0x00A8B); (0x00A8D,0x00A8D); (0x00A8F,0x00A91); (0x00A93,0x00AA8); (0x00AAA,0x00AB0); (0x00AB2,0x00AB3); (0x00AB5,0x00AB9); (0x00ABD,0x00ABD); (0x00AD0,0x00AD0); (0x00AE0,0x00AE0); (0x00B05,0x00B0C); (0x00B0F,0x00B10); (0x00B13,0x00B28); (0x00B2A,0x00B30); (0x00B32,0x00B33); (0x00B36,0x00B39); (0x00B3D,0x00B3D); (0x00B5C,0x00B5D); (0x00B5F,0x00B61); (0x00B83,0x00B83); (0x00B85,0x00B8A); (0x00B8E,0x00B90); (0x00B92,0x00B95); (0x00B99,0x00B9A); (0x00B9C,0x00B9C); (0x00B9E,0x00B9F); (0x00BA3,0x00BA4); (0x00BA8,0x00BAA); (0x00BAE,0x00BB5); (0x00BB7,0x00BB9); (0x00C05,0x00C0C); (0x00C0E,0x00C10); (0x00C12,0x00C28); (0x00C2A,0x00C33); (0x00C35,0x00C39); (0x00C60,0x00C61); (0x00C85,0x00C8C); (0x00C8E,0x00C90); (0x00C92,0x00CA8); (0x00CAA,0x00CB3); (0x00CB5,0x00CB9); (0x00CDE,0x00CDE); (0x00CE0,0x00CE1); (0x00D05,0x00D0C); (0x00D0E,0x00D10); (0x00D12,0x00D28); (0x00D2A,0x00D39); (0x00D60,0x00D61); (0x00D85,0x00D96); (0x00D9A,0x00DB1); (0x00DB3,0x00DBB); (0x00DBD,0x00DBD); (0x00DC0,0x00DC6); (0x00E01,0x00E30); (0x00E32,0x00E33); (0x00E40,0x00E45); (0x00E81,0x00E82); (0x00E84,0x00E84); (0x00E87,0x00E88); (0x00E8A,0x00E8A); (0x00E8D,0x00E8D); (0x00E94,0x00E97); (0x00E99,0x00E9F); (0x00EA1,0x00EA3); (0x00EA5,0x00EA5); (0x00EA7,0x00EA7); (0x00EAA,0x00EAB); (0x00EAD,0x00EB0); (0x00EB2,0x00EB3); (0x00EBD,0x00EBD); (0x00EC0,0x00EC4); (0x00EDC,0x00EDD); (0x00F00,0x00F00); (0x00F40,0x00F47); (0x00F49,0x00F6A); (0x00F88,0x00F8B); (0x01000,0x01021); (0x01023,0x01027); (0x01029,0x0102A); (0x01050,0x01055); (0x010D0,0x010F8); (0x01100,0x01159); (0x0115F,0x011A2); (0x011A8,0x011F9); (0x01200,0x01206); (0x01208,0x01246); (0x01248,0x01248); (0x0124A,0x0124D); (0x01250,0x01256); (0x01258,0x01258); (0x0125A,0x0125D); (0x01260,0x01286); (0x01288,0x01288); (0x0128A,0x0128D); (0x01290,0x012AE); (0x012B0,0x012B0); (0x012B2,0x012B5); (0x012B8,0x012BE); (0x012C0,0x012C0); (0x012C2,0x012C5); (0x012C8,0x012CE); (0x012D0,0x012D6); (0x012D8,0x012EE); (0x012F0,0x0130E); (0x01310,0x01310); (0x01312,0x01315); (0x01318,0x0131E); (0x01320,0x01346); (0x01348,0x0135A); (0x013A0,0x013F4); (0x01401,0x0166C); (0x0166F,0x01676); (0x01681,0x0169A); (0x016A0,0x016EA); (0x01700,0x0170C); (0x0170E,0x01711); (0x01720,0x01731); (0x01740,0x01751); (0x01760,0x0176C); (0x0176E,0x01770); (0x01780,0x017B3); (0x017DC,0x017DC); 
(0x01820,0x01842); (0x01844,0x01877); (0x01880,0x018A8); (0x02135,0x02138); (0x03006,0x03006); (0x0303C,0x0303C); (0x03041,0x03096); (0x0309F,0x0309F); (0x030A1,0x030FA); (0x030FF,0x030FF); (0x03105,0x0312C); (0x03131,0x0318E); (0x031A0,0x031B7); (0x031F0,0x031FF); (0x03400,0x04DB5); (0x04E00,0x09FA5); (0x0A000,0x0A48C); (0x0AC00,0x0D7A3); (0x0F900,0x0FA2D); (0x0FA30,0x0FA6A); (0x0FB1D,0x0FB1D); (0x0FB1F,0x0FB28); (0x0FB2A,0x0FB36); (0x0FB38,0x0FB3C); (0x0FB3E,0x0FB3E); (0x0FB40,0x0FB41); (0x0FB43,0x0FB44); (0x0FB46,0x0FBB1); (0x0FBD3,0x0FD3D); (0x0FD50,0x0FD8F); (0x0FD92,0x0FDC7); (0x0FDF0,0x0FDFB); (0x0FE70,0x0FE74); (0x0FE76,0x0FEFC); (0x0FF66,0x0FF6F); (0x0FF71,0x0FF9D); (0x0FFA0,0x0FFBE); (0x0FFC2,0x0FFC7); (0x0FFCA,0x0FFCF); (0x0FFD2,0x0FFD7); (0x0FFDA,0x0FFDC); (0x10300,0x1031E); (0x10330,0x10349); (0x20000,0x2A6D6); (0x2F800,0x2FA1D) ] (* Punctuation, Connector *) let pc = [ (0x0005F,0x0005F); (0x0203F,0x02040); (0x030FB,0x030FB); (0x0FE33,0x0FE34); (0x0FE4D,0x0FE4F); (0x0FF3F,0x0FF3F); (0x0FF65,0x0FF65) ] (* Punctuation, Dash *) let pd = [ (0x0002D,0x0002D); (0x000AD,0x000AD); (0x0058A,0x0058A); (0x01806,0x01806); (0x02010,0x02015); (0x0301C,0x0301C); (0x03030,0x03030); (0x030A0,0x030A0); (0x0FE31,0x0FE32); (0x0FE58,0x0FE58); (0x0FE63,0x0FE63); (0x0FF0D,0x0FF0D) ] (* Punctuation, Open *) let ps = [ (0x00028,0x00028); (0x0005B,0x0005B); (0x0007B,0x0007B); (0x00F3A,0x00F3A); (0x00F3C,0x00F3C); (0x0169B,0x0169B); (0x0201A,0x0201A); (0x0201E,0x0201E); (0x02045,0x02045); (0x0207D,0x0207D); (0x0208D,0x0208D); (0x02329,0x02329); (0x023B4,0x023B4); (0x02768,0x02768); (0x0276A,0x0276A); (0x0276C,0x0276C); (0x0276E,0x0276E); (0x02770,0x02770); (0x02772,0x02772); (0x02774,0x02774); (0x027E6,0x027E6); (0x027E8,0x027E8); (0x027EA,0x027EA); (0x02983,0x02983); (0x02985,0x02985); (0x02987,0x02987); (0x02989,0x02989); (0x0298B,0x0298B); (0x0298D,0x0298D); (0x0298F,0x0298F); (0x02991,0x02991); (0x02993,0x02993); (0x02995,0x02995); (0x02997,0x02997); (0x029D8,0x029D8); (0x029DA,0x029DA); (0x029FC,0x029FC); (0x03008,0x03008); (0x0300A,0x0300A); (0x0300C,0x0300C); (0x0300E,0x0300E); (0x03010,0x03010); (0x03014,0x03014); (0x03016,0x03016); (0x03018,0x03018); (0x0301A,0x0301A); (0x0301D,0x0301D); (0x0FD3E,0x0FD3E); (0x0FE35,0x0FE35); (0x0FE37,0x0FE37); (0x0FE39,0x0FE39); (0x0FE3B,0x0FE3B); (0x0FE3D,0x0FE3D); (0x0FE3F,0x0FE3F); (0x0FE41,0x0FE41); (0x0FE43,0x0FE43); (0x0FE59,0x0FE59); (0x0FE5B,0x0FE5B); (0x0FE5D,0x0FE5D); (0x0FF08,0x0FF08); (0x0FF3B,0x0FF3B); (0x0FF5B,0x0FF5B); (0x0FF5F,0x0FF5F); (0x0FF62,0x0FF62) ] (* Punctuation, Close *) let pe = [ (0x00029,0x00029); (0x0005D,0x0005D); (0x0007D,0x0007D); (0x00F3B,0x00F3B); (0x00F3D,0x00F3D); (0x0169C,0x0169C); (0x02046,0x02046); (0x0207E,0x0207E); (0x0208E,0x0208E); (0x0232A,0x0232A); (0x023B5,0x023B5); (0x02769,0x02769); (0x0276B,0x0276B); (0x0276D,0x0276D); (0x0276F,0x0276F); (0x02771,0x02771); (0x02773,0x02773); (0x02775,0x02775); (0x027E7,0x027E7); (0x027E9,0x027E9); (0x027EB,0x027EB); (0x02984,0x02984); (0x02986,0x02986); (0x02988,0x02988); (0x0298A,0x0298A); (0x0298C,0x0298C); (0x0298E,0x0298E); (0x02990,0x02990); (0x02992,0x02992); (0x02994,0x02994); (0x02996,0x02996); (0x02998,0x02998); (0x029D9,0x029D9); (0x029DB,0x029DB); (0x029FD,0x029FD); (0x03009,0x03009); (0x0300B,0x0300B); (0x0300D,0x0300D); (0x0300F,0x0300F); (0x03011,0x03011); (0x03015,0x03015); (0x03017,0x03017); (0x03019,0x03019); (0x0301B,0x0301B); (0x0301E,0x0301F); (0x0FD3F,0x0FD3F); (0x0FE36,0x0FE36); (0x0FE38,0x0FE38); (0x0FE3A,0x0FE3A); (0x0FE3C,0x0FE3C); (0x0FE3E,0x0FE3E); 
(0x0FE40,0x0FE40); (0x0FE42,0x0FE42); (0x0FE44,0x0FE44); (0x0FE5A,0x0FE5A); (0x0FE5C,0x0FE5C); (0x0FE5E,0x0FE5E); (0x0FF09,0x0FF09); (0x0FF3D,0x0FF3D); (0x0FF5D,0x0FF5D); (0x0FF60,0x0FF60); (0x0FF63,0x0FF63) ] (* Punctuation, Initial quote *) let pi = [ (0x000AB,0x000AB); (0x02018,0x02018); (0x0201B,0x0201C); (0x0201F,0x0201F); (0x02039,0x02039) ] (* Punctuation, Final quote *) let pf = [ (0x000BB,0x000BB); (0x02019,0x02019); (0x0201D,0x0201D); (0x0203A,0x0203A) ] (* Punctuation, Other *) let po = [ (0x00021,0x00023); (0x00025,0x00027); (0x0002A,0x0002A); (0x0002C,0x0002C); (0x0002E,0x0002F); (0x0003A,0x0003B); (0x0003F,0x00040); (0x0005C,0x0005C); (0x000A1,0x000A1); (0x000B7,0x000B7); (0x000BF,0x000BF); (0x0037E,0x0037E); (0x00387,0x00387); (0x0055A,0x0055F); (0x00589,0x00589); (0x005BE,0x005BE); (0x005C0,0x005C0); (0x005C3,0x005C3); (0x005F3,0x005F4); (0x0060C,0x0060C); (0x0061B,0x0061B); (0x0061F,0x0061F); (0x0066A,0x0066D); (0x006D4,0x006D4); (0x00700,0x0070D); (0x00964,0x00965); (0x00970,0x00970); (0x00DF4,0x00DF4); (0x00E4F,0x00E4F); (0x00E5A,0x00E5B); (0x00F04,0x00F12); (0x00F85,0x00F85); (0x0104A,0x0104F); (0x010FB,0x010FB); (0x01361,0x01368); (0x0166D,0x0166E); (0x016EB,0x016ED); (0x01735,0x01736); (0x017D4,0x017D6); (0x017D8,0x017DA); (0x01800,0x01805); (0x01807,0x0180A); (0x02016,0x02017); (0x02020,0x02027); (0x02030,0x02038); (0x0203B,0x0203E); (0x02041,0x02043); (0x02047,0x02051); (0x02057,0x02057); (0x023B6,0x023B6); (0x03001,0x03003); (0x0303D,0x0303D); (0x0FE30,0x0FE30); (0x0FE45,0x0FE46); (0x0FE49,0x0FE4C); (0x0FE50,0x0FE52); (0x0FE54,0x0FE57); (0x0FE5F,0x0FE61); (0x0FE68,0x0FE68); (0x0FE6A,0x0FE6B); (0x0FF01,0x0FF03); (0x0FF05,0x0FF07); (0x0FF0A,0x0FF0A); (0x0FF0C,0x0FF0C); (0x0FF0E,0x0FF0F); (0x0FF1A,0x0FF1B); (0x0FF1F,0x0FF20); (0x0FF3C,0x0FF3C); (0x0FF61,0x0FF61); (0x0FF64,0x0FF64) ] (* Symbol, Math *) let sm = [ (0x0002B,0x0002B); (0x0003C,0x0003E); (0x0007C,0x0007C); (0x0007E,0x0007E); (0x000AC,0x000AC); (0x000B1,0x000B1); (0x000D7,0x000D7); (0x000F7,0x000F7); (0x003F6,0x003F6); (0x02044,0x02044); (0x02052,0x02052); (0x0207A,0x0207C); (0x0208A,0x0208C); (0x02140,0x02144); (0x0214B,0x0214B); (0x02190,0x02194); (0x0219A,0x0219B); (0x021A0,0x021A0); (0x021A3,0x021A3); (0x021A6,0x021A6); (0x021AE,0x021AE); (0x021CE,0x021CF); (0x021D2,0x021D2); (0x021D4,0x021D4); (0x021F4,0x022FF); (0x02308,0x0230B); (0x02320,0x02321); (0x0237C,0x0237C); (0x0239B,0x023B3); (0x025B7,0x025B7); (0x025C1,0x025C1); (0x025F8,0x025FF); (0x0266F,0x0266F); (0x027D0,0x027E5); (0x027F0,0x027FF); (0x02900,0x02982); (0x02999,0x029D7); (0x029DC,0x029FB); (0x029FE,0x02AFF); (0x0FB29,0x0FB29); (0x0FE62,0x0FE62); (0x0FE64,0x0FE66); (0x0FF0B,0x0FF0B); (0x0FF1C,0x0FF1E); (0x0FF5C,0x0FF5C); (0x0FF5E,0x0FF5E); (0x0FFE2,0x0FFE2); (0x0FFE9,0x0FFEC); (0x1D6C1,0x1D6C1); (0x1D6DB,0x1D6DB); (0x1D6FB,0x1D6FB); (0x1D715,0x1D715); (0x1D735,0x1D735); (0x1D74F,0x1D74F); (0x1D76F,0x1D76F); (0x1D789,0x1D789); (0x1D7A9,0x1D7A9); (0x1D7C3,0x1D7C3) ] (* Symbol, Currency *) let sc = [ (0x00024,0x00024); (0x000A2,0x000A5); (0x009F2,0x009F3); (0x00E3F,0x00E3F); (0x017DB,0x017DB); (0x020A0,0x020B1); (0x0FDFC,0x0FDFC); (0x0FE69,0x0FE69); (0x0FF04,0x0FF04); (0x0FFE0,0x0FFE1); (0x0FFE5,0x0FFE6) ] (* Symbol, Modifier *) let sk = [ (0x0005E,0x0005E); (0x00060,0x00060); (0x000A8,0x000A8); (0x000AF,0x000AF); (0x000B4,0x000B4); (0x000B8,0x000B8); (0x002B9,0x002BA); (0x002C2,0x002CF); (0x002D2,0x002DF); (0x002E5,0x002ED); (0x00374,0x00375); (0x00384,0x00385); (0x01FBD,0x01FBD); (0x01FBF,0x01FC1); (0x01FCD,0x01FCF); (0x01FDD,0x01FDF); 
(0x01FED,0x01FEF); (0x01FFD,0x01FFE); (0x0309B,0x0309C); (0x0FF3E,0x0FF3E); (0x0FF40,0x0FF40); (0x0FFE3,0x0FFE3) ] (* Symbol, Other *) let so = [ (0x000A6,0x000A7); (0x000A9,0x000A9); (0x000AE,0x000AE); (0x000B0,0x000B0); (0x000B6,0x000B6); (0x00482,0x00482); (0x006E9,0x006E9); (0x006FD,0x006FE); (0x009FA,0x009FA); (0x00B70,0x00B70); (0x00F01,0x00F03); (0x00F13,0x00F17); (0x00F1A,0x00F1F); (0x00F34,0x00F34); (0x00F36,0x00F36); (0x00F38,0x00F38); (0x00FBE,0x00FC5); (0x00FC7,0x00FCC); (0x00FCF,0x00FCF); (0x02100,0x02101); (0x02103,0x02106); (0x02108,0x02109); (0x02114,0x02114); (0x02116,0x02118); (0x0211E,0x02123); (0x02125,0x02125); (0x02127,0x02127); (0x02129,0x02129); (0x0212E,0x0212E); (0x02132,0x02132); (0x0213A,0x0213A); (0x0214A,0x0214A); (0x02195,0x02199); (0x0219C,0x0219F); (0x021A1,0x021A2); (0x021A4,0x021A5); (0x021A7,0x021AD); (0x021AF,0x021CD); (0x021D0,0x021D1); (0x021D3,0x021D3); (0x021D5,0x021F3); (0x02300,0x02307); (0x0230C,0x0231F); (0x02322,0x02328); (0x0232B,0x0237B); (0x0237D,0x0239A); (0x023B7,0x023CE); (0x02400,0x02426); (0x02440,0x0244A); (0x0249C,0x024E9); (0x02500,0x025B6); (0x025B8,0x025C0); (0x025C2,0x025F7); (0x02600,0x02613); (0x02616,0x02617); (0x02619,0x0266E); (0x02670,0x0267D); (0x02680,0x02689); (0x02701,0x02704); (0x02706,0x02709); (0x0270C,0x02727); (0x02729,0x0274B); (0x0274D,0x0274D); (0x0274F,0x02752); (0x02756,0x02756); (0x02758,0x0275E); (0x02761,0x02767); (0x02794,0x02794); (0x02798,0x027AF); (0x027B1,0x027BE); (0x02800,0x028FF); (0x02E80,0x02E99); (0x02E9B,0x02EF3); (0x02F00,0x02FD5); (0x02FF0,0x02FFB); (0x03004,0x03004); (0x03012,0x03013); (0x03020,0x03020); (0x03036,0x03037); (0x0303E,0x0303F); (0x03190,0x03191); (0x03196,0x0319F); (0x03200,0x0321C); (0x0322A,0x03243); (0x03260,0x0327B); (0x0327F,0x0327F); (0x0328A,0x032B0); (0x032C0,0x032CB); (0x032D0,0x032FE); (0x03300,0x03376); (0x0337B,0x033DD); (0x033E0,0x033FE); (0x0A490,0x0A4C6); (0x0FFE4,0x0FFE4); (0x0FFE8,0x0FFE8); (0x0FFED,0x0FFEE); (0x0FFFC,0x0FFFD); (0x1D000,0x1D0F5); (0x1D100,0x1D126); (0x1D12A,0x1D164); (0x1D16A,0x1D16C); (0x1D183,0x1D184); (0x1D18C,0x1D1A9); (0x1D1AE,0x1D1DD) ] (* Conversion to lower case. 
*) let to_lower = [ (0x00041,0x0005A), `Delta (32); (0x000C0,0x000D6), `Delta (32); (0x000D8,0x000DE), `Delta (32); (0x00100,0x00100), `Abs (0x00101); (0x00102,0x00102), `Abs (0x00103); (0x00104,0x00104), `Abs (0x00105); (0x00106,0x00106), `Abs (0x00107); (0x00108,0x00108), `Abs (0x00109); (0x0010A,0x0010A), `Abs (0x0010B); (0x0010C,0x0010C), `Abs (0x0010D); (0x0010E,0x0010E), `Abs (0x0010F); (0x00110,0x00110), `Abs (0x00111); (0x00112,0x00112), `Abs (0x00113); (0x00114,0x00114), `Abs (0x00115); (0x00116,0x00116), `Abs (0x00117); (0x00118,0x00118), `Abs (0x00119); (0x0011A,0x0011A), `Abs (0x0011B); (0x0011C,0x0011C), `Abs (0x0011D); (0x0011E,0x0011E), `Abs (0x0011F); (0x00120,0x00120), `Abs (0x00121); (0x00122,0x00122), `Abs (0x00123); (0x00124,0x00124), `Abs (0x00125); (0x00126,0x00126), `Abs (0x00127); (0x00128,0x00128), `Abs (0x00129); (0x0012A,0x0012A), `Abs (0x0012B); (0x0012C,0x0012C), `Abs (0x0012D); (0x0012E,0x0012E), `Abs (0x0012F); (0x00130,0x00130), `Abs (0x00069); (0x00132,0x00132), `Abs (0x00133); (0x00134,0x00134), `Abs (0x00135); (0x00136,0x00136), `Abs (0x00137); (0x00139,0x00139), `Abs (0x0013A); (0x0013B,0x0013B), `Abs (0x0013C); (0x0013D,0x0013D), `Abs (0x0013E); (0x0013F,0x0013F), `Abs (0x00140); (0x00141,0x00141), `Abs (0x00142); (0x00143,0x00143), `Abs (0x00144); (0x00145,0x00145), `Abs (0x00146); (0x00147,0x00147), `Abs (0x00148); (0x0014A,0x0014A), `Abs (0x0014B); (0x0014C,0x0014C), `Abs (0x0014D); (0x0014E,0x0014E), `Abs (0x0014F); (0x00150,0x00150), `Abs (0x00151); (0x00152,0x00152), `Abs (0x00153); (0x00154,0x00154), `Abs (0x00155); (0x00156,0x00156), `Abs (0x00157); (0x00158,0x00158), `Abs (0x00159); (0x0015A,0x0015A), `Abs (0x0015B); (0x0015C,0x0015C), `Abs (0x0015D); (0x0015E,0x0015E), `Abs (0x0015F); (0x00160,0x00160), `Abs (0x00161); (0x00162,0x00162), `Abs (0x00163); (0x00164,0x00164), `Abs (0x00165); (0x00166,0x00166), `Abs (0x00167); (0x00168,0x00168), `Abs (0x00169); (0x0016A,0x0016A), `Abs (0x0016B); (0x0016C,0x0016C), `Abs (0x0016D); (0x0016E,0x0016E), `Abs (0x0016F); (0x00170,0x00170), `Abs (0x00171); (0x00172,0x00172), `Abs (0x00173); (0x00174,0x00174), `Abs (0x00175); (0x00176,0x00176), `Abs (0x00177); (0x00178,0x00178), `Abs (0x000FF); (0x00179,0x00179), `Abs (0x0017A); (0x0017B,0x0017B), `Abs (0x0017C); (0x0017D,0x0017D), `Abs (0x0017E); (0x00181,0x00181), `Abs (0x00253); (0x00182,0x00182), `Abs (0x00183); (0x00184,0x00184), `Abs (0x00185); (0x00186,0x00186), `Abs (0x00254); (0x00187,0x00187), `Abs (0x00188); (0x00189,0x0018A), `Delta (205); (0x0018B,0x0018B), `Abs (0x0018C); (0x0018E,0x0018E), `Abs (0x001DD); (0x0018F,0x0018F), `Abs (0x00259); (0x00190,0x00190), `Abs (0x0025B); (0x00191,0x00191), `Abs (0x00192); (0x00193,0x00193), `Abs (0x00260); (0x00194,0x00194), `Abs (0x00263); (0x00196,0x00196), `Abs (0x00269); (0x00197,0x00197), `Abs (0x00268); (0x00198,0x00198), `Abs (0x00199); (0x0019C,0x0019C), `Abs (0x0026F); (0x0019D,0x0019D), `Abs (0x00272); (0x0019F,0x0019F), `Abs (0x00275); (0x001A0,0x001A0), `Abs (0x001A1); (0x001A2,0x001A2), `Abs (0x001A3); (0x001A4,0x001A4), `Abs (0x001A5); (0x001A6,0x001A6), `Abs (0x00280); (0x001A7,0x001A7), `Abs (0x001A8); (0x001A9,0x001A9), `Abs (0x00283); (0x001AC,0x001AC), `Abs (0x001AD); (0x001AE,0x001AE), `Abs (0x00288); (0x001AF,0x001AF), `Abs (0x001B0); (0x001B1,0x001B2), `Delta (217); (0x001B3,0x001B3), `Abs (0x001B4); (0x001B5,0x001B5), `Abs (0x001B6); (0x001B7,0x001B7), `Abs (0x00292); (0x001B8,0x001B8), `Abs (0x001B9); (0x001BC,0x001BC), `Abs (0x001BD); (0x001C4,0x001C4), `Abs (0x001C6); 
(0x001C7,0x001C7), `Abs (0x001C9); (0x001CA,0x001CA), `Abs (0x001CC); (0x001CD,0x001CD), `Abs (0x001CE); (0x001CF,0x001CF), `Abs (0x001D0); (0x001D1,0x001D1), `Abs (0x001D2); (0x001D3,0x001D3), `Abs (0x001D4); (0x001D5,0x001D5), `Abs (0x001D6); (0x001D7,0x001D7), `Abs (0x001D8); (0x001D9,0x001D9), `Abs (0x001DA); (0x001DB,0x001DB), `Abs (0x001DC); (0x001DE,0x001DE), `Abs (0x001DF); (0x001E0,0x001E0), `Abs (0x001E1); (0x001E2,0x001E2), `Abs (0x001E3); (0x001E4,0x001E4), `Abs (0x001E5); (0x001E6,0x001E6), `Abs (0x001E7); (0x001E8,0x001E8), `Abs (0x001E9); (0x001EA,0x001EA), `Abs (0x001EB); (0x001EC,0x001EC), `Abs (0x001ED); (0x001EE,0x001EE), `Abs (0x001EF); (0x001F1,0x001F1), `Abs (0x001F3); (0x001F4,0x001F4), `Abs (0x001F5); (0x001F6,0x001F6), `Abs (0x00195); (0x001F7,0x001F7), `Abs (0x001BF); (0x001F8,0x001F8), `Abs (0x001F9); (0x001FA,0x001FA), `Abs (0x001FB); (0x001FC,0x001FC), `Abs (0x001FD); (0x001FE,0x001FE), `Abs (0x001FF); (0x00200,0x00200), `Abs (0x00201); (0x00202,0x00202), `Abs (0x00203); (0x00204,0x00204), `Abs (0x00205); (0x00206,0x00206), `Abs (0x00207); (0x00208,0x00208), `Abs (0x00209); (0x0020A,0x0020A), `Abs (0x0020B); (0x0020C,0x0020C), `Abs (0x0020D); (0x0020E,0x0020E), `Abs (0x0020F); (0x00210,0x00210), `Abs (0x00211); (0x00212,0x00212), `Abs (0x00213); (0x00214,0x00214), `Abs (0x00215); (0x00216,0x00216), `Abs (0x00217); (0x00218,0x00218), `Abs (0x00219); (0x0021A,0x0021A), `Abs (0x0021B); (0x0021C,0x0021C), `Abs (0x0021D); (0x0021E,0x0021E), `Abs (0x0021F); (0x00220,0x00220), `Abs (0x0019E); (0x00222,0x00222), `Abs (0x00223); (0x00224,0x00224), `Abs (0x00225); (0x00226,0x00226), `Abs (0x00227); (0x00228,0x00228), `Abs (0x00229); (0x0022A,0x0022A), `Abs (0x0022B); (0x0022C,0x0022C), `Abs (0x0022D); (0x0022E,0x0022E), `Abs (0x0022F); (0x00230,0x00230), `Abs (0x00231); (0x00232,0x00232), `Abs (0x00233); (0x00386,0x00386), `Abs (0x003AC); (0x00388,0x0038A), `Delta (37); (0x0038C,0x0038C), `Abs (0x003CC); (0x0038E,0x0038F), `Delta (63); (0x00391,0x003A1), `Delta (32); (0x003A3,0x003AB), `Delta (32); (0x003D8,0x003D8), `Abs (0x003D9); (0x003DA,0x003DA), `Abs (0x003DB); (0x003DC,0x003DC), `Abs (0x003DD); (0x003DE,0x003DE), `Abs (0x003DF); (0x003E0,0x003E0), `Abs (0x003E1); (0x003E2,0x003E2), `Abs (0x003E3); (0x003E4,0x003E4), `Abs (0x003E5); (0x003E6,0x003E6), `Abs (0x003E7); (0x003E8,0x003E8), `Abs (0x003E9); (0x003EA,0x003EA), `Abs (0x003EB); (0x003EC,0x003EC), `Abs (0x003ED); (0x003EE,0x003EE), `Abs (0x003EF); (0x003F4,0x003F4), `Abs (0x003B8); (0x00400,0x0040F), `Delta (80); (0x00410,0x0042F), `Delta (32); (0x00460,0x00460), `Abs (0x00461); (0x00462,0x00462), `Abs (0x00463); (0x00464,0x00464), `Abs (0x00465); (0x00466,0x00466), `Abs (0x00467); (0x00468,0x00468), `Abs (0x00469); (0x0046A,0x0046A), `Abs (0x0046B); (0x0046C,0x0046C), `Abs (0x0046D); (0x0046E,0x0046E), `Abs (0x0046F); (0x00470,0x00470), `Abs (0x00471); (0x00472,0x00472), `Abs (0x00473); (0x00474,0x00474), `Abs (0x00475); (0x00476,0x00476), `Abs (0x00477); (0x00478,0x00478), `Abs (0x00479); (0x0047A,0x0047A), `Abs (0x0047B); (0x0047C,0x0047C), `Abs (0x0047D); (0x0047E,0x0047E), `Abs (0x0047F); (0x00480,0x00480), `Abs (0x00481); (0x0048A,0x0048A), `Abs (0x0048B); (0x0048C,0x0048C), `Abs (0x0048D); (0x0048E,0x0048E), `Abs (0x0048F); (0x00490,0x00490), `Abs (0x00491); (0x00492,0x00492), `Abs (0x00493); (0x00494,0x00494), `Abs (0x00495); (0x00496,0x00496), `Abs (0x00497); (0x00498,0x00498), `Abs (0x00499); (0x0049A,0x0049A), `Abs (0x0049B); (0x0049C,0x0049C), `Abs (0x0049D); (0x0049E,0x0049E), `Abs (0x0049F); 
(0x004A0,0x004A0), `Abs (0x004A1); (0x004A2,0x004A2), `Abs (0x004A3); (0x004A4,0x004A4), `Abs (0x004A5); (0x004A6,0x004A6), `Abs (0x004A7); (0x004A8,0x004A8), `Abs (0x004A9); (0x004AA,0x004AA), `Abs (0x004AB); (0x004AC,0x004AC), `Abs (0x004AD); (0x004AE,0x004AE), `Abs (0x004AF); (0x004B0,0x004B0), `Abs (0x004B1); (0x004B2,0x004B2), `Abs (0x004B3); (0x004B4,0x004B4), `Abs (0x004B5); (0x004B6,0x004B6), `Abs (0x004B7); (0x004B8,0x004B8), `Abs (0x004B9); (0x004BA,0x004BA), `Abs (0x004BB); (0x004BC,0x004BC), `Abs (0x004BD); (0x004BE,0x004BE), `Abs (0x004BF); (0x004C1,0x004C1), `Abs (0x004C2); (0x004C3,0x004C3), `Abs (0x004C4); (0x004C5,0x004C5), `Abs (0x004C6); (0x004C7,0x004C7), `Abs (0x004C8); (0x004C9,0x004C9), `Abs (0x004CA); (0x004CB,0x004CB), `Abs (0x004CC); (0x004CD,0x004CD), `Abs (0x004CE); (0x004D0,0x004D0), `Abs (0x004D1); (0x004D2,0x004D2), `Abs (0x004D3); (0x004D4,0x004D4), `Abs (0x004D5); (0x004D6,0x004D6), `Abs (0x004D7); (0x004D8,0x004D8), `Abs (0x004D9); (0x004DA,0x004DA), `Abs (0x004DB); (0x004DC,0x004DC), `Abs (0x004DD); (0x004DE,0x004DE), `Abs (0x004DF); (0x004E0,0x004E0), `Abs (0x004E1); (0x004E2,0x004E2), `Abs (0x004E3); (0x004E4,0x004E4), `Abs (0x004E5); (0x004E6,0x004E6), `Abs (0x004E7); (0x004E8,0x004E8), `Abs (0x004E9); (0x004EA,0x004EA), `Abs (0x004EB); (0x004EC,0x004EC), `Abs (0x004ED); (0x004EE,0x004EE), `Abs (0x004EF); (0x004F0,0x004F0), `Abs (0x004F1); (0x004F2,0x004F2), `Abs (0x004F3); (0x004F4,0x004F4), `Abs (0x004F5); (0x004F8,0x004F8), `Abs (0x004F9); (0x00500,0x00500), `Abs (0x00501); (0x00502,0x00502), `Abs (0x00503); (0x00504,0x00504), `Abs (0x00505); (0x00506,0x00506), `Abs (0x00507); (0x00508,0x00508), `Abs (0x00509); (0x0050A,0x0050A), `Abs (0x0050B); (0x0050C,0x0050C), `Abs (0x0050D); (0x0050E,0x0050E), `Abs (0x0050F); (0x00531,0x00556), `Delta (48); (0x01E00,0x01E00), `Abs (0x01E01); (0x01E02,0x01E02), `Abs (0x01E03); (0x01E04,0x01E04), `Abs (0x01E05); (0x01E06,0x01E06), `Abs (0x01E07); (0x01E08,0x01E08), `Abs (0x01E09); (0x01E0A,0x01E0A), `Abs (0x01E0B); (0x01E0C,0x01E0C), `Abs (0x01E0D); (0x01E0E,0x01E0E), `Abs (0x01E0F); (0x01E10,0x01E10), `Abs (0x01E11); (0x01E12,0x01E12), `Abs (0x01E13); (0x01E14,0x01E14), `Abs (0x01E15); (0x01E16,0x01E16), `Abs (0x01E17); (0x01E18,0x01E18), `Abs (0x01E19); (0x01E1A,0x01E1A), `Abs (0x01E1B); (0x01E1C,0x01E1C), `Abs (0x01E1D); (0x01E1E,0x01E1E), `Abs (0x01E1F); (0x01E20,0x01E20), `Abs (0x01E21); (0x01E22,0x01E22), `Abs (0x01E23); (0x01E24,0x01E24), `Abs (0x01E25); (0x01E26,0x01E26), `Abs (0x01E27); (0x01E28,0x01E28), `Abs (0x01E29); (0x01E2A,0x01E2A), `Abs (0x01E2B); (0x01E2C,0x01E2C), `Abs (0x01E2D); (0x01E2E,0x01E2E), `Abs (0x01E2F); (0x01E30,0x01E30), `Abs (0x01E31); (0x01E32,0x01E32), `Abs (0x01E33); (0x01E34,0x01E34), `Abs (0x01E35); (0x01E36,0x01E36), `Abs (0x01E37); (0x01E38,0x01E38), `Abs (0x01E39); (0x01E3A,0x01E3A), `Abs (0x01E3B); (0x01E3C,0x01E3C), `Abs (0x01E3D); (0x01E3E,0x01E3E), `Abs (0x01E3F); (0x01E40,0x01E40), `Abs (0x01E41); (0x01E42,0x01E42), `Abs (0x01E43); (0x01E44,0x01E44), `Abs (0x01E45); (0x01E46,0x01E46), `Abs (0x01E47); (0x01E48,0x01E48), `Abs (0x01E49); (0x01E4A,0x01E4A), `Abs (0x01E4B); (0x01E4C,0x01E4C), `Abs (0x01E4D); (0x01E4E,0x01E4E), `Abs (0x01E4F); (0x01E50,0x01E50), `Abs (0x01E51); (0x01E52,0x01E52), `Abs (0x01E53); (0x01E54,0x01E54), `Abs (0x01E55); (0x01E56,0x01E56), `Abs (0x01E57); (0x01E58,0x01E58), `Abs (0x01E59); (0x01E5A,0x01E5A), `Abs (0x01E5B); (0x01E5C,0x01E5C), `Abs (0x01E5D); (0x01E5E,0x01E5E), `Abs (0x01E5F); (0x01E60,0x01E60), `Abs (0x01E61); (0x01E62,0x01E62), 
`Abs (0x01E63); (0x01E64,0x01E64), `Abs (0x01E65); (0x01E66,0x01E66), `Abs (0x01E67); (0x01E68,0x01E68), `Abs (0x01E69); (0x01E6A,0x01E6A), `Abs (0x01E6B); (0x01E6C,0x01E6C), `Abs (0x01E6D); (0x01E6E,0x01E6E), `Abs (0x01E6F); (0x01E70,0x01E70), `Abs (0x01E71); (0x01E72,0x01E72), `Abs (0x01E73); (0x01E74,0x01E74), `Abs (0x01E75); (0x01E76,0x01E76), `Abs (0x01E77); (0x01E78,0x01E78), `Abs (0x01E79); (0x01E7A,0x01E7A), `Abs (0x01E7B); (0x01E7C,0x01E7C), `Abs (0x01E7D); (0x01E7E,0x01E7E), `Abs (0x01E7F); (0x01E80,0x01E80), `Abs (0x01E81); (0x01E82,0x01E82), `Abs (0x01E83); (0x01E84,0x01E84), `Abs (0x01E85); (0x01E86,0x01E86), `Abs (0x01E87); (0x01E88,0x01E88), `Abs (0x01E89); (0x01E8A,0x01E8A), `Abs (0x01E8B); (0x01E8C,0x01E8C), `Abs (0x01E8D); (0x01E8E,0x01E8E), `Abs (0x01E8F); (0x01E90,0x01E90), `Abs (0x01E91); (0x01E92,0x01E92), `Abs (0x01E93); (0x01E94,0x01E94), `Abs (0x01E95); (0x01EA0,0x01EA0), `Abs (0x01EA1); (0x01EA2,0x01EA2), `Abs (0x01EA3); (0x01EA4,0x01EA4), `Abs (0x01EA5); (0x01EA6,0x01EA6), `Abs (0x01EA7); (0x01EA8,0x01EA8), `Abs (0x01EA9); (0x01EAA,0x01EAA), `Abs (0x01EAB); (0x01EAC,0x01EAC), `Abs (0x01EAD); (0x01EAE,0x01EAE), `Abs (0x01EAF); (0x01EB0,0x01EB0), `Abs (0x01EB1); (0x01EB2,0x01EB2), `Abs (0x01EB3); (0x01EB4,0x01EB4), `Abs (0x01EB5); (0x01EB6,0x01EB6), `Abs (0x01EB7); (0x01EB8,0x01EB8), `Abs (0x01EB9); (0x01EBA,0x01EBA), `Abs (0x01EBB); (0x01EBC,0x01EBC), `Abs (0x01EBD); (0x01EBE,0x01EBE), `Abs (0x01EBF); (0x01EC0,0x01EC0), `Abs (0x01EC1); (0x01EC2,0x01EC2), `Abs (0x01EC3); (0x01EC4,0x01EC4), `Abs (0x01EC5); (0x01EC6,0x01EC6), `Abs (0x01EC7); (0x01EC8,0x01EC8), `Abs (0x01EC9); (0x01ECA,0x01ECA), `Abs (0x01ECB); (0x01ECC,0x01ECC), `Abs (0x01ECD); (0x01ECE,0x01ECE), `Abs (0x01ECF); (0x01ED0,0x01ED0), `Abs (0x01ED1); (0x01ED2,0x01ED2), `Abs (0x01ED3); (0x01ED4,0x01ED4), `Abs (0x01ED5); (0x01ED6,0x01ED6), `Abs (0x01ED7); (0x01ED8,0x01ED8), `Abs (0x01ED9); (0x01EDA,0x01EDA), `Abs (0x01EDB); (0x01EDC,0x01EDC), `Abs (0x01EDD); (0x01EDE,0x01EDE), `Abs (0x01EDF); (0x01EE0,0x01EE0), `Abs (0x01EE1); (0x01EE2,0x01EE2), `Abs (0x01EE3); (0x01EE4,0x01EE4), `Abs (0x01EE5); (0x01EE6,0x01EE6), `Abs (0x01EE7); (0x01EE8,0x01EE8), `Abs (0x01EE9); (0x01EEA,0x01EEA), `Abs (0x01EEB); (0x01EEC,0x01EEC), `Abs (0x01EED); (0x01EEE,0x01EEE), `Abs (0x01EEF); (0x01EF0,0x01EF0), `Abs (0x01EF1); (0x01EF2,0x01EF2), `Abs (0x01EF3); (0x01EF4,0x01EF4), `Abs (0x01EF5); (0x01EF6,0x01EF6), `Abs (0x01EF7); (0x01EF8,0x01EF8), `Abs (0x01EF9); (0x01F08,0x01F0F), `Delta (-8); (0x01F18,0x01F1D), `Delta (-8); (0x01F28,0x01F2F), `Delta (-8); (0x01F38,0x01F3F), `Delta (-8); (0x01F48,0x01F4D), `Delta (-8); (0x01F59,0x01F59), `Abs (0x01F51); (0x01F5B,0x01F5B), `Abs (0x01F53); (0x01F5D,0x01F5D), `Abs (0x01F55); (0x01F5F,0x01F5F), `Abs (0x01F57); (0x01F68,0x01F6F), `Delta (-8); (0x01FB8,0x01FB9), `Delta (-8); (0x01FBA,0x01FBB), `Delta (-74); (0x01FC8,0x01FCB), `Delta (-86); (0x01FD8,0x01FD9), `Delta (-8); (0x01FDA,0x01FDB), `Delta (-100); (0x01FE8,0x01FE9), `Delta (-8); (0x01FEA,0x01FEB), `Delta (-112); (0x01FEC,0x01FEC), `Abs (0x01FE5); (0x01FF8,0x01FF9), `Delta (-128); (0x01FFA,0x01FFB), `Delta (-126); (0x02126,0x02126), `Abs (0x003C9); (0x0212A,0x0212A), `Abs (0x0006B); (0x0212B,0x0212B), `Abs (0x000E5); (0x0FF21,0x0FF3A), `Delta (32); (0x10400,0x10425), `Delta (40); (0x001C5,0x001C5), `Abs (0x001C6); (0x001C8,0x001C8), `Abs (0x001C9); (0x001CB,0x001CB), `Abs (0x001CC); (0x001F2,0x001F2), `Abs (0x001F3); (0x01F88,0x01F8F), `Delta (-8); (0x01F98,0x01F9F), `Delta (-8); (0x01FA8,0x01FAF), `Delta (-8); (0x01FBC,0x01FBC), 
`Abs (0x01FB3); (0x01FCC,0x01FCC), `Abs (0x01FC3); (0x01FFC,0x01FFC), `Abs (0x01FF3); (0x02160,0x0216F), `Delta (16) ] coq-8.4pl2/lib/predicate.ml0000640000175000001440000000565311366307247014711 0ustar notinusers(************************************************************************) (* *) (* Objective Caml *) (* *) (* Xavier Leroy, projet Cristal, INRIA Rocquencourt *) (* *) (* Copyright 1996 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. This file is distributed *) (* under the terms of the GNU Library General Public License. *) (* *) (************************************************************************) (* Sets over ordered types *) module type OrderedType = sig type t val compare: t -> t -> int end module type S = sig type elt type t val empty: t val full: t val is_empty: t -> bool val is_full: t -> bool val mem: elt -> t -> bool val singleton: elt -> t val add: elt -> t -> t val remove: elt -> t -> t val union: t -> t -> t val inter: t -> t -> t val diff: t -> t -> t val complement: t -> t val equal: t -> t -> bool val subset: t -> t -> bool val elements: t -> bool * elt list end module Make(Ord: OrderedType) = struct module EltSet = Set.Make(Ord) (* when bool is false, the denoted set is the complement of the given set *) type elt = Ord.t type t = bool * EltSet.t let elements (b,s) = (b, EltSet.elements s) let empty = (false,EltSet.empty) let full = (true,EltSet.empty) (* assumes the set is infinite *) let is_empty (b,s) = not b & EltSet.is_empty s let is_full (b,s) = b & EltSet.is_empty s let mem x (b,s) = if b then not (EltSet.mem x s) else EltSet.mem x s let singleton x = (false,EltSet.singleton x) let add x (b,s) = if b then (b,EltSet.remove x s) else (b,EltSet.add x s) let remove x (b,s) = if b then (b,EltSet.add x s) else (b,EltSet.remove x s) let complement (b,s) = (not b, s) let union s1 s2 = match (s1,s2) with ((false,p1),(false,p2)) -> (false,EltSet.union p1 p2) | ((true,n1),(true,n2)) -> (true,EltSet.inter n1 n2) | ((false,p1),(true,n2)) -> (true,EltSet.diff n2 p1) | ((true,n1),(false,p2)) -> (true,EltSet.diff n1 p2) let inter s1 s2 = complement (union (complement s1) (complement s2)) let diff s1 s2 = inter s1 (complement s2) let subset s1 s2 = match (s1,s2) with ((false,p1),(false,p2)) -> EltSet.subset p1 p2 | ((true,n1),(true,n2)) -> EltSet.subset n2 n1 | ((false,p1),(true,n2)) -> EltSet.is_empty (EltSet.inter p1 n2) | ((true,_),(false,_)) -> false let equal (b1,s1) (b2,s2) = b1=b2 & EltSet.equal s1 s2 end coq-8.4pl2/lib/unionfind.mli0000640000175000001440000000363712010532755015102 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t (** Add (in place) an element in the partition, or do nothing if the element is already in the partition. *) val add : elt -> t -> unit (** Find the canonical representative of an element. Raise [not_found] if the element isn't known yet. *) val find : elt -> t -> elt (** Merge (in place) the equivalence classes of two elements. This will add the elements in the partition if necessary. *) val union : elt -> elt -> t -> unit (** Merge (in place) the equivalence classes of many elements. 
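    For instance (an illustrative reading of this signature, not from the original
    documentation): if [s] contains both [x] and [y], then after [union_set s p]
    the calls [find x p] and [find y p] return the same canonical representative.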
*) val union_set : set -> t -> unit (** Listing the different components of the partition *) val partition : t -> set list end module Make : functor (S:Set.S) -> functor (M:Map.S with type key = S.elt) -> PartitionSig with type elt = S.elt and type set = S.t coq-8.4pl2/lib/pp.ml40000640000175000001440000002757012121620060013433 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [Util] -> [Pp] -> [Flags. *) let print_emacs = ref false let make_pp_emacs() = print_emacs:=true let make_pp_nonemacs() = print_emacs:=false (* The different kinds of blocks are: \begin{description} \item[hbox:] Horizontal block no line breaking; \item[vbox:] Vertical block each break leads to a new line; \item[hvbox:] Horizontal-vertical block: same as vbox, except if this block is small enough to fit on a single line \item[hovbox:] Horizontal or Vertical block: breaks lead to new line only when necessary to print the content of the block \item[tbox:] Tabulation block: go to tabulation marks and no line breaking (except if no mark yet on the reste of the line) \end{description} *) let comments = ref [] let rec split_com comacc acc pos = function [] -> comments := List.rev acc; comacc | ((b,e),c as com)::coms -> (* Take all comments that terminates before pos, or begin exactly at pos (used to print comments attached after an expression) *) if e<=pos || pos=b then split_com (c::comacc) acc pos coms else split_com comacc (com::acc) pos coms type block_type = | Pp_hbox of int | Pp_vbox of int | Pp_hvbox of int | Pp_hovbox of int | Pp_tbox type 'a ppcmd_token = | Ppcmd_print of 'a | Ppcmd_box of block_type * ('a ppcmd_token Stream.t) | Ppcmd_print_break of int * int | Ppcmd_set_tab | Ppcmd_print_tbreak of int * int | Ppcmd_white_space of int | Ppcmd_force_newline | Ppcmd_print_if_broken | Ppcmd_open_box of block_type | Ppcmd_close_box | Ppcmd_close_tbox | Ppcmd_comment of int type 'a ppdir_token = | Ppdir_ppcmds of 'a ppcmd_token Stream.t | Ppdir_print_newline | Ppdir_print_flush type ppcmd = (int*string) ppcmd_token type std_ppcmds = ppcmd Stream.t type 'a ppdirs = 'a ppdir_token Stream.t (* Compute length of an UTF-8 encoded string Rem 1 : utf8_length <= String.length (equal if pure ascii) Rem 2 : if used for an iso8859_1 encoded string, the result is wrong in very rare cases. 
Such a wrong case corresponds to any sequence of a character in range 192..253 immediately followed by a character in range 128..191 (typical case in french is "du" which is counted 3 instead of 4); then no real harm to use always utf8_length even if using an iso8859_1 encoding *) let utf8_length s = let len = String.length s and cnt = ref 0 and nc = ref 0 and p = ref 0 in while !p < len do begin match s.[!p] with | '\000'..'\127' -> nc := 0 (* ascii char *) | '\128'..'\191' -> nc := 0 (* cannot start with a continuation byte *) | '\192'..'\223' -> nc := 1 (* expect 1 continuation byte *) | '\224'..'\239' -> nc := 2 (* expect 2 continuation bytes *) | '\240'..'\247' -> nc := 3 (* expect 3 continuation bytes *) | '\248'..'\251' -> nc := 4 (* expect 4 continuation bytes *) | '\252'..'\253' -> nc := 5 (* expect 5 continuation bytes *) | '\254'..'\255' -> nc := 0 (* invalid byte *) end ; incr p ; while !p < len && !nc > 0 do match s.[!p] with | '\128'..'\191' (* next continuation byte *) -> incr p ; decr nc | _ (* not a continuation byte *) -> nc := 0 done ; incr cnt done ; !cnt (* formatting commands *) let str s = [< 'Ppcmd_print (utf8_length s,s) >] let stras (i,s) = [< 'Ppcmd_print (i,s) >] let brk (a,b) = [< 'Ppcmd_print_break (a,b) >] let tbrk (a,b) = [< 'Ppcmd_print_tbreak (a,b) >] let tab () = [< 'Ppcmd_set_tab >] let fnl () = [< 'Ppcmd_force_newline >] let pifb () = [< 'Ppcmd_print_if_broken >] let ws n = [< 'Ppcmd_white_space n >] let comment n = [< ' Ppcmd_comment n >] (* derived commands *) let mt () = [< >] let spc () = [< 'Ppcmd_print_break (1,0) >] let cut () = [< 'Ppcmd_print_break (0,0) >] let align () = [< 'Ppcmd_print_break (0,0) >] let int n = str (string_of_int n) let real r = str (string_of_float r) let bool b = str (string_of_bool b) let strbrk s = let rec aux p n = if n < String.length s then if s.[n] = ' ' then if p=n then [< spc (); aux (n+1) (n+1) >] else [< str (String.sub s p (n-p)); spc (); aux (n+1) (n+1) >] else aux p (n+1) else if p=n then [< >] else [< str (String.sub s p (n-p)) >] in aux 0 0 let ismt s = try let _ = Stream.empty s in true with Stream.Failure -> false (* boxing commands *) let h n s = [< 'Ppcmd_box(Pp_hbox n,s) >] let v n s = [< 'Ppcmd_box(Pp_vbox n,s) >] let hv n s = [< 'Ppcmd_box(Pp_hvbox n,s) >] let hov n s = [< 'Ppcmd_box(Pp_hovbox n,s) >] let t s = [< 'Ppcmd_box(Pp_tbox,s) >] (* Opening and closing of boxes *) let hb n = [< 'Ppcmd_open_box(Pp_hbox n) >] let vb n = [< 'Ppcmd_open_box(Pp_vbox n) >] let hvb n = [< 'Ppcmd_open_box(Pp_hvbox n) >] let hovb n = [< 'Ppcmd_open_box(Pp_hovbox n) >] let tb () = [< 'Ppcmd_open_box Pp_tbox >] let close () = [< 'Ppcmd_close_box >] let tclose () = [< 'Ppcmd_close_tbox >] let (++) = Stream.iapp let rec eval_ppcmds l = let rec aux l = try let a = match Stream.next l with | Ppcmd_box (b,s) -> Ppcmd_box (b,eval_ppcmds s) | a -> a in let rest = aux l in a :: rest with Stream.Failure -> [] in Stream.of_list (aux l) (* In new syntax only double quote char is escaped by repeating it *) let rec escape_string s = let rec escape_at s i = if i<0 then s else if s.[i] == '"' then let s' = String.sub s 0 i^"\""^String.sub s i (String.length s - i) in escape_at s' (i-1) else escape_at s (i-1) in escape_at s (String.length s - 1) let qstring s = str ("\""^escape_string s^"\"") let qs = qstring let quote s = h 0 (str "\"" ++ s ++ str "\"") let rec xmlescape ppcmd = let rec escape what withwhat (len, str) = try let pos = String.index str what in let (tlen, tail) = escape what withwhat ((len - pos - 1), (String.sub str 
(pos + 1) (len - pos - 1))) in (pos + tlen + String.length withwhat, String.sub str 0 pos ^ withwhat ^ tail) with Not_found -> (len, str) in match ppcmd with | Ppcmd_print (len, str) -> Ppcmd_print (escape '"' "&quot;" (escape '<' "&lt;" (escape '&' "&amp;" (len, str)))) (* In XML we always print whole content so we can npeek the whole stream *) | Ppcmd_box (x, str) -> Ppcmd_box (x, Stream.of_list (List.map xmlescape (Stream.npeek max_int str))) | x -> x (* This flag tells if the last printed comment ends with a newline, to avoid empty lines *) let com_eol = ref false let com_brk ft = com_eol := false let com_if ft f = if !com_eol then (com_eol := false; Format.pp_force_newline ft ()) else Lazy.force f let rec pr_com ft s = let (s1,os) = try let n = String.index s '\n' in String.sub s 0 n, Some (String.sub s (n+1) (String.length s - n - 1)) with Not_found -> s,None in com_if ft (Lazy.lazy_from_val()); (* let s1 = if String.length s1 <> 0 && s1.[0] = ' ' then (Format.pp_print_space ft (); String.sub s1 1 (String.length s1 - 1)) else s1 in*) Format.pp_print_as ft (utf8_length s1) s1; match os with Some s2 -> if String.length s2 = 0 then (com_eol := true) else (Format.pp_force_newline ft (); pr_com ft s2) | None -> () (* pretty printing functions *) let pp_dirs ft = let pp_open_box = function | Pp_hbox n -> Format.pp_open_hbox ft () | Pp_vbox n -> Format.pp_open_vbox ft n | Pp_hvbox n -> Format.pp_open_hvbox ft n | Pp_hovbox n -> Format.pp_open_hovbox ft n | Pp_tbox -> Format.pp_open_tbox ft () in let rec pp_cmd = function | Ppcmd_print(n,s) -> com_if ft (Lazy.lazy_from_val()); Format.pp_print_as ft n s | Ppcmd_box(bty,ss) -> (* Prevent evaluation of the stream! *) com_if ft (Lazy.lazy_from_val()); pp_open_box bty ; if not (Format.over_max_boxes ()) then Stream.iter pp_cmd ss; Format.pp_close_box ft () | Ppcmd_open_box bty -> com_if ft (Lazy.lazy_from_val()); pp_open_box bty | Ppcmd_close_box -> Format.pp_close_box ft () | Ppcmd_close_tbox -> Format.pp_close_tbox ft () | Ppcmd_white_space n -> com_if ft (Lazy.lazy_from_fun (fun()->Format.pp_print_break ft n 0)) | Ppcmd_print_break(m,n) -> com_if ft (Lazy.lazy_from_fun(fun()->Format.pp_print_break ft m n)) | Ppcmd_set_tab -> Format.pp_set_tab ft () | Ppcmd_print_tbreak(m,n) -> com_if ft (Lazy.lazy_from_fun(fun()->Format.pp_print_tbreak ft m n)) | Ppcmd_force_newline -> com_brk ft; Format.pp_force_newline ft () | Ppcmd_print_if_broken -> com_if ft (Lazy.lazy_from_fun(fun()->Format.pp_print_if_newline ft ())) | Ppcmd_comment i -> let coms = split_com [] [] i !comments in (* Format.pp_open_hvbox ft 0;*) List.iter (pr_com ft) coms(*; Format.pp_close_box ft ()*) in let pp_dir = function | Ppdir_ppcmds cmdstream -> Stream.iter pp_cmd cmdstream | Ppdir_print_newline -> com_brk ft; Format.pp_print_newline ft () | Ppdir_print_flush -> Format.pp_print_flush ft () in fun dirstream -> try Stream.iter pp_dir dirstream; com_brk ft with | reraise -> Format.pp_print_flush ft () ; raise reraise (* pretty print on stdout and stderr *) (* Special chars for emacs, to detect warnings inside goal output *) let emacs_quote_start = String.make 1 (Char.chr 254) let emacs_quote_end = String.make 1 (Char.chr 255) let emacs_quote strm = if !print_emacs then [< str emacs_quote_start; hov 0 strm; str emacs_quote_end >] else hov 0 strm let warnbody strm = emacs_quote (str "Warning: " ++ strm) (* pretty printing functions WITHOUT FLUSH *) let pp_with ft strm = pp_dirs ft [< 'Ppdir_ppcmds strm >] let ppnl_with ft strm = pp_dirs ft [< 'Ppdir_ppcmds [< strm ; 'Ppcmd_force_newline >] >] let
default_warn_with ft strm = ppnl_with ft (warnbody strm) let pp_warn_with = ref default_warn_with let set_warning_function pp_warn = pp_warn_with := pp_warn let warn_with ft strm = !pp_warn_with ft strm let warning_with ft string = warn_with ft (str string) let pp_flush_with ft = Format.pp_print_flush ft (* pretty printing functions WITH FLUSH *) let msg_with ft strm = pp_dirs ft [< 'Ppdir_ppcmds strm ; 'Ppdir_print_flush >] let msgnl_with ft strm = pp_dirs ft [< 'Ppdir_ppcmds strm ; 'Ppdir_print_newline >] let msg_warning_with ft strm = msgnl_with ft (warnbody strm) (* pretty printing functions WITHOUT FLUSH *) let pp x = pp_with !std_ft x let ppnl x = ppnl_with !std_ft x let pperr x = pp_with !err_ft x let pperrnl x = ppnl_with !err_ft x let message s = ppnl (str s) let warning x = warning_with !err_ft x let warn x = warn_with !err_ft x let pp_flush x = Format.pp_print_flush !std_ft x let flush_all() = flush stderr; flush stdout; pp_flush() (* pretty printing functions WITH FLUSH *) let msg x = msg_with !std_ft x let msgnl x = msgnl_with !std_ft x let msgerr x = msg_with !err_ft x let msgerrnl x = msgnl_with !err_ft x let msg_warning x = msg_warning_with !err_ft x (* Same specific display in emacs as warning, but without the "Warning:" *) let msg_debug x = msgnl (emacs_quote x) let string_of_ppcmds c = msg_with Format.str_formatter c; Format.flush_str_formatter () coq-8.4pl2/lib/dyn.mli0000640000175000001440000000122012010532755013665 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ('a -> t) * (t -> 'a) val tag : t -> string coq-8.4pl2/lib/store.mli0000640000175000001440000000155411366307247014252 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> t ; get : t -> 'a option ; remove : t -> t } type 'a t = 'a field end val empty : t val field : unit -> 'a Field.field coq-8.4pl2/lib/flags.mli0000640000175000001440000000535712010532755014206 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool val version_less_or_equal : compat_version -> bool val pr_version : compat_version -> string val beautify : bool ref val make_beautify : bool -> unit val do_beautify : unit -> bool val beautify_file : bool ref val make_silent : bool -> unit val is_silent : unit -> bool val is_verbose : unit -> bool val silently : ('a -> 'b) -> 'a -> 'b val verbosely : ('a -> 'b) -> 'a -> 'b val if_silent : ('a -> unit) -> 'a -> unit val if_verbose : ('a -> unit) -> 'a -> unit val make_auto_intros : bool -> unit val is_auto_intros : unit -> bool val make_warn : bool -> unit val if_warn : ('a -> unit) -> 'a -> unit val hash_cons_proofs : bool ref (** Temporary activate an option (to activate option [o] on [f x y z], use [with_option o (f x y) z]) *) val with_option : bool ref -> ('a -> 'b) -> 'a -> 'b (** Temporary deactivate an option *) val without_option : bool ref -> ('a -> 'b) -> 'a -> 'b (** If [None], no limit *) val set_print_hyps_limit : int option -> unit val print_hyps_limit : unit -> int option val add_unsafe : string -> unit val is_unsafe : string -> bool (** Options for external tools *) (** Returns string format for default browser to use from Coq or CoqIDE *) val browser_cmd_fmt : string val is_standard_doc_url : string -> bool (** Substitute %s in the first chain by the second chain *) 
val subst_command_placeholder : string -> string -> string (** Options for specifying where Coq libraries reside *) val coqlib_spec : bool ref val coqlib : string ref (** Options for specifying where OCaml binaries reside *) val camlbin_spec : bool ref val camlbin : string ref val camlp4bin_spec : bool ref val camlp4bin : string ref (** Level of inlining during a functor application *) val set_inline_level : int -> unit val get_inline_level : unit -> int val default_inline_level : int coq-8.4pl2/lib/hashcons.ml0000640000175000001440000001406612010532755014544 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t1)*(t2->t2)*...). * [hash_sub u x] is a function that hash-cons the sub-structures of x using * the hash-consing functions u provides. * [equal] is a comparison function. It is allowed to use physical equality * on the sub-terms hash-consed by the hash_sub function. * [hash] is the hash function given to the Hashtbl.Make function * * Note that this module type coerces to the argument of Hashtbl.Make. *) module type Comp = sig type t type u val hash_sub : u -> t -> t val equal : t -> t -> bool val hash : t -> int end (* The output is a function f such that * [f ()] has the side-effect of creating (internally) a hash-table of the * hash-consed objects. The result is a function taking the sub-hashcons * functions and an object, and hash-conses it. It does not really make sense * to call f() with different sub-hcons functions. That's why we use the * wrappers simple_hcons, recursive_hcons, ... The latter just take as * argument the sub-hcons functions (the tables are created at that moment), * and return the hcons function for t. *) module type S = sig type t type u val f : unit -> (u -> t -> t) end module Make(X:Comp) = struct type t = X.t type u = X.u (* We create the type of hashtables for t, with our comparison fun. * An invariant is that the table never contains two entries equal * w.r.t. (=), although the equality on keys is X.equal. This is * guaranteed since we hcons the subterms before looking up in the table. *) module Htbl = Hashtbl.Make( struct type t=X.t type u=X.u let hash=X.hash let equal x1 x2 = (*incr comparaison;*) X.equal x1 x2 end) (* The table is created when () is applied. * Hashconsing is then very simple: * 1- hashcons the subterms using hash_sub and u * 2- look up in the table, if we do not get a hit, we add it *) let f () = let tab = Htbl.create 97 in (fun u x -> let y = X.hash_sub u x in (* incr acces;*) try let r = Htbl.find tab y in(* incr succes;*) r with Not_found -> Htbl.add tab y y; y) end (* A few useful wrappers: * each takes as argument the function f above and builds a function of type * u -> t -> t that creates a fresh table each time it is applied to the * sub-hcons functions. *) (* For non-recursive types it is quite easy. *) let simple_hcons h u = h () u (* For a recursive type T, we write the module of sig Comp with u equal * to (T -> T) * u0 * The first component will be used to hash-cons the recursive subterms * The second one to hashcons the other sub-structures. * We just have to take the fixpoint of h *) let recursive_hcons h u = let hc = h () in let rec hrec x = hc (hrec,u) x in hrec (* If the structure may contain loops, use this one.
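   As a small usage sketch of the wrappers above (illustrative only, not part of
   the original code): with the string module Hstring defined further below,
     let hstring = simple_hcons Hstring.f ()
   gives a function such that [hstring s1 == hstring s2] whenever [s1 = s2],
   since both calls return the single copy stored in the internal table.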
*) let recursive_loop_hcons h u = let hc = h () in let rec hrec visited x = if List.memq x visited then x else hc (hrec (x::visited),u) x in hrec [] (* For 2 mutually recursive types *) let recursive2_hcons h1 h2 u1 u2 = let hc1 = h1 () in let hc2 = h2 () in let rec hrec1 x = hc1 (hrec1,hrec2,u1) x and hrec2 x = hc2 (hrec1,hrec2,u2) x in (hrec1,hrec2) (* A set of global hashcons functions *) let hashcons_resets = ref [] let init() = List.iter (fun f -> f()) !hashcons_resets (* [register_hcons h u] registers the hcons function h, result of the above * wrappers. It returns another hcons function that always uses the same * table, which can be reinitialized by init() *) let register_hcons h u = let hf = ref (h u) in let reset() = hf := h u in hashcons_resets := reset :: !hashcons_resets; (fun x -> !hf x) (* Basic hashcons modules for string and obj. Integers do not need be hashconsed. *) (* string *) module Hstring = Make( struct type t = string type u = unit let hash_sub () s =(* incr accesstr;*) s let equal s1 s2 =(* incr comparaisonstr; if*) s1=s2(* then (incr successtr; true) else false*) let hash = Hashtbl.hash end) (* Obj.t *) exception NotEq (* From CAMLLIB/caml/mlvalues.h *) let no_scan_tag = 251 let tuple_p obj = Obj.is_block obj & (Obj.tag obj < no_scan_tag) let comp_obj o1 o2 = if tuple_p o1 & tuple_p o2 then let n1 = Obj.size o1 and n2 = Obj.size o2 in if n1=n2 then try for i = 0 to pred n1 do if not (Obj.field o1 i == Obj.field o2 i) then raise NotEq done; true with NotEq -> false else false else o1=o2 let hash_obj hrec o = begin if tuple_p o then let n = Obj.size o in for i = 0 to pred n do Obj.set_field o i (hrec (Obj.field o i)) done end; o module Hobj = Make( struct type t = Obj.t type u = (Obj.t -> Obj.t) * unit let hash_sub (hrec,_) = hash_obj hrec let equal = comp_obj let hash = Hashtbl.hash end) (* Hashconsing functions for string and obj. Always use the same * global tables. 
The latter can be reinitialized with init() *) (* string : string -> string *) (* obj : Obj.t -> Obj.t *) let string = register_hcons (simple_hcons Hstring.f) () let obj = register_hcons (recursive_hcons Hobj.f) () (* The unsafe polymorphic hashconsing function *) let magic_hash (c : 'a) = init(); let r = obj (Obj.repr c) in init(); (Obj.magic r : 'a) coq-8.4pl2/lib/envars.mli0000640000175000001440000000177012010532755014403 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string val docdir : unit -> string val coqbin : string val coqroot : string (* coqpath is stored in reverse order, since that is the order it * gets added to the searc path *) val xdg_config_home : string val xdg_dirs : string list val coqpath : string list val camlbin : unit -> string val camlp4bin : unit -> string val camllib : unit -> string val camlp4lib : unit -> string coq-8.4pl2/lib/compat.ml40000640000175000001440000001405012010532755014276 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string end end ELSE module type LexerSig = Camlp4.Sig.Lexer with module Loc = Loc and type Token.t = Tok.t END (** Signature and implementation of grammars *) IFDEF CAMLP5 THEN module type GrammarSig = sig include Grammar.S with type te = Tok.t type 'a entry = 'a Entry.e type internal_entry = Tok.t Gramext.g_entry type symbol = Tok.t Gramext.g_symbol type action = Gramext.g_action type production_rule = symbol list * action type single_extend_statment = string option * gram_assoc option * production_rule list type extend_statment = gram_position option * single_extend_statment list val action : 'a -> action val entry_create : string -> 'a entry val entry_parse : 'a entry -> parsable -> 'a val entry_print : 'a entry -> unit val srules' : production_rule list -> symbol val parse_tokens_after_filter : 'a entry -> Tok.t Stream.t -> 'a end module GrammarMake (L:LexerSig) : GrammarSig = struct include Grammar.GMake (L) type 'a entry = 'a Entry.e type internal_entry = Tok.t Gramext.g_entry type symbol = Tok.t Gramext.g_symbol type action = Gramext.g_action type production_rule = symbol list * action type single_extend_statment = string option * gram_assoc option * production_rule list type extend_statment = gram_position option * single_extend_statment list let action = Gramext.action let entry_create = Entry.create let entry_parse = Entry.parse IFDEF CAMLP5_6_02_1 THEN let entry_print x = Entry.print !Pp_control.std_ft x ELSE let entry_print = Entry.print END let srules' = Gramext.srules let parse_tokens_after_filter = Entry.parse_token end ELSE module type GrammarSig = sig include Camlp4.Sig.Grammar.Static with module Loc = Loc and type Token.t = Tok.t type 'a entry = 'a Entry.t type action = Action.t type parsable val parsable : char Stream.t -> parsable val action : 'a -> action val entry_create : string -> 'a entry val entry_parse : 'a entry -> parsable -> 'a val entry_print : 'a entry -> unit val srules' : production_rule list -> symbol end module GrammarMake (L:LexerSig) : GrammarSig = struct include Camlp4.Struct.Grammar.Static.Make (L) type 'a entry = 'a Entry.t type action = Action.t type parsable = char Stream.t let parsable s = s let action = Action.mk let entry_create = Entry.mk let entry_parse e s = parse e (*FIXME*)Loc.ghost s let entry_print x = Entry.print !Pp_control.std_ft x let srules' = srules 
(entry_create "dummy") end END (** Misc functional adjustments *) (** - The lexer produces streams made of pairs in camlp4 *) let get_tok = IFDEF CAMLP5 THEN fun x -> x ELSE fst END (** - Gram.extend is more currified in camlp5 than in camlp4 *) IFDEF CAMLP5 THEN let maybe_curry f x y = f (x,y) let maybe_uncurry f (x,y) = f x y ELSE let maybe_curry f = f let maybe_uncurry f = f END (** Compatibility with camlp5 strict mode *) IFDEF CAMLP5 THEN IFDEF STRICT THEN let vala x = Ploc.VaVal x ELSE let vala x = x END ELSE let vala x = x END (** Fix a quotation difference in [str_item] *) let declare_str_items loc l = IFDEF CAMLP5 THEN MLast.StDcl (loc, vala l) (* correspond to <:str_item< declare $list:l'$ end >> *) ELSE Ast.stSem_of_list l END (** Quotation difference for match clauses *) let default_patt loc = (<:patt< _ >>, vala None, <:expr< failwith "Extension: cannot occur" >>) IFDEF CAMLP5 THEN let make_fun loc cl = let l = cl @ [default_patt loc] in MLast.ExFun (loc, vala l) (* correspond to <:expr< fun [ $list:l$ ] >> *) ELSE let make_fun loc cl = let mk_when = function | Some w -> w | None -> Ast.ExNil loc in let mk_clause (patt,optwhen,expr) = (* correspond to <:match_case< ... when ... -> ... >> *) Ast.McArr (loc, patt, mk_when optwhen, expr) in let init = mk_clause (default_patt loc) in let add_clause x acc = Ast.McOr (loc, mk_clause x, acc) in let l = List.fold_right add_clause cl init in Ast.ExFun (loc,l) (* correspond to <:expr< fun [ $l$ ] >> *) END (** Explicit antiquotation $anti:... $ *) IFDEF CAMLP5 THEN let expl_anti loc e = <:expr< $anti:e$ >> ELSE let expl_anti _loc e = e (* FIXME: understand someday if we can do better *) END coq-8.4pl2/lib/option.ml0000640000175000001440000001121612010532755014240 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false | _ -> true exception IsNone (** [get x] returns [y] where [x] is [Some y]. It raises IsNone if [x] equals [None]. *) let get = function | Some y -> y | _ -> raise IsNone (** [make x] returns [Some x]. *) let make x = Some x (** [init b x] returns [Some x] if [b] is [true] and [None] otherwise. *) let init b x = if b then Some x else None (** [flatten x] is [Some y] if [x] is [Some (Some y)] and [None] otherwise. *) let flatten = function | Some (Some y) -> Some y | _ -> None (** {6 "Iterators"} ***) (** [iter f x] executes [f y] if [x] equals [Some y]. It does nothing otherwise. *) let iter f = function | Some y -> f y | _ -> () exception Heterogeneous (** [iter2 f x y] executes [f z w] if [x] equals [Some z] and [y] equals [Some w]. It does nothing if both [x] and [y] are [None]. And raises [Heterogeneous] otherwise. *) let iter2 f x y = match x,y with | Some z, Some w -> f z w | None,None -> () | _,_ -> raise Heterogeneous (** [map f x] is [None] if [x] is [None] and [Some (f y)] if [x] is [Some y]. *) let map f = function | Some y -> Some (f y) | _ -> None (** [smartmap f x] does the same as [map f x] except that it tries to share some memory. *) let smartmap f = function | Some y as x -> let y' = f y in if y' == y then x else Some y' | _ -> None (** [fold_left f a x] is [f a y] if [x] is [Some y], and [a] otherwise. *) let fold_left f a = function | Some y -> f a y | _ -> a (** [fold_left2 f a x y] is [f z w] if [x] is [Some z] and [y] is [Some w]. It is [a] if both [x] and [y] are [None]. Otherwise it raises [Heterogeneous]. 
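    For instance (an illustrative example, not from the original documentation):
    [fold_left2 (fun acc z w -> acc + z * w) 1 (Some 2) (Some 3)] evaluates to [7],
    while [fold_left2 f a (Some z) None] raises [Heterogeneous].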
*) let fold_left2 f a x y = match x,y with | Some x, Some y -> f a x y | None, None -> a | _ -> raise Heterogeneous (** [fold_right f x a] is [f y a] if [x] is [Some y], and [a] otherwise. *) let fold_right f x a = match x with | Some y -> f y a | _ -> a (** [fold_map f a x] is [(a', Some z)] where [(a', z) = f a y] if [x] is [Some y], and [(a, None)] otherwise. *) let fold_map f a x = match x with | Some y -> let a, z = f a y in a, Some z | _ -> a, None (** [cata f a x] is [a] if [x] is [None] and [f y] if [x] is [Some y]. *) let cata f a = function | Some c -> f c | None -> a (** {6 More Specific operations} ***) (** [default a x] is [y] if [x] is [Some y] and [a] otherwise. *) let default a = function | Some y -> y | _ -> a (** [lift f x] is the same as [map f x]. *) let lift = map (** [lift_right f a x] is [Some (f a y)] if [x] is [Some y], and [None] otherwise. *) let lift_right f a = function | Some y -> Some (f a y) | _ -> None (** [lift_left f x a] is [Some (f y a)] if [x] is [Some y], and [None] otherwise. *) let lift_left f x a = match x with | Some y -> Some (f y a) | _ -> None (** [lift2 f x y] is [Some (f z w)] if [x] equals [Some z] and [y] equals [Some w]. It is [None] otherwise. *) let lift2 f x y = match x,y with | Some z, Some w -> Some (f z w) | _,_ -> None (** {6 Operations with Lists} *) module List = struct (** [List.cons x l] equals [y::l] if [x] is [Some y] and [l] otherwise. *) let cons x l = match x with | Some y -> y::l | _ -> l (** [List.flatten l] is the list of all the [y]s such that [l] contains [Some y] (in the same order). *) let rec flatten = function | x::l -> cons x (flatten l) | [] -> [] end (** {6 Miscellaneous Primitives} *) module Misc = struct (** [Misc.compare f x y] lifts the equality predicate [f] to option types. That is, if both [x] and [y] are [None] then it returns [true], if they are both [Some _] then [f] is called. Otherwise it returns [false].
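    For example (illustrative only): [compare (=) None None] is [true],
    [compare (=) (Some 1) (Some 1)] is [true], and [compare (=) (Some 1) None]
    is [false].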
*) let compare f x y = match x,y with | None, None -> true | Some z, Some w -> f z w | _,_ -> false end coq-8.4pl2/lib/gmapl.mli0000640000175000001440000000166512010532755014210 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ('a,'b) t -> bool val iter : ('a -> 'b list -> unit) -> ('a,'b) t -> unit val map : ('b list -> 'c list) -> ('a,'b) t -> ('a,'c) t val fold : ('a -> 'b list -> 'c -> 'c) -> ('a,'b) t -> 'c -> 'c val add : 'a -> 'b -> ('a,'b) t -> ('a,'b) t val find : 'a -> ('a,'b) t -> 'b list val remove : 'a -> 'b -> ('a,'b) t -> ('a,'b) t coq-8.4pl2/lib/profile.mli0000640000175000001440000001134612010532755014545 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val print_profile : unit -> unit val reset_profile : unit -> unit val init_profile : unit -> unit val declare_profile : string -> profile_key val profile1 : profile_key -> ('a -> 'b) -> 'a -> 'b val profile2 : profile_key -> ('a -> 'b -> 'c) -> 'a -> 'b -> 'c val profile3 : profile_key -> ('a -> 'b -> 'c -> 'd) -> 'a -> 'b -> 'c -> 'd val profile4 : profile_key -> ('a -> 'b -> 'c -> 'd -> 'e) -> 'a -> 'b -> 'c -> 'd -> 'e val profile5 : profile_key -> ('a -> 'b -> 'c -> 'd -> 'e -> 'f) -> 'a -> 'b -> 'c -> 'd -> 'e -> 'f val profile6 : profile_key -> ('a -> 'b -> 'c -> 'd -> 'e -> 'f -> 'g) -> 'a -> 'b -> 'c -> 'd -> 'e -> 'f -> 'g val profile7 : profile_key -> ('a -> 'b -> 'c -> 'd -> 'e -> 'f -> 'g -> 'h) -> 'a -> 'b -> 'c -> 'd -> 'e -> 'f -> 'g -> 'h (** Some utilities to compute the logical and physical sizes and depth of ML objects *) (** Print logical size (in words) and depth of its argument This function does not disturb the heap *) val print_logical_stats : 'a -> unit (** Print physical size, logical size (in words) and depth of its argument This function allocates itself a lot (the same order of magnitude as the physical size of its argument) *) val print_stats : 'a -> unit (** Return logical size (first for strings, then for not strings), (in words) and depth of its argument This function allocates itself a lot *) val obj_stats : 'a -> int * int * int (** Return physical size of its argument (string part and rest) This function allocates itself a lot *) val obj_shared_size : 'a -> int * int coq-8.4pl2/lib/bigint.ml0000640000175000001440000003712612010532755014214 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let rec aux j l n = if j=size then l else aux (j+1) (string_of_int (n mod 10) :: l) (n/10) in String.concat "" (aux 0 [] n) (* The base is 10^size *) let base = let rec exp10 = function 0 -> 1 | n -> 10 * exp10 (n-1) in exp10 size (******************************************************************) (* First, we represent all numbers by int arrays. Later, we will optimize the particular case of small integers *) (******************************************************************) module ArrayInt = struct (* Basic numbers *) let zero = [||] let neg_one = [|-1|] (* An array is canonical when - it is empty - it is [|-1|] - its first bloc is in [-base;-1[U]0;base[ and the other blocs are in [0;base[. 
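   As a worked illustration (assuming, purely for concreteness, blocks of 4 digits,
   i.e. base = 10000; the actual base depends on the word size): 123456789 is
   represented by the canonical array [|1;2345;6789|], since
   1*base^2 + 2345*base + 6789 = 123456789, while -123456789 is represented by
   [|-2;7654;3211|], since -2*base^2 + 7654*base + 3211 = -123456789.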
*) let canonical n = let ok x = (0 <= x && x < base) in let rec ok_tail k = (k = 0) || (ok n.(k) && ok_tail (k-1)) in let ok_init x = (-base <= x && x < base && x <> -1 && x <> 0) in (n = [||]) || (n = [|-1|]) || (ok_init n.(0) && ok_tail (Array.length n - 1)) (* [normalize_pos] : removing initial blocks of 0 *) let normalize_pos n = let k = ref 0 in while !k < Array.length n & n.(!k) = 0 do incr k done; Array.sub n !k (Array.length n - !k) (* [normalize_neg] : avoid (-1) as first bloc. input: an array with -1 as first bloc and other blocs in [0;base[ output: a canonical array *) let normalize_neg n = let k = ref 1 in while !k < Array.length n & n.(!k) = base - 1 do incr k done; let n' = Array.sub n !k (Array.length n - !k) in if Array.length n' = 0 then [|-1|] else (n'.(0) <- n'.(0) - base; n') (* [normalize] : avoid 0 and (-1) as first bloc. input: an array with first bloc in [-base;base[ and others in [0;base[ output: a canonical array *) let rec normalize n = if Array.length n = 0 then n else if n.(0) = -1 then normalize_neg n else if n.(0) = 0 then normalize_pos n else n (* Opposite (expects and returns canonical arrays) *) let neg m = if m = zero then zero else let n = Array.copy m in let i = ref (Array.length m - 1) in while !i > 0 & n.(!i) = 0 do decr i done; if !i = 0 then begin n.(0) <- - n.(0); (* n.(0) cannot be 0 since m is canonical *) if n.(0) = -1 then normalize_neg n else if n.(0) = base then (n.(0) <- 0; Array.append [| 1 |] n) else n end else begin (* here n.(!i) <> 0, hence 0 < base - n.(!i) < base for n canonical *) n.(!i) <- base - n.(!i); decr i; while !i > 0 do n.(!i) <- base - 1 - n.(!i); decr i done; (* since -base <= n.(0) <= base-1, hence -base <= -n.(0)-1 <= base-1 *) n.(0) <- - n.(0) - 1; (* since m is canonical, m.(0)<>0 hence n.(0)<>-1, and m=-1 is already handled above, so here m.(0)<>-1 hence n.(0)<>0 *) n end let push_carry r j = let j = ref j in while !j > 0 & r.(!j) < 0 do r.(!j) <- r.(!j) + base; decr j; r.(!j) <- r.(!j) - 1 done; while !j > 0 & r.(!j) >= base do r.(!j) <- r.(!j) - base; decr j; r.(!j) <- r.(!j) + 1 done; (* here r.(0) could be in [-2*base;2*base-1] *) if r.(0) >= base then (r.(0) <- r.(0) - base; Array.append [| 1 |] r) else if r.(0) < -base then (r.(0) <- r.(0) + 2*base; Array.append [| -2 |] r) else normalize r (* in case r.(0) is 0 or -1 *) let add_to r a j = if a = zero then r else begin for i = Array.length r - 1 downto j+1 do r.(i) <- r.(i) + a.(i-j); if r.(i) >= base then (r.(i) <- r.(i) - base; r.(i-1) <- r.(i-1) + 1) done; r.(j) <- r.(j) + a.(0); push_carry r j end let add n m = let d = Array.length n - Array.length m in if d > 0 then add_to (Array.copy n) m d else add_to (Array.copy m) n (-d) let sub_to r a j = if a = zero then r else begin for i = Array.length r - 1 downto j+1 do r.(i) <- r.(i) - a.(i-j); if r.(i) < 0 then (r.(i) <- r.(i) + base; r.(i-1) <- r.(i-1) - 1) done; r.(j) <- r.(j) - a.(0); push_carry r j end let sub n m = let d = Array.length n - Array.length m in if d >= 0 then sub_to (Array.copy n) m d else let r = neg m in add_to r n (Array.length r - Array.length n) let rec mult m n = if m = zero or n = zero then zero else let l = Array.length m + Array.length n in let r = Array.create l 0 in for i = Array.length m - 1 downto 0 do for j = Array.length n - 1 downto 0 do let p = m.(i) * n.(j) + r.(i+j+1) in let (q,s) = if p < 0 then (p + 1) / base - 1, (p + 1) mod base + base - 1 else p / base, p mod base in r.(i+j+1) <- s; if q <> 0 then r.(i+j) <- r.(i+j) + q; done done; normalize r (* Comparisons *) let 
is_strictly_neg n = n<>[||] && n.(0) < 0 let is_strictly_pos n = n<>[||] && n.(0) > 0 let is_neg_or_zero n = n=[||] or n.(0) < 0 let is_pos_or_zero n = n=[||] or n.(0) > 0 let rec less_than_same_size m n i j = i < Array.length m && (m.(i) < n.(j) or (m.(i) = n.(j) && less_than_same_size m n (i+1) (j+1))) let less_than m n = if is_strictly_neg m then is_pos_or_zero n or Array.length m > Array.length n or (Array.length m = Array.length n && less_than_same_size m n 0 0) else is_strictly_pos n && (Array.length m < Array.length n or (Array.length m = Array.length n && less_than_same_size m n 0 0)) (* For this equality test it is critical that n and m are canonical *) let equal m n = (m = n) let less_than_shift_pos k m n = (Array.length m - k < Array.length n) or (Array.length m - k = Array.length n && less_than_same_size m n k 0) let rec can_divide k m d i = (i = Array.length d) or (m.(k+i) > d.(i)) or (m.(k+i) = d.(i) && can_divide k m d (i+1)) (* For two big nums m and d and a small number q, computes m - d * q * base^(|m|-|d|-k) in-place (in m). Both m d and q are positive. *) let sub_mult m d q k = if q <> 0 then for i = Array.length d - 1 downto 0 do let v = d.(i) * q in m.(k+i) <- m.(k+i) - v mod base; if m.(k+i) < 0 then (m.(k+i) <- m.(k+i) + base; m.(k+i-1) <- m.(k+i-1) -1); if v >= base then begin m.(k+i-1) <- m.(k+i-1) - v / base; let j = ref (i-1) in while m.(k + !j) < 0 do (* result is positive, hence !j remains >= 0 *) m.(k + !j) <- m.(k + !j) + base; decr j; m.(k + !j) <- m.(k + !j) -1 done end done (** Euclid division m/d = (q,r) This is the "Floor" variant, as with ocaml's / (but not as ocaml's Big_int.quomod_big_int). We have sign r = sign m *) let euclid m d = let isnegm, m = if is_strictly_neg m then (-1),neg m else 1,Array.copy m in let isnegd, d = if is_strictly_neg d then (-1),neg d else 1,d in if d = zero then raise Division_by_zero; let q,r = if less_than m d then (zero,m) else let ql = Array.length m - Array.length d in let q = Array.create (ql+1) 0 in let i = ref 0 in while not (less_than_shift_pos !i m d) do if m.(!i)=0 then incr i else if can_divide !i m d 0 then begin let v = if Array.length d > 1 && d.(0) <> m.(!i) then (m.(!i) * base + m.(!i+1)) / (d.(0) * base + d.(1) + 1) else m.(!i) / d.(0) in q.(!i) <- q.(!i) + v; sub_mult m d v !i end else begin let v = (m.(!i) * base + m.(!i+1)) / (d.(0) + 1) in q.(!i) <- q.(!i) + v / base; sub_mult m d (v / base) !i; q.(!i+1) <- q.(!i+1) + v mod base; if q.(!i+1) >= base then (q.(!i+1) <- q.(!i+1)-base; q.(!i) <- q.(!i)+1); sub_mult m d (v mod base) (!i+1) end done; (normalize q, normalize m) in (if isnegd * isnegm = -1 then neg q else q), (if isnegm = -1 then neg r else r) (* Parsing/printing ordinary 10-based numbers *) let of_string s = let len = String.length s in let isneg = len > 1 & s.[0] = '-' in let d = ref (if isneg then 1 else 0) in while !d < len && s.[!d] = '0' do incr d done; if !d = len then zero else let r = (len - !d) mod size in let h = String.sub s (!d) r in let e = if h<>"" then 1 else 0 in let l = (len - !d) / size in let a = Array.create (l + e) 0 in if e=1 then a.(0) <- int_of_string h; for i=1 to l do a.(i+e-1) <- int_of_string (String.sub s ((i-1)*size + !d + r) size) done; if isneg then neg a else a let to_string_pos sgn n = if Array.length n = 0 then "0" else sgn ^ String.concat "" (string_of_int n.(0) :: List.map format_size (List.tl (Array.to_list n))) let to_string n = if is_strictly_neg n then to_string_pos "-" (neg n) else to_string_pos "" n end 
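(* A small worked example of the conventions above (illustrative only, not part
   of the original file): euclid (of_string "7") (of_string "-2") yields the
   representations of -3 and 1, and euclid (of_string "-7") (of_string "2")
   yields those of -3 and -1; the quotient is rounded toward zero and the
   remainder takes the sign of the dividend, matching OCaml's (/) and (mod)
   on machine integers. *)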
(******************************************************************) (* Optimized operations on (unbounded) integer numbers *) (* integers smaller than base are represented as machine integers *) (******************************************************************) open ArrayInt type bigint = Obj.t (* Since base is the largest power of 10 such that base*base <= max_int, we have max_int < 100*base*base : any int can be represented by at most three blocs *) let small n = (-base <= n) && (n < base) let mkarray n = (* n isn't small, this case is handled separately below *) let lo = n mod base and hi = n / base in let t = if small hi then [|hi;lo|] else [|hi/base;hi mod base;lo|] in for i = Array.length t -1 downto 1 do if t.(i) < 0 then (t.(i) <- t.(i) + base; t.(i-1) <- t.(i-1) -1) done; t let ints_of_int n = if n = 0 then [| |] else if small n then [| n |] else mkarray n let of_int n = if small n then Obj.repr n else Obj.repr (mkarray n) let of_ints n = let n = normalize n in (* TODO: using normalize here seems redundant now *) if n = zero then Obj.repr 0 else if Array.length n = 1 then Obj.repr n.(0) else Obj.repr n let coerce_to_int = (Obj.magic : Obj.t -> int) let coerce_to_ints = (Obj.magic : Obj.t -> int array) let to_ints n = if Obj.is_int n then ints_of_int (coerce_to_int n) else coerce_to_ints n let int_of_ints = let maxi = mkarray max_int and mini = mkarray min_int in fun t -> let l = Array.length t in if (l > 3) || (l = 3 && (less_than maxi t || less_than t mini)) then failwith "Bigint.to_int: too large"; let sum = ref 0 in let pow = ref 1 in for i = l-1 downto 0 do sum := !sum + t.(i) * !pow; pow := !pow*base; done; !sum let to_int n = if Obj.is_int n then coerce_to_int n else int_of_ints (coerce_to_ints n) let app_pair f (m, n) = (f m, f n) let add m n = if Obj.is_int m & Obj.is_int n then of_int (coerce_to_int m + coerce_to_int n) else of_ints (add (to_ints m) (to_ints n)) let sub m n = if Obj.is_int m & Obj.is_int n then of_int (coerce_to_int m - coerce_to_int n) else of_ints (sub (to_ints m) (to_ints n)) let mult m n = if Obj.is_int m & Obj.is_int n then of_int (coerce_to_int m * coerce_to_int n) else of_ints (mult (to_ints m) (to_ints n)) let euclid m n = if Obj.is_int m & Obj.is_int n then app_pair of_int (coerce_to_int m / coerce_to_int n, coerce_to_int m mod coerce_to_int n) else app_pair of_ints (euclid (to_ints m) (to_ints n)) let less_than m n = if Obj.is_int m & Obj.is_int n then coerce_to_int m < coerce_to_int n else less_than (to_ints m) (to_ints n) let neg n = if Obj.is_int n then of_int (- (coerce_to_int n)) else of_ints (neg (to_ints n)) let of_string m = of_ints (of_string m) let to_string m = to_string (to_ints m) let zero = of_int 0 let one = of_int 1 let two = of_int 2 let sub_1 n = sub n one let add_1 n = add n one let mult_2 n = add n n let div2_with_rest n = let (q,b) = euclid n two in (q, b = one) let is_strictly_neg n = is_strictly_neg (to_ints n) let is_strictly_pos n = is_strictly_pos (to_ints n) let is_neg_or_zero n = is_neg_or_zero (to_ints n) let is_pos_or_zero n = is_pos_or_zero (to_ints n) let equal m n = (m = n) (* spiwack: computes n^m *) (* The basic idea of the algorithm is that n^(2m) = (n^2)^m *) (* In practice the algorithm performs : k*n^0 = k k*n^(2m) = k*(n*n)^m k*n^(2m+1) = (n*k)*(n*n)^m *) let pow = let rec pow_aux odd_rest n m = (* odd_rest is the k from above *) if m<=0 then odd_rest else let quo = m lsr 1 (* i.e. 
m/2 *) and odd = (m land 1) <> 0 in pow_aux (if odd then mult n odd_rest else odd_rest) (mult n n) quo in pow_aux one (** Testing suite w.r.t. OCaml's Big_int *) (* module B = struct open Big_int let zero = zero_big_int let to_string = string_of_big_int let of_string = big_int_of_string let add = add_big_int let opp = minus_big_int let sub = sub_big_int let mul = mult_big_int let abs = abs_big_int let sign = sign_big_int let euclid n m = let n' = abs n and m' = abs m in let q',r' = quomod_big_int n' m' in (if sign (mul n m) < 0 && sign q' <> 0 then opp q' else q'), (if sign n < 0 then opp r' else r') end let check () = let roots = [ 1; 100; base; 100*base; base*base ] in let rands = [ 1234; 5678; 12345678; 987654321 ] in let nums = (List.flatten (List.map (fun x -> [x-1;x;x+1]) roots)) @ rands in let numbers = List.map string_of_int nums @ List.map (fun n -> string_of_int (-n)) nums in let i = ref 0 in let compare op x y n n' = incr i; let s = Printf.sprintf "%30s" (to_string n) in let s' = Printf.sprintf "%30s" (B.to_string n') in if s <> s' then Printf.printf "%s%s%s: %s <> %s\n" x op y s s' in let test x y = let n = of_string x and m = of_string y in let n' = B.of_string x and m' = B.of_string y in let a = add n m and a' = B.add n' m' in let s = sub n m and s' = B.sub n' m' in let p = mult n m and p' = B.mul n' m' in let q,r = try euclid n m with Division_by_zero -> zero,zero and q',r' = try B.euclid n' m' with Division_by_zero -> B.zero, B.zero in compare "+" x y a a'; compare "-" x y s s'; compare "*" x y p p'; compare "/" x y q q'; compare "%" x y r r' in List.iter (fun a -> List.iter (test a) numbers) numbers; Printf.printf "%i tests done\n" !i *) coq-8.4pl2/lib/xml_parser.ml0000640000175000001440000001511412121620060015073 0ustar notinusers(* * Xml Light, an small Xml parser/printer with DTD support. * Copyright (C) 2003 Nicolas Cannasse (ncannasse@motion-twin.com) * Copyright (C) 2003 Jacques Garrigue * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) open Printf type xml = | Element of (string * (string * string) list * xml list) | PCData of string type error_pos = { eline : int; eline_start : int; emin : int; emax : int; } type error_msg = | UnterminatedComment | UnterminatedString | UnterminatedEntity | IdentExpected | CloseExpected | NodeExpected | AttributeNameExpected | AttributeValueExpected | EndOfTagExpected of string | EOFExpected | Empty type error = error_msg * error_pos exception Error of error exception File_not_found of string type t = { mutable check_eof : bool; mutable concat_pcdata : bool; } type source = | SFile of string | SChannel of in_channel | SString of string | SLexbuf of Lexing.lexbuf type state = { source : Lexing.lexbuf; stack : Xml_lexer.token Stack.t; xparser : t; } exception Internal_error of error_msg exception NoMoreData let xml_error = ref (fun _ -> assert false) let file_not_found = ref (fun _ -> assert false) let is_blank s = let len = String.length s in let break = ref true in let i = ref 0 in while !break && !i < len do let c = s.[!i] in (* no '\r' because we replaced them in the lexer *) if c = ' ' || c = '\n' || c = '\t' then incr i else break := false done; !i = len let _raises e f = xml_error := e; file_not_found := f let make () = { check_eof = true; concat_pcdata = true; } let check_eof p v = p.check_eof <- v let concat_pcdata p v = p.concat_pcdata <- v let pop s = try Stack.pop s.stack with Stack.Empty -> Xml_lexer.token s.source let push t s = Stack.push t s.stack let canonicalize l = let has_elt = List.exists (function Element _ -> true | _ -> false) l in if has_elt then List.filter (function PCData s -> not (is_blank s) | _ -> true) l else l let rec read_node s = match pop s with | Xml_lexer.PCData s -> PCData s | Xml_lexer.Tag (tag, attr, true) -> Element (tag, attr, []) | Xml_lexer.Tag (tag, attr, false) -> let elements = read_elems tag s in Element (tag, attr, canonicalize elements) | t -> push t s; raise NoMoreData and read_elems tag s = let elems = ref [] in (try while true do let node = read_node s in match node, !elems with | PCData c , (PCData c2) :: q -> elems := PCData (c2 ^ c) :: q | _, l -> elems := node :: l done with NoMoreData -> ()); match pop s with | Xml_lexer.Endtag s when s = tag -> List.rev !elems | t -> raise (Internal_error (EndOfTagExpected tag)) let rec read_xml s = let node = read_node s in match node with | Element _ -> node | PCData c -> if is_blank c then read_xml s else raise (Xml_lexer.Error Xml_lexer.ENodeExpected) let convert = function | Xml_lexer.EUnterminatedComment -> UnterminatedComment | Xml_lexer.EUnterminatedString -> UnterminatedString | Xml_lexer.EIdentExpected -> IdentExpected | Xml_lexer.ECloseExpected -> CloseExpected | Xml_lexer.ENodeExpected -> NodeExpected | Xml_lexer.EAttributeNameExpected -> AttributeNameExpected | Xml_lexer.EAttributeValueExpected -> AttributeValueExpected | Xml_lexer.EUnterminatedEntity -> UnterminatedEntity let error_of_exn stk = function | NoMoreData when Stack.pop stk = Xml_lexer.Eof -> Empty | NoMoreData -> NodeExpected | Internal_error e -> e | Xml_lexer.Error e -> convert e | e -> raise e let do_parse xparser source = let stk = Stack.create() in try Xml_lexer.init source; let s = { source = source; xparser = xparser; stack = stk } in let x = read_xml s in if xparser.check_eof && pop s <> Xml_lexer.Eof 
then raise (Internal_error EOFExpected); Xml_lexer.close source; x with e when e <> Sys.Break -> Xml_lexer.close source; raise (!xml_error (error_of_exn stk e) source) let parse p = function | SChannel ch -> do_parse p (Lexing.from_channel ch) | SString str -> do_parse p (Lexing.from_string str) | SLexbuf lex -> do_parse p lex | SFile fname -> let ch = (try open_in fname with Sys_error _ -> raise (!file_not_found fname)) in try let x = do_parse p (Lexing.from_channel ch) in close_in ch; x with reraise -> close_in ch; raise reraise let error_msg = function | UnterminatedComment -> "Unterminated comment" | UnterminatedString -> "Unterminated string" | UnterminatedEntity -> "Unterminated entity" | IdentExpected -> "Ident expected" | CloseExpected -> "Element close expected" | NodeExpected -> "Xml node expected" | AttributeNameExpected -> "Attribute name expected" | AttributeValueExpected -> "Attribute value expected" | EndOfTagExpected tag -> sprintf "End of tag expected : '%s'" tag | EOFExpected -> "End of file expected" | Empty -> "Empty" let error (msg,pos) = if pos.emin = pos.emax then sprintf "%s line %d character %d" (error_msg msg) pos.eline (pos.emin - pos.eline_start) else sprintf "%s line %d characters %d-%d" (error_msg msg) pos.eline (pos.emin - pos.eline_start) (pos.emax - pos.eline_start) let line e = e.eline let range e = e.emin - e.eline_start , e.emax - e.eline_start let abs_range e = e.emin , e.emax let pos source = let line, lstart, min, max = Xml_lexer.pos source in { eline = line; eline_start = lstart; emin = min; emax = max; } let () = _raises (fun x p -> (* local cast : Xml.error_msg -> error_msg *) Error (x, pos p)) (fun f -> File_not_found f) coq-8.4pl2/lib/heap.ml0000640000175000001440000001047212010532755013650 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> int end module type S =sig (* Type of functional heaps *) type t (* Type of elements *) type elt (* The empty heap *) val empty : t (* [add x h] returns a new heap containing the elements of [h], plus [x]; complexity $O(log(n))$ *) val add : elt -> t -> t (* [maximum h] returns the maximum element of [h]; raises [EmptyHeap] when [h] is empty; complexity $O(1)$ *) val maximum : t -> elt (* [remove h] returns a new heap containing the elements of [h], except the maximum of [h]; raises [EmptyHeap] when [h] is empty; complexity $O(log(n))$ *) val remove : t -> t (* usual iterators and combinators; elements are presented in arbitrary order *) val iter : (elt -> unit) -> t -> unit val fold : (elt -> 'a -> 'a) -> t -> 'a -> 'a end exception EmptyHeap (*s Functional implementation *) module Functional(X : Ordered) = struct (* Heaps are encoded as complete binary trees, i.e., binary trees which are full expect, may be, on the bottom level where it is filled from the left. These trees also enjoy the heap property, namely the value of any node is greater or equal than those of its left and right subtrees. There are 4 kinds of complete binary trees, denoted by 4 constructors: [FFF] for a full binary tree (and thus 2 full subtrees); [PPF] for a partial tree with a partial left subtree and a full right subtree; [PFF] for a partial tree with a full left subtree and a full right subtree (but of different heights); and [PFP] for a partial tree with a full left subtree and a partial right subtree. 
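   For instance, a complete tree with 3 nodes is represented by [FFF];
   with 2 nodes by [PFF] (a full left child and an empty right subtree,
   hence subtrees of different heights); with 4 nodes by [PPF]; and
   with 6 nodes by [PFP].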
*) type t = | Empty | FFF of t * X.t * t (* full (full, full) *) | PPF of t * X.t * t (* partial (partial, full) *) | PFF of t * X.t * t (* partial (full, full) *) | PFP of t * X.t * t (* partial (full, partial) *) type elt = X.t let empty = Empty (* smart constructors for insertion *) let p_f l x r = match l with | Empty | FFF _ -> PFF (l, x, r) | _ -> PPF (l, x, r) let pf_ l x = function | Empty | FFF _ as r -> FFF (l, x, r) | r -> PFP (l, x, r) let rec add x = function | Empty -> FFF (Empty, x, Empty) (* insertion to the left *) | FFF (l, y, r) | PPF (l, y, r) -> if X.compare x y > 0 then p_f (add y l) x r else p_f (add x l) y r (* insertion to the right *) | PFF (l, y, r) | PFP (l, y, r) -> if X.compare x y > 0 then pf_ l x (add y r) else pf_ l y (add x r) let maximum = function | Empty -> raise EmptyHeap | FFF (_, x, _) | PPF (_, x, _) | PFF (_, x, _) | PFP (_, x, _) -> x (* smart constructors for removal; note that they are different from the ones for insertion! *) let p_f l x r = match l with | Empty | FFF _ -> FFF (l, x, r) | _ -> PPF (l, x, r) let pf_ l x = function | Empty | FFF _ as r -> PFF (l, x, r) | r -> PFP (l, x, r) let rec remove = function | Empty -> raise EmptyHeap | FFF (Empty, _, Empty) -> Empty | PFF (l, _, Empty) -> l (* remove on the left *) | PPF (l, x, r) | PFF (l, x, r) -> let xl = maximum l in let xr = maximum r in let l' = remove l in if X.compare xl xr >= 0 then p_f l' xl r else p_f l' xr (add xl (remove r)) (* remove on the right *) | FFF (l, x, r) | PFP (l, x, r) -> let xl = maximum l in let xr = maximum r in let r' = remove r in if X.compare xl xr > 0 then pf_ (add xr (remove l)) xl r' else pf_ l xr r' let rec iter f = function | Empty -> () | FFF (l, x, r) | PPF (l, x, r) | PFF (l, x, r) | PFP (l, x, r) -> iter f l; f x; iter f r let rec fold f h x0 = match h with | Empty -> x0 | FFF (l, x, r) | PPF (l, x, r) | PFF (l, x, r) | PFP (l, x, r) -> fold f l (fold f r (f x x0)) end coq-8.4pl2/lib/util.ml0000640000175000001440000012161511741766763013734 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = 'a' && c <= 'z') or (c >= 'A' && c <= 'Z') let is_digit c = (c >= '0' && c <= '9') let is_ident_tail c = is_letter c or is_digit c or c = '\'' or c = '_' let is_blank = function | ' ' | '\r' | '\t' | '\n' -> true | _ -> false (* Strings *) let explode s = let rec explode_rec n = if n >= String.length s then [] else String.make 1 (String.get s n) :: explode_rec (succ n) in explode_rec 0 let implode sl = String.concat "" sl let strip s = let n = String.length s in let rec lstrip_rec i = if i < n && is_blank s.[i] then lstrip_rec (i+1) else i in let rec rstrip_rec i = if i >= 0 && is_blank s.[i] then rstrip_rec (i-1) else i in let a = lstrip_rec 0 and b = rstrip_rec (n-1) in String.sub s a (b-a+1) let drop_simple_quotes s = let n = String.length s in if n > 2 & s.[0] = '\'' & s.[n-1] = '\'' then String.sub s 1 (n-2) else s (* substring searching... 
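   For illustration, [string_string_contains ~where:"abcdef" ~what:"cde"]
   below is expected to return [true], and an empty [what] is always
   reported as contained.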
*) (* gdzie = where, co = what *) (* gdzie=gdzie(string) gl=gdzie(length) gi=gdzie(index) *) let rec is_sub gdzie gl gi co cl ci = (ci>=cl) || ((String.unsafe_get gdzie gi = String.unsafe_get co ci) && (is_sub gdzie gl (gi+1) co cl (ci+1))) let rec raw_str_index i gdzie l c co cl = (* First adapt to ocaml 3.11 new semantics of index_from *) if (i+cl > l) then raise Not_found; (* Then proceed as in ocaml < 3.11 *) let i' = String.index_from gdzie i c in if (i'+cl <= l) && (is_sub gdzie l i' co cl 0) then i' else raw_str_index (i'+1) gdzie l c co cl let string_index_from gdzie i co = if co="" then i else raw_str_index i gdzie (String.length gdzie) (String.unsafe_get co 0) co (String.length co) let string_string_contains ~where ~what = try let _ = string_index_from where 0 what in true with Not_found -> false let plural n s = if n<>1 then s^"s" else s let ordinal n = let s = match n mod 10 with 1 -> "st" | 2 -> "nd" | 3 -> "rd" | _ -> "th" in string_of_int n ^ s (* string parsing *) let split_string_at c s = let len = String.length s in let rec split n = try let pos = String.index_from s n c in let dir = String.sub s n (pos-n) in dir :: split (succ pos) with | Not_found -> [String.sub s n (len-n)] in if len = 0 then [] else split 0 let parse_loadpath s = let l = split_string_at '/' s in if List.mem "" l then invalid_arg "parse_loadpath: find an empty dir in loadpath"; l module Stringset = Set.Make(struct type t = string let compare = compare end) module Stringmap = Map.Make(struct type t = string let compare = compare end) type utf8_status = UnicodeLetter | UnicodeIdentPart | UnicodeSymbol exception UnsupportedUtf8 (* The following table stores classes of Unicode characters that are used by the lexer. There are 3 different classes so 2 bits are allocated for each character. We only use 16 bits over the 31 bits to simplify the masking process. (This choice seems to be a good trade-off between speed and space after some benchmarks.) *) (* A 256ko table, initially filled with zeros. *) let table = Array.create (1 lsl 17) 0 (* Associate a 2-bit pattern to each status at position [i]. Only the 3 lowest bits of [i] are taken into account to define the position of the pattern in the word. Notice that pattern "00" means "undefined". *) let mask i = function | UnicodeLetter -> 1 lsl ((i land 7) lsl 1) (* 01 *) | UnicodeIdentPart -> 2 lsl ((i land 7) lsl 1) (* 10 *) | UnicodeSymbol -> 3 lsl ((i land 7) lsl 1) (* 11 *) (* Helper to reset 2 bits in a word. *) let reset_mask i = lnot (3 lsl ((i land 7) lsl 1)) (* Initialize the lookup table from a list of segments, assigning a status to every character of each segment. The order of these assignments is relevant: it is possible to assign status [s] to a segment [(c1, c2)] and later assign [s'] to [c] even if [c] is between [c1] and [c2]. *) let mk_lookup_table_from_unicode_tables_for status tables = List.iter (List.iter (fun (c1, c2) -> for i = c1 to c2 do table.(i lsr 3) <- (table.(i lsr 3) land (reset_mask i)) lor (mask i status) done)) tables (* Look up into the table and interpret the found pattern. *) let lookup x = let v = (table.(x lsr 3) lsr ((x land 7) lsl 1)) land 3 in if v = 1 then UnicodeLetter else if v = 2 then UnicodeIdentPart else if v = 3 then UnicodeSymbol else raise UnsupportedUtf8 (* [classify_unicode] discriminates between 3 different kinds of symbols based on the standard unicode classification (extracted from Camomile). *) let classify_unicode = let single c = [ (c, c) ] in (* General tables. 
*) mk_lookup_table_from_unicode_tables_for UnicodeSymbol [ Unicodetable.sm; (* Symbol, maths. *) Unicodetable.sc; (* Symbol, currency. *) Unicodetable.so; (* Symbol, modifier. *) Unicodetable.pd; (* Punctation, dash. *) Unicodetable.pc; (* Punctation, connector. *) Unicodetable.pe; (* Punctation, open. *) Unicodetable.ps; (* Punctation, close. *) Unicodetable.pi; (* Punctation, initial quote. *) Unicodetable.pf; (* Punctation, final quote. *) Unicodetable.po; (* Punctation, other. *) ]; mk_lookup_table_from_unicode_tables_for UnicodeLetter [ Unicodetable.lu; (* Letter, uppercase. *) Unicodetable.ll; (* Letter, lowercase. *) Unicodetable.lt; (* Letter, titlecase. *) Unicodetable.lo; (* Letter, others. *) ]; mk_lookup_table_from_unicode_tables_for UnicodeIdentPart [ Unicodetable.nd; (* Number, decimal digits. *) Unicodetable.nl; (* Number, letter. *) Unicodetable.no; (* Number, other. *) ]; (* Exceptions (from a previous version of this function). *) mk_lookup_table_from_unicode_tables_for UnicodeSymbol [ single 0x000B2; (* Squared. *) single 0x0002E; (* Dot. *) ]; mk_lookup_table_from_unicode_tables_for UnicodeLetter [ single 0x005F; (* Underscore. *) single 0x00A0; (* Non breaking space. *) ]; mk_lookup_table_from_unicode_tables_for UnicodeIdentPart [ single 0x0027; (* Special space. *) ]; (* Lookup *) lookup exception End_of_input let utf8_of_unicode n = if n < 128 then String.make 1 (Char.chr n) else if n < 2048 then let s = String.make 2 (Char.chr (128 + n mod 64)) in begin s.[0] <- Char.chr (192 + n / 64); s end else if n < 65536 then let s = String.make 3 (Char.chr (128 + n mod 64)) in begin s.[1] <- Char.chr (128 + (n / 64) mod 64); s.[0] <- Char.chr (224 + n / 4096); s end else let s = String.make 4 (Char.chr (128 + n mod 64)) in begin s.[2] <- Char.chr (128 + (n / 64) mod 64); s.[1] <- Char.chr (128 + (n / 4096) mod 64); s.[0] <- Char.chr (240 + n / 262144); s end let next_utf8 s i = let err () = invalid_arg "utf8" in let l = String.length s - i in if l = 0 then raise End_of_input else let a = Char.code s.[i] in if a <= 0x7F then 1, a else if a land 0x40 = 0 or l = 1 then err () else let b = Char.code s.[i+1] in if b land 0xC0 <> 0x80 then err () else if a land 0x20 = 0 then 2, (a land 0x1F) lsl 6 + (b land 0x3F) else if l = 2 then err () else let c = Char.code s.[i+2] in if c land 0xC0 <> 0x80 then err () else if a land 0x10 = 0 then 3, (a land 0x0F) lsl 12 + (b land 0x3F) lsl 6 + (c land 0x3F) else if l = 3 then err () else let d = Char.code s.[i+3] in if d land 0xC0 <> 0x80 then err () else if a land 0x08 = 0 then 4, (a land 0x07) lsl 18 + (b land 0x3F) lsl 12 + (c land 0x3F) lsl 6 + (d land 0x3F) else err () (* Check the well-formedness of an identifier *) let check_initial handle j n s = match classify_unicode n with | UnicodeLetter -> () | _ -> let c = String.sub s 0 j in handle ("Invalid character '"^c^"' at beginning of identifier \""^s^"\".") let check_trailing handle i j n s = match classify_unicode n with | UnicodeLetter | UnicodeIdentPart -> () | _ -> let c = String.sub s i j in handle ("Invalid character '"^c^"' in identifier \""^s^"\".") let check_ident_gen handle s = let i = ref 0 in if s <> ".." then try let j, n = next_utf8 s 0 in check_initial handle j n s; i := !i + j; try while true do let j, n = next_utf8 s !i in check_trailing handle !i j n s; i := !i + j done with End_of_input -> () with | End_of_input -> error "The empty string is not an identifier." 
| UnsupportedUtf8 -> error (s^": unsupported character in utf8 sequence.") | Invalid_argument _ -> error (s^": invalid utf8 sequence.") let check_ident_soft = check_ident_gen warning let check_ident = check_ident_gen error let lowercase_unicode = let tree = Segmenttree.make Unicodetable.to_lower in fun unicode -> try match Segmenttree.lookup unicode tree with | `Abs c -> c | `Delta d -> unicode + d with Not_found -> unicode let lowercase_first_char_utf8 s = assert (s <> ""); let j, n = next_utf8 s 0 in utf8_of_unicode (lowercase_unicode n) (** For extraction, we need to encode unicode character into ascii ones *) let ascii_of_ident s = let check_ascii s = let ok = ref true in String.iter (fun c -> if Char.code c >= 128 then ok := false) s; !ok in if check_ascii s then s else let i = ref 0 and out = ref "" in begin try while true do let j, n = next_utf8 s !i in out := if n >= 128 then Printf.sprintf "%s__U%04x_" !out n else Printf.sprintf "%s%c" !out s.[!i]; i := !i + j done with End_of_input -> () end; !out (* Lists *) let rec list_compare cmp l1 l2 = match l1,l2 with [], [] -> 0 | _::_, [] -> 1 | [], _::_ -> -1 | x1::l1, x2::l2 -> (match cmp x1 x2 with | 0 -> list_compare cmp l1 l2 | c -> c) let rec list_equal cmp l1 l2 = match l1, l2 with | [], [] -> true | x1 :: l1, x2 :: l2 -> cmp x1 x2 && list_equal cmp l1 l2 | _ -> false let list_intersect l1 l2 = List.filter (fun x -> List.mem x l2) l1 let list_union l1 l2 = let rec urec = function | [] -> l2 | a::l -> if List.mem a l2 then urec l else a::urec l in urec l1 let list_unionq l1 l2 = let rec urec = function | [] -> l2 | a::l -> if List.memq a l2 then urec l else a::urec l in urec l1 let list_subtract l1 l2 = if l2 = [] then l1 else List.filter (fun x -> not (List.mem x l2)) l1 let list_subtractq l1 l2 = if l2 = [] then l1 else List.filter (fun x -> not (List.memq x l2)) l1 let list_tabulate f len = let rec tabrec n = if n = len then [] else (f n)::(tabrec (n+1)) in tabrec 0 let list_addn n v = let rec aux n l = if n = 0 then l else aux (pred n) (v::l) in if n < 0 then invalid_arg "list_addn" else aux n let list_make n v = list_addn n v [] let list_assign l n e = let rec assrec stk = function | ((h::t), 0) -> List.rev_append stk (e::t) | ((h::t), n) -> assrec (h::stk) (t, n-1) | ([], _) -> failwith "list_assign" in assrec [] (l,n) let rec list_smartmap f l = match l with [] -> l | h::tl -> let h' = f h and tl' = list_smartmap f tl in if h'==h && tl'==tl then l else h'::tl' let list_map_left f = (* ensures the order in case of side-effects *) let rec map_rec = function | [] -> [] | x::l -> let v = f x in v :: map_rec l in map_rec let list_map_i f = let rec map_i_rec i = function | [] -> [] | x::l -> let v = f i x in v :: map_i_rec (i+1) l in map_i_rec let list_map2_i f i l1 l2 = let rec map_i i = function | ([], []) -> [] | ((h1::t1), (h2::t2)) -> let v = f i h1 h2 in v :: map_i (succ i) (t1,t2) | (_, _) -> invalid_arg "map2_i" in map_i i (l1,l2) let list_map3 f l1 l2 l3 = let rec map = function | ([], [], []) -> [] | ((h1::t1), (h2::t2), (h3::t3)) -> let v = f h1 h2 h3 in v::map (t1,t2,t3) | (_, _, _) -> invalid_arg "map3" in map (l1,l2,l3) let list_map4 f l1 l2 l3 l4 = let rec map = function | ([], [], [], []) -> [] | ((h1::t1), (h2::t2), (h3::t3), (h4::t4)) -> let v = f h1 h2 h3 h4 in v::map (t1,t2,t3,t4) | (_, _, _, _) -> invalid_arg "map4" in map (l1,l2,l3,l4) let list_map_to_array f l = Array.of_list (List.map f l) let rec list_smartfilter f l = match l with [] -> l | h::tl -> let tl' = list_smartfilter f tl in if f h then if tl' == 
tl then l else h :: tl' else tl' let list_index_f f x = let rec index_x n = function | y::l -> if f x y then n else index_x (succ n) l | [] -> raise Not_found in index_x 1 let list_index0_f f x l = list_index_f f x l - 1 let list_index x = let rec index_x n = function | y::l -> if x = y then n else index_x (succ n) l | [] -> raise Not_found in index_x 1 let list_index0 x l = list_index x l - 1 let list_unique_index x = let rec index_x n = function | y::l -> if x = y then if List.mem x l then raise Not_found else n else index_x (succ n) l | [] -> raise Not_found in index_x 1 let list_fold_right_i f i l = let rec it_list_f i l a = match l with | [] -> a | b::l -> f (i-1) b (it_list_f (i-1) l a) in it_list_f (List.length l + i) l let list_fold_left_i f = let rec it_list_f i a = function | [] -> a | b::l -> it_list_f (i+1) (f i a b) l in it_list_f let rec list_fold_left3 f accu l1 l2 l3 = match (l1, l2, l3) with ([], [], []) -> accu | (a1::l1, a2::l2, a3::l3) -> list_fold_left3 f (f accu a1 a2 a3) l1 l2 l3 | (_, _, _) -> invalid_arg "list_fold_left3" (* [list_fold_right_and_left f [a1;...;an] hd = f (f (... (f (f hd an [an-1;...;a1]) an-1 [an-2;...;a1]) ...) a2 [a1]) a1 []] *) let rec list_fold_right_and_left f l hd = let rec aux tl = function | [] -> hd | a::l -> let hd = aux (a::tl) l in f hd a tl in aux [] l let list_iter3 f l1 l2 l3 = let rec iter = function | ([], [], []) -> () | ((h1::t1), (h2::t2), (h3::t3)) -> f h1 h2 h3; iter (t1,t2,t3) | (_, _, _) -> invalid_arg "map3" in iter (l1,l2,l3) let list_iter_i f l = list_fold_left_i (fun i _ x -> f i x) 0 () l let list_for_all_i p = let rec for_all_p i = function | [] -> true | a::l -> p i a && for_all_p (i+1) l in for_all_p let list_except x l = List.filter (fun y -> not (x = y)) l let list_remove = list_except (* Alias *) let rec list_remove_first a = function | b::l when a = b -> l | b::l -> b::list_remove_first a l | [] -> raise Not_found let rec list_remove_assoc_in_triple x = function | [] -> [] | (y,_,_ as z)::l -> if x = y then l else z::list_remove_assoc_in_triple x l let rec list_assoc_snd_in_triple x = function [] -> raise Not_found | (a,b,_)::l -> if compare a x = 0 then b else list_assoc_snd_in_triple x l let list_add_set x l = if List.mem x l then l else x::l let list_eq_set l1 l2 = let rec aux l1 = function | [] -> l1 = [] | a::l2 -> aux (list_remove_first a l1) l2 in try aux l1 l2 with Not_found -> false let list_for_all2eq f l1 l2 = try List.for_all2 f l1 l2 with Invalid_argument _ -> false let list_filter_i p = let rec filter_i_rec i = function | [] -> [] | x::l -> let l' = filter_i_rec (succ i) l in if p i x then x::l' else l' in filter_i_rec 0 let rec list_sep_last = function | [] -> failwith "sep_last" | hd::[] -> (hd,[]) | hd::tl -> let (l,tl) = list_sep_last tl in (l,hd::tl) let list_try_find_i f = let rec try_find_f n = function | [] -> failwith "try_find_i" | h::t -> try f n h with Failure _ -> try_find_f (n+1) t in try_find_f let list_try_find f = let rec try_find_f = function | [] -> failwith "try_find" | h::t -> try f h with Failure _ -> try_find_f t in try_find_f let list_uniquize l = let visited = Hashtbl.create 23 in let rec aux acc = function | h::t -> if Hashtbl.mem visited h then aux acc t else begin Hashtbl.add visited h h; aux (h::acc) t end | [] -> List.rev acc in aux [] l let rec list_distinct l = let visited = Hashtbl.create 23 in let rec loop = function | h::t -> if Hashtbl.mem visited h then false else begin Hashtbl.add visited h h; loop t end | [] -> true in loop l let rec list_merge_uniq cmp l1 l2 
= match l1, l2 with | [], l2 -> l2 | l1, [] -> l1 | h1 :: t1, h2 :: t2 -> let c = cmp h1 h2 in if c = 0 then h1 :: list_merge_uniq cmp t1 t2 else if c <= 0 then h1 :: list_merge_uniq cmp t1 l2 else h2 :: list_merge_uniq cmp l1 t2 let rec list_duplicates = function | [] -> [] | x::l -> let l' = list_duplicates l in if List.mem x l then list_add_set x l' else l' let rec list_filter2 f = function | [], [] as p -> p | d::dp, l::lp -> let (dp',lp' as p) = list_filter2 f (dp,lp) in if f d l then d::dp', l::lp' else p | _ -> invalid_arg "list_filter2" let rec list_map_filter f = function | [] -> [] | x::l -> let l' = list_map_filter f l in match f x with None -> l' | Some y -> y::l' let list_map_filter_i f = let rec aux i = function | [] -> [] | x::l -> let l' = aux (succ i) l in match f i x with None -> l' | Some y -> y::l' in aux 0 let list_filter_along f filter l = snd (list_filter2 (fun b c -> f b) (filter,l)) let list_filter_with filter l = list_filter_along (fun x -> x) filter l let list_subset l1 l2 = let t2 = Hashtbl.create 151 in List.iter (fun x -> Hashtbl.add t2 x ()) l2; let rec look = function | [] -> true | x::ll -> try Hashtbl.find t2 x; look ll with Not_found -> false in look l1 (* [list_chop i l] splits [l] into two lists [(l1,l2)] such that [l1++l2=l] and [l1] has length [i]. It raises [Failure] when [i] is negative or greater than the length of [l] *) let list_chop n l = let rec chop_aux i acc = function | tl when i=0 -> (List.rev acc, tl) | h::t -> chop_aux (pred i) (h::acc) t | [] -> failwith "list_chop" in chop_aux n [] l (* [list_split_when p l] splits [l] into two lists [(l1,a::l2)] such that [l1++(a::l2)=l], [p a=true] and [p b = false] for every element [b] of [l1]. If there is no such [a], then it returns [(l,[])] instead *) let list_split_when p = let rec split_when_loop x y = match y with | [] -> (List.rev x,[]) | (a::l) -> if (p a) then (List.rev x,y) else split_when_loop (a::x) l in split_when_loop [] (* [list_split_by p l] splits [l] into two lists [(l1,l2)] such that elements of [l1] satisfy [p] and elements of [l2] do not; order is preserved *) let list_split_by p = let rec split_by_loop = function | [] -> ([],[]) | a::l -> let (l1,l2) = split_by_loop l in if p a then (a::l1,l2) else (l1,a::l2) in split_by_loop let rec list_split3 = function | [] -> ([], [], []) | (x,y,z)::l -> let (rx, ry, rz) = list_split3 l in (x::rx, y::ry, z::rz) let rec list_insert_in_class f a = function | [] -> [[a]] | (b::_ as l)::classes when f a b -> (a::l)::classes | l::classes -> l :: list_insert_in_class f a classes let list_partition_by f l = List.fold_right (list_insert_in_class f) l [] let list_firstn n l = let rec aux acc = function | (0, l) -> List.rev acc | (n, (h::t)) -> aux (h::acc) (pred n, t) | _ -> failwith "firstn" in aux [] (n,l) let rec list_last = function | [] -> failwith "list_last" | [x] -> x | _ :: l -> list_last l let list_lastn n l = let len = List.length l in let rec aux m l = if m = n then l else aux (m - 1) (List.tl l) in if len < n then failwith "lastn" else aux len l let rec list_skipn n l = match n,l with | 0, _ -> l | _, [] -> failwith "list_skipn" | n, _::l -> list_skipn (pred n) l let rec list_skipn_at_least n l = try list_skipn n l with Failure _ -> [] let list_prefix_of prefl l = let rec prefrec = function | (h1::t1, h2::t2) -> h1 = h2 && prefrec (t1,t2) | ([], _) -> true | (_, _) -> false in prefrec (prefl,l) let list_drop_prefix p l = (* if l=p++t then return t else l *) let rec list_drop_prefix_rec = function | ([], tl) -> Some tl | (_, []) -> None 
| (h1::tp, h2::tl) -> if h1 = h2 then list_drop_prefix_rec (tp,tl) else None in match list_drop_prefix_rec (p,l) with | Some r -> r | None -> l let list_map_append f l = List.flatten (List.map f l) let list_join_map = list_map_append (* Alias *) let list_map_append2 f l1 l2 = List.flatten (List.map2 f l1 l2) let list_share_tails l1 l2 = let rec shr_rev acc = function | ((x1::l1), (x2::l2)) when x1 == x2 -> shr_rev (x1::acc) (l1,l2) | (l1,l2) -> (List.rev l1, List.rev l2, acc) in shr_rev [] (List.rev l1, List.rev l2) let rec list_fold_map f e = function | [] -> (e,[]) | h::t -> let e',h' = f e h in let e'',t' = list_fold_map f e' t in e'',h'::t' (* (* tail-recursive version of the above function *) let list_fold_map f e l = let g (e,b') h = let (e',h') = f e h in (e',h'::b') in let (e',lrev) = List.fold_left g (e,[]) l in (e',List.rev lrev) *) (* The same, based on fold_right, with the effect accumulated on the right *) let list_fold_map' f l e = List.fold_right (fun x (l,e) -> let (y,e) = f x e in (y::l,e)) l ([],e) let list_map_assoc f = List.map (fun (x,a) -> (x,f a)) let rec list_assoc_f f a = function | (x, e) :: xs -> if f a x then e else list_assoc_f f a xs | [] -> raise Not_found (* Specification: - =p= is set equality (double inclusion) - f such that \forall l acc, (f l acc) =p= append (f l []) acc - let g = fun x -> f x [] in - union_map f l acc =p= append (flatten (map g l)) acc *) let list_union_map f l acc = List.fold_left (fun x y -> f y x) acc l (* A generic cartesian product: for any operator (**), [list_cartesian (**) [x1;x2] [y1;y2] = [x1**y1; x1**y2; x2**y1; x2**y1]], and so on if there are more elements in the lists. *) let rec list_cartesian op l1 l2 = list_map_append (fun x -> List.map (op x) l2) l1 (* [list_cartesians] is an n-ary cartesian product: it iterates [list_cartesian] over a list of lists. 
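   For instance, [list_cartesians (+) 0 [[1;2];[10;20]]] is expected to
   evaluate to [11; 21; 12; 22].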
*) let list_cartesians op init ll = List.fold_right (list_cartesian op) ll [init] (* list_combinations [[a;b];[c;d]] gives [[a;c];[a;d];[b;c];[b;d]] *) let list_combinations l = list_cartesians (fun x l -> x::l) [] l let rec list_combine3 x y z = match x, y, z with | [], [], [] -> [] | (x :: xs), (y :: ys), (z :: zs) -> (x, y, z) :: list_combine3 xs ys zs | _, _, _ -> raise (Invalid_argument "list_combine3") (* Keep only those products that do not return None *) let rec list_cartesian_filter op l1 l2 = list_map_append (fun x -> list_map_filter (op x) l2) l1 (* Keep only those products that do not return None *) let rec list_cartesians_filter op init ll = List.fold_right (list_cartesian_filter op) ll [init] (* Drop the last element of a list *) let rec list_drop_last = function [] -> assert false | hd :: [] -> [] | hd :: tl -> hd :: list_drop_last tl (* Factorize lists of pairs according to the left argument *) let rec list_factorize_left = function | (a,b)::l -> let al,l' = list_split_by (fun (a',b) -> a=a') l in (a,(b::List.map snd al)) :: list_factorize_left l' | [] -> [] (* Arrays *) let array_compare item_cmp v1 v2 = let c = compare (Array.length v1) (Array.length v2) in if c<>0 then c else let rec cmp = function -1 -> 0 | i -> let c' = item_cmp v1.(i) v2.(i) in if c'<>0 then c' else cmp (i-1) in cmp (Array.length v1 - 1) let array_equal cmp t1 t2 = Array.length t1 = Array.length t2 && let rec aux i = (i = Array.length t1) || (cmp t1.(i) t2.(i) && aux (i + 1)) in aux 0 let array_exists f v = let rec exrec = function | -1 -> false | n -> (f v.(n)) || (exrec (n-1)) in exrec ((Array.length v)-1) let array_for_all f v = let rec allrec = function | -1 -> true | n -> (f v.(n)) && (allrec (n-1)) in allrec ((Array.length v)-1) let array_for_all2 f v1 v2 = let rec allrec = function | -1 -> true | n -> (f v1.(n) v2.(n)) && (allrec (n-1)) in let lv1 = Array.length v1 in lv1 = Array.length v2 && allrec (pred lv1) let array_for_all3 f v1 v2 v3 = let rec allrec = function | -1 -> true | n -> (f v1.(n) v2.(n) v3.(n)) && (allrec (n-1)) in let lv1 = Array.length v1 in lv1 = Array.length v2 && lv1 = Array.length v3 && allrec (pred lv1) let array_for_all4 f v1 v2 v3 v4 = let rec allrec = function | -1 -> true | n -> (f v1.(n) v2.(n) v3.(n) v4.(n)) && (allrec (n-1)) in let lv1 = Array.length v1 in lv1 = Array.length v2 && lv1 = Array.length v3 && lv1 = Array.length v4 && allrec (pred lv1) let array_for_all_i f i v = let rec allrec i n = n = Array.length v || f i v.(n) && allrec (i+1) (n+1) in allrec i 0 exception Found of int let array_find_i (pred: int -> 'a -> bool) (arr: 'a array) : int option = try for i=0 to Array.length arr - 1 do if pred i (arr.(i)) then raise (Found i) done; None with Found i -> Some i let array_hd v = match Array.length v with | 0 -> failwith "array_hd" | _ -> v.(0) let array_tl v = match Array.length v with | 0 -> failwith "array_tl" | n -> Array.sub v 1 (pred n) let array_last v = match Array.length v with | 0 -> failwith "array_last" | n -> v.(pred n) let array_cons e v = Array.append [|e|] v let array_rev t = let n=Array.length t in if n <=0 then () else let tmp=ref t.(0) in for i=0 to pred (n/2) do tmp:=t.((pred n)-i); t.((pred n)-i)<- t.(i); t.(i)<- !tmp done let array_fold_right_i f v a = let rec fold a n = if n=0 then a else let k = n-1 in fold (f k v.(k) a) k in fold a (Array.length v) let array_fold_left_i f v a = let n = Array.length a in let rec fold i v = if i = n then v else fold (succ i) (f i v a.(i)) in fold 0 v let array_fold_right2 f v1 v2 a = let lv1 = 
Array.length v1 in let rec fold a n = if n=0 then a else let k = n-1 in fold (f v1.(k) v2.(k) a) k in if Array.length v2 <> lv1 then invalid_arg "array_fold_right2"; fold a lv1 let array_fold_left2 f a v1 v2 = let lv1 = Array.length v1 in let rec fold a n = if n >= lv1 then a else fold (f a v1.(n) v2.(n)) (succ n) in if Array.length v2 <> lv1 then invalid_arg "array_fold_left2"; fold a 0 let array_fold_left2_i f a v1 v2 = let lv1 = Array.length v1 in let rec fold a n = if n >= lv1 then a else fold (f n a v1.(n) v2.(n)) (succ n) in if Array.length v2 <> lv1 then invalid_arg "array_fold_left2"; fold a 0 let array_fold_left3 f a v1 v2 v3 = let lv1 = Array.length v1 in let rec fold a n = if n >= lv1 then a else fold (f a v1.(n) v2.(n) v3.(n)) (succ n) in if Array.length v2 <> lv1 || Array.length v3 <> lv1 then invalid_arg "array_fold_left2"; fold a 0 let array_fold_left_from n f a v = let rec fold a n = if n >= Array.length v then a else fold (f a v.(n)) (succ n) in fold a n let array_fold_right_from n f v a = let rec fold n = if n >= Array.length v then a else f v.(n) (fold (succ n)) in fold n let array_app_tl v l = if Array.length v = 0 then invalid_arg "array_app_tl"; array_fold_right_from 1 (fun e l -> e::l) v l let array_list_of_tl v = if Array.length v = 0 then invalid_arg "array_list_of_tl"; array_fold_right_from 1 (fun e l -> e::l) v [] let array_map_to_list f v = List.map f (Array.to_list v) let array_chop n v = let vlen = Array.length v in if n > vlen then failwith "array_chop"; (Array.sub v 0 n, Array.sub v n (vlen-n)) exception Local of int (* If none of the elements is changed by f we return ar itself. The for loop looks for the first such an element. If found it is temporarily stored in a ref and the new array is produced, but f is not re-applied to elements that are already checked *) let array_smartmap f ar = let ar_size = Array.length ar in let aux = ref None in try for i = 0 to ar_size-1 do let a = ar.(i) in let a' = f a in if a != a' then (* pointer (in)equality *) begin aux := Some a'; raise (Local i) end done; ar with Local i -> let copy j = if j a' | None -> failwith "Error" else f (ar.(j)) in Array.init ar_size copy let array_map2 f v1 v2 = if Array.length v1 <> Array.length v2 then invalid_arg "array_map2"; if Array.length v1 == 0 then [| |] else begin let res = Array.create (Array.length v1) (f v1.(0) v2.(0)) in for i = 1 to pred (Array.length v1) do res.(i) <- f v1.(i) v2.(i) done; res end let array_map2_i f v1 v2 = if Array.length v1 <> Array.length v2 then invalid_arg "array_map2"; if Array.length v1 == 0 then [| |] else begin let res = Array.create (Array.length v1) (f 0 v1.(0) v2.(0)) in for i = 1 to pred (Array.length v1) do res.(i) <- f i v1.(i) v2.(i) done; res end let array_map3 f v1 v2 v3 = if Array.length v1 <> Array.length v2 || Array.length v1 <> Array.length v3 then invalid_arg "array_map3"; if Array.length v1 == 0 then [| |] else begin let res = Array.create (Array.length v1) (f v1.(0) v2.(0) v3.(0)) in for i = 1 to pred (Array.length v1) do res.(i) <- f v1.(i) v2.(i) v3.(i) done; res end let array_map_left f a = (* Ocaml does not guarantee Array.map is LR *) let l = Array.length a in (* (even if so), then we rewrite it *) if l = 0 then [||] else begin let r = Array.create l (f a.(0)) in for i = 1 to l - 1 do r.(i) <- f a.(i) done; r end let array_map_left_pair f a g b = let l = Array.length a in if l = 0 then [||],[||] else begin let r = Array.create l (f a.(0)) in let s = Array.create l (g b.(0)) in for i = 1 to l - 1 do r.(i) <- f a.(i); s.(i) <- g 
b.(i) done; r, s end let array_iter2 f v1 v2 = let n = Array.length v1 in if Array.length v2 <> n then invalid_arg "array_iter2" else for i = 0 to n - 1 do f v1.(i) v2.(i) done let pure_functional = false let array_fold_map' f v e = if pure_functional then let (l,e) = Array.fold_right (fun x (l,e) -> let (y,e) = f x e in (y::l,e)) v ([],e) in (Array.of_list l,e) else let e' = ref e in let v' = Array.map (fun x -> let (y,e) = f x !e' in e' := e; y) v in (v',!e') let array_fold_map f e v = let e' = ref e in let v' = Array.map (fun x -> let (e,y) = f !e' x in e' := e; y) v in (!e',v') let array_fold_map2' f v1 v2 e = let e' = ref e in let v' = array_map2 (fun x1 x2 -> let (y,e) = f x1 x2 !e' in e' := e; y) v1 v2 in (v',!e') let array_distinct v = let visited = Hashtbl.create 23 in try Array.iter (fun x -> if Hashtbl.mem visited x then raise Exit else Hashtbl.add visited x x) v; true with Exit -> false let array_union_map f a acc = Array.fold_left (fun x y -> f y x) acc a let array_rev_to_list a = let rec tolist i res = if i >= Array.length a then res else tolist (i+1) (a.(i) :: res) in tolist 0 [] let array_filter_along f filter v = Array.of_list (list_filter_along f filter (Array.to_list v)) let array_filter_with filter v = Array.of_list (list_filter_with filter (Array.to_list v)) (* Stream *) let stream_nth n st = try List.nth (Stream.npeek (n+1) st) n with Failure _ -> raise Stream.Failure let stream_njunk n st = for i = 1 to n do Stream.junk st done (* Matrices *) let matrix_transpose mat = List.fold_right (List.map2 (fun p c -> p::c)) mat (if mat = [] then [] else List.map (fun _ -> []) (List.hd mat)) (* Functions *) let identity x = x let compose f g x = f (g x) let const x _ = x let iterate f = let rec iterate_f n x = if n <= 0 then x else iterate_f (pred n) (f x) in iterate_f let repeat n f x = for i = 1 to n do f x done let iterate_for a b f x = let rec iterate i v = if i > b then v else iterate (succ i) (f i v) in iterate a x (* Delayed computations *) type 'a delayed = unit -> 'a let delayed_force f = f () (* Misc *) type ('a,'b) union = Inl of 'a | Inr of 'b module Intset = Set.Make(struct type t = int let compare = compare end) module Intmap = Map.Make(struct type t = int let compare = compare end) let intmap_in_dom x m = try let _ = Intmap.find x m in true with Not_found -> false let intmap_to_list m = Intmap.fold (fun n v l -> (n,v)::l) m [] let intmap_inv m b = Intmap.fold (fun n v l -> if v = b then n::l else l) m [] let interval n m = let rec interval_n (l,m) = if n > m then l else interval_n (m::l,pred m) in interval_n ([],m) let map_succeed f = let rec map_f = function | [] -> [] | h::t -> try (let x = f h in x :: map_f t) with Failure _ -> map_f t in map_f (* Pretty-printing *) let pr_spc = spc let pr_fnl = fnl let pr_int = int let pr_str = str let pr_comma () = str "," ++ spc () let pr_semicolon () = str ";" ++ spc () let pr_bar () = str "|" ++ spc () let pr_arg pr x = spc () ++ pr x let pr_opt pr = function None -> mt () | Some x -> pr_arg pr x let pr_opt_no_spc pr = function None -> mt () | Some x -> pr x let nth n = str (ordinal n) (* [prlist pr [a ; ... ; c]] outputs [pr a ++ ... ++ pr c] *) let rec prlist elem l = match l with | [] -> mt () | h::t -> Stream.lapp (fun () -> elem h) (prlist elem t) (* unlike all other functions below, [prlist] works lazily. if a strict behavior is needed, use [prlist_strict] instead. evaluation is done from left to right. 
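   ([prlist] suspends each call to [elem] via [Stream.lapp]; the call is
   only forced when the resulting stream of print commands is consumed.)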
*) let rec prlist_strict elem l = match l with | [] -> mt () | h::t -> let e = elem h in let r = prlist_strict elem t in e++r (* [prlist_with_sep sep pr [a ; ... ; c]] outputs [pr a ++ sep() ++ ... ++ sep() ++ pr c] *) let rec prlist_with_sep sep elem l = match l with | [] -> mt () | [h] -> elem h | h::t -> let e = elem h and s = sep() and r = prlist_with_sep sep elem t in e ++ s ++ r (* Print sequence of objects separated by space (unless an element is empty) *) let rec pr_sequence elem = function | [] -> mt () | [h] -> elem h | h::t -> let e = elem h and r = pr_sequence elem t in if e = mt () then r else e ++ spc () ++ r (* [pr_enum pr [a ; b ; ... ; c]] outputs [pr a ++ str "," ++ pr b ++ str "," ++ ... ++ str "and" ++ pr c] *) let pr_enum pr l = let c,l' = list_sep_last l in prlist_with_sep pr_comma pr l' ++ (if l'<>[] then str " and" ++ spc () else mt()) ++ pr c let pr_vertical_list pr = function | [] -> str "none" ++ fnl () | l -> fnl () ++ str " " ++ hov 0 (prlist_with_sep pr_fnl pr l) ++ fnl () (* [prvecti_with_sep sep pr [|a0 ; ... ; an|]] outputs [pr 0 a0 ++ sep() ++ ... ++ sep() ++ pr n an] *) let prvecti_with_sep sep elem v = let rec pr i = if i = 0 then elem 0 v.(0) else let r = pr (i-1) and s = sep () and e = elem i v.(i) in r ++ s ++ e in let n = Array.length v in if n = 0 then mt () else pr (n - 1) (* [prvecti pr [|a0 ; ... ; an|]] outputs [pr 0 a0 ++ ... ++ pr n an] *) let prvecti elem v = prvecti_with_sep mt elem v (* [prvect_with_sep sep pr [|a ; ... ; c|]] outputs [pr a ++ sep() ++ ... ++ sep() ++ pr c] *) let prvect_with_sep sep elem v = prvecti_with_sep sep (fun _ -> elem) v (* [prvect pr [|a ; ... ; c|]] outputs [pr a ++ ... ++ pr c] *) let prvect elem v = prvect_with_sep mt elem v let pr_located pr (loc,x) = if Flags.do_beautify() && loc<>dummy_loc then let (b,e) = unloc loc in comment b ++ pr x ++ comment e else pr x let surround p = hov 1 (str"(" ++ p ++ str")") (*s Memoization *) let memo1_eq eq f = let m = ref None in fun x -> match !m with Some(x',y') when eq x x' -> y' | _ -> let y = f x in m := Some(x,y); y let memo1_1 f = memo1_eq (==) f let memo1_2 f = let f' = memo1_eq (fun (x,y) (x',y') -> x==x' && y==y') (fun (x,y) -> f x y) in (fun x y -> f'(x,y)) (* Memorizes the last n distinct calls to f. Since there is no hash, Efficient only for small n. *) let memon_eq eq n f = let cache = ref [] in (* the cache: a stack *) let m = ref 0 in (* usage of the cache *) let rec find x = function | (x',y')::l when eq x x' -> y', l (* cell is moved to the top *) | [] -> (* we assume n>0, so creating one memo cell is OK *) incr m; (f x, []) | [_] when !m>=n -> f x,[] (* cache is full: dispose of last cell *) | p::l (* not(eq x (fst p)) *) -> let (y,l') = find x l in (y, p::l') in (fun x -> let (y,l) = find x !cache in cache := (x,y)::l; y) (*s Size of ocaml values. *) module Size = struct (*s Pointers already visited are stored in a hash-table, where comparisons are done using physical equality. *) module H = Hashtbl.Make( struct type t = Obj.t let equal = (==) let hash o = Hashtbl.hash (Obj.magic o : int) end) let node_table = (H.create 257 : unit H.t) let in_table o = try H.find node_table o; true with Not_found -> false let add_in_table o = H.add node_table o () let reset_table () = H.clear node_table (*s Objects are traversed recursively, as soon as their tags are less than [no_scan_tag]. [count] records the numbers of words already visited. 
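   Since visited blocks are remembered in [node_table], shared
   sub-structures are counted only once and cyclic values do not make
   the traversal loop.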
*) let size_of_double = Obj.size (Obj.repr 1.0) let count = ref 0 let rec traverse t = if not (in_table t) then begin add_in_table t; if Obj.is_block t then begin let n = Obj.size t in let tag = Obj.tag t in if tag < Obj.no_scan_tag then begin count := !count + 1 + n; for i = 0 to n - 1 do let f = Obj.field t i in if Obj.is_block f then traverse f done end else if tag = Obj.string_tag then count := !count + 1 + n else if tag = Obj.double_tag then count := !count + size_of_double else if tag = Obj.double_array_tag then count := !count + 1 + size_of_double * n else incr count end end (*s Sizes of objects in words and in bytes. The size in bytes is computed system-independently according to [Sys.word_size]. *) let size_w o = reset_table (); count := 0; traverse (Obj.repr o); !count let size_b o = (size_w o) * (Sys.word_size / 8) let size_kb o = (size_w o) / (8192 / Sys.word_size) end let size_w = Size.size_w let size_b = Size.size_b let size_kb = Size.size_kb (*s Total size of the allocated ocaml heap. *) let heap_size () = let stat = Gc.stat () and control = Gc.get () in let max_words_total = stat.Gc.heap_words + control.Gc.minor_heap_size in (max_words_total * (Sys.word_size / 8)) let heap_size_kb () = (heap_size () + 1023) / 1024 (*s interruption *) let interrupt = ref false let check_for_interrupt () = if !interrupt then begin interrupt := false; raise Sys.Break end coq-8.4pl2/lib/xml_utils.ml0000640000175000001440000001407111663737661014772 0ustar notinusers(* * Xml Light, an small Xml parser/printer with DTD support. * Copyright (C) 2003 Nicolas Cannasse (ncannasse@motion-twin.com) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) open Printf open Xml_parser exception Not_element of xml exception Not_pcdata of xml exception No_attribute of string let default_parser = Xml_parser.make() let parse (p:Xml_parser.t) (source:Xml_parser.source) = (* local cast Xml.xml -> xml *) (Obj.magic Xml_parser.parse p source : xml) let parse_in ch = parse default_parser (Xml_parser.SChannel ch) let parse_string str = parse default_parser (Xml_parser.SString str) let parse_file f = parse default_parser (Xml_parser.SFile f) let tag = function | Element (tag,_,_) -> tag | x -> raise (Not_element x) let pcdata = function | PCData text -> text | x -> raise (Not_pcdata x) let attribs = function | Element (_,attr,_) -> attr | x -> raise (Not_element x) let attrib x att = match x with | Element (_,attr,_) -> (try let att = String.lowercase att in snd (List.find (fun (n,_) -> String.lowercase n = att) attr) with Not_found -> raise (No_attribute att)) | x -> raise (Not_element x) let children = function | Element (_,_,clist) -> clist | x -> raise (Not_element x) (*let enum = function | Element (_,_,clist) -> List.to_enum clist | x -> raise (Not_element x) *) let iter f = function | Element (_,_,clist) -> List.iter f clist | x -> raise (Not_element x) let map f = function | Element (_,_,clist) -> List.map f clist | x -> raise (Not_element x) let fold f v = function | Element (_,_,clist) -> List.fold_left f v clist | x -> raise (Not_element x) let tmp = Buffer.create 200 let buffer_pcdata text = let l = String.length text in for p = 0 to l-1 do match text.[p] with | '>' -> Buffer.add_string tmp ">" | '<' -> Buffer.add_string tmp "<" | '&' -> if p < l-1 && text.[p+1] = '#' then Buffer.add_char tmp '&' else Buffer.add_string tmp "&" | '\'' -> Buffer.add_string tmp "'" | '"' -> Buffer.add_string tmp """ | c -> Buffer.add_char tmp c done let print_pcdata chan text = let l = String.length text in for p = 0 to l-1 do match text.[p] with | '>' -> Printf.fprintf chan ">" | '<' -> Printf.fprintf chan "<" | '&' -> if p < l-1 && text.[p+1] = '#' then Printf.fprintf chan "&" else Printf.fprintf chan "&" | '\'' -> Printf.fprintf chan "'" | '"' -> Printf.fprintf chan """ | c -> Printf.fprintf chan "%c" c done let buffer_attr (n,v) = Buffer.add_char tmp ' '; Buffer.add_string tmp n; Buffer.add_string tmp "=\""; let l = String.length v in for p = 0 to l-1 do match v.[p] with | '\\' -> Buffer.add_string tmp "\\\\" | '"' -> Buffer.add_string tmp "\\\"" | c -> Buffer.add_char tmp c done; Buffer.add_char tmp '"' let rec print_attr chan (n, v) = Printf.fprintf chan " %s=\"" n; let l = String.length v in for p = 0 to l-1 do match v.[p] with | '\\' -> Printf.fprintf chan "\\\\" | '"' -> Printf.fprintf chan "\\\"" | c -> Printf.fprintf chan "%c" c done; Printf.fprintf chan "\"" let print_attrs chan l = List.iter (print_attr chan) l let rec print_xml chan = function | Element (tag, alist, []) -> Printf.fprintf chan "<%s%a/>" tag print_attrs alist; | Element (tag, alist, l) -> Printf.fprintf chan "<%s%a>%a" tag print_attrs alist (fun chan -> List.iter (print_xml chan)) l tag | PCData text -> print_pcdata chan text let to_string x = let pcdata = ref false in let rec loop = function | Element (tag,alist,[]) -> Buffer.add_char tmp '<'; Buffer.add_string tmp tag; List.iter buffer_attr alist; Buffer.add_string tmp "/>"; pcdata := false; | Element (tag,alist,l) -> 
Buffer.add_char tmp '<'; Buffer.add_string tmp tag; List.iter buffer_attr alist; Buffer.add_char tmp '>'; pcdata := false; List.iter loop l; Buffer.add_string tmp "'; pcdata := false; | PCData text -> if !pcdata then Buffer.add_char tmp ' '; buffer_pcdata text; pcdata := true; in Buffer.reset tmp; loop x; let s = Buffer.contents tmp in Buffer.reset tmp; s let to_string_fmt x = let rec loop ?(newl=false) tab = function | Element (tag,alist,[]) -> Buffer.add_string tmp tab; Buffer.add_char tmp '<'; Buffer.add_string tmp tag; List.iter buffer_attr alist; Buffer.add_string tmp "/>"; if newl then Buffer.add_char tmp '\n'; | Element (tag,alist,[PCData text]) -> Buffer.add_string tmp tab; Buffer.add_char tmp '<'; Buffer.add_string tmp tag; List.iter buffer_attr alist; Buffer.add_string tmp ">"; buffer_pcdata text; Buffer.add_string tmp "'; if newl then Buffer.add_char tmp '\n'; | Element (tag,alist,l) -> Buffer.add_string tmp tab; Buffer.add_char tmp '<'; Buffer.add_string tmp tag; List.iter buffer_attr alist; Buffer.add_string tmp ">\n"; List.iter (loop ~newl:true (tab^" ")) l; Buffer.add_string tmp tab; Buffer.add_string tmp "'; if newl then Buffer.add_char tmp '\n'; | PCData text -> buffer_pcdata text; if newl then Buffer.add_char tmp '\n'; in Buffer.reset tmp; loop "" x; let s = Buffer.contents tmp in Buffer.reset tmp; s coq-8.4pl2/lib/rtree.ml0000640000175000001440000001350012010532755014047 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Param(0,j)) let mk_node lab sons = Node (lab, sons) (* The usual lift operation *) let rec lift_rtree_rec depth n = function Param (i,j) as t -> if i < depth then t else Param (i+n,j) | Node (l,sons) -> Node (l,Array.map (lift_rtree_rec depth n) sons) | Rec(j,defs) -> Rec(j, Array.map (lift_rtree_rec (depth+1) n) defs) let lift n t = if n=0 then t else lift_rtree_rec 0 n t (* The usual subst operation *) let rec subst_rtree_rec depth sub = function Param (i,j) as t -> if i < depth then t else if i-depth < Array.length sub then lift depth sub.(i-depth).(j) else Param (i-Array.length sub,j) | Node (l,sons) -> Node (l,Array.map (subst_rtree_rec depth sub) sons) | Rec(j,defs) -> Rec(j, Array.map (subst_rtree_rec (depth+1) sub) defs) let subst_rtree sub t = subst_rtree_rec 0 [|sub|] t (* To avoid looping, we must check that every body introduces a node or a parameter *) let rec expand = function | Rec(j,defs) -> let sub = Array.init (Array.length defs) (fun i -> Rec(i,defs)) in expand (subst_rtree sub defs.(j)) | t -> t (* Given a vector of n bodies, builds the n mutual recursive trees. Recursive calls are made with parameters (0,0) to (0,n-1). We check the bodies actually build something by checking it is not directly one of the parameters of depth 0. 
Some care is taken to accept definitions like rec X=Y and Y=f(X,Y) *) let mk_rec defs = let rec check histo d = match expand d with Param(0,j) when List.mem j histo -> failwith "invalid rec call" | Param(0,j) -> check (j::histo) defs.(j) | _ -> () in Array.mapi (fun i d -> check [i] d; Rec(i,defs)) defs (* let v(i,j) = lift i (mk_rec_calls(j+1)).(j);; let r = (mk_rec[|(mk_rec[|v(1,0)|]).(0)|]).(0);; let r = mk_rec[|v(0,1);v(1,0)|];; the last one should be accepted *) (* Tree destructors, expanding loops when necessary *) let dest_param t = match expand t with Param (i,j) -> (i,j) | _ -> failwith "Rtree.dest_param" let dest_node t = match expand t with Node (l,sons) -> (l,sons) | _ -> failwith "Rtree.dest_node" let is_node t = match expand t with Node _ -> true | _ -> false let rec map f t = match t with Param(i,j) -> Param(i,j) | Node (a,sons) -> Node (f a, Array.map (map f) sons) | Rec(j,defs) -> Rec (j, Array.map (map f) defs) let rec smartmap f t = match t with Param _ -> t | Node (a,sons) -> let a'=f a and sons' = Util.array_smartmap (map f) sons in if a'==a && sons'==sons then t else Node (a',sons') | Rec(j,defs) -> let defs' = Util.array_smartmap (map f) defs in if defs'==defs then t else Rec(j,defs') (* Fixpoint operator on trees: f is the body of the fixpoint. Arguments passed to f are: - a boolean telling if the subtree has already been seen - the current subtree - a function to make recursive calls on subtrees *) let fold f t = let rec fold histo t = let seen = List.mem t histo in let nhisto = if not seen then t::histo else histo in f seen (expand t) (fold nhisto) in fold [] t (* Tests if a given tree is infinite, i.e. has an branch of infinte length. *) let is_infinite t = fold (fun seen t is_inf -> seen || (match t with Node(_,v) -> array_exists is_inf v | Param _ -> false | _ -> assert false)) t let fold2 f t x = let rec fold histo t x = let seen = List.mem (t,x) histo in let nhisto = if not seen then (t,x)::histo else histo in f seen (expand t) x (fold nhisto) in fold [] t x let compare_rtree f = fold2 (fun seen t1 t2 cmp -> seen || let b = f t1 t2 in if b < 0 then false else if b > 0 then true else match expand t1, expand t2 with Node(_,v1), Node(_,v2) when Array.length v1 = Array.length v2 -> array_for_all2 cmp v1 v2 | _ -> false) let eq_rtree cmp t1 t2 = t1 == t2 || t1=t2 || compare_rtree (fun t1 t2 -> if cmp (fst(dest_node t1)) (fst(dest_node t2)) then 0 else (-1)) t1 t2 (* Pretty-print a tree (not so pretty) *) open Pp let rec pp_tree prl t = match t with Param (i,j) -> str"#"++int i++str","++int j | Node(lab,[||]) -> hov 2 (str"("++prl lab++str")") | Node(lab,v) -> hov 2 (str"("++prl lab++str","++brk(1,0)++ Util.prvect_with_sep Util.pr_comma (pp_tree prl) v++str")") | Rec(i,v) -> if Array.length v = 0 then str"Rec{}" else if Array.length v = 1 then hov 2 (str"Rec{"++pp_tree prl v.(0)++str"}") else hov 2 (str"Rec{"++int i++str","++brk(1,0)++ Util.prvect_with_sep Util.pr_comma (pp_tree prl) v++str"}") coq-8.4pl2/lib/fmap.ml0000640000175000001440000000743511271301462013660 0ustar notinusers module Make = functor (X:Map.OrderedType) -> struct type key = X.t type 'a t = Empty | Node of 'a t * key * 'a * 'a t * int let empty = Empty let is_empty = function Empty -> true | _ -> false let height = function Empty -> 0 | Node(_,_,_,_,h) -> h let create l x d r = let hl = height l and hr = height r in Node(l, x, d, r, (if hl >= hr then hl + 1 else hr + 1)) let bal l x d r = let hl = match l with Empty -> 0 | Node(_,_,_,_,h) -> h in let hr = match r with Empty -> 0 | 
Node(_,_,_,_,h) -> h in if hl > hr + 2 then begin match l with Empty -> invalid_arg "Map.bal" | Node(ll, lv, ld, lr, _) -> if height ll >= height lr then create ll lv ld (create lr x d r) else begin match lr with Empty -> invalid_arg "Map.bal" | Node(lrl, lrv, lrd, lrr, _)-> create (create ll lv ld lrl) lrv lrd (create lrr x d r) end end else if hr > hl + 2 then begin match r with Empty -> invalid_arg "Map.bal" | Node(rl, rv, rd, rr, _) -> if height rr >= height rl then create (create l x d rl) rv rd rr else begin match rl with Empty -> invalid_arg "Map.bal" | Node(rll, rlv, rld, rlr, _) -> create (create l x d rll) rlv rld (create rlr rv rd rr) end end else Node(l, x, d, r, (if hl >= hr then hl + 1 else hr + 1)) let rec add x data = function Empty -> Node(Empty, x, data, Empty, 1) | Node(l, v, d, r, h) -> let c = X.compare x v in if c = 0 then Node(l, x, data, r, h) else if c < 0 then bal (add x data l) v d r else bal l v d (add x data r) let rec find x = function Empty -> raise Not_found | Node(l, v, d, r, _) -> let c = X.compare x v in if c = 0 then d else find x (if c < 0 then l else r) let rec mem x = function Empty -> false | Node(l, v, d, r, _) -> let c = X.compare x v in c = 0 || mem x (if c < 0 then l else r) let rec min_binding = function Empty -> raise Not_found | Node(Empty, x, d, r, _) -> (x, d) | Node(l, x, d, r, _) -> min_binding l let rec remove_min_binding = function Empty -> invalid_arg "Map.remove_min_elt" | Node(Empty, x, d, r, _) -> r | Node(l, x, d, r, _) -> bal (remove_min_binding l) x d r let merge t1 t2 = match (t1, t2) with (Empty, t) -> t | (t, Empty) -> t | (_, _) -> let (x, d) = min_binding t2 in bal t1 x d (remove_min_binding t2) let rec remove x = function Empty -> Empty | Node(l, v, d, r, h) -> let c = X.compare x v in if c = 0 then merge l r else if c < 0 then bal (remove x l) v d r else bal l v d (remove x r) let rec iter f = function Empty -> () | Node(l, v, d, r, _) -> iter f l; f v d; iter f r let rec map f = function Empty -> Empty | Node(l, v, d, r, h) -> Node(map f l, v, f d, map f r, h) (* Maintien de fold_right par compatibilité (changé en fold_left dans ocaml-3.09.0) *) let rec fold f m accu = match m with Empty -> accu | Node(l, v, d, r, _) -> fold f l (f v d (fold f r accu)) (* Added with respect to ocaml standard library. 
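(* Illustrative sketch, not part of the original source: behaviour of the
   extra accessors below on a tiny map, assuming the functor is applied to
   integer keys ordered by compare.

     let m = add 1 10 (add 2 20 empty)
     let _ = dom m        (* = [1; 2]             : keys, via an in-order fold *)
     let _ = rng m        (* = [10; 20]           : the corresponding values   *)
     let _ = to_list m    (* = [(1, 10); (2, 20)] : key/value pairs            *)
*)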
*) let dom m = fold (fun x _ acc -> x::acc) m [] let rng m = fold (fun _ y acc -> y::acc) m [] let to_list m = fold (fun x y acc -> (x,y)::acc) m [] end coq-8.4pl2/lib/gmap.ml0000640000175000001440000001063112010532755013654 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ -> false let height = function Empty -> 0 | Node(_,_,_,_,h) -> h let create l x d r = let hl = height l and hr = height r in Node(l, x, d, r, (if hl >= hr then hl + 1 else hr + 1)) let bal l x d r = let hl = match l with Empty -> 0 | Node(_,_,_,_,h) -> h in let hr = match r with Empty -> 0 | Node(_,_,_,_,h) -> h in if hl > hr + 2 then begin match l with Empty -> invalid_arg "Map.bal" | Node(ll, lv, ld, lr, _) -> if height ll >= height lr then create ll lv ld (create lr x d r) else begin match lr with Empty -> invalid_arg "Map.bal" | Node(lrl, lrv, lrd, lrr, _)-> create (create ll lv ld lrl) lrv lrd (create lrr x d r) end end else if hr > hl + 2 then begin match r with Empty -> invalid_arg "Map.bal" | Node(rl, rv, rd, rr, _) -> if height rr >= height rl then create (create l x d rl) rv rd rr else begin match rl with Empty -> invalid_arg "Map.bal" | Node(rll, rlv, rld, rlr, _) -> create (create l x d rll) rlv rld (create rlr rv rd rr) end end else Node(l, x, d, r, (if hl >= hr then hl + 1 else hr + 1)) let rec add x data = function Empty -> Node(Empty, x, data, Empty, 1) | Node(l, v, d, r, h) -> let c = Pervasives.compare x v in if c = 0 then Node(l, x, data, r, h) else if c < 0 then bal (add x data l) v d r else bal l v d (add x data r) let rec find x = function Empty -> raise Not_found | Node(l, v, d, r, _) -> let c = Pervasives.compare x v in if c = 0 then d else find x (if c < 0 then l else r) let rec mem x = function Empty -> false | Node(l, v, d, r, _) -> let c = Pervasives.compare x v in c = 0 || mem x (if c < 0 then l else r) let rec min_binding = function Empty -> raise Not_found | Node(Empty, x, d, r, _) -> (x, d) | Node(l, x, d, r, _) -> min_binding l let rec remove_min_binding = function Empty -> invalid_arg "Map.remove_min_elt" | Node(Empty, x, d, r, _) -> r | Node(l, x, d, r, _) -> bal (remove_min_binding l) x d r let merge t1 t2 = match (t1, t2) with (Empty, t) -> t | (t, Empty) -> t | (_, _) -> let (x, d) = min_binding t2 in bal t1 x d (remove_min_binding t2) let rec remove x = function Empty -> Empty | Node(l, v, d, r, h) -> let c = Pervasives.compare x v in if c = 0 then merge l r else if c < 0 then bal (remove x l) v d r else bal l v d (remove x r) let rec iter f = function Empty -> () | Node(l, v, d, r, _) -> iter f l; f v d; iter f r let rec map f = function Empty -> Empty | Node(l, v, d, r, h) -> Node(map f l, v, f d, map f r, h) (* Maintien de fold_right par compatibilit (chang en fold_left dans ocaml-3.09.0) *) let rec fold f m accu = match m with Empty -> accu | Node(l, v, d, r, _) -> fold f l (f v d (fold f r accu)) (* Added with respect to ocaml standard library. *) let dom m = fold (fun x _ acc -> x::acc) m [] let rng m = fold (fun _ y acc -> y::acc) m [] let to_list m = fold (fun x y acc -> (x,y)::acc) m [] coq-8.4pl2/lib/hashtbl_alt.ml0000640000175000001440000000747612074326511015233 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> bool end module type S = sig type elt (* [may_add_and_get key constr] uses [key] to look for [constr] in the hash table [H]. 
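(* Illustrative sketch, not part of the original source: a typical hash-consing
   use of this interface.  [StrHash] is a hypothetical instantiation on strings.

     module StrHash = Make (struct
       type t = string
       let equals = (=)          (* structural equality on stored values *)
     end)

     let intern s = StrHash.may_add_and_get (Hashtbl.hash s) s
     (* after interning, structurally equal strings share one representative:
        intern ("ab" ^ "c") == intern ("ab" ^ "c")  holds *)
*)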
If [constr] is in [H], returns the specific representation that is stored in [H]. Otherwise, [constr] is stored in [H] and will be used as the canonical representation of this value in the future. *) val may_add_and_get : int -> elt -> elt end module Make (E : Hashtype) = struct type elt = E.t type bucketlist = Empty | Cons of elt * int * bucketlist let initial_size = 19991 let table_data = ref (Array.make initial_size Empty) let table_size = ref 0 let resize () = let odata = !table_data in let osize = Array.length odata in let nsize = min (2 * osize + 1) Sys.max_array_length in if nsize <> osize then begin let ndata = Array.create nsize Empty in let rec insert_bucket = function | Empty -> () | Cons (key, hash, rest) -> let nidx = hash mod nsize in ndata.(nidx) <- Cons (key, hash, ndata.(nidx)); insert_bucket rest in for i = 0 to osize - 1 do insert_bucket odata.(i) done; table_data := ndata end let add hash key = let odata = !table_data in let osize = Array.length odata in let i = hash mod osize in odata.(i) <- Cons (key, hash, odata.(i)); incr table_size; if !table_size > osize lsl 1 then resize () let find_rec hash key bucket = let rec aux = function | Empty -> add hash key; key | Cons (k, h, rest) -> if hash == h && E.equals key k then k else aux rest in aux bucket let may_add_and_get hash key = let odata = !table_data in match odata.(hash mod (Array.length odata)) with | Empty -> add hash key; key | Cons (k1, h1, rest1) -> if hash == h1 && E.equals key k1 then k1 else match rest1 with | Empty -> add hash key; key | Cons (k2, h2, rest2) -> if hash == h2 && E.equals key k2 then k2 else match rest2 with | Empty -> add hash key; key | Cons (k3, h3, rest3) -> if hash == h3 && E.equals key k3 then k3 else find_rec hash key rest3 end module Combine = struct (* These are helper functions to combine the hash keys in a similar way as [Hashtbl.hash] does. The constants [alpha] and [beta] must be prime numbers. There were chosen empirically. Notice that the problem of hashing trees is hard and there are plenty of study on this topic. Therefore, there must be room for improvement here. *) let alpha = 65599 let beta = 7 let combine x y = x * alpha + y let combine3 x y z = combine x (combine y z) let combine4 x y z t = combine x (combine3 y z t) let combinesmall x y = beta * x + y end coq-8.4pl2/lib/errors.mli0000640000175000001440000000370712121620060014411 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Pp.std_ppcmds) -> unit (** The standard exception printer *) val print : exn -> Pp.std_ppcmds (** Same as [print], except that the "Please report" part of an anomaly isn't printed (used in Ltac debugging). *) val print_no_report : exn -> Pp.std_ppcmds (** Same as [print], except that anomalies are not printed but re-raised (used for the Fail command) *) val print_no_anomaly : exn -> Pp.std_ppcmds (** Critical exceptions shouldn't be catched and ignored by mistake by inner functions during a [vernacinterp]. They should be handled only in [Toplevel.do_vernac] (or Ideslave), to be displayed to the user. Typical example: [Sys.Break]. In the 8.4 branch, for maximal compatibility, anomalies are not considered as critical... 
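(* Illustrative sketch, not part of the original source: [noncritical] below is
   meant to guard broad handlers so that Sys.Break and genuine anomalies keep
   propagating.  [attempt] is a hypothetical helper, not part of this module.

     let attempt f x default =
       try f x
       with e when Errors.noncritical e -> default
*)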
*) val noncritical : exn -> bool coq-8.4pl2/lib/heap.mli0000640000175000001440000000302512010532755014015 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> int end module type S =sig (** Type of functional heaps *) type t (** Type of elements *) type elt (** The empty heap *) val empty : t (** [add x h] returns a new heap containing the elements of [h], plus [x]; complexity {% $ %}O(log(n)){% $ %} *) val add : elt -> t -> t (** [maximum h] returns the maximum element of [h]; raises [EmptyHeap] when [h] is empty; complexity {% $ %}O(1){% $ %} *) val maximum : t -> elt (** [remove h] returns a new heap containing the elements of [h], except the maximum of [h]; raises [EmptyHeap] when [h] is empty; complexity {% $ %}O(log(n)){% $ %} *) val remove : t -> t (** usual iterators and combinators; elements are presented in arbitrary order *) val iter : (elt -> unit) -> t -> unit val fold : (elt -> 'a -> 'a) -> t -> 'a -> 'a end exception EmptyHeap (** {6 Functional implementation. } *) module Functional(X: Ordered) : S with type elt=X.t coq-8.4pl2/lib/util.mli0000640000175000001440000004005412010532755014060 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a val anomalylabstrm : string -> std_ppcmds -> 'a exception UserError of string * std_ppcmds val error : string -> 'a val errorlabstrm : string -> std_ppcmds -> 'a exception AlreadyDeclared of std_ppcmds val alreadydeclared : std_ppcmds -> 'a (** [todo] is for running of an incomplete code its implementation is "do nothing" (or print a message), but this function should not be used in a released code *) val todo : string -> unit exception Timeout type loc = Loc.t type 'a located = loc * 'a val unloc : loc -> int * int val make_loc : int * int -> loc val dummy_loc : loc val join_loc : loc -> loc -> loc val anomaly_loc : loc * string * std_ppcmds -> 'a val user_err_loc : loc * string * std_ppcmds -> 'a val invalid_arg_loc : loc * string -> 'a val located_fold_left : ('a -> 'b -> 'a) -> 'a -> 'b located -> 'a val located_iter2 : ('a -> 'b -> unit) -> 'a located -> 'b located -> unit val down_located : ('a -> 'b) -> 'a located -> 'b (** Like [Exc_located], but specifies the outermost file read, the input buffer associated to the location of the error (or the module name if boolean is true), and the error itself. *) exception Error_in_file of string * (bool * string * loc) * exn (** Mapping under pairs *) val on_fst : ('a -> 'b) -> 'a * 'c -> 'b * 'c val on_snd : ('a -> 'b) -> 'c * 'a -> 'c * 'b val map_pair : ('a -> 'b) -> 'a * 'a -> 'b * 'b (** Going down pairs *) val down_fst : ('a -> 'b) -> 'a * 'c -> 'b val down_snd : ('a -> 'b) -> 'c * 'a -> 'b (** Mapping under triple *) val on_pi1 : ('a -> 'b) -> 'a * 'c * 'd -> 'b * 'c * 'd val on_pi2 : ('a -> 'b) -> 'c * 'a * 'd -> 'c * 'b * 'd val on_pi3 : ('a -> 'b) -> 'c * 'd * 'a -> 'c * 'd * 'b (** {6 Projections from triplets } *) val pi1 : 'a * 'b * 'c -> 'a val pi2 : 'a * 'b * 'c -> 'b val pi3 : 'a * 'b * 'c -> 'c (** {6 Chars. } *) val is_letter : char -> bool val is_digit : char -> bool val is_ident_tail : char -> bool val is_blank : char -> bool (** {6 Strings. 
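(* Illustrative sketch, not part of the original source: expected behaviour of
   a few of the string helpers declared below, on small inputs.

     explode "abc"          (* = ["a"; "b"; "c"]                        *)
     implode ["ab"; "c"]    (* = "abc"                                  *)
     plural 2 "goal"        (* = "goals" (plural mark added when n > 1) *)
     ordinal 3              (* = "3rd"                                  *)
*)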
} *) val explode : string -> string list val implode : string list -> string val strip : string -> string val drop_simple_quotes : string -> string val string_index_from : string -> int -> string -> int val string_string_contains : where:string -> what:string -> bool val plural : int -> string -> string val ordinal : int -> string val split_string_at : char -> string -> string list val parse_loadpath : string -> string list module Stringset : Set.S with type elt = string module Stringmap : Map.S with type key = string type utf8_status = UnicodeLetter | UnicodeIdentPart | UnicodeSymbol exception UnsupportedUtf8 val classify_unicode : int -> utf8_status val check_ident : string -> unit val check_ident_soft : string -> unit val lowercase_first_char_utf8 : string -> string val ascii_of_ident : string -> string (** {6 Lists. } *) val list_compare : ('a -> 'a -> int) -> 'a list -> 'a list -> int val list_equal : ('a -> 'a -> bool) -> 'a list -> 'a list -> bool val list_add_set : 'a -> 'a list -> 'a list val list_eq_set : 'a list -> 'a list -> bool val list_intersect : 'a list -> 'a list -> 'a list val list_union : 'a list -> 'a list -> 'a list val list_unionq : 'a list -> 'a list -> 'a list val list_subtract : 'a list -> 'a list -> 'a list val list_subtractq : 'a list -> 'a list -> 'a list (** [list_tabulate f n] builds [[f 0; ...; f (n-1)]] *) val list_tabulate : (int -> 'a) -> int -> 'a list val list_make : int -> 'a -> 'a list val list_assign : 'a list -> int -> 'a -> 'a list val list_distinct : 'a list -> bool val list_duplicates : 'a list -> 'a list val list_filter2 : ('a -> 'b -> bool) -> 'a list * 'b list -> 'a list * 'b list val list_map_filter : ('a -> 'b option) -> 'a list -> 'b list val list_map_filter_i : (int -> 'a -> 'b option) -> 'a list -> 'b list val list_filter_with : bool list -> 'a list -> 'a list val list_filter_along : ('a -> bool) -> 'a list -> 'b list -> 'b list (** [list_smartmap f [a1...an] = List.map f [a1...an]] but if for all i [ f ai == ai], then [list_smartmap f l==l] *) val list_smartmap : ('a -> 'a) -> 'a list -> 'a list val list_map_left : ('a -> 'b) -> 'a list -> 'b list val list_map_i : (int -> 'a -> 'b) -> int -> 'a list -> 'b list val list_map2_i : (int -> 'a -> 'b -> 'c) -> int -> 'a list -> 'b list -> 'c list val list_map3 : ('a -> 'b -> 'c -> 'd) -> 'a list -> 'b list -> 'c list -> 'd list val list_map4 : ('a -> 'b -> 'c -> 'd -> 'e) -> 'a list -> 'b list -> 'c list -> 'd list -> 'e list val list_map_to_array : ('a -> 'b) -> 'a list -> 'b array val list_filter_i : (int -> 'a -> bool) -> 'a list -> 'a list (** [list_smartfilter f [a1...an] = List.filter f [a1...an]] but if for all i [f ai = true], then [list_smartfilter f l==l] *) val list_smartfilter : ('a -> bool) -> 'a list -> 'a list (** [list_index] returns the 1st index of an element in a list (counting from 1) *) val list_index : 'a -> 'a list -> int val list_index_f : ('a -> 'a -> bool) -> 'a -> 'a list -> int (** [list_unique_index x l] returns [Not_found] if [x] doesn't occur exactly once *) val list_unique_index : 'a -> 'a list -> int (** [list_index0] behaves as [list_index] except that it starts counting at 0 *) val list_index0 : 'a -> 'a list -> int val list_index0_f : ('a -> 'a -> bool) -> 'a -> 'a list -> int val list_iter3 : ('a -> 'b -> 'c -> unit) -> 'a list -> 'b list -> 'c list -> unit val list_iter_i : (int -> 'a -> unit) -> 'a list -> unit val list_fold_right_i : (int -> 'a -> 'b -> 'b) -> int -> 'a list -> 'b -> 'b val list_fold_left_i : (int -> 'a -> 'b -> 'a) -> int -> 'a -> 'b 
list -> 'a val list_fold_right_and_left : ('a -> 'b -> 'b list -> 'a) -> 'b list -> 'a -> 'a val list_fold_left3 : ('a -> 'b -> 'c -> 'd -> 'a) -> 'a -> 'b list -> 'c list -> 'd list -> 'a val list_for_all_i : (int -> 'a -> bool) -> int -> 'a list -> bool val list_except : 'a -> 'a list -> 'a list val list_remove : 'a -> 'a list -> 'a list val list_remove_first : 'a -> 'a list -> 'a list val list_remove_assoc_in_triple : 'a -> ('a * 'b * 'c) list -> ('a * 'b * 'c) list val list_assoc_snd_in_triple : 'a -> ('a * 'b * 'c) list -> 'b val list_for_all2eq : ('a -> 'b -> bool) -> 'a list -> 'b list -> bool val list_sep_last : 'a list -> 'a * 'a list val list_try_find_i : (int -> 'a -> 'b) -> int -> 'a list -> 'b val list_try_find : ('a -> 'b) -> 'a list -> 'b val list_uniquize : 'a list -> 'a list (** merges two sorted lists and preserves the uniqueness property: *) val list_merge_uniq : ('a -> 'a -> int) -> 'a list -> 'a list -> 'a list val list_subset : 'a list -> 'a list -> bool val list_chop : int -> 'a list -> 'a list * 'a list (* former [list_split_at] was a duplicate of [list_chop] *) val list_split_when : ('a -> bool) -> 'a list -> 'a list * 'a list val list_split_by : ('a -> bool) -> 'a list -> 'a list * 'a list val list_split3 : ('a * 'b * 'c) list -> 'a list * 'b list * 'c list val list_partition_by : ('a -> 'a -> bool) -> 'a list -> 'a list list val list_firstn : int -> 'a list -> 'a list val list_last : 'a list -> 'a val list_lastn : int -> 'a list -> 'a list val list_skipn : int -> 'a list -> 'a list val list_skipn_at_least : int -> 'a list -> 'a list val list_addn : int -> 'a -> 'a list -> 'a list val list_prefix_of : 'a list -> 'a list -> bool (** [list_drop_prefix p l] returns [t] if [l=p++t] else return [l] *) val list_drop_prefix : 'a list -> 'a list -> 'a list val list_drop_last : 'a list -> 'a list (** [map_append f [x1; ...; xn]] returns [(f x1)@(f x2)@...@(f xn)] *) val list_map_append : ('a -> 'b list) -> 'a list -> 'b list val list_join_map : ('a -> 'b list) -> 'a list -> 'b list (** raises [Invalid_argument] if the two lists don't have the same length *) val list_map_append2 : ('a -> 'b -> 'c list) -> 'a list -> 'b list -> 'c list val list_share_tails : 'a list -> 'a list -> 'a list * 'a list * 'a list (** [list_fold_map f e_0 [l_1...l_n] = e_n,[k_1...k_n]] where [(e_i,k_i)=f e_{i-1} l_i] *) val list_fold_map : ('a -> 'b -> 'a * 'c) -> 'a -> 'b list -> 'a * 'c list val list_fold_map' : ('b -> 'a -> 'c * 'a) -> 'b list -> 'a -> 'c list * 'a val list_map_assoc : ('a -> 'b) -> ('c * 'a) list -> ('c * 'b) list val list_assoc_f : ('a -> 'a -> bool) -> 'a -> ('a * 'b) list -> 'b (** A generic cartesian product: for any operator (**), [list_cartesian (**) [x1;x2] [y1;y2] = [x1**y1; x1**y2; x2**y1; x2**y1]], and so on if there are more elements in the lists. *) val list_cartesian : ('a -> 'b -> 'c) -> 'a list -> 'b list -> 'c list (** [list_cartesians] is an n-ary cartesian product: it iterates [list_cartesian] over a list of lists. 
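(* Illustrative sketch, not part of the original source, following the
   specification of [list_cartesian] given above:

     list_cartesian (+) [1; 2] [10; 20]
     (* = [1 + 10; 1 + 20; 2 + 10; 2 + 20] = [11; 21; 12; 22] *)

     list_cartesians (fun x acc -> x :: acc) [] [[1; 2]; [3; 4]]
     (* = [[1; 3]; [1; 4]; [2; 3]; [2; 4]], i.e. every way of picking one
        element from each inner list *)
*)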
*) val list_cartesians : ('a -> 'b -> 'b) -> 'b -> 'a list list -> 'b list (** list_combinations [[a;b];[c;d]] returns [[a;c];[a;d];[b;c];[b;d]] *) val list_combinations : 'a list list -> 'a list list val list_combine3 : 'a list -> 'b list -> 'c list -> ('a * 'b * 'c) list (** Keep only those products that do not return None *) val list_cartesian_filter : ('a -> 'b -> 'c option) -> 'a list -> 'b list -> 'c list val list_cartesians_filter : ('a -> 'b -> 'b option) -> 'b -> 'a list list -> 'b list val list_union_map : ('a -> 'b -> 'b) -> 'a list -> 'b -> 'b val list_factorize_left : ('a * 'b) list -> ('a * 'b list) list (** {6 Arrays. } *) val array_compare : ('a -> 'a -> int) -> 'a array -> 'a array -> int val array_equal : ('a -> 'a -> bool) -> 'a array -> 'a array -> bool val array_exists : ('a -> bool) -> 'a array -> bool val array_for_all : ('a -> bool) -> 'a array -> bool val array_for_all2 : ('a -> 'b -> bool) -> 'a array -> 'b array -> bool val array_for_all3 : ('a -> 'b -> 'c -> bool) -> 'a array -> 'b array -> 'c array -> bool val array_for_all4 : ('a -> 'b -> 'c -> 'd -> bool) -> 'a array -> 'b array -> 'c array -> 'd array -> bool val array_for_all_i : (int -> 'a -> bool) -> int -> 'a array -> bool val array_find_i : (int -> 'a -> bool) -> 'a array -> int option val array_hd : 'a array -> 'a val array_tl : 'a array -> 'a array val array_last : 'a array -> 'a val array_cons : 'a -> 'a array -> 'a array val array_rev : 'a array -> unit val array_fold_right_i : (int -> 'b -> 'a -> 'a) -> 'b array -> 'a -> 'a val array_fold_left_i : (int -> 'a -> 'b -> 'a) -> 'a -> 'b array -> 'a val array_fold_right2 : ('a -> 'b -> 'c -> 'c) -> 'a array -> 'b array -> 'c -> 'c val array_fold_left2 : ('a -> 'b -> 'c -> 'a) -> 'a -> 'b array -> 'c array -> 'a val array_fold_left3 : ('a -> 'b -> 'c -> 'd -> 'a) -> 'a -> 'b array -> 'c array -> 'd array -> 'a val array_fold_left2_i : (int -> 'a -> 'b -> 'c -> 'a) -> 'a -> 'b array -> 'c array -> 'a val array_fold_left_from : int -> ('a -> 'b -> 'a) -> 'a -> 'b array -> 'a val array_fold_right_from : int -> ('a -> 'b -> 'b) -> 'a array -> 'b -> 'b val array_app_tl : 'a array -> 'a list -> 'a list val array_list_of_tl : 'a array -> 'a list val array_map_to_list : ('a -> 'b) -> 'a array -> 'b list val array_chop : int -> 'a array -> 'a array * 'a array val array_smartmap : ('a -> 'a) -> 'a array -> 'a array val array_map2 : ('a -> 'b -> 'c) -> 'a array -> 'b array -> 'c array val array_map2_i : (int -> 'a -> 'b -> 'c) -> 'a array -> 'b array -> 'c array val array_map3 : ('a -> 'b -> 'c -> 'd) -> 'a array -> 'b array -> 'c array -> 'd array val array_map_left : ('a -> 'b) -> 'a array -> 'b array val array_map_left_pair : ('a -> 'b) -> 'a array -> ('c -> 'd) -> 'c array -> 'b array * 'd array val array_iter2 : ('a -> 'b -> unit) -> 'a array -> 'b array -> unit val array_fold_map' : ('a -> 'c -> 'b * 'c) -> 'a array -> 'c -> 'b array * 'c val array_fold_map : ('a -> 'b -> 'a * 'c) -> 'a -> 'b array -> 'a * 'c array val array_fold_map2' : ('a -> 'b -> 'c -> 'd * 'c) -> 'a array -> 'b array -> 'c -> 'd array * 'c val array_distinct : 'a array -> bool val array_union_map : ('a -> 'b -> 'b) -> 'a array -> 'b -> 'b val array_rev_to_list : 'a array -> 'a list val array_filter_along : ('a -> bool) -> 'a list -> 'b array -> 'b array val array_filter_with : bool list -> 'a array -> 'a array (** {6 Streams. } *) val stream_nth : int -> 'a Stream.t -> 'a val stream_njunk : int -> 'a Stream.t -> unit (** {6 Matrices. 
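(* Illustrative sketch, not part of the original source: [matrix_transpose]
   below exchanges the rows and columns of a matrix represented as a list of
   rows, e.g.

     matrix_transpose [[1; 2; 3];
                       [4; 5; 6]]
     (* = [[1; 4]; [2; 5]; [3; 6]] *)
*)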
} *) val matrix_transpose : 'a list list -> 'a list list (** {6 Functions. } *) val identity : 'a -> 'a val compose : ('a -> 'b) -> ('c -> 'a) -> 'c -> 'b val const : 'a -> 'b -> 'a val iterate : ('a -> 'a) -> int -> 'a -> 'a val repeat : int -> ('a -> unit) -> 'a -> unit val iterate_for : int -> int -> (int -> 'a -> 'a) -> 'a -> 'a (** {6 Delayed computations. } *) type 'a delayed = unit -> 'a val delayed_force : 'a delayed -> 'a (** {6 Misc. } *) type ('a,'b) union = Inl of 'a | Inr of 'b module Intset : Set.S with type elt = int module Intmap : Map.S with type key = int val intmap_in_dom : int -> 'a Intmap.t -> bool val intmap_to_list : 'a Intmap.t -> (int * 'a) list val intmap_inv : 'a Intmap.t -> 'a -> int list val interval : int -> int -> int list (** In [map_succeed f l] an element [a] is removed if [f a] raises [Failure _] otherwise behaves as [List.map f l] *) val map_succeed : ('a -> 'b) -> 'a list -> 'b list (** {6 Pretty-printing. } *) val pr_spc : unit -> std_ppcmds val pr_fnl : unit -> std_ppcmds val pr_int : int -> std_ppcmds val pr_str : string -> std_ppcmds val pr_comma : unit -> std_ppcmds val pr_semicolon : unit -> std_ppcmds val pr_bar : unit -> std_ppcmds val pr_arg : ('a -> std_ppcmds) -> 'a -> std_ppcmds val pr_opt : ('a -> std_ppcmds) -> 'a option -> std_ppcmds val pr_opt_no_spc : ('a -> std_ppcmds) -> 'a option -> std_ppcmds val nth : int -> std_ppcmds val prlist : ('a -> std_ppcmds) -> 'a list -> std_ppcmds (** unlike all other functions below, [prlist] works lazily. if a strict behavior is needed, use [prlist_strict] instead. *) val prlist_strict : ('a -> std_ppcmds) -> 'a list -> std_ppcmds val prlist_with_sep : (unit -> std_ppcmds) -> ('b -> std_ppcmds) -> 'b list -> std_ppcmds val prvect : ('a -> std_ppcmds) -> 'a array -> std_ppcmds val prvecti : (int -> 'a -> std_ppcmds) -> 'a array -> std_ppcmds val prvect_with_sep : (unit -> std_ppcmds) -> ('a -> std_ppcmds) -> 'a array -> std_ppcmds val prvecti_with_sep : (unit -> std_ppcmds) -> (int -> 'a -> std_ppcmds) -> 'a array -> std_ppcmds val pr_vertical_list : ('b -> std_ppcmds) -> 'b list -> std_ppcmds val pr_enum : ('a -> std_ppcmds) -> 'a list -> std_ppcmds val pr_located : ('a -> std_ppcmds) -> 'a located -> std_ppcmds val pr_sequence : ('a -> std_ppcmds) -> 'a list -> std_ppcmds val surround : std_ppcmds -> std_ppcmds (** {6 Memoization. } *) (** General comments on memoization: - cache is created whenever the function is supplied (because of ML's polymorphic value restriction). - cache is never flushed (unless the memoized fun is GC'd) One cell memory: memorizes only the last call *) val memo1_1 : ('a -> 'b) -> ('a -> 'b) val memo1_2 : ('a -> 'b -> 'c) -> ('a -> 'b -> 'c) (** with custom equality (used to deal with various arities) *) val memo1_eq : ('a -> 'a -> bool) -> ('a -> 'b) -> ('a -> 'b) (** Memorizes the last [n] distinct calls. Efficient only for small [n]. *) val memon_eq : ('a -> 'a -> bool) -> int -> ('a -> 'b) -> ('a -> 'b) (** {6 Size of an ocaml value (in words, bytes and kilobytes). } *) val size_w : 'a -> int val size_b : 'a -> int val size_kb : 'a -> int (** {6 Total size of the allocated ocaml heap. } *) val heap_size : unit -> int val heap_size_kb : unit -> int (** {6 ... 
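(* Illustrative sketch, not part of the original source: the one-cell memo
   combinators above remember only the most recent call, so they pay off when
   the same argument is queried several times in a row.  [expensive] is a
   hypothetical costly function.

     let fast = memo1_1 expensive
     let _ = fast 42      (* computed *)
     let _ = fast 42      (* answered from the single-cell cache *)
     let _ = fast 43      (* recomputed; the entry for 42 is overwritten *)
*)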
} *) (** Coq interruption: set the following boolean reference to interrupt Coq (it eventually raises [Break], simulating a Ctrl-C) *) val interrupt : bool ref val check_for_interrupt : unit -> unit coq-8.4pl2/lib/explore.mli0000640000175000001440000000351412010532755014561 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* state list val success : state -> bool val pp : state -> Pp.std_ppcmds end (** {6 ... } *) (** Functor [Make] returns some search functions given a search problem. Search functions raise [Not_found] if no success is found. States are always visited in the order they appear in the output of [branching] (whatever the search method is). Debugging versions of the search functions print the position of the visited state together with the state it-self (using [S.pp]). *) module Make : functor(S : SearchProblem) -> sig val depth_first : S.state -> S.state val debug_depth_first : S.state -> S.state val breadth_first : S.state -> S.state val debug_breadth_first : S.state -> S.state end coq-8.4pl2/lib/dyn.ml0000640000175000001440000000160412010532755013522 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (s,Obj.repr v)), (fun (s',rv) -> if s = s' then Obj.magic rv else failwith "dyn_out")) let tag (s,_) = s coq-8.4pl2/lib/rtree.mli0000640000175000001440000000606212010532755014225 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a t array -> 'a t (** Build mutually recursive trees: X_1 = f_1(X_1,..,X_n) ... X_n = f_n(X_1,..,X_n) is obtained by the following pseudo-code let vx = mk_rec_calls n in let [|x_1;..;x_n|] = mk_rec[|f_1(vx.(0),..,vx.(n-1);..;f_n(vx.(0),..,vx.(n-1))|] First example: build rec X = a(X,Y) and Y = b(X,Y,Y) let [|vx;vy|] = mk_rec_calls 2 in let [|x;y|] = mk_rec [|mk_node a [|vx;vy|]; mk_node b [|vx;vy;vy|]|] Another example: nested recursive trees rec Y = b(rec X = a(X,Y),Y,Y) let [|vy|] = mk_rec_calls 1 in let [|vx|] = mk_rec_calls 1 in let [|x|] = mk_rec[|mk_node a vx;lift 1 vy|] let [|y|] = mk_rec[|mk_node b x;vy;vy|] (note the lift to avoid *) val mk_rec_calls : int -> 'a t array val mk_rec : 'a t array -> 'a t array (** [lift k t] increases of [k] the free parameters of [t]. Needed to avoid captures when a tree appears under [mk_rec] *) val lift : int -> 'a t -> 'a t val is_node : 'a t -> bool (** Destructors (recursive calls are expanded) *) val dest_node : 'a t -> 'a * 'a t array (** dest_param is not needed for closed trees (i.e. with no free variable) *) val dest_param : 'a t -> int * int (** Tells if a tree has an infinite branch *) val is_infinite : 'a t -> bool (** [compare_rtree f t1 t2] compares t1 t2 (top-down). f is called on each node: if the result is negative then the traversal ends on false, it is is positive then deeper nodes are not examined, and the traversal continues on respective siblings, and if it is 0, then the traversal continues on sons, pairwise. In this latter case, if the nodes do not have the same number of sons, then the traversal ends on false. In case of loop, the traversal is successful and it resumes on siblings. 
*) val compare_rtree : ('a t -> 'b t -> int) -> 'a t -> 'b t -> bool val eq_rtree : ('a -> 'a -> bool) -> 'a t -> 'a t -> bool (** Iterators *) val map : ('a -> 'b) -> 'a t -> 'b t (** [(smartmap f t) == t] if [(f a) ==a ] for all nodes *) val smartmap : ('a -> 'a) -> 'a t -> 'a t val fold : (bool -> 'a t -> ('a t -> 'b) -> 'b) -> 'a t -> 'b val fold2 : (bool -> 'a t -> 'b -> ('a t -> 'b -> 'c) -> 'c) -> 'a t -> 'b -> 'c (** A rather simple minded pretty-printer *) val pp_tree : ('a -> Pp.std_ppcmds) -> 'a t -> Pp.std_ppcmds coq-8.4pl2/lib/segmenttree.mli0000640000175000001440000000130211366253725015430 0ustar notinusers(** This module is a very simple implementation of "segment trees". A segment tree of type ['a t] represents a mapping from a union of disjoint segments to some values of type 'a. *) (** A mapping from a union of disjoint segments to some values of type ['a]. *) type 'a t (** [make [(i1, j1), v1; (i2, j2), v2; ...]] creates a mapping that associates to every integer [x] the value [v1] if [i1 <= x <= j1], [v2] if [i2 <= x <= j2], and so one. Precondition: the segments must be sorted. *) val make : ((int * int) * 'a) list -> 'a t (** [lookup k t] looks for an image for key [k] in the interval tree [t]. Raise [Not_found] if it fails. *) val lookup : int -> 'a t -> 'a coq-8.4pl2/lib/fmap.mli0000640000175000001440000000104711366253725014037 0ustar notinusers module Make : functor (X : Map.OrderedType) -> sig type key = X.t type 'a t val empty : 'a t val is_empty : 'a t -> bool val add : key -> 'a -> 'a t -> 'a t val find : key -> 'a t -> 'a val remove : key -> 'a t -> 'a t val mem : key -> 'a t -> bool val iter : (key -> 'a -> unit) -> 'a t -> unit val map : ('a -> 'b) -> 'a t -> 'b t val fold : (key -> 'a -> 'c -> 'c) -> 'a t -> 'c -> 'c (** Additions with respect to ocaml standard library. *) val dom : 'a t -> key list val rng : 'a t -> 'a list val to_list : 'a t -> (key * 'a) list end coq-8.4pl2/lib/flags.ml0000640000175000001440000001064612121620060014020 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* o := old; raise reraise let without_option o f x = let old = !o in o:=false; try let r = f x in o := old; r with reraise -> o := old; raise reraise let boot = ref false let batch_mode = ref false let debug = ref false let print_emacs = ref false let term_quality = ref false let xml_export = ref false type load_proofs = Force | Lazy | Dont let load_proofs = ref Lazy let raw_print = ref false let record_print = ref true (* Compatibility mode *) (* Current means no particular compatibility consideration. For correct comparisons, this constructor should remain the last one. 
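(* Illustrative sketch, not part of the original source: the comparison
   functions below rely on the declaration order of the constructors, so that
   V8_2 < V8_3 < Current for the polymorphic comparison.  For instance:

     version_strictly_greater V8_2   (* true in the default Current mode      *)
     version_less_or_equal V8_3      (* true only when an 8.2/8.3 compatibility
                                        version has been selected             *)
*)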
*) type compat_version = V8_2 | V8_3 | Current let compat_version = ref Current let version_strictly_greater v = !compat_version > v let version_less_or_equal v = not (version_strictly_greater v) let pr_version = function | V8_2 -> "8.2" | V8_3 -> "8.3" | Current -> "current" (* Translate *) let beautify = ref false let make_beautify f = beautify := f let do_beautify () = !beautify let beautify_file = ref false (* Silent / Verbose *) let silent = ref false let make_silent flag = silent := flag; () let is_silent () = !silent let is_verbose () = not !silent let silently f x = with_option silent f x let verbosely f x = without_option silent f x let if_silent f x = if !silent then f x let if_verbose f x = if not !silent then f x let auto_intros = ref true let make_auto_intros flag = auto_intros := flag let is_auto_intros () = version_strictly_greater V8_2 && !auto_intros let hash_cons_proofs = ref true let warn = ref true let make_warn flag = warn := flag; () let if_warn f x = if !warn then f x (* The number of printed hypothesis in a goal *) let print_hyps_limit = ref (None : int option) let set_print_hyps_limit n = print_hyps_limit := n let print_hyps_limit () = !print_hyps_limit (* A list of the areas of the system where "unsafe" operation * has been requested *) module Stringset = Set.Make(struct type t = string let compare = compare end) let unsafe_set = ref Stringset.empty let add_unsafe s = unsafe_set := Stringset.add s !unsafe_set let is_unsafe s = Stringset.mem s !unsafe_set (* Flags for external tools *) let subst_command_placeholder s t = let buff = Buffer.create (String.length s + String.length t) in let i = ref 0 in while (!i < String.length s) do if s.[!i] = '%' & !i+1 < String.length s & s.[!i+1] = 's' then (Buffer.add_string buff t;incr i) else Buffer.add_char buff s.[!i]; incr i done; Buffer.contents buff let browser_cmd_fmt = try let coq_netscape_remote_var = "COQREMOTEBROWSER" in Sys.getenv coq_netscape_remote_var with Not_found -> Coq_config.browser let is_standard_doc_url url = let wwwcompatprefix = "http://www.lix.polytechnique.fr/coq/" in let n = String.length Coq_config.wwwcoq in let n' = String.length Coq_config.wwwrefman in url = Coq_config.localwwwrefman || url = Coq_config.wwwrefman || url = wwwcompatprefix ^ String.sub Coq_config.wwwrefman n (n'-n) (* same as in System, but copied here because of dependencies *) let canonical_path_name p = let current = Sys.getcwd () in Sys.chdir p; let result = Sys.getcwd () in Sys.chdir current; result (* Options for changing coqlib *) let coqlib_spec = ref false let coqlib = ref ( (* same as Envars.coqroot, but copied here because of dependencies *) Filename.dirname (canonical_path_name (Filename.dirname Sys.executable_name)) ) (* Options for changing camlbin (used by coqmktop) *) let camlbin_spec = ref false let camlbin = ref Coq_config.camlbin (* Options for changing camlp4bin (used by coqmktop) *) let camlp4bin_spec = ref false let camlp4bin = ref Coq_config.camlp4bin (* Level of inlining during a functor application *) let default_inline_level = 100 let inline_level = ref default_inline_level let set_inline_level = (:=) inline_level let get_inline_level () = !inline_level coq-8.4pl2/lib/store.ml0000640000175000001440000000330212121620060014047 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t ; get : t -> 'a option } val embed : unit -> 'a etype end (* We use a dynamic "name" allocator. 
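(* Illustrative sketch, not part of the original source: creating a field and
   using it on a store, with the record interface defined below.

     let int_field : int Field.t = field ()
     let s = int_field.Field.set 42 empty
     let _ = int_field.Field.get s        (* = Some 42 *)
     let _ = int_field.Field.get empty    (* = None    *)
*)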
But if we needed to serialise stores, we might want something static to avoid troubles with plugins order. *) let next = let count = ref 0 in fun () -> let n = !count in incr count; n type t = Obj.t Util.Intmap.t module Field = struct type 'a field = { set : 'a -> t -> t ; get : t -> 'a option ; remove : t -> t } type 'a t = 'a field end open Field let empty = Util.Intmap.empty let field () = let fid = next () in let set a s = Util.Intmap.add fid (Obj.repr a) s in let get s = try Some (Obj.obj (Util.Intmap.find fid s)) with Not_found -> None in let remove s = Util.Intmap.remove fid s in { set = set ; get = get ; remove = remove } coq-8.4pl2/lib/lib.mllib0000640000175000001440000000035111663441004014162 0ustar notinusersXml_lexer Xml_parser Xml_utils Pp_control Pp Compat Flags Segmenttree Unicodetable Util Errors Bigint Hashcons Dyn System Envars Gmap Fset Fmap Tries Gmapl Profile Explore Predicate Rtree Heap Option Dnet Store Unionfind Hashtbl_alt coq-8.4pl2/lib/doc.tex0000640000175000001440000000021706771170177013701 0ustar notinusers \newpage \section*{Utility libraries} \ocwsection \label{lib} This chapter describes the various utility libraries used in the code of \Coq. coq-8.4pl2/lib/system.mli0000640000175000001440000000550212103025606014421 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string val exclude_search_in_dirname : string -> unit val all_subdirs : unix_path:string -> (physical_path * string list) list val is_in_path : load_path -> string -> bool val is_in_system_path : string -> bool val where_in_path : ?warn:bool -> load_path -> string -> physical_path * string val physical_path_of_string : string -> physical_path val string_of_physical_path : physical_path -> string val make_suffix : string -> string -> string val file_readable_p : string -> bool val expand_path_macros : string -> string val getenv_else : string -> string -> string val home : string val exists_dir : string -> bool val find_file_in_path : ?warn:bool -> load_path -> string -> physical_path * string (** {6 I/O functions } *) (** Generic input and output functions, parameterized by a magic number and a suffix. The intern functions raise the exception [Bad_magic_number] when the check fails, with the full file name. 
*) val marshal_out : out_channel -> 'a -> unit val marshal_in : string -> in_channel -> 'a exception Bad_magic_number of string val raw_extern_intern : int -> string -> (string -> string * out_channel) * (string -> in_channel) val extern_intern : ?warn:bool -> int -> string -> (string -> 'a -> unit) * (load_path -> string -> 'a) val with_magic_number_check : ('a -> 'b) -> 'a -> 'b (** {6 Sending/receiving once with external executable } *) val connect : (out_channel -> unit) -> (in_channel -> 'a) -> string -> 'a (** {6 Executing commands } *) (** [run_command converter f com] launches command [com], and returns the contents of stdout and stderr that have been processed with [converter]; the processed contents of stdout and stderr is also passed to [f] *) val run_command : (string -> string) -> (string -> unit) -> string -> Unix.process_status * string (** {6 Time stamps.} *) type time val get_time : unit -> time val time_difference : time -> time -> float (** in seconds *) val fmt_time_difference : time -> time -> Pp.std_ppcmds coq-8.4pl2/lib/hashtbl_alt.mli0000640000175000001440000000333212010532755015366 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> bool end module type S = sig type elt (* [may_add_and_get key constr] uses [key] to look for [constr] in the hash table [H]. If [constr] is in [H], returns the specific representation that is stored in [H]. Otherwise, [constr] is stored in [H] and will be used as the canonical representation of this value in the future. *) val may_add_and_get : int -> elt -> elt end module Make (E : Hashtype) : S with type elt = E.t module Combine : sig val combine : int -> int -> int val combinesmall : int -> int -> int val combine3 : int -> int -> int -> int val combine4 : int -> int -> int -> int -> int end coq-8.4pl2/lib/envars.ml0000640000175000001440000001062412121620060014216 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let coqlib = match Coq_config.coqlib with | Some coqlib -> coqlib | None -> coqroot in if Sys.file_exists (Filename.concat coqlib file) then coqlib else Util.error "cannot guess a path for Coq libraries; please use -coqlib option") let coqlib () = if !Flags.coqlib_spec then !Flags.coqlib else (if !Flags.boot then coqroot else guess_coqlib ()) let docdir () = reldir (if Coq_config.arch = "win32" then ["doc"] else ["share";"doc";"coq"]) "html" (fun () -> Coq_config.docdir) let path_to_list p = let sep = if Sys.os_type = "Win32" then ';' else ':' in Util.split_string_at sep p let xdg_data_home = Filename.concat (System.getenv_else "XDG_DATA_HOME" (Filename.concat System.home ".local/share")) "coq" let xdg_config_home = Filename.concat (System.getenv_else "XDG_CONFIG_HOME" (Filename.concat System.home ".config")) "coq" let xdg_data_dirs = (try List.map (fun dir -> Filename.concat dir "coq") (path_to_list (Sys.getenv "XDG_DATA_DIRS")) with Not_found -> ["/usr/local/share/coq";"/usr/share/coq"]) @ (match Coq_config.datadir with |None -> [] |Some datadir -> [datadir]) let xdg_dirs = let dirs = xdg_data_home :: xdg_data_dirs in List.rev (List.filter Sys.file_exists dirs) let coqpath = try let path = Sys.getenv "COQPATH" in List.rev (List.filter Sys.file_exists (path_to_list path)) with Not_found -> [] let rec which l f = match l with | [] -> raise Not_found | p :: tl -> if Sys.file_exists (Filename.concat p f) then p else 
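(* Illustrative sketch, not part of the original source: [which] returns the
   first directory of the list that contains the requested file and raises
   Not_found otherwise, so with the helpers of this file one may write, on a
   Unix-style PATH:

     which (path_to_list "/usr/local/bin:/usr/bin") (exe "ocamlc")
     (* = the first of the two directories that actually contains ocamlc *)
*)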
which tl f let guess_camlbin () = let path = Sys.getenv "PATH" in (* may raise Not_found *) let lpath = path_to_list path in which lpath (exe "ocamlc") let guess_camlp4bin () = let path = Sys.getenv "PATH" in (* may raise Not_found *) let lpath = path_to_list path in which lpath (exe Coq_config.camlp4) let camlbin () = if !Flags.camlbin_spec then !Flags.camlbin else if !Flags.boot then Coq_config.camlbin else try guess_camlbin () with e when e <> Sys.Break -> Coq_config.camlbin let camllib () = if !Flags.boot then Coq_config.camllib else let camlbin = camlbin () in let com = (Filename.concat camlbin "ocamlc") ^ " -where" in let _,res = System.run_command (fun x -> x) (fun _ -> ()) com in Util.strip res let camlp4bin () = if !Flags.camlp4bin_spec then !Flags.camlp4bin else if !Flags.boot then Coq_config.camlp4bin else try guess_camlp4bin () with e when e <> Sys.Break -> let cb = camlbin () in if Sys.file_exists (Filename.concat cb (exe Coq_config.camlp4)) then cb else Coq_config.camlp4bin let camlp4lib () = if !Flags.boot then Coq_config.camlp4lib else let camlp4bin = camlp4bin () in let com = (Filename.concat camlp4bin Coq_config.camlp4) ^ " -where" in let ex,res = System.run_command (fun x -> x) (fun _ -> ()) com in match ex with |Unix.WEXITED 0 -> Util.strip res |_ -> "/dev/null" coq-8.4pl2/lib/tries.mli0000640000175000001440000000101311366253725014233 0ustar notinusers module Make : functor (X : Set.OrderedType) -> functor (Y : Map.OrderedType) -> sig type t val empty : t (** Work on labels, not on paths. *) val map : t -> Y.t -> t val xtract : t -> X.t list val dom : t -> Y.t list val in_dom : t -> Y.t -> bool (** Work on paths, not on labels. *) val add : t -> Y.t list * X.t -> t val rmv : t -> Y.t list * X.t -> t val app : ((Y.t list * X.t) -> unit) -> t -> unit val to_list : t -> (Y.t list * X.t) list end coq-8.4pl2/lib/gmapl.ml0000640000175000001440000000172412010532755014033 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Gmap.add x [y] m let find x m = try Gmap.find x m with Not_found -> [] let remove x y m = let l = Gmap.find x m in Gmap.add x (if List.mem y l then list_subtract l [y] else l) m coq-8.4pl2/lib/profile.ml0000640000175000001440000005667112121620060014374 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* e::l let magic = 1249 let merge_profile filename (curr_table, curr_outside, curr_total as new_data) = let (old_table, old_outside, old_total) = try let c = open_in filename in if input_binary_int c <> magic then Printf.printf "Incompatible recording file: %s\n" filename; let old_data = input_value c in close_in c; old_data with Sys_error msg -> (Printf.printf "Unable to open %s: %s\n" filename msg; new_data) in let updated_data = let updated_table = List.fold_right ajoute_to_list curr_table old_table in ajoute curr_outside old_outside; ajoute curr_total old_total; (updated_table, old_outside, old_total) in begin (try let c = open_out_gen [Open_creat;Open_wronly;Open_trunc;Open_binary] 0o644 filename in output_binary_int c magic; output_value c updated_data; close_out c with Sys_error _ -> Printf.printf "Unable to create recording file"); updated_data end (************************************************) (* Compute a rough estimation of time overheads *) (* Time and space are not measured in the same way *) (* Byte allocation is an exact number and for 
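(* Illustrative sketch, not part of the original source: the measurement idea
   described here, reduced to a standalone helper built on the standard
   Gc.allocated_bytes counter (a float, so it cannot overflow on 32-bit hosts).

     let words_allocated_by f x =
       let before = Gc.allocated_bytes () in
       let _ = f x in
       let after = Gc.allocated_bytes () in
       (after -. before) /. float_of_int (Sys.word_size / 8)
*)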
long runs, the total number of allocated bytes may exceed the maximum integer capacity (2^31 on 32-bits architectures); therefore, allocation is measured by small steps, total allocations are computed by adding elementary measures and carries are controled from step to step *) (* Unix measure of time is approximative and shoitt delays are often unperceivable; therefore, total times are measured in one (big) step to avoid rounding errors and to get the best possible approximation. Note: Sys.time is the same as: Unix.(let x = times () in x.tms_utime +. x.tms_stime) *) (* ---------- start profile for f1 overheadA| ... ---------- [1w1] 1st call to get_time for f1 overheadB| ... ---------- start f1 real 1 | ... ---------- start profile for 1st call to f2 inside f1 overheadA| ... ---------- [2w1] 1st call to get_time for 1st f2 overheadB| ... ---------- start 1st f2 real 2 | ... ---------- end 1st f2 overheadC| ... ---------- [2w1] 2nd call to get_time for 1st f2 overheadD| ... ---------- end profile for 1st f2 real 1 | ... ---------- start profile for 2nd call to f2 inside f1 overheadA| ... ---------- [2'w1] 1st call to get_time for 2nd f2 overheadB| ... ---------- start 2nd f2 real 2' | ... ---------- end 2nd f2 overheadC| ... ---------- [2'w2] 2nd call to get_time for 2nd f2 overheadD| ... ---------- end profile for f2 real 1 | ... ---------- end f1 overheadC| ... ---------- [1w1'] 2nd call to get_time for f1 overheadD| ... ---------- end profile for f1 When profiling f2, overheadB + overheadC should be subtracted from measure and overheadA + overheadB + overheadC + overheadD should be subtracted from the amount for f1 Then the relevant overheads are : "overheadB + overheadC" to be subtracted to the measure of f as many time as f is called and "overheadA + overheadB + overheadC + overheadD" to be subtracted to the measure of f as many time as f calls a profiled function (itself included) *) let dummy_last_alloc = ref 0.0 let dummy_spent_alloc () = let now = get_alloc () in let before = !last_alloc in last_alloc := now; now -. before let dummy_f x = x let dummy_stack = ref [create_record ()] let dummy_ov = 0 let loops = 10000 let time_overhead_A_D () = let e = create_record () in let before = get_time () in for i=1 to loops do (* This is a copy of profile1 for overhead estimation *) let dw = dummy_spent_alloc () in match !dummy_stack with [] -> assert false | p::_ -> ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let dt = get_time () - 1 in e.tottime <- dt + dummy_ov; e.owntime <- e.owntime + e.tottime; ajoute_ownalloc p dw; ajoute_totalloc p dw; p.owntime <- p.owntime - e.tottime; ajoute_totalloc p (e.totalloc-.totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !dummy_stack with [] -> assert false | _::s -> stack := s); dummy_last_alloc := get_alloc () done; let after = get_time () in let beforeloop = get_time () in for i=1 to loops do () done; let afterloop = get_time () in float_of_int ((after - before) - (afterloop - beforeloop)) /. 
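(* Illustrative sketch, not part of the original source: the calibration
   pattern used by time_overhead_A_D and time_overhead_B_C, in isolation.
   [instrumented_noop] stands for one dummy pass through the profiling code.

     let per_call_overhead instrumented_noop loops =
       let t0 = Sys.time () in
       for _i = 1 to loops do instrumented_noop () done;
       let t1 = Sys.time () in
       for _i = 1 to loops do () done;     (* cost of the bare loop itself *)
       let t2 = Sys.time () in
       ((t1 -. t0) -. (t2 -. t1)) /. float_of_int loops
*)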
float_of_int loops let time_overhead_B_C () = let dummy_x = 0 in let before = get_time () in for i=1 to loops do try dummy_last_alloc := get_alloc (); let _r = dummy_f dummy_x in let _dw = dummy_spent_alloc () in let _dt = get_time () in () with e when e <> Sys.Break -> assert false done; let after = get_time () in let beforeloop = get_time () in for i=1 to loops do () done; let afterloop = get_time () in float_of_int ((after - before) - (afterloop - beforeloop)) /. float_of_int loops let compute_alloc lo = lo /. (float_of_int word_length) (************************************************) (* End a profiling session and print the result *) let format_profile (table, outside, total) = print_newline (); Printf.printf "%-23s %9s %9s %10s %10s %10s\n" "Function name" "Own time" "Tot. time" "Own alloc" "Tot. alloc" "Calls "; let l = Sort.list (fun (_,{tottime=p}) (_,{tottime=p'}) -> p > p') table in List.iter (fun (name,e) -> Printf.printf "%-23s %9.2f %9.2f %10.0f %10.0f %6d %6d\n" name (float_of_time e.owntime) (float_of_time e.tottime) (compute_alloc e.ownalloc) (compute_alloc e.totalloc) e.owncount e.intcount) l; Printf.printf "%-23s %9.2f %9.2f %10.0f %10.0f %6d\n" "others" (float_of_time outside.owntime) (float_of_time outside.tottime) (compute_alloc outside.ownalloc) (compute_alloc outside.totalloc) outside.intcount; (* Here, own contains overhead time/alloc *) Printf.printf "%-23s %9.2f %9.2f %10.0f %10.0f\n" "Est. overhead/total" (float_of_time total.owntime) (float_of_time total.tottime) (compute_alloc total.ownalloc) (compute_alloc total.totalloc); Printf.printf "Time in seconds and allocation in words (1 word = %d bytes)\n" word_length let recording_file = ref "" let set_recording s = recording_file := s let adjust_time ov_bc ov_ad e = let bc_imm = float_of_int e.owncount *. ov_bc in let ad_imm = float_of_int e.immcount *. ov_ad in let abcd_all = float_of_int e.intcount *. (ov_ad +. ov_bc) in {e with tottime = e.tottime - int_of_float (abcd_all +. bc_imm); owntime = e.owntime - int_of_float (ad_imm +. bc_imm) } let close_profile print = let dw = spent_alloc () in let t = get_time () in match !stack with | [outside] -> outside.tottime <- outside.tottime + t; outside.owntime <- outside.owntime + t; ajoute_ownalloc outside dw; ajoute_totalloc outside dw; if !prof_table <> [] then begin let ov_bc = time_overhead_B_C () (* B+C overhead *) in let ov_ad = time_overhead_A_D () (* A+D overhead *) in let adjust (n,e) = (n, adjust_time ov_bc ov_ad e) in let adjtable = List.map adjust !prof_table in let adjoutside = adjust_time ov_bc ov_ad outside in let totalloc = !last_alloc -. !init_alloc in let total = create_record () in total.tottime <- outside.tottime; total.totalloc <- totalloc; (* We compute estimations of overhead, put into "own" fields *) total.owntime <- outside.tottime - adjoutside.tottime; total.ownalloc <- totalloc -. 
outside.totalloc; let current_data = (adjtable, adjoutside, total) in let updated_data = match !recording_file with | "" -> current_data | name -> merge_profile !recording_file current_data in if print then format_profile updated_data; init_profile () end | _ -> failwith "Inconsistency" let print_profile () = close_profile true let declare_profile name = if name = "___outside___" or name = "___total___" then failwith ("Error: "^name^" is a reserved keyword"); let e = create_record () in prof_table := (name,e)::!prof_table; e (* Default initialisation, may be overriden *) let _ = init_profile () (******************************) (* Entry points for profiling *) let profile1 e f a = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise let profile2 e f a b = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a b in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. 
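(* Illustrative sketch, not part of the original source: how these entry points
   are meant to be used.  A unary function [f] is profiled by registering a
   record once and wrapping the function with [profile1]; profile2..profile7
   follow the same pattern for higher arities.

     let f_key = declare_profile "f"
     let f x = x + 1                 (* stand-in for the real definition *)
     let f = profile1 f_key f       (* instrumented replacement          *)
     let _ = print_profile ()       (* prints the per-function table     *)
*)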
totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise let profile3 e f a b c = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a b c in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise let profile4 e f a b c d = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a b c d in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise let profile5 e f a b c d g = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a b c d g in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. 
totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise let profile6 e f a b c d g h = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a b c d g h in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise let profile7 e f a b c d g h i = let dw = spent_alloc () in match !stack with [] -> assert false | p::_ -> (* We add spent alloc since last measure to current caller own/total alloc *) ajoute_ownalloc p dw; ajoute_totalloc p dw; e.owncount <- e.owncount + 1; if not (p==e) then stack := e::!stack; let totalloc0 = e.totalloc in let intcount0 = e.intcount in let t = get_time () in try last_alloc := get_alloc (); let r = f a b c d g h i in let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); r with reraise -> let dw = spent_alloc () in let dt = get_time () - t in e.tottime <- e.tottime + dt; e.owntime <- e.owntime + dt; ajoute_ownalloc e dw; ajoute_totalloc e dw; p.owntime <- p.owntime - dt; ajoute_totalloc p (e.totalloc -. 
totalloc0); p.intcount <- p.intcount + e.intcount - intcount0 + 1; p.immcount <- p.immcount + 1; if not (p==e) then (match !stack with [] -> assert false | _::s -> stack := s); last_alloc := get_alloc (); raise reraise (* Some utilities to compute the logical and physical sizes and depth of ML objects *) let c = ref 0 let s = ref 0 let b = ref 0 let m = ref 0 let rec obj_stats d t = if Obj.is_int t then m := max d !m else if Obj.tag t >= Obj.no_scan_tag then if Obj.tag t = Obj.string_tag then (c := !c + Obj.size t; b := !b + 1; m := max d !m) else if Obj.tag t = Obj.double_tag then (s := !s + 2; b := !b + 1; m := max d !m) else if Obj.tag t = Obj.double_array_tag then (s := !s + 2 * Obj.size t; b := !b + 1; m := max d !m) else (b := !b + 1; m := max d !m) else let n = Obj.size t in s := !s + n; b := !b + 1; block_stats (d + 1) (n - 1) t and block_stats d i t = if i >= 0 then (obj_stats d (Obj.field t i); block_stats d (i-1) t) let obj_stats a = c := 0; s:= 0; b:= 0; m:= 0; obj_stats 0 (Obj.repr a); (!c, !s + !b, !m) module H = Hashtbl.Make( struct type t = Obj.t let equal = (==) let hash o = Hashtbl.hash (Obj.magic o : int) end) let tbl = H.create 13 let rec obj_shared_size s t = if Obj.is_int t then s else if H.mem tbl t then s else begin H.add tbl t (); let n = Obj.size t in if Obj.tag t >= Obj.no_scan_tag then if Obj.tag t = Obj.string_tag then (c := !c + n; s + 1) else if Obj.tag t = Obj.double_tag then s + 3 else if Obj.tag t = Obj.double_array_tag then s + 2 * n + 1 else s + 1 else block_shared_size (s + n + 1) (n - 1) t end and block_shared_size s i t = if i < 0 then s else block_shared_size (obj_shared_size s (Obj.field t i)) (i-1) t let obj_shared_size a = H.clear tbl; c := 0; let s = obj_shared_size 0 (Obj.repr a) in (!c, s) let print_logical_stats a = let (c, s, d) = obj_stats a in Printf.printf "Expanded size: %10d (str: %8d) Depth: %6d\n" (s+c) c d let print_stats a = let (c1, s, d) = obj_stats a in let (c2, o) = obj_shared_size a in Printf.printf "Size: %8d (str: %8d) (exp: %10d) Depth: %6d\n" (o + c2) c2 (s + c1) d (* let _ = Gc.set { (Gc.get()) with Gc.verbose = 13 } *) coq-8.4pl2/lib/fset.ml0000640000175000001440000001623711271301462013676 0ustar notinusersmodule Make = functor (X : Set.OrderedType) -> struct type elt = X.t type t = Empty | Node of t * elt * t * int (* Sets are represented by balanced binary trees (the heights of the children differ by at most 2 *) let height = function Empty -> 0 | Node(_, _, _, h) -> h (* Creates a new node with left son l, value x and right son r. l and r must be balanced and | height l - height r | <= 2. Inline expansion of height for better speed. *) let create l x r = let hl = match l with Empty -> 0 | Node(_,_,_,h) -> h in let hr = match r with Empty -> 0 | Node(_,_,_,h) -> h in Node(l, x, r, (if hl >= hr then hl + 1 else hr + 1)) (* Same as create, but performs one step of rebalancing if necessary. Assumes l and r balanced. Inline expansion of create for better speed in the most frequent case where no rebalancing is required. 
*) let bal l x r = let hl = match l with Empty -> 0 | Node(_,_,_,h) -> h in let hr = match r with Empty -> 0 | Node(_,_,_,h) -> h in if hl > hr + 2 then begin match l with Empty -> invalid_arg "Set.bal" | Node(ll, lv, lr, _) -> if height ll >= height lr then create ll lv (create lr x r) else begin match lr with Empty -> invalid_arg "Set.bal" | Node(lrl, lrv, lrr, _)-> create (create ll lv lrl) lrv (create lrr x r) end end else if hr > hl + 2 then begin match r with Empty -> invalid_arg "Set.bal" | Node(rl, rv, rr, _) -> if height rr >= height rl then create (create l x rl) rv rr else begin match rl with Empty -> invalid_arg "Set.bal" | Node(rll, rlv, rlr, _) -> create (create l x rll) rlv (create rlr rv rr) end end else Node(l, x, r, (if hl >= hr then hl + 1 else hr + 1)) (* Same as bal, but repeat rebalancing until the final result is balanced. *) let rec join l x r = match bal l x r with Empty -> invalid_arg "Set.join" | Node(l', x', r', _) as t' -> let d = height l' - height r' in if d < -2 or d > 2 then join l' x' r' else t' (* Merge two trees l and r into one. All elements of l must precede the elements of r. Assumes | height l - height r | <= 2. *) let rec merge t1 t2 = match (t1, t2) with (Empty, t) -> t | (t, Empty) -> t | (Node(l1, v1, r1, h1), Node(l2, v2, r2, h2)) -> bal l1 v1 (bal (merge r1 l2) v2 r2) (* Same as merge, but does not assume anything about l and r. *) let rec concat t1 t2 = match (t1, t2) with (Empty, t) -> t | (t, Empty) -> t | (Node(l1, v1, r1, h1), Node(l2, v2, r2, h2)) -> join l1 v1 (join (concat r1 l2) v2 r2) (* Splitting *) let rec split x = function Empty -> (Empty, None, Empty) | Node(l, v, r, _) -> let c = X.compare x v in if c = 0 then (l, Some v, r) else if c < 0 then let (ll, vl, rl) = split x l in (ll, vl, join rl v r) else let (lr, vr, rr) = split x r in (join l v lr, vr, rr) (* Implementation of the set operations *) let empty = Empty let is_empty = function Empty -> true | _ -> false let rec mem x = function Empty -> false | Node(l, v, r, _) -> let c = X.compare x v in c = 0 || mem x (if c < 0 then l else r) let rec add x = function Empty -> Node(Empty, x, Empty, 1) | Node(l, v, r, _) as t -> let c = X.compare x v in if c = 0 then t else if c < 0 then bal (add x l) v r else bal l v (add x r) let singleton x = Node(Empty, x, Empty, 1) let rec remove x = function Empty -> Empty | Node(l, v, r, _) -> let c = X.compare x v in if c = 0 then merge l r else if c < 0 then bal (remove x l) v r else bal l v (remove x r) let rec union s1 s2 = match (s1, s2) with (Empty, t2) -> t2 | (t1, Empty) -> t1 | (Node(l1, v1, r1, h1), Node(l2, v2, r2, h2)) -> if h1 >= h2 then if h2 = 1 then add v2 s1 else begin let (l2, _, r2) = split v1 s2 in join (union l1 l2) v1 (union r1 r2) end else if h1 = 1 then add v1 s2 else begin let (l1, _, r1) = split v2 s1 in join (union l1 l2) v2 (union r1 r2) end let rec inter s1 s2 = match (s1, s2) with (Empty, t2) -> Empty | (t1, Empty) -> Empty | (Node(l1, v1, r1, _), t2) -> match split v1 t2 with (l2, None, r2) -> concat (inter l1 l2) (inter r1 r2) | (l2, Some _, r2) -> join (inter l1 l2) v1 (inter r1 r2) let rec diff s1 s2 = match (s1, s2) with (Empty, t2) -> Empty | (t1, Empty) -> t1 | (Node(l1, v1, r1, _), t2) -> match split v1 t2 with (l2, None, r2) -> join (diff l1 l2) v1 (diff r1 r2) | (l2, Some _, r2) -> concat (diff l1 l2) (diff r1 r2) let rec compare_aux l1 l2 = match (l1, l2) with ([], []) -> 0 | ([], _) -> -1 | (_, []) -> 1 | (Empty :: t1, Empty :: t2) -> compare_aux t1 t2 | (Node(Empty, v1, r1, _) :: t1, Node(Empty, v2, 
r2, _) :: t2) -> let c = compare v1 v2 in if c <> 0 then c else compare_aux (r1::t1) (r2::t2) | (Node(l1, v1, r1, _) :: t1, t2) -> compare_aux (l1 :: Node(Empty, v1, r1, 0) :: t1) t2 | (t1, Node(l2, v2, r2, _) :: t2) -> compare_aux t1 (l2 :: Node(Empty, v2, r2, 0) :: t2) let compare s1 s2 = compare_aux [s1] [s2] let equal s1 s2 = compare s1 s2 = 0 let rec subset s1 s2 = match (s1, s2) with Empty, _ -> true | _, Empty -> false | Node (l1, v1, r1, _), (Node (l2, v2, r2, _) as t2) -> let c = X.compare v1 v2 in if c = 0 then subset l1 l2 && subset r1 r2 else if c < 0 then subset (Node (l1, v1, Empty, 0)) l2 && subset r1 t2 else subset (Node (Empty, v1, r1, 0)) r2 && subset l1 t2 let rec iter f = function Empty -> () | Node(l, v, r, _) -> iter f l; f v; iter f r let rec fold f s accu = match s with Empty -> accu | Node(l, v, r, _) -> fold f l (f v (fold f r accu)) let rec cardinal = function Empty -> 0 | Node(l, v, r, _) -> cardinal l + 1 + cardinal r let rec elements_aux accu = function Empty -> accu | Node(l, v, r, _) -> elements_aux (v :: elements_aux accu r) l let elements s = elements_aux [] s let rec min_elt = function Empty -> raise Not_found | Node(Empty, v, r, _) -> v | Node(l, v, r, _) -> min_elt l let rec max_elt = function Empty -> raise Not_found | Node(l, v, Empty, _) -> v | Node(l, v, r, _) -> max_elt r let choose = min_elt end coq-8.4pl2/lib/dnet.mli0000640000175000001440000001062212010532755014033 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* | Leaf | Node of btree * btree | Node of 'a * 'a *) (** datatype you want to build a dnet on *) module type Datatype = sig (** parametric datatype. ['a] is morally the recursive argument *) type 'a t (** non-recursive mapping of subterms *) val map : ('a -> 'b) -> 'a t -> 'b t val map2 : ('a -> 'b -> 'c) -> 'a t -> 'b t -> 'c t (** non-recursive folding of subterms *) val fold : ('a -> 'b -> 'a) -> 'a -> 'b t -> 'a val fold2 : ('a -> 'b -> 'c -> 'a) -> 'a -> 'b t -> 'c t -> 'a (** comparison of constructors *) val compare : unit t -> unit t -> int (** for each constructor, is it not-parametric on 'a? *) val terminal : 'a t -> bool (** [choose f w] applies f on ONE of the subterms of w *) val choose : ('a -> 'b) -> 'a t -> 'b end module type S = sig type t (** provided identifier type *) type ident (** provided metavariable type *) type meta (** provided parametrized datastructure *) type 'a structure (** returned sets of solutions *) module Idset : Set.S with type elt=ident (** a pattern is a term where each node can be a unification variable *) type 'a pattern = | Term of 'a | Meta of meta type term_pattern = 'a structure pattern as 'a val empty : t (** [add t w i] adds a new association (w,i) in t. *) val add : t -> term_pattern -> ident -> t (** [find_all t] returns all identifiers contained in t. *) val find_all : t -> Idset.t (** [fold_pattern f acc p dn] folds f on each meta of p, passing the meta and the sub-dnet under it. The result includes: - Some set if identifiers were gathered on the leafs of the term - None if the pattern contains no leaf (only Metas at the leafs). *) val fold_pattern : ('a -> (Idset.t * meta * t) -> 'a) -> 'a -> term_pattern -> t -> Idset.t option * 'a (** [find_match p t] returns identifiers of all terms matching p in t. 
*) val find_match : term_pattern -> t -> Idset.t (** set operations on dnets *) val inter : t -> t -> t val union : t -> t -> t (** apply a function on each identifier and node of terms in a dnet *) val map : (ident -> ident) -> (unit structure -> unit structure) -> t -> t end module Make : functor (T:Datatype) -> functor (Ident:Set.OrderedType) -> functor (Meta:Set.OrderedType) -> S with type ident = Ident.t and type meta = Meta.t and type 'a structure = 'a T.t coq-8.4pl2/bin/0000750000175000001440000000000012127276534012407 5ustar notinuserscoq-8.4pl2/CHANGES0000640000175000001440000034531312126510521012626 0ustar notinusersChanges from V8.4pl1 to V8.4pl2 =============================== Bug fixes - Solved bugs : #2466 #2629 #2668 #2750 #2839 #2869 #2954 #2955 #2959 #2962 #2966 #2967 #2969 #2970 #2975 #2976 #2977 #2978 #2981 #2983 #2995 #3000 #3004 #3008 - Partially fixed bugs : #2830 #2949 - Coqtop should now react more reliably when receiving interrupt signals: all the "try...with" constructs have been protected against undue handling of the Sys.Break exception. Coqide - The Windows-specific code handling the interrupt button of Coqide had to be reworked (cf. bug #2869). Now, in Win32 this button does not target a specific coqtop client, but instead sends a Ctrl-C to any process sharing its console with Coqide. To avoid awkward effects, it is recommended to launch Coqide via its icon, its menu, or in a dedicated console window. Extraction - The option Extraction AccessOpaque is now set by default, restoring compatibility of older versions of Coq (cf bug #2952). Changes from V8.4 to V8.4pl1 ============================ Bug fixes - Solved bugs : #2851 #2863 #2865 #2893 #2895 #2892 #2905 #2906 #2907 #2917 #2921 #2930 #2941 #2878 - Partially fixed bug : #2904 - Various fixes concerning coq_makefile Optimizations - "Union by rank" optimization for universes contributed by J.H. Jourdan and G. Sherrer (see union-find-and-coq-universes on gagallium blog). Libraries - Internal organisation of some modular libraries have slightly changed due to bug #2904 (GenericMinMax, OrdersTac) - No more constant "int" in ZArith/Int.v to avoid name clash with OCaml (cf bug #2878). Coqide - Improved shutdown of coqtop processes spawned by coqide (in particular added a missing close_on_exec primitive before forking). - On windows, launching coqide with the -debug option now produces a log file in the user's temporary directory. The location of this log file is displayed in the "About" message. Changes from V8.4beta2 to V8.4 ============================== Vernacular commands - The "Reset" command is now supported again in files given to coqc or Load. - "Show Script" now indents again the displayed scripts. It can also work correctly across Load'ed files if the option "Unset Atomic Load" is used. - "Open Scope" can now be given the delimiter (e.g. Z) instead of the full scope name (e.g. Z_scope). Notations - Most compatibility notations of the standard library are now tagged as (compat xyz), where xyz is a former Coq version, for instance "8.3". These notations behave as (only parsing) notations, except that they may triggers warnings (or errors) when used while Coq is not in a corresponding -compat mode. - To activate these compatibility warnings, use "Set Verbose Compat Notations" or the command-line flag -verbose-compat-notations. - For a strict mode without these compatibility notations, use "Unset Compat Notations" or the command-line flag -no-compat-notations. Tactics - An annotation "eqn:H" or "eqn:?" 
can be added to a "destruct" or "induction" to make it generate equations in the spirit of "case_eq". The former syntax "_eqn" is discontinued. - The name of the hypothesis introduced by tactic "remember" can be set via the new syntax "remember t as x eqn:H" (wish #2489). Libraries - Reals: changed definition of PI, no more axiom about sin(PI/2). - SetoidPermutation: a notion of permutation for lists modulo a setoid equality. - BigN: fixed the ocaml code doing the parsing/printing of big numbers. Changes from V8.4beta to V8.4beta2 ================================== Vernacular commands - Commands "Back" and "BackTo" are now handling the proof states. They may perform some extra steps of backtrack to avoid states where the proof state is unavailable (typically a closed proof). - The commands "Suspend" and "Resume" have been removed. - A basic Show Script has been reintroduced (no indentation). - New command "Set Parsing Explicit" for deactivating parsing (and printing) of implicit arguments (useful for teaching). - New command "Grab Existential Variables" to transform the unresolved evars at the end of a proof into goals. Tactics - Still no general "info" tactical, but new specific tactics info_auto, info_eauto, info_trivial which provides information on the proofs found by auto/eauto/trivial. Display of these details could also be activated by "Set Info Auto"/"Set Info Eauto"/"Set Info Trivial". - Details on everything tried by auto/eauto/trivial during a proof search could be obtained by "debug auto", "debug eauto", "debug trivial" or by a global "Set Debug Auto"/"Set Debug Eauto"/"Set Debug Trivial". - New command "r string" in Ltac debugger that interprets "idtac string" in Ltac code as a breakpoint and jumps to its next use. - Tactics from the Dp plugin (simplify, ergo, yices, cvc3, z3, cvcl, harvey, zenon, gwhy) have been removed, since Why2 has not been maintained for the last few years. The Why3 plugin should be a suitable replacement in most cases. Libraries - MSetRBT: a new implementation of MSets via Red-Black trees (initial contribution by Andrew Appel). - MSetAVL: for maximal sharing with the new MSetRBT, the argument order of Node has changed (this should be transparent to regular MSets users). Module System - The names of modules (and module types) are now in a fully separated namespace from ordinary definitions: "Definition E:=0. Module E. End E." is now accepted. CoqIDE - Coqide now supports the "Restart" command, and "Undo" (with a warning). Better support for "Abort". Changes from V8.3 to V8.4beta ============================= Logic - Standard eta-conversion now supported (dependent product only). - Guard condition improvement: subterm property is propagated through beta-redex blocked by pattern-matching, as in "(match v with C .. => fun x => u end) x"; this allows for instance to use "rewrite ... in ..." without breaking the guard condition. Specification language and notations - Maximal implicit arguments can now be set locally by { }. The registration traverses fixpoints and lambdas. Because there is conversion in types, maximal implicit arguments are not taken into account in partial applications (use eta expanded form with explicit { } instead). - Added support for recursive notations with binders (allows for instance to write "exists x y z, P"). - Structure/Record printing can be disable by "Unset Printing Records". In addition, it can be controlled on type by type basis using "Add Printing Record" or "Add Printing Constructor". 
- Pattern-matching compilation algorithm: in "match x, y with ... end", possible dependencies of x (or of the indices of its type) in the type of y are now taken into account. Tactics - New proof engine. - Scripts can now be structured thanks to bullets - * + and to subgoal delimitation via { }. Note: for use with Proof General, a cvs version of Proof General no older than mid-July 2011 is currently required. - Support for tactical "info" is suspended. - Support for command "Show Script" is suspended. - New tactics constr_eq, is_evar and has_evar for use in Ltac. - Removed the two-argument variant of "decide equality". - New experimental tactical "timeout ". Since is a time in second for the moment, this feature should rather be avoided in scripts meant to be machine-independent. - Fix in "destruct": removal of unexpected local definitions in context might result in some rare incompatibilities (solvable by adapting name hypotheses). - Introduction pattern "_" made more robust. - Tactic (and Eval command) vm_compute can now be interrupted via Ctrl-C. - Unification in "apply" supports unification of patterns of the form ?f x y = g(x,y) (compatibility ensured by using "Unset Tactic Pattern Unification"). It also supports (full) betaiota. - Tactic autorewrite does no longer instantiate pre-existing existential variables (theoretical source of possible incompatibilities). - Tactic "dependent rewrite" now supports equality in "sig". - Tactic omega now understands Zpred (wish #1912) and can prove any goal from a context containing an arithmetical contradiction (wish #2236). - Using "auto with nocore" disables the use of the "core" database (wish #2188). This pseudo-database "nocore" can also be used with trivial and eauto. - Tactics "set", "destruct" and "induction" accepts incomplete terms and use the goal to complete the pattern assuming it is non ambiguous. - When used on arguments with a dependent type, tactics such as "destruct", "induction", "case", "elim", etc. now try to abstract automatically the dependencies over the arguments of the types (based on initial ideas from Chung-Kil Hur, extension to nested dependencies suggested by Dan Grayson) - Tactic "injection" now failing on an equality showing no constructors while it was formerly generalizing again the goal over the given equality. - In Ltac, the "context [...]" syntax has now a variant "appcontext [...]" allowing to match partial applications in larger applications. - When applying destruct or inversion on a fixpoint hiding an inductive type, recursive calls to the fixpoint now remain folded by default (rare source of incompatibility generally solvable by adding a call to simpl). - In an ltac pattern containing a "match", a final "| _ => _" branch could be used now instead of enumerating all remaining constructors. Moreover, the pattern "match _ with _ => _ end" now allows to match any "match". A "in" annotation can also be added to restrict to a precise inductive type. - The behavior of "simpl" can be tuned using the "Arguments" vernacular. In particular constants can be marked so that they are always/never unfolded by "simpl", or unfolded only when a set of arguments evaluates to a constructor. Last one can mark a constant so that it is unfolded only if the simplified term does not expose a match in head position. Vernacular commands - It is now mandatory to have a space (or tabulation or newline or end-of-file) after a "." ending a sentence. - In SearchAbout, the [ ] delimiters are now optional. 
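  As an illustration of the "Arguments"-based tuning of "simpl" mentioned
  above (a minimal sketch using the standard constant minus):

    Arguments minus !n !m.
    (* simpl unfolds minus only when both arguments evaluate to a constructor *)
    Arguments minus n m : simpl never.
    (* alternatively, simpl is never allowed to unfold minus *)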
- New command "Add/Remove Search Blacklist ...": a Search or SearchAbout or similar query will never mention lemmas whose qualified names contain any of the declared substrings. The default blacklisted substrings are "_admitted" "_subproof" "Private_". - When the output file of "Print Universes" ends in ".dot" or ".gv", the universe graph is printed in the DOT language, and can be processed by Graphviz tools. - New command "Print Sorted Universes". - The undocumented and obsolete option "Set/Unset Boxed Definitions" has been removed, as well as syntaxes like "Boxed Fixpoint foo". - A new option "Set Default Timeout n / Unset Default Timeout". - Qed now uses information from the reduction tactics used in proof script to avoid conversion at Qed time to go into a very long computation. - New command "Show Goal ident" to display the statement of a goal, even a closed one (available from Proof General). - Command "Proof" accept a new modifier "using" to force generalization over a given list of section variables at section ending. - New command "Arguments" generalizing "Implicit Arguments" and "Arguments Scope" and that also allows to rename the parameters of a definition and to tune the behavior of the tactic "simpl". Module System - During subtyping checks, an opaque constant in a module type could now be implemented by anything of the right type, even if bodies differ. Said otherwise, with respect to subtyping, an opaque constant behaves just as a parameter. Coqchk was already implementing this, but not coqtop. - The inlining done during application of functors can now be controlled more precisely, by the annotations (no inline) or (inline at level XX). With the latter annotation, only functor parameters whose levels are lower or equal than XX will be inlined. The level of a parameter can be fixed by "Parameter Inline(30) foo". When levels aren't given, the default value is 100. One can also use the flag "Set Inline Level ..." to set a level. - Print Assumptions should now handle correctly opaque modules (#2168). - Print Module (Type) now tries to print more details, such as types and bodies of the module elements. Note that Print Module Type could be used on a module to display only its interface. The option "Set Short Module Printing" could be used to switch back to the earlier behavior were only field names were displayed. Libraries - Extension of the abstract part of Numbers, which now provide axiomatizations and results about many more integer functions, such as pow, gcd, lcm, sqrt, log2 and bitwise functions. These functions are implemented for nat, N, BigN, Z, BigZ. See in particular file NPeano for new functions about nat. - The definition of types positive, N, Z is now in file BinNums.v - Major reorganization of ZArith. The initial file ZArith/BinInt.v now contains an internal module Z implementing the Numbers interface for integers. This module Z regroups: * all functions over type Z : Z.add, Z.mul, ... * the minimal proofs of specifications for these functions : Z.add_0_l, ... * an instantation of all derived properties proved generically in Numbers : Z.add_comm, Z.add_assoc, ... A large part of ZArith is now simply compatibility notations, for instance Zplus_comm is an alias for Z.add_comm. The direct use of module Z is now recommended instead of relying on these compatibility notations. 
- Similar major reorganization of NArith, via a module N in NArith/BinNat.v - Concerning the positive datatype, BinPos.v is now in a specific directory PArith, and contains an internal submodule Pos. We regroup there functions such as Pos.add Pos.mul etc as well as many results about them. These results are here proved directly (no Number interface for strictly positive numbers). - Note that in spite of the compatibility layers, all these reorganizations may induce some marginal incompatibilies in scripts. In particular: * the "?=" notation for positive now refers to a binary function Pos.compare, instead of the infamous ternary Pcompare (now Pos.compare_cont). * some hypothesis names generated by the system may changed (typically for a "destruct Z_le_gt_dec") since naming is done after the short name of the head predicate (here now "le" in module Z instead of "Zle", etc). * the internals of Z.add has changed, now relying of Z.pos_sub. - Also note these new notations: * "= XP SP1. - The communication between CoqIDE and Coqtop is now done via a dialect of XML. - The backtrack engine of CoqIDE has been reworked, it now uses the "Backtrack" command similarly to Proof General. - The Coqide parsing of sentences has be reworked and now supports tactic delimitation via { }. - Coqide now accepts the Abort command (wish #2357). - Coqide can read coq_makefile files as "project file" and use it to set automatically options to send to coqtop. - Preference files have moved to $XDG_CONFIG_HOME/coq and accelerators are not stored as a list anymore. Tools - Coq now searches directories specified in COQPATH, $XDG_DATA_HOME/coq, $XDG_DATA_DIRS/coq, and user-contribs before the standard library. - Coq rc file has moved to $XDG_CONFIG_HOME/coq. - Major changes to coq_makefile: * mli/mlpack/mllib taken into account, ml not preproccessed anymore, ml4 work; * mlihtml generates doc of mli, install-doc install the html doc in DOCDIR with the same policy as vo in COQLIB; * More variables are given by coqtop -config, others are defined only if the users doesn't have defined them elsewhere. Consequently, generated makefile should work directly on any architecture; * Packagers can take advantage of $(DSTROOT) introduction. Installation can be made in $XDG_DATA_HOME/coq; * -arg option allows to send option as argument to coqc. Changes from V8.2 to V8.3 ========================= Rewriting tactics - Tactic "rewrite" now supports rewriting on ad hoc equalities such as eq_true. - "Hint Rewrite" now checks that the lemma looks like an equation. - New tactic "etransitivity". - Support for heterogeneous equality (JMeq) in "injection" and "discriminate". - Tactic "subst" now supports heterogeneous equality and equality proofs that are dependent (use "simple subst" for preserving compatibility). - Added support for Leibniz-rewriting of dependent hypotheses. - Renamed "Morphism" into "Proper" and "respect" into "proper_prf" (possible source of incompatibility). A partial fix is to define "Notation Morphism R f := (Proper (R%signature) f)." - New tactic variants "rewrite* by" and "autorewrite*" that rewrite respectively the first and all matches whose side-conditions are solved. - "Require Import Setoid" does not export all of "Morphisms" and "RelationClasses" anymore (possible source of incompatibility, fixed by importing "Morphisms" too). - Support added for using Chung-Kil Hur's Heq library for rewriting over heterogeneous equality (courtesy of the library's author). - Tactic "replace" supports matching terms with holes. 
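  For instance, a declaration formerly stated with "Morphism" is now
  written with "Proper" (a minimal sketch):

    Require Import Setoid Morphisms.
    Instance plus_proper : Proper (eq ==> eq ==> eq) plus.
    Proof. repeat intro; subst; reflexivity. Qed.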
Automation tactics - Tactic "intuition" now preserves inner "iff" and "not" (exceptional source of incompatibilities solvable by redefining "intuition" as "unfold iff, not in *; intuition", or, for iff only, by using "Set Intuition Iff Unfolding".) - Tactic "tauto" now proves classical tautologies as soon as classical logic (i.e. library Classical_Prop or Classical) is loaded. - Tactic "gappa" has been removed from the Dp plugin. - Tactic "firstorder" now supports the combination of its "using" and "with" options. - New "Hint Resolve ->" (or "<-") for declaring iff's as oriented hints (wish #2104). - An inductive type as argument of the "using" option of "auto/eauto/firstorder" is interpreted as using the collection of its constructors. - New decision tactic "nsatz" to prove polynomial equations by computation of Groebner bases. Other tactics - Tactic "discriminate" now performs intros before trying to discriminate an hypothesis of the goal (previously it applied intro only if the goal had the form t1<>t2) (exceptional source of incompatibilities - former behavior can be obtained by "Unset Discriminate Introduction"). - Tactic "quote" now supports quotation of arbitrary terms (not just the goal). - Tactic "idtac" now displays its "list" arguments. - New introduction patterns "*" for introducing the next block of dependent variables and "**" for introducing all quantified variables and hypotheses. - Pattern Unification for existential variables activated in tactics and new option "Unset Tactic Evars Pattern Unification" to deactivate it. - Resolution of canonical structure is now part of the tactic's unification algorithm. - New tactic "decide lemma with hyp" for rewriting decidability lemmas when one knows which side is true. - Improved support of dependent goals over objects in dependent types for "destruct" (rare source of incompatibility that can be avoided by unsetting option "Dependent Propositions Elimination"). - Tactic "exists", "eexists", "destruct" and "edestruct" supports iteration using comma-separated arguments. - Tactic names "case" and "elim" now support clauses "as" and "in" and become then synonymous of "destruct" and "induction" respectively. - A new tactic name "exfalso" for the use of 'ex-falso quodlibet' principle. This tactic is simply a shortcut for "elimtype False". - Made quantified hypotheses get the name they would have if introduced in the context (possible but rare source of incompatibilities). - When applying a component of a conjunctive lemma, "apply in" (and sequences of "apply in") now leave the side conditions of the lemmas uniformly after the main goal (possible source of rare incompatibilities). - In "simpl c" and "change c with d", c can be a pattern. - Tactic "revert" now preserves let-in's making it the exact inverse of "intro". - New tactics "clear dependent H" and "revert dependent H" that clears (resp. reverts) H and all the hypotheses that depend on H. - Ltac's pattern-matching now supports matching metavariables that depend on variables bound upwards in the pattern. Tactic definitions - Ltac definitions support Local option for non-export outside modules. - Support for parsing non-empty lists with separators in tactic notations. - New command "Locate Ltac" to get the full name of an Ltac definition. Notations - Record syntax "{|x=...; y=...|}" now works inside patterns too. - Abbreviations from non-imported module now invisible at printing time. - Abbreviations now use implicit arguments and arguments scopes for printing. 
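  As an illustration of the new "clear dependent" tactic mentioned above
  (a minimal, self-contained sketch):

    Goal forall n : nat, n = 0 -> n + 0 = 0 -> True.
    Proof.
      intros n H1 H2.
      clear dependent n.   (* removes n together with H1 and H2, which mention it *)
      exact I.
    Qed.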
- Abbreviations to pure names now strictly behave like the name they refer to (make redirections of qualified names easier). - Abbreviations for applied constant now propagate the implicit arguments and arguments scope of the underlying reference (possible source of incompatibilities generally solvable by changing such abbreviations from e.g. "Notation foo' := (foo x)" to "Notation foo' y := (foo x (y:=y))"). - The "where" clause now supports multiple notations per defined object. - Recursive notations automatically expand one step on the left for better factorization; recursion notations inner separators now ensured being tokens. - Added "Reserved Infix" as a specific shortcut of the corresponding "Reserved Notation". - Open/Close Scope command supports Global option in sections. Specification language - New support for local binders in the syntax of Record/Structure fields. - Fixpoint/CoFixpoint now support building part or all of bodies using tactics. - Binders given before ":" in lemmas and in definitions built by tactics are now automatically introduced (possible source of incompatibility that can be resolved by invoking "Unset Automatic Introduction"). - New support for multiple implicit arguments signatures per reference. Module system - Include Type is now deprecated since Include now accept both modules and module types. - Declare ML Module supports Local option. - The sharing between non-logical object and the management of the name-space has been improved by the new "Delta-equivalence" on qualified name. - The include operator has been extended to high-order structures - Sequences of Include can be abbreviated via new syntax "<+". - A module (or module type) can be given several "<:" signatures. - Interactive proofs are now permitted in module type. Functors can hence be declared as Module Type and be used later to type themselves. - A functor application can be prefixed by a "!" to make it ignore any "Inline" annotation in the type of its argument(s) (for examples of use of the new features, see libraries Structures and Numbers). - Coercions are now active only when modules are imported (use "Set Automatic Coercions Import" to get the behavior of the previous versions of Coq). Extraction - When using (Recursive) Extraction Library, the filenames are directly the Coq ones with new appropriate extensions : we do not force anymore uncapital first letters for Ocaml and capital ones for Haskell. - The extraction now tries harder to avoid code transformations that can be dangerous for the complexity. In particular many eta-expansions at the top of functions body are now avoided, clever partial applications will likely be preserved, let-ins are almost always kept, etc. - In the same spirit, auto-inlining is now disabled by default, except for induction principles, since this feature was producing more frequently weird code than clear gain. The previous behavior can be restored via "Set Extraction AutoInline". - Unicode characters in identifiers are now transformed into ascii strings that are legal in Ocaml and other languages. - Harsh support of module extraction to Haskell and Scheme: module hierarchy is flattened, module abbreviations and functor applications are expanded, module types and unapplied functors are discarded. - Less unsupported situations when extracting modules to Ocaml. In particular module parameters might be alpha-renamed if a name clash is detected. - Extract Inductive is now possible toward non-inductive types (e.g. 
nat => int) - Extraction Implicit: this new experimental command allows to mark some arguments of a function or constructor for removed during extraction, even if these arguments don't fit the usual elimination principles of extraction, for instance the length n of a vector. - Files ExtrOcaml*.v in plugins/extraction try to provide a library of common extraction commands: mapping of basics types toward Ocaml's counterparts, conversions from/to int and big_int, or even complete mapping of nat,Z,N to int or big_int, or mapping of ascii to char and string to char list (in this case recognition of ascii constants is hard-wired in the extraction). Program - Streamlined definitions using well-founded recursion and measures so that they can work on any subset of the arguments directly (uses currying). - Try to automatically clear structural fixpoint prototypes in obligations to avoid issues with opacity. - Use return type clause inference in pattern-matching as in the standard typing algorithm. - Support [Local Obligation Tactic] and [Next Obligation with tactic]. - Use [Show Obligation Tactic] to print the current default tactic. - [fst] and [snd] have maximal implicit arguments in Program now (possible source of incompatibility). Type classes - Declaring axiomatic type class instances in Module Type should be now done via new command "Declare Instance", while the syntax "Instance" now always provides a concrete instance, both in and out of Module Type. - Use [Existing Class foo] to declare foo as a class a posteriori. [foo] can be an inductive type or a constant definition. No projections or instances are defined. - Various bug fixes and improvements: support for defined fields, anonymous instances, declarations giving terms, better handling of sections and [Context]. Vernacular commands - New command "Timeout ." interprets a command and a timeout interrupts the interpretation after seconds. - New command "Compute ." is a shortcut for "Eval vm_compute in ". - New command "Fail ." interprets a command and is successful iff the command fails on an error (but not an anomaly). Handy for tests and illustration of wrong commands. - Most commands referring to constant (e.g. Print or About) now support referring to the constant by a notation string. - New option "Boolean Equality Schemes" to make generation of boolean equality automatic for datatypes (together with option "Decidable Equality Schemes", this replaces deprecated option "Equality Scheme"). - Made support for automatic generation of case analysis schemes available to user (governed by option "Set Case Analysis Schemes"). - New command "(Global?) Generalizable [All|No] Variable(s)? ident(s)?" to declare which identifiers are generalizable in `{} and `() binders. - New command "Print Opaque Dependencies" to display opaque constants in addition to all variables, parameters or axioms a theorem or definition relies on. - New command "Declare Reduction := ", allowing to write later "Eval in ...". This command accepts a Local variant. - Syntax of Implicit Type now supports more than one block of variables of a given type. - Command "Canonical Structure" now warns when it has no effects. - Commands of the form "Set X" or "Unset X" now support "Local" and "Global" prefixes. Library - Use "standard" Coq names for the properties of eq and identity (e.g. refl_equal is now eq_refl). Support for compatibility is provided. - The function Compare_dec.nat_compare is now defined directly, instead of relying on lt_eq_lt_dec. 
The earlier version is still available under the name nat_compare_alt. - Lemmas in library Relations and Reals have been homogenized a bit. - The implicit argument of Logic.eq is now maximally inserted, allowing to simply write "eq" instead of "@eq _" in morphism signatures. - Wrongly named lemmas (Zlt_gt_succ and Zlt_succ_gt) fixed (potential source of incompatibilities) - List library: - Definitions of list, length and app are now in Init/Datatypes. Support for compatibility is provided. - Definition of Permutation is now in Sorting/Permtation.v - Some other light revisions and extensions (possible source of incompatibilities solvable by qualifying names accordingly). - In ListSet, set_map has been fixed (source of incompatibilities if used). - Sorting library: - new mergesort of worst-case complexity O(n*ln(n)) made available in Mergesort.v; - former notion of permutation up to setoid from Permutation.v is deprecated and moved to PermutSetoid.v; - heapsort from Heap.v of worst-case complexity O(n*n) is deprecated; - new file Sorted.v for some definitions of being sorted. - Structure library. This new library is meant to contain generic structures such as types with equalities or orders, either in Module version (for now) or Type Classes (still to do): - DecidableType.v and OrderedType.v: initial notions for FSets/FMaps, left for compatibility but considered as deprecated. - Equalities.v and Orders.v: evolutions of the previous files, with fine-grain Module architecture, many variants, use of Equivalence and other relevant Type Classes notions. - OrdersTac.v: a generic tactic for solving chains of (in)equalities over variables. See {Nat,N,Z,P}OrderedType.v for concrete instances. - GenericMinMax.v: any ordered type can be equipped with min and max. We derived here all the generic properties of these functions. - MSets library: an important evolution of the FSets library. "MSets" stands for Modular (Finite) Sets, by contrast with a forthcoming library of Class (Finite) Sets contributed by S. Lescuyer which will be integrated with the next release of Coq. The main features of MSets are: - The use of Equivalence, Proper and other Type Classes features easing the handling of setoid equalities. - The interfaces are now stated in iff-style. Old specifications are now derived properties. - The compare functions are now pure, and return a "comparison" value. Thanks to the CompSpec inductive type, reasoning on them remains easy. - Sets structures requiring invariants (i.e. sorted lists) are built first as "Raw" sets (pure objects and separate proofs) and attached with their proofs thanks to a generic functor. "Raw" sets have now a proper interface and can be manipulated directly. Note: No Maps yet in MSets. The FSets library is still provided for compatibility, but will probably be considered as deprecated in the next release of Coq. - Numbers library: - The abstract layer (NatInt, Natural/Abstract, Integer/Abstract) has been simplified and enhance thanks to new features of the module system such as Include (see above). It has been extended to Euclidean division (three flavors for integers: Trunc, Floor and Math). - The arbitrary-large efficient numbers (BigN, BigZ, BigQ) has also been reworked. They benefit from the abstract layer improvements (especially for div and mod). Note that some specifications have slightly changed (compare, div, mod, shift{r,l}). Ring/Field should work better (true recognition of constants). Tools - Option -R now supports binding Coq root read-only. 
- New coqtop/coqc option -beautify to reformat .v files (usable e.g. to globally update notations). - New tool beautify-archive to beautify a full archive of developments. - New coqtop/coqc option -compat X.Y to simulate the general behavior of previous versions of Coq (provides e.g. support for 8.2 compatibility). Coqdoc - List have been revamped. List depth and scope is now determined by an "offside" whitespace rule. - Text may be italicized by placing it in _underscores_. - The "--index " flag changes the filename of the index. - The "--toc-depth " flag limits the depth of headers which are included in the table of contents. - The "--lib-name " flag prints " Foo" instead of "Library Foo" where library titles are called for. The "--no-lib-name" flag eliminates the extra title. - New option "--parse-comments" to allow parsing of regular "(* *)" comments. - New option "--plain-comments" to disable interpretation inside comments. - New option "--interpolate" to try and typeset identifiers in Coq escapings using the available globalization information. - New option "--external url root" to refer to external libraries. - Links to section variables and notations now supported. Internal infrastructure - To avoid confusion with the repository of user's contributions, the subdirectory "contrib" has been renamed into "plugins". On platforms supporting ocaml native dynlink, code located there is built as loadable plugins for coqtop. - An experimental build mechanism via ocamlbuild is provided. From the top of the archive, run ./configure as usual, and then ./build. Feedback about this build mechanism is most welcome. Compiling Coq on platforms such as Windows might be simpler this way, but this remains to be tested. - The Makefile system has been simplified and factorized with the ocamlbuild system. In particular "make" takes advantage of .mllib files for building .cma/.cmxa. The .vo files to compile are now listed in several vo.itarget files. Changes from V8.1 to V8.2 ========================= Language - If a fixpoint is not written with an explicit { struct ... }, then all arguments are tried successively (from left to right) until one is found that satisfies the structural decreasing condition. - New experimental typeclass system giving ad-hoc polymorphism and overloading based on dependent records and implicit arguments. - New syntax "let 'pat := b in c" for let-binding using irrefutable patterns. - New syntax "forall {A}, T" for specifying maximally inserted implicit arguments in terms. - Sort of Record/Structure, Inductive and CoInductive defaults to Type if omitted. - (Co)Inductive types can be defined as records (e.g. "CoInductive stream := { hd : nat; tl : stream }.") - New syntax "Theorem id1:t1 ... with idn:tn" for proving mutually dependent statements. - Support for sort-polymorphism on constants denoting inductive types. - Several evolutions of the module system (handling of module aliases, functorial module types, an Include feature, etc). - Prop now a subtype of Set (predicative and impredicative forms). - Recursive inductive types in Prop with a single constructor of which all arguments are in Prop is now considered to be a singleton type. It consequently supports all eliminations to Prop, Set and Type. As a consequence, Acc_rect has now a more direct proof [possible source of easily fixed incompatibility in case of manual definition of a recursor in a recursive singleton inductive type]. Vernacular commands - Added option Global to "Arguments Scope" for section surviving. 
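  For instance (a minimal sketch; the section and definition names are
  arbitrary):

    Section S.
      Definition twice (n : nat) := n + n.
      Global Arguments Scope twice [nat_scope].
    End S.
    (* the scope annotation on twice survives the closing of the section *)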
- Added option "Unset Elimination Schemes" to deactivate the automatic generation of elimination schemes. - Modification of the Scheme command so you can ask for the name to be automatically computed (e.g. Scheme Induction for nat Sort Set). - New command "Combined Scheme" to build combined mutual induction principles from existing mutual induction principles. - New command "Scheme Equality" to build a decidable (boolean) equality for simple inductive datatypes and a decision property over this equality (e.g. Scheme Equality for nat). - Added option "Set Equality Scheme" to make automatic the declaration of the boolean equality when possible. - Source of universe inconsistencies now printed when option "Set Printing Universes" is activated. - New option "Set Printing Existential Instances" for making the display of existential variable instances explicit. - Support for option "[id1 ... idn]", and "-[id1 ... idn]", for the "compute"/"cbv" reduction strategy, respectively meaning reduce only, or everything but, the constants id1 ... idn. "lazy" alone or followed by "[id1 ... idn]", and "-[id1 ... idn]" also supported, meaning apply all of beta-iota-zeta-delta, possibly restricting delta. - New command "Strategy" to control the expansion of constants during conversion tests. It generalizes commands Opaque and Transparent by introducing a range of levels. Lower levels are assigned to constants that should be expanded first. - New options Global and Local to Opaque and Transparent. - New command "Print Assumptions" to display all variables, parameters or axioms a theorem or definition relies on. - "Add Rec LoadPath" now provides references to libraries using partially qualified names (this holds also for coqtop/coqc option -R). - SearchAbout supports negated search criteria, reference to logical objects by their notation, and more generally search of subterms. - "Declare ML Module" now allows to import .cmxs files when Coq is compiled in native code with a version of OCaml that supports native Dynlink (>= 3.11). - Specific sort constraints on Record now taken into account. - "Print LoadPath" supports a path argument to filter the display. Libraries - Several parts of the libraries are now in Type, in particular FSets, SetoidList, ListSet, Sorting, Zmisc. This may induce a few incompatibilities. In case of trouble while fixing existing development, it may help to simply declare Set as an alias for Type (see file SetIsType). - New arithmetical library in theories/Numbers. It contains: * an abstract modular development of natural and integer arithmetics in Numbers/Natural/Abstract and Numbers/Integer/Abstract * an implementation of efficient computational bounded and unbounded integers that can be mapped to processor native arithmetics. See Numbers/Cyclic/Int31 for 31-bit integers and Numbers/Natural/BigN for unbounded natural numbers and Numbers/Integer/BigZ for unbounded integers. * some proofs that both older libraries Arith, ZArith and NArith and newer BigN and BigZ implement the abstract modular development. This allows in particular BigN and BigZ to already come with a large database of basic lemmas and some generic tactics (ring), This library has still an experimental status, as well as the processor-acceleration mechanism, but both its abstract and its concrete parts are already quite usable and could challenge the use of nat, N and Z in actual developments. 
Moreover, an extension of this framework to rational numbers is ongoing, and an efficient Q structure is already provided (see Numbers/Rational/BigQ), but this part is currently incomplete (no abstract layer and generic lemmas). - Many changes in FSets/FMaps. In practice, compatibility with earlier version should be fairly good, but some adaptations may be required. * Interfaces of unordered ("weak") and ordered sets have been factorized thanks to new features of Coq modules (in particular Include), see FSetInterface. Same for maps. Hints in these interfaces have been reworked (they are now placed in a "set" database). * To allow full subtyping between weak and ordered sets, a field "eq_dec" has been added to OrderedType. The old version of OrderedType is now called MiniOrderedType and functor MOT_to_OT allow to convert to the new version. The interfaces and implementations of sets now contain also such a "eq_dec" field. * FSetDecide, contributed by Aaron Bohannon, contains a decision procedure allowing to solve basic set-related goals (for instance, is a point in a particular set ?). See FSetProperties for examples. * Functors of properties have been improved, especially the ones about maps, that now propose some induction principles. Some properties of fold need less hypothesis. * More uniformity in implementations of sets and maps: they all use implicit arguments, and no longer export unnecessary scopes (see bug #1347) * Internal parts of the implementations based on AVL have evolved a lot. The main files FSetAVL and FMapAVL are now much more lightweight now. In particular, minor changes in some functions has allowed to fully separate the proofs of operational correctness from the proofs of well-balancing: well-balancing is critical for efficiency, but not anymore for proving that these trees implement our interfaces, hence we have moved these proofs into appendix files FSetFullAVL and FMapFullAVL. Moreover, a few functions like union and compare have been modified in order to be structural yet efficient. The appendix files also contains alternative versions of these few functions, much closer to the initial Ocaml code and written via the Function framework. - Library IntMap, subsumed by FSets/FMaps, has been removed from Coq Standard Library and moved into a user contribution Cachan/IntMap - Better computational behavior of some constants (eq_nat_dec and le_lt_dec more efficient, Z_lt_le_dec and Positive_as_OT.compare transparent, ...) (exceptional source of incompatibilities). - Boolean operators moved from module Bool to module Datatypes (may need to rename qualified references in script and force notations || and && to be at levels 50 and 40 respectively). - The constructors xI and xO of type positive now have postfix notations "~1" and "~0", allowing to write numbers in binary form easily, for instance 6 is 1~1~0 and 4*p is p~0~0 (see BinPos.v). - Improvements to NArith (Nminus, Nmin, Nmax), and to QArith (in particular a better power function). - Changes in ZArith: several additional lemmas (used in theories/Numbers), especially in Zdiv, Znumtheory, Zpower. Moreover, many results in Zdiv have been generalized: the divisor may simply be non-null instead of strictly positive (see lemmas with name ending by "_full"). An alternative file ZOdiv proposes a different behavior (the one of Ocaml) when dividing by negative numbers. - Changes in Arith: EqNat and Wf_nat now exported from Arith, some constructions on nat that were outside Arith are now in (e.g. iter_nat). 
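  As a small illustration of the reworked FSets interfaces mentioned above
  (basic usage only; the module name NatSet is arbitrary):

    Require Import FSetAVL OrderedTypeEx.
    Module NatSet := FSetAVL.Make Nat_as_OT.
    Check Nat_as_OT.eq_dec.    (* the "eq_dec" field now required by OrderedType *)
    Eval compute in NatSet.elements (NatSet.add 2 (NatSet.add 1 NatSet.empty)).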
- In SetoidList, eqlistA now expresses that two lists have similar elements at the same position, while the predicate previously called eqlistA is now equivlistA (this one only states that the lists contain the same elements, nothing more). - Changes in Reals: * Most statement in "sigT" (including the completeness axiom) are now in "sig" (in case of incompatibility, use proj1_sig instead of projT1, sig instead of sigT, etc). * More uniform naming scheme (identifiers in French moved to English, consistent use of 0 -- zero -- instead of O -- letter O --, etc). * Lemma on prod_f_SO is now on prod_f_R0. * Useless hypothesis of ln_exists1 dropped. * New Rlogic.v states a few logical properties about R axioms. * RIneq.v extended and made cleaner. - Slight restructuration of the Logic library regarding choice and classical logic. Addition of files providing intuitionistic axiomatizations of descriptions: Epsilon.v, Description.v and IndefiniteDescription.v. - Definition of pred and minus made compatible with the structural decreasing criterion for use in fixpoints. - Files Relations/Rstar.v and Relations/Newman.v moved out to the user contribution repository (contribution CoC_History). New lemmas about transitive closure added and some bound variables renamed (exceptional risk of incompatibilities). - Syntax for binders in terms (e.g. for "exists") supports anonymous names. Notations, coercions, implicit arguments and type inference - More automation in the inference of the return clause of dependent pattern-matching problems. - Experimental allowance for omission of the clauses easily detectable as impossible in pattern-matching problems. - Improved inference of implicit arguments. - New options "Set Maximal Implicit Insertion", "Set Reversible Pattern Implicit", "Set Strongly Strict Implicit" and "Set Printing Implicit Defensive" for controlling inference and use of implicit arguments. - New modifier in "Implicit Arguments" to force an implicit argument to be maximally inserted. - New modifier of "Implicit Arguments" to enrich the set of implicit arguments. - New options Global and Local to "Implicit Arguments" for section surviving or non export outside module. - Level "constr" moved from 9 to 8. - Structure/Record now printed as Record (unless option Printing All is set). - Support for parametric notations defining constants. - Insertion of coercions below product types refrains to unfold constants (possible source of incompatibility). - New support for fix/cofix in notations. Tactic Language - Second-order pattern-matching now working in Ltac "match" clauses (syntax for second-order unification variable is "@?X"). - Support for matching on let bindings in match context using syntax "H := body" or "H := body : type". - Ltac accepts integer arguments (syntax is "ltac:nnn" for nnn an integer). - The general sequence tactical "expr_0 ; [ expr_1 | ... | expr_n ]" is extended so that at most one expr_i may have the form "expr .." or just "..". Also, n can be different from the number of subgoals generated by expr_0. In this case, the value of expr (or idtac in case of just "..") is applied to the intermediate subgoals to make the number of tactics equal to the number of subgoals. 
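  For instance, a branch of the form "expr .." is repeated to fit the
  number of remaining subgoals (a minimal sketch):

    Goal 0 = 0 /\ 1 = 1 /\ 2 = 2.
    Proof. (repeat apply conj); [ reflexivity .. ]. Qed.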
- A name used as the name of the parameter of a lemma (like f in "apply f_equal with (f:=t)") is now interpreted as an Ltac variable if such a variable exists (this is a possible source of incompatibility and it can be fixed by renaming the variables of an Ltac function into names that do not clash with the parameter names of the lemmas used in the tactic). - New syntax "Ltac tac ::= ..." to rebind a tactic to a new expression. - "let rec ... in ... " now supported for expressions without explicit parameters; interpretation is lazy, contrary to "let ... in ..."; hence, the "rec" keyword can be used to turn the argument of a "let ... in ..." into a lazy one. - Patterns for hypotheses types in "match goal" are now interpreted in type_scope. - A bound variable whose name is not used elsewhere now serves as metavariable in "match" and it gets instantiated by an identifier (this allows e.g. extracting the name of a statement like "exists x, P x"). - New printing of the Ltac call trace for better debugging. Tactics - New tactics "apply -> term", "apply <- term", "apply -> term in ident", "apply <- term in ident" for applying equivalences (iff). - Slight improvement of the hnf and simpl tactics when applied on expressions with explicit occurrences of match or fix. - New tactics "eapply in", "erewrite", "erewrite in". - New tactics "ediscriminate", "einjection", "esimplify_eq". - Tactics "discriminate", "injection", "simplify_eq" now support any term as argument. Clause "with" is also supported. - Unfoldable references can be given by a notation's string rather than by name in unfold. - The "with" arguments are now typed using information from the current goal: this allows support for coercions and more inference of implicit arguments. - Application of "f_equal"-style lemmas works better. - Tactics elim, case, destruct and induction now support variants eelim, ecase, edestruct and einduction. - Tactics destruct and induction now support the "with" option and the "in" clause option. If the option "in" is used, an equality is added to remember the term to which the induction or case analysis applied (possible source of parsing incompatibilities when destruct or induction is part of a let-in expression in Ltac; extra parentheses are then required). - New support for the "as" clause in tactics "apply in" and "eapply in". - Some new intro patterns: * intro pattern "?A" generates a fresh name based on A. Caveat about a slight loss of compatibility: some intro patterns no longer need a space between them. In particular "intros ?a?b" used to be legal and equivalent to "intros ? a ? b". Now it is still legal but equivalent to "intros ?a ?b". * intro pattern "(A & ... & Y & Z)" is a synonym for "(A,...,(Y,Z))" for right-associative constructs like /\ or exists. - Several syntax extensions concerning "rewrite" (a small example combining some of them is sketched below): * "rewrite A,B,C" can be used to rewrite A, then B, then C. These rewrites occur only on the first subgoal: in particular, side-conditions of the "rewrite A" are not affected by the "rewrite B,C". * "rewrite A by tac" applies tac to all side-conditions generated by the "rewrite A". * "rewrite A at n" selects the occurrences to rewrite: rewriting only happens at the n-th exact occurrence of the first successful matching of A in the goal. * "rewrite 3 A" or "rewrite 3!A" is equivalent to "rewrite A,A,A". * "rewrite !A" means rewriting A as long as possible (and at least once). * "rewrite 3?A" means rewriting A at most three times. * "rewrite ?A" means rewriting A as long as possible (possibly never).
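  For instance, the following hedged sketch combines two of these rewrite variants (plus_0_r and plus_comm are the standard Arith lemmas):
    Require Import Arith.
    Goal forall n m : nat, (n + 0) + (m + 0) = m + n.
    Proof.
      intros n m.
      rewrite !plus_0_r.             (* "!" rewrites as long as possible: both occurrences *)
      rewrite plus_comm; reflexivity.
    Qed.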
* many of the above extensions can be combined with each other. - Introduction patterns better respect the structure of context in presence of missing or extra names in nested disjunction-conjunction patterns [possible source of rare incompatibilities]. - New syntax "rename a into b, c into d" for "rename a into b; rename c into d" - New tactics "dependent induction/destruction H [ generalizing id_1 .. id_n ]" to do induction-inversion on instantiated inductive families à la BasicElim. - Tactics "apply" and "apply in" now able to reason modulo unfolding of constants (possible source of incompatibility in situations where apply may fail, e.g. as argument of a try or a repeat and in a ltac function); versions that do not unfold are renamed into "simple apply" and "simple apply in" (usable for compatibility or for automation). - Tactics "apply" and "apply in" now able to traverse conjunctions and to select the first matching lemma among the components of the conjunction; tactic "apply" also able to apply lemmas of conclusion an empty type. - Tactic "apply" now supports application of several lemmas in a row. - Tactics "set" and "pose" can set functions using notation "(f x1..xn := c)". - New tactic "instantiate" (without argument). - Tactic firstorder "with" and "using" options have their meaning swapped for consistency with auto/eauto (source of incompatibility). - Tactic "generalize" now supports "at" options to specify occurrences and "as" options to name the quantified hypotheses. - New tactic "specialize H with a" or "specialize (H a)" allows to transform in-place a universally-quantified hypothesis (H : forall x, T x) into its instantiated form (H : T a). Nota: "specialize" was in fact there in earlier versions of Coq, but was undocumented, and had a slightly different behavior. - New tactic "contradict H" can be used to solve any kind of goal as long as the user can provide afterwards a proof of the negation of the hypothesis H. If H is already a negation, say ~T, then a proof of T is asked. If the current goal is a negation, say ~U, then U is saved in H afterwards, hence this new tactic "contradict" extends earlier tactic "swap", which is now obsolete. - Tactics f_equal is now done in ML instead of Ltac: it now works on any equality of functions, regardless of the arity of the function. - New options "before id", "at top", "at bottom" for tactics "move"/"intro". - Some more debug of reflexive omega (romega), and internal clarifications. Moreover, romega now has a variant "romega with *" that can be also used on non-Z goals (nat, N, positive) via a call to a translation tactic named zify (its purpose is to Z-ify your goal...). This zify may also be used independantly of romega. - Tactic "remember" now supports an "in" clause to remember only selected occurrences of a term. - Tactic "pose proof" supports name overwriting in case of specialization of an hypothesis. - Semi-decision tactic "jp" for first-order intuitionistic logic moved to user contributions (subsumed by "firstorder"). Program - Moved useful tactics in theories/Program and documented them. - Add Program.Basics which contains standard definitions for functional programming (id, apply, flip...) - More robust obligation handling, dependent pattern-matching and well-founded definitions. - New syntax " dest term as pat in term " for destructing objects using an irrefutable pattern while keeping equalities (use this instead of "let" in Programs). 
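  Illustrating the new "specialize" tactic described under Tactics above (a minimal hedged sketch):
    Goal (forall n : nat, n + 0 = n) -> 2 + 0 = 2.
    Proof.
      intros H.
      specialize (H 2).              (* H is now the instantiated hypothesis 2 + 0 = 2 *)
      exact H.
    Qed.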
- Program CoFixpoint is accepted, Program Fixpoint uses the new way to infer which argument decreases structurally. - Program Lemma, Axiom etc... now permit to have obligations in the statement iff they can be automatically solved by the default tactic. - Renamed "Obligations Tactic" command to "Obligation Tactic". - New command "Preterm [ of id ]" to see the actual term fed to Coq for debugging purposes. - New option "Transparent Obligations" to control the declaration of obligations as transparent or opaque. All obligations are now transparent by default, otherwise the system declares them opaque if possible. - Changed the notations "left" and "right" to "in_left" and "in_right" to hide the proofs in standard disjunctions, to avoid breaking existing scripts when importing Program. Also, put them in program_scope. Type Classes - New "Class", "Instance" and "Program Instance" commands to define classes and instances documented in the reference manual. - New binding construct " [ Class_1 param_1 .. param_n, Class_2 ... ] " for binding type classes, usable everywhere. - New command " Print Classes " and " Print Instances some_class " to print tables for typeclasses. - New default eauto hint database "typeclass_instances" used by the default typeclass instance search tactic. - New theories directory "theories/Classes" for standard typeclasses declarations. Module Classes.RelationClasses is a typeclass port of Relation_Definitions plus a generic development of algebra on n-ary heterogeneous predicates. Setoid rewriting - Complete (and still experimental) rewrite of the tactic based on typeclasses. The old interface and semantics are almost entirely respected, except: - Import Setoid is now mandatory to be able to call setoid_replace and declare morphisms. - "-->", "++>" and "==>" are now right associative notations declared at level 55 in scope signature_scope. Their introduction may break existing scripts that defined them as notations with different levels. - One needs to use [Typeclasses unfold [cst]] if [cst] is used as an abbreviation hiding products in types of morphisms, e.g. if ones redefines [relation] and declares morphisms whose type mentions [relation]. - The [setoid_rewrite]'s semantics change when rewriting with a lemma: it can rewrite two different instantiations of the lemma at once. Use [setoid_rewrite H at 1] for (almost) the usual semantics. [setoid_rewrite] will also try to rewrite under binders now, and can succeed on different terms than before. In particular, it will unify under let-bound variables. When called through [rewrite], the semantics are unchanged though. - [Add Morphism term : id] has different semantics when used with parametric morphism: it will try to find a relation on the parameters too. The behavior has also changed with respect to default relations: the most recently declared Setoid/Relation will be used, the documentation explains how to customize this behavior. - Parametric Relation and Morphism are declared differently, using the new [Add Parametric] commands, documented in the manual. - Setoid_Theory is now an alias to Equivalence, scripts building objects of type Setoid_Theory need to unfold (or "red") the definitions of Reflexive, Symmetric and Transitive in order to get the same goals as before. Scripts which introduced variables explicitely will not break. - The order of subgoals when doing [setoid_rewrite] with side-conditions is always the same: first the new goal, then the conditions. 
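  A minimal sketch of the new "Class" and "Instance" commands mentioned in the Type Classes entries above; the Monoid class and its nat instance are purely illustrative:
    Class Monoid (A : Type) := {
      munit : A;
      mop : A -> A -> A
    }.
    Instance nat_monoid : Monoid nat := {
      munit := 0;
      mop := plus
    }.
    Check (mop 1 2).                 (* the nat instance is found by instance search *)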
- New standard library modules: Classes.Morphisms declares standard morphisms on refl/sym/trans relations; Classes.Morphisms_Prop declares morphisms on propositional connectives and Classes.Morphisms_Relations on generalized predicate connectives; Classes.Equivalence declares notations and tactics related to equivalences; and Classes.SetoidTactics defines the setoid_replace tactics and some support for the "Add *" interface, notably the tactic applied automatically before each "Add Morphism" proof. - User-defined subrelations are supported, as well as higher-order morphisms and rewriting under binders. The tactic is also extensible entirely in Ltac. The documentation has been updated to cover these features. - [setoid_rewrite] and [rewrite] now support the [at] modifier to select occurrences to rewrite, and both use the [setoid_rewrite] code, even when rewriting with Leibniz equality if occurrences are specified. Extraction - Improved behavior of the Caml extraction of modules: name clashes should not happen anymore. - The command Extract Inductive now has a syntax for infix notations. This allows, in particular, mapping Coq lists and pairs onto Caml ones: Extract Inductive list => list [ "[]" "(::)" ]. Extract Inductive prod => "(*)" [ "(,)" ]. - In pattern matchings, a default pattern "| _ -> ..." is now used whenever possible if several branches are identical. For instance, functions corresponding to decidability of equalities are now linear instead of quadratic. - A new instruction Extraction Blacklist id1 .. idn allows preventing filename conflicts with existing code, for instance when extracting module List to Ocaml (a combined sketch is given at the end of this section). CoqIDE - The CoqIDE font defaults to monospace so that indentation is meaningful. - CoqIDE supports nested goals and any other kind of declaration in the middle of a proof. - Undoing non-tactic commands in CoqIDE works faster. - New CoqIDE menu for activating the display of various implicit information. - Added the possibility to choose the location of tabs in CoqIDE (in Edit->Preferences->Misc). - New Open and Save As dialogs in CoqIDE which filter *.v files. Tools - New stand-alone .vo files verifier "coqchk". - Extended -I coqtop/coqc option to specify a logical dir: "-I dir -as coqdir". - New coqtop/coqc option -exclude-dir to exclude subdirs for option -R. - The binary "parser" has been renamed to "coq-parser". - Improved coqdoc and dump of globalization information to give more meta-information on identifiers. All categories of Coq definitions are supported, which makes typesetting trivial in the generated documentation. Support for hyperlinking and indexing developments in the tex output has been implemented as well. Miscellaneous - Coq installation provides enough files so that extensions written in Ocaml can be compiled without the Coq sources (this assumes Ocaml 3.10 and Camlp5). - New commands "Set Whelp Server" and "Set Whelp Getter" to customize the Whelp search tool. - Syntax of "Test Printing Let ref" and "Test Printing If ref" changed into "Test Printing Let for ref" and "Test Printing If for ref". - An overhauled build system (new Makefiles); see dev/doc/build-system.txt. - Add -browser option to configure script. - Build a shared library for the C part of Coq, and use it by default on non-(Windows or MacOS) systems. Bytecode executables are now pure. The behaviour is configurable with -coqrunbyteflags, -coqtoolsbyteflags and -custom configure options. - Complexity tests can be skipped by setting the environment variable COQTEST_SKIPCOMPLEXITY.
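  A combined hedged sketch of the extraction entries above; the OCaml mapping follows the Extract Inductive example given earlier, and the extracted constant (rev) is only illustrative:
    Require Import List.
    Extract Inductive bool => "bool" [ "true" "false" ].
    Extract Inductive list => "list" [ "[]" "(::)" ].
    Extraction Blacklist List.    (* avoid a clash with OCaml's own List module *)
    Recursive Extraction rev.     (* prints the OCaml code of rev and its dependencies *)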
Changes from V8.1gamma to V8.1 ============================== Bug fixes - Many bugs have been fixed (cf the coq-bugs web page). Tactics - The new tactics ring and ring_simplify, and the new tactic field, are now able to handle exponentiation by a positive integer constant. Tactic ring on Z and R, and field on R, handle power (this may lead to incompatibilities with V8.1gamma). - Tactic field_simplify is now applicable in hypotheses. - New field_simplify_eq for simplifying field equations into ring equations. - Tactics ring, ring_simplify, field, field_simplify and field_simplify_eq are all able to apply user-given equations to rewrite monomials on the fly (see documentation). Libraries - New file ConstructiveEpsilon.v defining an epsilon operator and proving the axiom of choice constructively for a countable domain and a decidable predicate. Changes from V8.1beta to V8.1gamma ================================== Syntax - Changed parsing precedence of let/in and fun constructions of Ltac: let x := t in e1; e2 is now parsed as let x := t in (e1;e2). Language and commands - Added sort-polymorphism for definitions in Type (but finally abandoned). - Support for implicit arguments in the types of parameters in (co-)fixpoints and (co-)inductive declarations. - Improved type inference: use as much general information as possible before applying irreversible unification heuristics (this allows e.g. inferring the predicate in "(exist _ 0 (refl_equal 0) : {n:nat | n=0 })"). - Support for Miller-Pfenning pattern unification in type synthesis (e.g. can infer P such that P x y = phi(x,y)). - Support for the "where" clause in cofixpoint definitions. - New option "Set Printing Universes" for making Type levels explicit. Tactics - Improved implementation of the ring and field tactics. For compatibility reasons, the previous tactics are renamed as legacy ring and legacy field, but they should be considered as deprecated. - New declarative mathematical proof language. - Support for argument lists of arbitrary length in Tactic Notation. - [rewrite ... in H] now fails if [H] is used either in an hypothesis or in the goal. - The semantics of [rewrite ... in *] has been slightly modified (see doc). - Support for the "as" clause in tactic injection. - New forward-reasoning tactic "apply in". - The Ltac fresh operator now builds names from a concatenation of its arguments. - New Ltac tactic "remember" to abstract over a subterm and keep an equality. - Support for Miller-Pfenning pattern unification in apply/rewrite/... (may lead to a few incompatibilities - generally now useless tactic calls). Bug fixes - Fix for notations involving basic "match" expressions. - Numerous other bugs solved (a few fixes may lead to incompatibilities). Changes from V8.0 to V8.1beta ============================= Logic - Added sort-polymorphism on inductive families. - Allowance for recursively non uniform parameters in inductive types. Syntax - No more support for version 7 syntax and for translation to version 8 syntax. - In fixpoints, the { struct ... } annotation is no longer mandatory when only one of the arguments has an inductive type. - Added disjunctive patterns in match-with patterns. - Support for primitive interpretation of string literals. - Extended support for Unicode ranges. Vernacular commands - Added "Print Ltac qualid" to print a user-defined tactic. - Added "Print Rewrite HintDb" to print the content of a DB used by autorewrite. - Added "Print Canonical Projections". - Added "Example" as a synonym of "Definition". - Added "Proposition" and "Corollary" as extra synonyms of "Lemma".
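  A tiny hedged sketch of two of the new vernacular commands above ("Print Ltac" and the "Example" synonym); the tactic and statement are illustrative only:
    Ltac solve_refl := intros; reflexivity.
    Print Ltac solve_refl.            (* prints the user-defined tactic *)
    Example double_two : 2 + 2 = 4.
    Proof. solve_refl. Qed.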
- New command "Whelp" to send requests to the Helm database of proofs formalized in the Calculus of Inductive Constructions. - Command "functional induction" has been re-implemented from the new "Function" command. Ltac and tactic syntactic extensions - New primitive "external" for communication with tool external to Coq - New semantics for "match t with": if a clause returns a tactic, it is now applied to the current goal. If it fails, the next clause or next matching subterm is tried (i.e. it behaves as "match goal with" does). The keyword "lazymatch" can be used to delay the evaluation of tactics occurring in matching clauses. - Hint base names can be parametric in auto and trivial. - Occurrence values can be parametric in unfold, pattern, etc. - Added entry constr_may_eval for tactic extensions. - Low-priority term printer made available in ML-written tactic extensions. - "Tactic Notation" extended to allow notations of tacticals. Tactics - New implementation and generalization of [setoid_]* (setoid_rewrite, setoid_symmetry, setoid_transitivity, setoid_reflexivity and autorewite). New syntax for declaring relations and morphisms (old syntax still working with minor modifications, but deprecated). - New implementation (still experimental) of the ring tactic with a built-in notion of coefficients and a better usage of setoids. - New conversion tactic "vm_compute": evaluates the goal (or an hypothesis) with a call-by-value strategy, using the compiled version of terms. - When rewriting H where H is not directly a Coq equality, search first H for a registered setoid equality before starting to reduce in H. This is unlikely to break any script. Should this happen nonetheless, one can insert manually some "unfold ... in H" before rewriting. - Fixed various bugs about (setoid) rewrite ... in ... (in particular #1101) - "rewrite ... in" now accepts a clause as place where to rewrite instead of juste a simple hypothesis name. For instance: rewrite H in H1,H2 |- * means rewrite H in H1; rewrite H in H2; rewrite H rewrite H in * |- will do try rewrite H in Hi for all hypothesis Hi <> H. - Added "dependent rewrite term" and "dependent rewrite term in hyp". - Added "autorewrite with ... in hyp [using ...]". - Tactic "replace" now accepts a "by" tactic clause. - Added "clear - id" to clear all hypotheses except the ones depending in id. - The argument of Declare Left Step and Declare Right Step is now a term (it used to be a reference). - Omega now handles arbitrary precision integers. - Several bug fixes in Reflexive Omega (romega). - Idtac can now be left implicit in a [...|...] construct: for instance, [ foo | | bar ] stands for [ foo | idtac | bar ]. - Fixed a "fold" bug (non critical but possible source of incompatibilities). - Added classical_left and classical_right which transforms |- A \/ B into ~B |- A and ~A |- B respectively. - Added command "Declare Implicit Tactic" to set up a default tactic to be used to solve unresolved subterms of term arguments of tactics. - Better support for coercions to Sortclass in tactics expecting type arguments. - Tactic "assert" now accepts "as" intro patterns and "by" tactic clauses. - New tactic "pose proof" that generalizes "assert (id:=p)" with intro patterns. - New introduction pattern "?" for letting Coq choose a name. - Introduction patterns now support side hypotheses (e.g. intros [|] on "(nat -> nat) -> nat" works). - New introduction patterns "->" and "<-" for immediate rewriting of introduced hypotheses. 
- Introduction patterns coming after non trivial introduction patterns now force full introduction of the first pattern (e.g. "intros [[|] p]" on "nat->nat->nat" now behaves like "intros [[|?] p]") - Added "eassumption". - Added option 'using lemmas' to auto, trivial and eauto. - Tactic "congruence" is now complete for its intended scope (ground equalities and inequalities with constructors). Furthermore, it tries to equates goal and hypotheses. - New tactic "rtauto" solves pure propositional logic and gives a reflective version of the available proof. - Numbering of "pattern", "unfold", "simpl", ... occurrences in "match with" made consistent with the printing of the return clause after the term to match in the "match-with" construct (use "Set Printing All" to see hidden occurrences). - Generalization of induction "induction x1...xn using scheme" where scheme is an induction principle with complex predicates (like the ones generated by function induction). - Some small Ltac tactics has been added to the standard library (file Tactics.v): * f_equal : instead of using the different f_equalX lemmas * case_eq : a "case" without loss of information. An equality stating the current situation is generated in every sub-cases. * swap : for a negated goal ~B and a negated hypothesis H:~A, swap H asks you to prove A from hypothesis B * revert : revert H is generalize H; clear H. Extraction - All type parts should now disappear instead of sometimes producing _ (for instance in Map.empty). - Haskell extraction: types of functions are now printed, better unsafeCoerce mechanism, both for hugs and ghc. - Scheme extraction improved, see http://www.pps.jussieu.fr/~letouzey/scheme. - Many bug fixes. Modules - Added "Locate Module qualid" to get the full path of a module. - Module/Declare Module syntax made more uniform. - Added syntactic sugar "Declare Module Export/Import" and "Module Export/Import". - Added syntactic sugar "Module M(Export/Import X Y: T)" and "Module Type M(Export/Import X Y: T)" (only for interactive definitions) - Construct "with" generalized to module paths: T with (Definition|Module) M1.M2....Mn.l := l'. Notations - Option "format" aware of recursive notations. - Added insertion of spaces by default in recursive notations w/o separators. - No more automatic printing box in case of user-provided printing "format". - New notation "exists! x:A, P" for unique existence. - Notations for specific numerals now compatible with generic notations of numerals (e.g. "1" can be used to denote the unit of a group without hiding 1%nat) Libraries - New library on String and Ascii characters (contributed by L. Thery). - New library FSets+FMaps of finite sets and maps. - New library QArith on rational numbers. - Small extension of Zmin.V, new Zmax.v, new Zminmax.v. - Reworking and extension of the files on classical logic and description principles (possible incompatibilities) - Few other improvements in ZArith potentially exceptionally breaking the compatibility (useless hypothesys of Zgt_square_simpl and Zlt_square_simpl removed; fixed names mentioning letter O instead of digit 0; weaken premises in Z_lt_induction). - Restructuration of Eqdep_dec.v and Eqdep.v: more lemmas in Type. - Znumtheory now contains a gcd function that can compute within Coq. - More lemmas stated on Type in Wf.v, removal of redundant Acc_iter and Acc_iter2. - Change of the internal names of lemmas in OmegaLemmas. 
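  Illustrating the small Ltac tactic f_equal mentioned under Tactics above (a minimal hedged sketch):
    Goal forall (f : nat -> nat -> nat) a b c d,
      a = c -> b = d -> f a b = f c d.
    Proof.
      intros f a b c d Hac Hbd.
      f_equal; assumption.           (* f_equal reduces the goal to a = c and b = d *)
    Qed.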
- Acc in Wf.v and clos_refl_trans in Relation_Operators.v now rely on the allowance for recursively non uniform parameters (possible source of incompatibilities: explicit pattern-matching on these types may require to remove the occurrence associated to their recursively non uniform parameter). - Coq.List.In_dec has been set transparent (this may exceptionally break proof scripts, set it locally opaque for compatibility). - More on permutations of lists in List.v and Permutation.v. - List.v has been much expanded. - New file SetoidList.v now contains results about lists seen with respect to a setoid equality. - Library NArith has been expanded, mostly with results coming from Intmap (for instance a bitwise xor), plus also a bridge between N and Bitvector. - Intmap has been reorganized. In particular its address type "addr" is now N. User contributions known to use Intmap have been adapted accordingly. If you're using this library please contact us. A wrapper FMapIntMap now presents Intmap as a particular implementation of FMaps. New developments are strongly encouraged to use either this wrapper or any other implementations of FMap instead of using directly this obsolete Intmap. Tools - New semantics for coqtop options ("-batch" expects option "-top dir" for loading vernac file that contains definitions). - Tool coq_makefile now removes custom targets that are file names in "make clean" - New environment variable COQREMOTEBROWSER to set the command invoked to start the remote browser both in Coq and coqide. Standard syntax: "%s" is the placeholder for the URL. Changes from V8.0beta to V8.0 ============================= Vernacular commands - New option "Set Printing All" to deactivate all high-level forms of printing (implicit arguments, coercions, destructing let, if-then-else, notations, projections) - "Functional Scheme" and "Functional Induction" extended to polymorphic types and dependent types - Notation now allows recursive patterns, hence recovering parts of the fonctionalities of pre-V8 Grammar/Syntax commands - Command "Print." discontinued. - Redundant syntax "Implicit Arguments On/Off" discontinued New syntax - Semantics change of the if-then-else construction in new syntax: "if c then t1 else t2" now stands for "match c with c1 _ ... _ => t1 | c2 _ ... _ => t2 end" with no dependency of t1 and t2 in the arguments of the constructors; this may cause incompatibilities for files translated using coq 8.0beta Interpretation scopes - Delimiting key %bool for bool_scope added - Import no more needed to activate argument scopes from a module Tactics and the tactic Language - Semantics of "assert" is now consistent with the reference manual - New tactics stepl and stepr for chaining transitivity steps - Tactic "replace ... with ... 
in" added - Intro patterns now supported in Ltac (parsed with prefix "ipattern:") Executables and tools - Added option -top to change the name of the toplevel module "Top" - Coqdoc updated to new syntax and now part of Coq sources - XML exportation tool now exports the structure of vernacular files (cf chapter 13 in the reference manual) User contributions - User contributions have been updated to the new syntax Bug fixes - Many bugs have been fixed (cf coq-bugs web page) Changes from V8.0beta old syntax to V8.0beta ============================================ New concrete syntax - A completely new syntax for terms - A more uniform syntax for tactics and the tactic language - A few syntactic changes for vernacular commands - A smart automatic translator translating V8.0 files in old syntax to files valid for V8.0 Syntax extensions - "Grammar" for terms disappears - "Grammar" for tactics becomes "Tactic Notation" - "Syntax" disappears - Introduction of a notion of interpretation scope allowing to use the same notations in various contexts without using specific delimiters (e.g the same expression "4<=3+x" is interpreted either in "nat", "positive", "N" (previously "entier"), "Z", "R", depending on which interpretation scope is currently open) [see documentation for details] - Notation now mandatorily requires a precedence and associativity (default was to set precedence to 1 and associativity to none) Revision of the standard library - Many lemmas and definitions names have been made more uniform mostly in Arith, NArith, ZArith and Reals (e.g : "times" -> "Pmult", "times_sym" -> "Pmult_comm", "Zle_Zmult_pos_right" -> "Zmult_le_compat_r", "SUPERIEUR" -> "Gt", "ZERO" -> "Z0") - Order and names of arguments of basic lemmas on nat, Z, positive and R have been made uniform. - Notions of Coq initial state are declared with (strict) implicit arguments - eq merged with eqT: old eq disappear, new eq (written =) is old eqT and new eqT is syntactic sugar for new eq (notation == is an alias for = and is written as it, exceptional source of incompatibilities) - Similarly, ex, ex2, all, identity are merged with exT, exT2, allT, identityT - Arithmetical notations for nat, positive, N, Z, R, without needing any backquote or double-backquotes delimiters. - In Lists: new concrete notations; argument of nil is now implicit - All changes in the library are taken in charge by the translator Semantical changes during translation - Recursive keyword set by default (and no longer needed) in Tactic Definition - Set Implicit Arguments is strict by default in new syntax - reductions in hypotheses of the form "... in H" now apply to the type also if H is a local definition - etc Gallina - New syntax of the form "Inductive bool : Set := true, false : bool." for enumerated types - Experimental syntax of the form p.(fst) for record projections (activable with option "Set Printing Projections" which is recognized by the translator) Known problems of the automatic translation - iso-latin-1 characters are no longer supported: move your files to 7-bits ASCII or unicode before translation (swith to unicode is automatically done if a file is loaded and saved again by coqide) - Renaming in ZArith: incompatibilities in Coq user contribs due to merging names INZ, from Reals, and inject_nat. 
- Renaming and new lemmas in ZArith: may clash with names used by users - Restructuration of ZArith: replace requirement of specific modules in ZArith by "Require Import ZArith_base" or "Require Import ZArith" - Some implicit arguments must be made explicit before translation: typically for "length nil", the implicit argument of length must be made explicit - Grammar rules, Infix notations and V7.4 Notations must be updated wrt the new scheme for syntactic extensions (see translator documentation) - Unsafe for annotation Cases when constructors coercions are used or when annotations are eta-reduced predicates Changes from V7.4 to V8.0beta old syntax ======================================== Logic - Set now predicative by default - New option -impredicative-set to set Set impredicative - The standard library doesn't need impredicativity of Set and is compatible with the classical axioms which contradict Set impredicativity Syntax for arithmetic - Notation "=" and "<>" in Z and R are no longer implicitly in Z or R (with possible introduction of a coercion), use ...=... or ...<>... instead - Locate applied to a simple string (e.g. "+") searches for all notations containing this string Vernacular commands - "Declare ML Module" now allows to import .cma files. This avoids to use a bunch of "Declare ML Module" statements when using several ML files. - "Set Printing Width n" added, allows to change the size of width printing. - "Implicit Variables Type x,y:t" (new syntax: "Implicit Types x y:t") assigns default types for binding variables. - Declarations of Hints and Notation now accept a "Local" flag not to be exported outside the current file even if not in section - "Print Scopes" prints all notations - New command "About name" for light printing of type, implicit arguments, etc. - New command "Admitted" to declare incompletely proven statement as axioms - New keyword "Conjecture" to declare an axiom intended to be provable - SearchAbout can now search for lemmas referring to more than one constant and on substrings of the name of the lemma - "Print Implicit" displays the implicit arguments of a constant - Locate now searches for all names having a given suffix - New command "Functional Scheme" for building an induction principle from a function defined by case analysis and fix. Commands - new coqtop/coqc option -dont-load-proofs not to load opaque proofs in memory Implicit arguments - Inductive in sections declared with implicits now "discharged" with implicits (like constants and variables) - Implicit Arguments flags are now synchronous with reset - New switch "Unset/Set Printing Implicits" (new syntax: "Unset/Set Printing Implicit") to globally control printing of implicits Grammar extensions - Many newly supported UTF-8 encoded unicode blocks - Greek letters (0380-03FF), Hebrew letters (U05D0-05EF), letter-like symbols (2100-214F, that includes double N,Z,Q,R), prime signs (from 2080-2089) and characters from many written languages are valid in identifiers - mathematical operators (2200-22FF), supplemental mathematical operators (2A00-2AFF), miscellaneous technical (2300-23FF that includes sqrt symbol), miscellaneous symbols (2600-26FF), arrows (2190-21FF and 2900-297F), invisible mathematical operators (from 2080-2089), ... are valid symbols Library - New file about the factorial function in Arith - An additional elimination Acc_iter for Acc, simplier than Acc_rect. This new elimination principle is used for definition well_founded_induction. 
- New library NArith on binary natural numbers - R is now of type Set - Restructuration in ZArith library - "true_sub" used in Zplus now a definition, not a local one (source of incompatibilities in proof referring to true_sub, may need extra Unfold) - Some lemmas about minus moved from fast_integer to Arith/Minus.v (le_minus, lt_mult_left) (theoretical source of incompatibilities) - Several lemmas moved from auxiliary.v and zarith_aux.v to fast_integer.v (theoretical source of incompatibilities) - Variables names of iff_trans changed (source of incompatibilities) - ZArith lemmas named OMEGA something or fast_ something, and lemma new_var are now out of ZArith (except OMEGA2) - Redundant ZArith lemmas have been renamed: for the following pairs, use the second name (Zle_Zmult_right2, Zle_mult_simpl), (OMEGA2, Zle_0_plus), (Zplus_assoc_l, Zplus_assoc), (Zmult_one, Zmult_1_n), (Zmult_assoc_l, Zmult_assoc), (Zmult_minus_distr, Zmult_Zminus_distr_l) (add_un_double_moins_un_xO, is_double_moins_un), (Rlt_monotony_rev,Rlt_monotony_contra) (source of incompatibilities) - Few minor changes (no more implicit arguments in Zmult_Zminus_distr_l and Zmult_Zminus_distr_r, lemmas moved from Zcomplements to other files) (rare source of incompatibilities) - New lemmas provided by users added Tactic language - Fail tactic now accepts a failure message - Idtac tactic now accepts a message - New primitive tactic "FreshId" (new syntax: "fresh") to generate new names - Debugger prints levels of calls Tactics - Replace can now replace proofs also - Fail levels are now decremented at "Match Context" blocks only and if the right-hand-side of "Match term With" are tactics, these tactics are never evaluated immediately and do not induce backtracking (in contrast with "Match Context") - Quantified names now avoid global names of the current module (like Intro names did) [source of rare incompatibilities: 2 changes in the set of user contribs] - NewDestruct/NewInduction accepts intro patterns as introduction names - NewDestruct/NewInduction now work for non-inductive type using option "using" - A NewInduction naming bug for inductive types with functional arguments (e.g. the accessibility predicate) has been fixed (source of incompatibilities) - Symmetry now applies to hypotheses too - Inversion now accept option "as [ ... ]" to name the hypotheses - Contradiction now looks also for contradictory hypotheses stating ~A and A (source of incompatibility) - "Contradiction c" try to find an hypothesis in context which contradicts the type of c - Ring applies to new library NArith (require file NArithRing) - Field now works on types in Set - Auto with reals now try to replace le by ge (Rge_le is no longer an immediate hint), resulting in shorter proofs - Instantiate now works in hyps (syntax : Instantiate in ...) - Some new tactics : EConstructor, ELeft, Eright, ESplit, EExists - New tactic "functional induction" to perform case analysis and induction following the definition of a function. - Clear now fails when trying to remove a local definition used by a constant appearing in the current goal Extraction (See details in plugins/extraction/CHANGES) - The old commands: (Recursive) Extraction Module M. are now: (Recursive) Extraction Library M. To use these commands, M should come from a library M.v - The other syntax Extraction & Recursive Extraction now accept module names as arguments. 
Bugs - see coq-bugs server for the complete list of fixed bugs Miscellaneous - Implicit parameters of inductive types definition now taken into account for infering other implicit arguments Incompatibilities - Persistence of true_sub (4 incompatibilities in Coq user contributions) - Variable names of some constants changed for a better uniformity (2 changes in Coq user contributions) - Naming of quantified names in goal now avoid global names (2 occurrences) - NewInduction naming for inductive types with functional arguments (no incompatibility in Coq user contributions) - Contradiction now solve more goals (source of 2 incompatibilities) - Merge of eq and eqT may exceptionally result in subgoals now solved automatically - Redundant pairs of ZArith lemmas may have different names: it may cause "Apply/Rewrite with" to fail if using the first name of a pair of redundant lemmas (this is solved by renaming the variables bound by "with"; 3 incompatibilities in Coq user contribs) - ML programs referring to constants from fast_integer.v must use "Coqlib.gen_constant_modules Coqlib.zarith_base_modules" instead Changes from V7.3.1 to V7.4 =========================== Symbolic notations - Introduction of a notion of scope gathering notations in a consistent set; a notation sets has been developped for nat, Z and R (undocumented) - New command "Notation" for declaring notations simultaneously for parsing and printing (see chap 10 of the reference manual) - Declarations with only implicit arguments now handled (e.g. the argument of nil can be set implicit; use !nil to refer to nil without arguments) - "Print Scope sc" and "Locate ntn" allows to know to what expression a notation is bound - New defensive strategy for printing or not implicit arguments to ensure re-type-checkability of the printed term - In Grammar command, the only predefined non-terminal entries are ident, global, constr and pattern (e.g. nvar, numarg disappears); the only allowed grammar types are constr and pattern; ast and ast list are no longer supported; some incompatibilities in Grammar: when a syntax is a initial segment of an other one, Grammar does not work, use Notation Library - Lemmas in Set from Compare_dec.v (le_lt_dec, ...) and Wf_nat.v (lt_wf_rec, ...) are now transparent. This may be source of incompatibilities. - Syntactic Definitions Fst, Snd, Ex, All, Ex2, AllT, ExT, ExT2, ProjS1, ProjS2, Error, Value and Except are turned to notations. They now must be applied (incompatibilities only in unrealistic cases). - More efficient versions of Zmult and times (30% faster) - Reals: the library is now divided in 6 parts (Rbase, Rfunctions, SeqSeries, Rtrigo, Ranalysis, Integration). New tactics: Sup and RCompute. See Reals.v for details. Modules - Beta version, see doc chap 2.5 for commands and chap 5 for theory Language - Inductive definitions now accept ">" in constructor types to declare the corresponding constructor as a coercion. - Idem for assumptions declarations and constants when the type is mentionned. - The "Coercion" and "Canonical Structure" keywords now accept the same syntax as "Definition", i.e. "hyps :=c (:t)?" or "hyps :t". - Theorem-like declaration now accepts the syntax "Theorem thm [x:t;...] : u". - Remark's and Fact's now definitively behave as Theorem and Lemma: when sections are closed, the full name of a Remark or a Fact has no longer a section part (source of incompatibilities) - Opaque Local's (i.e. 
built by tactics and ended by Qed), do not survive section closing any longer; as a side-effect, Opaque Local's now appear in the local context of proofs; their body is hidden though (source of incompatibilities); use one of Remark/Fact/Lemma/Theorem instead to simulate the old behaviour of Local (the section part of the name is not kept though) ML tactic and vernacular commands - "Grammar tactic" and "Grammar vernac" of type "ast" are no longer supported (only "Grammar tactic simple_tactic" of type "tactic" remains available). - Concrete syntax for ML written vernacular commands and tactics is now declared at ML level using camlp4 macros TACTIC EXTEND et VERNAC COMMAND EXTEND. - "Check n c" now "n:Check c", "Eval n ..." now "n:Eval ..." - "Proof with T" (* no documentation *) - SearchAbout id - prints all theorems which contain id in their type Tactic definitions - Static globalisation of identifiers and global references (source of incompatibilities, especially, Recursive keyword is required for mutually recursive definitions). - New evaluation semantics: no more partial evaluation at definition time; evaluation of all Tactic/Meta Definition, even producing terms, expect a proof context to be evaluated (especially "()" is no longer needed). - Debugger now shows the nesting level and the reasons of failure Tactics - Equality tactics (Rewrite, Reflexivity, Symmetry, Transitivity) now understand JM equality - Simpl and Change now apply to subterms also - "Simpl f" reduces subterms whose head constant is f - Double Induction now referring to hypotheses like "Intros until" - "Inversion" now applies also on quantified hypotheses (naming as for Intros until) - NewDestruct now accepts terms with missing hypotheses - NewDestruct and NewInduction now accept user-provided elimination scheme - NewDestruct and NewInduction now accept user-provided introduction names - Omega could solve goals such as ~`x=y` but failed when the hypothesis was unfolded to `x < y` -> False. This is fixed. In addition, it can also recognize 'False' in the hypothesis and use it to solve the goal. - Coercions now handled in "with" bindings - "Subst x" replaces all ocurrences of x by t in the goal and hypotheses when an hypothesis x=t or x:=t or t=x exists - Fresh names for Assert and Pose now based on collision-avoiding Intro naming strategy (exceptional source of incompatibilities) - LinearIntuition (* no documentation *) - Unfold expects a correct evaluable argument - Clear expects existing hypotheses Extraction (See details in plugins/extraction/CHANGES and README): - An experimental Scheme extraction is provided. - Concerning Ocaml, extracted code is now ensured to always type-check, thanks to automatic inserting of Obj.magic. - Experimental extraction of Coq new modules to Ocaml modules. Proof rendering in natural language - Export of theories to XML for publishing and rendering purposes now includes proof-trees (see http://www.cs.unibo.it/helm) Miscellaneous - Printing Coercion now used through the standard keywords Set/Add, Test, Print - "Print Term id" is an alias for "Print id" - New switch "Unset/Set Printing Symbols" to control printing of symbolic notations - Two new variants of implicit arguments are available - "Unset/Set Contextual Implicits" tells to consider implicit also the arguments inferable from the context (e.g. for nil or refl_eq) - "Unset/Set Strict Implicits" tells to consider implicit only the arguments that are inferable in any case (i.e. 
arguments that occurs as argument of rigid constants in the type of the remaining arguments; e.g. the witness of an existential is not strict since it can vanish when applied to a predicate which does not use its argument) Incompatibilities - "Grammar tactic ... : ast" and "Grammar vernac ... : ast" are no longer supported, use TACTIC EXTEND and VERNAC COMMAND EXTEND on the ML-side instead - Transparency of le_lt_dec and co (leads to some simplification in proofs; in some cases, incompatibilites is solved by declaring locally opaque the relevant constant) - Opaque Local do not now survive section closing (rename them into Remark/Lemma/... to get them still surviving the sections; this renaming allows also to solve incompatibilites related to now forbidden calls to the tactic Clear) - Remark and Fact have no longer (very) long names (use Local instead in case of name conflict) Bugs - Improved localisation of errors in Syntactic Definitions - Induction principle creation failure in presence of let-in fixed (#238) - Inversion bugs fixed (#212 and #220) - Omega bug related to Set fixed (#180) - Type-checking inefficiency of nested destructuring let-in fixed (#216) - Improved handling of let-in during holes resolution phase (#239) Efficiency - Implementation of a memory sharing strategy reducing memory requirements by an average ratio of 3. Changes from V7.3 to V7.3.1 =========================== Bug fixes - Corrupted Field tactic and Match Context tactic construction fixed - Checking of names already existing in Assert added (PR#182) - Invalid argument bug in Exact tactic solved (PR#183) - Colliding bound names bug fixed (PR#202) - Wrong non-recursivity test for Record fixed (PR#189) - Out of memory/seg fault bug related to parametric inductive fixed (PR#195) - Setoid_replace/Setoid_rewrite bug wrt "==" fixed Misc - Ocaml version >= 3.06 is needed to compile Coq from sources - Simplification of fresh names creation strategy for Assert, Pose and LetTac (PR#192) Changes from V7.2 to V7.3 ========================= Language - Slightly improved compilation of pattern-matching (slight source of incompatibilities) - Record's now accept anonymous fields "_" which does not build projections - Changes in the allowed elimination sorts for certain class of inductive definitions : an inductive definition without constructors of Sort Prop can be eliminated on sorts Set and Type A "singleton" inductive definition (one constructor with arguments in the sort Prop like conjunction of two propositions or equality) can be eliminated directly on sort Type (In V7.2, only the sorts Prop and Set were allowed) Tactics - New tactic "Rename x into y" for renaming hypotheses - New tactics "Pose x:=u" and "Pose u" to add definitions to local context - Pattern now working on partially applied subterms - Ring no longer applies irreversible congruence laws of mult but better applies congruence laws of plus (slight source of incompatibilities). - Field now accepts terms to be simplified as arguments (as for Ring). This extension has been also implemented using the toplevel tactic language. - Intuition does no longer unfold constants except "<->" and "~". It can be parameterized by a tactic. 
It also can introduce dependent product if needed (source of incompatibilities) - "Match Context" now matching more recent hypotheses first and failing only on user errors and Fail tactic (possible source of incompatibilities) - Tactic Definition's without arguments now allowed in Coq states - Better simplification and discrimination made by Inversion (source of incompatibilities) Bugs - "Intros H" now working like "Intro H" trying first to reduce if not a product - Forward dependencies in Cases now taken into account - Known bugs related to Inversion and let-in's fixed - Bug unexpected Delta with let-in now fixed Extraction (details in plugins/extraction/CHANGES or documentation) - Signatures of extracted terms are now mostly expunged from dummy arguments. - Haskell extraction is now operational (tested & debugged). Standard library - Some additions in [ZArith]: three files (Zcomplements.v, Zpower.v and Zlogarithms.v) moved from plugins/omega in order to be more visible, one Zsgn function, more induction principles (Wf_Z.v and tail of Zcomplements.v), one more general Euclid theorem - Peano_dec.v and Compare_dec.v now part of Arith.v Tools - new option -dump-glob to coqtop to dump globalizations (to be used by the new documentation tool coqdoc; see http://www.lri.fr/~filliatr/coqdoc) User Contributions - CongruenceClosure (congruence closure decision procedure) [Pierre Corbineau, ENS Cachan] - MapleMode (an interface to embed Maple simplification procedures over rational fractions in Coq) [David Delahaye, Micaela Mayero, Chalmers University] - Presburger: A formalization of Presburger's algorithm [Laurent Thery, INRIA Sophia Antipolis] - Chinese has been rewritten using Z from ZArith as datatype ZChinese is the new version, Chinese the obsolete one [Pierre Letouzey, LRI Orsay] Incompatibilities - Ring: exceptional incompatibilities (1 above 650 in submitted user contribs, leading to a simplification) - Intuition: does not unfold any definition except "<->" and "~" - Cases: removal of some extra Cases in configurations of the form "Cases ... of C _ => ... | _ D => ..." (effects on 2 definitions of submitted user contributions necessitating the removal of now superfluous proof steps in 3 different proofs) - Match Context, in case of incompatibilities because of a now non trapped error (e.g. Not_found or Failure), use instead tactic Fail to force Match Context trying the next clause - Inversion: better simplification and discrimination may occasionally lead to less subgoals and/or hypotheses and different naming of hypotheses - Unification done by Apply/Elim has been changed and may exceptionally lead to incompatible instantiations - Peano_dec.v and Compare_dec.v parts of Arith.v make Auto more powerful if these files were not already required (1 occurrence of this in submitted user contribs) Changes from V7.1 to V7.2 ========================= Language - Automatic insertion of patterns for local definitions in the type of the constructors of an inductive types (for compatibility with V6.3 let-in style) - Coercions allowed in Cases patterns - New declaration "Canonical Structure id = t : I" to help resolution of equations of the form (proj ?)=a; if proj(e)=a then a is canonically equipped with the remaining fields in e, i.e. ? 
is instantiated by e Tactics - New tactic "ClearBody H" to clear the body of definitions in local context - New tactic "Assert H := c" for forward reasoning - Slight improvement in naming strategy for NewInduction/NewDestruct - Intuition/Tauto do not perform useless unfolding and work up to conversion Extraction (details in plugins/extraction/CHANGES or documentation) - Syntax changes: there are no more options inside the extraction commands. New commands for customization and options have been introduced instead. - More optimizations on extracted code. - Extraction tests are now embedded in 14 user contributions. Standard library - In [Relations], Rstar.v and Newman.v now axiom-free. - In [Sets], Integers.v now based on nat - In [Arith], more lemmas in Min.v, new file Max.v, tail-recursive plus and mult added to Plus.v and Mult.v respectively - New directory [Sorting] with a proof of heapsort (dragged from 6.3.1 lib) - In [Reals], more lemmas in Rbase.v, new lemmas on square, square root and trigonometric functions (R_sqr.v - Rtrigo.v); a complementary approach and new theorems about continuity and derivability in Ranalysis.v; some properties in plane geometry such as translation, rotation or similarity in Rgeom.v; finite sums and Chasles property in Rsigma.v Bugs - Confusion between implicit args of locals and globals of same base name fixed - Various incompatibilities wrt inference of "?" in V6.3.1 fixed - Implicits in infix section variables bug fixed - Known coercions bugs fixed - Apply "universe anomaly" bug fixed - NatRing now working - "Discriminate 1", "Injection 1", "Simplify_eq 1" now working - NewInduction bugs with let-in and recursively dependent hypotheses fixed - Syntax [x:=t:T]u now allowed as mentioned in documentation - Bug with recursive inductive types involving let-in fixed - Known pattern-matching bugs fixed - Known Cases elimination predicate bugs fixed - Improved errors messages for pattern-matching and projections - Better error messages for ill-typed Cases expressions Incompatibilities - New naming strategy for NewInduction/NewDestruct may affect 7.1 compatibility - Extra parentheses may exceptionally be needed in tactic definitions. 
- Coq extensions written in Ocaml need to be updated (see dev/changements.txt for a description of the main changes in the interface files of V7.2) - New behaviour of Intuition/Tauto may exceptionally lead to incompatibilities ---------------------------------------------------------------------------- Changes from V6.3.1 and V7.0 to V7.1 ==================================== Notes: - items followed by (**) are important sources of incompatibilities - items followed by (*) may exceptionally be sources of incompatibilities - items followed by (+) have been introduced in version 7.0 Main novelties ============== References are to Coq V7.1 reference manual - New primitive let-in construct (see sections 1.2.8 and ) - Long names (see sections 2.6 and 2.7) - New high-level tactic language (see chapter 10) - Improved search facilities (see section 5.2) - New extraction algorithm managing the Type level (see chapter 17) - New rewriting tactic for arbitrary equalities (see chapter 19) - New tactic Field to decide equalities on commutative fields (see 7.11) - New tactic Fourier to solve linear inequalities on reals numbers (see 7.11) - New tactics for induction/case analysis in "natural" style (see 7.7) - Deep restructuration of the code (safer, simpler and more efficient) - Export of theories to XML for publishing and rendering purposes (see http://www.cs.unibo.it/helm) Details of changes ================== Language: new "let-in" construction ----------------------------------- - New construction for local definitions (let-in) with syntax [x:=u]t (*)(+) - Local definitions allowed in Record (a.k.a. record à la Randy Pollack) Language: long names -------------------- - Each construction has a unique absolute names built from a base name, the name of the module in which they are defined (Top if in coqtop), and possibly an arbitrary long sequence of directory (e.g. "Coq.Lists.PolyList.flat_map" where "Coq" means that "flat_map" is part of Coq standard library, "Lists" means it is defined in the Lists library and "PolyList" means it is in the file Polylist) (+) - Constructions can be referred by their base name, or, in case of conflict, by a "qualified" name, where the base name is prefixed by the module name (and possibly by a directory name, and so on). A fully qualified name is an absolute name which always refer to the construction it denotes (to preserve the visibility of all constructions, no conflict is allowed for an absolute name) (+) - Long names are available for modules with the possibility of using the directory name as a component of the module full name (with option -R to coqtop and coqc, or command Add LoadPath) (+) - Improved conflict resolution strategy (the Unix PATH model), allowing more constructions to be referred just by their base name Language: miscellaneous ----------------------- - The names of variables for Record projections _and_ for induction principles (e.g. sum_ind) is now based on the first letter of their type (main source of incompatibility) (**)(+) - Most typing errors have now a precise location in the source (+) - Slightly different mechanism to solve "?" 
(*)(+) - More arguments may be considered implicit at section closing (*)(+) - Bug with identifiers ended by a number greater than 2^30 fixed (+) - New visibility discipline for Remark, Fact and Local: Remark's and Fact's now survive at the end of section, but are only accessible using a qualified names as soon as their strength expires; Local's disappear and are moved into local definitions for each construction persistent at section closing Language: Cases --------------- - Cases no longer considers aliases inferable from dependencies in types (*)(+) - A redundant clause in Cases is now an error (*) Reduction --------- - New reduction flags "Zeta" and "Evar" in Eval Compute, for inlining of local definitions and instantiation of existential variables - Delta reduction flag does not perform Zeta and Evar reduction any more (*) - Constants declared as opaque (using Qed) can no longer become transparent (a constant intended to be alternatively opaque and transparent must be declared as transparent (using Defined)); a risk exists (until next Coq version) that Simpl and Hnf reduces opaque constants (*) New tactics ----------- - New set of tactics to deal with types equipped with specific equalities (a.k.a. Setoids, e.g. nat equipped with eq_nat) [by C. Renard] - New tactic Assert, similar to Cut but expected to be more user-friendly - New tactic NewDestruct and NewInduction intended to replace Elim and Induction, Case and Destruct in a more user-friendly way (see restrictions in the reference manual) - New tactic ROmega: an experimental alternative (based on reflexion) to Omega [by P. Crégut] - New tactic language Ltac (see reference manual) (+) - New versions of Tauto and Intuition, fully rewritten in the new Ltac language; they run faster and produce more compact proofs; Tauto is fully compatible but, in exchange of a better uniformity, Intuition is slightly weaker (then use Tauto instead) (**)(+) - New tactic Field to decide equalities on commutative fields (as a special case, it works on real numbers) (+) - New tactic Fourier to solve linear inequalities on reals numbers [by L. Pottier] (+) - New tactics dedicated to real numbers: DiscrR, SplitRmult, SplitAbsolu (+) Changes in existing tactics --------------------------- - Reduction tactics in local definitions apply only to the body - New syntax of the form "Compute in Type of H." to require a reduction on the types of local definitions - Inversion, Injection, Discriminate, ... apply also on the quantified premises of a goal (using the "Intros until" syntax) - Decompose has been fixed but hypotheses may get different names (*)(+) - Tauto now manages uniformly hypotheses and conclusions of the form "t=t" which all are considered equivalent to "True". Especially, Tauto now solves goals of the form "H : ~ t = t |- A". - The "Let" tactic has been renamed "LetTac" and is now based on the primitive "let-in" (+) - Elim can no longer be used with an elimination schema different from the one defined at definition time of the inductive type. To overload an elimination schema, use "Elim using " (*)(+) - Simpl no longer unfolds the recursive calls of a mutually defined fixpoint (*)(+) - Intro now fails if the hypothesis name already exists (*)(+) - "Require Prolog" is no longer needed (i.e. 
it is available by default) (*)(+) - Unfold now fails on a non unfoldable identifier (*)(+) - Unfold also applies on definitions of the local context - AutoRewrite now deals only with the main goal and it is the purpose of Hint Rewrite to deal with generated subgoals (+) - Redundant or incompatible instantiations in Apply ... with ... are now correctly managed (+) Efficiency ---------- - Excessive memory uses specific to V7.0 fixed - Sizes of .vo files vary a lot compared to V6.3 (from -30% to +300% depending on the developments) - An improved reduction strategy for lazy evaluation - A more economical mechanism to ensure logical consistency at the Type level; warning: this is experimental and may produce "universes" anomalies (please report) Concrete syntax of constructions -------------------------------- - Only identifiers starting with "_" or a letter, and followed by letters, digits, "_" or "'" are allowed (e.g. "$" and "@" are no longer allowed) (*) - A multiple binder like (a:A)(a,b:(P a))(Q a) is no longer parsed as (a:A)(a0:(P a))(b:(P a))(Q a0) but as (a:A)(a0:(P a))(b:(P a0))(Q a0) (*)(+) - A dedicated syntax has been introduced for Reals (e.g ``3+1/x``) (+) - Pretty-printing of Infix notations fixed. (+) Parsing and grammar extension ----------------------------- - More constraints when writing ast - "{...}" and the macros $LIST, $VAR, etc. now expect a metavariable (an identifier starting with $) (*) - identifiers should starts with a letter or "_" and be followed by letters, digits, "_" or "'" (other characters are still supported but it is not advised to use them) (*)(+) - Entry "command" in "Grammar" and quotations (<<...>> stuff) is renamed "constr" as in "Syntax" (+) - New syntax "[" sentence_1 ... sentence_n"]." to group sentences (useful for Time and to write grammar rules abbreviating several commands) (+) - The default parser for actions in the grammar rules (and for patterns in the pretty-printing rules) is now the one associated to the grammar (i.e. vernac, tactic or constr); no need then for quotations as in <:vernac:<...>>; to return an "ast", the grammar must be explicitly typed with tag ": ast" or ": ast list", or if a syntax rule, by using <<...>> in the patterns (expression inside these angle brackets are parsed as "ast"); for grammars other than vernac, tactic or constr, you may explicitly type the action with tags ": constr", ": tactic", or ":vernac" (**)(+) - Interpretation of names in Grammar rule is now based on long names, which allows to avoid problems (or sometimes tricks;) related to overloaded names (+) New commands ------------ - New commands "Print XML All", "Show XML Proof", ... to show or export theories to XML to be used with Helm's publishing and rendering tools (see http://www.cs.unibo.it/helm) (by Claudio Sacerdoti Coen) (+) - New commands to manually set implicit arguments (+) - "Implicits ident." to activate the implicit arguments mode just for ident - "Implicits ident [num1 num2 ...]." to explicitly give which arguments have to be considered as implicit - New SearchPattern/SearchRewrite (by Yves Bertot) (+) - New commands "Debug on"/"Debug off" to activate/deactivate the tactic language debugger (+) - New commands to map physical paths to logical paths (+) - Add LoadPath physical_dir as logical_dir - Add Rec LoadPath physical_dir as logical_dir Changes in existing commands ---------------------------- - Generalization of the usage of qualified identifiers in tactics and commands about globals, e.g. 
Decompose, Eval Delta; Hints Unfold, Transparent, Require - Require synchronous with Reset; Require's scope stops at Section ending (*) - For a module indirectly loaded by a "Require" but not exported, the command "Import module" turns the constructions defined in the module accessible by their short name, and activates the Grammar, Syntax, Hint, ... declared in the module (+) - The scope of the "Search" command can be restricted to some modules (+) - Final dot in command (full stop/period) must be followed by a blank (newline, tabulation or whitespace) (+) - Slight restriction of the syntax for Cbv Delta: if present, option [-myconst] must immediately follow the Delta keyword (*)(+) - SearchIsos currently not supported - Add ML Path is now implied by Add LoadPath (+) - New names for the following commands (+) AddPath -> Add LoadPath Print LoadPath -> Print LoadPath DelPath -> Remove LoadPath AddRecPath -> Add Rec LoadPath Print Path -> Print Coercion Paths Implicit Arguments On -> Set Implicit Arguments Implicit Arguments Off -> Unset Implicit Arguments Begin Silent -> Set Silent End Silent -> Unset Silent. Tools ----- - coqtop (+) - Two executables: coqtop.byte and coqtop.opt (if supported by the platform) - coqtop is a link to the more efficient executable (coqtop.opt if present) - option -full is obsolete (+) - do_Makefile renamed into coq_makefile (+) - New option -R to coqtop and coqc to map a physical directory to a logical one (+) - coqc no longer needs to create a temporary file - No more warning if no initialization file .coqrc exists Extraction ---------- - New algorithm for extraction able to deal with "Type" (+) (by J.-C. Filliâtre and P. Letouzey) Standard library ---------------- - New library on maps on integers (IntMap, contributed by Jean Goubault) - New lemmas about integer numbers [ZArith] - New lemmas and a "natural" syntax for reals [Reals] (+) - Exc/Error/Value renamed into Option/Some/None (*) New user contributions ---------------------- - Constructive complex analysis and the Fundamental Theorem of Algebra [FTA] (Herman Geuvers, Freek Wiedijk, Jan Zwanenburg, Randy Pollack, Henk Barendregt, Nijmegen) - A new axiomatization of ZFC set theory [Functions_in_ZFC] (C. Simpson, Sophia-Antipolis) - Basic notions of graph theory [GRAPHS-BASICS] (Jean Duprat, Lyon) - A library for floating-point numbers [Float] (Laurent Théry, Sylvie Boldo, Sophia-Antipolis) - Formalisation of CTL and TCTL temporal logic [CtlTctl] (Carlos Daniel Luna,Montevideo) - Specification and verification of the Railroad Crossing Problem in CTL and TCTL [RailroadCrossing] (Carlos Daniel Luna,Montevideo) - P-automaton and the ABR algorithm [PAutomata] (Christine Paulin, Emmanuel Freund, Orsay) - Semantics of a subset of the C language [MiniC] (Eduardo Giménez, Emmanuel Ledinot, Suresnes) - Correctness proofs of the following imperative algorithms: Bresenham line drawing algorithm [Bresenham], Marché's minimal edition distance algorithm [Diff] (Jean-Christophe Filliâtre, Orsay) - Correctness proofs of Buchberger's algorithm [Buchberger] and RSA cryptographic algorithm [Rsa] (Laurent Théry, Sophia-Antipolis) - Correctness proof of Stalmarck tautology checker algorithm [Stalmarck] (Laurent Théry, Pierre Letouzey, Sophia-Antipolis) LocalWords: recommended coq-8.4pl2/COPYRIGHT0000640000175000001440000000122611756406457013142 0ustar notinusers The Coq proof assistant Copyright 1999-2012 The Coq development team, INRIA, CNRS, University Paris Sud, University Paris 7, Ecole Polytechnique. 
This product includes also software developed by Pierre Crégut, France Telecom R & D (plugins/omega and plugins/romega) Pierre Courtieu and Julien Forest, CNAM (plugins/funind) Claudio Sacerdoti Coen, HELM, University of Bologna, (plugins/xml) Pierre Corbineau, Radbout University, Nijmegen (declarative mode) John Harrison, University of Cambridge (csdp wrapper) The file CREDITS contains a list of contributors. The credits section in the Reference Manual details contributions. coq-8.4pl2/INSTALL.doc0000640000175000001440000000362511107334326013432 0ustar notinusers The Coq documentation ===================== The Coq documentation includes - A Reference Manual - A Tutorial - A document presenting the Coq standard library - A list of questions/answers in the FAQ style The sources of the documents are mainly made of LaTeX code from which user-readable PostScript or PDF files, or a user-browsable bunch of html files are generated. Prerequisite ------------ To produce the PostScript documents, the following tools are needed: - latex (latex2e) - dvips - bibtex - makeindex - pngtopnm and pnmtops (for the Reference Manual and the FAQ) To produce the PDF documents, the following tools are needed: - pdflatex - bibtex To produce the html documents, the following tools are needed: - hevea (e.g. 1.07 works) Under Debian based operating systems (Debian, Ubuntu, ...) a working set of packages for compiling the documentation for Coq is: texlive texlive-latex-extra texlive-math-extra texlive-fonts-extra texlive-lang-french texlive-humanities texlive-pictures latex-xcolor hevea netpbm Compilation ----------- To produce all documentation about Coq, just run: make doc Alternatively, you can use some specific targets: make doc-ps to produce all PostScript documents make doc-pdf to produce all PDF documents make doc-html to produce all html documents make refman to produce all formats of the reference manual make tutorial to produce all formats of the tutorial make rectutorial to produce all formats of the tutorial on recursive types make faq to produce all formats of the FAQ make stdlib to produce all formats of the Coq standard library Installation ------------ To install all produced documents, do: make DOCDIR=/some/directory/for/documentation install-doc DOCDIR defauts to /usr/share/doc/coq coq-8.4pl2/man/0000750000175000001440000000000012127276534012412 5ustar notinuserscoq-8.4pl2/man/coq-interface.10000640000175000001440000000114011052201656015176 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coq\-interface \- Customized Coq toplevel to make user interfaces .SH SYNOPSIS .B coq-interface [ .B options ] .SH DESCRIPTION .B coq-interface is a Coq customized toplevel system for Coq containing some modules useful for the graphical interface. This program is not for the casual user. .SH OPTIONS .TP .B \-h Help. Will give you the complete list of options accepted by coq-interface (the same as coqtop). .SH SEE ALSO .BR coqc (1), .BR coqdep (1), .BR coqtop (1), .BR coq\-parser (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coq-tex.10000640000175000001440000000600311047055674014054 0ustar notinusers.TH COQ-TEX 1 "29 March 1995" .SH NAME coq-tex \- Process Coq phrases embedded in LaTeX files .SH SYNOPSIS .B coq-tex [ .BI \-o \ output-file ] [ .BI \-n \ line-width ] [ .BI \-image \ coq-image ] [ .B \-w ] [ .B \-v ] [ .B \-sl ] [ .B \-hrule ] [ .B \-small ] .I input-file ... 
.SH DESCRIPTION The .B coq-tex filter extracts Coq phrases embedded in LaTeX files, evaluates them, and insert the outcome of the evaluation after each phrase. Three LaTeX environments are provided to include Coq code in the input files: .TP .B coq_example The phrases between \\begin{coq_example} and \\end{coq_example} are evaluated and copied into the output file. Each phrase is followed by the response of the toplevel loop. .TP .B coq_example* The phrases between \\begin{coq_example*} and \\end{coq_example*} are evaluated and copied into the output file. The responses of the toplevel loop are discarded. .TP .B coq_eval The phrases between \\begin{coq_eval} and \\end{coq_eval} are silently evaluated. They are not copied into the output file, and the responses of the toplevel loop are discarded. .PP The resulting LaTeX code is stored in the file .IR file \&.v.tex if the input file has a name of the form .IR file \&.tex, otherwise the name of the output file is the name of the input file with `.v.tex' appended. The files produced by .B coq-tex can be directly processed by LaTeX. Both the Coq phrases and the toplevel output are typeset in typewriter font. .SH OPTIONS .TP .BI \-o \ output-file Specify the name of a file where the LaTeX output is to be stored. A dash `\-' causes the LaTeX output to be printed on standard output. .TP .BI \-n \ line-width Set the line width. The default is 72 characters. The responses of the toplevel loop are folded if they are longer than the line width. No folding is performed on the Coq input text. .TP .BI \-image \ coq-image Cause the file .IR coq-image to be executed to evaluate the Coq phrases. By default, this is the command .IR coqtop without specifying any path which is used to evaluate the Coq phrases. .TP .B \-w Cause lines to be folded on a space character whenever possible, avoiding word cuts in the output. By default, folding occurs at the line width, regardless of word cuts. .TP .B \-v Verbose mode. Prints the Coq answers on the standard output. Useful to detect errors in Coq phrases. .TP .B \-sl Slanted mode. The Coq answers are written in a slanted font. .TP .B \-hrule Horizontal lines mode. The Coq parts are written between two horizontal lines. .TP .B \-small Small font mode. The Coq parts are written in a smaller font. .SH CAVEATS The \\begin... and \\end... phrases must sit on a line by themselves, with no characters before the backslash or after the closing brace. Each Coq phrase must be terminated by `.' at the end of a line. Blank space is accepted between `.' and the newline, but any other character will cause coq-tex to ignore the end of the phrase, resulting in an incorrect shuffling of the responses into the phrases. (The responses ``lag behind''.) .SH SEE ALSO .B coqtop (1). coq-8.4pl2/man/coqide.10000640000175000001440000000501411662501456013736 0ustar notinusers.TH COQIDE 1 "July 16, 2004" .SH NAME coqide \- The Coq Proof Assistant graphical interface .SH SYNOPSIS .B coqide [ .B options ] .SH DESCRIPTION .B coqide is a gtk graphical interface for the Coq proof assistant. For command-line-oriented use of Coq, see .BR coqtop (1) ; for batch-oriented use of Coq, see .BR coqc (1). .SH OPTIONS .TP .B \-h Show the complete list of options accepted by .BR coqide . .TP .BI \-I\ dir ,\ \-include\ dir Add directory dir in the include path. .TP .BI \-R\ dir\ coqdir Recursively map physical .I dir to logical .IR coqdir . .TP .B \-src Add source directories in the include path. .TP .BI \-is\ f ,\ \-inputstate\ f Read state from .IR f .coq. 
.TP .B \-nois Start with an empty state. .TP .BI \-outputstate\ f Write state in file .IR f .coq. .TP .BI \-load\-ml\-object\ f Load ML object file .IR f . .TP .BI \-load\-ml\-source\ f Load ML file .IR f . .TP .BI \-l\ f ,\ \-load\-vernac\-source\ f Load Coq file .IR f .v (Load .IR f .). .TP .BI \-lv\ f ,\ \-load\-vernac\-source\-verbose\ f Load Coq file .IR f .v (Load Verbose .IR f .). .TP .BI \-load\-vernac\-object\ f Load Coq object file .IR f .vo. .TP .BI \-require\ f Load Coq object file .IR f .vo and import it (Require .IR f .). .TP .BI \-compile\ f Compile Coq file .IR f .v (implies .BR \-batch ). .TP .BI \-compile\-verbose\ f Verbosely compile Coq file .IR f .v (implies .BR -batch ). .TP .B \-opt Run the native-code version of Coq or Coq_SearchIsos. .TP .B \-byte Run the bytecode version of Coq or Coq_SearchIsos. .TP .B \-where Print Coq's standard library location and exit. .TP .B -v Print Coq version and exit. .TP .B \-q Skip loading of rcfile. .TP .BI \-init\-file\ f Set the rcfile to .IR f . .TP .B \-batch Batch mode (exits just after arguments parsing). .TP .B \-boot Boot mode (implies .B \-q and .BR \-batch ). .TP .B \-emacs Tells Coq it is executed under Emacs. .TP .BI \-dump\-glob\ f Dump globalizations in file .I f (to be used by .BR coqdoc (1)). .TP .B \-impredicative\-set Set sort Set impredicative. .TP .B \-dont\-load\-proofs Don't load opaque proofs in memory. .TP .B \-xml Export XML files either to the hierarchy rooted in the directory .B COQ_XML_LIBRARY_ROOT (if set) or to stdout (if unset). .SH SEE ALSO .BR coqc (1), .BR coqtop (1), .BR coq-tex (1), .BR coqdep (1). .br .I The Coq Reference Manual, .I The Coq web site: http://coq.inria.fr, .I /usr/share/doc/coqide/FAQ. .SH AUTHOR This manual page was written by Samuel Mimram , for the Debian project (but may be used by others). coq-8.4pl2/man/coqc.10000640000175000001440000000167311743503527013427 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coqc \- The Coq Proof Assistant compiler .SH SYNOPSIS .B coqc [ .B general \ Coq \ options ] .I file .SH DESCRIPTION .B coqc is the batch compiler for the Coq Proof Assistant. The options are basically the same as coqtop(1). .IR file.v \& is the vernacular file to compile. .IR file \& must be formed only with the characters `a` to `Z`, `0`-`9` or `_` and must begin with a letter. The compiler produces an object file .IR file.vo \&. For interactive use of Coq, see .BR coqtop(1). .SH OPTIONS .B coqc is a script that simply runs .B coqtop with option .B \-compile it accepts the same options as .B coqtop. .TP .BI \-image \ bin use .I bin as underlying .B coqtop instead of the default one. .TP .BI \-verbose print the compiled file on the standard output. .SH SEE ALSO .BR coqtop (1), .BR coq_makefile (1), .BR coqdep (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coqtop.10000640000175000001440000000537511743503527014012 0ustar notinusers.TH COQ 1 "October 11, 2006" .SH NAME coqtop \- The Coq Proof Assistant toplevel system .SH SYNOPSIS .B coqtop [ .B options ] .SH DESCRIPTION .B coqtop is the toplevel system of Coq, for interactive use. It reads phrases on the standard input, and prints results on the standard output. For batch-oriented use of Coq, see .BR coqc(1). .SH OPTIONS .TP .B \-h, \-\-help Help. Will give you the complete list of options accepted by coqtop. 
.TP .BI \-I \ dir, \ \-\-include \ dir add directory .I dir in the include path .TP .BI \-R \ dir\ coqdir recursively map physical .I dir to logical .I coqdir .TP .BI \-top \ coqdir set the toplevel name to be .I coqdir instead of Top .TP .BI \-inputstate \ filename, \ \-is \ filename read state from file .I filename.coq .TP .B \-nois start with an empty initial state .TP .BI \-outputstate filename write state in file .I filename.coq .TP .BI \-load\-ml\-object \ filename load ML object file .I filenname .TP .BI \-load\-ml\-source \ filename load ML file .I filename .TP .BI \-load\-vernac\-source \ filename, \ \-l \ filename load Coq file .I filename.v (Load filename.) .TP .BI \-load\-vernac\-source\-verbose \ filename, \ \-lv \ filename load verbosely Coq file .I filename.v (Load Verbose filename.) .TP .BI \-load\-vernac\-object \ filename load Coq object file .I filename.vo .TP .BI \-require \ filename load Coq object file .I filename.vo and import it (Require Import filename.) .TP .BI \-compile \ filename compile Coq file .I filename.v (implies .B \-batch ) .TP .BI \-compile\-verbose \ filename verbosely compile Coq file .I filename.v (implies .B \-batch ) .TP .B \-opt run the native\-code version of Coq .TP .B \-byte run the bytecode version of Coq .TP .B \-where print Coq's standard library location and exit .TP .B \-v print Coq version and exit .TP .B \-q skip loading of rcfile .TP .BI \-init\-file \ filename set the rcfile to .I filename .TP .B \-batch batch mode (exits just after arguments parsing) .TP .B \-boot boot mode (implies .B \-q and .B \-batch ) .TP .B \-emacs tells Coq it is executed under Emacs .TP .BI \-dump\-glob \ filename dump globalizations in file f (to be used by .B coqdoc(1) ) .TP .BI \-with\-geoproof \ (yes|no) to (de)activate special functions for Geoproof within Coqide (default is .I yes ) .TP .B \-impredicative\-set set sort Set impredicative .TP .B \-dont\-load\-proofs don't load opaque proofs in memory .TP .B \-xml export XML files either to the hierarchy rooted in the directory $COQ_XML_LIBRARY_ROOT (if set) or to stdout (if unset) .TP .B \-quality improve the legibility of the proof terms produced by some tactics .SH SEE ALSO .BR coqc (1), .BR coq-tex (1), .BR coqdep (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coqtop.opt.10000640000175000001440000000061007271545546014606 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coqtop.opt \- The native-code Coq toplevel .SH SYNOPSIS .B coqopt.opt [ .B options ] [ .I file ] .SH DESCRIPTION .B coqopt.opt is the native-code version of Coq. It should not be called directly, but only by .B coqtop and .B coqc .SH SEE ALSO .BR coqtop (1), .BR coqc (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coqtop.byte.10000640000175000001440000000060507271545546014753 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coqtop.byte \- The bytecode Coq toplevel .SH SYNOPSIS .B coqtop.byte [ .B options ] [ .I file ] .SH DESCRIPTION .B coqopt.byte is the bytecode version of Coq. It should not be called directly, but only by .B coqtop and .B coqc .SH SEE ALSO .BR coqtop (1), .BR coqc (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/gallina.10000640000175000001440000000223607271477052014111 0ustar notinusers.TH COQ 1 "29 March 1995" "Coq tools" .SH NAME gallina \- extracts specification from Coq vernacular files .SH SYNOPSIS .B gallina [ .BI \- ] [ .BI \-stdout ] [ .BI \-nocomments ] .I file ... 
.SH DESCRIPTION .B gallina takes Coq files as arguments and builds the corresponding specification files. The Coq file .IR foo.v \& gives bearth to the specification file .IR foo.g. \& The suffix '.g' stands for Gallina. For that purpose, gallina removes all commands that follow a "Theorem", "Lemma", "Fact", "Remark" or "Goal" statement until it reaches a command "Abort.", "Save.", "Qed.", "Defined." or "Proof <...>.". It also removes every "Hint", "Syntax", "Immediate" or "Transparent" command. Files without the .v suffix are ignored. .SH OPTIONS .TP .BI \-stdout Prints the result on standard output. .TP .BI \- Coq source is taken on standard input. The result is printed on standard output. .TP .BI \-nocomments Comments are removed in the *.g file. .SH NOTES Nested comments are correctly handled. In particular, every command "Save." or "Abort." in a comment is not taken into account. .SH BUGS Please report any bug to .B coq@pauillac.inria.fr coq-8.4pl2/man/coqchk.10000640000175000001440000000352011605317363013741 0ustar notinusers.TH COQ 1 "July 7, 201" .SH NAME coqchk \- The Coq Proof Checker compiled libraries verifier .SH SYNOPSIS .B coqchk [ .B options ] .I modules .SH DESCRIPTION .B coqchk is the standalone checker of compiled libraries (.vo files produced by coqc) for the Coq Proof Assistant. See the Reference Manual for more information. It returns with exit code 0 if all the requested tasks succeeded. A non-zero return code means that something went wrong: some library was not found, corrupted content, type-checking failure, etc. .IR modules \& is a list of modules to be checked. Modules can be referred to by a short or qualified name. .SH OPTIONS .TP .BI \-I \ dir, \ \-\-include \ dir add directory .I dir in the include path .TP .BI \-R \ dir\ coqdir recursively map physical .I dir to logical .I coqdir .TP .BI \-silent makes coqchk less verbose. .TP .BI \-admit \ module tag the specified module and all its dependencies as trusted, and will not be rechecked, unless explicitly requested by other options. .TP .BI \-norec \ module specifies that the given module shall be verified without requesting to check its dependencies. .TP .BI \-m,\ \-\-memory displays a summary of the memory used by the checker. .TP .BI \-o,\ \-\-output\-context displays a summary of the logical content that have been verified: assumptions and usage of impredicativity. .TP .BI \-impredicative\-set allows the checker to accept libraries that have been compiled with this flag. .TP .BI \-v print coqchk version and exit. .TP .BI \-coqlib \ dir overrides the default location of the standard library. .TP .BI \-where print coqchk standard library location and exit. .TP .BI \-h,\ \-\-help print list of options .SH SEE ALSO .BR coqtop (1), .BR coqc (1), .BR coq_makefile (1), .BR coqdep (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coqdoc.10000640000175000001440000001072411334513525013742 0ustar notinusers.TH coqdoc 1 "April, 2006" .SH NAME coqdoc \- A documentation tool for the Coq proof assistant .SH SYNOPSIS .B coqdoc [ .B options ] .B files .SH DESCRIPTION .B coqdoc is a documentation tool for the Coq proof assistant. It creates LaTeX or HTML documents from a set of Coq files. See the Coq reference manual for documentation (url below). .SH OPTIONS .SS Overall options .TP .BI \-h Help. Will give you the complete list of options accepted by coqdoc. .TP .B \-\-html Select a HTML output. .TP .B \-\-latex Select a LATEX output. .TP .B \-\-dvi Select a DVI output. 
.TP .B \-\-ps Select a PostScript output. .TP .B \-\-texmacs Select a TeXmacs output. .TP .B \-\-stdout Redirect the output to stdout .TP .BI \-o \ file, \-\-output \ file Redirect the output into the file .I file. .TP .BI \-d \ dir, \ \-\-directory \ dir Output files into directory .I dir instead of current directory (option \-d does not change the filename specified with option \-o, if any). .TP .B \-s, \ \-\-short Do not insert titles for the files. The default behavior is to insert a title like ``Library Foo'' for each file. .TP .BI \-t \ string, \ \-\-title \ string Set the document title. .TP .B \-\-body\-only Suppress the header and trailer of the final document. Thus, you can insert the resulting document into a larger one. .TP .BI \-p \ string, \ \-\-preamble \ string Insert some material in the LATEX preamble, right before \\begin{document} (meaningless with \-html). .TP .BI \-\-vernac\-file \ file, \ \-\-tex\-file \ file Considers the file `file' respectively as a .v (or .g) file or a .tex file. .TP .BI \-\-files\-from \ file Read file names to process in file `file' as if they were given on the command line. Useful for program sources split in several directories. .TP .B \-q, \ \-\-quiet Be quiet. Do not print anything except errors. .TP .B \-h, \ \-\-help Give a short summary of the options and exit. .TP .B \-v, \ \-\-version Print the version and exit. .SS Index options Default behavior is to build an index, for the HTML output only, into index.html. .TP .B \-\-no\-index Do not output the index. .TP .B \-\-multi\-index Generate one page for each category and each letter in the index, together with a top page index.html. .SS Table of contents option .TP .B \-toc, \ \-\-table\-of\-contents Insert a table of contents. For a LATEX output, it inserts a \\tableofcontents at the beginning of the document. For a HTML output, it builds a table of contents into toc.html. .SS Hyperlinks options .TP .B \-\-glob\-from \ file Make references using Coq globalizations from file file. (Such globalizations are obtained with Coq option \-dump\-glob). .TP .B \-\-no\-externals Do not insert links to the Coq standard library. .TP .BI \-\-external \ url \ libroot Set base URL for the external library whose root prefix is libroot. .TP .BI \-\-coqlib \ url Set base URL for the Coq standard library (default is http://coq.inria.fr/library/). .TP .BI \-\-coqlib_path \ dir Set the base path where the Coq files are installed, especially style files coqdoc.sty and coqdoc.css. .TP .BI \-R \ dir \ coqdir Map physical directory dir to Coq logical directory coqdir (similarly to Coq option \-R). .B Note: option \-R only has effect on the files following it on the command line, so you will probably need to put this option first. .SS Contents options .TP .B \-g, \ \-\-gallina Do not print proofs. .TP .B \-l, \ \-\-light Light mode. Suppress proofs (as with \-g) and the following commands: * [Recursive] Tactic Definition * Hint / Hints * Require * Transparent / Opaque * Implicit Argument / Implicits * Section / Variable / Hypothesis / End The behavior of options \-g and \-l can be locally overridden using the (* begin show *) ... (* end show *) environment (see above). .SS Language options Default behavior is to assume ASCII 7 bits input files. .TP .B \-latin1, \ \-\-latin1 Select ISO-8859-1 input files. It is equivalent to \-\-inputenc latin1 \-\-charset iso\-8859\-1. .TP .B \-utf8, \ \-\-utf8 Select UTF-8 (Unicode) input files. It is equivalent to \-\-inputenc utf8 \-\-charset utf\-8. 
LATEX UTF-8 support can be found at http://www.ctan.org/tex\-archive/macros/latex/contrib/supported/unicode/. .TP .BI \-\-inputenc \ string Give a LATEX input encoding, as an option to LATEX package inputenc. .TP .BI \-\-charset \ string Specify the HTML character set, to be inserted in the HTML header. .SH SEE ALSO .I The Coq Reference Manual from http://coq.inria.fr/ coq-8.4pl2/man/coqdep.10000640000175000001440000000700511156712440013742 0ustar notinusers.TH COQ 1 "28 March 1995" "Coq tools" .SH NAME coqdep \- Compute inter-module dependencies for Coq and Caml programs .SH SYNOPSIS .B coqdep [ .BI \-w ] [ .BI \-I \ directory ] [ .BI \-coqlib \ directory ] [ .BI \-c ] [ .BI \-i ] [ .BI \-D ] [ .BI \-slash ] .I filename ... .I directory ... .SH DESCRIPTION .B coqdep compute inter-module dependencies for Coq and Caml programs, and prints the dependencies on the standard output in a format readable by make. When a directory is given as argument, it is recursively looked at. Dependencies of Coq modules are computed by looking at .IR Require \& commands (Require, Require Export, Require Import), .IR Declare \& .IR ML \& .IR Module \& commands and .IR Load \& commands. Dependencies relative to modules from the Coq library are not printed. Dependencies of Caml modules are computed by looking at .IR open \& directives and the dot notation .IR module.value \&. .SH OPTIONS .TP .BI \-c Prints the dependencies of Caml modules. (On Caml modules, the behaviour is exactly the same as ocamldep). .TP .BI \-w Prints a warning if a Coq command .IR Declare \& .IR ML \& .IR Module \& is incorrect. (For instance, you wrote `Declare ML Module "A".', but the module A contains #open "B"). The correct command is printed (see option \-D). The warning is printed on standard error. .TP .BI \-D This commands looks for every command .IR Declare \& .IR ML \& .IR Module \& of each Coq file given as argument and complete (if needed) the list of Caml modules. The new command is printed on the standard output. No dependency is computed with this option. .TP .BI \-slash Prints paths using a slash instead of the OS specific separator. This option is useful when developping under Cygwin. .TP .BI \-I \ directory The files .v .ml .mli of the directory .IR directory \& are taken into account during the calculus of dependencies, but their own dependencies are not printed. .TP .BI \-coqlib \ directory Indicates where is the Coq library. The default value has been determined at installation time, and therefore this option should not be used under normal circumstances. .SH SEE ALSO .BR ocamlc (1), .BR coqc (1), .BR make (1). .br .SH NOTES Lexers (for Coq and Caml) correctly handle nested comments and strings. The treatment of symbolic links is primitive. If two files have the same name, in two different directories, a warning is printed on standard error. There is no way to limit the scope of the recursive search for directories. .SH EXAMPLES .LP Consider the files (in the same directory): A.ml B.ml C.ml D.ml X.v Y.v and Z.v where .TP .BI \+ D.ml contains the commands `open A', `open B' and `type t = C.t' ; .TP .BI \+ Y.v contains the command `Require X' ; .TP .BI \+ Z.v contains the commands `Require X' and `Declare ML Module "D"'. .LP To get the dependencies of the Coq files: .IP .B example% coqdep \-I . *.v .RS .sp .5 .nf .B Z.vo: Z.v ./X.vo ./D.cmo .B Y.vo: Y.v ./X.vo .B X.vo: X.v .fi .RE .br .ne 7 .LP With a warning: .IP .B example% coqdep \-w \-I . 
*.v .RS .sp .5 .nf .B Z.vo: Z.v ./X.vo ./D.cmo .B Y.vo: Y.v ./X.vo .B X.vo: X.v ### Warning : In file Z.v, the ML modules declaration should be ### Declare ML Module "A" "B" "C" "D". .fi .RE .br .ne 7 .LP To get only the Caml dependencies: .IP .B example% coqdep \-c \-I . *.ml .RS .sp .5 .nf .B D.cmo: D.ml ./A.cmo ./B.cmo ./C.cmo .B D.cmx: D.ml ./A.cmx ./B.cmx ./C.cmx .B C.cmo: C.ml .B C.cmx: C.ml .B B.cmo: B.ml .B B.cmx: B.ml .B A.cmo: A.ml .B A.cmx: A.ml .fi .RE .br .ne 7 .SH BUGS Please report any bug to .B coq\-bugs@pauillac.inria.fr coq-8.4pl2/man/coq-parser.10000640000175000001440000000072311052201656014540 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coq\-parser \- Coq parser .SH SYNOPSIS .B coq\-parser [ .B options ] .SH DESCRIPTION .B parser is a program reading Coq proof developments and outputing them in the structured format given in the INRIA technical report RT154. This program is not for the casual user. .SH SEE ALSO .BR coq\-interface (1), .BR coqc (1), .BR coqtop (1), .BR coqdep (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coqwc.10000640000175000001440000000112011047055674013603 0ustar notinusers.TH COQ 1 "16 March 2004" "Coq tools" .SH NAME coqwc \- print the number of specification, proof and comment lines in Coq files .SH SYNOPSIS .B coqwc [ .BI \-p ] [ .BI \-s ] [ .BI \-r ] [ .BI \-e ] .I files ... .SH DESCRIPTION .B coqwc computes the number of specification lines, proof lines and comment lines in Coq files. .SH OPTIONS .TP .BI \-p Print the percentage of comments .TP .BI \-s Print only the number of specification lines .TP .BI \-r Print only the number of proof lines .TP .BI \-e Do not skip headers .SH BUGS Please report any bug to .B coq\-bugs@pauillac.inria.fr coq-8.4pl2/man/coqmktop.10000640000175000001440000000207311551076547014336 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coqmktop \- The Coq Proof Assistant user-tactics linker .SH SYNOPSIS .B coqmktop [ .I options ] .I files .SH DESCRIPTION .B coqmktop builds a new Coq toplevel extended with user-tactics. .IR files \& are the Objective Caml object or library files (i.e. with suffix .cmo, .cmx, .cma or .cmxa) to link with the Coq system. The linker produces an executable Coq toplevel which can be called directly or through coqc(1), using the \-image option. .SH OPTIONS .TP .BI \-h Help. List the available options. .TP .BI \-srcdir \ dir Specify where the Coq source files are .TP .BI \-o \ exec\-file Specify the name of the resulting toplevel .TP .B \-opt Compile in native code .TP .B \-full Link high level tactics .TP .B \-top Build Coq on a ocaml toplevel (incompatible with .BR \-opt ) .TP .BI \-R \ dir Specify recursively directories for Ocaml .TP .B \-v8 Link with V8 grammar .SH SEE ALSO .BR coqtop (1), .BR ocamlmktop (1). .BR ocamlc (1). .BR ocamlopt (1). .br .I The Coq Reference Manual. .I The Coq web site: http://coq.inria.fr coq-8.4pl2/man/coq_makefile.10000640000175000001440000000072207644347257015126 0ustar notinusers.TH COQ 1 "April 25, 2001" .SH NAME coq_makefile \- The Coq Proof Assistant makefile generator .SH SYNOPSIS .B coq_makefile [ .B arguments ] .SH DESCRIPTION .B coq_makefile is a makefile generator for Coq proof developments. .SH OPTIONS .TP .BI \-h Will give you a description of the whole list of options of coq_makefile. .SH SEE ALSO .BR coqtop (1), .BR coqtc (1), .BR coqdep (1). .br .I The Coq Reference Manual. 
.I The Coq web site: http://coq.inria.fr coq-8.4pl2/Makefile0000640000175000001440000002371611761130562013302 0ustar notinusers####################################################################### # v # The Coq Proof Assistant / The Coq Development Team # # = 3.81. # # This Makefile is now separated into Makefile.{common,build,doc}. # You won't find Makefiles in sub-directories and this is done on purpose. # If you are not yet convinced of the advantages of a single Makefile, please # read # http://miller.emu.id.au/pmiller/books/rmch/ # before complaining. # # When you are working in a subdir, you can compile without moving to the # upper directory using "make -C ..", and the output is still understood # by Emacs' next-error. # # Specific command-line options to this Makefile: # # make VERBOSE=1 # restore the raw echoing of commands # make NO_RECALC_DEPS=1 # avoid recomputing dependencies # make NO_RECOMPILE_LIB=1 # a coqtop rebuild does not trigger a stdlib rebuild # # Nota: the 1 above can be replaced by any non-empty value # # ---------------------------------------------------------------------- # See dev/doc/build-system*.txt for more details/FAQ about this Makefile # ---------------------------------------------------------------------- ########################################################################### # File lists ########################################################################### # NB: due to limitations in Win32, please refrain using 'export' too much # to communicate between make sub-calls (in Win32, 8kb max per env variable, # 32kb total) # !! Before using FIND_VCS_CLAUSE, please read how you should in the !! # !! FIND_VCS_CLAUSE section of dev/doc/build-system.dev.txt !! FIND_VCS_CLAUSE:='(' \ -name '{arch}' -o \ -name '.svn' -o \ -name '_darcs' -o \ -name '.git' -o \ -name '.bzr' -o \ -name 'debian' -o \ -name "$${GIT_DIR}" -o \ -name '_build' \ ')' -prune -o define find $(shell find . $(FIND_VCS_CLAUSE) '(' -name $(1) ')' -print | sed 's|^\./||') endef ## Files in the source tree YACCFILES:=$(call find, '*.mly') LEXFILES := $(call find, '*.mll') export MLLIBFILES := $(call find, '*.mllib') export ML4FILES := $(call find, '*.ml4') export CFILES := $(call find, '*.c') # NB: The lists of currently existing .ml and .mli files will change # before and after a build or a make clean. Hence we do not export # these variables, but cleaned-up versions (see below MLFILES and co) EXISTINGML := $(call find, '*.ml') EXISTINGMLI := $(call find, '*.mli') ## Files that will be generated GENML4FILES:= $(ML4FILES:.ml4=.ml) GENMLFILES:=$(LEXFILES:.mll=.ml) $(YACCFILES:.mly=.ml) \ scripts/tolink.ml kernel/copcodes.ml GENMLIFILES:=$(YACCFILES:.mly=.mli) GENPLUGINSMOD:=$(filter plugins/%,$(MLLIBFILES:%.mllib=%_mod.ml)) export GENHFILES:=kernel/byterun/coq_jumptbl.h export GENVFILES:=theories/Numbers/Natural/BigN/NMake_gen.v export GENFILES:=$(GENMLFILES) $(GENMLIFILES) $(GENHFILES) $(GENVFILES) $(GENPLUGINSMOD) # NB: all files in $(GENFILES) can be created initially, while # .ml files in $(GENML4FILES) might need some intermediate building. 
# That's why we keep $(GENML4FILES) out of $(GENFILES) ## More complex file lists define diff $(strip $(foreach f, $(1), $(if $(filter $(f),$(2)),,$f))) endef export MLEXTRAFILES := $(GENMLFILES) $(GENML4FILES) $(GENPLUGINSMOD) export MLSTATICFILES := $(call diff, $(EXISTINGML), $(MLEXTRAFILES)) export MLIFILES := $(sort $(GENMLIFILES) $(EXISTINGMLI)) include Makefile.common ########################################################################### # Starting rules ########################################################################### NOARG: world .PHONY: NOARG help always help: @echo "Please use either" @echo " ./configure" @echo " make world" @echo " make install" @echo " make clean" @echo "or make archclean" @echo @echo "For make to be verbose, add VERBOSE=1" UNSAVED_FILES:=$(shell find . -name '.\#*v' -o -name '.\#*.ml' -o -name '.\#*.ml?') ifdef UNSAVED_FILES $(error You have unsaved changes in your editor (emacs?) [$(UNSAVED_FILES)]; \ cancel them or save before proceeding. Or your editor crashed. \ Then, you may want to consider whether you want to restore the autosaves) #If you try to simply remove this explicit test, the compilation may #fail later. In particular, if a .#*.v file exists, coqdep fails to #run. endif # Apart from clean and tags, everything will be done in a sub-call to make # on Makefile.build. This way, we avoid doing here the -include of .d : # since they trigger some compilations, we do not want them for a mere clean ifdef COQ_CONFIGURED %:: always $(MAKE) --warn-undefined-variable --no-builtin-rules -f Makefile.build "$@" else %:: always @echo "Please run ./configure first" >&2; exit 1 endif always : ; # To speed-up things a bit, let's dissuade make to attempt rebuilding makefiles Makefile Makefile.build Makefile.common config/Makefile : ; ########################################################################### # Cleaning ########################################################################### .PHONY: clean cleankeepvo objclean cruftclean indepclean doclean archclean optclean clean-ide ml4clean ml4depclean depclean cleanconfig distclean voclean devdocclean clean: objclean cruftclean depclean docclean devdocclean cleankeepvo: indepclean clean-ide optclean cruftclean depclean docclean devdocclean objclean: archclean indepclean cruftclean: ml4clean find . -name '*~' -o -name '*.annot' | xargs rm -f rm -f gmon.out core indepclean: rm -f $(GENFILES) rm -f $(COQTOPBYTE) $(COQMKTOPBYTE) $(COQCBYTE) $(CHICKENBYTE) bin/fake_ide find . 
-name '*~' -o -name '*.cm[ioa]' | xargs rm -f rm -f */*.pp[iox] plugins/*/*.pp[iox] rm -rf $(SOURCEDOCDIR) rm -f toplevel/mltop.byteml toplevel/mltop.optml rm -f test-suite/check.log rm -f glob.dump rm -f config/revision.ml revision $(MAKE) -C test-suite clean docclean: rm -f doc/*/*.dvi doc/*/*.aux doc/*/*.log doc/*/*.bbl doc/*/*.blg doc/*/*.toc \ doc/*/*.idx doc/*/*~ doc/*/*.ilg doc/*/*.ind doc/*/*.dvi.gz doc/*/*.ps.gz doc/*/*.pdf.gz\ doc/*/*.???idx doc/*/*.???ind doc/*/*.v.tex doc/*/*.atoc doc/*/*.lof\ doc/*/*.hatoc doc/*/*.haux doc/*/*.hcomind doc/*/*.herrind doc/*/*.hidx doc/*/*.hind \ doc/*/*.htacind doc/*/*.htoc doc/*/*.v.html rm -f doc/stdlib/index-list.html doc/stdlib/index-body.html \ doc/stdlib/*Library.coqdoc.tex doc/stdlib/library.files \ doc/stdlib/library.files.ls doc/stdlib/FullLibrary.tex rm -f doc/*/*.ps doc/*/*.pdf rm -rf doc/refman/html doc/stdlib/html doc/faq/html doc/tutorial/tutorial.v.html rm -f doc/refman/euclid.ml doc/refman/euclid.mli rm -f doc/refman/heapsort.ml doc/refman/heapsort.mli rm -f doc/common/version.tex rm -f doc/refman/styles.hva doc/refman/cover.html doc/refman/Reference-Manual.html rm -f doc/coq.tex rm -f doc/refman/styles.hva doc/refman/cover.html archclean: clean-ide optclean voclean rm -rf _build myocamlbuild_config.ml rm -f $(ALLSTDLIB).* optclean: rm -f $(COQTOPEXE) $(COQMKTOP) $(COQC) $(CHICKEN) $(COQDEPBOOT) rm -f $(COQTOPOPT) $(COQMKTOPOPT) $(COQCOPT) $(CHICKENOPT) rm -f $(TOOLS) $(CSDPCERT) find . -name '*.cmx' -o -name '*.cmxs' -o -name '*.cmxa' -o -name '*.[soa]' -o -name '*.so' | xargs rm -f clean-ide: rm -f $(COQIDECMO) $(COQIDECMX) $(COQIDECMO:.cmo=.cmi) $(COQIDEBYTE) $(COQIDEOPT) $(COQIDE) rm -f ide/input_method_lexer.ml rm -f ide/highlight.ml ide/config_lexer.ml ide/config_parser.mli ide/config_parser.ml rm -f ide/utf8_convert.ml ml4clean: rm -f $(GENML4FILES) ml4depclean: find . -name '*.ml4.d' | xargs rm -f depclean: find . $(FIND_VCS_CLAUSE) '(' -name '*.d' ')' -print | xargs rm -f cleanconfig: rm -f config/Makefile config/coq_config.ml dev/ocamldebug-v7 ide/undo.mli distclean: clean cleanconfig voclean: rm -f states/*.coq find theories plugins test-suite -name '*.vo' -o -name '*.glob' | xargs rm -f devdocclean: find . 
-name '*.dep.ps' -o -name '*.dot' | xargs rm -f rm -f $(OCAMLDOCDIR)/*.log $(OCAMLDOCDIR)/*.aux $(OCAMLDOCDIR)/*.toc rm -f $(OCAMLDOCDIR)/ocamldoc.sty $(OCAMLDOCDIR)/coq.tex rm -f $(OCAMLDOCDIR)/html/*.html ########################################################################### # Emacs tags ########################################################################### .PHONY: tags otags tags: echo $(MLIFILES) $(MLSTATICFILES) $(ML4FILES) | sort -r | xargs \ etags --language=none\ "--regex=/let[ \t]+\([^ \t]+\)/\1/" \ "--regex=/let[ \t]+rec[ \t]+\([^ \t]+\)/\1/" \ "--regex=/and[ \t]+\([^ \t]+\)/\1/" \ "--regex=/type[ \t]+\([^ \t]+\)/\1/" \ "--regex=/exception[ \t]+\([^ \t]+\)/\1/" \ "--regex=/val[ \t]+\([^ \t]+\)/\1/" \ "--regex=/module[ \t]+\([^ \t]+\)/\1/" echo $(ML4FILES) | sort -r | xargs \ etags --append --language=none\ "--regex=/[ \t]*\([^: \t]+\)[ \t]*:/\1/" otags: echo $(MLIFILES) $(MLSTATICFILES) | sort -r | xargs otags echo $(ML4FILES) | sort -r | xargs \ etags --append --language=none\ "--regex=/let[ \t]+\([^ \t]+\)/\1/" \ "--regex=/let[ \t]+rec[ \t]+\([^ \t]+\)/\1/" \ "--regex=/and[ \t]+\([^ \t]+\)/\1/" \ "--regex=/type[ \t]+\([^ \t]+\)/\1/" \ "--regex=/exception[ \t]+\([^ \t]+\)/\1/" \ "--regex=/val[ \t]+\([^ \t]+\)/\1/" \ "--regex=/module[ \t]+\([^ \t]+\)/\1/" %.elc: %.el ifdef COQ_CONFIGURED echo "(setq load-path (cons \".\" load-path))" > $*.compile echo "(byte-compile-file \"$<\")" >> $*.compile - $(EMACS) -batch -l $*.compile rm -f $*.compile else @echo "Please run ./configure first" >&2; exit 1 endif # Useful to check that the exported variables are within the win32 limits printenv: @env @echo @echo -n "Maxsize (win32 limit is 8k) : " @env | wc -L @echo -n "Total (win32 limit is 32k) : " @env | wc -m coq-8.4pl2/dev/0000750000175000001440000000000012127276535012416 5ustar notinuserscoq-8.4pl2/dev/vm_printers.ml0000640000175000001440000000444711267622364015331 0ustar notinusersopen Format open Term open Names open Cbytecodes open Cemitcodes open Vm let ppripos (ri,pos) = (match ri with | Reloc_annot a -> let sp,i = a.ci.ci_ind in print_string ("annot : MutInd("^(string_of_mind sp)^","^(string_of_int i)^")\n") | Reloc_const _ -> print_string "structured constant\n" | Reloc_getglobal kn -> print_string ("getglob "^(string_of_con kn)^"\n")); print_flush () let print_vfix () = print_string "vfix" let print_vfix_app () = print_string "vfix_app" let print_vswith () = print_string "switch" let ppsort = function | Prop(Pos) -> print_string "Set" | Prop(Null) -> print_string "Prop" | Type u -> print_string "Type" let print_idkey idk = match idk with | ConstKey sp -> print_string "Cons("; print_string (string_of_con sp); print_string ")" | VarKey id -> print_string (string_of_id id) | RelKey i -> print_string "~";print_int i let rec ppzipper z = match z with | Zapp args -> let n = nargs args in open_hbox (); for i = 0 to n-2 do ppvalues (arg args i);print_string ";";print_space() done; if n-1 >= 0 then ppvalues (arg args (n-1)); close_box() | Zfix _ -> print_string "Zfix" | Zswitch _ -> print_string "Zswitch" and ppstack s = open_hovbox 0; print_string "["; List.iter (fun z -> ppzipper z;print_string " | ") s; print_string "]"; close_box() and ppatom a = match a with | Aid idk -> print_idkey idk | Aiddef(idk,_) -> print_string "&";print_idkey idk | Aind(sp,i) -> print_string "Ind("; print_string (string_of_mind sp); print_string ","; print_int i; print_string ")" and ppwhd whd = match whd with | Vsort s -> ppsort s | Vprod _ -> print_string "product" | Vfun _ -> print_string 
"function" | Vfix _ -> print_vfix() | Vcofix _ -> print_string "cofix" | Vconstr_const i -> print_string "C(";print_int i;print_string")" | Vconstr_block b -> ppvblock b | Vatom_stk(a,s) -> open_hbox();ppatom a;close_box(); print_string"@";ppstack s and ppvblock b = open_hbox(); print_string "Cb(";print_int (btag b); let n = bsize b in for i = 0 to n -1 do print_string ",";ppvalues (bfield b i) done; print_string")"; close_box() and ppvalues v = open_hovbox 0;ppwhd (whd_val v);close_box(); print_flush() coq-8.4pl2/dev/base_db0000640000175000001440000000025707216352072013717 0ustar notinusersload_printer "gramlib.cma" load_printer "top_printers.cmo" install_printer Top_printers.prid install_printer Top_printers.prsp install_printer Top_printers.print_pure_constr coq-8.4pl2/dev/v8-syntax/0000750000175000001440000000000012127276534014276 5ustar notinuserscoq-8.4pl2/dev/v8-syntax/syntax-v8.tex0000640000175000001440000011637411505230573016706 0ustar notinusers \documentclass{article} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \usepackage{array} \usepackage{fullpage} \author{B.~Barras} \title{Syntax of Coq V8} %% Le _ est un caractère normal \catcode`\_=13 \let\subscr=_ \def_{\ifmmode\sb\else\subscr\fi} \def\bfbar{\ensuremath{|\hskip -0.22em{}|\hskip -0.24em{}|}} \def\TERMbar{\bfbar} \def\TERMbarbar{\bfbar\bfbar} \def\notv{\text{_}} \def\infx#1{\notv#1\notv} %% Macros pour les grammaires \def\GR#1{\text{\large(}#1\text{\large)}} \def\NT#1{\langle\textit{#1}\rangle} \def\NTL#1#2{\langle\textit{#1}\rangle_{#2}} \def\TERM#1{{\bf\textrm{\bf #1}}} %\def\TERM#1{{\bf\textsf{#1}}} \def\KWD#1{\TERM{#1}} \def\ETERM#1{\TERM{#1}} \def\CHAR#1{\TERM{#1}} \def\STAR#1{#1*} \def\STARGR#1{\GR{#1}*} \def\PLUS#1{#1+} \def\PLUSGR#1{\GR{#1}+} \def\OPT#1{#1?} \def\OPTGR#1{\GR{#1}?} %% Tableaux de definition de non-terminaux \newenvironment{cadre} {\begin{array}{|c|}\hline\\} {\\\\\hline\end{array}} \newenvironment{rulebox} {$$\begin{cadre}\begin{array}{r@{~}c@{~}l@{}l@{}r}} {\end{array}\end{cadre}$$} \def\DEFNT#1{\NT{#1} & ::= &} \def\EXTNT#1{\NT{#1} & ::= & ... \\&|&} \def\RNAME#1{(\textsc{#1})} \def\SEPDEF{\\\\} \def\nlsep{\\&|&} \def\nlcont{\\&&} \newenvironment{rules} {\begin{center}\begin{rulebox}} {\end{rulebox}\end{center}} \begin{document} \maketitle \section{Meta notations used in this document} Non-terminals are printed between angle brackets (e.g. $\NT{non-terminal}$) and terminal symbols are printed in bold font (e.g. $\ETERM{terminal}$). Lexemes are displayed as non-terminals. The usual operators on regular expressions: \begin{center} \begin{tabular}{l|l} \hfil notation & \hfil meaning \\ \hline $\STAR{regexp}$ & repeat $regexp$ 0 or more times \\ $\PLUS{regexp}$ & repeat $regexp$ 1 or more times \\ $\OPT{regexp}$ & $regexp$ is optional \\ $regexp_1~\mid~regexp_2$ & alternative \end{tabular} \end{center} Parenthesis are used to group regexps. Beware to distinguish this operator $\GR{~}$ from the terminals $\ETERM{( )}$, and $\mid$ from terminal \TERMbar. Rules are optionaly annotated in the right margin with: \begin{itemize} \item a precedence and associativity (L for left, R for right and N for no associativity), indicating how to solve conflicts; lower levels are tighter; \item a rule name. \end{itemize} In order to solve some conflicts, a non-terminal may be invoked with a precedence (notation: $\NTL{entry}{prec}$), meaning that rules with higher precedence do not apply. 
\section{Lexical conventions} Lexical categories are: \begin{rules} \DEFNT{ident} \STARGR{\NT{letter}\mid\CHAR{_}} \STARGR{\NT{letter}\mid \NT{digit} \mid \CHAR{'} \mid \CHAR{_}} \SEPDEF \DEFNT{field} \CHAR{.}\NT{ident} \SEPDEF \DEFNT{meta-ident} \CHAR{?}\NT{ident} \SEPDEF \DEFNT{num} \PLUS{\NT{digit}} \SEPDEF \DEFNT{int} \NT{num} \mid \CHAR{-}\NT{num} \SEPDEF \DEFNT{digit} \CHAR{0}-\CHAR{9} \SEPDEF \DEFNT{letter} \CHAR{a}-\CHAR{z}\mid\CHAR{A}-\CHAR{Z} \mid\NT{unicode-letter} \SEPDEF \DEFNT{string} \CHAR{"}~\STARGR{\CHAR{""}\mid\NT{unicode-char-but-"}}~\CHAR{"} \end{rules} Reserved identifiers for the core syntax are: \begin{quote} \KWD{as}, \KWD{cofix}, \KWD{else}, \KWD{end}, \KWD{fix}, \KWD{for}, \KWD{forall}, \KWD{fun}, \KWD{if}, \KWD{in}, \KWD{let}, \KWD{match}, \KWD{Prop}, \KWD{return}, \KWD{Set}, \KWD{then}, \KWD{Type}, \KWD{with} \end{quote} Symbols used in the core syntax: $$ \KWD{(} ~~ \KWD{)} ~~ \KWD{\{} ~~ \KWD{\}} ~~ \KWD{:} ~~ \KWD{,} ~~ \Rightarrow ~~ \rightarrow ~~ \KWD{:=} ~~ \KWD{_} ~~ \TERMbar ~~ \KWD{@} ~~ \KWD{\%} ~~ \KWD{.(} $$ Note that \TERM{struct} is not a reserved identifier. \section{Syntax of terms} \subsection{Core syntax} The main entry point of the term grammar is $\NTL{constr}{9}$. When no conflict can appear, $\NTL{constr}{200}$ is also used as entry point. \begin{rules} \DEFNT{constr} \NT{binder-constr} &200R~~ &\RNAME{binders} \nlsep \NT{constr}~\KWD{:}~\NT{constr} &100R &\RNAME{cast} \nlsep \NT{constr}~\KWD{:}~\NT{binder-constr} &100R &\RNAME{cast'} \nlsep \NT{constr}~\KWD{$\rightarrow$}~\NT{constr} &80R &\RNAME{arrow} \nlsep \NT{constr}~\KWD{$\rightarrow$}~\NT{binder-constr} &80R &\RNAME{arrow'} \nlsep \NT{constr}~\PLUS{\NT{appl-arg}} &10L &\RNAME{apply} \nlsep \KWD{@}~\NT{reference}~\STAR{\NTL{constr}{9}} &10L &\RNAME{expl-apply} \nlsep \NT{constr}~\KWD{.(} ~\NT{reference}~\STAR{\NT{appl-arg}}~\TERM{)} &1L & \RNAME{proj} \nlsep \NT{constr}~\KWD{.(}~\TERM{@} ~\NT{reference}~\STAR{\NTL{constr}{9}}~\TERM{)} &1L & \RNAME{expl-proj} \nlsep \NT{constr} ~ \KWD{\%} ~ \NT{ident} &1L &\RNAME{scope-chg} \nlsep \NT{atomic-constr} &0 \nlsep \NT{match-expr} &0 \nlsep \KWD{(}~\NT{constr}~\KWD{)} &0 \SEPDEF \DEFNT{binder-constr} \KWD{forall}~\NT{binder-list}~\KWD{,}~\NTL{constr}{200} &&\RNAME{prod} \nlsep \KWD{fun} ~\NT{binder-list} ~\KWD{$\Rightarrow$}~\NTL{constr}{200} &&\RNAME{lambda} \nlsep \NT{fix-expr} \nlsep \KWD{let}~\NT{ident-with-params} ~\KWD{:=}~\NTL{constr}{200} ~\KWD{in}~\NTL{constr}{200} &&\RNAME{let} \nlsep \KWD{let}~\NT{single-fix} ~\KWD{in}~\NTL{constr}{200} &&\RNAME{rec-let} \nlsep \KWD{let}~\KWD{(}~\OPT{\NT{let-pattern}}~\KWD{)}~\OPT{\NT{return-type}} ~\KWD{:=}~\NTL{constr}{200}~\KWD{in}~\NTL{constr}{200} &&\RNAME{let-case} \nlsep \KWD{if}~\NT{if-item} ~\KWD{then}~\NTL{constr}{200}~\KWD{else}~\NTL{constr}{200} &&\RNAME{if-case} \SEPDEF \DEFNT{appl-arg} \KWD{(}~\NT{ident}~\!\KWD{:=}~\NTL{constr}{200}~\KWD{)} &&\RNAME{impl-arg} \nlsep \KWD{(}~\NT{num}~\!\KWD{:=}~\NTL{constr}{200}~\KWD{)} &&\RNAME{impl-arg} \nlsep \NTL{constr}{9} \SEPDEF \DEFNT{atomic-constr} \NT{reference} && \RNAME{variables} \nlsep \NT{sort} && \RNAME{CIC-sort} \nlsep \NT{num} && \RNAME{number} \nlsep \KWD{_} && \RNAME{hole} \nlsep \NT{meta-ident} && \RNAME{meta/evar} \end{rules} \begin{rules} \DEFNT{ident-with-params} \NT{ident}~\STAR{\NT{binder-let}}~\NT{type-cstr} \SEPDEF \DEFNT{binder-list} \NT{binder}~\STAR{\NT{binder-let}} \nlsep \PLUS{\NT{name}}~\KWD{:}~\NT{constr} \SEPDEF \DEFNT{binder} \NT{name} &&\RNAME{infer} \nlsep \KWD{(}~\PLUS{\NT{name}}~\KWD{:}~\NT{constr} ~\KWD{)} 
&&\RNAME{binder} \SEPDEF \DEFNT{binder-let} \NT{binder} \nlsep \KWD{(}~\NT{name}~\NT{type-cstr}~\KWD{:=}~\NT{constr}~\KWD{)} \SEPDEF \DEFNT{let-pattern} \NT{name} \nlsep \NT{name} ~\KWD{,} ~\NT{let-pattern} \SEPDEF \DEFNT{type-cstr} \OPTGR{\KWD{:}~\NT{constr}} \SEPDEF \DEFNT{reference} \NT{ident} && \RNAME{short-ident} \nlsep \NT{ident}~\PLUS{\NT{field}} && \RNAME{qualid} \SEPDEF \DEFNT{sort} \KWD{Prop} ~\mid~ \KWD{Set} ~\mid~ \KWD{Type} \SEPDEF \DEFNT{name} \NT{ident} ~\mid~ \KWD{_} \end{rules} \begin{rules} \DEFNT{fix-expr} \NT{single-fix} \nlsep \NT{single-fix}~\PLUSGR{\KWD{with}~\NT{fix-decl}} ~\KWD{for}~\NT{ident} \SEPDEF \DEFNT{single-fix} \NT{fix-kw}~\NT{fix-decl} \SEPDEF \DEFNT{fix-kw} \KWD{fix} ~\mid~ \KWD{cofix} \SEPDEF \DEFNT{fix-decl} \NT{ident}~\STAR{\NT{binder-let}}~\OPT{\NT{annot}}~\NT{type-cstr} ~\KWD{:=}~\NTL{constr}{200} \SEPDEF \DEFNT{annot} \KWD{\{}~\TERM{struct}~\NT{ident}~\KWD{\}} \end{rules} \begin{rules} \DEFNT{match-expr} \KWD{match}~\NT{match-items}~\OPT{\NT{return-type}}~\KWD{with} ~\OPT{\TERMbar}~\OPT{\NT{branches}}~\KWD{end} &&\RNAME{match} \SEPDEF \DEFNT{match-items} \NT{match-item} ~\KWD{,} ~\NT{match-items} \nlsep \NT{match-item} \SEPDEF \DEFNT{match-item} \NTL{constr}{100}~\OPTGR{\KWD{as}~\NT{name}} ~\OPTGR{\KWD{in}~\NTL{constr}{100}} \SEPDEF \DEFNT{return-type} \KWD{return}~\NTL{constr}{100} \SEPDEF \DEFNT{if-item} \NT{constr}~\OPTGR{\OPTGR{\KWD{as}~\NT{name}}~\NT{return-type}} \SEPDEF \DEFNT{branches} \NT{eqn}~\TERMbar~\NT{branches} \nlsep \NT{eqn} \SEPDEF \DEFNT{eqn} \NT{pattern} ~\STARGR{\KWD{,}~\NT{pattern}} ~\KWD{$\Rightarrow$}~\NT{constr} \SEPDEF \DEFNT{pattern} \NT{reference}~\PLUS{\NT{pattern}} &1L~~ & \RNAME{constructor} \nlsep \NT{pattern}~\KWD{as}~\NT{ident} &1L & \RNAME{alias} \nlsep \NT{pattern}~\KWD{\%}~\NT{ident} &1L & \RNAME{scope-change} \nlsep \NT{reference} &0 & \RNAME{pattern-var} \nlsep \KWD{_} &0 & \RNAME{hole} \nlsep \NT{num} &0 \nlsep \KWD{(}~\NT{tuple-pattern}~\KWD{)} \SEPDEF \DEFNT{tuple-pattern} \NT{pattern} \nlsep \NT{tuple-pattern}~\KWD{,}~\NT{pattern} && \RNAME{pair} \end{rules} \subsection{Notations of the prelude (logic and basic arithmetic)} Reserved notations: $$ \begin{array}{l|c} \text{Symbol} & \text{precedence} \\ \hline \infx{,} & 250L \\ \KWD{IF}~\notv~\KWD{then}~\notv~\KWD{else}~\notv & 200R \\ \infx{:} & 100R \\ \infx{\leftrightarrow} & 95N \\ \infx{\rightarrow} & 90R \\ \infx{\vee} & 85R \\ \infx{\wedge} & 80R \\ \tilde{}\notv & 75R \\ \begin{array}[c]{@{}l@{}} \infx{=}\quad \infx{=}\KWD{$:>$}\notv \quad \infx{=}=\notv \quad \infx{\neq} \quad \infx{\neq}\KWD{$:>$}\notv \\ \infx{<}\quad\infx{>} \quad \infx{\leq}\quad\infx{\geq} \quad \infx{<}<\notv \quad \infx{<}\leq\notv \quad \infx{\leq}<\notv \quad \infx{\leq}\leq\notv \end{array} & 70N \\ \infx{+}\quad\infx{-}\quad -\notv & 50L \\ \infx{*}\quad\infx{/}\quad /\notv & 40L \\ \end{array} $$ Existential quantifiers follows the \KWD{forall} notation (with same precedence 200), but only one quantified variable is allowed. 
\begin{rules} \EXTNT{binder-constr} \NT{quantifier-kwd}~\NT{name}~\NT{type-cstr}~\KWD{,}~\NTL{constr}{200} \\ \SEPDEF \DEFNT{quantifier-kwd} \TERM{exists} && \RNAME{ex} \nlsep \TERM{exists2} && \RNAME{ex2} \end{rules} $$ \begin{array}{l|c|l} \text{Symbol} & \text{precedence} \\ \hline \notv+\{\notv\} & 50 & \RNAME{sumor} \\ \{\notv:\notv~|~\notv\} & 0 & \RNAME{sig} \\ \{\notv:\notv~|~\notv \& \notv \} & 0 & \RNAME{sig2} \\ \{\notv:\notv~\&~\notv \} & 0 & \RNAME{sigS} \\ \{\notv:\notv~\&~\notv \& \notv \} & 0 & \RNAME{sigS2} \\ \{\notv\}+\{\notv\} & 0 & \RNAME{sumbool} \\ \end{array} $$ %% Strange: nat + {x:nat|x=x} * nat == ( + ) * \section{Grammar of tactics} \def\tacconstr{\NTL{constr}{9}} \def\taclconstr{\NTL{constr}{200}} Additional symbols are: $$ \TERM{'} ~~ \KWD{;} ~~ \TERM{()} ~~ \TERMbarbar ~~ \TERM{$\vdash$} ~~ \TERM{[} ~~ \TERM{]} ~~ \TERM{$\leftarrow$} $$ Additional reserved keywords are: $$ \KWD{at} ~~ \TERM{using} $$ \subsection{Basic tactics} \begin{rules} \DEFNT{simple-tactic} \TERM{intros}~\TERM{until}~\NT{quantified-hyp} \nlsep \TERM{intros}~\NT{intro-patterns} \nlsep \TERM{intro}~\OPT{\NT{ident}}~\OPTGR{\TERM{after}~\NT{ident}} %% \nlsep \TERM{assumption} \nlsep \TERM{exact}~\tacconstr %% \nlsep \TERM{apply}~\NT{constr-with-bindings} \nlsep \TERM{elim}~\NT{constr-with-bindings}~\OPT{\NT{eliminator}} \nlsep \TERM{elimtype}~\tacconstr \nlsep \TERM{case}~\NT{constr-with-bindings} \nlsep \TERM{casetype}~\tacconstr \nlsep \KWD{fix}~\OPT{\NT{ident}}~\NT{num} \nlsep \KWD{fix}~\NT{ident}~\NT{num}~\KWD{with}~\PLUS{\NT{fix-spec}} \nlsep \KWD{cofix}~\OPT{\NT{ident}} \nlsep \KWD{cofix}~\NT{ident}~\PLUS{\NT{fix-spec}} %% \nlsep \TERM{cut}~\tacconstr \nlsep \TERM{assert}~\tacconstr \nlsep \TERM{assert}~ \TERM{(}~\NT{ident}~\KWD{:}~\taclconstr~\TERM{)} \nlsep \TERM{assert}~ \TERM{(}~\NT{ident}~\KWD{:=}~\taclconstr~\TERM{)} \nlsep \TERM{pose}~\tacconstr \nlsep \TERM{pose}~ \TERM{(}~\NT{ident}~\KWD{:=}~\taclconstr~\TERM{)} \nlsep \TERM{generalize}~\PLUS{\tacconstr} \nlsep \TERM{generalize}~\TERM{dependent}~\tacconstr \nlsep \TERM{set}~\tacconstr~\OPT{\NT{clause}} \nlsep \TERM{set}~ \TERM{(}~\NT{ident}~\KWD{:=}~\taclconstr~\TERM{)}~\OPT{\NT{clause}} \nlsep \TERM{instantiate}~ \TERM{(}~\NT{num}~\TERM{:=}~\taclconstr~\TERM{)}~\OPT{\NT{clause}} %% \nlsep \TERM{specialize}~\OPT{\NT{num}}~\NT{constr-with-bindings} \nlsep \TERM{lapply}~\tacconstr %% \nlsep \TERM{simple}~\TERM{induction}~\NT{quantified-hyp} \nlsep \TERM{induction}~\NT{induction-arg}~\OPT{\NT{with-names}} ~\OPT{\NT{eliminator}} \nlsep \TERM{double}~\TERM{induction}~\NT{quantified-hyp}~\NT{quantified-hyp} \nlsep \TERM{simple}~\TERM{destruct}~\NT{quantified-hyp} \nlsep \TERM{destruct}~\NT{induction-arg}~\OPT{\NT{with-names}} ~\OPT{\NT{eliminator}} \nlsep \TERM{decompose}~\TERM{record}~\tacconstr \nlsep \TERM{decompose}~\TERM{sum}~\tacconstr \nlsep \TERM{decompose}~\TERM{[}~\PLUS{\NT{reference}}~\TERM{]} ~\tacconstr %% \nlsep ... 
\end{rules} \begin{rules} \EXTNT{simple-tactic} \TERM{trivial}~\OPT{\NT{hint-bases}} \nlsep \TERM{auto}~\OPT{\NT{num}}~\OPT{\NT{hint-bases}} %% %%\nlsep \TERM{autotdb}~\OPT{\NT{num}} %%\nlsep \TERM{cdhyp}~\NT{ident} %%\nlsep \TERM{dhyp}~\NT{ident} %%\nlsep \TERM{dconcl} %%\nlsep \TERM{superauto}~\NT{auto-args} \nlsep \TERM{auto}~\OPT{\NT{num}}~\TERM{decomp}~\OPT{\NT{num}} %% \nlsep \TERM{clear}~\PLUS{\NT{ident}} \nlsep \TERM{clearbody}~\PLUS{\NT{ident}} \nlsep \TERM{move}~\NT{ident}~\TERM{after}~\NT{ident} \nlsep \TERM{rename}~\NT{ident}~\TERM{into}~\NT{ident} %% \nlsep \TERM{left}~\OPT{\NT{with-binding-list}} \nlsep \TERM{right}~\OPT{\NT{with-binding-list}} \nlsep \TERM{split}~\OPT{\NT{with-binding-list}} \nlsep \TERM{exists}~\OPT{\NT{binding-list}} \nlsep \TERM{constructor}~\NT{num}~\OPT{\NT{with-binding-list}} \nlsep \TERM{constructor}~\OPT{\NT{tactic}} %% \nlsep \TERM{reflexivity} \nlsep \TERM{symmetry}~\OPTGR{\KWD{in}~\NT{ident}} \nlsep \TERM{transitivity}~\tacconstr %% \nlsep \NT{inversion-kwd}~\NT{quantified-hyp}~\OPT{\NT{with-names}}~\OPT{\NT{clause}} \nlsep \TERM{dependent}~\NT{inversion-kwd}~\NT{quantified-hyp} ~\OPT{\NT{with-names}}~\OPTGR{\KWD{with}~\tacconstr} \nlsep \TERM{inversion}~\NT{quantified-hyp}~\TERM{using}~\tacconstr~\OPT{\NT{clause}} %% \nlsep \NT{red-expr}~\OPT{\NT{clause}} \nlsep \TERM{change}~\NT{conversion}~\OPT{\NT{clause}} \SEPDEF \DEFNT{red-expr} \TERM{red} ~\mid~ \TERM{hnf} ~\mid~ \TERM{compute} \nlsep \TERM{simpl}~\OPT{\NT{pattern-occ}} \nlsep \TERM{cbv}~\PLUS{\NT{red-flag}} \nlsep \TERM{lazy}~\PLUS{\NT{red-flag}} \nlsep \TERM{unfold}~\NT{unfold-occ}~\STARGR{\KWD{,}~\NT{unfold-occ}} \nlsep \TERM{fold}~\PLUS{\tacconstr} \nlsep \TERM{pattern}~\NT{pattern-occ}~\STARGR{\KWD{,}~\NT{pattern-occ}} \SEPDEF \DEFNT{conversion} \NT{pattern-occ}~\KWD{with}~\tacconstr \nlsep \tacconstr \SEPDEF \DEFNT{inversion-kwd} \TERM{inversion} ~\mid~ \TERM{inversion_clear} ~\mid~ \TERM{simple}~\TERM{inversion} \end{rules} Conflicts exist between integers and constrs. 
\begin{rules} \DEFNT{quantified-hyp} \NT{int}~\mid~\NT{ident} \SEPDEF \DEFNT{induction-arg} \NT{int}~\mid~\tacconstr \SEPDEF \DEFNT{fix-spec} \KWD{(}~\NT{ident}~\STAR{\NT{binder}}~\OPT{\NT{annot}} ~\KWD{:}~\taclconstr~\KWD{)} \SEPDEF \DEFNT{intro-patterns} \STAR{\NT{intro-pattern}} \SEPDEF \DEFNT{intro-pattern} \NT{name} \nlsep \TERM{[}~\NT{intro-patterns}~\STARGR{\TERMbar~\NT{intro-patterns}} ~\TERM{]} \nlsep \KWD{(}~\NT{intro-pattern}~\STARGR{\KWD{,}~\NT{intro-pattern}} ~\KWD{)} \SEPDEF \DEFNT{with-names} % \KWD{as}~\TERM{[}~\STAR{\NT{ident}}~\STARGR{\TERMbar~\STAR{\NT{ident}}} % ~\TERM{]} \KWD{as}~\NT{intro-pattern} \SEPDEF \DEFNT{eliminator} \TERM{using}~\NT{constr-with-bindings} \SEPDEF \DEFNT{constr-with-bindings} % dangling ``with'' of ``fix'' can conflict with ``with'' \tacconstr~\OPT{\NT{with-binding-list}} \SEPDEF \DEFNT{with-binding-list} \KWD{with}~\NT{binding-list} \SEPDEF \DEFNT{binding-list} \PLUS{\tacconstr} \nlsep \PLUS{\NT{simple-binding}} \SEPDEF \DEFNT{simple-binding} \KWD{(}~\NT{quantified-hyp}~\KWD{:=}~\taclconstr~\KWD{)} \SEPDEF \DEFNT{red-flag} \TERM{beta} ~\mid~ \TERM{iota} ~\mid~ \TERM{zeta} ~\mid~ \TERM{delta} ~\mid~ \TERM{delta}~\OPT{\TERM{-}}~\TERM{[}~\PLUS{\NT{reference}}~\TERM{]} \SEPDEF \DEFNT{clause} \KWD{in}~\TERM{*} \nlsep \KWD{in}~\TERM{*}~\KWD{$\vdash$}~\OPT{\NT{concl-occ}} \nlsep \KWD{in}~\OPT{\NT{hyp-ident-list}} ~\KWD{$\vdash$} ~\OPT{\NT{concl-occ}} \nlsep \KWD{in}~\OPT{\NT{hyp-ident-list}} \SEPDEF \DEFNT{hyp-ident-list} \NT{hyp-ident} \nlsep \NT{hyp-ident}~\KWD{,}~\NT{hyp-ident-list} \SEPDEF \DEFNT{hyp-ident} \NT{ident} \nlsep \KWD{(}~\TERM{type}~\TERM{of}~\NT{ident}~\KWD{)} \nlsep \KWD{(}~\TERM{value}~\TERM{of}~\NT{ident}~\KWD{)} \SEPDEF \DEFNT{concl-occ} \TERM{*} ~\NT{occurrences} \SEPDEF \DEFNT{pattern-occ} \tacconstr ~\NT{occurrences} \SEPDEF \DEFNT{unfold-occ} \NT{reference}~\NT{occurrences} \SEPDEF \DEFNT{occurrences} ~\OPTGR{\KWD{at}~\PLUS{\NT{int}}} \SEPDEF \DEFNT{hint-bases} \KWD{with}~\TERM{*} \nlsep \KWD{with}~\PLUS{\NT{ident}} \SEPDEF \DEFNT{auto-args} \OPT{\NT{num}}~\OPTGR{\TERM{adding}~\TERM{[}~\PLUS{\NT{reference}} ~\TERM{]}}~\OPT{\TERM{destructuring}}~\OPTGR{\TERM{using}~\TERM{tdb}} \end{rules} \subsection{Ltac} %% Currently, there are conflicts with keyword \KWD{in}: in the following, %% has the keyword to be associated to \KWD{let} or to tactic \TERM{simpl} ? 
%% \begin{center} %% \texttt{let x := simpl in ...} %% \end{center} \begin{rules} \DEFNT{tactic} \NT{tactic} ~\KWD{;} ~\NT{tactic} &5 &\RNAME{Then} \nlsep \NT{tactic} ~\KWD{;}~\TERM{[} ~\OPT{\NT{tactic-seq}} ~\TERM{]} &5 &\RNAME{Then-seq} %% \nlsep \TERM{try} ~\NT{tactic} &3R &\RNAME{Try} \nlsep \TERM{do} ~\NT{int-or-var} ~\NT{tactic} \nlsep \TERM{repeat} ~\NT{tactic} \nlsep \TERM{progress} ~\NT{tactic} \nlsep \TERM{info} ~\NT{tactic} \nlsep \TERM{abstract}~\NTL{tactic}{2}~\OPTGR{\TERM{using}~\NT{ident}} %% \nlsep \NT{tactic} ~\TERMbarbar ~\NT{tactic} &2R &\RNAME{Orelse} %% \nlsep \KWD{fun} ~\PLUS{\NT{name}} ~\KWD{$\Rightarrow$} ~\NT{tactic} &1 &\RNAME{Fun-tac} \nlsep \KWD{let} ~\NT{let-clauses} ~\KWD{in} ~\NT{tactic} \nlsep \KWD{let} ~\TERM{rec} ~\NT{rec-clauses} ~\KWD{in} ~\NT{tactic} \nlsep \KWD{match}~\OPT{\TERM{reverse}}~\TERM{goal}~\KWD{with} ~\OPT{\TERMbar}~\OPT{\NT{match-goal-rules}} ~\KWD{end} \nlsep \KWD{match} ~\NT{tactic} ~\KWD{with} ~\OPT{\TERMbar}~\OPT{\NT{match-rules}} ~\KWD{end} \nlsep \TERM{first}~\TERM{[} ~\NT{tactic-seq} ~\TERM{]} \nlsep \TERM{solve}~\TERM{[} ~\NT{tactic-seq} ~\TERM{]} \nlsep \TERM{idtac} \nlsep \TERM{fail} ~\OPT{\NT{num}} ~\OPT{\NT{string}} \nlsep \TERM{constr}~\KWD{:}~\tacconstr \nlsep \TERM{ipattern}~\KWD{:}~\NT{intro-pattern} \nlsep \NT{term-ltac} \nlsep \NT{reference}~\STAR{\NT{tactic-arg}} &&\RNAME{call-tactic} \nlsep \NT{simple-tactic} %% \nlsep \NT{tactic-atom} &0 &\RNAME{atomic} \nlsep \KWD{(} ~\NT{tactic} ~\KWD{)} \SEPDEF \DEFNT{tactic-arg} \TERM{ltac}~\KWD{:}~\NTL{tactic}{0} \nlsep \TERM{ipattern}~\KWD{:}~\NT{intro-pattern} \nlsep \NT{term-ltac} \nlsep \NT{tactic-atom} \nlsep \tacconstr \SEPDEF \DEFNT{term-ltac} \TERM{fresh} ~\OPT{\NT{string}} \nlsep \TERM{context} ~\NT{ident} ~\TERM{[} ~\taclconstr ~\TERM{]} \nlsep \TERM{eval} ~\NT{red-expr} ~\KWD{in} ~\tacconstr \nlsep \TERM{type} ~\tacconstr \SEPDEF \DEFNT{tactic-atom} \NT{reference} \nlsep \TERM{()} \SEPDEF \DEFNT{tactic-seq} \NT{tactic} ~\TERMbar ~\NT{tactic-seq} \nlsep \NT{tactic} \end{rules} \begin{rules} \DEFNT{let-clauses} \NT{let-clause} ~\STARGR{\KWD{with}~\NT{let-clause}} \SEPDEF \DEFNT{let-clause} \NT{ident} ~\STAR{\NT{name}} ~\KWD{:=} ~\NT{tactic} \SEPDEF \DEFNT{rec-clauses} \NT{rec-clause} ~\KWD{with} ~\NT{rec-clauses} \nlsep \NT{rec-clause} \SEPDEF \DEFNT{rec-clause} \NT{ident} ~\PLUS{\NT{name}} ~\KWD{:=} ~\NT{tactic} \SEPDEF \DEFNT{match-goal-rules} \NT{match-goal-rule} \nlsep \NT{match-goal-rule} ~\TERMbar ~\NT{match-goal-rules} \SEPDEF \DEFNT{match-goal-rule} \NT{match-hyps-list} ~\TERM{$\vdash$} ~\NT{match-pattern} ~\KWD{$\Rightarrow$} ~\NT{tactic} \nlsep \KWD{[}~\NT{match-hyps-list} ~\TERM{$\vdash$} ~\NT{match-pattern} ~\KWD{]}~\KWD{$\Rightarrow$} ~\NT{tactic} \nlsep \KWD{_} ~\KWD{$\Rightarrow$} ~\NT{tactic} \SEPDEF \DEFNT{match-hyps-list} \NT{match-hyps} ~\KWD{,} ~\NT{match-hyps-list} \nlsep \NT{match-hyps} \SEPDEF \DEFNT{match-hyps} \NT{name} ~\KWD{:} ~\NT{match-pattern} \SEPDEF \DEFNT{match-rules} \NT{match-rule} \nlsep \NT{match-rule} ~\TERMbar ~\NT{match-rules} \SEPDEF \DEFNT{match-rule} \NT{match-pattern} ~\KWD{$\Rightarrow$} ~\NT{tactic} \nlsep \KWD{_} ~\KWD{$\Rightarrow$} ~\NT{tactic} \SEPDEF \DEFNT{match-pattern} \TERM{context}~\OPT{\NT{ident}} ~\TERM{[} ~\NT{constr-pattern} ~\TERM{]} &&\RNAME{subterm} \nlsep \NT{constr-pattern} \SEPDEF \DEFNT{constr-pattern} \tacconstr \end{rules} \subsection{Other tactics} \begin{rules} \EXTNT{simple-tactic} \TERM{rewrite} ~\NT{orient} ~\NT{constr-with-bindings} ~\OPTGR{\KWD{in}~\NT{ident}} \nlsep \TERM{replace} ~\tacconstr 
~\KWD{with} ~\tacconstr ~\OPTGR{\KWD{in}~\NT{ident}} \nlsep \TERM{replace} ~\OPT{\NT{orient}} ~\tacconstr ~\OPTGR{\KWD{in}~\NT{ident}} \nlsep \TERM{simplify_eq} ~\OPT{\NT{quantified-hyp}} \nlsep \TERM{discriminate} ~\OPT{\NT{quantified-hyp}} \nlsep \TERM{injection} ~\OPT{\NT{quantified-hyp}} \nlsep \TERM{conditional}~\NT{tactic}~\TERM{rewrite}~\NT{orient} ~\NT{constr-with-bindings}~\OPTGR{\KWD{in}~\NT{ident}} \nlsep \TERM{dependent}~\TERM{rewrite}~\NT{orient}~\NT{ident} \nlsep \TERM{cutrewrite}~\NT{orient}~\tacconstr ~\OPTGR{\KWD{in}~\NT{ident}} \nlsep \TERM{absurd} ~\tacconstr \nlsep \TERM{contradiction} \nlsep \TERM{autorewrite}~\NT{hint-bases}~\OPTGR{\KWD{using}~\NT{tactic}} \nlsep \TERM{refine}~\tacconstr \nlsep \TERM{setoid_replace} ~\tacconstr ~\KWD{with} ~\tacconstr \nlsep \TERM{setoid_rewrite} ~\NT{orient} ~\tacconstr \nlsep \TERM{subst} ~\STAR{\NT{ident}} %% eqdecide.ml4 \nlsep \TERM{decide}~\TERM{equality} ~\OPTGR{\tacconstr~\tacconstr} \nlsep \TERM{compare}~\tacconstr~\tacconstr %% eauto \nlsep \TERM{eexact}~\tacconstr \nlsep \TERM{eapply}~\NT{constr-with-bindings} \nlsep \TERM{prolog}~\TERM{[}~\STAR{\tacconstr}~\TERM{]} ~\NT{quantified-hyp} \nlsep \TERM{eauto}~\OPT{\NT{quantified-hyp}}~\OPT{\NT{quantified-hyp}} ~\NT{hint-bases} \nlsep \TERM{eautod}~\OPT{\NT{quantified-hyp}}~\OPT{\NT{quantified-hyp}} ~\NT{hint-bases} %% tauto \nlsep \TERM{tauto} \nlsep \TERM{simplif} \nlsep \TERM{intuition}~\OPT{\NTL{tactic}{0}} \nlsep \TERM{linearintuition}~\OPT{\NT{num}} %% plugins/cc \nlsep \TERM{cc} %% plugins/field \nlsep \TERM{field}~\STAR{\tacconstr} %% plugins/firstorder \nlsep \TERM{ground}~\OPT{\NTL{tactic}{0}} \nlsep \TERM{ground}~\OPT{\NTL{tactic}{0}}~\KWD{with}~\PLUS{\NT{reference}} \nlsep \TERM{ground}~\OPT{\NTL{tactic}{0}}~\KWD{using}~\PLUS{\NT{ident}} %%\nlsep \TERM{gtauto} \nlsep \TERM{gintuition}~\OPT{\NTL{tactic}{0}} %% plugins/fourier \nlsep \TERM{fourierZ} %% plugins/funind \nlsep \TERM{functional}~\TERM{induction}~\tacconstr~\PLUS{\tacconstr} %% plugins/jprover \nlsep \TERM{jp}~\OPT{\NT{num}} %% plugins/omega \nlsep \TERM{omega} %% plugins/ring \nlsep \TERM{quote}~\NT{ident}~\OPTGR{\KWD{[}~\PLUS{\NT{ident}}~\KWD{]}} \nlsep \TERM{ring}~\STAR{\tacconstr} %% plugins/romega \nlsep \TERM{romega} \SEPDEF \DEFNT{orient} \KWD{$\rightarrow$}~\mid~\KWD{$\leftarrow$} \end{rules} \section{Grammar of commands} New symbols: $$ \TERM{.} ~~ \TERM{..} ~~ \TERM{\tt >->} ~~ \TERM{:$>$} ~~ \TERM{$<$:} $$ New keyword: $$ \KWD{where} $$ \subsection{Classification of commands} \begin{rules} \DEFNT{vernac} \TERM{Time}~\NT{vernac} &2~~ &\RNAME{Timing} %% \nlsep \NT{gallina}~\TERM{.} &1 \nlsep \NT{command}~\TERM{.} \nlsep \NT{syntax}~\TERM{.} \nlsep \TERM{[}~\PLUS{\NT{vernac}}~\TERM{]}~\TERM{.} %% \nlsep \OPTGR{\NT{num}~\KWD{:}}~\NT{subgoal-command}~\TERM{.} ~~~&0 \SEPDEF \DEFNT{subgoal-command} \NT{check-command} \nlsep %\OPT{\TERM{By}}~ \NT{tactic}~\OPT{\KWD{..}} \end{rules} \subsection{Gallina and extensions} \begin{rules} \DEFNT{gallina} \NT{thm-token}~\NT{ident}~\STAR{\NT{binder-let}}~\KWD{:}~\NT{constr} \nlsep \NT{def-token}~\NT{ident}~\NT{def-body} \nlsep \NT{assum-token}~\NT{assum-list} \nlsep \NT{finite-token}~\NT{inductive-definition} ~\STARGR{\KWD{with}~\NT{inductive-definition}} \nlsep \TERM{Fixpoint}~\NT{fix-decl}~\STARGR{\KWD{with}~\NT{fix-decl}} \nlsep \TERM{CoFixpoint}~\NT{fix-decl}~\STARGR{\KWD{with}~\NT{fix-decl}} \nlsep \TERM{Scheme}~\NT{scheme}~\STARGR{\KWD{with}~\NT{scheme}} %% Extension: record \nlsep \NT{record-tok}~\OPT{\TERM{$>$}}~\NT{ident}~\STAR{\NT{binder-let}} 
~\KWD{:}~\NT{constr}~\KWD{:=} ~\OPT{\NT{ident}}~\KWD{\{}~\NT{field-list}~\KWD{\}} \nlsep \TERM{Ltac}~\NT{ltac-def}~\STARGR{~\TERM{with}~\NT{ltac-def}} \end{rules} \begin{rules} \DEFNT{thm-token} \TERM{Theorem} ~\mid~ \TERM{Lemma} ~\mid~ \TERM{Fact} ~\mid~ \TERM{Remark} \SEPDEF \DEFNT{def-token} \TERM{Definition} ~\mid~ \TERM{Let} ~\mid~ \OPT{\TERM{Local}}~\TERM{SubClass} \SEPDEF \DEFNT{assum-token} \TERM{Hypothesis} ~\mid~ \TERM{Variable} ~\mid~ \TERM{Axiom} ~\mid~ \TERM{Parameter} \SEPDEF \DEFNT{finite-token} \TERM{Inductive} ~\mid~ \TERM{CoInductive} \SEPDEF \DEFNT{record-tok} \TERM{Record} ~\mid~ \TERM{Structure} \end{rules} \begin{rules} \DEFNT{def-body} \STAR{\NT{binder-let}}~\NT{type-cstr}~\KWD{:=} ~\OPT{\NT{reduce}}~\NT{constr} \nlsep \STAR{\NT{binder-let}}~\KWD{:}~\NT{constr} \SEPDEF \DEFNT{reduce} \TERM{Eval}~\NT{red-expr}~\KWD{in} \SEPDEF \DEFNT{ltac-def} \NT{ident}~\STAR{\NT{name}}~\KWD{:=}~\NT{tactic} \SEPDEF \DEFNT{rec-definition} \NT{fix-decl}~\OPT{\NT{decl-notation}} \SEPDEF \DEFNT{inductive-definition} \OPT{\NT{string}}~\NT{ident}~\STAR{\NT{binder-let}}~\KWD{:} ~\NT{constr}~\KWD{:=} ~\OPT{\TERMbar}~\OPT{\NT{constructor-list}} ~\OPT{\NT{decl-notation}} \SEPDEF \DEFNT{constructor-list} \NT{constructor}~\TERMbar~\NT{constructor-list} \nlsep \NT{constructor} \SEPDEF \DEFNT{constructor} \NT{ident}~\STAR{\NT{binder-let}}\OPTGR{\NT{coerce-kwd}~\NT{constr}} \SEPDEF \DEFNT{decl-notation} \TERM{where}~\NT{string}~\TERM{:=}~\NT{constr} \SEPDEF \DEFNT{field-list} \NT{field}~\KWD{;}~\NT{field-list} \nlsep \NT{field} \SEPDEF \DEFNT{field} \NT{ident}~\OPTGR{\NT{coerce-kwd}~\NT{constr}} \nlsep \NT{ident}~\NT{type-cstr-coe}~\KWD{:=}~\NT{constr} \SEPDEF \DEFNT{assum-list} \PLUS{\GR{\KWD{(}~\NT{simple-assum-coe}~\KWD{)}}} \nlsep \NT{simple-assum-coe} \SEPDEF \DEFNT{simple-assum-coe} \PLUS{\NT{ident}}~\NT{coerce-kwd}~\NT{constr} \SEPDEF \DEFNT{coerce-kwd} \TERM{:$>$} ~\mid~ \KWD{:} \SEPDEF \DEFNT{type-cstr-coe} \OPTGR{\NT{coerce-kwd}~\NT{constr}} \SEPDEF \DEFNT{scheme} \NT{ident}~\KWD{:=}~\NT{dep-scheme}~\KWD{for}~\NT{reference} ~\TERM{Sort}~\NT{sort} \SEPDEF \DEFNT{dep-scheme} \TERM{Induction}~\mid~\TERM{Minimality} \end{rules} \subsection{Modules and sections} \begin{rules} \DEFNT{gallina} \TERM{Module}~\NT{ident}~\STAR{\NT{mbinder}}~\OPT{\NT{of-mod-type}} ~\OPTGR{\KWD{:=}~\NT{mod-expr}} \nlsep \TERM{Module}~\KWD{Type}~\NT{ident}~\STAR{\NT{mbinder}} ~\OPTGR{\KWD{:=}~\NT{mod-type}} \nlsep \TERM{Declare}~\TERM{Module}~\NT{ident}~\STAR{\NT{mbinder}} ~\OPT{\NT{of-mod-type}} ~\OPTGR{\KWD{:=}~\NT{mod-expr}} \nlsep \TERM{Section}~\NT{ident} \nlsep \TERM{Chapter}~\NT{ident} \nlsep \TERM{End}~\NT{ident} %% \nlsep \TERM{Require}~\OPT{\NT{export-token}}~\OPT{\NT{specif-token}} ~\PLUS{\NT{reference}} \nlsep \TERM{Require}~\OPT{\NT{export-token}}~\OPT{\NT{specif-token}} ~\NT{string} \nlsep \TERM{Import}~\PLUS{\NT{reference}} \nlsep \TERM{Export}~\PLUS{\NT{reference}} \SEPDEF \DEFNT{export-token} \TERM{Import} ~\mid~ \TERM{Export} \SEPDEF \DEFNT{specif-token} \TERM{Implementation} ~\mid~ \TERM{Specification} \SEPDEF \DEFNT{mod-expr} \NT{reference} \nlsep \NT{mod-expr}~\NT{mod-expr} & L \nlsep \KWD{(}~\NT{mod-expr}~\KWD{)} \SEPDEF \DEFNT{mod-type} \NT{reference} \nlsep \NT{mod-type}~\KWD{with}~\NT{with-declaration} \SEPDEF \DEFNT{with-declaration} %on forcera les ( ) %si exceptionnellemt %un fixpoint ici \TERM{Definition}~\NT{ident}~\KWD{:=}~\NTL{constr}{} %{100} \nlsep \TERM{Module}~\NT{ident}~\KWD{:=}~\NT{reference} \SEPDEF \DEFNT{of-mod-type} \KWD{:}~\NT{mod-type} \nlsep \TERM{$<$:}~\NT{mod-type} 
\SEPDEF \DEFNT{mbinder} \KWD{(}~\PLUS{\NT{ident}}~\KWD{:}~\NT{mod-type}~\KWD{)} \end{rules} \begin{rules} \DEFNT{gallina} \TERM{Transparent}~\PLUS{\NT{reference}} \nlsep \TERM{Opaque}~\PLUS{\NT{reference}} \nlsep \TERM{Canonical}~\TERM{Structure}~\NT{reference}~\OPT{\NT{def-body}} \nlsep \TERM{Coercion}~\OPT{\TERM{Local}}~\NT{reference}~\NT{def-body} \nlsep \TERM{Coercion}~\OPT{\TERM{Local}}~\NT{reference}~\KWD{:} ~\NT{class-rawexpr}~\TERM{$>->$}~\NT{class-rawexpr} \nlsep \TERM{Identity}~\TERM{Coercion}~\OPT{\TERM{Local}}~\NT{ident}~\KWD{:} ~\NT{class-rawexpr}~\TERM{$>->$}~\NT{class-rawexpr} \nlsep \TERM{Implicit}~\TERM{Arguments}~\NT{reference}~\TERM{[}~\STAR{\NT{num}}~\TERM{]} \nlsep \TERM{Implicit}~\TERM{Arguments}~\NT{reference} \nlsep \TERM{Implicit}~\KWD{Type}~\PLUS{\NT{ident}}~\KWD{:}~\NT{constr} \SEPDEF \DEFNT{command} \TERM{Comments}~\STAR{\NT{comment}} \nlsep \TERM{Pwd} \nlsep \TERM{Cd}~\OPT{\NT{string}} \nlsep \TERM{Drop} ~\mid~ \TERM{ProtectedLoop} ~\mid~\TERM{Quit} %% \nlsep \TERM{Load}~\OPT{\TERM{Verbose}}~\NT{ident} \nlsep \TERM{Load}~\OPT{\TERM{Verbose}}~\NT{string} \nlsep \TERM{Declare}~\TERM{ML}~\TERM{Module}~\PLUS{\NT{string}} \nlsep \TERM{Locate}~\NT{locatable} \nlsep \TERM{Add}~\OPT{\TERM{Rec}}~\TERM{LoadPath}~\NT{string}~\OPT{\NT{as-dirpath}} \nlsep \TERM{Remove}~\TERM{LoadPath}~\NT{string} \nlsep \TERM{Add}~\OPT{\TERM{Rec}}~\TERM{ML}~\TERM{Path}~\NT{string} %% \nlsep \KWD{Type}~\NT{constr} \nlsep \TERM{Print}~\NT{printable} \nlsep \TERM{Print}~\NT{reference} \nlsep \TERM{Inspect}~\NT{num} \nlsep \TERM{About}~\NT{reference} %% \nlsep \TERM{Search}~\NT{reference}~\OPT{\NT{in-out-modules}} \nlsep \TERM{SearchPattern}~\NT{constr-pattern}~\OPT{\NT{in-out-modules}} \nlsep \TERM{SearchRewrite}~\NT{constr-pattern}~\OPT{\NT{in-out-modules}} \nlsep \TERM{SearchAbout}~\NT{reference}~\OPT{\NT{in-out-modules}} \nlsep \TERM{SearchAbout}~\TERM{[}~\STAR{\NT{ref-or-string}}~\TERM{]}\OPT{\NT{in-out-modules}} \nlsep \KWD{Set}~\NT{ident}~\OPT{\NT{opt-value}} \nlsep \TERM{Unset}~\NT{ident} \nlsep \KWD{Set}~\NT{ident}~\NT{ident}~\OPT{\NT{opt-value}} \nlsep \KWD{Set}~\NT{ident}~\NT{ident}~\PLUS{\NT{opt-ref-value}} \nlsep \TERM{Unset}~\NT{ident}~\NT{ident}~\STAR{\NT{opt-ref-value}} %% \nlsep \TERM{Print}~\TERM{Table}~\NT{ident}~\NT{ident} \nlsep \TERM{Print}~\TERM{Table}~\NT{ident} \nlsep \TERM{Add}~\NT{ident}~\OPT{\NT{ident}}~\PLUS{\NT{opt-ref-value}} %% \nlsep \TERM{Test}~\NT{ident}~\OPT{\NT{ident}}~\STAR{\NT{opt-ref-value}} %% \nlsep \TERM{Remove}~\NT{ident}~\OPT{\NT{ident}}~\PLUS{\NT{opt-ref-value}} \SEPDEF \DEFNT{check-command} \TERM{Eval}~\NT{red-expr}~\KWD{in}~\NT{constr} \nlsep \TERM{Check}~\NT{constr} \SEPDEF \DEFNT{ref-or-string} \NT{reference} \nlsep \NT{string} \end{rules} \begin{rules} \DEFNT{printable} \TERM{Term}~\NT{reference} \nlsep \TERM{All} \nlsep \TERM{Section}~\NT{reference} \nlsep \TERM{Grammar}~\NT{ident} \nlsep \TERM{LoadPath} \nlsep \TERM{Module}~\OPT{\KWD{Type}}~\NT{reference} \nlsep \TERM{Modules} \nlsep \TERM{ML}~\TERM{Path} \nlsep \TERM{ML}~\TERM{Modules} \nlsep \TERM{Graph} \nlsep \TERM{Classes} \nlsep \TERM{Coercions} \nlsep \TERM{Coercion}~\TERM{Paths}~\NT{class-rawexpr}~\NT{class-rawexpr} \nlsep \TERM{Tables} % \nlsep \TERM{Proof}~\NT{reference} % Obsolete, useful in V6.3 ?? 
\nlsep \TERM{Hint}~\OPT{\NT{reference}} \nlsep \TERM{Hint}~\TERM{*} \nlsep \TERM{HintDb}~\NT{ident} \nlsep \TERM{Scopes} \nlsep \TERM{Scope}~\NT{ident} \nlsep \TERM{Visibility}~\OPT{\NT{ident}} \nlsep \TERM{Implicit}~\NT{reference} \SEPDEF \DEFNT{class-rawexpr} \TERM{Funclass}~\mid~\TERM{Sortclass}~\mid~\NT{reference} \SEPDEF \DEFNT{locatable} \NT{reference} \nlsep \TERM{File}~\NT{string} \nlsep \TERM{Library}~\NT{reference} \nlsep \NT{string} \SEPDEF \DEFNT{opt-value} \NT{ident} ~\mid~ \NT{string} \SEPDEF \DEFNT{opt-ref-value} \NT{reference} ~\mid~ \NT{string} \SEPDEF \DEFNT{as-dirpath} \KWD{as}~\NT{reference} \SEPDEF \DEFNT{in-out-modules} \TERM{inside}~\PLUS{\NT{reference}} \nlsep \TERM{outside}~\PLUS{\NT{reference}} \SEPDEF \DEFNT{comment} \NT{constr} \nlsep \NT{string} \end{rules} \subsection{Other commands} %% TODO: min/maj pas a jour \begin{rules} \EXTNT{command} \TERM{Debug}~\TERM{On} \nlsep \TERM{Debug}~\TERM{Off} %% TODO: vernac \nlsep \TERM{Add}~\TERM{setoid}~\tacconstr~\tacconstr~\tacconstr \nlsep \TERM{Add}~\TERM{morphism}~\tacconstr~\KWD{:}~\NT{ident} \nlsep \TERM{Derive}~\TERM{inversion_clear} ~\OPT{\NT{num}}~\NT{ident}~\NT{ident} \nlsep \TERM{Derive}~\TERM{inversion_clear} ~\NT{ident}~\KWD{with}~\tacconstr~\OPTGR{\TERM{Sort}~\NT{sort}} \nlsep \TERM{Derive}~\TERM{inversion} ~\OPT{\NT{num}}~\NT{ident}~\NT{ident} \nlsep \TERM{Derive}~\TERM{inversion} ~\NT{ident}~\KWD{with}~\tacconstr~\OPTGR{\TERM{Sort}~\NT{sort}} \nlsep \TERM{Derive}~\TERM{dependent}~\TERM{inversion_clear} ~\NT{ident}~\KWD{with}~\tacconstr~\OPTGR{\TERM{Sort}~\NT{sort}} \nlsep \TERM{Derive}~\TERM{dependent}~\TERM{inversion} ~\NT{ident}~\KWD{with}~\tacconstr~\OPTGR{\TERM{Sort}~\NT{sort}} %% Correctness: obsolete ? %\nlsep Correctness %\nlsep Global Variable %% TODO: extraction \nlsep Extraction ... 
%% field \nlsep \TERM{Add}~\TERM{Field}~\tacconstr~\tacconstr~\tacconstr ~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr~\tacconstr~\OPT{\NT{minus-div}} %% funind \nlsep \TERM{Functional}~\TERM{Scheme}~\NT{ident}~\KWD{:=} ~\TERM{Induction}~\KWD{for}~\tacconstr ~\OPTGR{\KWD{with}~\PLUS{\tacconstr}} %% ring \nlsep \TERM{Add}~\TERM{Ring}~\tacconstr~\tacconstr~\tacconstr ~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr~\tacconstr~\KWD{[}~\PLUS{\tacconstr}~\KWD{]} \nlsep \TERM{Add}~\TERM{Semi}~\TERM{Ring}~\tacconstr~\tacconstr~\tacconstr ~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr~\KWD{[}~\PLUS{\tacconstr}~\KWD{]} \nlsep \TERM{Add}~\TERM{Abstract}~\TERM{Ring}~\tacconstr~\tacconstr~\tacconstr ~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr~\tacconstr \nlsep \TERM{Add}~\TERM{Abstract}~\TERM{Semi}~\TERM{Ring}~\tacconstr ~\tacconstr~\tacconstr~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr \nlsep \TERM{Add}~\TERM{Setoid}~\TERM{Ring}~\tacconstr~\tacconstr~\tacconstr ~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr~\tacconstr~\tacconstr~\tacconstr~\tacconstr~\tacconstr ~\tacconstr~\KWD{[}~\PLUS{\tacconstr}~\KWD{]} \nlsep \TERM{Add}~\TERM{Setoid}~\TERM{Semi}~\TERM{Ring}~\tacconstr~\tacconstr ~\tacconstr~\tacconstr~\tacconstr~\tacconstr \nlcont~~~~\tacconstr~\tacconstr~\tacconstr~\tacconstr~\tacconstr ~\KWD{[}~\PLUS{tacconstr}~\KWD{]} \SEPDEF \DEFNT{minus-div} \KWD{with}~\NT{minus-arg}~\NT{div-arg} \nlsep \KWD{with}~\NT{div-arg}~\NT{minus-arg} \SEPDEF \DEFNT{minus-arg} \TERM{minus}~\KWD{:=}~\tacconstr \SEPDEF \DEFNT{div-arg} \TERM{div}~\KWD{:=}~\tacconstr \end{rules} \begin{rules} \EXTNT{command} \TERM{Write}~\TERM{State}~\NT{ident} \nlsep \TERM{Write}~\TERM{State}~\NT{string} \nlsep \TERM{Restore}~\TERM{State}~\NT{ident} \nlsep \TERM{Restore}~\TERM{State}~\NT{string} \nlsep \TERM{Reset}~\NT{ident} \nlsep \TERM{Reset}~\TERM{Initial} \nlsep \TERM{Back}~\OPT{\NT{num}} \end{rules} \subsection{Proof-editing commands} \begin{rules} \EXTNT{command} \TERM{Goal}~\NT{constr} \nlsep \TERM{Proof}~\OPT{\NT{constr}} \nlsep \TERM{Proof}~\KWD{with}~\NT{tactic} \nlsep \TERM{Abort}~\OPT{\TERM{All}} \nlsep \TERM{Abort}~\NT{ident} \nlsep \TERM{Existential}~\NT{num}~\KWD{:=}~\NT{constr-body} \nlsep \TERM{Qed} \nlsep \TERM{Save}~\OPTGR{\NT{thm-token}~\NT{ident}} \nlsep \TERM{Defined}~\OPT{\NT{ident}} \nlsep \TERM{Suspend} \nlsep \TERM{Resume}~\OPT{\NT{ident}} \nlsep \TERM{Restart} \nlsep \TERM{Undo}~\OPT{\NT{num}} \nlsep \TERM{Focus}~\OPT{\NT{num}} \nlsep \TERM{Unfocus} \nlsep \TERM{Show}~\OPT{\NT{num}} \nlsep \TERM{Show}~\TERM{Implicit}~\TERM{Arguments}~\OPT{\NT{num}} \nlsep \TERM{Show}~\TERM{Node} \nlsep \TERM{Show}~\TERM{Script} \nlsep \TERM{Show}~\TERM{Existentials} \nlsep \TERM{Show}~\TERM{Tree} \nlsep \TERM{Show}~\TERM{Conjecture} \nlsep \TERM{Show}~\TERM{Proof} \nlsep \TERM{Show}~\TERM{Intro} \nlsep \TERM{Show}~\TERM{Intros} %% Correctness: obsolete ? 
%%\nlsep \TERM{Show}~\TERM{Programs} \nlsep \TERM{Hint}~\OPT{\TERM{Local}}~\NT{hint}~\OPT{\NT{inbases}} %% PrintConstr not documented \end{rules} \begin{rules} \DEFNT{constr-body} \NT{type-cstr}~\KWD{:=}~\NT{constr} \SEPDEF \DEFNT{hint} \TERM{Resolve}~\PLUS{\NTL{constr}{9}} \nlsep \TERM{Immediate}~\PLUS{\NTL{constr}{9}} \nlsep \TERM{Unfold}~\PLUS{\NT{reference}} \nlsep \TERM{Constructors}~\PLUS{\NT{reference}} \nlsep \TERM{Extern}~\NT{num}~\NT{constr}~\KWD{$\Rightarrow$}~\NT{tactic} \nlsep \TERM{Destruct}~\NT{ident}~\KWD{:=}~\NT{num}~\NT{destruct-loc} ~\NT{constr}~\KWD{$\Rightarrow$}~\NT{tactic} \nlsep \TERM{Rewrite}~\NT{orient}~\PLUS{\NTL{constr}{9}} ~\OPTGR{\KWD{using}~\NT{tactic}} \SEPDEF \DEFNT{inbases} \KWD{:}~\PLUS{\NT{ident}} \SEPDEF \DEFNT{destruct-loc} \TERM{Conclusion} \nlsep \OPT{\TERM{Discardable}}~\TERM{Hypothesis} \end{rules} \subsection{Syntax extensions} \begin{rules} \DEFNT{syntax} \TERM{Open}~\TERM{Scope}~\NT{ident} \nlsep \TERM{Close}~\TERM{Scope}~\NT{ident} \nlsep \TERM{Delimit}~\TERM{Scope}~\NT{ident}~\KWD{with}~\NT{ident} \nlsep \TERM{Bind}~\TERM{Scope}~\NT{ident}~\KWD{with}~\PLUS{\NT{class-rawexpr}} \nlsep \TERM{Arguments}~\TERM{Scope}~\NT{reference} ~\TERM{[}~\PLUS{\NT{name}}~\TERM{]} \nlsep \TERM{Infix}~\OPT{\TERM{Local}} %%% ~\NT{prec}~\OPT{\NT{num}} ~\NT{string}~\KWD{:=}~\NT{reference}~\OPT{\NT{modifiers}} ~\OPT{\NT{in-scope}} \nlsep \TERM{Notation}~\OPT{\TERM{Local}}~\NT{string}~\KWD{:=}~\NT{constr} ~\OPT{\NT{modifiers}}~\OPT{\NT{in-scope}} \nlsep \TERM{Notation}~\OPT{\TERM{Local}}~\NT{ident}~\KWD{:=}~\NT{constr} ~\OPT{\KWD{(}\TERM{only~\TERM{parsing}\KWD{)}}} \nlsep \TERM{Reserved}~\TERM{Notation}~\OPT{\TERM{Local}}~\NT{string} ~\OPT{\NT{modifiers}} \nlsep \TERM{Tactic}~\TERM{Notation}~\NT{string}~\STAR{\NT{tac-production}} ~\KWD{:=}~\NT{tactic} \SEPDEF \DEFNT{modifiers} \KWD{(}~\NT{mod-list}~\KWD{)} \SEPDEF \DEFNT{mod-list} \NT{modifier} \nlsep \NT{modifier}~\KWD{,}~\NT{mod-list} \SEPDEF \DEFNT{modifier} \NT{ident}~\KWD{at}~\NT{num} \nlsep \NT{ident}~\STARGR{\KWD{,}~\NT{ident}}~\KWD{at}~\NT{num} \nlsep \KWD{at}~\TERM{next}~\TERM{level} \nlsep \KWD{at}~\TERM{level}~\NT{num} \nlsep \TERM{left}~\TERM{associativity} \nlsep \TERM{right}~\TERM{associativity} \nlsep \TERM{no}~\TERM{associativity} \nlsep \NT{ident}~\NT{syntax-entry} \nlsep \TERM{only}~\TERM{parsing} \nlsep \TERM{format}~\NT{string} \SEPDEF \DEFNT{in-scope} \KWD{:}~\NT{ident} \SEPDEF \DEFNT{syntax-entry} \TERM{ident}~\mid~\TERM{global}~\mid~\TERM{bigint} \SEPDEF \DEFNT{tac-production} \NT{string} \nlsep \NT{ident}~\TERM{(}~\NT{ident}~\TERM{)} %%% \SEPDEF %%% \DEFNT{prec} %%% \TERM{LeftA}~\mid~\TERM{RightA}~\mid~\TERM{NonA} \end{rules} \end{document} coq-8.4pl2/dev/v8-syntax/check-grammar0000750000175000001440000000227111330405500016705 0ustar notinusers#!/bin/sh # This scripts checks that the new grammar of Coq as defined in syntax-v8.tex # is consistent in the sense that all invoked non-terminals are defined defined_nt() { grep "\\DEFNT{.*}" syntax-v8.tex | sed -e "s|.*DEFNT{\([^}]*\)}.*|\1|"|\ sort | sort -u } used_nt() { cat syntax-v8.tex | tr \\\\ \\n | grep "^NT{.*}" |\ sed -e "s|^NT{\([^}]*\)}.*|\1|" | egrep -v ^\#1\|non-terminal | sort -u } used_term() { cat syntax-v8.tex | tr \\\\ \\n | grep "^TERM{.*}" |\ sed -e "s|^TERM{\([^}]*\)}.*|\1|" -e "s|\\$||g" | egrep -v ^\#1\|terminal | sort -u } used_kwd() { cat syntax-v8.tex | tr \\\\ \\n | grep "^KWD{.*}" |\ sed -e "s|^KWD{\([^}]*\)}.*|\1|" -e "s|\\$||g" | egrep -v ^\#1 | sort -u } defined_nt > def used_nt > use used_term > use-t used_kwd > use-k 
diff def use > df ############################### echo if grep ^\> df > /dev/null 2>&1 ; then echo Undefined non-terminals: echo ======================== echo grep ^\> df | sed -e "s|^> ||" echo fi if grep ^\< df > /dev/null 2>&1 ; then echo Unused non-terminals: echo ===================== echo grep ^\< df | sed -e "s|^< ||" echo fi #echo Used terminals: #echo =============== #echo #cat use-tcoq-8.4pl2/dev/v8-syntax/memo-v8.tex0000640000175000001440000002305110434701777016313 0ustar notinusers \documentclass{article} \usepackage{verbatim} \usepackage{amsmath} \usepackage{amssymb} \usepackage{array} \usepackage{fullpage} \author{B.~Barras} \title{An introduction to syntax of Coq V8} %% _ is a regular character \catcode`\_=13 \let\subscr=_ \def_{\ifmmode\sb\else\subscr\fi} \def\NT#1{\langle\textit{#1}\rangle} \def\NTL#1#2{\langle\textit{#1}\rangle_{#2}} \def\TERM#1{\textsf{\bf #1}} \newenvironment{transbox} {\begin{center}\tt\begin{tabular}{l|ll} \hfil\textrm{V7} & \hfil\textrm{V8} \\ \hline} {\end{tabular}\end{center}} \def\TRANS#1#2 {\begin{tabular}[t]{@{}l@{}}#1\end{tabular} & \begin{tabular}[t]{@{}l@{}}#2\end{tabular} \\} \def\TRANSCOM#1#2#3 {\begin{tabular}[t]{@{}l@{}}#1\end{tabular} & \begin{tabular}[t]{@{}l@{}}#2\end{tabular} & #3 \\} \begin{document} \maketitle The goal of this document is to introduce the new syntax of Coq by example. It is strongly recommended to read the definition of the new syntax first, but this document should also be useful for the eager user who wants to start with the new syntax quickly. \section{Changes in lexical conventions w.r.t. V7} \subsection{Identifiers} The lexical conventions changed: \TERM{_} is not a regular identifier anymore. It is used in terms as a placeholder for subterms to be inferred at type-checking, and in patterns as a non-binding variable. Furthermore, only letters (unicode letters), digits, single quotes and _ are allowed after the first character. \subsection{Quoted string} Quoted strings are typically used to give a filename (which may not be a regular identifier). As before, they are written between double quotes ("). Unlike in V7, there is no escape character: characters are written normally, except for the double quote, which is doubled. \section{Main changes in terms w.r.t. V7} \subsection{Precedence of application} In the new syntax, parentheses are not really part of the syntax of application. The precedence of application (10) is tighter than all prefix and infix notations. This makes it possible to remove parentheses in many contexts. \begin{transbox} \TRANS{(A x)->(f x)=(g y)}{A x -> f x = g y} \TRANS{(f [x]x)}{f (fun x => x)} \end{transbox} \subsection{Arithmetics and scopes} The specialized notations for \TERM{Z} and \TERM{R} (introduced by the symbols \TERM{`} and \TERM{``}) have disappeared. They have been replaced by the general notion of scope. \begin{center} \begin{tabular}{l|l|l} type & scope name & delimiter \\ \hline types & type_scope & \TERM{T} \\ \TERM{bool} & bool_scope & \\ \TERM{nat} & nat_scope & \TERM{nat} \\ \TERM{Z} & Z_scope & \TERM{Z} \\ \TERM{R} & R_scope & \TERM{R} \\ \TERM{positive} & positive_scope & \TERM{P} \end{tabular} \end{center} In order to use the notations of arithmetic on \TERM{Z}, its scope must be opened with the command \verb+Open Scope Z_scope.+ Another possibility is using the scope change notation (\TERM{\%}). The latter notation is to be used when notations of several scopes appear in the same expression. In the examples below, scope changes are not needed if the appropriate scope has been opened. 
Scope nat_scope is opened in the initial state of Coq. \begin{transbox} \TRANSCOM{`0+x=x+0`}{0+x=x+0}{\textrm{Z_scope}} \TRANSCOM{``0 + [if b then ``1`` else ``2``]``}{0 + if b then 1 else 2}{\textrm{R_scope}} \TRANSCOM{(0)}{0}{\textrm{nat_scope}} \end{transbox} Below is a table that tells which notation is available in which scope. The relative precedences and associativities of operators are the same as in usual mathematics. See the reference manual for more details. However, it is important to remember that unlike in V7, the type operators for product and sum are left associative, in order not to clash with arithmetic operators. \begin{center} \begin{tabular}{l|l} scope & notations \\ \hline nat_scope & $+ ~- ~* ~< ~\leq ~> ~\geq$ \\ Z_scope & $+ ~- ~* ~/ ~\TERM{mod} ~< ~\leq ~> ~\geq ~?=$ \\ R_scope & $+ ~- ~* ~/ ~< ~\leq ~> ~\geq$ \\ type_scope & $* ~+$ \\ bool_scope & $\TERM{\&\&} ~\TERM{$||$} ~\TERM{-}$ \\ list_scope & $\TERM{::} ~\TERM{++}$ \end{tabular} \end{center} (Note: $\leq$ is written \TERM{$<=$}) \subsection{Notation for implicit arguments} The explicit specification of arguments is closer to the \emph{bindings} notation in tactics. Argument positions follow the argument names of the head constant. \begin{transbox} \TRANS{f 1!t1 2!t2}{f (x:=t1) (y:=t2)} \TRANS{!f t1 t2}{@f t1 t2} \end{transbox} \subsection{Universal quantification} Universal quantification and dependent product types are now written with the \TERM{forall} keyword before the binders and a comma after the binders. The syntax of binders also changed significantly. A binder can simply be a name when its type can be inferred. In other cases, the name and the type of the variable are put between parentheses. When several consecutive variables have the same type, they can be grouped. Finally, if all variables have the same type, parentheses can be omitted. \begin{transbox} \TRANS{(x:A)B}{forall (x:~A), B ~~\textrm{or}~~ forall x:~A, B} \TRANS{(x,y:nat)P}{forall (x y :~nat), P ~~\textrm{or}~~ forall x y :~nat, P} \TRANS{(x,y:nat;z:A)P}{forall (x y :~nat) (z:A), P} \TRANS{(x,y,z,t:?)P}{forall x y z t, P} \TRANS{(x,y:nat;z:?)P}{forall (x y :~nat) z, P} \end{transbox} \subsection{Abstraction} The notation for $\lambda$-abstraction follows that of universal quantification. The binders are surrounded by the keyword \TERM{fun} and $\Rightarrow$ (\verb+=>+ in ascii). \begin{transbox} \TRANS{[x,y:nat; z](f a b c)}{fun (x y:nat) z => f a b c} \end{transbox} \subsection{Pattern-matching} Besides the use of the keyword pair \TERM{match}/\TERM{with} instead of \TERM{Cases}/\TERM{of}, the main change is the notation for the type of branches and the return type. It is no longer written between \TERM{$<$ $>$} before the \TERM{Cases} keyword, but interleaved with the destructured objects. The idea is that for each destructured object, one may specify a variable name to tell how the types of the branches depend on this destructured object (in the case of a dependent elimination), and also how they depend on the value of the arguments of the inductive type of the destructured object. The type of branches is then given after the keyword \TERM{return}, unless it can be inferred. Moreover, when the destructured object is a variable, one may use this variable in the return type. \begin{transbox} \TRANS{Cases n of\\~~ O => O \\| (S k) => (1) end}{match n with\\~~ 0 => 0 \\| (S k) => 1 end} \TRANS{Cases m n of \\~~0 0 => t \\| ... end}{match m, n with \\~~0, 0 => t \\| .. end} \TRANS{<[n:nat](P n)>Cases T of ... end}{match T as n return P n with ... 
end} \TRANS{<[n:nat][p:(even n)]\~{}(odd n)>Cases p of\\~~ ... \\end}{match p in even n return \~{} odd n with\\~~ ...\\end} \end{transbox} \subsection{Fixpoints and cofixpoints} An easier syntax for non-mutual fixpoints is provided, making it very close to the usual notation for non-recursive functions. The decreasing argument is now indicated by an annotation between curly braces, regardless of the grouping of binders. The annotation can be omitted if the binders introduce only one variable. The type of the result can be omitted if inferable. \begin{transbox} \TRANS{Fix plus\{plus [n:nat] : nat -> nat :=\\~~ [m]...\}}{fix plus (n m:nat) \{struct n\}: nat := ...} \TRANS{Fix fact\{fact [n:nat]: nat :=\\ ~~Cases n of\\~~~~ O => (1) \\~~| (S k) => (mult n (fact k)) end\}}{fix fact (n:nat) :=\\ ~~match n with \\~~~~0 => 1 \\~~| (S k) => n * fact k end} \end{transbox} There is syntactic sugar for mutual fixpoints associated with a local definition: \begin{transbox} \TRANS{let f := Fix f \{f [x:A] : T := M\} in\\(g (f y))}{let fix f (x:A) : T := M in\\g (f x)} \end{transbox} The same applies to cofixpoints; annotations are not allowed in that case. \subsection{Notation for type cast} \begin{transbox} \TRANS{O :: nat}{0 : nat} \end{transbox} \section{Main changes in tactics w.r.t. V7} The main change is that all tactic names are lowercase. This also holds for Ltac keywords. \subsection{Ltac} Definitions of macros are introduced by \TERM{Ltac} instead of \TERM{Tactic Definition}, \TERM{Meta Definition} or \TERM{Recursive Definition}. The rules of a match command are not between square brackets anymore. Context instantiation (a context being a term with a placeholder), formerly written \TERM{inst}, became \TERM{context}. Its syntax is unified with subterm matching. \begin{transbox} \TRANS{match t with [C[x=y]] => inst C[y=x]}{match t with context C[x=y] => context C[y=x]} \end{transbox} \subsection{Named arguments of theorems} \begin{transbox} \TRANS{Apply thm with x:=t 1:=u}{apply thm with (x:=t) (1:=u)} \end{transbox} \subsection{Occurrences} To avoid ambiguity between a numeric literal and the optional occurrence numbers of this term, the occurrence numbers are put after the term itself. This applies to the tactic \TERM{pattern} and also to \TERM{unfold}. \begin{transbox} \TRANS{Pattern 1 2 (f x) 3 4 d y z}{pattern (f x at 1 2) (d at 3 4) y z} \end{transbox} \section{Main changes in vernacular commands w.r.t. V7} \subsection{Binders} The binders of vernacular commands changed in the same way as those of fixpoints. This also holds for parameters of inductive definitions. \begin{transbox} \TRANS{Definition x [a:A] : T := M}{Definition x (a:A) : T := M} \TRANS{Inductive and [A,B:Prop]: Prop := \\~~conj : A->B->(and A B)}% {Inductive and (A B:Prop): Prop := \\~~conj : A -> B -> and A B} \end{transbox} \subsection{Hints} The syntax of \emph{extern} hints changed: the pattern and the tactic to be applied are separated by a \TERM{$\Rightarrow$}. \begin{transbox} \TRANS{Hint Extern 4 (toto ?) Apply lemma}{Hint Extern 4 (toto _) => apply lemma} \end{transbox} \end{document} coq-8.4pl2/dev/ocamldoc/0000750000175000001440000000000012127276534014176 5ustar notinuserscoq-8.4pl2/dev/ocamldoc/docintro0000640000175000001440000000302011504715005015723 0ustar notinusers{!indexlist} This is Coq, a proof assistant for the Calculus of Inductive Constructions. This document describes the implementation of Coq. It has been automatically generated from the source of Coq using {{:http://caml.inria.fr/}ocamldoc}. 
The source files are organized in several directories, ordered as follows: {ol {- Utility libraries : lib describes the various utility libraries used in the code of Coq.} {- Kernel : kernel describes the Coq kernel, which is a type checker for the Calculus of Inductive Constructions.} {- Library : library describes the Coq library, which is made of two parts: - a general mechanism to keep a trace of all operations and of the state of the system, with backtrack capabilities; - a global environment for the CCI, with functions to export and import compiled modules. } {- Pretyping : pretyping } {- Front abstract syntax of terms : interp describes the translation from Coq context-dependent front abstract syntax of terms {v constr_expr v} to and from the context-free, untyped, globalized form of constructions {v glob_constr v}.} {- Parsers and printers : parsing describes the implementation of the Coq parsers and printers.} {- Proof engine : proofs describes the Coq proof engine, which is also called the ``refiner'', since it provides a way to build terms by successive refining steps. Those steps are either primitive rules or higher-level tactics.} {- Tactics : tactics describes the main Coq tactics.} {- Toplevel : toplevel describes the highest-level modules of the Coq system.} } coq-8.4pl2/dev/ocamldoc/html/0000750000175000001440000000000012127276534015142 5ustar notinuserscoq-8.4pl2/dev/ocamldoc/html/style.css0000640000175000001440000000625011366332340017010 0ustar notinusersa:visited { color: #416DFF; text-decoration: none; } a:link { color: #416DFF; text-decoration: none; } a:hover { color: Red; text-decoration: none; background-color: #5FFF88 } a:active { color: Red; text-decoration: underline; } .keyword { font-weight: bold; color: Red } .keywordsign { color: #C04600 } .superscript { font-size: 8 } .subscript { font-size: 8 } .comment { color: Green } .constructor { color: Blue } .type { color: #5C6585 } .string { color: Maroon } .warning { color: Red; font-weight: bold } .info { margin-left: 3em; margin-right: 3em } .param_info { margin-top: 4px; margin-left: 3em; margin-right: 3em } .code { color: #465F91; } h1 { font-size: 20pt; text-align: center; } h5, h6, div.h7, div.h8, div.h9 { font-size: 20pt; border: 1px solid #000000; margin-top: 5px; margin-bottom: 2px; text-align: center; padding: 2px; } h5 { background-color: #90FDFF; } h6 { background-color: #016699; color: white; } div.h7 { background-color: #E0FFFF; } div.h8 { background-color: #F0FFFF; } div.h9 { background-color: #FFFFFF; } .typetable, .indextable, .paramstable { border-style: hidden; } .paramstable { padding: 5pt 5pt; } body { background-color: white; } tr { background-color: white; } td.typefieldcomment { background-color: #FFFFFF; font-size: smaller; } pre { margin-bottom: 4px; } div.sig_block { margin-left: 2em; } h2 { font-family: Arial, Helvetica, sans-serif; font-size: 16pt; font-weight: normal; border-bottom: 1px solid #dadada; border-top: 1px solid #dadada; color: #101010; background: #eeeeff; margin: 25px 0px 10px 0px; padding: 1px 1px 1px 1px; } h3 { font-family: Arial, Helvetica, sans-serif; font-size: 12pt; color: #016699; font-weight: bold; padding: 15px 0 0 0ex; margin: 5px 0 0 0; } h4 { font-family: Arial, Helvetica, sans-serif; font-size: 10pt; color: #016699; padding: 15px 0 0 0ex; margin: 5px 0 0 0; } /* Here starts the overwrite of default rules to give a better look */ body { font-family: Calibri, Georgia, Garamond, Baskerville, serif; font-size: 12pt; background-color: white; } a:link, a { color: #6895c3 
!important; } a:hover { color: #2F4459 !important; background-color: white; } hr { height: 1px; color: #016699; background-color: #016699; border-width: 0; } h1, h1 a:link, h1 a:visited, h1 a { font-family: Cambria, Georgia, Garamond, Baskerville, serif; color: #016699; } .navbar { float: left; } .navbar a, .navbar a:link, .navbar a:visited { color: #016699; font-family: Arial, Helvetica, sans-serif; font-weight: bold; font-size: 80%; } .keyword { color: #c13939; } .constructor { color: #3c8f7e; } pre, code { font-family: "DejaVu Sans Mono", "Bitstream Vera Mono", "Courrier New", monospace; white-space: normal; font-size: 9pt; font-weight: bold; } .type br { display: none; } .info { margin-left: 1em; font-size: 12pt; } coq-8.4pl2/dev/db_printers.ml0000640000175000001440000000124612010532755015255 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* raise Not_found) (* std_ppcmds *) let pppp x = pp x (* name printers *) let ppid id = pp (pr_id id) let pplab l = pp (pr_lab l) let ppmbid mbid = pp (str (debug_string_of_mbid mbid)) let ppdir dir = pp (pr_dirpath dir) let ppmp mp = pp(str (string_of_mp mp)) let ppcon con = pp(debug_pr_con con) let ppkn kn = pp(pr_kn kn) let ppmind kn = pp(debug_pr_mind kn) let ppsp sp = pp(pr_path sp) let ppqualid qid = pp(pr_qualid qid) let ppclindex cl = pp(Classops.pr_cl_index cl) (* term printers *) let rawdebug = ref false let ppconstr x = pp (Termops.print_constr x) let ppconstrdb x = pp(Flags.with_option rawdebug Termops.print_constr x) let ppterm = ppconstr let ppsconstr x = ppconstr (Declarations.force x) let ppconstr_univ x = Constrextern.with_universes ppconstr x let ppglob_constr = (fun x -> pp(pr_lglob_constr x)) let pppattern = (fun x -> pp(pr_constr_pattern x)) let pptype = (fun x -> try pp(pr_ltype x) with e -> pp (str (Printexc.to_string e))) let ppfconstr c = ppconstr (Closure.term_of_fconstr c) let ppbigint n = pp (str (Bigint.to_string n));; let prset pr l = str "[" ++ hov 0 (prlist_with_sep spc pr l) ++ str "]" let ppintset l = pp (prset int (Intset.elements l)) let ppidset l = pp (prset pr_id (Idset.elements l)) let prset' pr l = str "[" ++ hov 0 (prlist_with_sep pr_comma pr l) ++ str "]" let ppidmap pr l = let pr (id,b) = pr_id id ++ str "=>" ++ pr id b in pp (prset' pr (Idmap.fold (fun a b l -> (a,b)::l) l [])) let ppevarsubst = ppidmap (fun id0 -> prset (fun (c,copt,id) -> hov 0 (Termops.print_constr c ++ (match copt with None -> mt () | Some c -> spc () ++ str "") ++ (if id = id0 then mt () else spc () ++ str "")))) let pP s = pp (hov 0 s) let safe_pr_global = function | ConstRef kn -> pp (str "CONSTREF(" ++ debug_pr_con kn ++ str ")") | IndRef (kn,i) -> pp (str "INDREF(" ++ debug_pr_mind kn ++ str "," ++ int i ++ str ")") | ConstructRef ((kn,i),j) -> pp (str "INDREF(" ++ debug_pr_mind kn ++ str "," ++ int i ++ str "," ++ int j ++ str ")") | VarRef id -> pp (str "VARREF(" ++ pr_id id ++ str ")") let ppglobal x = try pp(pr_global x) with _ -> safe_pr_global x let ppconst (sp,j) = pp (str"#" ++ pr_kn sp ++ str"=" ++ pr_lconstr j.uj_val) let ppvar ((id,a)) = pp (str"#" ++ pr_id id ++ str":" ++ pr_lconstr a) let genppj f j = let (c,t) = f j in (c ++ str " : " ++ t) let ppj j = pp (genppj pr_ljudge j) let prsubst s = pp (Mod_subst.debug_pr_subst s) let prdelta s = pp (Mod_subst.debug_pr_delta s) let pp_idpred s = pp (pr_idpred s) let pp_cpred s = pp (pr_cpred s) let pp_transparent_state s = pp (pr_transparent_state s) (* proof printers *) 
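(* Usage note (an assumption based on the remark near the end of this file
   about ocamldebug): the printers below are intended to be called
   interactively, e.g. from ocamldebug or from an OCaml toplevel reached via
   the "Drop" command, to inspect proof-engine state while debugging; for
   instance, "ppevm evd" displays the evar map "evd". Exact entry points may
   vary between Coq versions. *)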
let ppmetas metas = pp(pr_metaset metas) let ppevm evd = pp(pr_evar_map (Some 2) evd) let ppevmall evd = pp(pr_evar_map None evd) let pr_existentialset evars = prlist_with_sep spc pr_meta (ExistentialSet.elements evars) let ppexistentialset evars = pp (pr_existentialset evars) let ppclenv clenv = pp(pr_clenv clenv) let ppgoalgoal gl = pp(Goal.pr_goal gl) let ppgoal g = pp(Printer.pr_goal g) (* spiwack: deactivated until a replacement is found let pppftreestate p = pp(print_pftreestate p) *) (* let ppgoal g = pp(db_pr_goal g) *) (* let pr_gls gls = *) (* hov 0 (pr_evar_defs (sig_sig gls) ++ fnl () ++ db_pr_goal (sig_it gls)) *) (* let pr_glls glls = *) (* hov 0 (pr_evar_defs (sig_sig glls) ++ fnl () ++ *) (* prlist_with_sep pr_fnl db_pr_goal (sig_it glls)) *) (* let ppsigmagoal g = pp(pr_goal (sig_it g)) *) (* let prgls gls = pp(pr_gls gls) *) (* let prglls glls = pp(pr_glls glls) *) (* let pproof p = pp(print_proof Evd.empty empty_named_context p) *) let ppuni u = pp(pr_uni u) let ppuniverses u = pp (str"[" ++ pr_universes u ++ str"]") let ppconstraints c = pp (pr_constraints c) let ppenv e = pp (str "[" ++ pr_named_context_of e ++ str "]" ++ spc() ++ str "[" ++ pr_rel_context e (rel_context e) ++ str "]") let pptac = (fun x -> pp(Pptactic.pr_glob_tactic (Global.env()) x)) let ppinsts c = pp (pr_instance_gmap c) let ppobj obj = Format.print_string (Libobject.object_tag obj) let cnt = ref 0 let cast_kind_display k = match k with | VMcast -> "VMcast" | DEFAULTcast -> "DEFAULTcast" | REVERTcast -> "REVERTcast" let constr_display csr = let rec term_display c = match kind_of_term c with | Rel n -> "Rel("^(string_of_int n)^")" | Meta n -> "Meta("^(string_of_int n)^")" | Var id -> "Var("^(string_of_id id)^")" | Sort s -> "Sort("^(sort_display s)^")" | Cast (c,k, t) -> "Cast("^(term_display c)^","^(cast_kind_display k)^","^(term_display t)^")" | Prod (na,t,c) -> "Prod("^(name_display na)^","^(term_display t)^","^(term_display c)^")\n" | Lambda (na,t,c) -> "Lambda("^(name_display na)^","^(term_display t)^","^(term_display c)^")\n" | LetIn (na,b,t,c) -> "LetIn("^(name_display na)^","^(term_display b)^"," ^(term_display t)^","^(term_display c)^")" | App (c,l) -> "App("^(term_display c)^","^(array_display l)^")\n" | Evar (e,l) -> "Evar("^(string_of_int e)^","^(array_display l)^")" | Const c -> "Const("^(string_of_con c)^")" | Ind (sp,i) -> "MutInd("^(string_of_mind sp)^","^(string_of_int i)^")" | Construct ((sp,i),j) -> "MutConstruct(("^(string_of_mind sp)^","^(string_of_int i)^")," ^(string_of_int j)^")" | Case (ci,p,c,bl) -> "MutCase(,"^(term_display p)^","^(term_display c)^"," ^(array_display bl)^")" | Fix ((t,i),(lna,tl,bl)) -> "Fix(([|"^(Array.fold_right (fun x i -> (string_of_int x)^(if not(i="") then (";"^i) else "")) t "")^"|],"^(string_of_int i)^")," ^(array_display tl)^",[|" ^(Array.fold_right (fun x i -> (name_display x)^(if not(i="") then (";"^i) else "")) lna "")^"|]," ^(array_display bl)^")" | CoFix(i,(lna,tl,bl)) -> "CoFix("^(string_of_int i)^")," ^(array_display tl)^"," ^(Array.fold_right (fun x i -> (name_display x)^(if not(i="") then (";"^i) else "")) lna "")^"," ^(array_display bl)^")" and array_display v = "[|"^ (Array.fold_right (fun x i -> (term_display x)^(if not(i="") then (";"^i) else "")) v "")^"|]" and sort_display = function | Prop(Pos) -> "Prop(Pos)" | Prop(Null) -> "Prop(Null)" | Type u -> incr cnt; pp (str "with " ++ int !cnt ++ pr_uni u ++ fnl ()); "Type("^(string_of_int !cnt)^")" and name_display = function | Name id -> "Name("^(string_of_id id)^")" | Anonymous -> 
"Anonymous" in msg (str (term_display csr) ++fnl ()) open Format;; let print_pure_constr csr = let rec term_display c = match kind_of_term c with | Rel n -> print_string "#"; print_int n | Meta n -> print_string "Meta("; print_int n; print_string ")" | Var id -> print_string (string_of_id id) | Sort s -> sort_display s | Cast (c,_, t) -> open_hovbox 1; print_string "("; (term_display c); print_cut(); print_string "::"; (term_display t); print_string ")"; close_box() | Prod (Name(id),t,c) -> open_hovbox 1; print_string"("; print_string (string_of_id id); print_string ":"; box_display t; print_string ")"; print_cut(); box_display c; close_box() | Prod (Anonymous,t,c) -> print_string"("; box_display t; print_cut(); print_string "->"; box_display c; print_string ")"; | Lambda (na,t,c) -> print_string "["; name_display na; print_string ":"; box_display t; print_string "]"; print_cut(); box_display c; | LetIn (na,b,t,c) -> print_string "["; name_display na; print_string "="; box_display b; print_cut(); print_string ":"; box_display t; print_string "]"; print_cut(); box_display c; | App (c,l) -> print_string "("; box_display c; Array.iter (fun x -> print_space (); box_display x) l; print_string ")" | Evar (e,l) -> print_string "Evar#"; print_int e; print_string "{"; Array.iter (fun x -> print_space (); box_display x) l; print_string"}" | Const c -> print_string "Cons("; sp_con_display c; print_string ")" | Ind (sp,i) -> print_string "Ind("; sp_display sp; print_string ","; print_int i; print_string ")" | Construct ((sp,i),j) -> print_string "Constr("; sp_display sp; print_string ","; print_int i; print_string ","; print_int j; print_string ")" | Case (ci,p,c,bl) -> open_vbox 0; print_string "<"; box_display p; print_string ">"; print_cut(); print_string "Case"; print_space(); box_display c; print_space (); print_string "of"; open_vbox 0; Array.iter (fun x -> print_cut(); box_display x) bl; close_box(); print_cut(); print_string "end"; close_box() | Fix ((t,i),(lna,tl,bl)) -> print_string "Fix("; print_int i; print_string ")"; print_cut(); open_vbox 0; let rec print_fix () = for k = 0 to (Array.length tl) - 1 do open_vbox 0; name_display lna.(k); print_string "/"; print_int t.(k); print_cut(); print_string ":"; box_display tl.(k) ; print_cut(); print_string ":="; box_display bl.(k); close_box (); print_cut() done in print_string"{"; print_fix(); print_string"}" | CoFix(i,(lna,tl,bl)) -> print_string "CoFix("; print_int i; print_string ")"; print_cut(); open_vbox 0; let rec print_fix () = for k = 0 to (Array.length tl) - 1 do open_vbox 1; name_display lna.(k); print_cut(); print_string ":"; box_display tl.(k) ; print_cut(); print_string ":="; box_display bl.(k); close_box (); print_cut(); done in print_string"{"; print_fix (); print_string"}" and box_display c = open_hovbox 1; term_display c; close_box() and sort_display = function | Prop(Pos) -> print_string "Set" | Prop(Null) -> print_string "Prop" | Type u -> open_hbox(); print_string "Type("; pp (pr_uni u); print_string ")"; close_box() and name_display = function | Name id -> print_string (string_of_id id) | Anonymous -> print_string "_" (* Remove the top names for library and Scratch to avoid long names *) and sp_display sp = (* let dir,l = decode_kn sp in let ls = match List.rev (List.map string_of_id (repr_dirpath dir)) with ("Top"::l)-> l | ("Coq"::_::l) -> l | l -> l in List.iter (fun x -> print_string x; print_string ".") ls;*) print_string (debug_string_of_mind sp) and sp_con_display sp = (* let dir,l = decode_kn sp in let ls = match 
List.rev (List.map string_of_id (repr_dirpath dir)) with ("Top"::l)-> l | ("Coq"::_::l) -> l | l -> l in List.iter (fun x -> print_string x; print_string ".") ls;*) print_string (debug_string_of_con sp) in try box_display csr; print_flush() with e -> print_string (Printexc.to_string e);print_flush (); raise e let ppfconstr c = ppconstr (Closure.term_of_fconstr c) let pploc x = let (l,r) = unloc x in print_string"(";print_int l;print_string",";print_int r;print_string")" (* extendable tactic arguments *) let rec pr_argument_type = function (* Basic types *) | BoolArgType -> str"bool" | IntArgType -> str"int" | IntOrVarArgType -> str"int-or-var" | StringArgType -> str"string" | PreIdentArgType -> str"pre-ident" | IntroPatternArgType -> str"intro-pattern" | IdentArgType true -> str"ident" | IdentArgType false -> str"pattern_ident" | VarArgType -> str"var" | RefArgType -> str"ref" (* Specific types *) | SortArgType -> str"sort" | ConstrArgType -> str"constr" | ConstrMayEvalArgType -> str"constr-may-eval" | QuantHypArgType -> str"qhyp" | OpenConstrArgType _ -> str"open-constr" | ConstrWithBindingsArgType -> str"constr-with-bindings" | BindingsArgType -> str"bindings" | RedExprArgType -> str"redexp" | List0ArgType t -> pr_argument_type t ++ str" list0" | List1ArgType t -> pr_argument_type t ++ str" list1" | OptArgType t -> pr_argument_type t ++ str" opt" | PairArgType (t1,t2) -> str"("++ pr_argument_type t1 ++ str"*" ++ pr_argument_type t2 ++str")" | ExtraArgType s -> str"\"" ++ str s ++ str "\"" let pp_argument_type t = pp (pr_argument_type t) let pp_generic_argument arg = pp(str"") (**********************************************************************) (* Vernac-level debugging commands *) let in_current_context f c = let (evmap,sign) = try Pfedit.get_current_goal_context () with e when Logic.catchable_exception e -> (Evd.empty, Global.env()) in f (Constrintern.interp_constr evmap sign c) (* We expand the result of preprocessing to be independent of camlp4 VERNAC COMMAND EXTEND PrintPureConstr | [ "PrintPureConstr" constr(c) ] -> [ in_current_context print_pure_constr c ] END VERNAC COMMAND EXTEND PrintConstr [ "PrintConstr" constr(c) ] -> [ in_current_context constr_display c ] END *) open Pcoq open Genarg open Egrammar let _ = try Vernacinterp.vinterp_add "PrintConstr" (function [c] when genarg_tag c = ConstrArgType && true -> let c = out_gen rawwit_constr c in (fun () -> in_current_context constr_display c) | _ -> failwith "Vernac extension: cannot occur") with e -> Pp.pp (Errors.print e) let _ = extend_vernac_command_grammar "PrintConstr" None [[GramTerminal "PrintConstr"; GramNonTerminal (dummy_loc,ConstrArgType,Aentry ("constr","constr"), Some (Names.id_of_string "c"))]] let _ = try Vernacinterp.vinterp_add "PrintPureConstr" (function [c] when genarg_tag c = ConstrArgType && true -> let c = out_gen rawwit_constr c in (fun () -> in_current_context print_pure_constr c) | _ -> failwith "Vernac extension: cannot occur") with e -> Pp.pp (Errors.print e) let _ = extend_vernac_command_grammar "PrintPureConstr" None [[GramTerminal "PrintPureConstr"; GramNonTerminal (dummy_loc,ConstrArgType,Aentry ("constr","constr"), Some (Names.id_of_string "c"))]] (* Setting printer of unbound global reference *) open Names open Nameops open Libnames let encode_path loc prefix mpdir suffix id = let dir = match mpdir with | None -> [] | Some (mp,dir) -> (repr_dirpath (dirpath_of_string (string_of_mp mp))@ repr_dirpath dir) in Qualid (loc, make_qualid (make_dirpath (List.rev (id_of_string prefix::dir@suffix))) 
id) let raw_string_of_ref loc _ = function | ConstRef cst -> let (mp,dir,id) = repr_con cst in encode_path loc "CST" (Some (mp,dir)) [] (id_of_label id) | IndRef (kn,i) -> let (mp,dir,id) = repr_mind kn in encode_path loc "IND" (Some (mp,dir)) [id_of_label id] (id_of_string ("_"^string_of_int i)) | ConstructRef ((kn,i),j) -> let (mp,dir,id) = repr_mind kn in encode_path loc "CSTR" (Some (mp,dir)) [id_of_label id;id_of_string ("_"^string_of_int i)] (id_of_string ("_"^string_of_int j)) | VarRef id -> encode_path loc "SECVAR" None [] id let short_string_of_ref loc _ = function | VarRef id -> Ident (loc,id) | ConstRef cst -> Ident (loc,id_of_label (pi3 (repr_con cst))) | IndRef (kn,0) -> Ident (loc,id_of_label (pi3 (repr_mind kn))) | IndRef (kn,i) -> encode_path loc "IND" None [id_of_label (pi3 (repr_mind kn))] (id_of_string ("_"^string_of_int i)) | ConstructRef ((kn,i),j) -> encode_path loc "CSTR" None [id_of_label (pi3 (repr_mind kn));id_of_string ("_"^string_of_int i)] (id_of_string ("_"^string_of_int j)) (* Anticipate that printers can be used from ocamldebug and that pretty-printer should not make calls to the global env since ocamldebug runs in a different process and does not have the proper env at hand *) let _ = Constrextern.in_debugger := true let _ = Constrextern.set_extern_reference (if !rawdebug then raw_string_of_ref else short_string_of_ref) coq-8.4pl2/dev/tools/0000750000175000001440000000000012127276535013556 5ustar notinuserscoq-8.4pl2/dev/tools/Makefile.common0000640000175000001440000000000010507222450016457 0ustar notinuserscoq-8.4pl2/dev/tools/Makefile.subdir0000640000175000001440000000040010507222450016463 0ustar notinusers# if you work in a sub/sub-rectory of Coq # you should make a link to that makefile # ln -s ../../dev/tools/Makefile.subdir Makefile # in order to have all the facilities of dev/tools/Makefile.dir TOPDIR=../.. 
include $(TOPDIR)/dev/tools/Makefile.dir coq-8.4pl2/dev/tools/objects.el0000640000175000001440000000757010434701777015543 0ustar notinusers(defun add-survive-module nil (interactive) (query-replace-regexp " \\([ ]*\\)\\(Summary\.\\)?survive_section" " \\1\\2survive_module = false; \\1\\2survive_section") ) (global-set-key [f2] 'add-survive-module) ; functions to change old style object declaration to new style (defun repl-open nil (interactive) (query-replace-regexp "open_function\\([ ]*\\)=\\([ ]*\\)cache_\\([a-zA-Z0-9'_]*\\)\\( *\\);" "open_function\\1=\\2(fun i o -> if i=1 then cache_\\3 o)\\4;") ) (global-set-key [f6] 'repl-open) (defun repl-load nil (interactive) (query-replace-regexp "load_function\\([ ]*\\)=\\([ ]*\\)cache_\\([a-zA-Z0-9'_]*\\)\\( *\\);" "load_function\\1=\\2(fun _ -> cache_\\3)\\4;") ) (global-set-key [f7] 'repl-load) (defun repl-decl nil (interactive) (query-replace-regexp "\\(Libobject\.\\)?declare_object[ ]*([ ]*\\(.*\\)[ ]*,[ ]* \\([ ]*\\){\\([ ]*\\)\\([^ ][^}]*\\)}[ ]*)" "\\1declare_object {(\\1default_object \\2) with \\3 \\4\\5}") ; "|$1=\\1|$2=\\2|$3=\\3|$4=\\4|") ) (global-set-key [f9] 'repl-decl) ; eval the above and try f9 f6 f7 on the following: let (inThing,outThing) = declare_object ("THING", { load_function = cache_thing; cache_function = cache_thing; open_function = cache_thing; export_function = (function x -> Some x) }) ;%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ;%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ; functions helping writing non-copying substitutions (defun make-subst (name) (interactive "s") (defun f (l) (save-excursion (query-replace-regexp (concat "\\([a-zA-z_0-9]*\\)[ ]*:[ ]*" (car l) "\\([ ]*;\\|[ ]*\}\\)") (concat "let \\1\' = " (cdr l) " " name "\\1 in")) ) ) (mapcar 'f '(("constr"."subst_mps subst") ("Coqast.t"."subst_ast subst") ("Coqast.t list"."list_smartmap (subst_ast subst)") ("'pat"."subst_pat subst") ("'pat unparsing_hunk"."subst_hunk subst_pat subst") ("'pat unparsing_hunk list"."list_smartmap (subst_hunk subst_pat subst)") ("'pat syntax_entry"."subst_syntax_entry subst_pat subst") ("'pat syntax_entry list"."list_smartmap (subst_syntax_entry subst_pat subst)") ("constr option"."option_smartmap (subst_mps subst)") ("constr list"."list_smartmap (subst_mps subst)") ("constr array"."array_smartmap (subst_mps subst)") ("constr_pattern"."subst_pattern subst") ("constr_pattern option"."option_smartmap (subst_pattern subst)") ("constr_pattern array"."array_smartmap (subst_pattern subst)") ("constr_pattern list"."list_smartmap (subst_pattern subst)") ("global_reference"."subst_global subst") ("extended_global_reference"."subst_ext subst") ("obj_typ"."subst_obj subst") ) ) ) (global-set-key [f2] 'make-subst) (defun make-if (name) (interactive "s") (save-excursion (query-replace-regexp "\\([a-zA-z_0-9]*\\)[ ]*:[ ]*['a-zA-z_. ]*\\(;\\|[ ]*\}\\)" (concat "&& \\1\' == " name "\\1") ) ) ) (global-set-key [f4] 'make-if) (defun make-record nil (interactive) (save-excursion (query-replace-regexp "\\([a-zA-z_0-9]*\\)[ ]*:[ ]*['a-zA-z_. ]*\\(;\\|[ ]*\}\\)" (concat "\\1 = \\1\' ;") ) ) ) (global-set-key [f5] 'make-record) (defun make-prim nil (interactive) (save-excursion (query-replace-regexp "\\<[a-zA-Z'_0-9]*\\>" "\\&'")) ) (global-set-key [f6] 'make-prim) ; eval the above, yank the text below and do ; paste f2 morph. ; paste f4 morph. 
; paste f5 lem : constr; profil : bool list; arg_types : constr list; lem2 : constr option } ; and you almost get Setoid_replace.subst_morph :) ; and now f5 on this: (ref,(c1,c2)) coq-8.4pl2/dev/tools/Makefile.dir0000640000175000001440000000617110507222450015764 0ustar notinusers# make a link to this file if you are working hard in one directory of Coq # ln -s ../dev/tools/Makefile.dir Makefile # if you are working in a sub/dir/ make a link to dev/tools/Makefile.subdir instead # this Makefile provides many useful facilities to develop Coq # it is not completely compatible with .ml4 files unfortunately ifndef TOPDIR TOPDIR=.. endif # this complicated thing should work for subsubdirs as well BASEDIR=$(shell (dir=`pwd`; cd $(TOPDIR); top=`pwd`; echo $$dir | sed -e "s|$$top/||")) noargs: dir test-dir: @echo TOPDIR=$(TOPDIR) @echo BASEDIR=$(BASEDIR) include $(TOPDIR)/dev/tools/Makefile.common # make this directory dir: $(MAKE) -C $(TOPDIR) $(notdir $(BASEDIR)) # make all cmo's in this directory. Useful in case the main Makefile is not # up-to-date all: @( ( for i in *.ml; do \ echo -n $(BASEDIR)/`basename $$i .ml`.cmo "" ; \ done; \ for i in *.ml4; do \ echo -n $(BASEDIR)/`basename $$i .ml4`.cmo "" ; \ done ) \ | xargs $(MAKE) -C $(TOPDIR) ) # lists all files that should be compiled in this directory list: @(for i in *.mli; do \ ls -l `basename $$i .mli`.cmi; \ done) @(for i in *.ml; do \ ls -l `basename $$i .ml`.cmo; \ done) @(for i in *.ml4; do \ ls -l `basename $$i .ml4`.cmo; \ done) clean:: rm -f *.cmi *.cmo *.cmx *.o # if grammar.cmo files cannot be compiled and main .depend cannot be # rebuilt, this is quite useful depend: (cd $(TOPDIR); ocamldep -I $(BASEDIR) $(BASEDIR)/*.ml $(BASEDIR)/*.mli > .depend.devel) # displays the dependency graph of the current directory (vertically, # unlike in doc/) graph: (ocamldep *.ml *.mli | ocamldot | dot -Tps | gv -) & # the pretty entry draws a dependency graph marking red those nodes # which do not have their .cmo files .INTERMEDIATE: depend.dot depend.2.dot .PHONY: depend.ps depend.dot: ocamldep *.ml *.mli | ocamldot > $@ depend.2.dot: depend.dot (i=`cat $< | wc -l`; i=`expr $$i - 1`; head -n $$i $<) > $@ (for ml in *.ml; do \ base=`basename $$ml .ml`; \ fst=`echo $$base | cut -c1 | tr [:lower:] [:upper:]`; \ rest=`echo $$base | cut -c2-`; \ name=`echo $$fst $$rest | tr -d " "`; \ cmo=$$base.cmo; \ if [ ! -e $$cmo ]; then \ echo \"$$name\" [color=red]\; >> $@;\ fi;\ done;\ echo } >> $@) depend.ps: depend.2.dot dot -Tps $< > $@ clean:: rm -f depend.ps pretty: depend.ps (gv -spartan $<; rm $<) & # gv -spartan $< & # generating file.ml.mli by tricking make to pass -i to ocamlc %.ml.mli: FORCE @(cmo=`basename $@ .ml.mli`.cmo ; \ mv -f $$cmo $$cmo.tmp ; \ $(MAKE) -s -C $(TOPDIR) $(BASEDIR)/$$cmo CAMLDEBUG=-i > $@ ; \ echo Generated interface file $@ ; \ mv -f $$cmo.tmp $$cmo) %.annot: FORCE @(cmo=`basename $@ .annot`.cmo ; \ mv -f $$cmo $$cmo.tmp ; \ $(MAKE) -s -C $(TOPDIR) $(BASEDIR)/$$cmo CAMLDEBUG=-dtypes ; \ echo Generated annotation file $@ ; \ mv -f $$cmo.tmp $$cmo) FORCE: clean:: rm -f *.ml.mli # this is not perfect but mostly WORKS! It just calls the main makefile %.cmi: FORCE $(MAKE) -C $(TOPDIR) $(BASEDIR)/$@ %.cmo: FORCE $(MAKE) -C $(TOPDIR) $(BASEDIR)/$@ coqtop: $(MAKE) -C $(TOPDIR) bin/coqtop.byte coq-8.4pl2/dev/tools/Makefile.devel0000640000175000001440000000351110507222450016300 0ustar notinusers# to be linked to makefile (lowercase - takes precedence over Makefile) # in main directory # make devel in main directory should do this for you. 
TOPDIR=. BASEDIR= SOURCEDIRS=lib kernel library pretyping parsing proofs tactics toplevel default: usage noargument usage:: @echo Usage: make \ @echo Targets are: usage:: @echo " setup-devel -- set the devel makefile" setup-devel: @ln -sfv dev/tools/Makefile.devel makefile @(for i in $(SOURCEDIRS); do \ (cd $(TOPDIR)/$$i; ln -sfv ../dev/tools/Makefile.dir Makefile) \ done) usage:: @echo " clean-devel -- clear all devel files" clean-devel: echo rm -f makefile .depend.devel echo rm -f $(foreach dir,$(SOURCEDIRS), $(TOPDIR)/$(dir)/Makefile) usage:: @echo " coqtop -- make until the bytecode executable, make the link" coqtop: bin/coqtop.byte ln -sf bin/coqtop.byte coqtop usage:: @echo " quick -- make bytecode executable and states" quick: $(MAKE) states BEST=byte include Makefile include $(TOPDIR)/dev/tools/Makefile.common # this file is better described in dev/tools/Makefile.dir include .depend.devel #if dev/tools/Makefile.local exists, it is included ifneq ($(wildcard $(TOPDIR)/dev/tools/Makefile.local),) include $(TOPDIR)/dev/tools/Makefile.local endif usage:: @echo " total -- runs coqtop with all theories required" total: ledit ./bin/coqtop.byte $(foreach th,$(THEORIESVO),-require $(notdir $(basename $(th)))) usage:: @echo " run -- makes and runs bytecode coqtop using ledit and the history file" @echo " if you want to pass arguments to coqtop, use make run ARG=" run: $(TOPDIR)/coqtop ledit -h $(TOPDIR)/dev/debug_history -x $(TOPDIR)/coqtop $(ARG) $(ARGS) usage:: @echo " vars -- echos commands to set COQTOP and COQBIN variables" vars: @(cd $(TOPDIR); \ echo export COQTOP=`pwd`/ ; \ echo export COQBIN=`pwd`/bin/ ) coq-8.4pl2/dev/tools/change-header0000750000175000001440000000256411422606420016151 0ustar notinusers#!/bin/sh #This script changes the header of .ml* files if [ ! $# = 2 ]; then echo Usage: change-header old-header-file new-header-file exit 1 fi oldheader=$1 newheader=$2 if [ ! -f $oldheader ]; then echo Cannot read file $oldheader; exit 1; fi if [ ! -f $newheader ]; then echo Cannot read file $newheader; exit 1; fi n=`wc -l $oldheader | sed -e "s/ *\([0-9]*\).*/\1/g"` nsucc=`expr $n + 1` linea='(* -*- coding:utf-8 -*- *)' lineb='(* -*- compile-command: "make -C ../.. bin/coqdoc" -*- *)' modified=0 kept=0 for i in `find . 
-name \*.mli -o -name \*.ml -o -name \*.ml4 -o -name \*.mll -o -name \*.mly -o -name \*.mlp -o -name \*.v`; do headline=`head -n 1 $i` if `echo $headline | grep "(\* -\*- .* \*)" > /dev/null`; then # Has emacs header head -n +$nsucc $i | tail -n $n > $i.head.tmp$$ hasheadline=1 nnext=`expr $nsucc + 1` else head -n +$n $i > $i.head.tmp$$ hasheadline=0 nnext=$nsucc fi if diff -a -q $oldheader $i.head.tmp$$ > /dev/null; then echo "$i: header changed" if [ $hasheadline = 1 ]; then echo $headline > $i.tmp$$ else touch $i.tmp$$ fi cat $newheader >> $i.tmp$$ tail -n +$nnext $i >> $i.tmp$$ mv $i.tmp$$ $i modified=`expr $modified + 1` else kept=`expr $kept + 1` fi rm $i.head.tmp$$ done echo $modified files updated echo $kept files unchanged coq-8.4pl2/dev/printers.mllib0000640000175000001440000000231111706377057015306 0ustar notinusersCoq_config Pp_control Pp Compat Flags Segmenttree Unicodetable Util Errors Bigint Hashcons Dyn System Envars Store Gmap Fset Fmap Gmapl Profile Explore Predicate Rtree Heap Option Dnet Hashtbl_alt Names Univ Esubst Term Mod_subst Sign Cbytecodes Copcodes Cemitcodes Declarations Retroknowledge Pre_env Cbytegen Environ Conv_oracle Closure Reduction Type_errors Entries Modops Inductive Typeops Indtypes Cooking Term_typing Subtyping Mod_typing Safe_typing Summary Nameops Libnames Global Nametab Libobject Lib Goptions Decls Heads Assumptions Termops Namegen Evd Glob_term Reductionops Inductiveops Retyping Cbv Pretype_errors Evarutil Term_dnet Recordops Evarconv Arguments_renaming Typing Pattern Matching Tacred Classops Typeclasses_errors Typeclasses Detyping Indrec Coercion Unification Cases Pretyping Declaremods Tok Lexer Ppextend Genarg Topconstr Notation Dumpglob Reserve Impargs Syntax_def Implicit_quantifiers Smartlocate Constrintern Modintern Constrextern Tacexpr Proof_type Goal Logic Refiner Clenv Evar_refiner Proofview Proof Proof_global Pfedit Tactic_debug Decl_mode Ppconstr Extend Extrawit Pcoq Printer Pptactic Ppdecl_proof Tactic_printer Egrammar Himsg Cerrors Vernacexpr Vernacinterp Top_printers coq-8.4pl2/dev/html/0000750000175000001440000000000012127276535013362 5ustar notinuserscoq-8.4pl2/dev/doc/0000750000175000001440000000000012127276535013163 5ustar notinuserscoq-8.4pl2/dev/doc/notes-on-conversion0000640000175000001440000000501510615107107017021 0ustar notinusers(**********************************************************************) (* A few examples showing the current limits of the conversion algorithm *) (**********************************************************************) (*** We define (pseudo-)divergence from Ackermann function ***) Definition ack (n : nat) := (fix F (n0 : nat) : nat -> nat := match n0 with | O => S | S n1 => fun m : nat => (fix F0 (n2 : nat) : nat := match n2 with | O => F n1 1 | S n3 => F n1 (F0 n3) end) m end) n. Notation OMEGA := (ack 4 4). Definition f (x:nat) := x. (* Evaluation in tactics can somehow be controled *) Lemma l1 : OMEGA = OMEGA. reflexivity. (* succeed: identity *) Qed. (* succeed: identity *) Lemma l2 : OMEGA = f OMEGA. reflexivity. (* fail: conversion wants to convert OMEGA with f OMEGA *) Abort. (* but it reduces the right side first! *) Lemma l3 : f OMEGA = OMEGA. reflexivity. (* succeed: reduce left side first *) Qed. (* succeed: expected concl (the one with f) is on the left *) Lemma l4 : OMEGA = OMEGA. assert (f OMEGA = OMEGA) by reflexivity. (* succeed *) unfold f in H. (* succeed: no type-checking *) exact H. (* succeed: identity *) Qed. 
(* fail: "f" is on the left *) (* This example would fail whatever the preferred side is *) Lemma l5 : OMEGA = f OMEGA. unfold f. assert (f OMEGA = OMEGA) by reflexivity. unfold f in H. exact H. Qed. (* needs to convert (f OMEGA = OMEGA) and (OMEGA = f OMEGA) *) (**********************************************************************) (* Analysis of the inefficiency in Nijmegen/LinAlg/LinAlg/subspace_dim.v *) (* (proof of span_ind_uninject_prop *) In the proof, a problem of the form (Equal S t1 t2) is "simpl"ified, then "red"uced to (Equal S' t1 t1) where the new t1's are surrounded by invisible coercions. A reflexivity steps conclude the proof. The trick is that Equal projects the equality in the setoid S, and that (Equal S) itself reduces to some (fun x y => Equal S' (f x) (g y)). At the Qed time, the problem to solve is (Equal S t1 t2) = (Equal S' t1 t1) and the algorithm is to first compare S and S', and t1 and t2. Unfortunately it does not work, and since t1 and t2 involve concrete instances of algebraic structures, it takes a lot of time to realize that it is not convertible. The only hope to improve this problem is to observe that S' hides (behind two indirections) a Setoid constructor. This could be the argument to solve the problem. coq-8.4pl2/dev/doc/changes.txt0000640000175000001440000005754011645113152015335 0ustar notinusers========================================= = CHANGES BETWEEN COQ V8.3 AND COQ V8.4 = ========================================= ** Functions in unification.ml have now the evar_map coming just after the env ** Removal of Tacinterp.constr_of_id ** Use instead either global_reference or construct_reference in constrintern.ml. ** Optimizing calls to Evd functions ** Evars are split into defined evars and undefined evars; for efficiency, when an evar is known to be undefined, it is preferable to use specific functions about undefined evars since these ones are generally fewer than the defined ones. ** Type changes in TACTIC EXTEND rules ** Arguments bound with tactic(_) in TACTIC EXTEND rules are now of type glob_tactic_expr, instead of glob_tactic_expr * tactic. Only the first component is kept, the second one can be obtained via Tacinterp.eval_tactic. ** ARGUMENT EXTEND ** It is now forbidden to use TYPED simultaneously with {RAW,GLOB}_TYPED in ARGUMENT EXTEND statements. ** Renaming of rawconstr to glob_constr ** The "rawconstr" type has been renamed to "glob_constr" for consistency. The "raw" in everything related to former rawconstr has been changed to "glob". For more details about the rationale and scripts to migrate code using Coq's internals, see commits 13743, 13744, 13755, 13756, 13757, 13758, 13761 (by glondu, end of December 2010) in Subversion repository. Contribs have been fixed too, and commit messages there might also be helpful for migrating. ========================================= = CHANGES BETWEEN COQ V8.2 AND COQ V8.3 = ========================================= ** Light cleaning in evarutil.ml ** whd_castappevar is now whd_head_evar obsolete whd_ise disappears ** Restructuration of the syntax of binders ** binders_let -> binders binders_let_fixannot -> binders_fixannot binder_let -> closed_binder (and now covers only bracketed binders) binder was already obsolete and has been removed ** Semantical change of h_induction_destruct ** Warning, the order of the isrec and evar_flag was inconsistent and has been permuted. Tactic induction_destruct in tactics.ml is unchanged. 
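(Illustration of the TACTIC EXTEND type change listed in the V8.4 section above; the rule name is made up and the old body is only a plausible example.)
  (* up to 8.3, tactic(t) bound t : glob_tactic_expr * tactic, so a body could be e.g. [ snd t ] *)
  (* from 8.4 on, t : glob_tactic_expr and must be evaluated explicitly *)
  TACTIC EXTEND my_tac
  | [ "my_tac" tactic(t) ] -> [ Tacinterp.eval_tactic t ]
  END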
** Internal tactics renamed There is no more difference between bindings and ebindings. The following tactics are therefore renamed apply_with_ebindings_gen -> apply_with_bindings_gen left_with_ebindings -> left_with_bindings right_with_ebindings -> right_with_bindings split_with_ebindings -> split_with_bindings and the following tactics are removed apply_with_ebindings (use instead apply_with_bindings) eapply_with_ebindings (use instead eapply_with_bindings) ** Obsolete functions in typing.ml For mtype_of, msort_of, mcheck, now use type_of, sort_of, check ** Renaming functions renamed concrete_name -> compute_displayed_name_in concrete_let_name -> compute_displayed_let_name_in rename_rename_bound_var -> rename_bound_vars_as_displayed lookup_name_as_renamed -> lookup_name_as_displayed next_global_ident_away true -> next_ident_away_in_goal next_global_ident_away false -> next_global_ident_away ** Cleaning in commmand.ml Functions about starting/ending a lemma are in lemmas.ml Functions about inductive schemes are in indschemes.ml Functions renamed: declare_one_assumption -> declare_assumption declare_assumption -> declare_assumptions Command.syntax_definition -> Metasyntax.add_syntactic_definition declare_interning_data merged with add_notation_interpretation compute_interning_datas -> compute_full_internalization_env implicits_env -> internalization_env full_implicits_env -> full_internalization_env build_mutual -> do_mutual_inductive build_recursive -> do_fixpoint build_corecursive -> do_cofixpoint build_induction_scheme -> build_mutual_induction_scheme build_indrec -> build_induction_scheme instantiate_type_indrec_scheme -> weaken_sort_scheme instantiate_indrec_scheme -> modify_sort_scheme make_case_dep, make_case_nodep -> build_case_analysis_scheme make_case_gen -> build_case_analysis_scheme_default Types: decl_notation -> decl_notation option ** Cleaning in libnames/nametab interfaces Functions: dirpath_prefix -> pop_dirpath extract_dirpath_prefix pop_dirpath_n extend_dirpath -> add_dirpath_suffix qualid_of_sp -> qualid_of_path pr_sp -> pr_path make_short_qualid -> qualid_of_ident sp_of_syntactic_definition -> path_of_syntactic_definition sp_of_global -> path_of_global id_of_global -> basename_of_global absolute_reference -> global_of_path locate_syntactic_definition -> locate_syndef path_of_syntactic_definition -> path_of_syndef push_syntactic_definition -> push_syndef Types: section_path -> full_path ** Cleaning in parsing extensions (commit 12108) Many moves and renamings, one new file (Extrawit, that contains wit_tactic). 
** Cleaning in tactical.mli tclLAST_HYP -> onLastHyp tclLAST_DECL -> onLastDecl tclLAST_NHYPS -> onNLastHypsId tclNTH_DECL -> onNthDecl tclNTH_HYP -> onNthHyp onLastHyp -> onLastHypId onNLastHyps -> onNLastDecls onClauses -> onClause allClauses -> allHypsAndConcl + removal of various unused combinators on type "clause" ========================================= = CHANGES BETWEEN COQ V8.1 AND COQ V8.2 = ========================================= A few differences in Coq ML interfaces between Coq V8.1 and V8.2 ================================================================ ** Datatypes List of occurrences moved from "int list" to "Termops.occurrences" (an alias to "bool * int list") ETIdent renamed to ETName ** Functions Eauto: e_resolve_constr, vernac_e_resolve_constr -> simplest_eapply Tactics: apply_with_bindings -> apply_with_bindings_wo_evars Eauto.simplest_apply -> Hiddentac.h_simplest_apply Evarutil.define_evar_as_arrow -> define_evar_as_product Old version of Tactics.assert_tac disappears Tactics.true_cut renamed into Tactics.assert_tac Constrintern.interp_constrpattern -> intern_constr_pattern Hipattern.match_with_conjunction is a bit more restrictive Hipattern.match_with_disjunction is a bit more restrictive ** Universe names (univ.mli) base_univ -> type0_univ (* alias of Set is the Type hierarchy *) prop_univ -> type1_univ (* the type of Set in the Type hierarchy *) neutral_univ -> lower_univ (* semantic alias of Prop in the Type hierarchy *) is_base_univ -> is_type1_univ is_empty_univ -> is_lower_univ ** Sort names (term.mli) mk_Set -> set_sort mk_Prop -> prop_sort type_0 -> type1_sort ========================================= = CHANGES BETWEEN COQ V8.0 AND COQ V8.1 = ========================================= A few differences in Coq ML interfaces between Coq V8.0 and V8.1 ================================================================ ** Functions Util: option_app -> option_map Term: substl_decl -> subst_named_decl Lib: library_part -> remove_section_part Printer: prterm -> pr_lconstr Printer: prterm_env -> pr_lconstr_env Ppconstr: pr_sort -> pr_rawsort Evd: in_dom, etc got standard ocaml names (i.e. 
mem, etc) Pretyping: - understand_gen_tcc and understand_gen_ltac merged into understand_ltac - type_constraints can now say typed by a sort (use OfType to get the previous behavior) Library: import_library -> import_module ** Constructors Declarations: mind_consnrealargs -> mind_consnrealdecls NoRedun -> NoDup Cast and RCast have an extra argument: you can recover the previous behavior by setting the extra argument to "CastConv DEFAULTcast" and "DEFAULTcast" respectively Names: "kernel_name" is now "constant" when argument of Term.Const Tacexpr: TacTrueCut and TacForward(false,_,_) merged into new TacAssert Tacexpr: TacForward(true,_,_) branched to TacLetTac ** Modules module Decl_kinds: new interface module Bigint: new interface module Tacred spawned module Redexpr module Symbols -> Notation module Coqast, Ast, Esyntax, Termast, and all other modules related to old syntax are removed module Instantiate: integrated to Evd module Pretyping now a functor: use Pretyping.Default instead ** Internal names OBJDEF and OBJDEF1 -> CANONICAL-STRUCTURE ** Tactic extensions - printers have an extra parameter which is a constr printer at high precedence - the tactic printers have an extra arg which is the expected precedence - level is now a precedence in declare_extra_tactic_pprule - "interp" functions now of types the actual arg type, not its encapsulation as a generic_argument ========================================= = CHANGES BETWEEN COQ V7.4 AND COQ V8.0 = ========================================= See files in dev/syntax-v8 ============================================== = MAIN CHANGES BETWEEN COQ V7.3 AND COQ V7.4 = ============================================== CHANGES DUE TO INTRODUCTION OF MODULES ====================================== 1.Kernel -------- The module level has no effect on constr except for the structure of section_path. The type of unique names for constructions (what section_path served) is now called a kernel name and is defined by type uniq_ident = int * string * dir_path (* int may be enough *) type module_path = | MPfile of dir_path (* reference to physical module, e.g. file *) | MPbound of uniq_ident (* reference to a module parameter in a functor *) | MPself of uniq_ident (* reference to one of the containing module *) | MPdot of module_path * label type label = identifier type kernel_name = module_path * dir_path * label ^^^^^^^^^^^ ^^^^^^^^ ^^^^^ | | \ | | the base name | \ / the (true) section path example: (non empty only inside open sections) L = (* i.e. some file of logical name L *) struct module A = struct Def a = ... end end M = (* i.e. some file of logical name M *) struct Def t = ... N = functor (X : sig module T = struct Def b = ... end end) -> struct module O = struct Def u = ... end Def x := ... .t ... .O.u ... X.T.b ... L.A.a and are self-references, X is a bound reference and L is a reference to a physical module. Notice that functor application is not part of a path: it must be named by a "module M = F(A)" declaration to be used in a kernel name. Notice that Jacek chose a practical approach, making directories not modules. Another approach could have been to replace the constructor MPfile by a constant constructor MProot representing the root of the world. Other relevant informations are in kernel/entries.ml (type module_expr) and kernel/declarations.ml (type module_body and module_type_body). 2. Library ---------- i) tables [Summaries] - the only change is the special treatment of the global environmet. 
ii) objects [Libobject] declares persistent objects, given with methods: * cache_function specifying how to add the object in the current scope; * load_function, specifying what to do when the module containing the object is loaded; * open_function, specifying what to do when the module containing the object is opened (imported); * classify_function, specyfying what to do with the object, when the current module (containing the object) is ended. * subst_function * export_function, to signal end_section survival (Almost) Each of these methods is called with a parameter of type object_name = section_path * kernel_name where section_path is the full user name of the object (such as Coq.Init.Datatypes.Fst) and kernel_name is its substitutive internal version such as (MPself,[],"Fst") (see above) What happens at the end of an interactive module ? ================================================== (or when a file is stored and reloaded from disk) All summaries (except Global environment) are reverted to the state from before the beginning of the module, and: a) the objects (again, since last Declaremods.start_module or Library.start_library) are classified using the classify_function. To simplify consider only those who returned Substitute _ or Keep _. b) If the module is not a functor, the subst_function for each object of the first group is called with the substitution [MPself "" |-> MPfile "Coq.Init.Datatypes"]. Then the load_function is called for substituted objects and the "keep" object. (If the module is a library the substitution is done at reloading). c) The objects which returned substitute are stored in the modtab together with the self ident of the module, and functor argument names if the module was a functor. They will be used (substituted and loaded) when a command like Module M := F(N) or Module Z := N is evaluated The difference between "substitute" and "keep" objects ======================================================== i) The "keep" objects can _only_ reference other objects by section_paths and qualids. They do not need the substitution function. They will work after end_module (or reloading a compiled library), because these operations do not change section_path's They will obviously not work after Module Z:=N. These would typically be grammar rules, pretty printing rules etc. ii) The "substitute" objects can _only_ reference objects by kernel_names. They must have a valid subst_function. They will work after end_module _and_ after Module Z:=N or Module Z:=F(M). Other kinds of objects: iii) "Dispose" - objects which do not survive end_module As a consequence, objects which reference other objects sometimes by kernel_names and sometimes by section_path must be of this kind... iv) "Anticipate" - objects which must be treated individually by end_module (typically "REQUIRE" objects) Writing subst_thing functions ============================= The subst_thing shoud not copy the thing if it hasn't actually changed. There are some cool emacs macros in dev/objects.el to help writing subst functions this way quickly and without errors. Also there are *_smartmap functions in Util. The subst_thing functions are already written for many types, including constr (Term.subst_mps), global_reference (Libnames.subst_global), rawconstr (Rawterm.subst_raw) etc They are all (apart from constr, for now) written in the non-copying way. Nametab ======= Nametab has been made more uniform. For every kind of thing there is only one "push" function and one "locate" function. 
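(Complementing the "Writing subst_thing functions" paragraph above, here is a minimal sketch of the non-copying style; the record type "thing" is made up for illustration, while subst_mps and the *_smartmap helpers are the existing functions mentioned above.)
  type thing = { lem : constr; args : constr list }

  let subst_thing subst t =
    (* substitute kernel names in each field *)
    let lem' = subst_mps subst t.lem in
    let args' = list_smartmap (subst_mps subst) t.args in
    (* if nothing actually changed, return the original value (no copy) *)
    if lem' == t.lem && args' == t.args then t
    else { lem = lem'; args = args' }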
Lib === library_segment is now a list of object_name * library_item, where object_name = section_path * kernel_name (see above) New items have been added for open modules and module types Declaremods ========== Functions to declare interactive and noninteractive modules and module types. Library ======= Uses Declaremods to actually communicate with Global and to register objects. OTHER CHANGES ============= Internal representation of tactic bindings has changed (see type Rawterm.substitution). New parsing model for tactics and vernacular commands - Introduction of a dedicated type for tactic expressions (Tacexpr.raw_tactic_expr) - Introduction of a dedicated type for vernac expressions (Vernacexpr.vernac_expr) - Declaration of new vernacular parsing rules by a new camlp4 macro GRAMMAR COMMAND EXTEND ... END to be used in ML files - Declaration of new tactic parsing/printing rules by a new camlp4 macro TACTIC EXTEND ... END to be used in ML files New organisation of THENS: tclTHENS tac tacs : tacs is now an array tclTHENSFIRSTn tac1 tacs tac2 : apply tac1 then apply the array tacs on the first n subgoals and tac2 on the remaining subgoals (previously tclTHENST) tclTHENSLASTn tac1 tac2 tacs : apply tac1 then apply tac2 on the first subgoals and apply the array tacs on the last n subgoals tclTHENFIRSTn tac1 tacs = tclTHENSFIRSTn tac1 tacs tclIDTAC (prev. tclTHENSI) tclTHENLASTn tac1 tacs = tclTHENSLASTn tac1 tclIDTAC tacs tclTHENFIRST tac1 tac2 = tclTHENFIRSTn tac1 [|tac2|] tclTHENLAST tac1 tac2 = tclTHENLASTn tac1 [|tac2|] (previously tclTHENL) tclTHENS tac1 tacs = tclTHENSFIRSTn tac1 tacs (fun _ -> error "wrong number") tclTHENSV same as tclTHENS but with an array tclTHENSi : no longer available Proof_type: subproof field in type proof_tree glued with the ref field Tacmach: no more echo from functions of module Refiner Files plugins/*/g_*.ml4 take the place of files plugins/*/*.v. Files parsing/{vernac,tac}extend.ml{4,i} implement TACTIC EXTEND and VERNAC COMMAND EXTEND macros File syntax/PPTactic.v moved to parsing/pptactic.ml Tactics about False and not now in tactics/contradiction.ml Tactics depending on Init now tactics/*.ml4 (no longer in tactics/*.v) File tacinterp.ml moved from proofs to directory tactics ========================================== = MAIN CHANGES FROM COQ V7.1 TO COQ V7.2 = ========================================== The core of Coq (kernel) has been minimized with the following effects: kernel/term.ml split into kernel/term.ml, pretyping/termops.ml kernel/reduction.ml split into kernel/reduction.ml, pretyping/reductionops.ml kernel/names.ml split into kernel/names.ml, library/nameops.ml kernel/inductive.ml split into kernel/inductive.ml, pretyping/inductiveops.ml the prefixes "Is" and "IsMut" have been dropped from kind_of_term constructors, e.g. IsRel is now Rel, IsMutCase is now Case, etc.
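(Schematically, and only as a sketch of the renaming: code written for V7.1 such as
  match kind_of_term c with IsRel n -> ... | IsMutCase _ -> ...
now reads
  match kind_of_term c with Rel n -> ... | Case _ -> ...
only the constructor names change.)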
======================================================= = MAIN CHANGES BETWEEN COQ V6.3.1 AND COQ V7.0 = ======================================================= Organisation / module changes: -------------------------------------- Std, More_util -> lib/util.ml Names -> kernel/names.ml and kernel/sign.ml (the names and signatures parts have been separated) Avm,Mavm,Fmavm,Mhm -> use Map instead (and freeze then comes for free) Mhb -> Bij Generic is integrated into Term (and a little bit into Closure) Changes in data types: --------------------------------------- in Generic: free_rels : constr -> int Listset.t becomes : constr -> Intset.t type_judgement -> typed_type environment -> context context -> typed_type signature WARNING: ---------- There are now other exceptions than UserError (TypeError, RefinerError, etc.) To catch them it is therefore no longer enough to write try ... with UserError _ -> ... but one should write instead try ... with e when Logic.catchable_exception e -> ... Changes in functions: -------------------------------- Vectops. it_vect -> Array.fold_left vect_it -> Array.fold_right exists_vect -> Util.array_exists for_all2eq_vect -> Util.array_for_all2 tabulate_vect -> Array.init hd_vect -> Util.array_hd tl_vect -> Util.array_tl last_vect -> Util.array_last it_vect_from -> array_fold_left_from vect_it_from -> array_fold_right_from app_tl_vect -> array_app_tl cons_vect -> array_cons map_i_vect -> Array.mapi map2_vect -> array_map2 list_of_tl_vect -> array_list_of_tl Names sign_it -> fold_var_context (now done on env) it_sign -> fold_var_context_reverse (now done on env) Generic noccur_bet -> noccur_between substn_many -> substnl Std comp -> Util.compose rev_append -> List.rev_append Termenv mind_specif_of_mind -> Global.lookup_mind_specif or Environ.lookup_mind_specif if an env is at hand mis_arity -> instantiate_arity mis_lc -> instantiate_lc Ex-Environ mind_of_path -> Global.lookup_mind Printer gentermpr -> gen_pr_term term0 -> prterm_env pr_sign -> pr_var_context pr_context_opt -> pr_context_of pr_ne_env -> pr_ne_context_of Typing, Machops type_of_type -> judge_of_type fcn_proposition -> judge_of_prop_contents safe_fmachine -> safe_infer Reduction, Clenv whd_betadeltat -> whd_betaevar whd_betadeltatiota -> whd_betaiotaevar find_mrectype -> Inductive.find_mrectype find_minductype -> Inductive.find_inductive find_mcoinductype -> Inductive.find_coinductive Astterm constr_of_com_casted -> interp_casted_constr constr_of_com_sort -> interp_type constr_of_com -> interp_constr rawconstr_of_com -> interp_rawconstr type_of_com -> type_judgement_of_rawconstr judgement_of_com -> judgement_of_rawconstr Termast bdize -> ast_of_constr Tacmach pf_constr_of_com_sort -> pf_interp_type pf_constr_of_com -> pf_interp_constr pf_get_hyp -> pf_get_hyp_typ pf_hyps, pf_untyped_hyps -> pf_env (everything is now done on env) Pattern raw_sopattern_of_compattern -> Astterm.interp_constrpattern somatch -> is_matching dest_somatch -> matches Tacticals matches -> gl_is_matching dest_match -> gl_matches suff -> use sort_of_goal lookup_eliminator -> use sort_of_goal for the last arg Misc initial_sign -> var_context Sign ids_of_sign -> ids_of_var_context (or Environ.ids_of_context) empty_sign -> empty_var_context Pfedit list_proofs -> get_all_proof_names get_proof -> get_current_proof_name abort_goal -> abort_proof abort_goals -> abort_all_proofs abort_cur_goal -> abort_current_proof get_evmap_sign -> get_goal_context/get_current_goal_context
unset_undo -> reset_undo Proof_trees mkGOAL -> mk_goal Declare machine_constant -> declare_constant (+ modifs) ex-Trad, now Pretyping inh_cast_rel -> Coercion.inh_conv_coerce_to inh_conv_coerce_to -> Coercion.inh_conv_coerce_to_fail ise_resolve1 -> understand, understand_type ise_resolve -> understand_judgment, understand_type_judgment ex-Tradevar, now Evarutil mt_tycon -> empty_tycon Recordops struc_info -> find_structure Changes in inductive types ------------------------------ New types "constructor" and "inductive" in Term Most typing functions on inductive types now take an inductive instead of a constr as argument. The only functions translating a constr into an inductive are find_rectype and co. Changes in grammars ------------------------------- . the lexer (parsing/lexer.mll) is now an ocamllex lexer . beware: LIDENT -> IDENT (identifiers have no specific case in Coq) . The word "command" is replaced by "constr" in file names, module names and non-terminals related to term parsing; hence also the changes "COMMAND"/"CONSTR" in g_vernac.ml4, VARG_COMMAND/VARG_CONSTR in vernac*.ml* . The tactic argument constructors IDENTIFIER, CONSTR, ... become lowercase Identifier, Constr, ... . Several parsers have changed format (e.g. sortarg) Changes in pretty-printing ----------------------------------- . Decoupling of the translation constr -> rawconstr (in detyping) and of rawconstr -> ast (in termast) . Display options moved from printer to termast . Universe redispatching of the pretty-printer moved from printer to esyntax Miscellaneous changes ------------------ . there is no coqtop script anymore => coqtop and coqtop.byte are directly the result of linking the code => direct debugging and profiling . there is no local installation in bin/$ARCH anymore . #use "include.ml" => #use "include" go() => loop() . there are both "make depend" and "make dependcamlp4" because the latter takes a lot of time coq-8.4pl2/dev/doc/unification.txt0000640000175000001440000001222511575506566016235 0ustar notinusersSome notes about the use of unification in Coq ---------------------------------------------- There are several applications of unification and pattern-matching ** Unification of types ** - For type inference, inference of implicit arguments * this basically amounts to solving problems of the form T <= U or T = U where T and U are types coming from a given typing problem * this kind of problem has to succeed and all the power of unification is a priori expected (full beta/delta/iota/zeta/nu/mu, pattern-unification, pruning, imitation/projection heuristics, ...) - For lemma application (apply, auto, ...) * these are also problems of the form T <= U on types but with T coming from a lemma and U from the goal * it is not obvious that we always want unification and not matching * it is not clear which amount of delta one wants to use ** Looking for subterms ** - For tactics applying on subterms: induction, destruct, rewrite - As part of unification of types in the presence of higher-order evars (e.g. when applying a lemma of conclusion "?P t") ---------------------------------------------------------------------- Here are examples of features one may want or not when looking for subterms A- REWRITING 1- Full conversion on closed terms 1a- Full conversion on closed terms in the presence of at least one evar (meta) Section A1. Variable y: nat.
Hypothesis H: forall x, x+2 = 0. Goal y+(1+1) = 0. rewrite H. (* 0 = 0 *) Abort. Goal 2+(1+1) = 0. rewrite H. (* 0 = 0 *) Abort. (* This exists since the very beginning of Chet's unification for tactics *) (* But this fails for setoid rewrite *) 1b- Full conversion on closed terms without any evars in the lemma 1b.1- Fails on rewrite (because Unification.w_unify_to_subterm_list replaces unification by check for a syntactic subterm if terms has no evar/meta) Goal 0+1 = 0 -> 0+(1+0) = 0. intros H; rewrite H. (* fails *) Abort. 1b.2- Works with setoid rewrite Require Import Setoid. Goal 0+1 = 0 -> 0+(1+0) = 0. intros H; rewrite H at 1. (* 0 = 0 *) Abort. 2- Using known instances in full conversion on closed terms Section A2. Hypothesis H: forall x, x+(2+x) = 0. Goal 1+(1+2) = 0. rewrite H. Abort. End A2. (* This exists since 8.2 (HH) *) 3- Pattern-unification on Rels Section A3a. Variable F: (nat->nat->nat)->nat. Goal exists f, F (fun x y => f x y) = 0 -> F (fun x y => plus y x) = 0. eexists. intro H; rewrite H. (* 0 = 0 *) Abort. End A3a. (* Works since pattern unification on Meta applied to Rel was introduced *) (* in unification.ml (8.1, Sep 2006, HH) *) Section A3b. Variables x y: nat. Variable H: forall f, f x y = 0. Goal plus y x = 0. rewrite H. (* 0 = 0 *) Abort. End A3b. (* Works since pattern unification on all Meta was supported *) (* in unification.ml (8.4, Jun 2011, HH) *) 4- Unification with open terms Section A4. Hypothesis H: forall x, S x = 0. Goal S 0 = 0. rewrite (H _). (* 0 = 0 *) Abort. End A4. (* Works since unification on Evar was introduced so as to support rewriting *) (* with open terms (8.2, MS, r11543, Unification.w_unify_to_subterm_list ) *) 5- Unification of pre-existing evars 5a- Basic unification of pre-existing evars Section A4. Variables x y: nat. Goal exists z, S z = 0 -> S (plus y x) = 0. eexists. intro H; rewrite H. (* 0 = 0 *) Abort. End A4. (* This worked in 8.2 and 8.3 as a side-effect of support for rewriting *) (* with open terms (8.2, MS, r11543) *) 5b- Pattern-unification of pre-existing evars in rewriting lemma Goal exists f, forall x y, f x y = 0 -> plus y x = 0. eexists. intros x y H; rewrite H. (* 0 = 0 *) Abort. (* Works since pattern-unification on Evar was introduced *) (* in unification.ml (8.3, HH, r12229) *) (* currently governed by a flag: use_evars_pattern_unification *) 5c- Pattern-unification of pre-existing evars in goal Goal exists f, forall x y, plus x y = 0 -> f y x = 0. eexists. intros x y H; rewrite H. (* 0 = 0 *) Abort. (* This worked in 8.2 and 8.3 but was removed for autorewrite in 8.4 *) 5d- Mixing pattern-unification of pre-existing evars in goal and evars in lemma Goal exists f, forall x, (forall y, plus x y = 0) -> forall y:nat, f y x = 0. eexists. intros x H y. rewrite H. (* 0 = 0 *) Abort. (* This worked in 8.2 and 8.3 but was removed for autorewrite in 8.4 *) 6- Multiple non-identical but convertible occurrences Tactic rewrite only considers the first one, from left-to-right, e.g.: Section A6. Variable y: nat. Hypothesis H: forall x, x+2 = 0. Goal (y+(2+0))+(y+(1+1)) = (y+(1+1))+(y+(2+0)). rewrite H. (* 0+(y+(1+1)) = y+(1+1)+0 *) Abort. End A6. Tactic setoid rewrite first looks for syntactically equal terms and if not uses the leftmost occurrence modulo delta. Require Import Setoid. Section A6. Variable y: nat. Hypothesis H: forall x, x+2 = 0. Goal (y+(2+0))+(y+2) = (y+2)+(y+(2+0)). rewrite H at 1 2 3 4. (* (y+(2+0))+0 = 0+(y+(2+0)) *) Abort. Goal (y+(2+0))+(y+(1+1)) = (y+(1+1))+(y+(2+0)). rewrite H at 1 2 3 4. 
(* 0+(y+(1+1)) = y+(1+1)+0 *) Abort. End A6. 7- Conversion Section A6. Variable y: nat. Hypothesis H: forall x, S x = 0. Goal id 1 = 0. rewrite H. B- ELIMINATION (INDUCTION / CASE ANALYSIS) This is simpler because open terms are not allowed and no unification is involved (8.3). coq-8.4pl2/dev/doc/about-hints0000640000175000001440000002474111641554555015355 0ustar notinusersAn investigation of how ZArith lemmas could be classified in different automation classes - Reversible lemmas relating operators (to be declared as hints but needing precedences) - Equivalent notions (one has to be considered as primitive and the other rewritten into the canonical one) - Isomorphisms between structure (one structure has to be considered as more primitive than the other for a give operator) - Irreversible simplifications (to be declared with precedences) - Reversible bottom-up simplifications (to be used in hypotheses) - Irreversible bottom-up simplifications (to be used in hypotheses with precedences) - Rewriting rules (relevant for autorewrite, or for an improved auto) Note: this analysis, made in 2001, was previously stored in theories/ZArith/Zhints.v. It has been moved here to avoid obfuscating the standard library. (**********************************************************************) (** * Reversible lemmas relating operators *) (** Probably to be declared as hints but need to define precedences *) (** ** Conversion between comparisons/predicates and arithmetic operators *) (** Lemmas ending by eq *) (** << Zegal_left: (x,y:Z)`x = y`->`x+(-y) = 0` Zabs_eq: (x:Z)`0 <= x`->`|x| = x` Zeven_div2: (x:Z)(Zeven x)->`x = 2*(Zdiv2 x)` Zodd_div2: (x:Z)`x >= 0`->(Zodd x)->`x = 2*(Zdiv2 x)+1` >> *) (** Lemmas ending by Zgt *) (** << Zgt_left_rev: (x,y:Z)`x+(-y) > 0`->`x > y` Zgt_left_gt: (x,y:Z)`x > y`->`x+(-y) > 0` >> *) (** Lemmas ending by Zlt *) (** << Zlt_left_rev: (x,y:Z)`0 < y+(-x)`->`x < y` Zlt_left_lt: (x,y:Z)`x < y`->`0 < y+(-x)` Zlt_O_minus_lt: (n,m:Z)`0 < n-m`->`m < n` >> *) (** Lemmas ending by Zle *) (** << Zle_left: (x,y:Z)`x <= y`->`0 <= y+(-x)` Zle_left_rev: (x,y:Z)`0 <= y+(-x)`->`x <= y` Zlt_left: (x,y:Z)`x < y`->`0 <= y+(-1)+(-x)` Zge_left: (x,y:Z)`x >= y`->`0 <= x+(-y)` Zgt_left: (x,y:Z)`x > y`->`0 <= x+(-1)+(-y)` >> *) (** ** Conversion between nat comparisons and Z comparisons *) (** Lemmas ending by eq *) (** << inj_eq: (x,y:nat)x=y->`(inject_nat x) = (inject_nat y)` >> *) (** Lemmas ending by Zge *) (** << inj_ge: (x,y:nat)(ge x y)->`(inject_nat x) >= (inject_nat y)` >> *) (** Lemmas ending by Zgt *) (** << inj_gt: (x,y:nat)(gt x y)->`(inject_nat x) > (inject_nat y)` >> *) (** Lemmas ending by Zlt *) (** << inj_lt: (x,y:nat)(lt x y)->`(inject_nat x) < (inject_nat y)` >> *) (** Lemmas ending by Zle *) (** << inj_le: (x,y:nat)(le x y)->`(inject_nat x) <= (inject_nat y)` >> *) (** ** Conversion between comparisons *) (** Lemmas ending by Zge *) (** << not_Zlt: (x,y:Z)~`x < y`->`x >= y` Zle_ge: (m,n:Z)`m <= n`->`n >= m` >> *) (** Lemmas ending by Zgt *) (** << Zle_gt_S: (n,p:Z)`n <= p`->`(Zs p) > n` not_Zle: (x,y:Z)~`x <= y`->`x > y` Zlt_gt: (m,n:Z)`m < n`->`n > m` Zle_S_gt: (n,m:Z)`(Zs n) <= m`->`m > n` >> *) (** Lemmas ending by Zlt *) (** << not_Zge: (x,y:Z)~`x >= y`->`x < y` Zgt_lt: (m,n:Z)`m > n`->`n < m` Zle_lt_n_Sm: (n,m:Z)`n <= m`->`n < (Zs m)` >> *) (** Lemmas ending by Zle *) (** << Zlt_ZERO_pred_le_ZERO: (x:Z)`0 < x`->`0 <= (Zpred x)` not_Zgt: (x,y:Z)~`x > y`->`x <= y` Zgt_le_S: (n,p:Z)`p > n`->`(Zs n) <= p` Zgt_S_le: (n,p:Z)`(Zs p) > n`->`n <= p` Zge_le: (m,n:Z)`m >= n`->`n <= 
m` Zlt_le_S: (n,p:Z)`n < p`->`(Zs n) <= p` Zlt_n_Sm_le: (n,m:Z)`n < (Zs m)`->`n <= m` Zlt_le_weak: (n,m:Z)`n < m`->`n <= m` Zle_refl: (n,m:Z)`n = m`->`n <= m` >> *) (** ** Irreversible simplification involving several comparaisons *) (** useful with clear precedences *) (** Lemmas ending by Zlt *) (** << Zlt_le_reg :(a,b,c,d:Z)`a < b`->`c <= d`->`a+c < b+d` Zle_lt_reg : (a,b,c,d:Z)`a <= b`->`c < d`->`a+c < b+d` >> *) (** ** What is decreasing here ? *) (** Lemmas ending by eq *) (** << Zplus_minus: (n,m,p:Z)`n = m+p`->`p = n-m` >> *) (** Lemmas ending by Zgt *) (** << Zgt_pred: (n,p:Z)`p > (Zs n)`->`(Zpred p) > n` >> *) (** Lemmas ending by Zlt *) (** << Zlt_pred: (n,p:Z)`(Zs n) < p`->`n < (Zpred p)` >> *) (**********************************************************************) (** * Useful Bottom-up lemmas *) (** ** Bottom-up simplification: should be used *) (** Lemmas ending by eq *) (** << Zeq_add_S: (n,m:Z)`(Zs n) = (Zs m)`->`n = m` Zsimpl_plus_l: (n,m,p:Z)`n+m = n+p`->`m = p` Zplus_unit_left: (n,m:Z)`n+0 = m`->`n = m` Zplus_unit_right: (n,m:Z)`n = m+0`->`n = m` >> *) (** Lemmas ending by Zgt *) (** << Zsimpl_gt_plus_l: (n,m,p:Z)`p+n > p+m`->`n > m` Zsimpl_gt_plus_r: (n,m,p:Z)`n+p > m+p`->`n > m` Zgt_S_n: (n,p:Z)`(Zs p) > (Zs n)`->`p > n` >> *) (** Lemmas ending by Zlt *) (** << Zsimpl_lt_plus_l: (n,m,p:Z)`p+n < p+m`->`n < m` Zsimpl_lt_plus_r: (n,m,p:Z)`n+p < m+p`->`n < m` Zlt_S_n: (n,m:Z)`(Zs n) < (Zs m)`->`n < m` >> *) (** Lemmas ending by Zle *) (** << Zsimpl_le_plus_l: (p,n,m:Z)`p+n <= p+m`->`n <= m` Zsimpl_le_plus_r: (p,n,m:Z)`n+p <= m+p`->`n <= m` Zle_S_n: (n,m:Z)`(Zs m) <= (Zs n)`->`m <= n` >> *) (** ** Bottom-up irreversible (syntactic) simplification *) (** Lemmas ending by Zle *) (** << Zle_trans_S: (n,m:Z)`(Zs n) <= m`->`n <= m` >> *) (** ** Other unclearly simplifying lemmas *) (** Lemmas ending by Zeq *) (** << Zmult_eq: (x,y:Z)`x <> 0`->`y*x = 0`->`y = 0` >> *) (* Lemmas ending by Zgt *) (** << Zmult_gt: (x,y:Z)`x > 0`->`x*y > 0`->`y > 0` >> *) (* Lemmas ending by Zlt *) (** << pZmult_lt: (x,y:Z)`x > 0`->`0 < y*x`->`0 < y` >> *) (* Lemmas ending by Zle *) (** << Zmult_le: (x,y:Z)`x > 0`->`0 <= y*x`->`0 <= y` OMEGA1: (x,y:Z)`x = y`->`0 <= x`->`0 <= y` >> *) (**********************************************************************) (** * Irreversible lemmas with meta-variables *) (** To be used by EAuto *) (* Hints Immediate *) (** Lemmas ending by eq *) (** << Zle_antisym: (n,m:Z)`n <= m`->`m <= n`->`n = m` >> *) (** Lemmas ending by Zge *) (** << Zge_trans: (n,m,p:Z)`n >= m`->`m >= p`->`n >= p` >> *) (** Lemmas ending by Zgt *) (** << Zgt_trans: (n,m,p:Z)`n > m`->`m > p`->`n > p` Zgt_trans_S: (n,m,p:Z)`(Zs n) > m`->`m > p`->`n > p` Zle_gt_trans: (n,m,p:Z)`m <= n`->`m > p`->`n > p` Zgt_le_trans: (n,m,p:Z)`n > m`->`p <= m`->`n > p` >> *) (** Lemmas ending by Zlt *) (** << Zlt_trans: (n,m,p:Z)`n < m`->`m < p`->`n < p` Zlt_le_trans: (n,m,p:Z)`n < m`->`m <= p`->`n < p` Zle_lt_trans: (n,m,p:Z)`n <= m`->`m < p`->`n < p` >> *) (** Lemmas ending by Zle *) (** << Zle_trans: (n,m,p:Z)`n <= m`->`m <= p`->`n <= p` >> *) (**********************************************************************) (** * Unclear or too specific lemmas *) (** Not to be used ? 
*) (** ** Irreversible and too specific (not enough regular) *) (** Lemmas ending by Zle *) (** << Zle_mult: (x,y:Z)`x > 0`->`0 <= y`->`0 <= y*x` Zle_mult_approx: (x,y,z:Z)`x > 0`->`z > 0`->`0 <= y`->`0 <= y*x+z` OMEGA6: (x,y,z:Z)`0 <= x`->`y = 0`->`0 <= x+y*z` OMEGA7: (x,y,z,t:Z)`z > 0`->`t > 0`->`0 <= x`->`0 <= y`->`0 <= x*z+y*t` >> *) (** ** Expansion and too specific ? *) (** Lemmas ending by Zge *) (** << Zge_mult_simpl: (a,b,c:Z)`c > 0`->`a*c >= b*c`->`a >= b` >> *) (** Lemmas ending by Zgt *) (** << Zgt_mult_simpl: (a,b,c:Z)`c > 0`->`a*c > b*c`->`a > b` Zgt_square_simpl: (x,y:Z)`x >= 0`->`y >= 0`->`x*x > y*y`->`x > y` >> *) (** Lemmas ending by Zle *) (** << Zle_mult_simpl: (a,b,c:Z)`c > 0`->`a*c <= b*c`->`a <= b` Zmult_le_approx: (x,y,z:Z)`x > 0`->`x > z`->`0 <= y*x+z`->`0 <= y` >> *) (** ** Reversible but too specific ? *) (** Lemmas ending by Zlt *) (** << Zlt_minus: (n,m:Z)`0 < m`->`n-m < n` >> *) (**********************************************************************) (** * Lemmas to be used as rewrite rules *) (** but can also be used as hints *) (** Left-to-right simplification lemmas (a symbol disappears) *) (** << Zcompare_n_S: (n,m:Z)(Zcompare (Zs n) (Zs m))=(Zcompare n m) Zmin_n_n: (n:Z)`(Zmin n n) = n` Zmult_1_n: (n:Z)`1*n = n` Zmult_n_1: (n:Z)`n*1 = n` Zminus_plus: (n,m:Z)`n+m-n = m` Zle_plus_minus: (n,m:Z)`n+(m-n) = m` Zopp_Zopp: (x:Z)`(-(-x)) = x` Zero_left: (x:Z)`0+x = x` Zero_right: (x:Z)`x+0 = x` Zplus_inverse_r: (x:Z)`x+(-x) = 0` Zplus_inverse_l: (x:Z)`(-x)+x = 0` Zopp_intro: (x,y:Z)`(-x) = (-y)`->`x = y` Zmult_one: (x:Z)`1*x = x` Zero_mult_left: (x:Z)`0*x = 0` Zero_mult_right: (x:Z)`x*0 = 0` Zmult_Zopp_Zopp: (x,y:Z)`(-x)*(-y) = x*y` >> *) (** Right-to-left simplification lemmas (a symbol disappears) *) (** << Zpred_Sn: (m:Z)`m = (Zpred (Zs m))` Zs_pred: (n:Z)`n = (Zs (Zpred n))` Zplus_n_O: (n:Z)`n = n+0` Zmult_n_O: (n:Z)`0 = n*0` Zminus_n_O: (n:Z)`n = n-0` Zminus_n_n: (n:Z)`0 = n-n` Zred_factor6: (x:Z)`x = x+0` Zred_factor0: (x:Z)`x = x*1` >> *) (** Unclear orientation (no symbol disappears) *) (** << Zplus_n_Sm: (n,m:Z)`(Zs (n+m)) = n+(Zs m)` Zmult_n_Sm: (n,m:Z)`n*m+n = n*(Zs m)` Zmin_SS: (n,m:Z)`(Zs (Zmin n m)) = (Zmin (Zs n) (Zs m))` Zplus_assoc_l: (n,m,p:Z)`n+(m+p) = n+m+p` Zplus_assoc_r: (n,m,p:Z)`n+m+p = n+(m+p)` Zplus_permute: (n,m,p:Z)`n+(m+p) = m+(n+p)` Zplus_Snm_nSm: (n,m:Z)`(Zs n)+m = n+(Zs m)` Zminus_plus_simpl: (n,m,p:Z)`n-m = p+n-(p+m)` Zminus_Sn_m: (n,m:Z)`(Zs (n-m)) = (Zs n)-m` Zmult_plus_distr_l: (n,m,p:Z)`(n+m)*p = n*p+m*p` Zmult_minus_distr: (n,m,p:Z)`(n-m)*p = n*p-m*p` Zmult_assoc_r: (n,m,p:Z)`n*m*p = n*(m*p)` Zmult_assoc_l: (n,m,p:Z)`n*(m*p) = n*m*p` Zmult_permute: (n,m,p:Z)`n*(m*p) = m*(n*p)` Zmult_Sm_n: (n,m:Z)`n*m+m = (Zs n)*m` Zmult_Zplus_distr: (x,y,z:Z)`x*(y+z) = x*y+x*z` Zmult_plus_distr: (n,m,p:Z)`(n+m)*p = n*p+m*p` Zopp_Zplus: (x,y:Z)`(-(x+y)) = (-x)+(-y)` Zplus_sym: (x,y:Z)`x+y = y+x` Zplus_assoc: (x,y,z:Z)`x+(y+z) = x+y+z` Zmult_sym: (x,y:Z)`x*y = y*x` Zmult_assoc: (x,y,z:Z)`x*(y*z) = x*y*z` Zopp_Zmult: (x,y:Z)`(-x)*y = (-(x*y))` Zplus_S_n: (x,y:Z)`(Zs x)+y = (Zs (x+y))` Zopp_one: (x:Z)`(-x) = x*(-1)` Zopp_Zmult_r: (x,y:Z)`(-(x*y)) = x*(-y)` Zmult_Zopp_left: (x,y:Z)`(-x)*y = x*(-y)` Zopp_Zmult_l: (x,y:Z)`(-(x*y)) = (-x)*y` Zred_factor1: (x:Z)`x+x = x*2` Zred_factor2: (x,y:Z)`x+x*y = x*(1+y)` Zred_factor3: (x,y:Z)`x*y+x = x*(1+y)` Zred_factor4: (x,y,z:Z)`x*y+x*z = x*(y+z)` Zminus_Zplus_compatible: (x,y,n:Z)`x+n-(y+n) = x-y` Zmin_plus: (x,y,n:Z)`(Zmin (x+n) (y+n)) = (Zmin x y)+n` >> *) (** nat <-> Z *) (** << inj_S: 
(y:nat)`(inject_nat (S y)) = (Zs (inject_nat y))` inj_plus: (x,y:nat)`(inject_nat (plus x y)) = (inject_nat x)+(inject_nat y)` inj_mult: (x,y:nat)`(inject_nat (mult x y)) = (inject_nat x)*(inject_nat y)` inj_minus1: (x,y:nat)(le y x)->`(inject_nat (minus x y)) = (inject_nat x)-(inject_nat y)` inj_minus2: (x,y:nat)(gt y x)->`(inject_nat (minus x y)) = 0` >> *) (** Too specific ? *) (** << Zred_factor5: (x,y:Z)`x*0+y = y` >> *) coq-8.4pl2/dev/doc/minicoq.tex0000640000175000001440000000560306765236016015352 0ustar notinusers\documentclass{article} \usepackage{fullpage} \input{./macros.tex} \newcommand{\minicoq}{\textsf{minicoq}} \newcommand{\nonterm}[1]{\textit{#1}} \newcommand{\terminal}[1]{\textsf{#1}} \newcommand{\listzero}{\textit{LIST$_0$}} \newcommand{\listun}{\textit{LIST$_1$}} \newcommand{\sep}{\textit{SEP}} \title{Minicoq: a type-checker for the pure \\ Calculus of Inductive Constructions} \begin{document} \maketitle \section{Introduction} \minicoq\ is a minimal toplevel for the \Coq\ kernel. \section{Grammar of terms} The grammar of \minicoq's terms is given in Figure~\ref{fig:terms}. \begin{figure}[htbp] \hrulefill \begin{center} \begin{tabular}{lrl} term & ::= & identifier \\ & $|$ & \terminal{Rel} integer \\ & $|$ & \terminal{Set} \\ & $|$ & \terminal{Prop} \\ & $|$ & \terminal{Type} \\ & $|$ & \terminal{Const} identifier \\ & $|$ & \terminal{Ind} identifier integer \\ & $|$ & \terminal{Construct} identifier integer integer \\ & $|$ & \terminal{[} name \terminal{:} term \terminal{]} term \\ & $|$ & \terminal{(} name \terminal{:} term \terminal{)} term \\ & $|$ & term \verb!->! term \\ & $|$ & \terminal{(} \listun\ term \terminal{)} \\ & $|$ & \terminal{(} term \terminal{::} term \terminal{)} \\ & $|$ & \verb!! \terminal{Case} term \terminal{of} \listzero\ term \terminal{end} \\[1em] name & ::= & \verb!_! \\ & $|$ & identifier \end{tabular} \end{center} \hrulefill \caption{Grammar of terms} \label{fig:terms} \end{figure} \section{Commands} The grammar of \minicoq's commands are given in Figure~\ref{fig:commands}. All commands end with a dot. \begin{figure}[htbp] \hrulefill \begin{center} \begin{tabular}{lrl} command & ::= & \terminal{Definition} identifier \terminal{:=} term. \\ & $|$ & \terminal{Definition} identifier \terminal{:} term \terminal{:=} term. \\ & $|$ & \terminal{Parameter} identifier \terminal{:} term. \\ & $|$ & \terminal{Variable} identifier \terminal{:} term. \\ & $|$ & \terminal{Inductive} \terminal{[} \listzero\ param \terminal{]} \listun\ inductive \sep\ \terminal{with}. \\ & $|$ & \terminal{Check} term. \\[1em] param & ::= & identifier \\[1em] inductive & ::= & identifier \terminal{:} term \terminal{:=} \listzero\ constructor \sep\ \terminal{$|$} \\[1em] constructor & ::= & identifier \terminal{:} term \end{tabular} \end{center} \hrulefill \caption{Commands} \label{fig:commands} \end{figure} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: coq-8.4pl2/dev/doc/build-system.txt0000640000175000001440000003240711343756546016360 0ustar notinusersSince July 2007, Coq features a build system overhauled by Pierre Corbineau and Lionel Elie Mamane. --------------------------------------------------------------------- WARNING: In March 2010 this build system has been heavily adapted by Pierre Letouzey. In particular there no more explicit stage1,2. Stage3 was removed some time ago when coqdep was splitted into coqdep_boot and full coqdep. 
Ideas are still similar to what is described below, but: 1) .ml4 files are explicitly turned into .ml files, which stay after build 2) we let "make" handle the inclusion of .d without trying to guess what could be done at what time. Some initial inclusions hence _fail_, but "make" tries again later and succeeds. TODO: remove obsolete sections below and better describe the new approach ----------------------------------------------------------------------- This file documents what a Coq developer needs to know about the build system. If you want to enhance the build system itself (or are curious about its implementation details), see build-system.dev.txt . The build system is not at its optimal state, see TODO section. FAQ: special features used in this Makefile ------------------------------------------- * Order-only dependencies: | Dependencies placed after a bar (|) should be built before the current rule, but having one of them out-of-date does not trigger a rebuild of the current rule. See http://www.gnu.org/software/make/manual/make.html#Prerequisite-Types * Annotation before commands: +/-/@ a command starting with - is always successful (errors are ignored) a command starting with + is run even if option -n is given to make a command starting with @ is not echoed before being run * Custom functions Definition via "define foo" followed by commands (arg is $(1) etc) Call via "$(call foo,arg1)" * Useful builtin functions $(subst ...), $(patsubst ...), $(shell ...), $(foreach ...), $(if ...) * Behavior of -include If the file given to -include doesn't exist, make tries to build it, but doesn't care if this build fails. This can be quite surprising, see in particular the -include in Makefile.stage* Stages in build system ---------------------- The build system is separated into three stages, corresponding to the tool(s) necessary to compute the dependencies needed at that stage: stage1: ocamldep, sed, camlp4 without Coq extensions stage2: camlp4 with grammar.cma and/or q_constr.cmo stage3: coqdep (.vo) The file "Makefile" itself serves as the minimum stage for targets that should not need any dependency (such as *clean*). Changes (for old-timers) ------------------------ The contents of the old Makefile have been mostly split into: - variable declarations for file lists in Makefile.common. These declarations are now static (for faster Makefile execution), so their definitions are order-dependent. - actual building rules and compiler flags variables in Makefile.build The handling of globals is now: the globals of FOO.v are in FOO.glob and the global glob.dump is created by concatenation of all .glob files. In particular, .glob files are now always created. See also section "cleaning targets" Reducing build system overhead ------------------------------ When you are actively working on a file in a "make a change, make to test, make a change, make to test", etc. mode, here are a few tips to save precious time: - Always ask for what you want directly (e.g. bin/coqtop, foo/bar.cmo, ...), don't do "make world" and interrupt it when it has done what you want. This will try to minimise the stage at which what you ask for is done (instead of maximising it in order to maximise parallelism of the build process). For example, if you only want to test whether bin/coqtop still builds (and possibly start it to test your bugfix or new feature), don't do "make world" and interrupt it when bin/coqtop is built. Use "make bin/coqtop" or "make coqbinaries" or something like that.
This will avoid entering the stage 3, and cut build system overhead by 50% (1.2s instead of 2.4 on writer's machine). - You can turn off rebuilding of the standard library each time bin/coqtop is rebuilt with NO_RECOMPILE_LIB=1. - If you want to avoid all .ml4 files being recompiled only because grammar.cma was rebuilt, do "make ml4depclean" once and then use NO_RECOMPILE_ML4=1. - The CM_STAGE1=1 option to make will build all .cm* files mentioned as targets on the command line in stage1. Whether this will work is your responsibility. It should work for .ml files that don't depend (nor directly nor indirectly through transitive closure of the dependencies) on any .ml4 file, or where those dependencies can be safely ignored in the current situation (e.g. all these .ml4 files don't need to be recompiled). This will avoid entering the stage2 (a reduction of 33% in overhead, 0.4s on the writer's machine). - To jump directly into a stage (e.g. because you know nothing is to be done in stage 1 or (1 and 2) or because you know that the target you give can be, in this situation, done in a lower stage than the build system dares to), use GOTO_STAGE=n. This will jump into stage n and try to do the targets you gave in that stage. - To disable all dependency recalculation, use the NO_RECALC_DEPS=1 option. It disables REcalculation of dependencies, not calculation of dependencies. In other words, if a .d file does not exist, it is still created, but it is not updated every time the source file (e.g. .ml) is changed. General speed improvements: - When building both the native and bytecode versions, the KEEP_ML4_PREPROCESSED=1 option may reduce global compilation time by running camlp4o only once on every .ml4 file, at the expense of readability of compilation error messages for .ml4 files. Dependencies ------------ There are no dependencies in the archive anymore, they are always bootstrapped. The dependencies of a file FOO are in FOO.d . This enables partial recalculation of dependencies (only the dependencies of changed files are recomputed). If you add a dependency to a Coq camlp4 extension (grammar.cma or q_constr.cmo), then see sections ".ml4 files" and "new files". Cleaning Targets ---------------- Targets for cleaning various parts: - distclean: clean everything; must leave only what should end up in distribution tarball and/or is in a svn checkout. - clean: clean everything except effect of "./configure" and documentation. - cleanconfig: clean effect of "./configure" only - archclean: remove all architecture-dependent generated files - indepclean: remove all architecture-independent generated files (not documentation) - objclean: clean all generated files, but not Makefile meta-data (e.g. dependencies), nor debugging/development information nor other cruft (e.g. editor backup files), nor documentation - docclean: clean documentation .ml4 files ---------- The camlp4-preprocessed version of FOO.ml4 is FOO.ml4-preprocessed and can be obtained with: make FOO.ml4-preprocessed If a .ml4 file uses a grammar extension from Coq (such as grammar.cma or q_constr.cmo), it must contain a line like: (*i camlp4deps: "grammar.cma q_constr.cmo" i*) If it uses a standard grammar extension, it must contain a line like: (*i camlp4use: "pa_ifdef.cmo" i*) It can naturally contain both a camlp4deps and a camlp4use line. Both are used for preprocessing. It is thus _not_ necessary to add a specific rule for a .ml4 file in the Makefile.build just because it uses grammar extensions. 
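For instance, a hypothetical plugin file foo.ml4 using both a standard camlp4 extension and a Coq grammar extension would carry both lines near its top (the exact .cmo/.cma names depend on what the file really uses):
  (*i camlp4use: "pa_extend.cmo" i*)
  (*i camlp4deps: "grammar.cma" i*)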
By default, the build system is geared towards development that may use the Coq grammar extensions, but not towards development of Coq's grammar extensions themselves. This means that .ml4 files are compiled directly (using ocamlc/opt's -pp option), without the use of an intermediary .ml (or .ml4-preprocessed) file. This is so that if a compilation error occurs, the location in the error message is a location in the .ml4 file.

If you are modifying the grammar extensions, you may be more interested in the location of the error in the .ml4-preprocessed file, so that you can see what your new grammar extension got wrong. In that case, use the KEEP_ML4_PREPROCESSED=1 option. This will make compilation of a .ml4 file a two-stage process:
1) create the .ml4-preprocessed file with camlp4o
2) compile it with straight ocamlc/opt, without a preprocessor
and will instruct make not to delete .ml4-preprocessed files automatically just because they are intermediary files, so that you can inspect them.

If you add a _new_ grammar extension to Coq:
- if it can be built at stage1, that is, if the .ml4 file does not itself use a Coq grammar extension, then add it, and all the .cmo files it needs, to STAGE1_TARGETS and STAGE_ML4 in Makefile.common. See the handling of grammar.cma and q_constr.cmo for an example.
- if it cannot be built at stage1, that is, if the .ml4 file itself needs to be preprocessed with a Coq camlp4 grammar extension, then, congratulations, you need to add a new stage between stage1 and stage2.

New files
---------

For a new file, in most cases, you just have to add it to the proper file list(s) in Makefile.common, such as ARITHVO or TACTICS. The list of all .ml4 files is not handled manually anymore. Exceptions are:

- The file is necessary at stage1, that is, it is necessary to build the Coq camlp4 grammar extensions. In this case, make sure it ends up in STAGE1_CMO and (for .ml4 files) STAGE1_ML4. See the handling of grammar.cma and/or q_constr.cmo for an example.
- The file needs to be compiled with -rectypes: add it to RECTYPESML in Makefile.common. If it is a .ml4 file, implement RECTYPESML4 or '(*i ocamlflags i*)'; see TODO.
- The file needs a specific Makefile entry: add it to Makefile.build.
- The files produced from the added file do not match an existing pattern or entry in "Makefile". (All the common cases of .ml{,i,l,y,4}, .v, .c, ... files that produce (respectively) .cm[iox], .vo, .glob, .o, ... files with the same basename are already covered.) In this case, see section "New targets".

New targets
-----------

If you want to add:
- a new PHONY target to the build system, that is, a target that is not the name of the file it creates, or
- a normal target that is not already mapped to a stage by "Makefile",
then:
- add the necessary rule to Makefile.build, if any;
- add the target to STAGEn_TARGETS, with n being the smallest stage it can be built at, that is:
  * 1 for OCaml code that doesn't use any Coq camlp4 grammar extension
  * 2 for OCaml code that uses (directly or indirectly) a Coq camlp4 grammar extension ("indirectly" means that one of its dependencies does)
  * 3 for Coq (.v) code
  *or* add a pattern matching the target to the pattern lists for the smallest stage it can be built at in "Makefile".

TODO
----

Delegate pa_extend.cmo to camlp4use statements and remove it from the standard camlp4 options.

Maybe manage compilation flags (such as -rectypes or the CoqIDE ones) from a (*i ocamlflags: "-rectypes" i*) statement in the .ml(4) files themselves, like camlp4use.
The CoqIDE files could have (*i ocamlflags: "${COQIDEFLAGS}" i*) and COQIDEFLAGS is still defined (and exported by) the Makefile.build. Clean up doc/Makefile config/Makefile looks like it contains a lot of unused variables, clean that up (are any maybe used by nightly scripts on pauillac?). Also, the COQTOP variable from config/Makefile (and used in contribs) has a very poorly chosen name, because "coqtop" is the name of a Coq executable! In the coq Makefiles, $(COQTOPEXE) is used to refer to that executable. Promote the granular .glob handling to official way of doing things for Coq developments, that is implement it in coq_makefile and the contribs. Here are a few hints: >> Les fichiers de constantes produits par -dump-glob sont maintenant >> produits par fichier et sont ensuite concaténés dans >> glob.dump. Ilsont produits par défaut (avec les bonnes >> dépendances). > C'est une chose que l'on voulait faire aussi. (J'ai testé et débogué ce concept sur CoRN dans les derniers mois.) > Est-ce que vous sauriez modifier coq_makefile pour qu'il procède de > la même façon Dans cette optique, il serait alors plus propre de changer coqdep pour qu'il produise directement l'output que nous mettons maintenant dans les .v.d (qui est celui de coqdoc post-processé avec sed). Si cette manière de gérer les glob devient le standard béni officiellement par "the Coq development team", ne voudrions nous pas changer coqc pour qu'il produise FOO.glob lors de la compilation de FOO.v par défaut (sans argument "-dump-glob")? > et que la production de a.html par coqdoc n'ait une dépendance qu'en > les a.v et a.glob correspondant ? Je crois que coqdoc exige un glob-dump unique, il convient donc de concaténer les .glob correspondants. Soit un glob-dump global par projet (par Makefile), soit un glob-dump global par .v(o), qui contient son .glob et ceux de tous les .v(o) atteignables par le graphe des dépendances. CoRN contient déjà un outil de calcul de partie atteignable du graphe des dépendances (il y est pour un autre usage, pour calculer les .v à mettre dans les différents tarballs sur http://corn.cs.ru.nl/download.html; les parties partielles sont définies par liste de fichiers .v + toutes leurs dépendances (in)directes), il serait alors adéquat de le mettre dans les tools de Coq. coq-8.4pl2/dev/doc/newsyntax.tex0000640000175000001440000005777107573144356015773 0ustar notinusers %% -*-french-tex-*- \documentclass{article} \usepackage{verbatim} \usepackage[T1]{fontenc} \usepackage[latin1]{inputenc} \usepackage[french]{babel} \usepackage{amsmath} \usepackage{amssymb} \usepackage{array} \author{B.~Barras} \title{Proposition de syntaxe pour Coq} %% Le _ est un caractre normal \catcode`\_=13 \let\subscr=_ \def_{\ifmmode\sb\else\subscr\fi} %% Macros pour les grammaires \def\NT#1{\langle\textit{#1}\rangle} \def\TERM#1{\textsf{#1}} \def\STAR#1{#1\!*} \def\PLUS#1{#1\!+} %% Tableaux de definition de non-terminaux \newenvironment{cadre} {\begin{array}{|c|}\hline\\} {\\\\\hline\end{array}} \newenvironment{rulebox} {$$\begin{cadre}\begin{array}{r@{~}c@{~}l@{}r}} {\end{array}\end{cadre}$$} \def\DEFNT#1{\NT{#1} & ::= &} \def\EXTNT#1{\NT{#1} & ::= & ... \\&|&} \def\RNAME#1{(\textsc{#1})} \def\SEPDEF{\\\\} \def\nlsep{\\&|&} \begin{document} \maketitle \section{Grammaire des tactiques} \label{tacticsyntax} La rflexion de la rnovation de la syntaxe des tactiques n'est pas encore aussi pousse que pour les termes (section~\ref{constrsyntax}), mais cette section vise noncer les quelques principes que l'on souhaite suivre. 
\begin{itemize} \item Rutiliser les mots-cls de la syntaxe des termes (i.e. en minuscules) pour les constructions similaires de tactiques (let_in, match, and, etc.). Le connecteur logique \texttt{and} n'tant que rarement utilis autrement que sous la forme \texttt{$\wedge$} (sauf dans le code ML), on pourrait dgager ce mot-cl. \item Les arguments passs aux tactiques sont principalement des termes, on prconise l'utilisation d'un symbole spcial (par exemple l'apostrophe) pour passer une tactique ou une expression (AST). L'ide tant que l'on crit plus souvent des tactiques prenant des termes en argument que des tacticals. \end{itemize} \begin{figure} \begin{rulebox} \DEFNT{tactic} \NT{tactic} ~\TERM{\&} ~\NT{tactic} & \RNAME{then} \nlsep \TERM{[} ~\NT{tactic}~\TERM{|}~... ~\TERM{|}~\NT{tactic}~\TERM{]} & \RNAME{par} \nlsep \NT{ident} ~\STAR{\NT{tactic-arg}} ~~~ & \RNAME{apply} \nlsep \TERM{fun} ~.... & \RNAME{function} \nlsep \NT{simple-tactic} \SEPDEF \DEFNT{tactic-arg} \NT{constr} \nlsep \TERM{'} ~\NT{tactic} \SEPDEF \DEFNT{simple-tactic} \TERM{Apply} ~\NT{binding-term} \nlsep \NT{elim-kw} ~\NT{binding-term} \nlsep \NT{elim-kw} ~\NT{binding-term} ~\TERM{using} ~\NT{binding-term} \nlsep \TERM{Intros} ~\NT{intro-pattern} \SEPDEF \DEFNT{elim-kw} \TERM{Elim} ~\mid~ \TERM{Case} ~\mid~ \TERM{Induction} ~\mid~ \TERM{Destruct} \end{rulebox} \caption{Grammaire des tactiques} \label{tactic} \end{figure} \subsection{Arguments de tactiques} La syntaxe actuelle des arguments de tactiques est que l'on parse par dfaut une expression de tactique, ou bien l'on parse un terme si celui-ci est prfix par \TERM{'} (sauf dans le cas des variables). Cela est gnant pour les utilisateurs qui doivent crire des \TERM{'} pour leurs tactiques. mon avis, il n'est pas souhaitable pour l'utilisateur de l'obliger marquer une diffrence entre les tactiques ``primitives'' (en fait ``systme'') et les tactiques dfinies par Ltac. En effet, on se dirige invitablement vers une situation o il existera des librairies de tactiques et il va devenir difficile de savoir facilement s'il faut ou non mettre des \TERM{'}. \subsection{Bindings} Dans un premier temps, les ``bindings'' sont toujours considrs comme une construction du langage des tactiques, mais il est intressant de prvoir l'extension de ce procd aux termes, puisqu'il s'agit simplement de construire un n{\oe}ud d'application dans lequel on donne les arguments par nom ou par position, les autres restant infrer. Le principal point est de trouver comment combiner de manire uniforme ce procd avec les arguments implicites. Il est toutefois important de rflchir ds maintenant une syntaxe pour viter de rechanger encore la syntaxe. Intgrer la notation \TERM{with} aux termes peut poser des problmes puisque ce mot-cl est utilis pour le filtrage: comment parser (en LL(1)) l'expression: \begin{verbatim} Cases x with y ... \end{verbatim} Soit on trouve un autre mot-cl, soit on joue avec les niveaus de priorit en obligeant a parenthser le \TERM{with} des ``bindings'': \begin{verbatim} Cases (x with y) with (C z) => ... \end{verbatim} ce qui introduit un constructeur moralement quivalent une application situ une priorit totalement diffrente (les ``bindings'' seraient au plus haut niveau alors que l'application est un niveau bas). 
\begin{figure} \begin{rulebox} \DEFNT{binding-term} \NT{constr} ~\TERM{with} ~\STAR{\NT{binding}} \SEPDEF \DEFNT{binding} \NT{constr} \end{rulebox} \caption{Grammaire des bindings} \label{bindings} \end{figure} \subsection{Enregistrements} Il faudrait amnager la syntaxe des enregistrements dans l'optique d'avoir des enregistrements anonymes (termes de premire classe), mme si pour l'instant, on ne dispose que d'enregistrements dfinis a toplevel. Exemple de syntaxe pour les types d'enregistrements: \begin{verbatim} { x1 : A1; x2 : A2(x1); _ : T; (* Pas de projection disponible *) y; (* Type infere *) ... (* ; optionnel pour le dernier champ *) } \end{verbatim} Exemple de syntaxe pour le constructeur: \begin{verbatim} { x1 = O; x2 : A2(x1) = v1; _ = v2; ... } \end{verbatim} Quant aux dpendences, une convention pourrait tre de considrer les champs non annots par le type comme non dpendants. Plusieurs interrogations: \begin{itemize} \item l'ordre des champs doit-il tre respect ? sinon, que faire pour les champs sans projection ? \item autorise-t-on \texttt{v1} a mentionner \texttt{x1} (comme dans la dfinition d'un module), ce qui se comporterait comme si on avait crit \texttt{v1} la place. Cela pourrait tre une autre manire de dclarer les dpendences \end{itemize} La notation pointe pour les projections pose un problme de parsing, sauf si l'on a une convention lexicale qui discrimine les noms de modules des projections et identificateurs: \texttt{x.y.z} peut tre compris comme \texttt{(x.y).z} ou texttt{x.(y.z)}. \section{Grammaire des termes} \label{constrsyntax} \subsection{Quelques principes} \begin{enumerate} \item Diminuer le nombre de niveaux de priorit en regroupant les rgles qui se ressemblent: infixes, prfixes, lieurs (constructions ouvertes droite), etc. \item viter de surcharger la signification d'un symbole (ex: \verb+( )+ comme parenthsage et produit dans la V7). \item Faire en sorte que les membres gauches (motifs de Cases, lieurs d'abstraction ou de produits) utilisent une syntaxe compatible avec celle des membres droits (branches de Cases et corps de fonction). 
\end{enumerate} \subsection{Prsentation de la grammaire} \begin{figure} \begin{rulebox} \DEFNT{paren-constr} \NT{cast-constr}~\TERM{,}~\NT{paren-constr} &\RNAME{pair} \nlsep \NT{cast-constr} \SEPDEF \DEFNT{cast-constr} \NT{constr}~\TERM{\!\!:}~\NT{cast-constr} &\RNAME{cast} \nlsep \NT{constr} \SEPDEF \DEFNT{constr} \NT{appl-constr}~\NT{infix}~\NT{constr} &\RNAME{infix} \nlsep \NT{prefix}~\NT{constr} &\RNAME{prefix} \nlsep \NT{constr}~\NT{postfix} &\RNAME{postfix} \nlsep \NT{appl-constr} \SEPDEF \DEFNT{appl-constr} \NT{appl-constr}~\PLUS{\NT{appl-arg}} &\RNAME{apply} \nlsep \TERM{@}~\NT{global}~\PLUS{\NT{simple-constr}} &\RNAME{expl-apply} \nlsep \NT{simple-constr} \SEPDEF \DEFNT{appl-arg} \TERM{@}~\NT{int}~\TERM{\!:=}~\NT{simple-constr} &\RNAME{impl-arg} \nlsep \NT{simple-constr} \SEPDEF \DEFNT{simple-constr} \NT{atomic-constr} \nlsep \TERM{(}~\NT{paren-constr}~\TERM{)} \nlsep \NT{match-constr} \nlsep \NT{fix-constr} %% \nlsep \TERM{<\!\!:ast\!\!:<}~\NT{ast}~\TERM{>\!>} &\RNAME{quotation} \end{rulebox} \caption{Grammaire des termes} \label{constr} \end{figure} \begin{figure} \begin{rulebox} \DEFNT{prefix} \TERM{!}~\PLUS{\NT{binder}}~\TERM{.}~ &\RNAME{prod} \nlsep \TERM{fun} ~\PLUS{\NT{binder}} ~\TERM{$\Rightarrow$} &\RNAME{lambda} \nlsep \TERM{let}~\NT{ident}~\STAR{\NT{binder}} ~\TERM{=}~\NT{constr} ~\TERM{in} &\RNAME{let} %\nlsep \TERM{let (}~\NT{comma-ident-list}~\TERM{) =}~\NT{constr} % ~\TERM{in} &~~~\RNAME{let-case} \nlsep \TERM{if}~\NT{constr}~\TERM{then}~\NT{constr}~\TERM{else} &\RNAME{if-case} \nlsep \TERM{eval}~\NT{red-fun}~\TERM{in} &\RNAME{eval} \SEPDEF \DEFNT{infix} \TERM{$\rightarrow$} & \RNAME{impl} \SEPDEF \DEFNT{atomic-constr} \TERM{_} \nlsep \TERM{?}\NT{int} \nlsep \NT{sort} \nlsep \NT{global} \SEPDEF \DEFNT{binder} \NT{ident} &\RNAME{infer} \nlsep \TERM{(}~\NT{ident}~\NT{type}~\TERM{)} &\RNAME{binder} \SEPDEF \DEFNT{type} \TERM{\!:}~\NT{constr} \nlsep \epsilon \end{rulebox} \caption{Grammaires annexes aux termes} \label{gram-annexes} \end{figure} La grammaire des termes (correspondant l'tat \texttt{barestate}) est dcrite figures~\ref{constr} et~\ref{gram-annexes}. On constate par rapport aux prcdentes versions de Coq d'importants changements de priorit, le plus marquant tant celui de l'application qui se trouve dsormais juste au dessus\footnote{La convention est de considrer les oprateurs moins lieurs comme ``au dessus'', c'est--dire ayant un niveau de priorit plus lv (comme c'est le cas avec le niveau de la grammaire actuelle des termes).} des constructions fermes gauche et droite. La grammaire des noms globaux est la suivante: \begin{eqnarray*} \DEFNT{global} \NT{ident} %% \nlsep \TERM{\$}\NT{ident} \nlsep \NT{ident}\TERM{.}\NT{global} \end{eqnarray*} Le $\TERM{_}$ dnote les termes synthtiser. Les mtavariables sont reconnues au niveau du lexer pour ne pas entrer en conflit avec le $\TERM{?}$ de l'existentielle. Les oprateurs infixes ou prfixes sont tous au mme niveau de priorit du point de vue de Camlp4. La solution envisage est de les grer la manire de Yacc, avec une pile (voir discussions plus bas). Ainsi, l'implication est un infixe normal; la quantification universelle et le let sont vus comme des oprateurs prfixes avec un niveau de priorit plus haut (i.e. moins lieur). Il subsiste des problmes si l'on ne veut pas crire de parenthses dans: \begin{verbatim} A -> (!x. 
B -> (let y = C in D)) \end{verbatim} La solution propose est d'analyser le membre droit d'un infixe de manire autoriser les prfixes et les infixes de niveau infrieur, et d'exiger le parenthsage que pour les infixes de niveau suprieurs. En revanche, l'affichage, certains membres droits seront plus lisibles s'ils n'utilisent pas cette astuce: \begin{verbatim} (fun x => x) = fun x => x \end{verbatim} La proposition est d'autoriser ce type d'critures au parsing, mais l'afficheur crit de manire standardise en mettant quelques parenthses superflues: $\TERM{=}$ serait symtrique alors que $\rightarrow$ appellerait l'afficheur de priorit leve pour son sous-terme droit. Les priorits des oprateurs primitifs sont les suivantes (le signe $*$ signifie que pour le membre droit les oprateurs prfixes seront affichs sans parenthses quel que soit leur priorit): $$ \begin{array}{c|l} $symbole$ & $priorit$ \\ \hline \TERM{!} & 200\,R* \\ \TERM{fun} & 200\,R* \\ \TERM{let} & 200\,R* \\ \TERM{if} & 200\,R \\ \TERM{eval} & 200\,R \\ \rightarrow & 90\,R* \end{array} $$ Il y a deux points d'entre pour les termes: $\NT{constr}$ et $\NT{simple-constr}$. Le premier peut tre utilis lorsqu'il est suivi d'un sparateur particulier. Dans le cas o l'on veut une liste de termes spars par un espace, il faut lire des $\NT{simple-constr}$. Les constructions $\TERM{fix}$ et $\TERM{cofix}$ (voir aussi figure~\ref{gram-fix}) sont fermes par end pour simplifier l'analyse. Sinon, une expression de point fixe peut tre suivie par un \TERM{in} ou un \TERM{and}, ce qui pose les mmes problmes que le ``dangling else'': dans \begin{verbatim} fix f1 x {x} = fix f2 y {y} = ... and ... in ... \end{verbatim} il faut dfinir une stratgie pour associer le \TERM{and} et le \TERM{in} au bon point fixe. Un autre avantage est de faire apparaitre que le \TERM{fix} est un constructeur de terme de premire classe et pas un lieur: \begin{verbatim} fix f1 ... and f2 ... in f1 end x \end{verbatim} Les propositions prcdentes laissaient \texttt{f1} et \texttt{x} accols, ce qui est source de confusion lorsque l'on fait par exemple \texttt{Pattern (f1 x)}. Les corps de points fixes et co-points fixes sont identiques, bien que ces derniers n'aient pas d'information de dcroissance. Cela fonctionne puisque l'annotation est optionnelle. Cela prfigure des cas o l'on arrive infrer quel est l'argument qui dcroit structurellement (en particulier dans le cas o il n'y a qu'un seul argument). \begin{figure} \begin{rulebox} \DEFNT{fix-expr} \TERM{fix}~\NT{fix-decls} ~\NT{fix-select} ~\TERM{end} &\RNAME{fix} \nlsep \TERM{cofix}~\NT{cofix-decls}~\NT{fix-select} ~\TERM{end} &\RNAME{cofix} \SEPDEF \DEFNT{fix-decls} \NT{fix-decl}~\TERM{and}~\NT{fix-decls} \nlsep \NT{fix-decl} \SEPDEF \DEFNT{fix-decl} \NT{ident}~\PLUS{\NT{binder}}~\NT{type}~\NT{annot} ~\TERM{=}~\NT{constr} \SEPDEF \DEFNT{annot} \TERM{\{}~\NT{ident}~\TERM{\}} \nlsep \epsilon \SEPDEF \DEFNT{fix-select} \TERM{in}~\NT{ident} \nlsep \epsilon \end{rulebox} \caption{Grammaires annexes des points fixes} \label{gram-fix} \end{figure} La construction $\TERM{Case}$ peut-tre considre comme obsolte. Quant au $\TERM{Match}$ de la V6, il disparat purement et simplement. 
\begin{figure} \begin{rulebox} \DEFNT{match-expr} \TERM{match}~\NT{case-items}~\NT{case-type}~\TERM{with}~ \NT{branches}~\TERM{end} &\RNAME{match} \nlsep \TERM{match}~\NT{case-items}~\TERM{with}~ \NT{branches}~\TERM{end} &\RNAME{infer-match} %%\nlsep \TERM{case}~\NT{constr}~\NT{case-predicate}~\TERM{of}~ %% \STAR{\NT{constr}}~\TERM{end} &\RNAME{case} \SEPDEF \DEFNT{case-items} \NT{case-item} ~\TERM{\&} ~\NT{case-items} \nlsep \NT{case-item} \SEPDEF \DEFNT{case-item} \NT{constr}~\NT{pred-pattern} &\RNAME{dep-case} \nlsep \NT{constr} &\RNAME{nodep-case} \SEPDEF \DEFNT{case-type} \TERM{$\Rightarrow$}~\NT{constr} \nlsep \epsilon \SEPDEF \DEFNT{pred-pattern} \TERM{as}~\NT{ident} ~\TERM{\!:}~\NT{constr} \SEPDEF \DEFNT{branches} \TERM{|} ~\NT{patterns} ~\TERM{$\Rightarrow$} ~\NT{constr} ~\NT{branches} \nlsep \epsilon \SEPDEF \DEFNT{patterns} \NT{pattern} ~\TERM{\&} ~\NT{patterns} \nlsep \NT{pattern} \SEPDEF \DEFNT{pattern} ... \end{rulebox} \caption{Grammaires annexes du filtrage} \label{gram-match} \end{figure} De manire globale, l'introduction de dfinitions dans les termes se fait avec le symbole $=$, et le $\!:=$ est rserv aux dfinitions au niveau vernac. Il y avait un manque de cohrence dans la V6, puisque l'on utilisait $=$ pour le $\TERM{let}$ et $\!:=$ pour les points fixes et les commandes vernac. % OBSOLETE: lieurs multiples supprimes %On peut remarquer que $\NT{binder}$ est un sous-ensemble de %$\NT{simple-constr}$, l'exception de $\texttt{(a,b\!\!:T)}$: en tant %que lieur, {\tt a} et {\tt b} sont tous deux contraints, alors qu'en %tant que terme, seul {\tt b} l'est. Cela qui signifie que l'objectif %de rendre compatibles les membres gauches et droits est {\it presque} %atteint. \subsection{Infixes} \subsubsection{Infixes extensibles} Le problme de savoir si la liste des symboles pouvant apparatre en infixe est fixe ou extensible par l'utilisateur reste voir. Notons que la solution o les symboles infixes sont des identificateurs que l'on peut dfinir parat difficilement praticable: par exemple $\texttt{Logic.eq}$ n'est pas un oprateur binaire, mais ternaire. Il semble plus simple de garder des dclarations infixes qui relient un symbole infixe un terme avec deux ``trous''. Par exemple: $$\begin{array}{c|l} $infixe$ & $identificateur$ \\ \hline = & \texttt{Logic.eq _ ?1 ?2} \\ == & \texttt{JohnMajor.eq _ ?1 _ ?2} \end{array}$$ La syntaxe d'une dclaration d'infixe serait par exemple: \begin{verbatim} Infix "=" 50 := Logic.eq _ ?1 ?2; \end{verbatim} \subsubsection{Gestion des prcdences} Les infixes peuvent tre soit laiss Camlp4, ou bien (comme ici) considrer que tous les oprateurs ont la mme prcdence et grer soit mme la recomposition des termes l'aide d'une pile (comme Yacc). \subsection{Extensions de syntaxe} \subsubsection{Litraux numriques} La proposition est de considerer les litraux numriques comme de simples identificateurs. Comme il en existe une infinit, il faut un nouveau mcanisme pour leur associer une dfinition. Par exemple, en ce qui concerne \texttt{Arith}, la dfinition de $5$ serait $\texttt{S}~4$. Pour \texttt{ZArith}, $5$ serait $\texttt{xI}~2$. Comme les infixes, les constantes numriques peuvent tre qualifies pour indiquer dans quels module est le type que l'on veut rfrencer. Par exemple (si on renomme \texttt{Arith} en \texttt{N} et \texttt{ZArith} en \texttt{Z}): \verb+N.5+, \verb+Z.5+. 
\begin{eqnarray*} \EXTNT{global} \NT{int} \end{eqnarray*} \subsubsection{Nouveaux lieurs} $$ \begin{array}{rclr} \EXTNT{constr} \TERM{ex}~\PLUS{\NT{binder}}~\TERM{.}~\NT{constr} &\RNAME{ex} \nlsep \TERM{ex}~\PLUS{\NT{binder}}~\TERM{.}~\NT{constr}~\TERM{,}~\NT{constr} &\RNAME{ex2} \nlsep \TERM{ext}~\PLUS{\NT{binder}}~\TERM{.}~\NT{constr} &\RNAME{exT} \nlsep \TERM{ext}~\PLUS{\NT{binder}}~\TERM{.}~\NT{constr}~\TERM{,}~\NT{constr} &\RNAME{exT2} \end{array} $$ Pour l'instant l'existentielle n'admet qu'une seule variable, ce qui oblige crire des cascades de $\TERM{ex}$. Pour parser les existentielles avec deux prdicats, on peut considrer \TERM{\&} comme un infixe intermdiaire et l'oprateur existentiel en prsence de cet infixe se transforme en \texttt{ex2}. \subsubsection{Nouveaux infixes} Prcdences des oprateurs infixes (les plus grands associent moins fort): $$ \begin{array}{l|l|c|l} $identificateur$ & $module$ & $infixe/prfixe$ & $prcdence$ \\ \hline \texttt{iff} & $Logic$ & \longleftrightarrow & 100 \\ \texttt{or} & $Logic$ & \vee & 80\, R \\ \texttt{sum} & $Datatypes$ & + & 80\, R \\ \texttt{and} & $Logic$ & \wedge & 70\, R \\ \texttt{prod} & $Datatypes$ & * & 70\, R \\ \texttt{not} & $Logic$ & \tilde{} & 60\, L \\ \texttt{eq _} & $Logic$ & = & 50 \\ \texttt{eqT _} & $Logic_Type$ & = & 50 \\ \texttt{identityT _} & $Data_Type$ & = & 50 \\ \texttt{le} & $Peano$ & $<=$ & 50 \\ \texttt{lt} & $Peano$ & $<$ & 50 \\ \texttt{ge} & $Peano$ & $>=$ & 50 \\ \texttt{gt} & $Peano$ & $>$ & 50 \\ \texttt{Zle} & $zarith_aux$ & $<=$ & 50 \\ \texttt{Zlt} & $zarith_aux$ & $<$ & 50 \\ \texttt{Zge} & $zarith_aux$ & $>=$ & 50 \\ \texttt{Zgt} & $zarith_aux$ & $>$ & 50 \\ \texttt{Rle} & $Rdefinitions$ & $<=$ & 50 \\ \texttt{Rlt} & $Rdefinitions$ & $<$ & 50 \\ \texttt{Rge} & $Rdefinitions$ & $>=$ & 50 \\ \texttt{Rgt} & $Rdefinitions$ & $>$ & 50 \\ \texttt{plus} & $Peano$ & + & 40\,L \\ \texttt{Zplus} & $fast_integer$ & + & 40\,L \\ \texttt{Rplus} & $Rdefinitions$ & + & 40\,L \\ \texttt{minus} & $Minus$ & - & 40\,L \\ \texttt{Zminus} & $zarith_aux$ & - & 40\,L \\ \texttt{Rminus} & $Rdefinitions$ & - & 40\,L \\ \texttt{Zopp} & $fast_integer$ & - & 40\,L \\ \texttt{Ropp} & $Rdefinitions$ & - & 40\,L \\ \texttt{mult} & $Peano$ & * & 30\,L \\ \texttt{Zmult} & $fast_integer$ & * & 30\,L \\ \texttt{Rmult} & $Rdefinitions$ & * & 30\,L \\ \texttt{Rdiv} & $Rdefinitions$ & / & 30\,L \\ \texttt{pow} & $Rfunctions$ & \hat & 20\,L \\ \texttt{fact} & $Rfunctions$ & ! & 20\,L \\ \end{array} $$ Notons qu'il faudrait dcouper {\tt Logic_Type} en deux car celui-ci dfinit deux galits, ou alors les mettre dans des modules diffrents. \subsection{Exemples} \begin{verbatim} Definition not (A:Prop) := A->False; Inductive eq (A:Set) (x:A) : A->Prop := refl_equal : eq A x x; Inductive ex (A:Set) (P:A->Prop) : Prop := ex_intro : !x. P x -> ex A P; Lemma not_all_ex_not : !(P:U->Prop). ~(!n. P n) -> ?n. ~ P n; Fixpoint plus n m : nat {struct n} := match n with O => m | (S k) => S (plus k m) end; \end{verbatim} \subsection{Questions ouvertes} Voici les points sur lesquels la discussion est particulirement ouverte: \begin{itemize} \item choix d'autres symboles pour les quantificateurs \TERM{!} et \TERM{?}. En l'tat actuel des discussions, on garderait le \TERM{!} pour la qunatification universelle, mais on choisirait quelquechose comme \TERM{ex} pour l'existentielle, afin de ne pas suggrer trop de symtrie entre ces quantificateurs (l'un est primitif, l'autre pas). \item syntaxe particulire pour les \texttt{sig}, \texttt{sumor}, etc. 
\item la possibilit d'introduire plusieurs variables du mme type est pour l'instant supprime au vu des problmes de compatibilit de syntaxe entre les membres gauches et membres droits. L'ide tant que l'inference de type permet d'viter le besoin de dclarer tous les types. \end{itemize} \subsection{Autres extensions} \subsubsection{Lieur multiple} L'criture de types en prsence de polymorphisme est souvent assez pnible: \begin{verbatim} Check !(A:Set) (x:A) (B:Set) (y:B). P A x B y; \end{verbatim} On pourrait avoir des dclarations introduisant la fois un type d'une certaine sorte et une variable de ce type: \begin{verbatim} Check !(x:A:Set) (y:B:Set). P A x B y; \end{verbatim} Noter que l'on aurait pu crire: \begin{verbatim} Check !A x B y. P A (x:A:Set) B (y:B:Set); \end{verbatim} \section{Syntaxe des tactiques} \subsection{Questions diverses} Changer ``Pattern nl c ... nl c'' en ``Pattern [ nl ] c ... [ nl ] c'' pour permettre des chiffres seuls dans la catgorie syntaxique des termes. Par uniformit remplacer ``Unfold nl c'' par ``Unfold [ nl ] c'' ? Mme problme pour l'entier de Specialize (ou virer Specialize ?) ? \subsection{Questions en suspens} \verb=EAuto= : deux syntaxes diffrentes pour la recherche en largeur et en profondeur ? Quelle recherche par dfaut ? \section*{Remarques ple-mle (HH)} Autoriser la syntaxe \begin{verbatim} Variable R (a : A) (b : B) : Prop. Hypotheses H (a : A) (b : B) : Prop; Y (u : U) : V. Variables H (a : A) (b : B), J (k : K) : nat; Z (v : V) : Set. \end{verbatim} Renommer eqT, refl_eqT, eqT_ind, eqT_rect, eqT_rec en eq, refl_equal, etc. Remplacer == en =. Mettre des \verb=?x= plutot que des \verb=?1= dans les motifs de ltac ?? \section{Moulinette} \begin{itemize} \item Mettre \verb=/= et * au mme niveau dans R. \item Changer la prcdence du - unaire dans R. \item Ajouter Require Arith par necessite si Require ArithRing ou Require ZArithRing. \item Ajouter Require ZArith par necessite si Require ZArithRing ou Require Omega. \item Enlever le Export de Bool, Arith et ZARith de Ring quand inappropri et l'ajouter ct des Require Ring. \item Remplacer "Check n" par "n:Check ..." \item Renommer Variable/Hypothesis hors section en Parameter/Axiom. \item Renommer les \verb=command0=, \verb=command1=, ... \verb=lcommand= etc en \verb=constr0=, \verb=constr1=, ... \verb=lconstr=. \item Remplacer les noms Coq.omega.Omega par Coq.Omega ... \item Remplacer AddPath par Add LoadPath (ou + court) \item Unify + and \{\}+\{\} and +\{\} using Prop $\leq$ Set ?? \item Remplacer Implicit Arguments On/Off par Set/Unset Implicit Arguments. \item La syntaxe \verb=Intros (a,b)= est inutile, \verb=Intros [a b]= fait l'affaire. \item Virer \verb=Goal= sans argument (synonyme de \verb=Proof= et sans effets). \item Remplacer Save. par Qed. \item Remplacer \verb=Zmult_Zplus_distr= par \verb=Zmult_plus_distr_r= et \verb=Zmult_plus_distr= par \verb=Zmult_plus_distr_l=. \end{itemize} \end{document} coq-8.4pl2/dev/doc/universes.txt0000640000175000001440000000117211505230573015740 0ustar notinusersHow to debug universes? 1. There is a command Print Universes in Coq toplevel Print Universes. prints the graph of universes in the form of constraints Print Universes "file". produces the "file" containing universe constraints in the form univ1 # univ2 ; where # can be either > >= or = If "file" ends with .gv or .dot, the resulting file will be in dot format. *) for dot see http://www.research.att.com/sw/tools/graphviz/ 2. There is a printing option {Set,Unset} Printing Universes. 
which, when set, makes all pretty-printed Type's annotated with the name of the universe. coq-8.4pl2/dev/doc/versions-history.tex0000640000175000001440000002543311206234225017247 0ustar notinusers\documentclass[a4paper]{book} \usepackage{fullpage} \usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{amsfonts} \newcommand{\feature}[1]{{\em #1}} \begin{document} \begin{center} \begin{huge} An history of Coq versions \end{huge} \end{center} \bigskip \centerline{\large 1984-1989: The Calculus of Constructions} \mbox{}\\ \mbox{}\\ \begin{tabular}{l|l|l} version & date & comments \\ \hline CoC V1.10& mention of dates from 6 December & implementation language is Caml\\ & 1984 to 13 February 1985 \\ CoC V1.11& mention of dates from 6 December\\ & 1984 to 19 February 1985\\ CoC V2.13& dated 16 December 1985\\ CoC V2.13& dated 25 June 1986\\ CoC V3.1& dated 20 November 1986 & \feature{auto}\\ CoC V3.2& dated 27 November 1986\\ CoC V3.3 and V3.4& dated 1 January 1987 & creation of a directory for examples\\ CoC V4.1& dated 24 July 1987\\ CoC V4.2& dated 10 September 1987\\ CoC V4.3& dated 15 September 1987\\ CoC V4.4& dated 27 January 1988\\ CoC V4.5 and V4.5.5& dated 15 March 1988\\ CoC V4.6 and V4.7& dated 1 September 1988\\ CoC V4.8& dated 1 December 1988\\ CoC V4.8.5& dated 1 February 1989\\ CoC V4.9& dated 1 March 1989\\ CoC V4.10 and 4.10.1& dated 1 May 1989 & first public release - in English\\ \end{tabular} \bigskip \bigskip \newpage \centerline{\large 1989-now: The Calculus of Inductive Constructions} \mbox{}\\ \centerline{I- RCS archives in Caml and Caml-Light} \mbox{}\\ \mbox{}\\ \begin{tabular}{l|l|l} version & date & comments \\ \hline Coq V5.0 & headers dated 1 January 1990 & internal use \\ & & \feature{inductive types with primitive recursor}\\ Coq V5.1 & ended 12 July 1990 & internal use \\ Coq V5.2 & log dated 4 October 1990 & internal use \\ Coq V5.3 & log dated 12 October 1990 & internal use \\ Coq V5.4 & headers dated 24 October 1990 & internal use, \feature{extraction} (version 1) [3-12-90]\\ Coq V5.5 & started 6 December 1990 & internal use \\ Coq V5.6 beta & 1991 & first announce of the new Coq based on CIC \\ & & (in May at TYPES?)\\ & & \feature{rewrite tactic}\\ & & use of RCS at least from February 1991\\ Coq V5.6& 7 August 1991 & \\ Coq V5.6 patch 1& 13 November 1991 & \\ Coq V5.6 (last) & mention of 27 November 1992\\ Coq V5.7.0& 1992 & translation to Caml-Light \footnotemark\\ Coq V5.8& 12 February 1993 & \feature{Program} (version 1), \feature{simpl}\\ & & has the xcoq graphical interface\\ & & first explicit notion of standard library\\ & & includes a MacOS 7-9 version\\ Coq V5.8.1& released 28 April 1993 & with xcoq graphical interface and MacOS 7-9 support\\ Coq V5.8.2& released 9 July 1993 & with xcoq graphical interface and MacOS 7-9 support\\ Coq V5.8.3& released 6 December 1993 % Announce on coq-club & with xcoq graphical interface and MacOS 7-9 support\\ & & 3 branches: Lyon (V5.8.x), Ulm (V5.10.x) and Rocq (V5.9)\\ Coq V5.9 alpha& 7 July 1993 & experimental version based on evars refinement \\ & & (merge from experimental ``V6.0'' and some pre-V5.8.3 \\ & & version), not released\\ & March 1994 & \feature{tauto} tactic in V5.9 branch\\ Coq V5.9 & 27 January 1993 & experimental version based on evars refinement\\ & & not released\\ \end{tabular} \bigskip \bigskip \footnotetext{archive lost?} \newpage \centerline{II- Starting with CVS archives in Caml-Light} \mbox{}\\ \mbox{}\\ \begin{tabular}{l|l|l} version & date & comments \\ \hline Coq V5.10 
``Murthy'' & 22 January 1994 & introduction of the ``DOPN'' structure\\ & & \feature{eapply/prolog} tactics\\ & & private use of cvs on madiran.inria.fr\\ Coq V5.10.1 ``Murthy''& 15 April 1994 \\ Coq V5.10.2 ``Murthy''& 19 April 1994 & \feature{mutual inductive types, fixpoint} (from Lyon's branch)\\ Coq V5.10.3& 28 April 1994 \\ Coq V5.10.5& dated 13 May 1994 & \feature{inversion}, \feature{discriminate}, \feature{injection} \\ & & \feature{type synthesis of hidden arguments}\\ & & \feature{separate compilation}, \feature{reset mechanism} \\ Coq V5.10.6& dated 30 May 1994\\ Coq Lyon's archive & in 1994 & cvs server set up on woodstock.ens-lyon.fr\\ Coq V5.10.9& announced on 17 August 1994 & % Announced by Catherine Parent on coqdev % Version avec une copie de THEORIES pour les inductifs mutuels \\ Coq V5.10.11& announced on 2 February 1995 & \feature{compute}\\ Coq Rocq's archive & on 16 February 1995 & set up of ``V5.10'' cvs archive on pauillac.inria.fr \\ & & with first dispatch of files over src/* directories\\ Coq V5.10.12& dated 30 January 1995 & on Lyon's cvs\\ Coq V5.10.13& dated 9 June 1995 & on Lyon's cvs\\ Coq V5.10.14.OO& dated 30 June 1995 & on Lyon's cvs\\ Coq V5.10.14.a& announced 5 September 1995 & bug-fix release \\ % Announce on coq-club by BW Coq V5.10.14.b& released 2 October 1995 & bug-fix release\\ & & MS-DOS version released on 30 October 1995\\ % still available at ftp://ftp.ens-lyon.fr/pub/LIP/COQ/V5.10.14.old/ in May 2009 % also known in /net/pauillac/constr archive as ``V5.11 old'' \\ % A copy of Coq V5.10.15 dated 1 January 1996 coming from Lyon's CVS is % known in /net/pauillac/constr archive as ``V5.11 new old'' \\ Coq V5.10.15 & released 20 February 1996 & \feature{Logic, Sorting, new Sets and Relations libraries} \\ % Announce on coq-club by BW % dated 15 February 1996 and bound to pauillac's cvs in /net/pauillac/constr archive & & MacOS 7-9 version released on 1 March 1996 \\ % Announce on coq-club by BW Coq V5.11 & dated 1 March 1996 & not released, not in pauillac's CVS, \feature{eauto} \\ \end{tabular} \bigskip \bigskip \newpage \centerline{III- A CVS archive in Caml Special Light} \mbox{}\\ \mbox{}\\ \begin{tabular}{l|l|l} version & date & comments \\ \hline Coq ``V6'' archive & 20 March 1996 & new cvs repository on pauillac.inria.fr with code ported \\ & & to Caml Special Light (to later become Objective Caml)\\ & & has implicit arguments and coercions\\ Coq V6.1beta& released 18 November 1996 & \feature{coercions} [23-5-1996], \feature{user-level implicit arguments} [23-5-1996]\\ & & \feature{omega} [10-9-1996] \\ & & \feature{natural language proof printing} (stopped from Coq V7) [6-9-1996]\\ & & \feature{pattern-matching compilation} [7-10-1996]\\ & & \feature{ring} (version 1, ACSimpl) [11-12-1996]\\ Coq V6.1& released December 1996 & \\ Coq V6.2beta& released 30 January 1998 & % Announced on coq-club 2-2-1998 by CP \feature{SearchIsos} (stopped from Coq V7) [9-11-1997]\\ & & grammar extension mechanism moved to Camlp4 [12-6-1997]\\ & & \feature{refine tactic}\\ & & includes a Windows version\\ Coq V6.2& released 4 May 1998 & % Announced on coq-club 5-5-1998 by CP \feature{ring} (version 2) [7-4-1998] \\ Coq V6.2.1& released 23 July 1998\\ Coq V6.2.2 beta& released 30 January 1998\\ Coq V6.2.2& released 23 September 1998\\ Coq V6.2.3& released 22 December 1998 & \feature{Real numbers library} [from 13-11-1998] \\ Coq V6.2.4& released 8 February 1999\\ Coq V6.3& released 27 July 1999 & \feature{autorewrite} [25-3-1999]\\ & & \feature{Correctness} 
(deprecated in V8, led to Why) [28-10-1997]\\ Coq V6.3.1& released 7 December 1999\\ \end{tabular} \medskip \bigskip \newpage \centerline{IV- New CVS, back to a kernel-centric implementation} \mbox{}\\ \mbox{}\\ \begin{tabular}{l|l|l} version & date & comments \\ \hline Coq ``V7'' archive & August 1999 & new cvs archive based on J.-C. Fillitre's \\ & & \feature{kernel-centric} architecture \\ & & more care for outside readers\\ & & (indentation, ocaml warning protection)\\ Coq V7.0beta& released 27 December 2000 & \feature{${\cal L}_{\mathit{tac}}$} \\ Coq V7.0beta2& released 2 February 2001\\ Coq V7.0& released 25 April 2001 & \feature{extraction} (version 2) [6-2-2001] \\ & & \feature{field} (version 1) [19-4-2001], \feature{fourier} [20-4-2001] \\ Coq V7.1& released 25 September 2001 & \feature{setoid rewriting} (version 1) [10-7-2001]\\ Coq V7.2& released 10 January 2002\\ Coq V7.3& released 16 May 2002\\ Coq V7.3.1& released 5 October 2002 & \feature{module system} [2-8-2002]\\ & & \feature{pattern-matching compilation} (version 2) [13-6-2002]\\ Coq V7.4& released 6 February 2003 & \feature{notation}, \feature{scopes} [13-10-2002]\\ Coq V8.0& released 21 April 2004 & \feature{new concrete syntax}, \feature{Set predicative}, \feature{CoqIDE} [from 4-2-2003]\\ Coq V8.0pl1& released 18 July 2004\\ Coq V8.0pl2& released 22 January 2005\\ Coq V8.0pl3& released 13 January 2006\\ Coq V8.0pl4& released 26 January 2007\\ Coq ``svn'' archive & 6 March 2006 & cvs archive moved to subversion control management\\ Coq V8.1beta& released 12 July 2006 & \feature{bytecode compiler} [20-10-2004] \\ & & \feature{setoid rewriting} (version 2) [3-9-2004]\\ & & \feature{functional induction} [1-2-2006]\\ & & \feature{Strings library} [8-2-2006], \feature{FSets/FMaps library} [15-3-2006] \\ & & \feature{Program} (version 2, Russell) [5-3-2006] \\ & & \feature{declarative language} [20-9-2006]\\ & & \feature{ring} (version 3) [18-11-2005]\\ Coq V8.1gamma& released 7 November 2006 & \feature{field} (version 2) [29-9-2006]\\ Coq V8.1& released 10 February 2007 & \\ Coq V8.1pl1& released 27 July 2007 & \\ Coq V8.1pl2& released 13 October 2007 & \\ Coq V8.1pl3& released 13 December 2007 & \\ Coq V8.1pl4& released 9 October 2008 & \\ Coq V8.2 beta1& released 13 June 2008 & \\ Coq V8.2 beta2& released 19 June 2008 & \\ Coq V8.2 beta3& released 27 June 2008 & \\ Coq V8.2 beta4& released 8 August 2008 & \\ Coq V8.2 & released 17 February 2009 & \feature{type classes} [10-12-2007], \feature{machine words} [11-5-2007]\\ & & \feature{big integers} [11-5-2007], \feature{abstract arithmetics} [9-2007]\\ & & \feature{setoid rewriting} (version 3) [18-12-2007] \\ & & \feature{micromega solving platform} [19-5-2008]\\ & & a first package released on February 11 was incomplete\\ \end{tabular} \medskip \bigskip \newpage \centerline{\large Other important dates} \mbox{}\\ \mbox{}\\ \begin{tabular}{l|l|l} version & date & comments \\ \hline Lechenadec's version in C& mention of \\ & 13 January 1985 on \\ & some vernacular files\\ Set up of the coq-club mailing list & 28 July 1993\\ Coq V6.0 ``evars'' & & experimentation based on evars refinement started \\ & & in 1991 by Gilles from V5.6 beta,\\ & & with work by Hugo in July 1992\\ Coq V6.0 ``evars'' ``light'' & July 1993 & Hugo's port of the first evars-based experimentation \\ & & to Coq V5.7, version from October/November 1992\\ CtCoq & released 25 October 1995 & first beta-version \\ % Announce on coq-club by Janet Proto with explicit substitutions & 1997 &\\ Coq web site & 
15 April 1998 & new site designed by David Delahaye \\ Coq web site & January 2004 & web site new style \\ & & designed by Julien Narboux and Florent Kirchner \\ Coq web site & April 2009 & new Drupal-based site \\ & & designed by Jean-Marc Notin and Denis Cousineau \\ \end{tabular} \end{document} coq-8.4pl2/dev/doc/cic.dtd0000640000175000001440000001267210434701777014427 0ustar notinusers coq-8.4pl2/dev/doc/patch.ocaml-3.10.drop.rectypes0000640000175000001440000000220311171113351020434 0ustar notinusersIndex: scripts/coqmktop.ml =================================================================== --- scripts/coqmktop.ml (révision 12084) +++ scripts/coqmktop.ml (copie de travail) @@ -231,12 +231,25 @@ end;; let ppf = Format.std_formatter;; + let set_rectypes_hack () = + if String.length (Sys.ocaml_version) >= 4 & + String.sub (Sys.ocaml_version) 0 4 = \"3.10\" + then + (* ocaml 3.10 does not have #rectypes but needs it *) + (* simulate a call with option -rectypes before *) + (* jumping to the ocaml toplevel *) + for i = 1 to Array.length Sys.argv - 1 do + Sys.argv.(i) <- \"-rectypes\" + done + else + () in + Mltop.set_top {Mltop.load_obj= (fun f -> if not (Topdirs.load_file ppf f) then failwith \"error\"); Mltop.use_file=Topdirs.dir_use ppf; Mltop.add_dir=Topdirs.dir_directory; - Mltop.ml_loop=(fun () -> Toploop.loop ppf) };;\n" + Mltop.ml_loop=(fun () -> set_rectypes_hack(); Topmain.main()) };;\n" (* create a temporary main file to link *) let create_tmp_main_file modules = coq-8.4pl2/dev/doc/debugging.txt0000640000175000001440000000641511646314246015663 0ustar notinusersDebugging from Coq toplevel using Caml trace mechanism ====================================================== 1. Launch bytecode version of Coq (coqtop.byte or coqtop -byte) 2. Access Ocaml toplevel using vernacular command 'Drop.' 3. Install load paths and pretty printers for terms, idents, ... using Ocaml command '#use "base_include";;' (use '#use "include";;' for installing the advanced term pretty printers) 4. Use #trace to tell which function(s) to trace 5. Go back to Coq toplevel with 'go();;' 6. Test your Coq command and observe the result of tracing your functions 7. Freely switch from Coq to Ocaml toplevels with 'Drop.' and 'go();;' You can avoid typing #use "include" (or "base_include") after Drop by adding the following lines in your $HOME/.ocamlinit : if Filename.basename Sys.argv.(0) = "coqtop.byte" then ignore (Toploop.use_silently Format.std_formatter "include") Hints: To remove high-level pretty-printing features (coercions, notations, ...), use "Set Printing All". It will affect the #trace printers too. Note for Ocaml 3.10.x: Ocaml 3.10.x requires that modules compiled with -rectypes are loaded in an environment with -rectypes set but there is no way to tell the toplevel to support -rectypes. To make it works, use "patch -p0 < dev/doc/patch.ocaml-3.10.drop.rectypes" to hack script/coqmktop.ml, then recompile coqtop.byte. The procedure above then works as soon as coqtop.byte is called with at least one argument (add neutral option -byte to ensure at least one argument). Debugging from Caml debugger ============================ Needs tuareg mode in Emacs Coq must be configured with -debug and -local (./configure -debug -local) 1. M-x camldebug 2. give the binary name bin/coqtop.byte 3. give ../dev/ocamldebug-coq 4. source db (to get pretty-printers) 5. add breakpoints with C-x C-a C-b from the buffer displaying the ocaml source 6. 
get more help from ocamldebug manual run step back start next last print x (abbreviated into p x) ... 7. some hints: - To debug a failure/error/anomaly, add a breakpoint in Vernac.vernac_com at the with clause of the "try ... interp com with ..." block, then go "back" a few steps to find where the failure/error/anomaly has been raised - Alternatively, for an error or an anomaly, add breakpoints in the middle of each of error* functions or anomaly* functions in lib/util.ml - If "source db" fails, recompile printers.cma with "make dev/printers.cma" and try again Global gprof-based profiling ============================ Coq must be configured with option -profile 1. Run native Coq which must end normally (use Quit or option -batch) 2. gprof ./coqtop gmon.out Per function profiling ====================== 1. To profile function foo in file bar.ml, add the following lines, just after the definition of the function: let fookey = Profile.declare_profile "foo";; let foo a b c = Profile.profile3 fookey foo a b c;; where foo is assumed to have three arguments (adapt using Profile.profile1, Profile. profile2, etc). This has the effect to cumulate the time passed in foo under a line of name "foo" which is displayed at the time coqtop exits. coq-8.4pl2/dev/doc/naming-conventions.tex0000640000175000001440000005443111311255505017515 0ustar notinusers\documentclass[a4paper]{article} \usepackage{fullpage} \usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{amsfonts} \parindent=0pt \parskip=10pt %%%%%%%%%%%%% % Macros \newcommand\itemrule[3]{ \subsubsection{#1} \begin{quote} \begin{tt} #3 \end{tt} \end{quote} \begin{quote} Name: \texttt{#2} \end{quote}} \newcommand\formula[1]{\begin{tt}#1\end{tt}} \newcommand\tactic[1]{\begin{tt}#1\end{tt}} \newcommand\command[1]{\begin{tt}#1\end{tt}} \newcommand\term[1]{\begin{tt}#1\end{tt}} \newcommand\library[1]{\texttt{#1}} \newcommand\name[1]{\texttt{#1}} \newcommand\zero{\texttt{zero}} \newcommand\op{\texttt{op}} \newcommand\opPrime{\texttt{op'}} \newcommand\opSecond{\texttt{op''}} \newcommand\phimapping{\texttt{phi}} \newcommand\D{\texttt{D}} \newcommand\elt{\texttt{elt}} \newcommand\rel{\texttt{rel}} \newcommand\relp{\texttt{rel'}} %%%%%%%%%%%%% \begin{document} \begin{center} \begin{huge} Proposed naming conventions for the Coq standard library \end{huge} \end{center} \bigskip The following document describes a proposition of canonical naming schemes for the Coq standard library. Obviously and unfortunately, the current state of the library is not as homogeneous as it would be if it would systematically follow such a scheme. To tend in this direction, we however recommend to follow the following suggestions. \tableofcontents \section{General conventions} \subsection{Variable names} \begin{itemize} \item Variables are preferably quantified at the head of the statement, even if some premisses do not depend of one of them. For instance, one would state \begin{quote} \begin{tt} {forall x y z:D, x <= y -> x+z <= y+z} \end{tt} \end{quote} and not \begin{quote} \begin{tt} {forall x y:D, x <= y -> forall z:D, x+z <= y+z} \end{tt} \end{quote} \item Variables are preferably quantified (and named) in the order of ``importance'', then of appearance, from left to right, even if for the purpose of some tactics it would have been more convenient to have, say, the variables not occurring in the conclusion first. 
For instance, one would state
\begin{quote}
\begin{tt}
{forall x y z:D, x+z <= y+z -> x <= y}
\end{tt}
\end{quote}
and not
\begin{quote}
\begin{tt}
{forall z x y:D, x+z <= y+z -> x <= y}
\end{tt}
\end{quote}
nor
\begin{quote}
\begin{tt}
{forall x y z:D, y+x <= z+x -> y <= z}
\end{tt}
\end{quote}

\item Choice of effective names is domain-dependent. For instance, on natural numbers, the convention is to use the variables $n$, $m$, $p$, $q$, $r$, $s$ in this order. On generic domains, the convention is to use the letters $x$, $y$, $z$, $t$. When more than three variables are needed, indexed variables can be used.

It is conventional to use specific names for variables having a special meaning. For instance, $eps$ or $\epsilon$ can be used to denote a number intended to be as small as possible. Also, $q$ and $r$ can be used to denote a quotient and a remainder. This is good practice.
\end{itemize}

\subsection{Disjunctive statements}

A disjunctive statement with a computational content will be suffixed by \name{\_inf}. For instance, if
\begin{quote}
\begin{tt}
{forall x y, op x y = zero -> x = zero \/ y = zero}
\end{tt}
\end{quote}
has name \texttt{D\_integral}, then
\begin{quote}
\begin{tt}
{forall x y, op x y = zero -> \{x = zero\} + \{y = zero\}}
\end{tt}
\end{quote}
will have name \texttt{D\_integral\_inf}.

As an exception, decidability statements, such as
\begin{quote}
\begin{tt}
{forall x y, \{x = y\} + \{x <> y\}}
\end{tt}
\end{quote}
will have a name ending in \texttt{\_dec}. The same holds for cotransitivity lemmas, which are inherently computational and whose names end in \texttt{\_cotrans}.

\subsection{Inductive types constructor names}

As a general rule, constructor names start with the name of the inductive type being defined, as in \texttt{Inductive Z := Z0 : Z | Zpos : Z -> Z | Zneg : Z -> Z}, with the exception of very standard types like \texttt{bool}, \texttt{nat}, \texttt{list}...

For inductive predicates, constructor names also start with the name of the notion being defined, with one or more suffixes separated by \texttt{\_} for discriminating the different cases, as e.g. in
\begin{verbatim}
Inductive even : nat -> Prop :=
  | even_O : even 0
  | even_S n : odd n -> even (S n)
with odd : nat -> Prop :=
  | odd_S n : even n -> odd (S n).
\end{verbatim}

As a general rule, inductive predicate names should be lowercase (with the exception of notions referring to a proper name, e.g. \texttt{Bezout}) and multiple words must be separated by ``{\_}''. As an exception, when extending libraries whose general rule is that predicate names start with a capital letter, the convention of this library should be kept, and the separation between multiple words is done by making the initial of each word a capital letter (if one of these words is a proper name, then a ``{\_}'' is added to emphasize that the capital letter is proper and not an application of the rule for marking the change of word).

Inductive predicates that characterize the specification of a function should be named after the function they specify, followed by \texttt{\_spec}, as in:
\begin{verbatim}
Inductive nth_spec : list A -> nat -> A -> Prop :=
  | nth_spec_O a l : nth_spec (a :: l) 0 a
  | nth_spec_S n a b l : nth_spec l n a -> nth_spec (b :: l) (S n) a.
\end{verbatim}

\section{Equational properties of operations}

\subsection{General conventions}

If the conclusion is oriented the other way around from what is listed below, add the suffix \name{\_reverse} to the lemma name.
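For instance (an illustration only; the statements below are quoted from the \library{Arith} library and may differ in detail from the current sources), associativity of \texttt{plus} is stated as
\begin{quote}
\begin{tt}
plus\_assoc : forall n m p, n + (m + p) = n + m + p
\end{tt}
\end{quote}
while the reversed orientation
\begin{quote}
\begin{tt}
plus\_assoc\_reverse : forall n m p, n + m + p = n + (m + p)
\end{tt}
\end{quote}
follows the \name{\_reverse} convention.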
\subsection{Specific conventions}

\itemrule{Associativity of binary operator {\op} on domain {\D}}{Dop\_assoc}
{forall x y z:D, op x (op y z) = op (op x y) z}

Remark: Symmetric form: \name{Dop\_assoc\_reverse}: \formula{forall x y z:D, op (op x y) z = op x (op y z)}

\itemrule{Commutativity of binary operator {\op} on domain {\D}}{Dop\_comm}
{forall x y:D, op x y = op y x}

Remark: Avoid \formula{forall x y:D, op y x = op x y}, or at worst, call it \name{Dop\_comm\_reverse}.

\itemrule{Left neutrality of element elt for binary operator {\op}}{Dop\_elt\_l}
{forall x:D, op elt x = x}

Remark: In English, ``{\elt} is an identity for {\op}'' seems to be a more common terminology.

\itemrule{Right neutrality of element elt for binary operator {\op}}{Dop\_elt\_r}
{forall x:D, op x elt = x}

Remark: By convention, if the identities are reminiscent of zero or one, they are written 0 and 1 in the name of the property.

\itemrule{Left absorption of element elt for binary operator {\op}}{Dop\_elt\_l}
{forall x:D, op elt x = elt}

Remarks:
\begin{itemize}
\item In the French school, this property is named "elt est absorbant pour op"
\item In English, the property seems generally named "elt is a zero of op"
\item In the context of lattices, this is a boundedness property; it may be called "elt is a bound on D", or, referring to a (possibly arbitrarily oriented) order, "elt is a least element of D" or "elt is a greatest element of D"
\end{itemize}

\itemrule{Right absorption of element {\elt} for binary operator {\op}}{Dop\_elt\_l [BAD ??]}
{forall x:D, op x elt = elt}

\itemrule{Left distributivity of binary operator {\op} over {\opPrime} on domain {\D}}{Dop\_op'\_distr\_l}
{forall x y z:D, op (op' x y) z = op' (op x z) (op y z)}

Remark: Some authors say ``distribution''.

\itemrule{Right distributivity of binary operator {\op} over {\opPrime} on domain {\D}}{Dop\_op'\_distr\_r}
{forall x y z:D, op z (op' x y) = op' (op z x) (op z y)}

Remark: Note the order of arguments.

\itemrule{Distributivity of unary operator {\op} over binary op' on domain {\D}}{Dop\_op'\_distr}
{forall x y:D, op (op' x y) = op' (op x) (op y)}

Remark: For a non commutative operation with inversion of arguments, as in \formula{forall x y:D, op (op' x y) = op' (op y) (op x)}, we may probably still call the property distributivity since there is no ambiguity.

Example: \formula{forall n m : Z, -(n+m) = (-n)+(-m)}.

Example: \formula{forall l l' : list A, rev (l++l') = (rev l')++(rev l)}.

\itemrule{Left extrusion of unary operator {\op} over binary op' on domain {\D}}{Dop\_op'\_distr\_l}
{forall x y:D, op (op' x y) = op' (op x) y}

Question: Call it left commutativity ?? left swap ?

\itemrule{Right extrusion of unary operator {\op} over binary op' on domain {\D}}{Dop\_op'\_distr\_r}
{forall x y:D, op (op' x y) = op' x (op y)}

\itemrule{Idempotency of binary operator {\op} on domain {\D}}{Dop\_idempotent}
{forall x:D, op x x = x}

\itemrule{Idempotency of unary operator {\op} on domain {\D}}{Dop\_idempotent}
{forall x:D, op (op x) = op x}

Remark: This is actually idempotency of {\op} wrt composition and identity.

\itemrule{Idempotency of element elt for binary operator {\op} on domain {\D}}{Dop\_elt\_idempotent}
{op elt elt = elt}

Remark: Generally useless in CIC for concrete, computable operators.

Remark: The general definition is ``exists n, iter n op x = x''.
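To make the schemes above concrete, here is how \name{Dop\_assoc}, \name{Dop\_comm} and \name{Dop\_elt\_l} instantiate on \texttt{nat} with \texttt{mult} (statements in the spirit of the \library{Arith} library, given for illustration only; the exact formulations in the current sources may differ slightly):
\begin{verbatim}
Lemma mult_assoc : forall n m p : nat, n * (m * p) = n * m * p.
Lemma mult_comm : forall n m : nat, n * m = m * n.
Lemma mult_1_l : forall n : nat, 1 * n = n.
\end{verbatim}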
\itemrule{Nilpotency of element elt wrt a ring D with additive neutral element {\zero} and multiplicative binary operator {\op}}{Delt\_nilpotent}
{op elt elt = zero}

Remark: We leave the ring structure of D implicit; the general definition is ``exists n, iter n op elt = zero''.

\itemrule{Zero-product property in a ring D with additive neutral element {\zero} and multiplicative binary operator {\op}}{D\_integral}
{forall x y, op x y = zero -> x = zero \/ y = zero}

Remark: We leave the ring structure of D implicit; the Coq library uses either \texttt{\_is\_O} (for \texttt{nat}), \texttt{\_integral} (for \texttt{Z}, \texttt{Q} and \texttt{R}), or \texttt{eq\_mul\_0} (for \texttt{NZ}).

Remark: The French school says ``intégrité''.

\itemrule{Nilpotency of binary operator {\op} wrt its absorbing element zero in D}{Dop\_nilpotent}
{forall x, op x x = zero}

Remark: Did not find this definition on the web, but it is used in the Coq library (to characterize \name{xor}).

\itemrule{Involutivity of unary op on D}{Dop\_involutive}
{forall x:D, op (op x) = x}

\itemrule{Absorption law on the left for binary operator {\op} over binary operator {\op}' on the left}{Dop\_op'\_absorption\_l\_l}
{forall x y:D, op x (op' x y) = x}

\itemrule{Absorption law on the left for binary operator {\op} over binary operator {\op}' on the right}{Dop\_op'\_absorption\_l\_r}
{forall x y:D, op x (op' y x) = x}

Remark: Similarly for \name{Dop\_op'\_absorption\_r\_l} and \name{Dop\_op'\_absorption\_r\_r}.

\itemrule{De Morgan laws for binary operators {\opPrime} and {\opSecond} wrt unary op on domain {\D}}{Dop'\_op''\_de\_morgan, Dop''\_op'\_de\_morgan ?? (\mbox{leaving the complementing operation implicit})}
{forall x y:D, op (op' x y) = op'' (op x) (op y)\\
forall x y:D, op (op'' x y) = op' (op x) (op y)}

\itemrule{Left complementation of binary operator {\op} by means of unary {\opPrime} wrt neutral element {\elt} of {\op} on domain {\D}}{Dop\_op'\_opp\_l}
{forall x:D, op (op' x) x = elt}

Remark: If the name of the opposite function is reminiscent of the notion of complement (e.g. if it is called \texttt{opp}), one can simply say {Dop\_opp\_l}.

\itemrule{Right complementation of binary operator {\op} by means of unary {\op'} wrt neutral element {\elt} of {\op} on domain {\D}}{Dop\_opp\_r}
{forall x:D, op x (op' x) = elt}

Example: \formula{Radd\_opp\_l: forall r : R, - r + r = 0}

\itemrule{Associativity of binary operators {\op} and {\op'}}{Dop\_op'\_assoc}
{forall x y z, op x (op' y z) = op (op' x y) z}

Example: \formula{forall x y z, x + (y - z) = (x + y) - z}

\itemrule{Right extrusion of binary operator {\opPrime} over binary operator {\op}}{Dop\_op'\_extrusion\_r}
{forall x y z, op x (op' y z) = op' (op x y) z}

Remark: This requires {\op} and {\opPrime} to have their right and left argument respectively and their return types identical.

Example: \formula{forall x y z, x + (y - z) = (x + y) - z}

Remark: Other less natural combinations are possible, such as \formula{forall x y z, op x (op' y z) = op' y (op x z)}.

\itemrule{Left extrusion of binary operator {\opPrime} over binary operator {\op}}{Dop\_op'\_extrusion\_l}
{forall x y z, op (op' x y) z = op' x (op y z)}

Remark: Operations are not necessarily internal composition laws. It is only required that {\op} and {\opPrime} have their right and left argument respectively and their return types identical.

Remark: When the types are heterogeneous, only one extrusion law is possible and it can simply be named {Dop\_op'\_extrusion}.
Example: \formula{app\_cons\_extrusion : forall a l l', (a :: l) ++ l' = a :: (l ++ l')}. %====================================================================== %\section{Properties of elements} %Remark: Not used in current library %====================================================================== \section{Preservation and compatibility properties of operations} \subsection{With respect to equality} \itemrule{Injectivity of unary operator {\op}}{Dop\_inj} {forall x y:D, op x = op y -> x = y} \itemrule{Left regularity of binary operator {\op}}{Dop\_reg\_l, Dop\_inj\_l, or Dop\_cancel\_l} {forall x y z:D, op z x = op z y -> x = y} Remark: Note the order of arguments. Remark: The Coq usage is to called it regularity but the English standard seems to be cancellation. The recommended form is not decided yet. Remark: Shall a property like $n^p \leq n^q \rightarrow p \leq q$ (for $n\geq 1$) be called cancellation or should it be reserved for operators that have an inverse? \itemrule{Right regularity of binary operator {\op}}{Dop\_reg\_r, Dop\_inj\_r, Dop\_cancel\_r} {forall x y z:D, op x z = op y z -> x = y} \subsection{With respect to a relation {\rel}} \itemrule{Compatibility of unary operator {\op}}{Dop\_rel\_compat} {forall x y:D, rel x y -> rel (op x) (op y)} \itemrule{Left compatibility of binary operator {\op}}{Dop\_rel\_compat\_l} {forall x y z:D, rel x y -> rel (op z x) (op z y)} \itemrule{Right compatibility of binary operator {\op}}{Dop\_rel\_compat\_r} {forall x y z:D, rel x y -> rel (op x z) (op y z)} Remark: For equality, use names of the form \name{Dop\_eq\_compat\_l} or \name{Dop\_eq\_compat\_r} (\formula{forall x y z:D, y = x -> op y z = op x z} and \formula{forall x y z:D, y = x -> op y z = op x z}) Remark: Should we admit (or even prefer) the name \name{Dop\_rel\_monotone}, \name{Dop\_rel\_monotone\_l}, \name{Dop\_rel\_monotone\_r} when {\rel} is an order ? \itemrule{Left regularity of binary operator {\op}}{Dop\_rel\_reg\_l} {forall x y z:D, rel (op z x) (op z y) -> rel x y} \itemrule{Right regularity of binary operator {\op}}{Dop\_rel\_reg\_r} {forall x y z:D, rel (op x z) (op y z) -> rel x y} Question: Would it be better to have \name{z} as first argument, since it is missing in the conclusion ?? (or admit we shall use the options ``\texttt{with p}''?) \itemrule{Left distributivity of binary operator {\op} over {\opPrime} along relation {\rel} on domain {\D}}{Dop\_op'\_rel\_distr\_l} {forall x y z:D, rel (op (op' x y) z) (op' (op x z) (op y z))} Example: standard property of (not necessarily distributive) lattices Remark: In a (non distributive) lattice, by swapping join and meet, one would like also, \formula{forall x y z:D, rel (op' (op x z) (op y z)) (op (op' x y) z)}. How to name it with a symmetric name (use \name{Dop\_op'\_rel\_distr\_mon\_l} and \name{Dop\_op'\_rel\_distr\_anti\_l})? \itemrule{Commutativity of binary operator {\op} along (equivalence) relation {\rel} on domain {\D}}{Dop\_op'\_rel\_comm} {forall x y z:D, rel (op x y) (op y x)} Example: \formula{forall l l':list A, Permutation (l++l') (l'++l)} \itemrule{Irreducibility of binary operator {\op} on domain {\D}}{Dop\_irreducible} {forall x y z:D, z = op x y -> z = x $\backslash/$ z = y} Question: What about the constructive version ? Call it \name{Dop\_irreducible\_inf} ? 
\formula{forall x y z:D, z = op x y -> \{z = x\} + \{z = y\}} \itemrule{Primality of binary operator {\op} along relation {\rel} on domain {\D}}{Dop\_rel\_prime} {forall x y z:D, rel z (op x y) -> rel z x $\backslash/$ rel z y} %====================================================================== \section{Morphisms} \itemrule{Morphism between structures {\D} and {\D'}}{\name{D'\_of\_D}}{D -> D'} Remark: If the domains are one-letter long, one can used \texttt{IDD'} as for \name{INR} or \name{INZ}. \itemrule{Morphism {\phimapping} mapping unary operators {\op} to {\op'}}{phi\_op\_op', phi\_op\_op'\_morphism} {forall x:D, phi (op x) = op' (phi x)} Remark: If the operators have the same name in both domains, one use \texttt{D'\_of\_D\_op} or \texttt{IDD'\_op}. Example: \formula{Z\_of\_nat\_mult: forall n m : nat, Z\_of\_nat (n * m) = (Z\_of\_nat n * Z\_of\_nat m)\%Z}. Remark: If the operators have different names on distinct domains, one can use \texttt{op\_op'}. \itemrule{Morphism {\phimapping} mapping binary operators {\op} to {\op'}}{phi\_op\_op', phi\_op\_op'\_morphism} {forall x y:D, phi (op x y) = op' (phi x) (phi y)} Remark: If the operators have the same name in both domains, one use \texttt{D'\_of\_D\_op} or \texttt{IDD'\_op}. Remark: If the operators have different names on distinct domains, one can use \texttt{op\_op'}. \itemrule{Morphism {\phimapping} mapping binary operator {\op} to binary relation {\rel}}{phi\_op\_rel, phi\_op\_rel\_morphism} {forall x y:D, phi (op x y) <-> rel (phi x) (phi y)} Remark: If the operator and the relation have similar name, one uses \texttt{phi\_op}. Question: How to name each direction? (add \_elim for -> and \_intro for <- ?? -- as done in Bool.v ??) Example: \formula{eq\_true\_neg: \~{} eq\_true b <-> eq\_true (negb b)}. %====================================================================== \section{Preservation and compatibility properties of operations wrt order} \itemrule{Compatibility of binary operator {\op} wrt (strict order) {\rel} and (large order) {\rel'}}{Dop\_rel\_rel'\_compat} {forall x y z t:D, rel x y -> rel' z t -> rel (op x z) (op y t)} \itemrule{Compatibility of binary operator {\op} wrt (large order) {\relp} and (strict order) {\rel}}{Dop\_rel'\_rel\_compat} {forall x y z t:D, rel' x y -> rel z t -> rel (op x z) (op y t)} %====================================================================== \section{Properties of relations} \itemrule{Reflexivity of relation {\rel} on domain {\D}}{Drel\_refl} {forall x:D, rel x x} \itemrule{Symmetry of relation {\rel} on domain {\D}}{Drel\_sym} {forall x y:D, rel x y -> rel y x} \itemrule{Transitivity of relation {\rel} on domain {\D}}{Drel\_trans} {forall x y z:D, rel x y -> rel y z -> rel x z} \itemrule{Antisymmetry of relation {\rel} on domain {\D}}{Drel\_antisym} {forall x y:D, rel x y -> rel y x -> x = y} \itemrule{Irreflexivity of relation {\rel} on domain {\D}}{Drel\_irrefl} {forall x:D, \~{} rel x x} \itemrule{Asymmetry of relation {\rel} on domain {\D}}{Drel\_asym} {forall x y:D, rel x y -> \~{} rel y x} \itemrule{Cotransitivity of relation {\rel} on domain {\D}}{Drel\_cotrans} {forall x y z:D, rel x y -> \{rel z y\} + \{rel x z\}} \itemrule{Linearity of relation {\rel} on domain {\D}}{Drel\_trichotomy} {forall x y:D, \{rel x y\} + \{x = y\} + \{rel y x\}} Questions: Or call it \name{Drel\_total}, or \name{Drel\_linear}, or \name{Drel\_connected}? Use $\backslash/$ ? or use a ternary sumbool, or a ternary disjunction, for nicer elimination. 
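Remark: As an illustration of the informative ternary form expected for linearity/trichotomy, the standard library (module \texttt{Coq.Arith.Compare\_dec}) provides, to the best of our knowledge: Example: \formula{lt\_eq\_lt\_dec : forall n m : nat, \{n < m\} + \{n = m\} + \{m < n\}}.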
\itemrule{Informative decidability of relation {\rel} on domain {\D}}{Drel\_dec (or Drel\_dect, Drel\_dec\_inf ?)} {forall x y:D, \{rel x y\} + \{\~{} rel x y\}} Remark: If equality: \name{D\_eq\_dec} or \name{D\_dec} (not like \name{eq\_nat\_dec})
\itemrule{Non informative decidability of relation {\rel} on domain {\D}}{Drel\_dec\_prop (or Drel\_dec)} {forall x y:D, rel x y $\backslash/$ \~{} rel x y}
\itemrule{Inclusion of relation {\rel} in relation {\rel}' on domain {\D}}{Drel\_rel'\_incl (or Drel\_incl\_rel')} {forall x y:D, rel x y -> rel' x y} Remark: Use \name{Drel\_rel'\_weak} for a strict inclusion ??
%======================================================================
\section{Relations between properties}
\itemrule{Equivalence of properties \texttt{P} and \texttt{Q}}{P\_Q\_iff} {forall x1 .. xn, P <-> Q} Remark: Alternatively use \name{P\_iff\_Q} if it is too difficult to recover what pertains to \texttt{P} and what pertains to \texttt{Q} in their concatenation (as e.g. in \texttt{Godel\_Dummett\_iff\_right\_distr\_implication\_over\_disjunction}).
%======================================================================
\section{Arithmetical conventions}
\begin{minipage}{6in} \renewcommand{\thefootnote}{\thempfootnote} % For footnotes...
\begin{tabular}{lll}
Zero on domain {\D} & D0 & (notation \verb=0=)\\
One on domain {\D} & D1 (if explicitly defined) & (notation \verb=1=)\\
Successor on domain {\D} & Dsucc\\
Predecessor on domain {\D} & Dpred\\
Addition on domain {\D} & Dadd/Dplus\footnote{Coq historically uses \texttt{plus} and \texttt{mult} for addition and multiplication, which are inconsistent names; the recommendation is to use \texttt{add} and \texttt{mul}, except in existing libraries that already use \texttt{plus} and \texttt{mult}} & (infix notation \verb=+= [50,L])\\
Multiplication on domain {\D} & Dmul/Dmult\footnotemark[\value{footnote}] & (infix notation \verb=*= [40,L])\\
Subtraction on domain {\D} & Dminus & (infix notation \verb=-= [50,L])\\
Opposite on domain {\D} & Dopp (if any) & (prefix notation \verb=-= [35,R])\\
Inverse on domain {\D} & Dinv (if any) & (prefix notation \verb=/= [35,R])\\
Power on domain {\D} & Dpower & (infix notation \verb=^= [30,R])\\
Minimal element on domain {\D} & Dmin\\
Maximal element on domain {\D} & Dmax\\
Large (non-strict) less than order on {\D} & Dle & (infix notations \verb!<=! and \verb!>=! [70,N])\\
Strict less than order on {\D} & Dlt & (infix notations \verb=<= and \verb=>= [70,N])\\
\end{tabular} \bigskip \end{minipage} \bigskip
The status of \verb!>=! and \verb!>! is still undecided. It will either be accepted only as parsing notations or may also be accepted as a {\em definition} for the \verb!<=! and \verb! : 95 (no associativity) -> : 90 (right associativity) \/ : 85 (right associativity) /\ : 80 (right associativity) ~ : 75 (right associativity) =, <, >, <=, >=, <> : 70 (no associativity) +, - : 50 (left associativity) *, / : 40 (left associativity) ^ : 30 (right associativity)
1) Translating a V7 notation as it was
By default, the translator keeps the associativity given in V7 while the levels are mapped according to the following table: the V7 levels [ 0; 1; 2; 3; 4; 5; 6; 7; 8; 9; 10] are resp. mapped in V8 to [ 0; 20; 30; 40; 50; 70; 80; 85; 90; 95; 100] with predefined assoc [ No; L; R; L; L; No; R; R; R; No; L]
If this is OK for you, just simply apply the translator.
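Example (illustrative only; the operator "+!" and the constant my_add are made up
for the sake of this example): with the default mapping above, the V7 declaration

  Infix LEFTA 4 "+!" my_add.

is moved to level 50 with left associativity, i.e. it ends up behaving like the
V8 declaration

  Infix "+!" := my_add (at level 50, left associativity).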
2) Translating a V7 notation which conflicts with the new syntax a) Associativity conflict Since the associativity of the levels obtained by translating a V7 level (as shown on table above) cannot be changed, you have to choose another level with a compatible associativity. You can choose any level between 0 and 200, knowing that the standard operators are already set at the levels shown on the list above. Example 1: Assume you have a notation Infix NONA 2 "=_S" my_setoid_eq. By default, the translator moves it to level 30 which is right associative, hence a conflict with the expected no associativity. To solve the problem, just add the "V8only" modifier to reset the level and enforce the associativity as follows: Infix NONA 2 "=_S" my_setoid_eq V8only (at level 70, no associativity). The translator now knows that it has to translate "=_S" at level 70 with no associativity. Rem: 70 is the "natural" level for relations, hence the choice of 70 here, but any other level accepting a no-associativity would have been OK. Example 2: Assume you have a notation Infix RIGHTA 1 "o" my_comp. By default, the translator moves it to level 20 which is left associative, hence a conflict with the expected right associativity. To solve the problem, just add the "V8only" modifier to reset the level and enforce the associativity as follows: Infix RIGHTA 1 "o" my_comp V8only (at level 20, right associativity). The translator now knows that it has to translate "o" at level 20 which has the correct "right associativity". Rem: We assumed here that the user wants a strong precedence for composition, in such a way, say, that "f o g + h" is parsed as "(f o g) + h". To get "o" binding less than the arithmetical operators, an appropriated level would have been close of 70, and below, e.g. 65. b) Conflicts with other notations Since the new syntax comes with new keywords and new predefined symbols, new conflicts can occur. Again, you can use the option V8only to inform the translator of the new syntax to use. b1) A notation hides another notation Rem: use Print Grammar constr in V8 to diagnose the overlap and see the section on factorization in the chapter on notations of the Reference Manual for hints on how to factorize. Example: Notation "{ x }" := (my_embedding x) (at level 1). overlaps in V8 with notation "{ x : A & P }" at level 0 and with x at level 99. The conflicts can be solved by left-factorizing the notation as follows: Notation "{ x }" := (my_embedding x) (at level 1) V8only (at level 0, x at level 99). b2) A notation conflicts with the V8 grammar. Again, use the V8only modifier to tell the translator to automatically take in charge the new syntax. Example: Infix 3 "@" app. Since "@" is used in the new syntax for deactivating the implicit arguments, another symbol has to be used, e.g. "@@". This is done via the V8only option as follows: Infix 3 "@" app V8only "@@" (at level 40, left associativity). or, alternatively by Notation "x @ y" := (app x y) (at level 3, left associativity) V8only "x @@ y" (at level 40, left associativity). b3) My notation is already defined at another level (or with another associativity) In V8, the level and associativity of a given notation can no longer be changed. Then, either you adopt the standard reserved levels and associativity for this notation (as given on the list above) or you change your notation. - To change the notation, follow the directions in section b2. - To adopt the standard level, just use V8only without any argument. Example. Infix 6 "*" my_mult. 
is not accepted as such in V8. Write Infix 6 "*" my_mult V8only. to tell the translator to use "*" at the reserved level (i.e. 40 with left associativity). Even better, use interpretation scopes (look at the Reference Manual). c) How to use V8only with Distfix ? You can't, use Notation instead of Distfix. d) Can I overload a notation in V8, e.g. use "*" and "+" for my own algebraic operations ? Yes, using interpretation scopes (see the corresponding chapter in the Reference Manual). 3) Using the translator to have simplest notations Thanks to the new syntax, * has now the expected left associativity, and the symbols <, >, <= and >= are now available. Thanks to the interpretation scopes, you can overload the interpretation of these operators with the default interpretation provided in Coq. This may be a motivation to use the translator to automatically change the notations while switching to the new syntax. See sections b) and d) above for examples. 4) Setting the translator to automatically use new notations that wasn't used in old syntax Thanks to the "Notation" mechanism, defining symbolic notations is simpler than in the previous versions of Coq. Thanks to the new syntax and interpretation scopes, new symbols and overloading is available. This may be a motivation for using the translator to automatically change the notations while switching to the new syntax. Use for that the commands V8Notation and V8Infix. Examples: V8Infix "==>" my_relation (at level 65, right associativity). tells the translator to write an infix "==>" instead of my_relation in the translated files. V8Infix ">=" my_ge. tells the translator to write an infix ">=" instead of my_ge in the translated files and that the level and associativity are the standard one (as defined in the chart above). V8Infix ">=" my_ge : my_scope. tells the translator to write an infix ">=" instead of my_ge in the translated files, that the level and associativity are the standard one (as defined in the chart above), but only if scope my_scope is open or if a delimiting key is available for "my_scope" (see the Reference Manual). 5) Defining a construction and its notation simultaneously This is permitted by the new syntax. Look at the Reference Manual for explanation. The translator is not fully able to take this in charge... III) Various pitfalls ---------------- 1) New keywords The following identifiers are new keywords "forall"; "fun"; "match"; "fix"; "cofix"; "for"; "if"; "then"; "else"; "return"; "mod"; "at"; "let"; "_"; ".(" The translator automatically add a "_" to names clashing with a keyword, except for files. Hence users may need to rename the files whose name clashes with a keyword. Remark: "in"; "with"; "end"; "as"; "Prop"; "Set"; "Type" were already keywords 2) Old "Case" and "Match" "Case" and "Match" are normally automatically translated into "match" or "match" and "fix", but sometimes it fails to do so. It typically fails when the Case or Match is argument of a tactic whose typing context is unknown because of a preceding Intro/Intros, as e.g. in Intros; Exists [m:nat](Case m of t [p:nat](f m) end) The solution is then to replace the invocation of the sequence of tactics into several invocation of the elementary tactics as follows Intros. Exists [m:nat](Case m of t [p:nat](f m) end) ^^^ 3) Change of definition or theorem names Type "entier" from fast_integer.v is renamed into "N" by the translator. As a consequence, user-defined objects of same name "N" are systematically qualified even tough it may not be necessary. 
The same apply for names "GREATER", "EQUAL", "LESS", etc... [COMPLETE LIST TO GIVE]. 4) Change of tactics names Since tactics names are now lowercase, this can clash with user-defined tactic definitions. To pally this, clashing names are renamed by adding an extra "_" to their name. ====================================================================== Main examples for new syntax ---------------------------- 1) Constructions Applicative terms don't any longer require to be surrounded by parentheses as e.g in "x = f y -> S x = S (f y)" Product is written "forall x y : T, U" "forall x y, U" "forall (x y : T) z (v w : V), U" etc. Abstraction is written "fun x y : T, U" "fun x y, U" "fun (x y : T) z (v w : V), U" etc. Pattern-matching is written "match x with c1 x1 x2 => t | c2 y as z => u end" "match v1, v2 with c1 x1 x2, _ => t | c2 y, d z => u end" "match v1 as y in le _ n, v2 as z in I p q return P n y p q z with c1 x1 x2, _ => t | c2 y, d z => u end" The last example is the new form of what was written "<[n;y:(le ? n);p;q;z:(I p q)](P n y p q z)>Cases v1 v2 of (c1 x1 x2) _ => t | (c2 y) (d z) => u end" Pattern-matching of type with one constructors and no dependencies of the arguments in the resulting type can be written "let (x,y,z) as u return P u := t in v" Local fixpoints are written "fix f (n m:nat) z (x : X) {struct m} : nat := ... with ..." and "struct" tells which argument is structurally decreasing. Explicitation of implicit arguments is written "f @1:=u v @3:=w t" "@f u v w t" 2) Tactics The main change is that tactics names are now lowercase. Besides this, the following renaming are applied: "NewDestruct" -> "destruct" "NewInduction" -> "induction" "Induction" -> "simple induction" "Destruct" -> "simple destruct" For tactics with occurrences, the occurrences now comes after and repeated use is separated by comma as in "Pattern 1 3 c d 4 e" -> "pattern c at 3 1, d, e at 4" "Unfold 1 3 f 4 g" -> "unfold f at 1 3, g at 4" "Simpl 1 3 e" -> "simpl e at 1 3" 3) Tactic language Definitions are now introduced with keyword "Ltac" (instead of "Tactic"/"Meta" "Definition") and are implicitly recursive ("Recursive" is no longer used). The new rule for distinguishing terms from ltac expressions is: Write "ltac:" in front of any tactic in argument position and "constr:" in front of any construction in head position 4) Vernacular language a) Assumptions The syntax for commands is mainly unchanged. Declaration of assumptions is now done as follows Variable m : t. Variables m n p : t. Variables (m n : t) (u v : s) (w : r). b) Definitions Definitions are done as follows Definition f m n : t := ... . Definition f m n := ... . Definition f m n := ... : t. Definition f (m n : u) : t := ... . Definition f (m n : u) := ... : t. Definition f (m n : u) := ... . Definition f a b (p q : v) r s (m n : t) : t := ... . Definition f a b (p q : v) r s (m n : t) := ... . Definition f a b (p q : v) r s (m n : t) := ... : t. c) Fixpoints Fixpoints are done this way Fixpoint f x (y : t) z a (b c : u) {struct z} : v := ... with ... . Fixpoint f x : v := ... . Fixpoint f (x : t) : v := ... . It is possible to give a concrete notation to a fixpoint as follows Fixpoint plus (n m:nat) {struct n} : nat as "n + m" := match n with | O => m | S p => S (p + m) end. d) Inductive types The syntax for inductive types is as follows Inductive t (a b : u) (d : e) : v := c1 : w1 | c2 : w2 | ... . Inductive t (a b : u) (d : e) : v := c1 : w1 | c2 : w2 | ... . 
Inductive t (a b : u) (d : e) : v := c1 (x y : t) : w1 | c2 (z : r) : w2 | ... . As seen in the last example, arguments of the constructors can be given before the colon. If the type itself is omitted (allowed only in case the inductive type has no real arguments), this yields an ML-style notation as follows Inductive nat : Set := O | S (n:nat). Inductive bool : Set := true | false. It is even possible to define a syntax at the same time, as follows: Inductive or (A B:Prop) : Prop as "A \/ B":= | or_introl (a:A) : A \/ B | or_intror (b:B) : A \/ B. Inductive and (A B:Prop) : Prop as "A /\ B" := conj (a:A) (b:B). coq-8.4pl2/dev/doc/build-system.dev.txt0000640000175000001440000001340411343756546017131 0ustar notinusers Since July 2007, Coq features a build system overhauled by Pierre Corbineau and Lionel Elie Mamane. --------------------------------------------------------------------- WARNING: In March 2010 this build system has been heavily adapted by Pierre Letouzey. In particular there no more explicit stage1,2. Stage3 was removed some time ago when coqdep was splitted into coqdep_boot and full coqdep. Ideas are still similar to what is describe below, but: 1) .ml4 are explicitely turned into .ml files, which stay after build 2) we let "make" handle the inclusion of .d without trying to guess what could be done at what time. Some initial inclusions hence _fail_, but "make" tries again later and succeed. TODO: remove obsolete sections below and better describe the new approach ----------------------------------------------------------------------- This file documents internals of the implementation of the build system. For what a Coq developer needs to know about the build system, see build-system.txt . .ml4 files ---------- .ml files corresponding to .ml4 files are created to keep ocamldep happy only. To ensure they are not used for compilation, they contain invalid OCaml. multi-stage build ----------------- Le processus de construction est séparé en trois étapes qui correspondent aux outils nécessaires pour calculer les dépendances de cette étape: stage1: ocamldep, sed , camlp4 sans fichiers de Coq stage2: camlp4 avec grammar.cma et/ou q_constr.cmo stage3: coqdep (.vo) Le Makefile a été séparé en plusieurs fichiers : - Makefile: coquille vide qui délègue les cibles à la bonne étape sauf clean et les fichiers pour emacs (car ils sont en quelque sorte en "stage0": aucun calcul de dépendance nécessaire). - Makefile.common : définitions des variables (essentiellement des listes de fichiers) - Makefile.build : les règles de compilation sans inclure de dépendances - Makefile.stage* : fichiers qui incluent les dépendances calculables à cette étape ainsi que Makefile.build. The build needs to be cut in stages because make will not take into account one include when making another include. Parallélisation --------------- Le découpage en étapes veut dire que le makefile est un petit peu moins parallélisable que strictement possible en théorie: par exemple, certaines choses faites en stage2 pourraient être faites en parallèle avec des choses de stage1. Nous essayons de minimiser cet effet, mais nous ne l'avons pas complètement éliminé parce que cela mènerait à un makefile très complexe. 
La minimisation est principalement que si on demande un objet spécifique (par exemple "make parsing/g_constr.cmx"), il est fait dans l'étape la plus basse possible (simplement), mais si un objet est fait comme dépendance de la cible demandée (par exemple dans un "make world"), il est fait le plus tard possible (par exemple, tout code OCaml non nécessaire pour coqdep ni grammar.cma ni q_constr.cmo est compilé en stage3 lors d'un "make world"; cela permet le parallélisme de compilation de code OCaml et de fichiers Coq (.v)). Le "(simplement)" ci-dessus veut dire que savoir si un fichier non nécessaire pour grammar.cma/q_constr.cmo peut en fait être fait en stage1 est compliqué avec make, alors nous retombons en général sur le stage2. La séparation entre le stage2 et stage3 est plus facile, donc l'optimisation ci-dessus s'y applique pleinement. En d'autres mots, nous avons au niveau conceptuel deux assignations d'étape pour chaque fichier: - l'étape la plus petite où nous savons qu'il peut être fait. - l'étape la plus grande où il peut être fait. Mais seule la première est gérée explicitement, la seconde est implicite. FIND_VCS_CLAUSE --------------- The recommended style of using FIND_VCS_CLAUSE is for example find . $(FIND_VCS_CLAUSE) '(' -name '*.example' ')' -print find . $(FIND_VCS_CLAUSE) '(' -name '*.example' -or -name '*.foo' ')' -print 1) The parentheses even in the one-criteria case is so that if one adds other conditions, e.g. change the first example to the second find . $(FIND_VCS_CLAUSE) '(' -name '*.example' -and -not -name '*.bak.example' ')' -print one is not tempted to write find . $(FIND_VCS_CLAUSE) -name '*.example' -and -not -name '*.bak.example' -print because this will not necessarily work as expected; $(FIND_VCS_CLAUSE) ends with an -or, and how it combines with what comes later depends on operator precedence and all that. Much safer to override it with parentheses. In short, it protects against the -or one doesn't see. 2) As to the -print at the end, yes it is necessary. Here's why. You are used to write: find . -name '*.example' and it works fine. But the following will not: find . $(FIND_VCS_CLAUSE) -name '*.example' it will also list things directly matched by FIND_VCS_CLAUSE (directories we want to prune, in which we don't want to find anything). C'est subtil... Il y a effectivement un -print implicite à la fin, qui fait que la commande habituelle sans print fonctionne bien, mais dès que l'on introduit d'autres commandes dans le lot (le -prune de FIND_VCS_CLAUSE), ça se corse à cause d'histoires de parenthèses du -print implicite par rapport au parenthésage dans la forme recommandée d'utilisation: Si on explicite le -print et les parenthèses implicites, cela devient: find . '(' '(' '(' -name .git -or -name debian ')' -prune ')' -or \ '(' -name '*.example' ')' ')' -print Le print agit TOUT ce qui précède, soit sur ce qui matche "'(' -name .git -or -name debian ')'" ET sur ce qui matche "'(' -name '*.example' ')'". alors qu'ajouter le print explicite change cela en find . 
'(' '(' -name .git -or -name debian ')' -prune ')' -or \ '(' '(' -name '*.example' ')' -print ')' Le print n'agit plus que sur ce qui matche "'(' -name '*.example' ')'" coq-8.4pl2/dev/doc/perf-analysis0000640000175000001440000001312311403664065015657 0ustar notinusersPerformance analysis (trunk repository) --------------------------------------- Jun 7, 2010: delayed re-typing of Ltac instances in matching (-1% on HighSchoolGeometry, -2% on JordanCurveTheorem) Jun 4, 2010: improvement in eauto and type classes inference by removing systematic preparation of debugging pretty-printing streams (std_ppcmds) (-7% in ATBR, visible only on V8.3 logs since ATBR is broken in trunk; -6% in HighSchoolGeometry) Apr 19, 2010: small improvement obtained by reducing evar instantiation from O(n^3) to O(n^2) in the size of the instance (-2% in Compcert, -2% AreaMethod, -15% in Ssreflect) Apr 17, 2010: small improvement obtained by not repeating unification twice in auto (-2% in Compcert, -2% in Algebra) Feb 15, 2010: Global decrease due to unicode inefficiency repaired Jan 8, 2010: Global increase due to an inefficiency in unicode treatment Dec 1, 2009 - Dec 19, 2009: Temporary addition of [forall x, P x] hints to exact (generally not significative but, e.g., +25% on Subst, +8% on ZFC, +5% on AreaMethod) Oct 19, 2009: Change in modules (CoLoR +35%) Aug 9, 2009: new files added in AreaMethod May 21, 2008: New version of CoRN (needs +84% more time to compile) Apr 25-29, 2008: Temporary attempt with delta in eauto (Matthieu) (+28% CoRN) Apr 17, 2008: improvement probably due to commit 10807 or 10813 (bug fixes, control of zeta in rewrite, auto (??)) (-18% Buchberger, -40% PAutomata, -28% IntMap, -43% CoRN, -13% LinAlg, but CatsInZFC -0.5% only, PiCalc stable, PersistentUnionFind -1%) Mar 11, 2008: (+19% PersistentUnionFind wrt Mar 3, +21% Angles, +270% Continuations between 7/3 and 18/4) Mar 7, 2008: (-10% PersistentUnionFind wrt Mar 3) Feb 20, 2008: temporary 1-day slow down (+64% LinAlg) Feb 14, 2008: (-10% PersistentUnionFind, -19% Groups) Feb 7, 8, 2008: temporary 2-days long slow down (+20 LinAlg, +50% BDDs) Feb 2, 2008: many updates of the module system (-13% LinAlg, -50% AMM11262, -5% Goedel, -1% PersistentUnionFind, -42% ExactRealArithmetic, -41% Icharate, -42% Kildall, -74% SquareMatrices) Jan 1, 2008: merge of TypeClasses branch (+8% PersistentUnionFind, +36% LinAlg, +76% Goedel) Nov 16, 17, 2007: (+18% Cantor, +4% LinAlg, +27% IEEE1394 on 2 days) Nov 8, 2007: (+18% Cantor, +16% LinAlg, +55% Continuations, +200% IEEE1394, +170% CTLTCTL, +220% SquareMatrices) Oct 29, V8.1 (+ 3% geometry but CoRN, Godel, Kildall, Stalmark stables) Between Oct 12 and Oct 27, 2007: inefficiency temporarily introduced in the tactic interpreter (from revision 10222 to 10267) (+22% CoRN, +10% geometry, ...) Sep 16, 2007: (+16% PersistentUnionFind on 3 days, LinAlg stable, Sep 4, 2007: (+26% PersistentUnionFind, LinAlg stable, Jun 6, 2007: optimization of the need for type unification in with-bindings (-3.5% Stalmark, -6% Kildall) May 20, 21, 22, 2007: improved inference of with-bindings (including activation of unification on types) (+4% PICALC, +5% Stalmark, +7% Kildall) May 11, 2007: added primitive integers (+6% CoLoR, +7% CoRN, +5% FSets, ...) Between Feb 22 and March 16, 2007: bench temporarily moved on JMN's computer (-25% CoRN, -25% Fairisle, ...) 
Oct 29 and Oct 30, 2006: abandoned attempt to add polymorphism on definitions (+4% in general during these two days) Oct 17, 2006: improvement in new field [r9248] (QArith -3%, geometry: -2%) Oct 5, 2006: fixing wrong unification of Meta below binders (e.g. CatsInZFC: +10%, CoRN: -2.5%, Godel: +4%, LinAlg: +7%, DISTRIBUTED_REFERENCE_COUNTING: +10%, CoLoR: +1%) Sep 26, 2006: new field [r9178-9181] (QArith: -16%, geometry: -5%, Float: +6%, BDDS:+5% but no ring in it) Sep 12, 2006: Rocq/AREA_METHOD extended (~ 530s) Aug 12, 2006: Rocq/AREA_METHOD added (~ 480s) May 30, 2006: Nancy/CoLoR added (~ 319s) May 23, 2006: new, lighter version of polymorphic inductive types (CoRN: -27%, back to Mar-24 time) May 17, 2006: changes in List.v (DISTRIBUTED_REFERENCE_COUNTING: -) May 5, 2006: improvement in closure (array instead of lists) (e.g. CatsInZFC: -10%, CoRN: -3%, May 23, 2006: polymorphic inductive types (precise, heavy algorithm) (CoRN: +37%) Dec 29, 2005: new test and use of -vm in Stalmarck Dec 27, 2005: contrib Karatsuba added (~ 30s) Dec 28, 2005: size decrease mainly due to Defined moved to Qed in FSets (reduction from 95M to 7Mo) Dec 1-14, 2005: benchmarking server down between the two dates: Godel: -10%, CoRN: -10% probably due to changes around vm (new informative Cast, change of equality in named_context_val) Oct 6, 2005: contribs IPC and Tait added (~ 22s and ~ 25s) Aug 19, 2005: time decrease after application of "Array.length x=0" Xavier's suggestions for optimisation (e.g. Nijmegen/QArith: -3%, Nijmegen/CoRN: -7%, Godel: -3%) Aug 1, 2005: contrib Kildall added (~ 65s) Jul 26-Aug 2, 2005: bench down Jul 14-15, 2005: 4 contribs failed including CoRN Jul 14, 2005: time increase after activation of "closure optimisation" (e.g. Nijmegen/QArith: +8%, Nijmegen/CoRN: +3%, Godel: +13%) Jul 7, 2005: adding contrib Fermat4 Jun 17, 2005: contrib Goodstein extended and moved to CantorOrdinals (~ 30s) May 19, 2005: contrib Goodstein and prfx (~ 9s) added Apr 21, 2005: strange time decrease (could it be due to the change of Back and Reset mechanism) (e.g. Nijmegen/CoRN: -2%, Nijmegen/QARITH: -4%, Godel: -11%) Mar 20, 2005: fixed Logic.with_check bug global time decrease (e.g. Nijmegen/CoRN: -3%, Nijmegen/QARITH: -1.5%) Jan 31-Feb 8, 2005: small instability (e.g. CoRN: ~2015s -> ~1999s -> ~2032s, Godel: ~340s -> ~370s) Jan 13, 2005: contrib SumOfTwoSquare added (~ 38s) coq-8.4pl2/dev/doc/extensions.txt0000640000175000001440000000131310434703304016106 0ustar notinusersComment ajouter une nouvelle entre primitive pour les TACTIC EXTEND ? ====================================================================== Exemple de l'ajout de l'entre "clause": - ajouter un type ClauseArgType dans interp/genarg.ml{,i}, avec les wit_, rawwit_, et globwit_ correspondants - ajouter partout o Genarg.argument_type est filtr le cas traitant de ce nouveau ClauseArgType - utiliser le rawwit_clause pour dfinir une entre clause du bon type et du bon nom dans le module Tactic de pcoq.ml4 - il faut aussi exporter la rgle hors de g_tactic.ml4. Pour cela, il faut rejouter clause dans le GLOBAL du GEXTEND - seulement aprs, le nom clause sera accessible dans les TACTIC EXTEND ! coq-8.4pl2/dev/doc/style.txt0000640000175000001440000000312711045643050015054 0ustar notinusers << L'uniformit du style est plus importante que le style lui-mme. 
>> (Kernigan & Pike, The Practice of Programming) Mode Emacs ========== Tuareg, que l'on trouve ici : http://www.prism.uvsq.fr/~acohen/tuareg/ avec le rglage suivant : (setq tuareg-in-indent 2) Types rcursifs et filtrages ============================ Une barre de sparation y compris sur le premier constructeur type t = | A | B of machin match expr with | A -> ... | B x -> ... Remarque : partir de la 8.2 environ, la tendance est utiliser le format suivant qui permet de limiter l'escalade d'indentation tout en produisant un aspect visuel intressant de bloc : type t = | A | B of machin match expr with | A -> ... | B x -> ... let f expr = match expr with | A -> ... | B x -> ... let f expr = function | A -> ... | B x -> ... Le deuxime cas est obtenu sous tuareg avec les rglages (setq tuareg-with-indent 0) (setq tuareg-function-indent 0) (setq tuareg-let-always-indent nil) /// notons que cette dernire est bien /// pour les let mais pas pour les let-in Conditionnelles =============== if condition then premier-cas else deuxieme-cas Si effets de bord dans les branches, utilisez begin ... end et non des parenthses i.e. if condition then begin instr1; instr2 end else begin instr3; instr4 end Si la premire branche lve une exception, vitez le else i.e. if condition then if condition then error "machin"; error "machin" -----> suite else suite coq-8.4pl2/dev/TODO0000640000175000001440000000140007043703536013077 0ustar notinusers o options de la ligne de commande - reporter les options de l'ancien script coqtop sur le nouveau coqtop.ml o arguments implicites - les calculer une fois pour toutes la dclaration (dans Declare) et stocker cette information dans le in_variable, in_constant, etc. o Environnements compils (type Environ.compiled_env) - pas de timestamp mais plutt un checksum avec Digest (mais comment ?) o Efficacit - utiliser DOPL plutt que DOPN (sauf pour Case) - batch mode => pas de undo, ni de reset - conversion : dplier la constante la plus rcente - un cache pour type_of_const, type_of_inductive, type_of_constructor, lookup_mind_specif o Toplevel - parsing de la ligne de commande : utiliser Arg ??? 
coq-8.4pl2/dev/Makefile.oug0000640000175000001440000000500211447122040014626 0ustar notinusers####################################################################### # v # The Coq Proof Assistant / The Coq Development Team # # " --useless-elements $@ core_intf.oug: $(OUG) --dump-data $@ -rectypes $(MLINCLUDES) $(COREML) $(COREMLI) core_intf.useless: core_intf.oug $(OUG) --load-data $< --no-reduce --print-loc --roots "" --useless-elements $@ # Analysis of coqchk, considering only files in the checker/ subdir CHECKERML:=$(call local_ml_of_cma,checker/check.cma) CHECKERMLI:=$(call mli_of_ml,$(CHECKERML)) ## BUG: in oug, include dirs have reversed priority compared with ocaml, cannot use CHKLIBS MYCHKINCL:=$(MLINCLUDES) -I checker checker.oug: $(OUG) --dump-data $@ -rectypes $(MYCHKINCL) $(CHECKERML) #$(CHECKERMLI) checker.useless: checker.oug $(OUG) --load-data $< --no-reduce --print-loc --roots "" --useless-elements $@ # Analysis of extraction EXTRACTIONML:=$(call local_ml_of_cma,$(EXTRACTIONCMA)) EXTRACTIONMLI:=$(call mli_of_ml,$(EXTRACTIONMLI)) extraction.oug: $(OUG) --dump-data $@ -rectypes $(MLINCLUDES) $(EXTRACTIONML) #$(EXTRACTIONMLI) extraction.useless: extraction.oug $(OUG) --load-data $< --no-reduce --print-loc --useless-elements $@ # More to come ...coq-8.4pl2/dev/set_raw_db0000640000175000001440000000005010555151327014440 0ustar notinusersinstall_printer Top_printers.ppconstrdb coq-8.4pl2/dev/ocamlopt_shared_os5fix.sh0000750000175000001440000000213611414353610017404 0ustar notinusers#/bin/sh ### Temporary fix for production of .cmxs on MacOS 10.5 OCAMLOPT=$1 CMXS=$2 DIR=`dirname $CMXS` BASE=`basename $CMXS .cmxs` CMXA=$DIR/$BASE.cmxa ARC=$DIR/$BASE.a # we assume that all object files are at the same place than the rest OBJS=`ar t $ARC | sed -e "s|^|$DIR/|" | grep -v SYMDEF` $OCAMLOPT -dstartup -linkall -shared -o $CMXS $CMXA # Fix1: add a dummy instruction before the caml generic functions # Fix2: make all caml generic functions private rm -f $CMXS $CMXS.startup.fixed.s cat $CMXS.startup.s | sed \ -e "s/_caml_shared_startup__code_begin:/_caml_shared_startup__code_begin: ret/" \ -e "s/.globl _caml_curry/.private_extern _caml_curry/" \ -e "s/.globl _caml_apply/.private_extern _caml_apply/" \ -e "s/.globl _caml_tuplify/.private_extern _caml_tuplify/" \ > $CMXS.startup.fixed.s # Recompile fixed startup code as -o $CMXS.startup.o $CMXS.startup.fixed.s # Build fixed .cmxs (assume all object files are at the same place) ld -bundle -flat_namespace -undefined warning -read_only_relocs suppress -o $CMXS $OBJS $CMXS.startup.o rm $CMXS.startup.o $CMXS.startup.s $CMXS.startup.fixed.scoq-8.4pl2/dev/macosify_accel.sh0000750000175000001440000000016412010750376015706 0ustar notinusers#!/usr/bin/sed -f s/^;\{0,1\} *\(.*\)\(.*\)$/\1\2/ s/^;\{0,1\} *\(.*\)\(.*\)$/\1\2/ coq-8.4pl2/dev/base_include0000640000175000001440000001076712122674544014767 0ustar notinusers (* File to include to get some Coq facilities under the ocaml toplevel. 
This file is loaded by include *) #cd".";; #directory "parsing";; #directory "interp";; #directory "toplevel";; #directory "library";; #directory "kernel";; #directory "pretyping";; #directory "lib";; #directory "proofs";; #directory "tactics";; #directory "translate";; #directory "+camlp4";; (* lazy solution: add both of camlp4/5 so that *) #directory "+camlp5";; (* Gramext is found in top_printers.ml *) #use "top_printers.ml";; #use "vm_printers.ml";; #install_printer (* identifier *) ppid;; #install_printer (* identifier *) ppidset;; #install_printer (* Intset.t *) ppintset;; #install_printer (* label *) pplab;; #install_printer (* mod_bound_id *) ppmbid;; #install_printer (* dir_path *) ppdir;; #install_printer (* module_path *) ppmp;; #install_printer (* section_path *) ppsp;; #install_printer (* qualid *) ppqualid;; #install_printer (* kernel_name *) ppkn;; #install_printer (* constant *) ppcon;; #install_printer (* cl_index *) ppclindex;; #install_printer (* constr *) print_pure_constr;; #install_printer (* patch *) ppripos;; #install_printer (* values *) ppvalues;; #install_printer (* Idpred.t *) pp_idpred;; #install_printer (* Cpred.t *) pp_cpred;; #install_printer ppzipper;; #install_printer ppstack;; #install_printer ppatom;; #install_printer ppwhd;; #install_printer ppvblock;; #install_printer (* bigint *) ppbigint;; #install_printer (* loc *) pploc;; #install_printer (* substitution *) prsubst;; (* Open main files *) open Names open Term open Typeops open Term_typing open Univ open Inductive open Indtypes open Cooking open Closure open Reduction open Safe_typing open Declare open Declaremods open Impargs open Libnames open Nametab open Library open Cases open Pattern open Cbv open Classops open Pretyping open Pretyping.Default open Pretyping.Default.Cases open Cbv open Classops open Clenv open Clenvtac open Glob_term open Coercion open Coercion.Default open Recordops open Detyping open Reductionops open Evarconv open Retyping open Evarutil open Tacred open Evd open Termops open Namegen open Indrec open Typing open Inductiveops open Unification open Matching open Constrextern open Constrintern open Coqlib open Genarg open Modintern open Notation open Ppextend open Reserve open Syntax_def open Topconstr open Prettyp open Search open Evar_refiner open Logic open Pfedit open Proof_type open Redexpr open Refiner open Tacmach open Decl_proof_instr open Tactic_debug open Decl_mode open Auto open Autorewrite open Contradiction open Eauto open Elim open Equality open Evar_tactics open Extraargs open Extratactics open Hiddentac open Hipattern open Inv open Leminv open Refine open Tacinterp open Tacticals open Tactics open Eqschemes open Cerrors open Class open Command open Indschemes open Ind_tables open Auto_ind_decl open Lemmas open Coqinit open Coqtop open Discharge open Himsg open Metasyntax open Mltop open Record open Toplevel open Vernacentries open Vernacinterp open Vernac (* Various utilities *) let qid = Libnames.qualid_of_string;; (* parsing of terms *) let parse_constr = Pcoq.parse_string Pcoq.Constr.constr;; let parse_tac = Pcoq.parse_string Pcoq.Tactic.tactic;; let parse_vernac = Pcoq.parse_string Pcoq.Vernac_.vernac;; (* build a term of type glob_constr without type-checking or resolution of implicit syntax *) let e s = Constrintern.intern_constr Evd.empty (Global.env()) (parse_constr s);; (* build a term of type constr with type-checking and resolution of implicit syntax *) let constr_of_string s = Constrintern.interp_constr Evd.empty (Global.env()) (parse_constr s);; (* 
get the body of a constant *) open Declarations;; let constbody_of_string s = let b = Global.lookup_constant (Nametab.locate_constant (qualid_of_string s)) in Option.get (body_of_constant b);; (* Get the current goal *) (* let getgoal x = top_goal_of_pftreestate (Pfedit.get_pftreestate x);; let get_nth_goal n = nth_goal_of_pftreestate n (Pfedit.get_pftreestate ());; let current_goal () = get_nth_goal 1;; *) let pf_e gl s = Constrintern.interp_constr (project gl) (pf_env gl) (parse_constr s);; (* Set usual printing since the global env is available from the tracer *) let _ = Constrextern.in_debugger := false let _ = Constrextern.set_extern_reference (fun loc _ r -> Libnames.Qualid (loc,Nametab.shortest_qualid_of_global Idset.empty r));; open Toplevel let go = loop let _ = print_string ("\n\tOcaml toplevel with Coq printers and utilities (use go();; to exit)\n\n"); flush_all() coq-8.4pl2/coq-win32.itarget0000640000175000001440000000004711341277101014730 0ustar notinusersbinariesopt plugins/pluginsdyn.otarget coq-8.4pl2/coq.itarget0000640000175000001440000000033711401761315013774 0ustar notinusers# NB: for the moment we start with bytecode compilation # for early error detection in .ml binariesbyte plugins/pluginsbyte.otarget binariesopt plugins/pluginsopt.otarget theories/theories.otarget plugins/pluginsvo.otarget coq-8.4pl2/pretyping/0000750000175000001440000000000012127276536013662 5ustar notinuserscoq-8.4pl2/pretyping/pretyping.mllib0000640000175000001440000000042011662502172016710 0ustar notinusersTermops Evd Reductionops Vnorm Namegen Inductiveops Retyping Cbv Pretype_errors Evarutil Term_dnet Recordops Evarconv Arguments_renaming Typing Glob_term Pattern Matching Tacred Typeclasses_errors Typeclasses Classops Coercion Unification Detyping Indrec Cases Pretyping coq-8.4pl2/pretyping/recordops.mli0000640000175000001440000000602612010532755016357 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit (** [lookup_structure isp] returns the struc_typ associated to the inductive path [isp] if it corresponds to a structure, otherwise it fails with [Not_found] *) val lookup_structure : inductive -> struc_typ (** [lookup_projections isp] returns the projections associated to the inductive path [isp] if it corresponds to a structure, otherwise it fails with [Not_found] *) val lookup_projections : inductive -> constant option list (** raise [Not_found] if not a projection *) val find_projection_nparams : global_reference -> int (** raise [Not_found] if not a projection *) val find_projection : global_reference -> struc_typ (** we keep an index (dnet) of record's arguments + fields (=methods). 
Here is how to declare them: *) val declare_method : global_reference -> Evd.evar -> Evd.evar_map -> unit (** and here is how to search for methods matched by a given term: *) val methods_matching : constr -> ((global_reference*Evd.evar*Evd.evar_map) * (constr*existential_key)*Termops.subst) list (** {6 Canonical structures } *) (** A canonical structure declares "canonical" conversion hints between the effective components of a structure and the projections of the structure *) type cs_pattern = Const_cs of global_reference | Prod_cs | Sort_cs of sorts_family | Default_cs type obj_typ = { o_DEF : constr; o_INJ : int; (** position of trivial argument *) o_TABS : constr list; (** ordered *) o_TPARAMS : constr list; (** ordered *) o_NPARAMS : int; o_TCOMPS : constr list } (** ordered *) val cs_pattern_of_constr : constr -> cs_pattern * int * constr list val pr_cs_pattern : cs_pattern -> Pp.std_ppcmds val lookup_canonical_conversion : (global_reference * cs_pattern) -> obj_typ val declare_canonical_structure : global_reference -> unit val is_open_canonical_projection : Environ.env -> Evd.evar_map -> (constr * constr list) -> bool val canonical_projections : unit -> ((global_reference * cs_pattern) * obj_typ) list coq-8.4pl2/pretyping/namegen.ml0000640000175000001440000002660412010532755015624 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let rec find_prefix = function |MPfile dp1 -> not (dp1=dp) |MPdot(mp,_) -> find_prefix mp |MPbound(_) -> false in find_prefix current_mp | p -> false let is_imported_ref = function | VarRef _ -> false | IndRef (kn,_) | ConstructRef ((kn,_),_) -> let (mp,_,_) = repr_mind kn in is_imported_modpath mp | ConstRef kn -> let (mp,_,_) = repr_con kn in is_imported_modpath mp let is_global id = try let ref = locate (qualid_of_ident id) in not (is_imported_ref ref) with Not_found -> false let is_constructor id = try match locate (qualid_of_ident id) with | ConstructRef _ -> true | _ -> false with Not_found -> false (**********************************************************************) (* Generating "intuitive" names from its type *) let lowercase_first_char id = (* First character of a constr *) lowercase_first_char_utf8 (string_of_id id) let sort_hdchar = function | Prop(_) -> "P" | Type(_) -> "T" let hdchar env c = let rec hdrec k c = match kind_of_term c with | Prod (_,_,c) -> hdrec (k+1) c | Lambda (_,_,c) -> hdrec (k+1) c | LetIn (_,_,_,c) -> hdrec (k+1) c | Cast (c,_,_) -> hdrec k c | App (f,l) -> hdrec k f | Const kn -> lowercase_first_char (id_of_label (con_label kn)) | Ind x -> lowercase_first_char (basename_of_global (IndRef x)) | Construct x -> lowercase_first_char (basename_of_global (ConstructRef x)) | Var id -> lowercase_first_char id | Sort s -> sort_hdchar s | Rel n -> (if n<=k then "p" (* the initial term is flexible product/function *) else try match Environ.lookup_rel (n-k) env with | (Name id,_,_) -> lowercase_first_char id | (Anonymous,_,t) -> hdrec 0 (lift (n-k) t) with Not_found -> "y") | Fix ((_,i),(lna,_,_)) -> let id = match lna.(i) with Name id -> id | _ -> assert false in lowercase_first_char id | CoFix (i,(lna,_,_)) -> let id = match lna.(i) with Name id -> id | _ -> assert false in lowercase_first_char id | Meta _|Evar _|Case (_, _, _, _) -> "y" in hdrec 0 c let id_of_name_using_hdchar env a = function | Anonymous -> id_of_string (hdchar env a) | Name id -> id let named_hd env a = function | Anonymous -> Name (id_of_string (hdchar 
env a)) | x -> x let mkProd_name env (n,a,b) = mkProd (named_hd env a n, a, b) let mkLambda_name env (n,a,b) = mkLambda (named_hd env a n, a, b) let lambda_name = mkLambda_name let prod_name = mkProd_name let prod_create env (a,b) = mkProd (named_hd env a Anonymous, a, b) let lambda_create env (a,b) = mkLambda (named_hd env a Anonymous, a, b) let name_assumption env (na,c,t) = match c with | None -> (named_hd env t na, None, t) | Some body -> (named_hd env body na, c, t) let name_context env hyps = snd (List.fold_left (fun (env,hyps) d -> let d' = name_assumption env d in (push_rel d' env, d' :: hyps)) (env,[]) (List.rev hyps)) let mkProd_or_LetIn_name env b d = mkProd_or_LetIn (name_assumption env d) b let mkLambda_or_LetIn_name env b d = mkLambda_or_LetIn (name_assumption env d)b let it_mkProd_or_LetIn_name env b hyps = it_mkProd_or_LetIn b (name_context env hyps) let it_mkLambda_or_LetIn_name env b hyps = it_mkLambda_or_LetIn b (name_context env hyps) (**********************************************************************) (* Fresh names *) let default_x = id_of_string "x" (* Looks for next "good" name by lifting subscript *) let next_ident_away_from id bad = let rec name_rec id = if bad id then name_rec (lift_subscript id) else id in name_rec id (* Restart subscript from x0 if name starts with xN, or x00 if name starts with x0N, etc *) let restart_subscript id = if not (has_subscript id) then id else (* Ce serait sans doute mieux avec quelque chose inspir de *** make_ident id (Some 0) *** mais a brise la compatibilit... *) forget_subscript id (* Now, there are different renaming strategies... *) (* 1- Looks for a fresh name for printing in cases pattern *) let next_name_away_in_cases_pattern na avoid = let id = match na with Name id -> id | Anonymous -> default_x in next_ident_away_from id (fun id -> List.mem id avoid or is_constructor id) (* 2- Looks for a fresh name for introduction in goal *) (* The legacy strategy for renaming introduction variables is not very uniform: - if the name to use is fresh in the context but used as a global name, then a fresh name is taken by finding a free subscript starting from the current subscript; - but if the name to use is not fresh in the current context, the fresh name is taken by finding a free subscript starting from 0 *) let next_ident_away_in_goal id avoid = let id = if List.mem id avoid then restart_subscript id else id in let bad id = List.mem id avoid || (is_global id & not (is_section_variable id)) in next_ident_away_from id bad let next_name_away_in_goal na avoid = let id = match na with Name id -> id | Anonymous -> id_of_string "H" in next_ident_away_in_goal id avoid (* 3- Looks for next fresh name outside a list that is moreover valid as a global identifier; the legacy algorithm is that if the name is already used in the list, one looks for a name of same base with lower available subscript; if the name is not in the list but is used globally, one looks for a name of same base with lower subscript beyond the current subscript *) let next_global_ident_away id avoid = let id = if List.mem id avoid then restart_subscript id else id in let bad id = List.mem id avoid || is_global id in next_ident_away_from id bad (* 4- Looks for next fresh name outside a list; if name already used, looks for same name with lower available subscript *) let next_ident_away id avoid = if List.mem id avoid then next_ident_away_from (restart_subscript id) (fun id -> List.mem id avoid) else id let next_name_away_with_default default na avoid = let id = match na 
with Name id -> id | Anonymous -> id_of_string default in next_ident_away id avoid let reserved_type_name = ref (fun t -> Anonymous) let set_reserved_typed_name f = reserved_type_name := f let next_name_away_with_default_using_types default na avoid t = let id = match na with | Name id -> id | Anonymous -> match !reserved_type_name t with | Name id -> id | Anonymous -> id_of_string default in next_ident_away id avoid let next_name_away = next_name_away_with_default "H" let make_all_name_different env = let avoid = ref (ids_of_named_context (named_context env)) in process_rel_context (fun (na,c,t) newenv -> let id = next_name_away na !avoid in avoid := id::!avoid; push_rel (Name id,c,t) newenv) env (* 5- Looks for next fresh name outside a list; avoids also to use names that would clash with short name of global references; if name is already used, looks for name of same base with lower available subscript beyond current subscript *) let occur_rel p env id = try lookup_name_of_rel p env = Name id with Not_found -> false (* Unbound indice : may happen in debug *) let visibly_occur_id id (nenv,c) = let rec occur n c = match kind_of_term c with | Const _ | Ind _ | Construct _ | Var _ when shortest_qualid_of_global Idset.empty (global_of_constr c) = qualid_of_ident id -> raise Occur | Rel p when p>n & occur_rel (p-n) nenv id -> raise Occur | _ -> iter_constr_with_binders succ occur n c in try occur 1 c; false with Occur -> true | Not_found -> false (* Happens when a global is not in the env *) let next_ident_away_for_default_printing env_t id avoid = let bad id = List.mem id avoid or visibly_occur_id id env_t in next_ident_away_from id bad let next_name_away_for_default_printing env_t na avoid = let id = match na with | Name id -> id | Anonymous -> (* In principle, an anonymous name is not dependent and will not be *) (* taken into account by the function compute_displayed_name_in; *) (* just in case, invent a valid name *) id_of_string "H" in next_ident_away_for_default_printing env_t id avoid (**********************************************************************) (* Displaying terms avoiding bound variables clashes *) (* Renaming strategy introduced in December 1998: - Rule number 1: all names, even if unbound and not displayed, contribute to the list of names to avoid - Rule number 2: only the dependency status is used for deciding if a name is displayed or not Example: bool_ind: "forall (P:bool->Prop)(f:(P true))(f:(P false))(b:bool), P b" is displayed "forall P:bool->Prop, P true -> P false -> forall b:bool, P b" but f and f0 contribute to the list of variables to avoid (knowing that f and f0 are how the f's would be named if introduced, assuming no other f and f0 are already used). 
*) type renaming_flags = | RenamingForCasesPattern | RenamingForGoal | RenamingElsewhereFor of (name list * constr) let next_name_for_display flags = match flags with | RenamingForCasesPattern -> next_name_away_in_cases_pattern | RenamingForGoal -> next_name_away_in_goal | RenamingElsewhereFor env_t -> next_name_away_for_default_printing env_t (* Remark: Anonymous var may be dependent in Evar's contexts *) let compute_displayed_name_in flags avoid na c = if na = Anonymous & noccurn 1 c then (Anonymous,avoid) else let fresh_id = next_name_for_display flags na avoid in let idopt = if noccurn 1 c then Anonymous else Name fresh_id in (idopt, fresh_id::avoid) let compute_and_force_displayed_name_in flags avoid na c = if na = Anonymous & noccurn 1 c then (Anonymous,avoid) else let fresh_id = next_name_for_display flags na avoid in (Name fresh_id, fresh_id::avoid) let compute_displayed_let_name_in flags avoid na c = let fresh_id = next_name_for_display flags na avoid in (Name fresh_id, fresh_id::avoid) let rec rename_bound_vars_as_displayed avoid env c = let rec rename avoid env c = match kind_of_term c with | Prod (na,c1,c2) -> let na',avoid' = compute_displayed_name_in (RenamingElsewhereFor (env,c2)) avoid na c2 in mkProd (na', c1, rename avoid' (add_name na' env) c2) | LetIn (na,c1,t,c2) -> let na',avoid' = compute_displayed_let_name_in (RenamingElsewhereFor (env,c2)) avoid na c2 in mkLetIn (na',c1,t, rename avoid' (add_name na' env) c2) | Cast (c,k,t) -> mkCast (rename avoid env c, k,t) | _ -> c in rename avoid env c coq-8.4pl2/pretyping/glob_term.ml0000640000175000001440000003653012064012270016155 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* loc | PatCstr(loc,_,_,_) -> loc type patvar = identifier type glob_sort = GProp of Term.contents | GType of Univ.universe option type binding_kind = Lib.binding_kind = Explicit | Implicit type quantified_hypothesis = AnonHyp of int | NamedHyp of identifier type 'a explicit_bindings = (loc * quantified_hypothesis * 'a) list type 'a bindings = | ImplicitBindings of 'a list | ExplicitBindings of 'a explicit_bindings | NoBindings type 'a with_bindings = 'a * 'a bindings type 'a cast_type = | CastConv of cast_kind * 'a | CastCoerce (* Cast to a base type (eg, an underlying inductive type) *) type glob_constr = | GRef of (loc * global_reference) | GVar of (loc * identifier) | GEvar of loc * existential_key * glob_constr list option | GPatVar of loc * (bool * patvar) (* Used for patterns only *) | GApp of loc * glob_constr * glob_constr list | GLambda of loc * name * binding_kind * glob_constr * glob_constr | GProd of loc * name * binding_kind * glob_constr * glob_constr | GLetIn of loc * name * glob_constr * glob_constr | GCases of loc * case_style * glob_constr option * tomatch_tuples * cases_clauses | GLetTuple of loc * name list * (name * glob_constr option) * glob_constr * glob_constr | GIf of loc * glob_constr * (name * glob_constr option) * glob_constr * glob_constr | GRec of loc * fix_kind * identifier array * glob_decl list array * glob_constr array * glob_constr array | GSort of loc * glob_sort | GHole of (loc * hole_kind) | GCast of loc * glob_constr * glob_constr cast_type and glob_decl = name * binding_kind * glob_constr option * glob_constr and fix_recursion_order = GStructRec | GWfRec of glob_constr | GMeasureRec of glob_constr * glob_constr option and fix_kind = | GFix of ((int option * fix_recursion_order) array * int) | GCoFix of int 
and predicate_pattern = name * (loc * inductive * int * name list) option and tomatch_tuple = (glob_constr * predicate_pattern) and tomatch_tuples = tomatch_tuple list and cases_clause = (loc * identifier list * cases_pattern list * glob_constr) and cases_clauses = cases_clause list let cases_predicate_names tml = List.flatten (List.map (function | (tm,(na,None)) -> [na] | (tm,(na,Some (_,_,_,nal))) -> na::nal) tml) let mkGApp loc p t = match p with | GApp (loc,f,l) -> GApp (loc,f,l@[t]) | _ -> GApp (loc,p,[t]) let map_glob_decl_left_to_right f (na,k,obd,ty) = let comp1 = Option.map f obd in let comp2 = f ty in (na,k,comp1,comp2) let map_glob_constr_left_to_right f = function | GApp (loc,g,args) -> let comp1 = f g in let comp2 = Util.list_map_left f args in GApp (loc,comp1,comp2) | GLambda (loc,na,bk,ty,c) -> let comp1 = f ty in let comp2 = f c in GLambda (loc,na,bk,comp1,comp2) | GProd (loc,na,bk,ty,c) -> let comp1 = f ty in let comp2 = f c in GProd (loc,na,bk,comp1,comp2) | GLetIn (loc,na,b,c) -> let comp1 = f b in let comp2 = f c in GLetIn (loc,na,comp1,comp2) | GCases (loc,sty,rtntypopt,tml,pl) -> let comp1 = Option.map f rtntypopt in let comp2 = Util.list_map_left (fun (tm,x) -> (f tm,x)) tml in let comp3 = Util.list_map_left (fun (loc,idl,p,c) -> (loc,idl,p,f c)) pl in GCases (loc,sty,comp1,comp2,comp3) | GLetTuple (loc,nal,(na,po),b,c) -> let comp1 = Option.map f po in let comp2 = f b in let comp3 = f c in GLetTuple (loc,nal,(na,comp1),comp2,comp3) | GIf (loc,c,(na,po),b1,b2) -> let comp1 = Option.map f po in let comp2 = f b1 in let comp3 = f b2 in GIf (loc,f c,(na,comp1),comp2,comp3) | GRec (loc,fk,idl,bl,tyl,bv) -> let comp1 = Array.map (Util.list_map_left (map_glob_decl_left_to_right f)) bl in let comp2 = Array.map f tyl in let comp3 = Array.map f bv in GRec (loc,fk,idl,comp1,comp2,comp3) | GCast (loc,c,k) -> let comp1 = f c in let comp2 = match k with CastConv (k,t) -> CastConv (k, f t) | x -> x in GCast (loc,comp1,comp2) | (GVar _ | GSort _ | GHole _ | GRef _ | GEvar _ | GPatVar _) as x -> x let map_glob_constr = map_glob_constr_left_to_right (* let name_app f e = function | Name id -> let (id, e) = f id e in (Name id, e) | Anonymous -> Anonymous, e let fold_ident g idl e = let (idl,e) = Array.fold_right (fun id (idl,e) -> let id,e = g id e in (id::idl,e)) idl ([],e) in (Array.of_list idl,e) let map_glob_constr_with_binders_loc loc g f e = function | GVar (_,id) -> GVar (loc,id) | GApp (_,a,args) -> GApp (loc,f e a, List.map (f e) args) | GLambda (_,na,ty,c) -> let na,e = name_app g e na in GLambda (loc,na,f e ty,f e c) | GProd (_,na,ty,c) -> let na,e = name_app g e na in GProd (loc,na,f e ty,f e c) | GLetIn (_,na,b,c) -> let na,e = name_app g e na in GLetIn (loc,na,f e b,f e c) | GCases (_,tyopt,tml,pl) -> (* We don't modify pattern variable since we don't traverse patterns *) let g' id e = snd (g id e) in let h (_,idl,p,c) = (loc,idl,p,f (List.fold_right g' idl e) c) in GCases (loc,Option.map (f e) tyopt,List.map (f e) tml, List.map h pl) | GRec (_,fk,idl,tyl,bv) -> let idl',e' = fold_ident g idl e in GRec (loc,fk,idl',Array.map (f e) tyl,Array.map (f e') bv) | GCast (_,c,t) -> GCast (loc,f e c,f e t) | GSort (_,x) -> GSort (loc,x) | GHole (_,x) -> GHole (loc,x) | GRef (_,x) -> GRef (loc,x) | GEvar (_,x,l) -> GEvar (loc,x,l) | GPatVar (_,x) -> GPatVar (loc,x) *) let fold_glob_constr f acc = let rec fold acc = function | GVar _ -> acc | GApp (_,c,args) -> List.fold_left fold (fold acc c) args | GLambda (_,_,_,b,c) | GProd (_,_,_,b,c) | GLetIn (_,_,b,c) -> fold (fold acc b) c | 
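(* Added note, not in the original source: in the Cases/LetTuple/If arms
   below, only the return predicate, the terms matched and the branch
   bodies are folded; the patterns of the branches themselves are not
   traversed (see [fold_pattern] further down). *)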
GCases (_,_,rtntypopt,tml,pl) -> List.fold_left fold_pattern (List.fold_left fold (Option.fold_left fold acc rtntypopt) (List.map fst tml)) pl | GLetTuple (_,_,rtntyp,b,c) -> fold (fold (fold_return_type acc rtntyp) b) c | GIf (_,c,rtntyp,b1,b2) -> fold (fold (fold (fold_return_type acc rtntyp) c) b1) b2 | GRec (_,_,_,bl,tyl,bv) -> let acc = Array.fold_left (List.fold_left (fun acc (na,k,bbd,bty) -> fold (Option.fold_left fold acc bbd) bty)) acc bl in Array.fold_left fold (Array.fold_left fold acc tyl) bv | GCast (_,c,k) -> fold (match k with CastConv (_, t) -> fold acc t | CastCoerce -> acc) c | (GSort _ | GHole _ | GRef _ | GEvar _ | GPatVar _) -> acc and fold_pattern acc (_,idl,p,c) = fold acc c and fold_return_type acc (na,tyopt) = Option.fold_left fold acc tyopt in fold acc let iter_glob_constr f = fold_glob_constr (fun () -> f) () let occur_glob_constr id = let rec occur = function | GVar (loc,id') -> id = id' | GApp (loc,f,args) -> (occur f) or (List.exists occur args) | GLambda (loc,na,bk,ty,c) -> (occur ty) or ((na <> Name id) & (occur c)) | GProd (loc,na,bk,ty,c) -> (occur ty) or ((na <> Name id) & (occur c)) | GLetIn (loc,na,b,c) -> (occur b) or ((na <> Name id) & (occur c)) | GCases (loc,sty,rtntypopt,tml,pl) -> (occur_option rtntypopt) or (List.exists (fun (tm,_) -> occur tm) tml) or (List.exists occur_pattern pl) | GLetTuple (loc,nal,rtntyp,b,c) -> occur_return_type rtntyp id or (occur b) or (not (List.mem (Name id) nal) & (occur c)) | GIf (loc,c,rtntyp,b1,b2) -> occur_return_type rtntyp id or (occur c) or (occur b1) or (occur b2) | GRec (loc,fk,idl,bl,tyl,bv) -> not (array_for_all4 (fun fid bl ty bd -> let rec occur_fix = function [] -> not (occur ty) && (fid=id or not(occur bd)) | (na,k,bbd,bty)::bl -> not (occur bty) && (match bbd with Some bd -> not (occur bd) | _ -> true) && (na=Name id or not(occur_fix bl)) in occur_fix bl) idl bl tyl bv) | GCast (loc,c,k) -> (occur c) or (match k with CastConv (_, t) -> occur t | CastCoerce -> false) | (GSort _ | GHole _ | GRef _ | GEvar _ | GPatVar _) -> false and occur_pattern (loc,idl,p,c) = not (List.mem id idl) & (occur c) and occur_option = function None -> false | Some p -> occur p and occur_return_type (na,tyopt) id = na <> Name id & occur_option tyopt in occur let add_name_to_ids set na = match na with | Anonymous -> set | Name id -> Idset.add id set let free_glob_vars = let rec vars bounded vs = function | GVar (loc,id') -> if Idset.mem id' bounded then vs else Idset.add id' vs | GApp (loc,f,args) -> List.fold_left (vars bounded) vs (f::args) | GLambda (loc,na,_,ty,c) | GProd (loc,na,_,ty,c) | GLetIn (loc,na,ty,c) -> let vs' = vars bounded vs ty in let bounded' = add_name_to_ids bounded na in vars bounded' vs' c | GCases (loc,sty,rtntypopt,tml,pl) -> let vs1 = vars_option bounded vs rtntypopt in let vs2 = List.fold_left (fun vs (tm,_) -> vars bounded vs tm) vs1 tml in List.fold_left (vars_pattern bounded) vs2 pl | GLetTuple (loc,nal,rtntyp,b,c) -> let vs1 = vars_return_type bounded vs rtntyp in let vs2 = vars bounded vs1 b in let bounded' = List.fold_left add_name_to_ids bounded nal in vars bounded' vs2 c | GIf (loc,c,rtntyp,b1,b2) -> let vs1 = vars_return_type bounded vs rtntyp in let vs2 = vars bounded vs1 c in let vs3 = vars bounded vs2 b1 in vars bounded vs3 b2 | GRec (loc,fk,idl,bl,tyl,bv) -> let bounded' = Array.fold_right Idset.add idl bounded in let vars_fix i vs fid = let vs1,bounded1 = List.fold_left (fun (vs,bounded) (na,k,bbd,bty) -> let vs' = vars_option bounded vs bbd in let vs'' = vars bounded vs' bty in let 
bounded' = add_name_to_ids bounded na in (vs'',bounded') ) (vs,bounded') bl.(i) in let vs2 = vars bounded1 vs1 tyl.(i) in vars bounded1 vs2 bv.(i) in array_fold_left_i vars_fix vs idl | GCast (loc,c,k) -> let v = vars bounded vs c in (match k with CastConv (_,t) -> vars bounded v t | _ -> v) | (GSort _ | GHole _ | GRef _ | GEvar _ | GPatVar _) -> vs and vars_pattern bounded vs (loc,idl,p,c) = let bounded' = List.fold_right Idset.add idl bounded in vars bounded' vs c and vars_option bounded vs = function None -> vs | Some p -> vars bounded vs p and vars_return_type bounded vs (na,tyopt) = let bounded' = add_name_to_ids bounded na in vars_option bounded' vs tyopt in fun rt -> let vs = vars Idset.empty Idset.empty rt in Idset.elements vs let loc_of_glob_constr = function | GRef (loc,_) -> loc | GVar (loc,_) -> loc | GEvar (loc,_,_) -> loc | GPatVar (loc,_) -> loc | GApp (loc,_,_) -> loc | GLambda (loc,_,_,_,_) -> loc | GProd (loc,_,_,_,_) -> loc | GLetIn (loc,_,_,_) -> loc | GCases (loc,_,_,_,_) -> loc | GLetTuple (loc,_,_,_,_) -> loc | GIf (loc,_,_,_,_) -> loc | GRec (loc,_,_,_,_,_) -> loc | GSort (loc,_) -> loc | GHole (loc,_) -> loc | GCast (loc,_,_) -> loc (**********************************************************************) (* Conversion from glob_constr to cases pattern, if possible *) let rec cases_pattern_of_glob_constr na = function | GVar (loc,id) when na<>Anonymous -> (* Unable to manage the presence of both an alias and a variable *) raise Not_found | GVar (loc,id) -> PatVar (loc,Name id) | GHole (loc,_) -> PatVar (loc,na) | GRef (loc,ConstructRef cstr) -> PatCstr (loc,cstr,[],na) | GApp (loc,GRef (_,ConstructRef (ind,_ as cstr)),args) -> let mib,_ = Global.lookup_inductive ind in let nparams = mib.Declarations.mind_nparams in if nparams > List.length args then user_err_loc (loc,"",Pp.str "Invalid notation for pattern."); let params,args = list_chop nparams args in List.iter (function GHole _ -> () | _ -> user_err_loc (loc,"",Pp.str"Invalid notation for pattern.")) params; let args = List.map (cases_pattern_of_glob_constr Anonymous) args in PatCstr (loc,cstr,args,na) | _ -> raise Not_found let rec cases_pattern_of_glob_constr na = function | GVar (loc,id) when na<>Anonymous -> (* Unable to manage the presence of both an alias and a variable *) raise Not_found | GVar (loc,id) -> PatVar (loc,Name id) | GHole (loc,_) -> PatVar (loc,na) | GRef (loc,ConstructRef cstr) -> PatCstr (loc,cstr,[],na) | GApp (loc,GRef (_,ConstructRef (ind,_ as cstr)),args) -> let mib,_ = Global.lookup_inductive ind in let nparams = mib.Declarations.mind_nparams in if nparams > List.length args then user_err_loc (loc,"",Pp.str "Invalid notation for pattern."); let params,args = list_chop nparams args in List.iter (function GHole _ -> () | _ -> user_err_loc (loc,"",Pp.str"Invalid notation for pattern.")) params; let args = List.map (cases_pattern_of_glob_constr Anonymous) args in PatCstr (loc,cstr,args,na) | _ -> raise Not_found (* Turn a closed cases pattern into a glob_constr *) let rec glob_constr_of_closed_cases_pattern_aux = function | PatCstr (loc,cstr,[],Anonymous) -> GRef (loc,ConstructRef cstr) | PatCstr (loc,cstr,l,Anonymous) -> let ref = GRef (loc,ConstructRef cstr) in GApp (loc,ref, List.map glob_constr_of_closed_cases_pattern_aux l) | _ -> raise Not_found let glob_constr_of_closed_cases_pattern = function | PatCstr (loc,cstr,l,na) -> na,glob_constr_of_closed_cases_pattern_aux (PatCstr (loc,cstr,l,Anonymous)) | _ -> raise Not_found 
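(* Added illustrative note, not part of the original source: as a hedged
   sketch of the two conversions above, a closed pattern such as
     PatCstr (loc, cstr, [PatCstr (loc, cstr', [], Anonymous)], Anonymous)
   is turned by [glob_constr_of_closed_cases_pattern] into
     (Anonymous, GApp (loc, GRef (loc, ConstructRef cstr),
                       [GRef (loc, ConstructRef cstr')]))
   and, assuming the inductive type of [cstr] has no parameters,
   [cases_pattern_of_glob_constr Anonymous] maps that term back to the
   pattern; a pattern containing a [PatVar] instead makes
   [glob_constr_of_closed_cases_pattern] raise [Not_found]. *)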
(**********************************************************************) (* Reduction expressions *) type 'a glob_red_flag = { rBeta : bool; rIota : bool; rZeta : bool; rDelta : bool; (* true = delta all but rConst; false = delta only on rConst*) rConst : 'a list } let all_flags = {rBeta = true; rIota = true; rZeta = true; rDelta = true; rConst = []} type 'a or_var = ArgArg of 'a | ArgVar of identifier located type occurrences_expr = bool * int or_var list let all_occurrences_expr_but l = (false,l) let no_occurrences_expr_but l = (true,l) let all_occurrences_expr = (false,[]) let no_occurrences_expr = (true,[]) type 'a with_occurrences = occurrences_expr * 'a type ('a,'b,'c) red_expr_gen = | Red of bool | Hnf | Simpl of 'c with_occurrences option | Cbv of 'b glob_red_flag | Lazy of 'b glob_red_flag | Unfold of 'b with_occurrences list | Fold of 'a list | Pattern of 'a with_occurrences list | ExtraRedExpr of string | CbvVm type ('a,'b,'c) may_eval = | ConstrTerm of 'a | ConstrEval of ('a,'b,'c) red_expr_gen * 'a | ConstrContext of (loc * identifier) * 'a | ConstrTypeOf of 'a coq-8.4pl2/pretyping/cbv.ml0000640000175000001440000003342012010532755014756 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [S.c]b * This corresponds to the usual strategy of weak reduction * FIXP(op,bd,S,args) is the fixpoint (Fix or Cofix) of bodies bd under * the bindings S, and then applied to args. Here again, * weak reduction. * CONSTR(c,args) is the constructor [c] applied to [args]. * *) type cbv_value = | VAL of int * constr | STACK of int * cbv_value * cbv_stack | CBN of constr * cbv_value subs | LAM of int * (name * constr) list * constr * cbv_value subs | FIXP of fixpoint * cbv_value subs * cbv_value array | COFIXP of cofixpoint * cbv_value subs * cbv_value array | CONSTR of constructor * cbv_value array (* type of terms with a hole. This hole can appear only under App or Case. * TOP means the term is considered without context * APP(v,stk) means the term is applied to v, and then the context stk * (v.0 is the first argument). * this corresponds to the application stack of the KAM. * The members of l are values: we evaluate arguments before calling the function. * CASE(t,br,pat,S,stk) means the term is in a case (which is himself in stk * t is the type of the case and br are the branches, all of them under * the subs S, pat is information on the patterns of the Case * (Weak reduction: we propagate the sub only when the selected branch * is determined) * * Important remark: the APPs should be collapsed: * (APP (l,(APP ...))) forbidden *) and cbv_stack = | TOP | APP of cbv_value array * cbv_stack | CASE of constr * constr array * case_info * cbv_value subs * cbv_stack (* les vars pourraient etre des constr, cela permet de retarder les lift: utile ?? *) (* relocation of a value; used when a value stored in a context is expanded * in a larger context. e.g. 
[%k (S.t)](k+1) --> [^k]t (t is shifted of k) *) let rec shift_value n = function | VAL (k,t) -> VAL (k+n,t) | STACK(k,v,stk) -> STACK(k+n,v,stk) | CBN (t,s) -> CBN(t,subs_shft(n,s)) | LAM (nlams,ctxt,b,s) -> LAM (nlams,ctxt,b,subs_shft (n,s)) | FIXP (fix,s,args) -> FIXP (fix,subs_shft (n,s), Array.map (shift_value n) args) | COFIXP (cofix,s,args) -> COFIXP (cofix,subs_shft (n,s), Array.map (shift_value n) args) | CONSTR (c,args) -> CONSTR (c, Array.map (shift_value n) args) let shift_value n v = if n = 0 then v else shift_value n v (* Contracts a fixpoint: given a fixpoint and a bindings, * returns the corresponding fixpoint body, and the bindings in which * it should be evaluated: its first variables are the fixpoint bodies * (S, (fix Fi {F0 := T0 .. Fn-1 := Tn-1})) * -> (S. [S]F0 . [S]F1 ... . [S]Fn-1, Ti) *) let contract_fixp env ((reci,i),(_,_,bds as bodies)) = let make_body j = FIXP(((reci,j),bodies), env, [||]) in let n = Array.length bds in subs_cons(Array.init n make_body, env), bds.(i) let contract_cofixp env (i,(_,_,bds as bodies)) = let make_body j = COFIXP((j,bodies), env, [||]) in let n = Array.length bds in subs_cons(Array.init n make_body, env), bds.(i) let make_constr_ref n = function | RelKey p -> mkRel (n+p) | VarKey id -> mkVar id | ConstKey cst -> mkConst cst (* Adds an application list. Collapse APPs! *) let stack_app appl stack = if Array.length appl = 0 then stack else match stack with | APP(args,stk) -> APP(Array.append appl args,stk) | _ -> APP(appl, stack) let rec stack_concat stk1 stk2 = match stk1 with TOP -> stk2 | APP(v,stk1') -> APP(v,stack_concat stk1' stk2) | CASE(c,b,i,s,stk1') -> CASE(c,b,i,s,stack_concat stk1' stk2) (* merge stacks when there is no shifts in between *) let mkSTACK = function v, TOP -> v | STACK(0,v0,stk0), stk -> STACK(0,v0,stack_concat stk0 stk) | v,stk -> STACK(0,v,stk) (* Change: zeta reduction cannot be avoided in CBV *) open RedFlags let red_set_ref flags = function | RelKey _ -> red_set flags fDELTA | VarKey id -> red_set flags (fVAR id) | ConstKey sp -> red_set flags (fCONST sp) (* Transfer application lists from a value to the stack * useful because fixpoints may be totally applied in several times. * On the other hand, irreductible atoms absorb the full stack. *) let strip_appl head stack = match head with | FIXP (fix,env,app) -> (FIXP(fix,env,[||]), stack_app app stack) | COFIXP (cofix,env,app) -> (COFIXP(cofix,env,[||]), stack_app app stack) | CONSTR (c,app) -> (CONSTR(c,[||]), stack_app app stack) | _ -> (head, stack) (* Tests if fixpoint reduction is possible. *) let fixp_reducible flgs ((reci,i),_) stk = if red_set flgs fIOTA then match stk with | APP(appl,_) -> Array.length appl > reci.(i) && (match appl.(reci.(i)) with CONSTR _ -> true | _ -> false) | _ -> false else false let cofixp_reducible flgs _ stk = if red_set flgs fIOTA then match stk with | (CASE _ | APP(_,CASE _)) -> true | _ -> false else false (* The main recursive functions * * Go under applications and cases (pushed in the stack), expand head * constants or substitued de Bruijn, and try to make appear a * constructor, a lambda or a fixp in the head. If not, it is a value * and is completely computed here. The head redexes are NOT reduced: * the function returns the pair of a cbv_value and its stack. * * Invariant: if the result of norm_head is CONSTR or (CO)FIXP, it last * argument is []. Because we must put all the applied terms in the * stack. 
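 *
 * Added illustrative note (a hedged sketch, not in the original source):
 * for an application [(fix f n := ...) 0], [norm_head] first evaluates and
 * pushes the argument, reaching the pair
 *   (FIXP (fix, env, [||]), APP ([|CONSTR (c_O, [||])|], TOP))
 * where [c_O] is only a placeholder name for the constructor of 0; since
 * the recursive argument is a CONSTR, [fixp_reducible] accepts this
 * configuration and [cbv_stack_term] contracts the fixpoint with
 * [contract_fixp].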
*) let rec norm_head info env t stack = (* no reduction under binders *) match kind_of_term t with (* stack grows (remove casts) *) | App (head,args) -> (* Applied terms are normalized immediately; they could be computed when getting out of the stack *) let nargs = Array.map (cbv_stack_term info TOP env) args in norm_head info env head (stack_app nargs stack) | Case (ci,p,c,v) -> norm_head info env c (CASE(p,v,ci,env,stack)) | Cast (ct,_,_) -> norm_head info env ct stack (* constants, axioms * the first pattern is CRUCIAL, n=0 happens very often: * when reducing closed terms, n is always 0 *) | Rel i -> (match expand_rel i env with | Inl (0,v) -> strip_appl v stack | Inl (n,v) -> strip_appl (shift_value n v) stack | Inr (n,None) -> (VAL(0, mkRel n), stack) | Inr (n,Some p) -> norm_head_ref (n-p) info env stack (RelKey p)) | Var id -> norm_head_ref 0 info env stack (VarKey id) | Const sp -> norm_head_ref 0 info env stack (ConstKey sp) | LetIn (_, b, _, c) -> (* zeta means letin are contracted; delta without zeta means we *) (* allow bindings but leave let's in place *) if red_set (info_flags info) fZETA then (* New rule: for Cbv, Delta does not apply to locally bound variables or red_set (info_flags info) fDELTA *) let env' = subs_cons ([|cbv_stack_term info TOP env b|],env) in norm_head info env' c stack else (CBN(t,env), stack) (* Considrer une coupure commutative ? *) | Evar ev -> (match evar_value info ev with Some c -> norm_head info env c stack | None -> (VAL(0, t), stack)) (* non-neutral cases *) | Lambda _ -> let ctxt,b = decompose_lam t in (LAM(List.length ctxt, List.rev ctxt,b,env), stack) | Fix fix -> (FIXP(fix,env,[||]), stack) | CoFix cofix -> (COFIXP(cofix,env,[||]), stack) | Construct c -> (CONSTR(c, [||]), stack) (* neutral cases *) | (Sort _ | Meta _ | Ind _) -> (VAL(0, t), stack) | Prod _ -> (CBN(t,env), stack) and norm_head_ref k info env stack normt = if red_set_ref (info_flags info) normt then match ref_value_cache info normt with | Some body -> strip_appl (shift_value k body) stack | None -> (VAL(0,make_constr_ref k normt),stack) else (VAL(0,make_constr_ref k normt),stack) (* cbv_stack_term performs weak reduction on constr t under the subs * env, with context stack, i.e. ([env]t stack). First computes weak * head normal form of t and checks if a redex appears with the stack. * If so, recursive call to reach the real head normal form. If not, * we build a value. 
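 *
 * Added hedged illustration (not in the original source): when a
 * [LAM (nlams, ...)] meets an [APP (args, stk)] below, three cases arise:
 * with exactly [nlams] arguments the body is evaluated in the extended
 * environment; with more arguments the extra ones stay on the stack and
 * the body is evaluated applied to them; with fewer arguments the result
 * is a smaller [LAM] whose environment already binds the available
 * arguments.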
*) and cbv_stack_term info stack env t = match norm_head info env t stack with (* a lambda meets an application -> BETA *) | (LAM (nlams,ctxt,b,env), APP (args, stk)) when red_set (info_flags info) fBETA -> let nargs = Array.length args in if nargs == nlams then cbv_stack_term info stk (subs_cons(args,env)) b else if nlams < nargs then let env' = subs_cons(Array.sub args 0 nlams, env) in let eargs = Array.sub args nlams (nargs-nlams) in cbv_stack_term info (APP(eargs,stk)) env' b else let ctxt' = list_skipn nargs ctxt in LAM(nlams-nargs,ctxt', b, subs_cons(args,env)) (* a Fix applied enough -> IOTA *) | (FIXP(fix,env,[||]), stk) when fixp_reducible (info_flags info) fix stk -> let (envf,redfix) = contract_fixp env fix in cbv_stack_term info stk envf redfix (* constructor guard satisfied or Cofix in a Case -> IOTA *) | (COFIXP(cofix,env,[||]), stk) when cofixp_reducible (info_flags info) cofix stk-> let (envf,redfix) = contract_cofixp env cofix in cbv_stack_term info stk envf redfix (* constructor in a Case -> IOTA *) | (CONSTR((sp,n),[||]), APP(args,CASE(_,br,ci,env,stk))) when red_set (info_flags info) fIOTA -> let cargs = Array.sub args ci.ci_npar (Array.length args - ci.ci_npar) in cbv_stack_term info (stack_app cargs stk) env br.(n-1) (* constructor of arity 0 in a Case -> IOTA *) | (CONSTR((_,n),[||]), CASE(_,br,_,env,stk)) when red_set (info_flags info) fIOTA -> cbv_stack_term info stk env br.(n-1) (* may be reduced later by application *) | (FIXP(fix,env,[||]), APP(appl,TOP)) -> FIXP(fix,env,appl) | (COFIXP(cofix,env,[||]), APP(appl,TOP)) -> COFIXP(cofix,env,appl) | (CONSTR(c,[||]), APP(appl,TOP)) -> CONSTR(c,appl) (* definitely a value *) | (head,stk) -> mkSTACK(head, stk) (* When we are sure t will never produce a redex with its stack, we * normalize (even under binders) the applied terms and we build the * final term *) let rec apply_stack info t = function | TOP -> t | APP (args,st) -> apply_stack info (mkApp(t,Array.map (cbv_norm_value info) args)) st | CASE (ty,br,ci,env,st) -> apply_stack info (mkCase (ci, cbv_norm_term info env ty, t, Array.map (cbv_norm_term info env) br)) st (* performs the reduction on a constr, and returns a constr *) and cbv_norm_term info env t = (* reduction under binders *) cbv_norm_value info (cbv_stack_term info TOP env t) (* reduction of a cbv_value to a constr *) and cbv_norm_value info = function (* reduction under binders *) | VAL (n,t) -> lift n t | STACK (0,v,stk) -> apply_stack info (cbv_norm_value info v) stk | STACK (n,v,stk) -> lift n (apply_stack info (cbv_norm_value info v) stk) | CBN(t,env) -> map_constr_with_binders subs_lift (cbv_norm_term info) env t | LAM (n,ctxt,b,env) -> let nctxt = list_map_i (fun i (x,ty) -> (x,cbv_norm_term info (subs_liftn i env) ty)) 0 ctxt in compose_lam (List.rev nctxt) (cbv_norm_term info (subs_liftn n env) b) | FIXP ((lij,(names,lty,bds)),env,args) -> mkApp (mkFix (lij, (names, Array.map (cbv_norm_term info env) lty, Array.map (cbv_norm_term info (subs_liftn (Array.length lty) env)) bds)), Array.map (cbv_norm_value info) args) | COFIXP ((j,(names,lty,bds)),env,args) -> mkApp (mkCoFix (j, (names,Array.map (cbv_norm_term info env) lty, Array.map (cbv_norm_term info (subs_liftn (Array.length lty) env)) bds)), Array.map (cbv_norm_value info) args) | CONSTR (c,args) -> mkApp(mkConstruct c, Array.map (cbv_norm_value info) args) (* with profiling *) let cbv_norm infos constr = with_stats (lazy (cbv_norm_term infos (subs_id 0) constr)) type cbv_infos = cbv_value infos (* constant bodies are normalized at the 
first expansion *) let create_cbv_infos flgs env sigma = create (fun old_info c -> cbv_stack_term old_info TOP (subs_id 0) c) flgs env (Reductionops.safe_evar_value sigma) coq-8.4pl2/pretyping/coercion.mli0000640000175000001440000000552112010532755016157 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evar_map -> unsafe_judgment -> evar_map * unsafe_judgment (** [inh_coerce_to_sort env isevars j] coerces [j] to a type; i.e. it inserts a coercion into [j], if needed, in such a way it gets as type a sort; it fails if no coercion is applicable *) val inh_coerce_to_sort : loc -> env -> evar_map -> unsafe_judgment -> evar_map * unsafe_type_judgment (** [inh_coerce_to_base env isevars j] coerces [j] to its base type; i.e. it inserts a coercion into [j], if needed, in such a way it gets as type its base type (the notion depends on the coercion system) *) val inh_coerce_to_base : loc -> env -> evar_map -> unsafe_judgment -> evar_map * unsafe_judgment (** [inh_coerce_to_prod env isevars t] coerces [t] to a product type *) val inh_coerce_to_prod : loc -> env -> evar_map -> type_constraint_type -> evar_map * type_constraint_type (** [inh_conv_coerce_to loc env isevars j t] coerces [j] to an object of type [t]; i.e. it inserts a coercion into [j], if needed, in such a way [t] and [j.uj_type] are convertible; it fails if no coercion is applicable *) val inh_conv_coerce_to : loc -> env -> evar_map -> unsafe_judgment -> type_constraint_type -> evar_map * unsafe_judgment val inh_conv_coerce_rigid_to : loc -> env -> evar_map -> unsafe_judgment -> type_constraint_type -> evar_map * unsafe_judgment (** [inh_conv_coerces_to loc env isevars t t'] checks if an object of type [t] is coercible to an object of type [t'] adding evar constraints if needed; it fails if no coercion exists *) val inh_conv_coerces_to : loc -> env -> evar_map -> types -> type_constraint_type -> evar_map (** [inh_pattern_coerce_to loc env isevars pat ind1 ind2] coerces the Cases pattern [pat] typed in [ind1] into a pattern typed in [ind2]; raises [Not_found] if no coercion found *) val inh_pattern_coerce_to : loc -> cases_pattern -> inductive -> inductive -> cases_pattern end module Default : S coq-8.4pl2/pretyping/pattern.ml0000640000175000001440000003276412010532755015673 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* _ ? 
*) } type constr_pattern = | PRef of global_reference | PVar of identifier | PEvar of existential_key * constr_pattern array | PRel of int | PApp of constr_pattern * constr_pattern array | PSoApp of patvar * constr_pattern list | PLambda of name * constr_pattern * constr_pattern | PProd of name * constr_pattern * constr_pattern | PLetIn of name * constr_pattern * constr_pattern | PSort of glob_sort | PMeta of patvar option | PIf of constr_pattern * constr_pattern * constr_pattern | PCase of case_info_pattern * constr_pattern * constr_pattern * (int * int * constr_pattern) list (** constructor index, nb of args *) | PFix of fixpoint | PCoFix of cofixpoint let rec occur_meta_pattern = function | PApp (f,args) -> (occur_meta_pattern f) or (array_exists occur_meta_pattern args) | PLambda (na,t,c) -> (occur_meta_pattern t) or (occur_meta_pattern c) | PProd (na,t,c) -> (occur_meta_pattern t) or (occur_meta_pattern c) | PLetIn (na,t,c) -> (occur_meta_pattern t) or (occur_meta_pattern c) | PIf (c,c1,c2) -> (occur_meta_pattern c) or (occur_meta_pattern c1) or (occur_meta_pattern c2) | PCase(_,p,c,br) -> (occur_meta_pattern p) or (occur_meta_pattern c) or (List.exists (fun (_,_,p) -> occur_meta_pattern p) br) | PMeta _ | PSoApp _ -> true | PEvar _ | PVar _ | PRef _ | PRel _ | PSort _ | PFix _ | PCoFix _ -> false exception BoundPattern;; let rec head_pattern_bound t = match t with | PProd (_,_,b) -> head_pattern_bound b | PLetIn (_,_,b) -> head_pattern_bound b | PApp (c,args) -> head_pattern_bound c | PIf (c,_,_) -> head_pattern_bound c | PCase (_,p,c,br) -> head_pattern_bound c | PRef r -> r | PVar id -> VarRef id | PEvar _ | PRel _ | PMeta _ | PSoApp _ | PSort _ | PFix _ -> raise BoundPattern (* Perhaps they were arguments, but we don't beta-reduce *) | PLambda _ -> raise BoundPattern | PCoFix _ -> anomaly "head_pattern_bound: not a type" let head_of_constr_reference c = match kind_of_term c with | Const sp -> ConstRef sp | Construct sp -> ConstructRef sp | Ind sp -> IndRef sp | Var id -> VarRef id | _ -> anomaly "Not a rigid reference" open Evd let pattern_of_constr sigma t = let ctx = ref [] in let rec pattern_of_constr t = match kind_of_term t with | Rel n -> PRel n | Meta n -> PMeta (Some (id_of_string ("META" ^ string_of_int n))) | Var id -> PVar id | Sort (Prop c) -> PSort (GProp c) | Sort (Type _) -> PSort (GType None) | Cast (c,_,_) -> pattern_of_constr c | LetIn (na,c,_,b) -> PLetIn (na,pattern_of_constr c,pattern_of_constr b) | Prod (na,c,b) -> PProd (na,pattern_of_constr c,pattern_of_constr b) | Lambda (na,c,b) -> PLambda (na,pattern_of_constr c,pattern_of_constr b) | App (f,a) -> (match match kind_of_term f with Evar (evk,args as ev) -> (match snd (Evd.evar_source evk sigma) with MatchingVar (true,id) -> ctx := (id,None,existential_type sigma ev)::!ctx; Some id | _ -> None) | _ -> None with | Some n -> PSoApp (n,Array.to_list (Array.map pattern_of_constr a)) | None -> PApp (pattern_of_constr f,Array.map (pattern_of_constr) a)) | Const sp -> PRef (ConstRef (constant_of_kn(canonical_con sp))) | Ind sp -> PRef (canonical_gr (IndRef sp)) | Construct sp -> PRef (canonical_gr (ConstructRef sp)) | Evar (evk,ctxt as ev) -> (match snd (Evd.evar_source evk sigma) with | MatchingVar (b,id) -> ctx := (id,None,existential_type sigma ev)::!ctx; assert (not b); PMeta (Some id) | GoalEvar -> PEvar (evk,Array.map pattern_of_constr ctxt) | _ -> PMeta None) | Case (ci,p,a,br) -> let cip = { cip_style = ci.ci_pp_info.style; cip_ind = Some ci.ci_ind; cip_ind_args = Some (ci.ci_npar, 
ci.ci_pp_info.ind_nargs); cip_extensible = false } in let branch_of_constr i c = (i, ci.ci_cstr_ndecls.(i), pattern_of_constr c) in PCase (cip, pattern_of_constr p, pattern_of_constr a, Array.to_list (Array.mapi branch_of_constr br)) | Fix f -> PFix f | CoFix f -> PCoFix f in let p = pattern_of_constr t in (* side-effect *) (* Warning: the order of dependencies in ctx is not ensured *) (!ctx,p) (* To process patterns, we need a translation without typing at all. *) let map_pattern_with_binders g f l = function | PApp (p,pl) -> PApp (f l p, Array.map (f l) pl) | PSoApp (n,pl) -> PSoApp (n, List.map (f l) pl) | PLambda (n,a,b) -> PLambda (n,f l a,f (g n l) b) | PProd (n,a,b) -> PProd (n,f l a,f (g n l) b) | PLetIn (n,a,b) -> PLetIn (n,f l a,f (g n l) b) | PIf (c,b1,b2) -> PIf (f l c,f l b1,f l b2) | PCase (ci,po,p,pl) -> PCase (ci,f l po,f l p, List.map (fun (i,n,c) -> (i,n,f l c)) pl) (* Non recursive *) | (PVar _ | PEvar _ | PRel _ | PRef _ | PSort _ | PMeta _ (* Bound to terms *) | PFix _ | PCoFix _ as x) -> x let error_instantiate_pattern id l = let is = if List.length l = 1 then "is" else "are" in errorlabstrm "" (str "Cannot substitute the term bound to " ++ pr_id id ++ strbrk " in pattern because the term refers to " ++ pr_enum pr_id l ++ strbrk " which " ++ str is ++ strbrk " not bound in the pattern.") let instantiate_pattern sigma lvar c = let rec aux vars = function | PVar id as x -> (try let ctx,c = List.assoc id lvar in try let inst = List.map (fun id -> mkRel (list_index (Name id) vars)) ctx in let c = substl inst c in snd (pattern_of_constr sigma c) with Not_found (* list_index failed *) -> let vars = list_map_filter (function Name id -> Some id | _ -> None) vars in error_instantiate_pattern id (list_subtract ctx vars) with Not_found (* List.assoc failed *) -> x) | (PFix _ | PCoFix _) -> error ("Non instantiable pattern.") | c -> map_pattern_with_binders (fun id vars -> id::vars) aux vars c in aux [] c let rec liftn_pattern k n = function | PRel i as x -> if i >= n then PRel (i+k) else x | PFix x -> PFix (destFix (liftn k n (mkFix x))) | PCoFix x -> PCoFix (destCoFix (liftn k n (mkCoFix x))) | c -> map_pattern_with_binders (fun _ -> succ) (liftn_pattern k) n c let lift_pattern k = liftn_pattern k 1 let rec subst_pattern subst pat = match pat with | PRef ref -> let ref',t = subst_global subst ref in if ref' == ref then pat else snd (pattern_of_constr Evd.empty t) | PVar _ | PEvar _ | PRel _ -> pat | PApp (f,args) -> let f' = subst_pattern subst f in let args' = array_smartmap (subst_pattern subst) args in if f' == f && args' == args then pat else PApp (f',args') | PSoApp (i,args) -> let args' = list_smartmap (subst_pattern subst) args in if args' == args then pat else PSoApp (i,args') | PLambda (name,c1,c2) -> let c1' = subst_pattern subst c1 in let c2' = subst_pattern subst c2 in if c1' == c1 && c2' == c2 then pat else PLambda (name,c1',c2') | PProd (name,c1,c2) -> let c1' = subst_pattern subst c1 in let c2' = subst_pattern subst c2 in if c1' == c1 && c2' == c2 then pat else PProd (name,c1',c2') | PLetIn (name,c1,c2) -> let c1' = subst_pattern subst c1 in let c2' = subst_pattern subst c2 in if c1' == c1 && c2' == c2 then pat else PLetIn (name,c1',c2') | PSort _ | PMeta _ -> pat | PIf (c,c1,c2) -> let c' = subst_pattern subst c in let c1' = subst_pattern subst c1 in let c2' = subst_pattern subst c2 in if c' == c && c1' == c1 && c2' == c2 then pat else PIf (c',c1',c2') | PCase (cip,typ,c,branches) -> let ind = cip.cip_ind in let ind' = Option.smartmap 
(Inductiveops.subst_inductive subst) ind in let cip' = if ind' == ind then cip else { cip with cip_ind = ind' } in let typ' = subst_pattern subst typ in let c' = subst_pattern subst c in let subst_branch ((i,n,c) as br) = let c' = subst_pattern subst c in if c' == c then br else (i,n,c') in let branches' = list_smartmap subst_branch branches in if cip' == cip && typ' == typ && c' == c && branches' == branches then pat else PCase(cip', typ', c', branches') | PFix fixpoint -> let cstr = mkFix fixpoint in let fixpoint' = destFix (subst_mps subst cstr) in if fixpoint' == fixpoint then pat else PFix fixpoint' | PCoFix cofixpoint -> let cstr = mkCoFix cofixpoint in let cofixpoint' = destCoFix (subst_mps subst cstr) in if cofixpoint' == cofixpoint then pat else PCoFix cofixpoint' let mkPLambda na b = PLambda(na,PMeta None,b) let rev_it_mkPLambda = List.fold_right mkPLambda let err loc pp = user_err_loc (loc,"pattern_of_glob_constr", pp) let rec pat_of_raw metas vars = function | GVar (_,id) -> (try PRel (list_index (Name id) vars) with Not_found -> PVar id) | GPatVar (_,(false,n)) -> metas := n::!metas; PMeta (Some n) | GRef (_,gr) -> PRef (canonical_gr gr) (* Hack pour ne pas rcrire une interprtation complte des patterns*) | GApp (_, GPatVar (_,(true,n)), cl) -> metas := n::!metas; PSoApp (n, List.map (pat_of_raw metas vars) cl) | GApp (_,c,cl) -> PApp (pat_of_raw metas vars c, Array.of_list (List.map (pat_of_raw metas vars) cl)) | GLambda (_,na,bk,c1,c2) -> name_iter (fun n -> metas := n::!metas) na; PLambda (na, pat_of_raw metas vars c1, pat_of_raw metas (na::vars) c2) | GProd (_,na,bk,c1,c2) -> name_iter (fun n -> metas := n::!metas) na; PProd (na, pat_of_raw metas vars c1, pat_of_raw metas (na::vars) c2) | GLetIn (_,na,c1,c2) -> name_iter (fun n -> metas := n::!metas) na; PLetIn (na, pat_of_raw metas vars c1, pat_of_raw metas (na::vars) c2) | GSort (_,s) -> PSort s | GHole _ -> PMeta None | GCast (_,c,_) -> Flags.if_warn Pp.msg_warning (str "Cast not taken into account in constr pattern"); pat_of_raw metas vars c | GIf (_,c,(_,None),b1,b2) -> PIf (pat_of_raw metas vars c, pat_of_raw metas vars b1,pat_of_raw metas vars b2) | GLetTuple (loc,nal,(_,None),b,c) -> let mkGLambda c na = GLambda (loc,na,Explicit,GHole (loc,Evd.InternalHole),c) in let c = List.fold_left mkGLambda c nal in let cip = { cip_style = LetStyle; cip_ind = None; cip_ind_args = None; cip_extensible = false } in PCase (cip, PMeta None, pat_of_raw metas vars b, [0,1,pat_of_raw metas vars c]) | GCases (loc,sty,p,[c,(na,indnames)],brs) -> let get_ind = function | (_,_,[PatCstr(_,(ind,_),_,_)],_)::_ -> Some ind | _ -> None in let ind_nargs,ind = match indnames with | Some (_,ind,n,nal) -> Some (n,List.length nal), Some ind | None -> None, get_ind brs in let ext,brs = pats_of_glob_branches loc metas vars ind brs in let pred = match p,indnames with | Some p, Some (_,_,_,nal) -> rev_it_mkPLambda nal (mkPLambda na (pat_of_raw metas vars p)) | _ -> PMeta None in let info = { cip_style = sty; cip_ind = ind; cip_ind_args = ind_nargs; cip_extensible = ext } in (* Nota : when we have a non-trivial predicate, the inductive type is known. Same when we have at least one non-trivial branch. These facts are used in [Constrextern]. 
*) PCase (info, pred, pat_of_raw metas vars c, brs) | r -> err (loc_of_glob_constr r) (Pp.str "Non supported pattern.") and pats_of_glob_branches loc metas vars ind brs = let get_arg = function | PatVar(_,na) -> na | PatCstr(loc,_,_,_) -> err loc (Pp.str "Non supported pattern.") in let rec get_pat indexes = function | [] -> false, [] | [(_,_,[PatVar(_,Anonymous)],GHole _)] -> true, [] (* ends with _ => _ *) | (_,_,[PatCstr(_,(indsp,j),lv,_)],br) :: brs -> if ind <> None && ind <> Some indsp then err loc (Pp.str "All constructors must be in the same inductive type."); if Intset.mem (j-1) indexes then err loc (str "No unique branch for " ++ int j ++ str"-th constructor."); let lna = List.map get_arg lv in let vars' = List.rev lna @ vars in let pat = rev_it_mkPLambda lna (pat_of_raw metas vars' br) in let ext,pats = get_pat (Intset.add (j-1) indexes) brs in ext, ((j-1, List.length lv, pat) :: pats) | (loc,_,_,_) :: _ -> err loc (Pp.str "Non supported pattern.") in get_pat Intset.empty brs let pattern_of_glob_constr c = let metas = ref [] in let p = pat_of_raw metas [] c in (!metas,p) coq-8.4pl2/pretyping/evd.ml0000640000175000001440000007273012102010667014764 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true) (named_context_of_val hyps); evar_source = (dummy_loc,InternalHole); evar_candidates = None; evar_extra = Store.empty } let evar_concl evi = evi.evar_concl let evar_hyps evi = evi.evar_hyps let evar_context evi = named_context_of_val evi.evar_hyps let evar_body evi = evi.evar_body let evar_filter evi = evi.evar_filter let evar_filtered_context evi = snd (list_filter2 (fun b c -> b) (evar_filter evi,evar_context evi)) let evar_filtered_hyps evi = List.fold_right push_named_context_val (evar_filtered_context evi) empty_named_context_val let evar_unfiltered_env evi = Global.env_of_context evi.evar_hyps let evar_env evi = List.fold_right push_named (evar_filtered_context evi) (reset_context (Global.env())) let eq_evar_info ei1 ei2 = ei1 == ei2 || eq_constr ei1.evar_concl ei2.evar_concl && eq_named_context_val (ei1.evar_hyps) (ei2.evar_hyps) && ei1.evar_body = ei2.evar_body (* spiwack: Revised hierarchy : - ExistentialMap ( Maps of existential_keys ) - EvarInfoMap ( .t = evar_info ExistentialMap.t * evar_info ExistentialMap ) - EvarMap ( .t = EvarInfoMap.t * sort_constraints ) - evar_map (exported) *) module ExistentialMap = Intmap module ExistentialSet = Intset (* This exception is raised by *.existential_value *) exception NotInstantiatedEvar (* Note: let-in contributes to the instance *) let make_evar_instance sign args = let rec instrec = function | (id,_,_) :: sign, c::args when isVarId id c -> instrec (sign,args) | (id,_,_) :: sign, c::args -> (id,c) :: instrec (sign,args) | [],[] -> [] | [],_ | _,[] -> anomaly "Signature and its instance do not match" in instrec (sign,args) let instantiate_evar sign c args = let inst = make_evar_instance sign args in if inst = [] then c else replace_vars inst c module EvarInfoMap = struct type t = evar_info ExistentialMap.t * evar_info ExistentialMap.t let empty = ExistentialMap.empty, ExistentialMap.empty let is_empty (d,u) = ExistentialMap.is_empty d && ExistentialMap.is_empty u let has_undefined (_,u) = not (ExistentialMap.is_empty u) let to_list (def,undef) = (* Workaround for change in Map.fold behavior in ocaml 3.08.4 *) let l = ref [] in ExistentialMap.iter (fun evk x -> l := (evk,x)::!l) def; ExistentialMap.iter (fun evk x -> l := 
(evk,x)::!l) undef; !l let undefined_list (def,undef) = (* Order is important: needs ocaml >= 3.08.4 from which "fold" is a "fold_left" *) ExistentialMap.fold (fun evk evi l -> (evk,evi)::l) undef [] let undefined_evars (def,undef) = (ExistentialMap.empty,undef) let defined_evars (def,undef) = (def,ExistentialMap.empty) let find (def,undef) k = try ExistentialMap.find k def with Not_found -> ExistentialMap.find k undef let find_undefined (def,undef) k = ExistentialMap.find k undef let remove (def,undef) k = (ExistentialMap.remove k def,ExistentialMap.remove k undef) let mem (def,undef) k = ExistentialMap.mem k def || ExistentialMap.mem k undef let fold (def,undef) f a = ExistentialMap.fold f def (ExistentialMap.fold f undef a) let fold_undefined (def,undef) f a = ExistentialMap.fold f undef a let exists_undefined (def,undef) f = ExistentialMap.fold (fun k v b -> b || f k v) undef false let add (def,undef) evk newinfo = if newinfo.evar_body = Evar_empty then (def,ExistentialMap.add evk newinfo undef) else (ExistentialMap.add evk newinfo def,undef) let add_undefined (def,undef) evk newinfo = assert (newinfo.evar_body = Evar_empty); (def,ExistentialMap.add evk newinfo undef) let map f (def,undef) = (ExistentialMap.map f def, ExistentialMap.map f undef) let define (def,undef) evk body = let oldinfo = try ExistentialMap.find evk undef with Not_found -> try ExistentialMap.find evk def with Not_found -> anomaly "Evd.define: cannot define undeclared evar" in let newinfo = { oldinfo with evar_body = Evar_defined body } in match oldinfo.evar_body with | Evar_empty -> (ExistentialMap.add evk newinfo def,ExistentialMap.remove evk undef) | _ -> anomaly "Evd.define: cannot define an evar twice" let is_evar = mem let is_defined (def,undef) evk = ExistentialMap.mem evk def let is_undefined (def,undef) evk = ExistentialMap.mem evk undef (*******************************************************************) (* Formerly Instantiate module *) (* Existentials. 
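   Added illustrative note (a hedged sketch, not in the original source):
   for an evar declared in the named context [x:nat; y:nat], an instance
   [|mkVar x_id; c|] is turned by [make_evar_instance] into a substitution
   binding [y] to [c] only, the trivial binding for [x] being skipped by
   the [isVarId] test; [instantiate_evar] then applies [replace_vars] with
   that substitution to the evar's body (or to its type, as in
   [existential_type] below).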
*) let existential_type sigma (n,args) = let info = try find sigma n with Not_found -> anomaly ("Evar "^(string_of_existential n)^" was not declared") in let hyps = evar_filtered_context info in instantiate_evar hyps info.evar_concl (Array.to_list args) let existential_value sigma (n,args) = let info = find sigma n in let hyps = evar_filtered_context info in match evar_body info with | Evar_defined c -> instantiate_evar hyps c (Array.to_list args) | Evar_empty -> raise NotInstantiatedEvar let existential_opt_value sigma ev = try Some (existential_value sigma ev) with NotInstantiatedEvar -> None end module EvarMap = struct type t = EvarInfoMap.t * (Univ.UniverseLSet.t * Univ.universes) let empty = EvarInfoMap.empty, (Univ.UniverseLSet.empty, Univ.initial_universes) let is_empty (sigma,_) = EvarInfoMap.is_empty sigma let has_undefined (sigma,_) = EvarInfoMap.has_undefined sigma let add (sigma,sm) k v = (EvarInfoMap.add sigma k v, sm) let add_undefined (sigma,sm) k v = (EvarInfoMap.add_undefined sigma k v, sm) let find (sigma,_) = EvarInfoMap.find sigma let find_undefined (sigma,_) = EvarInfoMap.find_undefined sigma let remove (sigma,sm) k = (EvarInfoMap.remove sigma k, sm) let mem (sigma,_) = EvarInfoMap.mem sigma let to_list (sigma,_) = EvarInfoMap.to_list sigma let undefined_list (sigma,_) = EvarInfoMap.undefined_list sigma let undefined_evars (sigma,sm) = (EvarInfoMap.undefined_evars sigma, sm) let defined_evars (sigma,sm) = (EvarInfoMap.defined_evars sigma, sm) let fold (sigma,_) = EvarInfoMap.fold sigma let fold_undefined (sigma,_) = EvarInfoMap.fold_undefined sigma let define (sigma,sm) k v = (EvarInfoMap.define sigma k v, sm) let is_evar (sigma,_) = EvarInfoMap.is_evar sigma let is_defined (sigma,_) = EvarInfoMap.is_defined sigma let is_undefined (sigma,_) = EvarInfoMap.is_undefined sigma let existential_value (sigma,_) = EvarInfoMap.existential_value sigma let existential_type (sigma,_) = EvarInfoMap.existential_type sigma let existential_opt_value (sigma,_) = EvarInfoMap.existential_opt_value sigma let progress_evar_map (sigma1,sm1 as x) (sigma2,sm2 as y) = not (x == y) && (EvarInfoMap.exists_undefined sigma1 (fun k v -> assert (v.evar_body = Evar_empty); EvarInfoMap.is_defined sigma2 k)) let merge e e' = fold e' (fun n v sigma -> add sigma n v) e let add_constraints (sigma, (us, sm)) cstrs = (sigma, (us, Univ.merge_constraints cstrs sm)) end (*******************************************************************) (* Metamaps *) (*******************************************************************) (* Constraints for existential variables *) (*******************************************************************) type 'a freelisted = { rebus : 'a; freemetas : Intset.t } (* Collects all metavars appearing in a constr *) let metavars_of c = let rec collrec acc c = match kind_of_term c with | Meta mv -> Intset.add mv acc | _ -> fold_constr collrec acc c in collrec Intset.empty c let mk_freelisted c = { rebus = c; freemetas = metavars_of c } let map_fl f cfl = { cfl with rebus=f cfl.rebus } (* Status of an instance found by unification wrt to the meta it solves: - a supertype of the meta (e.g. the solution to ?X <= T is a supertype of ?X) - a subtype of the meta (e.g. the solution to T <= ?X is a supertype of ?X) - a term that can be eta-expanded n times while still being a solution (e.g. 
the solution [P] to [?X u v = P u v] can be eta-expanded twice) *) type instance_constraint = IsSuperType | IsSubType | Conv (* Status of the unification of the type of an instance against the type of the meta it instantiates: - CoerceToType means that the unification of types has not been done and that a coercion can still be inserted: the meta should not be substituted freely (this happens for instance given via the "with" binding clause). - TypeProcessed means that the information obtainable from the unification of types has been extracted. - TypeNotProcessed means that the unification of types has not been done but it is known that no coercion may be inserted: the meta can be substituted freely. *) type instance_typing_status = CoerceToType | TypeNotProcessed | TypeProcessed (* Status of an instance together with the status of its type unification *) type instance_status = instance_constraint * instance_typing_status (* Clausal environments *) type clbinding = | Cltyp of name * constr freelisted | Clval of name * (constr freelisted * instance_status) * constr freelisted let map_clb f = function | Cltyp (na,cfl) -> Cltyp (na,map_fl f cfl) | Clval (na,(cfl1,pb),cfl2) -> Clval (na,(map_fl f cfl1,pb),map_fl f cfl2) (* name of defined is erased (but it is pretty-printed) *) let clb_name = function Cltyp(na,_) -> (na,false) | Clval (na,_,_) -> (na,true) (***********************) module Metaset = Intset let meta_exists p s = Metaset.fold (fun x b -> b || (p x)) s false module Metamap = Intmap let metamap_to_list m = Metamap.fold (fun n v l -> (n,v)::l) m [] (*************************) (* Unification state *) type conv_pb = Reduction.conv_pb type evar_constraint = conv_pb * Environ.env * constr * constr type evar_map = { evars : EvarMap.t; conv_pbs : evar_constraint list; last_mods : ExistentialSet.t; metas : clbinding Metamap.t } (*** Lifting primitive from EvarMap. ***) (* HH: The progress tactical now uses this function. *) let progress_evar_map d1 d2 = EvarMap.progress_evar_map d1.evars d2.evars (* spiwack: tentative. It might very well not be the semantics we want for merging evar_map *) let merge d1 d2 = { evars = EvarMap.merge d1.evars d2.evars ; conv_pbs = List.rev_append d1.conv_pbs d2.conv_pbs ; last_mods = ExistentialSet.union d1.last_mods d2.last_mods ; metas = Metamap.fold (fun k m r -> Metamap.add k m r) d2.metas d1.metas } let add d e i = { d with evars=EvarMap.add d.evars e i } let remove d e = { d with evars=EvarMap.remove d.evars e } let find d e = EvarMap.find d.evars e let find_undefined d e = EvarMap.find_undefined d.evars e let mem d e = EvarMap.mem d.evars e (* spiwack: this function loses information from the original evar_map it might be an idea not to export it. *) let to_list d = EvarMap.to_list d.evars let undefined_list d = EvarMap.undefined_list d.evars let undefined_evars d = { d with evars=EvarMap.undefined_evars d.evars } let defined_evars d = { d with evars=EvarMap.defined_evars d.evars } (* spiwack: not clear what folding over an evar_map, for now we shall simply fold over the inner evar_map. 
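   (Added note, not in the original source: concretely, [fold f d a] below
   visits both defined and undefined evars of [d], since [EvarMap.fold]
   delegates to [EvarInfoMap.fold], which folds over the two inner maps.)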
*) let fold f d a = EvarMap.fold d.evars f a let fold_undefined f d a = EvarMap.fold_undefined d.evars f a let is_evar d e = EvarMap.is_evar d.evars e let is_defined d e = EvarMap.is_defined d.evars e let is_undefined d e = EvarMap.is_undefined d.evars e let existential_value d e = EvarMap.existential_value d.evars e let existential_type d e = EvarMap.existential_type d.evars e let existential_opt_value d e = EvarMap.existential_opt_value d.evars e let add_constraints d e = {d with evars= EvarMap.add_constraints d.evars e} (*** /Lifting... ***) (* evar_map are considered empty disregarding histories *) let is_empty d = EvarMap.is_empty d.evars && d.conv_pbs = [] && Metamap.is_empty d.metas let subst_named_context_val s = map_named_val (subst_mps s) let subst_evar_info s evi = let subst_evb = function Evar_empty -> Evar_empty | Evar_defined c -> Evar_defined (subst_mps s c) in { evi with evar_concl = subst_mps s evi.evar_concl; evar_hyps = subst_named_context_val s evi.evar_hyps; evar_body = subst_evb evi.evar_body } let subst_evar_defs_light sub evd = assert (Univ.is_initial_universes (snd (snd evd.evars))); assert (evd.conv_pbs = []); { evd with metas = Metamap.map (map_clb (subst_mps sub)) evd.metas; evars = EvarInfoMap.map (subst_evar_info sub) (fst evd.evars), (snd evd.evars) } let subst_evar_map = subst_evar_defs_light (* spiwack: deprecated *) let create_evar_defs sigma = { sigma with conv_pbs=[]; last_mods=ExistentialSet.empty; metas=Metamap.empty } (* spiwack: tentatively deprecated *) let create_goal_evar_defs sigma = { sigma with (* conv_pbs=[]; last_mods=ExistentialSet.empty; metas=Metamap.empty } *) metas=Metamap.empty } let empty = { evars=EvarMap.empty; conv_pbs=[]; last_mods = ExistentialSet.empty; metas=Metamap.empty } let has_undefined evd = EvarMap.has_undefined evd.evars let evars_reset_evd ?(with_conv_pbs=false) evd d = {d with evars = evd.evars; conv_pbs = if with_conv_pbs then evd.conv_pbs else d.conv_pbs } let add_conv_pb pb d = {d with conv_pbs = pb::d.conv_pbs} let evar_source evk d = (EvarMap.find d.evars evk).evar_source (* define the existential of section path sp as the constr body *) let define evk body evd = { evd with evars = EvarMap.define evd.evars evk body; last_mods = match evd.conv_pbs with | [] -> evd.last_mods | _ -> ExistentialSet.add evk evd.last_mods } let evar_declare hyps evk ty ?(src=(dummy_loc,InternalHole)) ?filter ?candidates evd = let filter = if filter = None then List.map (fun _ -> true) (named_context_of_val hyps) else (let filter = Option.get filter in assert (List.length filter = List.length (named_context_of_val hyps)); filter) in { evd with evars = EvarMap.add_undefined evd.evars evk {evar_hyps = hyps; evar_concl = ty; evar_body = Evar_empty; evar_filter = filter; evar_source = src; evar_candidates = candidates; evar_extra = Store.empty } } let is_defined_evar evd (evk,_) = EvarMap.is_defined evd.evars evk (* Does k corresponds to an (un)defined existential ? 
*) let is_undefined_evar evd c = match kind_of_term c with | Evar ev -> not (is_defined_evar evd ev) | _ -> false (* extracts conversion problems that satisfy predicate p *) (* Note: conv_pbs not satisying p are stored back in reverse order *) let extract_conv_pbs evd p = let (pbs,pbs1) = List.fold_left (fun (pbs,pbs1) pb -> if p pb then (pb::pbs,pbs1) else (pbs,pb::pbs1)) ([],[]) evd.conv_pbs in {evd with conv_pbs = pbs1; last_mods = ExistentialSet.empty}, pbs let extract_changed_conv_pbs evd p = extract_conv_pbs evd (p evd.last_mods) let extract_all_conv_pbs evd = extract_conv_pbs evd (fun _ -> true) (* spiwack: should it be replaced by Evd.merge? *) let evar_merge evd evars = { evd with evars = EvarMap.merge evd.evars evars.evars } let evar_list evd c = let rec evrec acc c = match kind_of_term c with | Evar (evk, _ as ev) when mem evd evk -> ev :: acc | _ -> fold_constr evrec acc c in evrec [] c let collect_evars c = let rec collrec acc c = match kind_of_term c with | Evar (evk,_) -> ExistentialSet.add evk acc | _ -> fold_constr collrec acc c in collrec ExistentialSet.empty c (**********************************************************) (* Sort variables *) let new_univ_variable ({ evars = (sigma,(us,sm)) } as d) = let u = Termops.new_univ_level () in let us' = Univ.UniverseLSet.add u us in ({d with evars = (sigma, (us', sm))}, Univ.make_universe u) let new_sort_variable d = let (d', u) = new_univ_variable d in (d', Type u) let is_sort_variable {evars=(_,(us,_))} s = match s with Type u -> true | _ -> false let whd_sort_variable {evars=(_,sm)} t = t let univ_of_sort = function | Type u -> u | Prop Pos -> Univ.type0_univ | Prop Null -> Univ.type0m_univ let is_eq_sort s1 s2 = if s1 = s2 then None else let u1 = univ_of_sort s1 and u2 = univ_of_sort s2 in if u1 = u2 then None else Some (u1, u2) let is_univ_var_or_set u = Univ.is_univ_variable u || u = Univ.type0_univ let set_leq_sort ({evars = (sigma, (us, sm))} as d) s1 s2 = match is_eq_sort s1 s2 with | None -> d | Some (u1, u2) -> match s1, s2 with | Prop c, Prop c' -> if c = Null && c' = Pos then d else (raise (Univ.UniverseInconsistency (Univ.Le, u1, u2))) | Type u, Prop c -> if c = Pos then add_constraints d (Univ.enforce_geq Univ.type0_univ u Univ.empty_constraint) else raise (Univ.UniverseInconsistency (Univ.Le, u1, u2)) | _, Type u -> if is_univ_var_or_set u then add_constraints d (Univ.enforce_geq u2 u1 Univ.empty_constraint) else raise (Univ.UniverseInconsistency (Univ.Le, u1, u2)) let is_univ_level_var us u = match Univ.universe_level u with | Some u -> Univ.UniverseLSet.mem u us | None -> false let set_eq_sort ({evars = (sigma, (us, sm))} as d) s1 s2 = match is_eq_sort s1 s2 with | None -> d | Some (u1, u2) -> match s1, s2 with | Prop c, Type u when is_univ_level_var us u -> add_constraints d (Univ.enforce_eq u1 u2 Univ.empty_constraint) | Type u, Prop c when is_univ_level_var us u -> add_constraints d (Univ.enforce_eq u1 u2 Univ.empty_constraint) | Type u, Type v when (is_univ_level_var us u) || (is_univ_level_var us v) -> add_constraints d (Univ.enforce_eq u1 u2 Univ.empty_constraint) | Prop c, Type u when is_univ_var_or_set u && Univ.check_eq sm u1 u2 -> d | Type u, Prop c when is_univ_var_or_set u && Univ.check_eq sm u1 u2 -> d | Type u, Type v when is_univ_var_or_set u && is_univ_var_or_set v -> add_constraints d (Univ.enforce_eq u1 u2 Univ.empty_constraint) | _, _ -> raise (Univ.UniverseInconsistency (Univ.Eq, u1, u2)) (**********************************************************) (* Accessing metas *) let meta_list evd = 
metamap_to_list evd.metas let find_meta evd mv = Metamap.find mv evd.metas let undefined_metas evd = List.sort Pervasives.compare (map_succeed (function | (n,Clval(_,_,typ)) -> failwith "" | (n,Cltyp (_,typ)) -> n) (meta_list evd)) let metas_of evd = List.map (function | (n,Clval(_,_,typ)) -> (n,typ.rebus) | (n,Cltyp (_,typ)) -> (n,typ.rebus)) (meta_list evd) let map_metas_fvalue f evd = { evd with metas = Metamap.map (function | Clval(id,(c,s),typ) -> Clval(id,(mk_freelisted (f c.rebus),s),typ) | x -> x) evd.metas } let meta_opt_fvalue evd mv = match Metamap.find mv evd.metas with | Clval(_,b,_) -> Some b | Cltyp _ -> None let meta_defined evd mv = match Metamap.find mv evd.metas with | Clval _ -> true | Cltyp _ -> false let try_meta_fvalue evd mv = match Metamap.find mv evd.metas with | Clval(_,b,_) -> b | Cltyp _ -> raise Not_found let meta_fvalue evd mv = try try_meta_fvalue evd mv with Not_found -> anomaly "meta_fvalue: meta has no value" let meta_value evd mv = (fst (try_meta_fvalue evd mv)).rebus let meta_ftype evd mv = match Metamap.find mv evd.metas with | Cltyp (_,b) -> b | Clval(_,_,b) -> b let meta_type evd mv = (meta_ftype evd mv).rebus let meta_declare mv v ?(name=Anonymous) evd = { evd with metas = Metamap.add mv (Cltyp(name,mk_freelisted v)) evd.metas } let meta_assign mv (v,pb) evd = match Metamap.find mv evd.metas with | Cltyp(na,ty) -> { evd with metas = Metamap.add mv (Clval(na,(mk_freelisted v,pb),ty)) evd.metas } | _ -> anomaly "meta_assign: already defined" let meta_reassign mv (v,pb) evd = match Metamap.find mv evd.metas with | Clval(na,_,ty) -> { evd with metas = Metamap.add mv (Clval(na,(mk_freelisted v,pb),ty)) evd.metas } | _ -> anomaly "meta_reassign: not yet defined" (* If the meta is defined then forget its name *) let meta_name evd mv = try fst (clb_name (Metamap.find mv evd.metas)) with Not_found -> Anonymous let meta_with_name evd id = let na = Name id in let (mvl,mvnodef) = Metamap.fold (fun n clb (l1,l2 as l) -> let (na',def) = clb_name clb in if na = na' then if def then (n::l1,l2) else (n::l1,n::l2) else l) evd.metas ([],[]) in match mvnodef, mvl with | _,[] -> errorlabstrm "Evd.meta_with_name" (str"No such bound variable " ++ pr_id id ++ str".") | ([n],_|_,[n]) -> n | _ -> errorlabstrm "Evd.meta_with_name" (str "Binder name \"" ++ pr_id id ++ strbrk "\" occurs more than once in clause.") let meta_merge evd1 evd2 = {evd2 with metas = List.fold_left (fun m (n,v) -> Metamap.add n v m) evd2.metas (metamap_to_list evd1.metas) } type metabinding = metavariable * constr * instance_status let retract_coercible_metas evd = let mc,ml = Metamap.fold (fun n v (mc,ml) -> match v with | Clval (na,(b,(Conv,CoerceToType as s)),typ) -> (n,b.rebus,s)::mc, Metamap.add n (Cltyp (na,typ)) ml | v -> mc, Metamap.add n v ml) evd.metas ([],Metamap.empty) in mc, { evd with metas = ml } let rec list_assoc_in_triple x = function [] -> raise Not_found | (a,b,_)::l -> if compare a x = 0 then b else list_assoc_in_triple x l let subst_defined_metas bl c = let rec substrec c = match kind_of_term c with | Meta i -> substrec (list_assoc_snd_in_triple i bl) | _ -> map_constr substrec c in try Some (substrec c) with Not_found -> None (*******************************************************************) type open_constr = evar_map * constr (*******************************************************************) (* The type constructor ['a sigma] adds an evar map to an object of type ['a] *) type 'a sigma = { it : 'a ; sigma : evar_map} let sig_it x = x.it let sig_sig x = x.sigma 
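(* Added illustrative note, not part of the original source: as a hedged
   sketch of [subst_defined_metas] above, a call [subst_defined_metas bl c]
   replaces every [Meta i] occurring in [c] by the term bound to [i] in
   [bl], re-substituting the result recursively, and returns [None] as soon
   as [c] mentions a meta with no binding in [bl]. *)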
(**********************************************************) (* Failure explanation *) type unsolvability_explanation = SeveralInstancesFound of int (**********************************************************) (* Pretty-printing *) let pr_instance_status (sc,typ) = begin match sc with | IsSubType -> str " [or a subtype of it]" | IsSuperType -> str " [or a supertype of it]" | Conv -> mt () end ++ begin match typ with | CoerceToType -> str " [up to coercion]" | TypeNotProcessed -> mt () | TypeProcessed -> str " [type is checked]" end let pr_meta_map mmap = let pr_name = function Name id -> str"[" ++ pr_id id ++ str"]" | _ -> mt() in let pr_meta_binding = function | (mv,Cltyp (na,b)) -> hov 0 (pr_meta mv ++ pr_name na ++ str " : " ++ print_constr b.rebus ++ fnl ()) | (mv,Clval(na,(b,s),t)) -> hov 0 (pr_meta mv ++ pr_name na ++ str " := " ++ print_constr b.rebus ++ str " : " ++ print_constr t.rebus ++ spc () ++ pr_instance_status s ++ fnl ()) in prlist pr_meta_binding (metamap_to_list mmap) let pr_decl ((id,b,_),ok) = match b with | None -> if ok then pr_id id else (str "{" ++ pr_id id ++ str "}") | Some c -> str (if ok then "(" else "{") ++ pr_id id ++ str ":=" ++ print_constr c ++ str (if ok then ")" else "}") let pr_evar_source = function | QuestionMark _ -> str "underscore" | CasesType -> str "pattern-matching return predicate" | BinderType (Name id) -> str "type of " ++ Nameops.pr_id id | BinderType Anonymous -> str "type of anonymous binder" | ImplicitArg (c,(n,ido),b) -> let id = Option.get ido in str "parameter " ++ pr_id id ++ spc () ++ str "of" ++ spc () ++ print_constr (constr_of_global c) | InternalHole -> str "internal placeholder" | TomatchTypeParameter (ind,n) -> nth n ++ str " argument of type " ++ print_constr (mkInd ind) | GoalEvar -> str "goal evar" | ImpossibleCase -> str "type of impossible pattern-matching clause" | MatchingVar _ -> str "matching variable" let pr_evar_info evi = let phyps = try let decls = List.combine (evar_context evi) (evar_filter evi) in prlist_with_sep pr_spc pr_decl (List.rev decls) with Invalid_argument _ -> str "Ill-formed filtered context" in let pty = print_constr evi.evar_concl in let pb = match evi.evar_body with | Evar_empty -> mt () | Evar_defined c -> spc() ++ str"=> " ++ print_constr c in let candidates = match evi.evar_body, evi.evar_candidates with | Evar_empty, Some l -> spc () ++ str "{" ++ prlist_with_sep (fun () -> str "|") print_constr l ++ str "}" | _ -> mt () in let src = str "(" ++ pr_evar_source (snd evi.evar_source) ++ str ")" in hov 2 (str"[" ++ phyps ++ spc () ++ str"|- " ++ pty ++ pb ++ str"]" ++ candidates ++ spc() ++ src) let compute_evar_dependency_graph (sigma:evar_map) = (* Compute the map binding ev to the evars whose body depends on ev *) fold (fun evk evi acc -> let deps = match evar_body evi with | Evar_empty -> ExistentialSet.empty | Evar_defined c -> collect_evars c in ExistentialSet.fold (fun evk' acc -> let tab = try ExistentialMap.find evk' acc with Not_found -> [] in ExistentialMap.add evk' ((evk,evi)::tab) acc) deps acc) sigma ExistentialMap.empty let evar_dependency_closure n sigma = let graph = compute_evar_dependency_graph sigma in let order a b = fst a < fst b in let rec aux n l = if n=0 then l else let l' = list_map_append (fun (evk,_) -> try ExistentialMap.find evk graph with Not_found -> []) l in aux (n-1) (list_uniquize (Sort.list order (l@l'))) in aux n (undefined_list sigma) let pr_evar_map_t depth sigma = let (evars,(uvs,univs)) = sigma.evars in let pr_evar_list l = h 0 (prlist_with_sep pr_fnl (fun 
(ev,evi) -> h 0 (str(string_of_existential ev) ++ str"==" ++ pr_evar_info evi)) l) in let evs = if EvarInfoMap.is_empty evars then mt () else match depth with | None -> (* Print all evars *) str"EVARS:"++brk(0,1)++pr_evar_list (to_list sigma)++fnl() | Some n -> (* Print all evars *) str"UNDEFINED EVARS"++ (if n=0 then mt() else str" (+level "++int n++str" closure):")++ brk(0,1)++ pr_evar_list (evar_dependency_closure n sigma)++fnl() and svs = if Univ.UniverseLSet.is_empty uvs then mt () else str"UNIVERSE VARIABLES:"++brk(0,1)++ h 0 (prlist_with_sep pr_fnl (fun u -> Univ.pr_uni_level u) (Univ.UniverseLSet.elements uvs))++fnl() and cs = if Univ.is_initial_universes univs then mt () else str"UNIVERSES:"++brk(0,1)++ h 0 (Univ.pr_universes univs)++fnl() in evs ++ svs ++ cs let print_env_short env = let pr_body n = function None -> pr_name n | Some b -> str "(" ++ pr_name n ++ str " := " ++ print_constr b ++ str ")" in let pr_named_decl (n, b, _) = pr_body (Name n) b in let pr_rel_decl (n, b, _) = pr_body n b in let nc = List.rev (named_context env) in let rc = List.rev (rel_context env) in str "[" ++ prlist_with_sep pr_spc pr_named_decl nc ++ str "]" ++ spc () ++ str "[" ++ prlist_with_sep pr_spc pr_rel_decl rc ++ str "]" let pr_constraints pbs = h 0 (prlist_with_sep pr_fnl (fun (pbty,env,t1,t2) -> print_env_short env ++ spc () ++ str "|-" ++ spc () ++ print_constr t1 ++ spc() ++ str (match pbty with | Reduction.CONV -> "==" | Reduction.CUMUL -> "<=") ++ spc() ++ print_constr t2) pbs) let pr_evar_map_constraints evd = if evd.conv_pbs = [] then mt() else pr_constraints evd.conv_pbs++fnl() let pr_evar_map allevars evd = let pp_evm = if EvarMap.is_empty evd.evars then mt() else pr_evar_map_t allevars evd++fnl() in let cstrs = if evd.conv_pbs = [] then mt() else str"CONSTRAINTS:"++brk(0,1)++pr_constraints evd.conv_pbs++fnl() in let pp_met = if Metamap.is_empty evd.metas then mt() else str"METAS:"++brk(0,1)++pr_meta_map evd.metas in v 0 (pp_evm ++ cstrs ++ pp_met) let pr_metaset metas = str "[" ++ prlist_with_sep spc pr_meta (Metaset.elements metas) ++ str "]" coq-8.4pl2/pretyping/pretyping.ml0000640000175000001440000007142412121620060016221 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1=List.length l) possible_indexes then let indexes = Array.of_list (List.map List.hd possible_indexes) in let fix = ((indexes, 0),fixdefs) in (try check_fix env fix with e when Errors.noncritical e -> if loc = dummy_loc then raise e else Loc.raise loc e); indexes else (* we now search recursively amoungst all combinations *) (try List.iter (fun l -> let indexes = Array.of_list l in let fix = ((indexes, 0),fixdefs) in try check_fix env fix; raise (Found indexes) with TypeError _ -> ()) (list_combinations possible_indexes); let errmsg = "Cannot guess decreasing argument of fix." 
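(* Reached only when the exhaustive search above raised no [Found _]:
   every candidate combination of recursive argument positions was
   rejected by [check_fix]. *)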
in if loc = dummy_loc then error errmsg else user_err_loc (loc,"search_guard", Pp.str errmsg) with Found indexes -> indexes) (* To embed constr in glob_constr *) let ((constr_in : constr -> Dyn.t), (constr_out : Dyn.t -> constr)) = Dyn.create "constr" (** Miscellaneous interpretation functions *) let interp_sort = function | GProp c -> Prop c | GType _ -> new_Type_sort () let interp_elimination_sort = function | GProp Null -> InProp | GProp Pos -> InSet | GType _ -> InType let resolve_evars env evdref fail_evar resolve_classes = if resolve_classes then evdref := (Typeclasses.resolve_typeclasses ~filter:Typeclasses.no_goals ~split:true ~fail:fail_evar env !evdref); (* Resolve eagerly, potentially making wrong choices *) evdref := (try consider_remaining_unif_problems ~ts:(Typeclasses.classes_transparent_state ()) env !evdref with e when Errors.noncritical e -> if fail_evar then raise e else !evdref) let solve_remaining_evars fail_evar use_classes hook env initial_sigma (evd,c) = let evdref = ref evd in resolve_evars env evdref fail_evar use_classes; let rec proc_rec c = let c = Reductionops.whd_evar !evdref c in match kind_of_term c with | Evar (evk,args as ev) when not (Evd.mem initial_sigma evk) -> let sigma = !evdref in (try let c = hook env sigma ev in evdref := Evd.define evk c !evdref; c with Exit -> if fail_evar then let evi = Evd.find_undefined sigma evk in let (loc,src) = evar_source evk !evdref in Pretype_errors.error_unsolvable_implicit loc env sigma evi src None else c) | _ -> map_constr proc_rec c in let c = proc_rec c in (* Side-effect *) !evdref,c module type S = sig module Cases : Cases.S (* Allow references to syntaxically inexistent variables (i.e., if applied on an inductive) *) val allow_anonymous_refs : bool ref (* Generic call to the interpreter from glob_constr to open_constr, leaving unresolved holes as evars and returning the typing contexts of these evars. Work as [understand_gen] for the rest. *) val understand_tcc : ?resolve_classes:bool -> evar_map -> env -> ?expected_type:types -> glob_constr -> open_constr val understand_tcc_evars : ?fail_evar:bool -> ?resolve_classes:bool -> evar_map ref -> env -> typing_constraint -> glob_constr -> constr (* More general entry point with evars from ltac *) (* Generic call to the interpreter from glob_constr to constr, failing unresolved holes in the glob_constr cannot be instantiated. In [understand_ltac expand_evars sigma env ltac_env constraint c], resolve_classes : launch typeclass resolution after typechecking. 
expand_evars : expand inferred evars by their value if any sigma : initial set of existential variables (typically dependent subgoals) ltac_env : partial substitution of variables (used for the tactic language) constraint : tell if interpreted as a possibly constrained term or a type *) val understand_ltac : ?resolve_classes:bool -> bool -> evar_map -> env -> ltac_var_map -> typing_constraint -> glob_constr -> pure_open_constr (* Standard call to get a constr from a glob_constr, resolving implicit args *) val understand : evar_map -> env -> ?expected_type:Term.types -> glob_constr -> constr (* Idem but the glob_constr is intended to be a type *) val understand_type : evar_map -> env -> glob_constr -> constr (* A generalization of the two previous case *) val understand_gen : typing_constraint -> evar_map -> env -> glob_constr -> constr (* Idem but returns the judgment of the understood term *) val understand_judgment : evar_map -> env -> glob_constr -> unsafe_judgment (* Idem but do not fail on unresolved evars *) val understand_judgment_tcc : evar_map ref -> env -> glob_constr -> unsafe_judgment (*i*) (* Internal of Pretyping... * Unused outside, but useful for debugging *) val pretype : type_constraint -> env -> evar_map ref -> ltac_var_map -> glob_constr -> unsafe_judgment val pretype_type : val_constraint -> env -> evar_map ref -> ltac_var_map -> glob_constr -> unsafe_type_judgment val pretype_gen : bool -> bool -> bool -> evar_map ref -> env -> ltac_var_map -> typing_constraint -> glob_constr -> constr (*i*) end module Pretyping_F (Coercion : Coercion.S) = struct module Cases = Cases.Cases_F(Coercion) (* Allow references to syntaxically inexistent variables (i.e., if applied on an inductive) *) let allow_anonymous_refs = ref false let evd_comb0 f evdref = let (evd',x) = f !evdref in evdref := evd'; x let evd_comb1 f evdref x = let (evd',y) = f !evdref x in evdref := evd'; y let evd_comb2 f evdref x y = let (evd',z) = f !evdref x y in evdref := evd'; z let evd_comb3 f evdref x y z = let (evd',t) = f !evdref x y z in evdref := evd'; t let mt_evd = Evd.empty (* Utilis pour infrer le prdicat des Cases *) (* Semble exagrement fort *) (* Faudra prfrer une unification entre les types de toutes les clauses *) (* et autoriser des ? 
rester dans le rsultat de l'unification *) let evar_type_fixpoint loc env evdref lna lar vdefj = let lt = Array.length vdefj in if Array.length lar = lt then for i = 0 to lt-1 do if not (e_cumul env evdref (vdefj.(i)).uj_type (lift lt lar.(i))) then error_ill_typed_rec_body_loc loc env !evdref i lna vdefj lar done (* coerce to tycon if any *) let inh_conv_coerce_to_tycon loc env evdref j = function | None -> j | Some t -> evd_comb2 (Coercion.inh_conv_coerce_to loc env) evdref j t let push_rels vars env = List.fold_right push_rel vars env (* used to enforce a name in Lambda when the type constraints itself is named, hence possibly dependent *) let orelse_name name name' = match name with | Anonymous -> name' | _ -> name let invert_ltac_bound_name env id0 id = try mkRel (pi1 (lookup_rel_id id (rel_context env))) with Not_found -> errorlabstrm "" (str "Ltac variable " ++ pr_id id0 ++ str " depends on pattern variable name " ++ pr_id id ++ str " which is not bound in current context.") let protected_get_type_of env sigma c = try Retyping.get_type_of env sigma c with Anomaly _ -> errorlabstrm "" (str "Cannot reinterpret " ++ quote (print_constr c) ++ str " in the current environment.") let pretype_id loc env sigma (lvar,unbndltacvars) id = (* Look for the binder of [id] *) try let (n,_,typ) = lookup_rel_id id (rel_context env) in { uj_val = mkRel n; uj_type = lift n typ } with Not_found -> (* Check if [id] is an ltac variable *) try let (ids,c) = List.assoc id lvar in let subst = List.map (invert_ltac_bound_name env id) ids in let c = substl subst c in { uj_val = c; uj_type = protected_get_type_of env sigma c } with Not_found -> (* Check if [id] is a section or goal variable *) try let (_,_,typ) = lookup_named id env in { uj_val = mkVar id; uj_type = typ } with Not_found -> (* [id] not found, build nice error message if [id] yet known from ltac *) try match List.assoc id unbndltacvars with | None -> user_err_loc (loc,"", str "Variable " ++ pr_id id ++ str " should be bound to a term.") | Some id0 -> Pretype_errors.error_var_not_found_loc loc id0 with Not_found -> (* [id] not found, standard error message *) error_var_not_found_loc loc id let evar_kind_of_term sigma c = kind_of_term (whd_evar sigma c) (*************************************************************************) (* Main pretyping function *) let pretype_ref evdref env ref = let c = constr_of_global ref in make_judge c (Retyping.get_type_of env Evd.empty c) let pretype_ref loc evdref env = function | VarRef id -> (* Section variable *) (try let (_,_,ty) = lookup_named id env in make_judge (mkVar id) ty with Not_found -> (* This may happen if env is a goal env and section variables have been cleared - section variables should be different from goal variables *) Pretype_errors.error_var_not_found_loc loc id) | ref -> let c = constr_of_global ref in make_judge c (Retyping.get_type_of env Evd.empty c) let pretype_sort evdref = function | GProp c -> judge_of_prop_contents c | GType _ -> evd_comb0 judge_of_new_Type evdref exception Found of fixpoint let new_type_evar evdref env loc = evd_comb0 (fun evd -> Evarutil.new_type_evar evd env ~src:(loc,InternalHole)) evdref (* [pretype tycon env evdref lvar lmeta cstr] attempts to type [cstr] *) (* in environment [env], with existential variables [evdref] and *) (* the type constraint tycon *) let rec pretype (tycon : type_constraint) env evdref lvar = function | GRef (loc,ref) -> inh_conv_coerce_to_tycon loc env evdref (pretype_ref loc evdref env ref) tycon | GVar (loc, id) -> 
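(* An identifier is resolved by [pretype_id] (rel context first, then Ltac
   bindings, then section/goal variables); the resulting judgment is then
   coerced to the expected type constraint, if any. *)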
inh_conv_coerce_to_tycon loc env evdref (pretype_id loc env !evdref lvar id) tycon | GEvar (loc, evk, instopt) -> (* Ne faudrait-il pas s'assurer que hyps est bien un sous-contexte du contexte courant, et qu'il n'y a pas de Rel "cach" *) let hyps = evar_filtered_context (Evd.find !evdref evk) in let args = match instopt with | None -> instance_from_named_context hyps | Some inst -> failwith "Evar subtitutions not implemented" in let c = mkEvar (evk, args) in let j = (Retyping.get_judgment_of env !evdref c) in inh_conv_coerce_to_tycon loc env evdref j tycon | GPatVar (loc,(someta,n)) -> let ty = match tycon with | Some (None, ty) -> ty | None | Some _ -> new_type_evar evdref env loc in let k = MatchingVar (someta,n) in { uj_val = e_new_evar evdref env ~src:(loc,k) ty; uj_type = ty } | GHole (loc,k) -> let ty = match tycon with | Some (None, ty) -> ty | None | Some _ -> new_type_evar evdref env loc in { uj_val = e_new_evar evdref env ~src:(loc,k) ty; uj_type = ty } | GRec (loc,fixkind,names,bl,lar,vdef) -> let rec type_bl env ctxt = function [] -> ctxt | (na,bk,None,ty)::bl -> let ty' = pretype_type empty_valcon env evdref lvar ty in let dcl = (na,None,ty'.utj_val) in type_bl (push_rel dcl env) (add_rel_decl dcl ctxt) bl | (na,bk,Some bd,ty)::bl -> let ty' = pretype_type empty_valcon env evdref lvar ty in let bd' = pretype (mk_tycon ty'.utj_val) env evdref lvar ty in let dcl = (na,Some bd'.uj_val,ty'.utj_val) in type_bl (push_rel dcl env) (add_rel_decl dcl ctxt) bl in let ctxtv = Array.map (type_bl env empty_rel_context) bl in let larj = array_map2 (fun e ar -> pretype_type empty_valcon (push_rel_context e env) evdref lvar ar) ctxtv lar in let lara = Array.map (fun a -> a.utj_val) larj in let ftys = array_map2 (fun e a -> it_mkProd_or_LetIn a e) ctxtv lara in let nbfix = Array.length lar in let names = Array.map (fun id -> Name id) names in (* Note: bodies are not used by push_rec_types, so [||] is safe *) let newenv = push_rec_types (names,ftys,[||]) env in let vdefj = array_map2_i (fun i ctxt def -> (* we lift nbfix times the type in tycon, because of * the nbfix variables pushed to newenv *) let (ctxt,ty) = decompose_prod_n_assum (rel_context_length ctxt) (lift nbfix ftys.(i)) in let nenv = push_rel_context ctxt newenv in let j = pretype (mk_tycon ty) nenv evdref lvar def in { uj_val = it_mkLambda_or_LetIn j.uj_val ctxt; uj_type = it_mkProd_or_LetIn j.uj_type ctxt }) ctxtv vdef in evar_type_fixpoint loc env evdref names ftys vdefj; let ftys = Array.map (nf_evar !evdref) ftys in let fdefs = Array.map (fun x -> nf_evar !evdref (j_val x)) vdefj in let fixj = match fixkind with | GFix (vn,i) -> (* First, let's find the guard indexes. *) (* If recursive argument was not given by user, we try all args. An earlier approach was to look only for inductive arguments, but doing it properly involves delta-reduction, and it finally doesn't seem worth the effort (except for huge mutual fixpoints ?) 
*) let possible_indexes = Array.to_list (Array.mapi (fun i (n,_) -> match n with | Some n -> [n] | None -> list_map_i (fun i _ -> i) 0 ctxtv.(i)) vn) in let fixdecls = (names,ftys,fdefs) in let indexes = search_guard loc env possible_indexes fixdecls in make_judge (mkFix ((indexes,i),fixdecls)) ftys.(i) | GCoFix i -> let cofix = (i,(names,ftys,fdefs)) in (try check_cofix env cofix with e when Errors.noncritical e -> Loc.raise loc e); make_judge (mkCoFix cofix) ftys.(i) in inh_conv_coerce_to_tycon loc env evdref fixj tycon | GSort (loc,s) -> let j = pretype_sort evdref s in inh_conv_coerce_to_tycon loc env evdref j tycon | GApp (loc,f,args) -> let fj = pretype empty_tycon env evdref lvar f in let floc = loc_of_glob_constr f in let rec apply_rec env n resj = function | [] -> resj | c::rest -> let argloc = loc_of_glob_constr c in let resj = evd_comb1 (Coercion.inh_app_fun env) evdref resj in let resty = whd_betadeltaiota env !evdref resj.uj_type in match kind_of_term resty with | Prod (na,c1,c2) -> let hj = pretype (mk_tycon c1) env evdref lvar c in let value, typ = applist (j_val resj, [j_val hj]), subst1 hj.uj_val c2 in apply_rec env (n+1) { uj_val = value; uj_type = typ } rest | _ -> let hj = pretype empty_tycon env evdref lvar c in error_cant_apply_not_functional_loc (join_loc floc argloc) env !evdref resj [hj] in let resj = apply_rec env 1 fj args in let resj = match evar_kind_of_term !evdref resj.uj_val with | App (f,args) -> let f = whd_evar !evdref f in begin match kind_of_term f with | Ind _ | Const _ when isInd f or has_polymorphic_type (destConst f) -> let sigma = !evdref in let c = mkApp (f,Array.map (whd_evar sigma) args) in let t = Retyping.get_type_of env sigma c in make_judge c (* use this for keeping evars: resj.uj_val *) t | _ -> resj end | _ -> resj in inh_conv_coerce_to_tycon loc env evdref resj tycon | GLambda(loc,name,bk,c1,c2) -> let (name',dom,rng) = evd_comb1 (split_tycon loc env) evdref tycon in let dom_valcon = valcon_of_tycon dom in let j = pretype_type dom_valcon env evdref lvar c1 in let var = (name,None,j.utj_val) in let j' = pretype rng (push_rel var env) evdref lvar c2 in judge_of_abstraction env (orelse_name name name') j j' | GProd(loc,name,bk,c1,c2) -> let j = pretype_type empty_valcon env evdref lvar c1 in let j' = if name = Anonymous then let j = pretype_type empty_valcon env evdref lvar c2 in { j with utj_val = lift 1 j.utj_val } else let var = (name,j.utj_val) in let env' = push_rel_assum var env in pretype_type empty_valcon env' evdref lvar c2 in let resj = try judge_of_product env name j j' with TypeError _ as e -> Loc.raise loc e in inh_conv_coerce_to_tycon loc env evdref resj tycon | GLetIn(loc,name,c1,c2) -> let j = match c1 with | GCast (loc, c, CastConv (DEFAULTcast, t)) -> let tj = pretype_type empty_valcon env evdref lvar t in pretype (mk_tycon tj.utj_val) env evdref lvar c | _ -> pretype empty_tycon env evdref lvar c1 in let t = refresh_universes j.uj_type in let var = (name,Some j.uj_val,t) in let tycon = lift_tycon 1 tycon in let j' = pretype tycon (push_rel var env) evdref lvar c2 in { uj_val = mkLetIn (name, j.uj_val, t, j'.uj_val) ; uj_type = subst1 j.uj_val j'.uj_type } | GLetTuple (loc,nal,(na,po),c,d) -> let cj = pretype empty_tycon env evdref lvar c in let (IndType (indf,realargs)) = try find_rectype env !evdref cj.uj_type with Not_found -> let cloc = loc_of_glob_constr c in error_case_not_inductive_loc cloc env !evdref cj in let cstrs = get_constructors env indf in if Array.length cstrs <> 1 then user_err_loc (loc,"",str "Destructing 
let is only for inductive types with one constructor."); let cs = cstrs.(0) in if List.length nal <> cs.cs_nargs then user_err_loc (loc,"", str "Destructing let on this type expects " ++ int cs.cs_nargs ++ str " variables."); let fsign = List.map2 (fun na (_,c,t) -> (na,c,t)) (List.rev nal) cs.cs_args in let env_f = push_rels fsign env in (* Make dependencies from arity signature impossible *) let arsgn = let arsgn,_ = get_arity env indf in if not !allow_anonymous_refs then List.map (fun (_,b,t) -> (Anonymous,b,t)) arsgn else arsgn in let psign = (na,None,build_dependent_inductive env indf)::arsgn in let nar = List.length arsgn in (match po with | Some p -> let env_p = push_rels psign env in let pj = pretype_type empty_valcon env_p evdref lvar p in let ccl = nf_evar !evdref pj.utj_val in let psign = make_arity_signature env true indf in (* with names *) let p = it_mkLambda_or_LetIn ccl psign in let inst = (Array.to_list cs.cs_concl_realargs) @[build_dependent_constructor cs] in let lp = lift cs.cs_nargs p in let fty = hnf_lam_applist env !evdref lp inst in let fj = pretype (mk_tycon fty) env_f evdref lvar d in let f = it_mkLambda_or_LetIn fj.uj_val fsign in let v = let ind,_ = dest_ind_family indf in let ci = make_case_info env ind LetStyle in Typing.check_allowed_sort env !evdref ind cj.uj_val p; mkCase (ci, p, cj.uj_val,[|f|]) in { uj_val = v; uj_type = substl (realargs@[cj.uj_val]) ccl } | None -> let tycon = lift_tycon cs.cs_nargs tycon in let fj = pretype tycon env_f evdref lvar d in let f = it_mkLambda_or_LetIn fj.uj_val fsign in let ccl = nf_evar !evdref fj.uj_type in let ccl = if noccur_between 1 cs.cs_nargs ccl then lift (- cs.cs_nargs) ccl else error_cant_find_case_type_loc loc env !evdref cj.uj_val in let ccl = refresh_universes ccl in let p = it_mkLambda_or_LetIn (lift (nar+1) ccl) psign in let v = let ind,_ = dest_ind_family indf in let ci = make_case_info env ind LetStyle in Typing.check_allowed_sort env !evdref ind cj.uj_val p; mkCase (ci, p, cj.uj_val,[|f|]) in { uj_val = v; uj_type = ccl }) | GIf (loc,c,(na,po),b1,b2) -> let cj = pretype empty_tycon env evdref lvar c in let (IndType (indf,realargs)) = try find_rectype env !evdref cj.uj_type with Not_found -> let cloc = loc_of_glob_constr c in error_case_not_inductive_loc cloc env !evdref cj in let cstrs = get_constructors env indf in if Array.length cstrs <> 2 then user_err_loc (loc,"", str "If is only for inductive types with two constructors."); let arsgn = let arsgn,_ = get_arity env indf in if not !allow_anonymous_refs then (* Make dependencies from arity signature impossible *) List.map (fun (_,b,t) -> (Anonymous,b,t)) arsgn else arsgn in let nar = List.length arsgn in let psign = (na,None,build_dependent_inductive env indf)::arsgn in let pred,p = match po with | Some p -> let env_p = push_rels psign env in let pj = pretype_type empty_valcon env_p evdref lvar p in let ccl = nf_evar !evdref pj.utj_val in let pred = it_mkLambda_or_LetIn ccl psign in let typ = lift (- nar) (beta_applist (pred,[cj.uj_val])) in pred, typ | None -> let p = match tycon with | Some (None, ty) -> ty | None | Some _ -> new_type_evar evdref env loc in it_mkLambda_or_LetIn (lift (nar+1) p) psign, p in let pred = nf_evar !evdref pred in let p = nf_evar !evdref p in let f cs b = let n = rel_context_length cs.cs_args in let pi = lift n pred in (* liftn n 2 pred ? 
*) let pi = beta_applist (pi, [build_dependent_constructor cs]) in let csgn = if not !allow_anonymous_refs then List.map (fun (_,b,t) -> (Anonymous,b,t)) cs.cs_args else List.map (fun (n, b, t) -> match n with Name _ -> (n, b, t) | Anonymous -> (Name (id_of_string "H"), b, t)) cs.cs_args in let env_c = push_rels csgn env in let bj = pretype (mk_tycon pi) env_c evdref lvar b in it_mkLambda_or_LetIn bj.uj_val cs.cs_args in let b1 = f cstrs.(0) b1 in let b2 = f cstrs.(1) b2 in let v = let ind,_ = dest_ind_family indf in let ci = make_case_info env ind IfStyle in let pred = nf_evar !evdref pred in Typing.check_allowed_sort env !evdref ind cj.uj_val pred; mkCase (ci, pred, cj.uj_val, [|b1;b2|]) in { uj_val = v; uj_type = p } | GCases (loc,sty,po,tml,eqns) -> Cases.compile_cases loc sty ((fun vtyc env evdref -> pretype vtyc env evdref lvar),evdref) tycon env (* loc *) (po,tml,eqns) | GCast (loc,c,k) -> let cj = match k with CastCoerce -> let cj = pretype empty_tycon env evdref lvar c in evd_comb1 (Coercion.inh_coerce_to_base loc env) evdref cj | CastConv (k,t) -> let tj = pretype_type empty_valcon env evdref lvar t in let cj = pretype empty_tycon env evdref lvar c in let cty = nf_evar !evdref cj.uj_type and tval = nf_evar !evdref tj.utj_val in let cj = match k with | VMcast -> if not (occur_existential cty || occur_existential tval) then begin try ignore (Reduction.vm_conv Reduction.CUMUL env cty tval); cj with Reduction.NotConvertible -> error_actual_type_loc loc env !evdref cj tval end else user_err_loc (loc,"",str "Cannot check cast with vm: unresolved arguments remain.") | _ -> inh_conv_coerce_to_tycon loc env evdref cj (mk_tycon tval) in let v = mkCast (cj.uj_val, k, tval) in { uj_val = v; uj_type = tval } in inh_conv_coerce_to_tycon loc env evdref cj tycon (* [pretype_type valcon env evdref lvar c] coerces [c] into a type *) and pretype_type valcon env evdref lvar = function | GHole loc -> (match valcon with | Some v -> let s = let sigma = !evdref in let t = Retyping.get_type_of env sigma v in match kind_of_term (whd_betadeltaiota env sigma t) with | Sort s -> s | Evar ev when is_Type (existential_type sigma ev) -> evd_comb1 (define_evar_as_sort) evdref ev | _ -> anomaly "Found a type constraint which is not a type" in { utj_val = v; utj_type = s } | None -> let s = evd_comb0 new_sort_variable evdref in { utj_val = e_new_evar evdref env ~src:loc (mkSort s); utj_type = s}) | c -> let j = pretype empty_tycon env evdref lvar c in let loc = loc_of_glob_constr c in let tj = evd_comb1 (Coercion.inh_coerce_to_sort loc env) evdref j in match valcon with | None -> tj | Some v -> if e_cumul env evdref v tj.utj_val then tj else error_unexpected_type_loc (loc_of_glob_constr c) env !evdref tj.utj_val v let pretype_gen expand_evar fail_evar resolve_classes evdref env lvar kind c = let c' = match kind with | OfType exptyp -> let tycon = match exptyp with None -> empty_tycon | Some t -> mk_tycon t in (pretype tycon env evdref lvar c).uj_val | IsType -> (pretype_type empty_valcon env evdref lvar c).utj_val in resolve_evars env evdref fail_evar resolve_classes; let c = if expand_evar then nf_evar !evdref c' else c' in if fail_evar then check_evars env Evd.empty !evdref c; c (* TODO: comment faire remonter l'information si le typage a resolu des variables du sigma original. il faudrait que la fonction de typage retourne aussi le nouveau sigma... 
*) let understand_judgment sigma env c = let evdref = ref sigma in let j = pretype empty_tycon env evdref ([],[]) c in resolve_evars env evdref true true; let j = j_nf_evar !evdref j in check_evars env sigma !evdref (mkCast(j.uj_val,DEFAULTcast, j.uj_type)); j let understand_judgment_tcc evdref env c = let j = pretype empty_tycon env evdref ([],[]) c in resolve_evars env evdref false true; j_nf_evar !evdref j (* Raw calls to the unsafe inference machine: boolean says if we must fail on unresolved evars; the unsafe_judgment list allows us to extend env with some bindings *) let ise_pretype_gen expand_evar fail_evar resolve_classes sigma env lvar kind c = let evdref = ref sigma in let c = pretype_gen expand_evar fail_evar resolve_classes evdref env lvar kind c in !evdref, c (** Entry points of the high-level type synthesis algorithm *) let understand_gen kind sigma env c = snd (ise_pretype_gen true true true sigma env ([],[]) kind c) let understand sigma env ?expected_type:exptyp c = snd (ise_pretype_gen true true true sigma env ([],[]) (OfType exptyp) c) let understand_type sigma env c = snd (ise_pretype_gen true true true sigma env ([],[]) IsType c) let understand_ltac ?(resolve_classes=false) expand_evar sigma env lvar kind c = ise_pretype_gen expand_evar false resolve_classes sigma env lvar kind c let understand_tcc ?(resolve_classes=true) sigma env ?expected_type:exptyp c = ise_pretype_gen true false resolve_classes sigma env ([],[]) (OfType exptyp) c let understand_tcc_evars ?(fail_evar=false) ?(resolve_classes=true) evdref env kind c = pretype_gen true fail_evar resolve_classes evdref env ([],[]) kind c end module Default : S = Pretyping_F(Coercion.Default) coq-8.4pl2/pretyping/term_dnet.ml0000640000175000001440000003152512010532755016171 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* std_ppcmds = function | DRel -> str "*" | DSort -> str "Sort" | DRef _ -> str "Ref" | DCtx (ctx,t) -> f ctx ++ spc() ++ str "|-" ++ spc () ++ f t | DLambda (t1,t2) -> str "fun"++ spc() ++ f t1 ++ spc() ++ str"->" ++ spc() ++ f t2 | DApp (t1,t2) -> f t1 ++ spc() ++ f t2 | DCase (_,t1,t2,ta) -> str "case" | DFix _ -> str "fix" | DCoFix _ -> str "cofix" | DCons ((t,dopt),tl) -> f t ++ (match dopt with Some t' -> str ":=" ++ f t' | None -> str "") ++ spc() ++ str "::" ++ spc() ++ f tl | DNil -> str "[]" (* * Functional iterators for the t datatype * a.k.a boring and error-prone boilerplate code *) let map f = function | (DRel | DSort | DNil | DRef _) as c -> c | DCtx (ctx,c) -> DCtx (f ctx, f c) | DLambda (t,c) -> DLambda (f t, f c) | DApp (t,u) -> DApp (f t,f u) | DCase (ci,p,c,bl) -> DCase (ci, f p, f c, Array.map f bl) | DFix (ia,i,ta,ca) -> DFix (ia,i,Array.map f ta,Array.map f ca) | DCoFix(i,ta,ca) -> DCoFix (i,Array.map f ta,Array.map f ca) | DCons ((t,topt),u) -> DCons ((f t,Option.map f topt), f u) let compare x y = let make_name n = match n with | DRef(ConstRef con) -> DRef(ConstRef(constant_of_kn(canonical_con con))) | DRef(IndRef (kn,i)) -> DRef(IndRef(mind_of_kn(canonical_mind kn),i)) | DRef(ConstructRef ((kn,i),j ))-> DRef(ConstructRef((mind_of_kn(canonical_mind kn),i),j)) | k -> k in Pervasives.compare (make_name x) (make_name y) let fold f acc = function | (DRel | DNil | DSort | DRef _) -> acc | DCtx (ctx,c) -> f (f acc ctx) c | DLambda (t,c) -> f (f acc t) c | DApp (t,u) -> f (f acc t) u | DCase (ci,p,c,bl) -> Array.fold_left f (f (f acc p) c) bl | DFix (ia,i,ta,ca) -> Array.fold_left 
f (Array.fold_left f acc ta) ca | DCoFix(i,ta,ca) -> Array.fold_left f (Array.fold_left f acc ta) ca | DCons ((t,topt),u) -> f (Option.fold_left f (f acc t) topt) u let choose f = function | (DRel | DSort | DNil | DRef _) -> invalid_arg "choose" | DCtx (ctx,c) -> f ctx | DLambda (t,c) -> f t | DApp (t,u) -> f u | DCase (ci,p,c,bl) -> f c | DFix (ia,i,ta,ca) -> f ta.(0) | DCoFix (i,ta,ca) -> f ta.(0) | DCons ((t,topt),u) -> f u let fold2 (f:'a -> 'b -> 'c -> 'a) (acc:'a) (c1:'b t) (c2:'c t) : 'a = let head w = map (fun _ -> ()) w in if compare (head c1) (head c2) <> 0 then invalid_arg "fold2:compare" else match c1,c2 with | (DRel, DRel | DNil, DNil | DSort, DSort | DRef _, DRef _) -> acc | (DCtx (c1,t1), DCtx (c2,t2) | DApp (c1,t1), DApp (c2,t2) | DLambda (c1,t1), DLambda (c2,t2)) -> f (f acc c1 c2) t1 t2 | DCase (ci,p1,c1,bl1),DCase (_,p2,c2,bl2) -> array_fold_left2 f (f (f acc p1 p2) c1 c2) bl1 bl2 | DFix (ia,i,ta1,ca1), DFix (_,_,ta2,ca2) -> array_fold_left2 f (array_fold_left2 f acc ta1 ta2) ca1 ca2 | DCoFix(i,ta1,ca1), DCoFix(_,ta2,ca2) -> array_fold_left2 f (array_fold_left2 f acc ta1 ta2) ca1 ca2 | DCons ((t1,topt1),u1), DCons ((t2,topt2),u2) -> f (Option.fold_left2 f (f acc t1 t2) topt1 topt2) u1 u2 | _ -> assert false let map2 (f:'a -> 'b -> 'c) (c1:'a t) (c2:'b t) : 'c t = let head w = map (fun _ -> ()) w in if compare (head c1) (head c2) <> 0 then invalid_arg "map2_t:compare" else match c1,c2 with | (DRel, DRel | DSort, DSort | DNil, DNil | DRef _, DRef _) as cc -> let (c,_) = cc in c | DCtx (c1,t1), DCtx (c2,t2) -> DCtx (f c1 c2, f t1 t2) | DLambda (t1,c1), DLambda (t2,c2) -> DLambda (f t1 t2, f c1 c2) | DApp (t1,u1), DApp (t2,u2) -> DApp (f t1 t2,f u1 u2) | DCase (ci,p1,c1,bl1), DCase (_,p2,c2,bl2) -> DCase (ci, f p1 p2, f c1 c2, array_map2 f bl1 bl2) | DFix (ia,i,ta1,ca1), DFix (_,_,ta2,ca2) -> DFix (ia,i,array_map2 f ta1 ta2,array_map2 f ca1 ca2) | DCoFix (i,ta1,ca1), DCoFix (_,ta2,ca2) -> DCoFix (i,array_map2 f ta1 ta2,array_map2 f ca1 ca2) | DCons ((t1,topt1),u1), DCons ((t2,topt2),u2) -> DCons ((f t1 t2,Option.lift2 f topt1 topt2), f u1 u2) | _ -> assert false let terminal = function | (DRel | DSort | DNil | DRef _) -> true | _ -> false end (* * Terms discrimination nets * Uses the general dnet datatype on DTerm.t * (here you can restart reading) *) (* * Construction of the module *) module type IDENT = sig type t val compare : t -> t -> int val subst : substitution -> t -> t val constr_of : t -> constr end module type OPT = sig val reduce : constr -> constr val direction : bool end module Make = functor (Ident : IDENT) -> functor (Opt : OPT) -> struct module TDnet : Dnet.S with type ident=Ident.t and type 'a structure = 'a DTerm.t and type meta = metavariable = Dnet.Make(DTerm)(Ident) (struct type t = metavariable let compare = Pervasives.compare end) type t = TDnet.t type ident = TDnet.ident type 'a pattern = 'a TDnet.pattern type term_pattern = term_pattern DTerm.t pattern type idset = TDnet.Idset.t type result = ident * (constr*existential_key) * Termops.subst open DTerm open TDnet let rec pat_of_constr c : term_pattern = match kind_of_term c with | Rel _ -> Term DRel | Sort _ -> Term DSort | Var i -> Term (DRef (VarRef i)) | Const c -> Term (DRef (ConstRef c)) | Ind i -> Term (DRef (IndRef i)) | Construct c -> Term (DRef (ConstructRef c)) | Term.Meta _ -> assert false | Evar (i,_) -> Meta i | Case (ci,c1,c2,ca) -> Term(DCase(ci,pat_of_constr c1,pat_of_constr c2,Array.map pat_of_constr ca)) | Fix ((ia,i),(_,ta,ca)) -> Term(DFix(ia,i,Array.map pat_of_constr ta, 
Array.map pat_of_constr ca)) | CoFix (i,(_,ta,ca)) -> Term(DCoFix(i,Array.map pat_of_constr ta,Array.map pat_of_constr ca)) | Cast (c,_,_) -> pat_of_constr c | Lambda (_,t,c) -> Term(DLambda (pat_of_constr t, pat_of_constr c)) | (Prod (_,_,_) | LetIn(_,_,_,_)) -> let (ctx,c) = ctx_of_constr (Term DNil) c in Term (DCtx (ctx,c)) | App (f,ca) -> Array.fold_left (fun c a -> Term (DApp (c,a))) (pat_of_constr f) (Array.map pat_of_constr ca) and ctx_of_constr ctx c : term_pattern * term_pattern = match kind_of_term c with | Prod (_,t,c) -> ctx_of_constr (Term(DCons((pat_of_constr t,None),ctx))) c | LetIn(_,d,t,c) -> ctx_of_constr (Term(DCons((pat_of_constr t, Some (pat_of_constr d)),ctx))) c | _ -> ctx,pat_of_constr c let empty_ctx : term_pattern -> term_pattern = function | Meta _ as c -> c | Term (DCtx(_,_)) as c -> c | c -> Term (DCtx (Term DNil, c)) (* * Basic primitives *) let empty = TDnet.empty let subst s t = let sleaf id = Ident.subst s id in let snode = function | DTerm.DRef gr -> DTerm.DRef (fst (subst_global s gr)) | n -> n in TDnet.map sleaf snode t let union = TDnet.union let add (c:constr) (id:Ident.t) (dn:t) = let c = Opt.reduce c in let c = empty_ctx (pat_of_constr c) in TDnet.add dn c id let new_meta_no = let ctr = ref 0 in fun () -> decr ctr; !ctr let new_meta_no = Evarutil.new_untyped_evar let neutral_meta = new_meta_no() let new_meta () = Meta (new_meta_no()) let new_evar () = mkEvar(new_meta_no(),[||]) let rec remove_cap : term_pattern -> term_pattern = function | Term (DCons (t,u)) -> Term (DCons (t,remove_cap u)) | Term DNil -> new_meta() | Meta _ as m -> m | _ -> assert false let under_prod : term_pattern -> term_pattern = function | Term (DCtx (t,u)) -> Term (DCtx (remove_cap t,u)) | Meta m -> Term (DCtx(new_meta(), Meta m)) | _ -> assert false let init = let e = new_meta_no() in (mkEvar (e,[||]),e) let rec e_subst_evar i (t:unit->constr) c = match kind_of_term c with | Evar (j,_) when i=j -> t() | _ -> map_constr (e_subst_evar i t) c let subst_evar i c = e_subst_evar i (fun _ -> c) (* debug *) let rec pr_term_pattern p = (fun pr_t -> function | Term t -> pr_t t | Meta m -> str"["++Util.pr_int (Obj.magic m)++str"]" ) (pr_dconstr pr_term_pattern) p let search_pat cpat dpat dn (up,plug) = let whole_c = subst_evar plug cpat up in (* if we are at the root, add an empty context *) let dpat = if isEvar_or_Meta up then under_prod (empty_ctx dpat) else dpat in TDnet.Idset.fold (fun id acc -> let c_id = Opt.reduce (Ident.constr_of id) in let (ctx,wc) = try Termops.align_prod_letin whole_c c_id with Invalid_argument _ -> [],c_id in let up = it_mkProd_or_LetIn up ctx in let wc,whole_c = if Opt.direction then whole_c,wc else wc,whole_c in try (id,(up,plug),Termops.filtering ctx Reduction.CUMUL wc whole_c)::acc with Termops.CannotFilter -> (* msgnl(str"recon "++Termops.print_constr_env (Global.env()) wc); *) acc ) (TDnet.find_match dpat dn) [] let fold_pattern_neutral f = fold_pattern (fun acc (mset,m,dn) -> if m=neutral_meta then acc else f m dn acc) let fold_pattern_nonlin f = let defined = ref Gmap.empty in fold_pattern_neutral ( fun m dn acc -> let dn = try TDnet.inter dn (Gmap.find m !defined) with Not_found -> dn in defined := Gmap.add m dn !defined; f m dn acc ) let fold_pattern_up f acc dpat cpat dn (up,plug) = fold_pattern_nonlin ( fun m dn acc -> f dn (subst_evar plug (e_subst_evar neutral_meta new_evar cpat) up, m) acc ) acc dpat dn let possibly_under pat k dn (up,plug) = let rec aux fst dn (up,plug) acc = let cpat = pat() in let dpat = pat_of_constr cpat in let dpat = if 
fst then under_prod (empty_ctx dpat) else dpat in (k dn (up,plug)) @ snd (fold_pattern_up (aux false) acc dpat cpat dn (up,plug)) in aux true dn (up,plug) [] let eq_pat eq () = mkApp(eq,[|mkEvar(neutral_meta,[||]);new_evar();new_evar()|]) let app_pat () = mkApp(new_evar(),[|mkEvar(neutral_meta,[||])|]) (* * High-level primitives describing specific search problems *) let search_pattern dn pat = let pat = Opt.reduce pat in search_pat pat (empty_ctx (pat_of_constr pat)) dn init let search_concl dn pat = let pat = Opt.reduce pat in search_pat pat (under_prod (empty_ctx (pat_of_constr pat))) dn init let search_eq_concl dn eq pat = let pat = Opt.reduce pat in let eq_pat = eq_pat eq () in let eq_dpat = under_prod (empty_ctx (pat_of_constr eq_pat)) in snd (fold_pattern_up (fun dn up acc -> search_pat pat (pat_of_constr pat) dn up @ acc ) [] eq_dpat eq_pat dn init) let search_head_concl dn pat = let pat = Opt.reduce pat in possibly_under app_pat (search_pat pat (pat_of_constr pat)) dn init let find_all dn = Idset.elements (TDnet.find_all dn) let map f dn = TDnet.map f (fun x -> x) dn end module type S = sig type t type ident type result = ident * (constr*existential_key) * Termops.subst val empty : t val add : constr -> ident -> t -> t val union : t -> t -> t val subst : substitution -> t -> t val search_pattern : t -> constr -> result list val search_concl : t -> constr -> result list val search_head_concl : t -> constr -> result list val search_eq_concl : t -> constr -> constr -> result list val find_all : t -> ident list val map : (ident -> ident) -> t -> t end coq-8.4pl2/pretyping/unification.mli0000640000175000001440000000560712063732052016673 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evar_map -> conv_pb -> ?flags:unify_flags -> constr -> constr -> evar_map (** [w_unify_to_subterm env (c,t) m] performs unification of [c] with a subterm of [t]. Constraints are added to [m] and the matched subterm of [t] is also returned. 
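    Note that in the signature below the evar map argument comes before the
    [(c,t)] pair.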
*) val w_unify_to_subterm : env -> evar_map -> ?flags:unify_flags -> constr * constr -> evar_map * constr val w_unify_to_subterm_all : env -> evar_map -> ?flags:unify_flags -> constr * constr -> (evar_map * constr) list val w_unify_meta_types : env -> ?flags:unify_flags -> evar_map -> evar_map (** [w_coerce_to_type env evd c ctyp typ] tries to coerce [c] of type [ctyp] so that its gets type [typ]; [typ] may contain metavariables *) val w_coerce_to_type : env -> evar_map -> constr -> types -> types -> evar_map * constr (*i This should be in another module i*) (** [abstract_list_all env evd t c l] abstracts the terms in l over c to get a term of type t (exported for inv.ml) *) val abstract_list_all : env -> evar_map -> constr -> constr -> constr list -> constr (* For tracing *) val w_merge : env -> bool -> unify_flags -> evar_map * (metavariable * constr * (instance_constraint * instance_typing_status)) list * (env * types pexistential * types) list -> evar_map val unify_0 : Environ.env -> Evd.evar_map -> Evd.conv_pb -> unify_flags -> Term.types -> Term.types -> Evd.evar_map * Evd.metabinding list * (Environ.env * Term.types Term.pexistential * Term.constr) list coq-8.4pl2/pretyping/detyping.ml0000640000175000001440000006114412121620060016021 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* inductive val member_message : std_ppcmds -> bool -> std_ppcmds val field : string val title : string end) -> struct type t = inductive let encode = Test.encode let subst subst (kn, ints as obj) = let kn' = subst_ind subst kn in if kn' == kn then obj else kn', ints let printer ind = pr_global_env Idset.empty (IndRef ind) let key = ["Printing";Test.field] let title = Test.title let member_message x = Test.member_message (printer x) let synchronous = true end module PrintingCasesIf = PrintingInductiveMake (struct let encode = encode_bool let field = "If" let title = "Types leading to pretty-printing of Cases using a `if' form: " let member_message s b = str "Cases on elements of " ++ s ++ str (if b then " are printed using a `if' form" else " are not printed using a `if' form") end) module PrintingCasesLet = PrintingInductiveMake (struct let encode = encode_tuple let field = "Let" let title = "Types leading to a pretty-printing of Cases using a `let' form:" let member_message s b = str "Cases on elements of " ++ s ++ str (if b then " are printed using a `let' form" else " are not printed using a `let' form") end) module PrintingIf = Goptions.MakeRefTable(PrintingCasesIf) module PrintingLet = Goptions.MakeRefTable(PrintingCasesLet) (* Flags.for printing or not wildcard and synthetisable types *) open Goptions let wildcard_value = ref true let force_wildcard () = !wildcard_value let _ = declare_bool_option { optsync = true; optdepr = false; optname = "forced wildcard"; optkey = ["Printing";"Wildcard"]; optread = force_wildcard; optwrite = (:=) wildcard_value } let synth_type_value = ref true let synthetize_type () = !synth_type_value let _ = declare_bool_option { optsync = true; optdepr = false; optname = "pattern matching return type synthesizability"; optkey = ["Printing";"Synth"]; optread = synthetize_type; optwrite = (:=) synth_type_value } let reverse_matching_value = ref true let reverse_matching () = !reverse_matching_value let _ = declare_bool_option { optsync = true; optdepr = false; optname = "pattern-matching reversibility"; optkey = ["Printing";"Matching"]; optread = reverse_matching; 
optwrite = (:=) reverse_matching_value } (* Auxiliary function for MutCase printing *) (* [computable] tries to tell if the predicate typing the result is inferable*) let computable p k = (* We first remove as many lambda as the arity, then we look if it remains a lambda for a dependent elimination. This function works for normal eta-expanded term. For non eta-expanded or non-normal terms, it may affirm the pred is synthetisable because of an undetected ultimate dependent variable in the second clause, or else, it may affirms the pred non synthetisable because of a non normal term in the fourth clause. A solution could be to store, in the MutCase, the eta-expanded normal form of pred to decide if it depends on its variables Lorsque le prdicat est dpendant de manire certaine, on ne dclare pas le prdicat synthtisable (mme si la variable dpendante ne l'est pas effectivement) parce que sinon on perd la rciprocit de la synthse (qui, lui, engendrera un prdicat non dpendant) *) let sign,ccl = decompose_lam_assum p in (rel_context_length sign = k+1) && noccur_between 1 (k+1) ccl let lookup_name_as_displayed env t s = let rec lookup avoid n c = match kind_of_term c with | Prod (name,_,c') -> (match compute_displayed_name_in RenamingForGoal avoid name c' with | (Name id,avoid') -> if id=s then Some n else lookup avoid' (n+1) c' | (Anonymous,avoid') -> lookup avoid' (n+1) (pop c')) | LetIn (name,_,_,c') -> (match compute_displayed_name_in RenamingForGoal avoid name c' with | (Name id,avoid') -> if id=s then Some n else lookup avoid' (n+1) c' | (Anonymous,avoid') -> lookup avoid' (n+1) (pop c')) | Cast (c,_,_) -> lookup avoid n c | _ -> None in lookup (ids_of_named_context (named_context env)) 1 t let lookup_index_as_renamed env t n = let rec lookup n d c = match kind_of_term c with | Prod (name,_,c') -> (match compute_displayed_name_in RenamingForGoal [] name c' with (Name _,_) -> lookup n (d+1) c' | (Anonymous,_) -> if n=0 then Some (d-1) else if n=1 then Some d else lookup (n-1) (d+1) c') | LetIn (name,_,_,c') -> (match compute_displayed_name_in RenamingForGoal [] name c' with | (Name _,_) -> lookup n (d+1) c' | (Anonymous,_) -> if n=0 then Some (d-1) else if n=1 then Some d else lookup (n-1) (d+1) c' ) | Cast (c,_,_) -> lookup n d c | _ -> if n=0 then Some (d-1) else None in lookup n 1 t (**********************************************************************) (* Fragile algorithm to reverse pattern-matching compilation *) let update_name na ((_,e),c) = match na with | Name _ when force_wildcard () & noccurn (list_index na e) c -> Anonymous | _ -> na let rec decomp_branch n nal b (avoid,env as e) c = let flag = if b then RenamingForGoal else RenamingForCasesPattern in if n=0 then (List.rev nal,(e,c)) else let na,c,f = match kind_of_term (strip_outer_cast c) with | Lambda (na,_,c) -> na,c,compute_displayed_let_name_in | LetIn (na,_,_,c) -> na,c,compute_displayed_name_in | _ -> Name (id_of_string "x"),(applist (lift 1 c, [mkRel 1])), compute_displayed_name_in in let na',avoid' = f flag avoid na c in decomp_branch (n-1) (na'::nal) b (avoid',add_name na' env) c let rec build_tree na isgoal e ci cl = let mkpat n rhs pl = PatCstr(dl,(ci.ci_ind,n+1),pl,update_name na rhs) in let cnl = ci.ci_cstr_ndecls in List.flatten (list_tabulate (fun i -> contract_branch isgoal e (cnl.(i),mkpat i,cl.(i))) (Array.length cl)) and align_tree nal isgoal (e,c as rhs) = match nal with | [] -> [[],rhs] | na::nal -> match kind_of_term c with | Case (ci,p,c,cl) when c = mkRel (list_index na (snd e)) & (* don't contract if p 
dependent *) computable p (ci.ci_pp_info.ind_nargs) -> let clauses = build_tree na isgoal e ci cl in List.flatten (List.map (fun (pat,rhs) -> let lines = align_tree nal isgoal rhs in List.map (fun (hd,rest) -> pat::hd,rest) lines) clauses) | _ -> let pat = PatVar(dl,update_name na rhs) in let mat = align_tree nal isgoal rhs in List.map (fun (hd,rest) -> pat::hd,rest) mat and contract_branch isgoal e (cn,mkpat,b) = let nal,rhs = decomp_branch cn [] isgoal e b in let mat = align_tree nal isgoal rhs in List.map (fun (hd,rhs) -> (mkpat rhs hd,rhs)) mat (**********************************************************************) (* Transform internal representation of pattern-matching into list of *) (* clauses *) let is_nondep_branch c n = try let sign,ccl = decompose_lam_n_assum n c in noccur_between 1 (rel_context_length sign) ccl with e when Errors.noncritical e -> (* Not eta-expanded or not reduced *) false let extract_nondep_branches test c b n = let rec strip n r = if n=0 then r else match r with | GLambda (_,_,_,_,t) -> strip (n-1) t | GLetIn (_,_,_,t) -> strip (n-1) t | _ -> assert false in if test c n then Some (strip n b) else None let it_destRLambda_or_LetIn_names n c = let rec aux n nal c = if n=0 then (List.rev nal,c) else match c with | GLambda (_,na,_,_,c) -> aux (n-1) (na::nal) c | GLetIn (_,na,_,c) -> aux (n-1) (na::nal) c | _ -> (* eta-expansion *) let rec next l = let x = next_ident_away (id_of_string "x") l in (* Not efficient but unusual and no function to get free glob_vars *) (* if occur_glob_constr x c then next (x::l) else x in *) x in let x = next (free_glob_vars c) in let a = GVar (dl,x) in aux (n-1) (Name x :: nal) (match c with | GApp (loc,p,l) -> GApp (loc,c,l@[a]) | _ -> (GApp (dl,c,[a]))) in aux n [] c let detype_case computable detype detype_eqns testdep avoid data p c bl = let (indsp,st,nparams,consnargsl,k) = data in let synth_type = synthetize_type () in let tomatch = detype c in let alias, aliastyp, pred= if (not !Flags.raw_print) & synth_type & computable & Array.length bl<>0 then Anonymous, None, None else match Option.map detype p with | None -> Anonymous, None, None | Some p -> let nl,typ = it_destRLambda_or_LetIn_names k p in let n,typ = match typ with | GLambda (_,x,_,t,c) -> x, c | _ -> Anonymous, typ in let aliastyp = if List.for_all ((=) Anonymous) nl then None else Some (dl,indsp,nparams,nl) in n, aliastyp, Some typ in let constructs = Array.init (Array.length bl) (fun i -> (indsp,i+1)) in let tag = try if !Flags.raw_print then RegularStyle else if st = LetPatternStyle then st else if PrintingLet.active indsp then LetStyle else if PrintingIf.active indsp then IfStyle else st with Not_found -> st in match tag with | LetStyle when aliastyp = None -> let bl' = Array.map detype bl in let (nal,d) = it_destRLambda_or_LetIn_names consnargsl.(0) bl'.(0) in GLetTuple (dl,nal,(alias,pred),tomatch,d) | IfStyle when aliastyp = None -> let bl' = Array.map detype bl in let nondepbrs = array_map3 (extract_nondep_branches testdep) bl bl' consnargsl in if array_for_all ((<>) None) nondepbrs then GIf (dl,tomatch,(alias,pred), Option.get nondepbrs.(0),Option.get nondepbrs.(1)) else let eqnl = detype_eqns constructs consnargsl bl in GCases (dl,tag,pred,[tomatch,(alias,aliastyp)],eqnl) | _ -> let eqnl = detype_eqns constructs consnargsl bl in GCases (dl,tag,pred,[tomatch,(alias,aliastyp)],eqnl) let detype_sort = function | Prop c -> GProp c | Type u -> GType (Some u) type binder_kind = BProd | BLambda | BLetIn 
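(* Illustration added for exposition (not part of the original source):
   [binder_kind] is a dispatch tag used by [detype_binder] below to decide
   whether a binder is externalised as a [GProd], a [GLambda] or a [GLetIn]
   node.  The commented sketch shows the same dispatch over a hypothetical
   miniature syntax; plain OCaml, no Coq dependencies.

   type name_sketch = AnonS | NamedS of string

   type gterm_sketch =
     | GVarS    of string
     | GProdS   of name_sketch * gterm_sketch * gterm_sketch   (* forall x:T, U  *)
     | GLambdaS of name_sketch * gterm_sketch * gterm_sketch   (* fun x:T => U   *)
     | GLetInS  of name_sketch * gterm_sketch * gterm_sketch   (* let x := T in U *)

   let detype_binder_sketch kind na ty body =
     match kind with
     | BProd   -> GProdS   (na, ty, body)
     | BLambda -> GLambdaS (na, ty, body)
     | BLetIn  -> GLetInS  (na, ty, body)
*)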
(**********************************************************************) (* Main detyping function *) let detype_anonymous = ref (fun loc n -> anomaly "detype: index to an anonymous variable") let set_detype_anonymous f = detype_anonymous := f let rec detype (isgoal:bool) avoid env t = match kind_of_term (collapse_appl t) with | Rel n -> (try match lookup_name_of_rel n env with | Name id -> GVar (dl, id) | Anonymous -> !detype_anonymous dl n with Not_found -> let s = "_UNBOUND_REL_"^(string_of_int n) in GVar (dl, id_of_string s)) | Meta n -> (* Meta in constr are not user-parsable and are mapped to Evar *) GEvar (dl, n, None) | Var id -> (try let _ = Global.lookup_named id in GRef (dl, VarRef id) with e when Errors.noncritical e -> GVar (dl, id)) | Sort s -> GSort (dl,detype_sort s) | Cast (c1,REVERTcast,c2) when not !Flags.raw_print -> detype isgoal avoid env c1 | Cast (c1,k,c2) -> GCast(dl,detype isgoal avoid env c1, CastConv (k, detype isgoal avoid env c2)) | Prod (na,ty,c) -> detype_binder isgoal BProd avoid env na ty c | Lambda (na,ty,c) -> detype_binder isgoal BLambda avoid env na ty c | LetIn (na,b,_,c) -> detype_binder isgoal BLetIn avoid env na b c | App (f,args) -> GApp (dl,detype isgoal avoid env f, array_map_to_list (detype isgoal avoid env) args) | Const sp -> GRef (dl, ConstRef sp) | Evar (ev,cl) -> GEvar (dl, ev, Some (List.map (detype isgoal avoid env) (Array.to_list cl))) | Ind ind_sp -> GRef (dl, IndRef ind_sp) | Construct cstr_sp -> GRef (dl, ConstructRef cstr_sp) | Case (ci,p,c,bl) -> let comp = computable p (ci.ci_pp_info.ind_nargs) in detype_case comp (detype isgoal avoid env) (detype_eqns isgoal avoid env ci comp) is_nondep_branch avoid (ci.ci_ind,ci.ci_pp_info.style,ci.ci_npar, ci.ci_cstr_ndecls,ci.ci_pp_info.ind_nargs) (Some p) c bl | Fix (nvn,recdef) -> detype_fix isgoal avoid env nvn recdef | CoFix (n,recdef) -> detype_cofix isgoal avoid env n recdef and detype_fix isgoal avoid env (vn,_ as nvn) (names,tys,bodies) = let def_avoid, def_env, lfi = Array.fold_left (fun (avoid, env, l) na -> let id = next_name_away na avoid in (id::avoid, add_name (Name id) env, id::l)) (avoid, env, []) names in let n = Array.length tys in let v = array_map3 (fun c t i -> share_names isgoal (i+1) [] def_avoid def_env c (lift n t)) bodies tys vn in GRec(dl,GFix (Array.map (fun i -> Some i, GStructRec) (fst nvn), snd nvn),Array.of_list (List.rev lfi), Array.map (fun (bl,_,_) -> bl) v, Array.map (fun (_,_,ty) -> ty) v, Array.map (fun (_,bd,_) -> bd) v) and detype_cofix isgoal avoid env n (names,tys,bodies) = let def_avoid, def_env, lfi = Array.fold_left (fun (avoid, env, l) na -> let id = next_name_away na avoid in (id::avoid, add_name (Name id) env, id::l)) (avoid, env, []) names in let ntys = Array.length tys in let v = array_map2 (fun c t -> share_names isgoal 0 [] def_avoid def_env c (lift ntys t)) bodies tys in GRec(dl,GCoFix n,Array.of_list (List.rev lfi), Array.map (fun (bl,_,_) -> bl) v, Array.map (fun (_,_,ty) -> ty) v, Array.map (fun (_,bd,_) -> bd) v) and share_names isgoal n l avoid env c t = match kind_of_term c, kind_of_term t with (* factorize even when not necessary to have better presentation *) | Lambda (na,t,c), Prod (na',t',c') -> let na = match (na,na') with Name _, _ -> na | _, Name _ -> na' | _ -> na in let t = detype isgoal avoid env t in let id = next_name_away na avoid in let avoid = id::avoid and env = add_name (Name id) env in share_names isgoal (n-1) ((Name id,Explicit,None,t)::l) avoid env c c' (* May occur for fix built interactively *) | LetIn 
(na,b,t',c), _ when n > 0 -> let t' = detype isgoal avoid env t' in let b = detype isgoal avoid env b in let id = next_name_away na avoid in let avoid = id::avoid and env = add_name (Name id) env in share_names isgoal n ((Name id,Explicit,Some b,t')::l) avoid env c (lift 1 t) (* Only if built with the f/n notation or w/o let-expansion in types *) | _, LetIn (_,b,_,t) when n > 0 -> share_names isgoal n l avoid env c (subst1 b t) (* If it is an open proof: we cheat and eta-expand *) | _, Prod (na',t',c') when n > 0 -> let t' = detype isgoal avoid env t' in let id = next_name_away na' avoid in let avoid = id::avoid and env = add_name (Name id) env in let appc = mkApp (lift 1 c,[|mkRel 1|]) in share_names isgoal (n-1) ((Name id,Explicit,None,t')::l) avoid env appc c' (* If built with the f/n notation: we renounce to share names *) | _ -> if n>0 then warning "Detyping.detype: cannot factorize fix enough"; let c = detype isgoal avoid env c in let t = detype isgoal avoid env t in (List.rev l,c,t) and detype_eqns isgoal avoid env ci computable constructs consnargsl bl = try if !Flags.raw_print or not (reverse_matching ()) then raise Exit; let mat = build_tree Anonymous isgoal (avoid,env) ci bl in List.map (fun (pat,((avoid,env),c)) -> (dl,[],[pat],detype isgoal avoid env c)) mat with e when Errors.noncritical e -> Array.to_list (array_map3 (detype_eqn isgoal avoid env) constructs consnargsl bl) and detype_eqn isgoal avoid env constr construct_nargs branch = let make_pat x avoid env b ids = if force_wildcard () & noccurn 1 b then PatVar (dl,Anonymous),avoid,(add_name Anonymous env),ids else let id = next_name_away_in_cases_pattern x avoid in PatVar (dl,Name id),id::avoid,(add_name (Name id) env),id::ids in let rec buildrec ids patlist avoid env n b = if n=0 then (dl, ids, [PatCstr(dl, constr, List.rev patlist,Anonymous)], detype isgoal avoid env b) else match kind_of_term b with | Lambda (x,_,b) -> let pat,new_avoid,new_env,new_ids = make_pat x avoid env b ids in buildrec new_ids (pat::patlist) new_avoid new_env (n-1) b | LetIn (x,_,_,b) -> let pat,new_avoid,new_env,new_ids = make_pat x avoid env b ids in buildrec new_ids (pat::patlist) new_avoid new_env (n-1) b | Cast (c,_,_) -> (* Oui, il y a parfois des cast *) buildrec ids patlist avoid env n c | _ -> (* eta-expansion : n'arrivera plus lorsque tous les termes seront construits partir de la syntaxe Cases *) (* nommage de la nouvelle variable *) let new_b = applist (lift 1 b, [mkRel 1]) in let pat,new_avoid,new_env,new_ids = make_pat Anonymous avoid env new_b ids in buildrec new_ids (pat::patlist) new_avoid new_env (n-1) new_b in buildrec [] [] avoid env construct_nargs branch and detype_binder isgoal bk avoid env na ty c = let flag = if isgoal then RenamingForGoal else RenamingElsewhereFor (env,c) in let na',avoid' = if bk = BLetIn then compute_displayed_let_name_in flag avoid na c else compute_displayed_name_in flag avoid na c in let r = detype isgoal avoid' (add_name na' env) c in match bk with | BProd -> GProd (dl, na',Explicit,detype false avoid env ty, r) | BLambda -> GLambda (dl, na',Explicit,detype false avoid env ty, r) | BLetIn -> GLetIn (dl, na',detype false avoid env ty, r) let rec detype_rel_context where avoid env sign = let where = Option.map (fun c -> it_mkLambda_or_LetIn c sign) where in let rec aux avoid env = function | [] -> [] | (na,b,t)::rest -> let na',avoid' = match where with | None -> na,avoid | Some c -> if b<>None then compute_displayed_let_name_in (RenamingElsewhereFor (env,c)) avoid na c else 
compute_displayed_name_in (RenamingElsewhereFor (env,c)) avoid na c in let b = Option.map (detype false avoid env) b in let t = detype false avoid env t in (na',Explicit,b,t) :: aux avoid' (add_name na' env) rest in aux avoid env (List.rev sign) (**********************************************************************) (* Module substitution: relies on detyping *) let rec subst_cases_pattern subst pat = match pat with | PatVar _ -> pat | PatCstr (loc,((kn,i),j),cpl,n) -> let kn' = subst_ind subst kn and cpl' = list_smartmap (subst_cases_pattern subst) cpl in if kn' == kn && cpl' == cpl then pat else PatCstr (loc,((kn',i),j),cpl',n) let rec subst_glob_constr subst raw = match raw with | GRef (loc,ref) -> let ref',t = subst_global subst ref in if ref' == ref then raw else detype false [] [] t | GVar _ -> raw | GEvar _ -> raw | GPatVar _ -> raw | GApp (loc,r,rl) -> let r' = subst_glob_constr subst r and rl' = list_smartmap (subst_glob_constr subst) rl in if r' == r && rl' == rl then raw else GApp(loc,r',rl') | GLambda (loc,n,bk,r1,r2) -> let r1' = subst_glob_constr subst r1 and r2' = subst_glob_constr subst r2 in if r1' == r1 && r2' == r2 then raw else GLambda (loc,n,bk,r1',r2') | GProd (loc,n,bk,r1,r2) -> let r1' = subst_glob_constr subst r1 and r2' = subst_glob_constr subst r2 in if r1' == r1 && r2' == r2 then raw else GProd (loc,n,bk,r1',r2') | GLetIn (loc,n,r1,r2) -> let r1' = subst_glob_constr subst r1 and r2' = subst_glob_constr subst r2 in if r1' == r1 && r2' == r2 then raw else GLetIn (loc,n,r1',r2') | GCases (loc,sty,rtno,rl,branches) -> let rtno' = Option.smartmap (subst_glob_constr subst) rtno and rl' = list_smartmap (fun (a,x as y) -> let a' = subst_glob_constr subst a in let (n,topt) = x in let topt' = Option.smartmap (fun (loc,(sp,i),x,y as t) -> let sp' = subst_ind subst sp in if sp == sp' then t else (loc,(sp',i),x,y)) topt in if a == a' && topt == topt' then y else (a',(n,topt'))) rl and branches' = list_smartmap (fun (loc,idl,cpl,r as branch) -> let cpl' = list_smartmap (subst_cases_pattern subst) cpl and r' = subst_glob_constr subst r in if cpl' == cpl && r' == r then branch else (loc,idl,cpl',r')) branches in if rtno' == rtno && rl' == rl && branches' == branches then raw else GCases (loc,sty,rtno',rl',branches') | GLetTuple (loc,nal,(na,po),b,c) -> let po' = Option.smartmap (subst_glob_constr subst) po and b' = subst_glob_constr subst b and c' = subst_glob_constr subst c in if po' == po && b' == b && c' == c then raw else GLetTuple (loc,nal,(na,po'),b',c') | GIf (loc,c,(na,po),b1,b2) -> let po' = Option.smartmap (subst_glob_constr subst) po and b1' = subst_glob_constr subst b1 and b2' = subst_glob_constr subst b2 and c' = subst_glob_constr subst c in if c' == c & po' == po && b1' == b1 && b2' == b2 then raw else GIf (loc,c',(na,po'),b1',b2') | GRec (loc,fix,ida,bl,ra1,ra2) -> let ra1' = array_smartmap (subst_glob_constr subst) ra1 and ra2' = array_smartmap (subst_glob_constr subst) ra2 in let bl' = array_smartmap (list_smartmap (fun (na,k,obd,ty as dcl) -> let ty' = subst_glob_constr subst ty in let obd' = Option.smartmap (subst_glob_constr subst) obd in if ty'==ty & obd'==obd then dcl else (na,k,obd',ty'))) bl in if ra1' == ra1 && ra2' == ra2 && bl'==bl then raw else GRec (loc,fix,ida,bl',ra1',ra2') | GSort _ -> raw | GHole (loc,ImplicitArg (ref,i,b)) -> let ref',_ = subst_global subst ref in if ref' == ref then raw else GHole (loc,InternalHole) | GHole (loc, (BinderType _ | QuestionMark _ | CasesType | InternalHole | TomatchTypeParameter _ | GoalEvar | ImpossibleCase | 
MatchingVar _)) -> raw | GCast (loc,r1,k) -> (match k with CastConv (k,r2) -> let r1' = subst_glob_constr subst r1 and r2' = subst_glob_constr subst r2 in if r1' == r1 && r2' == r2 then raw else GCast (loc,r1', CastConv (k,r2')) | CastCoerce -> let r1' = subst_glob_constr subst r1 in if r1' == r1 then raw else GCast (loc,r1',k)) (* Utilities to transform kernel cases to simple pattern-matching problem *) let simple_cases_matrix_of_branches ind brs = List.map (fun (i,n,b) -> let nal,c = it_destRLambda_or_LetIn_names n b in let mkPatVar na = PatVar (dummy_loc,na) in let p = PatCstr (dummy_loc,(ind,i+1),List.map mkPatVar nal,Anonymous) in let ids = map_succeed Nameops.out_name nal in (dummy_loc,ids,[p],c)) brs let return_type_of_predicate ind nparams nrealargs_ctxt pred = let nal,p = it_destRLambda_or_LetIn_names (nrealargs_ctxt+1) pred in (List.hd nal, Some (dummy_loc, ind, nparams, List.tl nal)), Some p coq-8.4pl2/pretyping/typing.mli0000640000175000001440000000302112010532755015661 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evar_map -> constr -> types (** Typecheck a term and return its type + updated evars *) val e_type_of : env -> evar_map -> constr -> evar_map * types (** Typecheck a type and return its sort *) val sort_of : env -> evar_map -> types -> sorts (** Typecheck a term has a given type (assuming the type is OK) *) val check : env -> evar_map -> constr -> types -> unit (** Returns the instantiated type of a metavariable *) val meta_type : evar_map -> metavariable -> types (** Solve existential variables using typing *) val solve_evars : env -> evar_map -> constr -> evar_map * constr (** Raise an error message if incorrect elimination for this inductive *) (** (first constr is term to match, second is return predicate) *) val check_allowed_sort : env -> evar_map -> inductive -> constr -> constr -> unit coq-8.4pl2/pretyping/pretype_errors.ml0000640000175000001440000001467612010532755017304 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ -> false let nf_evar = Reductionops.nf_evar let j_nf_evar sigma j = { uj_val = nf_evar sigma j.uj_val; uj_type = nf_evar sigma j.uj_type } let j_nf_betaiotaevar sigma j = { uj_val = nf_evar sigma j.uj_val; uj_type = Reductionops.nf_betaiota sigma j.uj_type } let jl_nf_evar sigma jl = List.map (j_nf_evar sigma) jl let jv_nf_betaiotaevar sigma jl = Array.map (j_nf_betaiotaevar sigma) jl let jv_nf_evar sigma = Array.map (j_nf_evar sigma) let tj_nf_evar sigma {utj_val=v;utj_type=t} = {utj_val=nf_evar sigma v;utj_type=t} let env_nf_evar sigma env = process_rel_context (fun d e -> push_rel (map_rel_declaration (nf_evar sigma) d) e) env let env_nf_betaiotaevar sigma env = process_rel_context (fun d e -> push_rel (map_rel_declaration (Reductionops.nf_betaiota sigma) d) e) env (* This simplifies the typing context of Cases clauses *) (* hope it does not disturb other typing contexts *) let contract env lc = let l = ref [] in let contract_context (na,c,t) env = match c with | Some c' when isRel c' -> l := (substl !l c') :: !l; env | _ -> let t' = substl !l t in let c' = Option.map (substl !l) c in let na' = named_hd env t' na in l := (mkRel 1) :: List.map (lift 1) !l; push_rel (na',c',t') env in let env = process_rel_context contract_context env in (env, List.map (substl !l) lc) let contract2 env a b = match contract env [a;b] with 
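(* [contract] maps [substl] over its input list and so preserves its length; the two-element pattern below therefore always matches and the [assert false] branch is unreachable. *)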
| env, [a;b] -> env,a,b | _ -> assert false let contract3 env a b c = match contract env [a;b;c] with | env, [a;b;c] -> env,a,b,c | _ -> assert false let raise_pretype_error (loc,env,sigma,te) = Loc.raise loc (PretypeError(env,sigma,te)) let raise_located_type_error (loc,env,sigma,te) = Loc.raise loc (PretypeError(env,sigma,TypingError te)) let error_actual_type_loc loc env sigma {uj_val=c;uj_type=actty} expty = let env, c, actty, expty = contract3 env c actty expty in let j = {uj_val=c;uj_type=actty} in raise_located_type_error (loc, env, sigma, ActualType (j, expty)) let error_cant_apply_not_functional_loc loc env sigma rator randl = raise_located_type_error (loc, env, sigma, CantApplyNonFunctional (rator, Array.of_list randl)) let error_cant_apply_bad_type_loc loc env sigma (n,c,t) rator randl = raise_located_type_error (loc, env, sigma, CantApplyBadType ((n,c,t), rator, Array.of_list randl)) let error_ill_formed_branch_loc loc env sigma c i actty expty = raise_located_type_error (loc, env, sigma, IllFormedBranch (c, i, actty, expty)) let error_number_branches_loc loc env sigma cj expn = raise_located_type_error (loc, env, sigma, NumberBranches (cj, expn)) let error_case_not_inductive_loc loc env sigma cj = raise_located_type_error (loc, env, sigma, CaseNotInductive cj) let error_ill_typed_rec_body_loc loc env sigma i na jl tys = raise_located_type_error (loc, env, sigma, IllTypedRecBody (i, na, jl, tys)) let error_not_a_type_loc loc env sigma j = raise_located_type_error (loc, env, sigma, NotAType j) (*s Implicit arguments synthesis errors. It is hard to find a precise location. *) let error_occur_check env sigma ev c = raise (PretypeError (env, sigma, OccurCheck (ev,c))) let error_not_clean env sigma ev c (loc,k) = Loc.raise loc (PretypeError (env, sigma, NotClean (ev,c,k))) let error_unsolvable_implicit loc env sigma evi e explain = Loc.raise loc (PretypeError (env, sigma, UnsolvableImplicit (evi, e, explain))) let error_cannot_unify env sigma (m,n) = raise (PretypeError (env, sigma,CannotUnify (m,n))) let error_cannot_unify_local env sigma (m,n,sn) = raise (PretypeError (env, sigma,CannotUnifyLocal (m,n,sn))) let error_cannot_coerce env sigma (m,n) = raise (PretypeError (env, sigma,CannotUnify (m,n))) let error_cannot_find_well_typed_abstraction env sigma p l = raise (PretypeError (env, sigma,CannotFindWellTypedAbstraction (p,l))) let error_abstraction_over_meta env sigma hdmeta metaarg = let m = Evd.meta_name sigma hdmeta and n = Evd.meta_name sigma metaarg in raise (PretypeError (env, sigma,AbstractionOverMeta (m,n))) let error_non_linear_unification env sigma hdmeta t = let m = Evd.meta_name sigma hdmeta in raise (PretypeError (env, sigma,NonLinearUnification (m,t))) (*s Ml Case errors *) let error_cant_find_case_type_loc loc env sigma expr = raise_pretype_error (loc, env, sigma, CantFindCaseType expr) (*s Pretyping errors *) let error_unexpected_type_loc loc env sigma actty expty = let env, actty, expty = contract2 env actty expty in raise_pretype_error (loc, env, sigma, UnexpectedType (actty, expty)) let error_not_product_loc loc env sigma c = raise_pretype_error (loc, env, sigma, NotProduct c) (*s Error in conversion from AST to glob_constr *) let error_var_not_found_loc loc s = raise_pretype_error (loc, empty_env, Evd.empty, VarNotFound s) coq-8.4pl2/pretyping/inductiveops.mli0000640000175000001440000001242112010532755017067 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) 
(* inductive -> types (** Return type as quoted by the user *) val type_of_constructor : env -> constructor -> types val type_of_constructors : env -> inductive -> types array (** Return constructor types in normal form *) val arities_of_constructors : env -> inductive -> types array (** An inductive type with its parameters *) type inductive_family val make_ind_family : inductive * constr list -> inductive_family val dest_ind_family : inductive_family -> inductive * constr list val map_ind_family : (constr -> constr) -> inductive_family -> inductive_family val liftn_inductive_family : int -> int -> inductive_family -> inductive_family val lift_inductive_family : int -> inductive_family -> inductive_family val substnl_ind_family : constr list -> int -> inductive_family -> inductive_family (** An inductive type with its parameters and real arguments *) type inductive_type = IndType of inductive_family * constr list val make_ind_type : inductive_family * constr list -> inductive_type val dest_ind_type : inductive_type -> inductive_family * constr list val map_inductive_type : (constr -> constr) -> inductive_type -> inductive_type val liftn_inductive_type : int -> int -> inductive_type -> inductive_type val lift_inductive_type : int -> inductive_type -> inductive_type val substnl_ind_type : constr list -> int -> inductive_type -> inductive_type val mkAppliedInd : inductive_type -> constr val mis_is_recursive_subset : int list -> wf_paths -> bool val mis_is_recursive : inductive * mutual_inductive_body * one_inductive_body -> bool val mis_nf_constructor_type : inductive * mutual_inductive_body * one_inductive_body -> int -> constr (** Extract information from an inductive name *) (** Arity of constructors excluding parameters and local defs *) val mis_constr_nargs : inductive -> int array val mis_constr_nargs_env : env -> inductive -> int array val nconstructors : inductive -> int (** Return the lengths of parameters signature and real arguments signature *) val inductive_nargs : env -> inductive -> int * int val mis_constructor_nargs_env : env -> constructor -> int val constructor_nrealargs : env -> constructor -> int val constructor_nrealhyps : env -> constructor -> int val get_full_arity_sign : env -> inductive -> rel_context val allowed_sorts : env -> inductive -> sorts_family list (** Extract information from an inductive family *) type constructor_summary = { cs_cstr : constructor; (* internal name of the constructor *) cs_params : constr list; (* parameters of the constructor in current ctx *) cs_nargs : int; (* length of arguments signature (letin included) *) cs_args : rel_context; (* signature of the arguments (letin included) *) cs_concl_realargs : constr array; (* actual realargs in the concl of cstr *) } val lift_constructor : int -> constructor_summary -> constructor_summary val get_constructor : inductive * mutual_inductive_body * one_inductive_body * constr list -> int -> constructor_summary val get_arity : env -> inductive_family -> rel_context * sorts_family val get_constructors : env -> inductive_family -> constructor_summary array val build_dependent_constructor : constructor_summary -> constr val build_dependent_inductive : env -> inductive_family -> constr val make_arity_signature : env -> bool -> inductive_family -> rel_context val make_arity : env -> bool -> inductive_family -> sorts -> types val build_branch_type : env -> bool -> constr -> constructor_summary -> types (** Raise [Not_found] if not given an valid inductive type *) val extract_mrectype : constr -> 
inductive * constr list val find_mrectype : env -> evar_map -> types -> inductive * constr list val find_rectype : env -> evar_map -> types -> inductive_type val find_inductive : env -> evar_map -> types -> inductive * constr list val find_coinductive : env -> evar_map -> types -> inductive * constr list (********************) (** Builds the case predicate arity (dependent or not) *) val arity_of_case_predicate : env -> inductive_family -> bool -> sorts -> types val type_case_branches_with_names : env -> inductive * constr list -> constr -> constr -> types array * types (** Annotation for cases *) val make_case_info : env -> inductive -> case_style -> case_info (*i Compatibility val make_default_case_info : env -> case_style -> inductive -> case_info i*) (********************) val type_of_inductive_knowing_conclusion : env -> one_inductive_body -> types -> types (********************) val control_only_guard : env -> types -> unit val subst_inductive : Mod_subst.substitution -> inductive -> inductive coq-8.4pl2/pretyping/arguments_renaming.ml0000640000175000001440000000727712010532755020104 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !name_table); Summary.unfreeze_function = (fun r -> name_table := r); Summary.init_function = (fun () -> name_table := empty_name_table) } type req = | ReqLocal | ReqGlobal of global_reference * name list list let load_rename_args _ (_, (_, (r, names))) = name_table := Refmap.add r names !name_table let cache_rename_args o = load_rename_args 1 o let classify_rename_args = function | ReqLocal, _ -> Dispose | ReqGlobal _, _ as o -> Substitute o let subst_rename_args (subst, (_, (r, names as orig))) = ReqLocal, let r' = fst (subst_global subst r) in if r==r' then orig else (r', names) let section_segment_of_reference = function | ConstRef con -> Lib.section_segment_of_constant con | IndRef (kn,_) | ConstructRef ((kn,_),_) -> Lib.section_segment_of_mutual_inductive kn | _ -> [] let discharge_rename_args = function | _, (ReqGlobal (c, names), _) -> let c' = pop_global_reference c in let vars = section_segment_of_reference c in let var_names = List.map (fun (id, _,_,_) -> Name id) vars in let names' = List.map (fun l -> var_names @ l) names in Some (ReqGlobal (c', names), (c', names')) | _ -> None let rebuild_rename_args x = x let inRenameArgs = declare_object { (default_object "RENAME-ARGUMENTS" ) with load_function = load_rename_args; cache_function = cache_rename_args; classify_function = classify_rename_args; subst_function = subst_rename_args; discharge_function = discharge_rename_args; rebuild_function = rebuild_rename_args; } let rename_arguments local r names = let req = if local then ReqLocal else ReqGlobal (r, names) in Lib.add_anonymous_leaf (inRenameArgs (req, (r, names))) let arguments_names r = Refmap.find r !name_table let rec rename_prod c = function | [] -> c | (Name _ as n) :: tl -> (match kind_of_type c with | ProdType (_, s, t) -> mkProd (n, s, rename_prod t tl) | _ -> c) | _ :: tl -> match kind_of_type c with | ProdType (n, s, t) -> mkProd (n, s, rename_prod t tl) | _ -> c let rename_type ty ref = try rename_prod ty (List.hd (arguments_names ref)) with Not_found -> ty let rename_type_of_constant env c = let ty = Typeops.type_of_constant env c in rename_type ty (ConstRef c) let rename_type_of_inductive env ind = let ty = Inductiveops.type_of_inductive env ind in rename_type ty (IndRef ind) let rename_type_of_constructor env cstruct = let 
ty = Inductiveops.type_of_constructor env cstruct in rename_type ty (ConstructRef cstruct) let rename_typing env c = let j = Typeops.typing env c in match kind_of_term c with | Const c -> { j with uj_type = rename_type j.uj_type (ConstRef c) } | Ind i -> { j with uj_type = rename_type j.uj_type (IndRef i) } | Construct k -> { j with uj_type = rename_type j.uj_type (ConstructRef k) } | _ -> j coq-8.4pl2/pretyping/pattern.mli0000640000175000001440000001124212010532755016030 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ?p,fun y x => ?p)] => [forall x y => p]] will be accepted. Thanks to the reference by index, a matching problem like [match ... with [(fun x => ?p)] => [forall x => p]] will work even if [x] is also the name of an existing goal variable. Note: we do not keep types in the signature. Besides simplicity, the main reason is that it would force to close the signature over binders that occur only in the types of effective binders but not in the term itself (e.g. for a term [f x] with [f:A -> True] and [x:A]). On the opposite side, by not keeping the types, we loose opportunity to propagate type informations which otherwise would not be inferable, as e.g. when matching [forall x, x = 0] with pattern [forall x, ?h = 0] and using the solution "x|-h:=x" in expression [forall x, h = x] where nothing tells how the type of x could be inferred. We also loose the ability of typing ltac variables before calling the right-hand-side of ltac matching clauses. *) type constr_under_binders = identifier list * constr (** Types of substitutions with or w/o bound variables *) type patvar_map = (patvar * constr) list type extended_patvar_map = (patvar * constr_under_binders) list (** {5 Patterns} *) type case_info_pattern = { cip_style : case_style; cip_ind : inductive option; cip_ind_args : (int * int) option; (** number of params and args *) cip_extensible : bool (** does this match end with _ => _ ? 
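When [true], the branches of the corresponding [PCase] need not cover every constructor (see the note on [PCase] below).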
*) } type constr_pattern = | PRef of global_reference | PVar of identifier | PEvar of existential_key * constr_pattern array | PRel of int | PApp of constr_pattern * constr_pattern array | PSoApp of patvar * constr_pattern list | PLambda of name * constr_pattern * constr_pattern | PProd of name * constr_pattern * constr_pattern | PLetIn of name * constr_pattern * constr_pattern | PSort of glob_sort | PMeta of patvar option | PIf of constr_pattern * constr_pattern * constr_pattern | PCase of case_info_pattern * constr_pattern * constr_pattern * (int * int * constr_pattern) list (** index of constructor, nb of args *) | PFix of fixpoint | PCoFix of cofixpoint (** Nota : in a [PCase], the array of branches might be shorter than expected, denoting the use of a final "_ => _" branch *) (** {5 Functions on patterns} *) val occur_meta_pattern : constr_pattern -> bool val subst_pattern : substitution -> constr_pattern -> constr_pattern exception BoundPattern (** [head_pattern_bound t] extracts the head variable/constant of the type [t] or raises [BoundPattern] (even if a sort); it raises an anomaly if [t] is an abstraction *) val head_pattern_bound : constr_pattern -> global_reference (** [head_of_constr_reference c] assumes [r] denotes a reference and returns its label; raises an anomaly otherwise *) val head_of_constr_reference : Term.constr -> global_reference (** [pattern_of_constr c] translates a term [c] with metavariables into a pattern; currently, no destructor (Cases, Fix, Cofix) and no existential variable are allowed in [c] *) val pattern_of_constr : Evd.evar_map -> constr -> named_context * constr_pattern (** [pattern_of_glob_constr l c] translates a term [c] with metavariables into a pattern; variables bound in [l] are replaced by the pattern to which they are bound *) val pattern_of_glob_constr : glob_constr -> patvar list * constr_pattern val instantiate_pattern : Evd.evar_map -> (identifier * (identifier list * constr)) list -> constr_pattern -> constr_pattern val lift_pattern : int -> constr_pattern -> constr_pattern coq-8.4pl2/pretyping/reductionops.mli0000640000175000001440000002142312010532755017073 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a stack -> 'a stack val append_stack_list : 'a list -> 'a stack -> 'a stack val decomp_stack : 'a stack -> ('a * 'a stack) option val list_of_stack : 'a stack -> 'a list val array_of_stack : 'a stack -> 'a array val stack_assign : 'a stack -> int -> 'a -> 'a stack val stack_args_size : 'a stack -> int val app_stack : constr * constr stack -> constr val stack_tail : int -> 'a stack -> 'a stack val stack_nth : 'a stack -> int -> 'a (************************************************************************) type state = constr * constr stack type contextual_reduction_function = env -> evar_map -> constr -> constr type reduction_function = contextual_reduction_function type local_reduction_function = evar_map -> constr -> constr type contextual_stack_reduction_function = env -> evar_map -> constr -> constr * constr list type stack_reduction_function = contextual_stack_reduction_function type local_stack_reduction_function = evar_map -> constr -> constr * constr list type contextual_state_reduction_function = env -> evar_map -> state -> state type state_reduction_function = contextual_state_reduction_function type local_state_reduction_function = evar_map -> state -> state (** Removes cast and put into applicative form *) val 
whd_stack : local_stack_reduction_function (** For compatibility: alias for whd\_stack *) val whd_castapp_stack : local_stack_reduction_function (** {6 Reduction Function Operators } *) val strong : reduction_function -> reduction_function val local_strong : local_reduction_function -> local_reduction_function val strong_prodspine : local_reduction_function -> local_reduction_function (*i val stack_reduction_of_reduction : 'a reduction_function -> 'a state_reduction_function i*) val stacklam : (state -> 'a) -> constr list -> constr -> constr stack -> 'a (** {6 Generic Optimized Reduction Function using Closures } *) val clos_norm_flags : Closure.RedFlags.reds -> reduction_function (** Same as [(strong whd_beta[delta][iota])], but much faster on big terms *) val nf_beta : local_reduction_function val nf_betaiota : local_reduction_function val nf_betadeltaiota : reduction_function val nf_evar : evar_map -> constr -> constr val nf_betaiota_preserving_vm_cast : reduction_function (** Lazy strategy, weak head reduction *) val whd_evar : evar_map -> constr -> constr val whd_beta : local_reduction_function val whd_betaiota : local_reduction_function val whd_betaiotazeta : local_reduction_function val whd_betadeltaiota : contextual_reduction_function val whd_betadeltaiota_nolet : contextual_reduction_function val whd_betaetalet : local_reduction_function val whd_betalet : local_reduction_function val whd_beta_stack : local_stack_reduction_function val whd_betaiota_stack : local_stack_reduction_function val whd_betaiotazeta_stack : local_stack_reduction_function val whd_betadeltaiota_stack : contextual_stack_reduction_function val whd_betadeltaiota_nolet_stack : contextual_stack_reduction_function val whd_betaetalet_stack : local_stack_reduction_function val whd_betalet_stack : local_stack_reduction_function val whd_beta_state : local_state_reduction_function val whd_betaiota_state : local_state_reduction_function val whd_betaiotazeta_state : local_state_reduction_function val whd_betadeltaiota_state : contextual_state_reduction_function val whd_betadeltaiota_nolet_state : contextual_state_reduction_function val whd_betaetalet_state : local_state_reduction_function val whd_betalet_state : local_state_reduction_function (** {6 Head normal forms } *) val whd_delta_stack : stack_reduction_function val whd_delta_state : state_reduction_function val whd_delta : reduction_function val whd_betadelta_stack : stack_reduction_function val whd_betadelta_state : state_reduction_function val whd_betadelta : reduction_function val whd_betadeltaeta_stack : stack_reduction_function val whd_betadeltaeta_state : state_reduction_function val whd_betadeltaeta : reduction_function val whd_betadeltaiotaeta_stack : stack_reduction_function val whd_betadeltaiotaeta_state : state_reduction_function val whd_betadeltaiotaeta : reduction_function val whd_eta : constr -> constr val whd_zeta : constr -> constr (** Various reduction functions *) val safe_evar_value : evar_map -> existential -> constr option val beta_applist : constr * constr list -> constr val hnf_prod_app : env -> evar_map -> constr -> constr -> constr val hnf_prod_appvect : env -> evar_map -> constr -> constr array -> constr val hnf_prod_applist : env -> evar_map -> constr -> constr list -> constr val hnf_lam_app : env -> evar_map -> constr -> constr -> constr val hnf_lam_appvect : env -> evar_map -> constr -> constr array -> constr val hnf_lam_applist : env -> evar_map -> constr -> constr list -> constr val splay_prod : env -> evar_map -> constr -> (name * 
constr) list * constr val splay_lam : env -> evar_map -> constr -> (name * constr) list * constr val splay_arity : env -> evar_map -> constr -> (name * constr) list * sorts val sort_of_arity : env -> evar_map -> constr -> sorts val splay_prod_n : env -> evar_map -> int -> constr -> rel_context * constr val splay_lam_n : env -> evar_map -> int -> constr -> rel_context * constr val splay_prod_assum : env -> evar_map -> constr -> rel_context * constr val decomp_sort : env -> evar_map -> types -> sorts val is_sort : env -> evar_map -> types -> bool type 'a miota_args = { mP : constr; (** the result type *) mconstr : constr; (** the constructor *) mci : case_info; (** special info to re-build pattern *) mcargs : 'a list; (** the constructor's arguments *) mlf : 'a array } (** the branch code vector *) val reducible_mind_case : constr -> bool val reduce_mind_case : constr miota_args -> constr val find_conclusion : env -> evar_map -> constr -> (constr,constr) kind_of_term val is_arity : env -> evar_map -> constr -> bool val whd_programs : reduction_function (** [reduce_fix redfun fix stk] contracts [fix stk] if it is actually reducible; the structural argument is reduced by [redfun] *) type fix_reduction_result = NotReducible | Reduced of state val fix_recarg : fixpoint -> constr stack -> (int * constr) option val reduce_fix : local_state_reduction_function -> evar_map -> fixpoint -> constr stack -> fix_reduction_result (** {6 Querying the kernel conversion oracle: opaque/transparent constants } *) val is_transparent : 'a tableKey -> bool (** {6 Conversion Functions (uses closures, lazy strategy) } *) type conversion_test = constraints -> constraints val pb_is_equal : conv_pb -> bool val pb_equal : conv_pb -> conv_pb val sort_cmp : conv_pb -> sorts -> sorts -> conversion_test val is_conv : env -> evar_map -> constr -> constr -> bool val is_conv_leq : env -> evar_map -> constr -> constr -> bool val is_fconv : conv_pb -> env -> evar_map -> constr -> constr -> bool val is_trans_conv : transparent_state -> env -> evar_map -> constr -> constr -> bool val is_trans_conv_leq : transparent_state -> env -> evar_map -> constr -> constr -> bool val is_trans_fconv : conv_pb -> transparent_state -> env -> evar_map -> constr -> constr -> bool (** {6 Special-Purpose Reduction Functions } *) val whd_meta : evar_map -> constr -> constr val plain_instance : (metavariable * constr) list -> constr -> constr val instance :evar_map -> (metavariable * constr) list -> constr -> constr val head_unfold_under_prod : transparent_state -> reduction_function (** {6 Heuristic for Conversion with Evar } *) val whd_betaiota_deltazeta_for_iota_state : transparent_state -> state_reduction_function (** {6 Meta-related reduction functions } *) val meta_instance : evar_map -> constr freelisted -> constr val nf_meta : evar_map -> constr -> constr val meta_reducible_instance : evar_map -> constr freelisted -> constr coq-8.4pl2/pretyping/term_dnet.mli0000640000175000001440000000657012010532755016344 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> int (** how to substitute them for storage *) val subst : substitution -> t -> t (** how to recover the term from the identifier *) val constr_of : t -> constr end (** Options : *) module type OPT = sig (** pre-treatment to terms before adding or searching *) val reduce : constr -> constr (** direction of post-filtering w.r.t sort subtyping : - true means query <= terms in the 
structure - false means terms <= query *) val direction : bool end module type S = sig type t type ident (** results of filtering : identifier, a context (term with Evar hole) and the substitution in that context*) type result = ident * (constr*existential_key) * Termops.subst val empty : t (** [add c i dn] adds the binding [(c,i)] to [dn]. [c] can be a closed term or a pattern (with untyped Evars). No Metas accepted *) val add : constr -> ident -> t -> t (** merge of dnets. Faster than re-adding all terms *) val union : t -> t -> t val subst : substitution -> t -> t (* * High-level primitives describing specific search problems *) (** [search_pattern dn c] returns all terms/patterns in dn matching/matched by c *) val search_pattern : t -> constr -> result list (** [search_concl dn c] returns all matches under products and letins, i.e. it finds subterms whose conclusion matches c. The complexity depends only on c ! *) val search_concl : t -> constr -> result list (** [search_head_concl dn c] matches under products and applications heads. Finds terms of the form [forall H_1...H_n, C t_1...t_n] where C matches c *) val search_head_concl : t -> constr -> result list (** [search_eq_concl dn eq c] searches terms of the form [forall H1...Hn, eq _ X1 X2] where either X1 or X2 matches c *) val search_eq_concl : t -> constr -> constr -> result list (** [find_all dn] returns all idents contained in dn *) val find_all : t -> ident list val map : (ident -> ident) -> t -> t end module Make : functor (Ident : IDENT) -> functor (Opt : OPT) -> S with type ident = Ident.t coq-8.4pl2/pretyping/pretyping.mli0000640000175000001440000001062212010532755016375 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* env -> int list list -> rec_declaration -> int array type typing_constraint = OfType of types option | IsType type var_map = (identifier * Pattern.constr_under_binders) list type unbound_ltac_var_map = (identifier * identifier option) list type ltac_var_map = var_map * unbound_ltac_var_map type glob_constr_ltac_closure = ltac_var_map * glob_constr type pure_open_constr = evar_map * constr module type S = sig module Cases : Cases.S (** Allow references to syntaxically inexistent variables (i.e., if applied on an inductive) *) val allow_anonymous_refs : bool ref (** Generic call to the interpreter from glob_constr to open_constr, leaving unresolved holes as evars and returning the typing contexts of these evars. Work as [understand_gen] for the rest. *) val understand_tcc : ?resolve_classes:bool -> evar_map -> env -> ?expected_type:types -> glob_constr -> open_constr val understand_tcc_evars : ?fail_evar:bool -> ?resolve_classes:bool -> evar_map ref -> env -> typing_constraint -> glob_constr -> constr (** More general entry point with evars from ltac *) (** Generic call to the interpreter from glob_constr to constr, failing unresolved holes in the glob_constr cannot be instantiated. In [understand_ltac expand_evars sigma env ltac_env constraint c], resolve_classes : launch typeclass resolution after typechecking. 
expand_evars : expand inferred evars by their value if any sigma : initial set of existential variables (typically dependent subgoals) ltac_env : partial substitution of variables (used for the tactic language) constraint : tell if interpreted as a possibly constrained term or a type *) val understand_ltac : ?resolve_classes:bool -> bool -> evar_map -> env -> ltac_var_map -> typing_constraint -> glob_constr -> pure_open_constr (** Standard call to get a constr from a glob_constr, resolving implicit args *) val understand : evar_map -> env -> ?expected_type:Term.types -> glob_constr -> constr (** Idem but the glob_constr is intended to be a type *) val understand_type : evar_map -> env -> glob_constr -> constr (** A generalization of the two previous case *) val understand_gen : typing_constraint -> evar_map -> env -> glob_constr -> constr (** Idem but returns the judgment of the understood term *) val understand_judgment : evar_map -> env -> glob_constr -> unsafe_judgment (** Idem but do not fail on unresolved evars *) val understand_judgment_tcc : evar_map ref -> env -> glob_constr -> unsafe_judgment (**/**) (** Internal of Pretyping... *) val pretype : type_constraint -> env -> evar_map ref -> ltac_var_map -> glob_constr -> unsafe_judgment val pretype_type : val_constraint -> env -> evar_map ref -> ltac_var_map -> glob_constr -> unsafe_type_judgment val pretype_gen : bool -> bool -> bool -> evar_map ref -> env -> ltac_var_map -> typing_constraint -> glob_constr -> constr (**/**) end module Pretyping_F (C : Coercion.S) : S module Default : S (** To embed constr in glob_constr *) val constr_in : constr -> Dyn.t val constr_out : Dyn.t -> constr val interp_sort : glob_sort -> sorts val interp_elimination_sort : glob_sort -> sorts_family (** Last chance for solving evars, possibly using external solver *) val solve_remaining_evars : bool -> bool -> (env -> evar_map -> existential -> constr) -> env -> evar_map -> pure_open_constr -> pure_open_constr coq-8.4pl2/pretyping/evarutil.ml0000640000175000001440000023710512122677614016055 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (match existential_opt_value sigma ev with | None -> raise (Uninstantiated_evar evk) | Some c -> flush_and_check_evars sigma c) | _ -> map_constr (flush_and_check_evars sigma) c let nf_evar = Pretype_errors.nf_evar let j_nf_evar = Pretype_errors.j_nf_evar let jl_nf_evar = Pretype_errors.jl_nf_evar let jv_nf_evar = Pretype_errors.jv_nf_evar let tj_nf_evar = Pretype_errors.tj_nf_evar let nf_named_context_evar sigma ctx = Sign.map_named_context (Reductionops.nf_evar sigma) ctx let nf_rel_context_evar sigma ctx = Sign.map_rel_context (Reductionops.nf_evar sigma) ctx let nf_env_evar sigma env = let nc' = nf_named_context_evar sigma (Environ.named_context env) in let rel' = nf_rel_context_evar sigma (Environ.rel_context env) in push_rel_context rel' (reset_with_named_context (val_of_named_context nc') env) let nf_evar_info evc info = { info with evar_concl = Reductionops.nf_evar evc info.evar_concl; evar_hyps = map_named_val (Reductionops.nf_evar evc) info.evar_hyps; evar_body = match info.evar_body with | Evar_empty -> Evar_empty | Evar_defined c -> Evar_defined (Reductionops.nf_evar evc c) } let nf_evars evm = Evd.fold (fun ev evi evm' -> Evd.add evm' ev (nf_evar_info evm evi)) evm Evd.empty let nf_evars_undefined evm = Evd.fold_undefined (fun ev evi evm' -> Evd.add evm' ev (nf_evar_info evm evi)) evm (defined_evars 
evm) let nf_evar_map evd = Evd.evars_reset_evd (nf_evars evd) evd let nf_evar_map_undefined evd = Evd.evars_reset_evd (nf_evars_undefined evd) evd (*-------------------*) (* Auxiliary functions for the conversion algorithms modulo evars *) let has_undefined_evars_or_sorts evd t = let rec has_ev t = match kind_of_term t with | Evar (ev,args) -> (match evar_body (Evd.find evd ev) with | Evar_defined c -> has_ev c; Array.iter has_ev args | Evar_empty -> raise NotInstantiatedEvar) | Sort s when is_sort_variable evd s -> raise Not_found | _ -> iter_constr has_ev t in try let _ = has_ev t in false with (Not_found | NotInstantiatedEvar) -> true let is_ground_term evd t = not (has_undefined_evars_or_sorts evd t) let is_ground_env evd env = let is_ground_decl = function (_,Some b,_) -> is_ground_term evd b | _ -> true in List.for_all is_ground_decl (rel_context env) && List.for_all is_ground_decl (named_context env) (* Memoization is safe since evar_map and environ are applicative structures *) let is_ground_env = memo1_2 is_ground_env (* Return the head evar if any *) exception NoHeadEvar let head_evar = let rec hrec c = match kind_of_term c with | Evar (evk,_) -> evk | Case (_,_,c,_) -> hrec c | App (c,_) -> hrec c | Cast (c,_,_) -> hrec c | _ -> raise NoHeadEvar in hrec (* Expand head evar if any (currently consider only applications but I guess it should consider Case too) *) let whd_head_evar_stack sigma c = let rec whrec (c, l as s) = match kind_of_term c with | Evar (evk,args as ev) when Evd.is_defined sigma evk -> whrec (existential_value sigma ev, l) | Cast (c,_,_) -> whrec (c, l) | App (f,args) -> whrec (f, Array.fold_right (fun a l -> a::l) args l) | _ -> s in whrec (c, []) let whd_head_evar sigma c = applist (whd_head_evar_stack sigma c) let noccur_evar env evd evk c = let rec occur_rec k c = match kind_of_term c with | Evar (evk',args' as ev') -> (match safe_evar_value evd ev' with | Some c -> occur_rec k c | None -> if evk = evk' then raise Occur else Array.iter (occur_rec k) args') | Rel i when i > k -> (match pi2 (Environ.lookup_rel (i-k) env) with | None -> () | Some b -> occur_rec k (lift i b)) | _ -> iter_constr_with_binders succ occur_rec k c in try occur_rec 0 c; true with Occur -> false let normalize_evar evd ev = match kind_of_term (whd_evar evd (mkEvar ev)) with | Evar (evk,args) -> (evk,args) | _ -> assert false (**********************) (* Creating new metas *) (**********************) (* Generator of metavariables *) let new_meta = let meta_ctr = ref 0 in Summary.declare_summary "meta counter" { Summary.freeze_function = (fun () -> !meta_ctr); Summary.unfreeze_function = (fun n -> meta_ctr := n); Summary.init_function = (fun () -> meta_ctr := 0) }; fun () -> incr meta_ctr; !meta_ctr let mk_new_meta () = mkMeta(new_meta()) let collect_evars emap c = let rec collrec acc c = match kind_of_term c with | Evar (evk,_) -> if Evd.is_undefined emap evk then evk::acc else (* No recursion on the evar instantiation *) acc | _ -> fold_constr collrec acc c in list_uniquize (collrec [] c) let push_dependent_evars sigma emap = Evd.fold_undefined (fun ev {evar_concl = ccl} (sigma',emap') -> List.fold_left (fun (sigma',emap') ev -> (Evd.add sigma' ev (Evd.find emap' ev),Evd.remove emap' ev)) (sigma',emap') (collect_evars emap' ccl)) emap (sigma,emap) let push_duplicated_evars sigma emap c = let rec collrec (one,(sigma,emap) as acc) c = match kind_of_term c with | Evar (evk,_) when not (Evd.mem sigma evk) -> if List.mem evk one then let sigma' = Evd.add sigma evk (Evd.find emap evk) in let 
emap' = Evd.remove emap evk in (one,(sigma',emap')) else (evk::one,(sigma,emap)) | _ -> fold_constr collrec acc c in snd (collrec ([],(sigma,emap)) c) (* replaces a mapping of existentials into a mapping of metas. Problem if an evar appears in the type of another one (pops anomaly) *) let evars_to_metas sigma (emap, c) = let emap = nf_evar_map_undefined emap in let sigma',emap' = push_dependent_evars sigma emap in let sigma',emap' = push_duplicated_evars sigma' emap' c in (* if an evar has been instantiated in [emap] (as part of typing [c]) then it is instantiated in [sigma]. *) let repair_evars sigma emap = fold_undefined begin fun ev _ sigma' -> try let info = find emap ev in match evar_body info with | Evar_empty -> sigma' | Evar_defined body -> define ev body sigma' with Not_found -> sigma' end sigma sigma in let sigma' = repair_evars sigma' emap in let change_exist evar = let ty = nf_betaiota emap (existential_type emap evar) in let n = new_meta() in mkCast (mkMeta n, DEFAULTcast, ty) in let rec replace c = match kind_of_term c with | Evar (evk,_ as ev) when Evd.mem emap' evk -> change_exist ev | _ -> map_constr replace c in (sigma', replace c) (* The list of non-instantiated existential declarations (order is important) *) let non_instantiated sigma = let listev = Evd.undefined_list sigma in List.map (fun (ev,evi) -> (ev,nf_evar_info sigma evi)) listev (************************) (* Manipulating filters *) (************************) let apply_subfilter filter subfilter = fst (List.fold_right (fun oldb (l,filter) -> if oldb then List.hd filter::l,List.tl filter else (false::l,filter)) filter ([], List.rev subfilter)) let extract_subfilter initial_filter refined_filter = snd (list_filter2 (fun b1 b2 -> b1) (initial_filter,refined_filter)) (**********************) (* Creating new evars *) (**********************) (* Generator of existential names *) let new_untyped_evar = let evar_ctr = ref 0 in Summary.declare_summary "evar counter" { Summary.freeze_function = (fun () -> !evar_ctr); Summary.unfreeze_function = (fun n -> evar_ctr := n); Summary.init_function = (fun () -> evar_ctr := 0) }; fun () -> incr evar_ctr; existential_of_int !evar_ctr (*------------------------------------* * functional operations on evar sets * *------------------------------------*) (* [push_rel_context_to_named_context] builds the defining context and the * initial instance of an evar. If the evar is to be used in context * * Gamma = a1 ... an xp ... x1 * \- named part -/ \- de Bruijn part -/ * * then the x1...xp are turned into variables so that the evar is declared in * context * * a1 ... an xp ... x1 * \----------- named part ------------/ * * but used applied to the initial instance "a1 ... an Rel(p) ... Rel(1)" * so that ev[a1:=a1 ... an:=an xp:=Rel(p) ... x1:=Rel(1)] is correctly typed * in context Gamma. * * Remark 1: The instance is reverted in practice (i.e. Rel(1) comes first) * Remark 2: If some of the ai or xj are definitions, we keep them in the * instance. This is necessary so that no unfolding of local definitions * happens when inferring implicit arguments (consider e.g. the problem * "x:nat; x':=x; f:forall y, y=y -> Prop |- f _ (refl_equal x')" which * produces the equation "?y[x,x']=?y[x,x']" =? "x'=x'": we want * the hole to be instantiated by x', not by x (which would have been * the case in [invert_definition] if x' had disappeared from the instance). 
* Note that at any time, if, in some context env, the instance of * declaration x:A is t and the instance of definition x':=phi(x) is u, then * we have the property that u and phi(t) are convertible in env. *) let push_rel_context_to_named_context env typ = (* compute the instances relative to the named context and rel_context *) let ids = List.map pi1 (named_context env) in let inst_vars = List.map mkVar ids in let inst_rels = List.rev (rel_list 0 (nb_rel env)) in (* move the rel context to a named context and extend the named instance *) (* with vars of the rel context *) (* We do keep the instances corresponding to local definition (see above) *) let (subst, _, env) = Sign.fold_rel_context (fun (na,c,t) (subst, avoid, env) -> let id = next_name_away na avoid in let d = (id,Option.map (substl subst) c,substl subst t) in (mkVar id :: subst, id::avoid, push_named d env)) (rel_context env) ~init:([], ids, env) in (named_context_val env, substl subst typ, inst_rels@inst_vars, subst) (*------------------------------------* * Entry points to define new evars * *------------------------------------*) let default_source = (dummy_loc,InternalHole) let new_pure_evar evd sign ?(src=default_source) ?filter ?candidates typ = let newevk = new_untyped_evar() in let evd = evar_declare sign newevk typ ~src ?filter ?candidates evd in (evd,newevk) let new_evar_instance sign evd typ ?src ?filter ?candidates instance = assert (not !Flags.debug || list_distinct (ids_of_named_context (named_context_of_val sign))); let evd,newevk = new_pure_evar evd sign ?src ?filter ?candidates typ in (evd,mkEvar (newevk,Array.of_list instance)) (* [new_evar] declares a new existential in an env env with type typ *) (* Converting the env into the sign of the evar to define *) let new_evar evd env ?src ?filter ?candidates typ = let sign,typ',instance,subst = push_rel_context_to_named_context env typ in let candidates = Option.map (List.map (substl subst)) candidates in let instance = match filter with | None -> instance | Some filter -> list_filter_with filter instance in new_evar_instance sign evd typ' ?src ?filter ?candidates instance let new_type_evar ?src ?filter evd env = let evd', s = new_sort_variable evd in new_evar evd' env ?src ?filter (mkSort s) (* The same using side-effect *) let e_new_evar evdref env ?(src=(dummy_loc,InternalHole)) ?filter ?candidates ty = let (evd',ev) = new_evar !evdref env ~src:src ?filter ?candidates ty in evdref := evd'; ev (*------------------------------------* * Restricting existing evars * *------------------------------------*) let restrict_evar_key evd evk filter candidates = if filter = None && candidates = None then evd,evk else let evi = Evd.find_undefined evd evk in let oldfilter = evar_filter evi in if filter = Some oldfilter && candidates = None then evd,evk else let filter = match filter with | None -> evar_filter evi | Some filter -> filter in let candidates = match candidates with None -> evi.evar_candidates | _ -> candidates in let ccl = evi.evar_concl in let sign = evar_hyps evi in let src = evi.evar_source in let evd,newevk = new_pure_evar evd sign ccl ~src ~filter ?candidates in let ctxt = snd (list_filter2 (fun b c -> b) (filter,evar_context evi)) in let id_inst = Array.of_list (List.map (fun (id,_,_) -> mkVar id) ctxt) in Evd.define evk (mkEvar(newevk,id_inst)) evd,newevk (* Restrict an applied evar and returns its restriction in the same context *) let restrict_applied_evar evd (evk,argsv) filter candidates = let evd,newevk = restrict_evar_key evd evk filter candidates 
in let newargsv = match filter with | None -> (* optim *) argsv | Some filter -> let evi = Evd.find evd evk in let subfilter = extract_subfilter (evar_filter evi) filter in array_filter_with subfilter argsv in evd,(newevk,newargsv) (* Restrict an evar in the current evar_map *) let restrict_evar evd evk filter candidates = fst (restrict_evar_key evd evk filter candidates) (* Restrict an evar in the current evar_map *) let restrict_instance evd evk filter argsv = match filter with None -> argsv | Some filter -> let evi = Evd.find evd evk in array_filter_with (extract_subfilter (evar_filter evi) filter) argsv (* This assumes an evar with identity instance and generalizes it over only the De Bruijn part of the context *) let generalize_evar_over_rels sigma (ev,args) = let evi = Evd.find sigma ev in let sign = named_context_of_val evi.evar_hyps in List.fold_left2 (fun (c,inst as x) a d -> if isRel a then (mkNamedProd_or_LetIn d c,a::inst) else x) (evi.evar_concl,[]) (Array.to_list args) sign (***************************************) (* Managing chains of local definitons *) (***************************************) (* Expand rels and vars that are bound to other rels or vars so that dependencies in variables are canonically associated to the most ancient variable in its family of aliased variables *) let compute_var_aliases sign = List.fold_right (fun (id,b,c) aliases -> match b with | Some t -> (match kind_of_term t with | Var id' -> let aliases_of_id = try Idmap.find id' aliases with Not_found -> [] in Idmap.add id (aliases_of_id@[t]) aliases | _ -> Idmap.add id [t] aliases) | None -> aliases) sign Idmap.empty let compute_rel_aliases var_aliases rels = snd (List.fold_right (fun (_,b,t) (n,aliases) -> (n-1, match b with | Some t -> (match kind_of_term t with | Var id' -> let aliases_of_n = try Idmap.find id' var_aliases with Not_found -> [] in Intmap.add n (aliases_of_n@[t]) aliases | Rel p -> let aliases_of_n = try Intmap.find (p+n) aliases with Not_found -> [] in Intmap.add n (aliases_of_n@[mkRel (p+n)]) aliases | _ -> Intmap.add n [lift n t] aliases) | None -> aliases)) rels (List.length rels,Intmap.empty)) let make_alias_map env = (* We compute the chain of aliases for each var and rel *) let var_aliases = compute_var_aliases (named_context env) in let rel_aliases = compute_rel_aliases var_aliases (rel_context env) in (var_aliases,rel_aliases) let lift_aliases n (var_aliases,rel_aliases as aliases) = if n = 0 then aliases else (var_aliases, Intmap.fold (fun p l -> Intmap.add (p+n) (List.map (lift n) l)) rel_aliases Intmap.empty) let get_alias_chain_of aliases x = match kind_of_term x with | Rel n -> (try Intmap.find n (snd aliases) with Not_found -> []) | Var id -> (try Idmap.find id (fst aliases) with Not_found -> []) | _ -> [] let normalize_alias_opt aliases x = match get_alias_chain_of aliases x with | [] -> None | a::_ when isRel a or isVar a -> Some a | [_] -> None | _::a::_ -> Some a let normalize_alias aliases x = match normalize_alias_opt aliases x with | Some a -> a | None -> x let normalize_alias_var var_aliases id = destVar (normalize_alias (var_aliases,Intmap.empty) (mkVar id)) let extend_alias (_,b,_) (var_aliases,rel_aliases) = let rel_aliases = Intmap.fold (fun n l -> Intmap.add (n+1) (List.map (lift 1) l)) rel_aliases Intmap.empty in let rel_aliases = match b with | Some t -> (match kind_of_term t with | Var id' -> let aliases_of_binder = try Idmap.find id' var_aliases with Not_found -> [] in Intmap.add 1 (aliases_of_binder@[t]) rel_aliases | Rel p -> let aliases_of_binder 
= try Intmap.find (p+1) rel_aliases with Not_found -> [] in Intmap.add 1 (aliases_of_binder@[mkRel (p+1)]) rel_aliases | _ -> Intmap.add 1 [lift 1 t] rel_aliases) | None -> rel_aliases in (var_aliases, rel_aliases) let expand_alias_once aliases x = match get_alias_chain_of aliases x with | [] -> None | l -> Some (list_last l) let rec expansions_of_var aliases x = match get_alias_chain_of aliases x with | [] -> [x] | a::_ as l when isRel a || isVar a -> x :: List.rev l | _::l -> x :: List.rev l let expansion_of_var aliases x = match get_alias_chain_of aliases x with | [] -> x | a::_ -> a let rec expand_vars_in_term_using aliases t = match kind_of_term t with | Rel _ | Var _ -> normalize_alias aliases t | _ -> map_constr_with_full_binders extend_alias expand_vars_in_term_using aliases t let expand_vars_in_term env = expand_vars_in_term_using (make_alias_map env) let free_vars_and_rels_up_alias_expansion aliases c = let acc1 = ref Intset.empty and acc2 = ref Idset.empty in let cache_rel = ref Intset.empty and cache_var = ref Idset.empty in let is_in_cache depth = function | Rel n -> Intset.mem (n-depth) !cache_rel | Var s -> Idset.mem s !cache_var | _ -> false in let put_in_cache depth = function | Rel n -> cache_rel := Intset.add (n-depth) !cache_rel | Var s -> cache_var := Idset.add s !cache_var | _ -> () in let rec frec (aliases,depth) c = match kind_of_term c with | Rel _ | Var _ as ck -> if is_in_cache depth ck then () else begin put_in_cache depth ck; let c = expansion_of_var aliases c in match kind_of_term c with | Var id -> acc2 := Idset.add id !acc2 | Rel n -> if n >= depth+1 then acc1 := Intset.add (n-depth) !acc1 | _ -> frec (aliases,depth) c end | Const _ | Ind _ | Construct _ -> acc2 := List.fold_right Idset.add (vars_of_global (Global.env()) c) !acc2 | _ -> iter_constr_with_full_binders (fun d (aliases,depth) -> (extend_alias d aliases,depth+1)) frec (aliases,depth) c in frec (aliases,0) c; (!acc1,!acc2) (************************************) (* Removing a dependency in an evar *) (************************************) type clear_dependency_error = | OccurHypInSimpleClause of identifier option | EvarTypingBreak of existential exception ClearDependencyError of identifier * clear_dependency_error open Store.Field let cleared = Store.field () let rec check_and_clear_in_constr evdref err ids c = (* returns a new constr where all the evars have been 'cleaned' (ie the hypotheses ids have been removed from the contexts of evars) *) let check id' = if List.mem id' ids then raise (ClearDependencyError (id',err)) in match kind_of_term c with | Var id' -> check id'; c | ( Const _ | Ind _ | Construct _ ) -> let vars = Environ.vars_of_global (Global.env()) c in List.iter check vars; c | Evar (evk,l as ev) -> if Evd.is_defined !evdref evk then (* If evk is already defined we replace it by its definition *) let nc = whd_evar !evdref c in (check_and_clear_in_constr evdref err ids nc) else (* We check for dependencies to elements of ids in the evar_info corresponding to e and in the instance of arguments. 
Concurrently, we build a new evar corresponding to e where hypotheses of ids have been removed *) let evi = Evd.find_undefined !evdref evk in let ctxt = Evd.evar_filtered_context evi in let (nhyps,nargs,rids) = List.fold_right2 (fun (rid,ob,c as h) a (hy,ar,ri) -> (* Check if some id to clear occurs in the instance a of rid in ev and remember the dependency *) match List.filter (fun id -> List.mem id ids) (Idset.elements (collect_vars a)) with | id :: _ -> (hy,ar,(rid,id)::ri) | _ -> (* Check if some rid to clear in the context of ev has dependencies in another hyp of the context of ev and transitively remember the dependency *) match List.filter (fun (id,_) -> occur_var_in_decl (Global.env()) id h) ri with | (_,id') :: _ -> (hy,ar,(rid,id')::ri) | _ -> (* No dependency at all, we can keep this ev's context hyp *) (h::hy,a::ar,ri)) ctxt (Array.to_list l) ([],[],[]) in (* Check if some rid to clear in the context of ev has dependencies in the type of ev and adjust the source of the dependency *) let nconcl = try check_and_clear_in_constr evdref (EvarTypingBreak ev) (List.map fst rids) (evar_concl evi) with ClearDependencyError (rid,err) -> raise (ClearDependencyError (List.assoc rid rids,err)) in if rids = [] then c else begin let env = Sign.fold_named_context push_named nhyps ~init:(empty_env) in let ev'= e_new_evar evdref env ~src:(evar_source evk !evdref) nconcl in evdref := Evd.define evk ev' !evdref; let (evk',_) = destEvar ev' in (* spiwack: hacking session to mark the old [evk] as having been "cleared" *) let evi = Evd.find !evdref evk in let extra = evi.evar_extra in let extra' = cleared.set true extra in let evi' = { evi with evar_extra = extra' } in evdref := Evd.add !evdref evk evi' ; (* spiwack: /hacking session *) mkEvar(evk', Array.of_list nargs) end | _ -> map_constr (check_and_clear_in_constr evdref err ids) c let clear_hyps_in_evi evdref hyps concl ids = (* clear_hyps_in_evi erases hypotheses ids in hyps, checking if some hypothesis does not depend on a element of ids, and erases ids in the contexts of the evars occuring in evi *) let nconcl = check_and_clear_in_constr evdref (OccurHypInSimpleClause None) ids concl in let nhyps = let check_context (id,ob,c) = let err = OccurHypInSimpleClause (Some id) in (id, Option.map (check_and_clear_in_constr evdref err ids) ob, check_and_clear_in_constr evdref err ids c) in let check_value vk = match !vk with | VKnone -> vk | VKvalue (v,d) -> if (List.for_all (fun e -> not (Idset.mem e d)) ids) then (* v does depend on any of ids, it's ok *) vk else (* v depends on one of the cleared hyps: we forget the computed value *) ref VKnone in remove_hyps ids check_context check_value hyps in (nhyps,nconcl) (********************************) (* Managing pattern-unification *) (********************************) let rec expand_and_check_vars aliases = function | [] -> [] | a::l when isRel a or isVar a -> let a = expansion_of_var aliases a in if isRel a or isVar a then a :: expand_and_check_vars aliases l else raise Exit | _ -> raise Exit module Constrhash = Hashtbl.Make (struct type t = constr let equal = eq_constr let hash = hash_constr end) let rec constr_list_distinct l = let visited = Constrhash.create 23 in let rec loop = function | h::t -> if Constrhash.mem visited h then false else (Constrhash.add visited h h; loop t) | [] -> true in loop l let get_actual_deps aliases l t = if occur_meta_or_existential t then (* Probably no restrictions on allowed vars in presence of evars *) l else (* Probably strong restrictions coming from t being 
evar-closed *) let (fv_rels,fv_ids) = free_vars_and_rels_up_alias_expansion aliases t in List.filter (fun c -> match kind_of_term c with | Var id -> Idset.mem id fv_ids | Rel n -> Intset.mem n fv_rels | _ -> assert false) l let remove_instance_local_defs evd evk args = let evi = Evd.find evd evk in let rec aux = function | (_,Some _,_)::sign, a::args -> aux (sign,args) | (_,None,_)::sign, a::args -> a::aux (sign,args) | [], [] -> [] | _ -> assert false in aux (evar_filtered_context evi, args) (* Check if an applied evar "?X[args] l" is a Miller's pattern *) let find_unification_pattern_args env l t = if List.for_all (fun x -> isRel x || isVar x) l (* common failure case *) then let aliases = make_alias_map env in match (try Some (expand_and_check_vars aliases l) with Exit -> None) with | Some l as x when constr_list_distinct (get_actual_deps aliases l t) -> x | _ -> None else None let is_unification_pattern_meta env nb m l t = (* Variables from context and rels > nb are implicitly all there *) (* so we need to be a rel <= nb *) if List.for_all (fun x -> isRel x && destRel x <= nb) l then match find_unification_pattern_args env l t with | Some _ as x when not (dependent (mkMeta m) t) -> x | _ -> None else None let is_unification_pattern_evar env evd (evk,args) l t = if List.for_all (fun x -> isRel x || isVar x) l & noccur_evar env evd evk t then let args = remove_instance_local_defs evd evk (Array.to_list args) in let n = List.length args in match find_unification_pattern_args env (args @ l) t with | Some l -> Some (list_skipn n l) | _ -> None else None let is_unification_pattern_pure_evar env evd (evk,args) t = is_unification_pattern_evar env evd (evk,args) [] t <> None let is_unification_pattern (env,nb) evd f l t = match kind_of_term f with | Meta m -> is_unification_pattern_meta env nb m l t | Evar ev -> is_unification_pattern_evar env evd ev l t | _ -> None (* From a unification problem "?X l = c", build "\x1...xn.(term1 l2)" (pattern unification). It is assumed that l is made of rel's that are distinct and not bound to aliases. *) (* It is also assumed that c does not contain metas because metas *implicitly* depend on Vars but lambda abstraction will not reflect this dependency: ?X x = ?1 (?1 is a meta) will return \_.?1 while it should return \y. ?1{x\y} (non constant function if ?1 depends on x) (BB) *) let solve_pattern_eqn env l c = let c' = List.fold_right (fun a c -> let c' = subst_term (lift 1 a) (lift 1 c) in match kind_of_term a with (* Rem: if [a] links to a let-in, do as if it were an assumption *) | Rel n -> let d = map_rel_declaration (lift n) (lookup_rel n env) in mkLambda_or_LetIn d c' | Var id -> let d = lookup_named id env in mkNamedLambda_or_LetIn d c' | _ -> assert false) l c in (* Warning: we may miss some opportunity to eta-reduce more since c' is not in normal form *) whd_eta c' (*****************************************) (* Refining/solving unification problems *) (*****************************************) (* Knowing that [Gamma |- ev : T] and that [ev] is applied to [args], * [make_projectable_subst ev args] builds the substitution [Gamma:=args]. * If a variable and an alias of it are bound to the same instance, we skip * the alias (we just use eq_constr -- instead of conv --, since anyway, * only instances that are variables -- or evars -- are later considered; * morever, we can bet that similar instances came at some time from * the very same substitution. The removal of aliased duplicates is * useful to ensure the uniqueness of a projection. 
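 * For instance (schematically), if the filtered context of [ev] is
 * [x:nat; y:=x; z:nat] and the instance is [a; a; b], then [x] and its alias
 * [y] are both bound to [a]; only one of the two bindings is kept, so that
 * projecting [a] back onto the context has a unique answer.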
*) let make_projectable_subst aliases sigma evi args = let sign = evar_filtered_context evi in let evar_aliases = compute_var_aliases sign in let (_,full_subst,cstr_subst) = List.fold_right (fun (id,b,c) (args,all,cstrs) -> match b,args with | None, a::rest -> let a = whd_evar sigma a in let cstrs = let a',args = decompose_app_vect a in match kind_of_term a' with | Construct cstr -> let l = try Constrmap.find cstr cstrs with Not_found -> [] in Constrmap.add cstr ((args,id)::l) cstrs | _ -> cstrs in (rest,Idmap.add id [a,normalize_alias_opt aliases a,id] all,cstrs) | Some c, a::rest -> let a = whd_evar sigma a in (match kind_of_term c with | Var id' -> let idc = normalize_alias_var evar_aliases id' in let sub = try Idmap.find idc all with Not_found -> [] in if List.exists (fun (c,_,_) -> eq_constr a c) sub then (rest,all,cstrs) else (rest, Idmap.add idc ((a,normalize_alias_opt aliases a,id)::sub) all, cstrs) | _ -> (rest,Idmap.add id [a,normalize_alias_opt aliases a,id] all,cstrs)) | _ -> anomaly "Instance does not match its signature") sign (array_rev_to_list args,Idmap.empty,Constrmap.empty) in (full_subst,cstr_subst) let make_pure_subst evi args = snd (List.fold_right (fun (id,b,c) (args,l) -> match args with | a::rest -> (rest, (id,a)::l) | _ -> anomaly "Instance does not match its signature") (evar_filtered_context evi) (array_rev_to_list args,[])) (*------------------------------------* * operations on the evar constraints * *------------------------------------*) (* We have a unification problem Σ; Γ |- ?e[u1..uq] = t : s where ?e is not yet * declared in Σ but yet known to be declarable in some context x1:T1..xq:Tq. * [define_evar_from_virtual_equation ... Γ Σ t (x1:T1..xq:Tq) .. (u1..uq) (x1..xq)] * declares x1:T1..xq:Tq |- ?e : s such that ?e[u1..uq] = t holds. *) let define_evar_from_virtual_equation define_fun env evd t_in_env sign filter inst_in_env = let ty_t_in_env = Retyping.get_type_of env evd t_in_env in let evd,evar_in_env = new_evar_instance sign evd ty_t_in_env ~filter inst_in_env in let t_in_env = whd_evar evd t_in_env in let evd = define_fun env evd (destEvar evar_in_env) t_in_env in let ids = List.map pi1 (named_context_of_val sign) in let inst_in_sign = List.map mkVar (list_filter_with filter ids) in let evar_in_sign = mkEvar (fst (destEvar evar_in_env), Array.of_list inst_in_sign) in (evd,whd_evar evd evar_in_sign) (* We have x1..xq |- ?e1 : τ and had to solve something like * Σ; Γ |- ?e1[u1..uq] = (...\y1 ... \yk ... c), where c is typically some * ?e2[v1..vn], hence flexible. We had to go through k binders and now * virtually have x1..xq, y1'..yk' | ?e1' : τ' and the equation * Γ, y1..yk |- ?e1'[u1..uq y1..yk] = c. * [materialize_evar Γ evd k (?e1[u1..uq]) τ'] extends Σ with the declaration * of ?e1' and returns both its instance ?e1'[x1..xq y1..yk] in an extension * of the context of e1 so that e1 can be instantiated by * (...\y1' ... \yk' ... ?e1'[x1..xq y1'..yk']), * and the instance ?e1'[u1..uq y1..yk] so that the remaining equation * ?e1'[u1..uq y1..yk] = c can be registered * * Note that, because invert_definition does not check types, we need to * guess the types of y1'..yn' by inverting the types of y1..yn along the * substitution u1..uq. 
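 * Schematically, for k = 1: from [x1..xq |- ?e1 : τ] and a remaining equation
 * [Γ, y:U |- ?e1'[u1..uq y] = c], a fresh evar [x1..xq, y':U' |- ?e1' : τ'] is
 * declared, where U' is obtained by inverting U along u1..uq, so that ?e1 can
 * later be instantiated by (...\y'... ?e1'[x1..xq y']).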
*) let materialize_evar define_fun env evd k (evk1,args1) ty_in_env = let evi1 = Evd.find_undefined evd evk1 in let env1,rel_sign = env_rel_context_chop k env in let sign1 = evar_hyps evi1 in let filter1 = evar_filter evi1 in let ids1 = List.map pi1 (named_context_of_val sign1) in let inst_in_sign = List.map mkVar (list_filter_with filter1 ids1) in let (sign2,filter2,inst2_in_env,inst2_in_sign,_,evd,_) = List.fold_right (fun (na,b,t_in_env as d) (sign,filter,inst_in_env,inst_in_sign,env,evd,avoid) -> let id = next_name_away na avoid in let evd,t_in_sign = define_evar_from_virtual_equation define_fun env evd t_in_env sign filter inst_in_env in let evd,b_in_sign = match b with | None -> evd,None | Some b -> let evd,b = define_evar_from_virtual_equation define_fun env evd b sign filter inst_in_env in evd,Some b in (push_named_context_val (id,b_in_sign,t_in_sign) sign,true::filter, (mkRel 1)::(List.map (lift 1) inst_in_env), (mkRel 1)::(List.map (lift 1) inst_in_sign), push_rel d env,evd,id::avoid)) rel_sign (sign1,filter1,Array.to_list args1,inst_in_sign,env1,evd,ids1) in let evd,ev2ty_in_sign = define_evar_from_virtual_equation define_fun env evd ty_in_env sign2 filter2 inst2_in_env in let evd,ev2_in_sign = new_evar_instance sign2 evd ev2ty_in_sign ~filter:filter2 inst2_in_sign in let ev2_in_env = (fst (destEvar ev2_in_sign), Array.of_list inst2_in_env) in (evd, ev2_in_sign, ev2_in_env) let restrict_upon_filter evd evk p args = let newfilter = List.map p args in if List.for_all (fun id -> id) newfilter then None else let oldfullfilter = evar_filter (Evd.find_undefined evd evk) in Some (apply_subfilter oldfullfilter newfilter) (* Inverting constructors in instances (common when inferring type of match) *) let find_projectable_constructor env evd cstr k args cstr_subst = try let l = Constrmap.find cstr cstr_subst in let args = Array.map (lift (-k)) args in let l = List.filter (fun (args',id) -> (* is_conv is maybe too strong (and source of useless computation) *) (* (at least expansion of aliases is needed) *) array_for_all2 (is_conv env evd) args args') l in List.map snd l with Not_found -> [] (* [find_projectable_vars env sigma y subst] finds all vars of [subst] * that project on [y]. It is able to find solutions to the following * two kinds of problems: * * - ?n[...;x:=y;...] = y * - ?n[...;x:=?m[args];...] = y with ?m[args] = y recursively solvable * * (see test-suite/success/Fixpoint.v for an example of application of * the second kind of problem). * * The seek for [y] is up to variable aliasing. In case of solutions that * differ only up to aliasing, the binding that requires the less * steps of alias reduction is kept. At the end, only one solution up * to aliasing is kept. * * [find_projectable_vars] also unifies against evars that themselves mention * [y] and recursively. 
* * In short, the following situations give the following solutions: * * problem evar ctxt soluce remark * z1; z2:=z1 |- ?ev[z1;z2] = z1 y1:A; y2:=y1 y1 \ thanks to defs kept in * z1; z2:=z1 |- ?ev[z1;z2] = z2 y1:A; y2:=y1 y2 / subst and preferring = * z1; z2:=z1 |- ?ev[z1] = z2 y1:A y1 thanks to expand_var * z1; z2:=z1 |- ?ev[z2] = z1 y1:A y1 thanks to expand_var * z3 |- ?ev[z3;z3] = z3 y1:A; y2:=y1 y2 see make_projectable_subst * * Remark: [find_projectable_vars] assumes that identical instances of * variables in the same set of aliased variables are already removed (see * [make_projectable_subst]) *) type evar_projection = | ProjectVar | ProjectEvar of existential * evar_info * identifier * evar_projection exception NotUnique exception NotUniqueInType of (identifier * evar_projection) list let rec assoc_up_to_alias sigma aliases y yc = function | [] -> raise Not_found | (c,cc,id)::l -> let c' = whd_evar sigma c in if eq_constr y c' then id else if l <> [] then assoc_up_to_alias sigma aliases y yc l else (* Last chance, we reason up to alias conversion *) match (if c == c' then cc else normalize_alias_opt aliases c') with | Some cc when eq_constr yc cc -> id | _ -> if eq_constr yc c then id else raise Not_found let rec find_projectable_vars with_evars aliases sigma y subst = let yc = normalize_alias aliases y in let is_projectable idc idcl subst' = (* First test if some [id] aliased to [idc] is bound to [y] in [subst] *) try let id = assoc_up_to_alias sigma aliases y yc idcl in (id,ProjectVar)::subst' with Not_found -> (* Then test if [idc] is (indirectly) bound in [subst] to some evar *) (* projectable on [y] *) if with_evars then let idcl' = List.filter (fun (c,_,id) -> isEvar c) idcl in match idcl' with | [c,_,id] -> begin let (evk,argsv as t) = destEvar c in let evi = Evd.find sigma evk in let subst,_ = make_projectable_subst aliases sigma evi argsv in let l = find_projectable_vars with_evars aliases sigma y subst in match l with | [id',p] -> (id,ProjectEvar (t,evi,id',p))::subst' | _ -> subst' end | [] -> subst' | _ -> anomaly "More than one non var in aliases class of evar instance" else subst' in Idmap.fold is_projectable subst [] (* [filter_solution] checks if one and only one possible projection exists * among a set of solutions to a projection problem *) let filter_solution = function | [] -> raise Not_found | (id,p)::_::_ -> raise NotUnique | [id,p] -> (mkVar id, p) let project_with_effects aliases sigma effects t subst = let c, p = filter_solution (find_projectable_vars false aliases sigma t subst) in effects := p :: !effects; c let rec find_solution_type evarenv = function | (id,ProjectVar)::l -> pi3 (lookup_named id evarenv) | [id,ProjectEvar _] -> (* bugged *) pi3 (lookup_named id evarenv) | (id,ProjectEvar _)::l -> find_solution_type evarenv l | [] -> assert false (* In case the solution to a projection problem requires the instantiation of * subsidiary evars, [do_projection_effects] performs them; it * also try to instantiate the type of those subsidiary evars if their * type is an evar too. * * Note: typing creates new evar problems, which induces a recursive dependency * with [define]. To avoid a too large set of recursive functions, we * pass [define] to [do_projection_effects] as a parameter. 
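 * For instance, a [ProjectEvar] step instantiates the subsidiary evar with
 * the projected variable; afterwards, if the expected type is not a sort and
 * the (instantiated) type of that evar is itself an undefined evar, the type
 * evar is instantiated as well.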
*) let rec do_projection_effects define_fun env ty evd = function | ProjectVar -> evd | ProjectEvar ((evk,argsv),evi,id,p) -> let evd = Evd.define evk (mkVar id) evd in (* TODO: simplify constraints involving evk *) let evd = do_projection_effects define_fun env ty evd p in let ty = whd_betadeltaiota env evd (Lazy.force ty) in if not (isSort ty) then (* Don't try to instantiate if a sort because if evar_concl is an evar it may commit to a univ level which is not the right one (however, regarding coercions, because t is obtained by unif, we know that no coercion can be inserted) *) let subst = make_pure_subst evi argsv in let ty' = replace_vars subst evi.evar_concl in let ty' = whd_evar evd ty' in if isEvar ty' then define_fun env evd (destEvar ty') ty else evd else evd (* Assuming Σ; Γ, y1..yk |- c, [invert_arg_from_subst Γ k Σ [x1:=u1..xn:=un] c] * tries to return φ(x1..xn) such that equation φ(u1..un) = c is valid. * The strategy is to imitate the structure of c and then to invert * the variables of c (i.e. rels or vars of Γ) using the algorithm * implemented by project_with_effects/find_projectable_vars. * It returns either a unique solution or says whether 0 or more than * 1 solutions is found. * * Precondition: Σ; Γ, y1..yk |- c /\ Σ; Γ |- u1..un * Postcondition: if φ(x1..xn) is returned then * Σ; Γ, y1..yk |- φ(u1..un) = c /\ x1..xn |- φ(x1..xn) * * The effects correspond to evars instantiated while trying to project. * * [invert_arg_from_subst] is used on instances of evars. Since the * evars are flexible, these instances are potentially erasable. This * is why we don't investigate whether evars in the instances of evars * are unifiable, to the contrary of [invert_definition]. *) type projectibility_kind = | NoUniqueProjection | UniqueProjection of constr * evar_projection list type projectibility_status = | CannotInvert | Invertible of projectibility_kind let invert_arg_from_subst evd aliases k0 subst_in_env_extended_with_k_binders c_in_env_extended_with_k_binders = let effects = ref [] in let rec aux k t = let t = whd_evar evd t in match kind_of_term t with | Rel i when i>k0+k -> aux' k (mkRel (i-k)) | Var id -> aux' k t | _ -> map_constr_with_binders succ aux k t and aux' k t = try project_with_effects aliases evd effects t subst_in_env_extended_with_k_binders with Not_found -> match expand_alias_once aliases t with | None -> raise Not_found | Some c -> aux k c in try let c = aux 0 c_in_env_extended_with_k_binders in Invertible (UniqueProjection (c,!effects)) with | Not_found -> CannotInvert | NotUnique -> Invertible NoUniqueProjection let invert_arg fullenv evd aliases k evk subst_in_env_extended_with_k_binders c_in_env_extended_with_k_binders = let res = invert_arg_from_subst evd aliases k subst_in_env_extended_with_k_binders c_in_env_extended_with_k_binders in match res with | Invertible (UniqueProjection (c,_)) when not (noccur_evar fullenv evd evk c) -> CannotInvert | _ -> res let effective_projections = map_succeed (function Invertible c -> c | _ -> failwith"") let instance_of_projection f env t evd projs = let ty = lazy (nf_evar evd (Retyping.get_type_of env evd t)) in match projs with | NoUniqueProjection -> raise NotUnique | UniqueProjection (c,effects) -> (List.fold_left (do_projection_effects f env ty) evd effects, c) exception NotEnoughInformationToInvert let extract_unique_projections projs = List.map (function | Invertible (UniqueProjection (c,_)) -> c | _ -> (* For instance, there are evars with non-invertible arguments and *) (* we cannot arbitrarily restrict 
these evars before knowing if there *) (* will really be used; it can also be due to some argument *) (* (typically a rel) that is not inversible and that cannot be *) (* inverted either because it is needed for typing the conclusion *) (* of the evar to project *) raise NotEnoughInformationToInvert) projs let extract_candidates sols = try Some (List.map (function (id,ProjectVar) -> mkVar id | _ -> raise Exit) sols) with Exit -> None let filter_of_projection = function Invertible _ -> true | _ -> false let invert_invertible_arg fullenv evd aliases k (evk,argsv) args' = let evi = Evd.find_undefined evd evk in let subst,_ = make_projectable_subst aliases evd evi argsv in let projs = array_map_to_list (invert_arg fullenv evd aliases k evk subst) args' in Array.of_list (extract_unique_projections projs) (* Redefines an evar with a smaller context (i.e. it may depend on less * variables) such that c becomes closed. * Example: in "fun (x:?1) (y:list ?2[x]) => x = y :> ?3[x,y] /\ x = nil bool" * ?3 <-- ?1 no pb: env of ?3 is larger than ?1's * ?1 <-- list ?2 pb: ?2 may depend on x, but not ?1. * What we do is that ?2 is defined by a new evar ?4 whose context will be * a prefix of ?2's env, included in ?1's env. * * If "hyps |- ?e : T" and "filter" selects a subset hyps' of hyps then * [do_restrict_hyps evd ?e filter] sets ?e:=?e'[hyps'] and returns ?e' * such that "hyps' |- ?e : T" *) let filter_candidates evd evk filter candidates = let evi = Evd.find_undefined evd evk in let candidates = match candidates with | None -> evi.evar_candidates | Some _ -> candidates in match candidates,filter with | None,_ | _, None -> candidates | Some l, Some filter -> let ids = List.map pi1 (list_filter_with filter (evar_context evi)) in Some (List.filter (fun a -> list_subset (Idset.elements (collect_vars a)) ids) l) let closure_of_filter evd evk filter = let evi = Evd.find_undefined evd evk in let vars = collect_vars (nf_evar evd (evar_concl evi)) in let test (id,c,_) b = b || Idset.mem id vars || c <> None in let newfilter = List.map2 test (evar_context evi) filter in if newfilter = evar_filter evi then None else Some newfilter let restrict_hyps evd evk filter candidates = (* What to do with dependencies? Assume we have x:A, y:B(x), z:C(x,y) |- ?e:T(x,y,z) and restrict on y. - If y is in a non-erasable position in C(x,y) (i.e. it is not below an occurrence of x in the hnf of C), then z should be removed too. - If y is in a non-erasable position in T(x,y,z) then the problem is unsolvable. Computing whether y is erasable or not may be costly and the interest for this early detection in practice is not obvious. We let it for future work. In any case, thanks to the use of filters, the whole (unrestricted) context remains consistent. *) let candidates = filter_candidates evd evk (Some filter) candidates in let typablefilter = closure_of_filter evd evk filter in (typablefilter,candidates) exception EvarSolvedWhileRestricting of evar_map * constr let do_restrict_hyps evd (evk,args as ev) filter candidates = let filter,candidates = match filter with | None -> None,candidates | Some filter -> restrict_hyps evd evk filter candidates in match candidates,filter with | Some [], _ -> error "Not solvable." | Some [nc],_ -> let evd = Evd.define evk nc evd in raise (EvarSolvedWhileRestricting (evd,whd_evar evd (mkEvar ev))) | None, None -> evd,ev | _ -> restrict_applied_evar evd ev filter candidates (* [postpone_non_unique_projection] postpones equation of the form ?e[?] 
= c *) (* ?e is assumed to have no candidates *) let postpone_non_unique_projection env evd (evk,argsv as ev) sols rhs = let rhs = expand_vars_in_term env rhs in let filter = restrict_upon_filter evd evk (* Keep only variables that occur in rhs *) (* This is not safe: is the variable is a local def, its body *) (* may contain references to variables that are removed, leading to *) (* a ill-formed context. We would actually need a notion of filter *) (* that says that the body is hidden. Note that expand_vars_in_term *) (* expands only rels and vars aliases, not rels or vars bound to an *) (* arbitrary complex term *) (fun a -> not (isRel a || isVar a) || dependent a rhs || List.exists (fun (id,_) -> isVarId id a) sols) (Array.to_list argsv) in let filter = match filter with | None -> None | Some filter -> closure_of_filter evd evk filter in let candidates = extract_candidates sols in if candidates <> None then restrict_evar evd evk filter candidates else (* We made an approximation by not expanding a local definition *) let evd,ev = restrict_applied_evar evd ev filter None in let pb = (Reduction.CONV,env,mkEvar ev,rhs) in Evd.add_conv_pb pb evd (* [postpone_evar_evar] postpones an equation of the form ?e1[?1] = ?e2[?2] *) let postpone_evar_evar f env evd filter1 ev1 filter2 ev2 = (* Leave an equation between (restrictions of) ev1 andv ev2 *) try let evd,ev1' = do_restrict_hyps evd ev1 filter1 None in try let evd,ev2' = do_restrict_hyps evd ev2 filter2 None in add_conv_pb (Reduction.CONV,env,mkEvar ev1',mkEvar ev2') evd with EvarSolvedWhileRestricting (evd,ev2) -> (* ev2 solved on the fly *) f env evd ev1' ev2 with EvarSolvedWhileRestricting (evd,ev1) -> (* ev1 solved on the fly *) f env evd ev2 ev1 (* [solve_evar_evar f Γ Σ ?e1[u1..un] ?e2[v1..vp]] applies an heuristic * to solve the equation Σ; Γ ⊢ ?e1[u1..un] = ?e2[v1..vp]: * - if there are at most one φj for each vj s.t. vj = φj(u1..un), * we first restrict ?e2 to the subset v_k1..v_kq of the vj that are * inversible and we set ?e1[x1..xn] := ?e2[φk1(x1..xn)..φkp(x1..xn)] * (this is a case of pattern-unification) * - symmetrically if there are at most one ψj for each uj s.t. * uj = ψj(v1..vp), * - otherwise, each position i s.t. ui does not occur in v1..vp has to * be restricted and similarly for the vi, and we leave the equation * as an open equation (performed by [postpone_evar]) * * Warning: the notion of unique φj is relative to some given class * of unification problems * * Note: argument f is the function used to instantiate evars. 
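 * For instance (schematically), for ?e1[u v] = ?e2[v w] where w is not
 * expressible from the ui: the position of w in ?e2 is restricted away, the
 * remaining instance is inverted, and one evar is defined in terms of the
 * other; if neither direction is invertible, the equation is postponed.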
*) let are_canonical_instances args1 args2 env = let n1 = Array.length args1 in let n2 = Array.length args2 in let rec aux n = function | (id,_,c)::sign when n < n1 && isVarId id args1.(n) && isVarId id args2.(n) -> aux (n+1) sign | [] -> let rec aux2 n = n = n1 || (isRelN (n1-n) args1.(n) && isRelN (n1-n) args2.(n) && aux2 (n+1)) in aux2 n | _ -> false in n1 = n2 & aux 0 (named_context env) let filter_compatible_candidates conv_algo env evd evi args rhs c = let c' = instantiate_evar (evar_filtered_context evi) c args in let evd, b = conv_algo env evd Reduction.CONV rhs c' in if b then Some (c,evd) else None exception DoesNotPreserveCandidateRestriction let restrict_candidates conv_algo env evd filter1 (evk1,argsv1) (evk2,argsv2) = let evi1 = Evd.find evd evk1 in let evi2 = Evd.find evd evk2 in let cand1 = filter_candidates evd evk1 filter1 None in let cand2 = evi2.evar_candidates in match cand1, cand2 with | _, None -> cand1 | None, Some _ -> raise DoesNotPreserveCandidateRestriction | Some l1, Some l2 -> let args1 = Array.to_list argsv1 in let args2 = Array.to_list argsv2 in let l1' = List.filter (fun c1 -> let c1' = instantiate_evar (evar_filtered_context evi1) c1 args1 in List.filter (fun c2 -> (filter_compatible_candidates conv_algo env evd evi2 args2 c1' c2 <> None)) l2 <> []) l1 in if List.length l1 = List.length l1' then None else Some l1' exception CannotProject of bool list option (* Assume that FV(?n[x1:=t1..xn:=tn]) belongs to some set U. Can ?n be instantiated by a term u depending essentially on xi such that the FV(u[x1:=t1..xn:=tn]) are in the set U? - If ti is a variable, it has to be in U. - If ti is a constructor, its parameters cannot be erased even if u matches on it, so we have to discard ti if the parameters contain variables not in U. - If ti is rigid, we have to discard it if it contains variables in U. Note: when restricting as part of an equation ?n[x1:=t1..xn:=tn] = ?m[...] then, occurrences of ?m in the ti can be seen, like variables, as occurrences of subterms to eventually discard so as to be allowed to keep ti. 
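For instance, for ti = C p a with C a constructor, parameter p and non-parameter argument a, only p needs to be expressible in U, since an instantiation of ?n may match on ti and thereby erase a; a plain variable argument ti = z, on the other hand, can only be kept when z belongs to U.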
*) let rec is_constrainable_in k (ev,(fv_rels,fv_ids) as g) t = let f,args = decompose_app_vect t in match kind_of_term f with | Construct (ind,_) -> let nparams = (fst (Global.lookup_inductive ind)).Declarations.mind_nparams in if nparams > Array.length args then true (* We don't try to be more clever *) else let params,_ = array_chop nparams args in array_for_all (is_constrainable_in k g) params | Ind _ -> array_for_all (is_constrainable_in k g) args | Prod (_,t1,t2) -> is_constrainable_in k g t1 && is_constrainable_in k g t2 | Evar (ev',_) -> ev' <> ev (*If ev' needed, one may also try to restrict it*) | Var id -> Idset.mem id fv_ids | Rel n -> n <= k || Intset.mem n fv_rels | Sort _ -> true | _ -> (* We don't try to be more clever *) true let has_constrainable_free_vars evd aliases k ev (fv_rels,fv_ids as fvs) t = let t = expansion_of_var aliases t in match kind_of_term t with | Var id -> Idset.mem id fv_ids | Rel n -> n <= k || Intset.mem n fv_rels | _ -> is_constrainable_in k (ev,fvs) t let ensure_evar_independent g env evd (evk1,argsv1 as ev1) (evk2,argsv2 as ev2)= let filter1 = restrict_upon_filter evd evk1 (noccur_evar env evd evk2) (Array.to_list argsv1) in let candidates1 = restrict_candidates g env evd filter1 ev1 ev2 in let evd,(evk1,_ as ev1) = do_restrict_hyps evd ev1 filter1 candidates1 in let filter2 = restrict_upon_filter evd evk2 (noccur_evar env evd evk1) (Array.to_list argsv2) in let candidates2 = restrict_candidates g env evd filter2 ev2 ev1 in let evd,ev2 = do_restrict_hyps evd ev2 filter2 candidates2 in evd,ev1,ev2 exception EvarSolvedOnTheFly of evar_map * constr let project_evar_on_evar g env evd aliases k2 (evk1,argsv1 as ev1) (evk2,argsv2 as ev2) = (* Apply filtering on ev1 so that fvs(ev1) are in fvs(ev2). *) let fvs2 = free_vars_and_rels_up_alias_expansion aliases (mkEvar ev2) in let filter1 = restrict_upon_filter evd evk1 (has_constrainable_free_vars evd aliases k2 evk2 fvs2) (Array.to_list argsv1) in (* Only try pruning on variable substitutions, postpone otherwise. *) (* Rules out non-linear instances. 
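For example, an instance whose relevant arguments mention the same variable twice, such as [y; y], is not a Miller pattern, so the projection is not attempted here and the caller falls back to the other direction or postpones the problem.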
*) if is_unification_pattern_pure_evar env evd ev2 (mkEvar ev1) then try let candidates1 = restrict_candidates g env evd filter1 ev1 ev2 in let evd,(evk1',args1) = do_restrict_hyps evd ev1 filter1 candidates1 in evd,mkEvar (evk1',invert_invertible_arg env evd aliases k2 ev2 args1) with | EvarSolvedWhileRestricting (evd,ev1) -> raise (EvarSolvedOnTheFly (evd,ev1)) | DoesNotPreserveCandidateRestriction | NotEnoughInformationToInvert -> raise (CannotProject filter1) else raise (CannotProject filter1) let solve_evar_evar_l2r f g env evd aliases ev1 (evk2,_ as ev2) = try let evd,body = project_evar_on_evar g env evd aliases 0 ev1 ev2 in Evd.define evk2 body evd with EvarSolvedOnTheFly (evd,c) -> f env evd ev2 c let solve_evar_evar ?(force=false) f g env evd (evk1,args1 as ev1) (evk2,args2 as ev2) = if are_canonical_instances args1 args2 env then (* If instances are canonical, we solve the problem in linear time *) let sign = evar_filtered_context (Evd.find evd evk2) in let id_inst = list_map_to_array (fun (id,_,_) -> mkVar id) sign in Evd.define evk2 (mkEvar(evk1,id_inst)) evd else let evd,ev1,ev2 = (* If an evar occurs in the instance of the other evar and the use of an heuristic is forced, we restrict *) if force then ensure_evar_independent g env evd ev1 ev2 else (evd,ev1,ev2) in let aliases = make_alias_map env in try solve_evar_evar_l2r f g env evd aliases ev1 ev2 with CannotProject filter1 -> try solve_evar_evar_l2r f g env evd aliases ev2 ev1 with CannotProject filter2 -> postpone_evar_evar f env evd filter1 ev1 filter2 ev2 type conv_fun = env -> evar_map -> conv_pb -> constr -> constr -> evar_map * bool let check_evar_instance evd evk1 body conv_algo = let evi = Evd.find evd evk1 in let evenv = evar_unfiltered_env evi in (* FIXME: The body might be ill-typed when this is called from w_merge *) let ty = try Retyping.get_type_of evenv evd body with e when Errors.noncritical e -> error "Ill-typed evar instance" in let evd,b = conv_algo evenv evd Reduction.CUMUL ty evi.evar_concl in if b then evd else user_err_loc (fst (evar_source evk1 evd),"", str "Unable to find a well-typed instantiation") (* Solve pbs ?e[t1..tn] = ?e[u1..un] which arise often in fixpoint * definitions. We try to unify the ti with the ui pairwise. The pairs * that don't unify are discarded (i.e. ?e is redefined so that it does not * depend on these args). *) let solve_refl ?(can_drop=false) conv_algo env evd evk argsv1 argsv2 = if array_equal eq_constr argsv1 argsv2 then evd else (* Filter and restrict if needed *) let untypedfilter = restrict_upon_filter evd evk (fun (a1,a2) -> snd (conv_algo env evd Reduction.CONV a1 a2)) (List.combine (Array.to_list argsv1) (Array.to_list argsv2)) in let candidates = filter_candidates evd evk untypedfilter None in let filter = match untypedfilter with | None -> None | Some filter -> closure_of_filter evd evk filter in let evd,ev1 = restrict_applied_evar evd (evk,argsv1) filter candidates in if fst ev1 = evk & can_drop then (* No refinement *) evd else (* either progress, or not allowed to drop, e.g. 
to preserve possibly *) (* informative equations such as ?e[x:=?y]=?e[x:=?y'] where we don't know *) (* if e can depend on x until ?y is not resolved, or, conversely, we *) (* don't know if ?y has to be unified with ?y, until e is resolved *) let argsv2 = restrict_instance evd evk filter argsv2 in let ev2 = (fst ev1,argsv2) in (* Leave a unification problem *) Evd.add_conv_pb (Reduction.CONV,env,mkEvar ev1,mkEvar ev2) evd (* If the evar can be instantiated by a finite set of candidates known in advance, we check which of them apply *) exception NoCandidates let solve_candidates conv_algo env evd (evk,argsv as ev) rhs = let evi = Evd.find evd evk in let args = Array.to_list argsv in match evi.evar_candidates with | None -> raise NoCandidates | Some l -> let l' = list_map_filter (filter_compatible_candidates conv_algo env evd evi args rhs) l in match l' with | [] -> error_cannot_unify env evd (mkEvar ev, rhs) | [c,evd] -> (* solve_candidates might have been called recursively in the mean *) (* time and the evar been solved by the filtering process *) if Evd.is_undefined evd evk then Evd.define evk c evd else evd | l when List.length l < List.length l' -> let candidates = List.map fst l in restrict_evar evd evk None (Some candidates) | l -> evd (* We try to instantiate the evar assuming the body won't depend * on arguments that are not Rels or Vars, or appearing several times * (i.e. we tackle a generalization of Miller-Pfenning patterns unification) * * 1) Let "env |- ?ev[hyps:=args] = rhs" be the unification problem * 2) We limit it to a patterns unification problem "env |- ev[subst] = rhs" * where only Rel's and Var's are relevant in subst * 3) We recur on rhs, "imitating" the term, and failing if some Rel/Var is * not in the scope of ?ev. For instance, the problem * "y:nat |- ?x[] = y" where "|- ?1:nat" is not satisfiable because * ?1 would be instantiated by y which is not in the scope of ?1. * 4) We try to "project" the term if the process of imitation fails * and that only one projection is possible * * Note: we don't assume rhs in normal form, it may fail while it would * have succeeded after some reductions. 
* * This is the work of [invert_definition Γ Σ ?ev[hyps:=args] c] * Precondition: Σ; Γ, y1..yk |- c /\ Σ; Γ |- u1..un * Postcondition: if φ(x1..xn) is returned then * Σ; Γ, y1..yk |- φ(u1..un) = c /\ x1..xn |- φ(x1..xn) *) exception NotInvertibleUsingOurAlgorithm of constr exception NotEnoughInformationToProgress of (identifier * evar_projection) list exception OccurCheckIn of evar_map * constr let rec invert_definition conv_algo choose env evd (evk,argsv as ev) rhs = let aliases = make_alias_map env in let evdref = ref evd in let progress = ref false in let evi = Evd.find evd evk in let subst,cstr_subst = make_projectable_subst aliases evd evi argsv in (* Projection *) let project_variable t = (* Evar/Var problem: unifiable iff variable projectable from ev subst *) try let sols = find_projectable_vars true aliases !evdref t subst in let c, p = match sols with | [] -> raise Not_found | [id,p] -> (mkVar id, p) | (id,p)::_::_ -> if choose then (mkVar id, p) else raise (NotUniqueInType sols) in let ty = lazy (Retyping.get_type_of env !evdref t) in let evd = do_projection_effects (evar_define conv_algo) env ty !evdref p in evdref := evd; c with | Not_found -> raise (NotInvertibleUsingOurAlgorithm t) | NotUniqueInType sols -> if not !progress then raise (NotEnoughInformationToProgress sols); (* No unique projection but still restrict to where it is possible *) (* materializing is necessary, but is restricting useful? *) let ty = find_solution_type (evar_env evi) sols in let sign = evar_filtered_context evi in let ty' = instantiate_evar sign ty (Array.to_list argsv) in let (evd,evar,(evk',argsv' as ev')) = materialize_evar (evar_define conv_algo) env !evdref 0 ev ty' in let ts = expansions_of_var aliases t in let test c = isEvar c or List.mem c ts in let filter = array_map_to_list test argsv' in let filter = apply_subfilter (evar_filter (Evd.find_undefined evd evk)) filter in let filter = closure_of_filter evd evk' filter in let candidates = extract_candidates sols in let evd = if candidates <> None then restrict_evar evd evk' filter candidates else let evd,ev'' = restrict_applied_evar evd ev' filter None in Evd.add_conv_pb (Reduction.CONV,env,mkEvar ev'',t) evd in evdref := evd; evar in let rec imitate (env',k as envk) t = let t = whd_evar !evdref t in match kind_of_term t with | Rel i when i>k -> (match pi2 (Environ.lookup_rel (i-k) env') with | None -> project_variable (mkRel (i-k)) | Some b -> try project_variable (mkRel (i-k)) with NotInvertibleUsingOurAlgorithm _ -> imitate envk (lift i b)) | Var id -> (match pi2 (Environ.lookup_named id env') with | None -> project_variable t | Some b -> try project_variable t with NotInvertibleUsingOurAlgorithm _ -> imitate envk b) | Evar (evk',args' as ev') -> if evk = evk' then raise (OccurCheckIn (evd,rhs)); (* Evar/Evar problem (but left evar is virtual) *) let aliases = lift_aliases k aliases in (try let ev = (evk,Array.map (lift k) argsv) in let evd,body = project_evar_on_evar conv_algo env' !evdref aliases k ev' ev in evdref := evd; body with | EvarSolvedOnTheFly (evd,t) -> evdref:=evd; imitate envk t | CannotProject filter' -> assert !progress; (* Make the virtual left evar real *) let ty = get_type_of env' !evdref t in let (evd,evar'',ev'') = materialize_evar (evar_define conv_algo) env' !evdref k ev ty in (* materialize_evar may instantiate ev' by another evar; adjust it *) let (evk',args' as ev') = normalize_evar evd ev' in let evd = (* Try to project (a restriction of) the left evar ... 
*) try let evd,body = project_evar_on_evar conv_algo env' evd aliases 0 ev'' ev' in Evd.define evk' body evd with | EvarSolvedOnTheFly _ -> assert false (* ev has no candidates *) | CannotProject filter'' -> (* ... or postpone the problem *) postpone_evar_evar (evar_define conv_algo) env' evd filter'' ev'' filter' ev' in evdref := evd; evar'') | _ -> progress := true; match let c,args = decompose_app_vect t in match kind_of_term c with | Construct cstr when noccur_between 1 k t -> (* This is common case when inferring the return clause of match *) (* (currently rudimentary: we do not treat the case of multiple *) (* possible inversions; we do not treat overlap with a possible *) (* alternative inversion of the subterms of the constructor, etc)*) (match find_projectable_constructor env evd cstr k args cstr_subst with | _::_ as l -> Some (List.map mkVar l) | _ -> None) | _ -> None with | Some l -> let ty = get_type_of env' !evdref t in let candidates = try let t = map_constr_with_full_binders (fun d (env,k) -> push_rel d env, k+1) imitate envk t in t::l with e when Errors.noncritical e -> l in (match candidates with | [x] -> x | _ -> let (evd,evar'',ev'') = materialize_evar (evar_define conv_algo) env' !evdref k ev ty in evdref := restrict_evar evd (fst ev'') None (Some candidates); evar'') | None -> (* Evar/Rigid problem (or assimilated if not normal): we "imitate" *) map_constr_with_full_binders (fun d (env,k) -> push_rel d env, k+1) imitate envk t in let rhs = whd_beta evd rhs (* heuristic *) in let body = imitate (env,0) rhs in (!evdref,body) (* [define] tries to solve the problem "?ev[args] = rhs" when "?ev" is * an (uninstantiated) evar such that "hyps |- ?ev : typ". Otherwise said, * [define] tries to find an instance lhs such that * "lhs [hyps:=args]" unifies to rhs. The term "lhs" must be closed in * context "hyps" and not referring to itself. *) and evar_define conv_algo ?(choose=false) env evd (evk,argsv as ev) rhs = match kind_of_term rhs with | Evar (evk2,argsv2 as ev2) -> if evk = evk2 then solve_refl ~can_drop:choose conv_algo env evd evk argsv argsv2 else solve_evar_evar ~force:choose (evar_define conv_algo) conv_algo env evd ev ev2 | _ -> try solve_candidates conv_algo env evd ev rhs with NoCandidates -> try let (evd',body) = invert_definition conv_algo choose env evd ev rhs in if occur_meta body then error "Meta cannot occur in evar body."; (* invert_definition may have instantiate some evars of rhs with evk *) (* so we recheck acyclicity *) if occur_evar evk body then raise (OccurCheckIn (evd',body)); (* needed only if an inferred type *) let body = refresh_universes body in (* Cannot strictly type instantiations since the unification algorithm * does not unify applications from left to right. 
* e.g problem f x == g y yields x==y and f==g (in that order) * Another problem is that type variables are evars of type Type let _ = try let env = evar_env evi in let ty = evi.evar_concl in Typing.check env evd' body ty with e -> pperrnl (str "Ill-typed evar instantiation: " ++ fnl() ++ pr_evar_map evd' ++ fnl() ++ str "----> " ++ int ev ++ str " := " ++ print_constr body); raise e in*) let evd' = Evd.define evk body evd' in check_evar_instance evd' evk body conv_algo with | NotEnoughInformationToProgress sols -> postpone_non_unique_projection env evd ev sols rhs | NotInvertibleUsingOurAlgorithm t -> error_not_clean env evd evk t (evar_source evk evd) | OccurCheckIn (evd,rhs) -> (* last chance: rhs actually reduces to ev *) let c = whd_betadeltaiota env evd rhs in match kind_of_term c with | Evar (evk',argsv2) when evk = evk' -> solve_refl (fun env sigma pb c c' -> (evd,is_fconv pb env sigma c c')) env evd evk argsv argsv2 | _ -> error_occur_check env evd evk rhs (* This code (i.e. solve_pb, etc.) takes a unification * problem, and tries to solve it. If it solves it, then it removes * all the conversion problems, and re-runs conversion on each one, in * the hopes that the new solution will aid in solving them. * * The kinds of problems it knows how to solve are those in which * the usable arguments of an existential var are all themselves * universal variables. * The solution to this problem is to do renaming for the Var's, * to make them match up with the Var's which are found in the * hyps of the existential, to do a "pop" for each Rel which is * not an argument of the existential, and a subst1 for each which * is, again, with the corresponding variable. This is done by * define * * Thus, we take the arguments of the existential which we are about * to assign, and zip them with the identifiers in the hypotheses. * Then, we process all the Var's in the arguments, and sort the * Rel's into ascending order. Then, we just march up, doing * subst1's and pop's. * * NOTE: We can do this more efficiently for the relative arguments, * by building a long substituend by hand, but this is a pain in the * ass. *) let status_changed lev (pbty,_,t1,t2) = (try ExistentialSet.mem (head_evar t1) lev with NoHeadEvar -> false) or (try ExistentialSet.mem (head_evar t2) lev with NoHeadEvar -> false) let reconsider_conv_pbs conv_algo evd = let (evd,pbs) = extract_changed_conv_pbs evd status_changed in List.fold_left (fun (evd,b as p) (pbty,env,t1,t2) -> if b then conv_algo env evd pbty t1 t2 else p) (evd,true) pbs (* Tries to solve problem t1 = t2. * Precondition: t1 is an uninstantiated evar * Returns an optional list of evars that were instantiated, or None * if the problem couldn't be solved. *) (* Rq: uncomplete algorithm if pbty = CONV_X_LEQ ! 
*) let solve_simple_eqn conv_algo ?(choose=false) env evd (pbty,(evk1,args1 as ev1),t2) = try let t2 = whd_betaiota evd t2 in (* includes whd_evar *) let evd = match pbty with | Some true when isEvar t2 -> add_conv_pb (Reduction.CUMUL,env,mkEvar ev1,t2) evd | Some false when isEvar t2 -> add_conv_pb (Reduction.CUMUL,env,t2,mkEvar ev1) evd | _ -> evar_define conv_algo ~choose env evd ev1 t2 in reconsider_conv_pbs conv_algo evd with e when precatchable_exception e -> (evd,false) (** The following functions return the set of evars immediately contained in the object, including defined evars *) let evars_of_term c = let rec evrec acc c = match kind_of_term c with | Evar (n, l) -> Intset.add n (Array.fold_left evrec acc l) | _ -> fold_constr evrec acc c in evrec Intset.empty c (* spiwack: a few functions to gather evars on which goals depend. *) let queue_set q is_dependent set = Intset.iter (fun a -> Queue.push (is_dependent,a) q) set let queue_term q is_dependent c = queue_set q is_dependent (evars_of_term c) let process_dependent_evar q acc evm is_dependent e = let evi = Evd.find evm e in (* Queues evars appearing in the types of the goal (conclusion, then hypotheses), they are all dependent. *) queue_term q true evi.evar_concl; List.iter begin fun (_,b,t) -> queue_term q true t; match b with | None -> () | Some b -> queue_term q true b end (Environ.named_context_of_val evi.evar_hyps); match evi.evar_body with | Evar_empty -> if is_dependent then Intmap.add e None acc else acc | Evar_defined b -> let subevars = evars_of_term b in (* evars appearing in the definition of an evar [e] are marked as dependent when [e] is dependent itself: if [e] is a non-dependent goal, then, unless they are reach from another path, these evars are just other non-dependent goals. *) queue_set q is_dependent subevars; if is_dependent then Intmap.add e (Some subevars) acc else acc let gather_dependent_evars q evm = let acc = ref Intmap.empty in while not (Queue.is_empty q) do let (is_dependent,e) = Queue.pop q in (* checks if [e] has already been added to [!acc] *) begin if not (Intmap.mem e !acc) then acc := process_dependent_evar q !acc evm is_dependent e end done; !acc let gather_dependent_evars evm l = let q = Queue.create () in List.iter (fun a -> Queue.add (false,a) q) l; gather_dependent_evars q evm (* /spiwack *) let evars_of_named_context nc = List.fold_right (fun (_, b, t) s -> Option.fold_left (fun s t -> Intset.union s (evars_of_term t)) (Intset.union s (evars_of_term t)) b) nc Intset.empty let evars_of_evar_info evi = Intset.union (evars_of_term evi.evar_concl) (Intset.union (match evi.evar_body with | Evar_empty -> Intset.empty | Evar_defined b -> evars_of_term b) (evars_of_named_context (named_context_of_val evi.evar_hyps))) (** The following functions return the set of undefined evars contained in the object, the defined evars being traversed. This is roughly a combination of the previous functions and [nf_evar]. 
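For example, if ?1 is defined as [f ?2] with ?2 still undefined, then the undefined evars of a term mentioning ?1 include ?2, because the definition of ?1 is traversed.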
*) let undefined_evars_of_term evd t = let rec evrec acc c = match kind_of_term c with | Evar (n, l) -> let acc = Array.fold_left evrec acc l in (try match (Evd.find evd n).evar_body with | Evar_empty -> Intset.add n acc | Evar_defined c -> evrec acc c with Not_found -> anomaly "undefined_evars_of_term: evar not found") | _ -> fold_constr evrec acc c in evrec Intset.empty t let undefined_evars_of_named_context evd nc = List.fold_right (fun (_, b, t) s -> Option.fold_left (fun s t -> Intset.union s (undefined_evars_of_term evd t)) (Intset.union s (undefined_evars_of_term evd t)) b) nc Intset.empty let undefined_evars_of_evar_info evd evi = Intset.union (undefined_evars_of_term evd evi.evar_concl) (Intset.union (match evi.evar_body with | Evar_empty -> Intset.empty | Evar_defined b -> undefined_evars_of_term evd b) (undefined_evars_of_named_context evd (named_context_of_val evi.evar_hyps))) (* [check_evars] fails if some unresolved evar remains *) let check_evars env initial_sigma sigma c = let rec proc_rec c = match kind_of_term c with | Evar (evk,_ as ev) -> (match existential_opt_value sigma ev with | Some c -> proc_rec c | None -> if not (Evd.mem initial_sigma evk) then let (loc,k) = evar_source evk sigma in match k with | ImplicitArg (gr, (i, id), false) -> () | _ -> let evi = nf_evar_info sigma (Evd.find_undefined sigma evk) in error_unsolvable_implicit loc env sigma evi k None) | _ -> iter_constr proc_rec c in proc_rec c open Glob_term (****************************************) (* Operations on value/type constraints *) (****************************************) type type_constraint_type = (int * int) option * constr type type_constraint = type_constraint_type option type val_constraint = constr option (* Old comment... * Basically, we have the following kind of constraints (in increasing * strength order): * (false,(None,None)) -> no constraint at all * (true,(None,None)) -> we must build a judgement which _TYPE is a kind * (_,(None,Some ty)) -> we must build a judgement which _TYPE is ty * (_,(Some v,_)) -> we must build a judgement which _VAL is v * Maybe a concrete datatype would be easier to understand. * We differentiate (true,(None,None)) from (_,(None,Some Type)) * because otherwise Case(s) would be misled, as in * (n:nat) Case n of bool [_]nat end would infer the predicate Type instead * of Set. 
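 * For example, [mk_tycon ty] below builds the [(None, ty)] form, while
 * [mk_abstr_tycon n ty] builds [(Some (n, n), ty)], recording the initial and
 * the current abstraction level alongside the expected type.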
*) (* The empty type constraint *) let empty_tycon = None let mk_tycon_type c = (None, c) let mk_abstr_tycon_type n c = (Some (n, n), c) (* First component is initial abstraction, second is current abstraction *) (* Builds a type constraint *) let mk_tycon ty = Some (mk_tycon_type ty) let mk_abstr_tycon n ty = Some (mk_abstr_tycon_type n ty) (* Constrains the value of a type *) let empty_valcon = None (* Builds a value constraint *) let mk_valcon c = Some c let idx = id_of_string "x" (* Refining an evar to a product *) let define_pure_evar_as_product evd evk = let evi = Evd.find_undefined evd evk in let evenv = evar_unfiltered_env evi in let id = next_ident_away idx (ids_of_named_context (evar_context evi)) in let evd1,dom = new_type_evar evd evenv ~filter:(evar_filter evi) in let evd2,rng = let newenv = push_named (id, None, dom) evenv in let src = evar_source evk evd1 in let filter = true::evar_filter evi in new_type_evar evd1 newenv ~src ~filter in let prod = mkProd (Name id, dom, subst_var id rng) in let evd3 = Evd.define evk prod evd2 in evd3,prod (* Refine an applied evar to a product and returns its instantiation *) let define_evar_as_product evd (evk,args) = let evd,prod = define_pure_evar_as_product evd evk in (* Quick way to compute the instantiation of evk with args *) let na,dom,rng = destProd prod in let evdom = mkEvar (fst (destEvar dom), args) in let evrngargs = array_cons (mkRel 1) (Array.map (lift 1) args) in let evrng = mkEvar (fst (destEvar rng), evrngargs) in evd,mkProd (na, evdom, evrng) (* Refine an evar with an abstraction I.e., solve x1..xq |- ?e:T(x1..xq) with e:=λy:A.?e'[x1..xq,y] where: - either T(x1..xq) = πy:A(x1..xq).B(x1..xq,y) or T(x1..xq) = ?d[x1..xq] and we define ?d := πy:?A.?B with x1..xq |- ?A:Type and x1..xq,y |- ?B:Type - x1..xq,y:A |- ?e':B *) let define_pure_evar_as_lambda env evd evk = let evi = Evd.find_undefined evd evk in let evenv = evar_unfiltered_env evi in let typ = whd_betadeltaiota env evd (evar_concl evi) in let evd1,(na,dom,rng) = match kind_of_term typ with | Prod (na,dom,rng) -> (evd,(na,dom,rng)) | Evar ev' -> let evd,typ = define_evar_as_product evd ev' in evd,destProd typ | _ -> error_not_product_loc dummy_loc env evd typ in let avoid = ids_of_named_context (evar_context evi) in let id = next_name_away_with_default_using_types "x" na avoid (whd_evar evd dom) in let newenv = push_named (id, None, dom) evenv in let filter = true::evar_filter evi in let src = evar_source evk evd1 in let evd2,body = new_evar evd1 newenv ~src (subst1 (mkVar id) rng) ~filter in let lam = mkLambda (Name id, dom, subst_var id body) in Evd.define evk lam evd2, lam let define_evar_as_lambda env evd (evk,args) = let evd,lam = define_pure_evar_as_lambda env evd evk in (* Quick way to compute the instantiation of evk with args *) let na,dom,body = destLambda lam in let evbodyargs = array_cons (mkRel 1) (Array.map (lift 1) args) in let evbody = mkEvar (fst (destEvar body), evbodyargs) in evd,mkLambda (na, dom, evbody) let rec evar_absorb_arguments env evd (evk,args as ev) = function | [] -> evd,ev | a::l -> (* TODO: optimize and avoid introducing intermediate evars *) let evd,lam = define_pure_evar_as_lambda env evd evk in let _,_,body = destLambda lam in let evk = fst (destEvar body) in evar_absorb_arguments env evd (evk, array_cons a args) l (* Refining an evar to a sort *) let define_evar_as_sort evd (ev,args) = let evd, s = new_sort_variable evd in Evd.define ev (mkSort s) evd, s (* We don't try to guess in which sort the type should be defined, since any 
type has type Type. May cause some trouble, but not so far... *) let judge_of_new_Type evd = let evd', s = new_univ_variable evd in evd', Typeops.judge_of_type s (* Propagation of constraints through application and abstraction: Given a type constraint on a functional term, returns the type constraint on its domain and codomain. If the input constraint is an evar instantiate it with the product of 2 new evars. *) let unlift_tycon init cur c = if cur = 1 then None, c else Some (init, pred cur), c let split_tycon loc env evd tycon = let rec real_split evd c = let t = whd_betadeltaiota env evd c in match kind_of_term t with | Prod (na,dom,rng) -> evd, (na, dom, rng) | Evar ev (* ev is undefined because of whd_betadeltaiota *) -> let (evd',prod) = define_evar_as_product evd ev in let (_,dom,rng) = destProd prod in evd',(Anonymous, dom, rng) | App (c,args) when isEvar c -> let (evd',lam) = define_evar_as_lambda env evd (destEvar c) in real_split evd' (mkApp (lam,args)) | _ -> error_not_product_loc loc env evd c in match tycon with | None -> evd,(Anonymous,None,None) | Some (abs, c) -> (match abs with None -> let evd', (n, dom, rng) = real_split evd c in evd', (n, mk_tycon dom, mk_tycon rng) | Some (init, cur) -> evd, (Anonymous, None, Some (unlift_tycon init cur c))) let valcon_of_tycon x = match x with | Some (None, t) -> Some t | _ -> None let lift_abstr_tycon_type n (abs, t) = match abs with None -> raise (Invalid_argument "lift_abstr_tycon_type: not an abstraction") | Some (init, abs) -> let abs' = abs + n in if abs' < 0 then raise (Invalid_argument "lift_abstr_tycon_type") else (Some (init, abs'), t) let lift_tycon_type n (abs, t) = (abs, lift n t) let lift_tycon n = Option.map (lift_tycon_type n) let pr_tycon_type env (abs, t) = match abs with None -> Termops.print_constr_env env t | Some (init, cur) -> str "Abstract (" ++ int init ++ str "," ++ int cur ++ str ") " ++ Termops.print_constr_env env t let pr_tycon env = function None -> str "None" | Some t -> pr_tycon_type env t coq-8.4pl2/pretyping/detyping.mli0000640000175000001440000000546212010532755016205 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* cases_pattern -> cases_pattern val subst_glob_constr : substitution -> glob_constr -> glob_constr (** [detype isgoal avoid ctx c] turns a closed [c], into a glob_constr de Bruijn indexes are turned to bound names, avoiding names in [avoid] [isgoal] tells if naming must avoid global-level synonyms as intro does [ctx] gives the names of the free variables *) val detype : bool -> identifier list -> names_context -> constr -> glob_constr val detype_case : bool -> ('a -> glob_constr) -> (constructor array -> int array -> 'a array -> (loc * identifier list * cases_pattern list * glob_constr) list) -> ('a -> int -> bool) -> identifier list -> inductive * case_style * int * int array * int -> 'a option -> 'a -> 'a array -> glob_constr val detype_sort : sorts -> glob_sort val detype_rel_context : constr option -> identifier list -> names_context -> rel_context -> glob_decl list (** look for the index of a named var or a nondep var as it is renamed *) val lookup_name_as_displayed : env -> constr -> identifier -> int option val lookup_index_as_renamed : env -> constr -> int -> int option val set_detype_anonymous : (loc -> int -> glob_constr) -> unit val force_wildcard : unit -> bool val synthetize_type : unit -> bool (** Utilities to transform kernel cases to simple pattern-matching problem *) 
val it_destRLambda_or_LetIn_names : int -> glob_constr -> name list * glob_constr val simple_cases_matrix_of_branches : inductive -> (int * int * glob_constr) list -> cases_clauses val return_type_of_predicate : inductive -> int -> int -> glob_constr -> predicate_pattern * glob_constr option module PrintingInductiveMake : functor (Test : sig val encode : Libnames.reference -> Names.inductive val member_message : Pp.std_ppcmds -> bool -> Pp.std_ppcmds val field : string val title : string end) -> sig type t = Names.inductive val encode : Libnames.reference -> Names.inductive val subst : substitution -> t -> t val printer : t -> Pp.std_ppcmds val key : Goptions.option_name val title : string val member_message : t -> bool -> Pp.std_ppcmds val synchronous : bool end coq-8.4pl2/pretyping/cases.ml0000640000175000001440000021734012063656004015311 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* case_style -> (type_constraint -> env -> evar_map ref -> glob_constr -> unsafe_judgment) * evar_map ref -> type_constraint -> env -> glob_constr option * tomatch_tuples * cases_clauses -> unsafe_judgment end let rec list_try_compile f = function | [a] -> f a | [] -> anomaly "try_find_f" | h::t -> try f h with UserError _ | TypeError _ | PretypeError _ | PatternMatchingError _ | Loc.Exc_located (_, (UserError _ | TypeError _ | PretypeError _ | PatternMatchingError _)) -> list_try_compile f t let force_name = let nx = Name (id_of_string "x") in function Anonymous -> nx | na -> na (************************************************************************) (* Pattern-matching compilation (Cases) *) (************************************************************************) (************************************************************************) (* Configuration, errors and warnings *) open Pp let msg_may_need_inversion () = strbrk "Found a matching with no clauses on a term unknown to have an empty inductive type." 
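(* A minimal sketch, kept as a comment and using hypothetical names, of the
   retry discipline implemented by [list_try_compile] above: each candidate is
   tried in turn and the first one whose compilation does not raise one of the
   elaboration errors listed there is kept.

   let rec try_candidates f = function
     | [a] -> f a
     | [] -> invalid_arg "try_candidates"
     | h :: t -> (try f h with Failure _ -> try_candidates f t)

   Unlike this toy version, the real function only catches the specific
   elaboration exceptions, so genuine anomalies still escape. *)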
(* Utils *) let make_anonymous_patvars n = list_make n (PatVar (dummy_loc,Anonymous)) (* Environment management *) let push_rels vars env = List.fold_right push_rel vars env (* We have x1:t1...xn:tn,xi':ti,y1..yk |- c and re-generalize over xi:ti to get x1:t1...xn:tn,xi':ti,y1..yk |- c[xi:=xi'] *) let relocate_rel n1 n2 k j = if j = n1+k then n2+k else j let rec relocate_index n1 n2 k t = match kind_of_term t with | Rel j when j = n1+k -> mkRel (n2+k) | Rel j when j < n1+k -> t | Rel j when j > n1+k -> t | _ -> map_constr_with_binders succ (relocate_index n1 n2) k t (**********************************************************************) (* Structures used in compiling pattern-matching *) type 'a rhs = { rhs_env : env; rhs_vars : identifier list; avoid_ids : identifier list; it : 'a option} type 'a equation = { patterns : cases_pattern list; rhs : 'a rhs; alias_stack : name list; eqn_loc : loc; used : bool ref } type 'a matrix = 'a equation list (* 1st argument of IsInd is the original ind before extracting the summary *) type tomatch_type = | IsInd of types * inductive_type * name list | NotInd of constr option * types type tomatch_status = | Pushed of ((constr * tomatch_type) * int list * name) | Alias of (name * constr * (constr * types)) | NonDepAlias | Abstract of int * rel_declaration type tomatch_stack = tomatch_status list (* We keep a constr for aliases and a cases_pattern for error message *) type pattern_history = | Top | MakeConstructor of constructor * pattern_continuation and pattern_continuation = | Continuation of int * cases_pattern list * pattern_history | Result of cases_pattern list let start_history n = Continuation (n, [], Top) let feed_history arg = function | Continuation (n, l, h) when n>=1 -> Continuation (n-1, arg :: l, h) | Continuation (n, _, _) -> anomaly ("Bad number of expected remaining patterns: "^(string_of_int n)) | Result _ -> anomaly "Exhausted pattern history" (* This is for non exhaustive error message *) let rec glob_pattern_of_partial_history args2 = function | Continuation (n, args1, h) -> let args3 = make_anonymous_patvars (n - (List.length args2)) in build_glob_pattern (List.rev_append args1 (args2@args3)) h | Result pl -> pl and build_glob_pattern args = function | Top -> args | MakeConstructor (pci, rh) -> glob_pattern_of_partial_history [PatCstr (dummy_loc, pci, args, Anonymous)] rh let complete_history = glob_pattern_of_partial_history [] (* This is to build glued pattern-matching history and alias bodies *) let rec pop_history_pattern = function | Continuation (0, l, Top) -> Result (List.rev l) | Continuation (0, l, MakeConstructor (pci, rh)) -> feed_history (PatCstr (dummy_loc,pci,List.rev l,Anonymous)) rh | _ -> anomaly "Constructor not yet filled with its arguments" let pop_history h = feed_history (PatVar (dummy_loc, Anonymous)) h (* Builds a continuation expecting [n] arguments and building [ci] applied to this [n] arguments *) let push_history_pattern n pci cont = Continuation (n, [], MakeConstructor (pci, cont)) (* A pattern-matching problem has the following form: env, evd |- match terms_to_tomatch return pred with mat end where terms_to_match is some sequence of "instructions" (t1 ... tp) and mat is some matrix (p11 ... p1n -> rhs1) ( ... ) (pm1 ... 
pmn -> rhsm) Terms to match: there are 3 kinds of instructions - "Pushed" terms to match are typed in [env]; these are usually just Rel(n) except for the initial terms given by user; in Pushed ((c,tm),deps,na), [c] is the reference to the term (which is a Rel or an initial term), [tm] is its type (telling whether we know if it is an inductive type or not), [deps] is the list of terms to abstract before matching on [c] (these are rels too) - "Abstract" instructions mean that an abstraction has to be inserted in the current branch to build (this means a pattern has been detected dependent in another one and a generalization is necessary to ensure well-typing) Abstract instructions extend the [env] in which the other instructions are typed - "Alias" instructions mean an alias has to be inserted (this alias is usually removed at the end, except when its type is not the same as the type of the matched term from which it comes - typically because the inductive types are "real" parameters) - "NonDepAlias" instructions mean the completion of a matching over a term to match as for Alias but without inserting this alias because there is no dependency in it Right-hand sides: They consist of a raw term to type in an environment specific to the clause they belong to: the names of declarations are those of the variables present in the patterns. Therefore, they come with their own [rhs_env] (actually it is the same as [env] except for the names of variables). *) type 'a pattern_matching_problem = { env : env; evdref : evar_map ref; pred : constr; tomatch : tomatch_stack; history : pattern_continuation; mat : 'a matrix; caseloc : loc; casestyle : case_style; typing_function: type_constraint -> env -> evar_map ref -> 'a option -> unsafe_judgment } (*--------------------------------------------------------------------------* * A few functions to infer the inductive type from the patterns instead of * * checking that the patterns correspond to the ind. type of the * * destructurated object. 
Allows type inference of examples like * * match n with O => true | _ => false end * * match x in I with C => true | _ => false end * *--------------------------------------------------------------------------*) (* Computing the inductive type from the matrix of patterns *) (* We use the "in I" clause to coerce the terms to match and otherwise use the constructor to know in which type is the matching problem Note that insertion of coercions inside nested patterns is done each time the matrix is expanded *) let rec find_row_ind = function [] -> None | PatVar _ :: l -> find_row_ind l | PatCstr(loc,c,_,_) :: _ -> Some (loc,c) let inductive_template evdref env tmloc ind = let arsign = get_full_arity_sign env ind in let hole_source = match tmloc with | Some loc -> fun i -> (loc, TomatchTypeParameter (ind,i)) | None -> fun _ -> (dummy_loc, InternalHole) in let (_,evarl,_) = List.fold_right (fun (na,b,ty) (subst,evarl,n) -> match b with | None -> let ty' = substl subst ty in let e = e_new_evar evdref env ~src:(hole_source n) ty' in (e::subst,e::evarl,n+1) | Some b -> (substl subst b::subst,evarl,n+1)) arsign ([],[],1) in applist (mkInd ind,List.rev evarl) let try_find_ind env sigma typ realnames = let (IndType(_,realargs) as ind) = find_rectype env sigma typ in let names = match realnames with | Some names -> names | None -> list_make (List.length realargs) Anonymous in IsInd (typ,ind,names) let inh_coerce_to_ind evdref env ty tyi = let expected_typ = inductive_template evdref env None tyi in (* devrait être indifférent d'exiger leq ou pas puisque pour un inductif cela doit être égal *) let _ = e_cumul env evdref expected_typ ty in () let binding_vars_of_inductive = function | NotInd _ -> [] | IsInd (_,IndType(_,realargs),_) -> List.filter isRel realargs let extract_inductive_data env sigma (_,b,t) = if b<>None then (NotInd (None,t),[]) else let tmtyp = try try_find_ind env sigma t None with Not_found -> NotInd (None,t) in let tmtypvars = binding_vars_of_inductive tmtyp in (tmtyp,tmtypvars) let unify_tomatch_with_patterns evdref env loc typ pats realnames = match find_row_ind pats with | None -> NotInd (None,typ) | Some (_,(ind,_)) -> inh_coerce_to_ind evdref env typ ind; try try_find_ind env !evdref typ realnames with Not_found -> NotInd (None,typ) let find_tomatch_tycon evdref env loc = function (* Try if some 'in I ...' 
is present and can be used as a constraint *) | Some (_,ind,_,realnal) -> mk_tycon (inductive_template evdref env loc ind),Some (List.rev realnal) | None -> empty_tycon,None let coerce_row typing_fun evdref env pats (tomatch,(_,indopt)) = let loc = Some (loc_of_glob_constr tomatch) in let tycon,realnames = find_tomatch_tycon evdref env loc indopt in let j = typing_fun tycon env evdref tomatch in let typ = nf_evar !evdref j.uj_type in let t = try try_find_ind env !evdref typ realnames with Not_found -> unify_tomatch_with_patterns evdref env loc typ pats realnames in (j.uj_val,t) let coerce_to_indtype typing_fun evdref env matx tomatchl = let pats = List.map (fun r -> r.patterns) matx in let matx' = match matrix_transpose pats with | [] -> List.map (fun _ -> []) tomatchl (* no patterns at all *) | m -> m in List.map2 (coerce_row typing_fun evdref env) matx' tomatchl (************************************************************************) (* Utils *) let mkExistential env ?(src=(dummy_loc,InternalHole)) evdref = e_new_evar evdref env ~src:src (new_Type ()) let evd_comb2 f evdref x y = let (evd',y) = f !evdref x y in evdref := evd'; y module Cases_F(Coercion : Coercion.S) : S = struct let adjust_tomatch_to_pattern pb ((current,typ),deps,dep) = (* Ideally, we could find a common inductive type to which both the term to match and the patterns coerce *) (* In practice, we coerce the term to match if it is not already an inductive type and it is not dependent; moreover, we use only the first pattern type and forget about the others *) let typ,names = match typ with IsInd(t,_,names) -> t,Some names | NotInd(_,t) -> t,None in let tmtyp = try try_find_ind pb.env !(pb.evdref) typ names with Not_found -> NotInd (None,typ) in match tmtyp with | NotInd (None,typ) -> let tm1 = List.map (fun eqn -> List.hd eqn.patterns) pb.mat in (match find_row_ind tm1 with | None -> (current,tmtyp) | Some (_,(ind,_)) -> let indt = inductive_template pb.evdref pb.env None ind in let current = if deps = [] & isEvar typ then (* Don't insert coercions if dependent; only solve evars *) let _ = e_cumul pb.env pb.evdref indt typ in current else (evd_comb2 (Coercion.inh_conv_coerce_to dummy_loc pb.env) pb.evdref (make_judge current typ) (mk_tycon_type indt)).uj_val in let sigma = !(pb.evdref) in (current,try_find_ind pb.env sigma indt names)) | _ -> (current,tmtyp) let type_of_tomatch = function | IsInd (t,_,_) -> t | NotInd (_,t) -> t let mkDeclTomatch na = function | IsInd (t,_,_) -> (na,None,t) | NotInd (c,t) -> (na,c,t) let map_tomatch_type f = function | IsInd (t,ind,names) -> IsInd (f t,map_inductive_type f ind,names) | NotInd (c,t) -> NotInd (Option.map f c, f t) let liftn_tomatch_type n depth = map_tomatch_type (liftn n depth) let lift_tomatch_type n = liftn_tomatch_type n 1 (**********************************************************************) (* Utilities on patterns *) let current_pattern eqn = match eqn.patterns with | pat::_ -> pat | [] -> anomaly "Empty list of patterns" let alias_of_pat = function | PatVar (_,name) -> name | PatCstr(_,_,_,name) -> name let remove_current_pattern eqn = match eqn.patterns with | pat::pats -> { eqn with patterns = pats; alias_stack = alias_of_pat pat :: eqn.alias_stack } | [] -> anomaly "Empty list of patterns" let push_current_pattern (cur,ty) eqn = match eqn.patterns with | pat::pats -> let rhs_env = push_rel (alias_of_pat pat,Some cur,ty) eqn.rhs.rhs_env in { eqn with rhs = { eqn.rhs with rhs_env = rhs_env }; patterns = pats } | [] -> anomaly "Empty list of patterns" let 
prepend_pattern tms eqn = {eqn with patterns = tms@eqn.patterns } (**********************************************************************) (* Well-formedness tests *) (* Partial check on patterns *) exception NotAdjustable let rec adjust_local_defs loc = function | (pat :: pats, (_,None,_) :: decls) -> pat :: adjust_local_defs loc (pats,decls) | (pats, (_,Some _,_) :: decls) -> PatVar (loc, Anonymous) :: adjust_local_defs loc (pats,decls) | [], [] -> [] | _ -> raise NotAdjustable let check_and_adjust_constructor env ind cstrs = function | PatVar _ as pat -> pat | PatCstr (loc,((_,i) as cstr),args,alias) as pat -> (* Check it is constructor of the right type *) let ind' = inductive_of_constructor cstr in if eq_ind ind' ind then (* Check the constructor has the right number of args *) let ci = cstrs.(i-1) in let nb_args_constr = ci.cs_nargs in if List.length args = nb_args_constr then pat else try let args' = adjust_local_defs loc (args, List.rev ci.cs_args) in PatCstr (loc, cstr, args', alias) with NotAdjustable -> error_wrong_numarg_constructor_loc loc (Global.env()) cstr nb_args_constr else (* Try to insert a coercion *) try Coercion.inh_pattern_coerce_to loc pat ind' ind with Not_found -> error_bad_constructor_loc loc cstr ind let check_all_variables typ mat = List.iter (fun eqn -> match current_pattern eqn with | PatVar (_,id) -> () | PatCstr (loc,cstr_sp,_,_) -> error_bad_pattern_loc loc cstr_sp typ) mat let check_unused_pattern env eqn = if not !(eqn.used) then raise_pattern_matching_error (eqn.eqn_loc, env, UnusedClause eqn.patterns) let set_used_pattern eqn = eqn.used := true let extract_rhs pb = match pb.mat with | [] -> errorlabstrm "build_leaf" (msg_may_need_inversion()) | eqn::_ -> set_used_pattern eqn; eqn.rhs (**********************************************************************) (* Functions to deal with matrix factorization *) let occur_in_rhs na rhs = match na with | Anonymous -> false | Name id -> List.mem id rhs.rhs_vars let is_dep_patt_in eqn = function | PatVar (_,name) -> occur_in_rhs name eqn.rhs | PatCstr _ -> true let mk_dep_patt_row (pats,_,eqn) = List.map (is_dep_patt_in eqn) pats let dependencies_in_pure_rhs nargs eqns = if eqns = [] then list_make nargs false (* Only "_" patts *) else let deps_rows = List.map mk_dep_patt_row eqns in let deps_columns = matrix_transpose deps_rows in List.map (List.exists ((=) true)) deps_columns let dependent_decl a = function | (na,None,t) -> dependent a t | (na,Some c,t) -> dependent a t || dependent a c let rec dep_in_tomatch n = function | (Pushed _ | Alias _ | NonDepAlias) :: l -> dep_in_tomatch n l | Abstract (_,d) :: l -> dependent_decl (mkRel n) d or dep_in_tomatch (n+1) l | [] -> false let dependencies_in_rhs nargs current tms eqns = match kind_of_term current with | Rel n when dep_in_tomatch n tms -> list_make nargs true | _ -> dependencies_in_pure_rhs nargs eqns (* Computing the matrix of dependencies *) (* [find_dependency_list tmi [d(i+1);...;dn]] computes in which declarations [d(i+1);...;dn] the term [tmi] is dependent in. [find_dependencies_signature (used1,...,usedn) ((tm1,d1),...,(tmn,dn))] returns [(deps1,...,depsn)] where [depsi] is a subset of n,..,i+1 denoting in which of the d(i+1)...dn, the term tmi is dependent. Dependencies are expressed by index, e.g. 
in dependency list [n-2;1], [1] points to [dn] and [n-2] to [d3] *) let rec find_dependency_list tmblock = function | [] -> [] | (used,tdeps,d)::rest -> let deps = find_dependency_list tmblock rest in if used && List.exists (fun x -> dependent_decl x d) tmblock then list_add_set (List.length rest + 1) (list_union deps tdeps) else deps let find_dependencies is_dep_or_cstr_in_rhs (tm,(_,tmtypleaves),d) nextlist = let deps = find_dependency_list (tm::tmtypleaves) nextlist in if is_dep_or_cstr_in_rhs || deps <> [] then ((true ,deps,d)::nextlist) else ((false,[] ,d)::nextlist) let find_dependencies_signature deps_in_rhs typs = let l = List.fold_right2 find_dependencies deps_in_rhs typs [] in List.map (fun (_,deps,_) -> deps) l (* Assume we had terms t1..tq to match in a context xp:Tp,...,x1:T1 |- and xn:Tn has just been regeneralized into x:Tn so that the terms to match are now to be considered in the context xp:Tp,...,x1:T1,x:Tn |-. [relocate_index_tomatch n 1 tomatch] updates t1..tq so that former references to xn1 are now references to x. Note that t1..tq are already adjusted to the context xp:Tp,...,x1:T1,x:Tn |-. [relocate_index_tomatch 1 n tomatch] will go the way back. *) let relocate_index_tomatch n1 n2 = let rec genrec depth = function | [] -> [] | Pushed ((c,tm),l,na) :: rest -> let c = relocate_index n1 n2 depth c in let tm = map_tomatch_type (relocate_index n1 n2 depth) tm in let l = List.map (relocate_rel n1 n2 depth) l in Pushed ((c,tm),l,na) :: genrec depth rest | Alias (na,c,d) :: rest -> (* [c] is out of relocation scope *) Alias (na,c,map_pair (relocate_index n1 n2 depth) d) :: genrec depth rest | NonDepAlias :: rest -> NonDepAlias :: genrec depth rest | Abstract (i,d) :: rest -> let i = relocate_rel n1 n2 depth i in Abstract (i,map_rel_declaration (relocate_index n1 n2 depth) d) :: genrec (depth+1) rest in genrec 0 (* [replace_tomatch n c tomatch] replaces [Rel n] by [c] in [tomatch] *) let rec replace_term n c k t = if isRel t && destRel t = n+k then lift k c else map_constr_with_binders succ (replace_term n c) k t let length_of_tomatch_type_sign na = function | NotInd _ -> if na<>Anonymous then 1 else 0 | IsInd (_,_,names) -> List.length names + if na<>Anonymous then 1 else 0 let replace_tomatch n c = let rec replrec depth = function | [] -> [] | Pushed ((b,tm),l,na) :: rest -> let b = replace_term n c depth b in let tm = map_tomatch_type (replace_term n c depth) tm in List.iter (fun i -> if i=n+depth then anomaly "replace_tomatch") l; Pushed ((b,tm),l,na) :: replrec depth rest | Alias (na,b,d) :: rest -> (* [b] is out of replacement scope *) Alias (na,b,map_pair (replace_term n c depth) d) :: replrec depth rest | NonDepAlias :: rest -> NonDepAlias :: replrec depth rest | Abstract (i,d) :: rest -> Abstract (i,map_rel_declaration (replace_term n c depth) d) :: replrec (depth+1) rest in replrec 0 (* [liftn_tomatch_stack]: a term to match has just been substituted by some constructor t = (ci x1...xn) and the terms x1 ... xn have been added to match; all pushed terms to match must be lifted by n (knowing that [Abstract] introduces a binder in the list of pushed terms to match). 
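   For example, if the matched term has just been replaced by [C x1 x2]
   (so n = 2), a pushed term that was [Rel 1] in the previous context must
   become [Rel 3] to keep referring to the same declaration.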
*) let rec liftn_tomatch_stack n depth = function | [] -> [] | Pushed ((c,tm),l,na)::rest -> let c = liftn n depth c in let tm = liftn_tomatch_type n depth tm in let l = List.map (fun i -> if i Alias (na,liftn n depth c,map_pair (liftn n depth) d) ::(liftn_tomatch_stack n depth rest) | NonDepAlias :: rest -> NonDepAlias :: liftn_tomatch_stack n depth rest | Abstract (i,d)::rest -> let i = if i x | x => x end] should be compiled into [match y with O => y | (S n) => match n with O => y | (S x) => x end end] and [match y with (S (S n)) => n | n => n end] into [match y with O => y | (S n0) => match n0 with O => y | (S n) => n end end] i.e. user names should be preserved and created names should not interfere with user names The exact names here are not important for typing (because they are put in pb.env and not in the rhs.rhs_env of branches. However, whether a name is Anonymous or not may have an effect on whether a generalization is done or not. *) let merge_name get_name obj = function | Anonymous -> get_name obj | na -> na let merge_names get_name = List.map2 (merge_name get_name) let get_names env sign eqns = let names1 = list_make (List.length sign) Anonymous in (* If any, we prefer names used in pats, from top to bottom *) let names2,aliasname = List.fold_right (fun (pats,pat_alias,eqn) (names,aliasname) -> (merge_names alias_of_pat pats names, merge_name (fun x -> x) pat_alias aliasname)) eqns (names1,Anonymous) in (* Otherwise, we take names from the parameters of the constructor but avoiding conflicts with user ids *) let allvars = List.fold_left (fun l (_,_,eqn) -> list_union l eqn.rhs.avoid_ids) [] eqns in let names3,_ = List.fold_left2 (fun (l,avoid) d na -> let na = merge_name (fun (na,_,t) -> Name (next_name_away (named_hd env t na) avoid)) d na in (na::l,(out_name na)::avoid)) ([],allvars) (List.rev sign) names2 in names3,aliasname (*****************************************************************) (* Recovering names for variables pushed to the rhs' environment *) (* We just factorized a match over a matrix of equations *) (* "C xi1 .. xin as xi" as a single match over "C y1 .. yn as y" *) (* We now replace the names y1 .. yn y by the actual names *) (* xi1 .. 
xin xi to be found in the i-th clause of the matrix *) let set_declaration_name x (_,c,t) = (x,c,t) let recover_initial_subpattern_names = List.map2 set_declaration_name let recover_alias_names get_name = List.map2 (fun x (_,c,t) ->(get_name x,c,t)) let push_rels_eqn sign eqn = {eqn with rhs = {eqn.rhs with rhs_env = push_rels sign eqn.rhs.rhs_env} } let push_rels_eqn_with_names sign eqn = let subpats = List.rev (list_firstn (List.length sign) eqn.patterns) in let subpatnames = List.map alias_of_pat subpats in let sign = recover_initial_subpattern_names subpatnames sign in push_rels_eqn sign eqn let push_generalized_decl_eqn env n (na,c,t) eqn = let na = match na with | Anonymous -> Anonymous | Name id -> pi1 (Environ.lookup_rel n eqn.rhs.rhs_env) in push_rels_eqn [(na,c,t)] eqn let drop_alias_eqn eqn = { eqn with alias_stack = List.tl eqn.alias_stack } let push_alias_eqn alias eqn = let aliasname = List.hd eqn.alias_stack in let eqn = drop_alias_eqn eqn in let alias = set_declaration_name aliasname alias in push_rels_eqn [alias] eqn (**********************************************************************) (* Functions to deal with elimination predicate *) (* Infering the predicate *) (* The problem to solve is the following: We match Gamma |- t : I(u01..u0q) against the following constructors: Gamma, x11...x1p1 |- C1(x11..x1p1) : I(u11..u1q) ... Gamma, xn1...xnpn |- Cn(xn1..xnp1) : I(un1..unq) Assume the types in the branches are the following Gamma, x11...x1p1 |- branch1 : T1 ... Gamma, xn1...xnpn |- branchn : Tn Assume the type of the global case expression is Gamma |- T The predicate has the form phi = [y1..yq][z:I(y1..yq)]psi and it has to satisfy the following n+1 equations: Gamma, x11...x1p1 |- (phi u11..u1q (C1 x11..x1p1)) = T1 ... Gamma, xn1...xnpn |- (phi un1..unq (Cn xn1..xnpn)) = Tn Gamma |- (phi u01..u0q t) = T Some hints: - Clearly, if xij occurs in Ti, then, a "match z with (Ci xi1..xipi) => ... end" or a "psi(yk)", with psi extracting xij from uik, should be inserted somewhere in Ti. - If T is undefined, an easy solution is to insert a "match z with (Ci xi1..xipi) => ... end" in front of each Ti - Otherwise, T1..Tn and T must be step by step unified, if some of them diverge, then try to replace the diverging subterm by one of y1..yq or z. 
- The main problem is what to do when an existential variables is encountered *) (* Propagation of user-provided predicate through compilation steps *) let rec map_predicate f k ccl = function | [] -> f k ccl | Pushed ((_,tm),_,na) :: rest -> let k' = length_of_tomatch_type_sign na tm in map_predicate f (k+k') ccl rest | (Alias _ | NonDepAlias) :: rest -> map_predicate f k ccl rest | Abstract _ :: rest -> map_predicate f (k+1) ccl rest let noccur_predicate_between n = map_predicate (noccur_between n) let liftn_predicate n = map_predicate (liftn n) let lift_predicate n = liftn_predicate n 1 let regeneralize_index_predicate n = map_predicate (relocate_index n 1) 0 let substnl_predicate sigma = map_predicate (substnl sigma) (* This is parallel bindings *) let subst_predicate (args,copt) ccl tms = let sigma = match copt with | None -> List.rev args | Some c -> c::(List.rev args) in substnl_predicate sigma 0 ccl tms let specialize_predicate_var (cur,typ,dep) tms ccl = let c = if dep<>Anonymous then Some cur else None in let l = match typ with | IsInd (_,IndType(_,realargs),names) -> if names<>[] then realargs else [] | NotInd _ -> [] in subst_predicate (l,c) ccl tms (*****************************************************************************) (* We have pred = [X:=realargs;x:=c]P typed in Gamma1, x:I(realargs), Gamma2 *) (* and we want to abstract P over y:t(x) typed in the same context to get *) (* *) (* pred' = [X:=realargs;x':=c](y':t(x'))P[y:=y'] *) (* *) (* We first need to lift t(x) s.t. it is typed in Gamma, X:=rargs, x' *) (* then we have to replace x by x' in t(x) and y by y' in P *) (*****************************************************************************) let generalize_predicate (names,na) ny d tms ccl = if na=Anonymous then anomaly "Undetected dependency"; let p = List.length names + 1 in let ccl = lift_predicate 1 ccl tms in regeneralize_index_predicate (ny+p+1) ccl tms (*****************************************************************************) (* We just matched over cur:ind(realargs) in the following matching problem *) (* *) (* env |- match cur tms return ccl with ... 
end *) (* *) (* and we want to build the predicate corresponding to the individual *) (* matching over cur *) (* *) (* pred = fun X:realargstyps x:ind(X)] PI tms.ccl *) (* *) (* where pred is computed by abstract_predicate and PI tms.ccl by *) (* extract_predicate *) (*****************************************************************************) let rec extract_predicate ccl = function | (Alias _ | NonDepAlias)::tms -> (* substitution already done in build_branch *) extract_predicate ccl tms | Abstract (i,d)::tms -> mkProd_wo_LetIn d (extract_predicate ccl tms) | Pushed ((cur,NotInd _),_,na)::tms -> let tms = if na<>Anonymous then lift_tomatch_stack 1 tms else tms in let pred = extract_predicate ccl tms in if na<>Anonymous then subst1 cur pred else pred | Pushed ((cur,IsInd (_,IndType(_,realargs),_)),_,na)::tms -> let realargs = List.rev realargs in let k = if na<>Anonymous then 1 else 0 in let tms = lift_tomatch_stack (List.length realargs + k) tms in let pred = extract_predicate ccl tms in substl (if na<>Anonymous then cur::realargs else realargs) pred | [] -> ccl let abstract_predicate env sigma indf cur realargs (names,na) tms ccl = let sign = make_arity_signature env true indf in (* n is the number of real args + 1 (+ possible let-ins in sign) *) let n = List.length sign in (* Before abstracting we generalize over cur and on those realargs *) (* that are rels, consistently with the specialization made in *) (* build_branch *) let tms = List.fold_right2 (fun par arg tomatch -> match kind_of_term par with | Rel i -> relocate_index_tomatch (i+n) (destRel arg) tomatch | _ -> tomatch) (realargs@[cur]) (extended_rel_list 0 sign) (lift_tomatch_stack n tms) in (* Pred is already dependent in the current term to match (if *) (* (na<>Anonymous) and its realargs; we just need to adjust it to *) (* full sign if dep in cur is not taken into account *) let ccl = if na <> Anonymous then ccl else lift_predicate 1 ccl tms in let pred = extract_predicate ccl tms in (* Build the predicate properly speaking *) let sign = List.map2 set_declaration_name (na::names) sign in it_mkLambda_or_LetIn_name env pred sign (* [expand_arg] is used by [specialize_predicate] if Yk denotes [Xk;xk] or [Xk], it replaces gamma, x1...xn, x1...xk Yk+1...Yn |- pred by gamma, x1...xn, x1...xk-1 [Xk;xk] Yk+1...Yn |- pred (if dep) or by gamma, x1...xn, x1...xk-1 [Xk] Yk+1...Yn |- pred (if not dep) *) let expand_arg tms (p,ccl) ((_,t),_,na) = let k = length_of_tomatch_type_sign na t in (p+k,liftn_predicate (k-1) (p+1) ccl tms) let adjust_impossible_cases pb pred tomatch submat = if submat = [] then match kind_of_term (whd_evar !(pb.evdref) pred) with | Evar (evk,_) when snd (evar_source evk !(pb.evdref)) = ImpossibleCase -> let default = (coq_unit_judge ()).uj_type in pb.evdref := Evd.define evk default !(pb.evdref); (* we add an "assert false" case *) let pats = List.map (fun _ -> PatVar (dummy_loc,Anonymous)) tomatch in let aliasnames = map_succeed (function Alias _ | NonDepAlias -> Anonymous | _ -> failwith"") tomatch in [ { patterns = pats; rhs = { rhs_env = pb.env; rhs_vars = []; avoid_ids = []; it = None }; alias_stack = Anonymous::aliasnames; eqn_loc = dummy_loc; used = ref false } ] | _ -> submat else submat (*****************************************************************************) (* Let pred = PI [X;x:I(X)]. PI tms. 
P be a typing predicate for the *) (* following pattern-matching problem: *) (* *) (* Gamma |- match Pushed(c:I(V)) as x in I(X), tms return pred with...end *) (* *) (* where the branch with constructor Ci:(x1:T1)...(xn:Tn)->I(realargsi) *) (* is considered. Assume each Ti is some Ii(argsi) with Ti:PI Ui. sort_i *) (* We let subst = X:=realargsi;x:=Ci(x1,...,xn) and replace pred by *) (* *) (* pred' = PI [X1:Ui;x1:I1(X1)]...[Xn:Un;xn:In(Xn)]. (PI tms. P)[subst] *) (* *) (* s.t. the following well-typed sub-pattern-matching problem is obtained *) (* *) (* Gamma,x'1..x'n |- *) (* match *) (* Pushed(x'1) as x1 in I(X1), *) (* .., *) (* Pushed(x'n) as xn in I(Xn), *) (* tms *) (* return pred' *) (* with .. end *) (* *) (*****************************************************************************) let specialize_predicate newtomatchs (names,depna) arsign cs tms ccl = (* Assume some gamma st: gamma |- PI [X,x:I(X)]. PI tms. ccl *) let nrealargs = List.length names in let k = nrealargs + (if depna<>Anonymous then 1 else 0) in (* We adjust pred st: gamma, x1..xn |- PI [X,x:I(X)]. PI tms. ccl' *) (* so that x can later be instantiated by Ci(x1..xn) *) (* and X by the realargs for Ci *) let n = cs.cs_nargs in let ccl' = liftn_predicate n (k+1) ccl tms in (* We prepare the substitution of X and x:I(X) *) let realargsi = if nrealargs <> 0 then adjust_subst_to_rel_context arsign (Array.to_list cs.cs_concl_realargs) else [] in let copti = if depna<>Anonymous then Some (build_dependent_constructor cs) else None in (* The substituends realargsi, copti are all defined in gamma, x1...xn *) (* We need _parallel_ bindings to get gamma, x1...xn |- PI tms. ccl'' *) (* Note: applying the substitution in tms is not important (is it sure?) *) let ccl'' = whd_betaiota Evd.empty (subst_predicate (realargsi, copti) ccl' tms) in (* We adjust ccl st: gamma, x'1..x'n, x1..xn, tms |- ccl'' *) let ccl''' = liftn_predicate n (n+1) ccl'' tms in (* We finally get gamma,x'1..x'n,x |- [X1;x1:I(X1)]..[Xn;xn:I(Xn)]pred'''*) snd (List.fold_left (expand_arg tms) (1,ccl''') newtomatchs) let find_predicate loc env evdref p current (IndType (indf,realargs)) dep tms = let pred = abstract_predicate env !evdref indf current realargs dep tms p in (pred, whd_betaiota !evdref (applist (pred, realargs@[current]))) (* Take into account that a type has been discovered to be inductive, leading to more dependencies in the predicate if the type has indices *) let adjust_predicate_from_tomatch tomatch (current,typ as ct) pb = let ((_,oldtyp),deps,na) = tomatch in match typ, oldtyp with | IsInd (_,_,names), NotInd _ -> let k = if na <> Anonymous then 2 else 1 in let n = List.length names in { pb with pred = liftn_predicate n k pb.pred pb.tomatch }, (ct,List.map (fun i -> if i >= k then i+n else i) deps,na) | _ -> pb, (ct,deps,na) (* Remove commutative cuts that turn out to be non-dependent after some evars have been instantiated *) let rec ungeneralize n ng body = match kind_of_term body with | Lambda (_,_,c) when ng = 0 -> subst1 (mkRel n) c | Lambda (na,t,c) -> (* We traverse an inner generalization *) mkLambda (na,t,ungeneralize (n+1) (ng-1) c) | LetIn (na,b,t,c) -> (* We traverse an alias *) mkLetIn (na,b,t,ungeneralize (n+1) ng c) | Case (ci,p,c,brs) -> (* We traverse a split *) let p = let sign,p = decompose_lam_assum p in let sign2,p = decompose_prod_n_assum ng p in let p = prod_applist p [mkRel (n+List.length sign+ng)] in it_mkLambda_or_LetIn (it_mkProd_or_LetIn p sign2) sign in mkCase (ci,p,c,array_map2 (fun q c -> let sign,b = 
decompose_lam_n_assum q c in it_mkLambda_or_LetIn (ungeneralize (n+q) ng b) sign) ci.ci_cstr_ndecls brs) | App (f,args) -> (* We traverse an inner generalization *) assert (isCase f); mkApp (ungeneralize n (ng+Array.length args) f,args) | _ -> assert false let ungeneralize_branch n k (sign,body) cs = (sign,ungeneralize (n+cs.cs_nargs) k body) let postprocess_dependencies evd tocheck brs tomatch pred deps cs = let rec aux k brs tomatch pred tocheck deps = match deps, tomatch with | [], _ -> brs,tomatch,pred,[] | n::deps, Abstract (i,d) :: tomatch -> let d = map_rel_declaration (nf_evar evd) d in if List.exists (fun c -> dependent_decl (lift k c) d) tocheck || pi2 d <> None then (* Dependency in the current term to match and its dependencies is real *) let brs,tomatch,pred,inst = aux (k+1) brs tomatch pred (mkRel n::tocheck) deps in let inst = if pi2 d = None then mkRel n::inst else inst in brs, Abstract (i,d) :: tomatch, pred, inst else (* Finally, no dependency remains, so, we can replace the generalized *) (* terms by its actual value in both the remaining terms to match and *) (* the bodies of the Case *) let pred = lift_predicate (-1) pred tomatch in let tomatch = relocate_index_tomatch 1 (n+1) tomatch in let tomatch = lift_tomatch_stack (-1) tomatch in let brs = array_map2 (ungeneralize_branch n k) brs cs in aux k brs tomatch pred tocheck deps | _ -> assert false in aux 0 brs tomatch pred tocheck deps (************************************************************************) (* Sorting equations by constructor *) let rec irrefutable env = function | PatVar (_,name) -> true | PatCstr (_,cstr,args,_) -> let ind = inductive_of_constructor cstr in let (_,mip) = Inductive.lookup_mind_specif env ind in let one_constr = Array.length mip.mind_user_lc = 1 in one_constr & List.for_all (irrefutable env) args let first_clause_irrefutable env = function | eqn::mat -> List.for_all (irrefutable env) eqn.patterns | _ -> false let group_equations pb ind current cstrs mat = let mat = if first_clause_irrefutable pb.env mat then [List.hd mat] else mat in let brs = Array.create (Array.length cstrs) [] in let only_default = ref true in let _ = List.fold_right (* To be sure it's from bottom to top *) (fun eqn () -> let rest = remove_current_pattern eqn in let pat = current_pattern eqn in match check_and_adjust_constructor pb.env ind cstrs pat with | PatVar (_,name) -> (* This is a default clause that we expand *) for i=1 to Array.length cstrs do let args = make_anonymous_patvars cstrs.(i-1).cs_nargs in brs.(i-1) <- (args, name, rest) :: brs.(i-1) done | PatCstr (loc,((_,i)),args,name) -> (* This is a regular clause *) only_default := false; brs.(i-1) <- (args, name, rest) :: brs.(i-1)) mat () in (brs,!only_default) (************************************************************************) (* Here starts the pattern-matching compilation algorithm *) (* Abstracting over dependent subterms to match *) let rec generalize_problem names pb = function | [] -> pb, [] | i::l -> let (na,b,t as d) = map_rel_declaration (lift i) (Environ.lookup_rel i pb.env) in let pb',deps = generalize_problem names pb l in if na = Anonymous & b <> None then pb',deps else let d = on_pi3 (whd_betaiota !(pb.evdref)) d in (* for better rendering *) let tomatch = lift_tomatch_stack 1 pb'.tomatch in let tomatch = relocate_index_tomatch (i+1) 1 tomatch in { pb' with tomatch = Abstract (i,d) :: tomatch; pred = generalize_predicate names i d pb'.tomatch pb'.pred }, i::deps (* No more patterns: typing the right-hand side of equations *) let 
build_leaf pb = let rhs = extract_rhs pb in let j = pb.typing_function (mk_tycon pb.pred) rhs.rhs_env pb.evdref rhs.it in j_nf_evar !(pb.evdref) j (* Build the sub-pattern-matching problem for a given branch "C x1..xn as x" *) let build_branch current realargs deps (realnames,curname) pb arsign eqns const_info = (* We remember that we descend through constructor C *) let history = push_history_pattern const_info.cs_nargs const_info.cs_cstr pb.history in (* We prepare the matching on x1:T1 .. xn:Tn using some heuristic to *) (* build the name x1..xn from the names present in the equations *) (* that had matched constructor C *) let cs_args = const_info.cs_args in let names,aliasname = get_names pb.env cs_args eqns in let typs = List.map2 (fun (_,c,t) na -> (na,c,t)) cs_args names in (* We build the matrix obtained by expanding the matching on *) (* "C x1..xn as x" followed by a residual matching on eqn into *) (* a matching on "x1 .. xn eqn" *) let submat = List.map (fun (tms,_,eqn) -> prepend_pattern tms eqn) eqns in (* We adjust the terms to match in the context they will be once the *) (* context [x1:T1,..,xn:Tn] will have been pushed on the current env *) let typs' = list_map_i (fun i d -> (mkRel i,map_rel_declaration (lift i) d)) 1 typs in let extenv = push_rels typs pb.env in let typs' = List.map (fun (c,d) -> (c,extract_inductive_data extenv !(pb.evdref) d,d)) typs' in (* We compute over which of x(i+1)..xn and x matching on xi will need a *) (* generalization *) let dep_sign = find_dependencies_signature (dependencies_in_rhs const_info.cs_nargs current pb.tomatch eqns) (List.rev typs') in (* The dependent term to subst in the types of the remaining UnPushed terms is relative to the current context enriched by topushs *) let ci = build_dependent_constructor const_info in (* Current context Gamma has the form Gamma1;cur:I(realargs);Gamma2 *) (* We go from Gamma |- PI tms. pred to *) (* Gamma;x1..xn;curalias:I(x1..xn) |- PI tms'. 
pred' *) (* where, in tms and pred, those realargs that are vars are *) (* replaced by the corresponding xi and cur replaced by curalias *) let cirealargs = Array.to_list const_info.cs_concl_realargs in (* Do the specialization for terms to match *) let tomatch = List.fold_right2 (fun par arg tomatch -> match kind_of_term par with | Rel i -> replace_tomatch (i+const_info.cs_nargs) arg tomatch | _ -> tomatch) (current::realargs) (ci::cirealargs) (lift_tomatch_stack const_info.cs_nargs pb.tomatch) in let pred_is_not_dep = noccur_predicate_between 1 (List.length realnames + 1) pb.pred tomatch in let typs' = List.map2 (fun (tm,(tmtyp,_),(na,_,_)) deps -> let na = match curname with | Name _ -> (if na <> Anonymous then na else curname) | Anonymous -> if deps = [] && pred_is_not_dep then Anonymous else force_name na in ((tm,tmtyp),deps,na)) typs' (List.rev dep_sign) in (* Do the specialization for the predicate *) let pred = specialize_predicate typs' (realnames,curname) arsign const_info tomatch pb.pred in let currents = List.map (fun x -> Pushed x) typs' in let alias = if aliasname = Anonymous then NonDepAlias else let cur_alias = lift const_info.cs_nargs current in let ind = appvect ( applist (mkInd (inductive_of_constructor const_info.cs_cstr), List.map (lift const_info.cs_nargs) const_info.cs_params), const_info.cs_concl_realargs) in Alias (aliasname,cur_alias,(ci,ind)) in let tomatch = List.rev_append (alias :: currents) tomatch in let submat = adjust_impossible_cases pb pred tomatch submat in if submat = [] then raise_pattern_matching_error (dummy_loc, pb.env, NonExhaustive (complete_history history)); typs, { pb with env = extenv; tomatch = tomatch; pred = pred; history = history; mat = List.map (push_rels_eqn_with_names typs) submat } (********************************************************************** INVARIANT: pb = { env, pred, tomatch, mat, ...} tomatch = list of Pushed (c:T), Abstract (na:T), Alias (c:T) or NonDepAlias all terms and types in Pushed, Abstract and Alias are relative to env enriched by the Abstract coming before *) (**********************************************************************) (* Main compiling descent *) let rec compile pb = match pb.tomatch with | Pushed cur :: rest -> match_current { pb with tomatch = rest } cur | Alias x :: rest -> compile_alias pb x rest | NonDepAlias :: rest -> compile_non_dep_alias pb rest | Abstract (i,d) :: rest -> compile_generalization pb i d rest | [] -> build_leaf pb (* Case splitting *) and match_current pb tomatch = let tm = adjust_tomatch_to_pattern pb tomatch in let pb,tomatch = adjust_predicate_from_tomatch tomatch tm pb in let ((current,typ),deps,dep) = tomatch in match typ with | NotInd (_,typ) -> check_all_variables typ pb.mat; shift_problem tomatch pb | IsInd (_,(IndType(indf,realargs) as indt),names) -> let mind,_ = dest_ind_family indf in let cstrs = get_constructors pb.env indf in let arsign, _ = get_arity pb.env indf in let eqns,onlydflt = group_equations pb mind current cstrs pb.mat in if (Array.length cstrs <> 0 or pb.mat <> []) & onlydflt then shift_problem tomatch pb else (* We generalize over terms depending on current term to match *) let pb,deps = generalize_problem (names,dep) pb deps in (* We compile branches *) let brvals = array_map2 (compile_branch current realargs (names,dep) deps pb arsign) eqns cstrs in (* We build the (elementary) case analysis *) let depstocheck = current::binding_vars_of_inductive typ in let brvals,tomatch,pred,inst = postprocess_dependencies !(pb.evdref) depstocheck brvals 
pb.tomatch pb.pred deps cstrs in let brvals = Array.map (fun (sign,body) -> it_mkLambda_or_LetIn body sign) brvals in let (pred,typ) = find_predicate pb.caseloc pb.env pb.evdref pred current indt (names,dep) tomatch in let ci = make_case_info pb.env mind pb.casestyle in let pred = nf_betaiota !(pb.evdref) pred in let case = mkCase (ci,pred,current,brvals) in Typing.check_allowed_sort pb.env !(pb.evdref) mind current pred; { uj_val = applist (case, inst); uj_type = prod_applist typ inst } (* Building the sub-problem when all patterns are variables *) and shift_problem ((current,t),_,na) pb = let ty = type_of_tomatch t in let tomatch = lift_tomatch_stack 1 pb.tomatch in let pred = specialize_predicate_var (current,t,na) pb.tomatch pb.pred in let pb = { pb with env = push_rel (na,Some current,ty) pb.env; tomatch = tomatch; pred = lift_predicate 1 pred tomatch; history = pop_history pb.history; mat = List.map (push_current_pattern (current,ty)) pb.mat } in let j = compile pb in { uj_val = subst1 current j.uj_val; uj_type = subst1 current j.uj_type } (* Building the sub-problem when all patterns are variables *) and compile_branch current realargs names deps pb arsign eqns cstr = let sign, pb = build_branch current realargs deps names pb arsign eqns cstr in sign, (compile pb).uj_val (* Abstract over a declaration before continuing splitting *) and compile_generalization pb i d rest = let pb = { pb with env = push_rel d pb.env; tomatch = rest; mat = List.map (push_generalized_decl_eqn pb.env i d) pb.mat } in let j = compile pb in { uj_val = mkLambda_or_LetIn d j.uj_val; uj_type = mkProd_wo_LetIn d j.uj_type } and compile_alias pb (na,orig,(expanded,expanded_typ)) rest = let f c t = let alias = (na,Some c,t) in let pb = { pb with env = push_rel alias pb.env; tomatch = lift_tomatch_stack 1 rest; pred = lift_predicate 1 pb.pred pb.tomatch; history = pop_history_pattern pb.history; mat = List.map (push_alias_eqn alias) pb.mat } in let j = compile pb in { uj_val = if isRel c || isVar c || count_occurrences (mkRel 1) j.uj_val <= 1 then subst1 c j.uj_val else mkLetIn (na,c,t,j.uj_val); uj_type = subst1 c j.uj_type } in if isRel orig or isVar orig then (* Try to compile first using non expanded alias *) try f orig (Retyping.get_type_of pb.env !(pb.evdref) orig) with e when precatchable_exception e -> (* Try then to compile using expanded alias *) f expanded expanded_typ else (* Try to compile first using expanded alias *) try f expanded expanded_typ with e when precatchable_exception e -> (* Try then to compile using non expanded alias *) f orig (Retyping.get_type_of pb.env !(pb.evdref) orig) (* Remember that a non-trivial pattern has been consumed *) and compile_non_dep_alias pb rest = let pb = { pb with tomatch = rest; history = pop_history_pattern pb.history; mat = List.map drop_alias_eqn pb.mat } in compile pb (* pour les alias des initiaux, enrichir les env de ce qu'il faut et substituer après par les initiaux *) (**************************************************************************) (* Preparation of the pattern-matching problem *) (* builds the matrix of equations testing that each eqn has n patterns * and linearizing the _ patterns. 
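 * Concretely (see [build_eqn] just below): each clause [(loc,ids,lpat,rhs)]
 * becomes an [equation] record with patterns [lpat], an empty [alias_stack],
 * a fresh [used := ref false] flag, and an [rhs] recording the environment,
 * the free variables of the right-hand side and the identifiers to avoid.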
* Syntactic correctness has already been done in astterm *) let matx_of_eqns env tomatchl eqns = let build_eqn (loc,ids,lpat,rhs) = let initial_lpat,initial_rhs = lpat,rhs in let initial_rhs = rhs in let rhs = { rhs_env = env; rhs_vars = free_glob_vars initial_rhs; avoid_ids = ids@(ids_of_named_context (named_context env)); it = Some initial_rhs } in { patterns = initial_lpat; alias_stack = []; eqn_loc = loc; used = ref false; rhs = rhs } in List.map build_eqn eqns (***************** Building an inversion predicate ************************) (* Let "match t1 in I1 u11..u1n_1 ... tm in Im um1..umn_m with ... end : T" be a pattern-matching problem. We assume that each uij can be decomposed under the form pij(vij1..vijq_ij) where pij(aij1..aijq_ij) is a pattern depending on some variables aijk and the vijk are instances of these variables. We also assume that each ti has the form of a pattern qi(wi1..wiq_i) where qi(bi1..biq_i) is a pattern depending on some variables bik and the wik are instances of these variables (in practice, there is no reason that ti is already constructed and the qi will be degenerated). We then look for a type U(..a1jk..b1 .. ..amjk..bm) so that T = U(..v1jk..t1 .. ..vmjk..tm). This a higher-order matching problem with a priori different solutions (one of them if T itself!). We finally invert the uij and the ti and build the return clause phi(x11..x1n_1y1..xm1..xmn_mym) = match x11..x1n_1 y1 .. xm1..xmn_m ym with | p11..p1n_1 q1 .. pm1..pmn_m qm => U(..a1jk..b1 .. ..amjk..bm) | _ .. _ _ .. _ .. _ _ => True end so that "phi(u11..u1n_1t1..um1..umn_mtm) = T" (note that the clause returning True never happens and any inhabited type can be put instead). *) let adjust_to_extended_env_and_remove_deps env extenv subst t = let n = rel_context_length (rel_context env) in let n' = rel_context_length (rel_context extenv) in (* We first remove the bindings that are dependently typed (they are difficult to manage and it is not sure these are so useful in practice); Notes: - [subst] is made of pairs [(id,u)] where id is a name in [extenv] and [u] a term typed in [env]; - [subst0] is made of items [(p,u,(u,ty))] where [ty] is the type of [u] and both are adjusted to [extenv] while [p] is the index of [id] in [extenv] (after expansion of the aliases) *) let subst0 = map_succeed (fun (x,u) -> (* d1 ... dn dn+1 ... dn'-p+1 ... dn' *) (* \--env-/ (= x:ty) *) (* \--------------extenv------------/ *) let (p,_,_) = lookup_rel_id x (rel_context extenv) in let rec traverse_local_defs p = match pi2 (lookup_rel p extenv) with | Some c -> assert (isRel c); traverse_local_defs (p + destRel c) | None -> p in let p = traverse_local_defs p in let u = lift (n'-n) u in (p,u,expand_vars_in_term extenv u)) subst in let t0 = lift (n'-n) t in (subst0,t0) let push_binder d (k,env,subst) = (k+1,push_rel d env,List.map (fun (na,u,d) -> (na,lift 1 u,d)) subst) let rec list_assoc_in_triple x = function [] -> raise Not_found | (a,b,_)::l -> if compare a x = 0 then b else list_assoc_in_triple x l (* Let vijk and ti be a set of dependent terms and T a type, all * defined in some environment env. The vijk and ti are supposed to be * instances for variables aijk and bi. * * [abstract_tycon Gamma0 Sigma subst T Gamma] looks for U(..v1jk..t1 .. ..vmjk..tm) * defined in some extended context * "Gamma0, ..a1jk:V1jk.. b1:W1 .. ..amjk:Vmjk.. bm:Wm" * such that env |- T = U(..v1jk..t1 .. ..vmjk..tm). 
To not commit to * a particular solution, we replace each subterm t in T that unifies with * a subset u1..ul of the vijk and ti by a special evar * ?id(x=t;c1:=c1,..,cl=cl) defined in context Gamma0,x,c1,...,cl |- ?id * (where the c1..cl are the aijk and bi matching the u1..ul), and * similarly for each ti. *) let abstract_tycon loc env evdref subst _tycon extenv t = let sigma = !evdref in let t = nf_betaiota sigma t in (* it helps in some cases to remove K-redex *) let subst0,t0 = adjust_to_extended_env_and_remove_deps env extenv subst t in (* We traverse the type T of the original problem Xi looking for subterms that match the non-constructor part of the constraints (this part is in subst); these subterms are the "good" subterms and we replace them by an evar that may depend (and only depend) on the corresponding convertible subterms of the substitution *) let rec aux (k,env,subst as x) t = let t = whd_evar !evdref t in match kind_of_term t with | Rel n when pi2 (lookup_rel n env) <> None -> map_constr_with_full_binders push_binder aux x t | Evar ev -> let ty = get_type_of env sigma t in let inst = list_map_i (fun i _ -> try list_assoc_in_triple i subst0 with Not_found -> mkRel i) 1 (rel_context env) in let ev = e_new_evar evdref env ~src:(loc, CasesType) ty in evdref := add_conv_pb (Reduction.CONV,env,substl inst ev,t) !evdref; ev | _ -> let good = List.filter (fun (_,u,_) -> is_conv_leq env sigma t u) subst in if good <> [] then let u = pi3 (List.hd good) in (* u is in extenv *) let vl = List.map pi1 good in let ty = lift (-k) (aux x (get_type_of env !evdref t)) in let depvl = free_rels ty in let inst = list_map_i (fun i _ -> if List.mem i vl then u else mkRel i) 1 (rel_context extenv) in let rel_filter = List.map (fun a -> not (isRel a) || dependent a u || Intset.mem (destRel a) depvl) inst in let named_filter = List.map (fun (id,_,_) -> dependent (mkVar id) u) (named_context extenv) in let filter = rel_filter@named_filter in let candidates = u :: List.map mkRel vl in let ev = e_new_evar evdref extenv ~src:(loc, CasesType) ~filter ~candidates ty in lift k ev else map_constr_with_full_binders push_binder aux x t in aux (0,extenv,subst0) t0 let build_tycon loc env tycon_env subst tycon extenv evdref t = let t,tt = match t with | None -> (* This is the situation we are building a return predicate and we are in an impossible branch *) let n = rel_context_length (rel_context env) in let n' = rel_context_length (rel_context tycon_env) in let tt = new_Type () in let impossible_case_type = e_new_evar evdref env ~src:(loc,ImpossibleCase) tt in (lift (n'-n) impossible_case_type, tt) | Some t -> let t = abstract_tycon loc tycon_env evdref subst tycon extenv t in let evd,tt = Typing.e_type_of extenv !evdref t in evdref := evd; (t,tt) in { uj_val = t; uj_type = tt } (* For a multiple pattern-matching problem Xi on t1..tn with return * type T, [build_inversion_problem Gamma Sigma (t1..tn) T] builds a return * predicate for Xi that is itself made by an auxiliary * pattern-matching problem of which the first clause reveals the * pattern structure of the constraints on the inductive types of the t1..tn, * and the second clause is a wildcard clause for catching the * impossible cases. 
See above "Building an inversion predicate" for * further explanations *) let build_inversion_problem loc env sigma tms t = let make_patvar t (subst,avoid) = let id = next_name_away (named_hd env t Anonymous) avoid in PatVar (dummy_loc,Name id), ((id,t)::subst, id::avoid) in let rec reveal_pattern t (subst,avoid as acc) = match kind_of_term (whd_betadeltaiota env sigma t) with | Construct cstr -> PatCstr (dummy_loc,cstr,[],Anonymous), acc | App (f,v) when isConstruct f -> let cstr = destConstruct f in let n = constructor_nrealargs env cstr in let l = list_lastn n (Array.to_list v) in let l,acc = list_fold_map' reveal_pattern l acc in PatCstr (dummy_loc,cstr,l,Anonymous), acc | _ -> make_patvar t acc in let rec aux n env acc_sign tms acc = match tms with | [] -> [], acc_sign, acc | (t, IsInd (_,IndType(indf,realargs),_)) :: tms -> let patl,acc = list_fold_map' reveal_pattern realargs acc in let pat,acc = make_patvar t acc in let indf' = lift_inductive_family n indf in let sign = make_arity_signature env true indf' in let sign = recover_alias_names alias_of_pat (pat :: List.rev patl) sign in let p = List.length realargs in let env' = push_rels sign env in let patl',acc_sign,acc = aux (n+p+1) env' (sign@acc_sign) tms acc in patl@pat::patl',acc_sign,acc | (t, NotInd (bo,typ)) :: tms -> let pat,acc = make_patvar t acc in let d = (alias_of_pat pat,None,t) in let patl,acc_sign,acc = aux (n+1) (push_rel d env) (d::acc_sign) tms acc in pat::patl,acc_sign,acc in let avoid0 = ids_of_context env in (* [patl] is a list of patterns revealing the substructure of constructors present in the constraints on the type of the multiple terms t1..tn that are matched in the original problem; [subst] is the substitution of the free pattern variables in [patl] that returns the non-constructor parts of the constraints. 
Especially, if the ti has type I ui1..uin_i, and the patterns associated to ti are pi1..pin_i, then subst(pij) is uij; the substitution is useful to recognize which subterms of the whole type T of the original problem have to be abstracted *) let patl,sign,(subst,avoid) = aux 0 env [] tms ([],avoid0) in let n = List.length sign in let decls = list_map_i (fun i d -> (mkRel i,map_rel_declaration (lift i) d)) 1 sign in let pb_env = push_rels sign env in let decls = List.map (fun (c,d) -> (c,extract_inductive_data pb_env sigma d,d)) decls in let decls = List.rev decls in let dep_sign = find_dependencies_signature (list_make n true) decls in let sub_tms = List.map2 (fun deps (tm,(tmtyp,_),(na,b,t)) -> let na = if deps = [] then Anonymous else force_name na in Pushed ((tm,tmtyp),deps,na)) dep_sign decls in let subst = List.map (fun (na,t) -> (na,lift n t)) subst in (* [eqn1] is the first clause of the auxiliary pattern-matching that serves as skeleton for the return type: [patl] is the substructure of constructors extracted from the list of constraints on the inductive types of the multiple terms matched in the original pattern-matching problem Xi *) let eqn1 = { patterns = patl; alias_stack = []; eqn_loc = dummy_loc; used = ref false; rhs = { rhs_env = pb_env; (* we assume all vars are used; in practice we discard dependent vars so that the field rhs_vars is normally not used *) rhs_vars = List.map fst subst; avoid_ids = avoid; it = Some (lift n t) } } in (* [eqn2] is the default clause of the auxiliary pattern-matching: it will catch the clauses of the original pattern-matching problem Xi whose type constraints are incompatible with the constraints on the inductive types of the multiple terms matched in Xi *) let eqn2 = { patterns = List.map (fun _ -> PatVar (dummy_loc,Anonymous)) patl; alias_stack = []; eqn_loc = dummy_loc; used = ref false; rhs = { rhs_env = pb_env; rhs_vars = []; avoid_ids = avoid0; it = None } } in (* [pb] is the auxiliary pattern-matching serving as skeleton for the return type of the original problem Xi *) let evdref = ref sigma in let pb = { env = pb_env; evdref = evdref; pred = new_Type(); tomatch = sub_tms; history = start_history n; mat = [eqn1;eqn2]; caseloc = loc; casestyle = RegularStyle; typing_function = build_tycon loc env pb_env subst} in let pred = (compile pb).uj_val in (!evdref,pred) (* Here, [pred] is assumed to be in the context built from all *) (* realargs and terms to match *) let build_initial_predicate arsign pred = let rec buildrec n pred tmnames = function | [] -> List.rev tmnames,pred | ((na,c,t)::realdecls)::lnames -> let n' = n + List.length realdecls in buildrec (n'+1) pred (force_name na::tmnames) lnames | _ -> assert false in buildrec 0 pred [] (List.rev arsign) let extract_arity_signature env0 tomatchl tmsign = let get_one_sign n tm (na,t) = match tm with | NotInd (bo,typ) -> (match t with | None -> [na,Option.map (lift n) bo,lift n typ] | Some (loc,_,_,_) -> user_err_loc (loc,"", str"Unexpected type annotation for a term of non inductive type.")) | IsInd (term,IndType(indf,realargs),_) -> let indf' = lift_inductive_family n indf in let (ind,_) = dest_ind_family indf' in let nparams_ctxt,nrealargs_ctxt = inductive_nargs env0 ind in let arsign = fst (get_arity env0 indf') in let realnal = match t with | Some (loc,ind',nparams,realnal) -> if ind <> ind' then user_err_loc (loc,"",str "Wrong inductive type."); if nparams_ctxt <> nparams or nrealargs_ctxt <> List.length realnal then anomaly "Ill-formed 'in' clause in cases"; List.rev realnal | 
None -> list_make nrealargs_ctxt Anonymous in (na,None,build_dependent_inductive env0 indf') ::(List.map2 (fun x (_,c,t) ->(x,c,t)) realnal arsign) in let rec buildrec n = function | [],[] -> [] | (_,tm)::ltm, (_,x)::tmsign -> let l = get_one_sign n tm x in l :: buildrec (n + List.length l) (ltm,tmsign) | _ -> assert false in List.rev (buildrec 0 (tomatchl,tmsign)) let inh_conv_coerce_to_tycon loc env evdref j tycon = match tycon with | Some p -> let (evd',j) = Coercion.inh_conv_coerce_to loc env !evdref j p in evdref := evd'; j | None -> j (* We put the tycon inside the arity signature, possibly discovering dependencies. *) let prepare_predicate_from_arsign_tycon loc tomatchs arsign c = let nar = List.fold_left (fun n sign -> List.length sign + n) 0 arsign in let subst, len = List.fold_left2 (fun (subst, len) (tm, tmtype) sign -> let signlen = List.length sign in match kind_of_term tm with | Rel n when dependent tm c && signlen = 1 (* The term to match is not of a dependent type itself *) -> ((n, len) :: subst, len - signlen) | Rel n when signlen > 1 (* The term is of a dependent type, maybe some variable in its type appears in the tycon. *) -> (match tmtype with NotInd _ -> (subst, len - signlen) | IsInd (_, IndType(indf,realargs),_) -> let subst = if dependent tm c && List.for_all isRel realargs then (n, 1) :: subst else subst in List.fold_left (fun (subst, len) arg -> match kind_of_term arg with | Rel n when dependent arg c -> ((n, len) :: subst, pred len) | _ -> (subst, pred len)) (subst, len) realargs) | _ -> (subst, len - signlen)) ([], nar) tomatchs arsign in let rec predicate lift c = match kind_of_term c with | Rel n when n > lift -> (try (* Make the predicate dependent on the matched variable *) let idx = List.assoc (n - lift) subst in mkRel (idx + lift) with Not_found -> (* A variable that is not matched, lift over the arsign. *) mkRel (n + nar)) | _ -> map_constr_with_binders succ predicate lift c in predicate 0 c (* Builds the predicate. If the predicate is dependent, its context is * made of 1+nrealargs assumptions for each matched term in an inductive * type and 1 assumption for each term not _syntactically_ in an * inductive type. * Each matched terms are independently considered dependent or not. * A type constraint but no annotation case: we try to specialize the * tycon to make the predicate if it is not closed. 
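 * For instance (purely illustrative): matching a term of type [vector A n]
 * with a non-closed type constraint and no return annotation yields two
 * candidate predicates, one obtained by abstracting the constraint over the
 * arity signature (prepare_predicate_from_arsign_tycon) and one built as an
 * inversion problem (build_inversion_problem); they are tried in that order
 * by [list_try_compile] in [compile_cases].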
*) let prepare_predicate loc typing_fun sigma env tomatchs arsign tycon pred = let preds = match pred, tycon with (* No type annotation *) | None, Some (None, t) when not (noccur_with_meta 0 max_int t) -> (* If the tycon is not closed w.r.t real variables, we try *) (* two different strategies *) (* First strategy: we abstract the tycon wrt to the dependencies *) let pred1 = prepare_predicate_from_arsign_tycon loc tomatchs arsign t in (* Second strategy: we build an "inversion" predicate *) let sigma2,pred2 = build_inversion_problem loc env sigma tomatchs t in [sigma, pred1; sigma2, pred2] | None, _ -> (* No dependent type constraint, or no constraints at all: *) (* we use two strategies *) let sigma,t = match tycon with | Some (None, t) -> sigma,t | _ -> new_type_evar sigma env ~src:(loc, CasesType) in (* First strategy: we build an "inversion" predicate *) let sigma1,pred1 = build_inversion_problem loc env sigma tomatchs t in (* Second strategy: we directly use the evar as a non dependent pred *) let pred2 = lift (List.length (List.flatten arsign)) t in [sigma1, pred1; sigma, pred2] (* Some type annotation *) | Some rtntyp, _ -> (* We extract the signature of the arity *) let envar = List.fold_right push_rels arsign env in let sigma, newt = new_sort_variable sigma in let evdref = ref sigma in let predcclj = typing_fun (mk_tycon (mkSort newt)) envar evdref rtntyp in let sigma = Option.cata (fun tycon -> let na = Name (id_of_string "x") in let tms = List.map (fun tm -> Pushed(tm,[],na)) tomatchs in let predinst = extract_predicate predcclj.uj_val tms in Coercion.inh_conv_coerces_to loc env !evdref predinst tycon) !evdref tycon in let predccl = (j_nf_evar sigma predcclj).uj_val in [sigma, predccl] in List.map (fun (sigma,pred) -> let (nal,pred) = build_initial_predicate arsign pred in sigma,nal,pred) preds (**************************************************************************) (* Main entry of the matching compilation *) let compile_cases loc style (typing_fun, evdref) tycon env (predopt, tomatchl, eqns) = (* We build the matrix of patterns and right-hand side *) let matx = matx_of_eqns env tomatchl eqns in (* We build the vector of terms to match consistently with the *) (* constructors found in patterns *) let tomatchs = coerce_to_indtype typing_fun evdref env matx tomatchl in (* If an elimination predicate is provided, we check it is compatible with the type of arguments to match; if none is provided, we build alternative possible predicates *) let arsign = extract_arity_signature env tomatchs tomatchl in let preds = prepare_predicate loc typing_fun !evdref env tomatchs arsign tycon predopt in let compile_for_one_predicate (sigma,nal,pred) = (* We push the initial terms to match and push their alias to rhs' envs *) (* names of aliases will be recovered from patterns (hence Anonymous *) (* here) *) let out_tmt na = function NotInd (c,t) -> (na,c,t) | IsInd (typ,_,_) -> (na,None,typ) in let typs = List.map2 (fun na (tm,tmt) -> (tm,out_tmt na tmt)) nal tomatchs in let typs = List.map (fun (c,d) -> (c,extract_inductive_data env sigma d,d)) typs in let dep_sign = find_dependencies_signature (list_make (List.length typs) true) typs in let typs' = list_map3 (fun (tm,tmt) deps na -> let deps = if not (isRel tm) then [] else deps in ((tm,tmt),deps,na)) tomatchs dep_sign nal in let initial_pushed = List.map (fun x -> Pushed x) typs' in (* A typing function that provides with a canonical term for absurd cases*) let typing_fun tycon env evdref = function | Some t -> typing_fun tycon env evdref 
t | None -> coq_unit_judge () in let myevdref = ref sigma in let pb = { env = env; evdref = myevdref; pred = pred; tomatch = initial_pushed; history = start_history (List.length initial_pushed); mat = matx; caseloc = loc; casestyle = style; typing_function = typing_fun } in let j = compile pb in evdref := !myevdref; j in (* Return the term compiled with the first possible elimination *) (* predicate for which the compilation succeeds *) let j = list_try_compile compile_for_one_predicate preds in (* We check for unused patterns *) List.iter (check_unused_pattern env) matx; (* We coerce to the tycon (if an elim predicate was provided) *) inh_conv_coerce_to_tycon loc env evdref j tycon end coq-8.4pl2/pretyping/arguments_renaming.mli0000640000175000001440000000174312010532755020245 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* global_reference -> name list list -> unit (** [Not_found] is raised is no names are defined for [r] *) val arguments_names : global_reference -> name list list val rename_type_of_constant : env -> constant -> types val rename_type_of_inductive : env -> inductive -> types val rename_type_of_constructor : env -> constructor -> types val rename_typing : env -> constr -> unsafe_judgment coq-8.4pl2/pretyping/typeclasses.ml0000640000175000001440000003734512121620060016543 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* assert false) let register_add_instance_hint = (:=) add_instance_hint_ref let add_instance_hint id = !add_instance_hint_ref id let remove_instance_hint_ref = ref (fun id -> assert false) let register_remove_instance_hint = (:=) remove_instance_hint_ref let remove_instance_hint id = !remove_instance_hint_ref id let set_typeclass_transparency_ref = ref (fun id local c -> assert false) let register_set_typeclass_transparency = (:=) set_typeclass_transparency_ref let set_typeclass_transparency gr local c = !set_typeclass_transparency_ref gr local c let classes_transparent_state_ref = ref (fun () -> assert false) let register_classes_transparent_state = (:=) classes_transparent_state_ref let classes_transparent_state () = !classes_transparent_state_ref () let solve_instanciation_problem = ref (fun _ _ _ -> assert false) let resolve_one_typeclass env evm t = !solve_instanciation_problem env evm t type rels = constr list type direction = Forward | Backward (* This module defines type-classes *) type typeclass = { (* The class implementation *) cl_impl : global_reference; (* Context in which the definitions are typed. Includes both typeclass parameters and superclasses. *) cl_context : (global_reference * bool) option list * rel_context; (* Context of definitions and properties on defs, will not be shared *) cl_props : rel_context; (* The method implementaions as projections. *) cl_projs : (name * (direction * int option) option * constant option) list; } module Gmap = Fmap.Make(RefOrdered) type typeclasses = typeclass Gmap.t type instance = { is_class: global_reference; is_pri: int option; (* Sections where the instance should be redeclared, -1 for discard, 0 for none, mutable to avoid redeclarations when multiple rebuild_object happen. 
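(* Added note (not in the original source): concretely, [new_instance] below
   sets this field to [Lib.sections_depth ()] for a global instance and to -1
   for a local one, and [discharge_instance] decrements a positive value each
   time an enclosing section is closed, so 0 means the instance has reached the
   top level. *)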
*) is_global: int; is_impl: global_reference; } type instances = (instance Gmap.t) Gmap.t let instance_impl is = is.is_impl let new_instance cl pri glob impl = let global = if glob then Lib.sections_depth () else -1 in { is_class = cl.cl_impl; is_pri = pri ; is_global = global ; is_impl = impl } (* * states management *) let classes : typeclasses ref = ref Gmap.empty let instances : instances ref = ref Gmap.empty let freeze () = !classes, !instances let unfreeze (cl,is) = classes:=cl; instances:=is let init () = classes:= Gmap.empty; instances:= Gmap.empty let _ = Summary.declare_summary "classes_and_instances" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let class_info c = try Gmap.find c !classes with Not_found -> not_a_class (Global.env()) (constr_of_global c) let global_class_of_constr env c = try class_info (global_of_constr c) with Not_found -> not_a_class env c let dest_class_app env c = let cl, args = decompose_app c in global_class_of_constr env cl, args let dest_class_arity env c = let rels, c = Term.decompose_prod_assum c in rels, dest_class_app env c let class_of_constr c = try Some (dest_class_arity (Global.env ()) c) with e when Errors.noncritical e -> None let rec is_class_type evd c = match kind_of_term c with | Prod (_, _, t) -> is_class_type evd t | Evar (e, _) when is_defined evd e -> is_class_type evd (Evarutil.nf_evar evd c) | _ -> class_of_constr c <> None let is_class_evar evd evi = is_class_type evd evi.Evd.evar_concl (* * classes persistent object *) let load_class (_, cl) = classes := Gmap.add cl.cl_impl cl !classes let cache_class = load_class let subst_class (subst,cl) = let do_subst_con c = fst (Mod_subst.subst_con subst c) and do_subst c = Mod_subst.subst_mps subst c and do_subst_gr gr = fst (subst_global subst gr) in let do_subst_ctx ctx = list_smartmap (fun (na, b, t) -> (na, Option.smartmap do_subst b, do_subst t)) ctx in let do_subst_context (grs,ctx) = list_smartmap (Option.smartmap (fun (gr,b) -> do_subst_gr gr, b)) grs, do_subst_ctx ctx in let do_subst_projs projs = list_smartmap (fun (x, y, z) -> (x, y, Option.smartmap do_subst_con z)) projs in { cl_impl = do_subst_gr cl.cl_impl; cl_context = do_subst_context cl.cl_context; cl_props = do_subst_ctx cl.cl_props; cl_projs = do_subst_projs cl.cl_projs; } let discharge_class (_,cl) = let repl = Lib.replacement_context () in let rel_of_variable_context ctx = List.fold_right ( fun (n,_,b,t) (ctx', subst) -> let decl = (Name n, Option.map (substn_vars 1 subst) b, substn_vars 1 subst t) in (decl :: ctx', n :: subst) ) ctx ([], []) in let discharge_rel_context subst n rel = let rel = map_rel_context (Cooking.expmod_constr repl) rel in let ctx, _ = List.fold_right (fun (id, b, t) (ctx, k) -> (id, Option.smartmap (substn_vars k subst) b, substn_vars k subst t) :: ctx, succ k) rel ([], n) in ctx in let abs_context cl = match cl.cl_impl with | VarRef _ | ConstructRef _ -> assert false | ConstRef cst -> Lib.section_segment_of_constant cst | IndRef (ind,_) -> Lib.section_segment_of_mutual_inductive ind in let discharge_context ctx' subst (grs, ctx) = let grs' = let newgrs = List.map (fun (_, _, t) -> match class_of_constr t with | None -> None | Some (_, (tc, _)) -> Some (tc.cl_impl, true)) ctx' in list_smartmap (Option.smartmap (fun (gr, b) -> Lib.discharge_global gr, b)) grs @ newgrs in grs', discharge_rel_context subst 1 ctx @ ctx' in let cl_impl' = Lib.discharge_global cl.cl_impl in if cl_impl' == cl.cl_impl then cl else let ctx = abs_context cl in let 
ctx, subst = rel_of_variable_context ctx in let context = discharge_context ctx subst cl.cl_context in let props = discharge_rel_context subst (succ (List.length (fst cl.cl_context))) cl.cl_props in { cl_impl = cl_impl'; cl_context = context; cl_props = props; cl_projs = list_smartmap (fun (x, y, z) -> x, y, Option.smartmap Lib.discharge_con z) cl.cl_projs } let rebuild_class cl = try let cst = Tacred.evaluable_of_global_reference (Global.env ()) cl.cl_impl in set_typeclass_transparency cst false false; cl with e when Errors.noncritical e -> cl let class_input : typeclass -> obj = declare_object { (default_object "type classes state") with cache_function = cache_class; load_function = (fun _ -> load_class); open_function = (fun _ -> load_class); classify_function = (fun x -> Substitute x); discharge_function = (fun a -> Some (discharge_class a)); rebuild_function = rebuild_class; subst_function = subst_class } let add_class cl = Lib.add_anonymous_leaf (class_input cl) (** Build the subinstances hints. *) let check_instance env sigma c = try let (evd, c) = resolve_one_typeclass env sigma (Retyping.get_type_of env sigma c) in Evd.is_empty (Evd.undefined_evars evd) with e when Errors.noncritical e -> false let build_subclasses ~check env sigma glob pri = let rec aux pri c = let ty = Evarutil.nf_evar sigma (Retyping.get_type_of env sigma c) in match class_of_constr ty with | None -> [] | Some (rels, (tc, args)) -> let instapp = Reductionops.whd_beta sigma (appvectc c (Termops.extended_rel_vect 0 rels)) in let projargs = Array.of_list (args @ [instapp]) in let projs = list_map_filter (fun (n, b, proj) -> match b with | None -> None | Some (Backward, _) -> None | Some (Forward, pri') -> let proj = Option.get proj in let body = it_mkLambda_or_LetIn (mkApp (mkConst proj, projargs)) rels in if check && check_instance env sigma body then None else let pri = match pri, pri' with | Some p, Some p' -> Some (p + p') | Some p, None -> Some (p + 1) | _, _ -> None in Some (ConstRef proj, pri, body)) tc.cl_projs in let declare_proj hints (cref, pri, body) = let rest = aux pri body in hints @ (pri, body) :: rest in List.fold_left declare_proj [] projs in aux pri (constr_of_global glob) (* * instances persistent object *) type instance_action = | AddInstance | RemoveInstance let load_instance inst = let insts = try Gmap.find inst.is_class !instances with Not_found -> Gmap.empty in let insts = Gmap.add inst.is_impl inst insts in instances := Gmap.add inst.is_class insts !instances let remove_instance inst = let insts = try Gmap.find inst.is_class !instances with Not_found -> assert false in let insts = Gmap.remove inst.is_impl insts in instances := Gmap.add inst.is_class insts !instances let cache_instance (_, (action, i)) = match action with | AddInstance -> load_instance i | RemoveInstance -> remove_instance i let subst_instance (subst, (action, inst)) = action, { inst with is_class = fst (subst_global subst inst.is_class); is_impl = fst (subst_global subst inst.is_impl) } let discharge_instance (_, (action, inst)) = if inst.is_global <= 0 then None else Some (action, { inst with is_global = pred inst.is_global; is_class = Lib.discharge_global inst.is_class; is_impl = Lib.discharge_global inst.is_impl }) let is_local i = i.is_global = -1 let add_instance check inst = add_instance_hint (constr_of_global inst.is_impl) (is_local inst) inst.is_pri; List.iter (fun (pri, c) -> add_instance_hint c (is_local inst) pri) (build_subclasses ~check:(check && not (isVarRef inst.is_impl)) (Global.env ()) Evd.empty 
inst.is_impl inst.is_pri) let rebuild_instance (action, inst) = if action = AddInstance then add_instance true inst; (action, inst) let classify_instance (action, inst) = if is_local inst then Dispose else Substitute (action, inst) let load_instance (_, (action, inst) as ai) = cache_instance ai; if action = AddInstance then add_instance_hint (constr_of_global inst.is_impl) (is_local inst) inst.is_pri let instance_input : instance_action * instance -> obj = declare_object { (default_object "type classes instances state") with cache_function = cache_instance; load_function = (fun _ x -> cache_instance x); open_function = (fun _ x -> cache_instance x); classify_function = classify_instance; discharge_function = discharge_instance; rebuild_function = rebuild_instance; subst_function = subst_instance } let add_instance i = Lib.add_anonymous_leaf (instance_input (AddInstance, i)); add_instance true i let remove_instance i = Lib.add_anonymous_leaf (instance_input (RemoveInstance, i)); remove_instance_hint i.is_impl let declare_instance pri local glob = let c = constr_of_global glob in let ty = Retyping.get_type_of (Global.env ()) Evd.empty c in match class_of_constr ty with | Some (rels, (tc, args) as _cl) -> add_instance (new_instance tc pri (not local) glob) (* let path, hints = build_subclasses (not local) (Global.env ()) Evd.empty glob in *) (* let entries = List.map (fun (path, pri, c) -> (pri, local, path, c)) hints in *) (* Auto.add_hints local [typeclasses_db] (Auto.HintsResolveEntry entries); *) (* Auto.add_hints local [typeclasses_db] *) (* (Auto.HintsCutEntry (PathSeq (PathStar (PathAtom PathAny), path))) *) | None -> () let add_class cl = add_class cl; List.iter (fun (n, inst, body) -> match inst with | Some (Backward, pri) -> declare_instance pri false (ConstRef (Option.get body)) | _ -> ()) cl.cl_projs open Declarations let add_constant_class cst = let ty = Typeops.type_of_constant (Global.env ()) cst in let ctx, arity = decompose_prod_assum ty in let tc = { cl_impl = ConstRef cst; cl_context = (List.map (const None) ctx, ctx); cl_props = [(Anonymous, None, arity)]; cl_projs = [] } in add_class tc; set_typeclass_transparency (EvalConstRef cst) false false let add_inductive_class ind = let mind, oneind = Global.lookup_inductive ind in let k = let ctx = oneind.mind_arity_ctxt in let ty = Inductive.type_of_inductive_knowing_parameters (push_rel_context ctx (Global.env ())) oneind (Termops.extended_rel_vect 0 ctx) in { cl_impl = IndRef ind; cl_context = List.map (const None) ctx, ctx; cl_props = [Anonymous, None, ty]; cl_projs = [] } in add_class k (* * interface functions *) let instance_constructor cl args = let lenpars = List.length (List.filter (fun (na, b, t) -> b = None) (snd cl.cl_context)) in let pars = fst (list_chop lenpars args) in match cl.cl_impl with | IndRef ind -> Some (applistc (mkConstruct (ind, 1)) args), applistc (mkInd ind) pars | ConstRef cst -> let term = if args = [] then None else Some (list_last args) in term, applistc (mkConst cst) pars | _ -> assert false let typeclasses () = Gmap.fold (fun _ l c -> l :: c) !classes [] let cmap_elements c = Gmap.fold (fun k v acc -> v :: acc) c [] let instances_of c = try cmap_elements (Gmap.find c.cl_impl !instances) with Not_found -> [] let all_instances () = Gmap.fold (fun k v acc -> Gmap.fold (fun k v acc -> v :: acc) v acc) !instances [] let instances r = let cl = class_info r in instances_of cl let is_class gr = Gmap.fold (fun k v acc -> acc || v.cl_impl = gr) !classes false let is_instance = function | ConstRef c -> 
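(* Added note (not in the original source): a reference is considered an
   instance when it was declared with the Instance kind (constants and
   variables), or, for a constructor, when its inductive type is itself a
   class. *)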
(match Decls.constant_kind c with | IsDefinition Instance -> true | _ -> false) | VarRef v -> (match Decls.variable_kind v with | IsDefinition Instance -> true | _ -> false) | ConstructRef (ind,_) -> is_class (IndRef ind) | _ -> false (* To embed a boolean for resolvability status. This is essentially a hack to mark which evars correspond to goals and do not need to be resolved when we have nested [resolve_all_evars] calls (e.g. when doing apply in an External hint in typeclass_instances). Would be solved by having real evars-as-goals. Nota: we will only check the resolvability status of undefined evars. *) let resolvable = Store.field () open Store.Field let is_resolvable evi = assert (evi.evar_body = Evar_empty); Option.default true (resolvable.get evi.evar_extra) let mark_resolvability_undef b evi = let t = resolvable.set b evi.evar_extra in { evi with evar_extra = t } let mark_resolvability b evi = assert (evi.evar_body = Evar_empty); mark_resolvability_undef b evi let mark_unresolvable evi = mark_resolvability false evi let mark_resolvable evi = mark_resolvability true evi let mark_resolvability b sigma = Evd.fold_undefined (fun ev evi evs -> Evd.add evs ev (mark_resolvability_undef b evi)) sigma (Evd.defined_evars sigma) let mark_unresolvables sigma = mark_resolvability false sigma let has_typeclasses evd = Evd.fold_undefined (fun ev evi has -> has || (is_resolvable evi && is_class_evar evd evi)) evd false let solve_instanciations_problem = ref (fun _ _ _ _ _ -> assert false) type evar_filter = hole_kind -> bool let no_goals = function GoalEvar -> false | _ -> true let all_evars _ = true let resolve_typeclasses ?(filter=no_goals) ?(split=true) ?(fail=true) env evd = if not (has_typeclasses evd) then evd else !solve_instanciations_problem env evd filter split fail coq-8.4pl2/pretyping/recordops.ml0000640000175000001440000003116012121620060016171 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Cmap.add proj struc)) projs !projection_table let cache_structure o = load_structure 1 o let subst_structure (subst,((kn,i),id,kl,projs as obj)) = let kn' = subst_ind subst kn in let projs' = (* invariant: struc.s_PROJ is an evaluable reference. Thus we can take *) (* the first component of subst_con. 
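(* Added note (not in the original source): [subst_con] returns both the
   substituted constant and a term to use if the constant must be expanded by
   the substitution; the invariant above rules out the expansion case, so only
   the first component is needed. *)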
*) list_smartmap (Option.smartmap (fun kn -> fst (subst_con subst kn))) projs in let id' = fst (subst_constructor subst id) in if projs' == projs && kn' == kn && id' == id then obj else ((kn',i),id',kl,projs') let discharge_constructor (ind, n) = (Lib.discharge_inductive ind, n) let discharge_structure (_,(ind,id,kl,projs)) = Some (Lib.discharge_inductive ind, discharge_constructor id, kl, List.map (Option.map Lib.discharge_con) projs) let inStruc : struc_tuple -> obj = declare_object {(default_object "STRUCTURE") with cache_function = cache_structure; load_function = load_structure; subst_function = subst_structure; classify_function = (fun x -> Substitute x); discharge_function = discharge_structure } let declare_structure (s,c,kl,pl) = Lib.add_anonymous_leaf (inStruc (s,c,kl,pl)) let lookup_structure indsp = Indmap.find indsp !structure_table let lookup_projections indsp = (lookup_structure indsp).s_PROJ let find_projection_nparams = function | ConstRef cst -> (Cmap.find cst !projection_table).s_EXPECTEDPARAM | _ -> raise Not_found let find_projection = function | ConstRef cst -> Cmap.find cst !projection_table | _ -> raise Not_found (* Management of a field store : each field + argument of the inferred * records are stored in a discrimination tree *) let subst_id s (gr,ev,evm) = (fst(subst_global s gr),ev,Evd.subst_evar_map s evm) module MethodsDnet : Term_dnet.S with type ident = global_reference * Evd.evar * Evd.evar_map = Term_dnet.Make (struct type t = global_reference * Evd.evar * Evd.evar_map let compare = Pervasives.compare let subst = subst_id let constr_of (_,ev,evm) = Evd.evar_concl (Evd.find evm ev) end) (struct let reduce c = Reductionops.head_unfold_under_prod Names.full_transparent_state (Global.env()) Evd.empty c let direction = true end) let meth_dnet = ref MethodsDnet.empty open Summary let _ = declare_summary "record-methods-state" { freeze_function = (fun () -> !meth_dnet); unfreeze_function = (fun m -> meth_dnet := m); init_function = (fun () -> meth_dnet := MethodsDnet.empty) } open Libobject let load_method (_,(ty,id)) = meth_dnet := MethodsDnet.add ty id !meth_dnet let in_method : constr * MethodsDnet.ident -> obj = declare_object { (default_object "RECMETHODS") with load_function = (fun _ -> load_method); cache_function = load_method; subst_function = (fun (s,(ty,id)) -> Mod_subst.subst_mps s ty,subst_id s id); classify_function = (fun x -> Substitute x) } let methods_matching c = MethodsDnet.search_pattern !meth_dnet c let declare_method cons ev sign = Lib.add_anonymous_leaf (in_method ((Evd.evar_concl (Evd.find sign ev)),(cons,ev,sign))) (************************************************************************) (*s A canonical structure declares "canonical" conversion hints between *) (* the effective components of a structure and the projections of the *) (* structure *) (* Table des definitions "object" : pour chaque object c, c := [x1:B1]...[xk:Bk](Build_R a1...am t1...t_n) If ti has the form (ci ui1...uir) where ci is a global reference and if the corresponding projection Li of the structure R is defined, one declare a "conversion" between ci and Li x1:B1..xk:Bk |- (Li a1..am (c x1..xk)) =_conv (ci ui1...uir) that maps the pair (Li,ci) to the following data o_DEF = c o_TABS = B1...Bk o_PARAMS = a1...am o_NARAMS = m o_TCOMP = ui1...uir *) type obj_typ = { o_DEF : constr; o_INJ : int; (* position of trivial argument (negative= none) *) o_TABS : constr list; (* ordered *) o_TPARAMS : constr list; (* ordered *) o_NPARAMS : int; o_TCOMPS : constr list } (* 
ordered *) type cs_pattern = Const_cs of global_reference | Prod_cs | Sort_cs of sorts_family | Default_cs let object_table = ref (Refmap.empty : (cs_pattern * obj_typ) list Refmap.t) let canonical_projections () = Refmap.fold (fun x -> List.fold_right (fun (y,c) acc -> ((x,y),c)::acc)) !object_table [] let keep_true_projections projs kinds = map_succeed (function (p,(_,true)) -> p | _ -> failwith "") (List.combine projs kinds) let cs_pattern_of_constr t = match kind_of_term t with App (f,vargs) -> begin try Const_cs (global_of_constr f) , -1, Array.to_list vargs with e when Errors.noncritical e -> raise Not_found end | Rel n -> Default_cs, pred n, [] | Prod (_,a,b) when not (Termops.dependent (mkRel 1) b) -> Prod_cs, -1, [a; Termops.pop b] | Sort s -> Sort_cs (family_of_sort s), -1, [] | _ -> begin try Const_cs (global_of_constr t) , -1, [] with e when Errors.noncritical e -> raise Not_found end (* Intended to always succeed *) let compute_canonical_projections (con,ind) = let v = mkConst con in let c = Environ.constant_value (Global.env()) con in let lt,t = Reductionops.splay_lam (Global.env()) Evd.empty c in let lt = List.rev (List.map snd lt) in let args = snd (decompose_app t) in let { s_EXPECTEDPARAM = p; s_PROJ = lpj; s_PROJKIND = kl } = lookup_structure ind in let params, projs = list_chop p args in let lpj = keep_true_projections lpj kl in let lps = List.combine lpj projs in let comp = List.fold_left (fun l (spopt,t) -> (* comp=components *) match spopt with | Some proji_sp -> begin try let patt, n , args = cs_pattern_of_constr t in ((ConstRef proji_sp, patt, n, args) :: l) with Not_found -> if Flags.is_verbose () then (let con_pp = Nametab.pr_global_env Idset.empty (ConstRef con) and proji_sp_pp = Nametab.pr_global_env Idset.empty (ConstRef proji_sp) in msg_warning (str "No global reference exists for projection value" ++ Termops.print_constr t ++ str " in instance " ++ con_pp ++ str " of " ++ proji_sp_pp ++ str ", ignoring it.")); l end | _ -> l) [] lps in List.map (fun (refi,c,inj,argj) -> (refi,c), {o_DEF=v; o_INJ=inj; o_TABS=lt; o_TPARAMS=params; o_NPARAMS=List.length params; o_TCOMPS=argj}) comp let pr_cs_pattern = function Const_cs c -> Nametab.pr_global_env Idset.empty c | Prod_cs -> str "_ -> _" | Default_cs -> str "_" | Sort_cs s -> Termops.pr_sort_family s let open_canonical_structure i (_,o) = if i=1 then let lo = compute_canonical_projections o in List.iter (fun ((proj,cs_pat),s) -> let l = try Refmap.find proj !object_table with Not_found -> [] in let ocs = try Some (List.assoc cs_pat l) with Not_found -> None in match ocs with | None -> object_table := Refmap.add proj ((cs_pat,s)::l) !object_table; | Some cs -> if Flags.is_verbose () then let old_can_s = (Termops.print_constr cs.o_DEF) and new_can_s = (Termops.print_constr s.o_DEF) in let prj = (Nametab.pr_global_env Idset.empty proj) and hd_val = (pr_cs_pattern cs_pat) in msg_warning (str "Ignoring canonical projection to " ++ hd_val ++ str " by " ++ prj ++ str " in " ++ new_can_s ++ str ": redundant with " ++ old_can_s)) lo let cache_canonical_structure o = open_canonical_structure 1 o let subst_canonical_structure (subst,(cst,ind as obj)) = (* invariant: cst is an evaluable reference. Thus we can take *) (* the first component of subst_con. 
*) let cst' = fst (subst_con subst cst) in let ind' = Inductiveops.subst_inductive subst ind in if cst' == cst & ind' == ind then obj else (cst',ind') let discharge_canonical_structure (_,(cst,ind)) = Some (Lib.discharge_con cst,Lib.discharge_inductive ind) let inCanonStruc : constant * inductive -> obj = declare_object {(default_object "CANONICAL-STRUCTURE") with open_function = open_canonical_structure; cache_function = cache_canonical_structure; subst_function = subst_canonical_structure; classify_function = (fun x -> Substitute x); discharge_function = discharge_canonical_structure } let add_canonical_structure x = Lib.add_anonymous_leaf (inCanonStruc x) (*s High-level declaration of a canonical structure *) let error_not_structure ref = errorlabstrm "object_declare" (Nameops.pr_id (basename_of_global ref) ++ str" is not a structure object.") let check_and_decompose_canonical_structure ref = let sp = match ref with ConstRef sp -> sp | _ -> error_not_structure ref in let env = Global.env () in let vc = match Environ.constant_opt_value env sp with | Some vc -> vc | None -> error_not_structure ref in let body = snd (splay_lam (Global.env()) Evd.empty vc) in let f,args = match kind_of_term body with | App (f,args) -> f,args | _ -> error_not_structure ref in let indsp = match kind_of_term f with | Construct (indsp,1) -> indsp | _ -> error_not_structure ref in let s = try lookup_structure indsp with Not_found -> error_not_structure ref in let ntrue_projs = List.length (List.filter (fun (_, x) -> x) s.s_PROJKIND) in if s.s_EXPECTEDPARAM + ntrue_projs > Array.length args then error_not_structure ref; (sp,indsp) let declare_canonical_structure ref = add_canonical_structure (check_and_decompose_canonical_structure ref) let lookup_canonical_conversion (proj,pat) = List.assoc pat (Refmap.find proj !object_table) let is_open_canonical_projection env sigma (c,args) = try let n = find_projection_nparams (global_of_constr c) in try let arg = whd_betadeltaiota env sigma (List.nth args n) in let hd = match kind_of_term arg with App (hd, _) -> hd | _ -> arg in not (isConstruct hd) with Failure _ -> false with Not_found -> false let freeze () = !structure_table, !projection_table, !object_table let unfreeze (s,p,o) = structure_table := s; projection_table := p; object_table := o let init () = structure_table := Indmap.empty; projection_table := Cmap.empty; object_table := Refmap.empty let _ = init() let _ = Summary.declare_summary "objdefs" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } coq-8.4pl2/pretyping/vnorm.ml0000640000175000001440000002461012121620060015334 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (j+1) (* Argggg, ces constructeurs de ... 
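(* English translation of the surrounding French note: argh, these constructor
   tags start at 1. *)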
qui commencent a 1*) let find_rectype_a env c = let (t, l) = let t = whd_betadeltaiota env c in try destApp t with e when Errors.noncritical e -> (t,[||]) in match kind_of_term t with | Ind ind -> (ind, l) | _ -> raise Not_found (* Instantiate inductives and parameters in constructor type *) let type_constructor mind mib typ params = let s = ind_subst mind mib in let ctyp = substl s typ in let nparams = Array.length params in if nparams = 0 then ctyp else let _,ctyp = decompose_prod_n nparams ctyp in substl (List.rev (Array.to_list params)) ctyp let construct_of_constr const env tag typ = let (mind,_ as ind), allargs = find_rectype_a env typ in (* spiwack : here be a branch for specific decompilation handled by retroknowledge *) try if const then ((retroknowledge Retroknowledge.get_vm_decompile_constant_info env (Ind ind) tag), typ) (*spiwack: this may need to be changed in case there are parameters in the type which may cause a constant value to have an arity. (type_constructor seems to be all about parameters actually) but it shouldn't really matter since constant values don't use their ctyp in the rest of the code.*) else raise Not_found (* No retroknowledge function (yet) for block decompilation *) with Not_found -> let mib,mip = lookup_mind_specif env ind in let nparams = mib.mind_nparams in let i = invert_tag const tag mip.mind_reloc_tbl in let params = Array.sub allargs 0 nparams in let ctyp = type_constructor mind mib (mip.mind_nf_lc.(i-1)) params in (mkApp(mkConstruct(ind,i), params), ctyp) let construct_of_constr_const env tag typ = fst (construct_of_constr true env tag typ) let construct_of_constr_block = construct_of_constr false let constr_type_of_idkey env idkey = match idkey with | ConstKey cst -> mkConst cst, Typeops.type_of_constant env cst | VarKey id -> let (_,_,ty) = lookup_named id env in mkVar id, ty | RelKey i -> let n = (nb_rel env - i) in let (_,_,ty) = lookup_rel n env in mkRel n, lift n ty let type_of_ind env ind = type_of_inductive env (Inductive.lookup_mind_specif env ind) let build_branches_type env (mind,_ as _ind) mib mip params dep p = let rtbl = mip.mind_reloc_tbl in (* [build_one_branch i cty] construit le type de la ieme branche (commence a 0) et les lambda correspondant aux realargs *) let build_one_branch i cty = let typi = type_constructor mind mib cty params in let decl,indapp = decompose_prod_assum typi in let ind,cargs = find_rectype_a env indapp in let nparams = Array.length params in let carity = snd (rtbl.(i)) in let crealargs = Array.sub cargs nparams (Array.length cargs - nparams) in let codom = let papp = mkApp(lift (List.length decl) p,crealargs) in if dep then let cstr = ith_constructor_of_inductive ind (i+1) in let relargs = Array.init carity (fun i -> mkRel (carity-i)) in let dep_cstr = mkApp(mkApp(mkConstruct cstr,params),relargs) in mkApp(papp,[|dep_cstr|]) else papp in decl, codom in Array.mapi build_one_branch mip.mind_nf_lc let build_case_type dep p realargs c = if dep then mkApp(mkApp(p, realargs), [|c|]) else mkApp(p, realargs) (* La fonction de normalisation *) let rec nf_val env v t = nf_whd env (whd_val v) t and nf_vtype env v = nf_val env v crazy_type and nf_whd env whd typ = match whd with | Vsort s -> mkSort s | Vprod p -> let dom = nf_vtype env (dom p) in let name = Name (id_of_string "x") in let vc = body_of_vfun (nb_rel env) (codom p) in let codom = nf_vtype (push_rel (name,None,dom) env) vc in mkProd(name,dom,codom) | Vfun f -> nf_fun env f typ | Vfix(f,None) -> nf_fix env f | Vfix(f,Some vargs) -> fst (nf_fix_app env f 
vargs) | Vcofix(cf,_,None) -> nf_cofix env cf | Vcofix(cf,_,Some vargs) -> let cfd = nf_cofix env cf in let i,(_,ta,_) = destCoFix cfd in let t = ta.(i) in let _, args = nf_args env vargs t in mkApp(cfd,args) | Vconstr_const n -> construct_of_constr_const env n typ | Vconstr_block b -> let capp,ctyp = construct_of_constr_block env (btag b) typ in let args = nf_bargs env b ctyp in mkApp(capp,args) | Vatom_stk(Aid idkey, stk) -> let c,typ = constr_type_of_idkey env idkey in nf_stk env c typ stk | Vatom_stk(Aiddef(idkey,v), stk) -> nf_whd env (whd_stack v stk) typ | Vatom_stk(Aind ind, stk) -> nf_stk env (mkInd ind) (type_of_ind env ind) stk and nf_stk env c t stk = match stk with | [] -> c | Zapp vargs :: stk -> let t, args = nf_args env vargs t in nf_stk env (mkApp(c,args)) t stk | Zfix (f,vargs) :: stk -> let fa, typ = nf_fix_app env f vargs in let _,_,codom = try decompose_prod env typ with e when Errors.noncritical e -> exit 120 in nf_stk env (mkApp(fa,[|c|])) (subst1 c codom) stk | Zswitch sw :: stk -> let (mind,_ as ind),allargs = find_rectype_a env t in let (mib,mip) = Inductive.lookup_mind_specif env ind in let nparams = mib.mind_nparams in let params,realargs = Util.array_chop nparams allargs in let pT = hnf_prod_applist env (type_of_ind env ind) (Array.to_list params) in let pT = whd_betadeltaiota env pT in let dep, p = nf_predicate env ind mip params (type_of_switch sw) pT in (* Calcul du type des branches *) let btypes = build_branches_type env ind mib mip params dep p in (* calcul des branches *) let bsw = branch_of_switch (nb_rel env) sw in let mkbranch i (n,v) = let decl,codom = btypes.(i) in let b = nf_val (push_rel_context decl env) v codom in it_mkLambda_or_LetIn b decl in let branchs = Array.mapi mkbranch bsw in let tcase = build_case_type dep p realargs c in let ci = case_info sw in nf_stk env (mkCase(ci, p, c, branchs)) tcase stk and nf_predicate env ind mip params v pT = match whd_val v, kind_of_term pT with | Vfun f, Prod _ -> let k = nb_rel env in let vb = body_of_vfun k f in let name,dom,codom = try decompose_prod env pT with e when Errors.noncritical e -> exit 121 in let dep,body = nf_predicate (push_rel (name,None,dom) env) ind mip params vb codom in dep, mkLambda(name,dom,body) | Vfun f, _ -> let k = nb_rel env in let vb = body_of_vfun k f in let name = Name (id_of_string "c") in let n = mip.mind_nrealargs in let rargs = Array.init n (fun i -> mkRel (n-i)) in let params = if n=0 then params else Array.map (lift n) params in let dom = mkApp(mkInd ind,Array.append params rargs) in let body = nf_vtype (push_rel (name,None,dom) env) vb in true, mkLambda(name,dom,body) | _, _ -> false, nf_val env v crazy_type and nf_args env vargs t = let t = ref t in let len = nargs vargs in let args = Array.init len (fun i -> let _,dom,codom = try decompose_prod env !t with e when Errors.noncritical e -> exit 123 in let c = nf_val env (arg vargs i) dom in t := subst1 c codom; c) in !t,args and nf_bargs env b t = let t = ref t in let len = bsize b in let args = Array.init len (fun i -> let _,dom,codom = try decompose_prod env !t with e when Errors.noncritical e -> exit 124 in let c = nf_val env (bfield b i) dom in t := subst1 c codom; c) in args and nf_fun env f typ = let k = nb_rel env in let vb = body_of_vfun k f in let name,dom,codom = try decompose_prod env typ with e when Errors.noncritical e -> raise (Type_errors.TypeError(env,Type_errors.ReferenceVariables typ)) in let body = nf_val (push_rel (name,None,dom) env) vb codom in mkLambda(name,dom,body) and nf_fix env f = let init = 
current_fix f in let rec_args = rec_args f in let k = nb_rel env in let vb, vt = reduce_fix k f in let ndef = Array.length vt in let ft = Array.map (fun v -> nf_val env v crazy_type) vt in let name = Array.init ndef (fun _ -> (Name (id_of_string "Ffix"))) in let env = push_rec_types (name,ft,ft) env in let fb = Util.array_map2 (fun v t -> nf_fun env v t) vb ft in mkFix ((rec_args,init),(name,ft,fb)) and nf_fix_app env f vargs = let fd = nf_fix env f in let (_,i),(_,ta,_) = destFix fd in let t = ta.(i) in let t, args = nf_args env vargs t in mkApp(fd,args),t and nf_cofix env cf = let init = current_cofix cf in let k = nb_rel env in let vb,vt = reduce_cofix k cf in let ndef = Array.length vt in let cft = Array.map (fun v -> nf_val env v crazy_type) vt in let name = Array.init ndef (fun _ -> (Name (id_of_string "Fcofix"))) in let env = push_rec_types (name,cft,cft) env in let cfb = Util.array_map2 (fun v t -> nf_val env v t) vb cft in mkCoFix (init,(name,cft,cfb)) let cbv_vm env c t = let transp = transp_values () in if not transp then set_transp_values true; let v = Vconv.val_of_constr env c in let c = nf_val env v t in if not transp then set_transp_values false; c coq-8.4pl2/pretyping/retyping.ml0000640000175000001440000001614412104155710016045 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* typ | h::rest -> match kind_of_term (whd_betadeltaiota env sigma typ) with | Prod (na,c1,c2) -> subst_type env sigma (subst1 h c2) rest | _ -> anomaly "Non-functional construction" (* Si ft est le type d'un terme f, lequel est appliqu args, *) (* [sort_of_atomic_ty] calcule ft[args] qui doit tre une sorte *) (* On suit une mthode paresseuse, en esprant que ft est une arit *) (* et sinon on substitue *) let sort_of_atomic_type env sigma ft args = let rec concl_of_arity env ar args = match kind_of_term (whd_betadeltaiota env sigma ar), args with | Prod (na, t, b), h::l -> concl_of_arity (push_rel (na,Some h,t) env) b l | Sort s, [] -> s | _ -> anomaly "Not a sort" in concl_of_arity env ft (Array.to_list args) let type_of_var env id = try let (_,_,ty) = lookup_named id env in ty with Not_found -> anomaly ("type_of: variable "^(string_of_id id)^" unbound") let retype ?(polyprop=true) sigma = let rec type_of env cstr= match kind_of_term cstr with | Meta n -> (try strip_outer_cast (Evd.meta_ftype sigma n).Evd.rebus with Not_found -> anomaly ("type_of: unknown meta " ^ string_of_int n)) | Rel n -> let (_,_,ty) = lookup_rel n env in lift n ty | Var id -> type_of_var env id | Const cst -> Typeops.type_of_constant env cst | Evar ev -> Evd.existential_type sigma ev | Ind ind -> type_of_inductive env ind | Construct cstr -> type_of_constructor env cstr | Case (_,p,c,lf) -> let Inductiveops.IndType(_,realargs) = try Inductiveops.find_rectype env sigma (type_of env c) with Not_found -> anomaly "type_of: Bad recursive type" in let t = whd_beta sigma (applist (p, realargs)) in (match kind_of_term (whd_betadeltaiota env sigma (type_of env t)) with | Prod _ -> whd_beta sigma (applist (t, [c])) | _ -> t) | Lambda (name,c1,c2) -> mkProd (name, c1, type_of (push_rel (name,None,c1) env) c2) | LetIn (name,b,c1,c2) -> subst1 b (type_of (push_rel (name,Some b,c1) env) c2) | Fix ((_,i),(_,tys,_)) -> tys.(i) | CoFix (i,(_,tys,_)) -> tys.(i) | App(f,args) when isGlobalRef f -> let t = type_of_global_reference_knowing_parameters env f args in strip_outer_cast (subst_type env sigma t (Array.to_list args)) | App(f,args) -> 
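(* Added note (not in the original source): general application case, where the
   type of [f] is computed and the arguments are substituted into it via
   [subst_type] above. *)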
strip_outer_cast (subst_type env sigma (type_of env f) (Array.to_list args)) | Cast (c,_, t) -> t | Sort _ | Prod _ -> mkSort (sort_of env cstr) and sort_of env t = match kind_of_term t with | Cast (c,_, s) when isSort s -> destSort s | Sort (Prop c) -> type1_sort | Sort (Type u) -> Type (Univ.super u) | Prod (name,t,c2) -> (match (sort_of env t, sort_of (push_rel (name,None,t) env) c2) with | _, (Prop Null as s) -> s | Prop _, (Prop Pos as s) -> s | Type _, (Prop Pos as s) when Environ.engagement env = Some ImpredicativeSet -> s | (Type _, _) | (_, Type _) -> new_Type_sort () (* | Type u1, Prop Pos -> Type (Univ.sup u1 Univ.type0_univ) | Prop Pos, (Type u2) -> Type (Univ.sup Univ.type0_univ u2) | Prop Null, (Type _ as s) -> s | Type u1, Type u2 -> Type (Univ.sup u1 u2)*)) | App(f,args) when isGlobalRef f -> let t = type_of_global_reference_knowing_parameters env f args in sort_of_atomic_type env sigma t args | App(f,args) -> sort_of_atomic_type env sigma (type_of env f) args | Lambda _ | Fix _ | Construct _ -> anomaly "sort_of: Not a type (1)" | _ -> decomp_sort env sigma (type_of env t) and sort_family_of env t = match kind_of_term t with | Cast (c,_, s) when isSort s -> family_of_sort (destSort s) | Sort (Prop c) -> InType | Sort (Type u) -> InType | Prod (name,t,c2) -> let s2 = sort_family_of (push_rel (name,None,t) env) c2 in if Environ.engagement env <> Some ImpredicativeSet && s2 = InSet & sort_family_of env t = InType then InType else s2 | App(f,args) when isGlobalRef f -> let t = type_of_global_reference_knowing_parameters env f args in family_of_sort (sort_of_atomic_type env sigma t args) | App(f,args) -> family_of_sort (sort_of_atomic_type env sigma (type_of env f) args) | Lambda _ | Fix _ | Construct _ -> anomaly "sort_of: Not a type (1)" | _ -> family_of_sort (decomp_sort env sigma (type_of env t)) and type_of_global_reference_knowing_parameters env c args = let argtyps = Array.map (fun c -> nf_evar sigma (type_of env c)) args in match kind_of_term c with | Ind ind -> let (_,mip) = lookup_mind_specif env ind in (try Inductive.type_of_inductive_knowing_parameters ~polyprop env mip argtyps with Reduction.NotArity -> anomaly "type_of: Not an arity") | Const cst -> let t = constant_type env cst in (try Typeops.type_of_constant_knowing_parameters env t argtyps with Reduction.NotArity -> anomaly "type_of: Not an arity") | Var id -> type_of_var env id | Construct cstr -> type_of_constructor env cstr | _ -> assert false in type_of, sort_of, sort_family_of, type_of_global_reference_knowing_parameters let get_sort_of ?(polyprop=true) env sigma t = let _,f,_,_ = retype ~polyprop sigma in f env t let get_sort_family_of ?(polyprop=true) env sigma c = let _,_,f,_ = retype ~polyprop sigma in f env c let type_of_global_reference_knowing_parameters env sigma c args = let _,_,_,f = retype sigma in f env c args let type_of_global_reference_knowing_conclusion env sigma c conclty = let conclty = nf_evar sigma conclty in match kind_of_term c with | Ind ind -> let (_,mip) = Inductive.lookup_mind_specif env ind in type_of_inductive_knowing_conclusion env mip conclty | Const cst -> let t = constant_type env cst in (* TODO *) Typeops.type_of_constant_knowing_parameters env t [||] | Var id -> type_of_var env id | Construct cstr -> type_of_constructor env cstr | _ -> assert false (* We are outside the kernel: we take fresh universes *) (* to avoid tactics and co to refresh universes themselves *) let get_type_of ?(polyprop=true) ?(refresh=true) env sigma c = let f,_,_,_ = retype ~polyprop sigma in let t 
= f env c in if refresh then refresh_universes t else t (* Makes an assumption from a constr *) let get_assumption_of env evc c = c (* Makes an unsafe judgment from a constr *) let get_judgment_of env evc c = { uj_val = c; uj_type = get_type_of env evc c } coq-8.4pl2/pretyping/matching.mli0000640000175000001440000000755312010532755016157 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> patvar_map (** [extended_matches pat c] also returns the names of bound variables in [c] that matches the bound variables in [pat]; if several bound variables or metavariables have the same name, the metavariable, or else the rightmost bound variable, takes precedence *) val extended_matches : constr_pattern -> constr -> bound_ident_map * extended_patvar_map (** [is_matching pat c] just tells if [c] matches against [pat] *) val is_matching : constr_pattern -> constr -> bool (** [matches_conv env sigma] matches up to conversion in environment [(env,sigma)] when constants in pattern are concerned; it raises [PatternMatchingFailure] if not matchable; bindings are given in increasing order based on the numbers given in the pattern *) val matches_conv :env -> Evd.evar_map -> constr_pattern -> constr -> patvar_map (** The type of subterm matching results: a substitution + a context (whose hole is denoted with [special_meta]) + a continuation that either returns the next matching subterm or raise PatternMatchingFailure *) type subterm_matching_result = (bound_ident_map * patvar_map) * constr * (unit -> subterm_matching_result) (** [match_subterm n pat c] returns the substitution and the context corresponding to the first **closed** subterm of [c] matching [pat], and a continuation that looks for the next matching subterm. It raises PatternMatchingFailure if no subterm matches the pattern *) val match_subterm : constr_pattern -> constr -> subterm_matching_result (** [match_appsubterm pat c] returns the substitution and the context corresponding to the first **closed** subterm of [c] matching [pat], considering application contexts as well. It also returns a continuation that looks for the next matching subterm. 
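(* Added illustrative sketch (not part of the original interface), assuming the
   returned triple is destructured as (subst, ctx, next): one hypothetical way
   to enumerate all matches is to iterate the continuation until it raises
   PatternMatchingFailure, e.g.

     let rec collect acc (subst, ctx, next) =
       try collect ((subst, ctx) :: acc) (next ())
       with PatternMatchingFailure -> List.rev acc

   where [ctx] is the context whose hole is denoted by [special_meta]. *)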
It raises PatternMatchingFailure if no subterm matches the pattern *) val match_appsubterm : constr_pattern -> constr -> subterm_matching_result (** [match_subterm_gen] calls either [match_subterm] or [match_appsubterm] *) val match_subterm_gen : bool (** true = with app context *) -> constr_pattern -> constr -> subterm_matching_result (** [is_matching_appsubterm pat c] tells if a subterm of [c] matches against [pat] taking partial subterms into consideration *) val is_matching_appsubterm : ?closed:bool -> constr_pattern -> constr -> bool (** [is_matching_conv env sigma pat c] tells if [c] matches against [pat] up to conversion for constants in patterns *) val is_matching_conv : env -> Evd.evar_map -> constr_pattern -> constr -> bool coq-8.4pl2/pretyping/evarutil.mli0000640000175000001440000002222712125631355016216 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* metavariable val mk_new_meta : unit -> constr (** [new_untyped_evar] is a generator of unique evar keys *) val new_untyped_evar : unit -> existential_key (** {6 Creating a fresh evar given their type and context} *) val new_evar : evar_map -> env -> ?src:loc * hole_kind -> ?filter:bool list -> ?candidates:constr list -> types -> evar_map * constr (** the same with side-effects *) val e_new_evar : evar_map ref -> env -> ?src:loc * hole_kind -> ?filter:bool list -> ?candidates:constr list -> types -> constr (** Create a new Type existential variable, as we keep track of them during type-checking and unification. *) val new_type_evar : ?src:loc * hole_kind -> ?filter:bool list -> evar_map -> env -> evar_map * constr (** Create a fresh evar in a context different from its definition context: [new_evar_instance sign evd ty inst] creates a new evar of context [sign] and type [ty], [inst] is a mapping of the evar context to the context where the evar should occur. 
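(* Added illustration (not in the original documentation, hypothetical names):
   with [sign] = (x:nat, y:bool) and [inst] = [a; b], the evar is declared with
   type [ty] in the context (x:nat, y:bool) and occurs in the calling context
   with instance [a; b], where [a] has type nat and [b] has type bool. *)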
This means that the terms of [inst] are typed in the occurrence context and their type (seen as a telescope) is [sign] *) val new_evar_instance : named_context_val -> evar_map -> types -> ?src:loc * hole_kind -> ?filter:bool list -> ?candidates:constr list -> constr list -> evar_map * constr val make_pure_subst : evar_info -> constr array -> (identifier * constr) list (** {6 Instantiate evars} *) type conv_fun = env -> evar_map -> conv_pb -> constr -> constr -> evar_map * bool (** [evar_define choose env ev c] try to instantiate [ev] with [c] (typed in [env]), possibly solving related unification problems, possibly leaving open some problems that cannot be solved in a unique way (except if choose is true); fails if the instance is not valid for the given [ev] *) val evar_define : conv_fun -> ?choose:bool -> env -> evar_map -> existential -> constr -> evar_map (** {6 Evars/Metas switching...} *) (** [evars_to_metas] generates new metavariables for each non dependent existential and performs the replacement in the given constr; it also returns the evar_map extended with dependent evars *) val evars_to_metas : evar_map -> open_constr -> (evar_map * constr) val non_instantiated : evar_map -> (evar * evar_info) list (** {6 Unification utils} *) (** [head_evar c] returns the head evar of [c] if any *) exception NoHeadEvar val head_evar : constr -> existential_key (** may raise NoHeadEvar *) (* Expand head evar if any *) val whd_head_evar : evar_map -> constr -> constr val is_ground_term : evar_map -> constr -> bool val is_ground_env : evar_map -> env -> bool val solve_refl : ?can_drop:bool -> conv_fun -> env -> evar_map -> existential_key -> constr array -> constr array -> evar_map val solve_evar_evar : ?force:bool -> (env -> evar_map -> existential -> constr -> evar_map) -> conv_fun -> env -> evar_map -> existential -> existential -> evar_map val solve_simple_eqn : conv_fun -> ?choose:bool -> env -> evar_map -> bool option * existential * constr -> evar_map * bool val reconsider_conv_pbs : conv_fun -> evar_map -> evar_map * bool (** [check_evars env initial_sigma extended_sigma c] fails if some new unresolved evar remains in [c] *) val check_evars : env -> evar_map -> evar_map -> constr -> unit val define_evar_as_product : evar_map -> existential -> evar_map * types val define_evar_as_lambda : env -> evar_map -> existential -> evar_map * types val define_evar_as_sort : evar_map -> existential -> evar_map * sorts val is_unification_pattern_evar : env -> evar_map -> existential -> constr list -> constr -> constr list option val is_unification_pattern : env * int -> evar_map -> constr -> constr list -> constr -> constr list option val evar_absorb_arguments : env -> evar_map -> existential -> constr list -> evar_map * existential val solve_pattern_eqn : env -> constr list -> constr -> constr (** The following functions return the set of evars immediately contained in the object, including defined evars *) val evars_of_term : constr -> Intset.t val evars_of_named_context : named_context -> Intset.t val evars_of_evar_info : evar_info -> Intset.t (** [gather_dependent_evars evm seeds] classifies the evars in [evm] as dependent_evars and goals (these may overlap). A goal is an evar in [seeds] or an evar appearing in the (partial) definition of a goal. A dependent evar is an evar appearing in the type (hypotheses and conclusion) of a goal, or in the type or (partial) definition of a dependent evar. 
The value return is a map associating to each dependent evar [None] if it has no (partial) definition or [Some s] if [s] is the list of evars appearing in its (partial) definition. *) val gather_dependent_evars : evar_map -> evar list -> (Intset.t option) Intmap.t (** The following functions return the set of undefined evars contained in the object, the defined evars being traversed. This is roughly a combination of the previous functions and [nf_evar]. *) val undefined_evars_of_term : evar_map -> constr -> Intset.t val undefined_evars_of_named_context : evar_map -> named_context -> Intset.t val undefined_evars_of_evar_info : evar_map -> evar_info -> Intset.t (** {6 Value/Type constraints} *) val judge_of_new_Type : evar_map -> evar_map * unsafe_judgment type type_constraint_type = (int * int) option * constr type type_constraint = type_constraint_type option type val_constraint = constr option val empty_tycon : type_constraint val mk_tycon_type : constr -> type_constraint_type val mk_abstr_tycon_type : int -> constr -> type_constraint_type val mk_tycon : constr -> type_constraint val mk_abstr_tycon : int -> constr -> type_constraint val empty_valcon : val_constraint val mk_valcon : constr -> val_constraint val split_tycon : loc -> env -> evar_map -> type_constraint -> evar_map * (name * type_constraint * type_constraint) val valcon_of_tycon : type_constraint -> val_constraint val lift_abstr_tycon_type : int -> type_constraint_type -> type_constraint_type val lift_tycon_type : int -> type_constraint_type -> type_constraint_type val lift_tycon : int -> type_constraint -> type_constraint (***********************************************************) (** [flush_and_check_evars] raise [Uninstantiated_evar] if an evar remains uninstantiated; [nf_evar] leaves uninstantiated evars as is *) val nf_evar : evar_map -> constr -> constr val j_nf_evar : evar_map -> unsafe_judgment -> unsafe_judgment val jl_nf_evar : evar_map -> unsafe_judgment list -> unsafe_judgment list val jv_nf_evar : evar_map -> unsafe_judgment array -> unsafe_judgment array val tj_nf_evar : evar_map -> unsafe_type_judgment -> unsafe_type_judgment val nf_named_context_evar : evar_map -> named_context -> named_context val nf_rel_context_evar : evar_map -> rel_context -> rel_context val nf_env_evar : evar_map -> env -> env val nf_evar_info : evar_map -> evar_info -> evar_info val nf_evar_map : evar_map -> evar_map val nf_evar_map_undefined : evar_map -> evar_map (** Replacing all evars, possibly raising [Uninstantiated_evar] *) exception Uninstantiated_evar of existential_key val flush_and_check_evars : evar_map -> constr -> constr (** Replace the vars and rels that are aliases to other vars and rels by their representative that is most ancient in the context *) val expand_vars_in_term : env -> constr -> constr (** {6 debug pretty-printer:} *) val pr_tycon_type : env -> type_constraint_type -> Pp.std_ppcmds val pr_tycon : env -> type_constraint -> Pp.std_ppcmds (** {6 Removing hyps in evars'context} raise OccurHypInSimpleClause if the removal breaks dependencies *) type clear_dependency_error = | OccurHypInSimpleClause of identifier option | EvarTypingBreak of existential exception ClearDependencyError of identifier * clear_dependency_error (* spiwack: marks an evar that has been "defined" by clear. 
used by [Goal] and (indirectly) [Proofview] to handle the clear tactic gracefully*) val cleared : bool Store.Field.t val clear_hyps_in_evi : evar_map ref -> named_context_val -> types -> identifier list -> named_context_val * types val push_rel_context_to_named_context : Environ.env -> types -> named_context_val * types * constr list * constr list val generalize_evar_over_rels : evar_map -> existential -> types * constr list val check_evar_instance : evar_map -> existential_key -> constr -> conv_fun -> evar_map val remove_instance_local_defs : evar_map -> existential_key -> constr list -> constr list coq-8.4pl2/pretyping/unification.ml0000640000175000001440000013516212121620060016510 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* raise Occur | Evar (ev,args) -> (match evar_body (Evd.find evd ev) with | Evar_defined c -> occrec c; Array.iter occrec args | Evar_empty -> raise Occur) | Sort s when is_sort_variable evd s -> raise Occur | _ -> iter_constr occrec c in try occrec c; false with Occur | Not_found -> true let occur_meta_evd sigma mv c = let rec occrec c = (* Note: evars are not instantiated by terms with metas *) let c = whd_evar sigma (whd_meta sigma c) in match kind_of_term c with | Meta mv' when mv = mv' -> raise Occur | _ -> iter_constr occrec c in try occrec c; false with Occur -> true (* if lname_typ is [xn,An;..;x1,A1] and l is a list of terms, gives [x1:A1]..[xn:An]c' such that c converts to ([x1:A1]..[xn:An]c' l) *) let abstract_scheme env c l lname_typ = List.fold_left2 (fun t (locc,a) (na,_,ta) -> let na = match kind_of_term a with Var id -> Name id | _ -> na in (* [occur_meta ta] test removed for support of eelim/ecase but consequences are unclear... if occur_meta ta then error "cannot find a type for the generalisation" else *) if occur_meta a then mkLambda_name env (na,ta,t) else mkLambda_name env (na,ta,subst_closed_term_occ locc a t)) c (List.rev l) lname_typ let abstract_list_all env evd typ c l = let ctxt,_ = splay_prod_n env evd (List.length l) typ in let l_with_all_occs = List.map (function a -> (all_occurrences,a)) l in let p = abstract_scheme env c l_with_all_occs ctxt in try if is_conv_leq env evd (Typing.type_of env evd p) typ then p else error "abstract_list_all" with UserError _ | Type_errors.TypeError _ -> error_cannot_find_well_typed_abstraction env evd p l let set_occurrences_of_last_arg args = Some all_occurrences :: List.tl (array_map_to_list (fun _ -> None) args) let abstract_list_all_with_dependencies env evd typ c l = let evd,ev = new_evar evd env typ in let evd,ev' = evar_absorb_arguments env evd (destEvar ev) l in let argoccs = set_occurrences_of_last_arg (snd ev') in let evd,b = Evarconv.second_order_matching empty_transparent_state env evd ev' argoccs c in if b then nf_evar evd (existential_value evd (destEvar ev)) else error "Cannot find a well-typed abstraction." 
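(* Added illustrative note (not in the original source, schematic example): with
   [typ] = A -> B -> T, [c] = f a b and [l] = [a; b], [abstract_list_all] above
   returns roughly fun (x:A) (y:B) => f x y, i.e. [c] with every occurrence of
   each element of [l] abstracted; it fails when the resulting abstraction does
   not typecheck against [typ], raising the well-typed-abstraction error above. *)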
(**) (* A refinement of [conv_pb]: the integers tells how many arguments were applied in the context of the conversion problem; if the number is non zero, steps of eta-expansion will be allowed *) let opp_status = function | IsSuperType -> IsSubType | IsSubType -> IsSuperType | Conv -> Conv let add_type_status (x,y) = ((x,TypeNotProcessed),(y,TypeNotProcessed)) let extract_instance_status = function | CUMUL -> add_type_status (IsSubType, IsSuperType) | CONV -> add_type_status (Conv, Conv) let rec assoc_pair x = function [] -> raise Not_found | (a,b,_)::l -> if compare a x = 0 then b else assoc_pair x l let rec subst_meta_instances bl c = match kind_of_term c with | Meta i -> (try assoc_pair i bl with Not_found -> c) | _ -> map_constr (subst_meta_instances bl) c let pose_all_metas_as_evars env evd t = let evdref = ref evd in let rec aux t = match kind_of_term t with | Meta mv -> (match Evd.meta_opt_fvalue !evdref mv with | Some ({rebus=c},_) -> c | None -> let {rebus=ty;freemetas=mvs} = Evd.meta_ftype evd mv in let ty = if mvs = Evd.Metaset.empty then ty else aux ty in let ev = Evarutil.e_new_evar evdref env ~src:(dummy_loc,GoalEvar) ty in evdref := meta_assign mv (ev,(Conv,TypeNotProcessed)) !evdref; ev) | _ -> map_constr aux t in let c = aux t in (* side-effect *) (!evdref, c) let solve_pattern_eqn_array (env,nb) f l c (sigma,metasubst,evarsubst) = match kind_of_term f with | Meta k -> let sigma,c = pose_all_metas_as_evars env sigma c in let c = solve_pattern_eqn env l c in let pb = (Conv,TypeNotProcessed) in if noccur_between 1 nb c then sigma,(k,lift (-nb) c,pb)::metasubst,evarsubst else error_cannot_unify_local env sigma (applist (f, l),c,c) | Evar ev -> let sigma,c = pose_all_metas_as_evars env sigma c in sigma,metasubst,(env,ev,solve_pattern_eqn env l c)::evarsubst | _ -> assert false let push d (env,n) = (push_rel_assum d env,n+1) (*******************************) (* Unification à l'ordre 0 de m et n: [unify_0 env sigma cv_pb m n] renvoie deux listes: metasubst:(int*constr)list récolte les instances des (Meta k) evarsubst:(constr*constr)list récolte les instances des (Const "?k") Attention : pas d'unification entre les différences instances d'une même meta ou evar, il peut rester des doublons *) (* Unification order: *) (* Left to right: unifies first argument and then the other arguments *) (*let unify_l2r x = List.rev x (* Right to left: unifies last argument and then the other arguments *) let unify_r2l x = x let sort_eqns = unify_r2l *) (* Option introduced and activated in Coq 8.3 *) let global_evars_pattern_unification_flag = ref true open Goptions let _ = declare_bool_option { optsync = true; optdepr = false; optname = "pattern-unification for existential variables in tactics"; optkey = ["Tactic";"Evars";"Pattern";"Unification"]; optread = (fun () -> !global_evars_pattern_unification_flag); optwrite = (:=) global_evars_pattern_unification_flag } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "pattern-unification for existential variables in tactics"; optkey = ["Tactic";"Pattern";"Unification"]; optread = (fun () -> !global_evars_pattern_unification_flag); optwrite = (:=) global_evars_pattern_unification_flag } type unify_flags = { modulo_conv_on_closed_terms : Names.transparent_state option; (* What this flag controls was activated with all constants transparent, *) (* even for auto, since Coq V5.10 *) use_metas_eagerly_in_conv_on_closed_terms : bool; (* This refinement of the conversion on closed terms is activable *) (* (and activated for apply, 
rewrite but not auto since Feb 2008 for 8.2) *) modulo_delta : Names.transparent_state; (* This controls which constants are unfoldable; this is on for apply *) (* (but not simple apply) since Feb 2008 for 8.2 *) modulo_delta_types : Names.transparent_state; modulo_delta_in_merge : Names.transparent_state option; (* This controls whether unfoldability is different when trying to unify *) (* several instances of the same metavariable *) (* Typical situation is when we give a pattern to be matched *) (* syntactically against a subterm but we want the metas of the *) (* pattern to be modulo convertibility *) check_applied_meta_types : bool; (* This controls whether meta's applied to arguments have their *) (* type unified with the type of their instance *) resolve_evars : bool; (* This says if type classes instances resolution must be used to infer *) (* the remaining evars *) use_pattern_unification : bool; (* This says if type classes instances resolution must be used to infer *) (* the remaining evars *) use_meta_bound_pattern_unification : bool; (* This solves pattern "?n x1 ... xn = t" when the xi are distinct rels *) (* This allows for instance to unify "forall x:A, B(x)" with "A' -> B'" *) (* This was on for all tactics, including auto, since Sep 2006 for 8.1 *) frozen_evars : ExistentialSet.t; (* Evars of this set are considered axioms and never instantiated *) (* Useful e.g. for autorewrite *) restrict_conv_on_strict_subterms : bool; (* No conversion at the root of the term; potentially useful for rewrite *) modulo_betaiota : bool; (* Support betaiota in the reduction *) (* Note that zeta is always used *) modulo_eta : bool; (* Support eta in the reduction *) allow_K_in_toplevel_higher_order_unification : bool (* This is used only in second/higher order unification when looking for *) (* subterms (rewrite and elim) *) } (* Default flag for unifying a type against a type (e.g. apply) *) (* We set all conversion flags (no flag should be modified anymore) *) let default_unify_flags = { modulo_conv_on_closed_terms = Some full_transparent_state; use_metas_eagerly_in_conv_on_closed_terms = true; modulo_delta = full_transparent_state; modulo_delta_types = full_transparent_state; modulo_delta_in_merge = None; check_applied_meta_types = true; resolve_evars = false; use_pattern_unification = true; use_meta_bound_pattern_unification = true; frozen_evars = ExistentialSet.empty; restrict_conv_on_strict_subterms = false; modulo_betaiota = true; modulo_eta = true; allow_K_in_toplevel_higher_order_unification = false (* in fact useless when not used in w_unify_to_subterm_list *) } let set_merge_flags flags = match flags.modulo_delta_in_merge with | None -> flags | Some ts -> { flags with modulo_delta = ts; modulo_conv_on_closed_terms = Some ts } (* Default flag for the "simple apply" version of unification of a *) (* type against a type (e.g. 
apply) *) (* We set only the flags available at the time the new "apply" extends *) (* out of "simple apply" *) let default_no_delta_unify_flags = { default_unify_flags with modulo_delta = empty_transparent_state; check_applied_meta_types = false; use_pattern_unification = false; use_meta_bound_pattern_unification = true; modulo_betaiota = false; } (* Default flags for looking for subterms in elimination tactics *) (* Not used in practice at the current date, to the exception of *) (* allow_K) because only closed terms are involved in *) (* induction/destruct/case/elim and w_unify_to_subterm_list does not *) (* call w_unify for induction/destruct/case/elim (13/6/2011) *) let elim_flags = { default_unify_flags with restrict_conv_on_strict_subterms = false; (* ? *) modulo_betaiota = false; allow_K_in_toplevel_higher_order_unification = true } let elim_no_delta_flags = { elim_flags with modulo_delta = empty_transparent_state; check_applied_meta_types = false; use_pattern_unification = false; } let set_no_head_reduction flags = { flags with restrict_conv_on_strict_subterms = true } let use_evars_pattern_unification flags = !global_evars_pattern_unification_flag && flags.use_pattern_unification && Flags.version_strictly_greater Flags.V8_2 let use_metas_pattern_unification flags nb l = !global_evars_pattern_unification_flag && flags.use_pattern_unification || (Flags.version_less_or_equal Flags.V8_3 || flags.use_meta_bound_pattern_unification) && array_for_all (fun c -> isRel c && destRel c <= nb) l let expand_key env = function | Some (ConstKey cst) -> constant_opt_value env cst | Some (VarKey id) -> (try named_body id env with Not_found -> None) | Some (RelKey _) -> None | None -> None let subterm_restriction is_subterm flags = not is_subterm && flags.restrict_conv_on_strict_subterms let key_of b flags f = if subterm_restriction b flags then None else match kind_of_term f with | Const cst when is_transparent (ConstKey cst) && Cpred.mem cst (snd flags.modulo_delta) -> Some (ConstKey cst) | Var id when is_transparent (VarKey id) && Idpred.mem id (fst flags.modulo_delta) -> Some (VarKey id) | _ -> None let oracle_order env cf1 cf2 = match cf1 with | None -> (match cf2 with | None -> None | Some k2 -> Some false) | Some k1 -> match cf2 with | None -> Some true | Some k2 -> Some (Conv_oracle.oracle_order false k1 k2) let do_reduce ts (env, nb) sigma c = let (t, stack') = whd_betaiota_deltazeta_for_iota_state ts env sigma (c, empty_stack) in let l = list_of_stack stack' in applist (t, l) let use_full_betaiota flags = flags.modulo_betaiota && Flags.version_strictly_greater Flags.V8_3 let isAllowedEvar flags c = match kind_of_term c with | Evar (evk,_) -> not (ExistentialSet.mem evk flags.frozen_evars) | _ -> false let check_compatibility env (sigma,metasubst,evarsubst) tyM tyN = match subst_defined_metas metasubst tyM with | None -> () | Some m -> match subst_defined_metas metasubst tyN with | None -> () | Some n -> if not (is_trans_fconv CONV full_transparent_state env sigma m n) && is_ground_term sigma m && is_ground_term sigma n then error_cannot_unify env sigma (m,n) let unify_0_with_initial_metas (sigma,ms,es as subst) conv_at_top env cv_pb flags m n = let rec unirec_rec (curenv,nb as curenvnb) pb b wt ((sigma,metasubst,evarsubst) as substn) curm curn = let cM = Evarutil.whd_head_evar sigma curm and cN = Evarutil.whd_head_evar sigma curn in match (kind_of_term cM,kind_of_term cN) with | Meta k1, Meta k2 -> if k1 = k2 then substn else let stM,stN = extract_instance_status pb in if wt && 
flags.check_applied_meta_types then (let tyM = Typing.meta_type sigma k1 in let tyN = Typing.meta_type sigma k2 in check_compatibility curenv substn tyM tyN); if k2 < k1 then sigma,(k1,cN,stN)::metasubst,evarsubst else sigma,(k2,cM,stM)::metasubst,evarsubst | Meta k, _ when not (dependent cM cN) (* helps early trying alternatives *) -> if wt && flags.check_applied_meta_types then (let tyM = Typing.meta_type sigma k in let tyN = get_type_of curenv sigma cN in check_compatibility curenv substn tyM tyN); (* Here we check that [cN] does not contain any local variables *) if nb = 0 then sigma,(k,cN,snd (extract_instance_status pb))::metasubst,evarsubst else if noccur_between 1 nb cN then (sigma, (k,lift (-nb) cN,snd (extract_instance_status pb))::metasubst, evarsubst) else error_cannot_unify_local curenv sigma (m,n,cN) | _, Meta k when not (dependent cN cM) (* helps early trying alternatives *) -> if wt && flags.check_applied_meta_types then (let tyM = get_type_of curenv sigma cM in let tyN = Typing.meta_type sigma k in check_compatibility curenv substn tyM tyN); (* Here we check that [cM] does not contain any local variables *) if nb = 0 then (sigma,(k,cM,fst (extract_instance_status pb))::metasubst,evarsubst) else if noccur_between 1 nb cM then (sigma,(k,lift (-nb) cM,fst (extract_instance_status pb))::metasubst, evarsubst) else error_cannot_unify_local curenv sigma (m,n,cM) | Evar (evk,_ as ev), _ when not (ExistentialSet.mem evk flags.frozen_evars) -> let cmvars = free_rels cM and cnvars = free_rels cN in if Intset.subset cnvars cmvars then sigma,metasubst,((curenv,ev,cN)::evarsubst) else error_cannot_unify_local curenv sigma (m,n,cN) | _, Evar (evk,_ as ev) when not (ExistentialSet.mem evk flags.frozen_evars) -> let cmvars = free_rels cM and cnvars = free_rels cN in if Intset.subset cmvars cnvars then sigma,metasubst,((curenv,ev,cM)::evarsubst) else error_cannot_unify_local curenv sigma (m,n,cN) | Sort s1, Sort s2 -> (try let sigma' = if cv_pb = CUMUL then Evd.set_leq_sort sigma s1 s2 else Evd.set_eq_sort sigma s1 s2 in (sigma', metasubst, evarsubst) with e when Errors.noncritical e -> error_cannot_unify curenv sigma (m,n)) | Lambda (na,t1,c1), Lambda (_,t2,c2) -> unirec_rec (push (na,t1) curenvnb) CONV true wt (unirec_rec curenvnb CONV true false substn t1 t2) c1 c2 | Prod (na,t1,c1), Prod (_,t2,c2) -> unirec_rec (push (na,t1) curenvnb) pb true false (unirec_rec curenvnb CONV true false substn t1 t2) c1 c2 | LetIn (_,a,_,c), _ -> unirec_rec curenvnb pb b wt substn (subst1 a c) cN | _, LetIn (_,a,_,c) -> unirec_rec curenvnb pb b wt substn cM (subst1 a c) (* eta-expansion *) | Lambda (na,t1,c1), _ when flags.modulo_eta -> unirec_rec (push (na,t1) curenvnb) CONV true wt substn c1 (mkApp (lift 1 cN,[|mkRel 1|])) | _, Lambda (na,t2,c2) when flags.modulo_eta -> unirec_rec (push (na,t2) curenvnb) CONV true wt substn (mkApp (lift 1 cM,[|mkRel 1|])) c2 | Case (_,p1,c1,cl1), Case (_,p2,c2,cl2) -> (try array_fold_left2 (unirec_rec curenvnb CONV true wt) (unirec_rec curenvnb CONV true false (unirec_rec curenvnb CONV true false substn p1 p2) c1 c2) cl1 cl2 with ex when precatchable_exception ex -> reduce curenvnb pb b wt substn cM cN) | App (f1,l1), _ when (isMeta f1 && use_metas_pattern_unification flags nb l1 || use_evars_pattern_unification flags && isAllowedEvar flags f1) -> (match is_unification_pattern curenvnb sigma f1 (Array.to_list l1) cN with | None -> (match kind_of_term cN with | App (f2,l2) -> unify_app curenvnb pb b substn cM f1 l1 cN f2 l2 | _ -> unify_not_same_head curenvnb pb b wt 
substn cM cN) | Some l -> solve_pattern_eqn_array curenvnb f1 l cN substn) | _, App (f2,l2) when (isMeta f2 && use_metas_pattern_unification flags nb l2 || use_evars_pattern_unification flags && isAllowedEvar flags f2) -> (match is_unification_pattern curenvnb sigma f2 (Array.to_list l2) cM with | None -> (match kind_of_term cM with | App (f1,l1) -> unify_app curenvnb pb b substn cM f1 l1 cN f2 l2 | _ -> unify_not_same_head curenvnb pb b wt substn cM cN) | Some l -> solve_pattern_eqn_array curenvnb f2 l cM substn) | App (f1,l1), App (f2,l2) -> unify_app curenvnb pb b substn cM f1 l1 cN f2 l2 | _ -> unify_not_same_head curenvnb pb b wt substn cM cN and unify_app curenvnb pb b substn cM f1 l1 cN f2 l2 = try let (f1,l1,f2,l2) = adjust_app_array_size f1 l1 f2 l2 in array_fold_left2 (unirec_rec curenvnb CONV true false) (unirec_rec curenvnb CONV true true substn f1 f2) l1 l2 with ex when precatchable_exception ex -> try reduce curenvnb pb b false substn cM cN with ex when precatchable_exception ex -> try expand curenvnb pb b false substn cM f1 l1 cN f2 l2 with ex when precatchable_exception ex -> canonical_projections curenvnb pb b cM cN substn and unify_not_same_head curenvnb pb b wt substn cM cN = try canonical_projections curenvnb pb b cM cN substn with ex when precatchable_exception ex -> if constr_cmp cv_pb cM cN then substn else try reduce curenvnb pb b wt substn cM cN with ex when precatchable_exception ex -> let (f1,l1) = match kind_of_term cM with App (f,l) -> (f,l) | _ -> (cM,[||]) in let (f2,l2) = match kind_of_term cN with App (f,l) -> (f,l) | _ -> (cN,[||]) in expand curenvnb pb b wt substn cM f1 l1 cN f2 l2 and reduce curenvnb pb b wt (sigma, metas, evars as substn) cM cN = if use_full_betaiota flags && not (subterm_restriction b flags) then let cM' = do_reduce flags.modulo_delta curenvnb sigma cM in if not (eq_constr cM cM') then unirec_rec curenvnb pb b wt substn cM' cN else let cN' = do_reduce flags.modulo_delta curenvnb sigma cN in if not (eq_constr cN cN') then unirec_rec curenvnb pb b wt substn cM cN' else error_cannot_unify (fst curenvnb) sigma (cM,cN) else error_cannot_unify (fst curenvnb) sigma (cM,cN) and expand (curenv,_ as curenvnb) pb b wt (sigma,metasubst,_ as substn) cM f1 l1 cN f2 l2 = if (* Try full conversion on meta-free terms. *) (* Back to 1995 (later on called trivial_unify in 2002), the heuristic was to apply conversion on meta-free (but not evar-free!) terms in all cases (i.e. for apply but also for auto and rewrite, even though auto and rewrite did not use modulo conversion in the rest of the unification algorithm). By compatibility we need to support this separately from the main unification algorithm *) (* The exploitation of known metas has been added in May 2007 (it is used by apply and rewrite); it might now be redundant with the support for delta-expansion (which is used essentially for apply)... 
*) not (subterm_restriction b flags) && match flags.modulo_conv_on_closed_terms with | None -> false | Some convflags -> let subst = if flags.use_metas_eagerly_in_conv_on_closed_terms then metasubst else ms in match subst_defined_metas subst cM with | None -> (* some undefined Metas in cM *) false | Some m1 -> match subst_defined_metas subst cN with | None -> (* some undefined Metas in cN *) false | Some n1 -> (* No subterm restriction there, too much incompatibilities *) if is_trans_fconv pb convflags env sigma m1 n1 then true else if is_ground_term sigma m1 && is_ground_term sigma n1 then error_cannot_unify curenv sigma (cM,cN) else false then substn else let cf1 = key_of b flags f1 and cf2 = key_of b flags f2 in match oracle_order curenv cf1 cf2 with | None -> error_cannot_unify curenv sigma (cM,cN) | Some true -> (match expand_key curenv cf1 with | Some c -> unirec_rec curenvnb pb b wt substn (whd_betaiotazeta sigma (mkApp(c,l1))) cN | None -> (match expand_key curenv cf2 with | Some c -> unirec_rec curenvnb pb b wt substn cM (whd_betaiotazeta sigma (mkApp(c,l2))) | None -> error_cannot_unify curenv sigma (cM,cN))) | Some false -> (match expand_key curenv cf2 with | Some c -> unirec_rec curenvnb pb b wt substn cM (whd_betaiotazeta sigma (mkApp(c,l2))) | None -> (match expand_key curenv cf1 with | Some c -> unirec_rec curenvnb pb b wt substn (whd_betaiotazeta sigma (mkApp(c,l1))) cN | None -> error_cannot_unify curenv sigma (cM,cN))) and canonical_projections curenvnb pb b cM cN (sigma,_,_ as substn) = let f1 () = if isApp cM then let f1l1 = decompose_app cM in if is_open_canonical_projection env sigma f1l1 then let f2l2 = decompose_app cN in solve_canonical_projection curenvnb pb b cM f1l1 cN f2l2 substn else error_cannot_unify (fst curenvnb) sigma (cM,cN) else error_cannot_unify (fst curenvnb) sigma (cM,cN) in if flags.modulo_conv_on_closed_terms = None || subterm_restriction b flags then error_cannot_unify (fst curenvnb) sigma (cM,cN) else try f1 () with e when precatchable_exception e -> if isApp cN then let f2l2 = decompose_app cN in if is_open_canonical_projection env sigma f2l2 then let f1l1 = decompose_app cM in solve_canonical_projection curenvnb pb b cN f2l2 cM f1l1 substn else error_cannot_unify (fst curenvnb) sigma (cM,cN) else error_cannot_unify (fst curenvnb) sigma (cM,cN) and solve_canonical_projection curenvnb pb b cM f1l1 cN f2l2 (sigma,ms,es) = let (c,bs,(params,params1),(us,us2),(ts,ts1),c1,(n,t2)) = try Evarconv.check_conv_record f1l1 f2l2 with Not_found -> error_cannot_unify (fst curenvnb) sigma (cM,cN) in let (evd,ks,_) = List.fold_left (fun (evd,ks,m) b -> if m=n then (evd,t2::ks, m-1) else let mv = new_meta () in let evd' = meta_declare mv (substl ks b) evd in (evd', mkMeta mv :: ks, m - 1)) (sigma,[],List.length bs - 1) bs in let unilist2 f substn l l' = try List.fold_left2 f substn l l' with Invalid_argument "List.fold_left2" -> error_cannot_unify (fst curenvnb) sigma (cM,cN) in let substn = unilist2 (fun s u1 u -> unirec_rec curenvnb pb b false s u1 (substl ks u)) (evd,ms,es) us2 us in let substn = unilist2 (fun s u1 u -> unirec_rec curenvnb pb b false s u1 (substl ks u)) substn params1 params in let substn = unilist2 (unirec_rec curenvnb pb b false) substn ts ts1 in unirec_rec curenvnb pb b false substn c1 (applist (c,(List.rev ks))) in let evd = sigma in if (if occur_meta_or_undefined_evar evd m || occur_meta_or_undefined_evar evd n || subterm_restriction conv_at_top flags then false else if (match flags.modulo_conv_on_closed_terms with | Some convflags -> 
is_trans_fconv cv_pb convflags env sigma m n | _ -> constr_cmp cv_pb m n) then true else if (match flags.modulo_conv_on_closed_terms, flags.modulo_delta with | Some (cv_id, cv_k), (dl_id, dl_k) -> Idpred.subset dl_id cv_id && Cpred.subset dl_k cv_k | None,(dl_id, dl_k) -> Idpred.is_empty dl_id && Cpred.is_empty dl_k) then error_cannot_unify env sigma (m, n) else false) then subst else unirec_rec (env,0) cv_pb conv_at_top false subst m n let unify_0 env sigma = unify_0_with_initial_metas (sigma,[],[]) true env let left = true let right = false let rec unify_with_eta keptside flags env sigma c1 c2 = (* Question: try whd_betadeltaiota on ci if not two lambdas? *) match kind_of_term c1, kind_of_term c2 with | (Lambda (na,t1,c1'), Lambda (_,t2,c2')) -> let env' = push_rel_assum (na,t1) env in let sigma,metas,evars = unify_0 env sigma CONV flags t1 t2 in let side,(sigma,metas',evars') = unify_with_eta keptside flags env' sigma c1' c2' in (side,(sigma,metas@metas',evars@evars')) | (Lambda (na,t,c1'),_)-> let env' = push_rel_assum (na,t) env in let side = left in (* expansion on the right: we keep the left side *) unify_with_eta side flags env' sigma c1' (mkApp (lift 1 c2,[|mkRel 1|])) | (_,Lambda (na,t,c2')) -> let env' = push_rel_assum (na,t) env in let side = right in (* expansion on the left: we keep the right side *) unify_with_eta side flags env' sigma (mkApp (lift 1 c1,[|mkRel 1|])) c2' | _ -> (keptside,unify_0 env sigma CONV flags c1 c2) (* We solved problems [?n =_pb u] (i.e. [u =_(opp pb) ?n]) and [?n =_pb' u'], we now compute the problem on [u =? u'] and decide which of u or u' is kept Rem: the upper constraint is lost in case u <= ?n <= u' (and symmetrically in the case u' <= ?n <= u) *) let merge_instances env sigma flags st1 st2 c1 c2 = match (opp_status st1, st2) with | (Conv, Conv) -> let side = left (* arbitrary choice, but agrees with compatibility *) in let (side,res) = unify_with_eta side flags env sigma c1 c2 in (side,Conv,res) | ((IsSubType | Conv as oppst1), (IsSubType | Conv)) -> let res = unify_0 env sigma CUMUL flags c2 c1 in if oppst1=st2 then (* arbitrary choice *) (left, st1, res) else if st2=IsSubType then (left, st1, res) else (right, st2, res) | ((IsSuperType | Conv as oppst1), (IsSuperType | Conv)) -> let res = unify_0 env sigma CUMUL flags c1 c2 in if oppst1=st2 then (* arbitrary choice *) (left, st1, res) else if st2=IsSuperType then (left, st1, res) else (right, st2, res) | (IsSuperType,IsSubType) -> (try (left, IsSubType, unify_0 env sigma CUMUL flags c2 c1) with e when Errors.noncritical e -> (right, IsSubType, unify_0 env sigma CUMUL flags c1 c2)) | (IsSubType,IsSuperType) -> (try (left, IsSuperType, unify_0 env sigma CUMUL flags c1 c2) with e when Errors.noncritical e -> (right, IsSuperType, unify_0 env sigma CUMUL flags c2 c1)) (* Unification * * Procedure: * (1) The function [unify mc wc M N] produces two lists: * (a) a list of bindings Meta->RHS * (b) a list of bindings EVAR->RHS * * The Meta->RHS bindings cannot themselves contain * meta-vars, so they get applied eagerly to the other * bindings. This may or may not close off all RHSs of * the EVARs. For each EVAR whose RHS is closed off, * we can just apply it, and go on. For each which * is not closed off, we need to do a mimick step - * in general, we have something like: * * ?X == (c e1 e2 ... ei[Meta(k)] ... en) * * so we need to do a mimick step, converting ?X * into * * ?X -> (c ?z1 ... ?zn) * * of the proper types. Then, we can decompose the * equation into * * ?z1 --> e1 * ... 
* ?zi --> ei[Meta(k)] * ... * ?zn --> en * * and keep on going. Whenever we find that a R.H.S. * is closed, we can, as before, apply the constraint * directly. Whenever we find an equation of the form: * * ?z -> Meta(n) * * we can reverse the equation, put it into our metavar * substitution, and keep going. * * The most efficient mimick possible is, for each * Meta-var remaining in the term, to declare a * new EVAR of the same type. This is supposedly * determinable from the clausale form context - * we look up the metavar, take its type there, * and apply the metavar substitution to it, to * close it off. But this might not always work, * since other metavars might also need to be resolved. *) let applyHead env evd n c = let rec apprec n c cty evd = if n = 0 then (evd, c) else match kind_of_term (whd_betadeltaiota env evd cty) with | Prod (_,c1,c2) -> let (evd',evar) = Evarutil.new_evar evd env ~src:(dummy_loc,GoalEvar) c1 in apprec (n-1) (mkApp(c,[|evar|])) (subst1 evar c2) evd' | _ -> error "Apply_Head_Then" in apprec n c (Typing.type_of env evd c) evd let is_mimick_head ts f = match kind_of_term f with | Const c -> not (Closure.is_transparent_constant ts c) | Var id -> not (Closure.is_transparent_variable ts id) | (Rel _|Construct _|Ind _) -> true | _ -> false let try_to_coerce env evd c cty tycon = let j = make_judge c cty in let (evd',j') = inh_conv_coerce_rigid_to dummy_loc env evd j tycon in let evd' = Evarconv.consider_remaining_unif_problems env evd' in let evd' = Evd.map_metas_fvalue (nf_evar evd') evd' in (evd',j'.uj_val) let w_coerce_to_type env evd c cty mvty = let evd,mvty = pose_all_metas_as_evars env evd mvty in let tycon = mk_tycon_type mvty in try try_to_coerce env evd c cty tycon with e when precatchable_exception e -> (* inh_conv_coerce_rigid_to should have reasoned modulo reduction but there are cases where it though it was not rigid (like in fst (nat,nat)) and stops while it could have seen that it is rigid *) let cty = Tacred.hnf_constr env evd cty in try_to_coerce env evd c cty tycon let w_coerce env evd mv c = let cty = get_type_of env evd c in let mvty = Typing.meta_type evd mv in w_coerce_to_type env evd c cty mvty let unify_to_type env sigma flags c status u = let c = refresh_universes c in let t = get_type_of env sigma c in let t = nf_betaiota sigma (nf_meta sigma t) in unify_0 env sigma CUMUL flags t u let unify_type env sigma flags mv status c = let mvty = Typing.meta_type sigma mv in let mvty = nf_meta sigma mvty in unify_to_type env sigma {flags with modulo_delta = flags.modulo_delta_types; modulo_conv_on_closed_terms = Some flags.modulo_delta_types; modulo_betaiota = true} c status mvty (* Move metas that may need coercion at the end of the list of instances *) let order_metas metas = let rec order latemetas = function | [] -> List.rev latemetas | (_,_,(status,to_type) as meta)::metas -> if to_type = CoerceToType then order (meta::latemetas) metas else meta :: order latemetas metas in order [] metas (* Solve an equation ?n[x1=u1..xn=un] = t where ?n is an evar *) let solve_simple_evar_eqn ts env evd ev rhs = let evd,b = solve_simple_eqn (Evarconv.evar_conv_x ts) env evd (None,ev,rhs) in if not b then error_cannot_unify env evd (mkEvar ev,rhs); Evarconv.consider_remaining_unif_problems env evd (* [w_merge env sigma b metas evars] merges common instances in metas or in evars, possibly generating new unification problems; if [b] is true, unification of types of metas is required *) let w_merge env with_types flags (evd,metas,evars) = let rec w_merge_rec evd 
metas evars eqns = (* Process evars *) match evars with | (curenv,(evk,_ as ev),rhs)::evars' -> if Evd.is_defined evd evk then let v = Evd.existential_value evd ev in let (evd,metas',evars'') = unify_0 curenv evd CONV (set_merge_flags flags) rhs v in w_merge_rec evd (metas'@metas) (evars''@evars') eqns else begin (* This can make rhs' ill-typed if metas are *) let rhs' = subst_meta_instances metas rhs in match kind_of_term rhs with | App (f,cl) when occur_meta rhs' -> if occur_evar evk rhs' then error_occur_check curenv evd evk rhs'; if is_mimick_head flags.modulo_delta f then let evd' = mimick_undefined_evar evd flags f (Array.length cl) evk in w_merge_rec evd' metas evars eqns else let evd', rhs'' = pose_all_metas_as_evars curenv evd rhs' in w_merge_rec (solve_simple_evar_eqn flags.modulo_delta_types curenv evd' ev rhs'') metas evars' eqns | _ -> let evd', rhs'' = pose_all_metas_as_evars curenv evd rhs' in w_merge_rec (solve_simple_evar_eqn flags.modulo_delta_types curenv evd' ev rhs'') metas evars' eqns end | [] -> (* Process metas *) match metas with | (mv,c,(status,to_type))::metas -> let ((evd,c),(metas'',evars'')),eqns = if with_types & to_type <> TypeProcessed then if to_type = CoerceToType then (* Some coercion may have to be inserted *) (w_coerce env evd mv c,([],[])),eqns else (* No coercion needed: delay the unification of types *) ((evd,c),([],[])),(mv,status,c)::eqns else ((evd,c),([],[])),eqns in if meta_defined evd mv then let {rebus=c'},(status',_) = meta_fvalue evd mv in let (take_left,st,(evd,metas',evars')) = merge_instances env evd flags status' status c' c in let evd' = if take_left then evd else meta_reassign mv (c,(st,TypeProcessed)) evd in w_merge_rec evd' (metas'@metas@metas'') (evars'@evars'') eqns else let evd' = if occur_meta_evd evd mv c then if isMetaOf mv (whd_betadeltaiota env evd c) then evd else error_cannot_unify env evd (mkMeta mv,c) else meta_assign mv (c,(status,TypeProcessed)) evd in w_merge_rec evd' (metas''@metas) evars'' eqns | [] -> (* Process type eqns *) let rec process_eqns failures = function | (mv,status,c)::eqns -> (match (try Inl (unify_type env evd flags mv status c) with e when Errors.noncritical e -> Inr e) with | Inr e -> process_eqns (((mv,status,c),e)::failures) eqns | Inl (evd,metas,evars) -> w_merge_rec evd metas evars (List.map fst failures @ eqns)) | [] -> (match failures with | [] -> evd | ((mv,status,c),e)::_ -> raise e) in process_eqns [] eqns and mimick_undefined_evar evd flags hdc nargs sp = let ev = Evd.find_undefined evd sp in let sp_env = Global.env_of_context ev.evar_hyps in let (evd', c) = applyHead sp_env evd nargs hdc in let (evd'',mc,ec) = unify_0 sp_env evd' CUMUL (set_merge_flags flags) (get_type_of sp_env evd' c) ev.evar_concl in let evd''' = w_merge_rec evd'' mc ec [] in if evd' == evd''' then Evd.define sp c evd''' else Evd.define sp (Evarutil.nf_evar evd''' c) evd''' in (* merge constraints *) w_merge_rec evd (order_metas metas) evars [] let w_unify_meta_types env ?(flags=default_unify_flags) evd = let metas,evd = retract_coercible_metas evd in w_merge env true flags (evd,metas,[]) (* [w_unify env evd M N] performs a unification of M and N, generating a bunch of unification constraints in the process. These constraints are processed, one-by-one - they may either generate new bindings, or, if there is already a binding, new unifications, which themselves generate new constraints. This continues until we get failure, or we run out of constraints. 
[clenv_typed_unify M N clenv] expects in addition that expected types of metavars are unifiable with the types of their instances *) let check_types env flags (sigma,_,_ as subst) m n = if isEvar_or_Meta (fst (whd_stack sigma m)) then unify_0_with_initial_metas subst true env CUMUL flags (get_type_of env sigma n) (get_type_of env sigma m) else if isEvar_or_Meta (fst (whd_stack sigma n)) then unify_0_with_initial_metas subst true env CUMUL flags (get_type_of env sigma m) (get_type_of env sigma n) else subst let try_resolve_typeclasses env evd flags m n = if flags.resolve_evars then try Typeclasses.resolve_typeclasses ~filter:Typeclasses.no_goals ~split:false ~fail:true env evd with e when Typeclasses_errors.unsatisfiable_exception e -> error_cannot_unify env evd (m, n) else evd let w_unify_core_0 env evd with_types cv_pb flags m n = let (mc1,evd') = retract_coercible_metas evd in let (sigma,ms,es) = check_types env flags (evd,mc1,[]) m n in let subst2 = unify_0_with_initial_metas (evd',ms,es) false env cv_pb flags m n in let evd = w_merge env with_types flags subst2 in try_resolve_typeclasses env evd flags m n let w_unify_0 env evd = w_unify_core_0 env evd false let w_typed_unify env evd = w_unify_core_0 env evd true let w_typed_unify_list env evd flags f1 l1 f2 l2 = let flags' = { flags with resolve_evars = false } in let f1,l1,f2,l2 = adjust_app_list_size f1 l1 f2 l2 in let (mc1,evd') = retract_coercible_metas evd in let subst = List.fold_left2 (fun subst m n -> unify_0_with_initial_metas subst true env CONV flags' m n) (evd',[],[]) (f1::l1) (f2::l2) in let evd = w_merge env true flags subst in try_resolve_typeclasses env evd flags (applist(f1,l1)) (applist(f2,l2)) (* takes a substitution s, an open term op and a closed term cl try to find a subterm of cl which matches op, if op is just a Meta FAIL because we cannot find a binding *) let iter_fail f a = let n = Array.length a in let rec ffail i = if i = n then error "iter_fail" else try f a.(i) with ex when precatchable_exception ex -> ffail (i+1) in ffail 0 (* Tries to find an instance of term [cl] in term [op]. Unifies [cl] to every subterm of [op] until it finds a match. 
Fails if no match is found *) let w_unify_to_subterm env evd ?(flags=default_unify_flags) (op,cl) = let rec matchrec cl = let cl = strip_outer_cast cl in (try if closed0 cl && not (isEvar cl) then w_typed_unify env evd CONV flags op cl,cl else error "Bound 1" with ex when precatchable_exception ex -> (match kind_of_term cl with | App (f,args) -> let n = Array.length args in assert (n>0); let c1 = mkApp (f,Array.sub args 0 (n-1)) in let c2 = args.(n-1) in (try matchrec c1 with ex when precatchable_exception ex -> matchrec c2) | Case(_,_,c,lf) -> (* does not search in the predicate *) (try matchrec c with ex when precatchable_exception ex -> iter_fail matchrec lf) | LetIn(_,c1,_,c2) -> (try matchrec c1 with ex when precatchable_exception ex -> matchrec c2) | Fix(_,(_,types,terms)) -> (try iter_fail matchrec types with ex when precatchable_exception ex -> iter_fail matchrec terms) | CoFix(_,(_,types,terms)) -> (try iter_fail matchrec types with ex when precatchable_exception ex -> iter_fail matchrec terms) | Prod (_,t,c) -> (try matchrec t with ex when precatchable_exception ex -> matchrec c) | Lambda (_,t,c) -> (try matchrec t with ex when precatchable_exception ex -> matchrec c) | _ -> error "Match_subterm")) in try matchrec cl with ex when precatchable_exception ex -> raise (PretypeError (env,evd,NoOccurrenceFound (op, None))) (* Tries to find all instances of term [cl] in term [op]. Unifies [cl] to every subterm of [op] and return all the matches. Fails if no match is found *) let w_unify_to_subterm_all env evd ?(flags=default_unify_flags) (op,cl) = let return a b = let (evd,c as a) = a () in if List.exists (fun (evd',c') -> eq_constr c c') b then b else a :: b in let fail str _ = error str in let bind f g a = let a1 = try f a with ex when precatchable_exception ex -> a in try g a1 with ex when precatchable_exception ex -> a1 in let bind_iter f a = let n = Array.length a in let rec ffail i = if i = n then fun a -> a else bind (f a.(i)) (ffail (i+1)) in ffail 0 in let rec matchrec cl = let cl = strip_outer_cast cl in (bind (if closed0 cl then return (fun () -> w_typed_unify env evd CONV flags op cl,cl) else fail "Bound 1") (match kind_of_term cl with | App (f,args) -> let n = Array.length args in assert (n>0); let c1 = mkApp (f,Array.sub args 0 (n-1)) in let c2 = args.(n-1) in bind (matchrec c1) (matchrec c2) | Case(_,_,c,lf) -> (* does not search in the predicate *) bind (matchrec c) (bind_iter matchrec lf) | LetIn(_,c1,_,c2) -> bind (matchrec c1) (matchrec c2) | Fix(_,(_,types,terms)) -> bind (bind_iter matchrec types) (bind_iter matchrec terms) | CoFix(_,(_,types,terms)) -> bind (bind_iter matchrec types) (bind_iter matchrec terms) | Prod (_,t,c) -> bind (matchrec t) (matchrec c) | Lambda (_,t,c) -> bind (matchrec t) (matchrec c) | _ -> fail "Match_subterm")) in let res = matchrec cl [] in if res = [] then raise (PretypeError (env,evd,NoOccurrenceFound (op, None))) else res let w_unify_to_subterm_list env evd flags hdmeta oplist t = List.fold_right (fun op (evd,l) -> let op = whd_meta evd op in if isMeta op then if flags.allow_K_in_toplevel_higher_order_unification then (evd,op::l) else error_abstraction_over_meta env evd hdmeta (destMeta op) else if occur_meta_or_existential op then let (evd',cl) = try (* This is up to delta for subterms w/o metas ... 
*) w_unify_to_subterm env evd ~flags (strip_outer_cast op,t) with PretypeError (env,_,NoOccurrenceFound _) when flags.allow_K_in_toplevel_higher_order_unification -> (evd,op) in if not flags.allow_K_in_toplevel_higher_order_unification && (* ensure we found a different instance *) List.exists (fun op -> eq_constr op cl) l then error_non_linear_unification env evd hdmeta cl else (evd',cl::l) else if flags.allow_K_in_toplevel_higher_order_unification or dependent op t then (evd,op::l) else (* This is not up to delta ... *) raise (PretypeError (env,evd,NoOccurrenceFound (op, None)))) oplist (evd,[]) let secondOrderAbstraction env evd flags typ (p, oplist) = (* Remove delta when looking for a subterm *) let flags = { flags with modulo_delta = (fst flags.modulo_delta, Cpred.empty) } in let (evd',cllist) = w_unify_to_subterm_list env evd flags p oplist typ in let typp = Typing.meta_type evd' p in let pred = abstract_list_all env evd' typp typ cllist in w_merge env false flags (evd',[p,pred,(Conv,TypeProcessed)],[]) let secondOrderDependentAbstraction env evd flags typ (p, oplist) = let typp = Typing.meta_type evd p in let pred = abstract_list_all_with_dependencies env evd typp typ oplist in w_merge env false flags (evd,[p,pred,(Conv,TypeProcessed)],[]) let secondOrderAbstractionAlgo dep = if dep then secondOrderDependentAbstraction else secondOrderAbstraction let w_unify2 env evd flags dep cv_pb ty1 ty2 = let c1, oplist1 = whd_stack evd ty1 in let c2, oplist2 = whd_stack evd ty2 in match kind_of_term c1, kind_of_term c2 with | Meta p1, _ -> (* Find the predicate *) secondOrderAbstractionAlgo dep env evd flags ty2 (p1,oplist1) | _, Meta p2 -> (* Find the predicate *) secondOrderAbstractionAlgo dep env evd flags ty1 (p2, oplist2) | _ -> error "w_unify2" (* The unique unification algorithm works like this: If the pattern is flexible, and the goal has a lambda-abstraction at the head, then we do a first-order unification. If the pattern is not flexible, then we do a first-order unification, too. If the pattern is flexible, and the goal doesn't have a lambda-abstraction head, then we second-order unification. *) (* We decide here if first-order or second-order unif is used for Apply *) (* We apply a term of type (ai:Ai)C and try to solve a goal C' *) (* The type C is in clenv.templtyp.rebus with a lot of Meta to solve *) (* 3-4-99 [HH] New fo/so choice heuristic : In case we have to unify (Meta(1) args) with ([x:A]t args') we first try second-order unification and if it fails first-order. Before, second-order was used if the type of Meta(1) and [x:A]t was convertible and first-order otherwise. But if failed if e.g. the type of Meta(1) had meta-variables in it. 
*) let w_unify env evd cv_pb ?(flags=default_unify_flags) ty1 ty2 = let hd1,l1 = whd_stack evd ty1 in let hd2,l2 = whd_stack evd ty2 in match kind_of_term hd1, l1<>[], kind_of_term hd2, l2<>[] with (* Pattern case *) | (Meta _, true, Lambda _, _ | Lambda _, _, Meta _, true) when List.length l1 = List.length l2 -> (try w_typed_unify_list env evd flags hd1 l1 hd2 l2 with ex when precatchable_exception ex -> try w_unify2 env evd flags false cv_pb ty1 ty2 with PretypeError (env,_,NoOccurrenceFound _) as e -> raise e) (* Second order case *) | (Meta _, true, _, _ | _, _, Meta _, true) -> (try w_unify2 env evd flags false cv_pb ty1 ty2 with PretypeError (env,_,NoOccurrenceFound _) as e -> raise e | ex when precatchable_exception ex -> try w_typed_unify_list env evd flags hd1 l1 hd2 l2 with ex' when precatchable_exception ex' -> (* Last chance, use pattern-matching with typed dependencies (done late for compatibility) *) try w_unify2 env evd flags true cv_pb ty1 ty2 with ex' when precatchable_exception ex' -> raise ex) (* General case: try first order *) | _ -> w_typed_unify env evd cv_pb flags ty1 ty2 coq-8.4pl2/pretyping/typeclasses.mli0000640000175000001440000001103212010532755016707 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* instance list val typeclasses : unit -> typeclass list val all_instances : unit -> instance list val add_class : typeclass -> unit val add_constant_class : constant -> unit val add_inductive_class : inductive -> unit val new_instance : typeclass -> int option -> bool -> global_reference -> instance val add_instance : instance -> unit val remove_instance : instance -> unit val class_info : global_reference -> typeclass (** raises a UserError if not a class *) (** These raise a UserError if not a class. *) val dest_class_app : env -> constr -> typeclass * constr list (** Just return None if not a class *) val class_of_constr : constr -> (rel_context * (typeclass * constr list)) option val instance_impl : instance -> global_reference val is_class : global_reference -> bool val is_instance : global_reference -> bool (** Returns the term and type for the given instance of the parameters and fields of the type class. *) val instance_constructor : typeclass -> constr list -> constr option * types (** Resolvability. Only undefined evars could be marked or checked for resolvability. *) val is_resolvable : evar_info -> bool val mark_unresolvable : evar_info -> evar_info val mark_resolvable : evar_info -> evar_info val mark_unresolvables : evar_map -> evar_map val is_class_evar : evar_map -> evar_info -> bool (** Filter which evars to consider for resolution. *) type evar_filter = hole_kind -> bool val no_goals : evar_filter val all_evars : evar_filter val resolve_typeclasses : ?filter:evar_filter -> ?split:bool -> ?fail:bool -> env -> evar_map -> evar_map val resolve_one_typeclass : env -> evar_map -> types -> open_constr val register_set_typeclass_transparency : (evaluable_global_reference -> bool (*local?*) -> bool -> unit) -> unit val set_typeclass_transparency : evaluable_global_reference -> bool -> bool -> unit val register_classes_transparent_state : (unit -> transparent_state) -> unit val classes_transparent_state : unit -> transparent_state val register_add_instance_hint : (constr -> bool (* local? 
*) -> int option -> unit) -> unit val register_remove_instance_hint : (global_reference -> unit) -> unit val add_instance_hint : constr -> bool -> int option -> unit val remove_instance_hint : global_reference -> unit val solve_instanciations_problem : (env -> evar_map -> evar_filter -> bool -> bool -> evar_map) ref val solve_instanciation_problem : (env -> evar_map -> types -> open_constr) ref val declare_instance : int option -> bool -> global_reference -> unit (** Build the subinstances hints for a given typeclass object. check tells if we should check for existence of the subinstances and add only the missing ones. *) val build_subclasses : check:bool -> env -> evar_map -> global_reference -> int option (* priority *) -> (int option * constr) list coq-8.4pl2/pretyping/retyping.mli0000640000175000001440000000323612010532755016220 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ?refresh:bool -> env -> evar_map -> constr -> types val get_sort_of : ?polyprop:bool -> env -> evar_map -> types -> sorts val get_sort_family_of : ?polyprop:bool -> env -> evar_map -> types -> sorts_family (** Makes an assumption from a constr *) val get_assumption_of : env -> evar_map -> constr -> types (** Makes an unsafe judgment from a constr *) val get_judgment_of : env -> evar_map -> constr -> unsafe_judgment val type_of_global_reference_knowing_parameters : env -> evar_map -> constr -> constr array -> types val type_of_global_reference_knowing_conclusion : env -> evar_map -> constr -> types -> types coq-8.4pl2/pretyping/typing.ml0000640000175000001440000002400112064012255015506 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* anomaly ("unknown meta ?"^Nameops.string_of_meta mv) in meta_instance evd ty let constant_type_knowing_parameters env cst jl = let paramstyp = Array.map (fun j -> j.uj_type) jl in type_of_constant_knowing_parameters env (constant_type env cst) paramstyp let inductive_type_knowing_parameters env ind jl = let (mib,mip) = lookup_mind_specif env ind in let paramstyp = Array.map (fun j -> j.uj_type) jl in Inductive.type_of_inductive_knowing_parameters env mip paramstyp let e_type_judgment env evdref j = match kind_of_term (whd_betadeltaiota env !evdref j.uj_type) with | Sort s -> {utj_val = j.uj_val; utj_type = s } | Evar ev -> let (evd,s) = Evarutil.define_evar_as_sort !evdref ev in evdref := evd; { utj_val = j.uj_val; utj_type = s } | _ -> error_not_type env j let e_assumption_of_judgment env evdref j = try (e_type_judgment env evdref j).utj_val with TypeError _ -> error_assumption env j let e_judge_of_apply env evdref funj argjv = let rec apply_rec n typ = function | [] -> { uj_val = mkApp (j_val funj, Array.map j_val argjv); uj_type = typ } | hj::restjl -> match kind_of_term (whd_betadeltaiota env !evdref typ) with | Prod (_,c1,c2) -> if Evarconv.e_cumul env evdref hj.uj_type c1 then apply_rec (n+1) (subst1 hj.uj_val c2) restjl else error_cant_apply_bad_type env (n,c1, hj.uj_type) funj argjv | Evar ev -> let (evd',t) = Evarutil.define_evar_as_product !evdref ev in evdref := evd'; let (_,_,c2) = destProd t in apply_rec (n+1) (subst1 hj.uj_val c2) restjl | _ -> error_cant_apply_not_functional env funj argjv in apply_rec 1 funj.uj_type (Array.to_list argjv) let e_check_branch_types env evdref ind cj (lfj,explft) = if Array.length lfj <> Array.length explft then 
error_number_branches env cj (Array.length explft); for i = 0 to Array.length explft - 1 do if not (Evarconv.e_cumul env evdref lfj.(i).uj_type explft.(i)) then error_ill_formed_branch env cj.uj_val (ind,i+1) lfj.(i).uj_type explft.(i) done let rec max_sort l = if List.mem InType l then InType else if List.mem InSet l then InSet else InProp let e_is_correct_arity env evdref c pj ind specif params = let arsign = make_arity_signature env true (make_ind_family (ind,params)) in let allowed_sorts = elim_sorts specif in let error () = error_elim_arity env ind allowed_sorts c pj None in let rec srec env pt ar = let pt' = whd_betadeltaiota env !evdref pt in match kind_of_term pt', ar with | Prod (na1,a1,t), (_,None,a1')::ar' -> if not (Evarconv.e_cumul env evdref a1 a1') then error (); srec (push_rel (na1,None,a1) env) t ar' | Sort s, [] -> if not (List.mem (family_of_sort s) allowed_sorts) then error () | Evar (ev,_), [] -> let s = Termops.new_sort_in_family (max_sort allowed_sorts) in evdref := Evd.define ev (mkSort s) !evdref | _, (_,Some _,_ as d)::ar' -> srec (push_rel d env) (lift 1 pt') ar' | _ -> error () in srec env pj.uj_type (List.rev arsign) let e_type_case_branches env evdref (ind,largs) pj c = let specif = lookup_mind_specif env ind in let nparams = inductive_params specif in let (params,realargs) = list_chop nparams largs in let p = pj.uj_val in let univ = e_is_correct_arity env evdref c pj ind specif params in let lc = build_branches_type ind specif params p in let n = (snd specif).Declarations.mind_nrealargs_ctxt in let ty = whd_betaiota !evdref (Reduction.betazeta_appvect (n+1) p (Array.of_list (realargs@[c]))) in (lc, ty, univ) let e_judge_of_case env evdref ci pj cj lfj = let indspec = try find_mrectype env !evdref cj.uj_type with Not_found -> error_case_not_inductive env cj in let _ = check_case_info env (fst indspec) ci in let (bty,rslty,univ) = e_type_case_branches env evdref indspec pj cj.uj_val in e_check_branch_types env evdref (fst indspec) cj (lfj,bty); { uj_val = mkCase (ci, pj.uj_val, cj.uj_val, Array.map j_val lfj); uj_type = rslty } let check_allowed_sort env sigma ind c p = let pj = Retyping.get_judgment_of env sigma p in let ksort = family_of_sort (sort_of_arity env sigma pj.uj_type) in let specif = Global.lookup_inductive ind in let sorts = elim_sorts specif in if not (List.exists ((=) ksort) sorts) then let s = inductive_sort_family (snd specif) in error_elim_arity env ind sorts c pj (Some(ksort,s,error_elim_explain ksort s)) let e_judge_of_cast env evdref cj k tj = let expected_type = tj.utj_val in if not (Evarconv.e_cumul env evdref cj.uj_type expected_type) then error_actual_type env cj expected_type; { uj_val = mkCast (cj.uj_val, k, expected_type); uj_type = expected_type } (* The typing machine without information, without universes but with existential variables. *) (* cstr must be in n.f. w.r.t. evars and execute returns a judgement where both the term and type are in n.f. 
*) let rec execute env evdref cstr = match kind_of_term cstr with | Meta n -> { uj_val = cstr; uj_type = meta_type !evdref n } | Evar ev -> let ty = Evd.existential_type !evdref ev in let jty = execute env evdref (whd_evar !evdref ty) in let jty = e_assumption_of_judgment env evdref jty in { uj_val = cstr; uj_type = jty } | Rel n -> judge_of_relative env n | Var id -> judge_of_variable env id | Const c -> make_judge cstr (rename_type_of_constant env c) | Ind ind -> make_judge cstr (rename_type_of_inductive env ind) | Construct cstruct -> make_judge cstr (rename_type_of_constructor env cstruct) | Case (ci,p,c,lf) -> let cj = execute env evdref c in let pj = execute env evdref p in let lfj = execute_array env evdref lf in e_judge_of_case env evdref ci pj cj lfj | Fix ((vn,i as vni),recdef) -> let (_,tys,_ as recdef') = execute_recdef env evdref recdef in let fix = (vni,recdef') in check_fix env fix; make_judge (mkFix fix) tys.(i) | CoFix (i,recdef) -> let (_,tys,_ as recdef') = execute_recdef env evdref recdef in let cofix = (i,recdef') in check_cofix env cofix; make_judge (mkCoFix cofix) tys.(i) | Sort (Prop c) -> judge_of_prop_contents c | Sort (Type u) -> judge_of_type u | App (f,args) -> let jl = execute_array env evdref args in let j = match kind_of_term f with | Ind ind -> (* Sort-polymorphism of inductive types *) make_judge f (inductive_type_knowing_parameters env ind (jv_nf_evar !evdref jl)) | Const cst -> (* Sort-polymorphism of inductive types *) make_judge f (constant_type_knowing_parameters env cst (jv_nf_evar !evdref jl)) | _ -> execute env evdref f in e_judge_of_apply env evdref j jl | Lambda (name,c1,c2) -> let j = execute env evdref c1 in let var = e_type_judgment env evdref j in let env1 = push_rel (name,None,var.utj_val) env in let j' = execute env1 evdref c2 in judge_of_abstraction env1 name var j' | Prod (name,c1,c2) -> let j = execute env evdref c1 in let varj = e_type_judgment env evdref j in let env1 = push_rel (name,None,varj.utj_val) env in let j' = execute env1 evdref c2 in let varj' = e_type_judgment env1 evdref j' in judge_of_product env name varj varj' | LetIn (name,c1,c2,c3) -> let j1 = execute env evdref c1 in let j2 = execute env evdref c2 in let j2 = e_type_judgment env evdref j2 in let _ = judge_of_cast env j1 DEFAULTcast j2 in let env1 = push_rel (name,Some j1.uj_val,j2.utj_val) env in let j3 = execute env1 evdref c3 in judge_of_letin env name j1 j2 j3 | Cast (c,k,t) -> let cj = execute env evdref c in let tj = execute env evdref t in let tj = e_type_judgment env evdref tj in e_judge_of_cast env evdref cj k tj and execute_recdef env evdref (names,lar,vdef) = let larj = execute_array env evdref lar in let lara = Array.map (e_assumption_of_judgment env evdref) larj in let env1 = push_rec_types (names,lara,vdef) env in let vdefj = execute_array env1 evdref vdef in let vdefv = Array.map j_val vdefj in let _ = type_fixpoint env1 names lara vdefj in (names,lara,vdefv) and execute_array env evdref = Array.map (execute env evdref) let check env evd c t = let evdref = ref evd in let j = execute env evdref c in if not (Evarconv.e_cumul env evdref j.uj_type t) then error_actual_type env j (nf_evar evd t) (* Type of a constr *) let type_of env evd c = let j = execute env (ref evd) c in (* We are outside the kernel: we take fresh universes *) (* to avoid tactics and co to refresh universes themselves *) Termops.refresh_universes j.uj_type (* Sort of a type *) let sort_of env evd c = let evdref = ref evd in let j = execute env evdref c in let a = e_type_judgment env 
evdref j in a.utj_type (* Try to solve the existential variables by typing *) let e_type_of env evd c = let evdref = ref evd in let j = execute env evdref c in (* side-effect on evdref *) !evdref, Termops.refresh_universes j.uj_type let solve_evars env evd c = let evdref = ref evd in let c = (execute env evdref c).uj_val in (* side-effect on evdref *) !evdref, nf_evar !evdref c let _ = Evarconv.set_solve_evars solve_evars coq-8.4pl2/pretyping/vnorm.mli0000640000175000001440000000121712010532755015515 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> types -> constr coq-8.4pl2/pretyping/tacred.mli0000640000175000001440000000721412010532755015621 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evaluable_global_reference -> bool val error_not_evaluable : Libnames.global_reference -> 'a val evaluable_of_global_reference : Environ.env -> Libnames.global_reference -> evaluable_global_reference val global_of_evaluable_reference : evaluable_global_reference -> Libnames.global_reference exception Redelimination (** Red (raise user error if nothing reducible) *) val red_product : reduction_function (** Red (raise Redelimination if nothing reducible) *) val try_red_product : reduction_function (** Tune the behaviour of simpl for the given constant name *) type simpl_flag = [ `SimplDontExposeCase | `SimplNeverUnfold ] val set_simpl_behaviour : bool -> global_reference -> (int list * int * simpl_flag list) -> unit val get_simpl_behaviour : global_reference -> (int list * int * simpl_flag list) option (** Simpl *) val simpl : reduction_function (** Simpl only at the head *) val whd_simpl : reduction_function (** Hnf: like whd_simpl but force delta-reduction of constants that do not immediately hide a non reducible fix or cofix *) val hnf_constr : reduction_function (** Unfold *) val unfoldn : (occurrences * evaluable_global_reference) list -> reduction_function (** Fold *) val fold_commands : constr list -> reduction_function (** Pattern *) val pattern_occs : (occurrences * constr) list -> reduction_function (** Rem: Lazy strategies are defined in Reduction *) (** Call by value strategy (uses Closures) *) val cbv_norm_flags : Closure.RedFlags.reds -> reduction_function val cbv_beta : local_reduction_function val cbv_betaiota : local_reduction_function val cbv_betadeltaiota : reduction_function val compute : reduction_function (** = [cbv_betadeltaiota] *) (** [reduce_to_atomic_ind env sigma t] puts [t] in the form [t'=(I args)] with [I] an inductive definition; returns [I] and [t'] or fails with a user error *) val reduce_to_atomic_ind : env -> evar_map -> types -> inductive * types (** [reduce_to_quantified_ind env sigma t] puts [t] in the form [t'=(x1:A1)..(xn:An)(I args)] with [I] an inductive definition; returns [I] and [t'] or fails with a user error *) val reduce_to_quantified_ind : env -> evar_map -> types -> inductive * types (** [reduce_to_quantified_ref env sigma ref t] try to put [t] in the form [t'=(x1:A1)..(xn:An)(ref args)] and fails with user error if not possible *) val reduce_to_quantified_ref : env -> evar_map -> global_reference -> types -> types val reduce_to_atomic_ref : env -> evar_map -> global_reference -> types -> types val find_hnf_rectype : env -> evar_map -> types -> inductive * constr list val contextually : bool -> occurrences * constr_pattern -> 
(patvar_map -> reduction_function) -> reduction_function coq-8.4pl2/pretyping/inductiveops.ml0000640000175000001440000003731712122366173016734 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* match dest_recarg ra with | Mrec (_,i) -> List.mem i listind | _ -> false) rvec in array_exists one_is_rec (dest_subterms rarg) let mis_is_recursive (ind,mib,mip) = mis_is_recursive_subset (interval 0 (mib.mind_ntypes-1)) mip.mind_recargs let mis_nf_constructor_type (ind,mib,mip) j = let specif = mip.mind_nf_lc and ntypes = mib.mind_ntypes and nconstr = Array.length mip.mind_consnames in let make_Ik k = mkInd ((fst ind),ntypes-k-1) in if j > nconstr then error "Not enough constructors in the type."; substl (list_tabulate make_Ik ntypes) specif.(j-1) (* Arity of constructors excluding parameters and local defs *) let mis_constr_nargs indsp = let (mib,mip) = Global.lookup_inductive indsp in let recargs = dest_subterms mip.mind_recargs in Array.map List.length recargs let mis_constr_nargs_env env (kn,i) = let mib = Environ.lookup_mind kn env in let mip = mib.mind_packets.(i) in let recargs = dest_subterms mip.mind_recargs in Array.map List.length recargs let mis_constructor_nargs_env env ((kn,i),j) = let mib = Environ.lookup_mind kn env in let mip = mib.mind_packets.(i) in recarg_length mip.mind_recargs j + mib.mind_nparams let constructor_nrealargs env (ind,j) = let (_,mip) = Inductive.lookup_mind_specif env ind in recarg_length mip.mind_recargs j let constructor_nrealhyps env (ind,j) = let (mib,mip) = Inductive.lookup_mind_specif env ind in mip.mind_consnrealdecls.(j-1) let get_full_arity_sign env ind = let (mib,mip) = Inductive.lookup_mind_specif env ind in mip.mind_arity_ctxt let nconstructors ind = let (mib,mip) = Inductive.lookup_mind_specif (Global.env()) ind in Array.length mip.mind_consnames (* Length of arity (w/o local defs) *) let inductive_nargs env ind = let (mib,mip) = Inductive.lookup_mind_specif env ind in (rel_context_length (mib.mind_params_ctxt), mip.mind_nrealargs_ctxt) let allowed_sorts env (kn,i as ind) = let (mib,mip) = Inductive.lookup_mind_specif env ind in mip.mind_kelim (* Annotation for cases *) let make_case_info env ind style = let (mib,mip) = Inductive.lookup_mind_specif env ind in let print_info = { ind_nargs = mip.mind_nrealargs_ctxt; style = style } in { ci_ind = ind; ci_npar = mib.mind_nparams; ci_cstr_ndecls = mip.mind_consnrealdecls; ci_pp_info = print_info } (*s Useful functions *) type constructor_summary = { cs_cstr : constructor; cs_params : constr list; cs_nargs : int; cs_args : rel_context; cs_concl_realargs : constr array } let lift_constructor n cs = { cs_cstr = cs.cs_cstr; cs_params = List.map (lift n) cs.cs_params; cs_nargs = cs.cs_nargs; cs_args = lift_rel_context n cs.cs_args; cs_concl_realargs = Array.map (liftn n (cs.cs_nargs+1)) cs.cs_concl_realargs } (* Accept less parameters than in the signature *) let instantiate_params t args sign = let rec inst s t = function | ((_,None,_)::ctxt,a::args) -> (match kind_of_term t with | Prod(_,_,t) -> inst (a::s) t (ctxt,args) | _ -> anomaly"instantiate_params: type, ctxt and args mismatch") | ((_,(Some b),_)::ctxt,args) -> (match kind_of_term t with | LetIn(_,_,_,t) -> inst ((substl s b)::s) t (ctxt,args) | _ -> anomaly"instantiate_params: type, ctxt and args mismatch") | _, [] -> substl s t | _ -> anomaly"instantiate_params: type, ctxt and args mismatch" in inst [] t (List.rev sign,args) let get_constructor 
(ind,mib,mip,params) j = assert (j <= Array.length mip.mind_consnames); let typi = mis_nf_constructor_type (ind,mib,mip) j in let typi = instantiate_params typi params mib.mind_params_ctxt in let (args,ccl) = decompose_prod_assum typi in let (_,allargs) = decompose_app ccl in let vargs = list_skipn (List.length params) allargs in { cs_cstr = ith_constructor_of_inductive ind j; cs_params = params; cs_nargs = rel_context_length args; cs_args = args; cs_concl_realargs = Array.of_list vargs } let get_constructors env (ind,params) = let (mib,mip) = Inductive.lookup_mind_specif env ind in Array.init (Array.length mip.mind_consnames) (fun j -> get_constructor (ind,mib,mip,params) (j+1)) (* substitution in a signature *) let substnl_rel_context subst n sign = let rec aux n = function | d::sign -> substnl_decl subst n d :: aux (n+1) sign | [] -> [] in List.rev (aux n (List.rev sign)) let substl_rel_context subst = substnl_rel_context subst 0 let rec instantiate_context sign args = let rec aux subst = function | (_,None,_)::sign, a::args -> aux (a::subst) (sign,args) | (_,Some b,_)::sign, args -> aux (substl subst b::subst) (sign,args) | [], [] -> subst | _ -> anomaly "Signature/instance mismatch in inductive family" in aux [] (List.rev sign,args) let get_arity env (ind,params) = let (mib,mip) = Inductive.lookup_mind_specif env ind in let parsign = (* Dynamically detect if called with an instance of recursively uniform parameter only or also of non recursively uniform parameters *) let parsign = mib.mind_params_ctxt in let nnonrecparams = mib.mind_nparams - mib.mind_nparams_rec in if List.length params = rel_context_nhyps parsign - nnonrecparams then snd (list_chop nnonrecparams mib.mind_params_ctxt) else parsign in let arproperlength = List.length mip.mind_arity_ctxt - List.length parsign in let arsign,_ = list_chop arproperlength mip.mind_arity_ctxt in let subst = instantiate_context parsign params in (substl_rel_context subst arsign, Inductive.inductive_sort_family mip) (* Functions to build standard types related to inductive *) let build_dependent_constructor cs = applist (mkConstruct cs.cs_cstr, (List.map (lift cs.cs_nargs) cs.cs_params) @(extended_rel_list 0 cs.cs_args)) let build_dependent_inductive env ((ind, params) as indf) = let arsign,_ = get_arity env indf in let nrealargs = List.length arsign in applist (mkInd ind, (List.map (lift nrealargs) params)@(extended_rel_list 0 arsign)) (* builds the arity of an elimination predicate in sort [s] *) let make_arity_signature env dep indf = let (arsign,_) = get_arity env indf in if dep then (* We need names everywhere *) name_context env ((Anonymous,None,build_dependent_inductive env indf)::arsign) (* Costly: would be better to name once for all at definition time *) else (* No need to enforce names *) arsign let make_arity env dep indf s = mkArity (make_arity_signature env dep indf, s) (* [p] is the predicate and [cs] a constructor summary *) let build_branch_type env dep p cs = let base = appvect (lift cs.cs_nargs p, cs.cs_concl_realargs) in if dep then it_mkProd_or_LetIn_name env (applist (base,[build_dependent_constructor cs])) cs.cs_args else it_mkProd_or_LetIn base cs.cs_args (**************************************************) let extract_mrectype t = let (t, l) = decompose_app t in match kind_of_term t with | Ind ind -> (ind, l) | _ -> raise Not_found let find_mrectype env sigma c = let (t, l) = decompose_app (whd_betadeltaiota env sigma c) in match kind_of_term t with | Ind ind -> (ind, l) | _ -> raise Not_found let find_rectype env 
sigma c = let (t, l) = decompose_app (whd_betadeltaiota env sigma c) in match kind_of_term t with | Ind ind -> let (mib,mip) = Inductive.lookup_mind_specif env ind in if mib.mind_nparams > List.length l then raise Not_found; let (par,rargs) = list_chop mib.mind_nparams l in IndType((ind, par),rargs) | _ -> raise Not_found let find_inductive env sigma c = let (t, l) = decompose_app (whd_betadeltaiota env sigma c) in match kind_of_term t with | Ind ind when (fst (Inductive.lookup_mind_specif env ind)).mind_finite -> (ind, l) | _ -> raise Not_found let find_coinductive env sigma c = let (t, l) = decompose_app (whd_betadeltaiota env sigma c) in match kind_of_term t with | Ind ind when not (fst (Inductive.lookup_mind_specif env ind)).mind_finite -> (ind, l) | _ -> raise Not_found (***********************************************) (* find appropriate names for pattern variables. Useful in the Case and Inversion (case_then_using et case_nodep_then_using) tactics. *) let is_predicate_explicitly_dep env pred arsign = let rec srec env pval arsign = let pv' = whd_betadeltaiota env Evd.empty pval in match kind_of_term pv', arsign with | Lambda (na,t,b), (_,None,_)::arsign -> srec (push_rel_assum (na,t) env) b arsign | Lambda (na,_,_), _ -> (* The following code has an impact on the introduction names given by the tactics "case" and "inversion": when the elimination is not dependent, "case" uses Anonymous for inductive types in Prop and names created by mkProd_name for inductive types in Set/Type while "inversion" uses anonymous for inductive types both in Prop and Set/Type !! Previously, whether names were created or not relied on whether the predicate created in Indrec.make_case_com had a dependent arity or not. To avoid different predicates printed the same in v8, all predicates built in indrec.ml got a dependent arity (Aug 2004). The new way to decide whether names have to be created or not is to use an Anonymous or Named variable to enforce the expected dependency status (of course, Anonymous implies non dependent, but not conversely). At the end, this is only to preserve the compatibility: a check whether the predicate is actually dependent or not would indeed be more natural! 
*) na <> Anonymous | _ -> anomaly "Non eta-expanded dep-expanded \"match\" predicate" in srec env pred arsign let is_elim_predicate_explicitly_dependent env pred indf = let arsign,_ = get_arity env indf in is_predicate_explicitly_dep env pred arsign let set_names env n brty = let (ctxt,cl) = decompose_prod_n_assum n brty in it_mkProd_or_LetIn_name env cl ctxt let set_pattern_names env ind brv = let (mib,mip) = Inductive.lookup_mind_specif env ind in let arities = Array.map (fun c -> rel_context_length ((prod_assum c)) - mib.mind_nparams) mip.mind_nf_lc in array_map2 (set_names env) arities brv let type_case_branches_with_names env indspec p c = let (ind,args) = indspec in let (mib,mip as specif) = Inductive.lookup_mind_specif env ind in let nparams = mib.mind_nparams in let (params,realargs) = list_chop nparams args in let lbrty = Inductive.build_branches_type ind specif params p in (* Build case type *) let conclty = Reduction.beta_appvect p (Array.of_list (realargs@[c])) in (* Adjust names *) if is_elim_predicate_explicitly_dependent env p (ind,params) then (set_pattern_names env ind lbrty, conclty) else (lbrty, conclty) (* Type of Case predicates *) let arity_of_case_predicate env (ind,params) dep k = let arsign,_ = get_arity env (ind,params) in let mind = build_dependent_inductive env (ind,params) in let concl = if dep then mkArrow mind (mkSort k) else mkSort k in it_mkProd_or_LetIn concl arsign (***********************************************) (* Inferring the sort of parameters of a polymorphic inductive type knowing the sort of the conclusion *) (* Compute the inductive argument types: replace the sorts that appear in the type of the inductive by the sort of the conclusion, and the other ones by fresh universes. *) let rec instantiate_universes env scl is = function | (_,Some _,_ as d)::sign, exp -> d :: instantiate_universes env scl is (sign, exp) | d::sign, None::exp -> d :: instantiate_universes env scl is (sign, exp) | (na,None,ty)::sign, Some u::exp -> let ctx,_ = Reduction.dest_arity env ty in let s = (* Does the sort of parameter [u] appear in (or equal) the sort of inductive [is] ? 
*) if univ_depends u is then scl (* constrained sort: replace by scl *) else (* unconstriained sort: replace by fresh universe *) new_Type_sort() in (na,None,mkArity(ctx,s)):: instantiate_universes env scl is (sign, exp) | sign, [] -> sign (* Uniform parameters are exhausted *) | [], _ -> assert false (* Does not deal with universes, but only with Set/Type distinction *) let type_of_inductive_knowing_conclusion env mip conclty = match mip.mind_arity with | Monomorphic s -> s.mind_user_arity | Polymorphic ar -> let _,scl = Reduction.dest_arity env conclty in let ctx = List.rev mip.mind_arity_ctxt in let ctx = instantiate_universes env scl ar.poly_level (ctx,ar.poly_param_levels) in mkArity (List.rev ctx,scl) (***********************************************) (* Guard condition *) (* A function which checks that a term well typed verifies both syntactic conditions *) let control_only_guard env c = let check_fix_cofix e c = match kind_of_term c with | CoFix (_,(_,_,_) as cofix) -> Inductive.check_cofix e cofix | Fix (_,(_,_,_) as fix) -> Inductive.check_fix e fix | _ -> () in let rec iter env c = check_fix_cofix env c; iter_constr_with_full_binders push_rel iter env c in iter env c let subst_inductive subst (kn,i as ind) = let kn' = Mod_subst.subst_ind subst kn in if kn == kn' then ind else (kn',i) coq-8.4pl2/pretyping/indrec.mli0000640000175000001440000000451312010532755015622 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evar_map -> inductive -> dep_flag -> sorts_family -> constr (** Build a dependent case elimination predicate unless type is in Prop *) val build_case_analysis_scheme_default : env -> evar_map -> inductive -> sorts_family -> constr (** Builds a recursive induction scheme (Peano-induction style) in the same sort family as the inductive family; it is dependent if not in Prop *) val build_induction_scheme : env -> evar_map -> inductive -> dep_flag -> sorts_family -> constr (** Builds mutual (recursive) induction schemes *) val build_mutual_induction_scheme : env -> evar_map -> (inductive * dep_flag * sorts_family) list -> constr list (** Scheme combinators *) (** [modify_sort_scheme s n c] modifies the quantification sort of scheme c whose predicate is abstracted at position [n] of [c] *) val modify_sort_scheme : sorts -> int -> constr -> constr (** [weaken_sort_scheme s n c t] derives by subtyping from [c:t] whose conclusion is quantified on [Type] at position [n] of [t] a scheme quantified on sort [s] *) val weaken_sort_scheme : sorts -> int -> constr -> types -> constr * types (** Recursor names utilities *) val lookup_eliminator : inductive -> sorts_family -> constr val elimination_suffix : sorts_family -> string val make_elimination_ident : identifier -> sorts_family -> identifier val case_suffix : string coq-8.4pl2/pretyping/reductionops.ml0000640000175000001440000007734212121620060016723 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evar_map -> constr -> constr type reduction_function = contextual_reduction_function type local_reduction_function = evar_map -> constr -> constr type contextual_stack_reduction_function = env -> evar_map -> constr -> constr * constr list type stack_reduction_function = contextual_stack_reduction_function type local_stack_reduction_function = evar_map -> constr -> constr * constr list type contextual_state_reduction_function = 
env -> evar_map -> state -> state type state_reduction_function = contextual_state_reduction_function type local_state_reduction_function = evar_map -> state -> state (*************************************) (*** Reduction Functions Operators ***) (*************************************) let safe_evar_value sigma ev = try Some (Evd.existential_value sigma ev) with NotInstantiatedEvar | Not_found -> None let rec whd_app_state sigma (x, stack as s) = match kind_of_term x with | App (f,cl) -> whd_app_state sigma (f, append_stack cl stack) | Cast (c,_,_) -> whd_app_state sigma (c, stack) | Evar ev -> (match safe_evar_value sigma ev with Some c -> whd_app_state sigma (c,stack) | _ -> s) | _ -> s let safe_meta_value sigma ev = try Some (Evd.meta_value sigma ev) with Not_found -> None let appterm_of_stack (f,s) = (f,list_of_stack s) let whd_stack sigma x = appterm_of_stack (whd_app_state sigma (x, empty_stack)) let whd_castapp_stack = whd_stack let strong whdfun env sigma t = let rec strongrec env t = map_constr_with_full_binders push_rel strongrec env (whdfun env sigma t) in strongrec env t let local_strong whdfun sigma = let rec strongrec t = map_constr strongrec (whdfun sigma t) in strongrec let rec strong_prodspine redfun sigma c = let x = redfun sigma c in match kind_of_term x with | Prod (na,a,b) -> mkProd (na,a,strong_prodspine redfun sigma b) | _ -> x (*************************************) (*** Reduction using bindingss ***) (*************************************) (* This signature is very similar to Closure.RedFlagsSig except there is eta but no per-constant unfolding *) module type RedFlagsSig = sig type flags type flag val fbeta : flag val fdelta : flag val feta : flag val fiota : flag val fzeta : flag val mkflags : flag list -> flags val red_beta : flags -> bool val red_delta : flags -> bool val red_eta : flags -> bool val red_iota : flags -> bool val red_zeta : flags -> bool end (* Compact Implementation *) module RedFlags = (struct type flag = int type flags = int let fbeta = 1 let fdelta = 2 let feta = 8 let fiota = 16 let fzeta = 32 let mkflags = List.fold_left (lor) 0 let red_beta f = f land fbeta <> 0 let red_delta f = f land fdelta <> 0 let red_eta f = f land feta <> 0 let red_iota f = f land fiota <> 0 let red_zeta f = f land fzeta <> 0 end : RedFlagsSig) open RedFlags (* Local *) let beta = mkflags [fbeta] let eta = mkflags [feta] let zeta = mkflags [fzeta] let betaiota = mkflags [fiota; fbeta] let betaiotazeta = mkflags [fiota; fbeta;fzeta] (* Contextual *) let delta = mkflags [fdelta] let betadelta = mkflags [fbeta;fdelta;fzeta] let betadeltaeta = mkflags [fbeta;fdelta;fzeta;feta] let betadeltaiota = mkflags [fbeta;fdelta;fzeta;fiota] let betadeltaiota_nolet = mkflags [fbeta;fdelta;fiota] let betadeltaiotaeta = mkflags [fbeta;fdelta;fzeta;fiota;feta] let betaetalet = mkflags [fbeta;feta;fzeta] let betalet = mkflags [fbeta;fzeta] (* Beta Reduction tools *) let rec stacklam recfun env t stack = match (decomp_stack stack,kind_of_term t) with | Some (h,stacktl), Lambda (_,_,c) -> stacklam recfun (h::env) c stacktl | _ -> recfun (substl env t, stack) let beta_applist (c,l) = stacklam app_stack [] c (append_stack_list l empty_stack) (* Iota reduction tools *) type 'a miota_args = { mP : constr; (* the result type *) mconstr : constr; (* the constructor *) mci : case_info; (* special info to re-build pattern *) mcargs : 'a list; (* the constructor's arguments *) mlf : 'a array } (* the branch code vector *) let reducible_mind_case c = match kind_of_term c with | Construct _ | CoFix 
_ -> true | _ -> false let contract_cofix (bodynum,(types,names,bodies as typedbodies)) = let nbodies = Array.length bodies in let make_Fi j = mkCoFix (nbodies-j-1,typedbodies) in substl (list_tabulate make_Fi nbodies) bodies.(bodynum) let reduce_mind_case mia = match kind_of_term mia.mconstr with | Construct (ind_sp,i) -> (* let ncargs = (fst mia.mci).(i-1) in*) let real_cargs = list_skipn mia.mci.ci_npar mia.mcargs in applist (mia.mlf.(i-1),real_cargs) | CoFix cofix -> let cofix_def = contract_cofix cofix in mkCase (mia.mci, mia.mP, applist(cofix_def,mia.mcargs), mia.mlf) | _ -> assert false (* contracts fix==FIX[nl;i](A1...Ak;[F1...Fk]{B1....Bk}) to produce Bi[Fj --> FIX[nl;j](A1...Ak;[F1...Fk]{B1...Bk})] *) let contract_fix ((recindices,bodynum),(types,names,bodies as typedbodies)) = let nbodies = Array.length recindices in let make_Fi j = mkFix ((recindices,nbodies-j-1),typedbodies) in substl (list_tabulate make_Fi nbodies) bodies.(bodynum) let fix_recarg ((recindices,bodynum),_) stack = assert (0 <= bodynum & bodynum < Array.length recindices); let recargnum = Array.get recindices bodynum in try Some (recargnum, stack_nth stack recargnum) with Not_found -> None type fix_reduction_result = NotReducible | Reduced of state let reduce_fix whdfun sigma fix stack = match fix_recarg fix stack with | None -> NotReducible | Some (recargnum,recarg) -> let (recarg'hd,_ as recarg') = whdfun sigma (recarg, empty_stack) in let stack' = stack_assign stack recargnum (app_stack recarg') in (match kind_of_term recarg'hd with | Construct _ -> Reduced (contract_fix fix, stack') | _ -> NotReducible) (* Generic reduction function *) (* Y avait un commentaire pour whd_betadeltaiota : NB : Cette fonction alloue peu c'est l'appel ``let (c,cargs) = whfun (recarg, empty_stack)'' ------------------- qui coute cher *) let rec whd_state_gen flags ts env sigma = let rec whrec (x, stack as s) = match kind_of_term x with | Rel n when red_delta flags -> (match lookup_rel n env with | (_,Some body,_) -> whrec (lift n body, stack) | _ -> s) | Var id when red_delta flags -> (match lookup_named id env with | (_,Some body,_) -> whrec (body, stack) | _ -> s) | Evar ev -> (match safe_evar_value sigma ev with | Some body -> whrec (body, stack) | None -> s) | Meta ev -> (match safe_meta_value sigma ev with | Some body -> whrec (body, stack) | None -> s) | Const const when is_transparent_constant ts const -> (match constant_opt_value env const with | Some body -> whrec (body, stack) | None -> s) | LetIn (_,b,_,c) when red_zeta flags -> stacklam whrec [b] c stack | Cast (c,_,_) -> whrec (c, stack) | App (f,cl) -> whrec (f, append_stack cl stack) | Lambda (na,t,c) -> (match decomp_stack stack with | Some (a,m) when red_beta flags -> stacklam whrec [a] c m | None when red_eta flags -> let env' = push_rel (na,None,t) env in let whrec' = whd_state_gen flags ts env' sigma in (match kind_of_term (app_stack (whrec' (c, empty_stack))) with | App (f,cl) -> let napp = Array.length cl in if napp > 0 then let x', l' = whrec' (array_last cl, empty_stack) in match kind_of_term x', decomp_stack l' with | Rel 1, None -> let lc = Array.sub cl 0 (napp-1) in let u = if napp=1 then f else appvect (f,lc) in if noccurn 1 u then (pop u,empty_stack) else s | _ -> s else s | _ -> s) | _ -> s) | Case (ci,p,d,lf) when red_iota flags -> let (c,cargs) = whrec (d, empty_stack) in if reducible_mind_case c then whrec (reduce_mind_case {mP=p; mconstr=c; mcargs=list_of_stack cargs; mci=ci; mlf=lf}, stack) else (mkCase (ci, p, app_stack (c,cargs), lf), stack) | 
Fix fix when red_iota flags -> (match reduce_fix (fun _ -> whrec) sigma fix stack with | Reduced s' -> whrec s' | NotReducible -> s) | x -> s in whrec let local_whd_state_gen flags sigma = let rec whrec (x, stack as s) = match kind_of_term x with | LetIn (_,b,_,c) when red_zeta flags -> stacklam whrec [b] c stack | Cast (c,_,_) -> whrec (c, stack) | App (f,cl) -> whrec (f, append_stack cl stack) | Lambda (_,_,c) -> (match decomp_stack stack with | Some (a,m) when red_beta flags -> stacklam whrec [a] c m | None when red_eta flags -> (match kind_of_term (app_stack (whrec (c, empty_stack))) with | App (f,cl) -> let napp = Array.length cl in if napp > 0 then let x', l' = whrec (array_last cl, empty_stack) in match kind_of_term x', decomp_stack l' with | Rel 1, None -> let lc = Array.sub cl 0 (napp-1) in let u = if napp=1 then f else appvect (f,lc) in if noccurn 1 u then (pop u,empty_stack) else s | _ -> s else s | _ -> s) | _ -> s) | Case (ci,p,d,lf) when red_iota flags -> let (c,cargs) = whrec (d, empty_stack) in if reducible_mind_case c then whrec (reduce_mind_case {mP=p; mconstr=c; mcargs=list_of_stack cargs; mci=ci; mlf=lf}, stack) else (mkCase (ci, p, app_stack (c,cargs), lf), stack) | Fix fix when red_iota flags -> (match reduce_fix (fun _ ->whrec) sigma fix stack with | Reduced s' -> whrec s' | NotReducible -> s) | Evar ev -> (match safe_evar_value sigma ev with Some c -> whrec (c,stack) | None -> s) | Meta ev -> (match safe_meta_value sigma ev with Some c -> whrec (c,stack) | None -> s) | x -> s in whrec let stack_red_of_state_red f sigma x = appterm_of_stack (f sigma (x, empty_stack)) let red_of_state_red f sigma x = app_stack (f sigma (x,empty_stack)) (* 1. Beta Reduction Functions *) let whd_beta_state = local_whd_state_gen beta let whd_beta_stack = stack_red_of_state_red whd_beta_state let whd_beta = red_of_state_red whd_beta_state (* Nouveau ! *) let whd_betaetalet_state = local_whd_state_gen betaetalet let whd_betaetalet_stack = stack_red_of_state_red whd_betaetalet_state let whd_betaetalet = red_of_state_red whd_betaetalet_state let whd_betalet_state = local_whd_state_gen betalet let whd_betalet_stack = stack_red_of_state_red whd_betalet_state let whd_betalet = red_of_state_red whd_betalet_state (* 2. Delta Reduction Functions *) let whd_delta_state e = whd_state_gen delta full_transparent_state e let whd_delta_stack env = stack_red_of_state_red (whd_delta_state env) let whd_delta env = red_of_state_red (whd_delta_state env) let whd_betadelta_state e = whd_state_gen betadelta full_transparent_state e let whd_betadelta_stack env = stack_red_of_state_red (whd_betadelta_state env) let whd_betadelta env = red_of_state_red (whd_betadelta_state env) let whd_betadeltaeta_state e = whd_state_gen betadeltaeta full_transparent_state e let whd_betadeltaeta_stack env = stack_red_of_state_red (whd_betadeltaeta_state env) let whd_betadeltaeta env = red_of_state_red (whd_betadeltaeta_state env) (* 3. 
Iota reduction Functions *) let whd_betaiota_state = local_whd_state_gen betaiota let whd_betaiota_stack = stack_red_of_state_red whd_betaiota_state let whd_betaiota = red_of_state_red whd_betaiota_state let whd_betaiotazeta_state = local_whd_state_gen betaiotazeta let whd_betaiotazeta_stack = stack_red_of_state_red whd_betaiotazeta_state let whd_betaiotazeta = red_of_state_red whd_betaiotazeta_state let whd_betadeltaiota_state env = whd_state_gen betadeltaiota full_transparent_state env let whd_betadeltaiota_stack env = stack_red_of_state_red (whd_betadeltaiota_state env) let whd_betadeltaiota env = red_of_state_red (whd_betadeltaiota_state env) let whd_betadeltaiota_state_using ts env = whd_state_gen betadeltaiota ts env let whd_betadeltaiota_stack_using ts env = stack_red_of_state_red (whd_betadeltaiota_state_using ts env) let whd_betadeltaiota_using ts env = red_of_state_red (whd_betadeltaiota_state_using ts env) let whd_betadeltaiotaeta_state env = whd_state_gen betadeltaiotaeta full_transparent_state env let whd_betadeltaiotaeta_stack env = stack_red_of_state_red (whd_betadeltaiotaeta_state env) let whd_betadeltaiotaeta env = red_of_state_red (whd_betadeltaiotaeta_state env) let whd_betadeltaiota_nolet_state env = whd_state_gen betadeltaiota_nolet full_transparent_state env let whd_betadeltaiota_nolet_stack env = stack_red_of_state_red (whd_betadeltaiota_nolet_state env) let whd_betadeltaiota_nolet env = red_of_state_red (whd_betadeltaiota_nolet_state env) (* 4. Eta reduction Functions *) let whd_eta c = app_stack (local_whd_state_gen eta Evd.empty (c,empty_stack)) (* 5. Zeta Reduction Functions *) let whd_zeta c = app_stack (local_whd_state_gen zeta Evd.empty (c,empty_stack)) (****************************************************************************) (* Reduction Functions *) (****************************************************************************) (* Replacing defined evars for error messages *) let rec whd_evar sigma c = match kind_of_term c with | Evar ev -> (match safe_evar_value sigma ev with Some c -> whd_evar sigma c | None -> c) | Sort s -> whd_sort_variable sigma c | _ -> c let nf_evar = local_strong whd_evar (* lazy reduction functions. 
The infos must be created for each term *) (* Note by HH [oct 08] : why would it be the job of clos_norm_flags to add a [nf_evar] here *) let clos_norm_flags flgs env sigma t = try norm_val (create_clos_infos ~evars:(safe_evar_value sigma) flgs env) (inject t) with Anomaly _ -> error "Tried to normalized ill-typed term" let nf_beta = clos_norm_flags Closure.beta empty_env let nf_betaiota = clos_norm_flags Closure.betaiota empty_env let nf_betadeltaiota env sigma = clos_norm_flags Closure.betadeltaiota env sigma (* Attention reduire un beta-redexe avec un argument qui n'est pas une variable, peut changer enormement le temps de conversion lors du type checking : (fun x => x + x) M *) let rec whd_betaiota_preserving_vm_cast env sigma t = let rec stacklam_var subst t stack = match (decomp_stack stack,kind_of_term t) with | Some (h,stacktl), Lambda (_,_,c) -> begin match kind_of_term h with | Rel i when not (evaluable_rel i env) -> stacklam_var (h::subst) c stacktl | Var id when not (evaluable_named id env)-> stacklam_var (h::subst) c stacktl | _ -> whrec (substl subst t, stack) end | _ -> whrec (substl subst t, stack) and whrec (x, stack as s) = match kind_of_term x with | Evar ev -> (match safe_evar_value sigma ev with | Some body -> whrec (body, stack) | None -> s) | Cast (c,VMcast,t) -> let c = app_stack (whrec (c,empty_stack)) in let t = app_stack (whrec (t,empty_stack)) in (mkCast(c,VMcast,t),stack) | Cast (c,DEFAULTcast,_) -> whrec (c, stack) | App (f,cl) -> whrec (f, append_stack cl stack) | Lambda (na,t,c) -> (match decomp_stack stack with | Some (a,m) -> stacklam_var [a] c m | _ -> s) | Case (ci,p,d,lf) -> let (c,cargs) = whrec (d, empty_stack) in if reducible_mind_case c then whrec (reduce_mind_case {mP=p; mconstr=c; mcargs=list_of_stack cargs; mci=ci; mlf=lf}, stack) else (mkCase (ci, p, app_stack (c,cargs), lf), stack) | x -> s in app_stack (whrec (t,empty_stack)) let nf_betaiota_preserving_vm_cast = strong whd_betaiota_preserving_vm_cast (********************************************************************) (* Conversion *) (********************************************************************) (* let fkey = Profile.declare_profile "fhnf";; let fhnf info v = Profile.profile2 fkey fhnf info v;; let fakey = Profile.declare_profile "fhnf_apply";; let fhnf_apply info k h a = Profile.profile4 fakey fhnf_apply info k h a;; *) let is_transparent k = Conv_oracle.get_strategy k <> Conv_oracle.Opaque (* Conversion utility functions *) type conversion_test = constraints -> constraints let pb_is_equal pb = pb = CONV let pb_equal = function | CUMUL -> CONV | CONV -> CONV let sort_cmp = sort_cmp let test_conversion (f: ?l2r:bool-> ?evars:'a->'b) env sigma x y = try let _ = f ~evars:(safe_evar_value sigma) env x y in true with NotConvertible -> false | Anomaly _ -> error "Conversion test raised an anomaly" let is_conv env sigma = test_conversion Reduction.conv env sigma let is_conv_leq env sigma = test_conversion Reduction.conv_leq env sigma let is_fconv = function | CONV -> is_conv | CUMUL -> is_conv_leq let test_trans_conversion (f: ?l2r:bool-> ?evars:'a->'b) reds env sigma x y = try let _ = f ~evars:(safe_evar_value sigma) reds env x y in true with NotConvertible -> false | Anomaly _ -> error "Conversion test raised an anomaly" let is_trans_conv reds env sigma = test_trans_conversion Reduction.trans_conv reds env sigma let is_trans_conv_leq reds env sigma = test_trans_conversion Reduction.trans_conv_leq reds env sigma let is_trans_fconv = function | CONV -> is_trans_conv | CUMUL -> 
is_trans_conv_leq (********************************************************************) (* Special-Purpose Reduction *) (********************************************************************) let whd_meta sigma c = match kind_of_term c with | Meta p -> (try meta_value sigma p with Not_found -> c) | _ -> c (* Try to replace all metas. Does not replace metas in the metas' values * Differs from (strong whd_meta). *) let plain_instance s c = let rec irec n u = match kind_of_term u with | Meta p -> (try lift n (List.assoc p s) with Not_found -> u) | App (f,l) when isCast f -> let (f,_,t) = destCast f in let l' = Array.map (irec n) l in (match kind_of_term f with | Meta p -> (* Don't flatten application nodes: this is used to extract a proof-term from a proof-tree and we want to keep the structure of the proof-tree *) (try let g = List.assoc p s in match kind_of_term g with | App _ -> let h = id_of_string "H" in mkLetIn (Name h,g,t,mkApp(mkRel 1,Array.map (lift 1) l')) | _ -> mkApp (g,l') with Not_found -> mkApp (f,l')) | _ -> mkApp (irec n f,l')) | Cast (m,_,_) when isMeta m -> (try lift n (List.assoc (destMeta m) s) with Not_found -> u) | _ -> map_constr_with_binders succ irec n u in if s = [] then c else irec 0 c (* [instance] is used for [res_pf]; the call to [local_strong whd_betaiota] has (unfortunately) different subtle side effects: - ** Order of subgoals ** If the lemma is a case analysis with parameters, it will move the parameters as first subgoals (e.g. "case H" applied on "H:D->A/\B|-C" will present the subgoal |-D first while w/o betaiota the subgoal |-D would have come last). - ** Betaiota-contraction in statement ** If the lemma has a parameter which is a function and this function is applied in the lemma, then the _strong_ betaiota will contract the application of the function to its argument (e.g. "apply (H (fun x => x))" in "H:forall f, f 0 = 0 |- 0=0" will result in applying the lemma 0=0 in which "(fun x => x) 0" has been contracted). A goal to rewrite may then fail or succeed differently. - ** Naming of hypotheses ** If a lemma is a function of the form "fun H:(forall a:A, P a) => .. F H .." where the expected type of H is "forall b:A, P b", then, without reduction, the application of the lemma will generate a subgoal "forall a:A, P a" (and intro will use name "a"), while with reduction, it will generate a subgoal "forall b:A, P b" (and intro will use name "b"). - ** First-order pattern-matching ** If a lemma has the type "(fun x => p) t" then rewriting t may fail if the type of the lemma is first beta-reduced (this typically happens when rewriting a single variable and the type of the lemma is obtained by meta_instance (with empty map) which itself calls instance with this empty map). *) let instance sigma s c = (* if s = [] then c else *) local_strong whd_betaiota sigma (plain_instance s c) (* pseudo-reduction rule: * [hnf_prod_app env s (Prod(_,B)) N --> B[N] * with an HNF on the first argument to produce a product. * if this does not work, then we use the string S as part of our * error message. 
*) let hnf_prod_app env sigma t n = match kind_of_term (whd_betadeltaiota env sigma t) with | Prod (_,_,b) -> subst1 n b | _ -> anomaly "hnf_prod_app: Need a product" let hnf_prod_appvect env sigma t nl = Array.fold_left (hnf_prod_app env sigma) t nl let hnf_prod_applist env sigma t nl = List.fold_left (hnf_prod_app env sigma) t nl let hnf_lam_app env sigma t n = match kind_of_term (whd_betadeltaiota env sigma t) with | Lambda (_,_,b) -> subst1 n b | _ -> anomaly "hnf_lam_app: Need an abstraction" let hnf_lam_appvect env sigma t nl = Array.fold_left (hnf_lam_app env sigma) t nl let hnf_lam_applist env sigma t nl = List.fold_left (hnf_lam_app env sigma) t nl let splay_prod env sigma = let rec decrec env m c = let t = whd_betadeltaiota env sigma c in match kind_of_term t with | Prod (n,a,c0) -> decrec (push_rel (n,None,a) env) ((n,a)::m) c0 | _ -> m,t in decrec env [] let splay_lam env sigma = let rec decrec env m c = let t = whd_betadeltaiota env sigma c in match kind_of_term t with | Lambda (n,a,c0) -> decrec (push_rel (n,None,a) env) ((n,a)::m) c0 | _ -> m,t in decrec env [] let splay_prod_assum env sigma = let rec prodec_rec env l c = let t = whd_betadeltaiota_nolet env sigma c in match kind_of_term t with | Prod (x,t,c) -> prodec_rec (push_rel (x,None,t) env) (add_rel_decl (x, None, t) l) c | LetIn (x,b,t,c) -> prodec_rec (push_rel (x, Some b, t) env) (add_rel_decl (x, Some b, t) l) c | Cast (c,_,_) -> prodec_rec env l c | _ -> l,t in prodec_rec env empty_rel_context let splay_arity env sigma c = let l, c = splay_prod env sigma c in match kind_of_term c with | Sort s -> l,s | _ -> invalid_arg "splay_arity" let sort_of_arity env sigma c = snd (splay_arity env sigma c) let splay_prod_n env sigma n = let rec decrec env m ln c = if m = 0 then (ln,c) else match kind_of_term (whd_betadeltaiota env sigma c) with | Prod (n,a,c0) -> decrec (push_rel (n,None,a) env) (m-1) (add_rel_decl (n,None,a) ln) c0 | _ -> invalid_arg "splay_prod_n" in decrec env n empty_rel_context let splay_lam_n env sigma n = let rec decrec env m ln c = if m = 0 then (ln,c) else match kind_of_term (whd_betadeltaiota env sigma c) with | Lambda (n,a,c0) -> decrec (push_rel (n,None,a) env) (m-1) (add_rel_decl (n,None,a) ln) c0 | _ -> invalid_arg "splay_lam_n" in decrec env n empty_rel_context exception NotASort let decomp_sort env sigma t = match kind_of_term (whd_betadeltaiota env sigma t) with | Sort s -> s | _ -> raise NotASort let is_sort env sigma arity = try let _ = decomp_sort env sigma arity in true with NotASort -> false (* reduction to head-normal-form allowing delta/zeta only in argument of case/fix (heuristic used by evar_conv) *) let whd_betaiota_deltazeta_for_iota_state ts env sigma s = let rec whrec s = let (t, stack as s) = whd_betaiota_state sigma s in match kind_of_term t with | Case (ci,p,d,lf) -> let (cr,crargs) = whd_betadeltaiota_stack_using ts env sigma d in let rslt = mkCase (ci, p, applist (cr,crargs), lf) in if reducible_mind_case cr then whrec (rslt, stack) else s | Fix fix -> (match reduce_fix (whd_betadeltaiota_state_using ts env) sigma fix stack with | Reduced s -> whrec s | NotReducible -> s) | _ -> s in whrec s (* A reduction function like whd_betaiota but which keeps casts * and does not reduce redexes containing existential variables. * Used in Correctness. * Added by JCF, 29/1/98. 
*) let whd_programs_stack env sigma = let rec whrec (x, stack as s) = match kind_of_term x with | App (f,cl) -> let n = Array.length cl - 1 in let c = cl.(n) in if occur_existential c then s else whrec (mkApp (f, Array.sub cl 0 n), append_stack [|c|] stack) | LetIn (_,b,_,c) -> if occur_existential b then s else stacklam whrec [b] c stack | Lambda (_,_,c) -> (match decomp_stack stack with | None -> s | Some (a,m) -> stacklam whrec [a] c m) | Case (ci,p,d,lf) -> if occur_existential d then s else let (c,cargs) = whrec (d, empty_stack) in if reducible_mind_case c then whrec (reduce_mind_case {mP=p; mconstr=c; mcargs=list_of_stack cargs; mci=ci; mlf=lf}, stack) else (mkCase (ci, p, app_stack(c,cargs), lf), stack) | Fix fix -> (match reduce_fix (fun _ ->whrec) sigma fix stack with | Reduced s' -> whrec s' | NotReducible -> s) | _ -> s in whrec let whd_programs env sigma x = app_stack (whd_programs_stack env sigma (x, empty_stack)) exception IsType let find_conclusion env sigma = let rec decrec env c = let t = whd_betadeltaiota env sigma c in match kind_of_term t with | Prod (x,t,c0) -> decrec (push_rel (x,None,t) env) c0 | Lambda (x,t,c0) -> decrec (push_rel (x,None,t) env) c0 | t -> t in decrec env let is_arity env sigma c = match find_conclusion env sigma c with | Sort _ -> true | _ -> false (*************************************) (* Metas *) let meta_value evd mv = let rec valrec mv = match meta_opt_fvalue evd mv with | Some (b,_) -> instance evd (List.map (fun mv' -> (mv',valrec mv')) (Metaset.elements b.freemetas)) b.rebus | None -> mkMeta mv in valrec mv let meta_instance sigma b = let c_sigma = List.map (fun mv -> (mv,meta_value sigma mv)) (Metaset.elements b.freemetas) in if c_sigma = [] then b.rebus else instance sigma c_sigma b.rebus let nf_meta sigma c = meta_instance sigma (mk_freelisted c) (* Instantiate metas that create beta/iota redexes *) let meta_reducible_instance evd b = let fm = Metaset.elements b.freemetas in let metas = List.fold_left (fun l mv -> match (try meta_opt_fvalue evd mv with Not_found -> None) with | Some (g,(_,s)) -> (mv,(g.rebus,s))::l | None -> l) [] fm in let rec irec u = let u = whd_betaiota Evd.empty u in match kind_of_term u with | Case (ci,p,c,bl) when isMeta c or isCast c & isMeta (pi1 (destCast c)) -> let m = try destMeta c with e when Errors.noncritical e -> destMeta (pi1 (destCast c)) in (match try let g,s = List.assoc m metas in if isConstruct g or s <> CoerceToType then Some g else None with Not_found -> None with | Some g -> irec (mkCase (ci,p,g,bl)) | None -> mkCase (ci,irec p,c,Array.map irec bl)) | App (f,l) when isMeta f or isCast f & isMeta (pi1 (destCast f)) -> let m = try destMeta f with e when Errors.noncritical e -> destMeta (pi1 (destCast f)) in (match try let g,s = List.assoc m metas in if isLambda g or s <> CoerceToType then Some g else None with Not_found -> None with | Some g -> irec (mkApp (g,l)) | None -> mkApp (f,Array.map irec l)) | Meta m -> (try let g,s = List.assoc m metas in if s<>CoerceToType then irec g else u with Not_found -> u) | _ -> map_constr irec u in if fm = [] then (* nf_betaiota? 
*) b.rebus else irec b.rebus let head_unfold_under_prod ts env _ c = let unfold cst = if Cpred.mem cst (snd ts) then match constant_opt_value env cst with | Some c -> c | None -> mkConst cst else mkConst cst in let rec aux c = match kind_of_term c with | Prod (n,t,c) -> mkProd (n,aux t, aux c) | _ -> let (h,l) = decompose_app c in match kind_of_term h with | Const cst -> beta_applist (unfold cst,l) | _ -> c in aux c coq-8.4pl2/pretyping/namegen.mli0000640000175000001440000000711212010532755015766 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string val sort_hdchar : sorts -> string val hdchar : env -> types -> string val id_of_name_using_hdchar : env -> types -> name -> identifier val named_hd : env -> types -> name -> name val mkProd_name : env -> name * types * types -> types val mkLambda_name : env -> name * types * constr -> constr (** Deprecated synonyms of [mkProd_name] and [mkLambda_name] *) val prod_name : env -> name * types * types -> types val lambda_name : env -> name * types * constr -> constr val prod_create : env -> types * types -> constr val lambda_create : env -> types * constr -> constr val name_assumption : env -> rel_declaration -> rel_declaration val name_context : env -> rel_context -> rel_context val mkProd_or_LetIn_name : env -> types -> rel_declaration -> types val mkLambda_or_LetIn_name : env -> constr -> rel_declaration -> constr val it_mkProd_or_LetIn_name : env -> types -> rel_context -> types val it_mkLambda_or_LetIn_name : env -> constr -> rel_context -> constr (********************************************************************* Fresh names *) (** Avoid clashing with a name satisfying some predicate *) val next_ident_away_from : identifier -> (identifier -> bool) -> identifier (** Avoid clashing with a name of the given list *) val next_ident_away : identifier -> identifier list -> identifier (** Avoid clashing with a name already used in current module *) val next_ident_away_in_goal : identifier -> identifier list -> identifier (** Avoid clashing with a name already used in current module but tolerate overwriting section variables, as in goals *) val next_global_ident_away : identifier -> identifier list -> identifier (** Avoid clashing with a constructor name already used in current module *) val next_name_away_in_cases_pattern : name -> identifier list -> identifier val next_name_away : name -> identifier list -> identifier (** default is "H" *) val next_name_away_with_default : string -> name -> identifier list -> identifier val next_name_away_with_default_using_types : string -> name -> identifier list -> types -> identifier val set_reserved_typed_name : (types -> name) -> unit (********************************************************************* Making name distinct for displaying *) type renaming_flags = | RenamingForCasesPattern (** avoid only global constructors *) | RenamingForGoal (** avoid all globals (as in intro) *) | RenamingElsewhereFor of (name list * constr) val make_all_name_different : env -> env val compute_displayed_name_in : renaming_flags -> identifier list -> name -> constr -> name * identifier list val compute_and_force_displayed_name_in : renaming_flags -> identifier list -> name -> constr -> name * identifier list val compute_displayed_let_name_in : renaming_flags -> identifier list -> name -> constr -> name * identifier list val rename_bound_vars_as_displayed : identifier list -> name list -> types -> types 
coq-8.4pl2/pretyping/glob_term.mli0000640000175000001440000001327012010532755016330 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* loc type patvar = identifier type glob_sort = GProp of Term.contents | GType of Univ.universe option type binding_kind = Lib.binding_kind = Explicit | Implicit type quantified_hypothesis = AnonHyp of int | NamedHyp of identifier type 'a explicit_bindings = (loc * quantified_hypothesis * 'a) list type 'a bindings = | ImplicitBindings of 'a list | ExplicitBindings of 'a explicit_bindings | NoBindings type 'a with_bindings = 'a * 'a bindings type 'a cast_type = | CastConv of cast_kind * 'a | CastCoerce (** Cast to a base type (eg, an underlying inductive type) *) type glob_constr = | GRef of (loc * global_reference) | GVar of (loc * identifier) | GEvar of loc * existential_key * glob_constr list option | GPatVar of loc * (bool * patvar) (** Used for patterns only *) | GApp of loc * glob_constr * glob_constr list | GLambda of loc * name * binding_kind * glob_constr * glob_constr | GProd of loc * name * binding_kind * glob_constr * glob_constr | GLetIn of loc * name * glob_constr * glob_constr | GCases of loc * case_style * glob_constr option * tomatch_tuples * cases_clauses (** [GCases(l,style,r,tur,cc)] = "match 'tur' return 'r' with 'cc'" (in [MatchStyle]) *) | GLetTuple of loc * name list * (name * glob_constr option) * glob_constr * glob_constr | GIf of loc * glob_constr * (name * glob_constr option) * glob_constr * glob_constr | GRec of loc * fix_kind * identifier array * glob_decl list array * glob_constr array * glob_constr array | GSort of loc * glob_sort | GHole of (loc * Evd.hole_kind) | GCast of loc * glob_constr * glob_constr cast_type and glob_decl = name * binding_kind * glob_constr option * glob_constr and fix_recursion_order = GStructRec | GWfRec of glob_constr | GMeasureRec of glob_constr * glob_constr option and fix_kind = | GFix of ((int option * fix_recursion_order) array * int) | GCoFix of int and predicate_pattern = name * (loc * inductive * int * name list) option (** [(na,id)] = "as 'na' in 'id'" where if [id] is [Some(l,I,k,args)], [k] is the number of parameter of [I]. 
*) and tomatch_tuple = (glob_constr * predicate_pattern) and tomatch_tuples = tomatch_tuple list and cases_clause = (loc * identifier list * cases_pattern list * glob_constr) (** [(p,il,cl,t)] = "|'cl' as 'il' => 't'" *) and cases_clauses = cases_clause list val cases_predicate_names : tomatch_tuples -> name list (* Apply one argument to a glob_constr *) val mkGApp : loc -> glob_constr -> glob_constr -> glob_constr val map_glob_constr : (glob_constr -> glob_constr) -> glob_constr -> glob_constr (* Ensure traversal from left to right *) val map_glob_constr_left_to_right : (glob_constr -> glob_constr) -> glob_constr -> glob_constr (* val map_glob_constr_with_binders_loc : loc -> (identifier -> 'a -> identifier * 'a) -> ('a -> glob_constr -> glob_constr) -> 'a -> glob_constr -> glob_constr *) val fold_glob_constr : ('a -> glob_constr -> 'a) -> 'a -> glob_constr -> 'a val iter_glob_constr : (glob_constr -> unit) -> glob_constr -> unit val occur_glob_constr : identifier -> glob_constr -> bool val free_glob_vars : glob_constr -> identifier list val loc_of_glob_constr : glob_constr -> loc (** Conversion from glob_constr to cases pattern, if possible Take the current alias as parameter, @raise Not_found if translation is impossible *) val cases_pattern_of_glob_constr : name -> glob_constr -> cases_pattern val glob_constr_of_closed_cases_pattern : cases_pattern -> name * glob_constr (** {6 Reduction expressions} *) type 'a glob_red_flag = { rBeta : bool; rIota : bool; rZeta : bool; rDelta : bool; (** true = delta all but rConst; false = delta only on rConst*) rConst : 'a list } val all_flags : 'a glob_red_flag type 'a or_var = ArgArg of 'a | ArgVar of identifier located type occurrences_expr = bool * int or_var list val all_occurrences_expr_but : int or_var list -> occurrences_expr val no_occurrences_expr_but : int or_var list -> occurrences_expr val all_occurrences_expr : occurrences_expr val no_occurrences_expr : occurrences_expr type 'a with_occurrences = occurrences_expr * 'a type ('a,'b,'c) red_expr_gen = | Red of bool | Hnf | Simpl of 'c with_occurrences option | Cbv of 'b glob_red_flag | Lazy of 'b glob_red_flag | Unfold of 'b with_occurrences list | Fold of 'a list | Pattern of 'a with_occurrences list | ExtraRedExpr of string | CbvVm type ('a,'b,'c) may_eval = | ConstrTerm of 'a | ConstrEval of ('a,'b,'c) red_expr_gen * 'a | ConstrContext of (loc * identifier) * 'a | ConstrTypeOf of 'a coq-8.4pl2/pretyping/cbv.mli0000640000175000001440000000402712010532755015130 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* env -> Evd.evar_map -> cbv_infos val cbv_norm : cbv_infos -> constr -> constr (*********************************************************************** i This is for cbv debug *) type cbv_value = | VAL of int * constr | STACK of int * cbv_value * cbv_stack | CBN of constr * cbv_value subs | LAM of int * (name * constr) list * constr * cbv_value subs | FIXP of fixpoint * cbv_value subs * cbv_value array | COFIXP of cofixpoint * cbv_value subs * cbv_value array | CONSTR of constructor * cbv_value array and cbv_stack = | TOP | APP of cbv_value array * cbv_stack | CASE of constr * constr array * case_info * cbv_value subs * cbv_stack val shift_value : int -> cbv_value -> cbv_value val stack_app : cbv_value array -> cbv_stack -> cbv_stack val strip_appl : cbv_value -> cbv_stack -> cbv_value * cbv_stack (** recursive functions... 
*) val cbv_stack_term : cbv_infos -> cbv_stack -> cbv_value subs -> constr -> cbv_value val cbv_norm_term : cbv_infos -> cbv_value subs -> constr -> constr val norm_head : cbv_infos -> cbv_value subs -> constr -> cbv_stack -> cbv_value * cbv_stack val apply_stack : cbv_infos -> constr -> cbv_stack -> constr val cbv_norm_value : cbv_infos -> cbv_value -> constr (** End of cbv debug section i*) coq-8.4pl2/pretyping/evarconv.ml0000640000175000001440000007771612125631355016052 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* MaybeFlexible c | Lambda _ when l<>[] -> MaybeFlexible c | LetIn _ -> MaybeFlexible c | Evar ev -> Flexible ev | Lambda _ | Prod _ | Sort _ | Ind _ | Construct _ | CoFix _ -> Rigid c | Meta _ | Case _ | Fix _ -> PseudoRigid c | Cast _ | App _ -> assert false let eval_flexible_term ts env c = match kind_of_term c with | Const c -> if is_transparent_constant ts c then constant_opt_value env c else None | Rel n -> (try let (_,v,_) = lookup_rel n env in Option.map (lift n) v with Not_found -> None) | Var id -> (try if is_transparent_variable ts id then let (_,v,_) = lookup_named id env in v else None with Not_found -> None) | LetIn (_,b,_,c) -> Some (subst1 b c) | Lambda _ -> Some c | _ -> assert false let evar_apprec ts env evd stack c = let sigma = evd in let rec aux s = let (t,stack) = whd_betaiota_deltazeta_for_iota_state ts env sigma s in match kind_of_term t with | Evar (evk,_ as ev) when Evd.is_defined sigma evk -> aux (Evd.existential_value sigma ev, stack) | _ -> (t, list_of_stack stack) in aux (c, append_stack_list stack empty_stack) let apprec_nohdbeta ts env evd c = match kind_of_term (fst (Reductionops.whd_stack evd c)) with | (Case _ | Fix _) -> applist (evar_apprec ts env evd [] c) | _ -> c let position_problem l2r = function | CONV -> None | CUMUL -> Some l2r (* [check_conv_record (t1,l1) (t2,l2)] tries to decompose the problem (t1 l1) = (t2 l2) into a problem l1 = params1@c1::extra_args1 l2 = us2@extra_args2 (t1 params1 c1) = (proji params (c xs)) (t2 us2) = (cstr us) extra_args1 = extra_args2 by finding a record R and an object c := [xs:bs](Build_R params v1..vn) with vi = (cstr us), for which we know that the i-th projection proji satisfies (proji params (c xs)) = (cstr us) Rem: such objects, usable for conversion, are defined in the objdef table; practically, it amounts to "canonically" equip t2 into a object c in structure R (since, if c1 were not an evar, the projection would have been reduced) *) let check_conv_record (t1,l1) (t2,l2) = try let proji = global_of_constr t1 in let canon_s,l2_effective = try match kind_of_term t2 with Prod (_,a,b) -> (* assert (l2=[]); *) if dependent (mkRel 1) b then raise Not_found else lookup_canonical_conversion (proji, Prod_cs),[a;pop b] | Sort s -> lookup_canonical_conversion (proji, Sort_cs (family_of_sort s)),[] | _ -> let c2 = global_of_constr t2 in lookup_canonical_conversion (proji, Const_cs c2),l2 with Not_found -> lookup_canonical_conversion (proji,Default_cs),[] in let { o_DEF = c; o_INJ=n; o_TABS = bs; o_TPARAMS = params; o_NPARAMS = nparams; o_TCOMPS = us } = canon_s in let params1, c1, extra_args1 = match list_chop nparams l1 with | params1, c1::extra_args1 -> params1, c1, extra_args1 | _ -> raise Not_found in let us2,extra_args2 = list_chop (List.length us) l2_effective in c,bs,(params,params1),(us,us2),(extra_args1,extra_args2),c1, (n,applist(t2,l2)) with Failure _ | Not_found -> raise Not_found (* 
Precondition: one of the terms of the pb is an uninstantiated evar, * possibly applied to arguments. *) let rec ise_try evd = function [] -> assert false | [f] -> f evd | f1::l -> let (evd',b) = f1 evd in if b then (evd',b) else ise_try evd l let ise_and evd l = let rec ise_and i = function [] -> assert false | [f] -> f i | f1::l -> let (i',b) = f1 i in if b then ise_and i' l else (evd,false) in ise_and evd l let ise_list2 evd f l1 l2 = let rec ise_list2 i l1 l2 = match l1,l2 with [], [] -> (i, true) | [x], [y] -> f i x y | x::l1, y::l2 -> let (i',b) = f i x y in if b then ise_list2 i' l1 l2 else (evd,false) | _ -> (evd, false) in ise_list2 evd l1 l2 let ise_array2 evd f v1 v2 = let rec allrec i = function | -1 -> (i,true) | n -> let (i',b) = f i v1.(n) v2.(n) in if b then allrec i' (n-1) else (evd,false) in let lv1 = Array.length v1 in if lv1 = Array.length v2 then allrec evd (pred lv1) else (evd,false) let rec evar_conv_x ts env evd pbty term1 term2 = let term1 = whd_head_evar evd term1 in let term2 = whd_head_evar evd term2 in (* Maybe convertible but since reducing can erase evars which [evar_apprec] could have found, we do it only if the terms are free of evar. Note: incomplete heuristic... *) let ground_test = if is_ground_term evd term1 && is_ground_term evd term2 then if is_trans_fconv pbty ts env evd term1 term2 then Some true else if is_ground_env evd env then Some false else None else None in match ground_test with Some b -> (evd,b) | None -> (* Until pattern-unification is used consistently, use nohdbeta to not destroy beta-redexes that can be used for 1st-order unification *) let term1 = apprec_nohdbeta ts env evd term1 in let term2 = apprec_nohdbeta ts env evd term2 in if is_undefined_evar evd term1 then solve_simple_eqn (evar_conv_x ts) env evd (position_problem true pbty,destEvar term1,term2) else if is_undefined_evar evd term2 then solve_simple_eqn (evar_conv_x ts) env evd (position_problem false pbty,destEvar term2,term1) else evar_eqappr_x ts env evd pbty (decompose_app term1) (decompose_app term2) and evar_eqappr_x ?(rhs_is_already_stuck = false) ts env evd pbty (term1,l1 as appr1) (term2,l2 as appr2) = (* Evar must be undefined since we have flushed evars *) match (flex_kind_of_term term1 l1, flex_kind_of_term term2 l2) with | Flexible (sp1,al1 as ev1), Flexible (sp2,al2 as ev2) -> let f1 i = if List.length l1 > List.length l2 then let (deb1,rest1) = list_chop (List.length l1-List.length l2) l1 in ise_and i [(fun i -> solve_simple_eqn (evar_conv_x ts) env i (position_problem false pbty,ev2,applist(term1,deb1))); (fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) rest1 l2)] else let (deb2,rest2) = list_chop (List.length l2-List.length l1) l2 in ise_and i [(fun i -> solve_simple_eqn (evar_conv_x ts) env i (position_problem true pbty,ev1,applist(term2,deb2))); (fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 rest2)] and f2 i = if sp1 = sp2 then ise_and i [(fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 l2); (fun i -> solve_refl (evar_conv_x ts) env i sp1 al1 al2, true)] else (i,false) in ise_try evd [f1; f2] | Flexible ev1, MaybeFlexible flex2 -> let f1 i = match is_unification_pattern_evar env evd ev1 l1 (applist appr2) with | Some l1' -> (* Miller-Pfenning's patterns unification *) (* Preserve generality (except that CCI has no eta-conversion) *) let t2 = nf_evar evd (applist appr2) in let t2 = solve_pattern_eqn env l1' t2 in solve_simple_eqn (evar_conv_x ts) env evd (position_problem true pbty,ev1,t2) | None -> (i,false) and f2 i = if 
List.length l1 <= List.length l2 then (* Try first-order unification *) (* (heuristic that gives acceptable results in practice) *) let (deb2,rest2) = list_chop (List.length l2-List.length l1) l2 in ise_and i (* First compare extra args for better failure message *) [(fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 rest2); (fun i -> evar_conv_x ts env i pbty term1 (applist(term2,deb2)))] else (i,false) and f3 i = match eval_flexible_term ts env flex2 with | Some v2 -> evar_eqappr_x ts env i pbty appr1 (evar_apprec ts env i l2 v2) | None -> (i,false) in ise_try evd [f1; f2; f3] | MaybeFlexible flex1, Flexible ev2 -> let f1 i = match is_unification_pattern_evar env evd ev2 l2 (applist appr1) with | Some l1' -> (* Miller-Pfenning's patterns unification *) (* Preserve generality (except that CCI has no eta-conversion) *) let t1 = nf_evar evd (applist appr1) in let t1 = solve_pattern_eqn env l2 t1 in solve_simple_eqn (evar_conv_x ts) env evd (position_problem false pbty,ev2,t1) | None -> (i,false) and f2 i = if List.length l2 <= List.length l1 then (* Try first-order unification *) (* (heuristic that gives acceptable results in practice) *) let (deb1,rest1) = list_chop (List.length l1-List.length l2) l1 in ise_and i (* First compare extra args for better failure message *) [(fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) rest1 l2); (fun i -> evar_conv_x ts env i pbty (applist(term1,deb1)) term2)] else (i,false) and f3 i = match eval_flexible_term ts env flex1 with | Some v1 -> evar_eqappr_x ts env i pbty (evar_apprec ts env i l1 v1) appr2 | None -> (i,false) in ise_try evd [f1; f2; f3] | MaybeFlexible flex1, MaybeFlexible flex2 -> begin match kind_of_term flex1, kind_of_term flex2 with | LetIn (na,b1,t1,c'1), LetIn (_,b2,_,c'2) -> let f1 i = ise_and i [(fun i -> evar_conv_x ts env i CONV b1 b2); (fun i -> let b = nf_evar i b1 in let t = nf_evar i t1 in evar_conv_x ts (push_rel (na,Some b,t) env) i pbty c'1 c'2); (fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 l2)] and f2 i = let appr1 = evar_apprec ts env i l1 (subst1 b1 c'1) and appr2 = evar_apprec ts env i l2 (subst1 b2 c'2) in evar_eqappr_x ts env i pbty appr1 appr2 in ise_try evd [f1; f2] | _, _ -> let f1 i = if eq_constr flex1 flex2 then ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 l2 else (i,false) and f2 i = (try conv_record ts env i (try check_conv_record appr1 appr2 with Not_found -> check_conv_record appr2 appr1) with Not_found -> (i,false)) and f3 i = (* heuristic: unfold second argument first, exception made if the first argument is a beta-redex (expand a constant only if necessary) or the second argument is potentially usable as a canonical projection or canonical value *) let rec is_unnamed (hd, args) = match kind_of_term hd with | (Var _|Construct _|Ind _|Const _|Prod _|Sort _) -> false | (Case _|Fix _|CoFix _|Meta _|Rel _)-> true | Evar _ -> false (* immediate solution without Canon Struct *) | Lambda _ -> assert(args = []); true | LetIn (_,b,_,c) -> is_unnamed (evar_apprec ts env i args (subst1 b c)) | App _| Cast _ -> assert false in let rhs_is_stuck_and_unnamed () = match eval_flexible_term ts env flex2 with | None -> false | Some v2 -> is_unnamed (evar_apprec ts env i l2 v2) in let rhs_is_already_stuck = rhs_is_already_stuck || rhs_is_stuck_and_unnamed () in if isLambda flex1 || rhs_is_already_stuck then match eval_flexible_term ts env flex1 with | Some v1 -> evar_eqappr_x ~rhs_is_already_stuck ts env i pbty (evar_apprec ts env i l1 v1) appr2 | None -> match eval_flexible_term ts 
env flex2 with | Some v2 -> evar_eqappr_x ts env i pbty appr1 (evar_apprec ts env i l2 v2) | None -> (i,false) else match eval_flexible_term ts env flex2 with | Some v2 -> evar_eqappr_x ts env i pbty appr1 (evar_apprec ts env i l2 v2) | None -> match eval_flexible_term ts env flex1 with | Some v1 -> evar_eqappr_x ts env i pbty (evar_apprec ts env i l1 v1) appr2 | None -> (i,false) in ise_try evd [f1; f2; f3] end | Rigid c1, Rigid c2 when isLambda c1 & isLambda c2 -> let (na,c1,c'1) = destLambda c1 in let (_,c2,c'2) = destLambda c2 in assert (l1=[] & l2=[]); ise_and evd [(fun i -> evar_conv_x ts env i CONV c1 c2); (fun i -> let c = nf_evar i c1 in evar_conv_x ts (push_rel (na,None,c) env) i CONV c'1 c'2)] | Flexible ev1, (Rigid _ | PseudoRigid _) -> (match is_unification_pattern_evar env evd ev1 l1 (applist appr2) with | Some l1 -> (* Miller-Pfenning's pattern unification *) (* Preserve generality thanks to eta-conversion) *) let t2 = nf_evar evd (applist appr2) in let t2 = solve_pattern_eqn env l1 t2 in solve_simple_eqn (evar_conv_x ts) env evd (position_problem true pbty,ev1,t2) | None -> (* Postpone the use of an heuristic *) add_conv_pb (pbty,env,applist appr1,applist appr2) evd, true) | (Rigid _ | PseudoRigid _), Flexible ev2 -> (match is_unification_pattern_evar env evd ev2 l2 (applist appr1) with | Some l2 -> (* Miller-Pfenning's pattern unification *) (* Preserve generality thanks to eta-conversion) *) let t1 = nf_evar evd (applist appr1) in let t1 = solve_pattern_eqn env l2 t1 in solve_simple_eqn (evar_conv_x ts) env evd (position_problem false pbty,ev2,t1) | None -> (* Postpone the use of an heuristic *) add_conv_pb (pbty,env,applist appr1,applist appr2) evd, true) | MaybeFlexible flex1, (Rigid _ | PseudoRigid _) -> let f3 i = (try conv_record ts env i (check_conv_record appr1 appr2) with Not_found -> (i,false)) and f4 i = match eval_flexible_term ts env flex1 with | Some v1 -> evar_eqappr_x ts env i pbty (evar_apprec ts env i l1 v1) appr2 | None -> (i,false) in ise_try evd [f3; f4] | (Rigid _ | PseudoRigid _), MaybeFlexible flex2 -> let f3 i = (try conv_record ts env i (check_conv_record appr2 appr1) with Not_found -> (i,false)) and f4 i = match eval_flexible_term ts env flex2 with | Some v2 -> evar_eqappr_x ts env i pbty appr1 (evar_apprec ts env i l2 v2) | None -> (i,false) in ise_try evd [f3; f4] (* Eta-expansion *) | Rigid c1, _ when isLambda c1 -> assert (l1 = []); let (na,c1,c'1) = destLambda c1 in let c = nf_evar evd c1 in let env' = push_rel (na,None,c) env in let appr1 = evar_apprec ts env' evd [] c'1 in let appr2 = (lift 1 term2, List.map (lift 1) l2 @ [mkRel 1]) in evar_eqappr_x ts env' evd CONV appr1 appr2 | _, Rigid c2 when isLambda c2 -> assert (l2 = []); let (na,c2,c'2) = destLambda c2 in let c = nf_evar evd c2 in let env' = push_rel (na,None,c) env in let appr1 = (lift 1 term1, List.map (lift 1) l1 @ [mkRel 1]) in let appr2 = evar_apprec ts env' evd [] c'2 in evar_eqappr_x ts env' evd CONV appr1 appr2 | Rigid c1, Rigid c2 -> begin match kind_of_term c1, kind_of_term c2 with | Sort s1, Sort s2 when l1=[] & l2=[] -> (try let evd' = if pbty = CONV then Evd.set_eq_sort evd s1 s2 else Evd.set_leq_sort evd s1 s2 in (evd', true) with Univ.UniverseInconsistency _ -> (evd, false) | e when Errors.noncritical e -> (evd, false)) | Prod (n,c1,c'1), Prod (_,c2,c'2) when l1=[] & l2=[] -> ise_and evd [(fun i -> evar_conv_x ts env i CONV c1 c2); (fun i -> let c = nf_evar i c1 in evar_conv_x ts (push_rel (n,None,c) env) i pbty c'1 c'2)] | Ind sp1, Ind sp2 -> if eq_ind sp1 sp2 then 
ise_list2 evd (fun i -> evar_conv_x ts env i CONV) l1 l2 else (evd, false) | Construct sp1, Construct sp2 -> if eq_constructor sp1 sp2 then ise_list2 evd (fun i -> evar_conv_x ts env i CONV) l1 l2 else (evd, false) | CoFix (i1,(_,tys1,bds1 as recdef1)), CoFix (i2,(_,tys2,bds2)) -> if i1=i2 then ise_and evd [(fun i -> ise_array2 i (fun i -> evar_conv_x ts env i CONV) tys1 tys2); (fun i -> ise_array2 i (fun i -> evar_conv_x ts (push_rec_types recdef1 env) i CONV) bds1 bds2); (fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 l2)] else (evd,false) | (Ind _ | Construct _ | Sort _ | Prod _ | CoFix _), _ -> (evd,false) | _, (Ind _ | Construct _ | Sort _ | Prod _ | CoFix _) -> (evd,false) | (App _ | Meta _ | Cast _ | Case _ | Fix _), _ -> assert false | (LetIn _ | Rel _ | Var _ | Const _ | Evar _), _ -> assert false | (Lambda _), _ -> assert false end | PseudoRigid c1, PseudoRigid c2 -> begin match kind_of_term c1, kind_of_term c2 with | Case (_,p1,c1,cl1), Case (_,p2,c2,cl2) -> ise_and evd [(fun i -> evar_conv_x ts env i CONV p1 p2); (fun i -> evar_conv_x ts env i CONV c1 c2); (fun i -> ise_array2 i (fun i -> evar_conv_x ts env i CONV) cl1 cl2); (fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 l2)] | Fix (li1,(_,tys1,bds1 as recdef1)), Fix (li2,(_,tys2,bds2)) -> if li1=li2 then ise_and evd [(fun i -> ise_array2 i (fun i -> evar_conv_x ts env i CONV) tys1 tys2); (fun i -> ise_array2 i (fun i -> evar_conv_x ts (push_rec_types recdef1 env) i CONV) bds1 bds2); (fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) l1 l2)] else (evd,false) | (Meta _ | Case _ | Fix _ | CoFix _), (Meta _ | Case _ | Fix _ | CoFix _) -> (evd,false) | (App _ | Ind _ | Construct _ | Sort _ | Prod _), _ -> assert false | _, (App _ | Ind _ | Construct _ | Sort _ | Prod _) -> assert false | (LetIn _ | Cast _), _ -> assert false | _, (LetIn _ | Cast _) -> assert false | (Lambda _ | Rel _ | Var _ | Const _ | Evar _), _ -> assert false | _, (Lambda _ | Rel _ | Var _ | Const _ | Evar _) -> assert false end | PseudoRigid _, Rigid _ -> (evd,false) | Rigid _, PseudoRigid _ -> (evd,false) and conv_record trs env evd (c,bs,(params,params1),(us,us2),(ts,ts1),c1,(n,t2)) = let (evd',ks,_) = List.fold_left (fun (i,ks,m) b -> if m=n then (i,t2::ks, m-1) else let dloc = (dummy_loc,InternalHole) in let (i',ev) = new_evar i env ~src:dloc (substl ks b) in (i', ev :: ks, m - 1)) (evd,[],List.length bs - 1) bs in ise_and evd' [(fun i -> ise_list2 i (fun i x1 x -> evar_conv_x trs env i CONV x1 (substl ks x)) params1 params); (fun i -> ise_list2 i (fun i u1 u -> evar_conv_x trs env i CONV u1 (substl ks u)) us2 us); (fun i -> evar_conv_x trs env i CONV c1 (applist (c,(List.rev ks)))); (fun i -> ise_list2 i (fun i -> evar_conv_x trs env i CONV) ts ts1)] (* getting rid of the optional argument rhs_is_already_stuck *) let evar_eqappr_x ts env evd pbty appr1 appr2 = evar_eqappr_x ts env evd pbty appr1 appr2 (* We assume here |l1| <= |l2| *) let first_order_unification ts env evd (ev1,l1) (term2,l2) = let (deb2,rest2) = list_chop (List.length l2-List.length l1) l2 in ise_and evd (* First compare extra args for better failure message *) [(fun i -> ise_list2 i (fun i -> evar_conv_x ts env i CONV) rest2 l1); (fun i -> (* Then instantiate evar unless already done by unifying args *) let t2 = applist(term2,deb2) in if is_defined_evar i ev1 then evar_conv_x ts env i CONV t2 (mkEvar ev1) else solve_simple_eqn ~choose:true (evar_conv_x ts) env i (None,ev1,t2))] let choose_less_dependent_instance evk evd term args = let evi = 
Evd.find_undefined evd evk in let subst = make_pure_subst evi args in let subst' = List.filter (fun (id,c) -> eq_constr c term) subst in if subst' = [] then evd, false else Evd.define evk (mkVar (fst (List.hd subst'))) evd, true let apply_on_subterm f c t = let rec applyrec (k,c as kc) t = (* By using eq_constr, we make an approximation, for instance, we *) (* could also be interested in finding a term u convertible to t *) (* such that c occurs in u *) if eq_constr c t then f k else map_constr_with_binders_left_to_right (fun d (k,c) -> (k+1,lift 1 c)) applyrec kc t in applyrec (0,c) t let filter_possible_projections c ty ctxt args = let fv1 = free_rels c in let fv2 = collect_vars c in let tyvars = collect_vars ty in List.map2 (fun (id,_,_) a -> a == c || (* Here we make an approximation, for instance, we could also be *) (* interested in finding a term u convertible to c such that a occurs *) (* in u *) isRel a && Intset.mem (destRel a) fv1 || isVar a && Idset.mem (destVar a) fv2 || Idset.mem id tyvars) ctxt args let initial_evar_data evi = let ids = List.map pi1 (evar_context evi) in (evar_filter evi, List.map mkVar ids) let solve_evars = ref (fun _ -> failwith "solve_evars not installed") let set_solve_evars f = solve_evars := f (* We solve the problem env_rhs |- ?e[u1..un] = rhs knowing * x1:T1 .. xn:Tn |- ev : ty * by looking for a maximal well-typed abtraction over u1..un in rhs * * We first build C[e11..e1p1,..,en1..enpn] obtained from rhs by replacing * all occurrences of u1..un by evars eij of type Ti' where itself Ti' has * been obtained from the type of ui by also replacing all occurrences of * u1..ui-1 by evars. * * Then, we use typing to infer the relations between the different * occurrences. If some occurrence is still unconstrained after typing, * we instantiate successively the unresolved occurrences of un by xn, * of un-1 by xn-1, etc [the idea comes from Chung-Kil Hur, that he * used for his Heq plugin; extensions to several arguments based on a * proposition from Dan Grayson] *) let second_order_matching ts env_rhs evd (evk,args) argoccs rhs = try let args = Array.to_list args in let evi = Evd.find_undefined evd evk in let env_evar = evar_env evi in let sign = named_context_val env_evar in let ctxt = evar_filtered_context evi in let filter = evar_filter evi in let instance = List.map mkVar (List.map pi1 ctxt) in let rec make_subst = function | (id,_,t)::ctxt', c::l, occs::occsl when isVarId id c -> if occs<>None then error "Cannot force abstraction on identity instance." 
else make_subst (ctxt',l,occsl) | (id,_,t)::ctxt', c::l, occs::occsl -> let evs = ref [] in let ty = Retyping.get_type_of env_rhs evd c in let filter' = filter_possible_projections c ty ctxt args in let filter = List.map2 (&&) filter filter' in (id,t,c,ty,evs,filter,occs) :: make_subst (ctxt',l,occsl) | [], [], [] -> [] | _ -> anomaly "Signature, instance and occurrences list do not match" in let rec set_holes evdref rhs = function | (id,_,c,cty,evsref,filter,occs)::subst -> let set_var k = match occs with | Some (false,[]) -> mkVar id | Some _ -> error "Selection of specific occurrences not supported" | None -> let evty = set_holes evdref cty subst in let instance = snd (list_filter2 (fun b c -> b) (filter,instance)) in let evd,ev = new_evar_instance sign !evdref evty ~filter instance in evdref := evd; evsref := (fst (destEvar ev),evty)::!evsref; ev in set_holes evdref (apply_on_subterm set_var c rhs) subst | [] -> rhs in let subst = make_subst (ctxt,args,argoccs) in let evdref = ref evd in let rhs = set_holes evdref rhs subst in let evd = !evdref in (* We instantiate the evars of which the value is forced by typing *) let evd,rhs = try !solve_evars env_evar evd rhs with e when Pretype_errors.precatchable_exception e -> (* Could not revert all subterms *) raise Exit in let rec abstract_free_holes evd = function | (id,idty,c,_,evsref,_,_)::l -> let rec force_instantiation evd = function | (evk,evty)::evs -> let evd = if is_undefined evd evk then (* We force abstraction over this unconstrained occurrence *) (* and we use typing to propagate this instantiation *) (* This is an arbitrary choice *) let evd = Evd.define evk (mkVar id) evd in let evd,b = evar_conv_x ts env_evar evd CUMUL idty evty in if not b then error "Cannot find an instance"; let evd,b = reconsider_conv_pbs (evar_conv_x ts) evd in if not b then error "Cannot find an instance"; evd else evd in force_instantiation evd evs | [] -> abstract_free_holes evd l in force_instantiation evd !evsref | [] -> Evd.define evk rhs evd in abstract_free_holes evd subst, true with Exit -> evd, false let second_order_matching_with_args ts env evd ev l t = (* let evd,ev = evar_absorb_arguments env evd ev l in let argoccs = array_map_to_list (fun _ -> None) (snd ev) in second_order_matching ts env evd ev argoccs t *) (evd,false) let apply_conversion_problem_heuristic ts env evd pbty t1 t2 = let t1 = apprec_nohdbeta ts env evd (whd_head_evar evd t1) in let t2 = apprec_nohdbeta ts env evd (whd_head_evar evd t2) in let (term1,l1 as appr1) = decompose_app t1 in let (term2,l2 as appr2) = decompose_app t2 in match kind_of_term term1, kind_of_term term2 with | Evar (evk1,args1), (Rel _|Var _) when l1 = [] & l2 = [] & List.for_all (fun a -> eq_constr a term2 or isEvar a) (remove_instance_local_defs evd evk1 (Array.to_list args1)) -> (* The typical kind of constraint coming from pattern-matching return type inference *) choose_less_dependent_instance evk1 evd term2 args1 | (Rel _|Var _), Evar (evk2,args2) when l1 = [] & l2 = [] & List.for_all (fun a -> eq_constr a term1 or isEvar a) (remove_instance_local_defs evd evk2 (Array.to_list args2)) -> (* The typical kind of constraint coming from pattern-matching return type inference *) choose_less_dependent_instance evk2 evd term1 args2 | Evar (evk1,args1), Evar (evk2,args2) when evk1 = evk2 -> let f env evd pbty x y = (evd,is_trans_fconv pbty ts env evd x y) in solve_refl ~can_drop:true f env evd evk1 args1 args2, true | Evar ev1, Evar ev2 -> solve_evar_evar ~force:true (evar_define (evar_conv_x ts)) 
(evar_conv_x ts) env evd ev1 ev2, true | Evar ev1,_ when List.length l1 <= List.length l2 -> (* On "?n t1 .. tn = u u1 .. u(n+p)", try first-order unification *) (* and otherwise second-order matching *) ise_try evd [(fun evd -> first_order_unification ts env evd (ev1,l1) appr2); (fun evd -> second_order_matching_with_args ts env evd ev1 l1 (applist appr2))] | _,Evar ev2 when List.length l2 <= List.length l1 -> (* On "u u1 .. u(n+p) = ?n t1 .. tn", try first-order unification *) (* and otherwise second-order matching *) ise_try evd [(fun evd -> first_order_unification ts env evd (ev2,l2) appr1); (fun evd -> second_order_matching_with_args ts env evd ev2 l2 (applist appr1))] | Evar ev1,_ -> (* Try second-order pattern-matching *) second_order_matching_with_args ts env evd ev1 l1 (applist appr2) | _,Evar ev2 -> (* Try second-order pattern-matching *) second_order_matching_with_args ts env evd ev2 l2 (applist appr1) | _ -> (* Some head evar have been instantiated, or unknown kind of problem *) evar_conv_x ts env evd pbty t1 t2 let check_problems_are_solved env evd = match snd (extract_all_conv_pbs evd) with | (pbty,env,t1,t2)::_ -> Pretype_errors.error_cannot_unify env evd (t1, t2) | _ -> () let max_undefined_with_candidates evd = (* If evar were ordered with highest index first, fold_undefined would be going decreasingly and we could use fold_undefined to find the undefined evar of maximum index (alternatively, max_bindings from ocaml 3.12 could be used); instead we traverse the whole map *) let l = Evd.fold_undefined (fun evk ev_info evars -> match ev_info.evar_candidates with | None -> evars | Some l -> (evk,ev_info,l)::evars) evd [] in match l with | [] -> None | a::l -> Some (list_last (a::l)) let rec solve_unconstrained_evars_with_canditates evd = (* max_undefined is supposed to return the most recent, hence possibly most dependent evar *) match max_undefined_with_candidates evd with | None -> evd | Some (evk,ev_info,l) -> let rec aux = function | [] -> error "Unsolvable existential variables." 
| a::l -> try let conv_algo = evar_conv_x full_transparent_state in let evd = check_evar_instance evd evk a conv_algo in let evd = Evd.define evk a evd in let evd,b = reconsider_conv_pbs conv_algo evd in if b then solve_unconstrained_evars_with_canditates evd else aux l with e when Pretype_errors.precatchable_exception e -> aux l in (* List.rev is there to favor most dependent solutions *) (* and favor progress when used with the refine tactics *) let evd = aux (List.rev l) in solve_unconstrained_evars_with_canditates evd let solve_unconstrained_impossible_cases evd = Evd.fold_undefined (fun evk ev_info evd' -> match ev_info.evar_source with | _,ImpossibleCase -> Evd.define evk (j_type (coq_unit_judge ())) evd' | _ -> evd') evd evd let consider_remaining_unif_problems ?(ts=full_transparent_state) env evd = let evd = solve_unconstrained_evars_with_canditates evd in let rec aux evd pbs progress stuck = match pbs with | (pbty,env,t1,t2 as pb) :: pbs -> let evd', b = apply_conversion_problem_heuristic ts env evd pbty t1 t2 in if b then let (evd', rest) = extract_all_conv_pbs evd' in if rest = [] then aux evd' pbs true stuck else (* Unification actually got stuck, postpone *) aux evd pbs progress (pb :: stuck) else Pretype_errors.error_cannot_unify env evd (t1, t2) | _ -> if progress then aux evd stuck false [] else match stuck with | [] -> (* We're finished *) evd | (pbty,env,t1,t2) :: _ -> (* There remain stuck problems *) Pretype_errors.error_cannot_unify env evd (t1, t2) in let (evd,pbs) = extract_all_conv_pbs evd in let heuristic_solved_evd = aux evd pbs false [] in check_problems_are_solved env heuristic_solved_evd; solve_unconstrained_impossible_cases heuristic_solved_evd (* Main entry points *) let the_conv_x ?(ts=full_transparent_state) env t1 t2 evd = match evar_conv_x ts env evd CONV t1 t2 with (evd',true) -> evd' | _ -> raise Reduction.NotConvertible let the_conv_x_leq ?(ts=full_transparent_state) env t1 t2 evd = match evar_conv_x ts env evd CUMUL t1 t2 with (evd', true) -> evd' | _ -> raise Reduction.NotConvertible let e_conv ?(ts=full_transparent_state) env evdref t1 t2 = match evar_conv_x ts env !evdref CONV t1 t2 with (evd',true) -> evdref := evd'; true | _ -> false let e_cumul ?(ts=full_transparent_state) env evdref t1 t2 = match evar_conv_x ts env !evdref CUMUL t1 t2 with (evd',true) -> evdref := evd'; true | _ -> false coq-8.4pl2/pretyping/evd.mli0000640000175000001440000002630712102010667015134 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool) -> Metaset.t -> bool type 'a freelisted = { rebus : 'a; freemetas : Metaset.t } val metavars_of : constr -> Metaset.t val mk_freelisted : constr -> constr freelisted val map_fl : ('a -> 'b) -> 'a freelisted -> 'b freelisted (** Status of an instance found by unification with respect to the meta it solves: - a supertype of the meta (e.g. the solution to ?X <= T is a supertype of ?X) - a subtype of the meta (e.g. the solution to T <= ?X is a subtype of ?X) - a term that can be eta-expanded n times while still being a solution (e.g.
the solution [P] to [?X u v = P u v] can be eta-expanded twice) *) type instance_constraint = IsSuperType | IsSubType | Conv (** Status of the unification of the type of an instance against the type of the meta it instantiates: - CoerceToType means that the unification of types has not been done and that a coercion can still be inserted: the meta should not be substituted freely (this happens for instance given via the "with" binding clause). - TypeProcessed means that the information obtainable from the unification of types has been extracted. - TypeNotProcessed means that the unification of types has not been done but it is known that no coercion may be inserted: the meta can be substituted freely. *) type instance_typing_status = CoerceToType | TypeNotProcessed | TypeProcessed (** Status of an instance together with the status of its type unification *) type instance_status = instance_constraint * instance_typing_status (** Clausal environments *) type clbinding = | Cltyp of name * constr freelisted | Clval of name * (constr freelisted * instance_status) * constr freelisted val map_clb : (constr -> constr) -> clbinding -> clbinding (******************************************************************** ** Kinds of existential variables ***) (** Should the obligation be defined (opaque or transparent (default)) or defined transparent and expanded in the term? *) type obligation_definition_status = Define of bool | Expand (** Evars *) type hole_kind = | ImplicitArg of global_reference * (int * identifier option) * bool (** Force inference *) | BinderType of name | QuestionMark of obligation_definition_status | CasesType | InternalHole | TomatchTypeParameter of inductive * int | GoalEvar | ImpossibleCase | MatchingVar of bool * identifier (******************************************************************** ** Existential variables and unification states ***) (** A unification state (of type [evar_map]) is primarily a finite mapping from existential variables to records containing the type of the evar ([evar_concl]), the context under which it was introduced ([evar_hyps]) and its definition ([evar_body]). [evar_extra] is used to add any other kind of information. It also contains conversion constraints, debugging information and information about meta variables. *) (** Information about existential variables. 
*) type evar = existential_key val string_of_existential : evar -> string val existential_of_int : int -> evar type evar_body = | Evar_empty | Evar_defined of constr type evar_info = { evar_concl : constr; evar_hyps : named_context_val; evar_body : evar_body; evar_filter : bool list; evar_source : hole_kind located; evar_candidates : constr list option; evar_extra : Store.t } val eq_evar_info : evar_info -> evar_info -> bool val make_evar : named_context_val -> types -> evar_info val evar_concl : evar_info -> constr val evar_context : evar_info -> named_context val evar_filtered_context : evar_info -> named_context val evar_hyps : evar_info -> named_context_val val evar_filtered_hyps : evar_info -> named_context_val val evar_body : evar_info -> evar_body val evar_filter : evar_info -> bool list val evar_unfiltered_env : evar_info -> env val evar_env : evar_info -> env (*** Unification state ***) type evar_map (** Unification state and existential variables *) (** Assuming that the second map extends the first one, this says if some existing evar has been refined *) val progress_evar_map : evar_map -> evar_map -> bool val empty : evar_map val is_empty : evar_map -> bool (** [has_undefined sigma] is [true] if and only if there are uninstantiated evars in [sigma]. *) val has_undefined : evar_map -> bool (** [add sigma ev info] adds [ev] with evar info [info] in sigma. Precondition: ev must not preexist in [sigma]. *) val add : evar_map -> evar -> evar_info -> evar_map val find : evar_map -> evar -> evar_info val find_undefined : evar_map -> evar -> evar_info val remove : evar_map -> evar -> evar_map val mem : evar_map -> evar -> bool val undefined_list : evar_map -> (evar * evar_info) list val to_list : evar_map -> (evar * evar_info) list val fold : (evar -> evar_info -> 'a -> 'a) -> evar_map -> 'a -> 'a val fold_undefined : (evar -> evar_info -> 'a -> 'a) -> evar_map -> 'a -> 'a val merge : evar_map -> evar_map -> evar_map val define : evar -> constr -> evar_map -> evar_map val is_evar : evar_map -> evar -> bool val is_defined : evar_map -> evar -> bool val is_undefined : evar_map -> evar -> bool val add_constraints : evar_map -> Univ.constraints -> evar_map (** {6 ... } *) (** [existential_value sigma ev] raises [NotInstantiatedEvar] if [ev] has no body and [Not_found] if it does not exist in [sigma] *) exception NotInstantiatedEvar val existential_value : evar_map -> existential -> constr val existential_type : evar_map -> existential -> types val existential_opt_value : evar_map -> existential -> constr option val instantiate_evar : named_context -> constr -> constr list -> constr (** Assume empty universe constraints in [evar_map] and [conv_pbs] *) val subst_evar_defs_light : substitution -> evar_map -> evar_map (** spiwack: this function seems to somewhat break the abstraction. *) val evars_reset_evd : ?with_conv_pbs:bool -> evar_map -> evar_map -> evar_map (* spiwack: [is_undefined_evar] should be considered a candidate for moving to evarutils *) val is_undefined_evar : evar_map -> constr -> bool val undefined_evars : evar_map -> evar_map val defined_evars : evar_map -> evar_map (* [fold_undefined f m] iterates ("folds") function [f] over the undefined evars (that is, whose value is [Evar_empty]) of map [m]. 
It optimizes the call of {!Evd.fold} to [f] and [undefined_evars m] *) val fold_undefined : (evar -> evar_info -> 'a -> 'a) -> evar_map -> 'a -> 'a val evar_declare : named_context_val -> evar -> types -> ?src:loc * hole_kind -> ?filter:bool list -> ?candidates:constr list -> evar_map -> evar_map val evar_source : existential_key -> evar_map -> hole_kind located (* spiwack: this function seems to somewhat break the abstraction. [evar_merge evd ev1] extends the evars of [evd] with [evd1] *) val evar_merge : evar_map -> evar_map -> evar_map (** Unification constraints *) type conv_pb = Reduction.conv_pb type evar_constraint = conv_pb * env * constr * constr val add_conv_pb : evar_constraint -> evar_map -> evar_map module ExistentialMap : Map.S with type key = existential_key module ExistentialSet : Set.S with type elt = existential_key val extract_changed_conv_pbs : evar_map -> (ExistentialSet.t -> evar_constraint -> bool) -> evar_map * evar_constraint list val extract_all_conv_pbs : evar_map -> evar_map * evar_constraint list val evar_list : evar_map -> constr -> existential list val collect_evars : constr -> ExistentialSet.t (** Metas *) val find_meta : evar_map -> metavariable -> clbinding val meta_list : evar_map -> (metavariable * clbinding) list val meta_defined : evar_map -> metavariable -> bool (** [meta_fvalue] raises [Not_found] if meta not in map or [Anomaly] if meta has no value *) val meta_value : evar_map -> metavariable -> constr val meta_fvalue : evar_map -> metavariable -> constr freelisted * instance_status val meta_opt_fvalue : evar_map -> metavariable -> (constr freelisted * instance_status) option val meta_type : evar_map -> metavariable -> types val meta_ftype : evar_map -> metavariable -> types freelisted val meta_name : evar_map -> metavariable -> name val meta_with_name : evar_map -> identifier -> metavariable val meta_declare : metavariable -> types -> ?name:name -> evar_map -> evar_map val meta_assign : metavariable -> constr * instance_status -> evar_map -> evar_map val meta_reassign : metavariable -> constr * instance_status -> evar_map -> evar_map (** [meta_merge evd1 evd2] returns [evd2] extended with the metas of [evd1] *) val meta_merge : evar_map -> evar_map -> evar_map val undefined_metas : evar_map -> metavariable list val metas_of : evar_map -> meta_type_map val map_metas_fvalue : (constr -> constr) -> evar_map -> evar_map type metabinding = metavariable * constr * instance_status val retract_coercible_metas : evar_map -> metabinding list * evar_map val subst_defined_metas : metabinding list -> constr -> constr option (********************************************************* Sort variables *) val new_univ_variable : evar_map -> evar_map * Univ.universe val new_sort_variable : evar_map -> evar_map * sorts val is_sort_variable : evar_map -> sorts -> bool val whd_sort_variable : evar_map -> constr -> constr val set_leq_sort : evar_map -> sorts -> sorts -> evar_map val set_eq_sort : evar_map -> sorts -> sorts -> evar_map (******************************************************************** constr with holes *) type open_constr = evar_map * constr (******************************************************************** The type constructor ['a sigma] adds an evar map to an object of type ['a] *) type 'a sigma = { it : 'a ; sigma : evar_map} val sig_it : 'a sigma -> 'a val sig_sig : 'a sigma -> evar_map (********************************************************* Failure explanation *) type unsolvability_explanation = SeveralInstancesFound of int 
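(* Editor's illustrative sketch, kept inside a comment so the interface above
   stays unchanged: it shows how the sort-constraint API declared in this file
   ([set_eq_sort], [set_leq_sort]) is typically driven, mirroring the [Sort]
   case of [evar_eqappr_x] in evarconv.ml, which turns a universe
   inconsistency into a boolean failure instead of an exception. The wrapper
   name [check_sorts] and its arguments are hypothetical; only the [Evd],
   [Reduction] and [Univ] entry points it mentions are taken from the sources.

   let check_sorts pbty (sigma : Evd.evar_map) s1 s2 =
     try
       let sigma' =
         if pbty = Reduction.CONV then Evd.set_eq_sort sigma s1 s2
         else Evd.set_leq_sort sigma s1 s2
       in
       (sigma', true)
     with Univ.UniverseInconsistency _ -> (sigma, false)

   Returning an [evar_map * bool] pair follows the convention used by the
   conversion functions of evarconv.ml. *)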
(******************************************************************** debug pretty-printer: *) val pr_evar_info : evar_info -> Pp.std_ppcmds val pr_evar_map_constraints : evar_map -> Pp.std_ppcmds val pr_evar_map : int option -> evar_map -> Pp.std_ppcmds val pr_metaset : Metaset.t -> Pp.std_ppcmds (*** /!\Deprecated /!\ ** create an [evar_map] with empty meta map: *) val create_evar_defs : evar_map -> evar_map val create_goal_evar_defs : evar_map -> evar_map val is_defined_evar : evar_map -> existential -> bool val subst_evar_map : substitution -> evar_map -> evar_map (*** /Deprecaded ***) coq-8.4pl2/pretyping/classops.ml0000640000175000001440000003235412010532755016040 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int * cl_info_typ *) let class_info cl = Bijint.revmap cl !class_tab let class_exists cl = Bijint.mem cl !class_tab (* class_info_from_index : int -> cl_typ * cl_info_typ *) let class_info_from_index i = Bijint.map i !class_tab let cl_fun_index = fst(class_info CL_FUN) let cl_sort_index = fst(class_info CL_SORT) (* coercion_info : coe_typ -> coe_info_typ *) let coercion_info coe = Gmap.find coe !coercion_tab let coercion_exists coe = Gmap.mem coe !coercion_tab (* find_class_type : evar_map -> constr -> cl_typ * constr list *) let find_class_type sigma t = let t', args = Reductionops.whd_betaiotazeta_stack sigma t in match kind_of_term t' with | Var id -> CL_SECVAR id, args | Const sp -> CL_CONST sp, args | Ind ind_sp -> CL_IND ind_sp, args | Prod (_,_,_) -> CL_FUN, [] | Sort _ -> CL_SORT, [] | _ -> raise Not_found let subst_cl_typ subst ct = match ct with CL_SORT | CL_FUN | CL_SECVAR _ -> ct | CL_CONST kn -> let kn',t = subst_con subst kn in if kn' == kn then ct else fst (find_class_type Evd.empty t) | CL_IND (kn,i) -> let kn' = subst_ind subst kn in if kn' == kn then ct else CL_IND (kn',i) (*CSC: here we should change the datatype for coercions: it should be possible to declare any term as a coercion *) let subst_coe_typ subst t = fst (subst_global subst t) (* class_of : Term.constr -> int *) let class_of env sigma t = let (t, n1, i, args) = try let (cl,args) = find_class_type sigma t in let (i, { cl_param = n1 } ) = class_info cl in (t, n1, i, args) with Not_found -> let t = Tacred.hnf_constr env sigma t in let (cl, args) = find_class_type sigma t in let (i, { cl_param = n1 } ) = class_info cl in (t, n1, i, args) in if List.length args = n1 then t, i else raise Not_found let inductive_class_of ind = fst (class_info (CL_IND ind)) let class_args_of env sigma c = snd (find_class_type sigma c) let string_of_class = function | CL_FUN -> "Funclass" | CL_SORT -> "Sortclass" | CL_CONST sp -> string_of_qualid (shortest_qualid_of_global Idset.empty (ConstRef sp)) | CL_IND sp -> string_of_qualid (shortest_qualid_of_global Idset.empty (IndRef sp)) | CL_SECVAR sp -> string_of_qualid (shortest_qualid_of_global Idset.empty (VarRef sp)) let pr_class x = str (string_of_class x) (* lookup paths *) let lookup_path_between_class (s,t) = Gmap.find (s,t) !inheritance_graph let lookup_path_to_fun_from_class s = lookup_path_between_class (s,cl_fun_index) let lookup_path_to_sort_from_class s = lookup_path_between_class (s,cl_sort_index) (* advanced path lookup *) let apply_on_class_of env sigma t cont = try let (cl,args) = find_class_type sigma t in let (i, { cl_param = n1 } ) = class_info cl in if List.length args <> n1 then raise Not_found; t, cont i with Not_found -> (* Is it worth to be 
more incremental on the delta steps? *) let t = Tacred.hnf_constr env sigma t in let (cl, args) = find_class_type sigma t in let (i, { cl_param = n1 } ) = class_info cl in if List.length args <> n1 then raise Not_found; t, cont i let lookup_path_between env sigma (s,t) = let (s,(t,p)) = apply_on_class_of env sigma s (fun i -> apply_on_class_of env sigma t (fun j -> lookup_path_between_class (i,j))) in (s,t,p) let lookup_path_to_fun_from env sigma s = apply_on_class_of env sigma s lookup_path_to_fun_from_class let lookup_path_to_sort_from env sigma s = apply_on_class_of env sigma s lookup_path_to_sort_from_class let get_coercion_constructor coe = let c, _ = Reductionops.whd_betadeltaiota_stack (Global.env()) Evd.empty coe.coe_value in match kind_of_term c with | Construct cstr -> (cstr, Inductiveops.constructor_nrealargs (Global.env()) cstr -1) | _ -> raise Not_found let lookup_pattern_path_between (s,t) = let i = inductive_class_of s in let j = inductive_class_of t in List.map get_coercion_constructor (Gmap.find (i,j) !inheritance_graph) (* coercion_value : coe_index -> unsafe_judgment * bool *) let coercion_value { coe_value = c; coe_type = t; coe_is_identity = b } = (make_judge c t, b) (* pretty-print functions are now in Pretty *) (* add a coercion to the inheritance graph *) let path_printer = ref (fun _ -> str "" : (int * int) * inheritance_path -> std_ppcmds) let install_path_printer f = path_printer := f let print_path x = !path_printer x let message_ambig l = (str"Ambiguous paths:" ++ spc () ++ prlist_with_sep pr_fnl (fun ijp -> print_path ijp) l) (* add_coercion_in_graph : coe_index * cl_index * cl_index -> unit coercion,source,target *) let different_class_params i j = (snd (class_info_from_index i)).cl_param > 0 let add_coercion_in_graph (ic,source,target) = let old_inheritance_graph = !inheritance_graph in let ambig_paths = (ref [] : ((cl_index * cl_index) * inheritance_path) list ref) in let try_add_new_path (i,j as ij) p = try if i=j then begin if different_class_params i j then begin let _ = lookup_path_between_class ij in ambig_paths := (ij,p)::!ambig_paths end end else begin let _ = lookup_path_between_class (i,j) in ambig_paths := (ij,p)::!ambig_paths end; false with Not_found -> begin add_new_path ij p; true end in let try_add_new_path1 ij p = let _ = try_add_new_path ij p in () in if try_add_new_path (source,target) [ic] then begin Gmap.iter (fun (s,t) p -> if s<>t then begin if t = source then begin try_add_new_path1 (s,target) (p@[ic]); Gmap.iter (fun (u,v) q -> if u<>v & u = target && not (list_equal coe_info_typ_equal p q) then try_add_new_path1 (s,v) (p@[ic]@q)) old_inheritance_graph end; if s = target then try_add_new_path1 (source,t) (ic::p) end) old_inheritance_graph end; if (!ambig_paths <> []) && is_verbose () then ppnl (message_ambig !ambig_paths) type coercion = coe_typ * locality * bool * cl_typ * cl_typ * int (* Compute the arity of a class *) let reference_arity_length ref = let t = Global.type_of_global ref in List.length (fst (Reductionops.splay_arity (Global.env()) Evd.empty t)) let class_params = function | CL_FUN | CL_SORT -> 0 | CL_CONST sp -> reference_arity_length (ConstRef sp) | CL_SECVAR sp -> reference_arity_length (VarRef sp) | CL_IND sp -> reference_arity_length (IndRef sp) (* add_class : cl_typ -> locality_flag option -> bool -> unit *) let add_class cl = add_new_class cl { cl_param = class_params cl } let automatically_import_coercions = ref false open Goptions let _ = declare_bool_option { optsync = true; optdepr = false; optname =
"automatic import of coercions"; optkey = ["Automatic";"Coercions";"Import"]; optread = (fun () -> !automatically_import_coercions); optwrite = (:=) automatically_import_coercions } let cache_coercion (_,(coe,stre,isid,cls,clt,ps)) = add_class cls; add_class clt; let is,_ = class_info cls in let it,_ = class_info clt in let xf = { coe_value = constr_of_global coe; coe_type = Global.type_of_global coe; coe_strength = stre; coe_is_identity = isid; coe_param = ps } in add_new_coercion coe xf; add_coercion_in_graph (xf,is,it) let load_coercion _ o = if !automatically_import_coercions || Flags.version_less_or_equal Flags.V8_2 then cache_coercion o let open_coercion i o = if i = 1 && not (!automatically_import_coercions || Flags.version_less_or_equal Flags.V8_2) then cache_coercion o let subst_coercion (subst,(coe,stre,isid,cls,clt,ps as obj)) = let coe' = subst_coe_typ subst coe in let cls' = subst_cl_typ subst cls in let clt' = subst_cl_typ subst clt in if coe' == coe && cls' == cls & clt' == clt then obj else (coe',stre,isid,cls',clt',ps) let discharge_cl = function | CL_CONST kn -> CL_CONST (Lib.discharge_con kn) | CL_IND ind -> CL_IND (Lib.discharge_inductive ind) | cl -> cl let discharge_coercion (_,(coe,stre,isid,cls,clt,ps)) = if stre = Local then None else let n = try Array.length (Lib.section_instance coe) with Not_found -> 0 in Some (Lib.discharge_global coe, stre, isid, discharge_cl cls, discharge_cl clt, n + ps) let classify_coercion (coe,stre,isid,cls,clt,ps as obj) = if stre = Local then Dispose else Substitute obj type coercion_obj = coe_typ * Decl_kinds.locality * bool * cl_typ * cl_typ * int let inCoercion : coercion_obj -> obj = declare_object {(default_object "COERCION") with open_function = open_coercion; load_function = load_coercion; cache_function = cache_coercion; subst_function = subst_coercion; classify_function = classify_coercion; discharge_function = discharge_coercion } let declare_coercion coef stre ~isid ~src:cls ~target:clt ~params:ps = Lib.add_anonymous_leaf (inCoercion (coef,stre,isid,cls,clt,ps)) (* For printing purpose *) let get_coercion_value v = v.coe_value let pr_cl_index n = int n let classes () = Bijint.dom !class_tab let coercions () = Gmap.rng !coercion_tab let inheritance_graph () = Gmap.to_list !inheritance_graph let coercion_of_reference r = let ref = Nametab.global r in if not (coercion_exists ref) then errorlabstrm "try_add_coercion" (Nametab.pr_global_env Idset.empty ref ++ str" is not a coercion."); ref module CoercionPrinting = struct type t = coe_typ let encode = coercion_of_reference let subst = subst_coe_typ let printer x = pr_global_env Idset.empty x let key = ["Printing";"Coercion"] let title = "Explicitly printed coercions: " let member_message x b = str "Explicit printing of coercion " ++ printer x ++ str (if b then " is set" else " is unset") let synchronous = true end module PrintingCoercion = Goptions.MakeRefTable(CoercionPrinting) let hide_coercion coe = if not (PrintingCoercion.active coe) then let coe_info = coercion_info coe in Some coe_info.coe_param else None coq-8.4pl2/pretyping/termops.ml0000640000175000001440000011324012010532755015674 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (str "Set") | Prop Null -> (str "Prop") | Type u -> (str "Type(" ++ Univ.pr_uni u ++ str ")") let pr_sort_family = function | InSet -> (str "Set") | InProp -> (str "Prop") | InType -> (str "Type") let pr_name = function | Name id -> pr_id 
id | Anonymous -> str "_" let pr_con sp = str(string_of_con sp) let rec pr_constr c = match kind_of_term c with | Rel n -> str "#"++int n | Meta n -> str "Meta(" ++ int n ++ str ")" | Var id -> pr_id id | Sort s -> print_sort s | Cast (c,_, t) -> hov 1 (str"(" ++ pr_constr c ++ cut() ++ str":" ++ pr_constr t ++ str")") | Prod (Name(id),t,c) -> hov 1 (str"forall " ++ pr_id id ++ str":" ++ pr_constr t ++ str"," ++ spc() ++ pr_constr c) | Prod (Anonymous,t,c) -> hov 0 (str"(" ++ pr_constr t ++ str " ->" ++ spc() ++ pr_constr c ++ str")") | Lambda (na,t,c) -> hov 1 (str"fun " ++ pr_name na ++ str":" ++ pr_constr t ++ str" =>" ++ spc() ++ pr_constr c) | LetIn (na,b,t,c) -> hov 0 (str"let " ++ pr_name na ++ str":=" ++ pr_constr b ++ str":" ++ brk(1,2) ++ pr_constr t ++ cut() ++ pr_constr c) | App (c,l) -> hov 1 (str"(" ++ pr_constr c ++ spc() ++ prlist_with_sep spc pr_constr (Array.to_list l) ++ str")") | Evar (e,l) -> hov 1 (str"Evar#" ++ int e ++ str"{" ++ prlist_with_sep spc pr_constr (Array.to_list l) ++str"}") | Const c -> str"Cst(" ++ pr_con c ++ str")" | Ind (sp,i) -> str"Ind(" ++ pr_mind sp ++ str"," ++ int i ++ str")" | Construct ((sp,i),j) -> str"Constr(" ++ pr_mind sp ++ str"," ++ int i ++ str"," ++ int j ++ str")" | Case (ci,p,c,bl) -> v 0 (hv 0 (str"<"++pr_constr p++str">"++ cut() ++ str"Case " ++ pr_constr c ++ str"of") ++ cut() ++ prlist_with_sep (fun _ -> brk(1,2)) pr_constr (Array.to_list bl) ++ cut() ++ str"end") | Fix ((t,i),(lna,tl,bl)) -> let fixl = Array.mapi (fun i na -> (na,t.(i),tl.(i),bl.(i))) lna in hov 1 (str"fix " ++ int i ++ spc() ++ str"{" ++ v 0 (prlist_with_sep spc (fun (na,i,ty,bd) -> pr_name na ++ str"/" ++ int i ++ str":" ++ pr_constr ty ++ cut() ++ str":=" ++ pr_constr bd) (Array.to_list fixl)) ++ str"}") | CoFix(i,(lna,tl,bl)) -> let fixl = Array.mapi (fun i na -> (na,tl.(i),bl.(i))) lna in hov 1 (str"cofix " ++ int i ++ spc() ++ str"{" ++ v 0 (prlist_with_sep spc (fun (na,ty,bd) -> pr_name na ++ str":" ++ pr_constr ty ++ cut() ++ str":=" ++ pr_constr bd) (Array.to_list fixl)) ++ str"}") let term_printer = ref (fun _ -> pr_constr) let print_constr_env t = !term_printer t let print_constr t = !term_printer (Global.env()) t let set_print_constr f = term_printer := f let pr_var_decl env (id,c,typ) = let pbody = match c with | None -> (mt ()) | Some c -> (* Force evaluation *) let pb = print_constr_env env c in (str" := " ++ pb ++ cut () ) in let pt = print_constr_env env typ in let ptyp = (str" : " ++ pt) in (pr_id id ++ hov 0 (pbody ++ ptyp)) let pr_rel_decl env (na,c,typ) = let pbody = match c with | None -> mt () | Some c -> (* Force evaluation *) let pb = print_constr_env env c in (str":=" ++ spc () ++ pb ++ spc ()) in let ptyp = print_constr_env env typ in match na with | Anonymous -> hov 0 (str"<>" ++ spc () ++ pbody ++ str":" ++ spc () ++ ptyp) | Name id -> hov 0 (pr_id id ++ spc () ++ pbody ++ str":" ++ spc () ++ ptyp) let print_named_context env = hv 0 (fold_named_context (fun env d pps -> pps ++ ws 2 ++ pr_var_decl env d) env ~init:(mt ())) let print_rel_context env = hv 0 (fold_rel_context (fun env d pps -> pps ++ ws 2 ++ pr_rel_decl env d) env ~init:(mt ())) let print_env env = let sign_env = fold_named_context (fun env d pps -> let pidt = pr_var_decl env d in (pps ++ fnl () ++ pidt)) env ~init:(mt ()) in let db_env = fold_rel_context (fun env d pps -> let pnat = pr_rel_decl env d in (pps ++ fnl () ++ pnat)) env ~init:(mt ()) in (sign_env ++ db_env) (*let current_module = ref empty_dirpath let set_module m = current_module := m*) let new_univ_level 
= let univ_gen = ref 0 in (fun sp -> incr univ_gen; Univ.make_universe_level (Lib.library_dp(),!univ_gen)) let new_univ () = Univ.make_universe (new_univ_level ()) let new_Type () = mkType (new_univ ()) let new_Type_sort () = Type (new_univ ()) (* This refreshes universes in types; works only for inferred types (i.e. for types of the form (x1:A1)...(xn:An)B with B a sort or an atom in head normal form) *) let refresh_universes_gen strict t = let modified = ref false in let rec refresh t = match kind_of_term t with | Sort (Type u) when strict or u <> Univ.type0m_univ -> modified := true; new_Type () | Prod (na,u,v) -> mkProd (na,u,refresh v) | _ -> t in let t' = refresh t in if !modified then t' else t let refresh_universes = refresh_universes_gen false let refresh_universes_strict = refresh_universes_gen true let new_sort_in_family = function | InProp -> prop_sort | InSet -> set_sort | InType -> Type (new_univ ()) (* [Rel (n+m);...;Rel(n+1)] *) let rel_vect n m = Array.init m (fun i -> mkRel(n+m-i)) let rel_list n m = let rec reln l p = if p>m then l else reln (mkRel(n+p)::l) (p+1) in reln [] 1 (* Same as [rel_list] but takes a context as argument and skips let-ins *) let extended_rel_list n hyps = let rec reln l p = function | (_,None,_) :: hyps -> reln (mkRel (n+p) :: l) (p+1) hyps | (_,Some _,_) :: hyps -> reln l (p+1) hyps | [] -> l in reln [] 1 hyps let extended_rel_vect n hyps = Array.of_list (extended_rel_list n hyps) let push_rel_assum (x,t) env = push_rel (x,None,t) env let push_rels_assum assums = push_rel_context (List.map (fun (x,t) -> (x,None,t)) assums) let push_named_rec_types (lna,typarray,_) env = let ctxt = array_map2_i (fun i na t -> match na with | Name id -> (id, None, lift i t) | Anonymous -> anomaly "Fix declarations must be named") lna typarray in Array.fold_left (fun e assum -> push_named assum e) env ctxt let rec lookup_rel_id id sign = let rec lookrec = function | (n, (Anonymous,_,_)::l) -> lookrec (n+1,l) | (n, (Name id',b,t)::l) -> if id' = id then (n,b,t) else lookrec (n+1,l) | (_, []) -> raise Not_found in lookrec (1,sign) (* Constructs either [forall x:t, c] or [let x:=b:t in c] *) let mkProd_or_LetIn (na,body,t) c = match body with | None -> mkProd (na, t, c) | Some b -> mkLetIn (na, b, t, c) (* Constructs either [forall x:t, c] or [c] in which [x] is replaced by [b] *) let mkProd_wo_LetIn (na,body,t) c = match body with | None -> mkProd (na, t, c) | Some b -> subst1 b c let it_mkProd init = List.fold_left (fun c (n,t) -> mkProd (n, t, c)) init let it_mkLambda init = List.fold_left (fun c (n,t) -> mkLambda (n, t, c)) init let it_named_context_quantifier f ~init = List.fold_left (fun c d -> f d c) init let it_mkProd_or_LetIn init = it_named_context_quantifier mkProd_or_LetIn ~init let it_mkProd_wo_LetIn init = it_named_context_quantifier mkProd_wo_LetIn ~init let it_mkLambda_or_LetIn init = it_named_context_quantifier mkLambda_or_LetIn ~init let it_mkNamedProd_or_LetIn init = it_named_context_quantifier mkNamedProd_or_LetIn ~init let it_mkNamedProd_wo_LetIn init = it_named_context_quantifier mkNamedProd_wo_LetIn ~init let it_mkNamedLambda_or_LetIn init = it_named_context_quantifier mkNamedLambda_or_LetIn ~init (* *) (* strips head casts and flattens head applications *) let rec strip_head_cast c = match kind_of_term c with | App (f,cl) -> let rec collapse_rec f cl2 = match kind_of_term f with | App (g,cl1) -> collapse_rec g (Array.append cl1 cl2) | Cast (c,_,_) -> collapse_rec c cl2 | _ -> if Array.length cl2 = 0 then f else mkApp (f,cl2) in collapse_rec f 
cl | Cast (c,_,_) -> strip_head_cast c | _ -> c let rec drop_extra_implicit_args c = match kind_of_term c with (* Removed trailing extra implicit arguments, what improves compatibility for constants with recently added maximal implicit arguments *) | App (f,args) when isEvar (array_last args) -> drop_extra_implicit_args (mkApp (f,fst (array_chop (Array.length args - 1) args))) | _ -> c (* Get the last arg of an application *) let last_arg c = match kind_of_term c with | App (f,cl) -> array_last cl | _ -> anomaly "last_arg" (* Get the last arg of an application *) let decompose_app_vect c = match kind_of_term c with | App (f,cl) -> (f, cl) | _ -> (c,[||]) let adjust_app_list_size f1 l1 f2 l2 = let len1 = List.length l1 and len2 = List.length l2 in if len1 = len2 then (f1,l1,f2,l2) else if len1 < len2 then let extras,restl2 = list_chop (len2-len1) l2 in (f1, l1, applist (f2,extras), restl2) else let extras,restl1 = list_chop (len1-len2) l1 in (applist (f1,extras), restl1, f2, l2) let adjust_app_array_size f1 l1 f2 l2 = let len1 = Array.length l1 and len2 = Array.length l2 in if len1 = len2 then (f1,l1,f2,l2) else if len1 < len2 then let extras,restl2 = array_chop (len2-len1) l2 in (f1, l1, appvect (f2,extras), restl2) else let extras,restl1 = array_chop (len1-len2) l1 in (appvect (f1,extras), restl1, f2, l2) (* [map_constr_with_named_binders g f l c] maps [f l] on the immediate subterms of [c]; it carries an extra data [l] (typically a name list) which is processed by [g na] (which typically cons [na] to [l]) at each binder traversal (with name [na]); it is not recursive and the order with which subterms are processed is not specified *) let map_constr_with_named_binders g f l c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> c | Cast (c,k,t) -> mkCast (f l c, k, f l t) | Prod (na,t,c) -> mkProd (na, f l t, f (g na l) c) | Lambda (na,t,c) -> mkLambda (na, f l t, f (g na l) c) | LetIn (na,b,t,c) -> mkLetIn (na, f l b, f l t, f (g na l) c) | App (c,al) -> mkApp (f l c, Array.map (f l) al) | Evar (e,al) -> mkEvar (e, Array.map (f l) al) | Case (ci,p,c,bl) -> mkCase (ci, f l p, f l c, Array.map (f l) bl) | Fix (ln,(lna,tl,bl)) -> let l' = Array.fold_left (fun l na -> g na l) l lna in mkFix (ln,(lna,Array.map (f l) tl,Array.map (f l') bl)) | CoFix(ln,(lna,tl,bl)) -> let l' = Array.fold_left (fun l na -> g na l) l lna in mkCoFix (ln,(lna,Array.map (f l) tl,Array.map (f l') bl)) (* [map_constr_with_binders_left_to_right g f n c] maps [f n] on the immediate subterms of [c]; it carries an extra data [n] (typically a lift index) which is processed by [g] (which typically add 1 to [n]) at each binder traversal; the subterms are processed from left to right according to the usual representation of the constructions (this may matter if [f] does a side-effect); it is not recursive; in fact, the usual representation of the constructions is at the time being almost those of the ML representation (except for (co-)fixpoint) *) let fold_rec_types g (lna,typarray,_) e = let ctxt = array_map2_i (fun i na t -> (na, None, lift i t)) lna typarray in Array.fold_left (fun e assum -> g assum e) e ctxt let map_constr_with_binders_left_to_right g f l c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> c | Cast (c,k,t) -> let c' = f l c in mkCast (c',k,f l t) | Prod (na,t,c) -> let t' = f l t in mkProd (na, t', f (g (na,None,t) l) c) | Lambda (na,t,c) -> let t' = f l t in mkLambda (na, t', f (g (na,None,t) l) c) | LetIn 
(na,b,t,c) -> let b' = f l b in let t' = f l t in let c' = f (g (na,Some b,t) l) c in mkLetIn (na, b', t', c') | App (c,[||]) -> assert false | App (c,al) -> (*Special treatment to be able to recognize partially applied subterms*) let a = al.(Array.length al - 1) in let hd = f l (mkApp (c, Array.sub al 0 (Array.length al - 1))) in mkApp (hd, [| f l a |]) | Evar (e,al) -> mkEvar (e, array_map_left (f l) al) | Case (ci,p,c,bl) -> (* In v8 concrete syntax, predicate is after the term to match! *) let c' = f l c in let p' = f l p in mkCase (ci, p', c', array_map_left (f l) bl) | Fix (ln,(lna,tl,bl as fx)) -> let l' = fold_rec_types g fx l in let (tl',bl') = array_map_left_pair (f l) tl (f l') bl in mkFix (ln,(lna,tl',bl')) | CoFix(ln,(lna,tl,bl as fx)) -> let l' = fold_rec_types g fx l in let (tl',bl') = array_map_left_pair (f l) tl (f l') bl in mkCoFix (ln,(lna,tl',bl')) (* strong *) let map_constr_with_full_binders g f l cstr = match kind_of_term cstr with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> cstr | Cast (c,k, t) -> let c' = f l c in let t' = f l t in if c==c' && t==t' then cstr else mkCast (c', k, t') | Prod (na,t,c) -> let t' = f l t in let c' = f (g (na,None,t) l) c in if t==t' && c==c' then cstr else mkProd (na, t', c') | Lambda (na,t,c) -> let t' = f l t in let c' = f (g (na,None,t) l) c in if t==t' && c==c' then cstr else mkLambda (na, t', c') | LetIn (na,b,t,c) -> let b' = f l b in let t' = f l t in let c' = f (g (na,Some b,t) l) c in if b==b' && t==t' && c==c' then cstr else mkLetIn (na, b', t', c') | App (c,al) -> let c' = f l c in let al' = Array.map (f l) al in if c==c' && array_for_all2 (==) al al' then cstr else mkApp (c', al') | Evar (e,al) -> let al' = Array.map (f l) al in if array_for_all2 (==) al al' then cstr else mkEvar (e, al') | Case (ci,p,c,bl) -> let p' = f l p in let c' = f l c in let bl' = Array.map (f l) bl in if p==p' && c==c' && array_for_all2 (==) bl bl' then cstr else mkCase (ci, p', c', bl') | Fix (ln,(lna,tl,bl)) -> let tl' = Array.map (f l) tl in let l' = array_fold_left2 (fun l na t -> g (na,None,t) l) l lna tl in let bl' = Array.map (f l') bl in if array_for_all2 (==) tl tl' && array_for_all2 (==) bl bl' then cstr else mkFix (ln,(lna,tl',bl')) | CoFix(ln,(lna,tl,bl)) -> let tl' = Array.map (f l) tl in let l' = array_fold_left2 (fun l na t -> g (na,None,t) l) l lna tl in let bl' = Array.map (f l') bl in if array_for_all2 (==) tl tl' && array_for_all2 (==) bl bl' then cstr else mkCoFix (ln,(lna,tl',bl')) (* [fold_constr_with_binders g f n acc c] folds [f n] on the immediate subterms of [c] starting from [acc] and proceeding from left to right according to the usual representation of the constructions as [fold_constr] but it carries an extra data [n] (typically a lift index) which is processed by [g] (which typically add 1 to [n]) at each binder traversal; it is not recursive *) let fold_constr_with_binders g f n acc c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> acc | Cast (c,_, t) -> f n (f n acc c) t | Prod (_,t,c) -> f (g n) (f n acc t) c | Lambda (_,t,c) -> f (g n) (f n acc t) c | LetIn (_,b,t,c) -> f (g n) (f n (f n acc b) t) c | App (c,l) -> Array.fold_left (f n) (f n acc c) l | Evar (_,l) -> Array.fold_left (f n) acc l | Case (_,p,c,bl) -> Array.fold_left (f n) (f n (f n acc p) c) bl | Fix (_,(lna,tl,bl)) -> let n' = iterate g (Array.length tl) n in let fd = array_map2 (fun t b -> (t,b)) tl bl in Array.fold_left (fun acc (t,b) -> f n' (f n acc t) b) acc fd | CoFix 
(_,(lna,tl,bl)) -> let n' = iterate g (Array.length tl) n in let fd = array_map2 (fun t b -> (t,b)) tl bl in Array.fold_left (fun acc (t,b) -> f n' (f n acc t) b) acc fd (* [iter_constr_with_full_binders g f acc c] iters [f acc] on the immediate subterms of [c]; it carries an extra data [acc] which is processed by [g] at each binder traversal; it is not recursive and the order with which subterms are processed is not specified *) let iter_constr_with_full_binders g f l c = match kind_of_term c with | (Rel _ | Meta _ | Var _ | Sort _ | Const _ | Ind _ | Construct _) -> () | Cast (c,_, t) -> f l c; f l t | Prod (na,t,c) -> f l t; f (g (na,None,t) l) c | Lambda (na,t,c) -> f l t; f (g (na,None,t) l) c | LetIn (na,b,t,c) -> f l b; f l t; f (g (na,Some b,t) l) c | App (c,args) -> f l c; Array.iter (f l) args | Evar (_,args) -> Array.iter (f l) args | Case (_,p,c,bl) -> f l p; f l c; Array.iter (f l) bl | Fix (_,(lna,tl,bl)) -> let l' = array_fold_left2 (fun l na t -> g (na,None,t) l) l lna tl in Array.iter (f l) tl; Array.iter (f l') bl | CoFix (_,(lna,tl,bl)) -> let l' = array_fold_left2 (fun l na t -> g (na,None,t) l) l lna tl in Array.iter (f l) tl; Array.iter (f l') bl (***************************) (* occurs check functions *) (***************************) exception Occur let occur_meta c = let rec occrec c = match kind_of_term c with | Meta _ -> raise Occur | _ -> iter_constr occrec c in try occrec c; false with Occur -> true let occur_existential c = let rec occrec c = match kind_of_term c with | Evar _ -> raise Occur | _ -> iter_constr occrec c in try occrec c; false with Occur -> true let occur_meta_or_existential c = let rec occrec c = match kind_of_term c with | Evar _ -> raise Occur | Meta _ -> raise Occur | _ -> iter_constr occrec c in try occrec c; false with Occur -> true let occur_const s c = let rec occur_rec c = match kind_of_term c with | Const sp when sp=s -> raise Occur | _ -> iter_constr occur_rec c in try occur_rec c; false with Occur -> true let occur_evar n c = let rec occur_rec c = match kind_of_term c with | Evar (sp,_) when sp=n -> raise Occur | _ -> iter_constr occur_rec c in try occur_rec c; false with Occur -> true let occur_in_global env id constr = let vars = vars_of_global env constr in if List.mem id vars then raise Occur let occur_var env id c = let rec occur_rec c = match kind_of_term c with | Var _ | Const _ | Ind _ | Construct _ -> occur_in_global env id c | _ -> iter_constr occur_rec c in try occur_rec c; false with Occur -> true let occur_var_in_decl env hyp (_,c,typ) = match c with | None -> occur_var env hyp typ | Some body -> occur_var env hyp typ || occur_var env hyp body (* returns the list of free debruijn indices in a term *) let free_rels m = let rec frec depth acc c = match kind_of_term c with | Rel n -> if n >= depth then Intset.add (n-depth+1) acc else acc | _ -> fold_constr_with_binders succ frec depth acc c in frec 1 Intset.empty m (* collects all metavar occurences, in left-to-right order, preserving * repetitions and all. 
*) let collect_metas c = let rec collrec acc c = match kind_of_term c with | Meta mv -> list_add_set mv acc | _ -> fold_constr collrec acc c in List.rev (collrec [] c) (* collects all vars; warning: this is only visible vars, not dependencies in all section variables; for the latter, use global_vars_set *) let collect_vars c = let rec aux vars c = match kind_of_term c with | Var id -> Idset.add id vars | _ -> fold_constr aux vars c in aux Idset.empty c (* Tests whether [m] is a subterm of [t]: [m] is appropriately lifted through abstractions of [t] *) let dependent_main noevar m t = let rec deprec m t = if eq_constr m t then raise Occur else match kind_of_term m, kind_of_term t with | App (fm,lm), App (ft,lt) when Array.length lm < Array.length lt -> deprec m (mkApp (ft,Array.sub lt 0 (Array.length lm))); Array.iter (deprec m) (Array.sub lt (Array.length lm) ((Array.length lt) - (Array.length lm))) | _, Cast (c,_,_) when noevar & isMeta c -> () | _, Evar _ when noevar -> () | _ -> iter_constr_with_binders (lift 1) deprec m t in try deprec m t; false with Occur -> true let dependent = dependent_main false let dependent_no_evar = dependent_main true let count_occurrences m t = let n = ref 0 in let rec countrec m t = if eq_constr m t then incr n else match kind_of_term m, kind_of_term t with | App (fm,lm), App (ft,lt) when Array.length lm < Array.length lt -> countrec m (mkApp (ft,Array.sub lt 0 (Array.length lm))); Array.iter (countrec m) (Array.sub lt (Array.length lm) ((Array.length lt) - (Array.length lm))) | _, Cast (c,_,_) when isMeta c -> () | _, Evar _ -> () | _ -> iter_constr_with_binders (lift 1) countrec m t in countrec m t; !n (* Synonymous *) let occur_term = dependent let pop t = lift (-1) t (***************************) (* bindings functions *) (***************************) type meta_type_map = (metavariable * types) list type meta_value_map = (metavariable * constr) list let rec subst_meta bl c = match kind_of_term c with | Meta i -> (try List.assoc i bl with Not_found -> c) | _ -> map_constr (subst_meta bl) c (* First utilities for avoiding telescope computation for subst_term *) let prefix_application eq_fun (k,c) (t : constr) = let c' = collapse_appl c and t' = collapse_appl t in match kind_of_term c', kind_of_term t' with | App (f1,cl1), App (f2,cl2) -> let l1 = Array.length cl1 and l2 = Array.length cl2 in if l1 <= l2 && eq_fun c' (mkApp (f2, Array.sub cl2 0 l1)) then Some (mkApp (mkRel k, Array.sub cl2 l1 (l2 - l1))) else None | _ -> None let my_prefix_application eq_fun (k,c) (by_c : constr) (t : constr) = let c' = collapse_appl c and t' = collapse_appl t in match kind_of_term c', kind_of_term t' with | App (f1,cl1), App (f2,cl2) -> let l1 = Array.length cl1 and l2 = Array.length cl2 in if l1 <= l2 && eq_fun c' (mkApp (f2, Array.sub cl2 0 l1)) then Some (mkApp ((lift k by_c), Array.sub cl2 l1 (l2 - l1))) else None | _ -> None (* Recognizing occurrences of a given subterm in a term: [subst_term c t] substitutes [(Rel 1)] for all occurrences of term [c] in a term [t]; works if [c] has rels *) let subst_term_gen eq_fun c t = let rec substrec (k,c as kc) t = match prefix_application eq_fun kc t with | Some x -> x | None -> if eq_fun c t then mkRel k else map_constr_with_binders (fun (k,c) -> (k+1,lift 1 c)) substrec kc t in substrec (1,c) t let subst_term = subst_term_gen eq_constr (* Recognizing occurrences of a given subterm in a term : [replace_term c1 c2 t] substitutes [c2] for all occurrences of term [c1] in a term [t]; works if [c1] and [c2] have rels *) let 
replace_term_gen eq_fun c by_c in_t = let rec substrec (k,c as kc) t = match my_prefix_application eq_fun kc by_c t with | Some x -> x | None -> (if eq_fun c t then (lift k by_c) else map_constr_with_binders (fun (k,c) -> (k+1,lift 1 c)) substrec kc t) in substrec (0,c) in_t let replace_term = replace_term_gen eq_constr (* Substitute only at a list of locations or excluding a list of locations; in the occurrences list (b,l), b=true means no occurrence except the ones in l and b=false, means all occurrences except the ones in l *) type hyp_location_flag = (* To distinguish body and type of local defs *) | InHyp | InHypTypeOnly | InHypValueOnly type occurrences = bool * int list let all_occurrences = (false,[]) let no_occurrences_in_set = (true,[]) let error_invalid_occurrence l = let l = list_uniquize (List.sort Pervasives.compare l) in errorlabstrm "" (str ("Invalid occurrence " ^ plural (List.length l) "number" ^": ") ++ prlist_with_sep spc int l ++ str ".") let pr_position (cl,pos) = let clpos = match cl with | None -> str " of the goal" | Some (id,InHyp) -> str " of hypothesis " ++ pr_id id | Some (id,InHypTypeOnly) -> str " of the type of hypothesis " ++ pr_id id | Some (id,InHypValueOnly) -> str " of the body of hypothesis " ++ pr_id id in int pos ++ clpos let error_cannot_unify_occurrences nested (cl2,pos2,t2) (cl1,pos1,t1) (nowhere_except_in,locs) = let s = if nested then "Found nested occurrences of the pattern" else "Found incompatible occurrences of the pattern" in errorlabstrm "" (str s ++ str ":" ++ spc () ++ str "Matched term " ++ quote (print_constr t2) ++ strbrk " at position " ++ pr_position (cl2,pos2) ++ strbrk " is not compatible with matched term " ++ quote (print_constr t1) ++ strbrk " at position " ++ pr_position (cl1,pos1) ++ str ".") let is_selected pos (nowhere_except_in,locs) = nowhere_except_in && List.mem pos locs || not nowhere_except_in && not (List.mem pos locs) exception NotUnifiable type 'a testing_function = { match_fun : constr -> 'a; merge_fun : 'a -> 'a -> 'a; mutable testing_state : 'a; mutable last_found : ((identifier * hyp_location_flag) option * int * constr) option } let subst_closed_term_occ_gen_modulo (nowhere_except_in,locs as plocs) test cl occ t = let maxocc = List.fold_right max locs 0 in let pos = ref occ in let nested = ref false in let add_subst t subst = try test.testing_state <- test.merge_fun subst test.testing_state; test.last_found <- Some (cl,!pos,t) with NotUnifiable -> let lastpos = Option.get test.last_found in error_cannot_unify_occurrences !nested (cl,!pos,t) lastpos plocs in let rec substrec k t = if nowhere_except_in & !pos > maxocc then t else try let subst = test.match_fun t in if is_selected !pos plocs then (add_subst t subst; incr pos; (* Check nested matching subterms *) nested := true; ignore (subst_below k t); nested := false; (* Do the effective substitution *) mkRel k) else (incr pos; subst_below k t) with NotUnifiable -> subst_below k t and subst_below k t = map_constr_with_binders_left_to_right (fun d k -> k+1) substrec k t in let t' = substrec 1 t in (!pos, t') let is_nowhere (nowhere_except_in,locs) = nowhere_except_in && locs = [] let check_used_occurrences nbocc (nowhere_except_in,locs) = let rest = List.filter (fun o -> o >= nbocc) locs in if rest <> [] then error_invalid_occurrence rest let proceed_with_occurrences f plocs x = if is_nowhere plocs then (* optimization *) x else begin assert (List.for_all (fun x -> x >= 0) (snd plocs)); let (nbocc,x) = f 1 x in check_used_occurrences nbocc plocs; x end let 
make_eq_test c = { match_fun = (fun c' -> if eq_constr c c' then () else raise NotUnifiable); merge_fun = (fun () () -> ()); testing_state = (); last_found = None } let subst_closed_term_occ_gen plocs pos c t = subst_closed_term_occ_gen_modulo plocs (make_eq_test c) None pos t let subst_closed_term_occ plocs c t = proceed_with_occurrences (fun occ -> subst_closed_term_occ_gen plocs occ c) plocs t let subst_closed_term_occ_modulo plocs test cl t = proceed_with_occurrences (subst_closed_term_occ_gen_modulo plocs test cl) plocs t let map_named_declaration_with_hyploc f hyploc acc (id,bodyopt,typ) = let f = f (Some (id,hyploc)) in match bodyopt,hyploc with | None, InHypValueOnly -> errorlabstrm "" (pr_id id ++ str " has no value.") | None, _ | Some _, InHypTypeOnly -> let acc,typ = f acc typ in acc,(id,bodyopt,typ) | Some body, InHypValueOnly -> let acc,body = f acc body in acc,(id,Some body,typ) | Some body, InHyp -> let acc,body = f acc body in let acc,typ = f acc typ in acc,(id,Some body,typ) let subst_closed_term_occ_decl (plocs,hyploc) c d = proceed_with_occurrences (map_named_declaration_with_hyploc (fun _ occ -> subst_closed_term_occ_gen plocs occ c) hyploc) plocs d let subst_closed_term_occ_decl_modulo (plocs,hyploc) test d = proceed_with_occurrences (map_named_declaration_with_hyploc (subst_closed_term_occ_gen_modulo plocs test) hyploc) plocs d let vars_of_env env = let s = Sign.fold_named_context (fun (id,_,_) s -> Idset.add id s) (named_context env) ~init:Idset.empty in Sign.fold_rel_context (fun (na,_,_) s -> match na with Name id -> Idset.add id s | _ -> s) (rel_context env) ~init:s let add_vname vars = function Name id -> Idset.add id vars | _ -> vars (*************************) (* Names environments *) (*************************) type names_context = name list let add_name n nl = n::nl let lookup_name_of_rel p names = try List.nth names (p-1) with Invalid_argument _ | Failure _ -> raise Not_found let rec lookup_rel_of_name id names = let rec lookrec n = function | Anonymous :: l -> lookrec (n+1) l | (Name id') :: l -> if id' = id then n else lookrec (n+1) l | [] -> raise Not_found in lookrec 1 names let empty_names_context = [] let ids_of_rel_context sign = Sign.fold_rel_context (fun (na,_,_) l -> match na with Name id -> id::l | Anonymous -> l) sign ~init:[] let ids_of_named_context sign = Sign.fold_named_context (fun (id,_,_) idl -> id::idl) sign ~init:[] let ids_of_context env = (ids_of_rel_context (rel_context env)) @ (ids_of_named_context (named_context env)) let names_of_rel_context env = List.map (fun (na,_,_) -> na) (rel_context env) let is_section_variable id = try let _ = Global.lookup_named id in true with Not_found -> false let isGlobalRef c = match kind_of_term c with | Const _ | Ind _ | Construct _ | Var _ -> true | _ -> false let has_polymorphic_type c = match (Global.lookup_constant c).Declarations.const_type with | Declarations.PolymorphicArity _ -> true | _ -> false let base_sort_cmp pb s0 s1 = match (s0,s1) with | (Prop c1, Prop c2) -> c1 = Null or c2 = Pos (* Prop <= Set *) | (Prop c1, Type u) -> pb = Reduction.CUMUL | (Type u1, Type u2) -> true | _ -> false (* eq_constr extended with universe erasure *) let compare_constr_univ f cv_pb t1 t2 = match kind_of_term t1, kind_of_term t2 with Sort s1, Sort s2 -> base_sort_cmp cv_pb s1 s2 | Prod (_,t1,c1), Prod (_,t2,c2) -> f Reduction.CONV t1 t2 & f cv_pb c1 c2 | _ -> compare_constr (f Reduction.CONV) t1 t2 let rec constr_cmp cv_pb t1 t2 = compare_constr_univ constr_cmp cv_pb t1 t2 let eq_constr = constr_cmp 
Reduction.CONV (* App(c,[t1,...tn]) -> ([c,t1,...,tn-1],tn) App(c,[||]) -> ([],c) *) let split_app c = match kind_of_term c with App(c,l) -> let len = Array.length l in if len=0 then ([],c) else let last = Array.get l (len-1) in let prev = Array.sub l 0 (len-1) in c::(Array.to_list prev), last | _ -> assert false let hdtl l = List.hd l, List.tl l type subst = (rel_context*constr) Intmap.t exception CannotFilter let filtering env cv_pb c1 c2 = let evm = ref Intmap.empty in let define cv_pb e1 ev c1 = try let (e2,c2) = Intmap.find ev !evm in let shift = List.length e1 - List.length e2 in if constr_cmp cv_pb c1 (lift shift c2) then () else raise CannotFilter with Not_found -> evm := Intmap.add ev (e1,c1) !evm in let rec aux env cv_pb c1 c2 = match kind_of_term c1, kind_of_term c2 with | App _, App _ -> let ((p1,l1),(p2,l2)) = (split_app c1),(split_app c2) in aux env cv_pb l1 l2; if p1=[] & p2=[] then () else aux env cv_pb (applist (hdtl p1)) (applist (hdtl p2)) | Prod (n,t1,c1), Prod (_,t2,c2) -> aux env cv_pb t1 t2; aux ((n,None,t1)::env) cv_pb c1 c2 | _, Evar (ev,_) -> define cv_pb env ev c1 | Evar (ev,_), _ -> define cv_pb env ev c2 | _ -> if compare_constr_univ (fun pb c1 c2 -> aux env pb c1 c2; true) cv_pb c1 c2 then () else raise CannotFilter (* TODO: the remaining binders *) in aux env cv_pb c1 c2; !evm let decompose_prod_letin : constr -> int * rel_context * constr = let rec prodec_rec i l c = match kind_of_term c with | Prod (n,t,c) -> prodec_rec (succ i) ((n,None,t)::l) c | LetIn (n,d,t,c) -> prodec_rec (succ i) ((n,Some d,t)::l) c | Cast (c,_,_) -> prodec_rec i l c | _ -> i,l,c in prodec_rec 0 [] let align_prod_letin c a : rel_context * constr = let (lc,_,_) = decompose_prod_letin c in let (la,l,a) = decompose_prod_letin a in if not (la >= lc) then invalid_arg "align_prod_letin"; let (l1,l2) = Util.list_chop lc l in l2,it_mkProd_or_LetIn a l1 (* Reduce a series of head eta-redexes, or nothing at all *) (* [x1:c1;...;xn:cn]@(f;a1...an;x1;...;xn) --> @(f;a1...an) *) (* Replaces 2 previous buggy versions *) let rec eta_reduce_head c = match kind_of_term c with | Lambda (_,c1,c') -> (match kind_of_term (eta_reduce_head c') with | App (f,cl) -> let lastn = (Array.length cl) - 1 in if lastn < 1 then anomaly "application without arguments" else (match kind_of_term cl.(lastn) with | Rel 1 -> let c' = if lastn = 1 then f else mkApp (f, Array.sub cl 0 lastn) in if noccurn 1 c' then lift (-1) c' else c | _ -> c) | _ -> c) | _ -> c (* alpha-eta conversion : ignore print names and casts *) let eta_eq_constr = let rec aux t1 t2 = let t1 = eta_reduce_head (strip_head_cast t1) and t2 = eta_reduce_head (strip_head_cast t2) in t1=t2 or compare_constr aux t1 t2 in aux (* iterator on rel context *) let process_rel_context f env = let sign = named_context_val env in let rels = rel_context env in let env0 = reset_with_named_context sign env in Sign.fold_rel_context f rels ~init:env0 let assums_of_rel_context sign = Sign.fold_rel_context (fun (na,c,t) l -> match c with Some _ -> l | None -> (na, t)::l) sign ~init:[] let map_rel_context_in_env f env sign = let rec aux env acc = function | d::sign -> aux (push_rel d env) (map_rel_declaration (f env) d :: acc) sign | [] -> acc in aux env [] (List.rev sign) let map_rel_context_with_binders f sign = let rec aux k = function | d::sign -> map_rel_declaration (f k) d :: aux (k-1) sign | [] -> [] in aux (rel_context_length sign) sign let substl_rel_context l = map_rel_context_with_binders (fun k -> substnl l (k-1)) let lift_rel_context n =
map_rel_context_with_binders (liftn n) let smash_rel_context sign = let rec aux acc = function | [] -> acc | (_,None,_ as d) :: l -> aux (d::acc) l | (_,Some b,_) :: l -> (* Quadratic in the number of let but there are probably a few of them *) aux (List.rev (substl_rel_context [b] (List.rev acc))) l in List.rev (aux [] sign) let adjust_subst_to_rel_context sign l = let rec aux subst sign l = match sign, l with | (_,None,_)::sign', a::args' -> aux (a::subst) sign' args' | (_,Some c,_)::sign', args' -> aux (substl (List.rev subst) c :: subst) sign' args' | [], [] -> List.rev subst | _ -> anomaly "Instance and signature do not match" in aux [] (List.rev sign) l let fold_named_context_both_sides f l ~init = list_fold_right_and_left f l init let rec mem_named_context id = function | (id',_,_) :: _ when id=id' -> true | _ :: sign -> mem_named_context id sign | [] -> false let clear_named_body id env = let rec aux _ = function | (id',Some c,t) when id = id' -> push_named (id,None,t) | d -> push_named d in fold_named_context aux env ~init:(reset_context env) let global_vars env ids = Idset.elements (global_vars_set env ids) let global_vars_set_of_decl env = function | (_,None,t) -> global_vars_set env t | (_,Some c,t) -> Idset.union (global_vars_set env t) (global_vars_set env c) let dependency_closure env sign hyps = if Idset.is_empty hyps then [] else let (_,lh) = Sign.fold_named_context_reverse (fun (hs,hl) (x,_,_ as d) -> if Idset.mem x hs then (Idset.union (global_vars_set_of_decl env d) (Idset.remove x hs), x::hl) else (hs,hl)) ~init:(hyps,[]) sign in List.rev lh (* Combinators on judgments *) let on_judgment f j = { uj_val = f j.uj_val; uj_type = f j.uj_type } let on_judgment_value f j = { j with uj_val = f j.uj_val } let on_judgment_type f j = { j with uj_type = f j.uj_type } (* Cut a context ctx in 2 parts (ctx1,ctx2) with ctx1 containing k variables; skips let-in's *) let context_chop k ctx = let rec chop_aux acc = function | (0, l2) -> (List.rev acc, l2) | (n, ((_,Some _,_ as h)::t)) -> chop_aux (h::acc) (n, t) | (n, (h::t)) -> chop_aux (h::acc) (pred n, t) | (_, []) -> anomaly "context_chop" in chop_aux [] (k,ctx) (* Do not skip let-in's *) let env_rel_context_chop k env = let rels = rel_context env in let ctx1,ctx2 = list_chop k rels in push_rel_context ctx2 (reset_with_named_context (named_context_val env) env), ctx1 (*******************************************) (* Functions to deal with impossible cases *) (*******************************************) let impossible_default_case = ref None let set_impossible_default_clause c = impossible_default_case := Some c let coq_unit_judge = let na1 = Name (id_of_string "A") in let na2 = Name (id_of_string "H") in fun () -> match !impossible_default_case with | Some (id,type_of_id) -> make_judge id type_of_id | None -> (* In case the constants id/ID are not defined *) make_judge (mkLambda (na1,mkProp,mkLambda(na2,mkRel 1,mkRel 1))) (mkProd (na1,mkProp,mkArrow (mkRel 1) (mkRel 2))) coq-8.4pl2/pretyping/matching.ml0000640000175000001440000003057312010532755016004 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* if List.mem_assoc n names then Flags.if_warn Pp.msg_warning (str "Collision between bound variable " ++ pr_id n ++ str " and a metavariable of same name."); (names,(n,x)::terms) let add_binders na1 na2 (names,terms as subst) = match na1, na2 with | Name id1, Name id2 -> if List.mem_assoc id1 names then (Flags.if_warn Pp.msg_warning 
(str "Collision between bound variables of name " ++ pr_id id1); (names,terms)) else (if List.mem_assoc id1 terms then Flags.if_warn Pp.msg_warning (str "Collision between bound variable " ++ pr_id id1 ++ str " and another bound variable of same name."); ((id1,id2)::names,terms)); | _ -> subst let build_lambda toabstract stk (m : constr) = let rec buildrec m p_0 p_1 = match p_0,p_1 with | (_, []) -> m | (n, (_,na,t)::tl) -> if List.mem n toabstract then buildrec (mkLambda (na,t,m)) (n+1) tl else buildrec (lift (-1) m) (n+1) tl in buildrec m 1 stk let rec list_insert f a = function | [] -> [a] | b::l when f a b -> a::b::l | b::l when a = b -> raise PatternMatchingFailure | b::l -> b :: list_insert f a l let extract_bound_vars = let rec aux k = function | ([],_) -> [] | (n::l,(na1,na2,_)::stk) when k = n -> begin match na1,na2 with | Name id1,Name _ -> list_insert (<) id1 (aux (k+1) (l,stk)) | Name _,Anonymous -> anomaly "Unnamed bound variable" | Anonymous,_ -> raise PatternMatchingFailure end | (l,_::stk) -> aux (k+1) (l,stk) | (_,[]) -> assert false in aux 1 let dummy_constr = mkProp let rec make_renaming ids = function | (Name id,Name _,_)::stk -> let renaming = make_renaming ids stk in (try mkRel (list_index id ids) :: renaming with Not_found -> dummy_constr :: renaming) | (_,_,_)::stk -> dummy_constr :: make_renaming ids stk | [] -> [] let merge_binding allow_bound_rels stk n cT subst = let depth = List.length stk in let c = if depth = 0 then (* Optimization *) ([],cT) else let frels = Intset.elements (free_rels cT) in let frels = List.filter (fun i -> i <= depth) frels in if allow_bound_rels then let frels = Sort.list (<) frels in let canonically_ordered_vars = extract_bound_vars (frels,stk) in let renaming = make_renaming canonically_ordered_vars stk in (canonically_ordered_vars, substl renaming cT) else if frels = [] then ([],lift (-depth) cT) else raise PatternMatchingFailure in constrain (n,c) subst let matches_core convert allow_partial_app allow_bound_rels pat c = let conv = match convert with | None -> eq_constr | Some (env,sigma) -> is_conv env sigma in let rec sorec stk subst p t = let cT = strip_outer_cast t in match p,kind_of_term cT with | PSoApp (n,args),m -> let relargs = List.map (function | PRel n -> n | _ -> error "Only bound indices allowed in second order pattern matching.") args in let frels = Intset.elements (free_rels cT) in if list_subset frels relargs then constrain (n,([],build_lambda relargs stk cT)) subst else raise PatternMatchingFailure | PMeta (Some n), m -> merge_binding allow_bound_rels stk n cT subst | PMeta None, m -> subst | PRef (VarRef v1), Var v2 when v1 = v2 -> subst | PVar v1, Var v2 when v1 = v2 -> subst | PRef ref, _ when conv (constr_of_global ref) cT -> subst | PRel n1, Rel n2 when n1 = n2 -> subst | PSort (GProp c1), Sort (Prop c2) when c1 = c2 -> subst | PSort (GType _), Sort (Type _) -> subst | PApp (p, [||]), _ -> sorec stk subst p t | PApp (PApp (h, a1), a2), _ -> sorec stk subst (PApp(h,Array.append a1 a2)) t | PApp (PMeta (Some n),args1), App (c2,args2) when allow_partial_app -> let p = Array.length args2 - Array.length args1 in if p>=0 then let args21, args22 = array_chop p args2 in let c = mkApp(c2,args21) in let subst = merge_binding allow_bound_rels stk n c subst in array_fold_left2 (sorec stk) subst args1 args22 else raise PatternMatchingFailure | PApp (c1,arg1), App (c2,arg2) -> (try array_fold_left2 (sorec stk) (sorec stk subst c1 c2) arg1 arg2 with Invalid_argument _ -> raise PatternMatchingFailure) | PProd (na1,c1,d1), 
Prod(na2,c2,d2) -> sorec ((na1,na2,c2)::stk) (add_binders na1 na2 (sorec stk subst c1 c2)) d1 d2 | PLambda (na1,c1,d1), Lambda(na2,c2,d2) -> sorec ((na1,na2,c2)::stk) (add_binders na1 na2 (sorec stk subst c1 c2)) d1 d2 | PLetIn (na1,c1,d1), LetIn(na2,c2,t2,d2) -> sorec ((na1,na2,t2)::stk) (add_binders na1 na2 (sorec stk subst c1 c2)) d1 d2 | PIf (a1,b1,b1'), Case (ci,_,a2,[|b2;b2'|]) -> let ctx,b2 = decompose_lam_n_assum ci.ci_cstr_ndecls.(0) b2 in let ctx',b2' = decompose_lam_n_assum ci.ci_cstr_ndecls.(1) b2' in let n = rel_context_length ctx in let n' = rel_context_length ctx' in if noccur_between 1 n b2 & noccur_between 1 n' b2' then let s = List.fold_left (fun l (na,_,t) -> (Anonymous,na,t)::l) stk ctx in let s' = List.fold_left (fun l (na,_,t) -> (Anonymous,na,t)::l) stk ctx' in let b1 = lift_pattern n b1 and b1' = lift_pattern n' b1' in sorec s' (sorec s (sorec stk subst a1 a2) b1 b2) b1' b2' else raise PatternMatchingFailure | PCase (ci1,p1,a1,br1), Case (ci2,p2,a2,br2) -> let n2 = Array.length br2 in if (ci1.cip_ind <> None && ci1.cip_ind <> Some ci2.ci_ind) || (not ci1.cip_extensible && List.length br1 <> n2) then raise PatternMatchingFailure; let chk_branch subst (j,n,c) = (* (ind,j+1) is normally known to be a correct constructor and br2 a correct match over the same inductive *) assert (j < n2); sorec stk subst c br2.(j) in let chk_head = sorec stk (sorec stk subst a1 a2) p1 p2 in List.fold_left chk_branch chk_head br1 | PFix c1, Fix _ when eq_constr (mkFix c1) cT -> subst | PCoFix c1, CoFix _ when eq_constr (mkCoFix c1) cT -> subst | _ -> raise PatternMatchingFailure in let names,terms = sorec [] ([],[]) pat c in (names,Sort.list (fun (a,_) (b,_) -> a (a,b)) subst) let extended_matches = matches_core None true true let matches c p = snd (matches_core_closed None true c p) let special_meta = (-1) (* Tells if it is an authorized occurrence and if the instance is closed *) let authorized_occ partial_app closed pat c mk_ctx next = try let sigma = matches_core_closed None partial_app pat c in if closed && not (List.for_all (fun (_,c) -> closed0 c) (snd sigma)) then next () else sigma, mk_ctx (mkMeta special_meta), next with PatternMatchingFailure -> next () (* Tries to match a subterm of [c] with [pat] *) let sub_match ?(partial_app=false) ?(closed=true) pat c = let rec aux c mk_ctx next = match kind_of_term c with | Cast (c1,k,c2) -> authorized_occ partial_app closed pat c mk_ctx (fun () -> let mk_ctx lc = mk_ctx (mkCast (List.hd lc, k,c2)) in try_aux [c1] mk_ctx next) | Lambda (x,c1,c2) -> authorized_occ partial_app closed pat c mk_ctx (fun () -> let mk_ctx lc = mk_ctx (mkLambda (x,List.hd lc,List.nth lc 1)) in try_aux [c1;c2] mk_ctx next) | Prod (x,c1,c2) -> authorized_occ partial_app closed pat c mk_ctx (fun () -> let mk_ctx lc = mk_ctx (mkProd (x,List.hd lc,List.nth lc 1)) in try_aux [c1;c2] mk_ctx next) | LetIn (x,c1,t,c2) -> authorized_occ partial_app closed pat c mk_ctx (fun () -> let mk_ctx = function [c1;c2] -> mkLetIn (x,c1,t,c2) | _ -> assert false in try_aux [c1;c2] mk_ctx next) | App (c1,lc) -> authorized_occ partial_app closed pat c mk_ctx (fun () -> let topdown = true in if partial_app then if topdown then let lc1 = Array.sub lc 0 (Array.length lc - 1) in let app = mkApp (c1,lc1) in let mk_ctx = function | [app';c] -> mk_ctx (mkApp (app',[|c|])) | _ -> assert false in try_aux [app;array_last lc] mk_ctx next else let rec aux2 app args next = match args with | [] -> let mk_ctx le = mk_ctx (mkApp (List.hd le, Array.of_list (List.tl le))) in try_aux (c1::Array.to_list 
lc) mk_ctx next | arg :: args -> let app = mkApp (app,[|arg|]) in let next () = aux2 app args next in let mk_ctx ce = mk_ctx (mkApp (ce, Array.of_list args)) in aux app mk_ctx next in aux2 c1 (Array.to_list lc) next else let mk_ctx le = mk_ctx (mkApp (List.hd le, Array.of_list (List.tl le))) in try_aux (c1::Array.to_list lc) mk_ctx next) | Case (ci,hd,c1,lc) -> authorized_occ partial_app closed pat c mk_ctx (fun () -> let mk_ctx le = mk_ctx (mkCase (ci,hd,List.hd le,Array.of_list (List.tl le))) in try_aux (c1::Array.to_list lc) mk_ctx next) | Construct _ | Fix _ | Ind _|CoFix _ |Evar _|Const _ | Rel _|Meta _|Var _|Sort _ -> authorized_occ partial_app closed pat c mk_ctx next (* Tries [sub_match] for all terms in the list *) and try_aux lc mk_ctx next = let rec try_sub_match_rec lacc = function | [] -> next () | c::tl -> let mk_ctx ce = mk_ctx (List.rev_append lacc (ce::tl)) in let next () = try_sub_match_rec (c::lacc) tl in aux c mk_ctx next in try_sub_match_rec [] lc in aux c (fun x -> x) (fun () -> raise PatternMatchingFailure) type subterm_matching_result = (bound_ident_map * patvar_map) * constr * (unit -> subterm_matching_result) let match_subterm pat c = sub_match pat c let match_appsubterm pat c = sub_match ~partial_app:true pat c let match_subterm_gen app pat c = sub_match ~partial_app:app pat c let is_matching pat c = try let _ = matches pat c in true with PatternMatchingFailure -> false let is_matching_appsubterm ?(closed=true) pat c = try let _ = sub_match ~partial_app:true ~closed pat c in true with PatternMatchingFailure -> false let matches_conv env sigma c p = snd (matches_core_closed (Some (env,sigma)) false c p) let is_matching_conv env sigma pat n = try let _ = matches_conv env sigma pat n in true with PatternMatchingFailure -> false coq-8.4pl2/pretyping/typeclasses_errors.ml0000640000175000001440000000374312010532755020144 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* raise (TypeClassError (env, UnsatisfiableConstraints (evd, None))) | Some ev -> let loc, kind = Evd.evar_source ev evd in raise (Loc.Exc_located (loc, TypeClassError (env, UnsatisfiableConstraints (evd, Some (ev, kind))))) let mismatched_ctx_inst env c n m = typeclass_error env (MismatchedContextInstance (c, n, m)) let rec unsatisfiable_exception exn = match exn with | TypeClassError (_, UnsatisfiableConstraints _) -> true | Loc.Exc_located(_, e) -> unsatisfiable_exception e | _ -> false coq-8.4pl2/pretyping/evarconv.mli0000640000175000001440000000363712010532755016207 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* env -> constr -> constr -> evar_map -> evar_map val the_conv_x_leq : ?ts:transparent_state -> env -> constr -> constr -> evar_map -> evar_map (** The same function resolving evars by side-effect and catching the exception *) val e_conv : ?ts:transparent_state -> env -> evar_map ref -> constr -> constr -> bool val e_cumul : ?ts:transparent_state -> env -> evar_map ref -> constr -> constr -> bool (**/**) (* For debugging *) val evar_conv_x : transparent_state -> env -> evar_map -> conv_pb -> constr -> constr -> evar_map * bool val evar_eqappr_x : transparent_state -> env -> evar_map -> conv_pb -> constr * constr list -> constr * constr list -> evar_map * bool (**/**) val consider_remaining_unif_problems : ?ts:transparent_state -> env -> evar_map -> evar_map val check_conv_record : 
constr * types list -> constr * types list -> constr * constr list * (constr list * constr list) * (constr list * types list) * (constr list * types list) * constr * (int * constr) val set_solve_evars : (env -> evar_map -> constr -> evar_map * constr) -> unit val second_order_matching : transparent_state -> env -> evar_map -> existential -> occurrences option list -> constr -> evar_map * bool coq-8.4pl2/pretyping/doc.tex0000640000175000001440000000032707020713374015144 0ustar notinusers \newpage \section*{Pre-typing} \ocwsection \label{pretyping} \bigskip \begin{center}\epsfig{file=pretyping.dep.ps,width=\linewidth}\end{center} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: coq-8.4pl2/pretyping/pretype_errors.mli0000640000175000001440000001057112010532755017443 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool (** Presenting terms without solved evars *) val nf_evar : Evd.evar_map -> constr -> constr val j_nf_evar : Evd.evar_map -> unsafe_judgment -> unsafe_judgment val jl_nf_evar : Evd.evar_map -> unsafe_judgment list -> unsafe_judgment list val jv_nf_evar : Evd.evar_map -> unsafe_judgment array -> unsafe_judgment array val tj_nf_evar : Evd.evar_map -> unsafe_type_judgment -> unsafe_type_judgment val env_nf_evar : Evd.evar_map -> env -> env val env_nf_betaiotaevar : Evd.evar_map -> env -> env val j_nf_betaiotaevar : Evd.evar_map -> unsafe_judgment -> unsafe_judgment val jv_nf_betaiotaevar : Evd.evar_map -> unsafe_judgment array -> unsafe_judgment array (** Raising errors *) val error_actual_type_loc : loc -> env -> Evd.evar_map -> unsafe_judgment -> constr -> 'b val error_cant_apply_not_functional_loc : loc -> env -> Evd.evar_map -> unsafe_judgment -> unsafe_judgment list -> 'b val error_cant_apply_bad_type_loc : loc -> env -> Evd.evar_map -> int * constr * constr -> unsafe_judgment -> unsafe_judgment list -> 'b val error_case_not_inductive_loc : loc -> env -> Evd.evar_map -> unsafe_judgment -> 'b val error_ill_formed_branch_loc : loc -> env -> Evd.evar_map -> constr -> constructor -> constr -> constr -> 'b val error_number_branches_loc : loc -> env -> Evd.evar_map -> unsafe_judgment -> int -> 'b val error_ill_typed_rec_body_loc : loc -> env -> Evd.evar_map -> int -> name array -> unsafe_judgment array -> types array -> 'b val error_not_a_type_loc : loc -> env -> Evd.evar_map -> unsafe_judgment -> 'b val error_cannot_coerce : env -> Evd.evar_map -> constr * constr -> 'b (** {6 Implicit arguments synthesis errors } *) val error_occur_check : env -> Evd.evar_map -> existential_key -> constr -> 'b val error_not_clean : env -> Evd.evar_map -> existential_key -> constr -> loc * Evd.hole_kind -> 'b val error_unsolvable_implicit : loc -> env -> Evd.evar_map -> Evd.evar_info -> Evd.hole_kind -> Evd.unsolvability_explanation option -> 'b val error_cannot_unify : env -> Evd.evar_map -> constr * constr -> 'b val error_cannot_unify_local : env -> Evd.evar_map -> constr * constr * constr -> 'b val error_cannot_find_well_typed_abstraction : env -> Evd.evar_map -> constr -> constr list -> 'b val error_abstraction_over_meta : env -> Evd.evar_map -> metavariable -> metavariable -> 'b val error_non_linear_unification : env -> Evd.evar_map -> metavariable -> constr -> 'b (** {6 Ml Case errors } *) val error_cant_find_case_type_loc : loc -> env -> Evd.evar_map -> constr -> 'b (** {6 Pretyping errors } *) val error_unexpected_type_loc : loc -> env -> Evd.evar_map -> constr -> constr 
-> 'b val error_not_product_loc : loc -> env -> Evd.evar_map -> constr -> 'b (** {6 Error in conversion from AST to glob_constr } *) val error_var_not_found_loc : loc -> identifier -> 'b coq-8.4pl2/pretyping/classops.mli0000640000175000001440000000653312010532755016211 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* cl_typ -> cl_typ (** This is the type of infos for declared classes *) type cl_info_typ = { cl_param : int } (** This is the type of coercion kinds *) type coe_typ = Libnames.global_reference (** This is the type of infos for declared coercions *) type coe_info_typ (** [cl_index] is the type of class keys *) type cl_index (** [coe_index] is the type of coercion keys *) type coe_index (** This is the type of paths from a class to another *) type inheritance_path = coe_index list (** {6 Access to classes infos } *) val class_info : cl_typ -> (cl_index * cl_info_typ) val class_exists : cl_typ -> bool val class_info_from_index : cl_index -> cl_typ * cl_info_typ (** [find_class_type env sigma c] returns the head reference of [c] and its arguments *) val find_class_type : evar_map -> types -> cl_typ * constr list (** raises [Not_found] if not convertible to a class *) val class_of : env -> evar_map -> types -> types * cl_index (** raises [Not_found] if not mapped to a class *) val inductive_class_of : inductive -> cl_index val class_args_of : env -> evar_map -> types -> constr list (** {6 [declare_coercion] adds a coercion in the graph of coercion paths } *) val declare_coercion : coe_typ -> locality -> isid:bool -> src:cl_typ -> target:cl_typ -> params:int -> unit (** {6 Access to coercions infos } *) val coercion_exists : coe_typ -> bool val coercion_value : coe_index -> (unsafe_judgment * bool) (** {6 Lookup functions for coercion paths } *) val lookup_path_between_class : cl_index * cl_index -> inheritance_path val lookup_path_between : env -> evar_map -> types * types -> types * types * inheritance_path val lookup_path_to_fun_from : env -> evar_map -> types -> types * inheritance_path val lookup_path_to_sort_from : env -> evar_map -> types -> types * inheritance_path val lookup_pattern_path_between : inductive * inductive -> (constructor * int) list (**/**) (* Crade *) open Pp val install_path_printer : ((cl_index * cl_index) * inheritance_path -> std_ppcmds) -> unit (**/**) (** {6 This is for printing purpose } *) val string_of_class : cl_typ -> string val pr_class : cl_typ -> std_ppcmds val pr_cl_index : cl_index -> std_ppcmds val get_coercion_value : coe_index -> constr val inheritance_graph : unit -> ((cl_index * cl_index) * inheritance_path) list val classes : unit -> cl_typ list val coercions : unit -> coe_index list (** [hide_coercion] returns the number of params to skip if the coercion must be hidden, [None] otherwise; it raises [Not_found] if not a coercion *) val hide_coercion : coe_typ -> int option coq-8.4pl2/pretyping/coercion.ml0000640000175000001440000002446012121620060015777 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* evar_map -> unsafe_judgment -> evar_map * unsafe_judgment (* [inh_coerce_to_sort env evd j] coerces [j] to a type; i.e. 
it inserts a coercion into [j], if needed, in such a way it gets as type a sort; it fails if no coercion is applicable *) val inh_coerce_to_sort : loc -> env -> evar_map -> unsafe_judgment -> evar_map * unsafe_type_judgment (* [inh_coerce_to_base env evd j] coerces [j] to its base type; i.e. it inserts a coercion into [j], if needed, in such a way it gets as type its base type (the notion depends on the coercion system) *) val inh_coerce_to_base : loc -> env -> evar_map -> unsafe_judgment -> evar_map * unsafe_judgment (* [inh_coerce_to_prod env evars t] coerces [t] to a product type *) val inh_coerce_to_prod : loc -> env -> evar_map -> type_constraint_type -> evar_map * type_constraint_type (* [inh_conv_coerce_to loc env evd j t] coerces [j] to an object of type [t]; i.e. it inserts a coercion into [j], if needed, in such a way [t] and [j.uj_type] are convertible; it fails if no coercion is applicable *) val inh_conv_coerce_to : loc -> env -> evar_map -> unsafe_judgment -> type_constraint_type -> evar_map * unsafe_judgment val inh_conv_coerce_rigid_to : loc -> env -> evar_map -> unsafe_judgment -> type_constraint_type -> evar_map * unsafe_judgment (* [inh_conv_coerces_to loc env evd t t'] checks if an object of type [t] is coercible to an object of type [t'] adding evar constraints if needed; it fails if no coercion exists *) val inh_conv_coerces_to : loc -> env -> evar_map -> types -> type_constraint_type -> evar_map (* [inh_pattern_coerce_to loc env evd pat ind1 ind2] coerces the Cases pattern [pat] typed in [ind1] into a pattern typed in [ind2]; raises [Not_found] if no coercion found *) val inh_pattern_coerce_to : loc -> Glob_term.cases_pattern -> inductive -> inductive -> Glob_term.cases_pattern end module Default = struct (* Typing operations dealing with coercions *) exception NoCoercion (* Here, funj is a coercion therefore already typed in global context *) let apply_coercion_args env argl funj = let rec apply_rec acc typ = function | [] -> { uj_val = applist (j_val funj,argl); uj_type = typ } | h::restl -> (* We should be able to arrange things so as not to have to call hnf_constr *) match kind_of_term (whd_betadeltaiota env Evd.empty typ) with | Prod (_,c1,c2) -> (* Typing guaranteed by the call to app_coercion *) apply_rec (h::acc) (subst1 h c2) restl | _ -> anomaly "apply_coercion_args" in apply_rec [] funj.uj_type argl (* apply the pattern coercion path p *) let apply_pattern_coercion loc pat p = List.fold_left (fun pat (co,n) -> let f i = if i let fv,isid = coercion_value i in let argl = (class_args_of env sigma typ_cl)@[ja.uj_val] in let jres = apply_coercion_args env argl fv in (if isid then { uj_val = ja.uj_val; uj_type = jres.uj_type } else jres), jres.uj_type) (hj,typ_cl) p) with e when Errors.noncritical e -> anomaly "apply_coercion" let inh_app_fun env evd j = let t = whd_betadeltaiota env evd j.uj_type in match kind_of_term t with | Prod (_,_,_) -> (evd,j) | Evar ev -> let (evd',t) = define_evar_as_product evd ev in (evd',{ uj_val = j.uj_val; uj_type = t }) | _ -> let t,p = lookup_path_to_fun_from env evd j.uj_type in (evd,apply_coercion env evd p j t) let inh_app_fun env evd j = try inh_app_fun env evd j with Not_found -> try inh_app_fun env (saturate_evd env evd) j with Not_found -> (evd, j) let inh_tosort_force loc env evd j = try let t,p = lookup_path_to_sort_from env evd j.uj_type in let j1 = apply_coercion env evd p j t in let j2 = on_judgment_type (whd_evar evd) j1 in (evd,type_judgment env j2) with Not_found -> error_not_a_type_loc loc env evd j let
inh_coerce_to_sort loc env evd j = let typ = whd_betadeltaiota env evd j.uj_type in match kind_of_term typ with | Sort s -> (evd,{ utj_val = j.uj_val; utj_type = s }) | Evar ev when not (is_defined_evar evd ev) -> let (evd',s) = define_evar_as_sort evd ev in (evd',{ utj_val = j.uj_val; utj_type = s }) | _ -> inh_tosort_force loc env evd j let inh_coerce_to_base loc env evd j = (evd, j) let inh_coerce_to_prod loc env evd t = (evd, t) let inh_coerce_to_fail env evd rigidonly v t c1 = if rigidonly & not (Heads.is_rigid env c1 && Heads.is_rigid env t) then raise NoCoercion else let v', t' = try let t2,t1,p = lookup_path_between env evd (t,c1) in match v with Some v -> let j = apply_coercion env evd p {uj_val = v; uj_type = t} t2 in Some j.uj_val, j.uj_type | None -> None, t with Not_found -> raise NoCoercion in try (the_conv_x_leq env t' c1 evd, v') with Reduction.NotConvertible -> raise NoCoercion let rec inh_conv_coerce_to_fail loc env evd rigidonly v t c1 = try (the_conv_x_leq env t c1 evd, v) with Reduction.NotConvertible -> try inh_coerce_to_fail env evd rigidonly v t c1 with NoCoercion -> match kind_of_term (whd_betadeltaiota env evd t), kind_of_term (whd_betadeltaiota env evd c1) with | Prod (name,t1,t2), Prod (_,u1,u2) -> (* Conversion did not work, we may succeed with a coercion. *) (* We eta-expand (hence possibly modifying the original term!) *) (* and look for a coercion c:u1->t1 s.t. fun x:u1 => v' (c x)) *) (* has type forall (x:u1), u2 (with v' recursively obtained) *) (* Note: we retype the term because sort-polymorphism may have *) (* weaken its type *) let name = match name with | Anonymous -> Name (id_of_string "x") | _ -> name in let env1 = push_rel (name,None,u1) env in let (evd', v1) = inh_conv_coerce_to_fail loc env1 evd rigidonly (Some (mkRel 1)) (lift 1 u1) (lift 1 t1) in let v1 = Option.get v1 in let v2 = Option.map (fun v -> beta_applist (lift 1 v,[v1])) v in let t2 = match v2 with | None -> subst_term v1 t2 | Some v2 -> Retyping.get_type_of env1 evd' v2 in let (evd'',v2') = inh_conv_coerce_to_fail loc env1 evd' rigidonly v2 t2 u2 in (evd'', Option.map (fun v2' -> mkLambda (name, u1, v2')) v2') | _ -> raise NoCoercion (* Look for cj' obtained from cj by inserting coercions, s.t. 
cj'.typ = t *) let inh_conv_coerce_to_gen rigidonly loc env evd cj (n, t) = match n with None -> let (evd', val') = try inh_conv_coerce_to_fail loc env evd rigidonly (Some cj.uj_val) cj.uj_type t with NoCoercion -> let evd = saturate_evd env evd in try inh_conv_coerce_to_fail loc env evd rigidonly (Some cj.uj_val) cj.uj_type t with NoCoercion -> error_actual_type_loc loc env evd cj t in let val' = match val' with Some v -> v | None -> assert(false) in (evd',{ uj_val = val'; uj_type = t }) | Some (init, cur) -> (evd, cj) let inh_conv_coerce_to = inh_conv_coerce_to_gen false let inh_conv_coerce_rigid_to = inh_conv_coerce_to_gen true let inh_conv_coerces_to loc env (evd : evar_map) t (abs, t') = if abs = None then try fst (inh_conv_coerce_to_fail loc env evd true None t t') with NoCoercion -> evd (* Maybe not enough information to unify *) else evd (* Still problematic, as it changes unification let nabsinit, nabs = match abs with None -> 0, 0 | Some (init, cur) -> init, cur in try let (rels, rng) = (* a little more effort to get products is needed *) try decompose_prod_n nabs t with _ -> if !Flags.debug then msg_warning (str "decompose_prod_n failed"); raise (Invalid_argument "Coercion.inh_conv_coerces_to") in (* The final range free variables must have been replaced by evars, we accept only that evars in rng are applied to free vars. *) if noccur_with_meta 0 (succ nabsinit) rng then ( let env', t, t' = let env' = List.fold_right (fun (n, t) env -> push_rel (n, None, t) env) rels env in env', rng, lift nabs t' in try pi1 (inh_conv_coerce_to_fail loc env' evd None t t') with NoCoercion -> evd) (* Maybe not enough information to unify *) (*let sigma = evd in error_cannot_coerce env' sigma (t, t'))*) else evd with Invalid_argument _ -> evd *) end coq-8.4pl2/pretyping/tacred.ml0000640000175000001440000011250212102030400015423 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* is_evaluable_const env cst | EvalVarRef id -> is_evaluable_var env id let value_of_evaluable_ref env = function | EvalConstRef con -> constant_value env con | EvalVarRef id -> Option.get (pi2 (lookup_named id env)) let constr_of_evaluable_ref = function | EvalConstRef con -> mkConst con | EvalVarRef id -> mkVar id let evaluable_of_global_reference env = function | ConstRef cst when is_evaluable_const env cst -> EvalConstRef cst | VarRef id when is_evaluable_var env id -> EvalVarRef id | r -> error_not_evaluable r let global_of_evaluable_reference = function | EvalConstRef cst -> ConstRef cst | EvalVarRef id -> VarRef id type evaluable_reference = | EvalConst of constant | EvalVar of identifier | EvalRel of int | EvalEvar of existential let mkEvalRef = function | EvalConst cst -> mkConst cst | EvalVar id -> mkVar id | EvalRel n -> mkRel n | EvalEvar ev -> mkEvar ev let isEvalRef env c = match kind_of_term c with | Const sp -> is_evaluable env (EvalConstRef sp) | Var id -> is_evaluable env (EvalVarRef id) | Rel _ | Evar _ -> true | _ -> false let destEvalRef c = match kind_of_term c with | Const cst -> EvalConst cst | Var id -> EvalVar id | Rel n -> EvalRel n | Evar ev -> EvalEvar ev | _ -> anomaly "Not an unfoldable reference" let reference_opt_value sigma env = function | EvalConst cst -> constant_opt_value env cst | EvalVar id -> let (_,v,_) = lookup_named id env in v | EvalRel n -> let (_,v,_) = lookup_rel n env in Option.map (lift n) v | EvalEvar ev -> Evd.existential_opt_value sigma ev exception NotEvaluable let 
reference_value sigma env c = match reference_opt_value sigma env c with | None -> raise NotEvaluable | Some d -> d (************************************************************************) (* Reduction of constants hiding a fixpoint (e.g. for "simpl" tactic). *) (* One reuses the name of the function after reduction of the fixpoint *) type constant_evaluation = | EliminationFix of int * int * (int * (int * constr) list * int) | EliminationMutualFix of int * evaluable_reference * ((int*evaluable_reference) option array * (int * (int * constr) list * int)) | EliminationCases of int | NotAnElimination (* We use a cache registered as a global table *) let eval_table = ref Cmap_env.empty type frozen = (int * constant_evaluation) Cmap_env.t let init () = eval_table := Cmap_env.empty let freeze () = !eval_table let unfreeze ct = eval_table := ct let _ = Summary.declare_summary "evaluation" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } (* [compute_consteval] determines whether c is an "elimination constant" either [yn:Tn]..[y1:T1](match yi with f1..fk end g1 ..gp) or [yn:Tn]..[y1:T1](Fix(f|t) yi1..yip) with yi1..yip distinct variables among the yi, not occurring in t In the second case, [check_fix_reversibility [T1;...;Tn] args fix] checks that [args] is a subset of disjoint variables in y1..yn (a necessary condition for reversibility). It also returns the relevant information ([i1,Ti1;..;ip,Tip],n) in order to compute an equivalent of Fix(f|t) such that g := [xp:Tip']..[x1:Ti1'](f a1..an) == [xp:Tip']..[x1:Ti1'](Fix(f|t) yi1..yip) with a_k:=y_k if k<>i_j, a_k:=args_k otherwise, and Tij':=Tij[x1..xi(j-1) <- a1..ai(j-1)] Note that the types Tk, when no i_j=k, must not be dependent on the xp..x1. *) let check_fix_reversibility labs args ((lv,i),(_,tys,bds)) = let n = List.length labs in let nargs = List.length args in if nargs > n then raise Elimconst; let nbfix = Array.length bds in let li = List.map (function d -> match kind_of_term d with | Rel k -> if array_for_all (noccurn k) tys && array_for_all (noccurn (k+nbfix)) bds then (k, List.nth labs (k-1)) else raise Elimconst | _ -> raise Elimconst) args in let reversible_rels = List.map fst li in if not (list_distinct reversible_rels) then raise Elimconst; list_iter_i (fun i t_i -> if not (List.mem_assoc (i+1) li) then let fvs = List.map ((+) (i+1)) (Intset.elements (free_rels t_i)) in if list_intersect fvs reversible_rels <> [] then raise Elimconst) labs; let k = lv.(i) in if k < nargs then (* Such an optimisation would need eta-expansion let p = destRel (List.nth args k) in EliminationFix (n-p+1,(nbfix,li,n)) *) EliminationFix (n,nargs,(nbfix,li,n)) else EliminationFix (n-nargs+k+1,nargs,(nbfix,li,n)) (* Heuristic to look if global names are associated to other components of a mutual fixpoint *) let invert_name labs l na0 env sigma ref = function | Name id -> let minfxargs = List.length l in if na0 <> Name id then let refi = match ref with | EvalRel _ | EvalEvar _ -> None | EvalVar id' -> Some (EvalVar id) | EvalConst kn -> Some (EvalConst (con_with_label kn (label_of_id id))) in match refi with | None -> None | Some ref -> try match reference_opt_value sigma env ref with | None -> None | Some c -> let labs',ccl = decompose_lam c in let _, l' = whd_betalet_stack sigma ccl in let labs' = List.map snd labs' in if labs' = labs & l = l' then Some (minfxargs,ref) else None with Not_found (* Undefined ref *) -> None else Some (minfxargs,ref) | Anonymous -> None (* Actually, should not occur *) 
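(* Illustrative note (added for exposition; not part of the original file).
   A typical "elimination constant" in the sense of [compute_consteval] is a
   definition whose body is a lambda prefix over a match on one of the bound
   variables, e.g. in Coq syntax:

     Definition pred (n:nat) : nat := match n with O => O | S p => p end.

   Its body [fun n => match n with ... end] is classified as
   [EliminationCases 1], so the "simpl" strategy unfolds [pred t] only when
   [t] reduces to a constructor-headed term: [pred (S k)] simplifies to [k],
   while [pred x] is kept folded. Constants whose body is a lambda prefix
   over a [Fix] applied to some of the bound variables are classified as
   [EliminationFix]/[EliminationMutualFix] instead, which is what allows
   "simpl" to refold recursive calls under the constant's original name. *)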
(* [compute_consteval_direct] expand all constant in a whole, but [compute_consteval_mutual_fix] only one by one, until finding the last one before the Fix if the latter is mutually defined *) let compute_consteval_direct sigma env ref = let rec srec env n labs c = let c',l = whd_betadelta_stack env sigma c in match kind_of_term c' with | Lambda (id,t,g) when l=[] -> srec (push_rel (id,None,t) env) (n+1) (t::labs) g | Fix fix -> (try check_fix_reversibility labs l fix with Elimconst -> NotAnElimination) | Case (_,_,d,_) when isRel d -> EliminationCases n | _ -> NotAnElimination in match reference_opt_value sigma env ref with | None -> NotAnElimination | Some c -> srec env 0 [] c let compute_consteval_mutual_fix sigma env ref = let rec srec env minarg labs ref c = let c',l = whd_betalet_stack sigma c in let nargs = List.length l in match kind_of_term c' with | Lambda (na,t,g) when l=[] -> srec (push_rel (na,None,t) env) (minarg+1) (t::labs) ref g | Fix ((lv,i),(names,_,_)) -> (* Last known constant wrapping Fix is ref = [labs](Fix l) *) (match compute_consteval_direct sigma env ref with | NotAnElimination -> (*Above const was eliminable but this not!*) NotAnElimination | EliminationFix (minarg',minfxargs,infos) -> let refs = Array.map (invert_name labs l names.(i) env sigma ref) names in let new_minarg = max (minarg'+minarg-nargs) minarg' in EliminationMutualFix (new_minarg,ref,(refs,infos)) | _ -> assert false) | _ when isEvalRef env c' -> (* Forget all \'s and args and do as if we had started with c' *) let ref = destEvalRef c' in (match reference_opt_value sigma env ref with | None -> anomaly "Should have been trapped by compute_direct" | Some c -> srec env (minarg-nargs) [] ref c) | _ -> (* Should not occur *) NotAnElimination in match reference_opt_value sigma env ref with | None -> (* Should not occur *) NotAnElimination | Some c -> srec env 0 [] ref c let compute_consteval sigma env ref = match compute_consteval_direct sigma env ref with | EliminationFix (_,_,(nbfix,_,_)) when nbfix <> 1 -> compute_consteval_mutual_fix sigma env ref | elim -> elim let reference_eval sigma env = function | EvalConst cst as ref -> (try Cmap_env.find cst !eval_table with Not_found -> begin let v = compute_consteval sigma env ref in eval_table := Cmap_env.add cst v !eval_table; v end) | ref -> compute_consteval sigma env ref (* If f is bound to EliminationFix (n',infos), then n' is the minimal number of args for starting the reduction and infos is (nbfix,[(yi1,Ti1);...;(yip,Tip)],n) indicating that f converts to some [y1:T1,...,yn:Tn](Fix(..) yip .. yi1) where the y_{i_j} consist in a disjoint subset of the yi, i.e. 1 <= ij <= n and the ij are disjoint (in particular, p <= n). f is applied to largs := arg1 .. argn and we need for recursive calls to build the function g := [xp:Tip',...,x1:Ti1'](f a1 ... an) s.t. (g u1 ... up) reduces to (Fix(..) u1 ... 
up) This is made possible by setting a_k:=x_j if k=i_j for some j a_k:=arg_k otherwise The type Tij' is Tij[yi(j-1)..y1 <- ai(j-1)..a1] *) let x = Name (id_of_string "x") let make_elim_fun (names,(nbfix,lv,n)) largs = let lu = list_firstn n (list_of_stack largs) in let p = List.length lv in let lyi = List.map fst lv in let la = list_map_i (fun q aq -> (* k from the comment is q+1 *) try mkRel (p+1-(list_index (n-q) lyi)) with Not_found -> aq) 0 (List.map (lift p) lu) in fun i -> match names.(i) with | None -> None | Some (minargs,ref) -> let body = applistc (mkEvalRef ref) la in let g = list_fold_left_i (fun q (* j = n+1-q *) c (ij,tij) -> let subst = List.map (lift (-q)) (list_firstn (n-ij) la) in let tij' = substl (List.rev subst) tij in mkLambda (x,tij',c)) 1 body (List.rev lv) in Some (minargs,g) (* [f] is convertible to [Fix(recindices,bodynum),bodyvect)]: do so that the reduction uses this extra information *) let dummy = mkProp let vfx = id_of_string"_expanded_fix_" let vfun = id_of_string"_eliminator_function_" (* Mark every occurrence of substituted vars (associated to a function) as a problem variable: an evar that can be instantiated either by vfx (expanded fixpoint) or vfun (named function). *) let substl_with_function subst constr = let cnt = ref 0 in let evd = ref Evd.empty in let minargs = ref Intmap.empty in let v = Array.of_list subst in let rec subst_total k c = match kind_of_term c with Rel i when k if i <= k + Array.length v then match v.(i-k-1) with | (fx,Some(min,ref)) -> decr cnt; evd := Evd.add !evd !cnt (Evd.make_evar (val_of_named_context [(vfx,None,dummy);(vfun,None,dummy)]) dummy); minargs := Intmap.add !cnt min !minargs; lift k (mkEvar(!cnt,[|fx;ref|])) | (fx,None) -> lift k fx else mkRel (i - Array.length v) | _ -> map_constr_with_binders succ subst_total k c in let c = subst_total 0 constr in (c,!evd,!minargs) exception Partial (* each problem variable that cannot be made totally applied even by reduction is solved by the expanded fix term. 
*) let solve_arity_problem env sigma fxminargs c = let evm = ref sigma in let set_fix i = evm := Evd.define i (mkVar vfx) !evm in let rec check strict c = let c' = whd_betaiotazeta sigma c in let (h,rcargs) = decompose_app c' in match kind_of_term h with Evar(i,_) when Intmap.mem i fxminargs && not (Evd.is_defined !evm i) -> let minargs = Intmap.find i fxminargs in if List.length rcargs < minargs then if strict then set_fix i else raise Partial; List.iter (check strict) rcargs | (Var _|Const _) when isEvalRef env h -> (match reference_opt_value sigma env (destEvalRef h) with Some h' -> let bak = !evm in (try List.iter (check false) rcargs with Partial -> evm := bak; check strict (applist(h',rcargs))) | None -> List.iter (check strict) rcargs) | _ -> iter_constr (check strict) c' in check true c; !evm let substl_checking_arity env subst c = (* we initialize the problem: *) let body,sigma,minargs = substl_with_function subst c in (* we collect arity constraints *) let sigma' = solve_arity_problem env sigma minargs body in (* we propagate the constraints: solved problems are substituted; the other ones are replaced by the function symbol *) let rec nf_fix c = match kind_of_term c with Evar(i,[|fx;f|] as ev) when Intmap.mem i minargs -> (match Evd.existential_opt_value sigma' ev with Some c' -> c' | None -> f) | _ -> map_constr nf_fix c in nf_fix body let contract_fix_use_function env sigma f ((recindices,bodynum),(_names,_types,bodies as typedbodies)) = let nbodies = Array.length recindices in let make_Fi j = (mkFix((recindices,j),typedbodies), f j) in let lbodies = list_tabulate make_Fi nbodies in substl_checking_arity env (List.rev lbodies) (nf_beta sigma bodies.(bodynum)) let reduce_fix_use_function env sigma f whfun fix stack = match fix_recarg fix stack with | None -> NotReducible | Some (recargnum,recarg) -> let (recarg'hd,_ as recarg') = if isRel recarg then (* The recarg cannot be a local def, no worry about the right env *) (recarg, empty_stack) else whfun (recarg, empty_stack) in let stack' = stack_assign stack recargnum (app_stack recarg') in (match kind_of_term recarg'hd with | Construct _ -> Reduced (contract_fix_use_function env sigma f fix,stack') | _ -> NotReducible) let contract_cofix_use_function env sigma f (bodynum,(_names,_,bodies as typedbodies)) = let nbodies = Array.length bodies in let make_Fi j = (mkCoFix(j,typedbodies), f j) in let subbodies = list_tabulate make_Fi nbodies in substl_checking_arity env (List.rev subbodies) (nf_beta sigma bodies.(bodynum)) let reduce_mind_case_use_function func env sigma mia = match kind_of_term mia.mconstr with | Construct(ind_sp,i) -> let real_cargs = list_skipn mia.mci.ci_npar mia.mcargs in applist (mia.mlf.(i-1), real_cargs) | CoFix (bodynum,(names,_,_) as cofix) -> let build_cofix_name = if isConst func then let minargs = List.length mia.mcargs in fun i -> if i = bodynum then Some (minargs,func) else match names.(i) with | Anonymous -> None | Name id -> (* In case of a call to another component of a block of mutual inductive, try to reuse the global name if the block was indeed initially built as a global definition *) let kn = con_with_label (destConst func) (label_of_id id) in try match constant_opt_value env kn with | None -> None (* TODO: check kn is correct *) | Some _ -> Some (minargs,mkConst kn) with Not_found -> None else fun _ -> None in let cofix_def = contract_cofix_use_function env sigma build_cofix_name cofix in mkCase (mia.mci, mia.mP, applist(cofix_def,mia.mcargs), mia.mlf) | _ -> assert false let special_red_case 
env sigma whfun (ci, p, c, lf) = let rec redrec s = let (constr, cargs) = whfun s in if isEvalRef env constr then let ref = destEvalRef constr in match reference_opt_value sigma env ref with | None -> raise Redelimination | Some gvalue -> if reducible_mind_case gvalue then reduce_mind_case_use_function constr env sigma {mP=p; mconstr=gvalue; mcargs=list_of_stack cargs; mci=ci; mlf=lf} else redrec (gvalue, cargs) else if reducible_mind_case constr then reduce_mind_case {mP=p; mconstr=constr; mcargs=list_of_stack cargs; mci=ci; mlf=lf} else raise Redelimination in redrec (c, empty_stack) (* data structure to hold the map kn -> rec_args for simpl *) type behaviour = { b_nargs: int; b_recargs: int list; b_dont_expose_case: bool; } let behaviour_table = ref (Refmap.empty : behaviour Refmap.t) let _ = Summary.declare_summary "simplbehaviour" { Summary.freeze_function = (fun () -> !behaviour_table); Summary.unfreeze_function = (fun x -> behaviour_table := x); Summary.init_function = (fun () -> behaviour_table := Refmap.empty) } type simpl_flag = [ `SimplDontExposeCase | `SimplNeverUnfold ] type req = | ReqLocal | ReqGlobal of global_reference * (int list * int * simpl_flag list) let load_simpl_behaviour _ (_,(_,(r, b))) = behaviour_table := Refmap.add r b !behaviour_table let cache_simpl_behaviour o = load_simpl_behaviour 1 o let classify_simpl_behaviour = function | ReqLocal, _ -> Dispose | ReqGlobal _, _ as o -> Substitute o let subst_simpl_behaviour (subst, (_, (r,o as orig))) = ReqLocal, let r' = fst (subst_global subst r) in if r==r' then orig else (r',o) let discharge_simpl_behaviour = function | _,(ReqGlobal (ConstRef c, req), (_, b)) -> let c' = pop_con c in let vars = Lib.section_segment_of_constant c in let extra = List.length vars in let nargs' = if b.b_nargs < 0 then b.b_nargs else b.b_nargs + extra in let recargs' = List.map ((+) extra) b.b_recargs in let b' = { b with b_nargs = nargs'; b_recargs = recargs' } in Some (ReqGlobal (ConstRef c', req), (ConstRef c', b')) | _ -> None let rebuild_simpl_behaviour = function | req, (ConstRef c, _ as x) -> req, x | _ -> assert false let inSimplBehaviour = declare_object { (default_object "SIMPLBEHAVIOUR") with load_function = load_simpl_behaviour; cache_function = cache_simpl_behaviour; classify_function = classify_simpl_behaviour; subst_function = subst_simpl_behaviour; discharge_function = discharge_simpl_behaviour; rebuild_function = rebuild_simpl_behaviour; } let set_simpl_behaviour local r (recargs, nargs, flags as req) = let nargs = if List.mem `SimplNeverUnfold flags then max_int else nargs in let behaviour = { b_nargs = nargs; b_recargs = recargs; b_dont_expose_case = List.mem `SimplDontExposeCase flags } in let req = if local then ReqLocal else ReqGlobal (r, req) in Lib.add_anonymous_leaf (inSimplBehaviour (req, (r, behaviour))) ;; let get_simpl_behaviour r = try let b = Refmap.find r !behaviour_table in let flags = if b.b_nargs = max_int then [`SimplNeverUnfold] else if b.b_dont_expose_case then [`SimplDontExposeCase] else [] in Some (b.b_recargs, (if b.b_nargs = max_int then -1 else b.b_nargs), flags) with Not_found -> None let get_behaviour = function | EvalVar _ | EvalRel _ | EvalEvar _ -> raise Not_found | EvalConst c -> Refmap.find (ConstRef c) !behaviour_table let recargs r = try let b = get_behaviour r in Some (b.b_recargs, b.b_nargs) with Not_found -> None let dont_expose_case r = try (get_behaviour r).b_dont_expose_case with Not_found -> false (* [red_elim_const] contracts iota/fix/cofix redexes hidden behind constants by 
keeping the name of the constants in the recursive calls; it fails if no redex is around *) let rec red_elim_const env sigma ref largs = let nargs = stack_args_size largs in let largs, unfold_anyway, unfold_nonelim = match recargs ref with | None -> largs, false, false | Some (_,n) when nargs < n -> raise Redelimination | Some (x::l,_) when nargs <= List.fold_left max x l -> raise Redelimination | Some (l,n) -> List.fold_left (fun stack i -> let arg = stack_nth stack i in let rarg = whd_construct_state env sigma (arg, empty_stack) in match kind_of_term (fst rarg) with | Construct _ -> stack_assign stack i (app_stack rarg) | _ -> raise Redelimination) largs l, n >= 0 && l = [] && nargs >= n, n >= 0 && l <> [] && nargs >= n in try match reference_eval sigma env ref with | EliminationCases n when nargs >= n -> let c = reference_value sigma env ref in let c', lrest = whd_betadelta_state env sigma (c,largs) in let whfun = whd_simpl_state env sigma in (special_red_case env sigma whfun (destCase c'), lrest) | EliminationFix (min,minfxargs,infos) when nargs >= min -> let c = reference_value sigma env ref in let d, lrest = whd_betadelta_state env sigma (c,largs) in let f = make_elim_fun ([|Some (minfxargs,ref)|],infos) largs in let whfun = whd_construct_state env sigma in (match reduce_fix_use_function env sigma f whfun (destFix d) lrest with | NotReducible -> raise Redelimination | Reduced (c,rest) -> (nf_beta sigma c, rest)) | EliminationMutualFix (min,refgoal,refinfos) when nargs >= min -> let rec descend ref args = let c = reference_value sigma env ref in if ref = refgoal then (c,args) else let c', lrest = whd_betalet_state sigma (c,args) in descend (destEvalRef c') lrest in let (_, midargs as s) = descend ref largs in let d, lrest = whd_betadelta_state env sigma s in let f = make_elim_fun refinfos midargs in let whfun = whd_construct_state env sigma in (match reduce_fix_use_function env sigma f whfun (destFix d) lrest with | NotReducible -> raise Redelimination | Reduced (c,rest) -> (nf_beta sigma c, rest)) | NotAnElimination when unfold_nonelim -> let c = reference_value sigma env ref in whd_betaiotazeta sigma (app_stack (c, largs)), empty_stack | _ -> raise Redelimination with Redelimination when unfold_anyway -> let c = reference_value sigma env ref in whd_betaiotazeta sigma (app_stack (c, largs)), empty_stack (* reduce to whd normal form or to an applied constant that does not hide a reducible iota/fix/cofix redex (the "simpl" tactic) *) and whd_simpl_state env sigma s = let rec redrec (x, stack as s) = match kind_of_term x with | Lambda (na,t,c) -> (match decomp_stack stack with | None -> s | Some (a,rest) -> stacklam redrec [a] c rest) | LetIn (n,b,t,c) -> stacklam redrec [b] c stack | App (f,cl) -> redrec (f, append_stack cl stack) | Cast (c,_,_) -> redrec (c, stack) | Case (ci,p,c,lf) -> (try redrec (special_red_case env sigma redrec (ci,p,c,lf), stack) with Redelimination -> s) | Fix fix -> (try match reduce_fix (whd_construct_state env) sigma fix stack with | Reduced s' -> redrec s' | NotReducible -> s with Redelimination -> s) | _ when isEvalRef env x -> let ref = destEvalRef x in (try let hd, _ as s' = redrec (red_elim_const env sigma ref stack) in let rec is_case x = match kind_of_term x with | Lambda (_,_, x) | LetIn (_,_,_, x) | Cast (x, _,_) -> is_case x | App (hd, _) -> is_case hd | Case _ -> true | _ -> false in if dont_expose_case ref && is_case hd then raise Redelimination else s' with Redelimination -> s) | _ -> s in redrec s (* reduce until finding an applied constructor 
or fail *) and whd_construct_state env sigma s = let (constr, cargs as s') = whd_simpl_state env sigma s in if reducible_mind_case constr then s' else if isEvalRef env constr then let ref = destEvalRef constr in match reference_opt_value sigma env ref with | None -> raise Redelimination | Some gvalue -> whd_construct_state env sigma (gvalue, cargs) else raise Redelimination (************************************************************************) (* Special Purpose Reduction Strategies *) (* Red reduction tactic: one step of delta reduction + full beta-iota-fix-cofix-zeta-cast at the head of the conclusion of a sequence of products; fails if no delta redex is around *) let try_red_product env sigma c = let simpfun = clos_norm_flags betaiotazeta env sigma in let rec redrec env x = match kind_of_term x with | App (f,l) -> (match kind_of_term f with | Fix fix -> let stack = append_stack l empty_stack in (match fix_recarg fix stack with | None -> raise Redelimination | Some (recargnum,recarg) -> let recarg' = redrec env recarg in let stack' = stack_assign stack recargnum recarg' in simpfun (app_stack (f,stack'))) | _ -> simpfun (appvect (redrec env f, l))) | Cast (c,_,_) -> redrec env c | Prod (x,a,b) -> mkProd (x, a, redrec (push_rel (x,None,a) env) b) | LetIn (x,a,b,t) -> redrec env (subst1 a t) | Case (ci,p,d,lf) -> simpfun (mkCase (ci,p,redrec env d,lf)) | _ when isEvalRef env x -> (* TO DO: re-fold fixpoints after expansion *) (* to get true one-step reductions *) let ref = destEvalRef x in (match reference_opt_value sigma env ref with | None -> raise Redelimination | Some c -> c) | _ -> raise Redelimination in redrec env c let red_product env sigma c = try try_red_product env sigma c with Redelimination -> error "Not reducible." (* (* This old version of hnf uses betadeltaiota instead of itself (resp whd_construct_state) to reduce the argument of Case (resp Fix); The new version uses the "simpl" strategy instead. For instance, Variable n:nat. Eval hnf in match (plus (S n) O) with S n => n | _ => O end. 
returned (fix plus (n m : nat) {struct n} : nat := match n with | O => m | S p => S (plus p m) end) n 0 while the new version returns (plus n O) *) let whd_simpl_orelse_delta_but_fix_old env sigma c = let whd_all = whd_betadeltaiota_state env sigma in let rec redrec (x, stack as s) = match kind_of_term x with | Lambda (na,t,c) -> (match decomp_stack stack with | None -> s | Some (a,rest) -> stacklam redrec [a] c rest) | LetIn (n,b,t,c) -> stacklam redrec [b] c stack | App (f,cl) -> redrec (f, append_stack cl stack) | Cast (c,_,_) -> redrec (c, stack) | Case (ci,p,d,lf) -> (try redrec (special_red_case env sigma whd_all (ci,p,d,lf), stack) with Redelimination -> s) | Fix fix -> (match reduce_fix whd_all fix stack with | Reduced s' -> redrec s' | NotReducible -> s) | _ when isEvalRef env x -> let ref = destEvalRef x in (try redrec (red_elim_const env sigma ref stack) with Redelimination -> match reference_opt_value sigma env ref with | Some c -> (match kind_of_term (strip_lam c) with | CoFix _ | Fix _ -> s | _ -> redrec (c, stack)) | None -> s) | _ -> s in app_stack (redrec (c, empty_stack)) *) (* Same as [whd_simpl] but also reduces constants that do not hide a reducible fix, but does this reduction of constants only until it immediately hides a non reducible fix or a cofix *) let whd_simpl_orelse_delta_but_fix env sigma c = let rec redrec s = let (constr, stack as s') = whd_simpl_state env sigma s in if isEvalRef env constr then match reference_opt_value sigma env (destEvalRef constr) with | Some c -> (match kind_of_term (strip_lam c) with | CoFix _ | Fix _ -> s' | _ -> redrec (c, stack)) | None -> s' else s' in app_stack (redrec (c, empty_stack)) let hnf_constr = whd_simpl_orelse_delta_but_fix (* The "simpl" reduction tactic *) let whd_simpl env sigma c = app_stack (whd_simpl_state env sigma (c, empty_stack)) let simpl env sigma c = strong whd_simpl env sigma c (* Reduction at specific subterms *) let matches_head c t = match kind_of_term t with | App (f,_) -> matches c f | _ -> raise PatternMatchingFailure let contextually byhead ((nowhere_except_in,locs),c) f env sigma t = let maxocc = List.fold_right max locs 0 in let pos = ref 1 in let rec traverse (env,c as envc) t = if nowhere_except_in & (!pos > maxocc) then t else try let subst = if byhead then matches_head c t else matches c t in let ok = if nowhere_except_in then List.mem !pos locs else not (List.mem !pos locs) in incr pos; if ok then f subst env sigma t else if byhead then (* find other occurrences of c in t; TODO: ensure left-to-right *) let (f,l) = destApp t in mkApp (f, array_map_left (traverse envc) l) else t with PatternMatchingFailure -> map_constr_with_binders_left_to_right (fun d (env,c) -> (push_rel d env,lift_pattern 1 c)) traverse envc t in let t' = traverse (env,c) t in if List.exists (fun o -> o >= !pos) locs then error_invalid_occurrence locs; t' (* linear bindings (following pretty-printer) of the value of name in c. * n is the number of the next occurence of name. * ol is the occurence list to find. 
*) let substlin env evalref n (nowhere_except_in,locs) c = let maxocc = List.fold_right max locs 0 in let pos = ref n in assert (List.for_all (fun x -> x >= 0) locs); let value = value_of_evaluable_ref env evalref in let term = constr_of_evaluable_ref evalref in let rec substrec () c = if nowhere_except_in & !pos > maxocc then c else if eq_constr c term then let ok = if nowhere_except_in then List.mem !pos locs else not (List.mem !pos locs) in incr pos; if ok then value else c else map_constr_with_binders_left_to_right (fun _ () -> ()) substrec () c in let t' = substrec () c in (!pos, t') let string_of_evaluable_ref env = function | EvalVarRef id -> string_of_id id | EvalConstRef kn -> string_of_qualid (Nametab.shortest_qualid_of_global (vars_of_env env) (ConstRef kn)) let unfold env sigma name = if is_evaluable env name then clos_norm_flags (unfold_red name) env sigma else error (string_of_evaluable_ref env name^" is opaque.") (* [unfoldoccs : (readable_constraints -> (int list * full_path) -> constr -> constr)] * Unfolds the constant name in a term c following a list of occurrences occl. * at the occurrences of occ_list. If occ_list is empty, unfold all occurences. * Performs a betaiota reduction after unfolding. *) let unfoldoccs env sigma ((nowhere_except_in,locs as plocs),name) c = if locs = [] then if nowhere_except_in then c else unfold env sigma name c else let (nbocc,uc) = substlin env name 1 plocs c in if nbocc = 1 then error ((string_of_evaluable_ref env name)^" does not occur."); let rest = List.filter (fun o -> o >= nbocc) locs in if rest <> [] then error_invalid_occurrence rest; nf_betaiota sigma uc (* Unfold reduction tactic: *) let unfoldn loccname env sigma c = List.fold_left (fun c occname -> unfoldoccs env sigma occname c) c loccname (* Re-folding constants tactics: refold com in term c *) let fold_one_com com env sigma c = let rcom = try red_product env sigma com with Redelimination -> error "Not reducible." in (* Reason first on the beta-iota-zeta normal form of the constant as unfold produces it, so that the "unfold f; fold f" configuration works to refold fix expressions *) let a = subst_term (clos_norm_flags unfold_side_red env sigma rcom) c in if not (eq_constr a c) then subst1 com a else (* Then reason on the non beta-iota-zeta form for compatibility - even if it is probably a useless configuration *) let a = subst_term rcom c in subst1 com a let fold_commands cl env sigma c = List.fold_right (fun com -> fold_one_com com env sigma) (List.rev cl) c (* call by value reduction functions *) let cbv_norm_flags flags env sigma t = cbv_norm (create_cbv_infos flags env sigma) t let cbv_beta = cbv_norm_flags beta empty_env let cbv_betaiota = cbv_norm_flags betaiota empty_env let cbv_betadeltaiota env sigma = cbv_norm_flags betadeltaiota env sigma let compute = cbv_betadeltaiota (* Pattern *) (* gives [na:ta]c' such that c converts to ([na:ta]c' a), abstracting only * the specified occurrences. 
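 * For instance (informally), abstracting only the second occurrence of
 * [a] in [P a a] yields the lambda [fun x:A => P a x], which
 * [pattern_occs] then reapplies to [a]; the final call to
 * [Typing.type_of] checks that the abstracted term is well-typed and
 * otherwise reports an [InvalidAbstraction] error.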
*) let abstract_scheme env sigma (locc,a) c = let ta = Retyping.get_type_of env sigma a in let na = named_hd env ta Anonymous in if occur_meta ta then error "Cannot find a type for the generalisation."; if occur_meta a then mkLambda (na,ta,c) else mkLambda (na,ta,subst_closed_term_occ locc a c) let pattern_occs loccs_trm env sigma c = let abstr_trm = List.fold_right (abstract_scheme env sigma) loccs_trm c in try let _ = Typing.type_of env sigma abstr_trm in applist(abstr_trm, List.map snd loccs_trm) with Type_errors.TypeError (env',t) -> raise (ReductionTacticError (InvalidAbstraction (env,abstr_trm,(env',t)))) (* Used in several tactics. *) (* put t as t'=(x1:A1)..(xn:An)B with B an inductive definition of name name return name, B and t' *) let reduce_to_ind_gen allow_product env sigma t = let rec elimrec env t l = let t = hnf_constr env sigma t in match kind_of_term (fst (decompose_app t)) with | Ind ind-> (ind, it_mkProd_or_LetIn t l) | Prod (n,ty,t') -> if allow_product then elimrec (push_rel (n,None,ty) env) t' ((n,None,ty)::l) else errorlabstrm "" (str"Not an inductive definition.") | _ -> (* Last chance: we allow to bypass the Opaque flag (as it was partially the case between V5.10 and V8.1 *) let t' = whd_betadeltaiota env sigma t in match kind_of_term (fst (decompose_app t')) with | Ind ind-> (ind, it_mkProd_or_LetIn t' l) | _ -> errorlabstrm "" (str"Not an inductive product.") in elimrec env t [] let reduce_to_quantified_ind x = reduce_to_ind_gen true x let reduce_to_atomic_ind x = reduce_to_ind_gen false x let rec find_hnf_rectype env sigma t = let ind,t = reduce_to_atomic_ind env sigma t in ind, snd (decompose_app t) (* Reduce the weak-head redex [beta,iota/fix/cofix[all],cast,zeta,simpl/delta] or raise [NotStepReducible] if not a weak-head redex *) exception NotStepReducible let one_step_reduce env sigma c = let rec redrec (x, stack) = match kind_of_term x with | Lambda (n,t,c) -> (match decomp_stack stack with | None -> raise NotStepReducible | Some (a,rest) -> (subst1 a c, rest)) | App (f,cl) -> redrec (f, append_stack cl stack) | LetIn (_,f,_,cl) -> (subst1 f cl,stack) | Cast (c,_,_) -> redrec (c,stack) | Case (ci,p,c,lf) -> (try (special_red_case env sigma (whd_simpl_state env sigma) (ci,p,c,lf), stack) with Redelimination -> raise NotStepReducible) | Fix fix -> (match reduce_fix (whd_construct_state env) sigma fix stack with | Reduced s' -> s' | NotReducible -> raise NotStepReducible) | _ when isEvalRef env x -> let ref = destEvalRef x in (try red_elim_const env sigma ref stack with Redelimination -> match reference_opt_value sigma env ref with | Some d -> d, stack | None -> raise NotStepReducible) | _ -> raise NotStepReducible in app_stack (redrec (c, empty_stack)) let isIndRef = function IndRef _ -> true | _ -> false let reduce_to_ref_gen allow_product env sigma ref t = if isIndRef ref then let (mind,t) = reduce_to_ind_gen allow_product env sigma t in if IndRef mind <> ref then errorlabstrm "" (str "Cannot recognize a statement based on " ++ Nametab.pr_global_env Idset.empty ref ++ str".") else t else (* lazily reduces to match the head of [t] with the expected [ref] *) let rec elimrec env t l = let c, _ = Reductionops.whd_stack sigma t in match kind_of_term c with | Prod (n,ty,t') -> if allow_product then elimrec (push_rel (n,None,t) env) t' ((n,None,ty)::l) else errorlabstrm "" (str "Cannot recognize an atomic statement based on " ++ Nametab.pr_global_env Idset.empty ref ++ str".") | _ -> try if global_of_constr c = ref then it_mkProd_or_LetIn t l else raise Not_found 
with Not_found -> try let t' = nf_betaiota sigma (one_step_reduce env sigma t) in elimrec env t' l with NotStepReducible -> errorlabstrm "" (str "Cannot recognize a statement based on " ++ Nametab.pr_global_env Idset.empty ref ++ str".") in elimrec env t [] let reduce_to_quantified_ref = reduce_to_ref_gen true let reduce_to_atomic_ref = reduce_to_ref_gen false coq-8.4pl2/pretyping/termops.mli0000640000175000001440000002627712010532755016062 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Univ.universe_level val new_univ : unit -> Univ.universe val new_sort_in_family : sorts_family -> sorts val new_Type : unit -> types val new_Type_sort : unit -> sorts val refresh_universes : types -> types val refresh_universes_strict : types -> types (** printers *) val print_sort : sorts -> std_ppcmds val pr_sort_family : sorts_family -> std_ppcmds (** debug printer: do not use to display terms to the casual user... *) val set_print_constr : (env -> constr -> std_ppcmds) -> unit val print_constr : constr -> std_ppcmds val print_constr_env : env -> constr -> std_ppcmds val print_named_context : env -> std_ppcmds val pr_rel_decl : env -> rel_declaration -> std_ppcmds val print_rel_context : env -> std_ppcmds val print_env : env -> std_ppcmds (** about contexts *) val push_rel_assum : name * types -> env -> env val push_rels_assum : (name * types) list -> env -> env val push_named_rec_types : name array * types array * 'a -> env -> env val lookup_rel_id : identifier -> rel_context -> int * constr option * types (** builds argument lists matching a block of binders or a context *) val rel_vect : int -> int -> constr array val rel_list : int -> int -> constr list val extended_rel_list : int -> rel_context -> constr list val extended_rel_vect : int -> rel_context -> constr array (** iterators/destructors on terms *) val mkProd_or_LetIn : rel_declaration -> types -> types val mkProd_wo_LetIn : rel_declaration -> types -> types val it_mkProd : types -> (name * types) list -> types val it_mkLambda : constr -> (name * types) list -> constr val it_mkProd_or_LetIn : types -> rel_context -> types val it_mkProd_wo_LetIn : types -> rel_context -> types val it_mkLambda_or_LetIn : constr -> rel_context -> constr val it_mkNamedProd_or_LetIn : types -> named_context -> types val it_mkNamedProd_wo_LetIn : types -> named_context -> types val it_mkNamedLambda_or_LetIn : constr -> named_context -> constr val it_named_context_quantifier : (named_declaration -> 'a -> 'a) -> init:'a -> named_context -> 'a (** {6 Generic iterators on constr} *) val map_constr_with_named_binders : (name -> 'a -> 'a) -> ('a -> constr -> constr) -> 'a -> constr -> constr val map_constr_with_binders_left_to_right : (rel_declaration -> 'a -> 'a) -> ('a -> constr -> constr) -> 'a -> constr -> constr val map_constr_with_full_binders : (rel_declaration -> 'a -> 'a) -> ('a -> constr -> constr) -> 'a -> constr -> constr (** [fold_constr_with_binders g f n acc c] folds [f n] on the immediate subterms of [c] starting from [acc] and proceeding from left to right according to the usual representation of the constructions as [fold_constr] but it carries an extra data [n] (typically a lift index) which is processed by [g] (which typically add 1 to [n]) at each binder traversal; it is not recursive *) val fold_constr_with_binders : ('a -> 'a) -> ('a -> 'b -> constr -> 'b) -> 'a -> 'b -> constr -> 'b val iter_constr_with_full_binders : (rel_declaration -> 'a 
-> 'a) -> ('a -> constr -> unit) -> 'a -> constr -> unit (**********************************************************************) val strip_head_cast : constr -> constr val drop_extra_implicit_args : constr -> constr (** occur checks *) exception Occur val occur_meta : types -> bool val occur_existential : types -> bool val occur_meta_or_existential : types -> bool val occur_const : constant -> types -> bool val occur_evar : existential_key -> types -> bool val occur_var : env -> identifier -> types -> bool val occur_var_in_decl : env -> identifier -> 'a * types option * types -> bool val free_rels : constr -> Intset.t val dependent : constr -> constr -> bool val dependent_no_evar : constr -> constr -> bool val count_occurrences : constr -> constr -> int val collect_metas : constr -> int list val collect_vars : constr -> Idset.t (** for visible vars only *) val occur_term : constr -> constr -> bool (** Synonymous of dependent Substitution of metavariables *) type meta_value_map = (metavariable * constr) list val subst_meta : meta_value_map -> constr -> constr (** Type assignment for metavariables *) type meta_type_map = (metavariable * types) list (** [pop c] lifts by -1 the positive indexes in [c] *) val pop : constr -> constr (** {6 ... } *) (** Substitution of an arbitrary large term. Uses equality modulo reduction of let *) (** [subst_term_gen eq d c] replaces [Rel 1] by [d] in [c] using [eq] as equality *) val subst_term_gen : (constr -> constr -> bool) -> constr -> constr -> constr (** [replace_term_gen eq d e c] replaces [d] by [e] in [c] using [eq] as equality *) val replace_term_gen : (constr -> constr -> bool) -> constr -> constr -> constr -> constr (** [subst_term d c] replaces [Rel 1] by [d] in [c] *) val subst_term : constr -> constr -> constr (** [replace_term d e c] replaces [d] by [e] in [c] *) val replace_term : constr -> constr -> constr -> constr (** In occurrences sets, false = everywhere except and true = nowhere except *) type occurrences = bool * int list val all_occurrences : occurrences val no_occurrences_in_set : occurrences (** [subst_closed_term_occ_gen occl n c d] replaces occurrences of closed [c] at positions [occl], counting from [n], by [Rel 1] in [d] *) val subst_closed_term_occ_gen : occurrences -> int -> constr -> types -> int * types (** [subst_closed_term_occ_modulo] looks for subterm modulo a testing function returning a substitution of type ['a] (or failing with NotUnifiable); a function for merging substitution (possibly failing with NotUnifiable) and an initial substitution are required too *) type hyp_location_flag = (** To distinguish body and type of local defs *) | InHyp | InHypTypeOnly | InHypValueOnly type 'a testing_function = { match_fun : constr -> 'a; merge_fun : 'a -> 'a -> 'a; mutable testing_state : 'a; mutable last_found : ((identifier * hyp_location_flag) option * int * constr) option } val make_eq_test : constr -> unit testing_function exception NotUnifiable val subst_closed_term_occ_modulo : occurrences -> 'a testing_function -> (identifier * hyp_location_flag) option -> constr -> types (** [subst_closed_term_occ occl c d] replaces occurrences of closed [c] at positions [occl] by [Rel 1] in [d] (see also Note OCC) *) val subst_closed_term_occ : occurrences -> constr -> constr -> constr (** [subst_closed_term_occ_decl occl c decl] replaces occurrences of closed [c] at positions [occl] by [Rel 1] in [decl] *) val subst_closed_term_occ_decl : occurrences * hyp_location_flag -> constr -> named_declaration -> named_declaration val 
subst_closed_term_occ_decl_modulo : occurrences * hyp_location_flag -> 'a testing_function -> named_declaration -> named_declaration val error_invalid_occurrence : int list -> 'a (** Alternative term equalities *) val base_sort_cmp : Reduction.conv_pb -> sorts -> sorts -> bool val compare_constr_univ : (Reduction.conv_pb -> constr -> constr -> bool) -> Reduction.conv_pb -> constr -> constr -> bool val constr_cmp : Reduction.conv_pb -> constr -> constr -> bool val eq_constr : constr -> constr -> bool val eta_reduce_head : constr -> constr val eta_eq_constr : constr -> constr -> bool exception CannotFilter (** Lightweight first-order filtering procedure. Unification variables ar represented by (untyped) Evars. [filtering c1 c2] returns the substitution n'th evar -> (context,term), or raises [CannotFilter]. Warning: Outer-kernel sort subtyping are taken into account: c1 has to be smaller than c2 wrt. sorts. *) type subst = (rel_context*constr) Intmap.t val filtering : rel_context -> Reduction.conv_pb -> constr -> constr -> subst val decompose_prod_letin : constr -> int * rel_context * constr val align_prod_letin : constr -> constr -> rel_context * constr (** Get the last arg of a constr intended to be an application *) val last_arg : constr -> constr (** Force the decomposition of a term as an applicative one *) val decompose_app_vect : constr -> constr * constr array val adjust_app_list_size : constr -> constr list -> constr -> constr list -> (constr * constr list * constr * constr list) val adjust_app_array_size : constr -> constr array -> constr -> constr array -> (constr * constr array * constr * constr array) (** name contexts *) type names_context = name list val add_name : name -> names_context -> names_context val lookup_name_of_rel : int -> names_context -> name val lookup_rel_of_name : identifier -> names_context -> int val empty_names_context : names_context val ids_of_rel_context : rel_context -> identifier list val ids_of_named_context : named_context -> identifier list val ids_of_context : env -> identifier list val names_of_rel_context : env -> names_context val context_chop : int -> rel_context -> rel_context * rel_context val env_rel_context_chop : int -> env -> env * rel_context (** Set of local names *) val vars_of_env: env -> Idset.t val add_vname : Idset.t -> name -> Idset.t (** other signature iterators *) val process_rel_context : (rel_declaration -> env -> env) -> env -> env val assums_of_rel_context : rel_context -> (name * constr) list val lift_rel_context : int -> rel_context -> rel_context val substl_rel_context : constr list -> rel_context -> rel_context val smash_rel_context : rel_context -> rel_context (** expand lets in context *) val adjust_subst_to_rel_context : rel_context -> constr list -> constr list val map_rel_context_in_env : (env -> constr -> constr) -> env -> rel_context -> rel_context val map_rel_context_with_binders : (int -> constr -> constr) -> rel_context -> rel_context val fold_named_context_both_sides : ('a -> named_declaration -> named_declaration list -> 'a) -> named_context -> init:'a -> 'a val mem_named_context : identifier -> named_context -> bool val clear_named_body : identifier -> env -> env val global_vars : env -> constr -> identifier list val global_vars_set_of_decl : env -> named_declaration -> Idset.t (** Gives an ordered list of hypotheses, closed by dependencies, containing a given set *) val dependency_closure : env -> named_context -> Idset.t -> identifier list (** Test if an identifier is the basename of a global reference *) 
val is_section_variable : identifier -> bool val isGlobalRef : constr -> bool val has_polymorphic_type : constant -> bool (** Combinators on judgments *) val on_judgment : (types -> types) -> unsafe_judgment -> unsafe_judgment val on_judgment_value : (types -> types) -> unsafe_judgment -> unsafe_judgment val on_judgment_type : (types -> types) -> unsafe_judgment -> unsafe_judgment (** {6 Functions to deal with impossible cases } *) val set_impossible_default_clause : constr * types -> unit val coq_unit_judge : unit -> unsafe_judgment coq-8.4pl2/pretyping/indrec.ml0000640000175000001440000004657212010532755015464 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (n, c, Termops.refresh_universes t)) mib.mind_params_ctxt in if not (List.mem kind (elim_sorts specif)) then raise (RecursionSchemeError (NotAllowedCaseAnalysis (false, Termops.new_sort_in_family kind, ind))); let ndepar = mip.mind_nrealargs_ctxt + 1 in (* Pas gnant car env ne sert pas typer mais juste renommer les Anonym *) (* mais pas trs joli ... (mais manque get_sort_of ce niveau) *) let env' = push_rel_context lnamespar env in let indf = make_ind_family(ind, Termops.extended_rel_list 0 lnamespar) in let constrs = get_constructors env indf in let rec add_branch env k = if k = Array.length mip.mind_consnames then let nbprod = k+1 in let indf' = lift_inductive_family nbprod indf in let arsign,_ = get_arity env indf' in let depind = build_dependent_inductive env indf' in let deparsign = (Anonymous,None,depind)::arsign in let ci = make_case_info env ind RegularStyle in let pbody = appvect (mkRel (ndepar + nbprod), if dep then Termops.extended_rel_vect 0 deparsign else Termops.extended_rel_vect 1 arsign) in let p = it_mkLambda_or_LetIn_name env' ((if dep then mkLambda_name env' else mkLambda) (Anonymous,depind,pbody)) arsign in it_mkLambda_or_LetIn_name env' (mkCase (ci, lift ndepar p, mkRel 1, Termops.rel_vect ndepar k)) deparsign else let cs = lift_constructor (k+1) constrs.(k) in let t = build_branch_type env dep (mkRel (k+1)) cs in mkLambda_string "f" t (add_branch (push_rel (Anonymous, None, t) env) (k+1)) in let typP = make_arity env' dep indf (Termops.new_sort_in_family kind) in it_mkLambda_or_LetIn_name env (mkLambda_string "P" typP (add_branch (push_rel (Anonymous,None,typP) env') 0)) lnamespar (* check if the type depends recursively on one of the inductive scheme *) (**********************************************************************) (* Building the recursive elimination *) (* Christine Paulin, 1996 *) (* * t is the type of the constructor co and recargs is the information on * the recursive calls. (It is assumed to be in form given by the user). * build the type of the corresponding branch of the recurrence principle * assuming f has this type, branch_rec gives also the term * [x1]..[xk](f xi (F xi) ...) to be put in the corresponding branch of * the case operation * FPvect gives for each inductive definition if we want an elimination * on it with which predicate and which recursive function. 
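 * Example (dependent case): for the constructor [S] of [nat], the branch
 * type produced here has, up to naming, the familiar shape
 * [forall n:nat, P n -> P (S n)], i.e. one induction hypothesis is
 * inserted after each recursive argument of the constructor.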
*) let type_rec_branch is_rec dep env sigma (vargs,depPvect,decP) tyi cs recargs = let make_prod = make_prod_dep dep in let nparams = List.length vargs in let process_pos env depK pk = let rec prec env i sign p = let p',largs = whd_betadeltaiota_nolet_stack env sigma p in match kind_of_term p' with | Prod (n,t,c) -> let d = (n,None,t) in make_prod env (n,t,prec (push_rel d env) (i+1) (d::sign) c) | LetIn (n,b,t,c) -> let d = (n,Some b,t) in mkLetIn (n,b,t,prec (push_rel d env) (i+1) (d::sign) c) | Ind (_,_) -> let realargs = list_skipn nparams largs in let base = applist (lift i pk,realargs) in if depK then Reduction.beta_appvect base [|applist (mkRel (i+1), Termops.extended_rel_list 0 sign)|] else base | _ -> assert false in prec env 0 [] in let rec process_constr env i c recargs nhyps li = if nhyps > 0 then match kind_of_term c with | Prod (n,t,c_0) -> let (optionpos,rest) = match recargs with | [] -> None,[] | ra::rest -> (match dest_recarg ra with | Mrec (_,j) when is_rec -> (depPvect.(j),rest) | Imbr _ -> Flags.if_warn msg_warning (str "Ignoring recursive call"); (None,rest) | _ -> (None, rest)) in (match optionpos with | None -> make_prod env (n,t, process_constr (push_rel (n,None,t) env) (i+1) c_0 rest (nhyps-1) (i::li)) | Some(dep',p) -> let nP = lift (i+1+decP) p in let env' = push_rel (n,None,t) env in let t_0 = process_pos env' dep' nP (lift 1 t) in make_prod_dep (dep or dep') env (n,t, mkArrow t_0 (process_constr (push_rel (Anonymous,None,t_0) env') (i+2) (lift 1 c_0) rest (nhyps-1) (i::li)))) | LetIn (n,b,t,c_0) -> mkLetIn (n,b,t, process_constr (push_rel (n,Some b,t) env) (i+1) c_0 recargs (nhyps-1) li) | _ -> assert false else if dep then let realargs = List.map (fun k -> mkRel (i-k)) (List.rev li) in let params = List.map (lift i) vargs in let co = applist (mkConstruct cs.cs_cstr,params@realargs) in Reduction.beta_appvect c [|co|] else c in let nhyps = List.length cs.cs_args in let nP = match depPvect.(tyi) with | Some(_,p) -> lift (nhyps+decP) p | _ -> assert false in let base = appvect (nP,cs.cs_concl_realargs) in let c = it_mkProd_or_LetIn base cs.cs_args in process_constr env 0 c recargs nhyps [] let make_rec_branch_arg env sigma (nparrec,fvect,decF) f cstr recargs = let process_pos env fk = let rec prec env i hyps p = let p',largs = whd_betadeltaiota_nolet_stack env sigma p in match kind_of_term p' with | Prod (n,t,c) -> let d = (n,None,t) in mkLambda_name env (n,t,prec (push_rel d env) (i+1) (d::hyps) c) | LetIn (n,b,t,c) -> let d = (n,Some b,t) in mkLetIn (n,b,t,prec (push_rel d env) (i+1) (d::hyps) c) | Ind _ -> let realargs = list_skipn nparrec largs and arg = appvect (mkRel (i+1), Termops.extended_rel_vect 0 hyps) in applist(lift i fk,realargs@[arg]) | _ -> assert false in prec env 0 [] in (* ici, cstrprods est la liste des produits du constructeur instanti *) let rec process_constr env i f = function | (n,None,t as d)::cprest, recarg::rest -> let optionpos = match dest_recarg recarg with | Norec -> None | Imbr _ -> None | Mrec (_,i) -> fvect.(i) in (match optionpos with | None -> mkLambda_name env (n,t,process_constr (push_rel d env) (i+1) (whd_beta Evd.empty (applist (lift 1 f, [(mkRel 1)]))) (cprest,rest)) | Some(_,f_0) -> let nF = lift (i+1+decF) f_0 in let env' = push_rel d env in let arg = process_pos env' nF (lift 1 t) in mkLambda_name env (n,t,process_constr env' (i+1) (whd_beta Evd.empty (applist (lift 1 f, [(mkRel 1); arg]))) (cprest,rest))) | (n,Some c,t as d)::cprest, rest -> mkLetIn (n,c,t, process_constr (push_rel d env) (i+1) (lift 1 f) 
(cprest,rest)) | [],[] -> f | _,[] | [],_ -> anomaly "process_constr" in process_constr env 0 f (List.rev cstr.cs_args, recargs) (* Cut a context ctx in 2 parts (ctx1,ctx2) with ctx1 containing k variables *) let context_chop k ctx = let rec chop_aux acc = function | (0, l2) -> (List.rev acc, l2) | (n, ((_,Some _,_ as h)::t)) -> chop_aux (h::acc) (n, t) | (n, (h::t)) -> chop_aux (h::acc) (pred n, t) | (_, []) -> failwith "context_chop" in chop_aux [] (k,ctx) (* Main function *) let mis_make_indrec env sigma listdepkind mib = let nparams = mib.mind_nparams in let nparrec = mib. mind_nparams_rec in let lnonparrec,lnamesparrec = context_chop (nparams-nparrec) mib.mind_params_ctxt in let nrec = List.length listdepkind in let depPvec = Array.create mib.mind_ntypes (None : (bool * constr) option) in let _ = let rec assign k = function | [] -> () | (indi,mibi,mipi,dep,_)::rest -> (Array.set depPvec (snd indi) (Some(dep,mkRel k)); assign (k-1) rest) in assign nrec listdepkind in let recargsvec = Array.map (fun mip -> mip.mind_recargs) mib.mind_packets in (* recarg information for non recursive parameters *) let rec recargparn l n = if n = 0 then l else recargparn (mk_norec::l) (n-1) in let recargpar = recargparn [] (nparams-nparrec) in let make_one_rec p = let makefix nbconstruct = let rec mrec i ln ltyp ldef = function | (indi,mibi,mipi,dep,_)::rest -> let tyi = snd indi in let nctyi = Array.length mipi.mind_consnames in (* nb constructeurs du type*) (* arity in the context of the fixpoint, i.e. P1..P_nrec f1..f_nbconstruct *) let args = Termops.extended_rel_list (nrec+nbconstruct) lnamesparrec in let indf = make_ind_family(indi,args) in let arsign,_ = get_arity env indf in let depind = build_dependent_inductive env indf in let deparsign = (Anonymous,None,depind)::arsign in let nonrecpar = rel_context_length lnonparrec in let larsign = rel_context_length deparsign in let ndepar = larsign - nonrecpar in let dect = larsign+nrec+nbconstruct in (* constructors in context of the Cases expr, i.e. 
P1..P_nrec f1..f_nbconstruct F_1..F_nrec a_1..a_nar x:I *) let args' = Termops.extended_rel_list (dect+nrec) lnamesparrec in let args'' = Termops.extended_rel_list ndepar lnonparrec in let indf' = make_ind_family(indi,args'@args'') in let branches = let constrs = get_constructors env indf' in let fi = Termops.rel_vect (dect-i-nctyi) nctyi in let vecfi = Array.map (fun f -> appvect (f, Termops.extended_rel_vect ndepar lnonparrec)) fi in array_map3 (make_rec_branch_arg env sigma (nparrec,depPvec,larsign)) vecfi constrs (dest_subterms recargsvec.(tyi)) in let j = (match depPvec.(tyi) with | Some (_,c) when isRel c -> destRel c | _ -> assert false) in (* Predicate in the context of the case *) let depind' = build_dependent_inductive env indf' in let arsign',_ = get_arity env indf' in let deparsign' = (Anonymous,None,depind')::arsign' in let pargs = let nrpar = Termops.extended_rel_list (2*ndepar) lnonparrec and nrar = if dep then Termops.extended_rel_list 0 deparsign' else Termops.extended_rel_list 1 arsign' in nrpar@nrar in (* body of i-th component of the mutual fixpoint *) let deftyi = let ci = make_case_info env indi RegularStyle in let concl = applist (mkRel (dect+j+ndepar),pargs) in let pred = it_mkLambda_or_LetIn_name env ((if dep then mkLambda_name env else mkLambda) (Anonymous,depind',concl)) arsign' in it_mkLambda_or_LetIn_name env (mkCase (ci, pred, mkRel 1, branches)) (Termops.lift_rel_context nrec deparsign) in (* type of i-th component of the mutual fixpoint *) let typtyi = let concl = let pargs = if dep then Termops.extended_rel_vect 0 deparsign else Termops.extended_rel_vect 1 arsign in appvect (mkRel (nbconstruct+ndepar+nonrecpar+j),pargs) in it_mkProd_or_LetIn_name env concl deparsign in mrec (i+nctyi) (rel_context_nhyps arsign ::ln) (typtyi::ltyp) (deftyi::ldef) rest | [] -> let fixn = Array.of_list (List.rev ln) in let fixtyi = Array.of_list (List.rev ltyp) in let fixdef = Array.of_list (List.rev ldef) in let names = Array.create nrec (Name(id_of_string "F")) in mkFix ((fixn,p),(names,fixtyi,fixdef)) in mrec 0 [] [] [] in let rec make_branch env i = function | (indi,mibi,mipi,dep,_)::rest -> let tyi = snd indi in let nconstr = Array.length mipi.mind_consnames in let rec onerec env j = if j = nconstr then make_branch env (i+j) rest else let recarg = (dest_subterms recargsvec.(tyi)).(j) in let recarg = recargpar@recarg in let vargs = Termops.extended_rel_list (nrec+i+j) lnamesparrec in let cs = get_constructor (indi,mibi,mipi,vargs) (j+1) in let p_0 = type_rec_branch true dep env sigma (vargs,depPvec,i+j) tyi cs recarg in mkLambda_string "f" p_0 (onerec (push_rel (Anonymous,None,p_0) env) (j+1)) in onerec env 0 | [] -> makefix i listdepkind in let rec put_arity env i = function | (indi,_,_,dep,kinds)::rest -> let indf = make_ind_family (indi, Termops.extended_rel_list i lnamesparrec) in let typP = make_arity env dep indf (Termops.new_sort_in_family kinds) in mkLambda_string "P" typP (put_arity (push_rel (Anonymous,None,typP) env) (i+1) rest) | [] -> make_branch env 0 listdepkind in (* Body on make_one_rec *) let (indi,mibi,mipi,dep,kind) = List.nth listdepkind p in if (mis_is_recursive_subset (List.map (fun (indi,_,_,_,_) -> snd indi) listdepkind) mipi.mind_recargs) then let env' = push_rel_context lnamesparrec env in it_mkLambda_or_LetIn_name env (put_arity env' 0 listdepkind) lnamesparrec else mis_make_case_com dep env sigma indi (mibi,mipi) kind in (* Body of mis_make_indrec *) list_tabulate make_one_rec nrec 
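(* [mis_make_indrec] thus returns one scheme per element of [listdepkind]
   (cf. [list_tabulate make_one_rec nrec] just above). The wrappers below
   use it accordingly: [build_induction_scheme] passes a singleton list and
   takes the head of the result, while [build_mutual_induction_scheme]
   passes the whole block of mutually defined inductive types. *)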
(**********************************************************************) (* This builds elimination predicate for Case tactic *) let build_case_analysis_scheme env sigma ity dep kind = let (mib,mip) = lookup_mind_specif env ity in mis_make_case_com dep env sigma ity (mib,mip) kind let build_case_analysis_scheme_default env sigma ity kind = let (mib,mip) = lookup_mind_specif env ity in let dep = inductive_sort_family mip <> InProp in mis_make_case_com dep env sigma ity (mib,mip) kind (**********************************************************************) (* [modify_sort_scheme s rec] replaces the sort of the scheme [rec] by [s] *) let change_sort_arity sort = let rec drec a = match kind_of_term a with | Cast (c,_,_) -> drec c | Prod (n,t,c) -> mkProd (n, t, drec c) | LetIn (n,b,t,c) -> mkLetIn (n,b, t, drec c) | Sort _ -> mkSort sort | _ -> assert false in drec (* [npar] is the number of expected arguments (then excluding letin's) *) let modify_sort_scheme sort = let rec drec npar elim = match kind_of_term elim with | Lambda (n,t,c) -> if npar = 0 then mkLambda (n, change_sort_arity sort t, c) else mkLambda (n, t, drec (npar-1) c) | LetIn (n,b,t,c) -> mkLetIn (n,b,t,drec npar c) | _ -> anomaly "modify_sort_scheme: wrong elimination type" in drec (* Change the sort in the type of an inductive definition, builds the corresponding eta-expanded term *) let weaken_sort_scheme sort npars term = let rec drec np elim = match kind_of_term elim with | Prod (n,t,c) -> if np = 0 then let t' = change_sort_arity sort t in mkProd (n, t', c), mkLambda (n, t', mkApp(term,Termops.rel_vect 0 (npars+1))) else let c',term' = drec (np-1) c in mkProd (n, t, c'), mkLambda (n, t, term') | LetIn (n,b,t,c) -> let c',term' = drec np c in mkLetIn (n,b,t,c'), mkLetIn (n,b,t,term') | _ -> anomaly "weaken_sort_scheme: wrong elimination type" in drec npars (**********************************************************************) (* Interface to build complex Scheme *) (* Check inductive types only occurs once (otherwise we obtain a meaning less scheme) *) let check_arities listdepkind = let _ = List.fold_left (fun ln ((_,ni as mind),mibi,mipi,dep,kind) -> let kelim = elim_sorts (mibi,mipi) in if not (List.exists ((=) kind) kelim) then raise (RecursionSchemeError (NotAllowedCaseAnalysis (true, Termops.new_sort_in_family kind,mind))) else if List.mem ni ln then raise (RecursionSchemeError (NotMutualInScheme (mind,mind))) else ni::ln) [] listdepkind in true let build_mutual_induction_scheme env sigma = function | (mind,dep,s)::lrecspec -> let (mib,mip) = Global.lookup_inductive mind in let (sp,tyi) = mind in let listdepkind = (mind,mib,mip,dep,s):: (List.map (function (mind',dep',s') -> let (sp',_) = mind' in if sp=sp' then let (mibi',mipi') = lookup_mind_specif env mind' in (mind',mibi',mipi',dep',s') else raise (RecursionSchemeError (NotMutualInScheme (mind,mind')))) lrecspec) in let _ = check_arities listdepkind in mis_make_indrec env sigma listdepkind mib | _ -> anomaly "build_induction_scheme expects a non empty list of inductive types" let build_induction_scheme env sigma ind dep kind = let (mib,mip) = lookup_mind_specif env ind in List.hd (mis_make_indrec env sigma [(ind,mib,mip,dep,kind)] mib) (*s Eliminations. 
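    Naming convention (implemented just below): a scheme name is the
    inductive name extended with a sort-dependent suffix, "_ind" for Prop,
    "_rec" for Set and "_rect" for Type, e.g. [nat_ind], [nat_rec] and
    [nat_rect].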
*) let elimination_suffix = function | InProp -> "_ind" | InSet -> "_rec" | InType -> "_rect" let case_suffix = "_case" let make_elimination_ident id s = add_suffix id (elimination_suffix s) (* Look up function for the default elimination constant *) let lookup_eliminator ind_sp s = let kn,i = ind_sp in let mp,dp,l = repr_mind kn in let ind_id = (Global.lookup_mind kn).mind_packets.(i).mind_typename in let id = add_suffix ind_id (elimination_suffix s) in (* Try first to get an eliminator defined in the same section as the *) (* inductive type *) try let cst =Global.constant_of_delta_kn (make_kn mp dp (label_of_id id)) in let _ = Global.lookup_constant cst in mkConst cst with Not_found -> (* Then try to get a user-defined eliminator in some other places *) (* using short name (e.g. for "eq_rec") *) try constr_of_global (Nametab.locate (qualid_of_ident id)) with Not_found -> errorlabstrm "default_elim" (strbrk "Cannot find the elimination combinator " ++ pr_id id ++ strbrk ", the elimination of the inductive definition " ++ pr_global_env Idset.empty (IndRef ind_sp) ++ strbrk " on sort " ++ Termops.pr_sort_family s ++ strbrk " is probably not allowed.") coq-8.4pl2/pretyping/typeclasses_errors.mli0000640000175000001440000000272412010532755020313 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> 'a val unbound_method : env -> global_reference -> identifier located -> 'a val no_instance : env -> identifier located -> constr list -> 'a val unsatisfiable_constraints : env -> evar_map -> evar option -> 'a val mismatched_ctx_inst : env -> contexts -> constr_expr list -> rel_context -> 'a val unsatisfiable_exception : exn -> bool coq-8.4pl2/pretyping/cases.mli0000640000175000001440000000402112010532755015446 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a val error_wrong_numarg_constructor_loc : loc -> env -> constructor -> int -> 'a val error_wrong_numarg_inductive_loc : loc -> env -> inductive -> int -> 'a val error_bad_constructor_loc : loc -> constructor -> inductive -> 'a val error_bad_pattern_loc : loc -> constructor -> constr -> 'a val error_wrong_predicate_arity_loc : loc -> env -> constr -> constr -> constr -> 'a val error_needs_inversion : env -> constr -> types -> 'a (** {6 Compilation primitive. } *) module type S = sig val compile_cases : loc -> case_style -> (type_constraint -> env -> evar_map ref -> glob_constr -> unsafe_judgment) * evar_map ref -> type_constraint -> env -> glob_constr option * tomatch_tuples * cases_clauses -> unsafe_judgment end module Cases_F(C : Coercion.S) : S coq-8.4pl2/plugins/0000750000175000001440000000000012127276542013317 5ustar notinuserscoq-8.4pl2/plugins/plugins.itarget0000640000175000001440000000007011165421210016340 0ustar notinuserspluginsopt.otarget pluginsbyte.otarget pluginsvo.otargetcoq-8.4pl2/plugins/extraction/0000750000175000001440000000000012127276537015503 5ustar notinuserscoq-8.4pl2/plugins/extraction/ExtrOcamlNatInt.v0000640000175000001440000000645312063736511020707 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int [ "0" "Pervasives.succ" ] "(fun fO fS n -> if n=0 then fO () else fS (n-1))". (** Efficient (but uncertified) versions for usual [nat] functions *) Extract Constant plus => "(+)". 
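(* Coq's [pred] and [minus] on [nat] are truncated at 0, hence the
   realizations below clamp their result with [Pervasives.max 0] instead of
   mapping them to a bare OCaml subtraction. *)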
Extract Constant pred => "fun n -> Pervasives.max 0 (n-1)". Extract Constant minus => "fun n m -> Pervasives.max 0 (n-m)". Extract Constant mult => "( * )". Extract Inlined Constant max => "Pervasives.max". Extract Inlined Constant min => "Pervasives.min". (*Extract Inlined Constant nat_beq => "(=)".*) Extract Inlined Constant EqNat.beq_nat => "(=)". Extract Inlined Constant EqNat.eq_nat_decide => "(=)". Extract Inlined Constant Peano_dec.eq_nat_dec => "(=)". Extract Constant Compare_dec.nat_compare => "fun n m -> if n=m then Eq else if n "(<=)". Extract Inlined Constant Compare_dec.le_lt_dec => "(<=)". Extract Constant Compare_dec.lt_eq_lt_dec => "fun n m -> if n>m then None else Some (n "fun n -> n mod 2 = 0". Extract Constant Div2.div2 => "fun n -> n/2". Extract Inductive Euclid.diveucl => "(int * int)" [ "" ]. Extract Constant Euclid.eucl_dev => "fun n m -> (m/n, m mod n)". Extract Constant Euclid.quotient => "fun n m -> m/n". Extract Constant Euclid.modulo => "fun n m -> m mod n". (* Definition test n m (H:m>0) := let (q,r,_,_) := eucl_dev m H n in nat_compare n (q*m+r). Recursive Extraction test fact. *) coq-8.4pl2/plugins/extraction/g_extraction.ml40000640000175000001440000000773512010532755020610 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ id ] | [ string(s) ] -> [ s ] END let pr_int_or_id _ _ _ = function | ArgInt i -> int i | ArgId id -> pr_id id ARGUMENT EXTEND int_or_id TYPED AS int_or_id PRINTED BY pr_int_or_id | [ preident(id) ] -> [ ArgId (id_of_string id) ] | [ integer(i) ] -> [ ArgInt i ] END let pr_language = function | Ocaml -> str "Ocaml" | Haskell -> str "Haskell" | Scheme -> str "Scheme" VERNAC ARGUMENT EXTEND language PRINTED BY pr_language | [ "Ocaml" ] -> [ Ocaml ] | [ "Haskell" ] -> [ Haskell ] | [ "Scheme" ] -> [ Scheme ] END (* Extraction commands *) VERNAC COMMAND EXTEND Extraction (* Extraction in the Coq toplevel *) | [ "Extraction" global(x) ] -> [ simple_extraction x ] | [ "Recursive" "Extraction" ne_global_list(l) ] -> [ full_extraction None l ] (* Monolithic extraction to a file *) | [ "Extraction" string(f) ne_global_list(l) ] -> [ full_extraction (Some f) l ] END VERNAC COMMAND EXTEND SeparateExtraction (* Same, with content splitted in several files *) | [ "Separate" "Extraction" ne_global_list(l) ] -> [ separate_extraction l ] END (* Modular extraction (one Coq library = one ML module) *) VERNAC COMMAND EXTEND ExtractionLibrary | [ "Extraction" "Library" ident(m) ] -> [ extraction_library false m ] END VERNAC COMMAND EXTEND RecursiveExtractionLibrary | [ "Recursive" "Extraction" "Library" ident(m) ] -> [ extraction_library true m ] END (* Target Language *) VERNAC COMMAND EXTEND ExtractionLanguage | [ "Extraction" "Language" language(l) ] -> [ extraction_language l ] END VERNAC COMMAND EXTEND ExtractionInline (* Custom inlining directives *) | [ "Extraction" "Inline" ne_global_list(l) ] -> [ extraction_inline true l ] END VERNAC COMMAND EXTEND ExtractionNoInline | [ "Extraction" "NoInline" ne_global_list(l) ] -> [ extraction_inline false l ] END VERNAC COMMAND EXTEND PrintExtractionInline | [ "Print" "Extraction" "Inline" ] -> [ print_extraction_inline () ] END VERNAC COMMAND EXTEND ResetExtractionInline | [ "Reset" "Extraction" "Inline" ] -> [ reset_extraction_inline () ] END VERNAC COMMAND EXTEND ExtractionImplicit (* Custom implicit arguments of some csts/inds/constructors *) | [ "Extraction" "Implicit" global(r) "[" int_or_id_list(l) 
"]" ] -> [ extraction_implicit r l ] END VERNAC COMMAND EXTEND ExtractionBlacklist (* Force Extraction to not use some filenames *) | [ "Extraction" "Blacklist" ne_ident_list(l) ] -> [ extraction_blacklist l ] END VERNAC COMMAND EXTEND PrintExtractionBlacklist | [ "Print" "Extraction" "Blacklist" ] -> [ print_extraction_blacklist () ] END VERNAC COMMAND EXTEND ResetExtractionBlacklist | [ "Reset" "Extraction" "Blacklist" ] -> [ reset_extraction_blacklist () ] END (* Overriding of a Coq object by an ML one *) VERNAC COMMAND EXTEND ExtractionConstant | [ "Extract" "Constant" global(x) string_list(idl) "=>" mlname(y) ] -> [ extract_constant_inline false x idl y ] END VERNAC COMMAND EXTEND ExtractionInlinedConstant | [ "Extract" "Inlined" "Constant" global(x) "=>" mlname(y) ] -> [ extract_constant_inline true x [] y ] END VERNAC COMMAND EXTEND ExtractionInductive | [ "Extract" "Inductive" global(x) "=>" mlname(id) "[" mlname_list(idl) "]" string_opt(o) ] -> [ extract_inductive x id idl o ] END coq-8.4pl2/plugins/extraction/modutil.ml0000640000175000001440000003272712043753271017516 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mp | MTwith(mt,_)-> msid_of_mt mt | _ -> anomaly "Extraction:the With operator isn't applied to a name" (*s Apply some functions upon all [ml_decl] and [ml_spec] found in a [ml_structure]. *) let se_iter do_decl do_spec do_mp = let rec mt_iter = function | MTident mp -> do_mp mp | MTfunsig (_,mt,mt') -> mt_iter mt; mt_iter mt' | MTwith (mt,ML_With_type(idl,l,t))-> let mp_mt = msid_of_mt mt in let l',idl' = list_sep_last idl in let mp_w = List.fold_left (fun mp l -> MPdot(mp,label_of_id l)) mp_mt idl' in let r = ConstRef (make_con mp_w empty_dirpath (label_of_id l')) in mt_iter mt; do_decl (Dtype(r,l,t)) | MTwith (mt,ML_With_module(idl,mp))-> let mp_mt = msid_of_mt mt in let mp_w = List.fold_left (fun mp l -> MPdot(mp,label_of_id l)) mp_mt idl in mt_iter mt; do_mp mp_w; do_mp mp | MTsig (_, sign) -> List.iter spec_iter sign and spec_iter = function | (_,Spec s) -> do_spec s | (_,Smodule mt) -> mt_iter mt | (_,Smodtype mt) -> mt_iter mt in let rec se_iter = function | (_,SEdecl d) -> do_decl d | (_,SEmodule m) -> me_iter m.ml_mod_expr; mt_iter m.ml_mod_type | (_,SEmodtype m) -> mt_iter m and me_iter = function | MEident mp -> do_mp mp | MEfunctor (_,mt,me) -> me_iter me; mt_iter mt | MEapply (me,me') -> me_iter me; me_iter me' | MEstruct (msid, sel) -> List.iter se_iter sel in se_iter let struct_iter do_decl do_spec do_mp s = List.iter (function (_,sel) -> List.iter (se_iter do_decl do_spec do_mp) sel) s (*s Apply some fonctions upon all references in [ml_type], [ml_ast], [ml_decl], [ml_spec] and [ml_structure]. 
*) type do_ref = global_reference -> unit let record_iter_references do_term = function | Record l -> List.iter (Option.iter do_term) l | _ -> () let type_iter_references do_type t = let rec iter = function | Tglob (r,l) -> do_type r; List.iter iter l | Tarr (a,b) -> iter a; iter b | _ -> () in iter t let patt_iter_references do_cons p = let rec iter = function | Pcons (r,l) -> do_cons r; List.iter iter l | Pusual r -> do_cons r | Ptuple l -> List.iter iter l | Prel _ | Pwild -> () in iter p let ast_iter_references do_term do_cons do_type a = let rec iter a = ast_iter iter a; match a with | MLglob r -> do_term r | MLcons (_,r,_) -> do_cons r | MLcase (ty,_,v) -> type_iter_references do_type ty; Array.iter (fun (_,p,_) -> patt_iter_references do_cons p) v | MLrel _ | MLlam _ | MLapp _ | MLletin _ | MLtuple _ | MLfix _ | MLexn _ | MLdummy | MLaxiom | MLmagic _ -> () in iter a let ind_iter_references do_term do_cons do_type kn ind = let type_iter = type_iter_references do_type in let cons_iter cp l = do_cons (ConstructRef cp); List.iter type_iter l in let packet_iter ip p = do_type (IndRef ip); if lang () = Ocaml then (match ind.ind_equiv with | Miniml.Equiv kne -> do_type (IndRef (mind_of_kn kne, snd ip)); | _ -> ()); Array.iteri (fun j -> cons_iter (ip,j+1)) p.ip_types in if lang () = Ocaml then record_iter_references do_term ind.ind_kind; Array.iteri (fun i -> packet_iter (kn,i)) ind.ind_packets let decl_iter_references do_term do_cons do_type = let type_iter = type_iter_references do_type and ast_iter = ast_iter_references do_term do_cons do_type in function | Dind (kn,ind) -> ind_iter_references do_term do_cons do_type kn ind | Dtype (r,_,t) -> do_type r; type_iter t | Dterm (r,a,t) -> do_term r; ast_iter a; type_iter t | Dfix(rv,c,t) -> Array.iter do_term rv; Array.iter ast_iter c; Array.iter type_iter t let spec_iter_references do_term do_cons do_type = function | Sind (kn,ind) -> ind_iter_references do_term do_cons do_type kn ind | Stype (r,_,ot) -> do_type r; Option.iter (type_iter_references do_type) ot | Sval (r,t) -> do_term r; type_iter_references do_type t (*s Searching occurrences of a particular term (no lifting done). *) exception Found let rec ast_search f a = if f a then raise Found else ast_iter (ast_search f) a let decl_ast_search f = function | Dterm (_,a,_) -> ast_search f a | Dfix (_,c,_) -> Array.iter (ast_search f) c | _ -> () let struct_ast_search f s = try struct_iter (decl_ast_search f) (fun _ -> ()) (fun _ -> ()) s; false with Found -> true let rec type_search f = function | Tarr (a,b) -> type_search f a; type_search f b | Tglob (r,l) -> List.iter (type_search f) l | u -> if f u then raise Found let decl_type_search f = function | Dind (_,{ind_packets=p}) -> Array.iter (fun {ip_types=v} -> Array.iter (List.iter (type_search f)) v) p | Dterm (_,_,u) -> type_search f u | Dfix (_,_,v) -> Array.iter (type_search f) v | Dtype (_,_,u) -> type_search f u let spec_type_search f = function | Sind (_,{ind_packets=p}) -> Array.iter (fun {ip_types=v} -> Array.iter (List.iter (type_search f)) v) p | Stype (_,_,ot) -> Option.iter (type_search f) ot | Sval (_,u) -> type_search f u let struct_type_search f s = try struct_iter (decl_type_search f) (spec_type_search f) (fun _ -> ()) s; false with Found -> true (*s Generating the signature. 
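    [msig_of_ms] maps each structure element to its specification:
    [Dterm] and each component of a [Dfix] become [Sval], [Dtype] becomes
    [Stype], [Dind] becomes [Sind], and submodules (resp. module types)
    keep their module type.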
*) let rec msig_of_ms = function | [] -> [] | (l,SEdecl (Dind (kn,i))) :: ms -> (l,Spec (Sind (kn,i))) :: (msig_of_ms ms) | (l,SEdecl (Dterm (r,_,t))) :: ms -> (l,Spec (Sval (r,t))) :: (msig_of_ms ms) | (l,SEdecl (Dtype (r,v,t))) :: ms -> (l,Spec (Stype (r,v,Some t))) :: (msig_of_ms ms) | (l,SEdecl (Dfix (rv,_,tv))) :: ms -> let msig = ref (msig_of_ms ms) in for i = Array.length rv - 1 downto 0 do msig := (l,Spec (Sval (rv.(i),tv.(i))))::!msig done; !msig | (l,SEmodule m) :: ms -> (l,Smodule m.ml_mod_type) :: (msig_of_ms ms) | (l,SEmodtype m) :: ms -> (l,Smodtype m) :: (msig_of_ms ms) let signature_of_structure s = List.map (fun (mp,ms) -> mp,msig_of_ms ms) s (*s Searching one [ml_decl] in a [ml_structure] by its [global_reference] *) let is_modular = function | SEdecl _ -> false | SEmodule _ | SEmodtype _ -> true let rec search_structure l m = function | [] -> raise Not_found | (lab,d)::_ when lab=l && is_modular d = m -> d | _::fields -> search_structure l m fields let get_decl_in_structure r struc = try let base_mp,ll = labels_of_ref r in if not (at_toplevel base_mp) then error_not_visible r; let sel = List.assoc base_mp struc in let rec go ll sel = match ll with | [] -> assert false | l :: ll -> match search_structure l (ll<>[]) sel with | SEdecl d -> d | SEmodtype m -> assert false | SEmodule m -> match m.ml_mod_expr with | MEstruct (_,sel) -> go ll sel | _ -> error_not_visible r in go ll sel with Not_found -> anomaly "reference not found in extracted structure" (*s Optimization of a [ml_structure]. *) (* Some transformations of ML terms. [optimize_struct] simplify all beta redexes (when the argument does not occur, it is just thrown away; when it occurs exactly once it is substituted; otherwise a let-in redex is created for clarity) and iota redexes, plus some other optimizations. *) let dfix_to_mlfix rv av i = let rec make_subst n s = if n < 0 then s else make_subst (n-1) (Refmap'.add rv.(n) (n+1) s) in let s = make_subst (Array.length rv - 1) Refmap'.empty in let rec subst n t = match t with | MLglob ((ConstRef kn) as refe) -> (try MLrel (n + (Refmap'.find refe s)) with Not_found -> t) | _ -> ast_map_lift subst n t in let ids = Array.map (fun r -> id_of_label (label_of_r r)) rv in let c = Array.map (subst 0) av in MLfix(i, ids, c) (* [optim_se] applies the [normalize] function everywhere and does the inlining of code. The inlined functions are kept for the moment in order to preserve the global interface, later [depcheck_se] will get rid of them if possible *) let rec optim_se top to_appear s = function | [] -> [] | (l,SEdecl (Dterm (r,a,t))) :: lse -> let a = normalize (ast_glob_subst !s a) in let i = inline r a in if i then s := Refmap'.add r a !s; let d = match optimize_fix a with | MLfix (0, _, [|c|]) -> Dfix ([|r|], [|ast_subst (MLglob r) c|], [|t|]) | a -> Dterm (r, a, t) in (l,SEdecl d) :: (optim_se top to_appear s lse) | (l,SEdecl (Dfix (rv,av,tv))) :: lse -> let av = Array.map (fun a -> normalize (ast_glob_subst !s a)) av in (* This fake body ensures that no fixpoint will be auto-inlined. 
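   In other words, a member of a [Dfix] block should end up in the inlining
   substitution only when inlining was explicitly requested for it.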
*) let fake_body = MLfix (0,[||],[||]) in for i = 0 to Array.length rv - 1 do if inline rv.(i) fake_body then s := Refmap'.add rv.(i) (dfix_to_mlfix rv av i) !s done; (l,SEdecl (Dfix (rv, av, tv))) :: (optim_se top to_appear s lse) | (l,SEmodule m) :: lse -> let m = { m with ml_mod_expr = optim_me to_appear s m.ml_mod_expr} in (l,SEmodule m) :: (optim_se top to_appear s lse) | se :: lse -> se :: (optim_se top to_appear s lse) and optim_me to_appear s = function | MEstruct (msid, lse) -> MEstruct (msid, optim_se false to_appear s lse) | MEident mp as me -> me | MEapply (me, me') -> MEapply (optim_me to_appear s me, optim_me to_appear s me') | MEfunctor (mbid,mt,me) -> MEfunctor (mbid,mt, optim_me to_appear s me) (* After these optimisations, some dependencies may not be needed anymore. For non-library extraction, we recompute a minimal set of dependencies for first-level definitions (no module pruning yet). *) exception NoDepCheck let base_r = function | ConstRef c as r -> r | IndRef (kn,_) -> IndRef (kn,0) | ConstructRef ((kn,_),_) -> IndRef (kn,0) | _ -> assert false let reset_needed, add_needed, add_needed_mp, found_needed, is_needed = let needed = ref Refset'.empty and needed_mps = ref MPset.empty in ((fun l -> needed := Refset'.empty; needed_mps := MPset.empty), (fun r -> needed := Refset'.add (base_r r) !needed), (fun mp -> needed_mps := MPset.add mp !needed_mps), (fun r -> needed := Refset'.remove (base_r r) !needed), (fun r -> let r = base_r r in Refset'.mem r !needed || MPset.mem (modpath_of_r r) !needed_mps)) let declared_refs = function | Dind (kn,_) -> [IndRef (kn,0)] | Dtype (r,_,_) -> [r] | Dterm (r,_,_) -> [r] | Dfix (rv,_,_) -> Array.to_list rv (* Computes the dependencies of a declaration, except in case of custom extraction. *) let compute_deps_decl = function | Dind (kn,ind) -> (* Todo Later : avoid dependencies when Extract Inductive *) ind_iter_references add_needed add_needed add_needed kn ind | Dtype (r,ids,t) -> if not (is_custom r) then type_iter_references add_needed t | Dterm (r,u,t) -> type_iter_references add_needed t; if not (is_custom r) then ast_iter_references add_needed add_needed add_needed u | Dfix _ as d -> decl_iter_references add_needed add_needed add_needed d let compute_deps_spec = function | Sind (kn,ind) -> (* Todo Later : avoid dependencies when Extract Inductive *) ind_iter_references add_needed add_needed add_needed kn ind | Stype (r,ids,t) -> if not (is_custom r) then Option.iter (type_iter_references add_needed) t | Sval (r,t) -> type_iter_references add_needed t let rec depcheck_se = function | [] -> [] | ((l,SEdecl d) as t) :: se -> let se' = depcheck_se se in let refs = declared_refs d in let refs' = List.filter is_needed refs in if refs' = [] then (List.iter remove_info_axiom refs; List.iter remove_opaque refs; se') else begin List.iter found_needed refs'; (* Hack to avoid extracting unused part of a Dfix *) match d with | Dfix (rv,trms,tys) when (List.for_all is_custom refs') -> let trms' = Array.create (Array.length rv) (MLexn "UNUSED") in ((l,SEdecl (Dfix (rv,trms',tys))) :: se') | _ -> (compute_deps_decl d; t::se') end | t :: se -> let se' = depcheck_se se in se_iter compute_deps_decl compute_deps_spec add_needed_mp t; t :: se' let rec depcheck_struct = function | [] -> [] | (mp,lse)::struc -> let struc' = depcheck_struct struc in let lse' = depcheck_se lse in if lse' = [] then struc' else (mp,lse')::struc' let check_implicits = function | MLexn s -> if String.length s > 8 && (s.[0] = 'U' || s.[0] = 'I') then begin if String.sub s 0 7 
= "UNBOUND" then assert false; if String.sub s 0 8 = "IMPLICIT" then error_non_implicit (String.sub s 9 (String.length s - 9)); end; false | _ -> false let optimize_struct to_appear struc = let subst = ref (Refmap'.empty : ml_ast Refmap'.t) in let opt_struc = List.map (fun (mp,lse) -> (mp, optim_se true (fst to_appear) subst lse)) struc in ignore (struct_ast_search check_implicits opt_struc); if library () then List.filter (fun (_,lse) -> lse<>[]) opt_struc else begin reset_needed (); List.iter add_needed (fst to_appear); List.iter add_needed_mp (snd to_appear); depcheck_struct opt_struc end coq-8.4pl2/plugins/extraction/ExtrOcamlBigIntConv.v0000640000175000001440000000734612010532755021512 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bigint. Parameter bigint_opp : bigint -> bigint. Parameter bigint_twice : bigint -> bigint. Extract Inlined Constant bigint => "Big.big_int". Extract Inlined Constant bigint_zero => "Big.zero". Extract Inlined Constant bigint_succ => "Big.succ". Extract Inlined Constant bigint_opp => "Big.opp". Extract Inlined Constant bigint_twice => "Big.double". Definition bigint_of_nat : nat -> bigint := (fix loop acc n := match n with | O => acc | S n => loop (bigint_succ acc) n end) bigint_zero. Fixpoint bigint_of_pos p := match p with | xH => bigint_succ bigint_zero | xO p => bigint_twice (bigint_of_pos p) | xI p => bigint_succ (bigint_twice (bigint_of_pos p)) end. Fixpoint bigint_of_z z := match z with | Z0 => bigint_zero | Zpos p => bigint_of_pos p | Zneg p => bigint_opp (bigint_of_pos p) end. Fixpoint bigint_of_n n := match n with | N0 => bigint_zero | Npos p => bigint_of_pos p end. (** NB: as for [pred] or [minus], [nat_of_bigint], [n_of_bigint] and [pos_of_bigint] are total and return zero (resp. one) for non-positive inputs. *) Parameter bigint_natlike_rec : forall A, A -> (A->A) -> bigint -> A. Extract Constant bigint_natlike_rec => "Big.nat_rec". Definition nat_of_bigint : bigint -> nat := bigint_natlike_rec _ O S. Parameter bigint_poslike_rec : forall A, (A->A) -> (A->A) -> A -> bigint -> A. Extract Constant bigint_poslike_rec => "Big.positive_rec". Definition pos_of_bigint : bigint -> positive := bigint_poslike_rec _ xI xO xH. Parameter bigint_zlike_case : forall A, A -> (bigint->A) -> (bigint->A) -> bigint -> A. Extract Constant bigint_zlike_case => "Big.z_rec". Definition z_of_bigint : bigint -> Z := bigint_zlike_case _ Z0 (fun i => Zpos (pos_of_bigint i)) (fun i => Zneg (pos_of_bigint i)). Definition n_of_bigint : bigint -> N := bigint_zlike_case _ N0 (fun i => Npos (pos_of_bigint i)) (fun _ => N0). (* Tests: Definition small := 1234%nat. Definition big := 12345678901234567890%positive. Definition nat_0 := nat_of_bigint (bigint_of_nat 0). Definition nat_1 := nat_of_bigint (bigint_of_nat small). Definition pos_1 := pos_of_bigint (bigint_of_pos 1). Definition pos_2 := pos_of_bigint (bigint_of_pos big). Definition n_0 := n_of_bigint (bigint_of_n 0). Definition n_1 := n_of_bigint (bigint_of_n 1). Definition n_2 := n_of_bigint (bigint_of_n (Npos big)). Definition z_0 := z_of_bigint (bigint_of_z 0). Definition z_1 := z_of_bigint (bigint_of_z 1). Definition z_2 := z_of_bigint (bigint_of_z (Zpos big)). Definition z_m1 := z_of_bigint (bigint_of_z (-1)). Definition z_m2 := z_of_bigint (bigint_of_z (Zneg big)). Definition test := (nat_0, nat_1, pos_1, pos_2, n_0, n_1, n_2, z_0, z_1, z_2, z_m1, z_m2). 
Definition check := (O, small, xH, big, 0%N, 1%N, Npos big, 0%Z, 1%Z, Zpos big, (-1)%Z, Zneg big). Extraction "/tmp/test.ml" check test. ... and we check that test=check *)coq-8.4pl2/plugins/extraction/ExtrOcamlString.v0000640000175000001440000000267212010532755020753 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* char [ "(* If this appears, you're using Ascii internals. Please don't *) (fun (b0,b1,b2,b3,b4,b5,b6,b7) -> let f b i = if b then 1 lsl i else 0 in Char.chr (f b0 0 + f b1 1 + f b2 2 + f b3 3 + f b4 4 + f b5 5 + f b6 6 + f b7 7))" ] "(* If this appears, you're using Ascii internals. Please don't *) (fun f c -> let n = Char.code c in let h i = (n land (1 lsl i)) <> 0 in f (h 0) (h 1) (h 2) (h 3) (h 4) (h 5) (h 6) (h 7))". Extract Constant zero => "'\000'". Extract Constant one => "'\001'". Extract Constant shift => "fun b c -> Char.chr (((Char.code c) lsl 1) land 255 + if b then 1 else 0)". Extract Inlined Constant ascii_dec => "(=)". Extract Inductive string => "char list" [ "[]" "(::)" ]. (* Definition test := "ceci est un test"%string. Recursive Extraction test Ascii.zero Ascii.one. *) coq-8.4pl2/plugins/extraction/ExtrOcamlZBigInt.v0000640000175000001440000000662312010532755021013 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* "Big.big_int" [ "Big.doubleplusone" "Big.double" "Big.one" ] "Big.positive_case". Extract Inductive Z => "Big.big_int" [ "Big.zero" "" "Big.opp" ] "Big.z_case". Extract Inductive N => "Big.big_int" [ "Big.zero" "" ] "Big.n_case". (** Nota: the "" above is used as an identity function "(fun p->p)" *) (** Efficient (but uncertified) versions for usual functions *) Extract Constant Pos.add => "Big.add". Extract Constant Pos.succ => "Big.succ". Extract Constant Pos.pred => "fun n -> Big.max Big.one (Big.pred n)". Extract Constant Pos.sub => "fun n m -> Big.max Big.one (Big.sub n m)". Extract Constant Pos.mul => "Big.mult". Extract Constant Pos.min => "Big.min". Extract Constant Pos.max => "Big.max". Extract Constant Pos.compare => "fun x y -> Big.compare_case Eq Lt Gt x y". Extract Constant Pos.compare_cont => "fun x y c -> Big.compare_case c Lt Gt x y". Extract Constant N.add => "Big.add". Extract Constant N.succ => "Big.succ". Extract Constant N.pred => "fun n -> Big.max Big.zero (Big.pred n)". Extract Constant N.sub => "fun n m -> Big.max Big.zero (Big.sub n m)". Extract Constant N.mul => "Big.mult". Extract Constant N.min => "Big.min". Extract Constant N.max => "Big.max". Extract Constant N.div => "fun a b -> if Big.eq b Big.zero then Big.zero else Big.div a b". Extract Constant N.modulo => "fun a b -> if Big.eq b Big.zero then Big.zero else Big.modulo a b". Extract Constant N.compare => "Big.compare_case Eq Lt Gt". Extract Constant Z.add => "Big.add". Extract Constant Z.succ => "Big.succ". Extract Constant Z.pred => "Big.pred". Extract Constant Z.sub => "Big.sub". Extract Constant Z.mul => "Big.mult". Extract Constant Z.opp => "Big.opp". Extract Constant Z.abs => "Big.abs". Extract Constant Z.min => "Big.min". Extract Constant Z.max => "Big.max". Extract Constant Z.compare => "Big.compare_case Eq Lt Gt". Extract Constant Z.of_N => "fun p -> p". Extract Constant Z.abs_N => "Big.abs". (** Z.div and Z.modulo are quite complex to define in terms of (/) and (mod). For the moment we don't even try *) (** Test: Require Import ZArith NArith. 
Extraction "/tmp/test.ml" Pos.add Pos.pred Pos.sub Pos.mul Pos.compare N.pred N.sub N.div N.modulo N.compare Z.add Z.mul Z.compare Z.of_N Z.abs_N Z.div Z.modulo. *) coq-8.4pl2/plugins/extraction/haskell.ml0000640000175000001440000002734612121620060017447 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Idset.add (id_of_string s)) [ "case"; "class"; "data"; "default"; "deriving"; "do"; "else"; "if"; "import"; "in"; "infix"; "infixl"; "infixr"; "instance"; "let"; "module"; "newtype"; "of"; "then"; "type"; "where"; "_"; "__"; "as"; "qualified"; "hiding" ; "unit" ; "unsafeCoerce" ] Idset.empty let preamble mod_name used_modules usf = let pp_import mp = str ("import qualified "^ string_of_modfile mp ^"\n") in (if not usf.magic then mt () else str "{-# OPTIONS_GHC -cpp -fglasgow-exts #-}\n" ++ str "{- For Hugs, use the option -F\"cpp -P -traditional\" -}\n\n") ++ str "module " ++ pr_upper_id mod_name ++ str " where" ++ fnl2 () ++ str "import qualified Prelude" ++ fnl () ++ prlist pp_import used_modules ++ fnl () ++ (if used_modules = [] then mt () else fnl ()) ++ (if not usf.magic then mt () else str "\ \nunsafeCoerce :: a -> b\ \n#ifdef __GLASGOW_HASKELL__\ \nimport qualified GHC.Base\ \nunsafeCoerce = GHC.Base.unsafeCoerce#\ \n#else\ \n-- HUGS\ \nimport qualified IOExts\ \nunsafeCoerce = IOExts.unsafeCoerce\ \n#endif" ++ fnl2 ()) ++ (if not usf.mldummy then mt () else str "__ :: any" ++ fnl () ++ str "__ = Prelude.error \"Logical or arity value used\"" ++ fnl2 ()) let pp_abst = function | [] -> (mt ()) | l -> (str "\\" ++ prlist_with_sep (fun () -> (str " ")) pr_id l ++ str " ->" ++ spc ()) (*s The pretty-printer for haskell syntax *) let pp_global k r = if is_inline_custom r then str (find_custom r) else str (Common.pp_global k r) (*s Pretty-printing of types. [par] is a boolean indicating whether parentheses are needed or not. *) let kn_sig = let specif = MPfile (dirpath_of_string "Coq.Init.Specif") in make_mind specif empty_dirpath (mk_label "sig") let rec pp_type par vl t = let rec pp_rec par = function | Tmeta _ | Tvar' _ -> assert false | Tvar i -> (try pr_id (List.nth vl (pred i)) with e when Errors.noncritical e -> (str "a" ++ int i)) | Tglob (r,[]) -> pp_global Type r | Tglob (IndRef(kn,0),l) when not (keep_singleton ()) && kn = mk_ind "Coq.Init.Specif" "sig" -> pp_type true vl (List.hd l) | Tglob (r,l) -> pp_par par (pp_global Type r ++ spc () ++ prlist_with_sep spc (pp_type true vl) l) | Tarr (t1,t2) -> pp_par par (pp_rec true t1 ++ spc () ++ str "->" ++ spc () ++ pp_rec false t2) | Tdummy _ -> str "()" | Tunknown -> str "()" | Taxiom -> str "() -- AXIOM TO BE REALIZED\n" in hov 0 (pp_rec par t) (*s Pretty-printing of expressions. [par] indicates whether parentheses are needed or not. [env] is the list of names for the de Bruijn variables. [args] is the list of collected arguments (already pretty-printed). *) let expr_needs_par = function | MLlam _ -> true | MLcase _ -> false (* now that we use the case ... of { ... 
} syntax *) | _ -> false let rec pp_expr par env args = let apply st = pp_apply st par args and apply2 st = pp_apply2 st par args in function | MLrel n -> let id = get_db_name n env in apply (pr_id id) | MLapp (f,args') -> let stl = List.map (pp_expr true env []) args' in pp_expr par env (stl @ args) f | MLlam _ as a -> let fl,a' = collect_lams a in let fl,env' = push_vars (List.map id_of_mlid fl) env in let st = (pp_abst (List.rev fl) ++ pp_expr false env' [] a') in apply2 st | MLletin (id,a1,a2) -> let i,env' = push_vars [id_of_mlid id] env in let pp_id = pr_id (List.hd i) and pp_a1 = pp_expr false env [] a1 and pp_a2 = pp_expr (not par && expr_needs_par a2) env' [] a2 in let pp_def = str "let {" ++ cut () ++ hov 1 (pp_id ++ str " = " ++ pp_a1 ++ str "}") in apply2 (hv 0 (hv 0 (hv 1 pp_def ++ spc () ++ str "in") ++ spc () ++ hov 0 pp_a2)) | MLglob r -> apply (pp_global Term r) | MLcons (_,r,a) as c -> assert (args=[]); begin match a with | _ when is_native_char c -> pp_native_char c | [] -> pp_global Cons r | [a] -> pp_par par (pp_global Cons r ++ spc () ++ pp_expr true env [] a) | _ -> pp_par par (pp_global Cons r ++ spc () ++ prlist_with_sep spc (pp_expr true env []) a) end | MLtuple l -> assert (args=[]); pp_boxed_tuple (pp_expr true env []) l | MLcase (_,t, pv) when is_custom_match pv -> if not (is_regular_match pv) then error "Cannot mix yet user-given match and general patterns."; let mkfun (ids,_,e) = if ids <> [] then named_lams (List.rev ids) e else dummy_lams (ast_lift 1 e) 1 in let pp_branch tr = pp_expr true env [] (mkfun tr) ++ fnl () in let inner = str (find_custom_match pv) ++ fnl () ++ prvect pp_branch pv ++ pp_expr true env [] t in apply2 (hov 2 inner) | MLcase (typ,t,pv) -> apply2 (v 0 (str "case " ++ pp_expr false env [] t ++ str " of {" ++ fnl () ++ pp_pat env pv)) | MLfix (i,ids,defs) -> let ids',env' = push_vars (List.rev (Array.to_list ids)) env in pp_fix par env' i (Array.of_list (List.rev ids'),defs) args | MLexn s -> (* An [MLexn] may be applied, but I don't really care. *) pp_par par (str "Prelude.error" ++ spc () ++ qs s) | MLdummy -> str "__" (* An [MLdummy] may be applied, but I don't really care. *) | MLmagic a -> pp_apply (str "unsafeCoerce") par (pp_expr true env [] a :: args) | MLaxiom -> pp_par par (str "Prelude.error \"AXIOM TO BE REALIZED\"") and pp_cons_pat par r ppl = pp_par par (pp_global Cons r ++ space_if (ppl<>[]) ++ prlist_with_sep spc identity ppl) and pp_gen_pat par ids env = function | Pcons (r,l) -> pp_cons_pat par r (List.map (pp_gen_pat true ids env) l) | Pusual r -> pp_cons_pat par r (List.map pr_id ids) | Ptuple l -> pp_boxed_tuple (pp_gen_pat false ids env) l | Pwild -> str "_" | Prel n -> pr_id (get_db_name n env) and pp_one_pat env (ids,p,t) = let ids',env' = push_vars (List.rev_map id_of_mlid ids) env in hov 2 (str " " ++ pp_gen_pat false (List.rev ids') env' p ++ str " ->" ++ spc () ++ pp_expr (expr_needs_par t) env' [] t) and pp_pat env pv = prvecti (fun i x -> pp_one_pat env pv.(i) ++ if i = Array.length pv - 1 then str "}" else (str ";" ++ fnl ())) pv (*s names of the functions ([ids]) are already pushed in [env], and passed here just for convenience. 
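   For instance (illustrative shape only, [f], [g], [x], [y] are placeholders), a local
   fixpoint applied to some arguments comes out roughly as
   [let { f x = ... ; g y = ... } in f args].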
*) and pp_fix par env i (ids,bl) args = pp_par par (v 0 (v 1 (str "let {" ++ fnl () ++ prvect_with_sep (fun () -> str ";" ++ fnl ()) (fun (fi,ti) -> pp_function env (pr_id fi) ti) (array_map2 (fun a b -> a,b) ids bl) ++ str "}") ++ fnl () ++ str "in " ++ pp_apply (pr_id ids.(i)) false args)) and pp_function env f t = let bl,t' = collect_lams t in let bl,env' = push_vars (List.map id_of_mlid bl) env in (f ++ pr_binding (List.rev bl) ++ str " =" ++ fnl () ++ str " " ++ hov 2 (pp_expr false env' [] t')) (*s Pretty-printing of inductive types declaration. *) let pp_comment s = str "-- " ++ s ++ fnl () let pp_logical_ind packet = pp_comment (pr_id packet.ip_typename ++ str " : logical inductive") ++ pp_comment (str "with constructors : " ++ prvect_with_sep spc pr_id packet.ip_consnames) let pp_singleton kn packet = let l = rename_tvars keywords packet.ip_vars in let l' = List.rev l in hov 2 (str "type " ++ pp_global Type (IndRef (kn,0)) ++ spc () ++ prlist_with_sep spc pr_id l ++ (if l <> [] then str " " else mt ()) ++ str "=" ++ spc () ++ pp_type false l' (List.hd packet.ip_types.(0)) ++ fnl () ++ pp_comment (str "singleton inductive, whose constructor was " ++ pr_id packet.ip_consnames.(0))) let pp_one_ind ip pl cv = let pl = rename_tvars keywords pl in let pp_constructor (r,l) = (pp_global Cons r ++ match l with | [] -> (mt ()) | _ -> (str " " ++ prlist_with_sep (fun () -> (str " ")) (pp_type true pl) l)) in str (if Array.length cv = 0 then "type " else "data ") ++ pp_global Type (IndRef ip) ++ prlist_strict (fun id -> str " " ++ pr_lower_id id) pl ++ str " =" ++ if Array.length cv = 0 then str " () -- empty inductive" else (fnl () ++ str " " ++ v 0 (str " " ++ prvect_with_sep (fun () -> fnl () ++ str "| ") pp_constructor (Array.mapi (fun i c -> ConstructRef (ip,i+1),c) cv))) let rec pp_ind first kn i ind = if i >= Array.length ind.ind_packets then if first then mt () else fnl () else let ip = (kn,i) in let p = ind.ind_packets.(i) in if is_custom (IndRef (kn,i)) then pp_ind first kn (i+1) ind else if p.ip_logical then pp_logical_ind p ++ pp_ind first kn (i+1) ind else pp_one_ind ip p.ip_vars p.ip_types ++ fnl () ++ pp_ind false kn (i+1) ind (*s Pretty-printing of a declaration. 
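   For instance (illustrative, [foo] and [t] are placeholders), a [Dterm] declaration for a
   constant [foo] of type [t] is printed as a signature [foo :: t] followed by its body
   [foo = ...].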
*) let pp_decl = function | Dind (kn,i) when i.ind_kind = Singleton -> pp_singleton kn i.ind_packets.(0) ++ fnl () | Dind (kn,i) -> hov 0 (pp_ind true kn 0 i) | Dtype (r, l, t) -> if is_inline_custom r then mt () else let l = rename_tvars keywords l in let st = try let ids,s = find_type_custom r in prlist (fun id -> str (id^" ")) ids ++ str "=" ++ spc () ++ str s with Not_found -> prlist (fun id -> pr_id id ++ str " ") l ++ if t = Taxiom then str "= () -- AXIOM TO BE REALIZED\n" else str "=" ++ spc () ++ pp_type false l t in hov 2 (str "type " ++ pp_global Type r ++ spc () ++ st) ++ fnl2 () | Dfix (rv, defs, typs) -> let names = Array.map (fun r -> if is_inline_custom r then mt () else pp_global Term r) rv in prvecti (fun i r -> let void = is_inline_custom r || (not (is_custom r) && defs.(i) = MLexn "UNUSED") in if void then mt () else names.(i) ++ str " :: " ++ pp_type false [] typs.(i) ++ fnl () ++ (if is_custom r then (names.(i) ++ str " = " ++ str (find_custom r)) else (pp_function (empty_env ()) names.(i) defs.(i))) ++ fnl2 ()) rv | Dterm (r, a, t) -> if is_inline_custom r then mt () else let e = pp_global Term r in e ++ str " :: " ++ pp_type false [] t ++ fnl () ++ if is_custom r then hov 0 (e ++ str " = " ++ str (find_custom r) ++ fnl2 ()) else hov 0 (pp_function (empty_env ()) e a ++ fnl2 ()) let rec pp_structure_elem = function | (l,SEdecl d) -> pp_decl d | (l,SEmodule m) -> pp_module_expr m.ml_mod_expr | (l,SEmodtype m) -> mt () (* for the moment we simply discard module type *) and pp_module_expr = function | MEstruct (mp,sel) -> prlist_strict pp_structure_elem sel | MEfunctor _ -> mt () (* for the moment we simply discard unapplied functors *) | MEident _ | MEapply _ -> assert false (* should be expansed in extract_env *) let pp_struct = let pp_sel (mp,sel) = push_visible mp []; let p = prlist_strict pp_structure_elem sel in pop_visible (); p in prlist_strict pp_sel let haskell_descr = { keywords = keywords; file_suffix = ".hs"; preamble = preamble; pp_struct = pp_struct; sig_suffix = None; sig_preamble = (fun _ _ _ -> mt ()); pp_sig = (fun _ -> mt ()); pp_decl = pp_decl; } coq-8.4pl2/plugins/extraction/extraction_plugin.mllib0000640000175000001440000000015311161000644022237 0ustar notinusersTable Mlutil Modutil Extraction Common Ocaml Haskell Scheme Extract_env G_extraction Extraction_plugin_mod coq-8.4pl2/plugins/extraction/modutil.mli0000640000175000001440000000310412010532755017646 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool) -> ml_structure -> bool val struct_type_search : (ml_type -> bool) -> ml_structure -> bool type do_ref = global_reference -> unit val decl_iter_references : do_ref -> do_ref -> do_ref -> ml_decl -> unit val spec_iter_references : do_ref -> do_ref -> do_ref -> ml_spec -> unit val signature_of_structure : ml_structure -> ml_signature val msid_of_mt : ml_module_type -> module_path val get_decl_in_structure : global_reference -> ml_structure -> ml_decl (* Some transformations of ML terms. [optimize_struct] simplify all beta redexes (when the argument does not occur, it is just thrown away; when it occurs exactly once it is substituted; otherwise a let-in redex is created for clarity) and iota redexes, plus some other optimizations. The first argument is the list of objects we want to appear. 
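   For instance (illustrative ML-level shapes), a redex [(fun x -> e) a] becomes [e] when [x]
   does not occur in [e], becomes [e] with [a] substituted for [x] when [x] occurs exactly once,
   and is kept as [let x = a in e] otherwise.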
*) val optimize_struct : global_reference list * module_path list -> ml_structure -> ml_structure coq-8.4pl2/plugins/extraction/table.ml0000640000175000001440000006610312121627214017115 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* kn = kn' | ConstRef _ -> false | VarRef _ -> assert false let repr_of_r = function | ConstRef kn -> repr_con kn | IndRef (kn,_) | ConstructRef ((kn,_),_) -> repr_mind kn | VarRef _ -> assert false let modpath_of_r r = let mp,_,_ = repr_of_r r in mp let label_of_r r = let _,_,l = repr_of_r r in l let rec base_mp = function | MPdot (mp,l) -> base_mp mp | mp -> mp let is_modfile = function | MPfile _ -> true | _ -> false let raw_string_of_modfile = function | MPfile f -> String.capitalize (string_of_id (List.hd (repr_dirpath f))) | _ -> assert false let current_toplevel () = fst (Lib.current_prefix ()) let is_toplevel mp = mp = initial_path || mp = current_toplevel () let at_toplevel mp = is_modfile mp || is_toplevel mp let rec mp_length mp = let mp0 = current_toplevel () in let rec len = function | mp when mp = mp0 -> 1 | MPdot (mp,_) -> 1 + len mp | _ -> 1 in len mp let visible_con kn = at_toplevel (base_mp (con_modpath kn)) let rec prefixes_mp mp = match mp with | MPdot (mp',_) -> MPset.add mp (prefixes_mp mp') | _ -> MPset.singleton mp let rec get_nth_label_mp n = function | MPdot (mp,l) -> if n=1 then l else get_nth_label_mp (n-1) mp | _ -> failwith "get_nth_label: not enough MPdot" let common_prefix_from_list mp0 mpl = let prefixes = prefixes_mp mp0 in let rec f = function | [] -> None | mp :: l -> if MPset.mem mp prefixes then Some mp else f l in f mpl let rec parse_labels2 ll mp1 = function | mp when mp1=mp -> mp,ll | MPdot (mp,l) -> parse_labels2 (l::ll) mp1 mp | mp -> mp,ll let labels_of_ref r = let mp_top = current_toplevel () in let mp,_,l = repr_of_r r in parse_labels2 [l] mp_top mp (*S The main tables: constants, inductives, records, ... *) (* Theses tables are not registered within coq save/undo mechanism since we reset their contents at each run of Extraction *) (*s Constants tables. *) let terms = ref (Cmap_env.empty : ml_decl Cmap_env.t) let init_terms () = terms := Cmap_env.empty let add_term kn d = terms := Cmap_env.add kn d !terms let lookup_term kn = Cmap_env.find kn !terms let types = ref (Cmap_env.empty : ml_schema Cmap_env.t) let init_types () = types := Cmap_env.empty let add_type kn s = types := Cmap_env.add kn s !types let lookup_type kn = Cmap_env.find kn !types (*s Inductives table. 
*) let inductives = ref (Mindmap_env.empty : (mutual_inductive_body * ml_ind) Mindmap_env.t) let init_inductives () = inductives := Mindmap_env.empty let add_ind kn mib ml_ind = inductives := Mindmap_env.add kn (mib,ml_ind) !inductives let lookup_ind kn = Mindmap_env.find kn !inductives let inductive_kinds = ref (Mindmap_env.empty : inductive_kind Mindmap_env.t) let init_inductive_kinds () = inductive_kinds := Mindmap_env.empty let add_inductive_kind kn k = inductive_kinds := Mindmap_env.add kn k !inductive_kinds let is_coinductive r = let kn = match r with | ConstructRef ((kn,_),_) -> kn | IndRef (kn,_) -> kn | _ -> assert false in try Mindmap_env.find kn !inductive_kinds = Coinductive with Not_found -> false let is_coinductive_type = function | Tglob (r,_) -> is_coinductive r | _ -> false let get_record_fields r = let kn = match r with | ConstructRef ((kn,_),_) -> kn | IndRef (kn,_) -> kn | _ -> assert false in try match Mindmap_env.find kn !inductive_kinds with | Record f -> f | _ -> [] with Not_found -> [] let record_fields_of_type = function | Tglob (r,_) -> get_record_fields r | _ -> [] (*s Recursors table. *) (* NB: here we can use the equivalence between canonical and user constant names : Cset is fine, no need for [Cset_env] *) let recursors = ref Cset.empty let init_recursors () = recursors := Cset.empty let add_recursors env kn = let mk_con id = make_con_equiv (modpath (user_mind kn)) (modpath (canonical_mind kn)) empty_dirpath (label_of_id id) in let mib = Environ.lookup_mind kn env in Array.iter (fun mip -> let id = mip.mind_typename in let c_rec = mk_con (Nameops.add_suffix id "_rec") and c_rect = mk_con (Nameops.add_suffix id "_rect") in recursors := Cset.add c_rec (Cset.add c_rect !recursors)) mib.mind_packets let is_recursor = function | ConstRef kn -> Cset.mem kn !recursors | _ -> false (*s Record tables. *) (* NB: here, working modulo name equivalence is ok *) let projs = ref (Refmap.empty : int Refmap.t) let init_projs () = projs := Refmap.empty let add_projection n kn = projs := Refmap.add (ConstRef kn) n !projs let is_projection r = Refmap.mem r !projs let projection_arity r = Refmap.find r !projs (*s Table of used axioms *) let info_axioms = ref Refset'.empty let log_axioms = ref Refset'.empty let init_axioms () = info_axioms := Refset'.empty; log_axioms := Refset'.empty let add_info_axiom r = info_axioms := Refset'.add r !info_axioms let remove_info_axiom r = info_axioms := Refset'.remove r !info_axioms let add_log_axiom r = log_axioms := Refset'.add r !log_axioms let opaques = ref Refset'.empty let init_opaques () = opaques := Refset'.empty let add_opaque r = opaques := Refset'.add r !opaques let remove_opaque r = opaques := Refset'.remove r !opaques (*s Extraction modes: modular or monolithic, library or minimal ? Nota: - Recursive Extraction : monolithic, minimal - Separate Extraction : modular, minimal - Extraction Library : modular, library *) let modular_ref = ref false let library_ref = ref false let set_modular b = modular_ref := b let modular () = !modular_ref let set_library b = library_ref := b let library () = !library_ref (*s Printing. *) (* The following functions work even on objects not in [Global.env ()]. Warning: for inductive objects, this only works if an [extract_inductive] have been done earlier, otherwise we can only ask the Nametab about currently visible objects. 
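   For instance, [safe_basename_of_global] below tries the [ml_ind] recorded by a previous
   extraction before resorting to [Nametab.basename_of_global].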
*) let safe_basename_of_global r = let last_chance r = try Nametab.basename_of_global r with Not_found -> anomaly "Inductive object unknown to extraction and not globally visible" in match r with | ConstRef kn -> id_of_label (con_label kn) | IndRef (kn,0) -> id_of_label (mind_label kn) | IndRef (kn,i) -> (try (snd (lookup_ind kn)).ind_packets.(i).ip_typename with Not_found -> last_chance r) | ConstructRef ((kn,i),j) -> (try (snd (lookup_ind kn)).ind_packets.(i).ip_consnames.(j-1) with Not_found -> last_chance r) | VarRef _ -> assert false let string_of_global r = try string_of_qualid (Nametab.shortest_qualid_of_global Idset.empty r) with e when Errors.noncritical e -> string_of_id (safe_basename_of_global r) let safe_pr_global r = str (string_of_global r) (* idem, but with qualification, and only for constants. *) let safe_pr_long_global r = try Printer.pr_global r with e when Errors.noncritical e -> match r with | ConstRef kn -> let mp,_,l = repr_con kn in str ((string_of_mp mp)^"."^(string_of_label l)) | _ -> assert false let pr_long_mp mp = let lid = repr_dirpath (Nametab.dirpath_of_module mp) in str (String.concat "." (List.map string_of_id (List.rev lid))) let pr_long_global ref = pr_path (Nametab.path_of_global ref) (*S Warning and Error messages. *) let err s = errorlabstrm "Extraction" s let warning_axioms () = let info_axioms = Refset'.elements !info_axioms in if info_axioms = [] then () else begin let s = if List.length info_axioms = 1 then "axiom" else "axioms" in msg_warning (str ("The following "^s^" must be realized in the extracted code:") ++ hov 1 (spc () ++ prlist_with_sep spc safe_pr_global info_axioms) ++ str "." ++ fnl ()) end; let log_axioms = Refset'.elements !log_axioms in if log_axioms = [] then () else begin let s = if List.length log_axioms = 1 then "axiom was" else "axioms were" in msg_warning (str ("The following logical "^s^" encountered:") ++ hov 1 (spc () ++ prlist_with_sep spc safe_pr_global log_axioms ++ str ".\n") ++ str "Having invalid logical axiom in the environment when extracting" ++ spc () ++ str "may lead to incorrect or non-terminating ML terms." ++ fnl ()) end; if !Flags.load_proofs = Flags.Dont && info_axioms@log_axioms <> [] then msg_warning (str "Some of these axioms might be due to option -dont-load-proofs.") let warning_opaques accessed = let opaques = Refset'.elements !opaques in if opaques = [] then () else let lst = hov 1 (spc () ++ prlist_with_sep spc safe_pr_global opaques) in if accessed then msg_warning (str "The extraction is currently set to bypass opacity,\n" ++ str "the following opaque constant bodies have been accessed :" ++ lst ++ str "." ++ fnl ()) else msg_warning (str "The extraction now honors the opacity constraints by default,\n" ++ str "the following opaque constants have been extracted as axioms :" ++ lst ++ str "." ++ fnl () ++ str "If necessary, use \"Set Extraction AccessOpaque\" to change this." ++ fnl ()) let warning_both_mod_and_cst q mp r = msg_warning (str "The name " ++ pr_qualid q ++ str " is ambiguous, " ++ str "do you mean module " ++ pr_long_mp mp ++ str " or object " ++ pr_long_global r ++ str " ?" ++ fnl () ++ str "First choice is assumed, for the second one please use " ++ str "fully qualified name." ++ fnl ()) let error_axiom_scheme r i = err (str "The type scheme axiom " ++ spc () ++ safe_pr_global r ++ spc () ++ str "needs " ++ pr_int i ++ str " type variable(s).") let check_inside_module () = if Lib.is_modtype () then err (str "You can't do that within a Module Type." 
++ fnl () ++ str "Close it and try again.") else if Lib.is_module () then msg_warning (str "Extraction inside an opened module is experimental.\n" ++ str "In case of problem, close it first.\n") let check_inside_section () = if Lib.sections_are_opened () then err (str "You can't do that within a section." ++ fnl () ++ str "Close it and try again.") let warning_id s = msg_warning (str ("The identifier "^s^ " contains __ which is reserved for the extraction")) let error_constant r = err (safe_pr_global r ++ str " is not a constant.") let error_inductive r = err (safe_pr_global r ++ spc () ++ str "is not an inductive type.") let error_nb_cons () = err (str "Not the right number of constructors.") let error_module_clash mp1 mp2 = err (str "The Coq modules " ++ pr_long_mp mp1 ++ str " and " ++ pr_long_mp mp2 ++ str " have the same ML name.\n" ++ str "This is not supported yet. Please do some renaming first.") let error_no_module_expr mp = err (str "The module " ++ pr_long_mp mp ++ str " has no body, it probably comes from\n" ++ str "some Declare Module outside any Module Type.\n" ++ str "This situation is currently unsupported by the extraction.") let error_singleton_become_prop id = err (str "The informative inductive type " ++ pr_id id ++ str " has a Prop instance.\n" ++ str "This happens when a sort-polymorphic singleton inductive type\n" ++ str "has logical parameters, such as (I,I) : (True * True) : Prop.\n" ++ str "The Ocaml extraction cannot handle this situation yet.\n" ++ str "Instead, use a sort-monomorphic type such as (True /\\ True)\n" ++ str "or extract to Haskell.") let error_unknown_module m = err (str "Module" ++ spc () ++ pr_qualid m ++ spc () ++ str "not found.") let error_scheme () = err (str "No Scheme modular extraction available yet.") let error_not_visible r = err (safe_pr_global r ++ str " is not directly visible.\n" ++ str "For example, it may be inside an applied functor.\n" ++ str "Use Recursive Extraction to get the whole environment.") let error_MPfile_as_mod mp b = let s1 = if b then "asked" else "required" in let s2 = if b then "extract some objects of this module or\n" else "" in err (str ("Extraction of file "^(raw_string_of_modfile mp)^ ".v as a module is "^s1^".\n"^ "Monolithic Extraction cannot deal with this situation.\n"^ "Please "^s2^"use (Recursive) Extraction Library instead.\n")) let msg_non_implicit r n id = let name = match id with | Anonymous -> "" | Name id -> "(" ^ string_of_id id ^ ") " in "The " ^ (ordinal n) ^ " argument " ^ name ^ "of " ^ (string_of_global r) let error_non_implicit msg = err (str (msg ^ " still occurs after extraction.") ++ fnl () ++ str "Please check the Extraction Implicit declarations.") let check_loaded_modfile mp = match base_mp mp with | MPfile dp -> if not (Library.library_is_loaded dp) then begin match base_mp (current_toplevel ()) with | MPfile dp' when dp<>dp' -> err (str ("Please load library "^(string_of_dirpath dp^" first."))) | _ -> () end | _ -> () let info_file f = Flags.if_verbose message ("The file "^f^" has been created by extraction.") (*S The Extraction auxiliary commands *) (* The objects defined below should survive an arbitrary time, so we register them to coq save/undo mechanism. 
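   For instance, [my_bool_option "AccessOpaque" true] below declares the flag controlled by
   [Set Extraction AccessOpaque] / [Unset Extraction AccessOpaque] and returns the accessor
   used elsewhere as [access_opaque ()].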
*) let my_bool_option name initval = let flag = ref initval in let access = fun () -> !flag in let _ = declare_bool_option {optsync = true; optdepr = false; optname = "Extraction "^name; optkey = ["Extraction"; name]; optread = access; optwrite = (:=) flag } in access (*s Extraction AccessOpaque *) let access_opaque = my_bool_option "AccessOpaque" true (*s Extraction AutoInline *) let auto_inline = my_bool_option "AutoInline" false (*s Extraction TypeExpand *) let type_expand = my_bool_option "TypeExpand" true (*s Extraction KeepSingleton *) let keep_singleton = my_bool_option "KeepSingleton" false (*s Extraction Optimize *) type opt_flag = { opt_kill_dum : bool; (* 1 *) opt_fix_fun : bool; (* 2 *) opt_case_iot : bool; (* 4 *) opt_case_idr : bool; (* 8 *) opt_case_idg : bool; (* 16 *) opt_case_cst : bool; (* 32 *) opt_case_fun : bool; (* 64 *) opt_case_app : bool; (* 128 *) opt_let_app : bool; (* 256 *) opt_lin_let : bool; (* 512 *) opt_lin_beta : bool } (* 1024 *) let kth_digit n k = (n land (1 lsl k) <> 0) let flag_of_int n = { opt_kill_dum = kth_digit n 0; opt_fix_fun = kth_digit n 1; opt_case_iot = kth_digit n 2; opt_case_idr = kth_digit n 3; opt_case_idg = kth_digit n 4; opt_case_cst = kth_digit n 5; opt_case_fun = kth_digit n 6; opt_case_app = kth_digit n 7; opt_let_app = kth_digit n 8; opt_lin_let = kth_digit n 9; opt_lin_beta = kth_digit n 10 } (* For the moment, we allow by default everything except : - the type-unsafe optimization [opt_case_idg], which anyway cannot be activated currently (cf [Mlutil.branch_as_fun]) - the linear let and beta reduction [opt_lin_let] and [opt_lin_beta] (may lead to complexity blow-up, subsumed by finer reductions when inlining recursors). *) let int_flag_init = 1 + 2 + 4 + 8 (*+ 16*) + 32 + 64 + 128 + 256 (*+ 512 + 1024*) let int_flag_ref = ref int_flag_init let opt_flag_ref = ref (flag_of_int int_flag_init) let chg_flag n = int_flag_ref := n; opt_flag_ref := flag_of_int n let optims () = !opt_flag_ref let _ = declare_bool_option {optsync = true; optdepr = false; optname = "Extraction Optimize"; optkey = ["Extraction"; "Optimize"]; optread = (fun () -> !int_flag_ref <> 0); optwrite = (fun b -> chg_flag (if b then int_flag_init else 0))} let _ = declare_int_option { optsync = true; optdepr = false; optname = "Extraction Flag"; optkey = ["Extraction";"Flag"]; optread = (fun _ -> Some !int_flag_ref); optwrite = (function | None -> chg_flag 0 | Some i -> chg_flag (max i 0))} (*s Extraction Lang *) type lang = Ocaml | Haskell | Scheme let lang_ref = ref Ocaml let lang () = !lang_ref let extr_lang : lang -> obj = declare_object {(default_object "Extraction Lang") with cache_function = (fun (_,l) -> lang_ref := l); load_function = (fun _ (_,l) -> lang_ref := l)} let _ = declare_summary "Extraction Lang" { freeze_function = (fun () -> !lang_ref); unfreeze_function = ((:=) lang_ref); init_function = (fun () -> lang_ref := Ocaml) } let extraction_language x = Lib.add_anonymous_leaf (extr_lang x) (*s Extraction Inline/NoInline *) let empty_inline_table = (Refset'.empty,Refset'.empty) let inline_table = ref empty_inline_table let to_inline r = Refset'.mem r (fst !inline_table) let to_keep r = Refset'.mem r (snd !inline_table) let add_inline_entries b l = let f b = if b then Refset'.add else Refset'.remove in let i,k = !inline_table in inline_table := (List.fold_right (f b) l i), (List.fold_right (f (not b)) l k) (* Registration of operations for rollback. 
*) let inline_extraction : bool * global_reference list -> obj = declare_object {(default_object "Extraction Inline") with cache_function = (fun (_,(b,l)) -> add_inline_entries b l); load_function = (fun _ (_,(b,l)) -> add_inline_entries b l); classify_function = (fun o -> Substitute o); discharge_function = (fun (_,(b,l)) -> Some (b, List.map pop_global_reference l)); subst_function = (fun (s,(b,l)) -> (b,(List.map (fun x -> fst (subst_global s x)) l))) } let _ = declare_summary "Extraction Inline" { freeze_function = (fun () -> !inline_table); unfreeze_function = ((:=) inline_table); init_function = (fun () -> inline_table := empty_inline_table) } (* Grammar entries. *) let extraction_inline b l = let refs = List.map Smartlocate.global_with_alias l in List.iter (fun r -> match r with | ConstRef _ -> () | _ -> error_constant r) refs; Lib.add_anonymous_leaf (inline_extraction (b,refs)) (* Printing part *) let print_extraction_inline () = let (i,n)= !inline_table in let i'= Refset'.filter (function ConstRef _ -> true | _ -> false) i in msg (str "Extraction Inline:" ++ fnl () ++ Refset'.fold (fun r p -> (p ++ str " " ++ safe_pr_long_global r ++ fnl ())) i' (mt ()) ++ str "Extraction NoInline:" ++ fnl () ++ Refset'.fold (fun r p -> (p ++ str " " ++ safe_pr_long_global r ++ fnl ())) n (mt ())) (* Reset part *) let reset_inline : unit -> obj = declare_object {(default_object "Reset Extraction Inline") with cache_function = (fun (_,_)-> inline_table := empty_inline_table); load_function = (fun _ (_,_)-> inline_table := empty_inline_table)} let reset_extraction_inline () = Lib.add_anonymous_leaf (reset_inline ()) (*s Extraction Implicit *) type int_or_id = ArgInt of int | ArgId of identifier let implicits_table = ref Refmap'.empty let implicits_of_global r = try Refmap'.find r !implicits_table with Not_found -> [] let add_implicits r l = let typ = Global.type_of_global r in let rels,_ = decompose_prod (Reduction.whd_betadeltaiota (Global.env ()) typ) in let names = List.rev_map fst rels in let n = List.length names in let check = function | ArgInt i -> if 1 <= i && i <= n then i else err (int i ++ str " is not a valid argument number for " ++ safe_pr_global r) | ArgId id -> (try list_index (Name id) names with Not_found -> err (str "No argument " ++ pr_id id ++ str " for " ++ safe_pr_global r)) in let l' = List.map check l in implicits_table := Refmap'.add r l' !implicits_table (* Registration of operations for rollback. *) let implicit_extraction : global_reference * int_or_id list -> obj = declare_object {(default_object "Extraction Implicit") with cache_function = (fun (_,(r,l)) -> add_implicits r l); load_function = (fun _ (_,(r,l)) -> add_implicits r l); classify_function = (fun o -> Substitute o); subst_function = (fun (s,(r,l)) -> (fst (subst_global s r), l)) } let _ = declare_summary "Extraction Implicit" { freeze_function = (fun () -> !implicits_table); unfreeze_function = ((:=) implicits_table); init_function = (fun () -> implicits_table := Refmap'.empty) } (* Grammar entries. 
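   For instance (the command syntax is recalled only informally), a vernacular command along
   the lines of [Extraction Implicit foo [ 1 x ]] reaches [extraction_implicit] below as the
   list [ArgInt 1; ArgId x], which [add_implicits] then checks against the actual arguments
   of [foo].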
*) let extraction_implicit r l = check_inside_section (); Lib.add_anonymous_leaf (implicit_extraction (Smartlocate.global_with_alias r,l)) (*s Extraction Blacklist of filenames not to use while extracting *) let blacklist_table = ref Idset.empty let modfile_ids = ref [] let modfile_mps = ref MPmap.empty let reset_modfile () = modfile_ids := Idset.elements !blacklist_table; modfile_mps := MPmap.empty let string_of_modfile mp = try MPmap.find mp !modfile_mps with Not_found -> let id = id_of_string (raw_string_of_modfile mp) in let id' = next_ident_away id !modfile_ids in let s' = string_of_id id' in modfile_ids := id' :: !modfile_ids; modfile_mps := MPmap.add mp s' !modfile_mps; s' (* same as [string_of_modfile], but preserves the capital/uncapital 1st char *) let file_of_modfile mp = let s0 = match mp with | MPfile f -> string_of_id (List.hd (repr_dirpath f)) | _ -> assert false in let s = String.copy (string_of_modfile mp) in if s.[0] <> s0.[0] then s.[0] <- s0.[0]; s let add_blacklist_entries l = blacklist_table := List.fold_right (fun s -> Idset.add (id_of_string (String.capitalize s))) l !blacklist_table (* Registration of operations for rollback. *) let blacklist_extraction : string list -> obj = declare_object {(default_object "Extraction Blacklist") with cache_function = (fun (_,l) -> add_blacklist_entries l); load_function = (fun _ (_,l) -> add_blacklist_entries l); subst_function = (fun (_,x) -> x) } let _ = declare_summary "Extraction Blacklist" { freeze_function = (fun () -> !blacklist_table); unfreeze_function = ((:=) blacklist_table); init_function = (fun () -> blacklist_table := Idset.empty) } (* Grammar entries. *) let extraction_blacklist l = let l = List.rev_map string_of_id l in Lib.add_anonymous_leaf (blacklist_extraction l) (* Printing part *) let print_extraction_blacklist () = msgnl (prlist_with_sep fnl pr_id (Idset.elements !blacklist_table)) (* Reset part *) let reset_blacklist : unit -> obj = declare_object {(default_object "Reset Extraction Blacklist") with cache_function = (fun (_,_)-> blacklist_table := Idset.empty); load_function = (fun _ (_,_)-> blacklist_table := Idset.empty)} let reset_extraction_blacklist () = Lib.add_anonymous_leaf (reset_blacklist ()) (*s Extract Constant/Inductive. *) (* UGLY HACK: to be defined in [extraction.ml] *) let use_type_scheme_nb_args, register_type_scheme_nb_args = let r = ref (fun _ _ -> 0) in (fun x y -> !r x y), (:=) r let customs = ref Refmap'.empty let add_custom r ids s = customs := Refmap'.add r (ids,s) !customs let is_custom r = Refmap'.mem r !customs let is_inline_custom r = (is_custom r) && (to_inline r) let find_custom r = snd (Refmap'.find r !customs) let find_type_custom r = Refmap'.find r !customs let custom_matchs = ref Refmap'.empty let add_custom_match r s = custom_matchs := Refmap'.add r s !custom_matchs let indref_of_match pv = if Array.length pv = 0 then raise Not_found; let (_,pat,_) = pv.(0) in match pat with | Pusual (ConstructRef (ip,_)) -> IndRef ip | Pcons (ConstructRef (ip,_),_) -> IndRef ip | _ -> raise Not_found let is_custom_match pv = try Refmap'.mem (indref_of_match pv) !custom_matchs with Not_found -> false let find_custom_match pv = Refmap'.find (indref_of_match pv) !custom_matchs (* Registration of operations for rollback. 
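   For instance (with [p] a placeholder constant), [Extract Constant p => "0"] is eventually
   recorded through [in_customs] below, as no type variables plus the string "0", by
   [extract_constant_inline] further down, together with an [inline_extraction] entry.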
*) let in_customs : global_reference * string list * string -> obj = declare_object {(default_object "ML extractions") with cache_function = (fun (_,(r,ids,s)) -> add_custom r ids s); load_function = (fun _ (_,(r,ids,s)) -> add_custom r ids s); classify_function = (fun o -> Substitute o); subst_function = (fun (s,(r,ids,str)) -> (fst (subst_global s r), ids, str)) } let _ = declare_summary "ML extractions" { freeze_function = (fun () -> !customs); unfreeze_function = ((:=) customs); init_function = (fun () -> customs := Refmap'.empty) } let in_custom_matchs : global_reference * string -> obj = declare_object {(default_object "ML extractions custom matchs") with cache_function = (fun (_,(r,s)) -> add_custom_match r s); load_function = (fun _ (_,(r,s)) -> add_custom_match r s); classify_function = (fun o -> Substitute o); subst_function = (fun (subs,(r,s)) -> (fst (subst_global subs r), s)) } let _ = declare_summary "ML extractions custom match" { freeze_function = (fun () -> !custom_matchs); unfreeze_function = ((:=) custom_matchs); init_function = (fun () -> custom_matchs := Refmap'.empty) } (* Grammar entries. *) let extract_constant_inline inline r ids s = check_inside_section (); let g = Smartlocate.global_with_alias r in match g with | ConstRef kn -> let env = Global.env () in let typ = Typeops.type_of_constant env kn in let typ = Reduction.whd_betadeltaiota env typ in if Reduction.is_arity env typ then begin let nargs = use_type_scheme_nb_args env typ in if List.length ids <> nargs then error_axiom_scheme g nargs end; Lib.add_anonymous_leaf (inline_extraction (inline,[g])); Lib.add_anonymous_leaf (in_customs (g,ids,s)) | _ -> error_constant g let extract_inductive r s l optstr = check_inside_section (); let g = Smartlocate.global_with_alias r in Dumpglob.add_glob (loc_of_reference r) g; match g with | IndRef ((kn,i) as ip) -> let mib = Global.lookup_mind kn in let n = Array.length mib.mind_packets.(i).mind_consnames in if n <> List.length l then error_nb_cons (); Lib.add_anonymous_leaf (inline_extraction (true,[g])); Lib.add_anonymous_leaf (in_customs (g,[],s)); Option.iter (fun s -> Lib.add_anonymous_leaf (in_custom_matchs (g,s))) optstr; list_iter_i (fun j s -> let g = ConstructRef (ip,succ j) in Lib.add_anonymous_leaf (inline_extraction (true,[g])); Lib.add_anonymous_leaf (in_customs (g,[],s))) l | _ -> error_inductive g (*s Tables synchronization. *) let reset_tables () = init_terms (); init_types (); init_inductives (); init_inductive_kinds (); init_recursors (); init_projs (); init_axioms (); init_opaques (); reset_modfile () coq-8.4pl2/plugins/extraction/extraction.mli0000640000175000001440000000217512010532755020360 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constant -> constant_body -> ml_decl val extract_constant_spec : env -> constant -> constant_body -> ml_spec val extract_with_type : env -> constant_body -> ( identifier list * ml_type ) option val extract_fixpoint : env -> constant array -> (constr, types) prec_declaration -> ml_decl val extract_inductive : env -> mutual_inductive -> ml_ind (*s Is a [ml_decl] or a [ml_spec] logical ? 
*) val logical_decl : ml_decl -> bool val logical_spec : ml_spec -> bool coq-8.4pl2/plugins/extraction/extract_env.mli0000640000175000001440000000210412010532755020512 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val full_extraction : string option -> reference list -> unit val separate_extraction : reference list -> unit val extraction_library : bool -> identifier -> unit (* For debug / external output via coqtop.byte + Drop : *) val mono_environment : global_reference list -> module_path list -> Miniml.ml_structure (* Used by the Relation Extraction plugin *) val print_one_decl : Miniml.ml_structure -> module_path -> Miniml.ml_decl -> unit coq-8.4pl2/plugins/extraction/haskell.mli0000640000175000001440000000107112010532755017615 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* identifier (*s Warning and Error messages. *) val warning_axioms : unit -> unit val warning_opaques : bool -> unit val warning_both_mod_and_cst : qualid -> module_path -> global_reference -> unit val warning_id : string -> unit val error_axiom_scheme : global_reference -> int -> 'a val error_constant : global_reference -> 'a val error_inductive : global_reference -> 'a val error_nb_cons : unit -> 'a val error_module_clash : module_path -> module_path -> 'a val error_no_module_expr : module_path -> 'a val error_singleton_become_prop : identifier -> 'a val error_unknown_module : qualid -> 'a val error_scheme : unit -> 'a val error_not_visible : global_reference -> 'a val error_MPfile_as_mod : module_path -> bool -> 'a val check_inside_module : unit -> unit val check_inside_section : unit -> unit val check_loaded_modfile : module_path -> unit val msg_non_implicit : global_reference -> int -> name -> string val error_non_implicit : string -> 'a val info_file : string -> unit (*s utilities about [module_path] and [kernel_names] and [global_reference] *) val occur_kn_in_ref : mutual_inductive -> global_reference -> bool val repr_of_r : global_reference -> module_path * dir_path * label val modpath_of_r : global_reference -> module_path val label_of_r : global_reference -> label val current_toplevel : unit -> module_path val base_mp : module_path -> module_path val is_modfile : module_path -> bool val string_of_modfile : module_path -> string val file_of_modfile : module_path -> string val is_toplevel : module_path -> bool val at_toplevel : module_path -> bool val visible_con : constant -> bool val mp_length : module_path -> int val prefixes_mp : module_path -> MPset.t val common_prefix_from_list : module_path -> module_path list -> module_path option val get_nth_label_mp : int -> module_path -> label val labels_of_ref : global_reference -> module_path * label list (*s Some table-related operations *) val add_term : constant -> ml_decl -> unit val lookup_term : constant -> ml_decl val add_type : constant -> ml_schema -> unit val lookup_type : constant -> ml_schema val add_ind : mutual_inductive -> mutual_inductive_body -> ml_ind -> unit val lookup_ind : mutual_inductive -> mutual_inductive_body * ml_ind val add_inductive_kind : mutual_inductive -> inductive_kind -> unit val is_coinductive : global_reference -> bool val is_coinductive_type : ml_type -> bool (* What are the fields of a record (empty for a non-record) *) val get_record_fields : global_reference -> global_reference option list val 
record_fields_of_type : ml_type -> global_reference option list val add_recursors : Environ.env -> mutual_inductive -> unit val is_recursor : global_reference -> bool val add_projection : int -> constant -> unit val is_projection : global_reference -> bool val projection_arity : global_reference -> int val add_info_axiom : global_reference -> unit val remove_info_axiom : global_reference -> unit val add_log_axiom : global_reference -> unit val add_opaque : global_reference -> unit val remove_opaque : global_reference -> unit val reset_tables : unit -> unit (*s AccessOpaque parameter *) val access_opaque : unit -> bool (*s AutoInline parameter *) val auto_inline : unit -> bool (*s TypeExpand parameter *) val type_expand : unit -> bool (*s KeepSingleton parameter *) val keep_singleton : unit -> bool (*s Optimize parameter *) type opt_flag = { opt_kill_dum : bool; (* 1 *) opt_fix_fun : bool; (* 2 *) opt_case_iot : bool; (* 4 *) opt_case_idr : bool; (* 8 *) opt_case_idg : bool; (* 16 *) opt_case_cst : bool; (* 32 *) opt_case_fun : bool; (* 64 *) opt_case_app : bool; (* 128 *) opt_let_app : bool; (* 256 *) opt_lin_let : bool; (* 512 *) opt_lin_beta : bool } (* 1024 *) val optims : unit -> opt_flag (*s Target language. *) type lang = Ocaml | Haskell | Scheme val lang : unit -> lang (*s Extraction modes: modular or monolithic, library or minimal ? Nota: - Recursive Extraction : monolithic, minimal - Separate Extraction : modular, minimal - Extraction Library : modular, library *) val set_modular : bool -> unit val modular : unit -> bool val set_library : bool -> unit val library : unit -> bool (*s Table for custom inlining *) val to_inline : global_reference -> bool val to_keep : global_reference -> bool (*s Table for implicits arguments *) val implicits_of_global : global_reference -> int list (*s Table for user-given custom ML extractions. *) (* UGLY HACK: registration of a function defined in [extraction.ml] *) val register_type_scheme_nb_args : (Environ.env -> Term.constr -> int) -> unit val is_custom : global_reference -> bool val is_inline_custom : global_reference -> bool val find_custom : global_reference -> string val find_type_custom : global_reference -> string list * string val is_custom_match : ml_branch array -> bool val find_custom_match : ml_branch array -> string (*s Extraction commands. *) val extraction_language : lang -> unit val extraction_inline : bool -> reference list -> unit val print_extraction_inline : unit -> unit val reset_extraction_inline : unit -> unit val extract_constant_inline : bool -> reference -> string list -> string -> unit val extract_inductive : reference -> string -> string list -> string option -> unit type int_or_id = ArgInt of int | ArgId of identifier val extraction_implicit : reference -> int_or_id list -> unit (*s Table of blacklisted filenames *) val extraction_blacklist : identifier list -> unit val reset_extraction_blacklist : unit -> unit val print_extraction_blacklist : unit -> unit coq-8.4pl2/plugins/extraction/CHANGES0000640000175000001440000003341711160567762016506 0ustar notinusers8.0 -> today See the main CHANGES file in the archive 7.4 -> 8.0 No revolution this time. Mostly "behind-the-scene" clean-up and bug-fixes, but also a few steps toward a more user-friendly extraction: * syntax of extraction: - The old (Recursive) Extraction Module M. is now (Recursive) Extraction Library M. The old name was misleading since this command only works with M being a library M.v, and not a module produced by interactive command Module M. 
- The other commands Extraction foo. Recursive Extraction foo bar. Extraction "myfile.ml" foo bar. now accept that foo can be a module name instead of just a constant name. * Support of type scheme axioms (i.e. axiom whose type is an arity (x1:X1)...(xn:Xn)s with s a sort). For example: Axiom myprod : Set -> Set -> Set. Extract Constant myprod "'a" "'b" => "'a * 'b". Recursive Extraction myprod. -------> type ('a,'b) myprod = 'a * 'b * More flexible support of axioms. When an axiom isn't realized via Extract Constant before extraction, a warning is produced (instead of an error), and the extracted code must be completed later by hand. To find what needs to be completed, search for the following string: AXIOM TO BE REALIZED * Cosmetics: When extraction produces a file, it tells it. * (Experimental) It is allowed to extract under a opened interactive module (but still outside sections). Feature to be used with caution. * A problem has been identified concerning .v files used as normal interactive modules, like in Definition foo :=O. Require A. Module M:=A Extraction M. I might try to support that in the future. In the meanwhile, the current behaviour of extraction is to forbid this. * bug fixes: - many concerning Records. - a Stack Overflow with mutual inductive (PR#320) - some optimizations have been removed since they were not type-safe: For example if e has type: type 'x a = A Then: match e with A -> A -----X----> e To be investigated further. 7.3 -> 7.4 * The two main new features: - Automatic generation of Obj.magic when the extracted code in Ocaml is not directly typable. - An experimental extraction of Coq's new modules to Ocaml modules. * Concerning those Obj.magic: - The extraction now computes the expected type of any terms. Then it compares it with the actual type of the produced code. And when a mismatch is found, a Obj.magic is inserted. - As a rule, any extracted development that was compiling out of the box should not contain any Obj.magic. At the other hand, generation of Obj.magic is not optimized yet: there might be several of them at a place were one would have been enough. - Examples of code needing those Obj.magic: * plugins/extraction/test_extraction.v in the Coq source * in the users' contributions: Lannion Lyon/CIRCUITS Rocq/HIGMAN - As a side-effect of this Obj.magic feature, we now print the types of the extracted terms, both in .ml files as commented documentation and in interfaces .mli files - This feature hasn't been ported yet to Haskell. We are aware of some unsafe casting functions like "unsafeCoerce" on some Haskell implems. So it will eventually be done. * Concerning the extraction of Coq's new modules: - Taking in account the new Coq's modules system has implied a *huge* rewrite of most of the extraction code. - The extraction core (translation from Coq to an abstract mini-ML) is now complete and fairly stable, and supports modules, modules type and functors and all that stuff. - The ocaml pretty-print part, especially the renaming issue, is clearly weaker, and certainly still contains bugs. - Nothing done for translating these Coq Modules to Haskell. - A temporary drawback of this module extraction implementation is that efficiency (especially extraction speed) has been somehow neglected. To improve ... - As an interesting side-effect, definitions are now printed according to the user's original order. No more of this "dependency-correct but weird" order. 
In particular realized axioms via Extract Constant are now at their right place, and not at the beginning. * Other news: - Records are now printed using the Ocaml record syntax - Syntax output toward Scheme. Quite funny, but quite experimental and not documented. I recommend using the bigloo compiler since it contains natively some pattern matching. - the dummy constant "__" have changed. see README - a few bug-fixes (#191 and others) 7.2 -> 7.3 * Improved documentation in the Reference Manual. * Theoretical bad news: - a naughty example (see the end of test_extraction.v) forced me to stop eliminating lambdas and arguments corresponding to so-called "arity" in the general case. - The dummy constant used in extraction ( let prop = () in ocaml ) may in some cases be applied to arguments. This problem is dealt by generating sufficient abstraction before the (). * Theoretical good news: - there is now a mechanism that remove useless prop/arity lambdas at the top of function declarations. If your function had signature nat -> prop -> nat in the previous extraction, it will now be nat -> nat. So the extractions of common terms should look very much like the old V6.2 one, except in some particular cases (functions as parameters, partial applications, etc). In particular the bad news above have nearly no impact... * By the way there is no more "let prop = ()" in ocaml. Those () are directly inlined. And in Haskell the dummy constant is now __ (two underscore) and is defined by __ = Prelude.error "Logical or arity value used" This dummy constant should never be evaluated when computing an informative value, thanks to the lazy strategy. Hence the error message. * Syntax changes, see Documentation for details: Extraction Language Ocaml. Extraction Language Haskell. Extraction Language Toplevel. That fixes the target language of extraction. Default is Ocaml, even in the coq toplevel: you can now do copy-paste from the coq toplevel without renaming problems. Toplevel language is the ocaml pseudo-language used previously used inside the coq toplevel: coq names are printed with the coq way, i.e. with no renaming. So there is no more particular commands for Haskell, like Haskell Extraction "file" id. Just set your favourite language and go... * Haskell extraction has been tested at last (and corrected...). See specificities in Documentation. * Extraction of CoInductive in Ocaml language is now correct: it uses the Lazy.force and lazy features of Ocaml. * Modular extraction in Ocaml is now far more readable: instead of qualifying everywhere (A.foo), there are now some "open" at the beginning of files. Possible clashes are dealt with. * By default, any recursive function associated with an inductive type (foo_rec and foo_rect when foo is inductive type) will now be inlined in extracted code. * A few constants are explicitely declared to be inlined in extracted code. For the moment there are: Wf.Acc_rec Wf.Acc_rect Wf.well_founded_induction Wf.well_founded_induction_type Those constants does not match the auto-inlining criterion based on strictness. Of course, you can still overide this behaviour via some Extraction NoInline. * There is now a web page showing the extraction of all standard theories: http://www.lri.fr/~letouzey/extraction 7.1 -> 7.2 : * Syntax changes, see Documentation for more details: Set/Unset Extraction Optimize. Default is Set. This control all optimizations made on the ML terms (mostly reduction of dummy beta/iota redexes, but also simplications on Cases, etc). 
Put this option to Unset if you what a ML term as close as possible to the Coq term. Set/Unset Extraction AutoInline. Default in Set, so by default, the extraction mechanism feels free to inline the bodies of some defined constants, according to some heuristics like size of bodies, useness of some arguments, etc. Those heuristics are not always perfect, you may want to disable this feature, do it by Unset. Extraction Inline toto foo. Extraction NoInline titi faa bor. In addition to the automatic inline feature, you can now tell precisely to inline some more constants by the Extraction Inline command. Conversely, you can forbid the inlining of some specific constants by automatic inlining. Those two commands enable a precise control of what is inlined and what is not. Print Extraction Inline. Sum up the current state of the table recording the custom inlings (Extraction (No)Inline). Reset Extraction Inline. Put the table recording the custom inlings back to empty. As a consequence, there is no more need for options inside the commands of extraction: Extraction foo. Recursive Extraction foo bar. Extraction "file" foo bar. Extraction Module Mymodule. Recursive Extraction Module Mymodule. New: The last syntax extracts the module Mymodule and all the modules it depends on. You can also try the Haskell versions (not tested yet): Haskell Extraction foo. Haskell Recursive Extraction foo bar. Haskell Extraction "file" foo bar. Haskell Extraction Module Mymodule. Haskell Recursive Extraction Module Mymodule. And there's still the realization syntax: Extract Constant coq_bla => "caml_bla". Extract Inlined Constant coq_bla => "caml_bla". Extract Inductive myinductive => mycamlind [my_caml_constr1 ... ]. Note that now, the Extract Inlined Constant command is sugar for an Extract Constant followed by a Extraction Inline. So be careful with Reset Extraction Inline. * Lot of works around optimization of produced code. Should make code more readable. - fixpoint definitions : there should be no more stupid printings like let foo x = let rec f x = .... (f y) .... in f x but rather let rec foo x = .... (foo y) .... - generalized iota (in particular iota and permutation cases/cases): A generalized iota redex is a "Cases e of ...." where e is ok. And the recursive predicate "ok" is given by: e is ok if e is a Constructor or a Cases where all branches are ok. In the case of generalized iota redex, it might be good idea to reduce it, so we do it. Example: match (match t with O -> Left | S n -> match n with O -> Right | S m -> Left) with Left -> blabla | Right -> bloblo After simplification, that gives: match t with O -> blabla | S n -> match n with O -> bloblo | S n -> blabla As shown on the example, code duplication can occur. In practice it seems not to happen frequently. - "constant" case: In V7.1 we used to simplify cases where all branches are the same. In V7.2 we can simplify in addition terms like cases e of C1 x y -> f (C x y) | C2 z -> f (C2 z) If x y z don't occur in f, we can produce (f e). - permutation cases/fun: extracted code has frequenty functions in branches of cases: let foo x = match x with O -> fun _ -> .... | S y -> fun _ -> .... the optimization consist in lifting the common "fun _ ->", and that gives let foo x _ = match x with O -> ..... | S y -> .... * Some bug corrections (many thanks in particular to Michel Levy). 
* Testing in coq contributions: If you are interested in extraction, you can look at the extraction tests I'have put in the following coq contributions Bordeaux/Additions computation of fibonacci(2000) Bordeaux/EXCEPTIONS multiplication using exception. Bordeaux/SearchTrees list -> binary tree. maximum. Dyade/BDDS boolean tautology checker. Lyon/CIRCUITS multiplication via a modelization of a circuit. Lyon/FIRING-SQUAD print the states of the firing squad. Marseille/CIRCUITS compares integers via a modelization of a circuit. Nancy/FOUnify unification of two first-order terms. Rocq/ARITH/Chinese computation of the chinese remainder. Rocq/COC small coc typechecker. (test by B. Barras, not by me) Rocq/HIGMAN run the proof on one example. Rocq/GRAPHS linear constraints checker in Z. Sophia-Antipolis/Stalmarck boolean tautology checker. Suresnes/BDD boolean tautology checker. Just do "make" in those contributions, the extraction test is integrated. More tests will follow on more contributions. 7.0 -> 7.1 : mostly bug corrections. No theoretical problems dealed with. * The semantics of Extract Constant changed: If you provide a extraction for p by Extract Constant p => "0", your generated ML file will begin by a let p = 0. The old semantics, which was to replace p everywhere by the provided terms, is still available via the Extract Inlined Constant p => "0" syntax. * There are more optimizations applied to the generated code: - identity cases: match e with P x y -> P x y | Q z -> Q z | ... is simplified into e. Especially interesting with the sumbool terms: there will be no more match ... with Left -> Left | Right -> Right - constant cases: match e with P x y -> c | Q z -> c | ... is simplified into c as soon as x, y, z do not occur in c. So no more match ... with Left -> Left | Right -> Left. * the extraction at Toplevel (Extraction foo and Recursive Extraction foo), which was only a development tool at the beginning, is now closer to the real extraction to a file. In particular optimizations are done, and constants like recursors ( ..._rec ) are expanded. * the singleton optimization is now protected against circular type. ( Remind : this optimization is the one that simplify type 'a sig = Exists of 'a into type 'a sig = 'a and match e with (Exists c) -> d into let c = e in d ) * Fixed one bug concerning casted code * The inductives generated should now have always correct type-var list ('a,'b,'c...) * Code cleanup until three days before release. Messing-up code in the last three days before release. 6.x -> 7.0 : Everything changed. 
See README coq-8.4pl2/plugins/extraction/common.mli0000640000175000001440000000562112010532755017467 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* std_ppcmds val fnl2 : unit -> std_ppcmds val space_if : bool -> std_ppcmds val pp_par : bool -> std_ppcmds -> std_ppcmds (** [pp_apply] : a head part applied to arguments, possibly with parenthesis *) val pp_apply : std_ppcmds -> bool -> std_ppcmds list -> std_ppcmds (** Same as [pp_apply], but with also protection of the head by parenthesis *) val pp_apply2 : std_ppcmds -> bool -> std_ppcmds list -> std_ppcmds val pp_tuple_light : (bool -> 'a -> std_ppcmds) -> 'a list -> std_ppcmds val pp_tuple : ('a -> std_ppcmds) -> 'a list -> std_ppcmds val pp_boxed_tuple : ('a -> std_ppcmds) -> 'a list -> std_ppcmds val pr_binding : identifier list -> std_ppcmds val rename_id : identifier -> Idset.t -> identifier type env = identifier list * Idset.t val empty_env : unit -> env val rename_vars: Idset.t -> identifier list -> env val rename_tvars: Idset.t -> identifier list -> identifier list val push_vars : identifier list -> env -> identifier list * env val get_db_name : int -> env -> identifier type phase = Pre | Impl | Intf val set_phase : phase -> unit val get_phase : unit -> phase val opened_libraries : unit -> module_path list type kind = Term | Type | Cons | Mod val pp_global : kind -> global_reference -> string val pp_module : module_path -> string val top_visible_mp : unit -> module_path (* In [push_visible], the [module_path list] corresponds to module parameters, the innermost one coming first in the list *) val push_visible : module_path -> module_path list -> unit val pop_visible : unit -> unit val check_duplicate : module_path -> label -> string type reset_kind = AllButExternal | Everything val reset_renaming_tables : reset_kind -> unit val set_keywords : Idset.t -> unit (** For instance: [mk_ind "Coq.Init.Datatypes" "nat"] *) val mk_ind : string -> string -> mutual_inductive (** Special hack for constants of type Ascii.ascii : if an [Extract Inductive ascii => char] has been declared, then the constants are directly turned into chars *) val is_native_char : ml_ast -> bool val pp_native_char : ml_ast -> std_ppcmds coq-8.4pl2/plugins/extraction/vo.itarget0000640000175000001440000000023511405706765017510 0ustar notinusersExtrOcamlBasic.vo ExtrOcamlIntConv.vo ExtrOcamlBigIntConv.vo ExtrOcamlNatInt.vo ExtrOcamlNatBigInt.vo ExtrOcamlZInt.vo ExtrOcamlZBigInt.vo ExtrOcamlString.vocoq-8.4pl2/plugins/extraction/scheme.mli0000640000175000001440000000106712010532755017443 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val new_meta : 'a -> ml_type val type_subst_list : ml_type list -> ml_type -> ml_type val type_subst_vect : ml_type array -> ml_type -> ml_type val instantiation : ml_schema -> ml_type val needs_magic : ml_type * ml_type -> bool val put_magic_if : bool -> ml_ast -> ml_ast val put_magic : ml_type * ml_type -> ml_ast -> ml_ast val generalizable : ml_ast -> bool (*s ML type environment. *) module Mlenv : sig type t val empty : t (* get the n-th more recently entered schema and instantiate it. 
*) val get : t -> int -> ml_type (* Adding a type in an environment, after generalizing free meta *) val push_gen : t -> ml_type -> t (* Adding a type with no [Tvar] *) val push_type : t -> ml_type -> t (* Adding a type with no [Tvar] nor [Tmeta] *) val push_std_type : t -> ml_type -> t end (*s Utility functions over ML types without meta *) val type_mem_kn : mutual_inductive -> ml_type -> bool val type_maxvar : ml_type -> int val type_decomp : ml_type -> ml_type list * ml_type val type_recomp : ml_type list * ml_type -> ml_type val var2var' : ml_type -> ml_type type abbrev_map = global_reference -> ml_type option val type_expand : abbrev_map -> ml_type -> ml_type val type_simpl : ml_type -> ml_type val type_to_sign : abbrev_map -> ml_type -> sign val type_to_signature : abbrev_map -> ml_type -> signature val type_expunge : abbrev_map -> ml_type -> ml_type val type_expunge_from_sign : abbrev_map -> signature -> ml_type -> ml_type val isDummy : ml_type -> bool val isKill : sign -> bool val case_expunge : signature -> ml_ast -> ml_ident list * ml_ast val term_expunge : signature -> ml_ident list * ml_ast -> ml_ast (*s Special identifiers. [dummy_name] is to be used for dead code and will be printed as [_] in concrete (Caml) code. *) val anonymous_name : identifier val dummy_name : identifier val id_of_name : name -> identifier val id_of_mlid : ml_ident -> identifier val tmp_id : ml_ident -> ml_ident (*s [collect_lambda MLlam(id1,...MLlam(idn,t)...)] returns the list [idn;...;id1] and the term [t]. *) val collect_lams : ml_ast -> ml_ident list * ml_ast val collect_n_lams : int -> ml_ast -> ml_ident list * ml_ast val remove_n_lams : int -> ml_ast -> ml_ast val nb_lams : ml_ast -> int val named_lams : ml_ident list -> ml_ast -> ml_ast val dummy_lams : ml_ast -> int -> ml_ast val anonym_or_dummy_lams : ml_ast -> signature -> ml_ast val eta_args_sign : int -> signature -> ml_ast list (*s Utility functions over ML terms. *) val mlapp : ml_ast -> ml_ast list -> ml_ast val ast_map : (ml_ast -> ml_ast) -> ml_ast -> ml_ast val ast_map_lift : (int -> ml_ast -> ml_ast) -> int -> ml_ast -> ml_ast val ast_iter : (ml_ast -> unit) -> ml_ast -> unit val ast_occurs : int -> ml_ast -> bool val ast_occurs_itvl : int -> int -> ml_ast -> bool val ast_lift : int -> ml_ast -> ml_ast val ast_pop : ml_ast -> ml_ast val ast_subst : ml_ast -> ml_ast -> ml_ast val ast_glob_subst : ml_ast Refmap'.t -> ml_ast -> ml_ast val normalize : ml_ast -> ml_ast val optimize_fix : ml_ast -> ml_ast val inline : global_reference -> ml_ast -> bool val is_basic_pattern : ml_pattern -> bool val has_deep_pattern : ml_branch array -> bool val is_regular_match : ml_branch array -> bool exception Impossible (* Classification of signatures *) type sign_kind = | EmptySig | NonLogicalSig (* at least a [Keep] *) | UnsafeLogicalSig (* No [Keep], at least a [Kill Kother] *) | SafeLogicalSig (* only [Kill Ktype] *) val sign_kind : signature -> sign_kind val sign_no_final_keeps : signature -> signature coq-8.4pl2/plugins/extraction/extraction.ml0000640000175000001440000011476012121620060020201 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* error_singleton_become_prop id let sort_of env c = try let polyprop = (lang() = Haskell) in Retyping.get_sort_family_of ~polyprop env none (strip_outer_cast c) with SingletonInductiveBecomesProp id -> error_singleton_become_prop id (*S Generation of flags and signatures. 
*) (* The type [flag] gives us information about any Coq term: \begin{itemize} \item [TypeScheme] denotes a type scheme, that is something that will become a type after enough applications. More formally, a type scheme has type $(x_1:X_1)\ldots(x_n:X_n)s$ with [s = Set], [Prop] or [Type] \item [Default] denotes the other cases. It may be inexact after instanciation. For example [(X:Type)X] is [Default] and may give [Set] after instanciation, which is rather [TypeScheme] \item [Logic] denotes a term of sort [Prop], or a type scheme on sort [Prop] \item [Info] is the opposite. The same example [(X:Type)X] shows that an [Info] term might in fact be [Logic] later on. \end{itemize} *) type info = Logic | Info type scheme = TypeScheme | Default type flag = info * scheme (*s [flag_of_type] transforms a type [t] into a [flag]. Really important function. *) let rec flag_of_type env t = let t = whd_betadeltaiota env none t in match kind_of_term t with | Prod (x,t,c) -> flag_of_type (push_rel (x,None,t) env) c | Sort (Prop Null) -> (Logic,TypeScheme) | Sort _ -> (Info,TypeScheme) | _ -> if (sort_of env t) = InProp then (Logic,Default) else (Info,Default) (*s Two particular cases of [flag_of_type]. *) let is_default env t = (flag_of_type env t = (Info, Default)) exception NotDefault of kill_reason let check_default env t = match flag_of_type env t with | _,TypeScheme -> raise (NotDefault Ktype) | Logic,_ -> raise (NotDefault Kother) | _ -> () let is_info_scheme env t = (flag_of_type env t = (Info, TypeScheme)) (*s [type_sign] gernerates a signature aimed at treating a type application. *) let rec type_sign env c = match kind_of_term (whd_betadeltaiota env none c) with | Prod (n,t,d) -> (if is_info_scheme env t then Keep else Kill Kother) :: (type_sign (push_rel_assum (n,t) env) d) | _ -> [] let rec type_scheme_nb_args env c = match kind_of_term (whd_betadeltaiota env none c) with | Prod (n,t,d) -> let n = type_scheme_nb_args (push_rel_assum (n,t) env) d in if is_info_scheme env t then n+1 else n | _ -> 0 let _ = register_type_scheme_nb_args type_scheme_nb_args (*s [type_sign_vl] does the same, plus a type var list. *) let rec type_sign_vl env c = match kind_of_term (whd_betadeltaiota env none c) with | Prod (n,t,d) -> let s,vl = type_sign_vl (push_rel_assum (n,t) env) d in if not (is_info_scheme env t) then Kill Kother::s, vl else Keep::s, (next_ident_away (id_of_name n) vl) :: vl | _ -> [],[] let rec nb_default_params env c = match kind_of_term (whd_betadeltaiota env none c) with | Prod (n,t,d) -> let n = nb_default_params (push_rel_assum (n,t) env) d in if is_default env t then n+1 else n | _ -> 0 (* Enriching a signature with implicit information *) let sign_with_implicits r s nb_params = let implicits = implicits_of_global r in let rec add_impl i = function | [] -> [] | sign::s -> let sign' = if sign = Keep && List.mem i implicits then Kill Kother else sign in sign' :: add_impl (succ i) s in add_impl (1+nb_params) s (* Enriching a exception message *) let rec handle_exn r n fn_name = function | MLexn s -> (try Scanf.sscanf s "UNBOUND %d" (fun i -> assert ((0 < i) && (i <= n)); MLexn ("IMPLICIT "^ msg_non_implicit r (n+1-i) (fn_name i))) with e when Errors.noncritical e -> MLexn s) | a -> ast_map (handle_exn r n fn_name) a (*S Management of type variable contexts. *) (* A De Bruijn variable context (db) is a context for translating Coq [Rel] into ML type [Tvar]. *) (*s From a type signature toward a type variable context (db). 
*) let db_from_sign s = let rec make i acc = function | [] -> acc | Keep :: l -> make (i+1) (i::acc) l | Kill _ :: l -> make i (0::acc) l in make 1 [] s (*s Create a type variable context from indications taken from an inductive type (see just below). *) let rec db_from_ind dbmap i = if i = 0 then [] else (try Intmap.find i dbmap with Not_found -> 0)::(db_from_ind dbmap (i-1)) (*s [parse_ind_args] builds a map: [i->j] iff the i-th Coq argument of a constructor corresponds to the j-th type var of the ML inductive. *) (* \begin{itemize} \item [si] : signature of the inductive \item [i] : counter of Coq args for [(I args)] \item [j] : counter of ML type vars \item [relmax] : total args number of the constructor \end{itemize} *) let parse_ind_args si args relmax = let rec parse i j = function | [] -> Intmap.empty | Kill _ :: s -> parse (i+1) j s | Keep :: s -> (match kind_of_term args.(i-1) with | Rel k -> Intmap.add (relmax+1-k) j (parse (i+1) (j+1) s) | _ -> parse (i+1) (j+1) s) in parse 1 1 si let oib_equal o1 o2 = id_ord o1.mind_typename o2.mind_typename = 0 && list_equal eq_rel_declaration o1.mind_arity_ctxt o2.mind_arity_ctxt && begin match o1.mind_arity, o2.mind_arity with | Monomorphic {mind_user_arity=c1; mind_sort=s1}, Monomorphic {mind_user_arity=c2; mind_sort=s2} -> eq_constr c1 c2 && s1 = s2 | ma1, ma2 -> ma1 = ma2 end && o1.mind_consnames = o2.mind_consnames let mib_equal m1 m2 = array_equal oib_equal m1.mind_packets m1.mind_packets && m1.mind_record = m2.mind_record && m1.mind_finite = m2.mind_finite && m1.mind_ntypes = m2.mind_ntypes && list_equal eq_named_declaration m1.mind_hyps m2.mind_hyps && m1.mind_nparams = m2.mind_nparams && m1.mind_nparams_rec = m2.mind_nparams_rec && list_equal eq_rel_declaration m1.mind_params_ctxt m2.mind_params_ctxt && m1.mind_constraints = m2.mind_constraints (*S Extraction of a type. *) (* [extract_type env db c args] is used to produce an ML type from the coq term [(c args)], which is supposed to be a Coq type. *) (* [db] is a context for translating Coq [Rel] into ML type [Tvar]. *) (* [j] stands for the next ML type var. [j=0] means we do not generate ML type var anymore (in subterms for example). *) let rec extract_type env db j c args = match kind_of_term (whd_betaiotazeta Evd.empty c) with | App (d, args') -> (* We just accumulate the arguments. *) extract_type env db j d (Array.to_list args' @ args) | Lambda (_,_,d) -> (match args with | [] -> assert false (* A lambda cannot be a type. *) | a :: args -> extract_type env db j (subst1 a d) args) | Prod (n,t,d) -> assert (args = []); let env' = push_rel_assum (n,t) env in (match flag_of_type env t with | (Info, Default) -> (* Standard case: two [extract_type] ... *) let mld = extract_type env' (0::db) j d [] in (match expand env mld with | Tdummy d -> Tdummy d | _ -> Tarr (extract_type env db 0 t [], mld)) | (Info, TypeScheme) when j > 0 -> (* A new type var. *) let mld = extract_type env' (j::db) (j+1) d [] in (match expand env mld with | Tdummy d -> Tdummy d | _ -> Tarr (Tdummy Ktype, mld)) | _,lvl -> let mld = extract_type env' (0::db) j d [] in (match expand env mld with | Tdummy d -> Tdummy d | _ -> let reason = if lvl=TypeScheme then Ktype else Kother in Tarr (Tdummy reason, mld))) | Sort _ -> Tdummy Ktype (* The two logical cases. *) | _ when sort_of env (applist (c, args)) = InProp -> Tdummy Kother | Rel n -> (match lookup_rel n env with | (_,Some t,_) -> extract_type env db j (lift n t) args | _ -> (* Asks [db] a translation for [n]. 
*) if n > List.length db then Tunknown else let n' = List.nth db (n-1) in if n' = 0 then Tunknown else Tvar n') | Const kn -> let r = ConstRef kn in let cb = lookup_constant kn env in let typ = Typeops.type_of_constant_type env cb.const_type in (match flag_of_type env typ with | (Logic,_) -> assert false (* Cf. logical cases above *) | (Info, TypeScheme) -> let mlt = extract_type_app env db (r, type_sign env typ) args in (match cb.const_body with | Undef _ | OpaqueDef _ -> mlt | Def _ when is_custom r -> mlt | Def lbody -> let newc = applist (Declarations.force lbody, args) in let mlt' = extract_type env db j newc [] in (* ML type abbreviations interact badly with Coq *) (* reduction, so [mlt] and [mlt'] might be different: *) (* The more precise is [mlt'], extracted after reduction *) (* The shortest is [mlt], which use abbreviations *) (* If possible, we take [mlt], otherwise [mlt']. *) if expand env mlt = expand env mlt' then mlt else mlt') | (Info, Default) -> (* Not an ML type, for example [(c:forall X, X->X) Type nat] *) (match cb.const_body with | Undef _ | OpaqueDef _ -> Tunknown (* Brutal approx ... *) | Def lbody -> (* We try to reduce. *) let newc = applist (Declarations.force lbody, args) in extract_type env db j newc [])) | Ind (kn,i) -> let s = (extract_ind env kn).ind_packets.(i).ip_sign in extract_type_app env db (IndRef (kn,i),s) args | Case _ | Fix _ | CoFix _ -> Tunknown | _ -> assert false (*s Auxiliary function dealing with type application. Precondition: [r] is a type scheme represented by the signature [s], and is completely applied: [List.length args = List.length s]. *) and extract_type_app env db (r,s) args = let ml_args = List.fold_right (fun (b,c) a -> if b=Keep then let p = List.length (fst (splay_prod env none (type_of env c))) in let db = iterate (fun l -> 0 :: l) p db in (extract_type_scheme env db c p) :: a else a) (List.combine s args) [] in Tglob (r, ml_args) (*S Extraction of a type scheme. *) (* [extract_type_scheme env db c p] works on a Coq term [c] which is an informative type scheme. It means that [c] is not a Coq type, but will be when applied to sufficiently many arguments ([p] in fact). This function decomposes p lambdas, with eta-expansion if needed. *) (* [db] is a context for translating Coq [Rel] into ML type [Tvar]. *) and extract_type_scheme env db c p = if p=0 then extract_type env db 0 c [] else let c = whd_betaiotazeta Evd.empty c in match kind_of_term c with | Lambda (n,t,d) -> extract_type_scheme (push_rel_assum (n,t) env) db d (p-1) | _ -> let rels = fst (splay_prod env none (type_of env c)) in let env = push_rels_assum rels env in let eta_args = List.rev_map mkRel (interval 1 p) in extract_type env db 0 (lift p c) eta_args (*S Extraction of an inductive type. *) and extract_ind env kn = (* kn is supposed to be in long form *) let mib = Environ.lookup_mind kn env in try (* For a same kn, we can get various bodies due to module substitutions. We hence check that the mib has not changed from recording time to retrieving time. Ideally we should also check the env. *) let (mib0,ml_ind) = lookup_ind kn in if not (mib_equal mib mib0) then raise Not_found; ml_ind with Not_found -> (* First, if this inductive is aliased via a Module, we process the original inductive if possible. 
When at toplevel of the monolithic case, we cannot do much (cf Vector and bug #2570) *) let equiv = if lang () <> Ocaml || (not (modular ()) && at_toplevel (mind_modpath kn)) || kn_ord (canonical_mind kn) (user_mind kn) = 0 then NoEquiv else begin ignore (extract_ind env (mind_of_kn (canonical_mind kn))); Equiv (canonical_mind kn) end in (* Everything concerning parameters. *) (* We do that first, since they are common to all the [mib]. *) let mip0 = mib.mind_packets.(0) in let npar = mib.mind_nparams in let epar = push_rel_context mib.mind_params_ctxt env in (* First pass: we store inductive signatures together with *) (* their type var list. *) let packets = Array.map (fun mip -> let b = snd (mind_arity mip) <> InProp in let ar = Inductive.type_of_inductive env (mib,mip) in let s,v = if b then type_sign_vl env ar else [],[] in let t = Array.make (Array.length mip.mind_nf_lc) [] in { ip_typename = mip.mind_typename; ip_consnames = mip.mind_consnames; ip_logical = (not b); ip_sign = s; ip_vars = v; ip_types = t }) mib.mind_packets in add_ind kn mib {ind_kind = Standard; ind_nparams = npar; ind_packets = packets; ind_equiv = equiv }; (* Second pass: we extract constructors *) for i = 0 to mib.mind_ntypes - 1 do let p = packets.(i) in if not p.ip_logical then let types = arities_of_constructors env (kn,i) in for j = 0 to Array.length types - 1 do let t = snd (decompose_prod_n npar types.(j)) in let prods,head = dest_prod epar t in let nprods = List.length prods in let args = match kind_of_term head with | App (f,args) -> args (* [kind_of_term f = Ind ip] *) | _ -> [||] in let dbmap = parse_ind_args p.ip_sign args (nprods + npar) in let db = db_from_ind dbmap npar in p.ip_types.(j) <- extract_type_cons epar db dbmap t (npar+1) done done; (* Third pass: we determine special cases. *) let ind_info = try let ip = (kn, 0) in let r = IndRef ip in if is_custom r then raise (I Standard); if not mib.mind_finite then raise (I Coinductive); if mib.mind_ntypes <> 1 then raise (I Standard); let p = packets.(0) in if p.ip_logical then raise (I Standard); if Array.length p.ip_types <> 1 then raise (I Standard); let typ = p.ip_types.(0) in let l = List.filter (fun t -> not (isDummy (expand env t))) typ in if not (keep_singleton ()) && List.length l = 1 && not (type_mem_kn kn (List.hd l)) then raise (I Singleton); if l = [] then raise (I Standard); if not mib.mind_record then raise (I Standard); (* Now we're sure it's a record. *) (* First, we find its field names. *) let rec names_prod t = match kind_of_term t with | Prod(n,_,t) -> n::(names_prod t) | LetIn(_,_,_,t) -> names_prod t | Cast(t,_,_) -> names_prod t | _ -> [] in let field_names = list_skipn mib.mind_nparams (names_prod mip0.mind_user_lc.(0)) in assert (List.length field_names = List.length typ); let projs = ref Cset.empty in let mp,d,_ = repr_mind kn in let rec select_fields l typs = match l,typs with | [],[] -> [] | _::l, typ::typs when isDummy (expand env typ) -> select_fields l typs | Anonymous::l, typ::typs -> None :: (select_fields l typs) | Name id::l, typ::typs -> let knp = make_con mp d (label_of_id id) in (* Is it safe to use [id] for projections [foo.id] ? *) if List.for_all ((=) Keep) (type2signature env typ) then projs := Cset.add knp !projs; Some (ConstRef knp) :: (select_fields l typs) | _ -> assert false in let field_glob = select_fields field_names typ in (* Is this record officially declared with its projections ? *) (* If so, we use this information. 
*) begin try let n = nb_default_params env (Inductive.type_of_inductive env (mib,mip0)) in let check_proj kn = if Cset.mem kn !projs then add_projection n kn in List.iter (Option.iter check_proj) (lookup_projections ip) with Not_found -> () end; Record field_glob with (I info) -> info in let i = {ind_kind = ind_info; ind_nparams = npar; ind_packets = packets; ind_equiv = equiv } in add_ind kn mib i; add_inductive_kind kn i.ind_kind; i (*s [extract_type_cons] extracts the type of an inductive constructor toward the corresponding list of ML types. - [db] is a context for translating Coq [Rel] into ML type [Tvar] - [dbmap] is a translation map (produced by a call to [parse_in_args]) - [i] is the rank of the current product (initially [params_nb+1]) *) and extract_type_cons env db dbmap c i = match kind_of_term (whd_betadeltaiota env none c) with | Prod (n,t,d) -> let env' = push_rel_assum (n,t) env in let db' = (try Intmap.find i dbmap with Not_found -> 0) :: db in let l = extract_type_cons env' db' dbmap d (i+1) in (extract_type env db 0 t []) :: l | _ -> [] (*s Recording the ML type abbreviation of a Coq type scheme constant. *) and mlt_env env r = match r with | ConstRef kn -> (try if not (visible_con kn) then raise Not_found; match lookup_term kn with | Dtype (_,vl,mlt) -> Some mlt | _ -> None with Not_found -> let cb = Environ.lookup_constant kn env in let typ = Typeops.type_of_constant_type env cb.const_type in match cb.const_body with | Undef _ | OpaqueDef _ -> None | Def l_body -> (match flag_of_type env typ with | Info,TypeScheme -> let body = Declarations.force l_body in let s,vl = type_sign_vl env typ in let db = db_from_sign s in let t = extract_type_scheme env db body (List.length s) in add_term kn (Dtype (r, vl, t)); Some t | _ -> None)) | _ -> None and expand env = type_expand (mlt_env env) and type2signature env = type_to_signature (mlt_env env) let type2sign env = type_to_sign (mlt_env env) let type_expunge env = type_expunge (mlt_env env) let type_expunge_from_sign env = type_expunge_from_sign (mlt_env env) (*s Extraction of the type of a constant. *) let record_constant_type env kn opt_typ = try if not (visible_con kn) then raise Not_found; lookup_type kn with Not_found -> let typ = match opt_typ with | None -> Typeops.type_of_constant env kn | Some typ -> typ in let mlt = extract_type env [] 1 typ [] in let schema = (type_maxvar mlt, mlt) in add_type kn schema; schema (*S Extraction of a term. *) (* Precondition: [(c args)] is not a type scheme, and is informative. *) (* [mle] is a ML environment [Mlenv.t]. *) (* [mlt] is the ML type we want our extraction of [(c args)] to have. *) let rec extract_term env mle mlt c args = match kind_of_term c with | App (f,a) -> extract_term env mle mlt f (Array.to_list a @ args) | Lambda (n, t, d) -> let id = id_of_name n in (match args with | a :: l -> (* We make as many [LetIn] as possible. *) let d' = mkLetIn (Name id,a,t,applistc d (List.map (lift 1) l)) in extract_term env mle mlt d' [] | [] -> let env' = push_rel_assum (Name id, t) env in let id, a = try check_default env t; Id id, new_meta() with NotDefault d -> Dummy, Tdummy d in let b = new_meta () in (* If [mlt] cannot be unified with an arrow type, then magic! *) let magic = needs_magic (mlt, Tarr (a, b)) in let d' = extract_term env' (Mlenv.push_type mle a) b d [] in put_magic_if magic (MLlam (id, d'))) | LetIn (n, c1, t1, c2) -> let id = id_of_name n in let env' = push_rel (Name id, Some c1, t1) env in (* We directly push the args inside the [LetIn]. 
TODO: the opt_let_app flag is supposed to prevent that *) let args' = List.map (lift 1) args in (try check_default env t1; let a = new_meta () in let c1' = extract_term env mle a c1 [] in (* The type of [c1'] is generalized and stored in [mle]. *) let mle' = if generalizable c1' then Mlenv.push_gen mle a else Mlenv.push_type mle a in MLletin (Id id, c1', extract_term env' mle' mlt c2 args') with NotDefault d -> let mle' = Mlenv.push_std_type mle (Tdummy d) in ast_pop (extract_term env' mle' mlt c2 args')) | Const kn -> extract_cst_app env mle mlt kn args | Construct cp -> extract_cons_app env mle mlt cp args | Rel n -> (* As soon as the expected [mlt] for the head is known, *) (* we unify it with an fresh copy of the stored type of [Rel n]. *) let extract_rel mlt = put_magic (mlt, Mlenv.get mle n) (MLrel n) in extract_app env mle mlt extract_rel args | Case ({ci_ind=ip},_,c0,br) -> extract_app env mle mlt (extract_case env mle (ip,c0,br)) args | Fix ((_,i),recd) -> extract_app env mle mlt (extract_fix env mle i recd) args | CoFix (i,recd) -> extract_app env mle mlt (extract_fix env mle i recd) args | Cast (c,_,_) -> extract_term env mle mlt c args | Ind _ | Prod _ | Sort _ | Meta _ | Evar _ | Var _ -> assert false (*s [extract_maybe_term] is [extract_term] for usual terms, else [MLdummy] *) and extract_maybe_term env mle mlt c = try check_default env (type_of env c); extract_term env mle mlt c [] with NotDefault d -> put_magic (mlt, Tdummy d) MLdummy (*s Generic way to deal with an application. *) (* We first type all arguments starting with unknown meta types. This gives us the expected type of the head. Then we use the [mk_head] to produce the ML head from this type. *) and extract_app env mle mlt mk_head args = let metas = List.map new_meta args in let type_head = type_recomp (metas, mlt) in let mlargs = List.map2 (extract_maybe_term env mle) metas args in mlapp (mk_head type_head) mlargs (*s Auxiliary function used to extract arguments of constant or constructor. *) and make_mlargs env e s args typs = let rec f = function | [], [], _ -> [] | a::la, t::lt, [] -> extract_maybe_term env e t a :: (f (la,lt,[])) | a::la, t::lt, Keep::s -> extract_maybe_term env e t a :: (f (la,lt,s)) | _::la, _::lt, _::s -> f (la,lt,s) | _ -> assert false in f (args,typs,s) (*s Extraction of a constant applied to arguments. *) and extract_cst_app env mle mlt kn args = (* First, the [ml_schema] of the constant, in expanded version. *) let nb,t = record_constant_type env kn None in let schema = nb, expand env t in (* Can we instantiate types variables for this constant ? *) (* In Ocaml, inside the definition of this constant, the answer is no. *) let instantiated = if lang () = Ocaml && List.mem kn !current_fixpoints then var2var' (snd schema) else instantiation schema in (* Then the expected type of this constant. *) let a = new_meta () in (* We compare stored and expected types in two steps. *) (* First, can [kn] be applied to all args ? *) let metas = List.map new_meta args in let magic1 = needs_magic (type_recomp (metas, a), instantiated) in (* Second, is the resulting type compatible with the expected type [mlt] ? *) let magic2 = needs_magic (a, mlt) in (* The internal head receives a magic if [magic1] *) let head = put_magic_if magic1 (MLglob (ConstRef kn)) in (* Now, the extraction of the arguments. 
*) let s_full = type2signature env (snd schema) in let s_full = sign_with_implicits (ConstRef kn) s_full 0 in let s = sign_no_final_keeps s_full in let ls = List.length s in let la = List.length args in (* The ml arguments, already expunged from known logical ones *) let mla = make_mlargs env mle s args metas in let mla = if magic1 || lang () <> Ocaml then mla else try (* for better optimisations later, we discard dependent args of projections and replace them by fake args that will be removed during final pretty-print. *) let l,l' = list_chop (projection_arity (ConstRef kn)) mla in if l' <> [] then (List.map (fun _ -> MLexn "Proj Args") l) @ l' else mla with e when Errors.noncritical e -> mla in (* For strict languages, purely logical signatures with at least one [Kill Kother] lead to a dummy lam. So a [MLdummy] is left accordingly. *) let optdummy = match sign_kind s_full with | UnsafeLogicalSig when lang () <> Haskell -> [MLdummy] | _ -> [] in (* Different situations depending of the number of arguments: *) if la >= ls then (* Enough args, cleanup already done in [mla], we only add the additionnal dummy if needed. *) put_magic_if (magic2 && not magic1) (mlapp head (optdummy @ mla)) else (* Partially applied function with some logical arg missing. We complete via eta and expunge logical args. *) let ls' = ls-la in let s' = list_skipn la s in let mla = (List.map (ast_lift ls') mla) @ (eta_args_sign ls' s') in let e = anonym_or_dummy_lams (mlapp head mla) s' in put_magic_if magic2 (remove_n_lams (List.length optdummy) e) (*s Extraction of an inductive constructor applied to arguments. *) (* \begin{itemize} \item In ML, contructor arguments are uncurryfied. \item We managed to suppress logical parts inside inductive definitions, but they must appears outside (for partial applications for instance) \item We also suppressed all Coq parameters to the inductives, since they are fixed, and thus are not used for the computation. \end{itemize} *) and extract_cons_app env mle mlt (((kn,i) as ip,j) as cp) args = (* First, we build the type of the constructor, stored in small pieces. *) let mi = extract_ind env kn in let params_nb = mi.ind_nparams in let oi = mi.ind_packets.(i) in let nb_tvars = List.length oi.ip_vars and types = List.map (expand env) oi.ip_types.(j-1) in let list_tvar = List.map (fun i -> Tvar i) (interval 1 nb_tvars) in let type_cons = type_recomp (types, Tglob (IndRef ip, list_tvar)) in let type_cons = instantiation (nb_tvars, type_cons) in (* Then, the usual variables [s], [ls], [la], ... *) let s = List.map (type2sign env) types in let s = sign_with_implicits (ConstructRef cp) s params_nb in let ls = List.length s in let la = List.length args in assert (la <= ls + params_nb); let la' = max 0 (la - params_nb) in let args' = list_lastn la' args in (* Now, we build the expected type of the constructor *) let metas = List.map new_meta args' in (* If stored and expected types differ, then magic! 
*) let a = new_meta () in let magic1 = needs_magic (type_cons, type_recomp (metas, a)) in let magic2 = needs_magic (a, mlt) in let head mla = if mi.ind_kind = Singleton then put_magic_if magic1 (List.hd mla) (* assert (List.length mla = 1) *) else let typeargs = match snd (type_decomp type_cons) with | Tglob (_,l) -> List.map type_simpl l | _ -> assert false in let typ = Tglob(IndRef ip, typeargs) in put_magic_if magic1 (MLcons (typ, ConstructRef cp, mla)) in (* Different situations depending of the number of arguments: *) if la < params_nb then let head' = head (eta_args_sign ls s) in put_magic_if magic2 (dummy_lams (anonym_or_dummy_lams head' s) (params_nb - la)) else let mla = make_mlargs env mle s args' metas in if la = ls + params_nb then put_magic_if (magic2 && not magic1) (head mla) else (* [ params_nb <= la <= ls + params_nb ] *) let ls' = params_nb + ls - la in let s' = list_lastn ls' s in let mla = (List.map (ast_lift ls') mla) @ (eta_args_sign ls' s') in put_magic_if magic2 (anonym_or_dummy_lams (head mla) s') (*S Extraction of a case. *) and extract_case env mle ((kn,i) as ip,c,br) mlt = (* [br]: bodies of each branch (in functional form) *) (* [ni]: number of arguments without parameters in each branch *) let ni = mis_constr_nargs_env env ip in let br_size = Array.length br in assert (Array.length ni = br_size); if br_size = 0 then begin add_recursors env kn; (* May have passed unseen if logical ... *) MLexn "absurd case" end else (* [c] has an inductive type, and is not a type scheme type. *) let t = type_of env c in (* The only non-informative case: [c] is of sort [Prop] *) if (sort_of env t) = InProp then begin add_recursors env kn; (* May have passed unseen if logical ... *) (* Logical singleton case: *) (* [match c with C i j k -> t] becomes [t'] *) assert (br_size = 1); let s = iterate (fun l -> Kill Kother :: l) ni.(0) [] in let mlt = iterate (fun t -> Tarr (Tdummy Kother, t)) ni.(0) mlt in let e = extract_maybe_term env mle mlt br.(0) in snd (case_expunge s e) end else let mi = extract_ind env kn in let oi = mi.ind_packets.(i) in let metas = Array.init (List.length oi.ip_vars) new_meta in (* The extraction of the head. *) let type_head = Tglob (IndRef ip, Array.to_list metas) in let a = extract_term env mle type_head c [] in (* The extraction of each branch. *) let extract_branch i = let r = ConstructRef (ip,i+1) in (* The types of the arguments of the corresponding constructor. *) let f t = type_subst_vect metas (expand env t) in let l = List.map f oi.ip_types.(i) in (* the corresponding signature *) let s = List.map (type2sign env) oi.ip_types.(i) in let s = sign_with_implicits r s mi.ind_nparams in (* Extraction of the branch (in functional form). *) let e = extract_maybe_term env mle (type_recomp (l,mlt)) br.(i) in (* We suppress dummy arguments according to signature. *) let ids,e = case_expunge s e in let e' = handle_exn r (List.length s) (fun _ -> Anonymous) e in (List.rev ids, Pusual r, e') in if mi.ind_kind = Singleton then begin (* Informative singleton case: *) (* [match c with C i -> t] becomes [let i = c' in t'] *) assert (br_size = 1); let (ids,_,e') = extract_branch 0 in assert (List.length ids = 1); MLletin (tmp_id (List.hd ids),a,e') end else (* Standard case: we apply [extract_branch]. *) let typs = List.map type_simpl (Array.to_list metas) in let typ = Tglob (IndRef ip,typs) in MLcase (typ, a, Array.init br_size extract_branch) (*s Extraction of a (co)-fixpoint. 
*) and extract_fix env mle i (fi,ti,ci as recd) mlt = let env = push_rec_types recd env in let metas = Array.map new_meta fi in metas.(i) <- mlt; let mle = Array.fold_left Mlenv.push_type mle metas in let ei = array_map2 (extract_maybe_term env mle) metas ci in MLfix (i, Array.map id_of_name fi, ei) (*S ML declarations. *) (* [decomp_lams_eta env c t] finds the number [n] of products in the type [t], and decompose the term [c] in [n] lambdas, with eta-expansion if needed. *) let rec decomp_lams_eta_n n m env c t = let rels = fst (splay_prod_n env none n t) in let rels = List.map (fun (id,_,c) -> (id,c)) rels in let rels',c = decompose_lam c in let d = n - m in (* we'd better keep rels' as long as possible. *) let rels = (list_firstn d rels) @ rels' in let eta_args = List.rev_map mkRel (interval 1 d) in rels, applist (lift d c,eta_args) (* Let's try to identify some situation where extracted code will allow generalisation of type variables *) let rec gentypvar_ok c = match kind_of_term c with | Lambda _ | Const _ -> true | App (c,v) -> (* if all arguments are variables, these variables will disappear after extraction (see [empty_s] below) *) array_for_all isRel v && gentypvar_ok c | Cast (c,_,_) -> gentypvar_ok c | _ -> false (*s From a constant to a ML declaration. *) let extract_std_constant env kn body typ = reset_meta_count (); (* The short type [t] (i.e. possibly with abbreviations). *) let t = snd (record_constant_type env kn (Some typ)) in (* The real type [t']: without head products, expanded, *) (* and with [Tvar] translated to [Tvar'] (not instantiable). *) let l,t' = type_decomp (expand env (var2var' t)) in let s = List.map (type2sign env) l in (* Check for user-declared implicit information *) let s = sign_with_implicits (ConstRef kn) s 0 in (* Decomposing the top level lambdas of [body]. If there isn't enough, it's ok, as long as remaining args aren't to be pruned (and initial lambdas aren't to be all removed if the target language is strict). In other situations, eta-expansions create artificially enough lams (but that may break user's clever let-ins and partial applications). *) let rels, c = let n = List.length s and m = nb_lam body in if n <= m then decompose_lam_n n body else let s,s' = list_chop m s in if List.for_all ((=) Keep) s' && (lang () = Haskell || sign_kind s <> UnsafeLogicalSig) then decompose_lam_n m body else decomp_lams_eta_n n m env body typ in (* Should we do one eta-expansion to avoid non-generalizable '_a ? *) let rels, c = let n = List.length rels in let s,s' = list_chop n s in let k = sign_kind s in let empty_s = (k = EmptySig || k = SafeLogicalSig) in if lang () = Ocaml && empty_s && not (gentypvar_ok c) && s' <> [] && type_maxvar t <> 0 then decomp_lams_eta_n (n+1) n env body typ else rels,c in let n = List.length rels in let s = list_firstn n s in let l,l' = list_chop n l in let t' = type_recomp (l',t') in (* The initial ML environment. *) let mle = List.fold_left Mlenv.push_std_type Mlenv.empty l in (* The lambdas names. *) let ids = List.map (fun (n,_) -> Id (id_of_name n)) rels in (* The according Coq environment. *) let env = push_rels_assum rels env in (* The real extraction: *) let e = extract_term env mle t' c [] in (* Expunging term and type from dummy lambdas. *) let trm = term_expunge s (ids,e) in let trm = handle_exn (ConstRef kn) n (fun i -> fst (List.nth rels (i-1))) trm in trm, type_expunge_from_sign env s t (* Extracts the type of an axiom, honors the Extraction Implicit declaration. 
*) let extract_axiom env kn typ = reset_meta_count (); (* The short type [t] (i.e. possibly with abbreviations). *) let t = snd (record_constant_type env kn (Some typ)) in (* The real type [t']: without head products, expanded, *) (* and with [Tvar] translated to [Tvar'] (not instantiable). *) let l,_ = type_decomp (expand env (var2var' t)) in let s = List.map (type2sign env) l in (* Check for user-declared implicit information *) let s = sign_with_implicits (ConstRef kn) s 0 in type_expunge_from_sign env s t let extract_fixpoint env vkn (fi,ti,ci) = let n = Array.length vkn in let types = Array.make n (Tdummy Kother) and terms = Array.make n MLdummy in let kns = Array.to_list vkn in current_fixpoints := kns; (* for replacing recursive calls [Rel ..] by the corresponding [Const]: *) let sub = List.rev_map mkConst kns in for i = 0 to n-1 do if sort_of env ti.(i) <> InProp then begin let e,t = extract_std_constant env vkn.(i) (substl sub ci.(i)) ti.(i) in terms.(i) <- e; types.(i) <- t; end done; current_fixpoints := []; Dfix (Array.map (fun kn -> ConstRef kn) vkn, terms, types) let extract_constant env kn cb = let r = ConstRef kn in let typ = Typeops.type_of_constant_type env cb.const_type in let warn_info () = if not (is_custom r) then add_info_axiom r in let warn_log () = if not (constant_has_body cb) then add_log_axiom r in let mk_typ_ax () = let n = type_scheme_nb_args env typ in let ids = iterate (fun l -> anonymous_name::l) n [] in Dtype (r, ids, Taxiom) in let mk_typ c = let s,vl = type_sign_vl env typ in let db = db_from_sign s in let t = extract_type_scheme env db c (List.length s) in Dtype (r, vl, t) in let mk_ax () = let t = extract_axiom env kn typ in Dterm (r, MLaxiom, t) in let mk_def c = let e,t = extract_std_constant env kn c typ in Dterm (r,e,t) in match flag_of_type env typ with | (Logic,TypeScheme) -> warn_log (); Dtype (r, [], Tdummy Ktype) | (Logic,Default) -> warn_log (); Dterm (r, MLdummy, Tdummy Kother) | (Info,TypeScheme) -> (match cb.const_body with | Undef _ -> warn_info (); mk_typ_ax () | Def c -> mk_typ (force c) | OpaqueDef c -> add_opaque r; if access_opaque () then mk_typ (force_opaque c) else mk_typ_ax ()) | (Info,Default) -> (match cb.const_body with | Undef _ -> warn_info (); mk_ax () | Def c -> mk_def (force c) | OpaqueDef c -> add_opaque r; if access_opaque () then mk_def (force_opaque c) else mk_ax ()) let extract_constant_spec env kn cb = let r = ConstRef kn in let typ = Typeops.type_of_constant_type env cb.const_type in match flag_of_type env typ with | (Logic, TypeScheme) -> Stype (r, [], Some (Tdummy Ktype)) | (Logic, Default) -> Sval (r, Tdummy Kother) | (Info, TypeScheme) -> let s,vl = type_sign_vl env typ in (match cb.const_body with | Undef _ | OpaqueDef _ -> Stype (r, vl, None) | Def body -> let db = db_from_sign s in let t = extract_type_scheme env db (force body) (List.length s) in Stype (r, vl, Some t)) | (Info, Default) -> let t = snd (record_constant_type env kn (Some typ)) in Sval (r, type_expunge env t) let extract_with_type env cb = let typ = Typeops.type_of_constant_type env cb.const_type in match flag_of_type env typ with | (Info, TypeScheme) -> let s,vl = type_sign_vl env typ in let db = db_from_sign s in let c = match cb.const_body with | Def body -> force body (* A "with Definition ..." 
is necessarily transparent *) | Undef _ | OpaqueDef _ -> assert false in let t = extract_type_scheme env db c (List.length s) in Some (vl, t) | _ -> None let extract_inductive env kn = let ind = extract_ind env kn in add_recursors env kn; let f i j l = let implicits = implicits_of_global (ConstructRef ((kn,i),j+1)) in let rec filter i = function | [] -> [] | t::l -> let l' = filter (succ i) l in if isDummy (expand env t) || List.mem i implicits then l' else t::l' in filter (1+ind.ind_nparams) l in let packets = Array.mapi (fun i p -> { p with ip_types = Array.mapi (f i) p.ip_types }) ind.ind_packets in { ind with ind_packets = packets } (*s Is a [ml_decl] logical ? *) let logical_decl = function | Dterm (_,MLdummy,Tdummy _) -> true | Dtype (_,[],Tdummy _) -> true | Dfix (_,av,tv) -> (array_for_all ((=) MLdummy) av) && (array_for_all isDummy tv) | Dind (_,i) -> array_for_all (fun ip -> ip.ip_logical) i.ind_packets | _ -> false (*s Is a [ml_spec] logical ? *) let logical_spec = function | Stype (_, [], Some (Tdummy _)) -> true | Sval (_,Tdummy _) -> true | Sind (_,i) -> array_for_all (fun ip -> ip.ip_logical) i.ind_packets | _ -> false coq-8.4pl2/plugins/extraction/extract_env.ml0000640000175000001440000005110112121620060020330 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let mp,_,l = repr_kn kn in let seb = match Libobject.object_tag o with | "CONSTANT" -> SFBconst (Global.lookup_constant (constant_of_kn kn)) | "INDUCTIVE" -> SFBmind (Global.lookup_mind (mind_of_kn kn)) | "MODULE" -> SFBmodule (Global.lookup_module (MPdot (mp,l))) | "MODULE TYPE" -> SFBmodtype (Global.lookup_modtype (MPdot (mp,l))) | _ -> failwith "caught" in l,seb | _ -> failwith "caught" in SEBstruct (List.rev (map_succeed get_reference seg)) let environment_until dir_opt = let rec parse = function | [] when dir_opt = None -> [current_toplevel (), toplevel_env ()] | [] -> [] | d :: l -> match (Global.lookup_module (MPfile d)).mod_expr with | Some meb -> if dir_opt = Some d then [MPfile d, meb] else (MPfile d, meb) :: (parse l) | _ -> assert false in parse (Library.loaded_libraries ()) (*s Visit: a structure recording the needed dependencies for the current extraction *) module type VISIT = sig (* Reset the dependencies by emptying the visit lists *) val reset : unit -> unit (* Add the module_path and all its prefixes to the mp visit list *) val add_mp : module_path -> unit (* Same, but we'll keep all fields of these modules *) val add_mp_all : module_path -> unit (* Add kernel_name / constant / reference / ... in the visit lists. These functions silently add the mp of their arg in the mp list *) val add_ind : mutual_inductive -> unit val add_con : constant -> unit val add_ref : global_reference -> unit val add_decl_deps : ml_decl -> unit val add_spec_deps : ml_spec -> unit (* Test functions: is a particular object a needed dependency for the current extraction ? 
*) val needed_ind : mutual_inductive -> bool val needed_con : constant -> bool val needed_mp : module_path -> bool val needed_mp_all : module_path -> bool end module Visit : VISIT = struct (* What used to be in a single KNset should now be split into a KNset (for inductives and modules names) and a Cset_env for constants (and still the remaining MPset) *) type must_visit = { mutable ind : KNset.t; mutable con : KNset.t; mutable mp : MPset.t; mutable mp_all : MPset.t } (* the imperative internal visit lists *) let v = { ind = KNset.empty ; con = KNset.empty ; mp = MPset.empty; mp_all = MPset.empty } (* the accessor functions *) let reset () = v.ind <- KNset.empty; v.con <- KNset.empty; v.mp <- MPset.empty; v.mp_all <- MPset.empty let needed_ind i = KNset.mem (user_mind i) v.ind let needed_con c = KNset.mem (user_con c) v.con let needed_mp mp = MPset.mem mp v.mp || MPset.mem mp v.mp_all let needed_mp_all mp = MPset.mem mp v.mp_all let add_mp mp = check_loaded_modfile mp; v.mp <- MPset.union (prefixes_mp mp) v.mp let add_mp_all mp = check_loaded_modfile mp; v.mp <- MPset.union (prefixes_mp mp) v.mp; v.mp_all <- MPset.add mp v.mp_all let add_ind i = let kn = user_mind i in v.ind <- KNset.add kn v.ind; add_mp (modpath kn) let add_con c = let kn = user_con c in v.con <- KNset.add kn v.con; add_mp (modpath kn) let add_ref = function | ConstRef c -> add_con c | IndRef (ind,_) | ConstructRef ((ind,_),_) -> add_ind ind | VarRef _ -> assert false let add_decl_deps = decl_iter_references add_ref add_ref add_ref let add_spec_deps = spec_iter_references add_ref add_ref add_ref end exception Impossible let check_arity env cb = let t = Typeops.type_of_constant_type env cb.const_type in if Reduction.is_arity env t then raise Impossible let check_fix env cb i = match cb.const_body with | Def lbody -> (match kind_of_term (Declarations.force lbody) with | Fix ((_,j),recd) when i=j -> check_arity env cb; (true,recd) | CoFix (j,recd) when i=j -> check_arity env cb; (false,recd) | _ -> raise Impossible) | Undef _ | OpaqueDef _ -> raise Impossible let prec_declaration_equal (na1, ca1, ta1) (na2, ca2, ta2) = na1 = na2 && array_equal eq_constr ca1 ca2 && array_equal eq_constr ta1 ta2 let factor_fix env l cb msb = let _,recd as check = check_fix env cb 0 in let n = Array.length (let fi,_,_ = recd in fi) in if n = 1 then [|l|], recd, msb else begin if List.length msb < n-1 then raise Impossible; let msb', msb'' = list_chop (n-1) msb in let labels = Array.make n l in list_iter_i (fun j -> function | (l,SFBconst cb') -> let check' = check_fix env cb' (j+1) in if not (fst check = fst check' && prec_declaration_equal (snd check) (snd check')) then raise Impossible; labels.(j+1) <- l; | _ -> raise Impossible) msb'; labels, recd, msb'' end (** Expanding a [struct_expr_body] into a version without abbreviations or functor applications. This is done via a detour to entries (hack proposed by Elie) *) let rec seb2mse = function | SEBapply (s,s',_) -> Entries.MSEapply(seb2mse s, seb2mse s') | SEBident mp -> Entries.MSEident mp | _ -> failwith "seb2mse: received a non-atomic seb" let expand_seb env mp seb = let seb,_,_,_ = let inl = Some (Flags.get_inline_level()) in Mod_typing.translate_struct_module_entry env mp inl (seb2mse seb) in seb (** When possible, we use the nicer, shorter, algebraic type structures instead of the expanded ones. 
*) let my_type_of_mb mb = let m0 = mb.mod_type in match mb.mod_type_alg with Some m -> m0,m | None -> m0,m0 let my_type_of_mtb mtb = let m0 = mtb.typ_expr in match mtb.typ_expr_alg with Some m -> m0,m | None -> m0,m0 (** Ad-hoc update of environment, inspired by [Mod_type.check_with_aux_def]. To check with Elie. *) let rec msid_of_seb = function | SEBident mp -> mp | SEBwith (seb,_) -> msid_of_seb seb | _ -> assert false let env_for_mtb_with_def env mp seb idl = let sig_b = match seb with | SEBstruct(sig_b) -> sig_b | _ -> assert false in let l = label_of_id (List.hd idl) in let spot = function (l',SFBconst _) -> l = l' | _ -> false in let before = fst (list_split_when spot sig_b) in Modops.add_signature mp before empty_delta_resolver env (* From a [structure_body] (i.e. a list of [structure_field_body]) to specifications. *) let rec extract_sfb_spec env mp = function | [] -> [] | (l,SFBconst cb) :: msig -> let kn = make_con mp empty_dirpath l in let s = extract_constant_spec env kn cb in let specs = extract_sfb_spec env mp msig in if logical_spec s then specs else begin Visit.add_spec_deps s; (l,Spec s) :: specs end | (l,SFBmind _) :: msig -> let mind = make_mind mp empty_dirpath l in let s = Sind (mind, extract_inductive env mind) in let specs = extract_sfb_spec env mp msig in if logical_spec s then specs else begin Visit.add_spec_deps s; (l,Spec s) :: specs end | (l,SFBmodule mb) :: msig -> let specs = extract_sfb_spec env mp msig in let spec = extract_seb_spec env mb.mod_mp (my_type_of_mb mb) in (l,Smodule spec) :: specs | (l,SFBmodtype mtb) :: msig -> let specs = extract_sfb_spec env mp msig in let spec = extract_seb_spec env mtb.typ_mp (my_type_of_mtb mtb) in (l,Smodtype spec) :: specs (* From [struct_expr_body] to specifications *) (* Invariant: the [seb] given to [extract_seb_spec] should either come from a [mod_type] or [type_expr] field, or their [_alg] counterparts. This way, any encountered [SEBident] should be a true module type. *) and extract_seb_spec env mp1 (seb,seb_alg) = match seb_alg with | SEBident mp -> Visit.add_mp_all mp; MTident mp | SEBwith(seb',With_definition_body(idl,cb))-> let env' = env_for_mtb_with_def env (msid_of_seb seb') seb idl in let mt = extract_seb_spec env mp1 (seb,seb') in (match extract_with_type env' cb with (* cb peut contenir des kn *) | None -> mt | Some (vl,typ) -> MTwith(mt,ML_With_type(idl,vl,typ))) | SEBwith(seb',With_module_body(idl,mp))-> Visit.add_mp_all mp; MTwith(extract_seb_spec env mp1 (seb,seb'), ML_With_module(idl,mp)) | SEBfunctor (mbid, mtb, seb_alg') -> let seb' = match seb with | SEBfunctor (mbid',_,seb') when mbid' = mbid -> seb' | _ -> assert false in let mp = MPbound mbid in let env' = Modops.add_module (Modops.module_body_of_type mp mtb) env in MTfunsig (mbid, extract_seb_spec env mp (my_type_of_mtb mtb), extract_seb_spec env' mp1 (seb',seb_alg')) | SEBstruct (msig) -> let env' = Modops.add_signature mp1 msig empty_delta_resolver env in MTsig (mp1, extract_sfb_spec env' mp1 msig) | SEBapply _ -> if seb <> seb_alg then extract_seb_spec env mp1 (seb,seb) else assert false (* From a [structure_body] (i.e. a list of [structure_field_body]) to implementations. NB: when [all=false], the evaluation order of the list is important: last to first ensures correct dependencies. 
*) let rec extract_sfb env mp all = function | [] -> [] | (l,SFBconst cb) :: msb -> (try let vl,recd,msb = factor_fix env l cb msb in let vc = Array.map (make_con mp empty_dirpath) vl in let ms = extract_sfb env mp all msb in let b = array_exists Visit.needed_con vc in if all || b then let d = extract_fixpoint env vc recd in if (not b) && (logical_decl d) then ms else begin Visit.add_decl_deps d; (l,SEdecl d) :: ms end else ms with Impossible -> let ms = extract_sfb env mp all msb in let c = make_con mp empty_dirpath l in let b = Visit.needed_con c in if all || b then let d = extract_constant env c cb in if (not b) && (logical_decl d) then ms else begin Visit.add_decl_deps d; (l,SEdecl d) :: ms end else ms) | (l,SFBmind mib) :: msb -> let ms = extract_sfb env mp all msb in let mind = make_mind mp empty_dirpath l in let b = Visit.needed_ind mind in if all || b then let d = Dind (mind, extract_inductive env mind) in if (not b) && (logical_decl d) then ms else begin Visit.add_decl_deps d; (l,SEdecl d) :: ms end else ms | (l,SFBmodule mb) :: msb -> let ms = extract_sfb env mp all msb in let mp = MPdot (mp,l) in if all || Visit.needed_mp mp then (l,SEmodule (extract_module env mp true mb)) :: ms else ms | (l,SFBmodtype mtb) :: msb -> let ms = extract_sfb env mp all msb in let mp = MPdot (mp,l) in if all || Visit.needed_mp mp then (l,SEmodtype (extract_seb_spec env mp (my_type_of_mtb mtb))) :: ms else ms (* From [struct_expr_body] to implementations *) and extract_seb env mp all = function | (SEBident _ | SEBapply _) as seb when lang () <> Ocaml -> (* in Haskell/Scheme, we expand everything *) extract_seb env mp all (expand_seb env mp seb) | SEBident mp -> if is_modfile mp && not (modular ()) then error_MPfile_as_mod mp false; Visit.add_mp_all mp; MEident mp | SEBapply (meb, meb',_) -> MEapply (extract_seb env mp true meb, extract_seb env mp true meb') | SEBfunctor (mbid, mtb, meb) -> let mp1 = MPbound mbid in let env' = Modops.add_module (Modops.module_body_of_type mp1 mtb) env in MEfunctor (mbid, extract_seb_spec env mp1 (my_type_of_mtb mtb), extract_seb env' mp true meb) | SEBstruct (msb) -> let env' = Modops.add_signature mp msb empty_delta_resolver env in MEstruct (mp,extract_sfb env' mp all msb) | SEBwith (_,_) -> anomaly "Not available yet" and extract_module env mp all mb = (* A module has an empty [mod_expr] when : - it is a module variable (for instance X inside a Module F [X:SIG]) - it is a module assumption (Declare Module). Since we look at modules from outside, we shouldn't have variables. But a Declare Module at toplevel seems legal (cf #2525). For the moment we don't support this situation. *) match mb.mod_expr with | None -> error_no_module_expr mp | Some me -> { ml_mod_expr = extract_seb env mp all me; ml_mod_type = extract_seb_spec env mp (my_type_of_mb mb) } let unpack = function MEstruct (_,sel) -> sel | _ -> assert false let mono_environment refs mpl = Visit.reset (); List.iter Visit.add_ref refs; List.iter Visit.add_mp_all mpl; let env = Global.env () in let l = List.rev (environment_until None) in List.rev_map (fun (mp,m) -> mp, unpack (extract_seb env mp (Visit.needed_mp_all mp) m)) l (**************************************) (*S Part II : Input/Output primitives *) (**************************************) let descr () = match lang () with | Ocaml -> Ocaml.ocaml_descr | Haskell -> Haskell.haskell_descr | Scheme -> Scheme.scheme_descr (* From a filename string "foo.ml" or "foo", builds "foo.ml" and "foo.mli" Works similarly for the other languages. 
*) let default_id = id_of_string "Main" let mono_filename f = let d = descr () in match f with | None -> None, None, default_id | Some f -> let f = if Filename.check_suffix f d.file_suffix then Filename.chop_suffix f d.file_suffix else f in let id = if lang () <> Haskell then default_id else try id_of_string (Filename.basename f) with e when Errors.noncritical e -> error "Extraction: provided filename is not a valid identifier" in Some (f^d.file_suffix), Option.map ((^) f) d.sig_suffix, id (* Builds a suitable filename from a module id *) let module_filename mp = let f = file_of_modfile mp in let d = descr () in Some (f^d.file_suffix), Option.map ((^) f) d.sig_suffix, id_of_string f (*s Extraction of one decl to stdout. *) let print_one_decl struc mp decl = let d = descr () in reset_renaming_tables AllButExternal; set_phase Pre; ignore (d.pp_struct struc); set_phase Impl; push_visible mp []; msgnl (d.pp_decl decl); pop_visible () (*s Extraction of a ml struct to a file. *) (** For Recursive Extraction, writing directly on stdout won't work with coqide, we use a buffer instead *) let buf = Buffer.create 1000 let formatter dry file = let ft = if dry then Format.make_formatter (fun _ _ _ -> ()) (fun _ -> ()) else match file with | Some f -> Pp_control.with_output_to f | None -> Format.formatter_of_buffer buf in (* We never want to see ellipsis ... in extracted code *) Format.pp_set_max_boxes ft max_int; (* We reuse the width information given via "Set Printing Width" *) (match Pp_control.get_margin () with | None -> () | Some i -> Format.pp_set_margin ft i; Format.pp_set_max_indent ft (i-10)); (* note: max_indent should be < margin above, otherwise it's ignored *) ft let print_structure_to_file (fn,si,mo) dry struc = Buffer.clear buf; let d = descr () in reset_renaming_tables AllButExternal; let unsafe_needs = { mldummy = struct_ast_search ((=) MLdummy) struc; tdummy = struct_type_search Mlutil.isDummy struc; tunknown = struct_type_search ((=) Tunknown) struc; magic = if lang () <> Haskell then false else struct_ast_search (function MLmagic _ -> true | _ -> false) struc } in (* First, a dry run, for computing objects to rename or duplicate *) set_phase Pre; let devnull = formatter true None in msg_with devnull (d.pp_struct struc); let opened = opened_libraries () in (* Print the implementation *) let cout = if dry then None else Option.map open_out fn in let ft = formatter dry cout in begin try (* The real printing of the implementation *) set_phase Impl; msg_with ft (d.preamble mo opened unsafe_needs); msg_with ft (d.pp_struct struc); Option.iter close_out cout; with reraise -> Option.iter close_out cout; raise reraise end; if not dry then Option.iter info_file fn; (* Now, let's print the signature *) Option.iter (fun si -> let cout = open_out si in let ft = formatter false (Some cout) in begin try set_phase Intf; msg_with ft (d.sig_preamble mo opened unsafe_needs); msg_with ft (d.pp_sig (signature_of_structure struc)); close_out cout; with reraise -> close_out cout; raise reraise end; info_file si) (if dry then None else si); (* Print the buffer content via Coq standard formatter (ok with coqide). 
*) if Buffer.length buf <> 0 then begin Pp.message (Buffer.contents buf); Buffer.reset buf end (*********************************************) (*s Part III: the actual extraction commands *) (*********************************************) let reset () = Visit.reset (); reset_tables (); reset_renaming_tables Everything let init modular library = check_inside_section (); check_inside_module (); set_keywords (descr ()).keywords; set_modular modular; set_library library; reset (); if modular && lang () = Scheme then error_scheme () let warns () = warning_opaques (access_opaque ()); warning_axioms () (* From a list of [reference], let's retrieve whether they correspond to modules or [global_reference]. Warn the user if both is possible. *) let rec locate_ref = function | [] -> [],[] | r::l -> let q = snd (qualid_of_reference r) in let mpo = try Some (Nametab.locate_module q) with Not_found -> None and ro = try Some (Smartlocate.global_with_alias r) with e when Errors.noncritical e -> None in match mpo, ro with | None, None -> Nametab.error_global_not_found q | None, Some r -> let refs,mps = locate_ref l in r::refs,mps | Some mp, None -> let refs,mps = locate_ref l in refs,mp::mps | Some mp, Some r -> warning_both_mod_and_cst q mp r; let refs,mps = locate_ref l in refs,mp::mps (*s Recursive extraction in the Coq toplevel. The vernacular command is \verb!Recursive Extraction! [qualid1] ... [qualidn]. Also used when extracting to a file with the command: \verb!Extraction "file"! [qualid1] ... [qualidn]. *) let full_extr f (refs,mps) = init false false; List.iter (fun mp -> if is_modfile mp then error_MPfile_as_mod mp true) mps; let struc = optimize_struct (refs,mps) (mono_environment refs mps) in warns (); print_structure_to_file (mono_filename f) false struc; reset () let full_extraction f lr = full_extr f (locate_ref lr) (*s Separate extraction is similar to recursive extraction, with the output decomposed in many files, one per Coq .v file *) let separate_extraction lr = init true false; let refs,mps = locate_ref lr in let struc = optimize_struct (refs,mps) (mono_environment refs mps) in warns (); let print = function | (MPfile dir as mp, sel) as e -> print_structure_to_file (module_filename mp) false [e] | _ -> assert false in List.iter print struc; reset () (*s Simple extraction in the Coq toplevel. The vernacular command is \verb!Extraction! [qualid]. *) let simple_extraction r = Vernacentries.dump_global (Genarg.AN r); match locate_ref [r] with | ([], [mp]) as p -> full_extr None p | [r],[] -> init false false; let struc = optimize_struct ([r],[]) (mono_environment [r] []) in let d = get_decl_in_structure r struc in warns (); if is_custom r then msgnl (str "(** User defined extraction *)"); print_one_decl struc (modpath_of_r r) d; reset () | _ -> assert false (*s (Recursive) Extraction of a library. The vernacular command is \verb!(Recursive) Extraction Library! [M]. 
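For instance (an illustrative usage, taking the standard library file Datatypes.v as example): "Extraction Library Datatypes" should produce Datatypes.ml (and Datatypes.mli for Ocaml) in the current directory, while "Recursive Extraction Library Datatypes" additionally extracts all the Coq libraries that Datatypes depends on.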
*) let extraction_library is_rec m = init true true; let dir_m = let q = qualid_of_ident m in try Nametab.full_name_module q with Not_found -> error_unknown_module q in Visit.add_mp_all (MPfile dir_m); let env = Global.env () in let l = List.rev (environment_until (Some dir_m)) in let select l (mp,meb) = if Visit.needed_mp mp then (mp, unpack (extract_seb env mp true meb)) :: l else l in let struc = List.fold_left select [] l in let struc = optimize_struct ([],[]) struc in warns (); let print = function | (MPfile dir as mp, sel) as e -> let dry = not is_rec && dir <> dir_m in print_structure_to_file (module_filename mp) dry [e] | _ -> assert false in List.iter print struc; reset () coq-8.4pl2/plugins/extraction/README0000640000175000001440000001130211160567762016360 0ustar notinusers Coq Extraction ============== What is it ? ------------ Extraction is a mechanism that produces functional code (Ocaml/Haskell/Scheme) out of any Coq term (either a program or a proof). Who did it ? ------------ The current implementation (from version 7.0 up to now) has been done by P. Letouzey during his PhD, helped by J.C. Filliâtre and supervised by C. Paulin. An earlier implementation (versions 6.x) was due to B. Werner and C. Paulin. Where can we find more information ? ------------------------------------ - The Coq Reference Manual includes a full chapter about extraction - P. Letouzey's PhD thesis [3] is a complete document about the theory, the implementation and the test-cases of Coq extraction - A more recent article [4] gives a short overview of extraction - earlier documents [1] [2] may also be useful. Why a complete re-implementation ? ---------------------------------- The extraction code has been completely rewritten since version V6.3. 1) Principles The main goal of the new extraction is to handle any Coq term, even those built upon sort Type, and to produce code that always compiles. Thus it will never answer something like "Not an ML type", but will rather produce a dummy term like the ML unit. Translation between Coq and ML is based upon the following principles: - Terms of sort Prop don't have any computational meaning, so they are merged into one ML term "__". This part is done according to P. Letouzey's works [1] and [2]. This dummy constant "__" used to be implemented by the unit (), but we recently found that this constant might be applied in some cases. So "__" is now in Ocaml a fixpoint that forgets its arguments: let __ = let rec f _ = Obj.repr f in Obj.repr f - Terms that are type schemes (i.e. something of type (x1:X1)(x2:X2)...s with s a sort) don't have any ML counterpart at the term level, since they are type transformers. In fact they do not have any computational meaning either. So we also merge them into that dummy term "__". - A Coq term gives an ML term or an ML type depending on its type: type schemes will (try to) give ML types, and all other terms give ML terms. The rest of the translation is (almost) straightforward: an inductive gives an inductive, etc... This gives ML code that has no special reason to typecheck, due to the incompatibilities between the Coq and ML typing systems. In fact most of the time everything goes right. We now verify during extraction that the produced code is typecheckable, and if it is not we insert unsafe type casts at critical points in the code, using either "Obj.magic" in Ocaml or "unsafeCoerce" in Haskell. 2) Differences with the previous extraction (V6.3 and before) 2.a) The pros The ability to extract every Coq term, as explained in the previous paragraph (see the small illustration below).
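A small illustration of the dummy constant "__" (a minimal sketch): since "__" is a fixpoint that ignores its arguments, the following Ocaml snippet compiles and simply evaluates to the dummy closure again:

  let __ = let rec f _ = Obj.repr f in Obj.repr f
  let still_the_dummy = (Obj.magic __) 0 true "whatever"

This is what makes it harmless for extracted code to apply "__" to leftover arguments coming from erased proof parts.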
The ability to extract from a file an ML module (cf. Extraction Library in the documentation). You can have a taste of extraction directly at the toplevel by using the "Extraction [qualid]" or the "Recursive Extraction [qualid]" commands. This toplevel extraction was already there in V6.3, but was printing Fw terms. It now prints in the language of your choice: Ocaml, Haskell or Scheme. The optimizations performed on extracted code have been ported from V6.3 to V7 and enhanced, in particular the mechanism of automatic expansion. 2.b) The cons The presence of some parasitic "__" dummy arguments in functions. These are the remains of proof parts. The previous extraction was able to remove them totally. The current implementation removes a good deal of them, but not all. This problem is due to extraction upon Type. For example, let's take this pathological term: (if b then Set else Prop) : Type The only way to know whether this is a Set (to keep) or a Prop (to remove) is to compute the boolean b, and we do not want to do that during extraction. There is no longer an "ML import" feature. You can compensate by using Axioms, and then "Extract Constant ..." [1]: Exécution de termes de preuves : une nouvelle méthode d'extraction pour le Calcul des Constructions Inductives, Pierre Letouzey, DEA thesis, 2000, http://www.pps.jussieu.fr/~letouzey/download/rapport_dea.ps.gz [2]: A New Extraction for Coq, Pierre Letouzey, Types 2002 Post-Workshop Proceedings. http://www.pps.jussieu.fr/~letouzey/download/extraction2002.ps.gz [3]: Programmation fonctionnelle certifiée : l'extraction de programmes dans l'assistant Coq, Pierre Letouzey, PhD thesis, 2004. http://www.pps.jussieu.fr/~letouzey/download/these_letouzey.ps.gz http://www.pps.jussieu.fr/~letouzey/download/these_letouzey_English.ps.gz [4]: Coq Extraction, An Overview, Pierre Letouzey, CiE 2008. http://www.pps.jussieu.fr/~letouzey/download/letouzey_extr_cie08.pdf coq-8.4pl2/plugins/extraction/common.ml0000640000175000001440000004711312010532755017320 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ -> false (*s Some pretty-print utility functions. *) let pp_par par st = if par then str "(" ++ st ++ str ")" else st (** [pp_apply] : a head part applied to arguments, possibly with parentheses *) let pp_apply st par args = match args with | [] -> st | _ -> hov 2 (pp_par par (st ++ spc () ++ prlist_with_sep spc identity args)) (** Same as [pp_apply], but also with protection of the head by parentheses *) let pp_apply2 st par args = let par' = args <> [] || par in pp_apply (pp_par par' st) par args let pr_binding = function | [] -> mt () | l -> str " " ++ prlist_with_sep (fun () -> str " ") pr_id l let pp_tuple_light f = function | [] -> mt () | [x] -> f true x | l -> pp_par true (prlist_with_sep (fun () -> str "," ++ spc ()) (f false) l) let pp_tuple f = function | [] -> mt () | [x] -> f x | l -> pp_par true (prlist_with_sep (fun () -> str "," ++ spc ()) f l) let pp_boxed_tuple f = function | [] -> mt () | [x] -> f x | l -> pp_par true (hov 0 (prlist_with_sep (fun () -> str "," ++ spc ()) f l)) (** By default, in module Format, you can do horizontal placing of blocks even if they include newlines, as long as the number of chars in the blocks is less than a line length. To avoid this awkward situation, we attach a big virtual size to [fnl] newlines.
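(Illustrative note: [stras (1000000,"")] used below declares a string of length 1000000 that actually prints nothing, so any enclosing box considers such a newline far too wide to be flattened onto a single line, and the line break is always honored.)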
*) let fnl () = stras (1000000,"") ++ fnl () let fnl2 () = fnl () ++ fnl () let space_if = function true -> str " " | false -> mt () let is_digit = function | '0'..'9' -> true | _ -> false let begins_with_CoqXX s = let n = String.length s in n >= 4 && s.[0] = 'C' && s.[1] = 'o' && s.[2] = 'q' && let i = ref 3 in try while !i < n do if s.[!i] = '_' then i:=n (*Stop*) else if is_digit s.[!i] then incr i else raise Not_found done; true with Not_found -> false let unquote s = if lang () <> Scheme then s else let s = String.copy s in for i=0 to String.length s - 1 do if s.[i] = '\'' then s.[i] <- '~' done; s let rec qualify delim = function | [] -> assert false | [s] -> s | ""::l -> qualify delim l | s::l -> s^delim^(qualify delim l) let dottify = qualify "." let pseudo_qualify = qualify "__" (*s Uppercase/lowercase renamings. *) let is_upper s = match s.[0] with 'A' .. 'Z' -> true | _ -> false let is_lower s = match s.[0] with 'a' .. 'z' | '_' -> true | _ -> false let lowercase_id id = id_of_string (String.uncapitalize (string_of_id id)) let uppercase_id id = let s = string_of_id id in assert (s<>""); if s.[0] = '_' then id_of_string ("Coq_"^s) else id_of_string (String.capitalize s) type kind = Term | Type | Cons | Mod let upperkind = function | Type -> lang () = Haskell | Term -> false | Cons | Mod -> true let kindcase_id k id = if upperkind k then uppercase_id id else lowercase_id id (*s de Bruijn environments for programs *) type env = identifier list * Idset.t (*s Generic renaming issues for local variable names. *) let rec rename_id id avoid = if Idset.mem id avoid then rename_id (lift_subscript id) avoid else id let rec rename_vars avoid = function | [] -> [], avoid | id :: idl when id == dummy_name -> (* we don't rename dummy binders *) let (idl', avoid') = rename_vars avoid idl in (id :: idl', avoid') | id :: idl -> let (idl, avoid) = rename_vars avoid idl in let id = rename_id (lowercase_id id) avoid in (id :: idl, Idset.add id avoid) let rename_tvars avoid l = let rec rename avoid = function | [] -> [],avoid | id :: idl -> let id = rename_id (lowercase_id id) avoid in let idl, avoid = rename (Idset.add id avoid) idl in (id :: idl, avoid) in fst (rename avoid l) let push_vars ids (db,avoid) = let ids',avoid' = rename_vars avoid ids in ids', (ids' @ db, avoid') let get_db_name n (db,_) = let id = List.nth db (pred n) in if id = dummy_name then id_of_string "__" else id (*S Renamings of global objects. *) (*s Tables of global renamings *) let register_cleanup, do_cleanup = let funs = ref [] in (fun f -> funs:=f::!funs), (fun () -> List.iter (fun f -> f ()) !funs) type phase = Pre | Impl | Intf let set_phase, get_phase = let ph = ref Impl in ((:=) ph), (fun () -> !ph) let set_keywords, get_keywords = let k = ref Idset.empty in ((:=) k), (fun () -> !k) let add_global_ids, get_global_ids = let ids = ref Idset.empty in register_cleanup (fun () -> ids := get_keywords ()); let add s = ids := Idset.add s !ids and get () = !ids in (add,get) let empty_env () = [], get_global_ids () let mktable autoclean = let h = Hashtbl.create 97 in if autoclean then register_cleanup (fun () -> Hashtbl.clear h); (Hashtbl.replace h, Hashtbl.find h, fun () -> Hashtbl.clear h) (* We might have built [global_reference] whose canonical part is inaccurate. 
We must hence compare only the user part, hence using a Hashtbl might be incorrect *) let mktable_ref autoclean = let m = ref Refmap'.empty in let clear () = m := Refmap'.empty in if autoclean then register_cleanup clear; (fun r v -> m := Refmap'.add r v !m), (fun r -> Refmap'.find r !m), clear (* A table recording objects in the first level of all MPfile *) let add_mpfiles_content,get_mpfiles_content,clear_mpfiles_content = mktable false let get_mpfiles_content mp = try get_mpfiles_content mp with Not_found -> failwith "get_mpfiles_content" (*s The list of external modules that will be opened initially *) let mpfiles_add, mpfiles_mem, mpfiles_list, mpfiles_clear = let m = ref MPset.empty in let add mp = m:=MPset.add mp !m and mem mp = MPset.mem mp !m and list () = MPset.elements !m and clear () = m:=MPset.empty in register_cleanup clear; (add,mem,list,clear) (*s List of module parameters that we should alpha-rename *) let params_ren_add, params_ren_mem = let m = ref MPset.empty in let add mp = m:=MPset.add mp !m and mem mp = MPset.mem mp !m and clear () = m:=MPset.empty in register_cleanup clear; (add,mem) (*s table indicating the visible horizon at a precise moment, i.e. the stack of structures we are inside. - The sequence of [mp] parts should have the following form: a [MPfile] at the beginning, and then more and more [MPdot] over this [MPfile], or [MPbound] when inside the type of a module parameter. - the [params] are the [MPbound] when [mp] is a functor, the innermost [MPbound] coming first in the list. - The [content] part is used to record all the names already seen at this level. *) type visible_layer = { mp : module_path; params : module_path list; content : ((kind*string),label) Hashtbl.t } let pop_visible, push_visible, get_visible = let vis = ref [] in register_cleanup (fun () -> vis := []); let pop () = match !vis with | [] -> assert false | v :: vl -> vis := vl; (* we save the 1st-level-content of MPfile for later use *) if get_phase () = Impl && modular () && is_modfile v.mp then add_mpfiles_content v.mp v.content and push mp mps = vis := { mp = mp; params = mps; content = Hashtbl.create 97 } :: !vis and get () = !vis in (pop,push,get) let get_visible_mps () = List.map (function v -> v.mp) (get_visible ()) let top_visible () = match get_visible () with [] -> assert false | v::_ -> v let top_visible_mp () = (top_visible ()).mp let add_visible ks l = Hashtbl.add (top_visible ()).content ks l (* table of local module wrappers used to provide non-ambiguous names *) let add_duplicate, check_duplicate = let index = ref 0 and dups = ref Gmap.empty in register_cleanup (fun () -> index := 0; dups := Gmap.empty); let add mp l = incr index; let ren = "Coq__" ^ string_of_int (!index) in dups := Gmap.add (mp,l) ren !dups and check mp l = Gmap.find (mp, l) !dups in (add,check) type reset_kind = AllButExternal | Everything let reset_renaming_tables flag = do_cleanup (); if flag = Everything then clear_mpfiles_content () (*S Renaming functions *) (* This function creates from [id] a correct uppercase/lowercase identifier. This is done by adding a [Coq_] or [coq_] prefix. To avoid potential clashes with previous [Coq_id] variable, these prefixes are duplicated if already existing. 
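For instance (illustrative): with [k = Term], the Ocaml keyword [match] becomes [coq_match], and an identifier that already starts with the prefix, say [coq_x], becomes [coq_coq_x]; with [k = Cons], a lowercase constructor name such as [cons] becomes [Coq_cons], since Ocaml constructors must be capitalized.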
*) let modular_rename k id = let s = string_of_id id in let prefix,is_ok = if upperkind k then "Coq_",is_upper else "coq_",is_lower in if not (is_ok s) || (Idset.mem id (get_keywords ())) || (String.length s >= 4 && String.sub s 0 4 = prefix) then prefix ^ s else s (*s For monolithic extraction, first-level modules might have to be renamed with unique numbers *) let modfstlev_rename = let add_prefixes,get_prefixes,_ = mktable true in fun l -> let coqid = id_of_string "Coq" in let id = id_of_label l in try let coqset = get_prefixes id in let nextcoq = next_ident_away coqid coqset in add_prefixes id (nextcoq::coqset); (string_of_id nextcoq)^"_"^(string_of_id id) with Not_found -> let s = string_of_id id in if is_lower s || begins_with_CoqXX s then (add_prefixes id [coqid]; "Coq_"^s) else (add_prefixes id []; s) (*s Creating renaming for a [module_path] : first, the real function ... *) let rec mp_renaming_fun mp = match mp with | _ when not (modular ()) && at_toplevel mp -> [""] | MPdot (mp,l) -> let lmp = mp_renaming mp in if lmp = [""] then (modfstlev_rename l)::lmp else (modular_rename Mod (id_of_label l))::lmp | MPbound mbid -> let s = modular_rename Mod (id_of_mbid mbid) in if not (params_ren_mem mp) then [s] else let i,_,_ = repr_mbid mbid in [s^"__"^string_of_int i] | MPfile _ -> assert (modular ()); (* see [at_toplevel] above *) assert (get_phase () = Pre); let current_mpfile = (list_last (get_visible ())).mp in if mp <> current_mpfile then mpfiles_add mp; [string_of_modfile mp] (* ... and its version using a cache *) and mp_renaming = let add,get,_ = mktable true in fun x -> try if is_mp_bound (base_mp x) then raise Not_found; get x with Not_found -> let y = mp_renaming_fun x in add x y; y (*s Renamings creation for a [global_reference]: we build its fully-qualified name in a [string list] form (head is the short name). *) let ref_renaming_fun (k,r) = let mp = modpath_of_r r in let l = mp_renaming mp in let l = if lang () <> Ocaml && not (modular ()) then [""] else l in let s = let idg = safe_basename_of_global r in if l = [""] (* this happens only at toplevel of the monolithic case *) then let globs = Idset.elements (get_global_ids ()) in let id = next_ident_away (kindcase_id k idg) globs in string_of_id id else modular_rename k idg in add_global_ids (id_of_string s); s::l (* Cached version of the last function *) let ref_renaming = let add,get,_ = mktable_ref true in fun ((k,r) as x) -> try if is_mp_bound (base_mp (modpath_of_r r)) then raise Not_found; get r with Not_found -> let y = ref_renaming_fun x in add r y; y (* [visible_clash mp0 (k,s)] checks if [mp0-s] of kind [k] can be printed as [s] in the current context of visible modules. More precisely, we check if there exists a visible [mp] that contains [s]. The verification stops if we encounter [mp=mp0]. 
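For instance (illustrative): if some visible structure lying closer to the top than [mp0] already binds the same string with the same kind, the short name cannot be used and the printer falls back to qualification, or to the duplication mechanism described further below.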
*) let rec clash mem mp0 ks = function | [] -> false | mp :: _ when mp = mp0 -> false | mp :: _ when mem mp ks -> true | _ :: mpl -> clash mem mp0 ks mpl let mpfiles_clash mp0 ks = clash (fun mp -> Hashtbl.mem (get_mpfiles_content mp)) mp0 ks (List.rev (mpfiles_list ())) let rec params_lookup mp0 ks = function | [] -> false | param :: _ when mp0 = param -> true | param :: params -> if ks = (Mod, List.hd (mp_renaming param)) then params_ren_add param; params_lookup mp0 ks params let visible_clash mp0 ks = let rec clash = function | [] -> false | v :: _ when v.mp = mp0 -> false | v :: vis -> let b = Hashtbl.mem v.content ks in if b && not (is_mp_bound mp0) then true else begin if b then params_ren_add mp0; if params_lookup mp0 ks v.params then false else clash vis end in clash (get_visible ()) (* Same, but with verbose output (and mp0 shouldn't be a MPbound) *) let visible_clash_dbg mp0 ks = let rec clash = function | [] -> None | v :: _ when v.mp = mp0 -> None | v :: vis -> try Some (v.mp,Hashtbl.find v.content ks) with Not_found -> if params_lookup mp0 ks v.params then None else clash vis in clash (get_visible ()) (* After the 1st pass, we can decide which modules will be opened initially *) let opened_libraries () = if not (modular ()) then [] else let used_files = mpfiles_list () in let used_ks = List.map (fun mp -> Mod,string_of_modfile mp) used_files in (* By default, we open all used files. Ambiguities will be resolved later by using qualified names. Nonetheless, we don't open any file A that contains an immediate submodule A.B hiding another file B : otherwise, after such an open, there's no unambiguous way to refer to objects of B. *) let to_open = List.filter (fun mp -> not (List.exists (Hashtbl.mem (get_mpfiles_content mp)) used_ks)) used_files in mpfiles_clear (); List.iter mpfiles_add to_open; mpfiles_list () (*s On-the-fly qualification issues for both monolithic or modular extraction. *) (* [pp_ocaml_gen] below is a function that factorize the printing of both [global_reference] and module names for ocaml. When [k=Mod] then [olab=None], otherwise it contains the label of the reference to print. [rls] is the string list giving the qualified name, short name at the end. *) (* In Coq, we can qualify [M.t] even if we are inside [M], but in Ocaml we cannot do that. So, if [t] gets hidden and we need a long name for it, we duplicate the _definition_ of t in a Coq__XXX module, and similarly for a sub-module [M.N] *) let pp_duplicate k' prefix mp rls olab = let rls', lbl = if k'<>Mod then (* Here rls=[s], the ref to print is ., and olab<>None *) rls, Option.get olab else (* Here rls=s::rls', we search the label for s inside mp *) List.tl rls, get_nth_label_mp (mp_length mp - mp_length prefix) mp in try dottify (check_duplicate prefix lbl :: rls') with Not_found -> assert (get_phase () = Pre); (* otherwise it's too late *) add_duplicate prefix lbl; dottify rls let fstlev_ks k = function | [] -> assert false | [s] -> k,s | s::_ -> Mod,s (* [pp_ocaml_local] : [mp] has something in common with [top_visible ()] but isn't equal to it *) let pp_ocaml_local k prefix mp rls olab = (* what is the largest prefix of [mp] that belongs to [visible]? *) assert (k <> Mod || mp <> prefix); (* mp as whole module isn't in itself *) let rls' = list_skipn (mp_length prefix) rls in let k's = fstlev_ks k rls' in (* Reference r / module path mp is of the form [.s.<...>]. 
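(That is, presumably: the visible [prefix] computed in [pp_ocaml_local], then the first-level name [s], then possibly further components.)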
*) if not (visible_clash prefix k's) then dottify rls' else pp_duplicate (fst k's) prefix mp rls' olab (* [pp_ocaml_bound] : [mp] starts with a [MPbound], and we are not inside (i.e. we are not printing the type of the module parameter) *) let pp_ocaml_bound base rls = (* clash with a MPbound will be detected and fixed by renaming this MPbound *) if get_phase () = Pre then ignore (visible_clash base (Mod,List.hd rls)); dottify rls (* [pp_ocaml_extern] : [mp] isn't local, it is defined in another [MPfile]. *) let pp_ocaml_extern k base rls = match rls with | [] -> assert false | base_s :: rls' -> if (not (modular ())) (* Pseudo qualification with "" *) || (rls' = []) (* Case of a file A.v used as a module later *) || (not (mpfiles_mem base)) (* Module not opened *) || (mpfiles_clash base (fstlev_ks k rls')) (* Conflict in opened files *) || (visible_clash base (fstlev_ks k rls')) (* Local conflict *) then (* We need to fully qualify. Last clash situation is unsupported *) match visible_clash_dbg base (Mod,base_s) with | None -> dottify rls | Some (mp,l) -> error_module_clash base (MPdot (mp,l)) else (* Standard situation : object in an opened file *) dottify rls' (* [pp_ocaml_gen] : choosing between [pp_ocaml_extern] or [pp_ocaml_extern] *) let pp_ocaml_gen k mp rls olab = match common_prefix_from_list mp (get_visible_mps ()) with | Some prefix -> pp_ocaml_local k prefix mp rls olab | None -> let base = base_mp mp in if is_mp_bound base then pp_ocaml_bound base rls else pp_ocaml_extern k base rls (* For Haskell, things are simplier: we have removed (almost) all structures *) let pp_haskell_gen k mp rls = match rls with | [] -> assert false | s::rls' -> let str = pseudo_qualify rls' in let str = if is_upper str && not (upperkind k) then ("_"^str) else str in let prf = if base_mp mp <> top_visible_mp () then s ^ "." else "" in prf ^ str (* Main name printing function for a reference *) let pp_global k r = let ls = ref_renaming (k,r) in assert (List.length ls > 1); let s = List.hd ls in let mp,_,l = repr_of_r r in if mp = top_visible_mp () then (* simpliest situation: definition of r (or use in the same context) *) (* we update the visible environment *) (add_visible (k,s) l; unquote s) else let rls = List.rev ls in (* for what come next it's easier this way *) match lang () with | Scheme -> unquote s (* no modular Scheme extraction... 
*) | Haskell -> if modular () then pp_haskell_gen k mp rls else s | Ocaml -> pp_ocaml_gen k mp rls (Some l) (* The next function is used only in Ocaml extraction...*) let pp_module mp = let ls = mp_renaming mp in match mp with | MPdot (mp0,l) when mp0 = top_visible_mp () -> (* simpliest situation: definition of mp (or use in the same context) *) (* we update the visible environment *) let s = List.hd ls in add_visible (Mod,s) l; s | _ -> pp_ocaml_gen Mod mp (List.rev ls) None (** Special hack for constants of type Ascii.ascii : if an [Extract Inductive ascii => char] has been declared, then the constants are directly turned into chars *) let mk_ind path s = make_mind (MPfile (dirpath_of_string path)) empty_dirpath (mk_label s) let ind_ascii = mk_ind "Coq.Strings.Ascii" "ascii" let check_extract_ascii () = try let char_type = match lang () with | Ocaml -> "char" | Haskell -> "Char" | _ -> raise Not_found in find_custom (IndRef (ind_ascii,0)) = char_type with Not_found -> false let is_list_cons l = List.for_all (function MLcons (_,ConstructRef(_,_),[]) -> true | _ -> false) l let is_native_char = function | MLcons(_,ConstructRef ((kn,0),1),l) -> kn = ind_ascii && check_extract_ascii () && is_list_cons l | _ -> false let pp_native_char c = let rec cumul = function | [] -> 0 | MLcons(_,ConstructRef(_,j),[])::l -> (2-j) + 2 * (cumul l) | _ -> assert false in let l = match c with MLcons(_,_,l) -> l | _ -> assert false in str ("'"^Char.escaped (Char.chr (cumul l))^"'") coq-8.4pl2/plugins/extraction/ExtrOcamlNatBigInt.v0000640000175000001440000000514512010532755021322 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* "Big.big_int" [ "Big.zero" "Big.succ" ] "Big.nat_case". (** Efficient (but uncertified) versions for usual [nat] functions *) Extract Constant plus => "Big.add". Extract Constant mult => "Big.mult". Extract Constant pred => "fun n -> Big.max Big.zero (Big.pred n)". Extract Constant minus => "fun n m -> Big.max Big.zero (Big.sub n m)". Extract Constant max => "Big.max". Extract Constant min => "Big.min". (*Extract Constant nat_beq => "Big.eq".*) Extract Constant EqNat.beq_nat => "Big.eq". Extract Constant EqNat.eq_nat_decide => "Big.eq". Extract Constant Peano_dec.eq_nat_dec => "Big.eq". Extract Constant Compare_dec.nat_compare => "Big.compare_case Eq Lt Gt". Extract Constant Compare_dec.leb => "Big.le". Extract Constant Compare_dec.le_lt_dec => "Big.le". Extract Constant Compare_dec.lt_eq_lt_dec => "Big.compare_case (Some false) (Some true) None". Extract Constant Even.even_odd_dec => "fun n -> Big.sign (Big.mod n Big.two) = 0". Extract Constant Div2.div2 => "fun n -> Big.div n Big.two". Extract Inductive Euclid.diveucl => "(Big.big_int * Big.big_int)" [""]. Extract Constant Euclid.eucl_dev => "fun n m -> Big.quomod m n". Extract Constant Euclid.quotient => "fun n m -> Big.div m n". Extract Constant Euclid.modulo => "fun n m -> Big.modulo m n". (* Require Import Euclid. Definition test n m (H:m>0) := let (q,r,_,_) := eucl_dev m H n in nat_compare n (q*m+r). Extraction "/tmp/test.ml" test fact pred minus max min Div2.div2. 
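(Illustrative note: with the directives above, a Coq pattern-matching such as [match n with O => a | S p => f p end] should extract to Ocaml code of the shape [Big.nat_case (fun _ -> a) (fun p -> f p) n], while [plus] is simply extracted to [Big.add].)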
*) coq-8.4pl2/plugins/extraction/ocaml.ml0000640000175000001440000006177312121620060017121 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* '\'' then str ("'"^s) else str ("' "^s) let pp_abst = function | [] -> mt () | l -> str "fun " ++ prlist_with_sep (fun () -> str " ") pr_id l ++ str " ->" ++ spc () let pp_parameters l = (pp_boxed_tuple pp_tvar l ++ space_if (l<>[])) let pp_string_parameters l = (pp_boxed_tuple str l ++ space_if (l<>[])) let pp_letin pat def body = let fstline = str "let " ++ pat ++ str " =" ++ spc () ++ def in hv 0 (hv 0 (hov 2 fstline ++ spc () ++ str "in") ++ spc () ++ hov 0 body) (*s Ocaml renaming issues. *) let keywords = List.fold_right (fun s -> Idset.add (id_of_string s)) [ "and"; "as"; "assert"; "begin"; "class"; "constraint"; "do"; "done"; "downto"; "else"; "end"; "exception"; "external"; "false"; "for"; "fun"; "function"; "functor"; "if"; "in"; "include"; "inherit"; "initializer"; "lazy"; "let"; "match"; "method"; "module"; "mutable"; "new"; "object"; "of"; "open"; "or"; "parser"; "private"; "rec"; "sig"; "struct"; "then"; "to"; "true"; "try"; "type"; "val"; "virtual"; "when"; "while"; "with"; "mod"; "land"; "lor"; "lxor"; "lsl"; "lsr"; "asr" ; "unit" ; "_" ; "__" ] Idset.empty let pp_open mp = str ("open "^ string_of_modfile mp ^"\n") let preamble _ used_modules usf = prlist pp_open used_modules ++ (if used_modules = [] then mt () else fnl ()) ++ (if usf.tdummy || usf.tunknown then str "type __ = Obj.t\n" else mt()) ++ (if usf.mldummy then str "let __ = let rec f _ = Obj.repr f in Obj.repr f\n" else mt ()) ++ (if usf.tdummy || usf.tunknown || usf.mldummy then fnl () else mt ()) let sig_preamble _ used_modules usf = prlist pp_open used_modules ++ (if used_modules = [] then mt () else fnl ()) ++ (if usf.tdummy || usf.tunknown then str "type __ = Obj.t\n\n" else mt()) (*s The pretty-printer for Ocaml syntax*) (* Beware of the side-effects of [pp_global] and [pp_modname]. They are used to update table of content for modules. Many [let] below should not be altered since they force evaluation order. *) let str_global k r = if is_inline_custom r then find_custom r else Common.pp_global k r let pp_global k r = str (str_global k r) let pp_modname mp = str (Common.pp_module mp) let is_infix r = is_inline_custom r && (let s = find_custom r in let l = String.length s in l >= 2 && s.[0] = '(' && s.[l-1] = ')') let get_infix r = let s = find_custom r in String.sub s 1 (String.length s - 2) let get_ind = function | IndRef _ as r -> r | ConstructRef (ind,_) -> IndRef ind | _ -> assert false let pp_one_field r i = function | Some r -> pp_global Term r | None -> pp_global Type (get_ind r) ++ str "__" ++ int i let pp_field r fields i = pp_one_field r i (List.nth fields i) let pp_fields r fields = list_map_i (pp_one_field r) 0 fields (*s Pretty-printing of types. [par] is a boolean indicating whether parentheses are needed or not. 
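For instance (illustrative): [Tarr (t1,t2)] is printed as [t1 -> t2], parenthesizing the left-hand side when needed, and a type variable [Tvar i] for which no name is available in [vl] falls back to ['a] followed by the index, e.g. ['a1].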
*) let rec pp_type par vl t = let rec pp_rec par = function | Tmeta _ | Tvar' _ | Taxiom -> assert false | Tvar i -> (try pp_tvar (List.nth vl (pred i)) with e when Errors.noncritical e -> (str "'a" ++ int i)) | Tglob (r,[a1;a2]) when is_infix r -> pp_par par (pp_rec true a1 ++ str (get_infix r) ++ pp_rec true a2) | Tglob (r,[]) -> pp_global Type r | Tglob (IndRef(kn,0),l) when not (keep_singleton ()) && kn = mk_ind "Coq.Init.Specif" "sig" -> pp_tuple_light pp_rec l | Tglob (r,l) -> pp_tuple_light pp_rec l ++ spc () ++ pp_global Type r | Tarr (t1,t2) -> pp_par par (pp_rec true t1 ++ spc () ++ str "->" ++ spc () ++ pp_rec false t2) | Tdummy _ -> str "__" | Tunknown -> str "__" in hov 0 (pp_rec par t) (*s Pretty-printing of expressions. [par] indicates whether parentheses are needed or not. [env] is the list of names for the de Bruijn variables. [args] is the list of collected arguments (already pretty-printed). *) let is_bool_patt p s = try let r = match p with | Pusual r -> r | Pcons (r,[]) -> r | _ -> raise Not_found in find_custom r = s with Not_found -> false let is_ifthenelse = function | [|([],p1,_);([],p2,_)|] -> is_bool_patt p1 "true" && is_bool_patt p2 "false" | _ -> false let expr_needs_par = function | MLlam _ -> true | MLcase (_,_,[|_|]) -> false | MLcase (_,_,pv) -> not (is_ifthenelse pv) | _ -> false let rec pp_expr par env args = let apply st = pp_apply st par args and apply2 st = pp_apply2 st par args in function | MLrel n -> let id = get_db_name n env in apply (pr_id id) | MLapp (f,args') -> let stl = List.map (pp_expr true env []) args' in pp_expr par env (stl @ args) f | MLlam _ as a -> let fl,a' = collect_lams a in let fl = List.map id_of_mlid fl in let fl,env' = push_vars fl env in let st = pp_abst (List.rev fl) ++ pp_expr false env' [] a' in apply2 st | MLletin (id,a1,a2) -> let i,env' = push_vars [id_of_mlid id] env in let pp_id = pr_id (List.hd i) and pp_a1 = pp_expr false env [] a1 and pp_a2 = pp_expr (not par && expr_needs_par a2) env' [] a2 in hv 0 (apply2 (pp_letin pp_id pp_a1 pp_a2)) | MLglob r -> (try let args = list_skipn (projection_arity r) args in let record = List.hd args in pp_apply (record ++ str "." ++ pp_global Term r) par (List.tl args) with e when Errors.noncritical e -> apply (pp_global Term r)) | MLfix (i,ids,defs) -> let ids',env' = push_vars (List.rev (Array.to_list ids)) env in pp_fix par env' i (Array.of_list (List.rev ids'),defs) args | MLexn s -> (* An [MLexn] may be applied, but I don't really care. *) pp_par par (str "assert false" ++ spc () ++ str ("(* "^s^" *)")) | MLdummy -> str "__" (* An [MLdummy] may be applied, but I don't really care. 
*) | MLmagic a -> pp_apply (str "Obj.magic") par (pp_expr true env [] a :: args) | MLaxiom -> pp_par par (str "failwith \"AXIOM TO BE REALIZED\"") | MLcons (_,r,a) as c -> assert (args=[]); begin match a with | _ when is_native_char c -> pp_native_char c | [a1;a2] when is_infix r -> let pp = pp_expr true env [] in pp_par par (pp a1 ++ str (get_infix r) ++ pp a2) | _ when is_coinductive r -> let ne = (a<>[]) in let tuple = space_if ne ++ pp_tuple (pp_expr true env []) a in pp_par par (str "lazy " ++ pp_par ne (pp_global Cons r ++ tuple)) | [] -> pp_global Cons r | _ -> let fds = get_record_fields r in if fds <> [] then pp_record_pat (pp_fields r fds, List.map (pp_expr true env []) a) else let tuple = pp_tuple (pp_expr true env []) a in if str_global Cons r = "" (* hack Extract Inductive prod *) then tuple else pp_par par (pp_global Cons r ++ spc () ++ tuple) end | MLtuple l -> assert (args = []); pp_boxed_tuple (pp_expr true env []) l | MLcase (_, t, pv) when is_custom_match pv -> if not (is_regular_match pv) then error "Cannot mix yet user-given match and general patterns."; let mkfun (ids,_,e) = if ids <> [] then named_lams (List.rev ids) e else dummy_lams (ast_lift 1 e) 1 in let pp_branch tr = pp_expr true env [] (mkfun tr) ++ fnl () in let inner = str (find_custom_match pv) ++ fnl () ++ prvect pp_branch pv ++ pp_expr true env [] t in apply2 (hov 2 inner) | MLcase (typ, t, pv) -> let head = if not (is_coinductive_type typ) then pp_expr false env [] t else (str "Lazy.force" ++ spc () ++ pp_expr true env [] t) in (* First, can this match be printed as a mere record projection ? *) (try pp_record_proj par env typ t pv args with Impossible -> (* Second, can this match be printed as a let-in ? *) if Array.length pv = 1 then let s1,s2 = pp_one_pat env pv.(0) in hv 0 (apply2 (pp_letin s1 head s2)) else (* Third, can this match be printed as [if ... then ... else] ? *) (try apply2 (pp_ifthenelse env head pv) with Not_found -> (* Otherwise, standard match *) apply2 (v 0 (str "match " ++ head ++ str " with" ++ fnl () ++ pp_pat env pv)))) and pp_record_proj par env typ t pv args = (* Can a match be printed as a mere record projection ? *) let fields = record_fields_of_type typ in if fields = [] then raise Impossible; if Array.length pv <> 1 then raise Impossible; if has_deep_pattern pv then raise Impossible; let (ids,pat,body) = pv.(0) in let n = List.length ids in let no_patvar a = not (List.exists (ast_occurs_itvl 1 n) a) in let rel_i,a = match body with | MLrel i when i <= n -> i,[] | MLapp(MLrel i, a) when i<=n && no_patvar a -> i,a | _ -> raise Impossible in let rec lookup_rel i idx = function | Prel j :: l -> if i = j then idx else lookup_rel i (idx+1) l | Pwild :: l -> lookup_rel i (idx+1) l | _ -> raise Impossible in let r,idx = match pat with | Pusual r -> r, n-rel_i | Pcons (r,l) -> r, lookup_rel rel_i 0 l | _ -> raise Impossible in if is_infix r then raise Impossible; let env' = snd (push_vars (List.rev_map id_of_mlid ids) env) in let pp_args = (List.map (pp_expr true env' []) a) @ args in let pp_head = pp_expr true env [] t ++ str "." 
++ pp_field r fields idx in pp_apply pp_head par pp_args and pp_record_pat (fields, args) = str "{ " ++ prlist_with_sep (fun () -> str ";" ++ spc ()) (fun (f,a) -> f ++ str " =" ++ spc () ++ a) (List.combine fields args) ++ str " }" and pp_cons_pat r ppl = if is_infix r && List.length ppl = 2 then List.hd ppl ++ str (get_infix r) ++ List.hd (List.tl ppl) else let fields = get_record_fields r in if fields <> [] then pp_record_pat (pp_fields r fields, ppl) else if str_global Cons r = "" then pp_boxed_tuple identity ppl (* Hack Extract Inductive prod *) else pp_global Cons r ++ space_if (ppl<>[]) ++ pp_boxed_tuple identity ppl and pp_gen_pat ids env = function | Pcons (r, l) -> pp_cons_pat r (List.map (pp_gen_pat ids env) l) | Pusual r -> pp_cons_pat r (List.map pr_id ids) | Ptuple l -> pp_boxed_tuple (pp_gen_pat ids env) l | Pwild -> str "_" | Prel n -> pr_id (get_db_name n env) and pp_ifthenelse env expr pv = match pv with | [|([],tru,the);([],fal,els)|] when (is_bool_patt tru "true") && (is_bool_patt fal "false") -> hv 0 (hov 2 (str "if " ++ expr) ++ spc () ++ hov 2 (str "then " ++ hov 2 (pp_expr (expr_needs_par the) env [] the)) ++ spc () ++ hov 2 (str "else " ++ hov 2 (pp_expr (expr_needs_par els) env [] els))) | _ -> raise Not_found and pp_one_pat env (ids,p,t) = let ids',env' = push_vars (List.rev_map id_of_mlid ids) env in pp_gen_pat (List.rev ids') env' p, pp_expr (expr_needs_par t) env' [] t and pp_pat env pv = prvecti (fun i x -> let s1,s2 = pp_one_pat env x in hv 2 (hov 4 (str "| " ++ s1 ++ str " ->") ++ spc () ++ hov 2 s2) ++ if i = Array.length pv - 1 then mt () else fnl ()) pv and pp_function env t = let bl,t' = collect_lams t in let bl,env' = push_vars (List.map id_of_mlid bl) env in match t' with | MLcase(Tglob(r,_),MLrel 1,pv) when not (is_coinductive r) && get_record_fields r = [] && not (is_custom_match pv) -> if not (ast_occurs 1 (MLcase(Tunknown,MLdummy,pv))) then pr_binding (List.rev (List.tl bl)) ++ str " = function" ++ fnl () ++ v 0 (pp_pat env' pv) else pr_binding (List.rev bl) ++ str " = match " ++ pr_id (List.hd bl) ++ str " with" ++ fnl () ++ v 0 (pp_pat env' pv) | _ -> pr_binding (List.rev bl) ++ str " =" ++ fnl () ++ str " " ++ hov 2 (pp_expr false env' [] t') (*s names of the functions ([ids]) are already pushed in [env], and passed here just for convenience. *) and pp_fix par env i (ids,bl) args = pp_par par (v 0 (str "let rec " ++ prvect_with_sep (fun () -> fnl () ++ str "and ") (fun (fi,ti) -> pr_id fi ++ pp_function env ti) (array_map2 (fun id b -> (id,b)) ids bl) ++ fnl () ++ hov 2 (str "in " ++ pp_apply (pr_id ids.(i)) false args))) let pp_val e typ = hov 4 (str "(** val " ++ e ++ str " :" ++ spc () ++ pp_type false [] typ ++ str " **)") ++ fnl2 () (*s Pretty-printing of [Dfix] *) let pp_Dfix (rv,c,t) = let names = Array.map (fun r -> if is_inline_custom r then mt () else pp_global Term r) rv in let rec pp init i = if i >= Array.length rv then (if init then failwith "empty phrase" else mt ()) else let void = is_inline_custom rv.(i) || (not (is_custom rv.(i)) && c.(i) = MLexn "UNUSED") in if void then pp init (i+1) else let def = if is_custom rv.(i) then str " = " ++ str (find_custom rv.(i)) else pp_function (empty_env ()) c.(i) in (if init then mt () else fnl2 ()) ++ pp_val names.(i) t.(i) ++ str (if init then "let rec " else "and ") ++ names.(i) ++ def ++ pp false (i+1) in pp true 0 (*s Pretty-printing of inductive types declaration. 
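For instance (illustrative): a two-constructor inductive with one unary constructor is rendered roughly as [type color = | White | Black of nat], and an inductive with no constructors at all is rendered as the Ocaml type [unit], with a comment signalling the empty inductive.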
*) let pp_equiv param_list name = function | NoEquiv, _ -> mt () | Equiv kn, i -> str " = " ++ pp_parameters param_list ++ pp_global Type (IndRef (mind_of_kn kn,i)) | RenEquiv ren, _ -> str " = " ++ pp_parameters param_list ++ str (ren^".") ++ name let pp_comment s = str "(* " ++ s ++ str " *)" let pp_one_ind prefix ip_equiv pl name cnames ctyps = let pl = rename_tvars keywords pl in let pp_constructor i typs = (if i=0 then mt () else fnl ()) ++ hov 3 (str "| " ++ cnames.(i) ++ (if typs = [] then mt () else str " of ") ++ prlist_with_sep (fun () -> spc () ++ str "* ") (pp_type true pl) typs) in pp_parameters pl ++ str prefix ++ name ++ pp_equiv pl name ip_equiv ++ str " =" ++ if Array.length ctyps = 0 then str " unit (* empty inductive *)" else fnl () ++ v 0 (prvecti pp_constructor ctyps) let pp_logical_ind packet = pp_comment (pr_id packet.ip_typename ++ str " : logical inductive") ++ fnl () ++ pp_comment (str "with constructors : " ++ prvect_with_sep spc pr_id packet.ip_consnames) ++ fnl () let pp_singleton kn packet = let name = pp_global Type (IndRef (kn,0)) in let l = rename_tvars keywords packet.ip_vars in hov 2 (str "type " ++ pp_parameters l ++ name ++ str " =" ++ spc () ++ pp_type false l (List.hd packet.ip_types.(0)) ++ fnl () ++ pp_comment (str "singleton inductive, whose constructor was " ++ pr_id packet.ip_consnames.(0))) let pp_record kn fields ip_equiv packet = let ind = IndRef (kn,0) in let name = pp_global Type ind in let fieldnames = pp_fields ind fields in let l = List.combine fieldnames packet.ip_types.(0) in let pl = rename_tvars keywords packet.ip_vars in str "type " ++ pp_parameters pl ++ name ++ pp_equiv pl name ip_equiv ++ str " = { "++ hov 0 (prlist_with_sep (fun () -> str ";" ++ spc ()) (fun (p,t) -> p ++ str " : " ++ pp_type true pl t) l) ++ str " }" let pp_coind pl name = let pl = rename_tvars keywords pl in pp_parameters pl ++ name ++ str " = " ++ pp_parameters pl ++ str "__" ++ name ++ str " Lazy.t" ++ fnl() ++ str "and " let pp_ind co kn ind = let prefix = if co then "__" else "" in let some = ref false in let init= ref (str "type ") in let names = Array.mapi (fun i p -> if p.ip_logical then mt () else pp_global Type (IndRef (kn,i))) ind.ind_packets in let cnames = Array.mapi (fun i p -> if p.ip_logical then [||] else Array.mapi (fun j _ -> pp_global Cons (ConstructRef ((kn,i),j+1))) p.ip_types) ind.ind_packets in let rec pp i = if i >= Array.length ind.ind_packets then mt () else let ip = (kn,i) in let ip_equiv = ind.ind_equiv, i in let p = ind.ind_packets.(i) in if is_custom (IndRef ip) then pp (i+1) else begin some := true; if p.ip_logical then pp_logical_ind p ++ pp (i+1) else let s = !init in begin init := (fnl () ++ str "and "); s ++ (if co then pp_coind p.ip_vars names.(i) else mt ()) ++ pp_one_ind prefix ip_equiv p.ip_vars names.(i) cnames.(i) p.ip_types ++ pp (i+1) end end in let st = pp 0 in if !some then st else failwith "empty phrase" (*s Pretty-printing of a declaration. 
*) let pp_mind kn i = match i.ind_kind with | Singleton -> pp_singleton kn i.ind_packets.(0) | Coinductive -> pp_ind true kn i | Record fields -> pp_record kn fields (i.ind_equiv,0) i.ind_packets.(0) | Standard -> pp_ind false kn i let pp_decl = function | Dtype (r,_,_) when is_inline_custom r -> failwith "empty phrase" | Dterm (r,_,_) when is_inline_custom r -> failwith "empty phrase" | Dind (kn,i) -> pp_mind kn i | Dtype (r, l, t) -> let name = pp_global Type r in let l = rename_tvars keywords l in let ids, def = try let ids,s = find_type_custom r in pp_string_parameters ids, str "=" ++ spc () ++ str s with Not_found -> pp_parameters l, if t = Taxiom then str "(* AXIOM TO BE REALIZED *)" else str "=" ++ spc () ++ pp_type false l t in hov 2 (str "type " ++ ids ++ name ++ spc () ++ def) | Dterm (r, a, t) -> let def = if is_custom r then str (" = " ^ find_custom r) else if is_projection r then (prvect str (Array.make (projection_arity r) " _")) ++ str " x = x." else pp_function (empty_env ()) a in let name = pp_global Term r in let postdef = if is_projection r then name else mt () in pp_val name t ++ hov 0 (str "let " ++ name ++ def ++ postdef) | Dfix (rv,defs,typs) -> pp_Dfix (rv,defs,typs) let pp_alias_decl ren = function | Dind (kn,i) -> pp_mind kn { i with ind_equiv = RenEquiv ren } | Dtype (r, l, _) -> let name = pp_global Type r in let l = rename_tvars keywords l in let ids = pp_parameters l in hov 2 (str "type " ++ ids ++ name ++ str " =" ++ spc () ++ ids ++ str (ren^".") ++ name) | Dterm (r, a, t) -> let name = pp_global Term r in hov 2 (str "let " ++ name ++ str (" = "^ren^".") ++ name) | Dfix (rv, _, _) -> prvecti (fun i r -> if is_inline_custom r then mt () else let name = pp_global Term r in hov 2 (str "let " ++ name ++ str (" = "^ren^".") ++ name) ++ fnl ()) rv let pp_spec = function | Sval (r,_) when is_inline_custom r -> failwith "empty phrase" | Stype (r,_,_) when is_inline_custom r -> failwith "empty phrase" | Sind (kn,i) -> pp_mind kn i | Sval (r,t) -> let def = pp_type false [] t in let name = pp_global Term r in hov 2 (str "val " ++ name ++ str " :" ++ spc () ++ def) | Stype (r,vl,ot) -> let name = pp_global Type r in let l = rename_tvars keywords vl in let ids, def = try let ids, s = find_type_custom r in pp_string_parameters ids, str "= " ++ str s with Not_found -> let ids = pp_parameters l in match ot with | None -> ids, mt () | Some Taxiom -> ids, str "(* AXIOM TO BE REALIZED *)" | Some t -> ids, str "=" ++ spc () ++ pp_type false l t in hov 2 (str "type " ++ ids ++ name ++ spc () ++ def) let pp_alias_spec ren = function | Sind (kn,i) -> pp_mind kn { i with ind_equiv = RenEquiv ren } | Stype (r,l,_) -> let name = pp_global Type r in let l = rename_tvars keywords l in let ids = pp_parameters l in hov 2 (str "type " ++ ids ++ name ++ str " =" ++ spc () ++ ids ++ str (ren^".") ++ name) | Sval _ -> assert false let rec pp_specif = function | (_,Spec (Sval _ as s)) -> pp_spec s | (l,Spec s) -> (try let ren = Common.check_duplicate (top_visible_mp ()) l in hov 1 (str ("module "^ren^" : sig ") ++ fnl () ++ pp_spec s) ++ fnl () ++ str "end" ++ fnl () ++ pp_alias_spec ren s with Not_found -> pp_spec s) | (l,Smodule mt) -> let def = pp_module_type [] mt in let def' = pp_module_type [] mt in let name = pp_modname (MPdot (top_visible_mp (), l)) in hov 1 (str "module " ++ name ++ str " : " ++ fnl () ++ def) ++ (try let ren = Common.check_duplicate (top_visible_mp ()) l in fnl () ++ hov 1 (str ("module "^ren^" : ") ++ fnl () ++ def') with Not_found -> Pp.mt ()) | (l,Smodtype mt) 
-> let def = pp_module_type [] mt in let name = pp_modname (MPdot (top_visible_mp (), l)) in hov 1 (str "module type " ++ name ++ str " = " ++ fnl () ++ def) ++ (try let ren = Common.check_duplicate (top_visible_mp ()) l in fnl () ++ str ("module type "^ren^" = ") ++ name with Not_found -> Pp.mt ()) and pp_module_type params = function | MTident kn -> pp_modname kn | MTfunsig (mbid, mt, mt') -> let typ = pp_module_type [] mt in let name = pp_modname (MPbound mbid) in let def = pp_module_type (MPbound mbid :: params) mt' in str "functor (" ++ name ++ str ":" ++ typ ++ str ") ->" ++ fnl () ++ def | MTsig (mp, sign) -> push_visible mp params; let l = map_succeed pp_specif sign in pop_visible (); str "sig " ++ fnl () ++ v 1 (str " " ++ prlist_with_sep fnl2 identity l) ++ fnl () ++ str "end" | MTwith(mt,ML_With_type(idl,vl,typ)) -> let ids = pp_parameters (rename_tvars keywords vl) in let mp_mt = msid_of_mt mt in let l,idl' = list_sep_last idl in let mp_w = List.fold_left (fun mp l -> MPdot(mp,label_of_id l)) mp_mt idl' in let r = ConstRef (make_con mp_w empty_dirpath (label_of_id l)) in push_visible mp_mt []; let pp_w = str " with type " ++ ids ++ pp_global Type r in pop_visible(); pp_module_type [] mt ++ pp_w ++ str " = " ++ pp_type false vl typ | MTwith(mt,ML_With_module(idl,mp)) -> let mp_mt = msid_of_mt mt in let mp_w = List.fold_left (fun mp id -> MPdot(mp,label_of_id id)) mp_mt idl in push_visible mp_mt []; let pp_w = str " with module " ++ pp_modname mp_w in pop_visible (); pp_module_type [] mt ++ pp_w ++ str " = " ++ pp_modname mp let is_short = function MEident _ | MEapply _ -> true | _ -> false let rec pp_structure_elem = function | (l,SEdecl d) -> (try let ren = Common.check_duplicate (top_visible_mp ()) l in hov 1 (str ("module "^ren^" = struct ") ++ fnl () ++ pp_decl d) ++ fnl () ++ str "end" ++ fnl () ++ pp_alias_decl ren d with Not_found -> pp_decl d) | (l,SEmodule m) -> let typ = (* virtual printing of the type, in order to have a correct mli later*) if Common.get_phase () = Pre then str ": " ++ pp_module_type [] m.ml_mod_type else mt () in let def = pp_module_expr [] m.ml_mod_expr in let name = pp_modname (MPdot (top_visible_mp (), l)) in hov 1 (str "module " ++ name ++ typ ++ str " = " ++ (if (is_short m.ml_mod_expr) then mt () else fnl ()) ++ def) ++ (try let ren = Common.check_duplicate (top_visible_mp ()) l in fnl () ++ str ("module "^ren^" = ") ++ name with Not_found -> mt ()) | (l,SEmodtype m) -> let def = pp_module_type [] m in let name = pp_modname (MPdot (top_visible_mp (), l)) in hov 1 (str "module type " ++ name ++ str " = " ++ fnl () ++ def) ++ (try let ren = Common.check_duplicate (top_visible_mp ()) l in fnl () ++ str ("module type "^ren^" = ") ++ name with Not_found -> mt ()) and pp_module_expr params = function | MEident mp -> pp_modname mp | MEapply (me, me') -> pp_module_expr [] me ++ str "(" ++ pp_module_expr [] me' ++ str ")" | MEfunctor (mbid, mt, me) -> let name = pp_modname (MPbound mbid) in let typ = pp_module_type [] mt in let def = pp_module_expr (MPbound mbid :: params) me in str "functor (" ++ name ++ str ":" ++ typ ++ str ") ->" ++ fnl () ++ def | MEstruct (mp, sel) -> push_visible mp params; let l = map_succeed pp_structure_elem sel in pop_visible (); str "struct " ++ fnl () ++ v 1 (str " " ++ prlist_with_sep fnl2 identity l) ++ fnl () ++ str "end" let do_struct f s = let pp s = try f s ++ fnl2 () with Failure "empty phrase" -> mt () in let ppl (mp,sel) = push_visible mp []; let p = prlist_strict pp sel in (* for monolithic extraction, we try to 
simulate the unavailability of [MPfile] in names by artificially nesting these [MPfile] *) (if modular () then pop_visible ()); p in let p = prlist_strict ppl s in (if not (modular ()) then repeat (List.length s) pop_visible ()); p let pp_struct s = do_struct pp_structure_elem s let pp_signature s = do_struct pp_specif s let pp_decl d = try pp_decl d with Failure "empty phrase" -> mt () let ocaml_descr = { keywords = keywords; file_suffix = ".ml"; preamble = preamble; pp_struct = pp_struct; sig_suffix = Some ".mli"; sig_preamble = sig_preamble; pp_sig = pp_signature; pp_decl = pp_decl; } coq-8.4pl2/plugins/extraction/ExtrOcamlIntConv.v0000640000175000001440000000564112010532755021064 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int. Parameter int_opp : int -> int. Parameter int_twice : int -> int. Extract Inlined Constant int => int. Extract Inlined Constant int_zero => "0". Extract Inlined Constant int_succ => "succ". Extract Inlined Constant int_opp => "-". Extract Inlined Constant int_twice => "2 *". Definition int_of_nat : nat -> int := (fix loop acc n := match n with | O => acc | S n => loop (int_succ acc) n end) int_zero. Fixpoint int_of_pos p := match p with | xH => int_succ int_zero | xO p => int_twice (int_of_pos p) | xI p => int_succ (int_twice (int_of_pos p)) end. Fixpoint int_of_z z := match z with | Z0 => int_zero | Zpos p => int_of_pos p | Zneg p => int_opp (int_of_pos p) end. Fixpoint int_of_n n := match n with | N0 => int_zero | Npos p => int_of_pos p end. (** NB: as for [pred] or [minus], [nat_of_int], [n_of_int] and [pos_of_int] are total and return zero (resp. one) for non-positive inputs. *) Parameter int_natlike_rec : forall A, A -> (A->A) -> int -> A. Extract Constant int_natlike_rec => "fun fO fS -> let rec loop acc i = if i <= 0 then acc else loop (fS acc) (i-1) in loop fO". Definition nat_of_int : int -> nat := int_natlike_rec _ O S. Parameter int_poslike_rec : forall A, A -> (A->A) -> (A->A) -> int -> A. Extract Constant int_poslike_rec => "fun f1 f2x f2x1 -> let rec loop i = if i <= 1 then f1 else if i land 1 = 0 then f2x (loop (i lsr 1)) else f2x1 (loop (i lsr 1)) in loop". Definition pos_of_int : int -> positive := int_poslike_rec _ xH xO xI. Parameter int_zlike_case : forall A, A -> (int->A) -> (int->A) -> int -> A. Extract Constant int_zlike_case => "fun f0 fpos fneg i -> if i = 0 then f0 else if i>0 then fpos i else fneg (-i)". Definition z_of_int : int -> Z := int_zlike_case _ Z0 (fun i => Zpos (pos_of_int i)) (fun i => Zneg (pos_of_int i)). Definition n_of_int : int -> N := int_zlike_case _ N0 (fun i => Npos (pos_of_int i)) (fun _ => N0). (** Warning: [z_of_int] is currently wrong for Ocaml's [min_int], since [min_int] has no positive opposite ([-min_int = min_int]). *) (* Extraction "/tmp/test.ml" nat_of_int int_of_nat pos_of_int int_of_pos z_of_int int_of_z n_of_int int_of_n. *)coq-8.4pl2/plugins/extraction/ExtrOcamlBasic.v0000640000175000001440000000262712010532755020526 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool [ true false ]. Extract Inductive option => option [ Some None ]. Extract Inductive unit => unit [ "()" ]. Extract Inductive list => list [ "[]" "( :: )" ]. Extract Inductive prod => "( * )" [ "" ]. 
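(* Illustrative sketch: with the directives above, the Coq projection [fst] should extract to Ocaml code along the lines of [let fst = function (x, _) -> x], i.e. Coq pairs become native Ocaml pairs, and [bool], [option], [list] and [unit] are mapped to their native Ocaml counterparts. *)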
(** NB: The "" above is a hack, but produce nicer code than "(,)" *) (** Mapping sumbool to bool and sumor to option is not always nicer, but it helps when realizing stuff like [lt_eq_lt_dec] *) Extract Inductive sumbool => bool [ true false ]. Extract Inductive sumor => option [ Some None ]. (** Restore lazyness of andb, orb. NB: without these Extract Constant, andb/orb would be inlined by extraction in order to have lazyness, producing inelegant (if ... then ... else false) and (if ... then true else ...). *) Extract Inlined Constant andb => "(&&)". Extract Inlined Constant orb => "(||)". coq-8.4pl2/plugins/extraction/miniml.mli0000640000175000001440000001404412010532755017463 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* module_path list -> unsafe_needs -> std_ppcmds; pp_struct : ml_structure -> std_ppcmds; (* Concerning a possible interface file *) sig_suffix : string option; sig_preamble : identifier -> module_path list -> unsafe_needs -> std_ppcmds; pp_sig : ml_signature -> std_ppcmds; (* for an isolated declaration print *) pp_decl : ml_decl -> std_ppcmds; } coq-8.4pl2/plugins/extraction/ExtrOcamlZInt.v0000640000175000001440000000622712063736511020375 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* int [ "(fun p->1+2*p)" "(fun p->2*p)" "1" ] "(fun f2p1 f2p f1 p -> if p<=1 then f1 () else if p mod 2 = 0 then f2p (p/2) else f2p1 (p/2))". Extract Inductive Z => int [ "0" "" "(~-)" ] "(fun f0 fp fn z -> if z=0 then f0 () else if z>0 then fp z else fn (-z))". Extract Inductive N => int [ "0" "" ] "(fun f0 fp n -> if n=0 then f0 () else fp n)". (** Nota: the "" above is used as an identity function "(fun p->p)" *) (** Efficient (but uncertified) versions for usual functions *) Extract Constant Pos.add => "(+)". Extract Constant Pos.succ => "Pervasives.succ". Extract Constant Pos.pred => "fun n -> Pervasives.max 1 (n-1)". Extract Constant Pos.sub => "fun n m -> Pervasives.max 1 (n-m)". Extract Constant Pos.mul => "( * )". Extract Constant Pos.min => "Pervasives.min". Extract Constant Pos.max => "Pervasives.max". Extract Constant Pos.compare => "fun x y -> if x=y then Eq else if x "fun x y c -> if x=y then c else if x "(+)". Extract Constant N.succ => "Pervasives.succ". Extract Constant N.pred => "fun n -> Pervasives.max 0 (n-1)". Extract Constant N.sub => "fun n m -> Pervasives.max 0 (n-m)". Extract Constant N.mul => "( * )". Extract Constant N.min => "Pervasives.min". Extract Constant N.max => "Pervasives.max". Extract Constant N.div => "fun a b -> if b=0 then 0 else a/b". Extract Constant N.modulo => "fun a b -> if b=0 then a else a mod b". Extract Constant N.compare => "fun x y -> if x=y then Eq else if x "(+)". Extract Constant Z.succ => "Pervasives.succ". Extract Constant Z.pred => "Pervasives.pred". Extract Constant Z.sub => "(-)". Extract Constant Z.mul => "( * )". Extract Constant Z.opp => "(~-)". Extract Constant Z.abs => "Pervasives.abs". Extract Constant Z.min => "Pervasives.min". Extract Constant Z.max => "Pervasives.max". Extract Constant Z.compare => "fun x y -> if x=y then Eq else if x "fun p -> p". Extract Constant Z.abs_N => "Pervasives.abs". (** Z.div and Z.modulo are quite complex to define in terms of (/) and (mod). 
For the moment we don't even try *) coq-8.4pl2/plugins/extraction/big.ml0000640000175000001440000001326312010532755016570 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 then fp z else fn (opp z) let compare_case e l g x y = let s = compare x y in if s = 0 then e else if s<0 then l else g let nat_rec fO fS = let rec loop acc n = if sign n <= 0 then acc else loop (fS acc) (pred n) in loop fO let positive_rec f2p1 f2p f1 = let rec loop n = if le n one then f1 else let (q,r) = quomod n two in if eq r zero then f2p (loop q) else f2p1 (loop q) in loop let z_rec fO fp fn = z_case (fun _ -> fO) fp fn coq-8.4pl2/plugins/extraction/ocaml.mli0000640000175000001440000000106712010532755017272 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Idset.add (id_of_string s)) [ "define"; "let"; "lambda"; "lambdas"; "match"; "apply"; "car"; "cdr"; "error"; "delay"; "force"; "_"; "__"] Idset.empty let preamble _ _ usf = str ";; This extracted scheme code relies on some additional macros\n" ++ str ";; available at http://www.pps.jussieu.fr/~letouzey/scheme\n" ++ str "(load \"macros_extr.scm\")\n\n" ++ (if usf.mldummy then str "(define __ (lambda (_) __))\n\n" else mt ()) let pr_id id = let s = string_of_id id in for i = 0 to String.length s - 1 do if s.[i] = '\'' then s.[i] <- '~' done; str s let paren = pp_par true let pp_abst st = function | [] -> assert false | [id] -> paren (str "lambda " ++ paren (pr_id id) ++ spc () ++ st) | l -> paren (str "lambdas " ++ paren (prlist_with_sep spc pr_id l) ++ spc () ++ st) let pp_apply st _ = function | [] -> st | [a] -> hov 2 (paren (st ++ spc () ++ a)) | args -> hov 2 (paren (str "@ " ++ st ++ (prlist_strict (fun x -> spc () ++ x) args))) (*s The pretty-printer for Scheme syntax *) let pp_global k r = str (Common.pp_global k r) (*s Pretty-printing of expressions. *) let rec pp_expr env args = let apply st = pp_apply st true args in function | MLrel n -> let id = get_db_name n env in apply (pr_id id) | MLapp (f,args') -> let stl = List.map (pp_expr env []) args' in pp_expr env (stl @ args) f | MLlam _ as a -> let fl,a' = collect_lams a in let fl,env' = push_vars (List.map id_of_mlid fl) env in apply (pp_abst (pp_expr env' [] a') (List.rev fl)) | MLletin (id,a1,a2) -> let i,env' = push_vars [id_of_mlid id] env in apply (hv 0 (hov 2 (paren (str "let " ++ paren (paren (pr_id (List.hd i) ++ spc () ++ pp_expr env [] a1)) ++ spc () ++ hov 0 (pp_expr env' [] a2))))) | MLglob r -> apply (pp_global Term r) | MLcons (_,r,args') -> assert (args=[]); let st = str "`" ++ paren (pp_global Cons r ++ (if args' = [] then mt () else spc ()) ++ prlist_with_sep spc (pp_cons_args env) args') in if is_coinductive r then paren (str "delay " ++ st) else st | MLtuple _ -> error "Cannot handle tuples in Scheme yet." | MLcase (_,_,pv) when not (is_regular_match pv) -> error "Cannot handle general patterns in Scheme yet." 
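(* For the record, the shapes produced by this printer are roughly: an abstraction becomes [(lambda (x) ...)] or [(lambdas (x y) ...)], an application [(f a)] or [(@ f a b)] when there are several arguments, and a let-in [(let ((x e1)) e2)]; the exact layout depends on the [hov]/[paren] boxes used above. *)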
| MLcase (_,t,pv) when is_custom_match pv -> let mkfun (ids,_,e) = if ids <> [] then named_lams (List.rev ids) e else dummy_lams (ast_lift 1 e) 1 in apply (paren (hov 2 (str (find_custom_match pv) ++ fnl () ++ prvect (fun tr -> pp_expr env [] (mkfun tr) ++ fnl ()) pv ++ pp_expr env [] t))) | MLcase (typ,t, pv) -> let e = if not (is_coinductive_type typ) then pp_expr env [] t else paren (str "force" ++ spc () ++ pp_expr env [] t) in apply (v 3 (paren (str "match " ++ e ++ fnl () ++ pp_pat env pv))) | MLfix (i,ids,defs) -> let ids',env' = push_vars (List.rev (Array.to_list ids)) env in pp_fix env' i (Array.of_list (List.rev ids'),defs) args | MLexn s -> (* An [MLexn] may be applied, but I don't really care. *) paren (str "error" ++ spc () ++ qs s) | MLdummy -> str "__" (* An [MLdummy] may be applied, but I don't really care. *) | MLmagic a -> pp_expr env args a | MLaxiom -> paren (str "error \"AXIOM TO BE REALIZED\"") and pp_cons_args env = function | MLcons (_,r,args) when is_coinductive r -> paren (pp_global Cons r ++ (if args = [] then mt () else spc ()) ++ prlist_with_sep spc (pp_cons_args env) args) | e -> str "," ++ pp_expr env [] e and pp_one_pat env (ids,p,t) = let r = match p with | Pusual r -> r | Pcons (r,l) -> r (* cf. the check [is_regular_match] above *) | _ -> assert false in let ids,env' = push_vars (List.rev_map id_of_mlid ids) env in let args = if ids = [] then mt () else (str " " ++ prlist_with_sep spc pr_id (List.rev ids)) in (pp_global Cons r ++ args), (pp_expr env' [] t) and pp_pat env pv = prvect_with_sep fnl (fun x -> let s1,s2 = pp_one_pat env x in hov 2 (str "((" ++ s1 ++ str ")" ++ spc () ++ s2 ++ str ")")) pv (*s names of the functions ([ids]) are already pushed in [env], and passed here just for convenience. *) and pp_fix env j (ids,bl) args = paren (str "letrec " ++ (v 0 (paren (prvect_with_sep fnl (fun (fi,ti) -> paren ((pr_id fi) ++ spc () ++ (pp_expr env [] ti))) (array_map2 (fun id b -> (id,b)) ids bl)) ++ fnl () ++ hov 2 (pp_apply (pr_id (ids.(j))) true args)))) (*s Pretty-printing of a declaration. 
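(* Only [Dterm] and [Dfix] components produce code, each as a toplevel [(define name body)] form, custom realizations being emitted verbatim; [Dind] and [Dtype] are skipped since Scheme is untyped. *)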
*) let pp_decl = function | Dind _ -> mt () | Dtype _ -> mt () | Dfix (rv, defs,_) -> let names = Array.map (fun r -> if is_inline_custom r then mt () else pp_global Term r) rv in prvecti (fun i r -> let void = is_inline_custom r || (not (is_custom r) && defs.(i) = MLexn "UNUSED") in if void then mt () else hov 2 (paren (str "define " ++ names.(i) ++ spc () ++ (if is_custom r then str (find_custom r) else pp_expr (empty_env ()) [] defs.(i))) ++ fnl ()) ++ fnl ()) rv | Dterm (r, a, _) -> if is_inline_custom r then mt () else hov 2 (paren (str "define " ++ pp_global Term r ++ spc () ++ (if is_custom r then str (find_custom r) else pp_expr (empty_env ()) [] a))) ++ fnl2 () let rec pp_structure_elem = function | (l,SEdecl d) -> pp_decl d | (l,SEmodule m) -> pp_module_expr m.ml_mod_expr | (l,SEmodtype m) -> mt () (* for the moment we simply discard module type *) and pp_module_expr = function | MEstruct (mp,sel) -> prlist_strict pp_structure_elem sel | MEfunctor _ -> mt () (* for the moment we simply discard unapplied functors *) | MEident _ | MEapply _ -> assert false (* should be expansed in extract_env *) let pp_struct = let pp_sel (mp,sel) = push_visible mp []; let p = prlist_strict pp_structure_elem sel in pop_visible (); p in prlist_strict pp_sel let scheme_descr = { keywords = keywords; file_suffix = ".scm"; preamble = preamble; pp_struct = pp_struct; sig_suffix = None; sig_preamble = (fun _ _ _ -> mt ()); pp_sig = (fun _ -> mt ()); pp_decl = pp_decl; } coq-8.4pl2/plugins/extraction/mlutil.ml0000640000175000001440000012523312121620060017324 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* anonymous_name | Name id when id = dummy_name -> anonymous_name | Name id -> id let id_of_mlid = function | Dummy -> dummy_name | Id id -> id | Tmp id -> id let tmp_id = function | Id id -> Tmp id | a -> a let is_tmp = function Tmp _ -> true | _ -> false (*S Operations upon ML types (with meta). *) let meta_count = ref 0 let reset_meta_count () = meta_count := 0 let new_meta _ = incr meta_count; Tmeta {id = !meta_count; contents = None} (* Simultaneous substitution of [[Tvar 1; ... ; Tvar n]] by [l] in a ML type. *) let type_subst_list l t = let rec subst t = match t with | Tvar j -> List.nth l (j-1) | Tmeta {contents=None} -> t | Tmeta {contents=Some u} -> subst u | Tarr (a,b) -> Tarr (subst a, subst b) | Tglob (r, l) -> Tglob (r, List.map subst l) | a -> a in subst t (* Simultaneous substitution of [[|Tvar 1; ... ; Tvar n|]] by [v] in a ML type. *) let type_subst_vect v t = let rec subst t = match t with | Tvar j -> v.(j-1) | Tmeta {contents=None} -> t | Tmeta {contents=Some u} -> subst u | Tarr (a,b) -> Tarr (subst a, subst b) | Tglob (r, l) -> Tglob (r, List.map subst l) | a -> a in subst t (*s From a type schema to a type. All [Tvar] become fresh [Tmeta]. 
*) let instantiation (nb,t) = type_subst_vect (Array.init nb new_meta) t (*s Occur-check of a free meta in a type *) let rec type_occurs alpha t = match t with | Tmeta {id=beta; contents=None} -> alpha = beta | Tmeta {contents=Some u} -> type_occurs alpha u | Tarr (t1, t2) -> type_occurs alpha t1 || type_occurs alpha t2 | Tglob (r,l) -> List.exists (type_occurs alpha) l | _ -> false (*s Most General Unificator *) let rec mgu = function | Tmeta m, Tmeta m' when m.id = m'.id -> () | Tmeta m, t | t, Tmeta m -> (match m.contents with | Some u -> mgu (u, t) | None when type_occurs m.id t -> raise Impossible | None -> m.contents <- Some t) | Tarr(a, b), Tarr(a', b') -> mgu (a, a'); mgu (b, b') | Tglob (r,l), Tglob (r',l') when r = r' -> List.iter mgu (List.combine l l') | (Tdummy _, _ | _, Tdummy _) when lang() = Haskell -> () | Tdummy _, Tdummy _ -> () | t, u when t = u -> () (* for Tvar, Tvar', Tunknown, Taxiom *) | _ -> raise Impossible let needs_magic p = try mgu p; false with Impossible -> true let put_magic_if b a = if b && lang () <> Scheme then MLmagic a else a let put_magic p a = if needs_magic p && lang () <> Scheme then MLmagic a else a let generalizable a = lang () <> Ocaml || match a with | MLapp _ -> false | _ -> true (* TODO, this is just an approximation for the moment *) (*S ML type env. *) module Mlenv = struct let meta_cmp m m' = compare m.id m'.id module Metaset = Set.Make(struct type t = ml_meta let compare = meta_cmp end) (* Main MLenv type. [env] is the real environment, whereas [free] (tries to) record the free meta variables occurring in [env]. *) type t = { env : ml_schema list; mutable free : Metaset.t} (* Empty environment. *) let empty = { env = []; free = Metaset.empty } (* [get] returns a instantiated copy of the n-th most recently added type in the environment. *) let get mle n = assert (List.length mle.env >= n); instantiation (List.nth mle.env (n-1)) (* [find_free] finds the free meta in a type. *) let rec find_free set = function | Tmeta m when m.contents = None -> Metaset.add m set | Tmeta {contents = Some t} -> find_free set t | Tarr (a,b) -> find_free (find_free set a) b | Tglob (_,l) -> List.fold_left find_free set l | _ -> set (* The [free] set of an environment can be outdate after some unifications. [clean_free] takes care of that. *) let clean_free mle = let rem = ref Metaset.empty and add = ref Metaset.empty in let clean m = match m.contents with | None -> () | Some u -> rem := Metaset.add m !rem; add := find_free !add u in Metaset.iter clean mle.free; mle.free <- Metaset.union (Metaset.diff mle.free !rem) !add (* From a type to a type schema. If a [Tmeta] is still uninstantiated and does appears in the [mle], then it becomes a [Tvar]. *) let generalization mle t = let c = ref 0 in let map = ref (Intmap.empty : int Intmap.t) in let add_new i = incr c; map := Intmap.add i !c !map; !c in let rec meta2var t = match t with | Tmeta {contents=Some u} -> meta2var u | Tmeta ({id=i} as m) -> (try Tvar (Intmap.find i !map) with Not_found -> if Metaset.mem m mle.free then t else Tvar (add_new i)) | Tarr (t1,t2) -> Tarr (meta2var t1, meta2var t2) | Tglob (r,l) -> Tglob (r, List.map meta2var l) | t -> t in !c, meta2var t (* Adding a type in an environment, after generalizing. *) let push_gen mle t = clean_free mle; { env = generalization mle t :: mle.env; free = mle.free } (* Adding a type with no [Tvar], hence no generalization needed. *) let push_type {env=e;free=f} t = { env = (0,t) :: e; free = find_free f t} (* Adding a type with no [Tvar] nor [Tmeta]. 
*) let push_std_type {env=e;free=f} t = { env = (0,t) :: e; free = f} end (*S Operations upon ML types (without meta). *) (*s Does a section path occur in a ML type ? *) let rec type_mem_kn kn = function | Tmeta {contents = Some t} -> type_mem_kn kn t | Tglob (r,l) -> occur_kn_in_ref kn r || List.exists (type_mem_kn kn) l | Tarr (a,b) -> (type_mem_kn kn a) || (type_mem_kn kn b) | _ -> false (*s Greatest variable occurring in [t]. *) let type_maxvar t = let rec parse n = function | Tmeta {contents = Some t} -> parse n t | Tvar i -> max i n | Tarr (a,b) -> parse (parse n a) b | Tglob (_,l) -> List.fold_left parse n l | _ -> n in parse 0 t (*s What are the type variables occurring in [t]. *) let intset_union_map_list f l = List.fold_left (fun s t -> Intset.union s (f t)) Intset.empty l let intset_union_map_array f a = Array.fold_left (fun s t -> Intset.union s (f t)) Intset.empty a let rec type_listvar = function | Tmeta {contents = Some t} -> type_listvar t | Tvar i | Tvar' i -> Intset.singleton i | Tarr (a,b) -> Intset.union (type_listvar a) (type_listvar b) | Tglob (_,l) -> intset_union_map_list type_listvar l | _ -> Intset.empty (*s From [a -> b -> c] to [[a;b],c]. *) let rec type_decomp = function | Tmeta {contents = Some t} -> type_decomp t | Tarr (a,b) -> let l,h = type_decomp b in a::l, h | a -> [],a (*s The converse: From [[a;b],c] to [a -> b -> c]. *) let rec type_recomp (l,t) = match l with | [] -> t | a::l -> Tarr (a, type_recomp (l,t)) (*s Translating [Tvar] to [Tvar'] to avoid clash. *) let rec var2var' = function | Tmeta {contents = Some t} -> var2var' t | Tvar i -> Tvar' i | Tarr (a,b) -> Tarr (var2var' a, var2var' b) | Tglob (r,l) -> Tglob (r, List.map var2var' l) | a -> a type abbrev_map = global_reference -> ml_type option (*s Delta-reduction of type constants everywhere in a ML type [t]. [env] is a function of type [ml_type_env]. *) let type_expand env t = let rec expand = function | Tmeta {contents = Some t} -> expand t | Tglob (r,l) -> (match env r with | Some mlt -> expand (type_subst_list l mlt) | None -> Tglob (r, List.map expand l)) | Tarr (a,b) -> Tarr (expand a, expand b) | a -> a in if Table.type_expand () then expand t else t let type_simpl = type_expand (fun _ -> None) (*s Generating a signature from a ML type. *) let type_to_sign env t = match type_expand env t with | Tdummy d -> Kill d | _ -> Keep let type_to_signature env t = let rec f = function | Tmeta {contents = Some t} -> f t | Tarr (Tdummy d, b) -> Kill d :: f b | Tarr (_, b) -> Keep :: f b | _ -> [] in f (type_expand env t) let isKill = function Kill _ -> true | _ -> false let isDummy = function Tdummy _ -> true | _ -> false let sign_of_id = function | Dummy -> Kill Kother | _ -> Keep (* Classification of signatures *) type sign_kind = | EmptySig | NonLogicalSig (* at least a [Keep] *) | UnsafeLogicalSig (* No [Keep], at least a [Kill Kother] *) | SafeLogicalSig (* only [Kill Ktype] *) let rec sign_kind = function | [] -> EmptySig | Keep :: _ -> NonLogicalSig | Kill k :: s -> match sign_kind s with | NonLogicalSig -> NonLogicalSig | UnsafeLogicalSig -> UnsafeLogicalSig | SafeLogicalSig | EmptySig -> if k = Kother then UnsafeLogicalSig else SafeLogicalSig (* Removing the final [Keep] in a signature *) let rec sign_no_final_keeps = function | [] -> [] | k :: s -> let s' = k :: sign_no_final_keeps s in if s' = [Keep] then [] else s' (*s Removing [Tdummy] from the top level of a ML type. 
*) let type_expunge_from_sign env s t = let rec expunge s t = if s = [] then t else match t with | Tmeta {contents = Some t} -> expunge s t | Tarr (a,b) -> let t = expunge (List.tl s) b in if List.hd s = Keep then Tarr (a, t) else t | Tglob (r,l) -> (match env r with | Some mlt -> expunge s (type_subst_list l mlt) | None -> assert false) | _ -> assert false in let t = expunge (sign_no_final_keeps s) t in if lang () <> Haskell && sign_kind s = UnsafeLogicalSig then Tarr (Tdummy Kother, t) else t let type_expunge env t = type_expunge_from_sign env (type_to_signature env t) t (*S Generic functions over ML ast terms. *) let mlapp f a = if a = [] then f else MLapp (f,a) (*s [ast_iter_rel f t] applies [f] on every [MLrel] in t. It takes care of the number of bingings crossed before reaching the [MLrel]. *) let ast_iter_rel f = let rec iter n = function | MLrel i -> f (i-n) | MLlam (_,a) -> iter (n+1) a | MLletin (_,a,b) -> iter n a; iter (n+1) b | MLcase (_,a,v) -> iter n a; Array.iter (fun (l,_,t) -> iter (n + (List.length l)) t) v | MLfix (_,ids,v) -> let k = Array.length ids in Array.iter (iter (n+k)) v | MLapp (a,l) -> iter n a; List.iter (iter n) l | MLcons (_,_,l) | MLtuple l -> List.iter (iter n) l | MLmagic a -> iter n a | MLglob _ | MLexn _ | MLdummy | MLaxiom -> () in iter 0 (*s Map over asts. *) let ast_map_branch f (c,ids,a) = (c,ids,f a) (* Warning: in [ast_map] we assume that [f] does not change the type of [MLcons] and of [MLcase] heads *) let ast_map f = function | MLlam (i,a) -> MLlam (i, f a) | MLletin (i,a,b) -> MLletin (i, f a, f b) | MLcase (typ,a,v) -> MLcase (typ,f a, Array.map (ast_map_branch f) v) | MLfix (i,ids,v) -> MLfix (i, ids, Array.map f v) | MLapp (a,l) -> MLapp (f a, List.map f l) | MLcons (typ,c,l) -> MLcons (typ,c, List.map f l) | MLtuple l -> MLtuple (List.map f l) | MLmagic a -> MLmagic (f a) | MLrel _ | MLglob _ | MLexn _ | MLdummy | MLaxiom as a -> a (*s Map over asts, with binding depth as parameter. *) let ast_map_lift_branch f n (ids,p,a) = (ids,p, f (n+(List.length ids)) a) (* Same warning as for [ast_map]... *) let ast_map_lift f n = function | MLlam (i,a) -> MLlam (i, f (n+1) a) | MLletin (i,a,b) -> MLletin (i, f n a, f (n+1) b) | MLcase (typ,a,v) -> MLcase (typ,f n a,Array.map (ast_map_lift_branch f n) v) | MLfix (i,ids,v) -> let k = Array.length ids in MLfix (i,ids,Array.map (f (k+n)) v) | MLapp (a,l) -> MLapp (f n a, List.map (f n) l) | MLcons (typ,c,l) -> MLcons (typ,c, List.map (f n) l) | MLtuple l -> MLtuple (List.map (f n) l) | MLmagic a -> MLmagic (f n a) | MLrel _ | MLglob _ | MLexn _ | MLdummy | MLaxiom as a -> a (*s Iter over asts. *) let ast_iter_branch f (c,ids,a) = f a let ast_iter f = function | MLlam (i,a) -> f a | MLletin (i,a,b) -> f a; f b | MLcase (_,a,v) -> f a; Array.iter (ast_iter_branch f) v | MLfix (i,ids,v) -> Array.iter f v | MLapp (a,l) -> f a; List.iter f l | MLcons (_,_,l) | MLtuple l -> List.iter f l | MLmagic a -> f a | MLrel _ | MLglob _ | MLexn _ | MLdummy | MLaxiom -> () (*S Operations concerning De Bruijn indices. *) (*s [ast_occurs k t] returns [true] if [(Rel k)] occurs in [t]. 
*) let ast_occurs k t = try ast_iter_rel (fun i -> if i = k then raise Found) t; false with Found -> true (*s [occurs_itvl k k' t] returns [true] if there is a [(Rel i)] in [t] with [k<=i<=k'] *) let ast_occurs_itvl k k' t = try ast_iter_rel (fun i -> if (k <= i) && (i <= k') then raise Found) t; false with Found -> true (* Number of occurences of [Rel 1] in [t], with special treatment of match: occurences in different branches aren't added, but we rather use max. *) let nb_occur_match = let rec nb k = function | MLrel i -> if i = k then 1 else 0 | MLcase(_,a,v) -> (nb k a) + Array.fold_left (fun r (ids,_,a) -> max r (nb (k+(List.length ids)) a)) 0 v | MLletin (_,a,b) -> (nb k a) + (nb (k+1) b) | MLfix (_,ids,v) -> let k = k+(Array.length ids) in Array.fold_left (fun r a -> r+(nb k a)) 0 v | MLlam (_,a) -> nb (k+1) a | MLapp (a,l) -> List.fold_left (fun r a -> r+(nb k a)) (nb k a) l | MLcons (_,_,l) | MLtuple l -> List.fold_left (fun r a -> r+(nb k a)) 0 l | MLmagic a -> nb k a | MLglob _ | MLexn _ | MLdummy | MLaxiom -> 0 in nb 1 (*s Lifting on terms. [ast_lift k t] lifts the binding depth of [t] across [k] bindings. *) let ast_lift k t = let rec liftrec n = function | MLrel i as a -> if i-n < 1 then a else MLrel (i+k) | a -> ast_map_lift liftrec n a in if k = 0 then t else liftrec 0 t let ast_pop t = ast_lift (-1) t (*s [permut_rels k k' c] translates [Rel 1 ... Rel k] to [Rel (k'+1) ... Rel (k'+k)] and [Rel (k+1) ... Rel (k+k')] to [Rel 1 ... Rel k'] *) let permut_rels k k' = let rec permut n = function | MLrel i as a -> let i' = i-n in if i'<1 || i'>k+k' then a else if i'<=k then MLrel (i+k') else MLrel (i-k) | a -> ast_map_lift permut n a in permut 0 (*s Substitution. [ml_subst e t] substitutes [e] for [Rel 1] in [t]. Lifting (of one binder) is done at the same time. *) let ast_subst e = let rec subst n = function | MLrel i as a -> let i' = i-n in if i'=1 then ast_lift n e else if i'<1 then a else MLrel (i-1) | a -> ast_map_lift subst n a in subst 0 (*s Generalized substitution. [gen_subst v d t] applies to [t] the substitution coded in the [v] array: [(Rel i)] becomes [v.(i-1)]. [d] is the correction applies to [Rel] greater than [Array.length v]. *) let gen_subst v d t = let rec subst n = function | MLrel i as a -> let i'= i-n in if i' < 1 then a else if i' <= Array.length v then match v.(i'-1) with | None -> MLexn ("UNBOUND " ^ string_of_int i') | Some u -> ast_lift n u else MLrel (i+d) | a -> ast_map_lift subst n a in subst 0 t (*S Operations concerning match patterns *) let is_basic_pattern = function | Prel _ | Pwild -> true | Pusual _ | Pcons _ | Ptuple _ -> false let has_deep_pattern br = let deep = function | Pcons (_,l) | Ptuple l -> not (List.for_all is_basic_pattern l) | Pusual _ | Prel _ | Pwild -> false in array_exists (function (_,pat,_) -> deep pat) br let is_regular_match br = if Array.length br = 0 then false (* empty match becomes MLexn *) else try let get_r (ids,pat,c) = match pat with | Pusual r -> r | Pcons (r,l) -> if not (list_for_all_i (fun i -> (=) (Prel i)) 1 (List.rev l)) then raise Impossible; r | _ -> raise Impossible in let ind = match get_r br.(0) with | ConstructRef (ind,_) -> ind | _ -> raise Impossible in array_for_all_i (fun i tr -> get_r tr = ConstructRef (ind,i+1)) 0 br with Impossible -> false (*S Operations concerning lambdas. *) (*s [collect_lams MLlam(id1,...MLlam(idn,t)...)] returns [[idn;...;id1]] and the term [t]. 
*) let collect_lams = let rec collect acc = function | MLlam(id,t) -> collect (id::acc) t | x -> acc,x in collect [] (*s [collect_n_lams] does the same for a precise number of [MLlam]. *) let collect_n_lams = let rec collect acc n t = if n = 0 then acc,t else match t with | MLlam(id,t) -> collect (id::acc) (n-1) t | _ -> assert false in collect [] (*s [remove_n_lams] just removes some [MLlam]. *) let rec remove_n_lams n t = if n = 0 then t else match t with | MLlam(_,t) -> remove_n_lams (n-1) t | _ -> assert false (*s [nb_lams] gives the number of head [MLlam]. *) let rec nb_lams = function | MLlam(_,t) -> succ (nb_lams t) | _ -> 0 (*s [named_lams] does the converse of [collect_lams]. *) let rec named_lams ids a = match ids with | [] -> a | id :: ids -> named_lams ids (MLlam (id,a)) (*s The same for a specific identifier (resp. anonymous, dummy) *) let rec many_lams id a = function | 0 -> a | n -> many_lams id (MLlam (id,a)) (pred n) let anonym_tmp_lams a n = many_lams (Tmp anonymous_name) a n let dummy_lams a n = many_lams Dummy a n (*s mixed according to a signature. *) let rec anonym_or_dummy_lams a = function | [] -> a | Keep :: s -> MLlam(anonymous, anonym_or_dummy_lams a s) | Kill _ :: s -> MLlam(Dummy, anonym_or_dummy_lams a s) (*S Operations concerning eta. *) (*s The following function creates [MLrel n;...;MLrel 1] *) let rec eta_args n = if n = 0 then [] else (MLrel n)::(eta_args (pred n)) (*s Same, but filtered by a signature. *) let rec eta_args_sign n = function | [] -> [] | Keep :: s -> (MLrel n) :: (eta_args_sign (n-1) s) | Kill _ :: s -> eta_args_sign (n-1) s (*s This one tests [MLrel (n+k); ... ;MLrel (1+k)] *) let rec test_eta_args_lift k n = function | [] -> n=0 | a :: q -> (a = (MLrel (k+n))) && (test_eta_args_lift k (pred n) q) (*s Computes an eta-reduction. *) let eta_red e = let ids,t = collect_lams e in let n = List.length ids in if n = 0 then e else match t with | MLapp (f,a) -> let m = List.length a in let ids,body,args = if m = n then [], f, a else if m < n then list_skipn m ids, f, a else (* m > n *) let a1,a2 = list_chop (m-n) a in [], MLapp (f,a1), a2 in let p = List.length args in if test_eta_args_lift 0 p args && not (ast_occurs_itvl 1 p body) then named_lams ids (ast_lift (-p) body) else e | _ -> e (*s Computes all head linear beta-reductions possible in [(t a)]. Non-linear head beta-redex become let-in. *) let rec linear_beta_red a t = match a,t with | [], _ -> t | a0::a, MLlam (id,t) -> (match nb_occur_match t with | 0 -> linear_beta_red a (ast_pop t) | 1 -> linear_beta_red a (ast_subst a0 t) | _ -> let a = List.map (ast_lift 1) a in MLletin (id, a0, linear_beta_red a t)) | _ -> MLapp (t, a) let rec tmp_head_lams = function | MLlam (id, t) -> MLlam (tmp_id id, tmp_head_lams t) | e -> e (*s Applies a substitution [s] of constants by their body, plus linear beta reductions at modified positions. Moreover, we mark some lambdas as suitable for later linear reduction (this helps the inlining of recursors). *) let rec ast_glob_subst s t = match t with | MLapp ((MLglob ((ConstRef kn) as refe)) as f, a) -> let a = List.map (fun e -> tmp_head_lams (ast_glob_subst s e)) a in (try linear_beta_red a (Refmap'.find refe s) with Not_found -> MLapp (f, a)) | MLglob ((ConstRef kn) as refe) -> (try Refmap'.find refe s with Not_found -> t) | _ -> ast_map (ast_glob_subst s) t (*S Auxiliary functions used in simplification of ML cases. *) (* Factorisation of some match branches into a common "x -> f x" branch may break types sometimes. Example: [type 'x a = A]. 
Then [let id = function A -> A] has type ['x a -> 'y a], which is incompatible with the type of [let id x = x]. We now check that the type arguments of the inductive are preserved by our transformation. TODO: this verification should be done someday modulo expansion of type definitions. *) (*s [branch_as_function b typ (l,p,c)] tries to see branch [c] as a function [f] applied to [MLcons(r,l)]. For that it transforms any [MLcons(r,l)] in [MLrel 1] and raises [Impossible] if any variable in [l] occurs outside such a [MLcons] *) let branch_as_fun typ (l,p,c) = let nargs = List.length l in let cons = match p with | Pusual r -> MLcons (typ, r, eta_args nargs) | Pcons (r,pl) -> let pat2rel = function Prel i -> MLrel i | _ -> raise Impossible in MLcons (typ, r, List.map pat2rel pl) | _ -> raise Impossible in let rec genrec n = function | MLrel i as c -> let i' = i-n in if i'<1 then c else if i'>nargs then MLrel (i-nargs+1) else raise Impossible | MLcons _ as cons' when cons' = ast_lift n cons -> MLrel (n+1) | a -> ast_map_lift genrec n a in genrec 0 c (*s [branch_as_cst (l,p,c)] tries to see branch [c] as a constant independent from the pattern [MLcons(r,l)]. For that is raises [Impossible] if any variable in [l] occurs in [c], and otherwise returns [c] lifted to appear like a function with one arg (for uniformity with [branch_as_fun]). NB: [MLcons(r,l)] might occur nonetheless in [c], but only when [l] is empty, i.e. when [r] is a constant constructor *) let branch_as_cst (l,_,c) = let n = List.length l in if ast_occurs_itvl 1 n c then raise Impossible; ast_lift (1-n) c (* A branch [MLcons(r,l)->c] can be seen at the same time as a function branch and a constant branch, either because: - [MLcons(r,l)] doesn't occur in [c]. For example : "A -> B" - this constructor is constant (i.e. [l] is empty). For example "A -> A" When searching for the best factorisation below, we'll try both. *) (* The following structure allows to record which element occurred at what position, and then finally return the most frequent element and its positions. *) let census_add, census_max, census_clean = let h = Hashtbl.create 13 in let clear () = Hashtbl.clear h in let add e i = let s = try Hashtbl.find h e with Not_found -> Intset.empty in Hashtbl.replace h e (Intset.add i s) in let max e0 = let len = ref 0 and lst = ref Intset.empty and elm = ref e0 in Hashtbl.iter (fun e s -> let n = Intset.cardinal s in if n > !len then begin len := n; lst := s; elm := e end) h; (!elm,!lst) in (add,max,clear) (* [factor_branches] return the longest possible list of branches that have the same factorization, either as a function or as a constant. *) let is_opt_pat (_,p,_) = match p with | Prel _ | Pwild -> true | _ -> false let factor_branches o typ br = if array_exists is_opt_pat br then None (* already optimized *) else begin census_clean (); for i = 0 to Array.length br - 1 do if o.opt_case_idr then (try census_add (branch_as_fun typ br.(i)) i with Impossible -> ()); if o.opt_case_cst then (try census_add (branch_as_cst br.(i)) i with Impossible -> ()); done; let br_factor, br_set = census_max MLdummy in census_clean (); let n = Intset.cardinal br_set in if n = 0 then None else if Array.length br >= 2 && n < 2 then None else Some (br_factor, br_set) end (*s If all branches are functions, try to permut the case and the functions. 
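(* Schematically, [match e with C1 -> fun x -> t1 | C2 -> fun x -> t2] becomes [fun x -> match e with C1 -> t1 | C2 -> t2]; the number of lambdas moved out is the minimum number of head lambdas over the non-exception branches, as computed by [permut_case_fun] below. *)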
*) let rec merge_ids ids ids' = match ids,ids' with | [],l -> l | l,[] -> l | i::ids, i'::ids' -> (if i = Dummy then i' else i) :: (merge_ids ids ids') let is_exn = function MLexn _ -> true | _ -> false let rec permut_case_fun br acc = let nb = ref max_int in Array.iter (fun (_,_,t) -> let ids, c = collect_lams t in let n = List.length ids in if (n < !nb) && (not (is_exn c)) then nb := n) br; if !nb = max_int || !nb = 0 then ([],br) else begin let br = Array.copy br in let ids = ref [] in for i = 0 to Array.length br - 1 do let (l,p,t) = br.(i) in let local_nb = nb_lams t in if local_nb < !nb then (* t = MLexn ... *) br.(i) <- (l,p,remove_n_lams local_nb t) else begin let local_ids,t = collect_n_lams !nb t in ids := merge_ids !ids local_ids; br.(i) <- (l,p,permut_rels !nb (List.length l) t) end done; (!ids,br) end (*S Generalized iota-reduction. *) (* Definition of a generalized iota-redex: it's a [MLcase(e,br)] where the head [e] is a [MLcons] or made of [MLcase]'s with [MLcons] as leaf branches. A generalized iota-redex is transformed into beta-redexes. *) (* In [iota_red], we try to simplify a [MLcase(_,MLcons(typ,r,a),br)]. Argument [i] is the branch we consider, we should lift what comes from [br] by [lift] *) let rec iota_red i lift br ((typ,r,a) as cons) = if i >= Array.length br then raise Impossible; let (ids,p,c) = br.(i) in match p with | Pusual r' | Pcons (r',_) when r'<>r -> iota_red (i+1) lift br cons | Pusual r' -> let c = named_lams (List.rev ids) c in let c = ast_lift lift c in MLapp (c,a) | Prel 1 when List.length ids = 1 -> let c = MLlam (List.hd ids, c) in let c = ast_lift lift c in MLapp(c,[MLcons(typ,r,a)]) | Pwild when ids = [] -> ast_lift lift c | _ -> raise Impossible (* TODO: handle some more cases *) (* [iota_gen] is an extension of [iota_red] where we allow to traverse matches in the head of the first match *) let iota_gen br hd = let rec iota k = function | MLcons (typ,r,a) -> iota_red 0 k br (typ,r,a) | MLcase(typ,e,br') -> let new_br = Array.map (fun (i,p,c)->(i,p,iota (k+(List.length i)) c)) br' in MLcase(typ,e,new_br) | _ -> raise Impossible in iota 0 hd let is_atomic = function | MLrel _ | MLglob _ | MLexn _ | MLdummy -> true | _ -> false let is_imm_apply = function MLapp (MLrel 1, _) -> true | _ -> false (** Program creates a let-in named "program_branch_NN" for each branch of match. Unfolding them leads to more natural code (and more dummy removal) *) let is_program_branch = function | Id id -> let s = string_of_id id in let br = "program_branch_" in let n = String.length br in (try ignore (int_of_string (String.sub s n (String.length s - n))); String.sub s 0 n = br with e when Errors.noncritical e -> false) | Tmp _ | Dummy -> false let expand_linear_let o id e = o.opt_lin_let || is_tmp id || is_program_branch id || is_imm_apply e (*S The main simplification function. *) (* Some beta-iota reductions + simplifications. 
*) let rec simpl o = function | MLapp (f, []) -> simpl o f | MLapp (f, a) -> simpl_app o (List.map (simpl o) a) (simpl o f) | MLcase (typ,e,br) -> let br = Array.map (fun (l,p,t) -> (l,p,simpl o t)) br in simpl_case o typ br (simpl o e) | MLletin(Dummy,_,e) -> simpl o (ast_pop e) | MLletin(id,c,e) -> let e = simpl o e in if (is_atomic c) || (is_atomic e) || (let n = nb_occur_match e in (n = 0 || (n=1 && expand_linear_let o id e))) then simpl o (ast_subst c e) else MLletin(id, simpl o c, e) | MLfix(i,ids,c) -> let n = Array.length ids in if ast_occurs_itvl 1 n c.(i) then MLfix (i, ids, Array.map (simpl o) c) else simpl o (ast_lift (-n) c.(i)) (* Dummy fixpoint *) | a -> ast_map (simpl o) a (* invariant : list [a] of arguments is non-empty *) and simpl_app o a = function | MLapp (f',a') -> simpl_app o (a'@a) f' | MLlam (Dummy,t) -> simpl o (MLapp (ast_pop t, List.tl a)) | MLlam (id,t) -> (* Beta redex *) (match nb_occur_match t with | 0 -> simpl o (MLapp (ast_pop t, List.tl a)) | 1 when (is_tmp id || o.opt_lin_beta) -> simpl o (MLapp (ast_subst (List.hd a) t, List.tl a)) | _ -> let a' = List.map (ast_lift 1) (List.tl a) in simpl o (MLletin (id, List.hd a, MLapp (t, a')))) | MLletin (id,e1,e2) when o.opt_let_app -> (* Application of a letin: we push arguments inside *) MLletin (id, e1, simpl o (MLapp (e2, List.map (ast_lift 1) a))) | MLcase (typ,e,br) when o.opt_case_app -> (* Application of a case: we push arguments inside *) let br' = Array.map (fun (l,p,t) -> let k = List.length l in let a' = List.map (ast_lift k) a in (l, p, simpl o (MLapp (t,a')))) br in simpl o (MLcase (typ,e,br')) | (MLdummy | MLexn _) as e -> e (* We just discard arguments in those cases. *) | f -> MLapp (f,a) (* Invariant : all empty matches should now be [MLexn] *) and simpl_case o typ br e = try (* Generalized iota-redex *) if not o.opt_case_iot then raise Impossible; simpl o (iota_gen br e) with Impossible -> (* Swap the case and the lam if possible *) let ids,br = if o.opt_case_fun then permut_case_fun br [] else [],br in let n = List.length ids in if n <> 0 then simpl o (named_lams ids (MLcase (typ, ast_lift n e, br))) else (* Can we merge several branches as the same constant or function ? *) if lang() = Scheme || is_custom_match br then MLcase (typ, e, br) else match factor_branches o typ br with | Some (f,ints) when Intset.cardinal ints = Array.length br -> (* If all branches have been factorized, we remove the match *) simpl o (MLletin (Tmp anonymous_name, e, f)) | Some (f,ints) -> let last_br = if ast_occurs 1 f then ([Tmp anonymous_name], Prel 1, f) else ([], Pwild, ast_pop f) in let brl = Array.to_list br in let brl_opt = list_filter_i (fun i _ -> not (Intset.mem i ints)) brl in let brl_opt = brl_opt @ [last_br] in MLcase (typ, e, Array.of_list brl_opt) | None -> MLcase (typ, e, br) (*S Local prop elimination. *) (* We try to eliminate as many [prop] as possible inside an [ml_ast]. *) (*s In a list, it selects only the elements corresponding to a [Keep] in the boolean list [l]. *) let rec select_via_bl l args = match l,args with | [],_ -> args | Keep::l,a::args -> a :: (select_via_bl l args) | Kill _::l,a::args -> select_via_bl l args | _ -> assert false (*s [kill_some_lams] removes some head lambdas according to the signature [bl]. This list is build on the identifier list model: outermost lambda is on the right. [Rels] corresponding to removed lambdas are supposed not to occur, and the other [Rels] are made correct via a [gen_subst]. 
Output is not directly a [ml_ast], compose with [named_lams] if needed. *) let kill_some_lams bl (ids,c) = let n = List.length bl in let n' = List.fold_left (fun n b -> if b=Keep then (n+1) else n) 0 bl in if n = n' then ids,c else if n' = 0 then [],ast_lift (-n) c else begin let v = Array.make n None in let rec parse_ids i j = function | [] -> () | Keep :: l -> v.(i) <- Some (MLrel j); parse_ids (i+1) (j+1) l | Kill _ :: l -> parse_ids (i+1) j l in parse_ids 0 1 bl; select_via_bl bl ids, gen_subst v (n'-n) c end (*s [kill_dummy_lams] uses the last function to kill the lambdas corresponding to a [dummy_name]. It can raise [Impossible] if there is nothing to do, or if there is no lambda left at all. *) let kill_dummy_lams c = let ids,c = collect_lams c in let bl = List.map sign_of_id ids in if not (List.mem Keep bl) then raise Impossible; let rec fst_kill n = function | [] -> raise Impossible | Kill _ :: bl -> n | Keep :: bl -> fst_kill (n+1) bl in let skip = max 0 ((fst_kill 0 bl) - 1) in let ids_skip, ids = list_chop skip ids in let _, bl = list_chop skip bl in let c = named_lams ids_skip c in let ids',c = kill_some_lams bl (ids,c) in ids, named_lams ids' c (*s [eta_expansion_sign] takes a function [fun idn ... id1 -> c] and a signature [s] and builds a eta-long version. *) (* For example, if [s = [Keep;Keep;Kill Prop;Keep]] then the output is : [fun idn ... id1 x x _ x -> (c' 4 3 __ 1)] with [c' = lift 4 c] *) let eta_expansion_sign s (ids,c) = let rec abs ids rels i = function | [] -> let a = List.rev_map (function MLrel x -> MLrel (i-x) | a -> a) rels in ids, MLapp (ast_lift (i-1) c, a) | Keep :: l -> abs (anonymous :: ids) (MLrel i :: rels) (i+1) l | Kill _ :: l -> abs (Dummy :: ids) (MLdummy :: rels) (i+1) l in abs ids [] 1 s (*s If [s = [b1; ... ; bn]] then [case_expunge] decomposes [e] in [n] lambdas (with eta-expansion if needed) and removes all dummy lambdas corresponding to [Del] in [s]. *) let case_expunge s e = let m = List.length s in let n = nb_lams e in let p = if m <= n then collect_n_lams m e else eta_expansion_sign (list_skipn n s) (collect_lams e) in kill_some_lams (List.rev s) p (*s [term_expunge] takes a function [fun idn ... id1 -> c] and a signature [s] and remove dummy lams. The difference with [case_expunge] is that we here leave one dummy lambda if all lambdas are logical dummy and the target language is strict. *) let term_expunge s (ids,c) = if s = [] then c else let ids,c = kill_some_lams (List.rev s) (ids,c) in if ids = [] && lang () <> Haskell && List.mem (Kill Kother) s then MLlam (Dummy, ast_lift 1 c) else named_lams ids c (*s [kill_dummy_args ids r t] looks for occurences of [MLrel r] in [t] and purge the args of [MLrel r] corresponding to a [dummy_name]. It makes eta-expansion if needed. *) let kill_dummy_args ids r t = let m = List.length ids in let bl = List.rev_map sign_of_id ids in let rec found n = function | MLrel r' when r' = r + n -> true | MLmagic e -> found n e | _ -> false in let rec killrec n = function | MLapp(e, a) when found n e -> let k = max 0 (m - (List.length a)) in let a = List.map (killrec n) a in let a = List.map (ast_lift k) a in let a = select_via_bl bl (a @ (eta_args k)) in named_lams (list_firstn k ids) (MLapp (ast_lift k e, a)) | e when found n e -> let a = select_via_bl bl (eta_args m) in named_lams ids (MLapp (ast_lift m e, a)) | e -> ast_map_lift killrec n e in killrec 0 t (*s The main function for local [dummy] elimination. 
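(* For instance, for a let-in [let f = fun x _ y -> t in e] whose second binder is a [Dummy], [kill_dummy_lams] removes that abstraction from the body of [f] and [kill_dummy_args] drops the corresponding argument of [f] at every occurrence in [e]. *)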
*) let rec kill_dummy = function | MLfix(i,fi,c) -> (try let ids,c = kill_dummy_fix i c in ast_subst (MLfix (i,fi,c)) (kill_dummy_args ids 1 (MLrel 1)) with Impossible -> MLfix (i,fi,Array.map kill_dummy c)) | MLapp (MLfix (i,fi,c),a) -> let a = List.map kill_dummy a in (try let ids,c = kill_dummy_fix i c in let fake = MLapp (MLrel 1, List.map (ast_lift 1) a) in let fake' = kill_dummy_args ids 1 fake in ast_subst (MLfix (i,fi,c)) fake' with Impossible -> MLapp(MLfix(i,fi,Array.map kill_dummy c),a)) | MLletin(id, MLfix (i,fi,c),e) -> (try let ids,c = kill_dummy_fix i c in let e = kill_dummy (kill_dummy_args ids 1 e) in MLletin(id, MLfix(i,fi,c),e) with Impossible -> MLletin(id, MLfix(i,fi,Array.map kill_dummy c),kill_dummy e)) | MLletin(id,c,e) -> (try let ids,c = kill_dummy_lams (kill_dummy_hd c) in let e = kill_dummy (kill_dummy_args ids 1 e) in let c = kill_dummy c in if is_atomic c then ast_subst c e else MLletin (id, c, e) with Impossible -> MLletin(id,kill_dummy c,kill_dummy e)) | a -> ast_map kill_dummy a (* Similar function, but acting only on head lambdas and let-ins *) and kill_dummy_hd = function | MLlam(id,e) -> MLlam(id, kill_dummy_hd e) | MLletin(id,c,e) -> (try let ids,c = kill_dummy_lams (kill_dummy_hd c) in let e = kill_dummy_hd (kill_dummy_args ids 1 e) in let c = kill_dummy c in if is_atomic c then ast_subst c e else MLletin (id, c, e) with Impossible -> MLletin(id,kill_dummy c,kill_dummy_hd e)) | a -> a and kill_dummy_fix i c = let n = Array.length c in let ids,ci = kill_dummy_lams (kill_dummy_hd c.(i)) in let c = Array.copy c in c.(i) <- ci; for j = 0 to (n-1) do c.(j) <- kill_dummy (kill_dummy_args ids (n-i) c.(j)) done; ids,c (*s Putting things together. *) let normalize a = let o = optims () in let rec norm a = let a' = if o.opt_kill_dum then kill_dummy (simpl o a) else simpl o a in if a = a' then a else norm a' in norm a (*S Special treatment of fixpoint for pretty-printing purpose. *) let general_optimize_fix f ids n args m c = let v = Array.make n 0 in for i=0 to (n-1) do v.(i)<-i done; let aux i = function | MLrel j when v.(j-1)>=0 -> if ast_occurs (j+1) c then raise Impossible else v.(j-1)<-(-i-1) | _ -> raise Impossible in list_iter_i aux args; let args_f = List.rev_map (fun i -> MLrel (i+m+1)) (Array.to_list v) in let new_f = anonym_tmp_lams (MLapp (MLrel (n+m+1),args_f)) m in let new_c = named_lams ids (normalize (MLapp ((ast_subst new_f c),args))) in MLfix(0,[|f|],[|new_c|]) let optimize_fix a = if not (optims()).opt_fix_fun then a else let ids,a' = collect_lams a in let n = List.length ids in if n = 0 then a else match a' with | MLfix(_,[|f|],[|c|]) -> let new_f = MLapp (MLrel (n+1),eta_args n) in let new_c = named_lams ids (normalize (ast_subst new_f c)) in MLfix(0,[|f|],[|new_c|]) | MLapp(a',args) -> let m = List.length args in (match a' with | MLfix(_,_,_) when (test_eta_args_lift 0 n args) && not (ast_occurs_itvl 1 m a') -> a' | MLfix(_,[|f|],[|c|]) -> (try general_optimize_fix f ids n args m c with Impossible -> a) | _ -> a) | _ -> a (*S Inlining. *) (* Utility functions used in the decision of inlining. 
*) let ml_size_branch size pv = Array.fold_left (fun a (_,_,t) -> a + size t) 0 pv let rec ml_size = function | MLapp(t,l) -> List.length l + ml_size t + ml_size_list l | MLlam(_,t) -> 1 + ml_size t | MLcons(_,_,l) | MLtuple l -> ml_size_list l | MLcase(_,t,pv) -> 1 + ml_size t + ml_size_branch ml_size pv | MLfix(_,_,f) -> ml_size_array f | MLletin (_,_,t) -> ml_size t | MLmagic t -> ml_size t | MLglob _ | MLrel _ | MLexn _ | MLdummy | MLaxiom -> 0 and ml_size_list l = List.fold_left (fun a t -> a + ml_size t) 0 l and ml_size_array a = Array.fold_left (fun a t -> a + ml_size t) 0 a let is_fix = function MLfix _ -> true | _ -> false (*s Strictness *) (* A variable is strict if the evaluation of the whole term implies the evaluation of this variable. Non-strict variables can be found behind Match, for example. Expanding a term [t] is a good idea when it begins by at least one non-strict lambda, since the corresponding argument to [t] might be unevaluated in the expanded code. *) exception Toplevel let lift n l = List.map ((+) n) l let pop n l = List.map (fun x -> if x<=n then raise Toplevel else x-n) l (* This function returns a list of de Bruijn indices of non-strict variables, or raises [Toplevel] if it has an internal non-strict variable. In fact, not all variables are checked for strictness, only the ones which de Bruijn index is in the candidates list [cand]. The flag [add] controls the behaviour when going through a lambda: should we add the corresponding variable to the candidates? We use this flag to check only the external lambdas, those that will correspond to arguments. *) let rec non_stricts add cand = function | MLlam (id,t) -> let cand = lift 1 cand in let cand = if add then 1::cand else cand in pop 1 (non_stricts add cand t) | MLrel n -> List.filter ((<>) n) cand | MLapp (t,l)-> let cand = non_stricts false cand t in List.fold_left (non_stricts false) cand l | MLcons (_,_,l) -> List.fold_left (non_stricts false) cand l | MLletin (_,t1,t2) -> let cand = non_stricts false cand t1 in pop 1 (non_stricts add (lift 1 cand) t2) | MLfix (_,i,f)-> let n = Array.length i in let cand = lift n cand in let cand = Array.fold_left (non_stricts false) cand f in pop n cand | MLcase (_,t,v) -> (* The only interesting case: for a variable to be non-strict, *) (* it is sufficient that it appears non-strict in at least one branch, *) (* so we make an union (in fact a merge). *) let cand = non_stricts false cand t in Array.fold_left (fun c (i,_,t)-> let n = List.length i in let cand = lift n cand in let cand = pop n (non_stricts add cand t) in Sort.merge (<=) cand c) [] v (* [merge] may duplicates some indices, but I don't mind. *) | MLmagic t -> non_stricts add cand t | _ -> cand (* The real test: we are looking for internal non-strict variables, so we start with no candidates, and the only positive answer is via the [Toplevel] exception. *) let is_not_strict t = try let _ = non_stricts true [] t in false with Toplevel -> true (*s Inlining decision *) (* [inline_test] answers the following question: If we could inline [t] (the user said nothing special), should we inline ? We expand small terms with at least one non-strict variable (i.e. a variable that may not be evaluated). Futhermore we don't expand fixpoints. Moreover, as mentionned by X. Leroy (bug #2241), inling a constant from inside an opaque module might break types. To avoid that, we require below that both [r] and its body are globally visible. 
This isn't fully satisfactory, since [r] might not be visible (functor), and anyway it might be interesting to inline [r] at least inside its own structure. But to be safe, we adopt this restriction for the moment. *) open Declarations let inline_test r t = if not (auto_inline ()) then false else let c = match r with ConstRef c -> c | _ -> assert false in let has_body = try constant_has_body (Global.lookup_constant c) with e when Errors.noncritical e -> false in has_body && (let t1 = eta_red t in let t2 = snd (collect_lams t1) in not (is_fix t2) && ml_size t < 12 && is_not_strict t) let con_of_string s = let null = empty_dirpath in match repr_dirpath (dirpath_of_string s) with | id :: d -> make_con (MPfile (make_dirpath d)) null (label_of_id id) | [] -> assert false let manual_inline_set = List.fold_right (fun x -> Cset_env.add (con_of_string x)) [ "Coq.Init.Wf.well_founded_induction_type"; "Coq.Init.Wf.well_founded_induction"; "Coq.Init.Wf.Acc_iter"; "Coq.Init.Wf.Fix_F"; "Coq.Init.Wf.Fix"; "Coq.Init.Datatypes.andb"; "Coq.Init.Datatypes.orb"; "Coq.Init.Logic.eq_rec_r"; "Coq.Init.Logic.eq_rect_r"; "Coq.Init.Specif.proj1_sig"; ] Cset_env.empty let manual_inline = function | ConstRef c -> Cset_env.mem c manual_inline_set | _ -> false (* If the user doesn't say he wants to keep [t], we inline in two cases: \begin{itemize} \item the user explicitly requests it \item [expansion_test] answers that the inlining is a good idea, and we are free to act (AutoInline is set) \end{itemize} *) let inline r t = not (to_keep r) (* The user DOES want to keep it *) && not (is_inline_custom r) && (to_inline r (* The user DOES want to inline it *) || (lang () <> Haskell && not (is_projection r) && (is_recursor r || manual_inline r || inline_test r t))) coq-8.4pl2/plugins/subtac/0000750000175000001440000000000012127276537014604 5ustar notinuserscoq-8.4pl2/plugins/subtac/test/0000750000175000001440000000000012127276537015563 5ustar notinuserscoq-8.4pl2/plugins/subtac/test/id.v0000640000175000001440000000151511160567762016350 0ustar notinusersRequire Coq.Arith.Arith. Require Import Coq.subtac.Utils. Program Fixpoint id (n : nat) : { x : nat | x = n } := match n with | O => O | S p => S (id p) end. intros ; auto. pose (subset_simpl (id p)). simpl in e. unfold p0. rewrite e. auto. Defined. Check id. Print id. Extraction id. Axiom le_gt_dec : forall n m, { n <= m } + { n > m }. Require Import Omega. Program Fixpoint id_if (n : nat) { wf n lt }: { x : nat | x = n } := if le_gt_dec n 0 then 0 else S (id_if (pred n)). intros. auto with arith. intros. pose (subset_simpl (id_if (pred n))). simpl in e. rewrite e. induction n ; auto with arith. Defined. Print id_if_instance. Extraction id_if_instance. Notation "( x & y )" := (@existS _ _ x y) : core_scope. Program Definition testsig ( a : nat ) : { x : nat & { y : nat | x = y }} := (a & a). intros. auto. Qed. coq-8.4pl2/plugins/subtac/test/wf.v0000640000175000001440000000241211254456226016361 0ustar notinusersNotation "( x & y )" := (@existS _ _ x y) : core_scope. Unset Printing All. Require Import Coq.Arith.Compare_dec. Require Import Coq.subtac.Utils. 
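(* The tactics below normalize the obligations generated for [euclid]: [one_simpl_hyp] simplifies any hypothesis in which the first projection of an [exist] value occurs, and [simpl_subtac] destructs the existentials and repeats this simplification a few times. *)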
Ltac one_simpl_hyp := match goal with | [H : (`exist _ _ _) = _ |- _] => simpl in H | [H : _ = (`exist _ _ _) |- _] => simpl in H | [H : (`exist _ _ _) < _ |- _] => simpl in H | [H : _ < (`exist _ _ _) |- _] => simpl in H | [H : (`exist _ _ _) <= _ |- _] => simpl in H | [H : _ <= (`exist _ _ _) |- _] => simpl in H | [H : (`exist _ _ _) > _ |- _] => simpl in H | [H : _ > (`exist _ _ _) |- _] => simpl in H | [H : (`exist _ _ _) >= _ |- _] => simpl in H | [H : _ >= (`exist _ _ _) |- _] => simpl in H end. Ltac one_simpl_subtac := destruct_exists ; repeat one_simpl_hyp ; simpl. Ltac simpl_subtac := do 3 one_simpl_subtac ; simpl. Require Import Omega. Require Import Wf_nat. Program Fixpoint euclid (a : nat) (b : { b : nat | b <> O }) {wf a lt} : { q : nat & { r : nat | a = b * q + r /\ r < b } } := if le_lt_dec b a then let (q', r) := euclid (a - b) b in (S q' & r) else (O & a). destruct b ; simpl_subtac. omega. simpl_subtac. assert(x0 * S q' = x0 + x0 * q'). rewrite <- mult_n_Sm. omega. rewrite H2 ; omega. simpl_subtac. split ; auto with arith. omega. apply lt_wf. Defined. Check euclid_evars_proof.coq-8.4pl2/plugins/subtac/test/ListsTest.v0000640000175000001440000000361711254456226017713 0ustar notinusers(* -*- coq-prog-args: ("-emacs-U" "-debug") -*- *) Require Import Coq.Program.Program. Require Import List. Set Implicit Arguments. Section Accessors. Variable A : Set. Program Definition myhd : forall (l : list A | length l <> 0), A := fun l => match l with | nil => ! | hd :: tl => hd end. Program Definition mytail (l : list A | length l <> 0) : list A := match l with | nil => ! | hd :: tl => tl end. End Accessors. Program Definition test_hd : nat := myhd (cons 1 nil). (*Eval compute in test_hd*) (*Program Definition test_tail : list A := mytail nil.*) Section app. Variable A : Set. Program Fixpoint app (l : list A) (l' : list A) { struct l } : { r : list A | length r = length l + length l' } := match l with | nil => l' | hd :: tl => hd :: (tl ++ l') end where "x ++ y" := (app x y). Next Obligation. intros. destruct_call app ; program_simpl. Defined. Program Lemma app_id_l : forall l : list A, l = nil ++ l. Proof. simpl ; auto. Qed. Program Lemma app_id_r : forall l : list A, l = l ++ nil. Proof. induction l ; simpl in * ; auto. rewrite <- IHl ; auto. Qed. End app. Extraction app. Section Nth. Variable A : Set. Program Fixpoint nth (l : list A) (n : nat | n < length l) { struct l } : A := match n, l with | 0, hd :: _ => hd | S n', _ :: tl => nth tl n' | _, nil => ! end. Next Obligation. Proof. simpl in *. auto with arith. Defined. Next Obligation. Proof. inversion H. Qed. Program Fixpoint nth' (l : list A) (n : nat | n < length l) { struct l } : A := match l, n with | hd :: _, 0 => hd | _ :: tl, S n' => nth' tl n' | nil, _ => ! end. Next Obligation. Proof. simpl in *. auto with arith. Defined. Next Obligation. Proof. intros. inversion H. Defined. End Nth. coq-8.4pl2/plugins/subtac/test/Mutind.v0000640000175000001440000000034511254456226017210 0ustar notinusersRequire Import List. Program Fixpoint f a : { x : nat | x > 0 } := match a with | 0 => 1 | S a' => g a a' end with g a b : { x : nat | x > 0 } := match b with | 0 => 1 | S b' => f b' end. Check f. Check g. coq-8.4pl2/plugins/subtac/test/Test1.v0000640000175000001440000000043211254456226016745 0ustar notinusersProgram Definition test (a b : nat) : { x : nat | x = a + b } := ((a + b) : { x : nat | x = a + b }). Proof. intros. reflexivity. Qed. Print test. Require Import List. 
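(* In [test] above, the cast of [a + b] to the subset type produces a single obligation [a + b = a + b], discharged by the [reflexivity] script that follows the definition. *)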
Program hd_opt (l : list nat) : { x : nat | x <> 0 } := match l with nil => 1 | a :: l => a end. coq-8.4pl2/plugins/subtac/test/measure.v0000640000175000001440000000064311160567762017416 0ustar notinusersNotation "( x & y )" := (@existS _ _ x y) : core_scope. Unset Printing All. Require Import Coq.Arith.Compare_dec. Require Import Coq.Program.Program. Fixpoint size (a : nat) : nat := match a with 0 => 1 | S n => S (size n) end. Program Fixpoint test_measure (a : nat) {measure size a} : nat := match a with | S (S n) => S (test_measure n) | 0 | S 0 => a end. Check test_measure. Print test_measure.coq-8.4pl2/plugins/subtac/test/ListDep.v0000640000175000001440000000212411254456226017311 0ustar notinusers(* -*- coq-prog-args: ("-emacs-U" "-debug") -*- *) Require Import List. Require Import Coq.Program.Program. Set Implicit Arguments. Definition sub_list (A : Set) (l' l : list A) := (forall v, In v l' -> In v l) /\ length l' <= length l. Lemma sub_list_tl : forall A : Set, forall x (l l' : list A), sub_list (x :: l) l' -> sub_list l l'. Proof. intros. inversion H. split. intros. apply H0. auto with datatypes. auto with arith. Qed. Section Map_DependentRecursor. Variable U V : Set. Variable l : list U. Variable f : { x : U | In x l } -> V. Obligations Tactic := unfold sub_list in * ; program_simpl ; intuition. Program Fixpoint map_rec ( l' : list U | sub_list l' l ) { measure length l' } : { r : list V | length r = length l' } := match l' with | nil => nil | cons x tl => let tl' := map_rec tl in f x :: tl' end. Next Obligation. destruct_call map_rec. simpl in *. subst l'. simpl ; auto with arith. Qed. Program Definition map : list V := map_rec l. End Map_DependentRecursor. Extraction map. Extraction map_rec. coq-8.4pl2/plugins/subtac/test/take.v0000640000175000001440000000115011254456226016667 0ustar notinusers(* -*- coq-prog-args: ("-emacs-U" "-debug") -*- *) Require Import JMeq. Require Import List. Require Import Program. Set Implicit Arguments. Obligations Tactic := idtac. Print cons. Program Fixpoint take (A : Set) (l : list A) (n : nat | n <= length l) { struct l } : { l' : list A | length l' = n } := match n with | 0 => nil | S p => match l with | cons hd tl => let rest := take tl p in cons hd rest | nil => ! end end. Require Import Omega. Solve All Obligations. Next Obligation. destruct_call take ; program_simpl. Defined. Next Obligation. intros. inversion H. Defined. coq-8.4pl2/plugins/subtac/test/euclid.v0000640000175000001440000000127211254456226017215 0ustar notinusersRequire Import Coq.Program.Program. Require Import Coq.Arith.Compare_dec. Notation "( x & y )" := (existS _ x y) : core_scope. Require Import Omega. Program Fixpoint euclid (a : nat) (b : { b : nat | b <> O }) {wf lt a} : { q : nat & { r : nat | a = b * q + r /\ r < b } } := if le_lt_dec b a then let (q', r) := euclid (a - b) b in (S q' & r) else (O & a). Next Obligation. assert(b * S q' = b * q' + b) by auto with arith ; omega. Defined. Program Definition test_euclid : (prod nat nat) := let (q, r) := euclid 4 2 in (q, q). Eval lazy beta zeta delta iota in test_euclid. Program Definition testsig (a : nat) : { x : nat & { y : nat | x < y } } := (a & S a). Check testsig. coq-8.4pl2/plugins/subtac/test/rec.v0000640000175000001440000000236211160567762016526 0ustar notinusersRequire Import Coq.Arith.Arith. Require Import Lt. Require Import Omega. Axiom lt_ge_dec : forall x y : nat, { x < y } + { x >= y }. (*Proof. intros. elim (le_lt_dec y x) ; intros ; auto with arith. Defined. *) Require Import Coq.subtac.FixSub. 
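(* Below: the same identity function is defined by structural recursion ([id_struct]) and by well-founded recursion over [lt] ([wfrec], [structrec]), and the OCaml code obtained for each is inspected via [Extraction]. *)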
Require Import Wf_nat. Lemma preda_lt_a : forall a, 0 < a -> pred a < a. auto with arith. Qed. Program Fixpoint id_struct (a : nat) : nat := match a with 0 => 0 | S n => S (id_struct n) end. Check struct_rec. if (lt_ge_dec O a) then S (wfrec (pred a)) else O. Program Fixpoint wfrec (a : nat) { wf a lt } : nat := if (lt_ge_dec O a) then S (wfrec (pred a)) else O. intros. apply preda_lt_a ; auto. Defined. Extraction wfrec. Extraction Inline proj1_sig. Extract Inductive bool => "bool" [ "true" "false" ]. Extract Inductive sumbool => "bool" [ "true" "false" ]. Extract Inlined Constant lt_ge_dec => "<". Extraction wfrec. Extraction Inline lt_ge_dec le_lt_dec. Extraction wfrec. Program Fixpoint structrec (a : nat) { wf a lt } : nat := match a with S n => S (structrec n) | 0 => 0 end. intros. unfold n0. omega. Defined. Print structrec. Extraction structrec. Extraction structrec. Definition structrec_fun (a : nat) : nat := structrec a (lt_wf a). Print structrec_fun. coq-8.4pl2/plugins/subtac/subtac_cases.mli0000640000175000001440000000133612010532755017736 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'b) -> 'a -> 'b val contrib_name : string val subtac_dir : string list val fixsub_module : string list val init_constant : string list -> string -> constr delayed val init_reference : string list -> string -> global_reference delayed val well_founded_ref : global_reference delayed val acc_ref : global_reference delayed val acc_inv_ref : global_reference delayed val fix_sub_ref : global_reference delayed val measure_on_R_ref : global_reference delayed val fix_measure_sub_ref : global_reference delayed val refl_ref : global_reference delayed val lt_ref : reference val sig_ref : reference val proj1_sig_ref : reference val proj2_sig_ref : reference val build_sig : unit -> coq_sigma_data val sig_ : coq_sigma_data delayed val fix_proto : constr delayed val hide_obligation : constr delayed val eq_ind : constr delayed val eq_rec : constr delayed val eq_rect : constr delayed val eq_refl : constr delayed val not_ref : constr delayed val and_typ : constr delayed val eqdep_ind : constr delayed val eqdep_rec : constr delayed val jmeq_ind : constr delayed val jmeq_rec : constr delayed val jmeq_refl : constr delayed val existS : coq_sigma_data delayed val prod : coq_sigma_data delayed val well_founded : constr delayed val fix : constr delayed val acc : constr delayed val acc_inv : constr delayed val extconstr : constr -> constr_expr val extsort : sorts -> constr_expr val my_print_constr : env -> constr -> std_ppcmds val my_print_constr_expr : constr_expr -> std_ppcmds val my_print_evardefs : evar_map -> std_ppcmds val my_print_context : env -> std_ppcmds val my_print_rel_context : env -> rel_context -> std_ppcmds val my_print_named_context : env -> std_ppcmds val my_print_env : env -> std_ppcmds val my_print_glob_constr : env -> glob_constr -> std_ppcmds val my_print_tycon_type : env -> type_constraint_type -> std_ppcmds val debug : int -> std_ppcmds -> unit val debug_msg : int -> std_ppcmds -> std_ppcmds val trace : std_ppcmds -> unit val wf_relations : (constr, constr delayed) Hashtbl.t type binders = local_binder list val app_opt : ('a -> 'a) option -> 'a -> 'a val print_args : env -> constr array -> std_ppcmds val make_existential : loc -> ?opaque:obligation_definition_status -> env -> evar_map ref -> types -> constr val no_goals_or_obligations : Typeclasses.evar_filter val make_existential_expr : loc 
-> 'a -> 'b -> constr_expr val string_of_hole_kind : hole_kind -> string val evars_of_term : evar_map -> evar_map -> constr -> evar_map val non_instanciated_map : env -> evar_map ref -> evar_map -> evar_map val global_kind : logical_kind val goal_kind : locality * goal_object_kind val global_proof_kind : logical_kind val goal_proof_kind : locality * goal_object_kind val global_fix_kind : logical_kind val goal_fix_kind : locality * goal_object_kind val mkSubset : name -> constr -> constr -> constr val mkProj1 : constr -> constr -> constr -> constr val mkProj1 : constr -> constr -> constr -> constr val mk_ex_pi1 : constr -> constr -> constr -> constr val mk_ex_pi1 : constr -> constr -> constr -> constr val mk_eq : types -> constr -> constr -> types val mk_eq_refl : types -> constr -> constr val mk_JMeq : types -> constr-> types -> constr -> types val mk_JMeq_refl : types -> constr -> constr val mk_conj : types list -> types val mk_not : types -> types val build_dependent_sum : (identifier * types) list -> Proof_type.tactic * types val and_tac : (identifier * 'a * constr * Proof_type.tactic) list -> ((constr -> (identifier * 'a * constr * constr) list) -> Tacexpr.declaration_hook) -> unit val destruct_ex : constr -> constr -> constr list val id_of_name : name -> identifier val definition_message : identifier -> std_ppcmds val recursive_message : constant array -> std_ppcmds val print_message : std_ppcmds -> unit val solve_by_tac : evar_info -> Tacmach.tactic -> constr val string_of_list : string -> ('a -> string) -> 'a list -> string val string_of_intset : Intset.t -> string val pr_evar_map : evar_map -> Pp.std_ppcmds val tactics_call : string -> Tacexpr.glob_tactic_arg list -> Tacexpr.glob_tactic_expr val pp_list : ('a -> Pp.std_ppcmds) -> 'a list -> Pp.std_ppcmds coq-8.4pl2/plugins/subtac/g_subtac.ml40000640000175000001440000001362512121620060016773 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* *) module Gram = Pcoq.Gram module Vernac = Pcoq.Vernac_ module Tactic = Pcoq.Tactic module SubtacGram = struct let gec s = Gram.entry_create ("Subtac."^s) (* types *) let subtac_gallina_loc : Vernacexpr.vernac_expr located Gram.entry = gec "subtac_gallina_loc" let subtac_withtac : Tacexpr.raw_tactic_expr option Gram.entry = gec "subtac_withtac" end open Glob_term open SubtacGram open Util open Pcoq open Prim open Constr let sigref = mkRefC (Qualid (dummy_loc, Libnames.qualid_of_string "Coq.Init.Specif.sig")) GEXTEND Gram GLOBAL: subtac_gallina_loc typeclass_constraint subtac_withtac; subtac_gallina_loc: [ [ g = Vernac.gallina -> loc, g | g = Vernac.gallina_ext -> loc, g ] ] ; subtac_withtac: [ [ "with"; t = Tactic.tactic -> Some t | -> None ] ] ; Constr.closed_binder: [[ "("; id=Prim.name; ":"; t=Constr.lconstr; "|"; c=Constr.lconstr; ")" -> let typ = mkAppC (sigref, [mkLambdaC ([id], default_binder_kind, t, c)]) in [LocalRawAssum ([id], default_binder_kind, typ)] ] ]; END type 'a gallina_loc_argtype = (Vernacexpr.vernac_expr located, 'a) Genarg.abstract_argument_type let (wit_subtac_gallina_loc : Genarg.tlevel gallina_loc_argtype), (globwit_subtac_gallina_loc : Genarg.glevel gallina_loc_argtype), (rawwit_subtac_gallina_loc : Genarg.rlevel gallina_loc_argtype) = Genarg.create_arg None "subtac_gallina_loc" type 'a withtac_argtype = (Tacexpr.raw_tactic_expr option, 'a) Genarg.abstract_argument_type let (wit_subtac_withtac : Genarg.tlevel withtac_argtype), (globwit_subtac_withtac : Genarg.glevel 
withtac_argtype), (rawwit_subtac_withtac : Genarg.rlevel withtac_argtype) = Genarg.create_arg None "subtac_withtac" VERNAC COMMAND EXTEND Subtac [ "Program" subtac_gallina_loc(g) ] -> [ Subtac.subtac g ] END let try_catch_exn f e = try f e with exn when Errors.noncritical exn -> errorlabstrm "Program" (Errors.print exn) let subtac_obligation e = try_catch_exn Subtac_obligations.subtac_obligation e let next_obligation e = try_catch_exn Subtac_obligations.next_obligation e let try_solve_obligation e = try_catch_exn Subtac_obligations.try_solve_obligation e let try_solve_obligations e = try_catch_exn Subtac_obligations.try_solve_obligations e let solve_all_obligations e = try_catch_exn Subtac_obligations.solve_all_obligations e let admit_obligations e = try_catch_exn Subtac_obligations.admit_obligations e VERNAC COMMAND EXTEND Subtac_Obligations | [ "Obligation" integer(num) "of" ident(name) ":" lconstr(t) subtac_withtac(tac) ] -> [ subtac_obligation (num, Some name, Some t) tac ] | [ "Obligation" integer(num) "of" ident(name) subtac_withtac(tac) ] -> [ subtac_obligation (num, Some name, None) tac ] | [ "Obligation" integer(num) ":" lconstr(t) subtac_withtac(tac) ] -> [ subtac_obligation (num, None, Some t) tac ] | [ "Obligation" integer(num) subtac_withtac(tac) ] -> [ subtac_obligation (num, None, None) tac ] | [ "Next" "Obligation" "of" ident(name) subtac_withtac(tac) ] -> [ next_obligation (Some name) tac ] | [ "Next" "Obligation" subtac_withtac(tac) ] -> [ next_obligation None tac ] END VERNAC COMMAND EXTEND Subtac_Solve_Obligation | [ "Solve" "Obligation" integer(num) "of" ident(name) "using" tactic(t) ] -> [ try_solve_obligation num (Some name) (Some (Tacinterp.interp t)) ] | [ "Solve" "Obligation" integer(num) "using" tactic(t) ] -> [ try_solve_obligation num None (Some (Tacinterp.interp t)) ] END VERNAC COMMAND EXTEND Subtac_Solve_Obligations | [ "Solve" "Obligations" "of" ident(name) "using" tactic(t) ] -> [ try_solve_obligations (Some name) (Some (Tacinterp.interp t)) ] | [ "Solve" "Obligations" "using" tactic(t) ] -> [ try_solve_obligations None (Some (Tacinterp.interp t)) ] | [ "Solve" "Obligations" ] -> [ try_solve_obligations None None ] END VERNAC COMMAND EXTEND Subtac_Solve_All_Obligations | [ "Solve" "All" "Obligations" "using" tactic(t) ] -> [ solve_all_obligations (Some (Tacinterp.interp t)) ] | [ "Solve" "All" "Obligations" ] -> [ solve_all_obligations None ] END VERNAC COMMAND EXTEND Subtac_Admit_Obligations | [ "Admit" "Obligations" "of" ident(name) ] -> [ admit_obligations (Some name) ] | [ "Admit" "Obligations" ] -> [ admit_obligations None ] END VERNAC COMMAND EXTEND Subtac_Set_Solver | [ "Obligation" "Tactic" ":=" tactic(t) ] -> [ Subtac_obligations.set_default_tactic (Vernacexpr.use_section_locality ()) (Tacinterp.glob_tactic t) ] END open Pp VERNAC COMMAND EXTEND Subtac_Show_Solver | [ "Show" "Obligation" "Tactic" ] -> [ msgnl (str"Program obligation tactic is " ++ Subtac_obligations.print_default_tactic ()) ] END VERNAC COMMAND EXTEND Subtac_Show_Obligations | [ "Obligations" "of" ident(name) ] -> [ Subtac_obligations.show_obligations (Some name) ] | [ "Obligations" ] -> [ Subtac_obligations.show_obligations None ] END VERNAC COMMAND EXTEND Subtac_Show_Preterm | [ "Preterm" "of" ident(name) ] -> [ Subtac_obligations.show_term (Some name) ] | [ "Preterm" ] -> [ Subtac_obligations.show_term None ] END coq-8.4pl2/plugins/subtac/subtac_coercion.mli0000640000175000001440000000013211160567762020445 0ustar notinusersopen Term val disc_subset : types -> (types * types) 
option module Coercion : Coercion.S coq-8.4pl2/plugins/subtac/subtac_obligations.ml0000640000175000001440000005467112121620060021001 0ustar notinusersopen Printf open Pp open Subtac_utils open Command open Environ open Term open Names open Libnames open Summary open Libobject open Entries open Decl_kinds open Util open Evd open Declare open Proof_type open Compat let ppwarn cmd = Pp.warn (str"Program:" ++ cmd) let pperror cmd = Util.errorlabstrm "Program" cmd let error s = pperror (str s) let reduce c = Reductionops.clos_norm_flags Closure.betaiota (Global.env ()) Evd.empty c exception NoObligations of identifier option let explain_no_obligations = function Some ident -> str "No obligations for program " ++ str (string_of_id ident) | None -> str "No obligations remaining" type obligation_info = (Names.identifier * Term.types * hole_kind located * obligation_definition_status * Intset.t * tactic option) array type obligation = { obl_name : identifier; obl_type : types; obl_location : hole_kind located; obl_body : constr option; obl_status : obligation_definition_status; obl_deps : Intset.t; obl_tac : tactic option; } type obligations = (obligation array * int) type fixpoint_kind = | IsFixpoint of (identifier located option * Topconstr.recursion_order_expr) list | IsCoFixpoint type notations = (Vernacexpr.lstring * Topconstr.constr_expr * Topconstr.scope_name option) list type program_info = { prg_name: identifier; prg_body: constr; prg_type: constr; prg_obligations: obligations; prg_deps : identifier list; prg_fixkind : fixpoint_kind option ; prg_implicits : (Topconstr.explicitation * (bool * bool * bool)) list; prg_notations : notations ; prg_kind : definition_kind; prg_reduce : constr -> constr; prg_hook : Tacexpr.declaration_hook; } let assumption_message id = Flags.if_verbose message ((string_of_id id) ^ " is assumed") let (set_default_tactic, get_default_tactic, print_default_tactic) = Tactic_option.declare_tactic_option "Program tactic" (* true = All transparent, false = Opaque if possible *) let proofs_transparency = ref true let set_proofs_transparency = (:=) proofs_transparency let get_proofs_transparency () = !proofs_transparency open Goptions let _ = declare_bool_option { optsync = true; optdepr = false; optname = "transparency of Program obligations"; optkey = ["Transparent";"Obligations"]; optread = get_proofs_transparency; optwrite = set_proofs_transparency; } (* true = hide obligations *) let hide_obligations = ref false let set_hide_obligations = (:=) hide_obligations let get_hide_obligations () = !hide_obligations open Goptions let _ = declare_bool_option { optsync = true; optdepr = false; optname = "Hidding of Program obligations"; optkey = ["Hide";"Obligations"]; optread = get_hide_obligations; optwrite = set_hide_obligations; } let evar_of_obligation o = make_evar (Global.named_context_val ()) o.obl_type let get_obligation_body expand obl = let c = Option.get obl.obl_body in if expand && obl.obl_status = Expand then match kind_of_term c with | Const c -> constant_value (Global.env ()) c | _ -> c else c let obl_substitution expand obls deps = Intset.fold (fun x acc -> let xobl = obls.(x) in let oblb = try get_obligation_body expand xobl with e when Errors.noncritical e -> assert(false) in (xobl.obl_name, (xobl.obl_type, oblb)) :: acc) deps [] let subst_deps expand obls deps t = let subst = obl_substitution expand obls deps in Term.replace_vars (List.map (fun (n, (_, b)) -> n, b) subst) t let rec prod_app t n = match kind_of_term (strip_outer_cast t) with | Prod (_,_,b) -> 
subst1 n b | LetIn (_, b, t, b') -> prod_app (subst1 b b') n | _ -> errorlabstrm "prod_app" (str"Needed a product, but didn't find one" ++ fnl ()) (* prod_appvect T [| a1 ; ... ; an |] -> (T a1 ... an) *) let prod_applist t nL = List.fold_left prod_app t nL let replace_appvars subst = let rec aux c = let f, l = decompose_app c in if isVar f then try let c' = List.map (map_constr aux) l in let (t, b) = List.assoc (destVar f) subst in mkApp (delayed_force hide_obligation, [| prod_applist t c'; applistc b c' |]) with Not_found -> map_constr aux c else map_constr aux c in map_constr aux let subst_prog expand obls ints prg = let subst = obl_substitution expand obls ints in if get_hide_obligations () then (replace_appvars subst prg.prg_body, replace_appvars subst (Termops.refresh_universes prg.prg_type)) else let subst' = List.map (fun (n, (_, b)) -> n, b) subst in (Term.replace_vars subst' prg.prg_body, Term.replace_vars subst' (Termops.refresh_universes prg.prg_type)) let subst_deps_obl obls obl = let t' = subst_deps true obls obl.obl_deps obl.obl_type in { obl with obl_type = t' } module ProgMap = Map.Make(struct type t = identifier let compare = compare end) let map_replace k v m = ProgMap.add k v (ProgMap.remove k m) let map_keys m = ProgMap.fold (fun k _ l -> k :: l) m [] let map_cardinal m = let i = ref 0 in ProgMap.iter (fun _ _ -> incr i) m; !i exception Found of program_info let map_first m = try ProgMap.iter (fun _ v -> raise (Found v)) m; assert(false) with Found x -> x let from_prg : program_info ProgMap.t ref = ref ProgMap.empty let freeze () = !from_prg let unfreeze v = from_prg := v let init () = from_prg := ProgMap.empty (** Beware: if this code is dynamically loaded via dynlink after the start of Coq, then this [init] function will not be run by [Lib.init ()]. Luckily, here we can launch [init] at load-time. 
*) let _ = init () let _ = Summary.declare_summary "program-tcc-table" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let progmap_union = ProgMap.fold ProgMap.add let close sec = if not (ProgMap.is_empty !from_prg) then let keys = map_keys !from_prg in errorlabstrm "Program" (str "Unsolved obligations when closing " ++ str sec ++ str":" ++ spc () ++ prlist_with_sep spc (fun x -> Nameops.pr_id x) keys ++ (str (if List.length keys = 1 then " has " else "have ") ++ str "unsolved obligations")) let input : program_info ProgMap.t -> obj = declare_object { (default_object "Program state") with cache_function = (fun (na, pi) -> from_prg := pi); load_function = (fun _ (_, pi) -> from_prg := pi); discharge_function = (fun _ -> close "section"; None); classify_function = (fun _ -> close "module"; Dispose) } open Evd let progmap_remove prg = Lib.add_anonymous_leaf (input (ProgMap.remove prg.prg_name !from_prg)) let progmap_add n prg = Lib.add_anonymous_leaf (input (ProgMap.add n prg !from_prg)) let progmap_replace prg' = Lib.add_anonymous_leaf (input (map_replace prg'.prg_name prg' !from_prg)) let rec intset_to = function -1 -> Intset.empty | n -> Intset.add n (intset_to (pred n)) let subst_body expand prg = let obls, _ = prg.prg_obligations in let ints = intset_to (pred (Array.length obls)) in subst_prog expand obls ints prg let declare_definition prg = let body, typ = subst_body true prg in let (local, kind) = prg.prg_kind in let ce = { const_entry_body = body; const_entry_secctx = None; const_entry_type = Some typ; const_entry_opaque = false } in (Command.get_declare_definition_hook ()) ce; match local with | Local when Lib.sections_are_opened () -> let c = SectionLocalDef(ce.const_entry_body,ce.const_entry_type,false) in let _ = declare_variable prg.prg_name (Lib.cwd(),c,IsDefinition kind) in print_message (Subtac_utils.definition_message prg.prg_name); if Pfedit.refining () then Flags.if_verbose msg_warning (str"Local definition " ++ Nameops.pr_id prg.prg_name ++ str" is not visible from current goals"); progmap_remove prg; VarRef prg.prg_name | (Global|Local) -> let c = Declare.declare_constant prg.prg_name (DefinitionEntry ce,IsDefinition (snd prg.prg_kind)) in let gr = ConstRef c in if Impargs.is_implicit_args () || prg.prg_implicits <> [] then Impargs.declare_manual_implicits false gr [prg.prg_implicits]; print_message (Subtac_utils.definition_message prg.prg_name); progmap_remove prg; prg.prg_hook local gr; gr open Pp open Ppconstr let rec lam_index n t acc = match kind_of_term t with | Lambda (na, _, b) -> if na = Name n then acc else lam_index n b (succ acc) | _ -> raise Not_found let compute_possible_guardness_evidences (n,_) fixbody fixtype = match n with | Some (loc, n) -> [lam_index n fixbody 0] | None -> (* If recursive argument was not given by user, we try all args. An earlier approach was to look only for inductive arguments, but doing it properly involves delta-reduction, and it finally doesn't seem to worth the effort (except for huge mutual fixpoints ?) 
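For instance, for a fixpoint whose type has two explicit arguments, say
       [nat -> list nat -> nat], the [None] branch below returns the candidate
       positions [0; 1]; [declare_mutual_definition] then passes these to
       [Pretyping.search_guard], which selects a position accepted by the
       guard condition.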
*) let m = Term.nb_prod fixtype in let ctx = fst (decompose_prod_n_assum m fixtype) in list_map_i (fun i _ -> i) 0 ctx let declare_mutual_definition l = let len = List.length l in let first = List.hd l in let fixdefs, fixtypes, fiximps = list_split3 (List.map (fun x -> let subs, typ = (subst_body true x) in let term = snd (Reductionops.splay_lam_n (Global.env ()) Evd.empty len subs) in let typ = snd (Reductionops.splay_prod_n (Global.env ()) Evd.empty len typ) in x.prg_reduce term, x.prg_reduce typ, x.prg_implicits) l) in (* let fixdefs = List.map reduce_fix fixdefs in *) let fixkind = Option.get first.prg_fixkind in let arrrec, recvec = Array.of_list fixtypes, Array.of_list fixdefs in let fixdecls = (Array.of_list (List.map (fun x -> Name x.prg_name) l), arrrec, recvec) in let (local,kind) = first.prg_kind in let fixnames = first.prg_deps in let kind = if fixkind <> IsCoFixpoint then Fixpoint else CoFixpoint in let indexes, fixdecls = match fixkind with | IsFixpoint wfl -> let possible_indexes = list_map3 compute_possible_guardness_evidences wfl fixdefs fixtypes in let indexes = Pretyping.search_guard dummy_loc (Global.env ()) possible_indexes fixdecls in Some indexes, list_map_i (fun i _ -> mkFix ((indexes,i),fixdecls)) 0 l | IsCoFixpoint -> None, list_map_i (fun i _ -> mkCoFix (i,fixdecls)) 0 l in (* Declare the recursive definitions *) let kns = list_map4 (declare_fix kind) fixnames fixdecls fixtypes fiximps in (* Declare notations *) List.iter Metasyntax.add_notation_interpretation first.prg_notations; Declare.recursive_message (fixkind<>IsCoFixpoint) indexes fixnames; let gr = List.hd kns in let kn = match gr with ConstRef kn -> kn | _ -> assert false in first.prg_hook local gr; List.iter progmap_remove l; kn let declare_obligation prg obl body = let body = prg.prg_reduce body in let ty = prg.prg_reduce obl.obl_type in match obl.obl_status with | Expand -> { obl with obl_body = Some body } | Define opaque -> let opaque = if get_proofs_transparency () then false else opaque in let ce = { const_entry_body = body; const_entry_secctx = None; const_entry_type = Some ty; const_entry_opaque = opaque } in let constant = Declare.declare_constant obl.obl_name (DefinitionEntry ce,IsProof Property) in if not opaque then Auto.add_hints false [string_of_id prg.prg_name] (Auto.HintsUnfoldEntry [EvalConstRef constant]); print_message (Subtac_utils.definition_message obl.obl_name); { obl with obl_body = Some (mkConst constant) } let init_prog_info n b t deps fixkind notations obls impls kind reduce hook = let obls', b = match b with | None -> assert(obls = [||]); let n = Nameops.add_suffix n "_obligation" in [| { obl_name = n; obl_body = None; obl_location = dummy_loc, InternalHole; obl_type = t; obl_status = Expand; obl_deps = Intset.empty; obl_tac = None } |], mkVar n | Some b -> Array.mapi (fun i (n, t, l, o, d, tac) -> { obl_name = n ; obl_body = None; obl_location = l; obl_type = reduce t; obl_status = o; obl_deps = d; obl_tac = tac }) obls, b in { prg_name = n ; prg_body = b; prg_type = reduce t; prg_obligations = (obls', Array.length obls'); prg_deps = deps; prg_fixkind = fixkind ; prg_notations = notations ; prg_implicits = impls; prg_kind = kind; prg_reduce = reduce; prg_hook = hook; } let get_prog name = let prg_infos = !from_prg in match name with Some n -> (try ProgMap.find n prg_infos with Not_found -> raise (NoObligations (Some n))) | None -> (let n = map_cardinal prg_infos in match n with 0 -> raise (NoObligations None) | 1 -> map_first prg_infos | _ -> error "More than one program 
with unsolved obligations") let get_prog_err n = try get_prog n with NoObligations id -> pperror (explain_no_obligations id) let obligations_solved prg = (snd prg.prg_obligations) = 0 let all_programs () = ProgMap.fold (fun k p l -> p :: l) !from_prg [] type progress = | Remain of int | Dependent | Defined of global_reference let obligations_message rem = if rem > 0 then if rem = 1 then Flags.if_verbose msgnl (int rem ++ str " obligation remaining") else Flags.if_verbose msgnl (int rem ++ str " obligations remaining") else Flags.if_verbose msgnl (str "No more obligations remaining") let update_obls prg obls rem = let prg' = { prg with prg_obligations = (obls, rem) } in progmap_replace prg'; obligations_message rem; if rem > 0 then Remain rem else ( match prg'.prg_deps with | [] -> let kn = declare_definition prg' in progmap_remove prg'; Defined kn | l -> let progs = List.map (fun x -> ProgMap.find x !from_prg) prg'.prg_deps in if List.for_all (fun x -> obligations_solved x) progs then let kn = declare_mutual_definition progs in Defined (ConstRef kn) else Dependent) let is_defined obls x = obls.(x).obl_body <> None let deps_remaining obls deps = Intset.fold (fun x acc -> if is_defined obls x then acc else x :: acc) deps [] let dependencies obls n = let res = ref Intset.empty in Array.iteri (fun i obl -> if i <> n && Intset.mem n obl.obl_deps then res := Intset.add i !res) obls; !res let kind_of_opacity o = match o with | Define false | Expand -> Subtac_utils.goal_kind | _ -> Subtac_utils.goal_proof_kind let not_transp_msg = str "Obligation should be transparent but was declared opaque." ++ spc () ++ str"Use 'Defined' instead." let warn_not_transp () = ppwarn not_transp_msg let error_not_transp () = pperror not_transp_msg let rec solve_obligation prg num tac = let user_num = succ num in let obls, rem = prg.prg_obligations in let obl = obls.(num) in if obl.obl_body <> None then pperror (str "Obligation" ++ spc () ++ int user_num ++ str "already" ++ spc() ++ str "solved.") else match deps_remaining obls obl.obl_deps with | [] -> let obl = subst_deps_obl obls obl in Lemmas.start_proof obl.obl_name (kind_of_opacity obl.obl_status) obl.obl_type (fun strength gr -> let cst = match gr with ConstRef cst -> cst | _ -> assert false in let obl = let transparent = evaluable_constant cst (Global.env ()) in let body = match obl.obl_status with | Expand -> if not transparent then error_not_transp () else constant_value (Global.env ()) cst | Define opaque -> if not opaque && not transparent then error_not_transp () else Libnames.constr_of_global gr in if transparent then Auto.add_hints true [string_of_id prg.prg_name] (Auto.HintsUnfoldEntry [EvalConstRef cst]); { obl with obl_body = Some body } in let obls = Array.copy obls in let _ = obls.(num) <- obl in let res = try update_obls prg obls (pred rem) with e when Errors.noncritical e -> pperror (Errors.print (Cerrors.process_vernac_interp_error e)) in match res with | Remain n when n > 0 -> let deps = dependencies obls num in if deps <> Intset.empty then ignore(auto_solve_obligations (Some prg.prg_name) None ~oblset:deps) | _ -> ()); trace (str "Started obligation " ++ int user_num ++ str " proof: " ++ Subtac_utils.my_print_constr (Global.env ()) obl.obl_type); Pfedit.by (snd (get_default_tactic ())); Option.iter (fun tac -> Pfedit.set_end_tac (Tacinterp.interp tac)) tac; Flags.if_verbose (fun () -> msg (Printer.pr_open_subgoals ())) () | l -> pperror (str "Obligation " ++ int user_num ++ str " depends on obligation(s) " ++ str (string_of_list ", " (fun x -> 
string_of_int (succ x)) l)) and subtac_obligation (user_num, name, typ) tac = let num = pred user_num in let prg = get_prog_err name in let obls, rem = prg.prg_obligations in if num < Array.length obls then let obl = obls.(num) in match obl.obl_body with None -> solve_obligation prg num tac | Some r -> error "Obligation already solved" else error (sprintf "Unknown obligation number %i" (succ num)) and solve_obligation_by_tac prg obls i tac = let obl = obls.(i) in match obl.obl_body with | Some _ -> false | None -> try if deps_remaining obls obl.obl_deps = [] then let obl = subst_deps_obl obls obl in let tac = match tac with | Some t -> t | None -> match obl.obl_tac with | Some t -> t | None -> snd (get_default_tactic ()) in let t = Subtac_utils.solve_by_tac (evar_of_obligation obl) tac in obls.(i) <- declare_obligation prg obl t; true else false with | Loc.Exc_located(_, Proof_type.LtacLocated (_, Refiner.FailError (_, s))) | Loc.Exc_located(_, Refiner.FailError (_, s)) | Refiner.FailError (_, s) -> user_err_loc (fst obl.obl_location, "solve_obligation", Lazy.force s) | Util.Anomaly _ as e -> raise e | e when Errors.noncritical e -> false and solve_prg_obligations prg ?oblset tac = let obls, rem = prg.prg_obligations in let rem = ref rem in let obls' = Array.copy obls in let p = match oblset with | None -> (fun _ -> true) | Some s -> (fun i -> Intset.mem i s) in let _ = Array.iteri (fun i x -> if p i && solve_obligation_by_tac prg obls' i tac then decr rem) obls' in update_obls prg obls' !rem and solve_obligations n tac = let prg = get_prog_err n in solve_prg_obligations prg tac and solve_all_obligations tac = ProgMap.iter (fun k v -> ignore(solve_prg_obligations v tac)) !from_prg and try_solve_obligation n prg tac = let prg = get_prog prg in let obls, rem = prg.prg_obligations in let obls' = Array.copy obls in if solve_obligation_by_tac prg obls' n tac then ignore(update_obls prg obls' (pred rem)); and try_solve_obligations n tac = try ignore (solve_obligations n tac) with NoObligations _ -> () and auto_solve_obligations n ?oblset tac : progress = Flags.if_verbose msgnl (str "Solving obligations automatically..."); try solve_prg_obligations (get_prog_err n) ?oblset tac with NoObligations _ -> Dependent open Pp let show_obligations_of_prg ?(msg=true) prg = let n = prg.prg_name in let obls, rem = prg.prg_obligations in let showed = ref 5 in if msg then msgnl (int rem ++ str " obligation(s) remaining: "); Array.iteri (fun i x -> match x.obl_body with | None -> if !showed > 0 then ( decr showed; msgnl (str "Obligation" ++ spc() ++ int (succ i) ++ spc () ++ str "of" ++ spc() ++ str (string_of_id n) ++ str ":" ++ spc () ++ hov 1 (my_print_constr (Global.env ()) x.obl_type ++ str "." 
++ fnl ()))) | Some _ -> ()) obls let show_obligations ?(msg=true) n = let progs = match n with | None -> all_programs () | Some n -> try [ProgMap.find n !from_prg] with Not_found -> raise (NoObligations (Some n)) in List.iter (show_obligations_of_prg ~msg) progs let show_term n = let prg = get_prog_err n in let n = prg.prg_name in msgnl (str (string_of_id n) ++ spc () ++ str":" ++ spc () ++ my_print_constr (Global.env ()) prg.prg_type ++ spc () ++ str ":=" ++ fnl () ++ my_print_constr (Global.env ()) prg.prg_body) let add_definition n ?term t ?(implicits=[]) ?(kind=Global,Definition) ?tactic ?(reduce=reduce) ?(hook=fun _ _ -> ()) obls = Flags.if_verbose pp (str (string_of_id n) ++ str " has type-checked"); let prg = init_prog_info n term t [] None [] obls implicits kind reduce hook in let obls,_ = prg.prg_obligations in if Array.length obls = 0 then ( Flags.if_verbose ppnl (str "."); let cst = declare_definition prg in Defined cst) else ( let len = Array.length obls in let _ = Flags.if_verbose ppnl (str ", generating " ++ int len ++ str " obligation(s)") in progmap_add n prg; let res = auto_solve_obligations (Some n) tactic in match res with | Remain rem -> Flags.if_verbose (fun () -> show_obligations ~msg:false (Some n)) (); res | _ -> res) let add_mutual_definitions l ?tactic ?(kind=Global,Definition) ?(reduce=reduce) ?(hook=fun _ _ -> ()) notations fixkind = let deps = List.map (fun (n, b, t, imps, obls) -> n) l in List.iter (fun (n, b, t, imps, obls) -> let prg = init_prog_info n (Some b) t deps (Some fixkind) notations obls imps kind reduce hook in progmap_add n prg) l; let _defined = List.fold_left (fun finished x -> if finished then finished else let res = auto_solve_obligations (Some x) tactic in match res with | Defined _ -> (* If one definition is turned into a constant, the whole block is defined. *) true | _ -> false) false deps in () let admit_obligations n = let prg = get_prog_err n in let obls, rem = prg.prg_obligations in let obls = Array.copy obls in Array.iteri (fun i x -> match x.obl_body with | None -> let x = subst_deps_obl obls x in let kn = Declare.declare_constant x.obl_name (ParameterEntry (None, x.obl_type,None), IsAssumption Conjectural) in assumption_message x.obl_name; obls.(i) <- { x with obl_body = Some (mkConst kn) } | Some _ -> ()) obls; ignore(update_obls prg obls 0) exception Found of int let array_find f arr = try Array.iteri (fun i x -> if f x then raise (Found i)) arr; raise Not_found with Found i -> i let next_obligation n tac = let prg = get_prog_err n in let obls, rem = prg.prg_obligations in let i = try array_find (fun x -> x.obl_body = None && deps_remaining obls x.obl_deps = []) obls with Not_found -> anomaly "Could not find a solvable obligation." in solve_obligation prg i tac coq-8.4pl2/plugins/subtac/eterm.ml0000640000175000001440000002043211734117167016250 0ustar notinusers(** - Get types of existentials ; - Flatten dependency tree (prefix order) ; - Replace existentials by De Bruijn indices in term, applied to the right arguments ; - Apply term prefixed by quantification on "existentials". 
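    Concretely, each remaining evar of a definition [foo] becomes an
    obligation named [foo_obligation_i]; obligations are sorted so that an
    evar occurring in the type of another one is emitted first, and every
    occurrence of an evar in the term is replaced by the corresponding
    obligation variable applied to the non let-bound hypotheses of its
    context.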
*) open Term open Sign open Names open Evd open List open Pp open Util open Subtac_utils open Proof_type let trace s = if !Flags.debug then (msgnl s; msgerr s) else () let succfix (depth, fixrels) = (succ depth, List.map succ fixrels) type oblinfo = { ev_name: int * identifier; ev_hyps: named_context; ev_status: obligation_definition_status; ev_chop: int option; ev_src: hole_kind located; ev_typ: types; ev_tac: tactic option; ev_deps: Intset.t } (* spiwack: Store field for internalizing ev_tac in evar_infos' evar_extra. *) open Store.Field let evar_tactic = Store.field () (** Substitute evar references in t using De Bruijn indices, where n binders were passed through. *) let subst_evar_constr evs n idf t = let seen = ref Intset.empty in let transparent = ref Idset.empty in let evar_info id = List.assoc id evs in let rec substrec (depth, fixrels) c = match kind_of_term c with | Evar (k, args) -> let { ev_name = (id, idstr) ; ev_hyps = hyps ; ev_chop = chop } = try evar_info k with Not_found -> anomaly ("eterm: existential variable " ^ string_of_int k ^ " not found") in seen := Intset.add id !seen; (* Evar arguments are created in inverse order, and we must not apply to defined ones (i.e. LetIn's) *) let args = let n = match chop with None -> 0 | Some c -> c in let (l, r) = list_chop n (List.rev (Array.to_list args)) in List.rev r in let args = let rec aux hyps args acc = match hyps, args with ((_, None, _) :: tlh), (c :: tla) -> aux tlh tla ((substrec (depth, fixrels) c) :: acc) | ((_, Some _, _) :: tlh), (_ :: tla) -> aux tlh tla acc | [], [] -> acc | _, _ -> acc (*failwith "subst_evars: invalid argument"*) in aux hyps args [] in if List.exists (fun x -> match kind_of_term x with Rel n -> List.mem n fixrels | _ -> false) args then transparent := Idset.add idstr !transparent; mkApp (idf idstr, Array.of_list args) | Fix _ -> map_constr_with_binders succfix substrec (depth, 1 :: fixrels) c | _ -> map_constr_with_binders succfix substrec (depth, fixrels) c in let t' = substrec (0, []) t in t', !seen, !transparent (** Substitute variable references in t using De Bruijn indices, where n binders were passed through. *) let subst_vars acc n t = let var_index id = Util.list_index id acc in let rec substrec depth c = match kind_of_term c with | Var v -> (try mkRel (depth + (var_index v)) with Not_found -> c) | _ -> map_constr_with_binders succ substrec depth c in substrec 0 t (** Rewrite type of an evar ([ H1 : t1, ... Hn : tn |- concl ]) to a product : forall H1 : t1, ..., forall Hn : tn, concl. Changes evars and hypothesis references to variable references. 
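    For instance, an evar with context [ n : nat ; H : 0 < n ] and
    conclusion [ n <> 0 ] yields the obligation type
    [ forall n : nat, 0 < n -> n <> 0 ]; let-bound hypotheses are kept as
    let-ins in the resulting product.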
*) let etype_of_evar evs hyps concl = let rec aux acc n = function (id, copt, t) :: tl -> let t', s, trans = subst_evar_constr evs n mkVar t in let t'' = subst_vars acc 0 t' in let rest, s', trans' = aux (id :: acc) (succ n) tl in let s' = Intset.union s s' in let trans' = Idset.union trans trans' in (match copt with Some c -> let c', s'', trans'' = subst_evar_constr evs n mkVar c in let c' = subst_vars acc 0 c' in mkNamedProd_or_LetIn (id, Some c', t'') rest, Intset.union s'' s', Idset.union trans'' trans' | None -> mkNamedProd_or_LetIn (id, None, t'') rest, s', trans') | [] -> let t', s, trans = subst_evar_constr evs n mkVar concl in subst_vars acc 0 t', s, trans in aux [] 0 (rev hyps) open Tacticals let trunc_named_context n ctx = let len = List.length ctx in list_firstn (len - n) ctx let rec chop_product n t = if n = 0 then Some t else match kind_of_term t with | Prod (_, _, b) -> if noccurn 1 b then chop_product (pred n) (Termops.pop b) else None | _ -> None let evars_of_evar_info evi = Intset.union (Evarutil.evars_of_term evi.evar_concl) (Intset.union (match evi.evar_body with | Evar_empty -> Intset.empty | Evar_defined b -> Evarutil.evars_of_term b) (Evarutil.evars_of_named_context (evar_filtered_context evi))) let evar_dependencies evm oev = let one_step deps = Intset.fold (fun ev s -> let evi = Evd.find evm ev in let deps' = evars_of_evar_info evi in if Intset.mem oev deps' then raise (Invalid_argument ("Ill-formed evar map: cycle detected for evar " ^ string_of_int oev)) else Intset.union deps' s) deps deps in let rec aux deps = let deps' = one_step deps in if Intset.equal deps deps' then deps else aux deps' in aux (Intset.singleton oev) let move_after (id, ev, deps as obl) l = let rec aux restdeps = function | (id', _, _) as obl' :: tl -> let restdeps' = Intset.remove id' restdeps in if Intset.is_empty restdeps' then obl' :: obl :: tl else obl' :: aux restdeps' tl | [] -> [obl] in aux (Intset.remove id deps) l let sort_dependencies evl = let rec aux l found list = match l with | (id, ev, deps) as obl :: tl -> let found' = Intset.union found (Intset.singleton id) in if Intset.subset deps found' then aux tl found' (obl :: list) else aux (move_after obl tl) found list | [] -> List.rev list in aux evl Intset.empty [] let map_evar_body f = function | Evar_empty -> Evar_empty | Evar_defined c -> Evar_defined (f c) open Environ let map_evar_info f evi = { evi with evar_hyps = val_of_named_context (map_named_context f (named_context_of_val evi.evar_hyps)); evar_concl = f evi.evar_concl; evar_body = map_evar_body f evi.evar_body } let eterm_obligations env name isevars evm fs ?status t ty = (* 'Serialize' the evars *) let nc = Environ.named_context env in let nc_len = Sign.named_context_length nc in let evl = List.rev (to_list evm) in let evl = List.map (fun (id, ev) -> (id, ev, evar_dependencies evm id)) evl in let sevl = sort_dependencies evl in let evl = List.map (fun (id, ev, _) -> id, ev) sevl in let evn = let i = ref (-1) in List.rev_map (fun (id, ev) -> incr i; (id, (!i, id_of_string (string_of_id name ^ "_obligation_" ^ string_of_int (succ !i))), ev)) evl in let evts = (* Remove existential variables in types and build the corresponding products *) fold_right (fun (id, (n, nstr), ev) l -> let hyps = Evd.evar_filtered_context ev in let hyps = trunc_named_context nc_len hyps in let evtyp, deps, transp = etype_of_evar l hyps ev.evar_concl in let evtyp, hyps, chop = match chop_product fs evtyp with | Some t -> t, trunc_named_context fs hyps, fs | None -> evtyp, hyps, 0 in let loc, k 
= evar_source id isevars in let status = match k with QuestionMark o -> Some o | _ -> status in let status, chop = match status with | Some (Define true as stat) -> if chop <> fs then Define false, None else stat, Some chop | Some s -> s, None | None -> Define true, None in let tac = match evar_tactic.get ev.evar_extra with | Some t -> if Dyn.tag t = "tactic" then Some (Tacinterp.interp (Tacinterp.globTacticIn (Tacinterp.tactic_out t))) else None | None -> None in let info = { ev_name = (n, nstr); ev_hyps = hyps; ev_status = status; ev_chop = chop; ev_src = loc, k; ev_typ = evtyp ; ev_deps = deps; ev_tac = tac } in (id, info) :: l) evn [] in let t', _, transparent = (* Substitute evar refs in the term by variables *) subst_evar_constr evts 0 mkVar t in let ty, _, _ = subst_evar_constr evts 0 mkVar ty in let evars = List.map (fun (ev, info) -> let { ev_name = (_, name); ev_status = status; ev_src = src; ev_typ = typ; ev_deps = deps; ev_tac = tac } = info in let status = match status with | Define true when Idset.mem name transparent -> Define false | _ -> status in name, typ, src, status, deps, tac) evts in let evnames = List.map (fun (ev, info) -> ev, snd info.ev_name) evts in let evmap f c = pi1 (subst_evar_constr evts 0 f c) in Array.of_list (List.rev evars), (evnames, evmap), t', ty let mkMetas n = list_tabulate (fun _ -> Evarutil.mk_new_meta ()) n coq-8.4pl2/plugins/subtac/subtac_errors.ml0000640000175000001440000000121211254456226020003 0ustar notinusersopen Util open Pp open Printer type term_pp = Pp.std_ppcmds type subtyping_error = | UncoercibleInferType of loc * term_pp * term_pp | UncoercibleInferTerm of loc * term_pp * term_pp * term_pp * term_pp | UncoercibleRewrite of term_pp * term_pp type typing_error = | NonFunctionalApp of loc * term_pp * term_pp * term_pp | NonConvertible of loc * term_pp * term_pp | NonSigma of loc * term_pp | IllSorted of loc * term_pp exception Subtyping_error of subtyping_error exception Typing_error of typing_error exception Debug_msg of string let typing_error e = raise (Typing_error e) let subtyping_error e = raise (Subtyping_error e) coq-8.4pl2/plugins/subtac/subtac_command.ml0000640000175000001440000004736112121620060020103 0ustar notinusersopen Closure open RedFlags open Declarations open Entries open Libobject open Pattern open Matching open Pp open Glob_term open Sign open Tacred open Util open Names open Nameops open Libnames open Nametab open Pfedit open Proof_type open Refiner open Tacmach open Tactic_debug open Topconstr open Term open Tacexpr open Safe_typing open Typing open Hiddentac open Genarg open Decl_kinds open Mod_subst open Printer open Inductiveops open Syntax_def open Environ open Tactics open Tacticals open Tacinterp open Vernacexpr open Notation open Evd open Evarutil module SPretyping = Subtac_pretyping.Pretyping open Subtac_utils open Pretyping open Subtac_obligations (*********************************************************************) (* Functions to parse and interpret constructions *) let evar_nf isevars c = Evarutil.nf_evar !isevars c let interp_gen kind isevars env ?(impls=Constrintern.empty_internalization_env) ?(allow_patvar=false) ?(ltacvars=([],[])) c = let c' = Constrintern.intern_gen (kind=IsType) ~impls ~allow_patvar ~ltacvars ( !isevars) env c in let c' = SPretyping.understand_tcc_evars isevars env kind c' in evar_nf isevars c' let interp_constr isevars env c = interp_gen (OfType None) isevars env c let interp_type_evars isevars env ?(impls=Constrintern.empty_internalization_env) c = interp_gen IsType 
isevars env ~impls c let interp_casted_constr isevars env ?(impls=Constrintern.empty_internalization_env) c typ = interp_gen (OfType (Some typ)) isevars env ~impls c let interp_casted_constr_evars isevars env ?(impls=Constrintern.empty_internalization_env) c typ = interp_gen (OfType (Some typ)) isevars env ~impls c let interp_open_constr isevars env c = msgnl (str "Pretyping " ++ my_print_constr_expr c); let c = Constrintern.intern_constr ( !isevars) env c in let c' = SPretyping.understand_tcc_evars isevars env (OfType None) c in evar_nf isevars c' let interp_constr_judgment isevars env c = let j = SPretyping.understand_judgment_tcc isevars env (Constrintern.intern_constr ( !isevars) env c) in { uj_val = evar_nf isevars j.uj_val; uj_type = evar_nf isevars j.uj_type } let locate_if_isevar loc na = function | GHole _ -> (try match na with | Name id -> glob_constr_of_aconstr loc (Reserve.find_reserved_type id) | Anonymous -> raise Not_found with Not_found -> GHole (loc, Evd.BinderType na)) | x -> x let interp_binder sigma env na t = let t = Constrintern.intern_gen true ( !sigma) env t in SPretyping.understand_tcc_evars sigma env IsType (locate_if_isevar (loc_of_glob_constr t) na t) let interp_context_evars evdref env params = let int_env, bl = Constrintern.intern_context false !evdref env Constrintern.empty_internalization_env params in let (env, par, _, impls) = List.fold_left (fun (env,params,n,impls) (na, k, b, t) -> match b with None -> let t' = locate_if_isevar (loc_of_glob_constr t) na t in let t = SPretyping.understand_tcc_evars evdref env IsType t' in let d = (na,None,t) in let impls = if k = Implicit then let na = match na with Name n -> Some n | Anonymous -> None in (ExplByPos (n, na), (true, true, true)) :: impls else impls in (push_rel d env, d::params, succ n, impls) | Some b -> let c = SPretyping.understand_judgment_tcc evdref env b in let d = (na, Some c.uj_val, c.uj_type) in (push_rel d env,d::params, succ n, impls)) (env,[],1,[]) (List.rev bl) in (env, par), impls (* try to find non recursive definitions *) let list_chop_hd i l = match list_chop i l with | (l1,x::l2) -> (l1,x,l2) | (x :: [], l2) -> ([], x, []) | _ -> assert(false) let collect_non_rec env = let rec searchrec lnonrec lnamerec ldefrec larrec nrec = try let i = list_try_find_i (fun i f -> if List.for_all (fun (_, def) -> not (Termops.occur_var env f def)) ldefrec then i else failwith "try_find_i") 0 lnamerec in let (lf1,f,lf2) = list_chop_hd i lnamerec in let (ldef1,def,ldef2) = list_chop_hd i ldefrec in let (lar1,ar,lar2) = list_chop_hd i larrec in let newlnv = try match list_chop i nrec with | (lnv1,_::lnv2) -> (lnv1@lnv2) | _ -> [] (* nrec=[] for cofixpoints *) with Failure "list_chop" -> [] in searchrec ((f,def,ar)::lnonrec) (lf1@lf2) (ldef1@ldef2) (lar1@lar2) newlnv with Failure "try_find_i" -> (List.rev lnonrec, (Array.of_list lnamerec, Array.of_list ldefrec, Array.of_list larrec, Array.of_list nrec)) in searchrec [] let list_of_local_binders l = let rec aux acc = function Topconstr.LocalRawDef (n, c) :: tl -> aux ((n, Some c, None) :: acc) tl | Topconstr.LocalRawAssum (nl, k, c) :: tl -> aux (List.fold_left (fun acc n -> (n, None, Some c) :: acc) acc nl) tl | [] -> List.rev acc in aux [] l let lift_binders k n l = let rec aux n = function | (id, t, c) :: tl -> (id, Option.map (liftn k n) t, liftn k n c) :: aux (pred n) tl | [] -> [] in aux n l let rec gen_rels = function 0 -> [] | n -> mkRel n :: gen_rels (pred n) let split_args n rel = match list_chop ((List.length rel) - n) rel with (l1, x :: l2) -> l1, 
x, l2 | _ -> assert(false) open Coqlib let sigT = Lazy.lazy_from_fun build_sigma_type let sigT_info = lazy { ci_ind = destInd (Lazy.force sigT).typ; ci_npar = 2; ci_cstr_ndecls = [|2|]; ci_pp_info = { ind_nargs = 0; style = LetStyle } } let rec telescope = function | [] -> assert false | [(n, None, t)] -> t, [n, Some (mkRel 1), t], mkRel 1 | (n, None, t) :: tl -> let ty, tys, (k, constr) = List.fold_left (fun (ty, tys, (k, constr)) (n, b, t) -> let pred = mkLambda (n, t, ty) in let sigty = mkApp ((Lazy.force sigT).typ, [|t; pred|]) in let intro = mkApp ((Lazy.force sigT).intro, [|lift k t; lift k pred; mkRel k; constr|]) in (sigty, pred :: tys, (succ k, intro))) (t, [], (2, mkRel 1)) tl in let (last, subst) = List.fold_right2 (fun pred (n, b, t) (prev, subst) -> let proj1 = applistc (Lazy.force sigT).proj1 [t; pred; prev] in let proj2 = applistc (Lazy.force sigT).proj2 [t; pred; prev] in (lift 1 proj2, (n, Some proj1, t) :: subst)) (List.rev tys) tl (mkRel 1, []) in ty, ((n, Some last, t) :: subst), constr | (n, Some b, t) :: tl -> let ty, subst, term = telescope tl in ty, ((n, Some b, t) :: subst), lift 1 term let nf_evar_context isevars ctx = List.map (fun (n, b, t) -> (n, Option.map (Evarutil.nf_evar isevars) b, Evarutil.nf_evar isevars t)) ctx let build_wellfounded (recname,n,bl,arityc,body) r measure notation = Coqlib.check_required_library ["Coq";"Program";"Wf"]; let sigma = Evd.empty in let isevars = ref (Evd.create_evar_defs sigma) in let env = Global.env() in let _pr c = my_print_constr env c in let _prr = Printer.pr_rel_context env in let _prn = Printer.pr_named_context env in let _pr_rel env = Printer.pr_rel_context env in let (env', binders_rel), impls = interp_context_evars isevars env bl in let len = List.length binders_rel in let top_env = push_rel_context binders_rel env in let top_arity = interp_type_evars isevars top_env arityc in let full_arity = it_mkProd_or_LetIn top_arity binders_rel in let argtyp, letbinders, make = telescope binders_rel in let argname = id_of_string "recarg" in let arg = (Name argname, None, argtyp) in let binders = letbinders @ [arg] in let binders_env = push_rel_context binders_rel env in let rel = interp_constr isevars env r in let relty = type_of env !isevars rel in let relargty = let error () = user_err_loc (constr_loc r, "Subtac_command.build_wellfounded", my_print_constr env rel ++ str " is not an homogeneous binary relation.") in try let ctx, ar = Reductionops.splay_prod_n env !isevars 2 relty in match ctx, kind_of_term ar with | [(_, None, t); (_, None, u)], Sort (Prop Null) when Reductionops.is_conv env !isevars t u -> t | _, _ -> error () with e when Errors.noncritical e -> error () in let measure = interp_casted_constr isevars binders_env measure relargty in let wf_rel, wf_rel_fun, measure_fn = let measure_body, measure = it_mkLambda_or_LetIn measure letbinders, it_mkLambda_or_LetIn measure binders in let comb = constr_of_global (delayed_force measure_on_R_ref) in let wf_rel = mkApp (comb, [| argtyp; relargty; rel; measure |]) in let wf_rel_fun x y = mkApp (rel, [| subst1 x measure_body; subst1 y measure_body |]) in wf_rel, wf_rel_fun, measure in let wf_proof = mkApp (delayed_force well_founded, [| argtyp ; wf_rel |]) in let argid' = id_of_string (string_of_id argname ^ "'") in let wfarg len = (Name argid', None, mkSubset (Name argid') argtyp (wf_rel_fun (mkRel 1) (mkRel (len + 1)))) in let intern_bl = wfarg 1 :: [arg] in let _intern_env = push_rel_context intern_bl env in let proj = (delayed_force sig_).Coqlib.proj1 in let wfargpred = 
mkLambda (Name argid', argtyp, wf_rel_fun (mkRel 1) (mkRel 3)) in let projection = (* in wfarg :: arg :: before *) mkApp (proj, [| argtyp ; wfargpred ; mkRel 1 |]) in let top_arity_let = it_mkLambda_or_LetIn top_arity letbinders in let intern_arity = substl [projection] top_arity_let in (* substitute the projection of wfarg for something, now intern_arity is in wfarg :: arg *) let intern_fun_arity_prod = it_mkProd_or_LetIn intern_arity [wfarg 1] in let intern_fun_binder = (Name (add_suffix recname "'"), None, intern_fun_arity_prod) in let curry_fun = let wfpred = mkLambda (Name argid', argtyp, wf_rel_fun (mkRel 1) (mkRel (2 * len + 4))) in let arg = mkApp ((delayed_force sig_).intro, [| argtyp; wfpred; lift 1 make; mkRel 1 |]) in let app = mkApp (mkRel (2 * len + 2 (* recproof + orig binders + current binders *)), [| arg |]) in let rcurry = mkApp (rel, [| measure; lift len measure |]) in let lam = (Name (id_of_string "recproof"), None, rcurry) in let body = it_mkLambda_or_LetIn app (lam :: binders_rel) in let ty = it_mkProd_or_LetIn (lift 1 top_arity) (lam :: binders_rel) in (Name recname, Some body, ty) in let fun_bl = intern_fun_binder :: [arg] in let lift_lets = Termops.lift_rel_context 1 letbinders in let intern_body = let ctx = (Name recname, None, pi3 curry_fun) :: binders_rel in let (r, l, impls, scopes) = Constrintern.compute_internalization_data env Constrintern.Recursive full_arity impls in let newimpls = Idmap.singleton recname (r, l, impls @ [(Some (id_of_string "recproof", Impargs.Manual, (true, false)))], scopes @ [None]) in interp_casted_constr isevars ~impls:newimpls (push_rel_context ctx env) body (lift 1 top_arity) in let intern_body_lam = it_mkLambda_or_LetIn intern_body (curry_fun :: lift_lets @ fun_bl) in let prop = mkLambda (Name argname, argtyp, top_arity_let) in let def = mkApp (constr_of_global (delayed_force fix_sub_ref), [| argtyp ; wf_rel ; make_existential dummy_loc ~opaque:(Define false) env isevars wf_proof ; prop ; intern_body_lam |]) in let _ = isevars := Evarutil.nf_evar_map !isevars in let binders_rel = nf_evar_context !isevars binders_rel in let binders = nf_evar_context !isevars binders in let top_arity = Evarutil.nf_evar !isevars top_arity in let hook, recname, typ = if List.length binders_rel > 1 then let name = add_suffix recname "_func" in let hook l gr = let body = it_mkLambda_or_LetIn (mkApp (constr_of_global gr, [|make|])) binders_rel in let ty = it_mkProd_or_LetIn top_arity binders_rel in let ce = { const_entry_body = Evarutil.nf_evar !isevars body; const_entry_secctx = None; const_entry_type = Some ty; const_entry_opaque = false } in let c = Declare.declare_constant recname (DefinitionEntry ce, IsDefinition Definition) in let gr = ConstRef c in if Impargs.is_implicit_args () || impls <> [] then Impargs.declare_manual_implicits false gr [impls] in let typ = it_mkProd_or_LetIn top_arity binders in hook, name, typ else let typ = it_mkProd_or_LetIn top_arity binders_rel in let hook l gr = if Impargs.is_implicit_args () || impls <> [] then Impargs.declare_manual_implicits false gr [impls] in hook, recname, typ in let fullcoqc = Evarutil.nf_evar !isevars def in let fullctyp = Evarutil.nf_evar !isevars typ in let evm = evars_of_term !isevars Evd.empty fullctyp in let evm = evars_of_term !isevars evm fullcoqc in let evm = non_instanciated_map env isevars evm in let evars, _, evars_def, evars_typ = Eterm.eterm_obligations env recname !isevars evm 0 fullcoqc fullctyp in Subtac_obligations.add_definition recname ~term:evars_def evars_typ evars ~hook let 
interp_fix_context evdref env fix = interp_context_evars evdref env fix.Command.fix_binders let interp_fix_ccl evdref (env,_) fix = interp_type_evars evdref env fix.Command.fix_type let interp_fix_body evdref env_rec impls (_,ctx) fix ccl = let env = push_rel_context ctx env_rec in let body = Option.map (fun c -> interp_casted_constr_evars evdref env ~impls c ccl) fix.Command.fix_body in Option.map (fun c -> it_mkLambda_or_LetIn c ctx) body let build_fix_type (_,ctx) ccl = it_mkProd_or_LetIn ccl ctx let prepare_recursive_declaration fixnames fixtypes fixdefs = let defs = List.map (subst_vars (List.rev fixnames)) fixdefs in let names = List.map (fun id -> Name id) fixnames in (Array.of_list names, Array.of_list fixtypes, Array.of_list defs) let rel_index n ctx = list_index0 (Name n) (List.rev_map pi1 (List.filter (fun x -> pi2 x = None) ctx)) let rec unfold f b = match f b with | Some (x, b') -> x :: unfold f b' | None -> [] let find_annot loc id ctx = try rel_index id ctx with Not_found -> user_err_loc(loc,"", str "No parameter named " ++ Nameops.pr_id id ++ str".") let compute_possible_guardness_evidences (n,_) (_, fixctx) fixtype = match n with | Some (loc, id) -> [find_annot loc id fixctx] | None -> (* If recursive argument was not given by user, we try all args. An earlier approach was to look only for inductive arguments, but doing it properly involves delta-reduction, and it finally doesn't seem to worth the effort (except for huge mutual fixpoints ?) *) let len = List.length fixctx in unfold (function x when x = len -> None | n -> Some (n, succ n)) 0 let push_named_context = List.fold_right push_named let check_evars env initial_sigma evd c = let sigma = evd in let c = nf_evar sigma c in let rec proc_rec c = match kind_of_term c with | Evar (evk,args) -> assert (Evd.mem sigma evk); if not (Evd.mem initial_sigma evk) then let (loc,k) = evar_source evk evd in (match k with | QuestionMark _ | ImplicitArg (_, _, false) -> () | _ -> let evi = nf_evar_info sigma (Evd.find sigma evk) in Pretype_errors.error_unsolvable_implicit loc env sigma evi k None) | _ -> iter_constr proc_rec c in proc_rec c let out_def = function | Some def -> def | None -> error "Program Fixpoint needs defined bodies." 
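(* Usage sketch added for illustration (the binding [_all_arg_positions] is
   not part of the original source): [unfold] above is a small anamorphism
   used to enumerate candidate recursive-argument positions when the user
   gave no [struct] annotation; e.g. [_all_arg_positions 3] evaluates to
   [0; 1; 2], mirroring the [None] branch of
   [compute_possible_guardness_evidences]. *)
let _all_arg_positions len =
  unfold (function x when x = len -> None | n -> Some (n, succ n)) 0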
let interp_recursive fixkind l = let env = Global.env() in let fixl, ntnl = List.split l in let kind = fixkind <> IsCoFixpoint in let fixnames = List.map (fun fix -> fix.Command.fix_name) fixl in (* Interp arities allowing for unresolved types *) let evdref = ref Evd.empty in let fixctxs, fiximps = List.split (List.map (interp_fix_context evdref env) fixl) in let fixccls = List.map2 (interp_fix_ccl evdref) fixctxs fixl in let fixtypes = List.map2 build_fix_type fixctxs fixccls in let rec_sign = List.fold_left2 (fun env' id t -> let sort = Retyping.get_type_of env !evdref t in let fixprot = try mkApp (delayed_force Subtac_utils.fix_proto, [|sort; t|]) with e when Errors.noncritical e -> t in (id,None,fixprot) :: env') [] fixnames fixtypes in let env_rec = push_named_context rec_sign env in (* Get interpretation metadatas *) let impls = Constrintern.compute_internalization_env env Constrintern.Recursive fixnames fixtypes fiximps in let notations = List.flatten ntnl in (* Interp bodies with rollback because temp use of notations/implicit *) let fixdefs = States.with_state_protection (fun () -> List.iter (Metasyntax.set_notation_for_interpretation impls) notations; list_map3 (interp_fix_body evdref env_rec impls) fixctxs fixl fixccls) () in let fixdefs = List.map out_def fixdefs in (* Instantiate evars and check all are resolved *) let evd = Evarconv.consider_remaining_unif_problems env_rec !evdref in let evd = Typeclasses.resolve_typeclasses ~filter:Typeclasses.no_goals ~split:true ~fail:false env_rec evd in let evd = Evarutil.nf_evar_map evd in let fixdefs = List.map (nf_evar evd) fixdefs in let fixtypes = List.map (nf_evar evd) fixtypes in let rec_sign = nf_named_context_evar evd rec_sign in let recdefs = List.length rec_sign in List.iter (check_evars env_rec Evd.empty evd) fixdefs; List.iter (check_evars env Evd.empty evd) fixtypes; Command.check_mutuality env kind (List.combine fixnames fixdefs); (* Russell-specific code *) (* Get the interesting evars, those that were not instanciated *) let isevars = Evd.undefined_evars evd in let evm = isevars in (* Solve remaining evars *) let rec collect_evars id def typ imps = (* Generalize by the recursive prototypes *) let def = Termops.it_mkNamedLambda_or_LetIn def rec_sign and typ = Termops.it_mkNamedProd_or_LetIn typ rec_sign in let evm' = Subtac_utils.evars_of_term evm Evd.empty def in let evm' = Subtac_utils.evars_of_term evm evm' typ in let evars, _, def, typ = Eterm.eterm_obligations env id isevars evm' recdefs def typ in (id, def, typ, imps, evars) in let defs = list_map4 collect_evars fixnames fixdefs fixtypes fiximps in (match fixkind with | IsFixpoint wfl -> let possible_indexes = list_map3 compute_possible_guardness_evidences wfl fixctxs fixtypes in let fixdecls = Array.of_list (List.map (fun x -> Name x) fixnames), Array.of_list fixtypes, Array.of_list (List.map (subst_vars (List.rev fixnames)) fixdefs) in let indexes = Pretyping.search_guard dummy_loc (Global.env ()) possible_indexes fixdecls in list_iter_i (fun i _ -> Inductive.check_fix env ((indexes,i),fixdecls)) l | IsCoFixpoint -> ()); Subtac_obligations.add_mutual_definitions defs notations fixkind let out_n = function Some n -> n | None -> raise Not_found let build_recursive l = let g = List.map (fun ((_,wf,_,_,_),_) -> wf) l in match g, l with [(n, CWfRec r)], [(((_,id),_,bl,typ,def),ntn)] -> ignore(build_wellfounded (id, n, bl, typ, out_def def) r (match n with Some n -> mkIdentC (snd n) | None -> errorlabstrm "Subtac_command.build_recursive" (str "Recursive argument 
required for well-founded fixpoints")) ntn) | [(n, CMeasureRec (m, r))], [(((_,id),_,bl,typ,def),ntn)] -> ignore(build_wellfounded (id, n, bl, typ, out_def def) (Option.default (CRef lt_ref) r) m ntn) | _, _ when List.for_all (fun (n, ro) -> ro = CStructRec) g -> let fixl = List.map (fun (((_,id),(n,ro),bl,typ,def),ntn) -> ({Command.fix_name = id; Command.fix_binders = bl; Command.fix_annot = n; Command.fix_body = def; Command.fix_type = typ},ntn)) l in interp_recursive (IsFixpoint g) fixl | _, _ -> errorlabstrm "Subtac_command.build_recursive" (str "Well-founded fixpoints not allowed in mutually recursive blocks") let build_corecursive l = let fixl = List.map (fun (((_,id),bl,typ,def),ntn) -> ({Command.fix_name = id; Command.fix_binders = bl; Command.fix_annot = None; Command.fix_body = def; Command.fix_type = typ},ntn)) l in interp_recursive IsCoFixpoint fixl coq-8.4pl2/plugins/subtac/subtac.ml0000640000175000001440000001730312121620060016376 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constant_value (Global.env()) (match cst with ConstRef kn -> kn | _ -> assert false) | _ -> errorlabstrm "start_proof" (str "The statement obligations could not be resolved automatically, " ++ spc () ++ str "write a statement definition first.") else let _ = Typeops.infer_type env c in c let start_proof_com env isevars sopt kind (bl,t) hook = let id = match sopt with | Some (loc,id) -> (* We check existence here: it's a bit late at Qed time *) if Nametab.exists_cci (Lib.make_path id) or is_section_variable id then user_err_loc (loc,"start_proof",pr_id id ++ str " already exists"); id | None -> next_global_ident_away (id_of_string "Unnamed_thm") (Pfedit.get_all_proof_names ()) in let evm, c, typ, imps = Subtac_pretyping.subtac_process ~is_type:true env isevars id [] (Topconstr.prod_constr_expr t bl) None in let c = solve_tccs_in_type env id isevars evm c typ in Lemmas.start_proof id kind c (fun loc gr -> Impargs.declare_manual_implicits (loc = Local) gr ~enriching:true [imps]; hook loc gr) let start_proof_and_print env isevars idopt k t hook = start_proof_com env isevars idopt k t hook; Vernacentries.print_subgoals () let _ = Detyping.set_detype_anonymous (fun loc n -> GVar (loc, id_of_string ("Anonymous_REL_" ^ string_of_int n))) let assumption_message id = Flags.if_verbose message ((string_of_id id) ^ " is assumed") let declare_assumptions env isevars idl is_coe k bl c nl = if not (Pfedit.refining ()) then let id = snd (List.hd idl) in let evm, c, typ, imps = Subtac_pretyping.subtac_process env isevars id [] (Topconstr.prod_constr_expr c bl) None in let c = solve_tccs_in_type env id isevars evm c typ in List.iter (Command.declare_assumption is_coe k c imps false nl) idl else errorlabstrm "Command.Assumption" (str "Cannot declare an assumption while in proof editing mode.") let dump_constraint ty ((loc, n), _, _) = match n with | Name id -> Dumpglob.dump_definition (loc, id) false ty | Anonymous -> () let dump_variable lid = () let vernac_assumption env isevars kind l nl = let global = fst kind = Global in List.iter (fun (is_coe,(idl,c)) -> if Dumpglob.dump () then List.iter (fun lid -> if global then Dumpglob.dump_definition lid (not global) "ax" else dump_variable lid) idl; declare_assumptions env isevars idl is_coe kind [] c nl) l let check_fresh (loc,id) = if Nametab.exists_cci (Lib.make_path id) or is_section_variable id then user_err_loc (loc,"",pr_id id ++ str " already exists") let subtac 
(loc, command) = check_required_library ["Coq";"Init";"Datatypes"]; check_required_library ["Coq";"Init";"Specif"]; let env = Global.env () in let isevars = ref (create_evar_defs Evd.empty) in try match command with | VernacDefinition (defkind, (_, id as lid), expr, hook) -> check_fresh lid; Dumpglob.dump_definition lid false "def"; (match expr with | ProveBody (bl, t) -> start_proof_and_print env isevars (Some lid) (Global, DefinitionBody Definition) (bl,t) (fun _ _ -> ()) | DefineBody (bl, _, c, tycon) -> ignore(Subtac_pretyping.subtac_proof defkind hook env isevars id bl c tycon)) | VernacFixpoint l -> List.iter (fun ((lid, _, _, _, _), _) -> check_fresh lid; Dumpglob.dump_definition lid false "fix") l; let _ = trace (str "Building fixpoint") in ignore(Subtac_command.build_recursive l) | VernacStartTheoremProof (thkind, [Some id, (bl,t,guard)], lettop, hook) -> if guard <> None then error "Do not support building theorems as a fixpoint."; Dumpglob.dump_definition id false "prf"; if not(Pfedit.refining ()) then if lettop then errorlabstrm "Subtac_command.StartProof" (str "Let declarations can only be used in proof editing mode"); if Lib.is_modtype () then errorlabstrm "Subtac_command.StartProof" (str "Proof editing mode not supported in module types"); check_fresh id; start_proof_and_print env isevars (Some id) (Global, Proof thkind) (bl,t) hook | VernacAssumption (stre,nl,l) -> vernac_assumption env isevars stre l nl | VernacInstance (abst, glob, sup, is, props, pri) -> dump_constraint "inst" is; if abst then error "Declare Instance not supported here."; ignore(Subtac_classes.new_instance ~global:glob sup is props pri) | VernacCoFixpoint l -> if Dumpglob.dump () then List.iter (fun ((lid, _, _, _), _) -> Dumpglob.dump_definition lid false "cofix") l; ignore(Subtac_command.build_corecursive l) (*| VernacEndProof e -> subtac_end_proof e*) | _ -> user_err_loc (loc,"", str ("Invalid Program command")) with | Typing_error e -> msg_warning (str "Type error in Program tactic:"); let cmds = (match e with | NonFunctionalApp (loc, x, mux, e) -> str "non functional application of term " ++ e ++ str " to function " ++ x ++ str " of (mu) type " ++ mux | NonSigma (loc, t) -> str "Term is not of Sigma type: " ++ t | NonConvertible (loc, x, y) -> str "Unconvertible terms:" ++ spc () ++ x ++ spc () ++ str "and" ++ spc () ++ y | IllSorted (loc, t) -> str "Term is ill-sorted:" ++ spc () ++ t ) in msg_warning cmds | Subtyping_error e -> msg_warning (str "(Program tactic) Subtyping error:"); let cmds = match e with | UncoercibleInferType (loc, x, y) -> str "Uncoercible terms:" ++ spc () ++ x ++ spc () ++ str "and" ++ spc () ++ y | UncoercibleInferTerm (loc, x, y, tx, ty) -> str "Uncoercible terms:" ++ spc () ++ tx ++ spc () ++ str "of" ++ spc () ++ str "type" ++ spc () ++ x ++ str "and" ++ spc() ++ ty ++ spc () ++ str "of" ++ spc () ++ str "type" ++ spc () ++ y | UncoercibleRewrite (x, y) -> str "Uncoercible terms:" ++ spc () ++ x ++ spc () ++ str "and" ++ spc () ++ y in msg_warning cmds | Cases.PatternMatchingError (env, exn) as e -> raise e | Type_errors.TypeError (env, exn) as e -> raise e | Pretype_errors.PretypeError (env, _, exn) as e -> raise e | (Loc.Exc_located (loc, Proof_type.LtacLocated (_,e')) | Loc.Exc_located (loc, e') as e) -> raise e | reraise -> (* msg_warning (str "Uncaught exception: " ++ Errors.print e); *) raise reraise coq-8.4pl2/plugins/subtac/subtac_pretyping.ml0000640000175000001440000001153612010532755020513 0ustar 
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop *) wf_proof: constr; (* : well_founded R *) f_type: types; (* f: A -> Set *) f_fulltype: types; (* Type with argument and wf proof product first *) } let my_print_rec_info env t = str "Name: " ++ Nameops.pr_name t.arg_name ++ spc () ++ str "Arg type: " ++ my_print_constr env t.arg_type ++ spc () ++ str "Wf relation: " ++ my_print_constr env t.wf_relation ++ spc () ++ str "Wf proof: " ++ my_print_constr env t.wf_proof ++ spc () ++ str "Abbreviated Type: " ++ my_print_constr env t.f_type ++ spc () ++ str "Full type: " ++ my_print_constr env t.f_fulltype (* trace (str "pretype for " ++ (my_print_glob_constr env c) ++ *) (* str " and tycon "++ my_print_tycon env tycon ++ *) (* str " in environment: " ++ my_print_env env); *) let interp env isevars c tycon = let j = pretype tycon env isevars ([],[]) c in let _ = isevars := Evarutil.nf_evar_map !isevars in let evd = consider_remaining_unif_problems env !isevars in (* let unevd = undefined_evars evd in *) let unevd' = Typeclasses.resolve_typeclasses ~filter:Subtac_utils.no_goals_or_obligations ~split:true ~fail:true env evd in let unevd' = Typeclasses.resolve_typeclasses ~filter:Typeclasses.all_evars ~split:true ~fail:false env unevd' in let evm = unevd' in isevars := unevd'; nf_evar evm j.uj_val, nf_evar evm j.uj_type let find_with_index x l = let rec aux i = function (y, _, _) as t :: tl -> if x = y then i, t else aux (succ i) tl | [] -> raise Not_found in aux 0 l open Vernacexpr let coqintern_constr evd env : Topconstr.constr_expr -> Glob_term.glob_constr = Constrintern.intern_constr evd env let coqintern_type evd env : Topconstr.constr_expr -> Glob_term.glob_constr = Constrintern.intern_type evd env let env_with_binders env isevars l = let rec aux ((env, rels) as acc) = function Topconstr.LocalRawDef ((loc, name), def) :: tl -> let rawdef = coqintern_constr !isevars env def in let coqdef, deftyp = interp env isevars rawdef empty_tycon in let reldecl = (name, Some coqdef, deftyp) in aux (push_rel reldecl env, reldecl :: rels) tl | Topconstr.LocalRawAssum (bl, k, typ) :: tl -> let rawtyp = coqintern_type !isevars env typ in let coqtyp, typtyp = interp env isevars rawtyp empty_tycon in let acc = List.fold_left (fun (env, rels) (loc, name) -> let reldecl = (name, None, coqtyp) in (push_rel reldecl env, reldecl :: rels)) (env, rels) bl in aux acc tl | [] -> acc in aux (env, []) l let subtac_process ?(is_type=false) env isevars id bl c tycon = let c = Topconstr.abstract_constr_expr c bl in let tycon, imps = match tycon with None -> empty_tycon, None | Some t -> let t = Topconstr.prod_constr_expr t bl in let t = coqintern_type !isevars env t in let imps = Implicit_quantifiers.implicits_of_glob_constr t in let coqt, ttyp = interp env isevars t empty_tycon in mk_tycon coqt, Some imps in let c = coqintern_constr !isevars env c in let imps = match imps with | Some i -> i | None -> Implicit_quantifiers.implicits_of_glob_constr ~with_products:is_type c in let coqc, ctyp = interp env isevars c tycon in let evm = non_instanciated_map env isevars !isevars in let ty = nf_evar !isevars (match tycon with Some (None, c) -> c | _ -> ctyp) in evm, coqc, ty, imps open Subtac_obligations let subtac_proof kind hook env isevars id bl c tycon = let evm, coqc, coqt, imps = subtac_process env isevars id bl c tycon in let evm' = Subtac_utils.evars_of_term evm Evd.empty coqc in let evm' = Subtac_utils.evars_of_term 
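(* At this point [coqc] and [coqt] are the elaborated body and type
   produced by [subtac_process]; the first pass above collected the
   evars of the body, and this second pass adds those of the type.
   [Eterm.eterm_obligations] then turns each remaining evar into a
   named obligation, and [add_definition] registers the definition
   together with its obligations. *)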
evm evm' coqt in let evars, _, def, ty = Eterm.eterm_obligations env id !isevars evm' 0 coqc coqt in add_definition id ~term:def ty ~implicits:imps ~kind ~hook evars coq-8.4pl2/plugins/subtac/subtac_obligations.mli0000640000175000001440000000520411603172617021154 0ustar notinusersopen Names open Util open Libnames open Evd open Proof_type open Vernacexpr type obligation_info = (identifier * Term.types * hole_kind located * obligation_definition_status * Intset.t * tactic option) array (* ident, type, location, (opaque or transparent, expand or define), dependencies, tactic to solve it *) type progress = (* Resolution status of a program *) | Remain of int (* n obligations remaining *) | Dependent (* Dependent on other definitions *) | Defined of global_reference (* Defined as id *) val set_default_tactic : bool -> Tacexpr.glob_tactic_expr -> unit val get_default_tactic : unit -> locality_flag * Proof_type.tactic val print_default_tactic : unit -> Pp.std_ppcmds val set_proofs_transparency : bool -> unit (* true = All transparent, false = Opaque if possible *) val get_proofs_transparency : unit -> bool val add_definition : Names.identifier -> ?term:Term.constr -> Term.types -> ?implicits:(Topconstr.explicitation * (bool * bool * bool)) list -> ?kind:Decl_kinds.definition_kind -> ?tactic:Proof_type.tactic -> ?reduce:(Term.constr -> Term.constr) -> ?hook:(Tacexpr.declaration_hook) -> obligation_info -> progress type notations = (Vernacexpr.lstring * Topconstr.constr_expr * Topconstr.scope_name option) list type fixpoint_kind = | IsFixpoint of (identifier located option * Topconstr.recursion_order_expr) list | IsCoFixpoint val add_mutual_definitions : (Names.identifier * Term.constr * Term.types * (Topconstr.explicitation * (bool * bool * bool)) list * obligation_info) list -> ?tactic:Proof_type.tactic -> ?kind:Decl_kinds.definition_kind -> ?reduce:(Term.constr -> Term.constr) -> ?hook:Tacexpr.declaration_hook -> notations -> fixpoint_kind -> unit val subtac_obligation : int * Names.identifier option * Topconstr.constr_expr option -> Tacexpr.raw_tactic_expr option -> unit val next_obligation : Names.identifier option -> Tacexpr.raw_tactic_expr option -> unit val solve_obligations : Names.identifier option -> Proof_type.tactic option -> progress (* Number of remaining obligations to be solved for this program *) val solve_all_obligations : Proof_type.tactic option -> unit val try_solve_obligation : int -> Names.identifier option -> Proof_type.tactic option -> unit val try_solve_obligations : Names.identifier option -> Proof_type.tactic option -> unit val show_obligations : ?msg:bool -> Names.identifier option -> unit val show_term : Names.identifier option -> unit val admit_obligations : Names.identifier option -> unit exception NoObligations of Names.identifier option val explain_no_obligations : Names.identifier option -> Pp.std_ppcmds coq-8.4pl2/plugins/subtac/eterm.mli0000640000175000001440000000315012010532755016407 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr list val evar_dependencies : evar_map -> int -> Intset.t val sort_dependencies : (int * evar_info * Intset.t) list -> (int * evar_info * Intset.t) list (* env, id, evars, number of function prototypes to try to clear from evars contexts, object and type *) val eterm_obligations : env -> identifier -> evar_map -> evar_map -> int -> ?status:obligation_definition_status -> constr -> types -> (identifier * types * 
hole_kind located * obligation_definition_status * Intset.t * tactic option) array (* Existential key, obl. name, type as product, location of the original evar, associated tactic, status and dependencies as indexes into the array *) * ((existential_key * identifier) list * ((identifier -> constr) -> constr -> constr)) * constr * types (* Translations from existential identifiers to obligation identifiers and for terms with existentials to closed terms, given a translation from obligation identifiers to constrs, new term, new type *) coq-8.4pl2/plugins/subtac/subtac_plugin.mllib0000640000175000001440000000026511272145402020452 0ustar notinusersSubtac_utils Eterm Subtac_errors Subtac_coercion Subtac_obligations Subtac_cases Subtac_pretyping_F Subtac_pretyping Subtac_command Subtac_classes Subtac G_subtac Subtac_plugin_mod coq-8.4pl2/plugins/subtac/subtac_errors.mli0000640000175000001440000000116711160567762020171 0ustar notinuserstype term_pp = Pp.std_ppcmds type subtyping_error = UncoercibleInferType of Util.loc * term_pp * term_pp | UncoercibleInferTerm of Util.loc * term_pp * term_pp * term_pp * term_pp | UncoercibleRewrite of term_pp * term_pp type typing_error = NonFunctionalApp of Util.loc * term_pp * term_pp * term_pp | NonConvertible of Util.loc * term_pp * term_pp | NonSigma of Util.loc * term_pp | IllSorted of Util.loc * term_pp exception Subtyping_error of subtyping_error exception Typing_error of typing_error exception Debug_msg of string val typing_error : typing_error -> 'a val subtyping_error : subtyping_error -> 'a coq-8.4pl2/plugins/subtac/subtac_pretyping_F.ml0000640000175000001440000005734512121620060020756 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* j | Some t -> evd_comb2 (Coercion.inh_conv_coerce_to loc env) evdref j t let push_rels vars env = List.fold_right push_rel vars env (* let evar_type_case evdref env ct pt lft p c = let (mind,bty,rslty) = type_case_branches env ( evdref) ct pt p c in check_branches_message evdref env mind (c,ct) (bty,lft); (mind,rslty) *) let strip_meta id = (* For Grammar v7 compatibility *) let s = string_of_id id in if s.[0]='$' then id_of_string (String.sub s 1 (String.length s - 1)) else id let invert_ltac_bound_name env id0 id = try mkRel (pi1 (Termops.lookup_rel_id id (rel_context env))) with Not_found -> errorlabstrm "" (str "Ltac variable " ++ pr_id id0 ++ str " depends on pattern variable name " ++ pr_id id ++ str " which is not bound in current context") let pretype_id loc env sigma (lvar,unbndltacvars) id = let id = strip_meta id in (* May happen in tactics defined by Grammar *) try let (n,_,typ) = Termops.lookup_rel_id id (rel_context env) in { uj_val = mkRel n; uj_type = lift n typ } with Not_found -> try let (ids,c) = List.assoc id lvar in let subst = List.map (invert_ltac_bound_name env id) ids in let c = substl subst c in { uj_val = c; uj_type = Retyping.get_type_of env sigma c } with Not_found -> try let (_,_,typ) = lookup_named id env in { uj_val = mkVar id; uj_type = typ } with Not_found -> try (* To build a nicer ltac error message *) match List.assoc id unbndltacvars with | None -> user_err_loc (loc,"", str "variable " ++ pr_id id ++ str " should be bound to a term") | Some id0 -> Pretype_errors.error_var_not_found_loc loc id0 with Not_found -> error_var_not_found_loc loc id (* make a dependent predicate from an undependent one *) let make_dep_of_undep env (IndType (indf,realargs)) pj = let n = List.length 
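(* What happens in [make_dep_of_undep]: [pj] is a return predicate
   abstracted only over the [n] real arguments of the inductive type;
   an extra anonymous abstraction over the matched object itself is
   added, so that e.g. a predicate [fun a => P a] over [I a] becomes
   [fun a (x : I a) => P a].  Both the value and the type of the
   judgement are rebuilt accordingly below. *)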
realargs in let rec decomp n p = if n=0 then p else match kind_of_term p with | Lambda (_,_,c) -> decomp (n-1) c | _ -> decomp (n-1) (applist (lift 1 p, [mkRel 1])) in let sign,s = decompose_prod_n n pj.uj_type in let ind = build_dependent_inductive env indf in let s' = mkProd (Anonymous, ind, s) in let ccl = lift 1 (decomp n pj.uj_val) in let ccl' = mkLambda (Anonymous, ind, ccl) in {uj_val=Termops.it_mkLambda ccl' sign; uj_type=Termops.it_mkProd s' sign} (*************************************************************************) (* Main pretyping function *) let pretype_ref evdref env ref = let c = constr_of_global ref in make_judge c (Retyping.get_type_of env Evd.empty c) let pretype_sort evdref = function | GProp c -> judge_of_prop_contents c | GType _ -> evd_comb0 judge_of_new_Type evdref let split_tycon_lam loc env evd tycon = let rec real_split evd c = let t = whd_betadeltaiota env evd c in match kind_of_term t with | Prod (na,dom,rng) -> evd, (na, dom, rng) | Evar ev when not (Evd.is_defined_evar evd ev) -> let (evd',prod) = define_evar_as_product evd ev in let (_,dom,rng) = destProd prod in evd',(Anonymous, dom, rng) | _ -> error_not_product_loc loc env evd c in match tycon with | None -> evd,(Anonymous,None,None) | Some (abs, c) -> (match abs with | None -> let evd', (n, dom, rng) = real_split evd c in evd', (n, mk_tycon dom, mk_tycon rng) | Some (init, cur) -> evd, (Anonymous, None, Some (Some (init, succ cur), c))) (* [pretype tycon env evdref lvar lmeta cstr] attempts to type [cstr] *) (* in environment [env], with existential variables [( evdref)] and *) (* the type constraint tycon *) let rec pretype (tycon : type_constraint) env evdref lvar c = (* let _ = try Subtac_utils.trace (str "pretype " ++ Subtac_utils.my_print_glob_constr env c ++ *) (* str " with tycon " ++ Evarutil.pr_tycon env tycon) *) (* with _ -> () *) (* in *) match c with | GRef (loc,ref) -> inh_conv_coerce_to_tycon loc env evdref (pretype_ref evdref env ref) tycon | GVar (loc, id) -> inh_conv_coerce_to_tycon loc env evdref (pretype_id loc env !evdref lvar id) tycon | GEvar (loc, ev, instopt) -> (* Ne faudrait-il pas s'assurer que hyps est bien un sous-contexte du contexte courant, et qu'il n'y a pas de Rel "cach" *) let hyps = evar_context (Evd.find !evdref ev) in let args = match instopt with | None -> instance_from_named_context hyps | Some inst -> failwith "Evar subtitutions not implemented" in let c = mkEvar (ev, args) in let j = (Retyping.get_judgment_of env !evdref c) in inh_conv_coerce_to_tycon loc env evdref j tycon | GPatVar (loc,(someta,n)) -> anomaly "Found a pattern variable in a glob_constr to type" | GHole (loc,k) -> let ty = match tycon with | Some (None, ty) -> ty | None | Some _ -> e_new_evar evdref env ~src:(loc, InternalHole) (Termops.new_Type ()) in { uj_val = e_new_evar evdref env ~src:(loc,k) ty; uj_type = ty } | GRec (loc,fixkind,names,bl,lar,vdef) -> let rec type_bl env ctxt = function [] -> ctxt | (na,k,None,ty)::bl -> let ty' = pretype_type empty_valcon env evdref lvar ty in let dcl = (na,None,ty'.utj_val) in type_bl (push_rel dcl env) (add_rel_decl dcl ctxt) bl | (na,k,Some bd,ty)::bl -> let ty' = pretype_type empty_valcon env evdref lvar ty in let bd' = pretype (mk_tycon ty'.utj_val) env evdref lvar ty in let dcl = (na,Some bd'.uj_val,ty'.utj_val) in type_bl (push_rel dcl env) (add_rel_decl dcl ctxt) bl in let ctxtv = Array.map (type_bl env empty_rel_context) bl in let larj = array_map2 (fun e ar -> pretype_type empty_valcon (push_rel_context e env) evdref lvar ar) ctxtv lar in 
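(* From here on, [ctxtv] holds the pretyped binder contexts of the
   mutual (co)fixpoints and [larj] their (co)domain types.  The full
   prototypes [ftys] are rebuilt below, and the copies pushed in the
   environment are first wrapped with [Subtac_utils.fix_proto] before
   [push_rec_types]; this appears to be how Program tags recursive
   prototypes so that they can be recognized later when obligations
   are generated (a hedged reading of the code below). *)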
let lara = Array.map (fun a -> a.utj_val) larj in let ftys = array_map2 (fun e a -> it_mkProd_or_LetIn a e) ctxtv lara in let nbfix = Array.length lar in let names = Array.map (fun id -> Name id) names in (* Note: bodies are not used by push_rec_types, so [||] is safe *) let newenv = let marked_ftys = Array.map (fun ty -> let sort = Retyping.get_type_of env !evdref ty in mkApp (delayed_force Subtac_utils.fix_proto, [| sort; ty |])) ftys in push_rec_types (names,marked_ftys,[||]) env in let fixi = match fixkind with GFix (vn, i) -> i | GCoFix i -> i in let vdefj = array_map2_i (fun i ctxt def -> let fty = let ty = ftys.(i) in if i = fixi then ( Option.iter (fun tycon -> evdref := Coercion.inh_conv_coerces_to loc env !evdref ftys.(i) tycon) tycon; nf_evar !evdref ty) else ty in (* we lift nbfix times the type in tycon, because of * the nbfix variables pushed to newenv *) let (ctxt,ty) = decompose_prod_n_assum (rel_context_length ctxt) (lift nbfix fty) in let nenv = push_rel_context ctxt newenv in let j = pretype (mk_tycon ty) nenv evdref lvar def in { uj_val = it_mkLambda_or_LetIn j.uj_val ctxt; uj_type = it_mkProd_or_LetIn j.uj_type ctxt }) ctxtv vdef in evar_type_fixpoint loc env evdref names ftys vdefj; let ftys = Array.map (nf_evar !evdref) ftys in let fdefs = Array.map (fun x -> nf_evar !evdref (j_val x)) vdefj in let fixj = match fixkind with | GFix (vn,i) -> (* First, let's find the guard indexes. *) (* If recursive argument was not given by user, we try all args. An earlier approach was to look only for inductive arguments, but doing it properly involves delta-reduction, and it finally doesn't seem worth the effort (except for huge mutual fixpoints ?) *) let possible_indexes = Array.to_list (Array.mapi (fun i (n,_) -> match n with | Some n -> [n] | None -> list_map_i (fun i _ -> i) 0 ctxtv.(i)) vn) in let fixdecls = (names,ftys,fdefs) in let indexes = search_guard loc env possible_indexes fixdecls in make_judge (mkFix ((indexes,i),fixdecls)) ftys.(i) | GCoFix i -> let cofix = (i,(names,ftys,fdefs)) in (try check_cofix env cofix with e when Errors.noncritical e -> Loc.raise loc e); make_judge (mkCoFix cofix) ftys.(i) in inh_conv_coerce_to_tycon loc env evdref fixj tycon | GSort (loc,s) -> let s' = pretype_sort evdref s in inh_conv_coerce_to_tycon loc env evdref s' tycon | GApp (loc,f,args) -> let length = List.length args in let ftycon = let ty = if length > 0 then match tycon with | None -> None | Some (None, ty) -> mk_abstr_tycon length ty | Some (Some (init, cur), ty) -> Some (Some (length + init, length + cur), ty) else tycon in match ty with | Some (_, t) -> if Subtac_coercion.disc_subset (whd_betadeltaiota env !evdref t) = None then ty else None | _ -> None in let fj = pretype ftycon env evdref lvar f in let floc = loc_of_glob_constr f in let rec apply_rec env n resj tycon = function | [] -> resj | c::rest -> let argloc = loc_of_glob_constr c in let resj = evd_comb1 (Coercion.inh_app_fun env) evdref resj in let resty = whd_betadeltaiota env !evdref resj.uj_type in match kind_of_term resty with | Prod (na,c1,c2) -> Option.iter (fun ty -> evdref := Coercion.inh_conv_coerces_to loc env !evdref resty ty) tycon; let evd, (_, _, tycon) = split_tycon loc env !evdref tycon in evdref := evd; let hj = pretype (mk_tycon c1) env evdref lvar c in let value, typ = applist (j_val resj, [j_val hj]), subst1 hj.uj_val c2 in apply_rec env (n+1) { uj_val = value; uj_type = typ } (Option.map (fun (abs, c) -> abs, c) tycon) rest | _ -> let hj = pretype empty_tycon env evdref lvar c in 
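(* The head type did not reduce to a product, so the application
   cannot be typed; the argument was still pretyped (with no expected
   type) only so that the error below can report a typed judgement
   for it. *)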
error_cant_apply_not_functional_loc (join_loc floc argloc) env !evdref resj [hj] in let resj = apply_rec env 1 fj ftycon args in let resj = match kind_of_term (whd_evar !evdref resj.uj_val) with | App (f,args) when isInd f or isConst f -> let sigma = !evdref in let c = mkApp (f,Array.map (whd_evar sigma) args) in let t = Retyping.get_type_of env sigma c in make_judge c t | _ -> resj in inh_conv_coerce_to_tycon loc env evdref resj tycon | GLambda(loc,name,k,c1,c2) -> let tycon' = evd_comb1 (fun evd tycon -> match tycon with | None -> evd, tycon | Some ty -> let evd, ty' = Coercion.inh_coerce_to_prod loc env evd ty in evd, Some ty') evdref tycon in let (name',dom,rng) = evd_comb1 (split_tycon_lam loc env) evdref tycon' in let dom_valcon = valcon_of_tycon dom in let j = pretype_type dom_valcon env evdref lvar c1 in let var = (name,None,j.utj_val) in let j' = pretype rng (push_rel var env) evdref lvar c2 in let resj = judge_of_abstraction env name j j' in inh_conv_coerce_to_tycon loc env evdref resj tycon | GProd(loc,name,k,c1,c2) -> let j = pretype_type empty_valcon env evdref lvar c1 in let var = (name,j.utj_val) in let env' = Termops.push_rel_assum var env in let j' = pretype_type empty_valcon env' evdref lvar c2 in let resj = try judge_of_product env name j j' with TypeError _ as e -> Loc.raise loc e in inh_conv_coerce_to_tycon loc env evdref resj tycon | GLetIn(loc,name,c1,c2) -> let j = pretype empty_tycon env evdref lvar c1 in let t = Termops.refresh_universes j.uj_type in let var = (name,Some j.uj_val,t) in let tycon = lift_tycon 1 tycon in let j' = pretype tycon (push_rel var env) evdref lvar c2 in { uj_val = mkLetIn (name, j.uj_val, t, j'.uj_val) ; uj_type = subst1 j.uj_val j'.uj_type } | GLetTuple (loc,nal,(na,po),c,d) -> let cj = pretype empty_tycon env evdref lvar c in let (IndType (indf,realargs)) = try find_rectype env !evdref cj.uj_type with Not_found -> let cloc = loc_of_glob_constr c in error_case_not_inductive_loc cloc env !evdref cj in let cstrs = get_constructors env indf in if Array.length cstrs <> 1 then user_err_loc (loc,"",str "Destructing let is only for inductive types with one constructor"); let cs = cstrs.(0) in if List.length nal <> cs.cs_nargs then user_err_loc (loc,"", str "Destructing let on this type expects " ++ int cs.cs_nargs ++ str " variables"); let fsign = List.map2 (fun na (_,c,t) -> (na,c,t)) (List.rev nal) cs.cs_args in let env_f = push_rels fsign env in (* Make dependencies from arity signature impossible *) let arsgn = let arsgn,_ = get_arity env indf in if not !allow_anonymous_refs then List.map (fun (_,b,t) -> (Anonymous,b,t)) arsgn else arsgn in let psign = (na,None,build_dependent_inductive env indf)::arsgn in let nar = List.length arsgn in (match po with | Some p -> let env_p = push_rels psign env in let pj = pretype_type empty_valcon env_p evdref lvar p in let ccl = nf_evar !evdref pj.utj_val in let psign = make_arity_signature env true indf in (* with names *) let p = it_mkLambda_or_LetIn ccl psign in let inst = (Array.to_list cs.cs_concl_realargs) @[build_dependent_constructor cs] in let lp = lift cs.cs_nargs p in let fty = hnf_lam_applist env !evdref lp inst in let fj = pretype (mk_tycon fty) env_f evdref lvar d in let f = it_mkLambda_or_LetIn fj.uj_val fsign in let v = let mis,_ = dest_ind_family indf in let ci = make_case_info env mis LetStyle in mkCase (ci, p, cj.uj_val,[|f|]) in { uj_val = v; uj_type = substl (realargs@[cj.uj_val]) ccl } | None -> let tycon = lift_tycon cs.cs_nargs tycon in let fj = pretype tycon env_f evdref lvar d in 
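(* Destructuring let with no explicit return predicate: the body [d]
   has just been pretyped under the constructor arguments [fsign];
   below it is re-abstracted over them, and its type must not mention
   those arguments, otherwise no return predicate can be inferred and
   an error is raised. *)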
let f = it_mkLambda_or_LetIn fj.uj_val fsign in let ccl = nf_evar !evdref fj.uj_type in let ccl = if noccur_between 1 cs.cs_nargs ccl then lift (- cs.cs_nargs) ccl else error_cant_find_case_type_loc loc env !evdref cj.uj_val in let p = it_mkLambda_or_LetIn (lift (nar+1) ccl) psign in let v = let mis,_ = dest_ind_family indf in let ci = make_case_info env mis LetStyle in mkCase (ci, p, cj.uj_val,[|f|] ) in { uj_val = v; uj_type = ccl }) | GIf (loc,c,(na,po),b1,b2) -> let cj = pretype empty_tycon env evdref lvar c in let (IndType (indf,realargs)) = try find_rectype env !evdref cj.uj_type with Not_found -> let cloc = loc_of_glob_constr c in error_case_not_inductive_loc cloc env !evdref cj in let cstrs = get_constructors env indf in if Array.length cstrs <> 2 then user_err_loc (loc,"", str "If is only for inductive types with two constructors."); let arsgn = let arsgn,_ = get_arity env indf in if not !allow_anonymous_refs then (* Make dependencies from arity signature impossible *) List.map (fun (_,b,t) -> (Anonymous,b,t)) arsgn else arsgn in let nar = List.length arsgn in let psign = (na,None,build_dependent_inductive env indf)::arsgn in let pred,p = match po with | Some p -> let env_p = push_rels psign env in let pj = pretype_type empty_valcon env_p evdref lvar p in let ccl = nf_evar !evdref pj.utj_val in let pred = it_mkLambda_or_LetIn ccl psign in let typ = lift (- nar) (beta_applist (pred,[cj.uj_val])) in let jtyp = inh_conv_coerce_to_tycon loc env evdref {uj_val = pred; uj_type = typ} tycon in jtyp.uj_val, jtyp.uj_type | None -> let p = match tycon with | Some (None, ty) -> ty | None | Some _ -> e_new_evar evdref env ~src:(loc,InternalHole) (Termops.new_Type ()) in it_mkLambda_or_LetIn (lift (nar+1) p) psign, p in let pred = nf_evar !evdref pred in let p = nf_evar !evdref p in let f cs b = let n = rel_context_length cs.cs_args in let pi = lift n pred in let pi = beta_applist (pi, [build_dependent_constructor cs]) in let csgn = if not !allow_anonymous_refs then List.map (fun (_,b,t) -> (Anonymous,b,t)) cs.cs_args else List.map (fun (n, b, t) -> match n with Name _ -> (n, b, t) | Anonymous -> (Name (id_of_string "H"), b, t)) cs.cs_args in let env_c = push_rels csgn env in let bj = pretype (mk_tycon pi) env_c evdref lvar b in it_mkLambda_or_LetIn bj.uj_val cs.cs_args in let b1 = f cstrs.(0) b1 in let b2 = f cstrs.(1) b2 in let v = let mis,_ = dest_ind_family indf in let ci = make_case_info env mis IfStyle in mkCase (ci, pred, cj.uj_val, [|b1;b2|]) in { uj_val = v; uj_type = p } | GCases (loc,sty,po,tml,eqns) -> Cases.compile_cases loc sty ((fun vtyc env evdref -> pretype vtyc env evdref lvar),evdref) tycon env (* loc *) (po,tml,eqns) | GCast (loc,c,k) -> let cj = match k with CastCoerce -> let cj = pretype empty_tycon env evdref lvar c in evd_comb1 (Coercion.inh_coerce_to_base loc env) evdref cj | CastConv (k,t) -> let tj = pretype_type empty_valcon env evdref lvar t in let cj = pretype (mk_tycon tj.utj_val) env evdref lvar c in let v = mkCast (cj.uj_val, k, tj.utj_val) in { uj_val = v; uj_type = tj.utj_val } in inh_conv_coerce_to_tycon loc env evdref cj tycon (* [pretype_type valcon env evdref lvar c] coerces [c] into a type *) and pretype_type valcon env evdref lvar = function | GHole loc -> (match valcon with | Some v -> let s = let sigma = !evdref in let t = Retyping.get_type_of env sigma v in match kind_of_term (whd_betadeltaiota env sigma t) with | Sort s -> s | Evar ev when is_Type (existential_type sigma ev) -> evd_comb1 (define_evar_as_sort) evdref ev | _ -> anomaly "Found a type 
constraint which is not a type" in { utj_val = v; utj_type = s } | None -> let s = Termops.new_Type_sort () in { utj_val = e_new_evar evdref env ~src:loc (mkSort s); utj_type = s}) | c -> let j = pretype empty_tycon env evdref lvar c in let loc = loc_of_glob_constr c in let tj = evd_comb1 (Coercion.inh_coerce_to_sort loc env) evdref j in match valcon with | None -> tj | Some v -> if e_cumul env evdref v tj.utj_val then tj else error_unexpected_type_loc (loc_of_glob_constr c) env !evdref tj.utj_val v let pretype_gen expand_evar fail_evar resolve_classes evdref env lvar kind c = let c' = match kind with | OfType exptyp -> let tycon = match exptyp with None -> empty_tycon | Some t -> mk_tycon t in (pretype tycon env evdref lvar c).uj_val | IsType -> (pretype_type empty_valcon env evdref lvar c).utj_val in if resolve_classes then (try evdref := Typeclasses.resolve_typeclasses ~filter:Subtac_utils.no_goals_or_obligations ~split:true ~fail:true env !evdref; evdref := Typeclasses.resolve_typeclasses ~filter:Typeclasses.all_evars ~split:true ~fail:false env !evdref with e when Errors.noncritical e -> if fail_evar then raise e else ()); evdref := consider_remaining_unif_problems env !evdref; let c = if expand_evar then nf_evar !evdref c' else c' in if fail_evar then check_evars env Evd.empty !evdref c; c (* TODO: comment faire remonter l'information si le typage a resolu des variables du sigma original. il faudrait que la fonction de typage retourne aussi le nouveau sigma... *) let understand_judgment sigma env c = let evdref = ref (create_evar_defs sigma) in let j = pretype empty_tycon env evdref ([],[]) c in let evd = consider_remaining_unif_problems env !evdref in let j = j_nf_evar evd j in check_evars env sigma evd (mkCast(j.uj_val,DEFAULTcast, j.uj_type)); j let understand_judgment_tcc evdref env c = let j = pretype empty_tycon env evdref ([],[]) c in j_nf_evar !evdref j (* Raw calls to the unsafe inference machine: boolean says if we must fail on unresolved evars; the unsafe_judgment list allows us to extend env with some bindings *) let ise_pretype_gen expand_evar fail_evar resolve_classes sigma env lvar kind c = let evdref = ref (Evd.create_evar_defs sigma) in let c = pretype_gen expand_evar fail_evar resolve_classes evdref env lvar kind c in !evdref, c (** Entry points of the high-level type synthesis algorithm *) let understand_gen kind sigma env c = snd (ise_pretype_gen true true true sigma env ([],[]) kind c) let understand sigma env ?expected_type:exptyp c = snd (ise_pretype_gen true true true sigma env ([],[]) (OfType exptyp) c) let understand_type sigma env c = snd (ise_pretype_gen true false true sigma env ([],[]) IsType c) let understand_ltac ?(resolve_classes=false) expand_evar sigma env lvar kind c = ise_pretype_gen expand_evar false resolve_classes sigma env lvar kind c let understand_tcc ?(resolve_classes=true) sigma env ?expected_type:exptyp c = ise_pretype_gen true false resolve_classes sigma env ([],[]) (OfType exptyp) c let understand_tcc_evars ?(fail_evar=false) ?(resolve_classes=true) evdref env kind c = pretype_gen true fail_evar resolve_classes evdref env ([],[]) kind c end module Default : S = SubtacPretyping_F(Coercion.Default) coq-8.4pl2/plugins/subtac/subtac_command.mli0000640000175000001440000000274311520541251020255 0ustar notinusersopen Pretyping open Evd open Environ open Term open Topconstr open Names open Libnames open Pp open Vernacexpr open Constrintern val interp_gen : typing_constraint -> evar_map ref -> env -> ?impls:internalization_env -> 
?allow_patvar:bool -> ?ltacvars:ltac_sign -> constr_expr -> constr val interp_constr : evar_map ref -> env -> constr_expr -> constr val interp_type_evars : evar_map ref -> env -> ?impls:internalization_env -> constr_expr -> constr val interp_casted_constr_evars : evar_map ref -> env -> ?impls:internalization_env -> constr_expr -> types -> constr val interp_open_constr : evar_map ref -> env -> constr_expr -> constr val interp_constr_judgment : evar_map ref -> env -> constr_expr -> unsafe_judgment val list_chop_hd : int -> 'a list -> 'a list * 'a * 'a list val interp_binder : Evd.evar_map ref -> Environ.env -> Names.name -> Topconstr.constr_expr -> Term.constr val telescope : (Names.name * Term.types option * Term.types) list -> Term.types * (Names.name * Term.types option * Term.types) list * Term.constr val build_wellfounded : Names.identifier * 'a * Topconstr.local_binder list * Topconstr.constr_expr * Topconstr.constr_expr -> Topconstr.constr_expr -> Topconstr.constr_expr -> 'b -> Subtac_obligations.progress val build_recursive : (fixpoint_expr * decl_notation list) list -> unit val build_corecursive : (cofixpoint_expr * decl_notation list) list -> unit coq-8.4pl2/plugins/subtac/subtac.mli0000640000175000001440000000013411160567762016566 0ustar notinusersval require_library : string -> unit val subtac : Util.loc * Vernacexpr.vernac_expr -> unit coq-8.4pl2/plugins/subtac/subtac_pretyping.mli0000640000175000001440000000126011504715034020655 0ustar notinusersopen Term open Environ open Names open Sign open Evd open Global open Topconstr open Implicit_quantifiers open Impargs module Pretyping : Pretyping.S val interp : Environ.env -> Evd.evar_map ref -> Glob_term.glob_constr -> Evarutil.type_constraint -> Term.constr * Term.constr val subtac_process : ?is_type:bool -> env -> evar_map ref -> identifier -> local_binder list -> constr_expr -> constr_expr option -> evar_map * constr * types * manual_explicitation list val subtac_proof : Decl_kinds.definition_kind -> Tacexpr.declaration_hook -> env -> evar_map ref -> identifier -> local_binder list -> constr_expr -> constr_expr option -> Subtac_obligations.progress coq-8.4pl2/plugins/subtac/subtac_classes.ml0000640000175000001440000001602612010532755020126 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* SPretyping.understand_tcc_evars evdref env IsType t) (SPretyping.understand_judgment_tcc evdref) !evdref env params in bl let interp_type_evars_impls ~evdref ?(impls=empty_internalization_env) env c = let c = intern_gen true ~impls !evdref env c in let imps = Implicit_quantifiers.implicits_of_glob_constr ~with_products:true c in SPretyping.understand_tcc_evars ~fail_evar:false evdref env IsType c, imps let type_ctx_instance evars env ctx inst subst = let rec aux (subst, instctx) l = function (na, b, t) :: ctx -> let t' = substl subst t in let c', l = match b with | None -> interp_casted_constr_evars evars env (List.hd l) t', List.tl l | Some b -> substl subst b, l in evars := resolve_typeclasses ~filter:Subtac_utils.no_goals_or_obligations ~fail:true env !evars; let d = na, Some c', t' in aux (c' :: subst, d :: instctx) l ctx | [] -> subst in aux (subst, []) inst (List.rev ctx) let new_instance ?(global=false) ctx (instid, bk, cl) props ?(generalize=true) pri = let env = Global.env() in let evars = ref Evd.empty in let tclass, _ = match bk with | Implicit -> Implicit_quantifiers.implicit_application Idset.empty (* need no avoid *) 
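(* Implicit binding kind: the class expression is completed by filling
   every missing class argument with a hole to be inferred (see the
   callback just below); leaving the typeclass itself under-applied is
   rejected. *)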
~allow_partial:false (fun avoid (clname, (id, _, t)) -> match clname with | Some (cl, b) -> let t = if b then let _k = class_info cl in CHole (Util.dummy_loc, Some Evd.InternalHole) else CHole (Util.dummy_loc, None) in t, avoid | None -> failwith ("new instance: under-applied typeclass")) cl | Explicit -> cl, Idset.empty in let tclass = if generalize then CGeneralization (dummy_loc, Implicit, Some AbsPi, tclass) else tclass in let k, cty, ctx', ctx, len, imps, subst = let (env', ctx), imps = interp_context_evars evars env ctx in let c', imps' = interp_type_evars_impls ~evdref:evars env' tclass in let len = List.length ctx in let imps = imps @ Impargs.lift_implicits len imps' in let ctx', c = decompose_prod_assum c' in let ctx'' = ctx' @ ctx in let cl, args = Typeclasses.dest_class_app (push_rel_context ctx'' env) c in let _, args = List.fold_right (fun (na, b, t) (args, args') -> match b with | None -> (List.tl args, List.hd args :: args') | Some b -> (args, substl args' b :: args')) (snd cl.cl_context) (args, []) in cl, c', ctx', ctx, len, imps, args in let id = match snd instid with | Name id -> let sp = Lib.make_path id in if Nametab.exists_cci sp then errorlabstrm "new_instance" (Nameops.pr_id id ++ Pp.str " already exists"); id | Anonymous -> let i = Nameops.add_suffix (Classes.id_of_class k) "_instance_0" in Namegen.next_global_ident_away i (Termops.ids_of_context env) in evars := resolve_typeclasses ~filter:Subtac_utils.no_goals_or_obligations ~fail:true env !evars; let ctx = Evarutil.nf_rel_context_evar !evars ctx and ctx' = Evarutil.nf_rel_context_evar !evars ctx' in let env' = push_rel_context ctx env in let sigma = !evars in let subst = List.map (Evarutil.nf_evar sigma) subst in let props = match props with | Some (CRecord (loc, _, fs)) -> if List.length fs > List.length k.cl_props then Classes.mismatched_props env' (List.map snd fs) k.cl_props; Inl fs | Some p -> Inr p | None -> Inl [] in let subst = match props with | Inr term -> let c = interp_casted_constr_evars evars env' term cty in Inr c | Inl props -> let get_id = function | Ident id' -> id' | _ -> errorlabstrm "new_instance" (Pp.str "Only local structures are handled") in let props, rest = List.fold_left (fun (props, rest) (id,b,_) -> if b = None then try let (loc_mid, c) = List.find (fun (id', _) -> Name (snd (get_id id')) = id) rest in let rest' = List.filter (fun (id', _) -> Name (snd (get_id id')) <> id) rest in let (loc, mid) = get_id loc_mid in List.iter (fun (n, _, x) -> if n = Name mid then Option.iter (fun x -> Dumpglob.add_glob loc (ConstRef x)) x) k.cl_projs; c :: props, rest' with Not_found -> (CHole (Util.dummy_loc, None) :: props), rest else props, rest) ([], props) k.cl_props in if rest <> [] then unbound_method env' k.cl_impl (get_id (fst (List.hd rest))) else Inl (type_ctx_instance evars (push_rel_context ctx' env') k.cl_props props subst) in evars := Evarutil.nf_evar_map !evars; evars := resolve_typeclasses ~filter:Subtac_utils.no_goals_or_obligations ~fail:true env !evars; evars := resolve_typeclasses ~filter:Typeclasses.no_goals ~fail:false env !evars; let term, termtype = match subst with | Inl subst -> let subst = List.fold_left2 (fun subst' s (_, b, _) -> if b = None then s :: subst' else subst') [] subst (k.cl_props @ snd k.cl_context) in let app, ty_constr = instance_constructor k subst in let termtype = it_mkProd_or_LetIn ty_constr (ctx' @ ctx) in let term = Termops.it_mkLambda_or_LetIn (Option.get app) (ctx' @ ctx) in term, termtype | Inr def -> let termtype = it_mkProd_or_LetIn cty ctx in let 
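(* [Inr def]: the instance body was given as a single term, so the
   final definition is [def] generalized over the local context, with
   the corresponding product type; the [Inl] case above instead
   applies the class record constructor to the interpreted fields. *)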
term = Termops.it_mkLambda_or_LetIn def ctx in term, termtype in let termtype = Evarutil.nf_evar !evars termtype in let term = Evarutil.nf_evar !evars term in evars := undefined_evars !evars; Evarutil.check_evars env Evd.empty !evars termtype; let hook vis gr = let cst = match gr with ConstRef kn -> kn | _ -> assert false in Impargs.declare_manual_implicits false gr ~enriching:false [imps]; Typeclasses.declare_instance pri (not global) (ConstRef cst) in let evm = Subtac_utils.evars_of_term !evars Evd.empty term in let obls, _, constr, typ = Eterm.eterm_obligations env id !evars evm 0 term termtype in id, Subtac_obligations.add_definition id ~term:constr typ ~kind:(Global,Instance) ~hook obls coq-8.4pl2/plugins/subtac/subtac_cases.ml0000640000175000001440000022456712121620060017570 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* PatVar (dummy_loc,Anonymous)) (* Environment management *) let push_rels vars env = List.fold_right push_rel vars env (* We have x1:t1...xn:tn,xi':ti,y1..yk |- c and re-generalize over xi:ti to get x1:t1...xn:tn,xi':ti,y1..yk |- c[xi:=xi'] *) let regeneralize_rel i k j = if j = i+k then k else if j < i+k then j else j let rec regeneralize_index i k t = match kind_of_term t with | Rel j when j = i+k -> mkRel (k+1) | Rel j when j < i+k -> t | Rel j when j > i+k -> t | _ -> map_constr_with_binders succ (regeneralize_index i) k t type alias_constr = | DepAlias | NonDepAlias let mkSpecialLetInJudge j (na,(deppat,nondeppat,d,t)) = { uj_val = (match d with | DepAlias -> mkLetIn (na,deppat,t,j.uj_val) | NonDepAlias -> if (not (dependent (mkRel 1) j.uj_type)) or (* A leaf: *) isRel deppat then (* The body of pat is not needed to type j - see *) (* insert_aliases - and both deppat and nondeppat have the *) (* same type, then one can freely substitute one by the other *) subst1 nondeppat j.uj_val else (* The body of pat is not needed to type j but its value *) (* is dependent in the type of j; our choice is to *) (* enforce this dependency *) mkLetIn (na,deppat,t,j.uj_val)); uj_type = subst1 deppat j.uj_type } (**********************************************************************) (* Structures used in compiling pattern-matching *) type rhs = { rhs_env : env; avoid_ids : identifier list; it : glob_constr; } type equation = { patterns : cases_pattern list; rhs : rhs; alias_stack : name list; eqn_loc : loc; used : bool ref } type matrix = equation list (* 1st argument of IsInd is the original ind before extracting the summary *) type tomatch_type = | IsInd of types * inductive_type | NotInd of constr option * types type tomatch_status = | Pushed of ((constr * tomatch_type) * int list) | Alias of (constr * constr * alias_constr * constr) | Abstract of rel_declaration type tomatch_stack = tomatch_status list (* The type [predicate_signature] types the terms to match and the rhs: - [PrLetIn (names,dep,pred)] types a pushed term ([Pushed]), if dep<>Anonymous, the term is dependent, let n=|names|, if n<>0 then the type of the pushed term is necessarily an inductive with n real arguments. 
Otherwise, it may be non inductive, or inductive without real arguments, or inductive originating from a subterm in which case real args are not dependent; it accounts for n+1 binders if dep or n binders if not dep - [PrProd] types abstracted term ([Abstract]); it accounts for one binder - [PrCcl] types the right-hand side - Aliases [Alias] have no trace in [predicate_signature] *) type predicate_signature = | PrLetIn of (name list * name) * predicate_signature | PrProd of predicate_signature | PrCcl of constr (* We keep a constr for aliases and a cases_pattern for error message *) type alias_builder = | AliasLeaf | AliasConstructor of constructor type pattern_history = | Top | MakeAlias of alias_builder * pattern_continuation and pattern_continuation = | Continuation of int * cases_pattern list * pattern_history | Result of cases_pattern list let start_history n = Continuation (n, [], Top) let feed_history arg = function | Continuation (n, l, h) when n>=1 -> Continuation (n-1, arg :: l, h) | Continuation (n, _, _) -> anomaly ("Bad number of expected remaining patterns: "^(string_of_int n)) | Result _ -> anomaly "Exhausted pattern history" (* This is for non exhaustive error message *) let rec glob_pattern_of_partial_history args2 = function | Continuation (n, args1, h) -> let args3 = make_anonymous_patvars (n - (List.length args2)) in build_glob_pattern (List.rev_append args1 (args2@args3)) h | Result pl -> pl and build_glob_pattern args = function | Top -> args | MakeAlias (AliasLeaf, rh) -> assert (args = []); glob_pattern_of_partial_history [PatVar (dummy_loc, Anonymous)] rh | MakeAlias (AliasConstructor pci, rh) -> glob_pattern_of_partial_history [PatCstr (dummy_loc, pci, args, Anonymous)] rh let complete_history = glob_pattern_of_partial_history [] (* This is to build glued pattern-matching history and alias bodies *) let rec simplify_history = function | Continuation (0, l, Top) -> Result (List.rev l) | Continuation (0, l, MakeAlias (f, rh)) -> let pargs = List.rev l in let pat = match f with | AliasConstructor pci -> PatCstr (dummy_loc,pci,pargs,Anonymous) | AliasLeaf -> assert (l = []); PatVar (dummy_loc, Anonymous) in feed_history pat rh | h -> h (* Builds a continuation expecting [n] arguments and building [ci] applied to this [n] arguments *) let push_history_pattern n current cont = Continuation (n, [], MakeAlias (current, cont)) (* A pattern-matching problem has the following form: env, isevars |- Cases tomatch of mat end where tomatch is some sequence of "instructions" (t1 ... tn) and mat is some matrix (p11 ... p1n -> rhs1) ( ... ) (pm1 ... pmn -> rhsm) Terms to match: there are 3 kinds of instructions - "Pushed" terms to match are typed in [env]; these are usually just Rel(n) except for the initial terms given by user and typed in [env] - "Abstract" instructions means an abstraction has to be inserted in the current branch to build (this means a pattern has been detected dependent in another one and generalisation is necessary to ensure well-typing) - "Alias" instructions means an alias has to be inserted (this alias is usually removed at the end, except when its type is not the same as the type of the matched term from which it comes - typically because the inductive types are "real" parameters) Right-hand-sides: They consist of a raw term to type in an environment specific to the clause they belong to: the names of declarations are those of the variables present in the patterns. 
Therefore, they come with their own [rhs_env] (actually it is the same as [env] except for the names of variables). *) type pattern_matching_problem = { env : env; isevars : Evd.evar_map ref; pred : predicate_signature option; tomatch : tomatch_stack; history : pattern_continuation; mat : matrix; caseloc : loc; casestyle: case_style; typing_function: type_constraint -> env -> glob_constr -> unsafe_judgment } (*--------------------------------------------------------------------------* * A few functions to infer the inductive type from the patterns instead of * * checking that the patterns correspond to the ind. type of the * * destructurated object. Allows type inference of examples like * * match n with O => true | _ => false end * * match x in I with C => true | _ => false end * *--------------------------------------------------------------------------*) (* Computing the inductive type from the matrix of patterns *) (* We use the "in I" clause to coerce the terms to match and otherwise use the constructor to know in which type is the matching problem Note that insertion of coercions inside nested patterns is done each time the matrix is expanded *) let rec find_row_ind = function [] -> None | PatVar _ :: l -> find_row_ind l | PatCstr(loc,c,_,_) :: _ -> Some (loc,c) let inductive_template isevars env tmloc ind = let arsign = get_full_arity_sign env ind in let hole_source = match tmloc with | Some loc -> fun i -> (loc, Evd.TomatchTypeParameter (ind,i)) | None -> fun _ -> (dummy_loc, Evd.InternalHole) in let (_,evarl,_) = List.fold_right (fun (na,b,ty) (subst,evarl,n) -> match b with | None -> let ty' = substl subst ty in let e = e_new_evar isevars env ~src:(hole_source n) ty' in (e::subst,e::evarl,n+1) | Some b -> (b::subst,evarl,n+1)) arsign ([],[],1) in applist (mkInd ind,List.rev evarl) (************************************************************************) (* Utils *) let mkExistential env ?(src=(dummy_loc,Evd.InternalHole)) isevars = e_new_evar isevars env ~src:src (new_Type ()) let evd_comb2 f isevars x y = let (evd',y) = f !isevars x y in isevars := evd'; y let context_of_arsign l = let (x, _) = List.fold_right (fun c (x, n) -> (lift_rel_context n c @ x, List.length c + n)) l ([], 0) in x (* We put the tycon inside the arity signature, possibly discovering dependencies. *) let prepare_predicate_from_arsign_tycon loc env evm tomatchs arsign c = let nar = List.fold_left (fun n sign -> List.length sign + n) 0 arsign in let subst, len = List.fold_left2 (fun (subst, len) (tm, tmtype) sign -> let signlen = List.length sign in match kind_of_term tm with | Rel n when dependent tm c && signlen = 1 (* The term to match is not of a dependent type itself *) -> ((n, len) :: subst, len - signlen) | Rel n when signlen > 1 (* The term is of a dependent type, maybe some variable in its type appears in the tycon. 
*) -> (match tmtype with | NotInd _ -> (* len - signlen, subst*) assert false (* signlen > 1 *) | IsInd (_, IndType(indf,realargs)) -> let subst = if dependent tm c && List.for_all isRel realargs then (n, 1) :: subst else subst in List.fold_left (fun (subst, len) arg -> match kind_of_term arg with | Rel n when dependent arg c -> ((n, len) :: subst, pred len) | _ -> (subst, pred len)) (subst, len) realargs) | _ -> (subst, len - signlen)) ([], nar) tomatchs arsign in let rec predicate lift c = match kind_of_term c with | Rel n when n > lift -> (try (* Make the predicate dependent on the matched variable *) let idx = List.assoc (n - lift) subst in mkRel (idx + lift) with Not_found -> (* A variable that is not matched, lift over the arsign. *) mkRel (n + nar)) | _ -> map_constr_with_binders succ predicate lift c in try (* The tycon may be ill-typed after abstraction. *) let pred = predicate 0 c in let env' = push_rel_context (context_of_arsign arsign) env in ignore(Typing.sort_of env' evm pred); pred with e when Errors.noncritical e -> lift nar c module Cases_F(Coercion : Coercion.S) : S = struct let inh_coerce_to_ind isevars env ty tyi = let expected_typ = inductive_template isevars env None tyi in (* devrait tre indiffrent d'exiger leq ou pas puisque pour un inductif cela doit tre gal *) let _ = e_cumul env isevars expected_typ ty in () let unify_tomatch_with_patterns isevars env loc typ pats = match find_row_ind pats with | None -> NotInd (None,typ) | Some (_,(ind,_)) -> inh_coerce_to_ind isevars env typ ind; try IsInd (typ,find_rectype env ( !isevars) typ) with Not_found -> NotInd (None,typ) let find_tomatch_tycon isevars env loc = function (* Try if some 'in I ...' is present and can be used as a constraint *) | Some (_,ind,_,_) -> mk_tycon (inductive_template isevars env loc ind) | None -> empty_tycon let coerce_row typing_fun isevars env pats (tomatch,(_,indopt)) = let loc = Some (loc_of_glob_constr tomatch) in let tycon = find_tomatch_tycon isevars env loc indopt in let j = typing_fun tycon env tomatch in let evd, j = Coercion.inh_coerce_to_base (loc_of_glob_constr tomatch) env !isevars j in isevars := evd; let typ = nf_evar ( !isevars) j.uj_type in let t = try IsInd (typ,find_rectype env ( !isevars) typ) with Not_found -> unify_tomatch_with_patterns isevars env loc typ pats in (j.uj_val,t) let coerce_to_indtype typing_fun isevars env matx tomatchl = let pats = List.map (fun r -> r.patterns) matx in let matx' = match matrix_transpose pats with | [] -> List.map (fun _ -> []) tomatchl (* no patterns at all *) | m -> m in List.map2 (coerce_row typing_fun isevars env) matx' tomatchl let adjust_tomatch_to_pattern pb ((current,typ),deps) = (* Ideally, we could find a common inductive type to which both the term to match and the patterns coerce *) (* In practice, we coerce the term to match if it is not already an inductive type and it is not dependent; moreover, we use only the first pattern type and forget about the others *) let typ = match typ with IsInd (t,_) -> t | NotInd (_,t) -> t in let typ = try IsInd (typ,find_rectype pb.env ( !(pb.isevars)) typ) with Not_found -> NotInd (None,typ) in let tomatch = ((current,typ),deps) in match typ with | NotInd (None,typ) -> let tm1 = List.map (fun eqn -> List.hd eqn.patterns) pb.mat in (match find_row_ind tm1 with | None -> tomatch | Some (_,(ind,_)) -> let indt = inductive_template pb.isevars pb.env None ind in let current = if deps = [] & isEvar typ then (* Don't insert coercions if dependent; only solve evars *) let _ = e_cumul pb.env 
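(* Here the type of the matched term is an evar and nothing depends on
   the term, so it is only unified (by cumulativity) with the inductive
   template inferred from the patterns; in the other branch below an
   actual coercion to that inductive type is inserted instead. *)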
pb.isevars indt typ in current else (evd_comb2 (Coercion.inh_conv_coerce_to dummy_loc pb.env) pb.isevars (make_judge current typ) (mk_tycon_type indt)).uj_val in let sigma = !(pb.isevars) in let typ = IsInd (indt,find_rectype pb.env sigma indt) in ((current,typ),deps)) | _ -> tomatch (* extract some ind from [t], possibly coercing from constructors in [tm] *) let to_mutind env isevars tm c t = (* match c with | Some body -> *) NotInd (c,t) (* | None -> unify_tomatch_with_patterns isevars env t tm*) let type_of_tomatch = function | IsInd (t,_) -> t | NotInd (_,t) -> t let mkDeclTomatch na = function | IsInd (t,_) -> (na,None,t) | NotInd (c,t) -> (na,c,t) let map_tomatch_type f = function | IsInd (t,ind) -> IsInd (f t,map_inductive_type f ind) | NotInd (c,t) -> NotInd (Option.map f c, f t) let liftn_tomatch_type n depth = map_tomatch_type (liftn n depth) let lift_tomatch_type n = liftn_tomatch_type n 1 (**********************************************************************) (* Utilities on patterns *) let current_pattern eqn = match eqn.patterns with | pat::_ -> pat | [] -> anomaly "Empty list of patterns" let alias_of_pat = function | PatVar (_,name) -> name | PatCstr(_,_,_,name) -> name let remove_current_pattern eqn = match eqn.patterns with | pat::pats -> { eqn with patterns = pats; alias_stack = alias_of_pat pat :: eqn.alias_stack } | [] -> anomaly "Empty list of patterns" let prepend_pattern tms eqn = {eqn with patterns = tms@eqn.patterns } (**********************************************************************) (* Well-formedness tests *) (* Partial check on patterns *) exception NotAdjustable let rec adjust_local_defs loc = function | (pat :: pats, (_,None,_) :: decls) -> pat :: adjust_local_defs loc (pats,decls) | (pats, (_,Some _,_) :: decls) -> PatVar (loc, Anonymous) :: adjust_local_defs loc (pats,decls) | [], [] -> [] | _ -> raise NotAdjustable let check_and_adjust_constructor env ind cstrs = function | PatVar _ as pat -> pat | PatCstr (loc,((_,i) as cstr),args,alias) as pat -> (* Check it is constructor of the right type *) let ind' = inductive_of_constructor cstr in if Names.eq_ind ind' ind then (* Check the constructor has the right number of args *) let ci = cstrs.(i-1) in let nb_args_constr = ci.cs_nargs in if List.length args = nb_args_constr then pat else try let args' = adjust_local_defs loc (args, List.rev ci.cs_args) in PatCstr (loc, cstr, args', alias) with NotAdjustable -> error_wrong_numarg_constructor_loc loc (Global.env()) cstr nb_args_constr else (* Try to insert a coercion *) try Coercion.inh_pattern_coerce_to loc pat ind' ind with Not_found -> error_bad_constructor_loc loc cstr ind let check_all_variables typ mat = List.iter (fun eqn -> match current_pattern eqn with | PatVar (_,id) -> () | PatCstr (loc,cstr_sp,_,_) -> error_bad_pattern_loc loc cstr_sp typ) mat let check_unused_pattern env eqn = if not !(eqn.used) then raise_pattern_matching_error (eqn.eqn_loc, env, UnusedClause eqn.patterns) let set_used_pattern eqn = eqn.used := true let extract_rhs pb = match pb.mat with | [] -> errorlabstrm "build_leaf" (mssg_may_need_inversion()) | eqn::_ -> set_used_pattern eqn; eqn.rhs (**********************************************************************) (* Functions to deal with matrix factorization *) let occur_in_rhs na rhs = match na with | Anonymous -> false | Name id -> occur_glob_constr id rhs.it let is_dep_patt eqn = function | PatVar (_,name) -> occur_in_rhs name eqn.rhs | PatCstr _ -> true let dependencies_in_rhs nargs eqns = if eqns = [] then list_tabulate 
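(* No equations to inspect: none of the [nargs] new sub-patterns is
   considered dependent.  Otherwise, below, a column is marked
   dependent as soon as one row either has a constructor pattern in
   that position or binds a variable occurring in that row's
   right-hand side. *)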
(fun _ -> false) nargs (* Only "_" patts *) else let deps = List.map (fun (tms,eqn) -> List.map (is_dep_patt eqn) tms) eqns in let columns = matrix_transpose deps in List.map (List.exists ((=) true)) columns let dependent_decl a = function | (na,None,t) -> dependent a t | (na,Some c,t) -> dependent a t || dependent a c (* Computing the matrix of dependencies *) (* We are in context d1...dn |- and [find_dependencies k 1 nextlist] computes for declaration [k+1] in which of declarations in [nextlist] (which corresponds to d(k+2)...dn) it depends; declarations are expressed by index, e.g. in dependency list [n-2;1], [1] points to [dn] and [n-2] to [d3] *) let rec find_dependency_list k n = function | [] -> [] | (used,tdeps,d)::rest -> let deps = find_dependency_list k (n+1) rest in if used && dependent_decl (mkRel n) d then list_add_set (List.length rest + 1) (list_union deps tdeps) else deps let find_dependencies is_dep_or_cstr_in_rhs d (k,nextlist) = let deps = find_dependency_list k 1 nextlist in if is_dep_or_cstr_in_rhs || deps <> [] then (k-1,(true ,deps,d)::nextlist) else (k-1,(false,[] ,d)::nextlist) let find_dependencies_signature deps_in_rhs typs = let k = List.length deps_in_rhs in let _,l = List.fold_right2 find_dependencies deps_in_rhs typs (k,[]) in List.map (fun (_,deps,_) -> deps) l (******) (* A Pushed term to match has just been substituted by some constructor t = (ci x1...xn) and the terms x1 ... xn have been added to match - all terms to match and to push (dependent on t by definition) must have (Rel depth) substituted by t and Rel's>depth lifted by n - all pushed terms to match (non dependent on t by definition) must be lifted by n We start with depth=1 *) let regeneralize_index_tomatch n = let rec genrec depth = function | [] -> [] | Pushed ((c,tm),l)::rest -> let c = regeneralize_index n depth c in let tm = map_tomatch_type (regeneralize_index n depth) tm in let l = List.map (regeneralize_rel n depth) l in Pushed ((c,tm),l)::(genrec depth rest) | Alias (c1,c2,d,t)::rest -> Alias (regeneralize_index n depth c1,c2,d,t)::(genrec depth rest) | Abstract d::rest -> Abstract (map_rel_declaration (regeneralize_index n depth) d) ::(genrec (depth+1) rest) in genrec 0 let rec replace_term n c k t = if isRel t && destRel t = n+k then lift k c else map_constr_with_binders succ (replace_term n c) k t let replace_tomatch n c = let rec replrec depth = function | [] -> [] | Pushed ((b,tm),l)::rest -> let b = replace_term n c depth b in let tm = map_tomatch_type (replace_term n c depth) tm in List.iter (fun i -> if i=n+depth then anomaly "replace_tomatch") l; Pushed ((b,tm),l)::(replrec depth rest) | Alias (c1,c2,d,t)::rest -> Alias (replace_term n c depth c1,c2,d,t)::(replrec depth rest) | Abstract d::rest -> Abstract (map_rel_declaration (replace_term n c depth) d) ::(replrec (depth+1) rest) in replrec 0 let rec liftn_tomatch_stack n depth = function | [] -> [] | Pushed ((c,tm),l)::rest -> let c = liftn n depth c in let tm = liftn_tomatch_type n depth tm in let l = List.map (fun i -> if i Alias (liftn n depth c1,liftn n depth c2,d,liftn n depth t) ::(liftn_tomatch_stack n depth rest) | Abstract d::rest -> Abstract (map_rel_declaration (liftn n depth) d) ::(liftn_tomatch_stack n (depth+1) rest) let lift_tomatch_stack n = liftn_tomatch_stack n 1 (* if [current] has type [I(p1...pn u1...um)] and we consider the case of constructor [ci] of type [I(p1...pn u'1...u'm)], then the default variable [name] is expected to have which type? 
Rem: [current] is [(Rel i)] except perhaps for initial terms to match *) (************************************************************************) (* Some heuristics to get names for variables pushed in pb environment *) (* Typical requirement: [match y with (S (S x)) => x | x => x end] should be compiled into [match y with O => y | (S n) => match n with O => y | (S x) => x end end] and [match y with (S (S n)) => n | n => n end] into [match y with O => y | (S n0) => match n0 with O => y | (S n) => n end end] i.e. user names should be preserved and created names should not interfere with user names *) let merge_name get_name obj = function | Anonymous -> get_name obj | na -> na let merge_names get_name = List.map2 (merge_name get_name) let get_names env sign eqns = let names1 = list_tabulate (fun _ -> Anonymous) (List.length sign) in (* If any, we prefer names used in pats, from top to bottom *) let names2 = List.fold_right (fun (pats,eqn) names -> merge_names alias_of_pat pats names) eqns names1 in (* Otherwise, we take names from the parameters of the constructor but avoiding conflicts with user ids *) let allvars = List.fold_left (fun l (_,eqn) -> list_union l eqn.rhs.avoid_ids) [] eqns in let names4,_ = List.fold_left2 (fun (l,avoid) d na -> let na = merge_name (fun (na,_,t) -> Name (next_name_away (named_hd env t na) avoid)) d na in (na::l,(out_name na)::avoid)) ([],allvars) (List.rev sign) names2 in names4 (************************************************************************) (* Recovering names for variables pushed to the rhs' environment *) let recover_alias_names get_name = List.map2 (fun x (_,c,t) ->(get_name x,c,t)) let all_name sign = List.map (fun (n, b, t) -> let n = match n with Name _ -> n | Anonymous -> Name (id_of_string "Anonymous") in (n, b, t)) sign let push_rels_eqn sign eqn = let sign = all_name sign in {eqn with rhs = {eqn.rhs with rhs_env = push_rels sign eqn.rhs.rhs_env; } } let push_rels_eqn_with_names sign eqn = let pats = List.rev (list_firstn (List.length sign) eqn.patterns) in let sign = recover_alias_names alias_of_pat pats sign in push_rels_eqn sign eqn let build_aliases_context env sigma names allpats pats = (* pats is the list of bodies to push as an alias *) (* They all are defined in env and we turn them into a sign *) (* cuts in sign need to be done in allpats *) let rec insert env sign1 sign2 n newallpats oldallpats = function | (deppat,_,_,_)::pats, Anonymous::names when not (isRel deppat) -> (* Anonymous leaves must be considered named and treated in the *) (* next clause because they may occur in implicit arguments *) insert env sign1 sign2 n newallpats (List.map List.tl oldallpats) (pats,names) | (deppat,nondeppat,d,t)::pats, na::names -> let nondeppat = lift n nondeppat in let deppat = lift n deppat in let newallpats = List.map2 (fun l1 l2 -> List.hd l2::l1) newallpats oldallpats in let oldallpats = List.map List.tl oldallpats in let decl = (na,Some deppat,t) in let a = (deppat,nondeppat,d,t) in insert (push_rel decl env) (decl::sign1) ((na,a)::sign2) (n+1) newallpats oldallpats (pats,names) | [], [] -> newallpats, sign1, sign2, env | _ -> anomaly "Inconsistent alias and name lists" in let allpats = List.map (fun x -> [x]) allpats in insert env [] [] 0 (List.map (fun _ -> []) allpats) allpats (pats, names) let insert_aliases_eqn sign eqnnames alias_rest eqn = let thissign = List.map2 (fun na (_,c,t) -> (na,c,t)) eqnnames sign in push_rels_eqn thissign { eqn with alias_stack = alias_rest; } let insert_aliases env sigma alias eqns = (* L, y a 
une faiblesse, si un alias est utilis dans un cas par *) (* dfaut prsent mais inutile, ce qui est le cas gnral, l'alias *) (* est introduit mme s'il n'est pas utilis dans les cas rguliers *) let eqnsnames = List.map (fun eqn -> List.hd eqn.alias_stack) eqns in let alias_rests = List.map (fun eqn -> List.tl eqn.alias_stack) eqns in (* names2 takes the meet of all needed aliases *) let names2 = List.fold_right (merge_name (fun x -> x)) eqnsnames Anonymous in (* Only needed aliases are kept by build_aliases_context *) let eqnsnames, sign1, sign2, env = build_aliases_context env sigma [names2] eqnsnames [alias] in let eqns = list_map3 (insert_aliases_eqn sign1) eqnsnames alias_rests eqns in sign2, env, eqns (**********************************************************************) (* Functions to deal with elimination predicate *) exception Occur let noccur_between_without_evar n m term = let rec occur_rec n c = match kind_of_term c with | Rel p -> if n<=p && p () | _ -> iter_constr_with_binders succ occur_rec n c in try occur_rec n term; true with Occur -> false (* Inferring the predicate *) let prepare_unif_pb typ cs = let n = List.length (assums_of_rel_context cs.cs_args) in (* We may need to invert ci if its parameters occur in typ *) let typ' = if noccur_between_without_evar 1 n typ then lift (-n) typ else (* TODO4-1 *) error "Unable to infer return clause of this pattern-matching problem" in let args = extended_rel_list (-n) cs.cs_args in let ci = applist (mkConstruct cs.cs_cstr, cs.cs_params@args) in (* This is the problem: finding P s.t. cs_args |- (P realargs ci) = typ' *) (Array.map (lift (-n)) cs.cs_concl_realargs, ci, typ') (* Infering the predicate *) (* The problem to solve is the following: We match Gamma |- t : I(u01..u0q) against the following constructors: Gamma, x11...x1p1 |- C1(x11..x1p1) : I(u11..u1q) ... Gamma, xn1...xnpn |- Cn(xn1..xnp1) : I(un1..unq) Assume the types in the branches are the following Gamma, x11...x1p1 |- branch1 : T1 ... Gamma, xn1...xnpn |- branchn : Tn Assume the type of the global case expression is Gamma |- T The predicate has the form phi = [y1..yq][z:I(y1..yq)]? and must satisfy the following n+1 equations: Gamma, x11...x1p1 |- (phi u11..u1q (C1 x11..x1p1)) = T1 ... Gamma, xn1...xnpn |- (phi un1..unq (Cn xn1..xnpn)) = Tn Gamma |- (phi u01..u0q t) = T Some hints: - Clearly, if xij occurs in Ti, then, a "match z with (Ci xi1..xipi) => ..." should be inserted somewhere in Ti. - If T is undefined, an easy solution is to insert a "match z with (Ci xi1..xipi) => ..." in front of each Ti - Otherwise, T1..Tn and T must be step by step unified, if some of them diverge, then try to replace the diverging subterm by one of y1..yq or z. - The main problem is what to do when an existential variables is encountered let prepare_unif_pb typ cs = let n = cs.cs_nargs in let _,p = decompose_prod_n n typ in let ci = build_dependent_constructor cs in (* This is the problem: finding P s.t. 
cs_args |- (P realargs ci) = p *) (n, cs.cs_concl_realargs, ci, p) let eq_operator_lift k (n,n') = function | OpRel p, OpRel p' when p > k & p' > k -> if p < k+n or p' < k+n' then false else p - n = p' - n' | op, op' -> op = op' let rec transpose_args n = if n=0 then [] else (Array.map (fun l -> List.hd l) lv):: (transpose_args (m-1) (Array.init (fun l -> List.tl l))) let shift_operator k = function OpLambda _ | OpProd _ -> k+1 | _ -> k let reloc_operator (k,n) = function OpRel p when p > k -> let rec unify_clauses k pv = let pv'= Array.map (fun (n,sign,_,p) -> n,splay_constr (whd_betaiotaevar (push_rels (List.rev sign) env) ( isevars)) p) pv in let n1,op1 = let (n1,(op1,args1)) = pv'.(0) in n1,op1 in if Array.for_all (fun (ni,(opi,_)) -> eq_operator_lift k (n1,ni) (op1,opi)) pv' then let argvl = transpose_args (List.length args1) pv' in let k' = shift_operator k op1 in let argl = List.map (unify_clauses k') argvl in gather_constr (reloc_operator (k,n1) op1) argl *) let abstract_conclusion typ cs = let n = List.length (assums_of_rel_context cs.cs_args) in let (sign,p) = decompose_prod_n n typ in it_mkLambda p sign let infer_predicate loc env isevars typs cstrs indf = (* Il faudra substituer les isevars a un certain moment *) if Array.length cstrs = 0 then (* "TODO4-3" *) error "Inference of annotation for empty inductive types not implemented" else (* Empiric normalization: p may depend in a irrelevant way on args of the*) (* cstr as in [c:{_:Alpha & Beta}] match c with (existS a b)=>(a,b) end *) let typs = Array.map (local_strong whd_beta ( !isevars)) typs in let eqns = array_map2 prepare_unif_pb typs cstrs in (* First strategy: no dependencies at all *) (* let (mis,_) = dest_ind_family indf in let (cclargs,_,typn) = eqns.(mis_nconstr mis -1) in *) let (sign,_) = get_arity env indf in let mtyp = if array_exists is_Type typs then (* Heuristic to avoid comparison between non-variables algebric univs*) new_Type () else mkExistential env ~src:(loc, Evd.CasesType) isevars in if array_for_all (fun (_,_,typ) -> e_cumul env isevars typ mtyp) eqns then (* Non dependent case -> turn it into a (dummy) dependent one *) let sign = (Anonymous,None,build_dependent_inductive env indf)::sign in let pred = it_mkLambda_or_LetIn (lift (List.length sign) mtyp) sign in (true,pred) (* true = dependent -- par dfaut *) else (* let s = get_sort_of env ( isevars) typs.(0) in let predpred = it_mkLambda_or_LetIn (mkSort s) sign in let caseinfo = make_default_case_info mis in let brs = array_map2 abstract_conclusion typs cstrs in let predbody = mkCase (caseinfo, (nf_betaiota predpred), mkRel 1, brs) in let pred = it_mkLambda_or_LetIn (lift (List.length sign) mtyp) sign in *) (* "TODO4-2" *) (* We skip parameters *) let cis = Array.map (fun cs -> applist (mkConstruct cs.cs_cstr, extended_rel_list 0 cs.cs_args)) cstrs in let ct = array_map2 (fun ci (_,_,t) -> (ci,t)) cis eqns in raise_pattern_matching_error (loc,env, CannotInferPredicate ct) (* (true,pred) *) (* Propagation of user-provided predicate through compilation steps *) let rec map_predicate f k = function | PrCcl ccl -> PrCcl (f k ccl) | PrProd pred -> PrProd (map_predicate f (k+1) pred) | PrLetIn ((names,dep as tm),pred) -> let k' = List.length names + (if dep<>Anonymous then 1 else 0) in PrLetIn (tm, map_predicate f (k+k') pred) let rec noccurn_predicate k = function | PrCcl ccl -> noccurn k ccl | PrProd pred -> noccurn_predicate (k+1) pred | PrLetIn ((names,dep),pred) -> let k' = List.length names + (if dep<>Anonymous then 1 else 0) in noccurn_predicate 
(k+k') pred let liftn_predicate n = map_predicate (liftn n) let lift_predicate n = liftn_predicate n 1 let regeneralize_index_predicate n = map_predicate (regeneralize_index n) 0 let substnl_predicate sigma = map_predicate (substnl sigma) (* This is parallel bindings *) let subst_predicate (args,copt) pred = let sigma = match copt with | None -> List.rev args | Some c -> c::(List.rev args) in substnl_predicate sigma 0 pred let specialize_predicate_var (cur,typ) = function | PrProd _ | PrCcl _ -> anomaly "specialize_predicate_var: a pattern-variable must be pushed" | PrLetIn (([],dep),pred) -> subst_predicate ([],if dep<>Anonymous then Some cur else None) pred | PrLetIn ((_,dep),pred) -> (match typ with | IsInd (_,IndType (_,realargs)) -> subst_predicate (realargs,if dep<>Anonymous then Some cur else None) pred | _ -> anomaly "specialize_predicate_var") let ungeneralize_predicate = function | PrLetIn _ | PrCcl _ -> anomaly "ungeneralize_predicate: expects a product" | PrProd pred -> pred (*****************************************************************************) (* We have pred = [X:=realargs;x:=c]P typed in Gamma1, x:I(realargs), Gamma2 *) (* and we want to abstract P over y:t(x) typed in the same context to get *) (* *) (* pred' = [X:=realargs;x':=c](y':t(x'))P[y:=y'] *) (* *) (* We first need to lift t(x) s.t. it is typed in Gamma, X:=rargs, x' *) (* then we have to replace x by x' in t(x) and y by y' in P *) (*****************************************************************************) let generalize_predicate ny d = function | PrLetIn ((names,dep as tm),pred) -> if dep=Anonymous then anomaly "Undetected dependency"; let p = List.length names + 1 in let pred = lift_predicate 1 pred in let pred = regeneralize_index_predicate (ny+p+1) pred in PrLetIn (tm, PrProd pred) | PrProd _ | PrCcl _ -> anomaly "generalize_predicate: expects a non trivial pattern" let rec extract_predicate l = function | pred, Alias (deppat,nondeppat,_,_)::tms -> let tms' = match kind_of_term nondeppat with | Rel i -> replace_tomatch i deppat tms | _ -> (* initial terms are not dependent *) tms in extract_predicate l (pred,tms') | PrProd pred, Abstract d'::tms -> let d' = map_rel_declaration (lift (List.length l)) d' in substl l (mkProd_or_LetIn d' (extract_predicate [] (pred,tms))) | PrLetIn (([],dep),pred), Pushed ((cur,_),_)::tms -> extract_predicate (if dep<>Anonymous then cur::l else l) (pred,tms) | PrLetIn ((_,dep),pred), Pushed ((cur,IsInd (_,(IndType(_,realargs)))),_)::tms -> let l = List.rev realargs@l in extract_predicate (if dep<>Anonymous then cur::l else l) (pred,tms) | PrCcl ccl, [] -> substl l ccl | _ -> anomaly"extract_predicate: predicate inconsistent with terms to match" let abstract_predicate env sigma indf cur tms = function | (PrProd _ | PrCcl _) -> anomaly "abstract_predicate: must be some LetIn" | PrLetIn ((names,dep),pred) -> let sign = make_arity_signature env true indf in (* n is the number of real args + 1 *) let n = List.length sign in let tms = lift_tomatch_stack n tms in let tms = match kind_of_term cur with | Rel i -> regeneralize_index_tomatch (i+n) tms | _ -> (* Initial case *) tms in (* Depending on whether the predicate is dependent or not, and has real args or not, we lift it to make room for [sign] *) (* Even if not intrinsically dep, we move the predicate into a dep one *) let sign,k = if names = [] & n <> 1 then (* Real args were not considered *) (if dep<>Anonymous then ((let (_,c,t) = List.hd sign in (dep,c,t)::List.tl sign),n-1) else (sign,n)) else (* Real args are OK *) 
(List.map2 (fun na (_,c,t) -> (na,c,t)) (dep::names) sign, if dep<>Anonymous then 0 else 1) in let pred = lift_predicate k pred in let pred = extract_predicate [] (pred,tms) in (true, it_mkLambda_or_LetIn_name env pred sign) let rec known_dependent = function | None -> false | Some (PrLetIn ((_,dep),_)) -> dep<>Anonymous | Some (PrCcl _) -> false | Some (PrProd _) -> anomaly "known_dependent: can only be used when patterns remain" (* [expand_arg] is used by [specialize_predicate] it replaces gamma, x1...xn, x1...xk |- pred by gamma, x1...xn, x1...xk-1 |- [X=realargs,xk=xk]pred (if dep) or by gamma, x1...xn, x1...xk-1 |- [X=realargs]pred (if not dep) *) let expand_arg n alreadydep (na,t) deps (k,pred) = (* current can occur in pred even if the original problem is not dependent *) let dep = if alreadydep<>Anonymous then alreadydep else if deps = [] && noccurn_predicate 1 pred then Anonymous else Name (id_of_string "x") in let pred = if dep<>Anonymous then pred else lift_predicate (-1) pred in (* There is no dependency in realargs for subpattern *) (k-1, PrLetIn (([],dep), pred)) (*****************************************************************************) (* pred = [X:=realargs;x:=c]P types the following problem: *) (* *) (* Gamma |- match Pushed(c:I(realargs)) rest with...end: pred *) (* *) (* where the branch with constructor Ci:(x1:T1)...(xn:Tn)->I(realargsi) *) (* is considered. Assume each Ti is some Ii(argsi). *) (* We let e=Ci(x1,...,xn) and replace pred by *) (* *) (* pred' = [X1:=rargs1,x1:=x1']...[Xn:=rargsn,xn:=xn'](P[X:=realargsi;x:=e]) *) (* *) (* s.t Gamma,x1'..xn' |- match Pushed(x1')..Pushed(xn') rest with..end :pred'*) (* *) (*****************************************************************************) let specialize_predicate tomatchs deps cs = function | (PrProd _ | PrCcl _) -> anomaly "specialize_predicate: a matched pattern must be pushed" | PrLetIn ((names,isdep),pred) -> (* Assume some gamma st: gamma, (X,x:=realargs,copt) |- pred *) let nrealargs = List.length names in let k = nrealargs + (if isdep<>Anonymous then 1 else 0) in (* We adjust pred st: gamma, x1..xn, (X,x:=realargs,copt) |- pred' *) let n = cs.cs_nargs in let pred' = liftn_predicate n (k+1) pred in let argsi = if nrealargs <> 0 then Array.to_list cs.cs_concl_realargs else [] in let copti = if isdep<>Anonymous then Some (build_dependent_constructor cs) else None in (* The substituends argsi, copti are all defined in gamma, x1...xn *) (* We need _parallel_ bindings to get gamma, x1...xn |- pred'' *) let pred'' = subst_predicate (argsi, copti) pred' in (* We adjust pred st: gamma, x1..xn, x1..xn |- pred'' *) let pred''' = liftn_predicate n (n+1) pred'' in (* We finally get gamma,x1..xn |- [X1,x1:=R1,x1]..[Xn,xn:=Rn,xn]pred'''*) snd (List.fold_right2 (expand_arg n isdep) tomatchs deps (n,pred''')) let find_predicate loc env isevars p typs cstrs current (IndType (indf,realargs)) tms = let (dep,pred) = match p with | Some p -> abstract_predicate env ( !isevars) indf current tms p | None -> infer_predicate loc env isevars typs cstrs indf in let typ = whd_beta ( !isevars) (applist (pred, realargs)) in if dep then (pred, whd_beta ( !isevars) (applist (typ, [current])), new_Type ()) else (pred, typ, new_Type ()) (************************************************************************) (* Sorting equations by constructor *) type inversion_problem = (* the discriminating arg in some Ind and its order in Ind *) | Incompatible of int * (int * int) | Constraints of (int * constr) list let solve_constraints 
constr_info indt = (* TODO *) Constraints [] let rec irrefutable env = function | PatVar (_,name) -> true | PatCstr (_,cstr,args,_) -> let ind = inductive_of_constructor cstr in let (_,mip) = Inductive.lookup_mind_specif env ind in let one_constr = Array.length mip.mind_user_lc = 1 in one_constr & List.for_all (irrefutable env) args let first_clause_irrefutable env = function | eqn::mat -> List.for_all (irrefutable env) eqn.patterns | _ -> false let group_equations pb ind current cstrs mat = let mat = if first_clause_irrefutable pb.env mat then [List.hd mat] else mat in let brs = Array.create (Array.length cstrs) [] in let only_default = ref true in let _ = List.fold_right (* To be sure it's from bottom to top *) (fun eqn () -> let rest = remove_current_pattern eqn in let pat = current_pattern eqn in match check_and_adjust_constructor pb.env ind cstrs pat with | PatVar (_,name) -> (* This is a default clause that we expand *) for i=1 to Array.length cstrs do let n = cstrs.(i-1).cs_nargs in let args = make_anonymous_patvars n in brs.(i-1) <- (args, rest) :: brs.(i-1) done | PatCstr (loc,((_,i)),args,_) -> (* This is a regular clause *) only_default := false; brs.(i-1) <- (args,rest) :: brs.(i-1)) mat () in (brs,!only_default) (************************************************************************) (* Here starts the pattern-matching compilation algorithm *) (* Abstracting over dependent subterms to match *) let rec generalize_problem pb = function | [] -> pb | i::l -> let d = map_rel_declaration (lift i) (Environ.lookup_rel i pb.env) in let pb' = generalize_problem pb l in let tomatch = lift_tomatch_stack 1 pb'.tomatch in let tomatch = regeneralize_index_tomatch (i+1) tomatch in { pb with tomatch = Abstract d :: tomatch; pred = Option.map (generalize_predicate i d) pb'.pred } (* No more patterns: typing the right-hand side of equations *) let build_leaf pb = let rhs = extract_rhs pb in let tycon = match pb.pred with | None -> anomaly "Predicate not found" | Some (PrCcl typ) -> mk_tycon typ | Some _ -> anomaly "not all parameters of pred have been consumed" in pb.typing_function tycon rhs.rhs_env rhs.it (* Building the sub-problem when all patterns are variables *) let shift_problem (current,t) pb = {pb with tomatch = Alias (current,current,NonDepAlias,type_of_tomatch t)::pb.tomatch; pred = Option.map (specialize_predicate_var (current,t)) pb.pred; history = push_history_pattern 0 AliasLeaf pb.history; mat = List.map remove_current_pattern pb.mat } (* Building the sub-pattern-matching problem for a given branch *) let build_branch current deps pb eqns const_info = (* We remember that we descend through a constructor *) let alias_type = if Array.length const_info.cs_concl_realargs = 0 & not (known_dependent pb.pred) & deps = [] then NonDepAlias else DepAlias in let history = push_history_pattern const_info.cs_nargs (AliasConstructor const_info.cs_cstr) pb.history in (* We find matching clauses *) let cs_args = (*assums_of_rel_context*) const_info.cs_args in let names = get_names pb.env cs_args eqns in let submat = List.map (fun (tms,eqn) -> prepend_pattern tms eqn) eqns in if submat = [] then raise_pattern_matching_error (dummy_loc, pb.env, NonExhaustive (complete_history history)); let typs = List.map2 (fun (_,c,t) na -> (na,c,t)) cs_args names in let _,typs',_ = List.fold_right (fun (na,c,t as d) (env,typs,tms) -> let tm1 = List.map List.hd tms in let tms = List.map List.tl tms in (push_rel d env, (na,to_mutind env pb.isevars tm1 c t)::typs,tms)) typs (pb.env,[],List.map fst eqns) in let 
dep_sign = find_dependencies_signature (dependencies_in_rhs const_info.cs_nargs eqns) (List.rev typs) in (* The dependent term to subst in the types of the remaining UnPushed terms is relative to the current context enriched by topushs *) let ci = build_dependent_constructor const_info in (* We replace [(mkRel 1)] by its expansion [ci] *) (* and context "Gamma = Gamma1, current, Gamma2" by "Gamma;typs;curalias" *) (* This is done in two steps : first from "Gamma |- tms" *) (* into "Gamma; typs; curalias |- tms" *) let tomatch = lift_tomatch_stack const_info.cs_nargs pb.tomatch in let currents = list_map2_i (fun i (na,t) deps -> Pushed ((mkRel i, lift_tomatch_type i t), deps)) 1 typs' (List.rev dep_sign) in let sign = List.map (fun (na,t) -> mkDeclTomatch na t) typs' in let ind = appvect ( applist (mkInd (inductive_of_constructor const_info.cs_cstr), List.map (lift const_info.cs_nargs) const_info.cs_params), const_info.cs_concl_realargs) in let cur_alias = lift (List.length sign) current in let currents = Alias (ci,cur_alias,alias_type,ind) :: currents in let env' = push_rels sign pb.env in let pred' = Option.map (specialize_predicate (List.rev typs') dep_sign const_info) pb.pred in sign, { pb with env = env'; tomatch = List.rev_append currents tomatch; pred = pred'; history = history; mat = List.map (push_rels_eqn_with_names sign) submat } (********************************************************************** INVARIANT: pb = { env, subst, tomatch, mat, ...} tomatch = list of Pushed (c:T) or Abstract (na:T) or Alias (c:T) "Pushed" terms and types are relative to env "Abstract" types are relative to env enriched by the previous terms to match *) (**********************************************************************) (* Main compiling descent *) let rec compile pb = match pb.tomatch with | (Pushed cur)::rest -> match_current { pb with tomatch = rest } cur | (Alias x)::rest -> compile_alias pb x rest | (Abstract d)::rest -> compile_generalization pb d rest | [] -> build_leaf pb and match_current pb tomatch = let ((current,typ as ct),deps) = adjust_tomatch_to_pattern pb tomatch in match typ with | NotInd (_,typ) -> check_all_variables typ pb.mat; compile (shift_problem ct pb) | IsInd (_,(IndType(indf,realargs) as indt)) -> let mind,_ = dest_ind_family indf in let cstrs = get_constructors pb.env indf in let eqns,onlydflt = group_equations pb mind current cstrs pb.mat in if (Array.length cstrs <> 0 or pb.mat <> []) & onlydflt then compile (shift_problem ct pb) else let _constraints = Array.map (solve_constraints indt) cstrs in (* We generalize over terms depending on current term to match *) let pb = generalize_problem pb deps in (* We compile branches *) let brs = array_map2 (compile_branch current deps pb) eqns cstrs in (* We build the (elementary) case analysis *) let brvals = Array.map (fun (v,_) -> v) brs in let brtyps = Array.map (fun (_,t) -> t) brs in let (pred,typ,s) = find_predicate pb.caseloc pb.env pb.isevars pb.pred brtyps cstrs current indt pb.tomatch in let ci = make_case_info pb.env mind pb.casestyle in let case = mkCase (ci,nf_betaiota Evd.empty pred,current,brvals) in let inst = List.map mkRel deps in { uj_val = applist (case, inst); uj_type = substl inst typ } and compile_branch current deps pb eqn cstr = let sign, pb = build_branch current deps pb eqn cstr in let j = compile pb in (it_mkLambda_or_LetIn j.uj_val sign, j.uj_type) and compile_generalization pb d rest = let pb = { pb with env = push_rel d pb.env; tomatch = rest; pred = Option.map ungeneralize_predicate pb.pred; 
mat = List.map (push_rels_eqn [d]) pb.mat } in let j = compile pb in { uj_val = mkLambda_or_LetIn d j.uj_val; uj_type = mkProd_or_LetIn d j.uj_type } and compile_alias pb (deppat,nondeppat,d,t) rest = let history = simplify_history pb.history in let sign, newenv, mat = insert_aliases pb.env ( !(pb.isevars)) (deppat,nondeppat,d,t) pb.mat in let n = List.length sign in (* We had Gamma1; x:current; Gamma2 |- tomatch(x) and we rebind x to get *) (* Gamma1; x:current; Gamma2; typs; x':=curalias |- tomatch(x') *) let tomatch = lift_tomatch_stack n rest in let tomatch = match kind_of_term nondeppat with | Rel i -> if n = 1 then regeneralize_index_tomatch (i+n) tomatch else replace_tomatch i deppat tomatch | _ -> (* initial terms are not dependent *) tomatch in let pb = {pb with env = newenv; tomatch = tomatch; pred = Option.map (lift_predicate n) pb.pred; history = history; mat = mat } in let j = compile pb in List.fold_left mkSpecialLetInJudge j sign (* pour les alias des initiaux, enrichir les env de ce qu'il faut et substituer aprs par les initiaux *) (**************************************************************************) (* Preparation of the pattern-matching problem *) (* builds the matrix of equations testing that each eqn has n patterns * and linearizing the _ patterns. * Syntactic correctness has already been done in astterm *) let matx_of_eqns env eqns = let build_eqn (loc,ids,lpat,rhs) = let rhs = { rhs_env = env; avoid_ids = ids@(ids_of_named_context (named_context env)); it = rhs; } in { patterns = lpat; alias_stack = []; eqn_loc = loc; used = ref false; rhs = rhs } in List.map build_eqn eqns (************************************************************************) (* preparing the elimination predicate if any *) let oldprepare_predicate_from_tycon loc dep env isevars tomatchs sign c = let cook (n, l, env, signs) = function | c,IsInd (_,IndType(indf,realargs)) -> let indf' = lift_inductive_family n indf in let sign = make_arity_signature env dep indf' in let p = List.length realargs in if dep then (n + p + 1, c::(List.rev realargs)@l, push_rels sign env,sign::signs) else (n + p, (List.rev realargs)@l, push_rels sign env,sign::signs) | c,NotInd _ -> (n, l, env, []::signs) in let n, allargs, env, signs = List.fold_left cook (0, [], env, []) tomatchs in let names = List.rev (List.map (List.map pi1) signs) in let allargs = List.map (fun c -> lift n (nf_betadeltaiota env ( !isevars) c)) allargs in let rec build_skeleton env c = (* Don't put into normal form, it has effects on the synthesis of evars *) (* let c = whd_betadeltaiota env ( isevars) c in *) (* We turn all subterms possibly dependent into an evar with maximum ctxt*) if isEvar c or List.exists (eq_constr c) allargs then e_new_evar isevars env ~src:(loc, Evd.CasesType) (Retyping.get_type_of env ( !isevars) c) else map_constr_with_full_binders push_rel build_skeleton env c in names, build_skeleton env (lift n c) (* Here, [pred] is assumed to be in the context built from all *) (* realargs and terms to match *) let build_initial_predicate isdep allnames pred = let nar = List.fold_left (fun n names -> List.length names + n) 0 allnames in let rec buildrec n pred = function | [] -> PrCcl pred | names::lnames -> let names' = if isdep then List.tl names else names in let n' = n + List.length names' in let pred, p, user_p = if isdep then if dependent (mkRel (nar-n')) pred then pred, 1, 1 else liftn (-1) (nar-n') pred, 0, 1 else pred, 0, 0 in let na = if p=1 then let na = List.hd names in if na = Anonymous then (* peut arriver en 
raison des evars *) Name (id_of_string "x") (*Hum*) else na else Anonymous in PrLetIn ((names',na), buildrec (n'+user_p) pred lnames) in buildrec 0 pred allnames let extract_arity_signature env0 tomatchl tmsign = let get_one_sign n tm (na,t) = match tm with | NotInd (bo,typ) -> (match t with | None -> [na,Option.map (lift n) bo,lift n typ] | Some (loc,_,_,_) -> user_err_loc (loc,"", str "Unexpected type annotation for a term of non inductive type")) | IsInd (_,IndType(indf,realargs)) -> let indf' = lift_inductive_family n indf in let (ind,params) = dest_ind_family indf' in let nrealargs = List.length realargs in let realnal = match t with | Some (loc,ind',nparams,realnal) -> if ind <> ind' then user_err_loc (loc,"",str "Wrong inductive type"); if List.length params <> nparams or nrealargs <> List.length realnal then anomaly "Ill-formed 'in' clause in cases"; List.rev realnal | None -> list_tabulate (fun _ -> Anonymous) nrealargs in let arsign = fst (get_arity env0 indf') in (na,None,build_dependent_inductive env0 indf') ::(List.map2 (fun x (_,c,t) ->(x,c,t)) realnal arsign) in let rec buildrec n = function | [],[] -> [] | (_,tm)::ltm, x::tmsign -> let l = get_one_sign n tm x in l :: buildrec (n + List.length l) (ltm,tmsign) | _ -> assert false in List.rev (buildrec 0 (tomatchl,tmsign)) let extract_arity_signatures env0 tomatchl tmsign = let get_one_sign tm (na,t) = match tm with | NotInd (bo,typ) -> (match t with | None -> [na,bo,typ] | Some (loc,_,_,_) -> user_err_loc (loc,"", str "Unexpected type annotation for a term of non inductive type")) | IsInd (_,IndType(indf,realargs)) -> let (ind,params) = dest_ind_family indf in let nrealargs = List.length realargs in let realnal = match t with | Some (loc,ind',nparams,realnal) -> if ind <> ind' then user_err_loc (loc,"",str "Wrong inductive type"); if List.length params <> nparams or nrealargs <> List.length realnal then anomaly "Ill-formed 'in' clause in cases"; List.rev realnal | None -> list_tabulate (fun _ -> Anonymous) nrealargs in let arsign = fst (get_arity env0 indf) in (na,None,build_dependent_inductive env0 indf) ::(try List.map2 (fun x (_,c,t) ->(x,c,t)) realnal arsign with e when Errors.noncritical e -> assert false) in let rec buildrec = function | [],[] -> [] | (_,tm)::ltm, x::tmsign -> let l = get_one_sign tm x in l :: buildrec (ltm,tmsign) | _ -> assert false in List.rev (buildrec (tomatchl,tmsign)) let inh_conv_coerce_to_tycon loc env isevars j tycon = match tycon with | Some p -> let (evd',j) = Coercion.inh_conv_coerce_to loc env !isevars j p in isevars := evd'; j | None -> j let out_ind = function IsInd (_, IndType(x, y)) -> (x, y) | _ -> assert(false) let string_of_name name = match name with | Anonymous -> "anonymous" | Name n -> string_of_id n let id_of_name n = id_of_string (string_of_name n) let make_prime_id name = let str = string_of_name name in id_of_string str, id_of_string (str ^ "'") let prime avoid name = let previd, id = make_prime_id name in previd, next_ident_away id avoid let make_prime avoid prevname = let previd, id = prime !avoid prevname in avoid := id :: !avoid; previd, id let eq_id avoid id = let hid = id_of_string ("Heq_" ^ string_of_id id) in let hid' = next_ident_away hid avoid in hid' let mk_eq typ x y = mkApp (delayed_force eq_ind, [| typ; x ; y |]) let mk_eq_refl typ x = mkApp (delayed_force eq_refl, [| typ; x |]) let mk_JMeq typ x typ' y = mkApp (delayed_force Subtac_utils.jmeq_ind, [| typ; x ; typ'; y |]) let mk_JMeq_refl typ x = mkApp (delayed_force Subtac_utils.jmeq_refl, [| typ; x |]) let 
hole = GHole (dummy_loc, Evd.QuestionMark (Evd.Define true)) let constr_of_pat env isevars arsign pat avoid = let rec typ env (ty, realargs) pat avoid = match pat with | PatVar (l,name) -> let name, avoid = match name with Name n -> name, avoid | Anonymous -> let previd, id = prime avoid (Name (id_of_string "wildcard")) in Name id, id :: avoid in PatVar (l, name), [name, None, ty] @ realargs, mkRel 1, ty, (List.map (fun x -> mkRel 1) realargs), 1, avoid | PatCstr (l,((_, i) as cstr),args,alias) -> let cind = inductive_of_constructor cstr in let IndType (indf, _) = try find_rectype env ( !isevars) (lift (-(List.length realargs)) ty) with Not_found -> error_case_not_inductive env {uj_val = ty; uj_type = Typing.type_of env !isevars ty} in let ind, params = dest_ind_family indf in if ind <> cind then error_bad_constructor_loc l cstr ind; let cstrs = get_constructors env indf in let ci = cstrs.(i-1) in let nb_args_constr = ci.cs_nargs in assert(nb_args_constr = List.length args); let patargs, args, sign, env, n, m, avoid = List.fold_right2 (fun (na, c, t) ua (patargs, args, sign, env, n, m, avoid) -> let pat', sign', arg', typ', argtypargs, n', avoid = typ env (substl args (liftn (List.length sign) (succ (List.length args)) t), []) ua avoid in let args' = arg' :: List.map (lift n') args in let env' = push_rels sign' env in (pat' :: patargs, args', sign' @ sign, env', n' + n, succ m, avoid)) ci.cs_args (List.rev args) ([], [], [], env, 0, 0, avoid) in let args = List.rev args in let patargs = List.rev patargs in let pat' = PatCstr (l, cstr, patargs, alias) in let cstr = mkConstruct ci.cs_cstr in let app = applistc cstr (List.map (lift (List.length sign)) params) in let app = applistc app args in let apptype = Retyping.get_type_of env ( !isevars) app in let IndType (indf, realargs) = find_rectype env ( !isevars) apptype in match alias with Anonymous -> pat', sign, app, apptype, realargs, n, avoid | Name id -> let sign = (alias, None, lift m ty) :: sign in let avoid = id :: avoid in let sign, i, avoid = try let env = push_rels sign env in isevars := the_conv_x_leq (push_rels sign env) (lift (succ m) ty) (lift 1 apptype) !isevars; let eq_t = mk_eq (lift (succ m) ty) (mkRel 1) (* alias *) (lift 1 app) (* aliased term *) in let neq = eq_id avoid id in (Name neq, Some (mkRel 0), eq_t) :: sign, 2, neq :: avoid with Reduction.NotConvertible -> sign, 1, avoid in (* Mark the equality as a hole *) pat', sign, lift i app, lift i apptype, realargs, n + i, avoid in let pat', sign, patc, patty, args, z, avoid = typ env (pi3 (List.hd arsign), List.tl arsign) pat avoid in pat', (sign, patc, (pi3 (List.hd arsign), args), pat'), avoid (* shadows functional version *) let eq_id avoid id = let hid = id_of_string ("Heq_" ^ string_of_id id) in let hid' = next_ident_away hid !avoid in avoid := hid' :: !avoid; hid' let rels_of_patsign = List.map (fun ((na, b, t) as x) -> match b with | Some t' when kind_of_term t' = Rel 0 -> (na, None, t) | _ -> x) let vars_of_ctx ctx = let _, y = List.fold_right (fun (na, b, t) (prev, vars) -> match b with | Some t' when kind_of_term t' = Rel 0 -> prev, (GApp (dummy_loc, (GRef (dummy_loc, delayed_force refl_ref)), [hole; GVar (dummy_loc, prev)])) :: vars | _ -> match na with Anonymous -> raise (Invalid_argument "vars_of_ctx") | Name n -> n, GVar (dummy_loc, n) :: vars) ctx (id_of_string "vars_of_ctx_error", []) in List.rev y let rec is_included x y = match x, y with | PatVar _, _ -> true | _, PatVar _ -> true | PatCstr (l, (_, i), args, alias), PatCstr (l', (_, i'), args', alias') -> 
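(* Two constructor patterns can only overlap when they use the same constructor (same index [i]); their arguments are then compared pointwise by [is_included], while a variable pattern (the cases above) overlaps with anything. *)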
if i = i' then List.for_all2 is_included args args' else false (* liftsign is the current pattern's complete signature length. Hence pats is already typed in its full signature. However prevpatterns are in the original one signature per pattern form. *) let build_ineqs prevpatterns pats liftsign = let _tomatchs = List.length pats in let diffs = List.fold_left (fun c eqnpats -> let acc = List.fold_left2 (* ppat is the pattern we are discriminating against, curpat is the current one. *) (fun acc (ppat_sign, ppat_c, (ppat_ty, ppat_tyargs), ppat) (curpat_sign, curpat_c, (curpat_ty, curpat_tyargs), curpat) -> match acc with None -> None | Some (sign, len, n, c) -> (* FixMe: do not work with ppat_args *) if is_included curpat ppat then (* Length of previous pattern's signature *) let lens = List.length ppat_sign in (* Accumulated length of previous pattern's signatures *) let len' = lens + len in let acc = ((* Jump over previous prevpat signs *) lift_rel_context len ppat_sign @ sign, len', succ n, (* nth pattern *) mkApp (delayed_force eq_ind, [| lift (len' + liftsign) curpat_ty; liftn (len + liftsign) (succ lens) ppat_c ; lift len' curpat_c |]) :: List.map (lift lens (* Jump over this prevpat signature *)) c) in Some acc else None) (Some ([], 0, 0, [])) eqnpats pats in match acc with None -> c | Some (sign, len, _, c') -> let conj = it_mkProd_or_LetIn (mk_not (mk_conj c')) (lift_rel_context liftsign sign) in conj :: c) [] prevpatterns in match diffs with [] -> None | _ -> Some (mk_conj diffs) let subst_rel_context k ctx subst = let (_, ctx') = List.fold_right (fun (n, b, t) (k, acc) -> (succ k, (n, Option.map (substnl subst k) b, substnl subst k t) :: acc)) ctx (k, []) in ctx' let lift_rel_contextn n k sign = let rec liftrec k = function | (na,c,t)::sign -> (na,Option.map (liftn n k) c,liftn n k t)::(liftrec (k-1) sign) | [] -> [] in liftrec (rel_context_length sign + k) sign let constrs_of_pats typing_fun env isevars eqns tomatchs sign neqs arity = let i = ref 0 in let (x, y, z) = List.fold_left (fun (branches, eqns, prevpatterns) eqn -> let _, newpatterns, pats = List.fold_left2 (fun (idents, newpatterns, pats) pat arsign -> let pat', cpat, idents = constr_of_pat env isevars arsign pat idents in (idents, pat' :: newpatterns, cpat :: pats)) ([], [], []) eqn.patterns sign in let newpatterns = List.rev newpatterns and opats = List.rev pats in let rhs_rels, pats, signlen = List.fold_left (fun (renv, pats, n) (sign,c, (s, args), p) -> (* Recombine signatures and terms of all of the row's patterns *) let sign' = lift_rel_context n sign in let len = List.length sign' in (sign' @ renv, (* lift to get outside of previous pattern's signatures. *) (sign', liftn n (succ len) c, (s, List.map (liftn n (succ len)) args), p) :: pats, len + n)) ([], [], 0) opats in let pats, _ = List.fold_left (* lift to get outside of past patterns to get terms in the combined environment. 
*) (fun (pats, n) (sign, c, (s, args), p) -> let len = List.length sign in ((rels_of_patsign sign, lift n c, (s, List.map (lift n) args), p) :: pats, len + n)) ([], 0) pats in let ineqs = build_ineqs prevpatterns pats signlen in let rhs_rels' = rels_of_patsign rhs_rels in let _signenv = push_rel_context rhs_rels' env in let arity = let args, nargs = List.fold_right (fun (sign, c, (_, args), _) (allargs,n) -> (args @ c :: allargs, List.length args + succ n)) pats ([], 0) in let args = List.rev args in substl args (liftn signlen (succ nargs) arity) in let rhs_rels', tycon = let neqs_rels, arity = match ineqs with | None -> [], arity | Some ineqs -> [Anonymous, None, ineqs], lift 1 arity in let eqs_rels, arity = decompose_prod_n_assum neqs arity in eqs_rels @ neqs_rels @ rhs_rels', arity in let rhs_env = push_rels rhs_rels' env in let j = typing_fun (mk_tycon tycon) rhs_env eqn.rhs.it in let bbody = it_mkLambda_or_LetIn j.uj_val rhs_rels' and btype = it_mkProd_or_LetIn j.uj_type rhs_rels' in let branch_name = id_of_string ("program_branch_" ^ (string_of_int !i)) in let branch_decl = (Name branch_name, Some (lift !i bbody), (lift !i btype)) in let branch = let bref = GVar (dummy_loc, branch_name) in match vars_of_ctx rhs_rels with [] -> bref | l -> GApp (dummy_loc, bref, l) in let branch = match ineqs with Some _ -> GApp (dummy_loc, branch, [ hole ]) | None -> branch in incr i; let rhs = { eqn.rhs with it = branch } in (branch_decl :: branches, { eqn with patterns = newpatterns; rhs = rhs } :: eqns, opats :: prevpatterns)) ([], [], []) eqns in x, y (* Builds the predicate. If the predicate is dependent, its context is * made of 1+nrealargs assumptions for each matched term in an inductive * type and 1 assumption for each term not _syntactically_ in an * inductive type. * Each matched terms are independently considered dependent or not. * A type constraint but no annotation case: it is assumed non dependent. *) let lift_ctx n ctx = let ctx', _ = List.fold_right (fun (c, t) (ctx, n') -> (liftn n n' c, liftn_tomatch_type n n' t) :: ctx, succ n') ctx ([], 0) in ctx' (* Turn matched terms into variables. 
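Concretely, every scrutinee that is not already a de Bruijn variable is let-bound in a local context under the name [filtered_var], and the optional type constraint (tycon) is abstracted over that new binding, so that the rest of the compilation only ever matches on [Rel]s.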
*) let abstract_tomatch env tomatchs tycon = let prev, ctx, names, tycon = List.fold_left (fun (prev, ctx, names, tycon) (c, t) -> let lenctx = List.length ctx in match kind_of_term c with Rel n -> (lift lenctx c, lift_tomatch_type lenctx t) :: prev, ctx, names, tycon | _ -> let tycon = Option.map (fun t -> subst_term (lift 1 c) (lift 1 t)) tycon in let name = next_ident_away (id_of_string "filtered_var") names in (mkRel 1, lift_tomatch_type (succ lenctx) t) :: lift_ctx 1 prev, (Name name, Some (lift lenctx c), lift lenctx $ type_of_tomatch t) :: ctx, name :: names, tycon) ([], [], [], tycon) tomatchs in List.rev prev, ctx, tycon let is_dependent_ind = function IsInd (_, IndType (indf, args)) when List.length args > 0 -> true | _ -> false let build_dependent_signature env evars avoid tomatchs arsign = let avoid = ref avoid in let arsign = List.rev arsign in let allnames = List.rev (List.map (List.map pi1) arsign) in let nar = List.fold_left (fun n names -> List.length names + n) 0 allnames in let eqs, neqs, refls, slift, arsign' = List.fold_left2 (fun (eqs, neqs, refl_args, slift, arsigns) (tm, ty) arsign -> (* The accumulator: previous eqs, number of previous eqs, lift to get outside eqs and in the introduced variables ('as' and 'in'), new arity signatures *) match ty with IsInd (ty, IndType (indf, args)) when List.length args > 0 -> (* Build the arity signature following the names in matched terms as much as possible *) let argsign = List.tl arsign in (* arguments in inverse application order *) let (appn, appb, appt) as _appsign = List.hd arsign in (* The matched argument *) let argsign = List.rev argsign in (* arguments in application order *) let env', nargeqs, argeqs, refl_args, slift, argsign' = List.fold_left2 (fun (env, nargeqs, argeqs, refl_args, slift, argsign') arg (name, b, t) -> let argt = Retyping.get_type_of env evars arg in let eq, refl_arg = if Reductionops.is_conv env evars argt t then (mk_eq (lift (nargeqs + slift) argt) (mkRel (nargeqs + slift)) (lift (nargeqs + nar) arg), mk_eq_refl argt arg) else (mk_JMeq (lift (nargeqs + slift) t) (mkRel (nargeqs + slift)) (lift (nargeqs + nar) argt) (lift (nargeqs + nar) arg), mk_JMeq_refl argt arg) in let previd, id = let name = match kind_of_term arg with Rel n -> pi1 (lookup_rel n env) | _ -> name in make_prime avoid name in (env, succ nargeqs, (Name (eq_id avoid previd), None, eq) :: argeqs, refl_arg :: refl_args, pred slift, (Name id, b, t) :: argsign')) (env, neqs, [], [], slift, []) args argsign in let eq = mk_JMeq (lift (nargeqs + slift) appt) (mkRel (nargeqs + slift)) (lift (nargeqs + nar) ty) (lift (nargeqs + nar) tm) in let refl_eq = mk_JMeq_refl ty tm in let previd, id = make_prime avoid appn in (((Name (eq_id avoid previd), None, eq) :: argeqs) :: eqs, succ nargeqs, refl_eq :: refl_args, pred slift, (((Name id, appb, appt) :: argsign') :: arsigns)) | _ -> (* Non dependent inductive or not inductive, just use a regular equality *) let (name, b, typ) = match arsign with [x] -> x | _ -> assert(false) in let previd, id = make_prime avoid name in let arsign' = (Name id, b, typ) in let tomatch_ty = type_of_tomatch ty in let eq = mk_eq (lift nar tomatch_ty) (mkRel slift) (lift nar tm) in ([(Name (eq_id avoid previd), None, eq)] :: eqs, succ neqs, (mk_eq_refl tomatch_ty tm) :: refl_args, pred slift, (arsign' :: []) :: arsigns)) ([], 0, [], nar, []) tomatchs arsign in let arsign'' = List.rev arsign' in assert(slift = 0); (* we must have folded over all elements of the arity signature *) arsign'', allnames, nar, eqs, neqs, refls 
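(* [build_dependent_signature] returns the renamed arity signatures together with, for each matched term, the equality hypotheses relating it to the fresh "as"/"in" variables, plus the reflexivity proofs used to instantiate them. Schematically, and only as an illustration, matching a term [v : vector A n] yields a signature of the shape [n' : nat; v' : vector A n'] extended with hypotheses [Heq_n : n' = n] (plain equality when the two types are convertible) and [Heq_v : JMeq v' v] (heterogeneous equality otherwise), while [refls] collects the corresponding [refl_equal]/[JMeq_refl] terms. *)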
(**************************************************************************) (* Main entry of the matching compilation *) let liftn_rel_context n k sign = let rec liftrec k = function | (na,c,t)::sign -> (na,Option.map (liftn n k) c,liftn n k t)::(liftrec (k-1) sign) | [] -> [] in liftrec (k + rel_context_length sign) sign let nf_evars_env sigma (env : env) : env = let nf t = nf_evar sigma t in let env0 : env = reset_context env in let f e (na, b, t) e' : env = Environ.push_named (na, Option.map nf b, nf t) e' in let env' = Environ.fold_named_context f ~init:env0 env in Environ.fold_rel_context (fun e (na, b, t) e' -> Environ.push_rel (na, Option.map nf b, nf t) e') ~init:env' env let prepare_predicate_from_rettyp loc typing_fun isevars env tomatchs sign tycon rtntyp = (* We extract the signature of the arity *) let arsign = extract_arity_signature env tomatchs sign in let newenv = List.fold_right push_rels arsign env in let allnames = List.rev (List.map (List.map pi1) arsign) in match rtntyp with | Some rtntyp -> let predcclj = typing_fun (mk_tycon (new_Type ())) newenv rtntyp in let predccl = (j_nf_evar !isevars predcclj).uj_val in Some (build_initial_predicate true allnames predccl) | None -> match valcon_of_tycon tycon with | Some ty -> let pred = prepare_predicate_from_arsign_tycon loc env !isevars tomatchs arsign ty in Some (build_initial_predicate true allnames pred) | None -> None let compile_cases loc style (typing_fun, isevars) (tycon : Evarutil.type_constraint) env (predopt, tomatchl, eqns) = let typing_fun tycon env = typing_fun tycon env isevars in (* We build the matrix of patterns and right-hand side *) let matx = matx_of_eqns env eqns in (* We build the vector of terms to match consistently with the *) (* constructors found in patterns *) let tomatchs = coerce_to_indtype typing_fun isevars env matx tomatchl in let _isdep = List.exists (fun (x, y) -> is_dependent_ind y) tomatchs in if predopt = None then let tycon = valcon_of_tycon tycon in let tomatchs, tomatchs_lets, tycon' = abstract_tomatch env tomatchs tycon in let env = push_rel_context tomatchs_lets env in let len = List.length eqns in let sign, allnames, signlen, eqs, neqs, args = (* The arity signature *) let arsign = extract_arity_signatures env tomatchs (List.map snd tomatchl) in (* Build the dependent arity signature, the equalities which makes the first part of the predicate and their instantiations. 
*) let avoid = [] in build_dependent_signature env ( !isevars) avoid tomatchs arsign in let tycon, arity = match tycon' with | None -> let ev = mkExistential env isevars in ev, ev | Some t -> Option.get tycon, prepare_predicate_from_arsign_tycon loc env ( !isevars) tomatchs sign t in let neqs, arity = let ctx = context_of_arsign eqs in let neqs = List.length ctx in neqs, it_mkProd_or_LetIn (lift neqs arity) ctx in let lets, matx = (* Type the rhs under the assumption of equations *) constrs_of_pats typing_fun env isevars matx tomatchs sign neqs arity in let matx = List.rev matx in let _ = assert(len = List.length lets) in let env = push_rels lets env in let matx = List.map (fun eqn -> { eqn with rhs = { eqn.rhs with rhs_env = env } }) matx in let tomatchs = List.map (fun (x, y) -> lift len x, lift_tomatch_type len y) tomatchs in let args = List.rev_map (lift len) args in let pred = liftn len (succ signlen) arity in let pred = build_initial_predicate true allnames pred in (* We push the initial terms to match and push their alias to rhs' envs *) (* names of aliases will be recovered from patterns (hence Anonymous here) *) let initial_pushed = List.map (fun tm -> Pushed (tm,[])) tomatchs in let pb = { env = env; isevars = isevars; pred = Some pred; tomatch = initial_pushed; history = start_history (List.length initial_pushed); mat = matx; caseloc = loc; casestyle= style; typing_function = typing_fun } in let j = compile pb in (* We check for unused patterns *) List.iter (check_unused_pattern env) matx; let body = it_mkLambda_or_LetIn (applistc j.uj_val args) lets in let j = { uj_val = it_mkLambda_or_LetIn body tomatchs_lets; uj_type = nf_evar !isevars tycon; } in j else (* We build the elimination predicate if any and check its consistency *) (* with the type of arguments to match *) let tmsign = List.map snd tomatchl in let pred = prepare_predicate_from_rettyp loc typing_fun isevars env tomatchs tmsign tycon predopt in (* We push the initial terms to match and push their alias to rhs' envs *) (* names of aliases will be recovered from patterns (hence Anonymous here) *) let initial_pushed = List.map (fun tm -> Pushed (tm,[])) tomatchs in let pb = { env = env; isevars = isevars; pred = pred; tomatch = initial_pushed; history = start_history (List.length initial_pushed); mat = matx; caseloc = loc; casestyle= style; typing_function = typing_fun } in let j = compile pb in (* We check for unused patterns *) List.iter (check_unused_pattern env) matx; inh_conv_coerce_to_tycon loc env isevars j tycon end coq-8.4pl2/plugins/subtac/subtac_utils.ml0000640000175000001440000003444112121620060017620 0ustar notinusers(** -*- compile-command: "make -C ../.. 
plugins/subtac/subtac_plugin.cma" -*- *) open Evd open Libnames open Coqlib open Term open Names open Util let ($) f x = f x (****************************************************************************) (* Library linking *) let contrib_name = "Program" let subtac_dir = [contrib_name] let fixsub_module = subtac_dir @ ["Wf"] let utils_module = subtac_dir @ ["Utils"] let tactics_module = subtac_dir @ ["Tactics"] let init_constant dir s () = gen_constant contrib_name dir s let init_reference dir s () = gen_reference contrib_name dir s let safe_init_constant md name () = check_required_library ("Coq"::md); init_constant md name () let ex_pi1 = init_constant utils_module "ex_pi1" let ex_pi2 = init_constant utils_module "ex_pi2" let make_ref l s = init_reference l s let well_founded_ref = make_ref ["Init";"Wf"] "Well_founded" let acc_ref = make_ref ["Init";"Wf"] "Acc" let acc_inv_ref = make_ref ["Init";"Wf"] "Acc_inv" let fix_sub_ref = make_ref fixsub_module "Fix_sub" let measure_on_R_ref = make_ref fixsub_module "MR" let fix_measure_sub_ref = make_ref fixsub_module "Fix_measure_sub" let refl_ref = make_ref ["Init";"Logic"] "refl_equal" let make_ref s = Qualid (dummy_loc, qualid_of_string s) let lt_ref = make_ref "Init.Peano.lt" let sig_ref = make_ref "Init.Specif.sig" let proj1_sig_ref = make_ref "Init.Specif.proj1_sig" let proj2_sig_ref = make_ref "Init.Specif.proj2_sig" let build_sig () = { proj1 = init_constant ["Init"; "Specif"] "proj1_sig" (); proj2 = init_constant ["Init"; "Specif"] "proj2_sig" (); elim = init_constant ["Init"; "Specif"] "sig_rec" (); intro = init_constant ["Init"; "Specif"] "exist" (); typ = init_constant ["Init"; "Specif"] "sig" () } let sig_ = build_sig let fix_proto = safe_init_constant tactics_module "fix_proto" let hide_obligation = safe_init_constant tactics_module "obligation" let eq_ind = init_constant ["Init"; "Logic"] "eq" let eq_rec = init_constant ["Init"; "Logic"] "eq_rec" let eq_rect = init_constant ["Init"; "Logic"] "eq_rect" let eq_refl = init_constant ["Init"; "Logic"] "refl_equal" let eq_ind_ref = init_reference ["Init"; "Logic"] "eq" let refl_equal_ref = init_reference ["Init"; "Logic"] "refl_equal" let not_ref = init_constant ["Init"; "Logic"] "not" let and_typ = Coqlib.build_coq_and let eqdep_ind = init_constant [ "Logic";"Eqdep"] "eq_dep" let eqdep_rec = init_constant ["Logic";"Eqdep"] "eq_dep_rec" let eqdep_ind_ref = init_reference [ "Logic";"Eqdep"] "eq_dep" let eqdep_intro_ref = init_reference [ "Logic";"Eqdep"] "eq_dep_intro" let jmeq_ind = safe_init_constant ["Logic";"JMeq"] "JMeq" let jmeq_rec = init_constant ["Logic";"JMeq"] "JMeq_rec" let jmeq_refl = init_constant ["Logic";"JMeq"] "JMeq_refl" let ex_ind = init_constant ["Init"; "Logic"] "ex" let ex_intro = init_reference ["Init"; "Logic"] "ex_intro" let proj1 = init_constant ["Init"; "Logic"] "proj1" let proj2 = init_constant ["Init"; "Logic"] "proj2" let existS = build_sigma_type let prod = build_prod (* orders *) let well_founded = init_constant ["Init"; "Wf"] "well_founded" let fix = init_constant ["Init"; "Wf"] "Fix" let acc = init_constant ["Init"; "Wf"] "Acc" let acc_inv = init_constant ["Init"; "Wf"] "Acc_inv" let extconstr = Constrextern.extern_constr true (Global.env ()) let extsort s = Constrextern.extern_constr true (Global.env ()) (mkSort s) open Pp let my_print_constr = Termops.print_constr_env let my_print_constr_expr = Ppconstr.pr_constr_expr let my_print_rel_context env ctx = Printer.pr_rel_context env ctx let my_print_context = Termops.print_rel_context let 
my_print_named_context = Termops.print_named_context let my_print_env = Termops.print_env let my_print_glob_constr = Printer.pr_glob_constr_env let my_print_evardefs = Evd.pr_evar_map None let my_print_tycon_type = Evarutil.pr_tycon_type let debug_level = 2 let debug_on = true let debug n s = if debug_on then if !Flags.debug && n >= debug_level then msgnl s else () else () let debug_msg n s = if debug_on then if !Flags.debug && n >= debug_level then s else mt () else mt () let trace s = if debug_on then if !Flags.debug && debug_level > 0 then msgnl s else () else () let rec pp_list f = function [] -> mt() | x :: y -> f x ++ spc () ++ pp_list f y let wf_relations = Hashtbl.create 10 let std_relations () = let add k v = Hashtbl.add wf_relations k v in add (init_constant ["Init"; "Peano"] "lt" ()) (init_constant ["Arith"; "Wf_nat"] "lt_wf") let std_relations = Lazy.lazy_from_fun std_relations type binders = Topconstr.local_binder list let app_opt c e = match c with Some constr -> constr e | None -> e let print_args env args = Array.fold_right (fun a acc -> my_print_constr env a ++ spc () ++ acc) args (str "") let make_existential loc ?(opaque = Define true) env isevars c = Evarutil.e_new_evar isevars env ~src:(loc, QuestionMark opaque) c let no_goals_or_obligations = function | GoalEvar | QuestionMark _ -> false | _ -> true let make_existential_expr loc env c = let key = Evarutil.new_untyped_evar () in let evar = Topconstr.CEvar (loc, key, None) in debug 2 (str "Constructed evar " ++ int key); evar let string_of_hole_kind = function | ImplicitArg _ -> "ImplicitArg" | BinderType _ -> "BinderType" | QuestionMark _ -> "QuestionMark" | CasesType -> "CasesType" | InternalHole -> "InternalHole" | TomatchTypeParameter _ -> "TomatchTypeParameter" | GoalEvar -> "GoalEvar" | ImpossibleCase -> "ImpossibleCase" | MatchingVar _ -> "MatchingVar" let evars_of_term evc init c = let rec evrec acc c = match kind_of_term c with | Evar (n, _) when Evd.mem evc n -> Evd.add acc n (Evd.find evc n) | Evar (n, _) -> assert(false) | _ -> fold_constr evrec acc c in evrec init c let non_instanciated_map env evd evm = List.fold_left (fun evm (key, evi) -> let (loc,k) = evar_source key !evd in debug 2 (str "evar " ++ int key ++ str " has kind " ++ str (string_of_hole_kind k)); match k with | QuestionMark _ -> Evd.add evm key evi | ImplicitArg (_,_,false) -> Evd.add evm key evi | _ -> debug 2 (str " and is an implicit"); Pretype_errors.error_unsolvable_implicit loc env evm (Evarutil.nf_evar_info evm evi) k None) Evd.empty (Evarutil.non_instantiated evm) let global_kind = Decl_kinds.IsDefinition Decl_kinds.Definition let goal_kind = Decl_kinds.Global, Decl_kinds.DefinitionBody Decl_kinds.Definition let global_proof_kind = Decl_kinds.IsProof Decl_kinds.Lemma let goal_proof_kind = Decl_kinds.Global, Decl_kinds.Proof Decl_kinds.Lemma let global_fix_kind = Decl_kinds.IsDefinition Decl_kinds.Fixpoint let goal_fix_kind = Decl_kinds.Global, Decl_kinds.DefinitionBody Decl_kinds.Fixpoint open Tactics open Tacticals let filter_map f l = let rec aux acc = function hd :: tl -> (match f hd with Some t -> aux (t :: acc) tl | None -> aux acc tl) | [] -> List.rev acc in aux [] l let build_dependent_sum l = let rec aux names conttac conttype = function (n, t) :: ((_ :: _) as tl) -> let hyptype = substl names t in trace (spc () ++ str ("treating evar " ^ string_of_id n)); (try trace (str " assert: " ++ my_print_constr (Global.env ()) hyptype) with e when Errors.noncritical e -> ()); let tac = assert_tac (Name n) hyptype in let conttac = (fun 
cont -> conttac (tclTHENS tac ([intros; (tclTHENSEQ [constructor_tac false (Some 1) 1 (Glob_term.ImplicitBindings [mkVar n]); cont]); ]))) in let conttype = (fun typ -> let tex = mkLambda (Name n, t, typ) in conttype (mkApp (ex_ind (), [| t; tex |]))) in aux (mkVar n :: names) conttac conttype tl | (n, t) :: [] -> (conttac intros, conttype t) | [] -> raise (Invalid_argument "build_dependent_sum") in aux [] identity identity (List.rev l) open Proof_type open Tacexpr let mkProj1 a b c = mkApp (delayed_force proj1, [| a; b; c |]) let mkProj2 a b c = mkApp (delayed_force proj2, [| a; b; c |]) let mk_ex_pi1 a b c = mkApp (delayed_force ex_pi1, [| a; b; c |]) let mk_ex_pi2 a b c = mkApp (delayed_force ex_pi2, [| a; b; c |]) let mkSubset name typ prop = mkApp ((delayed_force sig_).typ, [| typ; mkLambda (name, typ, prop) |]) let mk_eq typ x y = mkApp (delayed_force eq_ind, [| typ; x ; y |]) let mk_eq_refl typ x = mkApp (delayed_force eq_refl, [| typ; x |]) let mk_JMeq typ x typ' y = mkApp (delayed_force jmeq_ind, [| typ; x ; typ'; y |]) let mk_JMeq_refl typ x = mkApp (delayed_force jmeq_refl, [| typ; x |]) let unsafe_fold_right f = function hd :: tl -> List.fold_right f tl hd | [] -> raise (Invalid_argument "unsafe_fold_right") let mk_conj l = let conj_typ = delayed_force and_typ in unsafe_fold_right (fun c conj -> mkApp (conj_typ, [| c ; conj |])) l let mk_not c = let notc = delayed_force not_ref in mkApp (notc, [| c |]) let and_tac l hook = let andc = Coqlib.build_coq_and () in let rec aux ((accid, goal, tac, extract) as acc) = function | [] -> (* Singleton *) acc | (id, x, elgoal, eltac) :: tl -> let tac' = tclTHEN simplest_split (tclTHENLIST [tac; eltac]) in let proj = fun c -> mkProj2 goal elgoal c in let extract = List.map (fun (id, x, y, f) -> (id, x, y, (fun c -> f (mkProj1 goal elgoal c)))) extract in aux ((string_of_id id) ^ "_" ^ accid, mkApp (andc, [| goal; elgoal |]), tac', (id, x, elgoal, proj) :: extract) tl in let and_proof_id, and_goal, and_tac, and_extract = match l with | [] -> raise (Invalid_argument "and_tac: empty list of goals") | (hdid, x, hdg, hdt) :: tl -> aux (string_of_id hdid, hdg, hdt, [hdid, x, hdg, (fun c -> c)]) tl in let and_proofid = id_of_string (and_proof_id ^ "_and_proof") in Lemmas.start_proof and_proofid goal_kind and_goal (hook (fun c -> List.map (fun (id, x, t, f) -> (id, x, t, f c)) and_extract)); trace (str "Started and proof"); Pfedit.by and_tac; trace (str "Applied and tac") let destruct_ex ext ex = let rec aux c acc = match kind_of_term c with App (f, args) -> (match kind_of_term f with Ind i when i = Term.destInd (delayed_force ex_ind) && Array.length args = 2 -> let (dom, rng) = try (args.(0), args.(1)) with e when Errors.noncritical e -> assert(false) in let pi1 = (mk_ex_pi1 dom rng acc) in let rng_body = match kind_of_term rng with Lambda (_, _, t) -> subst1 pi1 t | t -> rng in pi1 :: aux rng_body (mk_ex_pi2 dom rng acc) | _ -> [acc]) | _ -> [acc] in aux ex ext open Glob_term let id_of_name = function Name n -> n | Anonymous -> raise (Invalid_argument "id_of_name") let definition_message id = Nameops.pr_id id ++ str " is defined" let recursive_message v = match Array.length v with | 0 -> error "no recursive definition" | 1 -> (Printer.pr_constant (Global.env ()) v.(0) ++ str " is recursively defined") | _ -> hov 0 (prvect_with_sep pr_comma (Printer.pr_constant (Global.env ())) v ++ spc () ++ str "are recursively defined") let print_message m = Flags.if_verbose ppnl m (* Solve an obligation using tactics, return the corresponding proof term *) let 
solve_by_tac evi t = let id = id_of_string "H" in try Pfedit.start_proof id goal_kind evi.evar_hyps evi.evar_concl (fun _ _ -> ()); Pfedit.by (tclCOMPLETE t); let _,(const,_,_,_) = Pfedit.cook_proof ignore in Pfedit.delete_current_proof (); Inductiveops.control_only_guard (Global.env ()) const.Entries.const_entry_body; const.Entries.const_entry_body with reraise -> Pfedit.delete_current_proof(); raise reraise (* let apply_tac t goal = t goal *) (* let solve_by_tac evi t = *) (* let ev = 1 in *) (* let evm = Evd.add Evd.empty ev evi in *) (* let goal = {it = evi; sigma = evm } in *) (* let (res, valid) = apply_tac t goal in *) (* if res.it = [] then *) (* let prooftree = valid [] in *) (* let proofterm, obls = Refiner.extract_open_proof res.sigma prooftree in *) (* if obls = [] then proofterm *) (* else raise Exit *) (* else raise Exit *) let rec string_of_list sep f = function [] -> "" | x :: [] -> f x | x :: ((y :: _) as tl) -> f x ^ sep ^ string_of_list sep f tl let string_of_intset d = string_of_list "," string_of_int (Intset.elements d) (**********************************************************) (* Pretty-printing *) open Printer open Ppconstr open Nameops open Evd let pr_meta_map evd = let ml = meta_list evd in let pr_name = function Name id -> str"[" ++ pr_id id ++ str"]" | _ -> mt() in let pr_meta_binding = function | (mv,Cltyp (na,b)) -> hov 0 (pr_meta mv ++ pr_name na ++ str " : " ++ Termops.print_constr b.rebus ++ fnl ()) | (mv,Clval(na,b,_)) -> hov 0 (pr_meta mv ++ pr_name na ++ str " := " ++ Termops.print_constr (fst b).rebus ++ fnl ()) in prlist pr_meta_binding ml let pr_idl idl = prlist_with_sep pr_spc pr_id idl let pr_evar_info evi = let phyps = (*pr_idl (List.rev (ids_of_named_context (evar_context evi))) *) Printer.pr_named_context (Global.env()) (evar_context evi) in let pty = Termops.print_constr evi.evar_concl in let pb = match evi.evar_body with | Evar_empty -> mt () | Evar_defined c -> spc() ++ str"=> " ++ Termops.print_constr c in hov 2 (str"[" ++ phyps ++ spc () ++ str"|- " ++ pty ++ pb ++ str"]") let pr_evar_map sigma = h 0 (prlist_with_sep pr_fnl (fun (ev,evi) -> h 0 (str(string_of_existential ev)++str"=="++ pr_evar_info evi)) (to_list sigma)) let pr_constraints pbs = h 0 (prlist_with_sep pr_fnl (fun (pbty,t1,t2) -> Termops.print_constr t1 ++ spc() ++ str (match pbty with | Reduction.CONV -> "==" | Reduction.CUMUL -> "<=") ++ spc() ++ Termops.print_constr t2) pbs) let pr_evar_map evd = let pp_evm = let evars = evd in if evars = empty then mt() else str"EVARS:"++brk(0,1)++pr_evar_map evars++fnl() in let pp_met = if meta_list evd = [] then mt() else str"METAS:"++brk(0,1)++pr_meta_map evd in v 0 (pp_evm ++ pp_met) let contrib_tactics_path = make_dirpath (List.map id_of_string ["Tactics";contrib_name;"Coq"]) let tactics_tac s = lazy(make_kn (MPfile contrib_tactics_path) (make_dirpath []) (mk_label s)) let tactics_call tac args = TacArg(dummy_loc,TacCall(dummy_loc, ArgArg(dummy_loc, Lazy.force (tactics_tac tac)),args)) coq-8.4pl2/plugins/subtac/subtac_coercion.ml0000640000175000001440000004045712121620060020265 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (match kind_of_term c with Ind i -> let len = Array.length l in let sig_ = delayed_force sig_ in if len = 2 && i = Term.destInd sig_.typ then let (a, b) = pair_of_array l in Some (a, b) else None | _ -> None) | _ -> None and disc_exist env x = match kind_of_term x with | App (c, l) -> (match kind_of_term c 
with Construct c -> if c = Term.destConstruct (delayed_force sig_).intro then Some (l.(0), l.(1), l.(2), l.(3)) else None | _ -> None) | _ -> None module Coercion = struct exception NoSubtacCoercion let disc_proj_exist env x = match kind_of_term x with | App (c, l) -> (if Term.eq_constr c (delayed_force sig_).proj1 && Array.length l = 3 then disc_exist env l.(2) else None) | _ -> None let sort_rel s1 s2 = match s1, s2 with Prop Pos, Prop Pos -> Prop Pos | Prop Pos, Prop Null -> Prop Null | Prop Null, Prop Null -> Prop Null | Prop Null, Prop Pos -> Prop Pos | Type _, Prop Pos -> Prop Pos | Type _, Prop Null -> Prop Null | _, Type _ -> s2 let hnf env isevars c = whd_betadeltaiota env isevars c let hnf_nodelta env evars c = whd_betaiota evars c let lift_args n sign = let rec liftrec k = function | t::sign -> liftn n k t :: (liftrec (k-1) sign) | [] -> [] in liftrec (List.length sign) sign let rec mu env isevars t = let rec aux v = let v = hnf env !isevars v in match disc_subset v with Some (u, p) -> let f, ct = aux u in let p = hnf env !isevars p in (Some (fun x -> app_opt env isevars f (mkApp ((delayed_force sig_).proj1, [| u; p; x |]))), ct) | None -> (None, v) in aux t and coerce loc env isevars (x : Term.constr) (y : Term.constr) : (Term.constr -> Term.constr) option = let rec coerce_unify env x y = let x = hnf env !isevars x and y = hnf env !isevars y in try isevars := the_conv_x_leq env x y !isevars; None with Reduction.NotConvertible -> coerce' env x y and coerce' env x y : (Term.constr -> Term.constr) option = let subco () = subset_coerce env isevars x y in let dest_prod c = match Reductionops.splay_prod_n env ( !isevars) 1 c with | [(na,b,t)], c -> (na,t), c | _ -> raise NoSubtacCoercion in let rec coerce_application typ typ' c c' l l' = let len = Array.length l in let rec aux tele typ typ' i co = if i < len then let hdx = l.(i) and hdy = l'.(i) in try isevars := the_conv_x_leq env hdx hdy !isevars; let (n, eqT), restT = dest_prod typ in let (n', eqT'), restT' = dest_prod typ' in aux (hdx :: tele) (subst1 hdx restT) (subst1 hdy restT') (succ i) co with Reduction.NotConvertible -> let (n, eqT), restT = dest_prod typ in let (n', eqT'), restT' = dest_prod typ' in let _ = try isevars := the_conv_x_leq env eqT eqT' !isevars with Reduction.NotConvertible -> raise NoSubtacCoercion in (* Disallow equalities on arities *) if Reduction.is_arity env eqT then raise NoSubtacCoercion; let restargs = lift_args 1 (List.rev (Array.to_list (Array.sub l (succ i) (len - (succ i))))) in let args = List.rev (restargs @ mkRel 1 :: List.map (lift 1) tele) in let pred = mkLambda (n, eqT, applistc (lift 1 c) args) in let eq = mkApp (delayed_force eq_ind, [| eqT; hdx; hdy |]) in let evar = make_existential loc env isevars eq in let eq_app x = mkApp (delayed_force eq_rect, [| eqT; hdx; pred; x; hdy; evar|]) in aux (hdy :: tele) (subst1 hdx restT) (subst1 hdy restT') (succ i) (fun x -> eq_app (co x)) else Some co in if isEvar c || isEvar c' then (* Second-order unification needed. 
*) raise NoSubtacCoercion; aux [] typ typ' 0 (fun x -> x) in match (kind_of_term x, kind_of_term y) with | Sort s, Sort s' -> (match s, s' with Prop x, Prop y when x = y -> None | Prop _, Type _ -> None | Type x, Type y when x = y -> None (* false *) | _ -> subco ()) | Prod (name, a, b), Prod (name', a', b') -> let name' = Name (Namegen.next_ident_away (id_of_string "x") (Termops.ids_of_context env)) in let env' = push_rel (name', None, a') env in let c1 = coerce_unify env' (lift 1 a') (lift 1 a) in (* env, x : a' |- c1 : lift 1 a' > lift 1 a *) let coec1 = app_opt env' isevars c1 (mkRel 1) in (* env, x : a' |- c1[x] : lift 1 a *) let c2 = coerce_unify env' (subst1 coec1 (liftn 1 2 b)) b' in (* env, x : a' |- c2 : b[c1[x]/x]] > b' *) (match c1, c2 with | None, None -> None | _, _ -> Some (fun f -> mkLambda (name', a', app_opt env' isevars c2 (mkApp (Term.lift 1 f, [| coec1 |]))))) | App (c, l), App (c', l') -> (match kind_of_term c, kind_of_term c' with Ind i, Ind i' -> (* Inductive types *) let len = Array.length l in let existS = delayed_force existS in let prod = delayed_force prod in (* Sigma types *) if len = Array.length l' && len = 2 && i = i' && (i = Term.destInd existS.typ || i = Term.destInd prod.typ) then if i = Term.destInd existS.typ then begin let (a, pb), (a', pb') = pair_of_array l, pair_of_array l' in let c1 = coerce_unify env a a' in let rec remove_head a c = match kind_of_term c with | Lambda (n, t, t') -> c, t' (*| Prod (n, t, t') -> t'*) | Evar (k, args) -> let (evs, t) = Evarutil.define_evar_as_lambda env !isevars (k,args) in isevars := evs; let (n, dom, rng) = destLambda t in let (domk, args) = destEvar dom in isevars := define domk a !isevars; t, rng | _ -> raise NoSubtacCoercion in let (pb, b), (pb', b') = remove_head a pb, remove_head a' pb' in let env' = push_rel (make_name "x", None, a) env in let c2 = coerce_unify env' b b' in match c1, c2 with None, None -> None | _, _ -> Some (fun x -> let x, y = app_opt env' isevars c1 (mkApp (existS.proj1, [| a; pb; x |])), app_opt env' isevars c2 (mkApp (existS.proj2, [| a; pb; x |])) in mkApp (existS.intro, [| a'; pb'; x ; y |])) end else begin let (a, b), (a', b') = pair_of_array l, pair_of_array l' in let c1 = coerce_unify env a a' in let c2 = coerce_unify env b b' in match c1, c2 with None, None -> None | _, _ -> Some (fun x -> let x, y = app_opt env isevars c1 (mkApp (prod.proj1, [| a; b; x |])), app_opt env isevars c2 (mkApp (prod.proj2, [| a; b; x |])) in mkApp (prod.intro, [| a'; b'; x ; y |])) end else if i = i' && len = Array.length l' then let evm = !isevars in (try subco () with NoSubtacCoercion -> let typ = Typing.type_of env evm c in let typ' = Typing.type_of env evm c' in (* if not (is_arity env evm typ) then *) coerce_application typ typ' c c' l l') (* else subco () *) else subco () | x, y when x = y -> if Array.length l = Array.length l' then let evm = !isevars in let lam_type = Typing.type_of env evm c in let lam_type' = Typing.type_of env evm c' in (* if not (is_arity env evm lam_type) then ( *) coerce_application lam_type lam_type' c c' l l' (* ) else subco () *) else subco () | _ -> subco ()) | _, _ -> subco () and subset_coerce env isevars x y = match disc_subset x with Some (u, p) -> let c = coerce_unify env u y in let f x = app_opt env isevars c (mkApp ((delayed_force sig_).proj1, [| u; p; x |])) in Some f | None -> match disc_subset y with Some (u, p) -> let c = coerce_unify env x u in Some (fun x -> let cx = app_opt env isevars c x in let evar = make_existential loc env isevars (mkApp (p, [| cx 
|])) in (mkApp ((delayed_force sig_).intro, [| u; p; cx; evar |]))) | None -> raise NoSubtacCoercion (*isevars := Evd.add_conv_pb (Reduction.CONV, x, y) !isevars; None*) in coerce_unify env x y let coerce_itf loc env isevars v t c1 = let evars = ref isevars in let coercion = coerce loc env evars t c1 in let t = Option.map (app_opt env evars coercion) v in !evars, t (* Taken from pretyping/coercion.ml *) (* Typing operations dealing with coercions *) (* Here, funj is a coercion therefore already typed in global context *) let apply_coercion_args env argl funj = let rec apply_rec acc typ = function | [] -> { uj_val = applist (j_val funj,argl); uj_type = typ } | h::restl -> (* On devrait pouvoir s'arranger pour qu'on n'ait pas à faire hnf_constr *) match kind_of_term (whd_betadeltaiota env Evd.empty typ) with | Prod (_,c1,c2) -> (* Typage garanti par l'appel à app_coercion*) apply_rec (h::acc) (subst1 h c2) restl | _ -> anomaly "apply_coercion_args" in apply_rec [] funj.uj_type argl (* appliquer le chemin de coercions de patterns p *) exception NoCoercion let apply_pattern_coercion loc pat p = List.fold_left (fun pat (co,n) -> let f i = if i let fv,isid = coercion_value i in let argl = (class_args_of env sigma typ_cl)@[ja.uj_val] in let jres = apply_coercion_args env argl fv in (if isid then { uj_val = ja.uj_val; uj_type = jres.uj_type } else jres), jres.uj_type) (hj,typ_cl) p) with e when Errors.noncritical e -> anomaly "apply_coercion" let inh_app_fun env isevars j = let isevars = ref isevars in let t = hnf env !isevars j.uj_type in match kind_of_term t with | Prod (_,_,_) -> (!isevars,j) | Evar ev when not (is_defined_evar !isevars ev) -> let (isevars',t) = define_evar_as_product !isevars ev in (isevars',{ uj_val = j.uj_val; uj_type = t }) | _ -> (try let t,p = lookup_path_to_fun_from env !isevars j.uj_type in (!isevars,apply_coercion env !isevars p j t) with Not_found -> try let coercef, t = mu env isevars t in let res = { uj_val = app_opt env isevars coercef j.uj_val; uj_type = t } in (!isevars, res) with NoSubtacCoercion | NoCoercion -> (!isevars,j)) let inh_tosort_force loc env isevars j = try let t,p = lookup_path_to_sort_from env ( isevars) j.uj_type in let j1 = apply_coercion env ( isevars) p j t in (isevars, type_judgment env (j_nf_evar ( isevars) j1)) with Not_found -> error_not_a_type_loc loc env ( isevars) j let inh_coerce_to_sort loc env isevars j = let typ = hnf env isevars j.uj_type in match kind_of_term typ with | Sort s -> (isevars,{ utj_val = j.uj_val; utj_type = s }) | Evar ev when not (is_defined_evar isevars ev) -> let (isevars',s) = define_evar_as_sort isevars ev in (isevars',{ utj_val = j.uj_val; utj_type = s }) | _ -> inh_tosort_force loc env isevars j let inh_coerce_to_base loc env isevars j = let isevars = ref isevars in let typ = hnf env !isevars j.uj_type in let ct, typ' = mu env isevars typ in let res = { uj_val = app_opt env isevars ct j.uj_val; uj_type = typ' } in !isevars, res let inh_coerce_to_prod loc env isevars t = let isevars = ref isevars in let typ = hnf env !isevars (snd t) in let _, typ' = mu env isevars typ in !isevars, (fst t, typ') let inh_coerce_to_fail env evd rigidonly v t c1 = if rigidonly & not (Heads.is_rigid env c1 && Heads.is_rigid env t) then raise NoCoercion else let v', t' = try let t2,t1,p = lookup_path_between env evd (t,c1) in match v with Some v -> let j = apply_coercion env evd p {uj_val = v; uj_type = t} t2 in Some j.uj_val, j.uj_type | None -> None, t with Not_found -> raise NoCoercion in try (the_conv_x_leq env t' c1 evd, v') 
with Reduction.NotConvertible -> raise NoCoercion let rec inh_conv_coerce_to_fail loc env evd rigidonly v t c1 = try (the_conv_x_leq env t c1 evd, v) with Reduction.NotConvertible -> try inh_coerce_to_fail env evd rigidonly v t c1 with NoCoercion -> match kind_of_term (whd_betadeltaiota env evd t), kind_of_term (whd_betadeltaiota env evd c1) with | Prod (name,t1,t2), Prod (_,u1,u2) -> (* Conversion did not work, we may succeed with a coercion. *) (* We eta-expand (hence possibly modifying the original term!) *) (* and look for a coercion c:u1->t1 s.t. fun x:u1 => v' (c x)) *) (* has type forall (x:u1), u2 (with v' recursively obtained) *) let name = match name with | Anonymous -> Name (id_of_string "x") | _ -> name in let env1 = push_rel (name,None,u1) env in let (evd', v1) = inh_conv_coerce_to_fail loc env1 evd rigidonly (Some (mkRel 1)) (lift 1 u1) (lift 1 t1) in let v1 = Option.get v1 in let v2 = Option.map (fun v -> beta_applist (lift 1 v,[v1])) v in let t2 = Termops.subst_term v1 t2 in let (evd'',v2') = inh_conv_coerce_to_fail loc env1 evd' rigidonly v2 t2 u2 in (evd'', Option.map (fun v2' -> mkLambda (name, u1, v2')) v2') | _ -> raise NoCoercion (* Look for cj' obtained from cj by inserting coercions, s.t. cj'.typ = t *) let inh_conv_coerce_to_gen rigidonly loc env evd cj ((n, t) as _tycon) = match n with | None -> let cj = { cj with uj_type = hnf_nodelta env evd cj.uj_type } and t = hnf_nodelta env evd t in let (evd', val') = try inh_conv_coerce_to_fail loc env evd rigidonly (Some cj.uj_val) cj.uj_type t with NoCoercion -> (try coerce_itf loc env evd (Some cj.uj_val) cj.uj_type t with NoSubtacCoercion -> error_actual_type_loc loc env evd cj t) in let val' = match val' with Some v -> v | None -> assert(false) in (evd',{ uj_val = val'; uj_type = t }) | Some (init, cur) -> (evd, cj) let inh_conv_coerce_to = inh_conv_coerce_to_gen false let inh_conv_coerce_rigid_to = inh_conv_coerce_to_gen true let inh_conv_coerces_to loc env isevars t ((abs, t') as _tycon) = let nabsinit, nabs = match abs with None -> 0, 0 | Some (init, cur) -> init, cur in try let rels, rng = Reductionops.splay_prod_n env ( isevars) nabs t in (* The final range free variables must have been replaced by evars, we accept only that evars in rng are applied to free vars. 
*) if noccur_with_meta 1 (succ nabs) rng then ( let env', t, t' = let env' = push_rel_context rels env in env', rng, lift nabs t' in try fst (try inh_conv_coerce_to_fail loc env' isevars false None t t' with NoCoercion -> coerce_itf loc env' isevars None t t') with NoSubtacCoercion -> error_cannot_coerce env' isevars (t, t')) else isevars with e when Errors.noncritical e -> isevars end coq-8.4pl2/plugins/subtac/subtac_classes.mli0000640000175000001440000000212112010532755020266 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Environ.env -> ('a * Term.constr option * Term.constr) list -> Topconstr.constr_expr list -> Term.constr list -> Term.constr list val new_instance : ?global:bool -> local_binder list -> typeclass_constraint -> constr_expr option -> ?generalize:bool -> int option -> identifier * Subtac_obligations.progress coq-8.4pl2/plugins/field/0000750000175000001440000000000012127276537014406 5ustar notinuserscoq-8.4pl2/plugins/field/field_plugin.mllib0000640000175000001440000000002711161000644020045 0ustar notinusersField Field_plugin_mod coq-8.4pl2/plugins/field/vo.itarget0000640000175000001440000000012011307752066016400 0ustar notinusersLegacyField_Compl.vo LegacyField_Tactic.vo LegacyField_Theory.vo LegacyField.vo coq-8.4pl2/plugins/field/LegacyField_Tactic.v0000640000175000001440000003025412010532755020225 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ExprA ****) Ltac get_component a s := eval cbv beta iota delta [a] in (a s). Ltac body_of s := eval cbv beta iota delta [s] in s. Ltac mem_assoc var lvar := match constr:lvar with | nil => constr:false | ?X1 :: ?X2 => match constr:(X1 = var) with | (?X1 = ?X1) => constr:true | _ => mem_assoc var X2 end end. Ltac number lvar := let rec number_aux lvar cpt := match constr:lvar with | (@nil ?X1) => constr:(@nil (prod X1 nat)) | ?X2 :: ?X3 => let l2 := number_aux X3 (S cpt) in constr:((X2,cpt) :: l2) end in number_aux lvar 0. Ltac build_varlist FT trm := let rec seek_var lvar trm := let AT := get_component A FT with AzeroT := get_component Azero FT with AoneT := get_component Aone FT with AplusT := get_component Aplus FT with AmultT := get_component Amult FT with AoppT := get_component Aopp FT with AinvT := get_component Ainv FT in match constr:trm with | AzeroT => lvar | AoneT => lvar | (AplusT ?X1 ?X2) => let l1 := seek_var lvar X1 in seek_var l1 X2 | (AmultT ?X1 ?X2) => let l1 := seek_var lvar X1 in seek_var l1 X2 | (AoppT ?X1) => seek_var lvar X1 | (AinvT ?X1) => seek_var lvar X1 | ?X1 => let res := mem_assoc X1 lvar in match constr:res with | true => lvar | false => constr:(X1 :: lvar) end end in let AT := get_component A FT in let lvar := seek_var (@nil AT) trm in number lvar. Ltac assoc elt lst := match constr:lst with | nil => fail | (?X1,?X2) :: ?X3 => match constr:(elt = X1) with | (?X1 = ?X1) => constr:X2 | _ => assoc elt X3 end end. 
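(* Illustrative note, for exposition only (not part of the original sources):
   the tactics above build the reflection environment.  [build_varlist FT trm]
   collects the atoms of [trm] (subterms that are not applications of the field
   operations) and pairs each of them with a distinct [nat] index via [number];
   [assoc] then looks an atom up in such a list.  For hypothetical atoms x and y
   one would expect, e.g.,
     assoc y ((x, 0) :: (y, 1) :: nil)
   to produce the index 1.  The actual numbering of atoms depends on the
   traversal order implemented by [number]. *)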
Ltac interp_A FT lvar trm := let AT := get_component A FT with AzeroT := get_component Azero FT with AoneT := get_component Aone FT with AplusT := get_component Aplus FT with AmultT := get_component Amult FT with AoppT := get_component Aopp FT with AinvT := get_component Ainv FT in match constr:trm with | AzeroT => constr:EAzero | AoneT => constr:EAone | (AplusT ?X1 ?X2) => let e1 := interp_A FT lvar X1 with e2 := interp_A FT lvar X2 in constr:(EAplus e1 e2) | (AmultT ?X1 ?X2) => let e1 := interp_A FT lvar X1 with e2 := interp_A FT lvar X2 in constr:(EAmult e1 e2) | (AoppT ?X1) => let e := interp_A FT lvar X1 in constr:(EAopp e) | (AinvT ?X1) => let e := interp_A FT lvar X1 in constr:(EAinv e) | ?X1 => let idx := assoc X1 lvar in constr:(EAvar idx) end. (************************) (* Simplification *) (************************) (**** Generation of the multiplier ****) Ltac remove e l := match constr:l with | nil => l | e :: ?X2 => constr:X2 | ?X2 :: ?X3 => let nl := remove e X3 in constr:(X2 :: nl) end. Ltac union l1 l2 := match constr:l1 with | nil => l2 | ?X2 :: ?X3 => let nl2 := remove X2 l2 in let nl := union X3 nl2 in constr:(X2 :: nl) end. Ltac raw_give_mult trm := match constr:trm with | (EAinv ?X1) => constr:(X1 :: nil) | (EAopp ?X1) => raw_give_mult X1 | (EAplus ?X1 ?X2) => let l1 := raw_give_mult X1 with l2 := raw_give_mult X2 in union l1 l2 | (EAmult ?X1 ?X2) => let l1 := raw_give_mult X1 with l2 := raw_give_mult X2 in eval compute in (app l1 l2) | _ => constr:(@nil ExprA) end. Ltac give_mult trm := let ltrm := raw_give_mult trm in constr:(mult_of_list ltrm). (**** Associativity ****) Ltac apply_assoc FT lvar trm := let t := eval compute in (assoc trm) in match constr:(t = trm) with | (?X1 = ?X1) => idtac | _ => rewrite <- (assoc_correct FT trm); change (assoc trm) with t end. (**** Distribution *****) Ltac apply_distrib FT lvar trm := let t := eval compute in (distrib trm) in match constr:(t = trm) with | (?X1 = ?X1) => idtac | _ => rewrite <- (distrib_correct FT trm); change (distrib trm) with t end. (**** Multiplication by the inverse product ****) Ltac grep_mult := match goal with | id:(interp_ExprA _ _ _ <> _) |- _ => id end. Ltac weak_reduce := match goal with | |- context [(interp_ExprA ?X1 ?X2 _)] => cbv beta iota zeta delta [interp_ExprA assoc_2nd eq_nat_dec mult_of_list X1 X2 A Azero Aone Aplus Amult Aopp Ainv] end. Ltac multiply mul := match goal with | |- (interp_ExprA ?FT ?X2 ?X3 = interp_ExprA ?FT ?X2 ?X4) => let AzeroT := get_component Azero FT in cut (interp_ExprA FT X2 mul <> AzeroT); [ intro; (let id := grep_mult in apply (mult_eq FT X3 X4 mul X2 id)) | weak_reduce; (let AoneT := get_component Aone ltac:(body_of FT) with AmultT := get_component Amult ltac:(body_of FT) in try match goal with | |- context [(AmultT _ AoneT)] => rewrite (AmultT_1r FT) end; clear FT X2) ] end. Ltac apply_multiply FT lvar trm := let t := eval compute in (multiply trm) in match constr:(t = trm) with | (?X1 = ?X1) => idtac | _ => rewrite <- (multiply_correct FT trm); change (multiply trm) with t end. (**** Permutations and simplification ****) Ltac apply_inverse mul FT lvar trm := let t := eval compute in (inverse_simplif mul trm) in match constr:(t = trm) with | (?X1 = ?X1) => idtac | _ => rewrite <- (inverse_correct FT trm mul); [ change (inverse_simplif mul trm) with t | assumption ] end. (**** Inverse test ****) Ltac strong_fail tac := first [ tac | fail 2 ]. 
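(* Illustrative note, for exposition only (not part of the original sources):
   [give_mult] gathers every subterm occurring under an [EAinv] and builds their
   product with [mult_of_list]; [multiply] then multiplies both sides of the
   reflected equation by this product so as to clear denominators, cutting in
   the side condition that the product is nonzero.  For a hypothetical goal
   x * / y = z, the generated multiplier is essentially y, and the remaining
   side condition left to the caller is y <> 0. *)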
Ltac inverse_test_aux FT trm := let AplusT := get_component Aplus FT with AmultT := get_component Amult FT with AoppT := get_component Aopp FT with AinvT := get_component Ainv FT in match constr:trm with | (AinvT _) => fail 1 | (AoppT ?X1) => strong_fail ltac:(inverse_test_aux FT X1; idtac) | (AplusT ?X1 ?X2) => strong_fail ltac:(inverse_test_aux FT X1; inverse_test_aux FT X2) | (AmultT ?X1 ?X2) => strong_fail ltac:(inverse_test_aux FT X1; inverse_test_aux FT X2) | _ => idtac end. Ltac inverse_test FT := let AplusT := get_component Aplus FT in match goal with | |- (?X1 = ?X2) => inverse_test_aux FT (AplusT X1 X2) end. (**** Field itself ****) Ltac apply_simplif sfun := match goal with | |- (interp_ExprA ?X1 ?X2 ?X3 = interp_ExprA _ _ _) => sfun X1 X2 X3 end; match goal with | |- (interp_ExprA _ _ _ = interp_ExprA ?X1 ?X2 ?X3) => sfun X1 X2 X3 end. Ltac unfolds FT := match get_component Aminus FT with | Some ?X1 => unfold X1 | _ => idtac end; match get_component Adiv FT with | Some ?X1 => unfold X1 | _ => idtac end. Ltac reduce FT := let AzeroT := get_component Azero FT with AoneT := get_component Aone FT with AplusT := get_component Aplus FT with AmultT := get_component Amult FT with AoppT := get_component Aopp FT with AinvT := get_component Ainv FT in (cbv beta iota zeta delta -[AzeroT AoneT AplusT AmultT AoppT AinvT] || compute). Ltac field_gen_aux FT := let AplusT := get_component Aplus FT in match goal with | |- (?X1 = ?X2) => let lvar := build_varlist FT (AplusT X1 X2) in let trm1 := interp_A FT lvar X1 with trm2 := interp_A FT lvar X2 in let mul := give_mult (EAplus trm1 trm2) in cut (let ft := FT in let vm := lvar in interp_ExprA ft vm trm1 = interp_ExprA ft vm trm2); [ compute; auto | intros ft vm; apply_simplif apply_distrib; apply_simplif apply_assoc; multiply mul; [ apply_simplif apply_multiply; apply_simplif ltac:(apply_inverse mul); (let id := grep_mult in clear id; weak_reduce; clear ft vm; first [ inverse_test FT; legacy ring | field_gen_aux FT ]) | idtac ] ] end. Ltac field_gen FT := unfolds FT; (inverse_test FT; legacy ring) || field_gen_aux FT. (*****************************) (* Term Simplification *) (*****************************) (**** Minus and division expansions ****) Ltac init_exp FT trm := let e := (match get_component Aminus FT with | Some ?X1 => eval cbv beta delta [X1] in trm | _ => trm end) in match get_component Adiv FT with | Some ?X1 => eval cbv beta delta [X1] in e | _ => e end. (**** Inverses simplification ****) Ltac simpl_inv trm := match constr:trm with | (EAplus ?X1 ?X2) => let e1 := simpl_inv X1 with e2 := simpl_inv X2 in constr:(EAplus e1 e2) | (EAmult ?X1 ?X2) => let e1 := simpl_inv X1 with e2 := simpl_inv X2 in constr:(EAmult e1 e2) | (EAopp ?X1) => let e := simpl_inv X1 in constr:(EAopp e) | (EAinv ?X1) => SimplInvAux X1 | ?X1 => constr:X1 end with SimplInvAux trm := match constr:trm with | (EAinv ?X1) => simpl_inv X1 | (EAmult ?X1 ?X2) => let e1 := simpl_inv (EAinv X1) with e2 := simpl_inv (EAinv X2) in constr:(EAmult e1 e2) | ?X1 => let e := simpl_inv X1 in constr:(EAinv e) end. (**** Monom simplification ****) Ltac map_tactic fcn lst := match constr:lst with | nil => lst | ?X2 :: ?X3 => let r := fcn X2 with t := map_tactic fcn X3 in constr:(r :: t) end. Ltac build_monom_aux lst trm := match constr:lst with | nil => eval compute in (assoc trm) | ?X1 :: ?X2 => build_monom_aux X2 (EAmult trm X1) end. 
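(* Illustrative note, for exposition only (not part of the original sources):
   [simpl_inv] pushes inverses towards the leaves of a reflected term.  With
   hypothetical atoms a and b, one would expect
     simpl_inv (EAinv (EAmult a b))   to yield   EAmult (EAinv a) (EAinv b)
   and
     simpl_inv (EAinv (EAinv a))      to yield   a,
   which is the form the monomial simplification below works on. *)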
Ltac build_monom lnum lden := let ildn := map_tactic ltac:(fun e => constr:(EAinv e)) lden in let ltot := eval compute in (app lnum ildn) in let trm := build_monom_aux ltot EAone in match constr:trm with | (EAmult _ ?X1) => constr:X1 | ?X1 => constr:X1 end. Ltac simpl_monom_aux lnum lden trm := match constr:trm with | (EAmult (EAinv ?X1) ?X2) => let mma := mem_assoc X1 lnum in match constr:mma with | true => let newlnum := remove X1 lnum in simpl_monom_aux newlnum lden X2 | false => simpl_monom_aux lnum (X1 :: lden) X2 end | (EAmult ?X1 ?X2) => let mma := mem_assoc X1 lden in match constr:mma with | true => let newlden := remove X1 lden in simpl_monom_aux lnum newlden X2 | false => simpl_monom_aux (X1 :: lnum) lden X2 end | (EAinv ?X1) => let mma := mem_assoc X1 lnum in match constr:mma with | true => let newlnum := remove X1 lnum in build_monom newlnum lden | false => build_monom lnum (X1 :: lden) end | ?X1 => let mma := mem_assoc X1 lden in match constr:mma with | true => let newlden := remove X1 lden in build_monom lnum newlden | false => build_monom (X1 :: lnum) lden end end. Ltac simpl_monom trm := simpl_monom_aux (@nil ExprA) (@nil ExprA) trm. Ltac simpl_all_monomials trm := match constr:trm with | (EAplus ?X1 ?X2) => let e1 := simpl_monom X1 with e2 := simpl_all_monomials X2 in constr:(EAplus e1 e2) | ?X1 => simpl_monom X1 end. (**** Associativity and distribution ****) Ltac assoc_distrib trm := eval compute in (assoc (distrib trm)). (**** The tactic Field_Term ****) Ltac eval_weak_reduce trm := eval cbv beta iota zeta delta [interp_ExprA assoc_2nd eq_nat_dec mult_of_list A Azero Aone Aplus Amult Aopp Ainv] in trm. Ltac field_term FT exp := let newexp := init_exp FT exp in let lvar := build_varlist FT newexp in let trm := interp_A FT lvar newexp in let tma := eval compute in (assoc trm) in let tsmp := simpl_all_monomials ltac:(assoc_distrib ltac:(simpl_all_monomials ltac:(simpl_inv tma))) in let trep := eval_weak_reduce (interp_ExprA FT lvar tsmp) in (replace exp with trep; [ legacy ring trep | field_gen FT ]). coq-8.4pl2/plugins/field/LegacyField_Compl.v0000640000175000001440000000245112010532755020066 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* e2}) (lst:list (prod A B)) {struct lst} : B -> A -> A := fun (key:B) (default:A) => match lst with | nil => default | (v,e) :: l => match eq_dec e key with | left _ => v | right _ => assoc_2nd_rec A B eq_dec l key default end end). Definition mem := (fix mem (A:Set) (eq_dec:forall e1 e2:A, {e1 = e2} + {e1 <> e2}) (a:A) (l:list A) {struct l} : bool := match l with | nil => false | a1 :: l1 => match eq_dec a a1 with | left _ => true | right _ => mem A eq_dec a l1 end end). coq-8.4pl2/plugins/field/LegacyField_Theory.v0000640000175000001440000004676412010532755020305 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> A; Amult : A -> A -> A; Aone : A; Azero : A; Aopp : A -> A; Aeq : A -> A -> bool; Ainv : A -> A; Aminus : option (A -> A -> A); Adiv : option (A -> A -> A); RT : Ring_Theory Aplus Amult Aone Azero Aopp Aeq; Th_inv_def : forall n:A, n <> Azero -> Amult (Ainv n) n = Aone}. (* The reflexion structure *) Inductive ExprA : Set := | EAzero : ExprA | EAone : ExprA | EAplus : ExprA -> ExprA -> ExprA | EAmult : ExprA -> ExprA -> ExprA | EAopp : ExprA -> ExprA | EAinv : ExprA -> ExprA | EAvar : nat -> ExprA. 
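(* An [ExprA] term reifies a field expression, with [EAvar n] denoting the n-th
   atom of the variable map built by the tactics in LegacyField_Tactic.  The
   definition below is an illustrative inhabitant added for exposition only
   (not part of the original sources); it corresponds to an expression of the
   shape  x * / y + 1  for hypothetical atoms numbered 0 and 1. *)
Example ExprA_example : ExprA :=
  EAplus (EAmult (EAvar 0) (EAinv (EAvar 1))) EAone.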
(**** Decidability of equality ****) Lemma eqExprA_O : forall e1 e2:ExprA, {e1 = e2} + {e1 <> e2}. Proof. double induction e1 e2; try intros; try (left; reflexivity) || (try (right; discriminate)). elim (H1 e0); intro y; elim (H2 e); intro y0; try (left; rewrite y; rewrite y0; auto) || (right; red; intro; inversion H3; auto). elim (H1 e0); intro y; elim (H2 e); intro y0; try (left; rewrite y; rewrite y0; auto) || (right; red; intro; inversion H3; auto). elim (H0 e); intro y. left; rewrite y; auto. right; red; intro; inversion H1; auto. elim (H0 e); intro y. left; rewrite y; auto. right; red; intro; inversion H1; auto. elim (eq_nat_dec n n0); intro y. left; rewrite y; auto. right; red; intro; inversion H; auto. Defined. Definition eq_nat_dec := Eval compute in eq_nat_dec. Definition eqExprA := Eval compute in eqExprA_O. (**** Generation of the multiplier ****) Fixpoint mult_of_list (e:list ExprA) : ExprA := match e with | nil => EAone | e1 :: l1 => EAmult e1 (mult_of_list l1) end. Section Theory_of_fields. Variable T : Field_Theory. Let AT := A T. Let AplusT := Aplus T. Let AmultT := Amult T. Let AoneT := Aone T. Let AzeroT := Azero T. Let AoppT := Aopp T. Let AeqT := Aeq T. Let AinvT := Ainv T. Let RTT := RT T. Let Th_inv_defT := Th_inv_def T. Add Legacy Abstract Ring (A T) (Aplus T) (Amult T) (Aone T) ( Azero T) (Aopp T) (Aeq T) (RT T). Add Legacy Abstract Ring AT AplusT AmultT AoneT AzeroT AoppT AeqT RTT. (***************************) (* Lemmas to be used *) (***************************) Lemma AplusT_comm : forall r1 r2:AT, AplusT r1 r2 = AplusT r2 r1. Proof. intros; legacy ring. Qed. Lemma AplusT_assoc : forall r1 r2 r3:AT, AplusT (AplusT r1 r2) r3 = AplusT r1 (AplusT r2 r3). Proof. intros; legacy ring. Qed. Lemma AmultT_comm : forall r1 r2:AT, AmultT r1 r2 = AmultT r2 r1. Proof. intros; legacy ring. Qed. Lemma AmultT_assoc : forall r1 r2 r3:AT, AmultT (AmultT r1 r2) r3 = AmultT r1 (AmultT r2 r3). Proof. intros; legacy ring. Qed. Lemma AplusT_Ol : forall r:AT, AplusT AzeroT r = r. Proof. intros; legacy ring. Qed. Lemma AmultT_1l : forall r:AT, AmultT AoneT r = r. Proof. intros; legacy ring. Qed. Lemma AplusT_AoppT_r : forall r:AT, AplusT r (AoppT r) = AzeroT. Proof. intros; legacy ring. Qed. Lemma AmultT_AplusT_distr : forall r1 r2 r3:AT, AmultT r1 (AplusT r2 r3) = AplusT (AmultT r1 r2) (AmultT r1 r3). Proof. intros; legacy ring. Qed. Lemma r_AplusT_plus : forall r r1 r2:AT, AplusT r r1 = AplusT r r2 -> r1 = r2. Proof. intros; transitivity (AplusT (AplusT (AoppT r) r) r1). legacy ring. transitivity (AplusT (AplusT (AoppT r) r) r2). repeat rewrite AplusT_assoc; rewrite <- H; reflexivity. legacy ring. Qed. Lemma r_AmultT_mult : forall r r1 r2:AT, AmultT r r1 = AmultT r r2 -> r <> AzeroT -> r1 = r2. Proof. intros; transitivity (AmultT (AmultT (AinvT r) r) r1). rewrite Th_inv_defT; [ symmetry ; apply AmultT_1l; auto | auto ]. transitivity (AmultT (AmultT (AinvT r) r) r2). repeat rewrite AmultT_assoc; rewrite H; trivial. rewrite Th_inv_defT; [ apply AmultT_1l; auto | auto ]. Qed. Lemma AmultT_Or : forall r:AT, AmultT r AzeroT = AzeroT. Proof. intro; legacy ring. Qed. Lemma AmultT_Ol : forall r:AT, AmultT AzeroT r = AzeroT. Proof. intro; legacy ring. Qed. Lemma AmultT_1r : forall r:AT, AmultT r AoneT = r. Proof. intro; legacy ring. Qed. Lemma AinvT_r : forall r:AT, r <> AzeroT -> AmultT r (AinvT r) = AoneT. Proof. intros; rewrite AmultT_comm; apply Th_inv_defT; auto. Qed. Lemma Rmult_neq_0_reg : forall r1 r2:AT, AmultT r1 r2 <> AzeroT -> r1 <> AzeroT /\ r2 <> AzeroT. Proof. 
intros r1 r2 H; split; red; intro; apply H; rewrite H0; legacy ring. Qed. (************************) (* Interpretation *) (************************) (**** ExprA --> A ****) Fixpoint interp_ExprA (lvar:list (AT * nat)) (e:ExprA) {struct e} : AT := match e with | EAzero => AzeroT | EAone => AoneT | EAplus e1 e2 => AplusT (interp_ExprA lvar e1) (interp_ExprA lvar e2) | EAmult e1 e2 => AmultT (interp_ExprA lvar e1) (interp_ExprA lvar e2) | EAopp e => Aopp T (interp_ExprA lvar e) | EAinv e => Ainv T (interp_ExprA lvar e) | EAvar n => assoc_2nd AT nat eq_nat_dec lvar n AzeroT end. (************************) (* Simplification *) (************************) (**** Associativity ****) Definition merge_mult := (fix merge_mult (e1:ExprA) : ExprA -> ExprA := fun e2:ExprA => match e1 with | EAmult t1 t2 => match t2 with | EAmult t2 t3 => EAmult t1 (EAmult t2 (merge_mult t3 e2)) | _ => EAmult t1 (EAmult t2 e2) end | _ => EAmult e1 e2 end). Fixpoint assoc_mult (e:ExprA) : ExprA := match e with | EAmult e1 e3 => match e1 with | EAmult e1 e2 => merge_mult (merge_mult (assoc_mult e1) (assoc_mult e2)) (assoc_mult e3) | _ => EAmult e1 (assoc_mult e3) end | _ => e end. Definition merge_plus := (fix merge_plus (e1:ExprA) : ExprA -> ExprA := fun e2:ExprA => match e1 with | EAplus t1 t2 => match t2 with | EAplus t2 t3 => EAplus t1 (EAplus t2 (merge_plus t3 e2)) | _ => EAplus t1 (EAplus t2 e2) end | _ => EAplus e1 e2 end). Fixpoint assoc (e:ExprA) : ExprA := match e with | EAplus e1 e3 => match e1 with | EAplus e1 e2 => merge_plus (merge_plus (assoc e1) (assoc e2)) (assoc e3) | _ => EAplus (assoc_mult e1) (assoc e3) end | _ => assoc_mult e end. Lemma merge_mult_correct1 : forall (e1 e2 e3:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (merge_mult (EAmult e1 e2) e3) = interp_ExprA lvar (EAmult e1 (merge_mult e2 e3)). Proof. intros e1 e2; generalize e1; generalize e2; clear e1 e2. simple induction e2; auto; intros. unfold merge_mult at 1; fold merge_mult; unfold interp_ExprA at 2; fold interp_ExprA; rewrite (H0 e e3 lvar); unfold interp_ExprA at 1; fold interp_ExprA; unfold interp_ExprA at 5; fold interp_ExprA; auto. Qed. Lemma merge_mult_correct : forall (e1 e2:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (merge_mult e1 e2) = interp_ExprA lvar (EAmult e1 e2). Proof. simple induction e1; auto; intros. elim e0; try (intros; simpl; legacy ring). unfold interp_ExprA in H2; fold interp_ExprA in H2; cut (AmultT (interp_ExprA lvar e2) (AmultT (interp_ExprA lvar e4) (AmultT (interp_ExprA lvar e) (interp_ExprA lvar e3))) = AmultT (AmultT (AmultT (interp_ExprA lvar e) (interp_ExprA lvar e4)) (interp_ExprA lvar e2)) (interp_ExprA lvar e3)). intro H3; rewrite H3; rewrite <- H2; rewrite merge_mult_correct1; simpl; legacy ring. legacy ring. Qed. Lemma assoc_mult_correct1 : forall (e1 e2:ExprA) (lvar:list (AT * nat)), AmultT (interp_ExprA lvar (assoc_mult e1)) (interp_ExprA lvar (assoc_mult e2)) = interp_ExprA lvar (assoc_mult (EAmult e1 e2)). Proof. simple induction e1; auto; intros. rewrite <- (H e0 lvar); simpl; rewrite merge_mult_correct; simpl; rewrite merge_mult_correct; simpl; auto. Qed. Lemma assoc_mult_correct : forall (e:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (assoc_mult e) = interp_ExprA lvar e. Proof. simple induction e; auto; intros. elim e0; intros. intros; simpl; legacy ring. simpl; rewrite (AmultT_1l (interp_ExprA lvar (assoc_mult e1))); rewrite (AmultT_1l (interp_ExprA lvar e1)); apply H0. simpl; rewrite (H0 lvar); auto. 
simpl; rewrite merge_mult_correct; simpl; rewrite merge_mult_correct; simpl; rewrite AmultT_assoc; rewrite assoc_mult_correct1; rewrite H2; simpl; rewrite <- assoc_mult_correct1 in H1; unfold interp_ExprA at 3 in H1; fold interp_ExprA in H1; rewrite (H0 lvar) in H1; rewrite (AmultT_comm (interp_ExprA lvar e3) (interp_ExprA lvar e1)); rewrite <- AmultT_assoc; rewrite H1; rewrite AmultT_assoc; legacy ring. simpl; rewrite (H0 lvar); auto. simpl; rewrite (H0 lvar); auto. simpl; rewrite (H0 lvar); auto. Qed. Lemma merge_plus_correct1 : forall (e1 e2 e3:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (merge_plus (EAplus e1 e2) e3) = interp_ExprA lvar (EAplus e1 (merge_plus e2 e3)). Proof. intros e1 e2; generalize e1; generalize e2; clear e1 e2. simple induction e2; auto; intros. unfold merge_plus at 1; fold merge_plus; unfold interp_ExprA at 2; fold interp_ExprA; rewrite (H0 e e3 lvar); unfold interp_ExprA at 1; fold interp_ExprA; unfold interp_ExprA at 5; fold interp_ExprA; auto. Qed. Lemma merge_plus_correct : forall (e1 e2:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (merge_plus e1 e2) = interp_ExprA lvar (EAplus e1 e2). Proof. simple induction e1; auto; intros. elim e0; try intros; try (simpl; legacy ring). unfold interp_ExprA in H2; fold interp_ExprA in H2; cut (AplusT (interp_ExprA lvar e2) (AplusT (interp_ExprA lvar e4) (AplusT (interp_ExprA lvar e) (interp_ExprA lvar e3))) = AplusT (AplusT (AplusT (interp_ExprA lvar e) (interp_ExprA lvar e4)) (interp_ExprA lvar e2)) (interp_ExprA lvar e3)). intro H3; rewrite H3; rewrite <- H2; rewrite merge_plus_correct1; simpl; legacy ring. legacy ring. Qed. Lemma assoc_plus_correct : forall (e1 e2:ExprA) (lvar:list (AT * nat)), AplusT (interp_ExprA lvar (assoc e1)) (interp_ExprA lvar (assoc e2)) = interp_ExprA lvar (assoc (EAplus e1 e2)). Proof. simple induction e1; auto; intros. rewrite <- (H e0 lvar); simpl; rewrite merge_plus_correct; simpl; rewrite merge_plus_correct; simpl; auto. Qed. Lemma assoc_correct : forall (e:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (assoc e) = interp_ExprA lvar e. Proof. simple induction e; auto; intros. elim e0; intros. simpl; rewrite (H0 lvar); auto. simpl; rewrite (H0 lvar); auto. simpl; rewrite merge_plus_correct; simpl; rewrite merge_plus_correct; simpl; rewrite AplusT_assoc; rewrite assoc_plus_correct; rewrite H2; simpl; apply (r_AplusT_plus (interp_ExprA lvar (assoc e1)) (AplusT (interp_ExprA lvar (assoc e2)) (AplusT (interp_ExprA lvar e3) (interp_ExprA lvar e1))) (AplusT (AplusT (interp_ExprA lvar e2) (interp_ExprA lvar e3)) (interp_ExprA lvar e1))); rewrite <- AplusT_assoc; rewrite (AplusT_comm (interp_ExprA lvar (assoc e1)) (interp_ExprA lvar (assoc e2))) ; rewrite assoc_plus_correct; rewrite H1; simpl; rewrite (H0 lvar); rewrite <- (AplusT_assoc (AplusT (interp_ExprA lvar e2) (interp_ExprA lvar e1)) (interp_ExprA lvar e3) (interp_ExprA lvar e1)) ; rewrite (AplusT_assoc (interp_ExprA lvar e2) (interp_ExprA lvar e1) (interp_ExprA lvar e3)); rewrite (AplusT_comm (interp_ExprA lvar e1) (interp_ExprA lvar e3)); rewrite <- (AplusT_assoc (interp_ExprA lvar e2) (interp_ExprA lvar e3) (interp_ExprA lvar e1)); apply AplusT_comm. unfold assoc; fold assoc; unfold interp_ExprA; fold interp_ExprA; rewrite assoc_mult_correct; rewrite (H0 lvar); simpl; auto. simpl; rewrite (H0 lvar); auto. simpl; rewrite (H0 lvar); auto. simpl; rewrite (H0 lvar); auto. unfold assoc; fold assoc; unfold interp_ExprA; fold interp_ExprA; rewrite assoc_mult_correct; simpl; auto. Qed. 
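(* Illustrative example, for exposition only (not part of the original
   sources): [assoc], together with [assoc_mult], re-associates sums and
   products to the right while preserving the interpretation, as shown by
   [assoc_correct] above.  For instance: *)
Example assoc_example :
  assoc (EAplus (EAplus (EAvar 0) (EAvar 1)) (EAvar 2)) =
  EAplus (EAvar 0) (EAplus (EAvar 1) (EAvar 2)).
Proof. reflexivity. Qed.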
(**** Distribution *****) Fixpoint distrib_EAopp (e:ExprA) : ExprA := match e with | EAplus e1 e2 => EAplus (distrib_EAopp e1) (distrib_EAopp e2) | EAmult e1 e2 => EAmult (distrib_EAopp e1) (distrib_EAopp e2) | EAopp e => EAmult (EAopp EAone) (distrib_EAopp e) | e => e end. Definition distrib_mult_right := (fix distrib_mult_right (e1:ExprA) : ExprA -> ExprA := fun e2:ExprA => match e1 with | EAplus t1 t2 => EAplus (distrib_mult_right t1 e2) (distrib_mult_right t2 e2) | _ => EAmult e1 e2 end). Fixpoint distrib_mult_left (e1 e2:ExprA) {struct e1} : ExprA := match e1 with | EAplus t1 t2 => EAplus (distrib_mult_left t1 e2) (distrib_mult_left t2 e2) | _ => distrib_mult_right e2 e1 end. Fixpoint distrib_main (e:ExprA) : ExprA := match e with | EAmult e1 e2 => distrib_mult_left (distrib_main e1) (distrib_main e2) | EAplus e1 e2 => EAplus (distrib_main e1) (distrib_main e2) | EAopp e => EAopp (distrib_main e) | _ => e end. Definition distrib (e:ExprA) : ExprA := distrib_main (distrib_EAopp e). Lemma distrib_mult_right_correct : forall (e1 e2:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (distrib_mult_right e1 e2) = AmultT (interp_ExprA lvar e1) (interp_ExprA lvar e2). Proof. simple induction e1; try intros; simpl; auto. rewrite AmultT_comm; rewrite AmultT_AplusT_distr; rewrite (H e2 lvar); rewrite (H0 e2 lvar); legacy ring. Qed. Lemma distrib_mult_left_correct : forall (e1 e2:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (distrib_mult_left e1 e2) = AmultT (interp_ExprA lvar e1) (interp_ExprA lvar e2). Proof. simple induction e1; try intros; simpl. rewrite AmultT_Ol; rewrite distrib_mult_right_correct; simpl; apply AmultT_Or. rewrite distrib_mult_right_correct; simpl; apply AmultT_comm. rewrite AmultT_comm; rewrite (AmultT_AplusT_distr (interp_ExprA lvar e2) (interp_ExprA lvar e) (interp_ExprA lvar e0)); rewrite (AmultT_comm (interp_ExprA lvar e2) (interp_ExprA lvar e)); rewrite (AmultT_comm (interp_ExprA lvar e2) (interp_ExprA lvar e0)); rewrite (H e2 lvar); rewrite (H0 e2 lvar); auto. rewrite distrib_mult_right_correct; simpl; apply AmultT_comm. rewrite distrib_mult_right_correct; simpl; apply AmultT_comm. rewrite distrib_mult_right_correct; simpl; apply AmultT_comm. rewrite distrib_mult_right_correct; simpl; apply AmultT_comm. Qed. Lemma distrib_correct : forall (e:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (distrib e) = interp_ExprA lvar e. Proof. simple induction e; intros; auto. simpl; rewrite <- (H lvar); rewrite <- (H0 lvar); unfold distrib; simpl; auto. simpl; rewrite <- (H lvar); rewrite <- (H0 lvar); unfold distrib; simpl; apply distrib_mult_left_correct. simpl; fold AoppT; rewrite <- (H lvar); unfold distrib; simpl; rewrite distrib_mult_right_correct; simpl; fold AoppT; legacy ring. Qed. (**** Multiplication by the inverse product ****) Lemma mult_eq : forall (e1 e2 a:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar a <> AzeroT -> interp_ExprA lvar (EAmult a e1) = interp_ExprA lvar (EAmult a e2) -> interp_ExprA lvar e1 = interp_ExprA lvar e2. Proof. simpl; intros; apply (r_AmultT_mult (interp_ExprA lvar a) (interp_ExprA lvar e1) (interp_ExprA lvar e2)); assumption. Qed. Fixpoint multiply_aux (a e:ExprA) {struct e} : ExprA := match e with | EAplus e1 e2 => EAplus (EAmult a e1) (multiply_aux a e2) | _ => EAmult a e end. Definition multiply (e:ExprA) : ExprA := match e with | EAmult a e1 => multiply_aux a e1 | _ => e end. 
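(* Illustrative example, for exposition only (not part of the original
   sources): [multiply] distributes the head factor of a product over a
   right-associated sum, while preserving the interpretation
   (cf. [multiply_correct] below).  For instance: *)
Example multiply_example :
  multiply (EAmult (EAvar 0) (EAplus (EAvar 1) (EAvar 2))) =
  EAplus (EAmult (EAvar 0) (EAvar 1)) (EAmult (EAvar 0) (EAvar 2)).
Proof. reflexivity. Qed.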
Lemma multiply_aux_correct : forall (a e:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (multiply_aux a e) = AmultT (interp_ExprA lvar a) (interp_ExprA lvar e). Proof. simple induction e; simpl; intros; try rewrite merge_mult_correct; auto. simpl; rewrite (H0 lvar); legacy ring. Qed. Lemma multiply_correct : forall (e:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar (multiply e) = interp_ExprA lvar e. Proof. simple induction e; simpl; auto. intros; apply multiply_aux_correct. Qed. (**** Permutations and simplification ****) Fixpoint monom_remove (a m:ExprA) {struct m} : ExprA := match m with | EAmult m0 m1 => match eqExprA m0 (EAinv a) with | left _ => m1 | right _ => EAmult m0 (monom_remove a m1) end | _ => match eqExprA m (EAinv a) with | left _ => EAone | right _ => EAmult a m end end. Definition monom_simplif_rem := (fix monom_simplif_rem (a:ExprA) : ExprA -> ExprA := fun m:ExprA => match a with | EAmult a0 a1 => monom_simplif_rem a1 (monom_remove a0 m) | _ => monom_remove a m end). Definition monom_simplif (a m:ExprA) : ExprA := match m with | EAmult a' m' => match eqExprA a a' with | left _ => monom_simplif_rem a m' | right _ => m end | _ => m end. Fixpoint inverse_simplif (a e:ExprA) {struct e} : ExprA := match e with | EAplus e1 e2 => EAplus (monom_simplif a e1) (inverse_simplif a e2) | _ => monom_simplif a e end. Lemma monom_remove_correct : forall (e a:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar a <> AzeroT -> interp_ExprA lvar (monom_remove a e) = AmultT (interp_ExprA lvar a) (interp_ExprA lvar e). Proof. simple induction e; intros. simpl; case (eqExprA EAzero (EAinv a)); intros; [ inversion e0 | simpl; trivial ]. simpl; case (eqExprA EAone (EAinv a)); intros; [ inversion e0 | simpl; trivial ]. simpl; case (eqExprA (EAplus e0 e1) (EAinv a)); intros; [ inversion e2 | simpl; trivial ]. simpl; case (eqExprA e0 (EAinv a)); intros. rewrite e2; simpl; fold AinvT. rewrite <- (AmultT_assoc (interp_ExprA lvar a) (AinvT (interp_ExprA lvar a)) (interp_ExprA lvar e1)); rewrite AinvT_r; [ legacy ring | assumption ]. simpl; rewrite H0; auto; legacy ring. simpl; fold AoppT; case (eqExprA (EAopp e0) (EAinv a)); intros; [ inversion e1 | simpl; trivial ]. unfold monom_remove; case (eqExprA (EAinv e0) (EAinv a)); intros. case (eqExprA e0 a); intros. rewrite e2; simpl; fold AinvT; rewrite AinvT_r; auto. inversion e1; simpl; exfalso; auto. simpl; trivial. unfold monom_remove; case (eqExprA (EAvar n) (EAinv a)); intros; [ inversion e0 | simpl; trivial ]. Qed. Lemma monom_simplif_rem_correct : forall (a e:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar a <> AzeroT -> interp_ExprA lvar (monom_simplif_rem a e) = AmultT (interp_ExprA lvar a) (interp_ExprA lvar e). Proof. simple induction a; simpl; intros; try rewrite monom_remove_correct; auto. elim (Rmult_neq_0_reg (interp_ExprA lvar e) (interp_ExprA lvar e0) H1); intros. rewrite (H0 (monom_remove e e1) lvar H3); rewrite monom_remove_correct; auto. legacy ring. Qed. Lemma monom_simplif_correct : forall (e a:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar a <> AzeroT -> interp_ExprA lvar (monom_simplif a e) = interp_ExprA lvar e. Proof. simple induction e; intros; auto. simpl; case (eqExprA a e0); intros. rewrite <- e2; apply monom_simplif_rem_correct; auto. simpl; trivial. Qed. Lemma inverse_correct : forall (e a:ExprA) (lvar:list (AT * nat)), interp_ExprA lvar a <> AzeroT -> interp_ExprA lvar (inverse_simplif a e) = interp_ExprA lvar e. Proof. simple induction e; intros; auto. simpl; rewrite (H0 a lvar H1); rewrite monom_simplif_correct; auto. 
unfold inverse_simplif; rewrite monom_simplif_correct; auto. Qed. End Theory_of_fields. (* Compatibility *) Notation AplusT_sym := AplusT_comm (only parsing). Notation AmultT_sym := AmultT_comm (only parsing). coq-8.4pl2/plugins/field/LegacyField.v0000640000175000001440000000131512010532755016732 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mkApp (init_constant "None",[|ac3|]) | Some f -> mkApp (init_constant "Some",[|ac3;constr_of f|]) module Cmap = Map.Make(struct type t = constr let compare = constr_ord end) (* Table of theories *) let th_tab = ref (Cmap.empty : constr Cmap.t) let lookup env typ = try Cmap.find typ !th_tab with Not_found -> errorlabstrm "field" (str "No field is declared for type" ++ spc() ++ Printer.pr_lconstr_env env typ) let _ = let init () = th_tab := Cmap.empty in let freeze () = !th_tab in let unfreeze fs = th_tab := fs in Summary.declare_summary "field" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let load_addfield _ = () let cache_addfield (_,(typ,th)) = th_tab := Cmap.add typ th !th_tab let subst_addfield (subst,(typ,th as obj)) = let typ' = subst_mps subst typ in let th' = subst_mps subst th in if typ' == typ && th' == th then obj else (typ',th') (* Declaration of the Add Field library object *) let in_addfield : types * constr -> Libobject.obj = Libobject.declare_object {(Libobject.default_object "ADD_FIELD") with Libobject.open_function = (fun i o -> if i=1 then cache_addfield o); Libobject.cache_function = cache_addfield; Libobject.subst_function = subst_addfield; Libobject.classify_function = (fun a -> Libobject.Substitute a)} (* Adds a theory to the table *) let add_field a aplus amult aone azero aopp aeq ainv aminus_o adiv_o rth ainv_l = begin (try Ring.add_theory true true false a None None None aplus amult aone azero (Some aopp) aeq rth Quote.ConstrSet.empty with | UserError("Add Semi Ring",_) -> ()); let th = mkApp ((constant ["LegacyField_Theory"] "Build_Field_Theory"), [|a;aplus;amult;aone;azero;aopp;aeq;ainv;aminus_o;adiv_o;rth;ainv_l|]) in begin let _ = type_of (Global.env ()) Evd.empty th in (); Lib.add_anonymous_leaf (in_addfield (a,th)) end end (* Vernac command declaration *) open Extend open Pcoq open Genarg VERNAC ARGUMENT EXTEND divarg | [ "div" ":=" constr(adiv) ] -> [ adiv ] END VERNAC ARGUMENT EXTEND minusarg | [ "minus" ":=" constr(aminus) ] -> [ aminus ] END (* (* The v7->v8 translator needs printers, then temporary use ARGUMENT EXTEND...*) VERNAC ARGUMENT EXTEND minus_div_arg | [ "with" minusarg(m) divarg_opt(d) ] -> [ Some m, d ] | [ "with" divarg(d) minusarg_opt(m) ] -> [ m, Some d ] | [ ] -> [ None, None ] END *) (* For the translator, otherwise the code above is OK *) open Ppconstr let pp_minus_div_arg _prc _prlc _prt (omin,odiv) = if omin=None && odiv=None then mt() else spc() ++ str "with" ++ pr_opt (fun c -> str "minus := " ++ _prc c) omin ++ pr_opt (fun c -> str "div := " ++ _prc c) odiv (* let () = Pptactic.declare_extra_genarg_pprule true (rawwit_minus_div_arg,pp_minus_div_arg) (globwit_minus_div_arg,pp_minus_div_arg) (wit_minus_div_arg,pp_minus_div_arg) *) ARGUMENT EXTEND minus_div_arg TYPED AS constr_opt * constr_opt PRINTED BY pp_minus_div_arg | [ "with" minusarg(m) divarg_opt(d) ] -> [ Some m, d ] | [ "with" divarg(d) minusarg_opt(m) ] -> [ m, Some d ] | [ ] -> [ None, None ] END VERNAC COMMAND EXTEND Field [ "Add" "Legacy" "Field" constr(a) 
constr(aplus) constr(amult) constr(aone) constr(azero) constr(aopp) constr(aeq) constr(ainv) constr(rth) constr(ainv_l) minus_div_arg(md) ] -> [ let (aminus_o, adiv_o) = md in add_field (constr_of a) (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) (constr_of aopp) (constr_of aeq) (constr_of ainv) (constr_of_opt a aminus_o) (constr_of_opt a adiv_o) (constr_of rth) (constr_of ainv_l) ] END (* Guesses the type and calls field_gen with the right theory *) let field g = Coqlib.check_required_library ["Coq";"field";"LegacyField"]; let typ = try match Hipattern.match_with_equation (pf_concl g) with | _,_,Hipattern.PolymorphicLeibnizEq (t,_,_) -> t | _ -> raise Exit with Hipattern.NoEquationFound | Exit -> error "The statement is not built from Leibniz' equality" in let th = VConstr ([],lookup (pf_env g) typ) in (interp_tac_gen [(id_of_string "FT",th)] [] (get_debug ()) <:tactic< match goal with |- (@eq _ _ _) => field_gen FT end >>) g (* Verifies that all the terms have the same type and gives the right theory *) let guess_theory env evc = function | c::tl -> let t = type_of env evc c in if List.exists (fun c1 -> not (Reductionops.is_conv env evc t (type_of env evc c1))) tl then errorlabstrm "Field:" (str" All the terms must have the same type") else lookup env t | [] -> anomaly "Field: must have a non-empty constr list here" (* Guesses the type and calls Field_Term with the right theory *) let field_term l g = Coqlib.check_required_library ["Coq";"field";"LegacyField"]; let env = (pf_env g) and evc = (project g) in let th = valueIn (VConstr ([],guess_theory env evc l)) and nl = List.map (fun x -> valueIn (VConstr ([],x))) (Quote.sort_subterm g l) in (List.fold_right (fun c a -> let tac = (Tacinterp.interp <:tactic<(Field_Term $th $c)>>) in Tacticals.tclTHENFIRSTn tac [|a|]) nl Tacticals.tclIDTAC) g (* Declaration of Field *) TACTIC EXTEND legacy_field | [ "legacy" "field" ] -> [ field ] | [ "legacy" "field" ne_constr_list(l) ] -> [ field_term l ] END coq-8.4pl2/plugins/firstorder/0000750000175000001440000000000012127276540015500 5ustar notinuserscoq-8.4pl2/plugins/firstorder/g_ground.ml40000640000175000001440000001110612121620060017702 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Some !ground_depth); optwrite= (function None->ground_depth:=3 | Some i->ground_depth:=(max i 0))} in declare_int_option gdopt let congruence_depth=ref 100 let _= let gdopt= { optsync=true; optdepr=false; optname="Congruence Depth"; optkey=["Congruence";"Depth"]; optread=(fun ()->Some !congruence_depth); optwrite= (function None->congruence_depth:=0 | Some i->congruence_depth:=(max i 0))} in declare_int_option gdopt let (set_default_solver, default_solver, print_default_solver) = Tactic_option.declare_tactic_option ~default:(<:tactic>) "Firstorder default solver" VERNAC COMMAND EXTEND Firstorder_Set_Solver | [ "Set" "Firstorder" "Solver" tactic(t) ] -> [ set_default_solver (Vernacexpr.use_section_locality ()) (Tacinterp.glob_tactic t) ] END VERNAC COMMAND EXTEND Firstorder_Print_Solver | [ "Print" "Firstorder" "Solver" ] -> [ Pp.msgnl (Pp.(++) (Pp.str"Firstorder solver tactic is ") (print_default_solver ())) ] END let fail_solver=tclFAIL 0 (Pp.str "GTauto failed") let gen_ground_tac flag taco ids bases gl= let backup= !qflag in try qflag:=flag; let solver= match taco with Some tac-> tac | None-> snd (default_solver ()) in let startseq gl= let seq=empty_seq !ground_depth in 
extend_with_auto_hints bases (extend_with_ref_list ids seq gl) gl in let result=ground_tac solver startseq gl in qflag:=backup;result with reraise ->qflag:=backup;raise reraise (* special for compatibility with Intuition let constant str = Coqlib.gen_constant "User" ["Init";"Logic"] str let defined_connectives=lazy [[],EvalConstRef (destConst (constant "not")); [],EvalConstRef (destConst (constant "iff"))] let normalize_evaluables= onAllHypsAndConcl (function None->unfold_in_concl (Lazy.force defined_connectives) | Some id-> unfold_in_hyp (Lazy.force defined_connectives) (Tacexpr.InHypType id)) *) open Genarg open Ppconstr open Printer let pr_firstorder_using_raw _ _ _ = prlist_with_sep pr_comma pr_reference let pr_firstorder_using_glob _ _ _ = prlist_with_sep pr_comma (pr_or_var (pr_located pr_global)) let pr_firstorder_using_typed _ _ _ = prlist_with_sep pr_comma pr_global ARGUMENT EXTEND firstorder_using PRINTED BY pr_firstorder_using_typed RAW_TYPED AS reference_list RAW_PRINTED BY pr_firstorder_using_raw GLOB_TYPED AS reference_list GLOB_PRINTED BY pr_firstorder_using_glob | [ "using" reference(a) ] -> [ [a] ] | [ "using" reference(a) "," ne_reference_list_sep(l,",") ] -> [ a::l ] | [ "using" reference(a) reference(b) reference_list(l) ] -> [ Flags.if_verbose Pp.msg_warning (Pp.str "Deprecated syntax; use \",\" as separator"); a::b::l ] | [ ] -> [ [] ] END TACTIC EXTEND firstorder [ "firstorder" tactic_opt(t) firstorder_using(l) ] -> [ gen_ground_tac true (Option.map eval_tactic t) l [] ] | [ "firstorder" tactic_opt(t) "with" ne_preident_list(l) ] -> [ gen_ground_tac true (Option.map eval_tactic t) [] l ] | [ "firstorder" tactic_opt(t) firstorder_using(l) "with" ne_preident_list(l') ] -> [ gen_ground_tac true (Option.map eval_tactic t) l l' ] END TACTIC EXTEND gintuition [ "gintuition" tactic_opt(t) ] -> [ gen_ground_tac false (Option.map eval_tactic t) [] [] ] END let default_declarative_automation gls = tclORELSE (tclORELSE (Auto.h_trivial [] None) (Cctac.congruence_tac !congruence_depth [])) (gen_ground_tac true (Some (tclTHEN (snd (default_solver ())) (Cctac.congruence_tac !congruence_depth []))) [] []) gls let () = Decl_proof_instr.register_automation_tac default_declarative_automation coq-8.4pl2/plugins/firstorder/rules.ml0000640000175000001440000001404012010532755017156 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tactic) -> Sequent.t -> tactic type lseqtac= global_reference -> seqtac type 'a with_backtracking = tactic -> 'a let wrap n b continue seq gls= check_for_interrupt (); let nc=pf_hyps gls in let env=pf_env gls in let rec aux i nc ctx= if i<=0 then seq else match nc with []->anomaly "Not the expected number of hyps" | ((id,_,typ) as nd)::q-> if occur_var env id (pf_concl gls) || List.exists (occur_var_in_decl env id) ctx then (aux (i-1) q (nd::ctx)) else add_formula Hyp (VarRef id) typ (aux (i-1) q (nd::ctx)) gls in let seq1=aux n nc [] in let seq2=if b then add_formula Concl dummy_id (pf_concl gls) seq1 gls else seq1 in continue seq2 gls let basename_of_global=function VarRef id->id | _->assert false let clear_global=function VarRef id->clear [id] | _->tclIDTAC (* connection rules *) let axiom_tac t seq= try exact_no_check (constr_of_global (find_left t seq)) with Not_found->tclFAIL 0 (Pp.str "No axiom link") let ll_atom_tac a backtrack id continue seq= tclIFTHENELSE (try tclTHENLIST [generalize [mkApp(constr_of_global id, [|constr_of_global (find_left a 
seq)|])]; clear_global id; intro] with Not_found->tclFAIL 0 (Pp.str "No link")) (wrap 1 false continue seq) backtrack (* right connectives rules *) let and_tac backtrack continue seq= tclIFTHENELSE simplest_split (wrap 0 true continue seq) backtrack let or_tac backtrack continue seq= tclORELSE (any_constructor false (Some (tclCOMPLETE (wrap 0 true continue seq)))) backtrack let arrow_tac backtrack continue seq= tclIFTHENELSE intro (wrap 1 true continue seq) (tclORELSE (tclTHEN introf (tclCOMPLETE (wrap 1 true continue seq))) backtrack) (* left connectives rules *) let left_and_tac ind backtrack id continue seq gls= let n=(construct_nhyps ind gls).(0) in tclIFTHENELSE (tclTHENLIST [simplest_elim (constr_of_global id); clear_global id; tclDO n intro]) (wrap n false continue seq) backtrack gls let left_or_tac ind backtrack id continue seq gls= let v=construct_nhyps ind gls in let f n= tclTHENLIST [clear_global id; tclDO n intro; wrap n false continue seq] in tclIFTHENSVELSE (simplest_elim (constr_of_global id)) (Array.map f v) backtrack gls let left_false_tac id= simplest_elim (constr_of_global id) (* left arrow connective rules *) (* We use this function for false, and, or, exists *) let ll_ind_tac ind largs backtrack id continue seq gl= let rcs=ind_hyps 0 ind largs gl in let vargs=Array.of_list largs in (* construire le terme H->B, le generaliser etc *) let myterm i= let rc=rcs.(i) in let p=List.length rc in let cstr=mkApp ((mkConstruct (ind,(i+1))),vargs) in let vars=Array.init p (fun j->mkRel (p-j)) in let capply=mkApp ((lift p cstr),vars) in let head=mkApp ((lift p (constr_of_global id)),[|capply|]) in it_mkLambda_or_LetIn head rc in let lp=Array.length rcs in let newhyps=list_tabulate myterm lp in tclIFTHENELSE (tclTHENLIST [generalize newhyps; clear_global id; tclDO lp intro]) (wrap lp false continue seq) backtrack gl let ll_arrow_tac a b c backtrack id continue seq= let cc=mkProd(Anonymous,a,(lift 1 b)) in let d=mkLambda (Anonymous,b, mkApp ((constr_of_global id), [|mkLambda (Anonymous,(lift 1 a),(mkRel 2))|])) in tclORELSE (tclTHENS (cut c) [tclTHENLIST [introf; clear_global id; wrap 1 false continue seq]; tclTHENS (cut cc) [exact_no_check (constr_of_global id); tclTHENLIST [generalize [d]; clear_global id; introf; introf; tclCOMPLETE (wrap 2 true continue seq)]]]) backtrack (* quantifier rules (easy side) *) let forall_tac backtrack continue seq= tclORELSE (tclIFTHENELSE intro (wrap 0 true continue seq) (tclORELSE (tclTHEN introf (tclCOMPLETE (wrap 0 true continue seq))) backtrack)) (if !qflag then tclFAIL 0 (Pp.str "reversible in 1st order mode") else backtrack) let left_exists_tac ind backtrack id continue seq gls= let n=(construct_nhyps ind gls).(0) in tclIFTHENELSE (simplest_elim (constr_of_global id)) (tclTHENLIST [clear_global id; tclDO n intro; (wrap (n-1) false continue seq)]) backtrack gls let ll_forall_tac prod backtrack id continue seq= tclORELSE (tclTHENS (cut prod) [tclTHENLIST [intro; (fun gls-> let id0=pf_nth_hyp_id gls 1 in let term=mkApp((constr_of_global id),[|mkVar(id0)|]) in tclTHEN (generalize [term]) (clear [id0]) gls); clear_global id; intro; tclCOMPLETE (wrap 1 false continue (deepen seq))]; tclCOMPLETE (wrap 0 true continue (deepen seq))]) backtrack (* rules for instantiation with unification moved to instances.ml *) (* special for compatibility with old Intuition *) let constant str = Coqlib.gen_constant "User" ["Init";"Logic"] str let defined_connectives=lazy [all_occurrences,EvalConstRef (destConst (constant "not")); all_occurrences,EvalConstRef 
(destConst (constant "iff"))] let normalize_evaluables= onAllHypsAndConcl (function None->unfold_in_concl (Lazy.force defined_connectives) | Some id -> unfold_in_hyp (Lazy.force defined_connectives) (id,InHypTypeOnly)) coq-8.4pl2/plugins/firstorder/unify.ml0000640000175000001440000001023312010532755017156 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (n,subst_meta [i,t] tn)) !sigma) in let rec head_reduce t= (* forbids non-sigma-normal meta in head position*) match kind_of_term t with Meta i-> (try head_reduce (List.assoc i !sigma) with Not_found->t) | _->t in Queue.add (t1,t2) bige; try while true do let t1,t2=Queue.take bige in let nt1=head_reduce (whd_betaiotazeta Evd.empty t1) and nt2=head_reduce (whd_betaiotazeta Evd.empty t2) in match (kind_of_term nt1),(kind_of_term nt2) with Meta i,Meta j-> if i<>j then if i let t=subst_meta !sigma nt2 in if Intset.is_empty (free_rels t) && not (occur_term (mkMeta i) t) then bind i t else raise (UFAIL(nt1,nt2)) | _,Meta i -> let t=subst_meta !sigma nt1 in if Intset.is_empty (free_rels t) && not (occur_term (mkMeta i) t) then bind i t else raise (UFAIL(nt1,nt2)) | Cast(_,_,_),_->Queue.add (strip_outer_cast nt1,nt2) bige | _,Cast(_,_,_)->Queue.add (nt1,strip_outer_cast nt2) bige | (Prod(_,a,b),Prod(_,c,d))|(Lambda(_,a,b),Lambda(_,c,d))-> Queue.add (a,c) bige;Queue.add (pop b,pop d) bige | Case (_,pa,ca,va),Case (_,pb,cb,vb)-> Queue.add (pa,pb) bige; Queue.add (ca,cb) bige; let l=Array.length va in if l<>(Array.length vb) then raise (UFAIL (nt1,nt2)) else for i=0 to l-1 do Queue.add (va.(i),vb.(i)) bige done | App(ha,va),App(hb,vb)-> Queue.add (ha,hb) bige; let l=Array.length va in if l<>(Array.length vb) then raise (UFAIL (nt1,nt2)) else for i=0 to l-1 do Queue.add (va.(i),vb.(i)) bige done | _->if not (eq_constr nt1 nt2) then raise (UFAIL (nt1,nt2)) done; assert false (* this place is unreachable but needed for the sake of typing *) with Queue.Empty-> !sigma let value i t= let add x y= if x<0 then y else if y<0 then x else x+y in let rec vaux term= if isMeta term && destMeta term = i then 0 else let f v t=add v (vaux t) in let vr=fold_constr f (-1) term in if vr<0 then -1 else vr+1 in vaux t type instance= Real of (int*constr)*int | Phantom of constr let mk_rel_inst t= let new_rel=ref 1 in let rel_env=ref [] in let rec renum_rec d t= match kind_of_term t with Meta n-> (try mkRel (d+(List.assoc n !rel_env)) with Not_found-> let m= !new_rel in incr new_rel; rel_env:=(n,m) :: !rel_env; mkRel (m+d)) | _ -> map_constr_with_binders succ renum_rec d t in let nt=renum_rec 0 t in (!new_rel - 1,nt) let unif_atoms i dom t1 t2= try let t=List.assoc i (unif t1 t2) in if isMeta t then Some (Phantom dom) else Some (Real(mk_rel_inst t,value i t1)) with UFAIL(_,_) ->None | Not_found ->Some (Phantom dom) let renum_metas_from k n t= (* requires n = max (free_rels t) *) let l=list_tabulate (fun i->mkMeta (k+i)) n in substl l t let more_general (m1,t1) (m2,t2)= let mt1=renum_metas_from 0 m1 t1 and mt2=renum_metas_from m1 m2 t2 in try let sigma=unif mt1 mt2 in let p (n,t)= nfalse coq-8.4pl2/plugins/firstorder/sequent.mli0000640000175000001440000000340112010532755017660 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* global_reference -> global_reference list CM.t -> global_reference list CM.t val cm_remove : constr -> global_reference -> global_reference 
list CM.t -> global_reference list CM.t module HP: Heap.S with type elt=Formula.t type t = {redexes:HP.t; context: global_reference list CM.t; latoms:constr list; gl:types; glatom:constr option; cnt:counter; history:History.t; depth:int} val deepen: t -> t val record: h_item -> t -> t val lookup: h_item -> t -> bool val add_formula : side -> global_reference -> constr -> t -> Proof_type.goal sigma -> t val re_add_formula_list : Formula.t list -> t -> t val find_left : constr -> t -> global_reference val take_formula : t -> Formula.t * t val empty_seq : int -> t val extend_with_ref_list : global_reference list -> t -> Proof_type.goal sigma -> t val extend_with_auto_hints : Auto.hint_db_name list -> t -> Proof_type.goal sigma -> t val print_cmap: global_reference list CM.t -> unit coq-8.4pl2/plugins/firstorder/formula.mli0000640000175000001440000000404212010532755017643 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a -> int) -> ('b -> 'b -> int) -> 'a -> 'a -> 'b -> 'b -> int val (==?) : ('a -> 'a -> 'b ->'b -> int) -> ('c -> 'c -> int) -> 'a -> 'a -> 'b -> 'b -> 'c ->'c -> int type ('a,'b) sum = Left of 'a | Right of 'b type counter = bool -> metavariable val construct_nhyps : inductive -> Proof_type.goal Tacmach.sigma -> int array val ind_hyps : int -> inductive -> constr list -> Proof_type.goal Tacmach.sigma -> rel_context array type atoms = {positive:constr list;negative:constr list} type side = Hyp | Concl | Hint val dummy_id: global_reference val build_atoms : Proof_type.goal Tacmach.sigma -> counter -> side -> constr -> bool * atoms type right_pattern = Rarrow | Rand | Ror | Rfalse | Rforall | Rexists of metavariable*constr*bool type left_arrow_pattern= LLatom | LLfalse of inductive*constr list | LLand of inductive*constr list | LLor of inductive*constr list | LLforall of constr | LLexists of inductive*constr list | LLarrow of constr*constr*constr type left_pattern= Lfalse | Land of inductive | Lor of inductive | Lforall of metavariable*constr*bool | Lexists of inductive | LA of constr*left_arrow_pattern type t={id: global_reference; constr: constr; pat: (left_pattern,right_pattern) sum; atoms: atoms} (*exception Is_atom of constr*) val build_formula : side -> global_reference -> types -> Proof_type.goal Tacmach.sigma -> counter -> (t,types) sum coq-8.4pl2/plugins/firstorder/ground.ml0000640000175000001440000000733312010532755017331 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* () in List.iter f (Classops.coercions ()); red_flags:= Closure.RedFlags.red_add_transparent Closure.betaiotazeta (Names.Idpred.full,Names.Cpred.complement !predref) let ground_tac solver startseq gl= update_flags (); let rec toptac skipped seq gl= if Tacinterp.get_debug()=Tactic_debug.DebugOn 0 then Pp.msgnl (Printer.pr_goal gl); tclORELSE (axiom_tac seq.gl seq) begin try let (hd,seq1)=take_formula seq and re_add s=re_add_formula_list skipped s in let continue=toptac [] and backtrack gl=toptac (hd::skipped) seq1 gl in match hd.pat with Right rpat-> begin match rpat with Rand-> and_tac backtrack continue (re_add seq1) | Rforall-> let backtrack1= if !qflag then tclFAIL 0 (Pp.str "reversible in 1st order mode") else backtrack in forall_tac backtrack1 continue (re_add seq1) | Rarrow-> arrow_tac backtrack continue (re_add seq1) | Ror-> or_tac backtrack continue (re_add seq1) | Rfalse->backtrack | 
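(* --------------------------------------------------------------------
   Editorial illustration, not part of the archived sources.  formula.mli
   above declares the comparison combinators (=?) and (==?) that
   sequent.ml and instances.ml use to chain orderings lexicographically:
   compare with the first ordering and only fall back to the next one on
   a tie.  With the declared signatures they can be realised as follows
   (a sketch; the actual definitions sit in the elided opening of
   formula.ml):

   let ( =? ) (cmp1 : 'a -> 'a -> int) (cmp2 : 'b -> 'b -> int)
       (a1 : 'a) (a2 : 'a) (b1 : 'b) (b2 : 'b) : int =
     let c = cmp1 a1 a2 in
     if c = 0 then cmp2 b1 b2 else c

   let ( ==? ) (cmp1 : 'a -> 'a -> 'b -> 'b -> int) (cmp2 : 'c -> 'c -> int)
       (a1 : 'a) (a2 : 'a) (b1 : 'b) (b2 : 'b) (c1 : 'c) (c2 : 'c) : int =
     let c = cmp1 a1 a2 b1 b2 in
     if c = 0 then cmp2 c1 c2 else c

   e.g. ((-) =? compare) i1 i2 x1 x2 compares i1 with i2 first and falls
   back to compare x1 x2 on a tie.
   -------------------------------------------------------------------- *)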
Rexists(i,dom,triv)-> let (lfp,seq2)=collect_quantified seq in let backtrack2=toptac (lfp@skipped) seq2 in if !qflag && seq.depth>0 then quantified_tac lfp backtrack2 continue (re_add seq) else backtrack2 (* need special backtracking *) end | Left lpat-> begin match lpat with Lfalse-> left_false_tac hd.id | Land ind-> left_and_tac ind backtrack hd.id continue (re_add seq1) | Lor ind-> left_or_tac ind backtrack hd.id continue (re_add seq1) | Lforall (_,_,_)-> let (lfp,seq2)=collect_quantified seq in let backtrack2=toptac (lfp@skipped) seq2 in if !qflag && seq.depth>0 then quantified_tac lfp backtrack2 continue (re_add seq) else backtrack2 (* need special backtracking *) | Lexists ind -> if !qflag then left_exists_tac ind backtrack hd.id continue (re_add seq1) else backtrack | LA (typ,lap)-> let la_tac= begin match lap with LLatom -> backtrack | LLand (ind,largs) | LLor(ind,largs) | LLfalse (ind,largs)-> (ll_ind_tac ind largs backtrack hd.id continue (re_add seq1)) | LLforall p -> if seq.depth>0 && !qflag then (ll_forall_tac p backtrack hd.id continue (re_add seq1)) else backtrack | LLexists (ind,l) -> if !qflag then ll_ind_tac ind l backtrack hd.id continue (re_add seq1) else backtrack | LLarrow (a,b,c) -> (ll_arrow_tac a b c backtrack hd.id continue (re_add seq1)) end in ll_atom_tac typ la_tac hd.id continue (re_add seq1) end with Heap.EmptyHeap->solver end gl in wrap (List.length (pf_hyps gl)) true (toptac []) (startseq gl) gl coq-8.4pl2/plugins/firstorder/instances.mli0000640000175000001440000000146012010532755020166 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Formula.t list * Sequent.t val give_instances : Formula.t list -> Sequent.t -> (Unify.instance * global_reference) list val quantified_tac : Formula.t list -> seqtac with_backtracking coq-8.4pl2/plugins/firstorder/ground_plugin.mllib0000640000175000001440000000011011254456226021367 0ustar notinusersFormula Unify Sequent Rules Instances Ground G_ground Ground_plugin_mod coq-8.4pl2/plugins/firstorder/rules.mli0000640000175000001440000000306212010532755017331 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tactic) -> Sequent.t -> tactic type lseqtac= global_reference -> seqtac type 'a with_backtracking = tactic -> 'a val wrap : int -> bool -> seqtac val basename_of_global: global_reference -> identifier val clear_global: global_reference -> tactic val axiom_tac : constr -> Sequent.t -> tactic val ll_atom_tac : constr -> lseqtac with_backtracking val and_tac : seqtac with_backtracking val or_tac : seqtac with_backtracking val arrow_tac : seqtac with_backtracking val left_and_tac : inductive -> lseqtac with_backtracking val left_or_tac : inductive -> lseqtac with_backtracking val left_false_tac : global_reference -> tactic val ll_ind_tac : inductive -> constr list -> lseqtac with_backtracking val ll_arrow_tac : constr -> constr -> constr -> lseqtac with_backtracking val forall_tac : seqtac with_backtracking val left_exists_tac : inductive -> lseqtac with_backtracking val ll_forall_tac : types -> lseqtac with_backtracking val normalize_evaluables : tactic coq-8.4pl2/plugins/firstorder/unify.mli0000640000175000001440000000160712010532755017334 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> (int*constr) 
list type instance= Real of (int*constr)*int (* nb trous*terme*valeur heuristique *) | Phantom of constr (* domaine de quantification *) val unif_atoms : metavariable -> constr -> constr -> constr -> instance option val more_general : (int*constr) -> (int*constr) -> bool coq-8.4pl2/plugins/firstorder/sequent.ml0000640000175000001440000001350312010532755017513 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* if b then incr cnt;!cnt let priority = (* pure heuristics, <=0 for non reversible *) function Right rf-> begin match rf with Rarrow -> 100 | Rand -> 40 | Ror -> -15 | Rfalse -> -50 | Rforall -> 100 | Rexists (_,_,_) -> -29 end | Left lf -> match lf with Lfalse -> 999 | Land _ -> 90 | Lor _ -> 40 | Lforall (_,_,_) -> -30 | Lexists _ -> 60 | LA(_,lap) -> match lap with LLatom -> 0 | LLfalse (_,_) -> 100 | LLand (_,_) -> 80 | LLor (_,_) -> 70 | LLforall _ -> -20 | LLexists (_,_) -> 50 | LLarrow (_,_,_) -> -10 let left_reversible lpat=(priority lpat)>0 module OrderedFormula= struct type t=Formula.t let compare e1 e2= (priority e1.pat) - (priority e2.pat) end module OrderedConstr= struct type t=constr let compare=constr_ord end type h_item = global_reference * (int*constr) option module Hitem= struct type t = h_item let compare (id1,co1) (id2,co2)= (Libnames.RefOrdered.compare =? (fun oc1 oc2 -> match oc1,oc2 with Some (m1,c1),Some (m2,c2) -> ((-) =? OrderedConstr.compare) m1 m2 c1 c2 | _,_->Pervasives.compare oc1 oc2)) id1 id2 co1 co2 end module CM=Map.Make(OrderedConstr) module History=Set.Make(Hitem) let cm_add typ nam cm= try let l=CM.find typ cm in CM.add typ (nam::l) cm with Not_found->CM.add typ [nam] cm let cm_remove typ nam cm= try let l=CM.find typ cm in let l0=List.filter (fun id->id<>nam) l in match l0 with []->CM.remove typ cm | _ ->CM.add typ l0 cm with Not_found ->cm module HP=Heap.Functional(OrderedFormula) type t= {redexes:HP.t; context:(global_reference list) CM.t; latoms:constr list; gl:types; glatom:constr option; cnt:counter; history:History.t; depth:int} let deepen seq={seq with depth=seq.depth-1} let record item seq={seq with history=History.add item seq.history} let lookup item seq= History.mem item seq.history || match item with (_,None)->false | (id,Some ((m,t) as c))-> let p (id2,o)= match o with None -> false | Some ((m2,t2) as c2)->id=id2 && m2>m && more_general c2 c in History.exists p seq.history let rec add_formula side nam t seq gl= match build_formula side nam t gl seq.cnt with Left f-> begin match side with Concl -> {seq with redexes=HP.add f seq.redexes; gl=f.constr; glatom=None} | _ -> {seq with redexes=HP.add f seq.redexes; context=cm_add f.constr nam seq.context} end | Right t-> match side with Concl -> {seq with gl=t;glatom=Some t} | _ -> {seq with context=cm_add t nam seq.context; latoms=t::seq.latoms} let re_add_formula_list lf seq= let do_one f cm= if f.id == dummy_id then cm else cm_add f.constr f.id cm in {seq with redexes=List.fold_right HP.add lf seq.redexes; context=List.fold_right do_one lf seq.context} let find_left t seq=List.hd (CM.find t seq.context) (*let rev_left seq= try let lpat=(HP.maximum seq.redexes).pat in left_reversible lpat with Heap.EmptyHeap -> false *) let no_formula seq= seq.redexes=HP.empty let rec take_formula seq= let hd=HP.maximum seq.redexes and hp=HP.remove seq.redexes in if hd.id == dummy_id then let nseq={seq with redexes=hp} in if seq.gl==hd.constr then hd,nseq else take_formula nseq (* discarding deprecated 
goal *) else hd,{seq with redexes=hp; context=cm_remove hd.constr hd.id seq.context} let empty_seq depth= {redexes=HP.empty; context=CM.empty; latoms=[]; gl=(mkMeta 1); glatom=None; cnt=newcnt (); history=History.empty; depth=depth} let expand_constructor_hints = list_map_append (function | IndRef ind -> list_tabulate (fun i -> ConstructRef (ind,i+1)) (Inductiveops.nconstructors ind) | gr -> [gr]) let extend_with_ref_list l seq gl= let l = expand_constructor_hints l in let f gr seq= let c=constr_of_global gr in let typ=(pf_type_of gl c) in add_formula Hyp gr typ seq gl in List.fold_right f l seq open Auto let extend_with_auto_hints l seq gl= let seqref=ref seq in let f p_a_t = match p_a_t.code with Res_pf (c,_) | Give_exact c | Res_pf_THEN_trivial_fail (c,_) -> (try let gr=global_of_constr c in let typ=(pf_type_of gl c) in seqref:=add_formula Hint gr typ !seqref gl with Not_found->()) | _-> () in let g _ l = List.iter f l in let h dbname= let hdb= try searchtable_map dbname with Not_found-> error ("Firstorder: "^dbname^" : No such Hint database") in Hint_db.iter g hdb in List.iter h l; !seqref let print_cmap map= let print_entry c l s= let xc=Constrextern.extern_constr false (Global.env ()) c in str "| " ++ Util.prlist Printer.pr_global l ++ str " : " ++ Ppconstr.pr_constr_expr xc ++ cut () ++ s in msgnl (v 0 (str "-----" ++ cut () ++ CM.fold print_entry map (mt ()) ++ str "-----")) coq-8.4pl2/plugins/firstorder/formula.ml0000640000175000001440000001677212010532755017507 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* metavariable exception Is_atom of constr let meta_succ m = m+1 let rec nb_prod_after n c= match kind_of_term c with | Prod (_,_,b) ->if n>0 then nb_prod_after (n-1) b else 1+(nb_prod_after 0 b) | _ -> 0 let construct_nhyps ind gls = let nparams = (fst (Global.lookup_inductive ind)).mind_nparams in let constr_types = Inductiveops.arities_of_constructors (pf_env gls) ind in let hyp = nb_prod_after nparams in Array.map hyp constr_types (* indhyps builds the array of arrays of constructor hyps for (ind largs)*) let ind_hyps nevar ind largs gls= let types= Inductiveops.arities_of_constructors (pf_env gls) ind in let lp=Array.length types in let myhyps i= let t1=Term.prod_applist types.(i) largs in let t2=snd (decompose_prod_n_assum nevar t1) in fst (decompose_prod_assum t2) in Array.init lp myhyps let special_nf gl= let infos=Closure.create_clos_infos !red_flags (pf_env gl) in (fun t -> Closure.norm_val infos (Closure.inject t)) let special_whd gl= let infos=Closure.create_clos_infos !red_flags (pf_env gl) in (fun t -> Closure.whd_val infos (Closure.inject t)) type kind_of_formula= Arrow of constr*constr | False of inductive*constr list | And of inductive*constr list*bool | Or of inductive*constr list*bool | Exists of inductive*constr list | Forall of constr*constr | Atom of constr let rec kind_of_formula gl term = let normalize=special_nf gl in let cciterm=special_whd gl term in match match_with_imp_term cciterm with Some (a,b)-> Arrow(a,(pop b)) |_-> match match_with_forall_term cciterm with Some (_,a,b)-> Forall(a,b) |_-> match match_with_nodep_ind cciterm with Some (i,l,n)-> let ind=destInd i in let (mib,mip) = Global.lookup_inductive ind in let nconstr=Array.length mip.mind_consnames in if nconstr=0 then False(ind,l) else let has_realargs=(n>0) in let is_trivial= let is_constant c = nb_prod c = mib.mind_nparams in array_exists is_constant mip.mind_nf_lc in if 
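(* --------------------------------------------------------------------
   Editorial illustration, not from the archived sources; names invented.
   sequent.ml above keeps the pending formulas in a functional max-heap
   ([HP], built over [OrderedFormula]) so that [take_formula] always
   returns the redex with the highest heuristic priority, reversible
   rules being given positive weights.  The same agenda discipline with a
   plain sorted list instead of Heap.Functional:

   type 'a agenda = ('a * int) list    (* sorted by decreasing priority *)

   let add x prio (ag : 'a agenda) : 'a agenda =
     let rec ins = function
       | [] -> [ (x, prio) ]
       | ((_, p) :: _) as l when prio >= p -> (x, prio) :: l
       | entry :: rest -> entry :: ins rest
     in ins ag

   let take : 'a agenda -> ('a * 'a agenda) option = function
     | [] -> None
     | (x, _) :: rest -> Some (x, rest)
   -------------------------------------------------------------------- *)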
Inductiveops.mis_is_recursive (ind,mib,mip) || (has_realargs && not is_trivial) then Atom cciterm else if nconstr=1 then And(ind,l,is_trivial) else Or(ind,l,is_trivial) | _ -> match match_with_sigma_type cciterm with Some (i,l)-> Exists((destInd i),l) |_-> Atom (normalize cciterm) type atoms = {positive:constr list;negative:constr list} type side = Hyp | Concl | Hint let no_atoms = (false,{positive=[];negative=[]}) let dummy_id=VarRef (id_of_string "_") (* "_" cannot be parsed *) let build_atoms gl metagen side cciterm = let trivial =ref false and positive=ref [] and negative=ref [] in let normalize=special_nf gl in let rec build_rec env polarity cciterm= match kind_of_formula gl cciterm with False(_,_)->if not polarity then trivial:=true | Arrow (a,b)-> build_rec env (not polarity) a; build_rec env polarity b | And(i,l,b) | Or(i,l,b)-> if b then begin let unsigned=normalize (substnl env 0 cciterm) in if polarity then positive:= unsigned :: !positive else negative:= unsigned :: !negative end; let v = ind_hyps 0 i l gl in let g i _ (_,_,t) = build_rec env polarity (lift i t) in let f l = list_fold_left_i g (1-(List.length l)) () l in if polarity && (* we have a constant constructor *) array_exists (function []->true|_->false) v then trivial:=true; Array.iter f v | Exists(i,l)-> let var=mkMeta (metagen true) in let v =(ind_hyps 1 i l gl).(0) in let g i _ (_,_,t) = build_rec (var::env) polarity (lift i t) in list_fold_left_i g (2-(List.length l)) () v | Forall(_,b)-> let var=mkMeta (metagen true) in build_rec (var::env) polarity b | Atom t-> let unsigned=substnl env 0 t in if not (isMeta unsigned) then (* discarding wildcard atoms *) if polarity then positive:= unsigned :: !positive else negative:= unsigned :: !negative in begin match side with Concl -> build_rec [] true cciterm | Hyp -> build_rec [] false cciterm | Hint -> let rels,head=decompose_prod cciterm in let env=List.rev (List.map (fun _->mkMeta (metagen true)) rels) in build_rec env false head;trivial:=false (* special for hints *) end; (!trivial, {positive= !positive; negative= !negative}) type right_pattern = Rarrow | Rand | Ror | Rfalse | Rforall | Rexists of metavariable*constr*bool type left_arrow_pattern= LLatom | LLfalse of inductive*constr list | LLand of inductive*constr list | LLor of inductive*constr list | LLforall of constr | LLexists of inductive*constr list | LLarrow of constr*constr*constr type left_pattern= Lfalse | Land of inductive | Lor of inductive | Lforall of metavariable*constr*bool | Lexists of inductive | LA of constr*left_arrow_pattern type t={id:global_reference; constr:constr; pat:(left_pattern,right_pattern) sum; atoms:atoms} let build_formula side nam typ gl metagen= let normalize = special_nf gl in try let m=meta_succ(metagen false) in let trivial,atoms= if !qflag then build_atoms gl metagen side typ else no_atoms in let pattern= match side with Concl -> let pat= match kind_of_formula gl typ with False(_,_) -> Rfalse | Atom a -> raise (Is_atom a) | And(_,_,_) -> Rand | Or(_,_,_) -> Ror | Exists (i,l) -> let (_,_,d)=list_last (ind_hyps 0 i l gl).(0) in Rexists(m,d,trivial) | Forall (_,a) -> Rforall | Arrow (a,b) -> Rarrow in Right pat | _ -> let pat= match kind_of_formula gl typ with False(i,_) -> Lfalse | Atom a -> raise (Is_atom a) | And(i,_,b) -> if b then let nftyp=normalize typ in raise (Is_atom nftyp) else Land i | Or(i,_,b) -> if b then let nftyp=normalize typ in raise (Is_atom nftyp) else Lor i | Exists (ind,_) -> Lexists ind | Forall (d,_) -> Lforall(m,d,trivial) | Arrow (a,b) -> let 
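(* --------------------------------------------------------------------
   Editorial illustration, not part of the archived sources.  build_atoms
   above walks a formula and records its atoms together with a polarity
   that flips each time the traversal goes under the left-hand side of an
   arrow; the resulting positive/negative split is what instances.ml later
   feeds to unif_atoms.  A self-contained toy, with invented names,
   showing the same discipline:

   type prop = Atom of string | Imp of prop * prop | Conj of prop * prop

   let collect_atoms (p : prop) : string list * string list =
     let pos = ref [] and neg = ref [] in
     let rec go polarity = function
       | Atom a -> if polarity then pos := a :: !pos else neg := a :: !neg
       | Imp (a, b) -> go (not polarity) a; go polarity b
       | Conj (a, b) -> go polarity a; go polarity b
     in
     go true p;
     (!pos, !neg)
   -------------------------------------------------------------------- *)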
nfa=normalize a in LA (nfa, match kind_of_formula gl a with False(i,l)-> LLfalse(i,l) | Atom t-> LLatom | And(i,l,_)-> LLand(i,l) | Or(i,l,_)-> LLor(i,l) | Arrow(a,c)-> LLarrow(a,c,b) | Exists(i,l)->LLexists(i,l) | Forall(_,_)->LLforall a) in Left pat in Left {id=nam; constr=normalize typ; pat=pattern; atoms=atoms} with Is_atom a-> Right a (* already in nf *) coq-8.4pl2/plugins/firstorder/ground.mli0000640000175000001440000000116612010532755017500 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (Proof_type.goal Tacmach.sigma -> Sequent.t) -> Tacmach.tactic coq-8.4pl2/plugins/firstorder/instances.ml0000640000175000001440000001351212121620060020004 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (OrderedConstr.compare d1 d2) | Real((m1,c1),n1),Real((m2,c2),n2)-> ((-) =? (-) ==? OrderedConstr.compare) m2 m1 n1 n2 c1 c2 | Phantom(_),Real((m,_),_)-> if m=0 then -1 else 1 | Real((m,_),_),Phantom(_)-> if m=0 then 1 else -1 let compare_gr id1 id2 = if id1==id2 then 0 else if id1==dummy_id then 1 else if id2==dummy_id then -1 else Libnames.RefOrdered.compare id1 id2 module OrderedInstance= struct type t=instance * Libnames.global_reference let compare (inst1,id1) (inst2,id2)= (compare_instance =? compare_gr) inst2 inst1 id2 id1 (* we want a __decreasing__ total order *) end module IS=Set.Make(OrderedInstance) let make_simple_atoms seq= let ratoms= match seq.glatom with Some t->[t] | None->[] in {negative=seq.latoms;positive=ratoms} let do_sequent setref triv id seq i dom atoms= let flag=ref true in let phref=ref triv in let do_atoms a1 a2 = let do_pair t1 t2 = match unif_atoms i dom t1 t2 with None->() | Some (Phantom _) ->phref:=true | Some c ->flag:=false;setref:=IS.add (c,id) !setref in List.iter (fun t->List.iter (do_pair t) a2.negative) a1.positive; List.iter (fun t->List.iter (do_pair t) a2.positive) a1.negative in HP.iter (fun lf->do_atoms atoms lf.atoms) seq.redexes; do_atoms atoms (make_simple_atoms seq); !flag && !phref let match_one_quantified_hyp setref seq lf= match lf.pat with Left(Lforall(i,dom,triv))|Right(Rexists(i,dom,triv))-> if do_sequent setref triv lf.id seq i dom lf.atoms then setref:=IS.add ((Phantom dom),lf.id) !setref | _ ->anomaly "can't happen" let give_instances lf seq= let setref=ref IS.empty in List.iter (match_one_quantified_hyp setref seq) lf; IS.elements !setref (* collector for the engine *) let rec collect_quantified seq= try let hd,seq1=take_formula seq in (match hd.pat with Left(Lforall(_,_,_)) | Right(Rexists(_,_,_)) -> let (q,seq2)=collect_quantified seq1 in ((hd::q),seq2) | _->[],seq) with Heap.EmptyHeap -> [],seq (* open instances processor *) let dummy_constr=mkMeta (-1) let dummy_bvid=id_of_string "x" let mk_open_instance id gl m t= let env=pf_env gl in let evmap=Refiner.project gl in let var_id= if id==dummy_id then dummy_bvid else let typ=pf_type_of gl (constr_of_global id) in (* since we know we will get a product, reduction is not too expensive *) let (nam,_,_)=destProd (whd_betadeltaiota env evmap typ) in match nam with Name id -> id | Anonymous -> dummy_bvid in let revt=substl (list_tabulate (fun i->mkRel (m-i)) m) t in let rec aux n avoid= if n=0 then [] else let nid=(fresh_id avoid var_id gl) in (Name nid,None,dummy_constr)::(aux (n-1) (nid::avoid)) in let nt=it_mkLambda_or_LetIn revt (aux m []) in let rawt=Detyping.detype false [] [] 
nt in let rec raux n t= if n=0 then t else match t with GLambda(loc,name,k,_,t0)-> let t1=raux (n-1) t0 in GLambda(loc,name,k,GHole (dummy_loc,Evd.BinderType name),t1) | _-> anomaly "can't happen" in let ntt=try Pretyping.Default.understand evmap env (raux m rawt) with e when Errors.noncritical e -> error "Untypable instance, maybe higher-order non-prenex quantification" in decompose_lam_n_assum m ntt (* tactics *) let left_instance_tac (inst,id) continue seq= match inst with Phantom dom-> if lookup (id,None) seq then tclFAIL 0 (Pp.str "already done") else tclTHENS (cut dom) [tclTHENLIST [introf; (fun gls->generalize [mkApp(constr_of_global id, [|mkVar (Tacmach.pf_nth_hyp_id gls 1)|])] gls); introf; tclSOLVE [wrap 1 false continue (deepen (record (id,None) seq))]]; tclTRY assumption] | Real((m,t) as c,_)-> if lookup (id,Some c) seq then tclFAIL 0 (Pp.str "already done") else let special_generalize= if m>0 then fun gl-> let (rc,ot)= mk_open_instance id gl m t in let gt= it_mkLambda_or_LetIn (mkApp(constr_of_global id,[|ot|])) rc in generalize [gt] gl else generalize [mkApp(constr_of_global id,[|t|])] in tclTHENLIST [special_generalize; introf; tclSOLVE [wrap 1 false continue (deepen (record (id,Some c) seq))]] let right_instance_tac inst continue seq= match inst with Phantom dom -> tclTHENS (cut dom) [tclTHENLIST [introf; (fun gls-> split (Glob_term.ImplicitBindings [mkVar (Tacmach.pf_nth_hyp_id gls 1)]) gls); tclSOLVE [wrap 0 true continue (deepen seq)]]; tclTRY assumption] | Real ((0,t),_) -> (tclTHEN (split (Glob_term.ImplicitBindings [t])) (tclSOLVE [wrap 0 true continue (deepen seq)])) | Real ((m,t),_) -> tclFAIL 0 (Pp.str "not implemented ... yet") let instance_tac inst= if (snd inst)==dummy_id then right_instance_tac (fst inst) else left_instance_tac inst let quantified_tac lf backtrack continue seq gl= let insts=give_instances lf seq in tclORELSE (tclFIRST (List.map (fun inst->instance_tac inst continue seq) insts)) backtrack gl coq-8.4pl2/plugins/funind/0000750000175000001440000000000012127276540014600 5ustar notinuserscoq-8.4pl2/plugins/funind/glob_term_to_relation.ml0000640000175000001440000013716112121620060021475 0ustar notinusersopen Printer open Pp open Names open Term open Glob_term open Libnames open Indfun_common open Util open Glob_termops let observe strm = if do_observe () then Pp.msgnl strm else () let observennl strm = if do_observe () then Pp.msg strm else () type binder_type = | Lambda of name | Prod of name | LetIn of name type glob_context = (binder_type*glob_constr) list (* compose_glob_context [(bt_1,n_1,t_1);......] rt returns b_1(n_1,t_1,.....,bn(n_k,t_k,rt)) where the b_i's are the binders corresponding to the bt_i's *) let compose_glob_context = let compose_binder (bt,t) acc = match bt with | Lambda n -> mkGLambda(n,t,acc) | Prod n -> mkGProd(n,t,acc) | LetIn n -> mkGLetIn(n,t,acc) in List.fold_right compose_binder (* The main part deals with building a list of globalized constructor expressions from the rhs of a fixpoint equation. *) type 'a build_entry_pre_return = { context : glob_context; (* the binding context of the result *) value : 'a; (* The value *) } type 'a build_entry_return = { result : 'a build_entry_pre_return list; to_avoid : identifier list } (* [combine_results combine_fun res1 res2] combine two results [res1] and [res2] w.r.t. [combine_fun]. Informally, both [res1] and [res2] are lists of "constructors" [res1_1;...] and [res2_1,....] and we need to produce [combine_fun res1_1 res2_1;combine_fun res1_1 res2_2;........] 
*) let combine_results (combine_fun : 'a build_entry_pre_return -> 'b build_entry_pre_return -> 'c build_entry_pre_return ) (res1: 'a build_entry_return) (res2 : 'b build_entry_return) : 'c build_entry_return = let pre_result = List.map ( fun res1 -> (* for each result in arg_res *) List.map (* we add it in each args_res *) (fun res2 -> combine_fun res1 res2 ) res2.result ) res1.result in (* and then we flatten the map *) { result = List.concat pre_result; to_avoid = list_union res1.to_avoid res2.to_avoid } (* The combination function for an argument with a list of argument *) let combine_args arg args = { context = arg.context@args.context; (* Note that the binding context of [arg] MUST be placed before the one of [args] in order to preserve possible type dependencies *) value = arg.value::args.value; } let ids_of_binder = function | LetIn Anonymous | Prod Anonymous | Lambda Anonymous -> [] | LetIn (Name id) | Prod (Name id) | Lambda (Name id) -> [id] let rec change_vars_in_binder mapping = function [] -> [] | (bt,t)::l -> let new_mapping = List.fold_right Idmap.remove (ids_of_binder bt) mapping in (bt,change_vars mapping t):: (if idmap_is_empty new_mapping then l else change_vars_in_binder new_mapping l ) let rec replace_var_by_term_in_binder x_id term = function | [] -> [] | (bt,t)::l -> (bt,replace_var_by_term x_id term t):: if List.mem x_id (ids_of_binder bt) then l else replace_var_by_term_in_binder x_id term l let add_bt_names bt = List.append (ids_of_binder bt) let apply_args ctxt body args = let need_convert_id avoid id = List.exists (is_free_in id) args || List.mem id avoid in let need_convert avoid bt = List.exists (need_convert_id avoid) (ids_of_binder bt) in let next_name_away (na:name) (mapping: identifier Idmap.t) (avoid: identifier list) = match na with | Name id when List.mem id avoid -> let new_id = Namegen.next_ident_away id avoid in Name new_id,Idmap.add id new_id mapping,new_id::avoid | _ -> na,mapping,avoid in let next_bt_away bt (avoid:identifier list) = match bt with | LetIn na -> let new_na,mapping,new_avoid = next_name_away na Idmap.empty avoid in LetIn new_na,mapping,new_avoid | Prod na -> let new_na,mapping,new_avoid = next_name_away na Idmap.empty avoid in Prod new_na,mapping,new_avoid | Lambda na -> let new_na,mapping,new_avoid = next_name_away na Idmap.empty avoid in Lambda new_na,mapping,new_avoid in let rec do_apply avoid ctxt body args = match ctxt,args with | _,[] -> (* No more args *) (ctxt,body) | [],_ -> (* no more fun *) let f,args' = glob_decompose_app body in (ctxt,mkGApp(f,args'@args)) | (Lambda Anonymous,t)::ctxt',arg::args' -> do_apply avoid ctxt' body args' | (Lambda (Name id),t)::ctxt',arg::args' -> let new_avoid,new_ctxt',new_body,new_id = if need_convert_id avoid id then let new_avoid = id::avoid in let new_id = Namegen.next_ident_away id new_avoid in let new_avoid' = new_id :: new_avoid in let mapping = Idmap.add id new_id Idmap.empty in let new_ctxt' = change_vars_in_binder mapping ctxt' in let new_body = change_vars mapping body in new_avoid',new_ctxt',new_body,new_id else id::avoid,ctxt',body,id in let new_body = replace_var_by_term new_id arg new_body in let new_ctxt' = replace_var_by_term_in_binder new_id arg new_ctxt' in do_apply avoid new_ctxt' new_body args' | (bt,t)::ctxt',_ -> let new_avoid,new_ctxt',new_body,new_bt = let new_avoid = add_bt_names bt avoid in if need_convert avoid bt then let new_bt,mapping,new_avoid = next_bt_away bt new_avoid in ( new_avoid, change_vars_in_binder mapping ctxt', change_vars mapping body, new_bt ) 
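(* --------------------------------------------------------------------
   Editorial illustration, not from the archived sources.  combine_results
   above is, at its core, a cartesian product of two lists of partial
   results in which every pair is merged by a caller-supplied function
   (plus bookkeeping of the identifiers to avoid).  The bare list skeleton
   of that operation, with invented names:

   let combine (f : 'a -> 'b -> 'c) (xs : 'a list) (ys : 'b list) : 'c list =
     List.concat (List.map (fun x -> List.map (fun y -> f x y) ys) xs)

   e.g. combine (+) [1; 2] [10; 20] = [11; 21; 12; 22]
   -------------------------------------------------------------------- *)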
else new_avoid,ctxt',body,bt in let new_ctxt',new_body = do_apply new_avoid new_ctxt' new_body args in (new_bt,t)::new_ctxt',new_body in do_apply [] ctxt body args let combine_app f args = let new_ctxt,new_value = apply_args f.context f.value args.value in { (* Note that the binding context of [args] MUST be placed before the one of the applied value in order to preserve possible type dependencies *) context = args.context@new_ctxt; value = new_value; } let combine_lam n t b = { context = []; value = mkGLambda(n, compose_glob_context t.context t.value, compose_glob_context b.context b.value ) } let combine_prod n t b = { context = t.context@((Prod n,t.value)::b.context); value = b.value} let combine_letin n t b = { context = t.context@((LetIn n,t.value)::b.context); value = b.value} let mk_result ctxt value avoid = { result = [{context = ctxt; value = value}] ; to_avoid = avoid } (************************************************* Some functions to deal with overlapping patterns **************************************************) let coq_True_ref = lazy (Coqlib.gen_reference "" ["Init";"Logic"] "True") let coq_False_ref = lazy (Coqlib.gen_reference "" ["Init";"Logic"] "False") (* [make_discr_match_el \[e1,...en\]] builds match e1,...,en with (the list of expresions on which we will do the matching) *) let make_discr_match_el = List.map (fun e -> (e,(Anonymous,None))) (* [make_discr_match_brl i \[pat_1,...,pat_n\]] constructs a discrimination pattern matching on the ith expression. that is. match ?????? with \\ | pat_1 => False \\ | pat_{i-1} => False \\ | pat_i => True \\ | pat_{i+1} => False \\ \vdots | pat_n => False end *) let make_discr_match_brl i = list_map_i (fun j (_,idl,patl,_) -> if j=i then (dummy_loc,idl,patl, mkGRef (Lazy.force coq_True_ref)) else (dummy_loc,idl,patl, mkGRef (Lazy.force coq_False_ref)) ) 0 (* [make_discr_match brl el i] generates an hypothesis such that it reduce to true iff brl_{i} is the first branch matched by [el] Used when we want to simulate the coq pattern matching algorithm *) let make_discr_match brl = fun el i -> mkGCases(None, make_discr_match_el el, make_discr_match_brl i brl) let pr_name = function | Name id -> Ppconstr.pr_id id | Anonymous -> str "_" (**********************************************************************) (* functions used to build case expression from lettuple and if ones *) (**********************************************************************) (* [build_constructors_of_type] construct the array of pattern of its inductive argument*) let build_constructors_of_type ind' argl = let (mib,ind) = Inductive.lookup_mind_specif (Global.env()) ind' in let npar = mib.Declarations.mind_nparams in Array.mapi (fun i _ -> let construct = ind',i+1 in let constructref = ConstructRef(construct) in let _implicit_positions_of_cst = Impargs.implicits_of_global constructref in let cst_narg = Inductiveops.mis_constructor_nargs_env (Global.env ()) construct in let argl = match argl with | None -> Array.to_list (Array.init cst_narg (fun _ -> mkGHole ()) ) | Some l -> Array.to_list (Array.init npar (fun _ -> mkGHole ()))@l in let pat_as_term = mkGApp(mkGRef (ConstructRef(ind',i+1)),argl) in cases_pattern_of_glob_constr Anonymous pat_as_term ) ind.Declarations.mind_consnames (* [find_type_of] very naive attempts to discover the type of an if or a letin *) let rec find_type_of nb b = let f,_ = glob_decompose_app b in match f with | GRef(_,ref) -> begin let ind_type = match ref with | VarRef _ | ConstRef _ -> let constr_of_ref = constr_of_global ref in 
let type_of_ref = Typing.type_of (Global.env ()) Evd.empty constr_of_ref in let (_,ret_type) = Reduction.dest_prod (Global.env ()) type_of_ref in let ret_type,_ = decompose_app ret_type in if not (isInd ret_type) then begin (* Pp.msgnl (str "not an inductive" ++ pr_lconstr ret_type); *) raise (Invalid_argument "not an inductive") end; destInd ret_type | IndRef ind -> ind | ConstructRef c -> fst c in let _,ind_type_info = Inductive.lookup_mind_specif (Global.env()) ind_type in if not (Array.length ind_type_info.Declarations.mind_consnames = nb ) then raise (Invalid_argument "find_type_of : not a valid inductive"); ind_type end | GCast(_,b,_) -> find_type_of nb b | GApp _ -> assert false (* we have decomposed any application via glob_decompose_app *) | _ -> raise (Invalid_argument "not a ref") (******************) (* Main functions *) (******************) let raw_push_named (na,raw_value,raw_typ) env = match na with | Anonymous -> env | Name id -> let value = Option.map (Pretyping.Default.understand Evd.empty env) raw_value in let typ = Pretyping.Default.understand_type Evd.empty env raw_typ in Environ.push_named (id,value,typ) env let add_pat_variables pat typ env : Environ.env = let rec add_pat_variables env pat typ : Environ.env = observe (str "new rel env := " ++ Printer.pr_rel_context_of env); match pat with | PatVar(_,na) -> Environ.push_rel (na,None,typ) env | PatCstr(_,c,patl,na) -> let Inductiveops.IndType(indf,indargs) = try Inductiveops.find_rectype env Evd.empty typ with Not_found -> assert false in let constructors = Inductiveops.get_constructors env indf in let constructor : Inductiveops.constructor_summary = List.find (fun cs -> cs.Inductiveops.cs_cstr = c) (Array.to_list constructors) in let cs_args_types :types list = List.map (fun (_,_,t) -> t) constructor.Inductiveops.cs_args in List.fold_left2 add_pat_variables env patl (List.rev cs_args_types) in let new_env = add_pat_variables env pat typ in let res = fst ( Sign.fold_rel_context (fun (na,v,t) (env,ctxt) -> match na with | Anonymous -> assert false | Name id -> let new_t = substl ctxt t in let new_v = Option.map (substl ctxt) v in observe (str "for variable " ++ Ppconstr.pr_id id ++ fnl () ++ str "old type := " ++ Printer.pr_lconstr t ++ fnl () ++ str "new type := " ++ Printer.pr_lconstr new_t ++ fnl () ++ Option.fold_right (fun v _ -> str "old value := " ++ Printer.pr_lconstr v ++ fnl ()) v (mt ()) ++ Option.fold_right (fun v _ -> str "new value := " ++ Printer.pr_lconstr v ++ fnl ()) new_v (mt ()) ); (Environ.push_named (id,new_v,new_t) env,mkVar id::ctxt) ) (Environ.rel_context new_env) ~init:(env,[]) ) in observe (str "new var env := " ++ Printer.pr_named_context_of res); res let rec pattern_to_term_and_type env typ = function | PatVar(loc,Anonymous) -> assert false | PatVar(loc,Name id) -> mkGVar id | PatCstr(loc,constr,patternl,_) -> let cst_narg = Inductiveops.mis_constructor_nargs_env (Global.env ()) constr in let Inductiveops.IndType(indf,indargs) = try Inductiveops.find_rectype env Evd.empty typ with Not_found -> assert false in let constructors = Inductiveops.get_constructors env indf in let constructor = List.find (fun cs -> cs.Inductiveops.cs_cstr = constr) (Array.to_list constructors) in let cs_args_types :types list = List.map (fun (_,_,t) -> t) constructor.Inductiveops.cs_args in let _,cstl = Inductiveops.dest_ind_family indf in let csta = Array.of_list cstl in let implicit_args = Array.to_list (Array.init (cst_narg - List.length patternl) (fun i -> Detyping.detype false [] (Termops.names_of_rel_context 
env) csta.(i)) ) in let patl_as_term = List.map2 (pattern_to_term_and_type env) (List.rev cs_args_types) patternl in mkGApp(mkGRef(ConstructRef constr), implicit_args@patl_as_term ) (* [build_entry_lc funnames avoid rt] construct the list (in fact a build_entry_return) of constructors corresponding to [rt] when replacing calls to [funnames] by calls to the corresponding graphs. The idea to transform a term [t] into a list of constructors [lc] is the following: \begin{itemize} \item if the term is a binder (bind x, body) then first compute [lc'] the list corresponding to [body] and add (bind x. _) to each elements of [lc] \item if the term has the form (g t1 ... ... tn) where g does not appears in (fnames) then compute [lc1] ... [lcn] the lists of constructors corresponding to [t1] ... [tn], then combine those lists and [g] as follows~: for each element [c1,...,cn] of [lc1\times...\times lcn], [g c1 ... cn] is an element of [lc] \item if the term has the form (f t1 .... tn) where [f] appears in [fnames] then compute [lc1] ... [lcn] the lists of constructors corresponding to [t1] ... [tn], then compute those lists and [f] as follows~: for each element [c1,...,cn] of [lc1\times...\times lcn] create a new variable [res] and [forall res, R_f c1 ... cn res] is in [lc] \item if the term is a cast just treat its body part \item if the term is a match, an if or a lettuple then compute the lists corresponding to each branch of the case and concatenate them (informally, each branch of a match produces a new constructor) \end{itemize} WARNING: The terms constructed here are only USING the glob_constr syntax but are highly bad formed. We must wait to have complete all the current calculi to set the recursive calls. At this point, each term [f t1 ... tn] (where f appears in [funnames]) is replaced by a pseudo term [forall res, res t1 ... tn, res]. A reconstruction phase is done later. We in fact not create a constructor list since then end of each constructor has not the expected form but only the value of the function *) let rec build_entry_lc env funnames avoid rt : glob_constr build_entry_return = observe (str " Entering : " ++ Printer.pr_glob_constr rt); match rt with | GRef _ | GVar _ | GEvar _ | GPatVar _ | GSort _ | GHole _ -> (* do nothing (except changing type of course) *) mk_result [] rt avoid | GApp(_,_,_) -> let f,args = glob_decompose_app rt in let args_res : (glob_constr list) build_entry_return = List.fold_right (* create the arguments lists of constructors and combine them *) (fun arg ctxt_argsl -> let arg_res = build_entry_lc env funnames ctxt_argsl.to_avoid arg in combine_results combine_args arg_res ctxt_argsl ) args (mk_result [] [] avoid) in begin match f with | GLambda _ -> let rec aux t l = match l with | [] -> t | u::l -> match t with | GLambda(loc,na,_,nat,b) -> GLetIn(dummy_loc,na,u,aux b l) | _ -> GApp(dummy_loc,t,l) in build_entry_lc env funnames avoid (aux f args) | GVar(_,id) when Idset.mem id funnames -> (* if we have [f t1 ... tn] with [f]$\in$[fnames] then we create a fresh variable [res], add [res] and its "value" (i.e. [res v1 ... vn]) to each pseudo constructor build for the arguments (i.e. a pseudo context [ctxt] and a pseudo value "v1 ... vn". 
The "value" of this branch is then simply [res] *) let rt_as_constr = Pretyping.Default.understand Evd.empty env rt in let rt_typ = Typing.type_of env Evd.empty rt_as_constr in let res_raw_type = Detyping.detype false [] (Termops.names_of_rel_context env) rt_typ in let res = fresh_id args_res.to_avoid "res" in let new_avoid = res::args_res.to_avoid in let res_rt = mkGVar res in let new_result = List.map (fun arg_res -> let new_hyps = [Prod (Name res),res_raw_type; Prod Anonymous,mkGApp(res_rt,(mkGVar id)::arg_res.value)] in {context = arg_res.context@new_hyps; value = res_rt } ) args_res.result in { result = new_result; to_avoid = new_avoid } | GVar _ | GEvar _ | GPatVar _ | GHole _ | GSort _ | GRef _ -> (* if have [g t1 ... tn] with [g] not appearing in [funnames] then foreach [ctxt,v1 ... vn] in [args_res] we return [ctxt, g v1 .... vn] *) { args_res with result = List.map (fun args_res -> {args_res with value = mkGApp(f,args_res.value)}) args_res.result } | GApp _ -> assert false (* we have collected all the app in [glob_decompose_app] *) | GLetIn(_,n,t,b) -> (* if we have [(let x := v in b) t1 ... tn] , we discard our work and compute the list of constructor for [let x = v in (b t1 ... tn)] up to alpha conversion *) let new_n,new_b,new_avoid = match n with | Name id when List.exists (is_free_in id) args -> (* need to alpha-convert the name *) let new_id = Namegen.next_ident_away id avoid in let new_avoid = id:: avoid in let new_b = replace_var_by_term id (GVar(dummy_loc,id)) b in (Name new_id,new_b,new_avoid) | _ -> n,b,avoid in build_entry_lc env funnames avoid (mkGLetIn(new_n,t,mkGApp(new_b,args))) | GCases _ | GIf _ | GLetTuple _ -> (* we have [(match e1, ...., en with ..... end) t1 tn] we first compute the result from the case and then combine each of them with each of args one *) let f_res = build_entry_lc env funnames args_res.to_avoid f in combine_results combine_app f_res args_res | GCast(_,b,_) -> (* for an applied cast we just trash the cast part and restart the work. 
WARNING: We need to restart since [b] itself should be an application term *) build_entry_lc env funnames avoid (mkGApp(b,args)) | GRec _ -> error "Not handled GRec" | GProd _ -> error "Cannot apply a type" end (* end of the application treatement *) | GLambda(_,n,_,t,b) -> (* we first compute the list of constructor corresponding to the body of the function, then the one corresponding to the type and combine the two result *) let t_res = build_entry_lc env funnames avoid t in let new_n = match n with | Name _ -> n | Anonymous -> Name (Indfun_common.fresh_id [] "_x") in let new_env = raw_push_named (new_n,None,t) env in let b_res = build_entry_lc new_env funnames avoid b in combine_results (combine_lam new_n) t_res b_res | GProd(_,n,_,t,b) -> (* we first compute the list of constructor corresponding to the body of the function, then the one corresponding to the type and combine the two result *) let t_res = build_entry_lc env funnames avoid t in let new_env = raw_push_named (n,None,t) env in let b_res = build_entry_lc new_env funnames avoid b in combine_results (combine_prod n) t_res b_res | GLetIn(_,n,v,b) -> (* we first compute the list of constructor corresponding to the body of the function, then the one corresponding to the value [t] and combine the two result *) let v_res = build_entry_lc env funnames avoid v in let v_as_constr = Pretyping.Default.understand Evd.empty env v in let v_type = Typing.type_of env Evd.empty v_as_constr in let new_env = match n with Anonymous -> env | Name id -> Environ.push_named (id,Some v_as_constr,v_type) env in let b_res = build_entry_lc new_env funnames avoid b in combine_results (combine_letin n) v_res b_res | GCases(_,_,_,el,brl) -> (* we create the discrimination function and treat the case itself *) let make_discr = make_discr_match brl in build_entry_lc_from_case env funnames make_discr el brl avoid | GIf(_,b,(na,e_option),lhs,rhs) -> let b_as_constr = Pretyping.Default.understand Evd.empty env b in let b_typ = Typing.type_of env Evd.empty b_as_constr in let (ind,_) = try Inductiveops.find_inductive env Evd.empty b_typ with Not_found -> errorlabstrm "" (str "Cannot find the inductive associated to " ++ Printer.pr_glob_constr b ++ str " in " ++ Printer.pr_glob_constr rt ++ str ". try again with a cast") in let case_pats = build_constructors_of_type ind None in assert (Array.length case_pats = 2); let brl = list_map_i (fun i x -> (dummy_loc,[],[case_pats.(i)],x)) 0 [lhs;rhs] in let match_expr = mkGCases(None,[(b,(Anonymous,None))],brl) in (* Pp.msgnl (str "new case := " ++ Printer.pr_glob_constr match_expr); *) build_entry_lc env funnames avoid match_expr | GLetTuple(_,nal,_,b,e) -> begin let nal_as_glob_constr = Some (List.map (function Name id -> mkGVar id | Anonymous -> mkGHole () ) nal) in let b_as_constr = Pretyping.Default.understand Evd.empty env b in let b_typ = Typing.type_of env Evd.empty b_as_constr in let (ind,_) = try Inductiveops.find_inductive env Evd.empty b_typ with Not_found -> errorlabstrm "" (str "Cannot find the inductive associated to " ++ Printer.pr_glob_constr b ++ str " in " ++ Printer.pr_glob_constr rt ++ str ". 
try again with a cast") in let case_pats = build_constructors_of_type ind nal_as_glob_constr in assert (Array.length case_pats = 1); let br = (dummy_loc,[],[case_pats.(0)],e) in let match_expr = mkGCases(None,[b,(Anonymous,None)],[br]) in build_entry_lc env funnames avoid match_expr end | GRec _ -> error "Not handled GRec" | GCast(_,b,_) -> build_entry_lc env funnames avoid b and build_entry_lc_from_case env funname make_discr (el:tomatch_tuples) (brl:Glob_term.cases_clauses) avoid : glob_constr build_entry_return = match el with | [] -> assert false (* this case correspond to match with .... !*) | el -> (* this case correspond to match el with brl end we first compute the list of lists corresponding to [el] and combine them . Then for each elemeent of the combinations, we compute the result we compute one list per branch in [brl] and finally we just concatenate those list *) let case_resl = List.fold_right (fun (case_arg,_) ctxt_argsl -> let arg_res = build_entry_lc env funname avoid case_arg in combine_results combine_args arg_res ctxt_argsl ) el (mk_result [] [] avoid) in let types = List.map (fun (case_arg,_) -> let case_arg_as_constr = Pretyping.Default.understand Evd.empty env case_arg in Typing.type_of env Evd.empty case_arg_as_constr ) el in (****** The next works only if the match is not dependent ****) let results = List.map (fun ca -> let res = build_entry_lc_from_case_term env types funname (make_discr) [] brl case_resl.to_avoid ca in res ) case_resl.result in { result = List.concat (List.map (fun r -> r.result) results); to_avoid = List.fold_left (fun acc r -> list_union acc r.to_avoid) [] results } and build_entry_lc_from_case_term env types funname make_discr patterns_to_prevent brl avoid matched_expr = match brl with | [] -> (* computed_branches *) {result = [];to_avoid = avoid} | br::brl' -> (* alpha convertion to prevent name clashes *) let _,idl,patl,return = alpha_br avoid br in let new_avoid = idl@avoid in (* for now we can no more use idl as an indentifier *) (* building a list of precondition stating that we are not in this branch (will be used in the following recursive calls) *) let new_env = List.fold_right2 add_pat_variables patl types env in let not_those_patterns : (identifier list -> glob_constr -> glob_constr) list = List.map2 (fun pat typ -> fun avoid pat'_as_term -> let renamed_pat,_,_ = alpha_pat avoid pat in let pat_ids = get_pattern_id renamed_pat in let env_with_pat_ids = add_pat_variables pat typ new_env in List.fold_right (fun id acc -> let typ_of_id = Typing.type_of env_with_pat_ids Evd.empty (mkVar id) in let raw_typ_of_id = Detyping.detype false [] (Termops.names_of_rel_context env_with_pat_ids) typ_of_id in mkGProd (Name id,raw_typ_of_id,acc)) pat_ids (glob_make_neq pat'_as_term (pattern_to_term renamed_pat)) ) patl types in (* Checking if we can be in this branch (will be used in the following recursive calls) *) let unify_with_those_patterns : (cases_pattern -> bool*bool) list = List.map (fun pat pat' -> are_unifiable pat pat',eq_cases_pattern pat pat') patl in (* we first compute the other branch result (in ordrer to keep the order of the matching as much as possible) *) let brl'_res = build_entry_lc_from_case_term env types funname make_discr ((unify_with_those_patterns,not_those_patterns)::patterns_to_prevent) brl' avoid matched_expr in (* We now create the precondition of this branch i.e. 1- the list of variable appearing in the different patterns of this branch and the list of equation stating than el = patl (List.flatten ...) 
2- If there exists a previous branch which pattern unify with the one of this branch then a discrimination precond stating that we are not in a previous branch (if List.exists ...) *) let those_pattern_preconds = (List.flatten ( list_map3 (fun pat e typ_as_constr -> let this_pat_ids = ids_of_pat pat in let typ = Detyping.detype false [] (Termops.names_of_rel_context new_env) typ_as_constr in let pat_as_term = pattern_to_term pat in List.fold_right (fun id acc -> if Idset.mem id this_pat_ids then (Prod (Name id), let typ_of_id = Typing.type_of new_env Evd.empty (mkVar id) in let raw_typ_of_id = Detyping.detype false [] (Termops.names_of_rel_context new_env) typ_of_id in raw_typ_of_id )::acc else acc ) idl [(Prod Anonymous,glob_make_eq ~typ pat_as_term e)] ) patl matched_expr.value types ) ) @ (if List.exists (function (unifl,_) -> let (unif,_) = List.split (List.map2 (fun x y -> x y) unifl patl) in List.for_all (fun x -> x) unif) patterns_to_prevent then let i = List.length patterns_to_prevent in let pats_as_constr = List.map2 (pattern_to_term_and_type new_env) types patl in [(Prod Anonymous,make_discr pats_as_constr i )] else [] ) in (* We compute the result of the value returned by the branch*) let return_res = build_entry_lc new_env funname new_avoid return in (* and combine it with the preconds computed for this branch *) let this_branch_res = List.map (fun res -> { context = matched_expr.context@those_pattern_preconds@res.context ; value = res.value} ) return_res.result in { brl'_res with result = this_branch_res@brl'_res.result } let is_res id = try String.sub (string_of_id id) 0 3 = "res" with Invalid_argument _ -> false let same_raw_term rt1 rt2 = match rt1,rt2 with | GRef(_,r1), GRef (_,r2) -> r1=r2 | GHole _, GHole _ -> true | _ -> false let decompose_raw_eq lhs rhs = let rec decompose_raw_eq lhs rhs acc = observe (str "decomposing eq for " ++ pr_glob_constr lhs ++ str " " ++ pr_glob_constr rhs); let (rhd,lrhs) = glob_decompose_app rhs in let (lhd,llhs) = glob_decompose_app lhs in observe (str "lhd := " ++ pr_glob_constr lhd); observe (str "rhd := " ++ pr_glob_constr rhd); observe (str "llhs := " ++ int (List.length llhs)); observe (str "lrhs := " ++ int (List.length lrhs)); let sllhs = List.length llhs in let slrhs = List.length lrhs in if same_raw_term lhd rhd && sllhs = slrhs then (* let _ = assert false in *) List.fold_right2 decompose_raw_eq llhs lrhs acc else (lhs,rhs)::acc in decompose_raw_eq lhs rhs [] exception Continue (* The second phase which reconstruct the real type of the constructor. rebuild the globalized constructors expression. eliminates some meaningless equalities, applies some rewrites...... 
*) let rec rebuild_cons env nb_args relname args crossed_types depth rt = observe (str "rebuilding : " ++ pr_glob_constr rt); match rt with | GProd(_,n,k,t,b) -> let not_free_in_t id = not (is_free_in id t) in let new_crossed_types = t::crossed_types in begin match t with | GApp(_,(GVar(_,res_id) as res_rt),args') when is_res res_id -> begin match args' with | (GVar(_,this_relname))::args' -> (*i The next call to mk_rel_id is valid since we are constructing the graph Ensures by: obvious i*) let new_t = mkGApp(mkGVar(mk_rel_id this_relname),args'@[res_rt]) in let t' = Pretyping.Default.understand Evd.empty env new_t in let new_env = Environ.push_rel (n,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname args new_crossed_types (depth + 1) b in mkGProd(n,new_t,new_b), Idset.filter not_free_in_t id_to_exclude | _ -> (* the first args is the name of the function! *) assert false end | GApp(loc1,GRef(loc2,eq_as_ref),[ty;GVar(loc3,id);rt]) when eq_as_ref = Lazy.force Coqlib.coq_eq_ref && n = Anonymous -> begin try observe (str "computing new type for eq : " ++ pr_glob_constr rt); let t' = try Pretyping.Default.understand Evd.empty env t with e when Errors.noncritical e -> raise Continue in let is_in_b = is_free_in id b in let _keep_eq = not (List.exists (is_free_in id) args) || is_in_b || List.exists (is_free_in id) crossed_types in let new_args = List.map (replace_var_by_term id rt) args in let subst_b = if is_in_b then b else replace_var_by_term id rt b in let new_env = Environ.push_rel (n,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname new_args new_crossed_types (depth + 1) subst_b in mkGProd(n,t,new_b),id_to_exclude with Continue -> let jmeq = Libnames.IndRef (destInd (jmeq ())) in let ty' = Pretyping.Default.understand Evd.empty env ty in let ind,args' = Inductive.find_inductive env ty' in let mib,_ = Global.lookup_inductive ind in let nparam = mib.Declarations.mind_nparams in let params,arg' = ((Util.list_chop nparam args')) in let rt_typ = GApp(Util.dummy_loc, GRef (Util.dummy_loc,Libnames.IndRef ind), (List.map (fun p -> Detyping.detype false [] (Termops.names_of_rel_context env) p) params)@(Array.to_list (Array.make (List.length args' - nparam) (mkGHole ())))) in let eq' = GApp(loc1,GRef(loc2,jmeq),[ty;GVar(loc3,id);rt_typ;rt]) in observe (str "computing new type for jmeq : " ++ pr_glob_constr eq'); let eq'_as_constr = Pretyping.Default.understand Evd.empty env eq' in observe (str " computing new type for jmeq : done") ; let new_args = match kind_of_term eq'_as_constr with | App(_,[|_;_;ty;_|]) -> let ty = Array.to_list (snd (destApp ty)) in let ty' = snd (Util.list_chop nparam ty) in List.fold_left2 (fun acc var_as_constr arg -> if isRel var_as_constr then let (na,_,_) = Environ.lookup_rel (destRel var_as_constr) env in match na with | Anonymous -> acc | Name id' -> (id',Detyping.detype false [] (Termops.names_of_rel_context env) arg)::acc else if isVar var_as_constr then (destVar var_as_constr,Detyping.detype false [] (Termops.names_of_rel_context env) arg)::acc else acc ) [] arg' ty' | _ -> assert false in let is_in_b = is_free_in id b in let _keep_eq = not (List.exists (is_free_in id) args) || is_in_b || List.exists (is_free_in id) crossed_types in let new_args = List.fold_left (fun args (id,rt) -> List.map (replace_var_by_term id rt) args ) args ((id,rt)::new_args) in let subst_b = if is_in_b then b else replace_var_by_term id rt b in let new_env = let t' = Pretyping.Default.understand Evd.empty env eq' in Environ.push_rel 
(n,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname new_args new_crossed_types (depth + 1) subst_b in mkGProd(n,eq',new_b),id_to_exclude end (* J.F:. keep this comment it explain how to remove some meaningless equalities if keep_eq then mkGProd(n,t,new_b),id_to_exclude else new_b, Idset.add id id_to_exclude *) | GApp(loc1,GRef(loc2,eq_as_ref),[ty;rt1;rt2]) when eq_as_ref = Lazy.force Coqlib.coq_eq_ref && n = Anonymous -> begin try let l = decompose_raw_eq rt1 rt2 in if List.length l > 1 then let new_rt = List.fold_left (fun acc (lhs,rhs) -> mkGProd(Anonymous, mkGApp(mkGRef(eq_as_ref),[mkGHole ();lhs;rhs]),acc) ) b l in rebuild_cons env nb_args relname args crossed_types depth new_rt else raise Continue with Continue -> observe (str "computing new type for prod : " ++ pr_glob_constr rt); let t' = Pretyping.Default.understand Evd.empty env t in let new_env = Environ.push_rel (n,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname args new_crossed_types (depth + 1) b in match n with | Name id when Idset.mem id id_to_exclude && depth >= nb_args -> new_b,Idset.remove id (Idset.filter not_free_in_t id_to_exclude) | _ -> mkGProd(n,t,new_b),Idset.filter not_free_in_t id_to_exclude end | _ -> observe (str "computing new type for prod : " ++ pr_glob_constr rt); let t' = Pretyping.Default.understand Evd.empty env t in let new_env = Environ.push_rel (n,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname args new_crossed_types (depth + 1) b in match n with | Name id when Idset.mem id id_to_exclude && depth >= nb_args -> new_b,Idset.remove id (Idset.filter not_free_in_t id_to_exclude) | _ -> mkGProd(n,t,new_b),Idset.filter not_free_in_t id_to_exclude end | GLambda(_,n,k,t,b) -> begin let not_free_in_t id = not (is_free_in id t) in let new_crossed_types = t :: crossed_types in observe (str "computing new type for lambda : " ++ pr_glob_constr rt); let t' = Pretyping.Default.understand Evd.empty env t in match n with | Name id -> let new_env = Environ.push_rel (n,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname (args@[mkGVar id])new_crossed_types (depth + 1 ) b in if Idset.mem id id_to_exclude && depth >= nb_args then new_b, Idset.remove id (Idset.filter not_free_in_t id_to_exclude) else GProd(dummy_loc,n,k,t,new_b),Idset.filter not_free_in_t id_to_exclude | _ -> anomaly "Should not have an anonymous function here" (* We have renamed all the anonymous functions during alpha_renaming phase *) end | GLetIn(_,n,t,b) -> begin let not_free_in_t id = not (is_free_in id t) in let t' = Pretyping.Default.understand Evd.empty env t in let type_t' = Typing.type_of env Evd.empty t' in let new_env = Environ.push_rel (n,Some t',type_t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname args (t::crossed_types) (depth + 1 ) b in match n with | Name id when Idset.mem id id_to_exclude && depth >= nb_args -> new_b,Idset.remove id (Idset.filter not_free_in_t id_to_exclude) | _ -> GLetIn(dummy_loc,n,t,new_b), Idset.filter not_free_in_t id_to_exclude end | GLetTuple(_,nal,(na,rto),t,b) -> assert (rto=None); begin let not_free_in_t id = not (is_free_in id t) in let new_t,id_to_exclude' = rebuild_cons env nb_args relname args (crossed_types) depth t in let t' = Pretyping.Default.understand Evd.empty env new_t in let new_env = Environ.push_rel (na,None,t') env in let new_b,id_to_exclude = rebuild_cons new_env nb_args relname args (t::crossed_types) (depth + 1) b in (* match n with *) (* | Name id 
when Idset.mem id id_to_exclude -> *) (* new_b,Idset.remove id (Idset.filter not_free_in_t id_to_exclude) *) (* | _ -> *) GLetTuple(dummy_loc,nal,(na,None),t,new_b), Idset.filter not_free_in_t (Idset.union id_to_exclude id_to_exclude') end | _ -> mkGApp(mkGVar relname,args@[rt]),Idset.empty (* debuging wrapper *) let rebuild_cons env nb_args relname args crossed_types rt = (* observennl (str "rebuild_cons : rt := "++ pr_glob_constr rt ++ *) (* str "nb_args := " ++ str (string_of_int nb_args)); *) let res = rebuild_cons env nb_args relname args crossed_types 0 rt in (* observe (str " leads to "++ pr_glob_constr (fst res)); *) res (* naive implementation of parameter detection. A parameter is an argument which is only preceded by parameters and whose calls are all syntaxically equal. TODO: Find a valid way to deal with implicit arguments here! *) let rec compute_cst_params relnames params = function | GRef _ | GVar _ | GEvar _ | GPatVar _ -> params | GApp(_,GVar(_,relname'),rtl) when Idset.mem relname' relnames -> compute_cst_params_from_app [] (params,rtl) | GApp(_,f,args) -> List.fold_left (compute_cst_params relnames) params (f::args) | GLambda(_,_,_,t,b) | GProd(_,_,_,t,b) | GLetIn(_,_,t,b) | GLetTuple(_,_,_,t,b) -> let t_params = compute_cst_params relnames params t in compute_cst_params relnames t_params b | GCases _ -> params (* If there is still cases at this point they can only be discriminitation ones *) | GSort _ -> params | GHole _ -> params | GIf _ | GRec _ | GCast _ -> raise (UserError("compute_cst_params", str "Not handled case")) and compute_cst_params_from_app acc (params,rtl) = match params,rtl with | _::_,[] -> assert false (* the rel has at least nargs + 1 arguments ! *) | ((Name id,_,is_defined) as param)::params',(GVar(_,id'))::rtl' when id_ord id id' == 0 && not is_defined -> compute_cst_params_from_app (param::acc) (params',rtl') | _ -> List.rev acc let compute_params_name relnames (args : (Names.name * Glob_term.glob_constr * bool) list array) csts = let rels_params = Array.mapi (fun i args -> List.fold_left (fun params (_,cst) -> compute_cst_params relnames params cst) args csts.(i) ) args in let l = ref [] in let _ = try list_iter_i (fun i ((n,nt,is_defined) as param) -> if array_for_all (fun l -> let (n',nt',is_defined') = List.nth l i in n = n' && Topconstr.eq_glob_constr nt nt' && is_defined = is_defined') rels_params then l := param::!l ) rels_params.(0) with e when Errors.noncritical e -> () in List.rev !l let rec rebuild_return_type rt = match rt with | Topconstr.CProdN(loc,n,t') -> Topconstr.CProdN(loc,n,rebuild_return_type t') | Topconstr.CArrow(loc,t,t') -> Topconstr.CArrow(loc,t,rebuild_return_type t') | Topconstr.CLetIn(loc,na,t,t') -> Topconstr.CLetIn(loc,na,t,rebuild_return_type t') | _ -> Topconstr.CArrow(dummy_loc,rt,Topconstr.CSort(dummy_loc,GType None)) let do_build_inductive funnames (funsargs: (Names.name * glob_constr * bool) list list) returned_types (rtl:glob_constr list) = let _time1 = System.get_time () in (* Pp.msgnl (prlist_with_sep fnl Printer.pr_glob_constr rtl); *) let funnames_as_set = List.fold_right Idset.add funnames Idset.empty in let funnames = Array.of_list funnames in let funsargs = Array.of_list funsargs in let returned_types = Array.of_list returned_types in (* alpha_renaming of the body to prevent variable capture during manipulation *) let rtl_alpha = List.map (function rt -> expand_as (alpha_rt [] rt)) rtl in let rta = Array.of_list rtl_alpha in (*i The next call to mk_rel_id is valid since we are constructing the graph 
Ensures by: obvious i*) let relnames = Array.map mk_rel_id funnames in let relnames_as_set = Array.fold_right Idset.add relnames Idset.empty in (* Construction of the pseudo constructors *) let env = Array.fold_right (fun id env -> Environ.push_named (id,None,Typing.type_of env Evd.empty (Constrintern.global_reference id)) env ) funnames (Global.env ()) in let resa = Array.map (build_entry_lc env funnames_as_set []) rta in let env_with_graphs = let rel_arity i funargs = (* Reduilding arities (with parameters) *) let rel_first_args :(Names.name * Glob_term.glob_constr * bool ) list = funargs in List.fold_right (fun (n,t,is_defined) acc -> if is_defined then Topconstr.CLetIn(dummy_loc,(dummy_loc, n),Constrextern.extern_glob_constr Idset.empty t, acc) else Topconstr.CProdN (dummy_loc, [[(dummy_loc,n)],Topconstr.default_binder_kind,Constrextern.extern_glob_constr Idset.empty t], acc ) ) rel_first_args (rebuild_return_type returned_types.(i)) in (* We need to lift back our work topconstr but only with all information We mimick a Set Printing All. Then save the graphs and reset Printing options to their primitive values *) let rel_arities = Array.mapi rel_arity funsargs in Util.array_fold_left2 (fun env rel_name rel_ar -> Environ.push_named (rel_name,None, Constrintern.interp_constr Evd.empty env rel_ar) env) env relnames rel_arities in (* and of the real constructors*) let constr i res = List.map (function result (* (args',concl') *) -> let rt = compose_glob_context result.context result.value in let nb_args = List.length funsargs.(i) in (* with_full_print (fun rt -> Pp.msgnl (str "glob constr " ++ pr_glob_constr rt)) rt; *) fst ( rebuild_cons env_with_graphs nb_args relnames.(i) [] [] rt ) ) res.result in (* adding names to constructors *) let next_constructor_id = ref (-1) in let mk_constructor_id i = incr next_constructor_id; (*i The next call to mk_rel_id is valid since we are constructing the graph Ensures by: obvious i*) id_of_string ((string_of_id (mk_rel_id funnames.(i)))^"_"^(string_of_int !next_constructor_id)) in let rel_constructors i rt : (identifier*glob_constr) list = next_constructor_id := (-1); List.map (fun constr -> (mk_constructor_id i),constr) (constr i rt) in let rel_constructors = Array.mapi rel_constructors resa in (* Computing the set of parameters if asked *) let rels_params = compute_params_name relnames_as_set funsargs rel_constructors in let nrel_params = List.length rels_params in let rel_constructors = (* Taking into account the parameters in constructors *) Array.map (List.map (fun (id,rt) -> (id,snd (chop_rprod_n nrel_params rt)))) rel_constructors in let rel_arity i funargs = (* Reduilding arities (with parameters) *) let rel_first_args :(Names.name * Glob_term.glob_constr * bool ) list = (snd (list_chop nrel_params funargs)) in List.fold_right (fun (n,t,is_defined) acc -> if is_defined then Topconstr.CLetIn(dummy_loc,(dummy_loc, n),Constrextern.extern_glob_constr Idset.empty t, acc) else Topconstr.CProdN (dummy_loc, [[(dummy_loc,n)],Topconstr.default_binder_kind,Constrextern.extern_glob_constr Idset.empty t], acc ) ) rel_first_args (rebuild_return_type returned_types.(i)) in (* We need to lift back our work topconstr but only with all information We mimick a Set Printing All. 
Then save the graphs and reset Printing options to their primitive values *) let rel_arities = Array.mapi rel_arity funsargs in let rel_params = List.map (fun (n,t,is_defined) -> if is_defined then Topconstr.LocalRawDef((dummy_loc,n), Constrextern.extern_glob_constr Idset.empty t) else Topconstr.LocalRawAssum ([(dummy_loc,n)], Topconstr.default_binder_kind, Constrextern.extern_glob_constr Idset.empty t) ) rels_params in let ext_rels_constructors = Array.map (List.map (fun (id,t) -> false,((dummy_loc,id), Flags.with_option Flags.raw_print (Constrextern.extern_glob_type Idset.empty) ((* zeta_normalize *) t) ) )) (rel_constructors) in let rel_ind i ext_rel_constructors = ((dummy_loc,relnames.(i)), rel_params, Some rel_arities.(i), ext_rel_constructors),[] in let ext_rel_constructors = (Array.mapi rel_ind ext_rels_constructors) in let rel_inds = Array.to_list ext_rel_constructors in (* let _ = *) (* Pp.msgnl (\* observe *\) ( *) (* str "Inductive" ++ spc () ++ *) (* prlist_with_sep *) (* (fun () -> fnl ()++spc () ++ str "with" ++ spc ()) *) (* (function ((_,id),_,params,ar,constr) -> *) (* Ppconstr.pr_id id ++ spc () ++ *) (* Ppconstr.pr_binders params ++ spc () ++ *) (* str ":" ++ spc () ++ *) (* Ppconstr.pr_lconstr_expr ar ++ spc () ++ str ":=" ++ *) (* prlist_with_sep *) (* (fun _ -> fnl () ++ spc () ++ str "|" ++ spc ()) *) (* (function (_,((_,id),t)) -> *) (* Ppconstr.pr_id id ++ spc () ++ str ":" ++ spc () ++ *) (* Ppconstr.pr_lconstr_expr t) *) (* constr *) (* ) *) (* rel_inds *) (* ) *) (* in *) let _time2 = System.get_time () in try with_full_print (Flags.silently (Command.do_mutual_inductive rel_inds)) true with | UserError(s,msg) as e -> let _time3 = System.get_time () in (* Pp.msgnl (str "error : "++ str (string_of_float (System.time_difference time2 time3))); *) let repacked_rel_inds = List.map (fun ((a , b , c , l),ntn) -> ((false,a) , b, c , Vernacexpr.Inductive_kw, Vernacexpr.Constructors l),ntn ) rel_inds in let msg = str "while trying to define"++ spc () ++ Ppvernac.pr_vernac (Vernacexpr.VernacInductive(Decl_kinds.Finite,false,repacked_rel_inds)) ++ fnl () ++ msg in observe (msg); raise e | reraise -> let _time3 = System.get_time () in (* Pp.msgnl (str "error : "++ str (string_of_float (System.time_difference time2 time3))); *) let repacked_rel_inds = List.map (fun ((a , b , c , l),ntn) -> ((false,a) , b, c , Vernacexpr.Inductive_kw, Vernacexpr.Constructors l),ntn ) rel_inds in let msg = str "while trying to define"++ spc () ++ Ppvernac.pr_vernac (Vernacexpr.VernacInductive(Decl_kinds.Finite,false,repacked_rel_inds)) ++ fnl () ++ Errors.print reraise in observe msg; raise reraise let build_inductive funnames funsargs returned_types rtl = try do_build_inductive funnames funsargs returned_types rtl with e when Errors.noncritical e -> raise (Building_graph e) coq-8.4pl2/plugins/funind/vo.itarget0000640000175000001440000000001211307752066016600 0ustar notinusersRecdef.vo coq-8.4pl2/plugins/funind/g_indfun.ml40000640000175000001440000003514712121620060017002 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* hov 1 (Ppconstr.pr_id id ++ str " := " ++ cut () ++ prc c) | loc, Glob_term.AnonHyp n, c -> hov 1 (int n ++ str " := " ++ cut () ++ prc c) let pr_bindings prc prlc = function | Glob_term.ImplicitBindings l -> brk (1,1) ++ str "with" ++ brk (1,1) ++ Util.prlist_with_sep spc prc l | Glob_term.ExplicitBindings l -> brk (1,1) ++ str "with" ++ brk (1,1) ++ Util.prlist_with_sep 
spc (fun b -> str"(" ++ pr_binding prlc b ++ str")") l | Glob_term.NoBindings -> mt () let pr_with_bindings prc prlc (c,bl) = prc c ++ hv 0 (pr_bindings prc prlc bl) let pr_fun_ind_using prc prlc _ opt_c = match opt_c with | None -> mt () | Some b -> spc () ++ hov 2 (str "using" ++ spc () ++ pr_with_bindings prc prlc b) (* Duplication of printing functions because "'a with_bindings" is (internally) not uniform in 'a: indeed constr_with_bindings at the "typed" level has type "open_constr with_bindings" instead of "constr with_bindings"; hence, its printer cannot be polymorphic in (prc,prlc)... *) let pr_with_bindings_typed prc prlc (c,bl) = prc c ++ hv 0 (pr_bindings prc prlc bl) let pr_fun_ind_using_typed prc prlc _ opt_c = match opt_c with | None -> mt () | Some b -> spc () ++ hov 2 (str "using" ++ spc () ++ pr_with_bindings_typed prc prlc b.Evd.it) ARGUMENT EXTEND fun_ind_using PRINTED BY pr_fun_ind_using_typed RAW_TYPED AS constr_with_bindings_opt RAW_PRINTED BY pr_fun_ind_using GLOB_TYPED AS constr_with_bindings_opt GLOB_PRINTED BY pr_fun_ind_using | [ "using" constr_with_bindings(c) ] -> [ Some c ] | [ ] -> [ None ] END TACTIC EXTEND newfuninv [ "functional" "inversion" quantified_hypothesis(hyp) reference_opt(fname) ] -> [ Invfun.invfun hyp fname ] END let pr_intro_as_pat prc _ _ pat = match pat with | Some pat -> spc () ++ str "as" ++ spc () ++ pr_intro_pattern pat | None -> mt () ARGUMENT EXTEND with_names TYPED AS intro_pattern_opt PRINTED BY pr_intro_as_pat | [ "as" simple_intropattern(ipat) ] -> [ Some ipat ] | [] ->[ None ] END TACTIC EXTEND newfunind ["functional" "induction" ne_constr_list(cl) fun_ind_using(princl) with_names(pat)] -> [ let c = match cl with | [] -> assert false | [c] -> c | c::cl -> applist(c,cl) in Extratactics.onSomeWithHoles (fun x -> functional_induction true c x pat) princl ] END (***** debug only ***) TACTIC EXTEND snewfunind ["soft" "functional" "induction" ne_constr_list(cl) fun_ind_using(princl) with_names(pat)] -> [ let c = match cl with | [] -> assert false | [c] -> c | c::cl -> applist(c,cl) in Extratactics.onSomeWithHoles (fun x -> functional_induction false c x pat) princl ] END let pr_constr_coma_sequence prc _ _ = Util.prlist_with_sep Util.pr_comma prc ARGUMENT EXTEND constr_coma_sequence' TYPED AS constr_list PRINTED BY pr_constr_coma_sequence | [ constr(c) "," constr_coma_sequence'(l) ] -> [ c::l ] | [ constr(c) ] -> [ [c] ] END let pr_auto_using prc _prlc _prt = Pptactic.pr_auto_using prc ARGUMENT EXTEND auto_using' TYPED AS constr_list PRINTED BY pr_auto_using | [ "using" constr_coma_sequence'(l) ] -> [ l ] | [ ] -> [ [] ] END module Gram = Pcoq.Gram module Vernac = Pcoq.Vernac_ module Tactic = Pcoq.Tactic module FunctionGram = struct let gec s = Gram.entry_create ("Function."^s) (* types *) let function_rec_definition_loc : (Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) located Gram.entry = gec "function_rec_definition_loc" end open FunctionGram GEXTEND Gram GLOBAL: function_rec_definition_loc ; function_rec_definition_loc: [ [ g = Vernac.rec_definition -> loc, g ]] ; END type 'a function_rec_definition_loc_argtype = ((Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) located, 'a) Genarg.abstract_argument_type let (wit_function_rec_definition_loc : Genarg.tlevel function_rec_definition_loc_argtype), (globwit_function_rec_definition_loc : Genarg.glevel function_rec_definition_loc_argtype), (rawwit_function_rec_definition_loc : Genarg.rlevel function_rec_definition_loc_argtype) = Genarg.create_arg None 
"function_rec_definition_loc" VERNAC COMMAND EXTEND Function ["Function" ne_function_rec_definition_loc_list_sep(recsl,"with")] -> [ do_generate_principle false (List.map snd recsl); ] END let pr_fun_scheme_arg (princ_name,fun_name,s) = Nameops.pr_id princ_name ++ str " :=" ++ spc() ++ str "Induction for " ++ Libnames.pr_reference fun_name ++ spc() ++ str "Sort " ++ Ppconstr.pr_glob_sort s VERNAC ARGUMENT EXTEND fun_scheme_arg PRINTED BY pr_fun_scheme_arg | [ ident(princ_name) ":=" "Induction" "for" reference(fun_name) "Sort" sort(s) ] -> [ (princ_name,fun_name,s) ] END let warning_error names e = let e = Cerrors.process_vernac_interp_error e in match e with | Building_graph e -> Pp.msg_warning (str "Cannot define graph(s) for " ++ h 1 (prlist_with_sep (fun _ -> str","++spc ()) Libnames.pr_reference names) ++ if do_observe () then (spc () ++ Errors.print e) else mt ()) | Defining_principle e -> Pp.msg_warning (str "Cannot define principle(s) for "++ h 1 (prlist_with_sep (fun _ -> str","++spc ()) Libnames.pr_reference names) ++ if do_observe () then Errors.print e else mt ()) | _ -> raise e VERNAC COMMAND EXTEND NewFunctionalScheme ["Functional" "Scheme" ne_fun_scheme_arg_list_sep(fas,"with") ] -> [ begin try Functional_principles_types.build_scheme fas with Functional_principles_types.No_graph_found -> begin match fas with | (_,fun_name,_)::_ -> begin begin make_graph (Nametab.global fun_name) end ; try Functional_principles_types.build_scheme fas with Functional_principles_types.No_graph_found -> Util.error ("Cannot generate induction principle(s)") | e when Errors.noncritical e -> let names = List.map (fun (_,na,_) -> na) fas in warning_error names e end | _ -> assert false (* we can only have non empty list *) end | e when Errors.noncritical e -> let names = List.map (fun (_,na,_) -> na) fas in warning_error names e end ] END (***** debug only ***) VERNAC COMMAND EXTEND NewFunctionalCase ["Functional" "Case" fun_scheme_arg(fas) ] -> [ Functional_principles_types.build_case_scheme fas ] END (***** debug only ***) VERNAC COMMAND EXTEND GenerateGraph ["Generate" "graph" "for" reference(c)] -> [ make_graph (Nametab.global c) ] END (* FINDUCTION *) (* comment this line to see debug msgs *) let msg x = () ;; let pr_lconstr c = str "" (* uncomment this to see debugging *) let prconstr c = msg (str" " ++ Printer.pr_lconstr c ++ str"\n") let prlistconstr lc = List.iter prconstr lc let prstr s = msg(str s) let prNamedConstr s c = begin msg(str ""); msg(str(s^"==>\n ") ++ Printer.pr_lconstr c ++ str "\n<==\n"); msg(str ""); end (** Information about an occurrence of a function call (application) inside a term. *) type fapp_info = { fname: constr; (** The function applied *) largs: constr list; (** List of arguments *) free: bool; (** [true] if all arguments are debruijn free *) max_rel: int; (** max debruijn index in the funcall *) onlyvars: bool (** [true] if all arguments are variables (and not debruijn) *) } (** [constr_head_match(a b c) a] returns true, false otherwise. *) let constr_head_match u t= if isApp u then let uhd,args= destApp u in uhd=t else false (** [hdMatchSub inu t] returns the list of occurrences of [t] in [inu]. DeBruijn are not pushed, so some of them may be unbound in the result. 
*) let rec hdMatchSub inu (test: constr -> bool) : fapp_info list = let subres = match kind_of_term inu with | Lambda (nm,tp,cstr) | Prod (nm,tp,cstr) -> hdMatchSub tp test @ hdMatchSub (lift 1 cstr) test | Fix (_,(lna,tl,bl)) -> (* not sure Fix is correct *) Array.fold_left (fun acc cstr -> acc @ hdMatchSub (lift (Array.length tl) cstr) test) [] bl | _ -> (* Cofix will be wrong *) fold_constr (fun l cstr -> l @ hdMatchSub cstr test) [] inu in if not (test inu) then subres else let f,args = decompose_app inu in let freeset = Termops.free_rels inu in let max_rel = try Util.Intset.max_elt freeset with Not_found -> -1 in {fname = f; largs = args; free = Util.Intset.is_empty freeset; max_rel = max_rel; onlyvars = List.for_all isVar args } ::subres let mkEq typ c1 c2 = mkApp (Coqlib.build_coq_eq(),[| typ; c1; c2|]) let poseq_unsafe idunsafe cstr gl = let typ = Tacmach.pf_type_of gl cstr in tclTHEN (Tactics.letin_tac None (Name idunsafe) cstr None allHypsAndConcl) (tclTHENFIRST (Tactics.assert_tac Anonymous (mkEq typ (mkVar idunsafe) cstr)) Tactics.reflexivity) gl let poseq id cstr gl = let x = Tactics.fresh_id [] id gl in poseq_unsafe x cstr gl (* dirty? *) let list_constr_largs = ref [] let rec poseq_list_ids_rec lcstr gl = match lcstr with | [] -> tclIDTAC gl | c::lcstr' -> match kind_of_term c with | Var _ -> (list_constr_largs:=c::!list_constr_largs ; poseq_list_ids_rec lcstr' gl) | _ -> let _ = prstr "c = " in let _ = prconstr c in let _ = prstr "\n" in let typ = Tacmach.pf_type_of gl c in let cname = Namegen.id_of_name_using_hdchar (Global.env()) typ Anonymous in let x = Tactics.fresh_id [] cname gl in let _ = list_constr_largs:=mkVar x :: !list_constr_largs in let _ = prstr " list_constr_largs = " in let _ = prlistconstr !list_constr_largs in let _ = prstr "\n" in tclTHEN (poseq_unsafe x c) (poseq_list_ids_rec lcstr') gl let poseq_list_ids lcstr gl = let _ = list_constr_largs := [] in poseq_list_ids_rec lcstr gl (** [find_fapp test g] returns the list of [app_info] of all calls to functions that satisfy [test] in the conclusion of goal g. Trivial repetition (not modulo conversion) are deleted. *) let find_fapp (test:constr -> bool) g : fapp_info list = let pre_res = hdMatchSub (Tacmach.pf_concl g) test in let res = List.fold_right (fun x acc -> if List.mem x acc then acc else x::acc) pre_res [] in (prlistconstr (List.map (fun x -> applist (x.fname,x.largs)) res); res) (** [finduction id filter g] tries to apply functional induction on an occurence of function [id] in the conclusion of goal [g]. If [id]=[None] then calls to any function are selected. In any case [heuristic] is used to select the most pertinent occurrence. 
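    In the code below, each non-variable argument of the selected call is first
    bound to a fresh variable (together with a proved equation) by
    [poseq_list_ids], so that functional induction is applied to variables;
    [functional_induction] is then attempted on each candidate occurrence and
    the first success is kept ([tclFIRST]).  The tactic is exposed to the user
    as finduction f n, where the optional numeral n selects the n-th matching
    call in the goal.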
*) let finduction (oid:identifier option) (heuristic: fapp_info list -> fapp_info list) (nexttac:Proof_type.tactic) g = let test = match oid with | Some id -> let idconstr = mkConst (const_of_id id) in (fun u -> constr_head_match u idconstr) (* select only id *) | None -> (fun u -> isApp u) in (* select calls to any function *) let info_list = find_fapp test g in let ordered_info_list = heuristic info_list in prlistconstr (List.map (fun x -> applist (x.fname,x.largs)) ordered_info_list); if List.length ordered_info_list = 0 then Util.error "function not found in goal\n"; let taclist: Proof_type.tactic list = List.map (fun info -> (tclTHEN (tclTHEN (poseq_list_ids info.largs) ( fun gl -> (functional_induction true (applist (info.fname, List.rev !list_constr_largs)) None None) gl)) nexttac)) ordered_info_list in (* we try each (f t u v) until one does not fail *) (* TODO: try also to mix functional schemes *) tclFIRST taclist g (** [chose_heuristic oi x] returns the heuristic for reordering (and/or forgetting some elts of) a list of occurrences of function calls infos to chose first with functional induction. *) let chose_heuristic (oi:int option) : fapp_info list -> fapp_info list = match oi with | Some i -> (fun l -> [ List.nth l (i-1) ]) (* occurrence was given by the user *) | None -> (* Default heuristic: put first occurrences where all arguments are *bound* (meaning already introduced) variables *) let ordering x y = if x.free && x.onlyvars && y.free && y.onlyvars then 0 (* both pertinent *) else if x.free && x.onlyvars then -1 else if y.free && y.onlyvars then 1 else 0 (* both not pertinent *) in List.sort ordering TACTIC EXTEND finduction ["finduction" ident(id) natural_opt(oi)] -> [ match oi with | Some(n) when n<=0 -> Util.error "numerical argument must be > 0" | _ -> let heuristic = chose_heuristic oi in finduction (Some id) heuristic tclIDTAC ] END TACTIC EXTEND fauto [ "fauto" tactic(tac)] -> [ let heuristic = chose_heuristic None in finduction None heuristic (Tacinterp.eval_tactic tac) ] | [ "fauto" ] -> [ let heuristic = chose_heuristic None in finduction None heuristic tclIDTAC ] END TACTIC EXTEND poseq [ "poseq" ident(x) constr(c) ] -> [ poseq x c ] END VERNAC COMMAND EXTEND Showindinfo [ "showindinfo" ident(x) ] -> [ Merge.showind x ] END VERNAC COMMAND EXTEND MergeFunind [ "Mergeschemes" "(" ident(id1) ne_ident_list(cl1) ")" "with" "(" ident(id2) ne_ident_list(cl2) ")" "using" ident(id) ] -> [ let f1 = Constrintern.interp_constr Evd.empty (Global.env()) (CRef (Libnames.Ident (Util.dummy_loc,id1))) in let f2 = Constrintern.interp_constr Evd.empty (Global.env()) (CRef (Libnames.Ident (Util.dummy_loc,id2))) in let f1type = Typing.type_of (Global.env()) Evd.empty f1 in let f2type = Typing.type_of (Global.env()) Evd.empty f2 in let ar1 = List.length (fst (decompose_prod f1type)) in let ar2 = List.length (fst (decompose_prod f2type)) in let _ = if ar1 <> List.length cl1 then Util.error ("not the right number of arguments for " ^ string_of_id id1) in let _ = if ar2 <> List.length cl2 then Util.error ("not the right number of arguments for " ^ string_of_id id2) in Merge.merge id1 id2 (Array.of_list cl1) (Array.of_list cl2) id ] END coq-8.4pl2/plugins/funind/functional_principles_types.ml0000640000175000001440000005567512121620060022753 0ustar notinusersopen Printer open Util open Term open Namegen open Names open Declarations open Pp open Entries open Hiddentac open Evd open Tacmach open Proof_type open Tacticals open Tactics open Indfun_common open Functional_principles_proofs 
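(* Overview (informal): this file derives functional induction principles from
   the induction principles of the graph inductives built by the plugin.
   [compute_new_princ_type_from_rel] rewrites a graph induction principle into
   a principle stated on the functions themselves; [generate_functional_principle]
   proves and registers it, additionally registering Prop and Set variants when
   no sort is imposed; [make_scheme] and [build_scheme] implement the
   Functional Scheme command, possibly over a block of mutually recursive
   functions, and [build_case_scheme] its case-analysis variant.
   Illustrative use from the Coq side (standard example, not taken from this
   file):
     Fixpoint div2 (n : nat) : nat :=
       match n with
       | 0 => 0
       | 1 => 0
       | S (S p) => S (div2 p)
       end.
     Functional Scheme div2_ind := Induction for div2 Sort Prop. *)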
exception Toberemoved_with_rel of int*constr exception Toberemoved let pr_elim_scheme el = let env = Global.env () in let msg = str "params := " ++ Printer.pr_rel_context env el.params in let env = Environ.push_rel_context el.params env in let msg = msg ++ fnl () ++ str "predicates := "++ Printer.pr_rel_context env el.predicates in let env = Environ.push_rel_context el.predicates env in let msg = msg ++ fnl () ++ str "branches := " ++ Printer.pr_rel_context env el.branches in let env = Environ.push_rel_context el.branches env in let msg = msg ++ fnl () ++ str "args := " ++ Printer.pr_rel_context env el.args in let env = Environ.push_rel_context el.args env in msg ++ fnl () ++ str "concl := " ++ pr_lconstr_env env el.concl let observe s = if do_observe () then Pp.msgnl s let pr_elim_scheme el = let env = Global.env () in let msg = str "params := " ++ Printer.pr_rel_context env el.params in let env = Environ.push_rel_context el.params env in let msg = msg ++ fnl () ++ str "predicates := "++ Printer.pr_rel_context env el.predicates in let env = Environ.push_rel_context el.predicates env in let msg = msg ++ fnl () ++ str "branches := " ++ Printer.pr_rel_context env el.branches in let env = Environ.push_rel_context el.branches env in let msg = msg ++ fnl () ++ str "args := " ++ Printer.pr_rel_context env el.args in let env = Environ.push_rel_context el.args env in msg ++ fnl () ++ str "concl := " ++ pr_lconstr_env env el.concl let observe s = if do_observe () then Pp.msgnl s (* Transform an inductive induction principle into a functional one *) let compute_new_princ_type_from_rel rel_to_fun sorts princ_type = let princ_type_info = compute_elim_sig princ_type in let env = Global.env () in let env_with_params = Environ.push_rel_context princ_type_info.params env in let tbl = Hashtbl.create 792 in let rec change_predicates_names (avoid:identifier list) (predicates:rel_context) : rel_context = match predicates with | [] -> [] |(Name x,v,t)::predicates -> let id = Namegen.next_ident_away x avoid in Hashtbl.add tbl id x; (Name id,v,t)::(change_predicates_names (id::avoid) predicates) | (Anonymous,_,_)::_ -> anomaly "Anonymous property binder " in let avoid = (Termops.ids_of_context env_with_params ) in let princ_type_info = { princ_type_info with predicates = change_predicates_names avoid princ_type_info.predicates } in (* observe (str "starting princ_type := " ++ pr_lconstr_env env princ_type); *) (* observe (str "princ_infos : " ++ pr_elim_scheme princ_type_info); *) let change_predicate_sort i (x,_,t) = let new_sort = sorts.(i) in let args,_ = decompose_prod t in let real_args = if princ_type_info.indarg_in_concl then List.tl args else args in Nameops.out_name x,None,compose_prod real_args (mkSort new_sort) in let new_predicates = list_map_i change_predicate_sort 0 princ_type_info.predicates in let env_with_params_and_predicates = List.fold_right Environ.push_named new_predicates env_with_params in let rel_as_kn = fst (match princ_type_info.indref with | Some (Libnames.IndRef ind) -> ind | _ -> error "Not a valid predicate" ) in let ptes_vars = List.map (fun (id,_,_) -> id) new_predicates in let is_pte = let set = List.fold_right Idset.add ptes_vars Idset.empty in fun t -> match kind_of_term t with | Var id -> Idset.mem id set | _ -> false in let pre_princ = it_mkProd_or_LetIn (it_mkProd_or_LetIn (Option.fold_right mkProd_or_LetIn princ_type_info.indarg princ_type_info.concl ) princ_type_info.args ) princ_type_info.branches in let pre_princ = substl (List.map mkVar ptes_vars) pre_princ in let 
is_dom c = match kind_of_term c with | Ind((u,_)) -> u = rel_as_kn | Construct((u,_),_) -> u = rel_as_kn | _ -> false in let get_fun_num c = match kind_of_term c with | Ind(_,num) -> num | Construct((_,num),_) -> num | _ -> assert false in let dummy_var = mkVar (id_of_string "________") in let mk_replacement c i args = let res = mkApp(rel_to_fun.(i), Array.map Termops.pop (array_get_start args)) in (* observe (str "replacing " ++ pr_lconstr c ++ str " by " ++ pr_lconstr res); *) res in let rec compute_new_princ_type remove env pre_princ : types*(constr list) = let (new_princ_type,_) as res = match kind_of_term pre_princ with | Rel n -> begin try match Environ.lookup_rel n env with | _,_,t when is_dom t -> raise Toberemoved | _ -> pre_princ,[] with Not_found -> assert false end | Prod(x,t,b) -> compute_new_princ_type_for_binder remove mkProd env x t b | Lambda(x,t,b) -> compute_new_princ_type_for_binder remove mkLambda env x t b | Ind _ | Construct _ when is_dom pre_princ -> raise Toberemoved | App(f,args) when is_dom f -> let var_to_be_removed = destRel (array_last args) in let num = get_fun_num f in raise (Toberemoved_with_rel (var_to_be_removed,mk_replacement pre_princ num args)) | App(f,args) -> let args = if is_pte f && remove then array_get_start args else args in let new_args,binders_to_remove = Array.fold_right (compute_new_princ_type_with_acc remove env) args ([],[]) in let new_f,binders_to_remove_from_f = compute_new_princ_type remove env f in applist(new_f, new_args), list_union_eq eq_constr binders_to_remove_from_f binders_to_remove | LetIn(x,v,t,b) -> compute_new_princ_type_for_letin remove env x v t b | _ -> pre_princ,[] in (* let _ = match kind_of_term pre_princ with *) (* | Prod _ -> *) (* observe(str "compute_new_princ_type for "++ *) (* pr_lconstr_env env pre_princ ++ *) (* str" is "++ *) (* pr_lconstr_env env new_princ_type ++ fnl ()) *) (* | _ -> () in *) res and compute_new_princ_type_for_binder remove bind_fun env x t b = begin try let new_t,binders_to_remove_from_t = compute_new_princ_type remove env t in let new_x : name = get_name (Termops.ids_of_context env) x in let new_env = Environ.push_rel (x,None,t) env in let new_b,binders_to_remove_from_b = compute_new_princ_type remove new_env b in if List.exists (eq_constr (mkRel 1)) binders_to_remove_from_b then (Termops.pop new_b), filter_map (eq_constr (mkRel 1)) Termops.pop binders_to_remove_from_b else ( bind_fun(new_x,new_t,new_b), list_union_eq eq_constr binders_to_remove_from_t (List.map Termops.pop binders_to_remove_from_b) ) with | Toberemoved -> (* observe (str "Decl of "++Ppconstr.pr_name x ++ str " is removed "); *) let new_b,binders_to_remove_from_b = compute_new_princ_type remove env (substnl [dummy_var] 1 b) in new_b, List.map Termops.pop binders_to_remove_from_b | Toberemoved_with_rel (n,c) -> (* observe (str "Decl of "++Ppconstr.pr_name x ++ str " is removed "); *) let new_b,binders_to_remove_from_b = compute_new_princ_type remove env (substnl [c] n b) in new_b, list_add_set_eq eq_constr (mkRel n) (List.map Termops.pop binders_to_remove_from_b) end and compute_new_princ_type_for_letin remove env x v t b = begin try let new_t,binders_to_remove_from_t = compute_new_princ_type remove env t in let new_v,binders_to_remove_from_v = compute_new_princ_type remove env v in let new_x : name = get_name (Termops.ids_of_context env) x in let new_env = Environ.push_rel (x,Some v,t) env in let new_b,binders_to_remove_from_b = compute_new_princ_type remove new_env b in if List.exists (eq_constr (mkRel 1)) 
binders_to_remove_from_b then (Termops.pop new_b),filter_map (eq_constr (mkRel 1)) Termops.pop binders_to_remove_from_b else ( mkLetIn(new_x,new_v,new_t,new_b), list_union_eq eq_constr (list_union_eq eq_constr binders_to_remove_from_t binders_to_remove_from_v) (List.map Termops.pop binders_to_remove_from_b) ) with | Toberemoved -> (* observe (str "Decl of "++Ppconstr.pr_name x ++ str " is removed "); *) let new_b,binders_to_remove_from_b = compute_new_princ_type remove env (substnl [dummy_var] 1 b) in new_b, List.map Termops.pop binders_to_remove_from_b | Toberemoved_with_rel (n,c) -> (* observe (str "Decl of "++Ppconstr.pr_name x ++ str " is removed "); *) let new_b,binders_to_remove_from_b = compute_new_princ_type remove env (substnl [c] n b) in new_b, list_add_set_eq eq_constr (mkRel n) (List.map Termops.pop binders_to_remove_from_b) end and compute_new_princ_type_with_acc remove env e (c_acc,to_remove_acc) = let new_e,to_remove_from_e = compute_new_princ_type remove env e in new_e::c_acc,list_union_eq eq_constr to_remove_from_e to_remove_acc in (* observe (str "Computing new principe from " ++ pr_lconstr_env env_with_params_and_predicates pre_princ); *) let pre_res,_ = compute_new_princ_type princ_type_info.indarg_in_concl env_with_params_and_predicates pre_princ in let pre_res = replace_vars (list_map_i (fun i id -> (id, mkRel i)) 1 ptes_vars) (lift (List.length ptes_vars) pre_res) in it_mkProd_or_LetIn (it_mkProd_or_LetIn pre_res (List.map (fun (id,t,b) -> Name(Hashtbl.find tbl id), t,b) new_predicates) ) princ_type_info.params let change_property_sort toSort princ princName = let princ_info = compute_elim_sig princ in let change_sort_in_predicate (x,v,t) = (x,None, let args,_ = decompose_prod t in compose_prod args (mkSort toSort) ) in let princName_as_constr = Constrintern.global_reference princName in let init = let nargs = (princ_info.nparams + (List.length princ_info.predicates)) in mkApp(princName_as_constr, Array.init nargs (fun i -> mkRel (nargs - i ))) in it_mkLambda_or_LetIn (it_mkLambda_or_LetIn init (List.map change_sort_in_predicate princ_info.predicates) ) princ_info.params let pp_dur time time' = str (string_of_float (System.time_difference time time')) (* let qed () = save_named true *) let defined () = try Lemmas.save_named false with | UserError("extract_proof",msg) -> Util.errorlabstrm "defined" ((try str "On goal : " ++ fnl () ++ pr_open_subgoals () ++ fnl () with e when Errors.noncritical e -> mt () ) ++msg) let build_functional_principle interactive_proof old_princ_type sorts funs i proof_tac hook = (* First we get the type of the old graph principle *) let mutr_nparams = (compute_elim_sig old_princ_type).nparams in (* let time1 = System.get_time () in *) let new_principle_type = compute_new_princ_type_from_rel (Array.map mkConst funs) sorts old_princ_type in (* let time2 = System.get_time () in *) (* Pp.msgnl (str "computing principle type := " ++ System.fmt_time_difference time1 time2); *) observe (str "new_principle_type : " ++ pr_lconstr new_principle_type); let new_princ_name = next_ident_away_in_goal (id_of_string "___________princ_________") [] in begin Lemmas.start_proof new_princ_name (Decl_kinds.Global,(Decl_kinds.Proof Decl_kinds.Theorem)) new_principle_type (hook new_principle_type) ; (* let _tim1 = System.get_time () in *) Pfedit.by (proof_tac (Array.map mkConst funs) mutr_nparams); (* let _tim2 = System.get_time () in *) (* begin *) (* let dur1 = System.time_difference tim1 tim2 in *) (* Pp.msgnl (str ("Time to compute proof: ") ++ str 
(string_of_float dur1)); *) (* end; *) get_proof_clean true end let generate_functional_principle interactive_proof old_princ_type sorts new_princ_name funs i proof_tac = try let f = funs.(i) in let type_sort = Termops.new_sort_in_family InType in let new_sorts = match sorts with | None -> Array.make (Array.length funs) (type_sort) | Some a -> a in let base_new_princ_name,new_princ_name = match new_princ_name with | Some (id) -> id,id | None -> let id_of_f = id_of_label (con_label f) in id_of_f,Indrec.make_elimination_ident id_of_f (family_of_sort type_sort) in let names = ref [new_princ_name] in let hook new_principle_type _ _ = if sorts = None then (* let id_of_f = id_of_label (con_label f) in *) let register_with_sort fam_sort = let s = Termops.new_sort_in_family fam_sort in let name = Indrec.make_elimination_ident base_new_princ_name fam_sort in let value = change_property_sort s new_principle_type new_princ_name in (* Pp.msgnl (str "new principle := " ++ pr_lconstr value); *) let ce = { const_entry_body = value; const_entry_secctx = None; const_entry_type = None; const_entry_opaque = false } in ignore( Declare.declare_constant name (Entries.DefinitionEntry ce, Decl_kinds.IsDefinition (Decl_kinds.Scheme) ) ); Flags.if_verbose (fun id -> Pp.msgnl (Ppconstr.pr_id id ++ str " is defined")) name; names := name :: !names in register_with_sort InProp; register_with_sort InSet in let (id,(entry,g_kind,hook)) = build_functional_principle interactive_proof old_princ_type new_sorts funs i proof_tac hook in (* Pr 1278 : Don't forget to close the goal if an error is raised !!!! *) save false new_princ_name entry g_kind hook with e when Errors.noncritical e -> begin begin try let id = Pfedit.get_current_proof_name () in let s = string_of_id id in let n = String.length "___________princ_________" in if String.length s >= n then if String.sub s 0 n = "___________princ_________" then Pfedit.delete_current_proof () else () else () with e when Errors.noncritical e -> () end; raise (Defining_principle e) end (* defined () *) exception Not_Rec let get_funs_constant mp dp = let rec get_funs_constant const e : (Names.constant*int) array = match kind_of_term ((strip_lam e)) with | Fix((_,(na,_,_))) -> Array.mapi (fun i na -> match na with | Name id -> let const = make_con mp dp (label_of_id id) in const,i | Anonymous -> anomaly "Anonymous fix" ) na | _ -> [|const,0|] in function const -> let find_constant_body const = match body_of_constant (Global.lookup_constant const) with | Some b -> let body = force b in let body = Tacred.cbv_norm_flags (Closure.RedFlags.mkflags [Closure.RedFlags.fZETA]) (Global.env ()) (Evd.empty) body in body | None -> error ( "Cannot define a principle over an axiom ") in let f = find_constant_body const in let l_const = get_funs_constant const f in (* We need to check that all the functions found are in the same block to prevent Reset stange thing *) let l_bodies = List.map find_constant_body (Array.to_list (Array.map fst l_const)) in let l_params,l_fixes = List.split (List.map decompose_lam l_bodies) in (* all the paremeter must be equal*) let _check_params = let first_params = List.hd l_params in List.iter (fun params -> if not (list_equal (fun (n1, c1) (n2, c2) -> n1 = n2 && eq_constr c1 c2) first_params params) then error "Not a mutal recursive block" ) l_params in (* The bodies has to be very similar *) let _check_bodies = try let extract_info is_first body = match kind_of_term body with | Fix((idxs,_),(na,ta,ca)) -> (idxs,na,ta,ca) | _ -> if is_first && (List.length l_bodies = 
1) then raise Not_Rec else error "Not a mutal recursive block" in let first_infos = extract_info true (List.hd l_bodies) in let check body = (* Hope this is correct *) let eq_infos (ia1, na1, ta1, ca1) (ia2, na2, ta2, ca2) = ia1 = ia2 && na1 = na2 && array_equal eq_constr ta1 ta2 && array_equal eq_constr ca1 ca2 in if not (eq_infos first_infos (extract_info false body)) then error "Not a mutal recursive block" in List.iter check l_bodies with Not_Rec -> () in l_const exception No_graph_found exception Found_type of int let make_scheme (fas : (constant*Glob_term.glob_sort) list) : Entries.definition_entry list = let env = Global.env () and sigma = Evd.empty in let funs = List.map fst fas in let first_fun = List.hd funs in let funs_mp,funs_dp,_ = Names.repr_con first_fun in let first_fun_kn = try fst (find_Function_infos first_fun).graph_ind with Not_found -> raise No_graph_found in let this_block_funs_indexes = get_funs_constant funs_mp funs_dp first_fun in let this_block_funs = Array.map fst this_block_funs_indexes in let prop_sort = InProp in let funs_indexes = let this_block_funs_indexes = Array.to_list this_block_funs_indexes in List.map (function const -> List.assoc const this_block_funs_indexes) funs in let ind_list = List.map (fun (idx) -> let ind = first_fun_kn,idx in ind,true,prop_sort ) funs_indexes in let l_schemes = List.map (Typing.type_of env sigma) (Indrec.build_mutual_induction_scheme env sigma ind_list) in let i = ref (-1) in let sorts = List.rev_map (fun (_,x) -> Termops.new_sort_in_family (Pretyping.interp_elimination_sort x) ) fas in (* We create the first priciple by tactic *) let first_type,other_princ_types = match l_schemes with s::l_schemes -> s,l_schemes | _ -> anomaly "" in let (_,(const,_,_)) = try build_functional_principle false first_type (Array.of_list sorts) this_block_funs 0 (prove_princ_for_struct false 0 (Array.of_list funs)) (fun _ _ _ -> ()) with e when Errors.noncritical e -> begin begin try let id = Pfedit.get_current_proof_name () in let s = string_of_id id in let n = String.length "___________princ_________" in if String.length s >= n then if String.sub s 0 n = "___________princ_________" then Pfedit.delete_current_proof () else () else () with e when Errors.noncritical e -> () end; raise (Defining_principle e) end in incr i; let opacity = let finfos = find_Function_infos this_block_funs.(0) in try let equation = Option.get finfos.equation_lemma in Declarations.is_opaque (Global.lookup_constant equation) with Option.IsNone -> (* non recursive definition *) false in let const = {const with const_entry_opaque = opacity } in (* The others are just deduced *) if other_princ_types = [] then [const] else let other_fun_princ_types = let funs = Array.map mkConst this_block_funs in let sorts = Array.of_list sorts in List.map (compute_new_princ_type_from_rel funs sorts) other_princ_types in let first_princ_body,first_princ_type = const.Entries.const_entry_body, const.Entries.const_entry_type in let ctxt,fix = decompose_lam_assum first_princ_body in (* the principle has for forall ...., fix .*) let (idxs,_),(_,ta,_ as decl) = destFix fix in let other_result = List.map (* we can now compute the other principles *) (fun scheme_type -> incr i; observe (Printer.pr_lconstr scheme_type); let type_concl = (strip_prod_assum scheme_type) in let applied_f = List.hd (List.rev (snd (decompose_app type_concl))) in let f = fst (decompose_app applied_f) in try (* we search the number of the function in the fix block (name of the function) *) Array.iteri (fun j t -> let t = 
(strip_prod_assum t) in let applied_g = List.hd (List.rev (snd (decompose_app t))) in let g = fst (decompose_app applied_g) in if eq_constr f g then raise (Found_type j); observe (Printer.pr_lconstr f ++ str " <> " ++ Printer.pr_lconstr g) ) ta; (* If we reach this point, the two principle are not mutually recursive We fall back to the previous method *) let (_,(const,_,_)) = build_functional_principle false (List.nth other_princ_types (!i - 1)) (Array.of_list sorts) this_block_funs !i (prove_princ_for_struct false !i (Array.of_list funs)) (fun _ _ _ -> ()) in const with Found_type i -> let princ_body = Termops.it_mkLambda_or_LetIn (mkFix((idxs,i),decl)) ctxt in {const with Entries.const_entry_body = princ_body; Entries.const_entry_type = Some scheme_type } ) other_fun_princ_types in const::other_result let build_scheme fas = Dumpglob.pause (); let bodies_types = make_scheme (List.map (fun (_,f,sort) -> let f_as_constant = try match Nametab.global f with | Libnames.ConstRef c -> c | _ -> Util.error "Functional Scheme can only be used with functions" with Not_found -> Util.error ("Cannot find "^ Libnames.string_of_reference f) in (f_as_constant,sort) ) fas ) in List.iter2 (fun (princ_id,_,_) def_entry -> ignore (Declare.declare_constant princ_id (Entries.DefinitionEntry def_entry,Decl_kinds.IsProof Decl_kinds.Theorem)); Flags.if_verbose (fun id -> Pp.msgnl (Ppconstr.pr_id id ++ str " is defined")) princ_id ) fas bodies_types; Dumpglob.continue () let build_case_scheme fa = let env = Global.env () and sigma = Evd.empty in (* let id_to_constr id = *) (* Constrintern.global_reference id *) (* in *) let funs = (fun (_,f,_) -> try Libnames.constr_of_global (Nametab.global f) with Not_found -> Util.error ("Cannot find "^ Libnames.string_of_reference f)) fa in let first_fun = destConst funs in let funs_mp,funs_dp,_ = Names.repr_con first_fun in let first_fun_kn = try fst (find_Function_infos first_fun).graph_ind with Not_found -> raise No_graph_found in let this_block_funs_indexes = get_funs_constant funs_mp funs_dp first_fun in let this_block_funs = Array.map fst this_block_funs_indexes in let prop_sort = InProp in let funs_indexes = let this_block_funs_indexes = Array.to_list this_block_funs_indexes in List.assoc (destConst funs) this_block_funs_indexes in let ind_fun = let ind = first_fun_kn,funs_indexes in ind,prop_sort in let scheme_type = (Typing.type_of env sigma ) ((fun (ind,sf) -> Indrec.build_case_analysis_scheme_default env sigma ind sf) ind_fun) in let sorts = (fun (_,_,x) -> Termops.new_sort_in_family (Pretyping.interp_elimination_sort x) ) fa in let princ_name = (fun (x,_,_) -> x) fa in let _ = (* Pp.msgnl (str "Generating " ++ Ppconstr.pr_id princ_name ++str " with " ++ pr_lconstr scheme_type ++ str " and " ++ (fun a -> prlist_with_sep spc (fun c -> pr_lconstr (mkConst c)) (Array.to_list a)) this_block_funs ); *) generate_functional_principle false scheme_type (Some ([|sorts|])) (Some princ_name) this_block_funs 0 (prove_princ_for_struct false 0 [|destConst funs|]) in () coq-8.4pl2/plugins/funind/functional_principles_proofs.ml0000640000175000001440000014325412121620060023106 0ustar notinusersopen Printer open Util open Term open Namegen open Names open Declarations open Pp open Entries open Hiddentac open Evd open Tacmach open Proof_type open Tacticals open Tactics open Indfun_common open Libnames let msgnl = Pp.msgnl let observe strm = if do_observe () then Pp.msgnl strm else () let observennl strm = if do_observe () then begin Pp.msg strm;Pp.pp_flush () end else () let 
do_observe_tac s tac g = try let v = tac g in (* msgnl (goal ++ fnl () ++ (str s)++(str " ")++(str "finished")); *) v with reraise -> let e = Cerrors.process_vernac_interp_error reraise in let goal = try (Printer.pr_goal g) with e when Errors.noncritical e -> assert false in msgnl (str "observation "++ s++str " raised exception " ++ Errors.print e ++ str " on goal " ++ goal ); raise e;; let observe_tac_stream s tac g = if do_observe () then do_observe_tac s tac g else tac g let observe_tac s tac g = observe_tac_stream (str s) tac g (* let tclTRYD tac = *) (* if !Flags.debug || do_observe () *) (* then (fun g -> try (\* do_observe_tac "" *\)tac g with _ -> tclIDTAC g) *) (* else tac *) let list_chop ?(msg="") n l = try list_chop n l with Failure (msg') -> failwith (msg ^ msg') let make_refl_eq constructor type_of_t t = (* let refl_equal_term = Lazy.force refl_equal in *) mkApp(constructor,[|type_of_t;t|]) type pte_info = { proving_tac : (identifier list -> Tacmach.tactic); is_valid : constr -> bool } type ptes_info = pte_info Idmap.t type 'a dynamic_info = { nb_rec_hyps : int; rec_hyps : identifier list ; eq_hyps : identifier list; info : 'a } type body_info = constr dynamic_info let finish_proof dynamic_infos g = observe_tac "finish" ( h_assumption) g let refine c = Tacmach.refine_no_check c let thin l = Tacmach.thin_no_check l let cut_replacing id t tac :tactic= tclTHENS (cut t) [ tclTHEN (thin_no_check [id]) (introduction_no_check id); tac ] let intro_erasing id = tclTHEN (thin [id]) (introduction id) let rec_hyp_id = id_of_string "rec_hyp" let is_trivial_eq t = let res = try begin match kind_of_term t with | App(f,[|_;t1;t2|]) when eq_constr f (Lazy.force eq) -> eq_constr t1 t2 | App(f,[|t1;a1;t2;a2|]) when eq_constr f (jmeq ()) -> eq_constr t1 t2 && eq_constr a1 a2 | _ -> false end with e when Errors.noncritical e -> false in (* observe (str "is_trivial_eq " ++ Printer.pr_lconstr t ++ (if res then str " true" else str " false")); *) res let rec incompatible_constructor_terms t1 t2 = let c1,arg1 = decompose_app t1 and c2,arg2 = decompose_app t2 in (not (eq_constr t1 t2)) && isConstruct c1 && isConstruct c2 && ( not (eq_constr c1 c2) || List.exists2 incompatible_constructor_terms arg1 arg2 ) let is_incompatible_eq t = let res = try match kind_of_term t with | App(f,[|_;t1;t2|]) when eq_constr f (Lazy.force eq) -> incompatible_constructor_terms t1 t2 | App(f,[|u1;t1;u2;t2|]) when eq_constr f (jmeq ()) -> (eq_constr u1 u2 && incompatible_constructor_terms t1 t2) | _ -> false with e when Errors.noncritical e -> false in if res then observe (str "is_incompatible_eq " ++ Printer.pr_lconstr t); res let change_hyp_with_using msg hyp_id t tac : tactic = fun g -> let prov_id = pf_get_new_id hyp_id g in tclTHENS ((* observe_tac msg *) (assert_by (Name prov_id) t (tclCOMPLETE tac))) [tclTHENLIST [ (* observe_tac "change_hyp_with_using thin" *) (thin [hyp_id]); (* observe_tac "change_hyp_with_using rename " *) (h_rename [prov_id,hyp_id]) ]] g exception TOREMOVE let prove_trivial_eq h_id context (constructor,type_of_term,term) = let nb_intros = List.length context in tclTHENLIST [ tclDO nb_intros intro; (* introducing context *) (fun g -> let context_hyps = fst (list_chop ~msg:"prove_trivial_eq : " nb_intros (pf_ids_of_hyps g)) in let context_hyps' = (mkApp(constructor,[|type_of_term;term|])):: (List.map mkVar context_hyps) in let to_refine = applist(mkVar h_id,List.rev context_hyps') in refine to_refine g ) ] let find_rectype env c = let (t, l) = decompose_app (Reduction.whd_betadeltaiota env c) in 
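  (* Note: this local [find_rectype] accepts both an inductive and an applied
     constructor as head of the (whd-normalised) term, returning the head and
     its arguments; it is used e.g. by [isAppConstruct] just below. *)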
match kind_of_term t with | Ind ind -> (t, l) | Construct _ -> (t,l) | _ -> raise Not_found let isAppConstruct ?(env=Global.env ()) t = try let t',l = find_rectype (Global.env ()) t in observe (str "isAppConstruct : " ++ Printer.pr_lconstr t ++ str " -> " ++ Printer.pr_lconstr (applist (t',l))); true with Not_found -> false let nf_betaiotazeta = (* Reductionops.local_strong Reductionops.whd_betaiotazeta *) let clos_norm_flags flgs env sigma t = Closure.norm_val (Closure.create_clos_infos flgs env) (Closure.inject (Reductionops.nf_evar sigma t)) in clos_norm_flags Closure.betaiotazeta Environ.empty_env Evd.empty let change_eq env sigma hyp_id (context:rel_context) x t end_of_type = let nochange ?t' msg = begin observe (str ("Not treating ( "^msg^" )") ++ pr_lconstr t ++ str " " ++ match t' with None -> str "" | Some t -> Printer.pr_lconstr t ); failwith "NoChange"; end in let eq_constr = Reductionops.is_conv env sigma in if not (noccurn 1 end_of_type) then nochange "dependent"; (* if end_of_type depends on this term we don't touch it *) if not (isApp t) then nochange "not an equality"; let f_eq,args = destApp t in let constructor,t1,t2,t1_typ = try if (eq_constr f_eq (Lazy.force eq)) then let t1 = (args.(1),args.(0)) and t2 = (args.(2),args.(0)) and t1_typ = args.(0) in (Lazy.force refl_equal,t1,t2,t1_typ) else if (eq_constr f_eq (jmeq ())) then (jmeq_refl (),(args.(1),args.(0)),(args.(3),args.(2)),args.(0)) else nochange "not an equality" with e when Errors.noncritical e -> nochange "not an equality" in if not ((closed0 (fst t1)) && (closed0 (snd t1)))then nochange "not a closed lhs"; let rec compute_substitution sub t1 t2 = (* observe (str "compute_substitution : " ++ pr_lconstr t1 ++ str " === " ++ pr_lconstr t2); *) if isRel t2 then let t2 = destRel t2 in begin try let t1' = Intmap.find t2 sub in if not (eq_constr t1 t1') then nochange "twice bound variable"; sub with Not_found -> assert (closed0 t1); Intmap.add t2 t1 sub end else if isAppConstruct t1 && isAppConstruct t2 then begin let c1,args1 = find_rectype env t1 and c2,args2 = find_rectype env t2 in if not (eq_constr c1 c2) then nochange "cannot solve (diff)"; List.fold_left2 compute_substitution sub args1 args2 end else if (eq_constr t1 t2) then sub else nochange ~t':(make_refl_eq constructor (Reduction.whd_betadeltaiota env t1) t2) "cannot solve (diff)" in let sub = compute_substitution Intmap.empty (snd t1) (snd t2) in let sub = compute_substitution sub (fst t1) (fst t2) in let end_of_type_with_pop = Termops.pop end_of_type in (*the equation will be removed *) let new_end_of_type = (* Ugly hack to prevent Map.fold order change between ocaml-3.08.3 and ocaml-3.08.4 Can be safely replaced by the next comment for Ocaml >= 3.08.4 *) let sub' = Intmap.fold (fun i t acc -> (i,t)::acc) sub [] in let sub'' = List.sort (fun (x,_) (y,_) -> Pervasives.compare x y) sub' in List.fold_left (fun end_of_type (i,t) -> lift 1 (substnl [t] (i-1) end_of_type)) end_of_type_with_pop sub'' in let old_context_length = List.length context + 1 in let witness_fun = mkLetIn(Anonymous,make_refl_eq constructor t1_typ (fst t1),t, mkApp(mkVar hyp_id,Array.init old_context_length (fun i -> mkRel (old_context_length - i))) ) in let new_type_of_hyp,ctxt_size,witness_fun = list_fold_left_i (fun i (end_of_type,ctxt_size,witness_fun) ((x',b',t') as decl) -> try let witness = Intmap.find i sub in if b' <> None then anomaly "can not redefine a rel!"; (Termops.pop end_of_type,ctxt_size,mkLetIn(x',witness,t',witness_fun)) with Not_found -> (mkProd_or_LetIn decl 
end_of_type, ctxt_size + 1, mkLambda_or_LetIn decl witness_fun) ) 1 (new_end_of_type,0,witness_fun) context in let new_type_of_hyp = Reductionops.nf_betaiota Evd.empty new_type_of_hyp in let new_ctxt,new_end_of_type = decompose_prod_n_assum ctxt_size new_type_of_hyp in let prove_new_hyp : tactic = tclTHEN (tclDO ctxt_size intro) (fun g -> let all_ids = pf_ids_of_hyps g in let new_ids,_ = list_chop ctxt_size all_ids in let to_refine = applist(witness_fun,List.rev_map mkVar new_ids) in refine to_refine g ) in let simpl_eq_tac = change_hyp_with_using "prove_pattern_simplification" hyp_id new_type_of_hyp prove_new_hyp in (* observe (str "In " ++ Ppconstr.pr_id hyp_id ++ *) (* str "removing an equation " ++ fnl ()++ *) (* str "old_typ_of_hyp :=" ++ *) (* Printer.pr_lconstr_env *) (* env *) (* (it_mkProd_or_LetIn ~init:end_of_type ((x,None,t)::context)) *) (* ++ fnl () ++ *) (* str "new_typ_of_hyp := "++ *) (* Printer.pr_lconstr_env env new_type_of_hyp ++ fnl () *) (* ++ str "old context := " ++ pr_rel_context env context ++ fnl () *) (* ++ str "new context := " ++ pr_rel_context env new_ctxt ++ fnl () *) (* ++ str "old type := " ++ pr_lconstr end_of_type ++ fnl () *) (* ++ str "new type := " ++ pr_lconstr new_end_of_type ++ fnl () *) (* ); *) new_ctxt,new_end_of_type,simpl_eq_tac let is_property ptes_info t_x full_type_of_hyp = if isApp t_x then let pte,args = destApp t_x in if isVar pte && array_for_all closed0 args then try let info = Idmap.find (destVar pte) ptes_info in info.is_valid full_type_of_hyp with Not_found -> false else false else false let isLetIn t = match kind_of_term t with | LetIn _ -> true | _ -> false let h_reduce_with_zeta = h_reduce (Glob_term.Cbv {Glob_term.all_flags with Glob_term.rDelta = false; }) let rewrite_until_var arg_num eq_ids : tactic = (* tests if the declares recursive argument is neither a Constructor nor an applied Constructor since such a form for the recursive argument will break the Guard when trying to save the Lemma. *) let test_var g = let _,args = destApp (pf_concl g) in not ((isConstruct args.(arg_num)) || isAppConstruct args.(arg_num)) in let rec do_rewrite eq_ids g = if test_var g then tclIDTAC g else match eq_ids with | [] -> anomaly "Cannot find a way to prove recursive property"; | eq_id::eq_ids -> tclTHEN (tclTRY (Equality.rewriteRL (mkVar eq_id))) (do_rewrite eq_ids) g in do_rewrite eq_ids let rec_pte_id = id_of_string "Hrec" let clean_hyp_with_heq ptes_infos eq_hyps hyp_id env sigma = let coq_False = Coqlib.build_coq_False () in let coq_True = Coqlib.build_coq_True () in let coq_I = Coqlib.build_coq_I () in let rec scan_type context type_of_hyp : tactic = if isLetIn type_of_hyp then let real_type_of_hyp = it_mkProd_or_LetIn type_of_hyp context in let reduced_type_of_hyp = nf_betaiotazeta real_type_of_hyp in (* length of context didn't change ? 
*) let new_context,new_typ_of_hyp = decompose_prod_n_assum (List.length context) reduced_type_of_hyp in tclTHENLIST [ h_reduce_with_zeta (Tacticals.onHyp hyp_id) ; scan_type new_context new_typ_of_hyp ] else if isProd type_of_hyp then begin let (x,t_x,t') = destProd type_of_hyp in let actual_real_type_of_hyp = it_mkProd_or_LetIn t' context in if is_property ptes_infos t_x actual_real_type_of_hyp then begin let pte,pte_args = (destApp t_x) in let (* fix_info *) prove_rec_hyp = (Idmap.find (destVar pte) ptes_infos).proving_tac in let popped_t' = Termops.pop t' in let real_type_of_hyp = it_mkProd_or_LetIn popped_t' context in let prove_new_type_of_hyp = let context_length = List.length context in tclTHENLIST [ tclDO context_length intro; (fun g -> let context_hyps_ids = fst (list_chop ~msg:"rec hyp : context_hyps" context_length (pf_ids_of_hyps g)) in let rec_pte_id = pf_get_new_id rec_pte_id g in let to_refine = applist(mkVar hyp_id, List.rev_map mkVar (rec_pte_id::context_hyps_ids) ) in (* observe_tac "rec hyp " *) (tclTHENS (assert_tac (Name rec_pte_id) t_x) [ (* observe_tac "prove rec hyp" *) (prove_rec_hyp eq_hyps); (* observe_tac "prove rec hyp" *) (refine to_refine) ]) g ) ] in tclTHENLIST [ (* observe_tac "hyp rec" *) (change_hyp_with_using "rec_hyp_tac" hyp_id real_type_of_hyp prove_new_type_of_hyp); scan_type context popped_t' ] end else if eq_constr t_x coq_False then begin (* observe (str "Removing : "++ Ppconstr.pr_id hyp_id++ *) (* str " since it has False in its preconds " *) (* ); *) raise TOREMOVE; (* False -> .. useless *) end else if is_incompatible_eq t_x then raise TOREMOVE (* t_x := C1 ... = C2 ... *) else if eq_constr t_x coq_True (* Trivial => we remove this precons *) then (* observe (str "In "++Ppconstr.pr_id hyp_id++ *) (* str " removing useless precond True" *) (* ); *) let popped_t' = Termops.pop t' in let real_type_of_hyp = it_mkProd_or_LetIn popped_t' context in let prove_trivial = let nb_intro = List.length context in tclTHENLIST [ tclDO nb_intro intro; (fun g -> let context_hyps = fst (list_chop ~msg:"removing True : context_hyps "nb_intro (pf_ids_of_hyps g)) in let to_refine = applist (mkVar hyp_id, List.rev (coq_I::List.map mkVar context_hyps) ) in refine to_refine g ) ] in tclTHENLIST[ change_hyp_with_using "prove_trivial" hyp_id real_type_of_hyp ((* observe_tac "prove_trivial" *) prove_trivial); scan_type context popped_t' ] else if is_trivial_eq t_x then (* t_x := t = t => we remove this precond *) let popped_t' = Termops.pop t' in let real_type_of_hyp = it_mkProd_or_LetIn popped_t' context in let hd,args = destApp t_x in let get_args hd args = if eq_constr hd (Lazy.force eq) then (Lazy.force refl_equal,args.(0),args.(1)) else (jmeq_refl (),args.(0),args.(1)) in tclTHENLIST [ change_hyp_with_using "prove_trivial_eq" hyp_id real_type_of_hyp ((* observe_tac "prove_trivial_eq" *) (prove_trivial_eq hyp_id context (get_args hd args))); scan_type context popped_t' ] else begin try let new_context,new_t',tac = change_eq env sigma hyp_id context x t_x t' in tclTHEN tac (scan_type new_context new_t') with Failure "NoChange" -> (* Last thing todo : push the rel in the context and continue *) scan_type ((x,None,t_x)::context) t' end end else tclIDTAC in try scan_type [] (Typing.type_of env sigma (mkVar hyp_id)), [hyp_id] with TOREMOVE -> thin [hyp_id],[] let clean_goal_with_heq ptes_infos continue_tac dyn_infos = fun g -> let env = pf_env g and sigma = project g in let tac,new_hyps = List.fold_left ( fun (hyps_tac,new_hyps) hyp_id -> let hyp_tac,new_hyp = 
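(* Each recursive hypothesis of [dyn_infos] is cleaned in turn: the tactics
   produced by [clean_hyp_with_heq] are chained together, and only the
   hypotheses that survive the cleaning are passed on to the continuation. *)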
clean_hyp_with_heq ptes_infos dyn_infos.eq_hyps hyp_id env sigma in (tclTHEN hyp_tac hyps_tac),new_hyp@new_hyps ) (tclIDTAC,[]) dyn_infos.rec_hyps in let new_infos = { dyn_infos with rec_hyps = new_hyps; nb_rec_hyps = List.length new_hyps } in tclTHENLIST [ tac ; (* observe_tac "clean_hyp_with_heq continue" *) (continue_tac new_infos) ] g let heq_id = id_of_string "Heq" let treat_new_case ptes_infos nb_prod continue_tac term dyn_infos = fun g -> let nb_first_intro = nb_prod - 1 - dyn_infos.nb_rec_hyps in tclTHENLIST [ (* We first introduce the variables *) tclDO nb_first_intro (intro_avoiding dyn_infos.rec_hyps); (* Then the equation itself *) intro_using heq_id; onLastHypId (fun heq_id -> tclTHENLIST [ (* Then the new hypothesis *) tclMAP introduction_no_check dyn_infos.rec_hyps; (* observe_tac "after_introduction" *)(fun g' -> (* We get infos on the equations introduced*) let new_term_value_eq = pf_type_of g' (mkVar heq_id) in (* compute the new value of the body *) let new_term_value = match kind_of_term new_term_value_eq with | App(f,[| _;_;args2 |]) -> args2 | _ -> observe (str "cannot compute new term value : " ++ pr_gls g' ++ fnl () ++ str "last hyp is" ++ pr_lconstr_env (pf_env g') new_term_value_eq ); anomaly "cannot compute new term value" in let fun_body = mkLambda(Anonymous, pf_type_of g' term, Termops.replace_term term (mkRel 1) dyn_infos.info ) in let new_body = pf_nf_betaiota g' (mkApp(fun_body,[| new_term_value |])) in let new_infos = {dyn_infos with info = new_body; eq_hyps = heq_id::dyn_infos.eq_hyps } in clean_goal_with_heq ptes_infos continue_tac new_infos g' )]) ] g let my_orelse tac1 tac2 g = try tac1 g with e when Errors.noncritical e -> (* observe (str "using snd tac since : " ++ Errors.print e); *) tac2 g let instanciate_hyps_with_args (do_prove:identifier list -> tactic) hyps args_id = let args = Array.of_list (List.map mkVar args_id) in let instanciate_one_hyp hid = my_orelse ( (* we instanciate the hyp if possible *) fun g -> let prov_hid = pf_get_new_id hid g in tclTHENLIST[ pose_proof (Name prov_hid) (mkApp(mkVar hid,args)); thin [hid]; h_rename [prov_hid,hid] ] g ) ( (* if not then we are in a mutual function block and this hyp is a recursive hyp on an other function. 
We are not supposed to use it while proving this principle so that we can trash it *) (fun g -> (* observe (str "Instanciation: removing hyp " ++ Ppconstr.pr_id hid); *) thin [hid] g ) ) in if args_id = [] then tclTHENLIST [ tclMAP (fun hyp_id -> h_reduce_with_zeta (Tacticals.onHyp hyp_id)) hyps; do_prove hyps ] else tclTHENLIST [ tclMAP (fun hyp_id -> h_reduce_with_zeta (Tacticals.onHyp hyp_id)) hyps; tclMAP instanciate_one_hyp hyps; (fun g -> let all_g_hyps_id = List.fold_right Idset.add (pf_ids_of_hyps g) Idset.empty in let remaining_hyps = List.filter (fun id -> Idset.mem id all_g_hyps_id) hyps in do_prove remaining_hyps g ) ] let build_proof (interactive_proof:bool) (fnames:constant list) ptes_infos dyn_infos : tactic = let rec build_proof_aux do_finalize dyn_infos : tactic = fun g -> (* observe (str "proving on " ++ Printer.pr_lconstr_env (pf_env g) term);*) match kind_of_term dyn_infos.info with | Case(ci,ct,t,cb) -> let do_finalize_t dyn_info' = fun g -> let t = dyn_info'.info in let dyn_infos = {dyn_info' with info = mkCase(ci,ct,t,cb)} in let g_nb_prod = nb_prod (pf_concl g) in let type_of_term = pf_type_of g t in let term_eq = make_refl_eq (Lazy.force refl_equal) type_of_term t in tclTHENSEQ [ h_generalize (term_eq::(List.map mkVar dyn_infos.rec_hyps)); thin dyn_infos.rec_hyps; pattern_option [(false,[1]),t] None; (fun g -> observe_tac "toto" ( tclTHENSEQ [h_simplest_case t; (fun g' -> let g'_nb_prod = nb_prod (pf_concl g') in let nb_instanciate_partial = g'_nb_prod - g_nb_prod in observe_tac "treat_new_case" (treat_new_case ptes_infos nb_instanciate_partial (build_proof do_finalize) t dyn_infos) g' ) ]) g ) ] g in build_proof do_finalize_t {dyn_infos with info = t} g | Lambda(n,t,b) -> begin match kind_of_term( pf_concl g) with | Prod _ -> tclTHEN intro (fun g' -> let (id,_,_) = pf_last_hyp g' in let new_term = pf_nf_betaiota g' (mkApp(dyn_infos.info,[|mkVar id|])) in let new_infos = {dyn_infos with info = new_term} in let do_prove new_hyps = build_proof do_finalize {new_infos with rec_hyps = new_hyps; nb_rec_hyps = List.length new_hyps } in (* observe_tac "Lambda" *) (instanciate_hyps_with_args do_prove new_infos.rec_hyps [id]) g' (* build_proof do_finalize new_infos g' *) ) g | _ -> do_finalize dyn_infos g end | Cast(t,_,_) -> build_proof do_finalize {dyn_infos with info = t} g | Const _ | Var _ | Meta _ | Evar _ | Sort _ | Construct _ | Ind _ -> do_finalize dyn_infos g | App(_,_) -> let f,args = decompose_app dyn_infos.info in begin match kind_of_term f with | App _ -> assert false (* we have collected all the app in decompose_app *) | Var _ | Construct _ | Rel _ | Evar _ | Meta _ | Ind _ | Sort _ | Prod _ -> let new_infos = { dyn_infos with info = (f,args) } in build_proof_args do_finalize new_infos g | Const c when not (List.mem c fnames) -> let new_infos = { dyn_infos with info = (f,args) } in (* Pp.msgnl (str "proving in " ++ pr_lconstr_env (pf_env g) dyn_infos.info); *) build_proof_args do_finalize new_infos g | Const _ -> do_finalize dyn_infos g | Lambda _ -> let new_term = Reductionops.nf_beta Evd.empty dyn_infos.info in build_proof do_finalize {dyn_infos with info = new_term} g | LetIn _ -> let new_infos = { dyn_infos with info = nf_betaiotazeta dyn_infos.info } in tclTHENLIST [tclMAP (fun hyp_id -> h_reduce_with_zeta (Tacticals.onHyp hyp_id)) dyn_infos.rec_hyps; h_reduce_with_zeta Tacticals.onConcl; build_proof do_finalize new_infos ] g | Cast(b,_,_) -> build_proof do_finalize {dyn_infos with info = b } g | Case _ | Fix _ | CoFix _ -> let new_finalize dyn_infos = 
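(* The head of the application is itself a match or a (co)fixpoint: build the
   proof for the head alone first, then resume on the resulting term paired
   with the original arguments via [build_proof_args]. *)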
let new_infos = { dyn_infos with info = dyn_infos.info,args } in build_proof_args do_finalize new_infos in build_proof new_finalize {dyn_infos with info = f } g end | Fix _ | CoFix _ -> error ( "Anonymous local (co)fixpoints are not handled yet") | Prod _ -> error "Prod" | LetIn _ -> let new_infos = { dyn_infos with info = nf_betaiotazeta dyn_infos.info } in tclTHENLIST [tclMAP (fun hyp_id -> h_reduce_with_zeta (Tacticals.onHyp hyp_id)) dyn_infos.rec_hyps; h_reduce_with_zeta Tacticals.onConcl; build_proof do_finalize new_infos ] g | Rel _ -> anomaly "Free var in goal conclusion !" and build_proof do_finalize dyn_infos g = (* observe (str "proving with "++Printer.pr_lconstr dyn_infos.info++ str " on goal " ++ pr_gls g); *) observe_tac "build_proof" (build_proof_aux do_finalize dyn_infos) g and build_proof_args do_finalize dyn_infos (* f_args' args *) :tactic = fun g -> let (f_args',args) = dyn_infos.info in let tac : tactic = fun g -> match args with | [] -> do_finalize {dyn_infos with info = f_args'} g | arg::args -> (* observe (str "build_proof_args with arg := "++ pr_lconstr_env (pf_env g) arg++ *) (* fnl () ++ *) (* pr_goal (Tacmach.sig_it g) *) (* ); *) let do_finalize dyn_infos = let new_arg = dyn_infos.info in (* tclTRYD *) (build_proof_args do_finalize {dyn_infos with info = (mkApp(f_args',[|new_arg|])), args} ) in build_proof do_finalize {dyn_infos with info = arg } g in (* observe_tac "build_proof_args" *) (tac ) g in let do_finish_proof dyn_infos = (* tclTRYD *) (clean_goal_with_heq ptes_infos finish_proof dyn_infos) in (* observe_tac "build_proof" *) (build_proof (clean_goal_with_heq ptes_infos do_finish_proof) dyn_infos) (* Proof of principles from structural functions *) let is_pte_type t = isSort ((strip_prod t)) let is_pte (_,_,t) = is_pte_type t type static_fix_info = { idx : int; name : identifier; types : types; offset : int; nb_realargs : int; body_with_param : constr; num_in_block : int } let prove_rec_hyp_for_struct fix_info = (fun eq_hyps -> tclTHEN (rewrite_until_var (fix_info.idx) eq_hyps) (fun g -> let _,pte_args = destApp (pf_concl g) in let rec_hyp_proof = mkApp(mkVar fix_info.name,array_get_start pte_args) in refine rec_hyp_proof g )) let prove_rec_hyp fix_info = { proving_tac = prove_rec_hyp_for_struct fix_info ; is_valid = fun _ -> true } exception Not_Rec let generalize_non_dep hyp g = (* observe (str "rec id := " ++ Ppconstr.pr_id hyp); *) let hyps = [hyp] in let env = Global.env () in let hyp_typ = pf_type_of g (mkVar hyp) in let to_revert,_ = Environ.fold_named_context_reverse (fun (clear,keep) (hyp,_,_ as decl) -> if List.mem hyp hyps or List.exists (Termops.occur_var_in_decl env hyp) keep or Termops.occur_var env hyp hyp_typ or Termops.is_section_variable hyp (* should be dangerous *) then (clear,decl::keep) else (hyp::clear,keep)) ~init:([],[]) (pf_env g) in (* observe (str "to_revert := " ++ prlist_with_sep spc Ppconstr.pr_id to_revert); *) tclTHEN ((* observe_tac "h_generalize" *) (h_generalize (List.map mkVar to_revert) )) ((* observe_tac "thin" *) (thin to_revert)) g let id_of_decl (na,_,_) = (Nameops.out_name na) let var_of_decl decl = mkVar (id_of_decl decl) let revert idl = tclTHEN (generalize (List.map mkVar idl)) (thin idl) let generate_equation_lemma fnames f fun_num nb_params nb_args rec_args_num = (* observe (str "nb_args := " ++ str (string_of_int nb_args)); *) (* observe (str "nb_params := " ++ str (string_of_int nb_params)); *) (* observe (str "rec_args_num := " ++ str (string_of_int (rec_args_num + 1) )); *) let f_def = 
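(* The equation lemma to be generated states that [f] applied to its
   parameters and arguments is equal to the corresponding body of the
   underlying fixpoint applied to the same arguments; we therefore start by
   fetching the actual definition of [f]. *)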
Global.lookup_constant (destConst f) in let eq_lhs = mkApp(f,Array.init (nb_params + nb_args) (fun i -> mkRel(nb_params + nb_args - i))) in let f_body = force (Option.get (body_of_constant f_def)) in let params,f_body_with_params = decompose_lam_n nb_params f_body in let (_,num),(_,_,bodies) = destFix f_body_with_params in let fnames_with_params = let params = Array.init nb_params (fun i -> mkRel(nb_params - i)) in let fnames = List.rev (Array.to_list (Array.map (fun f -> mkApp(f,params)) fnames)) in fnames in (* observe (str "fnames_with_params " ++ prlist_with_sep fnl pr_lconstr fnames_with_params); *) (* observe (str "body " ++ pr_lconstr bodies.(num)); *) let f_body_with_params_and_other_fun = substl fnames_with_params bodies.(num) in (* observe (str "f_body_with_params_and_other_fun " ++ pr_lconstr f_body_with_params_and_other_fun); *) let eq_rhs = nf_betaiotazeta (mkApp(compose_lam params f_body_with_params_and_other_fun,Array.init (nb_params + nb_args) (fun i -> mkRel(nb_params + nb_args - i)))) in (* observe (str "eq_rhs " ++ pr_lconstr eq_rhs); *) let type_ctxt,type_of_f = decompose_prod_n_assum (nb_params + nb_args) (Typeops.type_of_constant_type (Global.env()) f_def.const_type) in let eqn = mkApp(Lazy.force eq,[|type_of_f;eq_lhs;eq_rhs|]) in let lemma_type = it_mkProd_or_LetIn eqn type_ctxt in let f_id = id_of_label (con_label (destConst f)) in let prove_replacement = tclTHENSEQ [ tclDO (nb_params + rec_args_num + 1) intro; (* observe_tac "" *) (fun g -> let rec_id = pf_nth_hyp_id g 1 in tclTHENSEQ [(* observe_tac "generalize_non_dep in generate_equation_lemma" *) (generalize_non_dep rec_id); (* observe_tac "h_case" *) (h_case false (mkVar rec_id,Glob_term.NoBindings)); intros_reflexivity] g ) ] in Lemmas.start_proof (*i The next call to mk_equation_id is valid since we are constructing the lemma Ensures by: obvious i*) (mk_equation_id f_id) (Decl_kinds.Global,(Decl_kinds.Proof Decl_kinds.Theorem)) lemma_type (fun _ _ -> ()); Pfedit.by (prove_replacement); Lemmas.save_named false let do_replace params rec_arg_num rev_args_id f fun_num all_funs g = let equation_lemma = try let finfos = find_Function_infos (destConst f) in mkConst (Option.get finfos.equation_lemma) with (Not_found | Option.IsNone as e) -> let f_id = id_of_label (con_label (destConst f)) in (*i The next call to mk_equation_id is valid since we will construct the lemma Ensures by: obvious i*) let equation_lemma_id = (mk_equation_id f_id) in generate_equation_lemma all_funs f fun_num (List.length params) (List.length rev_args_id) rec_arg_num; let _ = match e with | Option.IsNone -> let finfos = find_Function_infos (destConst f) in update_Function {finfos with equation_lemma = Some (match Nametab.locate (qualid_of_ident equation_lemma_id) with ConstRef c -> c | _ -> Util.anomaly "Not a constant" ) } | _ -> () in Constrintern.construct_reference (pf_hyps g) equation_lemma_id in let nb_intro_to_do = nb_prod (pf_concl g) in tclTHEN (tclDO nb_intro_to_do intro) ( fun g' -> let just_introduced = nLastDecls nb_intro_to_do g' in let just_introduced_id = List.map (fun (id,_,_) -> id) just_introduced in tclTHEN (Equality.rewriteLR equation_lemma) (revert just_introduced_id) g' ) g let prove_princ_for_struct interactive_proof fun_num fnames all_funs _nparams : tactic = fun g -> let princ_type = pf_concl g in let princ_info = compute_elim_sig princ_type in let fresh_id = let avoid = ref (pf_ids_of_hyps g) in (fun na -> let new_id = match na with Name id -> fresh_id !avoid (string_of_id id) | Anonymous -> fresh_id !avoid "H" in 
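(* remember the generated identifier so that later calls avoid it as well *)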
avoid := new_id :: !avoid; (Name new_id) ) in let fresh_decl = (fun (na,b,t) -> (fresh_id na,b,t) ) in let princ_info : elim_scheme = { princ_info with params = List.map fresh_decl princ_info.params; predicates = List.map fresh_decl princ_info.predicates; branches = List.map fresh_decl princ_info.branches; args = List.map fresh_decl princ_info.args } in let get_body const = match body_of_constant (Global.lookup_constant const) with | Some b -> let body = force b in Tacred.cbv_norm_flags (Closure.RedFlags.mkflags [Closure.RedFlags.fZETA]) (Global.env ()) (Evd.empty) body | None -> error ( "Cannot define a principle over an axiom ") in let fbody = get_body fnames.(fun_num) in let f_ctxt,f_body = decompose_lam fbody in let f_ctxt_length = List.length f_ctxt in let diff_params = princ_info.nparams - f_ctxt_length in let full_params,princ_params,fbody_with_full_params = if diff_params > 0 then let princ_params,full_params = list_chop diff_params princ_info.params in (full_params, (* real params *) princ_params, (* the params of the principle which are not params of the function *) substl (* function instanciated with real params *) (List.map var_of_decl full_params) f_body ) else let f_ctxt_other,f_ctxt_params = list_chop (- diff_params) f_ctxt in let f_body = compose_lam f_ctxt_other f_body in (princ_info.params, (* real params *) [],(* all params are full params *) substl (* function instanciated with real params *) (List.map var_of_decl princ_info.params) f_body ) in (* observe (str "full_params := " ++ *) (* prlist_with_sep spc (fun (na,_,_) -> Ppconstr.pr_id (Nameops.out_name na)) *) (* full_params *) (* ); *) (* observe (str "princ_params := " ++ *) (* prlist_with_sep spc (fun (na,_,_) -> Ppconstr.pr_id (Nameops.out_name na)) *) (* princ_params *) (* ); *) (* observe (str "fbody_with_full_params := " ++ *) (* pr_lconstr fbody_with_full_params *) (* ); *) let all_funs_with_full_params = Array.map (fun f -> applist(f, List.rev_map var_of_decl full_params)) all_funs in let fix_offset = List.length princ_params in let ptes_to_fix,infos = match kind_of_term fbody_with_full_params with | Fix((idxs,i),(names,typess,bodies)) -> let bodies_with_all_params = Array.map (fun body -> Reductionops.nf_betaiota Evd.empty (applist(substl (List.rev (Array.to_list all_funs_with_full_params)) body, List.rev_map var_of_decl princ_params)) ) bodies in let info_array = Array.mapi (fun i types -> let types = prod_applist types (List.rev_map var_of_decl princ_params) in { idx = idxs.(i) - fix_offset; name = Nameops.out_name (fresh_id names.(i)); types = types; offset = fix_offset; nb_realargs = List.length (fst (decompose_lam bodies.(i))) - fix_offset; body_with_param = bodies_with_all_params.(i); num_in_block = i } ) typess in let pte_to_fix,rev_info = list_fold_left_i (fun i (acc_map,acc_info) (pte,_,_) -> let infos = info_array.(i) in let type_args,_ = decompose_prod infos.types in let nargs = List.length type_args in let f = applist(mkConst fnames.(i), List.rev_map var_of_decl princ_info.params) in let first_args = Array.init nargs (fun i -> mkRel (nargs -i)) in let app_f = mkApp(f,first_args) in let pte_args = (Array.to_list first_args)@[app_f] in let app_pte = applist(mkVar (Nameops.out_name pte),pte_args) in let body_with_param,num = let body = get_body fnames.(i) in let body_with_full_params = Reductionops.nf_betaiota Evd.empty ( applist(body,List.rev_map var_of_decl full_params)) in match kind_of_term body_with_full_params with | Fix((_,num),(_,_,bs)) -> Reductionops.nf_betaiota Evd.empty ( (applist 
(substl (List.rev (Array.to_list all_funs_with_full_params)) bs.(num), List.rev_map var_of_decl princ_params)) ),num | _ -> error "Not a mutual block" in let info = {infos with types = compose_prod type_args app_pte; body_with_param = body_with_param; num_in_block = num } in (* observe (str "binding " ++ Ppconstr.pr_id (Nameops.out_name pte) ++ *) (* str " to " ++ Ppconstr.pr_id info.name); *) (Idmap.add (Nameops.out_name pte) info acc_map,info::acc_info) ) 0 (Idmap.empty,[]) (List.rev princ_info.predicates) in pte_to_fix,List.rev rev_info | _ -> Idmap.empty,[] in let mk_fixes : tactic = let pre_info,infos = list_chop fun_num infos in match pre_info,infos with | [],[] -> tclIDTAC | _, this_fix_info::others_infos -> let other_fix_infos = List.map (fun fi -> fi.name,fi.idx + 1 ,fi.types) (pre_info@others_infos) in if other_fix_infos = [] then (* observe_tac ("h_fix") *) (h_fix (Some this_fix_info.name) (this_fix_info.idx +1)) else h_mutual_fix false this_fix_info.name (this_fix_info.idx + 1) other_fix_infos | _ -> anomaly "Not a valid information" in let first_tac : tactic = (* every operations until fix creations *) tclTHENSEQ [ (* observe_tac "introducing params" *) (intros_using (List.rev_map id_of_decl princ_info.params)); (* observe_tac "introducing predictes" *) (intros_using (List.rev_map id_of_decl princ_info.predicates)); (* observe_tac "introducing branches" *) (intros_using (List.rev_map id_of_decl princ_info.branches)); (* observe_tac "building fixes" *) mk_fixes; ] in let intros_after_fixes : tactic = fun gl -> let ctxt,pte_app = (decompose_prod_assum (pf_concl gl)) in let pte,pte_args = (decompose_app pte_app) in try let pte = try destVar pte with e when Errors.noncritical e -> anomaly "Property is not a variable" in let fix_info = Idmap.find pte ptes_to_fix in let nb_args = fix_info.nb_realargs in tclTHENSEQ [ (* observe_tac ("introducing args") *) (tclDO nb_args intro); (fun g -> (* replacement of the function by its body *) let args = nLastDecls nb_args g in let fix_body = fix_info.body_with_param in (* observe (str "fix_body := "++ pr_lconstr_env (pf_env gl) fix_body); *) let args_id = List.map (fun (id,_,_) -> id) args in let dyn_infos = { nb_rec_hyps = -100; rec_hyps = []; info = Reductionops.nf_betaiota Evd.empty (applist(fix_body,List.rev_map mkVar args_id)); eq_hyps = [] } in tclTHENSEQ [ (* observe_tac "do_replace" *) (do_replace full_params (fix_info.idx + List.length princ_params) (args_id@(List.map (fun (id,_,_) -> Nameops.out_name id ) princ_params)) (all_funs.(fix_info.num_in_block)) fix_info.num_in_block all_funs ); (* observe_tac "do_replace" *) (* (do_replace princ_info.params fix_info.idx args_id *) (* (List.hd (List.rev pte_args)) fix_body); *) let do_prove = build_proof interactive_proof (Array.to_list fnames) (Idmap.map prove_rec_hyp ptes_to_fix) in let prove_tac branches = let dyn_infos = {dyn_infos with rec_hyps = branches; nb_rec_hyps = List.length branches } in observe_tac "cleaning" (clean_goal_with_heq (Idmap.map prove_rec_hyp ptes_to_fix) do_prove dyn_infos) in (* observe (str "branches := " ++ *) (* prlist_with_sep spc (fun decl -> Ppconstr.pr_id (id_of_decl decl)) princ_info.branches ++ fnl () ++ *) (* str "args := " ++ prlist_with_sep spc Ppconstr.pr_id args_id *) (* ); *) (* observe_tac "instancing" *) (instanciate_hyps_with_args prove_tac (List.rev_map id_of_decl princ_info.branches) (List.rev args_id)) ] g ); ] gl with Not_found -> let nb_args = min (princ_info.nargs) (List.length ctxt) in tclTHENSEQ [ tclDO nb_args intro; (fun g -> (* 
replacement of the function by its body *) let args = nLastDecls nb_args g in let args_id = List.map (fun (id,_,_) -> id) args in let dyn_infos = { nb_rec_hyps = -100; rec_hyps = []; info = Reductionops.nf_betaiota Evd.empty (applist(fbody_with_full_params, (List.rev_map var_of_decl princ_params)@ (List.rev_map mkVar args_id) )); eq_hyps = [] } in let fname = destConst (fst (decompose_app (List.hd (List.rev pte_args)))) in tclTHENSEQ [unfold_in_concl [(Termops.all_occurrences, Names.EvalConstRef fname)]; let do_prove = build_proof interactive_proof (Array.to_list fnames) (Idmap.map prove_rec_hyp ptes_to_fix) in let prove_tac branches = let dyn_infos = {dyn_infos with rec_hyps = branches; nb_rec_hyps = List.length branches } in clean_goal_with_heq (Idmap.map prove_rec_hyp ptes_to_fix) do_prove dyn_infos in instanciate_hyps_with_args prove_tac (List.rev_map id_of_decl princ_info.branches) (List.rev args_id) ] g ) ] gl in tclTHEN first_tac intros_after_fixes g (* Proof of principles of general functions *) let h_id = Recdef.h_id and hrec_id = Recdef.hrec_id and acc_inv_id = Recdef.acc_inv_id and ltof_ref = Recdef.ltof_ref and acc_rel = Recdef.acc_rel and well_founded = Recdef.well_founded and h_intros = Recdef.h_intros and list_rewrite = Recdef.list_rewrite and evaluable_of_global_reference = Recdef.evaluable_of_global_reference let prove_with_tcc tcc_lemma_constr eqs : tactic = match !tcc_lemma_constr with | None -> anomaly "No tcc proof !!" | Some lemma -> fun gls -> (* let hid = next_ident_away_in_goal h_id (pf_ids_of_hyps gls) in *) (* let ids = hid::pf_ids_of_hyps gls in *) tclTHENSEQ [ (* generalize [lemma]; *) (* h_intro hid; *) (* Elim.h_decompose_and (mkVar hid); *) tclTRY(list_rewrite true eqs); (* (fun g -> *) (* let ids' = pf_ids_of_hyps g in *) (* let ids = List.filter (fun id -> not (List.mem id ids)) ids' in *) (* rewrite *) (* ) *) Eauto.gen_eauto (false,5) [] (Some []) ] gls let backtrack_eqs_until_hrec hrec eqs : tactic = fun gls -> let eqs = List.map mkVar eqs in let rewrite = tclFIRST (List.map Equality.rewriteRL eqs ) in let _,hrec_concl = decompose_prod (pf_type_of gls (mkVar hrec)) in let f_app = array_last (snd (destApp hrec_concl)) in let f = (fst (destApp f_app)) in let rec backtrack : tactic = fun g -> let f_app = array_last (snd (destApp (pf_concl g))) in match kind_of_term f_app with | App(f',_) when eq_constr f' f -> tclIDTAC g | _ -> tclTHEN rewrite backtrack g in backtrack gls let build_clause eqs = { Tacexpr.onhyps = Some (List.map (fun id -> (Glob_term.all_occurrences_expr, id), Termops.InHyp) eqs ); Tacexpr.concl_occs = Glob_term.no_occurrences_expr } let rec rewrite_eqs_in_eqs eqs = match eqs with | [] -> tclIDTAC | eq::eqs -> tclTHEN (tclMAP (fun id gl -> observe_tac (Format.sprintf "rewrite %s in %s " (string_of_id eq) (string_of_id id)) (tclTRY (Equality.general_rewrite_in true Termops.all_occurrences true (* dep proofs also: *) true id (mkVar eq) false)) gl ) eqs ) (rewrite_eqs_in_eqs eqs) let new_prove_with_tcc is_mes acc_inv hrec tcc_hyps eqs : tactic = fun gls -> (tclTHENSEQ [ backtrack_eqs_until_hrec hrec eqs; (* observe_tac ("new_prove_with_tcc ( applying "^(string_of_id hrec)^" )" ) *) (tclTHENS (* We must have exactly ONE subgoal !*) (apply (mkVar hrec)) [ tclTHENSEQ [ keep (tcc_hyps@eqs); apply (Lazy.force acc_inv); (fun g -> if is_mes then unfold_in_concl [(Termops.all_occurrences, evaluable_of_global_reference (delayed_force ltof_ref))] g else tclIDTAC g ); observe_tac "rew_and_finish" (tclTHENLIST [tclTRY(Recdef.list_rewrite false (List.map 
mkVar eqs)); observe_tac "rewrite_eqs_in_eqs" (rewrite_eqs_in_eqs eqs); (observe_tac "finishing using" ( tclCOMPLETE( Eauto.eauto_with_bases (true,5) [Evd.empty,Lazy.force refl_equal] [Auto.Hint_db.empty empty_transparent_state false] ) ) ) ] ) ] ]) ]) gls let is_valid_hypothesis predicates_name = let predicates_name = List.fold_right Idset.add predicates_name Idset.empty in let is_pte typ = if isApp typ then let pte,_ = destApp typ in if isVar pte then Idset.mem (destVar pte) predicates_name else false else false in let rec is_valid_hypothesis typ = is_pte typ || match kind_of_term typ with | Prod(_,pte,typ') -> is_pte pte && is_valid_hypothesis typ' | _ -> false in is_valid_hypothesis let prove_principle_for_gen (f_ref,functional_ref,eq_ref) tcc_lemma_ref is_mes rec_arg_num rec_arg_type relation gl = let princ_type = pf_concl gl in let princ_info = compute_elim_sig princ_type in let fresh_id = let avoid = ref (pf_ids_of_hyps gl) in fun na -> let new_id = match na with | Name id -> fresh_id !avoid (string_of_id id) | Anonymous -> fresh_id !avoid "H" in avoid := new_id :: !avoid; Name new_id in let fresh_decl (na,b,t) = (fresh_id na,b,t) in let princ_info : elim_scheme = { princ_info with params = List.map fresh_decl princ_info.params; predicates = List.map fresh_decl princ_info.predicates; branches = List.map fresh_decl princ_info.branches; args = List.map fresh_decl princ_info.args } in let wf_tac = if is_mes then (fun b -> Recdef.tclUSER_if_not_mes tclIDTAC b None) else fun _ -> prove_with_tcc tcc_lemma_ref [] in let real_rec_arg_num = rec_arg_num - princ_info.nparams in let npost_rec_arg = princ_info.nargs - real_rec_arg_num + 1 in (* observe ( *) (* str "princ_type := " ++ pr_lconstr princ_type ++ fnl () ++ *) (* str "princ_info.nparams := " ++ int princ_info.nparams ++ fnl () ++ *) (* str "princ_info.nargs := " ++ int princ_info.nargs ++ fnl () ++ *) (* str "rec_arg_num := " ++ int rec_arg_num ++ fnl() ++ *) (* str "real_rec_arg_num := " ++ int real_rec_arg_num ++ fnl () ++ *) (* str "npost_rec_arg := " ++ int npost_rec_arg ); *) let (post_rec_arg,pre_rec_arg) = Util.list_chop npost_rec_arg princ_info.args in let rec_arg_id = match List.rev post_rec_arg with | (Name id,_,_)::_ -> id | _ -> assert false in (* observe (str "rec_arg_id := " ++ pr_lconstr (mkVar rec_arg_id)); *) let subst_constrs = List.map (fun (na,_,_) -> mkVar (Nameops.out_name na)) (pre_rec_arg@princ_info.params) in let relation = substl subst_constrs relation in let input_type = substl subst_constrs rec_arg_type in let wf_thm_id = Nameops.out_name (fresh_id (Name (id_of_string "wf_R"))) in let acc_rec_arg_id = Nameops.out_name (fresh_id (Name (id_of_string ("Acc_"^(string_of_id rec_arg_id))))) in let revert l = tclTHEN (h_generalize (List.map mkVar l)) (clear l) in let fix_id = Nameops.out_name (fresh_id (Name hrec_id)) in let prove_rec_arg_acc g = ((* observe_tac "prove_rec_arg_acc" *) (tclCOMPLETE (tclTHEN (assert_by (Name wf_thm_id) (mkApp (delayed_force well_founded,[|input_type;relation|])) (fun g -> (* observe_tac "prove wf" *) (tclCOMPLETE (wf_tac is_mes)) g)) ( (* observe_tac *) (* "apply wf_thm" *) h_simplest_apply (mkApp(mkVar wf_thm_id,[|mkVar rec_arg_id|])) ) ) ) ) g in let args_ids = List.map (fun (na,_,_) -> Nameops.out_name na) princ_info.args in let lemma = match !tcc_lemma_ref with | None -> anomaly ( "No tcc proof !!") | Some lemma -> lemma in (* let rec list_diff del_list check_list = *) (* match del_list with *) (* [] -> *) (* [] *) (* | f::r -> *) (* if List.mem f check_list then *) (* list_diff 
r check_list *) (* else *) (* f::(list_diff r check_list) *) (* in *) let tcc_list = ref [] in let start_tac gls = let hyps = pf_ids_of_hyps gls in let hid = next_ident_away_in_goal (id_of_string "prov") hyps in tclTHENSEQ [ generalize [lemma]; h_intro hid; Elim.h_decompose_and (mkVar hid); (fun g -> let new_hyps = pf_ids_of_hyps g in tcc_list := List.rev (list_subtract new_hyps (hid::hyps)); if !tcc_list = [] then begin tcc_list := [hid]; tclIDTAC g end else thin [hid] g ) ] gls in tclTHENSEQ [ observe_tac "start_tac" start_tac; h_intros (List.rev_map (fun (na,_,_) -> Nameops.out_name na) (princ_info.args@princ_info.branches@princ_info.predicates@princ_info.params) ); (* observe_tac "" *) (assert_by (Name acc_rec_arg_id) (mkApp (delayed_force acc_rel,[|input_type;relation;mkVar rec_arg_id|])) (prove_rec_arg_acc) ); (* observe_tac "reverting" *) (revert (List.rev (acc_rec_arg_id::args_ids))); (* (fun g -> observe (Printer.pr_goal (sig_it g) ++ fnl () ++ *) (* str "fix arg num" ++ int (List.length args_ids + 1) ); tclIDTAC g); *) (* observe_tac "h_fix " *) (h_fix (Some fix_id) (List.length args_ids + 1)); (* (fun g -> observe (Printer.pr_goal (sig_it g) ++ fnl() ++ pr_lconstr_env (pf_env g ) (pf_type_of g (mkVar fix_id) )); tclIDTAC g); *) h_intros (List.rev (acc_rec_arg_id::args_ids)); Equality.rewriteLR (mkConst eq_ref); (* observe_tac "finish" *) (fun gl' -> let body = let _,args = destApp (pf_concl gl') in array_last args in let body_info rec_hyps = { nb_rec_hyps = List.length rec_hyps; rec_hyps = rec_hyps; eq_hyps = []; info = body } in let acc_inv = lazy ( mkApp ( delayed_force acc_inv_id, [|input_type;relation;mkVar rec_arg_id|] ) ) in let acc_inv = lazy (mkApp(Lazy.force acc_inv, [|mkVar acc_rec_arg_id|])) in let predicates_names = List.map (fun (na,_,_) -> Nameops.out_name na) princ_info.predicates in let pte_info = { proving_tac = (fun eqs -> (* msgnl (str "tcc_list := "++ prlist_with_sep spc Ppconstr.pr_id !tcc_list); *) (* msgnl (str "princ_info.args := "++ prlist_with_sep spc Ppconstr.pr_id (List.map (fun (na,_,_) -> (Nameops.out_name na)) princ_info.args)); *) (* msgnl (str "princ_info.params := "++ prlist_with_sep spc Ppconstr.pr_id (List.map (fun (na,_,_) -> (Nameops.out_name na)) princ_info.params)); *) (* msgnl (str "acc_rec_arg_id := "++ Ppconstr.pr_id acc_rec_arg_id); *) (* msgnl (str "eqs := "++ prlist_with_sep spc Ppconstr.pr_id eqs); *) (* observe_tac "new_prove_with_tcc" *) (new_prove_with_tcc is_mes acc_inv fix_id (!tcc_list@(List.map (fun (na,_,_) -> (Nameops.out_name na)) (princ_info.args@princ_info.params) )@ ([acc_rec_arg_id])) eqs ) ); is_valid = is_valid_hypothesis predicates_names } in let ptes_info : pte_info Idmap.t = List.fold_left (fun map pte_id -> Idmap.add pte_id pte_info map ) Idmap.empty predicates_names in let make_proof rec_hyps = build_proof false [f_ref] ptes_info (body_info rec_hyps) in (* observe_tac "instanciate_hyps_with_args" *) (instanciate_hyps_with_args make_proof (List.map (fun (na,_,_) -> Nameops.out_name na) princ_info.branches) (List.rev args_ids) ) gl' ) ] gl coq-8.4pl2/plugins/funind/glob_term_to_relation.mli0000640000175000001440000000102411505230575021647 0ustar notinusers (* [build_inductive parametrize funnames funargs returned_types bodies] constructs and saves the graphs of the functions [funnames] taking [funargs] as arguments and returning [returned_types] using bodies [bodies] *) val build_inductive : Names.identifier list -> (* The list of function name *) (Names.name*Glob_term.glob_constr*bool) list list -> (* The list of 
function args *) Topconstr.constr_expr list -> (* The list of function returned type *) Glob_term.glob_constr list -> (* the list of body *) unit coq-8.4pl2/plugins/funind/indfun.ml0000640000175000001440000007250012121620060016402 0ustar notinusersopen Util open Names open Term open Pp open Indfun_common open Libnames open Glob_term open Declarations let is_rec_info scheme_info = let test_branche min acc (_,_,br) = acc || ( let new_branche = it_mkProd_or_LetIn mkProp (fst (decompose_prod_assum br)) in let free_rels_in_br = Termops.free_rels new_branche in let max = min + scheme_info.Tactics.npredicates in Util.Intset.exists (fun i -> i >= min && i< max) free_rels_in_br ) in Util.list_fold_left_i test_branche 1 false (List.rev scheme_info.Tactics.branches) let choose_dest_or_ind scheme_info = if is_rec_info scheme_info then Tactics.new_induct false else Tactics.new_destruct false let functional_induction with_clean c princl pat = Dumpglob.pause (); let res = let f,args = decompose_app c in fun g -> let princ,bindings, princ_type = match princl with | None -> (* No principle is given let's find the good one *) begin match kind_of_term f with | Const c' -> let princ_option = let finfo = (* we first try to find out a graph on f *) try find_Function_infos c' with Not_found -> errorlabstrm "" (str "Cannot find induction information on "++ Printer.pr_lconstr (mkConst c') ) in match Tacticals.elimination_sort_of_goal g with | InProp -> finfo.prop_lemma | InSet -> finfo.rec_lemma | InType -> finfo.rect_lemma in let princ = (* then we get the principle *) try mkConst (Option.get princ_option ) with Option.IsNone -> (*i If there is not default lemma defined then, we cross our finger and try to find a lemma named f_ind (or f_rec, f_rect) i*) let princ_name = Indrec.make_elimination_ident (id_of_label (con_label c')) (Tacticals.elimination_sort_of_goal g) in try mkConst(const_of_id princ_name ) with Not_found -> (* This one is neither defined ! 
*) errorlabstrm "" (str "Cannot find induction principle for " ++Printer.pr_lconstr (mkConst c') ) in (princ,Glob_term.NoBindings, Tacmach.pf_type_of g princ) | _ -> raise (UserError("",str "functional induction must be used with a function" )) end | Some ((princ,binding)) -> princ,binding,Tacmach.pf_type_of g princ in let princ_infos = Tactics.compute_elim_sig princ_type in let args_as_induction_constr = let c_list = if princ_infos.Tactics.farg_in_concl then [c] else [] in List.map (fun c -> Tacexpr.ElimOnConstr (Evd.empty,(c,NoBindings))) (args@c_list) in let princ' = Some (princ,bindings) in let princ_vars = List.fold_right (fun a acc -> try Idset.add (destVar a) acc with e when Errors.noncritical e -> acc ) args Idset.empty in let old_idl = List.fold_right Idset.add (Tacmach.pf_ids_of_hyps g) Idset.empty in let old_idl = Idset.diff old_idl princ_vars in let subst_and_reduce g = if with_clean then let idl = map_succeed (fun id -> if Idset.mem id old_idl then failwith "subst_and_reduce"; id ) (Tacmach.pf_ids_of_hyps g) in let flag = Glob_term.Cbv {Glob_term.all_flags with Glob_term.rDelta = false; } in Tacticals.tclTHEN (Tacticals.tclMAP (fun id -> Tacticals.tclTRY (Equality.subst_gen (do_rewrite_dependent ()) [id])) idl ) (Hiddentac.h_reduce flag Tacticals.allHypsAndConcl) g else Tacticals.tclIDTAC g in Tacticals.tclTHEN (choose_dest_or_ind princ_infos args_as_induction_constr princ' (None,pat) None) subst_and_reduce g in Dumpglob.continue (); res let rec abstract_glob_constr c = function | [] -> c | Topconstr.LocalRawDef (x,b)::bl -> Topconstr.mkLetInC(x,b,abstract_glob_constr c bl) | Topconstr.LocalRawAssum (idl,k,t)::bl -> List.fold_right (fun x b -> Topconstr.mkLambdaC([x],k,t,b)) idl (abstract_glob_constr c bl) let interp_casted_constr_with_implicits sigma env impls c = Constrintern.intern_gen false sigma env ~impls ~allow_patvar:false ~ltacvars:([],[]) c (* Construct a fixpoint as a Glob_term and not as a constr *) let build_newrecursive lnameargsardef = let env0 = Global.env() and sigma = Evd.empty in let (rec_sign,rec_impls) = List.fold_left (fun (env,impls) ((_,recname),bl,arityc,_) -> let arityc = Topconstr.prod_constr_expr arityc bl in let arity = Constrintern.interp_type sigma env0 arityc in let impl = Constrintern.compute_internalization_data env0 Constrintern.Recursive arity [] in (Environ.push_named (recname,None,arity) env, Idmap.add recname impl impls)) (env0,Constrintern.empty_internalization_env) lnameargsardef in let recdef = (* Declare local notations *) let fs = States.freeze() in let def = try List.map (fun (_,bl,_,def) -> let def = abstract_glob_constr def bl in interp_casted_constr_with_implicits sigma rec_sign rec_impls def ) lnameargsardef with reraise -> States.unfreeze fs; raise reraise in States.unfreeze fs; def in recdef,rec_impls let build_newrecursive l = let l' = List.map (fun ((fixna,_,bll,ar,body_opt),lnot) -> match body_opt with | Some body -> (fixna,bll,ar,body) | None -> user_err_loc (dummy_loc,"Function",str "Body of Function must be given") ) l in build_newrecursive l' (* Checks whether or not the mutual bloc is recursive *) let rec is_rec names = let names = List.fold_right Idset.add names Idset.empty in let check_id id names = Idset.mem id names in let rec lookup names = function | GVar(_,id) -> check_id id names | GRef _ | GEvar _ | GPatVar _ | GSort _ | GHole _ -> false | GCast(_,b,_) -> lookup names b | GRec _ -> error "GRec not handled" | GIf(_,b,_,lhs,rhs) -> (lookup names b) || (lookup names lhs) || (lookup names rhs) | GLetIn(_,na,t,b) | 
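(* for the binding constructions (let-in, lambda, product): search the bound
   term or type, then the body with the bound name removed from the
   candidates *)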
GLambda(_,na,_,t,b) | GProd(_,na,_,t,b) -> lookup names t || lookup (Nameops.name_fold Idset.remove na names) b | GLetTuple(_,nal,_,t,b) -> lookup names t || lookup (List.fold_left (fun acc na -> Nameops.name_fold Idset.remove na acc) names nal ) b | GApp(_,f,args) -> List.exists (lookup names) (f::args) | GCases(_,_,_,el,brl) -> List.exists (fun (e,_) -> lookup names e) el || List.exists (lookup_br names) brl and lookup_br names (_,idl,_,rt) = let new_names = List.fold_right Idset.remove idl names in lookup new_names rt in lookup names let rec local_binders_length = function (* Assume that no `{ ... } contexts occur *) | [] -> 0 | Topconstr.LocalRawDef _::bl -> 1 + local_binders_length bl | Topconstr.LocalRawAssum (idl,_,_)::bl -> List.length idl + local_binders_length bl let prepare_body ((name,_,args,types,_),_) rt = let n = local_binders_length args in (* Pp.msgnl (str "nb lambda to chop : " ++ str (string_of_int n) ++ fnl () ++Printer.pr_glob_constr rt); *) let fun_args,rt' = chop_rlambda_n n rt in (fun_args,rt') let derive_inversion fix_names = try (* we first transform the fix_names identifier into their corresponding constant *) let fix_names_as_constant = List.map (fun id -> destConst (Constrintern.global_reference id)) fix_names in (* Then we check that the graphs have been defined If one of the graphs haven't been defined we do nothing *) List.iter (fun c -> ignore (find_Function_infos c)) fix_names_as_constant ; try Invfun.derive_correctness Functional_principles_types.make_scheme functional_induction fix_names_as_constant (*i The next call to mk_rel_id is valid since we have just construct the graph Ensures by : register_built i*) (List.map (fun id -> destInd (Constrintern.global_reference (mk_rel_id id))) fix_names ) with e when Errors.noncritical e -> let e' = Cerrors.process_vernac_interp_error e in msg_warning (str "Cannot build inversion information" ++ if do_observe () then (fnl() ++ Errors.print e') else mt ()) with e when Errors.noncritical e -> () let warning_error names e = let e = Cerrors.process_vernac_interp_error e in let e_explain e = match e with | ToShow e -> spc () ++ Errors.print e | _ -> if do_observe () then (spc () ++ Errors.print e) else mt () in match e with | Building_graph e -> Pp.msg_warning (str "Cannot define graph(s) for " ++ h 1 (prlist_with_sep (fun _ -> str","++spc ()) Ppconstr.pr_id names) ++ e_explain e) | Defining_principle e -> Pp.msg_warning (str "Cannot define principle(s) for "++ h 1 (prlist_with_sep (fun _ -> str","++spc ()) Ppconstr.pr_id names) ++ e_explain e) | _ -> raise e let error_error names e = let e = Cerrors.process_vernac_interp_error e in let e_explain e = match e with | ToShow e -> spc () ++ Errors.print e | _ -> if do_observe () then (spc () ++ Errors.print e) else mt () in match e with | Building_graph e -> errorlabstrm "" (str "Cannot define graph(s) for " ++ h 1 (prlist_with_sep (fun _ -> str","++spc ()) Ppconstr.pr_id names) ++ e_explain e) | _ -> raise e let generate_principle on_error is_general do_built (fix_rec_l:(Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) list) recdefs interactive_proof (continue_proof : int -> Names.constant array -> Term.constr array -> int -> Tacmach.tactic) : unit = let names = List.map (function ((_, name),_,_,_,_),_ -> name) fix_rec_l in let fun_bodies = List.map2 prepare_body fix_rec_l recdefs in let funs_args = List.map fst fun_bodies in let funs_types = List.map (function ((_,_,_,types,_),_) -> types) fix_rec_l in try (* We then register the Inductive graphs of the 
functions *) Glob_term_to_relation.build_inductive names funs_args funs_types recdefs; if do_built then begin (*i The next call to mk_rel_id is valid since we have just construct the graph Ensures by : do_built i*) let f_R_mut = Ident (dummy_loc,mk_rel_id (List.nth names 0)) in let ind_kn = fst (locate_with_msg (pr_reference f_R_mut++str ": Not an inductive type!") locate_ind f_R_mut) in let fname_kn ((fname,_,_,_,_),_) = let f_ref = Ident fname in locate_with_msg (pr_reference f_ref++str ": Not an inductive type!") locate_constant f_ref in let funs_kn = Array.of_list (List.map fname_kn fix_rec_l) in let _ = list_map_i (fun i x -> let princ = destConst (Indrec.lookup_eliminator (ind_kn,i) (InProp)) in let princ_type = Typeops.type_of_constant (Global.env()) princ in Functional_principles_types.generate_functional_principle interactive_proof princ_type None None funs_kn i (continue_proof 0 [|funs_kn.(i)|]) ) 0 fix_rec_l in Array.iter (add_Function is_general) funs_kn; () end with e when Errors.noncritical e -> on_error names e let register_struct is_rec (fixpoint_exprl:(Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) list) = match fixpoint_exprl with | [((_,fname),_,bl,ret_type,body),_] when not is_rec -> let body = match body with | Some body -> body | None -> user_err_loc (dummy_loc,"Function",str "Body of Function must be given") in let ce,imps = Command.interp_definition bl None body (Some ret_type) in Command.declare_definition fname (Decl_kinds.Global,Decl_kinds.Definition) ce imps (fun _ _ -> ()) | _ -> Command.do_fixpoint fixpoint_exprl let generate_correction_proof_wf f_ref tcc_lemma_ref is_mes functional_ref eq_ref rec_arg_num rec_arg_type nb_args relation (_: int) (_:Names.constant array) (_:Term.constr array) (_:int) : Tacmach.tactic = Functional_principles_proofs.prove_principle_for_gen (f_ref,functional_ref,eq_ref) tcc_lemma_ref is_mes rec_arg_num rec_arg_type relation let register_wf ?(is_mes=false) fname rec_impls wf_rel_expr wf_arg using_lemmas args ret_type body pre_hook = let type_of_f = Topconstr.prod_constr_expr ret_type args in let rec_arg_num = let names = List.map snd (Topconstr.names_of_local_assums args) in match wf_arg with | None -> if List.length names = 1 then 1 else error "Recursive argument must be specified" | Some wf_arg -> list_index (Name wf_arg) names in let unbounded_eq = let f_app_args = Topconstr.CAppExpl (dummy_loc, (None,(Ident (dummy_loc,fname))) , (List.map (function | _,Anonymous -> assert false | _,Name e -> (Topconstr.mkIdentC e) ) (Topconstr.names_of_local_assums args) ) ) in Topconstr.CApp (dummy_loc,(None,Topconstr.mkRefC (Qualid (dummy_loc,(qualid_of_string "Logic.eq")))), [(f_app_args,None);(body,None)]) in let eq = Topconstr.prod_constr_expr unbounded_eq args in let hook f_ref tcc_lemma_ref functional_ref eq_ref rec_arg_num rec_arg_type nb_args relation = try pre_hook (generate_correction_proof_wf f_ref tcc_lemma_ref is_mes functional_ref eq_ref rec_arg_num rec_arg_type nb_args relation ); derive_inversion [fname] with e when Errors.noncritical e -> (* No proof done *) () in Recdef.recursive_definition is_mes fname rec_impls type_of_f wf_rel_expr rec_arg_num eq hook using_lemmas let register_mes fname rec_impls wf_mes_expr wf_rel_expr_opt wf_arg using_lemmas args ret_type body = let wf_arg_type,wf_arg = match wf_arg with | None -> begin match args with | [Topconstr.LocalRawAssum ([(_,Name x)],k,t)] -> t,x | _ -> error "Recursive argument must be specified" end | Some wf_args -> try match List.find (function | 
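(* locate the binder group that declares the chosen well-founded argument *)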
Topconstr.LocalRawAssum(l,k,t) -> List.exists (function (_,Name id) -> id = wf_args | _ -> false) l | _ -> false ) args with | Topconstr.LocalRawAssum(_,k,t) -> t,wf_args | _ -> assert false with Not_found -> assert false in let wf_rel_from_mes,is_mes = match wf_rel_expr_opt with | None -> let ltof = let make_dir l = make_dirpath (List.map id_of_string (List.rev l)) in Libnames.Qualid (dummy_loc,Libnames.qualid_of_path (Libnames.make_path (make_dir ["Arith";"Wf_nat"]) (id_of_string "ltof"))) in let fun_from_mes = let applied_mes = Topconstr.mkAppC(wf_mes_expr,[Topconstr.mkIdentC wf_arg]) in Topconstr.mkLambdaC ([(dummy_loc,Name wf_arg)],Topconstr.default_binder_kind,wf_arg_type,applied_mes) in let wf_rel_from_mes = Topconstr.mkAppC(Topconstr.mkRefC ltof,[wf_arg_type;fun_from_mes]) in wf_rel_from_mes,true | Some wf_rel_expr -> let wf_rel_with_mes = let a = Names.id_of_string "___a" in let b = Names.id_of_string "___b" in Topconstr.mkLambdaC( [dummy_loc,Name a;dummy_loc,Name b], Topconstr.Default Lib.Explicit, wf_arg_type, Topconstr.mkAppC(wf_rel_expr, [ Topconstr.mkAppC(wf_mes_expr,[Topconstr.mkIdentC a]); Topconstr.mkAppC(wf_mes_expr,[Topconstr.mkIdentC b]) ]) ) in wf_rel_with_mes,false in register_wf ~is_mes:is_mes fname rec_impls wf_rel_from_mes (Some wf_arg) using_lemmas args ret_type body let map_option f = function | None -> None | Some v -> Some (f v) let decompose_lambda_n_assum_constr_expr = let rec decompose_lambda_n_assum_constr_expr acc n e = if n = 0 then (List.rev acc,e) else match e with | Topconstr.CLambdaN(_, [],e') -> decompose_lambda_n_assum_constr_expr acc n e' | Topconstr.CLambdaN(lambda_loc,(nal,bk,nal_type)::bl,e') -> let nal_length = List.length nal in if nal_length <= n then decompose_lambda_n_assum_constr_expr (Topconstr.LocalRawAssum(nal,bk,nal_type)::acc) (n - nal_length) (Topconstr.CLambdaN(lambda_loc,bl,e')) else let nal_keep,nal_expr = list_chop n nal in (List.rev (Topconstr.LocalRawAssum(nal_keep,bk,nal_type)::acc), Topconstr.CLambdaN(lambda_loc,(nal_expr,bk,nal_type)::bl,e') ) | Topconstr.CLetIn(_, na,nav,e') -> decompose_lambda_n_assum_constr_expr (Topconstr.LocalRawDef(na,nav)::acc) (pred n) e' | _ -> error "Not enough product or assumption" in decompose_lambda_n_assum_constr_expr [] let decompose_prod_n_assum_constr_expr = let rec decompose_prod_n_assum_constr_expr acc n e = (* Pp.msgnl (str "n := " ++ int n ++ fnl ()++ *) (* str "e := " ++ Ppconstr.pr_lconstr_expr e); *) if n = 0 then (* let _ = Pp.msgnl (str "return_type := " ++ Ppconstr.pr_lconstr_expr e) in *) (List.rev acc,e) else match e with | Topconstr.CProdN(_, [],e') -> decompose_prod_n_assum_constr_expr acc n e' | Topconstr.CProdN(lambda_loc,(nal,bk,nal_type)::bl,e') -> let nal_length = List.length nal in if nal_length <= n then (* let _ = Pp.msgnl (str "first case") in *) decompose_prod_n_assum_constr_expr (Topconstr.LocalRawAssum(nal,bk,nal_type)::acc) (n - nal_length) (if bl = [] then e' else (Topconstr.CLambdaN(lambda_loc,bl,e'))) else (* let _ = Pp.msgnl (str "second case") in *) let nal_keep,nal_expr = list_chop n nal in (List.rev (Topconstr.LocalRawAssum(nal_keep,bk,nal_type)::acc), Topconstr.CLambdaN(lambda_loc,(nal_expr,bk,nal_type)::bl,e') ) | Topconstr.CArrow(_,premisse,concl) -> (* let _ = Pp.msgnl (str "arrow case") in *) decompose_prod_n_assum_constr_expr (Topconstr.LocalRawAssum([dummy_loc,Names.Anonymous], Topconstr.Default Lib.Explicit,premisse) ::acc) (pred n) concl | Topconstr.CLetIn(_, na,nav,e') -> decompose_prod_n_assum_constr_expr (Topconstr.LocalRawDef(na,nav)::acc) 
(pred n) e' | _ -> error "Not enough product or assumption" in decompose_prod_n_assum_constr_expr [] open Topconstr let id_of_name = function | Name id -> id | _ -> assert false let rec rebuild_bl (aux,assoc) bl typ = match bl,typ with | [], _ -> (List.rev aux,replace_vars_constr_expr assoc typ,assoc) | (Topconstr.LocalRawAssum(nal,bk,_))::bl',typ -> rebuild_nal (aux,assoc) bk bl' nal (List.length nal) typ | (Topconstr.LocalRawDef(na,_))::bl',CLetIn(_,_,nat,typ') -> rebuild_bl ((Topconstr.LocalRawDef(na,replace_vars_constr_expr assoc nat)::aux),assoc) bl' typ' | _ -> assert false and rebuild_nal (aux,assoc) bk bl' nal lnal typ = match nal,typ with | [], _ -> rebuild_bl (aux,assoc) bl' typ | na::nal,CArrow(_,nat,typ') -> rebuild_nal ((LocalRawAssum([na],bk,replace_vars_constr_expr assoc nat))::aux,assoc) bk bl' nal (pred lnal) typ' | _,CProdN(_,[],typ) -> rebuild_nal (aux,assoc) bk bl' nal lnal typ | _,CProdN(_,(nal',bk',nal't)::rest,typ') -> let lnal' = List.length nal' in if lnal' >= lnal then let old_nal',new_nal' = list_chop lnal nal' in rebuild_bl ((LocalRawAssum(nal,bk,replace_vars_constr_expr assoc nal't)::aux),(List.rev_append (List.combine (List.map id_of_name (List.map snd old_nal')) (List.map id_of_name (List.map snd nal))) assoc)) bl' (if new_nal' = [] && rest = [] then typ' else if new_nal' = [] then CProdN(dummy_loc,rest,typ') else CProdN(dummy_loc,((new_nal',bk',nal't)::rest),typ')) else let captured_nal,non_captured_nal = list_chop lnal' nal in rebuild_nal ((LocalRawAssum(captured_nal,bk,replace_vars_constr_expr assoc nal't)::aux), (List.rev_append (List.combine (List.map id_of_name (List.map snd captured_nal)) ((List.map id_of_name (List.map snd nal)))) assoc)) bk bl' non_captured_nal (lnal - lnal') (CProdN(dummy_loc,rest,typ')) | _ -> assert false let rebuild_bl (aux,assoc) bl typ = rebuild_bl (aux,assoc) bl typ let recompute_binder_list (fixpoint_exprl : (Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) list) = let fixl,ntns = Command.extract_fixpoint_components false fixpoint_exprl in let ((_,_,typel),_) = Command.interp_fixpoint fixl ntns in let constr_expr_typel = with_full_print (List.map (Constrextern.extern_constr false (Global.env ()))) typel in let fixpoint_exprl_with_new_bl = List.map2 (fun ((lna,(rec_arg_opt,rec_order),bl,ret_typ,opt_body),notation_list) fix_typ -> let new_bl',new_ret_type,_ = rebuild_bl ([],[]) bl fix_typ in (((lna,(rec_arg_opt,rec_order),new_bl',new_ret_type,opt_body),notation_list):(Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list)) ) fixpoint_exprl constr_expr_typel in fixpoint_exprl_with_new_bl let do_generate_principle on_error register_built interactive_proof (fixpoint_exprl:(Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) list) :unit = List.iter (fun (_,l) -> if l <> [] then error "Function does not support notations for now") fixpoint_exprl; let _is_struct = match fixpoint_exprl with | [((_,(wf_x,Topconstr.CWfRec wf_rel),_,_,_),_) as fixpoint_expr] -> let ((((_,name),_,args,types,body)),_) as fixpoint_expr = match recompute_binder_list [fixpoint_expr] with | [e] -> e | _ -> assert false in let fixpoint_exprl = [fixpoint_expr] in let body = match body with | Some body -> body | None -> user_err_loc (dummy_loc,"Function",str "Body of Function must be given") in let recdefs,rec_impls = build_newrecursive fixpoint_exprl in let using_lemmas = [] in let pre_hook = generate_principle on_error true register_built fixpoint_exprl recdefs true in if register_built then register_wf name rec_impls wf_rel (map_option snd 
wf_x) using_lemmas args types body pre_hook; false |[((_,(wf_x,Topconstr.CMeasureRec(wf_mes,wf_rel_opt)),_,_,_),_) as fixpoint_expr] -> let ((((_,name),_,args,types,body)),_) as fixpoint_expr = match recompute_binder_list [fixpoint_expr] with | [e] -> e | _ -> assert false in let fixpoint_exprl = [fixpoint_expr] in let recdefs,rec_impls = build_newrecursive fixpoint_exprl in let using_lemmas = [] in let body = match body with | Some body -> body | None -> user_err_loc (dummy_loc,"Function",str "Body of Function must be given") in let pre_hook = generate_principle on_error true register_built fixpoint_exprl recdefs true in if register_built then register_mes name rec_impls wf_mes wf_rel_opt (map_option snd wf_x) using_lemmas args types body pre_hook; true | _ -> List.iter (function ((_na,(_,ord),_args,_body,_type),_not) -> match ord with | Topconstr.CMeasureRec _ | Topconstr.CWfRec _ -> error ("Cannot use mutual definition with well-founded recursion or measure") | _ -> () ) fixpoint_exprl; let fixpoint_exprl = recompute_binder_list fixpoint_exprl in let fix_names = List.map (function (((_,name),_,_,_,_),_) -> name) fixpoint_exprl in (* ok all the expressions are structural *) let recdefs,rec_impls = build_newrecursive fixpoint_exprl in let is_rec = List.exists (is_rec fix_names) recdefs in if register_built then register_struct is_rec fixpoint_exprl; generate_principle on_error false register_built fixpoint_exprl recdefs interactive_proof (Functional_principles_proofs.prove_princ_for_struct interactive_proof); if register_built then derive_inversion fix_names; true; in () open Topconstr let rec add_args id new_args b = match b with | CRef r -> begin match r with | Libnames.Ident(loc,fname) when fname = id -> CAppExpl(dummy_loc,(None,r),new_args) | _ -> b end | CFix _ | CCoFix _ -> anomaly "add_args : todo" | CArrow(loc,b1,b2) -> CArrow(loc,add_args id new_args b1, add_args id new_args b2) | CProdN(loc,nal,b1) -> CProdN(loc, List.map (fun (nal,k,b2) -> (nal,k,add_args id new_args b2)) nal, add_args id new_args b1) | CLambdaN(loc,nal,b1) -> CLambdaN(loc, List.map (fun (nal,k,b2) -> (nal,k,add_args id new_args b2)) nal, add_args id new_args b1) | CLetIn(loc,na,b1,b2) -> CLetIn(loc,na,add_args id new_args b1,add_args id new_args b2) | CAppExpl(loc,(pf,r),exprl) -> begin match r with | Libnames.Ident(loc,fname) when fname = id -> CAppExpl(loc,(pf,r),new_args@(List.map (add_args id new_args) exprl)) | _ -> CAppExpl(loc,(pf,r),List.map (add_args id new_args) exprl) end | CApp(loc,(pf,b),bl) -> CApp(loc,(pf,add_args id new_args b), List.map (fun (e,o) -> add_args id new_args e,o) bl) | CCases(loc,sty,b_option,cel,cal) -> CCases(loc,sty,Option.map (add_args id new_args) b_option, List.map (fun (b,(na,b_option)) -> add_args id new_args b, (na,Option.map (add_args id new_args) b_option)) cel, List.map (fun (loc,cpl,e) -> (loc,cpl,add_args id new_args e)) cal ) | CLetTuple(loc,nal,(na,b_option),b1,b2) -> CLetTuple(loc,nal,(na,Option.map (add_args id new_args) b_option), add_args id new_args b1, add_args id new_args b2 ) | CIf(loc,b1,(na,b_option),b2,b3) -> CIf(loc,add_args id new_args b1, (na,Option.map (add_args id new_args) b_option), add_args id new_args b2, add_args id new_args b3 ) | CHole _ -> b | CPatVar _ -> b | CEvar _ -> b | CSort _ -> b | CCast(loc,b1,CastConv(ck,b2)) -> CCast(loc,add_args id new_args b1,CastConv(ck,add_args id new_args b2)) | CCast(loc,b1,CastCoerce) -> CCast(loc,add_args id new_args b1,CastCoerce) | CRecord (loc, w, pars) -> CRecord (loc, (match w with Some w -> Some 
(add_args id new_args w) | _ -> None), List.map (fun (e,o) -> e, add_args id new_args o) pars) | CNotation _ -> anomaly "add_args : CNotation" | CGeneralization _ -> anomaly "add_args : CGeneralization" | CPrim _ -> b | CDelimiters _ -> anomaly "add_args : CDelimiters" exception Stop of Topconstr.constr_expr (* [chop_n_arrow n t] chops the [n] first arrows in [t] Acts on Topconstr.constr_expr *) let rec chop_n_arrow n t = if n <= 0 then t (* If we have already removed all the arrows then return the type *) else (* If not we check the form of [t] *) match t with | Topconstr.CArrow(_,_,t) -> (* If we have an arrow, we discard it and recall [chop_n_arrow] *) chop_n_arrow (n-1) t | Topconstr.CProdN(_,nal_ta',t') -> (* If we have a forall, to result are possible : either we need to discard more than the number of arrows contained in this product declaration then we just recall [chop_n_arrow] on the remaining number of arrow to chop and [t'] we discard it and recall [chop_n_arrow], either this product contains more arrows than the number we need to chop and then we return the new type *) begin try let new_n = let rec aux (n:int) = function [] -> n | (nal,k,t'')::nal_ta' -> let nal_l = List.length nal in if n >= nal_l then aux (n - nal_l) nal_ta' else let new_t' = Topconstr.CProdN(dummy_loc, ((snd (list_chop n nal)),k,t'')::nal_ta',t') in raise (Stop new_t') in aux n nal_ta' in chop_n_arrow new_n t' with Stop t -> t end | _ -> anomaly "Not enough products" let rec get_args b t : Topconstr.local_binder list * Topconstr.constr_expr * Topconstr.constr_expr = match b with | Topconstr.CLambdaN (loc, (nal_ta), b') -> begin let n = (List.fold_left (fun n (nal,_,_) -> n+List.length nal) 0 nal_ta ) in let nal_tas,b'',t'' = get_args b' (chop_n_arrow n t) in (List.map (fun (nal,k,ta) -> (Topconstr.LocalRawAssum (nal,k,ta))) nal_ta)@nal_tas, b'',t'' end | _ -> [],b,t let make_graph (f_ref:global_reference) = let c,c_body = match f_ref with | ConstRef c -> begin try c,Global.lookup_constant c with Not_found -> raise (UserError ("",str "Cannot find " ++ Printer.pr_lconstr (mkConst c)) ) end | _ -> raise (UserError ("", str "Not a function reference") ) in Dumpglob.pause (); (match body_of_constant c_body with | None -> error "Cannot build a graph over an axiom !" 
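(* When the constant does have a body, the [Some b] branch below
   re-externalizes the body and its type, splits off the outer lambdas with
   [get_args], rebuilds a [Vernacexpr.fixpoint_expr] for each fixpoint
   component (threading the extra arguments through the recursive calls with
   [add_args]), feeds the result to [do_generate_principle], and finally
   records the new function(s) with [add_Function]. *)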
| Some b -> let env = Global.env () in let body = (force b) in let extern_body,extern_type = with_full_print (fun () -> (Constrextern.extern_constr false env body, Constrextern.extern_type false env (Typeops.type_of_constant_type env c_body.const_type) ) ) () in let (nal_tas,b,t) = get_args extern_body extern_type in let expr_list = match b with | Topconstr.CFix(loc,l_id,fixexprl) -> let l = List.map (fun (id,(n,recexp),bl,t,b) -> let loc, rec_id = Option.get n in let new_args = List.flatten (List.map (function | Topconstr.LocalRawDef (na,_)-> [] | Topconstr.LocalRawAssum (nal,_,_) -> List.map (fun (loc,n) -> CRef(Libnames.Ident(loc, Nameops.out_name n))) nal ) nal_tas ) in let b' = add_args (snd id) new_args b in (((id, ( Some (dummy_loc,rec_id),CStructRec),nal_tas@bl,t,Some b'),[]):(Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list)) ) fixexprl in l | _ -> let id = id_of_label (con_label c) in [((dummy_loc,id),(None,Topconstr.CStructRec),nal_tas,t,Some b),[]] in do_generate_principle error_error false false expr_list; (* We register the infos *) let mp,dp,_ = repr_con c in List.iter (fun (((_,id),_,_,_,_),_) -> add_Function false (make_con mp dp (label_of_id id))) expr_list); Dumpglob.continue () let do_generate_principle = do_generate_principle warning_error true coq-8.4pl2/plugins/funind/Recdef.v0000640000175000001440000000323012010532755016150 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A) -> A -> A := fun (fl : A -> A) (def : A) => match n with | O => def | S m => fl (iter m fl def) end. End Iter. Theorem SSplus_lt : forall p p' : nat, p < S (S (p + p')). intro p; intro p'; change (S p <= S (S (p + p'))); apply le_S; apply Gt.gt_le_S; change (p < S (p + p')); apply Lt.le_lt_n_Sm; apply Plus.le_plus_l. Qed. Theorem Splus_lt : forall p p' : nat, p' < S (p + p'). intro p; intro p'; change (S p' <= S (p + p')); apply Gt.gt_le_S; change (p' < S (p + p')); apply Lt.le_lt_n_Sm; apply Plus.le_plus_r. Qed. Theorem le_lt_SS : forall x y, x <= y -> x < S (S y). intro x; intro y; intro H; change (S x <= S (S y)); apply le_S; apply Gt.gt_le_S; change (x < S y); apply Lt.le_lt_n_Sm; exact H. Qed. Inductive max_type (m n:nat) : Set := cmt : forall v, m <= v -> n <= v -> max_type m n. Definition max : forall m n:nat, max_type m n. intros m n; case (Compare_dec.le_gt_dec m n). intros h; exists n; [exact h | apply le_n]. intros h; exists m; [apply le_n | apply Lt.lt_le_weak; exact h]. Defined. 
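(* Informal summary, with a small example for illustration (the example is
   not part of the original file): [iter A n f x] applies [f] to [x] exactly
   [n] times, so [Eval compute in iter nat 3 S 0] yields [3]; the termination
   machinery instantiates it at a function space in order to iterate the
   functional a bounded number of times.  Unlike the usual [max] on [nat],
   the [max] defined above packages the maximum of [m] and [n] together with
   the proofs [m <= v] and [n <= v] (a [max_type]), which is the form needed
   when two iteration bounds are combined.  Lemmas such as [le_lt_SS] are the
   small arithmetic facts invoked by the generated proof scripts. *)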
coq-8.4pl2/plugins/funind/functional_principles_types.mli0000640000175000001440000000152711505230601023112 0ustar notinusersopen Names open Term val generate_functional_principle : (* do we accept interactive proving *) bool -> (* induction principle on rel *) types -> (* *) sorts array option -> (* Name of the new principle *) (identifier) option -> (* the compute functions to use *) constant array -> (* We prove the nth- principle *) int -> (* The tactic to use to make the proof w.r the number of params *) (constr array -> int -> Tacmach.tactic) -> unit val compute_new_princ_type_from_rel : constr array -> sorts array -> types -> types exception No_graph_found val make_scheme : (constant*Glob_term.glob_sort) list -> Entries.definition_entry list val build_scheme : (identifier*Libnames.reference*Glob_term.glob_sort) list -> unit val build_case_scheme : (identifier*Libnames.reference*Glob_term.glob_sort) -> unit coq-8.4pl2/plugins/funind/recdef.ml0000640000175000001440000014042512121620060016351 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* next_global_ident_away id (acc@ids)::acc) idl [] let pf_get_new_id id g = List.hd (pf_get_new_ids [id] g) let h_intros l = tclMAP h_intro l let debug_queue = Stack.create () let rec print_debug_queue b e = if not (Stack.is_empty debug_queue) then begin let lmsg,goal = Stack.pop debug_queue in if b then msgnl (lmsg ++ (str " raised exception " ++ Errors.print e) ++ str " on goal " ++ goal) else begin msgnl (str " from " ++ lmsg ++ str " on goal " ++ goal); end; print_debug_queue false e; end let do_observe_tac s tac g = let goal = Printer.pr_goal g in let lmsg = (str "recdef : ") ++ (str s) in Stack.push (lmsg,goal) debug_queue; try let v = tac g in ignore(Stack.pop debug_queue); v with reraise -> if not (Stack.is_empty debug_queue) then print_debug_queue true reraise; raise reraise let observe_tac s tac g = if Tacinterp.get_debug () <> Tactic_debug.DebugOff then do_observe_tac s tac g else tac g let hyp_ids = List.map id_of_string ["x";"v";"k";"def";"p";"h";"n";"h'"; "anonymous"; "teq"; "rec_res"; "hspec";"heq"; "hrec"; "hex"; "teq"; "pmax";"hle"];; let rec nthtl = function l, 0 -> l | _::tl, n -> nthtl (tl, n-1) | [], _ -> [];; let hyp_id n l = List.nth l n;; let (x_id:identifier) = hyp_id 0 hyp_ids;; let (v_id:identifier) = hyp_id 1 hyp_ids;; let (k_id:identifier) = hyp_id 2 hyp_ids;; let (def_id:identifier) = hyp_id 3 hyp_ids;; let (p_id:identifier) = hyp_id 4 hyp_ids;; let (h_id:identifier) = hyp_id 5 hyp_ids;; let (n_id:identifier) = hyp_id 6 hyp_ids;; let (h'_id:identifier) = hyp_id 7 hyp_ids;; let (ano_id:identifier) = hyp_id 8 hyp_ids;; let (rec_res_id:identifier) = hyp_id 10 hyp_ids;; let (hspec_id:identifier) = hyp_id 11 hyp_ids;; let (heq_id:identifier) = hyp_id 12 hyp_ids;; let (hrec_id:identifier) = hyp_id 13 hyp_ids;; let (hex_id:identifier) = hyp_id 14 hyp_ids;; let (teq_id:identifier) = hyp_id 15 hyp_ids;; let (pmax_id:identifier) = hyp_id 16 hyp_ids;; let (hle_id:identifier) = hyp_id 17 hyp_ids;; let message s = if Flags.is_verbose () then msgnl(str s);; let def_of_const t = match (kind_of_term t) with Const sp -> (try (match body_of_constant (Global.lookup_constant sp) with | Some c -> Declarations.force c | _ -> assert false) with e when Errors.noncritical e -> anomaly ("Cannot find definition of constant "^ (string_of_id (id_of_label (con_label sp)))) ) |_ -> assert false let type_of_const t = match (kind_of_term t) with 
Const sp -> Typeops.type_of_constant (Global.env()) sp |_ -> assert false let arg_type t = match kind_of_term (def_of_const t) with Lambda(a,b,c) -> b | _ -> assert false;; let evaluable_of_global_reference r = match r with ConstRef sp -> EvalConstRef sp | VarRef id -> EvalVarRef id | _ -> assert false;; let rank_for_arg_list h = let predicate a b = try List.for_all2 eq_constr a b with Invalid_argument _ -> false in let rec rank_aux i = function | [] -> None | x::tl -> if predicate h x then Some i else rank_aux (i+1) tl in rank_aux 0;; let rec check_not_nested f t = match kind_of_term t with | App(g, _) when eq_constr f g -> errorlabstrm "recdef" (str "Nested recursive function are not allowed with Function") | Var(_) when eq_constr t f -> errorlabstrm "recdef" (str "Nested recursive function are not allowed with Function") | _ -> iter_constr (check_not_nested f) t let rec (find_call_occs : int -> int -> constr -> constr -> (constr list -> constr) * constr list list) = fun nb_arg nb_lam f expr -> match (kind_of_term expr) with App (g, args) when eq_constr g f -> if Array.length args <> nb_arg then errorlabstrm "recdef" (str "Partial application of function " ++ Printer.pr_lconstr expr ++ str " in its body is not allowed while using Function"); Array.iter (check_not_nested f) args; (fun l -> List.hd l), [Array.to_list args] | App (g, args) -> let (largs: constr list) = Array.to_list args in let rec find_aux = function [] -> (fun x -> []), [] | a::upper_tl -> (match find_aux upper_tl with (cf, ((arg1::args) as args_for_upper_tl)) -> (match find_call_occs nb_arg nb_lam f a with cf2, (_ :: _ as other_args) -> let rec avoid_duplicates args = match args with | [] -> (fun _ -> []), [] | h::tl -> let recomb_tl, args_for_tl = avoid_duplicates tl in match rank_for_arg_list h args_for_upper_tl with | None -> (fun l -> List.hd l::recomb_tl(List.tl l)), h::args_for_tl | Some i -> (fun l -> List.nth l (i+List.length args_for_tl):: recomb_tl l), args_for_tl in let recombine, other_args' = avoid_duplicates other_args in let len1 = List.length other_args' in (fun l -> cf2 (recombine l)::cf(nthtl(l,len1))), other_args'@args_for_upper_tl | _, [] -> (fun x -> a::cf x), args_for_upper_tl) | _, [] -> (match find_call_occs nb_arg nb_lam f a with cf, (arg1::args) -> (fun l -> cf l::upper_tl), (arg1::args) | _, [] -> (fun x -> a::upper_tl), [])) in begin match (find_aux largs) with cf, [] -> (fun l -> mkApp(g, args)), [] | cf, args -> (fun l -> mkApp (g, Array.of_list (cf l))), args end | Rel(v) -> if v > nb_lam then error "find_call_occs : Rel" else ((fun l -> expr),[]) | Var(_) when eq_constr expr f -> errorlabstrm "recdef" (str "Partial application of function " ++ Printer.pr_lconstr expr ++ str " in its body is not allowed while using Function") | Var(id) -> (fun l -> expr), [] | Meta(_) -> error "Found a metavariable. Can not treat such a term" | Evar(_) -> error "Found an evar. Can not treat such a term" | Sort(_) -> (fun l -> expr), [] | Cast(b,_,_) -> find_call_occs nb_arg nb_lam f b | Prod(na,t,b) -> error "Found a product. Can not treat such a term" | Lambda(na,t,b) -> begin match find_call_occs nb_arg (succ nb_lam) f b with | _, [] -> (* Lambda are authorized as long as they do not contain recursives calls *) (fun l -> expr),[] | _ -> error "Found a lambda which body contains a recursive call. 
Such terms are not allowed" end | LetIn(na,v,t,b) -> begin match find_call_occs nb_arg nb_lam f v, find_call_occs nb_arg (succ nb_lam) f b with | (_,[]),(_,[]) -> ((fun l -> expr), []) | (_,[]),(cf,(_::_ as l)) -> ((fun l -> mkLetIn(na,v,t,cf l)),l) | (cf,(_::_ as l)),(_,[]) -> ((fun l -> mkLetIn(na,cf l,t,b)), l) | _ -> error "Found a letin with recursive calls in both variable value and body. Such terms are not allowed." end | Const(_) -> (fun l -> expr), [] | Ind(_) -> (fun l -> expr), [] | Construct (_, _) -> (fun l -> expr), [] | Case(i,t,a,r) -> (match find_call_occs nb_arg nb_lam f a with cf, (arg1::args) -> (fun l -> mkCase(i, t, (cf l), r)),(arg1::args) | _ -> (fun l -> expr),[]) | Fix(_) -> error "Found a local fixpoint. Can not treat such a term" | CoFix(_) -> error "Found a local cofixpoint : CoFix";; let coq_constant s = Coqlib.gen_constant_in_modules "RecursiveDefinition" (Coqlib.init_modules @ Coqlib.arith_modules) s;; let coq_base_constant s = Coqlib.gen_constant_in_modules "RecursiveDefinition" (Coqlib.init_modules @ [["Coq";"Arith";"Le"];["Coq";"Arith";"Lt"]]) s;; let constant sl s = constr_of_global (locate (make_qualid(Names.make_dirpath (List.map id_of_string (List.rev sl))) (id_of_string s)));; let find_reference sl s = (locate (make_qualid(Names.make_dirpath (List.map id_of_string (List.rev sl))) (id_of_string s)));; let le_lt_SS = function () -> (constant ["Recdef"] "le_lt_SS") let le_lt_n_Sm = function () -> (coq_base_constant "le_lt_n_Sm") let le_trans = function () -> (coq_base_constant "le_trans") let le_lt_trans = function () -> (coq_base_constant "le_lt_trans") let lt_S_n = function () -> (coq_base_constant "lt_S_n") let le_n = function () -> (coq_base_constant "le_n") let refl_equal = function () -> (coq_base_constant "eq_refl") let eq = function () -> (coq_base_constant "eq") let ex = function () -> (coq_base_constant "ex") let coq_sig_ref = function () -> (find_reference ["Coq";"Init";"Specif"] "sig") let coq_sig = function () -> (coq_base_constant "sig") let coq_O = function () -> (coq_base_constant "O") let coq_S = function () -> (coq_base_constant "S") let gt_antirefl = function () -> (coq_constant "gt_irrefl") let lt_n_O = function () -> (coq_base_constant "lt_n_O") let lt_n_Sn = function () -> (coq_base_constant "lt_n_Sn") let f_equal = function () -> (coq_constant "f_equal") let well_founded_induction = function () -> (coq_constant "well_founded_induction") let well_founded = function () -> (coq_constant "well_founded") let acc_rel = function () -> (coq_constant "Acc") let acc_inv_id = function () -> (coq_constant "Acc_inv") let well_founded_ltof = function () -> (Coqlib.coq_constant "" ["Arith";"Wf_nat"] "well_founded_ltof") let iter_ref = function () -> (try find_reference ["Recdef"] "iter" with Not_found -> error "module Recdef not loaded") let max_ref = function () -> (find_reference ["Recdef"] "max") let iter = function () -> (constr_of_global (delayed_force iter_ref)) let max_constr = function () -> (constr_of_global (delayed_force max_ref)) let ltof_ref = function () -> (find_reference ["Coq";"Arith";"Wf_nat"] "ltof") let coq_conj = function () -> find_reference ["Coq";"Init";"Logic"] "conj" (* These are specific to experiments in nat with lt as well_founded_relation, *) (* but this should be made more general. *) let nat = function () -> (coq_base_constant "nat") let lt = function () -> (coq_base_constant "lt") (* This is simply an implementation of the case_eq tactic. 
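   It generalizes a proof of [a = a], abstracts [a] in the conclusion, and
   then performs case analysis on [a];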
this code should be replaced with the tactic defined in Ltac in Init/Tactics.v *) let mkCaseEq a : tactic = (fun g -> let type_of_a = pf_type_of g a in tclTHENLIST [h_generalize [mkApp(delayed_force refl_equal, [| type_of_a; a|])]; (fun g2 -> change_in_concl None (pattern_occs [((false,[1]), a)] (pf_env g2) Evd.empty (pf_concl g2)) g2); simplest_case a] g);; (* This is like the previous one except that it also rewrite on all hypotheses except the ones given in the first argument. All the modified hypotheses are generalized in the process and should be introduced back later; the result is the pair of the tactic and the list of hypotheses that have been generalized and cleared. *) let mkDestructEq : identifier list -> constr -> goal sigma -> tactic * identifier list = fun not_on_hyp expr g -> let hyps = pf_hyps g in let to_revert = Util.map_succeed (fun (id,_,t) -> if List.mem id not_on_hyp || not (Termops.occur_term expr t) then failwith "is_expr_context"; id) hyps in let to_revert_constr = List.rev_map mkVar to_revert in let type_of_expr = pf_type_of g expr in let new_hyps = mkApp(delayed_force refl_equal, [|type_of_expr; expr|]):: to_revert_constr in tclTHENLIST [h_generalize new_hyps; (fun g2 -> change_in_concl None (pattern_occs [((false,[1]), expr)] (pf_env g2) Evd.empty (pf_concl g2)) g2); simplest_case expr], to_revert let rec mk_intros_and_continue thin_intros (extra_eqn:bool) cont_function (eqs:constr list) nb_lam (expr:constr) g = observe_tac "mk_intros_and_continue" ( let finalize () = if extra_eqn then let teq = pf_get_new_id teq_id g in tclTHENLIST [ h_intro teq; thin thin_intros; h_intros thin_intros; tclMAP (fun eq -> tclTRY (Equality.general_rewrite_in true Termops.all_occurrences true (* deps proofs also: *) true teq eq false)) (List.rev eqs); (fun g1 -> let ty_teq = pf_type_of g1 (mkVar teq) in let teq_lhs,teq_rhs = let _,args = try destApp ty_teq with e when Errors.noncritical e -> Pp.msgnl (Printer.pr_goal g1 ++ fnl () ++ pr_id teq ++ str ":" ++ Printer.pr_lconstr ty_teq); assert false in args.(1),args.(2) in cont_function (mkVar teq::eqs) (Termops.replace_term teq_lhs teq_rhs expr) g1 ) ] else tclTHENSEQ[ thin thin_intros; h_intros thin_intros; cont_function eqs expr ] in if nb_lam = 0 then finalize () else match kind_of_term expr with | Lambda (n, _, b) -> let n1 = match n with Name x -> x | Anonymous -> ano_id in let new_n = pf_get_new_id n1 g in tclTHEN (h_intro new_n) (mk_intros_and_continue thin_intros extra_eqn cont_function eqs (pred nb_lam) (subst1 (mkVar new_n) b)) | _ -> assert false) g (* finalize () *) let const_of_ref = function ConstRef kn -> kn | _ -> anomaly "ConstRef expected" let simpl_iter clause = reduce (Lazy {rBeta=true;rIota=true;rZeta= true; rDelta=false; rConst = [ EvalConstRef (const_of_ref (delayed_force iter_ref))]}) (* (Simpl (Some ([],mkConst (const_of_ref (delayed_force iter_ref))))) *) clause (* The boolean value is_mes expresses that the termination is expressed using a measure function instead of a well-founded relation. 
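   When the measure form is used, [tclUSER] first unfolds [ltof] in the goal,
   so that the obligation handed back to the user is phrased directly in
   terms of the measure rather than of the derived relation.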
*) let tclUSER tac is_mes l g = let clear_tac = match l with | None -> h_clear true [] | Some l -> tclMAP (fun id -> tclTRY (h_clear false [id])) (List.rev l) in tclTHENSEQ [ clear_tac; if is_mes then tclTHEN (unfold_in_concl [(Termops.all_occurrences, evaluable_of_global_reference (delayed_force ltof_ref))]) tac else tac ] g let list_rewrite (rev:bool) (eqs: constr list) = tclREPEAT (List.fold_right (fun eq i -> tclORELSE (rewriteLR eq) i) (if rev then (List.rev eqs) else eqs) (tclFAIL 0 (mt())));; let base_leaf_terminate (func:global_reference) eqs expr = (* let _ = msgnl (str "entering base_leaf") in *) (fun g -> let k',h = match pf_get_new_ids [k_id;h_id] g with [k';h] -> k',h | _ -> assert false in tclTHENLIST [observe_tac "first split" (split (ImplicitBindings [expr])); observe_tac "second split" (split (ImplicitBindings [delayed_force coq_O])); observe_tac "intro k" (h_intro k'); observe_tac "case on k" (tclTHENS (simplest_case (mkVar k')) [(tclTHEN (h_intro h) (tclTHEN (simplest_elim (mkApp (delayed_force gt_antirefl, [| delayed_force coq_O |]))) default_auto)); tclIDTAC ]); intros; simpl_iter onConcl; unfold_constr func; list_rewrite true eqs; default_auto] g);; (* La fonction est donnee en premier argument a la fonctionnelle suivie d'autres Lambdas et de Case ... Pour recuperer la fonction f a partir de la fonctionnelle *) let get_f foncl = match (kind_of_term (def_of_const foncl)) with Lambda (Name f, _, _) -> f |_ -> error "la fonctionnelle est mal definie";; let rec compute_le_proofs = function [] -> assumption | a::tl -> tclORELSE assumption (tclTHENS (fun g -> let le_trans = delayed_force le_trans in let t_le_trans = compute_renamed_type g le_trans in let m_id = let _,_,t = destProd t_le_trans in let na,_,_ = destProd t in Nameops.out_name na in apply_with_bindings (le_trans, ExplicitBindings[dummy_loc,NamedHyp m_id,a]) g) [compute_le_proofs tl; tclORELSE (apply (delayed_force le_n)) assumption]) let make_lt_proof pmax le_proof = tclTHENS (fun g -> let le_lt_trans = delayed_force le_lt_trans in let t_le_lt_trans = compute_renamed_type g le_lt_trans in let m_id = let _,_,t = destProd t_le_lt_trans in let na,_,_ = destProd t in Nameops.out_name na in apply_with_bindings (le_lt_trans, ExplicitBindings[dummy_loc,NamedHyp m_id, pmax]) g) [observe_tac "compute_le_proofs" (compute_le_proofs le_proof); tclTHENLIST[observe_tac "lt_S_n" (apply (delayed_force lt_S_n)); default_full_auto]];; let rec list_cond_rewrite k def pmax cond_eqs le_proofs = match cond_eqs with [] -> tclIDTAC | eq::eqs -> (fun g -> let t_eq = compute_renamed_type g (mkVar eq) in let k_id,def_id = let k_na,_,t = destProd t_eq in let _,_,t = destProd t in let def_na,_,_ = destProd t in Nameops.out_name k_na,Nameops.out_name def_na in tclTHENS (general_rewrite_bindings false Termops.all_occurrences (* dep proofs also: *) true true (mkVar eq, ExplicitBindings[dummy_loc, NamedHyp k_id, mkVar k; dummy_loc, NamedHyp def_id, mkVar def]) false) [list_cond_rewrite k def pmax eqs le_proofs; observe_tac "make_lt_proof" (make_lt_proof pmax le_proofs)] g ) let rec introduce_all_equalities func eqs values specs bound le_proofs cond_eqs = match specs with [] -> fun g -> let ids = pf_ids_of_hyps g in let s_max = mkApp(delayed_force coq_S, [|bound|]) in let k = next_ident_away_in_goal k_id ids in let ids = k::ids in let h' = next_ident_away_in_goal (h'_id) ids in let ids = h'::ids in let def = next_ident_away_in_goal def_id ids in tclTHENLIST [observe_tac "introduce_all_equalities_final split" (split (ImplicitBindings [s_max])); 
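(* [S bound] is supplied as the witness for the iteration bound; the tactics
   that follow introduce [k], dismiss the contradictory [k = 0] case with
   [lt_n_O], unfold the functional, rewrite with the (conditional) equations
   and conclude by reflexivity *)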
observe_tac "introduce_all_equalities_final intro k" (h_intro k); tclTHENS (observe_tac "introduce_all_equalities_final case k" (simplest_case (mkVar k))) [ tclTHENLIST[h_intro h'; simplest_elim(mkApp(delayed_force lt_n_O,[|s_max|])); default_full_auto]; tclIDTAC ]; observe_tac "clearing k " (clear [k]); observe_tac "intros k h' def" (h_intros [k;h';def]); observe_tac "simple_iter" (simpl_iter onConcl); observe_tac "unfold functional" (unfold_in_concl[((true,[1]),evaluable_of_global_reference func)]); observe_tac "rewriting equations" (list_rewrite true eqs); observe_tac ("cond rewrite "^(string_of_id k)) (list_cond_rewrite k def bound cond_eqs le_proofs); observe_tac "refl equal" (apply (delayed_force refl_equal))] g | spec1::specs -> fun g -> let ids = Termops.ids_of_named_context (pf_hyps g) in let p = next_ident_away_in_goal p_id ids in let ids = p::ids in let pmax = next_ident_away_in_goal pmax_id ids in let ids = pmax::ids in let hle1 = next_ident_away_in_goal hle_id ids in let ids = hle1::ids in let hle2 = next_ident_away_in_goal hle_id ids in let ids = hle2::ids in let heq = next_ident_away_in_goal heq_id ids in tclTHENLIST [simplest_elim (mkVar spec1); list_rewrite true eqs; h_intros [p; heq]; simplest_elim (mkApp(delayed_force max_constr, [| bound; mkVar p|])); h_intros [pmax; hle1; hle2]; introduce_all_equalities func eqs values specs (mkVar pmax) ((mkVar pmax)::le_proofs) (heq::cond_eqs)] g;; let string_match s = if String.length s < 3 then failwith "string_match"; try for i = 0 to 3 do if String.get s i <> String.get "Acc_" i then failwith "string_match" done; with Invalid_argument _ -> failwith "string_match" let retrieve_acc_var g = (* Julien: I don't like this version .... *) let hyps = pf_ids_of_hyps g in map_succeed (fun id -> string_match (string_of_id id);id) hyps let rec introduce_all_values concl_tac is_mes acc_inv func context_fn eqs hrec args values specs = (match args with [] -> tclTHENLIST [observe_tac "split" (split(ImplicitBindings [context_fn (List.map mkVar (List.rev values))])); observe_tac "introduce_all_equalities" (introduce_all_equalities func eqs (List.rev values) (List.rev specs) (delayed_force coq_O) [] [])] | arg::args -> (fun g -> let ids = Termops.ids_of_named_context (pf_hyps g) in let rec_res = next_ident_away_in_goal rec_res_id ids in let ids = rec_res::ids in let hspec = next_ident_away_in_goal hspec_id ids in let tac = observe_tac "introduce_all_values" ( introduce_all_values concl_tac is_mes acc_inv func context_fn eqs hrec args (rec_res::values)(hspec::specs)) in (tclTHENS (observe_tac "elim h_rec" (simplest_elim (mkApp(mkVar hrec, Array.of_list arg))) ) [tclTHENLIST [h_intros [rec_res; hspec]; tac]; (tclTHENS (observe_tac "acc_inv" (apply (Lazy.force acc_inv))) [(* tclTHEN (tclTRY(list_rewrite true eqs)) *) (observe_tac "h_assumption" h_assumption) ; tclTHENLIST [ tclTRY(list_rewrite true eqs); observe_tac "user proof" (fun g -> tclUSER concl_tac is_mes (Some (hrec::hspec::(retrieve_acc_var g)@specs)) g ) ] ] ) ]) g) ) let rec_leaf_terminate nb_arg f_constr concl_tac is_mes acc_inv hrec (func:global_reference) eqs expr = match find_call_occs nb_arg 0 f_constr expr with | context_fn, args -> observe_tac "introduce_all_values" (introduce_all_values concl_tac is_mes acc_inv func context_fn eqs hrec args [] []) let proveterminate nb_arg rec_arg_id is_mes acc_inv (hrec:identifier) (f_constr:constr) (func:global_reference) base_leaf rec_leaf = let rec proveterminate (eqs:constr list) (expr:constr) = try (* let _ = msgnl (str "entering 
proveterminate") in *) let v = match (kind_of_term expr) with Case (ci, t, a, l) -> (match find_call_occs nb_arg 0 f_constr a with _,[] -> (fun g -> let destruct_tac, rev_to_thin_intro = mkDestructEq rec_arg_id a g in tclTHENS destruct_tac (list_map_i (fun i -> mk_intros_and_continue (List.rev rev_to_thin_intro) true proveterminate eqs ci.ci_cstr_ndecls.(i)) 0 (Array.to_list l)) g) | _, _::_ -> (match find_call_occs nb_arg 0 f_constr expr with _,[] -> observe_tac "base_leaf" (base_leaf func eqs expr) | _, _:: _ -> observe_tac "rec_leaf" (rec_leaf is_mes acc_inv hrec func eqs expr))) | _ -> (match find_call_occs nb_arg 0 f_constr expr with _,[] -> (try observe_tac "base_leaf" (base_leaf func eqs expr) with reraise -> (msgerrnl (str "failure in base case");raise reraise )) | _, _::_ -> observe_tac "rec_leaf" (rec_leaf is_mes acc_inv hrec func eqs expr)) in v with reraise -> begin msgerrnl(str "failure in proveterminate"); raise reraise end in proveterminate let hyp_terminates nb_args func = let a_arrow_b = arg_type (constr_of_global func) in let rev_args,b = decompose_prod_n nb_args a_arrow_b in let left = mkApp(delayed_force iter, Array.of_list (lift 5 a_arrow_b:: mkRel 3:: constr_of_global func::mkRel 1:: List.rev (list_map_i (fun i _ -> mkRel (6+i)) 0 rev_args) ) ) in let right = mkRel 5 in let equality = mkApp(delayed_force eq, [|lift 5 b; left; right|]) in let result = (mkProd ((Name def_id) , lift 4 a_arrow_b, equality)) in let cond = mkApp(delayed_force lt, [|(mkRel 2); (mkRel 1)|]) in let nb_iter = mkApp(delayed_force ex, [|delayed_force nat; (mkLambda (Name p_id, delayed_force nat, (mkProd (Name k_id, delayed_force nat, mkArrow cond result))))|])in let value = mkApp(delayed_force coq_sig, [|b; (mkLambda (Name v_id, b, nb_iter))|]) in compose_prod rev_args value let tclUSER_if_not_mes concl_tac is_mes names_to_suppress = if is_mes then tclCOMPLETE (h_simplest_apply (delayed_force well_founded_ltof)) else tclUSER concl_tac is_mes names_to_suppress let termination_proof_header is_mes input_type ids args_id relation rec_arg_num rec_arg_id tac wf_tac : tactic = begin fun g -> let nargs = List.length args_id in let pre_rec_args = List.rev_map mkVar (fst (list_chop (rec_arg_num - 1) args_id)) in let relation = substl pre_rec_args relation in let input_type = substl pre_rec_args input_type in let wf_thm = next_ident_away_in_goal (id_of_string ("wf_R")) ids in let wf_rec_arg = next_ident_away_in_goal (id_of_string ("Acc_"^(string_of_id rec_arg_id))) (wf_thm::ids) in let hrec = next_ident_away_in_goal hrec_id (wf_rec_arg::wf_thm::ids) in let acc_inv = lazy ( mkApp ( delayed_force acc_inv_id, [|input_type;relation;mkVar rec_arg_id|] ) ) in tclTHEN (h_intros args_id) (tclTHENS (observe_tac "first assert" (assert_tac (Name wf_rec_arg) (mkApp (delayed_force acc_rel, [|input_type;relation;mkVar rec_arg_id|]) ) ) ) [ (* accesibility proof *) tclTHENS (observe_tac "second assert" (assert_tac (Name wf_thm) (mkApp (delayed_force well_founded,[|input_type;relation|])) ) ) [ (* interactive proof that the relation is well_founded *) observe_tac "wf_tac" (wf_tac is_mes (Some args_id)); (* this gives the accessibility argument *) observe_tac "apply wf_thm" (h_simplest_apply (mkApp(mkVar wf_thm,[|mkVar rec_arg_id|])) ) ] ; (* rest of the proof *) tclTHENSEQ [observe_tac "generalize" (onNLastHypsId (nargs+1) (tclMAP (fun id -> tclTHEN (h_generalize [mkVar id]) (h_clear false [id])) )) ; observe_tac "h_fix" (h_fix (Some hrec) (nargs+1)); h_intros args_id; h_intro wf_rec_arg; observe_tac "tac" (tac wf_rec_arg 
hrec acc_inv) ] ] ) g end let rec instantiate_lambda t l = match l with | [] -> t | a::l -> let (bound_name, _, body) = destLambda t in instantiate_lambda (subst1 a body) l ;; let whole_start (concl_tac:tactic) nb_args is_mes func input_type relation rec_arg_num : tactic = begin fun g -> let ids = Termops.ids_of_named_context (pf_hyps g) in let func_body = (def_of_const (constr_of_global func)) in let (f_name, _, body1) = destLambda func_body in let f_id = match f_name with | Name f_id -> next_ident_away_in_goal f_id ids | Anonymous -> anomaly "Anonymous function" in let n_names_types,_ = decompose_lam_n nb_args body1 in let n_ids,ids = List.fold_left (fun (n_ids,ids) (n_name,_) -> match n_name with | Name id -> let n_id = next_ident_away_in_goal id ids in n_id::n_ids,n_id::ids | _ -> anomaly "anonymous argument" ) ([],(f_id::ids)) n_names_types in let rec_arg_id = List.nth n_ids (rec_arg_num - 1) in let expr = instantiate_lambda func_body (mkVar f_id::(List.map mkVar n_ids)) in termination_proof_header is_mes input_type ids n_ids relation rec_arg_num rec_arg_id (fun rec_arg_id hrec acc_inv g -> (proveterminate nb_args [rec_arg_id] is_mes acc_inv hrec (mkVar f_id) func base_leaf_terminate (rec_leaf_terminate nb_args (mkVar f_id) concl_tac) [] expr ) g ) (tclUSER_if_not_mes concl_tac) g end let get_current_subgoals_types () = let p = Proof_global.give_me_the_proof () in let { Evd.it=sgs ; sigma=sigma } = Proof.V82.subgoals p in List.map (Goal.V82.abstract_type sigma) sgs let build_and_l l = let and_constr = Coqlib.build_coq_and () in let conj_constr = coq_conj () in let mk_and p1 p2 = Term.mkApp(and_constr,[|p1;p2|]) in let rec is_well_founded t = match kind_of_term t with | Prod(_,_,t') -> is_well_founded t' | App(_,_) -> let (f,_) = decompose_app t in eq_constr f (well_founded ()) | _ -> assert false in let compare t1 t2 = let b1,b2= is_well_founded t1,is_well_founded t2 in if (b1&&b2) || not (b1 || b2) then 0 else if b1 && not b2 then 1 else -1 in let l = List.sort compare l in let rec f = function | [] -> failwith "empty list of subgoals!" 
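(* [f] returns the conjunction of the remaining goals, a tactic that splits
   that conjunction back into the individual subgoals by repeated application
   of [conj], and the number of conjuncts; for instance on three goals it
   builds [g1 /\ (g2 /\ g3)] with a splitting tactic of depth 2.  The list
   was sorted just above so that the well-foundedness obligations come
   last. *)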
| [p] -> p,tclIDTAC,1 | p1::pl -> let c,tac,nb = f pl in mk_and p1 c, tclTHENS (apply (constr_of_global conj_constr)) [tclIDTAC; tac ],nb+1 in f l let is_rec_res id = let rec_res_name = string_of_id rec_res_id in let id_name = string_of_id id in try String.sub id_name 0 (String.length rec_res_name) = rec_res_name with e when Errors.noncritical e -> false let clear_goals = let rec clear_goal t = match kind_of_term t with | Prod(Name id as na,t',b) -> let b' = clear_goal b in if noccurn 1 b' && (is_rec_res id) then Termops.pop b' else if b' == b then t else mkProd(na,t',b') | _ -> map_constr clear_goal t in List.map clear_goal let build_new_goal_type () = let sub_gls_types = get_current_subgoals_types () in (* Pp.msgnl (str "sub_gls_types1 := " ++ Util.prlist_with_sep (fun () -> Pp.fnl () ++ Pp.fnl ()) Printer.pr_lconstr sub_gls_types); *) let sub_gls_types = clear_goals sub_gls_types in (* Pp.msgnl (str "sub_gls_types2 := " ++ Util.prlist_with_sep (fun () -> Pp.fnl () ++ Pp.fnl ()) Printer.pr_lconstr sub_gls_types); *) let res = build_and_l sub_gls_types in res let is_opaque_constant c = let cb = Global.lookup_constant c in match cb.Declarations.const_body with | Declarations.OpaqueDef _ -> true | Declarations.Undef _ -> true | Declarations.Def _ -> false let open_new_goal (build_proof:tactic -> tactic -> unit) using_lemmas ref_ goal_name (gls_type,decompose_and_tac,nb_goal) = (* Pp.msgnl (str "gls_type := " ++ Printer.pr_lconstr gls_type); *) let current_proof_name = get_current_proof_name () in let name = match goal_name with | Some s -> s | None -> try (add_suffix current_proof_name "_subproof") with e when Errors.noncritical e -> anomaly "open_new_goal with an unamed theorem" in let sign = initialize_named_context_for_proof () in let na = next_global_ident_away name [] in if Termops.occur_existential gls_type then Util.error "\"abstract\" cannot handle existentials"; let hook _ _ = let opacity = let na_ref = Libnames.Ident (dummy_loc,na) in let na_global = Nametab.global na_ref in match na_global with ConstRef c -> is_opaque_constant c | _ -> anomaly "equation_lemma: not a constant" in let lemma = mkConst (Lib.make_con na) in ref_ := Some lemma ; let lid = ref [] in let h_num = ref (-1) in Flags.silently Vernacentries.interp (Vernacexpr.VernacAbort None); build_proof ( fun gls -> let hid = next_ident_away_in_goal h_id (pf_ids_of_hyps gls) in tclTHENSEQ [ h_generalize [lemma]; h_intro hid; (fun g -> let ids = pf_ids_of_hyps g in tclTHEN (Elim.h_decompose_and (mkVar hid)) (fun g -> let ids' = pf_ids_of_hyps g in lid := List.rev (list_subtract ids' ids); if !lid = [] then lid := [hid]; tclIDTAC g ) g ); ] gls) (fun g -> match kind_of_term (pf_concl g) with | App(f,_) when eq_constr f (well_founded ()) -> Auto.h_auto None [] (Some []) g | _ -> incr h_num; (observe_tac "finishing using" ( tclCOMPLETE( tclFIRST[ tclTHEN (eapply_with_bindings (mkVar (List.nth !lid !h_num), NoBindings)) e_assumption; Eauto.eauto_with_bases (true,5) [Evd.empty,delayed_force refl_equal] [Auto.Hint_db.empty empty_transparent_state false] ] ) ) ) g) ; Lemmas.save_named opacity; in start_proof na (Decl_kinds.Global, Decl_kinds.Proof Decl_kinds.Lemma) sign gls_type hook ; if Indfun_common.is_strict_tcc () then by (tclIDTAC) else begin by ( fun g -> tclTHEN (decompose_and_tac) (tclORELSE (tclFIRST (List.map (fun c -> tclTHENSEQ [intros; h_simplest_apply (interp_constr Evd.empty (Global.env()) c); tclCOMPLETE Auto.default_auto ] ) using_lemmas) ) tclIDTAC) g) end; try by tclIDTAC; (* raises UserError _ if the proof is 
complete *) if Flags.is_verbose () then (pp (Printer.pr_open_subgoals())) with UserError _ -> defined () ;; let com_terminate tcc_lemma_name tcc_lemma_ref is_mes fonctional_ref input_type relation rec_arg_num thm_name using_lemmas nb_args hook = let start_proof (tac_start:tactic) (tac_end:tactic) = let (evmap, env) = Lemmas.get_current_context() in start_proof thm_name (Global, Proof Lemma) (Environ.named_context_val env) (hyp_terminates nb_args fonctional_ref) hook; by (observe_tac "starting_tac" tac_start); by (observe_tac "whole_start" (whole_start tac_end nb_args is_mes fonctional_ref input_type relation rec_arg_num )) in start_proof tclIDTAC tclIDTAC; try let new_goal_type = build_new_goal_type () in open_new_goal start_proof using_lemmas tcc_lemma_ref (Some tcc_lemma_name) (new_goal_type); with Failure "empty list of subgoals!" -> (* a non recursive function declared with measure ! *) defined () let ind_of_ref = function | IndRef (ind,i) -> (ind,i) | _ -> anomaly "IndRef expected" let (value_f:constr list -> global_reference -> constr) = fun al fterm -> let d0 = dummy_loc in let rev_x_id_l = ( List.fold_left (fun x_id_l _ -> let x_id = next_ident_away_in_goal x_id x_id_l in x_id::x_id_l ) [] al ) in let context = List.map (fun (x, c) -> Name x, None, c) (List.combine rev_x_id_l (List.rev al)) in let env = Environ.push_rel_context context (Global.env ()) in let glob_body = GCases (d0,RegularStyle,None, [GApp(d0, GRef(d0,fterm), List.rev_map (fun x_id -> GVar(d0, x_id)) rev_x_id_l), (Anonymous,None)], [d0, [v_id], [PatCstr(d0,(ind_of_ref (delayed_force coq_sig_ref),1), [PatVar(d0, Name v_id); PatVar(d0, Anonymous)], Anonymous)], GVar(d0,v_id)]) in let body = understand Evd.empty env glob_body in it_mkLambda_or_LetIn body context let (declare_fun : identifier -> logical_kind -> constr -> global_reference) = fun f_id kind value -> let ce = {const_entry_body = value; const_entry_secctx = None; const_entry_type = None; const_entry_opaque = false } in ConstRef(declare_constant f_id (DefinitionEntry ce, kind));; let (declare_f : identifier -> logical_kind -> constr list -> global_reference -> global_reference) = fun f_id kind input_type fterm_ref -> declare_fun f_id kind (value_f input_type fterm_ref);; let rec n_x_id ids n = if n = 0 then [] else let x = next_ident_away_in_goal x_id ids in x::n_x_id (x::ids) (n-1);; let start_equation (f:global_reference) (term_f:global_reference) (cont_tactic:identifier list -> tactic) g = let ids = pf_ids_of_hyps g in let terminate_constr = constr_of_global term_f in let nargs = nb_prod (type_of_const terminate_constr) in let x = n_x_id ids nargs in tclTHENLIST [ h_intros x; unfold_in_concl [(Termops.all_occurrences, evaluable_of_global_reference f)]; observe_tac "simplest_case" (simplest_case (mkApp (terminate_constr, Array.of_list (List.map mkVar x)))); observe_tac "prove_eq" (cont_tactic x)] g;; let base_leaf_eq func eqs f_id g = let ids = pf_ids_of_hyps g in let k = next_ident_away_in_goal k_id ids in let p = next_ident_away_in_goal p_id (k::ids) in let v = next_ident_away_in_goal v_id (p::k::ids) in let heq = next_ident_away_in_goal heq_id (v::p::k::ids) in let heq1 = next_ident_away_in_goal heq_id (heq::v::p::k::ids) in let hex = next_ident_away_in_goal hex_id (heq1::heq::v::p::k::ids) in tclTHENLIST [ h_intros [v; hex]; simplest_elim (mkVar hex); h_intros [p;heq1]; tclTRY (rewriteRL (mkApp(mkVar heq1, [|mkApp (delayed_force coq_S, [|mkVar p|]); mkApp(delayed_force lt_n_Sn, [|mkVar p|]); f_id|]))); simpl_iter onConcl; tclTRY (unfold_in_concl 
[((true,[1]), evaluable_of_global_reference func)]); observe_tac "list_revrite" (list_rewrite true eqs); apply (delayed_force refl_equal)] g;; let f_S t = mkApp(delayed_force coq_S, [|t|]);; let rec introduce_all_values_eq cont_tac functional termine f p heq1 pmax bounds le_proofs eqs ids = function [] -> let heq2 = next_ident_away_in_goal heq_id ids in tclTHENLIST [pose_proof (Name heq2) (mkApp(mkVar heq1, [|f_S(f_S(mkVar pmax))|])); simpl_iter (onHyp heq2); unfold_in_hyp [((true,[1]), evaluable_of_global_reference (global_of_constr functional))] (heq2, Termops.InHyp); tclTHENS (fun gls -> let t_eq = compute_renamed_type gls (mkVar heq2) in let def_id = let _,_,t = destProd t_eq in let def_na,_,_ = destProd t in Nameops.out_name def_na in observe_tac "rewrite heq" (general_rewrite_bindings false Termops.all_occurrences true (* dep proofs also: *) true (mkVar heq2, ExplicitBindings[dummy_loc,NamedHyp def_id, f]) false) gls) [tclTHENLIST [observe_tac "list_rewrite" (list_rewrite true eqs); cont_tac pmax le_proofs]; tclTHENLIST[apply (delayed_force le_lt_SS); compute_le_proofs le_proofs]]] | arg::args -> let v' = next_ident_away_in_goal v_id ids in let ids = v'::ids in let hex' = next_ident_away_in_goal hex_id ids in let ids = hex'::ids in let p' = next_ident_away_in_goal p_id ids in let ids = p'::ids in let new_pmax = next_ident_away_in_goal pmax_id ids in let ids = pmax::ids in let hle1 = next_ident_away_in_goal hle_id ids in let ids = hle1::ids in let hle2 = next_ident_away_in_goal hle_id ids in let ids = hle2::ids in let heq = next_ident_away_in_goal heq_id ids in let ids = heq::ids in let heq2 = next_ident_away_in_goal heq_id ids in let ids = heq2::ids in tclTHENLIST [mkCaseEq(mkApp(termine, Array.of_list arg)); h_intros [v'; hex']; simplest_elim(mkVar hex'); h_intros [p']; simplest_elim(mkApp(delayed_force max_constr, [|mkVar pmax; mkVar p'|])); h_intros [new_pmax;hle1;hle2]; introduce_all_values_eq (fun pmax' le_proofs'-> tclTHENLIST [cont_tac pmax' le_proofs'; h_intros [heq;heq2]; observe_tac ("rewriteRL " ^ (string_of_id heq2)) (tclTRY (rewriteLR (mkVar heq2))); tclTRY (tclTHENS ( fun g -> let t_eq = compute_renamed_type g (mkVar heq) in let k_id,def_id = let k_na,_,t = destProd t_eq in let _,_,t = destProd t in let def_na,_,_ = destProd t in Nameops.out_name k_na,Nameops.out_name def_na in let c_b = (mkVar heq, ExplicitBindings [dummy_loc, NamedHyp k_id, f_S(mkVar pmax'); dummy_loc, NamedHyp def_id, f]) in observe_tac "general_rewrite_bindings" ( (general_rewrite_bindings false Termops.all_occurrences true (* dep proofs also: *) true c_b false)) g ) [tclIDTAC; tclTHENLIST [apply (delayed_force le_lt_n_Sm); compute_le_proofs le_proofs']])]) functional termine f p heq1 new_pmax (p'::bounds)((mkVar pmax)::le_proofs) eqs (heq2::heq::hle2::hle1::new_pmax::p'::hex'::v'::ids) args] let rec_leaf_eq termine f ids functional eqs expr fn args = let p = next_ident_away_in_goal p_id ids in let ids = p::ids in let v = next_ident_away_in_goal v_id ids in let ids = v::ids in let hex = next_ident_away_in_goal hex_id ids in let ids = hex::ids in let heq1 = next_ident_away_in_goal heq_id ids in let ids = heq1::ids in let hle1 = next_ident_away_in_goal hle_id ids in let ids = hle1::ids in tclTHENLIST [observe_tac "intros v hex" (h_intros [v;hex]); simplest_elim (mkVar hex); h_intros [p;heq1]; h_generalize [mkApp(delayed_force le_n,[|mkVar p|])]; h_intros [hle1]; observe_tac "introduce_all_values_eq" (introduce_all_values_eq (fun _ _ -> tclIDTAC) functional termine f p heq1 p [] [] eqs ids args); 
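(* at this point every recursive call has been replaced by its value together
   with an iteration bound, so the two sides of the equation should be
   convertible; the [observe_tac] label below is only a debugging aid *)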
observe_tac "failing here" (apply (delayed_force refl_equal))] let rec prove_eq nb_arg (termine:constr) (f:constr)(functional:global_reference) (eqs:constr list) (expr:constr) = (* tclTRY *) observe_tac "prove_eq" (match kind_of_term expr with Case(ci,t,a,l) -> (match find_call_occs nb_arg 0 f a with _,[] -> (fun g -> let destruct_tac,rev_to_thin_intro = mkDestructEq [] a g in tclTHENS destruct_tac (list_map_i (fun i -> mk_intros_and_continue (List.rev rev_to_thin_intro) true (prove_eq nb_arg termine f functional) eqs ci.ci_cstr_ndecls.(i)) 0 (Array.to_list l)) g) | _,_::_ -> (match find_call_occs nb_arg 0 f expr with _,[] -> observe_tac "base_leaf_eq(1)" (base_leaf_eq functional eqs f) | fn,args -> fun g -> let ids = Termops.ids_of_named_context (pf_hyps g) in observe_tac "rec_leaf_eq" (rec_leaf_eq termine f ids (constr_of_global functional) eqs expr fn args) g)) | _ -> (match find_call_occs nb_arg 0 f expr with _,[] -> observe_tac "base_leaf_eq(2)" ( base_leaf_eq functional eqs f) | fn,args -> fun g -> let ids = Termops.ids_of_named_context (pf_hyps g) in observe_tac "rec_leaf_eq" (rec_leaf_eq termine f ids (constr_of_global functional) eqs expr fn args) g));; let (com_eqn : int -> identifier -> global_reference -> global_reference -> global_reference -> constr -> unit) = fun nb_arg eq_name functional_ref f_ref terminate_ref equation_lemma_type -> let opacity = match terminate_ref with | ConstRef c -> is_opaque_constant c | _ -> anomaly "terminate_lemma: not a constant" in let (evmap, env) = Lemmas.get_current_context() in let f_constr = (constr_of_global f_ref) in let equation_lemma_type = subst1 f_constr equation_lemma_type in (start_proof eq_name (Global, Proof Lemma) (Environ.named_context_val env) equation_lemma_type (fun _ _ -> ()); by (start_equation f_ref terminate_ref (fun x -> prove_eq nb_arg (constr_of_global terminate_ref) f_constr functional_ref [] (instantiate_lambda (def_of_const (constr_of_global functional_ref)) (f_constr::List.map mkVar x) ) ) ); (* (try Vernacentries.interp (Vernacexpr.VernacShow Vernacexpr.ShowProof) with _ -> ()); *) (* Vernacentries.interp (Vernacexpr.VernacShow Vernacexpr.ShowScript); *) Flags.silently (fun () -> Lemmas.save_named opacity) () ; (* Pp.msgnl (str "eqn finished"); *) );; let nf_zeta env = Reductionops.clos_norm_flags (Closure.RedFlags.mkflags [Closure.RedFlags.fZETA]) env Evd.empty let nf_betaiotazeta = (* Reductionops.local_strong Reductionops.whd_betaiotazeta *) let clos_norm_flags flgs env sigma t = Closure.norm_val (Closure.create_clos_infos flgs env) (Closure.inject (Reductionops.nf_evar sigma t)) in clos_norm_flags Closure.betaiotazeta Environ.empty_env Evd.empty let recursive_definition is_mes function_name rec_impls type_of_f r rec_arg_num eq generate_induction_principle using_lemmas : unit = let previous_label = Lib.current_command_label () in let function_type = interp_constr Evd.empty (Global.env()) type_of_f in let env = push_named (function_name,None,function_type) (Global.env()) in (* Pp.msgnl (str "function type := " ++ Printer.pr_lconstr function_type); *) let equation_lemma_type = nf_betaiotazeta (interp_gen (OfType None) Evd.empty env ~impls:rec_impls eq) in (* Pp.msgnl (str "lemma type := " ++ Printer.pr_lconstr equation_lemma_type ++ fnl ()); *) let res_vars,eq' = decompose_prod equation_lemma_type in let env_eq' = Environ.push_rel_context (List.map (fun (x,y) -> (x,None,y)) res_vars) env in let eq' = nf_zeta env_eq' eq' in let res = (* Pp.msgnl (str "res_var :=" ++ Printer.pr_lconstr_env (push_rel_context 
(List.map (function (x,t) -> (x,None,t)) res_vars) env) eq'); *) (* Pp.msgnl (str "rec_arg_num := " ++ str (string_of_int rec_arg_num)); *) (* Pp.msgnl (str "eq' := " ++ str (string_of_int rec_arg_num)); *) match kind_of_term eq' with | App(e,[|_;_;eq_fix|]) -> mkLambda (Name function_name,function_type,subst_var function_name (compose_lam res_vars eq_fix)) | _ -> failwith "Recursive Definition (res not eq)" in let pre_rec_args,function_type_before_rec_arg = decompose_prod_n (rec_arg_num - 1) function_type in let (_, rec_arg_type, _) = destProd function_type_before_rec_arg in let arg_types = List.rev_map snd (fst (decompose_prod_n (List.length res_vars) function_type)) in let equation_id = add_suffix function_name "_equation" in let functional_id = add_suffix function_name "_F" in let term_id = add_suffix function_name "_terminate" in let functional_ref = declare_fun functional_id (IsDefinition Decl_kinds.Definition) res in let env_with_pre_rec_args = push_rel_context(List.map (function (x,t) -> (x,None,t)) pre_rec_args) env in let relation = interp_constr Evd.empty env_with_pre_rec_args r in let tcc_lemma_name = add_suffix function_name "_tcc" in let tcc_lemma_constr = ref None in (* let _ = Pp.msgnl (str "relation := " ++ Printer.pr_lconstr_env env_with_pre_rec_args relation) in *) let hook _ _ = let term_ref = Nametab.locate (qualid_of_ident term_id) in let f_ref = declare_f function_name (IsProof Lemma) arg_types term_ref in let _ = Table.extraction_inline true [Ident (dummy_loc,term_id)] in (* message "start second proof"; *) let stop = ref false in begin try com_eqn (List.length res_vars) equation_id functional_ref f_ref term_ref (subst_var function_name equation_lemma_type) with e when Errors.noncritical e -> begin if Tacinterp.get_debug () <> Tactic_debug.DebugOff then pperrnl (str "Cannot create equation Lemma " ++ Errors.print e) else anomaly "Cannot create equation Lemma" ; stop := true; end end; if not !stop then let eq_ref = Nametab.locate (qualid_of_ident equation_id ) in let f_ref = destConst (constr_of_global f_ref) and functional_ref = destConst (constr_of_global functional_ref) and eq_ref = destConst (constr_of_global eq_ref) in generate_induction_principle f_ref tcc_lemma_constr functional_ref eq_ref rec_arg_num rec_arg_type (nb_prod res) relation; if Flags.is_verbose () then msgnl (h 1 (Ppconstr.pr_id function_name ++ spc () ++ str"is defined" )++ fnl () ++ h 1 (Ppconstr.pr_id equation_id ++ spc () ++ str"is defined" ) ) in try com_terminate tcc_lemma_name tcc_lemma_constr is_mes functional_ref rec_arg_type relation rec_arg_num term_id using_lemmas (List.length res_vars) hook with reraise -> begin (try ignore (Backtrack.backto previous_label) with e when Errors.noncritical e -> ()); (* anomaly "Cannot create termination Lemma" *) raise reraise end coq-8.4pl2/plugins/funind/functional_principles_proofs.mli0000640000175000001440000000113111254456226023263 0ustar notinusersopen Names open Term val prove_princ_for_struct : bool -> int -> constant array -> constr array -> int -> Tacmach.tactic val prove_principle_for_gen : constant*constant*constant -> (* name of the function, the fonctionnal and the fixpoint equation *) constr option ref -> (* a pointer to the obligation proofs lemma *) bool -> (* is that function uses measure *) int -> (* the number of recursive argument *) types -> (* the type of the recursive argument *) constr -> (* the wf relation used to prove the function *) Tacmach.tactic (* val is_pte : rel_declaration -> bool *) 
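(* [prove_princ_for_struct] is the tactic that [do_generate_principle] passes
   to [generate_principle] for structurally recursive functions;
   [prove_principle_for_gen] plays the corresponding role for functions
   defined by well-founded recursion or by a measure. *)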
coq-8.4pl2/plugins/funind/indfun_common.ml0000640000175000001440000003773712121620060017767 0ustar notinusersopen Names open Pp open Libnames let mk_prefix pre id = id_of_string (pre^(string_of_id id)) let mk_rel_id = mk_prefix "R_" let mk_correct_id id = Nameops.add_suffix (mk_rel_id id) "_correct" let mk_complete_id id = Nameops.add_suffix (mk_rel_id id) "_complete" let mk_equation_id id = Nameops.add_suffix id "_equation" let msgnl m = () let invalid_argument s = raise (Invalid_argument s) let fresh_id avoid s = Namegen.next_ident_away_in_goal (id_of_string s) avoid let fresh_name avoid s = Name (fresh_id avoid s) let get_name avoid ?(default="H") = function | Anonymous -> fresh_name avoid default | Name n -> Name n let array_get_start a = try Array.init (Array.length a - 1) (fun i -> a.(i)) with Invalid_argument "index out of bounds" -> invalid_argument "array_get_start" let id_of_name = function Name id -> id | _ -> raise Not_found let locate ref = let (loc,qid) = qualid_of_reference ref in Nametab.locate qid let locate_ind ref = match locate ref with | IndRef x -> x | _ -> raise Not_found let locate_constant ref = match locate ref with | ConstRef x -> x | _ -> raise Not_found let locate_with_msg msg f x = try f x with | Not_found -> raise (Util.UserError("", msg)) let filter_map filter f = let rec it = function | [] -> [] | e::l -> if filter e then (f e) :: it l else it l in it let chop_rlambda_n = let rec chop_lambda_n acc n rt = if n == 0 then List.rev acc,rt else match rt with | Glob_term.GLambda(_,name,k,t,b) -> chop_lambda_n ((name,t,false)::acc) (n-1) b | Glob_term.GLetIn(_,name,v,b) -> chop_lambda_n ((name,v,true)::acc) (n-1) b | _ -> raise (Util.UserError("chop_rlambda_n", str "chop_rlambda_n: Not enough Lambdas")) in chop_lambda_n [] let chop_rprod_n = let rec chop_prod_n acc n rt = if n == 0 then List.rev acc,rt else match rt with | Glob_term.GProd(_,name,k,t,b) -> chop_prod_n ((name,t)::acc) (n-1) b | _ -> raise (Util.UserError("chop_rprod_n",str "chop_rprod_n: Not enough products")) in chop_prod_n [] let list_union_eq eq_fun l1 l2 = let rec urec = function | [] -> l2 | a::l -> if List.exists (eq_fun a) l2 then urec l else a::urec l in urec l1 let list_add_set_eq eq_fun x l = if List.exists (eq_fun x) l then l else x::l let const_of_id id = let _,princ_ref = qualid_of_reference (Libnames.Ident (Util.dummy_loc,id)) in try Nametab.locate_constant princ_ref with Not_found -> Util.error ("cannot find "^ string_of_id id) let def_of_const t = match (Term.kind_of_term t) with Term.Const sp -> (try (match Declarations.body_of_constant (Global.lookup_constant sp) with | Some c -> Declarations.force c | _ -> assert false) with e when Errors.noncritical e -> assert false) |_ -> assert false let coq_constant s = Coqlib.gen_constant_in_modules "RecursiveDefinition" Coqlib.init_modules s;; let constant sl s = constr_of_global (Nametab.locate (make_qualid(Names.make_dirpath (List.map id_of_string (List.rev sl))) (id_of_string s)));; let find_reference sl s = (Nametab.locate (make_qualid(Names.make_dirpath (List.map id_of_string (List.rev sl))) (id_of_string s)));; let eq = lazy(coq_constant "eq") let refl_equal = lazy(coq_constant "eq_refl") (*****************************************************************) (* Copy of the standart save mechanism but without the much too *) (* slow reduction function *) (*****************************************************************) open Declarations open Entries open Decl_kinds open Declare let definition_message id = Flags.if_verbose message 
((string_of_id id) ^ " is defined") let save with_clean id const (locality,kind) hook = let {const_entry_body = pft; const_entry_secctx = _; const_entry_type = tpo; const_entry_opaque = opacity } = const in let l,r = match locality with | Local when Lib.sections_are_opened () -> let k = logical_kind_of_goal_kind kind in let c = SectionLocalDef (pft, tpo, opacity) in let _ = declare_variable id (Lib.cwd(), c, k) in (Local, VarRef id) | Local -> let k = logical_kind_of_goal_kind kind in let kn = declare_constant id (DefinitionEntry const, k) in (Global, ConstRef kn) | Global -> let k = logical_kind_of_goal_kind kind in let kn = declare_constant id (DefinitionEntry const, k) in (Global, ConstRef kn) in if with_clean then Pfedit.delete_current_proof (); hook l r; definition_message id let cook_proof _ = let (id,(entry,_,strength,hook)) = Pfedit.cook_proof (fun _ -> ()) in (id,(entry,strength,hook)) let new_save_named opacity = let id,(const,persistence,hook) = cook_proof true in let const = { const with const_entry_opaque = opacity } in save true id const persistence hook let get_proof_clean do_reduce = let result = cook_proof do_reduce in Pfedit.delete_current_proof (); result let with_full_print f a = let old_implicit_args = Impargs.is_implicit_args () and old_strict_implicit_args = Impargs.is_strict_implicit_args () and old_contextual_implicit_args = Impargs.is_contextual_implicit_args () in let old_rawprint = !Flags.raw_print in Flags.raw_print := true; Impargs.make_implicit_args false; Impargs.make_strict_implicit_args false; Impargs.make_contextual_implicit_args false; Impargs.make_contextual_implicit_args false; Dumpglob.pause (); try let res = f a in Impargs.make_implicit_args old_implicit_args; Impargs.make_strict_implicit_args old_strict_implicit_args; Impargs.make_contextual_implicit_args old_contextual_implicit_args; Flags.raw_print := old_rawprint; Dumpglob.continue (); res with | reraise -> Impargs.make_implicit_args old_implicit_args; Impargs.make_strict_implicit_args old_strict_implicit_args; Impargs.make_contextual_implicit_args old_contextual_implicit_args; Flags.raw_print := old_rawprint; Dumpglob.continue (); raise reraise (**********************) type function_info = { function_constant : constant; graph_ind : inductive; equation_lemma : constant option; correctness_lemma : constant option; completeness_lemma : constant option; rect_lemma : constant option; rec_lemma : constant option; prop_lemma : constant option; is_general : bool; (* Has this function been defined using general recursive definition *) } (* type function_db = function_info list *) (* let function_table = ref ([] : function_db) *) let from_function = ref Cmap.empty let from_graph = ref Indmap.empty (* let rec do_cache_info finfo = function | [] -> raise Not_found | (finfo'::finfos as l) -> if finfo' == finfo then l else if finfo'.function_constant = finfo.function_constant then finfo::finfos else let res = do_cache_info finfo finfos in if res == finfos then l else finfo'::l let cache_Function (_,(finfos)) = let new_tbl = try do_cache_info finfos !function_table with Not_found -> finfos::!function_table in if new_tbl != !function_table then function_table := new_tbl *) let cache_Function (_,finfos) = from_function := Cmap.add finfos.function_constant finfos !from_function; from_graph := Indmap.add finfos.graph_ind finfos !from_graph let load_Function _ = cache_Function let open_Function _ = cache_Function let subst_Function (subst,finfos) = let do_subst_con c = fst (Mod_subst.subst_con subst c) and 
do_subst_ind (kn,i) = (Mod_subst.subst_ind subst kn,i) in let function_constant' = do_subst_con finfos.function_constant in let graph_ind' = do_subst_ind finfos.graph_ind in let equation_lemma' = Option.smartmap do_subst_con finfos.equation_lemma in let correctness_lemma' = Option.smartmap do_subst_con finfos.correctness_lemma in let completeness_lemma' = Option.smartmap do_subst_con finfos.completeness_lemma in let rect_lemma' = Option.smartmap do_subst_con finfos.rect_lemma in let rec_lemma' = Option.smartmap do_subst_con finfos.rec_lemma in let prop_lemma' = Option.smartmap do_subst_con finfos.prop_lemma in if function_constant' == finfos.function_constant && graph_ind' == finfos.graph_ind && equation_lemma' == finfos.equation_lemma && correctness_lemma' == finfos.correctness_lemma && completeness_lemma' == finfos.completeness_lemma && rect_lemma' == finfos.rect_lemma && rec_lemma' == finfos.rec_lemma && prop_lemma' == finfos.prop_lemma then finfos else { function_constant = function_constant'; graph_ind = graph_ind'; equation_lemma = equation_lemma' ; correctness_lemma = correctness_lemma' ; completeness_lemma = completeness_lemma' ; rect_lemma = rect_lemma' ; rec_lemma = rec_lemma'; prop_lemma = prop_lemma'; is_general = finfos.is_general } let classify_Function infos = Libobject.Substitute infos let discharge_Function (_,finfos) = let function_constant' = Lib.discharge_con finfos.function_constant and graph_ind' = Lib.discharge_inductive finfos.graph_ind and equation_lemma' = Option.smartmap Lib.discharge_con finfos.equation_lemma and correctness_lemma' = Option.smartmap Lib.discharge_con finfos.correctness_lemma and completeness_lemma' = Option.smartmap Lib.discharge_con finfos.completeness_lemma and rect_lemma' = Option.smartmap Lib.discharge_con finfos.rect_lemma and rec_lemma' = Option.smartmap Lib.discharge_con finfos.rec_lemma and prop_lemma' = Option.smartmap Lib.discharge_con finfos.prop_lemma in if function_constant' == finfos.function_constant && graph_ind' == finfos.graph_ind && equation_lemma' == finfos.equation_lemma && correctness_lemma' == finfos.correctness_lemma && completeness_lemma' == finfos.completeness_lemma && rect_lemma' == finfos.rect_lemma && rec_lemma' == finfos.rec_lemma && prop_lemma' == finfos.prop_lemma then Some finfos else Some { function_constant = function_constant' ; graph_ind = graph_ind' ; equation_lemma = equation_lemma' ; correctness_lemma = correctness_lemma' ; completeness_lemma = completeness_lemma'; rect_lemma = rect_lemma'; rec_lemma = rec_lemma'; prop_lemma = prop_lemma' ; is_general = finfos.is_general } open Term let pr_info f_info = str "function_constant := " ++ Printer.pr_lconstr (mkConst f_info.function_constant)++ fnl () ++ str "function_constant_type := " ++ (try Printer.pr_lconstr (Global.type_of_global (ConstRef f_info.function_constant)) with e when Errors.noncritical e -> mt ()) ++ fnl () ++ str "equation_lemma := " ++ (Option.fold_right (fun v acc -> Printer.pr_lconstr (mkConst v)) f_info.equation_lemma (mt ()) ) ++ fnl () ++ str "completeness_lemma :=" ++ (Option.fold_right (fun v acc -> Printer.pr_lconstr (mkConst v)) f_info.completeness_lemma (mt ()) ) ++ fnl () ++ str "correctness_lemma := " ++ (Option.fold_right (fun v acc -> Printer.pr_lconstr (mkConst v)) f_info.correctness_lemma (mt ()) ) ++ fnl () ++ str "rect_lemma := " ++ (Option.fold_right (fun v acc -> Printer.pr_lconstr (mkConst v)) f_info.rect_lemma (mt ()) ) ++ fnl () ++ str "rec_lemma := " ++ (Option.fold_right (fun v acc -> Printer.pr_lconstr (mkConst v)) 
f_info.rec_lemma (mt ()) ) ++ fnl () ++ str "prop_lemma := " ++ (Option.fold_right (fun v acc -> Printer.pr_lconstr (mkConst v)) f_info.prop_lemma (mt ()) ) ++ fnl () ++ str "graph_ind := " ++ Printer.pr_lconstr (mkInd f_info.graph_ind) ++ fnl () let pr_table tb = let l = Cmap.fold (fun k v acc -> v::acc) tb [] in Util.prlist_with_sep fnl pr_info l let in_Function : function_info -> Libobject.obj = Libobject.declare_object {(Libobject.default_object "FUNCTIONS_DB") with Libobject.cache_function = cache_Function; Libobject.load_function = load_Function; Libobject.classify_function = classify_Function; Libobject.subst_function = subst_Function; Libobject.discharge_function = discharge_Function (* Libobject.open_function = open_Function; *) } (* Synchronisation with reset *) let freeze () = !from_function,!from_graph let unfreeze (functions,graphs) = (* Pp.msgnl (str "unfreezing function_table : " ++ pr_table l); *) from_function := functions; from_graph := graphs let init () = (* Pp.msgnl (str "reseting function_table"); *) from_function := Cmap.empty; from_graph := Indmap.empty let _ = Summary.declare_summary "functions_db_sum" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let find_or_none id = try Some (match Nametab.locate (qualid_of_ident id) with ConstRef c -> c | _ -> Util.anomaly "Not a constant" ) with Not_found -> None let find_Function_infos f = Cmap.find f !from_function let find_Function_of_graph ind = Indmap.find ind !from_graph let update_Function finfo = (* Pp.msgnl (pr_info finfo); *) Lib.add_anonymous_leaf (in_Function finfo) let add_Function is_general f = let f_id = id_of_label (con_label f) in let equation_lemma = find_or_none (mk_equation_id f_id) and correctness_lemma = find_or_none (mk_correct_id f_id) and completeness_lemma = find_or_none (mk_complete_id f_id) and rect_lemma = find_or_none (Nameops.add_suffix f_id "_rect") and rec_lemma = find_or_none (Nameops.add_suffix f_id "_rec") and prop_lemma = find_or_none (Nameops.add_suffix f_id "_ind") and graph_ind = match Nametab.locate (qualid_of_ident (mk_rel_id f_id)) with | IndRef ind -> ind | _ -> Util.anomaly "Not an inductive" in let finfos = { function_constant = f; equation_lemma = equation_lemma; completeness_lemma = completeness_lemma; correctness_lemma = correctness_lemma; rect_lemma = rect_lemma; rec_lemma = rec_lemma; prop_lemma = prop_lemma; graph_ind = graph_ind; is_general = is_general } in update_Function finfos let pr_table () = pr_table !from_function (*********************************) (* Debuging *) let functional_induction_rewrite_dependent_proofs = ref true let function_debug = ref false open Goptions let functional_induction_rewrite_dependent_proofs_sig = { optsync = false; optdepr = false; optname = "Functional Induction Rewrite Dependent"; optkey = ["Functional";"Induction";"Rewrite";"Dependent"]; optread = (fun () -> !functional_induction_rewrite_dependent_proofs); optwrite = (fun b -> functional_induction_rewrite_dependent_proofs := b) } let _ = declare_bool_option functional_induction_rewrite_dependent_proofs_sig let do_rewrite_dependent () = !functional_induction_rewrite_dependent_proofs = true let function_debug_sig = { optsync = false; optdepr = false; optname = "Function debug"; optkey = ["Function_debug"]; optread = (fun () -> !function_debug); optwrite = (fun b -> function_debug := b) } let _ = declare_bool_option function_debug_sig let do_observe () = !function_debug = true let strict_tcc = ref false let is_strict_tcc () = 
!strict_tcc let strict_tcc_sig = { optsync = false; optdepr = false; optname = "Raw Function Tcc"; optkey = ["Function_raw_tcc"]; optread = (fun () -> !strict_tcc); optwrite = (fun b -> strict_tcc := b) } let _ = declare_bool_option strict_tcc_sig exception Building_graph of exn exception Defining_principle of exn exception ToShow of exn let init_constant dir s = try Coqlib.gen_constant "Function" dir s with e when Errors.noncritical e -> raise (ToShow e) let jmeq () = try (Coqlib.check_required_library ["Coq";"Logic";"JMeq"]; init_constant ["Logic";"JMeq"] "JMeq") with e when Errors.noncritical e -> raise (ToShow e) let jmeq_rec () = try Coqlib.check_required_library ["Coq";"Logic";"JMeq"]; init_constant ["Logic";"JMeq"] "JMeq_rec" with e when Errors.noncritical e -> raise (ToShow e) let jmeq_refl () = try Coqlib.check_required_library ["Coq";"Logic";"JMeq"]; init_constant ["Logic";"JMeq"] "JMeq_refl" with e when Errors.noncritical e -> raise (ToShow e) coq-8.4pl2/plugins/funind/glob_termops.ml0000640000175000001440000005125112121620060017613 0ustar notinusersopen Pp open Glob_term open Util open Names (* Ocaml 3.06 Map.S does not handle is_empty *) let idmap_is_empty m = m = Idmap.empty (* Some basic functions to rebuild glob_constr In each of them the location is Util.dummy_loc *) let mkGRef ref = GRef(dummy_loc,ref) let mkGVar id = GVar(dummy_loc,id) let mkGApp(rt,rtl) = GApp(dummy_loc,rt,rtl) let mkGLambda(n,t,b) = GLambda(dummy_loc,n,Explicit,t,b) let mkGProd(n,t,b) = GProd(dummy_loc,n,Explicit,t,b) let mkGLetIn(n,t,b) = GLetIn(dummy_loc,n,t,b) let mkGCases(rto,l,brl) = GCases(dummy_loc,Term.RegularStyle,rto,l,brl) let mkGSort s = GSort(dummy_loc,s) let mkGHole () = GHole(dummy_loc,Evd.BinderType Anonymous) let mkGCast(b,t) = GCast(dummy_loc,b,CastConv (Term.DEFAULTcast,t)) (* Some basic functions to decompose glob_constrs These are analogous to the ones constrs *) let glob_decompose_prod = let rec glob_decompose_prod args = function | GProd(_,n,k,t,b) -> glob_decompose_prod ((n,t)::args) b | rt -> args,rt in glob_decompose_prod [] let glob_decompose_prod_or_letin = let rec glob_decompose_prod args = function | GProd(_,n,k,t,b) -> glob_decompose_prod ((n,None,Some t)::args) b | GLetIn(_,n,t,b) -> glob_decompose_prod ((n,Some t,None)::args) b | rt -> args,rt in glob_decompose_prod [] let glob_compose_prod = List.fold_left (fun b (n,t) -> mkGProd(n,t,b)) let glob_compose_prod_or_letin = List.fold_left ( fun concl decl -> match decl with | (n,None,Some t) -> mkGProd(n,t,concl) | (n,Some bdy,None) -> mkGLetIn(n,bdy,concl) | _ -> assert false) let glob_decompose_prod_n n = let rec glob_decompose_prod i args c = if i<=0 then args,c else match c with | GProd(_,n,_,t,b) -> glob_decompose_prod (i-1) ((n,t)::args) b | rt -> args,rt in glob_decompose_prod n [] let glob_decompose_prod_or_letin_n n = let rec glob_decompose_prod i args c = if i<=0 then args,c else match c with | GProd(_,n,_,t,b) -> glob_decompose_prod (i-1) ((n,None,Some t)::args) b | GLetIn(_,n,t,b) -> glob_decompose_prod (i-1) ((n,Some t,None)::args) b | rt -> args,rt in glob_decompose_prod n [] let glob_decompose_app = let rec decompose_rapp acc rt = (* msgnl (str "glob_decompose_app on : "++ Printer.pr_glob_constr rt); *) match rt with | GApp(_,rt,rtl) -> decompose_rapp (List.fold_left (fun y x -> x::y) acc rtl) rt | rt -> rt,List.rev acc in decompose_rapp [] (* [glob_make_eq t1 t2] build the glob_constr corresponding to [t2 = t1] *) let glob_make_eq ?(typ= mkGHole ()) t1 t2 = mkGApp(mkGRef (Lazy.force 
Coqlib.coq_eq_ref),[typ;t2;t1]) (* [glob_make_neq t1 t2] build the glob_constr corresponding to [t1 <> t2] *) let glob_make_neq t1 t2 = mkGApp(mkGRef (Lazy.force Coqlib.coq_not_ref),[glob_make_eq t1 t2]) (* [glob_make_or P1 P2] build the glob_constr corresponding to [P1 \/ P2] *) let glob_make_or t1 t2 = mkGApp (mkGRef(Lazy.force Coqlib.coq_or_ref),[t1;t2]) (* [glob_make_or_list [P1;...;Pn]] build the glob_constr corresponding to [P1 \/ ( .... \/ Pn)] *) let rec glob_make_or_list = function | [] -> raise (Invalid_argument "mk_or") | [e] -> e | e::l -> glob_make_or e (glob_make_or_list l) let remove_name_from_mapping mapping na = match na with | Anonymous -> mapping | Name id -> Idmap.remove id mapping let change_vars = let rec change_vars mapping rt = match rt with | GRef _ -> rt | GVar(loc,id) -> let new_id = try Idmap.find id mapping with Not_found -> id in GVar(loc,new_id) | GEvar _ -> rt | GPatVar _ -> rt | GApp(loc,rt',rtl) -> GApp(loc, change_vars mapping rt', List.map (change_vars mapping) rtl ) | GLambda(loc,name,k,t,b) -> GLambda(loc, name, k, change_vars mapping t, change_vars (remove_name_from_mapping mapping name) b ) | GProd(loc,name,k,t,b) -> GProd(loc, name, k, change_vars mapping t, change_vars (remove_name_from_mapping mapping name) b ) | GLetIn(loc,name,def,b) -> GLetIn(loc, name, change_vars mapping def, change_vars (remove_name_from_mapping mapping name) b ) | GLetTuple(loc,nal,(na,rto),b,e) -> let new_mapping = List.fold_left remove_name_from_mapping mapping nal in GLetTuple(loc, nal, (na, Option.map (change_vars mapping) rto), change_vars mapping b, change_vars new_mapping e ) | GCases(loc,sty,infos,el,brl) -> GCases(loc,sty, infos, List.map (fun (e,x) -> (change_vars mapping e,x)) el, List.map (change_vars_br mapping) brl ) | GIf(loc,b,(na,e_option),lhs,rhs) -> GIf(loc, change_vars mapping b, (na,Option.map (change_vars mapping) e_option), change_vars mapping lhs, change_vars mapping rhs ) | GRec _ -> error "Local (co)fixes are not supported" | GSort _ -> rt | GHole _ -> rt | GCast(loc,b,CastConv (k,t)) -> GCast(loc,change_vars mapping b, CastConv (k,change_vars mapping t)) | GCast(loc,b,CastCoerce) -> GCast(loc,change_vars mapping b,CastCoerce) and change_vars_br mapping ((loc,idl,patl,res) as br) = let new_mapping = List.fold_right Idmap.remove idl mapping in if idmap_is_empty new_mapping then br else (loc,idl,patl,change_vars new_mapping res) in change_vars let rec alpha_pat excluded pat = match pat with | PatVar(loc,Anonymous) -> let new_id = Indfun_common.fresh_id excluded "_x" in PatVar(loc,Name new_id),(new_id::excluded),Idmap.empty | PatVar(loc,Name id) -> if List.mem id excluded then let new_id = Namegen.next_ident_away id excluded in PatVar(loc,Name new_id),(new_id::excluded), (Idmap.add id new_id Idmap.empty) else pat,excluded,Idmap.empty | PatCstr(loc,constr,patl,na) -> let new_na,new_excluded,map = match na with | Name id when List.mem id excluded -> let new_id = Namegen.next_ident_away id excluded in Name new_id,new_id::excluded, Idmap.add id new_id Idmap.empty | _ -> na,excluded,Idmap.empty in let new_patl,new_excluded,new_map = List.fold_left (fun (patl,excluded,map) pat -> let new_pat,new_excluded,new_map = alpha_pat excluded pat in (new_pat::patl,new_excluded,Idmap.fold Idmap.add new_map map) ) ([],new_excluded,map) patl in PatCstr(loc,constr,List.rev new_patl,new_na),new_excluded,new_map let alpha_patl excluded patl = let patl,new_excluded,map = List.fold_left (fun (patl,excluded,map) pat -> let new_pat,new_excluded,new_map = alpha_pat excluded pat 
in new_pat::patl,new_excluded,(Idmap.fold Idmap.add new_map map) ) ([],excluded,Idmap.empty) patl in (List.rev patl,new_excluded,map) let raw_get_pattern_id pat acc = let rec get_pattern_id pat = match pat with | PatVar(loc,Anonymous) -> assert false | PatVar(loc,Name id) -> [id] | PatCstr(loc,constr,patternl,_) -> List.fold_right (fun pat idl -> let idl' = get_pattern_id pat in idl'@idl ) patternl [] in (get_pattern_id pat)@acc let get_pattern_id pat = raw_get_pattern_id pat [] let rec alpha_rt excluded rt = let new_rt = match rt with | GRef _ | GVar _ | GEvar _ | GPatVar _ -> rt | GLambda(loc,Anonymous,k,t,b) -> let new_id = Namegen.next_ident_away (id_of_string "_x") excluded in let new_excluded = new_id :: excluded in let new_t = alpha_rt new_excluded t in let new_b = alpha_rt new_excluded b in GLambda(loc,Name new_id,k,new_t,new_b) | GProd(loc,Anonymous,k,t,b) -> let new_t = alpha_rt excluded t in let new_b = alpha_rt excluded b in GProd(loc,Anonymous,k,new_t,new_b) | GLetIn(loc,Anonymous,t,b) -> let new_t = alpha_rt excluded t in let new_b = alpha_rt excluded b in GLetIn(loc,Anonymous,new_t,new_b) | GLambda(loc,Name id,k,t,b) -> let new_id = Namegen.next_ident_away id excluded in let t,b = if new_id = id then t,b else let replace = change_vars (Idmap.add id new_id Idmap.empty) in (t,replace b) in let new_excluded = new_id::excluded in let new_t = alpha_rt new_excluded t in let new_b = alpha_rt new_excluded b in GLambda(loc,Name new_id,k,new_t,new_b) | GProd(loc,Name id,k,t,b) -> let new_id = Namegen.next_ident_away id excluded in let new_excluded = new_id::excluded in let t,b = if new_id = id then t,b else let replace = change_vars (Idmap.add id new_id Idmap.empty) in (t,replace b) in let new_t = alpha_rt new_excluded t in let new_b = alpha_rt new_excluded b in GProd(loc,Name new_id,k,new_t,new_b) | GLetIn(loc,Name id,t,b) -> let new_id = Namegen.next_ident_away id excluded in let t,b = if new_id = id then t,b else let replace = change_vars (Idmap.add id new_id Idmap.empty) in (t,replace b) in let new_excluded = new_id::excluded in let new_t = alpha_rt new_excluded t in let new_b = alpha_rt new_excluded b in GLetIn(loc,Name new_id,new_t,new_b) | GLetTuple(loc,nal,(na,rto),t,b) -> let rev_new_nal,new_excluded,mapping = List.fold_left (fun (nal,excluded,mapping) na -> match na with | Anonymous -> (na::nal,excluded,mapping) | Name id -> let new_id = Namegen.next_ident_away id excluded in if new_id = id then na::nal,id::excluded,mapping else (Name new_id)::nal,id::excluded,(Idmap.add id new_id mapping) ) ([],excluded,Idmap.empty) nal in let new_nal = List.rev rev_new_nal in let new_rto,new_t,new_b = if idmap_is_empty mapping then rto,t,b else let replace = change_vars mapping in (Option.map replace rto, t,replace b) in let new_t = alpha_rt new_excluded new_t in let new_b = alpha_rt new_excluded new_b in let new_rto = Option.map (alpha_rt new_excluded) new_rto in GLetTuple(loc,new_nal,(na,new_rto),new_t,new_b) | GCases(loc,sty,infos,el,brl) -> let new_el = List.map (function (rt,i) -> alpha_rt excluded rt, i) el in GCases(loc,sty,infos,new_el,List.map (alpha_br excluded) brl) | GIf(loc,b,(na,e_o),lhs,rhs) -> GIf(loc,alpha_rt excluded b, (na,Option.map (alpha_rt excluded) e_o), alpha_rt excluded lhs, alpha_rt excluded rhs ) | GRec _ -> error "Not handled GRec" | GSort _ -> rt | GHole _ -> rt | GCast (loc,b,CastConv (k,t)) -> GCast(loc,alpha_rt excluded b,CastConv(k,alpha_rt excluded t)) | GCast (loc,b,CastCoerce) -> GCast(loc,alpha_rt excluded b,CastCoerce) | GApp(loc,f,args) -> GApp(loc, 
alpha_rt excluded f, List.map (alpha_rt excluded) args ) in new_rt and alpha_br excluded (loc,ids,patl,res) = let new_patl,new_excluded,mapping = alpha_patl excluded patl in let new_ids = List.fold_right raw_get_pattern_id new_patl [] in let new_excluded = new_ids@excluded in let renamed_res = change_vars mapping res in let new_res = alpha_rt new_excluded renamed_res in (loc,new_ids,new_patl,new_res) (* [is_free_in id rt] checks if [id] is a free variable in [rt] *) let is_free_in id = let rec is_free_in = function | GRef _ -> false | GVar(_,id') -> id_ord id' id == 0 | GEvar _ -> false | GPatVar _ -> false | GApp(_,rt,rtl) -> List.exists is_free_in (rt::rtl) | GLambda(_,n,_,t,b) | GProd(_,n,_,t,b) | GLetIn(_,n,t,b) -> let check_in_b = match n with | Name id' -> id_ord id' id <> 0 | _ -> true in is_free_in t || (check_in_b && is_free_in b) | GCases(_,_,_,el,brl) -> (List.exists (fun (e,_) -> is_free_in e) el) || List.exists is_free_in_br brl | GLetTuple(_,nal,_,b,t) -> let check_in_nal = not (List.exists (function Name id' -> id'= id | _ -> false) nal) in is_free_in t || (check_in_nal && is_free_in b) | GIf(_,cond,_,br1,br2) -> is_free_in cond || is_free_in br1 || is_free_in br2 | GRec _ -> raise (UserError("",str "Not handled GRec")) | GSort _ -> false | GHole _ -> false | GCast (_,b,CastConv (_,t)) -> is_free_in b || is_free_in t | GCast (_,b,CastCoerce) -> is_free_in b and is_free_in_br (_,ids,_,rt) = (not (List.mem id ids)) && is_free_in rt in is_free_in let rec pattern_to_term = function | PatVar(loc,Anonymous) -> assert false | PatVar(loc,Name id) -> mkGVar id | PatCstr(loc,constr,patternl,_) -> let cst_narg = Inductiveops.mis_constructor_nargs_env (Global.env ()) constr in let implicit_args = Array.to_list (Array.init (cst_narg - List.length patternl) (fun _ -> mkGHole ()) ) in let patl_as_term = List.map pattern_to_term patternl in mkGApp(mkGRef(Libnames.ConstructRef constr), implicit_args@patl_as_term ) let replace_var_by_term x_id term = let rec replace_var_by_pattern rt = match rt with | GRef _ -> rt | GVar(_,id) when id_ord id x_id == 0 -> term | GVar _ -> rt | GEvar _ -> rt | GPatVar _ -> rt | GApp(loc,rt',rtl) -> GApp(loc, replace_var_by_pattern rt', List.map replace_var_by_pattern rtl ) | GLambda(_,Name id,_,_,_) when id_ord id x_id == 0 -> rt | GLambda(loc,name,k,t,b) -> GLambda(loc, name, k, replace_var_by_pattern t, replace_var_by_pattern b ) | GProd(_,Name id,_,_,_) when id_ord id x_id == 0 -> rt | GProd(loc,name,k,t,b) -> GProd(loc, name, k, replace_var_by_pattern t, replace_var_by_pattern b ) | GLetIn(_,Name id,_,_) when id_ord id x_id == 0 -> rt | GLetIn(loc,name,def,b) -> GLetIn(loc, name, replace_var_by_pattern def, replace_var_by_pattern b ) | GLetTuple(_,nal,_,_,_) when List.exists (function Name id -> id = x_id | _ -> false) nal -> rt | GLetTuple(loc,nal,(na,rto),def,b) -> GLetTuple(loc, nal, (na,Option.map replace_var_by_pattern rto), replace_var_by_pattern def, replace_var_by_pattern b ) | GCases(loc,sty,infos,el,brl) -> GCases(loc,sty, infos, List.map (fun (e,x) -> (replace_var_by_pattern e,x)) el, List.map replace_var_by_pattern_br brl ) | GIf(loc,b,(na,e_option),lhs,rhs) -> GIf(loc, replace_var_by_pattern b, (na,Option.map replace_var_by_pattern e_option), replace_var_by_pattern lhs, replace_var_by_pattern rhs ) | GRec _ -> raise (UserError("",str "Not handled GRec")) | GSort _ -> rt | GHole _ -> rt | GCast(loc,b,CastConv(k,t)) -> GCast(loc,replace_var_by_pattern b,CastConv(k,replace_var_by_pattern t)) | GCast(loc,b,CastCoerce) -> 
GCast(loc,replace_var_by_pattern b,CastCoerce) and replace_var_by_pattern_br ((loc,idl,patl,res) as br) = if List.exists (fun id -> id_ord id x_id == 0) idl then br else (loc,idl,patl,replace_var_by_pattern res) in replace_var_by_pattern (* checking unifiability of patterns *) exception NotUnifiable let rec are_unifiable_aux = function | [] -> () | eq::eqs -> match eq with | PatVar _,_ | _,PatVar _ -> are_unifiable_aux eqs | PatCstr(_,constructor1,cpl1,_),PatCstr(_,constructor2,cpl2,_) -> if constructor2 <> constructor1 then raise NotUnifiable else let eqs' = try ((List.combine cpl1 cpl2)@eqs) with e when Errors.noncritical e -> anomaly "are_unifiable_aux" in are_unifiable_aux eqs' let are_unifiable pat1 pat2 = try are_unifiable_aux [pat1,pat2]; true with NotUnifiable -> false let rec eq_cases_pattern_aux = function | [] -> () | eq::eqs -> match eq with | PatVar _,PatVar _ -> eq_cases_pattern_aux eqs | PatCstr(_,constructor1,cpl1,_),PatCstr(_,constructor2,cpl2,_) -> if constructor2 <> constructor1 then raise NotUnifiable else let eqs' = try ((List.combine cpl1 cpl2)@eqs) with e when Errors.noncritical e -> anomaly "eq_cases_pattern_aux" in eq_cases_pattern_aux eqs' | _ -> raise NotUnifiable let eq_cases_pattern pat1 pat2 = try eq_cases_pattern_aux [pat1,pat2]; true with NotUnifiable -> false let ids_of_pat = let rec ids_of_pat ids = function | PatVar(_,Anonymous) -> ids | PatVar(_,Name id) -> Idset.add id ids | PatCstr(_,_,patl,_) -> List.fold_left ids_of_pat ids patl in ids_of_pat Idset.empty let id_of_name = function | Names.Anonymous -> id_of_string "x" | Names.Name x -> x (* TODO: finish Rec caes *) let ids_of_glob_constr c = let rec ids_of_glob_constr acc c = let idof = id_of_name in match c with | GVar (_,id) -> id::acc | GApp (loc,g,args) -> ids_of_glob_constr [] g @ List.flatten (List.map (ids_of_glob_constr []) args) @ acc | GLambda (loc,na,k,ty,c) -> idof na :: ids_of_glob_constr [] ty @ ids_of_glob_constr [] c @ acc | GProd (loc,na,k,ty,c) -> idof na :: ids_of_glob_constr [] ty @ ids_of_glob_constr [] c @ acc | GLetIn (loc,na,b,c) -> idof na :: ids_of_glob_constr [] b @ ids_of_glob_constr [] c @ acc | GCast (loc,c,CastConv(k,t)) -> ids_of_glob_constr [] c @ ids_of_glob_constr [] t @ acc | GCast (loc,c,CastCoerce) -> ids_of_glob_constr [] c @ acc | GIf (loc,c,(na,po),b1,b2) -> ids_of_glob_constr [] c @ ids_of_glob_constr [] b1 @ ids_of_glob_constr [] b2 @ acc | GLetTuple (_,nal,(na,po),b,c) -> List.map idof nal @ ids_of_glob_constr [] b @ ids_of_glob_constr [] c @ acc | GCases (loc,sty,rtntypopt,tml,brchl) -> List.flatten (List.map (fun (_,idl,patl,c) -> idl @ ids_of_glob_constr [] c) brchl) | GRec _ -> failwith "Fix inside a constructor branch" | (GSort _ | GHole _ | GRef _ | GEvar _ | GPatVar _) -> [] in (* build the set *) List.fold_left (fun acc x -> Idset.add x acc) Idset.empty (ids_of_glob_constr [] c) let zeta_normalize = let rec zeta_normalize_term rt = match rt with | GRef _ -> rt | GVar _ -> rt | GEvar _ -> rt | GPatVar _ -> rt | GApp(loc,rt',rtl) -> GApp(loc, zeta_normalize_term rt', List.map zeta_normalize_term rtl ) | GLambda(loc,name,k,t,b) -> GLambda(loc, name, k, zeta_normalize_term t, zeta_normalize_term b ) | GProd(loc,name,k,t,b) -> GProd(loc, name, k, zeta_normalize_term t, zeta_normalize_term b ) | GLetIn(_,Name id,def,b) -> zeta_normalize_term (replace_var_by_term id def b) | GLetIn(loc,Anonymous,def,b) -> zeta_normalize_term b | GLetTuple(loc,nal,(na,rto),def,b) -> GLetTuple(loc, nal, (na,Option.map zeta_normalize_term rto), zeta_normalize_term def, 
zeta_normalize_term b ) | GCases(loc,sty,infos,el,brl) -> GCases(loc,sty, infos, List.map (fun (e,x) -> (zeta_normalize_term e,x)) el, List.map zeta_normalize_br brl ) | GIf(loc,b,(na,e_option),lhs,rhs) -> GIf(loc, zeta_normalize_term b, (na,Option.map zeta_normalize_term e_option), zeta_normalize_term lhs, zeta_normalize_term rhs ) | GRec _ -> raise (UserError("",str "Not handled GRec")) | GSort _ -> rt | GHole _ -> rt | GCast(loc,b,CastConv(k,t)) -> GCast(loc,zeta_normalize_term b,CastConv(k,zeta_normalize_term t)) | GCast(loc,b,CastCoerce) -> GCast(loc,zeta_normalize_term b,CastCoerce) and zeta_normalize_br (loc,idl,patl,res) = (loc,idl,patl,zeta_normalize_term res) in zeta_normalize_term let expand_as = let rec add_as map pat = match pat with | PatVar _ -> map | PatCstr(_,_,patl,Name id) -> Idmap.add id (pattern_to_term pat) (List.fold_left add_as map patl) | PatCstr(_,_,patl,_) -> List.fold_left add_as map patl in let rec expand_as map rt = match rt with | GRef _ | GEvar _ | GPatVar _ | GSort _ | GHole _ -> rt | GVar(_,id) -> begin try Idmap.find id map with Not_found -> rt end | GApp(loc,f,args) -> GApp(loc,expand_as map f,List.map (expand_as map) args) | GLambda(loc,na,k,t,b) -> GLambda(loc,na,k,expand_as map t, expand_as map b) | GProd(loc,na,k,t,b) -> GProd(loc,na,k,expand_as map t, expand_as map b) | GLetIn(loc,na,v,b) -> GLetIn(loc,na, expand_as map v,expand_as map b) | GLetTuple(loc,nal,(na,po),v,b) -> GLetTuple(loc,nal,(na,Option.map (expand_as map) po), expand_as map v, expand_as map b) | GIf(loc,e,(na,po),br1,br2) -> GIf(loc,expand_as map e,(na,Option.map (expand_as map) po), expand_as map br1, expand_as map br2) | GRec _ -> error "Not handled GRec" | GCast(loc,b,CastConv(kind,t)) -> GCast(loc,expand_as map b,CastConv(kind,expand_as map t)) | GCast(loc,b,CastCoerce) -> GCast(loc,expand_as map b,CastCoerce) | GCases(loc,sty,po,el,brl) -> GCases(loc, sty, Option.map (expand_as map) po, List.map (fun (rt,t) -> expand_as map rt,t) el, List.map (expand_as_br map) brl) and expand_as_br map (loc,idl,cpl,rt) = (loc,idl,cpl, expand_as (List.fold_left add_as map cpl) rt) in expand_as Idmap.empty coq-8.4pl2/plugins/funind/indfun.mli0000640000175000001440000000100011504715034016550 0ustar notinusersopen Util open Names open Term open Pp open Indfun_common open Libnames open Glob_term open Declarations val do_generate_principle : bool -> (Vernacexpr.fixpoint_expr * Vernacexpr.decl_notation list) list -> unit val functional_induction : bool -> Term.constr -> (Term.constr * Term.constr Glob_term.bindings) option -> Genarg.intro_pattern_expr Util.located option -> Proof_type.goal Tacmach.sigma -> Proof_type.goal list Evd.sigma val make_graph : Libnames.global_reference -> unit coq-8.4pl2/plugins/funind/recdef_plugin.mllib0000640000175000001440000000024011505230575020421 0ustar notinusersIndfun_common Glob_termops Recdef Glob_term_to_relation Functional_principles_proofs Functional_principles_types Invfun Indfun Merge G_indfun Recdef_plugin_mod coq-8.4pl2/plugins/funind/merge.ml0000640000175000001440000011274412121620060016223 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false) t1 t2 then true else false let rec compare_constr' t1 t2 = if compare_constr_nosub t1 t2 then true else (compare_constr (compare_constr') t1 t2) let rec substitterm prof t by_t in_u = if (compare_constr' (lift prof t) in_u) then (lift prof by_t) else map_constr_with_binders succ (fun i -> substitterm i t 
by_t) prof in_u let lift_ldecl n ldecl = List.map (fun (x,y) -> x,lift n y) ldecl let understand = Pretyping.Default.understand Evd.empty (Global.env()) (** Operations on names and identifiers *) let id_of_name = function Anonymous -> id_of_string "H" | Name id -> id;; let name_of_string str = Name (id_of_string str) let string_of_name nme = string_of_id (id_of_name nme) (** [isVarf f x] returns [true] if term [x] is of the form [(Var f)]. *) let isVarf f x = match x with | GVar (_,x) -> Pervasives.compare x f = 0 | _ -> false (** [ident_global_exist id] returns true if identifier [id] is linked in global environment. *) let ident_global_exist id = try let ans = CRef (Libnames.Ident (dummy_loc,id)) in let _ = ignore (Constrintern.intern_constr Evd.empty (Global.env()) ans) in true with e when Errors.noncritical e -> false (** [next_ident_fresh id] returns a fresh identifier (ie not linked in global env) with base [id]. *) let next_ident_fresh (id:identifier) = let res = ref id in while ident_global_exist !res do res := Nameops.lift_subscript !res done; !res (** {2 Debugging} *) (* comment this line to see debug msgs *) let msg x = () ;; let pr_lconstr c = str "" (* uncomment this to see debugging *) let prconstr c = msg (str" " ++ Printer.pr_lconstr c) let prconstrnl c = msg (str" " ++ Printer.pr_lconstr c ++ str"\n") let prlistconstr lc = List.iter prconstr lc let prstr s = msg(str s) let prNamedConstr s c = begin msg(str ""); msg(str(s^" { ") ++ Printer.pr_lconstr c ++ str " } "); msg(str ""); end let prNamedRConstr s c = begin msg(str ""); msg(str(s^" { ") ++ Printer.pr_glob_constr c ++ str " } "); msg(str ""); end let prNamedLConstr_aux lc = List.iter (prNamedConstr "\n") lc let prNamedLConstr s lc = begin prstr "[ "; prstr s; prNamedLConstr_aux lc; prstr " ]\n"; end let prNamedLDecl s lc = begin prstr s; prstr "\n"; List.iter (fun (nm,_,tp) -> prNamedConstr (string_of_name nm) tp) lc; prstr "\n"; end let prNamedRLDecl s lc = begin prstr s; prstr "\n"; prstr "{ "; List.iter (fun x -> match x with | (nm,None,Some tp) -> prNamedRConstr (string_of_name nm) tp | (nm,Some bdy,None) -> prNamedRConstr ("(letin) "^string_of_name nm) bdy | _ -> assert false ) lc; prstr " }\n"; prstr "\n"; end let showind (id:identifier) = let cstrid = Constrintern.global_reference id in let ind1,cstrlist = Inductiveops.find_inductive (Global.env()) Evd.empty cstrid in let mib1,ib1 = Inductive.lookup_mind_specif (Global.env()) ind1 in List.iter (fun (nm, optcstr, tp) -> print_string (string_of_name nm^":"); prconstr tp; print_string "\n") ib1.mind_arity_ctxt; (match ib1.mind_arity with | Monomorphic x -> Printf.printf "arity :"; prconstr x.mind_user_arity | Polymorphic x -> Printf.printf "arity : universe?"); Array.iteri (fun i x -> Printf.printf"type constr %d :" i ; prconstr x) ib1.mind_user_lc (** {2 Misc} *) exception Found of int (* Array scanning *) let array_prfx (arr: 'a array) (pred: int -> 'a -> bool): int = try for i=0 to Array.length arr - 1 do if pred i (arr.(i)) then raise (Found i) done; Array.length arr (* all elt are positive *) with Found i -> i let array_fold_lefti (f: int -> 'a -> 'b -> 'a) (acc:'a) (arr:'b array): 'a = let i = ref 0 in Array.fold_left (fun acc x -> let res = f !i acc x in i := !i + 1; res) acc arr (* Like list_chop but except that [i] is the size of the suffix of [l]. 
*) let list_chop_end i l = let size_prefix = List.length l -i in if size_prefix < 0 then failwith "list_chop_end" else list_chop size_prefix l let list_fold_lefti (f: int -> 'a -> 'b -> 'a) (acc:'a) (arr:'b list): 'a = let i = ref 0 in List.fold_left (fun acc x -> let res = f !i acc x in i := !i + 1; res) acc arr let list_filteri (f: int -> 'a -> bool) (l:'a list):'a list = let i = ref 0 in List.filter (fun x -> let res = f !i x in i := !i + 1; res) l (** Iteration module *) module For = struct let rec map i j (f: int -> 'a) = if i>j then [] else f i :: (map (i+1) j f) let rec foldup i j (f: 'a -> int -> 'a) acc = if i>j then acc else let newacc = f acc i in foldup (i+1) j f newacc let rec folddown i j (f: 'a -> int -> 'a) acc = if i>j then acc else let newacc = f acc j in folddown i (j-1) f newacc let fold i j = if i Printf.sprintf "Linked %d" i | Unlinked -> Printf.sprintf "Unlinked" | Funres -> Printf.sprintf "Funres" let linkmonad f lnkvar = match lnkvar with | Linked i -> Linked (f i) | Unlinked -> Unlinked | Funres -> Funres let linklift lnkvar i = linkmonad (fun x -> x+i) lnkvar (* This map is used to deal with debruijn linked indices. *) module Link = Map.Make (struct type t = int let compare = Pervasives.compare end) let pr_links l = Printf.printf "links:\n"; Link.iter (fun k e -> Printf.printf "%d : %s\n" k (prlinked e)) l; Printf.printf "_____________\n" type 'a merged_arg = | Prm_stable of 'a | Prm_linked of 'a | Prm_arg of 'a | Arg_stable of 'a | Arg_linked of 'a | Arg_funres (** Information about graph merging of two inductives. All rel_decl list are IN REVERSE ORDER (ie well suited for compose) *) type merge_infos = { ident:identifier; (** new inductive name *) mib1: mutual_inductive_body; oib1: one_inductive_body; mib2: mutual_inductive_body; oib2: one_inductive_body; (** Array of links of the first inductive (should be all stable) *) lnk1: int merged_arg array; (** Array of links of the second inductive (point to the first ind param/args) *) lnk2: int merged_arg array; (** rec params which remain rec param (ie not linked) *) recprms1: rel_declaration list; recprms2: rel_declaration list; nrecprms1: int; nrecprms2: int; (** rec parms which became non parm (either linked to something or because after a rec parm that became non parm) *) otherprms1: rel_declaration list; otherprms2: rel_declaration list; notherprms1:int; notherprms2:int; (** args which remain args in merge *) args1:rel_declaration list; args2:rel_declaration list; nargs1:int; nargs2:int; (** functional result args *) funresprms1: rel_declaration list; funresprms2: rel_declaration list; nfunresprms1:int; nfunresprms2:int; } let pr_merginfo x = let i,s= match x with | Prm_linked i -> Some i,"Prm_linked" | Arg_linked i -> Some i,"Arg_linked" | Prm_stable i -> Some i,"Prm_stable" | Prm_arg i -> Some i,"Prm_arg" | Arg_stable i -> Some i,"Arg_stable" | Arg_funres -> None , "Arg_funres" in match i with | Some i -> Printf.sprintf "%s(%d)" s i | None -> Printf.sprintf "%s" s let isPrm_stable x = match x with Prm_stable _ -> true | _ -> false (* ?? prm_linked?? 
*) let isArg_stable x = match x with Arg_stable _ | Prm_arg _ -> true | _ -> false let is_stable x = match x with Arg_stable _ | Prm_stable _ | Prm_arg _ -> true | _ -> false let isArg_funres x = match x with Arg_funres -> true | _ -> false let filter_shift_stable (lnk:int merged_arg array) (l:'a list): 'a list = let prms = list_filteri (fun i _ -> isPrm_stable lnk.(i)) l in let args = list_filteri (fun i _ -> isArg_stable lnk.(i)) l in let fres = list_filteri (fun i _ -> isArg_funres lnk.(i)) l in prms@args@fres (** Reverse the link map, keeping only linked vars, elements are list of int as several vars may be linked to the same var. *) let revlinked lnk = For.fold 0 (Array.length lnk - 1) (fun acc k -> match lnk.(k) with | Unlinked | Funres -> acc | Linked i -> let old = try Link.find i acc with Not_found -> [] in Link.add i (k::old) acc) Link.empty let array_switch arr i j = let aux = arr.(j) in arr.(j) <- arr.(i); arr.(i) <- aux let filter_shift_stable_right (lnk:int merged_arg array) (l:'a list): 'a list = let larr = Array.of_list l in let _ = Array.iteri (fun j x -> match x with | Prm_linked i -> array_switch larr i j | Arg_linked i -> array_switch larr i j | Prm_stable i -> () | Prm_arg i -> () | Arg_stable i -> () | Arg_funres -> () ) lnk in filter_shift_stable lnk (Array.to_list larr) (** {1 Utilities for merging} *) let ind1name = id_of_string "__ind1" let ind2name = id_of_string "__ind2" (** Performs verifications on two graphs before merging: they must not be co-inductive, and for the moment they must not be mutual either. *) let verify_inds mib1 mib2 = if not mib1.mind_finite then error "First argument is coinductive"; if not mib2.mind_finite then error "Second argument is coinductive"; if mib1.mind_ntypes <> 1 then error "First argument is mutual"; if mib2.mind_ntypes <> 1 then error "Second argument is mutual"; () (* (** [build_raw_params prms_decl avoid] returns a list of variables attributed to the list of decl [prms_decl], avoiding names in [avoid]. *) let build_raw_params prms_decl avoid = let dummy_constr = compose_prod (List.map (fun (x,_,z) -> x,z) prms_decl) (mkRel 1) in let _ = prNamedConstr "DUMMY" dummy_constr in let dummy_glob_constr = Detyping.detype false avoid [] dummy_constr in let _ = prNamedRConstr "RAWDUMMY" dummy_glob_constr in let res,_ = glob_decompose_prod dummy_glob_constr in let comblist = List.combine prms_decl res in comblist, res , (avoid @ (Idset.elements (ids_of_glob_constr dummy_glob_constr))) *) let ids_of_rawlist avoid rawl = List.fold_left Idset.union avoid (List.map ids_of_glob_constr rawl) (** {1 Merging function graphs} *) (** [shift_linked_params mib1 mib2 lnk] Computes which parameters (rec uniform and ordinary ones) of mutual inductives [mib1] and [mib2] remain uniform when linked by [lnk]. All parameters are considered, ie we take parameters of the first inductive body of [mib1] and [mib2]. Explanation: The two inductives have parameters, some of the first are recursively uniform, some of the last are functional result of the functional graph. (I x1 x2 ... xk ... xk' ... xn) (J y1 y2 ... xl ... yl' ... ym) Problem is, if some rec unif params are linked to non rec unif ones, they become non rec (and the following too). 
And functinal argument have to be shifted at the end *) let shift_linked_params mib1 mib2 (lnk1:linked_var array) (lnk2:linked_var array) id = let _ = prstr "\nYOUHOU shift\n" in let linked_targets = revlinked lnk2 in let is_param_of_mib1 x = x < mib1.mind_nparams_rec in let is_param_of_mib2 x = x < mib2.mind_nparams_rec in let is_targetted_by_non_recparam_lnk1 i = try let targets = Link.find i linked_targets in List.exists (fun x -> not (is_param_of_mib2 x)) targets with Not_found -> false in let mlnk1 = Array.mapi (fun i lkv -> let isprm = is_param_of_mib1 i in let prmlost = is_targetted_by_non_recparam_lnk1 i in match isprm , prmlost, lnk1.(i) with | true , true , _ -> Prm_arg i (* recparam becoming ordinary *) | true , false , _-> Prm_stable i (* recparam remains recparam*) | false , false , Funres -> Arg_funres | _ , _ , Funres -> assert false (* fun res cannot be a rec param or lost *) | false , _ , _ -> Arg_stable i) (* Args of lnk1 are not linked *) lnk1 in let mlnk2 = Array.mapi (fun i lkv -> (* Is this correct if some param of ind2 is lost? *) let isprm = is_param_of_mib2 i in match isprm , lnk2.(i) with | true , Linked j when not (is_param_of_mib1 j) -> Prm_arg j (* recparam becoming ordinary *) | true , Linked j -> Prm_linked j (*recparam linked to recparam*) | true , Unlinked -> Prm_stable i (* recparam remains recparam*) | false , Linked j -> Arg_linked j (* Args of lnk2 lost *) | false , Unlinked -> Arg_stable i (* Args of lnk2 remains *) | false , Funres -> Arg_funres | true , Funres -> assert false (* fun res cannot be a rec param *) ) lnk2 in let oib1 = mib1.mind_packets.(0) in let oib2 = mib2.mind_packets.(0) in (* count params remaining params *) let n_params1 = array_prfx mlnk1 (fun i x -> not (isPrm_stable x)) in let n_params2 = array_prfx mlnk2 (fun i x -> not (isPrm_stable x)) in let bldprms arity_ctxt mlnk = list_fold_lefti (fun i (acc1,acc2,acc3,acc4) x -> prstr (pr_merginfo mlnk.(i));prstr "\n"; match mlnk.(i) with | Prm_stable _ -> x::acc1 , acc2 , acc3, acc4 | Prm_arg _ -> acc1 , x::acc2 , acc3, acc4 | Arg_stable _ -> acc1 , acc2 , x::acc3, acc4 | Arg_funres -> acc1 , acc2 , acc3, x::acc4 | _ -> acc1 , acc2 , acc3, acc4) ([],[],[],[]) arity_ctxt in (* let arity_ctxt2 = build_raw_params oib2.mind_arity_ctxt (Idset.elements (ids_of_glob_constr oib1.mind_arity_ctxt)) in*) let recprms1,otherprms1,args1,funresprms1 = bldprms (List.rev oib1.mind_arity_ctxt) mlnk1 in let _ = prstr "\n\n\n" in let recprms2,otherprms2,args2,funresprms2 = bldprms (List.rev oib2.mind_arity_ctxt) mlnk2 in let _ = prstr "\notherprms1:\n" in let _ = List.iter (fun (x,_,y) -> prstr (string_of_name x^" : ");prconstr y;prstr "\n") otherprms1 in let _ = prstr "\notherprms2:\n" in let _ = List.iter (fun (x,_,y) -> prstr (string_of_name x^" : ");prconstr y;prstr "\n") otherprms2 in { ident=id; mib1=mib1; oib1 = oib1; mib2=mib2; oib2 = oib2; lnk1 = mlnk1; lnk2 = mlnk2; nrecprms1 = n_params1; recprms1 = recprms1; otherprms1 = otherprms1; args1 = args1; funresprms1 = funresprms1; notherprms1 = Array.length mlnk1 - n_params1; nfunresprms1 = List.length funresprms1; nargs1 = List.length args1; nrecprms2 = n_params2; recprms2 = recprms2; otherprms2 = otherprms2; args2 = args2; funresprms2 = funresprms2; notherprms2 = Array.length mlnk2 - n_params2; nargs2 = List.length args2; nfunresprms2 = List.length funresprms2; } (** {1 Merging functions} *) exception NoMerge let rec merge_app c1 c2 id1 id2 shift filter_shift_stable = let lnk = Array.append shift.lnk1 shift.lnk2 in match c1 , c2 with | GApp(_,f1, 
arr1), GApp(_,f2,arr2) when isVarf id1 f1 && isVarf id2 f2 -> let _ = prstr "\nICI1!\n";Pp.flush_all() in let args = filter_shift_stable lnk (arr1 @ arr2) in GApp (dummy_loc,GVar (dummy_loc,shift.ident) , args) | GApp(_,f1, arr1), GApp(_,f2,arr2) -> raise NoMerge | GLetIn(_,nme,bdy,trm) , _ -> let _ = prstr "\nICI2!\n";Pp.flush_all() in let newtrm = merge_app trm c2 id1 id2 shift filter_shift_stable in GLetIn(dummy_loc,nme,bdy,newtrm) | _, GLetIn(_,nme,bdy,trm) -> let _ = prstr "\nICI3!\n";Pp.flush_all() in let newtrm = merge_app c1 trm id1 id2 shift filter_shift_stable in GLetIn(dummy_loc,nme,bdy,newtrm) | _ -> let _ = prstr "\nICI4!\n";Pp.flush_all() in raise NoMerge let rec merge_app_unsafe c1 c2 shift filter_shift_stable = let lnk = Array.append shift.lnk1 shift.lnk2 in match c1 , c2 with | GApp(_,f1, arr1), GApp(_,f2,arr2) -> let args = filter_shift_stable lnk (arr1 @ arr2) in GApp (dummy_loc,GVar(dummy_loc,shift.ident) , args) (* FIXME: what if the function appears in the body of the let? *) | GLetIn(_,nme,bdy,trm) , _ -> let _ = prstr "\nICI2 '!\n";Pp.flush_all() in let newtrm = merge_app_unsafe trm c2 shift filter_shift_stable in GLetIn(dummy_loc,nme,bdy,newtrm) | _, GLetIn(_,nme,bdy,trm) -> let _ = prstr "\nICI3 '!\n";Pp.flush_all() in let newtrm = merge_app_unsafe c1 trm shift filter_shift_stable in GLetIn(dummy_loc,nme,bdy,newtrm) | _ -> let _ = prstr "\nICI4 '!\n";Pp.flush_all() in raise NoMerge (* Heuristic when merging two lists of hypothesis: merge every rec calls of branch 1 with all rec calls of branch 2. *) (* TODO: reecrire cette heuristique (jusqu'a merge_types) *) let rec merge_rec_hyps shift accrec (ltyp:(Names.name * glob_constr option * glob_constr option) list) filter_shift_stable : (Names.name * glob_constr option * glob_constr option) list = let mergeonehyp t reldecl = match reldecl with | (nme,x,Some (GApp(_,i,args) as ind)) -> nme,x, Some (merge_app_unsafe ind t shift filter_shift_stable) | (nme,Some _,None) -> error "letins with recursive calls not treated yet" | (nme,None,Some _) -> assert false | (nme,None,None) | (nme,Some _,Some _) -> assert false in match ltyp with | [] -> [] | (nme,None,Some (GApp(_,f, largs) as t)) :: lt when isVarf ind2name f -> let rechyps = List.map (mergeonehyp t) accrec in rechyps @ merge_rec_hyps shift accrec lt filter_shift_stable | e::lt -> e :: merge_rec_hyps shift accrec lt filter_shift_stable let rec build_suppl_reccall (accrec:(name * glob_constr) list) concl2 shift = List.map (fun (nm,tp) -> (nm,merge_app_unsafe tp concl2 shift)) accrec let find_app (nme:identifier) ltyp = try ignore (List.map (fun x -> match x with | _,None,Some (GApp(_,f,_)) when isVarf nme f -> raise (Found 0) | _ -> ()) ltyp); false with Found _ -> true let prnt_prod_or_letin nm letbdy typ = match letbdy , typ with | Some lbdy , None -> prNamedRConstr ("(letin) " ^ string_of_name nm) lbdy | None , Some tp -> prNamedRConstr (string_of_name nm) tp | _ , _ -> assert false let rec merge_types shift accrec1 (ltyp1:(name * glob_constr option * glob_constr option) list) (concl1:glob_constr) (ltyp2:(name * glob_constr option * glob_constr option) list) concl2 : (name * glob_constr option * glob_constr option) list * glob_constr = let _ = prstr "MERGE_TYPES\n" in let _ = prstr "ltyp 1 : " in let _ = List.iter (fun (nm,lbdy,tp) -> prnt_prod_or_letin nm lbdy tp) ltyp1 in let _ = prstr "\nltyp 2 : " in let _ = List.iter (fun (nm,lbdy,tp) -> prnt_prod_or_letin nm lbdy tp) ltyp2 in let _ = prstr "\n" in let res = match ltyp1 with | [] -> let isrec1 = (accrec1<>[]) in 
let isrec2 = find_app ind2name ltyp2 in let rechyps = if isrec1 && isrec2 then (* merge_rec_hyps shift accrec1 ltyp2 filter_shift_stable *) merge_rec_hyps shift [name_of_string "concl1",None,Some concl1] ltyp2 filter_shift_stable_right @ merge_rec_hyps shift accrec1 [name_of_string "concl2",None, Some concl2] filter_shift_stable else if isrec1 (* if rec calls in accrec1 and not in ltyp2, add one to ltyp2 *) then merge_rec_hyps shift accrec1 (ltyp2@[name_of_string "concl2",None,Some concl2]) filter_shift_stable else if isrec2 then merge_rec_hyps shift [name_of_string "concl1",None,Some concl1] ltyp2 filter_shift_stable_right else ltyp2 in let _ = prstr"\nrechyps : " in let _ = List.iter(fun (nm,lbdy,tp)-> prnt_prod_or_letin nm lbdy tp) rechyps in let _ = prstr "MERGE CONCL : " in let _ = prNamedRConstr "concl1" concl1 in let _ = prstr " with " in let _ = prNamedRConstr "concl2" concl2 in let _ = prstr "\n" in let concl = merge_app concl1 concl2 ind1name ind2name shift filter_shift_stable in let _ = prstr "FIN " in let _ = prNamedRConstr "concl" concl in let _ = prstr "\n" in rechyps , concl | (nme,None, Some t1)as e ::lt1 -> (match t1 with | GApp(_,f,carr) when isVarf ind1name f -> merge_types shift (e::accrec1) lt1 concl1 ltyp2 concl2 | _ -> let recres, recconcl2 = merge_types shift accrec1 lt1 concl1 ltyp2 concl2 in ((nme,None,Some t1) :: recres) , recconcl2) | (nme,Some bd, None) ::lt1 -> (* FIXME: what if ind1name appears in bd? *) let recres, recconcl2 = merge_types shift accrec1 lt1 concl1 ltyp2 concl2 in ((nme,Some bd,None) :: recres) , recconcl2 | (_,None,None)::_ | (_,Some _,Some _)::_ -> assert false in res (** [build_link_map_aux allargs1 allargs2 shift] returns the mapping of linked args [allargs2] to target args of [allargs1] as specified in [shift]. [allargs1] and [allargs2] are in reverse order. Also returns the list of unlinked vars of [allargs2]. *) let build_link_map_aux (allargs1:identifier array) (allargs2:identifier array) (lnk:int merged_arg array) = array_fold_lefti (fun i acc e -> if i = Array.length lnk - 1 then acc (* functional arg, not in allargs *) else match e with | Prm_linked j | Arg_linked j -> Idmap.add allargs2.(i) allargs1.(j) acc | _ -> acc) Idmap.empty lnk let build_link_map allargs1 allargs2 lnk = let allargs1 = Array.of_list (List.rev (List.map (fun (x,_,_) -> id_of_name x) allargs1)) in let allargs2 = Array.of_list (List.rev (List.map (fun (x,_,_) -> id_of_name x) allargs2)) in build_link_map_aux allargs1 allargs2 lnk (** [merge_one_constructor lnk shift typcstr1 typcstr2] merges the two constructor rawtypes [typcstr1] and [typcstr2]. [typcstr1] and [typcstr2] contain all parameters (including rec. unif. ones) of their inductive. if [typcstr1] and [typcstr2] are of the form: forall recparams1, forall ordparams1, H1a -> H2a... (I1 x1 y1 ... z1) forall recparams2, forall ordparams2, H2b -> H2b... (I2 x2 y2 ... z2) we build: forall recparams1 (recparams2 without linked params), forall ordparams1 (ordparams2 without linked params), H1a' -> H2a' -> ... -> H2a' -> H2b'(shifted) -> ... -> (newI x1 ... z1 x2 y2 ...z2 without linked params) where Hix' have been adapted, ie: - linked vars have been changed, - rec calls to I1 and I2 have been replaced by rec calls to newI. More precisely calls to I1 and I2 have been merge by an experimental heuristic (in particular if n o rec calls for I1 or I2 is found, we use the conclusion as a rec call). See [merge_types] above. Precond: vars sets of [typcstr1] and [typcstr2] must be disjoint. 
TODO: return nothing if equalities (after linking) are contradictory. *) let merge_one_constructor (shift:merge_infos) (typcstr1:glob_constr) (typcstr2:glob_constr) : glob_constr = (* FIXME: les noms des parametres corerspondent en principe au parametres du niveau mib, mais il faudrait s'en assurer *) (* shift.nfunresprmsx last args are functional result *) let nargs1 = shift.mib1.mind_nparams + shift.oib1.mind_nrealargs - shift.nfunresprms1 in let nargs2 = shift.mib2.mind_nparams + shift.oib2.mind_nrealargs - shift.nfunresprms2 in let allargs1,rest1 = glob_decompose_prod_or_letin_n nargs1 typcstr1 in let allargs2,rest2 = glob_decompose_prod_or_letin_n nargs2 typcstr2 in (* Build map of linked args of [typcstr2], and apply it to [typcstr2]. *) let linked_map = build_link_map allargs1 allargs2 shift.lnk2 in let rest2 = change_vars linked_map rest2 in let hyps1,concl1 = glob_decompose_prod_or_letin rest1 in let hyps2,concl2' = glob_decompose_prod_or_letin rest2 in let ltyp,concl2 = merge_types shift [] (List.rev hyps1) concl1 (List.rev hyps2) concl2' in let _ = prNamedRLDecl "ltyp result:" ltyp in let typ = glob_compose_prod_or_letin concl2 (List.rev ltyp) in let revargs1 = list_filteri (fun i _ -> isArg_stable shift.lnk1.(i)) (List.rev allargs1) in let _ = prNamedRLDecl "ltyp allargs1" allargs1 in let _ = prNamedRLDecl "ltyp revargs1" revargs1 in let revargs2 = list_filteri (fun i _ -> isArg_stable shift.lnk2.(i)) (List.rev allargs2) in let _ = prNamedRLDecl "ltyp allargs2" allargs2 in let _ = prNamedRLDecl "ltyp revargs2" revargs2 in let typwithprms = glob_compose_prod_or_letin typ (List.rev revargs2 @ List.rev revargs1) in typwithprms (** constructor numbering *) let fresh_cstror_suffix , cstror_suffix_init = let cstror_num = ref 0 in (fun () -> let res = string_of_int !cstror_num in cstror_num := !cstror_num + 1; res) , (fun () -> cstror_num := 0) (** [merge_constructor_id id1 id2 shift] returns the identifier of the new constructor from the id of the two merged constructor and the merging info. *) let merge_constructor_id id1 id2 shift:identifier = let id = string_of_id shift.ident ^ "_" ^ fresh_cstror_suffix () in next_ident_fresh (id_of_string id) (** [merge_constructors lnk shift avoid] merges the two list of constructor [(name*type)]. These are translated to glob_constr first, each of them having distinct var names. *) let rec merge_constructors (shift:merge_infos) (avoid:Idset.t) (typcstr1:(identifier * glob_constr) list) (typcstr2:(identifier * glob_constr) list) : (identifier * glob_constr) list = List.flatten (List.map (fun (id1,rawtyp1) -> List.map (fun (id2,rawtyp2) -> let typ = merge_one_constructor shift rawtyp1 rawtyp2 in let newcstror_id = merge_constructor_id id1 id2 shift in let _ = prstr "\n**************\n" in newcstror_id , typ) typcstr2) typcstr1) (** [merge_inductive_body lnk shift avoid oib1 oib2] merges two inductive bodies [oib1] and [oib2], linking with [lnk], params info in [shift], avoiding identifiers in [avoid]. 
*) let rec merge_inductive_body (shift:merge_infos) avoid (oib1:one_inductive_body) (oib2:one_inductive_body) = (* building glob_constr type of constructors *) let mkrawcor nme avoid typ = (* first replace rel 1 by a varname *) let substindtyp = substitterm 0 (mkRel 1) (mkVar nme) typ in Detyping.detype false (Idset.elements avoid) [] substindtyp in let lcstr1: glob_constr list = Array.to_list (Array.map (mkrawcor ind1name avoid) oib1.mind_user_lc) in (* add to avoid all indentifiers of lcstr1 *) let avoid2 = Idset.union avoid (ids_of_rawlist avoid lcstr1) in let lcstr2 = Array.to_list (Array.map (mkrawcor ind2name avoid2) oib2.mind_user_lc) in let avoid3 = Idset.union avoid (ids_of_rawlist avoid lcstr2) in let params1 = try fst (glob_decompose_prod_n shift.nrecprms1 (List.hd lcstr1)) with e when Errors.noncritical e -> [] in let params2 = try fst (glob_decompose_prod_n shift.nrecprms2 (List.hd lcstr2)) with e when Errors.noncritical e -> [] in let lcstr1 = List.combine (Array.to_list oib1.mind_consnames) lcstr1 in let lcstr2 = List.combine (Array.to_list oib2.mind_consnames) lcstr2 in cstror_suffix_init(); params1,params2,merge_constructors shift avoid3 lcstr1 lcstr2 (** [merge_mutual_inductive_body lnk mib1 mib2 shift] merge mutual inductive bodies [mib1] and [mib2] linking vars with [lnk]. [shift] information on parameters of the new inductive. For the moment, inductives are supposed to be non mutual. *) let rec merge_mutual_inductive_body (mib1:mutual_inductive_body) (mib2:mutual_inductive_body) (shift:merge_infos) = (* Mutual not treated, we take first ind body of each. *) merge_inductive_body shift Idset.empty mib1.mind_packets.(0) mib2.mind_packets.(0) let glob_constr_to_constr_expr x = (* build a constr_expr from a glob_constr *) Flags.with_option Flags.raw_print (Constrextern.extern_glob_type Idset.empty) x let merge_rec_params_and_arity prms1 prms2 shift (concl:constr) = let params = prms2 @ prms1 in let resparams = List.fold_left (fun acc (nme,tp) -> let _ = prstr "param :" in let _ = prNamedRConstr (string_of_name nme) tp in let _ = prstr " ; " in let typ = glob_constr_to_constr_expr tp in LocalRawAssum ([(dummy_loc,nme)], Topconstr.default_binder_kind, typ) :: acc) [] params in let concl = Constrextern.extern_constr false (Global.env()) concl in let arity,_ = List.fold_left (fun (acc,env) (nm,_,c) -> let typ = Constrextern.extern_constr false env c in let newenv = Environ.push_rel (nm,None,c) env in CProdN (dummy_loc, [[(dummy_loc,nm)],Topconstr.default_binder_kind,typ] , acc) , newenv) (concl,Global.env()) (shift.funresprms2 @ shift.funresprms1 @ shift.args2 @ shift.args1 @ shift.otherprms2 @ shift.otherprms1) in resparams,arity (** [glob_constr_list_to_inductive_expr ident rawlist] returns the induct_expr corresponding to the the list of constructor types [rawlist], named ident. FIXME: params et cstr_expr (arity) *) let glob_constr_list_to_inductive_expr prms1 prms2 mib1 mib2 shift (rawlist:(identifier * glob_constr) list) = let lident = dummy_loc, shift.ident in let bindlist , cstr_expr = (* params , arities *) merge_rec_params_and_arity prms1 prms2 shift mkSet in let lcstor_expr : (bool * (lident * constr_expr)) list = List.map (* zeta_normalize t ? 
*) (fun (id,t) -> false, ((dummy_loc,id),glob_constr_to_constr_expr t)) rawlist in lident , bindlist , Some cstr_expr , lcstor_expr let mkProd_reldecl (rdecl:rel_declaration) (t2:glob_constr) = match rdecl with | (nme,None,t) -> let traw = Detyping.detype false [] [] t in GProd (dummy_loc,nme,Explicit,traw,t2) | (_,Some _,_) -> assert false let mkProd_reldecl (rdecl:rel_declaration) (t2:glob_constr) = match rdecl with | (nme,None,t) -> let traw = Detyping.detype false [] [] t in GProd (dummy_loc,nme,Explicit,traw,t2) | (_,Some _,_) -> assert false (** [merge_inductive ind1 ind2 lnk] merges two graphs, linking variables specified in [lnk]. Graphs are not supposed to be mutual inductives for the moment. *) let merge_inductive (ind1: inductive) (ind2: inductive) (lnk1: linked_var array) (lnk2: linked_var array) id = let env = Global.env() in let mib1,_ = Inductive.lookup_mind_specif env ind1 in let mib2,_ = Inductive.lookup_mind_specif env ind2 in let _ = verify_inds mib1 mib2 in (* raises an exception if something wrong *) (* compute params that become ordinary args (because linked to ord. args) *) let shift_prm = shift_linked_params mib1 mib2 lnk1 lnk2 id in let prms1,prms2, rawlist = merge_mutual_inductive_body mib1 mib2 shift_prm in let _ = prstr "\nrawlist : " in let _ = List.iter (fun (nm,tp) -> prNamedRConstr (string_of_id nm) tp;prstr "\n") rawlist in let _ = prstr "\nend rawlist\n" in (* FIX: retransformer en constr ici let shift_prm = { shift_prm with recprms1=prms1; recprms1=prms1; } in *) let indexpr = glob_constr_list_to_inductive_expr prms1 prms2 mib1 mib2 shift_prm rawlist in (* Declare inductive *) let indl,_,_ = Command.extract_mutual_inductive_declaration_components [(indexpr,[])] in let mie,impls = Command.interp_mutual_inductive indl [] true (* means: not coinductive *) in (* Declare the mutual inductive block with its associated schemes *) ignore (Command.declare_mutual_inductive_with_eliminations Declare.UserVerbose mie impls) (* Find infos on identifier id. *) let find_Function_infos_safe (id:identifier): Indfun_common.function_info = let kn_of_id x = let f_ref = Libnames.Ident (dummy_loc,x) in locate_with_msg (str "Don't know what to do with " ++ Libnames.pr_reference f_ref) locate_constant f_ref in try find_Function_infos (kn_of_id id) with Not_found -> errorlabstrm "indfun" (Nameops.pr_id id ++ str " has no functional scheme") (** [merge id1 id2 args1 args2 id] builds and declares a new inductive type called [id], representing the merged graphs of both graphs [ind1] and [ind2]. identifiers occuring in both arrays [args1] and [args2] are considered linked (i.e. are the same variable) in the new graph. Warning: For the moment, repetitions of an id in [args1] or [args2] are not supported. *) let merge (id1:identifier) (id2:identifier) (args1:identifier array) (args2:identifier array) id : unit = let finfo1 = find_Function_infos_safe id1 in let finfo2 = find_Function_infos_safe id2 in (* FIXME? args1 are supposed unlinked. mergescheme (G x x) ?? *) (* We add one arg (functional arg of the graph) *) let lnk1 = Array.make (Array.length args1 + 1) Unlinked in let lnk2' = (* args2 may be linked to args1 members. FIXME: same as above: vars may be linked inside args2?? 
*) Array.mapi (fun i c -> match array_find_i (fun i x -> x=c) args1 with | Some j -> Linked j | None -> Unlinked) args2 in (* We add one arg (functional arg of the graph) *) let lnk2 = Array.append lnk2' (Array.make 1 Unlinked) in (* setting functional results *) let _ = lnk1.(Array.length lnk1 - 1) <- Funres in let _ = lnk2.(Array.length lnk2 - 1) <- Funres in merge_inductive finfo1.graph_ind finfo2.graph_ind lnk1 lnk2 id let remove_last_arg c = let (x,y) = decompose_prod c in let xnolast = List.rev (List.tl (List.rev x)) in compose_prod xnolast y let rec remove_n_fst_list n l = if n=0 then l else remove_n_fst_list (n-1) (List.tl l) let remove_n_last_list n l = List.rev (remove_n_fst_list n (List.rev l)) let remove_last_n_arg n c = let (x,y) = decompose_prod c in let xnolast = remove_n_last_list n x in compose_prod xnolast y (* [funify_branches relinfo nfuns branch] returns the branch [branch] of the relinfo [relinfo] modified to fit in a functional principle. Things to do: - remove indargs from rel applications - replace *variables only* corresponding to function (recursive) results by the actual function application. *) let funify_branches relinfo nfuns branch = let mut_induct, induct = match relinfo.indref with | None -> assert false | Some (IndRef ((mutual_ind,i) as ind)) -> mutual_ind,ind | _ -> assert false in let is_dom c = match kind_of_term c with | Ind((u,_)) | Construct((u,_),_) -> u = mut_induct | _ -> false in let _dom_i c = assert (is_dom c); match kind_of_term c with | Ind((u,i)) | Construct((u,_),i) -> i | _ -> assert false in let _is_pred c shift = match kind_of_term c with | Rel i -> let reali = i-shift in (reali>=0 && reali false in (* FIXME: *) (Anonymous,Some mkProp,mkProp) let relprinctype_to_funprinctype relprinctype nfuns = let relinfo = compute_elim_sig relprinctype in assert (not relinfo.farg_in_concl); assert (relinfo.indarg_in_concl); (* first remove indarg and indarg_in_concl *) let relinfo_noindarg = { relinfo with indarg_in_concl = false; indarg = None; concl = remove_last_arg (pop relinfo.concl); } in (* the nfuns last induction arguments are functional ones: remove them *) let relinfo_argsok = { relinfo_noindarg with nargs = relinfo_noindarg.nargs - nfuns; (* args is in reverse order, so remove fst *) args = remove_n_fst_list nfuns relinfo_noindarg.args; concl = popn nfuns relinfo_noindarg.concl } in let new_branches = List.map (funify_branches relinfo_argsok nfuns) relinfo_argsok.branches in let relinfo_branches = { relinfo_argsok with branches = new_branches } in relinfo_branches (* @article{ bundy93rippling, author = "Alan Bundy and Andrew Stevens and Frank van Harmelen and Andrew Ireland and Alan Smaill", title = "Rippling: A Heuristic for Guiding Inductive Proofs", journal = "Artificial Intelligence", volume = "62", number = "2", pages = "185-253", year = "1993", url = "citeseer.ist.psu.edu/bundy93rippling.html" } *) coq-8.4pl2/plugins/funind/indfun_common.mli0000640000175000001440000000620711504715034020136 0ustar notinusersopen Names open Pp (* The mk_?_id function build different name w.r.t. 
a function Each of their use is justified in the code *) val mk_rel_id : identifier -> identifier val mk_correct_id : identifier -> identifier val mk_complete_id : identifier -> identifier val mk_equation_id : identifier -> identifier val msgnl : std_ppcmds -> unit val invalid_argument : string -> 'a val fresh_id : identifier list -> string -> identifier val fresh_name : identifier list -> string -> name val get_name : identifier list -> ?default:string -> name -> name val array_get_start : 'a array -> 'a array val id_of_name : name -> identifier val locate_ind : Libnames.reference -> inductive val locate_constant : Libnames.reference -> constant val locate_with_msg : Pp.std_ppcmds -> (Libnames.reference -> 'a) -> Libnames.reference -> 'a val filter_map : ('a -> bool) -> ('a -> 'b) -> 'a list -> 'b list val list_union_eq : ('a -> 'a -> bool) -> 'a list -> 'a list -> 'a list val list_add_set_eq : ('a -> 'a -> bool) -> 'a -> 'a list -> 'a list val chop_rlambda_n : int -> Glob_term.glob_constr -> (name*Glob_term.glob_constr*bool) list * Glob_term.glob_constr val chop_rprod_n : int -> Glob_term.glob_constr -> (name*Glob_term.glob_constr) list * Glob_term.glob_constr val def_of_const : Term.constr -> Term.constr val eq : Term.constr Lazy.t val refl_equal : Term.constr Lazy.t val const_of_id: identifier -> constant val jmeq : unit -> Term.constr val jmeq_refl : unit -> Term.constr (* [save_named] is a copy of [Command.save_named] but uses [nf_betaiotazeta] instead of [nf_betaiotaevar_preserving_vm_cast] *) val new_save_named : bool -> unit val save : bool -> identifier -> Entries.definition_entry -> Decl_kinds.goal_kind -> Tacexpr.declaration_hook -> unit (* [get_proof_clean do_reduce] : returns the proof name, definition, kind and hook and abort the proof *) val get_proof_clean : bool -> Names.identifier * (Entries.definition_entry * Decl_kinds.goal_kind * Tacexpr.declaration_hook) (* [with_full_print f a] applies [f] to [a] in full printing environment This function preserves the print settings *) val with_full_print : ('a -> 'b) -> 'a -> 'b (*****************) type function_info = { function_constant : constant; graph_ind : inductive; equation_lemma : constant option; correctness_lemma : constant option; completeness_lemma : constant option; rect_lemma : constant option; rec_lemma : constant option; prop_lemma : constant option; is_general : bool; } val find_Function_infos : constant -> function_info val find_Function_of_graph : inductive -> function_info (* WARNING: To be used just after the graph definition !!! 
*) val add_Function : bool -> constant -> unit val update_Function : function_info -> unit (** debugging *) val pr_info : function_info -> Pp.std_ppcmds val pr_table : unit -> Pp.std_ppcmds (* val function_debug : bool ref *) val do_observe : unit -> bool val do_rewrite_dependent : unit -> bool (* To localize pb *) exception Building_graph of exn exception Defining_principle of exn exception ToShow of exn val is_strict_tcc : unit -> bool coq-8.4pl2/plugins/funind/invfun.ml0000640000175000001440000010603612121620060016426 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* hov 1 (Ppconstr.pr_id id ++ str " := " ++ Pp.cut () ++ prc c) | loc, Glob_term.AnonHyp n, c -> hov 1 (int n ++ str " := " ++ Pp.cut () ++ prc c) let pr_bindings prc prlc = function | Glob_term.ImplicitBindings l -> brk (1,1) ++ str "with" ++ brk (1,1) ++ Util.prlist_with_sep spc prc l | Glob_term.ExplicitBindings l -> brk (1,1) ++ str "with" ++ brk (1,1) ++ Util.prlist_with_sep spc (fun b -> str"(" ++ pr_binding prlc b ++ str")") l | Glob_term.NoBindings -> mt () let pr_with_bindings prc prlc (c,bl) = prc c ++ hv 0 (pr_bindings prc prlc bl) let pr_constr_with_binding prc (c,bl) : Pp.std_ppcmds = pr_with_bindings prc prc (c,bl) (* The local debuging mechanism *) let msgnl = Pp.msgnl let observe strm = if do_observe () then Pp.msgnl strm else () let observennl strm = if do_observe () then begin Pp.msg strm;Pp.pp_flush () end else () let do_observe_tac s tac g = let goal = try Printer.pr_goal g with e when Errors.noncritical e -> assert false in try let v = tac g in msgnl (goal ++ fnl () ++ s ++(str " ")++(str "finished")); v with reraise -> let e' = Cerrors.process_vernac_interp_error reraise in msgnl (str "observation "++ s++str " raised exception " ++ Errors.print e' ++ str " on goal " ++ goal ); raise reraise;; let observe_tac s tac g = if do_observe () then do_observe_tac (str s) tac g else tac g (* [nf_zeta] $\zeta$-normalization of a term *) let nf_zeta = Reductionops.clos_norm_flags (Closure.RedFlags.mkflags [Closure.RedFlags.fZETA]) Environ.empty_env Evd.empty (* [id_to_constr id] finds the term associated to [id] in the global environment *) let id_to_constr id = try Constrintern.global_reference id with Not_found -> raise (UserError ("",str "Cannot find " ++ Ppconstr.pr_id id)) (* [generate_type g_to_f f graph i] build the completeness (resp. correctness) lemma type if [g_to_f = true] (resp. g_to_f = false) where [graph] is the graph of [f] and is the [i]th function in the block. 
[generate_type true f i] returns \[\forall (x_1:t_1)\ldots(x_n:t_n), let fv := f x_1\ldots x_n in, forall res, graph\ x_1\ldots x_n\ res \rightarrow res = fv \] decomposed as the context and the conclusion [generate_type false f i] returns \[\forall (x_1:t_1)\ldots(x_n:t_n), let fv := f x_1\ldots x_n in, forall res, res = fv \rightarrow graph\ x_1\ldots x_n\ res\] decomposed as the context and the conclusion *) let generate_type g_to_f f graph i = (*i we deduce the number of arguments of the function and its returned type from the graph i*) let graph_arity = Inductive.type_of_inductive (Global.env()) (Global.lookup_inductive (destInd graph)) in let ctxt,_ = decompose_prod_assum graph_arity in let fun_ctxt,res_type = match ctxt with | [] | [_] -> anomaly "Not a valid context" | (_,_,res_type)::fun_ctxt -> fun_ctxt,res_type in let nb_args = List.length fun_ctxt in let args_from_decl i decl = match decl with | (_,Some _,_) -> incr i; failwith "args_from_decl" | _ -> let j = !i in incr i;mkRel (nb_args - j + 1) in (*i We need to name the vars [res] and [fv] i*) let res_id = Namegen.next_ident_away_in_goal (id_of_string "res") (map_succeed (function (Name id,_,_) -> id | (Anonymous,_,_) -> failwith "") fun_ctxt) in let fv_id = Namegen.next_ident_away_in_goal (id_of_string "fv") (res_id::(map_succeed (function (Name id,_,_) -> id | (Anonymous,_,_) -> failwith "Anonymous!") fun_ctxt)) in (*i we can then type the argument to be applied to the function [f] i*) let args_as_rels = let i = ref 0 in Array.of_list ((map_succeed (args_from_decl i) (List.rev fun_ctxt))) in let args_as_rels = Array.map Termops.pop args_as_rels in (*i the hypothesis [res = fv] can then be computed We will need to lift it by one in order to use it as a conclusion i*) let res_eq_f_of_args = mkApp(Coqlib.build_coq_eq (),[|lift 2 res_type;mkRel 1;mkRel 2|]) in (*i The hypothesis [graph\ x_1\ldots x_n\ res] can then be computed We will need to lift it by one in order to use it as a conclusion i*) let graph_applied = let args_and_res_as_rels = let i = ref 0 in Array.of_list ((map_succeed (args_from_decl i) (List.rev ((Name res_id,None,res_type)::fun_ctxt))) ) in let args_and_res_as_rels = Array.mapi (fun i c -> if i <> Array.length args_and_res_as_rels - 1 then lift 1 c else c) args_and_res_as_rels in mkApp(graph,args_and_res_as_rels) in (*i The [pre_context] is the defined to be the context corresponding to \[\forall (x_1:t_1)\ldots(x_n:t_n), let fv := f x_1\ldots x_n in, forall res, \] i*) let pre_ctxt = (Name res_id,None,lift 1 res_type)::(Name fv_id,Some (mkApp(mkConst f,args_as_rels)),res_type)::fun_ctxt in (*i and we can return the solution depending on which lemma type we are defining i*) if g_to_f then (Anonymous,None,graph_applied)::pre_ctxt,(lift 1 res_eq_f_of_args) else (Anonymous,None,res_eq_f_of_args)::pre_ctxt,(lift 1 graph_applied) (* [find_induction_principle f] searches and returns the [body] and the [type] of [f_rect] WARNING: while convertible, [type_of body] and [type] can be non equal *) let find_induction_principle f = let f_as_constant = match kind_of_term f with | Const c' -> c' | _ -> error "Must be used with a function" in let infos = find_Function_infos f_as_constant in match infos.rect_lemma with | None -> raise Not_found | Some rect_lemma -> let rect_lemma = mkConst rect_lemma in let typ = Typing.type_of (Global.env ()) Evd.empty rect_lemma in rect_lemma,typ (* let fname = *) (* match kind_of_term f with *) (* | Const c' -> *) (* id_of_label (con_label c') *) (* | _ -> error "Must be used with a 
function" *) (* in *) (* let princ_name = *) (* ( *) (* Indrec.make_elimination_ident *) (* fname *) (* InType *) (* ) *) (* in *) (* let c = (\* mkConst(mk_from_const (destConst f) princ_name ) in *\) failwith "" in *) (* c,Typing.type_of (Global.env ()) Evd.empty c *) let rec generate_fresh_id x avoid i = if i == 0 then [] else let id = Namegen.next_ident_away_in_goal x avoid in id::(generate_fresh_id x (id::avoid) (pred i)) (* [prove_fun_correct functional_induction funs_constr graphs_constr schemes lemmas_types_infos i ] is the tactic used to prove correctness lemma. [functional_induction] is the tactic defined in [indfun] (dependency problem) [funs_constr], [graphs_constr] [schemes] [lemmas_types_infos] are the mutually recursive functions (resp. graphs of the functions and principles and correctness lemma types) to prove correct. [i] is the indice of the function to prove correct The lemma to prove if suppose to have been generated by [generate_type] (in $\zeta$ normal form that is it looks like~: [\forall (x_1:t_1)\ldots(x_n:t_n), forall res, res = f x_1\ldots x_n in, \rightarrow graph\ x_1\ldots x_n\ res] The sketch of the proof is the following one~: \begin{enumerate} \item intros until $x_n$ \item $functional\ induction\ (f.(i)\ x_1\ldots x_n)$ using schemes.(i) \item for each generated branch intro [res] and [hres :res = f x_1\ldots x_n], rewrite [hres] and the apply the corresponding constructor of the corresponding graph inductive. \end{enumerate} *) let prove_fun_correct functional_induction funs_constr graphs_constr schemes lemmas_types_infos i : tactic = fun g -> (* first of all we recreate the lemmas types to be used as predicates of the induction principle that is~: \[fun (x_1:t_1)\ldots(x_n:t_n)=> fun fv => fun res => res = fv \rightarrow graph\ x_1\ldots x_n\ res\] *) let lemmas = Array.map (fun (_,(ctxt,concl)) -> match ctxt with | [] | [_] | [_;_] -> anomaly "bad context" | hres::res::(x,_,t)::ctxt -> Termops.it_mkLambda_or_LetIn (Termops.it_mkProd_or_LetIn concl [hres;res]) ((x,None,t)::ctxt) ) lemmas_types_infos in (* we the get the definition of the graphs block *) let graph_ind = destInd graphs_constr.(i) in let kn = fst graph_ind in let mib,_ = Global.lookup_inductive graph_ind in (* and the principle to use in this lemma in $\zeta$ normal form *) let f_principle,princ_type = schemes.(i) in let princ_type = nf_zeta princ_type in let princ_infos = Tactics.compute_elim_sig princ_type in (* The number of args of the function is then easilly computable *) let nb_fun_args = nb_prod (pf_concl g) - 2 in let args_names = generate_fresh_id (id_of_string "x") [] nb_fun_args in let ids = args_names@(pf_ids_of_hyps g) in (* Since we cannot ensure that the funcitonnal principle is defined in the environement and due to the bug #1174, we will need to pose the principle using a name *) let principle_id = Namegen.next_ident_away_in_goal (id_of_string "princ") ids in let ids = principle_id :: ids in (* We get the branches of the principle *) let branches = List.rev princ_infos.branches in (* and built the intro pattern for each of them *) let intro_pats = List.map (fun (_,_,br_type) -> List.map (fun id -> dummy_loc, Genarg.IntroIdentifier id) (generate_fresh_id (id_of_string "y") ids (List.length (fst (decompose_prod_assum br_type)))) ) branches in (* before building the full intro pattern for the principle *) let pat = Some (dummy_loc,Genarg.IntroOrAndPattern intro_pats) in let eq_ind = Coqlib.build_coq_eq () in let eq_construct = mkConstruct((destInd eq_ind),1) in (* The next 
to referencies will be used to find out which constructor to apply in each branch *) let ind_number = ref 0 and min_constr_number = ref 0 in (* The tactic to prove the ith branch of the principle *) let prove_branche i g = (* We get the identifiers of this branch *) let this_branche_ids = List.fold_right (fun (_,pat) acc -> match pat with | Genarg.IntroIdentifier id -> Idset.add id acc | _ -> anomaly "Not an identifier" ) (List.nth intro_pats (pred i)) Idset.empty in (* and get the real args of the branch by unfolding the defined constant *) let pre_args,pre_tac = List.fold_right (fun (id,b,t) (pre_args,pre_tac) -> if Idset.mem id this_branche_ids then match b with | None -> (id::pre_args,pre_tac) | Some b -> (pre_args, tclTHEN (h_reduce (Glob_term.Unfold([Glob_term.all_occurrences_expr,EvalVarRef id])) allHyps) pre_tac ) else (pre_args,pre_tac) ) (pf_hyps g) ([],tclIDTAC) in (* We can then recompute the arguments of the constructor. For each [hid] introduced by this branch, if [hid] has type $forall res, res=fv -> graph.(j)\ x_1\ x_n res$ the corresponding arguments of the constructor are [ fv (hid fv (refl_equal fv)) ]. If [hid] has another type the corresponding argument of the constructor is [hid] *) let constructor_args = List.fold_right (fun hid acc -> let type_of_hid = pf_type_of g (mkVar hid) in match kind_of_term type_of_hid with | Prod(_,_,t') -> begin match kind_of_term t' with | Prod(_,t'',t''') -> begin match kind_of_term t'',kind_of_term t''' with | App(eq,args), App(graph',_) when (eq_constr eq eq_ind) && array_exists (eq_constr graph') graphs_constr -> ((mkApp(mkVar hid,[|args.(2);(mkApp(eq_construct,[|args.(0);args.(2)|]))|])) ::args.(2)::acc) | _ -> mkVar hid :: acc end | _ -> mkVar hid :: acc end | _ -> mkVar hid :: acc ) pre_args [] in (* in fact we must also add the parameters to the constructor args *) let constructor_args = let params_id = fst (list_chop princ_infos.nparams args_names) in (List.map mkVar params_id)@(List.rev constructor_args) in (* We then get the constructor corresponding to this branch and modifies the references has needed i.e. 
if the constructor is the last one of the current inductive then add one the number of the inductive to take and add the number of constructor of the previous graph to the minimal constructor number *) let constructor = let constructor_num = i - !min_constr_number in let length = Array.length (mib.Declarations.mind_packets.(!ind_number).Declarations.mind_consnames) in if constructor_num <= length then begin (kn,!ind_number),constructor_num end else begin incr ind_number; min_constr_number := !min_constr_number + length ; (kn,!ind_number),1 end in (* we can then build the final proof term *) let app_constructor = applist((mkConstruct(constructor)),constructor_args) in (* an apply the tactic *) let res,hres = match generate_fresh_id (id_of_string "z") (ids(* @this_branche_ids *)) 2 with | [res;hres] -> res,hres | _ -> assert false in observe (str "constructor := " ++ Printer.pr_lconstr_env (pf_env g) app_constructor); ( tclTHENSEQ [ (* unfolding of all the defined variables introduced by this branch *) observe_tac "unfolding" pre_tac; (* $zeta$ normalizing of the conclusion *) h_reduce (Glob_term.Cbv { Glob_term.all_flags with Glob_term.rDelta = false ; Glob_term.rConst = [] } ) onConcl; (* introducing the the result of the graph and the equality hypothesis *) observe_tac "introducing" (tclMAP h_intro [res;hres]); (* replacing [res] with its value *) observe_tac "rewriting res value" (Equality.rewriteLR (mkVar hres)); (* Conclusion *) observe_tac "exact" (h_exact app_constructor) ] ) g in (* end of branche proof *) let param_names = fst (list_chop princ_infos.nparams args_names) in let params = List.map mkVar param_names in let lemmas = Array.to_list (Array.map (fun c -> applist(c,params)) lemmas) in (* The bindings of the principle that is the params of the principle and the different lemma types *) let bindings = let params_bindings,avoid = List.fold_left2 (fun (bindings,avoid) (x,_,_) p -> let id = Namegen.next_ident_away (Nameops.out_name x) avoid in (dummy_loc,Glob_term.NamedHyp id,p)::bindings,id::avoid ) ([],pf_ids_of_hyps g) princ_infos.params (List.rev params) in let lemmas_bindings = List.rev (fst (List.fold_left2 (fun (bindings,avoid) (x,_,_) p -> let id = Namegen.next_ident_away (Nameops.out_name x) avoid in (dummy_loc,Glob_term.NamedHyp id,(nf_zeta p))::bindings,id::avoid) ([],avoid) princ_infos.predicates (lemmas))) in Glob_term.ExplicitBindings (params_bindings@lemmas_bindings) in tclTHENSEQ [ observe_tac "intro args_names" (tclMAP h_intro args_names); observe_tac "principle" (assert_by (Name principle_id) princ_type (h_exact f_principle)); tclTHEN_i (observe_tac "functional_induction" ( fun g -> observe (pr_constr_with_binding (Printer.pr_lconstr_env (pf_env g)) (mkVar principle_id,bindings)); functional_induction false (applist(funs_constr.(i),List.map mkVar args_names)) (Some (mkVar principle_id,bindings)) pat g )) (fun i g -> observe_tac ("proving branche "^string_of_int i) (prove_branche i) g ) ] g (* [generalize_dependent_of x hyp g] generalize every hypothesis which depends of [x] but [hyp] *) let generalize_dependent_of x hyp g = tclMAP (function | (id,None,t) when not (id = hyp) && (Termops.occur_var (pf_env g) x t) -> tclTHEN (h_generalize [mkVar id]) (thin [id]) | _ -> tclIDTAC ) (pf_hyps g) g (* [intros_with_rewrite] do the intros in each branch and treat each new hypothesis (unfolding, substituting, destructing cases \ldots) *) let rec intros_with_rewrite g = observe_tac "intros_with_rewrite" intros_with_rewrite_aux g and intros_with_rewrite_aux : tactic = fun g 
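(* dispatch on the head product of the goal: a trivial equality is
   dropped, an equality with a variable on one side is substituted
   (rewriting in the appropriate direction), a [False] hypothesis
   closes the goal with tauto, a match is destructed and a let-in is
   reduced before going on *)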
-> let eq_ind = Coqlib.build_coq_eq () in match kind_of_term (pf_concl g) with | Prod(_,t,t') -> begin match kind_of_term t with | App(eq,args) when (eq_constr eq eq_ind) -> if Reductionops.is_conv (pf_env g) (project g) args.(1) args.(2) then let id = pf_get_new_id (id_of_string "y") g in tclTHENSEQ [ h_intro id; thin [id]; intros_with_rewrite ] g else if isVar args.(1) then let id = pf_get_new_id (id_of_string "y") g in tclTHENSEQ [ h_intro id; generalize_dependent_of (destVar args.(1)) id; tclTRY (Equality.rewriteLR (mkVar id)); intros_with_rewrite ] g else if isVar args.(2) then let id = pf_get_new_id (id_of_string "y") g in tclTHENSEQ [ h_intro id; generalize_dependent_of (destVar args.(2)) id; tclTRY (Equality.rewriteRL (mkVar id)); intros_with_rewrite ] g else begin let id = pf_get_new_id (id_of_string "y") g in tclTHENSEQ[ h_intro id; tclTRY (Equality.rewriteLR (mkVar id)); intros_with_rewrite ] g end | Ind _ when eq_constr t (Coqlib.build_coq_False ()) -> Tauto.tauto g | Case(_,_,v,_) -> tclTHENSEQ[ h_case false (v,Glob_term.NoBindings); intros_with_rewrite ] g | LetIn _ -> tclTHENSEQ[ h_reduce (Glob_term.Cbv {Glob_term.all_flags with Glob_term.rDelta = false; }) onConcl ; intros_with_rewrite ] g | _ -> let id = pf_get_new_id (id_of_string "y") g in tclTHENSEQ [ h_intro id;intros_with_rewrite] g end | LetIn _ -> tclTHENSEQ[ h_reduce (Glob_term.Cbv {Glob_term.all_flags with Glob_term.rDelta = false; }) onConcl ; intros_with_rewrite ] g | _ -> tclIDTAC g let rec reflexivity_with_destruct_cases g = let destruct_case () = try match kind_of_term (snd (destApp (pf_concl g))).(2) with | Case(_,_,v,_) -> tclTHENSEQ[ h_case false (v,Glob_term.NoBindings); intros; observe_tac "reflexivity_with_destruct_cases" reflexivity_with_destruct_cases ] | _ -> reflexivity with e when Errors.noncritical e -> reflexivity in let eq_ind = Coqlib.build_coq_eq () in let discr_inject = Tacticals.onAllHypsAndConcl ( fun sc g -> match sc with None -> tclIDTAC g | Some id -> match kind_of_term (pf_type_of g (mkVar id)) with | App(eq,[|_;t1;t2|]) when eq_constr eq eq_ind -> if Equality.discriminable (pf_env g) (project g) t1 t2 then Equality.discrHyp id g else if Equality.injectable (pf_env g) (project g) t1 t2 then tclTHENSEQ [Equality.injHyp id;thin [id];intros_with_rewrite] g else tclIDTAC g | _ -> tclIDTAC g ) in (tclFIRST [ observe_tac "reflexivity_with_destruct_cases : reflexivity" reflexivity; observe_tac "reflexivity_with_destruct_cases : destruct_case" ((destruct_case ())); (* We reach this point ONLY if the same value is matched (at least) two times along binding path. In this case, either we have a discriminable hypothesis and we are done, either at least an injectable one and we do the injection before continuing *) observe_tac "reflexivity_with_destruct_cases : others" (tclTHEN (tclPROGRESS discr_inject ) reflexivity_with_destruct_cases) ]) g (* [prove_fun_complete funs graphs schemes lemmas_types_infos i] is the tactic used to prove completness lemma. [funcs], [graphs] [schemes] [lemmas_types_infos] are the mutually recursive functions (resp. definitions of the graphs of the functions, principles and correctness lemma types) to prove correct. 
[i] is the indice of the function to prove complete The lemma to prove if suppose to have been generated by [generate_type] (in $\zeta$ normal form that is it looks like~: [\forall (x_1:t_1)\ldots(x_n:t_n), forall res, graph\ x_1\ldots x_n\ res, \rightarrow res = f x_1\ldots x_n in] The sketch of the proof is the following one~: \begin{enumerate} \item intros until $H:graph\ x_1\ldots x_n\ res$ \item $elim\ H$ using schemes.(i) \item for each generated branch, intro the news hyptohesis, for each such hyptohesis [h], if [h] has type [x=?] with [x] a variable, then subst [x], if [h] has type [t=?] with [t] not a variable then rewrite [t] in the subterms, else if [h] is a match then destruct it, else do just introduce it, after all intros, the conclusion should be a reflexive equality. \end{enumerate} *) let prove_fun_complete funcs graphs schemes lemmas_types_infos i : tactic = fun g -> (* We compute the types of the different mutually recursive lemmas in $\zeta$ normal form *) let lemmas = Array.map (fun (_,(ctxt,concl)) -> nf_zeta (Termops.it_mkLambda_or_LetIn concl ctxt)) lemmas_types_infos in (* We get the constant and the principle corresponding to this lemma *) let f = funcs.(i) in let graph_principle = nf_zeta schemes.(i) in let princ_type = pf_type_of g graph_principle in let princ_infos = Tactics.compute_elim_sig princ_type in (* Then we get the number of argument of the function and compute a fresh name for each of them *) let nb_fun_args = nb_prod (pf_concl g) - 2 in let args_names = generate_fresh_id (id_of_string "x") [] nb_fun_args in let ids = args_names@(pf_ids_of_hyps g) in (* and fresh names for res H and the principle (cf bug bug #1174) *) let res,hres,graph_principle_id = match generate_fresh_id (id_of_string "z") ids 3 with | [res;hres;graph_principle_id] -> res,hres,graph_principle_id | _ -> assert false in let ids = res::hres::graph_principle_id::ids in (* we also compute fresh names for each hyptohesis of each branche of the principle *) let branches = List.rev princ_infos.branches in let intro_pats = List.map (fun (_,_,br_type) -> List.map (fun id -> id) (generate_fresh_id (id_of_string "y") ids (nb_prod br_type)) ) branches in (* We will need to change the function by its body using [f_equation] if it is recursive (that is the graph is infinite or unfold if the graph is finite *) let rewrite_tac j ids : tactic = let graph_def = graphs.(j) in let infos = try find_Function_infos (destConst funcs.(j)) with Not_found -> error "No graph found" in if infos.is_general || Rtree.is_infinite graph_def.mind_recargs then let eq_lemma = try Option.get (infos).equation_lemma with Option.IsNone -> anomaly "Cannot find equation lemma" in tclTHENSEQ[ tclMAP h_intro ids; Equality.rewriteLR (mkConst eq_lemma); (* Don't forget to $\zeta$ normlize the term since the principles have been $\zeta$-normalized *) h_reduce (Glob_term.Cbv {Glob_term.all_flags with Glob_term.rDelta = false; }) onConcl ; h_generalize (List.map mkVar ids); thin ids ] else unfold_in_concl [(Termops.all_occurrences, Names.EvalConstRef (destConst f))] in (* The proof of each branche itself *) let ind_number = ref 0 in let min_constr_number = ref 0 in let prove_branche i g = (* we fist compute the inductive corresponding to the branch *) let this_ind_number = let constructor_num = i - !min_constr_number in let length = Array.length (graphs.(!ind_number).Declarations.mind_consnames) in if constructor_num <= length then !ind_number else begin incr ind_number; min_constr_number := !min_constr_number + length; 
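(* the branch number is beyond the constructors of the current graph:
   move to the next inductive of the mutual block and shift the
   constructor counter accordingly *)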
!ind_number end in let this_branche_ids = List.nth intro_pats (pred i) in tclTHENSEQ[ (* we expand the definition of the function *) observe_tac "rewrite_tac" (rewrite_tac this_ind_number this_branche_ids); (* introduce hypothesis with some rewrite *) observe_tac "intros_with_rewrite" intros_with_rewrite; (* The proof is (almost) complete *) observe_tac "reflexivity" (reflexivity_with_destruct_cases) ] g in let params_names = fst (list_chop princ_infos.nparams args_names) in let params = List.map mkVar params_names in tclTHENSEQ [ tclMAP h_intro (args_names@[res;hres]); observe_tac "h_generalize" (h_generalize [mkApp(applist(graph_principle,params),Array.map (fun c -> applist(c,params)) lemmas)]); h_intro graph_principle_id; observe_tac "" (tclTHEN_i (observe_tac "elim" ((elim false (mkVar hres,Glob_term.NoBindings) (Some (mkVar graph_principle_id,Glob_term.NoBindings))))) (fun i g -> observe_tac "prove_branche" (prove_branche i) g )) ] g let do_save () = Lemmas.save_named false (* [derive_correctness make_scheme functional_induction funs graphs] create correctness and completeness lemmas for each function in [funs] w.r.t. [graphs] [make_scheme] is Functional_principle_types.make_scheme (dependency pb) and [functional_induction] is Indfun.functional_induction (same pb) *) let derive_correctness make_scheme functional_induction (funs: constant list) (graphs:inductive list) = let previous_state = States.freeze () in let funs = Array.of_list funs and graphs = Array.of_list graphs in let funs_constr = Array.map mkConst funs in try let graphs_constr = Array.map mkInd graphs in let lemmas_types_infos = Util.array_map2_i (fun i f_constr graph -> let const_of_f = destConst f_constr in let (type_of_lemma_ctxt,type_of_lemma_concl) as type_info = generate_type false const_of_f graph i in let type_of_lemma = Termops.it_mkProd_or_LetIn type_of_lemma_concl type_of_lemma_ctxt in let type_of_lemma = nf_zeta type_of_lemma in observe (str "type_of_lemma := " ++ Printer.pr_lconstr type_of_lemma); type_of_lemma,type_info ) funs_constr graphs_constr in let schemes = (* The functional induction schemes are computed and not saved if there is more that one function if the block contains only one function we can safely reuse [f_rect] *) try if Array.length funs_constr <> 1 then raise Not_found; [| find_induction_principle funs_constr.(0) |] with Not_found -> Array.of_list (List.map (fun entry -> (entry.Entries.const_entry_body, Option.get entry.Entries.const_entry_type ) ) (make_scheme (array_map_to_list (fun const -> const,Glob_term.GType None) funs)) ) in let proving_tac = prove_fun_correct functional_induction funs_constr graphs_constr schemes lemmas_types_infos in Array.iteri (fun i f_as_constant -> let f_id = id_of_label (con_label f_as_constant) in (*i The next call to mk_correct_id is valid since we are constructing the lemma Ensures by: obvious i*) let lem_id = mk_correct_id f_id in Lemmas.start_proof lem_id (Decl_kinds.Global,(Decl_kinds.Proof Decl_kinds.Theorem)) (fst lemmas_types_infos.(i)) (fun _ _ -> ()); Pfedit.by (observe_tac ("prove correctness ("^(string_of_id f_id)^")") (proving_tac i)); do_save (); let finfo = find_Function_infos f_as_constant in let lem_cst = destConst (Constrintern.global_reference lem_id) in update_Function {finfo with correctness_lemma = Some lem_cst} ) funs; let lemmas_types_infos = Util.array_map2_i (fun i f_constr graph -> let const_of_f = destConst f_constr in let (type_of_lemma_ctxt,type_of_lemma_concl) as type_info = generate_type true const_of_f graph i in let 
type_of_lemma = Termops.it_mkProd_or_LetIn type_of_lemma_concl type_of_lemma_ctxt in let type_of_lemma = nf_zeta type_of_lemma in observe (str "type_of_lemma := " ++ Printer.pr_lconstr type_of_lemma); type_of_lemma,type_info ) funs_constr graphs_constr in let kn,_ as graph_ind = destInd graphs_constr.(0) in let mib,mip = Global.lookup_inductive graph_ind in let schemes = Array.of_list (Indrec.build_mutual_induction_scheme (Global.env ()) Evd.empty (Array.to_list (Array.mapi (fun i _ -> (kn,i),true,InType) mib.Declarations.mind_packets ) ) ) in let proving_tac = prove_fun_complete funs_constr mib.Declarations.mind_packets schemes lemmas_types_infos in Array.iteri (fun i f_as_constant -> let f_id = id_of_label (con_label f_as_constant) in (*i The next call to mk_complete_id is valid since we are constructing the lemma Ensures by: obvious i*) let lem_id = mk_complete_id f_id in Lemmas.start_proof lem_id (Decl_kinds.Global,(Decl_kinds.Proof Decl_kinds.Theorem)) (fst lemmas_types_infos.(i)) (fun _ _ -> ()); Pfedit.by (observe_tac ("prove completeness ("^(string_of_id f_id)^")") (proving_tac i)); do_save (); let finfo = find_Function_infos f_as_constant in let lem_cst = destConst (Constrintern.global_reference lem_id) in update_Function {finfo with completeness_lemma = Some lem_cst} ) funs; with reraise -> (* In case of problem, we reset all the lemmas *) Pfedit.delete_all_proofs (); States.unfreeze previous_state; raise reraise (***********************************************) (* [revert_graph kn post_tac hid] transforme an hypothesis [hid] having type Ind(kn,num) t1 ... tn res when [kn] denotes a graph block into f_num t1... tn = res (by applying [f_complete] to the first type) before apply post_tac on the result if the type of hypothesis has not this form or if we cannot find the completeness lemma then we do nothing *) let revert_graph kn post_tac hid g = let typ = pf_type_of g (mkVar hid) in match kind_of_term typ with | App(i,args) when isInd i -> let ((kn',num) as ind') = destInd i in if kn = kn' then (* We have generated a graph hypothesis so that we must change it if we can *) let info = try find_Function_of_graph ind' with Not_found -> (* The graphs are mutually recursive but we cannot find one of them !*) anomaly "Cannot retrieve infos about a mutual block" in (* if we can find a completeness lemma for this function then we can come back to the functional form. If not, we do nothing *) match info.completeness_lemma with | None -> tclIDTAC g | Some f_complete -> let f_args,res = array_chop (Array.length args - 1) args in tclTHENSEQ [ h_generalize [applist(mkConst f_complete,(Array.to_list f_args)@[res.(0);mkVar hid])]; thin [hid]; h_intro hid; post_tac hid ] g else tclIDTAC g | _ -> tclIDTAC g (* [functional_inversion hid fconst f_correct ] is the functional version of [inversion] [hid] is the hypothesis to invert, [fconst] is the function to invert and [f_correct] is the correctness lemma for [fconst]. The sketch is the follwing~: \begin{enumerate} \item Transforms the hypothesis [hid] such that its type is now $res\ =\ f\ t_1 \ldots t_n$ (fails if it is not possible) \item replace [hid] with $R\_f t_1 \ldots t_n res$ using [f_correct] \item apply [inversion] on [hid] \item finally in each branch, replace each hypothesis [R\_f ..] by [f ...] 
using [f_complete] (whenever such a lemma exists) \end{enumerate} *) let functional_inversion kn hid fconst f_correct : tactic = fun g -> let old_ids = List.fold_right Idset.add (pf_ids_of_hyps g) Idset.empty in let type_of_h = pf_type_of g (mkVar hid) in match kind_of_term type_of_h with | App(eq,args) when eq_constr eq (Coqlib.build_coq_eq ()) -> let pre_tac,f_args,res = match kind_of_term args.(1),kind_of_term args.(2) with | App(f,f_args),_ when eq_constr f fconst -> ((fun hid -> h_symmetry (onHyp hid)),f_args,args.(2)) |_,App(f,f_args) when eq_constr f fconst -> ((fun hid -> tclIDTAC),f_args,args.(1)) | _ -> (fun hid -> tclFAIL 1 (mt ())),[||],args.(2) in tclTHENSEQ[ pre_tac hid; h_generalize [applist(f_correct,(Array.to_list f_args)@[res;mkVar hid])]; thin [hid]; h_intro hid; Inv.inv FullInversion None (Glob_term.NamedHyp hid); (fun g -> let new_ids = List.filter (fun id -> not (Idset.mem id old_ids)) (pf_ids_of_hyps g) in tclMAP (revert_graph kn pre_tac) (hid::new_ids) g ); ] g | _ -> tclFAIL 1 (mt ()) g let invfun qhyp f = let f = match f with | ConstRef f -> f | _ -> raise (Util.UserError("",str "Not a function")) in try let finfos = find_Function_infos f in let f_correct = mkConst(Option.get finfos.correctness_lemma) and kn = fst finfos.graph_ind in Tactics.try_intros_until (fun hid -> functional_inversion kn hid (mkConst f) f_correct) qhyp with | Not_found -> error "No graph found" | Option.IsNone -> error "Cannot use equivalence with graph!" let invfun qhyp f g = match f with | Some f -> invfun qhyp f g | None -> Tactics.try_intros_until (fun hid g -> let hyp_typ = pf_type_of g (mkVar hid) in match kind_of_term hyp_typ with | App(eq,args) when eq_constr eq (Coqlib.build_coq_eq ()) -> begin let f1,_ = decompose_app args.(1) in try if not (isConst f1) then failwith ""; let finfos = find_Function_infos (destConst f1) in let f_correct = mkConst(Option.get finfos.correctness_lemma) and kn = fst finfos.graph_ind in functional_inversion kn hid f1 f_correct g with | Failure "" | Option.IsNone | Not_found -> try let f2,_ = decompose_app args.(2) in if not (isConst f2) then failwith ""; let finfos = find_Function_infos (destConst f2) in let f_correct = mkConst(Option.get finfos.correctness_lemma) and kn = fst finfos.graph_ind in functional_inversion kn hid f2 f_correct g with | Failure "" -> errorlabstrm "" (str "Hypothesis" ++ Ppconstr.pr_id hid ++ str " must contain at leat one Function") | Option.IsNone -> if do_observe () then error "Cannot use equivalence with graph for any side of the equality" else errorlabstrm "" (str "Cannot find inversion information for hypothesis " ++ Ppconstr.pr_id hid) | Not_found -> if do_observe () then error "No graph found for any side of equality" else errorlabstrm "" (str "Cannot find inversion information for hypothesis " ++ Ppconstr.pr_id hid) end | _ -> errorlabstrm "" (Ppconstr.pr_id hid ++ str " must be an equality ") ) qhyp g coq-8.4pl2/plugins/funind/glob_termops.mli0000640000175000001440000001076211505370347020005 0ustar notinusersopen Glob_term (* Ocaml 3.06 Map.S does not handle is_empty *) val idmap_is_empty : 'a Names.Idmap.t -> bool (* [get_pattern_id pat] returns a list of all the variable appearing in [pat] *) val get_pattern_id : cases_pattern -> Names.identifier list (* [pattern_to_term pat] returns a glob_constr corresponding to [pat]. 
[pat] must not contain occurences of anonymous pattern *) val pattern_to_term : cases_pattern -> glob_constr (* Some basic functions to rebuild glob_constr In each of them the location is Util.dummy_loc *) val mkGRef : Libnames.global_reference -> glob_constr val mkGVar : Names.identifier -> glob_constr val mkGApp : glob_constr*(glob_constr list) -> glob_constr val mkGLambda : Names.name * glob_constr * glob_constr -> glob_constr val mkGProd : Names.name * glob_constr * glob_constr -> glob_constr val mkGLetIn : Names.name * glob_constr * glob_constr -> glob_constr val mkGCases : glob_constr option * tomatch_tuples * cases_clauses -> glob_constr val mkGSort : glob_sort -> glob_constr val mkGHole : unit -> glob_constr (* we only build Evd.BinderType Anonymous holes *) val mkGCast : glob_constr* glob_constr -> glob_constr (* Some basic functions to decompose glob_constrs These are analogous to the ones constrs *) val glob_decompose_prod : glob_constr -> (Names.name*glob_constr) list * glob_constr val glob_decompose_prod_or_letin : glob_constr -> (Names.name*glob_constr option*glob_constr option) list * glob_constr val glob_decompose_prod_n : int -> glob_constr -> (Names.name*glob_constr) list * glob_constr val glob_decompose_prod_or_letin_n : int -> glob_constr -> (Names.name*glob_constr option*glob_constr option) list * glob_constr val glob_compose_prod : glob_constr -> (Names.name*glob_constr) list -> glob_constr val glob_compose_prod_or_letin: glob_constr -> (Names.name*glob_constr option*glob_constr option) list -> glob_constr val glob_decompose_app : glob_constr -> glob_constr*(glob_constr list) (* [glob_make_eq t1 t2] build the glob_constr corresponding to [t2 = t1] *) val glob_make_eq : ?typ:glob_constr -> glob_constr -> glob_constr -> glob_constr (* [glob_make_neq t1 t2] build the glob_constr corresponding to [t1 <> t2] *) val glob_make_neq : glob_constr -> glob_constr -> glob_constr (* [glob_make_or P1 P2] build the glob_constr corresponding to [P1 \/ P2] *) val glob_make_or : glob_constr -> glob_constr -> glob_constr (* [glob_make_or_list [P1;...;Pn]] build the glob_constr corresponding to [P1 \/ ( .... \/ Pn)] *) val glob_make_or_list : glob_constr list -> glob_constr (* alpha_conversion functions *) (* Replace the var mapped in the glob_constr/context *) val change_vars : Names.identifier Names.Idmap.t -> glob_constr -> glob_constr (* [alpha_pat avoid pat] rename all the variables present in [pat] s.t. the result does not share variables with [avoid]. This function create a fresh variable for each occurence of the anonymous pattern. Also returns a mapping from old variables to new ones and the concatenation of [avoid] with the variables appearing in the result. *) val alpha_pat : Names.Idmap.key list -> Glob_term.cases_pattern -> Glob_term.cases_pattern * Names.Idmap.key list * Names.identifier Names.Idmap.t (* [alpha_rt avoid rt] alpha convert [rt] s.t. 
the result repects barendregt conventions and does not share bound variables with avoid *) val alpha_rt : Names.identifier list -> glob_constr -> glob_constr (* same as alpha_rt but for case branches *) val alpha_br : Names.identifier list -> Util.loc * Names.identifier list * Glob_term.cases_pattern list * Glob_term.glob_constr -> Util.loc * Names.identifier list * Glob_term.cases_pattern list * Glob_term.glob_constr (* Reduction function *) val replace_var_by_term : Names.identifier -> Glob_term.glob_constr -> Glob_term.glob_constr -> Glob_term.glob_constr (* [is_free_in id rt] checks if [id] is a free variable in [rt] *) val is_free_in : Names.identifier -> glob_constr -> bool val are_unifiable : cases_pattern -> cases_pattern -> bool val eq_cases_pattern : cases_pattern -> cases_pattern -> bool (* ids_of_pat : cases_pattern -> Idset.t returns the set of variables appearing in a pattern *) val ids_of_pat : cases_pattern -> Names.Idset.t (* TODO: finish this function (Fix not treated) *) val ids_of_glob_constr: glob_constr -> Names.Idset.t (* removing let_in construction in a glob_constr *) val zeta_normalize : Glob_term.glob_constr -> Glob_term.glob_constr val expand_as : glob_constr -> glob_constr coq-8.4pl2/plugins/pluginsvo.itarget0000640000175000001440000000033711743265053016730 0ustar notinusersfield/vo.otarget fourier/vo.otarget funind/vo.otarget nsatz/vo.otarget micromega/vo.otarget omega/vo.otarget quote/vo.otarget ring/vo.otarget romega/vo.otarget rtauto/vo.otarget setoid_ring/vo.otarget extraction/vo.otarget coq-8.4pl2/plugins/cc/0000750000175000001440000000000012127276540013702 5ustar notinuserscoq-8.4pl2/plugins/cc/ccalgo.ml0000640000175000001440000006236112121620060015455 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !cc_verbose); optwrite=(fun b -> cc_verbose := b)} in declare_bool_option gdopt (* Signature table *) module ST=struct (* l: sign -> term r: term -> sign *) type t = {toterm:(int*int,int) Hashtbl.t; tosign:(int,int*int) Hashtbl.t} let empty ()= {toterm=Hashtbl.create init_size; tosign=Hashtbl.create init_size} let enter t sign st= if Hashtbl.mem st.toterm sign then anomaly "enter: signature already entered" else Hashtbl.replace st.toterm sign t; Hashtbl.replace st.tosign t sign let query sign st=Hashtbl.find st.toterm sign let rev_query term st=Hashtbl.find st.tosign term let delete st t= try let sign=Hashtbl.find st.tosign t in Hashtbl.remove st.toterm sign; Hashtbl.remove st.tosign t with Not_found -> () let rec delete_set st s = Intset.iter (delete st) s end type pa_constructor= { cnode : int; arity : int; args : int list} type pa_fun= {fsym:int; fnargs:int} type pa_mark= Fmark of pa_fun | Cmark of pa_constructor module PacMap=Map.Make(struct type t=pa_constructor let compare=Pervasives.compare end) module PafMap=Map.Make(struct type t=pa_fun let compare=Pervasives.compare end) type cinfo= {ci_constr: constructor; (* inductive type *) ci_arity: int; (* # args *) ci_nhyps: int} (* # projectable args *) type term= Symb of constr | Product of sorts_family * sorts_family | Eps of identifier | Appli of term*term | Constructor of cinfo (* constructor arity + nhyps *) let rec term_equal t1 t2 = match t1, t2 with | Symb c1, Symb c2 -> eq_constr c1 c2 | Product (s1, t1), Product (s2, t2) -> s1 = s2 && t1 = t2 | Eps i1, Eps i2 -> id_ord i1 i2 = 0 | Appli (t1, u1), Appli (t2, u2) -> term_equal t1 t2 && term_equal u1 u2 | Constructor {ci_constr=c1; ci_arity=i1; 
ci_nhyps=j1}, Constructor {ci_constr=c2; ci_arity=i2; ci_nhyps=j2} -> i1 = i2 && j1 = j2 && eq_constructor c1 c2 | _ -> t1 = t2 open Hashtbl_alt.Combine let rec hash_term = function | Symb c -> combine 1 (hash_constr c) | Product (s1, s2) -> combine3 2 (Hashtbl.hash s1) (Hashtbl.hash s2) | Eps i -> combine 3 (Hashtbl.hash i) | Appli (t1, t2) -> combine3 4 (hash_term t1) (hash_term t2) | Constructor {ci_constr=c; ci_arity=i; ci_nhyps=j} -> combine4 5 (Hashtbl.hash c) i j type ccpattern = PApp of term * ccpattern list (* arguments are reversed *) | PVar of int type rule= Congruence | Axiom of constr * bool | Injection of int * pa_constructor * int * pa_constructor * int type from= Goal | Hyp of constr | HeqG of constr | HeqnH of constr * constr type 'a eq = {lhs:int;rhs:int;rule:'a} type equality = rule eq type disequality = from eq type patt_kind = Normal | Trivial of types | Creates_variables type quant_eq = {qe_hyp_id: identifier; qe_pol: bool; qe_nvars:int; qe_lhs: ccpattern; qe_lhs_valid:patt_kind; qe_rhs: ccpattern; qe_rhs_valid:patt_kind} let swap eq : equality = let swap_rule=match eq.rule with Congruence -> Congruence | Injection (i,pi,j,pj,k) -> Injection (j,pj,i,pi,k) | Axiom (id,reversed) -> Axiom (id,not reversed) in {lhs=eq.rhs;rhs=eq.lhs;rule=swap_rule} type inductive_status = Unknown | Partial of pa_constructor | Partial_applied | Total of (int * pa_constructor) type representative= {mutable weight:int; mutable lfathers:Intset.t; mutable fathers:Intset.t; mutable inductive_status: inductive_status; class_type : Term.types; mutable functions: Intset.t PafMap.t; mutable constructors: int PacMap.t} (*pac -> term = app(constr,t) *) type cl = Rep of representative| Eqto of int*equality type vertex = Leaf| Node of (int*int) type node = {mutable clas:cl; mutable cpath: int; vertex:vertex; term:term} module Constrhash = Hashtbl.Make (struct type t = constr let equal = eq_constr let hash = hash_constr end) module Typehash = Constrhash module Termhash = Hashtbl.Make (struct type t = term let equal = term_equal let hash = hash_term end) module Identhash = Hashtbl.Make (struct type t = identifier let equal = Pervasives.(=) let hash = Hashtbl.hash end) type forest= {mutable max_size:int; mutable size:int; mutable map: node array; axioms: (term*term) Constrhash.t; mutable epsilons: pa_constructor list; syms: int Termhash.t} type state = {uf: forest; sigtable:ST.t; mutable terms: Intset.t; combine: equality Queue.t; marks: (int * pa_mark) Queue.t; mutable diseq: disequality list; mutable quant: quant_eq list; mutable pa_classes: Intset.t; q_history: (int array) Identhash.t; mutable rew_depth:int; mutable changed:bool; by_type: Intset.t Typehash.t; mutable gls:Proof_type.goal Tacmach.sigma} let dummy_node = {clas=Eqto(min_int,{lhs=min_int;rhs=min_int;rule=Congruence}); cpath=min_int; vertex=Leaf; term=Symb (mkRel min_int)} let empty depth gls:state = {uf= {max_size=init_size; size=0; map=Array.create init_size dummy_node; epsilons=[]; axioms=Constrhash.create init_size; syms=Termhash.create init_size}; terms=Intset.empty; combine=Queue.create (); marks=Queue.create (); sigtable=ST.empty (); diseq=[]; quant=[]; pa_classes=Intset.empty; q_history=Identhash.create init_size; rew_depth=depth; by_type=Constrhash.create init_size; changed=false; gls=gls} let forest state = state.uf let compress_path uf i j = uf.map.(j).cpath<-i let rec find_aux uf visited i= let j = uf.map.(i).cpath in if j<0 then let _ = List.iter (compress_path uf i) visited in i else find_aux uf (i::visited) j let find uf i= 
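(* find with path compression: [find_aux] records the nodes visited on
   the way to the representative and [compress_path] redirects them to
   it *)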
find_aux uf [] i let get_representative uf i= match uf.map.(i).clas with Rep r -> r | _ -> anomaly "get_representative: not a representative" let find_pac uf i pac = PacMap.find pac (get_representative uf i).constructors let get_constructor_info uf i= match uf.map.(i).term with Constructor cinfo->cinfo | _ -> anomaly "get_constructor: not a constructor" let size uf i= (get_representative uf i).weight let axioms uf = uf.axioms let epsilons uf = uf.epsilons let add_lfather uf i t= let r=get_representative uf i in r.weight<-r.weight+1; r.lfathers<-Intset.add t r.lfathers; r.fathers <-Intset.add t r.fathers let add_rfather uf i t= let r=get_representative uf i in r.weight<-r.weight+1; r.fathers <-Intset.add t r.fathers exception Discriminable of int * pa_constructor * int * pa_constructor let append_pac t p = {p with arity=pred p.arity;args=t::p.args} let tail_pac p= {p with arity=succ p.arity;args=List.tl p.args} let fsucc paf = {paf with fnargs=succ paf.fnargs} let add_pac rep pac t = if not (PacMap.mem pac rep.constructors) then rep.constructors<-PacMap.add pac t rep.constructors let add_paf rep paf t = let already = try PafMap.find paf rep.functions with Not_found -> Intset.empty in rep.functions<- PafMap.add paf (Intset.add t already) rep.functions let term uf i=uf.map.(i).term let subterms uf i= match uf.map.(i).vertex with Node(j,k) -> (j,k) | _ -> anomaly "subterms: not a node" let signature uf i= let j,k=subterms uf i in (find uf j,find uf k) let next uf= let size=uf.size in let nsize= succ size in if nsize=uf.max_size then let newmax=uf.max_size * 3 / 2 + 1 in let newmap=Array.create newmax dummy_node in begin uf.max_size<-newmax; Array.blit uf.map 0 newmap 0 size; uf.map<-newmap end else (); uf.size<-nsize; size let new_representative typ = {weight=0; lfathers=Intset.empty; fathers=Intset.empty; inductive_status=Unknown; class_type=typ; functions=PafMap.empty; constructors=PacMap.empty} (* rebuild a constr from an applicative term *) let _A_ = Name (id_of_string "A") let _B_ = Name (id_of_string "A") let _body_ = mkProd(Anonymous,mkRel 2,mkRel 2) let cc_product s1 s2 = mkLambda(_A_,mkSort(Termops.new_sort_in_family s1), mkLambda(_B_,mkSort(Termops.new_sort_in_family s2),_body_)) let rec constr_of_term = function Symb s->s | Product(s1,s2) -> cc_product s1 s2 | Eps id -> mkVar id | Constructor cinfo -> mkConstruct cinfo.ci_constr | Appli (s1,s2)-> make_app [(constr_of_term s2)] s1 and make_app l=function Appli (s1,s2)->make_app ((constr_of_term s2)::l) s1 | other -> applistc (constr_of_term other) l let rec canonize_name c = let func = canonize_name in match kind_of_term c with | Const kn -> let canon_const = constant_of_kn (canonical_con kn) in (mkConst canon_const) | Ind (kn,i) -> let canon_mind = mind_of_kn (canonical_mind kn) in (mkInd (canon_mind,i)) | Construct ((kn,i),j) -> let canon_mind = mind_of_kn (canonical_mind kn) in mkConstruct ((canon_mind,i),j) | Prod (na,t,ct) -> mkProd (na,func t, func ct) | Lambda (na,t,ct) -> mkLambda (na, func t,func ct) | LetIn (na,b,t,ct) -> mkLetIn (na, func b,func t,func ct) | App (ct,l) -> mkApp (func ct,array_smartmap func l) | _ -> c (* rebuild a term from a pattern and a substitution *) let build_subst uf subst = Array.map (fun i -> try term uf i with e when Errors.noncritical e -> anomaly "incomplete matching") subst let rec inst_pattern subst = function PVar i -> subst.(pred i) | PApp (t, args) -> List.fold_right (fun spat f -> Appli (f,inst_pattern subst spat)) args t let pr_idx_term state i = str "[" ++ int i ++ str ":=" ++ 
Termops.print_constr (constr_of_term (term state.uf i)) ++ str "]" let pr_term t = str "[" ++ Termops.print_constr (constr_of_term t) ++ str "]" let rec add_term state t= let uf=state.uf in try Termhash.find uf.syms t with Not_found -> let b=next uf in let typ = pf_type_of state.gls (constr_of_term t) in let typ = canonize_name typ in let new_node= match t with Symb _ | Product (_,_) -> let paf = {fsym=b; fnargs=0} in Queue.add (b,Fmark paf) state.marks; {clas= Rep (new_representative typ); cpath= -1; vertex= Leaf; term= t} | Eps id -> {clas= Rep (new_representative typ); cpath= -1; vertex= Leaf; term= t} | Appli (t1,t2) -> let i1=add_term state t1 and i2=add_term state t2 in add_lfather uf (find uf i1) b; add_rfather uf (find uf i2) b; state.terms<-Intset.add b state.terms; {clas= Rep (new_representative typ); cpath= -1; vertex= Node(i1,i2); term= t} | Constructor cinfo -> let paf = {fsym=b; fnargs=0} in Queue.add (b,Fmark paf) state.marks; let pac = {cnode= b; arity= cinfo.ci_arity; args=[]} in Queue.add (b,Cmark pac) state.marks; {clas=Rep (new_representative typ); cpath= -1; vertex=Leaf; term=t} in uf.map.(b)<-new_node; Termhash.add uf.syms t b; Typehash.replace state.by_type typ (Intset.add b (try Typehash.find state.by_type typ with Not_found -> Intset.empty)); b let add_equality state c s t= let i = add_term state s in let j = add_term state t in Queue.add {lhs=i;rhs=j;rule=Axiom(c,false)} state.combine; Constrhash.add state.uf.axioms c (s,t) let add_disequality state from s t = let i = add_term state s in let j = add_term state t in state.diseq<-{lhs=i;rhs=j;rule=from}::state.diseq let add_quant state id pol (nvars,valid1,patt1,valid2,patt2) = state.quant<- {qe_hyp_id= id; qe_pol= pol; qe_nvars=nvars; qe_lhs= patt1; qe_lhs_valid=valid1; qe_rhs= patt2; qe_rhs_valid=valid2}::state.quant let is_redundant state id args = try let norm_args = Array.map (find state.uf) args in let prev_args = Identhash.find_all state.q_history id in List.exists (fun old_args -> Util.array_for_all2 (fun i j -> i = find state.uf j) norm_args old_args) prev_args with Not_found -> false let add_inst state (inst,int_subst) = check_for_interrupt (); if state.rew_depth > 0 then if is_redundant state inst.qe_hyp_id int_subst then debug msgnl (str "discarding redundant (dis)equality") else begin Identhash.add state.q_history inst.qe_hyp_id int_subst; let subst = build_subst (forest state) int_subst in let prfhead= mkVar inst.qe_hyp_id in let args = Array.map constr_of_term subst in let _ = array_rev args in (* highest deBruijn index first *) let prf= mkApp(prfhead,args) in let s = inst_pattern subst inst.qe_lhs and t = inst_pattern subst inst.qe_rhs in state.changed<-true; state.rew_depth<-pred state.rew_depth; if inst.qe_pol then begin debug (fun () -> msgnl (str "Adding new equality, depth="++ int state.rew_depth); msgnl (str " [" ++ Termops.print_constr prf ++ str " : " ++ pr_term s ++ str " == " ++ pr_term t ++ str "]")) (); add_equality state prf s t end else begin debug (fun () -> msgnl (str "Adding new disequality, depth="++ int state.rew_depth); msgnl (str " [" ++ Termops.print_constr prf ++ str " : " ++ pr_term s ++ str " <> " ++ pr_term t ++ str "]")) (); add_disequality state (Hyp prf) s t end end let link uf i j eq = (* links i -> j *) let node=uf.map.(i) in node.clas<-Eqto (j,eq); node.cpath<-j let rec down_path uf i l= match uf.map.(i).clas with Eqto(j,t)->down_path uf j (((i,j),t)::l) | Rep _ ->l let rec min_path=function ([],l2)->([],l2) | (l1,[])->(l1,[]) | (((c1,t1)::q1),((c2,t2)::q2)) when c1=c2 
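(* both paths start with the same edge: it lies above the meeting
   point of the two nodes and is not needed in the proof, so drop it *)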
-> min_path (q1,q2) | cpl -> cpl let join_path uf i j= assert (find uf i=find uf j); min_path (down_path uf i [],down_path uf j []) let union state i1 i2 eq= debug (fun () -> msgnl (str "Linking " ++ pr_idx_term state i1 ++ str " and " ++ pr_idx_term state i2 ++ str ".")) (); let r1= get_representative state.uf i1 and r2= get_representative state.uf i2 in link state.uf i1 i2 eq; Constrhash.replace state.by_type r1.class_type (Intset.remove i1 (try Constrhash.find state.by_type r1.class_type with Not_found -> Intset.empty)); let f= Intset.union r1.fathers r2.fathers in r2.weight<-Intset.cardinal f; r2.fathers<-f; r2.lfathers<-Intset.union r1.lfathers r2.lfathers; ST.delete_set state.sigtable r1.fathers; state.terms<-Intset.union state.terms r1.fathers; PacMap.iter (fun pac b -> Queue.add (b,Cmark pac) state.marks) r1.constructors; PafMap.iter (fun paf -> Intset.iter (fun b -> Queue.add (b,Fmark paf) state.marks)) r1.functions; match r1.inductive_status,r2.inductive_status with Unknown,_ -> () | Partial pac,Unknown -> r2.inductive_status<-Partial pac; state.pa_classes<-Intset.remove i1 state.pa_classes; state.pa_classes<-Intset.add i2 state.pa_classes | Partial _ ,(Partial _ |Partial_applied) -> state.pa_classes<-Intset.remove i1 state.pa_classes | Partial_applied,Unknown -> r2.inductive_status<-Partial_applied | Partial_applied,Partial _ -> state.pa_classes<-Intset.remove i2 state.pa_classes; r2.inductive_status<-Partial_applied | Total cpl,Unknown -> r2.inductive_status<-Total cpl; | Total (i,pac),Total _ -> Queue.add (i,Cmark pac) state.marks | _,_ -> () let merge eq state = (* merge and no-merge *) debug (fun () -> msgnl (str "Merging " ++ pr_idx_term state eq.lhs ++ str " and " ++ pr_idx_term state eq.rhs ++ str ".")) (); let uf=state.uf in let i=find uf eq.lhs and j=find uf eq.rhs in if i<>j then if (size uf i)<(size uf j) then union state i j eq else union state j i (swap eq) let update t state = (* update 1 and 2 *) debug (fun () -> msgnl (str "Updating term " ++ pr_idx_term state t ++ str ".")) (); let (i,j) as sign = signature state.uf t in let (u,v) = subterms state.uf t in let rep = get_representative state.uf i in begin match rep.inductive_status with Partial _ -> rep.inductive_status <- Partial_applied; state.pa_classes <- Intset.remove i state.pa_classes | _ -> () end; PacMap.iter (fun pac _ -> Queue.add (t,Cmark (append_pac v pac)) state.marks) rep.constructors; PafMap.iter (fun paf _ -> Queue.add (t,Fmark (fsucc paf)) state.marks) rep.functions; try let s = ST.query sign state.sigtable in Queue.add {lhs=t;rhs=s;rule=Congruence} state.combine with Not_found -> ST.enter t sign state.sigtable let process_function_mark t rep paf state = add_paf rep paf t; state.terms<-Intset.union rep.lfathers state.terms let process_constructor_mark t i rep pac state = match rep.inductive_status with Total (s,opac) -> if pac.cnode <> opac.cnode then (* Conflict *) raise (Discriminable (s,opac,t,pac)) else (* Match *) let cinfo = get_constructor_info state.uf pac.cnode in let rec f n oargs args= if n > 0 then match (oargs,args) with s1::q1,s2::q2-> Queue.add {lhs=s1;rhs=s2;rule=Injection(s,opac,t,pac,n)} state.combine; f (n-1) q1 q2 | _-> anomaly "add_pacs : weird error in injection subterms merge" in f cinfo.ci_nhyps opac.args pac.args | Partial_applied | Partial _ -> add_pac rep pac t; state.terms<-Intset.union rep.lfathers state.terms | Unknown -> if pac.arity = 0 then rep.inductive_status <- Total (t,pac) else begin add_pac rep pac t; state.terms<-Intset.union rep.lfathers state.terms; 
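(* the constructor still expects arguments: record the class as
   partially applied so that [complete] can later saturate it with
   fresh [Eps] terms *)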
rep.inductive_status <- Partial pac; state.pa_classes<- Intset.add i state.pa_classes end let process_mark t m state = debug (fun () -> msgnl (str "Processing mark for term " ++ pr_idx_term state t ++ str ".")) (); let i=find state.uf t in let rep=get_representative state.uf i in match m with Fmark paf -> process_function_mark t rep paf state | Cmark pac -> process_constructor_mark t i rep pac state type explanation = Discrimination of (int*pa_constructor*int*pa_constructor) | Contradiction of disequality | Incomplete let check_disequalities state = let uf=state.uf in let rec check_aux = function dis::q -> debug (fun () -> msg (str "Checking if " ++ pr_idx_term state dis.lhs ++ str " = " ++ pr_idx_term state dis.rhs ++ str " ... ")) (); if find uf dis.lhs=find uf dis.rhs then begin debug msgnl (str "Yes");Some dis end else begin debug msgnl (str "No");check_aux q end | [] -> None in check_aux state.diseq let one_step state = try let eq = Queue.take state.combine in merge eq state; true with Queue.Empty -> try let (t,m) = Queue.take state.marks in process_mark t m state; true with Queue.Empty -> try let t = Intset.choose state.terms in state.terms<-Intset.remove t state.terms; update t state; true with Not_found -> false let __eps__ = id_of_string "_eps_" let new_state_var typ state = let id = pf_get_new_id __eps__ state.gls in let {it=gl ; sigma=sigma} = state.gls in let gls = Goal.V82.new_goal_with sigma gl [id,None,typ] in state.gls<- gls; id let complete_one_class state i= match (get_representative state.uf i).inductive_status with Partial pac -> let rec app t typ n = if n<=0 then t else let _,etyp,rest= destProd typ in let id = new_state_var etyp state in app (Appli(t,Eps id)) (substl [mkVar id] rest) (n-1) in let _c = pf_type_of state.gls (constr_of_term (term state.uf pac.cnode)) in let _args = List.map (fun i -> constr_of_term (term state.uf i)) pac.args in let typ = prod_applist _c (List.rev _args) in let ct = app (term state.uf i) typ pac.arity in state.uf.epsilons <- pac :: state.uf.epsilons; ignore (add_term state ct) | _ -> anomaly "wrong incomplete class" let complete state = Intset.iter (complete_one_class state) state.pa_classes type matching_problem = {mp_subst : int array; mp_inst : quant_eq; mp_stack : (ccpattern*int) list } let make_fun_table state = let uf= state.uf in let funtab=ref PafMap.empty in Array.iteri (fun i inode -> if i < uf.size then match inode.clas with Rep rep -> PafMap.iter (fun paf _ -> let elem = try PafMap.find paf !funtab with Not_found -> Intset.empty in funtab:= PafMap.add paf (Intset.add i elem) !funtab) rep.functions | _ -> ()) state.uf.map; !funtab let rec do_match state res pb_stack = let mp=Stack.pop pb_stack in match mp.mp_stack with [] -> res:= (mp.mp_inst,mp.mp_subst) :: !res | (patt,cl)::remains -> let uf=state.uf in match patt with PVar i -> if mp.mp_subst.(pred i)<0 then begin mp.mp_subst.(pred i)<- cl; (* no aliasing problem here *) Stack.push {mp with mp_stack=remains} pb_stack end else if mp.mp_subst.(pred i) = cl then Stack.push {mp with mp_stack=remains} pb_stack else (* mismatch for non-linear variable in pattern *) () | PApp (f,[]) -> begin try let j=Termhash.find uf.syms f in if find uf j =cl then Stack.push {mp with mp_stack=remains} pb_stack with Not_found -> () end | PApp(f, ((last_arg::rem_args) as args)) -> try let j=Termhash.find uf.syms f in let paf={fsym=j;fnargs=List.length args} in let rep=get_representative uf cl in let good_terms = PafMap.find paf rep.functions in let aux i = let (s,t) = signature state.uf i in 
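(* [i] is an application node in the class: split it into its function
   part and last argument, to be matched against [PApp(f,rem_args)]
   and [last_arg] respectively *)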
Stack.push {mp with mp_subst=Array.copy mp.mp_subst; mp_stack= (PApp(f,rem_args),s) :: (last_arg,t) :: remains} pb_stack in Intset.iter aux good_terms with Not_found -> () let paf_of_patt syms = function PVar _ -> invalid_arg "paf_of_patt: pattern is trivial" | PApp (f,args) -> {fsym=Termhash.find syms f; fnargs=List.length args} let init_pb_stack state = let syms= state.uf.syms in let pb_stack = Stack.create () in let funtab = make_fun_table state in let aux inst = begin let good_classes = match inst.qe_lhs_valid with Creates_variables -> Intset.empty | Normal -> begin try let paf= paf_of_patt syms inst.qe_lhs in PafMap.find paf funtab with Not_found -> Intset.empty end | Trivial typ -> begin try Typehash.find state.by_type typ with Not_found -> Intset.empty end in Intset.iter (fun i -> Stack.push {mp_subst = Array.make inst.qe_nvars (-1); mp_inst=inst; mp_stack=[inst.qe_lhs,i]} pb_stack) good_classes end; begin let good_classes = match inst.qe_rhs_valid with Creates_variables -> Intset.empty | Normal -> begin try let paf= paf_of_patt syms inst.qe_rhs in PafMap.find paf funtab with Not_found -> Intset.empty end | Trivial typ -> begin try Typehash.find state.by_type typ with Not_found -> Intset.empty end in Intset.iter (fun i -> Stack.push {mp_subst = Array.make inst.qe_nvars (-1); mp_inst=inst; mp_stack=[inst.qe_rhs,i]} pb_stack) good_classes end in List.iter aux state.quant; pb_stack let find_instances state = let pb_stack= init_pb_stack state in let res =ref [] in let _ = debug msgnl (str "Running E-matching algorithm ... "); try while true do check_for_interrupt (); do_match state res pb_stack done; anomaly "get out of here !" with Stack.Empty -> () in !res let rec execute first_run state = debug msgnl (str "Executing ... "); try while check_for_interrupt (); one_step state do () done; match check_disequalities state with None -> if not(Intset.is_empty state.pa_classes) then begin debug msgnl (str "First run was incomplete, completing ... "); complete state; execute false state end else if state.rew_depth>0 then let l=find_instances state in List.iter (add_inst state) l; if state.changed then begin state.changed <- false; execute true state end else begin debug msgnl (str "Out of instances ... "); None end else begin debug msgnl (str "Out of depth ... 
"); None end | Some dis -> Some begin if first_run then Contradiction dis else Incomplete end with Discriminable(s,spac,t,tpac) -> Some begin if first_run then Discrimination (s,spac,t,tpac) else Incomplete end coq-8.4pl2/plugins/cc/cc_plugin.mllib0000640000175000001440000000006011161000644016646 0ustar notinusersCcalgo Ccproof Cctac G_congruence Cc_plugin_mod coq-8.4pl2/plugins/cc/ccproof.ml0000640000175000001440000001000712010532755015660 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* prefl (Appli (t1,t2)) | _, _ -> {p_lhs=Appli (p1.p_lhs,p2.p_lhs); p_rhs=Appli (p1.p_rhs,p2.p_rhs); p_rule=Congr (p1,p2)} let rec ptrans p1 p3= match p1.p_rule,p3.p_rule with Refl _, _ ->p3 | _, Refl _ ->p1 | Trans(p1,p2), _ ->ptrans p1 (ptrans p2 p3) | Congr(p1,p2), Congr(p3,p4) ->pcongr (ptrans p1 p3) (ptrans p2 p4) | Congr(p1,p2), Trans({p_rule=Congr(p3,p4)},p5) -> ptrans (pcongr (ptrans p1 p3) (ptrans p2 p4)) p5 | _, _ -> if term_equal p1.p_rhs p3.p_lhs then {p_lhs=p1.p_lhs; p_rhs=p3.p_rhs; p_rule=Trans (p1,p3)} else anomaly "invalid cc transitivity" let rec psym p = match p.p_rule with Refl _ -> p | SymAx s -> {p_lhs=p.p_rhs; p_rhs=p.p_lhs; p_rule=Ax s} | Ax s-> {p_lhs=p.p_rhs; p_rhs=p.p_lhs; p_rule=SymAx s} | Inject (p0,c,n,a)-> {p_lhs=p.p_rhs; p_rhs=p.p_lhs; p_rule=Inject (psym p0,c,n,a)} | Trans (p1,p2)-> ptrans (psym p2) (psym p1) | Congr (p1,p2)-> pcongr (psym p1) (psym p2) let pax axioms s = let l,r = Constrhash.find axioms s in {p_lhs=l; p_rhs=r; p_rule=Ax s} let psymax axioms s = let l,r = Constrhash.find axioms s in {p_lhs=r; p_rhs=l; p_rule=SymAx s} let rec nth_arg t n= match t with Appli (t1,t2)-> if n>0 then nth_arg t1 (n-1) else t2 | _ -> anomaly "nth_arg: not enough args" let pinject p c n a = {p_lhs=nth_arg p.p_lhs (n-a); p_rhs=nth_arg p.p_rhs (n-a); p_rule=Inject(p,c,n,a)} let build_proof uf= let axioms = axioms uf in let rec equal_proof i j= if i=j then prefl (term uf i) else let (li,lj)=join_path uf i j in ptrans (path_proof i li) (psym (path_proof j lj)) and edge_proof ((i,j),eq)= let pi=equal_proof i eq.lhs in let pj=psym (equal_proof j eq.rhs) in let pij= match eq.rule with Axiom (s,reversed)-> if reversed then psymax axioms s else pax axioms s | Congruence ->congr_proof eq.lhs eq.rhs | Injection (ti,ipac,tj,jpac,k) -> let p=ind_proof ti ipac tj jpac in let cinfo= get_constructor_info uf ipac.cnode in pinject p cinfo.ci_constr cinfo.ci_nhyps k in ptrans (ptrans pi pij) pj and constr_proof i t ipac= if ipac.args=[] then equal_proof i t else let npac=tail_pac ipac in let (j,arg)=subterms uf t in let targ=term uf arg in let rj=find uf j in let u=find_pac uf rj npac in let p=constr_proof j u npac in ptrans (equal_proof i t) (pcongr p (prefl targ)) and path_proof i=function [] -> prefl (term uf i) | x::q->ptrans (path_proof (snd (fst x)) q) (edge_proof x) and congr_proof i j= let (i1,i2) = subterms uf i and (j1,j2) = subterms uf j in pcongr (equal_proof i1 j1) (equal_proof i2 j2) and ind_proof i ipac j jpac= let p=equal_proof i j and p1=constr_proof i i ipac and p2=constr_proof j j jpac in ptrans (psym p1) (ptrans p p2) in function `Prove (i,j) -> equal_proof i j | `Discr (i,ci,j,cj)-> ind_proof i ci j cj coq-8.4pl2/plugins/cc/cctac.ml0000640000175000001440000003760212121620060015302 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Closure.whd_val infos (Closure.inject t)) 
let whd_delta env= let infos=Closure.create_clos_infos Closure.betadeltaiota env in (fun t -> Closure.whd_val infos (Closure.inject t)) (* decompose member of equality in an applicative format *) let sf_of env sigma c = family_of_sort (sort_of env sigma c) let rec decompose_term env sigma t= match kind_of_term (whd env t) with App (f,args)-> let tf=decompose_term env sigma f in let targs=Array.map (decompose_term env sigma) args in Array.fold_left (fun s t->Appli (s,t)) tf targs | Prod (_,a,_b) when not (Termops.dependent (mkRel 1) _b) -> let b = Termops.pop _b in let sort_b = sf_of env sigma b in let sort_a = sf_of env sigma a in Appli(Appli(Product (sort_a,sort_b) , decompose_term env sigma a), decompose_term env sigma b) | Construct c-> let (mind,i_ind),i_con = c in let canon_mind = mind_of_kn (canonical_mind mind) in let canon_ind = canon_mind,i_ind in let (oib,_)=Global.lookup_inductive (canon_ind) in let nargs=mis_constructor_nargs_env env (canon_ind,i_con) in Constructor {ci_constr= (canon_ind,i_con); ci_arity=nargs; ci_nhyps=nargs-oib.mind_nparams} | Ind c -> let mind,i_ind = c in let canon_mind = mind_of_kn (canonical_mind mind) in let canon_ind = canon_mind,i_ind in (Symb (mkInd canon_ind)) | Const c -> let canon_const = constant_of_kn (canonical_con c) in (Symb (mkConst canon_const)) | _ ->if closed0 t then (Symb t) else raise Not_found (* decompose equality in members and type *) let atom_of_constr env sigma term = let wh = (whd_delta env term) in let kot = kind_of_term wh in match kot with App (f,args)-> if eq_constr f (Lazy.force _eq) && (Array.length args)=3 then `Eq (args.(0), decompose_term env sigma args.(1), decompose_term env sigma args.(2)) else `Other (decompose_term env sigma term) | _ -> `Other (decompose_term env sigma term) let rec pattern_of_constr env sigma c = match kind_of_term (whd env c) with App (f,args)-> let pf = decompose_term env sigma f in let pargs,lrels = List.split (array_map_to_list (pattern_of_constr env sigma) args) in PApp (pf,List.rev pargs), List.fold_left Intset.union Intset.empty lrels | Prod (_,a,_b) when not (Termops.dependent (mkRel 1) _b) -> let b = Termops.pop _b in let pa,sa = pattern_of_constr env sigma a in let pb,sb = pattern_of_constr env sigma b in let sort_b = sf_of env sigma b in let sort_a = sf_of env sigma a in PApp(Product (sort_a,sort_b), [pa;pb]),(Intset.union sa sb) | Rel i -> PVar i,Intset.singleton i | _ -> let pf = decompose_term env sigma c in PApp (pf,[]),Intset.empty let non_trivial = function PVar _ -> false | _ -> true let patterns_of_constr env sigma nrels term= let f,args= try destApp (whd_delta env term) with e when Errors.noncritical e -> raise Not_found in if eq_constr f (Lazy.force _eq) && (Array.length args)=3 then let patt1,rels1 = pattern_of_constr env sigma args.(1) and patt2,rels2 = pattern_of_constr env sigma args.(2) in let valid1 = if Intset.cardinal rels1 <> nrels then Creates_variables else if non_trivial patt1 then Normal else Trivial args.(0) and valid2 = if Intset.cardinal rels2 <> nrels then Creates_variables else if non_trivial patt2 then Normal else Trivial args.(0) in if valid1 <> Creates_variables || valid2 <> Creates_variables then nrels,valid1,patt1,valid2,patt2 else raise Not_found else raise Not_found let rec quantified_atom_of_constr env sigma nrels term = match kind_of_term (whd_delta env term) with Prod (id,atom,ff) -> if eq_constr ff (Lazy.force _False) then let patts=patterns_of_constr env sigma nrels atom in `Nrule patts else quantified_atom_of_constr (Environ.push_rel 
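(* the body is not False: descend under the product, extending the
   environment and counting one more bound variable *)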
(id,None,atom) env) sigma (succ nrels) ff | _ -> let patts=patterns_of_constr env sigma nrels term in `Rule patts let litteral_of_constr env sigma term= match kind_of_term (whd_delta env term) with | Prod (id,atom,ff) -> if eq_constr ff (Lazy.force _False) then match (atom_of_constr env sigma atom) with `Eq(t,a,b) -> `Neq(t,a,b) | `Other(p) -> `Nother(p) else begin try quantified_atom_of_constr (Environ.push_rel (id,None,atom) env) sigma 1 ff with Not_found -> `Other (decompose_term env sigma term) end | _ -> atom_of_constr env sigma term (* store all equalities from the context *) let rec make_prb gls depth additionnal_terms = let env=pf_env gls in let sigma=sig_sig gls in let state = empty depth gls in let pos_hyps = ref [] in let neg_hyps =ref [] in List.iter (fun c -> let t = decompose_term env sigma c in ignore (add_term state t)) additionnal_terms; List.iter (fun (id,_,e) -> begin let cid=mkVar id in match litteral_of_constr env sigma e with `Eq (t,a,b) -> add_equality state cid a b | `Neq (t,a,b) -> add_disequality state (Hyp cid) a b | `Other ph -> List.iter (fun (cidn,nh) -> add_disequality state (HeqnH (cid,cidn)) ph nh) !neg_hyps; pos_hyps:=(cid,ph):: !pos_hyps | `Nother nh -> List.iter (fun (cidp,ph) -> add_disequality state (HeqnH (cidp,cid)) ph nh) !pos_hyps; neg_hyps:=(cid,nh):: !neg_hyps | `Rule patts -> add_quant state id true patts | `Nrule patts -> add_quant state id false patts end) (Environ.named_context_of_val (Goal.V82.hyps gls.sigma gls.it)); begin match atom_of_constr env sigma (pf_concl gls) with `Eq (t,a,b) -> add_disequality state Goal a b | `Other g -> List.iter (fun (idp,ph) -> add_disequality state (HeqG idp) ph g) !pos_hyps end; state (* indhyps builds the array of arrays of constructor hyps for (ind largs) *) let build_projection intype outtype (cstr:constructor) special default gls= let env=pf_env gls in let (h,argv) = try destApp intype with Invalid_argument _ -> (intype,[||]) in let ind=destInd h in let types=Inductiveops.arities_of_constructors env ind in let lp=Array.length types in let ci=pred (snd cstr) in let branch i= let ti=Term.prod_appvect types.(i) argv in let rc=fst (decompose_prod_assum ti) in let head= if i=ci then special else default in it_mkLambda_or_LetIn head rc in let branches=Array.init lp branch in let casee=mkRel 1 in let pred=mkLambda(Anonymous,intype,outtype) in let case_info=make_case_info (pf_env gls) ind RegularStyle in let body= mkCase(case_info, pred, casee, branches) in let id=pf_get_new_id (id_of_string "t") gls in mkLambda(Name id,intype,body) (* generate an adhoc tactic following the proof tree *) let _M =mkMeta let rec proof_tac p gls = match p.p_rule with Ax c -> exact_check c gls | SymAx c -> let l=constr_of_term p.p_lhs and r=constr_of_term p.p_rhs in let typ = Termops.refresh_universes (pf_type_of gls l) in exact_check (mkApp(Lazy.force _sym_eq,[|typ;r;l;c|])) gls | Refl t -> let lr = constr_of_term t in let typ = Termops.refresh_universes (pf_type_of gls lr) in exact_check (mkApp(Lazy.force _refl_equal,[|typ;constr_of_term t|])) gls | Trans (p1,p2)-> let t1 = constr_of_term p1.p_lhs and t2 = constr_of_term p1.p_rhs and t3 = constr_of_term p2.p_rhs in let typ = Termops.refresh_universes (pf_type_of gls t2) in let prf = mkApp(Lazy.force _trans_eq,[|typ;t1;t2;t3;_M 1;_M 2|]) in tclTHENS (refine prf) [(proof_tac p1);(proof_tac p2)] gls | Congr (p1,p2)-> let tf1=constr_of_term p1.p_lhs and tx1=constr_of_term p2.p_lhs and tf2=constr_of_term p1.p_rhs and tx2=constr_of_term p2.p_rhs in let typf = Termops.refresh_universes 
(pf_type_of gls tf1) in let typx = Termops.refresh_universes (pf_type_of gls tx1) in let typfx = Termops.refresh_universes (pf_type_of gls (mkApp (tf1,[|tx1|]))) in let id = pf_get_new_id (id_of_string "f") gls in let appx1 = mkLambda(Name id,typf,mkApp(mkRel 1,[|tx1|])) in let lemma1 = mkApp(Lazy.force _f_equal, [|typf;typfx;appx1;tf1;tf2;_M 1|]) in let lemma2= mkApp(Lazy.force _f_equal, [|typx;typfx;tf2;tx1;tx2;_M 1|]) in let prf = mkApp(Lazy.force _trans_eq, [|typfx; mkApp(tf1,[|tx1|]); mkApp(tf2,[|tx1|]); mkApp(tf2,[|tx2|]);_M 2;_M 3|]) in tclTHENS (refine prf) [tclTHEN (refine lemma1) (proof_tac p1); tclFIRST [tclTHEN (refine lemma2) (proof_tac p2); reflexivity; fun gls -> errorlabstrm "Congruence" (Pp.str "I don't know how to handle dependent equality")]] gls | Inject (prf,cstr,nargs,argind) -> let ti=constr_of_term prf.p_lhs in let tj=constr_of_term prf.p_rhs in let default=constr_of_term p.p_lhs in let intype = Termops.refresh_universes (pf_type_of gls ti) in let outtype = Termops.refresh_universes (pf_type_of gls default) in let special=mkRel (1+nargs-argind) in let proj=build_projection intype outtype cstr special default gls in let injt= mkApp (Lazy.force _f_equal,[|intype;outtype;proj;ti;tj;_M 1|]) in tclTHEN (refine injt) (proof_tac prf) gls let refute_tac c t1 t2 p gls = let tt1=constr_of_term t1 and tt2=constr_of_term t2 in let intype = Termops.refresh_universes (pf_type_of gls tt1) in let neweq= mkApp(Lazy.force _eq, [|intype;tt1;tt2|]) in let hid=pf_get_new_id (id_of_string "Heq") gls in let false_t=mkApp (c,[|mkVar hid|]) in tclTHENS (assert_tac (Name hid) neweq) [proof_tac p; simplest_elim false_t] gls let convert_to_goal_tac c t1 t2 p gls = let tt1=constr_of_term t1 and tt2=constr_of_term t2 in let sort = Termops.refresh_universes (pf_type_of gls tt2) in let neweq=mkApp(Lazy.force _eq,[|sort;tt1;tt2|]) in let e=pf_get_new_id (id_of_string "e") gls in let x=pf_get_new_id (id_of_string "X") gls in let identity=mkLambda (Name x,sort,mkRel 1) in let endt=mkApp (Lazy.force _eq_rect, [|sort;tt1;identity;c;tt2;mkVar e|]) in tclTHENS (assert_tac (Name e) neweq) [proof_tac p;exact_check endt] gls let convert_to_hyp_tac c1 t1 c2 t2 p gls = let tt2=constr_of_term t2 in let h=pf_get_new_id (id_of_string "H") gls in let false_t=mkApp (c2,[|mkVar h|]) in tclTHENS (assert_tac (Name h) tt2) [convert_to_goal_tac c1 t1 t2 p; simplest_elim false_t] gls let discriminate_tac cstr p gls = let t1=constr_of_term p.p_lhs and t2=constr_of_term p.p_rhs in let intype = Termops.refresh_universes (pf_type_of gls t1) in let concl=pf_concl gls in let outsort = mkType (Termops.new_univ ()) in let xid=pf_get_new_id (id_of_string "X") gls in let tid=pf_get_new_id (id_of_string "t") gls in let identity=mkLambda(Name xid,outsort,mkLambda(Name tid,mkRel 1,mkRel 1)) in let trivial=pf_type_of gls identity in let outtype = mkType (Termops.new_univ ()) in let pred=mkLambda(Name xid,outtype,mkRel 1) in let hid=pf_get_new_id (id_of_string "Heq") gls in let proj=build_projection intype outtype cstr trivial concl gls in let injt=mkApp (Lazy.force _f_equal, [|intype;outtype;proj;t1;t2;mkVar hid|]) in let endt=mkApp (Lazy.force _eq_rect, [|outtype;trivial;pred;identity;concl;injt|]) in let neweq=mkApp(Lazy.force _eq,[|intype;t1;t2|]) in tclTHENS (assert_tac (Name hid) neweq) [proof_tac p;exact_check endt] gls (* wrap everything *) let build_term_to_complete uf meta pac = let cinfo = get_constructor_info uf pac.cnode in let real_args = List.map (fun i -> constr_of_term (term uf i)) pac.args in let dummy_args = 
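(* the missing trailing arguments are stood for by fresh metavariables *)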
List.rev (list_tabulate meta pac.arity) in let all_args = List.rev_append real_args dummy_args in applistc (mkConstruct cinfo.ci_constr) all_args let cc_tactic depth additionnal_terms gls= Coqlib.check_required_library ["Coq";"Init";"Logic"]; let _ = debug Pp.msgnl (Pp.str "Reading subgoal ...") in let state = make_prb gls depth additionnal_terms in let _ = debug Pp.msgnl (Pp.str "Problem built, solving ...") in let sol = execute true state in let _ = debug Pp.msgnl (Pp.str "Computation completed.") in let uf=forest state in match sol with None -> tclFAIL 0 (str "congruence failed") gls | Some reason -> debug Pp.msgnl (Pp.str "Goal solved, generating proof ..."); match reason with Discrimination (i,ipac,j,jpac) -> let p=build_proof uf (`Discr (i,ipac,j,jpac)) in let cstr=(get_constructor_info uf ipac.cnode).ci_constr in discriminate_tac cstr p gls | Incomplete -> let metacnt = ref 0 in let newmeta _ = incr metacnt; _M !metacnt in let terms_to_complete = List.map (build_term_to_complete uf newmeta) (epsilons uf) in Pp.msgnl (Pp.str "Goal is solvable by congruence but \ some arguments are missing."); Pp.msgnl (Pp.str " Try " ++ hov 8 begin str "\"congruence with (" ++ prlist_with_sep (fun () -> str ")" ++ pr_spc () ++ str "(") (Termops.print_constr_env (pf_env gls)) terms_to_complete ++ str ")\"," end); Pp.msgnl (Pp.str " replacing metavariables by arbitrary terms."); tclFAIL 0 (str "Incomplete") gls | Contradiction dis -> let p=build_proof uf (`Prove (dis.lhs,dis.rhs)) in let ta=term uf dis.lhs and tb=term uf dis.rhs in match dis.rule with Goal -> proof_tac p gls | Hyp id -> refute_tac id ta tb p gls | HeqG id -> convert_to_goal_tac id ta tb p gls | HeqnH (ida,idb) -> convert_to_hyp_tac ida ta idb tb p gls let cc_fail gls = errorlabstrm "Congruence" (Pp.str "congruence failed.") let congruence_tac depth l = tclORELSE (tclTHEN (tclREPEAT introf) (cc_tactic depth l)) cc_fail (* Beware: reflexivity = constructor 1 = apply refl_equal might be slow now, let's rather do something equivalent to a "simple apply refl_equal" *) let simple_reflexivity () = apply (Lazy.force _refl_equal) (* The [f_equal] tactic. It mimics the use of lemmas [f_equal], [f_equal2], etc. This isn't particularly related with congruence, apart from the fact that congruence is called internally. 
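   Illustrative behaviour (informal note, not in the original comment):
   on a goal of the form [f a b = f a' b'] it cuts the equalities
   [a = a'] and [b = b'] argument by argument, closing the trivial
   ones by reflexivity or by an internal call to congruence.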
*) let f_equal gl = let cut_eq c1 c2 = let ty = Termops.refresh_universes (pf_type_of gl c1) in tclTHENTRY (Tactics.cut (mkApp (Lazy.force _eq, [|ty; c1; c2|]))) (simple_reflexivity ()) in try match kind_of_term (pf_concl gl) with | App (r,[|_;t;t'|]) when eq_constr r (Lazy.force _eq) -> begin match kind_of_term t, kind_of_term t' with | App (f,v), App (f',v') when Array.length v = Array.length v' -> let rec cuts i = if i < 0 then tclTRY (congruence_tac 1000 []) else tclTHENFIRST (cut_eq v.(i) v'.(i)) (cuts (i-1)) in cuts (Array.length v - 1) gl | _ -> tclIDTAC gl end | _ -> tclIDTAC gl with Type_errors.TypeError _ -> tclIDTAC gl coq-8.4pl2/plugins/cc/ccalgo.mli0000640000175000001440000001177412010532755015642 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* term -> bool type patt_kind = Normal | Trivial of types | Creates_variables type ccpattern = PApp of term * ccpattern list | PVar of int type pa_constructor = { cnode : int; arity : int; args : int list} module PacMap : Map.S with type key = pa_constructor type forest type state type rule= Congruence | Axiom of constr * bool | Injection of int * pa_constructor * int * pa_constructor * int type from= Goal | Hyp of constr | HeqG of constr | HeqnH of constr*constr type 'a eq = {lhs:int;rhs:int;rule:'a} type equality = rule eq type disequality = from eq type explanation = Discrimination of (int*pa_constructor*int*pa_constructor) | Contradiction of disequality | Incomplete module Constrhash : Hashtbl.S with type key = constr module Termhash : Hashtbl.S with type key = term val constr_of_term : term -> constr val debug : (Pp.std_ppcmds -> unit) -> Pp.std_ppcmds -> unit val forest : state -> forest val axioms : forest -> (term * term) Constrhash.t val epsilons : forest -> pa_constructor list val empty : int -> Proof_type.goal Tacmach.sigma -> state val add_term : state -> term -> int val add_equality : state -> constr -> term -> term -> unit val add_disequality : state -> from -> term -> term -> unit val add_quant : state -> identifier -> bool -> int * patt_kind * ccpattern * patt_kind * ccpattern -> unit val tail_pac : pa_constructor -> pa_constructor val find : forest -> int -> int val find_pac : forest -> int -> pa_constructor -> int val term : forest -> int -> term val get_constructor_info : forest -> int -> cinfo val subterms : forest -> int -> int * int val join_path : forest -> int -> int -> ((int * int) * equality) list * ((int * int) * equality) list type quant_eq= {qe_hyp_id: identifier; qe_pol: bool; qe_nvars:int; qe_lhs: ccpattern; qe_lhs_valid:patt_kind; qe_rhs: ccpattern; qe_rhs_valid:patt_kind} type pa_fun= {fsym:int; fnargs:int} type matching_problem module PafMap: Map.S with type key = pa_fun val make_fun_table : state -> Intset.t PafMap.t val do_match : state -> (quant_eq * int array) list ref -> matching_problem Stack.t -> unit val init_pb_stack : state -> matching_problem Stack.t val paf_of_patt : int Termhash.t -> ccpattern -> pa_fun val find_instances : state -> (quant_eq * int array) list val execute : bool -> state -> explanation option (*type pa_constructor module PacMap:Map.S with type key=pa_constructor type term = Symb of Term.constr | Eps | Appli of term * term | Constructor of Names.constructor*int*int type rule = Congruence | Axiom of Names.identifier | Injection of int*int*int*int type equality = {lhs : int; rhs : int; rule : rule} module ST : sig type t val empty : unit -> t val enter : int -> int * int 
-> t -> unit val query : int * int -> t -> int val delete : int -> t -> unit val delete_list : int list -> t -> unit end module UF : sig type t exception Discriminable of int * int * int * int * t val empty : unit -> t val find : t -> int -> int val size : t -> int -> int val get_constructor : t -> int -> Names.constructor val pac_arity : t -> int -> int * int -> int val mem_node_pac : t -> int -> int * int -> int val add_pacs : t -> int -> pa_constructor PacMap.t -> int list * equality list val term : t -> int -> term val subterms : t -> int -> int * int val add : t -> term -> int val union : t -> int -> int -> equality -> int list * equality list val join_path : t -> int -> int -> ((int*int)*equality) list* ((int*int)*equality) list end val combine_rec : UF.t -> int list -> equality list val process_rec : UF.t -> equality list -> int list val cc : UF.t -> unit val make_uf : (Names.identifier * (term * term)) list -> UF.t val add_one_diseq : UF.t -> (term * term) -> int * int val add_disaxioms : UF.t -> (Names.identifier * (term * term)) list -> (Names.identifier * (int * int)) list val check_equal : UF.t -> int * int -> bool val find_contradiction : UF.t -> (Names.identifier * (int * int)) list -> (Names.identifier * (int * int)) *) coq-8.4pl2/plugins/cc/g_congruence.ml40000640000175000001440000000172012010532755016751 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ congruence_tac 1000 [] ] |[ "congruence" integer(n) ] -> [ congruence_tac n [] ] |[ "congruence" "with" ne_constr_list(l) ] -> [ congruence_tac 1000 l ] |[ "congruence" integer(n) "with" ne_constr_list(l) ] -> [ congruence_tac n l ] END TACTIC EXTEND f_equal [ "f_equal" ] -> [ f_equal ] END coq-8.4pl2/plugins/cc/ccproof.mli0000640000175000001440000000160612010532755016036 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ `Discr of int * pa_constructor * int * pa_constructor | `Prove of int * int ] -> proof coq-8.4pl2/plugins/cc/README0000640000175000001440000000103611160567762014570 0ustar notinusers cctac: congruence-closure for coq author: Pierre Corbineau, Stage de DEA au LSV, ENS Cachan Thse au LRI, Universit Paris Sud XI Files : - ccalgo.ml : congruence closure algorithm - ccproof.ml : proof generation code - cctac.ml4 : the tactic itself - CCSolve.v : a small Ltac tactic based on congruence Known Bugs : the congruence tactic can fail due to type dependencies. Related documents: Peter J. Downey, Ravi Sethi, and Robert E. Tarjan. Variations on the common subexpression problem. JACM, 27(4):758-771, October 1980. 
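Usage sketch (illustrative example, not part of the original README;
the goal below is made up):

    Goal forall (f : nat -> nat) (x y : nat), x = y -> f x = f y.
    Proof. intros; congruence. Qed.

The tactic decides ground equalities over uninterpreted symbols and
datatype constructors, so injectivity and discrimination of
constructors are also used when closing a goal.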
coq-8.4pl2/plugins/cc/cctac.mli0000640000175000001440000000135012010532755015454 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Proof_type.tactic val cc_tactic : int -> constr list -> tactic val cc_fail : tactic val congruence_tac : int -> constr list -> tactic val f_equal : tactic coq-8.4pl2/plugins/syntax/0000750000175000001440000000000012127276540014643 5ustar notinuserscoq-8.4pl2/plugins/syntax/nat_syntax.ml0000640000175000001440000000451312010532755017363 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* > *) let threshold = of_int 5000 let nat_of_int dloc n = if is_pos_or_zero n then begin if less_than threshold n then Flags.if_warn msg_warning (strbrk "Stack overflow or segmentation fault happens when " ++ strbrk "working with large numbers in nat (observed threshold " ++ strbrk "may vary from 5000 to 70000 depending on your system " ++ strbrk "limits and on the command executed)."); let ref_O = GRef (dloc, glob_O) in let ref_S = GRef (dloc, glob_S) in let rec mk_nat acc n = if n <> zero then mk_nat (GApp (dloc,ref_S, [acc])) (sub_1 n) else acc in mk_nat ref_O n end else user_err_loc (dloc, "nat_of_int", str "Cannot interpret a negative number as a number of type nat") (************************************************************************) (* Printing via scopes *) exception Non_closed_number let rec int_of_nat = function | GApp (_,GRef (_,s),[a]) when s = glob_S -> add_1 (int_of_nat a) | GRef (_,z) when z = glob_O -> zero | _ -> raise Non_closed_number let uninterp_nat p = try Some (int_of_nat p) with Non_closed_number -> None (************************************************************************) (* Declare the primitive parsers and printers *) let _ = Notation.declare_numeral_interpreter "nat_scope" (nat_path,["Coq";"Init";"Datatypes"]) nat_of_int ([GRef (dummy_loc,glob_S); GRef (dummy_loc,glob_O)], uninterp_nat, true) coq-8.4pl2/plugins/syntax/r_syntax_plugin.mllib0000640000175000001440000000003511163467536021116 0ustar notinusersR_syntax R_syntax_plugin_mod coq-8.4pl2/plugins/syntax/string_syntax.ml0000640000175000001440000000407111504715034020106 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (match uninterp_ascii a with | Some c -> Buffer.add_char b (Char.chr c); aux s | _ -> raise Non_closed_string) | GRef (_,z) when z = force glob_EmptyString -> Some (Buffer.contents b) | _ -> raise Non_closed_string in aux r with Non_closed_string -> None let _ = Notation.declare_string_interpreter "string_scope" (string_path,["Coq";"Strings";"String"]) interp_string ([GRef (dummy_loc,static_glob_String); GRef (dummy_loc,static_glob_EmptyString)], uninterp_string, true) coq-8.4pl2/plugins/syntax/nat_syntax_plugin.mllib0000640000175000001440000000004111163467536021434 0ustar notinusersNat_syntax Nat_syntax_plugin_mod coq-8.4pl2/plugins/syntax/ascii_syntax.ml0000640000175000001440000000520311504715034017666 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | GRef (_,k)::l when k = glob_true -> 1+2*(uninterp_bool_list (n-1) l) | GRef (_,k)::l when k = glob_false -> 2*(uninterp_bool_list (n-1) l) | _ -> raise Non_closed_ascii in try let rec aux = function | GApp 
(_,GRef (_,k),l) when k = force glob_Ascii -> uninterp_bool_list 8 l | _ -> raise Non_closed_ascii in Some (aux r) with Non_closed_ascii -> None let make_ascii_string n = if n>=32 && n<=126 then String.make 1 (char_of_int n) else Printf.sprintf "%03d" n let uninterp_ascii_string r = Option.map make_ascii_string (uninterp_ascii r) let _ = Notation.declare_string_interpreter "char_scope" (ascii_path,ascii_module) interp_ascii_string ([GRef (dummy_loc,static_glob_Ascii)], uninterp_ascii_string, true) coq-8.4pl2/plugins/syntax/z_syntax.ml0000640000175000001440000001450312010532755017052 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* GApp (dloc, ref_xO,[pos_of q]) | (q,true) when q <> zero -> GApp (dloc,ref_xI,[pos_of q]) | (q,true) -> ref_xH in pos_of x let error_non_positive dloc = user_err_loc (dloc, "interp_positive", str "Only strictly positive numbers in type \"positive\".") let interp_positive dloc n = if is_strictly_pos n then pos_of_bignat dloc n else error_non_positive dloc (**********************************************************************) (* Printing positive via scopes *) (**********************************************************************) let rec bignat_of_pos = function | GApp (_, GRef (_,b),[a]) when b = glob_xO -> mult_2(bignat_of_pos a) | GApp (_, GRef (_,b),[a]) when b = glob_xI -> add_1(mult_2(bignat_of_pos a)) | GRef (_, a) when a = glob_xH -> Bigint.one | _ -> raise Non_closed_number let uninterp_positive p = try Some (bignat_of_pos p) with Non_closed_number -> None (************************************************************************) (* Declaring interpreters and uninterpreters for positive *) (************************************************************************) let _ = Notation.declare_numeral_interpreter "positive_scope" (positive_path,binnums) interp_positive ([GRef (dummy_loc, glob_xI); GRef (dummy_loc, glob_xO); GRef (dummy_loc, glob_xH)], uninterp_positive, true) (**********************************************************************) (* Parsing N via scopes *) (**********************************************************************) let n_kn = make_kn (make_dir binnums) (id_of_string "N") let glob_n = IndRef (n_kn,0) let path_of_N0 = ((n_kn,0),1) let path_of_Npos = ((n_kn,0),2) let glob_N0 = ConstructRef path_of_N0 let glob_Npos = ConstructRef path_of_Npos let n_path = make_path binnums "N" let n_of_binnat dloc pos_or_neg n = if n <> zero then GApp(dloc, GRef (dloc,glob_Npos), [pos_of_bignat dloc n]) else GRef (dloc, glob_N0) let error_negative dloc = user_err_loc (dloc, "interp_N", str "No negative numbers in type \"N\".") let n_of_int dloc n = if is_pos_or_zero n then n_of_binnat dloc true n else error_negative dloc (**********************************************************************) (* Printing N via scopes *) (**********************************************************************) let bignat_of_n = function | GApp (_, GRef (_,b),[a]) when b = glob_Npos -> bignat_of_pos a | GRef (_, a) when a = glob_N0 -> Bigint.zero | _ -> raise Non_closed_number let uninterp_n p = try Some (bignat_of_n p) with Non_closed_number -> None (************************************************************************) (* Declaring interpreters and uninterpreters for N *) let _ = Notation.declare_numeral_interpreter "N_scope" (n_path,binnums) n_of_int ([GRef (dummy_loc, glob_N0); GRef (dummy_loc, glob_Npos)], uninterp_n, true) 
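(* Informal illustration (not from the original source): with this
   binary encoding 3 is represented as xI xH and 6 as xO (xI xH);
   the Z parser below reuses the same positive spine wrapped in a
   sign constructor (zero, positive or negative). *)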
(**********************************************************************) (* Parsing Z via scopes *) (**********************************************************************) let z_path = make_path binnums "Z" let z_kn = make_kn (make_dir binnums) (id_of_string "Z") let glob_z = IndRef (z_kn,0) let path_of_ZERO = ((z_kn,0),1) let path_of_POS = ((z_kn,0),2) let path_of_NEG = ((z_kn,0),3) let glob_ZERO = ConstructRef path_of_ZERO let glob_POS = ConstructRef path_of_POS let glob_NEG = ConstructRef path_of_NEG let z_of_int dloc n = if n <> zero then let sgn, n = if is_pos_or_zero n then glob_POS, n else glob_NEG, Bigint.neg n in GApp(dloc, GRef (dloc,sgn), [pos_of_bignat dloc n]) else GRef (dloc, glob_ZERO) (**********************************************************************) (* Printing Z via scopes *) (**********************************************************************) let bigint_of_z = function | GApp (_, GRef (_,b),[a]) when b = glob_POS -> bignat_of_pos a | GApp (_, GRef (_,b),[a]) when b = glob_NEG -> Bigint.neg (bignat_of_pos a) | GRef (_, a) when a = glob_ZERO -> Bigint.zero | _ -> raise Non_closed_number let uninterp_z p = try Some (bigint_of_z p) with Non_closed_number -> None (************************************************************************) (* Declaring interpreters and uninterpreters for Z *) let _ = Notation.declare_numeral_interpreter "Z_scope" (z_path,binnums) z_of_int ([GRef (dummy_loc, glob_ZERO); GRef (dummy_loc, glob_POS); GRef (dummy_loc, glob_NEG)], uninterp_z, true) coq-8.4pl2/plugins/syntax/numbers_syntax.ml0000640000175000001440000002155112010532755020255 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* cur | (GRef (_,b))::l when b = int31_0 -> args_parsing l (mult_2 cur) | (GRef (_,b))::l when b = int31_1 -> args_parsing l (add_1 (mult_2 cur)) | _ -> raise Non_closed in function | GApp (_, GRef (_, c), args) when c=int31_construct -> args_parsing args zero | _ -> raise Non_closed let uninterp_int31 i = try Some (bigint_of_int31 i) with Non_closed -> None (* Actually declares the interpreter for int31 *) let _ = Notation.declare_numeral_interpreter int31_scope (int31_path, int31_module) interp_int31 ([GRef (Util.dummy_loc, int31_construct)], uninterp_int31, true) (*** Parsing for bigN in digital notation ***) (* the base for bigN (in Coq) that is 2^31 in our case *) let base = pow two 31 (* base of the bigN of height N : (2^31)^(2^n) *) let rank n = let rec rk n pow2 = if n <= 0 then pow2 else rk (n-1) (mult pow2 pow2) in rk n base (* splits a number bi at height n, that is the rest needs 2^n int31 to be stored it is expected to be used only when the quotient would also need 2^n int31 to be stored *) let split_at n bi = euclid bi (rank (n-1)) (* search the height of the Coq bigint needed to represent the integer bi *) let height bi = let rec hght n pow2 = if less_than bi pow2 then n else hght (n+1) (mult pow2 pow2) in hght 0 base (* n must be a non-negative integer (from bigint.ml) *) let word_of_pos_bigint dloc hght n = let ref_W0 = GRef (dloc, zn2z_W0) in let ref_WW = GRef (dloc, zn2z_WW) in let rec decomp hgt n = if hgt <= 0 then int31_of_pos_bigint dloc n else if equal n zero then GApp (dloc, ref_W0, [GHole (dloc, Evd.InternalHole)]) else let (h,l) = split_at hgt n in GApp (dloc, ref_WW, [GHole (dloc, Evd.InternalHole); decomp (hgt-1) h; decomp (hgt-1) l]) in decomp hght n let bigN_of_pos_bigint dloc n = let h = height n in let ref_constructor = 
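(* BigN constructor corresponding to the height h needed to store n *)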
GRef (dloc, bigN_constructor h) in let word = word_of_pos_bigint dloc h n in let args = if h < n_inlined then [word] else [Nat_syntax.nat_of_int dloc (of_int (h-n_inlined));word] in GApp (dloc, ref_constructor, args) let bigN_error_negative dloc = Util.user_err_loc (dloc, "interp_bigN", Pp.str "bigN are only non-negative numbers.") let interp_bigN dloc n = if is_pos_or_zero n then bigN_of_pos_bigint dloc n else bigN_error_negative dloc (* Pretty prints a bigN *) let bigint_of_word = let rec get_height rc = match rc with | GApp (_,GRef(_,c), [_;lft;rght]) when c = zn2z_WW -> 1+max (get_height lft) (get_height rght) | _ -> 0 in let rec transform hght rc = match rc with | GApp (_,GRef(_,c),_) when c = zn2z_W0-> zero | GApp (_,GRef(_,c), [_;lft;rght]) when c=zn2z_WW-> let new_hght = hght-1 in add (mult (rank new_hght) (transform new_hght lft)) (transform new_hght rght) | _ -> bigint_of_int31 rc in fun rc -> let hght = get_height rc in transform hght rc let bigint_of_bigN rc = match rc with | GApp (_,_,[one_arg]) -> bigint_of_word one_arg | GApp (_,_,[_;second_arg]) -> bigint_of_word second_arg | _ -> raise Non_closed let uninterp_bigN rc = try Some (bigint_of_bigN rc) with Non_closed -> None (* declare the list of constructors of bigN used in the declaration of the numeral interpreter *) let bigN_list_of_constructors = let rec build i = if i < n_inlined+1 then GRef (Util.dummy_loc, bigN_constructor i)::(build (i+1)) else [] in build 0 (* Actually declares the interpreter for bigN *) let _ = Notation.declare_numeral_interpreter bigN_scope (bigN_path, bigN_module) interp_bigN (bigN_list_of_constructors, uninterp_bigN, true) (*** Parsing for bigZ in digital notation ***) let interp_bigZ dloc n = let ref_pos = GRef (dloc, bigZ_pos) in let ref_neg = GRef (dloc, bigZ_neg) in if is_pos_or_zero n then GApp (dloc, ref_pos, [bigN_of_pos_bigint dloc n]) else GApp (dloc, ref_neg, [bigN_of_pos_bigint dloc (neg n)]) (* pretty printing functions for bigZ *) let bigint_of_bigZ = function | GApp (_, GRef(_,c), [one_arg]) when c = bigZ_pos -> bigint_of_bigN one_arg | GApp (_, GRef(_,c), [one_arg]) when c = bigZ_neg -> let opp_val = bigint_of_bigN one_arg in if equal opp_val zero then raise Non_closed else neg opp_val | _ -> raise Non_closed let uninterp_bigZ rc = try Some (bigint_of_bigZ rc) with Non_closed -> None (* Actually declares the interpreter for bigZ *) let _ = Notation.declare_numeral_interpreter bigZ_scope (bigZ_path, bigZ_module) interp_bigZ ([GRef (Util.dummy_loc, bigZ_pos); GRef (Util.dummy_loc, bigZ_neg)], uninterp_bigZ, true) (*** Parsing for bigQ in digital notation ***) let interp_bigQ dloc n = let ref_z = GRef (dloc, bigQ_z) in GApp (dloc, ref_z, [interp_bigZ dloc n]) let uninterp_bigQ rc = try match rc with | GApp (_, GRef(_,c), [one_arg]) when c = bigQ_z -> Some (bigint_of_bigZ one_arg) | _ -> None (* we don't pretty-print yet fractions *) with Non_closed -> None (* Actually declares the interpreter for bigQ *) let _ = Notation.declare_numeral_interpreter bigQ_scope (bigQ_path, bigQ_module) interp_bigQ ([GRef (Util.dummy_loc, bigQ_z)], uninterp_bigQ, true) coq-8.4pl2/plugins/syntax/string_syntax_plugin.mllib0000640000175000001440000000004711163467536022166 0ustar notinusersString_syntax String_syntax_plugin_mod coq-8.4pl2/plugins/syntax/ascii_syntax_plugin.mllib0000640000175000001440000000004511163467536021746 0ustar notinusersAscii_syntax Ascii_syntax_plugin_mod coq-8.4pl2/plugins/syntax/z_syntax_plugin.mllib0000640000175000001440000000003511163467536021126 0ustar notinusersZ_syntax 
Z_syntax_plugin_mod coq-8.4pl2/plugins/syntax/numbers_syntax_plugin.mllib0000640000175000001440000000005111163467536022326 0ustar notinusersNumbers_syntax Numbers_syntax_plugin_mod coq-8.4pl2/plugins/syntax/r_syntax.ml0000640000175000001440000001013012010532755017032 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* zero then r_of_pos n else GRef(dloc,glob_R0) let r_of_int dloc z = if is_strictly_neg z then GApp (dloc, GRef(dloc,glob_Ropp), [r_of_posint dloc (neg z)]) else r_of_posint dloc z (**********************************************************************) (* Printing R via scopes *) (**********************************************************************) let bignat_of_r = (* for numbers > 1 *) let rec bignat_of_pos = function (* 1+1 *) | GApp (_,GRef (_,p), [GRef (_,o1); GRef (_,o2)]) when p = glob_Rplus & o1 = glob_R1 & o2 = glob_R1 -> two (* 1+(1+1) *) | GApp (_,GRef (_,p1), [GRef (_,o1); GApp(_,GRef (_,p2),[GRef(_,o2);GRef(_,o3)])]) when p1 = glob_Rplus & p2 = glob_Rplus & o1 = glob_R1 & o2 = glob_R1 & o3 = glob_R1 -> three (* (1+1)*b *) | GApp (_,GRef (_,p), [a; b]) when p = glob_Rmult -> if bignat_of_pos a <> two then raise Non_closed_number; mult_2 (bignat_of_pos b) (* 1+(1+1)*b *) | GApp (_,GRef (_,p1), [GRef (_,o); GApp (_,GRef (_,p2),[a;b])]) when p1 = glob_Rplus & p2 = glob_Rmult & o = glob_R1 -> if bignat_of_pos a <> two then raise Non_closed_number; add_1 (mult_2 (bignat_of_pos b)) | _ -> raise Non_closed_number in let bignat_of_r = function | GRef (_,a) when a = glob_R0 -> zero | GRef (_,a) when a = glob_R1 -> one | r -> bignat_of_pos r in bignat_of_r let bigint_of_r = function | GApp (_,GRef (_,o), [a]) when o = glob_Ropp -> let n = bignat_of_r a in if n = zero then raise Non_closed_number; neg n | a -> bignat_of_r a let uninterp_r p = try Some (bigint_of_r p) with Non_closed_number -> None let _ = Notation.declare_numeral_interpreter "R_scope" (r_path,["Coq";"Reals";"Rdefinitions"]) r_of_int ([GRef(dummy_loc,glob_Ropp);GRef(dummy_loc,glob_R0); GRef(dummy_loc,glob_Rplus);GRef(dummy_loc,glob_Rmult); GRef(dummy_loc,glob_R1)], uninterp_r, false) coq-8.4pl2/plugins/omega/0000750000175000001440000000000012127276540014405 5ustar notinuserscoq-8.4pl2/plugins/omega/omega.ml0000640000175000001440000006355212010532755016035 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bigint -> bool val add : bigint -> bigint -> bigint val sub : bigint -> bigint -> bigint val mult : bigint -> bigint -> bigint val euclid : bigint -> bigint -> bigint * bigint val neg : bigint -> bigint val zero : bigint val one : bigint val to_string : bigint -> string end let debug = ref false module MakeOmegaSolver (Int:INT) = struct type bigint = Int.bigint let (?) x y = Int.less_than y x let (>=?) x y = Int.less_than y x or x = y let (=?) 
= (=) let (+) = Int.add let (-) = Int.sub let ( * ) = Int.mult let (/) x y = fst (Int.euclid x y) let (mod) x y = snd (Int.euclid x y) let zero = Int.zero let one = Int.one let two = one + one let negone = Int.neg one let abs x = if Int.less_than x zero then Int.neg x else x let string_of_bigint = Int.to_string let neg = Int.neg (* To ensure that polymorphic (<) is not used mistakenly on big integers *) (* Warning: do not use (=) either on big int *) let (<) = ((<) : int -> int -> bool) let (>) = ((>) : int -> int -> bool) let (<=) = ((<=) : int -> int -> bool) let (>=) = ((>=) : int -> int -> bool) let pp i = print_int i; print_newline (); flush stdout let push v l = l := v :: !l let rec pgcd x y = if y =? zero then x else pgcd y (x mod y) let pgcd_l = function | [] -> failwith "pgcd_l" | x :: l -> List.fold_left pgcd x l let floor_div a b = match a >=? zero , b >? zero with | true,true -> a / b | false,false -> a / b | true, false -> (a-one) / b - one | false,true -> (a+one) / b - one type coeff = {c: bigint ; v: int} type linear = coeff list type eqn_kind = EQUA | INEQ | DISE type afine = { (* a number uniquely identifying the equation *) id: int ; (* a boolean true for an eq, false for an ineq (Sigma a_i x_i >= 0) *) kind: eqn_kind; (* the variables and their coefficient *) body: coeff list; (* a constant *) constant: bigint } type state_action = { st_new_eq : afine; st_def : afine; st_orig : afine; st_coef : bigint; st_var : int } type action = | DIVIDE_AND_APPROX of afine * afine * bigint * bigint | NOT_EXACT_DIVIDE of afine * bigint | FORGET_C of int | EXACT_DIVIDE of afine * bigint | SUM of int * (bigint * afine) * (bigint * afine) | STATE of state_action | HYP of afine | FORGET of int * int | FORGET_I of int * int | CONTRADICTION of afine * afine | NEGATE_CONTRADICT of afine * afine * bool | MERGE_EQ of int * afine * int | CONSTANT_NOT_NUL of int * bigint | CONSTANT_NUL of int | CONSTANT_NEG of int * bigint | SPLIT_INEQ of afine * (int * action list) * (int * action list) | WEAKEN of int * bigint exception UNSOLVABLE exception NO_CONTRADICTION let display_eq print_var (l,e) = let _ = List.fold_left (fun not_first f -> print_string (if f.c ? 
zero then Printf.printf "+ %s " (string_of_bigint e) else if e accu + one + trace_length l1 + trace_length l2 | _ -> accu + one in List.fold_left action_length zero l let operator_of_eq = function | EQUA -> "=" | DISE -> "!=" | INEQ -> ">=" let kind_of = function | EQUA -> "equation" | DISE -> "disequation" | INEQ -> "inequation" let display_system print_var l = List.iter (fun { kind=b; body=e; constant=c; id=id} -> Printf.printf "E%d: " id; display_eq print_var (e,c); Printf.printf "%s 0\n" (operator_of_eq b)) l; print_string "------------------------\n\n" let display_inequations print_var l = List.iter (fun e -> display_eq print_var e;print_string ">= 0\n") l; print_string "------------------------\n\n" let sbi = string_of_bigint let rec display_action print_var = function | act :: l -> begin match act with | DIVIDE_AND_APPROX (e1,e2,k,d) -> Printf.printf "Inequation E%d is divided by %s and the constant coefficient is \ rounded by substracting %s.\n" e1.id (sbi k) (sbi d) | NOT_EXACT_DIVIDE (e,k) -> Printf.printf "Constant in equation E%d is not divisible by the pgcd \ %s of its other coefficients.\n" e.id (sbi k) | EXACT_DIVIDE (e,k) -> Printf.printf "Equation E%d is divided by the pgcd \ %s of its coefficients.\n" e.id (sbi k) | WEAKEN (e,k) -> Printf.printf "To ensure a solution in the dark shadow \ the equation E%d is weakened by %s.\n" e (sbi k) | SUM (e,(c1,e1),(c2,e2)) -> Printf.printf "We state %s E%d = %s %s E%d + %s %s E%d.\n" (kind_of e1.kind) e (sbi c1) (kind_of e1.kind) e1.id (sbi c2) (kind_of e2.kind) e2.id | STATE { st_new_eq = e } -> Printf.printf "We define a new equation E%d: " e.id; display_eq print_var (e.body,e.constant); print_string (operator_of_eq e.kind); print_string " 0" | HYP e -> Printf.printf "We define E%d: " e.id; display_eq print_var (e.body,e.constant); print_string (operator_of_eq e.kind); print_string " 0\n" | FORGET_C e -> Printf.printf "E%d is trivially satisfiable.\n" e | FORGET (e1,e2) -> Printf.printf "E%d subsumes E%d.\n" e1 e2 | FORGET_I (e1,e2) -> Printf.printf "E%d subsumes E%d.\n" e1 e2 | MERGE_EQ (e,e1,e2) -> Printf.printf "E%d and E%d can be merged into E%d.\n" e1.id e2 e | CONTRADICTION (e1,e2) -> Printf.printf "Equations E%d and E%d imply a contradiction on their \ constant factors.\n" e1.id e2.id | NEGATE_CONTRADICT(e1,e2,b) -> Printf.printf "Equations E%d and E%d state that their body is at the same time \ equal and different\n" e1.id e2.id | CONSTANT_NOT_NUL (e,k) -> Printf.printf "Equation E%d states %s = 0.\n" e (sbi k) | CONSTANT_NEG(e,k) -> Printf.printf "Equation E%d states %s >= 0.\n" e (sbi k) | CONSTANT_NUL e -> Printf.printf "Inequation E%d states 0 != 0.\n" e | SPLIT_INEQ (e,(e1,l1),(e2,l2)) -> Printf.printf "Equation E%d is split in E%d and E%d\n\n" e.id e1 e2; display_action print_var l1; print_newline (); display_action print_var l2; print_newline () end; display_action print_var l | [] -> flush stdout let default_print_var v = Printf.sprintf "X%d" v (* For debugging *) (*""*) let add_event, history, clear_history = let accu = ref [] in (fun (v:action) -> if !debug then display_action default_print_var [v]; push v accu), (fun () -> !accu), (fun () -> accu := []) let nf_linear = Sort.list (fun x y -> x.v > y.v) let nf ((b : bool),(e,(x : int))) = (b,(nf_linear e,x)) let map_eq_linear f = let rec loop = function | x :: l -> let c = f x.c in if c=?zero then loop l else {v=x.v; c=c} :: loop l | [] -> [] in loop let map_eq_afine f e = { id = e.id; kind = e.kind; body = map_eq_linear f e.body; constant = f e.constant } let 
negate_eq = map_eq_afine (fun x -> neg x) let rec sum p0 p1 = match (p0,p1) with | ([], l) -> l | (l, []) -> l | (((x1::l1) as l1'), ((x2::l2) as l2')) -> if x1.v = x2.v then let c = x1.c + x2.c in if c =? zero then sum l1 l2 else {v=x1.v;c=c} :: sum l1 l2 else if x1.v > x2.v then x1 :: sum l1 l2' else x2 :: sum l1' l2 let sum_afine new_eq_id eq1 eq2 = { kind = eq1.kind; id = new_eq_id (); body = sum eq1.body eq2.body; constant = eq1.constant + eq2.constant } exception FACTOR1 let rec chop_factor_1 = function | x :: l -> if abs x.c =? one then x,l else let (c',l') = chop_factor_1 l in (c',x::l') | [] -> raise FACTOR1 exception CHOPVAR let rec chop_var v = function | f :: l -> if f.v = v then f,l else let (f',l') = chop_var v l in (f',f::l') | [] -> raise CHOPVAR let normalize ({id=id; kind=eq_flag; body=e; constant =x} as eq) = if e = [] then begin match eq_flag with | EQUA -> if x =? zero then [] else begin add_event (CONSTANT_NOT_NUL(id,x)); raise UNSOLVABLE end | DISE -> if x <> zero then [] else begin add_event (CONSTANT_NUL id); raise UNSOLVABLE end | INEQ -> if x >=? zero then [] else begin add_event (CONSTANT_NEG(id,x)); raise UNSOLVABLE end end else let gcd = pgcd_l (List.map (fun f -> abs f.c) e) in if eq_flag=EQUA & x mod gcd <> zero then begin add_event (NOT_EXACT_DIVIDE (eq,gcd)); raise UNSOLVABLE end else if eq_flag=DISE & x mod gcd <> zero then begin add_event (FORGET_C eq.id); [] end else if gcd <> one then begin let c = floor_div x gcd in let d = x - c * gcd in let new_eq = {id=id; kind=eq_flag; constant=c; body=map_eq_linear (fun c -> c / gcd) e} in add_event (if eq_flag=EQUA or eq_flag = DISE then EXACT_DIVIDE(eq,gcd) else DIVIDE_AND_APPROX(eq,new_eq,gcd,d)); [new_eq] end else [eq] let eliminate_with_in new_eq_id {v=v;c=c_unite} eq2 ({body=e1; constant=c1} as eq1) = try let (f,_) = chop_var v e1 in let coeff = if c_unite=?one then neg f.c else if c_unite=? negone then f.c else failwith "eliminate_with_in" in let res = sum_afine new_eq_id eq1 (map_eq_afine (fun c -> c * coeff) eq2) in add_event (SUM (res.id,(one,eq1),(coeff,eq2))); res with CHOPVAR -> eq1 let omega_mod a b = a - b * floor_div (two * a + b) (two * b) let banerjee_step (new_eq_id,new_var_id,print_var) original l1 l2 = let e = original.body in let sigma = new_var_id () in let smallest,var = try List.fold_left (fun (v,p) c -> if v >? 
(abs c.c) then abs c.c,c.v else (v,p)) (abs (List.hd e).c, (List.hd e).v) (List.tl e) with Failure "tl" -> display_system print_var [original] ; failwith "TL" in let m = smallest + one in let new_eq = { constant = omega_mod original.constant m; body = {c= neg m;v=sigma} :: map_eq_linear (fun a -> omega_mod a m) original.body; id = new_eq_id (); kind = EQUA } in let definition = { constant = neg (floor_div (two * original.constant + m) (two * m)); body = map_eq_linear (fun a -> neg (floor_div (two * a + m) (two * m))) original.body; id = new_eq_id (); kind = EQUA } in add_event (STATE {st_new_eq = new_eq; st_def = definition; st_orig = original; st_coef = m; st_var = sigma}); let new_eq = List.hd (normalize new_eq) in let eliminated_var, def = chop_var var new_eq.body in let other_equations = Util.list_map_append (fun e -> normalize (eliminate_with_in new_eq_id eliminated_var new_eq e)) l1 in let inequations = Util.list_map_append (fun e -> normalize (eliminate_with_in new_eq_id eliminated_var new_eq e)) l2 in let original' = eliminate_with_in new_eq_id eliminated_var new_eq original in let mod_original = map_eq_afine (fun c -> c / m) original' in add_event (EXACT_DIVIDE (original',m)); List.hd (normalize mod_original),other_equations,inequations let rec eliminate_one_equation ((new_eq_id,new_var_id,print_var) as new_ids) (e,other,ineqs) = if !debug then display_system print_var (e::other); try let v,def = chop_factor_1 e.body in (Util.list_map_append (fun e' -> normalize (eliminate_with_in new_eq_id v e e')) other, Util.list_map_append (fun e' -> normalize (eliminate_with_in new_eq_id v e e')) ineqs) with FACTOR1 -> eliminate_one_equation new_ids (banerjee_step new_ids e other ineqs) let rec banerjee ((_,_,print_var) as new_ids) (sys_eq,sys_ineq) = let rec fst_eq_1 = function (eq::l) -> if List.exists (fun x -> abs x.c =? one) eq.body then eq,l else let (eq',l') = fst_eq_1 l in (eq',eq::l') | [] -> raise Not_found in match sys_eq with [] -> if !debug then display_system print_var sys_ineq; sys_ineq | (e1::rest) -> let eq,other = try fst_eq_1 sys_eq with Not_found -> (e1,rest) in if eq.body = [] then if eq.constant =? zero then begin add_event (FORGET_C eq.id); banerjee new_ids (other,sys_ineq) end else begin add_event (CONSTANT_NOT_NUL(eq.id,eq.constant)); raise UNSOLVABLE end else banerjee new_ids (eliminate_one_equation new_ids (eq,other,sys_ineq)) type kind = INVERTED | NORMAL let redundancy_elimination new_eq_id system = let normal = function ({body=f::_} as e) when f.c negate_eq e, INVERTED | e -> e,NORMAL in let table = Hashtbl.create 7 in List.iter (fun e -> let ({body=ne} as nx) ,kind = normal e in if ne = [] then if nx.constant let kept = if v.constant Some nx,optinvert end else begin match optinvert with Some v -> let _kept = if v.constant >? nx.constant then begin add_event (FORGET_I (v.id,nx.id));v end else begin add_event (FORGET_I (nx.id,v.id));nx end in (optnormal,Some(if v.constant >? nx.constant then v else nx)) | None -> optnormal,Some nx end in begin match final with (Some high, Some low) -> if high.constant () end; Hashtbl.remove table ne; Hashtbl.add table ne final with Not_found -> Hashtbl.add table ne (if kind = NORMAL then (Some nx,None) else (None,Some nx))) system; let accu_eq = ref [] in let accu_ineq = ref [] in Hashtbl.iter (fun p0 p1 -> match (p0,p1) with | (e, (Some x, Some y)) when x.constant =? 
y.constant -> let id=new_eq_id () in add_event (MERGE_EQ(id,x,y.id)); push {id=id; kind=EQUA; body=x.body; constant=x.constant} accu_eq | (e, (optnorm,optinvert)) -> begin match optnorm with Some x -> push x accu_ineq | _ -> () end; begin match optinvert with Some x -> push (negate_eq x) accu_ineq | _ -> () end) table; !accu_eq,!accu_ineq exception SOLVED_SYSTEM let select_variable system = let table = Hashtbl.create 7 in let push v c= try let r = Hashtbl.find table v in r := max !r (abs c) with Not_found -> Hashtbl.add table v (ref (abs c)) in List.iter (fun {body=l} -> List.iter (fun f -> push f.v f.c) l) system; let vmin,cmin = ref (-1), ref zero in let var_cpt = ref 0 in Hashtbl.iter (fun v ({contents = c}) -> incr var_cpt; if c try let f,eq' = chop_var v eq.body in if f.c >=? zero then (not_occ,((f.c,eq) :: below),over) else (not_occ,below,((neg f.c,eq) :: over)) with CHOPVAR -> (eq::not_occ,below,over)) ([],[],[]) system let product new_eq_id dark_shadow low high = List.fold_left (fun accu (a,eq1) -> List.fold_left (fun accu (b,eq2) -> let eq = sum_afine new_eq_id (map_eq_afine (fun c -> c * b) eq1) (map_eq_afine (fun c -> c * a) eq2) in add_event(SUM(eq.id,(b,eq1),(a,eq2))); match normalize eq with | [eq] -> let final_eq = if dark_shadow then let delta = (a - one) * (b - one) in add_event(WEAKEN(eq.id,delta)); {id = eq.id; kind=INEQ; body = eq.body; constant = eq.constant - delta} else eq in final_eq :: accu | (e::_) -> failwith "Product dardk" | [] -> accu) accu high) [] low let fourier_motzkin (new_eq_id,_,print_var) dark_shadow system = let v = select_variable system in let (ineq_out, ineq_low,ineq_high) = classify v system in let expanded = ineq_out @ product new_eq_id dark_shadow ineq_low ineq_high in if !debug then display_system print_var expanded; expanded let simplify ((new_eq_id,new_var_id,print_var) as new_ids) dark_shadow system = if List.exists (fun e -> e.kind = DISE) system then failwith "disequation in simplify"; clear_history (); List.iter (fun e -> add_event (HYP e)) system; let system = Util.list_map_append normalize system in let eqs,ineqs = List.partition (fun e -> e.kind=EQUA) system in let simp_eq,simp_ineq = redundancy_elimination new_eq_id ineqs in let system = (eqs @ simp_eq,simp_ineq) in let rec loop1a system = let sys_ineq = banerjee new_ids system in loop1b sys_ineq and loop1b sys_ineq = let simp_eq,simp_ineq = redundancy_elimination new_eq_id sys_ineq in if simp_eq = [] then simp_ineq else loop1a (simp_eq,simp_ineq) in let rec loop2 system = try let expanded = fourier_motzkin new_ids dark_shadow system in loop2 (loop1b expanded) with SOLVED_SYSTEM -> if !debug then display_system print_var system; system in loop2 (loop1a system) let rec depend relie_on accu = function | act :: l -> begin match act with | DIVIDE_AND_APPROX (e,_,_,_) -> if List.mem e.id relie_on then depend relie_on (act::accu) l else depend relie_on accu l | EXACT_DIVIDE (e,_) -> if List.mem e.id relie_on then depend relie_on (act::accu) l else depend relie_on accu l | WEAKEN (e,_) -> if List.mem e relie_on then depend relie_on (act::accu) l else depend relie_on accu l | SUM (e,(_,e1),(_,e2)) -> if List.mem e relie_on then depend (e1.id::e2.id::relie_on) (act::accu) l else depend relie_on accu l | STATE {st_new_eq=e;st_orig=o} -> if List.mem e.id relie_on then depend (o.id::relie_on) (act::accu) l else depend relie_on accu l | HYP e -> if List.mem e.id relie_on then depend relie_on (act::accu) l else depend relie_on accu l | FORGET_C _ -> depend relie_on accu l | FORGET _ -> depend 
relie_on accu l | FORGET_I _ -> depend relie_on accu l | MERGE_EQ (e,e1,e2) -> if List.mem e relie_on then depend (e1.id::e2::relie_on) (act::accu) l else depend relie_on accu l | NOT_EXACT_DIVIDE (e,_) -> depend (e.id::relie_on) (act::accu) l | CONTRADICTION (e1,e2) -> depend (e1.id::e2.id::relie_on) (act::accu) l | CONSTANT_NOT_NUL (e,_) -> depend (e::relie_on) (act::accu) l | CONSTANT_NEG (e,_) -> depend (e::relie_on) (act::accu) l | CONSTANT_NUL e -> depend (e::relie_on) (act::accu) l | NEGATE_CONTRADICT (e1,e2,_) -> depend (e1.id::e2.id::relie_on) (act::accu) l | SPLIT_INEQ _ -> failwith "depend" end | [] -> relie_on, accu (* let depend relie_on accu trace = Printf.printf "Longueur de la trace initiale : %d\n" (trace_length trace + trace_length accu); let rel',trace' = depend relie_on accu trace in Printf.printf "Longueur de la trace simplifie : %d\n" (trace_length trace'); rel',trace' *) let solve (new_eq_id,new_eq_var,print_var) system = try let _ = simplify new_eq_id false system in failwith "no contradiction" with UNSOLVABLE -> display_action print_var (snd (depend [] [] (history ()))) let negation (eqs,ineqs) = let diseq,_ = List.partition (fun e -> e.kind = DISE) ineqs in let normal = function | ({body=f::_} as e) when f.c negate_eq e, INVERTED | e -> e,NORMAL in let table = Hashtbl.create 7 in List.iter (fun e -> let {body=ne;constant=c} ,kind = normal e in Hashtbl.add table (ne,c) (kind,e)) diseq; List.iter (fun e -> assert (e.kind = EQUA); let {body=ne;constant=c},kind = normal e in try let (kind',e') = Hashtbl.find table (ne,c) in add_event (NEGATE_CONTRADICT (e,e',kind=kind')); raise UNSOLVABLE with Not_found -> ()) eqs exception FULL_SOLUTION of action list * int list let simplify_strong ((new_eq_id,new_var_id,print_var) as new_ids) system = clear_history (); List.iter (fun e -> add_event (HYP e)) system; (* Initial simplification phase *) let rec loop1a system = negation system; let sys_ineq = banerjee new_ids system in loop1b sys_ineq and loop1b sys_ineq = let dise,ine = List.partition (fun e -> e.kind = DISE) sys_ineq in let simp_eq,simp_ineq = redundancy_elimination new_eq_id ine in if simp_eq = [] then dise @ simp_ineq else loop1a (simp_eq,dise @ simp_ineq) in let rec loop2 system = try let expanded = fourier_motzkin new_ids false system in loop2 (loop1b expanded) with SOLVED_SYSTEM -> if !debug then display_system print_var system; system in let rec explode_diseq = function | (de::diseq,ineqs,expl_map) -> let id1 = new_eq_id () and id2 = new_eq_id () in let e1 = {id = id1; kind=INEQ; body = de.body; constant = de.constant -one} in let e2 = {id = id2; kind=INEQ; body = map_eq_linear neg de.body; constant = neg de.constant - one} in let new_sys = List.map (fun (what,sys) -> ((de.id,id1,true)::what, e1::sys)) ineqs @ List.map (fun (what,sys) -> ((de.id,id2,false)::what,e2::sys)) ineqs in explode_diseq (diseq,new_sys,(de.id,(de,id1,id2))::expl_map) | ([],ineqs,expl_map) -> ineqs,expl_map in try let system = Util.list_map_append normalize system in let eqs,ineqs = List.partition (fun e -> e.kind=EQUA) system in let dise,ine = List.partition (fun e -> e.kind = DISE) ineqs in let simp_eq,simp_ineq = redundancy_elimination new_eq_id ine in let system = (eqs @ simp_eq,simp_ineq @ dise) in let system' = loop1a system in let diseq,ineq = List.partition (fun e -> e.kind = DISE) system' in let first_segment = history () in let sys_exploded,explode_map = explode_diseq (diseq,[[],ineq],[]) in let all_solutions = List.map (fun (decomp,sys) -> clear_history (); try let _ = loop2 sys 
in raise NO_CONTRADICTION with UNSOLVABLE -> let relie_on,path = depend [] [] (history ()) in let dc,_ = List.partition (fun (_,id,_) -> List.mem id relie_on) decomp in let red = List.map (fun (x,_,_) -> x) dc in (red,relie_on,decomp,path)) sys_exploded in let max_count sys = let tbl = Hashtbl.create 7 in let augment x = try incr (Hashtbl.find tbl x) with Not_found -> Hashtbl.add tbl x (ref 1) in let eq = ref (-1) and c = ref 0 in List.iter (function | ([],r_on,_,path) -> raise (FULL_SOLUTION (path,r_on)) | (l,_,_,_) -> List.iter augment l) sys; Hashtbl.iter (fun x v -> if !v > !c then begin eq := x; c := !v end) tbl; !eq in let rec solve systems = try let id = max_count systems in let rec sign = function | ((id',_,b)::l) -> if id=id' then b else sign l | [] -> failwith "solve" in let s1,s2 = List.partition (fun (_,_,decomp,_) -> sign decomp) systems in let s1' = List.map (fun (dep,ro,dc,pa) -> (Util.list_except id dep,ro,dc,pa)) s1 in let s2' = List.map (fun (dep,ro,dc,pa) -> (Util.list_except id dep,ro,dc,pa)) s2 in let (r1,relie1) = solve s1' and (r2,relie2) = solve s2' in let (eq,id1,id2) = List.assoc id explode_map in [SPLIT_INEQ(eq,(id1,r1),(id2, r2))], eq.id :: Util.list_union relie1 relie2 with FULL_SOLUTION (x0,x1) -> (x0,x1) in let act,relie_on = solve all_solutions in snd(depend relie_on act first_segment) with UNSOLVABLE -> snd (depend [] [] (history ())) end coq-8.4pl2/plugins/omega/coq_omega.ml0000640000175000001440000017570712121620060016673 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let s = "Omega" ^ string_of_int !cpt in incr cpt; id_of_string s) let new_identifier_state = let cpt = ref 0 in (fun () -> let s = make_ident "State" (Some !cpt) in incr cpt; s) let new_identifier_var = let cpt = ref 0 in (fun () -> let s = "Zvar" ^ string_of_int !cpt in incr cpt; id_of_string s) let new_id = let cpt = ref 0 in fun () -> incr cpt; !cpt let new_var_num = let cpt = ref 1000 in (fun () -> incr cpt; !cpt) let new_var = let cpt = ref 0 in fun () -> incr cpt; Nameops.make_ident "WW" (Some !cpt) let display_var i = Printf.sprintf "X%d" i let intern_id,unintern_id = let cpt = ref 0 in let table = Hashtbl.create 7 and co_table = Hashtbl.create 7 in (fun (name : identifier) -> try Hashtbl.find table name with Not_found -> let idx = !cpt in Hashtbl.add table name idx; Hashtbl.add co_table idx name; incr cpt; idx), (fun idx -> try Hashtbl.find co_table idx with Not_found -> let v = new_var () in Hashtbl.add table v idx; Hashtbl.add co_table idx v; v) let mk_then = tclTHENLIST let exists_tac c = constructor_tac false (Some 1) 1 (Glob_term.ImplicitBindings [c]) let generalize_tac t = generalize_time (generalize t) let elim t = elim_time (simplest_elim t) let exact t = exact_time (Tactics.refine t) let unfold s = Tactics.unfold_in_concl [Termops.all_occurrences, Lazy.force s] let rev_assoc k = let rec loop = function | [] -> raise Not_found | (v,k')::_ when k = k' -> v | _ :: l -> loop l in loop let tag_hypothesis,tag_of_hyp, hyp_of_tag = let l = ref ([]:(identifier * int) list) in (fun h id -> l := (h,id):: !l), (fun h -> try List.assoc h !l with Not_found -> failwith "tag_hypothesis"), (fun h -> try rev_assoc h !l with Not_found -> failwith "tag_hypothesis") let hide_constr,find_constr,clear_tables,dump_tables = let l = ref ([]:(constr * (identifier * identifier * bool)) list) in (fun h id eg b -> l := (h,(id,eg,b)):: !l), (fun h -> try list_assoc_f eq_constr h !l with Not_found 
-> failwith "find_contr"), (fun () -> l := []), (fun () -> !l) (* Lazy evaluation is used for Coq constants, because this code is evaluated before the compiled modules are loaded. To use the constant Zplus, one must type "Lazy.force coq_Zplus" This is the right way to access to Coq constants in tactics ML code *) open Coqlib let logic_dir = ["Coq";"Logic";"Decidable"] let coq_modules = init_modules @arith_modules @ [logic_dir] @ zarith_base_modules @ [["Coq"; "omega"; "OmegaLemmas"]] let init_constant = gen_constant_in_modules "Omega" init_modules let constant = gen_constant_in_modules "Omega" coq_modules let z_constant = gen_constant_in_modules "Omega" [["Coq";"ZArith"]] let zbase_constant = gen_constant_in_modules "Omega" [["Coq";"ZArith";"BinInt"]] (* Zarith *) let coq_xH = lazy (constant "xH") let coq_xO = lazy (constant "xO") let coq_xI = lazy (constant "xI") let coq_Z0 = lazy (constant "Z0") let coq_Zpos = lazy (constant "Zpos") let coq_Zneg = lazy (constant "Zneg") let coq_Z = lazy (constant "Z") let coq_comparison = lazy (constant "comparison") let coq_Gt = lazy (constant "Gt") let coq_Zplus = lazy (zbase_constant "Z.add") let coq_Zmult = lazy (zbase_constant "Z.mul") let coq_Zopp = lazy (zbase_constant "Z.opp") let coq_Zminus = lazy (zbase_constant "Z.sub") let coq_Zsucc = lazy (zbase_constant "Z.succ") let coq_Zpred = lazy (zbase_constant "Z.pred") let coq_Zgt = lazy (zbase_constant "Z.gt") let coq_Zle = lazy (zbase_constant "Z.le") let coq_Z_of_nat = lazy (zbase_constant "Z.of_nat") let coq_inj_plus = lazy (z_constant "Nat2Z.inj_add") let coq_inj_mult = lazy (z_constant "Nat2Z.inj_mul") let coq_inj_minus1 = lazy (z_constant "Nat2Z.inj_sub") let coq_inj_minus2 = lazy (constant "inj_minus2") let coq_inj_S = lazy (z_constant "Nat2Z.inj_succ") let coq_inj_le = lazy (z_constant "Znat.inj_le") let coq_inj_lt = lazy (z_constant "Znat.inj_lt") let coq_inj_ge = lazy (z_constant "Znat.inj_ge") let coq_inj_gt = lazy (z_constant "Znat.inj_gt") let coq_inj_neq = lazy (z_constant "inj_neq") let coq_inj_eq = lazy (z_constant "inj_eq") let coq_fast_Zplus_assoc_reverse = lazy (constant "fast_Zplus_assoc_reverse") let coq_fast_Zplus_assoc = lazy (constant "fast_Zplus_assoc") let coq_fast_Zmult_assoc_reverse = lazy (constant "fast_Zmult_assoc_reverse") let coq_fast_Zplus_permute = lazy (constant "fast_Zplus_permute") let coq_fast_Zplus_comm = lazy (constant "fast_Zplus_comm") let coq_fast_Zmult_comm = lazy (constant "fast_Zmult_comm") let coq_Zmult_le_approx = lazy (constant "Zmult_le_approx") let coq_OMEGA1 = lazy (constant "OMEGA1") let coq_OMEGA2 = lazy (constant "OMEGA2") let coq_OMEGA3 = lazy (constant "OMEGA3") let coq_OMEGA4 = lazy (constant "OMEGA4") let coq_OMEGA5 = lazy (constant "OMEGA5") let coq_OMEGA6 = lazy (constant "OMEGA6") let coq_OMEGA7 = lazy (constant "OMEGA7") let coq_OMEGA8 = lazy (constant "OMEGA8") let coq_OMEGA9 = lazy (constant "OMEGA9") let coq_fast_OMEGA10 = lazy (constant "fast_OMEGA10") let coq_fast_OMEGA11 = lazy (constant "fast_OMEGA11") let coq_fast_OMEGA12 = lazy (constant "fast_OMEGA12") let coq_fast_OMEGA13 = lazy (constant "fast_OMEGA13") let coq_fast_OMEGA14 = lazy (constant "fast_OMEGA14") let coq_fast_OMEGA15 = lazy (constant "fast_OMEGA15") let coq_fast_OMEGA16 = lazy (constant "fast_OMEGA16") let coq_OMEGA17 = lazy (constant "OMEGA17") let coq_OMEGA18 = lazy (constant "OMEGA18") let coq_OMEGA19 = lazy (constant "OMEGA19") let coq_OMEGA20 = lazy (constant "OMEGA20") let coq_fast_Zred_factor0 = lazy (constant "fast_Zred_factor0") let coq_fast_Zred_factor1 = 
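(* Illustration of the scheme described above: a constant is only looked up when the tactic actually runs, e.g. [coq_Zplus] above is built with [zbase_constant] on the kernel name of [Z.add], and it is resolved by [Lazy.force coq_Zplus] inside [mk_plus] further below, once the relevant ZArith modules have been loaded. *)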
lazy (constant "fast_Zred_factor1") let coq_fast_Zred_factor2 = lazy (constant "fast_Zred_factor2") let coq_fast_Zred_factor3 = lazy (constant "fast_Zred_factor3") let coq_fast_Zred_factor4 = lazy (constant "fast_Zred_factor4") let coq_fast_Zred_factor5 = lazy (constant "fast_Zred_factor5") let coq_fast_Zred_factor6 = lazy (constant "fast_Zred_factor6") let coq_fast_Zmult_plus_distr_l = lazy (constant "fast_Zmult_plus_distr_l") let coq_fast_Zmult_opp_comm = lazy (constant "fast_Zmult_opp_comm") let coq_fast_Zopp_plus_distr = lazy (constant "fast_Zopp_plus_distr") let coq_fast_Zopp_mult_distr_r = lazy (constant "fast_Zopp_mult_distr_r") let coq_fast_Zopp_eq_mult_neg_1 = lazy (constant "fast_Zopp_eq_mult_neg_1") let coq_fast_Zopp_involutive = lazy (constant "fast_Zopp_involutive") let coq_Zegal_left = lazy (constant "Zegal_left") let coq_Zne_left = lazy (constant "Zne_left") let coq_Zlt_left = lazy (constant "Zlt_left") let coq_Zge_left = lazy (constant "Zge_left") let coq_Zgt_left = lazy (constant "Zgt_left") let coq_Zle_left = lazy (constant "Zle_left") let coq_new_var = lazy (constant "new_var") let coq_intro_Z = lazy (constant "intro_Z") let coq_dec_eq = lazy (zbase_constant "Z.eq_decidable") let coq_dec_Zne = lazy (constant "dec_Zne") let coq_dec_Zle = lazy (zbase_constant "Z.le_decidable") let coq_dec_Zlt = lazy (zbase_constant "Z.lt_decidable") let coq_dec_Zgt = lazy (constant "dec_Zgt") let coq_dec_Zge = lazy (constant "dec_Zge") let coq_not_Zeq = lazy (constant "not_Zeq") let coq_not_Zne = lazy (constant "not_Zne") let coq_Znot_le_gt = lazy (constant "Znot_le_gt") let coq_Znot_lt_ge = lazy (constant "Znot_lt_ge") let coq_Znot_ge_lt = lazy (constant "Znot_ge_lt") let coq_Znot_gt_le = lazy (constant "Znot_gt_le") let coq_neq = lazy (constant "neq") let coq_Zne = lazy (constant "Zne") let coq_Zle = lazy (zbase_constant "Z.le") let coq_Zgt = lazy (zbase_constant "Z.gt") let coq_Zge = lazy (zbase_constant "Z.ge") let coq_Zlt = lazy (zbase_constant "Z.lt") (* Peano/Datatypes *) let coq_le = lazy (init_constant "le") let coq_lt = lazy (init_constant "lt") let coq_ge = lazy (init_constant "ge") let coq_gt = lazy (init_constant "gt") let coq_minus = lazy (init_constant "minus") let coq_plus = lazy (init_constant "plus") let coq_mult = lazy (init_constant "mult") let coq_pred = lazy (init_constant "pred") let coq_nat = lazy (init_constant "nat") let coq_S = lazy (init_constant "S") let coq_O = lazy (init_constant "O") (* Compare_dec/Peano_dec/Minus *) let coq_pred_of_minus = lazy (constant "pred_of_minus") let coq_le_gt_dec = lazy (constant "le_gt_dec") let coq_dec_eq_nat = lazy (constant "dec_eq_nat") let coq_dec_le = lazy (constant "dec_le") let coq_dec_lt = lazy (constant "dec_lt") let coq_dec_ge = lazy (constant "dec_ge") let coq_dec_gt = lazy (constant "dec_gt") let coq_not_eq = lazy (constant "not_eq") let coq_not_le = lazy (constant "not_le") let coq_not_lt = lazy (constant "not_lt") let coq_not_ge = lazy (constant "not_ge") let coq_not_gt = lazy (constant "not_gt") (* Logic/Decidable *) let coq_eq_ind_r = lazy (constant "eq_ind_r") let coq_dec_or = lazy (constant "dec_or") let coq_dec_and = lazy (constant "dec_and") let coq_dec_imp = lazy (constant "dec_imp") let coq_dec_iff = lazy (constant "dec_iff") let coq_dec_not = lazy (constant "dec_not") let coq_dec_False = lazy (constant "dec_False") let coq_dec_not_not = lazy (constant "dec_not_not") let coq_dec_True = lazy (constant "dec_True") let coq_not_or = lazy (constant "not_or") let coq_not_and = lazy (constant "not_and") let 
coq_not_imp = lazy (constant "not_imp") let coq_not_iff = lazy (constant "not_iff") let coq_not_not = lazy (constant "not_not") let coq_imp_simp = lazy (constant "imp_simp") let coq_iff = lazy (constant "iff") (* uses build_coq_and, build_coq_not, build_coq_or, build_coq_ex *) (* For unfold *) open Closure let evaluable_ref_of_constr s c = match kind_of_term (Lazy.force c) with | Const kn when Tacred.is_evaluable (Global.env()) (EvalConstRef kn) -> EvalConstRef kn | _ -> anomaly ("Coq_omega: "^s^" is not an evaluable constant") let sp_Zsucc = lazy (evaluable_ref_of_constr "Z.succ" coq_Zsucc) let sp_Zpred = lazy (evaluable_ref_of_constr "Z.pred" coq_Zpred) let sp_Zminus = lazy (evaluable_ref_of_constr "Z.sub" coq_Zminus) let sp_Zle = lazy (evaluable_ref_of_constr "Z.le" coq_Zle) let sp_Zgt = lazy (evaluable_ref_of_constr "Z.gt" coq_Zgt) let sp_Zge = lazy (evaluable_ref_of_constr "Z.ge" coq_Zge) let sp_Zlt = lazy (evaluable_ref_of_constr "Z.lt" coq_Zlt) let sp_not = lazy (evaluable_ref_of_constr "not" (lazy (build_coq_not ()))) let mk_var v = mkVar (id_of_string v) let mk_plus t1 t2 = mkApp (Lazy.force coq_Zplus, [| t1; t2 |]) let mk_times t1 t2 = mkApp (Lazy.force coq_Zmult, [| t1; t2 |]) let mk_minus t1 t2 = mkApp (Lazy.force coq_Zminus, [| t1;t2 |]) let mk_eq t1 t2 = mkApp (build_coq_eq (), [| Lazy.force coq_Z; t1; t2 |]) let mk_le t1 t2 = mkApp (Lazy.force coq_Zle, [| t1; t2 |]) let mk_gt t1 t2 = mkApp (Lazy.force coq_Zgt, [| t1; t2 |]) let mk_inv t = mkApp (Lazy.force coq_Zopp, [| t |]) let mk_and t1 t2 = mkApp (build_coq_and (), [| t1; t2 |]) let mk_or t1 t2 = mkApp (build_coq_or (), [| t1; t2 |]) let mk_not t = mkApp (build_coq_not (), [| t |]) let mk_eq_rel t1 t2 = mkApp (build_coq_eq (), [| Lazy.force coq_comparison; t1; t2 |]) let mk_inj t = mkApp (Lazy.force coq_Z_of_nat, [| t |]) let mk_integer n = let rec loop n = if n =? one then Lazy.force coq_xH else mkApp((if n mod two =? zero then Lazy.force coq_xO else Lazy.force coq_xI), [| loop (n/two) |]) in if n =? zero then Lazy.force coq_Z0 else mkApp ((if n >? zero then Lazy.force coq_Zpos else Lazy.force coq_Zneg), [| loop (abs n) |]) type omega_constant = | Zplus | Zmult | Zminus | Zsucc | Zopp | Zpred | Plus | Mult | Minus | Pred | S | O | Zpos | Zneg | Z0 | Z_of_nat | Eq | Neq | Zne | Zle | Zlt | Zge | Zgt | Z | Nat | And | Or | False | True | Not | Iff | Le | Lt | Ge | Gt | Other of string type omega_proposition = | Keq of constr * constr * constr | Kn type result = | Kvar of identifier | Kapp of omega_constant * constr list | Kimp of constr * constr | Kufo (* Nota: Kimp correspond to a binder (Prod), but hopefully we won't have to bother with term lifting: Kimp will correspond to anonymous product, for which (Rel 1) doesn't occur in the right term. Moreover, we'll work on fully introduced goals, hence no Rel's in the term parts that we manipulate, but rather Var's. 
Said otherwise: all constr manipulated here are closed *) let destructurate_prop t = let c, args = decompose_app t in match kind_of_term c, args with | _, [_;_;_] when eq_constr c (build_coq_eq ()) -> Kapp (Eq,args) | _, [_;_] when eq_constr c (Lazy.force coq_neq) -> Kapp (Neq,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zne) -> Kapp (Zne,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zle) -> Kapp (Zle,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zlt) -> Kapp (Zlt,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zge) -> Kapp (Zge,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zgt) -> Kapp (Zgt,args) | _, [_;_] when eq_constr c (build_coq_and ()) -> Kapp (And,args) | _, [_;_] when eq_constr c (build_coq_or ()) -> Kapp (Or,args) | _, [_;_] when eq_constr c (Lazy.force coq_iff) -> Kapp (Iff, args) | _, [_] when eq_constr c (build_coq_not ()) -> Kapp (Not,args) | _, [] when eq_constr c (build_coq_False ()) -> Kapp (False,args) | _, [] when eq_constr c (build_coq_True ()) -> Kapp (True,args) | _, [_;_] when eq_constr c (Lazy.force coq_le) -> Kapp (Le,args) | _, [_;_] when eq_constr c (Lazy.force coq_lt) -> Kapp (Lt,args) | _, [_;_] when eq_constr c (Lazy.force coq_ge) -> Kapp (Ge,args) | _, [_;_] when eq_constr c (Lazy.force coq_gt) -> Kapp (Gt,args) | Const sp, args -> Kapp (Other (string_of_path (path_of_global (ConstRef sp))),args) | Construct csp , args -> Kapp (Other (string_of_path (path_of_global (ConstructRef csp))), args) | Ind isp, args -> Kapp (Other (string_of_path (path_of_global (IndRef isp))),args) | Var id,[] -> Kvar id | Prod (Anonymous,typ,body), [] -> Kimp(typ,body) | Prod (Name _,_,_),[] -> error "Omega: Not a quantifier-free goal" | _ -> Kufo let destructurate_type t = let c, args = decompose_app t in match kind_of_term c, args with | _, [] when eq_constr c (Lazy.force coq_Z) -> Kapp (Z,args) | _, [] when eq_constr c (Lazy.force coq_nat) -> Kapp (Nat,args) | _ -> Kufo let destructurate_term t = let c, args = decompose_app t in match kind_of_term c, args with | _, [_;_] when eq_constr c (Lazy.force coq_Zplus) -> Kapp (Zplus,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zmult) -> Kapp (Zmult,args) | _, [_;_] when eq_constr c (Lazy.force coq_Zminus) -> Kapp (Zminus,args) | _, [_] when eq_constr c (Lazy.force coq_Zsucc) -> Kapp (Zsucc,args) | _, [_] when eq_constr c (Lazy.force coq_Zpred) -> Kapp (Zpred,args) | _, [_] when eq_constr c (Lazy.force coq_Zopp) -> Kapp (Zopp,args) | _, [_;_] when eq_constr c (Lazy.force coq_plus) -> Kapp (Plus,args) | _, [_;_] when eq_constr c (Lazy.force coq_mult) -> Kapp (Mult,args) | _, [_;_] when eq_constr c (Lazy.force coq_minus) -> Kapp (Minus,args) | _, [_] when eq_constr c (Lazy.force coq_pred) -> Kapp (Pred,args) | _, [_] when eq_constr c (Lazy.force coq_S) -> Kapp (S,args) | _, [] when eq_constr c (Lazy.force coq_O) -> Kapp (O,args) | _, [_] when eq_constr c (Lazy.force coq_Zpos) -> Kapp (Zpos,args) | _, [_] when eq_constr c (Lazy.force coq_Zneg) -> Kapp (Zneg,args) | _, [] when eq_constr c (Lazy.force coq_Z0) -> Kapp (Z0,args) | _, [_] when eq_constr c (Lazy.force coq_Z_of_nat) -> Kapp (Z_of_nat,args) | Var id,[] -> Kvar id | _ -> Kufo let recognize_number t = let rec loop t = match decompose_app t with | f, [t] when eq_constr f (Lazy.force coq_xI) -> one + two * loop t | f, [t] when eq_constr f (Lazy.force coq_xO) -> two * loop t | f, [] when eq_constr f (Lazy.force coq_xH) -> one | _ -> failwith "not a number" in match decompose_app t with | f, [t] when eq_constr f (Lazy.force coq_Zpos) -> loop t | f, [t] when
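(* Worked example: the integer 6 is encoded as [Zpos (xO (xI xH))]; [mk_integer] above produces exactly this term, and the [loop] just defined decodes it back, since [loop (xO t) = 2 * loop t] and [loop (xI t) = 2 * loop t + 1]. *)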
eq_constr f (Lazy.force coq_Zneg) -> neg (loop t) | f, [] when eq_constr f (Lazy.force coq_Z0) -> zero | _ -> failwith "not a number" type constr_path = | P_APP of int (* Abstraction and product *) | P_BODY | P_TYPE (* Case *) | P_BRANCH of int | P_ARITY | P_ARG let context operation path (t : constr) = let rec loop i p0 t = match (p0,kind_of_term t) with | (p, Cast (c,k,t)) -> mkCast (loop i p c,k,t) | ([], _) -> operation i t | ((P_APP n :: p), App (f,v)) -> let v' = Array.copy v in v'.(pred n) <- loop i p v'.(pred n); mkApp (f, v') | ((P_BRANCH n :: p), Case (ci,q,c,v)) -> (* avant, y avait mkApp... anyway, BRANCH seems nowhere used *) let v' = Array.copy v in v'.(n) <- loop i p v'.(n); (mkCase (ci,q,c,v')) | ((P_ARITY :: p), App (f,l)) -> appvect (loop i p f,l) | ((P_ARG :: p), App (f,v)) -> let v' = Array.copy v in v'.(0) <- loop i p v'.(0); mkApp (f,v') | (p, Fix ((_,n as ln),(tys,lna,v))) -> let l = Array.length v in let v' = Array.copy v in v'.(n)<- loop (Pervasives.(+) i l) p v.(n); (mkFix (ln,(tys,lna,v'))) | ((P_BODY :: p), Prod (n,t,c)) -> (mkProd (n,t,loop (succ i) p c)) | ((P_BODY :: p), Lambda (n,t,c)) -> (mkLambda (n,t,loop (succ i) p c)) | ((P_BODY :: p), LetIn (n,b,t,c)) -> (mkLetIn (n,b,t,loop (succ i) p c)) | ((P_TYPE :: p), Prod (n,t,c)) -> (mkProd (n,loop i p t,c)) | ((P_TYPE :: p), Lambda (n,t,c)) -> (mkLambda (n,loop i p t,c)) | ((P_TYPE :: p), LetIn (n,b,t,c)) -> (mkLetIn (n,b,loop i p t,c)) | (p, _) -> ppnl (Printer.pr_lconstr t); failwith ("abstract_path " ^ string_of_int(List.length p)) in loop 1 path t let occurence path (t : constr) = let rec loop p0 t = match (p0,kind_of_term t) with | (p, Cast (c,_,_)) -> loop p c | ([], _) -> t | ((P_APP n :: p), App (f,v)) -> loop p v.(pred n) | ((P_BRANCH n :: p), Case (_,_,_,v)) -> loop p v.(n) | ((P_ARITY :: p), App (f,_)) -> loop p f | ((P_ARG :: p), App (f,v)) -> loop p v.(0) | (p, Fix((_,n) ,(_,_,v))) -> loop p v.(n) | ((P_BODY :: p), Prod (n,t,c)) -> loop p c | ((P_BODY :: p), Lambda (n,t,c)) -> loop p c | ((P_BODY :: p), LetIn (n,b,t,c)) -> loop p c | ((P_TYPE :: p), Prod (n,term,c)) -> loop p term | ((P_TYPE :: p), Lambda (n,term,c)) -> loop p term | ((P_TYPE :: p), LetIn (n,b,term,c)) -> loop p term | (p, _) -> ppnl (Printer.pr_lconstr t); failwith ("occurence " ^ string_of_int(List.length p)) in loop path t let abstract_path typ path t = let term_occur = ref (mkRel 0) in let abstract = context (fun i t -> term_occur:= t; mkRel i) path t in mkLambda (Name (id_of_string "x"), typ, abstract), !term_occur let focused_simpl path gl = let newc = context (fun i t -> pf_nf gl t) (List.rev path) (pf_concl gl) in convert_concl_no_check newc DEFAULTcast gl let focused_simpl path = simpl_time (focused_simpl path) type oformula = | Oplus of oformula * oformula | Oinv of oformula | Otimes of oformula * oformula | Oatom of identifier | Oz of bigint | Oufo of constr let rec oprint = function | Oplus(t1,t2) -> print_string "("; oprint t1; print_string "+"; oprint t2; print_string ")" | Oinv t -> print_string "~"; oprint t | Otimes (t1,t2) -> print_string "("; oprint t1; print_string "*"; oprint t2; print_string ")" | Oatom s -> print_string (string_of_id s) | Oz i -> print_string (string_of_bigint i) | Oufo f -> print_string "?" 
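(* Illustration of the [oformula] encoding: the affine expression [x * 2 + 1] is represented as [Oplus (Otimes (Oatom x, Oz two), Oz one)]; [val_of] below maps such a representation back to the corresponding [constr] built from [coq_Zplus] and [coq_Zmult]. *)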
let rec weight = function | Oatom c -> intern_id c | Oz _ -> -1 | Oinv c -> weight c | Otimes(c,_) -> weight c | Oplus _ -> failwith "weight" | Oufo _ -> -1 let rec val_of = function | Oatom c -> mkVar c | Oz c -> mk_integer c | Oinv c -> mkApp (Lazy.force coq_Zopp, [| val_of c |]) | Otimes (t1,t2) -> mkApp (Lazy.force coq_Zmult, [| val_of t1; val_of t2 |]) | Oplus(t1,t2) -> mkApp (Lazy.force coq_Zplus, [| val_of t1; val_of t2 |]) | Oufo c -> c let compile name kind = let rec loop accu = function | Oplus(Otimes(Oatom v,Oz n),r) -> loop ({v=intern_id v; c=n} :: accu) r | Oz n -> let id = new_id () in tag_hypothesis name id; {kind = kind; body = List.rev accu; constant = n; id = id} | _ -> anomaly "compile_equation" in loop [] let rec decompile af = let rec loop = function | ({v=v; c=n}::r) -> Oplus(Otimes(Oatom (unintern_id v),Oz n),loop r) | [] -> Oz af.constant in loop af.body let mkNewMeta () = mkMeta (Evarutil.new_meta()) let clever_rewrite_base_poly typ p result theorem gl = let full = pf_concl gl in let (abstracted,occ) = abstract_path typ (List.rev p) full in let t = applist (mkLambda (Name (id_of_string "P"), mkArrow typ mkProp, mkLambda (Name (id_of_string "H"), applist (mkRel 1,[result]), mkApp (Lazy.force coq_eq_ind_r, [| typ; result; mkRel 2; mkRel 1; occ; theorem |]))), [abstracted]) in exact (applist(t,[mkNewMeta()])) gl let clever_rewrite_base p result theorem gl = clever_rewrite_base_poly (Lazy.force coq_Z) p result theorem gl let clever_rewrite_base_nat p result theorem gl = clever_rewrite_base_poly (Lazy.force coq_nat) p result theorem gl let clever_rewrite_gen p result (t,args) = let theorem = applist(t, args) in clever_rewrite_base p result theorem let clever_rewrite_gen_nat p result (t,args) = let theorem = applist(t, args) in clever_rewrite_base_nat p result theorem let clever_rewrite p vpath t gl = let full = pf_concl gl in let (abstracted,occ) = abstract_path (Lazy.force coq_Z) (List.rev p) full in let vargs = List.map (fun p -> occurence p occ) vpath in let t' = applist(t, (vargs @ [abstracted])) in exact (applist(t',[mkNewMeta()])) gl let rec shuffle p (t1,t2) = match t1,t2 with | Oplus(l1,r1), Oplus(l2,r2) -> if weight l1 > weight l2 then let (tac,t') = shuffle (P_APP 2 :: p) (r1,t2) in (clever_rewrite p [[P_APP 1;P_APP 1]; [P_APP 1; P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zplus_assoc_reverse) :: tac, Oplus(l1,t')) else let (tac,t') = shuffle (P_APP 2 :: p) (t1,r2) in (clever_rewrite p [[P_APP 1];[P_APP 2;P_APP 1];[P_APP 2;P_APP 2]] (Lazy.force coq_fast_Zplus_permute) :: tac, Oplus(l2,t')) | Oplus(l1,r1), t2 -> if weight l1 > weight t2 then let (tac,t') = shuffle (P_APP 2 :: p) (r1,t2) in clever_rewrite p [[P_APP 1;P_APP 1]; [P_APP 1; P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zplus_assoc_reverse) :: tac, Oplus(l1, t') else [clever_rewrite p [[P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zplus_comm)], Oplus(t2,t1) | t1,Oplus(l2,r2) -> if weight l2 > weight t1 then let (tac,t') = shuffle (P_APP 2 :: p) (t1,r2) in clever_rewrite p [[P_APP 1];[P_APP 2;P_APP 1];[P_APP 2;P_APP 2]] (Lazy.force coq_fast_Zplus_permute) :: tac, Oplus(l2,t') else [],Oplus(t1,t2) | Oz t1,Oz t2 -> [focused_simpl p], Oz(Bigint.add t1 t2) | t1,t2 -> if weight t1 < weight t2 then [clever_rewrite p [[P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zplus_comm)], Oplus(t2,t1) else [],Oplus(t1,t2) let rec shuffle_mult p_init k1 e1 k2 e2 = let rec loop p = function | (({c=c1;v=v1}::l1) as l1'),(({c=c2;v=v2}::l2) as l2') -> if v1 = v2 then let tac = clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1; P_APP 1]; [P_APP 
1; P_APP 1; P_APP 1; P_APP 2]; [P_APP 2; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1; P_APP 1; P_APP 2]; [P_APP 2; P_APP 1; P_APP 2]; [P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]] (Lazy.force coq_fast_OMEGA10) in if Bigint.add (Bigint.mult k1 c1) (Bigint.mult k2 c2) =? zero then let tac' = clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zred_factor5) in tac :: focused_simpl (P_APP 1::P_APP 2:: p) :: tac' :: loop p (l1,l2) else tac :: loop (P_APP 2 :: p) (l1,l2) else if v1 > v2 then clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1; P_APP 1]; [P_APP 1; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1; P_APP 1; P_APP 2]; [P_APP 2]; [P_APP 1; P_APP 2]] (Lazy.force coq_fast_OMEGA11) :: loop (P_APP 2 :: p) (l1,l2') else clever_rewrite p [[P_APP 2; P_APP 1; P_APP 1; P_APP 1]; [P_APP 2; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1]; [P_APP 2; P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]] (Lazy.force coq_fast_OMEGA12) :: loop (P_APP 2 :: p) (l1',l2) | ({c=c1;v=v1}::l1), [] -> clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1; P_APP 1]; [P_APP 1; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1; P_APP 1; P_APP 2]; [P_APP 2]; [P_APP 1; P_APP 2]] (Lazy.force coq_fast_OMEGA11) :: loop (P_APP 2 :: p) (l1,[]) | [],({c=c2;v=v2}::l2) -> clever_rewrite p [[P_APP 2; P_APP 1; P_APP 1; P_APP 1]; [P_APP 2; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1]; [P_APP 2; P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]] (Lazy.force coq_fast_OMEGA12) :: loop (P_APP 2 :: p) ([],l2) | [],[] -> [focused_simpl p_init] in loop p_init (e1,e2) let rec shuffle_mult_right p_init e1 k2 e2 = let rec loop p = function | (({c=c1;v=v1}::l1) as l1'),(({c=c2;v=v2}::l2) as l2') -> if v1 = v2 then let tac = clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1]; [P_APP 1; P_APP 1; P_APP 2]; [P_APP 2; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1; P_APP 2]; [P_APP 2; P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]] (Lazy.force coq_fast_OMEGA15) in if Bigint.add c1 (Bigint.mult k2 c2) =? zero then let tac' = clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zred_factor5) in tac :: focused_simpl (P_APP 1::P_APP 2:: p) :: tac' :: loop p (l1,l2) else tac :: loop (P_APP 2 :: p) (l1,l2) else if v1 > v2 then clever_rewrite p [[P_APP 1;P_APP 1]; [P_APP 1; P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zplus_assoc_reverse) :: loop (P_APP 2 :: p) (l1,l2') else clever_rewrite p [[P_APP 2; P_APP 1; P_APP 1; P_APP 1]; [P_APP 2; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1]; [P_APP 2; P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]] (Lazy.force coq_fast_OMEGA12) :: loop (P_APP 2 :: p) (l1',l2) | ({c=c1;v=v1}::l1), [] -> clever_rewrite p [[P_APP 1;P_APP 1]; [P_APP 1; P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zplus_assoc_reverse) :: loop (P_APP 2 :: p) (l1,[]) | [],({c=c2;v=v2}::l2) -> clever_rewrite p [[P_APP 2; P_APP 1; P_APP 1; P_APP 1]; [P_APP 2; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1]; [P_APP 2; P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]] (Lazy.force coq_fast_OMEGA12) :: loop (P_APP 2 :: p) ([],l2) | [],[] -> [focused_simpl p_init] in loop p_init (e1,e2) let rec shuffle_cancel p = function | [] -> [focused_simpl p] | ({c=c1}::l1) -> let tac = clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1];[P_APP 1; P_APP 2]; [P_APP 2; P_APP 2]; [P_APP 1; P_APP 1; P_APP 2; P_APP 1]] (if c1 >? 
zero then (Lazy.force coq_fast_OMEGA13) else (Lazy.force coq_fast_OMEGA14)) in tac :: shuffle_cancel p l1 let rec scalar p n = function | Oplus(t1,t2) -> let tac1,t1' = scalar (P_APP 1 :: p) n t1 and tac2,t2' = scalar (P_APP 2 :: p) n t2 in clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 1;P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zmult_plus_distr_l) :: (tac1 @ tac2), Oplus(t1',t2') | Oinv t -> [clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zmult_opp_comm); focused_simpl (P_APP 2 :: p)], Otimes(t,Oz(neg n)) | Otimes(t1,Oz x) -> [clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 1;P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zmult_assoc_reverse); focused_simpl (P_APP 2 :: p)], Otimes(t1,Oz (n*x)) | Otimes(t1,t2) -> error "Omega: Can't solve a goal with non-linear products" | (Oatom _ as t) -> [], Otimes(t,Oz n) | Oz i -> [focused_simpl p],Oz(n*i) | Oufo c -> [], Oufo (mkApp (Lazy.force coq_Zmult, [| mk_integer n; c |])) let rec scalar_norm p_init = let rec loop p = function | [] -> [focused_simpl p_init] | (_::l) -> clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1];[P_APP 1; P_APP 1; P_APP 2]; [P_APP 1; P_APP 2];[P_APP 2]] (Lazy.force coq_fast_OMEGA16) :: loop (P_APP 2 :: p) l in loop p_init let rec norm_add p_init = let rec loop p = function | [] -> [focused_simpl p_init] | _:: l -> clever_rewrite p [[P_APP 1;P_APP 1]; [P_APP 1; P_APP 2];[P_APP 2]] (Lazy.force coq_fast_Zplus_assoc_reverse) :: loop (P_APP 2 :: p) l in loop p_init let rec scalar_norm_add p_init = let rec loop p = function | [] -> [focused_simpl p_init] | _ :: l -> clever_rewrite p [[P_APP 1; P_APP 1; P_APP 1; P_APP 1]; [P_APP 1; P_APP 1; P_APP 1; P_APP 2]; [P_APP 1; P_APP 1; P_APP 2]; [P_APP 2]; [P_APP 1; P_APP 2]] (Lazy.force coq_fast_OMEGA11) :: loop (P_APP 2 :: p) l in loop p_init let rec negate p = function | Oplus(t1,t2) -> let tac1,t1' = negate (P_APP 1 :: p) t1 and tac2,t2' = negate (P_APP 2 :: p) t2 in clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 1;P_APP 2]] (Lazy.force coq_fast_Zopp_plus_distr) :: (tac1 @ tac2), Oplus(t1',t2') | Oinv t -> [clever_rewrite p [[P_APP 1;P_APP 1]] (Lazy.force coq_fast_Zopp_involutive)], t | Otimes(t1,Oz x) -> [clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 1;P_APP 2]] (Lazy.force coq_fast_Zopp_mult_distr_r); focused_simpl (P_APP 2 :: p)], Otimes(t1,Oz (neg x)) | Otimes(t1,t2) -> error "Omega: Can't solve a goal with non-linear products" | (Oatom _ as t) -> let r = Otimes(t,Oz(negone)) in [clever_rewrite p [[P_APP 1]] (Lazy.force coq_fast_Zopp_eq_mult_neg_1)], r | Oz i -> [focused_simpl p],Oz(neg i) | Oufo c -> [], Oufo (mkApp (Lazy.force coq_Zopp, [| c |])) let rec transform p t = let default isnat t' = try let v,th,_ = find_constr t' in [clever_rewrite_base p (mkVar v) (mkVar th)], Oatom v with e when Errors.noncritical e -> let v = new_identifier_var () and th = new_identifier () in hide_constr t' v th isnat; [clever_rewrite_base p (mkVar v) (mkVar th)], Oatom v in try match destructurate_term t with | Kapp(Zplus,[t1;t2]) -> let tac1,t1' = transform (P_APP 1 :: p) t1 and tac2,t2' = transform (P_APP 2 :: p) t2 in let tac,t' = shuffle p (t1',t2') in tac1 @ tac2 @ tac, t' | Kapp(Zminus,[t1;t2]) -> let tac,t = transform p (mkApp (Lazy.force coq_Zplus, [| t1; (mkApp (Lazy.force coq_Zopp, [| t2 |])) |])) in unfold sp_Zminus :: tac,t | Kapp(Zsucc,[t1]) -> let tac,t = transform p (mkApp (Lazy.force coq_Zplus, [| t1; mk_integer one |])) in unfold sp_Zsucc :: tac,t | Kapp(Zpred,[t1]) -> let tac,t = transform p (mkApp (Lazy.force coq_Zplus, [| t1; mk_integer negone |])) in unfold sp_Zpred :: tac,t | 
Kapp(Zmult,[t1;t2]) -> let tac1,t1' = transform (P_APP 1 :: p) t1 and tac2,t2' = transform (P_APP 2 :: p) t2 in begin match t1',t2' with | (_,Oz n) -> let tac,t' = scalar p n t1' in tac1 @ tac2 @ tac,t' | (Oz n,_) -> let sym = clever_rewrite p [[P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zmult_comm) in let tac,t' = scalar p n t2' in tac1 @ tac2 @ (sym :: tac),t' | _ -> default false t end | Kapp((Zpos|Zneg|Z0),_) -> (try ([],Oz(recognize_number t)) with e when Errors.noncritical e -> default false t) | Kvar s -> [],Oatom s | Kapp(Zopp,[t]) -> let tac,t' = transform (P_APP 1 :: p) t in let tac',t'' = negate p t' in tac @ tac', t'' | Kapp(Z_of_nat,[t']) -> default true t' | _ -> default false t with e when catchable_exception e -> default false t let shrink_pair p f1 f2 = match f1,f2 with | Oatom v,Oatom _ -> let r = Otimes(Oatom v,Oz two) in clever_rewrite p [[P_APP 1]] (Lazy.force coq_fast_Zred_factor1), r | Oatom v, Otimes(_,c2) -> let r = Otimes(Oatom v,Oplus(c2,Oz one)) in clever_rewrite p [[P_APP 1];[P_APP 2;P_APP 2]] (Lazy.force coq_fast_Zred_factor2), r | Otimes (v1,c1),Oatom v -> let r = Otimes(Oatom v,Oplus(c1,Oz one)) in clever_rewrite p [[P_APP 2];[P_APP 1;P_APP 2]] (Lazy.force coq_fast_Zred_factor3), r | Otimes (Oatom v,c1),Otimes (v2,c2) -> let r = Otimes(Oatom v,Oplus(c1,c2)) in clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 1;P_APP 2];[P_APP 2;P_APP 2]] (Lazy.force coq_fast_Zred_factor4),r | t1,t2 -> begin oprint t1; print_newline (); oprint t2; print_newline (); flush Pervasives.stdout; error "shrink.1" end let reduce_factor p = function | Oatom v -> let r = Otimes(Oatom v,Oz one) in [clever_rewrite p [[]] (Lazy.force coq_fast_Zred_factor0)],r | Otimes(Oatom v,Oz n) as f -> [],f | Otimes(Oatom v,c) -> let rec compute = function | Oz n -> n | Oplus(t1,t2) -> Bigint.add (compute t1) (compute t2) | _ -> error "condense.1" in [focused_simpl (P_APP 2 :: p)], Otimes(Oatom v,Oz(compute c)) | t -> oprint t; error "reduce_factor.1" let rec condense p = function | Oplus(f1,(Oplus(f2,r) as t)) -> if weight f1 = weight f2 then begin let shrink_tac,t = shrink_pair (P_APP 1 :: p) f1 f2 in let assoc_tac = clever_rewrite p [[P_APP 1];[P_APP 2;P_APP 1];[P_APP 2;P_APP 2]] (Lazy.force coq_fast_Zplus_assoc) in let tac_list,t' = condense p (Oplus(t,r)) in (assoc_tac :: shrink_tac :: tac_list), t' end else begin let tac,f = reduce_factor (P_APP 1 :: p) f1 in let tac',t' = condense (P_APP 2 :: p) t in (tac @ tac'), Oplus(f,t') end | Oplus(f1,Oz n) -> let tac,f1' = reduce_factor (P_APP 1 :: p) f1 in tac,Oplus(f1',Oz n) | Oplus(f1,f2) -> if weight f1 = weight f2 then begin let tac_shrink,t = shrink_pair p f1 f2 in let tac,t' = condense p t in tac_shrink :: tac,t' end else begin let tac,f = reduce_factor (P_APP 1 :: p) f1 in let tac',t' = condense (P_APP 2 :: p) f2 in (tac @ tac'),Oplus(f,t') end | Oz _ as t -> [],t | t -> let tac,t' = reduce_factor p t in let final = Oplus(t',Oz zero) in let tac' = clever_rewrite p [[]] (Lazy.force coq_fast_Zred_factor6) in tac @ [tac'], final let rec clear_zero p = function | Oplus(Otimes(Oatom v,Oz n),r) when n =? 
zero -> let tac = clever_rewrite p [[P_APP 1;P_APP 1];[P_APP 2]] (Lazy.force coq_fast_Zred_factor5) in let tac',t = clear_zero p r in tac :: tac',t | Oplus(f,r) -> let tac,t = clear_zero (P_APP 2 :: p) r in tac,Oplus(f,t) | t -> [],t let replay_history tactic_normalisation = let aux = id_of_string "auxiliary" in let aux1 = id_of_string "auxiliary_1" in let aux2 = id_of_string "auxiliary_2" in let izero = mk_integer zero in let rec loop t = match t with | HYP e :: l -> begin try tclTHEN (List.assoc (hyp_of_tag e.id) tactic_normalisation) (loop l) with Not_found -> loop l end | NEGATE_CONTRADICT (e2,e1,b) :: l -> let eq1 = decompile e1 and eq2 = decompile e2 in let id1 = hyp_of_tag e1.id and id2 = hyp_of_tag e2.id in let k = if b then negone else one in let p_initial = [P_APP 1;P_TYPE] in let tac= shuffle_mult_right p_initial e1.body k e2.body in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_OMEGA17, [| val_of eq1; val_of eq2; mk_integer k; mkVar id1; mkVar id2 |])]); (mk_then tac); (intros_using [aux]); (resolve_id aux); reflexivity ] | CONTRADICTION (e1,e2) :: l -> let eq1 = decompile e1 and eq2 = decompile e2 in let p_initial = [P_APP 2;P_TYPE] in let tac = shuffle_cancel p_initial e1.body in let solve_le = let not_sup_sup = mkApp (build_coq_eq (), [| Lazy.force coq_comparison; Lazy.force coq_Gt; Lazy.force coq_Gt |]) in tclTHENS (tclTHENLIST [ (unfold sp_Zle); (simpl_in_concl); intro; (absurd not_sup_sup) ]) [ assumption ; reflexivity ] in let theorem = mkApp (Lazy.force coq_OMEGA2, [| val_of eq1; val_of eq2; mkVar (hyp_of_tag e1.id); mkVar (hyp_of_tag e2.id) |]) in tclTHEN (tclTHEN (generalize_tac [theorem]) (mk_then tac)) (solve_le) | DIVIDE_AND_APPROX (e1,e2,k,d) :: l -> let id = hyp_of_tag e1.id in let eq1 = val_of(decompile e1) and eq2 = val_of(decompile e2) in let kk = mk_integer k and dd = mk_integer d in let rhs = mk_plus (mk_times eq2 kk) dd in let state_eg = mk_eq eq1 rhs in let tac = scalar_norm_add [P_APP 3] e2.body in tclTHENS (cut state_eg) [ tclTHENS (tclTHENLIST [ (intros_using [aux]); (generalize_tac [mkApp (Lazy.force coq_OMEGA1, [| eq1; rhs; mkVar aux; mkVar id |])]); (clear [aux;id]); (intros_using [id]); (cut (mk_gt kk dd)) ]) [ tclTHENS (cut (mk_gt kk izero)) [ tclTHENLIST [ (intros_using [aux1; aux2]); (generalize_tac [mkApp (Lazy.force coq_Zmult_le_approx, [| kk;eq2;dd;mkVar aux1;mkVar aux2; mkVar id |])]); (clear [aux1;aux2;id]); (intros_using [id]); (loop l) ]; tclTHENLIST [ (unfold sp_Zgt); (simpl_in_concl); reflexivity ] ]; tclTHENLIST [ (unfold sp_Zgt); simpl_in_concl; reflexivity ] ]; tclTHEN (mk_then tac) reflexivity ] | NOT_EXACT_DIVIDE (e1,k) :: l -> let c = floor_div e1.constant k in let d = Bigint.sub e1.constant (Bigint.mult c k) in let e2 = {id=e1.id; kind=EQUA;constant = c; body = map_eq_linear (fun c -> c / k) e1.body } in let eq2 = val_of(decompile e2) in let kk = mk_integer k and dd = mk_integer d in let tac = scalar_norm_add [P_APP 2] e2.body in tclTHENS (cut (mk_gt dd izero)) [ tclTHENS (cut (mk_gt kk dd)) [tclTHENLIST [ (intros_using [aux2;aux1]); (generalize_tac [mkApp (Lazy.force coq_OMEGA4, [| dd;kk;eq2;mkVar aux1; mkVar aux2 |])]); (clear [aux1;aux2]); (unfold sp_not); (intros_using [aux]); (resolve_id aux); (mk_then tac); assumption ] ; tclTHENLIST [ (unfold sp_Zgt); simpl_in_concl; reflexivity ] ]; tclTHENLIST [ (unfold sp_Zgt); simpl_in_concl; reflexivity ] ] | EXACT_DIVIDE (e1,k) :: l -> let id = hyp_of_tag e1.id in let e2 = map_eq_afine (fun c -> c / k) e1 in let eq1 = val_of(decompile e1) and eq2 = val_of(decompile e2) in let 
kk = mk_integer k in let state_eq = mk_eq eq1 (mk_times eq2 kk) in if e1.kind = DISE then let tac = scalar_norm [P_APP 3] e2.body in tclTHENS (cut state_eq) [tclTHENLIST [ (intros_using [aux1]); (generalize_tac [mkApp (Lazy.force coq_OMEGA18, [| eq1;eq2;kk;mkVar aux1; mkVar id |])]); (clear [aux1;id]); (intros_using [id]); (loop l) ]; tclTHEN (mk_then tac) reflexivity ] else let tac = scalar_norm [P_APP 3] e2.body in tclTHENS (cut state_eq) [ tclTHENS (cut (mk_gt kk izero)) [tclTHENLIST [ (intros_using [aux2;aux1]); (generalize_tac [mkApp (Lazy.force coq_OMEGA3, [| eq1; eq2; kk; mkVar aux2; mkVar aux1;mkVar id|])]); (clear [aux1;aux2;id]); (intros_using [id]); (loop l) ]; tclTHENLIST [ (unfold sp_Zgt); simpl_in_concl; reflexivity ] ]; tclTHEN (mk_then tac) reflexivity ] | (MERGE_EQ(e3,e1,e2)) :: l -> let id = new_identifier () in tag_hypothesis id e3; let id1 = hyp_of_tag e1.id and id2 = hyp_of_tag e2 in let eq1 = val_of(decompile e1) and eq2 = val_of (decompile (negate_eq e1)) in let tac = clever_rewrite [P_APP 3] [[P_APP 1]] (Lazy.force coq_fast_Zopp_eq_mult_neg_1) :: scalar_norm [P_APP 3] e1.body in tclTHENS (cut (mk_eq eq1 (mk_inv eq2))) [tclTHENLIST [ (intros_using [aux]); (generalize_tac [mkApp (Lazy.force coq_OMEGA8, [| eq1;eq2;mkVar id1;mkVar id2; mkVar aux|])]); (clear [id1;id2;aux]); (intros_using [id]); (loop l) ]; tclTHEN (mk_then tac) reflexivity] | STATE {st_new_eq=e;st_def=def;st_orig=orig;st_coef=m;st_var=v} :: l -> let id = new_identifier () and id2 = hyp_of_tag orig.id in tag_hypothesis id e.id; let eq1 = val_of(decompile def) and eq2 = val_of(decompile orig) in let vid = unintern_id v in let theorem = mkApp (build_coq_ex (), [| Lazy.force coq_Z; mkLambda (Name vid, Lazy.force coq_Z, mk_eq (mkRel 1) eq1) |]) in let mm = mk_integer m in let p_initial = [P_APP 2;P_TYPE] in let tac = clever_rewrite (P_APP 1 :: P_APP 1 :: P_APP 2 :: p_initial) [[P_APP 1]] (Lazy.force coq_fast_Zopp_eq_mult_neg_1) :: shuffle_mult_right p_initial orig.body m ({c= negone;v= v}::def.body) in tclTHENS (cut theorem) [tclTHENLIST [ (intros_using [aux]); (elim_id aux); (clear [aux]); (intros_using [vid; aux]); (generalize_tac [mkApp (Lazy.force coq_OMEGA9, [| mkVar vid;eq2;eq1;mm; mkVar id2;mkVar aux |])]); (mk_then tac); (clear [aux]); (intros_using [id]); (loop l) ]; tclTHEN (exists_tac eq1) reflexivity ] | SPLIT_INEQ(e,(e1,act1),(e2,act2)) :: l -> let id1 = new_identifier () and id2 = new_identifier () in tag_hypothesis id1 e1; tag_hypothesis id2 e2; let id = hyp_of_tag e.id in let tac1 = norm_add [P_APP 2;P_TYPE] e.body in let tac2 = scalar_norm_add [P_APP 2;P_TYPE] e.body in let eq = val_of(decompile e) in tclTHENS (simplest_elim (applist (Lazy.force coq_OMEGA19, [eq; mkVar id]))) [tclTHENLIST [ (mk_then tac1); (intros_using [id1]); (loop act1) ]; tclTHENLIST [ (mk_then tac2); (intros_using [id2]); (loop act2) ]] | SUM(e3,(k1,e1),(k2,e2)) :: l -> let id = new_identifier () in tag_hypothesis id e3; let id1 = hyp_of_tag e1.id and id2 = hyp_of_tag e2.id in let eq1 = val_of(decompile e1) and eq2 = val_of(decompile e2) in if k1 =? 
one & e2.kind = EQUA then let tac_thm = match e1.kind with | EQUA -> Lazy.force coq_OMEGA5 | INEQ -> Lazy.force coq_OMEGA6 | DISE -> Lazy.force coq_OMEGA20 in let kk = mk_integer k2 in let p_initial = if e1.kind=DISE then [P_APP 1; P_TYPE] else [P_APP 2; P_TYPE] in let tac = shuffle_mult_right p_initial e1.body k2 e2.body in tclTHENLIST [ (generalize_tac [mkApp (tac_thm, [| eq1; eq2; kk; mkVar id1; mkVar id2 |])]); (mk_then tac); (intros_using [id]); (loop l) ] else let kk1 = mk_integer k1 and kk2 = mk_integer k2 in let p_initial = [P_APP 2;P_TYPE] in let tac= shuffle_mult p_initial k1 e1.body k2 e2.body in tclTHENS (cut (mk_gt kk1 izero)) [tclTHENS (cut (mk_gt kk2 izero)) [tclTHENLIST [ (intros_using [aux2;aux1]); (generalize_tac [mkApp (Lazy.force coq_OMEGA7, [| eq1;eq2;kk1;kk2; mkVar aux1;mkVar aux2; mkVar id1;mkVar id2 |])]); (clear [aux1;aux2]); (mk_then tac); (intros_using [id]); (loop l) ]; tclTHENLIST [ (unfold sp_Zgt); simpl_in_concl; reflexivity ] ]; tclTHENLIST [ (unfold sp_Zgt); simpl_in_concl; reflexivity ] ] | CONSTANT_NOT_NUL(e,k) :: l -> tclTHEN (generalize_tac [mkVar (hyp_of_tag e)]) Equality.discrConcl | CONSTANT_NUL(e) :: l -> tclTHEN (resolve_id (hyp_of_tag e)) reflexivity | CONSTANT_NEG(e,k) :: l -> tclTHENLIST [ (generalize_tac [mkVar (hyp_of_tag e)]); (unfold sp_Zle); simpl_in_concl; (unfold sp_not); (intros_using [aux]); (resolve_id aux); reflexivity ] | _ -> tclIDTAC in loop let normalize p_initial t = let (tac,t') = transform p_initial t in let (tac',t'') = condense p_initial t' in let (tac'',t''') = clear_zero p_initial t'' in tac @ tac' @ tac'' , t''' let normalize_equation id flag theorem pos t t1 t2 (tactic,defs) = let p_initial = [P_APP pos ;P_TYPE] in let (tac,t') = normalize p_initial t in let shift_left = tclTHEN (generalize_tac [mkApp (theorem, [| t1; t2; mkVar id |]) ]) (tclTRY (clear [id])) in if tac <> [] then let id' = new_identifier () in ((id',(tclTHENLIST [ (shift_left); (mk_then tac); (intros_using [id']) ])) :: tactic, compile id' flag t' :: defs) else (tactic,defs) let destructure_omega gl tac_def (id,c) = if atompart_of_id id = "State" then tac_def else try match destructurate_prop c with | Kapp(Eq,[typ;t1;t2]) when destructurate_type (pf_nf gl typ) = Kapp(Z,[]) -> let t = mk_plus t1 (mk_inv t2) in normalize_equation id EQUA (Lazy.force coq_Zegal_left) 2 t t1 t2 tac_def | Kapp(Zne,[t1;t2]) -> let t = mk_plus t1 (mk_inv t2) in normalize_equation id DISE (Lazy.force coq_Zne_left) 1 t t1 t2 tac_def | Kapp(Zle,[t1;t2]) -> let t = mk_plus t2 (mk_inv t1) in normalize_equation id INEQ (Lazy.force coq_Zle_left) 2 t t1 t2 tac_def | Kapp(Zlt,[t1;t2]) -> let t = mk_plus (mk_plus t2 (mk_integer negone)) (mk_inv t1) in normalize_equation id INEQ (Lazy.force coq_Zlt_left) 2 t t1 t2 tac_def | Kapp(Zge,[t1;t2]) -> let t = mk_plus t1 (mk_inv t2) in normalize_equation id INEQ (Lazy.force coq_Zge_left) 2 t t1 t2 tac_def | Kapp(Zgt,[t1;t2]) -> let t = mk_plus (mk_plus t1 (mk_integer negone)) (mk_inv t2) in normalize_equation id INEQ (Lazy.force coq_Zgt_left) 2 t t1 t2 tac_def | _ -> tac_def with e when catchable_exception e -> tac_def let reintroduce id = (* [id] cannot be cleared if dependent: protect it by a try *) tclTHEN (tclTRY (clear [id])) (intro_using id) let coq_omega gl = clear_tables (); let tactic_normalisation, system = List.fold_left (destructure_omega gl) ([],[]) (pf_hyps_types gl) in let prelude,sys = List.fold_left (fun (tac,sys) (t,(v,th,b)) -> if b then let id = new_identifier () in let i = new_id () in tag_hypothesis id i; (tclTHENLIST [ 
(simplest_elim (applist (Lazy.force coq_intro_Z, [t]))); (intros_using [v; id]); (elim_id id); (clear [id]); (intros_using [th;id]); tac ]), {kind = INEQ; body = [{v=intern_id v; c=one}]; constant = zero; id = i} :: sys else (tclTHENLIST [ (simplest_elim (applist (Lazy.force coq_new_var, [t]))); (intros_using [v;th]); tac ]), sys) (tclIDTAC,[]) (dump_tables ()) in let system = system @ sys in if !display_system_flag then display_system display_var system; if !old_style_flag then begin try let _ = simplify (new_id,new_var_num,display_var) false system in tclIDTAC gl with UNSOLVABLE -> let _,path = depend [] [] (history ()) in if !display_action_flag then display_action display_var path; (tclTHEN prelude (replay_history tactic_normalisation path)) gl end else begin try let path = simplify_strong (new_id,new_var_num,display_var) system in if !display_action_flag then display_action display_var path; (tclTHEN prelude (replay_history tactic_normalisation path)) gl with NO_CONTRADICTION -> error "Omega can't solve this system" end let coq_omega = solver_time coq_omega let nat_inject gl = let rec explore p t = try match destructurate_term t with | Kapp(Plus,[t1;t2]) -> tclTHENLIST [ (clever_rewrite_gen p (mk_plus (mk_inj t1) (mk_inj t2)) ((Lazy.force coq_inj_plus),[t1;t2])); (explore (P_APP 1 :: p) t1); (explore (P_APP 2 :: p) t2) ] | Kapp(Mult,[t1;t2]) -> tclTHENLIST [ (clever_rewrite_gen p (mk_times (mk_inj t1) (mk_inj t2)) ((Lazy.force coq_inj_mult),[t1;t2])); (explore (P_APP 1 :: p) t1); (explore (P_APP 2 :: p) t2) ] | Kapp(Minus,[t1;t2]) -> let id = new_identifier () in tclTHENS (tclTHEN (simplest_elim (applist (Lazy.force coq_le_gt_dec, [t2;t1]))) (intros_using [id])) [ tclTHENLIST [ (clever_rewrite_gen p (mk_minus (mk_inj t1) (mk_inj t2)) ((Lazy.force coq_inj_minus1),[t1;t2;mkVar id])); (loop [id,mkApp (Lazy.force coq_le, [| t2;t1 |])]); (explore (P_APP 1 :: p) t1); (explore (P_APP 2 :: p) t2) ]; (tclTHEN (clever_rewrite_gen p (mk_integer zero) ((Lazy.force coq_inj_minus2),[t1;t2;mkVar id])) (loop [id,mkApp (Lazy.force coq_gt, [| t2;t1 |])])) ] | Kapp(S,[t']) -> let rec is_number t = try match destructurate_term t with Kapp(S,[t]) -> is_number t | Kapp(O,[]) -> true | _ -> false with e when catchable_exception e -> false in let rec loop p t = try match destructurate_term t with Kapp(S,[t]) -> (tclTHEN (clever_rewrite_gen p (mkApp (Lazy.force coq_Zsucc, [| mk_inj t |])) ((Lazy.force coq_inj_S),[t])) (loop (P_APP 1 :: p) t)) | _ -> explore p t with e when catchable_exception e -> explore p t in if is_number t' then focused_simpl p else loop p t | Kapp(Pred,[t]) -> let t_minus_one = mkApp (Lazy.force coq_minus, [| t; mkApp (Lazy.force coq_S, [| Lazy.force coq_O |]) |]) in tclTHEN (clever_rewrite_gen_nat (P_APP 1 :: p) t_minus_one ((Lazy.force coq_pred_of_minus),[t])) (explore p t_minus_one) | Kapp(O,[]) -> focused_simpl p | _ -> tclIDTAC with e when catchable_exception e -> tclIDTAC and loop = function | [] -> tclIDTAC | (i,t)::lit -> begin try match destructurate_prop t with Kapp(Le,[t1;t2]) -> tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_inj_le, [| t1;t2;mkVar i |]) ]); (explore [P_APP 1; P_TYPE] t1); (explore [P_APP 2; P_TYPE] t2); (reintroduce i); (loop lit) ] | Kapp(Lt,[t1;t2]) -> tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_inj_lt, [| t1;t2;mkVar i |]) ]); (explore [P_APP 1; P_TYPE] t1); (explore [P_APP 2; P_TYPE] t2); (reintroduce i); (loop lit) ] | Kapp(Ge,[t1;t2]) -> tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_inj_ge, [| t1;t2;mkVar i |]) ]); (explore [P_APP 
1; P_TYPE] t1); (explore [P_APP 2; P_TYPE] t2); (reintroduce i); (loop lit) ] | Kapp(Gt,[t1;t2]) -> tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_inj_gt, [| t1;t2;mkVar i |]) ]); (explore [P_APP 1; P_TYPE] t1); (explore [P_APP 2; P_TYPE] t2); (reintroduce i); (loop lit) ] | Kapp(Neq,[t1;t2]) -> tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_inj_neq, [| t1;t2;mkVar i |]) ]); (explore [P_APP 1; P_TYPE] t1); (explore [P_APP 2; P_TYPE] t2); (reintroduce i); (loop lit) ] | Kapp(Eq,[typ;t1;t2]) -> if pf_conv_x gl typ (Lazy.force coq_nat) then tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_inj_eq, [| t1;t2;mkVar i |]) ]); (explore [P_APP 2; P_TYPE] t1); (explore [P_APP 3; P_TYPE] t2); (reintroduce i); (loop lit) ] else loop lit | _ -> loop lit with e when catchable_exception e -> loop lit end in loop (List.rev (pf_hyps_types gl)) gl let dec_binop = function | Zne -> coq_dec_Zne | Zle -> coq_dec_Zle | Zlt -> coq_dec_Zlt | Zge -> coq_dec_Zge | Zgt -> coq_dec_Zgt | Le -> coq_dec_le | Lt -> coq_dec_lt | Ge -> coq_dec_ge | Gt -> coq_dec_gt | _ -> raise Not_found let not_binop = function | Zne -> coq_not_Zne | Zle -> coq_Znot_le_gt | Zlt -> coq_Znot_lt_ge | Zge -> coq_Znot_ge_lt | Zgt -> coq_Znot_gt_le | Le -> coq_not_le | Lt -> coq_not_lt | Ge -> coq_not_ge | Gt -> coq_not_gt | _ -> raise Not_found (** A decidability check : for some [t], could we build a term of type [decidable t] (i.e. [t\/~t]) ? Otherwise, we raise [Undecidable]. Note that a successful check implies that [t] has type Prop. *) exception Undecidable let rec decidability gl t = match destructurate_prop t with | Kapp(Or,[t1;t2]) -> mkApp (Lazy.force coq_dec_or, [| t1; t2; decidability gl t1; decidability gl t2 |]) | Kapp(And,[t1;t2]) -> mkApp (Lazy.force coq_dec_and, [| t1; t2; decidability gl t1; decidability gl t2 |]) | Kapp(Iff,[t1;t2]) -> mkApp (Lazy.force coq_dec_iff, [| t1; t2; decidability gl t1; decidability gl t2 |]) | Kimp(t1,t2) -> (* This is the only situation where it's not obvious that [t] is in Prop. The recursive call on [t2] will ensure that. 
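For instance, for [t1 -> t2] with [t1 = Z.le x y] and [t2 = False], the term built just below is [dec_imp t1 t2 (Z.le_decidable x y) dec_False], using the [dec_binop] table above for [t1] and [coq_dec_False] for [t2].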
*) mkApp (Lazy.force coq_dec_imp, [| t1; t2; decidability gl t1; decidability gl t2 |]) | Kapp(Not,[t1]) -> mkApp (Lazy.force coq_dec_not, [| t1; decidability gl t1 |]) | Kapp(Eq,[typ;t1;t2]) -> begin match destructurate_type (pf_nf gl typ) with | Kapp(Z,[]) -> mkApp (Lazy.force coq_dec_eq, [| t1;t2 |]) | Kapp(Nat,[]) -> mkApp (Lazy.force coq_dec_eq_nat, [| t1;t2 |]) | _ -> raise Undecidable end | Kapp(op,[t1;t2]) -> (try mkApp (Lazy.force (dec_binop op), [| t1; t2 |]) with Not_found -> raise Undecidable) | Kapp(False,[]) -> Lazy.force coq_dec_False | Kapp(True,[]) -> Lazy.force coq_dec_True | _ -> raise Undecidable let onClearedName id tac = (* We cannot ensure that hyps can be cleared (because of dependencies), *) (* so renaming may be necessary *) tclTHEN (tclTRY (clear [id])) (fun gl -> let id = fresh_id [] id gl in tclTHEN (introduction id) (tac id) gl) let onClearedName2 id tac = tclTHEN (tclTRY (clear [id])) (fun gl -> let id1 = fresh_id [] (add_suffix id "_left") gl in let id2 = fresh_id [] (add_suffix id "_right") gl in tclTHENLIST [ introduction id1; introduction id2; tac id1 id2 ] gl) let destructure_hyps gl = let rec loop = function | [] -> (tclTHEN nat_inject coq_omega) | (i,body,t)::lit -> begin try match destructurate_prop t with | Kapp(False,[]) -> elim_id i | Kapp((Zle|Zge|Zgt|Zlt|Zne),[t1;t2]) -> loop lit | Kapp(Or,[t1;t2]) -> (tclTHENS (elim_id i) [ onClearedName i (fun i -> (loop ((i,None,t1)::lit))); onClearedName i (fun i -> (loop ((i,None,t2)::lit))) ]) | Kapp(And,[t1;t2]) -> tclTHEN (elim_id i) (onClearedName2 i (fun i1 i2 -> loop ((i1,None,t1)::(i2,None,t2)::lit))) | Kapp(Iff,[t1;t2]) -> tclTHEN (elim_id i) (onClearedName2 i (fun i1 i2 -> loop ((i1,None,mkArrow t1 t2)::(i2,None,mkArrow t2 t1)::lit))) | Kimp(t1,t2) -> (* t1 and t2 might be in Type rather than Prop. For t1, the decidability check will ensure being Prop. *) if is_Prop (pf_type_of gl t2) then let d1 = decidability gl t1 in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_imp_simp, [| t1; t2; d1; mkVar i|])]); (onClearedName i (fun i -> (loop ((i,None,mk_or (mk_not t1) t2)::lit)))) ] else loop lit | Kapp(Not,[t]) -> begin match destructurate_prop t with Kapp(Or,[t1;t2]) -> tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_not_or,[| t1; t2; mkVar i |])]); (onClearedName i (fun i -> (loop ((i,None,mk_and (mk_not t1) (mk_not t2)):: lit)))) ] | Kapp(And,[t1;t2]) -> let d1 = decidability gl t1 in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_not_and, [| t1; t2; d1; mkVar i |])]); (onClearedName i (fun i -> (loop ((i,None,mk_or (mk_not t1) (mk_not t2))::lit)))) ] | Kapp(Iff,[t1;t2]) -> let d1 = decidability gl t1 in let d2 = decidability gl t2 in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_not_iff, [| t1; t2; d1; d2; mkVar i |])]); (onClearedName i (fun i -> (loop ((i,None, mk_or (mk_and t1 (mk_not t2)) (mk_and (mk_not t1) t2))::lit)))) ] | Kimp(t1,t2) -> (* t2 must be in Prop otherwise ~(t1->t2) wouldn't be ok. For t1, being decidable implies being Prop. 
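Concretely, such a hypothesis [~ (t1 -> t2)] is rewritten below with [not_imp] into [t1 /\ ~ t2] before looping on the new hypothesis.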
*) let d1 = decidability gl t1 in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_not_imp, [| t1; t2; d1; mkVar i |])]); (onClearedName i (fun i -> (loop ((i,None,mk_and t1 (mk_not t2)) :: lit)))) ] | Kapp(Not,[t]) -> let d = decidability gl t in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force coq_not_not, [| t; d; mkVar i |])]); (onClearedName i (fun i -> (loop ((i,None,t)::lit)))) ] | Kapp(op,[t1;t2]) -> (try let thm = not_binop op in tclTHENLIST [ (generalize_tac [mkApp (Lazy.force thm, [| t1;t2;mkVar i|])]); (onClearedName i (fun _ -> loop lit)) ] with Not_found -> loop lit) | Kapp(Eq,[typ;t1;t2]) -> if !old_style_flag then begin match destructurate_type (pf_nf gl typ) with | Kapp(Nat,_) -> tclTHENLIST [ (simplest_elim (mkApp (Lazy.force coq_not_eq, [|t1;t2;mkVar i|]))); (onClearedName i (fun _ -> loop lit)) ] | Kapp(Z,_) -> tclTHENLIST [ (simplest_elim (mkApp (Lazy.force coq_not_Zeq, [|t1;t2;mkVar i|]))); (onClearedName i (fun _ -> loop lit)) ] | _ -> loop lit end else begin match destructurate_type (pf_nf gl typ) with | Kapp(Nat,_) -> (tclTHEN (convert_hyp_no_check (i,body, (mkApp (Lazy.force coq_neq, [| t1;t2|])))) (loop lit)) | Kapp(Z,_) -> (tclTHEN (convert_hyp_no_check (i,body, (mkApp (Lazy.force coq_Zne, [| t1;t2|])))) (loop lit)) | _ -> loop lit end | _ -> loop lit end | _ -> loop lit with | Undecidable -> loop lit | e when catchable_exception e -> loop lit end in loop (pf_hyps gl) gl let destructure_goal gl = let concl = pf_concl gl in let rec loop t = match destructurate_prop t with | Kapp(Not,[t]) -> (tclTHEN (tclTHEN (unfold sp_not) intro) destructure_hyps) | Kimp(a,b) -> (tclTHEN intro (loop b)) | Kapp(False,[]) -> destructure_hyps | _ -> let goal_tac = try let dec = decidability gl t in tclTHEN (Tactics.refine (mkApp (Lazy.force coq_dec_not_not, [| t; dec; mkNewMeta () |]))) intro with Undecidable -> Tactics.elim_type (build_coq_False ()) in tclTHEN goal_tac destructure_hyps in (loop concl) gl let destructure_goal = all_time (destructure_goal) let omega_solver gl = Coqlib.check_required_library ["Coq";"omega";"Omega"]; let result = destructure_goal gl in (* if !display_time_flag then begin text_time (); flush Pervasives.stdout end; *) result coq-8.4pl2/plugins/omega/vo.itarget0000640000175000001440000000006311307752066016413 0ustar notinusersOmegaLemmas.vo OmegaPlugin.vo Omega.vo PreOmega.vo coq-8.4pl2/plugins/omega/PreOmega.v0000640000175000001440000003655612010532755016305 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* simpl (t a) in * | _ => zify_unop_var_or_term t thm a end. Ltac zify_unop_nored t thm a := (* in this version, we don't try to reduce the unop (that can be (Z.add x)) *) let isz := isZcst a in match isz with | true => zify_unop_core t thm a | _ => zify_unop_var_or_term t thm a end. Ltac zify_binop t thm a b:= (* works as zify_unop, except that we should be careful when dealing with b, since it can be equal to a *) let isza := isZcst a in match isza with | true => zify_unop (t a) (thm a) b | _ => let za := fresh "z" in (rename a into za; rename za into a; zify_unop_nored (t a) (thm a) b) || (remember a as za; match goal with | H : za = b |- _ => zify_unop_nored (t za) (thm za) za | _ => zify_unop_nored (t za) (thm za) b end) end. 
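(* Example (sketch): on a goal mentioning [Z.max a b], [zify_op_1] below calls [zify_binop Z.max Z.max_spec a b]; roughly, this poses the specification [Z.max_spec a b] as a hypothesis and abstracts the occurrence of [Z.max a b], so that the linear fragment later handled by [omega] no longer sees the [Z.max]. *)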
Ltac zify_op_1 := match goal with | |- context [ Z.max ?a ?b ] => zify_binop Z.max Z.max_spec a b | H : context [ Z.max ?a ?b ] |- _ => zify_binop Z.max Z.max_spec a b | |- context [ Z.min ?a ?b ] => zify_binop Z.min Z.min_spec a b | H : context [ Z.min ?a ?b ] |- _ => zify_binop Z.min Z.min_spec a b | |- context [ Z.sgn ?a ] => zify_unop Z.sgn Z.sgn_spec a | H : context [ Z.sgn ?a ] |- _ => zify_unop Z.sgn Z.sgn_spec a | |- context [ Z.abs ?a ] => zify_unop Z.abs Z.abs_spec a | H : context [ Z.abs ?a ] |- _ => zify_unop Z.abs Z.abs_spec a end. Ltac zify_op := repeat zify_op_1. (** II) Conversion from nat to Z *) Definition Z_of_nat' := Z.of_nat. Ltac hide_Z_of_nat t := let z := fresh "z" in set (z:=Z.of_nat t) in *; change Z.of_nat with Z_of_nat' in z; unfold z in *; clear z. Ltac zify_nat_rel := match goal with (* I: equalities *) | |- (@eq nat ?a ?b) => apply (Nat2Z.inj a b) (* shortcut *) | H : context [ @eq nat ?a ?b ] |- _ => rewrite <- (Nat2Z.inj_iff a b) in H | |- context [ @eq nat ?a ?b ] => rewrite <- (Nat2Z.inj_iff a b) (* II: less than *) | H : context [ lt ?a ?b ] |- _ => rewrite (Nat2Z.inj_lt a b) in H | |- context [ lt ?a ?b ] => rewrite (Nat2Z.inj_lt a b) (* III: less or equal *) | H : context [ le ?a ?b ] |- _ => rewrite (Nat2Z.inj_le a b) in H | |- context [ le ?a ?b ] => rewrite (Nat2Z.inj_le a b) (* IV: greater than *) | H : context [ gt ?a ?b ] |- _ => rewrite (Nat2Z.inj_gt a b) in H | |- context [ gt ?a ?b ] => rewrite (Nat2Z.inj_gt a b) (* V: greater or equal *) | H : context [ ge ?a ?b ] |- _ => rewrite (Nat2Z.inj_ge a b) in H | |- context [ ge ?a ?b ] => rewrite (Nat2Z.inj_ge a b) end. Ltac zify_nat_op := match goal with (* misc type conversions: positive/N/Z to nat *) | H : context [ Z.of_nat (Pos.to_nat ?a) ] |- _ => rewrite (positive_nat_Z a) in H | |- context [ Z.of_nat (Pos.to_nat ?a) ] => rewrite (positive_nat_Z a) | H : context [ Z.of_nat (N.to_nat ?a) ] |- _ => rewrite (N_nat_Z a) in H | |- context [ Z.of_nat (N.to_nat ?a) ] => rewrite (N_nat_Z a) | H : context [ Z.of_nat (Z.abs_nat ?a) ] |- _ => rewrite (Zabs2Nat.id_abs a) in H | |- context [ Z.of_nat (Z.abs_nat ?a) ] => rewrite (Zabs2Nat.id_abs a) (* plus -> Z.add *) | H : context [ Z.of_nat (plus ?a ?b) ] |- _ => rewrite (Nat2Z.inj_add a b) in H | |- context [ Z.of_nat (plus ?a ?b) ] => rewrite (Nat2Z.inj_add a b) (* min -> Z.min *) | H : context [ Z.of_nat (min ?a ?b) ] |- _ => rewrite (Nat2Z.inj_min a b) in H | |- context [ Z.of_nat (min ?a ?b) ] => rewrite (Nat2Z.inj_min a b) (* max -> Z.max *) | H : context [ Z.of_nat (max ?a ?b) ] |- _ => rewrite (Nat2Z.inj_max a b) in H | |- context [ Z.of_nat (max ?a ?b) ] => rewrite (Nat2Z.inj_max a b) (* minus -> Z.max (Z.sub ... ...) 0 *) | H : context [ Z.of_nat (minus ?a ?b) ] |- _ => rewrite (Nat2Z.inj_sub_max a b) in H | |- context [ Z.of_nat (minus ?a ?b) ] => rewrite (Nat2Z.inj_sub_max a b) (* pred -> minus ... -1 -> Z.max (Z.sub ... 
-1) 0 *) | H : context [ Z.of_nat (pred ?a) ] |- _ => rewrite (pred_of_minus a) in H | |- context [ Z.of_nat (pred ?a) ] => rewrite (pred_of_minus a) (* mult -> Z.mul and a positivity hypothesis *) | H : context [ Z.of_nat (mult ?a ?b) ] |- _ => pose proof (Nat2Z.is_nonneg (mult a b)); rewrite (Nat2Z.inj_mul a b) in * | |- context [ Z.of_nat (mult ?a ?b) ] => pose proof (Nat2Z.is_nonneg (mult a b)); rewrite (Nat2Z.inj_mul a b) in * (* O -> Z0 *) | H : context [ Z.of_nat O ] |- _ => simpl (Z.of_nat O) in H | |- context [ Z.of_nat O ] => simpl (Z.of_nat O) (* S -> number or Z.succ *) | H : context [ Z.of_nat (S ?a) ] |- _ => let isnat := isnatcst a in match isnat with | true => simpl (Z.of_nat (S a)) in H | _ => rewrite (Nat2Z.inj_succ a) in H end | |- context [ Z.of_nat (S ?a) ] => let isnat := isnatcst a in match isnat with | true => simpl (Z.of_nat (S a)) | _ => rewrite (Nat2Z.inj_succ a) end (* atoms of type nat : we add a positivity condition (if not already there) *) | _ : 0 <= Z.of_nat ?a |- _ => hide_Z_of_nat a | _ : context [ Z.of_nat ?a ] |- _ => pose proof (Nat2Z.is_nonneg a); hide_Z_of_nat a | |- context [ Z.of_nat ?a ] => pose proof (Nat2Z.is_nonneg a); hide_Z_of_nat a end. Ltac zify_nat := repeat zify_nat_rel; repeat zify_nat_op; unfold Z_of_nat' in *. (* III) conversion from positive to Z *) Definition Zpos' := Zpos. Definition Zneg' := Zneg. Ltac hide_Zpos t := let z := fresh "z" in set (z:=Zpos t) in *; change Zpos with Zpos' in z; unfold z in *; clear z. Ltac zify_positive_rel := match goal with (* I: equalities *) | |- (@eq positive ?a ?b) => apply Pos2Z.inj | H : context [ @eq positive ?a ?b ] |- _ => rewrite <- (Pos2Z.inj_iff a b) in H | |- context [ @eq positive ?a ?b ] => rewrite <- (Pos2Z.inj_iff a b) (* II: less than *) | H : context [ (?a < ?b)%positive ] |- _ => change (a change (a change (a<=b)%positive with (Zpos a<=Zpos b) in H | |- context [ (?a <= ?b)%positive ] => change (a<=b)%positive with (Zpos a<=Zpos b) (* IV: greater than *) | H : context [ (?a > ?b)%positive ] |- _ => change (a>b)%positive with (Zpos a>Zpos b) in H | |- context [ (?a > ?b)%positive ] => change (a>b)%positive with (Zpos a>Zpos b) (* V: greater or equal *) | H : context [ (?a >= ?b)%positive ] |- _ => change (a>=b)%positive with (Zpos a>=Zpos b) in H | |- context [ (?a >= ?b)%positive ] => change (a>=b)%positive with (Zpos a>=Zpos b) end. 
Ltac zify_positive_op := match goal with (* Zneg -> -Zpos (except for numbers) *) | H : context [ Zneg ?a ] |- _ => let isp := isPcst a in match isp with | true => change (Zneg a) with (Zneg' a) in H | _ => change (Zneg a) with (- Zpos a) in H end | |- context [ Zneg ?a ] => let isp := isPcst a in match isp with | true => change (Zneg a) with (Zneg' a) | _ => change (Zneg a) with (- Zpos a) end (* misc type conversions: nat to positive *) | H : context [ Zpos (Pos.of_succ_nat ?a) ] |- _ => rewrite (Zpos_P_of_succ_nat a) in H | |- context [ Zpos (Pos.of_succ_nat ?a) ] => rewrite (Zpos_P_of_succ_nat a) (* Pos.add -> Z.add *) | H : context [ Zpos (?a + ?b) ] |- _ => change (Zpos (a+b)) with (Zpos a + Zpos b) in H | |- context [ Zpos (?a + ?b) ] => change (Zpos (a+b)) with (Zpos a + Zpos b) (* Pos.min -> Z.min *) | H : context [ Zpos (Pos.min ?a ?b) ] |- _ => rewrite (Pos2Z.inj_min a b) in H | |- context [ Zpos (Pos.min ?a ?b) ] => rewrite (Pos2Z.inj_min a b) (* Pos.max -> Z.max *) | H : context [ Zpos (Pos.max ?a ?b) ] |- _ => rewrite (Pos2Z.inj_max a b) in H | |- context [ Zpos (Pos.max ?a ?b) ] => rewrite (Pos2Z.inj_max a b) (* Pos.sub -> Z.max 1 (Z.sub ... ...) *) | H : context [ Zpos (Pos.sub ?a ?b) ] |- _ => rewrite (Pos2Z.inj_sub a b) in H | |- context [ Zpos (Pos.sub ?a ?b) ] => rewrite (Pos2Z.inj_sub a b) (* Pos.succ -> Z.succ *) | H : context [ Zpos (Pos.succ ?a) ] |- _ => rewrite (Pos2Z.inj_succ a) in H | |- context [ Zpos (Pos.succ ?a) ] => rewrite (Pos2Z.inj_succ a) (* Pos.pred -> Pos.sub ... -1 -> Z.max 1 (Z.sub ... - 1) *) | H : context [ Zpos (Pos.pred ?a) ] |- _ => rewrite <- (Pos.sub_1_r a) in H | |- context [ Zpos (Pos.pred ?a) ] => rewrite <- (Pos.sub_1_r a) (* Pos.mul -> Z.mul and a positivity hypothesis *) | H : context [ Zpos (?a * ?b) ] |- _ => pose proof (Pos2Z.is_pos (Pos.mul a b)); change (Zpos (a*b)) with (Zpos a * Zpos b) in * | |- context [ Zpos (?a * ?b) ] => pose proof (Pos2Z.is_pos (Pos.mul a b)); change (Zpos (a*b)) with (Zpos a * Zpos b) in * (* xO *) | H : context [ Zpos (xO ?a) ] |- _ => let isp := isPcst a in match isp with | true => change (Zpos (xO a)) with (Zpos' (xO a)) in H | _ => rewrite (Pos2Z.inj_xO a) in H end | |- context [ Zpos (xO ?a) ] => let isp := isPcst a in match isp with | true => change (Zpos (xO a)) with (Zpos' (xO a)) | _ => rewrite (Pos2Z.inj_xO a) end (* xI *) | H : context [ Zpos (xI ?a) ] |- _ => let isp := isPcst a in match isp with | true => change (Zpos (xI a)) with (Zpos' (xI a)) in H | _ => rewrite (Pos2Z.inj_xI a) in H end | |- context [ Zpos (xI ?a) ] => let isp := isPcst a in match isp with | true => change (Zpos (xI a)) with (Zpos' (xI a)) | _ => rewrite (Pos2Z.inj_xI a) end (* xI : nothing to do, just prevent adding a useless positivity condition *) | H : context [ Zpos xH ] |- _ => hide_Zpos xH | |- context [ Zpos xH ] => hide_Zpos xH (* atoms of type positive : we add a positivity condition (if not already there) *) | _ : 0 < Zpos ?a |- _ => hide_Zpos a | _ : context [ Zpos ?a ] |- _ => pose proof (Pos2Z.is_pos a); hide_Zpos a | |- context [ Zpos ?a ] => pose proof (Pos2Z.is_pos a); hide_Zpos a end. Ltac zify_positive := repeat zify_positive_rel; repeat zify_positive_op; unfold Zpos',Zneg' in *. (* IV) conversion from N to Z *) Definition Z_of_N' := Z.of_N. Ltac hide_Z_of_N t := let z := fresh "z" in set (z:=Z.of_N t) in *; change Z.of_N with Z_of_N' in z; unfold z in *; clear z. 
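(* Added explanatory note, not in the original file: like [hide_Z_of_nat]
   and [hide_Zpos] above, [hide_Z_of_N] replaces the occurrences of
   [Z.of_N t] by the frozen alias [Z_of_N' t]; the alias no longer matches
   the [Z.of_N ?a] patterns, which prevents the tactics below from
   processing the same atom (and adding its positivity hypothesis) twice.
   The alias is unfolded again at the end of [zify_N]. *)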
Ltac zify_N_rel := match goal with (* I: equalities *) | |- (@eq N ?a ?b) => apply (N2Z.inj a b) (* shortcut *) | H : context [ @eq N ?a ?b ] |- _ => rewrite <- (N2Z.inj_iff a b) in H | |- context [ @eq N ?a ?b ] => rewrite <- (N2Z.inj_iff a b) (* II: less than *) | H : context [ (?a < ?b)%N ] |- _ => rewrite (N2Z.inj_lt a b) in H | |- context [ (?a < ?b)%N ] => rewrite (N2Z.inj_lt a b) (* III: less or equal *) | H : context [ (?a <= ?b)%N ] |- _ => rewrite (N2Z.inj_le a b) in H | |- context [ (?a <= ?b)%N ] => rewrite (N2Z.inj_le a b) (* IV: greater than *) | H : context [ (?a > ?b)%N ] |- _ => rewrite (N2Z.inj_gt a b) in H | |- context [ (?a > ?b)%N ] => rewrite (N2Z.inj_gt a b) (* V: greater or equal *) | H : context [ (?a >= ?b)%N ] |- _ => rewrite (N2Z.inj_ge a b) in H | |- context [ (?a >= ?b)%N ] => rewrite (N2Z.inj_ge a b) end. Ltac zify_N_op := match goal with (* misc type conversions: nat to positive *) | H : context [ Z.of_N (N.of_nat ?a) ] |- _ => rewrite (nat_N_Z a) in H | |- context [ Z.of_N (N.of_nat ?a) ] => rewrite (nat_N_Z a) | H : context [ Z.of_N (Z.abs_N ?a) ] |- _ => rewrite (N2Z.inj_abs_N a) in H | |- context [ Z.of_N (Z.abs_N ?a) ] => rewrite (N2Z.inj_abs_N a) | H : context [ Z.of_N (Npos ?a) ] |- _ => rewrite (N2Z.inj_pos a) in H | |- context [ Z.of_N (Npos ?a) ] => rewrite (N2Z.inj_pos a) | H : context [ Z.of_N N0 ] |- _ => change (Z.of_N N0) with Z0 in H | |- context [ Z.of_N N0 ] => change (Z.of_N N0) with Z0 (* N.add -> Z.add *) | H : context [ Z.of_N (N.add ?a ?b) ] |- _ => rewrite (N2Z.inj_add a b) in H | |- context [ Z.of_N (N.add ?a ?b) ] => rewrite (N2Z.inj_add a b) (* N.min -> Z.min *) | H : context [ Z.of_N (N.min ?a ?b) ] |- _ => rewrite (N2Z.inj_min a b) in H | |- context [ Z.of_N (N.min ?a ?b) ] => rewrite (N2Z.inj_min a b) (* N.max -> Z.max *) | H : context [ Z.of_N (N.max ?a ?b) ] |- _ => rewrite (N2Z.inj_max a b) in H | |- context [ Z.of_N (N.max ?a ?b) ] => rewrite (N2Z.inj_max a b) (* N.sub -> Z.max 0 (Z.sub ... ...) *) | H : context [ Z.of_N (N.sub ?a ?b) ] |- _ => rewrite (N2Z.inj_sub_max a b) in H | |- context [ Z.of_N (N.sub ?a ?b) ] => rewrite (N2Z.inj_sub_max a b) (* N.succ -> Z.succ *) | H : context [ Z.of_N (N.succ ?a) ] |- _ => rewrite (N2Z.inj_succ a) in H | |- context [ Z.of_N (N.succ ?a) ] => rewrite (N2Z.inj_succ a) (* N.mul -> Z.mul and a positivity hypothesis *) | H : context [ Z.of_N (N.mul ?a ?b) ] |- _ => pose proof (N2Z.is_nonneg (N.mul a b)); rewrite (N2Z.inj_mul a b) in * | |- context [ Z.of_N (N.mul ?a ?b) ] => pose proof (N2Z.is_nonneg (N.mul a b)); rewrite (N2Z.inj_mul a b) in * (* atoms of type N : we add a positivity condition (if not already there) *) | _ : 0 <= Z.of_N ?a |- _ => hide_Z_of_N a | _ : context [ Z.of_N ?a ] |- _ => pose proof (N2Z.is_nonneg a); hide_Z_of_N a | |- context [ Z.of_N ?a ] => pose proof (N2Z.is_nonneg a); hide_Z_of_N a end. Ltac zify_N := repeat zify_N_rel; repeat zify_N_op; unfold Z_of_N' in *. (** The complete Z-ification tactic *) Ltac zify := repeat progress (zify_nat; zify_positive; zify_N); zify_op. 
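(* Usage sketch added for illustration, not part of the original file.
   In a development that loads the Omega plugin, e.g. after
   [Require Import Omega.], the [zify] tactic above translates a mixed
   nat/positive/N/Z goal into a pure Z goal that [omega] can decide:

     Goal forall (n : nat) (p : positive),
       (Z.of_nat n < Zpos p)%Z -> (Z.of_nat n + 1 <= Zpos p)%Z.
     Proof. intros n p H. zify. omega. Qed.

   After [zify], the context also contains [0 <= Z.of_nat n] and
   [0 < Zpos p], which is what makes the translation complete enough
   for [omega]. *)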
coq-8.4pl2/plugins/omega/omega_plugin.mllib0000640000175000001440000000005111161000644020054 0ustar notinusersOmega Coq_omega G_omega Omega_plugin_mod coq-8.4pl2/plugins/omega/g_omega.ml40000640000175000001440000000351412010532755016417 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Tacinterp.interp <:tactic> | "positive" -> Tacinterp.interp <:tactic> | "N" -> Tacinterp.interp <:tactic> | "Z" -> Tacinterp.interp <:tactic> | s -> Util.error ("No Omega knowledge base for type "^s)) (Util.list_uniquize (List.sort compare l)) in tclTHEN (tclREPEAT (tclPROGRESS (tclTHENLIST tacs))) omega_solver TACTIC EXTEND omega | [ "omega" ] -> [ omega_tactic [] ] END TACTIC EXTEND omega' | [ "omega" "with" ne_ident_list(l) ] -> [ omega_tactic (List.map Names.string_of_id l) ] | [ "omega" "with" "*" ] -> [ omega_tactic ["nat";"positive";"N";"Z"] ] END coq-8.4pl2/plugins/omega/OmegaLemmas.v0000640000175000001440000001775711776416511017010 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 <= x -> 0 <= y. Proof. now intros ->. Qed. Lemma OMEGA2 x y : 0 <= x -> 0 <= y -> 0 <= x + y. Proof. Z.order_pos. Qed. Lemma OMEGA3 x y k : k > 0 -> x = y * k -> x = 0 -> y = 0. Proof. intros LT -> EQ. apply Z.mul_eq_0 in EQ. destruct EQ; now subst. Qed. Lemma OMEGA4 x y z : x > 0 -> y > x -> z * y + x <> 0. Proof. Z.swap_greater. intros Hx Hxy. rewrite Z.add_move_0_l, <- Z.mul_opp_l. destruct (Z.lt_trichotomy (-z) 1) as [LT|[->|GT]]. - intro. revert LT. apply Z.le_ngt, (Z.le_succ_l 0). apply Z.mul_pos_cancel_r with y; Z.order. - Z.nzsimpl. Z.order. - rewrite (Z.mul_lt_mono_pos_r y), Z.mul_1_l in GT; Z.order. Qed. Lemma OMEGA5 x y z : x = 0 -> y = 0 -> x + y * z = 0. Proof. now intros -> ->. Qed. Lemma OMEGA6 x y z : 0 <= x -> y = 0 -> 0 <= x + y * z. Proof. intros H ->. now Z.nzsimpl. Qed. Lemma OMEGA7 x y z t : z > 0 -> t > 0 -> 0 <= x -> 0 <= y -> 0 <= x * z + y * t. Proof. intros. Z.swap_greater. Z.order_pos. Qed. Lemma OMEGA8 x y : 0 <= x -> 0 <= y -> x = - y -> x = 0. Proof. intros H1 H2 H3. rewrite <- Z.opp_nonpos_nonneg in H2. Z.order. Qed. Lemma OMEGA9 x y z t : y = 0 -> x = z -> y + (- x + z) * t = 0. Proof. intros. subst. now rewrite Z.add_opp_diag_l. Qed. Lemma OMEGA10 v c1 c2 l1 l2 k1 k2 : (v * c1 + l1) * k1 + (v * c2 + l2) * k2 = v * (c1 * k1 + c2 * k2) + (l1 * k1 + l2 * k2). Proof. rewrite ?Z.mul_add_distr_r, ?Z.mul_add_distr_l, ?Z.mul_assoc. rewrite <- !Z.add_assoc. f_equal. apply Z.add_shuffle3. Qed. Lemma OMEGA11 v1 c1 l1 l2 k1 : (v1 * c1 + l1) * k1 + l2 = v1 * (c1 * k1) + (l1 * k1 + l2). Proof. rewrite ?Z.mul_add_distr_r, ?Z.mul_add_distr_l, ?Z.mul_assoc. now rewrite Z.add_assoc. Qed. Lemma OMEGA12 v2 c2 l1 l2 k2 : l1 + (v2 * c2 + l2) * k2 = v2 * (c2 * k2) + (l1 + l2 * k2). Proof. rewrite ?Z.mul_add_distr_r, ?Z.mul_add_distr_l, ?Z.mul_assoc. apply Z.add_shuffle3. Qed. Lemma OMEGA13 (v l1 l2 : Z) (x : positive) : v * Zpos x + l1 + (v * Zneg x + l2) = l1 + l2. Proof. rewrite Z.add_shuffle1. rewrite <- Z.mul_add_distr_l, <- Pos2Z.opp_neg, Z.add_opp_diag_r. now Z.nzsimpl. Qed. Lemma OMEGA14 (v l1 l2 : Z) (x : positive) : v * Zneg x + l1 + (v * Zpos x + l2) = l1 + l2. Proof. rewrite Z.add_shuffle1. rewrite <- Z.mul_add_distr_l, <- Pos2Z.opp_neg, Z.add_opp_diag_r. now Z.nzsimpl. Qed. Lemma OMEGA15 v c1 c2 l1 l2 k2 : v * c1 + l1 + (v * c2 + l2) * k2 = v * (c1 + c2 * k2) + (l1 + l2 * k2). Proof. 
rewrite ?Z.mul_add_distr_r, ?Z.mul_add_distr_l, ?Z.mul_assoc. apply Z.add_shuffle1. Qed. Lemma OMEGA16 v c l k : (v * c + l) * k = v * (c * k) + l * k. Proof. now rewrite ?Z.mul_add_distr_r, ?Z.mul_add_distr_l, ?Z.mul_assoc. Qed. Lemma OMEGA17 x y z : Zne x 0 -> y = 0 -> Zne (x + y * z) 0. Proof. unfold Zne, not. intros NE EQ. subst. now Z.nzsimpl. Qed. Lemma OMEGA18 x y k : x = y * k -> Zne x 0 -> Zne y 0. Proof. unfold Zne, not. intros. subst; auto. Qed. Lemma OMEGA19 x : Zne x 0 -> 0 <= x + -1 \/ 0 <= x * -1 + -1. Proof. unfold Zne. intros Hx. apply Z.lt_gt_cases in Hx. destruct Hx as [LT|GT]. - right. change (-1) with (-(1)). rewrite Z.mul_opp_r, <- Z.opp_add_distr. Z.nzsimpl. rewrite Z.opp_nonneg_nonpos. now apply Z.le_succ_l. - left. now apply Z.lt_le_pred. Qed. Lemma OMEGA20 x y z : Zne x 0 -> y = 0 -> Zne (x + y * z) 0. Proof. unfold Zne, not. intros H1 H2 H3; apply H1; rewrite H2 in H3; simpl in H3; rewrite Z.add_0_r in H3; trivial with arith. Qed. Definition fast_Zplus_comm (x y : Z) (P : Z -> Prop) (H : P (y + x)) := eq_ind_r P H (Z.add_comm x y). Definition fast_Zplus_assoc_reverse (n m p : Z) (P : Z -> Prop) (H : P (n + (m + p))) := eq_ind_r P H (Zplus_assoc_reverse n m p). Definition fast_Zplus_assoc (n m p : Z) (P : Z -> Prop) (H : P (n + m + p)) := eq_ind_r P H (Z.add_assoc n m p). Definition fast_Zplus_permute (n m p : Z) (P : Z -> Prop) (H : P (m + (n + p))) := eq_ind_r P H (Z.add_shuffle3 n m p). Definition fast_OMEGA10 (v c1 c2 l1 l2 k1 k2 : Z) (P : Z -> Prop) (H : P (v * (c1 * k1 + c2 * k2) + (l1 * k1 + l2 * k2))) := eq_ind_r P H (OMEGA10 v c1 c2 l1 l2 k1 k2). Definition fast_OMEGA11 (v1 c1 l1 l2 k1 : Z) (P : Z -> Prop) (H : P (v1 * (c1 * k1) + (l1 * k1 + l2))) := eq_ind_r P H (OMEGA11 v1 c1 l1 l2 k1). Definition fast_OMEGA12 (v2 c2 l1 l2 k2 : Z) (P : Z -> Prop) (H : P (v2 * (c2 * k2) + (l1 + l2 * k2))) := eq_ind_r P H (OMEGA12 v2 c2 l1 l2 k2). Definition fast_OMEGA15 (v c1 c2 l1 l2 k2 : Z) (P : Z -> Prop) (H : P (v * (c1 + c2 * k2) + (l1 + l2 * k2))) := eq_ind_r P H (OMEGA15 v c1 c2 l1 l2 k2). Definition fast_OMEGA16 (v c l k : Z) (P : Z -> Prop) (H : P (v * (c * k) + l * k)) := eq_ind_r P H (OMEGA16 v c l k). Definition fast_OMEGA13 (v l1 l2 : Z) (x : positive) (P : Z -> Prop) (H : P (l1 + l2)) := eq_ind_r P H (OMEGA13 v l1 l2 x). Definition fast_OMEGA14 (v l1 l2 : Z) (x : positive) (P : Z -> Prop) (H : P (l1 + l2)) := eq_ind_r P H (OMEGA14 v l1 l2 x). Definition fast_Zred_factor0 (x : Z) (P : Z -> Prop) (H : P (x * 1)) := eq_ind_r P H (Zred_factor0 x). Definition fast_Zopp_eq_mult_neg_1 (x : Z) (P : Z -> Prop) (H : P (x * -1)) := eq_ind_r P H (Z.opp_eq_mul_m1 x). Definition fast_Zmult_comm (x y : Z) (P : Z -> Prop) (H : P (y * x)) := eq_ind_r P H (Z.mul_comm x y). Definition fast_Zopp_plus_distr (x y : Z) (P : Z -> Prop) (H : P (- x + - y)) := eq_ind_r P H (Z.opp_add_distr x y). Definition fast_Zopp_involutive (x : Z) (P : Z -> Prop) (H : P x) := eq_ind_r P H (Z.opp_involutive x). Definition fast_Zopp_mult_distr_r (x y : Z) (P : Z -> Prop) (H : P (x * - y)) := eq_ind_r P H (Zopp_mult_distr_r x y). Definition fast_Zmult_plus_distr_l (n m p : Z) (P : Z -> Prop) (H : P (n * p + m * p)) := eq_ind_r P H (Z.mul_add_distr_r n m p). Definition fast_Zmult_opp_comm (x y : Z) (P : Z -> Prop) (H : P (x * - y)) := eq_ind_r P H (Z.mul_opp_comm x y). Definition fast_Zmult_assoc_reverse (n m p : Z) (P : Z -> Prop) (H : P (n * (m * p))) := eq_ind_r P H (Zmult_assoc_reverse n m p). Definition fast_Zred_factor1 (x : Z) (P : Z -> Prop) (H : P (x * 2)) := eq_ind_r P H (Zred_factor1 x). 
Definition fast_Zred_factor2 (x y : Z) (P : Z -> Prop) (H : P (x * (1 + y))) := eq_ind_r P H (Zred_factor2 x y). Definition fast_Zred_factor3 (x y : Z) (P : Z -> Prop) (H : P (x * (1 + y))) := eq_ind_r P H (Zred_factor3 x y). Definition fast_Zred_factor4 (x y z : Z) (P : Z -> Prop) (H : P (x * (y + z))) := eq_ind_r P H (Zred_factor4 x y z). Definition fast_Zred_factor5 (x y : Z) (P : Z -> Prop) (H : P y) := eq_ind_r P H (Zred_factor5 x y). Definition fast_Zred_factor6 (x : Z) (P : Z -> Prop) (H : P (x + 0)) := eq_ind_r P H (Zred_factor6 x). Theorem intro_Z : forall n:nat, exists y : Z, Z.of_nat n = y /\ 0 <= y * 1 + 0. Proof. intros n; exists (Z.of_nat n); split; trivial. rewrite Z.mul_1_r, Z.add_0_r. apply Nat2Z.is_nonneg. Qed. coq-8.4pl2/plugins/omega/OmegaPlugin.v0000640000175000001440000000106012010532755016773 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* nat) => abstract omega: zarith. Hint Extern 10 (_ <= _) => abstract omega: zarith. Hint Extern 10 (_ < _) => abstract omega: zarith. Hint Extern 10 (_ >= _) => abstract omega: zarith. Hint Extern 10 (_ > _) => abstract omega: zarith. Hint Extern 10 (_ <> _ :>nat) => abstract omega: zarith. Hint Extern 10 (~ _ <= _) => abstract omega: zarith. Hint Extern 10 (~ _ < _) => abstract omega: zarith. Hint Extern 10 (~ _ >= _) => abstract omega: zarith. Hint Extern 10 (~ _ > _) => abstract omega: zarith. Hint Extern 10 (_ = _ :>Z) => abstract omega: zarith. Hint Extern 10 (_ <= _)%Z => abstract omega: zarith. Hint Extern 10 (_ < _)%Z => abstract omega: zarith. Hint Extern 10 (_ >= _)%Z => abstract omega: zarith. Hint Extern 10 (_ > _)%Z => abstract omega: zarith. Hint Extern 10 (_ <> _ :>Z) => abstract omega: zarith. Hint Extern 10 (~ (_ <= _)%Z) => abstract omega: zarith. Hint Extern 10 (~ (_ < _)%Z) => abstract omega: zarith. Hint Extern 10 (~ (_ >= _)%Z) => abstract omega: zarith. Hint Extern 10 (~ (_ > _)%Z) => abstract omega: zarith. Hint Extern 10 False => abstract omega: zarith.coq-8.4pl2/plugins/pluginsdyn.itarget0000640000175000001440000000117111743265053017073 0ustar notinusersfield/field_plugin.cmxs setoid_ring/newring_plugin.cmxs extraction/extraction_plugin.cmxs decl_mode/decl_mode_plugin.cmxs firstorder/ground_plugin.cmxs rtauto/rtauto_plugin.cmxs fourier/fourier_plugin.cmxs romega/romega_plugin.cmxs omega/omega_plugin.cmxs micromega/micromega_plugin.cmxs xml/xml_plugin.cmxs subtac/subtac_plugin.cmxs ring/ring_plugin.cmxs cc/cc_plugin.cmxs nsatz/nsatz_plugin.cmxs funind/recdef_plugin.cmxs syntax/ascii_syntax_plugin.cmxs syntax/nat_syntax_plugin.cmxs syntax/numbers_syntax_plugin.cmxs syntax/r_syntax_plugin.cmxs syntax/string_syntax_plugin.cmxs syntax/z_syntax_plugin.cmxs quote/quote_plugin.cmxs coq-8.4pl2/plugins/quote/0000750000175000001440000000000012127276540014452 5ustar notinuserscoq-8.4pl2/plugins/quote/Quote.v0000640000175000001440000000505012010532755015731 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool index_eq_prop: (n,m:index)(index_eq n m)=true -> n=m index_lt : index -> bool varmap : Type -> Type. varmap_find : (A:Type)A -> index -> (varmap A) -> A. The first arg. of varmap_find is the default value to take if the object is not found in the varmap. index_lt defines a total well-founded order, but we don't prove that. 
***********************************************************************) Set Implicit Arguments. Section variables_map. Variable A : Type. Inductive varmap : Type := | Empty_vm : varmap | Node_vm : A -> varmap -> varmap -> varmap. Inductive index : Set := | Left_idx : index -> index | Right_idx : index -> index | End_idx : index. Fixpoint varmap_find (default_value:A) (i:index) (v:varmap) {struct v} : A := match i, v with | End_idx, Node_vm x _ _ => x | Right_idx i1, Node_vm x v1 v2 => varmap_find default_value i1 v2 | Left_idx i1, Node_vm x v1 v2 => varmap_find default_value i1 v1 | _, _ => default_value end. Fixpoint index_eq (n m:index) {struct m} : bool := match n, m with | End_idx, End_idx => true | Left_idx n', Left_idx m' => index_eq n' m' | Right_idx n', Right_idx m' => index_eq n' m' | _, _ => false end. Fixpoint index_lt (n m:index) {struct m} : bool := match n, m with | End_idx, Left_idx _ => true | End_idx, Right_idx _ => true | Left_idx n', Right_idx m' => true | Right_idx n', Right_idx m' => index_lt n' m' | Left_idx n', Left_idx m' => index_lt n' m' | _, _ => false end. Lemma index_eq_prop : forall n m:index, index_eq n m = true -> n = m. simple induction n; simple induction m; simpl; intros. rewrite (H i0 H1); reflexivity. discriminate. discriminate. discriminate. rewrite (H i0 H1); reflexivity. discriminate. discriminate. discriminate. reflexivity. Qed. End variables_map. Unset Implicit Arguments. coq-8.4pl2/plugins/quote/vo.itarget0000640000175000001440000000001011307752066016450 0ustar notinusersQuote.vocoq-8.4pl2/plugins/quote/quote.ml0000640000175000001440000004146212121620060016131 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (varmap A L) -> A}. Then, the tactic \texttt{quote f} will replace an expression \texttt{e} of type \texttt{A} by \texttt{(f vm t)} such that \texttt{e} and \texttt{(f vm t)} are convertible. The problem is then inverting the function \texttt{f}. The tactic works when: \begin{itemize} \item L is a simple inductive datatype. The constructors of L may have one of the three following forms: \begin{enumerate} \item ordinary recursive constructors like: \verb|Cplus : L -> L -> L| \item variable leaf like: \verb|Cvar : index -> L| \item constant leaf like \verb|Cconst : A -> L| \end{enumerate} The definition of \texttt{L} must contain at most one variable leaf and at most one constant leaf. When there are both a variable leaf and a constant leaf, there is an ambiguity on inversion. The term t can be either the interpretation of \texttt{(Cconst t)} or the interpretation of (\texttt{Cvar}~$i$) in a variable map containing the binding $i \rightarrow$~\texttt{t}. How to discriminate between these choices? To solve the dilemma, one gives to \texttt{quote} a list of \emph{constant constructors}: a term will be considered as a constant if it is either a constant constructor or the application of a constant constructor to constants. For example the list \verb+[S, O]+ defines the closed natural numbers. \texttt{(S (S O))} is a constant when \texttt{(S x)} is not. The definition of constants vary for each application of the tactic, so it can even be different for two applications of \texttt{quote} with the same function. \item \texttt{f} is a quite simple fixpoint on \texttt{L}. 
In particular, \texttt{f} must verify: \begin{verbatim} (f (Cvar i)) = (varmap_find vm default_value i) \end{verbatim} \begin{verbatim} (f (Cconst c)) = c \end{verbatim} where \texttt{index} and \texttt{varmap\_find} are those defined the \texttt{Quote} module. \emph{The tactic won't work with user's own variables map!!} It is mandatory to use the variable map defined in module \texttt{Quote}. \end{itemize} The method to proceed is then clear: \begin{itemize} \item Start with an empty hashtable of "registed leafs" that maps constr to integers and a "variable counter" equal to 0. \item Try to match the term with every right hand side of the definition of \texttt{f}. If there is one match, returns the correponding left hand side and call yourself recursively to get the arguments of this left hand side. If there is no match, we are at a leaf. That is the interpretation of either a variable or a constant. If it is a constant, return \texttt{Cconst} applied to that constant. If not, it is a variable. Look in the hashtable if this leaf has been already encountered. If not, increment the variable counter and add an entry to the hashtable; then return \texttt{(Cvar !variables\_counter)} \end{itemize} *) (*i*) open Pp open Util open Names open Term open Pattern open Matching open Tacmach open Tactics open Tacexpr (*i*) (*s First, we need to access some Coq constants We do that lazily, because this code can be linked before the constants are loaded in the environment *) let constant dir s = Coqlib.gen_constant "Quote" ("quote"::dir) s let coq_Empty_vm = lazy (constant ["Quote"] "Empty_vm") let coq_Node_vm = lazy (constant ["Quote"] "Node_vm") let coq_varmap_find = lazy (constant ["Quote"] "varmap_find") let coq_Right_idx = lazy (constant ["Quote"] "Right_idx") let coq_Left_idx = lazy (constant ["Quote"] "Left_idx") let coq_End_idx = lazy (constant ["Quote"] "End_idx") (*s Then comes the stuff to decompose the body of interpetation function and pre-compute the inversion data. For a function like: \begin{verbatim} Fixpoint interp (vm:varmap Prop) (f:form) := match f with | f_and f1 f1 f2 => (interp f1) /\ (interp f2) | f_or f1 f1 f2 => (interp f1) \/ (interp f2) | f_var i => varmap_find Prop default_v i vm | f_const c => c end. \end{verbatim} With the constant constructors \texttt{C1}, \dots, \texttt{Cn}, the corresponding scheme will be: \begin{verbatim} {normal_lhs_rhs = [ "(f_and ?1 ?2)", "?1 /\ ?2"; "(f_or ?1 ?2)", " ?1 \/ ?2";]; return_type = "Prop"; constants = Some [C1,...Cn]; variable_lhs = Some "(f_var ?1)"; constant_lhs = Some "(f_const ?1)" } \end{verbatim} If there is no constructor for variables in the type \texttt{form}, then [variable_lhs] is [None]. Idem for constants and [constant_lhs]. Both cannot be equal to [None]. The metas in the RHS must correspond to those in the LHS (one cannot exchange ?1 and ?2 in the example above) *) module ConstrSet = Set.Make( struct type t = constr let compare = constr_ord end) type inversion_scheme = { normal_lhs_rhs : (constr * constr_pattern) list; variable_lhs : constr option; return_type : constr; constants : ConstrSet.t; constant_lhs : constr option } (*s [compute_ivs gl f cs] computes the inversion scheme associated to [f:constr] with constants list [cs:constr list] in the context of goal [gl]. This function uses the auxiliary functions [i_can't_do_that], [decomp_term], [compute_lhs] and [compute_rhs]. 
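   As a concrete illustration (added here; not part of the original
   comment), the canonical client of this machinery looks like:

     Inductive formula : Type :=
       | f_and : formula -> formula -> formula
       | f_or : formula -> formula -> formula
       | f_var : index -> formula
       | f_true : formula.

     Fixpoint interp_f (vm : varmap Prop) (f : formula) : Prop :=
       match f with
       | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2
       | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2
       | f_var i => varmap_find True i vm
       | f_true => True
       end.

   On a goal such as [A \/ B /\ A], the tactic [quote interp_f] then
   produces a convertible goal of the form [interp_f vm t], where [vm]
   maps indices to the atomic propositions [A] and [B], and [t] is the
   syntax tree recovered by inverting [interp_f] with the inversion
   scheme that [compute_ivs] builds.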
*) let i_can't_do_that () = error "Quote: not a simple fixpoint" let decomp_term c = kind_of_term (strip_outer_cast c) (*s [compute_lhs typ i nargsi] builds the term \texttt{(C ?nargsi ... ?2 ?1)}, where \texttt{C} is the [i]-th constructor of inductive type [typ] *) let coerce_meta_out id = let s = string_of_id id in int_of_string (String.sub s 1 (String.length s - 1)) let coerce_meta_in n = id_of_string ("M" ^ string_of_int n) let compute_lhs typ i nargsi = match kind_of_term typ with | Ind(sp,0) -> let argsi = Array.init nargsi (fun j -> mkMeta (nargsi - j)) in mkApp (mkConstruct ((sp,0),i+1), argsi) | _ -> i_can't_do_that () (*s This function builds the pattern from the RHS. Recursive calls are replaced by meta-variables ?i corresponding to those in the LHS *) let compute_rhs bodyi index_of_f = let rec aux c = match kind_of_term c with | App (j, args) when isRel j && destRel j = index_of_f (* recursive call *) -> let i = destRel (array_last args) in PMeta (Some (coerce_meta_in i)) | App (f,args) -> PApp (snd (pattern_of_constr Evd.empty f), Array.map aux args) | Cast (c,_,_) -> aux c | _ -> snd (pattern_of_constr Evd.empty c) in aux bodyi (*s Now the function [compute_ivs] itself *) let compute_ivs gl f cs = let cst = try destConst f with e when Errors.noncritical e -> i_can't_do_that () in let body = Environ.constant_value (Global.env()) cst in match decomp_term body with | Fix(([| len |], 0), ([| name |], [| typ |], [| body2 |])) -> let (args3, body3) = decompose_lam body2 in let nargs3 = List.length args3 in begin match decomp_term body3 with | Case(_,p,c,lci) -> (*

Lemma mul_sub_distr_r p q r : q<p -> (p-q)*r = p*r-q*r.
Proof.
 intros H. rewrite 3 (mul_comm _ r). now apply mul_sub_distr_l.
Qed.

Lemma sub_lt_mono_l p q r: q<p -> p<r -> r-p < r-q.
Proof.
 intros Hqp Hpr. apply (add_lt_mono_r p). rewrite sub_add by trivial.
 apply le_lt_trans with ((r-q)+q).
 rewrite sub_add by (now apply lt_trans with p). apply le_refl.
 now apply add_lt_mono_l.
Qed.

Lemma sub_compare_mono_l p q r : q<p -> r<p -> (p-q ?= p-r) = (r ?= q).
Proof.
 intros Hqp Hrp. case (compare_spec r q); intros H. subst. apply compare_refl.
 apply sub_lt_mono_l; trivial.
 apply lt_gt, sub_lt_mono_l; trivial.
Qed.

Lemma sub_compare_mono_r p q r : p<q -> p<r -> (q-p ?= r-p) = (q ?= r).
Proof.
 intros. rewrite <- (add_compare_mono_r p), 2 sub_add; trivial.
Qed.

Lemma sub_lt_mono_r p q r : q<p ->
      r q-r < p-r. Proof. intros. unfold lt. rewrite sub_compare_mono_r; trivial. now apply lt_trans with q. Qed. Lemma sub_decr n m : m n-m < n. Proof. intros. apply add_lt_mono_r with m. rewrite sub_add; trivial. apply lt_add_r. Qed. Lemma add_sub_assoc p q r : r p+(q-r) = p+q-r. Proof. intros. apply add_reg_r with r. rewrite <- add_assoc, !sub_add; trivial. rewrite add_comm. apply lt_trans with q; trivial using lt_add_r. Qed. Lemma sub_add_distr p q r : q+r < p -> p-(q+r) = p-q-r. Proof. intros. assert (q < p) by (apply lt_trans with (q+r); trivial using lt_add_r). rewrite (add_comm q r) in *. apply add_reg_r with (r+q). rewrite sub_add by trivial. rewrite add_assoc, !sub_add; trivial. apply (add_lt_mono_r q). rewrite sub_add; trivial. Qed. Lemma sub_sub_distr p q r : r q-r < p -> p-(q-r) = p+r-q. Proof. intros. apply add_reg_r with ((q-r)+r). rewrite add_assoc, !sub_add; trivial. rewrite <- (sub_add q r); trivial. now apply add_lt_mono_r. Qed. (** Recursive equations for [sub] *) Lemma sub_xO_xO n m : m n~0 - m~0 = (n-m)~0. Proof. intros H. unfold sub. simpl. now destruct (sub_mask_pos n m H) as (p, ->). Qed. Lemma sub_xI_xI n m : m n~1 - m~1 = (n-m)~0. Proof. intros H. unfold sub. simpl. now destruct (sub_mask_pos n m H) as (p, ->). Qed. Lemma sub_xI_xO n m : m n~1 - m~0 = (n-m)~1. Proof. intros H. unfold sub. simpl. now destruct (sub_mask_pos n m) as (p, ->). Qed. Lemma sub_xO_xI n m : n~0 - m~1 = pred_double (n-m). Proof. unfold sub. simpl. rewrite sub_mask_carry_spec. now destruct (sub_mask n m) as [|[r|r|]|]. Qed. (** Properties of subtraction with underflow *) Lemma sub_mask_neg_iff' p q : sub_mask p q = IsNeg <-> p < q. Proof. rewrite lt_iff_add. apply sub_mask_neg_iff. Qed. Lemma sub_mask_neg p q : p sub_mask p q = IsNeg. Proof. apply sub_mask_neg_iff'. Qed. Lemma sub_le p q : p<=q -> p-q = 1. Proof. unfold le, sub. rewrite compare_sub_mask. destruct sub_mask; easy'. Qed. Lemma sub_lt p q : p p-q = 1. Proof. intros. now apply sub_le, lt_le_incl. Qed. Lemma sub_diag p : p-p = 1. Proof. unfold sub. now rewrite sub_mask_diag. Qed. (** ** Results concerning [size] and [size_nat] *) Lemma size_nat_monotone p q : p (size_nat p <= size_nat q)%nat. Proof. assert (le0 : forall n, (0<=n)%nat) by (induction n; auto). assert (leS : forall n m, (n<=m -> S n <= S m)%nat) by (induction 1; auto). revert q. induction p; destruct q; simpl; intros; auto; easy || apply leS; red in H; simpl_compare_in H. apply IHp. red. now destruct (p?=q). destruct (compare_spec p q); subst; now auto. Qed. Lemma size_gt p : p < 2^(size p). Proof. induction p; simpl; try rewrite pow_succ_r; try easy. apply le_succ_l in IHp. now apply le_succ_l. Qed. Lemma size_le p : 2^(size p) <= p~0. Proof. induction p; simpl; try rewrite pow_succ_r; try easy. apply mul_le_mono_l. apply le_lteq; left. rewrite xI_succ_xO. apply lt_succ_r, IHp. Qed. (** ** Properties of [min] and [max] *) (** First, the specification *) Lemma max_l : forall x y, y<=x -> max x y = x. Proof. intros x y H. unfold max. case compare_spec; auto. intros H'. apply le_nlt in H. now elim H. Qed. Lemma max_r : forall x y, x<=y -> max x y = y. Proof. unfold le, max. intros x y. destruct compare; easy'. Qed. Lemma min_l : forall x y, x<=y -> min x y = x. Proof. unfold le, min. intros x y. destruct compare; easy'. Qed. Lemma min_r : forall x y, y<=x -> min x y = y. Proof. intros x y H. unfold min. case compare_spec; auto. intros H'. apply le_nlt in H. now elim H'. Qed. (** We hence obtain all the generic properties of [min] and [max]. 
*) Include UsualMinMaxLogicalProperties <+ UsualMinMaxDecProperties. Ltac order := Private_Tac.order. (** Minimum, maximum and constant one *) Lemma max_1_l n : max 1 n = n. Proof. unfold max. case compare_spec; auto. intros H. apply lt_nle in H. elim H. apply le_1_l. Qed. Lemma max_1_r n : max n 1 = n. Proof. rewrite max_comm. apply max_1_l. Qed. Lemma min_1_l n : min 1 n = 1. Proof. unfold min. case compare_spec; auto. intros H. apply lt_nle in H. elim H. apply le_1_l. Qed. Lemma min_1_r n : min n 1 = 1. Proof. rewrite min_comm. apply min_1_l. Qed. (** Minimum, maximum and operations (consequences of monotonicity) *) Lemma succ_max_distr n m : succ (max n m) = max (succ n) (succ m). Proof. symmetry. apply max_monotone. intros x x'. apply succ_le_mono. Qed. Lemma succ_min_distr n m : succ (min n m) = min (succ n) (succ m). Proof. symmetry. apply min_monotone. intros x x'. apply succ_le_mono. Qed. Lemma add_max_distr_l n m p : max (p + n) (p + m) = p + max n m. Proof. apply max_monotone. intros x x'. apply add_le_mono_l. Qed. Lemma add_max_distr_r n m p : max (n + p) (m + p) = max n m + p. Proof. rewrite 3 (add_comm _ p). apply add_max_distr_l. Qed. Lemma add_min_distr_l n m p : min (p + n) (p + m) = p + min n m. Proof. apply min_monotone. intros x x'. apply add_le_mono_l. Qed. Lemma add_min_distr_r n m p : min (n + p) (m + p) = min n m + p. Proof. rewrite 3 (add_comm _ p). apply add_min_distr_l. Qed. Lemma mul_max_distr_l n m p : max (p * n) (p * m) = p * max n m. Proof. apply max_monotone. intros x x'. apply mul_le_mono_l. Qed. Lemma mul_max_distr_r n m p : max (n * p) (m * p) = max n m * p. Proof. rewrite 3 (mul_comm _ p). apply mul_max_distr_l. Qed. Lemma mul_min_distr_l n m p : min (p * n) (p * m) = p * min n m. Proof. apply min_monotone. intros x x'. apply mul_le_mono_l. Qed. Lemma mul_min_distr_r n m p : min (n * p) (m * p) = min n m * p. Proof. rewrite 3 (mul_comm _ p). apply mul_min_distr_l. Qed. (** ** Results concerning [iter_op] *) Lemma iter_op_succ : forall A (op:A->A->A), (forall x y z, op x (op y z) = op (op x y) z) -> forall p a, iter_op op (succ p) a = op a (iter_op op p a). Proof. induction p; simpl; intros; trivial. rewrite H. apply IHp. Qed. (** ** Results about [of_nat] and [of_succ_nat] *) Lemma of_nat_succ (n:nat) : of_succ_nat n = of_nat (S n). Proof. induction n. trivial. simpl. f_equal. now rewrite IHn. Qed. Lemma pred_of_succ_nat (n:nat) : pred (of_succ_nat n) = of_nat n. Proof. destruct n. trivial. simpl pred. rewrite pred_succ. apply of_nat_succ. Qed. Lemma succ_of_nat (n:nat) : n<>O -> succ (of_nat n) = of_succ_nat n. Proof. rewrite of_nat_succ. destruct n; trivial. now destruct 1. Qed. (** ** Correctness proofs for the square root function *) Inductive SqrtSpec : positive*mask -> positive -> Prop := | SqrtExact s x : x=s*s -> SqrtSpec (s,IsNul) x | SqrtApprox s r x : x=s*s+r -> r <= s~0 -> SqrtSpec (s,IsPos r) x. Lemma sqrtrem_step_spec f g p x : (f=xO \/ f=xI) -> (g=xO \/ g=xI) -> SqrtSpec p x -> SqrtSpec (sqrtrem_step f g p) (g (f x)). Proof. intros Hf Hg [ s _ -> | s r _ -> Hr ]. (* exact *) unfold sqrtrem_step. destruct Hf,Hg; subst; simpl; constructor; now rewrite ?square_xO. (* approx *) assert (Hfg : forall p q, g (f (p+q)) = p~0~0 + g (f q)) by (intros; destruct Hf, Hg; now subst). unfold sqrtrem_step, leb. case compare_spec; [intros EQ | intros LT | intros GT]. (* - EQ *) rewrite <- EQ, sub_mask_diag. constructor. destruct Hg; subst g; destr_eq EQ. destruct Hf; subst f; destr_eq EQ. subst. now rewrite square_xI. 
(* - LT *) destruct (sub_mask_pos' _ _ LT) as (y & -> & H). constructor. rewrite Hfg, <- H. now rewrite square_xI, add_assoc. clear Hfg. rewrite <- lt_succ_r in Hr. change (r < s~1) in Hr. rewrite <- lt_succ_r, (add_lt_mono_l (s~0~1)), H. simpl. rewrite add_carry_spec, add_diag. simpl. destruct Hf,Hg; subst; red; simpl_compare; now rewrite Hr. (* - GT *) constructor. now rewrite Hfg, square_xO. apply lt_succ_r, GT. Qed. Lemma sqrtrem_spec p : SqrtSpec (sqrtrem p) p. Proof. revert p. fix 1. destruct p; try destruct p; try (constructor; easy); apply sqrtrem_step_spec; auto. Qed. Lemma sqrt_spec p : let s := sqrt p in s*s <= p < (succ s)*(succ s). Proof. simpl. assert (H:=sqrtrem_spec p). unfold sqrt in *. destruct sqrtrem as (s,rm); simpl. inversion_clear H; subst. (* exact *) split. reflexivity. apply mul_lt_mono; apply lt_succ_diag_r. (* approx *) split. apply lt_le_incl, lt_add_r. rewrite <- add_1_l, mul_add_distr_r, !mul_add_distr_l, !mul_1_r, !mul_1_l. rewrite add_assoc, (add_comm _ r). apply add_lt_mono_r. now rewrite <- add_assoc, add_diag, add_1_l, lt_succ_r. Qed. (** ** Correctness proofs for the gcd function *) Lemma divide_add_cancel_l p q r : (p | r) -> (p | q + r) -> (p | q). Proof. intros (s,Hs) (t,Ht). exists (t-s). rewrite mul_sub_distr_r. rewrite <- Hs, <- Ht. symmetry. apply add_sub. apply mul_lt_mono_r with p. rewrite <- Hs, <- Ht, add_comm. apply lt_add_r. Qed. Lemma divide_xO_xI p q r : (p | q~0) -> (p | r~1) -> (p | q). Proof. intros (s,Hs) (t,Ht). destruct p. destruct s; try easy. simpl in Hs. destr_eq Hs. now exists s. rewrite mul_xO_r in Ht; discriminate. exists q; now rewrite mul_1_r. Qed. Lemma divide_xO_xO p q : (p~0|q~0) <-> (p|q). Proof. split; intros (r,H); simpl in *. rewrite mul_xO_r in H. destr_eq H. now exists r. exists r; simpl. rewrite mul_xO_r. f_equal; auto. Qed. Lemma divide_mul_l p q r : (p|q) -> (p|q*r). Proof. intros (s,H). exists (s*r). rewrite <- mul_assoc, (mul_comm r p), mul_assoc. now f_equal. Qed. Lemma divide_mul_r p q r : (p|r) -> (p|q*r). Proof. rewrite mul_comm. apply divide_mul_l. Qed. (** The first component of ggcd is gcd *) Lemma ggcdn_gcdn : forall n a b, fst (ggcdn n a b) = gcdn n a b. Proof. induction n. simpl; auto. destruct a, b; simpl; auto; try case compare_spec; simpl; trivial; rewrite <- IHn; destruct ggcdn as (g,(u,v)); simpl; auto. Qed. Lemma ggcd_gcd : forall a b, fst (ggcd a b) = gcd a b. Proof. unfold ggcd, gcd. intros. apply ggcdn_gcdn. Qed. (** The other components of ggcd are indeed the correct factors. *) Ltac destr_pggcdn IHn := match goal with |- context [ ggcdn _ ?x ?y ] => generalize (IHn x y); destruct ggcdn as (g,(u,v)); simpl end. Lemma ggcdn_correct_divisors : forall n a b, let '(g,(aa,bb)) := ggcdn n a b in a = g*aa /\ b = g*bb. Proof. induction n. simpl; auto. destruct a, b; simpl; auto; try case compare_spec; try destr_pggcdn IHn. (* Eq *) intros ->. now rewrite mul_comm. (* Lt *) intros (H',H) LT; split; auto. rewrite mul_add_distr_l, mul_xO_r, <- H, <- H'. simpl. f_equal. symmetry. rewrite add_comm. now apply sub_add. (* Gt *) intros (H',H) LT; split; auto. rewrite mul_add_distr_l, mul_xO_r, <- H, <- H'. simpl. f_equal. symmetry. rewrite add_comm. now apply sub_add. (* Then... *) intros (H,H'); split; auto. rewrite mul_xO_r, H'; auto. intros (H,H'); split; auto. rewrite mul_xO_r, H; auto. intros (H,H'); split; subst; auto. Qed. Lemma ggcd_correct_divisors : forall a b, let '(g,(aa,bb)) := ggcd a b in a=g*aa /\ b=g*bb. Proof. unfold ggcd. intros. apply ggcdn_correct_divisors. Qed. 
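(* Added concrete illustration, not in the original file: on numerals,
   [Pos.ggcd 12 8] computes to [(4, (3, 2))], i.e. the gcd 4 together with
   the two cofactors 3 and 2, and indeed 12 = 4*3 and 8 = 4*2, as stated
   by [ggcd_correct_divisors]. *)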
(** We can use this fact to prove a part of the gcd correctness *) Lemma gcd_divide_l : forall a b, (gcd a b | a). Proof. intros a b. rewrite <- ggcd_gcd. generalize (ggcd_correct_divisors a b). destruct ggcd as (g,(aa,bb)); simpl. intros (H,_). exists aa. now rewrite mul_comm. Qed. Lemma gcd_divide_r : forall a b, (gcd a b | b). Proof. intros a b. rewrite <- ggcd_gcd. generalize (ggcd_correct_divisors a b). destruct ggcd as (g,(aa,bb)); simpl. intros (_,H). exists bb. now rewrite mul_comm. Qed. (** We now prove directly that gcd is the greatest amongst common divisors *) Lemma gcdn_greatest : forall n a b, (size_nat a + size_nat b <= n)%nat -> forall p, (p|a) -> (p|b) -> (p|gcdn n a b). Proof. induction n. destruct a, b; simpl; inversion 1. destruct a, b; simpl; try case compare_spec; simpl; auto. (* Lt *) intros LT LE p Hp1 Hp2. apply IHn; clear IHn; trivial. apply le_S_n in LE. eapply Le.le_trans; [|eapply LE]. rewrite plus_comm, <- plus_n_Sm, <- plus_Sn_m. apply plus_le_compat; trivial. apply size_nat_monotone, sub_decr, LT. apply divide_xO_xI with a; trivial. apply (divide_add_cancel_l p _ a~1); trivial. now rewrite <- sub_xI_xI, sub_add. (* Gt *) intros LT LE p Hp1 Hp2. apply IHn; clear IHn; trivial. apply le_S_n in LE. eapply Le.le_trans; [|eapply LE]. apply plus_le_compat; trivial. apply size_nat_monotone, sub_decr, LT. apply divide_xO_xI with b; trivial. apply (divide_add_cancel_l p _ b~1); trivial. now rewrite <- sub_xI_xI, sub_add. (* a~1 b~0 *) intros LE p Hp1 Hp2. apply IHn; clear IHn; trivial. apply le_S_n in LE. simpl. now rewrite plus_n_Sm. apply divide_xO_xI with a; trivial. (* a~0 b~1 *) intros LE p Hp1 Hp2. apply IHn; clear IHn; trivial. simpl. now apply le_S_n. apply divide_xO_xI with b; trivial. (* a~0 b~0 *) intros LE p Hp1 Hp2. destruct p. change (gcdn n a b)~0 with (2*(gcdn n a b)). apply divide_mul_r. apply IHn; clear IHn. apply le_S_n in LE. apply le_Sn_le. now rewrite plus_n_Sm. apply divide_xO_xI with p; trivial. now exists 1. apply divide_xO_xI with p; trivial. now exists 1. apply divide_xO_xO. apply IHn; clear IHn. apply le_S_n in LE. apply le_Sn_le. now rewrite plus_n_Sm. now apply divide_xO_xO. now apply divide_xO_xO. exists (gcdn n a b)~0. now rewrite mul_1_r. Qed. Lemma gcd_greatest : forall a b p, (p|a) -> (p|b) -> (p|gcd a b). Proof. intros. apply gcdn_greatest; auto. Qed. (** As a consequence, the rests after division by gcd are relatively prime *) Lemma ggcd_greatest : forall a b, let (aa,bb) := snd (ggcd a b) in forall p, (p|aa) -> (p|bb) -> p=1. Proof. intros. generalize (gcd_greatest a b) (ggcd_correct_divisors a b). rewrite <- ggcd_gcd. destruct ggcd as (g,(aa,bb)); simpl. intros H (EQa,EQb) p Hp1 Hp2; subst. assert (H' : (g*p | g)). apply H. destruct Hp1 as (r,Hr). exists r. now rewrite mul_assoc, (mul_comm r g), <- mul_assoc, <- Hr. destruct Hp2 as (r,Hr). exists r. now rewrite mul_assoc, (mul_comm r g), <- mul_assoc, <- Hr. destruct H' as (q,H'). rewrite (mul_comm g p), mul_assoc in H'. apply mul_eq_1 with q; rewrite mul_comm. now apply mul_reg_r with g. Qed. End Pos. (** Exportation of notations *) Infix "+" := Pos.add : positive_scope. Infix "-" := Pos.sub : positive_scope. Infix "*" := Pos.mul : positive_scope. Infix "^" := Pos.pow : positive_scope. Infix "?=" := Pos.compare (at level 70, no associativity) : positive_scope. Infix "=?" := Pos.eqb (at level 70, no associativity) : positive_scope. Infix "<=?" := Pos.leb (at level 70, no associativity) : positive_scope. Infix "=" := Pos.ge : positive_scope. Infix ">" := Pos.gt : positive_scope. 
Notation "x <= y <= z" := (x <= y /\ y <= z) : positive_scope. Notation "x <= y < z" := (x <= y /\ y < z) : positive_scope. Notation "x < y < z" := (x < y /\ y < z) : positive_scope. Notation "x < y <= z" := (x < y /\ y <= z) : positive_scope. Notation "( p | q )" := (Pos.divide p q) (at level 0) : positive_scope. (** Compatibility notations *) Notation positive := positive (only parsing). Notation positive_rect := positive_rect (only parsing). Notation positive_rec := positive_rec (only parsing). Notation positive_ind := positive_ind (only parsing). Notation xI := xI (only parsing). Notation xO := xO (only parsing). Notation xH := xH (only parsing). Notation IsNul := Pos.IsNul (only parsing). Notation IsPos := Pos.IsPos (only parsing). Notation IsNeg := Pos.IsNeg (only parsing). Notation Psucc := Pos.succ (compat "8.3"). Notation Pplus := Pos.add (compat "8.3"). Notation Pplus_carry := Pos.add_carry (compat "8.3"). Notation Ppred := Pos.pred (compat "8.3"). Notation Piter_op := Pos.iter_op (compat "8.3"). Notation Piter_op_succ := Pos.iter_op_succ (compat "8.3"). Notation Pmult_nat := (Pos.iter_op plus) (compat "8.3"). Notation nat_of_P := Pos.to_nat (compat "8.3"). Notation P_of_succ_nat := Pos.of_succ_nat (compat "8.3"). Notation Pdouble_minus_one := Pos.pred_double (compat "8.3"). Notation positive_mask := Pos.mask (compat "8.3"). Notation positive_mask_rect := Pos.mask_rect (compat "8.3"). Notation positive_mask_ind := Pos.mask_ind (compat "8.3"). Notation positive_mask_rec := Pos.mask_rec (compat "8.3"). Notation Pdouble_plus_one_mask := Pos.succ_double_mask (compat "8.3"). Notation Pdouble_mask := Pos.double_mask (compat "8.3"). Notation Pdouble_minus_two := Pos.double_pred_mask (compat "8.3"). Notation Pminus_mask := Pos.sub_mask (compat "8.3"). Notation Pminus_mask_carry := Pos.sub_mask_carry (compat "8.3"). Notation Pminus := Pos.sub (compat "8.3"). Notation Pmult := Pos.mul (compat "8.3"). Notation iter_pos := @Pos.iter (compat "8.3"). Notation Ppow := Pos.pow (compat "8.3"). Notation Pdiv2 := Pos.div2 (compat "8.3"). Notation Pdiv2_up := Pos.div2_up (compat "8.3"). Notation Psize := Pos.size_nat (compat "8.3"). Notation Psize_pos := Pos.size (compat "8.3"). Notation Pcompare := Pos.compare_cont (compat "8.3"). Notation Plt := Pos.lt (compat "8.3"). Notation Pgt := Pos.gt (compat "8.3"). Notation Ple := Pos.le (compat "8.3"). Notation Pge := Pos.ge (compat "8.3"). Notation Pmin := Pos.min (compat "8.3"). Notation Pmax := Pos.max (compat "8.3"). Notation Peqb := Pos.eqb (compat "8.3"). Notation positive_eq_dec := Pos.eq_dec (compat "8.3"). Notation xI_succ_xO := Pos.xI_succ_xO (compat "8.3"). Notation Psucc_discr := Pos.succ_discr (compat "8.3"). Notation Psucc_o_double_minus_one_eq_xO := Pos.succ_pred_double (compat "8.3"). Notation Pdouble_minus_one_o_succ_eq_xI := Pos.pred_double_succ (compat "8.3"). Notation xO_succ_permute := Pos.double_succ (compat "8.3"). Notation double_moins_un_xO_discr := Pos.pred_double_xO_discr (compat "8.3"). Notation Psucc_not_one := Pos.succ_not_1 (compat "8.3"). Notation Ppred_succ := Pos.pred_succ (compat "8.3"). Notation Psucc_pred := Pos.succ_pred_or (compat "8.3"). Notation Psucc_inj := Pos.succ_inj (compat "8.3"). Notation Pplus_carry_spec := Pos.add_carry_spec (compat "8.3"). Notation Pplus_comm := Pos.add_comm (compat "8.3"). Notation Pplus_succ_permute_r := Pos.add_succ_r (compat "8.3"). Notation Pplus_succ_permute_l := Pos.add_succ_l (compat "8.3"). Notation Pplus_no_neutral := Pos.add_no_neutral (compat "8.3"). 
Notation Pplus_carry_plus := Pos.add_carry_add (compat "8.3"). Notation Pplus_reg_r := Pos.add_reg_r (compat "8.3"). Notation Pplus_reg_l := Pos.add_reg_l (compat "8.3"). Notation Pplus_carry_reg_r := Pos.add_carry_reg_r (compat "8.3"). Notation Pplus_carry_reg_l := Pos.add_carry_reg_l (compat "8.3"). Notation Pplus_assoc := Pos.add_assoc (compat "8.3"). Notation Pplus_xO := Pos.add_xO (compat "8.3"). Notation Pplus_xI_double_minus_one := Pos.add_xI_pred_double (compat "8.3"). Notation Pplus_xO_double_minus_one := Pos.add_xO_pred_double (compat "8.3"). Notation Pplus_diag := Pos.add_diag (compat "8.3"). Notation PeanoView := Pos.PeanoView (compat "8.3"). Notation PeanoOne := Pos.PeanoOne (compat "8.3"). Notation PeanoSucc := Pos.PeanoSucc (compat "8.3"). Notation PeanoView_rect := Pos.PeanoView_rect (compat "8.3"). Notation PeanoView_ind := Pos.PeanoView_ind (compat "8.3"). Notation PeanoView_rec := Pos.PeanoView_rec (compat "8.3"). Notation peanoView_xO := Pos.peanoView_xO (compat "8.3"). Notation peanoView_xI := Pos.peanoView_xI (compat "8.3"). Notation peanoView := Pos.peanoView (compat "8.3"). Notation PeanoView_iter := Pos.PeanoView_iter (compat "8.3"). Notation eq_dep_eq_positive := Pos.eq_dep_eq_positive (compat "8.3"). Notation PeanoViewUnique := Pos.PeanoViewUnique (compat "8.3"). Notation Prect := Pos.peano_rect (compat "8.3"). Notation Prect_succ := Pos.peano_rect_succ (compat "8.3"). Notation Prect_base := Pos.peano_rect_base (compat "8.3"). Notation Prec := Pos.peano_rec (compat "8.3"). Notation Pind := Pos.peano_ind (compat "8.3"). Notation Pcase := Pos.peano_case (compat "8.3"). Notation Pmult_1_r := Pos.mul_1_r (compat "8.3"). Notation Pmult_Sn_m := Pos.mul_succ_l (compat "8.3"). Notation Pmult_xO_permute_r := Pos.mul_xO_r (compat "8.3"). Notation Pmult_xI_permute_r := Pos.mul_xI_r (compat "8.3"). Notation Pmult_comm := Pos.mul_comm (compat "8.3"). Notation Pmult_plus_distr_l := Pos.mul_add_distr_l (compat "8.3"). Notation Pmult_plus_distr_r := Pos.mul_add_distr_r (compat "8.3"). Notation Pmult_assoc := Pos.mul_assoc (compat "8.3"). Notation Pmult_xI_mult_xO_discr := Pos.mul_xI_mul_xO_discr (compat "8.3"). Notation Pmult_xO_discr := Pos.mul_xO_discr (compat "8.3"). Notation Pmult_reg_r := Pos.mul_reg_r (compat "8.3"). Notation Pmult_reg_l := Pos.mul_reg_l (compat "8.3"). Notation Pmult_1_inversion_l := Pos.mul_eq_1_l (compat "8.3"). Notation Psquare_xO := Pos.square_xO (compat "8.3"). Notation Psquare_xI := Pos.square_xI (compat "8.3"). Notation iter_pos_swap_gen := Pos.iter_swap_gen (compat "8.3"). Notation iter_pos_swap := Pos.iter_swap (compat "8.3"). Notation iter_pos_succ := Pos.iter_succ (compat "8.3"). Notation iter_pos_plus := Pos.iter_add (compat "8.3"). Notation iter_pos_invariant := Pos.iter_invariant (compat "8.3"). Notation Ppow_1_r := Pos.pow_1_r (compat "8.3"). Notation Ppow_succ_r := Pos.pow_succ_r (compat "8.3"). Notation Peqb_refl := Pos.eqb_refl (compat "8.3"). Notation Peqb_eq := Pos.eqb_eq (compat "8.3"). Notation Pcompare_refl_id := Pos.compare_cont_refl (compat "8.3"). Notation Pcompare_eq_iff := Pos.compare_eq_iff (compat "8.3"). Notation Pcompare_Gt_Lt := Pos.compare_cont_Gt_Lt (compat "8.3"). Notation Pcompare_eq_Lt := Pos.compare_lt_iff (compat "8.3"). Notation Pcompare_Lt_Gt := Pos.compare_cont_Lt_Gt (compat "8.3"). Notation Pcompare_antisym := Pos.compare_cont_antisym (compat "8.3"). Notation ZC1 := Pos.gt_lt (compat "8.3"). Notation ZC2 := Pos.lt_gt (compat "8.3"). Notation Pcompare_spec := Pos.compare_spec (compat "8.3"). 
Notation Pcompare_p_Sp := Pos.lt_succ_diag_r (compat "8.3"). Notation Pcompare_succ_succ := Pos.compare_succ_succ (compat "8.3"). Notation Pcompare_1 := Pos.nlt_1_r (compat "8.3"). Notation Plt_1 := Pos.nlt_1_r (compat "8.3"). Notation Plt_1_succ := Pos.lt_1_succ (compat "8.3"). Notation Plt_lt_succ := Pos.lt_lt_succ (compat "8.3"). Notation Plt_irrefl := Pos.lt_irrefl (compat "8.3"). Notation Plt_trans := Pos.lt_trans (compat "8.3"). Notation Plt_ind := Pos.lt_ind (compat "8.3"). Notation Ple_lteq := Pos.le_lteq (compat "8.3"). Notation Ple_refl := Pos.le_refl (compat "8.3"). Notation Ple_lt_trans := Pos.le_lt_trans (compat "8.3"). Notation Plt_le_trans := Pos.lt_le_trans (compat "8.3"). Notation Ple_trans := Pos.le_trans (compat "8.3"). Notation Plt_succ_r := Pos.lt_succ_r (compat "8.3"). Notation Ple_succ_l := Pos.le_succ_l (compat "8.3"). Notation Pplus_compare_mono_l := Pos.add_compare_mono_l (compat "8.3"). Notation Pplus_compare_mono_r := Pos.add_compare_mono_r (compat "8.3"). Notation Pplus_lt_mono_l := Pos.add_lt_mono_l (compat "8.3"). Notation Pplus_lt_mono_r := Pos.add_lt_mono_r (compat "8.3"). Notation Pplus_lt_mono := Pos.add_lt_mono (compat "8.3"). Notation Pplus_le_mono_l := Pos.add_le_mono_l (compat "8.3"). Notation Pplus_le_mono_r := Pos.add_le_mono_r (compat "8.3"). Notation Pplus_le_mono := Pos.add_le_mono (compat "8.3"). Notation Pmult_compare_mono_l := Pos.mul_compare_mono_l (compat "8.3"). Notation Pmult_compare_mono_r := Pos.mul_compare_mono_r (compat "8.3"). Notation Pmult_lt_mono_l := Pos.mul_lt_mono_l (compat "8.3"). Notation Pmult_lt_mono_r := Pos.mul_lt_mono_r (compat "8.3"). Notation Pmult_lt_mono := Pos.mul_lt_mono (compat "8.3"). Notation Pmult_le_mono_l := Pos.mul_le_mono_l (compat "8.3"). Notation Pmult_le_mono_r := Pos.mul_le_mono_r (compat "8.3"). Notation Pmult_le_mono := Pos.mul_le_mono (compat "8.3"). Notation Plt_plus_r := Pos.lt_add_r (compat "8.3"). Notation Plt_not_plus_l := Pos.lt_not_add_l (compat "8.3"). Notation Ppow_gt_1 := Pos.pow_gt_1 (compat "8.3"). Notation Ppred_mask := Pos.pred_mask (compat "8.3"). Notation Pminus_mask_succ_r := Pos.sub_mask_succ_r (compat "8.3"). Notation Pminus_mask_carry_spec := Pos.sub_mask_carry_spec (compat "8.3"). Notation Pminus_succ_r := Pos.sub_succ_r (compat "8.3"). Notation Pminus_mask_diag := Pos.sub_mask_diag (compat "8.3"). Notation Pplus_minus_eq := Pos.add_sub (compat "8.3"). Notation Pmult_minus_distr_l := Pos.mul_sub_distr_l (compat "8.3"). Notation Pminus_lt_mono_l := Pos.sub_lt_mono_l (compat "8.3"). Notation Pminus_compare_mono_l := Pos.sub_compare_mono_l (compat "8.3"). Notation Pminus_compare_mono_r := Pos.sub_compare_mono_r (compat "8.3"). Notation Pminus_lt_mono_r := Pos.sub_lt_mono_r (compat "8.3"). Notation Pminus_decr := Pos.sub_decr (compat "8.3"). Notation Pminus_xI_xI := Pos.sub_xI_xI (compat "8.3"). Notation Pplus_minus_assoc := Pos.add_sub_assoc (compat "8.3"). Notation Pminus_plus_distr := Pos.sub_add_distr (compat "8.3"). Notation Pminus_minus_distr := Pos.sub_sub_distr (compat "8.3"). Notation Pminus_mask_Lt := Pos.sub_mask_neg (compat "8.3"). Notation Pminus_Lt := Pos.sub_lt (compat "8.3"). Notation Pminus_Eq := Pos.sub_diag (compat "8.3"). Notation Psize_monotone := Pos.size_nat_monotone (compat "8.3"). Notation Psize_pos_gt := Pos.size_gt (compat "8.3"). Notation Psize_pos_le := Pos.size_le (compat "8.3"). (** More complex compatibility facts, expressed as lemmas (to preserve scopes for instance) *) Lemma Peqb_true_eq x y : Pos.eqb x y = true -> x=y. Proof. apply Pos.eqb_eq. Qed. 
Lemma Pcompare_eq_Gt p q : (p ?= q) = Gt <-> p > q. Proof. reflexivity. Qed. Lemma Pplus_one_succ_r p : Pos.succ p = p + 1. Proof (eq_sym (Pos.add_1_r p)). Lemma Pplus_one_succ_l p : Pos.succ p = 1 + p. Proof (eq_sym (Pos.add_1_l p)). Lemma Pcompare_refl p : Pos.compare_cont p p Eq = Eq. Proof (Pos.compare_cont_refl p Eq). Lemma Pcompare_Eq_eq : forall p q, Pos.compare_cont p q Eq = Eq -> p = q. Proof Pos.compare_eq. Lemma ZC4 p q : Pos.compare_cont p q Eq = CompOpp (Pos.compare_cont q p Eq). Proof (Pos.compare_antisym q p). Lemma Ppred_minus p : Pos.pred p = p - 1. Proof (eq_sym (Pos.sub_1_r p)). Lemma Pminus_mask_Gt p q : p > q -> exists h : positive, Pos.sub_mask p q = IsPos h /\ q + h = p /\ (h = 1 \/ Pos.sub_mask_carry p q = IsPos (Pos.pred h)). Proof. intros H. apply Pos.gt_lt in H. destruct (Pos.sub_mask_pos p q H) as (r & U). exists r. repeat split; trivial. now apply Pos.sub_mask_pos_iff. destruct (Pos.eq_dec r 1) as [EQ|NE]; [now left|right]. rewrite Pos.sub_mask_carry_spec, U. destruct r; trivial. now elim NE. Qed. Lemma Pplus_minus : forall p q, p > q -> q+(p-q) = p. Proof. intros. rewrite Pos.add_comm. now apply Pos.sub_add, Pos.gt_lt. Qed. (** Discontinued results of little interest and little/zero use in user contributions: Pplus_carry_no_neutral Pplus_carry_pred_eq_plus Pcompare_not_Eq Pcompare_Lt_Lt Pcompare_Lt_eq_Lt Pcompare_Gt_Gt Pcompare_Gt_eq_Gt Psucc_lt_compat Psucc_le_compat ZC3 Pcompare_p_Sq Pminus_mask_carry_diag Pminus_mask_IsNeg ZL10 ZL11 double_eq_zero_inversion double_plus_one_zero_discr double_plus_one_eq_one_inversion double_eq_one_discr Infix "/" := Pdiv2 : positive_scope. *) (** Old stuff, to remove someday *) Lemma Dcompare : forall r:comparison, r = Eq \/ r = Lt \/ r = Gt. Proof. destruct r; auto. Qed. (** Incompatibilities : - [(_ ?= _)%positive] expects no arg now, and designates [Pos.compare] which is convertible but syntactically distinct to [Pos.compare_cont .. .. Eq]. - [Pmult_nat] cannot be unfolded (unfold [Pos.iter_op] instead). *) coq-8.4pl2/theories/QArith/0000750000175000001440000000000012127276544014652 5ustar notinuserscoq-8.4pl2/theories/QArith/Qpower.v0000640000175000001440000001377712010532755016324 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0)%Z -> 0^n == 0. Proof. intros [|n|n] Hn; try (elim Hn; reflexivity); simpl; rewrite Qpower_positive_0; reflexivity. Qed. Lemma Qpower_not_0_positive : forall a n, ~a==0 -> ~Qpower_positive a n == 0. Proof. intros a n X H. apply X; clear X. induction n; simpl in *; try assumption; destruct (Qmult_integral _ _ H); try destruct (Qmult_integral _ _ H0); auto. Qed. Lemma Qpower_pos_positive : forall p n, 0 <= p -> 0 <= Qpower_positive p n. intros p n Hp. induction n; simpl; repeat apply Qmult_le_0_compat;assumption. Qed. Lemma Qpower_pos : forall p n, 0 <= p -> 0 <= p^n. Proof. intros p [|n|n] Hp; simpl; try discriminate; try apply Qinv_le_0_compat; apply Qpower_pos_positive; assumption. Qed. Lemma Qmult_power_positive : forall a b n, Qpower_positive (a*b) n == (Qpower_positive a n)*(Qpower_positive b n). Proof. induction n; simpl; repeat rewrite IHn; ring. Qed. Lemma Qmult_power : forall a b n, (a*b)^n == a^n*b^n. Proof. intros a b [|n|n]; simpl; try rewrite Qmult_power_positive; try rewrite Qinv_mult_distr; reflexivity. Qed. Lemma Qinv_power_positive : forall a n, Qpower_positive (/a) n == /(Qpower_positive a n). Proof. 
induction n; simpl; repeat (rewrite IHn || rewrite Qinv_mult_distr); reflexivity. Qed. Lemma Qinv_power : forall a n, (/a)^n == /a^n. Proof. intros a [|n|n]; simpl; try rewrite Qinv_power_positive; reflexivity. Qed. Lemma Qdiv_power : forall a b n, (a/b)^n == (a^n/b^n). Proof. unfold Qdiv. intros a b n. rewrite Qmult_power. rewrite Qinv_power. reflexivity. Qed. Lemma Qinv_power_n : forall n p, (1#p)^n == /(inject_Z ('p))^n. Proof. intros n p. rewrite Qmake_Qdiv. rewrite Qdiv_power. rewrite Qpower_1. unfold Qdiv. ring. Qed. Lemma Qpower_plus_positive : forall a n m, Qpower_positive a (n+m) == (Qpower_positive a n)*(Qpower_positive a m). Proof. intros a n m. unfold Qpower_positive. apply pow_pos_add. apply Q_Setoid. apply Qmult_comp. apply Qmult_assoc. Qed. Lemma Qpower_opp : forall a n, a^(-n) == /a^n. Proof. intros a [|n|n]; simpl; try reflexivity. symmetry; apply Qinv_involutive. Qed. Lemma Qpower_minus_positive : forall a (n m:positive), (m < n)%positive -> Qpower_positive a (n-m)%positive == (Qpower_positive a n)/(Qpower_positive a m). Proof. intros a n m H. destruct (Qeq_dec a 0) as [EQ|NEQ]. - now rewrite EQ, !Qpower_positive_0. - rewrite <- (Qdiv_mult_l (Qpower_positive a (n - m)) (Qpower_positive a m)) by (now apply Qpower_not_0_positive). f_equiv. rewrite <- Qpower_plus_positive. now rewrite Pos.sub_add. Qed. Lemma Qpower_plus : forall a n m, ~a==0 -> a^(n+m) == a^n*a^m. Proof. intros a [|n|n] [|m|m] H; simpl; try ring; try rewrite Qpower_plus_positive; try apply Qinv_mult_distr; try reflexivity; rewrite ?Z.pos_sub_spec; case Pos.compare_spec; intros H0; simpl; subst; try rewrite Qpower_minus_positive; try (field; try split; apply Qpower_not_0_positive); assumption. Qed. Lemma Qpower_plus' : forall a n m, (n+m <> 0)%Z -> a^(n+m) == a^n*a^m. Proof. intros a n m H. destruct (Qeq_dec a 0)as [X|X]. rewrite X. rewrite Qpower_0 by assumption. destruct n; destruct m; try (elim H; reflexivity); simpl; repeat rewrite Qpower_positive_0; ring_simplify; reflexivity. apply Qpower_plus. assumption. Qed. Lemma Qpower_mult_positive : forall a n m, Qpower_positive a (n*m) == Qpower_positive (Qpower_positive a n) m. Proof. intros a n m. induction n using Pos.peano_ind. reflexivity. rewrite Pos.mul_succ_l. rewrite <- Pos.add_1_l. do 2 rewrite Qpower_plus_positive. rewrite IHn. rewrite Qmult_power_positive. reflexivity. Qed. Lemma Qpower_mult : forall a n m, a^(n*m) == (a^n)^m. Proof. intros a [|n|n] [|m|m]; simpl; try rewrite Qpower_positive_1; try rewrite Qpower_mult_positive; try rewrite Qinv_power_positive; try rewrite Qinv_involutive; try reflexivity. Qed. Lemma Zpower_Qpower : forall (a n:Z), (0<=n)%Z -> inject_Z (a^n) == (inject_Z a)^n. Proof. intros a [|n|n] H;[reflexivity| |elim H; reflexivity]. induction n using Pos.peano_ind. replace (a^1)%Z with a by ring. ring. rewrite Pos2Z.inj_succ. unfold Z.succ. rewrite Zpower_exp; auto with *; try discriminate. rewrite Qpower_plus' by discriminate. rewrite <- IHn by discriminate. replace (a^'n*a^1)%Z with (a^'n*a)%Z by ring. ring_simplify. reflexivity. Qed. Lemma Qsqr_nonneg : forall a, 0 <= a^2. Proof. intros a. destruct (Qlt_le_dec 0 a) as [A|A]. apply (Qmult_le_0_compat a a); (apply Qlt_le_weak; assumption). setoid_replace (a^2) with ((-a)*(-a)) by ring. rewrite Qle_minus_iff in A. setoid_replace (0+ - a) with (-a) in A by ring. apply Qmult_le_0_compat; assumption. Qed. Theorem Qpower_decomp p x y : Qpower_positive (x#y) p = x ^ Zpos p # (y ^ p). Proof. induction p; intros; simpl Qpower_positive; rewrite ?IHp. - (* xI *) unfold Qmult, Qnum, Qden. 
f_equal. + now rewrite <- Z.pow_twice_r, <- Z.pow_succ_r. + apply Pos2Z.inj; rewrite !Pos2Z.inj_mul, !Pos2Z.inj_pow. now rewrite <- Z.pow_twice_r, <- Z.pow_succ_r. - (* xO *) unfold Qmult, Qnum, Qden. f_equal. + now rewrite <- Z.pow_twice_r. + apply Pos2Z.inj; rewrite !Pos2Z.inj_mul, !Pos2Z.inj_pow. now rewrite <- Z.pow_twice_r. - (* xO *) now rewrite Z.pow_1_r, Pos.pow_1_r. Qed. coq-8.4pl2/theories/QArith/Qreduction.v0000640000175000001440000001130712010532755017147 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* g) by (intro; subst; discriminate). rewrite Z2Pos.id. ring. rewrite <- (Z.mul_pos_cancel_l g); [now rewrite <- Hd | omega]. Close Scope Z_scope. Qed. Lemma Qred_complete : forall p q, p==q -> Qred p = Qred q. Proof. intros (a,b) (c,d). unfold Qred, Qeq in *; simpl in *. Open Scope Z_scope. intros H. generalize (Z.ggcd_gcd a ('b)) (Zgcd_is_gcd a ('b)) (Z.gcd_nonneg a ('b)) (Z.ggcd_correct_divisors a ('b)). destruct (Z.ggcd a (Zpos b)) as (g,(aa,bb)). simpl. intros <- Hg1 Hg2 (Hg3,Hg4). assert (Hg0 : g <> 0) by (intro; now subst g). generalize (Z.ggcd_gcd c ('d)) (Zgcd_is_gcd c ('d)) (Z.gcd_nonneg c ('d)) (Z.ggcd_correct_divisors c ('d)). destruct (Z.ggcd c (Zpos d)) as (g',(cc,dd)). simpl. intros <- Hg'1 Hg'2 (Hg'3,Hg'4). assert (Hg'0 : g' <> 0) by (intro; now subst g'). elim (rel_prime_cross_prod aa bb cc dd). - congruence. - (*rel_prime*) constructor. * exists aa; auto with zarith. * exists bb; auto with zarith. * intros x Ha Hb. destruct Hg1 as (Hg11,Hg12,Hg13). destruct (Hg13 (g*x)) as (x',Hx). { rewrite Hg3. destruct Ha as (xa,Hxa); exists xa; rewrite Hxa; ring. } { rewrite Hg4. destruct Hb as (xb,Hxb); exists xb; rewrite Hxb; ring. } exists x'. apply Z.mul_reg_l with g; auto. rewrite Hx at 1; ring. - (* rel_prime *) constructor. * exists cc; auto with zarith. * exists dd; auto with zarith. * intros x Hc Hd. inversion Hg'1 as (Hg'11,Hg'12,Hg'13). destruct (Hg'13 (g'*x)) as (x',Hx). { rewrite Hg'3. destruct Hc as (xc,Hxc); exists xc; rewrite Hxc; ring. } { rewrite Hg'4. destruct Hd as (xd,Hxd); exists xd; rewrite Hxd; ring. } exists x'. apply Z.mul_reg_l with g'; auto. rewrite Hx at 1; ring. - apply Z.lt_gt. rewrite <- (Z.mul_pos_cancel_l g); [now rewrite <- Hg4 | omega]. - apply Z.lt_gt. rewrite <- (Z.mul_pos_cancel_l g'); [now rewrite <- Hg'4 | omega]. - apply Z.mul_reg_l with (g*g'). * rewrite Z.mul_eq_0. now destruct 1. * rewrite Z.mul_shuffle1, <- Hg3, <- Hg'4. now rewrite Z.mul_shuffle1, <- Hg'3, <- Hg4, H, Z.mul_comm. Close Scope Z_scope. Qed. Add Morphism Qred : Qred_comp. Proof. intros q q' H. rewrite (Qred_correct q); auto. rewrite (Qred_correct q'); auto. Qed. Definition Qplus' (p q : Q) := Qred (Qplus p q). Definition Qmult' (p q : Q) := Qred (Qmult p q). Definition Qminus' x y := Qred (Qminus x y). Lemma Qplus'_correct : forall p q : Q, (Qplus' p q)==(Qplus p q). Proof. intros; unfold Qplus'; apply Qred_correct; auto. Qed. Lemma Qmult'_correct : forall p q : Q, (Qmult' p q)==(Qmult p q). Proof. intros; unfold Qmult'; apply Qred_correct; auto. Qed. Lemma Qminus'_correct : forall p q : Q, (Qminus' p q)==(Qminus p q). Proof. intros; unfold Qminus'; apply Qred_correct; auto. Qed. Add Morphism Qplus' : Qplus'_comp. Proof. intros; unfold Qplus'. rewrite H, H0; auto with qarith. Qed. Add Morphism Qmult' : Qmult'_comp. intros; unfold Qmult'. rewrite H, H0; auto with qarith. Qed. Add Morphism Qminus' : Qminus'_comp. intros; unfold Qminus'. 
rewrite H, H0; auto with qarith. Qed. Lemma Qred_opp: forall q, Qred (-q) = - (Qred q). Proof. intros (x, y); unfold Qred; simpl. rewrite Z.ggcd_opp; case Z.ggcd; intros p1 (p2, p3); simpl. unfold Qopp; auto. Qed. Theorem Qred_compare: forall x y, Qcompare x y = Qcompare (Qred x) (Qred y). Proof. intros x y; apply Qcompare_comp; apply Qeq_sym; apply Qred_correct. Qed. coq-8.4pl2/theories/QArith/vo.itarget0000640000175000001440000000020411324610454016642 0ustar notinusersQabs.vo QArith_base.vo QArith.vo Qcanon.vo Qfield.vo Qpower.vo Qreals.vo Qreduction.vo Qring.vo Qround.vo QOrderedType.vo Qminmax.vocoq-8.4pl2/theories/QArith/Qcanon.v0000640000175000001440000003065512010532755016260 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Q ; canon : Qred this = this }. Delimit Scope Qc_scope with Qc. Bind Scope Qc_scope with Qc. Arguments Qcmake this%Q _. Open Scope Qc_scope. Lemma Qred_identity : forall q:Q, Z.gcd (Qnum q) (QDen q) = 1%Z -> Qred q = q. Proof. unfold Qred; intros (a,b); simpl. generalize (Z.ggcd_gcd a ('b)) (Z.ggcd_correct_divisors a ('b)). intros. rewrite H1 in H; clear H1. destruct (Z.ggcd a ('b)) as (g,(aa,bb)); simpl in *; subst. destruct H0. rewrite Z.mul_1_l in H, H0. subst; simpl; auto. Qed. Lemma Qred_identity2 : forall q:Q, Qred q = q -> Z.gcd (Qnum q) (QDen q) = 1%Z. Proof. unfold Qred; intros (a,b); simpl. generalize (Z.ggcd_gcd a ('b)) (Z.ggcd_correct_divisors a ('b)) (Z.gcd_nonneg a ('b)). intros. rewrite <- H; rewrite <- H in H1; clear H. destruct (Z.ggcd a ('b)) as (g,(aa,bb)); simpl in *; subst. injection H2; intros; clear H2. destruct H0. clear H0 H3. destruct g as [|g|g]; destruct bb as [|bb|bb]; simpl in *; try discriminate. f_equal. apply Pos.mul_reg_r with bb. injection H2; intros. rewrite <- H0. rewrite H; simpl; auto. elim H1; auto. Qed. Lemma Qred_iff : forall q:Q, Qred q = q <-> Z.gcd (Qnum q) (QDen q) = 1%Z. Proof. split; intros. apply Qred_identity2; auto. apply Qred_identity; auto. Qed. Lemma Qred_involutive : forall q:Q, Qred (Qred q) = Qred q. Proof. intros; apply Qred_complete. apply Qred_correct. Qed. Definition Q2Qc (q:Q) : Qc := Qcmake (Qred q) (Qred_involutive q). Arguments Q2Qc q%Q. Notation " !! " := Q2Qc : Qc_scope. Lemma Qc_is_canon : forall q q' : Qc, q == q' -> q = q'. Proof. intros (q,proof_q) (q',proof_q'). simpl. intros H. assert (H0:=Qred_complete _ _ H). assert (q = q') by congruence. subst q'. assert (proof_q = proof_q'). apply eq_proofs_unicity; auto; intros. repeat decide equality. congruence. Qed. Hint Resolve Qc_is_canon. Notation " 0 " := (!!0) : Qc_scope. Notation " 1 " := (!!1) : Qc_scope. Definition Qcle (x y : Qc) := (x <= y)%Q. Definition Qclt (x y : Qc) := (x < y)%Q. Notation Qcgt := (fun x y : Qc => Qlt y x). Notation Qcge := (fun x y : Qc => Qle y x). Infix "<" := Qclt : Qc_scope. Infix "<=" := Qcle : Qc_scope. Infix ">" := Qcgt : Qc_scope. Infix ">=" := Qcge : Qc_scope. Notation "x <= y <= z" := (x<=y/\y<=z) : Qc_scope. Notation "x < y < z" := (x (p ?= q) = Eq. Proof. unfold Qccompare. intros; rewrite <- Qeq_alt. split; auto. intro H; rewrite H; auto with qarith. Qed. Lemma Qclt_alt : forall p q, (p (p?=q = Lt). Proof. intros; exact (Qlt_alt p q). Qed. Lemma Qcgt_alt : forall p q, (p>q) <-> (p?=q = Gt). Proof. intros; exact (Qgt_alt p q). Qed. Lemma Qle_alt : forall p q, (p<=q) <-> (p?=q <> Gt). Proof. intros; exact (Qle_alt p q). Qed. Lemma Qge_alt : forall p q, (p>=q) <-> (p?=q <> Lt). Proof. 
intros; exact (Qge_alt p q). Qed. (** equality on [Qc] is decidable: *) Theorem Qc_eq_dec : forall x y:Qc, {x=y} + {x<>y}. Proof. intros. destruct (Qeq_dec x y) as [H|H]; auto. right; contradict H; subst; auto with qarith. Defined. (** The addition, multiplication and opposite are defined in the straightforward way: *) Definition Qcplus (x y : Qc) := !!(x+y). Infix "+" := Qcplus : Qc_scope. Definition Qcmult (x y : Qc) := !!(x*y). Infix "*" := Qcmult : Qc_scope. Definition Qcopp (x : Qc) := !!(-x). Notation "- x" := (Qcopp x) : Qc_scope. Definition Qcminus (x y : Qc) := x+-y. Infix "-" := Qcminus : Qc_scope. Definition Qcinv (x : Qc) := !!(/x). Notation "/ x" := (Qcinv x) : Qc_scope. Definition Qcdiv (x y : Qc) := x*/y. Infix "/" := Qcdiv : Qc_scope. (** [0] and [1] are apart *) Lemma Q_apart_0_1 : 1 <> 0. Proof. unfold Q2Qc. intros H; discriminate H. Qed. Ltac qc := match goal with | q:Qc |- _ => destruct q; qc | _ => apply Qc_is_canon; simpl; repeat rewrite Qred_correct end. Opaque Qred. (** Addition is associative: *) Theorem Qcplus_assoc : forall x y z, x+(y+z)=(x+y)+z. Proof. intros; qc; apply Qplus_assoc. Qed. (** [0] is a neutral element for addition: *) Lemma Qcplus_0_l : forall x, 0+x = x. Proof. intros; qc; apply Qplus_0_l. Qed. Lemma Qcplus_0_r : forall x, x+0 = x. Proof. intros; qc; apply Qplus_0_r. Qed. (** Commutativity of addition: *) Theorem Qcplus_comm : forall x y, x+y = y+x. Proof. intros; qc; apply Qplus_comm. Qed. (** Properties of [Qopp] *) Lemma Qcopp_involutive : forall q, - -q = q. Proof. intros; qc; apply Qopp_involutive. Qed. Theorem Qcplus_opp_r : forall q, q+(-q) = 0. Proof. intros; qc; apply Qplus_opp_r. Qed. (** Multiplication is associative: *) Theorem Qcmult_assoc : forall n m p, n*(m*p)=(n*m)*p. Proof. intros; qc; apply Qmult_assoc. Qed. (** [1] is a neutral element for multiplication: *) Lemma Qcmult_1_l : forall n, 1*n = n. Proof. intros; qc; apply Qmult_1_l. Qed. Theorem Qcmult_1_r : forall n, n*1=n. Proof. intros; qc; apply Qmult_1_r. Qed. (** Commutativity of multiplication *) Theorem Qcmult_comm : forall x y, x*y=y*x. Proof. intros; qc; apply Qmult_comm. Qed. (** Distributivity *) Theorem Qcmult_plus_distr_r : forall x y z, x*(y+z)=(x*y)+(x*z). Proof. intros; qc; apply Qmult_plus_distr_r. Qed. Theorem Qcmult_plus_distr_l : forall x y z, (x+y)*z=(x*z)+(y*z). Proof. intros; qc; apply Qmult_plus_distr_l. Qed. (** Integrality *) Theorem Qcmult_integral : forall x y, x*y=0 -> x=0 \/ y=0. Proof. intros. destruct (Qmult_integral x y); try qc; auto. injection H; clear H; intros. rewrite <- (Qred_correct (x*y)). rewrite <- (Qred_correct 0). rewrite H; auto with qarith. Qed. Theorem Qcmult_integral_l : forall x y, ~ x = 0 -> x*y = 0 -> y = 0. Proof. intros; destruct (Qcmult_integral _ _ H0); tauto. Qed. (** Inverse and division. *) Theorem Qcmult_inv_r : forall x, x<>0 -> x*(/x) = 1. Proof. intros; qc; apply Qmult_inv_r; auto. Qed. Theorem Qcmult_inv_l : forall x, x<>0 -> (/x)*x = 1. Proof. intros. rewrite Qcmult_comm. apply Qcmult_inv_r; auto. Qed. Lemma Qcinv_mult_distr : forall p q, / (p * q) = /p * /q. Proof. intros; qc; apply Qinv_mult_distr. Qed. Theorem Qcdiv_mult_l : forall x y, y<>0 -> (x*y)/y = x. Proof. unfold Qcdiv. intros. rewrite <- Qcmult_assoc. rewrite Qcmult_inv_r; auto. apply Qcmult_1_r. Qed. Theorem Qcmult_div_r : forall x y, ~ y = 0 -> y*(x/y) = x. Proof. unfold Qcdiv. intros. rewrite Qcmult_assoc. rewrite Qcmult_comm. rewrite Qcmult_assoc. rewrite Qcmult_inv_l; auto. apply Qcmult_1_l. Qed. (** Properties of order upon Q. 
*) Lemma Qcle_refl : forall x, x<=x. Proof. unfold Qcle; intros; simpl; apply Qle_refl. Qed. Lemma Qcle_antisym : forall x y, x<=y -> y<=x -> x=y. Proof. unfold Qcle; intros; simpl in *. apply Qc_is_canon; apply Qle_antisym; auto. Qed. Lemma Qcle_trans : forall x y z, x<=y -> y<=z -> x<=z. Proof. unfold Qcle; intros; eapply Qle_trans; eauto. Qed. Lemma Qclt_not_eq : forall x y, x x<>y. Proof. unfold Qclt; intros; simpl in *. intro; destruct (Qlt_not_eq _ _ H). subst; auto with qarith. Qed. (** Large = strict or equal *) Lemma Qclt_le_weak : forall x y, x x<=y. Proof. unfold Qcle, Qclt; intros; apply Qlt_le_weak; auto. Qed. Lemma Qcle_lt_trans : forall x y z, x<=y -> y x y<=z -> x y x y<=x. Proof. unfold Qcle, Qclt; intros; apply Qnot_lt_le; auto. Qed. Lemma Qcnot_le_lt : forall x y, ~ x<=y -> y ~ y<=x. Proof. unfold Qcle, Qclt; intros; apply Qlt_not_le; auto. Qed. Lemma Qcle_not_lt : forall x y, x<=y -> ~ y x -q <= -p. Proof. unfold Qcle, Qcopp; intros; simpl in *. repeat rewrite Qred_correct. apply Qopp_le_compat; auto. Qed. Lemma Qcle_minus_iff : forall p q, p <= q <-> 0 <= q+-p. Proof. unfold Qcle, Qcminus; intros; simpl in *. repeat rewrite Qred_correct. apply Qle_minus_iff; auto. Qed. Lemma Qclt_minus_iff : forall p q, p < q <-> 0 < q+-p. Proof. unfold Qclt, Qcplus, Qcopp; intros; simpl in *. repeat rewrite Qred_correct. apply Qlt_minus_iff; auto. Qed. Lemma Qcplus_le_compat : forall x y z t, x<=y -> z<=t -> x+z <= y+t. Proof. unfold Qcplus, Qcle; intros; simpl in *. repeat rewrite Qred_correct. apply Qplus_le_compat; auto. Qed. Lemma Qcmult_le_compat_r : forall x y z, x <= y -> 0 <= z -> x*z <= y*z. Proof. unfold Qcmult, Qcle; intros; simpl in *. repeat rewrite Qred_correct. apply Qmult_le_compat_r; auto. Qed. Lemma Qcmult_lt_0_le_reg_r : forall x y z, 0 < z -> x*z <= y*z -> x <= y. Proof. unfold Qcmult, Qcle, Qclt; intros; simpl in *. repeat progress rewrite Qred_correct in * |-. eapply Qmult_lt_0_le_reg_r; eauto. Qed. Lemma Qcmult_lt_compat_r : forall x y z, 0 < z -> x < y -> x*z < y*z. Proof. unfold Qcmult, Qclt; intros; simpl in *. repeat progress rewrite Qred_correct in *. eapply Qmult_lt_compat_r; eauto. Qed. (** Rational to the n-th power *) Fixpoint Qcpower (q:Qc)(n:nat) : Qc := match n with | O => 1 | S n => q * (Qcpower q n) end. Notation " q ^ n " := (Qcpower q n) : Qc_scope. Lemma Qcpower_1 : forall n, 1^n = 1. Proof. induction n; simpl; auto with qarith. rewrite IHn; auto with qarith. Qed. Lemma Qcpower_0 : forall n, n<>O -> 0^n = 0. Proof. destruct n; simpl. destruct 1; auto. intros. now apply Qc_is_canon. Qed. Lemma Qcpower_pos : forall p n, 0 <= p -> 0 <= p^n. Proof. induction n; simpl; auto with qarith. easy. intros. apply Qcle_trans with (0*(p^n)). easy. apply Qcmult_le_compat_r; auto. Qed. (** And now everything is easier concerning tactics: *) (** A ring tactic for rational numbers *) Definition Qc_eq_bool (x y : Qc) := if Qc_eq_dec x y then true else false. Lemma Qc_eq_bool_correct : forall x y : Qc, Qc_eq_bool x y = true -> x=y. Proof. intros x y; unfold Qc_eq_bool; case (Qc_eq_dec x y); simpl; auto. intros _ H; inversion H. Qed. Definition Qcrt : ring_theory 0 1 Qcplus Qcmult Qcminus Qcopp (eq(A:=Qc)). Proof. constructor. exact Qcplus_0_l. exact Qcplus_comm. exact Qcplus_assoc. exact Qcmult_1_l. exact Qcmult_comm. exact Qcmult_assoc. exact Qcmult_plus_distr_l. reflexivity. exact Qcplus_opp_r. Qed. Definition Qcft : field_theory 0%Qc 1%Qc Qcplus Qcmult Qcminus Qcopp Qcdiv Qcinv (eq(A:=Qc)). Proof. constructor. exact Qcrt. exact Q_apart_0_1. reflexivity. 
exact Qcmult_inv_l. Qed. Add Field Qcfield : Qcft. (** A field tactic for rational numbers *) Example test_field : (forall x y : Qc, y<>0 -> (x/y)*y = x)%Qc. intros. field. auto. Qed. Theorem Qc_decomp: forall x y: Qc, (Qred x = x -> Qred y = y -> (x:Q) = y)-> x = y. Proof. intros (q, Hq) (q', Hq'); simpl; intros H. assert (H1 := H Hq Hq'). subst q'. assert (Hq = Hq'). apply Eqdep_dec.eq_proofs_unicity; auto; intros. repeat decide equality. congruence. Qed. coq-8.4pl2/theories/QArith/Qring.v0000640000175000001440000000104512010532755016110 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y" := (Qlt y x)(only parsing) : Q_scope. Notation "x >= y" := (Qle y x)(only parsing) : Q_scope. Notation "x <= y <= z" := (x<=y/\y<=z) : Q_scope. (** injection from Z is injective. *) Lemma inject_Z_injective (a b: Z): inject_Z a == inject_Z b <-> a = b. Proof. unfold Qeq. simpl. omega. Qed. (** Another approach : using Qcompare for defining order relations. *) Definition Qcompare (p q : Q) := (Qnum p * QDen q ?= Qnum q * QDen p)%Z. Notation "p ?= q" := (Qcompare p q) : Q_scope. Lemma Qeq_alt p q : (p == q) <-> (p ?= q) = Eq. Proof. symmetry. apply Z.compare_eq_iff. Qed. Lemma Qlt_alt p q : (p (p?=q = Lt). Proof. reflexivity. Qed. Lemma Qgt_alt p q : (p>q) <-> (p?=q = Gt). Proof. symmetry. apply Z.gt_lt_iff. Qed. Lemma Qle_alt p q : (p<=q) <-> (p?=q <> Gt). Proof. reflexivity. Qed. Lemma Qge_alt p q : (p>=q) <-> (p?=q <> Lt). Proof. symmetry. apply Z.ge_le_iff. Qed. Hint Unfold Qeq Qlt Qle : qarith. Hint Extern 5 (?X1 <> ?X2) => intro; discriminate: qarith. Lemma Qcompare_antisym x y : CompOpp (x ?= y) = (y ?= x). Proof. symmetry. apply Z.compare_antisym. Qed. Lemma Qcompare_spec x y : CompareSpec (x==y) (x y == x. Proof. auto with qarith. Qed. Theorem Qeq_trans x y z : x == y -> y == z -> x == z. Proof. unfold Qeq; intros XY YZ. apply Z.mul_reg_r with (QDen y); [auto with qarith|]. now rewrite Z.mul_shuffle0, XY, Z.mul_shuffle0, YZ, Z.mul_shuffle0. Qed. Hint Immediate Qeq_sym : qarith. Hint Resolve Qeq_refl Qeq_trans : qarith. (** In a word, [Qeq] is a setoid equality. *) Instance Q_Setoid : Equivalence Qeq. Proof. split; red; eauto with qarith. Qed. (** Furthermore, this equality is decidable: *) Theorem Qeq_dec x y : {x==y} + {~ x==y}. Proof. apply Z.eq_dec. Defined. Definition Qeq_bool x y := (Zeq_bool (Qnum x * QDen y) (Qnum y * QDen x))%Z. Definition Qle_bool x y := (Z.leb (Qnum x * QDen y) (Qnum y * QDen x))%Z. Lemma Qeq_bool_iff x y : Qeq_bool x y = true <-> x == y. Proof. symmetry; apply Zeq_is_eq_bool. Qed. Lemma Qeq_bool_eq x y : Qeq_bool x y = true -> x == y. Proof. apply Qeq_bool_iff. Qed. Lemma Qeq_eq_bool x y : x == y -> Qeq_bool x y = true. Proof. apply Qeq_bool_iff. Qed. Lemma Qeq_bool_neq x y : Qeq_bool x y = false -> ~ x == y. Proof. rewrite <- Qeq_bool_iff. now intros ->. Qed. Lemma Qle_bool_iff x y : Qle_bool x y = true <-> x <= y. Proof. symmetry; apply Zle_is_le_bool. Qed. Lemma Qle_bool_imp_le x y : Qle_bool x y = true -> x <= y. Proof. apply Qle_bool_iff. Qed. Theorem Qnot_eq_sym x y : ~x == y -> ~y == x. Proof. auto with qarith. Qed. Hint Resolve Qnot_eq_sym : qarith. (** * Addition, multiplication and opposite *) (** The addition, multiplication and opposite are defined in the straightforward way: *) Definition Qplus (x y : Q) := (Qnum x * QDen y + Qnum y * QDen x) # (Qden x * Qden y). Definition Qmult (x y : Q) := (Qnum x * Qnum y) # (Qden x * Qden y). 
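(** Illustrative examples, added by the editor and not part of the upstream
    file: with the definitions above, addition and multiplication act on
    unreduced fractions, e.g. 1/2 + 1/3 is the (unreduced) [5#6] and
    1/2 * 1/3 is [1#6].  The infix notations for [Qplus] and [Qmult] are
    only introduced just below, so the functions are used by name here;
    the example names are new. *)
Example Qplus_example : Qplus (1#2) (1#3) == 5#6.
Proof. unfold Qeq; reflexivity. Qed.
Example Qmult_example : Qmult (1#2) (1#3) == 1#6.
Proof. unfold Qeq; reflexivity. Qed.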
Definition Qopp (x : Q) := (- Qnum x) # (Qden x). Definition Qminus (x y : Q) := Qplus x (Qopp y). Definition Qinv (x : Q) := match Qnum x with | Z0 => 0 | Zpos p => (QDen x)#p | Zneg p => (Zneg (Qden x))#p end. Definition Qdiv (x y : Q) := Qmult x (Qinv y). Infix "+" := Qplus : Q_scope. Notation "- x" := (Qopp x) : Q_scope. Infix "-" := Qminus : Q_scope. Infix "*" := Qmult : Q_scope. Notation "/ x" := (Qinv x) : Q_scope. Infix "/" := Qdiv : Q_scope. (** A light notation for [Zpos] *) Notation " ' x " := (Zpos x) (at level 20, no associativity) : Z_scope. Lemma Qmake_Qdiv a b : a#b==inject_Z a/inject_Z ('b). Proof. unfold Qeq. simpl. ring. Qed. (** * Setoid compatibility results *) Instance Qplus_comp : Proper (Qeq==>Qeq==>Qeq) Qplus. Proof. unfold Qeq, Qplus; simpl. Open Scope Z_scope. intros (p1, p2) (q1, q2) H (r1, r2) (s1, s2) H0; simpl in *. simpl_mult; ring_simplify. replace (p1 * 'r2 * 'q2) with (p1 * 'q2 * 'r2) by ring. rewrite H. replace (r1 * 'p2 * 'q2 * 's2) with (r1 * 's2 * 'p2 * 'q2) by ring. rewrite H0. ring. Close Scope Z_scope. Qed. Instance Qopp_comp : Proper (Qeq==>Qeq) Qopp. Proof. unfold Qeq, Qopp; simpl. Open Scope Z_scope. intros x y H; simpl. replace (- Qnum x * ' Qden y) with (- (Qnum x * ' Qden y)) by ring. rewrite H; ring. Close Scope Z_scope. Qed. Instance Qminus_comp : Proper (Qeq==>Qeq==>Qeq) Qminus. Proof. intros x x' Hx y y' Hy. unfold Qminus. rewrite Hx, Hy; auto with qarith. Qed. Instance Qmult_comp : Proper (Qeq==>Qeq==>Qeq) Qmult. Proof. unfold Qeq; simpl. Open Scope Z_scope. intros (p1, p2) (q1, q2) H (r1, r2) (s1, s2) H0; simpl in *. intros; simpl_mult; ring_simplify. replace (q1 * s1 * 'p2) with (q1 * 'p2 * s1) by ring. rewrite <- H. replace (p1 * r1 * 'q2 * 's2) with (r1 * 's2 * p1 * 'q2) by ring. rewrite H0. ring. Close Scope Z_scope. Qed. Instance Qinv_comp : Proper (Qeq==>Qeq) Qinv. Proof. unfold Qeq, Qinv; simpl. Open Scope Z_scope. intros (p1, p2) (q1, q2) EQ; simpl in *. destruct q1; simpl in *. - apply Z.mul_eq_0 in EQ. destruct EQ; now subst. - destruct p1; simpl in *; try discriminate. now rewrite Pos.mul_comm, <- EQ, Pos.mul_comm. - destruct p1; simpl in *; try discriminate. now rewrite Pos.mul_comm, <- EQ, Pos.mul_comm. Close Scope Z_scope. Qed. Instance Qdiv_comp : Proper (Qeq==>Qeq==>Qeq) Qdiv. Proof. intros x x' Hx y y' Hy; unfold Qdiv. rewrite Hx, Hy; auto with qarith. Qed. Instance Qcompare_comp : Proper (Qeq==>Qeq==>eq) Qcompare. Proof. unfold Qeq, Qcompare. Open Scope Z_scope. intros (p1,p2) (q1,q2) H (r1,r2) (s1,s2) H'; simpl in *. rewrite <- (Zcompare_mult_compat (q2*s2) (p1*'r2)). rewrite <- (Zcompare_mult_compat (p2*r2) (q1*'s2)). change ('(q2*s2)) with ('q2 * 's2). change ('(p2*r2)) with ('p2 * 'r2). replace ('q2 * 's2 * (p1*'r2)) with ((p1*'q2)*'r2*'s2) by ring. rewrite H. replace ('q2 * 's2 * (r1*'p2)) with ((r1*'s2)*'q2*'p2) by ring. rewrite H'. f_equal; ring. Close Scope Z_scope. Qed. Instance Qle_comp : Proper (Qeq==>Qeq==>iff) Qle. Proof. intros p q H r s H'. rewrite 2 Qle_alt, H, H'; auto with *. Qed. Instance Qlt_compat : Proper (Qeq==>Qeq==>iff) Qlt. Proof. intros p q H r s H'. rewrite 2 Qlt_alt, H, H'; auto with *. Qed. Instance Qeqb_comp : Proper (Qeq==>Qeq==>eq) Qeq_bool. Proof. intros p q H r s H'; apply eq_true_iff_eq. rewrite 2 Qeq_bool_iff, H, H'; split; auto with qarith. Qed. Instance Qleb_comp : Proper (Qeq==>Qeq==>eq) Qle_bool. Proof. intros p q H r s H'; apply eq_true_iff_eq. rewrite 2 Qle_bool_iff, H, H'; split; auto with qarith. Qed. (** [0] and [1] are apart *) Lemma Q_apart_0_1 : ~ 1 == 0. Proof. 
unfold Qeq; auto with qarith. Qed. (** * Properties of [Qadd] *) (** Addition is associative: *) Theorem Qplus_assoc : forall x y z, x+(y+z)==(x+y)+z. Proof. intros (x1, x2) (y1, y2) (z1, z2). unfold Qeq, Qplus; simpl; simpl_mult; ring. Qed. (** [0] is a neutral element for addition: *) Lemma Qplus_0_l : forall x, 0+x == x. Proof. intros (x1, x2); unfold Qeq, Qplus; simpl; ring. Qed. Lemma Qplus_0_r : forall x, x+0 == x. Proof. intros (x1, x2); unfold Qeq, Qplus; simpl. rewrite Pos.mul_comm; simpl; ring. Qed. (** Commutativity of addition: *) Theorem Qplus_comm : forall x y, x+y == y+x. Proof. intros (x1, x2); unfold Qeq, Qplus; simpl. intros; rewrite Pos.mul_comm; ring. Qed. (** * Properties of [Qopp] *) Lemma Qopp_involutive : forall q, - -q == q. Proof. red; simpl; intros; ring. Qed. Theorem Qplus_opp_r : forall q, q+(-q) == 0. Proof. red; simpl; intro; ring. Qed. (** Injectivity of addition (uses theory about Qopp above): *) Lemma Qplus_inj_r (x y z: Q): x + z == y + z <-> x == y. Proof. split; intro E. rewrite <- (Qplus_0_r x), <- (Qplus_0_r y). rewrite <- (Qplus_opp_r z); auto. do 2 rewrite Qplus_assoc. rewrite E. reflexivity. rewrite E. reflexivity. Qed. Lemma Qplus_inj_l (x y z: Q): z + x == z + y <-> x == y. Proof. rewrite (Qplus_comm z x), (Qplus_comm z y). apply Qplus_inj_r. Qed. (** * Properties of [Qmult] *) (** Multiplication is associative: *) Theorem Qmult_assoc : forall n m p, n*(m*p)==(n*m)*p. Proof. intros; red; simpl; rewrite Pos.mul_assoc; ring. Qed. (** multiplication and zero *) Lemma Qmult_0_l : forall x , 0*x == 0. Proof. intros; compute; reflexivity. Qed. Lemma Qmult_0_r : forall x , x*0 == 0. Proof. intros; red; simpl; ring. Qed. (** [1] is a neutral element for multiplication: *) Lemma Qmult_1_l : forall n, 1*n == n. Proof. intro; red; simpl; destruct (Qnum n); auto. Qed. Theorem Qmult_1_r : forall n, n*1==n. Proof. intro; red; simpl. rewrite Z.mul_1_r with (n := Qnum n). rewrite Pos.mul_comm; simpl; trivial. Qed. (** Commutativity of multiplication *) Theorem Qmult_comm : forall x y, x*y==y*x. Proof. intros; red; simpl; rewrite Pos.mul_comm; ring. Qed. (** Distributivity over [Qadd] *) Theorem Qmult_plus_distr_r : forall x y z, x*(y+z)==(x*y)+(x*z). Proof. intros (x1, x2) (y1, y2) (z1, z2). unfold Qeq, Qmult, Qplus; simpl; simpl_mult; ring. Qed. Theorem Qmult_plus_distr_l : forall x y z, (x+y)*z==(x*z)+(y*z). Proof. intros (x1, x2) (y1, y2) (z1, z2). unfold Qeq, Qmult, Qplus; simpl; simpl_mult; ring. Qed. (** Integrality *) Theorem Qmult_integral : forall x y, x*y==0 -> x==0 \/ y==0. Proof. intros (x1,x2) (y1,y2). unfold Qeq, Qmult; simpl. now rewrite <- Z.mul_eq_0, !Z.mul_1_r. Qed. Theorem Qmult_integral_l : forall x y, ~ x == 0 -> x*y == 0 -> y == 0. Proof. intros (x1, x2) (y1, y2). unfold Qeq, Qmult; simpl. rewrite !Z.mul_1_r, Z.mul_eq_0. intuition. Qed. (** * inject_Z is a ring homomorphism: *) Lemma inject_Z_plus (x y: Z): inject_Z (x + y) = inject_Z x + inject_Z y. Proof. unfold Qplus, inject_Z. simpl. f_equal. ring. Qed. Lemma inject_Z_mult (x y: Z): inject_Z (x * y) = inject_Z x * inject_Z y. Proof. reflexivity. Qed. Lemma inject_Z_opp (x: Z): inject_Z (- x) = - inject_Z x. Proof. reflexivity. Qed. (** * Inverse and division. *) Lemma Qinv_involutive : forall q, (/ / q) == q. Proof. intros [[|n|n] d]; red; simpl; reflexivity. Qed. Theorem Qmult_inv_r : forall x, ~ x == 0 -> x*(/x) == 1. Proof. intros (x1, x2); unfold Qeq, Qdiv, Qmult; case x1; simpl; intros; simpl_mult; try ring. elim H; auto. Qed. 
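(** Illustrative example, added by the editor and not part of the upstream
    file: a concrete instance of [Qmult_inv_r]; [(2#3)*(/(2#3))] computes to
    the unreduced [6#6], which is [Qeq]-equal to [1]. *)
Example Qmult_inv_r_example : (2#3)*(/(2#3)) == 1.
Proof. unfold Qeq; reflexivity. Qed.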
Lemma Qinv_mult_distr : forall p q, / (p * q) == /p * /q. Proof. intros (x1,x2) (y1,y2); unfold Qeq, Qinv, Qmult; simpl. destruct x1; simpl; auto; destruct y1; simpl; auto. Qed. Theorem Qdiv_mult_l : forall x y, ~ y == 0 -> (x*y)/y == x. Proof. intros; unfold Qdiv. rewrite <- (Qmult_assoc x y (Qinv y)). rewrite (Qmult_inv_r y H). apply Qmult_1_r. Qed. Theorem Qmult_div_r : forall x y, ~ y == 0 -> y*(x/y) == x. Proof. intros; unfold Qdiv. rewrite (Qmult_assoc y x (Qinv y)). rewrite (Qmult_comm y x). fold (Qdiv (Qmult x y) y). apply Qdiv_mult_l; auto. Qed. (** Injectivity of Qmult (requires theory about Qinv above): *) Lemma Qmult_inj_r (x y z: Q): ~ z == 0 -> (x * z == y * z <-> x == y). Proof. intro z_ne_0. split; intro E. rewrite <- (Qmult_1_r x), <- (Qmult_1_r y). rewrite <- (Qmult_inv_r z); auto. do 2 rewrite Qmult_assoc. rewrite E. reflexivity. rewrite E. reflexivity. Qed. Lemma Qmult_inj_l (x y z: Q): ~ z == 0 -> (z * x == z * y <-> x == y). Proof. rewrite (Qmult_comm z x), (Qmult_comm z y). apply Qmult_inj_r. Qed. (** * Properties of order upon Q. *) Lemma Qle_refl x : x<=x. Proof. unfold Qle; auto with zarith. Qed. Lemma Qle_antisym x y : x<=y -> y<=x -> x==y. Proof. unfold Qle, Qeq; auto with zarith. Qed. Lemma Qle_trans : forall x y z, x<=y -> y<=z -> x<=z. Proof. unfold Qle; intros (x1, x2) (y1, y2) (z1, z2); simpl; intros. Open Scope Z_scope. apply Z.mul_le_mono_pos_r with ('y2); [easy|]. apply Z.le_trans with (y1 * 'x2 * 'z2). - rewrite Z.mul_shuffle0. now apply Z.mul_le_mono_pos_r. - rewrite Z.mul_shuffle0, (Z.mul_shuffle0 z1). now apply Z.mul_le_mono_pos_r. Close Scope Z_scope. Qed. Hint Resolve Qle_trans : qarith. Lemma Qlt_irrefl x : ~x ~ x==y. Proof. unfold Qlt, Qeq; auto with zarith. Qed. Lemma Zle_Qle (x y: Z): (x <= y)%Z = (inject_Z x <= inject_Z y). Proof. unfold Qle. simpl. now rewrite !Z.mul_1_r. Qed. Lemma Zlt_Qlt (x y: Z): (x < y)%Z = (inject_Z x < inject_Z y). Proof. unfold Qlt. simpl. now rewrite !Z.mul_1_r. Qed. (** Large = strict or equal *) Lemma Qle_lteq x y : x<=y <-> x x<=y. Proof. unfold Qle, Qlt; auto with zarith. Qed. Lemma Qle_lt_trans : forall x y z, x<=y -> y x y<=z -> x y x y<=x. Proof. unfold Qle, Qlt; auto with zarith. Qed. Lemma Qnot_le_lt : forall x y, ~ x<=y -> y ~ y<=x. Proof. unfold Qle, Qlt; auto with zarith. Qed. Lemma Qle_not_lt : forall x y, x<=y -> ~ y x -q <= -p. Proof. intros (a1,a2) (b1,b2); unfold Qle, Qlt; simpl. rewrite !Z.mul_opp_l. omega. Qed. Hint Resolve Qopp_le_compat : qarith. Lemma Qle_minus_iff : forall p q, p <= q <-> 0 <= q+-p. Proof. intros (x1,x2) (y1,y2); unfold Qle; simpl. rewrite Z.mul_opp_l. omega. Qed. Lemma Qlt_minus_iff : forall p q, p < q <-> 0 < q+-p. Proof. intros (x1,x2) (y1,y2); unfold Qlt; simpl. rewrite Z.mul_opp_l. omega. Qed. Lemma Qplus_le_compat : forall x y z t, x<=y -> z<=t -> x+z <= y+t. Proof. unfold Qplus, Qle; intros (x1, x2) (y1, y2) (z1, z2) (t1, t2); simpl; simpl_mult. Open Scope Z_scope. intros. match goal with |- ?a <= ?b => ring_simplify a b end. rewrite Z.add_comm. apply Z.add_le_mono. match goal with |- ?a <= ?b => ring_simplify z1 t1 ('z2) ('t2) a b end. auto with zarith. match goal with |- ?a <= ?b => ring_simplify x1 y1 ('x2) ('y2) a b end. auto with zarith. Close Scope Z_scope. Qed. Lemma Qplus_lt_le_compat : forall x y z t, x z<=t -> x+z < y+t. Proof. unfold Qplus, Qle, Qlt; intros (x1, x2) (y1, y2) (z1, z2) (t1, t2); simpl; simpl_mult. Open Scope Z_scope. intros. match goal with |- ?a < ?b => ring_simplify a b end. rewrite Z.add_comm. apply Z.add_le_lt_mono. 
match goal with |- ?a <= ?b => ring_simplify z1 t1 ('z2) ('t2) a b end. auto with zarith. match goal with |- ?a < ?b => ring_simplify x1 y1 ('x2) ('y2) a b end. do 2 (apply Z.mul_lt_mono_pos_r;try easy). Close Scope Z_scope. Qed. Lemma Qplus_le_l (x y z: Q): x + z <= y + z <-> x <= y. Proof. split; intros. rewrite <- (Qplus_0_r x), <- (Qplus_0_r y), <- (Qplus_opp_r z). do 2 rewrite Qplus_assoc. apply Qplus_le_compat; auto with *. apply Qplus_le_compat; auto with *. Qed. Lemma Qplus_le_r (x y z: Q): z + x <= z + y <-> x <= y. Proof. rewrite (Qplus_comm z x), (Qplus_comm z y). apply Qplus_le_l. Qed. Lemma Qplus_lt_l (x y z: Q): x + z < y + z <-> x < y. Proof. split; intros. rewrite <- (Qplus_0_r x), <- (Qplus_0_r y), <- (Qplus_opp_r z). do 2 rewrite Qplus_assoc. apply Qplus_lt_le_compat; auto with *. apply Qplus_lt_le_compat; auto with *. Qed. Lemma Qplus_lt_r (x y z: Q): z + x < z + y <-> x < y. Proof. rewrite (Qplus_comm z x), (Qplus_comm z y). apply Qplus_lt_l. Qed. Lemma Qmult_le_compat_r : forall x y z, x <= y -> 0 <= z -> x*z <= y*z. Proof. intros (a1,a2) (b1,b2) (c1,c2); unfold Qle, Qlt; simpl. Open Scope Z_scope. intros; simpl_mult. rewrite Z.mul_shuffle1, (Z.mul_shuffle1 b1). apply Z.mul_le_mono_nonneg_r; auto with zarith. Close Scope Z_scope. Qed. Lemma Qmult_lt_0_le_reg_r : forall x y z, 0 < z -> x*z <= y*z -> x <= y. Proof. intros (a1,a2) (b1,b2) (c1,c2); unfold Qle, Qlt; simpl. Open Scope Z_scope. simpl_mult. rewrite Z.mul_shuffle1, (Z.mul_shuffle1 b1). intros LT LE. apply Z.mul_le_mono_pos_r in LE; trivial. apply Z.mul_pos_pos; [omega|easy]. Close Scope Z_scope. Qed. Lemma Qmult_le_r (x y z: Q): 0 < z -> (x*z <= y*z <-> x <= y). Proof. split; intro. now apply Qmult_lt_0_le_reg_r with z. apply Qmult_le_compat_r; auto with qarith. Qed. Lemma Qmult_le_l (x y z: Q): 0 < z -> (z*x <= z*y <-> x <= y). Proof. rewrite (Qmult_comm z x), (Qmult_comm z y). apply Qmult_le_r. Qed. Lemma Qmult_lt_compat_r : forall x y z, 0 < z -> x < y -> x*z < y*z. Proof. intros (a1,a2) (b1,b2) (c1,c2); unfold Qle, Qlt; simpl. Open Scope Z_scope. intros; simpl_mult. rewrite Z.mul_shuffle1, (Z.mul_shuffle1 b1). apply Z.mul_lt_mono_pos_r; auto with zarith. apply Z.mul_pos_pos; [omega|reflexivity]. Close Scope Z_scope. Qed. Lemma Qmult_lt_r: forall x y z, 0 < z -> (x*z < y*z <-> x < y). Proof. Open Scope Z_scope. intros (a1,a2) (b1,b2) (c1,c2). unfold Qle, Qlt; simpl. simpl_mult. rewrite Z.mul_shuffle1, (Z.mul_shuffle1 b1). intro LT. rewrite <- Z.mul_lt_mono_pos_r. reflexivity. apply Z.mul_pos_pos; [omega|reflexivity]. Close Scope Z_scope. Qed. Lemma Qmult_lt_l (x y z: Q): 0 < z -> (z*x < z*y <-> x < y). Proof. rewrite (Qmult_comm z x), (Qmult_comm z y). apply Qmult_lt_r. Qed. Lemma Qmult_le_0_compat : forall a b, 0 <= a -> 0 <= b -> 0 <= a*b. Proof. intros a b Ha Hb. unfold Qle in *. simpl in *. auto with *. Qed. Lemma Qinv_le_0_compat : forall a, 0 <= a -> 0 <= /a. Proof. intros [[|n|n] d] Ha; assumption. Qed. Lemma Qle_shift_div_l : forall a b c, 0 < c -> a*c <= b -> a <= b/c. Proof. intros a b c Hc H. apply Qmult_lt_0_le_reg_r with (c). assumption. setoid_replace (b/c*c) with (c*(b/c)) by apply Qmult_comm. rewrite Qmult_div_r; try assumption. auto with *. Qed. Lemma Qle_shift_inv_l : forall a c, 0 < c -> a*c <= 1 -> a <= /c. Proof. intros a c Hc H. setoid_replace (/c) with (1*/c) by (symmetry; apply Qmult_1_l). change (a <= 1/c). apply Qle_shift_div_l; assumption. Qed. Lemma Qle_shift_div_r : forall a b c, 0 < b -> a <= c*b -> a/b <= c. Proof. intros a b c Hc H. apply Qmult_lt_0_le_reg_r with b. 
assumption. setoid_replace (a/b*b) with (b*(a/b)) by apply Qmult_comm. rewrite Qmult_div_r; try assumption. auto with *. Qed. Lemma Qle_shift_inv_r : forall b c, 0 < b -> 1 <= c*b -> /b <= c. Proof. intros b c Hc H. setoid_replace (/b) with (1*/b) by (symmetry; apply Qmult_1_l). change (1/b <= c). apply Qle_shift_div_r; assumption. Qed. Lemma Qinv_lt_0_compat : forall a, 0 < a -> 0 < /a. Proof. intros [[|n|n] d] Ha; assumption. Qed. Lemma Qlt_shift_div_l : forall a b c, 0 < c -> a*c < b -> a < b/c. Proof. intros a b c Hc H. apply Qnot_le_lt. intros H0. apply (Qlt_not_le _ _ H). apply Qmult_lt_0_le_reg_r with (/c). apply Qinv_lt_0_compat. assumption. setoid_replace (a*c/c) with (a) by (apply Qdiv_mult_l; auto with *). assumption. Qed. Lemma Qlt_shift_inv_l : forall a c, 0 < c -> a*c < 1 -> a < /c. Proof. intros a c Hc H. setoid_replace (/c) with (1*/c) by (symmetry; apply Qmult_1_l). change (a < 1/c). apply Qlt_shift_div_l; assumption. Qed. Lemma Qlt_shift_div_r : forall a b c, 0 < b -> a < c*b -> a/b < c. Proof. intros a b c Hc H. apply Qnot_le_lt. intros H0. apply (Qlt_not_le _ _ H). apply Qmult_lt_0_le_reg_r with (/b). apply Qinv_lt_0_compat. assumption. setoid_replace (c*b/b) with (c) by (apply Qdiv_mult_l; auto with *). assumption. Qed. Lemma Qlt_shift_inv_r : forall b c, 0 < b -> 1 < c*b -> /b < c. Proof. intros b c Hc H. setoid_replace (/b) with (1*/b) by (symmetry; apply Qmult_1_l). change (1/b < c). apply Qlt_shift_div_r; assumption. Qed. (** * Rational to the n-th power *) Definition Qpower_positive (q:Q)(p:positive) : Q := pow_pos Qmult q p. Instance Qpower_positive_comp : Proper (Qeq==>eq==>Qeq) Qpower_positive. Proof. intros x x' Hx y y' Hy. rewrite <-Hy; clear y' Hy. unfold Qpower_positive. induction y; simpl; try rewrite IHy; try rewrite Hx; reflexivity. Qed. Definition Qpower (q:Q) (z:Z) := match z with | Zpos p => Qpower_positive q p | Z0 => 1 | Zneg p => /Qpower_positive q p end. Notation " q ^ z " := (Qpower q z) : Q_scope. Instance Qpower_comp : Proper (Qeq==>eq==>Qeq) Qpower. Proof. intros x x' Hx y y' Hy. rewrite <- Hy; clear y' Hy. destruct y; simpl; rewrite ?Hx; auto with *. Qed. coq-8.4pl2/theories/QArith/Qfield.v0000640000175000001440000000507012010532755016236 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* isZcst z | Qmake ?n ?d => match isZcst n with true => isPcst d | _ => false end | _ => false end. Ltac Qcst t := match isQcst t with true => t | _ => NotConstant end. Ltac Qpow_tac t := match t with | Z0 => N0 | Zpos ?n => Ncst (Npos n) | Z.of_N ?n => Ncst n | NtoZ ?n => Ncst n | _ => NotConstant end. Add Field Qfield : Qsft (decidable Qeq_bool_eq, completeness Qeq_eq_bool, constants [Qcst], power_tac Qpower_theory [Qpow_tac]). (** Exemple of use: *) Section Examples. Let ex1 : forall x y z : Q, (x+y)*z == (x*z)+(y*z). intros. ring. Qed. Let ex2 : forall x y : Q, x+y == y+x. intros. ring. Qed. Let ex3 : forall x y z : Q, (x+y)+z == x+(y+z). intros. ring. Qed. Let ex4 : (inject_Z 1)+(inject_Z 1)==(inject_Z 2). ring. Qed. Let ex5 : 1+1 == 2#1. ring. Qed. Let ex6 : (1#1)+(1#1) == 2#1. ring. Qed. Let ex7 : forall x : Q, x-x== 0. intro. ring. Qed. Let ex8 : forall x : Q, x^1 == x. intro. ring. Qed. Let ex9 : forall x : Q, x^0 == 1. intro. ring. Qed. Let ex10 : forall x y : Q, ~(y==0) -> (x/y)*y == x. intros. field. auto. Qed. End Examples. Lemma Qopp_plus : forall a b, -(a+b) == -a + -b. Proof. intros; ring. Qed. Lemma Qopp_opp : forall q, - -q==q. Proof. 
intros; ring. Qed. coq-8.4pl2/theories/QArith/Qabs.v0000640000175000001440000000756712010532755015735 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type), (0 <= x -> P x) -> (x <= 0 -> P (- x)) -> P (Qabs x). Proof. intros x P H1 H2. destruct x as [[|xn|xn] xd]; [apply H1|apply H1|apply H2]; abstract (compute; discriminate). Defined. Add Morphism Qabs with signature Qeq ==> Qeq as Qabs_wd. intros [xn xd] [yn yd] H. simpl. unfold Qeq in *. simpl in *. change (' yd)%Z with (Z.abs (' yd)). change (' xd)%Z with (Z.abs (' xd)). repeat rewrite <- Z.abs_mul. congruence. Qed. Lemma Qabs_pos : forall x, 0 <= x -> Qabs x == x. Proof. intros x H. apply Qabs_case. reflexivity. intros H0. setoid_replace x with 0. reflexivity. apply Qle_antisym; assumption. Qed. Lemma Qabs_neg : forall x, x <= 0 -> Qabs x == - x. Proof. intros x H. apply Qabs_case. intros H0. setoid_replace x with 0. reflexivity. apply Qle_antisym; assumption. reflexivity. Qed. Lemma Qabs_nonneg : forall x, 0 <= (Qabs x). intros x. apply Qabs_case. auto. apply (Qopp_le_compat x 0). Qed. Lemma Zabs_Qabs : forall n d, (Z.abs n#d)==Qabs (n#d). Proof. intros [|n|n]; reflexivity. Qed. Lemma Qabs_opp : forall x, Qabs (-x) == Qabs x. Proof. intros x. do 2 apply Qabs_case; try (intros; ring); (intros H0 H1; setoid_replace x with 0;[reflexivity|]; apply Qle_antisym);try assumption; rewrite Qle_minus_iff in *; ring_simplify; ring_simplify in H1; assumption. Qed. Lemma Qabs_triangle : forall x y, Qabs (x+y) <= Qabs x + Qabs y. Proof. intros [xn xd] [yn yd]. unfold Qplus. unfold Qle. simpl. apply Z.mul_le_mono_nonneg_r;auto with *. change (' yd)%Z with (Z.abs (' yd)). change (' xd)%Z with (Z.abs (' xd)). repeat rewrite <- Z.abs_mul. apply Z.abs_triangle. Qed. Lemma Qabs_Qmult : forall a b, Qabs (a*b) == (Qabs a)*(Qabs b). Proof. intros [an ad] [bn bd]. simpl. rewrite Z.abs_mul. reflexivity. Qed. Lemma Qabs_Qminus x y: Qabs (x - y) = Qabs (y - x). Proof. unfold Qminus, Qopp. simpl. rewrite Pos.mul_comm, <- Z.abs_opp. do 2 f_equal. ring. Qed. Lemma Qle_Qabs : forall a, a <= Qabs a. Proof. intros a. apply Qabs_case; auto with *. intros H. apply Qle_trans with 0; try assumption. change 0 with (-0). apply Qopp_le_compat. assumption. Qed. Lemma Qabs_triangle_reverse : forall x y, Qabs x - Qabs y <= Qabs (x - y). Proof. intros x y. rewrite Qle_minus_iff. setoid_replace (Qabs (x - y) + - (Qabs x - Qabs y)) with ((Qabs (x - y) + Qabs y) + - Qabs x) by ring. rewrite <- Qle_minus_iff. setoid_replace (Qabs x) with (Qabs (x-y+y)). apply Qabs_triangle. apply Qabs_wd. ring. Qed. Lemma Qabs_Qle_condition x y: Qabs x <= y <-> -y <= x <= y. Proof. split. split. rewrite <- (Qopp_opp x). apply Qopp_le_compat. apply Qle_trans with (Qabs (-x)). apply Qle_Qabs. now rewrite Qabs_opp. apply Qle_trans with (Qabs x); auto using Qle_Qabs. intros (H,H'). apply Qabs_case; trivial. intros. rewrite <- (Qopp_opp y). now apply Qopp_le_compat. Qed. Lemma Qabs_diff_Qle_condition x y r: Qabs (x - y) <= r <-> x - r <= y <= x + r. Proof. intros. unfold Qminus. rewrite Qabs_Qle_condition. rewrite <- (Qplus_le_l (-r) (x+-y) (y+r)). rewrite <- (Qplus_le_l (x+-y) r (y-r)). setoid_replace (-r + (y + r)) with y by ring. setoid_replace (r + (y - r)) with y by ring. setoid_replace (x + - y + (y + r)) with (x + r) by ring. setoid_replace (x + - y + (y - r)) with (x - r) by ring. intuition. Qed. 
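(** Illustrative example, added by the editor and not part of the upstream
    file (it assumes the usual definition of [Qabs] as the absolute value of
    the numerator over the same denominator): the absolute value of a
    concrete negative rational, checked by computation. *)
Example Qabs_example : Qabs (-(3#4)) == 3#4.
Proof. unfold Qeq; reflexivity. Qed.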
coq-8.4pl2/theories/QArith/QOrderedType.v0000640000175000001440000000340012010532755017374 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Qeq==>iff) Qlt. Proof. auto with *. Qed. Definition le_lteq := Qle_lteq. Definition compare_spec := Qcompare_spec. End Q_as_OT. (** * An [order] tactic for [Q] numbers *) Module QOrder := OTF_to_OrderTac Q_as_OT. Ltac q_order := QOrder.order. (** Note that [q_order] is domain-agnostic: it will not prove [1<=2] or [x<=x+x], but rather things like [x<=y -> y<=x -> x==y]. *) coq-8.4pl2/theories/QArith/Qreals.v0000640000175000001440000001521312010532755016261 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0%R. intros; apply not_O_IZR; auto with qarith. Qed. Hint Resolve IZR_nz Rmult_integral_contrapositive. Lemma eqR_Qeq : forall x y : Q, Q2R x = Q2R y -> x==y. Proof. unfold Qeq, Q2R; intros (x1, x2) (y1, y2); unfold Qnum, Qden; intros. apply eq_IZR. do 2 rewrite mult_IZR. set (X1 := IZR x1) in *; assert (X2nz := IZR_nz x2); set (X2 := IZR (Zpos x2)) in *. set (Y1 := IZR y1) in *; assert (Y2nz := IZR_nz y2); set (Y2 := IZR (Zpos y2)) in *. assert ((X2 * X1 * / X2)%R = (X2 * (Y1 * / Y2))%R). rewrite <- H; field; auto. rewrite Rinv_r_simpl_m in H0; auto; rewrite H0; field; auto. Qed. Lemma Qeq_eqR : forall x y : Q, x==y -> Q2R x = Q2R y. Proof. unfold Qeq, Q2R; intros (x1, x2) (y1, y2); unfold Qnum, Qden; intros. set (X1 := IZR x1) in *; assert (X2nz := IZR_nz x2); set (X2 := IZR (Zpos x2)) in *. set (Y1 := IZR y1) in *; assert (Y2nz := IZR_nz y2); set (Y2 := IZR (Zpos y2)) in *. assert ((X1 * Y2)%R = (Y1 * X2)%R). unfold X1, X2, Y1, Y2; do 2 rewrite <- mult_IZR. apply IZR_eq; auto. clear H. field_simplify_eq; auto. ring_simplify X1 Y2 (Y2 * X1)%R. rewrite H0; ring. Qed. Lemma Rle_Qle : forall x y : Q, (Q2R x <= Q2R y)%R -> x<=y. Proof. unfold Qle, Q2R; intros (x1, x2) (y1, y2); unfold Qnum, Qden; intros. apply le_IZR. do 2 rewrite mult_IZR. set (X1 := IZR x1) in *; assert (X2nz := IZR_nz x2); set (X2 := IZR (Zpos x2)) in *. set (Y1 := IZR y1) in *; assert (Y2nz := IZR_nz y2); set (Y2 := IZR (Zpos y2)) in *. replace (X1 * Y2)%R with (X1 * / X2 * (X2 * Y2))%R; try (field; auto). replace (Y1 * X2)%R with (Y1 * / Y2 * (X2 * Y2))%R; try (field; auto). apply Rmult_le_compat_r; auto. apply Rmult_le_pos. unfold X2; replace 0%R with (IZR 0); auto; apply IZR_le; auto with zarith. unfold Y2; replace 0%R with (IZR 0); auto; apply IZR_le; auto with zarith. Qed. Lemma Qle_Rle : forall x y : Q, x<=y -> (Q2R x <= Q2R y)%R. Proof. unfold Qle, Q2R; intros (x1, x2) (y1, y2); unfold Qnum, Qden; intros. set (X1 := IZR x1) in *; assert (X2nz := IZR_nz x2); set (X2 := IZR (Zpos x2)) in *. set (Y1 := IZR y1) in *; assert (Y2nz := IZR_nz y2); set (Y2 := IZR (Zpos y2)) in *. assert (X1 * Y2 <= Y1 * X2)%R. unfold X1, X2, Y1, Y2; do 2 rewrite <- mult_IZR. apply IZR_le; auto. clear H. replace (X1 * / X2)%R with (X1 * Y2 * (/ X2 * / Y2))%R; try (field; auto). replace (Y1 * / Y2)%R with (Y1 * X2 * (/ X2 * / Y2))%R; try (field; auto). apply Rmult_le_compat_r; auto. apply Rmult_le_pos; apply Rlt_le; apply Rinv_0_lt_compat. unfold X2; replace 0%R with (IZR 0); auto; apply IZR_lt; red; auto with zarith. unfold Y2; replace 0%R with (IZR 0); auto; apply IZR_lt; red; auto with zarith. Qed. Lemma Rlt_Qlt : forall x y : Q, (Q2R x < Q2R y)%R -> x (Q2R x < Q2R y)%R. Proof. 
unfold Qlt, Q2R; intros (x1, x2) (y1, y2); unfold Qnum, Qden; intros. set (X1 := IZR x1) in *; assert (X2nz := IZR_nz x2); set (X2 := IZR (Zpos x2)) in *. set (Y1 := IZR y1) in *; assert (Y2nz := IZR_nz y2); set (Y2 := IZR (Zpos y2)) in *. assert (X1 * Y2 < Y1 * X2)%R. unfold X1, X2, Y1, Y2; do 2 rewrite <- mult_IZR. apply IZR_lt; auto. clear H. replace (X1 * / X2)%R with (X1 * Y2 * (/ X2 * / Y2))%R; try (field; auto). replace (Y1 * / Y2)%R with (Y1 * X2 * (/ X2 * / Y2))%R; try (field; auto). apply Rmult_lt_compat_r; auto. apply Rmult_lt_0_compat; apply Rinv_0_lt_compat. unfold X2; replace 0%R with (IZR 0); auto; apply IZR_lt; red; auto with zarith. unfold Y2; replace 0%R with (IZR 0); auto; apply IZR_lt; red; auto with zarith. Qed. Lemma Q2R_plus : forall x y : Q, Q2R (x+y) = (Q2R x + Q2R y)%R. Proof. unfold Qplus, Qeq, Q2R; intros (x1, x2) (y1, y2); unfold Qden, Qnum. simpl_mult. rewrite plus_IZR. do 3 rewrite mult_IZR. field; auto. Qed. Lemma Q2R_mult : forall x y : Q, Q2R (x*y) = (Q2R x * Q2R y)%R. Proof. unfold Qmult, Qeq, Q2R; intros (x1, x2) (y1, y2); unfold Qden, Qnum. simpl_mult. do 2 rewrite mult_IZR. field; auto. Qed. Lemma Q2R_opp : forall x : Q, Q2R (- x) = (- Q2R x)%R. Proof. unfold Qopp, Qeq, Q2R; intros (x1, x2); unfold Qden, Qnum. rewrite Ropp_Ropp_IZR. field; auto. Qed. Lemma Q2R_minus : forall x y : Q, Q2R (x-y) = (Q2R x - Q2R y)%R. unfold Qminus; intros; rewrite Q2R_plus; rewrite Q2R_opp; auto. Qed. Lemma Q2R_inv : forall x : Q, ~ x==0 -> Q2R (/x) = (/ Q2R x)%R. Proof. unfold Qinv, Q2R, Qeq; intros (x1, x2); unfold Qden, Qnum. case x1. simpl; intros; elim H; trivial. intros; field; auto. intros; change (IZR (Zneg x2)) with (- IZR (' x2))%R; change (IZR (Zneg p)) with (- IZR (' p))%R; field; (*auto 8 with real.*) repeat split; auto; auto with real. Qed. Lemma Q2R_div : forall x y : Q, ~ y==0 -> Q2R (x/y) = (Q2R x / Q2R y)%R. Proof. unfold Qdiv, Rdiv. intros; rewrite Q2R_mult. rewrite Q2R_inv; auto. Qed. Hint Rewrite Q2R_plus Q2R_mult Q2R_opp Q2R_minus Q2R_inv Q2R_div : q2r_simpl. Section LegacyQField. (** In the past, the field tactic was not able to deal with setoid datatypes, so translating from Q to R and applying field on reals was a workaround. See now Qfield for a direct field tactic on Q. *) Ltac QField := apply eqR_Qeq; autorewrite with q2r_simpl; try field; auto. (** Examples of use: *) Let ex1 : forall x y z : Q, (x+y)*z == (x*z)+(y*z). intros; QField. Qed. Let ex2 : forall x y : Q, ~ y==0 -> (x/y)*y == x. intros; QField. intro; apply H; apply eqR_Qeq. rewrite H0; unfold Q2R; simpl; field; auto with real. Qed. End LegacyQField. coq-8.4pl2/theories/QArith/QArith.v0000640000175000001440000000113312010532755016216 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* - q < - p. Proof. intros (a1,a2) (b1,b2); unfold Qle, Qlt; simpl. rewrite !Z.mul_opp_l; omega. Qed. Hint Resolve Qopp_lt_compat : qarith. (************) Local Coercion inject_Z : Z >-> Q. Definition Qfloor (x:Q) := let (n,d) := x in Z.div n (Zpos d). Definition Qceiling (x:Q) := (-(Qfloor (-x)))%Z. Lemma Qfloor_Z : forall z:Z, Qfloor z = z. Proof. intros z. simpl. auto with *. Qed. Lemma Qceiling_Z : forall z:Z, Qceiling z = z. Proof. intros z. unfold Qceiling. simpl. rewrite Zdiv_1_r. auto with *. Qed. Lemma Qfloor_le : forall x, Qfloor x <= x. Proof. intros [n d]. simpl. unfold Qle. simpl. replace (n*1)%Z with n by ring. rewrite Z.mul_comm. apply Z_mult_div_ge. auto with *. Qed. 
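(** Illustrative examples, added by the editor and not part of the upstream
    file: floor and ceiling of 7/2, computed directly from the definitions
    of [Qfloor] and [Qceiling] above.  The example names are new here. *)
Example Qfloor_7_2 : Qfloor (7#2) = 3%Z.
Proof. reflexivity. Qed.
Example Qceiling_7_2 : Qceiling (7#2) = 4%Z.
Proof. reflexivity. Qed.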
Hint Resolve Qfloor_le : qarith. Lemma Qle_ceiling : forall x, x <= Qceiling x. Proof. intros x. apply Qle_trans with (- - x). rewrite Qopp_involutive. auto with *. change (Qceiling x:Q) with (-(Qfloor(-x))). auto with *. Qed. Hint Resolve Qle_ceiling : qarith. Lemma Qle_floor_ceiling : forall x, Qfloor x <= Qceiling x. Proof. eauto with qarith. Qed. Lemma Qlt_floor : forall x, x < (Qfloor x+1)%Z. Proof. intros [n d]. simpl. unfold Qlt. simpl. replace (n*1)%Z with n by ring. ring_simplify. replace (n / ' d * ' d + ' d)%Z with (('d * (n / 'd) + n mod 'd) + 'd - n mod 'd)%Z by ring. rewrite <- Z_div_mod_eq; auto with*. rewrite <- Z.lt_add_lt_sub_r. destruct (Z_mod_lt n ('d)); auto with *. Qed. Hint Resolve Qlt_floor : qarith. Lemma Qceiling_lt : forall x, (Qceiling x-1)%Z < x. Proof. intros x. unfold Qceiling. replace (- Qfloor (- x) - 1)%Z with (-(Qfloor (-x) + 1))%Z by ring. change ((- (Qfloor (- x) + 1))%Z:Q) with (-(Qfloor (- x) + 1)%Z). apply Qlt_le_trans with (- - x); auto with *. rewrite Qopp_involutive. auto with *. Qed. Hint Resolve Qceiling_lt : qarith. Lemma Qfloor_resp_le : forall x y, x <= y -> (Qfloor x <= Qfloor y)%Z. Proof. intros [xn xd] [yn yd] Hxy. unfold Qle in *. simpl in *. rewrite <- (Zdiv_mult_cancel_r xn ('xd) ('yd)); auto with *. rewrite <- (Zdiv_mult_cancel_r yn ('yd) ('xd)); auto with *. rewrite (Z.mul_comm ('yd) ('xd)). apply Z_div_le; auto with *. Qed. Hint Resolve Qfloor_resp_le : qarith. Lemma Qceiling_resp_le : forall x y, x <= y -> (Qceiling x <= Qceiling y)%Z. Proof. intros x y Hxy. unfold Qceiling. cut (Qfloor (-y) <= Qfloor (-x))%Z; auto with *. Qed. Hint Resolve Qceiling_resp_le : qarith. Add Morphism Qfloor with signature Qeq ==> eq as Qfloor_comp. Proof. intros x y H. apply Z.le_antisymm. auto with *. symmetry in H; auto with *. Qed. Add Morphism Qceiling with signature Qeq ==> eq as Qceiling_comp. Proof. intros x y H. apply Z.le_antisymm. auto with *. symmetry in H; auto with *. Qed. Lemma Zdiv_Qdiv (n m: Z): (n / m)%Z = Qfloor (n / m). Proof. unfold Qfloor. intros. simpl. destruct m as [?|?|p]; simpl. now rewrite Zdiv_0_r, Z.mul_0_r. now rewrite Z.mul_1_r. rewrite <- Z.opp_eq_mul_m1. rewrite <- (Z.opp_involutive (Zpos p)). now rewrite Zdiv_opp_opp. Qed. coq-8.4pl2/theories/QArith/Qminmax.v0000640000175000001440000000410712010532755016444 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string -> string. Delimit Scope string_scope with string. Bind Scope string_scope with string. Local Open Scope string_scope. (** Equality is decidable *) Definition string_dec : forall s1 s2 : string, {s1 = s2} + {s1 <> s2}. decide equality; apply ascii_dec. Defined. (** *** Concatenation of strings *) Reserved Notation "x ++ y" (right associativity, at level 60). Fixpoint append (s1 s2 : string) : string := match s1 with | EmptyString => s2 | String c s1' => String c (s1' ++ s2) end where "s1 ++ s2" := (append s1 s2) : string_scope. (******************************) (** Length *) (******************************) Fixpoint length (s : string) : nat := match s with | EmptyString => 0 | String c s' => S (length s') end. (******************************) (** Nth character of a string *) (******************************) Fixpoint get (n : nat) (s : string) {struct s} : option ascii := match s with | EmptyString => None | String c s' => match n with | O => Some c | S n' => get n' s' end end. 
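(** Illustrative examples, added by the editor and not part of the upstream
    file (they assume the string literal syntax declared at the top of this
    file): [get] is zero-based and returns [None] past the end of the
    string; the first example anticipates [append_correct2] below. *)
Example get_append_example : get 4 ("abc" ++ "de") = get 1 "de".
Proof. reflexivity. Qed.
Example get_past_end_example : get 3 "abc" = None.
Proof. reflexivity. Qed.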
(** Two lists that are identical through get are syntactically equal *) Theorem get_correct : forall s1 s2 : string, (forall n : nat, get n s1 = get n s2) <-> s1 = s2. Proof. intros s1; elim s1; simpl. intros s2; case s2; simpl; split; auto. intros H; generalize (H 0); intros H1; inversion H1. intros; discriminate. intros a s1' Rec s2; case s2; simpl; split; auto. intros H; generalize (H 0); intros H1; inversion H1. intros; discriminate. intros H; generalize (H 0); simpl; intros H1; inversion H1. case (Rec s). intros H0; rewrite H0; auto. intros n; exact (H (S n)). intros H; injection H; intros H1 H2 n; case n; auto. rewrite H2; trivial. rewrite H1; auto. Qed. (** The first elements of [s1 ++ s2] are the ones of [s1] *) Theorem append_correct1 : forall (s1 s2 : string) (n : nat), n < length s1 -> get n s1 = get n (s1 ++ s2). Proof. intros s1; elim s1; simpl; auto. intros s2 n H; inversion H. intros a s1' Rec s2 n; case n; simpl; auto. intros n0 H; apply Rec; auto. apply lt_S_n; auto. Qed. (** The last elements of [s1 ++ s2] are the ones of [s2] *) Theorem append_correct2 : forall (s1 s2 : string) (n : nat), get n s2 = get (n + length s1) (s1 ++ s2). Proof. intros s1; elim s1; simpl; auto. intros s2 n; rewrite plus_comm; simpl; auto. intros a s1' Rec s2 n; case n; simpl; auto. generalize (Rec s2 0); simpl; auto. intros. rewrite <- Plus.plus_Snm_nSm; auto. Qed. (** *** Substrings *) (** [substring n m s] returns the substring of [s] that starts at position [n] and of length [m]; if this does not make sense it returns [""] *) Fixpoint substring (n m : nat) (s : string) : string := match n, m, s with | 0, 0, _ => EmptyString | 0, S m', EmptyString => s | 0, S m', String c s' => String c (substring 0 m' s') | S n', _, EmptyString => s | S n', _, String c s' => substring n' m s' end. (** The substring is included in the initial string *) Theorem substring_correct1 : forall (s : string) (n m p : nat), p < m -> get p (substring n m s) = get (p + n) s. Proof. intros s; elim s; simpl; auto. intros n; case n; simpl; auto. intros m; case m; simpl; auto. intros a s' Rec; intros n; case n; simpl; auto. intros m; case m; simpl; auto. intros p H; inversion H. intros m' p; case p; simpl; auto. intros n0 H; apply Rec; simpl; auto. apply Lt.lt_S_n; auto. intros n' m p H; rewrite <- Plus.plus_Snm_nSm; simpl; auto. Qed. (** The substring has at most [m] elements *) Theorem substring_correct2 : forall (s : string) (n m p : nat), m <= p -> get p (substring n m s) = None. Proof. intros s; elim s; simpl; auto. intros n; case n; simpl; auto. intros m; case m; simpl; auto. intros a s' Rec; intros n; case n; simpl; auto. intros m; case m; simpl; auto. intros m' p; case p; simpl; auto. intros H; inversion H. intros n0 H; apply Rec; simpl; auto. apply Le.le_S_n; auto. Qed. (** *** Test functions *) (** Test if [s1] is a prefix of [s2] *) Fixpoint prefix (s1 s2 : string) {struct s2} : bool := match s1 with | EmptyString => true | String a s1' => match s2 with | EmptyString => false | String b s2' => match ascii_dec a b with | left _ => prefix s1' s2' | right _ => false end end end. (** If [s1] is a prefix of [s2], it is the [substring] of length [length s1] starting at position [O] of [s2] *) Theorem prefix_correct : forall s1 s2 : string, prefix s1 s2 = true <-> substring 0 (length s1) s2 = s1. Proof. intros s1; elim s1; simpl; auto. intros s2; case s2; simpl; split; auto. intros a s1' Rec s2; case s2; simpl; auto. split; intros; discriminate. intros b s2'; case (ascii_dec a b); simpl; auto. 
intros e; case (Rec s2'); intros H1 H2; split; intros H3; auto. rewrite e; rewrite H1; auto. apply H2; injection H3; auto. intros n; split; intros; try discriminate. case n; injection H; auto. Qed. (** Test if, starting at position [n], [s1] occurs in [s2]; if so it returns the position *) Fixpoint index (n : nat) (s1 s2 : string) : option nat := match s2, n with | EmptyString, 0 => match s1 with | EmptyString => Some 0 | String a s1' => None end | EmptyString, S n' => None | String b s2', 0 => if prefix s1 s2 then Some 0 else match index 0 s1 s2' with | Some n => Some (S n) | None => None end | String b s2', S n' => match index n' s1 s2' with | Some n => Some (S n) | None => None end end. (* Dirty trick to avoid locally that prefix reduces itself *) Opaque prefix. (** If the result of [index] is [Some m], [s1] in [s2] at position [m] *) Theorem index_correct1 : forall (n m : nat) (s1 s2 : string), index n s1 s2 = Some m -> substring m (length s1) s2 = s1. Proof. intros n m s1 s2; generalize n m s1; clear n m s1; elim s2; simpl; auto. intros n; case n; simpl; auto. intros m s1; case s1; simpl; auto. intros H; injection H; intros H1; rewrite <- H1; auto. intros; discriminate. intros; discriminate. intros b s2' Rec n m s1. case n; simpl; auto. generalize (prefix_correct s1 (String b s2')); case (prefix s1 (String b s2')). intros H0 H; injection H; intros H1; rewrite <- H1; auto. case H0; simpl; auto. case m; simpl; auto. case (index 0 s1 s2'); intros; discriminate. intros m'; generalize (Rec 0 m' s1); case (index 0 s1 s2'); auto. intros x H H0 H1; apply H; injection H1; auto. intros; discriminate. intros n'; case m; simpl; auto. case (index n' s1 s2'); intros; discriminate. intros m'; generalize (Rec n' m' s1); case (index n' s1 s2'); auto. intros x H H1; apply H; injection H1; auto. intros; discriminate. Qed. (** If the result of [index] is [Some m], [s1] does not occur in [s2] before [m] *) Theorem index_correct2 : forall (n m : nat) (s1 s2 : string), index n s1 s2 = Some m -> forall p : nat, n <= p -> p < m -> substring p (length s1) s2 <> s1. Proof. intros n m s1 s2; generalize n m s1; clear n m s1; elim s2; simpl; auto. intros n; case n; simpl; auto. intros m s1; case s1; simpl; auto. intros H; injection H; intros H1; rewrite <- H1. intros p H0 H2; inversion H2. intros; discriminate. intros; discriminate. intros b s2' Rec n m s1. case n; simpl; auto. generalize (prefix_correct s1 (String b s2')); case (prefix s1 (String b s2')). intros H0 H; injection H; intros H1; rewrite <- H1; auto. intros p H2 H3; inversion H3. case m; simpl; auto. case (index 0 s1 s2'); intros; discriminate. intros m'; generalize (Rec 0 m' s1); case (index 0 s1 s2'); auto. intros x H H0 H1 p; try case p; simpl; auto. intros H2 H3; red; intros H4; case H0. intros H5 H6; absurd (false = true); auto with bool. intros n0 H2 H3; apply H; auto. injection H1; auto. apply Le.le_O_n. apply Lt.lt_S_n; auto. intros; discriminate. intros n'; case m; simpl; auto. case (index n' s1 s2'); intros; discriminate. intros m'; generalize (Rec n' m' s1); case (index n' s1 s2'); auto. intros x H H0 p; case p; simpl; auto. intros H1; inversion H1; auto. intros n0 H1 H2; apply H; auto. injection H0; auto. apply Le.le_S_n; auto. apply Lt.lt_S_n; auto. intros; discriminate. Qed. (** If the result of [index] is [None], [s1] does not occur in [s2] after [n] *) Theorem index_correct3 : forall (n m : nat) (s1 s2 : string), index n s1 s2 = None -> s1 <> EmptyString -> n <= m -> substring m (length s1) s2 <> s1. Proof. 
intros n m s1 s2; generalize n m s1; clear n m s1; elim s2; simpl; auto. intros n; case n; simpl; auto. intros m s1; case s1; simpl; auto. case m; intros; red; intros; discriminate. intros n' m; case m; auto. intros s1; case s1; simpl; auto. intros b s2' Rec n m s1. case n; simpl; auto. generalize (prefix_correct s1 (String b s2')); case (prefix s1 (String b s2')). intros; discriminate. case m; simpl; auto with bool. case s1; simpl; auto. intros a s H H0 H1 H2; red; intros H3; case H. intros H4 H5; absurd (false = true); auto with bool. case s1; simpl; auto. intros a s n0 H H0 H1 H2; change (substring n0 (length (String a s)) s2' <> String a s); apply (Rec 0); auto. generalize H0; case (index 0 (String a s) s2'); simpl; auto; intros; discriminate. apply Le.le_O_n. intros n'; case m; simpl; auto. intros H H0 H1; inversion H1. intros n0 H H0 H1; apply (Rec n'); auto. generalize H; case (index n' s1 s2'); simpl; auto; intros; discriminate. apply Le.le_S_n; auto. Qed. (* Back to normal for prefix *) Transparent prefix. (** If we are searching for the [Empty] string and the answer is no this means that [n] is greater than the size of [s] *) Theorem index_correct4 : forall (n : nat) (s : string), index n EmptyString s = None -> length s < n. Proof. intros n s; generalize n; clear n; elim s; simpl; auto. intros n; case n; simpl; auto. intros; discriminate. intros; apply Lt.lt_O_Sn. intros a s' H n; case n; simpl; auto. intros; discriminate. intros n'; generalize (H n'); case (index n' EmptyString s'); simpl; auto. intros; discriminate. intros H0 H1; apply Lt.lt_n_S; auto. Qed. (** Same as [index] but with no optional type, we return [0] when it does not occur *) Definition findex n s1 s2 := match index n s1 s2 with | Some n => n | None => 0 end. (** *** Concrete syntax *) (** The concrete syntax for strings in scope string_scope follows the Coq convention for strings: all ascii characters of code less than 128 are litteral to the exception of the character `double quote' which must be doubled. Strings that involve ascii characters of code >= 128 which are not part of a valid utf8 sequence of characters are not representable using the Coq string notation (use explicitly the String constructor with the ascii codes of the characters). *) Example HelloWorld := " ""Hello world!"" ". coq-8.4pl2/theories/Strings/Ascii.v0000640000175000001440000001030112010532755016314 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Ascii c a1 a2 a3 a4 a5 a6 a7 end. (** Definition of a decidable function that is effective *) Definition ascii_dec : forall a b : ascii, {a = b} + {a <> b}. decide equality; apply bool_dec. Defined. (** * Conversion between natural numbers modulo 256 and ascii characters *) (** Auxillary function that turns a positive into an ascii by looking at the last 8 bits, ie z mod 2^8 *) Definition ascii_of_pos : positive -> ascii := let loop := fix loop n p := match n with | O => zero | S n' => match p with | xH => one | xI p' => shift true (loop n' p') | xO p' => shift false (loop n' p') end end in loop 8. (** Conversion from [N] to [ascii] *) Definition ascii_of_N (n : N) := match n with | N0 => zero | Npos p => ascii_of_pos p end. (** Same for [nat] *) Definition ascii_of_nat (a : nat) := ascii_of_N (N.of_nat a). (** The opposite functions *) Local Open Scope list_scope. 
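(* Illustrative addition (not part of the original file): a small sanity check
   of [ascii_of_nat]. The code of uppercase A is 65, i.e. bits 0 and 6 set;
   recall that the [Ascii] constructor lists the least significant bit first.
   The name [ascii_of_nat_65] is ours, introduced only for illustration. *)
Example ascii_of_nat_65 :
  ascii_of_nat 65 = Ascii true false false false false false true false.
Proof. reflexivity. Qed.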
Fixpoint N_of_digits (l:list bool) : N := match l with | nil => 0 | b :: l' => (if b then 1 else 0) + 2*(N_of_digits l') end%N. Definition N_of_ascii (a : ascii) : N := let (a0,a1,a2,a3,a4,a5,a6,a7) := a in N_of_digits (a0::a1::a2::a3::a4::a5::a6::a7::nil). Definition nat_of_ascii (a : ascii) : nat := N.to_nat (N_of_ascii a). (** Proofs that we have indeed opposite function (below 256) *) Theorem ascii_N_embedding : forall a : ascii, ascii_of_N (N_of_ascii a) = a. Proof. destruct a as [[|][|][|][|][|][|][|][|]]; vm_compute; reflexivity. Qed. Theorem N_ascii_embedding : forall n:N, (n < 256)%N -> N_of_ascii (ascii_of_N n) = n. Proof. destruct n. reflexivity. do 8 (destruct p; [ | | intros; vm_compute; reflexivity ]); intro H; vm_compute in H; destruct p; discriminate. Qed. Theorem ascii_nat_embedding : forall a : ascii, ascii_of_nat (nat_of_ascii a) = a. Proof. destruct a as [[|][|][|][|][|][|][|][|]]; compute; reflexivity. Qed. Theorem nat_ascii_embedding : forall n : nat, n < 256 -> nat_of_ascii (ascii_of_nat n) = n. Proof. intros. unfold nat_of_ascii, ascii_of_nat. rewrite N_ascii_embedding. apply Nat2N.id. unfold N.lt. change 256%N with (N.of_nat 256). rewrite <- Nat2N.inj_compare. rewrite <- Compare_dec.nat_compare_lt. auto. Qed. (** * Concrete syntax *) (** Ascii characters can be represented in scope char_scope as follows: - ["c"] represents itself if c is a character of code < 128, - [""""] is an exception: it represents the ascii character 34 (double quote), - ["nnn"] represents the ascii character of decimal code nnn. For instance, both ["065"] and ["A"] denote the character `uppercase A', and both ["034"] and [""""] denote the character `double quote'. Notice that the ascii characters of code >= 128 do not denote stand-alone utf8 characters so that only the notation "nnn" is available for them (unless your terminal is able to represent them, which is typically not the case in coqide). *) Local Open Scope char_scope. Example Space := " ". Example DoubleQuote := """". Example Beep := "007". coq-8.4pl2/theories/Bool/0000750000175000001440000000000012127276544014355 5ustar notinuserscoq-8.4pl2/theories/Bool/Bool.v0000640000175000001440000004271212010532755015434 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* True | false => False end. (*******************) (** * Decidability *) (*******************) Lemma bool_dec : forall b1 b2 : bool, {b1 = b2} + {b1 <> b2}. Proof. decide equality. Defined. (*********************) (** * Discrimination *) (*********************) Lemma diff_true_false : true <> false. Proof. discriminate. Qed. Hint Resolve diff_true_false : bool v62. Lemma diff_false_true : false <> true. Proof. discriminate. Qed. Hint Resolve diff_false_true : bool v62. Hint Extern 1 (false <> true) => exact diff_false_true. Lemma eq_true_false_abs : forall b:bool, b = true -> b = false -> False. Proof. destr_bool. Qed. Lemma not_true_is_false : forall b:bool, b <> true -> b = false. Proof. destr_bool; intuition. Qed. Lemma not_false_is_true : forall b:bool, b <> false -> b = true. Proof. destr_bool; intuition. Qed. Lemma not_true_iff_false : forall b, b <> true <-> b = false. Proof. destr_bool; intuition. Qed. Lemma not_false_iff_true : forall b, b <> false <-> b = true. Proof. destr_bool; intuition. Qed. (**********************) (** * Order on booleans *) (**********************) Definition leb (b1 b2:bool) := match b1 with | true => b2 = true | false => True end. 
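(* Illustrative addition (not part of the original file): [leb] is the
   propositional order on [bool] that puts [false] below [true]; for instance
   [leb false true] computes to [True]. The name [leb_false_true] is ours. *)
Example leb_false_true : leb false true.
Proof. exact I. Qed.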
Hint Unfold leb: bool v62. Lemma leb_implb : forall b1 b2, leb b1 b2 <-> implb b1 b2 = true. Proof. destr_bool; intuition. Qed. (* Infix "<=" := leb : bool_scope. *) (*************) (** * Equality *) (*************) Definition eqb (b1 b2:bool) : bool := match b1, b2 with | true, true => true | true, false => false | false, true => false | false, false => true end. Lemma eqb_subst : forall (P:bool -> Prop) (b1 b2:bool), eqb b1 b2 = true -> P b1 -> P b2. Proof. destr_bool. Qed. Lemma eqb_reflx : forall b:bool, eqb b b = true. Proof. destr_bool. Qed. Lemma eqb_prop : forall a b:bool, eqb a b = true -> a = b. Proof. destr_bool. Qed. Lemma eqb_true_iff : forall a b:bool, eqb a b = true <-> a = b. Proof. destr_bool; intuition. Qed. Lemma eqb_false_iff : forall a b:bool, eqb a b = false <-> a <> b. Proof. destr_bool; intuition. Qed. (************************) (** * A synonym of [if] on [bool] *) (************************) Definition ifb (b1 b2 b3:bool) : bool := match b1 with | true => b2 | false => b3 end. Open Scope bool_scope. (****************************) (** * De Morgan laws *) (****************************) Lemma negb_orb : forall b1 b2:bool, negb (b1 || b2) = negb b1 && negb b2. Proof. destr_bool. Qed. Lemma negb_andb : forall b1 b2:bool, negb (b1 && b2) = negb b1 || negb b2. Proof. destr_bool. Qed. (********************************) (** * Properties of [negb] *) (********************************) Lemma negb_involutive : forall b:bool, negb (negb b) = b. Proof. destr_bool. Qed. Lemma negb_involutive_reverse : forall b:bool, b = negb (negb b). Proof. destr_bool. Qed. Notation negb_elim := negb_involutive (only parsing). Notation negb_intro := negb_involutive_reverse (only parsing). Lemma negb_sym : forall b b':bool, b' = negb b -> b = negb b'. Proof. destr_bool. Qed. Lemma no_fixpoint_negb : forall b:bool, negb b <> b. Proof. destr_bool. Qed. Lemma eqb_negb1 : forall b:bool, eqb (negb b) b = false. Proof. destr_bool. Qed. Lemma eqb_negb2 : forall b:bool, eqb b (negb b) = false. Proof. destr_bool. Qed. Lemma if_negb : forall (A:Type) (b:bool) (x y:A), (if negb b then x else y) = (if b then y else x). Proof. destr_bool. Qed. Lemma negb_true_iff : forall b, negb b = true <-> b = false. Proof. destr_bool; intuition. Qed. Lemma negb_false_iff : forall b, negb b = false <-> b = true. Proof. destr_bool; intuition. Qed. (********************************) (** * Properties of [orb] *) (********************************) Lemma orb_true_iff : forall b1 b2, b1 || b2 = true <-> b1 = true \/ b2 = true. Proof. destr_bool; intuition. Qed. Lemma orb_false_iff : forall b1 b2, b1 || b2 = false <-> b1 = false /\ b2 = false. Proof. destr_bool; intuition. Qed. Lemma orb_true_elim : forall b1 b2:bool, b1 || b2 = true -> {b1 = true} + {b2 = true}. Proof. destruct b1; simpl; auto. Defined. Lemma orb_prop : forall a b:bool, a || b = true -> a = true \/ b = true. Proof. intros; apply orb_true_iff; trivial. Qed. Lemma orb_true_intro : forall b1 b2:bool, b1 = true \/ b2 = true -> b1 || b2 = true. Proof. intros; apply orb_true_iff; trivial. Qed. Hint Resolve orb_true_intro: bool v62. Lemma orb_false_intro : forall b1 b2:bool, b1 = false -> b2 = false -> b1 || b2 = false. Proof. intros. subst. reflexivity. Qed. Hint Resolve orb_false_intro: bool v62. Lemma orb_false_elim : forall b1 b2:bool, b1 || b2 = false -> b1 = false /\ b2 = false. Proof. intros. apply orb_false_iff; trivial. Qed. Lemma orb_diag : forall b, b || b = b. Proof. destr_bool. Qed. 
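(* Illustrative addition (not part of the original file): the [_iff] lemmas
   above let a Boolean equation be established by a propositional case split,
   as in this small example (the same fact is proved directly as [orb_negb_r]
   below). The name [orb_negb_r_via_iff] is ours. *)
Example orb_negb_r_via_iff : forall b:bool, b || negb b = true.
Proof. intro b; apply orb_true_iff; destruct b; [left | right]; reflexivity. Qed.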
(** [true] is a zero for [orb] *) Lemma orb_true_r : forall b:bool, b || true = true. Proof. destr_bool. Qed. Hint Resolve orb_true_r: bool v62. Lemma orb_true_l : forall b:bool, true || b = true. Proof. reflexivity. Qed. Notation orb_b_true := orb_true_r (only parsing). Notation orb_true_b := orb_true_l (only parsing). (** [false] is neutral for [orb] *) Lemma orb_false_r : forall b:bool, b || false = b. Proof. destr_bool. Qed. Hint Resolve orb_false_r: bool v62. Lemma orb_false_l : forall b:bool, false || b = b. Proof. destr_bool. Qed. Hint Resolve orb_false_l: bool v62. Notation orb_b_false := orb_false_r (only parsing). Notation orb_false_b := orb_false_l (only parsing). (** Complementation *) Lemma orb_negb_r : forall b:bool, b || negb b = true. Proof. destr_bool. Qed. Hint Resolve orb_negb_r: bool v62. Notation orb_neg_b := orb_negb_r (only parsing). (** Commutativity *) Lemma orb_comm : forall b1 b2:bool, b1 || b2 = b2 || b1. Proof. destr_bool. Qed. (** Associativity *) Lemma orb_assoc : forall b1 b2 b3:bool, b1 || (b2 || b3) = b1 || b2 || b3. Proof. destr_bool. Qed. Hint Resolve orb_comm orb_assoc: bool v62. (*******************************) (** * Properties of [andb] *) (*******************************) Lemma andb_true_iff : forall b1 b2:bool, b1 && b2 = true <-> b1 = true /\ b2 = true. Proof. destr_bool; intuition. Qed. Lemma andb_false_iff : forall b1 b2:bool, b1 && b2 = false <-> b1 = false \/ b2 = false. Proof. destr_bool; intuition. Qed. Lemma andb_true_eq : forall a b:bool, true = a && b -> true = a /\ true = b. Proof. destr_bool. auto. Defined. Lemma andb_false_intro1 : forall b1 b2:bool, b1 = false -> b1 && b2 = false. Proof. intros. apply andb_false_iff. auto. Qed. Lemma andb_false_intro2 : forall b1 b2:bool, b2 = false -> b1 && b2 = false. Proof. intros. apply andb_false_iff. auto. Qed. (** [false] is a zero for [andb] *) Lemma andb_false_r : forall b:bool, b && false = false. Proof. destr_bool. Qed. Lemma andb_false_l : forall b:bool, false && b = false. Proof. reflexivity. Qed. Notation andb_b_false := andb_false_r (only parsing). Notation andb_false_b := andb_false_l (only parsing). Lemma andb_diag : forall b, b && b = b. Proof. destr_bool. Qed. (** [true] is neutral for [andb] *) Lemma andb_true_r : forall b:bool, b && true = b. Proof. destr_bool. Qed. Lemma andb_true_l : forall b:bool, true && b = b. Proof. reflexivity. Qed. Notation andb_b_true := andb_true_r (only parsing). Notation andb_true_b := andb_true_l (only parsing). Lemma andb_false_elim : forall b1 b2:bool, b1 && b2 = false -> {b1 = false} + {b2 = false}. Proof. destruct b1; simpl; auto. Defined. Hint Resolve andb_false_elim: bool v62. (** Complementation *) Lemma andb_negb_r : forall b:bool, b && negb b = false. Proof. destr_bool. Qed. Hint Resolve andb_negb_r: bool v62. Notation andb_neg_b := andb_negb_r (only parsing). (** Commutativity *) Lemma andb_comm : forall b1 b2:bool, b1 && b2 = b2 && b1. Proof. destr_bool. Qed. (** Associativity *) Lemma andb_assoc : forall b1 b2 b3:bool, b1 && (b2 && b3) = b1 && b2 && b3. Proof. destr_bool. Qed. Hint Resolve andb_comm andb_assoc: bool v62. (*******************************************) (** * Properties mixing [andb] and [orb] *) (*******************************************) (** Distributivity *) Lemma andb_orb_distrib_r : forall b1 b2 b3:bool, b1 && (b2 || b3) = b1 && b2 || b1 && b3. Proof. destr_bool. Qed. Lemma andb_orb_distrib_l : forall b1 b2 b3:bool, (b1 || b2) && b3 = b1 && b3 || b2 && b3. Proof. destr_bool. Qed. 
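(* Illustrative addition (not part of the original file): the distributivity
   lemmas are typically used with [rewrite] to reshape a Boolean expression,
   e.g. to push a conjunction under a disjunction. The name
   [andb_orb_distrib_r_example] is ours. *)
Example andb_orb_distrib_r_example :
  forall a b c d:bool, a && (b || c) || d = a && b || a && c || d.
Proof. intros; rewrite andb_orb_distrib_r; reflexivity. Qed.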
Lemma orb_andb_distrib_r : forall b1 b2 b3:bool, b1 || b2 && b3 = (b1 || b2) && (b1 || b3). Proof. destr_bool. Qed. Lemma orb_andb_distrib_l : forall b1 b2 b3:bool, b1 && b2 || b3 = (b1 || b3) && (b2 || b3). Proof. destr_bool. Qed. (* Compatibility *) Notation demorgan1 := andb_orb_distrib_r (only parsing). Notation demorgan2 := andb_orb_distrib_l (only parsing). Notation demorgan3 := orb_andb_distrib_r (only parsing). Notation demorgan4 := orb_andb_distrib_l (only parsing). (** Absorption *) Lemma absoption_andb : forall b1 b2:bool, b1 && (b1 || b2) = b1. Proof. destr_bool. Qed. Lemma absoption_orb : forall b1 b2:bool, b1 || b1 && b2 = b1. Proof. destr_bool. Qed. (*********************************) (** * Properties of [xorb] *) (*********************************) (** [false] is neutral for [xorb] *) Lemma xorb_false_r : forall b:bool, xorb b false = b. Proof. destr_bool. Qed. Lemma xorb_false_l : forall b:bool, xorb false b = b. Proof. destr_bool. Qed. Notation xorb_false := xorb_false_r (only parsing). Notation false_xorb := xorb_false_l (only parsing). (** [true] is "complementing" for [xorb] *) Lemma xorb_true_r : forall b:bool, xorb b true = negb b. Proof. reflexivity. Qed. Lemma xorb_true_l : forall b:bool, xorb true b = negb b. Proof. reflexivity. Qed. Notation xorb_true := xorb_true_r (only parsing). Notation true_xorb := xorb_true_l (only parsing). (** Nilpotency (alternatively: identity is a inverse for [xorb]) *) Lemma xorb_nilpotent : forall b:bool, xorb b b = false. Proof. destr_bool. Qed. (** Commutativity *) Lemma xorb_comm : forall b b':bool, xorb b b' = xorb b' b. Proof. destr_bool. Qed. (** Associativity *) Lemma xorb_assoc_reverse : forall b b' b'':bool, xorb (xorb b b') b'' = xorb b (xorb b' b''). Proof. destr_bool. Qed. Notation xorb_assoc := xorb_assoc_reverse (only parsing). (* Compatibility *) Lemma xorb_eq : forall b b':bool, xorb b b' = false -> b = b'. Proof. destr_bool. Qed. Lemma xorb_move_l_r_1 : forall b b' b'':bool, xorb b b' = b'' -> b' = xorb b b''. Proof. destr_bool. Qed. Lemma xorb_move_l_r_2 : forall b b' b'':bool, xorb b b' = b'' -> b = xorb b'' b'. Proof. destr_bool. Qed. Lemma xorb_move_r_l_1 : forall b b' b'':bool, b = xorb b' b'' -> xorb b' b = b''. Proof. destr_bool. Qed. Lemma xorb_move_r_l_2 : forall b b' b'':bool, b = xorb b' b'' -> xorb b b'' = b'. Proof. destr_bool. Qed. Lemma negb_xorb_l : forall b b', negb (xorb b b') = xorb (negb b) b'. Proof. destruct b,b'; trivial. Qed. Lemma negb_xorb_r : forall b b', negb (xorb b b') = xorb b (negb b'). Proof. destruct b,b'; trivial. Qed. Lemma xorb_negb_negb : forall b b', xorb (negb b) (negb b') = xorb b b'. Proof. destruct b,b'; trivial. Qed. (** Lemmas about the [b = true] embedding of [bool] to [Prop] *) Lemma eq_iff_eq_true : forall b1 b2, b1 = b2 <-> (b1 = true <-> b2 = true). Proof. destr_bool; intuition. Qed. Lemma eq_true_iff_eq : forall b1 b2, (b1 = true <-> b2 = true) -> b1 = b2. Proof. apply eq_iff_eq_true. Qed. Notation bool_1 := eq_true_iff_eq (only parsing). (* Compatibility *) Lemma eq_true_negb_classical : forall b:bool, negb b <> true -> b = true. Proof. destr_bool; intuition. Qed. Notation bool_3 := eq_true_negb_classical (only parsing). (* Compatibility *) Lemma eq_true_not_negb : forall b:bool, b <> true -> negb b = true. Proof. destr_bool; intuition. Qed. Notation bool_6 := eq_true_not_negb (only parsing). (* Compatibility *) Hint Resolve eq_true_not_negb : bool. 
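(* Illustrative addition (not part of the original file): the [xorb_move_*]
   lemmas above solve simple Boolean equations by moving a xor-ed term to the
   other side, much like addition in Z/2Z. The name [xorb_solve] is ours. *)
Example xorb_solve : forall b b':bool, xorb b b' = true -> b' = negb b.
Proof.
  intros b b' H.
  rewrite <- (xorb_true_r b).
  apply xorb_move_l_r_1; assumption.
Qed.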
(* An interesting lemma for auto but too strong to keep compatibility *) Lemma absurd_eq_bool : forall b b':bool, False -> b = b'. Proof. contradiction. Qed. (* A more specific one that preserves compatibility with old hint bool_3 *) Lemma absurd_eq_true : forall b, False -> b = true. Proof. contradiction. Qed. Hint Resolve absurd_eq_true. (* A specific instance of eq_trans that preserves compatibility with old hint bool_2 *) Lemma trans_eq_bool : forall x y z:bool, x = y -> y = z -> x = z. Proof. apply eq_trans. Qed. Hint Resolve trans_eq_bool. (*****************************************) (** * Reflection of [bool] into [Prop] *) (*****************************************) (** [Is_true] and equality *) Hint Unfold Is_true: bool. Lemma Is_true_eq_true : forall x:bool, Is_true x -> x = true. Proof. destr_bool; tauto. Qed. Lemma Is_true_eq_left : forall x:bool, x = true -> Is_true x. Proof. intros; subst; auto with bool. Qed. Lemma Is_true_eq_right : forall x:bool, true = x -> Is_true x. Proof. intros; subst; auto with bool. Qed. Notation Is_true_eq_true2 := Is_true_eq_right (only parsing). Hint Immediate Is_true_eq_right Is_true_eq_left: bool. Lemma eqb_refl : forall x:bool, Is_true (eqb x x). Proof. destr_bool. Qed. Lemma eqb_eq : forall x y:bool, Is_true (eqb x y) -> x = y. Proof. destr_bool; tauto. Qed. (** [Is_true] and connectives *) Lemma orb_prop_elim : forall a b:bool, Is_true (a || b) -> Is_true a \/ Is_true b. Proof. destr_bool; tauto. Qed. Notation orb_prop2 := orb_prop_elim (only parsing). Lemma orb_prop_intro : forall a b:bool, Is_true a \/ Is_true b -> Is_true (a || b). Proof. destr_bool; tauto. Qed. Lemma andb_prop_intro : forall b1 b2:bool, Is_true b1 /\ Is_true b2 -> Is_true (b1 && b2). Proof. destr_bool; tauto. Qed. Hint Resolve andb_prop_intro: bool v62. Notation andb_true_intro2 := (fun b1 b2 H1 H2 => andb_prop_intro b1 b2 (conj H1 H2)) (only parsing). Lemma andb_prop_elim : forall a b:bool, Is_true (a && b) -> Is_true a /\ Is_true b. Proof. destr_bool; auto. Qed. Hint Resolve andb_prop_elim: bool v62. Notation andb_prop2 := andb_prop_elim (only parsing). Lemma eq_bool_prop_intro : forall b1 b2, (Is_true b1 <-> Is_true b2) -> b1 = b2. Proof. destr_bool; tauto. Qed. Lemma eq_bool_prop_elim : forall b1 b2, b1 = b2 -> (Is_true b1 <-> Is_true b2). Proof. destr_bool; tauto. Qed. Lemma negb_prop_elim : forall b, Is_true (negb b) -> ~ Is_true b. Proof. destr_bool; tauto. Qed. Lemma negb_prop_intro : forall b, ~ Is_true b -> Is_true (negb b). Proof. destr_bool; tauto. Qed. Lemma negb_prop_classical : forall b, ~ Is_true (negb b) -> Is_true b. Proof. destr_bool; tauto. Qed. Lemma negb_prop_involutive : forall b, Is_true b -> ~ Is_true (negb b). Proof. destr_bool; tauto. Qed. (** Rewrite rules about andb, orb and if (used in romega) *) Lemma andb_if : forall (A:Type)(a a':A)(b b' : bool), (if b && b' then a else a') = (if b then if b' then a else a' else a'). Proof. destr_bool. Qed. Lemma negb_if : forall (A:Type)(a a':A)(b:bool), (if negb b then a else a') = (if b then a' else a). Proof. destr_bool. Qed. (*****************************************) (** * Alternative versions of [andb] and [orb] with lazy behavior (for vm_compute) *) (*****************************************) Notation "a &&& b" := (if a then b else false) (at level 40, left associativity) : lazy_bool_scope. Notation "a ||| b" := (if a then true else b) (at level 50, left associativity) : lazy_bool_scope. Local Open Scope lazy_bool_scope. Lemma andb_lazy_alt : forall a b : bool, a && b = a &&& b. Proof. reflexivity. 
Qed. Lemma orb_lazy_alt : forall a b : bool, a || b = a ||| b. Proof. reflexivity. Qed. (*****************************************) (** * Reflect: a specialized inductive type for relating propositions and booleans, as popularized by the Ssreflect library. *) (*****************************************) Inductive reflect (P : Prop) : bool -> Set := | ReflectT : P -> reflect P true | ReflectF : ~ P -> reflect P false. Hint Constructors reflect : bool. (** Interest: a case on a reflect lemma or hyp performs clever unification, and leave the goal in a convenient shape (a bit like case_eq). *) (** Relation with iff : *) Lemma reflect_iff : forall P b, reflect P b -> (P<->b=true). Proof. destruct 1; intuition; discriminate. Qed. Lemma iff_reflect : forall P b, (P<->b=true) -> reflect P b. Proof. destr_bool; intuition. Defined. (** It would be nice to join [reflect_iff] and [iff_reflect] in a unique [iff] statement, but this isn't allowed since [iff] is in Prop. *) (** Reflect implies decidability of the proposition *) Lemma reflect_dec : forall P b, reflect P b -> {P}+{~P}. Proof. destruct 1; auto. Defined. (** Reciprocally, from a decidability, we could state a [reflect] as soon as we have a [bool_of_sumbool]. *) coq-8.4pl2/theories/Bool/vo.itarget0000640000175000001440000000010611307752066016355 0ustar notinusersBoolEq.vo Bool.vo Bvector.vo DecBool.vo IfProp.vo Sumbool.vo Zerob.vo coq-8.4pl2/theories/Bool/IfProp.v0000640000175000001440000000273512010532755015741 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := | Iftrue : A -> IfProp A B true | Iffalse : B -> IfProp A B false. Hint Resolve Iftrue Iffalse: bool v62. Lemma Iftrue_inv : forall (A B:Prop) (b:bool), IfProp A B b -> b = true -> A. destruct 1; intros; auto with bool. case diff_true_false; auto with bool. Qed. Lemma Iffalse_inv : forall (A B:Prop) (b:bool), IfProp A B b -> b = false -> B. destruct 1; intros; auto with bool. case diff_true_false; trivial with bool. Qed. Lemma IfProp_true : forall A B:Prop, IfProp A B true -> A. intros. inversion H. assumption. Qed. Lemma IfProp_false : forall A B:Prop, IfProp A B false -> B. intros. inversion H. assumption. Qed. Lemma IfProp_or : forall (A B:Prop) (b:bool), IfProp A B b -> A \/ B. destruct 1; auto with bool. Qed. Lemma IfProp_sum : forall (A B:Prop) (b:bool), IfProp A B b -> {A} + {B}. destruct b; intro H. left; inversion H; auto with bool. right; inversion H; auto with bool. Qed. coq-8.4pl2/theories/Bool/Sumbool.v0000640000175000001440000000416612010532755016162 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Set), (b = true -> P true) -> (b = false -> P false) -> P b. destruct b; auto. Defined. Definition bool_eq_ind : forall (b:bool) (P:bool -> Prop), (b = true -> P true) -> (b = false -> P false) -> P b. destruct b; auto. Defined. (** Logic connectives on type [sumbool] *) Section connectives. Variables A B C D : Prop. Hypothesis H1 : {A} + {B}. Hypothesis H2 : {C} + {D}. Definition sumbool_and : {A /\ C} + {B \/ D}. case H1; case H2; auto. Defined. Definition sumbool_or : {A \/ C} + {B /\ D}. case H1; case H2; auto. Defined. Definition sumbool_not : {B} + {A}. case H1; auto. Defined. End connectives. Hint Resolve sumbool_and sumbool_or: core. Hint Immediate sumbool_not : core. 
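(* Illustrative addition (not part of the original file): the connectives
   above combine decidability witnesses, e.g. deciding a conjunction of two
   Boolean equations. The name [sumbool_and_example] is ours. *)
Example sumbool_and_example (a b : bool) :
  {a = true /\ b = true} + {a <> true \/ b <> true}.
Proof.
  apply sumbool_and.
  destruct a; [left; reflexivity | right; discriminate].
  destruct b; [left; reflexivity | right; discriminate].
Defined.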
(** Any decidability function in type [sumbool] can be turned into a function returning a boolean with the corresponding specification: *) Definition bool_of_sumbool : forall A B:Prop, {A} + {B} -> {b : bool | if b then A else B}. intros A B H. elim H; intro; [exists true | exists false]; assumption. Defined. Arguments bool_of_sumbool : default implicits. coq-8.4pl2/theories/Bool/BoolEq.v0000640000175000001440000000361012010532755015714 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> bool. Variable beq_refl : forall x:A, true = beq x x. Variable beq_eq : forall x y:A, true = beq x y -> x = y. Definition beq_eq_true : forall x y:A, x = y -> true = beq x y. Proof. intros x y H. case H. apply beq_refl. Defined. Definition beq_eq_not_false : forall x y:A, x = y -> false <> beq x y. Proof. intros x y e. rewrite <- beq_eq_true; trivial; discriminate. Defined. Definition beq_false_not_eq : forall x y:A, false = beq x y -> x <> y. Proof. exact (fun (x y:A) (H:false = beq x y) (e:x = y) => beq_eq_not_false x y e H). Defined. Definition exists_beq_eq : forall x y:A, {b : bool | b = beq x y}. Proof. intros. exists (beq x y). constructor. Defined. Definition not_eq_false_beq : forall x y:A, x <> y -> false = beq x y. Proof. intros x y H. symmetry . apply not_true_is_false. intro. apply H. apply beq_eq. symmetry . assumption. Defined. Definition eq_dec : forall x y:A, {x = y} + {x <> y}. Proof. intros x y; case (exists_beq_eq x y). intros b; case b; intro H. left; apply beq_eq; assumption. right; apply beq_false_not_eq; assumption. Defined. End Bool_eq_dec. coq-8.4pl2/theories/Bool/Zerob.v0000640000175000001440000000241612010532755015617 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | S _ => false end. Lemma zerob_true_intro : forall n:nat, n = 0 -> zerob n = true. Proof. destruct n; [ trivial with bool | inversion 1 ]. Qed. Hint Resolve zerob_true_intro: bool. Lemma zerob_true_elim : forall n:nat, zerob n = true -> n = 0. Proof. destruct n; [ trivial with bool | inversion 1 ]. Qed. Lemma zerob_false_intro : forall n:nat, n <> 0 -> zerob n = false. Proof. destruct n; [ destruct 1; auto with bool | trivial with bool ]. Qed. Hint Resolve zerob_false_intro: bool. Lemma zerob_false_elim : forall n:nat, zerob n = false -> n <> 0. Proof. destruct n; [ inversion 1 | auto with bool ]. Qed. coq-8.4pl2/theories/Bool/Bvector.v0000640000175000001440000000655412010532755016151 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bv | S p' => BshiftL n (BshiftL_iter n bv p') false end. Fixpoint BshiftRl_iter (n:nat) (bv:Bvector (S n)) (p:nat) : Bvector (S n) := match p with | O => bv | S p' => BshiftRl n (BshiftRl_iter n bv p') false end. Fixpoint BshiftRa_iter (n:nat) (bv:Bvector (S n)) (p:nat) : Bvector (S n) := match p with | O => bv | S p' => BshiftRa n (BshiftRa_iter n bv p') end. End BOOLEAN_VECTORS. coq-8.4pl2/theories/Bool/intro.tex0000640000175000001440000000110407265050405016220 0ustar notinusers\section{Bool}\label{Bool} The BOOL library includes the following files: \begin{itemize} \item {\tt Bool.v} defines standard operations on booleans and states and proves simple facts on them. \item {\tt IfProp.v} defines a disjunction which contains its proof and states its properties. 
\item {\tt Zerob.v} defines the test against 0 on natural numbers and states and proves properties of it. \item {\tt Orb.v} states and proves facts on the boolean or. \item {\tt DecBool.v} defines a conditional from a proof of decidability and states its properties. \end{itemize} coq-8.4pl2/theories/Bool/DecBool.v0000640000175000001440000000176212010532755016050 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* forall x y:C, ifdec H x y = x. Proof. intros; case H; auto. intro; absurd B; trivial. Qed. Theorem ifdec_right : forall (A B:Prop) (C:Set) (H:{A} + {B}), ~ A -> forall x y:C, ifdec H x y = y. Proof. intros; case H; auto. intro; absurd A; trivial. Qed. Unset Implicit Arguments. coq-8.4pl2/theories/MSets/0000750000175000001440000000000012127276544014515 5ustar notinuserscoq-8.4pl2/theories/MSets/MSetAVL.v0000640000175000001440000006064512063736507016132 0ustar notinusers(* -*- coding: utf-8 -*- *) (***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | Node h _ _ _ => h end. (** ** Singleton set *) Definition singleton x := Node 1 Leaf x Leaf. (** ** Helper functions *) (** [create l x r] creates a node, assuming [l] and [r] to be balanced and [|height l - height r| <= 2]. *) Definition create l x r := Node (max (height l) (height r) + 1) l x r. (** [bal l x r] acts as [create], but performs one step of rebalancing if necessary, i.e. assumes [|height l - height r| <= 3]. *) Definition assert_false := create. Definition bal l x r := let hl := height l in let hr := height r in if gt_le_dec hl (hr+2) then match l with | Leaf => assert_false l x r | Node _ ll lx lr => if ge_lt_dec (height ll) (height lr) then create ll lx (create lr x r) else match lr with | Leaf => assert_false l x r | Node _ lrl lrx lrr => create (create ll lx lrl) lrx (create lrr x r) end end else if gt_le_dec hr (hl+2) then match r with | Leaf => assert_false l x r | Node _ rl rx rr => if ge_lt_dec (height rr) (height rl) then create (create l x rl) rx rr else match rl with | Leaf => assert_false l x r | Node _ rll rlx rlr => create (create l x rll) rlx (create rlr rx rr) end end else create l x r. (** ** Insertion *) Fixpoint add x s := match s with | Leaf => Node 1 Leaf x Leaf | Node h l y r => match X.compare x y with | Lt => bal (add x l) y r | Eq => Node h l y r | Gt => bal l y (add x r) end end. (** ** Join Same as [bal] but does not assume anything regarding heights of [l] and [r]. *) Fixpoint join l : elt -> t -> t := match l with | Leaf => add | Node lh ll lx lr => fun x => fix join_aux (r:t) : t := match r with | Leaf => add x l | Node rh rl rx rr => if gt_le_dec lh (rh+2) then bal ll lx (join lr x r) else if gt_le_dec rh (lh+2) then bal (join_aux rl) rx rr else create l x r end end. (** ** Extraction of minimum element Morally, [remove_min] is to be applied to a non-empty tree [t = Node h l x r]. Since we can't deal here with [assert false] for [t=Leaf], we pre-unpack [t] (and forget about [h]). *) Fixpoint remove_min l x r : t*elt := match l with | Leaf => (r,x) | Node lh ll lx lr => let (l',m) := remove_min ll lx lr in (bal l' x r, m) end. (** ** Merging two trees [merge t1 t2] builds the union of [t1] and [t2] assuming all elements of [t1] to be smaller than all elements of [t2], and [|height t1 - height t2| <= 2]. 
*) Definition merge s1 s2 := match s1,s2 with | Leaf, _ => s2 | _, Leaf => s1 | _, Node _ l2 x2 r2 => let (s2',m) := remove_min l2 x2 r2 in bal s1 m s2' end. (** ** Deletion *) Fixpoint remove x s := match s with | Leaf => Leaf | Node _ l y r => match X.compare x y with | Lt => bal (remove x l) y r | Eq => merge l r | Gt => bal l y (remove x r) end end. (** ** Concatenation Same as [merge] but does not assume anything about heights. *) Definition concat s1 s2 := match s1, s2 with | Leaf, _ => s2 | _, Leaf => s1 | _, Node _ l2 x2 r2 => let (s2',m) := remove_min l2 x2 r2 in join s1 m s2' end. (** ** Splitting [split x s] returns a triple [(l, present, r)] where - [l] is the set of elements of [s] that are [< x] - [r] is the set of elements of [s] that are [> x] - [present] is [true] if and only if [s] contains [x]. *) Record triple := mktriple { t_left:t; t_in:bool; t_right:t }. Notation "<< l , b , r >>" := (mktriple l b r) (at level 9). Fixpoint split x s : triple := match s with | Leaf => << Leaf, false, Leaf >> | Node _ l y r => match X.compare x y with | Lt => let (ll,b,rl) := split x l in << ll, b, join rl y r >> | Eq => << l, true, r >> | Gt => let (rl,b,rr) := split x r in << join l y rl, b, rr >> end end. (** ** Intersection *) Fixpoint inter s1 s2 := match s1, s2 with | Leaf, _ => Leaf | _, Leaf => Leaf | Node _ l1 x1 r1, _ => let (l2',pres,r2') := split x1 s2 in if pres then join (inter l1 l2') x1 (inter r1 r2') else concat (inter l1 l2') (inter r1 r2') end. (** ** Difference *) Fixpoint diff s1 s2 := match s1, s2 with | Leaf, _ => Leaf | _, Leaf => s1 | Node _ l1 x1 r1, _ => let (l2',pres,r2') := split x1 s2 in if pres then concat (diff l1 l2') (diff r1 r2') else join (diff l1 l2') x1 (diff r1 r2') end. (** ** Union *) (** In ocaml, heights of [s1] and [s2] are compared each time in order to recursively perform the split on the smaller set. Unfortunately, this leads to a non-structural algorithm. The following code is a simplification of the ocaml version: no comparison of heights. It might be slightly slower, but experimentally all the tests I've made in ocaml have shown this potential slowdown to be non-significant. Anyway, the exact code of ocaml has also been formalized thanks to Function+measure, see [ocaml_union] in [MSetFullAVL]. *) Fixpoint union s1 s2 := match s1, s2 with | Leaf, _ => s2 | _, Leaf => s1 | Node _ l1 x1 r1, _ => let (l2',_,r2') := split x1 s2 in join (union l1 l2') x1 (union r1 r2') end. (** ** Filter *) Fixpoint filter (f:elt->bool) s := match s with | Leaf => Leaf | Node _ l x r => let l' := filter f l in let r' := filter f r in if f x then join l' x r' else concat l' r' end. (** ** Partition *) Fixpoint partition (f:elt->bool)(s : t) : t*t := match s with | Leaf => (Leaf, Leaf) | Node _ l x r => let (l1,l2) := partition f l in let (r1,r2) := partition f r in if f x then (join l1 x r1, concat l2 r2) else (concat l1 r1, join l2 x r2) end. End Ops. (** * MakeRaw Functor of pure functions + a posteriori proofs of invariant preservation *) Module MakeRaw (Import I:Int)(X:OrderedType) <: RawSets X. Include Ops I X. (** Generic definition of binary-search-trees and proofs of specifications for generic functions such as mem or fold. *) Include MSetGenTree.Props X I. (** Automation and dedicated tactics *) Local Hint Immediate MX.eq_sym. Local Hint Unfold In lt_tree gt_tree Ok. Local Hint Constructors InT bst. Local Hint Resolve MX.eq_refl MX.eq_trans MX.lt_trans @ok. Local Hint Resolve lt_leaf gt_leaf lt_tree_node gt_tree_node. 
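(* Added comment (not part of the original file): the hints registered in this
   block let [auto]/[eauto] discharge most of the side conditions about
   [lt_tree], [gt_tree] and well-formedness ([Ok]) that show up in the
   specification proofs below. *)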
Local Hint Resolve lt_tree_not_in lt_tree_trans gt_tree_not_in gt_tree_trans. Local Hint Resolve elements_spec2. (* Sometimes functional induction will expose too much of a tree structure. The following tactic allows to factor back a Node whose internal parts occurs nowhere else. *) (* TODO: why Ltac instead of Tactic Notation don't work ? why clear ? *) Tactic Notation "factornode" ident(s) := try clear s; match goal with | |- context [Node ?l ?x ?r ?h] => set (s:=Node l x r h) in *; clearbody s; clear l x r h | _ : context [Node ?l ?x ?r ?h] |- _ => set (s:=Node l x r h) in *; clearbody s; clear l x r h end. (** Inductions principles for some of the set operators *) Functional Scheme bal_ind := Induction for bal Sort Prop. Functional Scheme remove_min_ind := Induction for remove_min Sort Prop. Functional Scheme merge_ind := Induction for merge Sort Prop. Functional Scheme concat_ind := Induction for concat Sort Prop. Functional Scheme inter_ind := Induction for inter Sort Prop. Functional Scheme diff_ind := Induction for diff Sort Prop. Functional Scheme union_ind := Induction for union Sort Prop. (** Notations and helper lemma about pairs and triples *) Notation "s #1" := (fst s) (at level 9, format "s '#1'") : pair_scope. Notation "s #2" := (snd s) (at level 9, format "s '#2'") : pair_scope. Notation "t #l" := (t_left t) (at level 9, format "t '#l'") : pair_scope. Notation "t #b" := (t_in t) (at level 9, format "t '#b'") : pair_scope. Notation "t #r" := (t_right t) (at level 9, format "t '#r'") : pair_scope. Local Open Scope pair_scope. (** ** Singleton set *) Lemma singleton_spec : forall x y, InT y (singleton x) <-> X.eq y x. Proof. unfold singleton; intuition_in. Qed. Instance singleton_ok x : Ok (singleton x). Proof. unfold singleton; auto. Qed. (** ** Helper functions *) Lemma create_spec : forall l x r y, InT y (create l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. unfold create; split; [ inversion_clear 1 | ]; intuition. Qed. Instance create_ok l x r `(Ok l, Ok r, lt_tree x l, gt_tree x r) : Ok (create l x r). Proof. unfold create; auto. Qed. Lemma bal_spec : forall l x r y, InT y (bal l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. intros l x r; functional induction bal l x r; intros; try clear e0; rewrite !create_spec; intuition_in. Qed. Instance bal_ok l x r `(Ok l, Ok r, lt_tree x l, gt_tree x r) : Ok (bal l x r). Proof. functional induction bal l x r; intros; inv; repeat apply create_ok; auto; unfold create; (apply lt_tree_node || apply gt_tree_node); auto; (eapply lt_tree_trans || eapply gt_tree_trans); eauto. Qed. (** ** Insertion *) Lemma add_spec' : forall s x y, InT y (add x s) <-> X.eq y x \/ InT y s. Proof. induct s x; try rewrite ?bal_spec, ?IHl, ?IHr; intuition_in. setoid_replace y with x'; eauto. Qed. Lemma add_spec : forall s x y `{Ok s}, InT y (add x s) <-> X.eq y x \/ InT y s. Proof. intros; apply add_spec'. Qed. Instance add_ok s x `(Ok s) : Ok (add x s). Proof. induct s x; auto; apply bal_ok; auto; intros y; rewrite add_spec'; intuition; order. Qed. Local Open Scope Int_scope. (** ** Join *) (** Function/Functional Scheme can't deal with internal fix. 
Let's do its job by hand: *) Ltac join_tac := intro l; induction l as [| lh ll _ lx lr Hlr]; [ | intros x r; induction r as [| rh rl Hrl rx rr _]; unfold join; [ | destruct (gt_le_dec lh (rh+2)) as [GT|LE]; [ match goal with |- context b [ bal ?a ?b ?c] => replace (bal a b c) with (bal ll lx (join lr x (Node rh rl rx rr))); [ | auto] end | destruct (gt_le_dec rh (lh+2)) as [GT'|LE']; [ match goal with |- context b [ bal ?a ?b ?c] => replace (bal a b c) with (bal (join (Node lh ll lx lr) x rl) rx rr); [ | auto] end | ] ] ] ]; intros. Lemma join_spec : forall l x r y, InT y (join l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. join_tac. simpl. rewrite add_spec'; intuition_in. rewrite add_spec'; intuition_in. rewrite bal_spec, Hlr; clear Hlr Hrl; intuition_in. rewrite bal_spec, Hrl; clear Hlr Hrl; intuition_in. apply create_spec. Qed. Instance join_ok : forall l x r `(Ok l, Ok r, lt_tree x l, gt_tree x r), Ok (join l x r). Proof. join_tac; auto with *; inv; apply bal_ok; auto; clear Hrl Hlr; intro; intros; rewrite join_spec in *. intuition; [ setoid_replace y with x | ]; eauto. intuition; [ setoid_replace y with x | ]; eauto. Qed. (** ** Extraction of minimum element *) Lemma remove_min_spec : forall l x r y h, InT y (Node h l x r) <-> X.eq y (remove_min l x r)#2 \/ InT y (remove_min l x r)#1. Proof. intros l x r; functional induction (remove_min l x r); simpl in *; intros. intuition_in. rewrite bal_spec, In_node_iff, IHp, e0; simpl; intuition. Qed. Instance remove_min_ok l x r : forall h `(Ok (Node h l x r)), Ok (remove_min l x r)#1. Proof. functional induction (remove_min l x r); simpl; intros. inv; auto. assert (O : Ok (Node _x ll lx lr)) by (inv; auto). assert (L : lt_tree x (Node _x ll lx lr)) by (inv; auto). specialize IHp with (1:=O); rewrite e0 in IHp; auto; simpl in *. apply bal_ok; auto. inv; auto. intro y; specialize (L y). rewrite remove_min_spec, e0 in L; simpl in L; intuition. inv; auto. Qed. Lemma remove_min_gt_tree : forall l x r h `{Ok (Node h l x r)}, gt_tree (remove_min l x r)#2 (remove_min l x r)#1. Proof. intros l x r; functional induction (remove_min l x r); simpl; intros. inv; auto. assert (O : Ok (Node _x ll lx lr)) by (inv; auto). assert (L : lt_tree x (Node _x ll lx lr)) by (inv; auto). specialize IHp with (1:=O); rewrite e0 in IHp; simpl in IHp. intro y; rewrite bal_spec; intuition; specialize (L m); rewrite remove_min_spec, e0 in L; simpl in L; [setoid_replace y with x|inv]; eauto. Qed. Local Hint Resolve remove_min_gt_tree. (** ** Merging two trees *) Lemma merge_spec : forall s1 s2 y, InT y (merge s1 s2) <-> InT y s1 \/ InT y s2. Proof. intros s1 s2; functional induction (merge s1 s2); intros; try factornode s1. intuition_in. intuition_in. rewrite bal_spec, remove_min_spec, e1; simpl; intuition. Qed. Instance merge_ok s1 s2 : forall `(Ok s1, Ok s2) `(forall y1 y2 : elt, InT y1 s1 -> InT y2 s2 -> X.lt y1 y2), Ok (merge s1 s2). Proof. functional induction (merge s1 s2); intros; auto; try factornode s1. apply bal_ok; auto. change s2' with ((s2',m)#1); rewrite <-e1; eauto with *. intros y Hy. apply H1; auto. rewrite remove_min_spec, e1; simpl; auto. change (gt_tree (s2',m)#2 (s2',m)#1); rewrite <-e1; eauto. Qed. (** ** Deletion *) Lemma remove_spec : forall s x y `{Ok s}, (InT y (remove x s) <-> InT y s /\ ~ X.eq y x). Proof. induct s x. intuition_in. rewrite merge_spec; intuition; [order|order|intuition_in]. elim H2; eauto. rewrite bal_spec, IHl; clear IHl IHr; intuition; [order|order|intuition_in]. 
rewrite bal_spec, IHr; clear IHl IHr; intuition; [order|order|intuition_in]. Qed. Instance remove_ok s x `(Ok s) : Ok (remove x s). Proof. induct s x. auto. (* EQ *) apply merge_ok; eauto. (* LT *) apply bal_ok; auto. intro z; rewrite remove_spec; auto; destruct 1; eauto. (* GT *) apply bal_ok; auto. intro z; rewrite remove_spec; auto; destruct 1; eauto. Qed. (** ** Concatenation *) Lemma concat_spec : forall s1 s2 y, InT y (concat s1 s2) <-> InT y s1 \/ InT y s2. Proof. intros s1 s2; functional induction (concat s1 s2); intros; try factornode s1. intuition_in. intuition_in. rewrite join_spec, remove_min_spec, e1; simpl; intuition. Qed. Instance concat_ok s1 s2 : forall `(Ok s1, Ok s2) `(forall y1 y2 : elt, InT y1 s1 -> InT y2 s2 -> X.lt y1 y2), Ok (concat s1 s2). Proof. functional induction (concat s1 s2); intros; auto; try factornode s1. apply join_ok; auto. change (Ok (s2',m)#1); rewrite <-e1; eauto with *. intros y Hy. apply H1; auto. rewrite remove_min_spec, e1; simpl; auto. change (gt_tree (s2',m)#2 (s2',m)#1); rewrite <-e1; eauto. Qed. (** ** Splitting *) Lemma split_spec1 : forall s x y `{Ok s}, (InT y (split x s)#l <-> InT y s /\ X.lt y x). Proof. induct s x. intuition_in. intuition_in; order. specialize (IHl x y). destruct (split x l); simpl in *. rewrite IHl; intuition_in; order. specialize (IHr x y). destruct (split x r); simpl in *. rewrite join_spec, IHr; intuition_in; order. Qed. Lemma split_spec2 : forall s x y `{Ok s}, (InT y (split x s)#r <-> InT y s /\ X.lt x y). Proof. induct s x. intuition_in. intuition_in; order. specialize (IHl x y). destruct (split x l); simpl in *. rewrite join_spec, IHl; intuition_in; order. specialize (IHr x y). destruct (split x r); simpl in *. rewrite IHr; intuition_in; order. Qed. Lemma split_spec3 : forall s x `{Ok s}, ((split x s)#b = true <-> InT x s). Proof. induct s x. intuition_in; try discriminate. intuition. specialize (IHl x). destruct (split x l); simpl in *. rewrite IHl; intuition_in; order. specialize (IHr x). destruct (split x r); simpl in *. rewrite IHr; intuition_in; order. Qed. Lemma split_ok : forall s x `{Ok s}, Ok (split x s)#l /\ Ok (split x s)#r. Proof. induct s x; simpl; auto. specialize (IHl x). generalize (fun y => @split_spec2 l x y _). destruct (split x l); simpl in *; intuition. apply join_ok; auto. intros y; rewrite H; intuition. specialize (IHr x). generalize (fun y => @split_spec1 r x y _). destruct (split x r); simpl in *; intuition. apply join_ok; auto. intros y; rewrite H; intuition. Qed. Instance split_ok1 s x `(Ok s) : Ok (split x s)#l. Proof. intros; destruct (@split_ok s x); auto. Qed. Instance split_ok2 s x `(Ok s) : Ok (split x s)#r. Proof. intros; destruct (@split_ok s x); auto. Qed. (** ** Intersection *) Ltac destruct_split := match goal with | H : split ?x ?s = << ?u, ?v, ?w >> |- _ => assert ((split x s)#l = u) by (rewrite H; auto); assert ((split x s)#b = v) by (rewrite H; auto); assert ((split x s)#r = w) by (rewrite H; auto); clear H; subst u w end. Lemma inter_spec_ok : forall s1 s2 `{Ok s1, Ok s2}, Ok (inter s1 s2) /\ (forall y, InT y (inter s1 s2) <-> InT y s1 /\ InT y s2). Proof. intros s1 s2; functional induction inter s1 s2; intros B1 B2; [intuition_in|intuition_in | | ]; factornode s2; destruct_split; inv; destruct IHt0 as (IHo1,IHi1), IHt1 as (IHo2,IHi2); auto with *; split; intros. - (* Ok join *) apply join_ok; auto with *; intro y; rewrite ?IHi1, ?IHi2; intuition. - (* InT join *) rewrite join_spec, IHi1, IHi2, split_spec1, split_spec2; intuition_in. setoid_replace y with x1; auto. 
rewrite <- split_spec3; auto. - (* Ok concat *) apply concat_ok; auto with *; intros y1 y2; rewrite IHi1, IHi2; intuition; order. - (* InT concat *) rewrite concat_spec, IHi1, IHi2, split_spec1, split_spec2; auto. intuition_in. absurd (InT x1 s2). rewrite <- split_spec3; auto; congruence. setoid_replace x1 with y; auto. Qed. Lemma inter_spec : forall s1 s2 y `{Ok s1, Ok s2}, (InT y (inter s1 s2) <-> InT y s1 /\ InT y s2). Proof. intros; destruct (@inter_spec_ok s1 s2); auto. Qed. Instance inter_ok s1 s2 `(Ok s1, Ok s2) : Ok (inter s1 s2). Proof. intros; destruct (@inter_spec_ok s1 s2); auto. Qed. (** ** Difference *) Lemma diff_spec_ok : forall s1 s2 `{Ok s1, Ok s2}, Ok (diff s1 s2) /\ (forall y, InT y (diff s1 s2) <-> InT y s1 /\ ~InT y s2). Proof. intros s1 s2; functional induction diff s1 s2; intros B1 B2; [intuition_in|intuition_in | | ]; factornode s2; destruct_split; inv; destruct IHt0 as (IHb1,IHi1), IHt1 as (IHb2,IHi2); auto with *; split; intros. - (* Ok concat *) apply concat_ok; auto; intros y1 y2; rewrite IHi1, IHi2; intuition; order. - (* InT concat *) rewrite concat_spec, IHi1, IHi2, split_spec1, split_spec2; intuition_in. absurd (InT x1 s2). + setoid_replace x1 with y; auto. + rewrite <- split_spec3; auto; congruence. - (* Ok join *) apply join_ok; auto; intro y; rewrite ?IHi1, ?IHi2; intuition. - (* InT join *) rewrite join_spec, IHi1, IHi2, split_spec1, split_spec2; auto with *. intuition_in. absurd (InT x1 s2); auto. * rewrite <- split_spec3; auto; congruence. * setoid_replace x1 with y; auto. Qed. Lemma diff_spec : forall s1 s2 y `{Ok s1, Ok s2}, (InT y (diff s1 s2) <-> InT y s1 /\ ~InT y s2). Proof. intros; destruct (@diff_spec_ok s1 s2); auto. Qed. Instance diff_ok s1 s2 `(Ok s1, Ok s2) : Ok (diff s1 s2). Proof. intros; destruct (@diff_spec_ok s1 s2); auto. Qed. (** ** Union *) Lemma union_spec : forall s1 s2 y `{Ok s1, Ok s2}, (InT y (union s1 s2) <-> InT y s1 \/ InT y s2). Proof. intros s1 s2; functional induction union s1 s2; intros y B1 B2. intuition_in. intuition_in. factornode s2; destruct_split; inv. rewrite join_spec, IHt0, IHt1, split_spec1, split_spec2; auto with *. destruct (X.compare_spec y x1); intuition_in. Qed. Instance union_ok s1 s2 : forall `(Ok s1, Ok s2), Ok (union s1 s2). Proof. functional induction union s1 s2; intros B1 B2; auto. factornode s2; destruct_split; inv. apply join_ok; auto with *. intro y; rewrite union_spec, split_spec1; intuition_in. intro y; rewrite union_spec, split_spec2; intuition_in. Qed. (** * Filter *) Lemma filter_spec : forall s x f, Proper (X.eq==>Logic.eq) f -> (InT x (filter f s) <-> InT x s /\ f x = true). Proof. induction s as [ |h l Hl x0 r Hr]; intros x f Hf; simpl. - intuition_in. - case_eq (f x0); intros Hx0. * rewrite join_spec, Hl, Hr; intuition_in. now setoid_replace x with x0. * rewrite concat_spec, Hl, Hr; intuition_in. assert (f x = f x0) by auto. congruence. Qed. Lemma filter_weak_spec : forall s x f, InT x (filter f s) -> InT x s. Proof. induction s as [ |h l Hl x0 r Hr]; intros x f; simpl. - trivial. - destruct (f x0). * rewrite join_spec; intuition_in; eauto. * rewrite concat_spec; intuition_in; eauto. Qed. Instance filter_ok s f `(H : Ok s) : Ok (filter f s). Proof. induction H as [ | h x l r Hl Hfl Hr Hfr Hlt Hgt ]. - constructor. - simpl. assert (lt_tree x (filter f l)) by (eauto using filter_weak_spec). assert (gt_tree x (filter f r)) by (eauto using filter_weak_spec). destruct (f x); eauto using concat_ok, join_ok. Qed. (** * Partition *) Lemma partition_spec1' s f : (partition f s)#1 = filter f s. 
Proof. induction s as [ | h l Hl x r Hr ]; simpl. - trivial. - rewrite <- Hl, <- Hr. now destruct (partition f l), (partition f r), (f x). Qed. Lemma partition_spec2' s f : (partition f s)#2 = filter (fun x => negb (f x)) s. Proof. induction s as [ | h l Hl x r Hr ]; simpl. - trivial. - rewrite <- Hl, <- Hr. now destruct (partition f l), (partition f r), (f x). Qed. Lemma partition_spec1 s f : Proper (X.eq==>Logic.eq) f -> Equal (partition f s)#1 (filter f s). Proof. now rewrite partition_spec1'. Qed. Lemma partition_spec2 s f : Proper (X.eq==>Logic.eq) f -> Equal (partition f s)#2 (filter (fun x => negb (f x)) s). Proof. now rewrite partition_spec2'. Qed. Instance partition_ok1 s f `(Ok s) : Ok (partition f s)#1. Proof. rewrite partition_spec1'; now apply filter_ok. Qed. Instance partition_ok2 s f `(Ok s) : Ok (partition f s)#2. Proof. rewrite partition_spec2'; now apply filter_ok. Qed. End MakeRaw. (** * Encapsulation Now, in order to really provide a functor implementing [S], we need to encapsulate everything into a type of binary search trees. They also happen to be well-balanced, but this has no influence on the correctness of operations, so we won't state this here, see [MSetFullAVL] if you need more than just the MSet interface. *) Module IntMake (I:Int)(X: OrderedType) <: S with Module E := X. Module Raw := MakeRaw I X. Include Raw2Sets X Raw. End IntMake. (* For concrete use inside Coq, we propose an instantiation of [Int] by [Z]. *) Module Make (X: OrderedType) <: S with Module E := X :=IntMake(Z_as_Int)(X). coq-8.4pl2/theories/MSets/MSetEqProperties.v0000640000175000001440000005424311776416511020127 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mem x s=mem y s. Proof. intro H; rewrite H; auto. Qed. Lemma equal_mem_1: (forall a, mem a s=mem a s') -> equal s s'=true. Proof. intros; apply equal_1; unfold Equal; intros. do 2 rewrite mem_iff; rewrite H; tauto. Qed. Lemma equal_mem_2: equal s s'=true -> forall a, mem a s=mem a s'. Proof. intros; rewrite (equal_2 H); auto. Qed. Lemma subset_mem_1: (forall a, mem a s=true->mem a s'=true) -> subset s s'=true. Proof. intros; apply subset_1; unfold Subset; intros a. do 2 rewrite mem_iff; auto. Qed. Lemma subset_mem_2: subset s s'=true -> forall a, mem a s=true -> mem a s'=true. Proof. intros H a; do 2 rewrite <- mem_iff; apply subset_2; auto. Qed. Lemma empty_mem: mem x empty=false. Proof. rewrite <- not_mem_iff; auto with set. Qed. Lemma is_empty_equal_empty: is_empty s = equal s empty. Proof. apply bool_1; split; intros. auto with set. rewrite <- is_empty_iff; auto with set. Qed. Lemma choose_mem_1: choose s=Some x -> mem x s=true. Proof. auto with set. Qed. Lemma choose_mem_2: choose s=None -> is_empty s=true. Proof. auto with set. Qed. Lemma add_mem_1: mem x (add x s)=true. Proof. auto with set relations. Qed. Lemma add_mem_2: ~E.eq x y -> mem y (add x s)=mem y s. Proof. apply add_neq_b. Qed. Lemma remove_mem_1: mem x (remove x s)=false. Proof. rewrite <- not_mem_iff; auto with set relations. Qed. Lemma remove_mem_2: ~E.eq x y -> mem y (remove x s)=mem y s. Proof. apply remove_neq_b. Qed. Lemma singleton_equal_add: equal (singleton x) (add x empty)=true. Proof. rewrite (singleton_equal_add x); auto with set. Qed. Lemma union_mem: mem x (union s s')=mem x s || mem x s'. Proof. apply union_b. Qed. Lemma inter_mem: mem x (inter s s')=mem x s && mem x s'. Proof. apply inter_b. Qed. 
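(* Illustrative addition (not part of the original file): the [*_mem]
   equations let membership facts be transported across set operations by
   rewriting, as in the following example. The name [union_mem_example]
   is ours. *)
Example union_mem_example : mem x s = true -> mem x (union s s') = true.
Proof. intro H; rewrite union_mem, H; reflexivity. Qed.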
Lemma diff_mem: mem x (diff s s')=mem x s && negb (mem x s'). Proof. apply diff_b. Qed. (** properties of [mem] *) Lemma mem_3 : ~In x s -> mem x s=false. Proof. intros; rewrite <- not_mem_iff; auto. Qed. Lemma mem_4 : mem x s=false -> ~In x s. Proof. intros; rewrite not_mem_iff; auto. Qed. (** Properties of [equal] *) Lemma equal_refl: equal s s=true. Proof. auto with set. Qed. Lemma equal_sym: equal s s'=equal s' s. Proof. intros; apply bool_1; do 2 rewrite <- equal_iff; intuition. Qed. Lemma equal_trans: equal s s'=true -> equal s' s''=true -> equal s s''=true. Proof. intros; rewrite (equal_2 H); auto. Qed. Lemma equal_equal: equal s s'=true -> equal s s''=equal s' s''. Proof. intros; rewrite (equal_2 H); auto. Qed. Lemma equal_cardinal: equal s s'=true -> cardinal s=cardinal s'. Proof. auto with set. Qed. (* Properties of [subset] *) Lemma subset_refl: subset s s=true. Proof. auto with set. Qed. Lemma subset_antisym: subset s s'=true -> subset s' s=true -> equal s s'=true. Proof. auto with set. Qed. Lemma subset_trans: subset s s'=true -> subset s' s''=true -> subset s s''=true. Proof. do 3 rewrite <- subset_iff; intros. apply subset_trans with s'; auto. Qed. Lemma subset_equal: equal s s'=true -> subset s s'=true. Proof. auto with set. Qed. (** Properties of [choose] *) Lemma choose_mem_3: is_empty s=false -> {x:elt|choose s=Some x /\ mem x s=true}. Proof. intros. generalize (@choose_1 s) (@choose_2 s). destruct (choose s);intros. exists e;auto with set. generalize (H1 (eq_refl None)); clear H1. intros; rewrite (is_empty_1 H1) in H; discriminate. Qed. Lemma choose_mem_4: choose empty=None. Proof. generalize (@choose_1 empty). case (@choose empty);intros;auto. elim (@empty_1 e); auto. Qed. (** Properties of [add] *) Lemma add_mem_3: mem y s=true -> mem y (add x s)=true. Proof. auto with set. Qed. Lemma add_equal: mem x s=true -> equal (add x s) s=true. Proof. auto with set. Qed. (** Properties of [remove] *) Lemma remove_mem_3: mem y (remove x s)=true -> mem y s=true. Proof. rewrite remove_b; intros H;destruct (andb_prop _ _ H); auto. Qed. Lemma remove_equal: mem x s=false -> equal (remove x s) s=true. Proof. intros; apply equal_1; apply remove_equal. rewrite not_mem_iff; auto. Qed. Lemma add_remove: mem x s=true -> equal (add x (remove x s)) s=true. Proof. intros; apply equal_1; apply add_remove; auto with set. Qed. Lemma remove_add: mem x s=false -> equal (remove x (add x s)) s=true. Proof. intros; apply equal_1; apply remove_add; auto. rewrite not_mem_iff; auto. Qed. (** Properties of [is_empty] *) Lemma is_empty_cardinal: is_empty s = zerob (cardinal s). Proof. intros; apply bool_1; split; intros. rewrite MP.cardinal_1; simpl; auto with set. assert (cardinal s = 0) by (apply zerob_true_elim; auto). auto with set. Qed. (** Properties of [singleton] *) Lemma singleton_mem_1: mem x (singleton x)=true. Proof. auto with set relations. Qed. Lemma singleton_mem_2: ~E.eq x y -> mem y (singleton x)=false. Proof. intros; rewrite singleton_b. unfold eqb; destruct (E.eq_dec x y); intuition. Qed. Lemma singleton_mem_3: mem y (singleton x)=true -> E.eq x y. Proof. intros; apply singleton_1; auto with set. Qed. (** Properties of [union] *) Lemma union_sym: equal (union s s') (union s' s)=true. Proof. auto with set. Qed. Lemma union_subset_equal: subset s s'=true -> equal (union s s') s'=true. Proof. auto with set. Qed. Lemma union_equal_1: equal s s'=true-> equal (union s s'') (union s' s'')=true. Proof. auto with set. Qed. 
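(* Illustrative addition (not part of the original file): the Boolean [equal]
   and [subset] lemmas above compose like their propositional counterparts;
   for instance a set equal to a subset of [s''] is itself a subset of [s''].
   The name [equal_subset_trans] is ours. *)
Example equal_subset_trans :
  equal s s'=true -> subset s' s''=true -> subset s s''=true.
Proof.
  intros H H'; apply subset_trans; [apply subset_equal; assumption | assumption].
Qed.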
Lemma union_equal_2: equal s' s''=true-> equal (union s s') (union s s'')=true. Proof. auto with set. Qed. Lemma union_assoc: equal (union (union s s') s'') (union s (union s' s''))=true. Proof. auto with set. Qed. Lemma add_union_singleton: equal (add x s) (union (singleton x) s)=true. Proof. auto with set. Qed. Lemma union_add: equal (union (add x s) s') (add x (union s s'))=true. Proof. auto with set. Qed. (* caracterisation of [union] via [subset] *) Lemma union_subset_1: subset s (union s s')=true. Proof. auto with set. Qed. Lemma union_subset_2: subset s' (union s s')=true. Proof. auto with set. Qed. Lemma union_subset_3: subset s s''=true -> subset s' s''=true -> subset (union s s') s''=true. Proof. intros; apply subset_1; apply union_subset_3; auto with set. Qed. (** Properties of [inter] *) Lemma inter_sym: equal (inter s s') (inter s' s)=true. Proof. auto with set. Qed. Lemma inter_subset_equal: subset s s'=true -> equal (inter s s') s=true. Proof. auto with set. Qed. Lemma inter_equal_1: equal s s'=true -> equal (inter s s'') (inter s' s'')=true. Proof. auto with set. Qed. Lemma inter_equal_2: equal s' s''=true -> equal (inter s s') (inter s s'')=true. Proof. auto with set. Qed. Lemma inter_assoc: equal (inter (inter s s') s'') (inter s (inter s' s''))=true. Proof. auto with set. Qed. Lemma union_inter_1: equal (inter (union s s') s'') (union (inter s s'') (inter s' s''))=true. Proof. auto with set. Qed. Lemma union_inter_2: equal (union (inter s s') s'') (inter (union s s'') (union s' s''))=true. Proof. auto with set. Qed. Lemma inter_add_1: mem x s'=true -> equal (inter (add x s) s') (add x (inter s s'))=true. Proof. auto with set. Qed. Lemma inter_add_2: mem x s'=false -> equal (inter (add x s) s') (inter s s')=true. Proof. intros; apply equal_1; apply inter_add_2. rewrite not_mem_iff; auto. Qed. (* caracterisation of [union] via [subset] *) Lemma inter_subset_1: subset (inter s s') s=true. Proof. auto with set. Qed. Lemma inter_subset_2: subset (inter s s') s'=true. Proof. auto with set. Qed. Lemma inter_subset_3: subset s'' s=true -> subset s'' s'=true -> subset s'' (inter s s')=true. Proof. intros; apply subset_1; apply inter_subset_3; auto with set. Qed. (** Properties of [diff] *) Lemma diff_subset: subset (diff s s') s=true. Proof. auto with set. Qed. Lemma diff_subset_equal: subset s s'=true -> equal (diff s s') empty=true. Proof. auto with set. Qed. Lemma remove_inter_singleton: equal (remove x s) (diff s (singleton x))=true. Proof. auto with set. Qed. Lemma diff_inter_empty: equal (inter (diff s s') (inter s s')) empty=true. Proof. auto with set. Qed. Lemma diff_inter_all: equal (union (diff s s') (inter s s')) s=true. Proof. auto with set. Qed. End BasicProperties. Hint Immediate empty_mem is_empty_equal_empty add_mem_1 remove_mem_1 singleton_equal_add union_mem inter_mem diff_mem equal_sym add_remove remove_add : set. Hint Resolve equal_mem_1 subset_mem_1 choose_mem_1 choose_mem_2 add_mem_2 remove_mem_2 equal_refl equal_equal subset_refl subset_equal subset_antisym add_mem_3 add_equal remove_mem_3 remove_equal : set. (** General recursion principle *) Lemma set_rec: forall (P:t->Type), (forall s s', equal s s'=true -> P s -> P s') -> (forall s x, mem x s=false -> P s -> P (add x s)) -> P empty -> forall s, P s. Proof. intros. apply set_induction; auto; intros. apply X with empty; auto with set. apply X with (add x s0); auto with set. apply equal_1; intro a; rewrite add_iff; rewrite (H0 a); tauto. apply X0; auto with set; apply mem_3; auto. Qed. 
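(* Added comment (not part of the original file): [set_rec] is an induction
   principle phrased with Boolean tests: to prove [P s] for every set [s], it
   suffices that [P] be invariant under [equal], preserved by adding an element
   not already present ([mem x s = false]), and hold for [empty]. *)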
(** Properties of [fold] *) Lemma exclusive_set : forall s s' x, ~(In x s/\In x s') <-> mem x s && mem x s'=false. Proof. intros; do 2 rewrite mem_iff. destruct (mem x s); destruct (mem x s'); intuition. Qed. Section Fold. Variables (A:Type)(eqA:A->A->Prop)(st:Equivalence eqA). Variables (f:elt->A->A)(Comp:Proper (E.eq==>eqA==>eqA) f)(Ass:transpose eqA f). Variables (i:A). Variables (s s':t)(x:elt). Lemma fold_empty: (fold f empty i) = i. Proof. apply fold_empty; auto. Qed. Lemma fold_equal: equal s s'=true -> eqA (fold f s i) (fold f s' i). Proof. intros; apply fold_equal with (eqA:=eqA); auto with set. Qed. Lemma fold_add: mem x s=false -> eqA (fold f (add x s) i) (f x (fold f s i)). Proof. intros; apply fold_add with (eqA:=eqA); auto. rewrite not_mem_iff; auto. Qed. Lemma add_fold: mem x s=true -> eqA (fold f (add x s) i) (fold f s i). Proof. intros; apply add_fold with (eqA:=eqA); auto with set. Qed. Lemma remove_fold_1: mem x s=true -> eqA (f x (fold f (remove x s) i)) (fold f s i). Proof. intros; apply remove_fold_1 with (eqA:=eqA); auto with set. Qed. Lemma remove_fold_2: mem x s=false -> eqA (fold f (remove x s) i) (fold f s i). Proof. intros; apply remove_fold_2 with (eqA:=eqA); auto. rewrite not_mem_iff; auto. Qed. Lemma fold_union: (forall x, mem x s && mem x s'=false) -> eqA (fold f (union s s') i) (fold f s (fold f s' i)). Proof. intros; apply fold_union with (eqA:=eqA); auto. intros; rewrite exclusive_set; auto. Qed. End Fold. (** Properties of [cardinal] *) Lemma add_cardinal_1: forall s x, mem x s=true -> cardinal (add x s)=cardinal s. Proof. auto with set. Qed. Lemma add_cardinal_2: forall s x, mem x s=false -> cardinal (add x s)=S (cardinal s). Proof. intros; apply add_cardinal_2; auto. rewrite not_mem_iff; auto. Qed. Lemma remove_cardinal_1: forall s x, mem x s=true -> S (cardinal (remove x s))=cardinal s. Proof. intros; apply remove_cardinal_1; auto with set. Qed. Lemma remove_cardinal_2: forall s x, mem x s=false -> cardinal (remove x s)=cardinal s. Proof. intros; apply Equal_cardinal; apply equal_2; auto with set. Qed. Lemma union_cardinal: forall s s', (forall x, mem x s && mem x s'=false) -> cardinal (union s s')=cardinal s+cardinal s'. Proof. intros; apply union_cardinal; auto; intros. rewrite exclusive_set; auto. Qed. Lemma subset_cardinal: forall s s', subset s s'=true -> cardinal s<=cardinal s'. Proof. intros; apply subset_cardinal; auto with set. Qed. Section Bool. (** Properties of [filter] *) Variable f:elt->bool. Variable Comp: Proper (E.eq==>Logic.eq) f. Let Comp' : Proper (E.eq==>Logic.eq) (fun x =>negb (f x)). Proof. repeat red; intros; f_equal; auto. Qed. Lemma filter_mem: forall s x, mem x (filter f s)=mem x s && f x. Proof. intros; apply filter_b; auto. Qed. Lemma for_all_filter: forall s, for_all f s=is_empty (filter (fun x => negb (f x)) s). Proof. intros; apply bool_1; split; intros. apply is_empty_1. unfold Empty; intros. rewrite filter_iff; auto. red; destruct 1. rewrite <- (@for_all_iff s f) in H; auto. rewrite (H a H0) in H1; discriminate. apply for_all_1; auto; red; intros. revert H; rewrite <- is_empty_iff. unfold Empty; intro H; generalize (H x); clear H. rewrite filter_iff; auto. destruct (f x); auto. Qed. Lemma exists_filter : forall s, exists_ f s=negb (is_empty (filter f s)). Proof. intros; apply bool_1; split; intros. destruct (exists_2 Comp H) as (a,(Ha1,Ha2)). apply bool_6. red; intros; apply (@is_empty_2 _ H0 a); auto with set. generalize (@choose_1 (filter f s)) (@choose_2 (filter f s)). destruct (choose (filter f s)). 
intros H0 _; apply exists_1; auto. exists e; generalize (H0 e); rewrite filter_iff; auto. intros _ H0. rewrite (is_empty_1 (H0 (eq_refl None))) in H; auto; discriminate. Qed. Lemma partition_filter_1: forall s, equal (fst (partition f s)) (filter f s)=true. Proof. auto with set. Qed. Lemma partition_filter_2: forall s, equal (snd (partition f s)) (filter (fun x => negb (f x)) s)=true. Proof. auto with set. Qed. Lemma filter_add_1 : forall s x, f x = true -> filter f (add x s) [=] add x (filter f s). Proof. red; intros; set_iff; do 2 (rewrite filter_iff; auto); set_iff. intuition. rewrite <- H; apply Comp; auto with relations. Qed. Lemma filter_add_2 : forall s x, f x = false -> filter f (add x s) [=] filter f s. Proof. red; intros; do 2 (rewrite filter_iff; auto); set_iff. intuition. assert (f x = f a) by (apply Comp; auto). rewrite H in H1; rewrite H2 in H1; discriminate. Qed. Lemma add_filter_1 : forall s s' x, f x=true -> (Add x s s') -> (Add x (filter f s) (filter f s')). Proof. unfold Add, MP.Add; intros. repeat rewrite filter_iff; auto. rewrite H0; clear H0. intuition. setoid_replace y with x; auto with relations. Qed. Lemma add_filter_2 : forall s s' x, f x=false -> (Add x s s') -> filter f s [=] filter f s'. Proof. unfold Add, MP.Add, Equal; intros. repeat rewrite filter_iff; auto. rewrite H0; clear H0. intuition. setoid_replace x with a in H; auto. congruence. Qed. Lemma union_filter: forall f g, Proper (E.eq==>Logic.eq) f -> Proper (E.eq==>Logic.eq) g -> forall s, union (filter f s) (filter g s) [=] filter (fun x=>orb (f x) (g x)) s. Proof. clear Comp' Comp f. intros. assert (Proper (E.eq==>Logic.eq) (fun x => orb (f x) (g x))). repeat red; intros. rewrite (H x y H1); rewrite (H0 x y H1); auto. unfold Equal; intros; set_iff; repeat rewrite filter_iff; auto. assert (f a || g a = true <-> f a = true \/ g a = true). split; auto with bool. intro H3; destruct (orb_prop _ _ H3); auto. tauto. Qed. Lemma filter_union: forall s s', filter f (union s s') [=] union (filter f s) (filter f s'). Proof. unfold Equal; intros; set_iff; repeat rewrite filter_iff; auto; set_iff; tauto. Qed. (** Properties of [for_all] *) Lemma for_all_mem_1: forall s, (forall x, (mem x s)=true->(f x)=true) -> (for_all f s)=true. Proof. intros. rewrite for_all_filter; auto. rewrite is_empty_equal_empty. apply equal_mem_1;intros. rewrite filter_b; auto. rewrite empty_mem. generalize (H a); case (mem a s);intros;auto. rewrite H0;auto. Qed. Lemma for_all_mem_2: forall s, (for_all f s)=true -> forall x,(mem x s)=true -> (f x)=true. Proof. intros. rewrite for_all_filter in H; auto. rewrite is_empty_equal_empty in H. generalize (equal_mem_2 _ _ H x). rewrite filter_b; auto. rewrite empty_mem. rewrite H0; simpl;intros. rewrite <- negb_false_iff; auto. Qed. Lemma for_all_mem_3: forall s x,(mem x s)=true -> (f x)=false -> (for_all f s)=false. Proof. intros. apply (bool_eq_ind (for_all f s));intros;auto. rewrite for_all_filter in H1; auto. rewrite is_empty_equal_empty in H1. generalize (equal_mem_2 _ _ H1 x). rewrite filter_b; auto. rewrite empty_mem. rewrite H. rewrite H0. simpl;auto. Qed. Lemma for_all_mem_4: forall s, for_all f s=false -> {x:elt | mem x s=true /\ f x=false}. Proof. intros. rewrite for_all_filter in H; auto. destruct (choose_mem_3 _ H) as (x,(H0,H1));intros. exists x. rewrite filter_b in H1; auto. elim (andb_prop _ _ H1). split;auto. rewrite <- negb_true_iff; auto. Qed. (** Properties of [exists] *) Lemma for_all_exists: forall s, exists_ f s = negb (for_all (fun x =>negb (f x)) s). Proof. intros. 
rewrite for_all_b; auto. rewrite exists_b; auto. induction (elements s); simpl; auto. destruct (f a); simpl; auto. Qed. End Bool. Section Bool'. Variable f:elt->bool. Variable Comp: Proper (E.eq==>Logic.eq) f. Let Comp' : Proper (E.eq==>Logic.eq) (fun x => negb (f x)). Proof. repeat red; intros; f_equal; auto. Qed. Lemma exists_mem_1: forall s, (forall x, mem x s=true->f x=false) -> exists_ f s=false. Proof. intros. rewrite for_all_exists; auto. rewrite for_all_mem_1;auto with bool. intros;generalize (H x H0);intros. rewrite negb_true_iff; auto. Qed. Lemma exists_mem_2: forall s, exists_ f s=false -> forall x, mem x s=true -> f x=false. Proof. intros. rewrite for_all_exists in H; auto. rewrite negb_false_iff in H. rewrite <- negb_true_iff. apply for_all_mem_2 with (2:=H); auto. Qed. Lemma exists_mem_3: forall s x, mem x s=true -> f x=true -> exists_ f s=true. Proof. intros. rewrite for_all_exists; auto. rewrite negb_true_iff. apply for_all_mem_3 with x;auto. rewrite negb_false_iff; auto. Qed. Lemma exists_mem_4: forall s, exists_ f s=true -> {x:elt | (mem x s)=true /\ (f x)=true}. Proof. intros. rewrite for_all_exists in H; auto. rewrite negb_true_iff in H. elim (@for_all_mem_4 (fun x =>negb (f x)) Comp' s);intros;auto. elim p;intros. exists x;split;auto. rewrite <-negb_false_iff; auto. Qed. End Bool'. Section Sum. (** Adding a valuation function on all elements of a set. *) Definition sum (f:elt -> nat)(s:t) := fold (fun x => plus (f x)) s 0. Notation compat_opL := (Proper (E.eq==>Logic.eq==>Logic.eq)). Notation transposeL := (transpose Logic.eq). Lemma sum_plus : forall f g, Proper (E.eq==>Logic.eq) f -> Proper (E.eq==>Logic.eq) g -> forall s, sum (fun x =>f x+g x) s = sum f s + sum g s. Proof. unfold sum. intros f g Hf Hg. assert (fc : compat_opL (fun x:elt =>plus (f x))) by (repeat red; intros; rewrite Hf; auto). assert (ft : transposeL (fun x:elt =>plus (f x))) by (red; intros; omega). assert (gc : compat_opL (fun x:elt => plus (g x))) by (repeat red; intros; rewrite Hg; auto). assert (gt : transposeL (fun x:elt =>plus (g x))) by (red; intros; omega). assert (fgc : compat_opL (fun x:elt =>plus ((f x)+(g x)))) by (repeat red; intros; rewrite Hf,Hg; auto). assert (fgt : transposeL (fun x:elt=>plus ((f x)+(g x)))) by (red; intros; omega). intros s;pattern s; apply set_rec. intros. rewrite <- (fold_equal _ _ _ _ fc ft 0 _ _ H). rewrite <- (fold_equal _ _ _ _ gc gt 0 _ _ H). rewrite <- (fold_equal _ _ _ _ fgc fgt 0 _ _ H); auto. intros; do 3 (rewrite fold_add; auto with *). do 3 rewrite fold_empty;auto. Qed. Lemma sum_filter : forall f : elt -> bool, Proper (E.eq==>Logic.eq) f -> forall s, (sum (fun x => if f x then 1 else 0) s) = (cardinal (filter f s)). Proof. unfold sum; intros f Hf. assert (st : Equivalence (@Logic.eq nat)) by (split; congruence). assert (cc : compat_opL (fun x => plus (if f x then 1 else 0))) by (repeat red; intros; rewrite Hf; auto). assert (ct : transposeL (fun x => plus (if f x then 1 else 0))) by (red; intros; omega). intros s;pattern s; apply set_rec. intros. change elt with E.t. rewrite <- (fold_equal _ _ st _ cc ct 0 _ _ H). apply equal_2 in H; rewrite <- H, <-H0; auto. intros; rewrite (fold_add _ _ st _ cc ct); auto. generalize (@add_filter_1 f Hf s0 (add x s0) x) (@add_filter_2 f Hf s0 (add x s0) x) . assert (~ In x (filter f s0)). intro H1; rewrite (mem_1 (filter_1 Hf H1)) in H; discriminate H. case (f x); simpl; intros. rewrite (MP.cardinal_2 H1 (H2 (eq_refl true) (MP.Add_add s0 x))); auto. 
rewrite <- (MP.Equal_cardinal (H3 (eq_refl false) (MP.Add_add s0 x))); auto. intros; rewrite fold_empty;auto. rewrite MP.cardinal_1; auto. unfold Empty; intros. rewrite filter_iff; auto; set_iff; tauto. Qed. Lemma fold_compat : forall (A:Type)(eqA:A->A->Prop)(st:Equivalence eqA) (f g:elt->A->A), Proper (E.eq==>eqA==>eqA) f -> transpose eqA f -> Proper (E.eq==>eqA==>eqA) g -> transpose eqA g -> forall (i:A)(s:t),(forall x:elt, (In x s) -> forall y, (eqA (f x y) (g x y))) -> (eqA (fold f s i) (fold g s i)). Proof. intros A eqA st f g fc ft gc gt i. intro s; pattern s; apply set_rec; intros. transitivity (fold f s0 i). apply fold_equal with (eqA:=eqA); auto. rewrite equal_sym; auto. transitivity (fold g s0 i). apply H0; intros; apply H1; auto with set. elim (equal_2 H x); auto with set; intros. apply fold_equal with (eqA:=eqA); auto with set. transitivity (f x (fold f s0 i)). apply fold_add with (eqA:=eqA); auto with set. transitivity (g x (fold f s0 i)); auto with set relations. transitivity (g x (fold g s0 i)); auto with set relations. apply gc; auto with set relations. symmetry; apply fold_add with (eqA:=eqA); auto. do 2 rewrite fold_empty; reflexivity. Qed. Lemma sum_compat : forall f g, Proper (E.eq==>Logic.eq) f -> Proper (E.eq==>Logic.eq) g -> forall s, (forall x, In x s -> f x=g x) -> sum f s=sum g s. intros. unfold sum; apply (@fold_compat _ (@Logic.eq nat)); repeat red; auto with *. Qed. End Sum. End WEqPropertiesOn. (** Now comes variants for self-contained weak sets and for full sets. For these variants, only one argument is necessary. Thanks to the subtyping [WS<=S], the [EqProperties] functor which is meant to be used on modules [(M:S)] can simply be an alias of [WEqProperties]. *) Module WEqProperties (M:WSets) := WEqPropertiesOn M.E M. Module EqProperties := WEqProperties. coq-8.4pl2/theories/MSets/MSetPositive.v0000640000175000001440000010625611776416511017311 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* True | xH, _ => False | xO p, xO q => bits_lt p q | xO _, _ => True | xI p, xI q => bits_lt p q | xI _, _ => False end. Definition lt:=bits_lt. Lemma bits_lt_antirefl : forall x : positive, ~ bits_lt x x. Proof. induction x; simpl; auto. Qed. Lemma bits_lt_trans : forall x y z : positive, bits_lt x y -> bits_lt y z -> bits_lt x z. Proof. induction x; destruct y,z; simpl; eauto; intuition. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. intros x x' Hx y y' Hy. rewrite Hx, Hy; intuition. Qed. Instance lt_strorder : StrictOrder lt. Proof. split; [ exact bits_lt_antirefl | exact bits_lt_trans ]. Qed. Fixpoint compare x y := match x, y with | x~1, y~1 => compare x y | x~1, _ => Gt | x~0, y~0 => compare x y | x~0, _ => Lt | 1, y~1 => Lt | 1, 1 => Eq | 1, y~0 => Gt end. Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y). Proof. unfold eq, lt. induction x; destruct y; try constructor; simpl; auto. destruct (IHx y); subst; auto. destruct (IHx y); subst; auto. Qed. End PositiveOrderedTypeBits. Module PositiveSet <: S with Module E:=PositiveOrderedTypeBits. Module E:=PositiveOrderedTypeBits. Definition elt := positive. Inductive tree := | Leaf : tree | Node : tree -> bool -> tree -> tree. Scheme tree_ind := Induction for tree Sort Prop. Definition t := tree. Definition empty := Leaf. Fixpoint is_empty (m : t) : bool := match m with | Leaf => true | Node l b r => negb b &&& is_empty l &&& is_empty r end. 
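(** Added note (illustration, not in the original file): an element is a [positive]
    whose bits, read from the least significant one, describe a path in the tree
    (0 = left subtree, 1 = right subtree), and the final [1] designates the boolean
    flag carried by the node reached this way.  For instance, the set {1,2,3} is
    represented by [Node (Node Leaf true Leaf) true (Node Leaf true Leaf)]. *)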
Fixpoint mem (i : positive) (m : t) : bool := match m with | Leaf => false | Node l o r => match i with | 1 => o | i~0 => mem i l | i~1 => mem i r end end. Fixpoint add (i : positive) (m : t) : t := match m with | Leaf => match i with | 1 => Node Leaf true Leaf | i~0 => Node (add i Leaf) false Leaf | i~1 => Node Leaf false (add i Leaf) end | Node l o r => match i with | 1 => Node l true r | i~0 => Node (add i l) o r | i~1 => Node l o (add i r) end end. Definition singleton i := add i empty. (** helper function to avoid creating empty trees that are not leaves *) Definition node l (b: bool) r := if b then Node l b r else match l,r with | Leaf,Leaf => Leaf | _,_ => Node l false r end. Fixpoint remove (i : positive) (m : t) : t := match m with | Leaf => Leaf | Node l o r => match i with | 1 => node l false r | i~0 => node (remove i l) o r | i~1 => node l o (remove i r) end end. Fixpoint union (m m': t) := match m with | Leaf => m' | Node l o r => match m' with | Leaf => m | Node l' o' r' => Node (union l l') (o||o') (union r r') end end. Fixpoint inter (m m': t) := match m with | Leaf => Leaf | Node l o r => match m' with | Leaf => Leaf | Node l' o' r' => node (inter l l') (o&&o') (inter r r') end end. Fixpoint diff (m m': t) := match m with | Leaf => Leaf | Node l o r => match m' with | Leaf => m | Node l' o' r' => node (diff l l') (o&&negb o') (diff r r') end end. Fixpoint equal (m m': t): bool := match m with | Leaf => is_empty m' | Node l o r => match m' with | Leaf => is_empty m | Node l' o' r' => eqb o o' &&& equal l l' &&& equal r r' end end. Fixpoint subset (m m': t): bool := match m with | Leaf => true | Node l o r => match m' with | Leaf => is_empty m | Node l' o' r' => (negb o ||| o') &&& subset l l' &&& subset r r' end end. (** reverses [y] and concatenate it with [x] *) Fixpoint rev_append y x := match y with | 1 => x | y~1 => rev_append y x~1 | y~0 => rev_append y x~0 end. Infix "@" := rev_append (at level 60). Definition rev x := x@1. Section Fold. Variables B : Type. Variable f : positive -> B -> B. (** the additional argument, [i], records the current path, in reverse order (this should be more efficient: we reverse this argument only at present nodes only, rather than at each node of the tree). we also use this convention in all functions below *) Fixpoint xfold (m : t) (v : B) (i : positive) := match m with | Leaf => v | Node l true r => xfold r (f (rev i) (xfold l v i~0)) i~1 | Node l false r => xfold r (xfold l v i~0) i~1 end. Definition fold m i := xfold m i 1. End Fold. Section Quantifiers. Variable f : positive -> bool. Fixpoint xforall (m : t) (i : positive) := match m with | Leaf => true | Node l o r => (negb o ||| f (rev i)) &&& xforall r i~1 &&& xforall l i~0 end. Definition for_all m := xforall m 1. Fixpoint xexists (m : t) (i : positive) := match m with | Leaf => false | Node l o r => (o &&& f (rev i)) ||| xexists r i~1 ||| xexists l i~0 end. Definition exists_ m := xexists m 1. Fixpoint xfilter (m : t) (i : positive) := match m with | Leaf => Leaf | Node l o r => node (xfilter l i~0) (o &&& f (rev i)) (xfilter r i~1) end. Definition filter m := xfilter m 1. Fixpoint xpartition (m : t) (i : positive) := match m with | Leaf => (Leaf,Leaf) | Node l o r => let (lt,lf) := xpartition l i~0 in let (rt,rf) := xpartition r i~1 in if o then let fi := f (rev i) in (node lt fi rt, node lf (negb fi) rf) else (node lt false rt, node lf false rf) end. Definition partition m := xpartition m 1. End Quantifiers. 
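(** Added illustration (a small executable sketch, not in the original file; the
    name [mem_add_example] is ours): membership and insertion compute simply by
    following the bits of the element down the tree. *)

Example mem_add_example : mem (1~0) (add (1~0) (add 1 empty)) = true.
Proof. reflexivity. Qed.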
(** uses [a] to accumulate values rather than doing a lot of concatenations *) Fixpoint xelements (m : t) (i : positive) (a: list positive) := match m with | Leaf => a | Node l false r => xelements l i~0 (xelements r i~1 a) | Node l true r => xelements l i~0 (rev i :: xelements r i~1 a) end. Definition elements (m : t) := xelements m 1 nil. Fixpoint cardinal (m : t) : nat := match m with | Leaf => O | Node l false r => (cardinal l + cardinal r)%nat | Node l true r => S (cardinal l + cardinal r) end. Definition omap (f: elt -> elt) x := match x with | None => None | Some i => Some (f i) end. (** would it be more efficient to use a path like in the above functions ? *) Fixpoint choose (m: t) := match m with | Leaf => None | Node l o r => if o then Some 1 else match choose l with | None => omap xI (choose r) | Some i => Some i~0 end end. Fixpoint min_elt (m: t) := match m with | Leaf => None | Node l o r => match min_elt l with | None => if o then Some 1 else omap xI (min_elt r) | Some i => Some i~0 end end. Fixpoint max_elt (m: t) := match m with | Leaf => None | Node l o r => match max_elt r with | None => if o then Some 1 else omap xO (max_elt l) | Some i => Some i~1 end end. (** lexicographic product, defined using a notation to keep things lazy *) Notation lex u v := match u with Eq => v | Lt => Lt | Gt => Gt end. Definition compare_bool a b := match a,b with | false, true => Lt | true, false => Gt | _,_ => Eq end. Fixpoint compare (m m': t): comparison := match m,m' with | Leaf,_ => if is_empty m' then Eq else Lt | _,Leaf => if is_empty m then Eq else Gt | Node l o r,Node l' o' r' => lex (compare_bool o o') (lex (compare l l') (compare r r')) end. Definition In i t := mem i t = true. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) s := forall x, In x s -> P x. Definition Exists (P : elt -> Prop) s := exists x, In x s /\ P x. Notation "s [=] t" := (Equal s t) (at level 70, no associativity). Notation "s [<=] t" := (Subset s t) (at level 70, no associativity). Definition eq := Equal. Definition lt m m' := compare m m' = Lt. (** Specification of [In] *) Instance In_compat : Proper (E.eq==>Logic.eq==>iff) In. Proof. intros s s' Hs x x' Hx. rewrite Hs, Hx; intuition. Qed. (** Specification of [eq] *) Local Instance eq_equiv : Equivalence eq. Proof. firstorder. Qed. (** Specification of [mem] *) Lemma mem_spec: forall s x, mem x s = true <-> In x s. Proof. unfold In. intuition. Qed. (** Additional lemmas for mem *) Lemma mem_Leaf: forall x, mem x Leaf = false. Proof. destruct x; trivial. Qed. (** Specification of [empty] *) Lemma empty_spec : Empty empty. Proof. unfold Empty, In. intro. rewrite mem_Leaf. discriminate. Qed. (** Specification of node *) Lemma mem_node: forall x l o r, mem x (node l o r) = mem x (Node l o r). Proof. intros x l o r. case o; trivial. destruct l; trivial. destruct r; trivial. symmetry. destruct x. apply mem_Leaf. apply mem_Leaf. reflexivity. Qed. Local Opaque node. (** Specification of [is_empty] *) Lemma is_empty_spec: forall s, is_empty s = true <-> Empty s. Proof. unfold Empty, In. induction s as [|l IHl o r IHr]; simpl. setoid_rewrite mem_Leaf. firstorder. rewrite <- 2andb_lazy_alt, 2andb_true_iff, IHl, IHr. clear IHl IHr. destruct o; simpl; split. intuition discriminate. intro H. elim (H 1). reflexivity. intros H [a|a|]; apply H || intro; discriminate. intro H. split. split. reflexivity. intro a. 
apply (H a~0). intro a. apply (H a~1). Qed. (** Specification of [subset] *) Lemma subset_Leaf_s: forall s, Leaf [<=] s. Proof. intros s i Hi. apply empty_spec in Hi. elim Hi. Qed. Lemma subset_spec: forall s s', subset s s' = true <-> s [<=] s'. Proof. induction s as [|l IHl o r IHr]; intros [|l' o' r']; simpl. split; intros. apply subset_Leaf_s. reflexivity. split; intros. apply subset_Leaf_s. reflexivity. rewrite <- 2andb_lazy_alt, 2andb_true_iff, 2is_empty_spec. destruct o; simpl. split. intuition discriminate. intro H. elim (@empty_spec 1). apply H. reflexivity. split; intro H. destruct H as [[_ Hl] Hr]. intros [i|i|] Hi. elim (Hr i Hi). elim (Hl i Hi). discriminate. split. split. reflexivity. unfold Empty. intros a H1. apply (@empty_spec (a~0)), H. assumption. unfold Empty. intros a H1. apply (@empty_spec (a~1)), H. assumption. rewrite <- 2andb_lazy_alt, 2andb_true_iff, IHl, IHr. clear. destruct o; simpl. split; intro H. destruct H as [[Ho' Hl] Hr]. rewrite Ho'. intros i Hi. destruct i. apply (Hr i). assumption. apply (Hl i). assumption. assumption. split. split. destruct o'; trivial. specialize (H 1). unfold In in H. simpl in H. apply H. reflexivity. intros i Hi. apply (H i~0). apply Hi. intros i Hi. apply (H i~1). apply Hi. split; intros. intros i Hi. destruct i; destruct H as [[H Hl] Hr]. apply (Hr i). assumption. apply (Hl i). assumption. discriminate Hi. split. split. reflexivity. intros i Hi. apply (H i~0). apply Hi. intros i Hi. apply (H i~1). apply Hi. Qed. (** Specification of [equal] (via subset) *) Lemma equal_subset: forall s s', equal s s' = subset s s' && subset s' s. Proof. induction s as [|l IHl o r IHr]; intros [|l' o' r']; simpl; trivial. destruct o. reflexivity. rewrite andb_comm. reflexivity. rewrite <- 6andb_lazy_alt. rewrite eq_iff_eq_true. rewrite 7andb_true_iff, eqb_true_iff. rewrite IHl, IHr, 2andb_true_iff. clear IHl IHr. intuition subst. destruct o'; reflexivity. destruct o'; reflexivity. destruct o; auto. destruct o'; trivial. Qed. Lemma equal_spec: forall s s', equal s s' = true <-> Equal s s'. Proof. intros. rewrite equal_subset. rewrite andb_true_iff. rewrite 2subset_spec. unfold Equal, Subset. firstorder. Qed. Lemma eq_dec : forall s s', { eq s s' } + { ~ eq s s' }. Proof. unfold eq. intros. case_eq (equal s s'); intro H. left. apply equal_spec, H. right. rewrite <- equal_spec. congruence. Defined. (** (Specified) definition of [compare] *) Lemma lex_Opp: forall u v u' v', u = CompOpp u' -> v = CompOpp v' -> lex u v = CompOpp (lex u' v'). Proof. intros ? ? u' ? -> ->. case u'; reflexivity. Qed. Lemma compare_bool_inv: forall b b', compare_bool b b' = CompOpp (compare_bool b' b). Proof. intros [|] [|]; reflexivity. Qed. Lemma compare_inv: forall s s', compare s s' = CompOpp (compare s' s). Proof. induction s as [|l IHl o r IHr]; destruct s' as [|l' o' r']; trivial. unfold compare. case is_empty; reflexivity. unfold compare. case is_empty; reflexivity. simpl. rewrite compare_bool_inv. case compare_bool; simpl; trivial; apply lex_Opp; auto. Qed. Lemma lex_Eq: forall u v, lex u v = Eq <-> u=Eq /\ v=Eq. Proof. intros u v; destruct u; intuition discriminate. Qed. Lemma compare_bool_Eq: forall b1 b2, compare_bool b1 b2 = Eq <-> eqb b1 b2 = true. Proof. intros [|] [|]; intuition discriminate. Qed. Lemma compare_equal: forall s s', compare s s' = Eq <-> equal s s' = true. Proof. induction s as [|l IHl o r IHr]; destruct s' as [|l' o' r']. simpl. tauto. unfold compare, equal. case is_empty; intuition discriminate. unfold compare, equal. 
case is_empty; intuition discriminate. simpl. rewrite <- 2andb_lazy_alt, 2andb_true_iff. rewrite <- IHl, <- IHr, <- compare_bool_Eq. clear IHl IHr. rewrite and_assoc. rewrite <- 2lex_Eq. reflexivity. Qed. Lemma compare_gt: forall s s', compare s s' = Gt -> lt s' s. Proof. unfold lt. intros s s'. rewrite compare_inv. case compare; trivial; intros; discriminate. Qed. Lemma compare_eq: forall s s', compare s s' = Eq -> eq s s'. Proof. unfold eq. intros s s'. rewrite compare_equal, equal_spec. trivial. Qed. Lemma compare_spec : forall s s' : t, CompSpec eq lt s s' (compare s s'). Proof. intros. case_eq (compare s s'); intro H; constructor. apply compare_eq, H. assumption. apply compare_gt, H. Qed. Section lt_spec. Inductive ct: comparison -> comparison -> comparison -> Prop := | ct_xxx: forall x, ct x x x | ct_xex: forall x, ct x Eq x | ct_exx: forall x, ct Eq x x | ct_glx: forall x, ct Gt Lt x | ct_lgx: forall x, ct Lt Gt x. Lemma ct_cxe: forall x, ct (CompOpp x) x Eq. Proof. destruct x; constructor. Qed. Lemma ct_xce: forall x, ct x (CompOpp x) Eq. Proof. destruct x; constructor. Qed. Lemma ct_lxl: forall x, ct Lt x Lt. Proof. destruct x; constructor. Qed. Lemma ct_gxg: forall x, ct Gt x Gt. Proof. destruct x; constructor. Qed. Lemma ct_xll: forall x, ct x Lt Lt. Proof. destruct x; constructor. Qed. Lemma ct_xgg: forall x, ct x Gt Gt. Proof. destruct x; constructor. Qed. Local Hint Constructors ct: ct. Local Hint Resolve ct_cxe ct_xce ct_lxl ct_xll ct_gxg ct_xgg: ct. Ltac ct := trivial with ct. Lemma ct_lex: forall u v w u' v' w', ct u v w -> ct u' v' w' -> ct (lex u u') (lex v v') (lex w w'). Proof. intros u v w u' v' w' H H'. inversion_clear H; inversion_clear H'; ct; destruct w; ct; destruct w'; ct. Qed. Lemma ct_compare_bool: forall a b c, ct (compare_bool a b) (compare_bool b c) (compare_bool a c). Proof. intros [|] [|] [|]; constructor. Qed. Lemma compare_x_Leaf: forall s, compare s Leaf = if is_empty s then Eq else Gt. Proof. intros. rewrite compare_inv. simpl. case (is_empty s); reflexivity. Qed. Lemma compare_empty_x: forall a, is_empty a = true -> forall b, compare a b = if is_empty b then Eq else Lt. Proof. induction a as [|l IHl o r IHr]; trivial. destruct o. intro; discriminate. simpl is_empty. rewrite <- andb_lazy_alt, andb_true_iff. intros [Hl Hr]. destruct b as [|l' [|] r']; simpl compare; trivial. rewrite Hl, Hr. trivial. rewrite (IHl Hl), (IHr Hr). simpl. case (is_empty l'); case (is_empty r'); trivial. Qed. Lemma compare_x_empty: forall a, is_empty a = true -> forall b, compare b a = if is_empty b then Eq else Gt. Proof. setoid_rewrite <- compare_x_Leaf. intros. rewrite 2(compare_inv b), (compare_empty_x _ H). reflexivity. Qed. Lemma ct_compare: forall a b c, ct (compare a b) (compare b c) (compare a c). Proof. induction a as [|l IHl o r IHr]; intros s' s''. destruct s' as [|l' o' r']; destruct s'' as [|l'' o'' r'']; ct. rewrite compare_inv. ct. unfold compare at 1. case_eq (is_empty (Node l' o' r')); intro H'. rewrite (compare_empty_x _ H'). ct. unfold compare at 2. case_eq (is_empty (Node l'' o'' r'')); intro H''. rewrite (compare_x_empty _ H''), H'. ct. ct. destruct s' as [|l' o' r']; destruct s'' as [|l'' o'' r'']. ct. unfold compare at 2. rewrite compare_x_Leaf. case_eq (is_empty (Node l o r)); intro H. rewrite (compare_empty_x _ H). ct. case_eq (is_empty (Node l'' o'' r'')); intro H''. rewrite (compare_x_empty _ H''), H. ct. ct. rewrite 2 compare_x_Leaf. case_eq (is_empty (Node l o r)); intro H. rewrite compare_inv, (compare_x_empty _ H). ct. 
case_eq (is_empty (Node l' o' r')); intro H'. rewrite (compare_x_empty _ H'), H. ct. ct. simpl compare. apply ct_lex. apply ct_compare_bool. apply ct_lex; trivial. Qed. End lt_spec. Instance lt_strorder : StrictOrder lt. Proof. unfold lt. split. intros x H. assert (compare x x = Eq). apply compare_equal, equal_spec. reflexivity. congruence. intros a b c. assert (H := ct_compare a b c). inversion_clear H; trivial; intros; discriminate. Qed. Local Instance compare_compat_1 : Proper (eq==>Logic.eq==>Logic.eq) compare. Proof. intros x x' Hx y y' Hy. subst y'. unfold eq in *. rewrite <- equal_spec, <- compare_equal in *. assert (C:=ct_compare x x' y). rewrite Hx in C. inversion C; auto. Qed. Instance compare_compat : Proper (eq==>eq==>Logic.eq) compare. Proof. intros x x' Hx y y' Hy. rewrite Hx. rewrite compare_inv, Hy, <- compare_inv. reflexivity. Qed. Local Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. intros x x' Hx y y' Hy. unfold lt. rewrite Hx, Hy. intuition. Qed. (** Specification of [add] *) Lemma add_spec: forall s x y, In y (add x s) <-> y=x \/ In y s. Proof. unfold In. intros s x y; revert x y s. induction x; intros [y|y|] [|l o r]; simpl mem; try (rewrite IHx; clear IHx); rewrite ?mem_Leaf; intuition congruence. Qed. (** Specification of [remove] *) Lemma remove_spec: forall s x y, In y (remove x s) <-> In y s /\ y<>x. Proof. unfold In. intros s x y; revert x y s. induction x; intros [y|y|] [|l o r]; simpl remove; rewrite ?mem_node; simpl mem; try (rewrite IHx; clear IHx); rewrite ?mem_Leaf; intuition congruence. Qed. (** Specification of [singleton] *) Lemma singleton_spec : forall x y, In y (singleton x) <-> y=x. Proof. unfold singleton. intros x y. rewrite add_spec. intuition. unfold In in *. rewrite mem_Leaf in *. discriminate. Qed. (** Specification of [union] *) Lemma union_spec: forall s s' x, In x (union s s') <-> In x s \/ In x s'. Proof. unfold In. intros s s' x; revert x s s'. induction x; destruct s; destruct s'; simpl union; simpl mem; try (rewrite IHx; clear IHx); try intuition congruence. apply orb_true_iff. Qed. (** Specification of [inter] *) Lemma inter_spec: forall s s' x, In x (inter s s') <-> In x s /\ In x s'. Proof. unfold In. intros s s' x; revert x s s'. induction x; destruct s; destruct s'; simpl inter; rewrite ?mem_node; simpl mem; try (rewrite IHx; clear IHx); try intuition congruence. apply andb_true_iff. Qed. (** Specification of [diff] *) Lemma diff_spec: forall s s' x, In x (diff s s') <-> In x s /\ ~ In x s'. Proof. unfold In. intros s s' x; revert x s s'. induction x; destruct s; destruct s' as [|l' o' r']; simpl diff; rewrite ?mem_node; simpl mem; try (rewrite IHx; clear IHx); try intuition congruence. rewrite andb_true_iff. destruct o'; intuition discriminate. Qed. (** Specification of [fold] *) Lemma fold_spec: forall s (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (fun a e => f e a) (elements s) i. Proof. unfold fold, elements. intros s A i f. revert s i. set (f' := fun a e => f e a). assert (H: forall s i j acc, fold_left f' acc (xfold f s i j) = fold_left f' (xelements s j acc) i). induction s as [|l IHl o r IHr]; intros; trivial. destruct o; simpl xelements; simpl xfold. rewrite IHr, <- IHl. reflexivity. rewrite IHr. apply IHl. intros. exact (H s i 1 nil). Qed. (** Specification of [cardinal] *) Lemma cardinal_spec: forall s, cardinal s = length (elements s). Proof. unfold elements. assert (H: forall s j acc, (cardinal s + length acc)%nat = length (xelements s j acc)). 
induction s as [|l IHl b r IHr]; intros j acc; simpl; trivial. destruct b. rewrite <- IHl. simpl. rewrite <- IHr. rewrite <- plus_n_Sm, Plus.plus_assoc. reflexivity. rewrite <- IHl, <- IHr. rewrite Plus.plus_assoc. reflexivity. intros. rewrite <- H. simpl. rewrite Plus.plus_comm. reflexivity. Qed. (** Specification of [filter] *) Lemma xfilter_spec: forall f s x i, In x (xfilter f s i) <-> In x s /\ f (i@x) = true. Proof. intro f. unfold In. induction s as [|l IHl o r IHr]; intros x i; simpl xfilter. rewrite mem_Leaf. intuition discriminate. rewrite mem_node. destruct x; simpl. rewrite IHr. reflexivity. rewrite IHl. reflexivity. rewrite <- andb_lazy_alt. apply andb_true_iff. Qed. Lemma filter_spec: forall s x f, compat_bool E.eq f -> (In x (filter f s) <-> In x s /\ f x = true). Proof. intros. apply xfilter_spec. Qed. (** Specification of [for_all] *) Lemma xforall_spec: forall f s i, xforall f s i = true <-> For_all (fun x => f (i@x) = true) s. Proof. unfold For_all, In. intro f. induction s as [|l IHl o r IHr]; intros i; simpl. setoid_rewrite mem_Leaf. intuition discriminate. rewrite <- 2andb_lazy_alt, <- orb_lazy_alt, 2 andb_true_iff. rewrite IHl, IHr. clear IHl IHr. split. intros [[Hi Hr] Hl] x. destruct x; simpl; intro H. apply Hr, H. apply Hl, H. rewrite H in Hi. assumption. intro H; intuition. specialize (H 1). destruct o. apply H. reflexivity. reflexivity. apply H. assumption. apply H. assumption. Qed. Lemma for_all_spec: forall s f, compat_bool E.eq f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Proof. intros. apply xforall_spec. Qed. (** Specification of [exists] *) Lemma xexists_spec: forall f s i, xexists f s i = true <-> Exists (fun x => f (i@x) = true) s. Proof. unfold Exists, In. intro f. induction s as [|l IHl o r IHr]; intros i; simpl. setoid_rewrite mem_Leaf. firstorder. rewrite <- 2orb_lazy_alt, 2orb_true_iff, <- andb_lazy_alt, andb_true_iff. rewrite IHl, IHr. clear IHl IHr. split. intros [[Hi|[x Hr]]|[x Hl]]. exists 1. exact Hi. exists x~1. exact Hr. exists x~0. exact Hl. intros [[x|x|] H]; eauto. Qed. Lemma exists_spec : forall s f, compat_bool E.eq f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Proof. intros. apply xexists_spec. Qed. (** Specification of [partition] *) Lemma partition_filter : forall s f, partition f s = (filter f s, filter (fun x => negb (f x)) s). Proof. unfold partition, filter. intros s f. generalize 1 as j. induction s as [|l IHl o r IHr]; intro j. reflexivity. destruct o; simpl; rewrite IHl, IHr; reflexivity. Qed. Lemma partition_spec1 : forall s f, compat_bool E.eq f -> Equal (fst (partition f s)) (filter f s). Proof. intros. rewrite partition_filter. reflexivity. Qed. Lemma partition_spec2 : forall s f, compat_bool E.eq f -> Equal (snd (partition f s)) (filter (fun x => negb (f x)) s). Proof. intros. rewrite partition_filter. reflexivity. Qed. (** Specification of [elements] *) Notation InL := (InA E.eq). Lemma xelements_spec: forall s j acc y, InL y (xelements s j acc) <-> InL y acc \/ exists x, y=(j@x) /\ mem x s = true. Proof. induction s as [|l IHl o r IHr]; simpl. intros. split; intro H. left. assumption. destruct H as [H|[x [Hx Hx']]]. assumption. elim (empty_spec Hx'). intros j acc y. case o. rewrite IHl. rewrite InA_cons. rewrite IHr. clear IHl IHr. split. intros [[H|[H|[x [-> H]]]]|[x [-> H]]]; eauto. right. exists x~1. auto. right. exists x~0. auto. intros [H|[x [-> H]]]. eauto. destruct x. left. right. right. exists x; auto. right. exists x; auto. left. left. reflexivity. rewrite IHl, IHr. 
clear IHl IHr. split. intros [[H|[x [-> H]]]|[x [-> H]]]. eauto. right. exists x~1. auto. right. exists x~0. auto. intros [H|[x [-> H]]]. eauto. destruct x. left. right. exists x; auto. right. exists x; auto. discriminate. Qed. Lemma elements_spec1: forall s x, InL x (elements s) <-> In x s. Proof. unfold elements. intros. rewrite xelements_spec. split; [ intros [A|(y & B & C)] | intros IN ]. inversion A. simpl in *. congruence. right. exists x. auto. Qed. Lemma lt_rev_append: forall j x y, E.lt x y -> E.lt (j@x) (j@y). Proof. induction j; intros; simpl; auto. Qed. Lemma elements_spec2: forall s, sort E.lt (elements s). Proof. unfold elements. assert (H: forall s j acc, sort E.lt acc -> (forall x y, In x s -> InL y acc -> E.lt (j@x) y) -> sort E.lt (xelements s j acc)). induction s as [|l IHl o r IHr]; simpl; trivial. intros j acc Hacc Hsacc. destruct o. apply IHl. constructor. apply IHr. apply Hacc. intros x y Hx Hy. apply Hsacc; assumption. case_eq (xelements r j~1 acc). constructor. intros z q H. constructor. assert (H': InL z (xelements r j~1 acc)). rewrite H. constructor. reflexivity. clear H q. rewrite xelements_spec in H'. destruct H' as [Hy|[x [-> Hx]]]. apply (Hsacc 1 z); trivial. reflexivity. simpl. apply lt_rev_append. exact I. intros x y Hx Hy. inversion_clear Hy. rewrite H. simpl. apply lt_rev_append. exact I. rewrite xelements_spec in H. destruct H as [Hy|[z [-> Hy]]]. apply Hsacc; assumption. simpl. apply lt_rev_append. exact I. apply IHl. apply IHr. apply Hacc. intros x y Hx Hy. apply Hsacc; assumption. intros x y Hx Hy. rewrite xelements_spec in Hy. destruct Hy as [Hy|[z [-> Hy]]]. apply Hsacc; assumption. simpl. apply lt_rev_append. exact I. intros. apply H. constructor. intros x y _ H'. inversion H'. Qed. Lemma elements_spec2w: forall s, NoDupA E.eq (elements s). Proof. intro. apply SortA_NoDupA with E.lt; auto with *. apply E.eq_equiv. apply elements_spec2. Qed. (** Specification of [choose] *) Lemma choose_spec1: forall s x, choose s = Some x -> In x s. Proof. induction s as [| l IHl o r IHr]; simpl. intros. discriminate. destruct o. intros x H. injection H; intros; subst. reflexivity. revert IHl. case choose. intros p Hp x H. injection H; intros; subst; clear H. apply Hp. reflexivity. intros _ x. revert IHr. case choose. intros p Hp H. injection H; intros; subst; clear H. apply Hp. reflexivity. intros. discriminate. Qed. Lemma choose_spec2: forall s, choose s = None -> Empty s. Proof. unfold Empty, In. intros s H. induction s as [|l IHl o r IHr]. intro. apply empty_spec. destruct o. discriminate. simpl in H. destruct (choose l). discriminate. destruct (choose r). discriminate. intros [a|a|]. apply IHr. reflexivity. apply IHl. reflexivity. discriminate. Qed. Lemma choose_empty: forall s, is_empty s = true -> choose s = None. Proof. intros s Hs. case_eq (choose s); trivial. intros p Hp. apply choose_spec1 in Hp. apply is_empty_spec in Hs. elim (Hs _ Hp). Qed. Lemma choose_spec3': forall s s', Equal s s' -> choose s = choose s'. Proof. setoid_rewrite <- equal_spec. induction s as [|l IHl o r IHr]. intros. symmetry. apply choose_empty. assumption. destruct s' as [|l' o' r']. generalize (Node l o r) as s. simpl. intros. apply choose_empty. rewrite equal_spec in H. symmetry in H. rewrite <- equal_spec in H. assumption. simpl. rewrite <- 2andb_lazy_alt, 2andb_true_iff, eqb_true_iff. intros [[<- Hl] Hr]. rewrite (IHl _ Hl), (IHr _ Hr). reflexivity. Qed. Lemma choose_spec3: forall s s' x y, choose s = Some x -> choose s' = Some y -> Equal s s' -> E.eq x y. Proof. 
intros s s' x y Hx Hy H. apply choose_spec3' in H. congruence. Qed. (** Specification of [min_elt] *) Lemma min_elt_spec1: forall s x, min_elt s = Some x -> In x s. Proof. unfold In. induction s as [| l IHl o r IHr]; simpl. intros. discriminate. intros x. destruct (min_elt l); intros. injection H. intros <-. apply IHl. reflexivity. destruct o; simpl. injection H. intros <-. reflexivity. destruct (min_elt r); simpl in *. injection H. intros <-. apply IHr. reflexivity. discriminate. Qed. Lemma min_elt_spec3: forall s, min_elt s = None -> Empty s. Proof. unfold Empty, In. intros s H. induction s as [|l IHl o r IHr]. intro. apply empty_spec. intros [a|a|]. apply IHr. revert H. clear. simpl. destruct (min_elt r); trivial. case min_elt; intros; try discriminate. destruct o; discriminate. apply IHl. revert H. clear. simpl. destruct (min_elt l); trivial. intro; discriminate. revert H. clear. simpl. case min_elt; intros; try discriminate. destruct o; discriminate. Qed. Lemma min_elt_spec2: forall s x y, min_elt s = Some x -> In y s -> ~ E.lt y x. Proof. unfold In. induction s as [|l IHl o r IHr]; intros x y H H'. discriminate. simpl in H. case_eq (min_elt l). intros p Hp. rewrite Hp in H. injection H; intros <-. destruct y as [z|z|]; simpl; intro; trivial. apply (IHl p z); trivial. intro Hp; rewrite Hp in H. apply min_elt_spec3 in Hp. destruct o. injection H. intros <- Hl. clear H. destruct y as [z|z|]; simpl; trivial. elim (Hp _ H'). destruct (min_elt r). injection H. intros <-. clear H. destruct y as [z|z|]. apply (IHr p z); trivial. elim (Hp _ H'). discriminate. discriminate. Qed. (** Specification of [max_elt] *) Lemma max_elt_spec1: forall s x, max_elt s = Some x -> In x s. Proof. unfold In. induction s as [| l IHl o r IHr]; simpl. intros. discriminate. intros x. destruct (max_elt r); intros. injection H. intros <-. apply IHr. reflexivity. destruct o; simpl. injection H. intros <-. reflexivity. destruct (max_elt l); simpl in *. injection H. intros <-. apply IHl. reflexivity. discriminate. Qed. Lemma max_elt_spec3: forall s, max_elt s = None -> Empty s. Proof. unfold Empty, In. intros s H. induction s as [|l IHl o r IHr]. intro. apply empty_spec. intros [a|a|]. apply IHr. revert H. clear. simpl. destruct (max_elt r); trivial. intro; discriminate. apply IHl. revert H. clear. simpl. destruct (max_elt l); trivial. case max_elt; intros; try discriminate. destruct o; discriminate. revert H. clear. simpl. case max_elt; intros; try discriminate. destruct o; discriminate. Qed. Lemma max_elt_spec2: forall s x y, max_elt s = Some x -> In y s -> ~ E.lt x y. Proof. unfold In. induction s as [|l IHl o r IHr]; intros x y H H'. discriminate. simpl in H. case_eq (max_elt r). intros p Hp. rewrite Hp in H. injection H; intros <-. destruct y as [z|z|]; simpl; intro; trivial. apply (IHr p z); trivial. intro Hp; rewrite Hp in H. apply max_elt_spec3 in Hp. destruct o. injection H. intros <- Hl. clear H. destruct y as [z|z|]; simpl; trivial. elim (Hp _ H'). destruct (max_elt l). injection H. intros <-. clear H. destruct y as [z|z|]. elim (Hp _ H'). apply (IHl p z); trivial. discriminate. discriminate. Qed. End PositiveSet. coq-8.4pl2/theories/MSets/MSets.v0000640000175000001440000000155111420077670015734 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false | y :: l => if X.eq_dec x y then true else mem x l end. 
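(** Added note: in this implementation a set is an unordered list of elements, kept
    without duplicates up to [X.eq] (this is the [Ok] invariant, [NoDupA X.eq], of
    [MakeRaw] below); [mem] is therefore a linear search relying on the decidable
    equality [X.eq_dec]. *)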
Fixpoint add (x : elt) (s : t) : t := match s with | nil => x :: nil | y :: l => if X.eq_dec x y then s else y :: add x l end. Definition singleton (x : elt) : t := x :: nil. Fixpoint remove (x : elt) (s : t) : t := match s with | nil => nil | y :: l => if X.eq_dec x y then l else y :: remove x l end. Definition fold (B : Type) (f : elt -> B -> B) (s : t) (i : B) : B := fold_left (flip f) s i. Definition union (s : t) : t -> t := fold add s. Definition diff (s s' : t) : t := fold remove s' s. Definition inter (s s': t) : t := fold (fun x s => if mem x s' then add x s else s) s nil. Definition subset (s s' : t) : bool := is_empty (diff s s'). Definition equal (s s' : t) : bool := andb (subset s s') (subset s' s). Fixpoint filter (f : elt -> bool) (s : t) : t := match s with | nil => nil | x :: l => if f x then x :: filter f l else filter f l end. Fixpoint for_all (f : elt -> bool) (s : t) : bool := match s with | nil => true | x :: l => if f x then for_all f l else false end. Fixpoint exists_ (f : elt -> bool) (s : t) : bool := match s with | nil => false | x :: l => if f x then true else exists_ f l end. Fixpoint partition (f : elt -> bool) (s : t) : t * t := match s with | nil => (nil, nil) | x :: l => let (s1, s2) := partition f l in if f x then (x :: s1, s2) else (s1, x :: s2) end. Definition cardinal (s : t) : nat := length s. Definition elements (s : t) : list elt := s. Definition choose (s : t) : option elt := match s with | nil => None | x::_ => Some x end. End Ops. (** ** Proofs of set operation specifications. *) Module MakeRaw (X:DecidableType) <: WRawSets X. Include Ops X. Section ForNotations. Notation NoDup := (NoDupA X.eq). Notation In := (InA X.eq). (* TODO: modify proofs in order to avoid these hints *) Hint Resolve (@Equivalence_Reflexive _ _ X.eq_equiv). Hint Immediate (@Equivalence_Symmetric _ _ X.eq_equiv). Hint Resolve (@Equivalence_Transitive _ _ X.eq_equiv). Definition IsOk := NoDup. Class Ok (s:t) : Prop := ok : NoDup s. Hint Unfold Ok. Hint Resolve @ok. Instance NoDup_Ok s (nd : NoDup s) : Ok s := { ok := nd }. Ltac inv_ok := match goal with | H:Ok (_ :: _) |- _ => inversion_clear H; inv_ok | H:Ok nil |- _ => clear H; inv_ok | H:NoDup ?l |- _ => change (Ok l) in H; inv_ok | _ => idtac end. Ltac inv := invlist InA; inv_ok. Ltac constructors := repeat constructor. Fixpoint isok l := match l with | nil => true | a::l => negb (mem a l) && isok l end. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) s := forall x, In x s -> P x. Definition Exists (P : elt -> Prop) s := exists x, In x s /\ P x. Lemma In_compat : Proper (X.eq==>eq==>iff) In. Proof. repeat red; intros. subst. rewrite H; auto. Qed. Lemma mem_spec : forall s x `{Ok s}, mem x s = true <-> In x s. Proof. induction s; intros. split; intros; inv. discriminate. simpl; destruct (X.eq_dec x a); split; intros; inv; auto. right; rewrite <- IHs; auto. rewrite IHs; auto. Qed. Lemma isok_iff : forall l, Ok l <-> isok l = true. Proof. induction l. intuition. simpl. rewrite andb_true_iff. rewrite negb_true_iff. rewrite <- IHl. split; intros H. inv. split; auto. apply not_true_is_false. rewrite mem_spec; auto. destruct H; constructors; auto. rewrite <- mem_spec; auto; congruence. Qed. Global Instance isok_Ok l : isok l = true -> Ok l | 10. Proof. intros. apply <- isok_iff; auto. Qed. 
Lemma add_spec : forall (s : t) (x y : elt) {Hs : Ok s}, In y (add x s) <-> X.eq y x \/ In y s. Proof. induction s; simpl; intros. intuition; inv; auto. destruct X.eq_dec; inv; rewrite InA_cons, ?IHs; intuition. left; eauto. inv; auto. Qed. Global Instance add_ok s x `(Ok s) : Ok (add x s). Proof. induction s. simpl; intuition. intros; inv. simpl. destruct X.eq_dec; auto. constructors; auto. intro; inv; auto. rewrite add_spec in *; intuition. Qed. Lemma remove_spec : forall (s : t) (x y : elt) {Hs : Ok s}, In y (remove x s) <-> In y s /\ ~X.eq y x. Proof. induction s; simpl; intros. intuition; inv; auto. destruct X.eq_dec; inv; rewrite !InA_cons, ?IHs; intuition. elim H. setoid_replace a with y; eauto. elim H3. setoid_replace x with y; eauto. elim n. eauto. Qed. Global Instance remove_ok s x `(Ok s) : Ok (remove x s). Proof. induction s; simpl; intros. auto. destruct X.eq_dec; inv; auto. constructors; auto. rewrite remove_spec; intuition. Qed. Lemma singleton_ok : forall x : elt, Ok (singleton x). Proof. unfold singleton; simpl; constructors; auto. intro; inv. Qed. Lemma singleton_spec : forall x y : elt, In y (singleton x) <-> X.eq y x. Proof. unfold singleton; simpl; split; intros. inv; auto. left; auto. Qed. Lemma empty_ok : Ok empty. Proof. unfold empty; constructors. Qed. Lemma empty_spec : Empty empty. Proof. unfold Empty, empty; red; intros; inv. Qed. Lemma is_empty_spec : forall s : t, is_empty s = true <-> Empty s. Proof. unfold Empty; destruct s; simpl; split; intros; auto. intro; inv. discriminate. elim (H e); auto. Qed. Lemma elements_spec1 : forall (s : t) (x : elt), In x (elements s) <-> In x s. Proof. unfold elements; intuition. Qed. Lemma elements_spec2w : forall (s : t) {Hs : Ok s}, NoDup (elements s). Proof. unfold elements; auto. Qed. Lemma fold_spec : forall (s : t) (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (flip f) (elements s) i. Proof. reflexivity. Qed. Global Instance union_ok : forall s s' `(Ok s, Ok s'), Ok (union s s'). Proof. induction s; simpl; auto; intros; inv; unfold flip; auto with *. Qed. Lemma union_spec : forall (s s' : t) (x : elt) {Hs : Ok s} {Hs' : Ok s'}, In x (union s s') <-> In x s \/ In x s'. Proof. induction s; simpl in *; unfold flip; intros; auto; inv. intuition; inv. rewrite IHs, add_spec, InA_cons; intuition. Qed. Global Instance inter_ok s s' `(Ok s, Ok s') : Ok (inter s s'). Proof. unfold inter, fold, flip. set (acc := nil (A:=elt)). assert (Hacc : Ok acc) by constructors. clearbody acc; revert acc Hacc. induction s; simpl; auto; intros. inv. apply IHs; auto. destruct (mem a s'); auto with *. Qed. Lemma inter_spec : forall (s s' : t) (x : elt) {Hs : Ok s} {Hs' : Ok s'}, In x (inter s s') <-> In x s /\ In x s'. Proof. unfold inter, fold, flip; intros. set (acc := nil (A:=elt)) in *. assert (Hacc : Ok acc) by constructors. assert (IFF : (In x s /\ In x s') <-> (In x s /\ In x s') \/ In x acc). intuition; unfold acc in *; inv. rewrite IFF; clear IFF. clearbody acc. revert acc Hacc x s' Hs Hs'. induction s; simpl; intros. intuition; inv. inv. case_eq (mem a s'); intros Hm. rewrite IHs, add_spec, InA_cons; intuition. rewrite mem_spec in Hm; auto. left; split; auto. rewrite H1; auto. rewrite IHs, InA_cons; intuition. rewrite H2, <- mem_spec in H3; auto. congruence. Qed. Global Instance diff_ok : forall s s' `(Ok s, Ok s'), Ok (diff s s'). Proof. unfold diff; intros s s'; revert s. induction s'; simpl; unfold flip; auto; intros. inv; auto with *. Qed. 
Lemma diff_spec : forall (s s' : t) (x : elt) {Hs : Ok s} {Hs' : Ok s'}, In x (diff s s') <-> In x s /\ ~In x s'. Proof. unfold diff; intros s s'; revert s. induction s'; simpl; unfold flip. intuition; inv. intros. inv. rewrite IHs', remove_spec, InA_cons; intuition. Qed. Lemma subset_spec : forall (s s' : t) {Hs : Ok s} {Hs' : Ok s'}, subset s s' = true <-> Subset s s'. Proof. unfold subset, Subset; intros. rewrite is_empty_spec. unfold Empty; intros. intuition. specialize (H a). rewrite diff_spec in H; intuition. rewrite <- (mem_spec a) in H |- *. destruct (mem a s'); intuition. rewrite diff_spec in H0; intuition. Qed. Lemma equal_spec : forall (s s' : t) {Hs : Ok s} {Hs' : Ok s'}, equal s s' = true <-> Equal s s'. Proof. unfold Equal, equal; intros. rewrite andb_true_iff, !subset_spec. unfold Subset; intuition. rewrite <- H; auto. rewrite H; auto. Qed. Definition choose_spec1 : forall (s : t) (x : elt), choose s = Some x -> In x s. Proof. destruct s; simpl; intros; inversion H; auto. Qed. Definition choose_spec2 : forall s : t, choose s = None -> Empty s. Proof. destruct s; simpl; intros. intros x H0; inversion H0. inversion H. Qed. Lemma cardinal_spec : forall (s : t) {Hs : Ok s}, cardinal s = length (elements s). Proof. auto. Qed. Lemma filter_spec' : forall s x f, In x (filter f s) -> In x s. Proof. induction s; simpl. intuition; inv. intros; destruct (f a); inv; intuition; right; eauto. Qed. Lemma filter_spec : forall (s : t) (x : elt) (f : elt -> bool), Proper (X.eq==>eq) f -> (In x (filter f s) <-> In x s /\ f x = true). Proof. induction s; simpl. intuition; inv. intros. destruct (f a) eqn:E; rewrite ?InA_cons, IHs; intuition. setoid_replace x with a; auto. setoid_replace a with x in E; auto. congruence. Qed. Global Instance filter_ok s f `(Ok s) : Ok (filter f s). Proof. induction s; simpl. auto. intros; inv. case (f a); auto. constructors; auto. contradict H0. eapply filter_spec'; eauto. Qed. Lemma for_all_spec : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Proof. unfold For_all; induction s; simpl. intuition. inv. intros; inv. destruct (f a) eqn:F. rewrite IHs; intuition. inv; auto. setoid_replace x with a; auto. split; intros H'; try discriminate. intros. rewrite <- F, <- (H' a); auto. Qed. Lemma exists_spec : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Proof. unfold Exists; induction s; simpl. split; [discriminate| intros (x & Hx & _); inv]. intros. destruct (f a) eqn:F. split; auto. exists a; auto. rewrite IHs; firstorder. inv. setoid_replace a with x in F; auto; congruence. exists x; auto. Qed. Lemma partition_spec1 : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> Equal (fst (partition f s)) (filter f s). Proof. simple induction s; simpl; auto; unfold Equal. firstorder. intros x l Hrec f Hf. generalize (Hrec f Hf); clear Hrec. case (partition f l); intros s1 s2; simpl; intros. case (f x); simpl; firstorder; inversion H0; intros; firstorder. Qed. Lemma partition_spec2 : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> Equal (snd (partition f s)) (filter (fun x => negb (f x)) s). Proof. simple induction s; simpl; auto; unfold Equal. firstorder. intros x l Hrec f Hf. generalize (Hrec f Hf); clear Hrec. case (partition f l); intros s1 s2; simpl; intros. case (f x); simpl; firstorder; inversion H0; intros; firstorder. Qed. 
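(* Added remark: the [Proper (X.eq==>eq) f] hypotheses in the specifications above
   are essential: the boolean test must return the same value on [X.eq]-equivalent
   elements, otherwise [filter], [for_all], [exists_] and [partition] could tell
   apart elements that the set considers equal, and the stated equivalences would
   not hold. *)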
Lemma partition_ok1' : forall (s : t) {Hs : Ok s} (f : elt -> bool)(x:elt), In x (fst (partition f s)) -> In x s. Proof. induction s; simpl; auto; intros. inv. generalize (IHs H1 f x). destruct (f a); destruct (partition f s); simpl in *; auto. inversion_clear H; auto. Qed. Lemma partition_ok2' : forall (s : t) {Hs : Ok s} (f : elt -> bool)(x:elt), In x (snd (partition f s)) -> In x s. Proof. induction s; simpl; auto; intros. inv. generalize (IHs H1 f x). destruct (f a); destruct (partition f s); simpl in *; auto. inversion_clear H; auto. Qed. Global Instance partition_ok1 : forall s f `(Ok s), Ok (fst (partition f s)). Proof. simple induction s; simpl. auto. intros x l Hrec f Hs; inv. generalize (@partition_ok1' _ _ f x). generalize (Hrec f H0). case (f x); case (partition f l); simpl; constructors; auto. Qed. Global Instance partition_ok2 : forall s f `(Ok s), Ok (snd (partition f s)). Proof. simple induction s; simpl. auto. intros x l Hrec f Hs; inv. generalize (@partition_ok2' _ _ f x). generalize (Hrec f H0). case (f x); case (partition f l); simpl; constructors; auto. Qed. End ForNotations. Definition In := InA X.eq. Definition eq := Equal. Instance eq_equiv : Equivalence eq := _. End MakeRaw. (** * Encapsulation Now, in order to really provide a functor implementing [S], we need to encapsulate everything into a type of lists without redundancy. *) Module Make (X: DecidableType) <: WSets with Module E := X. Module Raw := MakeRaw X. Include WRaw2Sets X Raw. End Make. coq-8.4pl2/theories/MSets/vo.itarget0000640000175000001440000000027611742067507016527 0ustar notinusersMSetGenTree.vo MSetAVL.vo MSetRBT.vo MSetDecide.vo MSetEqProperties.vo MSetFacts.vo MSetInterface.vo MSetList.vo MSetProperties.vo MSets.vo MSetToFiniteSet.vo MSetWeakList.vo MSetPositive.vocoq-8.4pl2/theories/MSets/MSetRBT.v0000640000175000001440000015156012007751764016134 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* option (elt * t). Axiom remove_min_spec1 : forall s k s', remove_min s = Some (k,s') -> min_elt s = Some k /\ remove k s [=] s'. Axiom remove_min_spec2 : forall s, remove_min s = None -> Empty s. End MSetRemoveMin. (** The type of color annotation. *) Inductive color := Red | Black. Module Color. Definition t := color. End Color. (** * Ops : the pure functions *) Module Ops (X:Orders.OrderedType) <: MSetInterface.Ops X. (** ** Generic trees instantiated with color *) (** We reuse a generic definition of trees where the information parameter is a color. Functions like mem or fold are also provided by this generic functor. *) Include MSetGenTree.Ops X Color. Definition t := tree. Local Notation Rd := (Node Red). Local Notation Bk := (Node Black). (** ** Basic tree *) Definition singleton (k: elt) : tree := Bk Leaf k Leaf. (** ** Changing root color *) Definition makeBlack t := match t with | Leaf => Leaf | Node _ a x b => Bk a x b end. Definition makeRed t := match t with | Leaf => Leaf | Node _ a x b => Rd a x b end. (** ** Balancing *) (** We adapt when one side is not a true red-black tree. Both sides have the same black depth. *) Definition lbal l k r := match l with | Rd (Rd a x b) y c => Rd (Bk a x b) y (Bk c k r) | Rd a x (Rd b y c) => Rd (Bk a x b) y (Bk c k r) | _ => Bk l k r end. Definition rbal l k r := match r with | Rd (Rd b y c) z d => Rd (Bk l k b) y (Bk c z d) | Rd b y (Rd c z d) => Rd (Bk l k b) y (Bk c z d) | _ => Bk l k r end. (** A variant of [rbal], with reverse pattern order. 
Is it really useful ? Should we always use it ? *) Definition rbal' l k r := match r with | Rd b y (Rd c z d) => Rd (Bk l k b) y (Bk c z d) | Rd (Rd b y c) z d => Rd (Bk l k b) y (Bk c z d) | _ => Bk l k r end. (** Balancing with different black depth. One side is almost a red-black tree, while the other is a true red-black tree, but with black depth + 1. Used in deletion. *) Definition lbalS l k r := match l with | Rd a x b => Rd (Bk a x b) k r | _ => match r with | Bk a y b => rbal' l k (Rd a y b) | Rd (Bk a y b) z c => Rd (Bk l k a) y (rbal' b z (makeRed c)) | _ => Rd l k r (* impossible *) end end. Definition rbalS l k r := match r with | Rd b y c => Rd l k (Bk b y c) | _ => match l with | Bk a x b => lbal (Rd a x b) k r | Rd a x (Bk b y c) => Rd (lbal (makeRed a) x b) y (Bk c k r) | _ => Rd l k r (* impossible *) end end. (** ** Insertion *) Fixpoint ins x s := match s with | Leaf => Rd Leaf x Leaf | Node c l y r => match X.compare x y with | Eq => s | Lt => match c with | Red => Rd (ins x l) y r | Black => lbal (ins x l) y r end | Gt => match c with | Red => Rd l y (ins x r) | Black => rbal l y (ins x r) end end end. Definition add x s := makeBlack (ins x s). (** ** Deletion *) Fixpoint append (l:tree) : tree -> tree := match l with | Leaf => fun r => r | Node lc ll lx lr => fix append_l (r:tree) : tree := match r with | Leaf => l | Node rc rl rx rr => match lc, rc with | Red, Red => let lrl := append lr rl in match lrl with | Rd lr' x rl' => Rd (Rd ll lx lr') x (Rd rl' rx rr) | _ => Rd ll lx (Rd lrl rx rr) end | Black, Black => let lrl := append lr rl in match lrl with | Rd lr' x rl' => Rd (Bk ll lx lr') x (Bk rl' rx rr) | _ => lbalS ll lx (Bk lrl rx rr) end | Black, Red => Rd (append_l rl) rx rr | Red, Black => Rd ll lx (append lr r) end end end. Fixpoint del x t := match t with | Leaf => Leaf | Node _ a y b => match X.compare x y with | Eq => append a b | Lt => match a with | Bk _ _ _ => lbalS (del x a) y b | _ => Rd (del x a) y b end | Gt => match b with | Bk _ _ _ => rbalS a y (del x b) | _ => Rd a y (del x b) end end end. Definition remove x t := makeBlack (del x t). (** ** Removing minimal element *) Fixpoint delmin l x r : (elt * tree) := match l with | Leaf => (x,r) | Node lc ll lx lr => let (k,l') := delmin ll lx lr in match lc with | Black => (k, lbalS l' x r) | Red => (k, Rd l' x r) end end. Definition remove_min t : option (elt * tree) := match t with | Leaf => None | Node _ l x r => let (k,t) := delmin l x r in Some (k, makeBlack t) end. (** ** Tree-ification We rebuild a tree of size [if pred then n-1 else n] as soon as the list [l] has enough elements *) Definition bogus : tree * list elt := (Leaf, nil). Notation treeify_t := (list elt -> tree * list elt). Definition treeify_zero : treeify_t := fun acc => (Leaf,acc). Definition treeify_one : treeify_t := fun acc => match acc with | x::acc => (Rd Leaf x Leaf, acc) | _ => bogus end. Definition treeify_cont (f g : treeify_t) : treeify_t := fun acc => match f acc with | (l, x::acc) => match g acc with | (r, acc) => (Bk l x r, acc) end | _ => bogus end. Fixpoint treeify_aux (pred:bool)(n: positive) : treeify_t := match n with | xH => if pred then treeify_zero else treeify_one | xO n => treeify_cont (treeify_aux pred n) (treeify_aux true n) | xI n => treeify_cont (treeify_aux false n) (treeify_aux pred n) end. Fixpoint plength_aux (l:list elt)(p:positive) := match l with | nil => p | _::l => plength_aux l (Pos.succ p) end. Definition plength l := plength_aux l 1. 
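(* Added worked example (illustration only): for a three-element list [a;b;c] we
   have [plength [a;b;c] = 4], and [treeify_aux true 4 [a;b;c]] (used by [treeify]
   just below) builds the perfectly balanced tree
   [Bk (Bk Leaf a Leaf) b (Bk Leaf c Leaf)] and returns the empty remaining list:
   a call [treeify_aux pred n] consumes [n-1] elements of its accumulator when
   [pred] holds and [n] elements otherwise. *)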
Definition treeify (l:list elt) := fst (treeify_aux true (plength l) l). (** ** Filtering *) Fixpoint filter_aux (f: elt -> bool) s acc := match s with | Leaf => acc | Node _ l k r => let acc := filter_aux f r acc in if f k then filter_aux f l (k::acc) else filter_aux f l acc end. Definition filter (f: elt -> bool) (s: t) : t := treeify (filter_aux f s nil). Fixpoint partition_aux (f: elt -> bool) s acc1 acc2 := match s with | Leaf => (acc1,acc2) | Node _ sl k sr => let (acc1, acc2) := partition_aux f sr acc1 acc2 in if f k then partition_aux f sl (k::acc1) acc2 else partition_aux f sl acc1 (k::acc2) end. Definition partition (f: elt -> bool) (s:t) : t*t := let (ok,ko) := partition_aux f s nil nil in (treeify ok, treeify ko). (** ** Union, intersection, difference *) (** union of the elements of [l1] and [l2] into a third [acc] list. *) Fixpoint union_list l1 : list elt -> list elt -> list elt := match l1 with | nil => @rev_append _ | x::l1' => fix union_l1 l2 acc := match l2 with | nil => rev_append l1 acc | y::l2' => match X.compare x y with | Eq => union_list l1' l2' (x::acc) | Lt => union_l1 l2' (y::acc) | Gt => union_list l1' l2 (x::acc) end end end. Definition linear_union s1 s2 := treeify (union_list (rev_elements s1) (rev_elements s2) nil). Fixpoint inter_list l1 : list elt -> list elt -> list elt := match l1 with | nil => fun _ acc => acc | x::l1' => fix inter_l1 l2 acc := match l2 with | nil => acc | y::l2' => match X.compare x y with | Eq => inter_list l1' l2' (x::acc) | Lt => inter_l1 l2' acc | Gt => inter_list l1' l2 acc end end end. Definition linear_inter s1 s2 := treeify (inter_list (rev_elements s1) (rev_elements s2) nil). Fixpoint diff_list l1 : list elt -> list elt -> list elt := match l1 with | nil => fun _ acc => acc | x::l1' => fix diff_l1 l2 acc := match l2 with | nil => rev_append l1 acc | y::l2' => match X.compare x y with | Eq => diff_list l1' l2' acc | Lt => diff_l1 l2' acc | Gt => diff_list l1' l2 (x::acc) end end end. Definition linear_diff s1 s2 := treeify (diff_list (rev_elements s1) (rev_elements s2) nil). (** [compare_height] returns: - [Lt] if [height s2] is at least twice [height s1]; - [Gt] if [height s1] is at least twice [height s2]; - [Eq] if heights are approximately equal. Warning: this is not an equivalence relation! but who cares.... *) Definition skip_red t := match t with | Rd t' _ _ => t' | _ => t end. Definition skip_black t := match skip_red t with | Bk t' _ _ => t' | t' => t' end. Fixpoint compare_height (s1x s1 s2 s2x: tree) : comparison := match skip_red s1x, skip_red s1, skip_red s2, skip_red s2x with | Node _ s1x' _ _, Node _ s1' _ _, Node _ s2' _ _, Node _ s2x' _ _ => compare_height (skip_black s2x') s1' s2' (skip_black s2x') | _, Leaf, _, Node _ _ _ _ => Lt | Node _ _ _ _, _, Leaf, _ => Gt | Node _ s1x' _ _, Node _ s1' _ _, Node _ s2' _ _, Leaf => compare_height (skip_black s1x') s1' s2' Leaf | Leaf, Node _ s1' _ _, Node _ s2' _ _, Node _ s2x' _ _ => compare_height Leaf s1' s2' (skip_black s2x') | _, _, _, _ => Eq end. (** When one tree is quite smaller than the other, we simply adds repeatively all its elements in the big one. For trees of comparable height, we rather use [linear_union]. *) Definition union (t1 t2: t) : t := match compare_height t1 t1 t2 t2 with | Lt => fold add t1 t2 | Gt => fold add t2 t1 | Eq => linear_union t1 t2 end. Definition diff (t1 t2: t) : t := match compare_height t1 t1 t2 t2 with | Lt => filter (fun k => negb (mem k t2)) t1 | Gt => fold remove t2 t1 | Eq => linear_diff t1 t2 end. 
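(* Illustration of the list-level merge behind [linear_union] (element values
   assume a hypothetical numeric instantiation of [X]).  For s1 = {1,3,5} and
   s2 = {2,3}, [rev_elements] gives the decreasing lists 5::3::1::nil and
   3::2::nil, and

     union_list (5::3::1::nil) (3::2::nil) nil
       = union_list (3::1::nil) (3::2::nil) (5::nil)   (Gt: keep 5)
       = union_list (1::nil) (2::nil) (3::5::nil)      (Eq: keep one copy of 3)
       = rev_append (1::nil) (2::3::5::nil)            (Lt: keep 2, then l2 empty)
       = 1::2::3::5::nil

   an increasing, duplicate-free list that [treeify] turns back into a tree.
   The same height-based dispatch used by [union] and [diff] above appears
   again in [inter] just below. *)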
Definition inter (t1 t2: t) : t := match compare_height t1 t1 t2 t2 with | Lt => filter (fun k => mem k t2) t1 | Gt => filter (fun k => mem k t1) t2 | Eq => linear_inter t1 t2 end. End Ops. (** * MakeRaw : the pure functions and their specifications *) Module Type MakeRaw (X:Orders.OrderedType) <: MSetInterface.RawSets X. Include Ops X. (** Generic definition of binary-search-trees and proofs of specifications for generic functions such as mem or fold. *) Include MSetGenTree.Props X Color. Local Notation Rd := (Node Red). Local Notation Bk := (Node Black). Local Hint Immediate MX.eq_sym. Local Hint Unfold In lt_tree gt_tree Ok. Local Hint Constructors InT bst. Local Hint Resolve MX.eq_refl MX.eq_trans MX.lt_trans @ok. Local Hint Resolve lt_leaf gt_leaf lt_tree_node gt_tree_node. Local Hint Resolve lt_tree_not_in lt_tree_trans gt_tree_not_in gt_tree_trans. Local Hint Resolve elements_spec2. (** ** Singleton set *) Lemma singleton_spec x y : InT y (singleton x) <-> X.eq y x. Proof. unfold singleton; intuition_in. Qed. Instance singleton_ok x : Ok (singleton x). Proof. unfold singleton; auto. Qed. (** ** makeBlack, MakeRed *) Lemma makeBlack_spec s x : InT x (makeBlack s) <-> InT x s. Proof. destruct s; simpl; intuition_in. Qed. Lemma makeRed_spec s x : InT x (makeRed s) <-> InT x s. Proof. destruct s; simpl; intuition_in. Qed. Instance makeBlack_ok s `{Ok s} : Ok (makeBlack s). Proof. destruct s; simpl; ok. Qed. Instance makeRed_ok s `{Ok s} : Ok (makeRed s). Proof. destruct s; simpl; ok. Qed. (** ** Generic handling for red-matching and red-red-matching *) Definition isblack t := match t with Bk _ _ _ => True | _ => False end. Definition notblack t := match t with Bk _ _ _ => False | _ => True end. Definition notred t := match t with Rd _ _ _ => False | _ => True end. Definition rcase {A} f g t : A := match t with | Rd a x b => f a x b | _ => g t end. Inductive rspec {A} f g : tree -> A -> Prop := | rred a x b : rspec f g (Rd a x b) (f a x b) | relse t : notred t -> rspec f g t (g t). Fact rmatch {A} f g t : rspec (A:=A) f g t (rcase f g t). Proof. destruct t as [|[|] l x r]; simpl; now constructor. Qed. Definition rrcase {A} f g t : A := match t with | Rd (Rd a x b) y c => f a x b y c | Rd a x (Rd b y c) => f a x b y c | _ => g t end. Notation notredred := (rrcase (fun _ _ _ _ _ => False) (fun _ => True)). Inductive rrspec {A} f g : tree -> A -> Prop := | rrleft a x b y c : rrspec f g (Rd (Rd a x b) y c) (f a x b y c) | rrright a x b y c : rrspec f g (Rd a x (Rd b y c)) (f a x b y c) | rrelse t : notredred t -> rrspec f g t (g t). Fact rrmatch {A} f g t : rrspec (A:=A) f g t (rrcase f g t). Proof. destruct t as [|[|] l x r]; simpl; try now constructor. destruct l as [|[|] ll lx lr], r as [|[|] rl rx rr]; now constructor. Qed. Definition rrcase' {A} f g t : A := match t with | Rd a x (Rd b y c) => f a x b y c | Rd (Rd a x b) y c => f a x b y c | _ => g t end. Fact rrmatch' {A} f g t : rrspec (A:=A) f g t (rrcase' f g t). Proof. destruct t as [|[|] l x r]; simpl; try now constructor. destruct l as [|[|] ll lx lr], r as [|[|] rl rx rr]; now constructor. Qed. (** Balancing operations are instances of generic match *) Fact lbal_match l k r : rrspec (fun a x b y c => Rd (Bk a x b) y (Bk c k r)) (fun l => Bk l k r) l (lbal l k r). Proof. exact (rrmatch _ _ _). Qed. Fact rbal_match l k r : rrspec (fun a x b y c => Rd (Bk l k a) x (Bk b y c)) (fun r => Bk l k r) r (rbal l k r). Proof. exact (rrmatch _ _ _). Qed. 
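(* Illustration of the red-red pattern captured by [lbal_match] (element
   values assume a hypothetical numeric instantiation of [X]).  Inserting
   3, then 2, then 1 into the empty tree leads, just before the last
   rebalancing step, to a red-red chain on the left, which [lbal] rotates:

     lbal (Rd (Rd Leaf 1 Leaf) 2 Leaf) 3 Leaf
       = Rd (Bk Leaf 1 Leaf) 2 (Bk Leaf 3 Leaf)

   i.e. the first branch of [lbal] promotes 2 to the root, and [makeBlack]
   (as done by [add]) then blackens that root. *)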
Fact rbal'_match l k r : rrspec (fun a x b y c => Rd (Bk l k a) x (Bk b y c)) (fun r => Bk l k r) r (rbal' l k r). Proof. exact (rrmatch' _ _ _). Qed. Fact lbalS_match l x r : rspec (fun a y b => Rd (Bk a y b) x r) (fun l => match r with | Bk a y b => rbal' l x (Rd a y b) | Rd (Bk a y b) z c => Rd (Bk l x a) y (rbal' b z (makeRed c)) | _ => Rd l x r end) l (lbalS l x r). Proof. exact (rmatch _ _ _). Qed. Fact rbalS_match l x r : rspec (fun a y b => Rd l x (Bk a y b)) (fun r => match l with | Bk a y b => lbal (Rd a y b) x r | Rd a y (Bk b z c) => Rd (lbal (makeRed a) y b) z (Bk c x r) | _ => Rd l x r end) r (rbalS l x r). Proof. exact (rmatch _ _ _). Qed. (** ** Balancing for insertion *) Lemma lbal_spec l x r y : InT y (lbal l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. case lbal_match; intuition_in. Qed. Instance lbal_ok l x r `(Ok l, Ok r, lt_tree x l, gt_tree x r) : Ok (lbal l x r). Proof. destruct (lbal_match l x r); ok. Qed. Lemma rbal_spec l x r y : InT y (rbal l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. case rbal_match; intuition_in. Qed. Instance rbal_ok l x r `(Ok l, Ok r, lt_tree x l, gt_tree x r) : Ok (rbal l x r). Proof. destruct (rbal_match l x r); ok. Qed. Lemma rbal'_spec l x r y : InT y (rbal' l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. case rbal'_match; intuition_in. Qed. Instance rbal'_ok l x r `(Ok l, Ok r, lt_tree x l, gt_tree x r) : Ok (rbal' l x r). Proof. destruct (rbal'_match l x r); ok. Qed. Hint Rewrite In_node_iff In_leaf_iff makeRed_spec makeBlack_spec lbal_spec rbal_spec rbal'_spec : rb. Ltac descolor := destruct_all Color.t. Ltac destree t := destruct t as [|[|] ? ? ?]. Ltac autorew := autorewrite with rb. Tactic Notation "autorew" "in" ident(H) := autorewrite with rb in H. (** ** Insertion *) Lemma ins_spec : forall s x y, InT y (ins x s) <-> X.eq y x \/ InT y s. Proof. induct s x. - intuition_in. - intuition_in. setoid_replace y with x; eauto. - descolor; autorew; rewrite IHl; intuition_in. - descolor; autorew; rewrite IHr; intuition_in. Qed. Hint Rewrite ins_spec : rb. Instance ins_ok s x `{Ok s} : Ok (ins x s). Proof. induct s x; auto; descolor; (apply lbal_ok || apply rbal_ok || ok); auto; intros y; autorew; intuition; order. Qed. Lemma add_spec' s x y : InT y (add x s) <-> X.eq y x \/ InT y s. Proof. unfold add. now autorew. Qed. Hint Rewrite add_spec' : rb. Lemma add_spec s x y `{Ok s} : InT y (add x s) <-> X.eq y x \/ InT y s. Proof. apply add_spec'. Qed. Instance add_ok s x `{Ok s} : Ok (add x s). Proof. unfold add; auto_tc. Qed. (** ** Balancing for deletion *) Lemma lbalS_spec l x r y : InT y (lbalS l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. case lbalS_match. - intros; autorew; intuition_in. - clear l. intros l _. destruct r as [|[|] rl rx rr]. * autorew. intuition_in. * destree rl; autorew; intuition_in. * autorew. intuition_in. Qed. Instance lbalS_ok l x r : forall `(Ok l, Ok r, lt_tree x l, gt_tree x r), Ok (lbalS l x r). Proof. case lbalS_match; intros. - ok. - destruct r as [|[|] rl rx rr]. * ok. * destruct rl as [|[|] rll rlx rlr]; intros; ok. + apply rbal'_ok; ok. intros w; autorew; auto. + intros w; autorew. destruct 1 as [Hw|[Hw|Hw]]; try rewrite Hw; eauto. * ok. autorew. apply rbal'_ok; ok. Qed. Lemma rbalS_spec l x r y : InT y (rbalS l x r) <-> X.eq y x \/ InT y l \/ InT y r. Proof. case rbalS_match. - intros; autorew; intuition_in. - intros t _. destruct l as [|[|] ll lx lr]. * autorew. intuition_in. * destruct lr as [|[|] lrl lrx lrr]; autorew; intuition_in. * autorew. intuition_in. Qed. 
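(* A reading aid for the deletion balancers, sketched on the simplest case
   (this is an intuition, not a statement proved here): after [del] has
   removed a node inside a black left subtree, that side is one black level
   short.  If its root is red, blackening it already restores the count,
   which is exactly the first branch of [lbalS]:

     lbalS (Rd a x b) k r = Rd (Bk a x b) k r

   otherwise the missing black is borrowed from the right sibling through
   [rbal'].  [rbalS] handles the mirror situation for the right subtree. *)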
Instance rbalS_ok l x r : forall `(Ok l, Ok r, lt_tree x l, gt_tree x r), Ok (rbalS l x r). Proof. case rbalS_match; intros. - ok. - destruct l as [|[|] ll lx lr]. * ok. * destruct lr as [|[|] lrl lrx lrr]; intros; ok. + apply lbal_ok; ok. intros w; autorew; auto. + intros w; autorew. destruct 1 as [Hw|[Hw|Hw]]; try rewrite Hw; eauto. * ok. apply lbal_ok; ok. Qed. Hint Rewrite lbalS_spec rbalS_spec : rb. (** ** Append for deletion *) Ltac append_tac l r := induction l as [| lc ll _ lx lr IHlr]; [intro r; simpl |induction r as [| rc rl IHrl rx rr _]; [simpl |destruct lc, rc; [specialize (IHlr rl); clear IHrl |simpl; assert (Hr:notred (Bk rl rx rr)) by (simpl; trivial); set (r:=Bk rl rx rr) in *; clearbody r; clear IHrl rl rx rr; specialize (IHlr r) |change (append _ _) with (Rd (append (Bk ll lx lr) rl) rx rr); assert (Hl:notred (Bk ll lx lr)) by (simpl; trivial); set (l:=Bk ll lx lr) in *; clearbody l; clear IHlr ll lx lr |specialize (IHlr rl); clear IHrl]]]. Fact append_rr_match ll lx lr rl rx rr : rspec (fun a x b => Rd (Rd ll lx a) x (Rd b rx rr)) (fun t => Rd ll lx (Rd t rx rr)) (append lr rl) (append (Rd ll lx lr) (Rd rl rx rr)). Proof. exact (rmatch _ _ _). Qed. Fact append_bb_match ll lx lr rl rx rr : rspec (fun a x b => Rd (Bk ll lx a) x (Bk b rx rr)) (fun t => lbalS ll lx (Bk t rx rr)) (append lr rl) (append (Bk ll lx lr) (Bk rl rx rr)). Proof. exact (rmatch _ _ _). Qed. Lemma append_spec l r x : InT x (append l r) <-> InT x l \/ InT x r. Proof. revert r. append_tac l r; autorew; try tauto. - (* Red / Red *) revert IHlr; case append_rr_match; [intros a y b | intros t Ht]; autorew; tauto. - (* Black / Black *) revert IHlr; case append_bb_match; [intros a y b | intros t Ht]; autorew; tauto. Qed. Hint Rewrite append_spec : rb. Lemma append_ok : forall x l r `{Ok l, Ok r}, lt_tree x l -> gt_tree x r -> Ok (append l r). Proof. append_tac l r. - (* Leaf / _ *) trivial. - (* _ / Leaf *) trivial. - (* Red / Red *) intros; inv. assert (IH : Ok (append lr rl)) by (apply IHlr; eauto). clear IHlr. assert (X.lt lx rx) by (transitivity x; eauto). assert (G : gt_tree lx (append lr rl)). { intros w. autorew. destruct 1; [|transitivity x]; eauto. } assert (L : lt_tree rx (append lr rl)). { intros w. autorew. destruct 1; [transitivity x|]; eauto. } revert IH G L; case append_rr_match; intros; ok. - (* Red / Black *) intros; ok. intros w; autorew; destruct 1; eauto. - (* Black / Red *) intros; ok. intros w; autorew; destruct 1; eauto. - (* Black / Black *) intros; inv. assert (IH : Ok (append lr rl)) by (apply IHlr; eauto). clear IHlr. assert (X.lt lx rx) by (transitivity x; eauto). assert (G : gt_tree lx (append lr rl)). { intros w. autorew. destruct 1; [|transitivity x]; eauto. } assert (L : lt_tree rx (append lr rl)). { intros w. autorew. destruct 1; [transitivity x|]; eauto. } revert IH G L; case append_bb_match; intros; ok. apply lbalS_ok; ok. Qed. (** ** Deletion *) Lemma del_spec : forall s x y `{Ok s}, InT y (del x s) <-> InT y s /\ ~X.eq y x. Proof. induct s x. - intuition_in. - autorew; intuition_in. assert (X.lt y x') by eauto. order. assert (X.lt x' y) by eauto. order. order. - destruct l as [|[|] ll lx lr]; autorew; rewrite ?IHl by trivial; intuition_in; order. - destruct r as [|[|] rl rx rr]; autorew; rewrite ?IHr by trivial; intuition_in; order. Qed. Hint Rewrite del_spec : rb. Instance del_ok s x `{Ok s} : Ok (del x s). Proof. induct s x. - trivial. - eapply append_ok; eauto. - assert (lt_tree x' (del x l)). { intro w. autorew; trivial. destruct 1. eauto. 
} destruct l as [|[|] ll lx lr]; auto_tc. - assert (gt_tree x' (del x r)). { intro w. autorew; trivial. destruct 1. eauto. } destruct r as [|[|] rl rx rr]; auto_tc. Qed. Lemma remove_spec s x y `{Ok s} : InT y (remove x s) <-> InT y s /\ ~X.eq y x. Proof. unfold remove. now autorew. Qed. Hint Rewrite remove_spec : rb. Instance remove_ok s x `{Ok s} : Ok (remove x s). Proof. unfold remove; auto_tc. Qed. (** ** Removing the minimal element *) Lemma delmin_spec l y r c x s' `{O : Ok (Node c l y r)} : delmin l y r = (x,s') -> min_elt (Node c l y r) = Some x /\ del x (Node c l y r) = s'. Proof. revert y r c x s' O. induction l as [|lc ll IH ly lr _]. - simpl. intros y r _ x s' _. injection 1; intros; subst. now rewrite MX.compare_refl. - intros y r c x s' O. simpl delmin. specialize (IH ly lr). destruct delmin as (x0,s0). destruct (IH lc x0 s0); clear IH; [ok|trivial|]. remember (Node lc ll ly lr) as l. simpl min_elt in *. intros E. replace x0 with x in * by (destruct lc; now injection E). split. * subst l; intuition. * assert (X.lt x y). { inversion_clear O. assert (InT x l) by now apply min_elt_spec1. auto. } simpl. case X.compare_spec; try order. destruct lc; injection E; clear E; intros; subst l s0; auto. Qed. Lemma remove_min_spec1 s x s' `{Ok s}: remove_min s = Some (x,s') -> min_elt s = Some x /\ remove x s = s'. Proof. unfold remove_min. destruct s as [|c l y r]; try easy. generalize (delmin_spec l y r c). destruct delmin as (x0,s0). intros D. destruct (D x0 s0) as (->,<-); auto. fold (remove x0 (Node c l y r)). inversion_clear 1; auto. Qed. Lemma remove_min_spec2 s : remove_min s = None -> Empty s. Proof. unfold remove_min. destruct s as [|c l y r]. - easy. - now destruct delmin. Qed. Lemma remove_min_ok (s:t) `{Ok s}: match remove_min s with | Some (_,s') => Ok s' | None => True end. Proof. generalize (remove_min_spec1 s). destruct remove_min as [(x0,s0)|]; auto. intros R. destruct (R x0 s0); auto. subst s0. auto_tc. Qed. (** ** Treeify *) Notation ifpred p n := (if p then pred n else n%nat). Definition treeify_invariant size (f:treeify_t) := forall acc, size <= length acc -> let (t,acc') := f acc in cardinal t = size /\ acc = elements t ++ acc'. Lemma treeify_zero_spec : treeify_invariant 0 treeify_zero. Proof. intro. simpl. auto. Qed. Lemma treeify_one_spec : treeify_invariant 1 treeify_one. Proof. intros [|x acc]; simpl; auto; inversion 1. Qed. Lemma treeify_cont_spec f g size1 size2 size : treeify_invariant size1 f -> treeify_invariant size2 g -> size = S (size1 + size2) -> treeify_invariant size (treeify_cont f g). Proof. intros Hf Hg EQ acc LE. unfold treeify_cont. specialize (Hf acc). destruct (f acc) as (t1,acc1). destruct Hf as (Hf1,Hf2). { transitivity size; trivial. subst. auto with arith. } destruct acc1 as [|x acc1]. { exfalso. revert LE. apply Nat.lt_nge. subst. rewrite <- app_nil_end, <- elements_cardinal; auto with arith. } specialize (Hg acc1). destruct (g acc1) as (t2,acc2). destruct Hg as (Hg1,Hg2). { revert LE. subst. rewrite app_length, <- elements_cardinal. simpl. rewrite Nat.add_succ_r, <- Nat.succ_le_mono. apply Nat.add_le_mono_l. } simpl. rewrite elements_node, app_ass. now subst. Qed. Lemma treeify_aux_spec n (p:bool) : treeify_invariant (ifpred p (Pos.to_nat n)) (treeify_aux p n). Proof. revert p. induction n as [n|n|]; intros p; simpl treeify_aux. - eapply treeify_cont_spec; [ apply (IHn false) | apply (IHn p) | ]. rewrite Pos2Nat.inj_xI. assert (H := Pos2Nat.is_pos n). apply Nat.neq_0_lt_0 in H. destruct p; simpl; intros; rewrite Nat.add_0_r; trivial. 
now rewrite <- Nat.add_succ_r, Nat.succ_pred; trivial. - eapply treeify_cont_spec; [ apply (IHn p) | apply (IHn true) | ]. rewrite Pos2Nat.inj_xO. assert (H := Pos2Nat.is_pos n). apply Nat.neq_0_lt_0 in H. rewrite <- Nat.add_succ_r, Nat.succ_pred by trivial. destruct p; simpl; intros; rewrite Nat.add_0_r; trivial. symmetry. now apply Nat.add_pred_l. - destruct p; [ apply treeify_zero_spec | apply treeify_one_spec ]. Qed. Lemma plength_aux_spec l p : Pos.to_nat (plength_aux l p) = length l + Pos.to_nat p. Proof. revert p. induction l; simpl; trivial. intros. now rewrite IHl, Pos2Nat.inj_succ, Nat.add_succ_r. Qed. Lemma plength_spec l : Pos.to_nat (plength l) = S (length l). Proof. unfold plength. rewrite plength_aux_spec. apply Nat.add_1_r. Qed. Lemma treeify_elements l : elements (treeify l) = l. Proof. assert (H := treeify_aux_spec (plength l) true l). unfold treeify. destruct treeify_aux as (t,acc); simpl in *. destruct H as (H,H'). { now rewrite plength_spec. } subst l. rewrite plength_spec, app_length, <- elements_cardinal in *. destruct acc. * now rewrite app_nil_r. * exfalso. revert H. simpl. rewrite Nat.add_succ_r, Nat.add_comm. apply Nat.succ_add_discr. Qed. Lemma treeify_spec x l : InT x (treeify l) <-> InA X.eq x l. Proof. intros. now rewrite <- elements_spec1, treeify_elements. Qed. Lemma treeify_ok l : sort X.lt l -> Ok (treeify l). Proof. intros. apply elements_sort_ok. rewrite treeify_elements; auto. Qed. (** ** Filter *) Lemma filter_app A f (l l':list A) : List.filter f (l ++ l') = List.filter f l ++ List.filter f l'. Proof. induction l as [|x l IH]; simpl; trivial. destruct (f x); simpl; now rewrite IH. Qed. Lemma filter_aux_elements s f acc : filter_aux f s acc = List.filter f (elements s) ++ acc. Proof. revert acc. induction s as [|c l IHl x r IHr]; simpl; trivial. intros acc. rewrite elements_node, filter_app. simpl. destruct (f x); now rewrite IHl, IHr, app_ass. Qed. Lemma filter_elements s f : elements (filter f s) = List.filter f (elements s). Proof. unfold filter. now rewrite treeify_elements, filter_aux_elements, app_nil_r. Qed. Lemma filter_spec s x f : Proper (X.eq==>Logic.eq) f -> (InT x (filter f s) <-> InT x s /\ f x = true). Proof. intros Hf. rewrite <- elements_spec1, filter_elements, filter_InA, elements_spec1; now auto_tc. Qed. Instance filter_ok s f `(Ok s) : Ok (filter f s). Proof. apply elements_sort_ok. rewrite filter_elements. apply filter_sort with X.eq; auto_tc. Qed. (** ** Partition *) Lemma partition_aux_spec s f acc1 acc2 : partition_aux f s acc1 acc2 = (filter_aux f s acc1, filter_aux (fun x => negb (f x)) s acc2). Proof. revert acc1 acc2. induction s as [ | c l Hl x r Hr ]; simpl. - trivial. - intros acc1 acc2. destruct (f x); simpl; now rewrite Hr, Hl. Qed. Lemma partition_spec s f : partition f s = (filter f s, filter (fun x => negb (f x)) s). Proof. unfold partition, filter. now rewrite partition_aux_spec. Qed. Lemma partition_spec1 s f : Proper (X.eq==>Logic.eq) f -> Equal (fst (partition f s)) (filter f s). Proof. now rewrite partition_spec. Qed. Lemma partition_spec2 s f : Proper (X.eq==>Logic.eq) f -> Equal (snd (partition f s)) (filter (fun x => negb (f x)) s). Proof. now rewrite partition_spec. Qed. Instance partition_ok1 s f `(Ok s) : Ok (fst (partition f s)). Proof. rewrite partition_spec; now apply filter_ok. Qed. Instance partition_ok2 s f `(Ok s) : Ok (snd (partition f s)). Proof. rewrite partition_spec; now apply filter_ok. Qed. (** ** An invariant for binary list functions with accumulator. 
*) Ltac inA := rewrite ?InA_app_iff, ?InA_cons, ?InA_nil, ?InA_rev in *; auto_tc. Record INV l1 l2 acc : Prop := { l1_sorted : sort X.lt (rev l1); l2_sorted : sort X.lt (rev l2); acc_sorted : sort X.lt acc; l1_lt_acc x y : InA X.eq x l1 -> InA X.eq y acc -> X.lt x y; l2_lt_acc x y : InA X.eq x l2 -> InA X.eq y acc -> X.lt x y}. Local Hint Resolve l1_sorted l2_sorted acc_sorted. Lemma INV_init s1 s2 `(Ok s1, Ok s2) : INV (rev_elements s1) (rev_elements s2) nil. Proof. rewrite !rev_elements_rev. split; rewrite ?rev_involutive; auto; intros; now inA. Qed. Lemma INV_sym l1 l2 acc : INV l1 l2 acc -> INV l2 l1 acc. Proof. destruct 1; now split. Qed. Lemma INV_drop x1 l1 l2 acc : INV (x1 :: l1) l2 acc -> INV l1 l2 acc. Proof. intros (l1s,l2s,accs,l1a,l2a). simpl in *. destruct (sorted_app_inv _ _ l1s) as (U & V & W); auto. split; auto. Qed. Lemma INV_eq x1 x2 l1 l2 acc : INV (x1 :: l1) (x2 :: l2) acc -> X.eq x1 x2 -> INV l1 l2 (x1 :: acc). Proof. intros (U,V,W,X,Y) EQ. simpl in *. destruct (sorted_app_inv _ _ U) as (U1 & U2 & U3); auto. destruct (sorted_app_inv _ _ V) as (V1 & V2 & V3); auto. split; auto. - constructor; auto. apply InA_InfA with X.eq; auto_tc. - intros x y; inA; intros Hx [Hy|Hy]. + apply U3; inA. + apply X; inA. - intros x y; inA; intros Hx [Hy|Hy]. + rewrite Hy, EQ; apply V3; inA. + apply Y; inA. Qed. Lemma INV_lt x1 x2 l1 l2 acc : INV (x1 :: l1) (x2 :: l2) acc -> X.lt x1 x2 -> INV (x1 :: l1) l2 (x2 :: acc). Proof. intros (U,V,W,X,Y) EQ. simpl in *. destruct (sorted_app_inv _ _ U) as (U1 & U2 & U3); auto. destruct (sorted_app_inv _ _ V) as (V1 & V2 & V3); auto. split; auto. - constructor; auto. apply InA_InfA with X.eq; auto_tc. - intros x y; inA; intros Hx [Hy|Hy]. + rewrite Hy; clear Hy. destruct Hx; [order|]. transitivity x1; auto. apply U3; inA. + apply X; inA. - intros x y; inA; intros Hx [Hy|Hy]. + rewrite Hy. apply V3; inA. + apply Y; inA. Qed. Lemma INV_rev l1 l2 acc : INV l1 l2 acc -> Sorted X.lt (rev_append l1 acc). Proof. intros. rewrite rev_append_rev. apply SortA_app with X.eq; eauto with *. intros x y. inA. eapply l1_lt_acc; eauto. Qed. (** ** union *) Lemma union_list_ok l1 l2 acc : INV l1 l2 acc -> sort X.lt (union_list l1 l2 acc). Proof. revert l2 acc. induction l1 as [|x1 l1 IH1]; [intro l2|induction l2 as [|x2 l2 IH2]]; intros acc inv. - eapply INV_rev, INV_sym; eauto. - eapply INV_rev; eauto. - simpl. case X.compare_spec; intro C. * apply IH1. eapply INV_eq; eauto. * apply (IH2 (x2::acc)). eapply INV_lt; eauto. * apply IH1. eapply INV_sym, INV_lt; eauto. now apply INV_sym. Qed. Instance linear_union_ok s1 s2 `(Ok s1, Ok s2) : Ok (linear_union s1 s2). Proof. unfold linear_union. now apply treeify_ok, union_list_ok, INV_init. Qed. Instance fold_add_ok s1 s2 `(Ok s1, Ok s2) : Ok (fold add s1 s2). Proof. rewrite fold_spec, <- fold_left_rev_right. unfold elt in *. induction (rev (elements s1)); simpl; unfold flip in *; auto_tc. Qed. Instance union_ok s1 s2 `(Ok s1, Ok s2) : Ok (union s1 s2). Proof. unfold union. destruct compare_height; auto_tc. Qed. Lemma union_list_spec x l1 l2 acc : InA X.eq x (union_list l1 l2 acc) <-> InA X.eq x l1 \/ InA X.eq x l2 \/ InA X.eq x acc. Proof. revert l2 acc. induction l1 as [|x1 l1 IH1]. - intros l2 acc; simpl. rewrite rev_append_rev. inA. tauto. - induction l2 as [|x2 l2 IH2]; intros acc; simpl. * rewrite rev_append_rev. inA. tauto. * case X.compare_spec; intro C. + rewrite IH1, !InA_cons, C; tauto. + rewrite (IH2 (x2::acc)), !InA_cons. tauto. + rewrite IH1, !InA_cons; tauto. Qed. 
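(* A concrete instance of the [INV] invariant above (element values assume a
   hypothetical numeric instantiation of [X]).  Running [union_list] on the
   reversed element lists of {1,3,5} and {2,3}, the first step (comparison
   [Gt] on 5 and 3) moves 5 to the accumulator and leaves

     INV (3::1::nil) (3::2::nil) (5::nil)

   both remaining lists are still decreasing, the accumulator is increasing,
   and every pending element is below every accumulated one; the [Eq] step
   then yields INV (1::nil) (2::nil) (3::5::nil), matching [INV_eq]. *)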
Lemma linear_union_spec s1 s2 x : InT x (linear_union s1 s2) <-> InT x s1 \/ InT x s2. Proof. unfold linear_union. rewrite treeify_spec, union_list_spec, !rev_elements_rev. rewrite !InA_rev, InA_nil, !elements_spec1 by auto_tc. tauto. Qed. Lemma fold_add_spec s1 s2 x : InT x (fold add s1 s2) <-> InT x s1 \/ InT x s2. Proof. rewrite fold_spec, <- fold_left_rev_right. rewrite <- (elements_spec1 s1), <- InA_rev by auto_tc. unfold elt in *. induction (rev (elements s1)); simpl. - rewrite InA_nil. tauto. - unfold flip. rewrite add_spec', IHl, InA_cons. tauto. Qed. Lemma union_spec' s1 s2 x : InT x (union s1 s2) <-> InT x s1 \/ InT x s2. Proof. unfold union. destruct compare_height. - apply linear_union_spec. - apply fold_add_spec. - rewrite fold_add_spec. tauto. Qed. Lemma union_spec : forall s1 s2 y `{Ok s1, Ok s2}, (InT y (union s1 s2) <-> InT y s1 \/ InT y s2). Proof. intros; apply union_spec'. Qed. (** ** inter *) Lemma inter_list_ok l1 l2 acc : INV l1 l2 acc -> sort X.lt (inter_list l1 l2 acc). Proof. revert l2 acc. induction l1 as [|x1 l1 IH1]; [|induction l2 as [|x2 l2 IH2]]; simpl. - eauto. - eauto. - intros acc inv. case X.compare_spec; intro C. * apply IH1. eapply INV_eq; eauto. * apply (IH2 acc). eapply INV_sym, INV_drop, INV_sym; eauto. * apply IH1. eapply INV_drop; eauto. Qed. Instance linear_inter_ok s1 s2 `(Ok s1, Ok s2) : Ok (linear_inter s1 s2). Proof. unfold linear_inter. now apply treeify_ok, inter_list_ok, INV_init. Qed. Instance inter_ok s1 s2 `(Ok s1, Ok s2) : Ok (inter s1 s2). Proof. unfold inter. destruct compare_height; auto_tc. Qed. Lemma inter_list_spec x l1 l2 acc : sort X.lt (rev l1) -> sort X.lt (rev l2) -> (InA X.eq x (inter_list l1 l2 acc) <-> (InA X.eq x l1 /\ InA X.eq x l2) \/ InA X.eq x acc). Proof. revert l2 acc. induction l1 as [|x1 l1 IH1]. - intros l2 acc; simpl. inA. tauto. - induction l2 as [|x2 l2 IH2]; intros acc. * simpl. inA. tauto. * simpl. intros U V. destruct (sorted_app_inv _ _ U) as (U1 & U2 & U3); auto. destruct (sorted_app_inv _ _ V) as (V1 & V2 & V3); auto. case X.compare_spec; intro C. + rewrite IH1, !InA_cons, C; tauto. + rewrite (IH2 acc); auto. inA. intuition; try order. assert (X.lt x x1) by (apply U3; inA). order. + rewrite IH1; auto. inA. intuition; try order. assert (X.lt x x2) by (apply V3; inA). order. Qed. Lemma linear_inter_spec s1 s2 x `(Ok s1, Ok s2) : InT x (linear_inter s1 s2) <-> InT x s1 /\ InT x s2. Proof. unfold linear_inter. rewrite !rev_elements_rev, treeify_spec, inter_list_spec by (rewrite rev_involutive; auto_tc). rewrite !InA_rev, InA_nil, !elements_spec1 by auto_tc. tauto. Qed. Local Instance mem_proper s `(Ok s) : Proper (X.eq ==> Logic.eq) (fun k => mem k s). Proof. intros x y EQ. apply Bool.eq_iff_eq_true; rewrite !mem_spec; auto. now rewrite EQ. Qed. Lemma inter_spec s1 s2 y `{Ok s1, Ok s2} : InT y (inter s1 s2) <-> InT y s1 /\ InT y s2. Proof. unfold inter. destruct compare_height. - now apply linear_inter_spec. - rewrite filter_spec, mem_spec by auto_tc; tauto. - rewrite filter_spec, mem_spec by auto_tc; tauto. Qed. (** ** difference *) Lemma diff_list_ok l1 l2 acc : INV l1 l2 acc -> sort X.lt (diff_list l1 l2 acc). Proof. revert l2 acc. induction l1 as [|x1 l1 IH1]; [intro l2|induction l2 as [|x2 l2 IH2]]; intros acc inv. - eauto. - unfold diff_list. eapply INV_rev; eauto. - simpl. case X.compare_spec; intro C. * apply IH1. eapply INV_drop, INV_sym, INV_drop, INV_sym; eauto. * apply (IH2 acc). eapply INV_sym, INV_drop, INV_sym; eauto. * apply IH1. eapply INV_sym, INV_lt; eauto. now apply INV_sym. Qed. 
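(* Illustration of [inter_list] and [diff_list] (element values assume a
   hypothetical numeric instantiation of [X]).  For s1 = {1,3,5} and
   s2 = {2,3}, whose reversed element lists are 5::3::1::nil and 3::2::nil:

     inter_list (5::3::1::nil) (3::2::nil) nil = 3::nil
     diff_list  (5::3::1::nil) (3::2::nil) nil = 1::5::nil

   [Eq] keeps the element only for intersection, [Gt] keeps it only for
   difference, and a [Lt] head of the second list is dropped in both cases;
   the resulting increasing lists are then fed to [treeify]. *)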
Instance diff_inter_ok s1 s2 `(Ok s1, Ok s2) : Ok (linear_diff s1 s2). Proof. unfold linear_inter. now apply treeify_ok, diff_list_ok, INV_init. Qed. Instance fold_remove_ok s1 s2 `(Ok s2) : Ok (fold remove s1 s2). Proof. rewrite fold_spec, <- fold_left_rev_right. unfold elt in *. induction (rev (elements s1)); simpl; unfold flip in *; auto_tc. Qed. Instance diff_ok s1 s2 `(Ok s1, Ok s2) : Ok (diff s1 s2). Proof. unfold diff. destruct compare_height; auto_tc. Qed. Lemma diff_list_spec x l1 l2 acc : sort X.lt (rev l1) -> sort X.lt (rev l2) -> (InA X.eq x (diff_list l1 l2 acc) <-> (InA X.eq x l1 /\ ~InA X.eq x l2) \/ InA X.eq x acc). Proof. revert l2 acc. induction l1 as [|x1 l1 IH1]. - intros l2 acc; simpl. inA. tauto. - induction l2 as [|x2 l2 IH2]; intros acc. * intros; simpl. rewrite rev_append_rev. inA. tauto. * simpl. intros U V. destruct (sorted_app_inv _ _ U) as (U1 & U2 & U3); auto. destruct (sorted_app_inv _ _ V) as (V1 & V2 & V3); auto. case X.compare_spec; intro C. + rewrite IH1; auto. f_equiv. inA. intuition; try order. assert (X.lt x x1) by (apply U3; inA). order. + rewrite (IH2 acc); auto. f_equiv. inA. intuition; try order. assert (X.lt x x1) by (apply U3; inA). order. + rewrite IH1; auto. inA. intuition; try order. left; split; auto. destruct 1. order. assert (X.lt x x2) by (apply V3; inA). order. Qed. Lemma linear_diff_spec s1 s2 x `(Ok s1, Ok s2) : InT x (linear_diff s1 s2) <-> InT x s1 /\ ~InT x s2. Proof. unfold linear_diff. rewrite !rev_elements_rev, treeify_spec, diff_list_spec by (rewrite rev_involutive; auto_tc). rewrite !InA_rev, InA_nil, !elements_spec1 by auto_tc. tauto. Qed. Lemma fold_remove_spec s1 s2 x `(Ok s2) : InT x (fold remove s1 s2) <-> InT x s2 /\ ~InT x s1. Proof. rewrite fold_spec, <- fold_left_rev_right. rewrite <- (elements_spec1 s1), <- InA_rev by auto_tc. unfold elt in *. induction (rev (elements s1)); simpl; intros. - rewrite InA_nil. intuition. - unfold flip in *. rewrite remove_spec, IHl, InA_cons. tauto. clear IHl. induction l; simpl; auto_tc. Qed. Lemma diff_spec s1 s2 y `{Ok s1, Ok s2} : InT y (diff s1 s2) <-> InT y s1 /\ ~InT y s2. Proof. unfold diff. destruct compare_height. - now apply linear_diff_spec. - rewrite filter_spec, Bool.negb_true_iff, <- Bool.not_true_iff_false, mem_spec; intuition. intros x1 x2 EQ. f_equal. now apply mem_proper. - now apply fold_remove_spec. Qed. End MakeRaw. (** * Balancing properties We now prove that all operations preserve a red-black invariant, and that trees have hence a logarithmic depth. *) Module BalanceProps(X:Orders.OrderedType)(Import M : MakeRaw X). Local Notation Rd := (Node Red). Local Notation Bk := (Node Black). Import M.MX. (** ** Red-Black invariants *) (** In a red-black tree : - a red node has no red children - the black depth at each node is the same along all paths. The black depth is here an argument of the predicate. *) Inductive rbt : nat -> tree -> Prop := | RB_Leaf : rbt 0 Leaf | RB_Rd n l k r : notred l -> notred r -> rbt n l -> rbt n r -> rbt n (Rd l k r) | RB_Bk n l k r : rbt n l -> rbt n r -> rbt (S n) (Bk l k r). (** A red-red tree is almost a red-black tree, except that it has a _red_ root node which _may_ have red children. Note that a red-red tree is hence non-empty, and all its strict subtrees are red-black. *) Inductive rrt (n:nat) : tree -> Prop := | RR_Rd l k r : rbt n l -> rbt n r -> rrt n (Rd l k r). (** An almost-red-black tree is almost a red-black tree, except that it's permitted to have two red nodes in a row at the very root (only). 
We implement this notion by saying that a quasi-red-black tree is either a red-black tree or a red-red tree. *) Inductive arbt (n:nat)(t:tree) : Prop := | ARB_RB : rbt n t -> arbt n t | ARB_RR : rrt n t -> arbt n t. (** The main exported invariant : being a red-black tree for some black depth. *) Class Rbt (t:tree) := RBT : exists d, rbt d t. (** ** Basic tactics and results about red-black *) Scheme rbt_ind := Induction for rbt Sort Prop. Local Hint Constructors rbt rrt arbt. Local Hint Extern 0 (notred _) => (exact I). Ltac invrb := intros; invtree rrt; invtree rbt; try contradiction. Ltac desarb := match goal with H:arbt _ _ |- _ => destruct H end. Ltac nonzero n := destruct n as [|n]; [try split; invrb|]. Lemma rr_nrr_rb n t : rrt n t -> notredred t -> rbt n t. Proof. destruct 1 as [l x r Hl Hr]. destruct l, r; descolor; invrb; auto. Qed. Local Hint Resolve rr_nrr_rb. Lemma arb_nrr_rb n t : arbt n t -> notredred t -> rbt n t. Proof. destruct 1; auto. Qed. Lemma arb_nr_rb n t : arbt n t -> notred t -> rbt n t. Proof. destruct 1; destruct t; descolor; invrb; auto. Qed. Local Hint Resolve arb_nrr_rb arb_nr_rb. (** ** A Red-Black tree has indeed a logarithmic depth *) Definition redcarac s := rcase (fun _ _ _ => 1) (fun _ => 0) s. Lemma rb_maxdepth s n : rbt n s -> maxdepth s <= 2*n + redcarac s. Proof. induction 1. - simpl; auto. - replace (redcarac l) with 0 in * by now destree l. replace (redcarac r) with 0 in * by now destree r. simpl maxdepth. simpl redcarac. rewrite Nat.add_succ_r, <- Nat.succ_le_mono. now apply Nat.max_lub. - simpl. rewrite <- Nat.succ_le_mono. apply Nat.max_lub; eapply Nat.le_trans; eauto; [destree l | destree r]; simpl; rewrite !Nat.add_0_r, ?Nat.add_1_r; auto with arith. Qed. Lemma rb_mindepth s n : rbt n s -> n + redcarac s <= mindepth s. Proof. induction 1; simpl. - trivial. - rewrite Nat.add_succ_r. apply -> Nat.succ_le_mono. replace (redcarac l) with 0 in * by now destree l. replace (redcarac r) with 0 in * by now destree r. now apply Nat.min_glb. - apply -> Nat.succ_le_mono. rewrite Nat.add_0_r. apply Nat.min_glb; eauto with arith. Qed. Lemma maxdepth_upperbound s : Rbt s -> maxdepth s <= 2 * log2 (S (cardinal s)). Proof. intros (n,H). eapply Nat.le_trans; [eapply rb_maxdepth; eauto|]. transitivity (2*(n+redcarac s)). - rewrite Nat.mul_add_distr_l. apply Nat.add_le_mono_l. rewrite <- Nat.mul_1_l at 1. apply Nat.mul_le_mono_r. auto with arith. - apply Nat.mul_le_mono_l. transitivity (mindepth s). + now apply rb_mindepth. + apply mindepth_log_cardinal. Qed. Lemma maxdepth_lowerbound s : s<>Leaf -> log2 (cardinal s) < maxdepth s. Proof. apply maxdepth_log_cardinal. Qed. (** ** Singleton *) Lemma singleton_rb x : Rbt (singleton x). Proof. unfold singleton. exists 1; auto. Qed. (** ** [makeBlack] and [makeRed] *) Lemma makeBlack_rb n t : arbt n t -> Rbt (makeBlack t). Proof. destruct t as [|[|] l x r]. - exists 0; auto. - destruct 1; invrb; exists (S n); simpl; auto. - exists n; auto. Qed. Lemma makeRed_rr t n : rbt (S n) t -> notred t -> rrt n (makeRed t). Proof. destruct t as [|[|] l x r]; invrb; simpl; auto. Qed. (** ** Balancing *) Lemma lbal_rb n l k r : arbt n l -> rbt n r -> rbt (S n) (lbal l k r). Proof. case lbal_match; intros; desarb; invrb; auto. Qed. Lemma rbal_rb n l k r : rbt n l -> arbt n r -> rbt (S n) (rbal l k r). Proof. case rbal_match; intros; desarb; invrb; auto. Qed. Lemma rbal'_rb n l k r : rbt n l -> arbt n r -> rbt (S n) (rbal' l k r). Proof. case rbal'_match; intros; desarb; invrb; auto. Qed. 
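(* A small instance of the black-depth bookkeeping in [lbal_rb] (element
   values assume a hypothetical numeric instantiation of [X]).  The tree
   l := Rd (Rd Leaf 1 Leaf) 2 Leaf is not a red-black tree (red root with a
   red child), but it is a red-red tree of black depth 0, hence [arbt 0 l];
   taking r := Leaf, a red-black tree of the same depth, [lbal_rb] promises
   rbt 1 (lbal l 3 r), and indeed

     lbal l 3 r = Rd (Bk Leaf 1 Leaf) 2 (Bk Leaf 3 Leaf)

   a red root with exactly one black node on every path below it. *)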
Lemma lbalS_rb n l x r : arbt n l -> rbt (S n) r -> notred r -> rbt (S n) (lbalS l x r). Proof. intros Hl Hr Hr'. destruct r as [|[|] rl rx rr]; invrb. clear Hr'. revert Hl. case lbalS_match. - destruct 1; invrb; auto. - intros. apply rbal'_rb; auto. Qed. Lemma lbalS_arb n l x r : arbt n l -> rbt (S n) r -> arbt (S n) (lbalS l x r). Proof. case lbalS_match. - destruct 1; invrb; auto. - clear l. intros l Hl Hl' Hr. destruct r as [|[|] rl rx rr]; invrb. * destruct rl as [|[|] rll rlx rlr]; invrb. right; auto using rbal'_rb, makeRed_rr. * left; apply rbal'_rb; auto. Qed. Lemma rbalS_rb n l x r : rbt (S n) l -> notred l -> arbt n r -> rbt (S n) (rbalS l x r). Proof. intros Hl Hl' Hr. destruct l as [|[|] ll lx lr]; invrb. clear Hl'. revert Hr. case rbalS_match. - destruct 1; invrb; auto. - intros. apply lbal_rb; auto. Qed. Lemma rbalS_arb n l x r : rbt (S n) l -> arbt n r -> arbt (S n) (rbalS l x r). Proof. case rbalS_match. - destruct 2; invrb; auto. - clear r. intros r Hr Hr' Hl. destruct l as [|[|] ll lx lr]; invrb. * destruct lr as [|[|] lrl lrx lrr]; invrb. right; auto using lbal_rb, makeRed_rr. * left; apply lbal_rb; auto. Qed. (** ** Insertion *) (** The next lemmas combine simultaneous results about rbt and arbt. A first solution here: statement with [if ... then ... else] *) Definition ifred s (A B:Prop) := rcase (fun _ _ _ => A) (fun _ => B) s. Lemma ifred_notred s A B : notred s -> (ifred s A B <-> B). Proof. destruct s; descolor; simpl; intuition. Qed. Lemma ifred_or s A B : ifred s A B -> A\/B. Proof. destruct s; descolor; simpl; intuition. Qed. Lemma ins_rr_rb x s n : rbt n s -> ifred s (rrt n (ins x s)) (rbt n (ins x s)). Proof. induction 1 as [ | n l k r | n l k r Hl IHl Hr IHr ]. - simpl; auto. - simpl. rewrite ifred_notred in * by trivial. elim_compare x k; auto. - rewrite ifred_notred by trivial. unfold ins; fold ins. (* simpl is too much here ... *) elim_compare x k. * auto. * apply lbal_rb; trivial. apply ifred_or in IHl; intuition. * apply rbal_rb; trivial. apply ifred_or in IHr; intuition. Qed. Lemma ins_arb x s n : rbt n s -> arbt n (ins x s). Proof. intros H. apply (ins_rr_rb x), ifred_or in H. intuition. Qed. Instance add_rb x s : Rbt s -> Rbt (add x s). Proof. intros (n,H). unfold add. now apply (makeBlack_rb n), ins_arb. Qed. (** ** Deletion *) (** A second approach here: statement with ... /\ ... *) Lemma append_arb_rb n l r : rbt n l -> rbt n r -> (arbt n (append l r)) /\ (notred l -> notred r -> rbt n (append l r)). Proof. revert r n. append_tac l r. - split; auto. - split; auto. - (* Red / Red *) intros n. invrb. case (IHlr n); auto; clear IHlr. case append_rr_match. + intros a x b _ H; split; invrb. assert (rbt n (Rd a x b)) by auto. invrb. auto. + split; invrb; auto. - (* Red / Black *) split; invrb. destruct (IHlr n) as (_,IH); auto. - (* Black / Red *) split; invrb. destruct (IHrl n) as (_,IH); auto. - (* Black / Black *) nonzero n. invrb. destruct (IHlr n) as (IH,_); auto; clear IHlr. revert IH. case append_bb_match. + intros a x b IH; split; destruct IH; invrb; auto. + split; [left | invrb]; auto using lbalS_rb. Qed. (** A third approach : Lemma ... with ... *) Lemma del_arb s x n : rbt (S n) s -> isblack s -> arbt n (del x s) with del_rb s x n : rbt n s -> notblack s -> rbt n (del x s). Proof. { revert n. induct s x; try destruct c; try contradiction; invrb. - apply append_arb_rb; assumption. - assert (IHl' := del_rb l x). clear IHr del_arb del_rb. destruct l as [|[|] ll lx lr]; auto. nonzero n. apply lbalS_arb; auto. - assert (IHr' := del_rb r x). 
clear IHl del_arb del_rb. destruct r as [|[|] rl rx rr]; auto. nonzero n. apply rbalS_arb; auto. } { revert n. induct s x; try assumption; try destruct c; try contradiction; invrb. - apply append_arb_rb; assumption. - assert (IHl' := del_arb l x). clear IHr del_arb del_rb. destruct l as [|[|] ll lx lr]; auto. nonzero n. destruct n as [|n]; [invrb|]; apply lbalS_rb; auto. - assert (IHr' := del_arb r x). clear IHl del_arb del_rb. destruct r as [|[|] rl rx rr]; auto. nonzero n. apply rbalS_rb; auto. } Qed. Instance remove_rb s x : Rbt s -> Rbt (remove x s). Proof. intros (n,H). unfold remove. destruct s as [|[|] l y r]. - apply (makeBlack_rb n). auto. - apply (makeBlack_rb n). left. apply del_rb; simpl; auto. - nonzero n. apply (makeBlack_rb n). apply del_arb; simpl; auto. Qed. (** ** Treeify *) Definition treeify_rb_invariant size depth (f:treeify_t) := forall acc, size <= length acc -> rbt depth (fst (f acc)) /\ size + length (snd (f acc)) = length acc. Lemma treeify_zero_rb : treeify_rb_invariant 0 0 treeify_zero. Proof. intros acc _; simpl; auto. Qed. Lemma treeify_one_rb : treeify_rb_invariant 1 0 treeify_one. Proof. intros [|x acc]; simpl; auto; inversion 1. Qed. Lemma treeify_cont_rb f g size1 size2 size d : treeify_rb_invariant size1 d f -> treeify_rb_invariant size2 d g -> size = S (size1 + size2) -> treeify_rb_invariant size (S d) (treeify_cont f g). Proof. intros Hf Hg H acc Hacc. unfold treeify_cont. specialize (Hf acc). destruct (f acc) as (l, acc1). simpl in *. destruct Hf as (Hf1, Hf2). { subst. eauto with arith. } destruct acc1 as [|x acc2]; simpl in *. - exfalso. revert Hacc. apply Nat.lt_nge. rewrite H, <- Hf2. auto with arith. - specialize (Hg acc2). destruct (g acc2) as (r, acc3). simpl in *. destruct Hg as (Hg1, Hg2). { revert Hacc. rewrite H, <- Hf2, Nat.add_succ_r, <- Nat.succ_le_mono. apply Nat.add_le_mono_l. } split; auto. now rewrite H, <- Hf2, <- Hg2, Nat.add_succ_r, Nat.add_assoc. Qed. Lemma treeify_aux_rb n : exists d, forall (b:bool), treeify_rb_invariant (ifpred b (Pos.to_nat n)) d (treeify_aux b n). Proof. induction n as [n (d,IHn)|n (d,IHn)| ]. - exists (S d). intros b. eapply treeify_cont_rb; [ apply (IHn false) | apply (IHn b) | ]. rewrite Pos2Nat.inj_xI. assert (H := Pos2Nat.is_pos n). apply Nat.neq_0_lt_0 in H. destruct b; simpl; intros; rewrite Nat.add_0_r; trivial. now rewrite <- Nat.add_succ_r, Nat.succ_pred; trivial. - exists (S d). intros b. eapply treeify_cont_rb; [ apply (IHn b) | apply (IHn true) | ]. rewrite Pos2Nat.inj_xO. assert (H := Pos2Nat.is_pos n). apply Nat.neq_0_lt_0 in H. rewrite <- Nat.add_succ_r, Nat.succ_pred by trivial. destruct b; simpl; intros; rewrite Nat.add_0_r; trivial. symmetry. now apply Nat.add_pred_l. - exists 0; destruct b; [ apply treeify_zero_rb | apply treeify_one_rb ]. Qed. (** The black depth of [treeify l] is actually a log2, but we don't need to mention that. *) Instance treeify_rb l : Rbt (treeify l). Proof. unfold treeify. destruct (treeify_aux_rb (plength l)) as (d,H). exists d. apply H. now rewrite plength_spec. Qed. (** ** Filtering *) Instance filter_rb f s : Rbt (filter f s). Proof. unfold filter; auto_tc. Qed. Instance partition_rb1 f s : Rbt (fst (partition f s)). Proof. unfold partition. destruct partition_aux. simpl. auto_tc. Qed. Instance partition_rb2 f s : Rbt (snd (partition f s)). Proof. unfold partition. destruct partition_aux. simpl. auto_tc. Qed. (** ** Union, intersection, difference *) Instance fold_add_rb s1 s2 : Rbt s2 -> Rbt (fold add s1 s2). Proof. intros. 
rewrite fold_spec, <- fold_left_rev_right. unfold elt in *. induction (rev (elements s1)); simpl; unfold flip in *; auto_tc. Qed. Instance fold_remove_rb s1 s2 : Rbt s2 -> Rbt (fold remove s1 s2). Proof. intros. rewrite fold_spec, <- fold_left_rev_right. unfold elt in *. induction (rev (elements s1)); simpl; unfold flip in *; auto_tc. Qed. Lemma union_rb s1 s2 : Rbt s1 -> Rbt s2 -> Rbt (union s1 s2). Proof. intros. unfold union, linear_union. destruct compare_height; auto_tc. Qed. Lemma inter_rb s1 s2 : Rbt s1 -> Rbt s2 -> Rbt (inter s1 s2). Proof. intros. unfold inter, linear_inter. destruct compare_height; auto_tc. Qed. Lemma diff_rb s1 s2 : Rbt s1 -> Rbt s2 -> Rbt (diff s1 s2). Proof. intros. unfold diff, linear_diff. destruct compare_height; auto_tc. Qed. End BalanceProps. (** * Final Encapsulation Now, in order to really provide a functor implementing [S], we need to encapsulate everything into a type of binary search trees. They also happen to be well-balanced, but this has no influence on the correctness of operations, so we won't state this here, see [BalanceProps] if you need more than just the MSet interface. *) Module Type MSetInterface_S_Ext := MSetInterface.S <+ MSetRemoveMin. Module Make (X: Orders.OrderedType) <: MSetInterface_S_Ext with Module E := X. Module Raw. Include MakeRaw X. End Raw. Include MSetInterface.Raw2Sets X Raw. Definition opt_ok (x:option (elt * Raw.t)) := match x with Some (_,s) => Raw.Ok s | None => True end. Definition mk_opt_t (x: option (elt * Raw.t))(P: opt_ok x) : option (elt * t) := match x as o return opt_ok o -> option (elt * t) with | Some (k,s') => fun P : Raw.Ok s' => Some (k, Mkt s') | None => fun _ => None end P. Definition remove_min s : option (elt * t) := mk_opt_t (Raw.remove_min (this s)) (Raw.remove_min_ok s). Lemma remove_min_spec1 s x s' : remove_min s = Some (x,s') -> min_elt s = Some x /\ Equal (remove x s) s'. Proof. destruct s as (s,Hs). unfold remove_min, mk_opt_t, min_elt, remove, Equal, In; simpl. generalize (fun x s' => @Raw.remove_min_spec1 s x s' Hs). set (P := Raw.remove_min_ok s). clearbody P. destruct (Raw.remove_min s) as [(x0,s0)|]; try easy. intros H U. injection U. clear U; intros; subst. simpl. destruct (H x s0); auto. subst; intuition. Qed. Lemma remove_min_spec2 s : remove_min s = None -> Empty s. Proof. destruct s as (s,Hs). unfold remove_min, mk_opt_t, Empty, In; simpl. generalize (Raw.remove_min_spec2 s). set (P := Raw.remove_min_ok s). clearbody P. destruct (Raw.remove_min s) as [(x0,s0)|]; now intuition. Qed. End Make. coq-8.4pl2/theories/MSets/MSetGenTree.v0000640000175000001440000007542211742067507017040 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tree -> X.t -> tree -> tree. (** ** The empty set and emptyness test *) Definition empty := Leaf. Definition is_empty t := match t with | Leaf => true | _ => false end. (** ** Membership test *) (** The [mem] function is deciding membership. It exploits the binary search tree invariant to achieve logarithmic complexity. *) Fixpoint mem x t := match t with | Leaf => false | Node _ l k r => match X.compare x k with | Lt => mem x l | Eq => true | Gt => mem x r end end. (** ** Minimal, maximal, arbitrary elements *) Fixpoint min_elt (t : tree) : option elt := match t with | Leaf => None | Node _ Leaf x r => Some x | Node _ l x r => min_elt l end. 
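(* Illustration of the search functions on a small tree (element values
   assume a hypothetical numeric instantiation of [X]; the information field
   is irrelevant here and written as c).  On the binary search tree

     Node c (Node c Leaf 1 Leaf) 2 (Node c Leaf 3 Leaf)

   [mem 3] compares 3 with the root 2 ([Gt]), descends right, finds 3 ([Eq])
   and returns true, while [min_elt] follows the left spine and returns
   [Some 1]; both visit at most one node per level of the tree. *)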
Fixpoint max_elt (t : tree) : option elt := match t with | Leaf => None | Node _ l x Leaf => Some x | Node _ l x r => max_elt r end. Definition choose := min_elt. (** ** Iteration on elements *) Fixpoint fold {A: Type} (f: elt -> A -> A) (t: tree) (base: A) : A := match t with | Leaf => base | Node _ l x r => fold f r (f x (fold f l base)) end. Fixpoint elements_aux acc s := match s with | Leaf => acc | Node _ l x r => elements_aux (x :: elements_aux acc r) l end. Definition elements := elements_aux nil. Fixpoint rev_elements_aux acc s := match s with | Leaf => acc | Node _ l x r => rev_elements_aux (x :: rev_elements_aux acc l) r end. Definition rev_elements := rev_elements_aux nil. Fixpoint cardinal (s : tree) : nat := match s with | Leaf => 0 | Node _ l _ r => S (cardinal l + cardinal r) end. Fixpoint maxdepth s := match s with | Leaf => 0 | Node _ l _ r => S (max (maxdepth l) (maxdepth r)) end. Fixpoint mindepth s := match s with | Leaf => 0 | Node _ l _ r => S (min (mindepth l) (mindepth r)) end. (** ** Testing universal or existential properties. *) (** We do not use the standard boolean operators of Coq, but lazy ones. *) Fixpoint for_all (f:elt->bool) s := match s with | Leaf => true | Node _ l x r => f x &&& for_all f l &&& for_all f r end. Fixpoint exists_ (f:elt->bool) s := match s with | Leaf => false | Node _ l x r => f x ||| exists_ f l ||| exists_ f r end. (** ** Comparison of trees *) (** The algorithm here has been suggested by Xavier Leroy, and transformed into c.p.s. by Benjamin Grégoire. The original ocaml code (with non-structural recursive calls) has also been formalized (thanks to Function+measure), see [ocaml_compare] in [MSetFullAVL]. The following code with continuations computes dramatically faster in Coq, and should be almost as efficient after extraction. *) (** Enumeration of the elements of a tree. This corresponds to the "samefringe" notion in the litterature. *) Inductive enumeration := | End : enumeration | More : elt -> tree -> enumeration -> enumeration. (** [cons t e] adds the elements of tree [t] on the head of enumeration [e]. *) Fixpoint cons s e : enumeration := match s with | Leaf => e | Node _ l x r => cons l (More x r e) end. (** One step of comparison of elements *) Definition compare_more x1 (cont:enumeration->comparison) e2 := match e2 with | End => Gt | More x2 r2 e2 => match X.compare x1 x2 with | Eq => cont (cons r2 e2) | Lt => Lt | Gt => Gt end end. (** Comparison of left tree, middle element, then right tree *) Fixpoint compare_cont s1 (cont:enumeration->comparison) e2 := match s1 with | Leaf => cont e2 | Node _ l1 x1 r1 => compare_cont l1 (compare_more x1 (compare_cont r1 cont)) e2 end. (** Initial continuation *) Definition compare_end e2 := match e2 with End => Eq | _ => Lt end. (** The complete comparison *) Definition compare s1 s2 := compare_cont s1 compare_end (cons s2 End). Definition equal s1 s2 := match compare s1 s2 with Eq => true | _ => false end. (** ** Subset test *) (** In ocaml, recursive calls are made on "half-trees" such as (Node _ l1 x1 Leaf) and (Node _ Leaf x1 r1). Instead of these non-structural calls, we propose here two specialized functions for these situations. This version should be almost as efficient as the one of ocaml (closures as arguments may slow things a bit), it is simply less compact. The exact ocaml version has also been formalized (thanks to Function+measure), see [ocaml_subset] in [MSetFullAVL]. 
*) Fixpoint subsetl (subset_l1 : tree -> bool) x1 s2 : bool := match s2 with | Leaf => false | Node _ l2 x2 r2 => match X.compare x1 x2 with | Eq => subset_l1 l2 | Lt => subsetl subset_l1 x1 l2 | Gt => mem x1 r2 &&& subset_l1 s2 end end. Fixpoint subsetr (subset_r1 : tree -> bool) x1 s2 : bool := match s2 with | Leaf => false | Node _ l2 x2 r2 => match X.compare x1 x2 with | Eq => subset_r1 r2 | Lt => mem x1 l2 &&& subset_r1 s2 | Gt => subsetr subset_r1 x1 r2 end end. Fixpoint subset s1 s2 : bool := match s1, s2 with | Leaf, _ => true | Node _ _ _ _, Leaf => false | Node _ l1 x1 r1, Node _ l2 x2 r2 => match X.compare x1 x2 with | Eq => subset l1 l2 &&& subset r1 r2 | Lt => subsetl (subset l1) x1 l2 &&& subset r1 s2 | Gt => subsetr (subset r1) x1 r2 &&& subset l1 s2 end end. End Ops. (** * Props : correctness proofs of these generic operations *) Module Type Props (X:OrderedType)(Info:InfoTyp)(Import M:Ops X Info). (** ** Occurrence in a tree *) Inductive InT (x : elt) : tree -> Prop := | IsRoot : forall c l r y, X.eq x y -> InT x (Node c l y r) | InLeft : forall c l r y, InT x l -> InT x (Node c l y r) | InRight : forall c l r y, InT x r -> InT x (Node c l y r). Definition In := InT. (** ** Some shortcuts *) Definition Equal s s' := forall a : elt, InT a s <-> InT a s'. Definition Subset s s' := forall a : elt, InT a s -> InT a s'. Definition Empty s := forall a : elt, ~ InT a s. Definition For_all (P : elt -> Prop) s := forall x, InT x s -> P x. Definition Exists (P : elt -> Prop) s := exists x, InT x s /\ P x. (** ** Binary search trees *) (** [lt_tree x s]: all elements in [s] are smaller than [x] (resp. greater for [gt_tree]) *) Definition lt_tree x s := forall y, InT y s -> X.lt y x. Definition gt_tree x s := forall y, InT y s -> X.lt x y. (** [bst t] : [t] is a binary search tree *) Inductive bst : tree -> Prop := | BSLeaf : bst Leaf | BSNode : forall c x l r, bst l -> bst r -> lt_tree x l -> gt_tree x r -> bst (Node c l x r). (** [bst] is the (decidable) invariant our trees will have to satisfy. *) Definition IsOk := bst. Class Ok (s:tree) : Prop := ok : bst s. Instance bst_Ok s (Hs : bst s) : Ok s := { ok := Hs }. Fixpoint ltb_tree x s := match s with | Leaf => true | Node _ l y r => match X.compare x y with | Gt => ltb_tree x l && ltb_tree x r | _ => false end end. Fixpoint gtb_tree x s := match s with | Leaf => true | Node _ l y r => match X.compare x y with | Lt => gtb_tree x l && gtb_tree x r | _ => false end end. Fixpoint isok s := match s with | Leaf => true | Node _ l x r => isok l && isok r && ltb_tree x l && gtb_tree x r end. (** ** Known facts about ordered types *) Module Import MX := OrderedTypeFacts X. (** ** Automation and dedicated tactics *) Scheme tree_ind := Induction for tree Sort Prop. Scheme bst_ind := Induction for bst Sort Prop. Local Hint Resolve MX.eq_refl MX.eq_trans MX.lt_trans @ok. Local Hint Immediate MX.eq_sym. Local Hint Unfold In lt_tree gt_tree. Local Hint Constructors InT bst. Local Hint Unfold Ok. (** Automatic treatment of [Ok] hypothesis *) Ltac clear_inversion H := inversion H; clear H; subst. Ltac inv_ok := match goal with | H:Ok (Node _ _ _ _) |- _ => clear_inversion H; inv_ok | H:Ok Leaf |- _ => clear H; inv_ok | H:bst ?x |- _ => change (Ok x) in H; inv_ok | _ => idtac end. (** A tactic to repeat [inversion_clear] on all hyps of the form [(f (Node _ _ _ _))] *) Ltac is_tree_constr c := match c with | Leaf => idtac | Node _ _ _ _ => idtac | _ => fail end. 
Ltac invtree f := match goal with | H:f ?s |- _ => is_tree_constr s; clear_inversion H; invtree f | H:f _ ?s |- _ => is_tree_constr s; clear_inversion H; invtree f | H:f _ _ ?s |- _ => is_tree_constr s; clear_inversion H; invtree f | _ => idtac end. Ltac inv := inv_ok; invtree InT. Ltac intuition_in := repeat progress (intuition; inv). (** Helper tactic concerning order of elements. *) Ltac order := match goal with | U: lt_tree _ ?s, V: InT _ ?s |- _ => generalize (U _ V); clear U; order | U: gt_tree _ ?s, V: InT _ ?s |- _ => generalize (U _ V); clear U; order | _ => MX.order end. (** [isok] is indeed a decision procedure for [Ok] *) Lemma ltb_tree_iff : forall x s, lt_tree x s <-> ltb_tree x s = true. Proof. induction s as [|c l IHl y r IHr]; simpl. unfold lt_tree; intuition_in. elim_compare x y. split; intros; try discriminate. assert (X.lt y x) by auto. order. split; intros; try discriminate. assert (X.lt y x) by auto. order. rewrite !andb_true_iff, <-IHl, <-IHr. unfold lt_tree; intuition_in; order. Qed. Lemma gtb_tree_iff : forall x s, gt_tree x s <-> gtb_tree x s = true. Proof. induction s as [|c l IHl y r IHr]; simpl. unfold gt_tree; intuition_in. elim_compare x y. split; intros; try discriminate. assert (X.lt x y) by auto. order. rewrite !andb_true_iff, <-IHl, <-IHr. unfold gt_tree; intuition_in; order. split; intros; try discriminate. assert (X.lt x y) by auto. order. Qed. Lemma isok_iff : forall s, Ok s <-> isok s = true. Proof. induction s as [|c l IHl y r IHr]; simpl. intuition_in. rewrite !andb_true_iff, <- IHl, <-IHr, <- ltb_tree_iff, <- gtb_tree_iff. intuition_in. Qed. Instance isok_Ok s : isok s = true -> Ok s | 10. Proof. intros; apply <- isok_iff; auto. Qed. (** ** Basic results about [In] *) Lemma In_1 : forall s x y, X.eq x y -> InT x s -> InT y s. Proof. induction s; simpl; intuition_in; eauto. Qed. Local Hint Immediate In_1. Instance In_compat : Proper (X.eq==>eq==>iff) InT. Proof. apply proper_sym_impl_iff_2; auto with *. repeat red; intros; subst. apply In_1 with x; auto. Qed. Lemma In_node_iff : forall c l x r y, InT y (Node c l x r) <-> InT y l \/ X.eq y x \/ InT y r. Proof. intuition_in. Qed. Lemma In_leaf_iff : forall x, InT x Leaf <-> False. Proof. intuition_in. Qed. (** Results about [lt_tree] and [gt_tree] *) Lemma lt_leaf : forall x : elt, lt_tree x Leaf. Proof. red; inversion 1. Qed. Lemma gt_leaf : forall x : elt, gt_tree x Leaf. Proof. red; inversion 1. Qed. Lemma lt_tree_node : forall (x y : elt) (l r : tree) (i : Info.t), lt_tree x l -> lt_tree x r -> X.lt y x -> lt_tree x (Node i l y r). Proof. unfold lt_tree; intuition_in; order. Qed. Lemma gt_tree_node : forall (x y : elt) (l r : tree) (i : Info.t), gt_tree x l -> gt_tree x r -> X.lt x y -> gt_tree x (Node i l y r). Proof. unfold gt_tree; intuition_in; order. Qed. Local Hint Resolve lt_leaf gt_leaf lt_tree_node gt_tree_node. Lemma lt_tree_not_in : forall (x : elt) (t : tree), lt_tree x t -> ~ InT x t. Proof. intros; intro; order. Qed. Lemma lt_tree_trans : forall x y, X.lt x y -> forall t, lt_tree x t -> lt_tree y t. Proof. eauto. Qed. Lemma gt_tree_not_in : forall (x : elt) (t : tree), gt_tree x t -> ~ InT x t. Proof. intros; intro; order. Qed. Lemma gt_tree_trans : forall x y, X.lt y x -> forall t, gt_tree x t -> gt_tree y t. Proof. eauto. Qed. Instance lt_tree_compat : Proper (X.eq ==> Logic.eq ==> iff) lt_tree. Proof. apply proper_sym_impl_iff_2; auto. intros x x' Hx s s' Hs H y Hy. subst. setoid_rewrite <- Hx; auto. Qed. Instance gt_tree_compat : Proper (X.eq ==> Logic.eq ==> iff) gt_tree. Proof. 
apply proper_sym_impl_iff_2; auto. intros x x' Hx s s' Hs H y Hy. subst. setoid_rewrite <- Hx; auto. Qed. Local Hint Resolve lt_tree_not_in lt_tree_trans gt_tree_not_in gt_tree_trans. Ltac induct s x := induction s as [|i l IHl x' r IHr]; simpl; intros; [|elim_compare x x'; intros; inv]. Ltac auto_tc := auto with typeclass_instances. Ltac ok := inv; change bst with Ok in *; match goal with | |- Ok (Node _ _ _ _) => constructor; auto_tc; ok | |- lt_tree _ (Node _ _ _ _) => apply lt_tree_node; ok | |- gt_tree _ (Node _ _ _ _) => apply gt_tree_node; ok | _ => eauto with typeclass_instances end. (** ** Empty set *) Lemma empty_spec : Empty empty. Proof. intros x H. inversion H. Qed. Instance empty_ok : Ok empty. Proof. auto. Qed. (** ** Emptyness test *) Lemma is_empty_spec : forall s, is_empty s = true <-> Empty s. Proof. destruct s as [|c r x l]; simpl; auto. - split; auto. intros _ x H. inv. - split; auto. try discriminate. intro H; elim (H x); auto. Qed. (** ** Membership *) Lemma mem_spec : forall s x `{Ok s}, mem x s = true <-> InT x s. Proof. split. - induct s x; now auto. - induct s x; intuition_in; order. Qed. (** ** Minimal and maximal elements *) Functional Scheme min_elt_ind := Induction for min_elt Sort Prop. Functional Scheme max_elt_ind := Induction for max_elt Sort Prop. Lemma min_elt_spec1 s x : min_elt s = Some x -> InT x s. Proof. functional induction (min_elt s); auto; inversion 1; auto. Qed. Lemma min_elt_spec2 s x y `{Ok s} : min_elt s = Some x -> InT y s -> ~ X.lt y x. Proof. revert y. functional induction (min_elt s); try rename _x0 into r; try rename _x2 into l1, _x3 into x1, _x4 into r1. - discriminate. - intros y V W. inversion V; clear V; subst. inv; order. - intros; inv; auto. * assert (X.lt x x0) by (apply H8; apply min_elt_spec1; auto). order. * assert (X.lt x1 x0) by auto. assert (~X.lt x1 x) by auto. order. Qed. Lemma min_elt_spec3 s : min_elt s = None -> Empty s. Proof. functional induction (min_elt s). red; red; inversion 2. inversion 1. intro H0. destruct (IHo H0 _x3); auto. Qed. Lemma max_elt_spec1 s x : max_elt s = Some x -> InT x s. Proof. functional induction (max_elt s); auto; inversion 1; auto. Qed. Lemma max_elt_spec2 s x y `{Ok s} : max_elt s = Some x -> InT y s -> ~ X.lt x y. Proof. revert y. functional induction (max_elt s); try rename _x0 into r; try rename _x2 into l1, _x3 into x1, _x4 into r1. - discriminate. - intros y V W. inversion V; clear V; subst. inv; order. - intros; inv; auto. * assert (X.lt x0 x) by (apply H9; apply max_elt_spec1; auto). order. * assert (X.lt x0 x1) by auto. assert (~X.lt x x1) by auto. order. Qed. Lemma max_elt_spec3 s : max_elt s = None -> Empty s. Proof. functional induction (max_elt s). red; red; inversion 2. inversion 1. intro H0. destruct (IHo H0 _x3); auto. Qed. Lemma choose_spec1 : forall s x, choose s = Some x -> InT x s. Proof. exact min_elt_spec1. Qed. Lemma choose_spec2 : forall s, choose s = None -> Empty s. Proof. exact min_elt_spec3. Qed. Lemma choose_spec3 : forall s s' x x' `{Ok s, Ok s'}, choose s = Some x -> choose s' = Some x' -> Equal s s' -> X.eq x x'. Proof. unfold choose, Equal; intros s s' x x' Hb Hb' Hx Hx' H. assert (~X.lt x x'). apply min_elt_spec2 with s'; auto. rewrite <-H; auto using min_elt_spec1. assert (~X.lt x' x). apply min_elt_spec2 with s; auto. rewrite H; auto using min_elt_spec1. elim_compare x x'; intuition. Qed. (** ** Elements *) Lemma elements_spec1' : forall s acc x, InA X.eq x (elements_aux acc s) <-> InT x s \/ InA X.eq x acc. Proof. 
induction s as [ | c l Hl x r Hr ]; simpl; auto. intuition. inversion H0. intros. rewrite Hl. destruct (Hr acc x0); clear Hl Hr. intuition; inversion_clear H3; intuition. Qed. Lemma elements_spec1 : forall s x, InA X.eq x (elements s) <-> InT x s. Proof. intros; generalize (elements_spec1' s nil x); intuition. inversion_clear H0. Qed. Lemma elements_spec2' : forall s acc `{Ok s}, sort X.lt acc -> (forall x y : elt, InA X.eq x acc -> InT y s -> X.lt y x) -> sort X.lt (elements_aux acc s). Proof. induction s as [ | c l Hl y r Hr]; simpl; intuition. inv. apply Hl; auto. constructor. apply Hr; auto. eapply InA_InfA; eauto with *. intros. destruct (elements_spec1' r acc y0); intuition. intros. inversion_clear H. order. destruct (elements_spec1' r acc x); intuition eauto. Qed. Lemma elements_spec2 : forall s `(Ok s), sort X.lt (elements s). Proof. intros; unfold elements; apply elements_spec2'; auto. intros; inversion H0. Qed. Local Hint Resolve elements_spec2. Lemma elements_spec2w : forall s `(Ok s), NoDupA X.eq (elements s). Proof. intros. eapply SortA_NoDupA; eauto with *. Qed. Lemma elements_aux_cardinal : forall s acc, (length acc + cardinal s)%nat = length (elements_aux acc s). Proof. simple induction s; simpl; intuition. rewrite <- H. simpl. rewrite <- H0. rewrite (Nat.add_comm (cardinal t0)). now rewrite <- Nat.add_succ_r, Nat.add_assoc. Qed. Lemma elements_cardinal : forall s : tree, cardinal s = length (elements s). Proof. exact (fun s => elements_aux_cardinal s nil). Qed. Definition cardinal_spec (s:tree)(Hs:Ok s) := elements_cardinal s. Lemma elements_app : forall s acc, elements_aux acc s = elements s ++ acc. Proof. induction s; simpl; intros; auto. rewrite IHs1, IHs2. unfold elements; simpl. rewrite 2 IHs1, IHs2, !app_nil_r, !app_ass; auto. Qed. Lemma elements_node c l x r : elements (Node c l x r) = elements l ++ x :: elements r. Proof. unfold elements; simpl. now rewrite !elements_app, !app_nil_r. Qed. Lemma rev_elements_app : forall s acc, rev_elements_aux acc s = rev_elements s ++ acc. Proof. induction s; simpl; intros; auto. rewrite IHs1, IHs2. unfold rev_elements; simpl. rewrite IHs1, 2 IHs2, !app_nil_r, !app_ass; auto. Qed. Lemma rev_elements_node c l x r : rev_elements (Node c l x r) = rev_elements r ++ x :: rev_elements l. Proof. unfold rev_elements; simpl. now rewrite !rev_elements_app, !app_nil_r. Qed. Lemma rev_elements_rev s : rev_elements s = rev (elements s). Proof. induction s as [|c l IHl x r IHr]; trivial. rewrite elements_node, rev_elements_node, IHl, IHr, rev_app_distr. simpl. now rewrite !app_ass. Qed. (** The converse of [elements_spec2], used in MSetRBT *) (* TODO: TO MIGRATE ELSEWHERE... *) Lemma sorted_app_inv l1 l2 : sort X.lt (l1++l2) -> sort X.lt l1 /\ sort X.lt l2 /\ forall x1 x2, InA X.eq x1 l1 -> InA X.eq x2 l2 -> X.lt x1 x2. Proof. induction l1 as [|a1 l1 IHl1]. - simpl; repeat split; auto. intros. now rewrite InA_nil in *. - simpl. inversion_clear 1 as [ | ? ? Hs Hhd ]. destruct (IHl1 Hs) as (H1 & H2 & H3). repeat split. * constructor; auto. destruct l1; simpl in *; auto; inversion_clear Hhd; auto. * trivial. * intros x1 x2 Hx1 Hx2. rewrite InA_cons in Hx1. destruct Hx1. + rewrite H. apply SortA_InfA_InA with (eqA:=X.eq)(l:=l1++l2); auto_tc. rewrite InA_app_iff; auto_tc. + auto. Qed. Lemma elements_sort_ok s : sort X.lt (elements s) -> Ok s. Proof. induction s as [|c l IHl x r IHr]. - auto. - rewrite elements_node. intros H. destruct (sorted_app_inv _ _ H) as (H1 & H2 & H3). inversion_clear H2. constructor; ok. * intros y Hy. apply H3. 
+ now rewrite elements_spec1. + rewrite InA_cons. now left. * intros y Hy. apply SortA_InfA_InA with (eqA:=X.eq)(l:=elements r); auto_tc. now rewrite elements_spec1. Qed. (** ** [for_all] and [exists] *) Lemma for_all_spec s f : Proper (X.eq==>eq) f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Proof. intros Hf; unfold For_all. induction s as [|i l IHl x r IHr]; simpl; auto. - split; intros; inv; auto. - rewrite <- !andb_lazy_alt, !andb_true_iff, IHl, IHr. clear IHl IHr. intuition_in. eauto. Qed. Lemma exists_spec s f : Proper (X.eq==>eq) f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Proof. intros Hf; unfold Exists. induction s as [|i l IHl x r IHr]; simpl; auto. - split. * discriminate. * intros (y,(H,_)); inv. - rewrite <- !orb_lazy_alt, !orb_true_iff, IHl, IHr. clear IHl IHr. split; [intros [[H|(y,(H,H'))]|(y,(H,H'))]|intros (y,(H,H'))]. * exists x; auto. * exists y; auto. * exists y; auto. * inv; [left;left|left;right|right]; try (exists y); eauto. Qed. (** ** Fold *) Lemma fold_spec' {A} (f : elt -> A -> A) (s : tree) (i : A) (acc : list elt) : fold_left (flip f) (elements_aux acc s) i = fold_left (flip f) acc (fold f s i). Proof. revert i acc. induction s as [|c l IHl x r IHr]; simpl; intros; auto. rewrite IHl. simpl. unfold flip at 2. apply IHr. Qed. Lemma fold_spec (s:tree) {A} (i : A) (f : elt -> A -> A) : fold f s i = fold_left (flip f) (elements s) i. Proof. revert i. unfold elements. induction s as [|c l IHl x r IHr]; simpl; intros; auto. rewrite fold_spec'. rewrite IHr. simpl; auto. Qed. (** ** Subset *) Lemma subsetl_spec : forall subset_l1 l1 x1 c1 s2 `{Ok (Node c1 l1 x1 Leaf), Ok s2}, (forall s `{Ok s}, (subset_l1 s = true <-> Subset l1 s)) -> (subsetl subset_l1 x1 s2 = true <-> Subset (Node c1 l1 x1 Leaf) s2 ). Proof. induction s2 as [|c2 l2 IHl2 x2 r2 IHr2]; simpl; intros. unfold Subset; intuition; try discriminate. assert (H': InT x1 Leaf) by auto; inversion H'. specialize (IHl2 H). specialize (IHr2 H). inv. elim_compare x1 x2. rewrite H1 by auto; clear H1 IHl2 IHr2. unfold Subset. intuition_in. assert (X.eq a x2) by order; intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. rewrite IHl2 by auto; clear H1 IHl2 IHr2. unfold Subset. intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. rewrite <-andb_lazy_alt, andb_true_iff, H1 by auto; clear H1 IHl2 IHr2. unfold Subset. intuition_in. constructor 3. setoid_replace a with x1; auto. rewrite <- mem_spec; auto. rewrite mem_spec; auto. assert (InT x1 (Node c2 l2 x2 r2)) by auto; intuition_in; order. Qed. Lemma subsetr_spec : forall subset_r1 r1 x1 c1 s2, bst (Node c1 Leaf x1 r1) -> bst s2 -> (forall s, bst s -> (subset_r1 s = true <-> Subset r1 s)) -> (subsetr subset_r1 x1 s2 = true <-> Subset (Node c1 Leaf x1 r1) s2). Proof. induction s2 as [|c2 l2 IHl2 x2 r2 IHr2]; simpl; intros. unfold Subset; intuition; try discriminate. assert (H': InT x1 Leaf) by auto; inversion H'. specialize (IHl2 H). specialize (IHr2 H). inv. elim_compare x1 x2. rewrite H1 by auto; clear H1 IHl2 IHr2. unfold Subset. intuition_in. assert (X.eq a x2) by order; intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. rewrite <-andb_lazy_alt, andb_true_iff, H1 by auto; clear H1 IHl2 IHr2. unfold Subset. intuition_in. constructor 2. setoid_replace a with x1; auto. rewrite <- mem_spec; auto. rewrite mem_spec; auto. assert (InT x1 (Node c2 l2 x2 r2)) by auto; intuition_in; order. 
rewrite IHr2 by auto; clear H1 IHl2 IHr2. unfold Subset. intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. Qed. Lemma subset_spec : forall s1 s2 `{Ok s1, Ok s2}, (subset s1 s2 = true <-> Subset s1 s2). Proof. induction s1 as [|c1 l1 IHl1 x1 r1 IHr1]; simpl; intros. unfold Subset; intuition_in. destruct s2 as [|c2 l2 x2 r2]; simpl; intros. unfold Subset; intuition_in; try discriminate. assert (H': InT x1 Leaf) by auto; inversion H'. inv. elim_compare x1 x2. rewrite <-andb_lazy_alt, andb_true_iff, IHl1, IHr1 by auto. clear IHl1 IHr1. unfold Subset; intuition_in. assert (X.eq a x2) by order; intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. rewrite <-andb_lazy_alt, andb_true_iff, IHr1 by auto. rewrite (@subsetl_spec (subset l1) l1 x1 c1) by auto. clear IHl1 IHr1. unfold Subset; intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. rewrite <-andb_lazy_alt, andb_true_iff, IHl1 by auto. rewrite (@subsetr_spec (subset r1) r1 x1 c1) by auto. clear IHl1 IHr1. unfold Subset; intuition_in. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. assert (InT a (Node c2 l2 x2 r2)) by auto; intuition_in; order. Qed. (** ** Comparison *) (** Relations [eq] and [lt] over trees *) Module L := MSetInterface.MakeListOrdering X. Definition eq := Equal. Instance eq_equiv : Equivalence eq. Proof. firstorder. Qed. Lemma eq_Leq : forall s s', eq s s' <-> L.eq (elements s) (elements s'). Proof. unfold eq, Equal, L.eq; intros. setoid_rewrite elements_spec1; firstorder. Qed. Definition lt (s1 s2 : tree) : Prop := exists s1' s2', Ok s1' /\ Ok s2' /\ eq s1 s1' /\ eq s2 s2' /\ L.lt (elements s1') (elements s2'). Instance lt_strorder : StrictOrder lt. Proof. split. intros s (s1 & s2 & B1 & B2 & E1 & E2 & L). assert (eqlistA X.eq (elements s1) (elements s2)). apply SortA_equivlistA_eqlistA with (ltA:=X.lt); auto with *. rewrite <- eq_Leq. transitivity s; auto. symmetry; auto. rewrite H in L. apply (StrictOrder_Irreflexive (elements s2)); auto. intros s1 s2 s3 (s1' & s2' & B1 & B2 & E1 & E2 & L12) (s2'' & s3' & B2' & B3 & E2' & E3 & L23). exists s1', s3'; do 4 (split; trivial). assert (eqlistA X.eq (elements s2') (elements s2'')). apply SortA_equivlistA_eqlistA with (ltA:=X.lt); auto with *. rewrite <- eq_Leq. transitivity s2; auto. symmetry; auto. transitivity (elements s2'); auto. rewrite H; auto. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. intros s1 s2 E12 s3 s4 E34. split. intros (s1' & s3' & B1 & B3 & E1 & E3 & LT). exists s1', s3'; do 2 (split; trivial). split. transitivity s1; auto. symmetry; auto. split; auto. transitivity s3; auto. symmetry; auto. intros (s1' & s3' & B1 & B3 & E1 & E3 & LT). exists s1', s3'; do 2 (split; trivial). split. transitivity s2; auto. split; auto. transitivity s4; auto. Qed. (** Proof of the comparison algorithm *) (** [flatten_e e] returns the list of elements of [e] i.e. the list of elements actually compared *) Fixpoint flatten_e (e : enumeration) : list elt := match e with | End => nil | More x t r => x :: elements t ++ flatten_e r end. Lemma flatten_e_elements : forall l x r c e, elements l ++ flatten_e (More x r e) = elements (Node c l x r) ++ flatten_e e. Proof. intros; simpl. now rewrite elements_node, app_ass. Qed. 
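(* Illustrative sketch: unfolding the fixpoint on a two-step enumeration
   gives, by computation,
     flatten_e (More x t (More y u End)) = x :: elements t ++ y :: elements u ++ nil,
   i.e. exactly the sequence of elements that the comparison functions below
   consume. The names [x], [y], [t] and [u] are placeholders. *)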
Lemma cons_1 : forall s e, flatten_e (cons s e) = elements s ++ flatten_e e. Proof. induction s; simpl; auto; intros. rewrite IHs1; apply flatten_e_elements. Qed. (** Correctness of this comparison *) Definition Cmp c x y := CompSpec L.eq L.lt x y c. Local Hint Unfold Cmp flip. Lemma compare_end_Cmp : forall e2, Cmp (compare_end e2) nil (flatten_e e2). Proof. destruct e2; simpl; constructor; auto. reflexivity. Qed. Lemma compare_more_Cmp : forall x1 cont x2 r2 e2 l, Cmp (cont (cons r2 e2)) l (elements r2 ++ flatten_e e2) -> Cmp (compare_more x1 cont (More x2 r2 e2)) (x1::l) (flatten_e (More x2 r2 e2)). Proof. simpl; intros; elim_compare x1 x2; simpl; red; auto. Qed. Lemma compare_cont_Cmp : forall s1 cont e2 l, (forall e, Cmp (cont e) l (flatten_e e)) -> Cmp (compare_cont s1 cont e2) (elements s1 ++ l) (flatten_e e2). Proof. induction s1 as [|c1 l1 Hl1 x1 r1 Hr1]; simpl; intros; auto. rewrite elements_node, app_ass; simpl. apply Hl1; auto. clear e2. intros [|x2 r2 e2]. simpl; auto. apply compare_more_Cmp. rewrite <- cons_1; auto. Qed. Lemma compare_Cmp : forall s1 s2, Cmp (compare s1 s2) (elements s1) (elements s2). Proof. intros; unfold compare. rewrite (app_nil_end (elements s1)). replace (elements s2) with (flatten_e (cons s2 End)) by (rewrite cons_1; simpl; rewrite <- app_nil_end; auto). apply compare_cont_Cmp; auto. intros. apply compare_end_Cmp; auto. Qed. Lemma compare_spec : forall s1 s2 `{Ok s1, Ok s2}, CompSpec eq lt s1 s2 (compare s1 s2). Proof. intros. destruct (compare_Cmp s1 s2); constructor. rewrite eq_Leq; auto. intros; exists s1, s2; repeat split; auto. intros; exists s2, s1; repeat split; auto. Qed. (** ** Equality test *) Lemma equal_spec : forall s1 s2 `{Ok s1, Ok s2}, equal s1 s2 = true <-> eq s1 s2. Proof. unfold equal; intros s1 s2 B1 B2. destruct (@compare_spec s1 s2 B1 B2) as [H|H|H]; split; intros H'; auto; try discriminate. rewrite H' in H. elim (StrictOrder_Irreflexive s2); auto. rewrite H' in H. elim (StrictOrder_Irreflexive s2); auto. Qed. (** ** A few results about [mindepth] and [maxdepth] *) Lemma mindepth_maxdepth s : mindepth s <= maxdepth s. Proof. induction s; simpl; auto. rewrite <- Nat.succ_le_mono. transitivity (mindepth s1). apply Nat.le_min_l. transitivity (maxdepth s1). trivial. apply Nat.le_max_l. Qed. Lemma maxdepth_cardinal s : cardinal s < 2^(maxdepth s). Proof. unfold Peano.lt. induction s as [|c l IHl x r IHr]. - auto. - simpl. rewrite <- Nat.add_succ_r, <- Nat.add_succ_l, Nat.add_0_r. apply Nat.add_le_mono; etransitivity; try apply IHl; try apply IHr; apply Nat.pow_le_mono; auto. * apply Nat.le_max_l. * apply Nat.le_max_r. Qed. Lemma mindepth_cardinal s : 2^(mindepth s) <= S (cardinal s). Proof. unfold Peano.lt. induction s as [|c l IHl x r IHr]. - auto. - simpl. rewrite <- Nat.add_succ_r, <- Nat.add_succ_l, Nat.add_0_r. apply Nat.add_le_mono; etransitivity; try apply IHl; try apply IHr; apply Nat.pow_le_mono; auto. * apply Nat.le_min_l. * apply Nat.le_min_r. Qed. Lemma maxdepth_log_cardinal s : s <> Leaf -> log2 (cardinal s) < maxdepth s. Proof. intros H. apply Nat.log2_lt_pow2. destruct s; simpl; intuition. apply maxdepth_cardinal. Qed. Lemma mindepth_log_cardinal s : mindepth s <= log2 (S (cardinal s)). Proof. apply Nat.log2_le_pow2. auto with arith. apply mindepth_cardinal. Qed. 
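(* Worked illustration: for a perfect binary tree with 7 nodes,
   mindepth = maxdepth = 3, and indeed cardinal = 7 < 2^3 (maxdepth_cardinal),
   2^3 <= S 7 (mindepth_cardinal), log2 7 = 2 < 3 (maxdepth_log_cardinal)
   and 3 <= log2 8 = 3 (mindepth_log_cardinal). *)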
End Props.coq-8.4pl2/theories/MSets/MSetList.v0000640000175000001440000006016311776623104016414 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false | y :: l => match X.compare x y with | Lt => false | Eq => true | Gt => mem x l end end. Fixpoint add x s := match s with | nil => x :: nil | y :: l => match X.compare x y with | Lt => x :: s | Eq => s | Gt => y :: add x l end end. Definition singleton (x : elt) := x :: nil. Fixpoint remove x s := match s with | nil => nil | y :: l => match X.compare x y with | Lt => s | Eq => l | Gt => y :: remove x l end end. Fixpoint union (s : t) : t -> t := match s with | nil => fun s' => s' | x :: l => (fix union_aux (s' : t) : t := match s' with | nil => s | x' :: l' => match X.compare x x' with | Lt => x :: union l s' | Eq => x :: union l l' | Gt => x' :: union_aux l' end end) end. Fixpoint inter (s : t) : t -> t := match s with | nil => fun _ => nil | x :: l => (fix inter_aux (s' : t) : t := match s' with | nil => nil | x' :: l' => match X.compare x x' with | Lt => inter l s' | Eq => x :: inter l l' | Gt => inter_aux l' end end) end. Fixpoint diff (s : t) : t -> t := match s with | nil => fun _ => nil | x :: l => (fix diff_aux (s' : t) : t := match s' with | nil => s | x' :: l' => match X.compare x x' with | Lt => x :: diff l s' | Eq => diff l l' | Gt => diff_aux l' end end) end. Fixpoint equal (s : t) : t -> bool := fun s' : t => match s, s' with | nil, nil => true | x :: l, x' :: l' => match X.compare x x' with | Eq => equal l l' | _ => false end | _, _ => false end. Fixpoint subset s s' := match s, s' with | nil, _ => true | x :: l, x' :: l' => match X.compare x x' with | Lt => false | Eq => subset l l' | Gt => subset s l' end | _, _ => false end. Definition fold (B : Type) (f : elt -> B -> B) (s : t) (i : B) : B := fold_left (flip f) s i. Fixpoint filter (f : elt -> bool) (s : t) : t := match s with | nil => nil | x :: l => if f x then x :: filter f l else filter f l end. Fixpoint for_all (f : elt -> bool) (s : t) : bool := match s with | nil => true | x :: l => if f x then for_all f l else false end. Fixpoint exists_ (f : elt -> bool) (s : t) : bool := match s with | nil => false | x :: l => if f x then true else exists_ f l end. Fixpoint partition (f : elt -> bool) (s : t) : t * t := match s with | nil => (nil, nil) | x :: l => let (s1, s2) := partition f l in if f x then (x :: s1, s2) else (s1, x :: s2) end. Definition cardinal (s : t) : nat := length s. Definition elements (x : t) : list elt := x. Definition min_elt (s : t) : option elt := match s with | nil => None | x :: _ => Some x end. Fixpoint max_elt (s : t) : option elt := match s with | nil => None | x :: nil => Some x | _ :: l => max_elt l end. Definition choose := min_elt. Fixpoint compare s s' := match s, s' with | nil, nil => Eq | nil, _ => Lt | _, nil => Gt | x::s, x'::s' => match X.compare x x' with | Eq => compare s s' | Lt => Lt | Gt => Gt end end. End Ops. Module MakeRaw (X: OrderedType) <: RawSets X. Module Import MX := OrderedTypeFacts X. Module Import ML := OrderedTypeLists X. Include Ops X. (** ** Proofs of set operation specifications. *) Section ForNotations. Definition inf x l := match l with | nil => true | y::_ => match X.compare x y with Lt => true | _ => false end end. Fixpoint isok l := match l with | nil => true | x::l => inf x l && isok l end. Notation Sort l := (isok l = true). Notation Inf := (lelistA X.lt). Notation In := (InA X.eq). 
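(* Illustrative sketch: on a list [a :: b :: c :: nil] with [X.lt a b] and
   [X.lt b c], every [inf] test falls in the [Lt] branch of [X.compare], so
   [isok] computes to [true]; as soon as some head is not strictly below its
   successor, the corresponding [inf] returns [false] and so does [isok].
   The names [a], [b], [c] are placeholder elements of [X.t]. *)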
(* TODO: modify proofs in order to avoid these hints *) Hint Resolve (@Equivalence_Reflexive _ _ X.eq_equiv). Hint Immediate (@Equivalence_Symmetric _ _ X.eq_equiv). Hint Resolve (@Equivalence_Transitive _ _ X.eq_equiv). Definition IsOk s := Sort s. Class Ok (s:t) : Prop := ok : Sort s. Hint Resolve @ok. Hint Unfold Ok. Instance Sort_Ok s `(Hs : Sort s) : Ok s := { ok := Hs }. Lemma inf_iff : forall x l, Inf x l <-> inf x l = true. Proof. intros x l; split; intro H. (* -> *) destruct H; simpl in *. reflexivity. rewrite <- compare_lt_iff in H; rewrite H; reflexivity. (* <- *) destruct l as [|y ys]; simpl in *. constructor; fail. revert H; case_eq (X.compare x y); try discriminate; []. intros Ha _. rewrite compare_lt_iff in Ha. constructor; assumption. Qed. Lemma isok_iff : forall l, sort X.lt l <-> Ok l. Proof. intro l; split; intro H. (* -> *) elim H. constructor; fail. intros y ys Ha Hb Hc. change (inf y ys && isok ys = true). rewrite inf_iff in Hc. rewrite andb_true_iff; tauto. (* <- *) induction l as [|x xs]. constructor. change (inf x xs && isok xs = true) in H. rewrite andb_true_iff, <- inf_iff in H. destruct H; constructor; tauto. Qed. Hint Extern 1 (Ok _) => rewrite <- isok_iff. Ltac inv_ok := match goal with | H:sort X.lt (_ :: _) |- _ => inversion_clear H; inv_ok | H:sort X.lt nil |- _ => clear H; inv_ok | H:sort X.lt ?l |- _ => change (Ok l) in H; inv_ok | H:Ok _ |- _ => rewrite <- isok_iff in H; inv_ok | |- Ok _ => rewrite <- isok_iff | _ => idtac end. Ltac inv := invlist InA; inv_ok; invlist lelistA. Ltac constructors := repeat constructor. Ltac sort_inf_in := match goal with | H:Inf ?x ?l, H':In ?y ?l |- _ => cut (X.lt x y); [ intro | apply Sort_Inf_In with l; auto] | _ => fail end. Global Instance isok_Ok s `(isok s = true) : Ok s | 10. Proof. intros. assumption. Qed. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) s := forall x, In x s -> P x. Definition Exists (P : elt -> Prop) (s : t) := exists x, In x s /\ P x. Lemma mem_spec : forall (s : t) (x : elt) (Hs : Ok s), mem x s = true <-> In x s. Proof. induction s; intros x Hs; inv; simpl. intuition. discriminate. inv. elim_compare x a; rewrite InA_cons; intuition; try order. discriminate. sort_inf_in. order. rewrite <- IHs; auto. rewrite IHs; auto. Qed. Lemma add_inf : forall (s : t) (x a : elt), Inf a s -> X.lt a x -> Inf a (add x s). Proof. simple induction s; simpl. intuition. intros; elim_compare x a; inv; intuition. Qed. Hint Resolve add_inf. Global Instance add_ok s x : forall `(Ok s), Ok (add x s). Proof. repeat rewrite <- isok_iff; revert s x. simple induction s; simpl. intuition. intros; elim_compare x a; inv; auto. Qed. Lemma add_spec : forall (s : t) (x y : elt) (Hs : Ok s), In y (add x s) <-> X.eq y x \/ In y s. Proof. induction s; simpl; intros. intuition. inv; auto. elim_compare x a; inv; rewrite !InA_cons, ?IHs; intuition. left; order. Qed. Lemma remove_inf : forall (s : t) (x a : elt) (Hs : Ok s), Inf a s -> Inf a (remove x s). Proof. induction s; simpl. intuition. intros; elim_compare x a; inv; auto. apply Inf_lt with a; auto. Qed. Hint Resolve remove_inf. Global Instance remove_ok s x : forall `(Ok s), Ok (remove x s). Proof. repeat rewrite <- isok_iff; revert s x. induction s; simpl. intuition. intros; elim_compare x a; inv; auto. Qed. Lemma remove_spec : forall (s : t) (x y : elt) (Hs : Ok s), In y (remove x s) <-> In y s /\ ~X.eq y x. Proof. 
induction s; simpl; intros. intuition; inv; auto. elim_compare x a; inv; rewrite !InA_cons, ?IHs; intuition; try sort_inf_in; try order. Qed. Global Instance singleton_ok x : Ok (singleton x). Proof. unfold singleton; simpl; auto. Qed. Lemma singleton_spec : forall x y : elt, In y (singleton x) <-> X.eq y x. Proof. unfold singleton; simpl; split; intros; inv; auto. Qed. Ltac induction2 := simple induction s; [ simpl; auto; try solve [ intros; inv ] | intros x l Hrec; simple induction s'; [ simpl; auto; try solve [ intros; inv ] | intros x' l' Hrec'; simpl; elim_compare x x'; intros; inv; auto ]]. Lemma union_inf : forall (s s' : t) (a : elt) (Hs : Ok s) (Hs' : Ok s'), Inf a s -> Inf a s' -> Inf a (union s s'). Proof. induction2. Qed. Hint Resolve union_inf. Global Instance union_ok s s' : forall `(Ok s, Ok s'), Ok (union s s'). Proof. repeat rewrite <- isok_iff; revert s s'. induction2; constructors; try apply @ok; auto. apply Inf_eq with x'; auto; apply union_inf; auto; apply Inf_eq with x; auto. change (Inf x' (union (x :: l) l')); auto. Qed. Lemma union_spec : forall (s s' : t) (x : elt) (Hs : Ok s) (Hs' : Ok s'), In x (union s s') <-> In x s \/ In x s'. Proof. induction2; try rewrite ?InA_cons, ?Hrec, ?Hrec'; intuition; inv; auto. left; order. Qed. Lemma inter_inf : forall (s s' : t) (a : elt) (Hs : Ok s) (Hs' : Ok s'), Inf a s -> Inf a s' -> Inf a (inter s s'). Proof. induction2. apply Inf_lt with x; auto. apply Hrec'; auto. apply Inf_lt with x'; auto. Qed. Hint Resolve inter_inf. Global Instance inter_ok s s' : forall `(Ok s, Ok s'), Ok (inter s s'). Proof. repeat rewrite <- isok_iff; revert s s'. induction2. constructors; auto. apply Inf_eq with x'; auto; apply inter_inf; auto; apply Inf_eq with x; auto. Qed. Lemma inter_spec : forall (s s' : t) (x : elt) (Hs : Ok s) (Hs' : Ok s'), In x (inter s s') <-> In x s /\ In x s'. Proof. induction2; try rewrite ?InA_cons, ?Hrec, ?Hrec'; intuition; inv; auto; try sort_inf_in; try order. left; order. Qed. Lemma diff_inf : forall (s s' : t) (Hs : Ok s) (Hs' : Ok s') (a : elt), Inf a s -> Inf a s' -> Inf a (diff s s'). Proof. intros s s'; repeat rewrite <- isok_iff; revert s s'. induction2. apply Hrec; trivial. apply Inf_lt with x; auto. apply Inf_lt with x'; auto. apply Hrec'; auto. apply Inf_lt with x'; auto. Qed. Hint Resolve diff_inf. Global Instance diff_ok s s' : forall `(Ok s, Ok s'), Ok (diff s s'). Proof. repeat rewrite <- isok_iff; revert s s'. induction2. Qed. Lemma diff_spec : forall (s s' : t) (x : elt) (Hs : Ok s) (Hs' : Ok s'), In x (diff s s') <-> In x s /\ ~In x s'. Proof. induction2; try rewrite ?InA_cons, ?Hrec, ?Hrec'; intuition; inv; auto; try sort_inf_in; try order. right; intuition; inv; auto. Qed. Lemma equal_spec : forall (s s' : t) (Hs : Ok s) (Hs' : Ok s'), equal s s' = true <-> Equal s s'. Proof. induction s as [ | x s IH]; intros [ | x' s'] Hs Hs'; simpl. intuition. split; intros H. discriminate. assert (In x' nil) by (rewrite H; auto). inv. split; intros H. discriminate. assert (In x nil) by (rewrite <-H; auto). inv. inv. elim_compare x x' as C; try discriminate. (* x=x' *) rewrite IH; auto. split; intros E y; specialize (E y). rewrite !InA_cons, E, C; intuition. rewrite !InA_cons, C in E. intuition; try sort_inf_in; order. (* xx' *) split; intros E. discriminate. assert (In x' (x::s)) by (rewrite E; auto). inv; try sort_inf_in; order. Qed. Lemma subset_spec : forall (s s' : t) (Hs : Ok s) (Hs' : Ok s'), subset s s' = true <-> Subset s s'. Proof. intros s s'; revert s. 
induction s' as [ | x' s' IH]; intros [ | x s] Hs Hs'; simpl; auto. split; try red; intros; auto. split; intros H. discriminate. assert (In x nil) by (apply H; auto). inv. split; try red; intros; auto. inv. inv. elim_compare x x' as C. (* x=x' *) rewrite IH; auto. split; intros S y; specialize (S y). rewrite !InA_cons, C. intuition. rewrite !InA_cons, C in S. intuition; try sort_inf_in; order. (* xx' *) rewrite IH; auto. split; intros S y; specialize (S y). rewrite !InA_cons. intuition. rewrite !InA_cons in S. rewrite !InA_cons. intuition; try sort_inf_in; order. Qed. Global Instance empty_ok : Ok empty. Proof. constructors. Qed. Lemma empty_spec : Empty empty. Proof. unfold Empty, empty; intuition; inv. Qed. Lemma is_empty_spec : forall s : t, is_empty s = true <-> Empty s. Proof. intros [ | x s]; simpl. split; auto. intros _ x H. inv. split. discriminate. intros H. elim (H x); auto. Qed. Lemma elements_spec1 : forall (s : t) (x : elt), In x (elements s) <-> In x s. Proof. intuition. Qed. Lemma elements_spec2 : forall (s : t) (Hs : Ok s), sort X.lt (elements s). Proof. intro s; repeat rewrite <- isok_iff; auto. Qed. Lemma elements_spec2w : forall (s : t) (Hs : Ok s), NoDupA X.eq (elements s). Proof. intro s; repeat rewrite <- isok_iff; auto. Qed. Lemma min_elt_spec1 : forall (s : t) (x : elt), min_elt s = Some x -> In x s. Proof. destruct s; simpl; inversion 1; auto. Qed. Lemma min_elt_spec2 : forall (s : t) (x y : elt) (Hs : Ok s), min_elt s = Some x -> In y s -> ~ X.lt y x. Proof. induction s as [ | x s IH]; simpl; inversion 2; subst. intros; inv; try sort_inf_in; order. Qed. Lemma min_elt_spec3 : forall s : t, min_elt s = None -> Empty s. Proof. destruct s; simpl; red; intuition. inv. discriminate. Qed. Lemma max_elt_spec1 : forall (s : t) (x : elt), max_elt s = Some x -> In x s. Proof. induction s as [ | x s IH]. inversion 1. destruct s as [ | y s]. simpl. inversion 1; subst; auto. right; apply IH; auto. Qed. Lemma max_elt_spec2 : forall (s : t) (x y : elt) (Hs : Ok s), max_elt s = Some x -> In y s -> ~ X.lt x y. Proof. induction s as [ | a s IH]. inversion 2. destruct s as [ | b s]. inversion 2; subst. intros; inv; order. intros. inv; auto. assert (~X.lt x b) by (apply IH; auto). assert (X.lt a b) by auto. order. Qed. Lemma max_elt_spec3 : forall s : t, max_elt s = None -> Empty s. Proof. induction s as [ | a s IH]. red; intuition; inv. destruct s as [ | b s]. inversion 1. intros; elim IH with b; auto. Qed. Definition choose_spec1 : forall (s : t) (x : elt), choose s = Some x -> In x s := min_elt_spec1. Definition choose_spec2 : forall s : t, choose s = None -> Empty s := min_elt_spec3. Lemma choose_spec3: forall s s' x x', Ok s -> Ok s' -> choose s = Some x -> choose s' = Some x' -> Equal s s' -> X.eq x x'. Proof. unfold choose; intros s s' x x' Hs Hs' Hx Hx' H. assert (~X.lt x x'). apply min_elt_spec2 with s'; auto. rewrite <-H; auto using min_elt_spec1. assert (~X.lt x' x). apply min_elt_spec2 with s; auto. rewrite H; auto using min_elt_spec1. order. Qed. Lemma fold_spec : forall (s : t) (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (flip f) (elements s) i. Proof. reflexivity. Qed. Lemma cardinal_spec : forall (s : t) (Hs : Ok s), cardinal s = length (elements s). Proof. auto. Qed. Lemma filter_inf : forall (s : t) (x : elt) (f : elt -> bool) (Hs : Ok s), Inf x s -> Inf x (filter f s). Proof. simple induction s; simpl. intuition. intros x l Hrec a f Hs Ha; inv. case (f x); auto. apply Hrec; auto. apply Inf_lt with x; auto. Qed. 
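(* Illustrative note: [filter] keeps the surviving elements in their original
   order, so a strict lower bound of [s] remains a lower bound of
   [filter f s] (this is [filter_inf] above); e.g. filtering
   [a :: b :: c :: nil] yields a sublist of it, hence a list that is still
   sorted, which is what [filter_ok] below records. The names [a], [b], [c]
   are placeholders. *)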
Global Instance filter_ok s f : forall `(Ok s), Ok (filter f s). Proof. repeat rewrite <- isok_iff; revert s f. simple induction s; simpl. auto. intros x l Hrec f Hs; inv. case (f x); auto. constructors; auto. apply filter_inf; auto. Qed. Lemma filter_spec : forall (s : t) (x : elt) (f : elt -> bool), Proper (X.eq==>eq) f -> (In x (filter f s) <-> In x s /\ f x = true). Proof. induction s; simpl; intros. split; intuition; inv. destruct (f a) eqn:F; rewrite !InA_cons, ?IHs; intuition. setoid_replace x with a; auto. setoid_replace a with x in F; auto; congruence. Qed. Lemma for_all_spec : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Proof. unfold For_all; induction s; simpl; intros. split; intros; auto. inv. destruct (f a) eqn:F. rewrite IHs; auto. firstorder. inv; auto. setoid_replace x with a; auto. split; intros H'. discriminate. rewrite H' in F; auto. Qed. Lemma exists_spec : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Proof. unfold Exists; induction s; simpl; intros. firstorder. discriminate. inv. destruct (f a) eqn:F. firstorder. rewrite IHs; auto. firstorder. inv. setoid_replace a with x in F; auto; congruence. exists x; auto. Qed. Lemma partition_inf1 : forall (s : t) (f : elt -> bool) (x : elt) (Hs : Ok s), Inf x s -> Inf x (fst (partition f s)). Proof. intros s f x; repeat rewrite <- isok_iff; revert s f x. simple induction s; simpl. intuition. intros x l Hrec f a Hs Ha; inv. generalize (Hrec f a H). case (f x); case (partition f l); simpl. auto. intros; apply H2; apply Inf_lt with x; auto. Qed. Lemma partition_inf2 : forall (s : t) (f : elt -> bool) (x : elt) (Hs : Ok s), Inf x s -> Inf x (snd (partition f s)). Proof. intros s f x; repeat rewrite <- isok_iff; revert s f x. simple induction s; simpl. intuition. intros x l Hrec f a Hs Ha; inv. generalize (Hrec f a H). case (f x); case (partition f l); simpl. intros; apply H2; apply Inf_lt with x; auto. auto. Qed. Global Instance partition_ok1 s f : forall `(Ok s), Ok (fst (partition f s)). Proof. repeat rewrite <- isok_iff; revert s f. simple induction s; simpl. auto. intros x l Hrec f Hs; inv. generalize (Hrec f H); generalize (@partition_inf1 l f x). case (f x); case (partition f l); simpl; auto. Qed. Global Instance partition_ok2 s f : forall `(Ok s), Ok (snd (partition f s)). Proof. repeat rewrite <- isok_iff; revert s f. simple induction s; simpl. auto. intros x l Hrec f Hs; inv. generalize (Hrec f H); generalize (@partition_inf2 l f x). case (f x); case (partition f l); simpl; auto. Qed. Lemma partition_spec1 : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> Equal (fst (partition f s)) (filter f s). Proof. simple induction s; simpl; auto; unfold Equal. split; auto. intros x l Hrec f Hf. generalize (Hrec f Hf); clear Hrec. destruct (partition f l) as [s1 s2]; simpl; intros. case (f x); simpl; auto. split; inversion_clear 1; auto. constructor 2; rewrite <- H; auto. constructor 2; rewrite H; auto. Qed. Lemma partition_spec2 : forall (s : t) (f : elt -> bool), Proper (X.eq==>eq) f -> Equal (snd (partition f s)) (filter (fun x => negb (f x)) s). Proof. simple induction s; simpl; auto; unfold Equal. split; auto. intros x l Hrec f Hf. generalize (Hrec f Hf); clear Hrec. destruct (partition f l) as [s1 s2]; simpl; intros. case (f x); simpl; auto. split; inversion_clear 1; auto. constructor 2; rewrite <- H; auto. constructor 2; rewrite H; auto. Qed. End ForNotations. 
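(* Worked illustration: by [partition_spec1] and [partition_spec2],
   [partition f s] splits [s] in a single pass into the elements satisfying
   [f] and the others; for instance, with hypothetical numeric elements and
   an [even] test,
     partition even (1 :: 2 :: 3 :: 4 :: nil) = (2 :: 4 :: nil, 1 :: 3 :: nil). *)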
Definition In := InA X.eq. Instance In_compat : Proper (X.eq==>eq==> iff) In. Proof. repeat red; intros; rewrite H, H0; auto. Qed. Module L := MakeListOrdering X. Definition eq := L.eq. Definition eq_equiv := L.eq_equiv. Definition lt l1 l2 := exists l1' l2', Ok l1' /\ Ok l2' /\ eq l1 l1' /\ eq l2 l2' /\ L.lt l1' l2'. Instance lt_strorder : StrictOrder lt. Proof. split. intros s (s1 & s2 & B1 & B2 & E1 & E2 & L). repeat rewrite <- isok_iff in *. assert (eqlistA X.eq s1 s2). apply SortA_equivlistA_eqlistA with (ltA:=X.lt); auto using @ok with *. transitivity s; auto. symmetry; auto. rewrite H in L. apply (StrictOrder_Irreflexive s2); auto. intros s1 s2 s3 (s1' & s2' & B1 & B2 & E1 & E2 & L12) (s2'' & s3' & B2' & B3 & E2' & E3 & L23). exists s1', s3'. repeat rewrite <- isok_iff in *. do 4 (split; trivial). assert (eqlistA X.eq s2' s2''). apply SortA_equivlistA_eqlistA with (ltA:=X.lt); auto using @ok with *. transitivity s2; auto. symmetry; auto. transitivity s2'; auto. rewrite H; auto. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. intros s1 s2 E12 s3 s4 E34. split. intros (s1' & s3' & B1 & B3 & E1 & E3 & LT). exists s1', s3'; do 2 (split; trivial). split. transitivity s1; auto. symmetry; auto. split; auto. transitivity s3; auto. symmetry; auto. intros (s1' & s3' & B1 & B3 & E1 & E3 & LT). exists s1', s3'; do 2 (split; trivial). split. transitivity s2; auto. split; auto. transitivity s4; auto. Qed. Lemma compare_spec_aux : forall s s', CompSpec eq L.lt s s' (compare s s'). Proof. induction s as [|x s IH]; intros [|x' s']; simpl; intuition. elim_compare x x'; auto. Qed. Lemma compare_spec : forall s s', Ok s -> Ok s' -> CompSpec eq lt s s' (compare s s'). Proof. intros s s' Hs Hs'. destruct (compare_spec_aux s s'); constructor; auto. exists s, s'; repeat split; auto using @ok. exists s', s; repeat split; auto using @ok. Qed. End MakeRaw. (** * Encapsulation Now, in order to really provide a functor implementing [S], we need to encapsulate everything into a type of strictly ordered lists. *) Module Make (X: OrderedType) <: S with Module E := X. Module Raw := MakeRaw X. Include Raw2Sets X Raw. End Make. (** For this specific implementation, eq coincides with Leibniz equality *) Require Eqdep_dec. Module Type OrderedTypeWithLeibniz. Include OrderedType. Parameter eq_leibniz : forall x y, eq x y -> x = y. End OrderedTypeWithLeibniz. Module Type SWithLeibniz. Declare Module E : OrderedTypeWithLeibniz. Include SetsOn E. Parameter eq_leibniz : forall x y, eq x y -> x = y. End SWithLeibniz. Module MakeWithLeibniz (X: OrderedTypeWithLeibniz) <: SWithLeibniz with Module E := X. Module E := X. Module Raw := MakeRaw X. Include Raw2SetsOn X Raw. Lemma eq_leibniz_list : forall xs ys, eqlistA X.eq xs ys -> xs = ys. Proof. induction xs as [|x xs]; intros [|y ys] H; inversion H; [ | ]. reflexivity. f_equal. apply X.eq_leibniz; congruence. apply IHxs; subst; assumption. Qed. Lemma eq_leibniz : forall s s', eq s s' -> s = s'. Proof. intros [xs Hxs] [ys Hys] Heq. change (equivlistA X.eq xs ys) in Heq. assert (H : eqlistA X.eq xs ys). rewrite <- Raw.isok_iff in Hxs, Hys. apply SortA_equivlistA_eqlistA with X.lt; auto with *. apply eq_leibniz_list in H. subst ys. f_equal. apply Eqdep_dec.eq_proofs_unicity. intros x y; destruct (bool_dec x y); tauto. Qed. End MakeWithLeibniz. 
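(* A minimal usage sketch, assuming [Nat_as_OT] from [OrdersEx] provides the
   [OrderedType] instance for [nat]:
     Require Import MSetList OrdersEx.
     Module NatSet := MSetList.Make OrdersEx.Nat_as_OT.
     Definition ex := NatSet.add 3 (NatSet.add 1 NatSet.empty).
     (* [NatSet.mem 1 ex] computes to [true], and [NatSet.elements ex] is
        sorted with respect to [lt]. *)
*)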
coq-8.4pl2/theories/MSets/MSetProperties.v0000640000175000001440000010472711776416511017644 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* E.eq x y \/ In y s. Lemma Add_Equal : forall x s s', Add x s s' <-> s' [=] add x s. Proof. unfold Add. split; intros. red; intros. rewrite H; clear H. fsetdec. fsetdec. Qed. Ltac expAdd := repeat rewrite Add_Equal. Section BasicProperties. Variable s s' s'' s1 s2 s3 : t. Variable x x' : elt. Lemma equal_refl : s[=]s. Proof. fsetdec. Qed. Lemma equal_sym : s[=]s' -> s'[=]s. Proof. fsetdec. Qed. Lemma equal_trans : s1[=]s2 -> s2[=]s3 -> s1[=]s3. Proof. fsetdec. Qed. Lemma subset_refl : s[<=]s. Proof. fsetdec. Qed. Lemma subset_trans : s1[<=]s2 -> s2[<=]s3 -> s1[<=]s3. Proof. fsetdec. Qed. Lemma subset_antisym : s[<=]s' -> s'[<=]s -> s[=]s'. Proof. fsetdec. Qed. Lemma subset_equal : s[=]s' -> s[<=]s'. Proof. fsetdec. Qed. Lemma subset_empty : empty[<=]s. Proof. fsetdec. Qed. Lemma subset_remove_3 : s1[<=]s2 -> remove x s1 [<=] s2. Proof. fsetdec. Qed. Lemma subset_diff : s1[<=]s3 -> diff s1 s2 [<=] s3. Proof. fsetdec. Qed. Lemma subset_add_3 : In x s2 -> s1[<=]s2 -> add x s1 [<=] s2. Proof. fsetdec. Qed. Lemma subset_add_2 : s1[<=]s2 -> s1[<=] add x s2. Proof. fsetdec. Qed. Lemma in_subset : In x s1 -> s1[<=]s2 -> In x s2. Proof. fsetdec. Qed. Lemma double_inclusion : s1[=]s2 <-> s1[<=]s2 /\ s2[<=]s1. Proof. intuition fsetdec. Qed. Lemma empty_is_empty_1 : Empty s -> s[=]empty. Proof. fsetdec. Qed. Lemma empty_is_empty_2 : s[=]empty -> Empty s. Proof. fsetdec. Qed. Lemma add_equal : In x s -> add x s [=] s. Proof. fsetdec. Qed. Lemma add_add : add x (add x' s) [=] add x' (add x s). Proof. fsetdec. Qed. Lemma remove_equal : ~ In x s -> remove x s [=] s. Proof. fsetdec. Qed. Lemma Equal_remove : s[=]s' -> remove x s [=] remove x s'. Proof. fsetdec. Qed. Lemma add_remove : In x s -> add x (remove x s) [=] s. Proof. fsetdec. Qed. Lemma remove_add : ~In x s -> remove x (add x s) [=] s. Proof. fsetdec. Qed. Lemma singleton_equal_add : singleton x [=] add x empty. Proof. fsetdec. Qed. Lemma remove_singleton_empty : In x s -> remove x s [=] empty -> singleton x [=] s. Proof. fsetdec. Qed. Lemma union_sym : union s s' [=] union s' s. Proof. fsetdec. Qed. Lemma union_subset_equal : s[<=]s' -> union s s' [=] s'. Proof. fsetdec. Qed. Lemma union_equal_1 : s[=]s' -> union s s'' [=] union s' s''. Proof. fsetdec. Qed. Lemma union_equal_2 : s'[=]s'' -> union s s' [=] union s s''. Proof. fsetdec. Qed. Lemma union_assoc : union (union s s') s'' [=] union s (union s' s''). Proof. fsetdec. Qed. Lemma add_union_singleton : add x s [=] union (singleton x) s. Proof. fsetdec. Qed. Lemma union_add : union (add x s) s' [=] add x (union s s'). Proof. fsetdec. Qed. Lemma union_remove_add_1 : union (remove x s) (add x s') [=] union (add x s) (remove x s'). Proof. fsetdec. Qed. Lemma union_remove_add_2 : In x s -> union (remove x s) (add x s') [=] union s s'. Proof. fsetdec. Qed. Lemma union_subset_1 : s [<=] union s s'. Proof. fsetdec. Qed. Lemma union_subset_2 : s' [<=] union s s'. Proof. fsetdec. Qed. Lemma union_subset_3 : s[<=]s'' -> s'[<=]s'' -> union s s' [<=] s''. Proof. fsetdec. Qed. Lemma union_subset_4 : s[<=]s' -> union s s'' [<=] union s' s''. Proof. fsetdec. Qed. Lemma union_subset_5 : s[<=]s' -> union s'' s [<=] union s'' s'. Proof. fsetdec. Qed. Lemma empty_union_1 : Empty s -> union s s' [=] s'. Proof. fsetdec. Qed. 
Lemma empty_union_2 : Empty s -> union s' s [=] s'. Proof. fsetdec. Qed. Lemma not_in_union : ~In x s -> ~In x s' -> ~In x (union s s'). Proof. fsetdec. Qed. Lemma inter_sym : inter s s' [=] inter s' s. Proof. fsetdec. Qed. Lemma inter_subset_equal : s[<=]s' -> inter s s' [=] s. Proof. fsetdec. Qed. Lemma inter_equal_1 : s[=]s' -> inter s s'' [=] inter s' s''. Proof. fsetdec. Qed. Lemma inter_equal_2 : s'[=]s'' -> inter s s' [=] inter s s''. Proof. fsetdec. Qed. Lemma inter_assoc : inter (inter s s') s'' [=] inter s (inter s' s''). Proof. fsetdec. Qed. Lemma union_inter_1 : inter (union s s') s'' [=] union (inter s s'') (inter s' s''). Proof. fsetdec. Qed. Lemma union_inter_2 : union (inter s s') s'' [=] inter (union s s'') (union s' s''). Proof. fsetdec. Qed. Lemma inter_add_1 : In x s' -> inter (add x s) s' [=] add x (inter s s'). Proof. fsetdec. Qed. Lemma inter_add_2 : ~ In x s' -> inter (add x s) s' [=] inter s s'. Proof. fsetdec. Qed. Lemma empty_inter_1 : Empty s -> Empty (inter s s'). Proof. fsetdec. Qed. Lemma empty_inter_2 : Empty s' -> Empty (inter s s'). Proof. fsetdec. Qed. Lemma inter_subset_1 : inter s s' [<=] s. Proof. fsetdec. Qed. Lemma inter_subset_2 : inter s s' [<=] s'. Proof. fsetdec. Qed. Lemma inter_subset_3 : s''[<=]s -> s''[<=]s' -> s''[<=] inter s s'. Proof. fsetdec. Qed. Lemma empty_diff_1 : Empty s -> Empty (diff s s'). Proof. fsetdec. Qed. Lemma empty_diff_2 : Empty s -> diff s' s [=] s'. Proof. fsetdec. Qed. Lemma diff_subset : diff s s' [<=] s. Proof. fsetdec. Qed. Lemma diff_subset_equal : s[<=]s' -> diff s s' [=] empty. Proof. fsetdec. Qed. Lemma remove_diff_singleton : remove x s [=] diff s (singleton x). Proof. fsetdec. Qed. Lemma diff_inter_empty : inter (diff s s') (inter s s') [=] empty. Proof. fsetdec. Qed. Lemma diff_inter_all : union (diff s s') (inter s s') [=] s. Proof. fsetdec. Qed. Lemma Add_add : Add x s (add x s). Proof. expAdd; fsetdec. Qed. Lemma Add_remove : In x s -> Add x (remove x s) s. Proof. expAdd; fsetdec. Qed. Lemma union_Add : Add x s s' -> Add x (union s s'') (union s' s''). Proof. expAdd; fsetdec. Qed. Lemma inter_Add : In x s'' -> Add x s s' -> Add x (inter s s'') (inter s' s''). Proof. expAdd; fsetdec. Qed. Lemma union_Equal : In x s'' -> Add x s s' -> union s s'' [=] union s' s''. Proof. expAdd; fsetdec. Qed. Lemma inter_Add_2 : ~In x s'' -> Add x s s' -> inter s s'' [=] inter s' s''. Proof. expAdd; fsetdec. Qed. End BasicProperties. Hint Immediate equal_sym add_remove remove_add union_sym inter_sym: set. Hint Resolve equal_refl equal_trans subset_refl subset_equal subset_antisym subset_trans subset_empty subset_remove_3 subset_diff subset_add_3 subset_add_2 in_subset empty_is_empty_1 empty_is_empty_2 add_equal remove_equal singleton_equal_add union_subset_equal union_equal_1 union_equal_2 union_assoc add_union_singleton union_add union_subset_1 union_subset_2 union_subset_3 inter_subset_equal inter_equal_1 inter_equal_2 inter_assoc union_inter_1 union_inter_2 inter_add_1 inter_add_2 empty_inter_1 empty_inter_2 empty_union_1 empty_union_2 empty_diff_1 empty_diff_2 union_Add inter_Add union_Equal inter_Add_2 not_in_union inter_subset_1 inter_subset_2 inter_subset_3 diff_subset diff_subset_equal remove_diff_singleton diff_inter_empty diff_inter_all Add_add Add_remove Equal_remove add_add : set. (** * Properties of elements *) Lemma elements_Empty : forall s, Empty s <-> elements s = nil. Proof. intros. unfold Empty. split; intros. assert (forall a, ~ List.In a (elements s)). red; intros. apply (H a). rewrite elements_iff. 
rewrite InA_alt; exists a; auto with relations. destruct (elements s); auto. elim (H0 e); simpl; auto. red; intros. rewrite elements_iff in H0. rewrite InA_alt in H0; destruct H0. rewrite H in H0; destruct H0 as (_,H0); inversion H0. Qed. Lemma elements_empty : elements empty = nil. Proof. rewrite <-elements_Empty; auto with set. Qed. (** * Conversions between lists and sets *) Definition of_list (l : list elt) := List.fold_right add empty l. Definition to_list := elements. Lemma of_list_1 : forall l x, In x (of_list l) <-> InA E.eq x l. Proof. induction l; simpl; intro x. rewrite empty_iff, InA_nil. intuition. rewrite add_iff, InA_cons, IHl. intuition. Qed. Lemma of_list_2 : forall l, equivlistA E.eq (to_list (of_list l)) l. Proof. unfold to_list; red; intros. rewrite <- elements_iff; apply of_list_1. Qed. Lemma of_list_3 : forall s, of_list (to_list s) [=] s. Proof. unfold to_list; red; intros. rewrite of_list_1; symmetry; apply elements_iff. Qed. (** * Fold *) Section Fold. Notation NoDup := (NoDupA E.eq). Notation InA := (InA E.eq). (** Alternative specification via [fold_right] *) Lemma fold_spec_right (s:t)(A:Type)(i:A)(f : elt -> A -> A) : fold f s i = List.fold_right f i (rev (elements s)). Proof. rewrite fold_spec. symmetry. apply fold_left_rev_right. Qed. (** ** Induction principles for fold (contributed by S. Lescuyer) *) (** In the following lemma, the step hypothesis is deliberately restricted to the precise set s we are considering. *) Theorem fold_rec : forall (A:Type)(P : t -> A -> Type)(f : elt -> A -> A)(i:A)(s:t), (forall s', Empty s' -> P s' i) -> (forall x a s' s'', In x s -> ~In x s' -> Add x s' s'' -> P s' a -> P s'' (f x a)) -> P s (fold f s i). Proof. intros A P f i s Pempty Pstep. rewrite fold_spec_right. set (l:=rev (elements s)). assert (Pstep' : forall x a s' s'', InA x l -> ~In x s' -> Add x s' s'' -> P s' a -> P s'' (f x a)). intros; eapply Pstep; eauto. rewrite elements_iff, <- InA_rev; auto with *. assert (Hdup : NoDup l) by (unfold l; eauto using elements_3w, NoDupA_rev with *). assert (Hsame : forall x, In x s <-> InA x l) by (unfold l; intros; rewrite elements_iff, InA_rev; intuition). clear Pstep; clearbody l; revert s Hsame; induction l. (* empty *) intros s Hsame; simpl. apply Pempty. intro x. rewrite Hsame, InA_nil; intuition. (* step *) intros s Hsame; simpl. apply Pstep' with (of_list l); auto with relations. inversion_clear Hdup; rewrite of_list_1; auto. red. intros. rewrite Hsame, of_list_1, InA_cons; intuition. apply IHl. intros; eapply Pstep'; eauto. inversion_clear Hdup; auto. exact (of_list_1 l). Qed. (** Same, with [empty] and [add] instead of [Empty] and [Add]. In this case, [P] must be compatible with equality of sets *) Theorem fold_rec_bis : forall (A:Type)(P : t -> A -> Type)(f : elt -> A -> A)(i:A)(s:t), (forall s s' a, s[=]s' -> P s a -> P s' a) -> (P empty i) -> (forall x a s', In x s -> ~In x s' -> P s' a -> P (add x s') (f x a)) -> P s (fold f s i). Proof. intros A P f i s Pmorphism Pempty Pstep. apply fold_rec; intros. apply Pmorphism with empty; auto with set. rewrite Add_Equal in H1; auto with set. apply Pmorphism with (add x s'); auto with set. Qed. Lemma fold_rec_nodep : forall (A:Type)(P : A -> Type)(f : elt -> A -> A)(i:A)(s:t), P i -> (forall x a, In x s -> P a -> P (f x a)) -> P (fold f s i). Proof. intros; apply fold_rec_bis with (P:=fun _ => P); auto. Qed. (** [fold_rec_weak] is a weaker principle than [fold_rec_bis] : the step hypothesis must here be applicable to any [x]. 
At the same time, it looks more like an induction principle, and hence can be easier to use. *) Lemma fold_rec_weak : forall (A:Type)(P : t -> A -> Type)(f : elt -> A -> A)(i:A), (forall s s' a, s[=]s' -> P s a -> P s' a) -> P empty i -> (forall x a s, ~In x s -> P s a -> P (add x s) (f x a)) -> forall s, P s (fold f s i). Proof. intros; apply fold_rec_bis; auto. Qed. Lemma fold_rel : forall (A B:Type)(R : A -> B -> Type) (f : elt -> A -> A)(g : elt -> B -> B)(i : A)(j : B)(s : t), R i j -> (forall x a b, In x s -> R a b -> R (f x a) (g x b)) -> R (fold f s i) (fold g s j). Proof. intros A B R f g i j s Rempty Rstep. rewrite 2 fold_spec_right. set (l:=rev (elements s)). assert (Rstep' : forall x a b, InA x l -> R a b -> R (f x a) (g x b)) by (intros; apply Rstep; auto; rewrite elements_iff, <- InA_rev; auto with *). clearbody l; clear Rstep s. induction l; simpl; auto with relations. Qed. (** From the induction principle on [fold], we can deduce some general induction principles on sets. *) Lemma set_induction : forall P : t -> Type, (forall s, Empty s -> P s) -> (forall s s', P s -> forall x, ~In x s -> Add x s s' -> P s') -> forall s, P s. Proof. intros. apply (@fold_rec _ (fun s _ => P s) (fun _ _ => tt) tt s); eauto. Qed. Lemma set_induction_bis : forall P : t -> Type, (forall s s', s [=] s' -> P s -> P s') -> P empty -> (forall x s, ~In x s -> P s -> P (add x s)) -> forall s, P s. Proof. intros. apply (@fold_rec_bis _ (fun s _ => P s) (fun _ _ => tt) tt s); eauto. Qed. (** [fold] can be used to reconstruct the same initial set. *) Lemma fold_identity : forall s, fold add s empty [=] s. Proof. intros. apply fold_rec with (P:=fun s acc => acc[=]s); auto with set. intros. rewrite H2; rewrite Add_Equal in H1; auto with set. Qed. (** ** Alternative (weaker) specifications for [fold] *) (** When [MSets] was first designed, the order in which Ocaml's [Set.fold] takes the set elements was unspecified. This specification reflects this fact: *) Lemma fold_0 : forall s (A : Type) (i : A) (f : elt -> A -> A), exists l : list elt, NoDup l /\ (forall x : elt, In x s <-> InA x l) /\ fold f s i = fold_right f i l. Proof. intros; exists (rev (elements s)); split. apply NoDupA_rev; auto with *. split; intros. rewrite elements_iff; do 2 rewrite InA_alt. split; destruct 1; generalize (In_rev (elements s) x0); exists x0; intuition. apply fold_spec_right. Qed. (** An alternate (and previous) specification for [fold] was based on the recursive structure of a set. It is now lemmas [fold_1] and [fold_2]. *) Lemma fold_1 : forall s (A : Type) (eqA : A -> A -> Prop) (st : Equivalence eqA) (i : A) (f : elt -> A -> A), Empty s -> eqA (fold f s i) i. Proof. unfold Empty; intros; destruct (fold_0 s i f) as (l,(H1, (H2, H3))). rewrite H3; clear H3. generalize H H2; clear H H2; case l; simpl; intros. reflexivity. elim (H e). elim (H2 e); intuition. Qed. Lemma fold_2 : forall s s' x (A : Type) (eqA : A -> A -> Prop) (st : Equivalence eqA) (i : A) (f : elt -> A -> A), Proper (E.eq==>eqA==>eqA) f -> transpose eqA f -> ~ In x s -> Add x s s' -> eqA (fold f s' i) (f x (fold f s i)). Proof. intros; destruct (fold_0 s i f) as (l,(Hl, (Hl1, Hl2))); destruct (fold_0 s' i f) as (l',(Hl', (Hl'1, Hl'2))). rewrite Hl2; rewrite Hl'2; clear Hl2 Hl'2. apply fold_right_add with (eqA:=E.eq)(eqB:=eqA); auto. eauto with *. rewrite <- Hl1; auto. intros a; rewrite InA_cons; rewrite <- Hl1; rewrite <- Hl'1; rewrite (H2 a); intuition. Qed. 
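(* Illustrative note: [fold_1] and [fold_2] recover the recursive reading of
   [fold]: on an empty set it returns the seed, and adding a fresh element
   [x] prepends one application of [f]. Instantiated with [f := fun _ => S]
   and seed [0], this is how [cardinal_1] and [cardinal_2] below compute
   cardinals through [cardinal_fold]. *)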
(** In fact, [fold] on empty sets is more than equivalent to the initial element, it is Leibniz-equal to it. *) Lemma fold_1b : forall s (A : Type)(i : A) (f : elt -> A -> A), Empty s -> (fold f s i) = i. Proof. intros. rewrite FM.fold_1. rewrite elements_Empty in H; rewrite H; simpl; auto. Qed. Section Fold_More. Variables (A:Type)(eqA:A->A->Prop)(st:Equivalence eqA). Variables (f:elt->A->A)(Comp:Proper (E.eq==>eqA==>eqA) f)(Ass:transpose eqA f). Lemma fold_commutes : forall i s x, eqA (fold f s (f x i)) (f x (fold f s i)). Proof. intros. apply fold_rel with (R:=fun u v => eqA u (f x v)); intros. reflexivity. transitivity (f x0 (f x b)); auto. apply Comp; auto with relations. Qed. (** ** Fold is a morphism *) Lemma fold_init : forall i i' s, eqA i i' -> eqA (fold f s i) (fold f s i'). Proof. intros. apply fold_rel with (R:=eqA); auto. intros; apply Comp; auto with relations. Qed. Lemma fold_equal : forall i s s', s[=]s' -> eqA (fold f s i) (fold f s' i). Proof. intros i s; pattern s; apply set_induction; clear s; intros. transitivity i. apply fold_1; auto. symmetry; apply fold_1; auto. rewrite <- H0; auto. transitivity (f x (fold f s i)). apply fold_2 with (eqA := eqA); auto. symmetry; apply fold_2 with (eqA := eqA); auto. unfold Add in *; intros. rewrite <- H2; auto. Qed. (** ** Fold and other set operators *) Lemma fold_empty : forall i, fold f empty i = i. Proof. intros i; apply fold_1b; auto with set. Qed. Lemma fold_add : forall i s x, ~In x s -> eqA (fold f (add x s) i) (f x (fold f s i)). Proof. intros; apply fold_2 with (eqA := eqA); auto with set. Qed. Lemma add_fold : forall i s x, In x s -> eqA (fold f (add x s) i) (fold f s i). Proof. intros; apply fold_equal; auto with set. Qed. Lemma remove_fold_1: forall i s x, In x s -> eqA (f x (fold f (remove x s) i)) (fold f s i). Proof. intros. symmetry. apply fold_2 with (eqA:=eqA); auto with set relations. Qed. Lemma remove_fold_2: forall i s x, ~In x s -> eqA (fold f (remove x s) i) (fold f s i). Proof. intros. apply fold_equal; auto with set. Qed. Lemma fold_union_inter : forall i s s', eqA (fold f (union s s') (fold f (inter s s') i)) (fold f s (fold f s' i)). Proof. intros; pattern s; apply set_induction; clear s; intros. transitivity (fold f s' (fold f (inter s s') i)). apply fold_equal; auto with set. transitivity (fold f s' i). apply fold_init; auto. apply fold_1; auto with set. symmetry; apply fold_1; auto. rename s'0 into s''. destruct (In_dec x s'). (* In x s' *) transitivity (fold f (union s'' s') (f x (fold f (inter s s') i))); auto with set. apply fold_init; auto. apply fold_2 with (eqA:=eqA); auto with set. rewrite inter_iff; intuition. transitivity (f x (fold f s (fold f s' i))). transitivity (fold f (union s s') (f x (fold f (inter s s') i))). apply fold_equal; auto. apply equal_sym; apply union_Equal with x; auto with set. transitivity (f x (fold f (union s s') (fold f (inter s s') i))). apply fold_commutes; auto. apply Comp; auto with relations. symmetry; apply fold_2 with (eqA:=eqA); auto. (* ~(In x s') *) transitivity (f x (fold f (union s s') (fold f (inter s'' s') i))). apply fold_2 with (eqA:=eqA); auto with set. transitivity (f x (fold f (union s s') (fold f (inter s s') i))). apply Comp;auto with relations. apply fold_init;auto. apply fold_equal;auto. apply equal_sym; apply inter_Add_2 with x; auto with set. transitivity (f x (fold f s (fold f s' i))). apply Comp; auto with relations. symmetry; apply fold_2 with (eqA:=eqA); auto. Qed. 
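(* Worked illustration: taking [f := fun _ => S], [i := 0] and Leibniz
   equality on [nat], [fold_union_inter] yields (via [cardinal_fold] and
   [fold_plus]) the inclusion-exclusion identity
     cardinal (union s s') + cardinal (inter s s') = cardinal s + cardinal s',
   proved below as [union_inter_cardinal]; e.g. for s = {1, 2} and
   s' = {2, 3}: 3 + 1 = 2 + 2. *)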
Lemma fold_diff_inter : forall i s s', eqA (fold f (diff s s') (fold f (inter s s') i)) (fold f s i). Proof. intros. transitivity (fold f (union (diff s s') (inter s s')) (fold f (inter (diff s s') (inter s s')) i)). symmetry; apply fold_union_inter; auto. transitivity (fold f s (fold f (inter (diff s s') (inter s s')) i)). apply fold_equal; auto with set. apply fold_init; auto. apply fold_1; auto with set. Qed. Lemma fold_union: forall i s s', (forall x, ~(In x s/\In x s')) -> eqA (fold f (union s s') i) (fold f s (fold f s' i)). Proof. intros. transitivity (fold f (union s s') (fold f (inter s s') i)). apply fold_init; auto. symmetry; apply fold_1; auto with set. unfold Empty; intro a; generalize (H a); set_iff; tauto. apply fold_union_inter; auto. Qed. End Fold_More. Lemma fold_plus : forall s p, fold (fun _ => S) s p = fold (fun _ => S) s 0 + p. Proof. intros. apply fold_rel with (R:=fun u v => u = v + p); simpl; auto. Qed. End Fold. (** * Cardinal *) (** ** Characterization of cardinal in terms of fold *) Lemma cardinal_fold : forall s, cardinal s = fold (fun _ => S) s 0. Proof. intros; rewrite cardinal_1; rewrite FM.fold_1. symmetry; apply fold_left_length; auto. Qed. (** ** Old specifications for [cardinal]. *) Lemma cardinal_0 : forall s, exists l : list elt, NoDupA E.eq l /\ (forall x : elt, In x s <-> InA E.eq x l) /\ cardinal s = length l. Proof. intros; exists (elements s); intuition; apply cardinal_1. Qed. Lemma cardinal_1 : forall s, Empty s -> cardinal s = 0. Proof. intros; rewrite cardinal_fold; apply fold_1; auto with *. Qed. Lemma cardinal_2 : forall s s' x, ~ In x s -> Add x s s' -> cardinal s' = S (cardinal s). Proof. intros; do 2 rewrite cardinal_fold. change S with ((fun _ => S) x). apply fold_2; auto. split; congruence. congruence. Qed. (** ** Cardinal and (non-)emptiness *) Lemma cardinal_Empty : forall s, Empty s <-> cardinal s = 0. Proof. intros. rewrite elements_Empty, FM.cardinal_1. destruct (elements s); intuition; discriminate. Qed. Lemma cardinal_inv_1 : forall s, cardinal s = 0 -> Empty s. Proof. intros; rewrite cardinal_Empty; auto. Qed. Hint Resolve cardinal_inv_1. Lemma cardinal_inv_2 : forall s n, cardinal s = S n -> { x : elt | In x s }. Proof. intros; rewrite FM.cardinal_1 in H. generalize (elements_2 (s:=s)). destruct (elements s); try discriminate. exists e; auto with relations. Qed. Lemma cardinal_inv_2b : forall s, cardinal s <> 0 -> { x : elt | In x s }. Proof. intro; generalize (@cardinal_inv_2 s); destruct cardinal; [intuition|eauto]. Qed. (** ** Cardinal is a morphism *) Lemma Equal_cardinal : forall s s', s[=]s' -> cardinal s = cardinal s'. Proof. symmetry. remember (cardinal s) as n; symmetry in Heqn; revert s s' Heqn H. induction n; intros. apply cardinal_1; rewrite <- H; auto. destruct (cardinal_inv_2 Heqn) as (x,H2). revert Heqn. rewrite (cardinal_2 (s:=remove x s) (s':=s) (x:=x)); auto with set relations. rewrite (cardinal_2 (s:=remove x s') (s':=s') (x:=x)); eauto with set relations. Qed. Instance cardinal_m : Proper (Equal==>Logic.eq) cardinal. Proof. exact Equal_cardinal. Qed. Hint Resolve Add_add Add_remove Equal_remove cardinal_inv_1 Equal_cardinal. (** ** Cardinal and set operators *) Lemma empty_cardinal : cardinal empty = 0. Proof. rewrite cardinal_fold; apply fold_1; auto with *. Qed. Hint Immediate empty_cardinal cardinal_1 : set. Lemma singleton_cardinal : forall x, cardinal (singleton x) = 1. Proof. intros. rewrite (singleton_equal_add x). replace 0 with (cardinal empty); auto with set. apply cardinal_2 with x; auto with set. 
Qed. Hint Resolve singleton_cardinal: set. Lemma diff_inter_cardinal : forall s s', cardinal (diff s s') + cardinal (inter s s') = cardinal s . Proof. intros; do 3 rewrite cardinal_fold. rewrite <- fold_plus. apply fold_diff_inter with (eqA:=@Logic.eq nat); auto with *. congruence. Qed. Lemma union_cardinal: forall s s', (forall x, ~(In x s/\In x s')) -> cardinal (union s s')=cardinal s+cardinal s'. Proof. intros; do 3 rewrite cardinal_fold. rewrite <- fold_plus. apply fold_union; auto. split; congruence. congruence. Qed. Lemma subset_cardinal : forall s s', s[<=]s' -> cardinal s <= cardinal s' . Proof. intros. rewrite <- (diff_inter_cardinal s' s). rewrite (inter_sym s' s). rewrite (inter_subset_equal H); auto with arith. Qed. Lemma subset_cardinal_lt : forall s s' x, s[<=]s' -> In x s' -> ~In x s -> cardinal s < cardinal s'. Proof. intros. rewrite <- (diff_inter_cardinal s' s). rewrite (inter_sym s' s). rewrite (inter_subset_equal H). generalize (@cardinal_inv_1 (diff s' s)). destruct (cardinal (diff s' s)). intro H2; destruct (H2 (eq_refl _) x). set_iff; auto. intros _. change (0 + cardinal s < S n + cardinal s). apply Plus.plus_lt_le_compat; auto with arith. Qed. Theorem union_inter_cardinal : forall s s', cardinal (union s s') + cardinal (inter s s') = cardinal s + cardinal s' . Proof. intros. do 4 rewrite cardinal_fold. do 2 rewrite <- fold_plus. apply fold_union_inter with (eqA:=@Logic.eq nat); auto with *. congruence. Qed. Lemma union_cardinal_inter : forall s s', cardinal (union s s') = cardinal s + cardinal s' - cardinal (inter s s'). Proof. intros. rewrite <- union_inter_cardinal. rewrite Plus.plus_comm. auto with arith. Qed. Lemma union_cardinal_le : forall s s', cardinal (union s s') <= cardinal s + cardinal s'. Proof. intros; generalize (union_inter_cardinal s s'). intros; rewrite <- H; auto with arith. Qed. Lemma add_cardinal_1 : forall s x, In x s -> cardinal (add x s) = cardinal s. Proof. auto with set. Qed. Lemma add_cardinal_2 : forall s x, ~In x s -> cardinal (add x s) = S (cardinal s). Proof. intros. do 2 rewrite cardinal_fold. change S with ((fun _ => S) x); apply fold_add with (eqA:=@Logic.eq nat); auto with *. congruence. Qed. Lemma remove_cardinal_1 : forall s x, In x s -> S (cardinal (remove x s)) = cardinal s. Proof. intros. do 2 rewrite cardinal_fold. change S with ((fun _ =>S) x). apply remove_fold_1 with (eqA:=@Logic.eq nat); auto with *. congruence. Qed. Lemma remove_cardinal_2 : forall s x, ~In x s -> cardinal (remove x s) = cardinal s. Proof. auto with set. Qed. Hint Resolve subset_cardinal union_cardinal add_cardinal_1 add_cardinal_2. End WPropertiesOn. (** Now comes variants for self-contained weak sets and for full sets. For these variants, only one argument is necessary. Thanks to the subtyping [WS<=S], the [Properties] functor which is meant to be used on modules [(M:S)] can simply be an alias of [WProperties]. *) Module WProperties (M:WSets) := WPropertiesOn M.E M. Module Properties := WProperties. (** Now comes some properties specific to the element ordering, invalid for Weak Sets. *) Module OrdProperties (M:Sets). Module Import ME:=OrderedTypeFacts(M.E). Module Import ML:=OrderedTypeLists(M.E). Module Import P := Properties M. Import FM. Import M.E. Import M. Hint Resolve elements_spec2. Hint Immediate min_elt_spec1 min_elt_spec2 min_elt_spec3 max_elt_spec1 max_elt_spec2 max_elt_spec3 : set. 
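(* Illustrative note: with a total order on the elements, [elements] becomes
   canonical; for instance, if [x] is above every element of [s], then
   [elements (add x s)] is [elements s ++ x :: nil] up to [eqlistA E.eq].
   This is [elements_Add_Above] below, which in turn justifies [fold_3]
   further down. *)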
(** First, a specialized version of SortA_equivlistA_eqlistA: *) Lemma sort_equivlistA_eqlistA : forall l l' : list elt, sort E.lt l -> sort E.lt l' -> equivlistA E.eq l l' -> eqlistA E.eq l l'. Proof. apply SortA_equivlistA_eqlistA; eauto with *. Qed. Definition gtb x y := match E.compare x y with Gt => true | _ => false end. Definition leb x := fun y => negb (gtb x y). Definition elements_lt x s := List.filter (gtb x) (elements s). Definition elements_ge x s := List.filter (leb x) (elements s). Lemma gtb_1 : forall x y, gtb x y = true <-> E.lt y x. Proof. intros; rewrite <- compare_gt_iff. unfold gtb. destruct E.compare; intuition; try discriminate. Qed. Lemma leb_1 : forall x y, leb x y = true <-> ~E.lt y x. Proof. intros; rewrite <- compare_gt_iff. unfold leb, gtb. destruct E.compare; intuition; try discriminate. Qed. Instance gtb_compat x : Proper (E.eq==>Logic.eq) (gtb x). Proof. intros a b H. unfold gtb. rewrite H; auto. Qed. Instance leb_compat x : Proper (E.eq==>Logic.eq) (leb x). Proof. intros a b H; unfold leb. rewrite H; auto. Qed. Hint Resolve gtb_compat leb_compat. Lemma elements_split : forall x s, elements s = elements_lt x s ++ elements_ge x s. Proof. unfold elements_lt, elements_ge, leb; intros. eapply (@filter_split _ E.eq); eauto with *. intros. rewrite gtb_1 in H. assert (~E.lt y x). unfold gtb in *; elim_compare x y; intuition; try discriminate; order. order. Qed. Lemma elements_Add : forall s s' x, ~In x s -> Add x s s' -> eqlistA E.eq (elements s') (elements_lt x s ++ x :: elements_ge x s). Proof. intros; unfold elements_ge, elements_lt. apply sort_equivlistA_eqlistA; auto with set. apply (@SortA_app _ E.eq); auto with *. apply (@filter_sort _ E.eq); auto with *; eauto with *. constructor; auto. apply (@filter_sort _ E.eq); auto with *; eauto with *. rewrite Inf_alt by (apply (@filter_sort _ E.eq); eauto with *). intros. rewrite filter_InA in H1; auto with *; destruct H1. rewrite leb_1 in H2. rewrite <- elements_iff in H1. assert (~E.eq x y). contradict H; rewrite H; auto. order. intros. rewrite filter_InA in H1; auto with *; destruct H1. rewrite gtb_1 in H3. inversion_clear H2. order. rewrite filter_InA in H4; auto with *; destruct H4. rewrite leb_1 in H4. order. red; intros a. rewrite InA_app_iff, InA_cons, !filter_InA, <-!elements_iff, leb_1, gtb_1, (H0 a) by (auto with *). intuition. elim_compare a x; intuition. right; right; split; auto. order. Qed. Definition Above x s := forall y, In y s -> E.lt y x. Definition Below x s := forall y, In y s -> E.lt x y. Lemma elements_Add_Above : forall s s' x, Above x s -> Add x s s' -> eqlistA E.eq (elements s') (elements s ++ x::nil). Proof. intros. apply sort_equivlistA_eqlistA; auto with set. apply (@SortA_app _ E.eq); auto with *. intros. invlist InA. rewrite <- elements_iff in H1. setoid_replace y with x; auto. red; intros a. rewrite InA_app_iff, InA_cons, InA_nil, <-!elements_iff, (H0 a) by (auto with *). intuition. Qed. Lemma elements_Add_Below : forall s s' x, Below x s -> Add x s s' -> eqlistA E.eq (elements s') (x::elements s). Proof. intros. apply sort_equivlistA_eqlistA; auto with set. change (sort E.lt ((x::nil) ++ elements s)). apply (@SortA_app _ E.eq); auto with *. intros. invlist InA. rewrite <- elements_iff in H2. setoid_replace x0 with x; auto. red; intros a. rewrite InA_cons, <- !elements_iff, (H0 a); intuition. Qed. (** Two other induction principles on sets: we can be more restrictive on the element we add at each step. 
*) Lemma set_induction_max : forall P : t -> Type, (forall s : t, Empty s -> P s) -> (forall s s', P s -> forall x, Above x s -> Add x s s' -> P s') -> forall s : t, P s. Proof. intros; remember (cardinal s) as n; revert s Heqn; induction n; intros; auto. case_eq (max_elt s); intros. apply X0 with (remove e s) e; auto with set. apply IHn. assert (S n = S (cardinal (remove e s))). rewrite Heqn; apply cardinal_2 with e; auto with set relations. inversion H0; auto. red; intros. rewrite remove_iff in H0; destruct H0. generalize (@max_elt_spec2 s e y H H0); order. assert (H0:=max_elt_spec3 H). rewrite cardinal_Empty in H0; rewrite H0 in Heqn; inversion Heqn. Qed. Lemma set_induction_min : forall P : t -> Type, (forall s : t, Empty s -> P s) -> (forall s s', P s -> forall x, Below x s -> Add x s s' -> P s') -> forall s : t, P s. Proof. intros; remember (cardinal s) as n; revert s Heqn; induction n; intros; auto. case_eq (min_elt s); intros. apply X0 with (remove e s) e; auto with set. apply IHn. assert (S n = S (cardinal (remove e s))). rewrite Heqn; apply cardinal_2 with e; auto with set relations. inversion H0; auto. red; intros. rewrite remove_iff in H0; destruct H0. generalize (@min_elt_spec2 s e y H H0); order. assert (H0:=min_elt_spec3 H). rewrite cardinal_Empty in H0; auto; rewrite H0 in Heqn; inversion Heqn. Qed. (** More properties of [fold] : behavior with respect to Above/Below *) Lemma fold_3 : forall s s' x (A : Type) (eqA : A -> A -> Prop) (st : Equivalence eqA) (i : A) (f : elt -> A -> A), Proper (E.eq==>eqA==>eqA) f -> Above x s -> Add x s s' -> eqA (fold f s' i) (f x (fold f s i)). Proof. intros. rewrite 2 fold_spec_right. change (f x (fold_right f i (rev (elements s)))) with (fold_right f i (rev (x::nil)++rev (elements s))). apply (@fold_right_eqlistA E.t E.eq A eqA st); auto with *. rewrite <- distr_rev. apply eqlistA_rev. apply elements_Add_Above; auto. Qed. Lemma fold_4 : forall s s' x (A : Type) (eqA : A -> A -> Prop) (st : Equivalence eqA) (i : A) (f : elt -> A -> A), Proper (E.eq==>eqA==>eqA) f -> Below x s -> Add x s s' -> eqA (fold f s' i) (fold f s (f x i)). Proof. intros. rewrite !fold_spec. change (eqA (fold_left (flip f) (elements s') i) (fold_left (flip f) (x::elements s) i)). unfold flip; rewrite <-!fold_left_rev_right. apply (@fold_right_eqlistA E.t E.eq A eqA st); auto. apply eqlistA_rev. apply elements_Add_Below; auto. Qed. (** The following results have already been proved earlier, but we can now prove them with one hypothesis less: no need for [(transpose eqA f)]. *) Section FoldOpt. Variables (A:Type)(eqA:A->A->Prop)(st:Equivalence eqA). Variables (f:elt->A->A)(Comp:Proper (E.eq==>eqA==>eqA) f). Lemma fold_equal : forall i s s', s[=]s' -> eqA (fold f s i) (fold f s' i). Proof. intros. rewrite 2 fold_spec_right. apply (@fold_right_eqlistA E.t E.eq A eqA st); auto. apply eqlistA_rev. apply sort_equivlistA_eqlistA; auto with set. red; intro a; do 2 rewrite <- elements_iff; auto. Qed. Lemma add_fold : forall i s x, In x s -> eqA (fold f (add x s) i) (fold f s i). Proof. intros; apply fold_equal; auto with set. Qed. Lemma remove_fold_2: forall i s x, ~In x s -> eqA (fold f (remove x s) i) (fold f s i). Proof. intros. apply fold_equal; auto with set. Qed. End FoldOpt. (** An alternative version of [choose_3] *) Lemma choose_equal : forall s s', Equal s s' -> match choose s, choose s' with | Some x, Some x' => E.eq x x' | None, None => True | _, _ => False end. Proof. 
intros s s' H; generalize (@choose_spec1 s)(@choose_spec2 s) (@choose_spec1 s')(@choose_spec2 s')(@choose_spec3 s s'); destruct (choose s); destruct (choose s'); simpl; intuition. apply H5 with e; rewrite <-H; auto. apply H5 with e; rewrite H; auto. Qed. End OrdProperties. coq-8.4pl2/theories/MSets/MSetDecide.v0000640000175000001440000007466011643605605016664 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ... -> Pk -> P >> where [P]'s are defined by the grammar: << P ::= | Q | Empty F | Subset F F' | Equal F F' Q ::= | E.eq X X' | In X F | Q /\ Q' | Q \/ Q' | Q -> Q' | Q <-> Q' | ~ Q | True | False F ::= | S | empty | singleton X | add X F | remove X F | union F F' | inter F F' | diff F F' X ::= x1 | ... | xm S ::= s1 | ... | sn >> The tactic will also work on some goals that vary slightly from the above form: - The variables and hypotheses may be mixed in any order and may have already been introduced into the context. Moreover, there may be additional, unrelated hypotheses mixed in (these will be ignored). - A conjunction of hypotheses will be handled as easily as separate hypotheses, i.e., [P1 /\ P2 -> P] can be solved iff [P1 -> P2 -> P] can be solved. - [fsetdec] should solve any goal if the MSet-related hypotheses are contradictory. - [fsetdec] will first perform any necessary zeta and beta reductions and will invoke [subst] to eliminate any Coq equalities between finite sets or their elements. - If [E.eq] is convertible with Coq's equality, it will not matter which one is used in the hypotheses or conclusion. - The tactic can solve goals where the finite sets or set elements are expressed by Coq terms that are more complicated than variables. However, non-local definitions are not expanded, and Coq equalities between non-variable terms are not used. For example, this goal will be solved: << forall (f : t -> t), forall (g : elt -> elt), forall (s1 s2 : t), forall (x1 x2 : elt), Equal s1 (f s2) -> E.eq x1 (g (g x2)) -> In x1 s1 -> In (g (g x2)) (f s2) >> This one will not be solved: << forall (f : t -> t), forall (g : elt -> elt), forall (s1 s2 : t), forall (x1 x2 : elt), Equal s1 (f s2) -> E.eq x1 (g x2) -> In x1 s1 -> g x2 = g (g x2) -> In (g (g x2)) (f s2) >> *) (** * Facts and Tactics for Propositional Logic These lemmas and tactics are in a module so that they do not affect the namespace if you import the enclosing module [Decide]. *) Module MSetLogicalFacts. Require Export Decidable. Require Export Setoid. (** ** Lemmas and Tactics About Decidable Propositions *) (** ** Propositional Equivalences Involving Negation These are all written with the unfolded form of negation, since I am not sure if setoid rewriting will always perform conversion. *) (** ** Tactics for Negations *) Tactic Notation "fold" "any" "not" := repeat ( match goal with | H: context [?P -> False] |- _ => fold (~ P) in H | |- context [?P -> False] => fold (~ P) end). (** [push not using db] will pushes all negations to the leaves of propositions in the goal, using the lemmas in [db] to assist in checking the decidability of the propositions involved. If [using db] is omitted, then [core] will be used. Additional versions are provided to manipulate the hypotheses or the hypotheses and goal together. XXX: This tactic and the similar subsequent ones should have been defined using [autorewrite]. 
However, dealing with multiples rewrite sites and side-conditions is done more cleverly with the following explicit analysis of goals. *) Ltac or_not_l_iff P Q tac := (rewrite (or_not_l_iff_1 P Q) by tac) || (rewrite (or_not_l_iff_2 P Q) by tac). Ltac or_not_r_iff P Q tac := (rewrite (or_not_r_iff_1 P Q) by tac) || (rewrite (or_not_r_iff_2 P Q) by tac). Ltac or_not_l_iff_in P Q H tac := (rewrite (or_not_l_iff_1 P Q) in H by tac) || (rewrite (or_not_l_iff_2 P Q) in H by tac). Ltac or_not_r_iff_in P Q H tac := (rewrite (or_not_r_iff_1 P Q) in H by tac) || (rewrite (or_not_r_iff_2 P Q) in H by tac). Tactic Notation "push" "not" "using" ident(db) := let dec := solve_decidable using db in unfold not, iff; repeat ( match goal with | |- context [True -> False] => rewrite not_true_iff | |- context [False -> False] => rewrite not_false_iff | |- context [(?P -> False) -> False] => rewrite (not_not_iff P) by dec | |- context [(?P -> False) -> (?Q -> False)] => rewrite (contrapositive P Q) by dec | |- context [(?P -> False) \/ ?Q] => or_not_l_iff P Q dec | |- context [?P \/ (?Q -> False)] => or_not_r_iff P Q dec | |- context [(?P -> False) -> ?Q] => rewrite (imp_not_l P Q) by dec | |- context [?P \/ ?Q -> False] => rewrite (not_or_iff P Q) | |- context [?P /\ ?Q -> False] => rewrite (not_and_iff P Q) | |- context [(?P -> ?Q) -> False] => rewrite (not_imp_iff P Q) by dec end); fold any not. Tactic Notation "push" "not" := push not using core. Tactic Notation "push" "not" "in" "*" "|-" "using" ident(db) := let dec := solve_decidable using db in unfold not, iff in * |-; repeat ( match goal with | H: context [True -> False] |- _ => rewrite not_true_iff in H | H: context [False -> False] |- _ => rewrite not_false_iff in H | H: context [(?P -> False) -> False] |- _ => rewrite (not_not_iff P) in H by dec | H: context [(?P -> False) -> (?Q -> False)] |- _ => rewrite (contrapositive P Q) in H by dec | H: context [(?P -> False) \/ ?Q] |- _ => or_not_l_iff_in P Q H dec | H: context [?P \/ (?Q -> False)] |- _ => or_not_r_iff_in P Q H dec | H: context [(?P -> False) -> ?Q] |- _ => rewrite (imp_not_l P Q) in H by dec | H: context [?P \/ ?Q -> False] |- _ => rewrite (not_or_iff P Q) in H | H: context [?P /\ ?Q -> False] |- _ => rewrite (not_and_iff P Q) in H | H: context [(?P -> ?Q) -> False] |- _ => rewrite (not_imp_iff P Q) in H by dec end); fold any not. Tactic Notation "push" "not" "in" "*" "|-" := push not in * |- using core. Tactic Notation "push" "not" "in" "*" "using" ident(db) := push not using db; push not in * |- using db. Tactic Notation "push" "not" "in" "*" := push not in * using core. (** A simple test case to see how this works. *) Lemma test_push : forall P Q R : Prop, decidable P -> decidable Q -> (~ True) -> (~ False) -> (~ ~ P) -> (~ (P /\ Q) -> ~ R) -> ((P /\ Q) \/ ~ R) -> (~ (P /\ Q) \/ R) -> (R \/ ~ (P /\ Q)) -> (~ R \/ (P /\ Q)) -> (~ P -> R) -> (~ ((R -> P) \/ (Q -> R))) -> (~ (P /\ R)) -> (~ (P -> R)) -> True. Proof. intros. push not in *. (* note that ~(R->P) remains (since R isnt decidable) *) tauto. Qed. (** [pull not using db] will pull as many negations as possible toward the top of the propositions in the goal, using the lemmas in [db] to assist in checking the decidability of the propositions involved. If [using db] is omitted, then [core] will be used. Additional versions are provided to manipulate the hypotheses or the hypotheses and goal together. 
*) Tactic Notation "pull" "not" "using" ident(db) := let dec := solve_decidable using db in unfold not, iff; repeat ( match goal with | |- context [True -> False] => rewrite not_true_iff | |- context [False -> False] => rewrite not_false_iff | |- context [(?P -> False) -> False] => rewrite (not_not_iff P) by dec | |- context [(?P -> False) -> (?Q -> False)] => rewrite (contrapositive P Q) by dec | |- context [(?P -> False) \/ ?Q] => or_not_l_iff P Q dec | |- context [?P \/ (?Q -> False)] => or_not_r_iff P Q dec | |- context [(?P -> False) -> ?Q] => rewrite (imp_not_l P Q) by dec | |- context [(?P -> False) /\ (?Q -> False)] => rewrite <- (not_or_iff P Q) | |- context [?P -> ?Q -> False] => rewrite <- (not_and_iff P Q) | |- context [?P /\ (?Q -> False)] => rewrite <- (not_imp_iff P Q) by dec | |- context [(?Q -> False) /\ ?P] => rewrite <- (not_imp_rev_iff P Q) by dec end); fold any not. Tactic Notation "pull" "not" := pull not using core. Tactic Notation "pull" "not" "in" "*" "|-" "using" ident(db) := let dec := solve_decidable using db in unfold not, iff in * |-; repeat ( match goal with | H: context [True -> False] |- _ => rewrite not_true_iff in H | H: context [False -> False] |- _ => rewrite not_false_iff in H | H: context [(?P -> False) -> False] |- _ => rewrite (not_not_iff P) in H by dec | H: context [(?P -> False) -> (?Q -> False)] |- _ => rewrite (contrapositive P Q) in H by dec | H: context [(?P -> False) \/ ?Q] |- _ => or_not_l_iff_in P Q H dec | H: context [?P \/ (?Q -> False)] |- _ => or_not_r_iff_in P Q H dec | H: context [(?P -> False) -> ?Q] |- _ => rewrite (imp_not_l P Q) in H by dec | H: context [(?P -> False) /\ (?Q -> False)] |- _ => rewrite <- (not_or_iff P Q) in H | H: context [?P -> ?Q -> False] |- _ => rewrite <- (not_and_iff P Q) in H | H: context [?P /\ (?Q -> False)] |- _ => rewrite <- (not_imp_iff P Q) in H by dec | H: context [(?Q -> False) /\ ?P] |- _ => rewrite <- (not_imp_rev_iff P Q) in H by dec end); fold any not. Tactic Notation "pull" "not" "in" "*" "|-" := pull not in * |- using core. Tactic Notation "pull" "not" "in" "*" "using" ident(db) := pull not using db; pull not in * |- using db. Tactic Notation "pull" "not" "in" "*" := pull not in * using core. (** A simple test case to see how this works. *) Lemma test_pull : forall P Q R : Prop, decidable P -> decidable Q -> (~ True) -> (~ False) -> (~ ~ P) -> (~ (P /\ Q) -> ~ R) -> ((P /\ Q) \/ ~ R) -> (~ (P /\ Q) \/ R) -> (R \/ ~ (P /\ Q)) -> (~ R \/ (P /\ Q)) -> (~ P -> R) -> (~ (R -> P) /\ ~ (Q -> R)) -> (~ P \/ ~ R) -> (P /\ ~ R) -> (~ R /\ P) -> True. Proof. intros. pull not in *. tauto. Qed. End MSetLogicalFacts. Import MSetLogicalFacts. (** * Auxiliary Tactics Again, these lemmas and tactics are in a module so that they do not affect the namespace if you import the enclosing module [Decide]. *) Module MSetDecideAuxiliary. (** ** Generic Tactics We begin by defining a few generic, useful tactics. *) (** remove logical hypothesis inter-dependencies (fix #2136). *) Ltac no_logical_interdep := match goal with | H : ?P |- _ => match type of P with | Prop => match goal with H' : context [ H ] |- _ => clear dependent H' end | _ => fail end; no_logical_interdep | _ => idtac end. (** [if t then t1 else t2] executes [t] and, if it does not fail, then [t1] will be applied to all subgoals produced. If [t] fails, then [t2] is executed. *) Tactic Notation "if" tactic(t) "then" tactic(t1) "else" tactic(t2) := first [ t; first [ t1 | fail 2 ] | t2 ]. 
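(** A small usage sketch (not needed elsewhere in this file):
<<
  Goal True. if exact I then idtac else fail "tactic failed".
>>
    Here [exact I] succeeds, so [idtac] runs on the remaining subgoals and
    the [else] branch is never taken. Because of the inner [fail 2], a
    failure of the [then] branch makes the whole construct fail rather
    than falling back to the [else] branch. *)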
Ltac abstract_term t := if (is_var t) then fail "no need to abstract a variable" else (let x := fresh "x" in set (x := t) in *; try clearbody x). Ltac abstract_elements := repeat (match goal with | |- context [ singleton ?t ] => abstract_term t | _ : context [ singleton ?t ] |- _ => abstract_term t | |- context [ add ?t _ ] => abstract_term t | _ : context [ add ?t _ ] |- _ => abstract_term t | |- context [ remove ?t _ ] => abstract_term t | _ : context [ remove ?t _ ] |- _ => abstract_term t | |- context [ In ?t _ ] => abstract_term t | _ : context [ In ?t _ ] |- _ => abstract_term t end). (** [prop P holds by t] succeeds (but does not modify the goal or context) if the proposition [P] can be proved by [t] in the current context. Otherwise, the tactic fails. *) Tactic Notation "prop" constr(P) "holds" "by" tactic(t) := let H := fresh in assert P as H by t; clear H. (** This tactic acts just like [assert ... by ...] but will fail if the context already contains the proposition. *) Tactic Notation "assert" "new" constr(e) "by" tactic(t) := match goal with | H: e |- _ => fail 1 | _ => assert e by t end. (** [subst++] is similar to [subst] except that - it never fails (as [subst] does on recursive equations), - it substitutes locally defined variable for their definitions, - it performs beta reductions everywhere, which may arise after substituting a locally defined function for its definition. *) Tactic Notation "subst" "++" := repeat ( match goal with | x : _ |- _ => subst x end); cbv zeta beta in *. (** [decompose records] calls [decompose record H] on every relevant hypothesis [H]. *) Tactic Notation "decompose" "records" := repeat ( match goal with | H: _ |- _ => progress (decompose record H); clear H end). (** ** Discarding Irrelevant Hypotheses We will want to clear the context of any non-MSet-related hypotheses in order to increase the speed of the tactic. To do this, we will need to be able to decide which are relevant. We do this by making a simple inductive definition classifying the propositions of interest. *) Inductive MSet_elt_Prop : Prop -> Prop := | eq_Prop : forall (S : Type) (x y : S), MSet_elt_Prop (x = y) | eq_elt_prop : forall x y, MSet_elt_Prop (E.eq x y) | In_elt_prop : forall x s, MSet_elt_Prop (In x s) | True_elt_prop : MSet_elt_Prop True | False_elt_prop : MSet_elt_Prop False | conj_elt_prop : forall P Q, MSet_elt_Prop P -> MSet_elt_Prop Q -> MSet_elt_Prop (P /\ Q) | disj_elt_prop : forall P Q, MSet_elt_Prop P -> MSet_elt_Prop Q -> MSet_elt_Prop (P \/ Q) | impl_elt_prop : forall P Q, MSet_elt_Prop P -> MSet_elt_Prop Q -> MSet_elt_Prop (P -> Q) | not_elt_prop : forall P, MSet_elt_Prop P -> MSet_elt_Prop (~ P). Inductive MSet_Prop : Prop -> Prop := | elt_MSet_Prop : forall P, MSet_elt_Prop P -> MSet_Prop P | Empty_MSet_Prop : forall s, MSet_Prop (Empty s) | Subset_MSet_Prop : forall s1 s2, MSet_Prop (Subset s1 s2) | Equal_MSet_Prop : forall s1 s2, MSet_Prop (Equal s1 s2). (** Here is the tactic that will throw away hypotheses that are not useful (for the intended scope of the [fsetdec] tactic). *) Hint Constructors MSet_elt_Prop MSet_Prop : MSet_Prop. Ltac discard_nonMSet := repeat ( match goal with | H : context [ @Logic.eq ?T ?x ?y ] |- _ => if (change T with E.t in H) then fail else if (change T with t in H) then fail else clear H | H : ?P |- _ => if prop (MSet_Prop P) holds by (auto 100 with MSet_Prop) then fail else clear H end). 
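(** For instance (illustration only), in a context such as
<<
  H1 : In x s
  H2 : 0 < n
  H3 : s [<=] s'
>>
    [discard_nonMSet] clears [H2], which is not an [MSet_Prop],
    while [H1] and [H3] are kept. *)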
(** ** Turning Set Operators into Propositional Connectives The lemmas from [MSetFacts] will be used to break down set operations into propositional formulas built over the predicates [In] and [E.eq] applied only to variables. We are going to use them with [autorewrite]. *) Hint Rewrite F.empty_iff F.singleton_iff F.add_iff F.remove_iff F.union_iff F.inter_iff F.diff_iff : set_simpl. Lemma eq_refl_iff (x : E.t) : E.eq x x <-> True. Proof. now split. Qed. Hint Rewrite eq_refl_iff : set_eq_simpl. (** ** Decidability of MSet Propositions *) (** [In] is decidable. *) Lemma dec_In : forall x s, decidable (In x s). Proof. red; intros; generalize (F.mem_iff s x); case (mem x s); intuition. Qed. (** [E.eq] is decidable. *) Lemma dec_eq : forall (x y : E.t), decidable (E.eq x y). Proof. red; intros x y; destruct (E.eq_dec x y); auto. Qed. (** The hint database [MSet_decidability] will be given to the [push_neg] tactic from the module [Negation]. *) Hint Resolve dec_In dec_eq : MSet_decidability. (** ** Normalizing Propositions About Equality We have to deal with the fact that [E.eq] may be convertible with Coq's equality. Thus, we will find the following tactics useful to replace one form with the other everywhere. *) (** The next tactic, [Logic_eq_to_E_eq], mentions the term [E.t]; thus, we must ensure that [E.t] is used in favor of any other convertible but syntactically distinct term. *) Ltac change_to_E_t := repeat ( match goal with | H : ?T |- _ => progress (change T with E.t in H); repeat ( match goal with | J : _ |- _ => progress (change T with E.t in J) | |- _ => progress (change T with E.t) end ) | H : forall x : ?T, _ |- _ => progress (change T with E.t in H); repeat ( match goal with | J : _ |- _ => progress (change T with E.t in J) | |- _ => progress (change T with E.t) end ) end). (** These two tactics take us from Coq's built-in equality to [E.eq] (and vice versa) when possible. *) Ltac Logic_eq_to_E_eq := repeat ( match goal with | H: _ |- _ => progress (change (@Logic.eq E.t) with E.eq in H) | |- _ => progress (change (@Logic.eq E.t) with E.eq) end). Ltac E_eq_to_Logic_eq := repeat ( match goal with | H: _ |- _ => progress (change E.eq with (@Logic.eq E.t) in H) | |- _ => progress (change E.eq with (@Logic.eq E.t)) end). (** This tactic works like the built-in tactic [subst], but at the level of set element equality (which may not be the convertible with Coq's equality). *) Ltac substMSet := repeat ( match goal with | H: E.eq ?x ?x |- _ => clear H | H: E.eq ?x ?y |- _ => rewrite H in *; clear H end); autorewrite with set_eq_simpl in *. (** ** Considering Decidability of Base Propositions This tactic adds assertions about the decidability of [E.eq] and [In] to the context. This is necessary for the completeness of the [fsetdec] tactic. However, in order to minimize the cost of proof search, we should be careful to not add more than we need. Once negations have been pushed to the leaves of the propositions, we only need to worry about decidability for those base propositions that appear in a negated form. *) Ltac assert_decidability := (** We actually don't want these rules to fire if the syntactic context in the patterns below is trivially empty, but we'll just do some clean-up at the afterward. 
*) repeat ( match goal with | H: context [~ E.eq ?x ?y] |- _ => assert new (E.eq x y \/ ~ E.eq x y) by (apply dec_eq) | H: context [~ In ?x ?s] |- _ => assert new (In x s \/ ~ In x s) by (apply dec_In) | |- context [~ E.eq ?x ?y] => assert new (E.eq x y \/ ~ E.eq x y) by (apply dec_eq) | |- context [~ In ?x ?s] => assert new (In x s \/ ~ In x s) by (apply dec_In) end); (** Now we eliminate the useless facts we added (because they would likely be very harmful to performance). *) repeat ( match goal with | _: ~ ?P, H : ?P \/ ~ ?P |- _ => clear H end). (** ** Handling [Empty], [Subset], and [Equal] This tactic instantiates universally quantified hypotheses (which arise from the unfolding of [Empty], [Subset], and [Equal]) for each of the set element expressions that is involved in some membership or equality fact. Then it throws away those hypotheses, which should no longer be needed. *) Ltac inst_MSet_hypotheses := repeat ( match goal with | H : forall a : E.t, _, _ : context [ In ?x _ ] |- _ => let P := type of (H x) in assert new P by (exact (H x)) | H : forall a : E.t, _ |- context [ In ?x _ ] => let P := type of (H x) in assert new P by (exact (H x)) | H : forall a : E.t, _, _ : context [ E.eq ?x _ ] |- _ => let P := type of (H x) in assert new P by (exact (H x)) | H : forall a : E.t, _ |- context [ E.eq ?x _ ] => let P := type of (H x) in assert new P by (exact (H x)) | H : forall a : E.t, _, _ : context [ E.eq _ ?x ] |- _ => let P := type of (H x) in assert new P by (exact (H x)) | H : forall a : E.t, _ |- context [ E.eq _ ?x ] => let P := type of (H x) in assert new P by (exact (H x)) end); repeat ( match goal with | H : forall a : E.t, _ |- _ => clear H end). (** ** The Core [fsetdec] Auxiliary Tactics *) (** Here is the crux of the proof search. Recursion through [intuition]! (This will terminate if I correctly understand the behavior of [intuition].) *) Ltac fsetdec_rec := progress substMSet; intuition fsetdec_rec. (** If we add [unfold Empty, Subset, Equal in *; intros;] to the beginning of this tactic, it will satisfy the same specification as the [fsetdec] tactic; however, it will be much slower than necessary without the pre-processing done by the wrapper tactic [fsetdec]. *) Ltac fsetdec_body := autorewrite with set_eq_simpl in *; inst_MSet_hypotheses; autorewrite with set_simpl set_eq_simpl in *; push not in * using MSet_decidability; substMSet; assert_decidability; auto; (intuition fsetdec_rec) || fail 1 "because the goal is beyond the scope of this tactic". End MSetDecideAuxiliary. Import MSetDecideAuxiliary. (** * The [fsetdec] Tactic Here is the top-level tactic (the only one intended for clients of this library). It's specification is given at the top of the file. *) Ltac fsetdec := (** We first unfold any occurrences of [iff]. *) unfold iff in *; (** We fold occurrences of [not] because it is better for [intros] to leave us with a goal of [~ P] than a goal of [False]. *) fold any not; intros; (** We don't care about the value of elements : complex ones are abstracted as new variables (avoiding potential dependencies, see bug #2464) *) abstract_elements; (** We remove dependencies to logical hypothesis. This way, later "clear" will work nicely (see bug #2136) *) no_logical_interdep; (** Now we decompose conjunctions, which will allow the [discard_nonMSet] and [assert_decidability] tactics to do a much better job. *) decompose records; discard_nonMSet; (** We unfold these defined propositions on finite sets. 
If our goal was one of them, then have one more item to introduce now. *) unfold Empty, Subset, Equal in *; intros; (** We now want to get rid of all uses of [=] in favor of [E.eq]. However, the best way to eliminate a [=] is in the context is with [subst], so we will try that first. In fact, we may as well convert uses of [E.eq] into [=] when possible before we do [subst] so that we can even more mileage out of it. Then we will convert all remaining uses of [=] back to [E.eq] when possible. We use [change_to_E_t] to ensure that we have a canonical name for set elements, so that [Logic_eq_to_E_eq] will work properly. *) change_to_E_t; E_eq_to_Logic_eq; subst++; Logic_eq_to_E_eq; (** The next optimization is to swap a negated goal with a negated hypothesis when possible. Any swap will improve performance by eliminating the total number of negations, but we will get the maximum benefit if we swap the goal with a hypotheses mentioning the same set element, so we try that first. If we reach the fourth branch below, we attempt any swap. However, to maintain completeness of this tactic, we can only perform such a swap with a decidable proposition; hence, we first test whether the hypothesis is an [MSet_elt_Prop], noting that any [MSet_elt_Prop] is decidable. *) pull not using MSet_decidability; unfold not in *; match goal with | H: (In ?x ?r) -> False |- (In ?x ?s) -> False => contradict H; fsetdec_body | H: (In ?x ?r) -> False |- (E.eq ?x ?y) -> False => contradict H; fsetdec_body | H: (In ?x ?r) -> False |- (E.eq ?y ?x) -> False => contradict H; fsetdec_body | H: ?P -> False |- ?Q -> False => if prop (MSet_elt_Prop P) holds by (auto 100 with MSet_Prop) then (contradict H; fsetdec_body) else fsetdec_body | |- _ => fsetdec_body end. (** * Examples *) Module MSetDecideTestCases. Lemma test_eq_trans_1 : forall x y z s, E.eq x y -> ~ ~ E.eq z y -> In x s -> In z s. Proof. fsetdec. Qed. Lemma test_eq_trans_2 : forall x y z r s, In x (singleton y) -> ~ In z r -> ~ ~ In z (add y r) -> In x s -> In z s. Proof. fsetdec. Qed. Lemma test_eq_neq_trans_1 : forall w x y z s, E.eq x w -> ~ ~ E.eq x y -> ~ E.eq y z -> In w s -> In w (remove z s). Proof. fsetdec. Qed. Lemma test_eq_neq_trans_2 : forall w x y z r1 r2 s, In x (singleton w) -> ~ In x r1 -> In x (add y r1) -> In y r2 -> In y (remove z r2) -> In w s -> In w (remove z s). Proof. fsetdec. Qed. Lemma test_In_singleton : forall x, In x (singleton x). Proof. fsetdec. Qed. Lemma test_add_In : forall x y s, In x (add y s) -> ~ E.eq x y -> In x s. Proof. fsetdec. Qed. Lemma test_Subset_add_remove : forall x s, s [<=] (add x (remove x s)). Proof. fsetdec. Qed. Lemma test_eq_disjunction : forall w x y z, In w (add x (add y (singleton z))) -> E.eq w x \/ E.eq w y \/ E.eq w z. Proof. fsetdec. Qed. Lemma test_not_In_disj : forall x y s1 s2 s3 s4, ~ In x (union s1 (union s2 (union s3 (add y s4)))) -> ~ (In x s1 \/ In x s4 \/ E.eq y x). Proof. fsetdec. Qed. Lemma test_not_In_conj : forall x y s1 s2 s3 s4, ~ In x (union s1 (union s2 (union s3 (add y s4)))) -> ~ In x s1 /\ ~ In x s4 /\ ~ E.eq y x. Proof. fsetdec. Qed. Lemma test_iff_conj : forall a x s s', (In a s' <-> E.eq x a \/ In a s) -> (In a s' <-> In a (add x s)). Proof. fsetdec. Qed. Lemma test_set_ops_1 : forall x q r s, (singleton x) [<=] s -> Empty (union q r) -> Empty (inter (diff s q) (diff s r)) -> ~ In x s. Proof. fsetdec. Qed. 
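(** One more small test in the same spirit: this goal falls within the
    grammar described above, so [fsetdec] should discharge it directly. *)

Lemma test_diff_In : forall x s s',
 In x s -> ~ In x s' -> In x (diff s s').
Proof. fsetdec. Qed.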
Lemma eq_chain_test : forall x1 x2 x3 x4 s1 s2 s3 s4, Empty s1 -> In x2 (add x1 s1) -> In x3 s2 -> ~ In x3 (remove x2 s2) -> ~ In x4 s3 -> In x4 (add x3 s3) -> In x1 s4 -> Subset (add x4 s4) s4. Proof. fsetdec. Qed. Lemma test_too_complex : forall x y z r s, E.eq x y -> (In x (singleton y) -> r [<=] s) -> In z r -> In z s. Proof. (** [fsetdec] is not intended to solve this directly. *) intros until s; intros Heq H Hr; lapply H; fsetdec. Qed. Lemma function_test_1 : forall (f : t -> t), forall (g : elt -> elt), forall (s1 s2 : t), forall (x1 x2 : elt), Equal s1 (f s2) -> E.eq x1 (g (g x2)) -> In x1 s1 -> In (g (g x2)) (f s2). Proof. fsetdec. Qed. Lemma function_test_2 : forall (f : t -> t), forall (g : elt -> elt), forall (s1 s2 : t), forall (x1 x2 : elt), Equal s1 (f s2) -> E.eq x1 (g x2) -> In x1 s1 -> g x2 = g (g x2) -> In (g (g x2)) (f s2). Proof. (** [fsetdec] is not intended to solve this directly. *) intros until 3. intros g_eq. rewrite <- g_eq. fsetdec. Qed. Lemma test_baydemir : forall (f : t -> t), forall (s : t), forall (x y : elt), In x (add y (f s)) -> ~ E.eq x y -> In x (f s). Proof. fsetdec. Qed. End MSetDecideTestCases. End WDecideOn. Require Import MSetInterface. (** Now comes variants for self-contained weak sets and for full sets. For these variants, only one argument is necessary. Thanks to the subtyping [WS<=S], the [Decide] functor which is meant to be used on modules [(M:S)] can simply be an alias of [WDecide]. *) Module WDecide (M:WSets) := !WDecideOn M.E M. Module Decide := WDecide. coq-8.4pl2/theories/MSets/MSetToFiniteSet.v0000640000175000001440000001071211366307247017673 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Ensemble M.elt := fun s x => M.In x s. Notation " !! " := mkEns. Lemma In_In : forall s x, M.In x s <-> In _ (!!s) x. Proof. unfold In; compute; auto with extcore. Qed. Lemma Subset_Included : forall s s', s[<=]s' <-> Included _ (!!s) (!!s'). Proof. unfold Subset, Included, In, mkEns; intuition. Qed. Notation " a === b " := (Same_set M.elt a b) (at level 70, no associativity). Lemma Equal_Same_set : forall s s', s[=]s' <-> !!s === !!s'. Proof. intros. rewrite double_inclusion. unfold Subset, Included, Same_set, In, mkEns; intuition. Qed. Lemma empty_Empty_Set : !!M.empty === Empty_set _. Proof. unfold Same_set, Included, mkEns, In. split; intro; set_iff; inversion 1. Qed. Lemma Empty_Empty_set : forall s, Empty s -> !!s === Empty_set _. Proof. unfold Same_set, Included, mkEns, In. split; intros. destruct(H x H0). inversion H0. Qed. Lemma singleton_Singleton : forall x, !!(M.singleton x) === Singleton _ x . Proof. unfold Same_set, Included, mkEns, In. split; intro; set_iff; inversion 1; try constructor; auto. Qed. Lemma union_Union : forall s s', !!(union s s') === Union _ (!!s) (!!s'). Proof. unfold Same_set, Included, mkEns, In. split; intro; set_iff; inversion 1; [ constructor 1 | constructor 2 | | ]; auto. Qed. Lemma inter_Intersection : forall s s', !!(inter s s') === Intersection _ (!!s) (!!s'). Proof. unfold Same_set, Included, mkEns, In. split; intro; set_iff; inversion 1; try constructor; auto. Qed. Lemma add_Add : forall x s, !!(add x s) === Add _ (!!s) x. Proof. unfold Same_set, Included, mkEns, In. split; intro; set_iff; inversion 1; auto with sets. inversion H0. constructor 2; constructor. constructor 1; auto. Qed. Lemma Add_Add : forall x s s', MP.Add x s s' -> !!s' === Add _ (!!s) x. Proof. unfold Same_set, Included, mkEns, In. 
split; intros. red in H; rewrite H in H0. destruct H0. inversion H0. constructor 2; constructor. constructor 1; auto. red in H; rewrite H. inversion H0; auto. inversion H1; auto. Qed. Lemma remove_Subtract : forall x s, !!(remove x s) === Subtract _ (!!s) x. Proof. unfold Same_set, Included, mkEns, In. split; intro; set_iff; inversion 1; auto with sets. split; auto. contradict H1. inversion H1; auto. Qed. Lemma mkEns_Finite : forall s, Finite _ (!!s). Proof. intro s; pattern s; apply set_induction; clear s; intros. intros; replace (!!s) with (Empty_set elt); auto with sets. symmetry; apply Extensionality_Ensembles. apply Empty_Empty_set; auto. replace (!!s') with (Add _ (!!s) x). constructor 2; auto. symmetry; apply Extensionality_Ensembles. apply Add_Add; auto. Qed. Lemma mkEns_cardinal : forall s, cardinal _ (!!s) (M.cardinal s). Proof. intro s; pattern s; apply set_induction; clear s; intros. intros; replace (!!s) with (Empty_set elt); auto with sets. rewrite MP.cardinal_1; auto with sets. symmetry; apply Extensionality_Ensembles. apply Empty_Empty_set; auto. replace (!!s') with (Add _ (!!s) x). rewrite (cardinal_2 H0 H1); auto with sets. symmetry; apply Extensionality_Ensembles. apply Add_Add; auto. Qed. (** we can even build a function from Finite Ensemble to MSet ... at least in Prop. *) Lemma Ens_to_MSet : forall e : Ensemble M.elt, Finite _ e -> exists s:M.t, !!s === e. Proof. induction 1. exists M.empty. apply empty_Empty_Set. destruct IHFinite as (s,Hs). exists (M.add x s). apply Extensionality_Ensembles in Hs. rewrite <- Hs. apply add_Add. Qed. End WS_to_Finite_set. Module S_to_Finite_set (U:UsualOrderedType)(M: SetsOn U) := WS_to_Finite_set U M. coq-8.4pl2/theories/MSets/MSetFacts.v0000640000175000001440000004144311403445776016545 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* In x s -> In y s. Proof. intros E; rewrite E; auto. Qed. Lemma mem_1 : In x s -> mem x s = true. Proof. intros; apply <- mem_spec; auto. Qed. Lemma mem_2 : mem x s = true -> In x s. Proof. intros; apply -> mem_spec; auto. Qed. Lemma equal_1 : Equal s s' -> equal s s' = true. Proof. intros; apply <- equal_spec; auto. Qed. Lemma equal_2 : equal s s' = true -> Equal s s'. Proof. intros; apply -> equal_spec; auto. Qed. Lemma subset_1 : Subset s s' -> subset s s' = true. Proof. intros; apply <- subset_spec; auto. Qed. Lemma subset_2 : subset s s' = true -> Subset s s'. Proof. intros; apply -> subset_spec; auto. Qed. Lemma is_empty_1 : Empty s -> is_empty s = true. Proof. intros; apply <- is_empty_spec; auto. Qed. Lemma is_empty_2 : is_empty s = true -> Empty s. Proof. intros; apply -> is_empty_spec; auto. Qed. Lemma add_1 : E.eq x y -> In y (add x s). Proof. intros; apply <- add_spec. auto with relations. Qed. Lemma add_2 : In y s -> In y (add x s). Proof. intros; apply <- add_spec; auto. Qed. Lemma add_3 : ~ E.eq x y -> In y (add x s) -> In y s. Proof. rewrite add_spec. intros H [H'|H']; auto. elim H; auto with relations. Qed. Lemma remove_1 : E.eq x y -> ~ In y (remove x s). Proof. intros; rewrite remove_spec; intuition. Qed. Lemma remove_2 : ~ E.eq x y -> In y s -> In y (remove x s). Proof. intros; apply <- remove_spec; auto with relations. Qed. Lemma remove_3 : In y (remove x s) -> In y s. Proof. rewrite remove_spec; intuition. Qed. Lemma singleton_1 : In y (singleton x) -> E.eq x y. Proof. rewrite singleton_spec; auto with relations. Qed. Lemma singleton_2 : E.eq x y -> In y (singleton x). Proof. 
rewrite singleton_spec; auto with relations. Qed. Lemma union_1 : In x (union s s') -> In x s \/ In x s'. Proof. rewrite union_spec; auto. Qed. Lemma union_2 : In x s -> In x (union s s'). Proof. rewrite union_spec; auto. Qed. Lemma union_3 : In x s' -> In x (union s s'). Proof. rewrite union_spec; auto. Qed. Lemma inter_1 : In x (inter s s') -> In x s. Proof. rewrite inter_spec; intuition. Qed. Lemma inter_2 : In x (inter s s') -> In x s'. Proof. rewrite inter_spec; intuition. Qed. Lemma inter_3 : In x s -> In x s' -> In x (inter s s'). Proof. rewrite inter_spec; intuition. Qed. Lemma diff_1 : In x (diff s s') -> In x s. Proof. rewrite diff_spec; intuition. Qed. Lemma diff_2 : In x (diff s s') -> ~ In x s'. Proof. rewrite diff_spec; intuition. Qed. Lemma diff_3 : In x s -> ~ In x s' -> In x (diff s s'). Proof. rewrite diff_spec; auto. Qed. Variable f : elt -> bool. Notation compatb := (Proper (E.eq==>Logic.eq)) (only parsing). Lemma filter_1 : compatb f -> In x (filter f s) -> In x s. Proof. intros P; rewrite filter_spec; intuition. Qed. Lemma filter_2 : compatb f -> In x (filter f s) -> f x = true. Proof. intros P; rewrite filter_spec; intuition. Qed. Lemma filter_3 : compatb f -> In x s -> f x = true -> In x (filter f s). Proof. intros P; rewrite filter_spec; intuition. Qed. Lemma for_all_1 : compatb f -> For_all (fun x => f x = true) s -> for_all f s = true. Proof. intros; apply <- for_all_spec; auto. Qed. Lemma for_all_2 : compatb f -> for_all f s = true -> For_all (fun x => f x = true) s. Proof. intros; apply -> for_all_spec; auto. Qed. Lemma exists_1 : compatb f -> Exists (fun x => f x = true) s -> exists_ f s = true. Proof. intros; apply <- exists_spec; auto. Qed. Lemma exists_2 : compatb f -> exists_ f s = true -> Exists (fun x => f x = true) s. Proof. intros; apply -> exists_spec; auto. Qed. Lemma elements_1 : In x s -> InA E.eq x (elements s). Proof. intros; apply <- elements_spec1; auto. Qed. Lemma elements_2 : InA E.eq x (elements s) -> In x s. Proof. intros; apply -> elements_spec1; auto. Qed. End ImplSpec. Notation empty_1 := empty_spec (only parsing). Notation fold_1 := fold_spec (only parsing). Notation cardinal_1 := cardinal_spec (only parsing). Notation partition_1 := partition_spec1 (only parsing). Notation partition_2 := partition_spec2 (only parsing). Notation choose_1 := choose_spec1 (only parsing). Notation choose_2 := choose_spec2 (only parsing). Notation elements_3w := elements_spec2w (only parsing). Hint Resolve mem_1 equal_1 subset_1 empty_1 is_empty_1 choose_1 choose_2 add_1 add_2 remove_1 remove_2 singleton_2 union_1 union_2 union_3 inter_3 diff_3 fold_1 filter_3 for_all_1 exists_1 partition_1 partition_2 elements_1 elements_3w : set. Hint Immediate In_1 mem_2 equal_2 subset_2 is_empty_2 add_3 remove_3 singleton_1 inter_1 inter_2 diff_1 diff_2 filter_1 filter_2 for_all_2 exists_2 elements_2 : set. (** * Specifications written using equivalences : this is now provided by the default interface. *) Section IffSpec. Variable s s' s'' : t. Variable x y z : elt. Lemma In_eq_iff : E.eq x y -> (In x s <-> In y s). Proof. intros E; rewrite E; intuition. Qed. Lemma mem_iff : In x s <-> mem x s = true. Proof. apply iff_sym, mem_spec. Qed. Lemma not_mem_iff : ~In x s <-> mem x s = false. Proof. rewrite <-mem_spec; destruct (mem x s); intuition. Qed. Lemma equal_iff : s[=]s' <-> equal s s' = true. Proof. apply iff_sym, equal_spec. Qed. Lemma subset_iff : s[<=]s' <-> subset s s' = true. Proof. apply iff_sym, subset_spec. Qed. Lemma empty_iff : In x empty <-> False. Proof. 
intuition; apply (empty_spec H). Qed. Lemma is_empty_iff : Empty s <-> is_empty s = true. Proof. apply iff_sym, is_empty_spec. Qed. Lemma singleton_iff : In y (singleton x) <-> E.eq x y. Proof. rewrite singleton_spec; intuition. Qed. Lemma add_iff : In y (add x s) <-> E.eq x y \/ In y s. Proof. rewrite add_spec; intuition. Qed. Lemma add_neq_iff : ~ E.eq x y -> (In y (add x s) <-> In y s). Proof. rewrite add_spec; intuition. elim H; auto with relations. Qed. Lemma remove_iff : In y (remove x s) <-> In y s /\ ~E.eq x y. Proof. rewrite remove_spec; intuition. Qed. Lemma remove_neq_iff : ~ E.eq x y -> (In y (remove x s) <-> In y s). Proof. rewrite remove_spec; intuition. Qed. Variable f : elt -> bool. Lemma for_all_iff : Proper (E.eq==>Logic.eq) f -> (For_all (fun x => f x = true) s <-> for_all f s = true). Proof. intros; apply iff_sym, for_all_spec; auto. Qed. Lemma exists_iff : Proper (E.eq==>Logic.eq) f -> (Exists (fun x => f x = true) s <-> exists_ f s = true). Proof. intros; apply iff_sym, exists_spec; auto. Qed. Lemma elements_iff : In x s <-> InA E.eq x (elements s). Proof. apply iff_sym, elements_spec1. Qed. End IffSpec. Notation union_iff := union_spec (only parsing). Notation inter_iff := inter_spec (only parsing). Notation diff_iff := diff_spec (only parsing). Notation filter_iff := filter_spec (only parsing). (** Useful tactic for simplifying expressions like [In y (add x (union s s'))] *) Ltac set_iff := repeat (progress ( rewrite add_iff || rewrite remove_iff || rewrite singleton_iff || rewrite union_iff || rewrite inter_iff || rewrite diff_iff || rewrite empty_iff)). (** * Specifications written using boolean predicates *) Section BoolSpec. Variable s s' s'' : t. Variable x y z : elt. Lemma mem_b : E.eq x y -> mem x s = mem y s. Proof. intros. generalize (mem_iff s x) (mem_iff s y)(In_eq_iff s H). destruct (mem x s); destruct (mem y s); intuition. Qed. Lemma empty_b : mem y empty = false. Proof. generalize (empty_iff y)(mem_iff empty y). destruct (mem y empty); intuition. Qed. Lemma add_b : mem y (add x s) = eqb x y || mem y s. Proof. generalize (mem_iff (add x s) y)(mem_iff s y)(add_iff s x y); unfold eqb. destruct (eq_dec x y); destruct (mem y s); destruct (mem y (add x s)); intuition. Qed. Lemma add_neq_b : ~ E.eq x y -> mem y (add x s) = mem y s. Proof. intros; generalize (mem_iff (add x s) y)(mem_iff s y)(add_neq_iff s H). destruct (mem y s); destruct (mem y (add x s)); intuition. Qed. Lemma remove_b : mem y (remove x s) = mem y s && negb (eqb x y). Proof. generalize (mem_iff (remove x s) y)(mem_iff s y)(remove_iff s x y); unfold eqb. destruct (eq_dec x y); destruct (mem y s); destruct (mem y (remove x s)); simpl; intuition. Qed. Lemma remove_neq_b : ~ E.eq x y -> mem y (remove x s) = mem y s. Proof. intros; generalize (mem_iff (remove x s) y)(mem_iff s y)(remove_neq_iff s H). destruct (mem y s); destruct (mem y (remove x s)); intuition. Qed. Lemma singleton_b : mem y (singleton x) = eqb x y. Proof. generalize (mem_iff (singleton x) y)(singleton_iff x y); unfold eqb. destruct (eq_dec x y); destruct (mem y (singleton x)); intuition. Qed. Lemma union_b : mem x (union s s') = mem x s || mem x s'. Proof. generalize (mem_iff (union s s') x)(mem_iff s x)(mem_iff s' x)(union_iff s s' x). destruct (mem x s); destruct (mem x s'); destruct (mem x (union s s')); intuition. Qed. Lemma inter_b : mem x (inter s s') = mem x s && mem x s'. Proof. generalize (mem_iff (inter s s') x)(mem_iff s x)(mem_iff s' x)(inter_iff s s' x). 
destruct (mem x s); destruct (mem x s'); destruct (mem x (inter s s')); intuition. Qed. Lemma diff_b : mem x (diff s s') = mem x s && negb (mem x s'). Proof. generalize (mem_iff (diff s s') x)(mem_iff s x)(mem_iff s' x)(diff_iff s s' x). destruct (mem x s); destruct (mem x s'); destruct (mem x (diff s s')); simpl; intuition. Qed. Lemma elements_b : mem x s = existsb (eqb x) (elements s). Proof. generalize (mem_iff s x)(elements_iff s x)(existsb_exists (eqb x) (elements s)). rewrite InA_alt. destruct (mem x s); destruct (existsb (eqb x) (elements s)); auto; intros. symmetry. rewrite H1. destruct H0 as (H0,_). destruct H0 as (a,(Ha1,Ha2)); [ intuition |]. exists a; intuition. unfold eqb; destruct (eq_dec x a); auto. rewrite <- H. rewrite H0. destruct H1 as (H1,_). destruct H1 as (a,(Ha1,Ha2)); [intuition|]. exists a; intuition. unfold eqb in *; destruct (eq_dec x a); auto; discriminate. Qed. Variable f : elt->bool. Lemma filter_b : Proper (E.eq==>Logic.eq) f -> mem x (filter f s) = mem x s && f x. Proof. intros. generalize (mem_iff (filter f s) x)(mem_iff s x)(filter_iff s x H). destruct (mem x s); destruct (mem x (filter f s)); destruct (f x); simpl; intuition. Qed. Lemma for_all_b : Proper (E.eq==>Logic.eq) f -> for_all f s = forallb f (elements s). Proof. intros. generalize (forallb_forall f (elements s))(for_all_iff s H)(elements_iff s). unfold For_all. destruct (forallb f (elements s)); destruct (for_all f s); auto; intros. rewrite <- H1; intros. destruct H0 as (H0,_). rewrite (H2 x0) in H3. rewrite (InA_alt E.eq x0 (elements s)) in H3. destruct H3 as (a,(Ha1,Ha2)). rewrite (H _ _ Ha1). apply H0; auto. symmetry. rewrite H0; intros. destruct H1 as (_,H1). apply H1; auto. rewrite H2. rewrite InA_alt. exists x0; split; auto with relations. Qed. Lemma exists_b : Proper (E.eq==>Logic.eq) f -> exists_ f s = existsb f (elements s). Proof. intros. generalize (existsb_exists f (elements s))(exists_iff s H)(elements_iff s). unfold Exists. destruct (existsb f (elements s)); destruct (exists_ f s); auto; intros. rewrite <- H1; intros. destruct H0 as (H0,_). destruct H0 as (a,(Ha1,Ha2)); auto. exists a; split; auto. rewrite H2; rewrite InA_alt; exists a; auto with relations. symmetry. rewrite H0. destruct H1 as (_,H1). destruct H1 as (a,(Ha1,Ha2)); auto. rewrite (H2 a) in Ha1. rewrite (InA_alt E.eq a (elements s)) in Ha1. destruct Ha1 as (b,(Hb1,Hb2)). exists b; auto. rewrite <- (H _ _ Hb1); auto. Qed. End BoolSpec. (** * Declarations of morphisms with respects to [E.eq] and [Equal] *) Instance In_m : Proper (E.eq==>Equal==>iff) In. Proof. unfold Equal; intros x y H s s' H0. rewrite (In_eq_iff s H); auto. Qed. Instance Empty_m : Proper (Equal==>iff) Empty. Proof. repeat red; unfold Empty; intros s s' E. setoid_rewrite E; auto. Qed. Instance is_empty_m : Proper (Equal==>Logic.eq) is_empty. Proof. intros s s' H. generalize (is_empty_iff s). rewrite H at 1. rewrite is_empty_iff. destruct (is_empty s); destruct (is_empty s'); intuition. Qed. Instance mem_m : Proper (E.eq==>Equal==>Logic.eq) mem. Proof. intros x x' Hx s s' Hs. generalize (mem_iff s x). rewrite Hs, Hx at 1; rewrite mem_iff. destruct (mem x s), (mem x' s'); intuition. Qed. Instance singleton_m : Proper (E.eq==>Equal) singleton. Proof. intros x y H a. rewrite !singleton_iff, H; intuition. Qed. Instance add_m : Proper (E.eq==>Equal==>Equal) add. Proof. intros x x' Hx s s' Hs a. rewrite !add_iff, Hx, Hs; intuition. Qed. Instance remove_m : Proper (E.eq==>Equal==>Equal) remove. Proof. intros x x' Hx s s' Hs a. 
rewrite !remove_iff, Hx, Hs; intuition. Qed. Instance union_m : Proper (Equal==>Equal==>Equal) union. Proof. intros s1 s1' Hs1 s2 s2' Hs2 a. rewrite !union_iff, Hs1, Hs2; intuition. Qed. Instance inter_m : Proper (Equal==>Equal==>Equal) inter. Proof. intros s1 s1' Hs1 s2 s2' Hs2 a. rewrite !inter_iff, Hs1, Hs2; intuition. Qed. Instance diff_m : Proper (Equal==>Equal==>Equal) diff. Proof. intros s1 s1' Hs1 s2 s2' Hs2 a. rewrite !diff_iff, Hs1, Hs2; intuition. Qed. Instance Subset_m : Proper (Equal==>Equal==>iff) Subset. Proof. unfold Equal, Subset; firstorder. Qed. Instance subset_m : Proper (Equal==>Equal==>Logic.eq) subset. Proof. intros s1 s1' Hs1 s2 s2' Hs2. generalize (subset_iff s1 s2). rewrite Hs1, Hs2 at 1. rewrite subset_iff. destruct (subset s1 s2); destruct (subset s1' s2'); intuition. Qed. Instance equal_m : Proper (Equal==>Equal==>Logic.eq) equal. Proof. intros s1 s1' Hs1 s2 s2' Hs2. generalize (equal_iff s1 s2). rewrite Hs1,Hs2 at 1. rewrite equal_iff. destruct (equal s1 s2); destruct (equal s1' s2'); intuition. Qed. Instance SubsetSetoid : PreOrder Subset. (* reflexive + transitive *) Proof. firstorder. Qed. Definition Subset_refl := @PreOrder_Reflexive _ _ SubsetSetoid. Definition Subset_trans := @PreOrder_Transitive _ _ SubsetSetoid. Instance In_s_m : Morphisms.Proper (E.eq ==> Subset ++> impl) In | 1. Proof. simpl_relation. eauto with set. Qed. Instance Empty_s_m : Proper (Subset-->impl) Empty. Proof. firstorder. Qed. Instance add_s_m : Proper (E.eq==>Subset++>Subset) add. Proof. intros x x' Hx s s' Hs a. rewrite !add_iff, Hx; intuition. Qed. Instance remove_s_m : Proper (E.eq==>Subset++>Subset) remove. Proof. intros x x' Hx s s' Hs a. rewrite !remove_iff, Hx; intuition. Qed. Instance union_s_m : Proper (Subset++>Subset++>Subset) union. Proof. intros s1 s1' Hs1 s2 s2' Hs2 a. rewrite !union_iff, Hs1, Hs2; intuition. Qed. Instance inter_s_m : Proper (Subset++>Subset++>Subset) inter. Proof. intros s1 s1' Hs1 s2 s2' Hs2 a. rewrite !inter_iff, Hs1, Hs2; intuition. Qed. Instance diff_s_m : Proper (Subset++>Subset-->Subset) diff. Proof. intros s1 s1' Hs1 s2 s2' Hs2 a. rewrite !diff_iff, Hs1, Hs2; intuition. Qed. (* [fold], [filter], [for_all], [exists_] and [partition] requires some knowledge on [f] in order to be known as morphisms. *) Generalizable Variables f. Instance filter_equal : forall `(Proper _ (E.eq==>Logic.eq) f), Proper (Equal==>Equal) (filter f). Proof. intros f Hf s s' Hs a. rewrite !filter_iff, Hs by auto; intuition. Qed. Instance filter_subset : forall `(Proper _ (E.eq==>Logic.eq) f), Proper (Subset==>Subset) (filter f). Proof. intros f Hf s s' Hs a. rewrite !filter_iff, Hs by auto; intuition. Qed. Lemma filter_ext : forall f f', Proper (E.eq==>Logic.eq) f -> (forall x, f x = f' x) -> forall s s', s[=]s' -> filter f s [=] filter f' s'. Proof. intros f f' Hf Hff' s s' Hss' x. rewrite 2 filter_iff; auto. rewrite Hff', Hss'; intuition. red; red; intros; rewrite <- 2 Hff'; auto. Qed. (* For [elements], [min_elt], [max_elt] and [choose], we would need setoid structures on [list elt] and [option elt]. *) (* Later: Add Morphism cardinal ; cardinal_m. *) End WFactsOn. (** Now comes variants for self-contained weak sets and for full sets. For these variants, only one argument is necessary. Thanks to the subtyping [WS<=S], the [Facts] functor which is meant to be used on modules [(M:S)] can simply be an alias of [WFacts]. *) Module WFacts (M:WSets) := WFactsOn M.E M. Module Facts := WFacts. 
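(** Usage sketch: for any [M : WSets], these facts can be obtained with
<<
  Module MF := WFacts M.
>>
    after which lemmas such as [MF.add_iff] or [MF.union_b] and the
    [MF.set_iff] tactic become available. *)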
coq-8.4pl2/theories/MSets/MSetInterface.v0000640000175000001440000010007611776623104017377 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool. (** Test whether a set is empty or not. *) Parameter mem : elt -> t -> bool. (** [mem x s] tests whether [x] belongs to the set [s]. *) Parameter add : elt -> t -> t. (** [add x s] returns a set containing all elements of [s], plus [x]. If [x] was already in [s], [s] is returned unchanged. *) Parameter singleton : elt -> t. (** [singleton x] returns the one-element set containing only [x]. *) Parameter remove : elt -> t -> t. (** [remove x s] returns a set containing all elements of [s], except [x]. If [x] was not in [s], [s] is returned unchanged. *) Parameter union : t -> t -> t. (** Set union. *) Parameter inter : t -> t -> t. (** Set intersection. *) Parameter diff : t -> t -> t. (** Set difference. *) Parameter equal : t -> t -> bool. (** [equal s1 s2] tests whether the sets [s1] and [s2] are equal, that is, contain equal elements. *) Parameter subset : t -> t -> bool. (** [subset s1 s2] tests whether the set [s1] is a subset of the set [s2]. *) Parameter fold : forall A : Type, (elt -> A -> A) -> t -> A -> A. (** [fold f s a] computes [(f xN ... (f x2 (f x1 a))...)], where [x1 ... xN] are the elements of [s]. The order in which elements of [s] are presented to [f] is unspecified. *) Parameter for_all : (elt -> bool) -> t -> bool. (** [for_all p s] checks if all elements of the set satisfy the predicate [p]. *) Parameter exists_ : (elt -> bool) -> t -> bool. (** [exists p s] checks if at least one element of the set satisfies the predicate [p]. *) Parameter filter : (elt -> bool) -> t -> t. (** [filter p s] returns the set of all elements in [s] that satisfy predicate [p]. *) Parameter partition : (elt -> bool) -> t -> t * t. (** [partition p s] returns a pair of sets [(s1, s2)], where [s1] is the set of all the elements of [s] that satisfy the predicate [p], and [s2] is the set of all the elements of [s] that do not satisfy [p]. *) Parameter cardinal : t -> nat. (** Return the number of elements of a set. *) Parameter elements : t -> list elt. (** Return the list of all elements of the given set, in any order. *) Parameter choose : t -> option elt. (** Return one element of the given set, or [None] if the set is empty. Which element is chosen is unspecified. Equal sets could return different elements. *) End HasWOps. Module Type WOps (E : DecidableType). Definition elt := E.t. Parameter t : Type. (** the abstract type of sets *) Include HasWOps. End WOps. (** ** Functorial signature for weak sets Weak sets are sets without ordering on base elements, only a decidable equality. *) Module Type WSetsOn (E : DecidableType). (** First, we ask for all the functions *) Include WOps E. (** Logical predicates *) Parameter In : elt -> t -> Prop. Declare Instance In_compat : Proper (E.eq==>eq==>iff) In. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) s := forall x, In x s -> P x. Definition Exists (P : elt -> Prop) s := exists x, In x s /\ P x. Notation "s [=] t" := (Equal s t) (at level 70, no associativity). Notation "s [<=] t" := (Subset s t) (at level 70, no associativity). Definition eq : t -> t -> Prop := Equal. Include IsEq. 
(** [eq] is obviously an equivalence, for subtyping only *) Include HasEqDec. (** Specifications of set operators *) Section Spec. Variable s s': t. Variable x y : elt. Variable f : elt -> bool. Notation compatb := (Proper (E.eq==>Logic.eq)) (only parsing). Parameter mem_spec : mem x s = true <-> In x s. Parameter equal_spec : equal s s' = true <-> s[=]s'. Parameter subset_spec : subset s s' = true <-> s[<=]s'. Parameter empty_spec : Empty empty. Parameter is_empty_spec : is_empty s = true <-> Empty s. Parameter add_spec : In y (add x s) <-> E.eq y x \/ In y s. Parameter remove_spec : In y (remove x s) <-> In y s /\ ~E.eq y x. Parameter singleton_spec : In y (singleton x) <-> E.eq y x. Parameter union_spec : In x (union s s') <-> In x s \/ In x s'. Parameter inter_spec : In x (inter s s') <-> In x s /\ In x s'. Parameter diff_spec : In x (diff s s') <-> In x s /\ ~In x s'. Parameter fold_spec : forall (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (flip f) (elements s) i. Parameter cardinal_spec : cardinal s = length (elements s). Parameter filter_spec : compatb f -> (In x (filter f s) <-> In x s /\ f x = true). Parameter for_all_spec : compatb f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Parameter exists_spec : compatb f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Parameter partition_spec1 : compatb f -> fst (partition f s) [=] filter f s. Parameter partition_spec2 : compatb f -> snd (partition f s) [=] filter (fun x => negb (f x)) s. Parameter elements_spec1 : InA E.eq x (elements s) <-> In x s. (** When compared with ordered sets, here comes the only property that is really weaker: *) Parameter elements_spec2w : NoDupA E.eq (elements s). Parameter choose_spec1 : choose s = Some x -> In x s. Parameter choose_spec2 : choose s = None -> Empty s. End Spec. End WSetsOn. (** ** Static signature for weak sets Similar to the functorial signature [WSetsOn], except that the module [E] of base elements is incorporated in the signature. *) Module Type WSets. Declare Module E : DecidableType. Include WSetsOn E. End WSets. (** ** Functorial signature for sets on ordered elements Based on [WSetsOn], plus ordering on sets and [min_elt] and [max_elt] and some stronger specifications for other functions. *) Module Type HasOrdOps (Import T:TypElt). Parameter compare : t -> t -> comparison. (** Total ordering between sets. Can be used as the ordering function for doing sets of sets. *) Parameter min_elt : t -> option elt. (** Return the smallest element of the given set (with respect to the [E.compare] ordering), or [None] if the set is empty. *) Parameter max_elt : t -> option elt. (** Same as [min_elt], but returns the largest element of the given set. *) End HasOrdOps. Module Type Ops (E : OrderedType) := WOps E <+ HasOrdOps. Module Type SetsOn (E : OrderedType). Include WSetsOn E <+ HasOrdOps <+ HasLt <+ IsStrOrder. Section Spec. Variable s s': t. Variable x y : elt. Parameter compare_spec : CompSpec eq lt s s' (compare s s'). (** Additional specification of [elements] *) Parameter elements_spec2 : sort E.lt (elements s). (** Remark: since [fold] is specified via [elements], this stronger specification of [elements] has an indirect impact on [fold], which can now be proved to receive elements in increasing order. *) Parameter min_elt_spec1 : min_elt s = Some x -> In x s. Parameter min_elt_spec2 : min_elt s = Some x -> In y s -> ~ E.lt y x. Parameter min_elt_spec3 : min_elt s = None -> Empty s. Parameter max_elt_spec1 : max_elt s = Some x -> In x s. 
Parameter max_elt_spec2 : max_elt s = Some x -> In y s -> ~ E.lt x y. Parameter max_elt_spec3 : max_elt s = None -> Empty s. (** Additional specification of [choose] *) Parameter choose_spec3 : choose s = Some x -> choose s' = Some y -> Equal s s' -> E.eq x y. End Spec. End SetsOn. (** ** Static signature for sets on ordered elements Similar to the functorial signature [SetsOn], except that the module [E] of base elements is incorporated in the signature. *) Module Type Sets. Declare Module E : OrderedType. Include SetsOn E. End Sets. Module Type S := Sets. (** ** Some subtyping tests << WSetsOn ---> WSets | | | | V V SetsOn ---> Sets Module S_WS (M : Sets) <: WSets := M. Module Sfun_WSfun (E:OrderedType)(M : SetsOn E) <: WSetsOn E := M. Module S_Sfun (M : Sets) <: SetsOn M.E := M. Module WS_WSfun (M : WSets) <: WSetsOn M.E := M. >> *) (** ** Signatures for set representations with ill-formed values. Motivation: For many implementation of finite sets (AVL trees, sorted lists, lists without duplicates), we use the same two-layer approach: - A first module deals with the datatype (eg. list or tree) without any restriction on the values we consider. In this module (named "Raw" in the past), some results are stated under the assumption that some invariant (e.g. sortedness) holds for the input sets. We also prove that this invariant is preserved by set operators. - A second module implements the exact Sets interface by using a subtype, for instance [{ l : list A | sorted l }]. This module is a mere wrapper around the first Raw module. With the interfaces below, we give some respectability to the "Raw" modules. This allows the interested users to directly access them via the interfaces. Even better, we can build once and for all a functor doing the transition between Raw and usual Sets. Description: The type [t] of sets may contain ill-formed values on which our set operators may give wrong answers. In particular, [mem] may not see a element in a ill-formed set (think for instance of a unsorted list being given to an optimized [mem] that stops its search as soon as a strictly larger element is encountered). Unlike optimized operators, the [In] predicate is supposed to always be correct, even on ill-formed sets. Same for [Equal] and other logical predicates. A predicate parameter [Ok] is used to discriminate between well-formed and ill-formed values. Some lemmas hold only on sets validating [Ok]. This predicate [Ok] is required to be preserved by set operators. Moreover, a boolean function [isok] should exist for identifying (at least some of) the well-formed sets. *) Module Type WRawSets (E : DecidableType). (** First, we ask for all the functions *) Include WOps E. (** Is a set well-formed or ill-formed ? *) Parameter IsOk : t -> Prop. Class Ok (s:t) : Prop := ok : IsOk s. (** In order to be able to validate (at least some) particular sets as well-formed, we ask for a boolean function for (semi-)deciding predicate [Ok]. If [Ok] isn't decidable, [isok] may be the always-false function. *) Parameter isok : t -> bool. Declare Instance isok_Ok s `(isok s = true) : Ok s | 10. (** Logical predicates *) Parameter In : elt -> t -> Prop. Declare Instance In_compat : Proper (E.eq==>eq==>iff) In. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) s := forall x, In x s -> P x. Definition Exists (P : elt -> Prop) s := exists x, In x s /\ P x. 
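(** A hedged illustration of the "Raw" discipline described above; it is not
    part of the original interface, the module name [RawNoDupList] is made up,
    and it assumes [SetoidList] is imported (for [NoDupA]). For a raw
    implementation based on lists without duplicates, the well-formedness part
    could look like:
<<
   Module RawNoDupList (E : DecidableType).
     Definition t := list E.t.
     Definition IsOk (s : t) := NoDupA E.eq s.   (* invariant: no duplicates *)
     Class Ok (s : t) : Prop := ok : IsOk s.
     (* [Ok] is not efficiently decidable here, so we take the always-false
        function, as explicitly allowed by the specification of [isok]. *)
     Definition isok : t -> bool := fun _ => false.
   End RawNoDupList.
>>
    Operators such as [mem] may then assume [Ok s], while [In], [Equal] and
    the other logical predicates remain meaningful on arbitrary lists. *)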
Notation "s [=] t" := (Equal s t) (at level 70, no associativity). Notation "s [<=] t" := (Subset s t) (at level 70, no associativity). Definition eq : t -> t -> Prop := Equal. Declare Instance eq_equiv : Equivalence eq. (** First, all operations are compatible with the well-formed predicate. *) Declare Instance empty_ok : Ok empty. Declare Instance add_ok s x `(Ok s) : Ok (add x s). Declare Instance remove_ok s x `(Ok s) : Ok (remove x s). Declare Instance singleton_ok x : Ok (singleton x). Declare Instance union_ok s s' `(Ok s, Ok s') : Ok (union s s'). Declare Instance inter_ok s s' `(Ok s, Ok s') : Ok (inter s s'). Declare Instance diff_ok s s' `(Ok s, Ok s') : Ok (diff s s'). Declare Instance filter_ok s f `(Ok s) : Ok (filter f s). Declare Instance partition_ok1 s f `(Ok s) : Ok (fst (partition f s)). Declare Instance partition_ok2 s f `(Ok s) : Ok (snd (partition f s)). (** Now, the specifications, with constraints on the input sets. *) Section Spec. Variable s s': t. Variable x y : elt. Variable f : elt -> bool. Notation compatb := (Proper (E.eq==>Logic.eq)) (only parsing). Parameter mem_spec : forall `{Ok s}, mem x s = true <-> In x s. Parameter equal_spec : forall `{Ok s, Ok s'}, equal s s' = true <-> s[=]s'. Parameter subset_spec : forall `{Ok s, Ok s'}, subset s s' = true <-> s[<=]s'. Parameter empty_spec : Empty empty. Parameter is_empty_spec : is_empty s = true <-> Empty s. Parameter add_spec : forall `{Ok s}, In y (add x s) <-> E.eq y x \/ In y s. Parameter remove_spec : forall `{Ok s}, In y (remove x s) <-> In y s /\ ~E.eq y x. Parameter singleton_spec : In y (singleton x) <-> E.eq y x. Parameter union_spec : forall `{Ok s, Ok s'}, In x (union s s') <-> In x s \/ In x s'. Parameter inter_spec : forall `{Ok s, Ok s'}, In x (inter s s') <-> In x s /\ In x s'. Parameter diff_spec : forall `{Ok s, Ok s'}, In x (diff s s') <-> In x s /\ ~In x s'. Parameter fold_spec : forall (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (flip f) (elements s) i. Parameter cardinal_spec : forall `{Ok s}, cardinal s = length (elements s). Parameter filter_spec : compatb f -> (In x (filter f s) <-> In x s /\ f x = true). Parameter for_all_spec : compatb f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Parameter exists_spec : compatb f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Parameter partition_spec1 : compatb f -> fst (partition f s) [=] filter f s. Parameter partition_spec2 : compatb f -> snd (partition f s) [=] filter (fun x => negb (f x)) s. Parameter elements_spec1 : InA E.eq x (elements s) <-> In x s. Parameter elements_spec2w : forall `{Ok s}, NoDupA E.eq (elements s). Parameter choose_spec1 : choose s = Some x -> In x s. Parameter choose_spec2 : choose s = None -> Empty s. End Spec. End WRawSets. (** From weak raw sets to weak usual sets *) Module WRaw2SetsOn (E:DecidableType)(M:WRawSets E) <: WSetsOn E. (** We avoid creating induction principles for the Record *) Local Unset Elimination Schemes. Local Unset Case Analysis Schemes. Definition elt := E.t. Record t_ := Mkt {this :> M.t; is_ok : M.Ok this}. Definition t := t_. Arguments Mkt this {is_ok}. Hint Resolve is_ok : typeclass_instances. Definition In (x : elt)(s : t) := M.In x s.(this). Definition Equal (s s' : t) := forall a : elt, In a s <-> In a s'. Definition Subset (s s' : t) := forall a : elt, In a s -> In a s'. Definition Empty (s : t) := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop)(s : t) := forall x, In x s -> P x. 
Definition Exists (P : elt -> Prop)(s : t) := exists x, In x s /\ P x. Definition mem (x : elt)(s : t) := M.mem x s. Definition add (x : elt)(s : t) : t := Mkt (M.add x s). Definition remove (x : elt)(s : t) : t := Mkt (M.remove x s). Definition singleton (x : elt) : t := Mkt (M.singleton x). Definition union (s s' : t) : t := Mkt (M.union s s'). Definition inter (s s' : t) : t := Mkt (M.inter s s'). Definition diff (s s' : t) : t := Mkt (M.diff s s'). Definition equal (s s' : t) := M.equal s s'. Definition subset (s s' : t) := M.subset s s'. Definition empty : t := Mkt M.empty. Definition is_empty (s : t) := M.is_empty s. Definition elements (s : t) : list elt := M.elements s. Definition choose (s : t) : option elt := M.choose s. Definition fold (A : Type)(f : elt -> A -> A)(s : t) : A -> A := M.fold f s. Definition cardinal (s : t) := M.cardinal s. Definition filter (f : elt -> bool)(s : t) : t := Mkt (M.filter f s). Definition for_all (f : elt -> bool)(s : t) := M.for_all f s. Definition exists_ (f : elt -> bool)(s : t) := M.exists_ f s. Definition partition (f : elt -> bool)(s : t) : t * t := let p := M.partition f s in (Mkt (fst p), Mkt (snd p)). Instance In_compat : Proper (E.eq==>eq==>iff) In. Proof. repeat red. intros; apply M.In_compat; congruence. Qed. Definition eq : t -> t -> Prop := Equal. Instance eq_equiv : Equivalence eq. Proof. firstorder. Qed. Definition eq_dec : forall (s s':t), { eq s s' }+{ ~eq s s' }. Proof. intros (s,Hs) (s',Hs'). change ({M.Equal s s'}+{~M.Equal s s'}). destruct (M.equal s s') eqn:H; [left|right]; rewrite <- M.equal_spec; congruence. Defined. Section Spec. Variable s s' : t. Variable x y : elt. Variable f : elt -> bool. Notation compatb := (Proper (E.eq==>Logic.eq)) (only parsing). Lemma mem_spec : mem x s = true <-> In x s. Proof. exact (@M.mem_spec _ _ _). Qed. Lemma equal_spec : equal s s' = true <-> Equal s s'. Proof. exact (@M.equal_spec _ _ _ _). Qed. Lemma subset_spec : subset s s' = true <-> Subset s s'. Proof. exact (@M.subset_spec _ _ _ _). Qed. Lemma empty_spec : Empty empty. Proof. exact M.empty_spec. Qed. Lemma is_empty_spec : is_empty s = true <-> Empty s. Proof. exact (@M.is_empty_spec _). Qed. Lemma add_spec : In y (add x s) <-> E.eq y x \/ In y s. Proof. exact (@M.add_spec _ _ _ _). Qed. Lemma remove_spec : In y (remove x s) <-> In y s /\ ~E.eq y x. Proof. exact (@M.remove_spec _ _ _ _). Qed. Lemma singleton_spec : In y (singleton x) <-> E.eq y x. Proof. exact (@M.singleton_spec _ _). Qed. Lemma union_spec : In x (union s s') <-> In x s \/ In x s'. Proof. exact (@M.union_spec _ _ _ _ _). Qed. Lemma inter_spec : In x (inter s s') <-> In x s /\ In x s'. Proof. exact (@M.inter_spec _ _ _ _ _). Qed. Lemma diff_spec : In x (diff s s') <-> In x s /\ ~In x s'. Proof. exact (@M.diff_spec _ _ _ _ _). Qed. Lemma fold_spec : forall (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (fun a e => f e a) (elements s) i. Proof. exact (@M.fold_spec _). Qed. Lemma cardinal_spec : cardinal s = length (elements s). Proof. exact (@M.cardinal_spec s _). Qed. Lemma filter_spec : compatb f -> (In x (filter f s) <-> In x s /\ f x = true). Proof. exact (@M.filter_spec _ _ _). Qed. Lemma for_all_spec : compatb f -> (for_all f s = true <-> For_all (fun x => f x = true) s). Proof. exact (@M.for_all_spec _ _). Qed. Lemma exists_spec : compatb f -> (exists_ f s = true <-> Exists (fun x => f x = true) s). Proof. exact (@M.exists_spec _ _). Qed. Lemma partition_spec1 : compatb f -> Equal (fst (partition f s)) (filter f s). Proof. 
exact (@M.partition_spec1 _ _). Qed. Lemma partition_spec2 : compatb f -> Equal (snd (partition f s)) (filter (fun x => negb (f x)) s). Proof. exact (@M.partition_spec2 _ _). Qed. Lemma elements_spec1 : InA E.eq x (elements s) <-> In x s. Proof. exact (@M.elements_spec1 _ _). Qed. Lemma elements_spec2w : NoDupA E.eq (elements s). Proof. exact (@M.elements_spec2w _ _). Qed. Lemma choose_spec1 : choose s = Some x -> In x s. Proof. exact (@M.choose_spec1 _ _). Qed. Lemma choose_spec2 : choose s = None -> Empty s. Proof. exact (@M.choose_spec2 _). Qed. End Spec. End WRaw2SetsOn. Module WRaw2Sets (D:DecidableType)(M:WRawSets D) <: WSets with Module E := D. Module E := D. Include WRaw2SetsOn D M. End WRaw2Sets. (** Same approach for ordered sets *) Module Type RawSets (E : OrderedType). Include WRawSets E <+ HasOrdOps <+ HasLt <+ IsStrOrder. Section Spec. Variable s s': t. Variable x y : elt. (** Specification of [compare] *) Parameter compare_spec : forall `{Ok s, Ok s'}, CompSpec eq lt s s' (compare s s'). (** Additional specification of [elements] *) Parameter elements_spec2 : forall `{Ok s}, sort E.lt (elements s). (** Specification of [min_elt] *) Parameter min_elt_spec1 : min_elt s = Some x -> In x s. Parameter min_elt_spec2 : forall `{Ok s}, min_elt s = Some x -> In y s -> ~ E.lt y x. Parameter min_elt_spec3 : min_elt s = None -> Empty s. (** Specification of [max_elt] *) Parameter max_elt_spec1 : max_elt s = Some x -> In x s. Parameter max_elt_spec2 : forall `{Ok s}, max_elt s = Some x -> In y s -> ~ E.lt x y. Parameter max_elt_spec3 : max_elt s = None -> Empty s. (** Additional specification of [choose] *) Parameter choose_spec3 : forall `{Ok s, Ok s'}, choose s = Some x -> choose s' = Some y -> Equal s s' -> E.eq x y. End Spec. End RawSets. (** From Raw to usual sets *) Module Raw2SetsOn (O:OrderedType)(M:RawSets O) <: SetsOn O. Include WRaw2SetsOn O M. Definition compare (s s':t) := M.compare s s'. Definition min_elt (s:t) : option elt := M.min_elt s. Definition max_elt (s:t) : option elt := M.max_elt s. Definition lt (s s':t) := M.lt s s'. (** Specification of [lt] *) Instance lt_strorder : StrictOrder lt. Proof. constructor ; unfold lt; red. unfold complement. red. intros. apply (irreflexivity H). intros. transitivity y; auto. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. repeat red. unfold eq, lt. intros (s1,p1) (s2,p2) E (s1',p1') (s2',p2') E'; simpl. change (M.eq s1 s2) in E. change (M.eq s1' s2') in E'. rewrite E,E'; intuition. Qed. Section Spec. Variable s s' s'' : t. Variable x y : elt. Lemma compare_spec : CompSpec eq lt s s' (compare s s'). Proof. unfold compare; destruct (@M.compare_spec s s' _ _); auto. Qed. (** Additional specification of [elements] *) Lemma elements_spec2 : sort O.lt (elements s). Proof. exact (@M.elements_spec2 _ _). Qed. (** Specification of [min_elt] *) Lemma min_elt_spec1 : min_elt s = Some x -> In x s. Proof. exact (@M.min_elt_spec1 _ _). Qed. Lemma min_elt_spec2 : min_elt s = Some x -> In y s -> ~ O.lt y x. Proof. exact (@M.min_elt_spec2 _ _ _ _). Qed. Lemma min_elt_spec3 : min_elt s = None -> Empty s. Proof. exact (@M.min_elt_spec3 _). Qed. (** Specification of [max_elt] *) Lemma max_elt_spec1 : max_elt s = Some x -> In x s. Proof. exact (@M.max_elt_spec1 _ _). Qed. Lemma max_elt_spec2 : max_elt s = Some x -> In y s -> ~ O.lt x y. Proof. exact (@M.max_elt_spec2 _ _ _ _). Qed. Lemma max_elt_spec3 : max_elt s = None -> Empty s. Proof. exact (@M.max_elt_spec3 _). Qed. 
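(** Hedged usage sketch, not part of the original file: once a module [L]
    implementing [WRawSets D] (resp. [RawSets O]) is available (the names [L],
    [D], [O] are hypothetical), the wrappers [WRaw2Sets] above and [Raw2Sets]
    below package it into a full interface in one line each:
<<
   Module MyWSets := WRaw2Sets D L.   (* weak sets over a DecidableType *)
   Module MySets  := Raw2Sets O L.    (* ordered sets over an OrderedType *)
>>
    Clients of the resulting modules only ever see well-formed sets, the
    invariant being carried by the record field [is_ok] of the wrapped type. *)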
(** Additional specification of [choose] *) Lemma choose_spec3 : choose s = Some x -> choose s' = Some y -> Equal s s' -> O.eq x y. Proof. exact (@M.choose_spec3 _ _ _ _ _ _). Qed. End Spec. End Raw2SetsOn. Module Raw2Sets (O:OrderedType)(M:RawSets O) <: Sets with Module E := O. Module E := O. Include Raw2SetsOn O M. End Raw2Sets. (** It is in fact possible to provide an ordering on sets with very little information on them (more or less only the [In] predicate). This generic build of ordering is in fact not used for the moment, we rather use a simplier version dedicated to sets-as-sorted-lists, see [MakeListOrdering]. *) Module Type IN (O:OrderedType). Parameter Inline t : Type. Parameter Inline In : O.t -> t -> Prop. Declare Instance In_compat : Proper (O.eq==>eq==>iff) In. Definition Equal s s' := forall x, In x s <-> In x s'. Definition Empty s := forall x, ~In x s. End IN. Module MakeSetOrdering (O:OrderedType)(Import M:IN O). Module Import MO := OrderedTypeFacts O. Definition eq : t -> t -> Prop := Equal. Instance eq_equiv : Equivalence eq. Proof. firstorder. Qed. Instance : Proper (O.eq==>eq==>iff) In. Proof. intros x x' Ex s s' Es. rewrite Ex. apply Es. Qed. Definition Below x s := forall y, In y s -> O.lt y x. Definition Above x s := forall y, In y s -> O.lt x y. Definition EquivBefore x s s' := forall y, O.lt y x -> (In y s <-> In y s'). Definition EmptyBetween x y s := forall z, In z s -> O.lt z y -> O.lt z x. Definition lt s s' := exists x, EquivBefore x s s' /\ ((In x s' /\ Below x s) \/ (In x s /\ exists y, In y s' /\ O.lt x y /\ EmptyBetween x y s')). Instance : Proper (O.eq==>eq==>eq==>iff) EquivBefore. Proof. unfold EquivBefore. intros x x' E s1 s1' E1 s2 s2' E2. setoid_rewrite E; setoid_rewrite E1; setoid_rewrite E2; intuition. Qed. Instance : Proper (O.eq==>eq==>iff) Below. Proof. unfold Below. intros x x' Ex s s' Es. setoid_rewrite Ex; setoid_rewrite Es; intuition. Qed. Instance : Proper (O.eq==>eq==>iff) Above. Proof. unfold Above. intros x x' Ex s s' Es. setoid_rewrite Ex; setoid_rewrite Es; intuition. Qed. Instance : Proper (O.eq==>O.eq==>eq==>iff) EmptyBetween. Proof. unfold EmptyBetween. intros x x' Ex y y' Ey s s' Es. setoid_rewrite Ex; setoid_rewrite Ey; setoid_rewrite Es; intuition. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. unfold lt. intros s1 s1' E1 s2 s2' E2. setoid_rewrite E1; setoid_rewrite E2; intuition. Qed. Instance lt_strorder : StrictOrder lt. Proof. split. (* irreflexive *) intros s (x & _ & [(IN,Em)|(IN & y & IN' & LT & Be)]). specialize (Em x IN); order. specialize (Be x IN LT); order. (* transitive *) intros s1 s2 s3 (x & EQ & [(IN,Pre)|(IN,Lex)]) (x' & EQ' & [(IN',Pre')|(IN',Lex')]). (* 1) Pre / Pre --> Pre *) assert (O.lt x x') by (specialize (Pre' x IN); auto). exists x; split. intros y Hy; rewrite <- (EQ' y); auto; order. left; split; auto. rewrite <- (EQ' x); auto. (* 2) Pre / Lex *) elim_compare x x'. (* 2a) x=x' --> Pre *) destruct Lex' as (y & INy & LT & Be). exists y; split. intros z Hz. split; intros INz. specialize (Pre z INz). rewrite <- (EQ' z), <- (EQ z); auto; order. specialize (Be z INz Hz). rewrite (EQ z), (EQ' z); auto; order. left; split; auto. intros z Hz. transitivity x; auto; order. (* 2b) x Pre *) exists x; split. intros z Hz. rewrite <- (EQ' z) by order; auto. left; split; auto. rewrite <- (EQ' x); auto. (* 2c) x>x' --> Lex *) exists x'; split. intros z Hz. rewrite (EQ z) by order; auto. right; split; auto. rewrite (EQ x'); auto. (* 3) Lex / Pre --> Lex *) destruct Lex as (y & INy & LT & Be). 
specialize (Pre' y INy). exists x; split. intros z Hz. rewrite <- (EQ' z) by order; auto. right; split; auto. exists y; repeat split; auto. rewrite <- (EQ' y); auto. intros z Hz LTz; apply Be; auto. rewrite (EQ' z); auto; order. (* 4) Lex / Lex *) elim_compare x x'. (* 4a) x=x' --> impossible *) destruct Lex as (y & INy & LT & Be). setoid_replace x with x' in LT; auto. specialize (Be x' IN' LT); order. (* 4b) x Lex *) exists x; split. intros z Hz. rewrite <- (EQ' z) by order; auto. right; split; auto. destruct Lex as (y & INy & LT & Be). elim_compare y x'. (* 4ba *) destruct Lex' as (y' & Iny' & LT' & Be'). exists y'; repeat split; auto. order. intros z Hz LTz. specialize (Be' z Hz LTz). rewrite <- (EQ' z) in Hz by order. apply Be; auto. order. (* 4bb *) exists y; repeat split; auto. rewrite <- (EQ' y); auto. intros z Hz LTz. apply Be; auto. rewrite (EQ' z); auto; order. (* 4bc*) assert (O.lt x' x) by auto. order. (* 4c) x>x' --> Lex *) exists x'; split. intros z Hz. rewrite (EQ z) by order; auto. right; split; auto. rewrite (EQ x'); auto. Qed. Lemma lt_empty_r : forall s s', Empty s' -> ~ lt s s'. Proof. intros s s' Hs' (x & _ & [(IN,_)|(_ & y & IN & _)]). elim (Hs' x IN). elim (Hs' y IN). Qed. Definition Add x s s' := forall y, In y s' <-> O.eq x y \/ In y s. Lemma lt_empty_l : forall x s1 s2 s2', Empty s1 -> Above x s2 -> Add x s2 s2' -> lt s1 s2'. Proof. intros x s1 s2 s2' Em Ab Ad. exists x; split. intros y Hy; split; intros IN. elim (Em y IN). rewrite (Ad y) in IN; destruct IN as [EQ|IN]. order. specialize (Ab y IN). order. left; split. rewrite (Ad x). now left. intros y Hy. elim (Em y Hy). Qed. Lemma lt_add_lt : forall x1 x2 s1 s1' s2 s2', Above x1 s1 -> Above x2 s2 -> Add x1 s1 s1' -> Add x2 s2 s2' -> O.lt x1 x2 -> lt s1' s2'. Proof. intros x1 x2 s1 s1' s2 s2' Ab1 Ab2 Ad1 Ad2 LT. exists x1; split; [ | right; split]; auto. intros y Hy. rewrite (Ad1 y), (Ad2 y). split; intros [U|U]; try order. specialize (Ab1 y U). order. specialize (Ab2 y U). order. rewrite (Ad1 x1); auto with *. exists x2; repeat split; auto. rewrite (Ad2 x2); now left. intros y. rewrite (Ad2 y). intros [U|U]. order. specialize (Ab2 y U). order. Qed. Lemma lt_add_eq : forall x1 x2 s1 s1' s2 s2', Above x1 s1 -> Above x2 s2 -> Add x1 s1 s1' -> Add x2 s2 s2' -> O.eq x1 x2 -> lt s1 s2 -> lt s1' s2'. Proof. intros x1 x2 s1 s1' s2 s2' Ab1 Ab2 Ad1 Ad2 Hx (x & EQ & Disj). assert (O.lt x1 x). destruct Disj as [(IN,_)|(IN,_)]; auto. rewrite Hx; auto. exists x; split. intros z Hz. rewrite (Ad1 z), (Ad2 z). split; intros [U|U]; try (left; order); right. rewrite <- (EQ z); auto. rewrite (EQ z); auto. destruct Disj as [(IN,Em)|(IN & y & INy & LTy & Be)]. left; split; auto. rewrite (Ad2 x); auto. intros z. rewrite (Ad1 z); intros [U|U]; try specialize (Ab1 z U); auto; order. right; split; auto. rewrite (Ad1 x); auto. exists y; repeat split; auto. rewrite (Ad2 y); auto. intros z. rewrite (Ad2 z). intros [U|U]; try specialize (Ab2 z U); auto; order. Qed. End MakeSetOrdering. Module MakeListOrdering (O:OrderedType). Module MO:=OrderedTypeFacts O. Local Notation t := (list O.t). Local Notation In := (InA O.eq). Definition eq s s' := forall x, In x s <-> In x s'. Instance eq_equiv : Equivalence eq := _. Inductive lt_list : t -> t -> Prop := | lt_nil : forall x s, lt_list nil (x :: s) | lt_cons_lt : forall x y s s', O.lt x y -> lt_list (x :: s) (y :: s') | lt_cons_eq : forall x y s s', O.eq x y -> lt_list s s' -> lt_list (x :: s) (y :: s'). Hint Constructors lt_list. Definition lt := lt_list. Hint Unfold lt. 
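(** Hedged example, not part of the original file: instantiating this functor
    with an ordered type for [nat] (a hypothetical module [NatOrd] whose [lt]
    is the usual [<]) yields the familiar element-by-element order on lists:
<<
   Module NatListOrd := MakeListOrdering NatOrd.
   (* lt_nil     :  nil            <  1 :: nil        *)
   (* lt_cons_lt :  1 :: 3 :: nil  <  2 :: nil        *)
   (* lt_cons_eq :  1 :: nil       <  1 :: 2 :: nil   *)
>>
    In particular a strict prefix is smaller than any of its extensions, which
    is the order used on sets represented as sorted lists. *)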
Instance lt_strorder : StrictOrder lt. Proof. split. (* irreflexive *) assert (forall s s', s=s' -> ~lt s s'). red; induction 2. discriminate. inversion H; subst. apply (StrictOrder_Irreflexive y); auto. inversion H; subst; auto. intros s Hs; exact (H s s (eq_refl s) Hs). (* transitive *) intros s s' s'' H; generalize s''; clear s''; elim H. intros x l s'' H'; inversion_clear H'; auto. intros x x' l l' E s'' H'; inversion_clear H'; auto. constructor 2. transitivity x'; auto. constructor 2. rewrite <- H0; auto. intros. inversion_clear H3. constructor 2. rewrite H0; auto. constructor 3; auto. transitivity y; auto. unfold lt in *; auto. Qed. Instance lt_compat' : Proper (eqlistA O.eq==>eqlistA O.eq==>iff) lt. Proof. apply proper_sym_impl_iff_2; auto with *. intros s1 s1' E1 s2 s2' E2 H. revert s1' E1 s2' E2. induction H; intros; inversion_clear E1; inversion_clear E2. constructor 1. constructor 2. MO.order. constructor 3. MO.order. unfold lt in *; auto. Qed. Lemma eq_cons : forall l1 l2 x y, O.eq x y -> eq l1 l2 -> eq (x :: l1) (y :: l2). Proof. unfold eq; intros l1 l2 x y Exy E12 z. split; inversion_clear 1. left; MO.order. right; rewrite <- E12; auto. left; MO.order. right; rewrite E12; auto. Qed. Hint Resolve eq_cons. Lemma cons_CompSpec : forall c x1 x2 l1 l2, O.eq x1 x2 -> CompSpec eq lt l1 l2 c -> CompSpec eq lt (x1::l1) (x2::l2) c. Proof. destruct c; simpl; inversion_clear 2; auto with relations. Qed. Hint Resolve cons_CompSpec. End MakeListOrdering. coq-8.4pl2/theories/Classes/0000750000175000001440000000000012127276544015057 5ustar notinuserscoq-8.4pl2/theories/Classes/RelationClasses.v0000640000175000001440000003624012010532755020335 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R x y -> False. (** Opaque for proof-search. *) Typeclasses Opaque complement. (** These are convertible. *) Lemma complement_inverse : forall A (R : relation A), complement (inverse R) = inverse (complement R). Proof. reflexivity. Qed. (** We rebind relations in separate classes to be able to overload each proof. *) Set Implicit Arguments. Unset Strict Implicit. Class Reflexive {A} (R : relation A) := reflexivity : forall x, R x x. Class Irreflexive {A} (R : relation A) := irreflexivity : Reflexive (complement R). Hint Extern 1 (Reflexive (complement _)) => class_apply @irreflexivity : typeclass_instances. Class Symmetric {A} (R : relation A) := symmetry : forall x y, R x y -> R y x. Class Asymmetric {A} (R : relation A) := asymmetry : forall x y, R x y -> R y x -> False. Class Transitive {A} (R : relation A) := transitivity : forall x y z, R x y -> R y z -> R x z. Hint Resolve @irreflexivity : ord. Unset Implicit Arguments. (** A HintDb for relations. *) Ltac solve_relation := match goal with | [ |- ?R ?x ?x ] => reflexivity | [ H : ?R ?x ?y |- ?R ?y ?x ] => symmetry ; exact H end. Hint Extern 4 => solve_relation : relations. (** We can already dualize all these properties. *) Generalizable Variables A B C D R S T U l eqA eqB eqC eqD. Lemma flip_Reflexive `{Reflexive A R} : Reflexive (flip R). Proof. tauto. Qed. Hint Extern 3 (Reflexive (flip _)) => apply flip_Reflexive : typeclass_instances. Program Definition flip_Irreflexive `(Irreflexive A R) : Irreflexive (flip R) := irreflexivity (R:=R). Program Definition flip_Symmetric `(Symmetric A R) : Symmetric (flip R) := fun x y H => symmetry (R:=R) H. 
Program Definition flip_Asymmetric `(Asymmetric A R) : Asymmetric (flip R) := fun x y H H' => asymmetry (R:=R) H H'. Program Definition flip_Transitive `(Transitive A R) : Transitive (flip R) := fun x y z H H' => transitivity (R:=R) H' H. Hint Extern 3 (Irreflexive (flip _)) => class_apply flip_Irreflexive : typeclass_instances. Hint Extern 3 (Symmetric (flip _)) => class_apply flip_Symmetric : typeclass_instances. Hint Extern 3 (Asymmetric (flip _)) => class_apply flip_Asymmetric : typeclass_instances. Hint Extern 3 (Transitive (flip _)) => class_apply flip_Transitive : typeclass_instances. Definition Reflexive_complement_Irreflexive `(Reflexive A (R : relation A)) : Irreflexive (complement R). Proof. firstorder. Qed. Definition complement_Symmetric `(Symmetric A (R : relation A)) : Symmetric (complement R). Proof. firstorder. Qed. Hint Extern 3 (Symmetric (complement _)) => class_apply complement_Symmetric : typeclass_instances. Hint Extern 3 (Irreflexive (complement _)) => class_apply Reflexive_complement_Irreflexive : typeclass_instances. (** * Standard instances. *) Ltac reduce_hyp H := match type of H with | context [ _ <-> _ ] => fail 1 | _ => red in H ; try reduce_hyp H end. Ltac reduce_goal := match goal with | [ |- _ <-> _ ] => fail 1 | _ => red ; intros ; try reduce_goal end. Tactic Notation "reduce" "in" hyp(Hid) := reduce_hyp Hid. Ltac reduce := reduce_goal. Tactic Notation "apply" "*" constr(t) := first [ refine t | refine (t _) | refine (t _ _) | refine (t _ _ _) | refine (t _ _ _ _) | refine (t _ _ _ _ _) | refine (t _ _ _ _ _ _) | refine (t _ _ _ _ _ _ _) ]. Ltac simpl_relation := unfold flip, impl, arrow ; try reduce ; program_simpl ; try ( solve [ intuition ]). Local Obligation Tactic := simpl_relation. (** Logical implication. *) Program Instance impl_Reflexive : Reflexive impl. Program Instance impl_Transitive : Transitive impl. (** Logical equivalence. *) Instance iff_Reflexive : Reflexive iff := iff_refl. Instance iff_Symmetric : Symmetric iff := iff_sym. Instance iff_Transitive : Transitive iff := iff_trans. (** Leibniz equality. *) Instance eq_Reflexive {A} : Reflexive (@eq A) := @eq_refl A. Instance eq_Symmetric {A} : Symmetric (@eq A) := @eq_sym A. Instance eq_Transitive {A} : Transitive (@eq A) := @eq_trans A. (** Various combinations of reflexivity, symmetry and transitivity. *) (** A [PreOrder] is both Reflexive and Transitive. *) Class PreOrder {A} (R : relation A) : Prop := { PreOrder_Reflexive :> Reflexive R | 2 ; PreOrder_Transitive :> Transitive R | 2 }. (** A partial equivalence relation is Symmetric and Transitive. *) Class PER {A} (R : relation A) : Prop := { PER_Symmetric :> Symmetric R | 3 ; PER_Transitive :> Transitive R | 3 }. (** Equivalence relations. *) Class Equivalence {A} (R : relation A) : Prop := { Equivalence_Reflexive :> Reflexive R ; Equivalence_Symmetric :> Symmetric R ; Equivalence_Transitive :> Transitive R }. (** An Equivalence is a PER plus reflexivity. *) Instance Equivalence_PER `(Equivalence A R) : PER R | 10 := { PER_Symmetric := Equivalence_Symmetric ; PER_Transitive := Equivalence_Transitive }. (** We can now define antisymmetry w.r.t. an equivalence relation on the carrier. *) Class Antisymmetric A eqA `{equ : Equivalence A eqA} (R : relation A) := antisymmetry : forall {x y}, R x y -> R y x -> eqA x y. Program Definition flip_antiSymmetric `(Antisymmetric A eqA R) : Antisymmetric A eqA (flip R). Proof. firstorder. Qed. (** Leibinz equality [eq] is an equivalence relation. 
The instance has low priority as it is always applicable if only the type is constrained. *) Program Instance eq_equivalence : Equivalence (@eq A) | 10. (** Logical equivalence [iff] is an equivalence relation. *) Program Instance iff_equivalence : Equivalence iff. (** We now develop a generalization of results on relations for arbitrary predicates. The resulting theory can be applied to homogeneous binary relations but also to arbitrary n-ary predicates. *) Local Open Scope list_scope. (* Notation " [ ] " := nil : list_scope. *) (* Notation " [ x ; .. ; y ] " := (cons x .. (cons y nil) ..) (at level 1) : list_scope. *) (** A compact representation of non-dependent arities, with the codomain singled-out. *) (* Note, we do not use [list Type] because it imposes unnecessary universe constraints *) Inductive Tlist : Type := Tnil : Tlist | Tcons : Type -> Tlist -> Tlist. Local Infix "::" := Tcons. Fixpoint arrows (l : Tlist) (r : Type) : Type := match l with | Tnil => r | A :: l' => A -> arrows l' r end. (** We can define abbreviations for operation and relation types based on [arrows]. *) Definition unary_operation A := arrows (A::Tnil) A. Definition binary_operation A := arrows (A::A::Tnil) A. Definition ternary_operation A := arrows (A::A::A::Tnil) A. (** We define n-ary [predicate]s as functions into [Prop]. *) Notation predicate l := (arrows l Prop). (** Unary predicates, or sets. *) Definition unary_predicate A := predicate (A::Tnil). (** Homogeneous binary relations, equivalent to [relation A]. *) Definition binary_relation A := predicate (A::A::Tnil). (** We can close a predicate by universal or existential quantification. *) Fixpoint predicate_all (l : Tlist) : predicate l -> Prop := match l with | Tnil => fun f => f | A :: tl => fun f => forall x : A, predicate_all tl (f x) end. Fixpoint predicate_exists (l : Tlist) : predicate l -> Prop := match l with | Tnil => fun f => f | A :: tl => fun f => exists x : A, predicate_exists tl (f x) end. (** Pointwise extension of a binary operation on [T] to a binary operation on functions whose codomain is [T]. For an operator on [Prop] this lifts the operator to a binary operation. *) Fixpoint pointwise_extension {T : Type} (op : binary_operation T) (l : Tlist) : binary_operation (arrows l T) := match l with | Tnil => fun R R' => op R R' | A :: tl => fun R R' => fun x => pointwise_extension op tl (R x) (R' x) end. (** Pointwise lifting, equivalent to doing [pointwise_extension] and closing using [predicate_all]. *) Fixpoint pointwise_lifting (op : binary_relation Prop) (l : Tlist) : binary_relation (predicate l) := match l with | Tnil => fun R R' => op R R' | A :: tl => fun R R' => forall x, pointwise_lifting op tl (R x) (R' x) end. (** The n-ary equivalence relation, defined by lifting the 0-ary [iff] relation. *) Definition predicate_equivalence {l : Tlist} : binary_relation (predicate l) := pointwise_lifting iff l. (** The n-ary implication relation, defined by lifting the 0-ary [impl] relation. *) Definition predicate_implication {l : Tlist} := pointwise_lifting impl l. (** Notations for pointwise equivalence and implication of predicates. *) Infix "<∙>" := predicate_equivalence (at level 95, no associativity) : predicate_scope. Infix "-∙>" := predicate_implication (at level 70, right associativity) : predicate_scope. Local Open Scope predicate_scope. (** The pointwise liftings of conjunction and disjunctions. Note that these are [binary_operation]s, building new relations out of old ones. 
*) Definition predicate_intersection := pointwise_extension and. Definition predicate_union := pointwise_extension or. Infix "/∙\" := predicate_intersection (at level 80, right associativity) : predicate_scope. Infix "\∙/" := predicate_union (at level 85, right associativity) : predicate_scope. (** The always [True] and always [False] predicates. *) Fixpoint true_predicate {l : Tlist} : predicate l := match l with | Tnil => True | A :: tl => fun _ => @true_predicate tl end. Fixpoint false_predicate {l : Tlist} : predicate l := match l with | Tnil => False | A :: tl => fun _ => @false_predicate tl end. Notation "∙⊤∙" := true_predicate : predicate_scope. Notation "∙⊥∙" := false_predicate : predicate_scope. (** Predicate equivalence is an equivalence, and predicate implication defines a preorder. *) Program Instance predicate_equivalence_equivalence : Equivalence (@predicate_equivalence l). Next Obligation. induction l ; firstorder. Qed. Next Obligation. induction l ; firstorder. Qed. Next Obligation. fold pointwise_lifting. induction l. firstorder. intros. simpl in *. pose (IHl (x x0) (y x0) (z x0)). firstorder. Qed. Program Instance predicate_implication_preorder : PreOrder (@predicate_implication l). Next Obligation. induction l ; firstorder. Qed. Next Obligation. induction l. firstorder. unfold predicate_implication in *. simpl in *. intro. pose (IHl (x x0) (y x0) (z x0)). firstorder. Qed. (** We define the various operations which define the algebra on binary relations, from the general ones. *) Definition relation_equivalence {A : Type} : relation (relation A) := @predicate_equivalence (_::_::Tnil). Class subrelation {A:Type} (R R' : relation A) : Prop := is_subrelation : @predicate_implication (A::A::Tnil) R R'. Arguments subrelation {A} R R'. Definition relation_conjunction {A} (R : relation A) (R' : relation A) : relation A := @predicate_intersection (A::A::Tnil) R R'. Definition relation_disjunction {A} (R : relation A) (R' : relation A) : relation A := @predicate_union (A::A::Tnil) R R'. (** Relation equivalence is an equivalence, and subrelation defines a partial order. *) Set Automatic Introduction. Instance relation_equivalence_equivalence (A : Type) : Equivalence (@relation_equivalence A). Proof. exact (@predicate_equivalence_equivalence (A::A::Tnil)). Qed. Instance relation_implication_preorder A : PreOrder (@subrelation A). Proof. exact (@predicate_implication_preorder (A::A::Tnil)). Qed. (** *** Partial Order. A partial order is a preorder which is additionally antisymmetric. We give an equivalent definition, up-to an equivalence relation on the carrier. *) Class PartialOrder {A} eqA `{equ : Equivalence A eqA} R `{preo : PreOrder A R} := partial_order_equivalence : relation_equivalence eqA (relation_conjunction R (inverse R)). (** The equivalence proof is sufficient for proving that [R] must be a morphism for equivalence (see Morphisms). It is also sufficient to show that [R] is antisymmetric w.r.t. [eqA] *) Instance partial_order_antisym `(PartialOrder A eqA R) : ! Antisymmetric A eqA R. Proof with auto. reduce_goal. pose proof partial_order_equivalence as poe. do 3 red in poe. apply <- poe. firstorder. Qed. (** The partial order defined by subrelation and relation equivalence. *) Program Instance subrelation_partial_order : ! PartialOrder (relation A) relation_equivalence subrelation. Next Obligation. Proof. unfold relation_equivalence in *. compute; firstorder. Qed. 
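(** Hedged usage sketch, not part of this file: assuming the arithmetic
    library is loaded (for [le_n], [le_trans] and [le_antisym]), the order on
    [nat] can be registered with the classes above as follows. The instance
    names are made up and the standard library provides such instances
    elsewhere; this is only an illustration:
<<
   Instance le_preorder : PreOrder le.
   Proof. split. exact le_n. exact le_trans. Qed.

   Instance le_partialorder : PartialOrder (@eq nat) le.
   Proof.
     intros x y; compute; split.
     - intros ->; split; apply le_n.
     - intros [H1 H2]; apply le_antisym; assumption.
   Qed.
>>
    Such instances are what generalized rewriting and the order-related
    tactics find through typeclass resolution. *)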
Typeclasses Opaque arrows predicate_implication predicate_equivalence relation_equivalence pointwise_lifting. (** Rewrite relation on a given support: declares a relation as a rewrite relation for use by the generalized rewriting tactic. It helps choosing if a rewrite should be handled by the generalized or the regular rewriting tactic using leibniz equality. Users can declare an [RewriteRelation A RA] anywhere to declare default relations. This is also done automatically by the [Declare Relation A RA] commands. *) Class RewriteRelation {A : Type} (RA : relation A). Instance: RewriteRelation impl. Instance: RewriteRelation iff. Instance: RewriteRelation (@relation_equivalence A). (** Any [Equivalence] declared in the context is automatically considered a rewrite relation. *) Instance equivalence_rewrite_relation `(Equivalence A eqA) : RewriteRelation eqA. (** Strict Order *) Class StrictOrder {A : Type} (R : relation A) : Prop := { StrictOrder_Irreflexive :> Irreflexive R ; StrictOrder_Transitive :> Transitive R }. Instance StrictOrder_Asymmetric `(StrictOrder A R) : Asymmetric R. Proof. firstorder. Qed. (** Inversing a [StrictOrder] gives another [StrictOrder] *) Lemma StrictOrder_inverse `(StrictOrder A R) : StrictOrder (inverse R). Proof. firstorder. Qed. (** Same for [PartialOrder]. *) Lemma PreOrder_inverse `(PreOrder A R) : PreOrder (inverse R). Proof. firstorder. Qed. Hint Extern 3 (StrictOrder (inverse _)) => class_apply StrictOrder_inverse : typeclass_instances. Hint Extern 3 (PreOrder (inverse _)) => class_apply PreOrder_inverse : typeclass_instances. Lemma PartialOrder_inverse `(PartialOrder A eqA R) : PartialOrder eqA (inverse R). Proof. firstorder. Qed. Hint Extern 3 (PartialOrder (inverse _)) => class_apply PartialOrder_inverse : typeclass_instances. coq-8.4pl2/theories/Classes/RelationPairs.v0000640000175000001440000001153711662502170020020 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* B) : relation A := fun a a' => R (f a) (f a'). Infix "@@" := RelCompFun (at level 30, right associativity) : signature_scope. Notation "R @@1" := (R @@ Fst)%signature (at level 30) : signature_scope. Notation "R @@2" := (R @@ Snd)%signature (at level 30) : signature_scope. (** We declare measures to the system using the [Measure] class. Otherwise the instances would easily introduce loops, never instantiating the [f] function. *) Class Measure {A B} (f : A -> B). (** Standard measures. *) Instance fst_measure : @Measure (A * B) A Fst. Instance snd_measure : @Measure (A * B) B Snd. (** We define a product relation over [A*B]: each components should satisfy the corresponding initial relation. *) Definition RelProd {A B}(RA:relation A)(RB:relation B) : relation (A*B) := relation_conjunction (RA @@1) (RB @@2). Infix "*" := RelProd : signature_scope. Section RelCompFun_Instances. Context {A B : Type} (R : relation B). Global Instance RelCompFun_Reflexive `(Measure A B f, Reflexive _ R) : Reflexive (R@@f). Proof. firstorder. Qed. Global Instance RelCompFun_Symmetric `(Measure A B f, Symmetric _ R) : Symmetric (R@@f). Proof. firstorder. Qed. Global Instance RelCompFun_Transitive `(Measure A B f, Transitive _ R) : Transitive (R@@f). Proof. firstorder. Qed. Global Instance RelCompFun_Irreflexive `(Measure A B f, Irreflexive _ R) : Irreflexive (R@@f). Proof. firstorder. Qed. Global Program Instance RelCompFun_Equivalence `(Measure A B f, Equivalence _ R) : Equivalence (R@@f). 
Global Program Instance RelCompFun_StrictOrder `(Measure A B f, StrictOrder _ R) : StrictOrder (R@@f). End RelCompFun_Instances. Instance RelProd_Reflexive {A B}(RA:relation A)(RB:relation B) `(Reflexive _ RA, Reflexive _ RB) : Reflexive (RA*RB). Proof. firstorder. Qed. Instance RelProd_Symmetric {A B}(RA:relation A)(RB:relation B) `(Symmetric _ RA, Symmetric _ RB) : Symmetric (RA*RB). Proof. firstorder. Qed. Instance RelProd_Transitive {A B}(RA:relation A)(RB:relation B) `(Transitive _ RA, Transitive _ RB) : Transitive (RA*RB). Proof. firstorder. Qed. Program Instance RelProd_Equivalence {A B}(RA:relation A)(RB:relation B) `(Equivalence _ RA, Equivalence _ RB) : Equivalence (RA*RB). Lemma FstRel_ProdRel {A B}(RA:relation A) : relation_equivalence (RA @@1) (RA*(fun _ _ : B => True)). Proof. firstorder. Qed. Lemma SndRel_ProdRel {A B}(RB:relation B) : relation_equivalence (RB @@2) ((fun _ _ : A =>True) * RB). Proof. firstorder. Qed. Instance FstRel_sub {A B} (RA:relation A)(RB:relation B): subrelation (RA*RB) (RA @@1). Proof. firstorder. Qed. Instance SndRel_sub {A B} (RA:relation A)(RB:relation B): subrelation (RA*RB) (RB @@2). Proof. firstorder. Qed. Instance pair_compat { A B } (RA:relation A)(RB:relation B) : Proper (RA==>RB==> RA*RB) (@pair _ _). Proof. firstorder. Qed. Instance fst_compat { A B } (RA:relation A)(RB:relation B) : Proper (RA*RB ==> RA) Fst. Proof. intros (x,y) (x',y') (Hx,Hy); compute in *; auto. Qed. Instance snd_compat { A B } (RA:relation A)(RB:relation B) : Proper (RA*RB ==> RB) Snd. Proof. intros (x,y) (x',y') (Hx,Hy); compute in *; auto. Qed. Instance RelCompFun_compat {A B}(f:A->B)(R : relation B) `(Proper _ (Ri==>Ri==>Ro) R) : Proper (Ri@@f==>Ri@@f==>Ro) (R@@f)%signature. Proof. unfold RelCompFun; firstorder. Qed. Hint Unfold RelProd RelCompFun. Hint Extern 2 (RelProd _ _ _ _) => split. coq-8.4pl2/theories/Classes/SetoidClass.v0000640000175000001440000001063212010532755017454 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Equivalence equiv }. (* Too dangerous instance *) (* Program Instance [ eqa : Equivalence A eqA ] => *) (* equivalence_setoid : Setoid A := *) (* equiv := eqA ; setoid_equiv := eqa. *) (** Shortcuts to make proof search easier. *) Definition setoid_refl `(sa : Setoid A) : Reflexive equiv. Proof. typeclasses eauto. Qed. Definition setoid_sym `(sa : Setoid A) : Symmetric equiv. Proof. typeclasses eauto. Qed. Definition setoid_trans `(sa : Setoid A) : Transitive equiv. Proof. typeclasses eauto. Qed. Existing Instance setoid_refl. Existing Instance setoid_sym. Existing Instance setoid_trans. (** Standard setoids. *) (* Program Instance eq_setoid : Setoid A := *) (* equiv := eq ; setoid_equiv := eq_equivalence. *) Program Instance iff_setoid : Setoid Prop := { equiv := iff ; setoid_equiv := iff_equivalence }. (** Overloaded notations for setoid equivalence and inequivalence. Not to be confused with [eq] and [=]. *) (** Subset objects should be first coerced to their underlying type, but that notation doesn't work in the standard case then. *) (* Notation " x == y " := (equiv (x :>) (y :>)) (at level 70, no associativity) : type_scope. *) Notation " x == y " := (equiv x y) (at level 70, no associativity) : type_scope. Notation " x =/= y " := (complement equiv x y) (at level 70, no associativity) : type_scope. (** Use the [clsubstitute] command which substitutes an equality in every hypothesis. 
*) Ltac clsubst H := lazymatch type of H with ?x == ?y => substitute H ; clear H x end. Ltac clsubst_nofail := match goal with | [ H : ?x == ?y |- _ ] => clsubst H ; clsubst_nofail | _ => idtac end. (** [subst*] will try its best at substituting every equality in the goal. *) Tactic Notation "clsubst" "*" := clsubst_nofail. Lemma nequiv_equiv_trans : forall `{Setoid A} (x y z : A), x =/= y -> y == z -> x =/= z. Proof with auto. intros; intro. assert(z == y) by (symmetry ; auto). assert(x == y) by (transitivity z ; eauto). contradiction. Qed. Lemma equiv_nequiv_trans : forall `{Setoid A} (x y z : A), x == y -> y =/= z -> x =/= z. Proof. intros; intro. assert(y == x) by (symmetry ; auto). assert(y == z) by (transitivity x ; eauto). contradiction. Qed. Ltac setoid_simplify_one := match goal with | [ H : (?x == ?x)%type |- _ ] => clear H | [ H : (?x == ?y)%type |- _ ] => clsubst H | [ |- (?x =/= ?y)%type ] => let name:=fresh "Hneq" in intro name end. Ltac setoid_simplify := repeat setoid_simplify_one. Ltac setoidify_tac := match goal with | [ s : Setoid ?A, H : ?R ?x ?y |- _ ] => change R with (@equiv A R s) in H | [ s : Setoid ?A |- context C [ ?R ?x ?y ] ] => change (R x y) with (@equiv A R s x y) end. Ltac setoidify := repeat setoidify_tac. (** Every setoid relation gives rise to a morphism, in fact every partial setoid does. *) Program Instance setoid_morphism `(sa : Setoid A) : Proper (equiv ++> equiv ++> iff) equiv := proper_prf. Program Instance setoid_partial_app_morphism `(sa : Setoid A) (x : A) : Proper (equiv ++> iff) (equiv x) := proper_prf. (** Partial setoids don't require reflexivity so we can build a partial setoid on the function space. *) Class PartialSetoid (A : Type) := { pequiv : relation A ; pequiv_prf :> PER pequiv }. (** Overloaded notation for partial setoid equivalence. *) Infix "=~=" := pequiv (at level 70, no associativity) : type_scope. (** Reset the default Program tactic. *) Obligation Tactic := program_simpl. coq-8.4pl2/theories/Classes/vo.itarget0000640000175000001440000000025211335063552017055 0ustar notinusersEquivalence.vo EquivDec.vo Init.vo Morphisms_Prop.vo Morphisms_Relations.vo Morphisms.vo RelationClasses.vo SetoidClass.vo SetoidDec.vo SetoidTactics.vo RelationPairs.vo coq-8.4pl2/theories/Classes/Equivalence.v0000640000175000001440000001023412010532755017476 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* substitute H ; clear H x end. Ltac setoid_subst_nofail := match goal with | [ H : ?x === ?y |- _ ] => setoid_subst H ; setoid_subst_nofail | _ => idtac end. (** [subst*] will try its best at substituting every equality in the goal. *) Tactic Notation "subst" "*" := subst_no_fail ; setoid_subst_nofail. (** Simplify the goal w.r.t. equivalence. *) Ltac equiv_simplify_one := match goal with | [ H : ?x === ?x |- _ ] => clear H | [ H : ?x === ?y |- _ ] => setoid_subst H | [ |- ?x =/= ?y ] => let name:=fresh "Hneq" in intro name | [ |- ~ ?x === ?y ] => let name:=fresh "Hneq" in intro name end. Ltac equiv_simplify := repeat equiv_simplify_one. (** "reify" relations which are equivalences to applications of the overloaded [equiv] method for easy recognition in tactics. *) Ltac equivify_tac := match goal with | [ s : Equivalence ?A ?R, H : ?R ?x ?y |- _ ] => change R with (@equiv A R s) in H | [ s : Equivalence ?A ?R |- context C [ ?R ?x ?y ] ] => change (R x y) with (@equiv A R s x y) end. Ltac equivify := repeat equivify_tac. Section Respecting. 
(** Here we build an equivalence instance for functions which relates respectful ones only, we do not export it. *) Definition respecting `(eqa : Equivalence A (R : relation A), eqb : Equivalence B (R' : relation B)) : Type := { morph : A -> B | respectful R R' morph morph }. Program Instance respecting_equiv `(eqa : Equivalence A R, eqb : Equivalence B R') : Equivalence (fun (f g : respecting eqa eqb) => forall (x y : A), R x y -> R' (proj1_sig f x) (proj1_sig g y)). Solve Obligations using unfold respecting in * ; simpl_relation ; program_simpl. Next Obligation. Proof. unfold respecting in *. program_simpl. transitivity (y y0); auto. apply H0. reflexivity. Qed. End Respecting. (** The default equivalence on function spaces, with higher-priority than [eq]. *) Program Instance pointwise_equivalence {A} `(eqb : Equivalence B eqB) : Equivalence (pointwise_relation A eqB) | 9. Next Obligation. Proof. transitivity (y a) ; auto. Qed. coq-8.4pl2/theories/Classes/Morphisms.v0000640000175000001440000005055712010532755017232 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type) (D : B -> Type) (R : A -> B -> Prop) (R' : forall (x : A) (y : B), C x -> D y -> Prop) : (forall x : A, C x) -> (forall x : B, D x) -> Prop := fun f g => forall x y, R x y -> R' x y (f x) (g y). (** The non-dependent version is an instance where we forget dependencies. *) Definition respectful {A B : Type} (R : relation A) (R' : relation B) : relation (A -> B) := Eval compute in @respectful_hetero A A (fun _ => B) (fun _ => B) R (fun _ _ => R'). (** Notations reminiscent of the old syntax for declaring morphisms. *) Delimit Scope signature_scope with signature. Arguments Proper {A}%type R%signature m. Arguments respectful {A B}%type (R R')%signature _ _. Module ProperNotations. Notation " R ++> R' " := (@respectful _ _ (R%signature) (R'%signature)) (right associativity, at level 55) : signature_scope. Notation " R ==> R' " := (@respectful _ _ (R%signature) (R'%signature)) (right associativity, at level 55) : signature_scope. Notation " R --> R' " := (@respectful _ _ (inverse (R%signature)) (R'%signature)) (right associativity, at level 55) : signature_scope. End ProperNotations. Export ProperNotations. Local Open Scope signature_scope. (** [solve_proper] try to solve the goal [Proper (?==> ... ==>?) f] by repeated introductions and setoid rewrites. It should work fine when [f] is a combination of already known morphisms and quantifiers. *) Ltac solve_respectful t := match goal with | |- respectful _ _ _ _ => let H := fresh "H" in intros ? ? H; solve_respectful ltac:(setoid_rewrite H; t) | _ => t; reflexivity end. Ltac solve_proper := unfold Proper; solve_respectful ltac:(idtac). (** [f_equiv] is a clone of [f_equal] that handles setoid equivalences. For example, if we know that [f] is a morphism for [E1==>E2==>E], then the goal [E (f x y) (f x' y')] will be transformed by [f_equiv] into the subgoals [E1 x x'] and [E2 y y']. *) Ltac f_equiv := match goal with | |- ?R (?f ?x) (?f' _) => let T := type of x in let Rx := fresh "R" in evar (Rx : relation T); let H := fresh in assert (H : (Rx==>R)%signature f f'); unfold Rx in *; clear Rx; [ f_equiv | apply H; clear H; try reflexivity ] | |- ?R ?f ?f' => try reflexivity; change (Proper R f); eauto with typeclass_instances; fail | _ => idtac end. (** [forall_def] reifies the dependent product as a definition. 
*) Definition forall_def {A : Type} (B : A -> Type) : Type := forall x : A, B x. (** Dependent pointwise lifting of a relation on the range. *) Definition forall_relation {A : Type} {B : A -> Type} (sig : forall a, relation (B a)) : relation (forall x, B x) := fun f g => forall a, sig a (f a) (g a). Arguments forall_relation {A B}%type sig%signature _ _. (** Non-dependent pointwise lifting *) Definition pointwise_relation (A : Type) {B : Type} (R : relation B) : relation (A -> B) := Eval compute in forall_relation (B:=fun _ => B) (fun _ => R). Lemma pointwise_pointwise A B (R : relation B) : relation_equivalence (pointwise_relation A R) (@eq A ==> R). Proof. intros. split. simpl_relation. firstorder. Qed. (** We can build a PER on the Coq function space if we have PERs on the domain and codomain. *) Hint Unfold Reflexive : core. Hint Unfold Symmetric : core. Hint Unfold Transitive : core. Typeclasses Opaque respectful pointwise_relation forall_relation. Program Instance respectful_per `(PER A R, PER B R') : PER (R ==> R'). Next Obligation. Proof with auto. assert(R x0 x0). transitivity y0... symmetry... transitivity (y x0)... Qed. (** Subrelations induce a morphism on the identity. *) Instance subrelation_id_proper `(subrelation A R₁ R₂) : Proper (R₁ ==> R₂) id. Proof. firstorder. Qed. (** The subrelation property goes through products as usual. *) Lemma subrelation_respectful `(subl : subrelation A R₂ R₁, subr : subrelation B S₁ S₂) : subrelation (R₁ ==> S₁) (R₂ ==> S₂). Proof. simpl_relation. apply subr. apply H. apply subl. apply H0. Qed. (** And of course it is reflexive. *) Lemma subrelation_refl A R : @subrelation A R R. Proof. simpl_relation. Qed. Ltac subrelation_tac T U := (is_ground T ; is_ground U ; class_apply @subrelation_refl) || class_apply @subrelation_respectful || class_apply @subrelation_refl. Hint Extern 3 (@subrelation _ ?T ?U) => subrelation_tac T U : typeclass_instances. (** [Proper] is itself a covariant morphism for [subrelation]. *) Lemma subrelation_proper `(mor : Proper A R₁ m, unc : Unconvertible (relation A) R₁ R₂, sub : subrelation A R₁ R₂) : Proper R₂ m. Proof. intros. apply sub. apply mor. Qed. CoInductive apply_subrelation : Prop := do_subrelation. Ltac proper_subrelation := match goal with [ H : apply_subrelation |- _ ] => clear H ; class_apply @subrelation_proper end. Hint Extern 5 (@Proper _ ?H _) => proper_subrelation : typeclass_instances. Instance proper_subrelation_proper : Proper (subrelation ++> eq ==> impl) (@Proper A). Proof. reduce. subst. firstorder. Qed. (** Essential subrelation instances for [iff], [impl] and [pointwise_relation]. *) Instance iff_impl_subrelation : subrelation iff impl | 2. Proof. firstorder. Qed. Instance iff_inverse_impl_subrelation : subrelation iff (inverse impl) | 2. Proof. firstorder. Qed. Instance pointwise_subrelation {A} `(sub : subrelation B R R') : subrelation (pointwise_relation A R) (pointwise_relation A R') | 4. Proof. reduce. unfold pointwise_relation in *. apply sub. apply H. Qed. (** For dependent function types. *) Lemma forall_subrelation A (B : A -> Type) (R S : forall x : A, relation (B x)) : (forall a, subrelation (R a) (S a)) -> subrelation (forall_relation R) (forall_relation S). Proof. reduce. apply H. apply H0. Qed. (** We use an extern hint to help unification. *) Hint Extern 4 (subrelation (@forall_relation ?A ?B ?R) (@forall_relation _ _ ?S)) => apply (@forall_subrelation A B R S) ; intro : typeclass_instances. (** Any symmetric relation is equal to its inverse. 
*) Lemma subrelation_symmetric A R `(Symmetric A R) : subrelation (inverse R) R. Proof. reduce. red in H0. symmetry. assumption. Qed. Hint Extern 4 (subrelation (inverse _) _) => class_apply @subrelation_symmetric : typeclass_instances. (** The complement of a relation conserves its proper elements. *) Program Definition complement_proper `(mR : Proper (A -> A -> Prop) (RA ==> RA ==> iff) R) : Proper (RA ==> RA ==> iff) (complement R) := _. Next Obligation. Proof. unfold complement. pose (mR x y H x0 y0 H0). intuition. Qed. Hint Extern 1 (Proper _ (complement _)) => apply @complement_proper : typeclass_instances. (** The [inverse] too, actually the [flip] instance is a bit more general. *) Program Definition flip_proper `(mor : Proper (A -> B -> C) (RA ==> RB ==> RC) f) : Proper (RB ==> RA ==> RC) (flip f) := _. Next Obligation. Proof. apply mor ; auto. Qed. Hint Extern 1 (Proper _ (flip _)) => apply @flip_proper : typeclass_instances. (** Every Transitive relation gives rise to a binary morphism on [impl], contravariant in the first argument, covariant in the second. *) Program Instance trans_contra_co_morphism `(Transitive A R) : Proper (R --> R ++> impl) R. Next Obligation. Proof with auto. transitivity x... transitivity x0... Qed. (** Proper declarations for partial applications. *) Program Instance trans_contra_inv_impl_morphism `(Transitive A R) : Proper (R --> inverse impl) (R x) | 3. Next Obligation. Proof with auto. transitivity y... Qed. Program Instance trans_co_impl_morphism `(Transitive A R) : Proper (R ++> impl) (R x) | 3. Next Obligation. Proof with auto. transitivity x0... Qed. Program Instance trans_sym_co_inv_impl_morphism `(PER A R) : Proper (R ++> inverse impl) (R x) | 3. Next Obligation. Proof with auto. transitivity y... symmetry... Qed. Program Instance trans_sym_contra_impl_morphism `(PER A R) : Proper (R --> impl) (R x) | 3. Next Obligation. Proof with auto. transitivity x0... symmetry... Qed. Program Instance per_partial_app_morphism `(PER A R) : Proper (R ==> iff) (R x) | 2. Next Obligation. Proof with auto. split. intros ; transitivity x0... intros. transitivity y... symmetry... Qed. (** Every Transitive relation induces a morphism by "pushing" an [R x y] on the left of an [R x z] proof to get an [R y z] goal. *) Program Instance trans_co_eq_inv_impl_morphism `(Transitive A R) : Proper (R ==> (@eq A) ==> inverse impl) R | 2. Next Obligation. Proof with auto. transitivity y... Qed. (** Every Symmetric and Transitive relation gives rise to an equivariant morphism. *) Program Instance PER_morphism `(PER A R) : Proper (R ==> R ==> iff) R | 1. Next Obligation. Proof with auto. split ; intros. transitivity x0... transitivity x... symmetry... transitivity y... transitivity y0... symmetry... Qed. Lemma symmetric_equiv_inverse `(Symmetric A R) : relation_equivalence R (flip R). Proof. firstorder. Qed. Program Instance compose_proper A B C R₀ R₁ R₂ : Proper ((R₁ ==> R₂) ==> (R₀ ==> R₁) ==> (R₀ ==> R₂)) (@compose A B C). Next Obligation. Proof. simpl_relation. unfold compose. apply H. apply H0. apply H1. Qed. (** Coq functions are morphisms for Leibniz equality, applied only if really needed. *) Instance reflexive_eq_dom_reflexive (A : Type) `(Reflexive B R') : Reflexive (@Logic.eq A ==> R'). Proof. simpl_relation. Qed. (** [respectful] is a morphism for relation equivalence. *) Instance respectful_morphism : Proper (relation_equivalence ++> relation_equivalence ++> relation_equivalence) (@respectful A B). Proof. reduce. 
unfold respectful, relation_equivalence, predicate_equivalence in * ; simpl in *. split ; intros. rewrite <- H0. apply H1. rewrite H. assumption. rewrite H0. apply H1. rewrite <- H. assumption. Qed. (** Every element in the carrier of a reflexive relation is a morphism for this relation. We use a proxy class for this case which is used internally to discharge reflexivity constraints. The [Reflexive] instance will almost always be used, but it won't apply in general to any kind of [Proper (A -> B) _ _] goal, making proof-search much slower. A cleaner solution would be to be able to set different priorities in different hint bases and select a particular hint database for resolution of a type class constraint.*) Class ProperProxy {A} (R : relation A) (m : A) : Prop := proper_proxy : R m m. Lemma eq_proper_proxy A (x : A) : ProperProxy (@eq A) x. Proof. firstorder. Qed. Lemma reflexive_proper_proxy `(Reflexive A R) (x : A) : ProperProxy R x. Proof. firstorder. Qed. Lemma proper_proper_proxy `(Proper A R x) : ProperProxy R x. Proof. firstorder. Qed. Hint Extern 1 (ProperProxy _ _) => class_apply @eq_proper_proxy || class_apply @reflexive_proper_proxy : typeclass_instances. Hint Extern 2 (ProperProxy ?R _) => not_evar R; class_apply @proper_proper_proxy : typeclass_instances. (** [R] is Reflexive, hence we can build the needed proof. *) Lemma Reflexive_partial_app_morphism `(Proper (A -> B) (R ==> R') m, ProperProxy A R x) : Proper R' (m x). Proof. simpl_relation. Qed. Class Params {A : Type} (of : A) (arity : nat). Class PartialApplication. CoInductive normalization_done : Prop := did_normalization. Ltac partial_application_tactic := let rec do_partial_apps H m cont := match m with | ?m' ?x => class_apply @Reflexive_partial_app_morphism ; [(do_partial_apps H m' ltac:idtac)|clear H] | _ => cont end in let rec do_partial H ar m := match ar with | 0%nat => do_partial_apps H m ltac:(fail 1) | S ?n' => match m with ?m' ?x => do_partial H n' m' end end in let params m sk fk := (let m' := fresh in head_of_constr m' m ; let n := fresh in evar (n:nat) ; let v := eval compute in n in clear n ; let H := fresh in assert(H:Params m' v) by typeclasses eauto ; let v' := eval compute in v in subst m'; (sk H v' || fail 1)) || fk in let on_morphism m cont := params m ltac:(fun H n => do_partial H n m) ltac:(cont) in match goal with | [ _ : normalization_done |- _ ] => fail 1 | [ _ : @Params _ _ _ |- _ ] => fail 1 | [ |- @Proper ?T _ (?m ?x) ] => match goal with | [ H : PartialApplication |- _ ] => class_apply @Reflexive_partial_app_morphism; [|clear H] | _ => on_morphism (m x) ltac:(class_apply @Reflexive_partial_app_morphism) end end. Hint Extern 4 (@Proper _ _ _) => partial_application_tactic : typeclass_instances. Lemma inverse_respectful : forall (A : Type) (R : relation A) (B : Type) (R' : relation B), relation_equivalence (inverse (R ==> R')) (inverse R ==> inverse R'). Proof. intros. unfold flip, respectful. split ; intros ; intuition. Qed. (** Special-purpose class to do normalization of signatures w.r.t. inverse. *) Class Normalizes (A : Type) (m : relation A) (m' : relation A) : Prop := normalizes : relation_equivalence m m'. (** Current strategy: add [inverse] everywhere and reduce using [subrelation] afterwards. *) Lemma inverse_atom A R : Normalizes A R (inverse (inverse R)). Proof. firstorder. Qed. Lemma inverse_arrow `(NA : Normalizes A R (inverse R'''), NB : Normalizes B R' (inverse R'')) : Normalizes (A -> B) (R ==> R') (inverse (R''' ==> R'')%signature). Proof. unfold Normalizes in *. intros. 
rewrite NA, NB. firstorder. Qed. Ltac inverse := match goal with | [ |- Normalizes _ (respectful _ _) _ ] => class_apply @inverse_arrow | _ => class_apply @inverse_atom end. Hint Extern 1 (Normalizes _ _ _) => inverse : typeclass_instances. (** Treating inverse: can't make them direct instances as we need at least a [flip] present in the goal. *) Lemma inverse1 `(subrelation A R' R) : subrelation (inverse (inverse R')) R. Proof. firstorder. Qed. Lemma inverse2 `(subrelation A R R') : subrelation R (inverse (inverse R')). Proof. firstorder. Qed. Hint Extern 1 (subrelation (flip _) _) => class_apply @inverse1 : typeclass_instances. Hint Extern 1 (subrelation _ (flip _)) => class_apply @inverse2 : typeclass_instances. (** That's if and only if *) Lemma eq_subrelation `(Reflexive A R) : subrelation (@eq A) R. Proof. simpl_relation. Qed. (* Hint Extern 3 (subrelation eq ?R) => not_evar R ; class_apply eq_subrelation : typeclass_instances. *) (** Once we have normalized, we will apply this instance to simplify the problem. *) Definition proper_inverse_proper `(mor : Proper A R m) : Proper (inverse R) m := mor. Hint Extern 2 (@Proper _ (flip _) _) => class_apply @proper_inverse_proper : typeclass_instances. (** Bootstrap !!! *) Instance proper_proper : Proper (relation_equivalence ==> eq ==> iff) (@Proper A). Proof. simpl_relation. reduce in H. split ; red ; intros. setoid_rewrite <- H. apply H0. setoid_rewrite H. apply H0. Qed. Lemma proper_normalizes_proper `(Normalizes A R0 R1, Proper A R1 m) : Proper R0 m. Proof. red in H, H0. setoid_rewrite H. assumption. Qed. Ltac proper_normalization := match goal with | [ _ : normalization_done |- _ ] => fail 1 | [ _ : apply_subrelation |- @Proper _ ?R _ ] => let H := fresh "H" in set(H:=did_normalization) ; class_apply @proper_normalizes_proper end. Hint Extern 6 (@Proper _ _ _) => proper_normalization : typeclass_instances. (** Every reflexive relation gives rise to a morphism, only for immediately solving goals without variables. *) Lemma reflexive_proper `{Reflexive A R} (x : A) : Proper R x. Proof. firstorder. Qed. Lemma proper_eq A (x : A) : Proper (@eq A) x. Proof. intros. apply reflexive_proper. Qed. Ltac proper_reflexive := match goal with | [ _ : normalization_done |- _ ] => fail 1 | _ => class_apply proper_eq || class_apply @reflexive_proper end. Hint Extern 7 (@Proper _ _ _) => proper_reflexive : typeclass_instances. (** When the relation on the domain is symmetric, we can inverse the relation on the codomain. Same for binary functions. *) Lemma proper_sym_flip : forall `(Symmetric A R1)`(Proper (A->B) (R1==>R2) f), Proper (R1==>inverse R2) f. Proof. intros A R1 Sym B R2 f Hf. intros x x' Hxx'. apply Hf, Sym, Hxx'. Qed. Lemma proper_sym_flip_2 : forall `(Symmetric A R1)`(Symmetric B R2)`(Proper (A->B->C) (R1==>R2==>R3) f), Proper (R1==>R2==>inverse R3) f. Proof. intros A R1 Sym1 B R2 Sym2 C R3 f Hf. intros x x' Hxx' y y' Hyy'. apply Hf; auto. Qed. (** When the relation on the domain is symmetric, a predicate is compatible with [iff] as soon as it is compatible with [impl]. Same with a binary relation. *) Lemma proper_sym_impl_iff : forall `(Symmetric A R)`(Proper _ (R==>impl) f), Proper (R==>iff) f. Proof. intros A R Sym f Hf x x' Hxx'. repeat red in Hf. split; eauto. Qed. Lemma proper_sym_impl_iff_2 : forall `(Symmetric A R)`(Symmetric B R')`(Proper _ (R==>R'==>impl) f), Proper (R==>R'==>iff) f. Proof. intros A R Sym B R' Sym' f Hf x x' Hxx' y y' Hyy'. repeat red in Hf. split; eauto. Qed. 
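(** Example (a small sketch exercising [proper_sym_impl_iff]; the predicate [P]
    and the lemma name [example_proper_sym_impl_iff] are hypothetical, not part
    of the surrounding development): an [impl]-compatible predicate over a
    symmetric relation is automatically [iff]-compatible. *)

Lemma example_proper_sym_impl_iff `(Symmetric A R) (P : A -> Prop)
  (HP : Proper (R ==> impl) P) : Proper (R ==> iff) P.
Proof. apply proper_sym_impl_iff; assumption. Qed.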
(** A [PartialOrder] is compatible with its underlying equivalence. *) Instance PartialOrder_proper `(PartialOrder A eqA R) : Proper (eqA==>eqA==>iff) R. Proof. intros. apply proper_sym_impl_iff_2; auto with *. intros x x' Hx y y' Hy Hr. transitivity x. generalize (partial_order_equivalence x x'); compute; intuition. transitivity y; auto. generalize (partial_order_equivalence y y'); compute; intuition. Qed. (** From a [PartialOrder] to the corresponding [StrictOrder]: [lt = le /\ ~eq]. If the order is total, we could also say [gt = ~le]. *) Lemma PartialOrder_StrictOrder `(PartialOrder A eqA R) : StrictOrder (relation_conjunction R (complement eqA)). Proof. split; compute. intros x (_,Hx). apply Hx, Equivalence_Reflexive. intros x y z (Hxy,Hxy') (Hyz,Hyz'). split. apply PreOrder_Transitive with y; assumption. intro Hxz. apply Hxy'. apply partial_order_antisym; auto. rewrite Hxz; auto. Qed. Hint Extern 4 (StrictOrder (relation_conjunction _ _)) => class_apply PartialOrder_StrictOrder : typeclass_instances. (** From a [StrictOrder] to the corresponding [PartialOrder]: [le = lt \/ eq]. If the order is total, we could also say [ge = ~lt]. *) Lemma StrictOrder_PreOrder `(Equivalence A eqA, StrictOrder A R, Proper _ (eqA==>eqA==>iff) R) : PreOrder (relation_disjunction R eqA). Proof. split. intros x. right. reflexivity. intros x y z [Hxy|Hxy] [Hyz|Hyz]. left. transitivity y; auto. left. rewrite <- Hyz; auto. left. rewrite Hxy; auto. right. transitivity y; auto. Qed. Hint Extern 4 (PreOrder (relation_disjunction _ _)) => class_apply StrictOrder_PreOrder : typeclass_instances. Lemma StrictOrder_PartialOrder `(Equivalence A eqA, StrictOrder A R, Proper _ (eqA==>eqA==>iff) R) : PartialOrder eqA (relation_disjunction R eqA). Proof. intros. intros x y. compute. intuition. elim (StrictOrder_Irreflexive x). transitivity y; auto. Qed. Hint Extern 4 (PartialOrder _ (relation_disjunction _ _)) => class_apply StrictOrder_PartialOrder : typeclass_instances. coq-8.4pl2/theories/Classes/Morphisms_Prop.v0000640000175000001440000000766412010532755020233 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* impl) not | 1. Program Instance not_iff_morphism : Proper (iff ++> iff) not. (** Logical conjunction. *) Program Instance and_impl_morphism : Proper (impl ==> impl ==> impl) and | 1. Program Instance and_iff_morphism : Proper (iff ==> iff ==> iff) and. (** Logical disjunction. *) Program Instance or_impl_morphism : Proper (impl ==> impl ==> impl) or | 1. Program Instance or_iff_morphism : Proper (iff ==> iff ==> iff) or. (** Logical implication [impl] is a morphism for logical equivalence. *) Program Instance iff_iff_iff_impl_morphism : Proper (iff ==> iff ==> iff) impl. (** Morphisms for quantifiers *) Program Instance ex_iff_morphism {A : Type} : Proper (pointwise_relation A iff ==> iff) (@ex A). Next Obligation. Proof. unfold pointwise_relation in H. split ; intros. destruct H0 as [x1 H1]. exists x1. rewrite H in H1. assumption. destruct H0 as [x1 H1]. exists x1. rewrite H. assumption. Qed. Program Instance ex_impl_morphism {A : Type} : Proper (pointwise_relation A impl ==> impl) (@ex A) | 1. Next Obligation. Proof. unfold pointwise_relation in H. exists H0. apply H. assumption. Qed. Program Instance ex_inverse_impl_morphism {A : Type} : Proper (pointwise_relation A (inverse impl) ==> inverse impl) (@ex A) | 1. Next Obligation. Proof. unfold pointwise_relation in H. exists H0. apply H. assumption. Qed. 
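(** Example (sketch): with [ex_iff_morphism] available, [setoid_rewrite] can
    rewrite below the binder of an existential quantifier.  The predicates [P]
    and [Q] and the pointwise equivalence [H] are hypothetical. *)

Lemma example_setoid_rewrite_under_ex (A : Type) (P Q : A -> Prop) :
  (forall x, P x <-> Q x) -> (exists x, Q x) -> exists x, P x.
Proof. intros H HQ. setoid_rewrite H. assumption. Qed.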
Program Instance all_iff_morphism {A : Type} : Proper (pointwise_relation A iff ==> iff) (@all A). Next Obligation. Proof. unfold pointwise_relation, all in *. intuition ; specialize (H x0) ; intuition. Qed. Program Instance all_impl_morphism {A : Type} : Proper (pointwise_relation A impl ==> impl) (@all A) | 1. Next Obligation. Proof. unfold pointwise_relation, all in *. intuition ; specialize (H x0) ; intuition. Qed. Program Instance all_inverse_impl_morphism {A : Type} : Proper (pointwise_relation A (inverse impl) ==> inverse impl) (@all A) | 1. Next Obligation. Proof. unfold pointwise_relation, all in *. intuition ; specialize (H x0) ; intuition. Qed. (** Equivalent points are simultaneously accessible or not *) Instance Acc_pt_morphism {A:Type}(E R : A->A->Prop) `(Equivalence _ E) `(Proper _ (E==>E==>iff) R) : Proper (E==>iff) (Acc R). Proof. apply proper_sym_impl_iff; auto with *. intros x y EQ WF. apply Acc_intro; intros z Hz. rewrite <- EQ in Hz. now apply Acc_inv with x. Qed. (** Equivalent relations have the same accessible points *) Instance Acc_rel_morphism {A:Type} : Proper (@relation_equivalence A ==> Logic.eq ==> iff) (@Acc A). Proof. apply proper_sym_impl_iff_2. red; now symmetry. red; now symmetry. intros R R' EQ a a' Ha WF. subst a'. induction WF as [x _ WF']. constructor. intros y Ryx. now apply WF', EQ. Qed. (** Equivalent relations are simultaneously well-founded or not *) Instance well_founded_morphism {A : Type} : Proper (@relation_equivalence A ==> iff) (@well_founded A). Proof. unfold well_founded. solve_proper. Qed. coq-8.4pl2/theories/Classes/Morphisms_Relations.v0000640000175000001440000000455412010532755021246 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* relation_equivalence ==> relation_equivalence) relation_conjunction. Proof. firstorder. Qed. Instance relation_disjunction_morphism : Proper (relation_equivalence (A:=A) ==> relation_equivalence ==> relation_equivalence) relation_disjunction. Proof. firstorder. Qed. (* Predicate equivalence is exactly the same as the pointwise lifting of [iff]. *) Require Import List. Lemma predicate_equivalence_pointwise (l : Tlist) : Proper (@predicate_equivalence l ==> pointwise_lifting iff l) id. Proof. do 2 red. unfold predicate_equivalence. auto. Qed. Lemma predicate_implication_pointwise (l : Tlist) : Proper (@predicate_implication l ==> pointwise_lifting impl l) id. Proof. do 2 red. unfold predicate_implication. auto. Qed. (** The instantiation at [relation] allows rewriting applications of relations [R x y] to [R' x y] when [R] and [R'] are in [relation_equivalence]. *) Instance relation_equivalence_pointwise : Proper (relation_equivalence ==> pointwise_relation A (pointwise_relation A iff)) id. Proof. intro. apply (predicate_equivalence_pointwise (Tcons A (Tcons A Tnil))). Qed. Instance subrelation_pointwise : Proper (subrelation ==> pointwise_relation A (pointwise_relation A impl)) id. Proof. intro. apply (predicate_implication_pointwise (Tcons A (Tcons A Tnil))). Qed. Lemma inverse_pointwise_relation A (R : relation A) : relation_equivalence (pointwise_relation A (inverse R)) (inverse (pointwise_relation A R)). Proof. intros. split; firstorder. Qed. 
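(** Example (sketch): [relation_equivalence_pointwise] can also be used
    directly as a lemma, turning a [relation_equivalence] hypothesis into the
    pointwise [iff] on applications; [R], [R'], [x] and [y] are hypothetical. *)

Lemma example_relation_equivalence_applied {A : Type} (R R' : relation A)
  (x y : A) : relation_equivalence R R' -> (R x y <-> R' x y).
Proof. intro Heq. exact (@relation_equivalence_pointwise A R R' Heq x y). Qed.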
coq-8.4pl2/theories/Classes/SetoidTactics.v0000640000175000001440000001365012010532755020004 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ?R' => red ; intros ; subst ; red_subst_eq_morphism R' | ?R ==> ?R' => red ; intros ; red_subst_eq_morphism R' | _ => idtac end. Ltac destruct_proper := match goal with | [ |- @Proper ?A ?R ?m ] => red end. Ltac reverse_arrows x := match x with | @Logic.eq ?A ==> ?R' => revert_last ; reverse_arrows R' | ?R ==> ?R' => do 3 revert_last ; reverse_arrows R' | _ => idtac end. Ltac default_add_morphism_tactic := unfold flip ; intros ; (try destruct_proper) ; match goal with | [ |- (?x ==> ?y) _ _ ] => red_subst_eq_morphism (x ==> y) ; reverse_arrows (x ==> y) end. Ltac add_morphism_tactic := default_add_morphism_tactic. Obligation Tactic := program_simpl. (* Notation "'Morphism' s t " := (@Proper _ (s%signature) t) (at level 10, s at next level, t at next level). *) coq-8.4pl2/theories/Classes/Init.v0000640000175000001440000000265112010532755016144 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unify x y with typeclass_instances ; fail 1 "Convertible" | |- _ => exact tt end. Hint Extern 0 (@Unconvertible _ _ _) => unconvertible : typeclass_instances. coq-8.4pl2/theories/Classes/SetoidDec.v0000640000175000001440000000731212010532755017103 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ) (y :>)) (no associativity, at level 70). Definition swap_sumbool {A B} (x : { A } + { B }) : { B } + { A } := match x with | left H => @right _ _ H | right H => @left _ _ H end. Require Import Coq.Program.Program. Local Open Scope program_scope. (** Invert the branches. *) Program Definition nequiv_dec `{EqDec A} (x y : A) : { x =/= y } + { x == y } := swap_sumbool (x == y). (** Overloaded notation for inequality. *) Infix "=/=" := nequiv_dec (no associativity, at level 70). (** Define boolean versions, losing the logical information. *) Definition equiv_decb `{EqDec A} (x y : A) : bool := if x == y then true else false. Definition nequiv_decb `{EqDec A} (x y : A) : bool := negb (equiv_decb x y). Infix "==b" := equiv_decb (no associativity, at level 70). Infix "<>b" := nequiv_decb (no associativity, at level 70). (** Decidable leibniz equality instances. *) Require Import Coq.Arith.Arith. (** The equiv is burried inside the setoid, but we can recover it by specifying which setoid we're talking about. *) Program Instance eq_setoid A : Setoid A | 10 := { equiv := eq ; setoid_equiv := eq_equivalence }. Program Instance nat_eq_eqdec : EqDec (eq_setoid nat) := eq_nat_dec. Require Import Coq.Bool.Bool. Program Instance bool_eqdec : EqDec (eq_setoid bool) := bool_dec. Program Instance unit_eqdec : EqDec (eq_setoid unit) := fun x y => in_left. Next Obligation. Proof. destruct x ; destruct y. reflexivity. Qed. Program Instance prod_eqdec `(! EqDec (eq_setoid A), ! EqDec (eq_setoid B)) : EqDec (eq_setoid (prod A B)) := fun x y => let '(x1, x2) := x in let '(y1, y2) := y in if x1 == y1 then if x2 == y2 then in_left else in_right else in_right. Solve Obligations using unfold complement ; program_simpl. (** Objects of function spaces with countable domains like bool have decidable equality. *) Program Instance bool_function_eqdec `(! 
EqDec (eq_setoid A)) : EqDec (eq_setoid (bool -> A)) := fun f g => if f true == g true then if f false == g false then in_left else in_right else in_right. Solve Obligations using try red ; unfold equiv, complement ; program_simpl. Next Obligation. Proof. extensionality x. destruct x ; auto. Qed. coq-8.4pl2/theories/Classes/EquivDec.v0000640000175000001440000001071512010532755016746 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ) (y :>)) (no associativity, at level 70) : equiv_scope. Definition swap_sumbool {A B} (x : { A } + { B }) : { B } + { A } := match x with | left H => @right _ _ H | right H => @left _ _ H end. Local Open Scope program_scope. (** Invert the branches. *) Program Definition nequiv_dec `{EqDec A} (x y : A) : { x =/= y } + { x === y } := swap_sumbool (x == y). (** Overloaded notation for inequality. *) Infix "<>" := nequiv_dec (no associativity, at level 70) : equiv_scope. (** Define boolean versions, losing the logical information. *) Definition equiv_decb `{EqDec A} (x y : A) : bool := if x == y then true else false. Definition nequiv_decb `{EqDec A} (x y : A) : bool := negb (equiv_decb x y). Infix "==b" := equiv_decb (no associativity, at level 70). Infix "<>b" := nequiv_decb (no associativity, at level 70). (** Decidable leibniz equality instances. *) (** The equiv is burried inside the setoid, but we can recover it by specifying which setoid we're talking about. *) Program Instance nat_eq_eqdec : EqDec nat eq := eq_nat_dec. Program Instance bool_eqdec : EqDec bool eq := bool_dec. Program Instance unit_eqdec : EqDec unit eq := fun x y => in_left. Next Obligation. Proof. destruct x ; destruct y. reflexivity. Qed. Obligation Tactic := unfold complement, equiv ; program_simpl. Program Instance prod_eqdec `(EqDec A eq, EqDec B eq) : ! EqDec (prod A B) eq := { equiv_dec x y := let '(x1, x2) := x in let '(y1, y2) := y in if x1 == y1 then if x2 == y2 then in_left else in_right else in_right }. Program Instance sum_eqdec `(EqDec A eq, EqDec B eq) : EqDec (sum A B) eq := { equiv_dec x y := match x, y with | inl a, inl b => if a == b then in_left else in_right | inr a, inr b => if a == b then in_left else in_right | inl _, inr _ | inr _, inl _ => in_right end }. (** Objects of function spaces with countable domains like bool have decidable equality. Proving the reflection requires functional extensionality though. *) Program Instance bool_function_eqdec `(EqDec A eq) : ! EqDec (bool -> A) eq := { equiv_dec f g := if f true == g true then if f false == g false then in_left else in_right else in_right }. Next Obligation. Proof. extensionality x. destruct x ; auto. Qed. Require Import List. Program Instance list_eqdec `(eqa : EqDec A eq) : ! EqDec (list A) eq := { equiv_dec := fix aux (x y : list A) := match x, y with | nil, nil => in_left | cons hd tl, cons hd' tl' => if hd == hd' then if aux tl tl' then in_left else in_right else in_right | _, _ => in_right end }. Solve Obligations using unfold equiv, complement in * ; program_simpl ; intuition (discriminate || eauto). Next Obligation. destruct y ; intuition eauto. Defined. Solve Obligations using unfold equiv, complement in * ; program_simpl ; intuition (discriminate || eauto). 
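(** Example (sketch): the derived instances make the boolean test [==b]
    compute; the pair of a boolean and a natural number below is only an
    illustrative instance. *)

Example example_equiv_decb_pair : ((true, 2) ==b (true, 2)) = true.
Proof. reflexivity. Qed.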
coq-8.4pl2/theories/Logic/0000750000175000001440000000000012127276545014520 5ustar notinuserscoq-8.4pl2/theories/Logic/ChoiceFacts.v0000640000175000001440000006743512010532755017067 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* GAC_rel and IPL_2 |- GAC_rel = OAC_rel 3.2. IPL^2 |- AC_fun + IGP = GAC_fun = OAC_fun = AC_fun + Drinker 3.3. D_iota -> ID_iota and D_epsilon <-> ID_epsilon + Drinker 4. Derivability of choice for decidable relations with well-ordered codomain 5. Equivalence of choices on dependent or non dependent functional types 6. Non contradiction of constructive descriptions wrt functional choices 7. Definite description transports classical logic to the computational world 8. Choice -> Dependent choice -> Countable choice References: [[Bell]] John L. Bell, Choice principles in intuitionistic set theory, unpublished. [[Bell93]] John L. Bell, Hilbert's Epsilon Operator in Intuitionistic Type Theories, Mathematical Logic Quarterly, volume 39, 1993. [[Carlström05]] Jesper Carlström, Interpreting descriptions in intentional type theory, Journal of Symbolic Logic 70(2):488-514, 2005. *) Set Implicit Arguments. (**********************************************************************) (** * Definitions *) (** Choice, reification and description schemes *) Section ChoiceSchemes. Variables A B :Type. Variable P:A->Prop. Variable R:A->B->Prop. (** ** Constructive choice and description *) (** AC_rel *) Definition RelationalChoice_on := forall R:A->B->Prop, (forall x : A, exists y : B, R x y) -> (exists R' : A->B->Prop, subrelation R' R /\ forall x, exists! y, R' x y). (** AC_fun *) Definition FunctionalChoice_on := forall R:A->B->Prop, (forall x : A, exists y : B, R x y) -> (exists f : A->B, forall x : A, R x (f x)). (** DC_fun *) Definition FunctionalDependentChoice_on := forall (R:A->A->Prop), (forall x, exists y, R x y) -> forall x0, (exists f : nat -> A, f 0 = x0 /\ forall n, R (f n) (f (S n))). (** ACw_fun *) Definition FunctionalCountableChoice_on := forall (R:nat->A->Prop), (forall n, exists y, R n y) -> (exists f : nat -> A, forall n, R n (f n)). (** AC! or Functional Relation Reification (known as Axiom of Unique Choice in topos theory; also called principle of definite description *) Definition FunctionalRelReification_on := forall R:A->B->Prop, (forall x : A, exists! y : B, R x y) -> (exists f : A->B, forall x : A, R x (f x)). (** ID_epsilon (constructive version of indefinite description; combined with proof-irrelevance, it may be connected to Carlström's type theory with a constructive indefinite description operator) *) Definition ConstructiveIndefiniteDescription_on := forall P:A->Prop, (exists x, P x) -> { x:A | P x }. (** ID_iota (constructive version of definite description; combined with proof-irrelevance, it may be connected to Carlström's and Stenlund's type theory with a constructive definite description operator) *) Definition ConstructiveDefiniteDescription_on := forall P:A->Prop, (exists! x, P x) -> { x:A | P x }. (** ** Weakly classical choice and description *) (** GAC_rel *) Definition GuardedRelationalChoice_on := forall P : A->Prop, forall R : A->B->Prop, (forall x : A, P x -> exists y : B, R x y) -> (exists R' : A->B->Prop, subrelation R' R /\ forall x, P x -> exists! y, R' x y). 
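(** Example (sketch): indefinite description entails definite description,
    since unique existence is in particular existence; the statement below only
    unfolds the two schemes defined above and is stated here for illustration. *)

Remark example_indefinite_descr_imp_definite_descr :
  ConstructiveIndefiniteDescription_on -> ConstructiveDefiniteDescription_on.
Proof.
  intros Descr P' HP'. apply Descr.
  destruct HP' as (x,(Hx,_)). exists x; exact Hx.
Qed.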
(** GAC_fun *) Definition GuardedFunctionalChoice_on := forall P : A->Prop, forall R : A->B->Prop, inhabited B -> (forall x : A, P x -> exists y : B, R x y) -> (exists f : A->B, forall x, P x -> R x (f x)). (** GFR_fun *) Definition GuardedFunctionalRelReification_on := forall P : A->Prop, forall R : A->B->Prop, inhabited B -> (forall x : A, P x -> exists! y : B, R x y) -> (exists f : A->B, forall x : A, P x -> R x (f x)). (** OAC_rel *) Definition OmniscientRelationalChoice_on := forall R : A->B->Prop, exists R' : A->B->Prop, subrelation R' R /\ forall x : A, (exists y : B, R x y) -> exists! y, R' x y. (** OAC_fun *) Definition OmniscientFunctionalChoice_on := forall R : A->B->Prop, inhabited B -> exists f : A->B, forall x : A, (exists y : B, R x y) -> R x (f x). (** D_epsilon *) Definition EpsilonStatement_on := forall P:A->Prop, inhabited A -> { x:A | (exists x, P x) -> P x }. (** D_iota *) Definition IotaStatement_on := forall P:A->Prop, inhabited A -> { x:A | (exists! x, P x) -> P x }. End ChoiceSchemes. (** Generalized schemes *) Notation RelationalChoice := (forall A B, RelationalChoice_on A B). Notation FunctionalChoice := (forall A B, FunctionalChoice_on A B). Definition FunctionalDependentChoice := (forall A, FunctionalDependentChoice_on A). Definition FunctionalCountableChoice := (forall A, FunctionalCountableChoice_on A). Notation FunctionalChoiceOnInhabitedSet := (forall A B, inhabited B -> FunctionalChoice_on A B). Notation FunctionalRelReification := (forall A B, FunctionalRelReification_on A B). Notation GuardedRelationalChoice := (forall A B, GuardedRelationalChoice_on A B). Notation GuardedFunctionalChoice := (forall A B, GuardedFunctionalChoice_on A B). Notation GuardedFunctionalRelReification := (forall A B, GuardedFunctionalRelReification_on A B). Notation OmniscientRelationalChoice := (forall A B, OmniscientRelationalChoice_on A B). Notation OmniscientFunctionalChoice := (forall A B, OmniscientFunctionalChoice_on A B). Notation ConstructiveDefiniteDescription := (forall A, ConstructiveDefiniteDescription_on A). Notation ConstructiveIndefiniteDescription := (forall A, ConstructiveIndefiniteDescription_on A). Notation IotaStatement := (forall A, IotaStatement_on A). Notation EpsilonStatement := (forall A, EpsilonStatement_on A). (** Subclassical schemes *) Definition ProofIrrelevance := forall (A:Prop) (a1 a2:A), a1 = a2. Definition IndependenceOfGeneralPremises := forall (A:Type) (P:A -> Prop) (Q:Prop), inhabited A -> (Q -> exists x, P x) -> exists x, Q -> P x. Definition SmallDrinker'sParadox := forall (A:Type) (P:A -> Prop), inhabited A -> exists x, (exists x, P x) -> P x. (**********************************************************************) (** * AC_rel + AC! = AC_fun We show that the functional formulation of the axiom of Choice (usual formulation in type theory) is equivalent to its relational formulation (only formulation of set theory) + functional relation reification (aka axiom of unique choice, or, principle of (parametric) definite descriptions) *) (** This shows that the axiom of choice can be assumed (under its relational formulation) without known inconsistency with classical logic, though functional relation reification conflicts with classical logic *) Lemma description_rel_choice_imp_funct_choice : forall A B : Type, FunctionalRelReification_on A B -> RelationalChoice_on A B -> FunctionalChoice_on A B. Proof. intros A B Descr RelCh R H. destruct (RelCh R H) as (R',(HR'R,H0)). destruct (Descr R') as (f,Hf). firstorder. exists f; intro x. 
destruct (H0 x) as (y,(HR'xy,Huniq)). rewrite <- (Huniq (f x) (Hf x)). apply HR'R; assumption. Qed. Lemma funct_choice_imp_rel_choice : forall A B, FunctionalChoice_on A B -> RelationalChoice_on A B. Proof. intros A B FunCh R H. destruct (FunCh R H) as (f,H0). exists (fun x y => f x = y). split. intros x y Heq; rewrite <- Heq; trivial. intro x; exists (f x); split. reflexivity. trivial. Qed. Lemma funct_choice_imp_description : forall A B, FunctionalChoice_on A B -> FunctionalRelReification_on A B. Proof. intros A B FunCh R H. destruct (FunCh R) as [f H0]. (* 1 *) intro x. destruct (H x) as (y,(HRxy,_)). exists y; exact HRxy. (* 2 *) exists f; exact H0. Qed. Corollary FunChoice_Equiv_RelChoice_and_ParamDefinDescr : forall A B, FunctionalChoice_on A B <-> RelationalChoice_on A B /\ FunctionalRelReification_on A B. Proof. intros A B; split. intro H; split; [ exact (funct_choice_imp_rel_choice H) | exact (funct_choice_imp_description H) ]. intros [H H0]; exact (description_rel_choice_imp_funct_choice H0 H). Qed. (**********************************************************************) (** * Connection between the guarded, non-guarded and omniscient choices *) (** We show that the guarded formulations of the axiom of choice are equivalent to their "omniscient" variant and come from the non-guarded formulation in the presence of either the independence of general premises or subset types (themselves derivable from subtypes thanks to proof-irrelevance) *) (**********************************************************************) (** ** AC_rel + PI -> GAC_rel and AC_rel + IGP -> GAC_rel and GAC_rel = OAC_rel *) Lemma rel_choice_and_proof_irrel_imp_guarded_rel_choice : RelationalChoice -> ProofIrrelevance -> GuardedRelationalChoice. Proof. intros rel_choice proof_irrel. red; intros A B P R H. destruct (rel_choice _ _ (fun (x:sigT P) (y:B) => R (projT1 x) y)) as (R',(HR'R,H0)). intros (x,HPx). destruct (H x HPx) as (y,HRxy). exists y; exact HRxy. set (R'' := fun (x:A) (y:B) => exists H : P x, R' (existT P x H) y). exists R''; split. intros x y (HPx,HR'xy). change x with (projT1 (existT P x HPx)); apply HR'R; exact HR'xy. intros x HPx. destruct (H0 (existT P x HPx)) as (y,(HR'xy,Huniq)). exists y; split. exists HPx; exact HR'xy. intros y' (H'Px,HR'xy'). apply Huniq. rewrite proof_irrel with (a1 := HPx) (a2 := H'Px); exact HR'xy'. Qed. Lemma rel_choice_indep_of_general_premises_imp_guarded_rel_choice : forall A B, inhabited B -> RelationalChoice_on A B -> IndependenceOfGeneralPremises -> GuardedRelationalChoice_on A B. Proof. intros A B Inh AC_rel IndPrem P R H. destruct (AC_rel (fun x y => P x -> R x y)) as (R',(HR'R,H0)). intro x. apply IndPrem. exact Inh. intro Hx. apply H; assumption. exists (fun x y => P x /\ R' x y). firstorder. Qed. Lemma guarded_rel_choice_imp_rel_choice : forall A B, GuardedRelationalChoice_on A B -> RelationalChoice_on A B. Proof. intros A B GAC_rel R H. destruct (GAC_rel (fun _ => True) R) as (R',(HR'R,H0)). firstorder. exists R'; firstorder. Qed. Lemma subset_types_imp_guarded_rel_choice_iff_rel_choice : ProofIrrelevance -> (GuardedRelationalChoice <-> RelationalChoice). Proof. intuition auto using guarded_rel_choice_imp_rel_choice, rel_choice_and_proof_irrel_imp_guarded_rel_choice. Qed. (** OAC_rel = GAC_rel *) Corollary guarded_iff_omniscient_rel_choice : GuardedRelationalChoice <-> OmniscientRelationalChoice. Proof. split. intros GAC_rel A B R. apply (GAC_rel A B (fun x => exists y, R x y) R); auto. intros OAC_rel A B P R H.
destruct (OAC_rel A B R) as (f,Hf); exists f; firstorder. Qed. (**********************************************************************) (** ** AC_fun + IGP = GAC_fun = OAC_fun = AC_fun + Drinker *) (** AC_fun + IGP = GAC_fun *) Lemma guarded_fun_choice_imp_indep_of_general_premises : GuardedFunctionalChoice -> IndependenceOfGeneralPremises. Proof. intros GAC_fun A P Q Inh H. destruct (GAC_fun unit A (fun _ => Q) (fun _ => P) Inh) as (f,Hf). tauto. exists (f tt); auto. Qed. Lemma guarded_fun_choice_imp_fun_choice : GuardedFunctionalChoice -> FunctionalChoiceOnInhabitedSet. Proof. intros GAC_fun A B Inh R H. destruct (GAC_fun A B (fun _ => True) R Inh) as (f,Hf). firstorder. exists f; auto. Qed. Lemma fun_choice_and_indep_general_prem_imp_guarded_fun_choice : FunctionalChoiceOnInhabitedSet -> IndependenceOfGeneralPremises -> GuardedFunctionalChoice. Proof. intros AC_fun IndPrem A B P R Inh H. apply (AC_fun A B Inh (fun x y => P x -> R x y)). intro x; apply IndPrem; eauto. Qed. Corollary fun_choice_and_indep_general_prem_iff_guarded_fun_choice : FunctionalChoiceOnInhabitedSet /\ IndependenceOfGeneralPremises <-> GuardedFunctionalChoice. Proof. intuition auto using guarded_fun_choice_imp_indep_of_general_premises, guarded_fun_choice_imp_fun_choice, fun_choice_and_indep_general_prem_imp_guarded_fun_choice. Qed. (** AC_fun + Drinker = OAC_fun *) (** This was already observed by Bell [[Bell]] *) Lemma omniscient_fun_choice_imp_small_drinker : OmniscientFunctionalChoice -> SmallDrinker'sParadox. Proof. intros OAC_fun A P Inh. destruct (OAC_fun unit A (fun _ => P)) as (f,Hf). auto. exists (f tt); firstorder. Qed. Lemma omniscient_fun_choice_imp_fun_choice : OmniscientFunctionalChoice -> FunctionalChoiceOnInhabitedSet. Proof. intros OAC_fun A B Inh R H. destruct (OAC_fun A B R Inh) as (f,Hf). exists f; firstorder. Qed. Lemma fun_choice_and_small_drinker_imp_omniscient_fun_choice : FunctionalChoiceOnInhabitedSet -> SmallDrinker'sParadox -> OmniscientFunctionalChoice. Proof. intros AC_fun Drinker A B R Inh. destruct (AC_fun A B Inh (fun x y => (exists y, R x y) -> R x y)) as (f,Hf). intro x; apply (Drinker B (R x) Inh). exists f; assumption. Qed. Corollary fun_choice_and_small_drinker_iff_omniscient_fun_choice : FunctionalChoiceOnInhabitedSet /\ SmallDrinker'sParadox <-> OmniscientFunctionalChoice. Proof. intuition auto using omniscient_fun_choice_imp_small_drinker, omniscient_fun_choice_imp_fun_choice, fun_choice_and_small_drinker_imp_omniscient_fun_choice. Qed. (** OAC_fun = GAC_fun *) (** This is derivable from the intuitionistic equivalence between IGP and Drinker but we give a direct proof *) Theorem guarded_iff_omniscient_fun_choice : GuardedFunctionalChoice <-> OmniscientFunctionalChoice. Proof. split. intros GAC_fun A B R Inh. apply (GAC_fun A B (fun x => exists y, R x y) R); auto. intros OAC_fun A B P R Inh H. destruct (OAC_fun A B R Inh) as (f,Hf). exists f; firstorder. Qed. (**********************************************************************) (** ** D_iota -> ID_iota and D_epsilon <-> ID_epsilon + Drinker *) (** D_iota -> ID_iota *) Lemma iota_imp_constructive_definite_description : IotaStatement -> ConstructiveDefiniteDescription. Proof. intros D_iota A P H. destruct D_iota with (P:=P) as (x,H1). destruct H; red in H; auto. exists x; apply H1; assumption. Qed. (** ID_epsilon + Drinker <-> D_epsilon *) Lemma epsilon_imp_constructive_indefinite_description: EpsilonStatement -> ConstructiveIndefiniteDescription. Proof. intros D_epsilon A P H. destruct D_epsilon with (P:=P) as (x,H1). 
destruct H; auto. exists x; apply H1; assumption. Qed. Lemma constructive_indefinite_description_and_small_drinker_imp_epsilon : SmallDrinker'sParadox -> ConstructiveIndefiniteDescription -> EpsilonStatement. Proof. intros Drinkers D_epsilon A P Inh; apply D_epsilon; apply Drinkers; assumption. Qed. Lemma epsilon_imp_small_drinker : EpsilonStatement -> SmallDrinker'sParadox. Proof. intros D_epsilon A P Inh; edestruct D_epsilon; eauto. Qed. Theorem constructive_indefinite_description_and_small_drinker_iff_epsilon : (SmallDrinker'sParadox * ConstructiveIndefiniteDescription -> EpsilonStatement) * (EpsilonStatement -> SmallDrinker'sParadox * ConstructiveIndefiniteDescription). Proof. intuition auto using epsilon_imp_constructive_indefinite_description, constructive_indefinite_description_and_small_drinker_imp_epsilon, epsilon_imp_small_drinker. Qed. (**********************************************************************) (** * Derivability of choice for decidable relations with well-ordered codomain *) (** Countable codomains, such as [nat], can be equipped with a well-order, which implies the existence of a least element on inhabited decidable subsets. As a consequence, the relational form of the axiom of choice is derivable on [nat] for decidable relations. We show instead that functional relation reification and the functional form of the axiom of choice are equivalent on decidable relation with [nat] as codomain *) Require Import Wf_nat. Require Import Decidable. Definition FunctionalChoice_on_rel (A B:Type) (R:A->B->Prop) := (forall x:A, exists y : B, R x y) -> exists f : A -> B, (forall x:A, R x (f x)). Lemma classical_denumerable_description_imp_fun_choice : forall A:Type, FunctionalRelReification_on A nat -> forall R:A->nat->Prop, (forall x y, decidable (R x y)) -> FunctionalChoice_on_rel R. Proof. intros A Descr. red; intros R Rdec H. set (R':= fun x y => R x y /\ forall y', R x y' -> y <= y'). destruct (Descr R') as (f,Hf). intro x. apply (dec_inh_nat_subset_has_unique_least_element (R x)). apply Rdec. apply (H x). exists f. intros x. destruct (Hf x) as (Hfx,_). assumption. Qed. (**********************************************************************) (** * Choice on dependent and non dependent function types are equivalent *) (** ** Choice on dependent and non dependent function types are equivalent *) Definition DependentFunctionalChoice_on (A:Type) (B:A -> Type) := forall R:forall x:A, B x -> Prop, (forall x:A, exists y : B x, R x y) -> (exists f : (forall x:A, B x), forall x:A, R x (f x)). Notation DependentFunctionalChoice := (forall A (B:A->Type), DependentFunctionalChoice_on B). (** The easy part *) Theorem dep_non_dep_functional_choice : DependentFunctionalChoice -> FunctionalChoice. Proof. intros AC_depfun A B R H. destruct (AC_depfun A (fun _ => B) R H) as (f,Hf). exists f; trivial. Qed. (** Deriving choice on product types requires some computation on singleton propositional types, so we need computational conjunction projections and dependent elimination of conjunction and equality *) Scheme and_indd := Induction for and Sort Prop. Scheme eq_indd := Induction for eq Sort Prop. Definition proj1_inf (A B:Prop) (p : A/\B) := let (a,b) := p in a. Theorem non_dep_dep_functional_choice : FunctionalChoice -> DependentFunctionalChoice. Proof. intros AC_fun A B R H. pose (B' := { x:A & B x }). pose (R' := fun (x:A) (y:B') => projT1 y = x /\ R (projT1 y) (projT2 y)). destruct (AC_fun A B' R') as (f,Hf). intros x. destruct (H x) as (y,Hy). exists (existT (fun x => B x) x y). 
split; trivial. exists (fun x => eq_rect _ _ (projT2 (f x)) _ (proj1_inf (Hf x))). intro x; destruct (Hf x) as (Heq,HR) using and_indd. destruct (f x); simpl in *. destruct Heq using eq_indd; trivial. Qed. (** ** Reification of dependent and non dependent functional relation are equivalent *) Definition DependentFunctionalRelReification_on (A:Type) (B:A -> Type) := forall (R:forall x:A, B x -> Prop), (forall x:A, exists! y : B x, R x y) -> (exists f : (forall x:A, B x), forall x:A, R x (f x)). Notation DependentFunctionalRelReification := (forall A (B:A->Type), DependentFunctionalRelReification_on B). (** The easy part *) Theorem dep_non_dep_functional_rel_reification : DependentFunctionalRelReification -> FunctionalRelReification. Proof. intros DepFunReify A B R H. destruct (DepFunReify A (fun _ => B) R H) as (f,Hf). exists f; trivial. Qed. (** Deriving choice on product types requires some computation on singleton propositional types, so we need computational conjunction projections and dependent elimination of conjunction and equality *) Theorem non_dep_dep_functional_rel_reification : FunctionalRelReification -> DependentFunctionalRelReification. Proof. intros AC_fun A B R H. pose (B' := { x:A & B x }). pose (R' := fun (x:A) (y:B') => projT1 y = x /\ R (projT1 y) (projT2 y)). destruct (AC_fun A B' R') as (f,Hf). intros x. destruct (H x) as (y,(Hy,Huni)). exists (existT (fun x => B x) x y). repeat split; trivial. intros (x',y') (Heqx',Hy'). simpl in *. destruct Heqx'. rewrite (Huni y'); trivial. exists (fun x => eq_rect _ _ (projT2 (f x)) _ (proj1_inf (Hf x))). intro x; destruct (Hf x) as (Heq,HR) using and_indd. destruct (f x); simpl in *. destruct Heq using eq_indd; trivial. Qed. Corollary dep_iff_non_dep_functional_rel_reification : FunctionalRelReification <-> DependentFunctionalRelReification. Proof. intuition auto using non_dep_dep_functional_rel_reification, dep_non_dep_functional_rel_reification. Qed. (**********************************************************************) (** * Non contradiction of constructive descriptions wrt functional axioms of choice *) (** ** Non contradiction of indefinite description *) Lemma relative_non_contradiction_of_indefinite_descr : forall C:Prop, (ConstructiveIndefiniteDescription -> C) -> (FunctionalChoice -> C). Proof. intros C H AC_fun. assert (AC_depfun := non_dep_dep_functional_choice AC_fun). pose (A0 := { A:Type & { P:A->Prop & exists x, P x }}). pose (B0 := fun x:A0 => projT1 x). pose (R0 := fun x:A0 => fun y:B0 x => projT1 (projT2 x) y). pose (H0 := fun x:A0 => projT2 (projT2 x)). destruct (AC_depfun A0 B0 R0 H0) as (f, Hf). apply H. intros A P H'. exists (f (existT _ A (existT _ P H'))). pose (Hf' := Hf (existT _ A (existT _ P H'))). assumption. Qed. Lemma constructive_indefinite_descr_fun_choice : ConstructiveIndefiniteDescription -> FunctionalChoice. Proof. intros IndefDescr A B R H. exists (fun x => proj1_sig (IndefDescr B (R x) (H x))). intro x. apply (proj2_sig (IndefDescr B (R x) (H x))). Qed. (** ** Non contradiction of definite description *) Lemma relative_non_contradiction_of_definite_descr : forall C:Prop, (ConstructiveDefiniteDescription -> C) -> (FunctionalRelReification -> C). Proof. intros C H FunReify. assert (DepFunReify := non_dep_dep_functional_rel_reification FunReify). pose (A0 := { A:Type & { P:A->Prop & exists! x, P x }}). pose (B0 := fun x:A0 => projT1 x). pose (R0 := fun x:A0 => fun y:B0 x => projT1 (projT2 x) y). pose (H0 := fun x:A0 => projT2 (projT2 x)). destruct (DepFunReify A0 B0 R0 H0) as (f, Hf). 
apply H. intros A P H'. exists (f (existT _ A (existT _ P H'))). pose (Hf' := Hf (existT _ A (existT _ P H'))). assumption. Qed. Lemma constructive_definite_descr_fun_reification : ConstructiveDefiniteDescription -> FunctionalRelReification. Proof. intros DefDescr A B R H. exists (fun x => proj1_sig (DefDescr B (R x) (H x))). intro x. apply (proj2_sig (DefDescr B (R x) (H x))). Qed. (** Remark, the following corollaries morally hold: Definition In_propositional_context (A:Type) := forall C:Prop, (A -> C) -> C. Corollary constructive_definite_descr_in_prop_context_iff_fun_reification : In_propositional_context ConstructiveIndefiniteDescription <-> FunctionalChoice. Corollary constructive_definite_descr_in_prop_context_iff_fun_reification : In_propositional_context ConstructiveDefiniteDescription <-> FunctionalRelReification. but expecting [FunctionalChoice] (resp. [FunctionalRelReification]) to be applied on the same Type universes on both sides of the first (resp. second) equivalence breaks the stratification of universes. *) (**********************************************************************) (** * Excluded-middle + definite description => computational excluded-middle *) (** The idea for the following proof comes from [[ChicliPottierSimpson02]] *) (** Classical logic and axiom of unique choice (i.e. functional relation reification), as shown in [[ChicliPottierSimpson02]], implies the double-negation of excluded-middle in [Set] (which is incompatible with the impredicativity of [Set]). We adapt the proof to show that constructive definite description transports excluded-middle from [Prop] to [Set]. [[ChicliPottierSimpson02]] Laurent Chicli, Loïc Pottier, Carlos Simpson, Mathematical Quotients and Quotient Types in Coq, Proceedings of TYPES 2002, Lecture Notes in Computer Science 2646, Springer Verlag. *) Require Import Setoid. Theorem constructive_definite_descr_excluded_middle : ConstructiveDefiniteDescription -> (forall P:Prop, P \/ ~ P) -> (forall P:Prop, {P} + {~ P}). Proof. intros Descr EM P. pose (select := fun b:bool => if b then P else ~P). assert { b:bool | select b } as ([|],HP). apply Descr. rewrite <- unique_existence; split. destruct (EM P). exists true; trivial. exists false; trivial. intros [|] [|] H1 H2; simpl in *; reflexivity || contradiction. left; trivial. right; trivial. Qed. Corollary fun_reification_descr_computational_excluded_middle_in_prop_context : FunctionalRelReification -> (forall P:Prop, P \/ ~ P) -> forall C:Prop, ((forall P:Prop, {P} + {~ P}) -> C) -> C. Proof. intros FunReify EM C H. apply relative_non_contradiction_of_definite_descr; trivial. auto using constructive_definite_descr_excluded_middle. Qed. (**********************************************************************) (** * Choice => Dependent choice => Countable choice *) (* The implications below are standard *) Require Import Arith. Theorem functional_choice_imp_functional_dependent_choice : FunctionalChoice -> FunctionalDependentChoice. Proof. intros FunChoice A R HRfun x0. apply FunChoice in HRfun as (g,Rg). set (f:=fix f n := match n with 0 => x0 | S n' => g (f n') end). exists f; firstorder. Qed. Theorem functional_dependent_choice_imp_functional_countable_choice : FunctionalDependentChoice -> FunctionalCountableChoice. Proof. intros H A R H0. set (R' (p q:nat*A) := fst q = S (fst p) /\ R (fst p) (snd q)). destruct (H0 0) as (y0,Hy0). destruct H with (R:=R') (x0:=(0,y0)) as (f,(Hf0,HfS)). intro x; destruct (H0 (fst x)) as (y,Hy). exists (S (fst x),y). red. auto. 
assert (Heq:forall n, fst (f n) = n). induction n. rewrite Hf0; reflexivity. specialize HfS with n; destruct HfS as (->,_); congruence. exists (fun n => snd (f (S n))). intro n'. specialize HfS with n'. destruct HfS as (_,HR). rewrite Heq in HR. assumption. Qed. coq-8.4pl2/theories/Logic/Diaconescu.v0000640000175000001440000002266712010532755016767 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* AC_ext, Mathematical Logic Quaterly, vol 50(3), pp 236-240, 2004. *) (**********************************************************************) (** * Pred. Ext. + Rel. Axiom of Choice -> Excluded-Middle *) Section PredExt_RelChoice_imp_EM. (** The axiom of extensionality for predicates *) Definition PredicateExtensionality := forall P Q:bool -> Prop, (forall b:bool, P b <-> Q b) -> P = Q. (** From predicate extensionality we get propositional extensionality hence proof-irrelevance *) Require Import ClassicalFacts. Variable pred_extensionality : PredicateExtensionality. Lemma prop_ext : forall A B:Prop, (A <-> B) -> A = B. Proof. intros A B H. change ((fun _ => A) true = (fun _ => B) true). rewrite pred_extensionality with (P := fun _:bool => A) (Q := fun _:bool => B). reflexivity. intros _; exact H. Qed. Lemma proof_irrel : forall (A:Prop) (a1 a2:A), a1 = a2. Proof. apply (ext_prop_dep_proof_irrel_cic prop_ext). Qed. (** From proof-irrelevance and relational choice, we get guarded relational choice *) Require Import ChoiceFacts. Variable rel_choice : RelationalChoice. Lemma guarded_rel_choice : GuardedRelationalChoice. Proof. apply (rel_choice_and_proof_irrel_imp_guarded_rel_choice rel_choice proof_irrel). Qed. (** The form of choice we need: there is a functional relation which chooses an element in any non empty subset of bool *) Require Import Bool. Lemma AC_bool_subset_to_bool : exists R : (bool -> Prop) -> bool -> Prop, (forall P:bool -> Prop, (exists b : bool, P b) -> exists b : bool, P b /\ R P b /\ (forall b':bool, R P b' -> b = b')). Proof. destruct (guarded_rel_choice _ _ (fun Q:bool -> Prop => exists y : _, Q y) (fun (Q:bool -> Prop) (y:bool) => Q y)) as (R,(HRsub,HR)). exact (fun _ H => H). exists R; intros P HP. destruct (HR P HP) as (y,(Hy,Huni)). exists y; firstorder. Qed. (** The proof of the excluded middle *) (** Remark: P could have been in Set or Type *) Theorem pred_ext_and_rel_choice_imp_EM : forall P:Prop, P \/ ~ P. Proof. intro P. (** first we exhibit the choice functional relation R *) destruct AC_bool_subset_to_bool as [R H]. set (class_of_true := fun b => b = true \/ P). set (class_of_false := fun b => b = false \/ P). (** the actual "decision": is (R class_of_true) = true or false? *) destruct (H class_of_true) as [b0 [H0 [H0' H0'']]]. exists true; left; reflexivity. destruct H0. (** the actual "decision": is (R class_of_false) = true or false? *) destruct (H class_of_false) as [b1 [H1 [H1' H1'']]]. exists false; left; reflexivity. destruct H1. (** case where P is false: (R class_of_true)=true /\ (R class_of_false)=false *) right. intro HP. assert (Hequiv : forall b:bool, class_of_true b <-> class_of_false b). intro b; split. unfold class_of_false; right; assumption. unfold class_of_true; right; assumption. assert (Heq : class_of_true = class_of_false). apply pred_extensionality with (1 := Hequiv). apply diff_true_false. rewrite <- H0. rewrite <- H1. rewrite <- H0''. reflexivity. rewrite Heq. assumption. 
(** cases where P is true *) left; assumption. left; assumption. Qed. End PredExt_RelChoice_imp_EM. (**********************************************************************) (** * B. Proof-Irrel. + Rel. Axiom of Choice -> Excl.-Middle for Equality *) (** This is an adaptation of Diaconescu's theorem, exploiting the form of extensionality provided by proof-irrelevance *) Section ProofIrrel_RelChoice_imp_EqEM. Variable rel_choice : RelationalChoice. Variable proof_irrelevance : forall P:Prop , forall x y:P, x=y. (** Let [a1] and [a2] be two elements in some type [A] *) Variable A :Type. Variables a1 a2 : A. (** We build the subset [A'] of [A] made of [a1] and [a2] *) Definition A' := sigT (fun x => x=a1 \/ x=a2). Definition a1':A'. exists a1 ; auto. Defined. Definition a2':A'. exists a2 ; auto. Defined. (** By proof-irrelevance, projection is a retraction *) Lemma projT1_injective : a1=a2 -> a1'=a2'. Proof. intro Heq ; unfold a1', a2', A'. rewrite Heq. replace (or_introl (a2=a2) (eq_refl a2)) with (or_intror (a2=a2) (eq_refl a2)). reflexivity. apply proof_irrelevance. Qed. (** But from the actual proofs of being in [A'], we can assert in the proof-irrelevant world the existence of relevant boolean witnesses *) Lemma decide : forall x:A', exists y:bool , (projT1 x = a1 /\ y = true ) \/ (projT1 x = a2 /\ y = false). Proof. intros [a [Ha1|Ha2]]; [exists true | exists false]; auto. Qed. (** Thanks to the axiom of choice, the boolean witnesses move from the propositional world to the relevant world *) Theorem proof_irrel_rel_choice_imp_eq_dec : a1=a2 \/ ~a1=a2. Proof. destruct (rel_choice A' bool (fun x y => projT1 x = a1 /\ y = true \/ projT1 x = a2 /\ y = false)) as (R,(HRsub,HR)). apply decide. destruct (HR a1') as (b1,(Ha1'b1,_Huni1)). destruct (HRsub a1' b1 Ha1'b1) as [(_, Hb1true)|(Ha1a2, _Hb1false)]. destruct (HR a2') as (b2,(Ha2'b2,Huni2)). destruct (HRsub a2' b2 Ha2'b2) as [(Ha2a1, _Hb2true)|(_, Hb2false)]. left; symmetry; assumption. right; intro H. subst b1; subst b2. rewrite (projT1_injective H) in Ha1'b1. assert (false = true) by auto using Huni2. discriminate. left; assumption. Qed. (** An alternative more concise proof can be done by directly using the guarded relational choice *) Declare Implicit Tactic auto. Lemma proof_irrel_rel_choice_imp_eq_dec' : a1=a2 \/ ~a1=a2. Proof. assert (decide: forall x:A, x=a1 \/ x=a2 -> exists y:bool, x=a1 /\ y=true \/ x=a2 /\ y=false). intros a [Ha1|Ha2]; [exists true | exists false]; auto. assert (guarded_rel_choice := rel_choice_and_proof_irrel_imp_guarded_rel_choice rel_choice proof_irrelevance). destruct (guarded_rel_choice A bool (fun x => x=a1 \/ x=a2) (fun x y => x=a1 /\ y=true \/ x=a2 /\ y=false)) as (R,(HRsub,HR)). apply decide. destruct (HR a1) as (b1,(Ha1b1,_Huni1)). left; reflexivity. destruct (HRsub a1 b1 Ha1b1) as [(_, Hb1true)|(Ha1a2, _Hb1false)]. destruct (HR a2) as (b2,(Ha2b2,Huni2)). right; reflexivity. destruct (HRsub a2 b2 Ha2b2) as [(Ha2a1, _Hb2true)|(_, Hb2false)]. left; symmetry; assumption. right; intro H. subst b1; subst b2; subst a1. assert (false = true) by auto using Huni2, Ha1b1. discriminate. left; assumption. Qed. End ProofIrrel_RelChoice_imp_EqEM. (**********************************************************************) (** * Extensional Hilbert's epsilon description operator -> Excluded-Middle *) (** Proof sketch from Bell [Bell93] (with thanks to P. Castéran) *) Local Notation inhabited A := A (only parsing). Section ExtensionalEpsilon_imp_EM. Variable epsilon : forall A : Type, inhabited A -> (A -> Prop) -> A. 
Hypothesis epsilon_spec : forall (A:Type) (i:inhabited A) (P:A->Prop), (exists x, P x) -> P (epsilon A i P). Hypothesis epsilon_extensionality : forall (A:Type) (i:inhabited A) (P Q:A->Prop), (forall a, P a <-> Q a) -> epsilon A i P = epsilon A i Q. Local Notation eps := (epsilon bool true) (only parsing). Theorem extensional_epsilon_imp_EM : forall P:Prop, P \/ ~ P. Proof. intro P. pose (B := fun y => y=false \/ P). pose (C := fun y => y=true \/ P). assert (B (eps B)) as [Hfalse|HP] by (apply epsilon_spec; exists false; left; reflexivity). assert (C (eps C)) as [Htrue|HP] by (apply epsilon_spec; exists true; left; reflexivity). right; intro HP. assert (forall y, B y <-> C y) by (intro y; split; intro; right; assumption). rewrite epsilon_extensionality with (1:=H) in Hfalse. rewrite Htrue in Hfalse. discriminate. auto. auto. Qed. End ExtensionalEpsilon_imp_EM. coq-8.4pl2/theories/Logic/vo.itarget0000640000175000001440000000075011307752066016524 0ustar notinusersBerardi.vo ChoiceFacts.vo ClassicalChoice.vo ClassicalDescription.vo ClassicalEpsilon.vo ClassicalFacts.vo Classical_Pred_Set.vo Classical_Pred_Type.vo Classical_Prop.vo Classical_Type.vo ClassicalUniqueChoice.vo Classical.vo ConstructiveEpsilon.vo Decidable.vo Description.vo Diaconescu.vo Epsilon.vo Eqdep_dec.vo EqdepFacts.vo Eqdep.vo FunctionalExtensionality.vo Hurkens.vo IndefiniteDescription.vo JMeq.vo ProofIrrelevanceFacts.vo ProofIrrelevance.vo RelationalChoice.vo SetIsType.vo coq-8.4pl2/theories/Logic/Hurkens.v0000640000175000001440000000506412010532755016321 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool. Variable b2p : bool -> Prop. Hypothesis p2p1 : forall A:Prop, b2p (p2b A) -> A. Hypothesis p2p2 : forall A:Prop, A -> b2p (p2b A). Variable B : Prop. Definition V := forall A:Prop, ((A -> bool) -> A -> bool) -> A -> bool. Definition U := V -> bool. Definition sb (z:V) : V := fun A r a => r (z A r) a. Definition le (i:U -> bool) (x:U) : bool := x (fun A r a => i (fun v => sb v A r a)). Definition induct (i:U -> bool) : Prop := forall x:U, b2p (le i x) -> b2p (i x). Definition WF : U := fun z => p2b (induct (z U le)). Definition I (x:U) : Prop := (forall i:U -> bool, b2p (le i x) -> b2p (i (fun v => sb v U le x))) -> B. Lemma Omega : forall i:U -> bool, induct i -> b2p (i WF). Proof. intros i y. apply y. unfold le, WF, induct. apply p2p2. intros x H0. apply y. exact H0. Qed. Lemma lemma1 : induct (fun u => p2b (I u)). Proof. unfold induct. intros x p. apply (p2p2 (I x)). intro q. apply (p2p1 (I (fun v:V => sb v U le x)) (q (fun u => p2b (I u)) p)). intro i. apply q with (i := fun y => i (fun v:V => sb v U le y)). Qed. Lemma lemma2 : (forall i:U -> bool, induct i -> b2p (i WF)) -> B. Proof. intro x. apply (p2p1 (I WF) (x (fun u => p2b (I u)) lemma1)). intros i H0. apply (x (fun y => i (fun v => sb v U le y))). apply (p2p1 _ H0). Qed. Theorem paradox : B. Proof. exact (lemma2 Omega). Qed. End Paradox. coq-8.4pl2/theories/Logic/EqdepFacts.v0000640000175000001440000002726612010532755016731 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Eq_dep_eq <-> UIP <-> UIP_refl <-> K 3. 
Definition of the functor that builds properties of dependent equalities assuming axiom eq_rect_eq *) (************************************************************************) (** * Definition of dependent equality and equivalence with equality of dependent pairs *) Import EqNotations. Section Dependent_Equality. Variable U : Type. Variable P : U -> Type. (** Dependent equality *) Inductive eq_dep (p:U) (x:P p) : forall q:U, P q -> Prop := eq_dep_intro : eq_dep p x p x. Hint Constructors eq_dep: core. Lemma eq_dep_refl : forall (p:U) (x:P p), eq_dep p x p x. Proof eq_dep_intro. Lemma eq_dep_sym : forall (p q:U) (x:P p) (y:P q), eq_dep p x q y -> eq_dep q y p x. Proof. destruct 1; auto. Qed. Hint Immediate eq_dep_sym: core. Lemma eq_dep_trans : forall (p q r:U) (x:P p) (y:P q) (z:P r), eq_dep p x q y -> eq_dep q y r z -> eq_dep p x r z. Proof. destruct 1; auto. Qed. Scheme eq_indd := Induction for eq Sort Prop. (** Equivalent definition of dependent equality as a dependent pair of equalities *) Inductive eq_dep1 (p:U) (x:P p) (q:U) (y:P q) : Prop := eq_dep1_intro : forall h:q = p, x = rew h in y -> eq_dep1 p x q y. Lemma eq_dep1_dep : forall (p:U) (x:P p) (q:U) (y:P q), eq_dep1 p x q y -> eq_dep p x q y. Proof. destruct 1 as (eq_qp, H). destruct eq_qp using eq_indd. rewrite H. apply eq_dep_intro. Qed. Lemma eq_dep_dep1 : forall (p q:U) (x:P p) (y:P q), eq_dep p x q y -> eq_dep1 p x q y. Proof. destruct 1. apply eq_dep1_intro with (eq_refl p). simpl; trivial. Qed. End Dependent_Equality. Arguments eq_dep [U P] p x q _. Arguments eq_dep1 [U P] p x q y. (** Dependent equality is equivalent to equality on dependent pairs *) Lemma eq_sigT_eq_dep : forall (U:Type) (P:U -> Type) (p q:U) (x:P p) (y:P q), existT P p x = existT P q y -> eq_dep p x q y. Proof. intros. dependent rewrite H. apply eq_dep_intro. Qed. Notation eq_sigS_eq_dep := eq_sigT_eq_dep (compat "8.2"). (* Compatibility *) Lemma eq_dep_eq_sigT : forall (U:Type) (P:U -> Type) (p q:U) (x:P p) (y:P q), eq_dep p x q y -> existT P p x = existT P q y. Proof. destruct 1; reflexivity. Qed. Lemma eq_sigT_iff_eq_dep : forall (U:Type) (P:U -> Type) (p q:U) (x:P p) (y:P q), existT P p x = existT P q y <-> eq_dep p x q y. Proof. split; auto using eq_sigT_eq_dep, eq_dep_eq_sigT. Qed. Notation equiv_eqex_eqdep := eq_sigT_iff_eq_dep (only parsing). (* Compat *) Lemma eq_sig_eq_dep : forall (U:Prop) (P:U -> Prop) (p q:U) (x:P p) (y:P q), exist P p x = exist P q y -> eq_dep p x q y. Proof. intros. dependent rewrite H. apply eq_dep_intro. Qed. Lemma eq_dep_eq_sig : forall (U:Prop) (P:U -> Prop) (p q:U) (x:P p) (y:P q), eq_dep p x q y -> exist P p x = exist P q y. Proof. destruct 1; reflexivity. Qed. Lemma eq_sig_iff_eq_dep : forall (U:Prop) (P:U -> Prop) (p q:U) (x:P p) (y:P q), exist P p x = exist P q y <-> eq_dep p x q y. Proof. split; auto using eq_sig_eq_dep, eq_dep_eq_sig. Qed. (** Dependent equality is equivalent to a dependent pair of equalities *) Set Implicit Arguments. Lemma eq_sigT_sig_eq : forall X P (x1 x2:X) H1 H2, existT P x1 H1 = existT P x2 H2 <-> {H:x1=x2 | rew H in H1 = H2}. Proof. intros; split; intro H. - change x2 with (projT1 (existT P x2 H2)). change H2 with (projT2 (existT P x2 H2)) at 5. destruct H. simpl. exists eq_refl. reflexivity. - destruct H as (->,<-). reflexivity. Defined. Lemma eq_sigT_fst : forall X P (x1 x2:X) H1 H2 (H:existT P x1 H1 = existT P x2 H2), x1 = x2. Proof. intros. change x2 with (projT1 (existT P x2 H2)). destruct H. reflexivity. Defined. 
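(** Example (sketch): [eq_sigT_fst] extracts the equality of the first
    components from an equality of dependent pairs; [P], [x], [y] and the
    proofs below are hypothetical. *)

Example example_eq_sigT_fst (P : nat -> Type) (x y : nat) (Hx : P x) (Hy : P y)
  (H : existT P x Hx = existT P y Hy) : x = y.
Proof. exact (eq_sigT_fst H). Qed.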
Lemma eq_sigT_snd : forall X P (x1 x2:X) H1 H2 (H:existT P x1 H1 = existT P x2 H2), rew (eq_sigT_fst H) in H1 = H2. Proof. intros. unfold eq_sigT_fst. change x2 with (projT1 (existT P x2 H2)). change H2 with (projT2 (existT P x2 H2)) at 3. destruct H. reflexivity. Defined. Lemma eq_sig_fst : forall X P (x1 x2:X) H1 H2 (H:exist P x1 H1 = exist P x2 H2), x1 = x2. Proof. intros. change x2 with (proj1_sig (exist P x2 H2)). destruct H. reflexivity. Defined. Lemma eq_sig_snd : forall X P (x1 x2:X) H1 H2 (H:exist P x1 H1 = exist P x2 H2), rew (eq_sig_fst H) in H1 = H2. Proof. intros. unfold eq_sig_fst, eq_ind. change x2 with (proj1_sig (exist P x2 H2)). change H2 with (proj2_sig (exist P x2 H2)) at 3. destruct H. reflexivity. Defined. Unset Implicit Arguments. (** Exported hints *) Hint Resolve eq_dep_intro: core. Hint Immediate eq_dep_sym: core. (************************************************************************) (** * Eq_rect_eq <-> Eq_dep_eq <-> UIP <-> UIP_refl <-> K *) Section Equivalences. Variable U:Type. (** Invariance by Substitution of Reflexive Equality Proofs *) Definition Eq_rect_eq := forall (p:U) (Q:U -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. (** Injectivity of Dependent Equality *) Definition Eq_dep_eq := forall (P:U->Type) (p:U) (x y:P p), eq_dep p x p y -> x = y. (** Uniqueness of Identity Proofs (UIP) *) Definition UIP_ := forall (x y:U) (p1 p2:x = y), p1 = p2. (** Uniqueness of Reflexive Identity Proofs *) Definition UIP_refl_ := forall (x:U) (p:x = x), p = eq_refl x. (** Streicher's axiom K *) Definition Streicher_K_ := forall (x:U) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. (** Injectivity of Dependent Equality is a consequence of *) (** Invariance by Substitution of Reflexive Equality Proof *) Lemma eq_rect_eq__eq_dep1_eq : Eq_rect_eq -> forall (P:U->Type) (p:U) (x y:P p), eq_dep1 p x p y -> x = y. Proof. intro eq_rect_eq. simple destruct 1; intro. rewrite <- eq_rect_eq; auto. Qed. Lemma eq_rect_eq__eq_dep_eq : Eq_rect_eq -> Eq_dep_eq. Proof. intros eq_rect_eq; red; intros. apply (eq_rect_eq__eq_dep1_eq eq_rect_eq); apply eq_dep_dep1; trivial. Qed. (** Uniqueness of Identity Proofs (UIP) is a consequence of *) (** Injectivity of Dependent Equality *) Lemma eq_dep_eq__UIP : Eq_dep_eq -> UIP_. Proof. intro eq_dep_eq; red. intros; apply eq_dep_eq with (P := fun y => x = y). elim p2 using eq_indd. elim p1 using eq_indd. apply eq_dep_intro. Qed. (** Uniqueness of Reflexive Identity Proofs is a direct instance of UIP *) Lemma UIP__UIP_refl : UIP_ -> UIP_refl_. Proof. intro UIP; red; intros; apply UIP. Qed. (** Streicher's axiom K is a direct consequence of Uniqueness of Reflexive Identity Proofs *) Lemma UIP_refl__Streicher_K : UIP_refl_ -> Streicher_K_. Proof. intro UIP_refl; red; intros; rewrite UIP_refl; assumption. Qed. (** We finally recover from K the Invariance by Substitution of Reflexive Equality Proofs *) Lemma Streicher_K__eq_rect_eq : Streicher_K_ -> Eq_rect_eq. Proof. intro Streicher_K; red; intros. apply Streicher_K with (p := h). reflexivity. Qed. (** Remark: It is reasonable to think that [eq_rect_eq] is strictly stronger than [eq_rec_eq] (which is [eq_rect_eq] restricted on [Set]): [Definition Eq_rec_eq := forall (P:U -> Set) (p:U) (x:P p) (h:p = p), x = eq_rec p P x p h.] Typically, [eq_rect_eq] allows to prove UIP and Streicher's K what does not seem possible with [eq_rec_eq]. In particular, the proof of [UIP] requires to use [eq_rect_eq] on [fun y -> x=y] which is in [Type] but not in [Set]. *) End Equivalences. 
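(** Example (sketch): chaining the lemmas of the section recovers Streicher's
    K on an arbitrary type [U] directly from invariance by substitution of
    reflexive equality proofs. *)

Remark example_eq_rect_eq_imp_Streicher_K (U:Type) :
  Eq_rect_eq U -> Streicher_K_ U.
Proof.
  intro H. apply UIP_refl__Streicher_K. apply UIP__UIP_refl.
  apply eq_dep_eq__UIP. apply eq_rect_eq__eq_dep_eq. assumption.
Qed.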
Section Corollaries. Variable U:Type. (** UIP implies the injectivity of equality on dependent pairs in Type *) Definition Inj_dep_pair := forall (P:U -> Type) (p:U) (x y:P p), existT P p x = existT P p y -> x = y. Lemma eq_dep_eq__inj_pair2 : Eq_dep_eq U -> Inj_dep_pair. Proof. intro eq_dep_eq; red; intros. apply eq_dep_eq. apply eq_sigT_eq_dep. assumption. Qed. End Corollaries. Notation Inj_dep_pairS := Inj_dep_pair. Notation Inj_dep_pairT := Inj_dep_pair. Notation eq_dep_eq__inj_pairT2 := eq_dep_eq__inj_pair2. (************************************************************************) (** * Definition of the functor that builds properties of dependent equalities assuming axiom eq_rect_eq *) Module Type EqdepElimination. Axiom eq_rect_eq : forall (U:Type) (p:U) (Q:U -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. End EqdepElimination. Module EqdepTheory (M:EqdepElimination). Section Axioms. Variable U:Type. (** Invariance by Substitution of Reflexive Equality Proofs *) Lemma eq_rect_eq : forall (p:U) (Q:U -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. Proof M.eq_rect_eq U. Lemma eq_rec_eq : forall (p:U) (Q:U -> Set) (x:Q p) (h:p = p), x = eq_rec p Q x p h. Proof (fun p Q => M.eq_rect_eq U p Q). (** Injectivity of Dependent Equality *) Lemma eq_dep_eq : forall (P:U->Type) (p:U) (x y:P p), eq_dep p x p y -> x = y. Proof (eq_rect_eq__eq_dep_eq U eq_rect_eq). (** Uniqueness of Identity Proofs (UIP) is a consequence of *) (** Injectivity of Dependent Equality *) Lemma UIP : forall (x y:U) (p1 p2:x = y), p1 = p2. Proof (eq_dep_eq__UIP U eq_dep_eq). (** Uniqueness of Reflexive Identity Proofs is a direct instance of UIP *) Lemma UIP_refl : forall (x:U) (p:x = x), p = eq_refl x. Proof (UIP__UIP_refl U UIP). (** Streicher's axiom K is a direct consequence of Uniqueness of Reflexive Identity Proofs *) Lemma Streicher_K : forall (x:U) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. Proof (UIP_refl__Streicher_K U UIP_refl). End Axioms. (** UIP implies the injectivity of equality on dependent pairs in Type *) Lemma inj_pair2 : forall (U:Type) (P:U -> Type) (p:U) (x y:P p), existT P p x = existT P p y -> x = y. Proof (fun U => eq_dep_eq__inj_pair2 U (eq_dep_eq U)). Notation inj_pairT2 := inj_pair2. End EqdepTheory. Arguments eq_dep U P p x q _ : clear implicits. Arguments eq_dep1 U P p x q y : clear implicits. coq-8.4pl2/theories/Logic/ClassicalFacts.v0000640000175000001440000004566512010532755017574 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (A = A->A) -> A has fixpoint 2.2. CC |- prop. ext. + dep elim on bool -> proof-irrelevance 2.3. CIC |- prop. ext. -> proof-irrelevance 2.4. CC |- excluded-middle + dep elim on bool -> proof-irrelevance 2.5. CIC |- excluded-middle -> proof-irrelevance 3. Weak classical axioms 3.1. Weak excluded middle 3.2. Gödel-Dummett axiom and right distributivity of implication over disjunction 3 3. Independence of general premises and drinker's paradox *) (************************************************************************) (** * Prop degeneracy = excluded-middle + prop extensionality *) (** i.e. [(forall A, A=True \/ A=False) <-> (forall A, A\/~A) /\ (forall A B, (A<->B) -> A=B)] *) (** [prop_degeneracy] (also referred to as propositional completeness) asserts (up to consistency) that there are only two distinct formulas *) Definition prop_degeneracy := forall A:Prop, A = True \/ A = False. 
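(** Quick sanity check, not part of the original file: under
    [prop_degeneracy], a proposition that holds is literally equal to
    [True]; the lemma name [prop_degen_provable_eq_True_sketch] is ad hoc. *)

Lemma prop_degen_provable_eq_True_sketch :
  prop_degeneracy -> forall A:Prop, A -> A = True.
Proof.
  intros Degen A Ha; destruct (Degen A) as [HT|HF].
  assumption.
  rewrite HF in Ha; contradiction.
Qed.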
(** [prop_extensionality] asserts that equivalent formulas are equal *) Definition prop_extensionality := forall A B:Prop, (A <-> B) -> A = B. (** [excluded_middle] asserts that we can reason by case on the truth or falsity of any formula *) Definition excluded_middle := forall A:Prop, A \/ ~ A. (** We show [prop_degeneracy <-> (prop_extensionality /\ excluded_middle)] *) Lemma prop_degen_ext : prop_degeneracy -> prop_extensionality. Proof. intros H A B [Hab Hba]. destruct (H A); destruct (H B). rewrite H1; exact H0. absurd B. rewrite H1; exact (fun H => H). apply Hab; rewrite H0; exact I. absurd A. rewrite H0; exact (fun H => H). apply Hba; rewrite H1; exact I. rewrite H1; exact H0. Qed. Lemma prop_degen_em : prop_degeneracy -> excluded_middle. Proof. intros H A. destruct (H A). left; rewrite H0; exact I. right; rewrite H0; exact (fun x => x). Qed. Lemma prop_ext_em_degen : prop_extensionality -> excluded_middle -> prop_degeneracy. Proof. intros Ext EM A. destruct (EM A). left; apply (Ext A True); split; [ exact (fun _ => I) | exact (fun _ => H) ]. right; apply (Ext A False); split; [ exact H | apply False_ind ]. Qed. (** A weakest form of propositional extensionality: extensionality for provable propositions only *) Definition provable_prop_extensionality := forall A:Prop, A -> A = True. Lemma provable_prop_ext : prop_extensionality -> provable_prop_extensionality. Proof. intros Ext A Ha; apply Ext; split; trivial. Qed. (************************************************************************) (** * Classical logic and proof-irrelevance *) (************************************************************************) (** ** CC |- prop ext + A inhabited -> (A = A->A) -> A has fixpoint *) (** We successively show that: [prop_extensionality] implies equality of [A] and [A->A] for inhabited [A], which implies the existence of a (trivial) retract from [A->A] to [A] (just take the identity), which implies the existence of a fixpoint operator in [A] (e.g. take the Y combinator of lambda-calculus) *) Local Notation inhabited A := A (only parsing). Lemma prop_ext_A_eq_A_imp_A : prop_extensionality -> forall A:Prop, inhabited A -> (A -> A) = A. Proof. intros Ext A a. apply (Ext (A -> A) A); split; [ exact (fun _ => a) | exact (fun _ _ => a) ]. Qed. Record retract (A B:Prop) : Prop := {f1 : A -> B; f2 : B -> A; f1_o_f2 : forall x:B, f1 (f2 x) = x}. Lemma prop_ext_retract_A_A_imp_A : prop_extensionality -> forall A:Prop, inhabited A -> retract A (A -> A). Proof. intros Ext A a. rewrite (prop_ext_A_eq_A_imp_A Ext A a). exists (fun x:A => x) (fun x:A => x). reflexivity. Qed. Record has_fixpoint (A:Prop) : Prop := {F : (A -> A) -> A; Fix : forall f:A -> A, F f = f (F f)}. Lemma ext_prop_fixpoint : prop_extensionality -> forall A:Prop, inhabited A -> has_fixpoint A. Proof. intros Ext A a. case (prop_ext_retract_A_A_imp_A Ext A a); intros g1 g2 g1_o_g2. exists (fun f => (fun x:A => f (g1 x x)) (g2 (fun x => f (g1 x x)))). intro f. pattern (g1 (g2 (fun x:A => f (g1 x x)))) at 1. rewrite (g1_o_g2 (fun x:A => f (g1 x x))). reflexivity. Qed. (** Remark: [prop_extensionality] can be replaced in lemma [ext_prop_fixpoint] by the weakest property [provable_prop_extensionality]. *) (************************************************************************) (** ** CC |- prop_ext /\ dep elim on bool -> proof-irrelevance *) (** [proof_irrelevance] asserts equality of all proofs of a given formula *) Definition proof_irrelevance := forall (A:Prop) (a1 a2:A), a1 = a2. 
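(** Minimal illustration, not part of the original file: the [retract]
    record introduced earlier in this file is inhabited reflexively by the
    identity, with no axiom needed; the example name [retract_refl_sketch]
    is ad hoc. *)

Example retract_refl_sketch : forall A:Prop, retract A A.
Proof.
  intro A; apply (@Build_retract A A (fun x => x) (fun x => x)).
  intro x; reflexivity.
Qed.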
(** Assume that we have booleans with the property that there is at most 2 booleans (which is equivalent to dependent case analysis). Consider the fixpoint of the negation function: it is either true or false by dependent case analysis, but also the opposite by fixpoint. Hence proof-irrelevance. We then map equality of boolean proofs to proof irrelevance in all propositions. *) Section Proof_irrelevance_gen. Variable bool : Prop. Variable true : bool. Variable false : bool. Hypothesis bool_elim : forall C:Prop, C -> C -> bool -> C. Hypothesis bool_elim_redl : forall (C:Prop) (c1 c2:C), c1 = bool_elim C c1 c2 true. Hypothesis bool_elim_redr : forall (C:Prop) (c1 c2:C), c2 = bool_elim C c1 c2 false. Let bool_dep_induction := forall P:bool -> Prop, P true -> P false -> forall b:bool, P b. Lemma aux : prop_extensionality -> bool_dep_induction -> true = false. Proof. intros Ext Ind. case (ext_prop_fixpoint Ext bool true); intros G Gfix. set (neg := fun b:bool => bool_elim bool false true b). generalize (eq_refl (G neg)). pattern (G neg) at 1. apply Ind with (b := G neg); intro Heq. rewrite (bool_elim_redl bool false true). change (true = neg true); rewrite Heq; apply Gfix. rewrite (bool_elim_redr bool false true). change (neg false = false); rewrite Heq; symmetry ; apply Gfix. Qed. Lemma ext_prop_dep_proof_irrel_gen : prop_extensionality -> bool_dep_induction -> proof_irrelevance. Proof. intros Ext Ind A a1 a2. set (f := fun b:bool => bool_elim A a1 a2 b). rewrite (bool_elim_redl A a1 a2). change (f true = a2). rewrite (bool_elim_redr A a1 a2). change (f true = f false). rewrite (aux Ext Ind). reflexivity. Qed. End Proof_irrelevance_gen. (** In the pure Calculus of Constructions, we can define the boolean proposition bool = (C:Prop)C->C->C but we cannot prove that it has at most 2 elements. *) Section Proof_irrelevance_Prop_Ext_CC. Definition BoolP := forall C:Prop, C -> C -> C. Definition TrueP : BoolP := fun C c1 c2 => c1. Definition FalseP : BoolP := fun C c1 c2 => c2. Definition BoolP_elim C c1 c2 (b:BoolP) := b C c1 c2. Definition BoolP_elim_redl (C:Prop) (c1 c2:C) : c1 = BoolP_elim C c1 c2 TrueP := eq_refl c1. Definition BoolP_elim_redr (C:Prop) (c1 c2:C) : c2 = BoolP_elim C c1 c2 FalseP := eq_refl c2. Definition BoolP_dep_induction := forall P:BoolP -> Prop, P TrueP -> P FalseP -> forall b:BoolP, P b. Lemma ext_prop_dep_proof_irrel_cc : prop_extensionality -> BoolP_dep_induction -> proof_irrelevance. Proof. exact (ext_prop_dep_proof_irrel_gen BoolP TrueP FalseP BoolP_elim BoolP_elim_redl BoolP_elim_redr). Qed. End Proof_irrelevance_Prop_Ext_CC. (** Remark: [prop_extensionality] can be replaced in lemma [ext_prop_dep_proof_irrel_gen] by the weakest property [provable_prop_extensionality]. *) (************************************************************************) (** ** CIC |- prop. ext. -> proof-irrelevance *) (** In the Calculus of Inductive Constructions, inductively defined booleans enjoy dependent case analysis, hence directly proof-irrelevance from propositional extensionality. *) Section Proof_irrelevance_CIC. Inductive boolP : Prop := | trueP : boolP | falseP : boolP. Definition boolP_elim_redl (C:Prop) (c1 c2:C) : c1 = boolP_ind C c1 c2 trueP := eq_refl c1. Definition boolP_elim_redr (C:Prop) (c1 c2:C) : c2 = boolP_ind C c1 c2 falseP := eq_refl c2. Scheme boolP_indd := Induction for boolP Sort Prop. Lemma ext_prop_dep_proof_irrel_cic : prop_extensionality -> proof_irrelevance. Proof. 
exact (fun pe => ext_prop_dep_proof_irrel_gen boolP trueP falseP boolP_ind boolP_elim_redl boolP_elim_redr pe boolP_indd). Qed. End Proof_irrelevance_CIC. (** Can we state proof irrelevance from propositional degeneracy (i.e. propositional extensionality + excluded middle) without dependent case analysis ? Berardi [[Berardi90]] built a model of CC interpreting inhabited types by the set of all untyped lambda-terms. This model satisfies propositional degeneracy without satisfying proof-irrelevance (nor dependent case analysis). This implies that the previous results cannot be refined. [[Berardi90]] Stefano Berardi, "Type dependence and constructive mathematics", Ph. D. thesis, Dipartimento Matematica, Università di Torino, 1990. *) (************************************************************************) (** ** CC |- excluded-middle + dep elim on bool -> proof-irrelevance *) (** This is a proof in the pure Calculus of Construction that classical logic in [Prop] + dependent elimination of disjunction entails proof-irrelevance. Reference: [[Coquand90]] T. Coquand, "Metamathematical Investigations of a Calculus of Constructions", Proceedings of Logic in Computer Science (LICS'90), 1990. Proof skeleton: classical logic + dependent elimination of disjunction + discrimination of proofs implies the existence of a retract from [Prop] into [bool], hence inconsistency by encoding any paradox of system U- (e.g. Hurkens' paradox). *) Require Import Hurkens. Section Proof_irrelevance_EM_CC. Variable or : Prop -> Prop -> Prop. Variable or_introl : forall A B:Prop, A -> or A B. Variable or_intror : forall A B:Prop, B -> or A B. Hypothesis or_elim : forall A B C:Prop, (A -> C) -> (B -> C) -> or A B -> C. Hypothesis or_elim_redl : forall (A B C:Prop) (f:A -> C) (g:B -> C) (a:A), f a = or_elim A B C f g (or_introl A B a). Hypothesis or_elim_redr : forall (A B C:Prop) (f:A -> C) (g:B -> C) (b:B), g b = or_elim A B C f g (or_intror A B b). Hypothesis or_dep_elim : forall (A B:Prop) (P:or A B -> Prop), (forall a:A, P (or_introl A B a)) -> (forall b:B, P (or_intror A B b)) -> forall b:or A B, P b. Hypothesis em : forall A:Prop, or A (~ A). Variable B : Prop. Variables b1 b2 : B. (** [p2b] and [b2p] form a retract if [~b1=b2] *) Definition p2b A := or_elim A (~ A) B (fun _ => b1) (fun _ => b2) (em A). Definition b2p b := b1 = b. Lemma p2p1 : forall A:Prop, A -> b2p (p2b A). Proof. unfold p2b; intro A; apply or_dep_elim with (b := em A); unfold b2p; intros. apply (or_elim_redl A (~ A) B (fun _ => b1) (fun _ => b2)). destruct (b H). Qed. Lemma p2p2 : b1 <> b2 -> forall A:Prop, b2p (p2b A) -> A. Proof. intro not_eq_b1_b2. unfold p2b; intro A; apply or_dep_elim with (b := em A); unfold b2p; intros. assumption. destruct not_eq_b1_b2. rewrite <- (or_elim_redr A (~ A) B (fun _ => b1) (fun _ => b2)) in H. assumption. Qed. (** Using excluded-middle a second time, we get proof-irrelevance *) Theorem proof_irrelevance_cc : b1 = b2. Proof. refine (or_elim _ _ _ _ _ (em (b1 = b2))); intro H. trivial. apply (paradox B p2b b2p (p2p2 H) p2p1). Qed. End Proof_irrelevance_EM_CC. (** Remark: Hurkens' paradox still holds with a retract from the _negative_ fragment of [Prop] into [bool], hence weak classical logic, i.e. [forall A, ~A\/~~A], is enough for deriving proof-irrelevance. 
*) (************************************************************************) (** ** CIC |- excluded-middle -> proof-irrelevance *) (** Since, dependent elimination is derivable in the Calculus of Inductive Constructions (CCI), we get proof-irrelevance from classical logic in the CCI. *) Section Proof_irrelevance_CCI. Hypothesis em : forall A:Prop, A \/ ~ A. Definition or_elim_redl (A B C:Prop) (f:A -> C) (g:B -> C) (a:A) : f a = or_ind f g (or_introl B a) := eq_refl (f a). Definition or_elim_redr (A B C:Prop) (f:A -> C) (g:B -> C) (b:B) : g b = or_ind f g (or_intror A b) := eq_refl (g b). Scheme or_indd := Induction for or Sort Prop. Theorem proof_irrelevance_cci : forall (B:Prop) (b1 b2:B), b1 = b2. Proof. exact (proof_irrelevance_cc or or_introl or_intror or_ind or_elim_redl or_elim_redr or_indd em). Qed. End Proof_irrelevance_CCI. (** Remark: in the Set-impredicative CCI, Hurkens' paradox still holds with [bool] in [Set] and since [~true=false] for [true] and [false] in [bool] from [Set], we get the inconsistency of [em : forall A:Prop, {A}+{~A}] in the Set-impredicative CCI. *) (** * Weak classical axioms *) (** We show the following increasing in the strength of axioms: - weak excluded-middle - right distributivity of implication over disjunction and Gödel-Dummett axiom - independence of general premises and drinker's paradox - excluded-middle *) (** ** Weak excluded-middle *) (** The weak classical logic based on [~~A \/ ~A] is referred to with name KC in {[ChagrovZakharyaschev97]] [[ChagrovZakharyaschev97]] Alexander Chagrov and Michael Zakharyaschev, "Modal Logic", Clarendon Press, 1997. *) Definition weak_excluded_middle := forall A:Prop, ~~A \/ ~A. (** The interest in the equivalent variant [weak_generalized_excluded_middle] is that it holds even in logic without a primitive [False] connective (like Gödel-Dummett axiom) *) Definition weak_generalized_excluded_middle := forall A B:Prop, ((A -> B) -> B) \/ (A -> B). (** ** Gödel-Dummett axiom *) (** [(A->B) \/ (B->A)] is studied in [[Dummett59]] and is based on [[Gödel33]]. [[Dummett59]] Michael A. E. Dummett. "A Propositional Calculus with a Denumerable Matrix", In the Journal of Symbolic Logic, Vol 24 No. 2(1959), pp 97-103. [[Gödel33]] Kurt Gödel. "Zum intuitionistischen Aussagenkalkül", Ergeb. Math. Koll. 4 (1933), pp. 34-38. *) Definition GodelDummett := forall A B:Prop, (A -> B) \/ (B -> A). Lemma excluded_middle_Godel_Dummett : excluded_middle -> GodelDummett. Proof. intros EM A B. destruct (EM B) as [HB|HnotB]. left; intros _; exact HB. right; intros HB; destruct (HnotB HB). Qed. (** [(A->B) \/ (B->A)] is equivalent to [(C -> A\/B) -> (C->A) \/ (C->B)] (proof from [[Dummett59]]) *) Definition RightDistributivityImplicationOverDisjunction := forall A B C:Prop, (C -> A\/B) -> (C->A) \/ (C->B). Lemma Godel_Dummett_iff_right_distr_implication_over_disjunction : GodelDummett <-> RightDistributivityImplicationOverDisjunction. Proof. split. intros GD A B C HCAB. destruct (GD B A) as [HBA|HAB]; [left|right]; intro HC; destruct (HCAB HC) as [HA|HB]; [ | apply HBA | apply HAB | ]; assumption. intros Distr A B. destruct (Distr A B (A\/B)) as [HABA|HABB]. intro HAB; exact HAB. right; intro HB; apply HABA; right; assumption. left; intro HA; apply HABB; left; assumption. Qed. (** [(A->B) \/ (B->A)] is stronger than the weak excluded middle *) Lemma Godel_Dummett_weak_excluded_middle : GodelDummett -> weak_excluded_middle. Proof. intros GD A. destruct (GD (~A) A) as [HnotAA|HAnotA]. left; intro HnotA; apply (HnotA (HnotAA HnotA)). 
right; intro HA; apply (HAnotA HA HA). Qed. (** ** Independence of general premises and drinker's paradox *) (** Independence of general premises is the unconstrained, non constructive, version of the Independence of Premises as considered in [[Troelstra73]]. It is a generalization to predicate logic of the right distributivity of implication over disjunction (hence of Gödel-Dummett axiom) whose own constructive form (obtained by a restricting the third formula to be negative) is called Kreisel-Putnam principle [[KreiselPutnam57]]. [[KreiselPutnam57]], Georg Kreisel and Hilary Putnam. "Eine Unableitsbarkeitsbeweismethode für den intuitionistischen Aussagenkalkül". Archiv für Mathematische Logik und Graundlagenforschung, 3:74- 78, 1957. [[Troelstra73]], Anne Troelstra, editor. Metamathematical Investigation of Intuitionistic Arithmetic and Analysis, volume 344 of Lecture Notes in Mathematics, Springer-Verlag, 1973. *) Definition IndependenceOfGeneralPremises := forall (A:Type) (P:A -> Prop) (Q:Prop), inhabited A -> (Q -> exists x, P x) -> exists x, Q -> P x. Lemma independence_general_premises_right_distr_implication_over_disjunction : IndependenceOfGeneralPremises -> RightDistributivityImplicationOverDisjunction. Proof. intros IP A B C HCAB. destruct (IP bool (fun b => if b then A else B) C true) as ([|],H). intro HC; destruct (HCAB HC); [exists true|exists false]; assumption. left; assumption. right; assumption. Qed. Lemma independence_general_premises_Godel_Dummett : IndependenceOfGeneralPremises -> GodelDummett. Proof. destruct Godel_Dummett_iff_right_distr_implication_over_disjunction. auto using independence_general_premises_right_distr_implication_over_disjunction. Qed. (** Independence of general premises is equivalent to the drinker's paradox *) Definition DrinkerParadox := forall (A:Type) (P:A -> Prop), inhabited A -> exists x, (exists x, P x) -> P x. Lemma independence_general_premises_drinker : IndependenceOfGeneralPremises <-> DrinkerParadox. Proof. split. intros IP A P InhA; apply (IP A P (exists x, P x) InhA); intro Hx; exact Hx. intros Drinker A P Q InhA H; destruct (Drinker A P InhA) as (x,Hx). exists x; intro HQ; apply (Hx (H HQ)). Qed. (** Independence of general premises is weaker than (generalized) excluded middle Remark: generalized excluded middle is preferred here to avoid relying on the "ex falso quodlibet" property (i.e. [False -> forall A, A]) *) Definition generalized_excluded_middle := forall A B:Prop, A \/ (A -> B). Lemma excluded_middle_independence_general_premises : generalized_excluded_middle -> DrinkerParadox. Proof. intros GEM A P x0. destruct (GEM (exists x, P x) (P x0)) as [(x,Hx)|Hnot]. exists x; intro; exact Hx. exists x0; exact Hnot. Qed. coq-8.4pl2/theories/Logic/IndefiniteDescription.v0000640000175000001440000000270612010532755021164 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop), (exists x, P x) -> { x : A | P x }. Lemma constructive_definite_description : forall (A : Type) (P : A->Prop), (exists! x, P x) -> { x : A | P x }. Proof. intros; apply constructive_indefinite_description; firstorder. Qed. Lemma functional_choice : forall (A B : Type) (R:A->B->Prop), (forall x : A, exists y : B, R x y) -> (exists f : A->B, forall x : A, R x (f x)). Proof. apply constructive_indefinite_descr_fun_choice. exact constructive_indefinite_description. Qed. 
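(** Small usage sketch, not part of the original file: [functional_choice]
    turns a pointwise existence statement into a choice function; the
    example name [functional_choice_sketch] is ad hoc. *)

Example functional_choice_sketch :
  (forall n:nat, exists m:nat, n < m) ->
  exists f : nat -> nat, forall n:nat, n < f n.
Proof.
  intro H; apply functional_choice with (R := fun n m : nat => n < m).
  exact H.
Qed.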
coq-8.4pl2/theories/Logic/ConstructiveEpsilon.v0000640000175000001440000002176512010532755020732 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop. Hypothesis P_dec : forall n, {P n}+{~(P n)}. (** The termination argument is [before_witness n], which says that any number before any witness (not necessarily the [x] of [exists x :A, P x]) makes the search eventually stops. *) Inductive before_witness : nat -> Prop := | stop : forall n, P n -> before_witness n | next : forall n, before_witness (S n) -> before_witness n. (* Computation of the initial termination certificate *) Fixpoint O_witness (n : nat) : before_witness n -> before_witness 0 := match n return (before_witness n -> before_witness 0) with | 0 => fun b => b | S n => fun b => O_witness n (next n b) end. (* Inversion of [inv_before_witness n] in a way such that the result is structurally smaller even in the [stop] case. *) Definition inv_before_witness : forall n, before_witness n -> ~(P n) -> before_witness (S n) := fun n b => match b in before_witness n return ~ P n -> before_witness (S n) with | stop n p => fun not_p => match (not_p p) with end | next n b => fun _ => b end. Fixpoint linear_search m (b : before_witness m) : {n : nat | P n} := match P_dec m with | left yes => exist (fun n => P n) m yes | right no => linear_search (S m) (inv_before_witness m b no) end. Definition constructive_indefinite_ground_description_nat : (exists n, P n) -> {n:nat | P n} := fun e => linear_search O (let (n, p) := e in O_witness n (stop n p)). End ConstructiveIndefiniteGroundDescription_Direct. (************************************************************************) (* Version using the predicate [Acc] *) Require Import Arith. Section ConstructiveIndefiniteGroundDescription_Acc. Variable P : nat -> Prop. Hypothesis P_decidable : forall n : nat, {P n} + {~ P n}. (** The predicate [Acc] delineates elements that are accessible via a given relation [R]. An element is accessible if there are no infinite [R]-descending chains starting from it. To use [Fix_F], we define a relation R and prove that if [exists n, P n] then 0 is accessible with respect to R. Then, by induction on the definition of [Acc R 0], we show [{n : nat | P n}]. The relation [R] describes the connection between the two successive numbers we try. Namely, [y] is [R]-less then [x] if we try [y] after [x], i.e., [y = S x] and [P x] is false. Then the absence of an infinite [R]-descending chain from 0 is equivalent to the termination of our searching algorithm. *) Let R (x y : nat) : Prop := x = S y /\ ~ P y. Local Notation acc x := (Acc R x). Lemma P_implies_acc : forall x : nat, P x -> acc x. Proof. intros x H. constructor. intros y [_ not_Px]. absurd (P x); assumption. Qed. Lemma P_eventually_implies_acc : forall (x : nat) (n : nat), P (n + x) -> acc x. Proof. intros x n; generalize x; clear x; induction n as [|n IH]; simpl. apply P_implies_acc. intros x H. constructor. intros y [fxy _]. apply IH. rewrite fxy. replace (n + S x) with (S (n + x)); auto with arith. Defined. Corollary P_eventually_implies_acc_ex : (exists n : nat, P n) -> acc 0. Proof. intros H; elim H. intros x Px. apply P_eventually_implies_acc with (n := x). replace (x + 0) with x; auto with arith. Defined. (** In the following statement, we use the trick with recursion on [Acc]. This is also where decidability of [P] is used. *) Theorem acc_implies_P_eventually : acc 0 -> {n : nat | P n}. Proof. 
intros Acc_0. pattern 0. apply Fix_F with (R := R); [| assumption]. clear Acc_0; intros x IH. destruct (P_decidable x) as [Px | not_Px]. exists x; simpl; assumption. set (y := S x). assert (Ryx : R y x). unfold R; split; auto. destruct (IH y Ryx) as [n Hn]. exists n; assumption. Defined. Theorem constructive_indefinite_ground_description_nat_Acc : (exists n : nat, P n) -> {n : nat | P n}. Proof. intros H; apply acc_implies_P_eventually. apply P_eventually_implies_acc_ex; assumption. Defined. End ConstructiveIndefiniteGroundDescription_Acc. (************************************************************************) Section ConstructiveGroundEpsilon_nat. Variable P : nat -> Prop. Hypothesis P_decidable : forall x : nat, {P x} + {~ P x}. Definition constructive_ground_epsilon_nat (E : exists n : nat, P n) : nat := proj1_sig (constructive_indefinite_ground_description_nat P P_decidable E). Definition constructive_ground_epsilon_spec_nat (E : (exists n, P n)) : P (constructive_ground_epsilon_nat E) := proj2_sig (constructive_indefinite_ground_description_nat P P_decidable E). End ConstructiveGroundEpsilon_nat. (************************************************************************) Section ConstructiveGroundEpsilon. (** For the current purpose, we say that a set [A] is countable if there are functions [f : A -> nat] and [g : nat -> A] such that [g] is a left inverse of [f]. *) Variable A : Type. Variable f : A -> nat. Variable g : nat -> A. Hypothesis gof_eq_id : forall x : A, g (f x) = x. Variable P : A -> Prop. Hypothesis P_decidable : forall x : A, {P x} + {~ P x}. Definition P' (x : nat) : Prop := P (g x). Lemma P'_decidable : forall n : nat, {P' n} + {~ P' n}. Proof. intro n; unfold P'; destruct (P_decidable (g n)); auto. Defined. Lemma constructive_indefinite_ground_description : (exists x : A, P x) -> {x : A | P x}. Proof. intro H. assert (H1 : exists n : nat, P' n). destruct H as [x Hx]. exists (f x); unfold P'. rewrite gof_eq_id; assumption. apply (constructive_indefinite_ground_description_nat P' P'_decidable) in H1. destruct H1 as [n Hn]. exists (g n); unfold P' in Hn; assumption. Defined. Lemma constructive_definite_ground_description : (exists! x : A, P x) -> {x : A | P x}. Proof. intros; apply constructive_indefinite_ground_description; firstorder. Defined. Definition constructive_ground_epsilon (E : exists x : A, P x) : A := proj1_sig (constructive_indefinite_ground_description E). Definition constructive_ground_epsilon_spec (E : (exists x, P x)) : P (constructive_ground_epsilon E) := proj2_sig (constructive_indefinite_ground_description E). End ConstructiveGroundEpsilon. (* begin hide *) (* Compatibility: the qualificative "ground" was absent from the initial names of the results in this file but this had introduced confusion with the similarly named statement in Description.v *) Notation constructive_indefinite_description_nat := constructive_indefinite_ground_description_nat (only parsing). Notation constructive_epsilon_spec_nat := constructive_ground_epsilon_spec_nat (only parsing). Notation constructive_epsilon_nat := constructive_ground_epsilon_nat (only parsing). Notation constructive_indefinite_description := constructive_indefinite_ground_description (only parsing). Notation constructive_definite_description := constructive_definite_ground_description (only parsing). Notation constructive_epsilon_spec := constructive_ground_epsilon_spec (only parsing). Notation constructive_epsilon := constructive_ground_epsilon (only parsing). 
(* end hide *) coq-8.4pl2/theories/Logic/Classical_Pred_Set.v0000640000175000001440000000322012010532755020355 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop, ~ (forall n:U, P n) -> exists n : U, ~ P n. Proof (Classical_Pred_Type.not_all_ex_not U). Lemma not_all_not_ex : forall P:U -> Prop, ~ (forall n:U, ~ P n) -> exists n : U, P n. Proof (Classical_Pred_Type.not_all_not_ex U). Lemma not_ex_all_not : forall P:U -> Prop, ~ (exists n : U, P n) -> forall n:U, ~ P n. Proof (Classical_Pred_Type.not_ex_all_not U). Lemma not_ex_not_all : forall P:U -> Prop, ~ (exists n : U, ~ P n) -> forall n:U, P n. Proof (Classical_Pred_Type.not_ex_not_all U). Lemma ex_not_not_all : forall P:U -> Prop, (exists n : U, ~ P n) -> ~ (forall n:U, P n). Proof (Classical_Pred_Type.ex_not_not_all U). Lemma all_not_not_ex : forall P:U -> Prop, (forall n:U, ~ P n) -> ~ (exists n : U, P n). Proof (Classical_Pred_Type.all_not_not_ex U). End Generic. coq-8.4pl2/theories/Logic/Description.v0000640000175000001440000000160412010532755017161 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop), (exists! x, P x) -> { x : A | P x }. coq-8.4pl2/theories/Logic/intro.tex0000750000175000001440000000036707267556340016411 0ustar notinusers\section{Logic}\label{Logic} This library deals with classical logic and its properties. The main file is {\tt Classical.v}. This library also provides some facts on equalities for dependent types. See the files {\tt Eqdep.v} and {\tt JMeq.v}. coq-8.4pl2/theories/Logic/ProofIrrelevanceFacts.v0000640000175000001440000000365412010532755021133 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. Proof. intros; rewrite M.proof_irrelevance with (p1:=h) (p2:=eq_refl p). reflexivity. Qed. End Eq_rect_eq. (** Export the theory of injective dependent elimination *) Module EqdepTheory := EqdepTheory(Eq_rect_eq). Export EqdepTheory. Scheme eq_indd := Induction for eq Sort Prop. (** We derive the irrelevance of the membership property for subsets *) Lemma subset_eq_compat : forall (U:Set) (P:U->Prop) (x y:U) (p:P x) (q:P y), x = y -> exist P x p = exist P y q. Proof. intros. rewrite M.proof_irrelevance with (p1:=q) (p2:=eq_rect x P p y H). elim H using eq_indd. reflexivity. Qed. Lemma subsetT_eq_compat : forall (U:Type) (P:U->Prop) (x y:U) (p:P x) (q:P y), x = y -> existT P x p = existT P y q. Proof. intros. rewrite M.proof_irrelevance with (p1:=q) (p2:=eq_rect x P p y H). elim H using eq_indd. reflexivity. Qed. End ProofIrrelevanceTheory. coq-8.4pl2/theories/Logic/ClassicalEpsilon.v0000640000175000001440000000705512010532755020134 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop), (exists x, P x) -> { x : A | P x }. Lemma constructive_definite_description : forall (A : Type) (P : A->Prop), (exists! x, P x) -> { x : A | P x }. Proof. intros; apply constructive_indefinite_description; firstorder. Qed. Theorem excluded_middle_informative : forall P:Prop, {P} + {~ P}. Proof. apply (constructive_definite_descr_excluded_middle constructive_definite_description classic). Qed. 
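(** Illustrative consequence, not part of the original file: from
    [excluded_middle_informative], every predicate is pointwise decidable
    in the strong, informative sense; the example name [strong_dec_sketch]
    is ad hoc. *)

Example strong_dec_sketch :
  forall (A:Type) (P:A -> Prop) (x:A), {P x} + {~ P x}.
Proof.
  intros A P x; apply excluded_middle_informative.
Qed.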
Theorem classical_indefinite_description : forall (A : Type) (P : A->Prop), inhabited A -> { x : A | (exists x, P x) -> P x }. Proof. intros A P i. destruct (excluded_middle_informative (exists x, P x)) as [Hex|HnonP]. apply constructive_indefinite_description with (P:= fun x => (exists x, P x) -> P x). destruct Hex as (x,Hx). exists x; intros _; exact Hx. assert {x : A | True} as (a,_). apply constructive_indefinite_description with (P := fun _ : A => True). destruct i as (a); firstorder. firstorder. Defined. (** Hilbert's epsilon operator *) Definition epsilon (A : Type) (i:inhabited A) (P : A->Prop) : A := proj1_sig (classical_indefinite_description P i). Definition epsilon_spec (A : Type) (i:inhabited A) (P : A->Prop) : (exists x, P x) -> P (epsilon i P) := proj2_sig (classical_indefinite_description P i). (** Open question: is classical_indefinite_description constructively provable from [relational_choice] and [constructive_definite_description] (at least, using the fact that [functional_choice] is provable from [relational_choice] and [unique_choice], we know that the double negation of [classical_indefinite_description] is provable (see [relative_non_contradiction_of_indefinite_desc]). *) (** A proof that if [P] is inhabited, [epsilon a P] does not depend on the actual proof that the domain of [P] is inhabited (proof idea kindly provided by Pierre Castéran) *) Lemma epsilon_inh_irrelevance : forall (A:Type) (i j : inhabited A) (P:A->Prop), (exists x, P x) -> epsilon i P = epsilon j P. Proof. intros. unfold epsilon, classical_indefinite_description. destruct (excluded_middle_informative (exists x : A, P x)) as [|[]]; trivial. Qed. Opaque epsilon. (** *** Weaker lemmas (compatibility lemmas) *) Theorem choice : forall (A B : Type) (R : A->B->Prop), (forall x : A, exists y : B, R x y) -> (exists f : A->B, forall x : A, R x (f x)). Proof. intros A B R H. exists (fun x => proj1_sig (constructive_indefinite_description _ (H x))). intro x. apply (proj2_sig (constructive_indefinite_description _ (H x))). Qed. coq-8.4pl2/theories/Logic/SetIsType.v0000640000175000001440000000167512010532755016577 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := JMeq_refl : JMeq x x. Set Elimination Schemes. Arguments JMeq_refl {A x} , [A] x. Hint Resolve JMeq_refl. Lemma JMeq_sym : forall (A B:Type) (x:A) (y:B), JMeq x y -> JMeq y x. Proof. destruct 1; trivial. Qed. Hint Immediate JMeq_sym. Lemma JMeq_trans : forall (A B C:Type) (x:A) (y:B) (z:C), JMeq x y -> JMeq y z -> JMeq x z. Proof. destruct 2; trivial. Qed. Axiom JMeq_eq : forall (A:Type) (x y:A), JMeq x y -> x = y. Lemma JMeq_ind : forall (A:Type) (x:A) (P:A -> Prop), P x -> forall y, JMeq x y -> P y. Proof. intros A x P H y H'; case JMeq_eq with (1 := H'); trivial. Qed. Lemma JMeq_rec : forall (A:Type) (x:A) (P:A -> Set), P x -> forall y, JMeq x y -> P y. Proof. intros A x P H y H'; case JMeq_eq with (1 := H'); trivial. Qed. Lemma JMeq_rect : forall (A:Type) (x:A) (P:A->Type), P x -> forall y, JMeq x y -> P y. Proof. intros A x P H y H'; case JMeq_eq with (1 := H'); trivial. Qed. Lemma JMeq_ind_r : forall (A:Type) (x:A) (P:A -> Prop), P x -> forall y, JMeq y x -> P y. Proof. intros A x P H y H'; case JMeq_eq with (1 := JMeq_sym H'); trivial. Qed. Lemma JMeq_rec_r : forall (A:Type) (x:A) (P:A -> Set), P x -> forall y, JMeq y x -> P y. Proof. intros A x P H y H'; case JMeq_eq with (1 := JMeq_sym H'); trivial. Qed. 
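(** Tiny usage example, not part of the original file: both sides below
    live in [nat], so the statement also follows from plain [eq], but it
    shows how a [JMeq] goal between convertible terms is discharged; the
    example name [JMeq_plus_sketch] is ad hoc. *)

Example JMeq_plus_sketch : JMeq (0 + 1) 1.
Proof.
  apply JMeq_refl.
Qed.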
Lemma JMeq_rect_r : forall (A:Type) (x:A) (P:A -> Type), P x -> forall y, JMeq y x -> P y. Proof. intros A x P H y H'; case JMeq_eq with (1 := JMeq_sym H'); trivial. Qed. Lemma JMeq_congr : forall (A:Type) (x:A) (B:Type) (f:A->B) (y:A), JMeq x y -> f x = f y. Proof. intros A x B f y H; case JMeq_eq with (1 := H); trivial. Qed. (** [JMeq] is equivalent to [eq_dep Type (fun X => X)] *) Require Import Eqdep. Lemma JMeq_eq_dep_id : forall (A B:Type) (x:A) (y:B), JMeq x y -> eq_dep Type (fun X => X) A x B y. Proof. destruct 1. apply eq_dep_intro. Qed. Lemma eq_dep_id_JMeq : forall (A B:Type) (x:A) (y:B), eq_dep Type (fun X => X) A x B y -> JMeq x y. Proof. destruct 1. apply JMeq_refl. Qed. (** [eq_dep U P p x q y] is strictly finer than [JMeq (P p) x (P q) y] *) Lemma eq_dep_JMeq : forall U P p x q y, eq_dep U P p x q y -> JMeq x y. Proof. destruct 1. apply JMeq_refl. Qed. Lemma eq_dep_strictly_stronger_JMeq : exists U P p q x y, JMeq x y /\ ~ eq_dep U P p x q y. Proof. exists bool. exists (fun _ => True). exists true. exists false. exists I. exists I. split. trivial. intro H. assert (true=false) by (destruct H; reflexivity). discriminate. Qed. (** However, when the dependencies are equal, [JMeq (P p) x (P q) y] is as strong as [eq_dep U P p x q y] (this uses [JMeq_eq]) *) Lemma JMeq_eq_dep : forall U (P:U->Prop) p q (x:P p) (y:P q), p = q -> JMeq x y -> eq_dep U P p x q y. Proof. intros. destruct H. apply JMeq_eq in H0 as ->. reflexivity. Qed. (* Compatibility *) Notation sym_JMeq := JMeq_sym (only parsing). Notation trans_JMeq := JMeq_trans (only parsing). coq-8.4pl2/theories/Logic/Decidable.v0000640000175000001440000001156512010532755016541 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (~ P -> False) -> P. Proof. unfold decidable; tauto. Qed. Theorem dec_True : decidable True. Proof. unfold decidable; auto. Qed. Theorem dec_False : decidable False. Proof. unfold decidable, not; auto. Qed. Theorem dec_or : forall A B:Prop, decidable A -> decidable B -> decidable (A \/ B). Proof. unfold decidable; tauto. Qed. Theorem dec_and : forall A B:Prop, decidable A -> decidable B -> decidable (A /\ B). Proof. unfold decidable; tauto. Qed. Theorem dec_not : forall A:Prop, decidable A -> decidable (~ A). Proof. unfold decidable; tauto. Qed. Theorem dec_imp : forall A B:Prop, decidable A -> decidable B -> decidable (A -> B). Proof. unfold decidable; tauto. Qed. Theorem dec_iff : forall A B:Prop, decidable A -> decidable B -> decidable (A<->B). Proof. unfold decidable; tauto. Qed. Theorem not_not : forall P:Prop, decidable P -> ~ ~ P -> P. Proof. unfold decidable; tauto. Qed. Theorem not_or : forall A B:Prop, ~ (A \/ B) -> ~ A /\ ~ B. Proof. tauto. Qed. Theorem not_and : forall A B:Prop, decidable A -> ~ (A /\ B) -> ~ A \/ ~ B. Proof. unfold decidable; tauto. Qed. Theorem not_imp : forall A B:Prop, decidable A -> ~ (A -> B) -> A /\ ~ B. Proof. unfold decidable; tauto. Qed. Theorem imp_simp : forall A B:Prop, decidable A -> (A -> B) -> ~ A \/ B. Proof. unfold decidable; tauto. Qed. Theorem not_iff : forall A B:Prop, decidable A -> decidable B -> ~ (A <-> B) -> (A /\ ~ B) \/ (~ A /\ B). Proof. unfold decidable; tauto. Qed. (** Results formulated with iff, used in FSetDecide. Negation are expanded since it is unclear whether setoid rewrite will always perform conversion. *) (** We begin with lemmas that, when read from left to right, can be understood as ways to eliminate uses of [not]. 
*) Theorem not_true_iff : (True -> False) <-> False. Proof. tauto. Qed. Theorem not_false_iff : (False -> False) <-> True. Proof. tauto. Qed. Theorem not_not_iff : forall A:Prop, decidable A -> (((A -> False) -> False) <-> A). Proof. unfold decidable; tauto. Qed. Theorem contrapositive : forall A B:Prop, decidable A -> (((A -> False) -> (B -> False)) <-> (B -> A)). Proof. unfold decidable; tauto. Qed. Lemma or_not_l_iff_1 : forall A B: Prop, decidable A -> ((A -> False) \/ B <-> (A -> B)). Proof. unfold decidable. tauto. Qed. Lemma or_not_l_iff_2 : forall A B: Prop, decidable B -> ((A -> False) \/ B <-> (A -> B)). Proof. unfold decidable. tauto. Qed. Lemma or_not_r_iff_1 : forall A B: Prop, decidable A -> (A \/ (B -> False) <-> (B -> A)). Proof. unfold decidable. tauto. Qed. Lemma or_not_r_iff_2 : forall A B: Prop, decidable B -> (A \/ (B -> False) <-> (B -> A)). Proof. unfold decidable. tauto. Qed. Lemma imp_not_l : forall A B: Prop, decidable A -> (((A -> False) -> B) <-> (A \/ B)). Proof. unfold decidable. tauto. Qed. (** Moving Negations Around: We have four lemmas that, when read from left to right, describe how to push negations toward the leaves of a proposition and, when read from right to left, describe how to pull negations toward the top of a proposition. *) Theorem not_or_iff : forall A B:Prop, (A \/ B -> False) <-> (A -> False) /\ (B -> False). Proof. tauto. Qed. Lemma not_and_iff : forall A B:Prop, (A /\ B -> False) <-> (A -> B -> False). Proof. tauto. Qed. Lemma not_imp_iff : forall A B:Prop, decidable A -> (((A -> B) -> False) <-> A /\ (B -> False)). Proof. unfold decidable. tauto. Qed. Lemma not_imp_rev_iff : forall A B : Prop, decidable A -> (((A -> B) -> False) <-> (B -> False) /\ A). Proof. unfold decidable. tauto. Qed. (** With the following hint database, we can leverage [auto] to check decidability of propositions. *) Hint Resolve dec_True dec_False dec_or dec_and dec_imp dec_not dec_iff : decidable_prop. (** [solve_decidable using lib] will solve goals about the decidability of a proposition, assisted by an auxiliary database of lemmas. The database is intended to contain lemmas stating the decidability of base propositions, (e.g., the decidability of equality on a particular inductive type). *) Tactic Notation "solve_decidable" "using" ident(db) := match goal with | |- decidable _ => solve [ auto 100 with decidable_prop db ] end. Tactic Notation "solve_decidable" := solve_decidable using core. coq-8.4pl2/theories/Logic/Classical_Prop.v0000640000175000001440000000630612010532755017600 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* p. Proof. unfold not; intros; elim (classic p); auto. intro NP; elim (H NP). Qed. (** Peirce's law states [forall P Q:Prop, ((P -> Q) -> P) -> P]. Thanks to [forall P, False -> P], it is equivalent to the following form *) Lemma Peirce : forall P:Prop, ((P -> False) -> P) -> P. Proof. intros P H; destruct (classic P); auto. Qed. Lemma not_imply_elim : forall P Q:Prop, ~ (P -> Q) -> P. Proof. intros; apply NNPP; red. intro; apply H; intro; absurd P; trivial. Qed. Lemma not_imply_elim2 : forall P Q:Prop, ~ (P -> Q) -> ~ Q. Proof. (* Intuitionistic *) tauto. Qed. Lemma imply_to_or : forall P Q:Prop, (P -> Q) -> ~ P \/ Q. Proof. intros; elim (classic P); auto. Qed. Lemma imply_to_and : forall P Q:Prop, ~ (P -> Q) -> P /\ ~ Q. Proof. intros; split. apply not_imply_elim with Q; trivial. apply not_imply_elim2 with P; trivial. Qed. 
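(** Illustrative corollary, not part of the original file: with [classic],
    via [NNPP], double negation can be both dropped and introduced; the
    lemma name [NNPP_iff_sketch] is ad hoc. *)

Lemma NNPP_iff_sketch : forall P:Prop, ~ ~ P <-> P.
Proof.
  intro P; split.
  exact (NNPP P).
  intros p np; exact (np p).
Qed.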
Lemma or_to_imply : forall P Q:Prop, ~ P \/ Q -> P -> Q. Proof. (* Intuitionistic *) tauto. Qed. Lemma not_and_or : forall P Q:Prop, ~ (P /\ Q) -> ~ P \/ ~ Q. Proof. intros; elim (classic P); auto. Qed. Lemma or_not_and : forall P Q:Prop, ~ P \/ ~ Q -> ~ (P /\ Q). Proof. simple induction 1; red; simple induction 2; auto. Qed. Lemma not_or_and : forall P Q:Prop, ~ (P \/ Q) -> ~ P /\ ~ Q. Proof. (* Intuitionistic *) tauto. Qed. Lemma and_not_or : forall P Q:Prop, ~ P /\ ~ Q -> ~ (P \/ Q). Proof. (* Intuitionistic *) tauto. Qed. Lemma imply_and_or : forall P Q:Prop, (P -> Q) -> P \/ Q -> Q. Proof. (* Intuitionistic *) tauto. Qed. Lemma imply_and_or2 : forall P Q R:Prop, (P -> Q) -> P \/ R -> Q \/ R. Proof. (* Intuitionistic *) tauto. Qed. Lemma proof_irrelevance : forall (P:Prop) (p1 p2:P), p1 = p2. Proof proof_irrelevance_cci classic. (* classical_left transforms |- A \/ B into ~B |- A *) (* classical_right transforms |- A \/ B into ~A |- B *) Ltac classical_right := match goal with | _:_ |-?X1 \/ _ => (elim (classic X1);intro;[left;trivial|right]) end. Ltac classical_left := match goal with | _:_ |- _ \/?X1 => (elim (classic X1);intro;[right;trivial|left]) end. Require Export EqdepFacts. Module Eq_rect_eq. Lemma eq_rect_eq : forall (U:Type) (p:U) (Q:U -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. Proof. intros; rewrite proof_irrelevance with (p1:=h) (p2:=eq_refl p); reflexivity. Qed. End Eq_rect_eq. Module EqdepTheory := EqdepTheory(Eq_rect_eq). Export EqdepTheory. coq-8.4pl2/theories/Logic/Classical.v0000640000175000001440000000123212010532755016571 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. End Eq_rect_eq. Module EqdepTheory := EqdepTheory(Eq_rect_eq). Export EqdepTheory. (** Exported hints *) Hint Resolve eq_dep_eq: eqdep v62. Hint Resolve inj_pair2 inj_pairT2: eqdep. coq-8.4pl2/theories/Logic/Classical_Type.v0000640000175000001440000000125112010532755017573 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type) (R:forall x:A, B x -> Prop), (forall x : A, exists! y : B x, R x y) -> (exists f : (forall x:A, B x), forall x:A, R x (f x)). (** Unique choice reifies functional relations into functions *) Theorem unique_choice : forall (A B:Type) (R:A -> B -> Prop), (forall x:A, exists! y : B, R x y) -> (exists f:A->B, forall x:A, R x (f x)). Proof. intros A B. apply (dependent_unique_choice A (fun _ => B)). Qed. (** The following proof comes from [[ChicliPottierSimpson02]] *) Require Import Setoid. Theorem classic_set_in_prop_context : forall C:Prop, ((forall P:Prop, {P} + {~ P}) -> C) -> C. Proof. intros C HnotEM. set (R := fun A b => A /\ true = b \/ ~ A /\ false = b). assert (H : exists f : Prop -> bool, (forall A:Prop, R A (f A))). apply unique_choice. intro A. destruct (classic A) as [Ha| Hnota]. exists true; split. left; split; [ assumption | reflexivity ]. intros y [[_ Hy]| [Hna _]]. assumption. contradiction. exists false; split. right; split; [ assumption | reflexivity ]. intros y [[Ha _]| [_ Hy]]. contradiction. assumption. destruct H as [f Hf]. apply HnotEM. intro P. assert (HfP := Hf P). (* Elimination from Hf to Set is not allowed but from f to Set yes ! *) destruct (f P). left. destruct HfP as [[Ha _]| [_ Hfalse]]. assumption. discriminate. right. destruct HfP as [[_ Hfalse]| [Hna _]]. discriminate. assumption. 
Qed. Corollary not_not_classic_set : ((forall P:Prop, {P} + {~ P}) -> False) -> False. Proof. apply classic_set_in_prop_context. Qed. (* Compatibility *) Notation classic_set := not_not_classic_set (only parsing). coq-8.4pl2/theories/Logic/ExtensionalityFacts.v0000640000175000001440000001062712010532755020703 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Equality of projections from diagonal 3. Functional extensionality <-> Unicity of inverse bijections 4. Functional extensionality <-> Bijectivity of bijective composition *) Set Implicit Arguments. (**********************************************************************) (** * Definitions *) (** Being an inverse *) Definition is_inverse A B f g := (forall a:A, g (f a) = a) /\ (forall b:B, f (g b) = b). (** The diagonal over A and the one-one correspondence with A *) Record Delta A := { pi1:A; pi2:A; eq:pi1=pi2 }. Definition delta {A} (a:A) := {|pi1 := a; pi2 := a; eq := eq_refl a |}. Arguments pi1 {A} _. Arguments pi2 {A} _. Lemma diagonal_projs_same_behavior : forall A (x:Delta A), pi1 x = pi2 x. Proof. destruct x as (a1,a2,Heq); assumption. Qed. Lemma diagonal_inverse1 : forall A, is_inverse (A:=A) delta pi1. Proof. split; [trivial|]; destruct b as (a1,a2,[]); reflexivity. Qed. Lemma diagonal_inverse2 : forall A, is_inverse (A:=A) delta pi2. Proof. split; [trivial|]; destruct b as (a1,a2,[]); reflexivity. Qed. (** Functional extensionality *) Local Notation FunctionalExtensionality := (forall A B (f g : A -> B), (forall x, f x = g x) -> f = g). (** Equality of projections from diagonal *) Local Notation EqDeltaProjs := (forall A, pi1 = pi2 :> (Delta A -> A)). (** Unicity of bijection inverse *) Local Notation UniqueInverse := (forall A B (f:A->B) g1 g2, is_inverse f g1 -> is_inverse f g2 -> g1 = g2). (** Bijectivity of bijective composition *) Definition action A B C (f:A->B) := (fun h:B->C => fun x => h (f x)). Local Notation BijectivityBijectiveComp := (forall A B C (f:A->B) g, is_inverse f g -> is_inverse (A:=B->C) (action f) (action g)). (**********************************************************************) (** * Functional extensionality <-> Equality of projections from diagonal *) Theorem FunctExt_iff_EqDeltaProjs : FunctionalExtensionality <-> EqDeltaProjs. Proof. split. - intros FunExt *; apply FunExt, diagonal_projs_same_behavior. - intros EqProjs **; change f with (fun x => pi1 {|pi1:=f x; pi2:=g x; eq:=H x|}). rewrite EqProjs; reflexivity. Qed. (**********************************************************************) (** * Functional extensionality <-> Unicity of bijection inverse *) Lemma FunctExt_UniqInverse : FunctionalExtensionality -> UniqueInverse. Proof. intros FunExt * (Hg1f,Hfg1) (Hg2f,Hfg2). apply FunExt. intros; congruence. Qed. Lemma UniqInverse_EqDeltaProjs : UniqueInverse -> EqDeltaProjs. Proof. intros UniqInv *. apply UniqInv with delta; [apply diagonal_inverse1 | apply diagonal_inverse2]. Qed. Theorem FunctExt_iff_UniqInverse : FunctionalExtensionality <-> UniqueInverse. Proof. split. - apply FunctExt_UniqInverse. - intro; apply FunctExt_iff_EqDeltaProjs, UniqInverse_EqDeltaProjs; trivial. Qed. (**********************************************************************) (** * Functional extensionality <-> Bijectivity of bijective composition *) Lemma FunctExt_BijComp : FunctionalExtensionality -> BijectivityBijectiveComp. Proof. intros FunExt * (Hgf,Hfg). split; unfold action. 
- intros h; apply FunExt; intro b; rewrite Hfg; reflexivity. - intros h; apply FunExt; intro a; rewrite Hgf; reflexivity. Qed. Lemma BijComp_FunctExt : BijectivityBijectiveComp -> FunctionalExtensionality. Proof. intros BijComp. apply FunctExt_iff_UniqInverse. intros * H1 H2. destruct BijComp with (C:=A) (1:=H2) as (Hg2f,_). destruct BijComp with (C:=A) (1:=H1) as (_,Hfg1). rewrite <- (Hg2f g1). change g1 with (action g1 (fun x => x)). rewrite -> (Hfg1 (fun x => x)). reflexivity. Qed. coq-8.4pl2/theories/Logic/ClassicalDescription.v0000640000175000001440000000575612010532755021014 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop), inhabited A -> { x : A | (exists! x : A, P x) -> P x }. Proof. intros A P i. destruct (excluded_middle_informative (exists! x, P x)) as [Hex|HnonP]. apply constructive_definite_description with (P:= fun x => (exists! x : A, P x) -> P x). destruct Hex as (x,(Hx,Huni)). exists x; split. intros _; exact Hx. firstorder. exists i; tauto. Qed. (** Church's iota operator *) Definition iota (A : Type) (i:inhabited A) (P : A->Prop) : A := proj1_sig (classical_definite_description P i). Definition iota_spec (A : Type) (i:inhabited A) (P : A->Prop) : (exists! x:A, P x) -> P (iota i P) := proj2_sig (classical_definite_description P i). (** Axiom of unique "choice" (functional reification of functional relations) *) Theorem dependent_unique_choice : forall (A:Type) (B:A -> Type) (R:forall x:A, B x -> Prop), (forall x:A, exists! y : B x, R x y) -> (exists f : (forall x:A, B x), forall x:A, R x (f x)). Proof. intros A B R H. assert (Hexuni:forall x, exists! y, R x y). intro x. apply H. exists (fun x => proj1_sig (constructive_definite_description (R x) (Hexuni x))). intro x. apply (proj2_sig (constructive_definite_description (R x) (Hexuni x))). Qed. Theorem unique_choice : forall (A B:Type) (R:A -> B -> Prop), (forall x:A, exists! y : B, R x y) -> (exists f : A -> B, forall x:A, R x (f x)). Proof. intros A B. apply dependent_unique_choice with (B:=fun _:A => B). Qed. (** Compatibility lemmas *) Unset Implicit Arguments. Definition dependent_description := dependent_unique_choice. Definition description := unique_choice. coq-8.4pl2/theories/Logic/FunctionalExtensionality.v0000640000175000001440000000351412010532755021742 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* B}, f = g -> forall x, f x = g x. Proof. intros. rewrite H. auto. Qed. (** Statements of functional extensionality for simple and dependent functions. *) Axiom functional_extensionality_dep : forall {A} {B : A -> Type}, forall (f g : forall x : A, B x), (forall x, f x = g x) -> f = g. Lemma functional_extensionality {A B} (f g : A -> B) : (forall x, f x = g x) -> f = g. Proof. intros ; eauto using @functional_extensionality_dep. Qed. (** Apply [functional_extensionality], introducing variable x. *) Tactic Notation "extensionality" ident(x) := match goal with [ |- ?X = ?Y ] => (apply (@functional_extensionality _ _ X Y) || apply (@functional_extensionality_dep _ _ X Y)) ; intro x end. (** Eta expansion follows from extensionality. *) Lemma eta_expansion_dep {A} {B : A -> Type} (f : forall x : A, B x) : f = fun x => f x. Proof. intros. extensionality x. reflexivity. Qed. Lemma eta_expansion {A B} (f : A -> B) : f = fun x => f x. Proof. apply (eta_expansion_dep f). Qed. 
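(** Short usage example, not part of the original file: the
    [extensionality] tactic reduces an equality between functions to a
    pointwise goal; the example name [negb_involutive_ext_sketch] is ad hoc
    and only uses the prelude. *)

Example negb_involutive_ext_sketch :
  (fun b:bool => negb (negb b)) = (fun b:bool => b).
Proof.
  extensionality b.
  destruct b; reflexivity.
Qed.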
coq-8.4pl2/theories/Logic/ProofIrrelevance.v0000640000175000001440000000154512010532755020147 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* B->Prop), (forall x : A, exists y : B, R x y) -> exists R' : A->B->Prop, subrelation R' R /\ forall x : A, exists! y : B, R' x y. coq-8.4pl2/theories/Logic/Berardi.v0000640000175000001440000001013112010532755016241 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* > *) Set Implicit Arguments. Section Berardis_paradox. (** Excluded middle *) Hypothesis EM : forall P:Prop, P \/ ~ P. (** Conditional on any proposition. *) Definition IFProp (P B:Prop) (e1 e2:P) := match EM B with | or_introl _ => e1 | or_intror _ => e2 end. (** Axiom of choice applied to disjunction. Provable in Coq because of dependent elimination. *) Lemma AC_IF : forall (P B:Prop) (e1 e2:P) (Q:P -> Prop), (B -> Q e1) -> (~ B -> Q e2) -> Q (IFProp B e1 e2). Proof. intros P B e1 e2 Q p1 p2. unfold IFProp. case (EM B); assumption. Qed. (** We assume a type with two elements. They play the role of booleans. The main theorem under the current assumptions is that [T=F] *) Variable Bool : Prop. Variable T : Bool. Variable F : Bool. (** The powerset operator *) Definition pow (P:Prop) := P -> Bool. (** A piece of theory about retracts *) Section Retracts. Variables A B : Prop. Record retract : Prop := {i : A -> B; j : B -> A; inv : forall a:A, j (i a) = a}. Record retract_cond : Prop := {i2 : A -> B; j2 : B -> A; inv2 : retract -> forall a:A, j2 (i2 a) = a}. (** The dependent elimination above implies the axiom of choice: *) Lemma AC : forall r:retract_cond, retract -> forall a:A, j2 r (i2 r a) = a. Proof. intros r. case r; simpl. trivial. Qed. End Retracts. (** This lemma is basically a commutation of implication and existential quantification: (EX x | A -> P(x)) <=> (A -> EX x | P(x)) which is provable in classical logic ( => is already provable in intuitionnistic logic). *) Lemma L1 : forall A B:Prop, retract_cond (pow A) (pow B). Proof. intros A B. destruct (EM (retract (pow A) (pow B))) as [(f0,g0,e) | hf]. exists f0 g0; trivial. exists (fun (x:pow A) (y:B) => F) (fun (x:pow B) (y:A) => F); intros; destruct hf; auto. Qed. (** The paradoxical set *) Definition U := forall P:Prop, pow P. (** Bijection between [U] and [(pow U)] *) Definition f (u:U) : pow U := u U. Definition g (h:pow U) : U := fun X => let lX := j2 (L1 X U) in let rU := i2 (L1 U U) in lX (rU h). (** We deduce that the powerset of [U] is a retract of [U]. This lemma is stated in Berardi's article, but is not used afterwards. *) Lemma retract_pow_U_U : retract (pow U) U. Proof. exists g f. intro a. unfold f, g; simpl. apply AC. exists (fun x:pow U => x) (fun x:pow U => x). trivial. Qed. (** Encoding of Russel's paradox *) (** The boolean negation. *) Definition Not_b (b:Bool) := IFProp (b = T) F T. (** the set of elements not belonging to itself *) Definition R : U := g (fun u:U => Not_b (u U u)). Lemma not_has_fixpoint : R R = Not_b (R R). Proof. unfold R at 1. unfold g. rewrite AC with (r := L1 U U) (a := fun u:U => Not_b (u U u)). trivial. exists (fun x:pow U => x) (fun x:pow U => x); trivial. Qed. Theorem classical_proof_irrelevence : T = F. Proof. generalize not_has_fixpoint. unfold Not_b. apply AC_IF. intros is_true is_false. elim is_true; elim is_false; trivial. intros not_true is_true. 
elim not_true; trivial. Qed. End Berardis_paradox. coq-8.4pl2/theories/Logic/Classical_Pred_Type.v0000640000175000001440000000365312010532755020555 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop, ~ (forall n:U, ~ P n) -> exists n : U, P n. Proof. intros P notall. apply NNPP. intro abs. apply notall. intros n H. apply abs; exists n; exact H. Qed. Lemma not_all_ex_not : forall P:U -> Prop, ~ (forall n:U, P n) -> exists n : U, ~ P n. Proof. intros P notall. apply not_all_not_ex with (P:=fun x => ~ P x). intro all; apply notall. intro n; apply NNPP. apply all. Qed. Lemma not_ex_all_not : forall P:U -> Prop, ~ (exists n : U, P n) -> forall n:U, ~ P n. Proof. (* Intuitionistic *) unfold not; intros P notex n abs. apply notex. exists n; trivial. Qed. Lemma not_ex_not_all : forall P:U -> Prop, ~ (exists n : U, ~ P n) -> forall n:U, P n. Proof. intros P H n. apply NNPP. red; intro K; apply H; exists n; trivial. Qed. Lemma ex_not_not_all : forall P:U -> Prop, (exists n : U, ~ P n) -> ~ (forall n:U, P n). Proof. (* Intuitionistic *) unfold not; intros P exnot allP. elim exnot; auto. Qed. Lemma all_not_not_ex : forall P:U -> Prop, (forall n:U, ~ P n) -> ~ (exists n : U, P n). Proof. (* Intuitionistic *) unfold not; intros P allnot exP; elim exP; intros n p. apply allnot with n; auto. Qed. End Generic. coq-8.4pl2/theories/Logic/ClassicalChoice.v0000640000175000001440000000401112010532755017702 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false] in [Set]. *) Require Export ClassicalUniqueChoice. Require Export RelationalChoice. Require Import ChoiceFacts. Set Implicit Arguments. Definition subset (U:Type) (P Q:U->Prop) : Prop := forall x, P x -> Q x. Theorem singleton_choice : forall (A : Type) (P : A->Prop), (exists x : A, P x) -> exists P' : A->Prop, subset P' P /\ exists! x, P' x. Proof. intros A P H. destruct (relational_choice unit A (fun _ => P) (fun _ => H)) as (R',(Hsub,HR')). exists (R' tt); firstorder. Qed. Theorem choice : forall (A B : Type) (R : A->B->Prop), (forall x : A, exists y : B, R x y) -> exists f : A->B, (forall x : A, R x (f x)). Proof. intros A B. apply description_rel_choice_imp_funct_choice. exact (unique_choice A B). exact (relational_choice A B). Qed. coq-8.4pl2/theories/Logic/Epsilon.v0000640000175000001440000000440712010532755016313 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop), inhabited A -> { x : A | (exists x, P x) -> P x }. Lemma constructive_indefinite_description : forall (A : Type) (P : A->Prop), (exists x, P x) -> { x : A | P x }. Proof. apply epsilon_imp_constructive_indefinite_description. exact epsilon_statement. Qed. Lemma small_drinkers'_paradox : forall (A:Type) (P:A -> Prop), inhabited A -> exists x, (exists x, P x) -> P x. Proof. apply epsilon_imp_small_drinker. exact epsilon_statement. Qed. Theorem iota_statement : forall (A : Type) (P : A->Prop), inhabited A -> { x : A | (exists! x : A, P x) -> P x }. Proof. intros; destruct epsilon_statement with (P:=P); firstorder. Qed. Lemma constructive_definite_description : forall (A : Type) (P : A->Prop), (exists! x, P x) -> { x : A | P x }. Proof. apply iota_imp_constructive_definite_description. exact iota_statement. Qed. 
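(** Usage sketch, not part of the original file: extracting an explicit
    witness from a bare existence statement over [nat]; the example name
    [nat_witness_sketch] is ad hoc. *)

Example nat_witness_sketch :
  (exists n:nat, n = 2) -> { n:nat | n = 2 }.
Proof.
  intro H; apply constructive_indefinite_description; exact H.
Qed.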
(** Hilbert's epsilon operator and its specification *) Definition epsilon (A : Type) (i:inhabited A) (P : A->Prop) : A := proj1_sig (epsilon_statement P i). Definition epsilon_spec (A : Type) (i:inhabited A) (P : A->Prop) : (exists x, P x) -> P (epsilon i P) := proj2_sig (epsilon_statement P i). (** Church's iota operator and its specification *) Definition iota (A : Type) (i:inhabited A) (P : A->Prop) : A := proj1_sig (iota_statement P i). Definition iota_spec (A : Type) (i:inhabited A) (P : A->Prop) : (exists! x:A, P x) -> P (iota i P) := proj2_sig (iota_statement P i). coq-8.4pl2/theories/Logic/Eqdep_dec.v0000640000175000001440000002134212010532755016550 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* | in Lego adapted to Coq by B. Barras Credit: Proofs up to [K_dec] follow an outline by Michael Hedberg Table of contents: 1. Streicher's K and injectivity of dependent pair hold on decidable types 1.1. Definition of the functor that builds properties of dependent equalities from a proof of decidability of equality for a set in Type 1.2. Definition of the functor that builds properties of dependent equalities from a proof of decidability of equality for a set in Set *) (************************************************************************) (** * Streicher's K and injectivity of dependent pair hold on decidable types *) Set Implicit Arguments. Section EqdepDec. Variable A : Type. Let comp (x y y':A) (eq1:x = y) (eq2:x = y') : y = y' := eq_ind _ (fun a => a = y') eq2 _ eq1. Remark trans_sym_eq : forall (x y:A) (u:x = y), comp u u = eq_refl y. Proof. intros. case u; trivial. Qed. Variable eq_dec : forall x y:A, x = y \/ x <> y. Variable x : A. Let nu (y:A) (u:x = y) : x = y := match eq_dec x y with | or_introl eqxy => eqxy | or_intror neqxy => False_ind _ (neqxy u) end. Let nu_constant : forall (y:A) (u v:x = y), nu u = nu v. intros. unfold nu. case (eq_dec x y); intros. reflexivity. case n; trivial. Qed. Let nu_inv (y:A) (v:x = y) : x = y := comp (nu (eq_refl x)) v. Remark nu_left_inv : forall (y:A) (u:x = y), nu_inv (nu u) = u. Proof. intros. case u; unfold nu_inv. apply trans_sym_eq. Qed. Theorem eq_proofs_unicity : forall (y:A) (p1 p2:x = y), p1 = p2. Proof. intros. elim nu_left_inv with (u := p1). elim nu_left_inv with (u := p2). elim nu_constant with y p1 p2. reflexivity. Qed. Theorem K_dec : forall P:x = x -> Prop, P (eq_refl x) -> forall p:x = x, P p. Proof. intros. elim eq_proofs_unicity with x (eq_refl x) p. trivial. Qed. (** The corollary *) Let proj (P:A -> Prop) (exP:ex P) (def:P x) : P x := match exP with | ex_intro x' prf => match eq_dec x' x with | or_introl eqprf => eq_ind x' P prf x eqprf | _ => def end end. Theorem inj_right_pair : forall (P:A -> Prop) (y y':P x), ex_intro P x y = ex_intro P x y' -> y = y'. Proof. intros. cut (proj (ex_intro P x y) y = proj (ex_intro P x y') y). simpl. case (eq_dec x x). intro e. elim e using K_dec; trivial. intros. case n; trivial. case H. reflexivity. Qed. End EqdepDec. Require Import EqdepFacts. (** We deduce axiom [K] for (decidable) types *) Theorem K_dec_type : forall A:Type, (forall x y:A, {x = y} + {x <> y}) -> forall (x:A) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. Proof. intros A eq_dec x P H p. elim p using K_dec; intros. case (eq_dec x0 y); [left|right]; assumption. trivial. Qed. 
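(** Usage sketch, added as an illustration and not part of the original
    file: on [bool], whose equality is decidable, [eq_proofs_unicity]
    identifies every proof of [b = b] with [eq_refl b]. *)

Example eq_proofs_unicity_bool_example :
  forall (b:bool) (p : b = b), p = eq_refl b.
Proof.
  intros b p.
  apply eq_proofs_unicity.
  intros x y; destruct x; destruct y;
    first [ left; reflexivity | right; discriminate ].
Qed.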
Theorem K_dec_set : forall A:Set, (forall x y:A, {x = y} + {x <> y}) -> forall (x:A) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. Proof fun A => K_dec_type (A:=A). (** We deduce the [eq_rect_eq] axiom for (decidable) types *) Theorem eq_rect_eq_dec : forall A:Type, (forall x y:A, {x = y} + {x <> y}) -> forall (p:A) (Q:A -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. Proof. intros A eq_dec. apply (Streicher_K__eq_rect_eq A (K_dec_type eq_dec)). Qed. (** We deduce the injectivity of dependent equality for decidable types *) Theorem eq_dep_eq_dec : forall A:Type, (forall x y:A, {x = y} + {x <> y}) -> forall (P:A->Type) (p:A) (x y:P p), eq_dep A P p x p y -> x = y. Proof (fun A eq_dec => eq_rect_eq__eq_dep_eq A (eq_rect_eq_dec eq_dec)). Theorem UIP_dec : forall (A:Type), (forall x y:A, {x = y} + {x <> y}) -> forall (x y:A) (p1 p2:x = y), p1 = p2. Proof (fun A eq_dec => eq_dep_eq__UIP A (eq_dep_eq_dec eq_dec)). Unset Implicit Arguments. (************************************************************************) (** ** Definition of the functor that builds properties of dependent equalities on decidable sets in Type *) (** The signature of decidable sets in [Type] *) Module Type DecidableType. Parameter U:Type. Axiom eq_dec : forall x y:U, {x = y} + {x <> y}. End DecidableType. (** The module [DecidableEqDep] collects equality properties for decidable set in [Type] *) Module DecidableEqDep (M:DecidableType). Import M. (** Invariance by Substitution of Reflexive Equality Proofs *) Lemma eq_rect_eq : forall (p:U) (Q:U -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. Proof eq_rect_eq_dec eq_dec. (** Injectivity of Dependent Equality *) Theorem eq_dep_eq : forall (P:U->Type) (p:U) (x y:P p), eq_dep U P p x p y -> x = y. Proof (eq_rect_eq__eq_dep_eq U eq_rect_eq). (** Uniqueness of Identity Proofs (UIP) *) Lemma UIP : forall (x y:U) (p1 p2:x = y), p1 = p2. Proof (eq_dep_eq__UIP U eq_dep_eq). (** Uniqueness of Reflexive Identity Proofs *) Lemma UIP_refl : forall (x:U) (p:x = x), p = eq_refl x. Proof (UIP__UIP_refl U UIP). (** Streicher's axiom K *) Lemma Streicher_K : forall (x:U) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. Proof (K_dec_type eq_dec). (** Injectivity of equality on dependent pairs in [Type] *) Lemma inj_pairT2 : forall (P:U -> Type) (p:U) (x y:P p), existT P p x = existT P p y -> x = y. Proof eq_dep_eq__inj_pairT2 U eq_dep_eq. (** Proof-irrelevance on subsets of decidable sets *) Lemma inj_pairP2 : forall (P:U -> Prop) (x:U) (p q:P x), ex_intro P x p = ex_intro P x q -> p = q. Proof. intros. apply inj_right_pair with (A:=U). intros x0 y0; case (eq_dec x0 y0); [left|right]; assumption. assumption. Qed. End DecidableEqDep. (************************************************************************) (** ** Definition of the functor that builds properties of dependent equalities on decidable sets in Set *) (** The signature of decidable sets in [Set] *) Module Type DecidableSet. Parameter U:Type. Axiom eq_dec : forall x y:U, {x = y} + {x <> y}. End DecidableSet. (** The module [DecidableEqDepSet] collects equality properties for decidable set in [Set] *) Module DecidableEqDepSet (M:DecidableSet). Import M. Module N:=DecidableEqDep(M). (** Invariance by Substitution of Reflexive Equality Proofs *) Lemma eq_rect_eq : forall (p:U) (Q:U -> Type) (x:Q p) (h:p = p), x = eq_rect p Q x p h. Proof eq_rect_eq_dec eq_dec. (** Injectivity of Dependent Equality *) Theorem eq_dep_eq : forall (P:U->Type) (p:U) (x y:P p), eq_dep U P p x p y -> x = y. Proof N.eq_dep_eq. 
(** Uniqueness of Identity Proofs (UIP) *) Lemma UIP : forall (x y:U) (p1 p2:x = y), p1 = p2. Proof N.UIP. (** Uniqueness of Reflexive Identity Proofs *) Lemma UIP_refl : forall (x:U) (p:x = x), p = eq_refl x. Proof N.UIP_refl. (** Streicher's axiom K *) Lemma Streicher_K : forall (x:U) (P:x = x -> Prop), P (eq_refl x) -> forall p:x = x, P p. Proof N.Streicher_K. (** Proof-irrelevance on subsets of decidable sets *) Lemma inj_pairP2 : forall (P:U -> Prop) (x:U) (p q:P x), ex_intro P x p = ex_intro P x q -> p = q. Proof N.inj_pairP2. (** Injectivity of equality on dependent pairs in [Type] *) Lemma inj_pair2 : forall (P:U -> Type) (p:U) (x y:P p), existT P p x = existT P p y -> x = y. Proof eq_dep_eq__inj_pair2 U N.eq_dep_eq. (** Injectivity of equality on dependent pairs with second component in [Type] *) Notation inj_pairT2 := inj_pair2. End DecidableEqDepSet. (** From decidability to inj_pair2 **) Lemma inj_pair2_eq_dec : forall A:Type, (forall x y:A, {x=y}+{x<>y}) -> ( forall (P:A -> Type) (p:A) (x y:P p), existT P p x = existT P p y -> x = y ). Proof. intros A eq_dec. apply eq_dep_eq__inj_pair2. apply eq_rect_eq__eq_dep_eq. unfold Eq_rect_eq. apply eq_rect_eq_dec. apply eq_dec. Qed. coq-8.4pl2/theories/Lists/0000750000175000001440000000000012127276545014561 5ustar notinuserscoq-8.4pl2/theories/Lists/List.v0000640000175000001440000012772012010532755015662 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* default | x :: _ => x end. Definition hd_error (l:list A) := match l with | nil => error | x :: _ => value x end. Definition tl (l:list A) := match l with | nil => nil | a :: m => m end. (** The [In] predicate *) Fixpoint In (a:A) (l:list A) : Prop := match l with | nil => False | b :: m => b = a \/ In a m end. End Lists. (** Standard notations for lists. In a special module to avoid conflict. *) Module ListNotations. Notation " [ ] " := nil : list_scope. Notation " [ x ] " := (cons x nil) : list_scope. Notation " [ x ; .. ; y ] " := (cons x .. (cons y nil) ..) : list_scope. End ListNotations. Import ListNotations. (** ** Facts about lists *) Section Facts. Variable A : Type. (** *** Genereric facts *) (** Discrimination *) Theorem nil_cons : forall (x:A) (l:list A), [] <> x :: l. Proof. intros; discriminate. Qed. (** Destruction *) Theorem destruct_list : forall l : list A, {x:A & {tl:list A | l = x::tl}}+{l = []}. Proof. induction l as [|a tail]. right; reflexivity. left; exists a, tail; reflexivity. Qed. (** *** Head and tail *) Theorem hd_error_nil : hd_error (@nil A) = None. Proof. simpl; reflexivity. Qed. Theorem hd_error_cons : forall (l : list A) (x : A), hd_error (x::l) = Some x. Proof. intros; simpl; reflexivity. Qed. (************************) (** *** Facts about [In] *) (************************) (** Characterization of [In] *) Theorem in_eq : forall (a:A) (l:list A), In a (a :: l). Proof. simpl; auto. Qed. Theorem in_cons : forall (a b:A) (l:list A), In b l -> In b (a :: l). Proof. simpl; auto. Qed. Theorem in_nil : forall a:A, ~ In a []. Proof. unfold not; intros a H; inversion_clear H. Qed. Theorem in_split : forall x (l:list A), In x l -> exists l1 l2, l = l1++x::l2. Proof. induction l; simpl; destruct 1. subst a; auto. exists [], l; auto. destruct (IHl H) as (l1,(l2,H0)). exists (a::l1), l2; simpl; f_equal; auto. Qed. (** Inversion *) Lemma in_inv : forall (a b:A) (l:list A), In b (a :: l) -> a = b \/ In b l. Proof. intros a b l H; inversion_clear H; auto. Qed. 
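(** Small illustration, added as a sketch and not part of the original
    file: membership in an explicit two-element list is obtained from
    [in_cons] and [in_eq]. *)

Example in_second_example : forall a b : A, In b (a :: b :: nil).
Proof.
  intros a b; apply in_cons; apply in_eq.
Qed.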
(** Decidability of [In] *) Theorem in_dec : (forall x y:A, {x = y} + {x <> y}) -> forall (a:A) (l:list A), {In a l} + {~ In a l}. Proof. intro H; induction l as [| a0 l IHl]. right; apply in_nil. destruct (H a0 a); simpl; auto. destruct IHl; simpl; auto. right; unfold not; intros [Hc1| Hc2]; auto. Defined. (**************************) (** *** Facts about [app] *) (**************************) (** Discrimination *) Theorem app_cons_not_nil : forall (x y:list A) (a:A), [] <> x ++ a :: y. Proof. unfold not. destruct x as [| a l]; simpl; intros. discriminate H. discriminate H. Qed. (** Concat with [nil] *) Theorem app_nil_l : forall l:list A, [] ++ l = l. Proof. reflexivity. Qed. Theorem app_nil_r : forall l:list A, l ++ [] = l. Proof. induction l; simpl; f_equal; auto. Qed. (* begin hide *) (* Deprecated *) Theorem app_nil_end : forall (l:list A), l = l ++ []. Proof. symmetry; apply app_nil_r. Qed. (* end hide *) (** [app] is associative *) Theorem app_assoc : forall l m n:list A, l ++ m ++ n = (l ++ m) ++ n. Proof. intros l m n; induction l; simpl; f_equal; auto. Qed. (* begin hide *) (* Deprecated *) Theorem app_assoc_reverse : forall l m n:list A, (l ++ m) ++ n = l ++ m ++ n. Proof. auto using app_assoc. Qed. Hint Resolve app_assoc_reverse. (* end hide *) (** [app] commutes with [cons] *) Theorem app_comm_cons : forall (x y:list A) (a:A), a :: (x ++ y) = (a :: x) ++ y. Proof. auto. Qed. (** Facts deduced from the result of a concatenation *) Theorem app_eq_nil : forall l l':list A, l ++ l' = [] -> l = [] /\ l' = []. Proof. destruct l as [| x l]; destruct l' as [| y l']; simpl; auto. intro; discriminate. intros H; discriminate H. Qed. Theorem app_eq_unit : forall (x y:list A) (a:A), x ++ y = [a] -> x = [] /\ y = [a] \/ x = [a] /\ y = []. Proof. destruct x as [| a l]; [ destruct y as [| a l] | destruct y as [| a0 l0] ]; simpl. intros a H; discriminate H. left; split; auto. right; split; auto. generalize H. generalize (app_nil_r l); intros E. rewrite -> E; auto. intros. injection H. intro. cut ([] = l ++ a0 :: l0); auto. intro. generalize (app_cons_not_nil _ _ _ H1); intro. elim H2. Qed. Lemma app_inj_tail : forall (x y:list A) (a b:A), x ++ [a] = y ++ [b] -> x = y /\ a = b. Proof. induction x as [| x l IHl]; [ destruct y as [| a l] | destruct y as [| a l0] ]; simpl; auto. intros a b H. injection H. auto. intros a0 b H. injection H; intros. generalize (app_cons_not_nil _ _ _ H0); destruct 1. intros a b H. injection H; intros. cut ([] = l ++ [a]); auto. intro. generalize (app_cons_not_nil _ _ _ H2); destruct 1. intros a0 b H. injection H; intros. destruct (IHl l0 a0 b H0). split; auto. rewrite <- H1; rewrite <- H2; reflexivity. Qed. (** Compatibility with other operations *) Lemma app_length : forall l l' : list A, length (l++l') = length l + length l'. Proof. induction l; simpl; auto. Qed. Lemma in_app_or : forall (l m:list A) (a:A), In a (l ++ m) -> In a l \/ In a m. Proof. intros l m a. elim l; simpl; auto. intros a0 y H H0. now_show ((a0 = a \/ In a y) \/ In a m). elim H0; auto. intro H1. now_show ((a0 = a \/ In a y) \/ In a m). elim (H H1); auto. Qed. Lemma in_or_app : forall (l m:list A) (a:A), In a l \/ In a m -> In a (l ++ m). Proof. intros l m a. elim l; simpl; intro H. now_show (In a m). elim H; auto; intro H0. now_show (In a m). elim H0. (* subProof completed *) intros y H0 H1. now_show (H = a \/ In a (y ++ m)). elim H1; auto 4. intro H2. now_show (H = a \/ In a (y ++ m)). elim H2; auto. Qed. Lemma in_app_iff : forall l l' (a:A), In a (l++l') <-> In a l \/ In a l'. Proof. 
split; auto using in_app_or, in_or_app. Qed. Lemma app_inv_head: forall l l1 l2 : list A, l ++ l1 = l ++ l2 -> l1 = l2. Proof. induction l; simpl; auto; injection 1; auto. Qed. Lemma app_inv_tail: forall l l1 l2 : list A, l1 ++ l = l2 ++ l -> l1 = l2. Proof. intros l l1 l2; revert l1 l2 l. induction l1 as [ | x1 l1]; destruct l2 as [ | x2 l2]; simpl; auto; intros l H. absurd (length (x2 :: l2 ++ l) <= length l). simpl; rewrite app_length; auto with arith. rewrite <- H; auto with arith. absurd (length (x1 :: l1 ++ l) <= length l). simpl; rewrite app_length; auto with arith. rewrite H; auto with arith. injection H; clear H; intros; f_equal; eauto. Qed. End Facts. Hint Resolve app_assoc app_assoc_reverse: datatypes v62. Hint Resolve app_comm_cons app_cons_not_nil: datatypes v62. Hint Immediate app_eq_nil: datatypes v62. Hint Resolve app_eq_unit app_inj_tail: datatypes v62. Hint Resolve in_eq in_cons in_inv in_nil in_app_or in_or_app: datatypes v62. (*******************************************) (** * Operations on the elements of a list *) (*******************************************) Section Elts. Variable A : Type. (*****************************) (** ** Nth element of a list *) (*****************************) Fixpoint nth (n:nat) (l:list A) (default:A) {struct l} : A := match n, l with | O, x :: l' => x | O, other => default | S m, [] => default | S m, x :: t => nth m t default end. Fixpoint nth_ok (n:nat) (l:list A) (default:A) {struct l} : bool := match n, l with | O, x :: l' => true | O, other => false | S m, [] => false | S m, x :: t => nth_ok m t default end. Lemma nth_in_or_default : forall (n:nat) (l:list A) (d:A), {In (nth n l d) l} + {nth n l d = d}. (* Realizer nth_ok. Program_all. *) Proof. intros n l d; generalize n; induction l; intro n0. right; case n0; trivial. case n0; simpl. auto. intro n1; elim (IHl n1); auto. Qed. Lemma nth_S_cons : forall (n:nat) (l:list A) (d a:A), In (nth n l d) l -> In (nth (S n) (a :: l) d) (a :: l). Proof. simpl; auto. Qed. Fixpoint nth_error (l:list A) (n:nat) {struct n} : Exc A := match n, l with | O, x :: _ => value x | S n, _ :: l => nth_error l n | _, _ => error end. Definition nth_default (default:A) (l:list A) (n:nat) : A := match nth_error l n with | Some x => x | None => default end. Lemma nth_default_eq : forall n l (d:A), nth_default d l n = nth n l d. Proof. unfold nth_default; induction n; intros [ | ] ?; simpl; auto. Qed. Lemma nth_In : forall (n:nat) (l:list A) (d:A), n < length l -> In (nth n l d) l. Proof. unfold lt; induction n as [| n hn]; simpl. destruct l; simpl; [ inversion 2 | auto ]. destruct l as [| a l hl]; simpl. inversion 2. intros d ie; right; apply hn; auto with arith. Qed. Lemma nth_overflow : forall l n d, length l <= n -> nth n l d = d. Proof. induction l; destruct n; simpl; intros; auto. inversion H. apply IHl; auto with arith. Qed. Lemma nth_indep : forall l n d d', n < length l -> nth n l d = nth n l d'. Proof. induction l; simpl; intros; auto. inversion H. destruct n; simpl; auto with arith. Qed. Lemma app_nth1 : forall l l' d n, n < length l -> nth n (l++l') d = nth n l d. Proof. induction l. intros. inversion H. intros l' d n. case n; simpl; auto. intros; rewrite IHl; auto with arith. Qed. Lemma app_nth2 : forall l l' d n, n >= length l -> nth n (l++l') d = nth (n-length l) l' d. Proof. induction l. intros. simpl. destruct n; auto. intros l' d n. case n; simpl; auto. intros. inversion H. intros. rewrite IHl; auto with arith. Qed. 
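(** Small illustration, added as a sketch and not part of the original
    file: on a concrete two-element prefix, indexing past the prefix
    reaches the appended suffix purely by computation, as predicted by
    [app_nth2] for [n >= length l]. *)

Example nth_app_example : forall (a b c d : A) (l' : list A),
  nth 2 ((a :: b :: nil) ++ c :: l') d = c.
Proof.
  intros; reflexivity.
Qed.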
(*****************) (** ** Remove *) (*****************) Hypothesis eq_dec : forall x y : A, {x = y}+{x <> y}. Fixpoint remove (x : A) (l : list A) : list A := match l with | [] => [] | y::tl => if (eq_dec x y) then remove x tl else y::(remove x tl) end. Theorem remove_In : forall (l : list A) (x : A), ~ In x (remove x l). Proof. induction l as [|x l]; auto. intro y; simpl; destruct (eq_dec y x) as [yeqx | yneqx]. apply IHl. unfold not; intro HF; simpl in HF; destruct HF; auto. apply (IHl y); assumption. Qed. (******************************) (** ** Last element of a list *) (******************************) (** [last l d] returns the last element of the list [l], or the default value [d] if [l] is empty. *) Fixpoint last (l:list A) (d:A) : A := match l with | [] => d | [a] => a | a :: l => last l d end. (** [removelast l] remove the last element of [l] *) Fixpoint removelast (l:list A) : list A := match l with | [] => [] | [a] => [] | a :: l => a :: removelast l end. Lemma app_removelast_last : forall l d, l <> [] -> l = removelast l ++ [last l d]. Proof. induction l. destruct 1; auto. intros d _. destruct l; auto. pattern (a0::l) at 1; rewrite IHl with d; auto; discriminate. Qed. Lemma exists_last : forall l, l <> [] -> { l' : (list A) & { a : A | l = l' ++ [a]}}. Proof. induction l. destruct 1; auto. intros _. destruct l. exists [], a; auto. destruct IHl as [l' (a',H)]; try discriminate. rewrite H. exists (a::l'), a'; auto. Qed. Lemma removelast_app : forall l l', l' <> [] -> removelast (l++l') = l ++ removelast l'. Proof. induction l. simpl; auto. simpl; intros. assert (l++l' <> []). destruct l. simpl; auto. simpl; discriminate. specialize (IHl l' H). destruct (l++l'); [elim H0; auto|f_equal; auto]. Qed. (****************************************) (** ** Counting occurences of a element *) (****************************************) Fixpoint count_occ (l : list A) (x : A) : nat := match l with | [] => 0 | y :: tl => let n := count_occ tl x in if eq_dec y x then S n else n end. (** Compatibility of count_occ with operations on list *) Theorem count_occ_In (l : list A) (x : A) : In x l <-> count_occ l x > 0. Proof. induction l as [|y l]; simpl. - split; [destruct 1 | apply gt_irrefl]. - destruct eq_dec as [->|Hneq]; rewrite IHl; intuition. Qed. Theorem count_occ_inv_nil (l : list A) : (forall x:A, count_occ l x = 0) <-> l = []. Proof. split. - induction l as [|x l]; trivial. intros H. specialize (H x). simpl in H. destruct eq_dec as [_|NEQ]; [discriminate|now elim NEQ]. - now intros ->. Qed. Lemma count_occ_nil : forall (x : A), count_occ [] x = 0. Proof. intro x; simpl; reflexivity. Qed. Lemma count_occ_cons_eq : forall (l : list A) (x y : A), x = y -> count_occ (x::l) y = S (count_occ l y). Proof. intros l x y H; simpl. destruct (eq_dec x y); [reflexivity | contradiction]. Qed. Lemma count_occ_cons_neq : forall (l : list A) (x y : A), x <> y -> count_occ (x::l) y = count_occ l y. Proof. intros l x y H; simpl. destruct (eq_dec x y); [contradiction | reflexivity]. Qed. End Elts. (*******************************) (** * Manipulating whole lists *) (*******************************) Section ListOps. Variable A : Type. (*************************) (** ** Reverse *) (*************************) Fixpoint rev (l:list A) : list A := match l with | [] => [] | x :: l' => rev l' ++ [x] end. Lemma rev_app_distr : forall x y:list A, rev (x ++ y) = rev y ++ rev x. Proof. induction x as [| a l IHl]. destruct y as [| a l]. simpl. auto. simpl. rewrite app_nil_r; auto. intro y. simpl. rewrite (IHl y). 
rewrite app_assoc; trivial. Qed. Remark rev_unit : forall (l:list A) (a:A), rev (l ++ [a]) = a :: rev l. Proof. intros. apply (rev_app_distr l [a]); simpl; auto. Qed. Lemma rev_involutive : forall l:list A, rev (rev l) = l. Proof. induction l as [| a l IHl]. simpl; auto. simpl. rewrite (rev_unit (rev l) a). rewrite IHl; auto. Qed. (** Compatibility with other operations *) Lemma in_rev : forall l x, In x l <-> In x (rev l). Proof. induction l. simpl; intuition. intros. simpl. intuition. subst. apply in_or_app; right; simpl; auto. apply in_or_app; left; firstorder. destruct (in_app_or _ _ _ H); firstorder. Qed. Lemma rev_length : forall l, length (rev l) = length l. Proof. induction l;simpl; auto. rewrite app_length. rewrite IHl. simpl. elim (length l); simpl; auto. Qed. Lemma rev_nth : forall l d n, n < length l -> nth n (rev l) d = nth (length l - S n) l d. Proof. induction l. intros; inversion H. intros. simpl in H. simpl (rev (a :: l)). simpl (length (a :: l) - S n). inversion H. rewrite <- minus_n_n; simpl. rewrite <- rev_length. rewrite app_nth2; auto. rewrite <- minus_n_n; auto. rewrite app_nth1; auto. rewrite (minus_plus_simpl_l_reverse (length l) n 1). replace (1 + length l) with (S (length l)); auto with arith. rewrite <- minus_Sn_m; auto with arith. apply IHl ; auto with arith. rewrite rev_length; auto. Qed. (** An alternative tail-recursive definition for reverse *) Fixpoint rev_append (l l': list A) : list A := match l with | [] => l' | a::l => rev_append l (a::l') end. Definition rev' l : list A := rev_append l []. Lemma rev_append_rev : forall l l', rev_append l l' = rev l ++ l'. Proof. induction l; simpl; auto; intros. rewrite <- app_assoc; firstorder. Qed. Lemma rev_alt : forall l, rev l = rev_append l []. Proof. intros; rewrite rev_append_rev. rewrite app_nil_r; trivial. Qed. (*********************************************) (** Reverse Induction Principle on Lists *) (*********************************************) Section Reverse_Induction. Lemma rev_list_ind : forall P:list A-> Prop, P [] -> (forall (a:A) (l:list A), P (rev l) -> P (rev (a :: l))) -> forall l:list A, P (rev l). Proof. induction l; auto. Qed. Theorem rev_ind : forall P:list A -> Prop, P [] -> (forall (x:A) (l:list A), P l -> P (l ++ [x])) -> forall l:list A, P l. Proof. intros. generalize (rev_involutive l). intros E; rewrite <- E. apply (rev_list_ind P). auto. simpl. intros. apply (H0 a (rev l0)). auto. Qed. End Reverse_Induction. (***********************************) (** ** Decidable equality on lists *) (***********************************) Hypothesis eq_dec : forall (x y : A), {x = y}+{x <> y}. Lemma list_eq_dec : forall l l':list A, {l = l'} + {l <> l'}. Proof. decide equality. Defined. End ListOps. (***************************************************) (** * Applying functions to the elements of a list *) (***************************************************) (************) (** ** Map *) (************) Section Map. Variables A B : Type. Variable f : A -> B. Fixpoint map (l:list A) : list B := match l with | nil => nil | cons a t => cons (f a) (map t) end. Lemma in_map : forall (l:list A) (x:A), In x l -> In (f x) (map l). Proof. induction l; firstorder (subst; auto). Qed. Lemma in_map_iff : forall l y, In y (map l) <-> exists x, f x = y /\ In x l. Proof. induction l; firstorder (subst; auto). Qed. Lemma map_length : forall l, length (map l) = length l. Proof. induction l; simpl; auto. Qed. Lemma map_nth : forall l d n, nth n (map l) (f d) = f (nth n l d). Proof. 
induction l; simpl map; destruct n; firstorder. Qed. Lemma map_nth_error : forall n l d, nth_error l n = Some d -> nth_error (map l) n = Some (f d). Proof. induction n; intros [ | ] ? Heq; simpl in *; inversion Heq; auto. Qed. Lemma map_app : forall l l', map (l++l') = (map l)++(map l'). Proof. induction l; simpl; auto. intros; rewrite IHl; auto. Qed. Lemma map_rev : forall l, map (rev l) = rev (map l). Proof. induction l; simpl; auto. rewrite map_app. rewrite IHl; auto. Qed. Lemma map_eq_nil : forall l, map l = [] -> l = []. Proof. destruct l; simpl; reflexivity || discriminate. Qed. (** [flat_map] *) Definition flat_map (f:A -> list B) := fix flat_map (l:list A) : list B := match l with | nil => nil | cons x t => (f x)++(flat_map t) end. Lemma in_flat_map : forall (f:A->list B)(l:list A)(y:B), In y (flat_map f l) <-> exists x, In x l /\ In y (f x). Proof. induction l; simpl; split; intros. contradiction. destruct H as (x,(H,_)); contradiction. destruct (in_app_or _ _ _ H). exists a; auto. destruct (IHl y) as (H1,_); destruct (H1 H0) as (x,(H2,H3)). exists x; auto. apply in_or_app. destruct H as (x,(H0,H1)); destruct H0. subst; auto. right; destruct (IHl y) as (_,H2); apply H2. exists x; auto. Qed. End Map. Lemma map_id : forall (A :Type) (l : list A), map (fun x => x) l = l. Proof. induction l; simpl; auto; rewrite IHl; auto. Qed. Lemma map_map : forall (A B C:Type)(f:A->B)(g:B->C) l, map g (map f l) = map (fun x => g (f x)) l. Proof. induction l; simpl; auto. rewrite IHl; auto. Qed. Lemma map_ext : forall (A B : Type)(f g:A->B), (forall a, f a = g a) -> forall l, map f l = map g l. Proof. induction l; simpl; auto. rewrite H; rewrite IHl; auto. Qed. (************************************) (** Left-to-right iterator on lists *) (************************************) Section Fold_Left_Recursor. Variables A B : Type. Variable f : A -> B -> A. Fixpoint fold_left (l:list B) (a0:A) : A := match l with | nil => a0 | cons b t => fold_left t (f a0 b) end. Lemma fold_left_app : forall (l l':list B)(i:A), fold_left (l++l') i = fold_left l' (fold_left l i). Proof. induction l. simpl; auto. intros. simpl. auto. Qed. End Fold_Left_Recursor. Lemma fold_left_length : forall (A:Type)(l:list A), fold_left (fun x _ => S x) l 0 = length l. Proof. intro A. cut (forall (l:list A) n, fold_left (fun x _ => S x) l n = n + length l). intros. exact (H l 0). induction l; simpl; auto. intros; rewrite IHl. simpl; auto with arith. Qed. (************************************) (** Right-to-left iterator on lists *) (************************************) Section Fold_Right_Recursor. Variables A B : Type. Variable f : B -> A -> A. Variable a0 : A. Fixpoint fold_right (l:list B) : A := match l with | nil => a0 | cons b t => f b (fold_right t) end. End Fold_Right_Recursor. Lemma fold_right_app : forall (A B:Type)(f:A->B->B) l l' i, fold_right f i (l++l') = fold_right f (fold_right f i l') l. Proof. induction l. simpl; auto. simpl; intros. f_equal; auto. Qed. Lemma fold_left_rev_right : forall (A B:Type)(f:A->B->B) l i, fold_right f i (rev l) = fold_left (fun x y => f y x) l i. Proof. induction l. simpl; auto. intros. simpl. rewrite fold_right_app; simpl; auto. Qed. Theorem fold_symmetric : forall (A:Type) (f:A -> A -> A), (forall x y z:A, f x (f y z) = f (f x y) z) -> (forall x y:A, f x y = f y x) -> forall (a0:A) (l:list A), fold_left f l a0 = fold_right f a0 l. Proof. destruct l as [| a l]. reflexivity. simpl. rewrite <- H0. generalize a0 a. induction l as [| a3 l IHl]; simpl. trivial. intros. rewrite H. rewrite (H0 a2). 
rewrite <- (H a1). rewrite (H0 a1). rewrite IHl. reflexivity. Qed. (** [(list_power x y)] is [y^x], or the set of sequences of elts of [y] indexed by elts of [x], sorted in lexicographic order. *) Fixpoint list_power (A B:Type)(l:list A) (l':list B) : list (list (A * B)) := match l with | nil => cons nil nil | cons x t => flat_map (fun f:list (A * B) => map (fun y:B => cons (x, y) f) l') (list_power t l') end. (*************************************) (** ** Boolean operations over lists *) (*************************************) Section Bool. Variable A : Type. Variable f : A -> bool. (** find whether a boolean function can be satisfied by an elements of the list. *) Fixpoint existsb (l:list A) : bool := match l with | nil => false | a::l => f a || existsb l end. Lemma existsb_exists : forall l, existsb l = true <-> exists x, In x l /\ f x = true. Proof. induction l; simpl; intuition. inversion H. firstorder. destruct (orb_prop _ _ H1); firstorder. firstorder. subst. rewrite H2; auto. Qed. Lemma existsb_nth : forall l n d, n < length l -> existsb l = false -> f (nth n l d) = false. Proof. induction l. inversion 1. simpl; intros. destruct (orb_false_elim _ _ H0); clear H0; auto. destruct n ; auto. rewrite IHl; auto with arith. Qed. Lemma existsb_app : forall l1 l2, existsb (l1++l2) = existsb l1 || existsb l2. Proof. induction l1; intros l2; simpl. solve[auto]. case (f a); simpl; solve[auto]. Qed. (** find whether a boolean function is satisfied by all the elements of a list. *) Fixpoint forallb (l:list A) : bool := match l with | nil => true | a::l => f a && forallb l end. Lemma forallb_forall : forall l, forallb l = true <-> (forall x, In x l -> f x = true). Proof. induction l; simpl; intuition. destruct (andb_prop _ _ H1). congruence. destruct (andb_prop _ _ H1); auto. assert (forallb l = true). apply H0; intuition. rewrite H1; auto. Qed. Lemma forallb_app : forall l1 l2, forallb (l1++l2) = forallb l1 && forallb l2. Proof. induction l1; simpl. solve[auto]. case (f a); simpl; solve[auto]. Qed. (** [filter] *) Fixpoint filter (l:list A) : list A := match l with | nil => nil | x :: l => if f x then x::(filter l) else filter l end. Lemma filter_In : forall x l, In x (filter l) <-> In x l /\ f x = true. Proof. induction l; simpl. intuition. intros. case_eq (f a); intros; simpl; intuition congruence. Qed. (** [find] *) Fixpoint find (l:list A) : option A := match l with | nil => None | x :: tl => if f x then Some x else find tl end. (** [partition] *) Fixpoint partition (l:list A) : list A * list A := match l with | nil => (nil, nil) | x :: tl => let (g,d) := partition tl in if f x then (x::g,d) else (g,x::d) end. End Bool. (******************************************************) (** ** Operations on lists of pairs or lists of lists *) (******************************************************) Section ListPairs. Variables A B : Type. (** [split] derives two lists from a list of pairs *) Fixpoint split (l:list (A*B)) : list A * list B := match l with | nil => (nil, nil) | (x,y) :: tl => let (g,d) := split tl in (x::g, y::d) end. Lemma in_split_l : forall (l:list (A*B))(p:A*B), In p l -> In (fst p) (fst (split l)). Proof. induction l; simpl; intros; auto. destruct p; destruct a; destruct (split l); simpl in *. destruct H. injection H; auto. right; apply (IHl (a0,b) H). Qed. Lemma in_split_r : forall (l:list (A*B))(p:A*B), In p l -> In (snd p) (snd (split l)). Proof. induction l; simpl; intros; auto. destruct p; destruct a; destruct (split l); simpl in *. destruct H. injection H; auto. 
right; apply (IHl (a0,b) H). Qed. Lemma split_nth : forall (l:list (A*B))(n:nat)(d:A*B), nth n l d = (nth n (fst (split l)) (fst d), nth n (snd (split l)) (snd d)). Proof. induction l. destruct n; destruct d; simpl; auto. destruct n; destruct d; simpl; auto. destruct a; destruct (split l); simpl; auto. destruct a; destruct (split l); simpl in *; auto. apply IHl. Qed. Lemma split_length_l : forall (l:list (A*B)), length (fst (split l)) = length l. Proof. induction l; simpl; auto. destruct a; destruct (split l); simpl; auto. Qed. Lemma split_length_r : forall (l:list (A*B)), length (snd (split l)) = length l. Proof. induction l; simpl; auto. destruct a; destruct (split l); simpl; auto. Qed. (** [combine] is the opposite of [split]. Lists given to [combine] are meant to be of same length. If not, [combine] stops on the shorter list *) Fixpoint combine (l : list A) (l' : list B) : list (A*B) := match l,l' with | x::tl, y::tl' => (x,y)::(combine tl tl') | _, _ => nil end. Lemma split_combine : forall (l: list (A*B)), let (l1,l2) := split l in combine l1 l2 = l. Proof. induction l. simpl; auto. destruct a; simpl. destruct (split l); simpl in *. f_equal; auto. Qed. Lemma combine_split : forall (l:list A)(l':list B), length l = length l' -> split (combine l l') = (l,l'). Proof. induction l; destruct l'; simpl; intros; auto; try discriminate. injection H; clear H; intros. rewrite IHl; auto. Qed. Lemma in_combine_l : forall (l:list A)(l':list B)(x:A)(y:B), In (x,y) (combine l l') -> In x l. Proof. induction l. simpl; auto. destruct l'; simpl; auto; intros. contradiction. destruct H. injection H; auto. right; apply IHl with l' y; auto. Qed. Lemma in_combine_r : forall (l:list A)(l':list B)(x:A)(y:B), In (x,y) (combine l l') -> In y l'. Proof. induction l. simpl; intros; contradiction. destruct l'; simpl; auto; intros. destruct H. injection H; auto. right; apply IHl with x; auto. Qed. Lemma combine_length : forall (l:list A)(l':list B), length (combine l l') = min (length l) (length l'). Proof. induction l. simpl; auto. destruct l'; simpl; auto. Qed. Lemma combine_nth : forall (l:list A)(l':list B)(n:nat)(x:A)(y:B), length l = length l' -> nth n (combine l l') (x,y) = (nth n l x, nth n l' y). Proof. induction l; destruct l'; intros; try discriminate. destruct n; simpl; auto. destruct n; simpl in *; auto. Qed. (** [list_prod] has the same signature as [combine], but unlike [combine], it adds every possible pairs, not only those at the same position. *) Fixpoint list_prod (l:list A) (l':list B) : list (A * B) := match l with | nil => nil | cons x t => (map (fun y:B => (x, y)) l')++(list_prod t l') end. Lemma in_prod_aux : forall (x:A) (y:B) (l:list B), In y l -> In (x, y) (map (fun y0:B => (x, y0)) l). Proof. induction l; [ simpl; auto | simpl; destruct 1 as [H1| ]; [ left; rewrite H1; trivial | right; auto ] ]. Qed. Lemma in_prod : forall (l:list A) (l':list B) (x:A) (y:B), In x l -> In y l' -> In (x, y) (list_prod l l'). Proof. induction l; [ simpl; tauto | simpl; intros; apply in_or_app; destruct H; [ left; rewrite H; apply in_prod_aux; assumption | right; auto ] ]. Qed. Lemma in_prod_iff : forall (l:list A)(l':list B)(x:A)(y:B), In (x,y) (list_prod l l') <-> In x l /\ In y l'. Proof. split; [ | intros; apply in_prod; intuition ]. induction l; simpl; intros. intuition. destruct (in_app_or _ _ _ H); clear H. destruct (in_map_iff (fun y : B => (a, y)) l' (x,y)) as (H1,_). destruct (H1 H0) as (z,(H2,H3)); clear H0 H1. injection H2; clear H2; intros; subst; intuition. intuition. Qed. 
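(** Small illustration, added as a sketch and not part of the original
    file: [combine] pairs elements positionally and stops with the shorter
    list, so this instance reduces by computation. *)

Example combine_example : forall (a a' : A) (b b' : B) (l : list B),
  combine (a :: a' :: nil) (b :: b' :: l) = (a, b) :: (a', b') :: nil.
Proof.
  intros; reflexivity.
Qed.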
Lemma prod_length : forall (l:list A)(l':list B), length (list_prod l l') = (length l) * (length l'). Proof. induction l; simpl; auto. intros. rewrite app_length. rewrite map_length. auto. Qed. End ListPairs. (*****************************************) (** * Miscellaneous operations on lists *) (*****************************************) (******************************) (** ** Length order of lists *) (******************************) Section length_order. Variable A : Type. Definition lel (l m:list A) := length l <= length m. Variables a b : A. Variables l m n : list A. Lemma lel_refl : lel l l. Proof. unfold lel; auto with arith. Qed. Lemma lel_trans : lel l m -> lel m n -> lel l n. Proof. unfold lel; intros. now_show (length l <= length n). apply le_trans with (length m); auto with arith. Qed. Lemma lel_cons_cons : lel l m -> lel (a :: l) (b :: m). Proof. unfold lel; simpl; auto with arith. Qed. Lemma lel_cons : lel l m -> lel l (b :: m). Proof. unfold lel; simpl; auto with arith. Qed. Lemma lel_tail : lel (a :: l) (b :: m) -> lel l m. Proof. unfold lel; simpl; auto with arith. Qed. Lemma lel_nil : forall l':list A, lel l' nil -> nil = l'. Proof. intro l'; elim l'; auto with arith. intros a' y H H0. now_show (nil = a' :: y). absurd (S (length y) <= 0); auto with arith. Qed. End length_order. Hint Resolve lel_refl lel_cons_cons lel_cons lel_nil lel_nil nil_cons: datatypes v62. (******************************) (** ** Set inclusion on list *) (******************************) Section SetIncl. Variable A : Type. Definition incl (l m:list A) := forall a:A, In a l -> In a m. Hint Unfold incl. Lemma incl_refl : forall l:list A, incl l l. Proof. auto. Qed. Hint Resolve incl_refl. Lemma incl_tl : forall (a:A) (l m:list A), incl l m -> incl l (a :: m). Proof. auto with datatypes. Qed. Hint Immediate incl_tl. Lemma incl_tran : forall l m n:list A, incl l m -> incl m n -> incl l n. Proof. auto. Qed. Lemma incl_appl : forall l m n:list A, incl l n -> incl l (n ++ m). Proof. auto with datatypes. Qed. Hint Immediate incl_appl. Lemma incl_appr : forall l m n:list A, incl l n -> incl l (m ++ n). Proof. auto with datatypes. Qed. Hint Immediate incl_appr. Lemma incl_cons : forall (a:A) (l m:list A), In a m -> incl l m -> incl (a :: l) m. Proof. unfold incl; simpl; intros a l m H H0 a0 H1. now_show (In a0 m). elim H1. now_show (a = a0 -> In a0 m). elim H1; auto; intro H2. now_show (a = a0 -> In a0 m). elim H2; auto. (* solves subgoal *) now_show (In a0 l -> In a0 m). auto. Qed. Hint Resolve incl_cons. Lemma incl_app : forall l m n:list A, incl l n -> incl m n -> incl (l ++ m) n. Proof. unfold incl; simpl; intros l m n H H0 a H1. now_show (In a n). elim (in_app_or _ _ _ H1); auto. Qed. Hint Resolve incl_app. End SetIncl. Hint Resolve incl_refl incl_tl incl_tran incl_appl incl_appr incl_cons incl_app: datatypes v62. (**************************************) (** * Cutting a list at some position *) (**************************************) Section Cutting. Variable A : Type. Fixpoint firstn (n:nat)(l:list A) : list A := match n with | 0 => nil | S n => match l with | nil => nil | a::l => a::(firstn n l) end end. Fixpoint skipn (n:nat)(l:list A) : list A := match n with | 0 => l | S n => match l with | nil => nil | a::l => skipn n l end end. Lemma firstn_skipn : forall n l, firstn n l ++ skipn n l = l. Proof. induction n. simpl; auto. destruct l; simpl; auto. f_equal; auto. Qed. Lemma firstn_length : forall n l, length (firstn n l) = min n (length l). Proof. induction n; destruct l; simpl; auto. Qed. 
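(** Small illustration, added as a sketch and not part of the original
    file: cutting a concrete three-element list at position 2; both halves
    reduce by computation, in accordance with [firstn_skipn]. *)

Example firstn_skipn_example : forall a b c : A,
  firstn 2 (a :: b :: c :: nil) = a :: b :: nil /\
  skipn 2 (a :: b :: c :: nil) = c :: nil.
Proof.
  intros; split; reflexivity.
Qed.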
Lemma removelast_firstn : forall n l, n < length l -> removelast (firstn (S n) l) = firstn n l. Proof. induction n; destruct l. simpl; auto. simpl; auto. simpl; auto. intros. simpl in H. change (firstn (S (S n)) (a::l)) with ((a::nil)++firstn (S n) l). change (firstn (S n) (a::l)) with (a::firstn n l). rewrite removelast_app. rewrite IHn; auto with arith. clear IHn; destruct l; simpl in *; try discriminate. inversion_clear H. inversion_clear H0. Qed. Lemma firstn_removelast : forall n l, n < length l -> firstn n (removelast l) = firstn n l. Proof. induction n; destruct l. simpl; auto. simpl; auto. simpl; auto. intros. simpl in H. change (removelast (a :: l)) with (removelast ((a::nil)++l)). rewrite removelast_app. simpl; f_equal; auto with arith. intro H0; rewrite H0 in H; inversion_clear H; inversion_clear H1. Qed. End Cutting. (********************************) (** ** Lists without redundancy *) (********************************) Section ReDun. Variable A : Type. Inductive NoDup : list A -> Prop := | NoDup_nil : NoDup nil | NoDup_cons : forall x l, ~ In x l -> NoDup l -> NoDup (x::l). Lemma NoDup_remove_1 : forall l l' a, NoDup (l++a::l') -> NoDup (l++l'). Proof. induction l; simpl. inversion_clear 1; auto. inversion_clear 1. constructor. contradict H0. apply in_or_app; destruct (in_app_or _ _ _ H0); simpl; tauto. apply IHl with a0; auto. Qed. Lemma NoDup_remove_2 : forall l l' a, NoDup (l++a::l') -> ~In a (l++l'). Proof. induction l; simpl. inversion_clear 1; auto. inversion_clear 1. contradict H0. destruct H0. subst a0. apply in_or_app; right; red; auto. destruct (IHl _ _ H1); auto. Qed. End ReDun. (***********************************) (** ** Sequence of natural numbers *) (***********************************) Section NatSeq. (** [seq] computes the sequence of [len] contiguous integers that starts at [start]. For instance, [seq 2 3] is [2::3::4::nil]. *) Fixpoint seq (start len:nat) : list nat := match len with | 0 => nil | S len => start :: seq (S start) len end. Lemma seq_length : forall len start, length (seq start len) = len. Proof. induction len; simpl; auto. Qed. Lemma seq_nth : forall len start n d, n < len -> nth n (seq start len) d = start+n. Proof. induction len; intros. inversion H. simpl seq. destruct n; simpl. auto with arith. rewrite IHlen;simpl; auto with arith. Qed. Lemma seq_shift : forall len start, map S (seq start len) = seq (S start) len. Proof. induction len; simpl; auto. intros. rewrite IHlen. auto with arith. Qed. End NatSeq. (** * Existential and universal predicates over lists *) Inductive Exists {A} (P:A->Prop) : list A -> Prop := | Exists_cons_hd : forall x l, P x -> Exists P (x::l) | Exists_cons_tl : forall x l, Exists P l -> Exists P (x::l). Hint Constructors Exists. Lemma Exists_exists : forall A P (l:list A), Exists P l <-> (exists x, In x l /\ P x). Proof. split. induction 1; firstorder. induction l; firstorder; subst; auto. Qed. Lemma Exists_nil : forall A (P:A->Prop), Exists P nil <-> False. Proof. split; inversion 1. Qed. Lemma Exists_cons : forall A (P:A->Prop) x l, Exists P (x::l) <-> P x \/ Exists P l. Proof. split; inversion 1; auto. Qed. Inductive Forall {A} (P:A->Prop) : list A -> Prop := | Forall_nil : Forall P nil | Forall_cons : forall x l, P x -> Forall P l -> Forall P (x::l). Hint Constructors Forall. Lemma Forall_forall : forall A P (l:list A), Forall P l <-> (forall x, In x l -> P x). Proof. split. induction 1; firstorder; subst; auto. induction l; firstorder. Qed. Lemma Forall_inv : forall A P (a:A) l, Forall P (a :: l) -> P a. Proof. 
intros; inversion H; trivial. Defined. Lemma Forall_rect : forall A (P:A->Prop) (Q : list A -> Type), Q [] -> (forall b l, P b -> Q (b :: l)) -> forall l, Forall P l -> Q l. Proof. intros A P Q H H'; induction l; intro; [|eapply H', Forall_inv]; eassumption. Defined. Lemma Forall_impl : forall A (P Q : A -> Prop), (forall a, P a -> Q a) -> forall l, Forall P l -> Forall Q l. Proof. intros A P Q Himp l H. induction H; firstorder. Qed. (** [Forall2]: stating that elements of two lists are pairwise related. *) Inductive Forall2 A B (R:A->B->Prop) : list A -> list B -> Prop := | Forall2_nil : Forall2 R [] [] | Forall2_cons : forall x y l l', R x y -> Forall2 R l l' -> Forall2 R (x::l) (y::l'). Hint Constructors Forall2. Theorem Forall2_refl : forall A B (R:A->B->Prop), Forall2 R [] []. Proof. exact Forall2_nil. Qed. Theorem Forall2_app_inv_l : forall A B (R:A->B->Prop) l1 l2 l', Forall2 R (l1 ++ l2) l' -> exists l1' l2', Forall2 R l1 l1' /\ Forall2 R l2 l2' /\ l' = l1' ++ l2'. Proof. induction l1; intros. exists [], l'; auto. simpl in H; inversion H; subst; clear H. apply IHl1 in H4 as (l1' & l2' & Hl1 & Hl2 & ->). exists (y::l1'), l2'; simpl; auto. Qed. Theorem Forall2_app_inv_r : forall A B (R:A->B->Prop) l1' l2' l, Forall2 R l (l1' ++ l2') -> exists l1 l2, Forall2 R l1 l1' /\ Forall2 R l2 l2' /\ l = l1 ++ l2. Proof. induction l1'; intros. exists [], l; auto. simpl in H; inversion H; subst; clear H. apply IHl1' in H4 as (l1 & l2 & Hl1 & Hl2 & ->). exists (x::l1), l2; simpl; auto. Qed. Theorem Forall2_app : forall A B (R:A->B->Prop) l1 l2 l1' l2', Forall2 R l1 l1' -> Forall2 R l2 l2' -> Forall2 R (l1 ++ l2) (l1' ++ l2'). Proof. intros. induction l1 in l1', H, H0 |- *; inversion H; subst; simpl; auto. Qed. (** [ForallPairs] : specifies that a certain relation should always hold when inspecting all possible pairs of elements of a list. *) Definition ForallPairs A (R : A -> A -> Prop) l := forall a b, In a l -> In b l -> R a b. (** [ForallOrdPairs] : we still check a relation over all pairs of elements of a list, but now the order of elements matters. *) Inductive ForallOrdPairs A (R : A -> A -> Prop) : list A -> Prop := | FOP_nil : ForallOrdPairs R nil | FOP_cons : forall a l, Forall (R a) l -> ForallOrdPairs R l -> ForallOrdPairs R (a::l). Hint Constructors ForallOrdPairs. Lemma ForallOrdPairs_In : forall A (R:A->A->Prop) l, ForallOrdPairs R l -> forall x y, In x l -> In y l -> x=y \/ R x y \/ R y x. Proof. induction 1. inversion 1. simpl; destruct 1; destruct 1; repeat subst; auto. right; left. apply -> Forall_forall; eauto. right; right. apply -> Forall_forall; eauto. Qed. (** [ForallPairs] implies [ForallOrdPairs]. The reverse implication is true only when [R] is symmetric and reflexive. *) Lemma ForallPairs_ForallOrdPairs : forall A (R:A->A->Prop) l, ForallPairs R l -> ForallOrdPairs R l. Proof. induction l; auto. intros H. constructor. apply <- Forall_forall. intros; apply H; simpl; auto. apply IHl. red; intros; apply H; simpl; auto. Qed. Lemma ForallOrdPairs_ForallPairs : forall A (R:A->A->Prop), (forall x, R x x) -> (forall x y, R x y -> R y x) -> forall l, ForallOrdPairs R l -> ForallPairs R l. Proof. intros A R Refl Sym l Hl x y Hx Hy. destruct (ForallOrdPairs_In Hl _ _ Hx Hy); subst; intuition. Qed. (** * Inversion of predicates over lists based on head symbol *) Ltac is_list_constr c := match c with | nil => idtac | (_::_) => idtac | _ => fail end. 
Ltac invlist f := match goal with | H:f ?l |- _ => is_list_constr l; inversion_clear H; invlist f | H:f _ ?l |- _ => is_list_constr l; inversion_clear H; invlist f | H:f _ _ ?l |- _ => is_list_constr l; inversion_clear H; invlist f | H:f _ _ _ ?l |- _ => is_list_constr l; inversion_clear H; invlist f | H:f _ _ _ _ ?l |- _ => is_list_constr l; inversion_clear H; invlist f | _ => idtac end. (** * Exporting hints and tactics *) Hint Rewrite rev_involutive (* rev (rev l) = l *) rev_unit (* rev (l ++ a :: nil) = a :: rev l *) map_nth (* nth n (map f l) (f d) = f (nth n l d) *) map_length (* length (map f l) = length l *) seq_length (* length (seq start len) = len *) app_length (* length (l ++ l') = length l + length l' *) rev_length (* length (rev l) = length l *) app_nil_r (* l ++ nil = l *) : list. Ltac simpl_list := autorewrite with list. Ltac ssimpl_list := autorewrite with list using simpl. (* begin hide *) (* Compatibility notations after the migration of [list] to [Datatypes] *) Notation list := list (only parsing). Notation list_rect := list_rect (only parsing). Notation list_rec := list_rec (only parsing). Notation list_ind := list_ind (only parsing). Notation nil := nil (only parsing). Notation cons := cons (only parsing). Notation length := length (only parsing). Notation app := app (only parsing). (* Compatibility Names *) Notation tail := tl (only parsing). Notation head := hd_error (only parsing). Notation head_nil := hd_error_nil (only parsing). Notation head_cons := hd_error_cons (only parsing). Notation ass_app := app_assoc (only parsing). Notation app_ass := app_assoc_reverse (only parsing). Notation In_split := in_split (only parsing). Notation In_rev := in_rev (only parsing). Notation In_dec := in_dec (only parsing). Notation distr_rev := rev_app_distr (only parsing). Notation rev_acc := rev_append (only parsing). Notation rev_acc_rev := rev_append_rev (only parsing). Notation AllS := Forall (only parsing). (* was formerly in TheoryList *) Hint Resolve app_nil_end : datatypes v62. (* end hide *) coq-8.4pl2/theories/Lists/Streams.v0000640000175000001440000001333412010532755016360 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Stream -> Stream. Definition hd (x:Stream) := match x with | Cons a _ => a end. Definition tl (x:Stream) := match x with | Cons _ s => s end. Fixpoint Str_nth_tl (n:nat) (s:Stream) : Stream := match n with | O => s | S m => Str_nth_tl m (tl s) end. Definition Str_nth (n:nat) (s:Stream) : A := hd (Str_nth_tl n s). Lemma unfold_Stream : forall x:Stream, x = match x with | Cons a s => Cons a s end. Proof. intro x. case x. trivial. Qed. Lemma tl_nth_tl : forall (n:nat) (s:Stream), tl (Str_nth_tl n s) = Str_nth_tl n (tl s). Proof. simple induction n; simpl; auto. Qed. Hint Resolve tl_nth_tl: datatypes v62. Lemma Str_nth_tl_plus : forall (n m:nat) (s:Stream), Str_nth_tl n (Str_nth_tl m s) = Str_nth_tl (n + m) s. simple induction n; simpl; intros; auto with datatypes. rewrite <- H. rewrite tl_nth_tl; trivial with datatypes. Qed. Lemma Str_nth_plus : forall (n m:nat) (s:Stream), Str_nth n (Str_nth_tl m s) = Str_nth (n + m) s. intros; unfold Str_nth; rewrite Str_nth_tl_plus; trivial with datatypes. Qed. (** Extensional Equality between two streams *) CoInductive EqSt (s1 s2: Stream) : Prop := eqst : hd s1 = hd s2 -> EqSt (tl s1) (tl s2) -> EqSt s1 s2. 
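(** Small illustration, added as a sketch and not part of the original
    file: a single case analysis on the [EqSt] constructor already yields
    equality of the heads of two bisimilar streams. *)

Example EqSt_hd_example : forall s1 s2 : Stream, EqSt s1 s2 -> hd s1 = hd s2.
Proof.
  intros s1 s2 H; case H; intros; assumption.
Qed.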
(** A coinduction principle *) Ltac coinduction proof := cofix proof; intros; constructor; [ clear proof | try (apply proof; clear proof) ]. (** Extensional equality is an equivalence relation *) Theorem EqSt_reflex : forall s:Stream, EqSt s s. coinduction EqSt_reflex. reflexivity. Qed. Theorem sym_EqSt : forall s1 s2:Stream, EqSt s1 s2 -> EqSt s2 s1. coinduction Eq_sym. case H; intros; symmetry ; assumption. case H; intros; assumption. Qed. Theorem trans_EqSt : forall s1 s2 s3:Stream, EqSt s1 s2 -> EqSt s2 s3 -> EqSt s1 s3. coinduction Eq_trans. transitivity (hd s2). case H; intros; assumption. case H0; intros; assumption. apply (Eq_trans (tl s1) (tl s2) (tl s3)). case H; trivial with datatypes. case H0; trivial with datatypes. Qed. (** The definition given is equivalent to require the elements at each position to be equal *) Theorem eqst_ntheq : forall (n:nat) (s1 s2:Stream), EqSt s1 s2 -> Str_nth n s1 = Str_nth n s2. unfold Str_nth; simple induction n. intros s1 s2 H; case H; trivial with datatypes. intros m hypind. simpl. intros s1 s2 H. apply hypind. case H; trivial with datatypes. Qed. Theorem ntheq_eqst : forall s1 s2:Stream, (forall n:nat, Str_nth n s1 = Str_nth n s2) -> EqSt s1 s2. coinduction Equiv2. apply (H 0). intros n; apply (H (S n)). Qed. Section Stream_Properties. Variable P : Stream -> Prop. (*i Inductive Exists : Stream -> Prop := | Here : forall x:Stream, P x -> Exists x | Further : forall x:Stream, ~ P x -> Exists (tl x) -> Exists x. i*) Inductive Exists ( x: Stream ) : Prop := | Here : P x -> Exists x | Further : Exists (tl x) -> Exists x. CoInductive ForAll (x: Stream) : Prop := HereAndFurther : P x -> ForAll (tl x) -> ForAll x. Lemma ForAll_Str_nth_tl : forall m x, ForAll x -> ForAll (Str_nth_tl m x). Proof. induction m. tauto. intros x [_ H]. simpl. apply IHm. assumption. Qed. Section Co_Induction_ForAll. Variable Inv : Stream -> Prop. Hypothesis InvThenP : forall x:Stream, Inv x -> P x. Hypothesis InvIsStable : forall x:Stream, Inv x -> Inv (tl x). Theorem ForAll_coind : forall x:Stream, Inv x -> ForAll x. coinduction ForAll_coind; auto. Qed. End Co_Induction_ForAll. End Stream_Properties. End Streams. Section Map. Variables A B : Type. Variable f : A -> B. CoFixpoint map (s:Stream A) : Stream B := Cons (f (hd s)) (map (tl s)). Lemma Str_nth_tl_map : forall n s, Str_nth_tl n (map s)= map (Str_nth_tl n s). Proof. induction n. reflexivity. simpl. intros s. apply IHn. Qed. Lemma Str_nth_map : forall n s, Str_nth n (map s)= f (Str_nth n s). Proof. intros n s. unfold Str_nth. rewrite Str_nth_tl_map. reflexivity. Qed. Lemma ForAll_map : forall (P:Stream B -> Prop) (S:Stream A), ForAll (fun s => P (map s)) S <-> ForAll P (map S). Proof. intros P S. split; generalize S; clear S; cofix; intros S; constructor; destruct H as [H0 H]; firstorder. Qed. Lemma Exists_map : forall (P:Stream B -> Prop) (S:Stream A), Exists (fun s => P (map s)) S -> Exists P (map S). Proof. intros P S H. (induction H;[left|right]); firstorder. Defined. End Map. Section Constant_Stream. Variable A : Type. Variable a : A. CoFixpoint const : Stream A := Cons a const. End Constant_Stream. Section Zip. Variable A B C : Type. Variable f: A -> B -> C. CoFixpoint zipWith (a:Stream A) (b:Stream B) : Stream C := Cons (f (hd a) (hd b)) (zipWith (tl a) (tl b)). Lemma Str_nth_tl_zipWith : forall n (a:Stream A) (b:Stream B), Str_nth_tl n (zipWith a b)= zipWith (Str_nth_tl n a) (Str_nth_tl n b). Proof. induction n. reflexivity. intros [x xs] [y ys]. unfold Str_nth in *. simpl in *. apply IHn. Qed. 
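(** Small illustration, added as a sketch and not part of the original
    file: unfolding the cofixpoint once shows that the head of
    [zipWith a b] is [f] applied to the two heads. *)

Example hd_zipWith_example : forall (a:Stream A) (b:Stream B),
  hd (zipWith a b) = f (hd a) (hd b).
Proof.
  intros; reflexivity.
Qed.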
Lemma Str_nth_zipWith : forall n (a:Stream A) (b:Stream B), Str_nth n (zipWith a b)= f (Str_nth n a) (Str_nth n b). Proof. intros. unfold Str_nth. rewrite Str_nth_tl_zipWith. reflexivity. Qed. End Zip. Unset Implicit Arguments. coq-8.4pl2/theories/Lists/vo.itarget0000640000175000001440000000013611777052043016562 0ustar notinusersListSet.vo ListTactics.vo List.vo SetoidList.vo SetoidPermutation.vo StreamMemo.vo Streams.vo coq-8.4pl2/theories/Lists/SetoidPermutation.v0000640000175000001440000000757211777052043020436 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* list A -> Prop := | permA_nil: PermutationA nil nil | permA_skip x₁ x₂ l₁ l₂ : eqA x₁ x₂ -> PermutationA l₁ l₂ -> PermutationA (x₁ :: l₁) (x₂ :: l₂) | permA_swap x y l : PermutationA (y :: x :: l) (x :: y :: l) | permA_trans l₁ l₂ l₃ : PermutationA l₁ l₂ -> PermutationA l₂ l₃ -> PermutationA l₁ l₃. Local Hint Constructors PermutationA. Global Instance: Equivalence PermutationA. Proof. constructor. - intro l. induction l; intuition. - intros l₁ l₂. induction 1; eauto. apply permA_skip; intuition. - exact permA_trans. Qed. Global Instance PermutationA_cons : Proper (eqA ==> PermutationA ==> PermutationA) (@cons A). Proof. repeat intro. now apply permA_skip. Qed. Lemma PermutationA_app_head l₁ l₂ l : PermutationA l₁ l₂ -> PermutationA (l ++ l₁) (l ++ l₂). Proof. induction l; trivial; intros. apply permA_skip; intuition. Qed. Global Instance PermutationA_app : Proper (PermutationA ==> PermutationA ==> PermutationA) (@app A). Proof. intros l₁ l₂ Pl k₁ k₂ Pk. induction Pl. - easy. - now apply permA_skip. - etransitivity. * rewrite <-!app_comm_cons. now apply permA_swap. * rewrite !app_comm_cons. now apply PermutationA_app_head. - do 2 (etransitivity; try eassumption). apply PermutationA_app_head. now symmetry. Qed. Lemma PermutationA_app_tail l₁ l₂ l : PermutationA l₁ l₂ -> PermutationA (l₁ ++ l) (l₂ ++ l). Proof. intros E. now rewrite E. Qed. Lemma PermutationA_cons_append l x : PermutationA (x :: l) (l ++ x :: nil). Proof. induction l. - easy. - simpl. rewrite <-IHl. intuition. Qed. Lemma PermutationA_app_comm l₁ l₂ : PermutationA (l₁ ++ l₂) (l₂ ++ l₁). Proof. induction l₁. - now rewrite app_nil_r. - rewrite <-app_comm_cons, IHl₁, app_comm_cons. now rewrite PermutationA_cons_append, <-app_assoc. Qed. Lemma PermutationA_cons_app l l₁ l₂ x : PermutationA l (l₁ ++ l₂) -> PermutationA (x :: l) (l₁ ++ x :: l₂). Proof. intros E. rewrite E. now rewrite app_comm_cons, PermutationA_cons_append, <-app_assoc. Qed. Lemma PermutationA_middle l₁ l₂ x : PermutationA (x :: l₁ ++ l₂) (l₁ ++ x :: l₂). Proof. now apply PermutationA_cons_app. Qed. Lemma PermutationA_equivlistA l₁ l₂ : PermutationA l₁ l₂ -> equivlistA eqA l₁ l₂. Proof. induction 1. - reflexivity. - now apply equivlistA_cons_proper. - now apply equivlistA_permute_heads. - etransitivity; eassumption. Qed. Lemma NoDupA_equivlistA_PermutationA l₁ l₂ : NoDupA eqA l₁ -> NoDupA eqA l₂ -> equivlistA eqA l₁ l₂ -> PermutationA l₁ l₂. Proof. intros Pl₁. revert l₂. induction Pl₁ as [|x l₁ E1]. - intros l₂ _ H₂. symmetry in H₂. now rewrite (equivlistA_nil_eq eqA). - intros l₂ Pl₂ E2. destruct (@InA_split _ eqA l₂ x) as [l₂h [y [l₂t [E3 ?]]]]. { rewrite <-E2. intuition. } subst. transitivity (y :: l₁); [intuition |]. apply PermutationA_cons_app, IHPl₁. now apply NoDupA_split with y. apply equivlistA_NoDupA_split with x y; intuition. Qed. End Permutation. 
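(** Usage sketch, added as an illustration and not part of the original
    file: with Leibniz equality on [nat] as the setoid relation, a
    two-element swap is a [PermutationA]. The fully applied form
    [@PermutationA nat (@eq nat)] is used to stay independent of the
    implicit-argument status of the section parameters. *)

Example PermutationA_swap_example :
  @PermutationA nat (@eq nat) (1 :: 2 :: nil) (2 :: 1 :: nil).
Proof.
  apply permA_swap.
Qed.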
coq-8.4pl2/theories/Lists/SetoidList.v0000640000175000001440000005643411777052043017043 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. (** Being in a list modulo an equality relation over type [A]. *) Inductive InA (x : A) : list A -> Prop := | InA_cons_hd : forall y l, eqA x y -> InA x (y :: l) | InA_cons_tl : forall y l, InA x l -> InA x (y :: l). Hint Constructors InA. (** TODO: it would be nice to have a generic definition instead of the previous one. Having [InA = Exists eqA] raises too many compatibility issues. For now, we only state the equivalence: *) Lemma InA_altdef : forall x l, InA x l <-> Exists (eqA x) l. Proof. split; induction 1; auto. Qed. Lemma InA_cons : forall x y l, InA x (y::l) <-> eqA x y \/ InA x l. Proof. intuition. invlist InA; auto. Qed. Lemma InA_nil : forall x, InA x nil <-> False. Proof. intuition. invlist InA. Qed. (** An alternative definition of [InA]. *) Lemma InA_alt : forall x l, InA x l <-> exists y, eqA x y /\ In y l. Proof. intros; rewrite InA_altdef, Exists_exists; firstorder. Qed. (** A list without redundancy modulo the equality over [A]. *) Inductive NoDupA : list A -> Prop := | NoDupA_nil : NoDupA nil | NoDupA_cons : forall x l, ~ InA x l -> NoDupA l -> NoDupA (x::l). Hint Constructors NoDupA. (** An alternative definition of [NoDupA] based on [ForallOrdPairs] *) Lemma NoDupA_altdef : forall l, NoDupA l <-> ForallOrdPairs (complement eqA) l. Proof. split; induction 1; constructor; auto. rewrite Forall_forall. intros b Hb. intro Eq; elim H. rewrite InA_alt. exists b; auto. rewrite InA_alt; intros (a' & Haa' & Ha'). rewrite Forall_forall in H. exact (H a' Ha' Haa'). Qed. (** lists with same elements modulo [eqA] *) Definition inclA l l' := forall x, InA x l -> InA x l'. Definition equivlistA l l' := forall x, InA x l <-> InA x l'. Lemma incl_nil l : inclA nil l. Proof. intro. intros. inversion H. Qed. Hint Resolve incl_nil : list. (** lists with same elements modulo [eqA] at the same place *) Inductive eqlistA : list A -> list A -> Prop := | eqlistA_nil : eqlistA nil nil | eqlistA_cons : forall x x' l l', eqA x x' -> eqlistA l l' -> eqlistA (x::l) (x'::l'). Hint Constructors eqlistA. (** We could also have written [eqlistA = Forall2 eqA]. *) Lemma eqlistA_altdef : forall l l', eqlistA l l' <-> Forall2 eqA l l'. Proof. split; induction 1; auto. Qed. (** Results concerning lists modulo [eqA] *) Hypothesis eqA_equiv : Equivalence eqA. Hint Resolve (@Equivalence_Reflexive _ _ eqA_equiv). Hint Resolve (@Equivalence_Transitive _ _ eqA_equiv). Hint Immediate (@Equivalence_Symmetric _ _ eqA_equiv). Ltac inv := invlist InA; invlist sort; invlist lelistA; invlist NoDupA. (** First, the two notions [equivlistA] and [eqlistA] are indeed equivlances *) Global Instance equivlist_equiv : Equivalence equivlistA. Proof. firstorder. Qed. Global Instance eqlistA_equiv : Equivalence eqlistA. Proof. constructor; red. induction x; auto. induction 1; auto. intros x y z H; revert z; induction H; auto. inversion 1; subst; auto. invlist eqlistA; eauto with *. Qed. (** Moreover, [eqlistA] implies [equivlistA]. A reverse result will be proved later for sorted list without duplicates. *) Global Instance eqlistA_equivlistA : subrelation eqlistA equivlistA. Proof. intros x x' H. induction H. intuition. red; intros. rewrite 2 InA_cons. rewrite (IHeqlistA x0), H; intuition. Qed. 
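(** Small illustration, added as a sketch and not part of the original
    file: [eqlistA] relates lists pointwise, so a singleton instance
    follows from one [eqA] step plus the [nil] case. *)

Example eqlistA_singleton_example : forall x y : A,
  eqA x y -> eqlistA (x :: nil) (y :: nil).
Proof.
  intros x y H; apply eqlistA_cons; auto.
Qed.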
(** InA is compatible with eqA (for its first arg) and with equivlistA (and hence eqlistA) for its second arg *) Global Instance InA_compat : Proper (eqA==>equivlistA==>iff) InA. Proof. intros x x' Hxx' l l' Hll'. rewrite (Hll' x). rewrite 2 InA_alt; firstorder. Qed. (** For compatibility, an immediate consequence of [InA_compat] *) Lemma InA_eqA : forall l x y, eqA x y -> InA x l -> InA y l. Proof. intros l x y H H'. rewrite <- H; auto. Qed. Hint Immediate InA_eqA. Lemma In_InA : forall l x, In x l -> InA x l. Proof. simple induction l; simpl; intuition. subst; auto. Qed. Hint Resolve In_InA. Lemma InA_split : forall l x, InA x l -> exists l1 y l2, eqA x y /\ l = l1++y::l2. Proof. induction l; intros; inv. exists (@nil A); exists a; exists l; auto. destruct (IHl x H0) as (l1,(y,(l2,(H1,H2)))). exists (a::l1); exists y; exists l2; auto. split; simpl; f_equal; auto. Qed. Lemma InA_app : forall l1 l2 x, InA x (l1 ++ l2) -> InA x l1 \/ InA x l2. Proof. induction l1; simpl in *; intuition. inv; auto. elim (IHl1 l2 x H0); auto. Qed. Lemma InA_app_iff : forall l1 l2 x, InA x (l1 ++ l2) <-> InA x l1 \/ InA x l2. Proof. split. apply InA_app. destruct 1; generalize H; do 2 rewrite InA_alt. destruct 1 as (y,(H1,H2)); exists y; split; auto. apply in_or_app; auto. destruct 1 as (y,(H1,H2)); exists y; split; auto. apply in_or_app; auto. Qed. Lemma InA_rev : forall p m, InA p (rev m) <-> InA p m. Proof. intros; do 2 rewrite InA_alt. split; intros (y,H); exists y; intuition. rewrite In_rev; auto. rewrite <- In_rev; auto. Qed. (** Some more facts about InA *) Lemma InA_singleton x y : InA x (y::nil) <-> eqA x y. Proof. rewrite InA_cons, InA_nil; tauto. Qed. Lemma InA_double_head x y l : InA x (y :: y :: l) <-> InA x (y :: l). Proof. rewrite !InA_cons; tauto. Qed. Lemma InA_permute_heads x y z l : InA x (y :: z :: l) <-> InA x (z :: y :: l). Proof. rewrite !InA_cons; tauto. Qed. Lemma InA_app_idem x l : InA x (l ++ l) <-> InA x l. Proof. rewrite InA_app_iff; tauto. Qed. Section NoDupA. Lemma NoDupA_app : forall l l', NoDupA l -> NoDupA l' -> (forall x, InA x l -> InA x l' -> False) -> NoDupA (l++l'). Proof. induction l; simpl; auto; intros. inv. constructor. rewrite InA_alt; intros (y,(H4,H5)). destruct (in_app_or _ _ _ H5). elim H2. rewrite InA_alt. exists y; auto. apply (H1 a). auto. rewrite InA_alt. exists y; auto. apply IHl; auto. intros. apply (H1 x); auto. Qed. Lemma NoDupA_rev : forall l, NoDupA l -> NoDupA (rev l). Proof. induction l. simpl; auto. simpl; intros. inv. apply NoDupA_app; auto. constructor; auto. intro; inv. intros x. rewrite InA_alt. intros (x1,(H2,H3)). intro; inv. destruct H0. rewrite <- H4, H2. apply In_InA. rewrite In_rev; auto. Qed. Lemma NoDupA_split : forall l l' x, NoDupA (l++x::l') -> NoDupA (l++l'). Proof. induction l; simpl in *; intros; inv; auto. constructor; eauto. contradict H0. rewrite InA_app_iff in *. rewrite InA_cons. intuition. Qed. Lemma NoDupA_swap : forall l l' x, NoDupA (l++x::l') -> NoDupA (x::l++l'). Proof. induction l; simpl in *; intros; inv; auto. constructor; eauto. assert (H2:=IHl _ _ H1). inv. rewrite InA_cons. red; destruct 1. apply H0. rewrite InA_app_iff in *; rewrite InA_cons; auto. apply H; auto. constructor. contradict H0. rewrite InA_app_iff in *; rewrite InA_cons; intuition. eapply NoDupA_split; eauto. Qed. Lemma NoDupA_singleton x : NoDupA (x::nil). Proof. repeat constructor. inversion 1. Qed. End NoDupA. Section EquivlistA. Global Instance equivlistA_cons_proper: Proper (eqA ==> equivlistA ==> equivlistA) (@cons A). Proof. intros ? ? E1 ? ? 
E2 ?; now rewrite !InA_cons, E1, E2. Qed. Global Instance equivlistA_app_proper: Proper (equivlistA ==> equivlistA ==> equivlistA) (@app A). Proof. intros ? ? E1 ? ? E2 ?. now rewrite !InA_app_iff, E1, E2. Qed. Lemma equivlistA_cons_nil x l : ~ equivlistA (x :: l) nil. Proof. intros E. now eapply InA_nil, E, InA_cons_hd. Qed. Lemma equivlistA_nil_eq l : equivlistA l nil -> l = nil. Proof. destruct l. - trivial. - intros H. now apply equivlistA_cons_nil in H. Qed. Lemma equivlistA_double_head x l : equivlistA (x :: x :: l) (x :: l). Proof. intro. apply InA_double_head. Qed. Lemma equivlistA_permute_heads x y l : equivlistA (x :: y :: l) (y :: x :: l). Proof. intro. apply InA_permute_heads. Qed. Lemma equivlistA_app_idem l : equivlistA (l ++ l) l. Proof. intro. apply InA_app_idem. Qed. Lemma equivlistA_NoDupA_split l l1 l2 x y : eqA x y -> NoDupA (x::l) -> NoDupA (l1++y::l2) -> equivlistA (x::l) (l1++y::l2) -> equivlistA l (l1++l2). Proof. intros; intro a. generalize (H2 a). rewrite !InA_app_iff, !InA_cons. inv. assert (SW:=NoDupA_swap H1). inv. rewrite InA_app_iff in H0. split; intros. assert (~eqA a x) by (contradict H3; rewrite <- H3; auto). assert (~eqA a y) by (rewrite <- H; auto). tauto. assert (OR : eqA a x \/ InA a l) by intuition. clear H6. destruct OR as [EQN|INA]; auto. elim H0. rewrite <-H,<-EQN; auto. Qed. End EquivlistA. Section Fold. Variable B:Type. Variable eqB:B->B->Prop. Variable st:Equivalence eqB. Variable f:A->B->B. Variable i:B. Variable Comp:Proper (eqA==>eqB==>eqB) f. Lemma fold_right_eqlistA : forall s s', eqlistA s s' -> eqB (fold_right f i s) (fold_right f i s'). Proof. induction 1; simpl; auto with relations. apply Comp; auto. Qed. (** Fold with restricted [transpose] hypothesis. *) Section Fold_With_Restriction. Variable R : A -> A -> Prop. Hypothesis R_sym : Symmetric R. Hypothesis R_compat : Proper (eqA==>eqA==>iff) R. (* (** [ForallOrdPairs R] is compatible with [equivlistA] over the lists without duplicates, as long as the relation [R] is symmetric and compatible with [eqA]. To prove this fact, we use an auxiliary notion: "forall distinct pairs, ...". *) Definition ForallNeqPairs := ForallPairs (fun a b => ~eqA a b -> R a b). (** [ForallOrdPairs] and [ForallNeqPairs] are related, but not completely equivalent. For proving one implication, we need to know that the list has no duplicated elements... *) Lemma ForallNeqPairs_ForallOrdPairs : forall l, NoDupA l -> ForallNeqPairs l -> ForallOrdPairs R l. Proof. induction l; auto. constructor. inv. rewrite Forall_forall; intros b Hb. apply H0; simpl; auto. contradict H1; rewrite H1; auto. apply IHl. inv; auto. intros b c Hb Hc Hneq. apply H0; simpl; auto. Qed. (** ... and for proving the other implication, we need to be able to reverse relation [R]. *) Lemma ForallOrdPairs_ForallNeqPairs : forall l, ForallOrdPairs R l -> ForallNeqPairs l. Proof. intros l Hl x y Hx Hy N. destruct (ForallOrdPairs_In Hl x y Hx Hy) as [H|[H|H]]. subst; elim N; auto. assumption. apply R_sym; assumption. Qed. *) (** Compatibility of [ForallOrdPairs] with respect to [inclA]. *) Lemma ForallOrdPairs_inclA : forall l l', NoDupA l' -> inclA l' l -> ForallOrdPairs R l -> ForallOrdPairs R l'. Proof. induction l' as [|x l' IH]. constructor. intros ND Incl FOP. apply FOP_cons; inv; unfold inclA in *; auto. rewrite Forall_forall; intros y Hy. assert (Ix : InA x (x::l')) by (rewrite InA_cons; auto). apply Incl in Ix. rewrite InA_alt in Ix. destruct Ix as (x' & Hxx' & Hx'). assert (Iy : InA y (x::l')) by (apply In_InA; simpl; auto). 
apply Incl in Iy. rewrite InA_alt in Iy. destruct Iy as (y' & Hyy' & Hy'). rewrite Hxx', Hyy'. destruct (ForallOrdPairs_In FOP x' y' Hx' Hy') as [E|[?|?]]; auto. absurd (InA x l'); auto. rewrite Hxx', E, <- Hyy'; auto. Qed. (** Two-argument functions that allow to reorder their arguments. *) Definition transpose (f : A -> B -> B) := forall (x y : A) (z : B), eqB (f x (f y z)) (f y (f x z)). (** A version of transpose with restriction on where it should hold *) Definition transpose_restr (R : A -> A -> Prop)(f : A -> B -> B) := forall (x y : A) (z : B), R x y -> eqB (f x (f y z)) (f y (f x z)). Variable TraR :transpose_restr R f. Lemma fold_right_commutes_restr : forall s1 s2 x, ForallOrdPairs R (s1++x::s2) -> eqB (fold_right f i (s1++x::s2)) (f x (fold_right f i (s1++s2))). Proof. induction s1; simpl; auto; intros. reflexivity. transitivity (f a (f x (fold_right f i (s1++s2)))). apply Comp; auto. apply IHs1. invlist ForallOrdPairs; auto. apply TraR. invlist ForallOrdPairs; auto. rewrite Forall_forall in H0; apply H0. apply in_or_app; simpl; auto. Qed. Lemma fold_right_equivlistA_restr : forall s s', NoDupA s -> NoDupA s' -> ForallOrdPairs R s -> equivlistA s s' -> eqB (fold_right f i s) (fold_right f i s'). Proof. simple induction s. destruct s'; simpl. intros; reflexivity. unfold equivlistA; intros. destruct (H2 a). assert (InA a nil) by auto; inv. intros x l Hrec s' N N' F E; simpl in *. assert (InA x s') by (rewrite <- (E x); auto). destruct (InA_split H) as (s1,(y,(s2,(H1,H2)))). subst s'. transitivity (f x (fold_right f i (s1++s2))). apply Comp; auto. apply Hrec; auto. inv; auto. eapply NoDupA_split; eauto. invlist ForallOrdPairs; auto. eapply equivlistA_NoDupA_split; eauto. transitivity (f y (fold_right f i (s1++s2))). apply Comp; auto. reflexivity. symmetry; apply fold_right_commutes_restr. apply ForallOrdPairs_inclA with (x::l); auto. red; intros; rewrite E; auto. Qed. Lemma fold_right_add_restr : forall s' s x, NoDupA s -> NoDupA s' -> ForallOrdPairs R s' -> ~ InA x s -> equivlistA s' (x::s) -> eqB (fold_right f i s') (f x (fold_right f i s)). Proof. intros; apply (@fold_right_equivlistA_restr s' (x::s)); auto. Qed. End Fold_With_Restriction. (** we now state similar results, but without restriction on transpose. *) Variable Tra :transpose f. Lemma fold_right_commutes : forall s1 s2 x, eqB (fold_right f i (s1++x::s2)) (f x (fold_right f i (s1++s2))). Proof. induction s1; simpl; auto; intros. reflexivity. transitivity (f a (f x (fold_right f i (s1++s2)))); auto. apply Comp; auto. Qed. Lemma fold_right_equivlistA : forall s s', NoDupA s -> NoDupA s' -> equivlistA s s' -> eqB (fold_right f i s) (fold_right f i s'). Proof. intros; apply fold_right_equivlistA_restr with (R:=fun _ _ => True); repeat red; auto. apply ForallPairs_ForallOrdPairs; try red; auto. Qed. Lemma fold_right_add : forall s' s x, NoDupA s -> NoDupA s' -> ~ InA x s -> equivlistA s' (x::s) -> eqB (fold_right f i s') (f x (fold_right f i s)). Proof. intros; apply (@fold_right_equivlistA s' (x::s)); auto. Qed. End Fold. Section Remove. Hypothesis eqA_dec : forall x y : A, {eqA x y}+{~(eqA x y)}. Lemma InA_dec : forall x l, { InA x l } + { ~ InA x l }. Proof. induction l. right; auto. intro; inv. destruct (eqA_dec x a). left; auto. destruct IHl. left; auto. right; intro; inv; contradiction. Defined. Fixpoint removeA (x : A) (l : list A) : list A := match l with | nil => nil | y::tl => if (eqA_dec x y) then removeA x tl else y::(removeA x tl) end. 
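(** Added behavioural note, illustration only: [removeA x l] deletes every
    element of [l] that is [eqA]-equal to [x].  For instance, once this
    section is instantiated with [A := nat], [eqA := eq] and the standard
    decidable equality on [nat], one would expect
    [removeA 1 (1 :: 2 :: 1 :: nil) = 2 :: nil],
    both occurrences of [1] being removed by computation. *)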
Lemma removeA_filter : forall x l, removeA x l = filter (fun y => if eqA_dec x y then false else true) l. Proof. induction l; simpl; auto. destruct (eqA_dec x a); auto. rewrite IHl; auto. Qed. Lemma removeA_InA : forall l x y, InA y (removeA x l) <-> InA y l /\ ~eqA x y. Proof. induction l; simpl; auto. split. intro; inv. destruct 1; inv. intros. destruct (eqA_dec x a); simpl; auto. rewrite IHl; split; destruct 1; split; auto. inv; auto. destruct H0; transitivity a; auto. split. intro; inv. split; auto. contradict n. transitivity y; auto. rewrite (IHl x y) in H0; destruct H0; auto. destruct 1; inv; auto. right; rewrite IHl; auto. Qed. Lemma removeA_NoDupA : forall s x, NoDupA s -> NoDupA (removeA x s). Proof. simple induction s; simpl; intros. auto. inv. destruct (eqA_dec x a); simpl; auto. constructor; auto. rewrite removeA_InA. intuition. Qed. Lemma removeA_equivlistA : forall l l' x, ~InA x l -> equivlistA (x :: l) l' -> equivlistA l (removeA x l'). Proof. unfold equivlistA; intros. rewrite removeA_InA. split; intros. rewrite <- H0; split; auto. contradict H. apply InA_eqA with x0; auto. rewrite <- (H0 x0) in H1. destruct H1. inv; auto. elim H2; auto. Qed. End Remove. (** Results concerning lists modulo [eqA] and [ltA] *) Variable ltA : A -> A -> Prop. Hypothesis ltA_strorder : StrictOrder ltA. Hypothesis ltA_compat : Proper (eqA==>eqA==>iff) ltA. Hint Resolve (@StrictOrder_Transitive _ _ ltA_strorder). Notation InfA:=(lelistA ltA). Notation SortA:=(sort ltA). Hint Constructors lelistA sort. Lemma InfA_ltA : forall l x y, ltA x y -> InfA y l -> InfA x l. Proof. destruct l; constructor. inv; eauto. Qed. Global Instance InfA_compat : Proper (eqA==>eqlistA==>iff) InfA. Proof. intros x x' Hxx' l l' Hll'. inversion_clear Hll'. intuition. split; intro; inv; constructor. rewrite <- Hxx', <- H; auto. rewrite Hxx', H; auto. Qed. (** For compatibility, can be deduced from [InfA_compat] *) Lemma InfA_eqA l x y : eqA x y -> InfA y l -> InfA x l. Proof. intros H; now rewrite H. Qed. Hint Immediate InfA_ltA InfA_eqA. Lemma SortA_InfA_InA : forall l x a, SortA l -> InfA a l -> InA x l -> ltA a x. Proof. simple induction l. intros. inv. intros. inv. setoid_replace x with a; auto. eauto. Qed. Lemma In_InfA : forall l x, (forall y, In y l -> ltA x y) -> InfA x l. Proof. simple induction l; simpl; intros; constructor; auto. Qed. Lemma InA_InfA : forall l x, (forall y, InA y l -> ltA x y) -> InfA x l. Proof. simple induction l; simpl; intros; constructor; auto. Qed. (* In fact, this may be used as an alternative definition for InfA: *) Lemma InfA_alt : forall l x, SortA l -> (InfA x l <-> (forall y, InA y l -> ltA x y)). Proof. split. intros; eapply SortA_InfA_InA; eauto. apply InA_InfA. Qed. Lemma InfA_app : forall l1 l2 a, InfA a l1 -> InfA a l2 -> InfA a (l1++l2). Proof. induction l1; simpl; auto. intros; inv; auto. Qed. Lemma SortA_app : forall l1 l2, SortA l1 -> SortA l2 -> (forall x y, InA x l1 -> InA y l2 -> ltA x y) -> SortA (l1 ++ l2). Proof. induction l1; simpl in *; intuition. inv. constructor; auto. apply InfA_app; auto. destruct l2; auto. Qed. Lemma SortA_NoDupA : forall l, SortA l -> NoDupA l. Proof. simple induction l; auto. intros x l' H H0. inv. constructor; auto. intro. apply (StrictOrder_Irreflexive x). eapply SortA_InfA_InA; eauto. Qed. (** Some results about [eqlistA] *) Section EqlistA. Lemma eqlistA_length : forall l l', eqlistA l l' -> length l = length l'. Proof. induction 1; auto; simpl; congruence. Qed. 
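(** Added sketch, not in the original file: [eqlistA] relates lists
    pointwise, so a one-element instance follows directly from the
    constructors (which are registered as hints above): *)
(*
Goal forall x x', eqA x x' -> eqlistA (x :: nil) (x' :: nil).
Proof. intros; auto. Qed.
*)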
Global Instance app_eqlistA_compat : Proper (eqlistA==>eqlistA==>eqlistA) (@app A). Proof. repeat red; induction 1; simpl; auto. Qed. (** For compatibility, can be deduced from app_eqlistA_compat **) Lemma eqlistA_app : forall l1 l1' l2 l2', eqlistA l1 l1' -> eqlistA l2 l2' -> eqlistA (l1++l2) (l1'++l2'). Proof. intros l1 l1' l2 l2' H H'; rewrite H, H'; reflexivity. Qed. Lemma eqlistA_rev_app : forall l1 l1', eqlistA l1 l1' -> forall l2 l2', eqlistA l2 l2' -> eqlistA ((rev l1)++l2) ((rev l1')++l2'). Proof. induction 1; auto. simpl; intros. do 2 rewrite app_ass; simpl; auto. Qed. Global Instance rev_eqlistA_compat : Proper (eqlistA==>eqlistA) (@rev A). Proof. repeat red. intros. rewrite (app_nil_end (rev x)), (app_nil_end (rev y)). apply eqlistA_rev_app; auto. Qed. Lemma eqlistA_rev : forall l1 l1', eqlistA l1 l1' -> eqlistA (rev l1) (rev l1'). Proof. apply rev_eqlistA_compat. Qed. Lemma SortA_equivlistA_eqlistA : forall l l', SortA l -> SortA l' -> equivlistA l l' -> eqlistA l l'. Proof. induction l; destruct l'; simpl; intros; auto. destruct (H1 a); assert (InA a nil) by auto; inv. destruct (H1 a); assert (InA a nil) by auto; inv. inv. assert (forall y, InA y l -> ltA a y). intros; eapply SortA_InfA_InA with (l:=l); eauto. assert (forall y, InA y l' -> ltA a0 y). intros; eapply SortA_InfA_InA with (l:=l'); eauto. clear H3 H4. assert (eqA a a0). destruct (H1 a). destruct (H1 a0). assert (InA a (a0::l')) by auto. inv; auto. assert (InA a0 (a::l)) by auto. inv; auto. elim (StrictOrder_Irreflexive a); eauto. constructor; auto. apply IHl; auto. split; intros. destruct (H1 x). assert (InA x (a0::l')) by auto. inv; auto. rewrite H9,<-H3 in H4. elim (StrictOrder_Irreflexive a); eauto. destruct (H1 x). assert (InA x (a::l)) by auto. inv; auto. rewrite H9,H3 in H4. elim (StrictOrder_Irreflexive a0); eauto. Qed. End EqlistA. (** A few things about [filter] *) Section Filter. Lemma filter_sort : forall f l, SortA l -> SortA (List.filter f l). Proof. induction l; simpl; auto. intros; inv; auto. destruct (f a); auto. constructor; auto. apply In_InfA; auto. intros. rewrite filter_In in H; destruct H. eapply SortA_InfA_InA; eauto. Qed. Arguments eq {A} x _. Lemma filter_InA : forall f, Proper (eqA==>eq) f -> forall l x, InA x (List.filter f l) <-> InA x l /\ f x = true. Proof. clear ltA ltA_compat ltA_strorder. intros; do 2 rewrite InA_alt; intuition. destruct H0 as (y,(H0,H1)); rewrite filter_In in H1; exists y; intuition. destruct H0 as (y,(H0,H1)); rewrite filter_In in H1; intuition. rewrite (H _ _ H0); auto. destruct H1 as (y,(H0,H1)); exists y; rewrite filter_In; intuition. rewrite <- (H _ _ H0); auto. Qed. Lemma filter_split : forall f, (forall x y, f x = true -> f y = false -> ltA x y) -> forall l, SortA l -> l = filter f l ++ filter (fun x=>negb (f x)) l. Proof. induction l; simpl; intros; auto. inv. rewrite IHl at 1; auto. case_eq (f a); simpl; intros; auto. assert (forall e, In e l -> f e = false). intros. assert (H4:=SortA_InfA_InA H1 H2 (In_InA H3)). case_eq (f e); simpl; intros; auto. elim (StrictOrder_Irreflexive e). transitivity a; auto. replace (List.filter f l) with (@nil A); auto. generalize H3; clear; induction l; simpl; auto. case_eq (f a); auto; intros. rewrite H3 in H; auto; try discriminate. Qed. End Filter. End Type_with_equality. Hint Constructors InA eqlistA NoDupA sort lelistA. Arguments equivlistA_cons_nil {A} eqA {eqA_equiv} x l _. Arguments equivlistA_nil_eq {A} eqA {eqA_equiv} l _. Section Find. Variable A B : Type. Variable eqA : A -> A -> Prop. 
Hypothesis eqA_equiv : Equivalence eqA. Hypothesis eqA_dec : forall x y : A, {eqA x y}+{~(eqA x y)}. Fixpoint findA (f : A -> bool) (l:list (A*B)) : option B := match l with | nil => None | (a,b)::l => if f a then Some b else findA f l end. Lemma findA_NoDupA : forall l a b, NoDupA (fun p p' => eqA (fst p) (fst p')) l -> (InA (fun p p' => eqA (fst p) (fst p') /\ snd p = snd p') (a,b) l <-> findA (fun a' => if eqA_dec a a' then true else false) l = Some b). Proof. set (eqk := fun p p' : A*B => eqA (fst p) (fst p')). set (eqke := fun p p' : A*B => eqA (fst p) (fst p') /\ snd p = snd p'). induction l; intros; simpl. split; intros; try discriminate. invlist InA. destruct a as (a',b'); rename a0 into a. invlist NoDupA. split; intros. invlist InA. compute in H2; destruct H2. subst b'. destruct (eqA_dec a a'); intuition. destruct (eqA_dec a a'); simpl. contradict H0. revert e H2; clear - eqA_equiv. induction l. intros; invlist InA. intros; invlist InA; auto. destruct a0. compute in H; destruct H. subst b. left; auto. compute. transitivity a; auto. symmetry; auto. rewrite <- IHl; auto. destruct (eqA_dec a a'); simpl in *. left; split; simpl; congruence. right. rewrite IHl; auto. Qed. End Find. (** Compatibility aliases. [Proper] is rather to be used directly now.*) Definition compat_bool {A} (eqA:A->A->Prop)(f:A->bool) := Proper (eqA==>Logic.eq) f. Definition compat_nat {A} (eqA:A->A->Prop)(f:A->nat) := Proper (eqA==>Logic.eq) f. Definition compat_P {A} (eqA:A->A->Prop)(P:A->Prop) := Proper (eqA==>impl) P. Definition compat_op {A B} (eqA:A->A->Prop)(eqB:B->B->Prop)(f:A->B->B) := Proper (eqA==>eqB==>eqB) f. coq-8.4pl2/theories/Lists/ListTactics.v0000640000175000001440000000410712010532755017166 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* fcons x ltac:(list_fold_right fcons fnil tl) | nil => fnil end. (* A variant of list_fold_right, to prevent the match of list_fold_right from catching errors raised by fcons. *) Ltac lazy_list_fold_right fcons fnil l := let f := match l with | ?x :: ?tl => fun _ => fcons x ltac:(fun _ => lazy_list_fold_right fcons fnil tl) | nil => fun _ => fnil() end in f(). Ltac list_fold_left fcons fnil l := match l with | ?x :: ?tl => list_fold_left fcons ltac:(fcons x fnil) tl | nil => fnil end. Ltac list_iter f l := match l with | ?x :: ?tl => f x; list_iter f tl | nil => idtac end. Ltac list_iter_gen seq f l := match l with | ?x :: ?tl => let t1 _ := f x in let t2 _ := list_iter_gen seq f tl in seq t1 t2 | nil => idtac end. Ltac AddFvTail a l := match l with | nil => constr:(a::nil) | a :: _ => l | ?x :: ?l => let l' := AddFvTail a l in constr:(x::l') end. Ltac Find_at a l := let rec find n l := match l with | nil => fail 100 "anomaly: Find_at" | a :: _ => eval compute in n | _ :: ?l => find (Pos.succ n) l end in find 1%positive l. Ltac check_is_list t := match t with | _ :: ?l => check_is_list l | nil => idtac | _ => fail 100 "anomaly: failed to build a canonical list" end. Ltac check_fv l := check_is_list l; match type of l with | list _ => idtac | _ => fail 100 "anomaly: built an ill-typed list" end. coq-8.4pl2/theories/Lists/intro.tex0000750000175000001440000000123711742055044016433 0ustar notinusers\section{Lists}\label{Lists} This library includes the following files: \begin{itemize} \item {\tt List.v} contains definitions of (polymorphic) lists, functions on lists such as head, tail, map, append and prove some properties of these functions. 
Implicit arguments are used in this library, so you should read the Reference Manual about implicit arguments before using it. \item {\tt ListSet.v} contains definitions and properties of finite sets, implemented as lists. \item {\tt Streams.v} defines the type of infinite lists (streams). It is a co-inductive type. Basic facts are stated and proved. The streams are also polymorphic. \end{itemize} coq-8.4pl2/theories/Lists/ListSet.v0000640000175000001440000002472712010532755016341 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y}. Definition set := list A. Definition empty_set : set := nil. Fixpoint set_add (a:A) (x:set) : set := match x with | nil => a :: nil | a1 :: x1 => match Aeq_dec a a1 with | left _ => a1 :: x1 | right _ => a1 :: set_add a x1 end end. Fixpoint set_mem (a:A) (x:set) : bool := match x with | nil => false | a1 :: x1 => match Aeq_dec a a1 with | left _ => true | right _ => set_mem a x1 end end. (** If [a] belongs to [x], removes [a] from [x]. If not, does nothing *) Fixpoint set_remove (a:A) (x:set) : set := match x with | nil => empty_set | a1 :: x1 => match Aeq_dec a a1 with | left _ => x1 | right _ => a1 :: set_remove a x1 end end. Fixpoint set_inter (x:set) : set -> set := match x with | nil => fun y => nil | a1 :: x1 => fun y => if set_mem a1 y then a1 :: set_inter x1 y else set_inter x1 y end. Fixpoint set_union (x y:set) : set := match y with | nil => x | a1 :: y1 => set_add a1 (set_union x y1) end. (** returns the set of all els of [x] that does not belong to [y] *) Fixpoint set_diff (x y:set) : set := match x with | nil => nil | a1 :: x1 => if set_mem a1 y then set_diff x1 y else set_add a1 (set_diff x1 y) end. Definition set_In : A -> set -> Prop := In (A:=A). Lemma set_In_dec : forall (a:A) (x:set), {set_In a x} + {~ set_In a x}. Proof. unfold set_In. (*** Realizer set_mem. Program_all. ***) simple induction x. auto. intros a0 x0 Ha0. case (Aeq_dec a a0); intro eq. rewrite eq; simpl; auto with datatypes. elim Ha0. auto with datatypes. right; simpl; unfold not; intros [Hc1| Hc2]; auto with datatypes. Qed. Lemma set_mem_ind : forall (B:Type) (P:B -> Prop) (y z:B) (a:A) (x:set), (set_In a x -> P y) -> P z -> P (if set_mem a x then y else z). Proof. simple induction x; simpl; intros. assumption. elim (Aeq_dec a a0); auto with datatypes. Qed. Lemma set_mem_ind2 : forall (B:Type) (P:B -> Prop) (y z:B) (a:A) (x:set), (set_In a x -> P y) -> (~ set_In a x -> P z) -> P (if set_mem a x then y else z). Proof. simple induction x; simpl; intros. apply H0; red; trivial. case (Aeq_dec a a0); auto with datatypes. intro; apply H; intros; auto. apply H1; red; intro. case H3; auto. Qed. Lemma set_mem_correct1 : forall (a:A) (x:set), set_mem a x = true -> set_In a x. Proof. simple induction x; simpl. discriminate. intros a0 l; elim (Aeq_dec a a0); auto with datatypes. Qed. Lemma set_mem_correct2 : forall (a:A) (x:set), set_In a x -> set_mem a x = true. Proof. simple induction x; simpl. intro Ha; elim Ha. intros a0 l; elim (Aeq_dec a a0); auto with datatypes. intros H1 H2 [H3| H4]. absurd (a0 = a); auto with datatypes. auto with datatypes. Qed. Lemma set_mem_complete1 : forall (a:A) (x:set), set_mem a x = false -> ~ set_In a x. Proof. simple induction x; simpl. tauto. intros a0 l; elim (Aeq_dec a a0). intros; discriminate H0. unfold not; intros; elim H1; auto with datatypes. Qed. Lemma set_mem_complete2 : forall (a:A) (x:set), ~ set_In a x -> set_mem a x = false. Proof. 
simple induction x; simpl. tauto. intros a0 l; elim (Aeq_dec a a0). intros; elim H0; auto with datatypes. tauto. Qed. Lemma set_add_intro1 : forall (a b:A) (x:set), set_In a x -> set_In a (set_add b x). Proof. unfold set_In; simple induction x; simpl. auto with datatypes. intros a0 l H [Ha0a| Hal]. elim (Aeq_dec b a0); left; assumption. elim (Aeq_dec b a0); right; [ assumption | auto with datatypes ]. Qed. Lemma set_add_intro2 : forall (a b:A) (x:set), a = b -> set_In a (set_add b x). Proof. unfold set_In; simple induction x; simpl. auto with datatypes. intros a0 l H Hab. elim (Aeq_dec b a0); [ rewrite Hab; intro Hba0; rewrite Hba0; simpl; auto with datatypes | auto with datatypes ]. Qed. Hint Resolve set_add_intro1 set_add_intro2. Lemma set_add_intro : forall (a b:A) (x:set), a = b \/ set_In a x -> set_In a (set_add b x). Proof. intros a b x [H1| H2]; auto with datatypes. Qed. Lemma set_add_elim : forall (a b:A) (x:set), set_In a (set_add b x) -> a = b \/ set_In a x. Proof. unfold set_In. simple induction x. simpl; intros [H1| H2]; auto with datatypes. simpl; do 3 intro. elim (Aeq_dec b a0). simpl; tauto. simpl; intros; elim H0. trivial with datatypes. tauto. tauto. Qed. Lemma set_add_elim2 : forall (a b:A) (x:set), set_In a (set_add b x) -> a <> b -> set_In a x. intros a b x H; case (set_add_elim _ _ _ H); intros; trivial. case H1; trivial. Qed. Hint Resolve set_add_intro set_add_elim set_add_elim2. Lemma set_add_not_empty : forall (a:A) (x:set), set_add a x <> empty_set. Proof. simple induction x; simpl. discriminate. intros; elim (Aeq_dec a a0); intros; discriminate. Qed. Lemma set_union_intro1 : forall (a:A) (x y:set), set_In a x -> set_In a (set_union x y). Proof. simple induction y; simpl; auto with datatypes. Qed. Lemma set_union_intro2 : forall (a:A) (x y:set), set_In a y -> set_In a (set_union x y). Proof. simple induction y; simpl. tauto. intros; elim H0; auto with datatypes. Qed. Hint Resolve set_union_intro2 set_union_intro1. Lemma set_union_intro : forall (a:A) (x y:set), set_In a x \/ set_In a y -> set_In a (set_union x y). Proof. intros; elim H; auto with datatypes. Qed. Lemma set_union_elim : forall (a:A) (x y:set), set_In a (set_union x y) -> set_In a x \/ set_In a y. Proof. simple induction y; simpl. auto with datatypes. intros. generalize (set_add_elim _ _ _ H0). intros [H1| H1]. auto with datatypes. tauto. Qed. Lemma set_union_emptyL : forall (a:A) (x:set), set_In a (set_union empty_set x) -> set_In a x. intros a x H; case (set_union_elim _ _ _ H); auto || contradiction. Qed. Lemma set_union_emptyR : forall (a:A) (x:set), set_In a (set_union x empty_set) -> set_In a x. intros a x H; case (set_union_elim _ _ _ H); auto || contradiction. Qed. Lemma set_inter_intro : forall (a:A) (x y:set), set_In a x -> set_In a y -> set_In a (set_inter x y). Proof. simple induction x. auto with datatypes. simpl; intros a0 l Hrec y [Ha0a| Hal] Hy. simpl; rewrite Ha0a. generalize (set_mem_correct1 a y). generalize (set_mem_complete1 a y). elim (set_mem a y); simpl; intros. auto with datatypes. absurd (set_In a y); auto with datatypes. elim (set_mem a0 y); [ right; auto with datatypes | auto with datatypes ]. Qed. Lemma set_inter_elim1 : forall (a:A) (x y:set), set_In a (set_inter x y) -> set_In a x. Proof. simple induction x. auto with datatypes. simpl; intros a0 l Hrec y. generalize (set_mem_correct1 a0 y). elim (set_mem a0 y); simpl; intros. elim H0; eauto with datatypes. eauto with datatypes. Qed. Lemma set_inter_elim2 : forall (a:A) (x y:set), set_In a (set_inter x y) -> set_In a y. 
Proof. simple induction x. simpl; tauto. simpl; intros a0 l Hrec y. generalize (set_mem_correct1 a0 y). elim (set_mem a0 y); simpl; intros. elim H0; [ intro Hr; rewrite <- Hr; eauto with datatypes | eauto with datatypes ]. eauto with datatypes. Qed. Hint Resolve set_inter_elim1 set_inter_elim2. Lemma set_inter_elim : forall (a:A) (x y:set), set_In a (set_inter x y) -> set_In a x /\ set_In a y. Proof. eauto with datatypes. Qed. Lemma set_diff_intro : forall (a:A) (x y:set), set_In a x -> ~ set_In a y -> set_In a (set_diff x y). Proof. simple induction x. simpl; tauto. simpl; intros a0 l Hrec y [Ha0a| Hal] Hay. rewrite Ha0a; generalize (set_mem_complete2 _ _ Hay). elim (set_mem a y); [ intro Habs; discriminate Habs | auto with datatypes ]. elim (set_mem a0 y); auto with datatypes. Qed. Lemma set_diff_elim1 : forall (a:A) (x y:set), set_In a (set_diff x y) -> set_In a x. Proof. simple induction x. simpl; tauto. simpl; intros a0 l Hrec y; elim (set_mem a0 y). eauto with datatypes. intro; generalize (set_add_elim _ _ _ H). intros [H1| H2]; eauto with datatypes. Qed. Lemma set_diff_elim2 : forall (a:A) (x y:set), set_In a (set_diff x y) -> ~ set_In a y. intros a x y; elim x; simpl. intros; contradiction. intros a0 l Hrec. apply set_mem_ind2; auto. intros H1 H2; case (set_add_elim _ _ _ H2); intros; auto. rewrite H; trivial. Qed. Lemma set_diff_trivial : forall (a:A) (x:set), ~ set_In a (set_diff x x). red; intros a x H. apply (set_diff_elim2 _ _ _ H). apply (set_diff_elim1 _ _ _ H). Qed. Hint Resolve set_diff_intro set_diff_trivial. End first_definitions. Section other_definitions. Definition set_prod : forall {A B:Type}, set A -> set B -> set (A * B) := list_prod. (** [B^A], set of applications from [A] to [B] *) Definition set_power : forall {A B:Type}, set A -> set B -> set (set (A * B)) := list_power. Definition set_fold_left {A B:Type} : (B -> A -> B) -> set A -> B -> B := fold_left (A:=B) (B:=A). Definition set_fold_right {A B:Type} (f:A -> B -> B) (x:set A) (b:B) : B := fold_right f b x. Definition set_map {A B:Type} (Aeq_dec : forall x y:B, {x = y} + {x <> y}) (f : A -> B) (x : set A) : set B := set_fold_right (fun a => set_add Aeq_dec (f a)) x (empty_set B). End other_definitions. Unset Implicit Arguments. coq-8.4pl2/theories/Lists/StreamMemo.v0000640000175000001440000001254012010532755017011 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A. CoFixpoint memo_make (n:nat) : Stream A := Cons (f n) (memo_make (S n)). Definition memo_list := memo_make 0. Fixpoint memo_get (n:nat) (l:Stream A) : A := match n with | O => hd l | S n1 => memo_get n1 (tl l) end. Theorem memo_get_correct: forall n, memo_get n memo_list = f n. Proof. assert (F1: forall n m, memo_get n (memo_make m) = f (n + m)). { induction n as [| n Hrec]; try (intros m; reflexivity). intros m; simpl; rewrite Hrec. rewrite plus_n_Sm; auto. } intros n; transitivity (f (n + 0)); try exact (F1 n 0). rewrite <- plus_n_O; auto. Qed. (** Building with possible sharing using a iterator [g] : We now suppose in addition that [f n] is in fact the [n]-th iterate of a function [g]. *) Variable g: A -> A. Hypothesis Hg_correct: forall n, f (S n) = g (f n). CoFixpoint imemo_make (fn:A) : Stream A := let fn1 := g fn in Cons fn1 (imemo_make fn1). Definition imemo_list := let f0 := f 0 in Cons f0 (imemo_make f0). Theorem imemo_get_correct: forall n, memo_get n imemo_list = f n. Proof. 
assert (F1: forall n m, memo_get n (imemo_make (f m)) = f (S (n + m))). { induction n as [| n Hrec]; try (intros m; exact (eq_sym (Hg_correct m))). simpl; intros m; rewrite <- Hg_correct, Hrec, <- plus_n_Sm; auto. } destruct n as [| n]; try reflexivity. unfold imemo_list; simpl; rewrite F1. rewrite <- plus_n_O; auto. Qed. End MemoFunction. (** For a dependent function, the previous solution is reused thanks to a temporarly hiding of the dependency in a "container" [memo_val]. *) Section DependentMemoFunction. Variable A: nat -> Type. Variable f: forall n, A n. Inductive memo_val: Type := memo_mval: forall n, A n -> memo_val. Fixpoint is_eq (n m : nat) : {n = m} + {True} := match n, m return {n = m} + {True} with | 0, 0 =>left True (eq_refl 0) | 0, S m1 => right (0 = S m1) I | S n1, 0 => right (S n1 = 0) I | S n1, S m1 => match is_eq n1 m1 with | left H => left True (f_equal S H) | right _ => right (S n1 = S m1) I end end. Definition memo_get_val n (v: memo_val): A n := match v with | memo_mval m x => match is_eq n m with | left H => match H in (eq _ y) return (A y -> A n) with | eq_refl => fun v1 : A n => v1 end | right _ => fun _ : A m => f n end x end. Let mf n := memo_mval n (f n). Definition dmemo_list := memo_list _ mf. Definition dmemo_get n l := memo_get_val n (memo_get _ n l). Theorem dmemo_get_correct: forall n, dmemo_get n dmemo_list = f n. Proof. intros n; unfold dmemo_get, dmemo_list. rewrite (memo_get_correct memo_val mf n); simpl. case (is_eq n n); simpl; auto; intros e. assert (e = eq_refl n). apply eq_proofs_unicity. induction x as [| x Hx]; destruct y as [| y]. left; auto. right; intros HH; discriminate HH. right; intros HH; discriminate HH. case (Hx y). intros HH; left; case HH; auto. intros HH; right; intros HH1; case HH. injection HH1; auto. rewrite H; auto. Qed. (** Finally, a version with both dependency and iterator *) Variable g: forall n, A n -> A (S n). Hypothesis Hg_correct: forall n, f (S n) = g n (f n). Let mg v := match v with memo_mval n1 v1 => memo_mval (S n1) (g n1 v1) end. Definition dimemo_list := imemo_list _ mf mg. Theorem dimemo_get_correct: forall n, dmemo_get n dimemo_list = f n. Proof. intros n; unfold dmemo_get, dimemo_list. rewrite (imemo_get_correct memo_val mf mg); simpl. case (is_eq n n); simpl; auto; intros e. assert (e = eq_refl n). apply eq_proofs_unicity. induction x as [| x Hx]; destruct y as [| y]. left; auto. right; intros HH; discriminate HH. right; intros HH; discriminate HH. case (Hx y). intros HH; left; case HH; auto. intros HH; right; intros HH1; case HH. injection HH1; auto. rewrite H; auto. intros n1; unfold mf; rewrite Hg_correct; auto. Qed. End DependentMemoFunction. (** An example with the memo function on factorial *) (* Require Import ZArith. Open Scope Z_scope. Fixpoint tfact (n: nat) := match n with | O => 1 | S n1 => Z.of_nat n * tfact n1 end. Definition lfact_list := dimemo_list _ tfact (fun n z => (Z.of_nat (S n) * z)). Definition lfact n := dmemo_get _ tfact n lfact_list. Theorem lfact_correct n: lfact n = tfact n. Proof. intros n; unfold lfact, lfact_list. rewrite dimemo_get_correct; auto. Qed. Fixpoint nop p := match p with | xH => 0 | xI p1 => nop p1 | xO p1 => nop p1 end. Fixpoint test z := match z with | Z0 => 0 | Zpos p1 => nop p1 | Zneg p1 => nop p1 end. Time Eval vm_compute in test (lfact 2000). Time Eval vm_compute in test (lfact 2000). Time Eval vm_compute in test (lfact 1500). Time Eval vm_compute in (lfact 1500). 
*) coq-8.4pl2/theories/Relations/0000750000175000001440000000000012127276545015423 5ustar notinuserscoq-8.4pl2/theories/Relations/Relation_Operators.v0000640000175000001440000001652012010532755021417 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := | t_step (y:A) : R x y -> clos_trans x y | t_trans (y z:A) : clos_trans x y -> clos_trans y z -> clos_trans x z. (** Alternative definition by transitive extension on the left *) Inductive clos_trans_1n (x: A) : A -> Prop := | t1n_step (y:A) : R x y -> clos_trans_1n x y | t1n_trans (y z:A) : R x y -> clos_trans_1n y z -> clos_trans_1n x z. (** Alternative definition by transitive extension on the right *) Inductive clos_trans_n1 (x: A) : A -> Prop := | tn1_step (y:A) : R x y -> clos_trans_n1 x y | tn1_trans (y z:A) : R y z -> clos_trans_n1 x y -> clos_trans_n1 x z. End Transitive_Closure. (** ** Reflexive-transitive closure *) Section Reflexive_Transitive_Closure. Variable A : Type. Variable R : relation A. (** Definition by direct reflexive-transitive closure *) Inductive clos_refl_trans (x:A) : A -> Prop := | rt_step (y:A) : R x y -> clos_refl_trans x y | rt_refl : clos_refl_trans x x | rt_trans (y z:A) : clos_refl_trans x y -> clos_refl_trans y z -> clos_refl_trans x z. (** Alternative definition by transitive extension on the left *) Inductive clos_refl_trans_1n (x: A) : A -> Prop := | rt1n_refl : clos_refl_trans_1n x x | rt1n_trans (y z:A) : R x y -> clos_refl_trans_1n y z -> clos_refl_trans_1n x z. (** Alternative definition by transitive extension on the right *) Inductive clos_refl_trans_n1 (x: A) : A -> Prop := | rtn1_refl : clos_refl_trans_n1 x x | rtn1_trans (y z:A) : R y z -> clos_refl_trans_n1 x y -> clos_refl_trans_n1 x z. End Reflexive_Transitive_Closure. (** ** Reflexive-symmetric-transitive closure *) Section Reflexive_Symmetric_Transitive_Closure. Variable A : Type. Variable R : relation A. (** Definition by direct reflexive-symmetric-transitive closure *) Inductive clos_refl_sym_trans : relation A := | rst_step (x y:A) : R x y -> clos_refl_sym_trans x y | rst_refl (x:A) : clos_refl_sym_trans x x | rst_sym (x y:A) : clos_refl_sym_trans x y -> clos_refl_sym_trans y x | rst_trans (x y z:A) : clos_refl_sym_trans x y -> clos_refl_sym_trans y z -> clos_refl_sym_trans x z. (** Alternative definition by symmetric-transitive extension on the left *) Inductive clos_refl_sym_trans_1n (x: A) : A -> Prop := | rst1n_refl : clos_refl_sym_trans_1n x x | rst1n_trans (y z:A) : R x y \/ R y x -> clos_refl_sym_trans_1n y z -> clos_refl_sym_trans_1n x z. (** Alternative definition by symmetric-transitive extension on the right *) Inductive clos_refl_sym_trans_n1 (x: A) : A -> Prop := | rstn1_refl : clos_refl_sym_trans_n1 x x | rstn1_trans (y z:A) : R y z \/ R z y -> clos_refl_sym_trans_n1 x y -> clos_refl_sym_trans_n1 x z. End Reflexive_Symmetric_Transitive_Closure. (** ** Converse of a relation *) Section Converse. Variable A : Type. Variable R : relation A. Definition transp (x y:A) := R y x. End Converse. (** ** Union of relations *) Section Union. Variable A : Type. Variables R1 R2 : relation A. Definition union (x y:A) := R1 x y \/ R2 x y. End Union. (** ** Disjoint union of relations *) Section Disjoint_Union. Variables A B : Type. Variable leA : A -> A -> Prop. Variable leB : B -> B -> Prop. 
Inductive le_AsB : A + B -> A + B -> Prop := | le_aa (x y:A) : leA x y -> le_AsB (inl _ x) (inl _ y) | le_ab (x:A) (y:B) : le_AsB (inl _ x) (inr _ y) | le_bb (x y:B) : leB x y -> le_AsB (inr _ x) (inr _ y). End Disjoint_Union. (** ** Lexicographic order on dependent pairs *) Section Lexicographic_Product. Variable A : Type. Variable B : A -> Type. Variable leA : A -> A -> Prop. Variable leB : forall x:A, B x -> B x -> Prop. Inductive lexprod : sigT B -> sigT B -> Prop := | left_lex : forall (x x':A) (y:B x) (y':B x'), leA x x' -> lexprod (existT B x y) (existT B x' y') | right_lex : forall (x:A) (y y':B x), leB x y y' -> lexprod (existT B x y) (existT B x y'). End Lexicographic_Product. (** ** Product of relations *) Section Symmetric_Product. Variable A : Type. Variable B : Type. Variable leA : A -> A -> Prop. Variable leB : B -> B -> Prop. Inductive symprod : A * B -> A * B -> Prop := | left_sym : forall x x':A, leA x x' -> forall y:B, symprod (x, y) (x', y) | right_sym : forall y y':B, leB y y' -> forall x:A, symprod (x, y) (x, y'). End Symmetric_Product. (** ** Multiset of two relations *) Section Swap. Variable A : Type. Variable R : A -> A -> Prop. Inductive swapprod : A * A -> A * A -> Prop := | sp_noswap x y (p:A * A) : symprod A A R R (x, y) p -> swapprod (x, y) p | sp_swap x y (p:A * A) : symprod A A R R (x, y) p -> swapprod (y, x) p. End Swap. Local Open Scope list_scope. Section Lexicographic_Exponentiation. Variable A : Set. Variable leA : A -> A -> Prop. Let Nil := nil (A:=A). Let List := list A. Inductive Ltl : List -> List -> Prop := | Lt_nil (a:A) (x:List) : Ltl Nil (a :: x) | Lt_hd (a b:A) : leA a b -> forall x y:list A, Ltl (a :: x) (b :: y) | Lt_tl (a:A) (x y:List) : Ltl x y -> Ltl (a :: x) (a :: y). Inductive Desc : List -> Prop := | d_nil : Desc Nil | d_one (x:A) : Desc (x :: Nil) | d_conc (x y:A) (l:List) : leA x y -> Desc (l ++ y :: Nil) -> Desc ((l ++ y :: Nil) ++ x :: Nil). Definition Pow : Set := sig Desc. Definition lex_exp (a b:Pow) : Prop := Ltl (proj1_sig a) (proj1_sig b). End Lexicographic_Exponentiation. Hint Unfold transp union: sets v62. Hint Resolve t_step rt_step rt_refl rst_step rst_refl: sets v62. Hint Immediate rst_sym: sets v62. (* begin hide *) (* Compatibility *) Notation rts1n_refl := rst1n_refl (only parsing). Notation rts1n_trans := rst1n_trans (only parsing). Notation rtsn1_refl := rstn1_refl (only parsing). Notation rtsn1_trans := rstn1_trans (only parsing). (* end hide *) coq-8.4pl2/theories/Relations/vo.itarget0000640000175000001440000000012311307752066017421 0ustar notinusersOperators_Properties.vo Relation_Definitions.vo Relation_Operators.vo Relations.vo coq-8.4pl2/theories/Relations/Relations.v0000640000175000001440000000226412010532755017544 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* B) (r:relation B), equivalence B r -> equivalence A (fun x y:A => r (f x) (f y)). Proof. intros; split; elim H; red; auto. intros _ equiv_trans _ x y z H0 H1; apply equiv_trans with (f y); assumption. Qed. Lemma inverse_image_of_eq : forall (A B:Type) (f:A -> B), equivalence A (fun x y:A => f x = f y). Proof. split; red; [ (* reflexivity *) reflexivity | (* transitivity *) intros; transitivity (f y); assumption | (* symmetry *) intros; symmetry ; assumption ]. Qed. 
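(** Added usage sketch, not part of the original file: pulling back
    Leibniz equality along any function yields an equivalence; for
    instance "having the same length" is an equivalence on lists.
    The name [same_length_equiv] is only for illustration. *)
(*
Example same_length_equiv (A:Type) :
  equivalence (list A) (fun l1 l2 => length l1 = length l2).
Proof. apply inverse_image_of_eq. Qed.
*)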
coq-8.4pl2/theories/Relations/intro.tex0000750000175000001440000000134707265050405017300 0ustar notinusers\section{Relations}\label{Relations} This library develops closure properties of relations. \begin{itemize} \item {\tt Relation\_Definitions.v} deals with the general notions about binary relations (orders, equivalences, ...) \item {\tt Relation\_Operators.v} and {\tt Rstar.v} define various closures of relations (by symmetry, by transitivity, ...) and lexicographic orderings. \item {\tt Operators\_Properties.v} states and proves facts on the various closures of a relation. \item {\tt Relations.v} puts {\tt Relation\_Definitions.v}, {\tt Relation\_Operators.v} and \\ {\tt Operators\_Properties.v} together. \item {\tt Newman.v} proves Newman's lemma on noetherian and locally confluent relations. \end{itemize} coq-8.4pl2/theories/Relations/Relation_Definitions.v0000640000175000001440000000443112010532755021712 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Variable R : relation. Section General_Properties_of_Relations. Definition reflexive : Prop := forall x:A, R x x. Definition transitive : Prop := forall x y z:A, R x y -> R y z -> R x z. Definition symmetric : Prop := forall x y:A, R x y -> R y x. Definition antisymmetric : Prop := forall x y:A, R x y -> R y x -> x = y. (* for compatibility with Equivalence in ../PROGRAMS/ALG/ *) Definition equiv := reflexive /\ transitive /\ symmetric. End General_Properties_of_Relations. Section Sets_of_Relations. Record preorder : Prop := { preord_refl : reflexive; preord_trans : transitive}. Record order : Prop := { ord_refl : reflexive; ord_trans : transitive; ord_antisym : antisymmetric}. Record equivalence : Prop := { equiv_refl : reflexive; equiv_trans : transitive; equiv_sym : symmetric}. Record PER : Prop := {per_sym : symmetric; per_trans : transitive}. End Sets_of_Relations. Section Relations_of_Relations. Definition inclusion (R1 R2:relation) : Prop := forall x y:A, R1 x y -> R2 x y. Definition same_relation (R1 R2:relation) : Prop := inclusion R1 R2 /\ inclusion R2 R1. Definition commut (R1 R2:relation) : Prop := forall x y:A, R1 y x -> forall z:A, R2 z y -> exists2 y' : A, R2 y' x & R1 z y'. End Relations_of_Relations. End Relation_Definition. Hint Unfold reflexive transitive antisymmetric symmetric: sets v62. Hint Resolve Build_preorder Build_order Build_equivalence Build_PER preord_refl preord_trans ord_refl ord_trans ord_antisym equiv_refl equiv_trans equiv_sym per_sym per_trans: sets v62. Hint Unfold inclusion same_relation commut: sets v62. coq-8.4pl2/theories/Relations/Operators_Properties.v0000640000175000001440000002766012010532755022005 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* clos_trans R x y. Proof. induction 1. left; assumption. right with y; auto. left; auto. Qed. Lemma clos_trans_t1n : forall x y, clos_trans R x y -> clos_trans_1n R x y. Proof. induction 1. left; assumption. generalize IHclos_trans2; clear IHclos_trans2; induction IHclos_trans1. right with y; auto. right with y; auto. eapply IHIHclos_trans1; auto. apply clos_t1n_trans; auto. Qed. Lemma clos_trans_t1n_iff : forall x y, clos_trans R x y <-> clos_trans_1n R x y. Proof. split. apply clos_trans_t1n. apply clos_t1n_trans. Qed. 
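(** Added usage sketch, illustration only: the left-step presentation
    [clos_trans_1n] exposes a first [R]-step, which is convenient when a
    direct case analysis on [clos_trans] would not give it immediately.
    A commented example, stated inside the current section (so [R] is the
    ambient relation): *)
(*
Goal forall x y, clos_trans R x y -> exists z, R x z.
Proof.
  intros x y H. apply clos_trans_t1n in H.
  destruct H as [z Hz | z w Hz _]; now exists z.
Qed.
*)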
(** Direct transitive closure vs right-step extension *) Lemma clos_tn1_trans : forall x y, clos_trans_n1 R x y -> clos_trans R x y. Proof. induction 1. left; assumption. right with y; auto. left; assumption. Qed. Lemma clos_trans_tn1 : forall x y, clos_trans R x y -> clos_trans_n1 R x y. Proof. induction 1. left; assumption. elim IHclos_trans2. intro y0; right with y. auto. auto. intros. right with y0; auto. Qed. Lemma clos_trans_tn1_iff : forall x y, clos_trans R x y <-> clos_trans_n1 R x y. Proof. split. apply clos_trans_tn1. apply clos_tn1_trans. Qed. (** Direct reflexive-transitive closure is equivalent to transitivity by left-step extension *) Lemma clos_rt1n_step : forall x y, R x y -> clos_refl_trans_1n R x y. Proof. intros x y H. right with y;[assumption|left]. Qed. Lemma clos_rtn1_step : forall x y, R x y -> clos_refl_trans_n1 R x y. Proof. intros x y H. right with x;[assumption|left]. Qed. Lemma clos_rt1n_rt : forall x y, clos_refl_trans_1n R x y -> clos_refl_trans R x y. Proof. induction 1. constructor 2. constructor 3 with y; auto. constructor 1; auto. Qed. Lemma clos_rt_rt1n : forall x y, clos_refl_trans R x y -> clos_refl_trans_1n R x y. Proof. induction 1. apply clos_rt1n_step; assumption. left. generalize IHclos_refl_trans2; clear IHclos_refl_trans2; induction IHclos_refl_trans1; auto. right with y; auto. eapply IHIHclos_refl_trans1; auto. apply clos_rt1n_rt; auto. Qed. Lemma clos_rt_rt1n_iff : forall x y, clos_refl_trans R x y <-> clos_refl_trans_1n R x y. Proof. split. apply clos_rt_rt1n. apply clos_rt1n_rt. Qed. (** Direct reflexive-transitive closure is equivalent to transitivity by right-step extension *) Lemma clos_rtn1_rt : forall x y, clos_refl_trans_n1 R x y -> clos_refl_trans R x y. Proof. induction 1. constructor 2. constructor 3 with y; auto. constructor 1; assumption. Qed. Lemma clos_rt_rtn1 : forall x y, clos_refl_trans R x y -> clos_refl_trans_n1 R x y. Proof. induction 1. apply clos_rtn1_step; auto. left. elim IHclos_refl_trans2; auto. intros. right with y0; auto. Qed. Lemma clos_rt_rtn1_iff : forall x y, clos_refl_trans R x y <-> clos_refl_trans_n1 R x y. Proof. split. apply clos_rt_rtn1. apply clos_rtn1_rt. Qed. (** Induction on the left transitive step *) Lemma clos_refl_trans_ind_left : forall (x:A) (P:A -> Prop), P x -> (forall y z:A, clos_refl_trans R x y -> P y -> R y z -> P z) -> forall z:A, clos_refl_trans R x z -> P z. Proof. intros. revert H H0. induction H1; intros; auto with sets. apply H1 with x; auto with sets. apply IHclos_refl_trans2. apply IHclos_refl_trans1; auto with sets. intros. apply H0 with y0; auto with sets. apply rt_trans with y; auto with sets. Qed. (** Induction on the right transitive step *) Lemma rt1n_ind_right : forall (P : A -> Prop) (z:A), P z -> (forall x y, R x y -> clos_refl_trans_1n R y z -> P y -> P x) -> forall x, clos_refl_trans_1n R x z -> P x. induction 3; auto. apply H0 with y; auto. Qed. Lemma clos_refl_trans_ind_right : forall (P : A -> Prop) (z:A), P z -> (forall x y, R x y -> P y -> clos_refl_trans R y z -> P x) -> forall x, clos_refl_trans R x z -> P x. intros P z Hz IH x Hxz. apply clos_rt_rt1n_iff in Hxz. elim Hxz using rt1n_ind_right; auto. clear x Hxz. intros x y Hxy Hyz Hy. apply clos_rt_rt1n_iff in Hyz. eauto. Qed. (** Direct reflexive-symmetric-transitive closure is equivalent to transitivity by symmetric left-step extension *) Lemma clos_rst1n_rst : forall x y, clos_refl_sym_trans_1n R x y -> clos_refl_sym_trans R x y. Proof. induction 1. constructor 2. constructor 4 with y; auto. 
case H;[constructor 1|constructor 3; constructor 1]; auto. Qed. Lemma clos_rst1n_trans : forall x y z, clos_refl_sym_trans_1n R x y -> clos_refl_sym_trans_1n R y z -> clos_refl_sym_trans_1n R x z. induction 1. auto. intros; right with y; eauto. Qed. Lemma clos_rst1n_sym : forall x y, clos_refl_sym_trans_1n R x y -> clos_refl_sym_trans_1n R y x. Proof. intros x y H; elim H. constructor 1. intros x0 y0 z D H0 H1; apply clos_rst1n_trans with y0; auto. right with x0. tauto. left. Qed. Lemma clos_rst_rst1n : forall x y, clos_refl_sym_trans R x y -> clos_refl_sym_trans_1n R x y. induction 1. constructor 2 with y; auto. constructor 1. constructor 1. apply clos_rst1n_sym; auto. eapply clos_rst1n_trans; eauto. Qed. Lemma clos_rst_rst1n_iff : forall x y, clos_refl_sym_trans R x y <-> clos_refl_sym_trans_1n R x y. Proof. split. apply clos_rst_rst1n. apply clos_rst1n_rst. Qed. (** Direct reflexive-symmetric-transitive closure is equivalent to transitivity by symmetric right-step extension *) Lemma clos_rstn1_rst : forall x y, clos_refl_sym_trans_n1 R x y -> clos_refl_sym_trans R x y. Proof. induction 1. constructor 2. constructor 4 with y; auto. case H;[constructor 1|constructor 3; constructor 1]; auto. Qed. Lemma clos_rstn1_trans : forall x y z, clos_refl_sym_trans_n1 R x y -> clos_refl_sym_trans_n1 R y z -> clos_refl_sym_trans_n1 R x z. Proof. intros x y z H1 H2. induction H2. auto. intros. right with y0; eauto. Qed. Lemma clos_rstn1_sym : forall x y, clos_refl_sym_trans_n1 R x y -> clos_refl_sym_trans_n1 R y x. Proof. intros x y H; elim H. constructor 1. intros y0 z D H0 H1. apply clos_rstn1_trans with y0; auto. right with z. tauto. left. Qed. Lemma clos_rst_rstn1 : forall x y, clos_refl_sym_trans R x y -> clos_refl_sym_trans_n1 R x y. Proof. induction 1. constructor 2 with x; auto. constructor 1. constructor 1. apply clos_rstn1_sym; auto. eapply clos_rstn1_trans; eauto. Qed. Lemma clos_rst_rstn1_iff : forall x y, clos_refl_sym_trans R x y <-> clos_refl_sym_trans_n1 R x y. Proof. split. apply clos_rst_rstn1. apply clos_rstn1_rst. Qed. End Equivalences. End Properties. (* begin hide *) (* Compatibility *) Notation trans_tn1 := clos_trans_tn1 (only parsing). Notation tn1_trans := clos_tn1_trans (only parsing). Notation tn1_trans_equiv := clos_trans_tn1_iff (only parsing). Notation trans_t1n := clos_trans_t1n (only parsing). Notation t1n_trans := clos_t1n_trans (only parsing). Notation t1n_trans_equiv := clos_trans_t1n_iff (only parsing). Notation R_rtn1 := clos_rtn1_step (only parsing). Notation trans_rt1n := clos_rt_rt1n (only parsing). Notation rt1n_trans := clos_rt1n_rt (only parsing). Notation rt1n_trans_equiv := clos_rt_rt1n_iff (only parsing). Notation R_rt1n := clos_rt1n_step (only parsing). Notation trans_rtn1 := clos_rt_rtn1 (only parsing). Notation rtn1_trans := clos_rtn1_rt (only parsing). Notation rtn1_trans_equiv := clos_rt_rtn1_iff (only parsing). Notation rts1n_rts := clos_rst1n_rst (only parsing). Notation rts_1n_trans := clos_rst1n_trans (only parsing). Notation rts1n_sym := clos_rst1n_sym (only parsing). Notation rts_rts1n := clos_rst_rst1n (only parsing). Notation rts_rts1n_equiv := clos_rst_rst1n_iff (only parsing). Notation rtsn1_rts := clos_rstn1_rst (only parsing). Notation rtsn1_trans := clos_rstn1_trans (only parsing). Notation rtsn1_sym := clos_rstn1_sym (only parsing). Notation rts_rtsn1 := clos_rst_rstn1 (only parsing). Notation rts_rtsn1_equiv := clos_rst_rstn1_iff (only parsing). 
(* end hide *) coq-8.4pl2/theories/ZArith/0000750000175000001440000000000012127276546014665 5ustar notinuserscoq-8.4pl2/theories/ZArith/ZArith_dec.v0000640000175000001440000001130112010532755017051 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* P) -> ((n ?= m) = Lt -> P) -> ((n ?= m) = Gt -> P) -> P. Proof. intros H1 H2 H3. destruct (n ?= m); auto. Defined. Lemma Zcompare_rec (P:Set) (n m:Z) : ((n ?= m) = Eq -> P) -> ((n ?= m) = Lt -> P) -> ((n ?= m) = Gt -> P) -> P. Proof. apply Zcompare_rect. Defined. Notation Z_eq_dec := Z.eq_dec (compat "8.3"). Section decidability. Variables x y : Z. (** * Decidability of order on binary integers *) Definition Z_lt_dec : {x < y} + {~ x < y}. Proof. unfold Z.lt; case Z.compare; (now left) || (now right). Defined. Definition Z_le_dec : {x <= y} + {~ x <= y}. Proof. unfold Z.le; case Z.compare; (now left) || (right; tauto). Defined. Definition Z_gt_dec : {x > y} + {~ x > y}. Proof. unfold Z.gt; case Z.compare; (now left) || (now right). Defined. Definition Z_ge_dec : {x >= y} + {~ x >= y}. Proof. unfold Z.ge; case Z.compare; (now left) || (right; tauto). Defined. Definition Z_lt_ge_dec : {x < y} + {x >= y}. Proof. exact Z_lt_dec. Defined. Lemma Z_lt_le_dec : {x < y} + {y <= x}. Proof. elim Z_lt_ge_dec. * now left. * right; now apply Z.ge_le. Defined. Definition Z_le_gt_dec : {x <= y} + {x > y}. Proof. elim Z_le_dec; auto with arith. intro. right. Z.swap_greater. now apply Z.nle_gt. Defined. Definition Z_gt_le_dec : {x > y} + {x <= y}. Proof. exact Z_gt_dec. Defined. Definition Z_ge_lt_dec : {x >= y} + {x < y}. Proof. elim Z_ge_dec; auto with arith. intro. right. Z.swap_greater. now apply Z.lt_nge. Defined. Definition Z_le_lt_eq_dec : x <= y -> {x < y} + {x = y}. Proof. intro H. apply Zcompare_rec with (n := x) (m := y). intro. right. elim (Z.compare_eq_iff x y); auto with arith. intro. left. elim (Z.compare_eq_iff x y); auto with arith. intro H1. absurd (x > y); auto with arith. Defined. End decidability. (** * Cotransitivity of order on binary integers *) Lemma Zlt_cotrans : forall n m:Z, n < m -> forall p:Z, {n < p} + {p < m}. Proof. intros x y H z. case (Z_lt_ge_dec x z). intro. left. assumption. intro. right. apply Z.le_lt_trans with (m := x). apply Z.ge_le. assumption. assumption. Defined. Lemma Zlt_cotrans_pos : forall n m:Z, 0 < n + m -> {0 < n} + {0 < m}. Proof. intros x y H. case (Zlt_cotrans 0 (x + y) H x). - now left. - right. apply Z.add_lt_mono_l with (p := x). now rewrite Z.add_0_r. Defined. Lemma Zlt_cotrans_neg : forall n m:Z, n + m < 0 -> {n < 0} + {m < 0}. Proof. intros x y H; case (Zlt_cotrans (x + y) 0 H x); intro Hxy; [ right; apply Z.add_lt_mono_l with (p := x); rewrite Z.add_0_r | left ]; assumption. Defined. Lemma not_Zeq_inf : forall n m:Z, n <> m -> {n < m} + {m < n}. Proof. intros x y H. case Z_lt_ge_dec with x y. intro. left. assumption. intro H0. generalize (Z.ge_le _ _ H0). intro. case (Z_le_lt_eq_dec _ _ H1). intro. right. assumption. intro. apply False_rec. apply H. symmetry . assumption. Defined. Lemma Z_dec : forall n m:Z, {n < m} + {n > m} + {n = m}. Proof. intros x y. case (Z_lt_ge_dec x y). intro H. left. left. assumption. intro H. generalize (Z.ge_le _ _ H). intro H0. case (Z_le_lt_eq_dec y x H0). intro H1. left. right. apply Z.lt_gt. assumption. intro. right. symmetry . assumption. Defined. Lemma Z_dec' : forall n m:Z, {n < m} + {m < n} + {n = m}. Proof. intros x y. 
case (Z.eq_dec x y); intro H; [ right; assumption | left; apply (not_Zeq_inf _ _ H) ]. Defined. (* begin hide *) (* To deprecate ? *) Corollary Z_zerop : forall x:Z, {x = 0} + {x <> 0}. Proof. exact (fun x:Z => Z.eq_dec x 0). Defined. Corollary Z_notzerop : forall (x:Z), {x <> 0} + {x = 0}. Proof (fun x => sumbool_not _ _ (Z_zerop x)). Corollary Z_noteq_dec : forall (x y:Z), {x <> y} + {x = y}. Proof (fun x y => sumbool_not _ _ (Z.eq_dec x y)). (* end hide *) coq-8.4pl2/theories/ZArith/Zpow_def.v0000640000175000001440000000246412010532755016624 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z.succ n = Z.succ m *) (** Lemmas ending by Z.gt *) Zsucc_gt_compat (* m > n -> Z.succ m > Z.succ n *) Zgt_succ (* Z.succ n > n *) Zorder.Zgt_pos_0 (* Z.pos p > 0 *) Zplus_gt_compat_l (* n > m -> p+n > p+m *) Zplus_gt_compat_r (* n > m -> n+p > m+p *) (** Lemmas ending by Z.lt *) Pos2Z.is_pos (* 0 < Z.pos p *) Z.lt_succ_diag_r (* n < Z.succ n *) Zsucc_lt_compat (* n < m -> Z.succ n < Z.succ m *) Z.lt_pred_l (* Z.pred n < n *) Zplus_lt_compat_l (* n < m -> p+n < p+m *) Zplus_lt_compat_r (* n < m -> n+p < m+p *) (** Lemmas ending by Z.le *) Nat2Z.is_nonneg (* 0 <= Z.of_nat n *) Pos2Z.is_nonneg (* 0 <= Z.pos p *) Z.le_refl (* n <= n *) Z.le_succ_diag_r (* n <= Z.succ n *) Zsucc_le_compat (* m <= n -> Z.succ m <= Z.succ n *) Z.le_pred_l (* Z.pred n <= n *) Z.le_min_l (* Z.min n m <= n *) Z.le_min_r (* Z.min n m <= m *) Zplus_le_compat_l (* n <= m -> p+n <= p+m *) Zplus_le_compat_r (* a <= b -> a+c <= b+c *) Z.abs_nonneg (* 0 <= |x| *) (** ** Irreversible simplification lemmas *) (** Probably to be declared as hints, when no other simplification is possible *) (** Lemmas ending by eq *) Z_eq_mult (* y = 0 -> y*x = 0 *) Zplus_eq_compat (* n = m -> p = q -> n+p = m+q *) (** Lemmas ending by Z.ge *) Zorder.Zmult_ge_compat_r (* a >= b -> c >= 0 -> a*c >= b*c *) Zorder.Zmult_ge_compat_l (* a >= b -> c >= 0 -> c*a >= c*b *) Zorder.Zmult_ge_compat (* : a >= c -> b >= d -> c >= 0 -> d >= 0 -> a*b >= c*d *) (** Lemmas ending by Z.lt *) Zorder.Zmult_gt_0_compat (* a > 0 -> b > 0 -> a*b > 0 *) Z.lt_lt_succ_r (* n < m -> n < Z.succ m *) (** Lemmas ending by Z.le *) Z.mul_nonneg_nonneg (* 0 <= x -> 0 <= y -> 0 <= x*y *) Zorder.Zmult_le_compat_r (* a <= b -> 0 <= c -> a*c <= b*c *) Zorder.Zmult_le_compat_l (* a <= b -> 0 <= c -> c*a <= c*b *) Z.add_nonneg_nonneg (* 0 <= x -> 0 <= y -> 0 <= x+y *) Z.le_le_succ_r (* x <= y -> x <= Z.succ y *) Z.add_le_mono (* n <= m -> p <= q -> n+p <= m+q *) : zarith. coq-8.4pl2/theories/ZArith/Zcompare.v0000640000175000001440000001261712010532755016630 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (m ?= n) = Lt. Proof Z.gt_lt_iff. Lemma Zcompare_antisym n m : CompOpp (n ?= m) = (m ?= n). Proof eq_sym (Z.compare_antisym n m). (** * Transitivity of comparison *) Lemma Zcompare_Lt_trans : forall n m p:Z, (n ?= m) = Lt -> (m ?= p) = Lt -> (n ?= p) = Lt. Proof Z.lt_trans. Lemma Zcompare_Gt_trans : forall n m p:Z, (n ?= m) = Gt -> (m ?= p) = Gt -> (n ?= p) = Gt. Proof. intros n m p. change (n > m -> m > p -> n > p). Z.swap_greater. intros. now transitivity m. Qed. (** * Comparison and opposite *) Lemma Zcompare_opp n m : (n ?= m) = (- m ?= - n). Proof. symmetry. apply Z.compare_opp. Qed. 
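(** Added concrete check, illustration only: on sample integers the
    antisymmetry of comparison is just [CompOpp] of the converse
    comparison, e.g. [(3 ?= 7) = Lt] while [(7 ?= 3) = Gt]. *)
(*
Goal CompOpp (3 ?= 7) = (7 ?= 3).
Proof. reflexivity. Qed.
*)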
(** * Comparison first-order specification *) Lemma Zcompare_Gt_spec n m : (n ?= m) = Gt -> exists h, n + - m = Zpos h. Proof. rewrite Z.compare_sub. unfold Z.sub. destruct (n+-m) as [|p|p]; try discriminate. now exists p. Qed. (** * Comparison and addition *) Lemma Zcompare_plus_compat n m p : (p + n ?= p + m) = (n ?= m). Proof. apply Z.add_compare_mono_l. Qed. Lemma Zplus_compare_compat (r:comparison) (n m p q:Z) : (n ?= m) = r -> (p ?= q) = r -> (n + p ?= m + q) = r. Proof. rewrite (Z.compare_sub n), (Z.compare_sub p), (Z.compare_sub (n+p)). unfold Z.sub. rewrite Z.opp_add_distr. rewrite Z.add_shuffle1. destruct (n+-m), (p+-q); simpl; intros; now subst. Qed. Lemma Zcompare_succ_Gt n : (Z.succ n ?= n) = Gt. Proof. apply Z.lt_gt. apply Z.lt_succ_diag_r. Qed. Lemma Zcompare_Gt_not_Lt n m : (n ?= m) = Gt <-> (n ?= m+1) <> Lt. Proof. change (n > m <-> n >= m+1). Z.swap_greater. symmetry. apply Z.le_succ_l. Qed. (** * Successor and comparison *) Lemma Zcompare_succ_compat n m : (Z.succ n ?= Z.succ m) = (n ?= m). Proof. rewrite <- 2 Z.add_1_l. apply Z.add_compare_mono_l. Qed. (** * Multiplication and comparison *) Lemma Zcompare_mult_compat : forall (p:positive) (n m:Z), (Zpos p * n ?= Zpos p * m) = (n ?= m). Proof. intros p [|n|n] [|m|m]; simpl; trivial; now rewrite Pos.mul_compare_mono_l. Qed. Lemma Zmult_compare_compat_l n m p: p > 0 -> (n ?= m) = (p * n ?= p * m). Proof. intros; destruct p; try discriminate. symmetry. apply Zcompare_mult_compat. Qed. Lemma Zmult_compare_compat_r n m p : p > 0 -> (n ?= m) = (n * p ?= m * p). Proof. intros; rewrite 2 (Z.mul_comm _ p); now apply Zmult_compare_compat_l. Qed. (** * Relating [x ?= y] to [=], [<=], [<], [>=] or [>] *) Lemma Zcompare_elim : forall (c1 c2 c3:Prop) (n m:Z), (n = m -> c1) -> (n < m -> c2) -> (n > m -> c3) -> match n ?= m with | Eq => c1 | Lt => c2 | Gt => c3 end. Proof. intros. case Z.compare_spec; trivial. now Z.swap_greater. Qed. Lemma Zcompare_eq_case : forall (c1 c2 c3:Prop) (n m:Z), c1 -> n = m -> match n ?= m with | Eq => c1 | Lt => c2 | Gt => c3 end. Proof. intros. subst. now rewrite Z.compare_refl. Qed. Lemma Zle_compare : forall n m:Z, n <= m -> match n ?= m with | Eq => True | Lt => True | Gt => False end. Proof. intros. case Z.compare_spec; trivial; Z.order. Qed. Lemma Zlt_compare : forall n m:Z, n < m -> match n ?= m with | Eq => False | Lt => True | Gt => False end. Proof. intros x y H; now rewrite H. Qed. Lemma Zge_compare : forall n m:Z, n >= m -> match n ?= m with | Eq => True | Lt => False | Gt => True end. Proof. intros. now case Z.compare_spec. Qed. Lemma Zgt_compare : forall n m:Z, n > m -> match n ?= m with | Eq => False | Lt => False | Gt => True end. Proof. intros x y H; now rewrite H. Qed. (** Compatibility notations *) Notation Zcompare_refl := Z.compare_refl (compat "8.3"). Notation Zcompare_Eq_eq := Z.compare_eq (compat "8.3"). Notation Zcompare_Eq_iff_eq := Z.compare_eq_iff (compat "8.3"). Notation Zcompare_spec := Z.compare_spec (compat "8.3"). Notation Zmin_l := Z.min_l (compat "8.3"). Notation Zmin_r := Z.min_r (compat "8.3"). Notation Zmax_l := Z.max_l (compat "8.3"). Notation Zmax_r := Z.max_r (compat "8.3"). Notation Zabs_eq := Z.abs_eq (compat "8.3"). Notation Zabs_non_eq := Z.abs_neq (compat "8.3"). Notation Zsgn_0 := Z.sgn_null (compat "8.3"). Notation Zsgn_1 := Z.sgn_pos (compat "8.3"). Notation Zsgn_m1 := Z.sgn_neg (compat "8.3"). 
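(** A small usage sketch (illustrative, name arbitrary): comparisons are
    invariant under multiplication by a positive constant, here [6]. *)

Example Zcompare_mult_compat_example :
  forall n m : Z, (6 * n ?= 6 * m) = (n ?= m).
Proof. exact (Zcompare_mult_compat 6%positive). Qed.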
(** Not kept: Zcompare_egal_dec *) coq-8.4pl2/theories/ZArith/Zcomplements.v0000640000175000001440000001125012010532755017520 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1%positive | xO a' => xO (floor_pos a') | xI b' => xO (floor_pos b') end. Definition floor (a:positive) := Zpos (floor_pos a). Lemma floor_gt0 : forall p:positive, floor p > 0. Proof. reflexivity. Qed. Lemma floor_ok : forall p:positive, floor p <= Zpos p < 2 * floor p. Proof. unfold floor. induction p; simpl. - rewrite !Pos2Z.inj_xI, (Pos2Z.inj_xO (xO _)), Pos2Z.inj_xO. omega. - rewrite (Pos2Z.inj_xO (xO _)), (Pos2Z.inj_xO p), Pos2Z.inj_xO. omega. - omega. Qed. (**********************************************************************) (** Two more induction principles over [Z]. *) Theorem Z_lt_abs_rec : forall P:Z -> Set, (forall n:Z, (forall m:Z, Z.abs m < Z.abs n -> P m) -> P n) -> forall n:Z, P n. Proof. intros P HP p. set (Q := fun z => 0 <= z -> P z * P (- z)) in *. cut (Q (Z.abs p)); [ intros | apply (Z_lt_rec Q); auto with zarith ]. elim (Zabs_dec p); intro eq; rewrite eq; elim H; auto with zarith. unfold Q; clear Q; intros. split; apply HP. rewrite Z.abs_eq; auto; intros. elim (H (Z.abs m)); intros; auto with zarith. elim (Zabs_dec m); intro eq; rewrite eq; trivial. rewrite Z.abs_neq, Z.opp_involutive; auto with zarith; intros. elim (H (Z.abs m)); intros; auto with zarith. elim (Zabs_dec m); intro eq; rewrite eq; trivial. Qed. Theorem Z_lt_abs_induction : forall P:Z -> Prop, (forall n:Z, (forall m:Z, Z.abs m < Z.abs n -> P m) -> P n) -> forall n:Z, P n. Proof. intros P HP p. set (Q := fun z => 0 <= z -> P z /\ P (- z)) in *. cut (Q (Z.abs p)); [ intros | apply (Z_lt_induction Q); auto with zarith ]. elim (Zabs_dec p); intro eq; rewrite eq; elim H; auto with zarith. unfold Q; clear Q; intros. split; apply HP. rewrite Z.abs_eq; auto; intros. elim (H (Z.abs m)); intros; auto with zarith. elim (Zabs_dec m); intro eq; rewrite eq; trivial. rewrite Z.abs_neq, Z.opp_involutive; auto with zarith; intros. elim (H (Z.abs m)); intros; auto with zarith. elim (Zabs_dec m); intro eq; rewrite eq; trivial. Qed. (** To do case analysis over the sign of [z] *) Lemma Zcase_sign : forall (n:Z) (P:Prop), (n = 0 -> P) -> (n > 0 -> P) -> (n < 0 -> P) -> P. Proof. intros x P Hzero Hpos Hneg. destruct x; [apply Hzero|apply Hpos|apply Hneg]; easy. Qed. Lemma sqr_pos n : n * n >= 0. Proof. Z.swap_greater. apply Z.square_nonneg. Qed. (**********************************************************************) (** A list length in Z, tail recursive. *) Require Import List. Fixpoint Zlength_aux (acc:Z) (A:Type) (l:list A) : Z := match l with | nil => acc | _ :: l => Zlength_aux (Z.succ acc) A l end. Definition Zlength := Zlength_aux 0. Arguments Zlength [A] l. Section Zlength_properties. Variable A : Type. Implicit Type l : list A. Lemma Zlength_correct l : Zlength l = Z.of_nat (length l). Proof. assert (H : forall l acc, Zlength_aux acc A l = acc + Z.of_nat (length l)). clear l. induction l. auto with zarith. intros. simpl length; simpl Zlength_aux. rewrite IHl, Nat2Z.inj_succ; auto with zarith. unfold Zlength. now rewrite H. Qed. Lemma Zlength_nil : Zlength (A:=A) nil = 0. Proof. reflexivity. Qed. Lemma Zlength_cons (x:A) l : Zlength (x :: l) = Z.succ (Zlength l). Proof. intros. now rewrite !Zlength_correct, <- Nat2Z.inj_succ. Qed. Lemma Zlength_nil_inv l : Zlength l = 0 -> l = nil. Proof. rewrite Zlength_correct. 
destruct l as [|x l]; auto. now rewrite <- Nat2Z.inj_0, Nat2Z.inj_iff. Qed. End Zlength_properties. Arguments Zlength_correct [A] l. Arguments Zlength_cons [A] x l. Arguments Zlength_nil_inv [A] l _. coq-8.4pl2/theories/ZArith/Zquot.v0000640000175000001440000003403012010532755016163 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0] condition whenever possible. *) Lemma Zrem_0_l a : Z.rem 0 a = 0. Proof. now destruct a. Qed. Lemma Zquot_0_l a : 0÷a = 0. Proof. now destruct a. Qed. Hint Resolve Zrem_0_l Zrem_0_r Zquot_0_l Zquot_0_r Z.quot_1_r Z.rem_1_r : zarith. Ltac zero_or_not a := destruct (Z.eq_decidable a 0) as [->|?]; [rewrite ?Zquot_0_l, ?Zrem_0_l, ?Zquot_0_r, ?Zrem_0_r; auto with zarith|]. Lemma Z_rem_same a : Z.rem a a = 0. Proof. zero_or_not a. now apply Z.rem_same. Qed. Lemma Z_rem_mult a b : Z.rem (a*b) b = 0. Proof. zero_or_not b. now apply Z.rem_mul. Qed. (** * Division and Opposite *) (* The precise equalities that are invalid with "historic" Zdiv. *) Theorem Zquot_opp_l a b : (-a)÷b = -(a÷b). Proof. zero_or_not b. now apply Z.quot_opp_l. Qed. Theorem Zquot_opp_r a b : a÷(-b) = -(a÷b). Proof. zero_or_not b. now apply Z.quot_opp_r. Qed. Theorem Zrem_opp_l a b : Z.rem (-a) b = -(Z.rem a b). Proof. zero_or_not b. now apply Z.rem_opp_l. Qed. Theorem Zrem_opp_r a b : Z.rem a (-b) = Z.rem a b. Proof. zero_or_not b. now apply Z.rem_opp_r. Qed. Theorem Zquot_opp_opp a b : (-a)÷(-b) = a÷b. Proof. zero_or_not b. now apply Z.quot_opp_opp. Qed. Theorem Zrem_opp_opp a b : Z.rem (-a) (-b) = -(Z.rem a b). Proof. zero_or_not b. now apply Z.rem_opp_opp. Qed. (** The sign of the remainder is the one of [a]. Due to the possible nullity of [a], a general result is to be stated in the following form: *) Theorem Zrem_sgn a b : 0 <= Z.sgn (Z.rem a b) * Z.sgn a. Proof. zero_or_not b. - apply Z.square_nonneg. - zero_or_not (Z.rem a b). rewrite Z.rem_sign_nz; trivial. apply Z.square_nonneg. Qed. (** This can also be said in a simplier way: *) Theorem Zrem_sgn2 a b : 0 <= (Z.rem a b) * a. Proof. zero_or_not b. - apply Z.square_nonneg. - now apply Z.rem_sign_mul. Qed. (** Reformulation of [Z.rem_bound_abs] in 2 then 4 particular cases. *) Theorem Zrem_lt_pos a b : 0<=a -> b<>0 -> 0 <= Z.rem a b < Z.abs b. Proof. intros; generalize (Z.rem_nonneg a b) (Z.rem_bound_abs a b); romega with *. Qed. Theorem Zrem_lt_neg a b : a<=0 -> b<>0 -> -Z.abs b < Z.rem a b <= 0. Proof. intros; generalize (Z.rem_nonpos a b) (Z.rem_bound_abs a b); romega with *. Qed. Theorem Zrem_lt_pos_pos a b : 0<=a -> 0 0 <= Z.rem a b < b. Proof. intros; generalize (Zrem_lt_pos a b); romega with *. Qed. Theorem Zrem_lt_pos_neg a b : 0<=a -> b<0 -> 0 <= Z.rem a b < -b. Proof. intros; generalize (Zrem_lt_pos a b); romega with *. Qed. Theorem Zrem_lt_neg_pos a b : a<=0 -> 0 -b < Z.rem a b <= 0. Proof. intros; generalize (Zrem_lt_neg a b); romega with *. Qed. Theorem Zrem_lt_neg_neg a b : a<=0 -> b<0 -> b < Z.rem a b <= 0. Proof. intros; generalize (Zrem_lt_neg a b); romega with *. Qed. (** * Unicity results *) Definition Remainder a b r := (0 <= a /\ 0 <= r < Z.abs b) \/ (a <= 0 /\ -Z.abs b < r <= 0). Definition Remainder_alt a b r := Z.abs r < Z.abs b /\ 0 <= r * a. Lemma Remainder_equiv : forall a b r, Remainder a b r <-> Remainder_alt a b r. Proof. unfold Remainder, Remainder_alt; intuition. - romega with *. - romega with *. - rewrite <-(Z.mul_opp_opp). apply Z.mul_nonneg_nonneg; romega. - assert (0 <= Z.sgn r * Z.sgn a). 
{ rewrite <-Z.sgn_mul, Z.sgn_nonneg; auto. } destruct r; simpl Z.sgn in *; romega with *. Qed. Theorem Zquot_mod_unique_full a b q r : Remainder a b r -> a = b*q + r -> q = a÷b /\ r = Z.rem a b. Proof. destruct 1 as [(H,H0)|(H,H0)]; intros. apply Zdiv_mod_unique with b; auto. apply Zrem_lt_pos; auto. romega with *. rewrite <- H1; apply Z.quot_rem'. rewrite <- (Z.opp_involutive a). rewrite Zquot_opp_l, Zrem_opp_l. generalize (Zdiv_mod_unique b (-q) (-a÷b) (-r) (Z.rem (-a) b)). generalize (Zrem_lt_pos (-a) b). rewrite <-Z.quot_rem', Z.mul_opp_r, <-Z.opp_add_distr, <-H1. romega with *. Qed. Theorem Zquot_unique_full a b q r : Remainder a b r -> a = b*q + r -> q = a÷b. Proof. intros; destruct (Zquot_mod_unique_full a b q r); auto. Qed. Theorem Zrem_unique_full a b q r : Remainder a b r -> a = b*q + r -> r = Z.rem a b. Proof. intros; destruct (Zquot_mod_unique_full a b q r); auto. Qed. (** * Order results about Zrem and Zquot *) (* Division of positive numbers is positive. *) Lemma Z_quot_pos a b : 0 <= a -> 0 <= b -> 0 <= a÷b. Proof. intros. zero_or_not b. apply Z.quot_pos; auto with zarith. Qed. (** As soon as the divisor is greater or equal than 2, the division is strictly decreasing. *) Lemma Z_quot_lt a b : 0 < a -> 2 <= b -> a÷b < a. Proof. intros. apply Z.quot_lt; auto with zarith. Qed. (** [<=] is compatible with a positive division. *) Lemma Z_quot_monotone a b c : 0<=c -> a<=b -> a÷c <= b÷c. Proof. intros. zero_or_not c. apply Z.quot_le_mono; auto with zarith. Qed. (** With our choice of division, rounding of (a÷b) is always done toward 0: *) Lemma Z_mult_quot_le a b : 0 <= a -> 0 <= b*(a÷b) <= a. Proof. intros. zero_or_not b. apply Z.mul_quot_le; auto with zarith. Qed. Lemma Z_mult_quot_ge a b : a <= 0 -> a <= b*(a÷b) <= 0. Proof. intros. zero_or_not b. apply Z.mul_quot_ge; auto with zarith. Qed. (** The previous inequalities between [b*(a÷b)] and [a] are exact iff the modulo is zero. *) Lemma Z_quot_exact_full a b : a = b*(a÷b) <-> Z.rem a b = 0. Proof. intros. zero_or_not b. intuition. apply Z.quot_exact; auto. Qed. (** A modulo cannot grow beyond its starting point. *) Theorem Zrem_le a b : 0 <= a -> 0 <= b -> Z.rem a b <= a. Proof. intros. zero_or_not b. apply Z.rem_le; auto with zarith. Qed. (** Some additionnal inequalities about Zdiv. *) Theorem Zquot_le_upper_bound: forall a b q, 0 < b -> a <= q*b -> a÷b <= q. Proof. intros a b q; rewrite Z.mul_comm; apply Z.quot_le_upper_bound. Qed. Theorem Zquot_lt_upper_bound: forall a b q, 0 <= a -> 0 < b -> a < q*b -> a÷b < q. Proof. intros a b q; rewrite Z.mul_comm; apply Z.quot_lt_upper_bound. Qed. Theorem Zquot_le_lower_bound: forall a b q, 0 < b -> q*b <= a -> q <= a÷b. Proof. intros a b q; rewrite Z.mul_comm; apply Z.quot_le_lower_bound. Qed. Theorem Zquot_sgn: forall a b, 0 <= Z.sgn (a÷b) * Z.sgn a * Z.sgn b. Proof. destruct a as [ |a|a]; destruct b as [ |b|b]; simpl; auto with zarith; unfold Z.quot; simpl; destruct N.pos_div_eucl; simpl; destruct n; simpl; auto with zarith. Qed. (** * Relations between usual operations and Zmod and Zdiv *) (** First, a result that used to be always valid with Zdiv, but must be restricted here. For instance, now (9+(-5)*2) rem 2 = -1 <> 1 = 9 rem 2 *) Lemma Z_rem_plus : forall a b c:Z, 0 <= (a+b*c) * a -> Z.rem (a + b * c) c = Z.rem a c. Proof. intros. zero_or_not c. apply Z.rem_add; auto with zarith. Qed. Lemma Z_quot_plus : forall a b c:Z, 0 <= (a+b*c) * a -> c<>0 -> (a + b * c) ÷ c = a ÷ c + b. Proof. intros. apply Z.quot_add; auto with zarith. Qed. 
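(** A concrete sanity check of the sign conventions discussed above
    (purely illustrative, name arbitrary): the truncated remainder follows
    the sign of the dividend, whereas [Z.modulo] follows the divisor. *)

Example Zrem_sign_example : Z.rem (-7) 2 = -1 /\ (-7) mod 2 = 1.
Proof. split; reflexivity. Qed.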
Theorem Z_quot_plus_l: forall a b c : Z, 0 <= (a*b+c)*c -> b<>0 -> b<>0 -> (a * b + c) ÷ b = a + c ÷ b. Proof. intros. apply Z.quot_add_l; auto with zarith. Qed. (** Cancellations. *) Lemma Zquot_mult_cancel_r : forall a b c:Z, c<>0 -> (a*c)÷(b*c) = a÷b. Proof. intros. zero_or_not b. apply Z.quot_mul_cancel_r; auto. Qed. Lemma Zquot_mult_cancel_l : forall a b c:Z, c<>0 -> (c*a)÷(c*b) = a÷b. Proof. intros. rewrite (Z.mul_comm c b). zero_or_not b. rewrite (Z.mul_comm b c). apply Z.quot_mul_cancel_l; auto. Qed. Lemma Zmult_rem_distr_l: forall a b c, Z.rem (c*a) (c*b) = c * (Z.rem a b). Proof. intros. zero_or_not c. rewrite (Z.mul_comm c b). zero_or_not b. rewrite (Z.mul_comm b c). apply Z.mul_rem_distr_l; auto. Qed. Lemma Zmult_rem_distr_r: forall a b c, Z.rem (a*c) (b*c) = (Z.rem a b) * c. Proof. intros. zero_or_not b. rewrite (Z.mul_comm b c). zero_or_not c. rewrite (Z.mul_comm c b). apply Z.mul_rem_distr_r; auto. Qed. (** Operations modulo. *) Theorem Zrem_rem: forall a n, Z.rem (Z.rem a n) n = Z.rem a n. Proof. intros. zero_or_not n. apply Z.rem_rem; auto. Qed. Theorem Zmult_rem: forall a b n, Z.rem (a * b) n = Z.rem (Z.rem a n * Z.rem b n) n. Proof. intros. zero_or_not n. apply Z.mul_rem; auto. Qed. (** addition and modulo Generally speaking, unlike with Zdiv, we don't have (a+b) rem n = (a rem n + b rem n) rem n for any a and b. For instance, take (8 + (-10)) rem 3 = -2 whereas (8 rem 3 + (-10 rem 3)) rem 3 = 1. *) Theorem Zplus_rem: forall a b n, 0 <= a * b -> Z.rem (a + b) n = Z.rem (Z.rem a n + Z.rem b n) n. Proof. intros. zero_or_not n. apply Z.add_rem; auto. Qed. Lemma Zplus_rem_idemp_l: forall a b n, 0 <= a * b -> Z.rem (Z.rem a n + b) n = Z.rem (a + b) n. Proof. intros. zero_or_not n. apply Z.add_rem_idemp_l; auto. Qed. Lemma Zplus_rem_idemp_r: forall a b n, 0 <= a*b -> Z.rem (b + Z.rem a n) n = Z.rem (b + a) n. Proof. intros. zero_or_not n. apply Z.add_rem_idemp_r; auto. rewrite Z.mul_comm; auto. Qed. Lemma Zmult_rem_idemp_l: forall a b n, Z.rem (Z.rem a n * b) n = Z.rem (a * b) n. Proof. intros. zero_or_not n. apply Z.mul_rem_idemp_l; auto. Qed. Lemma Zmult_rem_idemp_r: forall a b n, Z.rem (b * Z.rem a n) n = Z.rem (b * a) n. Proof. intros. zero_or_not n. apply Z.mul_rem_idemp_r; auto. Qed. (** Unlike with Zdiv, the following result is true without restrictions. *) Lemma Zquot_Zquot : forall a b c, (a÷b)÷c = a÷(b*c). Proof. intros. zero_or_not b. rewrite Z.mul_comm. zero_or_not c. rewrite Z.mul_comm. apply Z.quot_quot; auto. Qed. (** A last inequality: *) Theorem Zquot_mult_le: forall a b c, 0<=a -> 0<=b -> 0<=c -> c*(a÷b) <= (c*a)÷b. Proof. intros. zero_or_not b. apply Z.quot_mul_le; auto with zarith. Qed. (** Z.rem is related to divisibility (see more in Znumtheory) *) Lemma Zrem_divides : forall a b, Z.rem a b = 0 <-> exists c, a = b*c. Proof. intros. zero_or_not b. firstorder. rewrite Z.rem_divide; trivial. split; intros (c,Hc); exists c; subst; auto with zarith. Qed. (** Particular case : dividing by 2 is related with parity *) Lemma Zquot2_odd_remainder : forall a, Remainder a 2 (if Z.odd a then Z.sgn a else 0). Proof. intros [ |p|p]. simpl. left. simpl. auto with zarith. left. destruct p; simpl; auto with zarith. right. destruct p; simpl; split; now auto with zarith. Qed. Lemma Zrem_odd : forall a, Z.rem a 2 = if Z.odd a then Z.sgn a else 0. Proof. intros. symmetry. apply Zrem_unique_full with (Z.quot2 a). apply Zquot2_odd_remainder. apply Zquot2_odd_eqn. Qed. Lemma Zrem_even : forall a, Z.rem a 2 = if Z.even a then 0 else Z.sgn a. Proof. intros a. 
rewrite Zrem_odd, Zodd_even_bool. now destruct Z.even. Qed. Lemma Zeven_rem : forall a, Z.even a = Z.eqb (Z.rem a 2) 0. Proof. intros a. rewrite Zrem_even. destruct a as [ |p|p]; trivial; now destruct p. Qed. Lemma Zodd_rem : forall a, Z.odd a = negb (Z.eqb (Z.rem a 2) 0). Proof. intros a. rewrite Zrem_odd. destruct a as [ |p|p]; trivial; now destruct p. Qed. (** * Interaction with "historic" Zdiv *) (** They agree at least on positive numbers: *) Theorem Zquotrem_Zdiv_eucl_pos : forall a b:Z, 0 <= a -> 0 < b -> a÷b = a/b /\ Z.rem a b = a mod b. Proof. intros. apply Zdiv_mod_unique with b. apply Zrem_lt_pos; auto with zarith. rewrite Z.abs_eq; auto with *; apply Z_mod_lt; auto with *. rewrite <- Z_div_mod_eq; auto with *. symmetry; apply Z.quot_rem; auto with *. Qed. Theorem Zquot_Zdiv_pos : forall a b, 0 <= a -> 0 <= b -> a÷b = a/b. Proof. intros a b Ha Hb. Z.le_elim Hb. - generalize (Zquotrem_Zdiv_eucl_pos a b Ha Hb); intuition. - subst; now rewrite Zquot_0_r, Zdiv_0_r. Qed. Theorem Zrem_Zmod_pos : forall a b, 0 <= a -> 0 < b -> Z.rem a b = a mod b. Proof. intros a b Ha Hb; generalize (Zquotrem_Zdiv_eucl_pos a b Ha Hb); intuition. Qed. (** Modulos are null at the same places *) Theorem Zrem_Zmod_zero : forall a b, b<>0 -> (Z.rem a b = 0 <-> a mod b = 0). Proof. intros. rewrite Zrem_divides, Zmod_divides; intuition. Qed. coq-8.4pl2/theories/ZArith/Zminmax.v0000640000175000001440000000217712010532755016473 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ => false end. Definition Zneq_bool (x y:Z) := match x ?= y with | Eq => false | _ => true end. (** Properties in term of [if ... then ... else ...] *) Lemma Zle_cases n m : if n <=? m then n <= m else n > m. Proof. case Z.leb_spec; now Z.swap_greater. Qed. Lemma Zlt_cases n m : if n = m. Proof. case Z.ltb_spec; now Z.swap_greater. Qed. Lemma Zge_cases n m : if n >=? m then n >= m else n < m. Proof. rewrite Z.geb_leb. case Z.leb_spec; now Z.swap_greater. Qed. Lemma Zgt_cases n m : if n >? m then n > m else n <= m. Proof. rewrite Z.gtb_ltb. case Z.ltb_spec; now Z.swap_greater. Qed. (** Lemmas on [Z.leb] used in contrib/graphs *) Lemma Zle_bool_imp_le n m : (n <=? m) = true -> (n <= m). Proof. apply Z.leb_le. Qed. Lemma Zle_imp_le_bool n m : (n <= m) -> (n <=? m) = true. Proof. apply Z.leb_le. Qed. Notation Zle_bool_refl := Z.leb_refl (compat "8.3"). Lemma Zle_bool_antisym n m : (n <=? m) = true -> (m <=? n) = true -> n = m. Proof. rewrite !Z.leb_le. apply Z.le_antisymm. Qed. Lemma Zle_bool_trans n m p : (n <=? m) = true -> (m <=? p) = true -> (n <=? p) = true. Proof. rewrite !Z.leb_le. apply Z.le_trans. Qed. Definition Zle_bool_total x y : { x <=? y = true } + { y <=? x = true }. Proof. case_eq (x <=? y); intros H. - left; trivial. - right. apply Z.leb_gt in H. now apply Z.leb_le, Z.lt_le_incl. Defined. Lemma Zle_bool_plus_mono n m p q : (n <=? m) = true -> (p <=? q) = true -> (n + p <=? m + q) = true. Proof. rewrite !Z.leb_le. apply Z.add_le_mono. Qed. Lemma Zone_pos : 1 <=? 0 = false. Proof. reflexivity. Qed. Lemma Zone_min_pos n : (n <=? 0) = false -> (1 <=? n) = true. Proof. rewrite Z.leb_le, Z.leb_gt. apply Z.le_succ_l. Qed. (** Properties in term of [iff] *) Lemma Zle_is_le_bool n m : (n <= m) <-> (n <=? m) = true. Proof. symmetry. apply Z.leb_le. Qed. Lemma Zge_is_le_bool n m : (n >= m) <-> (m <=? n) = true. Proof. Z.swap_greater. symmetry. apply Z.leb_le. Qed. Lemma Zlt_is_lt_bool n m : (n < m) <-> (n m) <-> (n >? 
m) = true. Proof. Z.swap_greater. rewrite Z.gtb_ltb. symmetry. apply Z.ltb_lt. Qed. Lemma Zlt_is_le_bool n m : (n < m) <-> (n <=? m - 1) = true. Proof. rewrite Z.leb_le. apply Z.lt_le_pred. Qed. Lemma Zgt_is_le_bool n m : (n > m) <-> (m <=? n - 1) = true. Proof. Z.swap_greater. rewrite Z.leb_le. apply Z.lt_le_pred. Qed. (** Properties of the deprecated [Zeq_bool] *) Lemma Zeq_is_eq_bool x y : x = y <-> Zeq_bool x y = true. Proof. unfold Zeq_bool. rewrite <- Z.compare_eq_iff. destruct Z.compare; now split. Qed. Lemma Zeq_bool_eq x y : Zeq_bool x y = true -> x = y. Proof. apply Zeq_is_eq_bool. Qed. Lemma Zeq_bool_neq x y : Zeq_bool x y = false -> x <> y. Proof. rewrite Zeq_is_eq_bool; now destruct Zeq_bool. Qed. Lemma Zeq_bool_if x y : if Zeq_bool x y then x=y else x<>y. Proof. generalize (Zeq_bool_eq x y) (Zeq_bool_neq x y). destruct Zeq_bool; auto. Qed. coq-8.4pl2/theories/ZArith/intro.tex0000750000175000001440000000035607267560347016555 0ustar notinusers\section{Binary integers : ZArith} The {\tt ZArith} library deals with binary integers (those used by the {\tt Omega} decision tactic). Here are defined various arithmetical notions and their properties, similar to those of {\tt Arith}. coq-8.4pl2/theories/ZArith/Zgcd_alt.v0000640000175000001440000002174112010532755016575 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z -> Z := fun a b => match n with | O => 1 (* arbitrary, since n should be big enough *) | S n => match a with | Z0 => Z.abs b | Zpos _ => Zgcdn n (Z.modulo b a) a | Zneg a => Zgcdn n (Z.modulo b (Zpos a)) (Zpos a) end end. Definition Zgcd_bound (a:Z) := match a with | Z0 => S O | Zpos p => let n := Pos.size_nat p in (n+n)%nat | Zneg p => let n := Pos.size_nat p in (n+n)%nat end. Definition Zgcd_alt a b := Zgcdn (Zgcd_bound a) a b. (** A first obvious fact : [Z.gcd a b] is positive. *) Lemma Zgcdn_pos : forall n a b, 0 <= Zgcdn n a b. Proof. induction n. simpl; auto with zarith. destruct a; simpl; intros; auto with zarith; auto. Qed. Lemma Zgcd_alt_pos : forall a b, 0 <= Zgcd_alt a b. Proof. intros; unfold Z.gcd; apply Zgcdn_pos; auto. Qed. (** We now prove that Z.gcd is indeed a gcd. *) (** 1) We prove a weaker & easier bound. *) Lemma Zgcdn_linear_bound : forall n a b, Z.abs a < Z.of_nat n -> Zis_gcd a b (Zgcdn n a b). Proof. induction n. simpl; intros. exfalso; generalize (Z.abs_nonneg a); omega. destruct a; intros; simpl; [ generalize (Zis_gcd_0_abs b); intuition | | ]; unfold Z.modulo; generalize (Z_div_mod b (Zpos p) (eq_refl Gt)); destruct (Z.div_eucl b (Zpos p)) as (q,r); intros (H0,H1); rewrite Nat2Z.inj_succ in H; simpl Z.abs in H; (assert (H2: Z.abs r < Z.of_nat n) by (rewrite Z.abs_eq; auto with zarith)); assert (IH:=IHn r (Zpos p) H2); clear IHn; simpl in IH |- *; rewrite H0. apply Zis_gcd_for_euclid2; auto. apply Zis_gcd_minus; apply Zis_gcd_sym. apply Zis_gcd_for_euclid2; auto. Qed. (** 2) For Euclid's algorithm, the worst-case situation corresponds to Fibonacci numbers. Let's define them: *) Fixpoint fibonacci (n:nat) : Z := match n with | O => 1 | S O => 1 | S (S n as p) => fibonacci p + fibonacci n end. Lemma fibonacci_pos : forall n, 0 <= fibonacci n. Proof. cut (forall N n, (n 0<=fibonacci n). eauto. induction N. inversion 1. intros. destruct n. simpl; auto with zarith. destruct n. simpl; auto with zarith. change (0 <= fibonacci (S n) + fibonacci n). generalize (IHN n) (IHN (S n)); omega. Qed. 
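(** Two quick illustrative computations (example names arbitrary): the
    fuelled algorithm, run with its computed bound, yields the expected
    gcd on a small instance. *)

Example Zgcd_bound_example : Zgcd_bound 12 = 8%nat.
Proof. reflexivity. Qed.

Example Zgcd_alt_example : Zgcd_alt 12 18 = 6.
Proof. reflexivity. Qed.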
Lemma fibonacci_incr : forall n m, (n<=m)%nat -> fibonacci n <= fibonacci m. Proof. induction 1. auto with zarith. apply Z.le_trans with (fibonacci m); auto. clear. destruct m. simpl; auto with zarith. change (fibonacci (S m) <= fibonacci (S m)+fibonacci m). generalize (fibonacci_pos m); omega. Qed. (** 3) We prove that fibonacci numbers are indeed worst-case: for a given number [n], if we reach a conclusion about [gcd(a,b)] in exactly [n+1] loops, then [fibonacci (n+1)<=a /\ fibonacci(n+2)<=b] *) Lemma Zgcdn_worst_is_fibonacci : forall n a b, 0 < a < b -> Zis_gcd a b (Zgcdn (S n) a b) -> Zgcdn n a b <> Zgcdn (S n) a b -> fibonacci (S n) <= a /\ fibonacci (S (S n)) <= b. Proof. induction n. intros [|a|a]; intros; simpl; omega. intros [|a|a] b (Ha,Ha'); [simpl; omega | | easy ]. remember (S n) as m. rewrite Heqm at 2. simpl Zgcdn. unfold Z.modulo; generalize (Z_div_mod b (Zpos a) eq_refl). destruct (Z.div_eucl b (Zpos a)) as (q,r). intros (EQ,(Hr,Hr')). Z.le_elim Hr. - (* r > 0 *) replace (fibonacci (S (S m))) with (fibonacci (S m) + fibonacci m) by auto. intros. destruct (IHn r (Zpos a) (conj Hr Hr')); auto. + assert (EQ' : r = Zpos a * (-q) + b) by (rewrite EQ; ring). rewrite EQ' at 1. apply Zis_gcd_sym. apply Zis_gcd_for_euclid2; auto. apply Zis_gcd_sym; auto. + split; auto. rewrite EQ. apply Z.add_le_mono; auto. apply Z.le_trans with (Zpos a * 1); auto. now rewrite Z.mul_1_r. apply Z.mul_le_mono_nonneg_l; auto with zarith. change 1 with (Z.succ 0). apply Z.le_succ_l. destruct q; auto with zarith. assert (Zpos a * Zneg p < 0) by now compute. omega. - (* r = 0 *) clear IHn EQ Hr'; intros _. subst r; simpl; rewrite Heqm. destruct n. + simpl. omega. + now destruct 1. Qed. (** 3b) We reformulate the previous result in a more positive way. *) Lemma Zgcdn_ok_before_fibonacci : forall n a b, 0 < a < b -> a < fibonacci (S n) -> Zis_gcd a b (Zgcdn n a b). Proof. destruct a; [ destruct 1; exfalso; omega | | destruct 1; discriminate]. cut (forall k n b, k = (S (Pos.to_nat p) - n)%nat -> 0 < Zpos p < b -> Zpos p < fibonacci (S n) -> Zis_gcd (Zpos p) b (Zgcdn n (Zpos p) b)). destruct 2; eauto. clear n; induction k. intros. assert (Pos.to_nat p < n)%nat by omega. apply Zgcdn_linear_bound. simpl. generalize (inj_le _ _ H2). rewrite Nat2Z.inj_succ. rewrite positive_nat_Z; auto. omega. intros. generalize (Zgcdn_worst_is_fibonacci n (Zpos p) b H0); intros. assert (Zis_gcd (Zpos p) b (Zgcdn (S n) (Zpos p) b)). apply IHk; auto. omega. replace (fibonacci (S (S n))) with (fibonacci (S n)+fibonacci n) by auto. generalize (fibonacci_pos n); omega. replace (Zgcdn n (Zpos p) b) with (Zgcdn (S n) (Zpos p) b); auto. generalize (H2 H3); clear H2 H3; omega. Qed. (** 4) The proposed bound leads to a fibonacci number that is big enough. *) Lemma Zgcd_bound_fibonacci : forall a, 0 < a -> a < fibonacci (Zgcd_bound a). Proof. destruct a; [omega| | intro H; discriminate]. intros _. induction p; [ | | compute; auto ]; simpl Zgcd_bound in *; rewrite plus_comm; simpl plus; set (n:= (Pos.size_nat p+Pos.size_nat p)%nat) in *; simpl; assert (n <> O) by (unfold n; destruct p; simpl; auto). destruct n as [ |m]; [elim H; auto| ]. generalize (fibonacci_pos m); rewrite Pos2Z.inj_xI; omega. destruct n as [ |m]; [elim H; auto| ]. generalize (fibonacci_pos m); rewrite Pos2Z.inj_xO; omega. Qed. (* 5) the end: we glue everything together and take care of situations not corresponding to [0 Zis_gcd (Zpos a) b (Zgcdn n (Zpos a) b). Proof. intros. generalize (Zgcd_bound_fibonacci (Zpos a)). simpl Zgcd_bound in *. 
remember (Pos.size_nat a+Pos.size_nat a)%nat as m. assert (1 < m)%nat. { rewrite Heqm; destruct a; simpl; rewrite 1?plus_comm; auto with arith. } destruct m as [ |m]; [inversion H0; auto| ]. destruct n as [ |n]; [inversion H; auto| ]. simpl Zgcdn. unfold Z.modulo. generalize (Z_div_mod b (Zpos a) (eq_refl Gt)). destruct (Z.div_eucl b (Zpos a)) as (q,r). intros (->,(H1,H2)) H3. apply Zis_gcd_for_euclid2. Z.le_elim H1. + apply Zgcdn_ok_before_fibonacci; auto. apply Z.lt_le_trans with (fibonacci (S m)); [ omega | apply fibonacci_incr; auto]. + subst r; simpl. destruct m as [ |m]; [exfalso; omega| ]. destruct n as [ |n]; [exfalso; omega| ]. simpl; apply Zis_gcd_sym; apply Zis_gcd_0. Qed. Lemma Zgcdn_is_gcd n a b : (Zgcd_bound a <= n)%nat -> Zis_gcd a b (Zgcdn n a b). Proof. destruct a. - simpl; intros. destruct n; [exfalso; omega | ]. simpl; generalize (Zis_gcd_0_abs b); intuition. - apply Zgcdn_is_gcd_pos. - rewrite <- Zgcd_bound_opp, <- Zgcdn_opp. intros. apply Zis_gcd_minus, Zis_gcd_sym. simpl Z.opp. now apply Zgcdn_is_gcd_pos. Qed. Lemma Zgcd_is_gcd : forall a b, Zis_gcd a b (Zgcd_alt a b). Proof. unfold Zgcd_alt; intros; apply Zgcdn_is_gcd; auto. Qed. coq-8.4pl2/theories/ZArith/Int.v0000640000175000001440000003205512063736507015611 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z. Parameter _0 : t. Parameter _1 : t. Parameter _2 : t. Parameter _3 : t. Parameter plus : t -> t -> t. Parameter opp : t -> t. Parameter minus : t -> t -> t. Parameter mult : t -> t -> t. Parameter max : t -> t -> t. Notation "0" := _0 : Int_scope. Notation "1" := _1 : Int_scope. Notation "2" := _2 : Int_scope. Notation "3" := _3 : Int_scope. Infix "+" := plus : Int_scope. Infix "-" := minus : Int_scope. Infix "*" := mult : Int_scope. Notation "- x" := (opp x) : Int_scope. (** For logical relations, we can rely on their counterparts in Z, since they don't appear after extraction. Moreover, using tactics like omega is easier this way. *) Notation "x == y" := (i2z x = i2z y) (at level 70, y at next level, no associativity) : Int_scope. Notation "x <= y" := (i2z x <= i2z y)%Z : Int_scope. Notation "x < y" := (i2z x < i2z y)%Z : Int_scope. Notation "x >= y" := (i2z x >= i2z y)%Z : Int_scope. Notation "x > y" := (i2z x > i2z y)%Z : Int_scope. Notation "x <= y <= z" := (x <= y /\ y <= z) : Int_scope. Notation "x <= y < z" := (x <= y /\ y < z) : Int_scope. Notation "x < y < z" := (x < y /\ y < z) : Int_scope. Notation "x < y <= z" := (x < y /\ y <= z) : Int_scope. (** Some decidability fonctions (informative). *) Axiom gt_le_dec : forall x y : t, {x > y} + {x <= y}. Axiom ge_lt_dec : forall x y : t, {x >= y} + {x < y}. Axiom eq_dec : forall x y : t, { x == y } + {~ x==y }. (** Specifications *) (** First, we ask [i2z] to be injective. Said otherwise, our ad-hoc equality [==] and the generic [=] are in fact equivalent. We define [==] nonetheless since the translation to [Z] for using automatic tactic is easier. *) Axiom i2z_eq : forall n p : t, n == p -> n = p. (** Then, we express the specifications of the above parameters using their Z counterparts. *) Axiom i2z_0 : i2z _0 = 0%Z. Axiom i2z_1 : i2z _1 = 1%Z. Axiom i2z_2 : i2z _2 = 2%Z. Axiom i2z_3 : i2z _3 = 3%Z. Axiom i2z_plus : forall n p, i2z (n + p) = (i2z n + i2z p)%Z. Axiom i2z_opp : forall n, i2z (-n) = (-i2z n)%Z. Axiom i2z_minus : forall n p, i2z (n - p) = (i2z n - i2z p)%Z. Axiom i2z_mult : forall n p, i2z (n * p) = (i2z n * i2z p)%Z. 
Axiom i2z_max : forall n p, i2z (max n p) = Z.max (i2z n) (i2z p). End Int. (** * Facts and tactics using [Int] *) Module MoreInt (Import I:Int). Local Notation int := I.t. (** A magic (but costly) tactic that goes from [int] back to the [Z] friendly world ... *) Hint Rewrite -> i2z_0 i2z_1 i2z_2 i2z_3 i2z_plus i2z_opp i2z_minus i2z_mult i2z_max : i2z. Ltac i2z := match goal with | H : ?a = ?b |- _ => generalize (f_equal i2z H); try autorewrite with i2z; clear H; intro H; i2z | |- ?a = ?b => apply (i2z_eq a b); try autorewrite with i2z; i2z | H : _ |- _ => progress autorewrite with i2z in H; i2z | _ => try autorewrite with i2z end. (** A reflexive version of the [i2z] tactic *) (** this [i2z_refl] is actually weaker than [i2z]. For instance, if a [i2z] is buried deep inside a subterm, [i2z_refl] may miss it. See also the limitation about [Set] or [Type] part below. Anyhow, [i2z_refl] is enough for applying [romega]. *) Ltac i2z_gen := match goal with | |- ?a = ?b => apply (i2z_eq a b); i2z_gen | H : ?a = ?b |- _ => generalize (f_equal i2z H); clear H; i2z_gen | H : eq (A:=Z) ?a ?b |- _ => revert H; i2z_gen | H : Z.lt ?a ?b |- _ => revert H; i2z_gen | H : Z.le ?a ?b |- _ => revert H; i2z_gen | H : Z.gt ?a ?b |- _ => revert H; i2z_gen | H : Z.ge ?a ?b |- _ => revert H; i2z_gen | H : _ -> ?X |- _ => (* A [Set] or [Type] part cannot be dealt with easily using the [ExprP] datatype. So we forget it, leaving a goal that can be weaker than the original. *) match type of X with | Type => clear H; i2z_gen | Prop => revert H; i2z_gen end | H : _ <-> _ |- _ => revert H; i2z_gen | H : _ /\ _ |- _ => revert H; i2z_gen | H : _ \/ _ |- _ => revert H; i2z_gen | H : ~ _ |- _ => revert H; i2z_gen | _ => idtac end. Inductive ExprI : Set := | EI0 : ExprI | EI1 : ExprI | EI2 : ExprI | EI3 : ExprI | EIplus : ExprI -> ExprI -> ExprI | EIopp : ExprI -> ExprI | EIminus : ExprI -> ExprI -> ExprI | EImult : ExprI -> ExprI -> ExprI | EImax : ExprI -> ExprI -> ExprI | EIraw : int -> ExprI. Inductive ExprZ : Set := | EZplus : ExprZ -> ExprZ -> ExprZ | EZopp : ExprZ -> ExprZ | EZminus : ExprZ -> ExprZ -> ExprZ | EZmult : ExprZ -> ExprZ -> ExprZ | EZmax : ExprZ -> ExprZ -> ExprZ | EZofI : ExprI -> ExprZ | EZraw : Z -> ExprZ. Inductive ExprP : Type := | EPeq : ExprZ -> ExprZ -> ExprP | EPlt : ExprZ -> ExprZ -> ExprP | EPle : ExprZ -> ExprZ -> ExprP | EPgt : ExprZ -> ExprZ -> ExprP | EPge : ExprZ -> ExprZ -> ExprP | EPimpl : ExprP -> ExprP -> ExprP | EPequiv : ExprP -> ExprP -> ExprP | EPand : ExprP -> ExprP -> ExprP | EPor : ExprP -> ExprP -> ExprP | EPneg : ExprP -> ExprP | EPraw : Prop -> ExprP. 
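(** For illustration (name arbitrary): the reified form of the statement
    [i2z (_1 + _2) = 3%Z] in the syntax trees above. *)

Example reified_statement_example : ExprP :=
  EPeq (EZofI (EIplus EI1 EI2)) (EZraw 3%Z).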
(** [int] to [ExprI] *) Ltac i2ei trm := match constr:trm with | 0 => constr:EI0 | 1 => constr:EI1 | 2 => constr:EI2 | 3 => constr:EI3 | ?x + ?y => let ex := i2ei x with ey := i2ei y in constr:(EIplus ex ey) | ?x - ?y => let ex := i2ei x with ey := i2ei y in constr:(EIminus ex ey) | ?x * ?y => let ex := i2ei x with ey := i2ei y in constr:(EImult ex ey) | max ?x ?y => let ex := i2ei x with ey := i2ei y in constr:(EImax ex ey) | - ?x => let ex := i2ei x in constr:(EIopp ex) | ?x => constr:(EIraw x) end (** [Z] to [ExprZ] *) with z2ez trm := match constr:trm with | (?x + ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EZplus ex ey) | (?x - ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EZminus ex ey) | (?x * ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EZmult ex ey) | (Z.max ?x ?y) => let ex := z2ez x with ey := z2ez y in constr:(EZmax ex ey) | (- ?x)%Z => let ex := z2ez x in constr:(EZopp ex) | i2z ?x => let ex := i2ei x in constr:(EZofI ex) | ?x => constr:(EZraw x) end. (** [Prop] to [ExprP] *) Ltac p2ep trm := match constr:trm with | (?x <-> ?y) => let ex := p2ep x with ey := p2ep y in constr:(EPequiv ex ey) | (?x -> ?y) => let ex := p2ep x with ey := p2ep y in constr:(EPimpl ex ey) | (?x /\ ?y) => let ex := p2ep x with ey := p2ep y in constr:(EPand ex ey) | (?x \/ ?y) => let ex := p2ep x with ey := p2ep y in constr:(EPor ex ey) | (~ ?x) => let ex := p2ep x in constr:(EPneg ex) | (eq (A:=Z) ?x ?y) => let ex := z2ez x with ey := z2ez y in constr:(EPeq ex ey) | (?x < ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EPlt ex ey) | (?x <= ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EPle ex ey) | (?x > ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EPgt ex ey) | (?x >= ?y)%Z => let ex := z2ez x with ey := z2ez y in constr:(EPge ex ey) | ?x => constr:(EPraw x) end. (** [ExprI] to [int] *) Fixpoint ei2i (e:ExprI) : int := match e with | EI0 => 0 | EI1 => 1 | EI2 => 2 | EI3 => 3 | EIplus e1 e2 => (ei2i e1)+(ei2i e2) | EIminus e1 e2 => (ei2i e1)-(ei2i e2) | EImult e1 e2 => (ei2i e1)*(ei2i e2) | EImax e1 e2 => max (ei2i e1) (ei2i e2) | EIopp e => -(ei2i e) | EIraw i => i end. (** [ExprZ] to [Z] *) Fixpoint ez2z (e:ExprZ) : Z := match e with | EZplus e1 e2 => ((ez2z e1)+(ez2z e2))%Z | EZminus e1 e2 => ((ez2z e1)-(ez2z e2))%Z | EZmult e1 e2 => ((ez2z e1)*(ez2z e2))%Z | EZmax e1 e2 => Z.max (ez2z e1) (ez2z e2) | EZopp e => (-(ez2z e))%Z | EZofI e => i2z (ei2i e) | EZraw z => z end. (** [ExprP] to [Prop] *) Fixpoint ep2p (e:ExprP) : Prop := match e with | EPeq e1 e2 => (ez2z e1) = (ez2z e2) | EPlt e1 e2 => ((ez2z e1)<(ez2z e2))%Z | EPle e1 e2 => ((ez2z e1)<=(ez2z e2))%Z | EPgt e1 e2 => ((ez2z e1)>(ez2z e2))%Z | EPge e1 e2 => ((ez2z e1)>=(ez2z e2))%Z | EPimpl e1 e2 => (ep2p e1) -> (ep2p e2) | EPequiv e1 e2 => (ep2p e1) <-> (ep2p e2) | EPand e1 e2 => (ep2p e1) /\ (ep2p e2) | EPor e1 e2 => (ep2p e1) \/ (ep2p e2) | EPneg e => ~ (ep2p e) | EPraw p => p end. (** [ExprI] (supposed under a [i2z]) to a simplified [ExprZ] *) Fixpoint norm_ei (e:ExprI) : ExprZ := match e with | EI0 => EZraw (0%Z) | EI1 => EZraw (1%Z) | EI2 => EZraw (2%Z) | EI3 => EZraw (3%Z) | EIplus e1 e2 => EZplus (norm_ei e1) (norm_ei e2) | EIminus e1 e2 => EZminus (norm_ei e1) (norm_ei e2) | EImult e1 e2 => EZmult (norm_ei e1) (norm_ei e2) | EImax e1 e2 => EZmax (norm_ei e1) (norm_ei e2) | EIopp e => EZopp (norm_ei e) | EIraw i => EZofI (EIraw i) end. 
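(** A small concrete instance (illustrative, name arbitrary): normalization
    turns the reified constants into raw [Z] literals. *)

Example norm_ei_example :
  norm_ei (EIplus EI1 EI2) = EZplus (EZraw 1%Z) (EZraw 2%Z).
Proof. reflexivity. Qed.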
(** [ExprZ] to a simplified [ExprZ] *) Fixpoint norm_ez (e:ExprZ) : ExprZ := match e with | EZplus e1 e2 => EZplus (norm_ez e1) (norm_ez e2) | EZminus e1 e2 => EZminus (norm_ez e1) (norm_ez e2) | EZmult e1 e2 => EZmult (norm_ez e1) (norm_ez e2) | EZmax e1 e2 => EZmax (norm_ez e1) (norm_ez e2) | EZopp e => EZopp (norm_ez e) | EZofI e => norm_ei e | EZraw z => EZraw z end. (** [ExprP] to a simplified [ExprP] *) Fixpoint norm_ep (e:ExprP) : ExprP := match e with | EPeq e1 e2 => EPeq (norm_ez e1) (norm_ez e2) | EPlt e1 e2 => EPlt (norm_ez e1) (norm_ez e2) | EPle e1 e2 => EPle (norm_ez e1) (norm_ez e2) | EPgt e1 e2 => EPgt (norm_ez e1) (norm_ez e2) | EPge e1 e2 => EPge (norm_ez e1) (norm_ez e2) | EPimpl e1 e2 => EPimpl (norm_ep e1) (norm_ep e2) | EPequiv e1 e2 => EPequiv (norm_ep e1) (norm_ep e2) | EPand e1 e2 => EPand (norm_ep e1) (norm_ep e2) | EPor e1 e2 => EPor (norm_ep e1) (norm_ep e2) | EPneg e => EPneg (norm_ep e) | EPraw p => EPraw p end. Lemma norm_ei_correct : forall e:ExprI, ez2z (norm_ei e) = i2z (ei2i e). Proof. induction e; simpl; intros; i2z; auto; try congruence. Qed. Lemma norm_ez_correct : forall e:ExprZ, ez2z (norm_ez e) = ez2z e. Proof. induction e; simpl; intros; i2z; auto; try congruence; apply norm_ei_correct. Qed. Lemma norm_ep_correct : forall e:ExprP, ep2p (norm_ep e) <-> ep2p e. Proof. induction e; simpl; repeat (rewrite norm_ez_correct); intuition. Qed. Lemma norm_ep_correct2 : forall e:ExprP, ep2p (norm_ep e) -> ep2p e. Proof. intros; destruct (norm_ep_correct e); auto. Qed. Ltac i2z_refl := i2z_gen; match goal with |- ?t => let e := p2ep t in change (ep2p e); apply norm_ep_correct2; simpl end. (* i2z_refl can be replaced below by (simpl in *; i2z). The reflexive version improves compilation of AVL files by about 15% *) End MoreInt. (** * An implementation of [Int] *) (** It's always nice to know that our [Int] interface is realizable :-) *) Module Z_as_Int <: Int. Local Open Scope Z_scope. Definition t := Z. Definition _0 := 0. Definition _1 := 1. Definition _2 := 2. Definition _3 := 3. Definition plus := Z.add. Definition opp := Z.opp. Definition minus := Z.sub. Definition mult := Z.mul. Definition max := Z.max. Definition gt_le_dec := Z_gt_le_dec. Definition ge_lt_dec := Z_ge_lt_dec. Definition eq_dec := Z.eq_dec. Definition i2z : t -> Z := fun n => n. Lemma i2z_eq : forall n p, i2z n=i2z p -> n = p. Proof. auto. Qed. Lemma i2z_0 : i2z _0 = 0. Proof. auto. Qed. Lemma i2z_1 : i2z _1 = 1. Proof. auto. Qed. Lemma i2z_2 : i2z _2 = 2. Proof. auto. Qed. Lemma i2z_3 : i2z _3 = 3. Proof. auto. Qed. Lemma i2z_plus n p : i2z (n + p) = i2z n + i2z p. Proof. auto. Qed. Lemma i2z_opp n : i2z (- n) = - i2z n. Proof. auto. Qed. Lemma i2z_minus n p : i2z (n - p) = i2z n - i2z p. Proof. auto. Qed. Lemma i2z_mult n p : i2z (n * p) = i2z n * i2z p. Proof. auto. Qed. Lemma i2z_max n p : i2z (max n p) = Z.max (i2z n) (i2z p). Proof. auto. Qed. End Z_as_Int. coq-8.4pl2/theories/ZArith/ZArith_base.v0000640000175000001440000000242612010532755017240 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* eq==>eq) modulo. Proof. congruence. Qed. Instance div_wd : Proper (eq==>eq==>eq) div. Proof. congruence. Qed. Theorem div_mod a b : b<>0 -> a = b*(div a b) + modulo a b. Proof. intros Hb. unfold div, modulo. rewrite Z.mul_assoc. rewrite Z.sgn_abs. apply Z.div_mod. now destruct b. Qed. Lemma mod_always_pos a b : b<>0 -> 0 <= modulo a b < Z.abs b. Proof. intros Hb. unfold modulo. 
apply Z.mod_pos_bound. destruct b; compute; trivial. now destruct Hb. Qed. Lemma mod_bound_pos a b : 0<=a -> 0 0 <= modulo a b < b. Proof. intros _ Hb. rewrite <- (Z.abs_eq b) at 3 by Z.order. apply mod_always_pos. Z.order. Qed. Include ZEuclidProp Z Z Z. End ZEuclid. coq-8.4pl2/theories/ZArith/BinIntDef.v0000640000175000001440000003503712010532755016653 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | pos p => pos p~0 | neg p => neg p~0 end. Definition succ_double x := match x with | 0 => 1 | pos p => pos p~1 | neg p => neg (Pos.pred_double p) end. Definition pred_double x := match x with | 0 => -1 | neg p => neg p~1 | pos p => pos (Pos.pred_double p) end. (** ** Subtraction of positive into Z *) Fixpoint pos_sub (x y:positive) {struct y} : Z := match x, y with | p~1, q~1 => double (pos_sub p q) | p~1, q~0 => succ_double (pos_sub p q) | p~1, 1 => pos p~0 | p~0, q~1 => pred_double (pos_sub p q) | p~0, q~0 => double (pos_sub p q) | p~0, 1 => pos (Pos.pred_double p) | 1, q~1 => neg q~0 | 1, q~0 => neg (Pos.pred_double q) | 1, 1 => Z0 end%positive. (** ** Addition *) Definition add x y := match x, y with | 0, y => y | x, 0 => x | pos x', pos y' => pos (x' + y') | pos x', neg y' => pos_sub x' y' | neg x', pos y' => pos_sub y' x' | neg x', neg y' => neg (x' + y') end. Infix "+" := add : Z_scope. (** ** Opposite *) Definition opp x := match x with | 0 => 0 | pos x => neg x | neg x => pos x end. Notation "- x" := (opp x) : Z_scope. (** ** Successor *) Definition succ x := x + 1. (** ** Predecessor *) Definition pred x := x + -1. (** ** Subtraction *) Definition sub m n := m + -n. Infix "-" := sub : Z_scope. (** ** Multiplication *) Definition mul x y := match x, y with | 0, _ => 0 | _, 0 => 0 | pos x', pos y' => pos (x' * y') | pos x', neg y' => neg (x' * y') | neg x', pos y' => neg (x' * y') | neg x', neg y' => pos (x' * y') end. Infix "*" := mul : Z_scope. (** ** Power function *) Definition pow_pos (z:Z) (n:positive) := Pos.iter n (mul z) 1. Definition pow x y := match y with | pos p => pow_pos x p | 0 => 1 | neg _ => 0 end. Infix "^" := pow : Z_scope. (** ** Square *) Definition square x := match x with | 0 => 0 | pos p => pos (Pos.square p) | neg p => pos (Pos.square p) end. (** ** Comparison *) Definition compare x y := match x, y with | 0, 0 => Eq | 0, pos y' => Lt | 0, neg y' => Gt | pos x', 0 => Gt | pos x', pos y' => (x' ?= y')%positive | pos x', neg y' => Gt | neg x', 0 => Lt | neg x', pos y' => Lt | neg x', neg y' => CompOpp ((x' ?= y')%positive) end. Infix "?=" := compare (at level 70, no associativity) : Z_scope. (** ** Sign function *) Definition sgn z := match z with | 0 => 0 | pos p => 1 | neg p => -1 end. (** Boolean equality and comparisons *) Definition leb x y := match x ?= y with | Gt => false | _ => true end. Definition ltb x y := match x ?= y with | Lt => true | _ => false end. (** Nota: [geb] and [gtb] are provided for compatibility, but [leb] and [ltb] should rather be used instead, since more results will be available on them. *) Definition geb x y := match x ?= y with | Lt => false | _ => true end. Definition gtb x y := match x ?= y with | Gt => true | _ => false end. Fixpoint eqb x y := match x, y with | 0, 0 => true | pos p, pos q => Pos.eqb p q | neg p, neg q => Pos.eqb p q | _, _ => false end. Infix "=?" := eqb (at level 70, no associativity) : Z_scope. Infix "<=?" := leb (at level 70, no associativity) : Z_scope. Infix "=?" 
:= geb (at level 70, no associativity) : Z_scope. Infix ">?" := gtb (at level 70, no associativity) : Z_scope. (** ** Minimum and maximum *) Definition max n m := match n ?= m with | Eq | Gt => n | Lt => m end. Definition min n m := match n ?= m with | Eq | Lt => n | Gt => m end. (** ** Absolute value *) Definition abs z := match z with | 0 => 0 | pos p => pos p | neg p => pos p end. (** ** Conversions *) (** From [Z] to [nat] via absolute value *) Definition abs_nat (z:Z) : nat := match z with | 0 => 0%nat | pos p => Pos.to_nat p | neg p => Pos.to_nat p end. (** From [Z] to [N] via absolute value *) Definition abs_N (z:Z) : N := match z with | 0 => 0%N | pos p => N.pos p | neg p => N.pos p end. (** From [Z] to [nat] by rounding negative numbers to 0 *) Definition to_nat (z:Z) : nat := match z with | pos p => Pos.to_nat p | _ => O end. (** From [Z] to [N] by rounding negative numbers to 0 *) Definition to_N (z:Z) : N := match z with | pos p => N.pos p | _ => 0%N end. (** From [nat] to [Z] *) Definition of_nat (n:nat) : Z := match n with | O => 0 | S n => pos (Pos.of_succ_nat n) end. (** From [N] to [Z] *) Definition of_N (n:N) : Z := match n with | 0%N => 0 | N.pos p => pos p end. (** From [Z] to [positive] by rounding nonpositive numbers to 1 *) Definition to_pos (z:Z) : positive := match z with | pos p => p | _ => 1%positive end. (** ** Iteration of a function By convention, iterating a negative number of times is identity. *) Definition iter (n:Z) {A} (f:A -> A) (x:A) := match n with | pos p => Pos.iter p f x | _ => x end. (** ** Euclidean divisions for binary integers *) (** Concerning the many possible variants of integer divisions, see the headers of the generic files [ZDivFloor], [ZDivTrunc], [ZDivEucl], and the article by R. Boute mentioned there. We provide here two flavours, Floor and Trunc, while the Euclid convention can be found in file Zeuclid.v For non-zero b, they all satisfy [a = b*(a/b) + (a mod b)] and [ |a mod b| < |b| ], but the sign of the modulo will differ when [a<0] and/or [b<0]. *) (** ** Floor division *) (** [div_eucl] provides a Truncated-Toward-Bottom (a.k.a Floor) Euclidean division. Its projections are named [div] (noted "/") and [modulo] (noted with an infix "mod"). These functions correspond to the `div` and `mod` of Haskell. This is the historical convention of Coq. The main properties of this convention are : - we have [sgn (a mod b) = sgn (b)] - [div a b] is the greatest integer smaller or equal to the exact fraction [a/b]. - there is no easy sign rule. In addition, note that we arbitrary take [a/0 = 0] and [a mod 0 = 0]. *) (** First, a division for positive numbers. Even if the second argument is a Z, the answer is arbitrary is it isn't a Zpos. *) Fixpoint pos_div_eucl (a:positive) (b:Z) : Z * Z := match a with | xH => if 2 <=? b then (0, 1) else (1, 0) | xO a' => let (q, r) := pos_div_eucl a' b in let r' := 2 * r in if r' let (q, r) := pos_div_eucl a' b in let r' := 2 * r + 1 in if r' (0, 0) | _, 0 => (0, 0) | pos a', pos _ => pos_div_eucl a' b | neg a', pos _ => let (q, r) := pos_div_eucl a' b in match r with | 0 => (- q, 0) | _ => (- (q + 1), b - r) end | neg a', neg b' => let (q, r) := pos_div_eucl a' (pos b') in (q, - r) | pos a', neg b' => let (q, r) := pos_div_eucl a' (pos b') in match r with | 0 => (- q, 0) | _ => (- (q + 1), b + r) end end. Definition div (a b:Z) : Z := let (q, _) := div_eucl a b in q. Definition modulo (a b:Z) : Z := let (_, r) := div_eucl a b in r. Infix "/" := div : Z_scope. 
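(** Two illustrative computations (example names arbitrary): floor
    division rounds toward minus infinity, so the quotient of [-7] by [2]
    is [-4] and the remainder is non-negative. *)

Example div_floor_example : (-7) / 2 = -4.
Proof. reflexivity. Qed.

Example modulo_floor_example : modulo (-7) 2 = 1.
Proof. reflexivity. Qed.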
Infix "mod" := modulo (at level 40, no associativity) : Z_scope. (** ** Trunc Division *) (** [quotrem] provides a Truncated-Toward-Zero Euclidean division. Its projections are named [quot] (noted "÷") and [rem]. These functions correspond to the `quot` and `rem` of Haskell. This division convention is used in most programming languages, e.g. Ocaml. With this convention: - we have [sgn(a rem b) = sgn(a)] - sign rule for division: [quot (-a) b = quot a (-b) = -(quot a b)] - and for modulo: [a rem (-b) = a rem b] and [(-a) rem b = -(a rem b)] Note that we arbitrary take here [quot a 0 = 0] and [a rem 0 = a]. *) Definition quotrem (a b:Z) : Z * Z := match a, b with | 0, _ => (0, 0) | _, 0 => (0, a) | pos a, pos b => let (q, r) := N.pos_div_eucl a (N.pos b) in (of_N q, of_N r) | neg a, pos b => let (q, r) := N.pos_div_eucl a (N.pos b) in (-of_N q, - of_N r) | pos a, neg b => let (q, r) := N.pos_div_eucl a (N.pos b) in (-of_N q, of_N r) | neg a, neg b => let (q, r) := N.pos_div_eucl a (N.pos b) in (of_N q, - of_N r) end. Definition quot a b := fst (quotrem a b). Definition rem a b := snd (quotrem a b). Infix "÷" := quot (at level 40, left associativity) : Z_scope. (** No infix notation for rem, otherwise it becomes a keyword *) (** ** Parity functions *) Definition even z := match z with | 0 => true | pos (xO _) => true | neg (xO _) => true | _ => false end. Definition odd z := match z with | 0 => false | pos (xO _) => false | neg (xO _) => false | _ => true end. (** ** Division by two *) (** [div2] performs rounding toward bottom, it is hence a particular case of [div], and for all relative number [n] we have: [n = 2 * div2 n + if odd n then 1 else 0]. *) Definition div2 z := match z with | 0 => 0 | pos 1 => 0 | pos p => pos (Pos.div2 p) | neg p => neg (Pos.div2_up p) end. (** [quot2] performs rounding toward zero, it is hence a particular case of [quot], and for all relative number [n] we have: [n = 2 * quot2 n + if odd n then sgn n else 0]. *) Definition quot2 (z:Z) := match z with | 0 => 0 | pos 1 => 0 | pos p => pos (Pos.div2 p) | neg 1 => 0 | neg p => neg (Pos.div2 p) end. (** NB: [Z.quot2] used to be named [Z.div2] in Coq <= 8.3 *) (** * Base-2 logarithm *) Definition log2 z := match z with | pos (p~1) => pos (Pos.size p) | pos (p~0) => pos (Pos.size p) | _ => 0 end. (** ** Square root *) Definition sqrtrem n := match n with | 0 => (0, 0) | pos p => match Pos.sqrtrem p with | (s, IsPos r) => (pos s, pos r) | (s, _) => (pos s, 0) end | neg _ => (0,0) end. Definition sqrt n := match n with | pos p => pos (Pos.sqrt p) | _ => 0 end. (** ** Greatest Common Divisor *) Definition gcd a b := match a,b with | 0, _ => abs b | _, 0 => abs a | pos a, pos b => pos (Pos.gcd a b) | pos a, neg b => pos (Pos.gcd a b) | neg a, pos b => pos (Pos.gcd a b) | neg a, neg b => pos (Pos.gcd a b) end. (** A generalized gcd, also computing division of a and b by gcd. *) Definition ggcd a b : Z*(Z*Z) := match a,b with | 0, _ => (abs b,(0, sgn b)) | _, 0 => (abs a,(sgn a, 0)) | pos a, pos b => let '(g,(aa,bb)) := Pos.ggcd a b in (pos g, (pos aa, pos bb)) | pos a, neg b => let '(g,(aa,bb)) := Pos.ggcd a b in (pos g, (pos aa, neg bb)) | neg a, pos b => let '(g,(aa,bb)) := Pos.ggcd a b in (pos g, (neg aa, pos bb)) | neg a, neg b => let '(g,(aa,bb)) := Pos.ggcd a b in (pos g, (neg aa, neg bb)) end. (** ** Bitwise functions *) (** When accessing the bits of negative numbers, all functions below will use the two's complement representation. For instance, [-1] will correspond to an infinite stream of true bits. 
If this isn't what you're looking for, you can use [abs] first and then access the bits of the absolute value. *) (** [testbit] : accessing the [n]-th bit of a number [a]. For negative [n], we arbitrarily answer [false]. *) Definition testbit a n := match n with | 0 => odd a | pos p => match a with | 0 => false | pos a => Pos.testbit a (N.pos p) | neg a => negb (N.testbit (Pos.pred_N a) (N.pos p)) end | neg _ => false end. (** Shifts Nota: a shift to the right by [-n] will be a shift to the left by [n], and vice-versa. For fulfilling the two's complement convention, shifting to the right a negative number should correspond to a division by 2 with rounding toward bottom, hence the use of [div2] instead of [quot2]. *) Definition shiftl a n := match n with | 0 => a | pos p => Pos.iter p (mul 2) a | neg p => Pos.iter p div2 a end. Definition shiftr a n := shiftl a (-n). (** Bitwise operations [lor] [land] [ldiff] [lxor] *) Definition lor a b := match a, b with | 0, _ => b | _, 0 => a | pos a, pos b => pos (Pos.lor a b) | neg a, pos b => neg (N.succ_pos (N.ldiff (Pos.pred_N a) (N.pos b))) | pos a, neg b => neg (N.succ_pos (N.ldiff (Pos.pred_N b) (N.pos a))) | neg a, neg b => neg (N.succ_pos (N.land (Pos.pred_N a) (Pos.pred_N b))) end. Definition land a b := match a, b with | 0, _ => 0 | _, 0 => 0 | pos a, pos b => of_N (Pos.land a b) | neg a, pos b => of_N (N.ldiff (N.pos b) (Pos.pred_N a)) | pos a, neg b => of_N (N.ldiff (N.pos a) (Pos.pred_N b)) | neg a, neg b => neg (N.succ_pos (N.lor (Pos.pred_N a) (Pos.pred_N b))) end. Definition ldiff a b := match a, b with | 0, _ => 0 | _, 0 => a | pos a, pos b => of_N (Pos.ldiff a b) | neg a, pos b => neg (N.succ_pos (N.lor (Pos.pred_N a) (N.pos b))) | pos a, neg b => of_N (N.land (N.pos a) (Pos.pred_N b)) | neg a, neg b => of_N (N.ldiff (Pos.pred_N b) (Pos.pred_N a)) end. Definition lxor a b := match a, b with | 0, _ => b | _, 0 => a | pos a, pos b => of_N (Pos.lxor a b) | neg a, pos b => neg (N.succ_pos (N.lxor (Pos.pred_N a) (N.pos b))) | pos a, neg b => neg (N.succ_pos (N.lxor (N.pos a) (Pos.pred_N b))) | neg a, neg b => of_N (N.lxor (Pos.pred_N a) (Pos.pred_N b)) end. End Z.coq-8.4pl2/theories/ZArith/Wf_Z.v0000640000175000001440000001356312010532755015716 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ∀x:Z, x <= 0 -> P x /\ || || (Q O) ∧ (∀n:nat, Q n -> Q (S n)) <=== (P 0) ∧ (∀x:Z, P x -> P (Z.succ x)) <=== (Z.of_nat (S n) = Z.succ (Z.of_nat n)) <=== Z_of_nat_complete >> Then the diagram will be closed and the theorem proved. *) Lemma Z_of_nat_complete (x : Z) : 0 <= x -> exists n : nat, x = Z.of_nat n. Proof. intros H. exists (Z.to_nat x). symmetry. now apply Z2Nat.id. Qed. Lemma Z_of_nat_complete_inf (x : Z) : 0 <= x -> {n : nat | x = Z.of_nat n}. Proof. intros H. exists (Z.to_nat x). symmetry. now apply Z2Nat.id. Qed. Lemma Z_of_nat_prop : forall P:Z -> Prop, (forall n:nat, P (Z.of_nat n)) -> forall x:Z, 0 <= x -> P x. Proof. intros P H x Hx. now destruct (Z_of_nat_complete x Hx) as (n,->). Qed. Lemma Z_of_nat_set : forall P:Z -> Set, (forall n:nat, P (Z.of_nat n)) -> forall x:Z, 0 <= x -> P x. Proof. intros P H x Hx. now destruct (Z_of_nat_complete_inf x Hx) as (n,->). Qed. Lemma natlike_ind : forall P:Z -> Prop, P 0 -> (forall x:Z, 0 <= x -> P x -> P (Z.succ x)) -> forall x:Z, 0 <= x -> P x. Proof. intros P Ho Hrec x Hx; apply Z_of_nat_prop; trivial. induction n. exact Ho. rewrite Nat2Z.inj_succ. 
apply Hrec; trivial using Nat2Z.is_nonneg. Qed. Lemma natlike_rec : forall P:Z -> Set, P 0 -> (forall x:Z, 0 <= x -> P x -> P (Z.succ x)) -> forall x:Z, 0 <= x -> P x. Proof. intros P Ho Hrec x Hx; apply Z_of_nat_set; trivial. induction n. exact Ho. rewrite Nat2Z.inj_succ. apply Hrec; trivial using Nat2Z.is_nonneg. Qed. Section Efficient_Rec. (** [natlike_rec2] is the same as [natlike_rec], but with a different proof, designed to give a better extracted term. *) Let R (a b:Z) := 0 <= a /\ a < b. Let R_wf : well_founded R. Proof. apply well_founded_lt_compat with Z.to_nat. intros x y (Hx,H). apply Z2Nat.inj_lt; Z.order. Qed. Lemma natlike_rec2 : forall P:Z -> Type, P 0 -> (forall z:Z, 0 <= z -> P z -> P (Z.succ z)) -> forall z:Z, 0 <= z -> P z. Proof. intros P Ho Hrec. induction z as [z IH] using (well_founded_induction_type R_wf). destruct z; intros Hz. - apply Ho. - set (y:=Z.pred (Zpos p)). assert (LE : 0 <= y) by (unfold y; now apply Z.lt_le_pred). assert (EQ : Zpos p = Z.succ y) by (unfold y; now rewrite Z.succ_pred). rewrite EQ. apply Hrec, IH; trivial. split; trivial. unfold y; apply Z.lt_pred_l. - now destruct Hz. Qed. (** A variant of the previous using [Z.pred] instead of [Z.succ]. *) Lemma natlike_rec3 : forall P:Z -> Type, P 0 -> (forall z:Z, 0 < z -> P (Z.pred z) -> P z) -> forall z:Z, 0 <= z -> P z. Proof. intros P Ho Hrec. induction z as [z IH] using (well_founded_induction_type R_wf). destruct z; intros Hz. - apply Ho. - assert (EQ : 0 <= Z.pred (Zpos p)) by now apply Z.lt_le_pred. apply Hrec. easy. apply IH; trivial. split; trivial. apply Z.lt_pred_l. - now destruct Hz. Qed. (** A more general induction principle on non-negative numbers using [Z.lt]. *) Lemma Zlt_0_rec : forall P:Z -> Type, (forall x:Z, (forall y:Z, 0 <= y < x -> P y) -> 0 <= x -> P x) -> forall x:Z, 0 <= x -> P x. Proof. intros P Hrec. induction x as [x IH] using (well_founded_induction_type R_wf). destruct x; intros Hx. - apply Hrec; trivial. intros y (Hy,Hy'). assert (0 < 0) by now apply Z.le_lt_trans with y. discriminate. - apply Hrec; trivial. intros y (Hy,Hy'). apply IH; trivial. now split. - now destruct Hx. Defined. Lemma Zlt_0_ind : forall P:Z -> Prop, (forall x:Z, (forall y:Z, 0 <= y < x -> P y) -> 0 <= x -> P x) -> forall x:Z, 0 <= x -> P x. Proof. exact Zlt_0_rec. Qed. (** Obsolete version of [Z.lt] induction principle on non-negative numbers *) Lemma Z_lt_rec : forall P:Z -> Type, (forall x:Z, (forall y:Z, 0 <= y < x -> P y) -> P x) -> forall x:Z, 0 <= x -> P x. Proof. intros P Hrec; apply Zlt_0_rec; auto. Qed. Lemma Z_lt_induction : forall P:Z -> Prop, (forall x:Z, (forall y:Z, 0 <= y < x -> P y) -> P x) -> forall x:Z, 0 <= x -> P x. Proof. exact Z_lt_rec. Qed. (** An even more general induction principle using [Z.lt]. *) Lemma Zlt_lower_bound_rec : forall P:Z -> Type, forall z:Z, (forall x:Z, (forall y:Z, z <= y < x -> P y) -> z <= x -> P x) -> forall x:Z, z <= x -> P x. Proof. intros P z Hrec x Hx. rewrite <- (Z.sub_simpl_r x z). apply Z.le_0_sub in Hx. pattern (x - z); apply Zlt_0_rec; trivial. clear x Hx. intros x IH Hx. apply Hrec. intros y (Hy,Hy'). rewrite <- (Z.sub_simpl_r y z). apply IH; split. now rewrite Z.le_0_sub. now apply Z.lt_sub_lt_add_r. now rewrite <- (Z.add_le_mono_r 0 x z). Qed. Lemma Zlt_lower_bound_ind : forall P:Z -> Prop, forall z:Z, (forall x:Z, (forall y:Z, z <= y < x -> P y) -> z <= x -> P x) -> forall x:Z, z <= x -> P x. Proof. exact Zlt_lower_bound_rec. Qed. End Efficient_Rec. 
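(** A small usage sketch of [natlike_ind] (illustrative, name arbitrary;
    it re-proves a special case of [Z2Nat.id] and assumes
    [Z2Nat.inj_succ] from [Znat]). *)

Example natlike_ind_example :
  forall z:Z, 0 <= z -> Z.of_nat (Z.to_nat z) = z.
Proof.
  apply natlike_ind.
  - reflexivity.
  - intros x Hx IH. now rewrite Z2Nat.inj_succ, Nat2Z.inj_succ, IH.
Qed.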
coq-8.4pl2/theories/ZArith/auxiliary.v0000640000175000001440000000453512010532755017057 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Zne (n + - m) 0. Proof. unfold Zne. now rewrite <- Z.sub_move_0_r. Qed. Theorem Zegal_left n m : n = m -> n + - m = 0. Proof. apply Z.sub_move_0_r. Qed. Theorem Zle_left n m : n <= m -> 0 <= m + - n. Proof. apply Z.le_0_sub. Qed. Theorem Zle_left_rev n m : 0 <= m + - n -> n <= m. Proof. apply Z.le_0_sub. Qed. Theorem Zlt_left_rev n m : 0 < m + - n -> n < m. Proof. apply Z.lt_0_sub. Qed. Theorem Zlt_left_lt n m : n < m -> 0 < m + - n. Proof. apply Z.lt_0_sub. Qed. Theorem Zlt_left n m : n < m -> 0 <= m + -1 + - n. Proof. intros. rewrite Z.add_shuffle0. change (-1) with (- Z.succ 0). now apply Z.le_0_sub, Z.le_succ_l, Z.lt_0_sub. Qed. Theorem Zge_left n m : n >= m -> 0 <= n + - m. Proof. Z.swap_greater. apply Z.le_0_sub. Qed. Theorem Zgt_left n m : n > m -> 0 <= n + -1 + - m. Proof. Z.swap_greater. apply Zlt_left. Qed. Theorem Zgt_left_gt n m : n > m -> n + - m > 0. Proof. Z.swap_greater. apply Z.lt_0_sub. Qed. Theorem Zgt_left_rev n m : n + - m > 0 -> n > m. Proof. Z.swap_greater. apply Z.lt_0_sub. Qed. Theorem Zle_mult_approx n m p : n > 0 -> p > 0 -> 0 <= m -> 0 <= m * n + p. Proof. Z.swap_greater. intros. Z.order_pos. Qed. Theorem Zmult_le_approx n m p : n > 0 -> n > p -> 0 <= m * n + p -> 0 <= m. Proof. Z.swap_greater. intros. apply Z.lt_succ_r. apply Z.mul_pos_cancel_r with n; trivial. Z.nzsimpl. apply Z.le_lt_trans with (m*n+p); trivial. now apply Z.add_lt_mono_l. Qed. coq-8.4pl2/theories/ZArith/ZOdiv_def.v0000640000175000001440000000132611736570167016731 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 -> forall a:positive, let (q, r) := Z.pos_div_eucl a b in Zpos a = b * q + r /\ 0 <= r < b. Proof. intros b Hb a. Z.swap_greater. generalize (Z.pos_div_eucl_eq a b Hb) (Z.pos_div_eucl_bound a b Hb). destruct Z.pos_div_eucl. rewrite Z.mul_comm. auto. Qed. Theorem Z_div_mod a b : b > 0 -> let (q, r) := Z.div_eucl a b in a = b * q + r /\ 0 <= r < b. Proof. Z.swap_greater. intros Hb. assert (Hb' : b<>0) by (now destruct b). generalize (Z.div_eucl_eq a b Hb') (Z.mod_pos_bound a b Hb). unfold Z.modulo. destruct Z.div_eucl. auto. Qed. (** For stating the fully general result, let's give a short name to the condition on the remainder. *) Definition Remainder r b := 0 <= r < b \/ b < r <= 0. (** Another equivalent formulation: *) Definition Remainder_alt r b := Z.abs r < Z.abs b /\ Z.sgn r <> - Z.sgn b. (* In the last formulation, [ Z.sgn r <> - Z.sgn b ] is less nice than saying [ Z.sgn r = Z.sgn b ], but at least it works even when [r] is null. *) Lemma Remainder_equiv : forall r b, Remainder r b <-> Remainder_alt r b. Proof. intros; unfold Remainder, Remainder_alt; omega with *. Qed. Hint Unfold Remainder. (** Now comes the fully general result about Euclidean division. *) Theorem Z_div_mod_full a b : b <> 0 -> let (q, r) := Z.div_eucl a b in a = b * q + r /\ Remainder r b. Proof. intros Hb. generalize (Z.div_eucl_eq a b Hb) (Z.mod_pos_bound a b) (Z.mod_neg_bound a b). unfold Z.modulo. destruct Z.div_eucl as (q,r). intros EQ POS NEG. split; auto. red; destruct b. now destruct Hb. left; now apply POS. right; now apply NEG. Qed. 
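(* Editorial illustration, not part of the original file: with this Euclidean
   division, the remainder of a negative dividend by a positive divisor is
   still non-negative, as required by [Z_div_mod] above. *)
Example div_eucl_example : Z.div_eucl (-7) 2 = (-4, 1).
Proof. reflexivity. Qed.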
(** The same results as before, stated separately in terms of Z.div and Z.modulo *) Lemma Z_mod_remainder a b : b<>0 -> Remainder (a mod b) b. Proof. unfold Z.modulo; intros Hb; generalize (Z_div_mod_full a b Hb); auto. destruct Z.div_eucl; tauto. Qed. Lemma Z_mod_lt a b : b > 0 -> 0 <= a mod b < b. Proof (fun Hb => Z.mod_pos_bound a b (Z.gt_lt _ _ Hb)). Lemma Z_mod_neg a b : b < 0 -> b < a mod b <= 0. Proof (Z.mod_neg_bound a b). Lemma Z_div_mod_eq a b : b > 0 -> a = b*(a/b) + (a mod b). Proof. intros Hb; apply Z.div_mod; auto with zarith. Qed. Lemma Zmod_eq_full a b : b<>0 -> a mod b = a - (a/b)*b. Proof. intros. rewrite Z.mul_comm. now apply Z.mod_eq. Qed. Lemma Zmod_eq a b : b>0 -> a mod b = a - (a/b)*b. Proof. intros. apply Zmod_eq_full. now destruct b. Qed. (** Existence theorem *) Theorem Zdiv_eucl_exist : forall (b:Z)(Hb:b>0)(a:Z), {qr : Z * Z | let (q, r) := qr in a = b * q + r /\ 0 <= r < b}. Proof. intros b Hb a. exists (Z.div_eucl a b). exact (Z_div_mod a b Hb). Qed. Arguments Zdiv_eucl_exist : default implicits. (** Uniqueness theorems *) Theorem Zdiv_mod_unique b q1 q2 r1 r2 : 0 <= r1 < Z.abs b -> 0 <= r2 < Z.abs b -> b*q1+r1 = b*q2+r2 -> q1=q2 /\ r1=r2. Proof. intros Hr1 Hr2 H. rewrite <- (Z.abs_sgn b), <- !Z.mul_assoc in H. destruct (Z.div_mod_unique (Z.abs b) (Z.sgn b * q1) (Z.sgn b * q2) r1 r2); auto. split; trivial. apply Z.mul_cancel_l with (Z.sgn b); trivial. rewrite Z.sgn_null_iff, <- Z.abs_0_iff. destruct Hr1; Z.order. Qed. Theorem Zdiv_mod_unique_2 : forall b q1 q2 r1 r2:Z, Remainder r1 b -> Remainder r2 b -> b*q1+r1 = b*q2+r2 -> q1=q2 /\ r1=r2. Proof Z.div_mod_unique. Theorem Zdiv_unique_full: forall a b q r, Remainder r b -> a = b*q + r -> q = a/b. Proof Z.div_unique. Theorem Zdiv_unique: forall a b q r, 0 <= r < b -> a = b*q + r -> q = a/b. Proof. intros; eapply Zdiv_unique_full; eauto. Qed. Theorem Zmod_unique_full: forall a b q r, Remainder r b -> a = b*q + r -> r = a mod b. Proof Z.mod_unique. Theorem Zmod_unique: forall a b q r, 0 <= r < b -> a = b*q + r -> r = a mod b. Proof. intros; eapply Zmod_unique_full; eauto. Qed. (** * Basic values of divisions and modulo. *) Lemma Zmod_0_l: forall a, 0 mod a = 0. Proof. destruct a; simpl; auto. Qed. Lemma Zmod_0_r: forall a, a mod 0 = 0. Proof. destruct a; simpl; auto. Qed. Lemma Zdiv_0_l: forall a, 0/a = 0. Proof. destruct a; simpl; auto. Qed. Lemma Zdiv_0_r: forall a, a/0 = 0. Proof. destruct a; simpl; auto. Qed. Ltac zero_or_not a := destruct (Z.eq_dec a 0); [subst; rewrite ?Zmod_0_l, ?Zdiv_0_l, ?Zmod_0_r, ?Zdiv_0_r; auto with zarith|]. Lemma Zmod_1_r: forall a, a mod 1 = 0. Proof. intros. zero_or_not a. apply Z.mod_1_r. Qed. Lemma Zdiv_1_r: forall a, a/1 = a. Proof. intros. zero_or_not a. apply Z.div_1_r. Qed. Hint Resolve Zmod_0_l Zmod_0_r Zdiv_0_l Zdiv_0_r Zdiv_1_r Zmod_1_r : zarith. Lemma Zdiv_1_l: forall a, 1 < a -> 1/a = 0. Proof Z.div_1_l. Lemma Zmod_1_l: forall a, 1 < a -> 1 mod a = 1. Proof Z.mod_1_l. Lemma Z_div_same_full : forall a:Z, a<>0 -> a/a = 1. Proof Z.div_same. Lemma Z_mod_same_full : forall a, a mod a = 0. Proof. intros. zero_or_not a. apply Z.mod_same; auto. Qed. Lemma Z_mod_mult : forall a b, (a*b) mod b = 0. Proof. intros. zero_or_not b. apply Z.mod_mul. auto. Qed. Lemma Z_div_mult_full : forall a b:Z, b <> 0 -> (a*b)/b = a. Proof Z.div_mul. (** * Order results about Z.modulo and Z.div *) (* Division of positive numbers is positive. *) Lemma Z_div_pos: forall a b, b > 0 -> 0 <= a -> 0 <= a/b. Proof. intros. apply Z.div_pos; auto with zarith. Qed. 
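(* Editorial illustration, not part of the original file: the convention that
   division and modulo by 0 both return 0, as stated by [Zdiv_0_r] and
   [Zmod_0_r] above. *)
Example div_mod_zero_example : 5 / 0 = 0 /\ 5 mod 0 = 0.
Proof. split; reflexivity. Qed.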
Lemma Z_div_ge0: forall a b, b > 0 -> a >= 0 -> a/b >=0. Proof. intros; generalize (Z_div_pos a b H); auto with zarith. Qed. (** As soon as the divisor is greater or equal than 2, the division is strictly decreasing. *) Lemma Z_div_lt : forall a b:Z, b >= 2 -> a > 0 -> a/b < a. Proof. intros. apply Z.div_lt; auto with zarith. Qed. (** A division of a small number by a bigger one yields zero. *) Theorem Zdiv_small: forall a b, 0 <= a < b -> a/b = 0. Proof Z.div_small. (** Same situation, in term of modulo: *) Theorem Zmod_small: forall a n, 0 <= a < n -> a mod n = a. Proof Z.mod_small. (** [Z.ge] is compatible with a positive division. *) Lemma Z_div_ge : forall a b c:Z, c > 0 -> a >= b -> a/c >= b/c. Proof. intros. apply Z.le_ge. apply Z.div_le_mono; auto with zarith. Qed. (** Same, with [Z.le]. *) Lemma Z_div_le : forall a b c:Z, c > 0 -> a <= b -> a/c <= b/c. Proof. intros. apply Z.div_le_mono; auto with zarith. Qed. (** With our choice of division, rounding of (a/b) is always done toward bottom: *) Lemma Z_mult_div_ge : forall a b:Z, b > 0 -> b*(a/b) <= a. Proof. intros. apply Z.mul_div_le; auto with zarith. Qed. Lemma Z_mult_div_ge_neg : forall a b:Z, b < 0 -> b*(a/b) >= a. Proof. intros. apply Z.le_ge. apply Z.mul_div_ge; auto with zarith. Qed. (** The previous inequalities are exact iff the modulo is zero. *) Lemma Z_div_exact_full_1 : forall a b:Z, a = b*(a/b) -> a mod b = 0. Proof. intros a b. zero_or_not b. rewrite Z.div_exact; auto. Qed. Lemma Z_div_exact_full_2 : forall a b:Z, b <> 0 -> a mod b = 0 -> a = b*(a/b). Proof. intros; rewrite Z.div_exact; auto. Qed. (** A modulo cannot grow beyond its starting point. *) Theorem Zmod_le: forall a b, 0 < b -> 0 <= a -> a mod b <= a. Proof. intros. apply Z.mod_le; auto. Qed. (** Some additionnal inequalities about Z.div. *) Theorem Zdiv_lt_upper_bound: forall a b q, 0 < b -> a < q*b -> a/b < q. Proof. intros a b q; rewrite Z.mul_comm; apply Z.div_lt_upper_bound. Qed. Theorem Zdiv_le_upper_bound: forall a b q, 0 < b -> a <= q*b -> a/b <= q. Proof. intros a b q; rewrite Z.mul_comm; apply Z.div_le_upper_bound. Qed. Theorem Zdiv_le_lower_bound: forall a b q, 0 < b -> q*b <= a -> q <= a/b. Proof. intros a b q; rewrite Z.mul_comm; apply Z.div_le_lower_bound. Qed. (** A division of respect opposite monotonicity for the divisor *) Lemma Zdiv_le_compat_l: forall p q r, 0 <= p -> 0 < q < r -> p / r <= p / q. Proof. intros; apply Z.div_le_compat_l; auto with zarith. Qed. Theorem Zdiv_sgn: forall a b, 0 <= Z.sgn (a/b) * Z.sgn a * Z.sgn b. Proof. destruct a as [ |a|a]; destruct b as [ |b|b]; simpl; auto with zarith; generalize (Z.div_pos (Zpos a) (Zpos b)); unfold Z.div, Z.div_eucl; destruct Z.pos_div_eucl as (q,r); destruct r; omega with *. Qed. (** * Relations between usual operations and Z.modulo and Z.div *) Lemma Z_mod_plus_full : forall a b c:Z, (a + b * c) mod c = a mod c. Proof. intros. zero_or_not c. apply Z.mod_add; auto. Qed. Lemma Z_div_plus_full : forall a b c:Z, c <> 0 -> (a + b * c) / c = a / c + b. Proof Z.div_add. Theorem Z_div_plus_full_l: forall a b c : Z, b <> 0 -> (a * b + c) / b = a + c / b. Proof Z.div_add_l. (** [Z.opp] and [Z.div], [Z.modulo]. Due to the choice of convention for our Euclidean division, some of the relations about [Z.opp] and divisions are rather complex. *) Lemma Zdiv_opp_opp : forall a b:Z, (-a)/(-b) = a/b. Proof. intros. zero_or_not b. apply Z.div_opp_opp; auto. Qed. Lemma Zmod_opp_opp : forall a b:Z, (-a) mod (-b) = - (a mod b). Proof. intros. zero_or_not b. apply Z.mod_opp_opp; auto. Qed. 
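(* Editorial illustration, not part of the original file: since rounding is
   toward bottom, [(-a)/b] differs from [-(a/b)] as soon as the remainder is
   non-zero (compare with [Z_div_nz_opp_full] below). *)
Example div_opp_example : (-7) / 2 = -4 /\ - (7 / 2) = -3.
Proof. split; reflexivity. Qed.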
Lemma Z_mod_zero_opp_full : forall a b:Z, a mod b = 0 -> (-a) mod b = 0. Proof. intros. zero_or_not b. apply Z.mod_opp_l_z; auto. Qed. Lemma Z_mod_nz_opp_full : forall a b:Z, a mod b <> 0 -> (-a) mod b = b - (a mod b). Proof. intros. zero_or_not b. apply Z.mod_opp_l_nz; auto. Qed. Lemma Z_mod_zero_opp_r : forall a b:Z, a mod b = 0 -> a mod (-b) = 0. Proof. intros. zero_or_not b. apply Z.mod_opp_r_z; auto. Qed. Lemma Z_mod_nz_opp_r : forall a b:Z, a mod b <> 0 -> a mod (-b) = (a mod b) - b. Proof. intros. zero_or_not b. apply Z.mod_opp_r_nz; auto. Qed. Lemma Z_div_zero_opp_full : forall a b:Z, a mod b = 0 -> (-a)/b = -(a/b). Proof. intros. zero_or_not b. apply Z.div_opp_l_z; auto. Qed. Lemma Z_div_nz_opp_full : forall a b:Z, a mod b <> 0 -> (-a)/b = -(a/b)-1. Proof. intros a b. zero_or_not b. intros; rewrite Z.div_opp_l_nz; auto. Qed. Lemma Z_div_zero_opp_r : forall a b:Z, a mod b = 0 -> a/(-b) = -(a/b). Proof. intros. zero_or_not b. apply Z.div_opp_r_z; auto. Qed. Lemma Z_div_nz_opp_r : forall a b:Z, a mod b <> 0 -> a/(-b) = -(a/b)-1. Proof. intros a b. zero_or_not b. intros; rewrite Z.div_opp_r_nz; auto. Qed. (** Cancellations. *) Lemma Zdiv_mult_cancel_r : forall a b c:Z, c <> 0 -> (a*c)/(b*c) = a/b. Proof. intros. zero_or_not b. apply Z.div_mul_cancel_r; auto. Qed. Lemma Zdiv_mult_cancel_l : forall a b c:Z, c<>0 -> (c*a)/(c*b) = a/b. Proof. intros. rewrite (Z.mul_comm c b); zero_or_not b. rewrite (Z.mul_comm b c). apply Z.div_mul_cancel_l; auto. Qed. Lemma Zmult_mod_distr_l: forall a b c, (c*a) mod (c*b) = c * (a mod b). Proof. intros. zero_or_not c. rewrite (Z.mul_comm c b); zero_or_not b. rewrite (Z.mul_comm b c). apply Z.mul_mod_distr_l; auto. Qed. Lemma Zmult_mod_distr_r: forall a b c, (a*c) mod (b*c) = (a mod b) * c. Proof. intros. zero_or_not b. rewrite (Z.mul_comm b c); zero_or_not c. rewrite (Z.mul_comm c b). apply Z.mul_mod_distr_r; auto. Qed. (** Operations modulo. *) Theorem Zmod_mod: forall a n, (a mod n) mod n = a mod n. Proof. intros. zero_or_not n. apply Z.mod_mod; auto. Qed. Theorem Zmult_mod: forall a b n, (a * b) mod n = ((a mod n) * (b mod n)) mod n. Proof. intros. zero_or_not n. apply Z.mul_mod; auto. Qed. Theorem Zplus_mod: forall a b n, (a + b) mod n = (a mod n + b mod n) mod n. Proof. intros. zero_or_not n. apply Z.add_mod; auto. Qed. Theorem Zminus_mod: forall a b n, (a - b) mod n = (a mod n - b mod n) mod n. Proof. intros. replace (a - b) with (a + (-1) * b); auto with zarith. replace (a mod n - b mod n) with (a mod n + (-1) * (b mod n)); auto with zarith. rewrite Zplus_mod. rewrite Zmult_mod. rewrite Zplus_mod with (b:=(-1) * (b mod n)). rewrite Zmult_mod. rewrite Zmult_mod with (b:= b mod n). repeat rewrite Zmod_mod; auto. Qed. Lemma Zplus_mod_idemp_l: forall a b n, (a mod n + b) mod n = (a + b) mod n. Proof. intros; rewrite Zplus_mod, Zmod_mod, <- Zplus_mod; auto. Qed. Lemma Zplus_mod_idemp_r: forall a b n, (b + a mod n) mod n = (b + a) mod n. Proof. intros; rewrite Zplus_mod, Zmod_mod, <- Zplus_mod; auto. Qed. Lemma Zminus_mod_idemp_l: forall a b n, (a mod n - b) mod n = (a - b) mod n. Proof. intros; rewrite Zminus_mod, Zmod_mod, <- Zminus_mod; auto. Qed. Lemma Zminus_mod_idemp_r: forall a b n, (a - b mod n) mod n = (a - b) mod n. Proof. intros; rewrite Zminus_mod, Zmod_mod, <- Zminus_mod; auto. Qed. Lemma Zmult_mod_idemp_l: forall a b n, (a mod n * b) mod n = (a * b) mod n. Proof. intros; rewrite Zmult_mod, Zmod_mod, <- Zmult_mod; auto. Qed. Lemma Zmult_mod_idemp_r: forall a b n, (b * (a mod n)) mod n = (b * a) mod n. Proof. 
intros; rewrite Zmult_mod, Zmod_mod, <- Zmult_mod; auto. Qed. (** For a specific number N, equality modulo N is hence a nice setoid equivalence, compatible with [+], [-] and [*]. *) Section EqualityModulo. Variable N:Z. Definition eqm a b := (a mod N = b mod N). Infix "==" := eqm (at level 70). Lemma eqm_refl : forall a, a == a. Proof. unfold eqm; auto. Qed. Lemma eqm_sym : forall a b, a == b -> b == a. Proof. unfold eqm; auto. Qed. Lemma eqm_trans : forall a b c, a == b -> b == c -> a == c. Proof. unfold eqm; eauto with *. Qed. Instance eqm_setoid : Equivalence eqm. Proof. constructor; [exact eqm_refl | exact eqm_sym | exact eqm_trans]. Qed. Instance Zplus_eqm : Proper (eqm ==> eqm ==> eqm) Z.add. Proof. unfold eqm; repeat red; intros. rewrite Zplus_mod, H, H0, <- Zplus_mod; auto. Qed. Instance Zminus_eqm : Proper (eqm ==> eqm ==> eqm) Z.sub. Proof. unfold eqm; repeat red; intros. rewrite Zminus_mod, H, H0, <- Zminus_mod; auto. Qed. Instance Zmult_eqm : Proper (eqm ==> eqm ==> eqm) Z.mul. Proof. unfold eqm; repeat red; intros. rewrite Zmult_mod, H, H0, <- Zmult_mod; auto. Qed. Instance Zopp_eqm : Proper (eqm ==> eqm) Z.opp. Proof. intros x y H. change ((-x)==(-y)) with ((0-x)==(0-y)). now rewrite H. Qed. Lemma Zmod_eqm : forall a, (a mod N) == a. Proof. intros; exact (Zmod_mod a N). Qed. (* NB: Z.modulo and Z.div are not morphisms with respect to eqm. For instance, let (==) be (eqm 2). Then we have (3 == 1) but: ~ (3 mod 3 == 1 mod 3) ~ (1 mod 3 == 1 mod 1) ~ (3/3 == 1/3) ~ (1/3 == 1/1) *) End EqualityModulo. Lemma Zdiv_Zdiv : forall a b c, 0<=b -> 0<=c -> (a/b)/c = a/(b*c). Proof. intros. zero_or_not b. rewrite Z.mul_comm. zero_or_not c. rewrite Z.mul_comm. apply Z.div_div; auto with zarith. Qed. (** Unfortunately, the previous result isn't always true on negative numbers. For instance: 3/(-2)/(-2) = 1 <> 0 = 3 / (-2*-2) *) (** A last inequality: *) Theorem Zdiv_mult_le: forall a b c, 0<=a -> 0<=b -> 0<=c -> c*(a/b) <= (c*a)/b. Proof. intros. zero_or_not b. apply Z.div_mul_le; auto with zarith. Qed. (** Z.modulo is related to divisibility (see more in Znumtheory) *) Lemma Zmod_divides : forall a b, b<>0 -> (a mod b = 0 <-> exists c, a = b*c). Proof. intros. rewrite Z.mod_divide; trivial. split; intros (c,Hc); exists c; subst; auto with zarith. Qed. (** Particular case : dividing by 2 is related with parity *) Lemma Zdiv2_div : forall a, Z.div2 a = a/2. Proof Z.div2_div. Lemma Zmod_odd : forall a, a mod 2 = if Z.odd a then 1 else 0. Proof. intros a. now rewrite <- Z.bit0_odd, <- Z.bit0_mod. Qed. Lemma Zmod_even : forall a, a mod 2 = if Z.even a then 0 else 1. Proof. intros a. rewrite Zmod_odd, Zodd_even_bool. now destruct Z.even. Qed. Lemma Zodd_mod : forall a, Z.odd a = Zeq_bool (a mod 2) 1. Proof. intros a. rewrite Zmod_odd. now destruct Z.odd. Qed. Lemma Zeven_mod : forall a, Z.even a = Zeq_bool (a mod 2) 0. Proof. intros a. rewrite Zmod_even. now destruct Z.even. Qed. (** * Compatibility *) (** Weaker results kept only for compatibility *) Lemma Z_mod_same : forall a, a > 0 -> a mod a = 0. Proof. intros; apply Z_mod_same_full. Qed. Lemma Z_div_same : forall a, a > 0 -> a/a = 1. Proof. intros; apply Z_div_same_full; auto with zarith. Qed. Lemma Z_div_plus : forall a b c:Z, c > 0 -> (a + b * c) / c = a / c + b. Proof. intros; apply Z_div_plus_full; auto with zarith. Qed. Lemma Z_div_mult : forall a b:Z, b > 0 -> (a*b)/b = a. Proof. intros; apply Z_div_mult_full; auto with zarith. Qed. Lemma Z_mod_plus : forall a b c:Z, c > 0 -> (a + b * c) mod c = a mod c. Proof. 
intros; apply Z_mod_plus_full; auto with zarith. Qed. Lemma Z_div_exact_1 : forall a b:Z, b > 0 -> a = b*(a/b) -> a mod b = 0. Proof. intros; apply Z_div_exact_full_1; auto with zarith. Qed. Lemma Z_div_exact_2 : forall a b:Z, b > 0 -> a mod b = 0 -> a = b*(a/b). Proof. intros; apply Z_div_exact_full_2; auto with zarith. Qed. Lemma Z_mod_zero_opp : forall a b:Z, b > 0 -> a mod b = 0 -> (-a) mod b = 0. Proof. intros; apply Z_mod_zero_opp_full; auto with zarith. Qed. (** * A direct way to compute Z.modulo *) Fixpoint Zmod_POS (a : positive) (b : Z) : Z := match a with | xI a' => let r := Zmod_POS a' b in let r' := (2 * r + 1) in if r' let r := Zmod_POS a' b in let r' := (2 * r) in if r' if 2 <=? b then 1 else 0 end. Definition Zmod' a b := match a with | Z0 => 0 | Zpos a' => match b with | Z0 => 0 | Zpos _ => Zmod_POS a' b | Zneg b' => let r := Zmod_POS a' (Zpos b') in match r with Z0 => 0 | _ => b + r end end | Zneg a' => match b with | Z0 => 0 | Zpos _ => let r := Zmod_POS a' b in match r with Z0 => 0 | _ => b - r end | Zneg b' => - (Zmod_POS a' (Zpos b')) end end. Theorem Zmod_POS_correct a b : Zmod_POS a b = snd (Z.pos_div_eucl a b). Proof. induction a as [a IH|a IH| ]; simpl; rewrite ?IH. destruct (Z.pos_div_eucl a b) as (p,q); simpl; case Z.ltb_spec; reflexivity. destruct (Z.pos_div_eucl a b) as (p,q); simpl; case Z.ltb_spec; reflexivity. case Z.leb_spec; trivial. Qed. Theorem Zmod'_correct: forall a b, Zmod' a b = a mod b. Proof. intros a b; unfold Z.modulo; case a; simpl; auto. intros p; case b; simpl; auto. intros p1; refine (Zmod_POS_correct _ _); auto. intros p1; rewrite Zmod_POS_correct; auto. case (Z.pos_div_eucl p (Zpos p1)); simpl; intros z1 z2; case z2; auto. intros p; case b; simpl; auto. intros p1; rewrite Zmod_POS_correct; auto. case (Z.pos_div_eucl p (Zpos p1)); simpl; intros z1 z2; case z2; auto. intros p1; rewrite Zmod_POS_correct; simpl; auto. case (Z.pos_div_eucl p (Zpos p1)); auto. Qed. (** Another convention is possible for division by negative numbers: * quotient is always the biggest integer smaller than or equal to a/b * remainder is hence always positive or null. *) Theorem Zdiv_eucl_extended : forall b:Z, b <> 0 -> forall a:Z, {qr : Z * Z | let (q, r) := qr in a = b * q + r /\ 0 <= r < Z.abs b}. Proof. intros b Hb a. elim (Z_le_gt_dec 0 b); intro Hb'. cut (b > 0); [ intro Hb'' | omega ]. rewrite Z.abs_eq; [ apply Zdiv_eucl_exist; assumption | assumption ]. cut (- b > 0); [ intro Hb'' | omega ]. elim (Zdiv_eucl_exist Hb'' a); intros qr. elim qr; intros q r Hqr. exists (- q, r). elim Hqr; intros. split. rewrite <- Z.mul_opp_comm; assumption. rewrite Z.abs_neq; [ assumption | omega ]. Qed. Arguments Zdiv_eucl_extended : default implicits. (** * Division and modulo in Z agree with same in nat: *) Require Import NPeano. Lemma div_Zdiv (n m: nat): m <> O -> Z.of_nat (n / m) = Z.of_nat n / Z.of_nat m. Proof. intros. apply (Zdiv_unique _ _ _ (Z.of_nat (n mod m))). split. auto with zarith. now apply inj_lt, Nat.mod_upper_bound. rewrite <- Nat2Z.inj_mul, <- Nat2Z.inj_add. now apply inj_eq, Nat.div_mod. Qed. Lemma mod_Zmod (n m: nat): m <> O -> Z.of_nat (n mod m) = (Z.of_nat n) mod (Z.of_nat m). Proof. intros. apply (Zmod_unique _ _ (Z.of_nat n / Z.of_nat m)). split. auto with zarith. now apply inj_lt, Nat.mod_upper_bound. rewrite <- div_Zdiv, <- Nat2Z.inj_mul, <- Nat2Z.inj_add by trivial. now apply inj_eq, Nat.div_mod. Qed. 
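(* Editorial illustration, not part of the original file: [div_Zdiv] and
   [mod_Zmod] on concrete values. *)
Example div_Zdiv_example :
  Z.of_nat ((17 / 5)%nat) = Z.of_nat 17 / Z.of_nat 5
  /\ Z.of_nat ((17 mod 5)%nat) = (Z.of_nat 17) mod (Z.of_nat 5).
Proof. split; reflexivity. Qed.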
coq-8.4pl2/theories/ZArith/Znat.v0000640000175000001440000006567012010532755015773 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Nat ---- | ^ | | | v Pos ---------> Z | | ^ | v | ----> N ----- >> *) Lemma nat_N_Z n : Z.of_N (N.of_nat n) = Z.of_nat n. Proof. now destruct n. Qed. Lemma N_nat_Z n : Z.of_nat (N.to_nat n) = Z.of_N n. Proof. destruct n; trivial. simpl. destruct (Pos2Nat.is_succ p) as (m,H). rewrite H. simpl. f_equal. now apply SuccNat2Pos.inv. Qed. Lemma positive_nat_Z p : Z.of_nat (Pos.to_nat p) = Zpos p. Proof. destruct (Pos2Nat.is_succ p) as (n,H). rewrite H. simpl. f_equal. now apply SuccNat2Pos.inv. Qed. Lemma positive_N_Z p : Z.of_N (Npos p) = Zpos p. Proof. reflexivity. Qed. Lemma positive_N_nat p : N.to_nat (Npos p) = Pos.to_nat p. Proof. reflexivity. Qed. Lemma positive_nat_N p : N.of_nat (Pos.to_nat p) = Npos p. Proof. destruct (Pos2Nat.is_succ p) as (n,H). rewrite H. simpl. f_equal. now apply SuccNat2Pos.inv. Qed. Lemma Z_N_nat n : N.to_nat (Z.to_N n) = Z.to_nat n. Proof. now destruct n. Qed. Lemma Z_nat_N n : N.of_nat (Z.to_nat n) = Z.to_N n. Proof. destruct n; simpl; trivial. apply positive_nat_N. Qed. Lemma Zabs_N_nat n : N.to_nat (Z.abs_N n) = Z.abs_nat n. Proof. now destruct n. Qed. Lemma Zabs_nat_N n : N.of_nat (Z.abs_nat n) = Z.abs_N n. Proof. destruct n; simpl; trivial; apply positive_nat_N. Qed. (** * Conversions between [Z] and [N] *) Module N2Z. (** [Z.of_N] is a bijection between [N] and non-negative [Z], with [Z.to_N] (or [Z.abs_N]) as reciprocal. See [Z2N.id] below for the dual equation. *) Lemma id n : Z.to_N (Z.of_N n) = n. Proof. now destruct n. Qed. (** [Z.of_N] is hence injective *) Lemma inj n m : Z.of_N n = Z.of_N m -> n = m. Proof. destruct n, m; simpl; congruence. Qed. Lemma inj_iff n m : Z.of_N n = Z.of_N m <-> n = m. Proof. split. apply inj. intros; now f_equal. Qed. (** [Z.of_N] produce non-negative integers *) Lemma is_nonneg n : 0 <= Z.of_N n. Proof. now destruct n. Qed. (** [Z.of_N], basic equations *) Lemma inj_0 : Z.of_N 0 = 0. Proof. reflexivity. Qed. Lemma inj_pos p : Z.of_N (Npos p) = Zpos p. Proof. reflexivity. Qed. (** [Z.of_N] and usual operations. *) Lemma inj_compare n m : (Z.of_N n ?= Z.of_N m) = (n ?= m)%N. Proof. now destruct n, m. Qed. Lemma inj_le n m : (n<=m)%N <-> Z.of_N n <= Z.of_N m. Proof. unfold Z.le. now rewrite inj_compare. Qed. Lemma inj_lt n m : (n Z.of_N n < Z.of_N m. Proof. unfold Z.lt. now rewrite inj_compare. Qed. Lemma inj_ge n m : (n>=m)%N <-> Z.of_N n >= Z.of_N m. Proof. unfold Z.ge. now rewrite inj_compare. Qed. Lemma inj_gt n m : (n>m)%N <-> Z.of_N n > Z.of_N m. Proof. unfold Z.gt. now rewrite inj_compare. Qed. Lemma inj_abs_N z : Z.of_N (Z.abs_N z) = Z.abs z. Proof. now destruct z. Qed. Lemma inj_add n m : Z.of_N (n+m) = Z.of_N n + Z.of_N m. Proof. now destruct n, m. Qed. Lemma inj_mul n m : Z.of_N (n*m) = Z.of_N n * Z.of_N m. Proof. now destruct n, m. Qed. Lemma inj_sub_max n m : Z.of_N (n-m) = Z.max 0 (Z.of_N n - Z.of_N m). Proof. destruct n as [|n], m as [|m]; simpl; trivial. rewrite Z.pos_sub_spec, Pos.compare_sub_mask. unfold Pos.sub. now destruct (Pos.sub_mask n m). Qed. Lemma inj_sub n m : (m<=n)%N -> Z.of_N (n-m) = Z.of_N n - Z.of_N m. Proof. intros H. rewrite inj_sub_max. unfold N.le in H. rewrite N.compare_antisym, <- inj_compare, Z.compare_sub in H. destruct (Z.of_N n - Z.of_N m); trivial; now destruct H. Qed. 
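(* Editorial illustration, not part of the original file: truncated
   subtraction on [N] matches the [Z.max] formulation of [inj_sub_max]. *)
Example inj_sub_max_example :
  Z.of_N ((2 - 5)%N) = Z.max 0 (Z.of_N 2%N - Z.of_N 5%N).
Proof. reflexivity. Qed.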
Lemma inj_succ n : Z.of_N (N.succ n) = Z.succ (Z.of_N n). Proof. destruct n. trivial. simpl. now rewrite Pos.add_1_r. Qed. Lemma inj_pred_max n : Z.of_N (N.pred n) = Z.max 0 (Z.pred (Z.of_N n)). Proof. unfold Z.pred. now rewrite N.pred_sub, inj_sub_max. Qed. Lemma inj_pred n : (0 Z.of_N (N.pred n) = Z.pred (Z.of_N n). Proof. intros H. unfold Z.pred. rewrite N.pred_sub, inj_sub; trivial. now apply N.le_succ_l in H. Qed. Lemma inj_min n m : Z.of_N (N.min n m) = Z.min (Z.of_N n) (Z.of_N m). Proof. unfold Z.min, N.min. rewrite inj_compare. now case N.compare. Qed. Lemma inj_max n m : Z.of_N (N.max n m) = Z.max (Z.of_N n) (Z.of_N m). Proof. unfold Z.max, N.max. rewrite inj_compare. case N.compare_spec; intros; subst; trivial. Qed. Lemma inj_div n m : Z.of_N (n/m) = Z.of_N n / Z.of_N m. Proof. destruct m as [|m]. now destruct n. apply Z.div_unique_pos with (Z.of_N (n mod (Npos m))). split. apply is_nonneg. apply inj_lt. now apply N.mod_lt. rewrite <- inj_mul, <- inj_add. f_equal. now apply N.div_mod. Qed. Lemma inj_mod n m : (m<>0)%N -> Z.of_N (n mod m) = (Z.of_N n) mod (Z.of_N m). Proof. intros Hm. apply Z.mod_unique_pos with (Z.of_N (n / m)). split. apply is_nonneg. apply inj_lt. now apply N.mod_lt. rewrite <- inj_mul, <- inj_add. f_equal. now apply N.div_mod. Qed. Lemma inj_quot n m : Z.of_N (n/m) = Z.of_N n ÷ Z.of_N m. Proof. destruct m. - now destruct n. - rewrite Z.quot_div_nonneg, inj_div; trivial. apply is_nonneg. easy. Qed. Lemma inj_rem n m : Z.of_N (n mod m) = Z.rem (Z.of_N n) (Z.of_N m). Proof. destruct m. - now destruct n. - rewrite Z.rem_mod_nonneg, inj_mod; trivial. easy. apply is_nonneg. easy. Qed. Lemma inj_div2 n : Z.of_N (N.div2 n) = Z.div2 (Z.of_N n). Proof. destruct n as [|p]; trivial. now destruct p. Qed. Lemma inj_quot2 n : Z.of_N (N.div2 n) = Z.quot2 (Z.of_N n). Proof. destruct n as [|p]; trivial. now destruct p. Qed. Lemma inj_pow n m : Z.of_N (n^m) = (Z.of_N n)^(Z.of_N m). Proof. destruct n, m; trivial. now rewrite Z.pow_0_l. apply Pos2Z.inj_pow. Qed. Lemma inj_testbit a n : Z.testbit (Z.of_N a) (Z.of_N n) = N.testbit a n. Proof. apply Z.Private_BootStrap.testbit_of_N. Qed. End N2Z. Module Z2N. (** [Z.to_N] is a bijection between non-negative [Z] and [N], with [Pos.of_N] as reciprocal. See [N2Z.id] above for the dual equation. *) Lemma id n : 0<=n -> Z.of_N (Z.to_N n) = n. Proof. destruct n; (now destruct 1) || trivial. Qed. (** [Z.to_N] is hence injective for non-negative integers. *) Lemma inj n m : 0<=n -> 0<=m -> Z.to_N n = Z.to_N m -> n = m. Proof. intros. rewrite <- (id n), <- (id m) by trivial. now f_equal. Qed. Lemma inj_iff n m : 0<=n -> 0<=m -> (Z.to_N n = Z.to_N m <-> n = m). Proof. intros. split. now apply inj. intros; now subst. Qed. (** [Z.to_N], basic equations *) Lemma inj_0 : Z.to_N 0 = 0%N. Proof. reflexivity. Qed. Lemma inj_pos n : Z.to_N (Zpos n) = Npos n. Proof. reflexivity. Qed. Lemma inj_neg n : Z.to_N (Zneg n) = 0%N. Proof. reflexivity. Qed. (** [Z.to_N] and operations *) Lemma inj_add n m : 0<=n -> 0<=m -> Z.to_N (n+m) = (Z.to_N n + Z.to_N m)%N. Proof. destruct n, m; trivial; (now destruct 1) || (now destruct 2). Qed. Lemma inj_mul n m : 0<=n -> 0<=m -> Z.to_N (n*m) = (Z.to_N n * Z.to_N m)%N. Proof. destruct n, m; trivial; (now destruct 1) || (now destruct 2). Qed. Lemma inj_succ n : 0<=n -> Z.to_N (Z.succ n) = N.succ (Z.to_N n). Proof. unfold Z.succ. intros. rewrite inj_add by easy. apply N.add_1_r. Qed. Lemma inj_sub n m : 0<=m -> Z.to_N (n - m) = (Z.to_N n - Z.to_N m)%N. Proof. destruct n as [|n|n], m as [|m|m]; trivial; try (now destruct 1). 
intros _. simpl. rewrite Z.pos_sub_spec, Pos.compare_sub_mask. unfold Pos.sub. now destruct (Pos.sub_mask n m). Qed. Lemma inj_pred n : Z.to_N (Z.pred n) = N.pred (Z.to_N n). Proof. unfold Z.pred. rewrite <- N.sub_1_r. now apply (inj_sub n 1). Qed. Lemma inj_compare n m : 0<=n -> 0<=m -> (Z.to_N n ?= Z.to_N m)%N = (n ?= m). Proof. intros Hn Hm. now rewrite <- N2Z.inj_compare, !id. Qed. Lemma inj_le n m : 0<=n -> 0<=m -> (n<=m <-> (Z.to_N n <= Z.to_N m)%N). Proof. intros Hn Hm. unfold Z.le, N.le. now rewrite inj_compare. Qed. Lemma inj_lt n m : 0<=n -> 0<=m -> (n (Z.to_N n < Z.to_N m)%N). Proof. intros Hn Hm. unfold Z.lt, N.lt. now rewrite inj_compare. Qed. Lemma inj_min n m : Z.to_N (Z.min n m) = N.min (Z.to_N n) (Z.to_N m). Proof. destruct n, m; simpl; trivial; unfold Z.min, N.min; simpl; now case Pos.compare. Qed. Lemma inj_max n m : Z.to_N (Z.max n m) = N.max (Z.to_N n) (Z.to_N m). Proof. destruct n, m; simpl; trivial; unfold Z.max, N.max; simpl. case Pos.compare_spec; intros; subst; trivial. now case Pos.compare. Qed. Lemma inj_div n m : 0<=n -> 0<=m -> Z.to_N (n/m) = (Z.to_N n / Z.to_N m)%N. Proof. destruct n, m; trivial; intros Hn Hm; (now destruct Hn) || (now destruct Hm) || clear. simpl. rewrite <- (N2Z.id (_ / _)). f_equal. now rewrite N2Z.inj_div. Qed. Lemma inj_mod n m : 0<=n -> 0 Z.to_N (n mod m) = ((Z.to_N n) mod (Z.to_N m))%N. Proof. destruct n, m; trivial; intros Hn Hm; (now destruct Hn) || (now destruct Hm) || clear. simpl. rewrite <- (N2Z.id (_ mod _)). f_equal. now rewrite N2Z.inj_mod. Qed. Lemma inj_quot n m : 0<=n -> 0<=m -> Z.to_N (n÷m) = (Z.to_N n / Z.to_N m)%N. Proof. destruct m. - now destruct n. - intros. now rewrite Z.quot_div_nonneg, inj_div. - now destruct 2. Qed. Lemma inj_rem n m :0<=n -> 0<=m -> Z.to_N (Z.rem n m) = ((Z.to_N n) mod (Z.to_N m))%N. Proof. destruct m. - now destruct n. - intros. now rewrite Z.rem_mod_nonneg, inj_mod. - now destruct 2. Qed. Lemma inj_div2 n : Z.to_N (Z.div2 n) = N.div2 (Z.to_N n). Proof. destruct n as [|p|p]; trivial. now destruct p. Qed. Lemma inj_quot2 n : Z.to_N (Z.quot2 n) = N.div2 (Z.to_N n). Proof. destruct n as [|p|p]; trivial; now destruct p. Qed. Lemma inj_pow n m : 0<=n -> 0<=m -> Z.to_N (n^m) = ((Z.to_N n)^(Z.to_N m))%N. Proof. destruct m. - trivial. - intros. now rewrite <- (N2Z.id (_ ^ _)), N2Z.inj_pow, id. - now destruct 2. Qed. Lemma inj_testbit a n : 0<=n -> Z.testbit (Z.of_N a) n = N.testbit a (Z.to_N n). Proof. apply Z.Private_BootStrap.testbit_of_N'. Qed. End Z2N. Module Zabs2N. (** Results about [Z.abs_N], converting absolute values of [Z] integers to [N]. *) Lemma abs_N_spec n : Z.abs_N n = Z.to_N (Z.abs n). Proof. now destruct n. Qed. Lemma abs_N_nonneg n : 0<=n -> Z.abs_N n = Z.to_N n. Proof. destruct n; trivial; now destruct 1. Qed. Lemma id_abs n : Z.of_N (Z.abs_N n) = Z.abs n. Proof. now destruct n. Qed. Lemma id n : Z.abs_N (Z.of_N n) = n. Proof. now destruct n. Qed. (** [Z.abs_N], basic equations *) Lemma inj_0 : Z.abs_N 0 = 0%N. Proof. reflexivity. Qed. Lemma inj_pos p : Z.abs_N (Zpos p) = Npos p. Proof. reflexivity. Qed. Lemma inj_neg p : Z.abs_N (Zneg p) = Npos p. Proof. reflexivity. Qed. (** [Z.abs_N] and usual operations, with non-negative integers *) Lemma inj_opp n : Z.abs_N (-n) = Z.abs_N n. Proof. now destruct n. Qed. Lemma inj_succ n : 0<=n -> Z.abs_N (Z.succ n) = N.succ (Z.abs_N n). Proof. intros. rewrite !abs_N_nonneg; trivial. now apply Z2N.inj_succ. now apply Z.le_le_succ_r. Qed. Lemma inj_add n m : 0<=n -> 0<=m -> Z.abs_N (n+m) = (Z.abs_N n + Z.abs_N m)%N. Proof. intros. 
rewrite !abs_N_nonneg; trivial. now apply Z2N.inj_add. now apply Z.add_nonneg_nonneg. Qed. Lemma inj_mul n m : Z.abs_N (n*m) = (Z.abs_N n * Z.abs_N m)%N. Proof. now destruct n, m. Qed. Lemma inj_sub n m : 0<=m<=n -> Z.abs_N (n-m) = (Z.abs_N n - Z.abs_N m)%N. Proof. intros (Hn,H). rewrite !abs_N_nonneg; trivial. now apply Z2N.inj_sub. Z.order. now apply Z.le_0_sub. Qed. Lemma inj_pred n : 0 Z.abs_N (Z.pred n) = N.pred (Z.abs_N n). Proof. intros. rewrite !abs_N_nonneg. now apply Z2N.inj_pred. Z.order. apply Z.lt_succ_r. now rewrite Z.succ_pred. Qed. Lemma inj_compare n m : 0<=n -> 0<=m -> (Z.abs_N n ?= Z.abs_N m)%N = (n ?= m). Proof. intros. rewrite !abs_N_nonneg by trivial. now apply Z2N.inj_compare. Qed. Lemma inj_le n m : 0<=n -> 0<=m -> (n<=m <-> (Z.abs_N n <= Z.abs_N m)%N). Proof. intros Hn Hm. unfold Z.le, N.le. now rewrite inj_compare. Qed. Lemma inj_lt n m : 0<=n -> 0<=m -> (n (Z.abs_N n < Z.abs_N m)%N). Proof. intros Hn Hm. unfold Z.lt, N.lt. now rewrite inj_compare. Qed. Lemma inj_min n m : 0<=n -> 0<=m -> Z.abs_N (Z.min n m) = N.min (Z.abs_N n) (Z.abs_N m). Proof. intros. rewrite !abs_N_nonneg; trivial. now apply Z2N.inj_min. now apply Z.min_glb. Qed. Lemma inj_max n m : 0<=n -> 0<=m -> Z.abs_N (Z.max n m) = N.max (Z.abs_N n) (Z.abs_N m). Proof. intros. rewrite !abs_N_nonneg; trivial. now apply Z2N.inj_max. transitivity n; trivial. apply Z.le_max_l. Qed. Lemma inj_quot n m : Z.abs_N (n÷m) = ((Z.abs_N n) / (Z.abs_N m))%N. Proof. assert (forall p q, Z.abs_N (Zpos p ÷ Zpos q) = (Npos p / Npos q)%N). intros. rewrite abs_N_nonneg. now apply Z2N.inj_quot. now apply Z.quot_pos. destruct n, m; trivial; simpl. - trivial. - now rewrite <- Pos2Z.opp_pos, Z.quot_opp_r, inj_opp. - now rewrite <- Pos2Z.opp_pos, Z.quot_opp_l, inj_opp. - now rewrite <- 2 Pos2Z.opp_pos, Z.quot_opp_opp. Qed. Lemma inj_rem n m : Z.abs_N (Z.rem n m) = ((Z.abs_N n) mod (Z.abs_N m))%N. Proof. assert (forall p q, Z.abs_N (Z.rem (Zpos p) (Zpos q)) = ((Npos p) mod (Npos q))%N). intros. rewrite abs_N_nonneg. now apply Z2N.inj_rem. now apply Z.rem_nonneg. destruct n, m; trivial; simpl. - trivial. - now rewrite <- Pos2Z.opp_pos, Z.rem_opp_r. - now rewrite <- Pos2Z.opp_pos, Z.rem_opp_l, inj_opp. - now rewrite <- 2 Pos2Z.opp_pos, Z.rem_opp_opp, inj_opp. Qed. Lemma inj_pow n m : 0<=m -> Z.abs_N (n^m) = ((Z.abs_N n)^(Z.abs_N m))%N. Proof. intros Hm. rewrite abs_N_spec, Z.abs_pow, Z2N.inj_pow, <- abs_N_spec; trivial. f_equal. symmetry; now apply abs_N_nonneg. apply Z.abs_nonneg. Qed. (** [Z.abs_N] and usual operations, statements with [Z.abs] *) Lemma inj_succ_abs n : Z.abs_N (Z.succ (Z.abs n)) = N.succ (Z.abs_N n). Proof. destruct n; simpl; trivial; now rewrite Pos.add_1_r. Qed. Lemma inj_add_abs n m : Z.abs_N (Z.abs n + Z.abs m) = (Z.abs_N n + Z.abs_N m)%N. Proof. now destruct n, m. Qed. Lemma inj_mul_abs n m : Z.abs_N (Z.abs n * Z.abs m) = (Z.abs_N n * Z.abs_N m)%N. Proof. now destruct n, m. Qed. End Zabs2N. (** * Conversions between [Z] and [nat] *) Module Nat2Z. (** [Z.of_nat], basic equations *) Lemma inj_0 : Z.of_nat 0 = 0. Proof. reflexivity. Qed. Lemma inj_succ n : Z.of_nat (S n) = Z.succ (Z.of_nat n). Proof. destruct n. trivial. simpl. apply Pos2Z.inj_succ. Qed. (** [Z.of_N] produce non-negative integers *) Lemma is_nonneg n : 0 <= Z.of_nat n. Proof. now induction n. Qed. (** [Z.of_nat] is a bijection between [nat] and non-negative [Z], with [Z.to_nat] (or [Z.abs_nat]) as reciprocal. See [Z2Nat.id] below for the dual equation. *) Lemma id n : Z.to_nat (Z.of_nat n) = n. Proof. 
now rewrite <- nat_N_Z, <- Z_N_nat, N2Z.id, Nat2N.id. Qed. (** [Z.of_nat] is hence injective *) Lemma inj n m : Z.of_nat n = Z.of_nat m -> n = m. Proof. intros H. now rewrite <- (id n), <- (id m), H. Qed. Lemma inj_iff n m : Z.of_nat n = Z.of_nat m <-> n = m. Proof. split. apply inj. intros; now f_equal. Qed. (** [Z.of_nat] and usual operations *) Lemma inj_compare n m : (Z.of_nat n ?= Z.of_nat m) = nat_compare n m. Proof. now rewrite <-!nat_N_Z, N2Z.inj_compare, <- Nat2N.inj_compare. Qed. Lemma inj_le n m : (n<=m)%nat <-> Z.of_nat n <= Z.of_nat m. Proof. unfold Z.le. now rewrite inj_compare, nat_compare_le. Qed. Lemma inj_lt n m : (n Z.of_nat n < Z.of_nat m. Proof. unfold Z.lt. now rewrite inj_compare, nat_compare_lt. Qed. Lemma inj_ge n m : (n>=m)%nat <-> Z.of_nat n >= Z.of_nat m. Proof. unfold Z.ge. now rewrite inj_compare, nat_compare_ge. Qed. Lemma inj_gt n m : (n>m)%nat <-> Z.of_nat n > Z.of_nat m. Proof. unfold Z.gt. now rewrite inj_compare, nat_compare_gt. Qed. Lemma inj_abs_nat z : Z.of_nat (Z.abs_nat z) = Z.abs z. Proof. destruct z; simpl; trivial; destruct (Pos2Nat.is_succ p) as (n,H); rewrite H; simpl; f_equal; now apply SuccNat2Pos.inv. Qed. Lemma inj_add n m : Z.of_nat (n+m) = Z.of_nat n + Z.of_nat m. Proof. now rewrite <- !nat_N_Z, Nat2N.inj_add, N2Z.inj_add. Qed. Lemma inj_mul n m : Z.of_nat (n*m) = Z.of_nat n * Z.of_nat m. Proof. now rewrite <- !nat_N_Z, Nat2N.inj_mul, N2Z.inj_mul. Qed. Lemma inj_sub_max n m : Z.of_nat (n-m) = Z.max 0 (Z.of_nat n - Z.of_nat m). Proof. now rewrite <- !nat_N_Z, Nat2N.inj_sub, N2Z.inj_sub_max. Qed. Lemma inj_sub n m : (m<=n)%nat -> Z.of_nat (n-m) = Z.of_nat n - Z.of_nat m. Proof. rewrite nat_compare_le, Nat2N.inj_compare. intros. now rewrite <- !nat_N_Z, Nat2N.inj_sub, N2Z.inj_sub. Qed. Lemma inj_pred_max n : Z.of_nat (pred n) = Z.max 0 (Z.pred (Z.of_nat n)). Proof. now rewrite <- !nat_N_Z, Nat2N.inj_pred, N2Z.inj_pred_max. Qed. Lemma inj_pred n : (0 Z.of_nat (pred n) = Z.pred (Z.of_nat n). Proof. rewrite nat_compare_lt, Nat2N.inj_compare. intros. now rewrite <- !nat_N_Z, Nat2N.inj_pred, N2Z.inj_pred. Qed. Lemma inj_min n m : Z.of_nat (min n m) = Z.min (Z.of_nat n) (Z.of_nat m). Proof. now rewrite <- !nat_N_Z, Nat2N.inj_min, N2Z.inj_min. Qed. Lemma inj_max n m : Z.of_nat (max n m) = Z.max (Z.of_nat n) (Z.of_nat m). Proof. now rewrite <- !nat_N_Z, Nat2N.inj_max, N2Z.inj_max. Qed. End Nat2Z. Module Z2Nat. (** [Z.to_nat] is a bijection between non-negative [Z] and [nat], with [Pos.of_nat] as reciprocal. See [nat2Z.id] above for the dual equation. *) Lemma id n : 0<=n -> Z.of_nat (Z.to_nat n) = n. Proof. intros. now rewrite <- Z_N_nat, <- nat_N_Z, N2Nat.id, Z2N.id. Qed. (** [Z.to_nat] is hence injective for non-negative integers. *) Lemma inj n m : 0<=n -> 0<=m -> Z.to_nat n = Z.to_nat m -> n = m. Proof. intros. rewrite <- (id n), <- (id m) by trivial. now f_equal. Qed. Lemma inj_iff n m : 0<=n -> 0<=m -> (Z.to_nat n = Z.to_nat m <-> n = m). Proof. intros. split. now apply inj. intros; now subst. Qed. (** [Z.to_nat], basic equations *) Lemma inj_0 : Z.to_nat 0 = O. Proof. reflexivity. Qed. Lemma inj_pos n : Z.to_nat (Zpos n) = Pos.to_nat n. Proof. reflexivity. Qed. Lemma inj_neg n : Z.to_nat (Zneg n) = O. Proof. reflexivity. Qed. (** [Z.to_nat] and operations *) Lemma inj_add n m : 0<=n -> 0<=m -> Z.to_nat (n+m) = (Z.to_nat n + Z.to_nat m)%nat. Proof. intros. now rewrite <- !Z_N_nat, Z2N.inj_add, N2Nat.inj_add. Qed. Lemma inj_mul n m : 0<=n -> 0<=m -> Z.to_nat (n*m) = (Z.to_nat n * Z.to_nat m)%nat. Proof. intros. 
now rewrite <- !Z_N_nat, Z2N.inj_mul, N2Nat.inj_mul. Qed. Lemma inj_succ n : 0<=n -> Z.to_nat (Z.succ n) = S (Z.to_nat n). Proof. intros. now rewrite <- !Z_N_nat, Z2N.inj_succ, N2Nat.inj_succ. Qed. Lemma inj_sub n m : 0<=m -> Z.to_nat (n - m) = (Z.to_nat n - Z.to_nat m)%nat. Proof. intros. now rewrite <- !Z_N_nat, Z2N.inj_sub, N2Nat.inj_sub. Qed. Lemma inj_pred n : Z.to_nat (Z.pred n) = pred (Z.to_nat n). Proof. now rewrite <- !Z_N_nat, Z2N.inj_pred, N2Nat.inj_pred. Qed. Lemma inj_compare n m : 0<=n -> 0<=m -> nat_compare (Z.to_nat n) (Z.to_nat m) = (n ?= m). Proof. intros Hn Hm. now rewrite <- Nat2Z.inj_compare, !id. Qed. Lemma inj_le n m : 0<=n -> 0<=m -> (n<=m <-> (Z.to_nat n <= Z.to_nat m)%nat). Proof. intros Hn Hm. unfold Z.le. now rewrite nat_compare_le, inj_compare. Qed. Lemma inj_lt n m : 0<=n -> 0<=m -> (n (Z.to_nat n < Z.to_nat m)%nat). Proof. intros Hn Hm. unfold Z.lt. now rewrite nat_compare_lt, inj_compare. Qed. Lemma inj_min n m : Z.to_nat (Z.min n m) = min (Z.to_nat n) (Z.to_nat m). Proof. now rewrite <- !Z_N_nat, Z2N.inj_min, N2Nat.inj_min. Qed. Lemma inj_max n m : Z.to_nat (Z.max n m) = max (Z.to_nat n) (Z.to_nat m). Proof. now rewrite <- !Z_N_nat, Z2N.inj_max, N2Nat.inj_max. Qed. End Z2Nat. Module Zabs2Nat. (** Results about [Z.abs_nat], converting absolute values of [Z] integers to [nat]. *) Lemma abs_nat_spec n : Z.abs_nat n = Z.to_nat (Z.abs n). Proof. now destruct n. Qed. Lemma abs_nat_nonneg n : 0<=n -> Z.abs_nat n = Z.to_nat n. Proof. destruct n; trivial; now destruct 1. Qed. Lemma id_abs n : Z.of_nat (Z.abs_nat n) = Z.abs n. Proof. rewrite <-Zabs_N_nat, N_nat_Z. apply Zabs2N.id_abs. Qed. Lemma id n : Z.abs_nat (Z.of_nat n) = n. Proof. now rewrite <-Zabs_N_nat, <-nat_N_Z, Zabs2N.id, Nat2N.id. Qed. (** [Z.abs_nat], basic equations *) Lemma inj_0 : Z.abs_nat 0 = 0%nat. Proof. reflexivity. Qed. Lemma inj_pos p : Z.abs_nat (Zpos p) = Pos.to_nat p. Proof. reflexivity. Qed. Lemma inj_neg p : Z.abs_nat (Zneg p) = Pos.to_nat p. Proof. reflexivity. Qed. (** [Z.abs_nat] and usual operations, with non-negative integers *) Lemma inj_succ n : 0<=n -> Z.abs_nat (Z.succ n) = S (Z.abs_nat n). Proof. intros. now rewrite <- !Zabs_N_nat, Zabs2N.inj_succ, N2Nat.inj_succ. Qed. Lemma inj_add n m : 0<=n -> 0<=m -> Z.abs_nat (n+m) = (Z.abs_nat n + Z.abs_nat m)%nat. Proof. intros. now rewrite <- !Zabs_N_nat, Zabs2N.inj_add, N2Nat.inj_add. Qed. Lemma inj_mul n m : Z.abs_nat (n*m) = (Z.abs_nat n * Z.abs_nat m)%nat. Proof. destruct n, m; simpl; trivial using Pos2Nat.inj_mul. Qed. Lemma inj_sub n m : 0<=m<=n -> Z.abs_nat (n-m) = (Z.abs_nat n - Z.abs_nat m)%nat. Proof. intros. now rewrite <- !Zabs_N_nat, Zabs2N.inj_sub, N2Nat.inj_sub. Qed. Lemma inj_pred n : 0 Z.abs_nat (Z.pred n) = pred (Z.abs_nat n). Proof. intros. now rewrite <- !Zabs_N_nat, Zabs2N.inj_pred, N2Nat.inj_pred. Qed. Lemma inj_compare n m : 0<=n -> 0<=m -> nat_compare (Z.abs_nat n) (Z.abs_nat m) = (n ?= m). Proof. intros. now rewrite <- !Zabs_N_nat, <- N2Nat.inj_compare, Zabs2N.inj_compare. Qed. Lemma inj_le n m : 0<=n -> 0<=m -> (n<=m <-> (Z.abs_nat n <= Z.abs_nat m)%nat). Proof. intros Hn Hm. unfold Z.le. now rewrite nat_compare_le, inj_compare. Qed. Lemma inj_lt n m : 0<=n -> 0<=m -> (n (Z.abs_nat n < Z.abs_nat m)%nat). Proof. intros Hn Hm. unfold Z.lt. now rewrite nat_compare_lt, inj_compare. Qed. Lemma inj_min n m : 0<=n -> 0<=m -> Z.abs_nat (Z.min n m) = min (Z.abs_nat n) (Z.abs_nat m). Proof. intros. now rewrite <- !Zabs_N_nat, Zabs2N.inj_min, N2Nat.inj_min. Qed. 
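(* Editorial illustration, not part of the original file: [Z.abs_nat]
   forgets the sign of its argument. *)
Example abs_nat_example : Z.abs_nat (-3) = 3%nat.
Proof. reflexivity. Qed.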
Lemma inj_max n m : 0<=n -> 0<=m -> Z.abs_nat (Z.max n m) = max (Z.abs_nat n) (Z.abs_nat m). Proof. intros. now rewrite <- !Zabs_N_nat, Zabs2N.inj_max, N2Nat.inj_max. Qed. (** [Z.abs_nat] and usual operations, statements with [Z.abs] *) Lemma inj_succ_abs n : Z.abs_nat (Z.succ (Z.abs n)) = S (Z.abs_nat n). Proof. now rewrite <- !Zabs_N_nat, Zabs2N.inj_succ_abs, N2Nat.inj_succ. Qed. Lemma inj_add_abs n m : Z.abs_nat (Z.abs n + Z.abs m) = (Z.abs_nat n + Z.abs_nat m)%nat. Proof. now rewrite <- !Zabs_N_nat, Zabs2N.inj_add_abs, N2Nat.inj_add. Qed. Lemma inj_mul_abs n m : Z.abs_nat (Z.abs n * Z.abs m) = (Z.abs_nat n * Z.abs_nat m)%nat. Proof. now rewrite <- !Zabs_N_nat, Zabs2N.inj_mul_abs, N2Nat.inj_mul. Qed. End Zabs2Nat. (** Compatibility *) Definition neq (x y:nat) := x <> y. Lemma inj_neq n m : neq n m -> Zne (Z.of_nat n) (Z.of_nat m). Proof. intros H H'. now apply H, Nat2Z.inj. Qed. Lemma Zpos_P_of_succ_nat n : Zpos (Pos.of_succ_nat n) = Z.succ (Z.of_nat n). Proof (Nat2Z.inj_succ n). (** For these one, used in omega, a Definition is necessary *) Definition inj_eq := (f_equal Z.of_nat). Definition inj_le n m := proj1 (Nat2Z.inj_le n m). Definition inj_lt n m := proj1 (Nat2Z.inj_lt n m). Definition inj_ge n m := proj1 (Nat2Z.inj_ge n m). Definition inj_gt n m := proj1 (Nat2Z.inj_gt n m). (** For the others, a Notation is fine *) Notation inj_0 := Nat2Z.inj_0 (compat "8.3"). Notation inj_S := Nat2Z.inj_succ (compat "8.3"). Notation inj_compare := Nat2Z.inj_compare (compat "8.3"). Notation inj_eq_rev := Nat2Z.inj (compat "8.3"). Notation inj_eq_iff := (fun n m => iff_sym (Nat2Z.inj_iff n m)) (compat "8.3"). Notation inj_le_iff := Nat2Z.inj_le (compat "8.3"). Notation inj_lt_iff := Nat2Z.inj_lt (compat "8.3"). Notation inj_ge_iff := Nat2Z.inj_ge (compat "8.3"). Notation inj_gt_iff := Nat2Z.inj_gt (compat "8.3"). Notation inj_le_rev := (fun n m => proj2 (Nat2Z.inj_le n m)) (compat "8.3"). Notation inj_lt_rev := (fun n m => proj2 (Nat2Z.inj_lt n m)) (compat "8.3"). Notation inj_ge_rev := (fun n m => proj2 (Nat2Z.inj_ge n m)) (compat "8.3"). Notation inj_gt_rev := (fun n m => proj2 (Nat2Z.inj_gt n m)) (compat "8.3"). Notation inj_plus := Nat2Z.inj_add (compat "8.3"). Notation inj_mult := Nat2Z.inj_mul (compat "8.3"). Notation inj_minus1 := Nat2Z.inj_sub (compat "8.3"). Notation inj_minus := Nat2Z.inj_sub_max (compat "8.3"). Notation inj_min := Nat2Z.inj_min (compat "8.3"). Notation inj_max := Nat2Z.inj_max (compat "8.3"). Notation Z_of_nat_of_P := positive_nat_Z (compat "8.3"). Notation Zpos_eq_Z_of_nat_o_nat_of_P := (fun p => eq_sym (positive_nat_Z p)) (compat "8.3"). Notation Z_of_nat_of_N := N_nat_Z (compat "8.3"). Notation Z_of_N_of_nat := nat_N_Z (compat "8.3"). Notation Z_of_N_eq := (f_equal Z.of_N) (compat "8.3"). Notation Z_of_N_eq_rev := N2Z.inj (compat "8.3"). Notation Z_of_N_eq_iff := (fun n m => iff_sym (N2Z.inj_iff n m)) (compat "8.3"). Notation Z_of_N_compare := N2Z.inj_compare (compat "8.3"). Notation Z_of_N_le_iff := N2Z.inj_le (compat "8.3"). Notation Z_of_N_lt_iff := N2Z.inj_lt (compat "8.3"). Notation Z_of_N_ge_iff := N2Z.inj_ge (compat "8.3"). Notation Z_of_N_gt_iff := N2Z.inj_gt (compat "8.3"). Notation Z_of_N_le := (fun n m => proj1 (N2Z.inj_le n m)) (compat "8.3"). Notation Z_of_N_lt := (fun n m => proj1 (N2Z.inj_lt n m)) (compat "8.3"). Notation Z_of_N_ge := (fun n m => proj1 (N2Z.inj_ge n m)) (compat "8.3"). Notation Z_of_N_gt := (fun n m => proj1 (N2Z.inj_gt n m)) (compat "8.3"). Notation Z_of_N_le_rev := (fun n m => proj2 (N2Z.inj_le n m)) (compat "8.3"). 
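(* Editorial illustration, not part of the original file: truncated [nat]
   subtraction is sent to [0] in [Z], as stated by [inj_minus2] above. *)
Example inj_minus2_example : Z.of_nat ((2 - 5)%nat) = 0.
Proof. reflexivity. Qed.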
Notation Z_of_N_lt_rev := (fun n m => proj2 (N2Z.inj_lt n m)) (compat "8.3"). Notation Z_of_N_ge_rev := (fun n m => proj2 (N2Z.inj_ge n m)) (compat "8.3"). Notation Z_of_N_gt_rev := (fun n m => proj2 (N2Z.inj_gt n m)) (compat "8.3"). Notation Z_of_N_pos := N2Z.inj_pos (compat "8.3"). Notation Z_of_N_abs := N2Z.inj_abs_N (compat "8.3"). Notation Z_of_N_le_0 := N2Z.is_nonneg (compat "8.3"). Notation Z_of_N_plus := N2Z.inj_add (compat "8.3"). Notation Z_of_N_mult := N2Z.inj_mul (compat "8.3"). Notation Z_of_N_minus := N2Z.inj_sub_max (compat "8.3"). Notation Z_of_N_succ := N2Z.inj_succ (compat "8.3"). Notation Z_of_N_min := N2Z.inj_min (compat "8.3"). Notation Z_of_N_max := N2Z.inj_max (compat "8.3"). Notation Zabs_of_N := Zabs2N.id (compat "8.3"). Notation Zabs_N_succ_abs := Zabs2N.inj_succ_abs (compat "8.3"). Notation Zabs_N_succ := Zabs2N.inj_succ (compat "8.3"). Notation Zabs_N_plus_abs := Zabs2N.inj_add_abs (compat "8.3"). Notation Zabs_N_plus := Zabs2N.inj_add (compat "8.3"). Notation Zabs_N_mult_abs := Zabs2N.inj_mul_abs (compat "8.3"). Notation Zabs_N_mult := Zabs2N.inj_mul (compat "8.3"). Theorem inj_minus2 : forall n m:nat, (m > n)%nat -> Z.of_nat (n - m) = 0. Proof. intros. rewrite not_le_minus_0; auto with arith. Qed. coq-8.4pl2/theories/ZArith/Zdigits.v0000640000175000001440000002121612010532755016460 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1%Z | false => 0%Z end. Lemma binary_value : forall n:nat, Bvector n -> Z. Proof. simple induction n; intros. exact 0%Z. inversion H0. exact (bit_value h + 2 * H H2)%Z. Defined. Lemma two_compl_value : forall n:nat, Bvector (S n) -> Z. Proof. simple induction n; intros. inversion H. exact (- bit_value h)%Z. inversion H0. exact (bit_value h + 2 * H H2)%Z. Defined. End VALUE_OF_BOOLEAN_VECTORS. Section ENCODING_VALUE. (** We compute the binary value via a Horner scheme. Computation stops at the vector length without checks. We define a function Zmod2 similar to Z.div2 returning the quotient of division z=2q+r with 0<=r<=1. The two's complement value is also computed via a Horner scheme with Zmod2, the parameter is the size minus one. *) Definition Zmod2 (z:Z) := match z with | Z0 => 0%Z | Zpos p => match p with | xI q => Zpos q | xO q => Zpos q | xH => 0%Z end | Zneg p => match p with | xI q => (Zneg q - 1)%Z | xO q => Zneg q | xH => (-1)%Z end end. Lemma Zmod2_twice : forall z:Z, z = (2 * Zmod2 z + bit_value (Z.odd z))%Z. Proof. destruct z; simpl. trivial. destruct p; simpl; trivial. destruct p; simpl. destruct p as [p| p| ]; simpl. rewrite <- (Pos.pred_double_succ p); trivial. trivial. trivial. trivial. trivial. Qed. Lemma Z_to_binary : forall n:nat, Z -> Bvector n. Proof. simple induction n; intros. exact Bnil. exact (Bcons (Z.odd H0) n0 (H (Z.div2 H0))). Defined. Lemma Z_to_two_compl : forall n:nat, Z -> Bvector (S n). Proof. simple induction n; intros. exact (Bcons (Z.odd H) 0 Bnil). exact (Bcons (Z.odd H0) (S n0) (H (Zmod2 H0))). Defined. End ENCODING_VALUE. Section Z_BRIC_A_BRAC. (** Some auxiliary lemmas used in the next section. Large use of ZArith. Deserve to be properly rewritten. *) Lemma binary_value_Sn : forall (n:nat) (b:bool) (bv:Bvector n), binary_value (S n) ( b :: bv) = (bit_value b + 2 * binary_value n bv)%Z. Proof. intros; auto. Qed. Lemma Z_to_binary_Sn : forall (n:nat) (b:bool) (z:Z), (z >= 0)%Z -> Z_to_binary (S n) (bit_value b + 2 * z) = Bcons b n (Z_to_binary n z). Proof. 
destruct b; destruct z; simpl; auto. intro H; elim H; trivial. Qed. Lemma binary_value_pos : forall (n:nat) (bv:Bvector n), (binary_value n bv >= 0)%Z. Proof. induction bv as [| a n v IHbv]; simpl. omega. destruct a; destruct (binary_value n v); simpl; auto. auto with zarith. Qed. Lemma two_compl_value_Sn : forall (n:nat) (bv:Bvector (S n)) (b:bool), two_compl_value (S n) (Bcons b (S n) bv) = (bit_value b + 2 * two_compl_value n bv)%Z. Proof. intros; auto. Qed. Lemma Z_to_two_compl_Sn : forall (n:nat) (b:bool) (z:Z), Z_to_two_compl (S n) (bit_value b + 2 * z) = Bcons b (S n) (Z_to_two_compl n z). Proof. destruct b; destruct z as [| p| p]; auto. destruct p as [p| p| ]; auto. destruct p as [p| p| ]; simpl; auto. intros; rewrite (Pos.succ_pred_double p); trivial. Qed. Lemma Z_to_binary_Sn_z : forall (n:nat) (z:Z), Z_to_binary (S n) z = Bcons (Z.odd z) n (Z_to_binary n (Z.div2 z)). Proof. intros; auto. Qed. Lemma Z_div2_value : forall z:Z, (z >= 0)%Z -> (bit_value (Z.odd z) + 2 * Z.div2 z)%Z = z. Proof. destruct z as [| p| p]; auto. destruct p; auto. intro H; elim H; trivial. Qed. Lemma Pdiv2 : forall z:Z, (z >= 0)%Z -> (Z.div2 z >= 0)%Z. Proof. destruct z as [| p| p]. auto. destruct p; auto. simpl; intros; omega. intro H; elim H; trivial. Qed. Lemma Zdiv2_two_power_nat : forall (z:Z) (n:nat), (z >= 0)%Z -> (z < two_power_nat (S n))%Z -> (Z.div2 z < two_power_nat n)%Z. Proof. intros. cut (2 * Z.div2 z < 2 * two_power_nat n)%Z; intros. omega. rewrite <- two_power_nat_S. destruct (Zeven.Zeven_odd_dec z); intros. rewrite <- Zeven.Zeven_div2; auto. generalize (Zeven.Zodd_div2 z z0); omega. Qed. Lemma Z_to_two_compl_Sn_z : forall (n:nat) (z:Z), Z_to_two_compl (S n) z = Bcons (Z.odd z) (S n) (Z_to_two_compl n (Zmod2 z)). Proof. intros; auto. Qed. Lemma Zeven_bit_value : forall z:Z, Zeven.Zeven z -> bit_value (Z.odd z) = 0%Z. Proof. destruct z; unfold bit_value; auto. destruct p; tauto || (intro H; elim H). destruct p; tauto || (intro H; elim H). Qed. Lemma Zodd_bit_value : forall z:Z, Zeven.Zodd z -> bit_value (Z.odd z) = 1%Z. Proof. destruct z; unfold bit_value; auto. intros; elim H. destruct p; tauto || (intros; elim H). destruct p; tauto || (intros; elim H). Qed. Lemma Zge_minus_two_power_nat_S : forall (n:nat) (z:Z), (z >= - two_power_nat (S n))%Z -> (Zmod2 z >= - two_power_nat n)%Z. Proof. intros n z; rewrite (two_power_nat_S n). generalize (Zmod2_twice z). destruct (Zeven.Zeven_odd_dec z) as [H| H]. rewrite (Zeven_bit_value z H); intros; omega. rewrite (Zodd_bit_value z H); intros; omega. Qed. Lemma Zlt_two_power_nat_S : forall (n:nat) (z:Z), (z < two_power_nat (S n))%Z -> (Zmod2 z < two_power_nat n)%Z. Proof. intros n z; rewrite (two_power_nat_S n). generalize (Zmod2_twice z). destruct (Zeven.Zeven_odd_dec z) as [H| H]. rewrite (Zeven_bit_value z H); intros; omega. rewrite (Zodd_bit_value z H); intros; omega. Qed. End Z_BRIC_A_BRAC. Section COHERENT_VALUE. (** We check that the functions are reciprocal on the definition interval. This uses earlier library lemmas. *) Lemma binary_to_Z_to_binary : forall (n:nat) (bv:Bvector n), Z_to_binary n (binary_value n bv) = bv. Proof. induction bv as [| a n bv IHbv]. auto. rewrite binary_value_Sn. rewrite Z_to_binary_Sn. rewrite IHbv; trivial. apply binary_value_pos. Qed. Lemma two_compl_to_Z_to_two_compl : forall (n:nat) (bv:Bvector n) (b:bool), Z_to_two_compl n (two_compl_value n (Bcons b n bv)) = Bcons b n bv. Proof. induction bv as [| a n bv IHbv]; intro b. destruct b; auto. rewrite two_compl_value_Sn. rewrite Z_to_two_compl_Sn. rewrite IHbv; trivial. 
Qed. Lemma Z_to_binary_to_Z : forall (n:nat) (z:Z), (z >= 0)%Z -> (z < two_power_nat n)%Z -> binary_value n (Z_to_binary n z) = z. Proof. induction n as [| n IHn]. unfold two_power_nat, shift_nat; simpl; intros; omega. intros; rewrite Z_to_binary_Sn_z. rewrite binary_value_Sn. rewrite IHn. apply Z_div2_value; auto. apply Pdiv2; trivial. apply Zdiv2_two_power_nat; trivial. Qed. Lemma Z_to_two_compl_to_Z : forall (n:nat) (z:Z), (z >= - two_power_nat n)%Z -> (z < two_power_nat n)%Z -> two_compl_value n (Z_to_two_compl n z) = z. Proof. induction n as [| n IHn]. unfold two_power_nat, shift_nat; simpl; intros. assert (z = (-1)%Z \/ z = 0%Z). omega. intuition; subst z; trivial. intros; rewrite Z_to_two_compl_Sn_z. rewrite two_compl_value_Sn. rewrite IHn. generalize (Zmod2_twice z); omega. apply Zge_minus_two_power_nat_S; auto. apply Zlt_two_power_nat_S; auto. Qed. End COHERENT_VALUE. coq-8.4pl2/theories/ZArith/Zmin.v0000640000175000001440000000425212010532755015761 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y /\ Z.min x y = y. Proof. Z.swap_greater. rewrite Z.min_comm. destruct (Z.min_spec y x); auto. Qed. Lemma Zmin_irreducible n m : Z.min n m = n \/ Z.min n m = m. Proof. destruct (Z.min_dec n m); auto. Qed. Notation Zmin_or := Zmin_irreducible (compat "8.3"). Lemma Zmin_le_prime_inf n m p : Z.min n m <= p -> {n <= p} + {m <= p}. Proof. apply Z.min_case; auto. Qed. Lemma Zpos_min_1 p : Z.min 1 (Zpos p) = 1. Proof. now destruct p. Qed. coq-8.4pl2/theories/ZArith/vo.itarget0000640000175000001440000000057411736570167016702 0ustar notinusersauxiliary.vo BinIntDef.vo BinInt.vo Int.vo Wf_Z.vo Zabs.vo ZArith_base.vo ZArith_dec.vo ZArith.vo Zdigits.vo Zbool.vo Zcompare.vo Zcomplements.vo Zdiv.vo Zeven.vo Zgcd_alt.vo Zpow_alt.vo Zhints.vo Zlogarithm.vo Zmax.vo Zminmax.vo Zmin.vo Zmisc.vo Znat.vo Znumtheory.vo ZOdiv_def.vo ZOdiv.vo Zquot.vo Zorder.vo Zpow_def.vo Zpower.vo Zpow_facts.vo Zsqrt_compat.vo Zwf.vo Zeuclid.vo coq-8.4pl2/theories/ZArith/Zmax.v0000640000175000001440000000462012010532755015762 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = y /\ Z.max x y = x \/ x < y /\ Z.max x y = y. Proof. Z.swap_greater. destruct (Z.max_spec x y); auto. Qed. Lemma Zmax_left n m : n>=m -> Z.max n m = n. Proof. Z.swap_greater. apply Z.max_l. Qed. Lemma Zpos_max_1 p : Z.max 1 (Z.pos p) = Z.pos p. Proof. now destruct p. Qed. coq-8.4pl2/theories/ZArith/Zpow_facts.v0000640000175000001440000001757012010532755017172 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 < Z.pow_pos x p. Proof. intros. now apply (Z.pow_pos_nonneg x (Zpos p)). Qed. Notation Zpower_1_r := Z.pow_1_r (compat "8.3"). Notation Zpower_1_l := Z.pow_1_l (compat "8.3"). Notation Zpower_0_l := Z.pow_0_l' (compat "8.3"). Notation Zpower_0_r := Z.pow_0_r (compat "8.3"). Notation Zpower_2 := Z.pow_2_r (compat "8.3"). Notation Zpower_gt_0 := Z.pow_pos_nonneg (compat "8.3"). Notation Zpower_ge_0 := Z.pow_nonneg (compat "8.3"). Notation Zpower_Zabs := Z.abs_pow (compat "8.3"). Notation Zpower_Zsucc := Z.pow_succ_r (compat "8.3"). Notation Zpower_mult := Z.pow_mul_r (compat "8.3"). Notation Zpower_le_monotone2 := Z.pow_le_mono_r (compat "8.3"). Theorem Zpower_le_monotone a b c : 0 < a -> 0 <= b <= c -> a^b <= a^c. Proof. intros. 
now apply Z.pow_le_mono_r. Qed. Theorem Zpower_lt_monotone a b c : 1 < a -> 0 <= b < c -> a^b < a^c. Proof. intros. apply Z.pow_lt_mono_r; auto with zarith. Qed. Theorem Zpower_gt_1 x y : 1 < x -> 0 < y -> 1 < x^y. Proof. apply Z.pow_gt_1. Qed. Theorem Zmult_power p q r : 0 <= r -> (p*q)^r = p^r * q^r. Proof. intros. apply Z.pow_mul_l. Qed. Hint Resolve Z.pow_nonneg Z.pow_pos_nonneg : zarith. Theorem Zpower_le_monotone3 a b c : 0 <= c -> 0 <= a <= b -> a^c <= b^c. Proof. intros. now apply Z.pow_le_mono_l. Qed. Lemma Zpower_le_monotone_inv a b c : 1 < a -> 0 < b -> a^b <= a^c -> b <= c. Proof. intros Ha Hb H. apply (Z.pow_le_mono_r_iff a); trivial. apply Z.lt_le_incl; apply (Z.pow_gt_1 a); trivial. apply Z.lt_le_trans with (a^b); trivial. now apply Z.pow_gt_1. Qed. Notation Zpower_nat_Zpower := Zpower_nat_Zpower (only parsing). Theorem Zpower2_lt_lin n : 0 <= n -> n < 2^n. Proof. intros. now apply Z.pow_gt_lin_r. Qed. Theorem Zpower2_le_lin n : 0 <= n -> n <= 2^n. Proof. intros. apply Z.lt_le_incl. now apply Z.pow_gt_lin_r. Qed. Lemma Zpower2_Psize n p : Zpos p < 2^(Z.of_nat n) <-> (Pos.size_nat p <= n)%nat. Proof. revert p; induction n. destruct p; now split. assert (Hn := Nat2Z.is_nonneg n). destruct p; simpl Pos.size_nat. - specialize IHn with p. rewrite Pos2Z.inj_xI, Nat2Z.inj_succ, Z.pow_succ_r; omega. - specialize IHn with p. rewrite Pos2Z.inj_xO, Nat2Z.inj_succ, Z.pow_succ_r; omega. - split; auto with zarith. intros _. apply Z.pow_gt_1. easy. now rewrite Nat2Z.inj_succ, Z.lt_succ_r. Qed. (** * Z.pow and modulo *) Theorem Zpower_mod p q n : 0 < n -> (p^q) mod n = ((p mod n)^q) mod n. Proof. intros Hn; destruct (Z.le_gt_cases 0 q) as [H1|H1]. - pattern q; apply natlike_ind; trivial. clear q H1. intros q Hq Rec. rewrite !Z.pow_succ_r; trivial. rewrite Z.mul_mod_idemp_l; auto with zarith. rewrite Z.mul_mod, Rec, <- Z.mul_mod; auto with zarith. - rewrite !Z.pow_neg_r; auto with zarith. Qed. (** A direct way to compute Z.pow modulo **) Fixpoint Zpow_mod_pos (a: Z)(m: positive)(n : Z) : Z := match m with | xH => a mod n | xO m' => let z := Zpow_mod_pos a m' n in match z with | 0 => 0 | _ => (z * z) mod n end | xI m' => let z := Zpow_mod_pos a m' n in match z with | 0 => 0 | _ => (z * z * a) mod n end end. Definition Zpow_mod a m n := match m with | 0 => 1 mod n | Zpos p => Zpow_mod_pos a p n | Zneg p => 0 end. Theorem Zpow_mod_pos_correct a m n : n <> 0 -> Zpow_mod_pos a m n = (Z.pow_pos a m) mod n. Proof. intros Hn. induction m. - rewrite Pos.xI_succ_xO at 2. rewrite <- Pos.add_1_r, <- Pos.add_diag. rewrite 2 Zpower_pos_is_exp, Zpower_pos_1_r. rewrite Z.mul_mod, (Z.mul_mod (Z.pow_pos a m)) by trivial. rewrite <- IHm, <- Z.mul_mod by trivial. simpl. now destruct (Zpow_mod_pos a m n). - rewrite <- Pos.add_diag at 2. rewrite Zpower_pos_is_exp. rewrite Z.mul_mod by trivial. rewrite <- IHm. simpl. now destruct (Zpow_mod_pos a m n). - now rewrite Zpower_pos_1_r. Qed. Theorem Zpow_mod_correct a m n : n <> 0 -> Zpow_mod a m n = (a ^ m) mod n. Proof. intros Hn. destruct m; simpl. - trivial. - apply Zpow_mod_pos_correct; auto with zarith. - rewrite Z.mod_0_l; auto with zarith. Qed. (* Complements about power and number theory. *) Lemma Zpower_divide p q : 0 < q -> (p | p ^ q). Proof. exists (p^(q - 1)). rewrite Z.mul_comm, <- Z.pow_succ_r; f_equal; auto with zarith. Qed. Theorem rel_prime_Zpower_r i p q : 0 <= i -> rel_prime p q -> rel_prime p (q^i). Proof. intros Hi Hpq; pattern i; apply natlike_ind; auto with zarith. simpl. apply rel_prime_sym, rel_prime_1. clear i Hi. 
intros i Hi Rec; rewrite Z.pow_succ_r; auto. apply rel_prime_mult; auto. Qed. Theorem rel_prime_Zpower i j p q : 0 <= i -> 0 <= j -> rel_prime p q -> rel_prime (p^i) (q^j). Proof. intros Hi Hj H. apply rel_prime_Zpower_r; trivial. apply rel_prime_sym. apply rel_prime_Zpower_r; trivial. now apply rel_prime_sym. Qed. Theorem prime_power_prime p q n : 0 <= n -> prime p -> prime q -> (p | q^n) -> p = q. Proof. intros Hn Hp Hq; pattern n; apply natlike_ind; auto; clear n Hn. - simpl; intros. assert (2<=p) by (apply prime_ge_2; auto). assert (p<=1) by (apply Z.divide_pos_le; auto with zarith). omega. - intros n Hn Rec. rewrite Z.pow_succ_r by trivial. intros. assert (2<=p) by (apply prime_ge_2; auto). assert (2<=q) by (apply prime_ge_2; auto). destruct prime_mult with (2 := H); auto. apply prime_div_prime; auto. Qed. Theorem Zdivide_power_2 x p n : 0 <= n -> 0 <= x -> prime p -> (x | p^n) -> exists m, x = p^m. Proof. intros Hn Hx; revert p n Hn. generalize Hx. pattern x; apply Z_lt_induction; auto. clear x Hx; intros x IH Hx p n Hn Hp H. Z.le_elim Hx; subst. apply Z.le_succ_l in Hx; simpl in Hx. Z.le_elim Hx; subst. (* x > 1 *) case (prime_dec x); intros Hpr. exists 1; rewrite Z.pow_1_r; apply prime_power_prime with n; auto. case not_prime_divide with (2 := Hpr); auto. intros p1 ((Hp1, Hpq1),(q1,->)). assert (Hq1 : 0 < q1) by (apply Z.mul_lt_mono_pos_r with p1; auto with zarith). destruct (IH p1) with p n as (r1,Hr1); auto with zarith. transitivity (q1 * p1); trivial. exists q1; auto with zarith. destruct (IH q1) with p n as (r2,Hr2); auto with zarith. split; auto with zarith. rewrite <- (Z.mul_1_r q1) at 1. apply Z.mul_lt_mono_pos_l; auto with zarith. transitivity (q1 * p1); trivial. exists p1; auto with zarith. exists (r2 + r1); subst. symmetry. apply Z.pow_add_r. generalize Hq1; case r2; now auto with zarith. generalize Hp1; case r1; now auto with zarith. (* x = 1 *) exists 0; rewrite Z.pow_0_r; auto. (* x = 0 *) exists n; destruct H; rewrite Z.mul_0_r in H; auto. Qed. (** * Z.square: a direct definition of [z^2] *) Notation Psquare := Pos.square (compat "8.3"). Notation Zsquare := Z.square (compat "8.3"). Notation Psquare_correct := Pos.square_spec (compat "8.3"). Notation Zsquare_correct := Z.square_spec (compat "8.3"). coq-8.4pl2/theories/ZArith/Zlogarithm.v0000640000175000001440000002035612010532755017167 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 (* 1 *) | xO q => Z.succ (log_inf q) (* 2n *) | xI q => Z.succ (log_inf q) (* 2n+1 *) end. Fixpoint log_sup (p:positive) : Z := match p with | xH => 0 (* 1 *) | xO n => Z.succ (log_sup n) (* 2n *) | xI n => Z.succ (Z.succ (log_inf n)) (* 2n+1 *) end. Hint Unfold log_inf log_sup. Lemma Psize_log_inf : forall p, Zpos (Pos.size p) = Z.succ (log_inf p). Proof. induction p; simpl; now rewrite ?Pos2Z.inj_succ, ?IHp. Qed. Lemma Zlog2_log_inf : forall p, Z.log2 (Zpos p) = log_inf p. Proof. unfold Z.log2. destruct p; simpl; trivial; apply Psize_log_inf. Qed. Lemma Zlog2_up_log_sup : forall p, Z.log2_up (Zpos p) = log_sup p. Proof. induction p; simpl. - change (Zpos p~1) with (2*(Zpos p)+1). rewrite Z.log2_up_succ_double, Zlog2_log_inf; try easy. unfold Z.succ. now rewrite !(Z.add_comm _ 1), Z.add_assoc. - change (Zpos p~0) with (2*Zpos p). now rewrite Z.log2_up_double, IHp. - reflexivity. Qed. (** Then we give the specifications of [log_inf] and [log_sup] and prove their validity *) Hint Resolve Z.le_trans: zarith. 
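(* Editor's note, an illustrative sketch that is not part of the original file:
   on a concrete argument the two functions defined above bracket the binary
   logarithm from below and above.  For instance 6%positive is 0b110, so
   [log_inf 6] computes to 2 and [log_sup 6] to 3, matching 2^2 <= 6 <= 2^3.
   Assuming ZArith and this file are loaded, the following hypothetical checks
   (the names are mine) should go through by computation:

     Example log_inf_6 : log_inf 6 = 2.  Proof. reflexivity. Qed.
     Example log_sup_6 : log_sup 6 = 3.  Proof. reflexivity. Qed.
*)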
Theorem log_inf_correct : forall x:positive, 0 <= log_inf x /\ two_p (log_inf x) <= Zpos x < two_p (Z.succ (log_inf x)). Proof. simple induction x; intros; simpl; [ elim H; intros Hp HR; clear H; split; [ auto with zarith | rewrite two_p_S with (x := Z.succ (log_inf p)) by (apply Z.le_le_succ_r; trivial); rewrite two_p_S by trivial; rewrite two_p_S in HR by trivial; rewrite (BinInt.Pos2Z.inj_xI p); omega ] | elim H; intros Hp HR; clear H; split; [ auto with zarith | rewrite two_p_S with (x := Z.succ (log_inf p)) by (apply Z.le_le_succ_r; trivial); rewrite two_p_S by trivial; rewrite two_p_S in HR by trivial; rewrite (BinInt.Pos2Z.inj_xO p); omega ] | unfold two_power_pos; unfold shift_pos; simpl; omega ]. Qed. Definition log_inf_correct1 (p:positive) := proj1 (log_inf_correct p). Definition log_inf_correct2 (p:positive) := proj2 (log_inf_correct p). Opaque log_inf_correct1 log_inf_correct2. Hint Resolve log_inf_correct1 log_inf_correct2: zarith. Lemma log_sup_correct1 : forall p:positive, 0 <= log_sup p. Proof. simple induction p; intros; simpl; auto with zarith. Qed. (** For every [p], either [p] is a power of two and [(log_inf p)=(log_sup p)] either [(log_sup p)=(log_inf p)+1] *) Theorem log_sup_log_inf : forall p:positive, IF Zpos p = two_p (log_inf p) then Zpos p = two_p (log_sup p) else log_sup p = Z.succ (log_inf p). Proof. simple induction p; intros; [ elim H; right; simpl; rewrite (two_p_S (log_inf p0) (log_inf_correct1 p0)); rewrite BinInt.Pos2Z.inj_xI; unfold Z.succ; omega | elim H; clear H; intro Hif; [ left; simpl; rewrite (two_p_S (log_inf p0) (log_inf_correct1 p0)); rewrite (two_p_S (log_sup p0) (log_sup_correct1 p0)); rewrite <- (proj1 Hif); rewrite <- (proj2 Hif); auto | right; simpl; rewrite (two_p_S (log_inf p0) (log_inf_correct1 p0)); rewrite BinInt.Pos2Z.inj_xO; unfold Z.succ; omega ] | left; auto ]. Qed. Theorem log_sup_correct2 : forall x:positive, two_p (Z.pred (log_sup x)) < Zpos x <= two_p (log_sup x). Proof. intro. elim (log_sup_log_inf x). (* x is a power of two and [log_sup = log_inf] *) intros [E1 E2]; rewrite E2. split; [ apply two_p_pred; apply log_sup_correct1 | apply Z.le_refl ]. intros [E1 E2]; rewrite E2. rewrite (Z.pred_succ (log_inf x)). generalize (log_inf_correct2 x); omega. Qed. Lemma log_inf_le_log_sup : forall p:positive, log_inf p <= log_sup p. Proof. simple induction p; simpl; intros; omega. Qed. Lemma log_sup_le_Slog_inf : forall p:positive, log_sup p <= Z.succ (log_inf p). Proof. simple induction p; simpl; intros; omega. Qed. (** Now it's possible to specify and build the [Log] rounded to the nearest *) Fixpoint log_near (x:positive) : Z := match x with | xH => 0 | xO xH => 1 | xI xH => 2 | xO y => Z.succ (log_near y) | xI y => Z.succ (log_near y) end. Theorem log_near_correct1 : forall p:positive, 0 <= log_near p. Proof. simple induction p; simpl; intros; [ elim p0; auto with zarith | elim p0; auto with zarith | trivial with zarith ]. intros; apply Z.le_le_succ_r. generalize H0; now elim p1. intros; apply Z.le_le_succ_r. generalize H0; now elim p1. Qed. Theorem log_near_correct2 : forall p:positive, log_near p = log_inf p \/ log_near p = log_sup p. Proof. simple induction p. intros p0 [Einf| Esup]. simpl. rewrite Einf. case p0; [ left | left | right ]; reflexivity. simpl; rewrite Esup. elim (log_sup_log_inf p0). generalize (log_inf_le_log_sup p0). generalize (log_sup_le_Slog_inf p0). case p0; auto with zarith. intros; omega. case p0; intros; auto with zarith. intros p0 [Einf| Esup]. simpl. repeat rewrite Einf. 
case p0; intros; auto with zarith. simpl. repeat rewrite Esup. case p0; intros; auto with zarith. auto. Qed. End Log_pos. Section divers. (** Number of significative digits. *) Definition N_digits (x:Z) := match x with | Zpos p => log_inf p | Zneg p => log_inf p | Z0 => 0 end. Lemma ZERO_le_N_digits : forall x:Z, 0 <= N_digits x. Proof. simple induction x; simpl; [ apply Z.le_refl | exact log_inf_correct1 | exact log_inf_correct1 ]. Qed. Lemma log_inf_shift_nat : forall n:nat, log_inf (shift_nat n 1) = Z.of_nat n. Proof. simple induction n; intros; [ try trivial | rewrite Nat2Z.inj_succ; rewrite <- H; reflexivity ]. Qed. Lemma log_sup_shift_nat : forall n:nat, log_sup (shift_nat n 1) = Z.of_nat n. Proof. simple induction n; intros; [ try trivial | rewrite Nat2Z.inj_succ; rewrite <- H; reflexivity ]. Qed. (** [Is_power p] means that p is a power of two *) Fixpoint Is_power (p:positive) : Prop := match p with | xH => True | xO q => Is_power q | xI q => False end. Lemma Is_power_correct : forall p:positive, Is_power p <-> (exists y : nat, p = shift_nat y 1). Proof. split; [ elim p; [ simpl; tauto | simpl; intros; generalize (H H0); intro H1; elim H1; intros y0 Hy0; exists (S y0); rewrite Hy0; reflexivity | intro; exists 0%nat; reflexivity ] | intros; elim H; intros; rewrite H0; elim x; intros; simpl; trivial ]. Qed. Lemma Is_power_or : forall p:positive, Is_power p \/ ~ Is_power p. Proof. simple induction p; [ intros; right; simpl; tauto | intros; elim H; [ intros; left; simpl; exact H0 | intros; right; simpl; exact H0 ] | left; simpl; trivial ]. Qed. End divers. coq-8.4pl2/theories/ZArith/BinInt.v0000640000175000001440000014537712010532755016245 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Gt. Definition ge x y := (x ?= y) <> Lt. Infix "<=" := le : Z_scope. Infix "<" := lt : Z_scope. Infix ">=" := ge : Z_scope. Infix ">" := gt : Z_scope. Notation "x <= y <= z" := (x <= y /\ y <= z) : Z_scope. Notation "x <= y < z" := (x <= y /\ y < z) : Z_scope. Notation "x < y < z" := (x < y /\ y < z) : Z_scope. Notation "x < y <= z" := (x < y /\ y <= z) : Z_scope. Definition divide x y := exists z, y = z*x. Notation "( x | y )" := (divide x y) (at level 0). Definition Even a := exists b, a = 2*b. Definition Odd a := exists b, a = 2*b+1. (** * Decidability of equality. *) Definition eq_dec (x y : Z) : {x = y} + {x <> y}. Proof. decide equality; apply Pos.eq_dec. Defined. (** * Properties of [pos_sub] *) (** [pos_sub] can be written in term of positive comparison and subtraction (cf. earlier definition of addition of Z) *) Lemma pos_sub_spec p q : pos_sub p q = match (p ?= q)%positive with | Eq => 0 | Lt => neg (q - p) | Gt => pos (p - q) end. Proof. revert q. induction p; destruct q; simpl; trivial; rewrite ?Pos.compare_xI_xI, ?Pos.compare_xO_xI, ?Pos.compare_xI_xO, ?Pos.compare_xO_xO, IHp; simpl; case Pos.compare_spec; intros; simpl; trivial; (now rewrite Pos.sub_xI_xI) || (now rewrite Pos.sub_xO_xO) || (now rewrite Pos.sub_xO_xI) || (now rewrite Pos.sub_xI_xO) || subst; unfold Pos.sub; simpl; now rewrite Pos.sub_mask_diag. Qed. Lemma pos_sub_discr p q : match pos_sub p q with | Z0 => p = q | pos k => p = q + k | neg k => q = p + k end%positive. Proof. rewrite pos_sub_spec. case Pos.compare_spec; auto; intros; now rewrite Pos.add_comm, Pos.sub_add. Qed. (** Particular cases of the previous result *) Lemma pos_sub_diag p : pos_sub p p = 0. Proof. 
now rewrite pos_sub_spec, Pos.compare_refl. Qed. Lemma pos_sub_lt p q : (p < q)%positive -> pos_sub p q = neg (q - p). Proof. intros H. now rewrite pos_sub_spec, H. Qed. Lemma pos_sub_gt p q : (q < p)%positive -> pos_sub p q = pos (p - q). Proof. intros H. now rewrite pos_sub_spec, Pos.compare_antisym, H. Qed. (** The opposite of [pos_sub] is [pos_sub] with reversed arguments *) Lemma pos_sub_opp p q : - pos_sub p q = pos_sub q p. Proof. revert q; induction p; destruct q; simpl; trivial; rewrite <- IHp; now destruct pos_sub. Qed. (** In the following module, we group results that are needed now to prove specifications of operations, but will also be provided later by the generic functor of properties. *) Module Import Private_BootStrap. (** * Properties of addition *) (** ** Zero is neutral for addition *) Lemma add_0_r n : n + 0 = n. Proof. now destruct n. Qed. (** ** Addition is commutative *) Lemma add_comm n m : n + m = m + n. Proof. destruct n, m; simpl; trivial; now rewrite Pos.add_comm. Qed. (** ** Opposite distributes over addition *) Lemma opp_add_distr n m : - (n + m) = - n + - m. Proof. destruct n, m; simpl; trivial using pos_sub_opp. Qed. (** ** Opposite is injective *) Lemma opp_inj n m : -n = -m -> n = m. Proof. destruct n, m; simpl; intros H; destr_eq H; now f_equal. Qed. (** ** Addition is associative *) Lemma pos_sub_add p q r : pos_sub (p + q) r = pos p + pos_sub q r. Proof. simpl. rewrite !pos_sub_spec. case (Pos.compare_spec q r); intros E0. - (* q = r *) subst. assert (H := Pos.lt_add_r r p). rewrite Pos.add_comm in H. apply Pos.lt_gt in H. now rewrite H, Pos.add_sub. - (* q < r *) rewrite pos_sub_spec. assert (Hr : (r = (r-q)+q)%positive) by (now rewrite Pos.sub_add). rewrite Hr at 1. rewrite Pos.add_compare_mono_r. case Pos.compare_spec; intros E1; trivial; f_equal. rewrite Pos.add_comm. apply Pos.sub_add_distr. rewrite Hr, Pos.add_comm. now apply Pos.add_lt_mono_r. symmetry. apply Pos.sub_sub_distr; trivial. - (* r < q *) assert (LT : (r < p + q)%positive). { apply Pos.lt_trans with q; trivial. rewrite Pos.add_comm. apply Pos.lt_add_r. } apply Pos.lt_gt in LT. rewrite LT. f_equal. symmetry. now apply Pos.add_sub_assoc. Qed. Lemma add_assoc n m p : n + (m + p) = n + m + p. Proof. assert (AUX : forall x y z, pos x + (y + z) = pos x + y + z). { intros x [|y|y] [|z|z]; rewrite ?add_0_r; trivial. - simpl. now rewrite Pos.add_assoc. - simpl (_ + neg _). symmetry. apply pos_sub_add. - simpl (neg _ + _); simpl (_ + neg _). now rewrite (add_comm _ (pos _)), <- 2 pos_sub_add, Pos.add_comm. - apply opp_inj. rewrite !opp_add_distr. simpl opp. simpl (neg _ + _); simpl (_ + neg _). rewrite add_comm, Pos.add_comm. apply pos_sub_add. } destruct n. - trivial. - apply AUX. - apply opp_inj. rewrite !opp_add_distr. simpl opp. apply AUX. Qed. (** ** Subtraction and successor *) Lemma sub_succ_l n m : succ n - m = succ (n - m). Proof. unfold sub, succ. now rewrite <- 2 add_assoc, (add_comm 1). Qed. (** ** Opposite is inverse for addition *) Lemma add_opp_diag_r n : n + - n = 0. Proof. destruct n; simpl; trivial; now rewrite pos_sub_diag. Qed. Lemma add_opp_diag_l n : - n + n = 0. Proof. rewrite add_comm. apply add_opp_diag_r. Qed. (** ** Commutativity of multiplication *) Lemma mul_comm n m : n * m = m * n. Proof. destruct n, m; simpl; trivial; f_equal; apply Pos.mul_comm. Qed. (** ** Associativity of multiplication *) Lemma mul_assoc n m p : n * (m * p) = n * m * p. Proof. destruct n, m, p; simpl; trivial; f_equal; apply Pos.mul_assoc. Qed. 
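(* Editor's note, an illustrative sketch that is not part of the original file:
   [pos_sub] returns the difference of two positives as a signed integer, which
   is exactly what [pos_sub_spec] above captures.  Reading off that
   specification on small inputs:

     pos_sub 5 3 = pos 2     since (5 ?= 3)%positive = Gt and (5 - 3 = 2)%positive
     pos_sub 3 5 = neg 2     since (3 ?= 5)%positive = Lt and (5 - 3 = 2)%positive
     pos_sub 3 3 = 0         since (3 ?= 3)%positive = Eq

   so adding a positive and a negative integer reduces to this function, and
   the bootstrap lemmas that follow never need a separate subtraction on Z. *)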
(** Multiplication and constants *) Lemma mul_1_l n : 1 * n = n. Proof. now destruct n. Qed. Lemma mul_1_r n : n * 1 = n. Proof. destruct n; simpl; now rewrite ?Pos.mul_1_r. Qed. (** ** Multiplication and Opposite *) Lemma mul_opp_l n m : - n * m = - (n * m). Proof. now destruct n, m. Qed. Lemma mul_opp_r n m : n * - m = - (n * m). Proof. now destruct n, m. Qed. Lemma mul_opp_opp n m : - n * - m = n * m. Proof. now destruct n, m. Qed. Lemma mul_opp_comm n m : - n * m = n * - m. Proof. now destruct n, m. Qed. (** ** Distributivity of multiplication over addition *) Lemma mul_add_distr_pos (p:positive) n m : pos p * (n + m) = pos p * n + pos p * m. Proof. destruct n as [|n|n], m as [|m|m]; simpl; trivial; rewrite ?pos_sub_spec, ?Pos.mul_compare_mono_l; try case Pos.compare_spec; intros; now rewrite ?Pos.mul_add_distr_l, ?Pos.mul_sub_distr_l. Qed. Lemma mul_add_distr_l n m p : n * (m + p) = n * m + n * p. Proof. destruct n as [|n|n]. trivial. apply mul_add_distr_pos. change (neg n) with (- pos n). rewrite !mul_opp_l, <- opp_add_distr. f_equal. apply mul_add_distr_pos. Qed. Lemma mul_add_distr_r n m p : (n + m) * p = n * p + m * p. Proof. rewrite !(mul_comm _ p). apply mul_add_distr_l. Qed. (** ** Basic properties of divisibility *) Lemma divide_Zpos p q : (pos p|pos q) <-> (p|q)%positive. Proof. split. intros ([ |r|r],H); simpl in *; destr_eq H. exists r; auto. intros (r,H). exists (pos r); simpl; now f_equal. Qed. Lemma divide_Zpos_Zneg_r n p : (n|pos p) <-> (n|neg p). Proof. split; intros (m,H); exists (-m); now rewrite mul_opp_l, <- H. Qed. Lemma divide_Zpos_Zneg_l n p : (pos p|n) <-> (neg p|n). Proof. split; intros (m,H); exists (-m); now rewrite mul_opp_l, <- mul_opp_r. Qed. (** ** Conversions between [Z.testbit] and [N.testbit] *) Lemma testbit_of_N a n : testbit (of_N a) (of_N n) = N.testbit a n. Proof. destruct a as [|a], n; simpl; trivial. now destruct a. Qed. Lemma testbit_of_N' a n : 0<=n -> testbit (of_N a) n = N.testbit a (to_N n). Proof. intro Hn. rewrite <- testbit_of_N. f_equal. destruct n; trivial; now destruct Hn. Qed. Lemma testbit_Zpos a n : 0<=n -> testbit (pos a) n = N.testbit (N.pos a) (to_N n). Proof. intro Hn. now rewrite <- testbit_of_N'. Qed. Lemma testbit_Zneg a n : 0<=n -> testbit (neg a) n = negb (N.testbit (Pos.pred_N a) (to_N n)). Proof. intro Hn. rewrite <- testbit_of_N' by trivial. destruct n as [ |n|n]; [ | simpl; now destruct (Pos.pred_N a) | now destruct Hn]. unfold testbit. now destruct a as [|[ | | ]| ]. Qed. End Private_BootStrap. (** * Proofs of specifications *) (** ** Specification of constants *) Lemma one_succ : 1 = succ 0. Proof. reflexivity. Qed. Lemma two_succ : 2 = succ 1. Proof. reflexivity. Qed. (** ** Specification of addition *) Lemma add_0_l n : 0 + n = n. Proof. now destruct n. Qed. Lemma add_succ_l n m : succ n + m = succ (n + m). Proof. unfold succ. now rewrite 2 (add_comm _ 1), add_assoc. Qed. (** ** Specification of opposite *) Lemma opp_0 : -0 = 0. Proof. reflexivity. Qed. Lemma opp_succ n : -(succ n) = pred (-n). Proof. unfold succ, pred. apply opp_add_distr. Qed. (** ** Specification of successor and predecessor *) Lemma succ_pred n : succ (pred n) = n. Proof. unfold succ, pred. now rewrite <- add_assoc, add_opp_diag_r, add_0_r. Qed. Lemma pred_succ n : pred (succ n) = n. Proof. unfold succ, pred. now rewrite <- add_assoc, add_opp_diag_r, add_0_r. Qed. (** ** Specification of subtraction *) Lemma sub_0_r n : n - 0 = n. Proof. apply add_0_r. Qed. Lemma sub_succ_r n m : n - succ m = pred (n - m). Proof. unfold sub, succ, pred. 
now rewrite opp_add_distr, add_assoc. Qed. (** ** Specification of multiplication *) Lemma mul_0_l n : 0 * n = 0. Proof. reflexivity. Qed. Lemma mul_succ_l n m : succ n * m = n * m + m. Proof. unfold succ. now rewrite mul_add_distr_r, mul_1_l. Qed. (** ** Specification of comparisons and order *) Lemma eqb_eq n m : (n =? m) = true <-> n = m. Proof. destruct n, m; simpl; try (now split); rewrite Pos.eqb_eq; split; (now injection 1) || (intros; now f_equal). Qed. Lemma ltb_lt n m : (n n < m. Proof. unfold ltb, lt. destruct compare; easy'. Qed. Lemma leb_le n m : (n <=? m) = true <-> n <= m. Proof. unfold leb, le. destruct compare; easy'. Qed. Lemma compare_eq_iff n m : (n ?= m) = Eq <-> n = m. Proof. destruct n, m; simpl; rewrite ?CompOpp_iff, ?Pos.compare_eq_iff; split; congruence. Qed. Lemma compare_sub n m : (n ?= m) = (n - m ?= 0). Proof. destruct n as [|n|n], m as [|m|m]; simpl; trivial; rewrite <- ? Pos.compare_antisym, ?pos_sub_spec; case Pos.compare_spec; trivial. Qed. Lemma compare_antisym n m : (m ?= n) = CompOpp (n ?= m). Proof. destruct n, m; simpl; trivial; now rewrite <- ?Pos.compare_antisym. Qed. Lemma compare_lt_iff n m : (n ?= m) = Lt <-> n < m. Proof. reflexivity. Qed. Lemma compare_le_iff n m : (n ?= m) <> Gt <-> n <= m. Proof. reflexivity. Qed. (** Some more advanced properties of comparison and orders, including [compare_spec] and [lt_irrefl] and [lt_eq_cases]. *) Include BoolOrderFacts. (** Remaining specification of [lt] and [le] *) Lemma lt_succ_r n m : n < succ m <-> n<=m. Proof. unfold lt, le. rewrite compare_sub, sub_succ_r. rewrite (compare_sub n m). destruct (n-m) as [|[ | | ]|]; easy'. Qed. (** ** Specification of minimum and maximum *) Lemma max_l n m : m<=n -> max n m = n. Proof. unfold le, max. rewrite (compare_antisym n m). case compare; intuition. Qed. Lemma max_r n m : n<=m -> max n m = m. Proof. unfold le, max. case compare_spec; intuition. Qed. Lemma min_l n m : n<=m -> min n m = n. Proof. unfold le, min. case compare_spec; intuition. Qed. Lemma min_r n m : m<=n -> min n m = m. Proof. unfold le, min. rewrite (compare_antisym n m). case compare_spec; intuition. Qed. (** ** Specification of absolute value *) Lemma abs_eq n : 0 <= n -> abs n = n. Proof. destruct n; trivial. now destruct 1. Qed. Lemma abs_neq n : n <= 0 -> abs n = - n. Proof. destruct n; trivial. now destruct 1. Qed. (** ** Specification of sign *) Lemma sgn_null n : n = 0 -> sgn n = 0. Proof. intros. now subst. Qed. Lemma sgn_pos n : 0 < n -> sgn n = 1. Proof. now destruct n. Qed. Lemma sgn_neg n : n < 0 -> sgn n = -1. Proof. now destruct n. Qed. (** ** Specification of power *) Lemma pow_0_r n : n^0 = 1. Proof. reflexivity. Qed. Lemma pow_succ_r n m : 0<=m -> n^(succ m) = n * n^m. Proof. destruct m as [|m|m]; (now destruct 1) || (intros _); simpl; trivial. unfold pow_pos. now rewrite Pos.add_comm, Pos.iter_add. Qed. Lemma pow_neg_r n m : m<0 -> n^m = 0. Proof. now destruct m. Qed. (** For folding back a [pow_pos] into a [pow] *) Lemma pow_pos_fold n p : pow_pos n p = n ^ (pos p). Proof. reflexivity. Qed. (** ** Specification of square *) Lemma square_spec n : square n = n * n. Proof. destruct n; trivial; simpl; f_equal; apply Pos.square_spec. Qed. (** ** Specification of square root *) Lemma sqrtrem_spec n : 0<=n -> let (s,r) := sqrtrem n in n = s*s + r /\ 0 <= r <= 2*s. Proof. destruct n. now repeat split. generalize (Pos.sqrtrem_spec p). simpl. destruct 1; simpl; subst; now repeat split. now destruct 1. Qed. Lemma sqrt_spec n : 0<=n -> let s := sqrt n in s*s <= n < (succ s)*(succ s). 
Proof. destruct n. now repeat split. unfold sqrt. intros _. simpl succ. rewrite Pos.add_1_r. apply (Pos.sqrt_spec p). now destruct 1. Qed. Lemma sqrt_neg n : n<0 -> sqrt n = 0. Proof. now destruct n. Qed. Lemma sqrtrem_sqrt n : fst (sqrtrem n) = sqrt n. Proof. destruct n; try reflexivity. unfold sqrtrem, sqrt, Pos.sqrt. destruct (Pos.sqrtrem p) as (s,r). now destruct r. Qed. (** ** Specification of logarithm *) Lemma log2_spec n : 0 < n -> 2^(log2 n) <= n < 2^(succ (log2 n)). Proof. assert (Pow : forall p q, pos (p^q) = (pos p)^(pos q)). { intros. now apply Pos.iter_swap_gen. } destruct n as [|[p|p|]|]; intros Hn; split; try easy; unfold log2; simpl succ; rewrite ?Pos.add_1_r, <- Pow. change (2^Pos.size p <= Pos.succ (p~0))%positive. apply Pos.lt_le_incl, Pos.lt_succ_r, Pos.size_le. apply Pos.size_gt. apply Pos.size_le. apply Pos.size_gt. Qed. Lemma log2_nonpos n : n<=0 -> log2 n = 0. Proof. destruct n as [|p|p]; trivial; now destruct 1. Qed. (** Specification of parity functions *) Lemma even_spec n : even n = true <-> Even n. Proof. split. exists (div2 n). now destruct n as [|[ | | ]|[ | | ]]. intros (m,->). now destruct m. Qed. Lemma odd_spec n : odd n = true <-> Odd n. Proof. split. exists (div2 n). destruct n as [|[ | | ]|[ | | ]]; simpl; try easy. now rewrite Pos.pred_double_succ. intros (m,->). now destruct m as [|[ | | ]|[ | | ]]. Qed. (** ** Multiplication and Doubling *) Lemma double_spec n : double n = 2*n. Proof. reflexivity. Qed. Lemma succ_double_spec n : succ_double n = 2*n + 1. Proof. now destruct n. Qed. Lemma pred_double_spec n : pred_double n = 2*n - 1. Proof. now destruct n. Qed. (** ** Correctness proofs for Trunc division *) Lemma pos_div_eucl_eq a b : 0 < b -> let (q, r) := pos_div_eucl a b in pos a = q * b + r. Proof. intros Hb. induction a; unfold pos_div_eucl; fold pos_div_eucl. - (* ~1 *) destruct pos_div_eucl as (q,r). change (pos a~1) with (2*(pos a)+1). rewrite IHa, mul_add_distr_l, mul_assoc. destruct ltb. now rewrite add_assoc. rewrite mul_add_distr_r, mul_1_l, <- !add_assoc. f_equal. unfold sub. now rewrite (add_comm _ (-b)), add_assoc, add_opp_diag_r. - (* ~0 *) destruct pos_div_eucl as (q,r). change (pos a~0) with (2*pos a). rewrite IHa, mul_add_distr_l, mul_assoc. destruct ltb. trivial. rewrite mul_add_distr_r, mul_1_l, <- !add_assoc. f_equal. unfold sub. now rewrite (add_comm _ (-b)), add_assoc, add_opp_diag_r. - (* 1 *) case leb_spec; trivial. intros Hb'. destruct b as [|b|b]; try easy; clear Hb. replace b with 1%positive; trivial. apply Pos.le_antisym. apply Pos.le_1_l. now apply Pos.lt_succ_r. Qed. Lemma div_eucl_eq a b : b<>0 -> let (q, r) := div_eucl a b in a = b * q + r. Proof. destruct a as [ |a|a], b as [ |b|b]; unfold div_eucl; trivial; (now destruct 1) || intros _; generalize (pos_div_eucl_eq a (pos b) (eq_refl _)); destruct pos_div_eucl as (q,r); rewrite mul_comm. - (* pos pos *) trivial. - (* pos neg *) intros ->. destruct r as [ |r|r]; rewrite <- !mul_opp_comm; trivial; rewrite mul_add_distr_l, mul_1_r, <- add_assoc; f_equal; now rewrite add_assoc, add_opp_diag_r. - (* neg pos *) change (neg a) with (- pos a). intros ->. rewrite (opp_add_distr _ r), <- mul_opp_r. destruct r as [ |r|r]; trivial; rewrite opp_add_distr, mul_add_distr_l, <- add_assoc; f_equal; unfold sub; now rewrite add_assoc, mul_opp_r, mul_1_r, add_opp_diag_l. - (* neg neg *) change (neg a) with (- pos a). intros ->. now rewrite opp_add_distr, <- mul_opp_l. Qed. Lemma div_mod a b : b<>0 -> a = b*(a/b) + (a mod b). Proof. intros Hb. generalize (div_eucl_eq a b Hb). 
unfold div, modulo. now destruct div_eucl. Qed. Lemma pos_div_eucl_bound a b : 0 0 <= snd (pos_div_eucl a b) < b. Proof. assert (AUX : forall m p, m < pos (p~0) -> m - pos p < pos p). intros m p. unfold lt. rewrite (compare_sub m), (compare_sub _ (pos _)). unfold sub. rewrite <- add_assoc. simpl opp; simpl (neg _ + _). now rewrite Pos.add_diag. intros Hb. destruct b as [|b|b]; discriminate Hb || clear Hb. induction a; unfold pos_div_eucl; fold pos_div_eucl. (* ~1 *) destruct pos_div_eucl as (q,r). simpl in IHa; destruct IHa as (Hr,Hr'). case ltb_spec; intros H; unfold snd. split; trivial. now destruct r. split. unfold le. now rewrite compare_antisym, <- compare_sub, <- compare_antisym. apply AUX. rewrite <- succ_double_spec. destruct r; try easy. unfold lt in *; simpl in *. now rewrite Pos.compare_xI_xO, Hr'. (* ~0 *) destruct pos_div_eucl as (q,r). simpl in IHa; destruct IHa as (Hr,Hr'). case ltb_spec; intros H; unfold snd. split; trivial. now destruct r. split. unfold le. now rewrite compare_antisym, <- compare_sub, <- compare_antisym. apply AUX. destruct r; try easy. (* 1 *) case leb_spec; intros H; simpl; split; try easy. red; simpl. now apply Pos.le_succ_l. Qed. Lemma mod_pos_bound a b : 0 < b -> 0 <= a mod b < b. Proof. destruct b as [|b|b]; try easy; intros _. destruct a as [|a|a]; unfold modulo, div_eucl. now split. now apply pos_div_eucl_bound. generalize (pos_div_eucl_bound a (pos b) (eq_refl _)). destruct pos_div_eucl as (q,r); unfold snd; intros (Hr,Hr'). destruct r as [|r|r]; (now destruct Hr) || clear Hr. now split. split. unfold le. now rewrite compare_antisym, <- compare_sub, <- compare_antisym, Hr'. unfold lt in *; simpl in *. rewrite pos_sub_gt by trivial. simpl. now apply Pos.sub_decr. Qed. Definition mod_bound_pos a b (_:0<=a) := mod_pos_bound a b. Lemma mod_neg_bound a b : b < 0 -> b < a mod b <= 0. Proof. destruct b as [|b|b]; try easy; intros _. destruct a as [|a|a]; unfold modulo, div_eucl. now split. generalize (pos_div_eucl_bound a (pos b) (eq_refl _)). destruct pos_div_eucl as (q,r); unfold snd; intros (Hr,Hr'). destruct r as [|r|r]; (now destruct Hr) || clear Hr. now split. split. unfold lt in *; simpl in *. rewrite pos_sub_lt by trivial. rewrite <- Pos.compare_antisym. now apply Pos.sub_decr. change (neg b - neg r <= 0). unfold le, lt in *. rewrite <- compare_sub. simpl in *. now rewrite <- Pos.compare_antisym, Hr'. generalize (pos_div_eucl_bound a (pos b) (eq_refl _)). destruct pos_div_eucl as (q,r); unfold snd; intros (Hr,Hr'). split; destruct r; try easy. red; simpl; now rewrite <- Pos.compare_antisym. Qed. (** ** Correctness proofs for Floor division *) Theorem quotrem_eq a b : let (q,r) := quotrem a b in a = q * b + r. Proof. destruct a as [|a|a], b as [|b|b]; simpl; trivial; generalize (N.pos_div_eucl_spec a (N.pos b)); case N.pos_div_eucl; trivial; intros q r; try change (neg a) with (-pos a); change (pos a) with (of_N (N.pos a)); intros ->; now destruct q, r. Qed. Lemma quot_rem' a b : a = b*(a÷b) + rem a b. Proof. rewrite mul_comm. generalize (quotrem_eq a b). unfold quot, rem. now destruct quotrem. Qed. Lemma quot_rem a b : b<>0 -> a = b*(a÷b) + rem a b. Proof. intros _. apply quot_rem'. Qed. Lemma rem_bound_pos a b : 0<=a -> 0 0 <= rem a b < b. Proof. intros Ha Hb. destruct b as [|b|b]; (now discriminate Hb) || clear Hb; destruct a as [|a|a]; (now destruct Ha) || clear Ha. compute. now split. unfold rem, quotrem. assert (H := N.pos_div_eucl_remainder a (N.pos b)). destruct N.pos_div_eucl as (q,[|r]); simpl; split; try easy. now apply H. Qed. 
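(* Editor's note, a hedged sketch that is not part of the original file: the
   bound lemmas above fix the sign conventions of the two division pairs.  By
   [mod_pos_bound] and [mod_neg_bound], the remainder of [mod] follows the sign
   of the divisor; by [rem_opp_l'] just below, the remainder of [rem] follows
   the sign of the dividend.  Concretely, if I have applied the definitions
   correctly, one should get

     (-7) mod 3 = 2      since -7 = 3 * (-3) + 2   and 0 <= 2 < 3
     7 mod (-3) = -2     since  7 = (-3) * (-3) - 2 and -3 < -2 <= 0
     Z.rem (-7) 3 = -1   since -7 = 3 * (-2) - 1,  i.e. - (Z.rem 7 3)

   which is the usual floor-style behaviour for [div]/[mod] and the
   truncate-towards-zero behaviour for [quot]/[rem]. *)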
Lemma rem_opp_l' a b : rem (-a) b = - (rem a b). Proof. destruct a, b; trivial; unfold rem; simpl; now destruct N.pos_div_eucl as (q,[|r]). Qed. Lemma rem_opp_r' a b : rem a (-b) = rem a b. Proof. destruct a, b; trivial; unfold rem; simpl; now destruct N.pos_div_eucl as (q,[|r]). Qed. Lemma rem_opp_l a b : b<>0 -> rem (-a) b = - (rem a b). Proof. intros _. apply rem_opp_l'. Qed. Lemma rem_opp_r a b : b<>0 -> rem a (-b) = rem a b. Proof. intros _. apply rem_opp_r'. Qed. (** ** Correctness proofs for gcd *) Lemma ggcd_gcd a b : fst (ggcd a b) = gcd a b. Proof. destruct a as [ |p|p], b as [ |q|q]; simpl; auto; generalize (Pos.ggcd_gcd p q); destruct Pos.ggcd as (g,(aa,bb)); simpl; congruence. Qed. Lemma ggcd_correct_divisors a b : let '(g,(aa,bb)) := ggcd a b in a = g*aa /\ b = g*bb. Proof. destruct a as [ |p|p], b as [ |q|q]; simpl; rewrite ?Pos.mul_1_r; auto; generalize (Pos.ggcd_correct_divisors p q); destruct Pos.ggcd as (g,(aa,bb)); simpl; destruct 1; now subst. Qed. Lemma gcd_divide_l a b : (gcd a b | a). Proof. rewrite <- ggcd_gcd. generalize (ggcd_correct_divisors a b). destruct ggcd as (g,(aa,bb)); simpl. intros (H,_). exists aa. now rewrite mul_comm. Qed. Lemma gcd_divide_r a b : (gcd a b | b). Proof. rewrite <- ggcd_gcd. generalize (ggcd_correct_divisors a b). destruct ggcd as (g,(aa,bb)); simpl. intros (_,H). exists bb. now rewrite mul_comm. Qed. Lemma gcd_greatest a b c : (c|a) -> (c|b) -> (c | gcd a b). Proof. assert (H : forall p q r, (r|pos p) -> (r|pos q) -> (r|pos (Pos.gcd p q))). { intros p q [|r|r] H H'. destruct H; now rewrite mul_comm in *. apply divide_Zpos, Pos.gcd_greatest; now apply divide_Zpos. apply divide_Zpos_Zneg_l, divide_Zpos, Pos.gcd_greatest; now apply divide_Zpos, divide_Zpos_Zneg_l. } destruct a, b; simpl; auto; intros; try apply H; trivial; now apply divide_Zpos_Zneg_r. Qed. Lemma gcd_nonneg a b : 0 <= gcd a b. Proof. now destruct a, b. Qed. (** ggcd and opp : an auxiliary result used in QArith *) Theorem ggcd_opp a b : ggcd (-a) b = (let '(g,(aa,bb)) := ggcd a b in (g,(-aa,bb))). Proof. destruct a as [|a|a], b as [|b|b]; unfold ggcd, opp; auto; destruct (Pos.ggcd a b) as (g,(aa,bb)); auto. Qed. (** ** Proofs of specifications for bitwise operations *) Lemma div2_spec a : div2 a = shiftr a 1. Proof. reflexivity. Qed. Lemma testbit_0_l n : testbit 0 n = false. Proof. now destruct n. Qed. Lemma testbit_neg_r a n : n<0 -> testbit a n = false. Proof. now destruct n. Qed. Lemma testbit_odd_0 a : testbit (2*a+1) 0 = true. Proof. now destruct a as [|a|[a|a|]]. Qed. Lemma testbit_even_0 a : testbit (2*a) 0 = false. Proof. now destruct a. Qed. Lemma testbit_odd_succ a n : 0<=n -> testbit (2*a+1) (succ n) = testbit a n. Proof. destruct n as [|n|n]; (now destruct 1) || intros _. destruct a as [|[a|a|]|[a|a|]]; simpl; trivial. now destruct a. unfold testbit; simpl. destruct a as [|a|[a|a|]]; simpl; trivial; rewrite ?Pos.add_1_r, ?Pos.pred_N_succ; now destruct n. Qed. Lemma testbit_even_succ a n : 0<=n -> testbit (2*a) (succ n) = testbit a n. Proof. destruct n as [|n|n]; (now destruct 1) || intros _. destruct a as [|[a|a|]|[a|a|]]; simpl; trivial. now destruct a. unfold testbit; simpl. destruct a as [|a|[a|a|]]; simpl; trivial; rewrite ?Pos.add_1_r, ?Pos.pred_N_succ; now destruct n. Qed. (** Correctness proofs about [Z.shiftr] and [Z.shiftl] *) Lemma shiftr_spec_aux a n m : 0<=n -> 0<=m -> testbit (shiftr a n) m = testbit a (m+n). Proof. intros Hn Hm. unfold shiftr. destruct n as [ |n|n]; (now destruct Hn) || clear Hn; simpl. now rewrite add_0_r. 
assert (forall p, to_N (m + pos p) = (to_N m + N.pos p)%N). destruct m; trivial; now destruct Hm. assert (forall p, 0 <= m + pos p). destruct m; easy || now destruct Hm. destruct a as [ |a|a]. (* a = 0 *) replace (Pos.iter n div2 0) with 0 by (apply Pos.iter_invariant; intros; subst; trivial). now rewrite 2 testbit_0_l. (* a > 0 *) change (pos a) with (of_N (N.pos a)) at 1. rewrite <- (Pos.iter_swap_gen _ _ _ N.div2) by now intros [|[ | | ]]. rewrite testbit_Zpos, testbit_of_N', H; trivial. exact (N.shiftr_spec' (N.pos a) (N.pos n) (to_N m)). (* a < 0 *) rewrite <- (Pos.iter_swap_gen _ _ _ Pos.div2_up) by trivial. rewrite 2 testbit_Zneg, H; trivial. f_equal. rewrite (Pos.iter_swap_gen _ _ _ _ N.div2) by exact N.pred_div2_up. exact (N.shiftr_spec' (Pos.pred_N a) (N.pos n) (to_N m)). Qed. Lemma shiftl_spec_low a n m : m testbit (shiftl a n) m = false. Proof. intros H. destruct n as [|n|n], m as [|m|m]; try easy; simpl shiftl. destruct (Pos.succ_pred_or n) as [-> | <-]; rewrite ?Pos.iter_succ; apply testbit_even_0. destruct a as [ |a|a]. (* a = 0 *) replace (Pos.iter n (mul 2) 0) with 0 by (apply Pos.iter_invariant; intros; subst; trivial). apply testbit_0_l. (* a > 0 *) rewrite <- (Pos.iter_swap_gen _ _ _ xO) by trivial. rewrite testbit_Zpos by easy. exact (N.shiftl_spec_low (N.pos a) (N.pos n) (N.pos m) H). (* a < 0 *) rewrite <- (Pos.iter_swap_gen _ _ _ xO) by trivial. rewrite testbit_Zneg by easy. now rewrite (N.pos_pred_shiftl_low a (N.pos n)). Qed. Lemma shiftl_spec_high a n m : 0<=m -> n<=m -> testbit (shiftl a n) m = testbit a (m-n). Proof. intros Hm H. destruct n as [ |n|n]. simpl. now rewrite sub_0_r. (* n > 0 *) destruct m as [ |m|m]; try (now destruct H). assert (0 <= pos m - pos n). red. now rewrite compare_antisym, <- compare_sub, <- compare_antisym. assert (EQ : to_N (pos m - pos n) = (N.pos m - N.pos n)%N). red in H. simpl in H. simpl to_N. rewrite pos_sub_spec, Pos.compare_antisym. destruct (Pos.compare_spec n m) as [H'|H'|H']; try (now destruct H). subst. now rewrite N.sub_diag. simpl. destruct (Pos.sub_mask_pos' m n H') as (p & -> & <-). f_equal. now rewrite Pos.add_comm, Pos.add_sub. destruct a; unfold shiftl. (* ... a = 0 *) replace (Pos.iter n (mul 2) 0) with 0 by (apply Pos.iter_invariant; intros; subst; trivial). now rewrite 2 testbit_0_l. (* ... a > 0 *) rewrite <- (Pos.iter_swap_gen _ _ _ xO) by trivial. rewrite 2 testbit_Zpos, EQ by easy. exact (N.shiftl_spec_high' (N.pos p) (N.pos n) (N.pos m) H). (* ... a < 0 *) rewrite <- (Pos.iter_swap_gen _ _ _ xO) by trivial. rewrite 2 testbit_Zneg, EQ by easy. f_equal. simpl to_N. rewrite <- N.shiftl_spec_high by easy. now apply (N.pos_pred_shiftl_high p (N.pos n)). (* n < 0 *) unfold sub. simpl. now apply (shiftr_spec_aux a (pos n) m). Qed. Lemma shiftr_spec a n m : 0<=m -> testbit (shiftr a n) m = testbit a (m+n). Proof. intros Hm. destruct (leb_spec 0 n). now apply shiftr_spec_aux. destruct (leb_spec (-n) m) as [LE|GT]. unfold shiftr. rewrite (shiftl_spec_high a (-n) m); trivial. now destruct n. unfold shiftr. rewrite (shiftl_spec_low a (-n) m); trivial. rewrite testbit_neg_r; trivial. red in GT. rewrite compare_sub in GT. now destruct n. Qed. (** Correctness proofs for bitwise operations *) Lemma lor_spec a b n : testbit (lor a b) n = testbit a n || testbit b n. Proof. destruct (leb_spec 0 n) as [Hn|Hn]; [|now rewrite !testbit_neg_r]. destruct a as [ |a|a], b as [ |b|b]; rewrite ?testbit_0_l, ?orb_false_r; trivial; unfold lor; rewrite ?testbit_Zpos, ?testbit_Zneg, ?N.pos_pred_succ by trivial. now rewrite <- N.lor_spec. 
now rewrite N.ldiff_spec, negb_andb, negb_involutive, orb_comm. now rewrite N.ldiff_spec, negb_andb, negb_involutive. now rewrite N.land_spec, negb_andb. Qed. Lemma land_spec a b n : testbit (land a b) n = testbit a n && testbit b n. Proof. destruct (leb_spec 0 n) as [Hn|Hn]; [|now rewrite !testbit_neg_r]. destruct a as [ |a|a], b as [ |b|b]; rewrite ?testbit_0_l, ?andb_false_r; trivial; unfold land; rewrite ?testbit_Zpos, ?testbit_Zneg, ?testbit_of_N', ?N.pos_pred_succ by trivial. now rewrite <- N.land_spec. now rewrite N.ldiff_spec. now rewrite N.ldiff_spec, andb_comm. now rewrite N.lor_spec, negb_orb. Qed. Lemma ldiff_spec a b n : testbit (ldiff a b) n = testbit a n && negb (testbit b n). Proof. destruct (leb_spec 0 n) as [Hn|Hn]; [|now rewrite !testbit_neg_r]. destruct a as [ |a|a], b as [ |b|b]; rewrite ?testbit_0_l, ?andb_true_r; trivial; unfold ldiff; rewrite ?testbit_Zpos, ?testbit_Zneg, ?testbit_of_N', ?N.pos_pred_succ by trivial. now rewrite <- N.ldiff_spec. now rewrite N.land_spec, negb_involutive. now rewrite N.lor_spec, negb_orb. now rewrite N.ldiff_spec, negb_involutive, andb_comm. Qed. Lemma lxor_spec a b n : testbit (lxor a b) n = xorb (testbit a n) (testbit b n). Proof. destruct (leb_spec 0 n) as [Hn|Hn]; [|now rewrite !testbit_neg_r]. destruct a as [ |a|a], b as [ |b|b]; rewrite ?testbit_0_l, ?xorb_false_l, ?xorb_false_r; trivial; unfold lxor; rewrite ?testbit_Zpos, ?testbit_Zneg, ?testbit_of_N', ?N.pos_pred_succ by trivial. now rewrite <- N.lxor_spec. now rewrite N.lxor_spec, negb_xorb_r. now rewrite N.lxor_spec, negb_xorb_l. now rewrite N.lxor_spec, xorb_negb_negb. Qed. (** ** Induction principles based on successor / predecessor *) Lemma peano_ind (P : Z -> Prop) : P 0 -> (forall x, P x -> P (succ x)) -> (forall x, P x -> P (pred x)) -> forall z, P z. Proof. intros H0 Hs Hp z; destruct z. assumption. induction p using Pos.peano_ind. now apply (Hs 0). rewrite <- Pos.add_1_r. now apply (Hs (pos p)). induction p using Pos.peano_ind. now apply (Hp 0). rewrite <- Pos.add_1_r. now apply (Hp (neg p)). Qed. Lemma bi_induction (P : Z -> Prop) : Proper (eq ==> iff) P -> P 0 -> (forall x, P x <-> P (succ x)) -> forall z, P z. Proof. intros _ H0 Hs. induction z using peano_ind. assumption. now apply -> Hs. apply Hs. now rewrite succ_pred. Qed. (** * Proofs of morphisms, obvious since eq is Leibniz *) Local Obligation Tactic := simpl_relation. Program Definition succ_wd : Proper (eq==>eq) succ := _. Program Definition pred_wd : Proper (eq==>eq) pred := _. Program Definition opp_wd : Proper (eq==>eq) opp := _. Program Definition add_wd : Proper (eq==>eq==>eq) add := _. Program Definition sub_wd : Proper (eq==>eq==>eq) sub := _. Program Definition mul_wd : Proper (eq==>eq==>eq) mul := _. Program Definition lt_wd : Proper (eq==>eq==>iff) lt := _. Program Definition div_wd : Proper (eq==>eq==>eq) div := _. Program Definition mod_wd : Proper (eq==>eq==>eq) modulo := _. Program Definition quot_wd : Proper (eq==>eq==>eq) quot := _. Program Definition rem_wd : Proper (eq==>eq==>eq) rem := _. Program Definition pow_wd : Proper (eq==>eq==>eq) pow := _. Program Definition testbit_wd : Proper (eq==>eq==>Logic.eq) testbit := _. (** The Bind Scope prevents Z to stay associated with abstract_scope. (TODO FIX) *) Include ZProp. Bind Scope Z_scope with Z. Include UsualMinMaxLogicalProperties <+ UsualMinMaxDecProperties. (** In generic statements, the predicates [lt] and [le] have been favored, whereas [gt] and [ge] don't even exist in the abstract layers. 
The use of [gt] and [ge] is hence not recommended. We provide here the bare minimal results to related them with [lt] and [le]. *) Lemma gt_lt_iff n m : n > m <-> m < n. Proof. unfold lt, gt. now rewrite compare_antisym, CompOpp_iff. Qed. Lemma gt_lt n m : n > m -> m < n. Proof. apply gt_lt_iff. Qed. Lemma lt_gt n m : n < m -> m > n. Proof. apply gt_lt_iff. Qed. Lemma ge_le_iff n m : n >= m <-> m <= n. Proof. unfold le, ge. now rewrite compare_antisym, CompOpp_iff. Qed. Lemma ge_le n m : n >= m -> m <= n. Proof. apply ge_le_iff. Qed. Lemma le_ge n m : n <= m -> m >= n. Proof. apply ge_le_iff. Qed. (** We provide a tactic converting from one style to the other. *) Ltac swap_greater := rewrite ?gt_lt_iff in *; rewrite ?ge_le_iff in *. (** Similarly, the boolean comparisons [ltb] and [leb] are favored over their dual [gtb] and [geb]. We prove here the equivalence and a few minimal results. *) Lemma gtb_ltb n m : (n >? m) = (m =? m) = (m <=? n). Proof. unfold geb, leb. rewrite compare_antisym. now case compare. Qed. Lemma gtb_lt n m : (n >? m) = true <-> m < n. Proof. rewrite gtb_ltb. apply ltb_lt. Qed. Lemma geb_le n m : (n >=? m) = true <-> m <= n. Proof. rewrite geb_leb. apply leb_le. Qed. Lemma gtb_spec n m : BoolSpec (m? m). Proof. rewrite gtb_ltb. apply ltb_spec. Qed. Lemma geb_spec n m : BoolSpec (m<=n) (n=? m). Proof. rewrite geb_leb. apply leb_spec. Qed. (** TODO : to add in Numbers ? *) Lemma add_reg_l n m p : n + m = n + p -> m = p. Proof. exact (proj1 (add_cancel_l m p n)). Qed. Lemma mul_reg_l n m p : p <> 0 -> p * n = p * m -> n = m. Proof. exact (fun Hp => proj1 (mul_cancel_l n m p Hp)). Qed. Lemma mul_reg_r n m p : p <> 0 -> n * p = m * p -> n = m. Proof. exact (fun Hp => proj1 (mul_cancel_r n m p Hp)). Qed. Lemma opp_eq_mul_m1 n : - n = n * -1. Proof. rewrite mul_comm. now destruct n. Qed. Lemma add_diag n : n + n = 2 * n. Proof. change 2 with (1+1). now rewrite mul_add_distr_r, !mul_1_l. Qed. (** * Comparison and opposite *) Lemma compare_opp n m : (- n ?= - m) = (m ?= n). Proof. destruct n, m; simpl; trivial; intros; now rewrite <- Pos.compare_antisym. Qed. (** * Comparison and addition *) Lemma add_compare_mono_l n m p : (n + m ?= n + p) = (m ?= p). Proof. rewrite (compare_sub m p), compare_sub. f_equal. unfold sub. rewrite opp_add_distr, (add_comm n m), add_assoc. f_equal. now rewrite <- add_assoc, add_opp_diag_r, add_0_r. Qed. End Z. (** Re-export Notations *) Infix "+" := Z.add : Z_scope. Notation "- x" := (Z.opp x) : Z_scope. Infix "-" := Z.sub : Z_scope. Infix "*" := Z.mul : Z_scope. Infix "^" := Z.pow : Z_scope. Infix "/" := Z.div : Z_scope. Infix "mod" := Z.modulo (at level 40, no associativity) : Z_scope. Infix "÷" := Z.quot (at level 40, left associativity) : Z_scope. Infix "?=" := Z.compare (at level 70, no associativity) : Z_scope. Infix "=?" := Z.eqb (at level 70, no associativity) : Z_scope. Infix "<=?" := Z.leb (at level 70, no associativity) : Z_scope. Infix "=?" := Z.geb (at level 70, no associativity) : Z_scope. Infix ">?" := Z.gtb (at level 70, no associativity) : Z_scope. Notation "( x | y )" := (Z.divide x y) (at level 0) : Z_scope. Infix "<=" := Z.le : Z_scope. Infix "<" := Z.lt : Z_scope. Infix ">=" := Z.ge : Z_scope. Infix ">" := Z.gt : Z_scope. Notation "x <= y <= z" := (x <= y /\ y <= z) : Z_scope. Notation "x <= y < z" := (x <= y /\ y < z) : Z_scope. Notation "x < y < z" := (x < y /\ y < z) : Z_scope. Notation "x < y <= z" := (x < y /\ y <= z) : Z_scope. (** Conversions from / to positive numbers *) Module Pos2Z. 
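(* Editor's note, an added comment that is not part of the original file,
   illustrating the comparison conventions set up just above: statements are
   preferably phrased with [lt]/[le] (and [ltb]/[leb]), and a goal stated with
   [>] or [>=] can first be normalised with the [Z.swap_greater] tactic.  This
   is exactly the pattern used elsewhere in this distribution, e.g. in Zmax.v
   the lemma [Zmax_left n m : n >= m -> Z.max n m = n] is proved by
   [Z.swap_greater. apply Z.max_l.]. *)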
Lemma id p : Z.to_pos (Z.pos p) = p. Proof. reflexivity. Qed. Lemma inj p q : Z.pos p = Z.pos q -> p = q. Proof. now injection 1. Qed. Lemma inj_iff p q : Z.pos p = Z.pos q <-> p = q. Proof. split. apply inj. intros; now f_equal. Qed. Lemma is_pos p : 0 < Z.pos p. Proof. reflexivity. Qed. Lemma is_nonneg p : 0 <= Z.pos p. Proof. easy. Qed. Lemma inj_1 : Z.pos 1 = 1. Proof. reflexivity. Qed. Lemma inj_xO p : Z.pos p~0 = 2 * Z.pos p. Proof. reflexivity. Qed. Lemma inj_xI p : Z.pos p~1 = 2 * Z.pos p + 1. Proof. reflexivity. Qed. Lemma inj_succ p : Z.pos (Pos.succ p) = Z.succ (Z.pos p). Proof. simpl. now rewrite Pos.add_1_r. Qed. Lemma inj_add p q : Z.pos (p+q) = Z.pos p + Z.pos q. Proof. reflexivity. Qed. Lemma inj_sub p q : (p < q)%positive -> Z.pos (q-p) = Z.pos q - Z.pos p. Proof. intros. simpl. now rewrite Z.pos_sub_gt. Qed. Lemma inj_sub_max p q : Z.pos (p - q) = Z.max 1 (Z.pos p - Z.pos q). Proof. simpl. rewrite Z.pos_sub_spec. case Pos.compare_spec; intros. - subst; now rewrite Pos.sub_diag. - now rewrite Pos.sub_lt. - now destruct (p-q)%positive. Qed. Lemma inj_pred p : p <> 1%positive -> Z.pos (Pos.pred p) = Z.pred (Z.pos p). Proof. destruct p; easy || now destruct 1. Qed. Lemma inj_mul p q : Z.pos (p*q) = Z.pos p * Z.pos q. Proof. reflexivity. Qed. Lemma inj_pow_pos p q : Z.pos (p^q) = Z.pow_pos (Z.pos p) q. Proof. now apply Pos.iter_swap_gen. Qed. Lemma inj_pow p q : Z.pos (p^q) = (Z.pos p)^(Z.pos q). Proof. apply inj_pow_pos. Qed. Lemma inj_square p : Z.pos (Pos.square p) = Z.square (Z.pos p). Proof. reflexivity. Qed. Lemma inj_compare p q : (p ?= q)%positive = (Z.pos p ?= Z.pos q). Proof. reflexivity. Qed. Lemma inj_leb p q : (p <=? q)%positive = (Z.pos p <=? Z.pos q). Proof. reflexivity. Qed. Lemma inj_ltb p q : (p (p|q)%positive. Proof. apply Z.Private_BootStrap.divide_Zpos. Qed. Lemma inj_testbit a n : 0<=n -> Z.testbit (Z.pos a) n = N.testbit (N.pos a) (Z.to_N n). Proof. apply Z.Private_BootStrap.testbit_Zpos. Qed. (** Some results concerning Z.neg *) Lemma inj_neg p q : Z.neg p = Z.neg q -> p = q. Proof. now injection 1. Qed. Lemma inj_neg_iff p q : Z.neg p = Z.neg q <-> p = q. Proof. split. apply inj_neg. intros; now f_equal. Qed. Lemma neg_is_neg p : Z.neg p < 0. Proof. reflexivity. Qed. Lemma neg_is_nonpos p : Z.neg p <= 0. Proof. easy. Qed. Lemma neg_xO p : Z.neg p~0 = 2 * Z.neg p. Proof. reflexivity. Qed. Lemma neg_xI p : Z.neg p~1 = 2 * Z.neg p - 1. Proof. reflexivity. Qed. Lemma opp_neg p : - Z.neg p = Z.pos p. Proof. reflexivity. Qed. Lemma opp_pos p : - Z.pos p = Z.neg p. Proof. reflexivity. Qed. Lemma add_neg_neg p q : Z.neg p + Z.neg q = Z.neg (p+q). Proof. reflexivity. Qed. Lemma add_pos_neg p q : Z.pos p + Z.neg q = Z.pos_sub p q. Proof. reflexivity. Qed. Lemma add_neg_pos p q : Z.neg p + Z.pos q = Z.pos_sub q p. Proof. reflexivity. Qed. Lemma divide_pos_neg_r n p : (n|Z.pos p) <-> (n|Z.neg p). Proof. apply Z.Private_BootStrap.divide_Zpos_Zneg_r. Qed. Lemma divide_pos_neg_l n p : (Z.pos p|n) <-> (Z.neg p|n). Proof. apply Z.Private_BootStrap.divide_Zpos_Zneg_l. Qed. Lemma testbit_neg a n : 0<=n -> Z.testbit (Z.neg a) n = negb (N.testbit (Pos.pred_N a) (Z.to_N n)). Proof. apply Z.Private_BootStrap.testbit_Zneg. Qed. End Pos2Z. Module Z2Pos. Lemma id x : 0 < x -> Z.pos (Z.to_pos x) = x. Proof. now destruct x. Qed. Lemma inj x y : 0 < x -> 0 < y -> Z.to_pos x = Z.to_pos y -> x = y. Proof. destruct x; simpl; try easy. intros _ H ->. now apply id. Qed. Lemma inj_iff x y : 0 < x -> 0 < y -> (Z.to_pos x = Z.to_pos y <-> x = y). Proof. split. now apply inj. 
intros; now f_equal. Qed. Lemma to_pos_nonpos x : x <= 0 -> Z.to_pos x = 1%positive. Proof. destruct x; trivial. now destruct 1. Qed. Lemma inj_1 : Z.to_pos 1 = 1%positive. Proof. reflexivity. Qed. Lemma inj_double x : 0 < x -> Z.to_pos (Z.double x) = (Z.to_pos x)~0%positive. Proof. now destruct x. Qed. Lemma inj_succ_double x : 0 < x -> Z.to_pos (Z.succ_double x) = (Z.to_pos x)~1%positive. Proof. now destruct x. Qed. Lemma inj_succ x : 0 < x -> Z.to_pos (Z.succ x) = Pos.succ (Z.to_pos x). Proof. destruct x; try easy. simpl. now rewrite Pos.add_1_r. Qed. Lemma inj_add x y : 0 < x -> 0 < y -> Z.to_pos (x+y) = (Z.to_pos x + Z.to_pos y)%positive. Proof. destruct x; easy || now destruct y. Qed. Lemma inj_sub x y : 0 < x < y -> Z.to_pos (y-x) = (Z.to_pos y - Z.to_pos x)%positive. Proof. destruct x; try easy. destruct y; try easy. simpl. intros. now rewrite Z.pos_sub_gt. Qed. Lemma inj_pred x : 1 < x -> Z.to_pos (Z.pred x) = Pos.pred (Z.to_pos x). Proof. now destruct x as [|[x|x|]|]. Qed. Lemma inj_mul x y : 0 < x -> 0 < y -> Z.to_pos (x*y) = (Z.to_pos x * Z.to_pos y)%positive. Proof. destruct x; easy || now destruct y. Qed. Lemma inj_pow x y : 0 < x -> 0 < y -> Z.to_pos (x^y) = (Z.to_pos x ^ Z.to_pos y)%positive. Proof. intros. apply Pos2Z.inj. rewrite Pos2Z.inj_pow, !id; trivial. apply Z.pow_pos_nonneg. trivial. now apply Z.lt_le_incl. Qed. Lemma inj_pow_pos x p : 0 < x -> Z.to_pos (Z.pow_pos x p) = ((Z.to_pos x)^p)%positive. Proof. intros. now apply (inj_pow x (Z.pos p)). Qed. Lemma inj_compare x y : 0 < x -> 0 < y -> (x ?= y) = (Z.to_pos x ?= Z.to_pos y)%positive. Proof. destruct x; easy || now destruct y. Qed. Lemma inj_leb x y : 0 < x -> 0 < y -> (x <=? y) = (Z.to_pos x <=? Z.to_pos y)%positive. Proof. destruct x; easy || now destruct y. Qed. Lemma inj_ltb x y : 0 < x -> 0 < y -> (x 0 < y -> (x =? y) = (Z.to_pos x =? Z.to_pos y)%positive. Proof. destruct x; easy || now destruct y. Qed. Lemma inj_max x y : Z.to_pos (Z.max x y) = Pos.max (Z.to_pos x) (Z.to_pos y). Proof. destruct x; simpl; try rewrite Pos.max_1_l. - now destruct y. - destruct y; simpl; now rewrite ?Pos.max_1_r, <- ?Pos2Z.inj_max. - destruct y; simpl; rewrite ?Pos.max_1_r; trivial. apply to_pos_nonpos. now apply Z.max_lub. Qed. Lemma inj_min x y : Z.to_pos (Z.min x y) = Pos.min (Z.to_pos x) (Z.to_pos y). Proof. destruct x; simpl; try rewrite Pos.min_1_l. - now destruct y. - destruct y; simpl; now rewrite ?Pos.min_1_r, <- ?Pos2Z.inj_min. - destruct y; simpl; rewrite ?Pos.min_1_r; trivial. apply to_pos_nonpos. apply Z.min_le_iff. now left. Qed. Lemma inj_sqrt x : Z.to_pos (Z.sqrt x) = Pos.sqrt (Z.to_pos x). Proof. now destruct x. Qed. Lemma inj_gcd x y : 0 < x -> 0 < y -> Z.to_pos (Z.gcd x y) = Pos.gcd (Z.to_pos x) (Z.to_pos y). Proof. destruct x; easy || now destruct y. Qed. End Z2Pos. (** Compatibility Notations *) Notation Zdouble_plus_one := Z.succ_double (compat "8.3"). Notation Zdouble_minus_one := Z.pred_double (compat "8.3"). Notation Zdouble := Z.double (compat "8.3"). Notation ZPminus := Z.pos_sub (compat "8.3"). Notation Zsucc' := Z.succ (compat "8.3"). Notation Zpred' := Z.pred (compat "8.3"). Notation Zplus' := Z.add (compat "8.3"). Notation Zplus := Z.add (compat "8.3"). (* Slightly incompatible *) Notation Zopp := Z.opp (compat "8.3"). Notation Zsucc := Z.succ (compat "8.3"). Notation Zpred := Z.pred (compat "8.3"). Notation Zminus := Z.sub (compat "8.3"). Notation Zmult := Z.mul (compat "8.3"). Notation Zcompare := Z.compare (compat "8.3"). Notation Zsgn := Z.sgn (compat "8.3"). 
Notation Zle := Z.le (compat "8.3"). Notation Zge := Z.ge (compat "8.3"). Notation Zlt := Z.lt (compat "8.3"). Notation Zgt := Z.gt (compat "8.3"). Notation Zmax := Z.max (compat "8.3"). Notation Zmin := Z.min (compat "8.3"). Notation Zabs := Z.abs (compat "8.3"). Notation Zabs_nat := Z.abs_nat (compat "8.3"). Notation Zabs_N := Z.abs_N (compat "8.3"). Notation Z_of_nat := Z.of_nat (compat "8.3"). Notation Z_of_N := Z.of_N (compat "8.3"). Notation Zind := Z.peano_ind (compat "8.3"). Notation Zopp_0 := Z.opp_0 (compat "8.3"). Notation Zopp_involutive := Z.opp_involutive (compat "8.3"). Notation Zopp_inj := Z.opp_inj (compat "8.3"). Notation Zplus_0_l := Z.add_0_l (compat "8.3"). Notation Zplus_0_r := Z.add_0_r (compat "8.3"). Notation Zplus_comm := Z.add_comm (compat "8.3"). Notation Zopp_plus_distr := Z.opp_add_distr (compat "8.3"). Notation Zopp_succ := Z.opp_succ (compat "8.3"). Notation Zplus_opp_r := Z.add_opp_diag_r (compat "8.3"). Notation Zplus_opp_l := Z.add_opp_diag_l (compat "8.3"). Notation Zplus_assoc := Z.add_assoc (compat "8.3"). Notation Zplus_permute := Z.add_shuffle3 (compat "8.3"). Notation Zplus_reg_l := Z.add_reg_l (compat "8.3"). Notation Zplus_succ_l := Z.add_succ_l (compat "8.3"). Notation Zplus_succ_comm := Z.add_succ_comm (compat "8.3"). Notation Zsucc_discr := Z.neq_succ_diag_r (compat "8.3"). Notation Zsucc_inj := Z.succ_inj (compat "8.3"). Notation Zsucc'_inj := Z.succ_inj (compat "8.3"). Notation Zsucc'_pred' := Z.succ_pred (compat "8.3"). Notation Zpred'_succ' := Z.pred_succ (compat "8.3"). Notation Zpred'_inj := Z.pred_inj (compat "8.3"). Notation Zsucc'_discr := Z.neq_succ_diag_r (compat "8.3"). Notation Zminus_0_r := Z.sub_0_r (compat "8.3"). Notation Zminus_diag := Z.sub_diag (compat "8.3"). Notation Zminus_plus_distr := Z.sub_add_distr (compat "8.3"). Notation Zminus_succ_r := Z.sub_succ_r (compat "8.3"). Notation Zminus_plus := Z.add_simpl_l (compat "8.3"). Notation Zmult_0_l := Z.mul_0_l (compat "8.3"). Notation Zmult_0_r := Z.mul_0_r (compat "8.3"). Notation Zmult_1_l := Z.mul_1_l (compat "8.3"). Notation Zmult_1_r := Z.mul_1_r (compat "8.3"). Notation Zmult_comm := Z.mul_comm (compat "8.3"). Notation Zmult_assoc := Z.mul_assoc (compat "8.3"). Notation Zmult_permute := Z.mul_shuffle3 (compat "8.3"). Notation Zmult_1_inversion_l := Z.mul_eq_1 (compat "8.3"). Notation Zdouble_mult := Z.double_spec (compat "8.3"). Notation Zdouble_plus_one_mult := Z.succ_double_spec (compat "8.3"). Notation Zopp_mult_distr_l_reverse := Z.mul_opp_l (compat "8.3"). Notation Zmult_opp_opp := Z.mul_opp_opp (compat "8.3"). Notation Zmult_opp_comm := Z.mul_opp_comm (compat "8.3"). Notation Zopp_eq_mult_neg_1 := Z.opp_eq_mul_m1 (compat "8.3"). Notation Zmult_plus_distr_r := Z.mul_add_distr_l (compat "8.3"). Notation Zmult_plus_distr_l := Z.mul_add_distr_r (compat "8.3"). Notation Zmult_minus_distr_r := Z.mul_sub_distr_r (compat "8.3"). Notation Zmult_reg_l := Z.mul_reg_l (compat "8.3"). Notation Zmult_reg_r := Z.mul_reg_r (compat "8.3"). Notation Zmult_succ_l := Z.mul_succ_l (compat "8.3"). Notation Zmult_succ_r := Z.mul_succ_r (compat "8.3"). Notation Zpos_xI := Pos2Z.inj_xI (compat "8.3"). Notation Zpos_xO := Pos2Z.inj_xO (compat "8.3"). Notation Zneg_xI := Pos2Z.neg_xI (compat "8.3"). Notation Zneg_xO := Pos2Z.neg_xO (compat "8.3"). Notation Zopp_neg := Pos2Z.opp_neg (compat "8.3"). Notation Zpos_succ_morphism := Pos2Z.inj_succ (compat "8.3"). Notation Zpos_mult_morphism := Pos2Z.inj_mul (compat "8.3"). Notation Zpos_minus_morphism := Pos2Z.inj_sub (compat "8.3"). 
Notation Zpos_eq_rev := Pos2Z.inj (compat "8.3"). Notation Zpos_plus_distr := Pos2Z.inj_add (compat "8.3"). Notation Zneg_plus_distr := Pos2Z.add_neg_neg (compat "8.3"). Notation Z := Z (only parsing). Notation Z_rect := Z_rect (only parsing). Notation Z_rec := Z_rec (only parsing). Notation Z_ind := Z_ind (only parsing). Notation Z0 := Z0 (only parsing). Notation Zpos := Zpos (only parsing). Notation Zneg := Zneg (only parsing). (** Compatibility lemmas. These could be notations, but scope information would be lost. *) Notation SYM1 lem := (fun n => eq_sym (lem n)). Notation SYM2 lem := (fun n m => eq_sym (lem n m)). Notation SYM3 lem := (fun n m p => eq_sym (lem n m p)). Lemma Zplus_assoc_reverse : forall n m p, n+m+p = n+(m+p). Proof (SYM3 Z.add_assoc). Lemma Zplus_succ_r_reverse : forall n m, Z.succ (n+m) = n+Z.succ m. Proof (SYM2 Z.add_succ_r). Notation Zplus_succ_r := Zplus_succ_r_reverse (only parsing). Lemma Zplus_0_r_reverse : forall n, n = n + 0. Proof (SYM1 Z.add_0_r). Lemma Zplus_eq_compat : forall n m p q, n=m -> p=q -> n+p=m+q. Proof (f_equal2 Z.add). Lemma Zsucc_pred : forall n, n = Z.succ (Z.pred n). Proof (SYM1 Z.succ_pred). Lemma Zpred_succ : forall n, n = Z.pred (Z.succ n). Proof (SYM1 Z.pred_succ). Lemma Zsucc_eq_compat : forall n m, n = m -> Z.succ n = Z.succ m. Proof (f_equal Z.succ). Lemma Zminus_0_l_reverse : forall n, n = n - 0. Proof (SYM1 Z.sub_0_r). Lemma Zminus_diag_reverse : forall n, 0 = n-n. Proof (SYM1 Z.sub_diag). Lemma Zminus_succ_l : forall n m, Z.succ (n - m) = Z.succ n - m. Proof (SYM2 Z.sub_succ_l). Lemma Zplus_minus_eq : forall n m p, n = m + p -> p = n - m. Proof. intros. now apply Z.add_move_l. Qed. Lemma Zplus_minus : forall n m, n + (m - n) = m. Proof (fun n m => eq_trans (Z.add_comm n (m-n)) (Z.sub_add n m)). Lemma Zminus_plus_simpl_l : forall n m p, p + n - (p + m) = n - m. Proof (fun n m p => Z.add_add_simpl_l_l p n m). Lemma Zminus_plus_simpl_l_reverse : forall n m p, n - m = p + n - (p + m). Proof (SYM3 Zminus_plus_simpl_l). Lemma Zminus_plus_simpl_r : forall n m p, n + p - (m + p) = n - m. Proof (fun n m p => Z.add_add_simpl_r_r n p m). Lemma Zeq_minus : forall n m, n = m -> n - m = 0. Proof (fun n m => proj2 (Z.sub_move_0_r n m)). Lemma Zminus_eq : forall n m, n - m = 0 -> n = m. Proof (fun n m => proj1 (Z.sub_move_0_r n m)). Lemma Zmult_0_r_reverse : forall n, 0 = n * 0. Proof (SYM1 Z.mul_0_r). Lemma Zmult_assoc_reverse : forall n m p, n * m * p = n * (m * p). Proof (SYM3 Z.mul_assoc). Lemma Zmult_integral : forall n m, n * m = 0 -> n = 0 \/ m = 0. Proof (fun n m => proj1 (Z.mul_eq_0 n m)). Lemma Zmult_integral_l : forall n m, n <> 0 -> m * n = 0 -> m = 0. Proof (fun n m H H' => Z.mul_eq_0_l m n H' H). Lemma Zopp_mult_distr_l : forall n m, - (n * m) = - n * m. Proof (SYM2 Z.mul_opp_l). Lemma Zopp_mult_distr_r : forall n m, - (n * m) = n * - m. Proof (SYM2 Z.mul_opp_r). Lemma Zmult_minus_distr_l : forall n m p, p * (n - m) = p * n - p * m. Proof (fun n m p => Z.mul_sub_distr_l p n m). Lemma Zmult_succ_r_reverse : forall n m, n * m + n = n * Z.succ m. Proof (SYM2 Z.mul_succ_r). Lemma Zmult_succ_l_reverse : forall n m, n * m + m = Z.succ n * m. Proof (SYM2 Z.mul_succ_l). Lemma Zpos_eq : forall p q, p = q -> Z.pos p = Z.pos q. Proof. congruence. Qed. Lemma Zpos_eq_iff : forall p q, p = q <-> Z.pos p = Z.pos q. Proof (fun p q => iff_sym (Pos2Z.inj_iff p q)). Hint Immediate Zsucc_pred: zarith. 
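(* Editor's note, an illustrative sketch that is not part of the original file:
   the [SYM1]/[SYM2]/[SYM3] notations above merely wrap [eq_sym], so that a
   compatibility lemma whose equality is stated with the sides swapped can be
   given as a one-line proof term.  For instance, [Zplus_0_r_reverse] above is
   literally [fun n => eq_sym (Z.add_0_r n)].  A hypothetical new alias would
   follow the same pattern, e.g.

     Lemma Zmult_1_l_reverse : forall n, n = 1 * n.
     Proof (SYM1 Z.mul_1_l).

   where the name [Zmult_1_l_reverse] is mine and not part of the library. *)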
(* Not kept : Zplus_0_simpl_l Zplus_0_simpl_l_reverse Zplus_opp_expand Zsucc_inj_contrapositive Zsucc_succ' Zpred_pred' *) (* No compat notation for : weak_assoc (now Z.add_assoc_pos) weak_Zmult_plus_distr_r (now Z.mul_add_distr_pos) *) (** Obsolete stuff *) Definition Zne (x y:Z) := x <> y. (* TODO : to remove someday ? *) Ltac elim_compare com1 com2 := case (Dcompare (com1 ?= com2)%Z); [ idtac | let x := fresh "H" in (intro x; case x; clear x) ]. Lemma ZL0 : 2%nat = (1 + 1)%nat. Proof. reflexivity. Qed. Lemma Zplus_diag_eq_mult_2 n : n + n = n * 2. Proof. rewrite Z.mul_comm. apply Z.add_diag. Qed. Lemma Z_eq_mult n m : m = 0 -> m * n = 0. Proof. intros; now subst. Qed. coq-8.4pl2/theories/ZArith/Znumtheory.v0000640000175000001440000006407112010532755017235 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (a | - b). Proof. apply Z.divide_opp_r. Qed. Lemma Zdivide_opp_r_rev a b : (a | - b) -> (a | b). Proof. apply Z.divide_opp_r. Qed. Lemma Zdivide_opp_l a b : (a | b) -> (- a | b). Proof. apply Z.divide_opp_l. Qed. Lemma Zdivide_opp_l_rev a b : (- a | b) -> (a | b). Proof. apply Z.divide_opp_l. Qed. Theorem Zdivide_Zabs_l a b : (Z.abs a | b) -> (a | b). Proof. apply Z.divide_abs_l. Qed. Theorem Zdivide_Zabs_inv_l a b : (a | b) -> (Z.abs a | b). Proof. apply Z.divide_abs_l. Qed. Hint Resolve Z.divide_refl Z.divide_1_l Z.divide_0_r: zarith. Hint Resolve Z.mul_divide_mono_l Z.mul_divide_mono_r: zarith. Hint Resolve Z.divide_add_r Zdivide_opp_r Zdivide_opp_r_rev Zdivide_opp_l Zdivide_opp_l_rev Z.divide_sub_r Z.divide_mul_l Z.divide_mul_r Z.divide_factor_l Z.divide_factor_r: zarith. (** Auxiliary result. *) Lemma Zmult_one x y : x >= 0 -> x * y = 1 -> x = 1. Proof. Z.swap_greater. apply Z.eq_mul_1_nonneg. Qed. (** Only [1] and [-1] divide [1]. *) Notation Zdivide_1 := Z.divide_1_r (compat "8.3"). (** If [a] divides [b] and [b] divides [a] then [a] is [b] or [-b]. *) Notation Zdivide_antisym := Z.divide_antisym (compat "8.3"). Notation Zdivide_trans := Z.divide_trans (compat "8.3"). (** If [a] divides [b] and [b<>0] then [|a| <= |b|]. *) Lemma Zdivide_bounds a b : (a | b) -> b <> 0 -> Z.abs a <= Z.abs b. Proof. intros H Hb. rewrite <- Z.divide_abs_l, <- Z.divide_abs_r in H. apply Z.abs_pos in Hb. now apply Z.divide_pos_le. Qed. (** [Z.divide] can be expressed using [Z.modulo]. *) Lemma Zmod_divide : forall a b, b<>0 -> a mod b = 0 -> (b | a). Proof. apply Z.mod_divide. Qed. Lemma Zdivide_mod : forall a b, (b | a) -> a mod b = 0. Proof. intros a b (c,->); apply Z_mod_mult. Qed. (** [Z.divide] is hence decidable *) Lemma Zdivide_dec a b : {(a | b)} + {~ (a | b)}. Proof. destruct (Z.eq_dec a 0) as [Ha|Ha]. destruct (Z.eq_dec b 0) as [Hb|Hb]. left; subst; apply Z.divide_0_r. right. subst. contradict Hb. now apply Z.divide_0_l. destruct (Z.eq_dec (b mod a) 0). left. now apply Z.mod_divide. right. now rewrite <- Z.mod_divide. Defined. Theorem Zdivide_Zdiv_eq a b : 0 < a -> (a | b) -> b = a * (b / a). Proof. intros Ha H. rewrite (Z.div_mod b a) at 1; auto with zarith. rewrite Zdivide_mod; auto with zarith. Qed. Theorem Zdivide_Zdiv_eq_2 a b c : 0 < a -> (a | b) -> (c * b) / a = c * (b / a). Proof. intros. apply Z.divide_div_mul_exact; auto with zarith. Qed. Theorem Zdivide_le: forall a b : Z, 0 <= a -> 0 < b -> (a | b) -> a <= b. Proof. intros. now apply Z.divide_pos_le. Qed. Theorem Zdivide_Zdiv_lt_pos a b : 1 < a -> 0 < b -> (a | b) -> 0 < b / a < b . Proof. intros H1 H2 H3; split. 
apply Z.mul_pos_cancel_l with a; auto with zarith. rewrite <- Zdivide_Zdiv_eq; auto with zarith. now apply Z.div_lt. Qed. Lemma Zmod_div_mod n m a: 0 < n -> 0 < m -> (n | m) -> a mod n = (a mod m) mod n. Proof. intros H1 H2 (p,Hp). rewrite (Z.div_mod a m) at 1; auto with zarith. rewrite Hp at 1. rewrite Z.mul_shuffle0, Z.add_comm, Z.mod_add; auto with zarith. Qed. Lemma Zmod_divide_minus a b c: 0 < b -> a mod b = c -> (b | a - c). Proof. intros H H1. apply Z.mod_divide; auto with zarith. rewrite Zminus_mod; auto with zarith. rewrite H1. rewrite <- (Z.mod_small c b) at 1. rewrite Z.sub_diag, Z.mod_0_l; auto with zarith. subst. now apply Z.mod_pos_bound. Qed. Lemma Zdivide_mod_minus a b c: 0 <= c < b -> (b | a - c) -> a mod b = c. Proof. intros (H1, H2) H3. assert (0 < b) by Z.order. replace a with ((a - c) + c); auto with zarith. rewrite Z.add_mod; auto with zarith. rewrite (Zdivide_mod (a-c) b); try rewrite Z.add_0_l; auto with zarith. rewrite Z.mod_mod; try apply Zmod_small; auto with zarith. Qed. (** * Greatest common divisor (gcd). *) (** There is no unicity of the gcd; hence we define the predicate [Zis_gcd a b g] expressing that [g] is a gcd of [a] and [b]. (We show later that the [gcd] is actually unique if we discard its sign.) *) Inductive Zis_gcd (a b g:Z) : Prop := Zis_gcd_intro : (g | a) -> (g | b) -> (forall x, (x | a) -> (x | b) -> (x | g)) -> Zis_gcd a b g. (** Trivial properties of [gcd] *) Lemma Zis_gcd_sym : forall a b d, Zis_gcd a b d -> Zis_gcd b a d. Proof. induction 1; constructor; intuition. Qed. Lemma Zis_gcd_0 : forall a, Zis_gcd a 0 a. Proof. constructor; auto with zarith. Qed. Lemma Zis_gcd_1 : forall a, Zis_gcd a 1 1. Proof. constructor; auto with zarith. Qed. Lemma Zis_gcd_refl : forall a, Zis_gcd a a a. Proof. constructor; auto with zarith. Qed. Lemma Zis_gcd_minus : forall a b d, Zis_gcd a (- b) d -> Zis_gcd b a d. Proof. induction 1; constructor; intuition. Qed. Lemma Zis_gcd_opp : forall a b d, Zis_gcd a b d -> Zis_gcd b a (- d). Proof. induction 1; constructor; intuition. Qed. Lemma Zis_gcd_0_abs a : Zis_gcd 0 a (Z.abs a). Proof. apply Zabs_ind. intros; apply Zis_gcd_sym; apply Zis_gcd_0; auto. intros; apply Zis_gcd_opp; apply Zis_gcd_0; auto. Qed. Hint Resolve Zis_gcd_sym Zis_gcd_0 Zis_gcd_minus Zis_gcd_opp: zarith. Theorem Zis_gcd_unique: forall a b c d : Z, Zis_gcd a b c -> Zis_gcd a b d -> c = d \/ c = (- d). Proof. intros a b c d [Hc1 Hc2 Hc3] [Hd1 Hd2 Hd3]. assert (c|d) by auto. assert (d|c) by auto. apply Z.divide_antisym; auto. Qed. (** * Extended Euclid algorithm. *) (** Euclid's algorithm to compute the [gcd] mainly relies on the following property. *) Lemma Zis_gcd_for_euclid : forall a b d q:Z, Zis_gcd b (a - q * b) d -> Zis_gcd a b d. Proof. simple induction 1; constructor; intuition. replace a with (a - q * b + q * b). auto with zarith. ring. Qed. Lemma Zis_gcd_for_euclid2 : forall b d q r:Z, Zis_gcd r b d -> Zis_gcd b (b * q + r) d. Proof. simple induction 1; constructor; intuition. apply H2; auto. replace r with (b * q + r - b * q). auto with zarith. ring. Qed. (** We implement the extended version of Euclid's algorithm, i.e. the one computing Bezout's coefficients as it computes the [gcd]. We follow the algorithm given in Knuth's "Art of Computer Programming", vol 2, page 325. *) Section extended_euclid_algorithm. Variables a b : Z. (** The specification of Euclid's algorithm is the existence of [u], [v] and [d] such that [ua+vb=d] and [(gcd a b d)]. 
*) Inductive Euclid : Set := Euclid_intro : forall u v d:Z, u * a + v * b = d -> Zis_gcd a b d -> Euclid. (** The recursive part of Euclid's algorithm uses well-founded recursion of non-negative integers. It maintains 6 integers [u1,u2,u3,v1,v2,v3] such that the following invariant holds: [u1*a+u2*b=u3] and [v1*a+v2*b=v3] and [gcd(u3,v3)=gcd(a,b)]. *) Lemma euclid_rec : forall v3:Z, 0 <= v3 -> forall u1 u2 u3 v1 v2:Z, u1 * a + u2 * b = u3 -> v1 * a + v2 * b = v3 -> (forall d:Z, Zis_gcd u3 v3 d -> Zis_gcd a b d) -> Euclid. Proof. intros v3 Hv3; generalize Hv3; pattern v3. apply Zlt_0_rec. clear v3 Hv3; intros. elim (Z_zerop x); intro. apply Euclid_intro with (u := u1) (v := u2) (d := u3). assumption. apply H3. rewrite a0; auto with zarith. set (q := u3 / x) in *. assert (Hq : 0 <= u3 - q * x < x). replace (u3 - q * x) with (u3 mod x). apply Z_mod_lt; omega. assert (xpos : x > 0). omega. generalize (Z_div_mod_eq u3 x xpos). unfold q. intro eq; pattern u3 at 2; rewrite eq; ring. apply (H (u3 - q * x) Hq (proj1 Hq) v1 v2 x (u1 - q * v1) (u2 - q * v2)). tauto. replace ((u1 - q * v1) * a + (u2 - q * v2) * b) with (u1 * a + u2 * b - q * (v1 * a + v2 * b)). rewrite H1; rewrite H2; trivial. ring. intros; apply H3. apply Zis_gcd_for_euclid with q; assumption. assumption. Qed. (** We get Euclid's algorithm by applying [euclid_rec] on [1,0,a,0,1,b] when [b>=0] and [1,0,a,0,-1,-b] when [b<0]. *) Lemma euclid : Euclid. Proof. case (Z_le_gt_dec 0 b); intro. intros; apply euclid_rec with (u1 := 1) (u2 := 0) (u3 := a) (v1 := 0) (v2 := 1) (v3 := b); auto with zarith; ring. intros; apply euclid_rec with (u1 := 1) (u2 := 0) (u3 := a) (v1 := 0) (v2 := -1) (v3 := - b); auto with zarith; try ring. Qed. End extended_euclid_algorithm. Theorem Zis_gcd_uniqueness_apart_sign : forall a b d d':Z, Zis_gcd a b d -> Zis_gcd a b d' -> d = d' \/ d = - d'. Proof. simple induction 1. intros H1 H2 H3; simple induction 1; intros. generalize (H3 d' H4 H5); intro Hd'd. generalize (H6 d H1 H2); intro Hdd'. exact (Z.divide_antisym d d' Hdd' Hd'd). Qed. (** * Bezout's coefficients *) Inductive Bezout (a b d:Z) : Prop := Bezout_intro : forall u v:Z, u * a + v * b = d -> Bezout a b d. (** Existence of Bezout's coefficients for the [gcd] of [a] and [b] *) Lemma Zis_gcd_bezout : forall a b d:Z, Zis_gcd a b d -> Bezout a b d. Proof. intros a b d Hgcd. elim (euclid a b); intros u v d0 e g. generalize (Zis_gcd_uniqueness_apart_sign a b d d0 Hgcd g). intro H; elim H; clear H; intros. apply Bezout_intro with u v. rewrite H; assumption. apply Bezout_intro with (- u) (- v). rewrite H; rewrite <- e; ring. Qed. (** gcd of [ca] and [cb] is [c gcd(a,b)]. *) Lemma Zis_gcd_mult : forall a b c d:Z, Zis_gcd a b d -> Zis_gcd (c * a) (c * b) (c * d). Proof. intros a b c d; simple induction 1. constructor; auto with zarith. intros x Ha Hb. elim (Zis_gcd_bezout a b d H). intros u v Huv. elim Ha; intros a' Ha'. elim Hb; intros b' Hb'. apply Zdivide_intro with (u * a' + v * b'). rewrite <- Huv. replace (c * (u * a + v * b)) with (u * (c * a) + v * (c * b)). rewrite Ha'; rewrite Hb'; ring. ring. Qed. (** * Relative primality *) Definition rel_prime (a b:Z) : Prop := Zis_gcd a b 1. (** Bezout's theorem: [a] and [b] are relatively prime if and only if there exist [u] and [v] such that [ua+vb = 1]. *) Lemma rel_prime_bezout : forall a b:Z, rel_prime a b -> Bezout a b 1. Proof. intros a b; exact (Zis_gcd_bezout a b 1). Qed. Lemma bezout_rel_prime : forall a b:Z, Bezout a b 1 -> rel_prime a b. Proof. simple induction 1; constructor; auto with zarith. intros. 
rewrite <- H0; auto with zarith. Qed. (** Gauss's theorem: if [a] divides [bc] and if [a] and [b] are relatively prime, then [a] divides [c]. *) Theorem Gauss : forall a b c:Z, (a | b * c) -> rel_prime a b -> (a | c). Proof. intros. elim (rel_prime_bezout a b H0); intros. replace c with (c * 1); [ idtac | ring ]. rewrite <- H1. replace (c * (u * a + v * b)) with (c * u * a + v * (b * c)); [ eauto with zarith | ring ]. Qed. (** If [a] is relatively prime to [b] and [c], then it is to [bc] *) Lemma rel_prime_mult : forall a b c:Z, rel_prime a b -> rel_prime a c -> rel_prime a (b * c). Proof. intros a b c Hb Hc. elim (rel_prime_bezout a b Hb); intros. elim (rel_prime_bezout a c Hc); intros. apply bezout_rel_prime. apply Bezout_intro with (u := u * u0 * a + v0 * c * u + u0 * v * b) (v := v * v0). rewrite <- H. replace (u * a + v * b) with ((u * a + v * b) * 1); [ idtac | ring ]. rewrite <- H0. ring. Qed. Lemma rel_prime_cross_prod : forall a b c d:Z, rel_prime a b -> rel_prime c d -> b > 0 -> d > 0 -> a * d = b * c -> a = c /\ b = d. Proof. intros a b c d; intros. elim (Z.divide_antisym b d). split; auto with zarith. rewrite H4 in H3. rewrite Z.mul_comm in H3. apply Z.mul_reg_l with d; auto with zarith. intros; omega. apply Gauss with a. rewrite H3. auto with zarith. red; auto with zarith. apply Gauss with c. rewrite Z.mul_comm. rewrite <- H3. auto with zarith. red; auto with zarith. Qed. (** After factorization by a gcd, the original numbers are relatively prime. *) Lemma Zis_gcd_rel_prime : forall a b g:Z, b > 0 -> g >= 0 -> Zis_gcd a b g -> rel_prime (a / g) (b / g). Proof. intros a b g; intros. assert (g <> 0). intro. elim H1; intros. elim H4; intros. rewrite H2 in H6; subst b; omega. unfold rel_prime. destruct H1. destruct H1 as (a',H1). destruct H3 as (b',H3). replace (a/g) with a'; [|rewrite H1; rewrite Z_div_mult; auto with zarith]. replace (b/g) with b'; [|rewrite H3; rewrite Z_div_mult; auto with zarith]. constructor. exists a'; auto with zarith. exists b'; auto with zarith. intros x (xa,H5) (xb,H6). destruct (H4 (x*g)) as (x',Hx'). exists xa; rewrite Z.mul_assoc; rewrite <- H5; auto. exists xb; rewrite Z.mul_assoc; rewrite <- H6; auto. replace g with (1*g) in Hx'; auto with zarith. do 2 rewrite Z.mul_assoc in Hx'. apply Z.mul_reg_r in Hx'; trivial. rewrite Z.mul_1_r in Hx'. exists x'; auto with zarith. Qed. Theorem rel_prime_sym: forall a b, rel_prime a b -> rel_prime b a. Proof. intros a b H; auto with zarith. red; apply Zis_gcd_sym; auto with zarith. Qed. Theorem rel_prime_div: forall p q r, rel_prime p q -> (r | p) -> rel_prime r q. Proof. intros p q r H (u, H1); subst. inversion_clear H as [H1 H2 H3]. red; apply Zis_gcd_intro; try apply Z.divide_1_l. intros x H4 H5; apply H3; auto. apply Z.divide_mul_r; auto. Qed. Theorem rel_prime_1: forall n, rel_prime 1 n. Proof. intros n; red; apply Zis_gcd_intro; auto. exists 1; auto with zarith. exists n; auto with zarith. Qed. Theorem not_rel_prime_0: forall n, 1 < n -> ~ rel_prime 0 n. Proof. intros n H H1; absurd (n = 1 \/ n = -1). intros [H2 | H2]; subst; contradict H; auto with zarith. case (Zis_gcd_unique 0 n n 1); auto. apply Zis_gcd_intro; auto. exists 0; auto with zarith. exists 1; auto with zarith. Qed. Theorem rel_prime_mod: forall p q, 0 < q -> rel_prime p q -> rel_prime (p mod q) q. Proof. intros p q H H0. assert (H1: Bezout p q 1). apply rel_prime_bezout; auto. inversion_clear H1 as [q1 r1 H2]. apply bezout_rel_prime. apply Bezout_intro with q1 (r1 + q1 * (p / q)). rewrite <- H2. 
pattern p at 3; rewrite (Z_div_mod_eq p q); try ring; auto with zarith. Qed. Theorem rel_prime_mod_rev: forall p q, 0 < q -> rel_prime (p mod q) q -> rel_prime p q. Proof. intros p q H H0. rewrite (Z_div_mod_eq p q); auto with zarith; red. apply Zis_gcd_sym; apply Zis_gcd_for_euclid2; auto with zarith. Qed. Theorem Zrel_prime_neq_mod_0: forall a b, 1 < b -> rel_prime a b -> a mod b <> 0. Proof. intros a b H H1 H2. case (not_rel_prime_0 _ H). rewrite <- H2. apply rel_prime_mod; auto with zarith. Qed. (** * Primality *) Inductive prime (p:Z) : Prop := prime_intro : 1 < p -> (forall n:Z, 1 <= n < p -> rel_prime n p) -> prime p. (** The sole divisors of a prime number [p] are [-1], [1], [p] and [-p]. *) Lemma prime_divisors : forall p:Z, prime p -> forall a:Z, (a | p) -> a = -1 \/ a = 1 \/ a = p \/ a = - p. Proof. destruct 1; intros. assert (a = - p \/ - p < a < -1 \/ a = -1 \/ a = 0 \/ a = 1 \/ 1 < a < p \/ a = p). { assert (Z.abs a <= Z.abs p) as H2. apply Zdivide_bounds; [ assumption | omega ]. revert H2. pattern (Z.abs a); apply Zabs_ind; pattern (Z.abs p); apply Zabs_ind; intros; omega. } intuition idtac. (* -p < a < -1 *) - absurd (rel_prime (- a) p); intuition. inversion H2. assert (- a | - a) by auto with zarith. assert (- a | p) by auto with zarith. apply H7, Z.divide_1_r in H8; intuition. (* a = 0 *) - inversion H1. subst a; omega. (* 1 < a < p *) - absurd (rel_prime a p); intuition. inversion H2. assert (a | a) by auto with zarith. assert (a | p) by auto with zarith. apply H7, Z.divide_1_r in H8; intuition. Qed. (** A prime number is relatively prime with any number it does not divide *) Lemma prime_rel_prime : forall p:Z, prime p -> forall a:Z, ~ (p | a) -> rel_prime p a. Proof. simple induction 1; intros. constructor; intuition. elim (prime_divisors p H x H3); intuition; subst; auto with zarith. absurd (p | a); auto with zarith. absurd (p | a); intuition. Qed. Hint Resolve prime_rel_prime: zarith. (** As a consequence, a prime number is relatively prime with smaller numbers *) Theorem rel_prime_le_prime: forall a p, prime p -> 1 <= a < p -> rel_prime a p. Proof. intros a p Hp [H1 H2]. apply rel_prime_sym; apply prime_rel_prime; auto. intros [q Hq]; subst a. case (Z.le_gt_cases q 0); intros Hl. absurd (q * p <= 0 * p); auto with zarith. absurd (1 * p <= q * p); auto with zarith. Qed. (** If a prime [p] divides [ab] then it divides either [a] or [b] *) Lemma prime_mult : forall p:Z, prime p -> forall a b:Z, (p | a * b) -> (p | a) \/ (p | b). Proof. intro p; simple induction 1; intros. case (Zdivide_dec p a); intuition. right; apply Gauss with a; auto with zarith. Qed. Lemma not_prime_0: ~ prime 0. Proof. intros H1; case (prime_divisors _ H1 2); auto with zarith. Qed. Lemma not_prime_1: ~ prime 1. Proof. intros H1; absurd (1 < 1); auto with zarith. inversion H1; auto. Qed. Lemma prime_2: prime 2. Proof. apply prime_intro; auto with zarith. intros n (H,H'); Z.le_elim H; auto with zarith. - contradict H'; auto with zarith. - subst n. constructor; auto with zarith. Qed. Theorem prime_3: prime 3. Proof. apply prime_intro; auto with zarith. intros n (H,H'); Z.le_elim H; auto with zarith. - replace n with 2 by omega. constructor; auto with zarith. intros x (q,Hq) (q',Hq'). exists (q' - q). ring_simplify. now rewrite <- Hq, <- Hq'. - replace n with 1 by trivial. constructor; auto with zarith. Qed. Theorem prime_ge_2 p : prime p -> 2 <= p. Proof. intros (Hp,_); auto with zarith. Qed. Definition prime' p := 1
<p /\ (forall n, 1<n<p ->
      ~ (n|p)). Lemma Z_0_1_more x : 0<=x -> x=0 \/ x=1 \/ 1 prime p. Proof. split; intros (Hp,H). - (* prime -> prime' *) constructor; trivial; intros n Hn. constructor; auto with zarith; intros x Hxn Hxp. rewrite <- Z.divide_abs_l in Hxn, Hxp |- *. assert (Hx := Z.abs_nonneg x). set (y:=Z.abs x) in *; clearbody y; clear x; rename y into x. destruct (Z_0_1_more x Hx) as [->|[->|Hx']]. + exfalso. apply Z.divide_0_l in Hxn. omega. + now exists 1. + elim (H x); auto. split; trivial. apply Z.le_lt_trans with n; auto with zarith. apply Z.divide_pos_le; auto with zarith. - (* prime' -> prime *) constructor; trivial. intros n Hn Hnp. case (Zis_gcd_unique n p n 1); auto with zarith. constructor; auto with zarith. apply H; auto with zarith. Qed. Theorem square_not_prime: forall a, ~ prime (a * a). Proof. intros a Ha. rewrite <- (Z.abs_square a) in Ha. assert (H:=Z.abs_nonneg a). set (b:=Z.abs a) in *; clearbody b; clear a; rename b into a. rewrite <- prime_alt in Ha; destruct Ha as (Ha,Ha'). assert (H' : 1 < a) by now apply (Z.square_lt_simpl_nonneg 1). apply (Ha' a). + split; trivial. rewrite <- (Z.mul_1_l a) at 1. apply Z.mul_lt_mono_pos_r; omega. + exists a; auto. Qed. Theorem prime_div_prime: forall p q, prime p -> prime q -> (p | q) -> p = q. Proof. intros p q H H1 H2; assert (Hp: 0 < p); try apply Z.lt_le_trans with 2; try apply prime_ge_2; auto with zarith. assert (Hq: 0 < q); try apply Z.lt_le_trans with 2; try apply prime_ge_2; auto with zarith. case prime_divisors with (2 := H2); auto. intros H4; contradict Hp; subst; auto with zarith. intros [H4| [H4 | H4]]; subst; auto. contradict H; auto; apply not_prime_1. contradict Hp; auto with zarith. Qed. (** we now prove that [Z.gcd] is indeed a gcd in the sense of [Zis_gcd]. *) Notation Zgcd_is_pos := Z.gcd_nonneg (compat "8.3"). Lemma Zgcd_is_gcd : forall a b, Zis_gcd a b (Z.gcd a b). Proof. constructor. apply Z.gcd_divide_l. apply Z.gcd_divide_r. apply Z.gcd_greatest. Qed. Theorem Zgcd_spec : forall x y : Z, {z : Z | Zis_gcd x y z /\ 0 <= z}. Proof. intros x y; exists (Z.gcd x y). split; [apply Zgcd_is_gcd | apply Z.gcd_nonneg]. Qed. Theorem Zdivide_Zgcd: forall p q r : Z, (p | q) -> (p | r) -> (p | Z.gcd q r). Proof. intros. now apply Z.gcd_greatest. Qed. Theorem Zis_gcd_gcd: forall a b c : Z, 0 <= c -> Zis_gcd a b c -> Z.gcd a b = c. Proof. intros a b c H1 H2. case (Zis_gcd_uniqueness_apart_sign a b c (Z.gcd a b)); auto. apply Zgcd_is_gcd; auto. Z.le_elim H1. - generalize (Z.gcd_nonneg a b); auto with zarith. - subst. now case (Z.gcd a b). Qed. Notation Zgcd_inv_0_l := Z.gcd_eq_0_l (compat "8.3"). Notation Zgcd_inv_0_r := Z.gcd_eq_0_r (compat "8.3"). Theorem Zgcd_div_swap0 : forall a b : Z, 0 < Z.gcd a b -> 0 < b -> (a / Z.gcd a b) * b = a * (b/Z.gcd a b). Proof. intros a b Hg Hb. assert (F := Zgcd_is_gcd a b); inversion F as [F1 F2 F3]. pattern b at 2; rewrite (Zdivide_Zdiv_eq (Z.gcd a b) b); auto. repeat rewrite Z.mul_assoc; f_equal. rewrite Z.mul_comm. rewrite <- Zdivide_Zdiv_eq; auto. Qed. Theorem Zgcd_div_swap : forall a b c : Z, 0 < Z.gcd a b -> 0 < b -> (c * a) / Z.gcd a b * b = c * a * (b/Z.gcd a b). Proof. intros a b c Hg Hb. assert (F := Zgcd_is_gcd a b); inversion F as [F1 F2 F3]. pattern b at 2; rewrite (Zdivide_Zdiv_eq (Z.gcd a b) b); auto. repeat rewrite Z.mul_assoc; f_equal. rewrite Zdivide_Zdiv_eq_2; auto. repeat rewrite <- Z.mul_assoc; f_equal. rewrite Z.mul_comm. rewrite <- Zdivide_Zdiv_eq; auto. Qed. Notation Zgcd_comm := Z.gcd_comm (compat "8.3"). Lemma Zgcd_ass a b c : Z.gcd (Z.gcd a b) c = Z.gcd a (Z.gcd b c). Proof. 
symmetry. apply Z.gcd_assoc. Qed. Notation Zgcd_Zabs := Z.gcd_abs_l (compat "8.3"). Notation Zgcd_0 := Z.gcd_0_r (compat "8.3"). Notation Zgcd_1 := Z.gcd_1_r (compat "8.3"). Hint Resolve Z.gcd_0_r Z.gcd_1_r : zarith. Theorem Zgcd_1_rel_prime : forall a b, Z.gcd a b = 1 <-> rel_prime a b. Proof. unfold rel_prime; split; intro H. rewrite <- H; apply Zgcd_is_gcd. case (Zis_gcd_unique a b (Z.gcd a b) 1); auto. apply Zgcd_is_gcd. intros H2; absurd (0 <= Z.gcd a b); auto with zarith. generalize (Z.gcd_nonneg a b); auto with zarith. Qed. Definition rel_prime_dec: forall a b, { rel_prime a b }+{ ~ rel_prime a b }. Proof. intros a b; case (Z.eq_dec (Z.gcd a b) 1); intros H1. left; apply -> Zgcd_1_rel_prime; auto. right; contradict H1; apply <- Zgcd_1_rel_prime; auto. Defined. Definition prime_dec_aux: forall p m, { forall n, 1 < n < m -> rel_prime n p } + { exists n, 1 < n < m /\ ~ rel_prime n p }. Proof. intros p m. case (Z_lt_dec 1 m); intros H1; [ | left; intros; exfalso; omega ]. pattern m; apply natlike_rec; auto with zarith. left; intros; exfalso; omega. intros x Hx IH; destruct IH as [F|E]. destruct (rel_prime_dec x p) as [Y|N]. left; intros n [HH1 HH2]. rewrite Z.lt_succ_r in HH2. Z.le_elim HH2; subst; auto with zarith. - case (Z_lt_dec 1 x); intros HH1. * right; exists x; split; auto with zarith. * left; intros n [HHH1 HHH2]; contradict HHH1; auto with zarith. - right; destruct E as (n,((H0,H2),H3)); exists n; auto with zarith. Defined. Definition prime_dec: forall p, { prime p }+{ ~ prime p }. Proof. intros p; case (Z_lt_dec 1 p); intros H1. + case (prime_dec_aux p p); intros H2. * left; apply prime_intro; auto. intros n (Hn1,Hn2). Z.le_elim Hn1; auto; subst n. constructor; auto with zarith. * right; intros H3; inversion_clear H3 as [Hp1 Hp2]. case H2; intros n [Hn1 Hn2]; case Hn2; auto with zarith. + right; intros H3; inversion_clear H3 as [Hp1 Hp2]; case H1; auto. Defined. Theorem not_prime_divide: forall p, 1 < p -> ~ prime p -> exists n, 1 < n < p /\ (n | p). Proof. intros p Hp Hp1. case (prime_dec_aux p p); intros H1. - elim Hp1; constructor; auto. intros n (Hn1,Hn2). Z.le_elim Hn1; auto with zarith. subst n; constructor; auto with zarith. - case H1; intros n (Hn1,Hn2). destruct (Z_0_1_more _ (Z.gcd_nonneg n p)) as [H|[H|H]]. + exfalso. apply Z.gcd_eq_0_l in H. omega. + elim Hn2. red. rewrite <- H. apply Zgcd_is_gcd. + exists (Z.gcd n p); split; [ split; auto | apply Z.gcd_divide_r ]. apply Z.le_lt_trans with n; auto with zarith. apply Z.divide_pos_le; auto with zarith. apply Z.gcd_divide_l. Qed. coq-8.4pl2/theories/ZArith/Zmisc.v0000640000175000001440000000206512010532755016131 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z.iter n f x = iter_nat (Z.abs_nat n) A f x. Proof. intros n A f x; case n; auto. intros p _; unfold Z.iter, Z.abs_nat; apply Pos2Nat.inj_iter. intros p abs; case abs; trivial. Qed. coq-8.4pl2/theories/ZArith/Zsqrt_compat.v0000640000175000001440000001700612010532755017533 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* match constr:X1 with | context [1%positive] => fail 1 | _ => rewrite (Pos2Z.inj_xI X1) end | |- context [(Zpos (xO ?X1))] => match constr:X1 with | context [1%positive] => fail 1 | _ => rewrite (Pos2Z.inj_xO X1) end end. Inductive sqrt_data (n:Z) : Set := c_sqrt : forall s r:Z, n = s * s + r -> 0 <= r <= 2 * s -> sqrt_data n. 
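(* Added example, not in the original file: a concrete inhabitant of
   [sqrt_data], packaging the integer square root 3 of 10 with remainder 1,
   since 10 = 3*3 + 1 and 0 <= 1 <= 2*3. The name [sqrt_data_ten] is new
   here; the proof is a sketch relying only on computation and [omega],
   which this file already imports. *)
Example sqrt_data_ten : sqrt_data 10.
Proof. apply c_sqrt with (s := 3) (r := 1). reflexivity. split; omega. Qed.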
Definition sqrtrempos : forall p:positive, sqrt_data (Zpos p). refine (fix sqrtrempos (p:positive) : sqrt_data (Zpos p) := match p return sqrt_data (Zpos p) with | xH => c_sqrt 1 1 0 _ _ | xO xH => c_sqrt 2 1 1 _ _ | xI xH => c_sqrt 3 1 2 _ _ | xO (xO p') => match sqrtrempos p' with | c_sqrt s' r' Heq Hint => match Z_le_gt_dec (4 * s' + 1) (4 * r') with | left Hle => c_sqrt (Zpos (xO (xO p'))) (2 * s' + 1) (4 * r' - (4 * s' + 1)) _ _ | right Hgt => c_sqrt (Zpos (xO (xO p'))) (2 * s') (4 * r') _ _ end end | xO (xI p') => match sqrtrempos p' with | c_sqrt s' r' Heq Hint => match Z_le_gt_dec (4 * s' + 1) (4 * r' + 2) with | left Hle => c_sqrt (Zpos (xO (xI p'))) (2 * s' + 1) (4 * r' + 2 - (4 * s' + 1)) _ _ | right Hgt => c_sqrt (Zpos (xO (xI p'))) (2 * s') (4 * r' + 2) _ _ end end | xI (xO p') => match sqrtrempos p' with | c_sqrt s' r' Heq Hint => match Z_le_gt_dec (4 * s' + 1) (4 * r' + 1) with | left Hle => c_sqrt (Zpos (xI (xO p'))) (2 * s' + 1) (4 * r' + 1 - (4 * s' + 1)) _ _ | right Hgt => c_sqrt (Zpos (xI (xO p'))) (2 * s') (4 * r' + 1) _ _ end end | xI (xI p') => match sqrtrempos p' with | c_sqrt s' r' Heq Hint => match Z_le_gt_dec (4 * s' + 1) (4 * r' + 3) with | left Hle => c_sqrt (Zpos (xI (xI p'))) (2 * s' + 1) (4 * r' + 3 - (4 * s' + 1)) _ _ | right Hgt => c_sqrt (Zpos (xI (xI p'))) (2 * s') (4 * r' + 3) _ _ end end end); clear sqrtrempos; repeat compute_POS; try (try rewrite Heq; ring); try omega. Defined. (** Define with integer input, but with a strong (readable) specification. *) Definition Zsqrt : forall x:Z, 0 <= x -> {s : Z & {r : Z | x = s * s + r /\ s * s <= x < (s + 1) * (s + 1)}}. refine (fun x => match x return 0 <= x -> {s : Z & {r : Z | x = s * s + r /\ s * s <= x < (s + 1) * (s + 1)}} with | Zpos p => fun h => match sqrtrempos p with | c_sqrt s r Heq Hint => existT (fun s:Z => {r : Z | Zpos p = s * s + r /\ s * s <= Zpos p < (s + 1) * (s + 1)}) s (exist (fun r:Z => Zpos p = s * s + r /\ s * s <= Zpos p < (s + 1) * (s + 1)) r _) end | Zneg p => fun h => False_rec {s : Z & {r : Z | Zneg p = s * s + r /\ s * s <= Zneg p < (s + 1) * (s + 1)}} (h (eq_refl Datatypes.Gt)) | Z0 => fun h => existT (fun s:Z => {r : Z | 0 = s * s + r /\ s * s <= 0 < (s + 1) * (s + 1)}) 0 (exist (fun r:Z => 0 = 0 * 0 + r /\ 0 * 0 <= 0 < (0 + 1) * (0 + 1)) 0 _) end); try omega. split; [ omega | rewrite Heq; ring_simplify (s*s) ((s + 1) * (s + 1)); omega ]. Defined. (** Define a function of type Z->Z that computes the integer square root, but only for positive numbers, and 0 for others. *) Definition Zsqrt_plain (x:Z) : Z := match x with | Zpos p => match Zsqrt (Zpos p) (Pos2Z.is_nonneg p) with | existT s _ => s end | Zneg p => 0 | Z0 => 0 end. (** A basic theorem about Zsqrt_plain *) Theorem Zsqrt_interval : forall n:Z, 0 <= n -> Zsqrt_plain n * Zsqrt_plain n <= n < (Zsqrt_plain n + 1) * (Zsqrt_plain n + 1). Proof. intros [|p|p] Hp. - now compute. - unfold Zsqrt_plain. now destruct Zsqrt as (s & r & Heq & Hint). - now elim Hp. Qed. (** Positivity *) Theorem Zsqrt_plain_is_pos: forall n, 0 <= n -> 0 <= Zsqrt_plain n. Proof. intros n m; case (Zsqrt_interval n); auto with zarith. intros H1 H2; case (Z.le_gt_cases 0 (Zsqrt_plain n)); auto. intros H3; contradict H2; auto; apply Z.le_ngt. apply Z.le_trans with ( 2 := H1 ). replace ((Zsqrt_plain n + 1) * (Zsqrt_plain n + 1)) with (Zsqrt_plain n * Zsqrt_plain n + (2 * Zsqrt_plain n + 1)); auto with zarith. ring. Qed. (** Direct correctness on squares. *) Theorem Zsqrt_square_id: forall a, 0 <= a -> Zsqrt_plain (a * a) = a. Proof. intros a H. 
generalize (Zsqrt_plain_is_pos (a * a)); auto with zarith; intros Haa. case (Zsqrt_interval (a * a)); auto with zarith. intros H1 H2. case (Z.le_gt_cases a (Zsqrt_plain (a * a))); intros H3. - Z.le_elim H3; auto. contradict H1; auto; apply Z.lt_nge; auto with zarith. apply Z.le_lt_trans with (a * Zsqrt_plain (a * a)); auto with zarith. apply Z.mul_lt_mono_pos_r; auto with zarith. - contradict H2; auto; apply Z.le_ngt; auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. Qed. (** [Zsqrt_plain] is increasing *) Theorem Zsqrt_le: forall p q, 0 <= p <= q -> Zsqrt_plain p <= Zsqrt_plain q. Proof. intros p q [H1 H2]. Z.le_elim H2; [ | subst q; auto with zarith]. case (Z.le_gt_cases (Zsqrt_plain p) (Zsqrt_plain q)); auto; intros H3. assert (Hp: (0 <= Zsqrt_plain q)). { apply Zsqrt_plain_is_pos; auto with zarith. } absurd (q <= p); auto with zarith. apply Z.le_trans with ((Zsqrt_plain q + 1) * (Zsqrt_plain q + 1)). case (Zsqrt_interval q); auto with zarith. apply Z.le_trans with (Zsqrt_plain p * Zsqrt_plain p); auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. case (Zsqrt_interval p); auto with zarith. Qed. (** Equivalence between Zsqrt_plain and [Z.sqrt] *) Lemma Zsqrt_equiv : forall n, Zsqrt_plain n = Z.sqrt n. Proof. intros. destruct (Z_le_gt_dec 0 n). symmetry. apply Z.sqrt_unique; trivial. now apply Zsqrt_interval. now destruct n. Qed.coq-8.4pl2/theories/ZArith/Zpower.v0000640000175000001440000002252312010532755016333 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* nat->nat] and [Z.mul : Z->Z->Z] *) Lemma Zpower_nat_is_exp : forall (n m:nat) (z:Z), Zpower_nat z (n + m) = Zpower_nat z n * Zpower_nat z m. Proof. induction n. - intros. now rewrite Zpower_nat_0_r, Z.mul_1_l. - intros. simpl. now rewrite 2 Zpower_nat_succ_r, IHn, Z.mul_assoc. Qed. (** Conversions between powers of unary and binary integers *) Lemma Zpower_pos_nat (z : Z) (p : positive) : Z.pow_pos z p = Zpower_nat z (Pos.to_nat p). Proof. apply Pos2Nat.inj_iter. Qed. Lemma Zpower_nat_Z (z : Z) (n : nat) : Zpower_nat z n = z ^ (Z.of_nat n). Proof. induction n. trivial. rewrite Zpower_nat_succ_r, Nat2Z.inj_succ, Z.pow_succ_r. now f_equal. apply Nat2Z.is_nonneg. Qed. Theorem Zpower_nat_Zpower z n : 0 <= n -> z^n = Zpower_nat z (Z.abs_nat n). Proof. intros. now rewrite Zpower_nat_Z, Zabs2Nat.id_abs, Z.abs_eq. Qed. (** The function [(Z.pow_pos z)] is a morphism for [Pos.add : positive->positive->positive] and [Z.mul : Z->Z->Z] *) Lemma Zpower_pos_is_exp (n m : positive)(z:Z) : Z.pow_pos z (n + m) = Z.pow_pos z n * Z.pow_pos z m. Proof. now apply (Z.pow_add_r z (Zpos n) (Zpos m)). Qed. Hint Immediate Zpower_nat_is_exp Zpower_pos_is_exp : zarith. Hint Unfold Z.pow_pos Zpower_nat: zarith. Theorem Zpower_exp x n m : n >= 0 -> m >= 0 -> x ^ (n + m) = x ^ n * x ^ m. Proof. Z.swap_greater. apply Z.pow_add_r. Qed. Section Powers_of_2. (** * Powers of 2 *) (** For the powers of two, that will be widely used, a more direct calculus is possible. [shift n m] computes [2^n * m], i.e. [m] shifted by [n] positions *) Definition shift_nat (n:nat) (z:positive) := nat_iter n xO z. Definition shift_pos (n z:positive) := Pos.iter n xO z. Definition shift (n:Z) (z:positive) := match n with | Z0 => z | Zpos p => Pos.iter p xO z | Zneg p => z end. Definition two_power_nat (n:nat) := Zpos (shift_nat n 1). Definition two_power_pos (x:positive) := Zpos (shift_pos x 1). 
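(* Added example, not in the original file: [two_power_nat] and
   [two_power_pos] compute powers of two by iterated doubling, so closed
   instances are checked by computation alone. The example names are new
   here. *)
Example two_power_nat_5 : two_power_nat 5 = 32.
Proof. reflexivity. Qed.
Example two_power_pos_5 : two_power_pos 5 = 32.
Proof. reflexivity. Qed.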
Definition two_p (x:Z) := match x with | Z0 => 1 | Zpos y => two_power_pos y | Zneg y => 0 end. (** Equivalence with notions defined in BinInt *) Lemma shift_nat_equiv n p : shift_nat n p = Pos.shiftl_nat p n. Proof. reflexivity. Qed. Lemma shift_pos_equiv n p : shift_pos n p = Pos.shiftl p (Npos n). Proof. reflexivity. Qed. Lemma shift_equiv n p : 0<=n -> Zpos (shift n p) = Z.shiftl (Zpos p) n. Proof. destruct n. - trivial. - simpl; intros. now apply Pos.iter_swap_gen. - now destruct 1. Qed. Lemma two_power_nat_equiv n : two_power_nat n = 2 ^ (Z.of_nat n). Proof. induction n. - trivial. - now rewrite Nat2Z.inj_succ, Z.pow_succ_r, <- IHn by apply Nat2Z.is_nonneg. Qed. Lemma two_power_pos_equiv p : two_power_pos p = 2 ^ Zpos p. Proof. now apply Pos.iter_swap_gen. Qed. Lemma two_p_equiv x : two_p x = 2 ^ x. Proof. destruct x; trivial. apply two_power_pos_equiv. Qed. (** Properties of these old versions of powers of two *) Lemma two_power_nat_S n : two_power_nat (S n) = 2 * two_power_nat n. Proof. reflexivity. Qed. Lemma shift_nat_plus n m x : shift_nat (n + m) x = shift_nat n (shift_nat m x). Proof. apply iter_nat_plus. Qed. Theorem shift_nat_correct n x : Zpos (shift_nat n x) = Zpower_nat 2 n * Zpos x. Proof. induction n. - trivial. - now rewrite Zpower_nat_succ_r, <- Z.mul_assoc, <- IHn. Qed. Theorem two_power_nat_correct n : two_power_nat n = Zpower_nat 2 n. Proof. now rewrite two_power_nat_equiv, Zpower_nat_Z. Qed. Lemma shift_pos_nat p x : shift_pos p x = shift_nat (Pos.to_nat p) x. Proof. apply Pos2Nat.inj_iter. Qed. Lemma two_power_pos_nat p : two_power_pos p = two_power_nat (Pos.to_nat p). Proof. unfold two_power_pos. now rewrite shift_pos_nat. Qed. Theorem shift_pos_correct p x : Zpos (shift_pos p x) = Z.pow_pos 2 p * Zpos x. Proof. now rewrite shift_pos_nat, Zpower_pos_nat, shift_nat_correct. Qed. Theorem two_power_pos_correct x : two_power_pos x = Z.pow_pos 2 x. Proof. apply two_power_pos_equiv. Qed. Theorem two_power_pos_is_exp x y : two_power_pos (x + y) = two_power_pos x * two_power_pos y. Proof. rewrite 3 two_power_pos_equiv. now apply (Z.pow_add_r 2 (Zpos x) (Zpos y)). Qed. Lemma two_p_correct x : two_p x = 2^x. Proof (two_p_equiv x). Theorem two_p_is_exp x y : 0 <= x -> 0 <= y -> two_p (x + y) = two_p x * two_p y. Proof. rewrite !two_p_equiv. apply Z.pow_add_r. Qed. Lemma two_p_gt_ZERO x : 0 <= x -> two_p x > 0. Proof. Z.swap_greater. rewrite two_p_equiv. now apply Z.pow_pos_nonneg. Qed. Lemma two_p_S x : 0 <= x -> two_p (Z.succ x) = 2 * two_p x. Proof. rewrite !two_p_equiv. now apply Z.pow_succ_r. Qed. Lemma two_p_pred x : 0 <= x -> two_p (Z.pred x) < two_p x. Proof. rewrite !two_p_equiv. intros. apply Z.pow_lt_mono_r; auto with zarith. Qed. End Powers_of_2. Hint Resolve two_p_gt_ZERO: zarith. Hint Immediate two_p_pred two_p_S: zarith. Section power_div_with_rest. (** * Division by a power of two. *) (** To [x:Z] and [p:positive], [q],[r] are associated such that [x = 2^p.q + r] and [0 <= r < 2^p] *) (** Invariant: [d*q + r = d'*q + r /\ d' = 2*d /\ 0<=r (0, r) | Zpos xH => (0, d + r) | Zpos (xI n) => (Zpos n, d + r) | Zpos (xO n) => (Zpos n, r) | Zneg xH => (-1, d + r) | Zneg (xI n) => (Zneg n - 1, d + r) | Zneg (xO n) => (Zneg n, r) end, 2 * d). Definition Zdiv_rest (x:Z) (p:positive) := let (qr, d) := Pos.iter p Zdiv_rest_aux (x, 0, 1) in qr. Lemma Zdiv_rest_correct1 (x:Z) (p:positive) : let (_, d) := Pos.iter p Zdiv_rest_aux (x, 0, 1) in d = two_power_pos p. Proof. rewrite Pos2Nat.inj_iter, two_power_pos_nat. induction (Pos.to_nat p); simpl; trivial. 
destruct (nat_iter n Zdiv_rest_aux (x,0,1)) as ((q,r),d). unfold Zdiv_rest_aux. rewrite two_power_nat_S; now f_equal. Qed. Lemma Zdiv_rest_correct2 (x:Z) (p:positive) : let '(q,r,d) := Pos.iter p Zdiv_rest_aux (x, 0, 1) in x = q * d + r /\ 0 <= r < d. Proof. apply Pos.iter_invariant; [|omega]. intros ((q,r),d) (H,H'). unfold Zdiv_rest_aux. destruct q as [ |[q|q| ]|[q|q| ]]; try omega. - rewrite Pos2Z.inj_xI, Z.mul_add_distr_r in H. rewrite Z.mul_shuffle3, Z.mul_assoc. omega. - rewrite Pos2Z.inj_xO in H. rewrite Z.mul_shuffle3, Z.mul_assoc. omega. - rewrite Pos2Z.neg_xI, Z.mul_sub_distr_r in H. rewrite Z.mul_sub_distr_r, Z.mul_shuffle3, Z.mul_assoc. omega. - rewrite Pos2Z.neg_xO in H. rewrite Z.mul_shuffle3, Z.mul_assoc. omega. Qed. (** Old-style rich specification by proof of existence *) Inductive Zdiv_rest_proofs (x:Z) (p:positive) : Set := Zdiv_rest_proof : forall q r:Z, x = q * two_power_pos p + r -> 0 <= r -> r < two_power_pos p -> Zdiv_rest_proofs x p. Lemma Zdiv_rest_correct (x:Z) (p:positive) : Zdiv_rest_proofs x p. Proof. generalize (Zdiv_rest_correct1 x p); generalize (Zdiv_rest_correct2 x p). destruct (Pos.iter p Zdiv_rest_aux (x, 0, 1)) as ((q,r),d). intros (H1,(H2,H3)) ->. now exists q r. Qed. (** Direct correctness of [Zdiv_rest] *) Lemma Zdiv_rest_ok x p : let (q,r) := Zdiv_rest x p in x = q * 2^(Zpos p) + r /\ 0 <= r < 2^(Zpos p). Proof. unfold Zdiv_rest. generalize (Zdiv_rest_correct1 x p); generalize (Zdiv_rest_correct2 x p). destruct (Pos.iter p Zdiv_rest_aux (x, 0, 1)) as ((q,r),d). intros H ->. now rewrite two_power_pos_equiv in H. Qed. (** Equivalence with [Z.shiftr] *) Lemma Zdiv_rest_shiftr x p : fst (Zdiv_rest x p) = Z.shiftr x (Zpos p). Proof. generalize (Zdiv_rest_ok x p). destruct (Zdiv_rest x p) as (q,r). intros (H,H'). simpl. rewrite Z.shiftr_div_pow2 by easy. apply Z.div_unique_pos with r; trivial. now rewrite Z.mul_comm. Qed. End power_div_with_rest. coq-8.4pl2/theories/ZArith/Zeven.v0000640000175000001440000001727112010532755016140 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* True | Zpos (xO _) => True | Zneg (xO _) => True | _ => False end. Definition Zodd (z:Z) := match z with | Zpos xH => True | Zneg xH => True | Zpos (xI _) => True | Zneg (xI _) => True | _ => False end. Lemma Zeven_equiv z : Zeven z <-> Z.Even z. Proof. rewrite <- Z.even_spec. destruct z as [|p|p]; try destruct p; simpl; intuition. Qed. Lemma Zodd_equiv z : Zodd z <-> Z.Odd z. Proof. rewrite <- Z.odd_spec. destruct z as [|p|p]; try destruct p; simpl; intuition. Qed. Theorem Zeven_ex_iff n : Zeven n <-> exists m, n = 2*m. Proof (Zeven_equiv n). Theorem Zodd_ex_iff n : Zodd n <-> exists m, n = 2*m + 1. Proof (Zodd_equiv n). (** Boolean tests of parity (now in BinInt.Z) *) Notation Zeven_bool := Z.even (compat "8.3"). Notation Zodd_bool := Z.odd (compat "8.3"). Lemma Zeven_bool_iff n : Z.even n = true <-> Zeven n. Proof. now rewrite Z.even_spec, Zeven_equiv. Qed. Lemma Zodd_bool_iff n : Z.odd n = true <-> Zodd n. Proof. now rewrite Z.odd_spec, Zodd_equiv. Qed. Ltac boolify_even_odd := rewrite <- ?Zeven_bool_iff, <- ?Zodd_bool_iff. Lemma Zodd_even_bool n : Z.odd n = negb (Z.even n). Proof. symmetry. apply Z.negb_even. Qed. Lemma Zeven_odd_bool n : Z.even n = negb (Z.odd n). Proof. symmetry. apply Z.negb_odd. Qed. Definition Zeven_odd_dec n : {Zeven n} + {Zodd n}. Proof. destruct n as [|p|p]; try destruct p; simpl; (now left) || (now right). Defined. 
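(* Added illustration, not in the original file: the propositional and the
   boolean parity tests both reduce on closed integers, so concrete
   instances follow by computation. The example names are new here. *)
Example Zeven_12 : Zeven 12.
Proof. exact I. Qed.
Example Zodd_bool_7 : Z.odd 7 = true.
Proof. reflexivity. Qed.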
Definition Zeven_dec n : {Zeven n} + {~ Zeven n}. Proof. destruct n as [|p|p]; try destruct p; simpl; (now left) || (now right). Defined. Definition Zodd_dec n : {Zodd n} + {~ Zodd n}. Proof. destruct n as [|p|p]; try destruct p; simpl; (now left) || (now right). Defined. Lemma Zeven_not_Zodd n : Zeven n -> ~ Zodd n. Proof. boolify_even_odd. rewrite <- Z.negb_odd. destruct Z.odd; intuition. Qed. Lemma Zodd_not_Zeven n : Zodd n -> ~ Zeven n. Proof. boolify_even_odd. rewrite <- Z.negb_odd. destruct Z.odd; intuition. Qed. Lemma Zeven_Sn n : Zodd n -> Zeven (Z.succ n). Proof. boolify_even_odd. now rewrite Z.even_succ. Qed. Lemma Zodd_Sn n : Zeven n -> Zodd (Z.succ n). Proof. boolify_even_odd. now rewrite Z.odd_succ. Qed. Lemma Zeven_pred n : Zodd n -> Zeven (Z.pred n). Proof. boolify_even_odd. now rewrite Z.even_pred. Qed. Lemma Zodd_pred n : Zeven n -> Zodd (Z.pred n). Proof. boolify_even_odd. now rewrite Z.odd_pred. Qed. Hint Unfold Zeven Zodd: zarith. Notation Zeven_bool_succ := Z.even_succ (compat "8.3"). Notation Zeven_bool_pred := Z.even_pred (compat "8.3"). Notation Zodd_bool_succ := Z.odd_succ (compat "8.3"). Notation Zodd_bool_pred := Z.odd_pred (compat "8.3"). (******************************************************************) (** * Definition of [Z.quot2], [Z.div2] and properties wrt [Zeven] and [Zodd] *) Notation Zdiv2 := Z.div2 (compat "8.3"). Notation Zquot2 := Z.quot2 (compat "8.3"). (** Properties of [Z.div2] *) Lemma Zdiv2_odd_eqn n : n = 2*(Z.div2 n) + if Z.odd n then 1 else 0. Proof (Z.div2_odd n). Lemma Zeven_div2 n : Zeven n -> n = 2 * Z.div2 n. Proof. boolify_even_odd. rewrite <- Z.negb_odd, Bool.negb_true_iff. intros Hn. rewrite (Zdiv2_odd_eqn n) at 1. now rewrite Hn, Z.add_0_r. Qed. Lemma Zodd_div2 n : Zodd n -> n = 2 * Z.div2 n + 1. Proof. boolify_even_odd. intros Hn. rewrite (Zdiv2_odd_eqn n) at 1. now rewrite Hn. Qed. (** Properties of [Z.quot2] *) (** TODO: move to Numbers someday *) Lemma Zquot2_odd_eqn n : n = 2*(Z.quot2 n) + if Z.odd n then Z.sgn n else 0. Proof. now destruct n as [ |[p|p| ]|[p|p| ]]. Qed. Lemma Zeven_quot2 n : Zeven n -> n = 2 * Z.quot2 n. Proof. intros Hn. apply Zeven_bool_iff in Hn. rewrite (Zquot2_odd_eqn n) at 1. now rewrite Zodd_even_bool, Hn, Z.add_0_r. Qed. Lemma Zodd_quot2 n : n >= 0 -> Zodd n -> n = 2 * Z.quot2 n + 1. Proof. intros Hn Hn'. apply Zodd_bool_iff in Hn'. rewrite (Zquot2_odd_eqn n) at 1. rewrite Hn'. f_equal. destruct n; (now destruct Hn) || easy. Qed. Lemma Zodd_quot2_neg n : n <= 0 -> Zodd n -> n = 2 * Z.quot2 n - 1. Proof. intros Hn Hn'. apply Zodd_bool_iff in Hn'. rewrite (Zquot2_odd_eqn n) at 1; rewrite Hn'. unfold Z.sub. f_equal. destruct n; (now destruct Hn) || easy. Qed. Lemma Zquot2_opp n : Z.quot2 (-n) = - Z.quot2 n. Proof. now destruct n as [ |[p|p| ]|[p|p| ]]. Qed. Lemma Zquot2_quot n : Z.quot2 n = n ÷ 2. Proof. assert (AUX : forall m, 0 < m -> Z.quot2 m = m ÷ 2). { intros m Hm. apply Z.quot_unique with (if Z.odd m then Z.sgn m else 0). now apply Z.lt_le_incl. rewrite Z.sgn_pos by trivial. destruct (Z.odd m); now split. apply Zquot2_odd_eqn. } destruct (Z.lt_trichotomy 0 n) as [POS|[NUL|NEG]]. - now apply AUX. - now subst. - apply Z.opp_inj. rewrite <- Z.quot_opp_l, <- Zquot2_opp. apply AUX. now destruct n. easy. Qed. (** More properties of parity *) Lemma Z_modulo_2 n : {y | n = 2 * y} + {y | n = 2 * y + 1}. Proof. destruct (Zeven_odd_dec n) as [Hn|Hn]. - left. exists (Z.div2 n). exact (Zeven_div2 n Hn). - right. exists (Z.div2 n). exact (Zodd_div2 n Hn). Qed. 
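(* Added illustration, not in the original file: on a negative odd number,
   [Z.div2] rounds downwards while [Z.quot2] truncates towards zero, in
   accordance with [Zdiv2_odd_eqn] and [Zquot2_odd_eqn] above. The example
   names are new here. *)
Example Zdiv2_m7 : Z.div2 (-7) = -4.
Proof. reflexivity. Qed.
Example Zquot2_m7 : Z.quot2 (-7) = -3.
Proof. reflexivity. Qed.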
Lemma Zsplit2 n : {p : Z * Z | let (x1, x2) := p in n = x1 + x2 /\ (x1 = x2 \/ x2 = x1 + 1)}. Proof. destruct (Z_modulo_2 n) as [(y,Hy)|(y,Hy)]; rewrite <- Z.add_diag in Hy. - exists (y, y). split. assumption. now left. - exists (y, y + 1). split. now rewrite Z.add_assoc. now right. Qed. Theorem Zeven_ex n : Zeven n -> exists m, n = 2 * m. Proof. exists (Z.div2 n); apply Zeven_div2; auto. Qed. Theorem Zodd_ex n : Zodd n -> exists m, n = 2 * m + 1. Proof. exists (Z.div2 n); apply Zodd_div2; auto. Qed. Theorem Zeven_2p p : Zeven (2 * p). Proof. now destruct p. Qed. Theorem Zodd_2p_plus_1 p : Zodd (2 * p + 1). Proof. destruct p as [|p|p]; now try destruct p. Qed. Theorem Zeven_plus_Zodd a b : Zeven a -> Zodd b -> Zodd (a + b). Proof. boolify_even_odd. rewrite <- Z.negb_odd, Bool.negb_true_iff. intros Ha Hb. now rewrite Z.odd_add, Ha, Hb. Qed. Theorem Zeven_plus_Zeven a b : Zeven a -> Zeven b -> Zeven (a + b). Proof. boolify_even_odd. intros Ha Hb. now rewrite Z.even_add, Ha, Hb. Qed. Theorem Zodd_plus_Zeven a b : Zodd a -> Zeven b -> Zodd (a + b). Proof. intros. rewrite Z.add_comm. now apply Zeven_plus_Zodd. Qed. Theorem Zodd_plus_Zodd a b : Zodd a -> Zodd b -> Zeven (a + b). Proof. boolify_even_odd. rewrite <- 2 Z.negb_even, 2 Bool.negb_true_iff. intros Ha Hb. now rewrite Z.even_add, Ha, Hb. Qed. Theorem Zeven_mult_Zeven_l a b : Zeven a -> Zeven (a * b). Proof. boolify_even_odd. intros Ha. now rewrite Z.even_mul, Ha. Qed. Theorem Zeven_mult_Zeven_r a b : Zeven b -> Zeven (a * b). Proof. intros. rewrite Z.mul_comm. now apply Zeven_mult_Zeven_l. Qed. Theorem Zodd_mult_Zodd a b : Zodd a -> Zodd b -> Zodd (a * b). Proof. boolify_even_odd. intros Ha Hb. now rewrite Z.odd_mul, Ha, Hb. Qed. (* for compatibility *) Close Scope Z_scope. coq-8.4pl2/theories/ZArith/Zpow_alt.v0000640000175000001440000000500112010532755016634 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1 | Zpos p => Pos.iter_op Z.mul p n | Zneg p => 0 end. Infix "^^" := Zpower_alt (at level 30, right associativity) : Z_scope. Lemma Piter_mul_acc : forall f, (forall x y:Z, (f x)*y = f (x*y)) -> forall p k, Pos.iter p f k = (Pos.iter p f 1)*k. Proof. intros f Hf. induction p; simpl; intros. - set (g := Pos.iter p f 1) in *. now rewrite !IHp, Hf, Z.mul_assoc. - set (g := Pos.iter p f 1) in *. now rewrite !IHp, Z.mul_assoc. - now rewrite Hf, Z.mul_1_l. Qed. Lemma Piter_op_square : forall p a, Pos.iter_op Z.mul p (a*a) = (Pos.iter_op Z.mul p a)*(Pos.iter_op Z.mul p a). Proof. induction p; simpl; intros; trivial. now rewrite IHp, Z.mul_shuffle1. Qed. Lemma Zpower_equiv a b : a^^b = a^b. Proof. destruct b as [|p|p]; trivial. unfold Zpower_alt, Z.pow, Z.pow_pos. revert a. induction p; simpl; intros. - f_equal. rewrite Piter_mul_acc. now rewrite Piter_op_square, IHp. intros. symmetry; apply Z.mul_assoc. - rewrite Piter_mul_acc. now rewrite Piter_op_square, IHp. intros. symmetry; apply Z.mul_assoc. - now Z.nzsimpl. Qed. Lemma Zpower_alt_0_r n : n^^0 = 1. Proof. reflexivity. Qed. Lemma Zpower_alt_succ_r a b : 0<=b -> a^^(Z.succ b) = a * a^^b. Proof. destruct b as [|b|b]; intros Hb; simpl. - now Z.nzsimpl. - now rewrite Pos.add_1_r, Pos.iter_op_succ by apply Z.mul_assoc. - now elim Hb. Qed. Lemma Zpower_alt_neg_r a b : b<0 -> a^^b = 0. Proof. now destruct b. Qed. Lemma Zpower_alt_Ppow p q : (Zpos p)^^(Zpos q) = Zpos (p^q). Proof. now rewrite Zpower_equiv, Pos2Z.inj_pow. Qed. 
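(* Added check, not in the original file: [Zpower_alt] agrees with [Z.pow]
   and computes by binary exponentiation, e.g. on 2^10. The example name is
   new here. *)
Example Zpower_alt_2_10 : 2 ^^ 10 = 1024.
Proof. reflexivity. Qed.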
coq-8.4pl2/theories/ZArith/Zabs.v0000640000175000001440000000677112010532755015753 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop) (n:Z), (n >= 0 -> P n) -> (n <= 0 -> P (- n)) -> P (Z.abs n). Proof. intros. apply Z.abs_case_strong; Z.swap_greater; trivial. intros x y Hx; now subst. Qed. Theorem Zabs_intro : forall P (n:Z), P (- n) -> P n -> P (Z.abs n). Proof. now destruct n. Qed. Definition Zabs_dec : forall x:Z, {x = Z.abs x} + {x = - Z.abs x}. Proof. destruct x; auto. Defined. Lemma Zabs_spec x : 0 <= x /\ Z.abs x = x \/ 0 > x /\ Z.abs x = -x. Proof. Z.swap_greater. apply Z.abs_spec. Qed. (** * Some results about the sign function. *) Notation Zsgn_Zmult := Z.sgn_mul (compat "8.3"). Notation Zsgn_Zopp := Z.sgn_opp (compat "8.3"). Notation Zsgn_pos := Z.sgn_pos_iff (compat "8.3"). Notation Zsgn_neg := Z.sgn_neg_iff (compat "8.3"). Notation Zsgn_null := Z.sgn_null_iff (compat "8.3"). (** A characterization of the sign function: *) Lemma Zsgn_spec x : 0 < x /\ Z.sgn x = 1 \/ 0 = x /\ Z.sgn x = 0 \/ 0 > x /\ Z.sgn x = -1. Proof. intros. Z.swap_greater. apply Z.sgn_spec. Qed. (** Compatibility *) Notation inj_Zabs_nat := Zabs2Nat.id_abs (compat "8.3"). Notation Zabs_nat_Z_of_nat := Zabs2Nat.id (compat "8.3"). Notation Zabs_nat_mult := Zabs2Nat.inj_mul (compat "8.3"). Notation Zabs_nat_Zsucc := Zabs2Nat.inj_succ (compat "8.3"). Notation Zabs_nat_Zplus := Zabs2Nat.inj_add (compat "8.3"). Notation Zabs_nat_Zminus := (fun n m => Zabs2Nat.inj_sub m n) (compat "8.3"). Notation Zabs_nat_compare := Zabs2Nat.inj_compare (compat "8.3"). Lemma Zabs_nat_le n m : 0 <= n <= m -> (Z.abs_nat n <= Z.abs_nat m)%nat. Proof. intros (H,H'). apply Zabs2Nat.inj_le; trivial. now transitivity n. Qed. Lemma Zabs_nat_lt n m : 0 <= n < m -> (Z.abs_nat n < Z.abs_nat m)%nat. Proof. intros (H,H'). apply Zabs2Nat.inj_lt; trivial. transitivity n; trivial. now apply Z.lt_le_incl. Qed. coq-8.4pl2/theories/ZArith/ZArith.v0000640000175000001440000000151112010532755016240 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* m}. Proof. unfold ">", "<". generalize (Z.compare_eq n m). destruct (n ?= m); [ left; right | left; left | right]; auto. Defined. Theorem Ztrichotomy n m : n < m \/ n = m \/ n > m. Proof. Z.swap_greater. apply Z.lt_trichotomy. Qed. (**********************************************************************) (** * Decidability of equality and order on Z *) Notation dec_eq := Z.eq_decidable (compat "8.3"). Notation dec_Zle := Z.le_decidable (compat "8.3"). Notation dec_Zlt := Z.lt_decidable (compat "8.3"). Theorem dec_Zne n m : decidable (Zne n m). Proof. destruct (Z.eq_decidable n m); [right|left]; subst; auto. Qed. Theorem dec_Zgt n m : decidable (n > m). Proof. destruct (Z.lt_decidable m n); [left|right]; Z.swap_greater; auto. Qed. Theorem dec_Zge n m : decidable (n >= m). Proof. destruct (Z.le_decidable m n); [left|right]; Z.swap_greater; auto. Qed. Theorem not_Zeq n m : n <> m -> n < m \/ m < n. Proof. apply Z.lt_gt_cases. Qed. (** * Relating strict and large orders *) Notation Zgt_lt := Z.gt_lt (compat "8.3"). Notation Zlt_gt := Z.lt_gt (compat "8.3"). Notation Zge_le := Z.ge_le (compat "8.3"). Notation Zle_ge := Z.le_ge (compat "8.3"). Notation Zgt_iff_lt := Z.gt_lt_iff (compat "8.3"). Notation Zge_iff_le := Z.ge_le_iff (compat "8.3"). 
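(* Added check, not in the original file: [<] and [>] on [Z] are both
   defined through [Z.compare], so closed instances are provable by
   computation; the bridging notations above ([Zgt_lt], [Zlt_gt], ...)
   convert between the two forms. The example name is new here. *)
Example Zlt_gt_3_5 : 3 < 5 /\ 5 > 3.
Proof. split; reflexivity. Qed.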
Lemma Zle_not_lt n m : n <= m -> ~ m < n. Proof. apply Z.le_ngt. Qed. Lemma Zlt_not_le n m : n < m -> ~ m <= n. Proof. apply Z.lt_nge. Qed. Lemma Zle_not_gt n m : n <= m -> ~ n > m. Proof. trivial. Qed. Lemma Zgt_not_le n m : n > m -> ~ n <= m. Proof. Z.swap_greater. apply Z.lt_nge. Qed. Lemma Znot_ge_lt n m : ~ n >= m -> n < m. Proof. Z.swap_greater. apply Z.nle_gt. Qed. Lemma Znot_lt_ge n m : ~ n < m -> n >= m. Proof. trivial. Qed. Lemma Znot_gt_le n m: ~ n > m -> n <= m. Proof. trivial. Qed. Lemma Znot_le_gt n m : ~ n <= m -> n > m. Proof. Z.swap_greater. apply Z.nle_gt. Qed. Lemma not_Zne n m : ~ Zne n m -> n = m. Proof. intros H. destruct (Z.eq_decidable n m); [assumption|now elim H]. Qed. (** * Equivalence and order properties *) (** Reflexivity *) Notation Zle_refl := Z.le_refl (compat "8.3"). Notation Zeq_le := Z.eq_le_incl (compat "8.3"). Hint Resolve Z.le_refl: zarith. (** Antisymmetry *) Notation Zle_antisym := Z.le_antisymm (compat "8.3"). (** Asymmetry *) Notation Zlt_asym := Z.lt_asymm (compat "8.3"). Lemma Zgt_asym n m : n > m -> ~ m > n. Proof. Z.swap_greater. apply Z.lt_asymm. Qed. (** Irreflexivity *) Notation Zlt_irrefl := Z.lt_irrefl (compat "8.3"). Notation Zlt_not_eq := Z.lt_neq (compat "8.3"). Lemma Zgt_irrefl n : ~ n > n. Proof. Z.swap_greater. apply Z.lt_irrefl. Qed. (** Large = strict or equal *) Notation Zlt_le_weak := Z.lt_le_incl (compat "8.3"). Notation Zle_lt_or_eq_iff := Z.lt_eq_cases (compat "8.3"). Lemma Zle_lt_or_eq n m : n <= m -> n < m \/ n = m. Proof. apply Z.lt_eq_cases. Qed. (** Dichotomy *) Notation Zle_or_lt := Z.le_gt_cases (compat "8.3"). (** Transitivity of strict orders *) Notation Zlt_trans := Z.lt_trans (compat "8.3"). Lemma Zgt_trans n m p : n > m -> m > p -> n > p. Proof. Z.swap_greater. intros; now transitivity m. Qed. (** Mixed transitivity *) Notation Zlt_le_trans := Z.lt_le_trans (compat "8.3"). Notation Zle_lt_trans := Z.le_lt_trans (compat "8.3"). Lemma Zle_gt_trans n m p : m <= n -> m > p -> n > p. Proof. Z.swap_greater. Z.order. Qed. Lemma Zgt_le_trans n m p : n > m -> p <= m -> n > p. Proof. Z.swap_greater. Z.order. Qed. (** Transitivity of large orders *) Notation Zle_trans := Z.le_trans (compat "8.3"). Lemma Zge_trans n m p : n >= m -> m >= p -> n >= p. Proof. Z.swap_greater. Z.order. Qed. Hint Resolve Z.le_trans: zarith. (** * Compatibility of order and operations on Z *) (** ** Successor *) (** Compatibility of successor wrt to order *) Lemma Zsucc_le_compat n m : m <= n -> Z.succ m <= Z.succ n. Proof. apply Z.succ_le_mono. Qed. Lemma Zsucc_lt_compat n m : n < m -> Z.succ n < Z.succ m. Proof. apply Z.succ_lt_mono. Qed. Lemma Zsucc_gt_compat n m : m > n -> Z.succ m > Z.succ n. Proof. Z.swap_greater. apply Z.succ_lt_mono. Qed. Hint Resolve Zsucc_le_compat: zarith. (** Simplification of successor wrt to order *) Lemma Zsucc_gt_reg n m : Z.succ m > Z.succ n -> m > n. Proof. Z.swap_greater. apply Z.succ_lt_mono. Qed. Lemma Zsucc_le_reg n m : Z.succ m <= Z.succ n -> m <= n. Proof. apply Z.succ_le_mono. Qed. Lemma Zsucc_lt_reg n m : Z.succ n < Z.succ m -> n < m. Proof. apply Z.succ_lt_mono. Qed. (** Special base instances of order *) Notation Zlt_succ := Z.lt_succ_diag_r (compat "8.3"). Notation Zlt_pred := Z.lt_pred_l (compat "8.3"). Lemma Zgt_succ n : Z.succ n > n. Proof. Z.swap_greater. apply Z.lt_succ_diag_r. Qed. Lemma Znot_le_succ n : ~ Z.succ n <= n. Proof. apply Z.lt_nge, Z.lt_succ_diag_r. Qed. (** Relating strict and large order using successor or predecessor *) Notation Zlt_succ_r := Z.lt_succ_r (compat "8.3"). 
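(* Added check, not in the original file: a closed instance of
   [Z.lt_succ_diag_r], the lemma behind the compatibility notation
   [Zlt_succ] above. The example name is new here. *)
Example Zlt_succ_7 : 7 < Z.succ 7.
Proof. apply Z.lt_succ_diag_r. Qed.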
Notation Zle_succ_l := Z.le_succ_l (compat "8.3"). Lemma Zgt_le_succ n m : m > n -> Z.succ n <= m. Proof. Z.swap_greater. apply Z.le_succ_l. Qed. Lemma Zle_gt_succ n m : n <= m -> Z.succ m > n. Proof. Z.swap_greater. apply Z.lt_succ_r. Qed. Lemma Zle_lt_succ n m : n <= m -> n < Z.succ m. Proof. apply Z.lt_succ_r. Qed. Lemma Zlt_le_succ n m : n < m -> Z.succ n <= m. Proof. apply Z.le_succ_l. Qed. Lemma Zgt_succ_le n m : Z.succ m > n -> n <= m. Proof. Z.swap_greater. apply Z.lt_succ_r. Qed. Lemma Zlt_succ_le n m : n < Z.succ m -> n <= m. Proof. apply Z.lt_succ_r. Qed. Lemma Zle_succ_gt n m : Z.succ n <= m -> m > n. Proof. Z.swap_greater. apply Z.le_succ_l. Qed. (** Weakening order *) Notation Zle_succ := Z.le_succ_diag_r (compat "8.3"). Notation Zle_pred := Z.le_pred_l (compat "8.3"). Notation Zlt_lt_succ := Z.lt_lt_succ_r (compat "8.3"). Notation Zle_le_succ := Z.le_le_succ_r (compat "8.3"). Lemma Zle_succ_le n m : Z.succ n <= m -> n <= m. Proof. intros. now apply Z.lt_le_incl, Z.le_succ_l. Qed. Hint Resolve Z.le_succ_diag_r: zarith. Hint Resolve Z.le_le_succ_r: zarith. (** Relating order wrt successor and order wrt predecessor *) Lemma Zgt_succ_pred n m : m > Z.succ n -> Z.pred m > n. Proof. Z.swap_greater. apply Z.lt_succ_lt_pred. Qed. Lemma Zlt_succ_pred n m : Z.succ n < m -> n < Z.pred m. Proof. apply Z.lt_succ_lt_pred. Qed. (** Relating strict order and large order on positive *) Lemma Zlt_0_le_0_pred n : 0 < n -> 0 <= Z.pred n. Proof. apply Z.lt_le_pred. Qed. Lemma Zgt_0_le_0_pred n : n > 0 -> 0 <= Z.pred n. Proof. Z.swap_greater. apply Z.lt_le_pred. Qed. (** Special cases of ordered integers *) Notation Zlt_0_1 := Z.lt_0_1 (compat "8.3"). Notation Zle_0_1 := Z.le_0_1 (compat "8.3"). Lemma Zle_neg_pos : forall p q:positive, Zneg p <= Zpos q. Proof. easy. Qed. Lemma Zgt_pos_0 : forall p:positive, Zpos p > 0. Proof. easy. Qed. (* weaker but useful (in [Z.pow] for instance) *) Lemma Zle_0_pos : forall p:positive, 0 <= Zpos p. Proof. easy. Qed. Lemma Zlt_neg_0 : forall p:positive, Zneg p < 0. Proof. easy. Qed. Lemma Zle_0_nat : forall n:nat, 0 <= Z.of_nat n. Proof. induction n; simpl; intros. apply Z.le_refl. easy. Qed. Hint Immediate Z.eq_le_incl: zarith. (** Derived lemma *) Lemma Zgt_succ_gt_or_eq n m : Z.succ n > m -> n > m \/ m = n. Proof. Z.swap_greater. intros. now apply Z.lt_eq_cases, Z.lt_succ_r. Qed. (** ** Addition *) (** Compatibility of addition wrt to order *) Notation Zplus_lt_le_compat := Z.add_lt_le_mono (compat "8.3"). Notation Zplus_le_lt_compat := Z.add_le_lt_mono (compat "8.3"). Notation Zplus_le_compat := Z.add_le_mono (compat "8.3"). Notation Zplus_lt_compat := Z.add_lt_mono (compat "8.3"). Lemma Zplus_gt_compat_l n m p : n > m -> p + n > p + m. Proof. Z.swap_greater. apply Z.add_lt_mono_l. Qed. Lemma Zplus_gt_compat_r n m p : n > m -> n + p > m + p. Proof. Z.swap_greater. apply Z.add_lt_mono_r. Qed. Lemma Zplus_le_compat_l n m p : n <= m -> p + n <= p + m. Proof. apply Z.add_le_mono_l. Qed. Lemma Zplus_le_compat_r n m p : n <= m -> n + p <= m + p. Proof. apply Z.add_le_mono_r. Qed. Lemma Zplus_lt_compat_l n m p : n < m -> p + n < p + m. Proof. apply Z.add_lt_mono_l. Qed. Lemma Zplus_lt_compat_r n m p : n < m -> n + p < m + p. Proof. apply Z.add_lt_mono_r. Qed. (** Compatibility of addition wrt to being positive *) Notation Zplus_le_0_compat := Z.add_nonneg_nonneg (compat "8.3"). (** Simplification of addition wrt to order *) Lemma Zplus_le_reg_l n m p : p + n <= p + m -> n <= m. Proof. apply Z.add_le_mono_l. Qed. 
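(* Added check, not in the original file: [Zplus_lt_compat_l] from above,
   specialised to closed integers; the remaining side condition is closed
   by computation. The example name is new here. *)
Example Zplus_lt_compat_l_check : 10 + 1 < 10 + 2.
Proof. apply Zplus_lt_compat_l. reflexivity. Qed.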
Lemma Zplus_le_reg_r n m p : n + p <= m + p -> n <= m. Proof. apply Z.add_le_mono_r. Qed. Lemma Zplus_lt_reg_l n m p : p + n < p + m -> n < m. Proof. apply Z.add_lt_mono_l. Qed. Lemma Zplus_lt_reg_r n m p : n + p < m + p -> n < m. Proof. apply Z.add_lt_mono_r. Qed. Lemma Zplus_gt_reg_l n m p : p + n > p + m -> n > m. Proof. Z.swap_greater. apply Z.add_lt_mono_l. Qed. Lemma Zplus_gt_reg_r n m p : n + p > m + p -> n > m. Proof. Z.swap_greater. apply Z.add_lt_mono_r. Qed. (** ** Multiplication *) (** Compatibility of multiplication by a positive wrt to order *) Lemma Zmult_le_compat_r n m p : n <= m -> 0 <= p -> n * p <= m * p. Proof. intros. now apply Z.mul_le_mono_nonneg_r. Qed. Lemma Zmult_le_compat_l n m p : n <= m -> 0 <= p -> p * n <= p * m. Proof. intros. now apply Z.mul_le_mono_nonneg_l. Qed. Lemma Zmult_lt_compat_r n m p : 0 < p -> n < m -> n * p < m * p. Proof. apply Z.mul_lt_mono_pos_r. Qed. Lemma Zmult_gt_compat_r n m p : p > 0 -> n > m -> n * p > m * p. Proof. Z.swap_greater. apply Z.mul_lt_mono_pos_r. Qed. Lemma Zmult_gt_0_lt_compat_r n m p : p > 0 -> n < m -> n * p < m * p. Proof. Z.swap_greater. apply Z.mul_lt_mono_pos_r. Qed. Lemma Zmult_gt_0_le_compat_r n m p : p > 0 -> n <= m -> n * p <= m * p. Proof. Z.swap_greater. apply Z.mul_le_mono_pos_r. Qed. Lemma Zmult_lt_0_le_compat_r n m p : 0 < p -> n <= m -> n * p <= m * p. Proof. apply Z.mul_le_mono_pos_r. Qed. Lemma Zmult_gt_0_lt_compat_l n m p : p > 0 -> n < m -> p * n < p * m. Proof. Z.swap_greater. apply Z.mul_lt_mono_pos_l. Qed. Lemma Zmult_lt_compat_l n m p : 0 < p -> n < m -> p * n < p * m. Proof. apply Z.mul_lt_mono_pos_l. Qed. Lemma Zmult_gt_compat_l n m p : p > 0 -> n > m -> p * n > p * m. Proof. Z.swap_greater. apply Z.mul_lt_mono_pos_l. Qed. Lemma Zmult_ge_compat_r n m p : n >= m -> p >= 0 -> n * p >= m * p. Proof. Z.swap_greater. intros. now apply Z.mul_le_mono_nonneg_r. Qed. Lemma Zmult_ge_compat_l n m p : n >= m -> p >= 0 -> p * n >= p * m. Proof. Z.swap_greater. intros. now apply Z.mul_le_mono_nonneg_l. Qed. Lemma Zmult_ge_compat n m p q : n >= p -> m >= q -> p >= 0 -> q >= 0 -> n * m >= p * q. Proof. Z.swap_greater. intros. now apply Z.mul_le_mono_nonneg. Qed. Lemma Zmult_le_compat n m p q : n <= p -> m <= q -> 0 <= n -> 0 <= m -> n * m <= p * q. Proof. intros. now apply Z.mul_le_mono_nonneg. Qed. (** Simplification of multiplication by a positive wrt to being positive *) Lemma Zmult_gt_0_lt_reg_r n m p : p > 0 -> n * p < m * p -> n < m. Proof. Z.swap_greater. apply Z.mul_lt_mono_pos_r. Qed. Lemma Zmult_lt_reg_r n m p : 0 < p -> n * p < m * p -> n < m. Proof. apply Z.mul_lt_mono_pos_r. Qed. Lemma Zmult_le_reg_r n m p : p > 0 -> n * p <= m * p -> n <= m. Proof. Z.swap_greater. apply Z.mul_le_mono_pos_r. Qed. Lemma Zmult_lt_0_le_reg_r n m p : 0 < p -> n * p <= m * p -> n <= m. Proof. apply Z.mul_le_mono_pos_r. Qed. Lemma Zmult_ge_reg_r n m p : p > 0 -> n * p >= m * p -> n >= m. Proof. Z.swap_greater. apply Z.mul_le_mono_pos_r. Qed. Lemma Zmult_gt_reg_r n m p : p > 0 -> n * p > m * p -> n > m. Proof. Z.swap_greater. apply Z.mul_lt_mono_pos_r. Qed. Lemma Zmult_lt_compat n m p q : 0 <= n < p -> 0 <= m < q -> n * m < p * q. Proof. intros (Hn,Hnp) (Hm,Hmq). now apply Z.mul_lt_mono_nonneg. Qed. Lemma Zmult_lt_compat2 n m p q : 0 < n <= p -> 0 < m < q -> n * m < p * q. Proof. intros (Hn, Hnp) (Hm,Hmq). apply Z.le_lt_trans with (p * m). apply Z.mul_le_mono_pos_r; trivial. apply Z.mul_lt_mono_pos_l; Z.order. Qed. 
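(* Added check, not in the original file: [Zmult_lt_compat_r] from above,
   specialised to closed integers; both side conditions are discharged by
   computation. The example name is new here. *)
Example Zmult_lt_compat_r_check : 2 * 3 < 5 * 3.
Proof. apply Zmult_lt_compat_r; reflexivity. Qed.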
(** Compatibility of multiplication by a positive wrt to being positive *) Notation Zmult_le_0_compat := Z.mul_nonneg_nonneg (compat "8.3"). Notation Zmult_lt_0_compat := Z.mul_pos_pos (compat "8.3"). Notation Zmult_lt_O_compat := Z.mul_pos_pos (compat "8.3"). Lemma Zmult_gt_0_compat n m : n > 0 -> m > 0 -> n * m > 0. Proof. Z.swap_greater. apply Z.mul_pos_pos. Qed. (* To remove someday ... *) Lemma Zmult_gt_0_le_0_compat n m : n > 0 -> 0 <= m -> 0 <= m * n. Proof. Z.swap_greater. intros. apply Z.mul_nonneg_nonneg. trivial. now apply Z.lt_le_incl. Qed. (** Simplification of multiplication by a positive wrt to being positive *) Lemma Zmult_le_0_reg_r n m : n > 0 -> 0 <= m * n -> 0 <= m. Proof. Z.swap_greater. apply Z.mul_nonneg_cancel_r. Qed. Lemma Zmult_lt_0_reg_r n m : 0 < n -> 0 < m * n -> 0 < m. Proof. apply Z.mul_pos_cancel_r. Qed. Lemma Zmult_gt_0_lt_0_reg_r n m : n > 0 -> 0 < m * n -> 0 < m. Proof. Z.swap_greater. apply Z.mul_pos_cancel_r. Qed. Lemma Zmult_gt_0_reg_l n m : n > 0 -> n * m > 0 -> m > 0. Proof. Z.swap_greater. apply Z.mul_pos_cancel_l. Qed. (** ** Square *) (** Simplification of square wrt order *) Lemma Zlt_square_simpl n m : 0 <= n -> m * m < n * n -> m < n. Proof. apply Z.square_lt_simpl_nonneg. Qed. Lemma Zgt_square_simpl n m : n >= 0 -> n * n > m * m -> n > m. Proof. Z.swap_greater. apply Z.square_lt_simpl_nonneg. Qed. (** * Equivalence between inequalities *) Notation Zle_plus_swap := Z.le_add_le_sub_r (compat "8.3"). Notation Zlt_plus_swap := Z.lt_add_lt_sub_r (compat "8.3"). Notation Zlt_minus_simpl_swap := Z.lt_sub_pos (compat "8.3"). Lemma Zeq_plus_swap n m p : n + p = m <-> n = m - p. Proof. apply Z.add_move_r. Qed. Lemma Zlt_0_minus_lt n m : 0 < n - m -> m < n. Proof. apply Z.lt_0_sub. Qed. Lemma Zle_0_minus_le n m : 0 <= n - m -> m <= n. Proof. apply Z.le_0_sub. Qed. Lemma Zle_minus_le_0 n m : m <= n -> 0 <= n - m. Proof. apply Z.le_0_sub. Qed. (** For compatibility *) Notation Zlt_O_minus_lt := Zlt_0_minus_lt (only parsing). coq-8.4pl2/theories/ZArith/Zwf.v0000640000175000001440000000473612010532755015621 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Acc (Zwf c) a). clear a; simple induction n; intros. (** n= 0 *) case H; intros. case (lt_n_O (f a)); auto. apply Acc_intro; unfold Zwf; intros. assert False; omega || contradiction. (** inductive case *) case H0; clear H0; intro; auto. apply Acc_intro; intros. apply H. unfold Zwf in H1. case (Z.le_gt_cases c y); intro; auto with zarith. left. red in H0. apply lt_le_trans with (f a); auto with arith. unfold f. apply Zabs2Nat.inj_lt; omega. apply (H (S (f a))); auto. Qed. End wf_proof. Hint Resolve Zwf_well_founded: datatypes v62. (** We also define the other family of relations: [x (Zwf_up c) y] iff [y < x <= c] *) Definition Zwf_up (c x y:Z) := y < x <= c. (** and we prove that [(Zwf_up c)] is well founded *) Section wf_proof_up. Variable c : Z. (** The proof of well-foundness is classic: we do the proof by induction on a measure in nat, which is here [|c-x|] *) Let f (z:Z) := Z.abs_nat (c - z). Lemma Zwf_up_well_founded : well_founded (Zwf_up c). Proof. apply well_founded_lt_compat with (f := f). unfold Zwf_up, f. intros. apply Zabs2Nat.inj_lt; try (apply Z.le_0_sub; intuition). now apply Z.sub_lt_mono_l. Qed. End wf_proof_up. Hint Resolve Zwf_up_well_founded: datatypes v62. 
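(* Added example, not in the original file: a concrete instance of the
   upward-bounded relation [Zwf_up]. Here 5 is a legal recursive call from
   3 when counting up towards the bound 10, since 3 < 5 <= 10. The example
   name is new here; the proof is a sketch using [omega], which this file
   already imports. *)
Example Zwf_up_check : Zwf_up 10 5 3.
Proof. unfold Zwf_up. split; omega. Qed.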
coq-8.4pl2/theories/Init/0000750000175000001440000000000012127276546014367 5ustar notinuserscoq-8.4pl2/theories/Init/Wf.v0000640000175000001440000001017012010532755015116 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. (** The accessibility predicate is defined to be non-informative *) (** (Acc_rect is automatically defined because Acc is a singleton type) *) Inductive Acc (x: A) : Prop := Acc_intro : (forall y:A, R y x -> Acc y) -> Acc x. Lemma Acc_inv : forall x:A, Acc x -> forall y:A, R y x -> Acc y. destruct 1; trivial. Defined. Global Implicit Arguments Acc_inv [x y] [x]. (** A relation is well-founded if every element is accessible *) Definition well_founded := forall a:A, Acc a. (** Well-founded induction on [Set] and [Prop] *) Hypothesis Rwf : well_founded. Theorem well_founded_induction_type : forall P:A -> Type, (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. Proof. intros; apply Acc_rect; auto. Defined. Theorem well_founded_induction : forall P:A -> Set, (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. Proof. exact (fun P:A -> Set => well_founded_induction_type P). Defined. Theorem well_founded_ind : forall P:A -> Prop, (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. Proof. exact (fun P:A -> Prop => well_founded_induction_type P). Defined. (** Well-founded fixpoints *) Section FixPoint. Variable P : A -> Type. Variable F : forall x:A, (forall y:A, R y x -> P y) -> P x. Fixpoint Fix_F (x:A) (a:Acc x) : P x := F (fun (y:A) (h:R y x) => Fix_F (Acc_inv a h)). Scheme Acc_inv_dep := Induction for Acc Sort Prop. Lemma Fix_F_eq : forall (x:A) (r:Acc x), F (fun (y:A) (p:R y x) => Fix_F (x:=y) (Acc_inv r p)) = Fix_F (x:=x) r. Proof. destruct r using Acc_inv_dep; auto. Qed. Definition Fix (x:A) := Fix_F (Rwf x). (** Proof that [well_founded_induction] satisfies the fixpoint equation. It requires an extra property of the functional *) Hypothesis F_ext : forall (x:A) (f g:forall y:A, R y x -> P y), (forall (y:A) (p:R y x), f y p = g y p) -> F f = F g. Lemma Fix_F_inv : forall (x:A) (r s:Acc x), Fix_F r = Fix_F s. Proof. intro x; induction (Rwf x); intros. rewrite <- (Fix_F_eq r); rewrite <- (Fix_F_eq s); intros. apply F_ext; auto. Qed. Lemma Fix_eq : forall x:A, Fix x = F (fun (y:A) (p:R y x) => Fix y). Proof. intro x; unfold Fix. rewrite <- Fix_F_eq. apply F_ext; intros. apply Fix_F_inv. Qed. End FixPoint. End Well_founded. (** Well-founded fixpoints over pairs *) Section Well_founded_2. Variables A B : Type. Variable R : A * B -> A * B -> Prop. Variable P : A -> B -> Type. Section FixPoint_2. Variable F : forall (x:A) (x':B), (forall (y:A) (y':B), R (y, y') (x, x') -> P y y') -> P x x'. Fixpoint Fix_F_2 (x:A) (x':B) (a:Acc R (x, x')) : P x x' := F (fun (y:A) (y':B) (h:R (y, y') (x, x')) => Fix_F_2 (x:=y) (x':=y') (Acc_inv a (y,y') h)). End FixPoint_2. Hypothesis Rwf : well_founded R. Theorem well_founded_induction_type_2 : (forall (x:A) (x':B), (forall (y:A) (y':B), R (y, y') (x, x') -> P y y') -> P x x') -> forall (a:A) (b:B), P a b. Proof. intros; apply Fix_F_2; auto. Defined. End Well_founded_2. Notation Acc_iter := Fix_F (only parsing). (* compatibility *) Notation Acc_iter_2 := Fix_F_2 (only parsing). 
(* compatibility *) coq-8.4pl2/theories/Init/Logic.v0000640000175000001440000003210212010532755015576 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* False. Notation "~ x" := (not x) : type_scope. Hint Unfold not: core. (** [and A B], written [A /\ B], is the conjunction of [A] and [B] [conj p q] is a proof of [A /\ B] as soon as [p] is a proof of [A] and [q] a proof of [B] [proj1] and [proj2] are first and second projections of a conjunction *) Inductive and (A B:Prop) : Prop := conj : A -> B -> A /\ B where "A /\ B" := (and A B) : type_scope. Section Conjunction. Variables A B : Prop. Theorem proj1 : A /\ B -> A. Proof. destruct 1; trivial. Qed. Theorem proj2 : A /\ B -> B. Proof. destruct 1; trivial. Qed. End Conjunction. (** [or A B], written [A \/ B], is the disjunction of [A] and [B] *) Inductive or (A B:Prop) : Prop := | or_introl : A -> A \/ B | or_intror : B -> A \/ B where "A \/ B" := (or A B) : type_scope. Arguments or_introl [A B] _, [A] B _. Arguments or_intror [A B] _, A [B] _. (** [iff A B], written [A <-> B], expresses the equivalence of [A] and [B] *) Definition iff (A B:Prop) := (A -> B) /\ (B -> A). Notation "A <-> B" := (iff A B) : type_scope. Section Equivalence. Theorem iff_refl : forall A:Prop, A <-> A. Proof. split; auto. Qed. Theorem iff_trans : forall A B C:Prop, (A <-> B) -> (B <-> C) -> (A <-> C). Proof. intros A B C [H1 H2] [H3 H4]; split; auto. Qed. Theorem iff_sym : forall A B:Prop, (A <-> B) -> (B <-> A). Proof. intros A B [H1 H2]; split; auto. Qed. End Equivalence. Hint Unfold iff: extcore. (** Some equivalences *) Theorem neg_false : forall A : Prop, ~ A <-> (A <-> False). Proof. intro A; unfold not; split. - intro H; split; [exact H | intro H1; elim H1]. - intros [H _]; exact H. Qed. Theorem and_cancel_l : forall A B C : Prop, (B -> A) -> (C -> A) -> ((A /\ B <-> A /\ C) <-> (B <-> C)). Proof. intros; tauto. Qed. Theorem and_cancel_r : forall A B C : Prop, (B -> A) -> (C -> A) -> ((B /\ A <-> C /\ A) <-> (B <-> C)). Proof. intros; tauto. Qed. Theorem and_comm : forall A B : Prop, A /\ B <-> B /\ A. Proof. intros; tauto. Qed. Theorem and_assoc : forall A B C : Prop, (A /\ B) /\ C <-> A /\ B /\ C. Proof. intros; tauto. Qed. Theorem or_cancel_l : forall A B C : Prop, (B -> ~ A) -> (C -> ~ A) -> ((A \/ B <-> A \/ C) <-> (B <-> C)). Proof. intros; tauto. Qed. Theorem or_cancel_r : forall A B C : Prop, (B -> ~ A) -> (C -> ~ A) -> ((B \/ A <-> C \/ A) <-> (B <-> C)). Proof. intros; tauto. Qed. Theorem or_comm : forall A B : Prop, (A \/ B) <-> (B \/ A). Proof. intros; tauto. Qed. Theorem or_assoc : forall A B C : Prop, (A \/ B) \/ C <-> A \/ B \/ C. Proof. intros; tauto. Qed. (** Backward direction of the equivalences above does not need assumptions *) Theorem and_iff_compat_l : forall A B C : Prop, (B <-> C) -> (A /\ B <-> A /\ C). Proof. intros; tauto. Qed. Theorem and_iff_compat_r : forall A B C : Prop, (B <-> C) -> (B /\ A <-> C /\ A). Proof. intros; tauto. Qed. Theorem or_iff_compat_l : forall A B C : Prop, (B <-> C) -> (A \/ B <-> A \/ C). Proof. intros; tauto. Qed. Theorem or_iff_compat_r : forall A B C : Prop, (B <-> C) -> (B \/ A <-> C \/ A). Proof. intros; tauto. Qed. Lemma iff_and : forall A B : Prop, (A <-> B) -> (A -> B) /\ (B -> A). Proof. intros A B []; split; trivial. Qed. Lemma iff_to_and : forall A B : Prop, (A <-> B) <-> (A -> B) /\ (B -> A). Proof. intros; tauto. Qed. 
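(* Hedged illustration, not part of the original file: as [iff_and] and
   [iff_to_and] above make explicit, an equivalence is just a conjunction of
   the two implications, so it can be destructured directly by an intro
   pattern.  The name [iff_consume_sketch] is illustrative only. *)
Lemma iff_consume_sketch : forall A B : Prop, (A <-> B) -> B -> A.
Proof.
  intros A B [_ HBA] HB.
  exact (HBA HB).
Qed.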
(** [(IF_then_else P Q R)], written [IF P then Q else R] denotes either [P] and [Q], or [~P] and [Q] *) Definition IF_then_else (P Q R:Prop) := P /\ Q \/ ~ P /\ R. Notation "'IF' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3) (at level 200, right associativity) : type_scope. (** * First-order quantifiers *) (** [ex P], or simply [exists x, P x], or also [exists x:A, P x], expresses the existence of an [x] of some type [A] in [Set] which satisfies the predicate [P]. This is existential quantification. [ex2 P Q], or simply [exists2 x, P x & Q x], or also [exists2 x:A, P x & Q x], expresses the existence of an [x] of type [A] which satisfies both predicates [P] and [Q]. Universal quantification is primitively written [forall x:A, Q]. By symmetry with existential quantification, the construction [all P] is provided too. *) (** Remark: [exists x, Q] denotes [ex (fun x => Q)] so that [exists x, P x] is in fact equivalent to [ex (fun x => P x)] which may be not convertible to [ex P] if [P] is not itself an abstraction *) Inductive ex (A:Type) (P:A -> Prop) : Prop := ex_intro : forall x:A, P x -> ex (A:=A) P. Inductive ex2 (A:Type) (P Q:A -> Prop) : Prop := ex_intro2 : forall x:A, P x -> Q x -> ex2 (A:=A) P Q. Definition all (A:Type) (P:A -> Prop) := forall x:A, P x. (* Rule order is important to give printing priority to fully typed exists *) Notation "'exists' x .. y , p" := (ex (fun x => .. (ex (fun y => p)) ..)) (at level 200, x binder, right associativity, format "'[' 'exists' '/ ' x .. y , '/ ' p ']'") : type_scope. Notation "'exists2' x , p & q" := (ex2 (fun x => p) (fun x => q)) (at level 200, x ident, p at level 200, right associativity) : type_scope. Notation "'exists2' x : t , p & q" := (ex2 (fun x:t => p) (fun x:t => q)) (at level 200, x ident, t at level 200, p at level 200, right associativity, format "'[' 'exists2' '/ ' x : t , '/ ' '[' p & '/' q ']' ']'") : type_scope. (** Derived rules for universal quantification *) Section universal_quantification. Variable A : Type. Variable P : A -> Prop. Theorem inst : forall x:A, all (fun x => P x) -> P x. Proof. unfold all; auto. Qed. Theorem gen : forall (B:Prop) (f:forall y:A, B -> P y), B -> all P. Proof. red; auto. Qed. End universal_quantification. (** * Equality *) (** [eq x y], or simply [x=y] expresses the equality of [x] and [y]. Both [x] and [y] must belong to the same type [A]. The definition is inductive and states the reflexivity of the equality. The others properties (symmetry, transitivity, replacement of equals by equals) are proved below. The type of [x] and [y] can be made explicit using the notation [x = y :> A]. This is Leibniz equality as it expresses that [x] and [y] are equal iff every property on [A] which is true of [x] is also true of [y] *) Inductive eq (A:Type) (x:A) : A -> Prop := eq_refl : x = x :>A where "x = y :> A" := (@eq A x y) : type_scope. Notation "x = y" := (x = y :>_) : type_scope. Notation "x <> y :> T" := (~ x = y :>T) : type_scope. Notation "x <> y" := (x <> y :>_) : type_scope. Arguments eq {A} x _. Arguments eq_refl {A x} , [A] x. Arguments eq_ind [A] x P _ y _. Arguments eq_rec [A] x P _ y _. Arguments eq_rect [A] x P _ y _. Hint Resolve I conj or_introl or_intror eq_refl: core. Hint Resolve ex_intro ex_intro2: core. Section Logic_lemmas. Theorem absurd : forall A C:Prop, A -> ~ A -> C. Proof. unfold not; intros A C h1 h2. destruct (h2 h1). Qed. Section equality. Variables A B : Type. Variable f : A -> B. Variables x y z : A. Theorem eq_sym : x = y -> y = x. Proof. destruct 1; trivial. 
Defined. Opaque eq_sym. Theorem eq_trans : x = y -> y = z -> x = z. Proof. destruct 2; trivial. Defined. Opaque eq_trans. Theorem f_equal : x = y -> f x = f y. Proof. destruct 1; trivial. Defined. Opaque f_equal. Theorem not_eq_sym : x <> y -> y <> x. Proof. red; intros h1 h2; apply h1; destruct h2; trivial. Qed. End equality. Definition eq_ind_r : forall (A:Type) (x:A) (P:A -> Prop), P x -> forall y:A, y = x -> P y. intros A x P H y H0; elim eq_sym with (1 := H0); assumption. Defined. Definition eq_rec_r : forall (A:Type) (x:A) (P:A -> Set), P x -> forall y:A, y = x -> P y. intros A x P H y H0; elim eq_sym with (1 := H0); assumption. Defined. Definition eq_rect_r : forall (A:Type) (x:A) (P:A -> Type), P x -> forall y:A, y = x -> P y. intros A x P H y H0; elim eq_sym with (1 := H0); assumption. Defined. End Logic_lemmas. Module EqNotations. Notation "'rew' H 'in' H'" := (eq_rect _ _ H' _ H) (at level 10, H' at level 10). Notation "'rew' <- H 'in' H'" := (eq_rect_r _ H' H) (at level 10, H' at level 10). Notation "'rew' -> H 'in' H'" := (eq_rect _ _ H' _ H) (at level 10, H' at level 10, only parsing). End EqNotations. Theorem f_equal2 : forall (A1 A2 B:Type) (f:A1 -> A2 -> B) (x1 y1:A1) (x2 y2:A2), x1 = y1 -> x2 = y2 -> f x1 x2 = f y1 y2. Proof. destruct 1; destruct 1; reflexivity. Qed. Theorem f_equal3 : forall (A1 A2 A3 B:Type) (f:A1 -> A2 -> A3 -> B) (x1 y1:A1) (x2 y2:A2) (x3 y3:A3), x1 = y1 -> x2 = y2 -> x3 = y3 -> f x1 x2 x3 = f y1 y2 y3. Proof. destruct 1; destruct 1; destruct 1; reflexivity. Qed. Theorem f_equal4 : forall (A1 A2 A3 A4 B:Type) (f:A1 -> A2 -> A3 -> A4 -> B) (x1 y1:A1) (x2 y2:A2) (x3 y3:A3) (x4 y4:A4), x1 = y1 -> x2 = y2 -> x3 = y3 -> x4 = y4 -> f x1 x2 x3 x4 = f y1 y2 y3 y4. Proof. destruct 1; destruct 1; destruct 1; destruct 1; reflexivity. Qed. Theorem f_equal5 : forall (A1 A2 A3 A4 A5 B:Type) (f:A1 -> A2 -> A3 -> A4 -> A5 -> B) (x1 y1:A1) (x2 y2:A2) (x3 y3:A3) (x4 y4:A4) (x5 y5:A5), x1 = y1 -> x2 = y2 -> x3 = y3 -> x4 = y4 -> x5 = y5 -> f x1 x2 x3 x4 x5 = f y1 y2 y3 y4 y5. Proof. destruct 1; destruct 1; destruct 1; destruct 1; destruct 1; reflexivity. Qed. (* Aliases *) Notation sym_eq := eq_sym (compat "8.3"). Notation trans_eq := eq_trans (compat "8.3"). Notation sym_not_eq := not_eq_sym (compat "8.3"). Notation refl_equal := eq_refl (compat "8.3"). Notation sym_equal := eq_sym (compat "8.3"). Notation trans_equal := eq_trans (compat "8.3"). Notation sym_not_equal := not_eq_sym (compat "8.3"). Hint Immediate eq_sym not_eq_sym: core. (** Basic definitions about relations and properties *) Definition subrelation (A B : Type) (R R' : A->B->Prop) := forall x y, R x y -> R' x y. Definition unique (A : Type) (P : A->Prop) (x:A) := P x /\ forall (x':A), P x' -> x=x'. Definition uniqueness (A:Type) (P:A->Prop) := forall x y, P x -> P y -> x = y. (** Unique existence *) Notation "'exists' ! x .. y , p" := (ex (unique (fun x => .. (ex (unique (fun y => p))) ..))) (at level 200, x binder, right associativity, format "'[' 'exists' ! '/ ' x .. y , '/ ' p ']'") : type_scope. Lemma unique_existence : forall (A:Type) (P:A->Prop), ((exists x, P x) /\ uniqueness P) <-> (exists! x, P x). Proof. intros A P; split. - intros ((x,Hx),Huni); exists x; red; auto. - intros (x,(Hx,Huni)); split. + exists x; assumption. + intros x' x'' Hx' Hx''; transitivity x. symmetry; auto. auto. Qed. Lemma forall_exists_unique_domain_coincide : forall A (P:A->Prop), (exists! x, P x) -> forall Q:A->Prop, (forall x, P x -> Q x) <-> (exists x, P x /\ Q x). Proof. intros A P (x & Hp & Huniq); split. 
- intro; exists x; auto. - intros (x0 & HPx0 & HQx0) x1 HPx1. replace x1 with x0 by (transitivity x; [symmetry|]; auto). assumption. Qed. Lemma forall_exists_coincide_unique_domain : forall A (P:A->Prop), (forall Q:A->Prop, (forall x, P x -> Q x) <-> (exists x, P x /\ Q x)) -> (exists! x, P x). Proof. intros A P H. destruct H with (Q:=P) as ((x & Hx & _),_); [trivial|]. exists x. split; [trivial|]. destruct H with (Q:=fun x'=>x=x') as (_,Huniq). apply Huniq. exists x; auto. Qed. (** * Being inhabited *) (** The predicate [inhabited] can be used in different contexts. If [A] is thought as a type, [inhabited A] states that [A] is inhabited. If [A] is thought as a computationally relevant proposition, then [inhabited A] weakens [A] so as to hide its computational meaning. The so-weakened proof remains computationally relevant but only in a propositional context. *) Inductive inhabited (A:Type) : Prop := inhabits : A -> inhabited A. Hint Resolve inhabits: core. Lemma exists_inhabited : forall (A:Type) (P:A->Prop), (exists x, P x) -> inhabited A. Proof. destruct 1; auto. Qed. (** Declaration of stepl and stepr for eq and iff *) Lemma eq_stepl : forall (A : Type) (x y z : A), x = y -> x = z -> z = y. Proof. intros A x y z H1 H2. rewrite <- H2; exact H1. Qed. Declare Left Step eq_stepl. Declare Right Step eq_trans. Lemma iff_stepl : forall A B C : Prop, (A <-> B) -> (A <-> C) -> (C <-> B). Proof. intros; tauto. Qed. Declare Left Step iff_stepl. Declare Right Step iff_trans. coq-8.4pl2/theories/Init/Datatypes.v0000640000175000001440000002376512010532755016516 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false | true, false => true | false, true => true | false, false => false end. Definition negb (b:bool) := if b then false else true. Infix "||" := orb : bool_scope. Infix "&&" := andb : bool_scope. (** Basic properties of [andb] *) Lemma andb_prop : forall a b:bool, andb a b = true -> a = true /\ b = true. Proof. destruct a; destruct b; intros; split; try (reflexivity || discriminate). Qed. Hint Resolve andb_prop: bool. Lemma andb_true_intro : forall b1 b2:bool, b1 = true /\ b2 = true -> andb b1 b2 = true. Proof. destruct b1; destruct b2; simpl; intros [? ?]; assumption. Qed. Hint Resolve andb_true_intro: bool. (** Interpretation of booleans as propositions *) Inductive eq_true : bool -> Prop := is_eq_true : eq_true true. Hint Constructors eq_true : eq_true. (** Another way of interpreting booleans as propositions *) Definition is_true b := b = true. (** [is_true] can be activated as a coercion by (Local) Coercion is_true : bool >-> Prop. *) (** Additional rewriting lemmas about [eq_true] *) Lemma eq_true_ind_r : forall (P : bool -> Prop) (b : bool), P b -> eq_true b -> P true. Proof. intros P b H H0; destruct H0 in H; assumption. Defined. Lemma eq_true_rec_r : forall (P : bool -> Set) (b : bool), P b -> eq_true b -> P true. Proof. intros P b H H0; destruct H0 in H; assumption. Defined. Lemma eq_true_rect_r : forall (P : bool -> Type) (b : bool), P b -> eq_true b -> P true. Proof. intros P b H H0; destruct H0 in H; assumption. Defined. (** The [BoolSpec] inductive will be used to relate a [boolean] value and two propositions corresponding respectively to the [true] case and the [false] case. Interest: [BoolSpec] behave nicely with [case] and [destruct]. See also [Bool.reflect] when [Q = ~P]. 
*) Inductive BoolSpec (P Q : Prop) : bool -> Prop := | BoolSpecT : P -> BoolSpec P Q true | BoolSpecF : Q -> BoolSpec P Q false. Hint Constructors BoolSpec. (********************************************************************) (** * Peano natural numbers *) (** [nat] is the datatype of natural numbers built from [O] and successor [S]; note that the constructor name is the letter O. Numbers in [nat] can be denoted using a decimal notation; e.g. [3%nat] abbreviates [S (S (S O))] *) Inductive nat : Set := | O : nat | S : nat -> nat. Delimit Scope nat_scope with nat. Bind Scope nat_scope with nat. Arguments S _%nat. (********************************************************************) (** * Container datatypes *) (** [option A] is the extension of [A] with an extra element [None] *) Inductive option (A:Type) : Type := | Some : A -> option A | None : option A. Arguments None [A]. Definition option_map (A B:Type) (f:A->B) o := match o with | Some a => Some (f a) | None => None end. (** [sum A B], written [A + B], is the disjoint sum of [A] and [B] *) Inductive sum (A B:Type) : Type := | inl : A -> sum A B | inr : B -> sum A B. Notation "x + y" := (sum x y) : type_scope. Arguments inl {A B} _ , [A] B _. Arguments inr {A B} _ , A [B] _. (** [prod A B], written [A * B], is the product of [A] and [B]; the pair [pair A B a b] of [a] and [b] is abbreviated [(a,b)] *) Inductive prod (A B:Type) : Type := pair : A -> B -> prod A B. Add Printing Let prod. Notation "x * y" := (prod x y) : type_scope. Notation "( x , y , .. , z )" := (pair .. (pair x y) .. z) : core_scope. Arguments pair {A B} _ _. Section projections. Variables A B : Type. Definition fst (p:A * B) := match p with | (x, y) => x end. Definition snd (p:A * B) := match p with | (x, y) => y end. End projections. Hint Resolve pair inl inr: core. Lemma surjective_pairing : forall (A B:Type) (p:A * B), p = pair (fst p) (snd p). Proof. destruct p; reflexivity. Qed. Lemma injective_projections : forall (A B:Type) (p1 p2:A * B), fst p1 = fst p2 -> snd p1 = snd p2 -> p1 = p2. Proof. destruct p1; destruct p2; simpl; intros Hfst Hsnd. rewrite Hfst; rewrite Hsnd; reflexivity. Qed. Definition prod_uncurry (A B C:Type) (f:prod A B -> C) (x:A) (y:B) : C := f (pair x y). Definition prod_curry (A B C:Type) (f:A -> B -> C) (p:prod A B) : C := match p with | pair x y => f x y end. (** Polymorphic lists and some operations *) Inductive list (A : Type) : Type := | nil : list A | cons : A -> list A -> list A. Arguments nil [A]. Infix "::" := cons (at level 60, right associativity) : list_scope. Delimit Scope list_scope with list. Bind Scope list_scope with list. Local Open Scope list_scope. Definition length (A : Type) : list A -> nat := fix length l := match l with | nil => O | _ :: l' => S (length l') end. (** Concatenation of two lists *) Definition app (A : Type) : list A -> list A -> list A := fix app l m := match l with | nil => m | a :: l1 => a :: app l1 m end. Infix "++" := app (right associativity, at level 60) : list_scope. (********************************************************************) (** * The comparison datatype *) Inductive comparison : Set := | Eq : comparison | Lt : comparison | Gt : comparison. Definition CompOpp (r:comparison) := match r with | Eq => Eq | Lt => Gt | Gt => Lt end. Lemma CompOpp_involutive : forall c, CompOpp (CompOpp c) = c. Proof. destruct c; reflexivity. Qed. Lemma CompOpp_inj : forall c c', CompOpp c = CompOpp c' -> c = c'. Proof. destruct c; destruct c'; auto; discriminate. Qed. 
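(* Hedged illustration, not part of the original file: the fixpoints [app]
   and [length] defined above compute by [simpl], which is enough to derive
   the usual structural facts about them.  The name [app_assoc_sketch] is
   illustrative only. *)
Lemma app_assoc_sketch (A : Type) (l m n : list A) :
  (l ++ m) ++ n = l ++ m ++ n.
Proof.
  induction l; simpl.
  - reflexivity.
  - rewrite IHl; reflexivity.
Qed.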
Lemma CompOpp_iff : forall c c', CompOpp c = c' <-> c = CompOpp c'. Proof. split; intros; apply CompOpp_inj; rewrite CompOpp_involutive; auto. Qed. (** The [CompareSpec] inductive relates a [comparison] value with three propositions, one for each possible case. Typically, it can be used to specify a comparison function via some equality and order predicates. Interest: [CompareSpec] behave nicely with [case] and [destruct]. *) Inductive CompareSpec (Peq Plt Pgt : Prop) : comparison -> Prop := | CompEq : Peq -> CompareSpec Peq Plt Pgt Eq | CompLt : Plt -> CompareSpec Peq Plt Pgt Lt | CompGt : Pgt -> CompareSpec Peq Plt Pgt Gt. Hint Constructors CompareSpec. (** For having clean interfaces after extraction, [CompareSpec] is declared in Prop. For some situations, it is nonetheless useful to have a version in Type. Interestingly, these two versions are equivalent. *) Inductive CompareSpecT (Peq Plt Pgt : Prop) : comparison -> Type := | CompEqT : Peq -> CompareSpecT Peq Plt Pgt Eq | CompLtT : Plt -> CompareSpecT Peq Plt Pgt Lt | CompGtT : Pgt -> CompareSpecT Peq Plt Pgt Gt. Hint Constructors CompareSpecT. Lemma CompareSpec2Type : forall Peq Plt Pgt c, CompareSpec Peq Plt Pgt c -> CompareSpecT Peq Plt Pgt c. Proof. destruct c; intros H; constructor; inversion_clear H; auto. Defined. (** As an alternate formulation, one may also directly refer to predicates [eq] and [lt] for specifying a comparison, rather that fully-applied propositions. This [CompSpec] is now a particular case of [CompareSpec]. *) Definition CompSpec {A} (eq lt : A->A->Prop)(x y:A) : comparison -> Prop := CompareSpec (eq x y) (lt x y) (lt y x). Definition CompSpecT {A} (eq lt : A->A->Prop)(x y:A) : comparison -> Type := CompareSpecT (eq x y) (lt x y) (lt y x). Hint Unfold CompSpec CompSpecT. Lemma CompSpec2Type : forall A (eq lt:A->A->Prop) x y c, CompSpec eq lt x y c -> CompSpecT eq lt x y c. Proof. intros. apply CompareSpec2Type; assumption. Defined. (******************************************************************) (** * Misc Other Datatypes *) (** [identity A a] is the family of datatypes on [A] whose sole non-empty member is the singleton datatype [identity A a a] whose sole inhabitant is denoted [refl_identity A a] *) Inductive identity (A:Type) (a:A) : A -> Type := identity_refl : identity a a. Hint Resolve identity_refl: core. Arguments identity_ind [A] a P f y i. Arguments identity_rec [A] a P f y i. Arguments identity_rect [A] a P f y i. (** Identity type *) Definition ID := forall A:Type, A -> A. Definition id : ID := fun A x => x. (* begin hide *) (* Compatibility *) Notation prodT := prod (compat "8.2"). Notation pairT := pair (compat "8.2"). Notation prodT_rect := prod_rect (compat "8.2"). Notation prodT_rec := prod_rec (compat "8.2"). Notation prodT_ind := prod_ind (compat "8.2"). Notation fstT := fst (compat "8.2"). Notation sndT := snd (compat "8.2"). Notation prodT_uncurry := prod_uncurry (compat "8.2"). Notation prodT_curry := prod_curry (compat "8.2"). (* end hide *) coq-8.4pl2/theories/Init/Logic_Type.v0000640000175000001440000000427112010532755016605 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* False. (** Properties of [identity] *) Section identity_is_a_congruence. Variables A B : Type. Variable f : A -> B. Variables x y z : A. Lemma identity_sym : identity x y -> identity y x. Proof. destruct 1; trivial. Defined. Lemma identity_trans : identity x y -> identity y z -> identity x z. Proof. 
destruct 2; trivial. Defined. Lemma identity_congr : identity x y -> identity (f x) (f y). Proof. destruct 1; trivial. Defined. Lemma not_identity_sym : notT (identity x y) -> notT (identity y x). Proof. red; intros H H'; apply H; destruct H'; trivial. Qed. End identity_is_a_congruence. Definition identity_ind_r : forall (A:Type) (a:A) (P:A -> Prop), P a -> forall y:A, identity y a -> P y. intros A x P H y H0; case identity_sym with (1 := H0); trivial. Defined. Definition identity_rec_r : forall (A:Type) (a:A) (P:A -> Set), P a -> forall y:A, identity y a -> P y. intros A x P H y H0; case identity_sym with (1 := H0); trivial. Defined. Definition identity_rect_r : forall (A:Type) (a:A) (P:A -> Type), P a -> forall y:A, identity y a -> P y. intros A x P H y H0; case identity_sym with (1 := H0); trivial. Defined. Hint Immediate identity_sym not_identity_sym: core v62. Notation refl_id := identity_refl (compat "8.3"). Notation sym_id := identity_sym (compat "8.3"). Notation trans_id := identity_trans (compat "8.3"). Notation sym_not_id := not_identity_sym (compat "8.3"). coq-8.4pl2/theories/Init/Notations.v0000640000175000001440000000651212010532755016525 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y" (at level 95, no associativity). Reserved Notation "x /\ y" (at level 80, right associativity). Reserved Notation "x \/ y" (at level 85, right associativity). Reserved Notation "~ x" (at level 75, right associativity). (** Notations for equality and inequalities *) Reserved Notation "x = y :> T" (at level 70, y at next level, no associativity). Reserved Notation "x = y" (at level 70, no associativity). Reserved Notation "x = y = z" (at level 70, no associativity, y at next level). Reserved Notation "x <> y :> T" (at level 70, y at next level, no associativity). Reserved Notation "x <> y" (at level 70, no associativity). Reserved Notation "x <= y" (at level 70, no associativity). Reserved Notation "x < y" (at level 70, no associativity). Reserved Notation "x >= y" (at level 70, no associativity). Reserved Notation "x > y" (at level 70, no associativity). Reserved Notation "x <= y <= z" (at level 70, y at next level). Reserved Notation "x <= y < z" (at level 70, y at next level). Reserved Notation "x < y < z" (at level 70, y at next level). Reserved Notation "x < y <= z" (at level 70, y at next level). (** Arithmetical notations (also used for type constructors) *) Reserved Notation "x + y" (at level 50, left associativity). Reserved Notation "x - y" (at level 50, left associativity). Reserved Notation "x * y" (at level 40, left associativity). Reserved Notation "x / y" (at level 40, left associativity). Reserved Notation "- x" (at level 35, right associativity). Reserved Notation "/ x" (at level 35, right associativity). Reserved Notation "x ^ y" (at level 30, right associativity). (** Notations for booleans *) Reserved Notation "x || y" (at level 50, left associativity). Reserved Notation "x && y" (at level 40, left associativity). (** Notations for pairs *) Reserved Notation "( x , y , .. , z )" (at level 0). (** Notation "{ x }" is reserved and has a special status as component of other notations such as "{ A } + { B }" and "A + { B }" (which are at the same level than "x + y"); "{ x }" is at level 0 to factor with "{ x : A | P }" *) Reserved Notation "{ x }" (at level 0, x at level 99). 
(** Notations for sigma-types or subsets *) Reserved Notation "{ x | P }" (at level 0, x at level 99). Reserved Notation "{ x | P & Q }" (at level 0, x at level 99). Reserved Notation "{ x : A | P }" (at level 0, x at level 99). Reserved Notation "{ x : A | P & Q }" (at level 0, x at level 99). Reserved Notation "{ x : A & P }" (at level 0, x at level 99). Reserved Notation "{ x : A & P & Q }" (at level 0, x at level 99). Delimit Scope type_scope with type. Delimit Scope core_scope with core. Open Scope core_scope. Open Scope type_scope. coq-8.4pl2/theories/Init/vo.itarget0000640000175000001440000000014011307752066016363 0ustar notinusersDatatypes.vo Logic_Type.vo Logic.vo Notations.vo Peano.vo Prelude.vo Specif.vo Tactics.vo Wf.vo coq-8.4pl2/theories/Init/Prelude.v0000640000175000001440000000220312010532755016140 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n | S u => u end. Hint Resolve (f_equal pred): v62. Theorem pred_Sn : forall n:nat, n = pred (S n). Proof. simpl; reflexivity. Qed. (** Injectivity of successor *) Definition eq_add_S n m (H: S n = S m): n = m := f_equal pred H. Hint Immediate eq_add_S: core. Theorem not_eq_S : forall n m:nat, n <> m -> S n <> S m. Proof. red; auto. Qed. Hint Resolve not_eq_S: core. Definition IsSucc (n:nat) : Prop := match n with | O => False | S p => True end. (** Zero is not the successor of a number *) Theorem O_S : forall n:nat, 0 <> S n. Proof. discriminate. Qed. Hint Resolve O_S: core. Theorem n_Sn : forall n:nat, n <> S n. Proof. induction n; auto. Qed. Hint Resolve n_Sn: core. (** Addition *) Fixpoint plus (n m:nat) : nat := match n with | O => m | S p => S (p + m) end where "n + m" := (plus n m) : nat_scope. Hint Resolve (f_equal2 plus): v62. Hint Resolve (f_equal2 (A1:=nat) (A2:=nat)): core. Lemma plus_n_O : forall n:nat, n = n + 0. Proof. induction n; simpl; auto. Qed. Hint Resolve plus_n_O: core. Lemma plus_O_n : forall n:nat, 0 + n = n. Proof. auto. Qed. Lemma plus_n_Sm : forall n m:nat, S (n + m) = n + S m. Proof. intros n m; induction n; simpl; auto. Qed. Hint Resolve plus_n_Sm: core. Lemma plus_Sn_m : forall n m:nat, S n + m = S (n + m). Proof. auto. Qed. (** Standard associated names *) Notation plus_0_r_reverse := plus_n_O (compat "8.2"). Notation plus_succ_r_reverse := plus_n_Sm (compat "8.2"). (** Multiplication *) Fixpoint mult (n m:nat) : nat := match n with | O => 0 | S p => m + p * m end where "n * m" := (mult n m) : nat_scope. Hint Resolve (f_equal2 mult): core. Lemma mult_n_O : forall n:nat, 0 = n * 0. Proof. induction n; simpl; auto. Qed. Hint Resolve mult_n_O: core. Lemma mult_n_Sm : forall n m:nat, n * m + n = n * S m. Proof. intros; induction n as [| p H]; simpl; auto. destruct H; rewrite <- plus_n_Sm; apply eq_S. pattern m at 1 3; elim m; simpl; auto. Qed. Hint Resolve mult_n_Sm: core. (** Standard associated names *) Notation mult_0_r_reverse := mult_n_O (compat "8.2"). Notation mult_succ_r_reverse := mult_n_Sm (compat "8.2"). (** Truncated subtraction: [m-n] is [0] if [n>=m] *) Fixpoint minus (n m:nat) : nat := match n, m with | O, _ => n | S k, O => n | S k, S l => k - l end where "n - m" := (minus n m) : nat_scope. (** Definition of the usual orders, the basic properties of [le] and [lt] can be found in files Le and Lt *) Inductive le (n:nat) : nat -> Prop := | le_n : n <= n | le_S : forall m:nat, n <= m -> n <= S m where "n <= m" := (le n m) : nat_scope. Hint Constructors le: core. 
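(* Hedged illustration, not part of the original file: with the constructors
   of [le] just registered as [core] hints, small [<=] goals on closed
   numerals are discharged by [auto] alone.  The name [le_auto_sketch] is
   illustrative only. *)
Lemma le_auto_sketch : 2 <= 4.
Proof. auto. Qed.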
(*i equivalent to : "Hints Resolve le_n le_S : core." i*) Definition lt (n m:nat) := S n <= m. Hint Unfold lt: core. Infix "<" := lt : nat_scope. Definition ge (n m:nat) := m <= n. Hint Unfold ge: core. Infix ">=" := ge : nat_scope. Definition gt (n m:nat) := m < n. Hint Unfold gt: core. Infix ">" := gt : nat_scope. Notation "x <= y <= z" := (x <= y /\ y <= z) : nat_scope. Notation "x <= y < z" := (x <= y /\ y < z) : nat_scope. Notation "x < y < z" := (x < y /\ y < z) : nat_scope. Notation "x < y <= z" := (x < y /\ y <= z) : nat_scope. Theorem le_pred : forall n m, n <= m -> pred n <= pred m. Proof. induction 1; auto. destruct m; simpl; auto. Qed. Theorem le_S_n : forall n m, S n <= S m -> n <= m. Proof. intros n m. exact (le_pred (S n) (S m)). Qed. (** Case analysis *) Theorem nat_case : forall (n:nat) (P:nat -> Prop), P 0 -> (forall m:nat, P (S m)) -> P n. Proof. induction n; auto. Qed. (** Principle of double induction *) Theorem nat_double_ind : forall R:nat -> nat -> Prop, (forall n:nat, R 0 n) -> (forall n:nat, R (S n) 0) -> (forall n m:nat, R n m -> R (S n) (S m)) -> forall n m:nat, R n m. Proof. induction n; auto. destruct m; auto. Qed. (** Maximum and minimum : definitions and specifications *) Fixpoint max n m : nat := match n, m with | O, _ => m | S n', O => n | S n', S m' => S (max n' m') end. Fixpoint min n m : nat := match n, m with | O, _ => 0 | S n', O => 0 | S n', S m' => S (min n' m') end. Theorem max_l : forall n m : nat, m <= n -> max n m = n. Proof. induction n; destruct m; simpl; auto. inversion 1. intros. apply f_equal. apply IHn. apply le_S_n. trivial. Qed. Theorem max_r : forall n m : nat, n <= m -> max n m = m. Proof. induction n; destruct m; simpl; auto. inversion 1. intros. apply f_equal. apply IHn. apply le_S_n. trivial. Qed. Theorem min_l : forall n m : nat, n <= m -> min n m = n. Proof. induction n; destruct m; simpl; auto. inversion 1. intros. apply f_equal. apply IHn. apply le_S_n. trivial. Qed. Theorem min_r : forall n m : nat, m <= n -> min n m = m. Proof. induction n; destruct m; simpl; auto. inversion 1. intros. apply f_equal. apply IHn. apply le_S_n. trivial. Qed. (** [n]th iteration of the function [f] *) Fixpoint nat_iter (n:nat) {A} (f:A->A) (x:A) : A := match n with | O => x | S n' => f (nat_iter n' f x) end. Lemma nat_iter_succ_r n {A} (f:A->A) (x:A) : nat_iter (S n) f x = nat_iter n f (f x). Proof. induction n; intros; simpl; rewrite <- ?IHn; trivial. Qed. Theorem nat_iter_plus : forall (n m:nat) {A} (f:A -> A) (x:A), nat_iter (n + m) f x = nat_iter n f (nat_iter m f x). Proof. induction n; intros; simpl; rewrite ?IHn; trivial. Qed. (** Preservation of invariants : if [f : A->A] preserves the invariant [Inv], then the iterates of [f] also preserve it. *) Theorem nat_iter_invariant : forall (n:nat) {A} (f:A -> A) (P : A -> Prop), (forall x, P x -> P (f x)) -> forall x, P x -> P (nat_iter n f x). Proof. induction n; simpl; trivial. intros A f P Hf x Hx. apply Hf, IHn; trivial. Qed. coq-8.4pl2/theories/Init/Specif.v0000640000175000001440000001563212010532755015763 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop) : Type := exist : forall x:A, P x -> sig P. Inductive sig2 (A:Type) (P Q:A -> Prop) : Type := exist2 : forall x:A, P x -> Q x -> sig2 P Q. (** [(sigT A P)], or more suggestively [{x:A & (P x)}] is a Sigma-type. Similarly for [(sigT2 A P Q)], also written [{x:A & (P x) & (Q x)}]. 
*) Inductive sigT (A:Type) (P:A -> Type) : Type := existT : forall x:A, P x -> sigT P. Inductive sigT2 (A:Type) (P Q:A -> Type) : Type := existT2 : forall x:A, P x -> Q x -> sigT2 P Q. (* Notations *) Arguments sig (A P)%type. Arguments sig2 (A P Q)%type. Arguments sigT (A P)%type. Arguments sigT2 (A P Q)%type. Notation "{ x | P }" := (sig (fun x => P)) : type_scope. Notation "{ x | P & Q }" := (sig2 (fun x => P) (fun x => Q)) : type_scope. Notation "{ x : A | P }" := (sig (fun x:A => P)) : type_scope. Notation "{ x : A | P & Q }" := (sig2 (fun x:A => P) (fun x:A => Q)) : type_scope. Notation "{ x : A & P }" := (sigT (fun x:A => P)) : type_scope. Notation "{ x : A & P & Q }" := (sigT2 (fun x:A => P) (fun x:A => Q)) : type_scope. Add Printing Let sig. Add Printing Let sig2. Add Printing Let sigT. Add Printing Let sigT2. (** Projections of [sig] An element [y] of a subset [{x:A | (P x)}] is the pair of an [a] of type [A] and of a proof [h] that [a] satisfies [P]. Then [(proj1_sig y)] is the witness [a] and [(proj2_sig y)] is the proof of [(P a)] *) Section Subset_projections. Variable A : Type. Variable P : A -> Prop. Definition proj1_sig (e:sig P) := match e with | exist a b => a end. Definition proj2_sig (e:sig P) := match e return P (proj1_sig e) with | exist a b => b end. End Subset_projections. (** Projections of [sigT] An element [x] of a sigma-type [{y:A & P y}] is a dependent pair made of an [a] of type [A] and an [h] of type [P a]. Then, [(projT1 x)] is the first projection and [(projT2 x)] is the second projection, the type of which depends on the [projT1]. *) Section Projections. Variable A : Type. Variable P : A -> Type. Definition projT1 (x:sigT P) : A := match x with | existT a _ => a end. Definition projT2 (x:sigT P) : P (projT1 x) := match x return P (projT1 x) with | existT _ h => h end. End Projections. (** [sigT] of a predicate is equivalent to [sig] *) Lemma sig_of_sigT : forall (A:Type) (P:A->Prop), sigT P -> sig P. Proof. destruct 1 as (x,H); exists x; trivial. Defined. Lemma sigT_of_sig : forall (A:Type) (P:A->Prop), sig P -> sigT P. Proof. destruct 1 as (x,H); exists x; trivial. Defined. Coercion sigT_of_sig : sig >-> sigT. Coercion sig_of_sigT : sigT >-> sig. (** [sumbool] is a boolean type equipped with the justification of their value *) Inductive sumbool (A B:Prop) : Set := | left : A -> {A} + {B} | right : B -> {A} + {B} where "{ A } + { B }" := (sumbool A B) : type_scope. Add Printing If sumbool. Arguments left {A B} _, [A] B _. Arguments right {A B} _ , A [B] _. (** [sumor] is an option type equipped with the justification of why it may not be a regular value *) Inductive sumor (A:Type) (B:Prop) : Type := | inleft : A -> A + {B} | inright : B -> A + {B} where "A + { B }" := (sumor A B) : type_scope. Add Printing If sumor. Arguments inleft {A B} _ , [A] B _. Arguments inright {A B} _ , A [B] _. (** Various forms of the axiom of choice for specifications *) Section Choice_lemmas. Variables S S' : Set. Variable R : S -> S' -> Prop. Variable R' : S -> S' -> Set. Variables R1 R2 : S -> Prop. Lemma Choice : (forall x:S, {y:S' | R x y}) -> {f:S -> S' | forall z:S, R z (f z)}. Proof. intro H. exists (fun z => proj1_sig (H z)). intro z; destruct (H z); assumption. Defined. Lemma Choice2 : (forall x:S, {y:S' & R' x y}) -> {f:S -> S' & forall z:S, R' z (f z)}. Proof. intro H. exists (fun z => projT1 (H z)). intro z; destruct (H z); assumption. Defined. 
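(* Hedged illustration, not part of the original file: a [sumbool] carries a
   justification in either branch, but it can always be collapsed to a bare
   [bool] by an [if], forgetting the proofs.  The name
   [sumbool_to_bool_sketch] is illustrative only. *)
Definition sumbool_to_bool_sketch (A B : Prop) (d : {A} + {B}) : bool :=
  if d then true else false.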
Lemma bool_choice : (forall x:S, {R1 x} + {R2 x}) -> {f:S -> bool | forall x:S, f x = true /\ R1 x \/ f x = false /\ R2 x}. Proof. intro H. exists (fun z:S => if H z then true else false). intro z; destruct (H z); auto. Defined. End Choice_lemmas. Section Dependent_choice_lemmas. Variables X : Set. Variable R : X -> X -> Prop. Lemma dependent_choice : (forall x:X, {y | R x y}) -> forall x0, {f : nat -> X | f O = x0 /\ forall n, R (f n) (f (S n))}. Proof. intros H x0. set (f:=fix f n := match n with O => x0 | S n' => proj1_sig (H (f n')) end). exists f. split. reflexivity. induction n; simpl; apply proj2_sig. Defined. End Dependent_choice_lemmas. (** A result of type [(Exc A)] is either a normal value of type [A] or an [error] : [Inductive Exc [A:Type] : Type := value : A->(Exc A) | error : (Exc A)]. It is implemented using the option type. *) Definition Exc := option. Definition value := Some. Definition error := @None. Arguments error [A]. Definition except := False_rec. (* for compatibility with previous versions *) Arguments except [P] _. Theorem absurd_set : forall (A:Prop) (C:Set), A -> ~ A -> C. Proof. intros A C h1 h2. apply False_rec. apply (h2 h1). Defined. Hint Resolve left right inleft inright: core v62. Hint Resolve exist exist2 existT existT2: core. (* Compatibility *) Notation sigS := sigT (compat "8.2"). Notation existS := existT (compat "8.2"). Notation sigS_rect := sigT_rect (compat "8.2"). Notation sigS_rec := sigT_rec (compat "8.2"). Notation sigS_ind := sigT_ind (compat "8.2"). Notation projS1 := projT1 (compat "8.2"). Notation projS2 := projT2 (compat "8.2"). Notation sigS2 := sigT2 (compat "8.2"). Notation existS2 := existT2 (compat "8.2"). Notation sigS2_rect := sigT2_rect (compat "8.2"). Notation sigS2_rec := sigT2_rec (compat "8.2"). Notation sigS2_ind := sigT2_ind (compat "8.2"). coq-8.4pl2/theories/Init/Tactics.v0000640000175000001440000001745712010532755016153 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* negneg H | |- (_->False) => negneg H | |- _ => negpos H end in let pos H := match goal with | |- (~_) => posneg H | |- (_->False) => posneg H | |- _ => pospos H end in match type of H with | (~_) => neg H | (_->False) => neg H | _ => (elim H;fail) || pos H end. (* Transforming a negative goal [ H:~A |- ~B ] into a positive one [ B |- A ]*) Ltac swap H := idtac "swap is OBSOLETE: use contradict instead."; intro; apply H; clear H. (* To contradict an hypothesis without copying its type. *) Ltac absurd_hyp H := idtac "absurd_hyp is OBSOLETE: use contradict instead."; let T := type of H in absurd T. (* A useful complement to contradict. Here H:A while G allows to conclude ~A *) Ltac false_hyp H G := let T := type of H in absurd T; [ apply G | assumption ]. (* A case with no loss of information. *) Ltac case_eq x := generalize (eq_refl x); pattern x at -1; case x. (* use either discriminate or injection on a hypothesis *) Ltac destr_eq H := discriminate H || (try (injection H; clear H; intro H)). (* Similar variants of destruct *) Tactic Notation "destruct_with_eqn" constr(x) := destruct x eqn:?. Tactic Notation "destruct_with_eqn" ident(n) := try intros until n; destruct n eqn:?. Tactic Notation "destruct_with_eqn" ":" ident(H) constr(x) := destruct x eqn:H. Tactic Notation "destruct_with_eqn" ":" ident(H) ident(n) := try intros until n; destruct n eqn:H. 
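(* Hedged illustration, not part of the original file: [case_eq] performs a
   case analysis while keeping an equation about the scrutinee as a premise
   of each subgoal.  The name [case_eq_bool_sketch] is illustrative only. *)
Lemma case_eq_bool_sketch : forall b : bool, b = true \/ b = false.
Proof.
  intro b.
  case_eq b; intros; auto.
Qed.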
(** Break every hypothesis of a certain type *) Ltac destruct_all t := match goal with | x : t |- _ => destruct x; destruct_all t | _ => idtac end. (* Rewriting in all hypothesis several times everywhere *) Tactic Notation "rewrite_all" constr(eq) := repeat rewrite eq in *. Tactic Notation "rewrite_all" "<-" constr(eq) := repeat rewrite <- eq in *. (** Tactics for applying equivalences. The following code provides tactics "apply -> t", "apply <- t", "apply -> t in H" and "apply <- t in H". Here t is a term whose type consists of nested dependent and nondependent products with an equivalence A <-> B as the conclusion. The tactics with "->" in their names apply A -> B while those with "<-" in the name apply B -> A. *) (* The idea of the tactics is to first provide a term in the context whose type is the implication (in one of the directions), and then apply it. The first idea is to produce a statement "forall ..., A -> B" (call this type T) and then do "assert (H : T)" for a fresh H. Thus, T can be proved from the original equivalence and then used to perform the application. However, currently in Ltac it is difficult to produce such T from the original formula. Therefore, we first pose the original equivalence as H. If the type of H is a dependent product, we create an existential variable and apply H to this variable. If the type of H has the form C -> D, then we do a cut on C. Once we eliminate all products, we split (i.e., destruct) the conjunction into two parts and apply the relevant one. *) Ltac find_equiv H := let T := type of H in lazymatch T with | ?A -> ?B => let H1 := fresh in let H2 := fresh in cut A; [intro H1; pose proof (H H1) as H2; clear H H1; rename H2 into H; find_equiv H | clear H] | forall x : ?t, _ => let a := fresh "a" with H1 := fresh "H" in evar (a : t); pose proof (H a) as H1; unfold a in H1; clear a; clear H; rename H1 into H; find_equiv H | ?A <-> ?B => idtac | _ => fail "The given statement does not seem to end with an equivalence." end. Ltac bapply lemma todo := let H := fresh in pose proof lemma as H; find_equiv H; [todo H; clear H | .. ]. Tactic Notation "apply" "->" constr(lemma) := bapply lemma ltac:(fun H => destruct H as [H _]; apply H). Tactic Notation "apply" "<-" constr(lemma) := bapply lemma ltac:(fun H => destruct H as [_ H]; apply H). Tactic Notation "apply" "->" constr(lemma) "in" hyp(J) := bapply lemma ltac:(fun H => destruct H as [H _]; apply H in J). Tactic Notation "apply" "<-" constr(lemma) "in" hyp(J) := bapply lemma ltac:(fun H => destruct H as [_ H]; apply H in J). (** An experimental tactic simpler than auto that is useful for ending proofs "in one step" *) Ltac easy := let rec use_hyp H := match type of H with | _ /\ _ => exact H || destruct_hyp H | _ => try solve [inversion H] end with do_intro := let H := fresh in intro H; use_hyp H with destruct_hyp H := case H; clear H; do_intro; do_intro in let rec use_hyps := match goal with | H : _ /\ _ |- _ => exact H || (destruct_hyp H; use_hyps) | H : _ |- _ => solve [inversion H] | _ => idtac end in let rec do_atom := solve [reflexivity | symmetry; trivial] || contradiction || (split; do_atom) with do_ccl := trivial with eq_true; repeat do_intro; do_atom in (use_hyps; do_ccl) || fail "Cannot solve this goal". Tactic Notation "now" tactic(t) := t; easy. (** Slightly more than [easy]*) Ltac easy' := repeat split; simpl; easy || now destruct 1. (** A tactic to document or check what is proved at some point of a script *) Ltac now_show c := change c. 
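(* Hedged illustration, not part of the original file: [now tac] runs [tac]
   and then tries to close every remaining subgoal with [easy].  The name
   [now_use_sketch] is illustrative only. *)
Lemma now_use_sketch : forall A B : Prop, A /\ B -> B.
Proof. now intros A B [_ HB]. Qed.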
(** Support for rewriting decidability statements *) Set Implicit Arguments. Lemma decide_left : forall (C:Prop) (decide:{C}+{~C}), C -> forall P:{C}+{~C}->Prop, (forall H:C, P (left _ H)) -> P decide. Proof. intros; destruct decide. apply H0. contradiction. Qed. Lemma decide_right : forall (C:Prop) (decide:{C}+{~C}), ~C -> forall P:{C}+{~C}->Prop, (forall H:~C, P (right _ H)) -> P decide. Proof. intros; destruct decide. contradiction. apply H0. Qed. Tactic Notation "decide" constr(lemma) "with" constr(H) := let try_to_merge_hyps H := try (clear H; intro H) || (let H' := fresh H "bis" in intro H'; try clear H') || (let H' := fresh in intro H'; try clear H') in match type of H with | ~ ?C => apply (decide_right lemma H); try_to_merge_hyps H | ?C -> False => apply (decide_right lemma H); try_to_merge_hyps H | _ => apply (decide_left lemma H); try_to_merge_hyps H end. (** Clear an hypothesis and its dependencies *) Tactic Notation "clear" "dependent" hyp(h) := let rec depclear h := clear h || match goal with | H : context [ h ] |- _ => depclear H; depclear h end || fail "hypothesis to clear is used in the conclusion (maybe indirectly)" in depclear h. (** Revert an hypothesis and its dependencies : this is actually generalize dependent... *) Tactic Notation "revert" "dependent" hyp(h) := generalize dependent h. coq-8.4pl2/theories/Reals/0000750000175000001440000000000012127276547014533 5ustar notinuserscoq-8.4pl2/theories/Reals/Rminmax.v0000640000175000001440000000707512010532755016332 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Rmax x y = x. Proof. unfold Rmax. intros. destruct Rle_dec as [H'|H']; [| apply Rnot_le_lt in H' ]; unfold Rle in *; intuition. Qed. Lemma Rmax_r : forall x y, x<=y -> Rmax x y = y. Proof. unfold Rmax. intros. destruct Rle_dec as [H'|H']; [| apply Rnot_le_lt in H' ]; unfold Rle in *; intuition. Qed. Lemma Rmin_l : forall x y, x<=y -> Rmin x y = x. Proof. unfold Rmin. intros. destruct Rle_dec as [H'|H']; [| apply Rnot_le_lt in H' ]; unfold Rle in *; intuition. Qed. Lemma Rmin_r : forall x y, y<=x -> Rmin x y = y. Proof. unfold Rmin. intros. destruct Rle_dec as [H'|H']; [| apply Rnot_le_lt in H' ]; unfold Rle in *; intuition. Qed. Module RHasMinMax <: HasMinMax R_as_OT. Definition max := Rmax. Definition min := Rmin. Definition max_l := Rmax_l. Definition max_r := Rmax_r. Definition min_l := Rmin_l. Definition min_r := Rmin_r. End RHasMinMax. Module R. (** We obtain hence all the generic properties of max and min. *) Include UsualMinMaxProperties R_as_OT RHasMinMax. (** * Properties specific to the [R] domain *) (** Compatibilities (consequences of monotonicity) *) Lemma plus_max_distr_l : forall n m p, Rmax (p + n) (p + m) = p + Rmax n m. Proof. intros. apply max_monotone. intros x y. apply Rplus_le_compat_l. Qed. Lemma plus_max_distr_r : forall n m p, Rmax (n + p) (m + p) = Rmax n m + p. Proof. intros. rewrite (Rplus_comm n p), (Rplus_comm m p), (Rplus_comm _ p). apply plus_max_distr_l. Qed. Lemma plus_min_distr_l : forall n m p, Rmin (p + n) (p + m) = p + Rmin n m. Proof. intros. apply min_monotone. intros x y. apply Rplus_le_compat_l. Qed. Lemma plus_min_distr_r : forall n m p, Rmin (n + p) (m + p) = Rmin n m + p. Proof. intros. rewrite (Rplus_comm n p), (Rplus_comm m p), (Rplus_comm _ p). apply plus_min_distr_l. Qed. (** Anti-monotonicity swaps the role of [min] and [max] *) Lemma opp_max_distr : forall n m : R, -(Rmax n m) = Rmin (- n) (- m). Proof. 
intros. symmetry. apply min_max_antimonotone. do 3 red. intros; apply Rge_le. apply Ropp_le_ge_contravar; auto. Qed. Lemma opp_min_distr : forall n m : R, - (Rmin n m) = Rmax (- n) (- m). Proof. intros. symmetry. apply max_min_antimonotone. do 3 red. intros; apply Rge_le. apply Ropp_le_ge_contravar; auto. Qed. Lemma minus_max_distr_l : forall n m p, Rmax (p - n) (p - m) = p - Rmin n m. Proof. unfold Rminus. intros. rewrite opp_min_distr. apply plus_max_distr_l. Qed. Lemma minus_max_distr_r : forall n m p, Rmax (n - p) (m - p) = Rmax n m - p. Proof. unfold Rminus. intros. apply plus_max_distr_r. Qed. Lemma minus_min_distr_l : forall n m p, Rmin (p - n) (p - m) = p - Rmax n m. Proof. unfold Rminus. intros. rewrite opp_max_distr. apply plus_min_distr_l. Qed. Lemma minus_min_distr_r : forall n m p, Rmin (n - p) (m - p) = Rmin n m - p. Proof. unfold Rminus. intros. apply plus_min_distr_r. Qed. End R. coq-8.4pl2/theories/Reals/Rseries.v0000640000175000001440000003027612010532755016332 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R. (*********) Fixpoint Rmax_N (N:nat) : R := match N with | O => Un 0 | S n => Rmax (Un (S n)) (Rmax_N n) end. (*********) Definition EUn r : Prop := exists i : nat, r = Un i. (*********) Definition Un_cv (l:R) : Prop := forall eps:R, eps > 0 -> exists N : nat, (forall n:nat, (n >= N)%nat -> R_dist (Un n) l < eps). (*********) Definition Cauchy_crit : Prop := forall eps:R, eps > 0 -> exists N : nat, (forall n m:nat, (n >= N)%nat -> (m >= N)%nat -> R_dist (Un n) (Un m) < eps). (*********) Definition Un_growing : Prop := forall n:nat, Un n <= Un (S n). (*********) Lemma EUn_noempty : exists r : R, EUn r. Proof. unfold EUn; split with (Un 0); split with 0%nat; trivial. Qed. (*********) Lemma Un_in_EUn : forall n:nat, EUn (Un n). Proof. intro; unfold EUn; split with n; trivial. Qed. (*********) Lemma Un_bound_imp : forall x:R, (forall n:nat, Un n <= x) -> is_upper_bound EUn x. Proof. intros; unfold is_upper_bound; intros; unfold EUn in H0; elim H0; clear H0; intros; generalize (H x1); intro; rewrite <- H0 in H1; trivial. Qed. (*********) Lemma growing_prop : forall n m:nat, Un_growing -> (n >= m)%nat -> Un n >= Un m. Proof. double induction n m; intros. unfold Rge; right; trivial. exfalso; unfold ge in H1; generalize (le_Sn_O n0); intro; auto. cut (n0 >= 0)%nat. generalize H0; intros; unfold Un_growing in H0; apply (Rge_trans (Un (S n0)) (Un n0) (Un 0) (Rle_ge (Un n0) (Un (S n0)) (H0 n0)) (H 0%nat H2 H3)). elim n0; auto. elim (lt_eq_lt_dec n1 n0); intro y. elim y; clear y; intro y. unfold ge in H2; generalize (le_not_lt n0 n1 (le_S_n n0 n1 H2)); intro; exfalso; auto. rewrite y; unfold Rge; right; trivial. unfold ge in H0; generalize (H0 (S n0) H1 (lt_le_S n0 n1 y)); intro; unfold Un_growing in H1; apply (Rge_trans (Un (S n1)) (Un n1) (Un (S n0)) (Rle_ge (Un n1) (Un (S n1)) (H1 n1)) H3). Qed. (*********) Lemma Un_cv_crit_lub : Un_growing -> forall l, is_lub EUn l -> Un_cv l. Proof. intros Hug l H eps Heps. cut (exists N, Un N > l - eps). intros (N, H3). exists N. intros n H4. unfold R_dist. rewrite Rabs_left1, Ropp_minus_distr. apply Rplus_lt_reg_r with (Un n - eps). apply Rlt_le_trans with (Un N). now replace (Un n - eps + (l - Un n)) with (l - eps) by ring. replace (Un n - eps + eps) with (Un n) by ring. apply Rge_le. now apply growing_prop. apply Rle_minus. apply (proj1 H). now exists n. assert (Hi2pn: forall n, 0 < (/ 2)^n). clear. intros n. apply pow_lt. 
apply Rinv_0_lt_compat. now apply (IZR_lt 0 2). pose (test := fun n => match Rle_lt_dec (Un n) (l - eps) with left _ => false | right _ => true end). pose (sum := let fix aux n := match n with S n' => aux n' + if test n' then (/ 2)^n else 0 | O => 0 end in aux). assert (Hsum': forall m n, sum m <= sum (m + n)%nat <= sum m + (/2)^m - (/2)^(m + n)). clearbody test. clear -Hi2pn. intros m. induction n. rewrite<- plus_n_O. ring_simplify (sum m + (/ 2) ^ m - (/ 2) ^ m). split ; apply Rle_refl. rewrite <- plus_n_Sm. simpl. split. apply Rle_trans with (sum (m + n)%nat + 0). rewrite Rplus_0_r. apply IHn. apply Rplus_le_compat_l. case (test (m + n)%nat). apply Rlt_le. exact (Hi2pn (S (m + n))). apply Rle_refl. apply Rle_trans with (sum (m + n)%nat + / 2 * (/ 2) ^ (m + n)). apply Rplus_le_compat_l. case (test (m + n)%nat). apply Rle_refl. apply Rlt_le. exact (Hi2pn (S (m + n))). apply Rplus_le_reg_r with (-(/ 2 * (/ 2) ^ (m + n))). rewrite Rplus_assoc, Rplus_opp_r, Rplus_0_r. apply Rle_trans with (1 := proj2 IHn). apply Req_le. field. assert (Hsum: forall n, 0 <= sum n <= 1 - (/2)^n). intros N. generalize (Hsum' O N). simpl. now rewrite Rplus_0_l. destruct (completeness (fun x : R => exists n : nat, x = sum n)) as (m, (Hm1, Hm2)). exists 1. intros x (n, H1). rewrite H1. apply Rle_trans with (1 := proj2 (Hsum n)). apply Rlt_le. apply Rplus_lt_reg_r with ((/2)^n - 1). now ring_simplify. exists 0. now exists O. destruct (Rle_or_lt m 0) as [[Hm|Hm]|Hm]. elim Rlt_not_le with (1 := Hm). apply Hm1. now exists O. assert (Hs0: forall n, sum n = 0). intros n. specialize (Hm1 (sum n) (ex_intro _ _ (eq_refl _))). apply Rle_antisym with (2 := proj1 (Hsum n)). now rewrite <- Hm. assert (Hub: forall n, Un n <= l - eps). intros n. generalize (eq_refl (sum (S n))). simpl sum at 1. rewrite 2!Hs0, Rplus_0_l. unfold test. destruct Rle_lt_dec. easy. intros H'. elim Rgt_not_eq with (2 := H'). exact (Hi2pn (S n)). clear -Heps H Hub. destruct H as (_, H). refine (False_ind _ (Rle_not_lt _ _ (H (l - eps) _) _)). intros x (n, H1). now rewrite H1. apply Rplus_lt_reg_r with (eps - l). now ring_simplify. assert (Rabs (/2) < 1). rewrite Rabs_pos_eq. rewrite <- Rinv_1 at 3. apply Rinv_lt_contravar. rewrite Rmult_1_l. now apply (IZR_lt 0 2). now apply (IZR_lt 1 2). apply Rlt_le. apply Rinv_0_lt_compat. now apply (IZR_lt 0 2). destruct (pow_lt_1_zero (/2) H0 m Hm) as [N H4]. exists N. apply Rnot_le_lt. intros H5. apply Rlt_not_le with (1 := H4 _ (le_refl _)). rewrite Rabs_pos_eq. 2: now apply Rlt_le. apply Hm2. intros x (n, H6). rewrite H6. clear x H6. assert (Hs: sum N = 0). clear H4. induction N. easy. simpl. assert (H6: Un N <= l - eps). apply Rle_trans with (2 := H5). apply Rge_le. apply growing_prop ; try easy. apply le_n_Sn. rewrite (IHN H6), Rplus_0_l. unfold test. destruct Rle_lt_dec. apply eq_refl. now elim Rlt_not_le with (1 := r). destruct (le_or_lt N n) as [Hn|Hn]. rewrite le_plus_minus with (1 := Hn). apply Rle_trans with (1 := proj2 (Hsum' N (n - N)%nat)). rewrite Hs, Rplus_0_l. set (k := (N + (n - N))%nat). apply Rlt_le. apply Rplus_lt_reg_r with ((/2)^k - (/2)^N). now ring_simplify. apply Rle_trans with (sum N). rewrite le_plus_minus with (1 := Hn). rewrite plus_Snm_nSm. exact (proj1 (Hsum' _ _)). rewrite Hs. now apply Rlt_le. Qed. (*********) Lemma Un_cv_crit : Un_growing -> bound EUn -> exists l : R, Un_cv l. Proof. intros Hug Heub. exists (projT1 (completeness EUn Heub EUn_noempty)). destruct (completeness EUn Heub EUn_noempty) as (l, H). now apply Un_cv_crit_lub. Qed. 
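(* Hedged illustration, not part of the original file: [growing_prop] turns
   the pointwise growth hypothesis into a comparison between arbitrary
   indices of the sequence.  The name [growing_le_sketch] is illustrative
   only. *)
Lemma growing_le_sketch : Un_growing -> Un 0%nat <= Un 2%nat.
Proof.
  intro Hg.
  apply Rge_le.
  apply growing_prop.
  exact Hg.
  unfold ge; auto.
Qed.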
(*********) Lemma finite_greater : forall N:nat, exists M : R, (forall n:nat, (n <= N)%nat -> Un n <= M). Proof. intro; induction N as [| N HrecN]. split with (Un 0); intros; rewrite (le_n_O_eq n H); apply (Req_le (Un n) (Un n) (eq_refl (Un n))). elim HrecN; clear HrecN; intros; split with (Rmax (Un (S N)) x); intros; elim (Rmax_Rle (Un (S N)) x (Un n)); intros; clear H1; inversion H0. rewrite <- H1; rewrite <- H1 in H2; apply (H2 (or_introl (Un n <= x) (Req_le (Un n) (Un n) (eq_refl (Un n))))). apply (H2 (or_intror (Un n <= Un (S N)) (H n H3))). Qed. (*********) Lemma cauchy_bound : Cauchy_crit -> bound EUn. Proof. unfold Cauchy_crit, bound; intros; unfold is_upper_bound; unfold Rgt in H; elim (H 1 Rlt_0_1); clear H; intros; generalize (H x); intro; generalize (le_dec x); intro; elim (finite_greater x); intros; split with (Rmax x0 (Un x + 1)); clear H; intros; unfold EUn in H; elim H; clear H; intros; elim (H1 x2); clear H1; intro y. unfold ge in H0; generalize (H0 x2 (le_n x) y); clear H0; intro; rewrite <- H in H0; unfold R_dist in H0; elim (Rabs_def2 (Un x - x1) 1 H0); clear H0; intros; elim (Rmax_Rle x0 (Un x + 1) x1); intros; apply H4; clear H3 H4; right; clear H H0 y; apply (Rlt_le x1 (Un x + 1)); generalize (Rlt_minus (-1) (Un x - x1) H1); clear H1; intro; apply (Rminus_lt x1 (Un x + 1)); cut (-1 - (Un x - x1) = x1 - (Un x + 1)); [ intro; rewrite H0 in H; assumption | ring ]. generalize (H2 x2 y); clear H2 H0; intro; rewrite <- H in H0; elim (Rmax_Rle x0 (Un x + 1) x1); intros; clear H1; apply H2; left; assumption. Qed. End sequence. (*****************************************************************) (** * Definition of Power Series and properties *) (* *) (*****************************************************************) Section Isequence. (*********) Variable An : nat -> R. (*********) Definition Pser (x l:R) : Prop := infinite_sum (fun n:nat => An n * x ^ n) l. End Isequence. Lemma GP_infinite : forall x:R, Rabs x < 1 -> Pser (fun n:nat => 1) x (/ (1 - x)). Proof. intros; unfold Pser; unfold infinite_sum; intros; elim (Req_dec x 0). intros; exists 0%nat; intros; rewrite H1; rewrite Rminus_0_r; rewrite Rinv_1; cut (sum_f_R0 (fun n0:nat => 1 * 0 ^ n0) n = 1). intros; rewrite H3; rewrite R_dist_eq; auto. elim n; simpl. ring. intros; rewrite H3; ring. intro; cut (0 < eps * (Rabs (1 - x) * Rabs (/ x))). intro; elim (pow_lt_1_zero x H (eps * (Rabs (1 - x) * Rabs (/ x))) H2); intro N; intros; exists N; intros; cut (sum_f_R0 (fun n0:nat => 1 * x ^ n0) n = sum_f_R0 (fun n0:nat => x ^ n0) n). intros; rewrite H5; apply (Rmult_lt_reg_l (Rabs (1 - x)) (R_dist (sum_f_R0 (fun n0:nat => x ^ n0) n) (/ (1 - x))) eps). apply Rabs_pos_lt. apply Rminus_eq_contra. apply Rlt_dichotomy_converse. right; unfold Rgt. apply (Rle_lt_trans x (Rabs x) 1). apply RRle_abs. assumption. unfold R_dist; rewrite <- Rabs_mult. rewrite Rmult_minus_distr_l. cut ((1 - x) * sum_f_R0 (fun n0:nat => x ^ n0) n = - (sum_f_R0 (fun n0:nat => x ^ n0) n * (x - 1))). intro; rewrite H6. rewrite GP_finite. rewrite Rinv_r. cut (- (x ^ (n + 1) - 1) - 1 = - x ^ (n + 1)). intro; rewrite H7. rewrite Rabs_Ropp; cut ((n + 1)%nat = S n); auto. intro H8; rewrite H8; simpl; rewrite Rabs_mult; apply (Rlt_le_trans (Rabs x * Rabs (x ^ n)) (Rabs x * (eps * (Rabs (1 - x) * Rabs (/ x)))) ( Rabs (1 - x) * eps)). apply Rmult_lt_compat_l. apply Rabs_pos_lt. assumption. auto. cut (Rabs x * (eps * (Rabs (1 - x) * Rabs (/ x))) = Rabs x * Rabs (/ x) * (eps * Rabs (1 - x))). clear H8; intros; rewrite H8; rewrite <- Rabs_mult; rewrite Rinv_r. 
rewrite Rabs_R1; cut (1 * (eps * Rabs (1 - x)) = Rabs (1 - x) * eps). intros; rewrite H9; unfold Rle; right; reflexivity. ring. assumption. ring. ring. ring. apply Rminus_eq_contra. apply Rlt_dichotomy_converse. right; unfold Rgt. apply (Rle_lt_trans x (Rabs x) 1). apply RRle_abs. assumption. ring; ring. elim n; simpl. ring. intros; rewrite H5. ring. apply Rmult_lt_0_compat. auto. apply Rmult_lt_0_compat. apply Rabs_pos_lt. apply Rminus_eq_contra. apply Rlt_dichotomy_converse. right; unfold Rgt. apply (Rle_lt_trans x (Rabs x) 1). apply RRle_abs. assumption. apply Rabs_pos_lt. apply Rinv_neq_0_compat. assumption. Qed. coq-8.4pl2/theories/Reals/RIneq.v0000640000175000001440000015721512010532755015737 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* r. Proof. exact Rlt_irrefl. Qed. Lemma Rlt_not_eq : forall r1 r2, r1 < r2 -> r1 <> r2. Proof. red; intros r1 r2 H H0; apply (Rlt_irrefl r1). pattern r1 at 2; rewrite H0; trivial. Qed. Lemma Rgt_not_eq : forall r1 r2, r1 > r2 -> r1 <> r2. Proof. intros; apply not_eq_sym; apply Rlt_not_eq; auto with real. Qed. (**********) Lemma Rlt_dichotomy_converse : forall r1 r2, r1 < r2 \/ r1 > r2 -> r1 <> r2. Proof. generalize Rlt_not_eq Rgt_not_eq. intuition eauto. Qed. Hint Resolve Rlt_dichotomy_converse: real. (** Reasoning by case on equality and order *) (**********) Lemma Req_dec : forall r1 r2, r1 = r2 \/ r1 <> r2. Proof. intros; generalize (total_order_T r1 r2) Rlt_dichotomy_converse; intuition eauto 3. Qed. Hint Resolve Req_dec: real. (**********) Lemma Rtotal_order : forall r1 r2, r1 < r2 \/ r1 = r2 \/ r1 > r2. Proof. intros; generalize (total_order_T r1 r2); tauto. Qed. (**********) Lemma Rdichotomy : forall r1 r2, r1 <> r2 -> r1 < r2 \/ r1 > r2. Proof. intros; generalize (total_order_T r1 r2); tauto. Qed. (*********************************************************) (** ** Relating [<], [>], [<=] and [>=] *) (*********************************************************) (*********************************************************) (** ** Order *) (*********************************************************) (** *** Relating strict and large orders *) Lemma Rlt_le : forall r1 r2, r1 < r2 -> r1 <= r2. Proof. intros; red; tauto. Qed. Hint Resolve Rlt_le: real. Lemma Rgt_ge : forall r1 r2, r1 > r2 -> r1 >= r2. Proof. intros; red; tauto. Qed. (**********) Lemma Rle_ge : forall r1 r2, r1 <= r2 -> r2 >= r1. Proof. destruct 1; red; auto with real. Qed. Hint Immediate Rle_ge: real. Hint Resolve Rle_ge: rorders. Lemma Rge_le : forall r1 r2, r1 >= r2 -> r2 <= r1. Proof. destruct 1; red; auto with real. Qed. Hint Resolve Rge_le: real. Hint Immediate Rge_le: rorders. (**********) Lemma Rlt_gt : forall r1 r2, r1 < r2 -> r2 > r1. Proof. trivial. Qed. Hint Resolve Rlt_gt: rorders. Lemma Rgt_lt : forall r1 r2, r1 > r2 -> r2 < r1. Proof. trivial. Qed. Hint Immediate Rgt_lt: rorders. (**********) Lemma Rnot_le_lt : forall r1 r2, ~ r1 <= r2 -> r2 < r1. Proof. intros r1 r2; generalize (Rtotal_order r1 r2); unfold Rle; tauto. Qed. Hint Immediate Rnot_le_lt: real. Lemma Rnot_ge_gt : forall r1 r2, ~ r1 >= r2 -> r2 > r1. Proof. intros; red; apply Rnot_le_lt. auto with real. Qed. Lemma Rnot_le_gt : forall r1 r2, ~ r1 <= r2 -> r1 > r2. Proof. intros; red; apply Rnot_le_lt. auto with real. Qed. Lemma Rnot_ge_lt : forall r1 r2, ~ r1 >= r2 -> r1 < r2. Proof. intros; apply Rnot_le_lt. auto with real. Qed. 
Lemma Rnot_lt_le : forall r1 r2, ~ r1 < r2 -> r2 <= r1. Proof. intros r1 r2 H; destruct (Rtotal_order r1 r2) as [ | [ H0 | H0 ] ]. contradiction. subst; auto with rorders. auto with real. Qed. Lemma Rnot_gt_le : forall r1 r2, ~ r1 > r2 -> r1 <= r2. Proof. auto using Rnot_lt_le with real. Qed. Lemma Rnot_gt_ge : forall r1 r2, ~ r1 > r2 -> r2 >= r1. Proof. intros; eauto using Rnot_lt_le with rorders. Qed. Lemma Rnot_lt_ge : forall r1 r2, ~ r1 < r2 -> r1 >= r2. Proof. eauto using Rnot_gt_ge with rorders. Qed. (**********) Lemma Rlt_not_le : forall r1 r2, r2 < r1 -> ~ r1 <= r2. Proof. generalize Rlt_asym Rlt_dichotomy_converse; unfold Rle. intuition eauto 3. Qed. Hint Immediate Rlt_not_le: real. Lemma Rgt_not_le : forall r1 r2, r1 > r2 -> ~ r1 <= r2. Proof. exact Rlt_not_le. Qed. Lemma Rlt_not_ge : forall r1 r2, r1 < r2 -> ~ r1 >= r2. Proof. red; intros; eapply Rlt_not_le; eauto with real. Qed. Hint Immediate Rlt_not_ge: real. Lemma Rgt_not_ge : forall r1 r2, r2 > r1 -> ~ r1 >= r2. Proof. exact Rlt_not_ge. Qed. Lemma Rle_not_lt : forall r1 r2, r2 <= r1 -> ~ r1 < r2. Proof. intros r1 r2. generalize (Rlt_asym r1 r2) (Rlt_dichotomy_converse r1 r2). unfold Rle; intuition. Qed. Lemma Rge_not_lt : forall r1 r2, r1 >= r2 -> ~ r1 < r2. Proof. intros; apply Rle_not_lt; auto with real. Qed. Lemma Rle_not_gt : forall r1 r2, r1 <= r2 -> ~ r1 > r2. Proof. do 2 intro; apply Rle_not_lt. Qed. Lemma Rge_not_gt : forall r1 r2, r2 >= r1 -> ~ r1 > r2. Proof. do 2 intro; apply Rge_not_lt. Qed. (**********) Lemma Req_le : forall r1 r2, r1 = r2 -> r1 <= r2. Proof. unfold Rle; tauto. Qed. Hint Immediate Req_le: real. Lemma Req_ge : forall r1 r2, r1 = r2 -> r1 >= r2. Proof. unfold Rge; tauto. Qed. Hint Immediate Req_ge: real. Lemma Req_le_sym : forall r1 r2, r2 = r1 -> r1 <= r2. Proof. unfold Rle; auto. Qed. Hint Immediate Req_le_sym: real. Lemma Req_ge_sym : forall r1 r2, r2 = r1 -> r1 >= r2. Proof. unfold Rge; auto. Qed. Hint Immediate Req_ge_sym: real. (** *** Asymmetry *) (** Remark: [Rlt_asym] is an axiom *) Lemma Rgt_asym : forall r1 r2:R, r1 > r2 -> ~ r2 > r1. Proof. do 2 intro; apply Rlt_asym. Qed. (** *** Antisymmetry *) Lemma Rle_antisym : forall r1 r2, r1 <= r2 -> r2 <= r1 -> r1 = r2. Proof. intros r1 r2; generalize (Rlt_asym r1 r2); unfold Rle; intuition. Qed. Hint Resolve Rle_antisym: real. Lemma Rge_antisym : forall r1 r2, r1 >= r2 -> r2 >= r1 -> r1 = r2. Proof. auto with real. Qed. (**********) Lemma Rle_le_eq : forall r1 r2, r1 <= r2 /\ r2 <= r1 <-> r1 = r2. Proof. intuition. Qed. Lemma Rge_ge_eq : forall r1 r2, r1 >= r2 /\ r2 >= r1 <-> r1 = r2. Proof. intuition. Qed. (** *** Compatibility with equality *) Lemma Rlt_eq_compat : forall r1 r2 r3 r4, r1 = r2 -> r2 < r4 -> r4 = r3 -> r1 < r3. Proof. intros x x' y y'; intros; replace x with x'; replace y with y'; assumption. Qed. Lemma Rgt_eq_compat : forall r1 r2 r3 r4, r1 = r2 -> r2 > r4 -> r4 = r3 -> r1 > r3. Proof. intros; red; apply Rlt_eq_compat with (r2:=r4) (r4:=r2); auto. Qed. (** *** Transitivity *) (** Remark: [Rlt_trans] is an axiom *) Lemma Rle_trans : forall r1 r2 r3, r1 <= r2 -> r2 <= r3 -> r1 <= r3. Proof. generalize eq_trans Rlt_trans Rlt_eq_compat. unfold Rle. intuition eauto 2. Qed. Lemma Rge_trans : forall r1 r2 r3, r1 >= r2 -> r2 >= r3 -> r1 >= r3. Proof. eauto using Rle_trans with rorders. Qed. Lemma Rgt_trans : forall r1 r2 r3, r1 > r2 -> r2 > r3 -> r1 > r3. Proof. eauto using Rlt_trans with rorders. Qed. (**********) Lemma Rle_lt_trans : forall r1 r2 r3, r1 <= r2 -> r2 < r3 -> r1 < r3. Proof. generalize Rlt_trans Rlt_eq_compat. 
unfold Rle. intuition eauto 2. Qed. Lemma Rlt_le_trans : forall r1 r2 r3, r1 < r2 -> r2 <= r3 -> r1 < r3. Proof. generalize Rlt_trans Rlt_eq_compat; unfold Rle; intuition eauto 2. Qed. Lemma Rge_gt_trans : forall r1 r2 r3, r1 >= r2 -> r2 > r3 -> r1 > r3. Proof. eauto using Rlt_le_trans with rorders. Qed. Lemma Rgt_ge_trans : forall r1 r2 r3, r1 > r2 -> r2 >= r3 -> r1 > r3. Proof. eauto using Rle_lt_trans with rorders. Qed. (** *** (Classical) decidability *) Lemma Rlt_dec : forall r1 r2, {r1 < r2} + {~ r1 < r2}. Proof. intros; generalize (total_order_T r1 r2) (Rlt_dichotomy_converse r1 r2); intuition. Qed. Lemma Rle_dec : forall r1 r2, {r1 <= r2} + {~ r1 <= r2}. Proof. intros r1 r2. generalize (total_order_T r1 r2) (Rlt_dichotomy_converse r1 r2). intuition eauto 4 with real. Qed. Lemma Rgt_dec : forall r1 r2, {r1 > r2} + {~ r1 > r2}. Proof. do 2 intro; apply Rlt_dec. Qed. Lemma Rge_dec : forall r1 r2, {r1 >= r2} + {~ r1 >= r2}. Proof. intros; edestruct Rle_dec; [left|right]; eauto with rorders. Qed. Lemma Rlt_le_dec : forall r1 r2, {r1 < r2} + {r2 <= r1}. Proof. intros; generalize (total_order_T r1 r2); intuition. Qed. Lemma Rgt_ge_dec : forall r1 r2, {r1 > r2} + {r2 >= r1}. Proof. intros; edestruct Rlt_le_dec; [left|right]; eauto with rorders. Qed. Lemma Rle_lt_dec : forall r1 r2, {r1 <= r2} + {r2 < r1}. Proof. intros; generalize (total_order_T r1 r2); intuition. Qed. Lemma Rge_gt_dec : forall r1 r2, {r1 >= r2} + {r2 > r1}. Proof. intros; edestruct Rle_lt_dec; [left|right]; eauto with rorders. Qed. Lemma Rlt_or_le : forall r1 r2, r1 < r2 \/ r2 <= r1. Proof. intros n m; elim (Rle_lt_dec m n); auto with real. Qed. Lemma Rgt_or_ge : forall r1 r2, r1 > r2 \/ r2 >= r1. Proof. intros; edestruct Rlt_or_le; [left|right]; eauto with rorders. Qed. Lemma Rle_or_lt : forall r1 r2, r1 <= r2 \/ r2 < r1. Proof. intros n m; elim (Rlt_le_dec m n); auto with real. Qed. Lemma Rge_or_gt : forall r1 r2, r1 >= r2 \/ r2 > r1. Proof. intros; edestruct Rle_or_lt; [left|right]; eauto with rorders. Qed. Lemma Rle_lt_or_eq_dec : forall r1 r2, r1 <= r2 -> {r1 < r2} + {r1 = r2}. Proof. intros r1 r2 H; generalize (total_order_T r1 r2); intuition. Qed. (**********) Lemma inser_trans_R : forall r1 r2 r3 r4, r1 <= r2 < r3 -> {r1 <= r2 < r4} + {r4 <= r2 < r3}. Proof. intros n m p q; intros; generalize (Rlt_le_dec m q); intuition. Qed. (*********************************************************) (** ** Addition *) (*********************************************************) (** Remark: [Rplus_0_l] is an axiom *) Lemma Rplus_0_r : forall r, r + 0 = r. Proof. intro; ring. Qed. Hint Resolve Rplus_0_r: real. Lemma Rplus_ne : forall r, r + 0 = r /\ 0 + r = r. Proof. split; ring. Qed. Hint Resolve Rplus_ne: real v62. (**********) (** Remark: [Rplus_opp_r] is an axiom *) Lemma Rplus_opp_l : forall r, - r + r = 0. Proof. intro; ring. Qed. Hint Resolve Rplus_opp_l: real. (**********) Lemma Rplus_opp_r_uniq : forall r1 r2, r1 + r2 = 0 -> r2 = - r1. Proof. intros x y H; replace y with (- x + x + y) by ring. rewrite Rplus_assoc; rewrite H; ring. Qed. Hint Resolve (f_equal (A:=R)): real. Lemma Rplus_eq_compat_l : forall r r1 r2, r1 = r2 -> r + r1 = r + r2. Proof. auto with real. Qed. (*i Old i*)Hint Resolve Rplus_eq_compat_l: v62. (**********) Lemma Rplus_eq_reg_l : forall r r1 r2, r + r1 = r + r2 -> r1 = r2. Proof. intros; transitivity (- r + r + r1). ring. transitivity (- r + r + r2). repeat rewrite Rplus_assoc; rewrite <- H; reflexivity. ring. Qed. Hint Resolve Rplus_eq_reg_l: real. 
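(* The decidability lemmas above can also be used computationally, to define
   functions by case analysis on an order test.  A sketch (the name
   [Rmax_sketch] is ad hoc; the library's own [Rmax] lives in [Rbasic_fun]): *)
Definition Rmax_sketch (x y:R) : R :=
  match Rle_dec x y with
  | left _ => y
  | right _ => x
  end.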
(**********) Lemma Rplus_0_r_uniq : forall r r1, r + r1 = r -> r1 = 0. Proof. intros r b; pattern r at 2; replace r with (r + 0); eauto with real. Qed. (***********) Lemma Rplus_eq_0_l : forall r1 r2, 0 <= r1 -> 0 <= r2 -> r1 + r2 = 0 -> r1 = 0. Proof. intros a b H [H0| H0] H1; auto with real. absurd (0 < a + b). rewrite H1; auto with real. apply Rle_lt_trans with (a + 0). rewrite Rplus_0_r; assumption. auto using Rplus_lt_compat_l with real. rewrite <- H0, Rplus_0_r in H1; assumption. Qed. Lemma Rplus_eq_R0 : forall r1 r2, 0 <= r1 -> 0 <= r2 -> r1 + r2 = 0 -> r1 = 0 /\ r2 = 0. Proof. intros a b; split. apply Rplus_eq_0_l with b; auto with real. apply Rplus_eq_0_l with a; auto with real. rewrite Rplus_comm; auto with real. Qed. (*********************************************************) (** ** Multiplication *) (*********************************************************) (**********) Lemma Rinv_r : forall r, r <> 0 -> r * / r = 1. Proof. intros; field; trivial. Qed. Hint Resolve Rinv_r: real. Lemma Rinv_l_sym : forall r, r <> 0 -> 1 = / r * r. Proof. intros; field; trivial. Qed. Hint Resolve Rinv_l_sym: real. Lemma Rinv_r_sym : forall r, r <> 0 -> 1 = r * / r. Proof. intros; field; trivial. Qed. Hint Resolve Rinv_r_sym: real. (**********) Lemma Rmult_0_r : forall r, r * 0 = 0. Proof. intro; ring. Qed. Hint Resolve Rmult_0_r: real v62. (**********) Lemma Rmult_0_l : forall r, 0 * r = 0. Proof. intro; ring. Qed. Hint Resolve Rmult_0_l: real v62. (**********) Lemma Rmult_ne : forall r, r * 1 = r /\ 1 * r = r. Proof. intro; split; ring. Qed. Hint Resolve Rmult_ne: real v62. (**********) Lemma Rmult_1_r : forall r, r * 1 = r. Proof. intro; ring. Qed. Hint Resolve Rmult_1_r: real. (**********) Lemma Rmult_eq_compat_l : forall r r1 r2, r1 = r2 -> r * r1 = r * r2. Proof. auto with real. Qed. (*i Old i*)Hint Resolve Rmult_eq_compat_l: v62. Lemma Rmult_eq_compat_r : forall r r1 r2, r1 = r2 -> r1 * r = r2 * r. Proof. intros. rewrite <- 2!(Rmult_comm r). now apply Rmult_eq_compat_l. Qed. (**********) Lemma Rmult_eq_reg_l : forall r r1 r2, r * r1 = r * r2 -> r <> 0 -> r1 = r2. Proof. intros; transitivity (/ r * r * r1). field; trivial. transitivity (/ r * r * r2). repeat rewrite Rmult_assoc; rewrite H; trivial. field; trivial. Qed. Lemma Rmult_eq_reg_r : forall r r1 r2, r1 * r = r2 * r -> r <> 0 -> r1 = r2. Proof. intros. apply Rmult_eq_reg_l with (2 := H0). now rewrite 2!(Rmult_comm r). Qed. (**********) Lemma Rmult_integral : forall r1 r2, r1 * r2 = 0 -> r1 = 0 \/ r2 = 0. Proof. intros; case (Req_dec r1 0); [ intro Hz | intro Hnotz ]. auto. right; apply Rmult_eq_reg_l with r1; trivial. rewrite H; auto with real. Qed. (**********) Lemma Rmult_eq_0_compat : forall r1 r2, r1 = 0 \/ r2 = 0 -> r1 * r2 = 0. Proof. intros r1 r2 [H| H]; rewrite H; auto with real. Qed. Hint Resolve Rmult_eq_0_compat: real. (**********) Lemma Rmult_eq_0_compat_r : forall r1 r2, r1 = 0 -> r1 * r2 = 0. Proof. auto with real. Qed. (**********) Lemma Rmult_eq_0_compat_l : forall r1 r2, r2 = 0 -> r1 * r2 = 0. Proof. auto with real. Qed. (**********) Lemma Rmult_neq_0_reg : forall r1 r2, r1 * r2 <> 0 -> r1 <> 0 /\ r2 <> 0. Proof. intros r1 r2 H; split; red; intro; apply H; auto with real. Qed. (**********) Lemma Rmult_integral_contrapositive : forall r1 r2, r1 <> 0 /\ r2 <> 0 -> r1 * r2 <> 0. Proof. red; intros r1 r2 [H1 H2] H. case (Rmult_integral r1 r2); auto with real. Qed. Hint Resolve Rmult_integral_contrapositive: real. Lemma Rmult_integral_contrapositive_currified : forall r1 r2, r1 <> 0 -> r2 <> 0 -> r1 * r2 <> 0. Proof. 
auto using Rmult_integral_contrapositive. Qed. (**********) Lemma Rmult_plus_distr_r : forall r1 r2 r3, (r1 + r2) * r3 = r1 * r3 + r2 * r3. Proof. intros; ring. Qed. (*********************************************************) (** ** Square function *) (*********************************************************) (***********) Definition Rsqr r : R := r * r. Notation "r ²" := (Rsqr r) (at level 1, format "r ²") : R_scope. (***********) Lemma Rsqr_0 : Rsqr 0 = 0. unfold Rsqr; auto with real. Qed. (***********) Lemma Rsqr_0_uniq : forall r, Rsqr r = 0 -> r = 0. unfold Rsqr; intros; elim (Rmult_integral r r H); trivial. Qed. (*********************************************************) (** ** Opposite *) (*********************************************************) (**********) Lemma Ropp_eq_compat : forall r1 r2, r1 = r2 -> - r1 = - r2. Proof. auto with real. Qed. Hint Resolve Ropp_eq_compat: real. (**********) Lemma Ropp_0 : -0 = 0. Proof. ring. Qed. Hint Resolve Ropp_0: real v62. (**********) Lemma Ropp_eq_0_compat : forall r, r = 0 -> - r = 0. Proof. intros; rewrite H; auto with real. Qed. Hint Resolve Ropp_eq_0_compat: real. (**********) Lemma Ropp_involutive : forall r, - - r = r. Proof. intro; ring. Qed. Hint Resolve Ropp_involutive: real. (*********) Lemma Ropp_neq_0_compat : forall r, r <> 0 -> - r <> 0. Proof. red; intros r H H0. apply H. transitivity (- - r); auto with real. Qed. Hint Resolve Ropp_neq_0_compat: real. (**********) Lemma Ropp_plus_distr : forall r1 r2, - (r1 + r2) = - r1 + - r2. Proof. intros; ring. Qed. Hint Resolve Ropp_plus_distr: real. (*********************************************************) (** ** Opposite and multiplication *) (*********************************************************) Lemma Ropp_mult_distr_l_reverse : forall r1 r2, - r1 * r2 = - (r1 * r2). Proof. intros; ring. Qed. Hint Resolve Ropp_mult_distr_l_reverse: real. (**********) Lemma Rmult_opp_opp : forall r1 r2, - r1 * - r2 = r1 * r2. Proof. intros; ring. Qed. Hint Resolve Rmult_opp_opp: real. Lemma Ropp_mult_distr_r_reverse : forall r1 r2, r1 * - r2 = - (r1 * r2). Proof. intros; ring. Qed. (*********************************************************) (** ** Substraction *) (*********************************************************) Lemma Rminus_0_r : forall r, r - 0 = r. Proof. intro; ring. Qed. Hint Resolve Rminus_0_r: real. Lemma Rminus_0_l : forall r, 0 - r = - r. Proof. intro; ring. Qed. Hint Resolve Rminus_0_l: real. (**********) Lemma Ropp_minus_distr : forall r1 r2, - (r1 - r2) = r2 - r1. Proof. intros; ring. Qed. Hint Resolve Ropp_minus_distr: real. Lemma Ropp_minus_distr' : forall r1 r2, - (r2 - r1) = r1 - r2. Proof. intros; ring. Qed. (**********) Lemma Rminus_diag_eq : forall r1 r2, r1 = r2 -> r1 - r2 = 0. Proof. intros; rewrite H; ring. Qed. Hint Resolve Rminus_diag_eq: real. (**********) Lemma Rminus_diag_uniq : forall r1 r2, r1 - r2 = 0 -> r1 = r2. Proof. intros r1 r2; unfold Rminus; rewrite Rplus_comm; intro. rewrite <- (Ropp_involutive r2); apply (Rplus_opp_r_uniq (- r2) r1 H). Qed. Hint Immediate Rminus_diag_uniq: real. Lemma Rminus_diag_uniq_sym : forall r1 r2, r2 - r1 = 0 -> r1 = r2. Proof. intros; generalize (Rminus_diag_uniq r2 r1 H); clear H; intro H; rewrite H; ring. Qed. Hint Immediate Rminus_diag_uniq_sym: real. Lemma Rplus_minus : forall r1 r2, r1 + (r2 - r1) = r2. Proof. intros; ring. Qed. Hint Resolve Rplus_minus: real. (**********) Lemma Rminus_eq_contra : forall r1 r2, r1 <> r2 -> r1 - r2 <> 0. Proof. red; intros r1 r2 H H0. apply H; auto with real. Qed. 
Hint Resolve Rminus_eq_contra: real. Lemma Rminus_not_eq : forall r1 r2, r1 - r2 <> 0 -> r1 <> r2. Proof. red; intros; elim H; apply Rminus_diag_eq; auto. Qed. Hint Resolve Rminus_not_eq: real. Lemma Rminus_not_eq_right : forall r1 r2, r2 - r1 <> 0 -> r1 <> r2. Proof. red; intros; elim H; rewrite H0; ring. Qed. Hint Resolve Rminus_not_eq_right: real. (**********) Lemma Rmult_minus_distr_l : forall r1 r2 r3, r1 * (r2 - r3) = r1 * r2 - r1 * r3. Proof. intros; ring. Qed. (*********************************************************) (** ** Inverse *) (*********************************************************) Lemma Rinv_1 : / 1 = 1. Proof. field. Qed. Hint Resolve Rinv_1: real. (*********) Lemma Rinv_neq_0_compat : forall r, r <> 0 -> / r <> 0. Proof. red; intros; apply R1_neq_R0. replace 1 with (/ r * r); auto with real. Qed. Hint Resolve Rinv_neq_0_compat: real. (*********) Lemma Rinv_involutive : forall r, r <> 0 -> / / r = r. Proof. intros; field; trivial. Qed. Hint Resolve Rinv_involutive: real. (*********) Lemma Rinv_mult_distr : forall r1 r2, r1 <> 0 -> r2 <> 0 -> / (r1 * r2) = / r1 * / r2. Proof. intros; field; auto. Qed. (*********) Lemma Ropp_inv_permute : forall r, r <> 0 -> - / r = / - r. Proof. intros; field; trivial. Qed. Lemma Rinv_r_simpl_r : forall r1 r2, r1 <> 0 -> r1 * / r1 * r2 = r2. Proof. intros; transitivity (1 * r2); auto with real. rewrite Rinv_r; auto with real. Qed. Lemma Rinv_r_simpl_l : forall r1 r2, r1 <> 0 -> r2 * r1 * / r1 = r2. Proof. intros; transitivity (r2 * 1); auto with real. transitivity (r2 * (r1 * / r1)); auto with real. Qed. Lemma Rinv_r_simpl_m : forall r1 r2, r1 <> 0 -> r1 * r2 * / r1 = r2. Proof. intros; transitivity (r2 * 1); auto with real. transitivity (r2 * (r1 * / r1)); auto with real. ring. Qed. Hint Resolve Rinv_r_simpl_l Rinv_r_simpl_r Rinv_r_simpl_m: real. (*********) Lemma Rinv_mult_simpl : forall r1 r2 r3, r1 <> 0 -> r1 * / r2 * (r3 * / r1) = r3 * / r2. Proof. intros a b c; intros. transitivity (a * / a * (c * / b)); auto with real. ring. Qed. (*********************************************************) (** ** Order and addition *) (*********************************************************) (** *** Compatibility *) (** Remark: [Rplus_lt_compat_l] is an axiom *) Lemma Rplus_gt_compat_l : forall r r1 r2, r1 > r2 -> r + r1 > r + r2. Proof. eauto using Rplus_lt_compat_l with rorders. Qed. Hint Resolve Rplus_gt_compat_l: real. (**********) Lemma Rplus_lt_compat_r : forall r r1 r2, r1 < r2 -> r1 + r < r2 + r. Proof. intros. rewrite (Rplus_comm r1 r); rewrite (Rplus_comm r2 r); auto with real. Qed. Hint Resolve Rplus_lt_compat_r: real. Lemma Rplus_gt_compat_r : forall r r1 r2, r1 > r2 -> r1 + r > r2 + r. Proof. do 3 intro; apply Rplus_lt_compat_r. Qed. (**********) Lemma Rplus_le_compat_l : forall r r1 r2, r1 <= r2 -> r + r1 <= r + r2. Proof. unfold Rle; intros; elim H; intro. left; apply (Rplus_lt_compat_l r r1 r2 H0). right; rewrite <- H0; auto with zarith real. Qed. Lemma Rplus_ge_compat_l : forall r r1 r2, r1 >= r2 -> r + r1 >= r + r2. Proof. auto using Rplus_le_compat_l with rorders. Qed. Hint Resolve Rplus_ge_compat_l: real. (**********) Lemma Rplus_le_compat_r : forall r r1 r2, r1 <= r2 -> r1 + r <= r2 + r. Proof. unfold Rle; intros; elim H; intro. left; apply (Rplus_lt_compat_r r r1 r2 H0). right; rewrite <- H0; auto with real. Qed. Hint Resolve Rplus_le_compat_l Rplus_le_compat_r: real. Lemma Rplus_ge_compat_r : forall r r1 r2, r1 >= r2 -> r1 + r >= r2 + r. Proof. auto using Rplus_le_compat_r with rorders. Qed. 
(*********) Lemma Rplus_lt_compat : forall r1 r2 r3 r4, r1 < r2 -> r3 < r4 -> r1 + r3 < r2 + r4. Proof. intros; apply Rlt_trans with (r2 + r3); auto with real. Qed. Hint Immediate Rplus_lt_compat: real. Lemma Rplus_le_compat : forall r1 r2 r3 r4, r1 <= r2 -> r3 <= r4 -> r1 + r3 <= r2 + r4. Proof. intros; apply Rle_trans with (r2 + r3); auto with real. Qed. Hint Immediate Rplus_le_compat: real. Lemma Rplus_gt_compat : forall r1 r2 r3 r4, r1 > r2 -> r3 > r4 -> r1 + r3 > r2 + r4. Proof. auto using Rplus_lt_compat with rorders. Qed. Lemma Rplus_ge_compat : forall r1 r2 r3 r4, r1 >= r2 -> r3 >= r4 -> r1 + r3 >= r2 + r4. Proof. auto using Rplus_le_compat with rorders. Qed. (*********) Lemma Rplus_lt_le_compat : forall r1 r2 r3 r4, r1 < r2 -> r3 <= r4 -> r1 + r3 < r2 + r4. Proof. intros; apply Rlt_le_trans with (r2 + r3); auto with real. Qed. Lemma Rplus_le_lt_compat : forall r1 r2 r3 r4, r1 <= r2 -> r3 < r4 -> r1 + r3 < r2 + r4. Proof. intros; apply Rle_lt_trans with (r2 + r3); auto with real. Qed. Hint Immediate Rplus_lt_le_compat Rplus_le_lt_compat: real. Lemma Rplus_gt_ge_compat : forall r1 r2 r3 r4, r1 > r2 -> r3 >= r4 -> r1 + r3 > r2 + r4. Proof. auto using Rplus_lt_le_compat with rorders. Qed. Lemma Rplus_ge_gt_compat : forall r1 r2 r3 r4, r1 >= r2 -> r3 > r4 -> r1 + r3 > r2 + r4. Proof. auto using Rplus_le_lt_compat with rorders. Qed. (**********) Lemma Rplus_lt_0_compat : forall r1 r2, 0 < r1 -> 0 < r2 -> 0 < r1 + r2. Proof. intros x y; intros; apply Rlt_trans with x; [ assumption | pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_lt_compat_l; assumption ]. Qed. Lemma Rplus_le_lt_0_compat : forall r1 r2, 0 <= r1 -> 0 < r2 -> 0 < r1 + r2. Proof. intros x y; intros; apply Rle_lt_trans with x; [ assumption | pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_lt_compat_l; assumption ]. Qed. Lemma Rplus_lt_le_0_compat : forall r1 r2, 0 < r1 -> 0 <= r2 -> 0 < r1 + r2. Proof. intros x y; intros; rewrite <- Rplus_comm; apply Rplus_le_lt_0_compat; assumption. Qed. Lemma Rplus_le_le_0_compat : forall r1 r2, 0 <= r1 -> 0 <= r2 -> 0 <= r1 + r2. Proof. intros x y; intros; apply Rle_trans with x; [ assumption | pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; assumption ]. Qed. (**********) Lemma sum_inequa_Rle_lt : forall a x b c y d:R, a <= x -> x < b -> c < y -> y <= d -> a + c < x + y < b + d. Proof. intros; split. apply Rlt_le_trans with (a + y); auto with real. apply Rlt_le_trans with (b + y); auto with real. Qed. (** *** Cancellation *) Lemma Rplus_lt_reg_r : forall r r1 r2, r + r1 < r + r2 -> r1 < r2. Proof. intros; cut (- r + r + r1 < - r + r + r2). rewrite Rplus_opp_l. elim (Rplus_ne r1); elim (Rplus_ne r2); intros; rewrite <- H3; rewrite <- H1; auto with zarith real. rewrite Rplus_assoc; rewrite Rplus_assoc; apply (Rplus_lt_compat_l (- r) (r + r1) (r + r2) H). Qed. Lemma Rplus_le_reg_l : forall r r1 r2, r + r1 <= r + r2 -> r1 <= r2. Proof. unfold Rle; intros; elim H; intro. left; apply (Rplus_lt_reg_r r r1 r2 H0). right; apply (Rplus_eq_reg_l r r1 r2 H0). Qed. Lemma Rplus_le_reg_r : forall r r1 r2, r1 + r <= r2 + r -> r1 <= r2. Proof. intros. apply (Rplus_le_reg_l r). now rewrite 2!(Rplus_comm r). Qed. Lemma Rplus_gt_reg_l : forall r r1 r2, r + r1 > r + r2 -> r1 > r2. Proof. unfold Rgt; intros; apply (Rplus_lt_reg_r r r2 r1 H). Qed. Lemma Rplus_ge_reg_l : forall r r1 r2, r + r1 >= r + r2 -> r1 >= r2. Proof. intros; apply Rle_ge; apply Rplus_le_reg_l with r; auto with real. Qed. 
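(* An illustrative consequence of the compatibility lemmas (ad-hoc name):
   adding a positive quantity strictly increases a real number. *)
Remark Rplus_pos_increase_example : forall r1 r2, 0 < r2 -> r1 < r1 + r2.
Proof.
  intros; pattern r1 at 1; rewrite <- (Rplus_0_r r1);
    apply Rplus_lt_compat_l; assumption.
Qed.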
(**********) Lemma Rplus_le_reg_pos_r : forall r1 r2 r3, 0 <= r2 -> r1 + r2 <= r3 -> r1 <= r3. Proof. intros x y z; intros; apply Rle_trans with (x + y); [ pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; assumption | assumption ]. Qed. Lemma Rplus_lt_reg_pos_r : forall r1 r2 r3, 0 <= r2 -> r1 + r2 < r3 -> r1 < r3. Proof. intros x y z; intros; apply Rle_lt_trans with (x + y); [ pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; assumption | assumption ]. Qed. Lemma Rplus_ge_reg_neg_r : forall r1 r2 r3, 0 >= r2 -> r1 + r2 >= r3 -> r1 >= r3. Proof. intros x y z; intros; apply Rge_trans with (x + y); [ pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_ge_compat_l; assumption | assumption ]. Qed. Lemma Rplus_gt_reg_neg_r : forall r1 r2 r3, 0 >= r2 -> r1 + r2 > r3 -> r1 > r3. Proof. intros x y z; intros; apply Rge_gt_trans with (x + y); [ pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_ge_compat_l; assumption | assumption ]. Qed. (*********************************************************) (** ** Order and opposite *) (*********************************************************) (** *** Contravariant compatibility *) Lemma Ropp_gt_lt_contravar : forall r1 r2, r1 > r2 -> - r1 < - r2. Proof. unfold Rgt; intros. apply (Rplus_lt_reg_r (r2 + r1)). replace (r2 + r1 + - r1) with r2. replace (r2 + r1 + - r2) with r1. trivial. ring. ring. Qed. Hint Resolve Ropp_gt_lt_contravar. Lemma Ropp_lt_gt_contravar : forall r1 r2, r1 < r2 -> - r1 > - r2. Proof. unfold Rgt; auto with real. Qed. Hint Resolve Ropp_lt_gt_contravar: real. (**********) Lemma Ropp_lt_contravar : forall r1 r2, r2 < r1 -> - r1 < - r2. Proof. auto with real. Qed. Hint Resolve Ropp_lt_contravar: real. Lemma Ropp_gt_contravar : forall r1 r2, r2 > r1 -> - r1 > - r2. Proof. auto with real. Qed. (**********) Lemma Ropp_le_ge_contravar : forall r1 r2, r1 <= r2 -> - r1 >= - r2. Proof. unfold Rge; intros r1 r2 [H| H]; auto with real. Qed. Hint Resolve Ropp_le_ge_contravar: real. Lemma Ropp_ge_le_contravar : forall r1 r2, r1 >= r2 -> - r1 <= - r2. Proof. unfold Rle; intros r1 r2 [H| H]; auto with real. Qed. Hint Resolve Ropp_ge_le_contravar: real. (**********) Lemma Ropp_le_contravar : forall r1 r2, r2 <= r1 -> - r1 <= - r2. Proof. intros r1 r2 H; elim H; auto with real. Qed. Hint Resolve Ropp_le_contravar: real. Lemma Ropp_ge_contravar : forall r1 r2, r2 >= r1 -> - r1 >= - r2. Proof. auto using Ropp_le_contravar with real. Qed. (**********) Lemma Ropp_0_lt_gt_contravar : forall r, 0 < r -> 0 > - r. Proof. intros; replace 0 with (-0); auto with real. Qed. Hint Resolve Ropp_0_lt_gt_contravar: real. Lemma Ropp_0_gt_lt_contravar : forall r, 0 > r -> 0 < - r. Proof. intros; replace 0 with (-0); auto with real. Qed. Hint Resolve Ropp_0_gt_lt_contravar: real. (**********) Lemma Ropp_lt_gt_0_contravar : forall r, r > 0 -> - r < 0. Proof. intros; rewrite <- Ropp_0; auto with real. Qed. Hint Resolve Ropp_lt_gt_0_contravar: real. Lemma Ropp_gt_lt_0_contravar : forall r, r < 0 -> - r > 0. Proof. intros; rewrite <- Ropp_0; auto with real. Qed. Hint Resolve Ropp_gt_lt_0_contravar: real. (**********) Lemma Ropp_0_le_ge_contravar : forall r, 0 <= r -> 0 >= - r. Proof. intros; replace 0 with (-0); auto with real. Qed. Hint Resolve Ropp_0_le_ge_contravar: real. Lemma Ropp_0_ge_le_contravar : forall r, 0 >= r -> 0 <= - r. Proof. intros; replace 0 with (-0); auto with real. Qed. Hint Resolve Ropp_0_ge_le_contravar: real. (** *** Cancellation *) Lemma Ropp_lt_cancel : forall r1 r2, - r2 < - r1 -> r1 < r2. Proof. intros x y H'. 
rewrite <- (Ropp_involutive x); rewrite <- (Ropp_involutive y); auto with real. Qed. Hint Immediate Ropp_lt_cancel: real. Lemma Ropp_gt_cancel : forall r1 r2, - r2 > - r1 -> r1 > r2. Proof. auto using Ropp_lt_cancel with rorders. Qed. Lemma Ropp_le_cancel : forall r1 r2, - r2 <= - r1 -> r1 <= r2. Proof. intros x y H. elim H; auto with real. intro H1; rewrite <- (Ropp_involutive x); rewrite <- (Ropp_involutive y); rewrite H1; auto with real. Qed. Hint Immediate Ropp_le_cancel: real. Lemma Ropp_ge_cancel : forall r1 r2, - r2 >= - r1 -> r1 >= r2. Proof. auto using Ropp_le_cancel with rorders. Qed. (*********************************************************) (** ** Order and multiplication *) (*********************************************************) (** Remark: [Rmult_lt_compat_l] is an axiom *) (** *** Covariant compatibility *) Lemma Rmult_lt_compat_r : forall r r1 r2, 0 < r -> r1 < r2 -> r1 * r < r2 * r. Proof. intros; rewrite (Rmult_comm r1 r); rewrite (Rmult_comm r2 r); auto with real. Qed. Hint Resolve Rmult_lt_compat_r. Lemma Rmult_gt_compat_r : forall r r1 r2, r > 0 -> r1 > r2 -> r1 * r > r2 * r. Proof. eauto using Rmult_lt_compat_r with rorders. Qed. Lemma Rmult_gt_compat_l : forall r r1 r2, r > 0 -> r1 > r2 -> r * r1 > r * r2. Proof. eauto using Rmult_lt_compat_l with rorders. Qed. (**********) Lemma Rmult_le_compat_l : forall r r1 r2, 0 <= r -> r1 <= r2 -> r * r1 <= r * r2. Proof. intros r r1 r2 H H0; destruct H; destruct H0; unfold Rle; auto with real. right; rewrite <- H; do 2 rewrite Rmult_0_l; reflexivity. Qed. Hint Resolve Rmult_le_compat_l: real. Lemma Rmult_le_compat_r : forall r r1 r2, 0 <= r -> r1 <= r2 -> r1 * r <= r2 * r. Proof. intros r r1 r2 H; rewrite (Rmult_comm r1 r); rewrite (Rmult_comm r2 r); auto with real. Qed. Hint Resolve Rmult_le_compat_r: real. Lemma Rmult_ge_compat_l : forall r r1 r2, r >= 0 -> r1 >= r2 -> r * r1 >= r * r2. Proof. eauto using Rmult_le_compat_l with rorders. Qed. Lemma Rmult_ge_compat_r : forall r r1 r2, r >= 0 -> r1 >= r2 -> r1 * r >= r2 * r. Proof. eauto using Rmult_le_compat_r with rorders. Qed. (**********) Lemma Rmult_le_compat : forall r1 r2 r3 r4, 0 <= r1 -> 0 <= r3 -> r1 <= r2 -> r3 <= r4 -> r1 * r3 <= r2 * r4. Proof. intros x y z t H' H'0 H'1 H'2. apply Rle_trans with (r2 := x * t); auto with real. repeat rewrite (fun x => Rmult_comm x t). apply Rmult_le_compat_l; auto. apply Rle_trans with z; auto. Qed. Hint Resolve Rmult_le_compat: real. Lemma Rmult_ge_compat : forall r1 r2 r3 r4, r2 >= 0 -> r4 >= 0 -> r1 >= r2 -> r3 >= r4 -> r1 * r3 >= r2 * r4. Proof. auto with real rorders. Qed. Lemma Rmult_gt_0_lt_compat : forall r1 r2 r3 r4, r3 > 0 -> r2 > 0 -> r1 < r2 -> r3 < r4 -> r1 * r3 < r2 * r4. Proof. intros; apply Rlt_trans with (r2 * r3); auto with real. Qed. (*********) Lemma Rmult_le_0_lt_compat : forall r1 r2 r3 r4, 0 <= r1 -> 0 <= r3 -> r1 < r2 -> r3 < r4 -> r1 * r3 < r2 * r4. Proof. intros; apply Rle_lt_trans with (r2 * r3); [ apply Rmult_le_compat_r; [ assumption | left; assumption ] | apply Rmult_lt_compat_l; [ apply Rle_lt_trans with r1; assumption | assumption ] ]. Qed. (*********) Lemma Rmult_lt_0_compat : forall r1 r2, 0 < r1 -> 0 < r2 -> 0 < r1 * r2. Proof. intros; replace 0 with (0 * r2); auto with real. Qed. Lemma Rmult_gt_0_compat : forall r1 r2, r1 > 0 -> r2 > 0 -> r1 * r2 > 0. Proof Rmult_lt_0_compat. (** *** Contravariant compatibility *) Lemma Rmult_le_compat_neg_l : forall r r1 r2, r <= 0 -> r1 <= r2 -> r * r2 <= r * r1. Proof. intros; replace r with (- - r); auto with real. 
do 2 rewrite (Ropp_mult_distr_l_reverse (- r)). apply Ropp_le_contravar; auto with real. Qed. Hint Resolve Rmult_le_compat_neg_l: real. Lemma Rmult_le_ge_compat_neg_l : forall r r1 r2, r <= 0 -> r1 <= r2 -> r * r1 >= r * r2. Proof. intros; apply Rle_ge; auto with real. Qed. Hint Resolve Rmult_le_ge_compat_neg_l: real. Lemma Rmult_lt_gt_compat_neg_l : forall r r1 r2, r < 0 -> r1 < r2 -> r * r1 > r * r2. Proof. intros; replace r with (- - r); auto with real. rewrite (Ropp_mult_distr_l_reverse (- r)); rewrite (Ropp_mult_distr_l_reverse (- r)). apply Ropp_lt_gt_contravar; auto with real. Qed. (** *** Cancellation *) Lemma Rmult_lt_reg_l : forall r r1 r2, 0 < r -> r * r1 < r * r2 -> r1 < r2. Proof. intros z x y H H0. case (Rtotal_order x y); intros Eq0; auto; elim Eq0; clear Eq0; intros Eq0. rewrite Eq0 in H0; exfalso; apply (Rlt_irrefl (z * y)); auto. generalize (Rmult_lt_compat_l z y x H Eq0); intro; exfalso; generalize (Rlt_trans (z * x) (z * y) (z * x) H0 H1); intro; apply (Rlt_irrefl (z * x)); auto. Qed. Lemma Rmult_lt_reg_r : forall r r1 r2 : R, 0 < r -> r1 * r < r2 * r -> r1 < r2. Proof. intros. apply Rmult_lt_reg_l with r. exact H. now rewrite 2!(Rmult_comm r). Qed. Lemma Rmult_gt_reg_l : forall r r1 r2, 0 < r -> r * r1 < r * r2 -> r1 < r2. Proof. eauto using Rmult_lt_reg_l with rorders. Qed. Lemma Rmult_le_reg_l : forall r r1 r2, 0 < r -> r * r1 <= r * r2 -> r1 <= r2. Proof. intros z x y H H0; case H0; auto with real. intros H1; apply Rlt_le. apply Rmult_lt_reg_l with (r := z); auto. intros H1; replace x with (/ z * (z * x)); auto with real. replace y with (/ z * (z * y)). rewrite H1; auto with real. rewrite <- Rmult_assoc; rewrite Rinv_l; auto with real. rewrite <- Rmult_assoc; rewrite Rinv_l; auto with real. Qed. Lemma Rmult_le_reg_r : forall r r1 r2, 0 < r -> r1 * r <= r2 * r -> r1 <= r2. Proof. intros. apply Rmult_le_reg_l with r. exact H. now rewrite 2!(Rmult_comm r). Qed. (*********************************************************) (** ** Order and substraction *) (*********************************************************) Lemma Rlt_minus : forall r1 r2, r1 < r2 -> r1 - r2 < 0. Proof. intros; apply (Rplus_lt_reg_r r2). replace (r2 + (r1 - r2)) with r1. replace (r2 + 0) with r2; auto with real. ring. Qed. Hint Resolve Rlt_minus: real. Lemma Rgt_minus : forall r1 r2, r1 > r2 -> r1 - r2 > 0. Proof. intros; apply (Rplus_lt_reg_r r2). replace (r2 + (r1 - r2)) with r1. replace (r2 + 0) with r2; auto with real. ring. Qed. (**********) Lemma Rle_minus : forall r1 r2, r1 <= r2 -> r1 - r2 <= 0. Proof. destruct 1; unfold Rle; auto with real. Qed. Lemma Rge_minus : forall r1 r2, r1 >= r2 -> r1 - r2 >= 0. Proof. destruct 1. auto using Rgt_minus, Rgt_ge. right; auto using Rminus_diag_eq with rorders. Qed. (**********) Lemma Rminus_lt : forall r1 r2, r1 - r2 < 0 -> r1 < r2. Proof. intros; replace r1 with (r1 - r2 + r2). pattern r2 at 3; replace r2 with (0 + r2); auto with real. ring. Qed. Lemma Rminus_gt : forall r1 r2, r1 - r2 > 0 -> r1 > r2. Proof. intros; replace r2 with (0 + r2); auto with real. replace r1 with (r1 - r2 + r2). apply Rplus_gt_compat_r; assumption. ring. Qed. (**********) Lemma Rminus_le : forall r1 r2, r1 - r2 <= 0 -> r1 <= r2. Proof. intros; replace r1 with (r1 - r2 + r2). pattern r2 at 3; replace r2 with (0 + r2); auto with real. ring. Qed. Lemma Rminus_ge : forall r1 r2, r1 - r2 >= 0 -> r1 >= r2. Proof. intros; replace r2 with (0 + r2); auto with real. replace r1 with (r1 - r2 + r2). apply Rplus_ge_compat_r; assumption. ring. Qed. 
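(* An illustrative corollary (ad-hoc name) packaging [Rgt_minus] and
   [Rminus_gt]: the strict order can be phrased through subtraction. *)
Remark Rlt_0_minus_example : forall r1 r2, r1 < r2 <-> 0 < r2 - r1.
Proof.
  intros r1 r2; split; intro H.
  apply Rgt_minus; exact H.
  apply Rminus_gt; exact H.
Qed.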
(**********) Lemma tech_Rplus : forall r (s:R), 0 <= r -> 0 < s -> r + s <> 0. Proof. intros; apply not_eq_sym; apply Rlt_not_eq. rewrite Rplus_comm; replace 0 with (0 + 0); auto with real. Qed. Hint Immediate tech_Rplus: real. (*********************************************************) (** ** Order and square function *) (*********************************************************) Lemma Rle_0_sqr : forall r, 0 <= Rsqr r. Proof. intro; case (Rlt_le_dec r 0); unfold Rsqr; intro. replace (r * r) with (- r * - r); auto with real. replace 0 with (- r * 0); auto with real. replace 0 with (0 * r); auto with real. Qed. (***********) Lemma Rlt_0_sqr : forall r, r <> 0 -> 0 < Rsqr r. Proof. intros; case (Rdichotomy r 0); trivial; unfold Rsqr; intro. replace (r * r) with (- r * - r); auto with real. replace 0 with (- r * 0); auto with real. replace 0 with (0 * r); auto with real. Qed. Hint Resolve Rle_0_sqr Rlt_0_sqr: real. (***********) Lemma Rplus_sqr_eq_0_l : forall r1 r2, Rsqr r1 + Rsqr r2 = 0 -> r1 = 0. Proof. intros a b; intros; apply Rsqr_0_uniq; apply Rplus_eq_0_l with (Rsqr b); auto with real. Qed. Lemma Rplus_sqr_eq_0 : forall r1 r2, Rsqr r1 + Rsqr r2 = 0 -> r1 = 0 /\ r2 = 0. Proof. intros a b; split. apply Rplus_sqr_eq_0_l with b; auto with real. apply Rplus_sqr_eq_0_l with a; auto with real. rewrite Rplus_comm; auto with real. Qed. (*********************************************************) (** ** Zero is less than one *) (*********************************************************) Lemma Rlt_0_1 : 0 < 1. Proof. replace 1 with (Rsqr 1); auto with real. unfold Rsqr; auto with real. Qed. Hint Resolve Rlt_0_1: real. Lemma Rle_0_1 : 0 <= 1. Proof. left. exact Rlt_0_1. Qed. (*********************************************************) (** ** Order and inverse *) (*********************************************************) Lemma Rinv_0_lt_compat : forall r, 0 < r -> 0 < / r. Proof. intros; apply Rnot_le_lt; red; intros. absurd (1 <= 0); auto with real. replace 1 with (r * / r); auto with real. replace 0 with (r * 0); auto with real. Qed. Hint Resolve Rinv_0_lt_compat: real. (*********) Lemma Rinv_lt_0_compat : forall r, r < 0 -> / r < 0. Proof. intros; apply Rnot_le_lt; red; intros. absurd (1 <= 0); auto with real. replace 1 with (r * / r); auto with real. replace 0 with (r * 0); auto with real. Qed. Hint Resolve Rinv_lt_0_compat: real. (*********) Lemma Rinv_lt_contravar : forall r1 r2, 0 < r1 * r2 -> r1 < r2 -> / r2 < / r1. Proof. intros; apply Rmult_lt_reg_l with (r1 * r2); auto with real. case (Rmult_neq_0_reg r1 r2); intros; auto with real. replace (r1 * r2 * / r2) with r1. replace (r1 * r2 * / r1) with r2; trivial. symmetry ; auto with real. symmetry ; auto with real. Qed. Lemma Rinv_1_lt_contravar : forall r1 r2, 1 <= r1 -> r1 < r2 -> / r2 < / r1. Proof. intros x y H' H'0. cut (0 < x); [ intros Lt0 | apply Rlt_le_trans with (r2 := 1) ]; auto with real. apply Rmult_lt_reg_l with (r := x); auto with real. rewrite (Rmult_comm x (/ x)); rewrite Rinv_l; auto with real. apply Rmult_lt_reg_l with (r := y); auto with real. apply Rlt_trans with (r2 := x); auto. cut (y * (x * / y) = x). intro H1; rewrite H1; rewrite (Rmult_1_r y); auto. rewrite (Rmult_comm x); rewrite <- Rmult_assoc; rewrite (Rmult_comm y (/ y)); rewrite Rinv_l; auto with real. apply Rlt_dichotomy_converse; right. red; apply Rlt_trans with (r2 := x); auto with real. Qed. Hint Resolve Rinv_1_lt_contravar: real. 
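(* An illustrative corollary of the lemmas above (ad-hoc name): the inverse
   of a real greater than [1] lies strictly between [0] and [1]. *)
Remark Rinv_lt_1_example : forall r, 1 < r -> 0 < / r < 1.
Proof.
  intros r Hr; split.
  apply Rinv_0_lt_compat; apply Rlt_trans with 1; [ apply Rlt_0_1 | exact Hr ].
  rewrite <- Rinv_1; apply Rinv_1_lt_contravar; [ right; reflexivity | exact Hr ].
Qed.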
(*********************************************************) (** ** Miscellaneous *) (*********************************************************) (**********) Lemma Rle_lt_0_plus_1 : forall r, 0 <= r -> 0 < r + 1. Proof. intros. apply Rlt_le_trans with 1; auto with real. pattern 1 at 1; replace 1 with (0 + 1); auto with real. Qed. Hint Resolve Rle_lt_0_plus_1: real. (**********) Lemma Rlt_plus_1 : forall r, r < r + 1. Proof. intros. pattern r at 1; replace r with (r + 0); auto with real. Qed. Hint Resolve Rlt_plus_1: real. (**********) Lemma tech_Rgt_minus : forall r1 r2, 0 < r2 -> r1 > r1 - r2. Proof. red; unfold Rminus; intros. pattern r1 at 2; replace r1 with (r1 + 0); auto with real. Qed. (*********************************************************) (** ** Injection from [N] to [R] *) (*********************************************************) (**********) Lemma S_INR : forall n:nat, INR (S n) = INR n + 1. Proof. intro; case n; auto with real. Qed. (**********) Lemma S_O_plus_INR : forall n:nat, INR (1 + n) = INR 1 + INR n. Proof. intro; simpl; case n; intros; auto with real. Qed. (**********) Lemma plus_INR : forall n m:nat, INR (n + m) = INR n + INR m. Proof. intros n m; induction n as [| n Hrecn]. simpl; auto with real. replace (S n + m)%nat with (S (n + m)); auto with arith. repeat rewrite S_INR. rewrite Hrecn; ring. Qed. Hint Resolve plus_INR: real. (**********) Lemma minus_INR : forall n m:nat, (m <= n)%nat -> INR (n - m) = INR n - INR m. Proof. intros n m le; pattern m, n; apply le_elim_rel; auto with real. intros; rewrite <- minus_n_O; auto with real. intros; repeat rewrite S_INR; simpl. rewrite H0; ring. Qed. Hint Resolve minus_INR: real. (*********) Lemma mult_INR : forall n m:nat, INR (n * m) = INR n * INR m. Proof. intros n m; induction n as [| n Hrecn]. simpl; auto with real. intros; repeat rewrite S_INR; simpl. rewrite plus_INR; rewrite Hrecn; ring. Qed. Hint Resolve mult_INR: real. (*********) Lemma lt_0_INR : forall n:nat, (0 < n)%nat -> 0 < INR n. Proof. simple induction 1; intros; auto with real. rewrite S_INR; auto with real. Qed. Hint Resolve lt_0_INR: real. Lemma lt_INR : forall n m:nat, (n < m)%nat -> INR n < INR m. Proof. simple induction 1; intros; auto with real. rewrite S_INR; auto with real. rewrite S_INR; apply Rlt_trans with (INR m0); auto with real. Qed. Hint Resolve lt_INR: real. Lemma lt_1_INR : forall n:nat, (1 < n)%nat -> 1 < INR n. Proof. intros; replace 1 with (INR 1); auto with real. Qed. Hint Resolve lt_1_INR: real. (**********) Lemma pos_INR_nat_of_P : forall p:positive, 0 < INR (Pos.to_nat p). Proof. intro; apply lt_0_INR. simpl; auto with real. apply Pos2Nat.is_pos. Qed. Hint Resolve pos_INR_nat_of_P: real. (**********) Lemma pos_INR : forall n:nat, 0 <= INR n. Proof. intro n; case n. simpl; auto with real. auto with arith real. Qed. Hint Resolve pos_INR: real. Lemma INR_lt : forall n m:nat, INR n < INR m -> (n < m)%nat. Proof. double induction n m; intros. simpl; exfalso; apply (Rlt_irrefl 0); auto. auto with arith. generalize (pos_INR (S n0)); intro; cut (INR 0 = 0); [ intro H2; rewrite H2 in H0; idtac | simpl; trivial ]. generalize (Rle_lt_trans 0 (INR (S n0)) 0 H1 H0); intro; exfalso; apply (Rlt_irrefl 0); auto. do 2 rewrite S_INR in H1; cut (INR n1 < INR n0). intro H2; generalize (H0 n0 H2); intro; auto with arith. apply (Rplus_lt_reg_r 1 (INR n1) (INR n0)). rewrite Rplus_comm; rewrite (Rplus_comm 1 (INR n0)); trivial. Qed. Hint Resolve INR_lt: real. (*********) Lemma le_INR : forall n m:nat, (n <= m)%nat -> INR n <= INR m. Proof. 
simple induction 1; intros; auto with real. rewrite S_INR. apply Rle_trans with (INR m0); auto with real. Qed. Hint Resolve le_INR: real. (**********) Lemma INR_not_0 : forall n:nat, INR n <> 0 -> n <> 0%nat. Proof. red; intros n H H1. apply H. rewrite H1; trivial. Qed. Hint Immediate INR_not_0: real. (**********) Lemma not_0_INR : forall n:nat, n <> 0%nat -> INR n <> 0. Proof. intro n; case n. intro; absurd (0%nat = 0%nat); trivial. intros; rewrite S_INR. apply Rgt_not_eq; red; auto with real. Qed. Hint Resolve not_0_INR: real. Lemma not_INR : forall n m:nat, n <> m -> INR n <> INR m. Proof. intros n m H; case (le_or_lt n m); intros H1. case (le_lt_or_eq _ _ H1); intros H2. apply Rlt_dichotomy_converse; auto with real. exfalso; auto. apply not_eq_sym; apply Rlt_dichotomy_converse; auto with real. Qed. Hint Resolve not_INR: real. Lemma INR_eq : forall n m:nat, INR n = INR m -> n = m. Proof. intros; case (le_or_lt n m); intros H1. case (le_lt_or_eq _ _ H1); intros H2; auto. cut (n <> m). intro H3; generalize (not_INR n m H3); intro H4; exfalso; auto. omega. symmetry ; cut (m <> n). intro H3; generalize (not_INR m n H3); intro H4; exfalso; auto. omega. Qed. Hint Resolve INR_eq: real. Lemma INR_le : forall n m:nat, INR n <= INR m -> (n <= m)%nat. Proof. intros; elim H; intro. generalize (INR_lt n m H0); intro; auto with arith. generalize (INR_eq n m H0); intro; rewrite H1; auto. Qed. Hint Resolve INR_le: real. Lemma not_1_INR : forall n:nat, n <> 1%nat -> INR n <> 1. Proof. replace 1 with (INR 1); auto with real. Qed. Hint Resolve not_1_INR: real. (*********************************************************) (** ** Injection from [Z] to [R] *) (*********************************************************) (**********) Lemma IZN : forall n:Z, (0 <= n)%Z -> exists m : nat, n = Z.of_nat m. Proof. intros z; idtac; apply Z_of_nat_complete; assumption. Qed. (**********) Lemma INR_IZR_INZ : forall n:nat, INR n = IZR (Z.of_nat n). Proof. simple induction n; auto with real. intros; simpl; rewrite SuccNat2Pos.id_succ; auto with real. Qed. Lemma plus_IZR_NEG_POS : forall p q:positive, IZR (Zpos p + Zneg q) = IZR (Zpos p) + IZR (Zneg q). Proof. intros p q; simpl. rewrite Z.pos_sub_spec. case Pos.compare_spec; intros H; simpl. subst. ring. rewrite Pos2Nat.inj_sub by trivial. rewrite minus_INR by (now apply lt_le_weak, Pos2Nat.inj_lt). ring. rewrite Pos2Nat.inj_sub by trivial. rewrite minus_INR by (now apply lt_le_weak, Pos2Nat.inj_lt). ring. Qed. (**********) Lemma plus_IZR : forall n m:Z, IZR (n + m) = IZR n + IZR m. Proof. intro z; destruct z; intro t; destruct t; intros; auto with real. simpl; intros; rewrite Pos2Nat.inj_add; auto with real. apply plus_IZR_NEG_POS. rewrite Z.add_comm; rewrite Rplus_comm; apply plus_IZR_NEG_POS. simpl; intros; rewrite Pos2Nat.inj_add; rewrite plus_INR; auto with real. Qed. (**********) Lemma mult_IZR : forall n m:Z, IZR (n * m) = IZR n * IZR m. Proof. intros z t; case z; case t; simpl; auto with real. intros t1 z1; rewrite Pos2Nat.inj_mul; auto with real. intros t1 z1; rewrite Pos2Nat.inj_mul; auto with real. rewrite Rmult_comm. rewrite Ropp_mult_distr_l_reverse; auto with real. apply Ropp_eq_compat; rewrite mult_comm; auto with real. intros t1 z1; rewrite Pos2Nat.inj_mul; auto with real. rewrite Ropp_mult_distr_l_reverse; auto with real. intros t1 z1; rewrite Pos2Nat.inj_mul; auto with real. rewrite Rmult_opp_opp; auto with real. Qed. Lemma pow_IZR : forall z n, pow (IZR z) n = IZR (Z.pow z (Z.of_nat n)). Proof. intros z [|n];simpl;trivial. rewrite Zpower_pos_nat. 
rewrite SuccNat2Pos.id_succ. unfold Zpower_nat;simpl. rewrite mult_IZR. induction n;simpl;trivial. rewrite mult_IZR;ring[IHn]. Qed. (**********) Lemma succ_IZR : forall n:Z, IZR (Z.succ n) = IZR n + 1. Proof. intro; change 1 with (IZR 1); unfold Z.succ; apply plus_IZR. Qed. (**********) Lemma opp_IZR : forall n:Z, IZR (- n) = - IZR n. Proof. intro z; case z; simpl; auto with real. Qed. Definition Ropp_Ropp_IZR := opp_IZR. Lemma minus_IZR : forall n m:Z, IZR (n - m) = IZR n - IZR m. Proof. intros; unfold Z.sub, Rminus. rewrite <- opp_IZR. apply plus_IZR. Qed. (**********) Lemma Z_R_minus : forall n m:Z, IZR n - IZR m = IZR (n - m). Proof. intros z1 z2; unfold Rminus; unfold Z.sub. rewrite <- (Ropp_Ropp_IZR z2); symmetry ; apply plus_IZR. Qed. (**********) Lemma lt_0_IZR : forall n:Z, 0 < IZR n -> (0 < n)%Z. Proof. intro z; case z; simpl; intros. absurd (0 < 0); auto with real. unfold Z.lt; simpl; trivial. case Rlt_not_le with (1 := H). replace 0 with (-0); auto with real. Qed. (**********) Lemma lt_IZR : forall n m:Z, IZR n < IZR m -> (n < m)%Z. Proof. intros z1 z2 H; apply Z.lt_0_sub. apply lt_0_IZR. rewrite <- Z_R_minus. exact (Rgt_minus (IZR z2) (IZR z1) H). Qed. (**********) Lemma eq_IZR_R0 : forall n:Z, IZR n = 0 -> n = 0%Z. Proof. intro z; destruct z; simpl; intros; auto with zarith. case (Rlt_not_eq 0 (INR (Pos.to_nat p))); auto with real. case (Rlt_not_eq (- INR (Pos.to_nat p)) 0); auto with real. apply Ropp_lt_gt_0_contravar. unfold Rgt; apply pos_INR_nat_of_P. Qed. (**********) Lemma eq_IZR : forall n m:Z, IZR n = IZR m -> n = m. Proof. intros z1 z2 H; generalize (Rminus_diag_eq (IZR z1) (IZR z2) H); rewrite (Z_R_minus z1 z2); intro; generalize (eq_IZR_R0 (z1 - z2) H0); intro; omega. Qed. (**********) Lemma not_0_IZR : forall n:Z, n <> 0%Z -> IZR n <> 0. Proof. intros z H; red; intros H0; case H. apply eq_IZR; auto. Qed. (*********) Lemma le_0_IZR : forall n:Z, 0 <= IZR n -> (0 <= n)%Z. Proof. unfold Rle; intros z [H| H]. red; intro; apply (Z.lt_le_incl 0 z (lt_0_IZR z H)); assumption. rewrite (eq_IZR_R0 z); auto with zarith real. Qed. (**********) Lemma le_IZR : forall n m:Z, IZR n <= IZR m -> (n <= m)%Z. Proof. unfold Rle; intros z1 z2 [H| H]. apply (Z.lt_le_incl z1 z2); auto with real. apply lt_IZR; trivial. rewrite (eq_IZR z1 z2); auto with zarith real. Qed. (**********) Lemma le_IZR_R1 : forall n:Z, IZR n <= 1 -> (n <= 1)%Z. Proof. pattern 1 at 1; replace 1 with (IZR 1); intros; auto. apply le_IZR; trivial. Qed. (**********) Lemma IZR_ge : forall n m:Z, (n >= m)%Z -> IZR n >= IZR m. Proof. intros m n H; apply Rnot_lt_ge; red; intro. generalize (lt_IZR m n H0); intro; omega. Qed. Lemma IZR_le : forall n m:Z, (n <= m)%Z -> IZR n <= IZR m. Proof. intros m n H; apply Rnot_gt_le; red; intro. unfold Rgt in H0; generalize (lt_IZR n m H0); intro; omega. Qed. Lemma IZR_lt : forall n m:Z, (n < m)%Z -> IZR n < IZR m. Proof. intros m n H; cut (m <= n)%Z. intro H0; elim (IZR_le m n H0); intro; auto. generalize (eq_IZR m n H1); intro; exfalso; omega. omega. Qed. Lemma one_IZR_lt1 : forall n:Z, -1 < IZR n < 1 -> n = 0%Z. Proof. intros z [H1 H2]. apply Z.le_antisymm. apply Z.lt_succ_r; apply lt_IZR; trivial. replace 0%Z with (Z.succ (-1)); trivial. apply Z.le_succ_l; apply lt_IZR; trivial. Qed. Lemma one_IZR_r_R1 : forall r (n m:Z), r < IZR n <= r + 1 -> r < IZR m <= r + 1 -> n = m. Proof. intros r z x [H1 H2] [H3 H4]. cut ((z - x)%Z = 0%Z); auto with zarith. apply one_IZR_lt1. rewrite <- Z_R_minus; split. replace (-1) with (r - (r + 1)). 
unfold Rminus; apply Rplus_lt_le_compat; auto with real. ring. replace 1 with (r + 1 - r). unfold Rminus; apply Rplus_le_lt_compat; auto with real. ring. Qed. (**********) Lemma single_z_r_R1 : forall r (n m:Z), r < IZR n -> IZR n <= r + 1 -> r < IZR m -> IZR m <= r + 1 -> n = m. Proof. intros; apply one_IZR_r_R1 with r; auto. Qed. (**********) Lemma tech_single_z_r_R1 : forall r (n:Z), r < IZR n -> IZR n <= r + 1 -> (exists s : Z, s <> n /\ r < IZR s /\ IZR s <= r + 1) -> False. Proof. intros r z H1 H2 [s [H3 [H4 H5]]]. apply H3; apply single_z_r_R1 with r; trivial. Qed. (*********) Lemma Rmult_le_pos : forall r1 r2, 0 <= r1 -> 0 <= r2 -> 0 <= r1 * r2. Proof. intros x y H H0; rewrite <- (Rmult_0_l x); rewrite <- (Rmult_comm x); apply (Rmult_le_compat_l x 0 y H H0). Qed. Lemma Rle_Rinv : forall x y:R, 0 < x -> 0 < y -> x <= y -> / y <= / x. Proof. intros; apply Rmult_le_reg_l with x. apply H. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with y. apply H0. rewrite Rmult_1_r; rewrite Rmult_comm; rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; apply H1. red; intro; rewrite H2 in H0; elim (Rlt_irrefl _ H0). red; intro; rewrite H2 in H; elim (Rlt_irrefl _ H). Qed. Lemma double : forall r1, 2 * r1 = r1 + r1. Proof. intro; ring. Qed. Lemma double_var : forall r1, r1 = r1 / 2 + r1 / 2. Proof. intro; rewrite <- double; unfold Rdiv; rewrite <- Rmult_assoc; symmetry ; apply Rinv_r_simpl_m. replace 2 with (INR 2); [ apply not_0_INR; discriminate | unfold INR; ring ]. Qed. (*********************************************************) (** ** Other rules about < and <= *) (*********************************************************) Lemma Rmult_ge_0_gt_0_lt_compat : forall r1 r2 r3 r4, r3 >= 0 -> r2 > 0 -> r1 < r2 -> r3 < r4 -> r1 * r3 < r2 * r4. Proof. intros; apply Rle_lt_trans with (r2 * r3); auto with real. Qed. Lemma le_epsilon : forall r1 r2, (forall eps:R, 0 < eps -> r1 <= r2 + eps) -> r1 <= r2. Proof. intros x y; intros; elim (Rtotal_order x y); intro. left; assumption. elim H0; intro. right; assumption. clear H0; generalize (Rgt_minus x y H1); intro H2; change (0 < x - y) in H2. cut (0 < 2). intro. generalize (Rmult_lt_0_compat (x - y) (/ 2) H2 (Rinv_0_lt_compat 2 H0)); intro H3; generalize (H ((x - y) * / 2) H3); replace (y + (x - y) * / 2) with ((y + x) * / 2). intro H4; generalize (Rmult_le_compat_l 2 x ((y + x) * / 2) (Rlt_le 0 2 H0) H4); rewrite <- (Rmult_comm ((y + x) * / 2)); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; replace (2 * x) with (x + x). rewrite (Rplus_comm y); intro H5; apply Rplus_le_reg_l with x; assumption. ring. replace 2 with (INR 2); [ apply not_0_INR; discriminate | reflexivity ]. pattern y at 2; replace y with (y / 2 + y / 2). unfold Rminus, Rdiv. repeat rewrite Rmult_plus_distr_r. ring. cut (forall z:R, 2 * z = z + z). intro. rewrite <- (H4 (y / 2)). unfold Rdiv. rewrite <- Rmult_assoc; apply Rinv_r_simpl_m. replace 2 with (INR 2). apply not_0_INR. discriminate. unfold INR; reflexivity. intro; ring. cut (0%nat <> 2%nat); [ intro H0; generalize (lt_0_INR 2 (neq_O_lt 2 H0)); unfold INR; intro; assumption | discriminate ]. Qed. (**********) Lemma completeness_weak : forall E:R -> Prop, bound E -> (exists x : R, E x) -> exists m : R, is_lub E m. Proof. intros; elim (completeness E H H0); intros; split with x; assumption. Qed. 
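(* A degenerate illustration of [completeness_weak] (ad-hoc name and
   statement): a singleton set is bounded and inhabited, hence has a least
   upper bound. *)
Remark completeness_weak_example :
  forall a:R, exists m : R, is_lub (fun x:R => x = a) m.
Proof.
  intros a; apply completeness_weak.
  exists a; intros x Hx; simpl in Hx; rewrite Hx; right; reflexivity.
  exists a; simpl; reflexivity.
Qed.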
(*********************************************************) (** * Definitions of new types *) (*********************************************************) Record nonnegreal : Type := mknonnegreal {nonneg :> R; cond_nonneg : 0 <= nonneg}. Record posreal : Type := mkposreal {pos :> R; cond_pos : 0 < pos}. Record nonposreal : Type := mknonposreal {nonpos :> R; cond_nonpos : nonpos <= 0}. Record negreal : Type := mknegreal {neg :> R; cond_neg : neg < 0}. Record nonzeroreal : Type := mknonzeroreal {nonzero :> R; cond_nonzero : nonzero <> 0}. (** Compatibility *) Notation prod_neq_R0 := Rmult_integral_contrapositive_currified (only parsing). Notation minus_Rgt := Rminus_gt (only parsing). Notation minus_Rge := Rminus_ge (only parsing). Notation plus_le_is_le := Rplus_le_reg_pos_r (only parsing). Notation plus_lt_is_lt := Rplus_lt_reg_pos_r (only parsing). Notation INR_lt_1 := lt_1_INR (only parsing). Notation lt_INR_0 := lt_0_INR (only parsing). Notation not_nm_INR := not_INR (only parsing). Notation INR_pos := pos_INR_nat_of_P (only parsing). Notation not_INR_O := INR_not_0 (only parsing). Notation not_O_INR := not_0_INR (only parsing). Notation not_O_IZR := not_0_IZR (only parsing). Notation le_O_IZR := le_0_IZR (only parsing). Notation lt_O_IZR := lt_0_IZR (only parsing). coq-8.4pl2/theories/Reals/Rlogic.v0000640000175000001440000002021412010532755016124 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop. Hypothesis HP : forall n, {P n} + {~P n}. Let ge_fun_sums_ge_lemma : (forall (m n : nat) (f : nat -> R), (lt m n) -> (forall i : nat, 0 <= f i) -> sum_f_R0 f m <= sum_f_R0 f n). Proof. intros m n f mn fpos. replace (sum_f_R0 f m) with (sum_f_R0 f m + 0) by ring. rewrite (tech2 f m n mn). apply Rplus_le_compat_l. induction (n - S m)%nat; simpl in *. apply fpos. replace 0 with (0 + 0) by ring. apply (Rplus_le_compat _ _ _ _ IHn0 (fpos (S (m + S n0)%nat))). Qed. Let ge_fun_sums_ge : (forall (m n : nat) (f : nat -> R), (le m n) -> (forall i : nat, 0 <= f i) -> sum_f_R0 f m <= sum_f_R0 f n). Proof. intros m n f mn pos. elim (le_lt_or_eq _ _ mn). intro; apply ge_fun_sums_ge_lemma; assumption. intro H; rewrite H; auto with *. Qed. Let f:=fun n => (if HP n then (1/2)^n else 0)%R. Lemma cauchy_crit_geometric_dec_fun : Cauchy_crit_series f. Proof. intros e He. assert (X:(Pser (fun n:nat => 1) (1/2) (/ (1 - (1/2))))%R). apply GP_infinite. apply Rabs_def1; fourier. assert (He':e/2 > 0) by fourier. destruct (X _ He') as [N HN]. clear X. exists N. intros n m Hn Hm. replace e with (e/2 + e/2)%R by field. set (g:=(fun n0 : nat => 1 * (1 / 2) ^ n0)) in *. assert (R_dist (sum_f_R0 g n) (sum_f_R0 g m) < e / 2 + e / 2). apply Rle_lt_trans with (R_dist (sum_f_R0 g n) 2+R_dist 2 (sum_f_R0 g m))%R. apply R_dist_tri. replace (/(1 - 1/2)) with 2 in HN by field. cut (forall n, (n >= N)%nat -> R_dist (sum_f_R0 g n) 2 < e/2)%R. intros Z. apply Rplus_lt_compat. apply Z; assumption. rewrite R_dist_sym. apply Z; assumption. clear - HN He. intros n Hn. apply HN. auto. eapply Rle_lt_trans;[|apply H]. clear -ge_fun_sums_ge n. cut (forall n m, (m <= n)%nat -> R_dist (sum_f_R0 f n) (sum_f_R0 f m) <= R_dist (sum_f_R0 g n) (sum_f_R0 g m)). intros H. destruct (le_lt_dec m n). apply H; assumption. rewrite R_dist_sym. rewrite (R_dist_sym (sum_f_R0 g n)). apply H; auto with *. clear n m. intros n m Hnm. unfold R_dist. cut (forall i : nat, (1 / 2) ^ i >= 0). intro RPosPow. rewrite Rabs_pos_eq. rewrite Rabs_pos_eq. 
cut (sum_f_R0 g m - sum_f_R0 f m <= sum_f_R0 g n - sum_f_R0 f n). intros; fourier. do 2 rewrite <- minus_sum. apply (ge_fun_sums_ge m n (fun i : nat => g i - f i) Hnm). intro i. unfold f, g. elim (HP i); intro; ring_simplify; auto with *. cut (sum_f_R0 g m <= sum_f_R0 g n). intro; fourier. apply (ge_fun_sums_ge m n g Hnm). intro. unfold g. ring_simplify. apply Rge_le. apply RPosPow. cut (sum_f_R0 f m <= sum_f_R0 f n). intro; fourier. apply (ge_fun_sums_ge m n f Hnm). intro; unfold f. elim (HP i); intro; simpl. apply Rge_le. apply RPosPow. auto with *. intro i. apply Rle_ge. apply pow_le. fourier. Qed. Lemma forall_dec : {forall n, P n} + {~forall n, P n}. Proof. destruct (cv_cauchy_2 _ cauchy_crit_geometric_dec_fun). cut (2 <= x <-> forall n : nat, P n). intro H. elim (Rle_dec 2 x); intro X. left; tauto. right; tauto. assert (A:Rabs(1/2) < 1) by (apply Rabs_def1; fourier). assert (A0:=(GP_infinite (1/2) A)). symmetry. split; intro. replace 2 with (/ (1 - (1 / 2))) by field. unfold Pser, infinite_sum in A0. eapply Rle_cv_lim;[|unfold Un_cv; apply A0 |apply u]. intros n. clear -n H. induction n; unfold f;simpl. destruct (HP 0); auto with *. elim n; auto. apply Rplus_le_compat; auto. destruct (HP (S n)); auto with *. elim n0; auto. intros n. destruct (HP n); auto. elim (RIneq.Rle_not_lt _ _ H). assert (B:0< (1/2)^n). apply pow_lt. fourier. apply Rle_lt_trans with (2-(1/2)^n);[|fourier]. replace (/(1-1/2))%R with 2 in A0 by field. set (g:= fun m => if (eq_nat_dec m n) then (1/2)^n else 0). assert (Z: Un_cv (fun N : nat => sum_f_R0 g N) ((1/2)^n)). intros e He. exists n. intros a Ha. replace (sum_f_R0 g a) with ((1/2)^n). rewrite (R_dist_eq); assumption. symmetry. cut (forall a : nat, ((a >= n)%nat -> sum_f_R0 g a = (1 / 2) ^ n) /\ ((a < n)%nat -> sum_f_R0 g a = 0))%R. intros H0. destruct (H0 a). auto. clear - g. induction a. split; intros H; simpl; unfold g; destruct (eq_nat_dec 0 n) as [t|f]; try reflexivity. elim f; auto with *. exfalso; omega. destruct IHa as [IHa0 IHa1]. split; intros H; simpl; unfold g at 2; destruct (eq_nat_dec (S a) n). rewrite IHa1. ring. omega. ring_simplify. apply IHa0. omega. exfalso; omega. ring_simplify. apply IHa1. omega. assert (C:=CV_minus _ _ _ _ A0 Z). eapply Rle_cv_lim;[|apply u |apply C]. clear - n0 B. intros m. simpl. induction m. simpl. unfold f, g. destruct (eq_nat_dec 0 n). destruct (HP 0). elim n0. congruence. clear -n. induction n; simpl; fourier. destruct (HP); simpl; fourier. cut (f (S m) <= 1 * ((1 / 2) ^ (S m)) - g (S m)). intros L. eapply Rle_trans. simpl. apply Rplus_le_compat. apply IHm. apply L. simpl; fourier. unfold f, g. destruct (eq_nat_dec (S m) n). destruct (HP (S m)). elim n0. congruence. rewrite e. fourier. destruct (HP (S m)). fourier. ring_simplify. apply pow_le. fourier. Qed. Lemma sig_forall_dec : {n | ~P n}+{forall n, P n}. Proof. destruct forall_dec. right; assumption. left. apply constructive_indefinite_ground_description_nat; auto. clear - HP. firstorder. apply Classical_Pred_Type.not_all_ex_not. assumption. Qed. End Arithmetical_dec. (** 2- Derivability of the Archimedean axiom *) (* This is a standard proof (it has been taken from PlanetMath). It is formulated negatively so as to avoid the need for classical logic. Using a proof of {n | ~P n}+{forall n, P n} (the one above or a variant of it that does not need classical axioms) , we can in principle also derive [up] and its [specification] *) Theorem not_not_archimedean : forall r : R, ~ (forall n : nat, (INR n <= r)%R). Proof. intros r H. 
set (E := fun r => exists n : nat, r = INR n). assert (exists x : R, E x) by (exists 0%R; simpl; red; exists 0%nat; reflexivity). assert (bound E) by (exists r; intros x (m,H2); rewrite H2; apply H). destruct (completeness E) as (M,(H3,H4)); try assumption. set (M' := (M + -1)%R). assert (H2 : ~ is_upper_bound E M'). intro H5. assert (M <= M')%R by (apply H4; exact H5). apply (Rlt_not_le M M'). unfold M'. pattern M at 2. rewrite <- Rplus_0_l. pattern (0 + M)%R. rewrite Rplus_comm. rewrite <- (Rplus_opp_r 1). apply Rplus_lt_compat_l. rewrite Rplus_comm. apply Rlt_plus_1. assumption. apply H2. intros N (n,H7). rewrite H7. unfold M'. assert (H5 : (INR (S n) <= M)%R) by (apply H3; exists (S n); reflexivity). rewrite S_INR in H5. assert (H6 : (INR n + 1 + -1 <= M + -1)%R). apply Rplus_le_compat_r. assumption. rewrite Rplus_assoc in H6. rewrite Rplus_opp_r in H6. rewrite (Rplus_comm (INR n) 0) in H6. rewrite Rplus_0_l in H6. assumption. Qed. coq-8.4pl2/theories/Reals/Rsigma.v0000640000175000001440000001146612010532755016140 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R. Definition sigma (low high:nat) : R := sum_f_R0 (fun k:nat => f (low + k)) (high - low). Theorem sigma_split : forall low high k:nat, (low <= k)%nat -> (k < high)%nat -> sigma low high = sigma low k + sigma (S k) high. Proof. intros; induction k as [| k Hreck]. cut (low = 0%nat). intro; rewrite H1; unfold sigma; rewrite <- minus_n_n; rewrite <- minus_n_O; simpl; replace (high - 1)%nat with (pred high). apply (decomp_sum (fun k:nat => f k)). assumption. apply pred_of_minus. inversion H; reflexivity. cut ((low <= k)%nat \/ low = S k). intro; elim H1; intro. replace (sigma low (S k)) with (sigma low k + f (S k)). rewrite Rplus_assoc; replace (f (S k) + sigma (S (S k)) high) with (sigma (S k) high). apply Hreck. assumption. apply lt_trans with (S k); [ apply lt_n_Sn | assumption ]. unfold sigma; replace (high - S (S k))%nat with (pred (high - S k)). pattern (S k) at 3; replace (S k) with (S k + 0)%nat; [ idtac | ring ]. replace (sum_f_R0 (fun k0:nat => f (S (S k) + k0)) (pred (high - S k))) with (sum_f_R0 (fun k0:nat => f (S k + S k0)) (pred (high - S k))). apply (decomp_sum (fun i:nat => f (S k + i))). apply lt_minus_O_lt; assumption. apply sum_eq; intros; replace (S k + S i)%nat with (S (S k) + i)%nat. reflexivity. ring. replace (high - S (S k))%nat with (high - S k - 1)%nat. apply pred_of_minus. omega. unfold sigma; replace (S k - low)%nat with (S (k - low)). pattern (S k) at 1; replace (S k) with (low + S (k - low))%nat. symmetry ; apply (tech5 (fun i:nat => f (low + i))). omega. omega. rewrite <- H2; unfold sigma; rewrite <- minus_n_n; simpl; replace (high - S low)%nat with (pred (high - low)). replace (sum_f_R0 (fun k0:nat => f (S (low + k0))) (pred (high - low))) with (sum_f_R0 (fun k0:nat => f (low + S k0)) (pred (high - low))). apply (decomp_sum (fun k0:nat => f (low + k0))). apply lt_minus_O_lt. apply le_lt_trans with (S k); [ rewrite H2; apply le_n | assumption ]. apply sum_eq; intros; replace (S (low + i)) with (low + S i)%nat. reflexivity. ring. omega. inversion H; [ right; reflexivity | left; assumption ]. Qed. Theorem sigma_diff : forall low high k:nat, (low <= k)%nat -> (k < high)%nat -> sigma low high - sigma low k = sigma (S k) high. Proof. intros low high k H1 H2; symmetry ; rewrite (sigma_split H1 H2); ring. Qed. 
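(* Editorial illustration, not part of the original Rsigma.v: a concrete
   instance of [sigma_diff] just proved above, splitting the sum over the
   indices 0..2 at index 0.  The name [sigma_diff_example] is an added,
   hypothetical identifier; [omega] discharges the arithmetic side conditions. *)
Example sigma_diff_example : sigma 0 2 - sigma 0 0 = sigma 1 2.
Proof.
  apply sigma_diff; omega.
Qed.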
Theorem sigma_diff_neg : forall low high k:nat, (low <= k)%nat -> (k < high)%nat -> sigma low k - sigma low high = - sigma (S k) high. Proof. intros low high k H1 H2; rewrite (sigma_split H1 H2); ring. Qed. Theorem sigma_first : forall low high:nat, (low < high)%nat -> sigma low high = f low + sigma (S low) high. Proof. intros low high H1; generalize (lt_le_S low high H1); intro H2; generalize (lt_le_weak low high H1); intro H3; replace (f low) with (sigma low low). apply sigma_split. apply le_n. assumption. unfold sigma; rewrite <- minus_n_n. simpl. replace (low + 0)%nat with low; [ reflexivity | ring ]. Qed. Theorem sigma_last : forall low high:nat, (low < high)%nat -> sigma low high = f high + sigma low (pred high). Proof. intros low high H1; generalize (lt_le_S low high H1); intro H2; generalize (lt_le_weak low high H1); intro H3; replace (f high) with (sigma high high). rewrite Rplus_comm; cut (high = S (pred high)). intro; pattern high at 3; rewrite H. apply sigma_split. apply le_S_n; rewrite <- H; apply lt_le_S; assumption. apply lt_pred_n_n; apply le_lt_trans with low; [ apply le_O_n | assumption ]. apply S_pred with 0%nat; apply le_lt_trans with low; [ apply le_O_n | assumption ]. unfold sigma; rewrite <- minus_n_n; simpl; replace (high + 0)%nat with high; [ reflexivity | ring ]. Qed. Theorem sigma_eq_arg : forall low:nat, sigma low low = f low. Proof. intro; unfold sigma; rewrite <- minus_n_n. simpl; replace (low + 0)%nat with low; [ reflexivity | ring ]. Qed. End Sigma. coq-8.4pl2/theories/Reals/Rtrigo_def.v0000640000175000001440000003450612010532755017002 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* / INR (fact i) * x ^ i) l. Lemma exp_cof_no_R0 : forall n:nat, / INR (fact n) <> 0. Proof. intro. apply Rinv_neq_0_compat. apply INR_fact_neq_0. Qed. Lemma exist_exp : forall x:R, { l:R | exp_in x l }. Proof. intro; generalize (Alembert_C3 (fun n:nat => / INR (fact n)) x exp_cof_no_R0 Alembert_exp). unfold Pser, exp_in. trivial. Defined. Definition exp (x:R) : R := proj1_sig (exist_exp x). Lemma pow_i : forall i:nat, (0 < i)%nat -> 0 ^ i = 0. Proof. intros; apply pow_ne_zero. red; intro; rewrite H0 in H; elim (lt_irrefl _ H). Qed. Lemma exist_exp0 : { l:R | exp_in 0 l }. Proof. exists 1. unfold exp_in; unfold infinite_sum; intros. exists 0%nat. intros; replace (sum_f_R0 (fun i:nat => / INR (fact i) * 0 ^ i) n) with 1. unfold R_dist; replace (1 - 1) with 0; [ rewrite Rabs_R0; assumption | ring ]. induction n as [| n Hrecn]. simpl; rewrite Rinv_1; ring. rewrite tech5. rewrite <- Hrecn. simpl. ring. unfold ge; apply le_O_n. Defined. (* Value of [exp 0] *) Lemma exp_0 : exp 0 = 1. Proof. cut (exp_in 0 (exp 0)). cut (exp_in 0 1). unfold exp_in; intros; eapply uniqueness_sum. apply H0. apply H. exact (proj2_sig exist_exp0). exact (proj2_sig (exist_exp 0)). Qed. (*****************************************) (** * Definition of hyperbolic functions *) (*****************************************) Definition cosh (x:R) : R := (exp x + exp (- x)) / 2. Definition sinh (x:R) : R := (exp x - exp (- x)) / 2. Definition tanh (x:R) : R := sinh x / cosh x. Lemma cosh_0 : cosh 0 = 1. Proof. unfold cosh; rewrite Ropp_0; rewrite exp_0. unfold Rdiv; rewrite <- Rinv_r_sym; [ reflexivity | discrR ]. Qed. Lemma sinh_0 : sinh 0 = 0. Proof. unfold sinh; rewrite Ropp_0; rewrite exp_0. unfold Rminus, Rdiv; rewrite Rplus_opp_r; apply Rmult_0_l. Qed. 
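(* Editorial illustration, not part of the original Rtrigo_def.v: a direct
   consequence of [sinh_0] and [cosh_0] above, given as a usage sketch.  The
   name [tanh_0_example] is an added, hypothetical identifier. *)
Example tanh_0_example : tanh 0 = 0.
Proof.
  unfold tanh; rewrite sinh_0, cosh_0; unfold Rdiv; apply Rmult_0_l.
Qed.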
Definition cos_n (n:nat) : R := (-1) ^ n / INR (fact (2 * n)). Lemma simpl_cos_n : forall n:nat, cos_n (S n) / cos_n n = - / INR (2 * S n * (2 * n + 1)). Proof. intro; unfold cos_n; replace (S n) with (n + 1)%nat; [ idtac | ring ]. rewrite pow_add; unfold Rdiv; rewrite Rinv_mult_distr. rewrite Rinv_involutive. replace ((-1) ^ n * (-1) ^ 1 * / INR (fact (2 * (n + 1))) * (/ (-1) ^ n * INR (fact (2 * n)))) with ((-1) ^ n * / (-1) ^ n * / INR (fact (2 * (n + 1))) * INR (fact (2 * n)) * (-1) ^ 1); [ idtac | ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_l; unfold pow; rewrite Rmult_1_r. replace (2 * (n + 1))%nat with (S (S (2 * n))); [ idtac | ring ]. do 2 rewrite fact_simpl; do 2 rewrite mult_INR; repeat rewrite Rinv_mult_distr; try (apply not_O_INR; discriminate). rewrite <- (Rmult_comm (-1)). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r. replace (S (2 * n)) with (2 * n + 1)%nat; [ idtac | ring ]. rewrite mult_INR; rewrite Rinv_mult_distr. ring. apply not_O_INR; discriminate. replace (2 * n + 1)%nat with (S (2 * n)); [ apply not_O_INR; discriminate | ring ]. apply INR_fact_neq_0. apply INR_fact_neq_0. apply prod_neq_R0; [ apply not_O_INR; discriminate | apply INR_fact_neq_0 ]. apply pow_nonzero; discrR. apply INR_fact_neq_0. apply pow_nonzero; discrR. apply Rinv_neq_0_compat; apply INR_fact_neq_0. Qed. Lemma archimed_cor1 : forall eps:R, 0 < eps -> exists N : nat, / INR N < eps /\ (0 < N)%nat. Proof. intros; cut (/ eps < IZR (up (/ eps))). intro; cut (0 <= up (/ eps))%Z. intro; assert (H2 := IZN _ H1); elim H2; intros; exists (max x 1). split. cut (0 < IZR (Z.of_nat x)). intro; rewrite INR_IZR_INZ; apply Rle_lt_trans with (/ IZR (Z.of_nat x)). apply Rmult_le_reg_l with (IZR (Z.of_nat x)). assumption. rewrite <- Rinv_r_sym; [ idtac | red; intro; rewrite H5 in H4; elim (Rlt_irrefl _ H4) ]. apply Rmult_le_reg_l with (IZR (Z.of_nat (max x 1))). apply Rlt_le_trans with (IZR (Z.of_nat x)). assumption. repeat rewrite <- INR_IZR_INZ; apply le_INR; apply le_max_l. rewrite Rmult_1_r; rewrite (Rmult_comm (IZR (Z.of_nat (max x 1)))); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; repeat rewrite <- INR_IZR_INZ; apply le_INR; apply le_max_l. rewrite <- INR_IZR_INZ; apply not_O_INR. red; intro; assert (H6 := le_max_r x 1); cut (0 < 1)%nat; [ intro | apply lt_O_Sn ]; assert (H8 := lt_le_trans _ _ _ H7 H6); rewrite H5 in H8; elim (lt_irrefl _ H8). pattern eps at 1; rewrite <- Rinv_involutive. apply Rinv_lt_contravar. apply Rmult_lt_0_compat; [ apply Rinv_0_lt_compat; assumption | assumption ]. rewrite H3 in H0; assumption. red; intro; rewrite H5 in H; elim (Rlt_irrefl _ H). apply Rlt_trans with (/ eps). apply Rinv_0_lt_compat; assumption. rewrite H3 in H0; assumption. apply lt_le_trans with 1%nat; [ apply lt_O_Sn | apply le_max_r ]. apply le_IZR; replace (IZR 0) with 0; [ idtac | reflexivity ]; left; apply Rlt_trans with (/ eps); [ apply Rinv_0_lt_compat; assumption | assumption ]. assert (H0 := archimed (/ eps)). elim H0; intros; assumption. Qed. Lemma Alembert_cos : Un_cv (fun n:nat => Rabs (cos_n (S n) / cos_n n)) 0. Proof. unfold Un_cv; intros. assert (H0 := archimed_cor1 eps H). elim H0; intros; exists x. intros; rewrite simpl_cos_n; unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; rewrite Rabs_Ropp; rewrite Rabs_right. rewrite mult_INR; rewrite Rinv_mult_distr. cut (/ INR (2 * S n) < 1). intro; cut (/ INR (2 * n + 1) < eps). intro; rewrite <- (Rmult_1_l eps). apply Rmult_gt_0_lt_compat; try assumption. 
change (0 < / INR (2 * n + 1)); apply Rinv_0_lt_compat; apply lt_INR_0. replace (2 * n + 1)%nat with (S (2 * n)); [ apply lt_O_Sn | ring ]. apply Rlt_0_1. cut (x < 2 * n + 1)%nat. intro; assert (H5 := lt_INR _ _ H4). apply Rlt_trans with (/ INR x). apply Rinv_lt_contravar. apply Rmult_lt_0_compat. apply lt_INR_0. elim H1; intros; assumption. apply lt_INR_0; replace (2 * n + 1)%nat with (S (2 * n)); [ apply lt_O_Sn | ring ]. assumption. elim H1; intros; assumption. apply lt_le_trans with (S n). unfold ge in H2; apply le_lt_n_Sm; assumption. replace (2 * n + 1)%nat with (S (2 * n)); [ idtac | ring ]. apply le_n_S; apply le_n_2n. apply Rmult_lt_reg_l with (INR (2 * S n)). apply lt_INR_0; replace (2 * S n)%nat with (S (S (2 * n))). apply lt_O_Sn. replace (S n) with (n + 1)%nat; [ idtac | ring ]. ring. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; replace 1 with (INR 1); [ apply lt_INR | reflexivity ]. replace (2 * S n)%nat with (S (S (2 * n))). apply lt_n_S; apply lt_O_Sn. replace (S n) with (n + 1)%nat; [ ring | ring ]. apply not_O_INR; discriminate. apply not_O_INR; discriminate. replace (2 * n + 1)%nat with (S (2 * n)); [ apply not_O_INR; discriminate | ring ]. apply Rle_ge; left; apply Rinv_0_lt_compat. apply lt_INR_0. replace (2 * S n * (2 * n + 1))%nat with (S (S (4 * (n * n) + 6 * n))). apply lt_O_Sn. apply INR_eq. repeat rewrite S_INR; rewrite plus_INR; repeat rewrite mult_INR; rewrite plus_INR; rewrite mult_INR; repeat rewrite S_INR; replace (INR 0) with 0; [ ring | reflexivity ]. Qed. Lemma cosn_no_R0 : forall n:nat, cos_n n <> 0. intro; unfold cos_n; unfold Rdiv; apply prod_neq_R0. apply pow_nonzero; discrR. apply Rinv_neq_0_compat. apply INR_fact_neq_0. Qed. (**********) Definition cos_in (x l:R) : Prop := infinite_sum (fun i:nat => cos_n i * x ^ i) l. (**********) Lemma exist_cos : forall x:R, { l:R | cos_in x l }. intro; generalize (Alembert_C3 cos_n x cosn_no_R0 Alembert_cos). unfold Pser, cos_in; trivial. Qed. (** Definition of cosinus *) Definition cos (x:R) : R := let (a,_) := exist_cos (Rsqr x) in a. Definition sin_n (n:nat) : R := (-1) ^ n / INR (fact (2 * n + 1)). Lemma simpl_sin_n : forall n:nat, sin_n (S n) / sin_n n = - / INR ((2 * S n + 1) * (2 * S n)). Proof. intro; unfold sin_n; replace (S n) with (n + 1)%nat; [ idtac | ring ]. rewrite pow_add; unfold Rdiv; rewrite Rinv_mult_distr. rewrite Rinv_involutive. replace ((-1) ^ n * (-1) ^ 1 * / INR (fact (2 * (n + 1) + 1)) * (/ (-1) ^ n * INR (fact (2 * n + 1)))) with ((-1) ^ n * / (-1) ^ n * / INR (fact (2 * (n + 1) + 1)) * INR (fact (2 * n + 1)) * (-1) ^ 1); [ idtac | ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_l; unfold pow; rewrite Rmult_1_r; replace (2 * (n + 1) + 1)%nat with (S (S (2 * n + 1))). do 2 rewrite fact_simpl; do 2 rewrite mult_INR; repeat rewrite Rinv_mult_distr. rewrite <- (Rmult_comm (-1)); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; replace (S (2 * n + 1)) with (2 * (n + 1))%nat. repeat rewrite mult_INR; repeat rewrite Rinv_mult_distr. ring. apply not_O_INR; discriminate. replace (n + 1)%nat with (S n); [ apply not_O_INR; discriminate | ring ]. apply not_O_INR; discriminate. apply prod_neq_R0. apply not_O_INR; discriminate. replace (n + 1)%nat with (S n); [ apply not_O_INR; discriminate | ring ]. apply not_O_INR; discriminate. replace (n + 1)%nat with (S n); [ apply not_O_INR; discriminate | ring ]. rewrite mult_plus_distr_l; cut (forall n:nat, S n = (n + 1)%nat). intros; rewrite (H (2 * n + 1)%nat). ring. intros; ring. apply INR_fact_neq_0. apply not_O_INR; discriminate. 
apply INR_fact_neq_0. apply not_O_INR; discriminate. apply prod_neq_R0; [ apply not_O_INR; discriminate | apply INR_fact_neq_0 ]. cut (forall n:nat, S (S n) = (n + 2)%nat); [ intros; rewrite (H (2 * n + 1)%nat); ring | intros; ring ]. apply pow_nonzero; discrR. apply INR_fact_neq_0. apply pow_nonzero; discrR. apply Rinv_neq_0_compat; apply INR_fact_neq_0. Qed. Lemma Alembert_sin : Un_cv (fun n:nat => Rabs (sin_n (S n) / sin_n n)) 0. Proof. unfold Un_cv; intros; assert (H0 := archimed_cor1 eps H). elim H0; intros; exists x. intros; rewrite simpl_sin_n; unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; rewrite Rabs_Ropp; rewrite Rabs_right. rewrite mult_INR; rewrite Rinv_mult_distr. cut (/ INR (2 * S n) < 1). intro; cut (/ INR (2 * S n + 1) < eps). intro; rewrite <- (Rmult_1_l eps); rewrite (Rmult_comm (/ INR (2 * S n + 1))); apply Rmult_gt_0_lt_compat; try assumption. change (0 < / INR (2 * S n + 1)); apply Rinv_0_lt_compat; apply lt_INR_0; replace (2 * S n + 1)%nat with (S (2 * S n)); [ apply lt_O_Sn | ring ]. apply Rlt_0_1. cut (x < 2 * S n + 1)%nat. intro; assert (H5 := lt_INR _ _ H4); apply Rlt_trans with (/ INR x). apply Rinv_lt_contravar. apply Rmult_lt_0_compat. apply lt_INR_0; elim H1; intros; assumption. apply lt_INR_0; replace (2 * S n + 1)%nat with (S (2 * S n)); [ apply lt_O_Sn | ring ]. assumption. elim H1; intros; assumption. apply lt_le_trans with (S n). unfold ge in H2; apply le_lt_n_Sm; assumption. replace (2 * S n + 1)%nat with (S (2 * S n)); [ idtac | ring ]. apply le_S; apply le_n_2n. apply Rmult_lt_reg_l with (INR (2 * S n)). apply lt_INR_0; replace (2 * S n)%nat with (S (S (2 * n))); [ apply lt_O_Sn | replace (S n) with (n + 1)%nat; [ idtac | ring ]; ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; replace 1 with (INR 1); [ apply lt_INR | reflexivity ]. replace (2 * S n)%nat with (S (S (2 * n))). apply lt_n_S; apply lt_O_Sn. replace (S n) with (n + 1)%nat; [ ring | ring ]. apply not_O_INR; discriminate. apply not_O_INR; discriminate. apply not_O_INR; discriminate. left; change (0 < / INR ((2 * S n + 1) * (2 * S n))); apply Rinv_0_lt_compat. apply lt_INR_0. replace ((2 * S n + 1) * (2 * S n))%nat with (S (S (S (S (S (S (4 * (n * n) + 10 * n))))))). apply lt_O_Sn. apply INR_eq; repeat rewrite S_INR; rewrite plus_INR; repeat rewrite mult_INR; rewrite plus_INR; rewrite mult_INR; repeat rewrite S_INR; replace (INR 0) with 0; [ ring | reflexivity ]. Defined. Lemma sin_no_R0 : forall n:nat, sin_n n <> 0. Proof. intro; unfold sin_n; unfold Rdiv; apply prod_neq_R0. apply pow_nonzero; discrR. apply Rinv_neq_0_compat; apply INR_fact_neq_0. Qed. (**********) Definition sin_in (x l:R) : Prop := infinite_sum (fun i:nat => sin_n i * x ^ i) l. (**********) Lemma exist_sin : forall x:R, { l:R | sin_in x l }. Proof. intro; generalize (Alembert_C3 sin_n x sin_no_R0 Alembert_sin). unfold Pser, sin_n; trivial. Defined. (***********************) (* Definition of sinus *) Definition sin (x:R) : R := let (a,_) := exist_sin (Rsqr x) in x * a. (*********************************************) (** * Properties *) (*********************************************) Lemma cos_sym : forall x:R, cos x = cos (- x). Proof. intros; unfold cos; replace (Rsqr (- x)) with (Rsqr x). reflexivity. apply Rsqr_neg. Qed. Lemma sin_antisym : forall x:R, sin (- x) = - sin x. Proof. intro; unfold sin; replace (Rsqr (- x)) with (Rsqr x); [ idtac | apply Rsqr_neg ]. case (exist_sin (Rsqr x)); intros; ring. Qed. Lemma sin_0 : sin 0 = 0. Proof. unfold sin; case (exist_sin (Rsqr 0)). 
intros; ring. Qed. Lemma exist_cos0 : { l:R | cos_in 0 l }. Proof. exists 1. unfold cos_in; unfold infinite_sum; intros; exists 0%nat. intros. unfold R_dist. induction n as [| n Hrecn]. unfold cos_n; simpl. unfold Rdiv; rewrite Rinv_1. do 2 rewrite Rmult_1_r. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. rewrite tech5. replace (cos_n (S n) * 0 ^ S n) with 0. rewrite Rplus_0_r. apply Hrecn; unfold ge; apply le_O_n. simpl; ring. Defined. (* Value of [cos 0] *) Lemma cos_0 : cos 0 = 1. Proof. cut (cos_in 0 (cos 0)). cut (cos_in 0 1). unfold cos_in; intros; eapply uniqueness_sum. apply H0. apply H. exact (proj2_sig exist_cos0). assert (H := proj2_sig (exist_cos (Rsqr 0))); unfold cos; pattern 0 at 1; replace 0 with (Rsqr 0); [ exact H | apply Rsqr_0 ]. Qed. coq-8.4pl2/theories/Reals/Ranalysis1.v0000640000175000001440000014423312010532755016743 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R. (****************************************************) (** * Basic operations on functions *) (****************************************************) Definition plus_fct f1 f2 (x:R) : R := f1 x + f2 x. Definition opp_fct f (x:R) : R := - f x. Definition mult_fct f1 f2 (x:R) : R := f1 x * f2 x. Definition mult_real_fct (a:R) f (x:R) : R := a * f x. Definition minus_fct f1 f2 (x:R) : R := f1 x - f2 x. Definition div_fct f1 f2 (x:R) : R := f1 x / f2 x. Definition div_real_fct (a:R) f (x:R) : R := a / f x. Definition comp f1 f2 (x:R) : R := f1 (f2 x). Definition inv_fct f (x:R) : R := / f x. Delimit Scope Rfun_scope with F. Arguments plus_fct (f1 f2)%F x%R. Arguments mult_fct (f1 f2)%F x%R. Arguments minus_fct (f1 f2)%F x%R. Arguments div_fct (f1 f2)%F x%R. Arguments inv_fct f%F x%R. Arguments opp_fct f%F x%R. Arguments mult_real_fct a%R f%F x%R. Arguments div_real_fct a%R f%F x%R. Arguments comp (f1 f2)%F x%R. Infix "+" := plus_fct : Rfun_scope. Notation "- x" := (opp_fct x) : Rfun_scope. Infix "*" := mult_fct : Rfun_scope. Infix "-" := minus_fct : Rfun_scope. Infix "/" := div_fct : Rfun_scope. Local Notation "f1 'o' f2" := (comp f1 f2) (at level 20, right associativity) : Rfun_scope. Notation "/ x" := (inv_fct x) : Rfun_scope. Definition fct_cte (a x:R) : R := a. Definition id (x:R) := x. (****************************************************) (** * Variations of functions *) (****************************************************) Definition increasing f : Prop := forall x y:R, x <= y -> f x <= f y. Definition decreasing f : Prop := forall x y:R, x <= y -> f y <= f x. Definition strict_increasing f : Prop := forall x y:R, x < y -> f x < f y. Definition strict_decreasing f : Prop := forall x y:R, x < y -> f y < f x. Definition constant f : Prop := forall x y:R, f x = f y. (**********) Definition no_cond (x:R) : Prop := True. (**********) Definition constant_D_eq f (D:R -> Prop) (c:R) : Prop := forall x:R, D x -> f x = c. (***************************************************) (** * Definition of continuity as a limit *) (***************************************************) (**********) Definition continuity_pt f (x0:R) : Prop := continue_in f no_cond x0. Definition continuity f : Prop := forall x:R, continuity_pt f x. Arguments continuity_pt f%F x0%R. Arguments continuity f%F. (**********) Lemma continuity_pt_plus : forall f1 f2 (x0:R), continuity_pt f1 x0 -> continuity_pt f2 x0 -> continuity_pt (f1 + f2) x0. Proof. 
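(* This and the next few pointwise lemmas are obtained by unfolding
   [continuity_pt] and [continue_in] and applying the corresponding lemma on
   limits ([limit_plus], [limit_Ropp], [limit_minus], [limit_mul]). *)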
unfold continuity_pt, plus_fct; unfold continue_in; intros; apply limit_plus; assumption. Qed. Lemma continuity_pt_opp : forall f (x0:R), continuity_pt f x0 -> continuity_pt (- f) x0. Proof. unfold continuity_pt, opp_fct; unfold continue_in; intros; apply limit_Ropp; assumption. Qed. Lemma continuity_pt_minus : forall f1 f2 (x0:R), continuity_pt f1 x0 -> continuity_pt f2 x0 -> continuity_pt (f1 - f2) x0. Proof. unfold continuity_pt, minus_fct; unfold continue_in; intros; apply limit_minus; assumption. Qed. Lemma continuity_pt_mult : forall f1 f2 (x0:R), continuity_pt f1 x0 -> continuity_pt f2 x0 -> continuity_pt (f1 * f2) x0. Proof. unfold continuity_pt, mult_fct; unfold continue_in; intros; apply limit_mul; assumption. Qed. Lemma continuity_pt_const : forall f (x0:R), constant f -> continuity_pt f x0. Proof. unfold constant, continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; intros; exists 1; split; [ apply Rlt_0_1 | intros; generalize (H x x0); intro; rewrite H2; simpl; rewrite R_dist_eq; assumption ]. Qed. Lemma continuity_pt_scal : forall f (a x0:R), continuity_pt f x0 -> continuity_pt (mult_real_fct a f) x0. Proof. unfold continuity_pt, mult_real_fct; unfold continue_in; intros; apply (limit_mul (fun x:R => a) f (D_x no_cond x0) a (f x0) x0). unfold limit1_in; unfold limit_in; intros; exists 1; split. apply Rlt_0_1. intros; rewrite R_dist_eq; assumption. assumption. Qed. Lemma continuity_pt_inv : forall f (x0:R), continuity_pt f x0 -> f x0 <> 0 -> continuity_pt (/ f) x0. Proof. intros. replace (/ f)%F with (fun x:R => / f x). unfold continuity_pt; unfold continue_in; intros; apply limit_inv; assumption. unfold inv_fct; reflexivity. Qed. Lemma div_eq_inv : forall f1 f2, (f1 / f2)%F = (f1 * / f2)%F. Proof. intros; reflexivity. Qed. Lemma continuity_pt_div : forall f1 f2 (x0:R), continuity_pt f1 x0 -> continuity_pt f2 x0 -> f2 x0 <> 0 -> continuity_pt (f1 / f2) x0. Proof. intros; rewrite (div_eq_inv f1 f2); apply continuity_pt_mult; [ assumption | apply continuity_pt_inv; assumption ]. Qed. Lemma continuity_pt_comp : forall f1 f2 (x:R), continuity_pt f1 x -> continuity_pt f2 (f1 x) -> continuity_pt (f2 o f1) x. Proof. unfold continuity_pt; unfold continue_in; intros; unfold comp. cut (limit1_in (fun x0:R => f2 (f1 x0)) (Dgf (D_x no_cond x) (D_x no_cond (f1 x)) f1) ( f2 (f1 x)) x -> limit1_in (fun x0:R => f2 (f1 x0)) (D_x no_cond x) (f2 (f1 x)) x). intro; apply H1. eapply limit_comp. apply H. apply H0. unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. assert (H3 := H1 eps H2). elim H3; intros. exists x0. split. elim H4; intros; assumption. intros; case (Req_dec (f1 x) (f1 x1)); intro. rewrite H6; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. elim H4; intros; apply H8. split. unfold Dgf, D_x, no_cond. split. split. trivial. elim H5; unfold D_x, no_cond; intros. elim H9; intros; assumption. split. trivial. assumption. elim H5; intros; assumption. Qed. (**********) Lemma continuity_plus : forall f1 f2, continuity f1 -> continuity f2 -> continuity (f1 + f2). Proof. unfold continuity; intros; apply (continuity_pt_plus f1 f2 x (H x) (H0 x)). Qed. Lemma continuity_opp : forall f, continuity f -> continuity (- f). Proof. unfold continuity; intros; apply (continuity_pt_opp f x (H x)). Qed. Lemma continuity_minus : forall f1 f2, continuity f1 -> continuity f2 -> continuity (f1 - f2). Proof. unfold continuity; intros; apply (continuity_pt_minus f1 f2 x (H x) (H0 x)). Qed. 
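(* Editorial illustration, not part of the original Ranalysis1.v: the pointwise
   lemmas above already give global continuity of simple closed-form functions,
   here a difference of two constant functions.  The name
   [continuity_minus_cte_example] is an added, hypothetical identifier. *)
Example continuity_minus_cte_example : continuity (fct_cte 2 - fct_cte 1).
Proof.
  intro x; apply continuity_pt_minus; apply continuity_pt_const;
    intros y z; reflexivity.
Qed.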
Lemma continuity_mult : forall f1 f2, continuity f1 -> continuity f2 -> continuity (f1 * f2). Proof. unfold continuity; intros; apply (continuity_pt_mult f1 f2 x (H x) (H0 x)). Qed. Lemma continuity_const : forall f, constant f -> continuity f. Proof. unfold continuity; intros; apply (continuity_pt_const f x H). Qed. Lemma continuity_scal : forall f (a:R), continuity f -> continuity (mult_real_fct a f). Proof. unfold continuity; intros; apply (continuity_pt_scal f a x (H x)). Qed. Lemma continuity_inv : forall f, continuity f -> (forall x:R, f x <> 0) -> continuity (/ f). Proof. unfold continuity; intros; apply (continuity_pt_inv f x (H x) (H0 x)). Qed. Lemma continuity_div : forall f1 f2, continuity f1 -> continuity f2 -> (forall x:R, f2 x <> 0) -> continuity (f1 / f2). Proof. unfold continuity; intros; apply (continuity_pt_div f1 f2 x (H x) (H0 x) (H1 x)). Qed. Lemma continuity_comp : forall f1 f2, continuity f1 -> continuity f2 -> continuity (f2 o f1). Proof. unfold continuity; intros. apply (continuity_pt_comp f1 f2 x (H x) (H0 (f1 x))). Qed. (*****************************************************) (** * Derivative's definition using Landau's kernel *) (*****************************************************) Definition derivable_pt_lim f (x l:R) : Prop := forall eps:R, 0 < eps -> exists delta : posreal, (forall h:R, h <> 0 -> Rabs h < delta -> Rabs ((f (x + h) - f x) / h - l) < eps). Definition derivable_pt_abs f (x l:R) : Prop := derivable_pt_lim f x l. Definition derivable_pt f (x:R) := { l:R | derivable_pt_abs f x l }. Definition derivable f := forall x:R, derivable_pt f x. Definition derive_pt f (x:R) (pr:derivable_pt f x) := proj1_sig pr. Definition derive f (pr:derivable f) (x:R) := derive_pt f x (pr x). Arguments derivable_pt_lim f%F x%R l. Arguments derivable_pt_abs f%F (x l)%R. Arguments derivable_pt f%F x%R. Arguments derivable f%F. Arguments derive_pt f%F x%R pr. Arguments derive f%F pr x. Definition antiderivative f (g:R -> R) (a b:R) : Prop := (forall x:R, a <= x <= b -> exists pr : derivable_pt g x, f x = derive_pt g x pr) /\ a <= b. (**************************************) (** * Class of differential functions *) (**************************************) Record Differential : Type := mkDifferential {d1 :> R -> R; cond_diff : derivable d1}. Record Differential_D2 : Type := mkDifferential_D2 {d2 :> R -> R; cond_D1 : derivable d2; cond_D2 : derivable (derive d2 cond_D1)}. (**********) Lemma uniqueness_step1 : forall f (x l1 l2:R), limit1_in (fun h:R => (f (x + h) - f x) / h) (fun h:R => h <> 0) l1 0 -> limit1_in (fun h:R => (f (x + h) - f x) / h) (fun h:R => h <> 0) l2 0 -> l1 = l2. Proof. intros; apply (single_limit (fun h:R => (f (x + h) - f x) / h) ( fun h:R => h <> 0) l1 l2 0); try assumption. unfold adhDa; intros; exists (alp / 2). split. unfold Rdiv; apply prod_neq_R0. red; intro; rewrite H2 in H1; elim (Rlt_irrefl _ H1). apply Rinv_neq_0_compat; discrR. unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; unfold Rdiv; rewrite Rabs_mult. replace (Rabs (/ 2)) with (/ 2). replace (Rabs alp) with alp. apply Rmult_lt_reg_l with 2. prove_sup0. rewrite (Rmult_comm 2); rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]; rewrite Rmult_1_r; rewrite double; pattern alp at 1; replace alp with (alp + 0); [ idtac | ring ]; apply Rplus_lt_compat_l; assumption. symmetry ; apply Rabs_right; left; assumption. symmetry ; apply Rabs_right; left; change (0 < / 2); apply Rinv_0_lt_compat; prove_sup0. Qed. 
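(* Editorial illustration, not part of the original Ranalysis1.v: by definition,
   [derive_pt] merely projects the slope out of a [derivable_pt] witness, so an
   explicitly built witness computes to its stated slope.  The name
   [derive_pt_of_witness] is an added, hypothetical identifier. *)
Example derive_pt_of_witness :
  forall (f:R -> R) (x l:R) (H:derivable_pt_abs f x l),
    derive_pt f x (exist _ l H) = l.
Proof.
  intros; reflexivity.
Qed.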
Lemma uniqueness_step2 : forall f (x l:R), derivable_pt_lim f x l -> limit1_in (fun h:R => (f (x + h) - f x) / h) (fun h:R => h <> 0) l 0. Proof. unfold derivable_pt_lim; intros; unfold limit1_in; unfold limit_in; intros. assert (H1 := H eps H0). elim H1; intros. exists (pos x0). split. apply (cond_pos x0). simpl; unfold R_dist; intros. elim H3; intros. apply H2; [ assumption | unfold Rminus in H5; rewrite Ropp_0 in H5; rewrite Rplus_0_r in H5; assumption ]. Qed. Lemma uniqueness_step3 : forall f (x l:R), limit1_in (fun h:R => (f (x + h) - f x) / h) (fun h:R => h <> 0) l 0 -> derivable_pt_lim f x l. Proof. unfold limit1_in, derivable_pt_lim; unfold limit_in; unfold dist; simpl; intros. elim (H eps H0). intros; elim H1; intros. exists (mkposreal x0 H2). simpl; intros; unfold R_dist in H3; apply (H3 h). split; [ assumption | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; assumption ]. Qed. Lemma uniqueness_limite : forall f (x l1 l2:R), derivable_pt_lim f x l1 -> derivable_pt_lim f x l2 -> l1 = l2. Proof. intros. assert (H1 := uniqueness_step2 _ _ _ H). assert (H2 := uniqueness_step2 _ _ _ H0). assert (H3 := uniqueness_step1 _ _ _ _ H1 H2). assumption. Qed. Lemma derive_pt_eq : forall f (x l:R) (pr:derivable_pt f x), derive_pt f x pr = l <-> derivable_pt_lim f x l. Proof. intros; split. intro; assert (H1 := proj2_sig pr); unfold derive_pt in H; rewrite H in H1; assumption. intro; assert (H1 := proj2_sig pr); unfold derivable_pt_abs in H1. assert (H2 := uniqueness_limite _ _ _ _ H H1). unfold derive_pt; unfold derivable_pt_abs. symmetry ; assumption. Qed. (**********) Lemma derive_pt_eq_0 : forall f (x l:R) (pr:derivable_pt f x), derivable_pt_lim f x l -> derive_pt f x pr = l. Proof. intros; elim (derive_pt_eq f x l pr); intros. apply (H1 H). Qed. (**********) Lemma derive_pt_eq_1 : forall f (x l:R) (pr:derivable_pt f x), derive_pt f x pr = l -> derivable_pt_lim f x l. Proof. intros; elim (derive_pt_eq f x l pr); intros. apply (H0 H). Qed. (**********************************************************************) (** * Equivalence of this definition with the one using limit concept *) (**********************************************************************) Lemma derive_pt_D_in : forall f (df:R -> R) (x:R) (pr:derivable_pt f x), D_in f df no_cond x <-> derive_pt f x pr = df x. Proof. intros; split. unfold D_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros. apply derive_pt_eq_0. unfold derivable_pt_lim. intros; elim (H eps H0); intros alpha H1; elim H1; intros; exists (mkposreal alpha H2); intros; generalize (H3 (x + h)); intro; cut (x + h - x = h); [ intro; cut (D_x no_cond x (x + h) /\ Rabs (x + h - x) < alpha); [ intro; generalize (H6 H8); rewrite H7; intro; assumption | split; [ unfold D_x; split; [ unfold no_cond; trivial | apply Rminus_not_eq_right; rewrite H7; assumption ] | rewrite H7; assumption ] ] | ring ]. intro. assert (H0 := derive_pt_eq_1 f x (df x) pr H). unfold D_in; unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. elim (H0 eps H1); intros alpha H2; exists (pos alpha); split. apply (cond_pos alpha). intros; elim H3; intros; unfold D_x in H4; elim H4; intros; cut (x0 - x <> 0). intro; generalize (H2 (x0 - x) H8 H5); replace (x + (x0 - x)) with x0. intro; assumption. ring. auto with real. Qed. Lemma derivable_pt_lim_D_in : forall f (df:R -> R) (x:R), D_in f df no_cond x <-> derivable_pt_lim f x (df x). Proof. intros; split. unfold D_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros. unfold derivable_pt_lim. 
intros; elim (H eps H0); intros alpha H1; elim H1; intros; exists (mkposreal alpha H2); intros; generalize (H3 (x + h)); intro; cut (x + h - x = h); [ intro; cut (D_x no_cond x (x + h) /\ Rabs (x + h - x) < alpha); [ intro; generalize (H6 H8); rewrite H7; intro; assumption | split; [ unfold D_x; split; [ unfold no_cond; trivial | apply Rminus_not_eq_right; rewrite H7; assumption ] | rewrite H7; assumption ] ] | ring ]. intro. unfold derivable_pt_lim in H. unfold D_in; unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. elim (H eps H0); intros alpha H2; exists (pos alpha); split. apply (cond_pos alpha). intros. elim H1; intros; unfold D_x in H3; elim H3; intros; cut (x0 - x <> 0). intro; generalize (H2 (x0 - x) H7 H4); replace (x + (x0 - x)) with x0. intro; assumption. ring. auto with real. Qed. (***********************************) (** * derivability -> continuity *) (***********************************) (**********) Lemma derivable_derive : forall f (x:R) (pr:derivable_pt f x), exists l : R, derive_pt f x pr = l. Proof. intros; exists (proj1_sig pr). unfold derive_pt; reflexivity. Qed. Theorem derivable_continuous_pt : forall f (x:R), derivable_pt f x -> continuity_pt f x. Proof. intros f x X. generalize (derivable_derive f x X); intro. elim H; intros l H1. cut (l = fct_cte l x). intro. rewrite H0 in H1. generalize (derive_pt_D_in f (fct_cte l) x); intro. elim (H2 X); intros. generalize (H4 H1); intro. unfold continuity_pt. apply (cont_deriv f (fct_cte l) no_cond x H5). unfold fct_cte; reflexivity. Qed. Theorem derivable_continuous : forall f, derivable f -> continuity f. Proof. unfold derivable, continuity; intros f X x. apply (derivable_continuous_pt f x (X x)). Qed. (****************************************************************) (** * Main rules *) (****************************************************************) Lemma derivable_pt_lim_plus : forall f1 f2 (x l1 l2:R), derivable_pt_lim f1 x l1 -> derivable_pt_lim f2 x l2 -> derivable_pt_lim (f1 + f2) x (l1 + l2). intros. apply uniqueness_step3. assert (H1 := uniqueness_step2 _ _ _ H). assert (H2 := uniqueness_step2 _ _ _ H0). unfold plus_fct. cut (forall h:R, (f1 (x + h) + f2 (x + h) - (f1 x + f2 x)) / h = (f1 (x + h) - f1 x) / h + (f2 (x + h) - f2 x) / h). intro. generalize (limit_plus (fun h':R => (f1 (x + h') - f1 x) / h') (fun h':R => (f2 (x + h') - f2 x) / h') (fun h:R => h <> 0) l1 l2 0 H1 H2). unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. elim (H4 eps H5); intros. exists x0. elim H6; intros. split. assumption. intros; rewrite H3; apply H8; assumption. intro; unfold Rdiv; ring. Qed. Lemma derivable_pt_lim_opp : forall f (x l:R), derivable_pt_lim f x l -> derivable_pt_lim (- f) x (- l). Proof. intros. apply uniqueness_step3. assert (H1 := uniqueness_step2 _ _ _ H). unfold opp_fct. cut (forall h:R, (- f (x + h) - - f x) / h = - ((f (x + h) - f x) / h)). intro. generalize (limit_Ropp (fun h:R => (f (x + h) - f x) / h) (fun h:R => h <> 0) l 0 H1). unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. elim (H2 eps H3); intros. exists x0. elim H4; intros. split. assumption. intros; rewrite H0; apply H6; assumption. intro; unfold Rdiv; ring. Qed. Lemma derivable_pt_lim_minus : forall f1 f2 (x l1 l2:R), derivable_pt_lim f1 x l1 -> derivable_pt_lim f2 x l2 -> derivable_pt_lim (f1 - f2) x (l1 - l2). Proof. intros. apply uniqueness_step3. assert (H1 := uniqueness_step2 _ _ _ H). assert (H2 := uniqueness_step2 _ _ _ H0). unfold minus_fct. 
cut (forall h:R, (f1 (x + h) - f1 x) / h - (f2 (x + h) - f2 x) / h = (f1 (x + h) - f2 (x + h) - (f1 x - f2 x)) / h). intro. generalize (limit_minus (fun h':R => (f1 (x + h') - f1 x) / h') (fun h':R => (f2 (x + h') - f2 x) / h') (fun h:R => h <> 0) l1 l2 0 H1 H2). unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. elim (H4 eps H5); intros. exists x0. elim H6; intros. split. assumption. intros; rewrite <- H3; apply H8; assumption. intro; unfold Rdiv; ring. Qed. Lemma derivable_pt_lim_mult : forall f1 f2 (x l1 l2:R), derivable_pt_lim f1 x l1 -> derivable_pt_lim f2 x l2 -> derivable_pt_lim (f1 * f2) x (l1 * f2 x + f1 x * l2). Proof. intros. assert (H1 := derivable_pt_lim_D_in f1 (fun y:R => l1) x). elim H1; intros. assert (H4 := H3 H). assert (H5 := derivable_pt_lim_D_in f2 (fun y:R => l2) x). elim H5; intros. assert (H8 := H7 H0). clear H1 H2 H3 H5 H6 H7. assert (H1 := derivable_pt_lim_D_in (f1 * f2)%F (fun y:R => l1 * f2 x + f1 x * l2) x). elim H1; intros. clear H1 H3. apply H2. unfold mult_fct. apply (Dmult no_cond (fun y:R => l1) (fun y:R => l2) f1 f2 x); assumption. Qed. Lemma derivable_pt_lim_const : forall a x:R, derivable_pt_lim (fct_cte a) x 0. Proof. intros; unfold fct_cte, derivable_pt_lim. intros; exists (mkposreal 1 Rlt_0_1); intros; unfold Rminus; rewrite Rplus_opp_r; unfold Rdiv; rewrite Rmult_0_l; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. Qed. Lemma derivable_pt_lim_scal : forall f (a x l:R), derivable_pt_lim f x l -> derivable_pt_lim (mult_real_fct a f) x (a * l). Proof. intros. assert (H0 := derivable_pt_lim_const a x). replace (mult_real_fct a f) with (fct_cte a * f)%F. replace (a * l) with (0 * f x + a * l); [ idtac | ring ]. apply (derivable_pt_lim_mult (fct_cte a) f x 0 l); assumption. unfold mult_real_fct, mult_fct, fct_cte; reflexivity. Qed. Lemma derivable_pt_lim_id : forall x:R, derivable_pt_lim id x 1. Proof. intro; unfold derivable_pt_lim. intros eps Heps; exists (mkposreal eps Heps); intros h H1 H2; unfold id; replace ((x + h - x) / h - 1) with 0. rewrite Rabs_R0; apply Rle_lt_trans with (Rabs h). apply Rabs_pos. assumption. unfold Rminus; rewrite Rplus_assoc; rewrite (Rplus_comm x); rewrite Rplus_assoc. rewrite Rplus_opp_l; rewrite Rplus_0_r; unfold Rdiv; rewrite <- Rinv_r_sym. symmetry ; apply Rplus_opp_r. assumption. Qed. Lemma derivable_pt_lim_Rsqr : forall x:R, derivable_pt_lim Rsqr x (2 * x). Proof. intro; unfold derivable_pt_lim. unfold Rsqr; intros eps Heps; exists (mkposreal eps Heps); intros h H1 H2; replace (((x + h) * (x + h) - x * x) / h - 2 * x) with h. assumption. replace ((x + h) * (x + h) - x * x) with (2 * x * h + h * h); [ idtac | ring ]. unfold Rdiv; rewrite Rmult_plus_distr_r. repeat rewrite Rmult_assoc. repeat rewrite <- Rinv_r_sym; [ idtac | assumption ]. ring. Qed. Lemma derivable_pt_lim_comp : forall f1 f2 (x l1 l2:R), derivable_pt_lim f1 x l1 -> derivable_pt_lim f2 (f1 x) l2 -> derivable_pt_lim (f2 o f1) x (l2 * l1). Proof. intros; assert (H1 := derivable_pt_lim_D_in f1 (fun y:R => l1) x). elim H1; intros. assert (H4 := H3 H). assert (H5 := derivable_pt_lim_D_in f2 (fun y:R => l2) (f1 x)). elim H5; intros. assert (H8 := H7 H0). clear H1 H2 H3 H5 H6 H7. assert (H1 := derivable_pt_lim_D_in (f2 o f1)%F (fun y:R => l2 * l1) x). elim H1; intros. clear H1 H3; apply H2. unfold comp; cut (D_in (fun x0:R => f2 (f1 x0)) (fun y:R => l2 * l1) (Dgf no_cond no_cond f1) x -> D_in (fun x0:R => f2 (f1 x0)) (fun y:R => l2 * l1) no_cond x). intro; apply H1. 
rewrite Rmult_comm; apply (Dcomp no_cond no_cond (fun y:R => l1) (fun y:R => l2) f1 f2 x); assumption. unfold Dgf, D_in, no_cond; unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. elim (H1 eps H3); intros. exists x0; intros; split. elim H5; intros; assumption. intros; elim H5; intros; apply H9; split. unfold D_x; split. split; trivial. elim H6; intros; unfold D_x in H10; elim H10; intros; assumption. elim H6; intros; assumption. Qed. Lemma derivable_pt_plus : forall f1 f2 (x:R), derivable_pt f1 x -> derivable_pt f2 x -> derivable_pt (f1 + f2) x. Proof. unfold derivable_pt; intros f1 f2 x X X0. elim X; intros. elim X0; intros. exists (x0 + x1). apply derivable_pt_lim_plus; assumption. Qed. Lemma derivable_pt_opp : forall f (x:R), derivable_pt f x -> derivable_pt (- f) x. Proof. unfold derivable_pt; intros f x X. elim X; intros. exists (- x0). apply derivable_pt_lim_opp; assumption. Qed. Lemma derivable_pt_minus : forall f1 f2 (x:R), derivable_pt f1 x -> derivable_pt f2 x -> derivable_pt (f1 - f2) x. Proof. unfold derivable_pt; intros f1 f2 x X X0. elim X; intros. elim X0; intros. exists (x0 - x1). apply derivable_pt_lim_minus; assumption. Qed. Lemma derivable_pt_mult : forall f1 f2 (x:R), derivable_pt f1 x -> derivable_pt f2 x -> derivable_pt (f1 * f2) x. Proof. unfold derivable_pt; intros f1 f2 x X X0. elim X; intros. elim X0; intros. exists (x0 * f2 x + f1 x * x1). apply derivable_pt_lim_mult; assumption. Qed. Lemma derivable_pt_const : forall a x:R, derivable_pt (fct_cte a) x. Proof. intros; unfold derivable_pt. exists 0. apply derivable_pt_lim_const. Qed. Lemma derivable_pt_scal : forall f (a x:R), derivable_pt f x -> derivable_pt (mult_real_fct a f) x. Proof. unfold derivable_pt; intros f1 a x X. elim X; intros. exists (a * x0). apply derivable_pt_lim_scal; assumption. Qed. Lemma derivable_pt_id : forall x:R, derivable_pt id x. Proof. unfold derivable_pt; intro. exists 1. apply derivable_pt_lim_id. Qed. Lemma derivable_pt_Rsqr : forall x:R, derivable_pt Rsqr x. Proof. unfold derivable_pt; intro; exists (2 * x). apply derivable_pt_lim_Rsqr. Qed. Lemma derivable_pt_comp : forall f1 f2 (x:R), derivable_pt f1 x -> derivable_pt f2 (f1 x) -> derivable_pt (f2 o f1) x. Proof. unfold derivable_pt; intros f1 f2 x X X0. elim X; intros. elim X0; intros. exists (x1 * x0). apply derivable_pt_lim_comp; assumption. Qed. Lemma derivable_plus : forall f1 f2, derivable f1 -> derivable f2 -> derivable (f1 + f2). Proof. unfold derivable; intros f1 f2 X X0 x. apply (derivable_pt_plus _ _ x (X _) (X0 _)). Qed. Lemma derivable_opp : forall f, derivable f -> derivable (- f). Proof. unfold derivable; intros f X x. apply (derivable_pt_opp _ x (X _)). Qed. Lemma derivable_minus : forall f1 f2, derivable f1 -> derivable f2 -> derivable (f1 - f2). Proof. unfold derivable; intros f1 f2 X X0 x. apply (derivable_pt_minus _ _ x (X _) (X0 _)). Qed. Lemma derivable_mult : forall f1 f2, derivable f1 -> derivable f2 -> derivable (f1 * f2). Proof. unfold derivable; intros f1 f2 X X0 x. apply (derivable_pt_mult _ _ x (X _) (X0 _)). Qed. Lemma derivable_const : forall a:R, derivable (fct_cte a). Proof. unfold derivable; intros. apply derivable_pt_const. Qed. Lemma derivable_scal : forall f (a:R), derivable f -> derivable (mult_real_fct a f). Proof. unfold derivable; intros f a X x. apply (derivable_pt_scal _ a x (X _)). Qed. Lemma derivable_id : derivable id. Proof. unfold derivable; intro; apply derivable_pt_id. Qed. Lemma derivable_Rsqr : derivable Rsqr. Proof. 
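(* Like the other global [derivable_*] lemmas in this block, the proof applies
   the corresponding pointwise [derivable_pt_*] lemma at every point. *)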
unfold derivable; intro; apply derivable_pt_Rsqr. Qed. Lemma derivable_comp : forall f1 f2, derivable f1 -> derivable f2 -> derivable (f2 o f1). Proof. unfold derivable; intros f1 f2 X X0 x. apply (derivable_pt_comp _ _ x (X _) (X0 _)). Qed. Lemma derive_pt_plus : forall f1 f2 (x:R) (pr1:derivable_pt f1 x) (pr2:derivable_pt f2 x), derive_pt (f1 + f2) x (derivable_pt_plus _ _ _ pr1 pr2) = derive_pt f1 x pr1 + derive_pt f2 x pr2. Proof. intros. assert (H := derivable_derive f1 x pr1). assert (H0 := derivable_derive f2 x pr2). assert (H1 := derivable_derive (f1 + f2)%F x (derivable_pt_plus _ _ _ pr1 pr2)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. elim H1; clear H1; intros l H1. rewrite H; rewrite H0; apply derive_pt_eq_0. assert (H3 := proj2_sig pr1). unfold derive_pt in H; rewrite H in H3. assert (H4 := proj2_sig pr2). unfold derive_pt in H0; rewrite H0 in H4. apply derivable_pt_lim_plus; assumption. Qed. Lemma derive_pt_opp : forall f (x:R) (pr1:derivable_pt f x), derive_pt (- f) x (derivable_pt_opp _ _ pr1) = - derive_pt f x pr1. Proof. intros. assert (H := derivable_derive f x pr1). assert (H0 := derivable_derive (- f)%F x (derivable_pt_opp _ _ pr1)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. rewrite H; apply derive_pt_eq_0. assert (H3 := proj2_sig pr1). unfold derive_pt in H; rewrite H in H3. apply derivable_pt_lim_opp; assumption. Qed. Lemma derive_pt_minus : forall f1 f2 (x:R) (pr1:derivable_pt f1 x) (pr2:derivable_pt f2 x), derive_pt (f1 - f2) x (derivable_pt_minus _ _ _ pr1 pr2) = derive_pt f1 x pr1 - derive_pt f2 x pr2. Proof. intros. assert (H := derivable_derive f1 x pr1). assert (H0 := derivable_derive f2 x pr2). assert (H1 := derivable_derive (f1 - f2)%F x (derivable_pt_minus _ _ _ pr1 pr2)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. elim H1; clear H1; intros l H1. rewrite H; rewrite H0; apply derive_pt_eq_0. assert (H3 := proj2_sig pr1). unfold derive_pt in H; rewrite H in H3. assert (H4 := proj2_sig pr2). unfold derive_pt in H0; rewrite H0 in H4. apply derivable_pt_lim_minus; assumption. Qed. Lemma derive_pt_mult : forall f1 f2 (x:R) (pr1:derivable_pt f1 x) (pr2:derivable_pt f2 x), derive_pt (f1 * f2) x (derivable_pt_mult _ _ _ pr1 pr2) = derive_pt f1 x pr1 * f2 x + f1 x * derive_pt f2 x pr2. Proof. intros. assert (H := derivable_derive f1 x pr1). assert (H0 := derivable_derive f2 x pr2). assert (H1 := derivable_derive (f1 * f2)%F x (derivable_pt_mult _ _ _ pr1 pr2)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. elim H1; clear H1; intros l H1. rewrite H; rewrite H0; apply derive_pt_eq_0. assert (H3 := proj2_sig pr1). unfold derive_pt in H; rewrite H in H3. assert (H4 := proj2_sig pr2). unfold derive_pt in H0; rewrite H0 in H4. apply derivable_pt_lim_mult; assumption. Qed. Lemma derive_pt_const : forall a x:R, derive_pt (fct_cte a) x (derivable_pt_const a x) = 0. Proof. intros. apply derive_pt_eq_0. apply derivable_pt_lim_const. Qed. Lemma derive_pt_scal : forall f (a x:R) (pr:derivable_pt f x), derive_pt (mult_real_fct a f) x (derivable_pt_scal _ _ _ pr) = a * derive_pt f x pr. Proof. intros. assert (H := derivable_derive f x pr). assert (H0 := derivable_derive (mult_real_fct a f) x (derivable_pt_scal _ _ _ pr)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. rewrite H; apply derive_pt_eq_0. assert (H3 := proj2_sig pr). unfold derive_pt in H; rewrite H in H3. apply derivable_pt_lim_scal; assumption. Qed. Lemma derive_pt_id : forall x:R, derive_pt id x (derivable_pt_id _) = 1. 
Proof. intros. apply derive_pt_eq_0. apply derivable_pt_lim_id. Qed. Lemma derive_pt_Rsqr : forall x:R, derive_pt Rsqr x (derivable_pt_Rsqr _) = 2 * x. Proof. intros. apply derive_pt_eq_0. apply derivable_pt_lim_Rsqr. Qed. Lemma derive_pt_comp : forall f1 f2 (x:R) (pr1:derivable_pt f1 x) (pr2:derivable_pt f2 (f1 x)), derive_pt (f2 o f1) x (derivable_pt_comp _ _ _ pr1 pr2) = derive_pt f2 (f1 x) pr2 * derive_pt f1 x pr1. Proof. intros. assert (H := derivable_derive f1 x pr1). assert (H0 := derivable_derive f2 (f1 x) pr2). assert (H1 := derivable_derive (f2 o f1)%F x (derivable_pt_comp _ _ _ pr1 pr2)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. elim H1; clear H1; intros l H1. rewrite H; rewrite H0; apply derive_pt_eq_0. assert (H3 := proj2_sig pr1). unfold derive_pt in H; rewrite H in H3. assert (H4 := proj2_sig pr2). unfold derive_pt in H0; rewrite H0 in H4. apply derivable_pt_lim_comp; assumption. Qed. (* Pow *) Definition pow_fct (n:nat) (y:R) : R := y ^ n. Lemma derivable_pt_lim_pow_pos : forall (x:R) (n:nat), (0 < n)%nat -> derivable_pt_lim (fun y:R => y ^ n) x (INR n * x ^ pred n). Proof. intros. induction n as [| n Hrecn]. elim (lt_irrefl _ H). cut (n = 0%nat \/ (0 < n)%nat). intro; elim H0; intro. rewrite H1; simpl. replace (fun y:R => y * 1) with (id * fct_cte 1)%F. replace (1 * 1) with (1 * fct_cte 1 x + id x * 0). apply derivable_pt_lim_mult. apply derivable_pt_lim_id. apply derivable_pt_lim_const. unfold fct_cte, id; ring. reflexivity. replace (fun y:R => y ^ S n) with (fun y:R => y * y ^ n). replace (pred (S n)) with n; [ idtac | reflexivity ]. replace (fun y:R => y * y ^ n) with (id * (fun y:R => y ^ n))%F. set (f := fun y:R => y ^ n). replace (INR (S n) * x ^ n) with (1 * f x + id x * (INR n * x ^ pred n)). apply derivable_pt_lim_mult. apply derivable_pt_lim_id. unfold f; apply Hrecn; assumption. unfold f. pattern n at 1 5; replace n with (S (pred n)). unfold id; rewrite S_INR; simpl. ring. symmetry ; apply S_pred with 0%nat; assumption. unfold mult_fct, id; reflexivity. reflexivity. inversion H. left; reflexivity. right. apply lt_le_trans with 1%nat. apply lt_O_Sn. assumption. Qed. Lemma derivable_pt_lim_pow : forall (x:R) (n:nat), derivable_pt_lim (fun y:R => y ^ n) x (INR n * x ^ pred n). Proof. intros. induction n as [| n Hrecn]. simpl. rewrite Rmult_0_l. replace (fun _:R => 1) with (fct_cte 1); [ apply derivable_pt_lim_const | reflexivity ]. apply derivable_pt_lim_pow_pos. apply lt_O_Sn. Qed. Lemma derivable_pt_pow : forall (n:nat) (x:R), derivable_pt (fun y:R => y ^ n) x. Proof. intros; unfold derivable_pt. exists (INR n * x ^ pred n). apply derivable_pt_lim_pow. Qed. Lemma derivable_pow : forall n:nat, derivable (fun y:R => y ^ n). Proof. intro; unfold derivable; intro; apply derivable_pt_pow. Qed. Lemma derive_pt_pow : forall (n:nat) (x:R), derive_pt (fun y:R => y ^ n) x (derivable_pt_pow n x) = INR n * x ^ pred n. Proof. intros; apply derive_pt_eq_0. apply derivable_pt_lim_pow. Qed. Lemma pr_nu : forall f (x:R) (pr1 pr2:derivable_pt f x), derive_pt f x pr1 = derive_pt f x pr2. Proof. intros. unfold derivable_pt in pr1. unfold derivable_pt in pr2. elim pr1; intros. elim pr2; intros. unfold derivable_pt_abs in p. unfold derivable_pt_abs in p0. simpl. apply (uniqueness_limite f x x0 x1 p p0). Qed. 
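(* Editorial illustration, not part of the original Ranalysis1.v: instantiating
   [derive_pt_pow] above at the fixed exponent 3; [pred 3] computes to [2].
   The name [derive_pt_cube_example] is an added, hypothetical identifier. *)
Example derive_pt_cube_example :
  forall x:R, derive_pt (fun y:R => y ^ 3) x (derivable_pt_pow 3 x) = INR 3 * x ^ 2.
Proof.
  intro x; exact (derive_pt_pow 3 x).
Qed.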
(************************************************************) (** * Local extremum's condition *) (************************************************************) Theorem deriv_maximum : forall f (a b c:R) (pr:derivable_pt f c), a < c -> c < b -> (forall x:R, a < x -> x < b -> f x <= f c) -> derive_pt f c pr = 0. Proof. intros; case (Rtotal_order 0 (derive_pt f c pr)); intro. assert (H3 := derivable_derive f c pr). elim H3; intros l H4; rewrite H4 in H2. assert (H5 := derive_pt_eq_1 f c l pr H4). cut (0 < l / 2); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H5 (l / 2) H6); intros delta H7. cut (0 < (b - c) / 2). intro; cut (Rmin (delta / 2) ((b - c) / 2) <> 0). intro; cut (Rabs (Rmin (delta / 2) ((b - c) / 2)) < delta). intro. assert (H11 := H7 (Rmin (delta / 2) ((b - c) / 2)) H9 H10). cut (0 < Rmin (delta / 2) ((b - c) / 2)). intro; cut (a < c + Rmin (delta / 2) ((b - c) / 2)). intro; cut (c + Rmin (delta / 2) ((b - c) / 2) < b). intro; assert (H15 := H1 (c + Rmin (delta / 2) ((b - c) / 2)) H13 H14). cut ((f (c + Rmin (delta / 2) ((b - c) / 2)) - f c) / Rmin (delta / 2) ((b - c) / 2) <= 0). intro; cut (- l < 0). intro; unfold Rminus in H11. cut ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2) + - l < 0). intro; cut (Rabs ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2) + - l) < l / 2). unfold Rabs; case (Rcase_abs ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2) + - l)); intro. replace (- ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2) + - l)) with (l + - ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2))). intro; generalize (Rplus_lt_compat_l (- l) (l + - ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2))) (l / 2) H19); repeat rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; replace (- l + l / 2) with (- (l / 2)). intro; generalize (Ropp_lt_gt_contravar (- ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2))) (- (l / 2)) H20); repeat rewrite Ropp_involutive; intro; generalize (Rlt_trans 0 (l / 2) ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2)) H6 H21); intro; elim (Rlt_irrefl 0 (Rlt_le_trans 0 ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2)) 0 H22 H16)). pattern l at 2; rewrite double_var. ring. ring. intro. assert (H20 := Rge_le ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2) + - l) 0 r). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H20 H18)). assumption. rewrite <- Ropp_0; replace ((f (c + Rmin (delta / 2) ((b + - c) / 2)) + - f c) / Rmin (delta / 2) ((b + - c) / 2) + - l) with (- (l + - ((f (c + Rmin (delta / 2) ((b + - c) / 2)) - f c) / Rmin (delta / 2) ((b + - c) / 2)))). apply Ropp_gt_lt_contravar; change (0 < l + - ((f (c + Rmin (delta / 2) ((b + - c) / 2)) - f c) / Rmin (delta / 2) ((b + - c) / 2))); apply Rplus_lt_le_0_compat; [ assumption | rewrite <- Ropp_0; apply Ropp_ge_le_contravar; apply Rle_ge; assumption ]. unfold Rminus; ring. rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; assumption. replace ((f (c + Rmin (delta / 2) ((b - c) / 2)) - f c) / Rmin (delta / 2) ((b - c) / 2)) with (- ((f c - f (c + Rmin (delta / 2) ((b - c) / 2))) / Rmin (delta / 2) ((b - c) / 2))). 
rewrite <- Ropp_0; apply Ropp_ge_le_contravar; apply Rle_ge; unfold Rdiv; apply Rmult_le_pos; [ generalize (Rplus_le_compat_r (- f (c + Rmin (delta * / 2) ((b - c) * / 2))) (f (c + Rmin (delta * / 2) ((b - c) * / 2))) ( f c) H15); rewrite Rplus_opp_r; intro; assumption | left; apply Rinv_0_lt_compat; assumption ]. unfold Rdiv. rewrite <- Ropp_mult_distr_l_reverse. repeat rewrite <- (Rmult_comm (/ Rmin (delta * / 2) ((b - c) * / 2))). apply Rmult_eq_reg_l with (Rmin (delta * / 2) ((b - c) * / 2)). repeat rewrite <- Rmult_assoc. rewrite <- Rinv_r_sym. repeat rewrite Rmult_1_l. ring. red; intro. unfold Rdiv in H12; rewrite H16 in H12; elim (Rlt_irrefl 0 H12). red; intro. unfold Rdiv in H12; rewrite H16 in H12; elim (Rlt_irrefl 0 H12). assert (H14 := Rmin_r (delta / 2) ((b - c) / 2)). assert (H15 := Rplus_le_compat_l c (Rmin (delta / 2) ((b - c) / 2)) ((b - c) / 2) H14). apply Rle_lt_trans with (c + (b - c) / 2). assumption. apply Rmult_lt_reg_l with 2. prove_sup0. replace (2 * (c + (b - c) / 2)) with (c + b). replace (2 * b) with (b + b). apply Rplus_lt_compat_r; assumption. ring. unfold Rdiv; rewrite Rmult_plus_distr_l. repeat rewrite (Rmult_comm 2). rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r. ring. discrR. apply Rlt_trans with c. assumption. pattern c at 1; rewrite <- (Rplus_0_r c); apply Rplus_lt_compat_l; assumption. cut (0 < delta / 2). intro; apply (Rmin_stable_in_posreal (mkposreal (delta / 2) H12) (mkposreal ((b - c) / 2) H8)). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | apply Rinv_0_lt_compat; prove_sup0 ]. unfold Rabs; case (Rcase_abs (Rmin (delta / 2) ((b - c) / 2))). intro. cut (0 < delta / 2). intro. generalize (Rmin_stable_in_posreal (mkposreal (delta / 2) H10) (mkposreal ((b - c) / 2) H8)); simpl; intro; elim (Rlt_irrefl 0 (Rlt_trans 0 (Rmin (delta / 2) ((b - c) / 2)) 0 H11 r)). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | apply Rinv_0_lt_compat; prove_sup0 ]. intro; apply Rle_lt_trans with (delta / 2). apply Rmin_l. unfold Rdiv; apply Rmult_lt_reg_l with 2. prove_sup0. rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l. replace (2 * delta) with (delta + delta). pattern delta at 2; rewrite <- (Rplus_0_r delta); apply Rplus_lt_compat_l. rewrite Rplus_0_r; apply (cond_pos delta). symmetry ; apply double. discrR. cut (0 < delta / 2). intro; generalize (Rmin_stable_in_posreal (mkposreal (delta / 2) H9) (mkposreal ((b - c) / 2) H8)); simpl; intro; red; intro; rewrite H11 in H10; elim (Rlt_irrefl 0 H10). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | apply Rinv_0_lt_compat; prove_sup0 ]. unfold Rdiv; apply Rmult_lt_0_compat. generalize (Rplus_lt_compat_r (- c) c b H0); rewrite Rplus_opp_r; intro; assumption. apply Rinv_0_lt_compat; prove_sup0. elim H2; intro. symmetry ; assumption. generalize (derivable_derive f c pr); intro; elim H4; intros l H5. rewrite H5 in H3; generalize (derive_pt_eq_1 f c l pr H5); intro; cut (0 < - (l / 2)). intro; elim (H6 (- (l / 2)) H7); intros delta H9. cut (0 < (c - a) / 2). intro; cut (Rmax (- (delta / 2)) ((a - c) / 2) < 0). intro; cut (Rmax (- (delta / 2)) ((a - c) / 2) <> 0). intro; cut (Rabs (Rmax (- (delta / 2)) ((a - c) / 2)) < delta). intro; generalize (H9 (Rmax (- (delta / 2)) ((a - c) / 2)) H11 H12); intro; cut (a < c + Rmax (- (delta / 2)) ((a - c) / 2)). cut (c + Rmax (- (delta / 2)) ((a - c) / 2) < b). 
intros; generalize (H1 (c + Rmax (- (delta / 2)) ((a - c) / 2)) H15 H14); intro; cut (0 <= (f (c + Rmax (- (delta / 2)) ((a - c) / 2)) - f c) / Rmax (- (delta / 2)) ((a - c) / 2)). intro; cut (0 < - l). intro; unfold Rminus in H13; cut (0 < (f (c + Rmax (- (delta / 2)) ((a + - c) / 2)) + - f c) / Rmax (- (delta / 2)) ((a + - c) / 2) + - l). intro; cut (Rabs ((f (c + Rmax (- (delta / 2)) ((a + - c) / 2)) + - f c) / Rmax (- (delta / 2)) ((a + - c) / 2) + - l) < - (l / 2)). unfold Rabs; case (Rcase_abs ((f (c + Rmax (- (delta / 2)) ((a + - c) / 2)) + - f c) / Rmax (- (delta / 2)) ((a + - c) / 2) + - l)). intro; elim (Rlt_irrefl 0 (Rlt_trans 0 ((f (c + Rmax (- (delta / 2)) ((a + - c) / 2)) + - f c) / Rmax (- (delta / 2)) ((a + - c) / 2) + - l) 0 H19 r)). intros; generalize (Rplus_lt_compat_r l ((f (c + Rmax (- (delta / 2)) ((a + - c) / 2)) + - f c) / Rmax (- (delta / 2)) ((a + - c) / 2) + - l) ( - (l / 2)) H20); repeat rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; replace (- (l / 2) + l) with (l / 2). cut (l / 2 < 0). intros; generalize (Rlt_trans ((f (c + Rmax (- (delta / 2)) ((a + - c) / 2)) + - f c) / Rmax (- (delta / 2)) ((a + - c) / 2)) (l / 2) 0 H22 H21); intro; elim (Rlt_irrefl 0 (Rle_lt_trans 0 ((f (c + Rmax (- (delta / 2)) ((a - c) / 2)) - f c) / Rmax (- (delta / 2)) ((a - c) / 2)) 0 H17 H23)). rewrite <- (Ropp_involutive (l / 2)); rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; assumption. pattern l at 3; rewrite double_var. ring. assumption. apply Rplus_le_lt_0_compat; assumption. rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; assumption. unfold Rdiv; replace ((f (c + Rmax (- (delta * / 2)) ((a - c) * / 2)) - f c) * / Rmax (- (delta * / 2)) ((a - c) * / 2)) with (- (f (c + Rmax (- (delta * / 2)) ((a - c) * / 2)) - f c) * / - Rmax (- (delta * / 2)) ((a - c) * / 2)). apply Rmult_le_pos. generalize (Rplus_le_compat_l (- f (c + Rmax (- (delta * / 2)) ((a - c) * / 2))) (f (c + Rmax (- (delta * / 2)) ((a - c) * / 2))) ( f c) H16); rewrite Rplus_opp_l; replace (- (f (c + Rmax (- (delta * / 2)) ((a - c) * / 2)) - f c)) with (- f (c + Rmax (- (delta * / 2)) ((a - c) * / 2)) + f c). intro; assumption. ring. left; apply Rinv_0_lt_compat; rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; assumption. unfold Rdiv. rewrite <- Ropp_inv_permute. rewrite Rmult_opp_opp. reflexivity. unfold Rdiv in H11; assumption. generalize (Rplus_lt_compat_l c (Rmax (- (delta / 2)) ((a - c) / 2)) 0 H10); rewrite Rplus_0_r; intro; apply Rlt_trans with c; assumption. generalize (RmaxLess2 (- (delta / 2)) ((a - c) / 2)); intro; generalize (Rplus_le_compat_l c ((a - c) / 2) (Rmax (- (delta / 2)) ((a - c) / 2)) H14); intro; apply Rlt_le_trans with (c + (a - c) / 2). apply Rmult_lt_reg_l with 2. prove_sup0. replace (2 * (c + (a - c) / 2)) with (a + c). rewrite double. apply Rplus_lt_compat_l; assumption. field; discrR. assumption. unfold Rabs; case (Rcase_abs (Rmax (- (delta / 2)) ((a - c) / 2))). intro; generalize (RmaxLess1 (- (delta / 2)) ((a - c) / 2)); intro; generalize (Ropp_le_ge_contravar (- (delta / 2)) (Rmax (- (delta / 2)) ((a - c) / 2)) H12); rewrite Ropp_involutive; intro; generalize (Rge_le (delta / 2) (- Rmax (- (delta / 2)) ((a - c) / 2)) H13); intro; apply Rle_lt_trans with (delta / 2). assumption. apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite double. pattern delta at 2; rewrite <- (Rplus_0_r delta); apply Rplus_lt_compat_l; rewrite Rplus_0_r; apply (cond_pos delta). discrR. 
cut (- (delta / 2) < 0). cut ((a - c) / 2 < 0). intros; generalize (Rmax_stable_in_negreal (mknegreal (- (delta / 2)) H13) (mknegreal ((a - c) / 2) H12)); simpl; intro; generalize (Rge_le (Rmax (- (delta / 2)) ((a - c) / 2)) 0 r); intro; elim (Rlt_irrefl 0 (Rle_lt_trans 0 (Rmax (- (delta / 2)) ((a - c) / 2)) 0 H15 H14)). rewrite <- Ropp_0; rewrite <- (Ropp_involutive ((a - c) / 2)); apply Ropp_lt_gt_contravar; replace (- ((a - c) / 2)) with ((c - a) / 2). assumption. unfold Rdiv. rewrite <- Ropp_mult_distr_l_reverse. rewrite (Ropp_minus_distr a c). reflexivity. rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | assert (Hyp : 0 < 2); [ prove_sup0 | apply (Rinv_0_lt_compat 2 Hyp) ] ]. red; intro; rewrite H11 in H10; elim (Rlt_irrefl 0 H10). cut ((a - c) / 2 < 0). intro; cut (- (delta / 2) < 0). intro; apply (Rmax_stable_in_negreal (mknegreal (- (delta / 2)) H11) (mknegreal ((a - c) / 2) H10)). rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | assert (Hyp : 0 < 2); [ prove_sup0 | apply (Rinv_0_lt_compat 2 Hyp) ] ]. rewrite <- Ropp_0; rewrite <- (Ropp_involutive ((a - c) / 2)); apply Ropp_lt_gt_contravar; replace (- ((a - c) / 2)) with ((c - a) / 2). assumption. unfold Rdiv. rewrite <- Ropp_mult_distr_l_reverse. rewrite (Ropp_minus_distr a c). reflexivity. unfold Rdiv; apply Rmult_lt_0_compat; [ generalize (Rplus_lt_compat_r (- a) a c H); rewrite Rplus_opp_r; intro; assumption | assert (Hyp : 0 < 2); [ prove_sup0 | apply (Rinv_0_lt_compat 2 Hyp) ] ]. replace (- (l / 2)) with (- l / 2). unfold Rdiv; apply Rmult_lt_0_compat. rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; assumption. assert (Hyp : 0 < 2); [ prove_sup0 | apply (Rinv_0_lt_compat 2 Hyp) ]. unfold Rdiv; apply Ropp_mult_distr_l_reverse. Qed. Theorem deriv_minimum : forall f (a b c:R) (pr:derivable_pt f c), a < c -> c < b -> (forall x:R, a < x -> x < b -> f c <= f x) -> derive_pt f c pr = 0. Proof. intros. rewrite <- (Ropp_involutive (derive_pt f c pr)). apply Ropp_eq_0_compat. rewrite <- (derive_pt_opp f c pr). cut (forall x:R, a < x -> x < b -> (- f)%F x <= (- f)%F c). intro. apply (deriv_maximum (- f)%F a b c (derivable_pt_opp _ _ pr) H H0 H2). intros; unfold opp_fct; apply Ropp_ge_le_contravar; apply Rle_ge. apply (H1 x H2 H3). Qed. Theorem deriv_constant2 : forall f (a b c:R) (pr:derivable_pt f c), a < c -> c < b -> (forall x:R, a < x -> x < b -> f x = f c) -> derive_pt f c pr = 0. Proof. intros. eapply deriv_maximum with a b; try assumption. intros; right; apply (H1 x H2 H3). Qed. (**********) Lemma nonneg_derivative_0 : forall f (pr:derivable f), increasing f -> forall x:R, 0 <= derive_pt f x (pr x). Proof. intros; unfold increasing in H. assert (H0 := derivable_derive f x (pr x)). elim H0; intros l H1. rewrite H1; case (Rtotal_order 0 l); intro. left; assumption. elim H2; intro. right; assumption. assert (H4 := derive_pt_eq_1 f x l (pr x) H1). cut (0 < - (l / 2)). intro; elim (H4 (- (l / 2)) H5); intros delta H6. cut (delta / 2 <> 0 /\ 0 < delta / 2 /\ Rabs (delta / 2) < delta). intro; decompose [and] H7; intros; generalize (H6 (delta / 2) H8 H11); cut (0 <= (f (x + delta / 2) - f x) / (delta / 2)). intro; cut (0 <= (f (x + delta / 2) - f x) / (delta / 2) - l). intro; unfold Rabs; case (Rcase_abs ((f (x + delta / 2) - f x) / (delta / 2) - l)). intro; elim (Rlt_irrefl 0 (Rle_lt_trans 0 ((f (x + delta / 2) - f x) / (delta / 2) - l) 0 H12 r)). 
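(* Rcase_abs has split on the sign of the quotient minus l. In the remaining branch the quotient (f (x + delta / 2) - f x) / (delta / 2) is nonnegative because f is increasing, yet the bound by - (l / 2) forces it below l / 2 < 0; the next lines derive this contradiction and then discharge the side conditions delta / 2 <> 0, 0 < delta / 2 and Rabs (delta / 2) < delta. *)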
intros; generalize (Rplus_lt_compat_r l ((f (x + delta / 2) - f x) / (delta / 2) - l) (- (l / 2)) H13); unfold Rminus; replace (- (l / 2) + l) with (l / 2). rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; intro; generalize (Rle_lt_trans 0 ((f (x + delta / 2) - f x) / (delta / 2)) (l / 2) H9 H14); intro; cut (l / 2 < 0). intro; elim (Rlt_irrefl 0 (Rlt_trans 0 (l / 2) 0 H15 H16)). rewrite <- Ropp_0 in H5; generalize (Ropp_lt_gt_contravar (-0) (- (l / 2)) H5); repeat rewrite Ropp_involutive; intro; assumption. pattern l at 3; rewrite double_var. ring. unfold Rminus; apply Rplus_le_le_0_compat. unfold Rdiv; apply Rmult_le_pos. cut (x <= x + delta * / 2). intro; generalize (H x (x + delta * / 2) H12); intro; generalize (Rplus_le_compat_l (- f x) (f x) (f (x + delta * / 2)) H13); rewrite Rplus_opp_l; rewrite Rplus_comm; intro; assumption. pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; left; assumption. left; apply Rinv_0_lt_compat; assumption. left; rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; assumption. unfold Rdiv; apply Rmult_le_pos. cut (x <= x + delta * / 2). intro; generalize (H x (x + delta * / 2) H9); intro; generalize (Rplus_le_compat_l (- f x) (f x) (f (x + delta * / 2)) H12); rewrite Rplus_opp_l; rewrite Rplus_comm; intro; assumption. pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; left; assumption. left; apply Rinv_0_lt_compat; assumption. split. unfold Rdiv; apply prod_neq_R0. generalize (cond_pos delta); intro; red; intro H9; rewrite H9 in H7; elim (Rlt_irrefl 0 H7). apply Rinv_neq_0_compat; discrR. split. unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | apply Rinv_0_lt_compat; prove_sup0 ]. replace (Rabs (delta / 2)) with (delta / 2). unfold Rdiv; apply Rmult_lt_reg_l with 2. prove_sup0. rewrite (Rmult_comm 2). rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]. rewrite Rmult_1_r. rewrite double. pattern (pos delta) at 1; rewrite <- Rplus_0_r. apply Rplus_lt_compat_l; apply (cond_pos delta). symmetry ; apply Rabs_right. left; change (0 < delta / 2); unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | apply Rinv_0_lt_compat; prove_sup0 ]. unfold Rdiv; rewrite <- Ropp_mult_distr_l_reverse; apply Rmult_lt_0_compat. apply Rplus_lt_reg_r with l. unfold Rminus; rewrite Rplus_opp_r; rewrite Rplus_0_r; assumption. apply Rinv_0_lt_compat; prove_sup0. Qed. coq-8.4pl2/theories/Reals/DiscrR.v (* The Coq Proof Assistant / The Coq Development Team *) Lemma IZR_eq : forall z1 z2:Z, z1 = z2 -> IZR z1 = IZR z2. intros; rewrite H; reflexivity. Qed. Lemma IZR_neq : forall z1 z2:Z, z1 <> z2 -> IZR z1 <> IZR z2. intros; red; intro; elim H; apply eq_IZR; assumption. Qed. Ltac discrR := try match goal with | |- (?X1 <> ?X2) => change 2 with (IZR 2); change 1 with (IZR 1); change 0 with (IZR 0); repeat rewrite <- plus_IZR || rewrite <- mult_IZR || rewrite <- Ropp_Ropp_IZR || rewrite Z_R_minus; apply IZR_neq; try discriminate end. Ltac prove_sup0 := match goal with | |- (0 < 1) => apply Rlt_0_1 | |- (0 < ?X1) => repeat (apply Rmult_lt_0_compat || apply Rplus_lt_pos; try apply Rlt_0_1 || apply Rlt_R0_R2) | |- (?X1 > 0) => change (0 < X1); prove_sup0 end. Ltac omega_sup := change 2 with (IZR 2); change 1 with (IZR 1); change 0 with (IZR 0); repeat rewrite <- plus_IZR || rewrite <- mult_IZR || rewrite <- Ropp_Ropp_IZR || rewrite Z_R_minus; apply IZR_lt; omega.
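(* Illustrative examples, added here and not part of the original file (the Example names are ours): the kind of numeric side goals that discrR and prove_sup0 are used to discharge throughout the Reals library. *)
Example discrR_example : (2 <> 0)%R.
Proof. discrR. Qed.
Example prove_sup0_example : (0 < 2)%R.
Proof. prove_sup0. Qed.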
Ltac prove_sup := match goal with | |- (?X1 > ?X2) => change (X2 < X1); prove_sup | |- (0 < ?X1) => prove_sup0 | |- (- ?X1 < 0) => rewrite <- Ropp_0; prove_sup | |- (- ?X1 < - ?X2) => apply Ropp_lt_gt_contravar; prove_sup | |- (- ?X1 < ?X2) => apply Rlt_trans with 0; prove_sup | |- (?X1 < ?X2) => omega_sup | _ => idtac end. Ltac Rcompute := change 2 with (IZR 2); change 1 with (IZR 1); change 0 with (IZR 0); repeat rewrite <- plus_IZR || rewrite <- mult_IZR || rewrite <- Ropp_Ropp_IZR || rewrite Z_R_minus; apply IZR_eq; try reflexivity. coq-8.4pl2/theories/Reals/Rprod.v (* The Coq Proof Assistant / The Coq Development Team *) Fixpoint prod_f_R0 (f:nat -> R) (N:nat) : R := match N with | O => f O | S p => prod_f_R0 f p * f (S p) end. Notation prod_f_SO := (fun An N => prod_f_R0 (fun n => An (S n)) N). (**********) Lemma prod_SO_split : forall (An:nat -> R) (n k:nat), (k < n)%nat -> prod_f_R0 An n = prod_f_R0 An k * prod_f_R0 (fun l:nat => An (k +1+l)%nat) (n - k -1). Proof. intros; induction n as [| n Hrecn]. absurd (k < 0)%nat; omega. cut (k = n \/ (k < n)%nat);[intro; elim H0; intro|omega]. replace (S n - k - 1)%nat with O; [rewrite H1; simpl|omega]. replace (n+1+0)%nat with (S n); ring. replace (S n - k-1)%nat with (S (n - k-1));[idtac|omega]. simpl; replace (k + S (n - k))%nat with (S n). replace (k + 1 + S (n - k - 1))%nat with (S n). rewrite Hrecn; [ ring | assumption ]. omega. omega. Qed. (**********) Lemma prod_SO_pos : forall (An:nat -> R) (N:nat), (forall n:nat, (n <= N)%nat -> 0 <= An n) -> 0 <= prod_f_R0 An N. Proof. intros; induction N as [| N HrecN]. simpl; apply H; trivial. simpl; apply Rmult_le_pos. apply HrecN; intros; apply H; apply le_trans with N; [ assumption | apply le_n_Sn ]. apply H; apply le_n. Qed. (**********) Lemma prod_SO_Rle : forall (An Bn:nat -> R) (N:nat), (forall n:nat, (n <= N)%nat -> 0 <= An n <= Bn n) -> prod_f_R0 An N <= prod_f_R0 Bn N. Proof. intros; induction N as [| N HrecN]. elim H with O; trivial. simpl; apply Rle_trans with (prod_f_R0 An N * Bn (S N)). apply Rmult_le_compat_l. apply prod_SO_pos; intros; elim (H n (le_trans _ _ _ H0 (le_n_Sn N))); intros; assumption. elim (H (S N) (le_n (S N))); intros; assumption. do 2 rewrite <- (Rmult_comm (Bn (S N))); apply Rmult_le_compat_l. elim (H (S N) (le_n (S N))); intros. apply Rle_trans with (An (S N)); assumption. apply HrecN; intros; elim (H n (le_trans _ _ _ H0 (le_n_Sn N))); intros; split; assumption. Qed. (** Application to factorial *) Lemma fact_prodSO : forall n:nat, INR (fact n) = prod_f_R0 (fun k:nat => (match (eq_nat_dec k 0) with | left _ => 1%R | right _ => INR k end)) n. Proof. intro; induction n as [| n Hrecn]. reflexivity. simpl; rewrite <- Hrecn. case n; auto with real. intros; repeat rewrite plus_INR;rewrite mult_INR;ring. Qed. Lemma le_n_2n : forall n:nat, (n <= 2 * n)%nat. Proof. simple induction n. replace (2 * 0)%nat with 0%nat; [ apply le_n | ring ]. intros; replace (2 * S n0)%nat with (S (S (2 * n0))). apply le_n_S; apply le_S; assumption. replace (S (S (2 * n0))) with (2 * n0 + 2)%nat; [ idtac | ring ]. replace (S n0) with (n0 + 1)%nat; [ idtac | ring ]. ring. Qed. (** We prove that (N!)^2<=(2N-k)!*k! forall k in [|O;2N|] *) Lemma RfactN_fact2N_factk : forall N k:nat, (k <= 2 * N)%nat -> Rsqr (INR (fact N)) <= INR (fact (2 * N - k)) * INR (fact k). Proof. assert (forall (n:nat), 0 <= (if eq_nat_dec n 0 then 1 else INR n)).
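(* This assertion and the next one record that every factor (if eq_nat_dec n 0 then 1 else INR n) of fact_prodSO is nonnegative and coincides with INR n when n is positive. The inequality itself is then proved by splitting both factorial products with prod_SO_split (at N, respectively at k) and comparing them factor by factor with prod_SO_Rle, in the three cases k = N, k < N and N < k. *)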
intros; case (eq_nat_dec n 0); auto with real. assert (forall (n:nat), (0 < n)%nat -> (if eq_nat_dec n 0 then 1 else INR n) = INR n). intros n; case (eq_nat_dec n 0); auto with real. intros; absurd (0 < n)%nat; omega. intros; unfold Rsqr; repeat rewrite fact_prodSO. cut ((k=N)%nat \/ (k < N)%nat \/ (N < k)%nat). intro H2; elim H2; intro H3. rewrite H3; replace (2*N-N)%nat with N;[right; ring|omega]. case H3; intro; clear H2 H3. rewrite (prod_SO_split (fun l:nat => if eq_nat_dec l 0 then 1 else INR l) (2 * N - k) N). rewrite Rmult_assoc; apply Rmult_le_compat_l. apply prod_SO_pos; intros; auto. replace (2 * N - k - N-1)%nat with (N - k-1)%nat. rewrite Rmult_comm; rewrite (prod_SO_split (fun l:nat => if eq_nat_dec l 0 then 1 else INR l) N k). apply Rmult_le_compat_l. apply prod_SO_pos; intros; auto. apply prod_SO_Rle; intros; split; auto. rewrite H0. rewrite H0. apply le_INR; omega. omega. omega. assumption. omega. omega. rewrite <- (Rmult_comm (prod_f_R0 (fun l:nat => if eq_nat_dec l 0 then 1 else INR l) k)); rewrite (prod_SO_split (fun l:nat => if eq_nat_dec l 0 then 1 else INR l) k N). rewrite Rmult_assoc; apply Rmult_le_compat_l. apply prod_SO_pos; intros; auto. rewrite Rmult_comm; rewrite (prod_SO_split (fun l:nat => if eq_nat_dec l 0 then 1 else INR l) N (2 * N - k)). apply Rmult_le_compat_l. apply prod_SO_pos; intros; auto. replace (N - (2 * N - k)-1)%nat with (k - N-1)%nat. apply prod_SO_Rle; intros; split; auto. rewrite H0. rewrite H0. apply le_INR; omega. omega. omega. omega. omega. assumption. omega. Qed. (**********) Lemma INR_fact_lt_0 : forall n:nat, 0 < INR (fact n). Proof. intro; apply lt_INR_0; apply neq_O_lt; red; intro; elim (fact_neq_0 n); symmetry ; assumption. Qed. (** We have the following inequality : (C 2N k) <= (C 2N N) forall k in [|O;2N|] *) Lemma C_maj : forall N k:nat, (k <= 2 * N)%nat -> C (2 * N) k <= C (2 * N) N. Proof. intros; unfold C; unfold Rdiv; apply Rmult_le_compat_l. apply pos_INR. replace (2 * N - N)%nat with N. apply Rmult_le_reg_l with (INR (fact N) * INR (fact N)). apply Rmult_lt_0_compat; apply INR_fact_lt_0. rewrite <- Rinv_r_sym. rewrite Rmult_comm; apply Rmult_le_reg_l with (INR (fact k) * INR (fact (2 * N - k))). apply Rmult_lt_0_compat; apply INR_fact_lt_0. rewrite Rmult_1_r; rewrite <- mult_INR; rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite mult_INR; rewrite (Rmult_comm (INR (fact k))); replace (INR (fact N) * INR (fact N)) with (Rsqr (INR (fact N))). apply RfactN_fact2N_factk. assumption. reflexivity. rewrite mult_INR; apply prod_neq_R0; apply INR_fact_neq_0. apply prod_neq_R0; apply INR_fact_neq_0. omega. Qed. coq-8.4pl2/theories/Reals/intro.tex \section{Reals}\label{Reals} This library contains an axiomatization of real numbers. The main file is \texttt{Reals.v}. coq-8.4pl2/theories/Reals/Cos_plus.v (* The Coq Proof Assistant / The Coq Development Team *) Lemma reste1_maj : forall (x y:R) (N:nat), (0 < N)%nat -> Rabs (Reste1 x y N) <= Majxy x y (pred N). Proof. intros. set (C := Rmax 1 (Rmax (Rabs x) (Rabs y))). unfold Reste1. apply Rle_trans with (sum_f_R0 (fun k:nat => Rabs (sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k))) * x ^ (2 * S (l + k)) * ((-1) ^ (N - l) / INR (fact (2 * (N - l)))) * y ^ (2 * (N - l))) (pred (N - k)))) ( pred N)).
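(* Reste1 is a double sum; it is bounded by pushing Rabs inside both sums (Rsum_abs), bounding Rabs x and Rabs y by C = Rmax 1 (Rmax (Rabs x) (Rabs y)), comparing the inverse factorials against binomial coefficients via C_maj, and summing up, which yields the Majxy bound. *)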
apply (Rsum_abs (fun k:nat => sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k))) * x ^ (2 * S (l + k)) * ((-1) ^ (N - l) / INR (fact (2 * (N - l)))) * y ^ (2 * (N - l))) (pred (N - k))) (pred N)). apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => Rabs ((-1) ^ S (l + k) / INR (fact (2 * S (l + k))) * x ^ (2 * S (l + k)) * ((-1) ^ (N - l) / INR (fact (2 * (N - l)))) * y ^ (2 * (N - l)))) (pred (N - k))) ( pred N)). apply sum_Rle. intros. apply (Rsum_abs (fun l:nat => (-1) ^ S (l + n) / INR (fact (2 * S (l + n))) * x ^ (2 * S (l + n)) * ((-1) ^ (N - l) / INR (fact (2 * (N - l)))) * y ^ (2 * (N - l))) (pred (N - n))). apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => / INR (fact (2 * S (l + k)) * fact (2 * (N - l))) * C ^ (2 * S (N + k))) (pred (N - k))) (pred N)). apply sum_Rle; intros. apply sum_Rle; intros. unfold Rdiv; repeat rewrite Rabs_mult. do 2 rewrite pow_1_abs. do 2 rewrite Rmult_1_l. rewrite (Rabs_right (/ INR (fact (2 * S (n0 + n))))). rewrite (Rabs_right (/ INR (fact (2 * (N - n0))))). rewrite mult_INR. rewrite Rinv_mult_distr. repeat rewrite Rmult_assoc. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. rewrite <- Rmult_assoc. rewrite <- (Rmult_comm (/ INR (fact (2 * (N - n0))))). rewrite Rmult_assoc. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. do 2 rewrite <- RPow_abs. apply Rle_trans with (Rabs x ^ (2 * S (n0 + n)) * C ^ (2 * (N - n0))). apply Rmult_le_compat_l. apply pow_le; apply Rabs_pos. apply pow_incr. split. apply Rabs_pos. unfold C. apply Rle_trans with (Rmax (Rabs x) (Rabs y)); apply RmaxLess2. apply Rle_trans with (C ^ (2 * S (n0 + n)) * C ^ (2 * (N - n0))). do 2 rewrite <- (Rmult_comm (C ^ (2 * (N - n0)))). apply Rmult_le_compat_l. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. unfold C; apply RmaxLess1. apply pow_incr. split. apply Rabs_pos. unfold C; apply Rle_trans with (Rmax (Rabs x) (Rabs y)). apply RmaxLess1. apply RmaxLess2. right. replace (2 * S (N + n))%nat with (2 * (N - n0) + 2 * S (n0 + n))%nat. rewrite pow_add. apply Rmult_comm. apply INR_eq; rewrite plus_INR; do 3 rewrite mult_INR. rewrite minus_INR. repeat rewrite S_INR; do 2 rewrite plus_INR; ring. apply le_trans with (pred (N - n)). exact H1. apply le_S_n. replace (S (pred (N - n))) with (N - n)%nat. apply le_trans with N. apply (fun p n m:nat => plus_le_reg_l n m p) with n. rewrite <- le_plus_minus. apply le_plus_r. apply le_trans with (pred N). assumption. apply le_pred_n. apply le_n_Sn. apply S_pred with 0%nat. apply plus_lt_reg_l with n. rewrite <- le_plus_minus. replace (n + 0)%nat with n; [ idtac | ring ]. apply le_lt_trans with (pred N). assumption. apply lt_pred_n_n; assumption. apply le_trans with (pred N). assumption. apply le_pred_n. apply INR_fact_neq_0. apply INR_fact_neq_0. apply Rle_ge; left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rle_ge; left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => / INR (fact (2 * S (l + k)) * fact (2 * (N - l))) * C ^ (4 * N)) (pred (N - k))) (pred N)). apply sum_Rle; intros. apply sum_Rle; intros. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat. rewrite mult_INR; apply Rmult_lt_0_compat; apply INR_fact_lt_0. apply Rle_pow. unfold C; apply RmaxLess1. replace (4 * N)%nat with (2 * (2 * N))%nat; [ idtac | ring ]. apply (fun m n p:nat => mult_le_compat_l p n m). replace (2 * N)%nat with (S (N + pred N)). apply le_n_S. apply plus_le_compat_l; assumption. 
rewrite pred_of_minus. omega. apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => C ^ (4 * N) * Rsqr (/ INR (fact (S (N + k))))) (pred (N - k))) (pred N)). apply sum_Rle; intros. apply sum_Rle; intros. rewrite <- (Rmult_comm (C ^ (4 * N))). apply Rmult_le_compat_l. apply pow_le. left; apply Rlt_le_trans with 1. apply Rlt_0_1. unfold C; apply RmaxLess1. replace (/ INR (fact (2 * S (n0 + n)) * fact (2 * (N - n0)))) with (Binomial.C (2 * S (N + n)) (2 * S (n0 + n)) / INR (fact (2 * S (N + n)))). apply Rle_trans with (Binomial.C (2 * S (N + n)) (S (N + n)) / INR (fact (2 * S (N + n)))). unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ INR (fact (2 * S (N + n))))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply C_maj. omega. right. unfold Rdiv; rewrite Rmult_comm. unfold Binomial.C. unfold Rdiv; repeat rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. replace (2 * S (N + n) - S (N + n))%nat with (S (N + n)). rewrite Rinv_mult_distr. unfold Rsqr; reflexivity. apply INR_fact_neq_0. apply INR_fact_neq_0. omega. apply INR_fact_neq_0. unfold Rdiv; rewrite Rmult_comm. unfold Binomial.C. unfold Rdiv; repeat rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. replace (2 * S (N + n) - 2 * S (n0 + n))%nat with (2 * (N - n0))%nat. rewrite mult_INR. reflexivity. omega. apply INR_fact_neq_0. apply Rle_trans with (sum_f_R0 (fun k:nat => INR N / INR (fact (S N)) * C ^ (4 * N)) (pred N)). apply sum_Rle; intros. rewrite <- (scal_sum (fun _:nat => C ^ (4 * N)) (pred (N - n)) (Rsqr (/ INR (fact (S (N + n)))))). rewrite sum_cte. rewrite <- Rmult_assoc. do 2 rewrite <- (Rmult_comm (C ^ (4 * N))). rewrite Rmult_assoc. apply Rmult_le_compat_l. apply pow_le. left; apply Rlt_le_trans with 1. apply Rlt_0_1. unfold C; apply RmaxLess1. apply Rle_trans with (Rsqr (/ INR (fact (S (N + n)))) * INR N). apply Rmult_le_compat_l. apply Rle_0_sqr. apply le_INR. omega. rewrite Rmult_comm; unfold Rdiv; apply Rmult_le_compat_l. apply pos_INR. apply Rle_trans with (/ INR (fact (S (N + n)))). pattern (/ INR (fact (S (N + n)))) at 2; rewrite <- Rmult_1_r. unfold Rsqr. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rmult_le_reg_l with (INR (fact (S (N + n)))). apply INR_fact_lt_0. rewrite <- Rinv_r_sym. rewrite Rmult_1_r. replace 1 with (INR 1). apply le_INR. apply lt_le_S. apply INR_lt; apply INR_fact_lt_0. reflexivity. apply INR_fact_neq_0. apply Rmult_le_reg_l with (INR (fact (S (N + n)))). apply INR_fact_lt_0. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with (INR (fact (S N))). apply INR_fact_lt_0. rewrite Rmult_1_r. rewrite (Rmult_comm (INR (fact (S N)))). rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. apply le_INR. apply fact_le. apply le_n_S. apply le_plus_l. apply INR_fact_neq_0. apply INR_fact_neq_0. rewrite sum_cte. apply Rle_trans with (C ^ (4 * N) / INR (fact (pred N))). rewrite <- (Rmult_comm (C ^ (4 * N))). unfold Rdiv; rewrite Rmult_assoc; apply Rmult_le_compat_l. apply pow_le. left; apply Rlt_le_trans with 1. apply Rlt_0_1. unfold C; apply RmaxLess1. cut (S (pred N) = N). intro; rewrite H0. pattern N at 2; rewrite <- H0. do 2 rewrite fact_simpl. rewrite H0. repeat rewrite mult_INR. repeat rewrite Rinv_mult_distr. rewrite (Rmult_comm (/ INR (S N))). repeat rewrite <- Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_l. pattern (/ INR (fact (pred N))) at 2; rewrite <- Rmult_1_r. rewrite Rmult_assoc. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. 
apply Rmult_le_reg_l with (INR (S N)). apply lt_INR_0; apply lt_O_Sn. rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; rewrite Rmult_1_l. apply le_INR; apply le_n_Sn. apply not_O_INR; discriminate. apply not_O_INR. red; intro; rewrite H1 in H; elim (lt_irrefl _ H). apply not_O_INR. red; intro; rewrite H1 in H; elim (lt_irrefl _ H). apply INR_fact_neq_0. apply not_O_INR; discriminate. apply prod_neq_R0. apply not_O_INR. red; intro; rewrite H1 in H; elim (lt_irrefl _ H). apply INR_fact_neq_0. symmetry ; apply S_pred with 0%nat; assumption. right. unfold Majxy. unfold C. replace (S (pred N)) with N. reflexivity. apply S_pred with 0%nat; assumption. Qed. Lemma reste2_maj : forall (x y:R) (N:nat), (0 < N)%nat -> Rabs (Reste2 x y N) <= Majxy x y N. Proof. intros. set (C := Rmax 1 (Rmax (Rabs x) (Rabs y))). unfold Reste2. apply Rle_trans with (sum_f_R0 (fun k:nat => Rabs (sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k) + 1)) * x ^ (2 * S (l + k) + 1) * ((-1) ^ (N - l) / INR (fact (2 * (N - l) + 1))) * y ^ (2 * (N - l) + 1)) (pred (N - k)))) ( pred N)). apply (Rsum_abs (fun k:nat => sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k) + 1)) * x ^ (2 * S (l + k) + 1) * ((-1) ^ (N - l) / INR (fact (2 * (N - l) + 1))) * y ^ (2 * (N - l) + 1)) (pred (N - k))) ( pred N)). apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => Rabs ((-1) ^ S (l + k) / INR (fact (2 * S (l + k) + 1)) * x ^ (2 * S (l + k) + 1) * ((-1) ^ (N - l) / INR (fact (2 * (N - l) + 1))) * y ^ (2 * (N - l) + 1))) (pred (N - k))) ( pred N)). apply sum_Rle. intros. apply (Rsum_abs (fun l:nat => (-1) ^ S (l + n) / INR (fact (2 * S (l + n) + 1)) * x ^ (2 * S (l + n) + 1) * ((-1) ^ (N - l) / INR (fact (2 * (N - l) + 1))) * y ^ (2 * (N - l) + 1)) (pred (N - n))). apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => / INR (fact (2 * S (l + k) + 1) * fact (2 * (N - l) + 1)) * C ^ (2 * S (S (N + k)))) (pred (N - k))) ( pred N)). apply sum_Rle; intros. apply sum_Rle; intros. unfold Rdiv; repeat rewrite Rabs_mult. do 2 rewrite pow_1_abs. do 2 rewrite Rmult_1_l. rewrite (Rabs_right (/ INR (fact (2 * S (n0 + n) + 1)))). rewrite (Rabs_right (/ INR (fact (2 * (N - n0) + 1)))). rewrite mult_INR. rewrite Rinv_mult_distr. repeat rewrite Rmult_assoc. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. rewrite <- Rmult_assoc. rewrite <- (Rmult_comm (/ INR (fact (2 * (N - n0) + 1)))). rewrite Rmult_assoc. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. do 2 rewrite <- RPow_abs. apply Rle_trans with (Rabs x ^ (2 * S (n0 + n) + 1) * C ^ (2 * (N - n0) + 1)). apply Rmult_le_compat_l. apply pow_le; apply Rabs_pos. apply pow_incr. split. apply Rabs_pos. unfold C. apply Rle_trans with (Rmax (Rabs x) (Rabs y)); apply RmaxLess2. apply Rle_trans with (C ^ (2 * S (n0 + n) + 1) * C ^ (2 * (N - n0) + 1)). do 2 rewrite <- (Rmult_comm (C ^ (2 * (N - n0) + 1))). apply Rmult_le_compat_l. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. unfold C; apply RmaxLess1. apply pow_incr. split. apply Rabs_pos. unfold C; apply Rle_trans with (Rmax (Rabs x) (Rabs y)). apply RmaxLess1. apply RmaxLess2. right. replace (2 * S (S (N + n)))%nat with (2 * (N - n0) + 1 + (2 * S (n0 + n) + 1))%nat. repeat rewrite pow_add. ring. omega. apply INR_fact_neq_0. apply INR_fact_neq_0. apply Rle_ge; left; apply Rinv_0_lt_compat. apply INR_fact_lt_0. apply Rle_ge; left; apply Rinv_0_lt_compat. apply INR_fact_lt_0. 
apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => / INR (fact (2 * S (l + k) + 1) * fact (2 * (N - l) + 1)) * C ^ (4 * S N)) (pred (N - k))) (pred N)). apply sum_Rle; intros. apply sum_Rle; intros. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat. rewrite mult_INR; apply Rmult_lt_0_compat; apply INR_fact_lt_0. apply Rle_pow. unfold C; apply RmaxLess1. replace (4 * S N)%nat with (2 * (2 * S N))%nat; [ idtac | ring ]. apply (fun m n p:nat => mult_le_compat_l p n m). replace (2 * S N)%nat with (S (S (N + N))). repeat apply le_n_S. apply plus_le_compat_l. apply le_trans with (pred N). assumption. apply le_pred_n. ring. apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => C ^ (4 * S N) * Rsqr (/ INR (fact (S (S (N + k)))))) (pred (N - k))) (pred N)). apply sum_Rle; intros. apply sum_Rle; intros. rewrite <- (Rmult_comm (C ^ (4 * S N))). apply Rmult_le_compat_l. apply pow_le. left; apply Rlt_le_trans with 1. apply Rlt_0_1. unfold C; apply RmaxLess1. replace (/ INR (fact (2 * S (n0 + n) + 1) * fact (2 * (N - n0) + 1))) with (Binomial.C (2 * S (S (N + n))) (2 * S (n0 + n) + 1) / INR (fact (2 * S (S (N + n))))). apply Rle_trans with (Binomial.C (2 * S (S (N + n))) (S (S (N + n))) / INR (fact (2 * S (S (N + n))))). unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ INR (fact (2 * S (S (N + n)))))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply C_maj. apply le_trans with (2 * S (S (n0 + n)))%nat. replace (2 * S (S (n0 + n)))%nat with (S (2 * S (n0 + n) + 1)). apply le_n_Sn. ring. omega. right. unfold Rdiv; rewrite Rmult_comm. unfold Binomial.C. unfold Rdiv; repeat rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. replace (2 * S (S (N + n)) - S (S (N + n)))%nat with (S (S (N + n))). rewrite Rinv_mult_distr. unfold Rsqr; reflexivity. apply INR_fact_neq_0. apply INR_fact_neq_0. omega. apply INR_fact_neq_0. unfold Rdiv; rewrite Rmult_comm. unfold Binomial.C. unfold Rdiv; repeat rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. replace (2 * S (S (N + n)) - (2 * S (n0 + n) + 1))%nat with (2 * (N - n0) + 1)%nat. rewrite mult_INR. reflexivity. omega. apply INR_fact_neq_0. apply Rle_trans with (sum_f_R0 (fun k:nat => INR N / INR (fact (S (S N))) * C ^ (4 * S N)) (pred N)). apply sum_Rle; intros. rewrite <- (scal_sum (fun _:nat => C ^ (4 * S N)) (pred (N - n)) (Rsqr (/ INR (fact (S (S (N + n))))))). rewrite sum_cte. rewrite <- Rmult_assoc. do 2 rewrite <- (Rmult_comm (C ^ (4 * S N))). rewrite Rmult_assoc. apply Rmult_le_compat_l. apply pow_le. left; apply Rlt_le_trans with 1. apply Rlt_0_1. unfold C; apply RmaxLess1. apply Rle_trans with (Rsqr (/ INR (fact (S (S (N + n))))) * INR N). apply Rmult_le_compat_l. apply Rle_0_sqr. replace (S (pred (N - n))) with (N - n)%nat. apply le_INR. omega. omega. rewrite Rmult_comm; unfold Rdiv; apply Rmult_le_compat_l. apply pos_INR. apply Rle_trans with (/ INR (fact (S (S (N + n))))). pattern (/ INR (fact (S (S (N + n))))) at 2; rewrite <- Rmult_1_r. unfold Rsqr. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rmult_le_reg_l with (INR (fact (S (S (N + n))))). apply INR_fact_lt_0. rewrite <- Rinv_r_sym. rewrite Rmult_1_r. replace 1 with (INR 1). apply le_INR. apply lt_le_S. apply INR_lt; apply INR_fact_lt_0. reflexivity. apply INR_fact_neq_0. apply Rmult_le_reg_l with (INR (fact (S (S (N + n))))). apply INR_fact_lt_0. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with (INR (fact (S (S N)))). apply INR_fact_lt_0. rewrite Rmult_1_r. 
rewrite (Rmult_comm (INR (fact (S (S N))))). rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. apply le_INR. apply fact_le. omega. apply INR_fact_neq_0. apply INR_fact_neq_0. rewrite sum_cte. apply Rle_trans with (C ^ (4 * S N) / INR (fact N)). rewrite <- (Rmult_comm (C ^ (4 * S N))). unfold Rdiv; rewrite Rmult_assoc; apply Rmult_le_compat_l. apply pow_le. left; apply Rlt_le_trans with 1. apply Rlt_0_1. unfold C; apply RmaxLess1. cut (S (pred N) = N). intro; rewrite H0. do 2 rewrite fact_simpl. repeat rewrite mult_INR. repeat rewrite Rinv_mult_distr. apply Rle_trans with (INR (S (S N)) * (/ INR (S (S N)) * (/ INR (S N) * / INR (fact N))) * INR N). repeat rewrite Rmult_assoc. rewrite (Rmult_comm (INR N)). rewrite (Rmult_comm (INR (S (S N)))). apply Rmult_le_compat_l. repeat apply Rmult_le_pos. left; apply Rinv_0_lt_compat; apply lt_INR_0; apply lt_O_Sn. left; apply Rinv_0_lt_compat; apply lt_INR_0; apply lt_O_Sn. left; apply Rinv_0_lt_compat. apply INR_fact_lt_0. apply pos_INR. apply le_INR. apply le_trans with (S N); apply le_n_Sn. repeat rewrite <- Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_l. apply Rle_trans with (/ INR (S N) * / INR (fact N) * INR (S N)). repeat rewrite Rmult_assoc. repeat apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply lt_INR_0; apply lt_O_Sn. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply le_INR; apply le_n_Sn. rewrite (Rmult_comm (/ INR (S N))). rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r; right; reflexivity. apply not_O_INR; discriminate. apply not_O_INR; discriminate. apply not_O_INR; discriminate. apply INR_fact_neq_0. apply not_O_INR; discriminate. apply prod_neq_R0; [ apply not_O_INR; discriminate | apply INR_fact_neq_0 ]. symmetry ; apply S_pred with 0%nat; assumption. right. unfold Majxy. unfold C. reflexivity. Qed. Lemma reste1_cv_R0 : forall x y:R, Un_cv (Reste1 x y) 0. Proof. intros. assert (H := Majxy_cv_R0 x y). unfold Un_cv in H; unfold R_dist in H. unfold Un_cv; unfold R_dist; intros. elim (H eps H0); intros N0 H1. exists (S N0); intros. unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r. apply Rle_lt_trans with (Rabs (Majxy x y (pred n))). rewrite (Rabs_right (Majxy x y (pred n))). apply reste1_maj. apply lt_le_trans with (S N0). apply lt_O_Sn. assumption. apply Rle_ge. unfold Majxy. unfold Rdiv; apply Rmult_le_pos. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. apply RmaxLess1. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. replace (Majxy x y (pred n)) with (Majxy x y (pred n) - 0); [ idtac | ring ]. apply H1. unfold ge; apply le_S_n. replace (S (pred n)) with n. assumption. apply S_pred with 0%nat. apply lt_le_trans with (S N0); [ apply lt_O_Sn | assumption ]. Qed. Lemma reste2_cv_R0 : forall x y:R, Un_cv (Reste2 x y) 0. Proof. intros. assert (H := Majxy_cv_R0 x y). unfold Un_cv in H; unfold R_dist in H. unfold Un_cv; unfold R_dist; intros. elim (H eps H0); intros N0 H1. exists (S N0); intros. unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r. apply Rle_lt_trans with (Rabs (Majxy x y n)). rewrite (Rabs_right (Majxy x y n)). apply reste2_maj. apply lt_le_trans with (S N0). apply lt_O_Sn. assumption. apply Rle_ge. unfold Majxy. unfold Rdiv; apply Rmult_le_pos. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. apply RmaxLess1. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. replace (Majxy x y n) with (Majxy x y n - 0); [ idtac | ring ]. apply H1. unfold ge; apply le_trans with (S N0). apply le_n_Sn. exact H2. Qed. 
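(* reste1_maj and reste2_maj bound the two error terms of cos_plus_form by Majxy, which tends to 0 (Majxy_cv_R0); the two lemmas above conclude that Reste1 and Reste2 tend to 0, and reste_cv_R0 below does the same for their combination Reste. This is the key ingredient of the addition formula cos_plus proved at the end of the file. *)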
Lemma reste_cv_R0 : forall x y:R, Un_cv (Reste x y) 0. Proof. intros. unfold Reste. set (An := fun n:nat => Reste2 x y n). set (Bn := fun n:nat => Reste1 x y (S n)). cut (Un_cv (fun n:nat => An n - Bn n) (0 - 0) -> Un_cv (fun N:nat => Reste2 x y N - Reste1 x y (S N)) 0). intro. apply H. apply CV_minus. unfold An. replace (fun n:nat => Reste2 x y n) with (Reste2 x y). apply reste2_cv_R0. reflexivity. unfold Bn. assert (H0 := reste1_cv_R0 x y). unfold Un_cv in H0; unfold R_dist in H0. unfold Un_cv; unfold R_dist; intros. elim (H0 eps H1); intros N0 H2. exists N0; intros. apply H2. unfold ge; apply le_trans with (S N0). apply le_n_Sn. apply le_n_S; assumption. unfold An, Bn. intro. replace 0 with (0 - 0); [ idtac | ring ]. exact H. Qed. Theorem cos_plus : forall x y:R, cos (x + y) = cos x * cos y - sin x * sin y. Proof. intros. cut (Un_cv (C1 x y) (cos x * cos y - sin x * sin y)). cut (Un_cv (C1 x y) (cos (x + y))). intros. apply UL_sequence with (C1 x y); assumption. apply C1_cvg. unfold Un_cv; unfold R_dist. intros. assert (H0 := A1_cvg x). assert (H1 := A1_cvg y). assert (H2 := B1_cvg x). assert (H3 := B1_cvg y). assert (H4 := CV_mult _ _ _ _ H0 H1). assert (H5 := CV_mult _ _ _ _ H2 H3). assert (H6 := reste_cv_R0 x y). unfold Un_cv in H4; unfold Un_cv in H5; unfold Un_cv in H6. unfold R_dist in H4; unfold R_dist in H5; unfold R_dist in H6. cut (0 < eps / 3); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H4 (eps / 3) H7); intros N1 H8. elim (H5 (eps / 3) H7); intros N2 H9. elim (H6 (eps / 3) H7); intros N3 H10. set (N := S (S (max (max N1 N2) N3))). exists N. intros. cut (n = S (pred n)). intro; rewrite H12. rewrite <- cos_plus_form. rewrite <- H12. apply Rle_lt_trans with (Rabs (A1 x n * A1 y n - cos x * cos y) + Rabs (sin x * sin y - B1 x (pred n) * B1 y (pred n) + Reste x y (pred n))). replace (A1 x n * A1 y n - B1 x (pred n) * B1 y (pred n) + Reste x y (pred n) - (cos x * cos y - sin x * sin y)) with (A1 x n * A1 y n - cos x * cos y + (sin x * sin y - B1 x (pred n) * B1 y (pred n) + Reste x y (pred n))); [ apply Rabs_triang | ring ]. replace eps with (eps / 3 + (eps / 3 + eps / 3)). apply Rplus_lt_compat. apply H8. unfold ge; apply le_trans with N. unfold N. apply le_trans with (max N1 N2). apply le_max_l. apply le_trans with (max (max N1 N2) N3). apply le_max_l. apply le_trans with (S (max (max N1 N2) N3)); apply le_n_Sn. assumption. apply Rle_lt_trans with (Rabs (sin x * sin y - B1 x (pred n) * B1 y (pred n)) + Rabs (Reste x y (pred n))). apply Rabs_triang. apply Rplus_lt_compat. rewrite <- Rabs_Ropp. rewrite Ropp_minus_distr. apply H9. unfold ge; apply le_trans with (max N1 N2). apply le_max_r. apply le_S_n. rewrite <- H12. apply le_trans with N. unfold N. apply le_n_S. apply le_trans with (max (max N1 N2) N3). apply le_max_l. apply le_n_Sn. assumption. replace (Reste x y (pred n)) with (Reste x y (pred n) - 0). apply H10. unfold ge. apply le_S_n. rewrite <- H12. apply le_trans with N. unfold N. apply le_n_S. apply le_trans with (max (max N1 N2) N3). apply le_max_r. apply le_n_Sn. assumption. ring. pattern eps at 4; replace eps with (3 * (eps / 3)). ring. unfold Rdiv. rewrite <- Rmult_assoc. apply Rinv_r_simpl_m. discrR. apply lt_le_trans with (pred N). unfold N; simpl; apply lt_O_Sn. apply le_S_n. rewrite <- H12. replace (S (pred N)) with N. assumption. unfold N; simpl; reflexivity. cut (0 < N)%nat. intro. cut (0 < n)%nat. intro. apply S_pred with 0%nat; assumption. apply lt_le_trans with N; assumption. 
unfold N; apply lt_O_Sn. Qed. coq-8.4pl2/theories/Reals/Rtrigo1.v (* The Coq Proof Assistant / The Coq Development Team *) Lemma CVN_R_cos : forall fn:nat -> R -> R, fn = (fun (N:nat) (x:R) => (-1) ^ N / INR (fact (2 * N)) * x ^ (2 * N)) -> CVN_R fn. Proof. unfold CVN_R in |- *; intros. cut ((r:R) <> 0). intro hyp_r; unfold CVN_r in |- *. exists (fun n:nat => / INR (fact (2 * n)) * r ^ (2 * n)). cut { l:R | Un_cv (fun n:nat => sum_f_R0 (fun k:nat => Rabs (/ INR (fact (2 * k)) * r ^ (2 * k))) n) l }. intro X; elim X; intros. exists x. split. apply p. intros; rewrite H; unfold Rdiv in |- *; do 2 rewrite Rabs_mult. rewrite pow_1_abs; rewrite Rmult_1_l. cut (0 < / INR (fact (2 * n))). intro; rewrite (Rabs_right _ (Rle_ge _ _ (Rlt_le _ _ H1))). apply Rmult_le_compat_l. left; apply H1. rewrite <- RPow_abs; apply pow_maj_Rabs. rewrite Rabs_Rabsolu. unfold Boule in H0; rewrite Rminus_0_r in H0. left; apply H0. apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Alembert_C2. intro; apply Rabs_no_R0. apply prod_neq_R0. apply Rinv_neq_0_compat. apply INR_fact_neq_0. apply pow_nonzero; assumption. assert (H0 := Alembert_cos). unfold cos_n in H0; unfold Un_cv in H0; unfold Un_cv in |- *; intros. cut (0 < eps / Rsqr r). intro; elim (H0 _ H2); intros N0 H3. exists N0; intros. unfold R_dist in |- *; assert (H5 := H3 _ H4). unfold R_dist in H5; replace (Rabs (Rabs (/ INR (fact (2 * S n)) * r ^ (2 * S n)) / Rabs (/ INR (fact (2 * n)) * r ^ (2 * n)))) with (Rsqr r * Rabs ((-1) ^ S n / INR (fact (2 * S n)) / ((-1) ^ n / INR (fact (2 * n))))). apply Rmult_lt_reg_l with (/ Rsqr r). apply Rinv_0_lt_compat; apply Rsqr_pos_lt; assumption. pattern (/ Rsqr r) at 1 in |- *; replace (/ Rsqr r) with (Rabs (/ Rsqr r)). rewrite <- Rabs_mult; rewrite Rmult_minus_distr_l; rewrite Rmult_0_r; rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite <- (Rmult_comm eps); apply H5. unfold Rsqr in |- *; apply prod_neq_R0; assumption. rewrite Rabs_Rinv. rewrite Rabs_right. reflexivity. apply Rle_ge; apply Rle_0_sqr. unfold Rsqr in |- *; apply prod_neq_R0; assumption. rewrite (Rmult_comm (Rsqr r)); unfold Rdiv in |- *; repeat rewrite Rabs_mult; rewrite Rabs_Rabsolu; rewrite pow_1_abs; rewrite Rmult_1_l; repeat rewrite Rmult_assoc; apply Rmult_eq_compat_l. rewrite Rabs_Rinv. rewrite Rabs_mult; rewrite (pow_1_abs n); rewrite Rmult_1_l; rewrite <- Rabs_Rinv. rewrite Rinv_involutive. rewrite Rinv_mult_distr. rewrite Rabs_Rinv. rewrite Rinv_involutive. rewrite (Rmult_comm (Rabs (Rabs (r ^ (2 * S n))))); rewrite Rabs_mult; rewrite Rabs_Rabsolu; rewrite Rmult_assoc; apply Rmult_eq_compat_l. rewrite Rabs_Rinv. do 2 rewrite Rabs_Rabsolu; repeat rewrite Rabs_right. replace (r ^ (2 * S n)) with (r ^ (2 * n) * r * r). repeat rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. unfold Rsqr in |- *; ring. apply pow_nonzero; assumption. replace (2 * S n)%nat with (S (S (2 * n))). simpl in |- *; ring. ring. apply Rle_ge; apply pow_le; left; apply (cond_pos r). apply Rle_ge; apply pow_le; left; apply (cond_pos r). apply Rabs_no_R0; apply pow_nonzero; assumption. apply Rabs_no_R0; apply INR_fact_neq_0. apply INR_fact_neq_0. apply Rabs_no_R0; apply Rinv_neq_0_compat; apply INR_fact_neq_0. apply Rabs_no_R0; apply pow_nonzero; assumption. apply INR_fact_neq_0. apply Rinv_neq_0_compat; apply INR_fact_neq_0. apply prod_neq_R0. apply pow_nonzero; discrR. apply Rinv_neq_0_compat; apply INR_fact_neq_0.
unfold Rdiv in |- *; apply Rmult_lt_0_compat. apply H1. apply Rinv_0_lt_compat; apply Rsqr_pos_lt; assumption. assert (H0 := cond_pos r); red in |- *; intro; rewrite H1 in H0; elim (Rlt_irrefl _ H0). Qed. (**********) Lemma continuity_cos : continuity cos. Proof. set (fn := fun (N:nat) (x:R) => (-1) ^ N / INR (fact (2 * N)) * x ^ (2 * N)). cut (CVN_R fn). intro; cut (forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }). intro cv; cut (forall n:nat, continuity (fn n)). intro; cut (forall x:R, cos x = SFL fn cv x). intro; cut (continuity (SFL fn cv) -> continuity cos). intro; apply H1. apply SFL_continuity; assumption. unfold continuity in |- *; unfold continuity_pt in |- *; unfold continue_in in |- *; unfold limit1_in in |- *; unfold limit_in in |- *; simpl in |- *; unfold R_dist in |- *; intros. elim (H1 x _ H2); intros. exists x0; intros. elim H3; intros. split. apply H4. intros; rewrite (H0 x); rewrite (H0 x1); apply H5; apply H6. intro; unfold cos, SFL in |- *. case (cv x); case (exist_cos (Rsqr x)); intros. symmetry in |- *; eapply UL_sequence. apply u. unfold cos_in in c; unfold infinite_sum in c; unfold Un_cv in |- *; intros. elim (c _ H0); intros N0 H1. exists N0; intros. unfold R_dist in H1; unfold R_dist, SP in |- *. replace (sum_f_R0 (fun k:nat => fn k x) n) with (sum_f_R0 (fun i:nat => cos_n i * Rsqr x ^ i) n). apply H1; assumption. apply sum_eq; intros. unfold cos_n, fn in |- *; apply Rmult_eq_compat_l. unfold Rsqr in |- *; rewrite pow_sqr; reflexivity. intro; unfold fn in |- *; replace (fun x:R => (-1) ^ n / INR (fact (2 * n)) * x ^ (2 * n)) with (fct_cte ((-1) ^ n / INR (fact (2 * n))) * pow_fct (2 * n))%F; [ idtac | reflexivity ]. apply continuity_mult. apply derivable_continuous; apply derivable_const. apply derivable_continuous; apply (derivable_pow (2 * n)). apply CVN_R_CVS; apply X. apply CVN_R_cos; unfold fn in |- *; reflexivity. Qed. Lemma sin_gt_cos_7_8 : sin (7 / 8) > cos (7 / 8). Proof. assert (lo1 : 0 <= 7/8) by fourier. assert (up1 : 7/8 <= 4) by fourier. assert (lo : -2 <= 7/8) by fourier. assert (up : 7/8 <= 2) by fourier. destruct (pre_sin_bound _ 0 lo1 up1) as [lower _ ]. destruct (pre_cos_bound _ 0 lo up) as [_ upper]. apply Rle_lt_trans with (1 := upper). apply Rlt_le_trans with (2 := lower). unfold cos_approx, sin_approx. simpl sum_f_R0; replace 7 with (IZR 7) by (simpl; field). replace 8 with (IZR 8) by (simpl; field). unfold cos_term, sin_term; simpl fact; rewrite !INR_IZR_INZ. simpl plus; simpl mult. field_simplify; try (repeat apply conj; apply not_eq_sym, Rlt_not_eq, (IZR_lt 0); reflexivity). unfold Rminus; rewrite !pow_IZR, <- !mult_IZR, <- !opp_IZR, <- ?plus_IZR. match goal with |- IZR ?a / ?b < ?c / ?d => apply Rmult_lt_reg_r with d;[apply (IZR_lt 0); reflexivity | unfold Rdiv at 2; rewrite Rmult_assoc, Rinv_l, Rmult_1_r, Rmult_comm; [ |apply not_eq_sym, Rlt_not_eq, (IZR_lt 0); reflexivity ]]; apply Rmult_lt_reg_r with b;[apply (IZR_lt 0); reflexivity | ] end. unfold Rdiv; rewrite !Rmult_assoc, Rinv_l, Rmult_1_r; [ | apply not_eq_sym, Rlt_not_eq, (IZR_lt 0); reflexivity]. repeat (rewrite <- !plus_IZR || rewrite <- !mult_IZR). apply IZR_lt; reflexivity. Qed. Definition PI_2_aux : {z | 7/8 <= z <= 7/4 /\ -cos z = 0}. assert (cc : continuity (fun r =>- cos r)). apply continuity_opp, continuity_cos. assert (cvp : 0 < cos (7/8)). assert (int78 : -2 <= 7/8 <= 2) by (split; fourier). destruct int78 as [lower upper]. case (pre_cos_bound _ 0 lower upper). unfold cos_approx; simpl sum_f_R0; unfold cos_term. 
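(* cos (7/8) is bounded below by its series approximant cos_approx (7/8) 1 (pre_cos_bound with n = 0), which is positive; together with cos (7/4) < 0, proved next from cos (7/4) = Rsqr (cos (7/8)) - Rsqr (sin (7/8)) and sin (7/8) > cos (7/8), the intermediate value theorem yields a zero of - cos in [7/8, 7/4]. PI2 and PI := 2 * PI2 are defined from this witness below. *)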
intros cl _; apply Rlt_le_trans with (2 := cl); simpl. fourier. assert (cun : cos (7/4) < 0). replace (7/4) with (7/8 + 7/8) by field. rewrite cos_plus. apply Rlt_minus; apply Rsqr_incrst_1. exact sin_gt_cos_7_8. apply Rlt_le; assumption. apply Rlt_le; apply Rlt_trans with (1 := cvp); exact sin_gt_cos_7_8. apply IVT; auto; fourier. Qed. Definition PI2 := proj1_sig PI_2_aux. Definition PI := 2 * PI2. Lemma cos_pi2 : cos PI2 = 0. unfold PI2; case PI_2_aux; simpl. intros x [_ q]; rewrite <- (Ropp_involutive (cos x)), q; apply Ropp_0. Qed. Lemma pi2_int : 7/8 <= PI2 <= 7/4. unfold PI2; case PI_2_aux; simpl; tauto. Qed. (**********) Lemma cos_minus : forall x y:R, cos (x - y) = cos x * cos y + sin x * sin y. Proof. intros; unfold Rminus in |- *; rewrite cos_plus. rewrite <- cos_sym; rewrite sin_antisym; ring. Qed. (**********) Lemma sin2_cos2 : forall x:R, Rsqr (sin x) + Rsqr (cos x) = 1. Proof. intro; unfold Rsqr in |- *; rewrite Rplus_comm; rewrite <- (cos_minus x x); unfold Rminus in |- *; rewrite Rplus_opp_r; apply cos_0. Qed. Lemma cos2 : forall x:R, Rsqr (cos x) = 1 - Rsqr (sin x). Proof. intros x; rewrite <- (sin2_cos2 x); ring. Qed. Lemma sin2 : forall x:R, Rsqr (sin x) = 1 - Rsqr (cos x). Proof. intro x; generalize (cos2 x); intro H1; rewrite H1. unfold Rminus in |- *; rewrite Ropp_plus_distr; rewrite <- Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_l; symmetry in |- *; apply Ropp_involutive. Qed. (**********) Lemma cos_PI2 : cos (PI / 2) = 0. Proof. unfold PI; generalize cos_pi2; replace ((2 * PI2)/2) with PI2 by field; tauto. Qed. Lemma sin_pos_tech : forall x, 0 < x < 2 -> 0 < sin x. intros x [int1 int2]. assert (lo : 0 <= x) by (apply Rlt_le; assumption). assert (up : x <= 4) by (apply Rlt_le, Rlt_trans with (1:=int2); fourier). destruct (pre_sin_bound _ 0 lo up) as [t _]; clear lo up. apply Rlt_le_trans with (2:= t); clear t. unfold sin_approx; simpl sum_f_R0; unfold sin_term; simpl. match goal with |- _ < ?a => replace a with (x * (1 - x^2/6)) by (simpl; field) end. assert (t' : x ^ 2 <= 4). replace 4 with (2 ^ 2) by field. apply (pow_incr x 2); split; apply Rlt_le; assumption. apply Rmult_lt_0_compat;[assumption | fourier ]. Qed. Lemma sin_PI2 : sin (PI / 2) = 1. replace (PI / 2) with PI2 by (unfold PI; field). assert (int' : 0 < PI2 < 2). destruct pi2_int; split; fourier. assert (lo2 := sin_pos_tech PI2 int'). assert (t2 : Rabs (sin PI2) = 1). rewrite <- Rabs_R1; apply Rsqr_eq_abs_0. rewrite Rsqr_1, sin2, cos_pi2, Rsqr_0, Rminus_0_r; reflexivity. revert t2; rewrite Rabs_pos_eq;[| apply Rlt_le]; tauto. Qed. Lemma PI_RGT_0 : PI > 0. Proof. unfold PI; destruct pi2_int; fourier. Qed. Lemma PI_4 : PI <= 4. Proof. unfold PI; destruct pi2_int; fourier. Qed. (**********) Lemma PI_neq0 : PI <> 0. Proof. red in |- *; intro; assert (H0 := PI_RGT_0); rewrite H in H0; elim (Rlt_irrefl _ H0). Qed. (**********) Lemma cos_PI : cos PI = -1. Proof. replace PI with (PI / 2 + PI / 2). rewrite cos_plus. rewrite sin_PI2; rewrite cos_PI2. ring. symmetry in |- *; apply double_var. Qed. Lemma sin_PI : sin PI = 0. Proof. assert (H := sin2_cos2 PI). rewrite cos_PI in H. rewrite <- Rsqr_neg in H. rewrite Rsqr_1 in H. cut (Rsqr (sin PI) = 0). intro; apply (Rsqr_eq_0 _ H0). apply Rplus_eq_reg_l with 1. rewrite Rplus_0_r; rewrite Rplus_comm; exact H. Qed. Lemma sin_bound : forall (a : R) (n : nat), 0 <= a -> a <= PI -> sin_approx a (2 * n + 1) <= sin a <= sin_approx a (2 * (n + 1)). Proof. intros a n a0 api; apply pre_sin_bound. assumption. apply Rle_trans with (1:= api) (2 := PI_4). Qed. 
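(* Illustrative example, added and not part of the original file (the Example name is ours): cos_minus specialized at y = x, which is the product form behind sin2_cos2. *)
Example cos_minus_diag : forall x:R, cos (x - x) = cos x * cos x + sin x * sin x.
Proof. intro x; apply cos_minus. Qed.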
Lemma cos_bound : forall (a : R) (n : nat), - PI / 2 <= a -> a <= PI / 2 -> cos_approx a (2 * n + 1) <= cos a <= cos_approx a (2 * (n + 1)). Proof. intros a n lower upper; apply pre_cos_bound. apply Rle_trans with (2 := lower). apply Rmult_le_reg_r with 2; [fourier |]. replace ((-PI/2) * 2) with (-PI) by field. assert (t := PI_4); fourier. apply Rle_trans with (1 := upper). apply Rmult_le_reg_r with 2; [fourier | ]. replace ((PI/2) * 2) with PI by field. generalize PI_4; intros; fourier. Qed. (**********) Lemma neg_cos : forall x:R, cos (x + PI) = - cos x. Proof. intro x; rewrite cos_plus; rewrite sin_PI; rewrite cos_PI; ring. Qed. (**********) Lemma sin_cos : forall x:R, sin x = - cos (PI / 2 + x). Proof. intro x; rewrite cos_plus; rewrite sin_PI2; rewrite cos_PI2; ring. Qed. (**********) Lemma sin_plus : forall x y:R, sin (x + y) = sin x * cos y + cos x * sin y. Proof. intros. rewrite (sin_cos (x + y)). replace (PI / 2 + (x + y)) with (PI / 2 + x + y); [ rewrite cos_plus | ring ]. rewrite (sin_cos (PI / 2 + x)). replace (PI / 2 + (PI / 2 + x)) with (x + PI). rewrite neg_cos. replace (cos (PI / 2 + x)) with (- sin x). ring. rewrite sin_cos; rewrite Ropp_involutive; reflexivity. pattern PI at 1 in |- *; rewrite (double_var PI); ring. Qed. Lemma sin_minus : forall x y:R, sin (x - y) = sin x * cos y - cos x * sin y. Proof. intros; unfold Rminus in |- *; rewrite sin_plus. rewrite <- cos_sym; rewrite sin_antisym; ring. Qed. (**********) Definition tan (x:R) : R := sin x / cos x. Lemma tan_plus : forall x y:R, cos x <> 0 -> cos y <> 0 -> cos (x + y) <> 0 -> 1 - tan x * tan y <> 0 -> tan (x + y) = (tan x + tan y) / (1 - tan x * tan y). Proof. intros; unfold tan in |- *; rewrite sin_plus; rewrite cos_plus; unfold Rdiv in |- *; replace (cos x * cos y - sin x * sin y) with (cos x * cos y * (1 - sin x * / cos x * (sin y * / cos y))). rewrite Rinv_mult_distr. repeat rewrite <- Rmult_assoc; replace ((sin x * cos y + cos x * sin y) * / (cos x * cos y)) with (sin x * / cos x + sin y * / cos y). reflexivity. rewrite Rmult_plus_distr_r; rewrite Rinv_mult_distr. repeat rewrite Rmult_assoc; repeat rewrite (Rmult_comm (sin x)); repeat rewrite <- Rmult_assoc. repeat rewrite Rinv_r_simpl_m; [ reflexivity | assumption | assumption ]. assumption. assumption. apply prod_neq_R0; assumption. assumption. unfold Rminus in |- *; rewrite Rmult_plus_distr_l; rewrite Rmult_1_r; apply Rplus_eq_compat_l; repeat rewrite Rmult_assoc; rewrite (Rmult_comm (sin x)); rewrite (Rmult_comm (cos y)); rewrite <- Ropp_mult_distr_r_reverse; repeat rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite (Rmult_comm (sin x)); rewrite <- Ropp_mult_distr_r_reverse; repeat rewrite Rmult_assoc; apply Rmult_eq_compat_l; rewrite (Rmult_comm (/ cos y)); rewrite Rmult_assoc; rewrite <- Rinv_r_sym. apply Rmult_1_r. assumption. assumption. Qed. (*******************************************************) (** * Some properties of cos, sin and tan *) (*******************************************************) Lemma sin_2a : forall x:R, sin (2 * x) = 2 * sin x * cos x. Proof. intro x; rewrite double; rewrite sin_plus. rewrite <- (Rmult_comm (sin x)); symmetry in |- *; rewrite Rmult_assoc; apply double. Qed. Lemma cos_2a : forall x:R, cos (2 * x) = cos x * cos x - sin x * sin x. Proof. intro x; rewrite double; apply cos_plus. Qed. Lemma cos_2a_cos : forall x:R, cos (2 * x) = 2 * cos x * cos x - 1. Proof. 
intro x; rewrite double; unfold Rminus in |- *; rewrite Rmult_assoc; rewrite cos_plus; generalize (sin2_cos2 x); rewrite double; intro H1; rewrite <- H1; ring_Rsqr. Qed. Lemma cos_2a_sin : forall x:R, cos (2 * x) = 1 - 2 * sin x * sin x. Proof. intro x; rewrite Rmult_assoc; unfold Rminus in |- *; repeat rewrite double. generalize (sin2_cos2 x); intro H1; rewrite <- H1; rewrite cos_plus; ring_Rsqr. Qed. Lemma tan_2a : forall x:R, cos x <> 0 -> cos (2 * x) <> 0 -> 1 - tan x * tan x <> 0 -> tan (2 * x) = 2 * tan x / (1 - tan x * tan x). Proof. repeat rewrite double; intros; repeat rewrite double; rewrite double in H0; apply tan_plus; assumption. Qed. Lemma sin_neg : forall x:R, sin (- x) = - sin x. Proof. apply sin_antisym. Qed. Lemma cos_neg : forall x:R, cos (- x) = cos x. Proof. intro; symmetry in |- *; apply cos_sym. Qed. Lemma tan_0 : tan 0 = 0. Proof. unfold tan in |- *; rewrite sin_0; rewrite cos_0. unfold Rdiv in |- *; apply Rmult_0_l. Qed. Lemma tan_neg : forall x:R, tan (- x) = - tan x. Proof. intros x; unfold tan in |- *; rewrite sin_neg; rewrite cos_neg; unfold Rdiv in |- *. apply Ropp_mult_distr_l_reverse. Qed. Lemma tan_minus : forall x y:R, cos x <> 0 -> cos y <> 0 -> cos (x - y) <> 0 -> 1 + tan x * tan y <> 0 -> tan (x - y) = (tan x - tan y) / (1 + tan x * tan y). Proof. intros; unfold Rminus in |- *; rewrite tan_plus. rewrite tan_neg; unfold Rminus in |- *; rewrite <- Ropp_mult_distr_l_reverse; rewrite Rmult_opp_opp; reflexivity. assumption. rewrite cos_neg; assumption. assumption. rewrite tan_neg; unfold Rminus in |- *; rewrite <- Ropp_mult_distr_l_reverse; rewrite Rmult_opp_opp; assumption. Qed. Lemma cos_3PI2 : cos (3 * (PI / 2)) = 0. Proof. replace (3 * (PI / 2)) with (PI + PI / 2). rewrite cos_plus; rewrite sin_PI; rewrite cos_PI2; ring. pattern PI at 1 in |- *; rewrite (double_var PI). ring. Qed. Lemma sin_2PI : sin (2 * PI) = 0. Proof. rewrite sin_2a; rewrite sin_PI; ring. Qed. Lemma cos_2PI : cos (2 * PI) = 1. Proof. rewrite cos_2a; rewrite sin_PI; rewrite cos_PI; ring. Qed. Lemma neg_sin : forall x:R, sin (x + PI) = - sin x. Proof. intro x; rewrite sin_plus; rewrite sin_PI; rewrite cos_PI; ring. Qed. Lemma sin_PI_x : forall x:R, sin (PI - x) = sin x. Proof. intro x; rewrite sin_minus; rewrite sin_PI; rewrite cos_PI; rewrite Rmult_0_l; unfold Rminus in |- *; rewrite Rplus_0_l; rewrite Ropp_mult_distr_l_reverse; rewrite Ropp_involutive; apply Rmult_1_l. Qed. Lemma sin_period : forall (x:R) (k:nat), sin (x + 2 * INR k * PI) = sin x. Proof. intros x k; induction k as [| k Hreck]. simpl in |- *; ring_simplify (x + 2 * 0 * PI). trivial. replace (x + 2 * INR (S k) * PI) with (x + 2 * INR k * PI + 2 * PI). rewrite sin_plus in |- *; rewrite sin_2PI in |- *; rewrite cos_2PI in |- *. ring_simplify; trivial. rewrite S_INR in |- *; ring. Qed. Lemma cos_period : forall (x:R) (k:nat), cos (x + 2 * INR k * PI) = cos x. Proof. intros x k; induction k as [| k Hreck]. simpl in |- *; ring_simplify (x + 2 * 0 * PI). trivial. replace (x + 2 * INR (S k) * PI) with (x + 2 * INR k * PI + 2 * PI). rewrite cos_plus in |- *; rewrite sin_2PI in |- *; rewrite cos_2PI in |- *. ring_simplify; trivial. rewrite S_INR in |- *; ring. Qed. Lemma sin_shift : forall x:R, sin (PI / 2 - x) = cos x. Proof. intro x; rewrite sin_minus; rewrite sin_PI2; rewrite cos_PI2; ring. Qed. Lemma cos_shift : forall x:R, cos (PI / 2 - x) = sin x. Proof. intro x; rewrite cos_minus; rewrite sin_PI2; rewrite cos_PI2; ring. Qed. Lemma cos_sin : forall x:R, cos x = sin (PI / 2 + x). Proof. 
intro x; rewrite sin_plus; rewrite sin_PI2; rewrite cos_PI2; ring. Qed. Lemma PI2_RGT_0 : 0 < PI / 2. Proof. unfold Rdiv in |- *; apply Rmult_lt_0_compat; [ apply PI_RGT_0 | apply Rinv_0_lt_compat; prove_sup ]. Qed. Lemma SIN_bound : forall x:R, -1 <= sin x <= 1. Proof. intro; case (Rle_dec (-1) (sin x)); intro. case (Rle_dec (sin x) 1); intro. split; assumption. cut (1 < sin x). intro; generalize (Rsqr_incrst_1 1 (sin x) H (Rlt_le 0 1 Rlt_0_1) (Rlt_le 0 (sin x) (Rlt_trans 0 1 (sin x) Rlt_0_1 H))); rewrite Rsqr_1; intro; rewrite sin2 in H0; unfold Rminus in H0; generalize (Rplus_lt_compat_l (-1) 1 (1 + - Rsqr (cos x)) H0); repeat rewrite <- Rplus_assoc; repeat rewrite Rplus_opp_l; rewrite Rplus_0_l; intro; rewrite <- Ropp_0 in H1; generalize (Ropp_lt_gt_contravar (-0) (- Rsqr (cos x)) H1); repeat rewrite Ropp_involutive; intro; generalize (Rle_0_sqr (cos x)); intro; elim (Rlt_irrefl 0 (Rle_lt_trans 0 (Rsqr (cos x)) 0 H3 H2)). auto with real. cut (sin x < -1). intro; generalize (Ropp_lt_gt_contravar (sin x) (-1) H); rewrite Ropp_involutive; clear H; intro; generalize (Rsqr_incrst_1 1 (- sin x) H (Rlt_le 0 1 Rlt_0_1) (Rlt_le 0 (- sin x) (Rlt_trans 0 1 (- sin x) Rlt_0_1 H))); rewrite Rsqr_1; intro; rewrite <- Rsqr_neg in H0; rewrite sin2 in H0; unfold Rminus in H0; generalize (Rplus_lt_compat_l (-1) 1 (1 + - Rsqr (cos x)) H0); repeat rewrite <- Rplus_assoc; repeat rewrite Rplus_opp_l; rewrite Rplus_0_l; intro; rewrite <- Ropp_0 in H1; generalize (Ropp_lt_gt_contravar (-0) (- Rsqr (cos x)) H1); repeat rewrite Ropp_involutive; intro; generalize (Rle_0_sqr (cos x)); intro; elim (Rlt_irrefl 0 (Rle_lt_trans 0 (Rsqr (cos x)) 0 H3 H2)). auto with real. Qed. Lemma COS_bound : forall x:R, -1 <= cos x <= 1. Proof. intro; rewrite <- sin_shift; apply SIN_bound. Qed. Lemma cos_sin_0 : forall x:R, ~ (cos x = 0 /\ sin x = 0). Proof. intro; red in |- *; intro; elim H; intros; generalize (sin2_cos2 x); intro; rewrite H0 in H2; rewrite H1 in H2; repeat rewrite Rsqr_0 in H2; rewrite Rplus_0_r in H2; generalize Rlt_0_1; intro; rewrite <- H2 in H3; elim (Rlt_irrefl 0 H3). Qed. Lemma cos_sin_0_var : forall x:R, cos x <> 0 \/ sin x <> 0. Proof. intros x. destruct (Req_dec (cos x) 0). 2: now left. right. intros H'. apply (cos_sin_0 x). now split. Qed. (*****************************************************************) (** * Using series definitions of cos and sin *) (*****************************************************************) Definition sin_lb (a:R) : R := sin_approx a 3. Definition sin_ub (a:R) : R := sin_approx a 4. Definition cos_lb (a:R) : R := cos_approx a 3. Definition cos_ub (a:R) : R := cos_approx a 4. Lemma sin_lb_gt_0 : forall a:R, 0 < a -> a <= PI / 2 -> 0 < sin_lb a. Proof. intros. unfold sin_lb in |- *; unfold sin_approx in |- *; unfold sin_term in |- *. set (Un := fun i:nat => a ^ (2 * i + 1) / INR (fact (2 * i + 1))). replace (sum_f_R0 (fun i:nat => (-1) ^ i * (a ^ (2 * i + 1) / INR (fact (2 * i + 1)))) 3) with (sum_f_R0 (fun i:nat => (-1) ^ i * Un i) 3); [ idtac | apply sum_eq; intros; unfold Un in |- *; reflexivity ]. cut (forall n:nat, Un (S n) < Un n). intro; simpl in |- *. repeat rewrite Rmult_1_l; repeat rewrite Rmult_1_r; replace (-1 * Un 1%nat) with (- Un 1%nat); [ idtac | ring ]; replace (-1 * -1 * Un 2%nat) with (Un 2%nat); [ idtac | ring ]; replace (-1 * (-1 * -1) * Un 3%nat) with (- Un 3%nat); [ idtac | ring ]; replace (Un 0%nat + - Un 1%nat + Un 2%nat + - Un 3%nat) with (Un 0%nat - Un 1%nat + (Un 2%nat - Un 3%nat)); [ idtac | ring ]. apply Rplus_lt_0_compat. 
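(* With Un i = a ^ (2 * i + 1) / INR (fact (2 * i + 1)), the truncated alternating series sin_lb a equals Un 0 - Un 1 + (Un 2 - Un 3); it is positive as soon as Un is strictly decreasing, which the remaining goals establish: each difference is positive, and Un (S n) < Un n because a * a <= 4 (from a <= PI / 2 and PI <= 4) while the corresponding factorial quotient is at least 6. *)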
unfold Rminus in |- *; apply Rplus_lt_reg_r with (Un 1%nat); rewrite Rplus_0_r; rewrite (Rplus_comm (Un 1%nat)); rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; apply H1. unfold Rminus in |- *; apply Rplus_lt_reg_r with (Un 3%nat); rewrite Rplus_0_r; rewrite (Rplus_comm (Un 3%nat)); rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; apply H1. intro; unfold Un in |- *. cut ((2 * S n + 1)%nat = (2 * n + 1 + 2)%nat). intro; rewrite H1. rewrite pow_add; unfold Rdiv in |- *; rewrite Rmult_assoc; apply Rmult_lt_compat_l. apply pow_lt; assumption. rewrite <- H1; apply Rmult_lt_reg_l with (INR (fact (2 * n + 1))). apply lt_INR_0; apply neq_O_lt. assert (H2 := fact_neq_0 (2 * n + 1)). red in |- *; intro; elim H2; symmetry in |- *; assumption. rewrite <- Rinv_r_sym. apply Rmult_lt_reg_l with (INR (fact (2 * S n + 1))). apply lt_INR_0; apply neq_O_lt. assert (H2 := fact_neq_0 (2 * S n + 1)). red in |- *; intro; elim H2; symmetry in |- *; assumption. rewrite (Rmult_comm (INR (fact (2 * S n + 1)))); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. do 2 rewrite Rmult_1_r; apply Rle_lt_trans with (INR (fact (2 * n + 1)) * 4). apply Rmult_le_compat_l. replace 0 with (INR 0); [ idtac | reflexivity ]; apply le_INR; apply le_O_n. simpl in |- *; rewrite Rmult_1_r; replace 4 with (Rsqr 2); [ idtac | ring_Rsqr ]; replace (a * a) with (Rsqr a); [ idtac | reflexivity ]; apply Rsqr_incr_1. apply Rle_trans with (PI / 2); [ assumption | unfold Rdiv in |- *; apply Rmult_le_reg_l with 2; [ prove_sup0 | rewrite <- Rmult_assoc; rewrite Rinv_r_simpl_m; [ replace 4 with 4; [ apply PI_4 | ring ] | discrR ] ] ]. left; assumption. left; prove_sup0. rewrite H1; replace (2 * n + 1 + 2)%nat with (S (S (2 * n + 1))). do 2 rewrite fact_simpl; do 2 rewrite mult_INR. repeat rewrite <- Rmult_assoc. rewrite <- (Rmult_comm (INR (fact (2 * n + 1)))). rewrite Rmult_assoc. apply Rmult_lt_compat_l. apply lt_INR_0; apply neq_O_lt. assert (H2 := fact_neq_0 (2 * n + 1)). red in |- *; intro; elim H2; symmetry in |- *; assumption. do 2 rewrite S_INR; rewrite plus_INR; rewrite mult_INR; set (x := INR n); unfold INR in |- *. replace ((2 * x + 1 + 1 + 1) * (2 * x + 1 + 1)) with (4 * x * x + 10 * x + 6); [ idtac | ring ]. apply Rplus_lt_reg_r with (-4); rewrite Rplus_opp_l; replace (-4 + (4 * x * x + 10 * x + 6)) with (4 * x * x + 10 * x + 2); [ idtac | ring ]. apply Rplus_le_lt_0_compat. cut (0 <= x). intro; apply Rplus_le_le_0_compat; repeat apply Rmult_le_pos; assumption || left; prove_sup. unfold x in |- *; replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. prove_sup0. ring. apply INR_fact_neq_0. apply INR_fact_neq_0. ring. Qed. Lemma SIN : forall a:R, 0 <= a -> a <= PI -> sin_lb a <= sin a <= sin_ub a. intros; unfold sin_lb, sin_ub in |- *; apply (sin_bound a 1 H H0). Qed. Lemma COS : forall a:R, - PI / 2 <= a -> a <= PI / 2 -> cos_lb a <= cos a <= cos_ub a. intros; unfold cos_lb, cos_ub in |- *; apply (cos_bound a 1 H H0). Qed. (**********) Lemma _PI2_RLT_0 : - (PI / 2) < 0. Proof. rewrite <- Ropp_0; apply Ropp_lt_contravar; apply PI2_RGT_0. Qed. Lemma PI4_RLT_PI2 : PI / 4 < PI / 2. Proof. unfold Rdiv in |- *; apply Rmult_lt_compat_l. apply PI_RGT_0. apply Rinv_lt_contravar. apply Rmult_lt_0_compat; prove_sup0. pattern 2 at 1 in |- *; rewrite <- Rplus_0_r. replace 4 with (2 + 2); [ apply Rplus_lt_compat_l; prove_sup0 | ring ]. Qed. Lemma PI2_Rlt_PI : PI / 2 < PI. Proof. unfold Rdiv in |- *; pattern PI at 2 in |- *; rewrite <- Rmult_1_r. apply Rmult_lt_compat_l. apply PI_RGT_0. 
pattern 1 at 3 in |- *; rewrite <- Rinv_1; apply Rinv_lt_contravar. rewrite Rmult_1_l; prove_sup0. pattern 1 at 1 in |- *; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rlt_0_1. Qed. (***************************************************) (** * Increasing and decreasing of [cos] and [sin] *) (***************************************************) Theorem sin_gt_0 : forall x:R, 0 < x -> x < PI -> 0 < sin x. Proof. intros; elim (SIN x (Rlt_le 0 x H) (Rlt_le x PI H0)); intros H1 _; case (Rtotal_order x (PI / 2)); intro H2. apply Rlt_le_trans with (sin_lb x). apply sin_lb_gt_0; [ assumption | left; assumption ]. assumption. elim H2; intro H3. rewrite H3; rewrite sin_PI2; apply Rlt_0_1. rewrite <- sin_PI_x; generalize (Ropp_gt_lt_contravar x (PI / 2) H3); intro H4; generalize (Rplus_lt_compat_l PI (- x) (- (PI / 2)) H4). replace (PI + - x) with (PI - x). replace (PI + - (PI / 2)) with (PI / 2). intro H5; generalize (Ropp_lt_gt_contravar x PI H0); intro H6; change (- PI < - x) in H6; generalize (Rplus_lt_compat_l PI (- PI) (- x) H6). rewrite Rplus_opp_r. replace (PI + - x) with (PI - x). intro H7; elim (SIN (PI - x) (Rlt_le 0 (PI - x) H7) (Rlt_le (PI - x) PI (Rlt_trans (PI - x) (PI / 2) PI H5 PI2_Rlt_PI))); intros H8 _; generalize (sin_lb_gt_0 (PI - x) H7 (Rlt_le (PI - x) (PI / 2) H5)); intro H9; apply (Rlt_le_trans 0 (sin_lb (PI - x)) (sin (PI - x)) H9 H8). reflexivity. pattern PI at 2 in |- *; rewrite double_var; ring. reflexivity. Qed. Theorem cos_gt_0 : forall x:R, - (PI / 2) < x -> x < PI / 2 -> 0 < cos x. Proof. intros; rewrite cos_sin; generalize (Rplus_lt_compat_l (PI / 2) (- (PI / 2)) x H). rewrite Rplus_opp_r; intro H1; generalize (Rplus_lt_compat_l (PI / 2) x (PI / 2) H0); rewrite <- double_var; intro H2; apply (sin_gt_0 (PI / 2 + x) H1 H2). Qed. Lemma sin_ge_0 : forall x:R, 0 <= x -> x <= PI -> 0 <= sin x. Proof. intros x H1 H2; elim H1; intro H3; [ elim H2; intro H4; [ left; apply (sin_gt_0 x H3 H4) | rewrite H4; right; symmetry in |- *; apply sin_PI ] | rewrite <- H3; right; symmetry in |- *; apply sin_0 ]. Qed. Lemma cos_ge_0 : forall x:R, - (PI / 2) <= x -> x <= PI / 2 -> 0 <= cos x. Proof. intros x H1 H2; elim H1; intro H3; [ elim H2; intro H4; [ left; apply (cos_gt_0 x H3 H4) | rewrite H4; right; symmetry in |- *; apply cos_PI2 ] | rewrite <- H3; rewrite cos_neg; right; symmetry in |- *; apply cos_PI2 ]. Qed. Lemma sin_le_0 : forall x:R, PI <= x -> x <= 2 * PI -> sin x <= 0. Proof. intros x H1 H2; apply Rge_le; rewrite <- Ropp_0; rewrite <- (Ropp_involutive (sin x)); apply Ropp_le_ge_contravar; rewrite <- neg_sin; replace (x + PI) with (x - PI + 2 * INR 1 * PI); [ rewrite (sin_period (x - PI) 1); apply sin_ge_0; [ replace (x - PI) with (x + - PI); [ rewrite Rplus_comm; replace 0 with (- PI + PI); [ apply Rplus_le_compat_l; assumption | ring ] | ring ] | replace (x - PI) with (x + - PI); rewrite Rplus_comm; [ pattern PI at 2 in |- *; replace PI with (- PI + 2 * PI); [ apply Rplus_le_compat_l; assumption | ring ] | ring ] ] | unfold INR in |- *; ring ]. Qed. Lemma cos_le_0 : forall x:R, PI / 2 <= x -> x <= 3 * (PI / 2) -> cos x <= 0. Proof. intros x H1 H2; apply Rge_le; rewrite <- Ropp_0; rewrite <- (Ropp_involutive (cos x)); apply Ropp_le_ge_contravar; rewrite <- neg_cos; replace (x + PI) with (x - PI + 2 * INR 1 * PI). rewrite cos_period; apply cos_ge_0. replace (- (PI / 2)) with (- PI + PI / 2). unfold Rminus in |- *; rewrite (Rplus_comm x); apply Rplus_le_compat_l; assumption. pattern PI at 1 in |- *; rewrite (double_var PI); rewrite Ropp_plus_distr; ring. 
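(* Reduction behind [sin_le_0] and [cos_le_0] (sketch, not used by the script):
   for PI <= x <= 2 * PI,
     sin x = - sin (x + PI)                  (neg_sin)
           = - sin ((x - PI) + 2 * 1 * PI)   (since x + PI = (x - PI) + 2 * PI)
           = - sin (x - PI)                  (sin_period)
   with 0 <= x - PI <= PI, so sin (x - PI) >= 0 by sin_ge_0 and hence
   sin x <= 0.  The cosine statement follows the same pattern with neg_cos
   and cos_period. *)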
unfold Rminus in |- *; rewrite Rplus_comm; replace (PI / 2) with (- PI + 3 * (PI / 2)). apply Rplus_le_compat_l; assumption. pattern PI at 1 in |- *; rewrite (double_var PI); rewrite Ropp_plus_distr; ring. unfold INR in |- *; ring. Qed. Lemma sin_lt_0 : forall x:R, PI < x -> x < 2 * PI -> sin x < 0. Proof. intros x H1 H2; rewrite <- Ropp_0; rewrite <- (Ropp_involutive (sin x)); apply Ropp_lt_gt_contravar; rewrite <- neg_sin; replace (x + PI) with (x - PI + 2 * INR 1 * PI); [ rewrite (sin_period (x - PI) 1); apply sin_gt_0; [ replace (x - PI) with (x + - PI); [ rewrite Rplus_comm; replace 0 with (- PI + PI); [ apply Rplus_lt_compat_l; assumption | ring ] | ring ] | replace (x - PI) with (x + - PI); rewrite Rplus_comm; [ pattern PI at 2 in |- *; replace PI with (- PI + 2 * PI); [ apply Rplus_lt_compat_l; assumption | ring ] | ring ] ] | unfold INR in |- *; ring ]. Qed. Lemma sin_lt_0_var : forall x:R, - PI < x -> x < 0 -> sin x < 0. Proof. intros; generalize (Rplus_lt_compat_l (2 * PI) (- PI) x H); replace (2 * PI + - PI) with PI; [ intro H1; rewrite Rplus_comm in H1; generalize (Rplus_lt_compat_l (2 * PI) x 0 H0); intro H2; rewrite (Rplus_comm (2 * PI)) in H2; rewrite <- (Rplus_comm 0) in H2; rewrite Rplus_0_l in H2; rewrite <- (sin_period x 1); unfold INR in |- *; replace (2 * 1 * PI) with (2 * PI); [ apply (sin_lt_0 (x + 2 * PI) H1 H2) | ring ] | ring ]. Qed. Lemma cos_lt_0 : forall x:R, PI / 2 < x -> x < 3 * (PI / 2) -> cos x < 0. Proof. intros x H1 H2; rewrite <- Ropp_0; rewrite <- (Ropp_involutive (cos x)); apply Ropp_lt_gt_contravar; rewrite <- neg_cos; replace (x + PI) with (x - PI + 2 * INR 1 * PI). rewrite cos_period; apply cos_gt_0. replace (- (PI / 2)) with (- PI + PI / 2). unfold Rminus in |- *; rewrite (Rplus_comm x); apply Rplus_lt_compat_l; assumption. pattern PI at 1 in |- *; rewrite (double_var PI); rewrite Ropp_plus_distr; ring. unfold Rminus in |- *; rewrite Rplus_comm; replace (PI / 2) with (- PI + 3 * (PI / 2)). apply Rplus_lt_compat_l; assumption. pattern PI at 1 in |- *; rewrite (double_var PI); rewrite Ropp_plus_distr; ring. unfold INR in |- *; ring. Qed. Lemma tan_gt_0 : forall x:R, 0 < x -> x < PI / 2 -> 0 < tan x. Proof. intros x H1 H2; unfold tan in |- *; generalize _PI2_RLT_0; generalize (Rlt_trans 0 x (PI / 2) H1 H2); intros; generalize (Rlt_trans (- (PI / 2)) 0 x H0 H1); intro H5; generalize (Rlt_trans x (PI / 2) PI H2 PI2_Rlt_PI); intro H7; unfold Rdiv in |- *; apply Rmult_lt_0_compat. apply sin_gt_0; assumption. apply Rinv_0_lt_compat; apply cos_gt_0; assumption. Qed. Lemma tan_lt_0 : forall x:R, - (PI / 2) < x -> x < 0 -> tan x < 0. Proof. intros x H1 H2; unfold tan in |- *; generalize (cos_gt_0 x H1 (Rlt_trans x 0 (PI / 2) H2 PI2_RGT_0)); intro H3; rewrite <- Ropp_0; replace (sin x / cos x) with (- (- sin x / cos x)). rewrite <- sin_neg; apply Ropp_gt_lt_contravar; change (0 < sin (- x) / cos x) in |- *; unfold Rdiv in |- *; apply Rmult_lt_0_compat. apply sin_gt_0. rewrite <- Ropp_0; apply Ropp_gt_lt_contravar; assumption. apply Rlt_trans with (PI / 2). rewrite <- (Ropp_involutive (PI / 2)); apply Ropp_gt_lt_contravar; assumption. apply PI2_Rlt_PI. apply Rinv_0_lt_compat; assumption. unfold Rdiv in |- *; ring. Qed. Lemma cos_ge_0_3PI2 : forall x:R, 3 * (PI / 2) <= x -> x <= 2 * PI -> 0 <= cos x. Proof. intros; rewrite <- cos_neg; rewrite <- (cos_period (- x) 1); unfold INR in |- *; replace (- x + 2 * 1 * PI) with (2 * PI - x). 
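(* Reduction used in [cos_ge_0_3PI2] (sketch): for 3 * (PI / 2) <= x <= 2 * PI,
     cos x = cos (- x)               (cos_neg)
           = cos (- x + 2 * 1 * PI)  (cos_period)
           = cos (2 * PI - x),
   and 0 <= 2 * PI - x <= PI / 2, which lies inside [- (PI / 2), PI / 2], so
   cos_ge_0 applies to 2 * PI - x. *)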
generalize (Ropp_le_ge_contravar x (2 * PI) H0); intro H1; generalize (Rge_le (- x) (- (2 * PI)) H1); clear H1; intro H1; generalize (Rplus_le_compat_l (2 * PI) (- (2 * PI)) (- x) H1). rewrite Rplus_opp_r. intro H2; generalize (Ropp_le_ge_contravar (3 * (PI / 2)) x H); intro H3; generalize (Rge_le (- (3 * (PI / 2))) (- x) H3); clear H3; intro H3; generalize (Rplus_le_compat_l (2 * PI) (- x) (- (3 * (PI / 2))) H3). replace (2 * PI + - (3 * (PI / 2))) with (PI / 2). intro H4; apply (cos_ge_0 (2 * PI - x) (Rlt_le (- (PI / 2)) (2 * PI - x) (Rlt_le_trans (- (PI / 2)) 0 (2 * PI - x) _PI2_RLT_0 H2)) H4). rewrite double; pattern PI at 2 3 in |- *; rewrite double_var; ring. ring. Qed. Lemma form1 : forall p q:R, cos p + cos q = 2 * cos ((p - q) / 2) * cos ((p + q) / 2). Proof. intros p q; pattern p at 1 in |- *; replace p with ((p - q) / 2 + (p + q) / 2). rewrite <- (cos_neg q); replace (- q) with ((p - q) / 2 - (p + q) / 2). rewrite cos_plus; rewrite cos_minus; ring. pattern q at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. pattern p at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. Qed. Lemma form2 : forall p q:R, cos p - cos q = -2 * sin ((p - q) / 2) * sin ((p + q) / 2). Proof. intros p q; pattern p at 1 in |- *; replace p with ((p - q) / 2 + (p + q) / 2). rewrite <- (cos_neg q); replace (- q) with ((p - q) / 2 - (p + q) / 2). rewrite cos_plus; rewrite cos_minus; ring. pattern q at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. pattern p at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. Qed. Lemma form3 : forall p q:R, sin p + sin q = 2 * cos ((p - q) / 2) * sin ((p + q) / 2). Proof. intros p q; pattern p at 1 in |- *; replace p with ((p - q) / 2 + (p + q) / 2). pattern q at 3 in |- *; replace q with ((p + q) / 2 - (p - q) / 2). rewrite sin_plus; rewrite sin_minus; ring. pattern q at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. pattern p at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. Qed. Lemma form4 : forall p q:R, sin p - sin q = 2 * cos ((p + q) / 2) * sin ((p - q) / 2). Proof. intros p q; pattern p at 1 in |- *; replace p with ((p - q) / 2 + (p + q) / 2). pattern q at 3 in |- *; replace q with ((p + q) / 2 - (p - q) / 2). rewrite sin_plus; rewrite sin_minus; ring. pattern q at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. pattern p at 3 in |- *; rewrite double_var; unfold Rdiv in |- *; ring. Qed. Lemma sin_increasing_0 : forall x y:R, - (PI / 2) <= x -> x <= PI / 2 -> - (PI / 2) <= y -> y <= PI / 2 -> sin x < sin y -> x < y. Proof. intros; cut (sin ((x - y) / 2) < 0). intro H4; case (Rtotal_order ((x - y) / 2) 0); intro H5. assert (Hyp : 0 < 2). prove_sup0. generalize (Rmult_lt_compat_l 2 ((x - y) / 2) 0 Hyp H5). unfold Rdiv in |- *. rewrite <- Rmult_assoc. rewrite Rinv_r_simpl_m. rewrite Rmult_0_r. clear H5; intro H5; apply Rminus_lt; assumption. discrR. elim H5; intro H6. rewrite H6 in H4; rewrite sin_0 in H4; elim (Rlt_irrefl 0 H4). change (0 < (x - y) / 2) in H6; generalize (Ropp_le_ge_contravar (- (PI / 2)) y H1). rewrite Ropp_involutive. intro H7; generalize (Rge_le (PI / 2) (- y) H7); clear H7; intro H7; generalize (Rplus_le_compat x (PI / 2) (- y) (PI / 2) H0 H7). rewrite <- double_var. intro H8. assert (Hyp : 0 < 2). prove_sup0. generalize (Rmult_le_compat_l (/ 2) (x - y) PI (Rlt_le 0 (/ 2) (Rinv_0_lt_compat 2 Hyp)) H8). repeat rewrite (Rmult_comm (/ 2)). 
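(* Where form1-form4 come from (sketch): write
     p = (p - q) / 2 + (p + q) / 2   and   q = (p + q) / 2 - (p - q) / 2
   and expand with sin_plus, sin_minus, cos_plus, cos_minus.  For instance
     sin p - sin q = 2 * cos ((p + q) / 2) * sin ((p - q) / 2)   (form4),
   the identity driving the monotonicity proof unfolding here: on the interval
   under consideration the cosine factor is positive, so sin p - sin q takes
   the sign of sin ((p - q) / 2). *)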
intro H9; generalize (sin_gt_0 ((x - y) / 2) H6 (Rle_lt_trans ((x - y) / 2) (PI / 2) PI H9 PI2_Rlt_PI)); intro H10; elim (Rlt_irrefl (sin ((x - y) / 2)) (Rlt_trans (sin ((x - y) / 2)) 0 (sin ((x - y) / 2)) H4 H10)). generalize (Rlt_minus (sin x) (sin y) H3); clear H3; intro H3; rewrite form4 in H3; generalize (Rplus_le_compat x (PI / 2) y (PI / 2) H0 H2). rewrite <- double_var. assert (Hyp : 0 < 2). prove_sup0. intro H4; generalize (Rmult_le_compat_l (/ 2) (x + y) PI (Rlt_le 0 (/ 2) (Rinv_0_lt_compat 2 Hyp)) H4). repeat rewrite (Rmult_comm (/ 2)). clear H4; intro H4; generalize (Rplus_le_compat (- (PI / 2)) x (- (PI / 2)) y H H1); replace (- (PI / 2) + - (PI / 2)) with (- PI). intro H5; generalize (Rmult_le_compat_l (/ 2) (- PI) (x + y) (Rlt_le 0 (/ 2) (Rinv_0_lt_compat 2 Hyp)) H5). replace (/ 2 * (x + y)) with ((x + y) / 2). replace (/ 2 * - PI) with (- (PI / 2)). clear H5; intro H5; elim H4; intro H40. elim H5; intro H50. generalize (cos_gt_0 ((x + y) / 2) H50 H40); intro H6; generalize (Rmult_lt_compat_l 2 0 (cos ((x + y) / 2)) Hyp H6). rewrite Rmult_0_r. clear H6; intro H6; case (Rcase_abs (sin ((x - y) / 2))); intro H7. assumption. generalize (Rge_le (sin ((x - y) / 2)) 0 H7); clear H7; intro H7; generalize (Rmult_le_pos (2 * cos ((x + y) / 2)) (sin ((x - y) / 2)) (Rlt_le 0 (2 * cos ((x + y) / 2)) H6) H7); intro H8; generalize (Rle_lt_trans 0 (2 * cos ((x + y) / 2) * sin ((x - y) / 2)) 0 H8 H3); intro H9; elim (Rlt_irrefl 0 H9). rewrite <- H50 in H3; rewrite cos_neg in H3; rewrite cos_PI2 in H3; rewrite Rmult_0_r in H3; rewrite Rmult_0_l in H3; elim (Rlt_irrefl 0 H3). unfold Rdiv in H3. rewrite H40 in H3; assert (H50 := cos_PI2); unfold Rdiv in H50; rewrite H50 in H3; rewrite Rmult_0_r in H3; rewrite Rmult_0_l in H3; elim (Rlt_irrefl 0 H3). unfold Rdiv in |- *. rewrite <- Ropp_mult_distr_l_reverse. apply Rmult_comm. unfold Rdiv in |- *; apply Rmult_comm. pattern PI at 1 in |- *; rewrite double_var. rewrite Ropp_plus_distr. reflexivity. Qed. Lemma sin_increasing_1 : forall x y:R, - (PI / 2) <= x -> x <= PI / 2 -> - (PI / 2) <= y -> y <= PI / 2 -> x < y -> sin x < sin y. Proof. intros; generalize (Rplus_lt_compat_l x x y H3); intro H4; generalize (Rplus_le_compat (- (PI / 2)) x (- (PI / 2)) x H H); replace (- (PI / 2) + - (PI / 2)) with (- PI). assert (Hyp : 0 < 2). prove_sup0. intro H5; generalize (Rle_lt_trans (- PI) (x + x) (x + y) H5 H4); intro H6; generalize (Rmult_lt_compat_l (/ 2) (- PI) (x + y) (Rinv_0_lt_compat 2 Hyp) H6); replace (/ 2 * - PI) with (- (PI / 2)). replace (/ 2 * (x + y)) with ((x + y) / 2). clear H4 H5 H6; intro H4; generalize (Rplus_lt_compat_l y x y H3); intro H5; rewrite Rplus_comm in H5; generalize (Rplus_le_compat y (PI / 2) y (PI / 2) H2 H2). rewrite <- double_var. intro H6; generalize (Rlt_le_trans (x + y) (y + y) PI H5 H6); intro H7; generalize (Rmult_lt_compat_l (/ 2) (x + y) PI (Rinv_0_lt_compat 2 Hyp) H7); replace (/ 2 * PI) with (PI / 2). replace (/ 2 * (x + y)) with ((x + y) / 2). clear H5 H6 H7; intro H5; generalize (Ropp_le_ge_contravar (- (PI / 2)) y H1); rewrite Ropp_involutive; clear H1; intro H1; generalize (Rge_le (PI / 2) (- y) H1); clear H1; intro H1; generalize (Ropp_le_ge_contravar y (PI / 2) H2); clear H2; intro H2; generalize (Rge_le (- y) (- (PI / 2)) H2); clear H2; intro H2; generalize (Rplus_lt_compat_l (- y) x y H3); replace (- y + x) with (x - y). rewrite Rplus_opp_l. intro H6; generalize (Rmult_lt_compat_l (/ 2) (x - y) 0 (Rinv_0_lt_compat 2 Hyp) H6); rewrite Rmult_0_r; replace (/ 2 * (x - y)) with ((x - y) / 2). 
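(* Bound bookkeeping for [sin_increasing_1] (sketch): from
   - (PI / 2) <= x < y <= PI / 2 one gets
     - PI < x + y < PI,    hence   - (PI / 2) < (x + y) / 2 < PI / 2,
     - PI <= x - y < 0,    hence   - (PI / 2) <= (x - y) / 2 < 0,
   so cos ((x + y) / 2) > 0 (cos_gt_0) and sin ((x - y) / 2) < 0
   (sin_lt_0_var), and form4 gives
     sin x - sin y = 2 * cos ((x + y) / 2) * sin ((x - y) / 2) < 0. *)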
clear H6; intro H6; generalize (Rplus_le_compat (- (PI / 2)) x (- (PI / 2)) (- y) H H2); replace (- (PI / 2) + - (PI / 2)) with (- PI). replace (x + - y) with (x - y). intro H7; generalize (Rmult_le_compat_l (/ 2) (- PI) (x - y) (Rlt_le 0 (/ 2) (Rinv_0_lt_compat 2 Hyp)) H7); replace (/ 2 * - PI) with (- (PI / 2)). replace (/ 2 * (x - y)) with ((x - y) / 2). clear H7; intro H7; clear H H0 H1 H2; apply Rminus_lt; rewrite form4; generalize (cos_gt_0 ((x + y) / 2) H4 H5); intro H8; generalize (Rmult_lt_0_compat 2 (cos ((x + y) / 2)) Hyp H8); clear H8; intro H8; cut (- PI < - (PI / 2)). intro H9; generalize (sin_lt_0_var ((x - y) / 2) (Rlt_le_trans (- PI) (- (PI / 2)) ((x - y) / 2) H9 H7) H6); intro H10; generalize (Rmult_lt_gt_compat_neg_l (sin ((x - y) / 2)) 0 ( 2 * cos ((x + y) / 2)) H10 H8); intro H11; rewrite Rmult_0_r in H11; rewrite Rmult_comm; assumption. apply Ropp_lt_gt_contravar; apply PI2_Rlt_PI. unfold Rdiv in |- *; apply Rmult_comm. unfold Rdiv in |- *; rewrite <- Ropp_mult_distr_l_reverse; apply Rmult_comm. reflexivity. pattern PI at 1 in |- *; rewrite double_var. rewrite Ropp_plus_distr. reflexivity. unfold Rdiv in |- *; apply Rmult_comm. unfold Rminus in |- *; apply Rplus_comm. unfold Rdiv in |- *; apply Rmult_comm. unfold Rdiv in |- *; apply Rmult_comm. unfold Rdiv in |- *; apply Rmult_comm. unfold Rdiv in |- *. rewrite <- Ropp_mult_distr_l_reverse. apply Rmult_comm. pattern PI at 1 in |- *; rewrite double_var. rewrite Ropp_plus_distr. reflexivity. Qed. Lemma sin_decreasing_0 : forall x y:R, x <= 3 * (PI / 2) -> PI / 2 <= x -> y <= 3 * (PI / 2) -> PI / 2 <= y -> sin x < sin y -> y < x. Proof. intros; rewrite <- (sin_PI_x x) in H3; rewrite <- (sin_PI_x y) in H3; generalize (Ropp_lt_gt_contravar (sin (PI - x)) (sin (PI - y)) H3); repeat rewrite <- sin_neg; generalize (Rplus_le_compat_l (- PI) x (3 * (PI / 2)) H); generalize (Rplus_le_compat_l (- PI) (PI / 2) x H0); generalize (Rplus_le_compat_l (- PI) y (3 * (PI / 2)) H1); generalize (Rplus_le_compat_l (- PI) (PI / 2) y H2); replace (- PI + x) with (x - PI). replace (- PI + PI / 2) with (- (PI / 2)). replace (- PI + y) with (y - PI). replace (- PI + 3 * (PI / 2)) with (PI / 2). replace (- (PI - x)) with (x - PI). replace (- (PI - y)) with (y - PI). intros; change (sin (y - PI) < sin (x - PI)) in H8; apply Rplus_lt_reg_r with (- PI); rewrite Rplus_comm; replace (y + - PI) with (y - PI). rewrite Rplus_comm; replace (x + - PI) with (x - PI). apply (sin_increasing_0 (y - PI) (x - PI) H4 H5 H6 H7 H8). reflexivity. reflexivity. unfold Rminus in |- *; rewrite Ropp_plus_distr. rewrite Ropp_involutive. apply Rplus_comm. unfold Rminus in |- *; rewrite Ropp_plus_distr. rewrite Ropp_involutive. apply Rplus_comm. pattern PI at 2 in |- *; rewrite double_var. rewrite Ropp_plus_distr. ring. unfold Rminus in |- *; apply Rplus_comm. pattern PI at 2 in |- *; rewrite double_var. rewrite Ropp_plus_distr. ring. unfold Rminus in |- *; apply Rplus_comm. Qed. Lemma sin_decreasing_1 : forall x y:R, x <= 3 * (PI / 2) -> PI / 2 <= x -> y <= 3 * (PI / 2) -> PI / 2 <= y -> x < y -> sin y < sin x. Proof. intros; rewrite <- (sin_PI_x x); rewrite <- (sin_PI_x y); generalize (Rplus_le_compat_l (- PI) x (3 * (PI / 2)) H); generalize (Rplus_le_compat_l (- PI) (PI / 2) x H0); generalize (Rplus_le_compat_l (- PI) y (3 * (PI / 2)) H1); generalize (Rplus_le_compat_l (- PI) (PI / 2) y H2); generalize (Rplus_lt_compat_l (- PI) x y H3); replace (- PI + PI / 2) with (- (PI / 2)). replace (- PI + y) with (y - PI). replace (- PI + 3 * (PI / 2)) with (PI / 2). 
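(* Reflection behind the decreasing lemmas (sketch): sin x = sin (PI - x)
   (sin_PI_x) and sin (x - PI) = - sin x, while x |-> x - PI maps
   [PI / 2, 3 * (PI / 2)] onto [- (PI / 2), PI / 2].  So for x < y in the
   former interval, sin_increasing_1 gives sin (x - PI) < sin (y - PI), i.e.
   - sin x < - sin y, hence sin y < sin x, which is what is being assembled
   here. *)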
replace (- PI + x) with (x - PI). intros; apply Ropp_lt_cancel; repeat rewrite <- sin_neg; replace (- (PI - x)) with (x - PI). replace (- (PI - y)) with (y - PI). apply (sin_increasing_1 (x - PI) (y - PI) H7 H8 H5 H6 H4). unfold Rminus in |- *; rewrite Ropp_plus_distr. rewrite Ropp_involutive. apply Rplus_comm. unfold Rminus in |- *; rewrite Ropp_plus_distr. rewrite Ropp_involutive. apply Rplus_comm. unfold Rminus in |- *; apply Rplus_comm. pattern PI at 2 in |- *; rewrite double_var; ring. unfold Rminus in |- *; apply Rplus_comm. pattern PI at 2 in |- *; rewrite double_var; ring. Qed. Lemma cos_increasing_0 : forall x y:R, PI <= x -> x <= 2 * PI -> PI <= y -> y <= 2 * PI -> cos x < cos y -> x < y. Proof. intros x y H1 H2 H3 H4; rewrite <- (cos_neg x); rewrite <- (cos_neg y); rewrite <- (cos_period (- x) 1); rewrite <- (cos_period (- y) 1); unfold INR in |- *; replace (- x + 2 * 1 * PI) with (PI / 2 - (x - 3 * (PI / 2))). replace (- y + 2 * 1 * PI) with (PI / 2 - (y - 3 * (PI / 2))). repeat rewrite cos_shift; intro H5; generalize (Rplus_le_compat_l (-3 * (PI / 2)) PI x H1); generalize (Rplus_le_compat_l (-3 * (PI / 2)) x (2 * PI) H2); generalize (Rplus_le_compat_l (-3 * (PI / 2)) PI y H3); generalize (Rplus_le_compat_l (-3 * (PI / 2)) y (2 * PI) H4). replace (-3 * (PI / 2) + y) with (y - 3 * (PI / 2)). replace (-3 * (PI / 2) + x) with (x - 3 * (PI / 2)). replace (-3 * (PI / 2) + 2 * PI) with (PI / 2). replace (-3 * (PI / 2) + PI) with (- (PI / 2)). clear H1 H2 H3 H4; intros H1 H2 H3 H4; apply Rplus_lt_reg_r with (-3 * (PI / 2)); replace (-3 * (PI / 2) + x) with (x - 3 * (PI / 2)). replace (-3 * (PI / 2) + y) with (y - 3 * (PI / 2)). apply (sin_increasing_0 (x - 3 * (PI / 2)) (y - 3 * (PI / 2)) H4 H3 H2 H1 H5). unfold Rminus in |- *. rewrite Ropp_mult_distr_l_reverse. apply Rplus_comm. unfold Rminus in |- *. rewrite Ropp_mult_distr_l_reverse. apply Rplus_comm. pattern PI at 3 in |- *; rewrite double_var. ring. rewrite double; pattern PI at 3 4 in |- *; rewrite double_var. ring. unfold Rminus in |- *. rewrite Ropp_mult_distr_l_reverse. apply Rplus_comm. unfold Rminus in |- *. rewrite Ropp_mult_distr_l_reverse. apply Rplus_comm. rewrite Rmult_1_r. rewrite (double PI); pattern PI at 3 4 in |- *; rewrite double_var. ring. rewrite Rmult_1_r. rewrite (double PI); pattern PI at 3 4 in |- *; rewrite double_var. ring. Qed. Lemma cos_increasing_1 : forall x y:R, PI <= x -> x <= 2 * PI -> PI <= y -> y <= 2 * PI -> x < y -> cos x < cos y. Proof. intros x y H1 H2 H3 H4 H5; generalize (Rplus_le_compat_l (-3 * (PI / 2)) PI x H1); generalize (Rplus_le_compat_l (-3 * (PI / 2)) x (2 * PI) H2); generalize (Rplus_le_compat_l (-3 * (PI / 2)) PI y H3); generalize (Rplus_le_compat_l (-3 * (PI / 2)) y (2 * PI) H4); generalize (Rplus_lt_compat_l (-3 * (PI / 2)) x y H5); rewrite <- (cos_neg x); rewrite <- (cos_neg y); rewrite <- (cos_period (- x) 1); rewrite <- (cos_period (- y) 1); unfold INR in |- *; replace (-3 * (PI / 2) + x) with (x - 3 * (PI / 2)). replace (-3 * (PI / 2) + y) with (y - 3 * (PI / 2)). replace (-3 * (PI / 2) + PI) with (- (PI / 2)). replace (-3 * (PI / 2) + 2 * PI) with (PI / 2). clear H1 H2 H3 H4 H5; intros H1 H2 H3 H4 H5; replace (- x + 2 * 1 * PI) with (PI / 2 - (x - 3 * (PI / 2))). replace (- y + 2 * 1 * PI) with (PI / 2 - (y - 3 * (PI / 2))). repeat rewrite cos_shift; apply (sin_increasing_1 (x - 3 * (PI / 2)) (y - 3 * (PI / 2)) H5 H4 H3 H2 H1). rewrite Rmult_1_r. rewrite (double PI); pattern PI at 3 4 in |- *; rewrite double_var. ring. rewrite Rmult_1_r. 
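(* Shift behind the [cos_increasing_*] lemmas (sketch):
     cos x = cos (- x + 2 * PI)                  (cos_neg, cos_period)
           = cos (PI / 2 - (x - 3 * (PI / 2)))
           = sin (x - 3 * (PI / 2))              (cos_shift),
   and x |-> x - 3 * (PI / 2) maps [PI, 2 * PI] onto [- (PI / 2), PI / 2],
   where sin is increasing; so cos is increasing on [PI, 2 * PI]. *)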
rewrite (double PI); pattern PI at 3 4 in |- *; rewrite double_var. ring. rewrite (double PI); pattern PI at 3 4 in |- *; rewrite double_var. ring. pattern PI at 3 in |- *; rewrite double_var; ring. unfold Rminus in |- *. rewrite <- Ropp_mult_distr_l_reverse. apply Rplus_comm. unfold Rminus in |- *. rewrite <- Ropp_mult_distr_l_reverse. apply Rplus_comm. Qed. Lemma cos_decreasing_0 : forall x y:R, 0 <= x -> x <= PI -> 0 <= y -> y <= PI -> cos x < cos y -> y < x. Proof. intros; generalize (Ropp_lt_gt_contravar (cos x) (cos y) H3); repeat rewrite <- neg_cos; intro H4; change (cos (y + PI) < cos (x + PI)) in H4; rewrite (Rplus_comm x) in H4; rewrite (Rplus_comm y) in H4; generalize (Rplus_le_compat_l PI 0 x H); generalize (Rplus_le_compat_l PI x PI H0); generalize (Rplus_le_compat_l PI 0 y H1); generalize (Rplus_le_compat_l PI y PI H2); rewrite Rplus_0_r. rewrite <- double. clear H H0 H1 H2 H3; intros; apply Rplus_lt_reg_r with PI; apply (cos_increasing_0 (PI + y) (PI + x) H0 H H2 H1 H4). Qed. Lemma cos_decreasing_1 : forall x y:R, 0 <= x -> x <= PI -> 0 <= y -> y <= PI -> x < y -> cos y < cos x. Proof. intros; apply Ropp_lt_cancel; repeat rewrite <- neg_cos; rewrite (Rplus_comm x); rewrite (Rplus_comm y); generalize (Rplus_le_compat_l PI 0 x H); generalize (Rplus_le_compat_l PI x PI H0); generalize (Rplus_le_compat_l PI 0 y H1); generalize (Rplus_le_compat_l PI y PI H2); rewrite Rplus_0_r. rewrite <- double. generalize (Rplus_lt_compat_l PI x y H3); clear H H0 H1 H2 H3; intros; apply (cos_increasing_1 (PI + x) (PI + y) H3 H2 H1 H0 H). Qed. Lemma tan_diff : forall x y:R, cos x <> 0 -> cos y <> 0 -> tan x - tan y = sin (x - y) / (cos x * cos y). Proof. intros; unfold tan in |- *; rewrite sin_minus. unfold Rdiv in |- *. unfold Rminus in |- *. rewrite Rmult_plus_distr_r. rewrite Rinv_mult_distr. repeat rewrite (Rmult_comm (sin x)). repeat rewrite Rmult_assoc. rewrite (Rmult_comm (cos y)). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite (Rmult_comm (sin x)). apply Rplus_eq_compat_l. rewrite <- Ropp_mult_distr_l_reverse. rewrite <- Ropp_mult_distr_r_reverse. rewrite (Rmult_comm (/ cos x)). repeat rewrite Rmult_assoc. rewrite (Rmult_comm (cos x)). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. reflexivity. assumption. assumption. assumption. assumption. Qed. Lemma tan_increasing_0 : forall x y:R, - (PI / 4) <= x -> x <= PI / 4 -> - (PI / 4) <= y -> y <= PI / 4 -> tan x < tan y -> x < y. Proof. intros; generalize PI4_RLT_PI2; intro H4; generalize (Ropp_lt_gt_contravar (PI / 4) (PI / 2) H4); intro H5; change (- (PI / 2) < - (PI / 4)) in H5; generalize (cos_gt_0 x (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) x H5 H) (Rle_lt_trans x (PI / 4) (PI / 2) H0 H4)); intro HP1; generalize (cos_gt_0 y (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) y H5 H1) (Rle_lt_trans y (PI / 4) (PI / 2) H2 H4)); intro HP2; generalize (not_eq_sym (Rlt_not_eq 0 (cos x) (cos_gt_0 x (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) x H5 H) (Rle_lt_trans x (PI / 4) (PI / 2) H0 H4)))); intro H6; generalize (not_eq_sym (Rlt_not_eq 0 (cos y) (cos_gt_0 y (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) y H5 H1) (Rle_lt_trans y (PI / 4) (PI / 2) H2 H4)))); intro H7; generalize (tan_diff x y H6 H7); intro H8; generalize (Rlt_minus (tan x) (tan y) H3); clear H3; intro H3; rewrite H8 in H3; cut (sin (x - y) < 0). 
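(* Where [tan_diff] comes from (sketch): when cos x <> 0 and cos y <> 0,
     tan x - tan y = sin x / cos x - sin y / cos y
                   = (sin x * cos y - cos x * sin y) / (cos x * cos y)
                   = sin (x - y) / (cos x * cos y)        (sin_minus).
   Inside (- (PI / 2), PI / 2) the denominator is positive, so the sign of
   tan x - tan y is the sign of sin (x - y); the cut just introduced reduces
   the goal to that sign information. *)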
intro H9; generalize (Ropp_le_ge_contravar (- (PI / 4)) y H1); rewrite Ropp_involutive; intro H10; generalize (Rge_le (PI / 4) (- y) H10); clear H10; intro H10; generalize (Ropp_le_ge_contravar y (PI / 4) H2); intro H11; generalize (Rge_le (- y) (- (PI / 4)) H11); clear H11; intro H11; generalize (Rplus_le_compat (- (PI / 4)) x (- (PI / 4)) (- y) H H11); generalize (Rplus_le_compat x (PI / 4) (- y) (PI / 4) H0 H10); replace (x + - y) with (x - y). replace (PI / 4 + PI / 4) with (PI / 2). replace (- (PI / 4) + - (PI / 4)) with (- (PI / 2)). intros; case (Rtotal_order 0 (x - y)); intro H14. generalize (sin_gt_0 (x - y) H14 (Rle_lt_trans (x - y) (PI / 2) PI H12 PI2_Rlt_PI)); intro H15; elim (Rlt_irrefl 0 (Rlt_trans 0 (sin (x - y)) 0 H15 H9)). elim H14; intro H15. rewrite <- H15 in H9; rewrite sin_0 in H9; elim (Rlt_irrefl 0 H9). apply Rminus_lt; assumption. pattern PI at 1 in |- *; rewrite double_var. unfold Rdiv in |- *. rewrite Rmult_plus_distr_r. repeat rewrite Rmult_assoc. rewrite <- Rinv_mult_distr. rewrite Ropp_plus_distr. replace 4 with 4. reflexivity. ring. discrR. discrR. pattern PI at 1 in |- *; rewrite double_var. unfold Rdiv in |- *. rewrite Rmult_plus_distr_r. repeat rewrite Rmult_assoc. rewrite <- Rinv_mult_distr. replace 4 with 4. reflexivity. ring. discrR. discrR. reflexivity. case (Rcase_abs (sin (x - y))); intro H9. assumption. generalize (Rge_le (sin (x - y)) 0 H9); clear H9; intro H9; generalize (Rinv_0_lt_compat (cos x) HP1); intro H10; generalize (Rinv_0_lt_compat (cos y) HP2); intro H11; generalize (Rmult_lt_0_compat (/ cos x) (/ cos y) H10 H11); replace (/ cos x * / cos y) with (/ (cos x * cos y)). intro H12; generalize (Rmult_le_pos (sin (x - y)) (/ (cos x * cos y)) H9 (Rlt_le 0 (/ (cos x * cos y)) H12)); intro H13; elim (Rlt_irrefl 0 (Rle_lt_trans 0 (sin (x - y) * / (cos x * cos y)) 0 H13 H3)). rewrite Rinv_mult_distr. reflexivity. assumption. assumption. Qed. Lemma tan_increasing_1 : forall x y:R, - (PI / 4) <= x -> x <= PI / 4 -> - (PI / 4) <= y -> y <= PI / 4 -> x < y -> tan x < tan y. Proof. intros; apply Rminus_lt; generalize PI4_RLT_PI2; intro H4; generalize (Ropp_lt_gt_contravar (PI / 4) (PI / 2) H4); intro H5; change (- (PI / 2) < - (PI / 4)) in H5; generalize (cos_gt_0 x (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) x H5 H) (Rle_lt_trans x (PI / 4) (PI / 2) H0 H4)); intro HP1; generalize (cos_gt_0 y (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) y H5 H1) (Rle_lt_trans y (PI / 4) (PI / 2) H2 H4)); intro HP2; generalize (not_eq_sym (Rlt_not_eq 0 (cos x) (cos_gt_0 x (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) x H5 H) (Rle_lt_trans x (PI / 4) (PI / 2) H0 H4)))); intro H6; generalize (not_eq_sym (Rlt_not_eq 0 (cos y) (cos_gt_0 y (Rlt_le_trans (- (PI / 2)) (- (PI / 4)) y H5 H1) (Rle_lt_trans y (PI / 4) (PI / 2) H2 H4)))); intro H7; rewrite (tan_diff x y H6 H7); generalize (Rinv_0_lt_compat (cos x) HP1); intro H10; generalize (Rinv_0_lt_compat (cos y) HP2); intro H11; generalize (Rmult_lt_0_compat (/ cos x) (/ cos y) H10 H11); replace (/ cos x * / cos y) with (/ (cos x * cos y)). clear H10 H11; intro H8; generalize (Ropp_le_ge_contravar y (PI / 4) H2); intro H11; generalize (Rge_le (- y) (- (PI / 4)) H11); clear H11; intro H11; generalize (Rplus_le_compat (- (PI / 4)) x (- (PI / 4)) (- y) H H11); replace (x + - y) with (x - y). replace (- (PI / 4) + - (PI / 4)) with (- (PI / 2)). 
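(* Interval bookkeeping for the tangent lemmas (sketch): for x and y in
   [- (PI / 4), PI / 4] one has x - y in [- (PI / 2), PI / 2], and x, y lie
   strictly inside (- (PI / 2), PI / 2) because PI / 4 < PI / 2 (PI4_RLT_PI2);
   hence cos x > 0, cos y > 0 and tan_diff is applicable, as used both in
   tan_increasing_0 and in the present lemma. *)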
clear H11; intro H9; generalize (Rlt_minus x y H3); clear H3; intro H3; clear H H0 H1 H2 H4 H5 HP1 HP2; generalize PI2_Rlt_PI; intro H1; generalize (Ropp_lt_gt_contravar (PI / 2) PI H1); clear H1; intro H1; generalize (sin_lt_0_var (x - y) (Rlt_le_trans (- PI) (- (PI / 2)) (x - y) H1 H9) H3); intro H2; generalize (Rmult_lt_gt_compat_neg_l (sin (x - y)) 0 (/ (cos x * cos y)) H2 H8); rewrite Rmult_0_r; intro H4; assumption. pattern PI at 1 in |- *; rewrite double_var. unfold Rdiv in |- *. rewrite Rmult_plus_distr_r. repeat rewrite Rmult_assoc. rewrite <- Rinv_mult_distr. replace 4 with 4. rewrite Ropp_plus_distr. reflexivity. ring. discrR. discrR. reflexivity. apply Rinv_mult_distr; assumption. Qed. Lemma sin_incr_0 : forall x y:R, - (PI / 2) <= x -> x <= PI / 2 -> - (PI / 2) <= y -> y <= PI / 2 -> sin x <= sin y -> x <= y. Proof. intros; case (Rtotal_order (sin x) (sin y)); intro H4; [ left; apply (sin_increasing_0 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order x y); intro H6; [ left; assumption | elim H6; intro H7; [ right; assumption | generalize (sin_increasing_1 y x H1 H2 H H0 H7); intro H8; rewrite H5 in H8; elim (Rlt_irrefl (sin y) H8) ] ] | elim (Rlt_irrefl (sin x) (Rle_lt_trans (sin x) (sin y) (sin x) H3 H5)) ] ]. Qed. Lemma sin_incr_1 : forall x y:R, - (PI / 2) <= x -> x <= PI / 2 -> - (PI / 2) <= y -> y <= PI / 2 -> x <= y -> sin x <= sin y. Proof. intros; case (Rtotal_order x y); intro H4; [ left; apply (sin_increasing_1 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order (sin x) (sin y)); intro H6; [ left; assumption | elim H6; intro H7; [ right; assumption | generalize (sin_increasing_0 y x H1 H2 H H0 H7); intro H8; rewrite H5 in H8; elim (Rlt_irrefl y H8) ] ] | elim (Rlt_irrefl x (Rle_lt_trans x y x H3 H5)) ] ]. Qed. Lemma sin_decr_0 : forall x y:R, x <= 3 * (PI / 2) -> PI / 2 <= x -> y <= 3 * (PI / 2) -> PI / 2 <= y -> sin x <= sin y -> y <= x. Proof. intros; case (Rtotal_order (sin x) (sin y)); intro H4; [ left; apply (sin_decreasing_0 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order x y); intro H6; [ generalize (sin_decreasing_1 x y H H0 H1 H2 H6); intro H8; rewrite H5 in H8; elim (Rlt_irrefl (sin y) H8) | elim H6; intro H7; [ right; symmetry in |- *; assumption | left; assumption ] ] | elim (Rlt_irrefl (sin x) (Rle_lt_trans (sin x) (sin y) (sin x) H3 H5)) ] ]. Qed. Lemma sin_decr_1 : forall x y:R, x <= 3 * (PI / 2) -> PI / 2 <= x -> y <= 3 * (PI / 2) -> PI / 2 <= y -> x <= y -> sin y <= sin x. Proof. intros; case (Rtotal_order x y); intro H4; [ left; apply (sin_decreasing_1 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order (sin x) (sin y)); intro H6; [ generalize (sin_decreasing_0 x y H H0 H1 H2 H6); intro H8; rewrite H5 in H8; elim (Rlt_irrefl y H8) | elim H6; intro H7; [ right; symmetry in |- *; assumption | left; assumption ] ] | elim (Rlt_irrefl x (Rle_lt_trans x y x H3 H5)) ] ]. Qed. Lemma cos_incr_0 : forall x y:R, PI <= x -> x <= 2 * PI -> PI <= y -> y <= 2 * PI -> cos x <= cos y -> x <= y. Proof. intros; case (Rtotal_order (cos x) (cos y)); intro H4; [ left; apply (cos_increasing_0 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order x y); intro H6; [ left; assumption | elim H6; intro H7; [ right; assumption | generalize (cos_increasing_1 y x H1 H2 H H0 H7); intro H8; rewrite H5 in H8; elim (Rlt_irrefl (cos y) H8) ] ] | elim (Rlt_irrefl (cos x) (Rle_lt_trans (cos x) (cos y) (cos x) H3 H5)) ] ]. Qed. Lemma cos_incr_1 : forall x y:R, PI <= x -> x <= 2 * PI -> PI <= y -> y <= 2 * PI -> x <= y -> cos x <= cos y. 
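(* Pattern for the non-strict variants (sketch), used by [sin_incr_*],
   [sin_decr_*] above and the [cos_*]/[tan_*] versions below: combine the
   strict monotonicity lemma with Rtotal_order.  For the statement just above,
   if x <= y then either x < y, and cos_increasing_1 yields cos x < cos y,
   or x = y and both sides coincide; the remaining case y < x is excluded by
   the hypothesis. *)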
Proof. intros; case (Rtotal_order x y); intro H4; [ left; apply (cos_increasing_1 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order (cos x) (cos y)); intro H6; [ left; assumption | elim H6; intro H7; [ right; assumption | generalize (cos_increasing_0 y x H1 H2 H H0 H7); intro H8; rewrite H5 in H8; elim (Rlt_irrefl y H8) ] ] | elim (Rlt_irrefl x (Rle_lt_trans x y x H3 H5)) ] ]. Qed. Lemma cos_decr_0 : forall x y:R, 0 <= x -> x <= PI -> 0 <= y -> y <= PI -> cos x <= cos y -> y <= x. Proof. intros; case (Rtotal_order (cos x) (cos y)); intro H4; [ left; apply (cos_decreasing_0 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order x y); intro H6; [ generalize (cos_decreasing_1 x y H H0 H1 H2 H6); intro H8; rewrite H5 in H8; elim (Rlt_irrefl (cos y) H8) | elim H6; intro H7; [ right; symmetry in |- *; assumption | left; assumption ] ] | elim (Rlt_irrefl (cos x) (Rle_lt_trans (cos x) (cos y) (cos x) H3 H5)) ] ]. Qed. Lemma cos_decr_1 : forall x y:R, 0 <= x -> x <= PI -> 0 <= y -> y <= PI -> x <= y -> cos y <= cos x. Proof. intros; case (Rtotal_order x y); intro H4; [ left; apply (cos_decreasing_1 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order (cos x) (cos y)); intro H6; [ generalize (cos_decreasing_0 x y H H0 H1 H2 H6); intro H8; rewrite H5 in H8; elim (Rlt_irrefl y H8) | elim H6; intro H7; [ right; symmetry in |- *; assumption | left; assumption ] ] | elim (Rlt_irrefl x (Rle_lt_trans x y x H3 H5)) ] ]. Qed. Lemma tan_incr_0 : forall x y:R, - (PI / 4) <= x -> x <= PI / 4 -> - (PI / 4) <= y -> y <= PI / 4 -> tan x <= tan y -> x <= y. Proof. intros; case (Rtotal_order (tan x) (tan y)); intro H4; [ left; apply (tan_increasing_0 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order x y); intro H6; [ left; assumption | elim H6; intro H7; [ right; assumption | generalize (tan_increasing_1 y x H1 H2 H H0 H7); intro H8; rewrite H5 in H8; elim (Rlt_irrefl (tan y) H8) ] ] | elim (Rlt_irrefl (tan x) (Rle_lt_trans (tan x) (tan y) (tan x) H3 H5)) ] ]. Qed. Lemma tan_incr_1 : forall x y:R, - (PI / 4) <= x -> x <= PI / 4 -> - (PI / 4) <= y -> y <= PI / 4 -> x <= y -> tan x <= tan y. Proof. intros; case (Rtotal_order x y); intro H4; [ left; apply (tan_increasing_1 x y H H0 H1 H2 H4) | elim H4; intro H5; [ case (Rtotal_order (tan x) (tan y)); intro H6; [ left; assumption | elim H6; intro H7; [ right; assumption | generalize (tan_increasing_0 y x H1 H2 H H0 H7); intro H8; rewrite H5 in H8; elim (Rlt_irrefl y H8) ] ] | elim (Rlt_irrefl x (Rle_lt_trans x y x H3 H5)) ] ]. Qed. (**********) Lemma sin_eq_0_1 : forall x:R, (exists k : Z, x = IZR k * PI) -> sin x = 0. Proof. intros. elim H; intros. apply (Zcase_sign x0). intro. rewrite H1 in H0. simpl in H0. rewrite H0; rewrite Rmult_0_l; apply sin_0. intro. cut (0 <= x0)%Z. intro. elim (IZN x0 H2); intros. rewrite H3 in H0. rewrite <- INR_IZR_INZ in H0. rewrite H0. elim (even_odd_cor x1); intros. elim H4; intro. rewrite H5. rewrite mult_INR. simpl in |- *. rewrite <- (Rplus_0_l (2 * INR x2 * PI)). rewrite sin_period. apply sin_0. rewrite H5. rewrite S_INR; rewrite mult_INR. simpl in |- *. rewrite Rmult_plus_distr_r. rewrite Rmult_1_l; rewrite sin_plus. rewrite sin_PI. rewrite Rmult_0_r. rewrite <- (Rplus_0_l (2 * INR x2 * PI)). rewrite sin_period. rewrite sin_0; ring. apply le_IZR. left; apply IZR_lt. assert (H2 := Z.gt_lt_iff). elim (H2 x0 0%Z); intros. apply H3; assumption. intro. rewrite H0. replace (sin (IZR x0 * PI)) with (- sin (- IZR x0 * PI)). cut (0 <= - x0)%Z. intro. rewrite <- Ropp_Ropp_IZR. elim (IZN (- x0) H2); intros. 
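(* Worked instances behind [sin_eq_0_1] (sketch): for k = 2 * m,
     sin (IZR k * PI) = sin (0 + 2 * m * PI) = sin 0 = 0        (sin_period),
   and for k = 2 * m + 1,
     sin (IZR k * PI) = sin (PI + 2 * m * PI) = sin PI = 0.
   A negative k is reduced to this case through sin (- z) = - sin z, which is
   the branch being discharged here. *)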
rewrite H3. rewrite <- INR_IZR_INZ. elim (even_odd_cor x1); intros. elim H4; intro. rewrite H5. rewrite mult_INR. simpl in |- *. rewrite <- (Rplus_0_l (2 * INR x2 * PI)). rewrite sin_period. rewrite sin_0; ring. rewrite H5. rewrite S_INR; rewrite mult_INR. simpl in |- *. rewrite Rmult_plus_distr_r. rewrite Rmult_1_l; rewrite sin_plus. rewrite sin_PI. rewrite Rmult_0_r. rewrite <- (Rplus_0_l (2 * INR x2 * PI)). rewrite sin_period. rewrite sin_0; ring. apply le_IZR. apply Rplus_le_reg_l with (IZR x0). rewrite Rplus_0_r. rewrite Ropp_Ropp_IZR. rewrite Rplus_opp_r. left; replace 0 with (IZR 0); [ apply IZR_lt | reflexivity ]. assumption. rewrite <- sin_neg. rewrite Ropp_mult_distr_l_reverse. rewrite Ropp_involutive. reflexivity. Qed. Lemma sin_eq_0_0 (x:R) : sin x = 0 -> exists k : Z, x = IZR k * PI. Proof. intros Hx. destruct (euclidian_division x PI PI_neq0) as (q & r & EQ & Hr & Hr'). exists q. rewrite <- (Rplus_0_r (_*_)). subst. apply Rplus_eq_compat_l. rewrite sin_plus in Hx. assert (H : sin (IZR q * PI) = 0) by (apply sin_eq_0_1; now exists q). rewrite H, Rmult_0_l, Rplus_0_l in Hx. destruct (Rmult_integral _ _ Hx) as [H'|H']. - exfalso. generalize (sin2_cos2 (IZR q * PI)). rewrite H, H', Rsqr_0, Rplus_0_l. intros; now apply R1_neq_R0. - rewrite Rabs_right in Hr'; [|left; apply PI_RGT_0]. destruct Hr as [Hr | ->]; trivial. exfalso. generalize (sin_gt_0 r Hr Hr'). rewrite H'. apply Rlt_irrefl. Qed. Lemma cos_eq_0_0 (x:R) : cos x = 0 -> exists k : Z, x = IZR k * PI + PI / 2. Proof. rewrite cos_sin. intros Hx. destruct (sin_eq_0_0 (PI/2 + x) Hx) as (k,Hk). clear Hx. exists (k-1)%Z. rewrite <- Z_R_minus; simpl. symmetry in Hk. field_simplify [Hk]. field. Qed. Lemma cos_eq_0_1 (x:R) : (exists k : Z, x = IZR k * PI + PI / 2) -> cos x = 0. Proof. rewrite cos_sin. intros (k,->). replace (_ + _) with (IZR k * PI + PI) by field. rewrite neg_sin, <- Ropp_0. apply Ropp_eq_compat. apply sin_eq_0_1. now exists k. Qed. Lemma sin_eq_O_2PI_0 (x:R) : 0 <= x -> x <= 2 * PI -> sin x = 0 -> x = 0 \/ x = PI \/ x = 2 * PI. Proof. intros Lo Hi Hx. destruct (sin_eq_0_0 x Hx) as (k,Hk). clear Hx. destruct (Rtotal_order PI x) as [Hx|[Hx|Hx]]. - right; right. clear Lo. subst. f_equal. change 2 with (IZR (- (-2))). f_equal. apply Z.add_move_0_l. apply one_IZR_lt1. rewrite plus_IZR; simpl. split. + replace (-1) with (-2 + 1) by ring. apply Rplus_lt_compat_l. apply Rmult_lt_reg_r with PI; [apply PI_RGT_0|]. now rewrite Rmult_1_l. + apply Rle_lt_trans with 0; [|apply Rlt_0_1]. replace 0 with (-2 + 2) by ring. apply Rplus_le_compat_l. apply Rmult_le_reg_r with PI; [apply PI_RGT_0|]. trivial. - right; left; auto. - left. clear Hi. subst. replace 0 with (IZR 0 * PI) by (simpl; ring). f_equal. f_equal. apply one_IZR_lt1. split. + apply Rlt_le_trans with 0; [rewrite <- Ropp_0; apply Ropp_gt_lt_contravar, Rlt_0_1 | ]. apply Rmult_le_reg_r with PI; [apply PI_RGT_0|]. now rewrite Rmult_0_l. + apply Rmult_lt_reg_r with PI; [apply PI_RGT_0|]. now rewrite Rmult_1_l. Qed. Lemma sin_eq_O_2PI_1 (x:R) : 0 <= x -> x <= 2 * PI -> x = 0 \/ x = PI \/ x = 2 * PI -> sin x = 0. Proof. intros _ _ [ -> |[ -> | -> ]]. - now rewrite sin_0. - now rewrite sin_PI. - now rewrite sin_2PI. Qed. Lemma cos_eq_0_2PI_0 (x:R) : 0 <= x -> x <= 2 * PI -> cos x = 0 -> x = PI / 2 \/ x = 3 * (PI / 2). Proof. intros Lo Hi Hx. destruct (Rtotal_order x (3 * (PI / 2))) as [LT|[EQ|GT]]. - rewrite cos_sin in Hx. assert (Lo' : 0 <= PI / 2 + x). { apply Rplus_le_le_0_compat. apply Rlt_le, PI2_RGT_0. trivial. } assert (Hi' : PI / 2 + x <= 2 * PI). { apply Rlt_le. 
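(* Derivation behind [cos_eq_0_0] above (sketch): cos x = sin (PI / 2 + x)
   (cos_sin), so cos x = 0 forces PI / 2 + x = IZR k * PI for some k
   (sin_eq_0_0), i.e.
     x = IZR k * PI - PI / 2 = IZR (k - 1) * PI + PI / 2,
   which is why the witness produced there is (k - 1)%Z.  The lemma unfolding
   here, cos_eq_0_2PI_0, then pins the two solutions PI / 2 and 3 * (PI / 2)
   inside [0, 2 * PI]. *)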
replace (2 * PI) with (PI / 2 + 3 * (PI / 2)) by field. now apply Rplus_lt_compat_l. } destruct (sin_eq_O_2PI_0 (PI / 2 + x) Lo' Hi' Hx) as [H|[H|H]]. + exfalso. apply (Rplus_le_compat_l (PI/2)) in Lo. rewrite Rplus_0_r, H in Lo. apply (Rlt_irrefl 0 (Rlt_le_trans 0 (PI / 2) 0 PI2_RGT_0 Lo)). + left. apply (Rplus_eq_compat_l (-(PI/2))) in H. ring_simplify in H. rewrite H. field. + right. apply (Rplus_eq_compat_l (-(PI/2))) in H. ring_simplify in H. rewrite H. field. - now right. - exfalso. destruct (cos_eq_0_0 x Hx) as (k,Hk). clear Hx Lo. subst. assert (LT : (k < 2)%Z). { apply lt_IZR. simpl. apply (Rmult_lt_reg_r PI); [apply PI_RGT_0|]. apply Rlt_le_trans with (IZR k * PI + PI/2); trivial. rewrite <- (Rplus_0_r (IZR k * PI)) at 1. apply Rplus_lt_compat_l. apply PI2_RGT_0. } assert (GT' : (1 < k)%Z). { apply lt_IZR. simpl. apply (Rmult_lt_reg_r PI); [apply PI_RGT_0|rewrite Rmult_1_l]. replace (3*(PI/2)) with (PI/2 + PI) in GT by field. rewrite Rplus_comm in GT. now apply Rplus_lt_reg_r in GT. } omega. Qed. Lemma cos_eq_0_2PI_1 (x:R) : 0 <= x -> x <= 2 * PI -> x = PI / 2 \/ x = 3 * (PI / 2) -> cos x = 0. Proof. intros Lo Hi [ -> | -> ]. - now rewrite cos_PI2. - now rewrite cos_3PI2. Qed. coq-8.4pl2/theories/Reals/Ranalysis3.v0000640000175000001440000007220112010532755016740 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (x l1 l2:R), derivable_pt_lim f1 x l1 -> derivable_pt_lim f2 x l2 -> f2 x <> 0 -> derivable_pt_lim (f1 / f2) x ((l1 * f2 x - l2 * f1 x) / Rsqr (f2 x)). Proof. intros f1 f2 x l1 l2 H H0 H1. cut (derivable_pt f2 x); [ intro X | unfold derivable_pt; exists l2; exact H0 ]. assert (H2 := continuous_neq_0 _ _ (derivable_continuous_pt _ _ X) H1). elim H2; clear H2; intros eps_f2 H2. unfold div_fct. assert (H3 := derivable_continuous_pt _ _ X). unfold continuity_pt in H3; unfold continue_in in H3; unfold limit1_in in H3; unfold limit_in in H3; unfold dist in H3. simpl in H3; unfold R_dist in H3. elim (H3 (Rabs (f2 x) / 2)); [ idtac | unfold Rdiv; change (0 < Rabs (f2 x) * / 2); apply Rmult_lt_0_compat; [ apply Rabs_pos_lt; assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. clear H3; intros alp_f2 H3. cut (forall x0:R, Rabs (x0 - x) < alp_f2 -> Rabs (f2 x0 - f2 x) < Rabs (f2 x) / 2). intro H4. cut (forall a:R, Rabs (a - x) < alp_f2 -> Rabs (f2 x) / 2 < Rabs (f2 a)). intro H5. cut (forall a:R, Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)). intro Maj. unfold derivable_pt_lim; intros. elim (H (Rabs (eps * f2 x / 8))); [ idtac | unfold Rdiv; change (0 < Rabs (eps * f2 x * / 8)); apply Rabs_pos_lt; repeat apply prod_neq_R0; [ red; intro H7; rewrite H7 in H6; elim (Rlt_irrefl _ H6) | assumption | apply Rinv_neq_0_compat; discrR ] ]. intros alp_f1d H7. case (Req_dec (f1 x) 0); intro. case (Req_dec l1 0); intro. (***********************************) (* First case *) (* (f1 x)=0 l1 =0 *) (***********************************) cut (0 < Rmin eps_f2 (Rmin alp_f2 alp_f1d)); [ intro | repeat apply Rmin_pos; [ apply (cond_pos eps_f2) | elim H3; intros; assumption | apply (cond_pos alp_f1d) ] ]. exists (mkposreal (Rmin eps_f2 (Rmin alp_f2 alp_f1d)) H10). simpl; intros. assert (H13 := Rlt_le_trans _ _ _ H12 (Rmin_r _ _)). assert (H14 := Rlt_le_trans _ _ _ H12 (Rmin_l _ _)). assert (H15 := Rlt_le_trans _ _ _ H13 (Rmin_r _ _)). assert (H16 := Rlt_le_trans _ _ _ H13 (Rmin_l _ _)). assert (H17 := H7 _ H11 H15). 
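(* Outline of the quotient rule proved here (sketch): the goal is
     derivable_pt_lim (f1 / f2) x ((l1 * f2 x - l2 * f1 x) / Rsqr (f2 x)).
   The algebraic lemma [formule] rewrites the distance between the difference
   quotient of f1 / f2 at x and the candidate limit as a combination of four
   quantities, each of which is then bounded in absolute value by eps / 4:
     / f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1),
     l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h)),
     f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2),
     l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x).
   The case analysis (six cases in all, on whether f1 x, l1 and l2 vanish)
   only changes which of these terms are trivially zero and which auxiliary
   moduli are needed. *)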
rewrite formule; [ idtac | assumption | assumption | apply H2; apply H14 ]. apply Rle_lt_trans with (Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) + Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) + Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) + Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x))). unfold Rminus. rewrite <- (Rabs_Ropp (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) + - f2 x) / h + - l2))) . apply Rabs_4. repeat rewrite Rabs_mult. apply Rlt_le_trans with (eps / 4 + eps / 4 + eps / 4 + eps / 4). cut (Rabs (/ f2 (x + h)) * Rabs ((f1 (x + h) - f1 x) / h - l1) < eps / 4). cut (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (f2 x - f2 (x + h)) < eps / 4). cut (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs ((f2 (x + h) - f2 x) / h - l2) < eps / 4). cut (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (f2 (x + h) - f2 x) < eps / 4). intros. apply Rlt_4; assumption. rewrite H8. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite H8. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite H9. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite <- Rabs_mult. apply (maj_term1 x h eps l1 alp_f2 eps_f2 alp_f1d f1 f2); try assumption || apply H2. apply H14. apply Rmin_2; assumption. right; symmetry ; apply quadruple_var. (***********************************) (* Second case *) (* (f1 x)=0 l1<>0 *) (***********************************) assert (H10 := derivable_continuous_pt _ _ X). unfold continuity_pt in H10. unfold continue_in in H10. unfold limit1_in in H10. unfold limit_in in H10. unfold dist in H10. simpl in H10. unfold R_dist in H10. elim (H10 (Rabs (eps * Rsqr (f2 x) / (8 * l1)))). clear H10; intros alp_f2t2 H10. cut (forall a:R, Rabs a < alp_f2t2 -> Rabs (f2 (x + a) - f2 x) < Rabs (eps * Rsqr (f2 x) / (8 * l1))). intro H11. cut (0 < Rmin (Rmin eps_f2 alp_f1d) (Rmin alp_f2 alp_f2t2)). intro. exists (mkposreal (Rmin (Rmin eps_f2 alp_f1d) (Rmin alp_f2 alp_f2t2)) H12). simpl. intros. assert (H15 := Rlt_le_trans _ _ _ H14 (Rmin_r _ _)). assert (H16 := Rlt_le_trans _ _ _ H14 (Rmin_l _ _)). assert (H17 := Rlt_le_trans _ _ _ H15 (Rmin_l _ _)). assert (H18 := Rlt_le_trans _ _ _ H15 (Rmin_r _ _)). assert (H19 := Rlt_le_trans _ _ _ H16 (Rmin_l _ _)). assert (H20 := Rlt_le_trans _ _ _ H16 (Rmin_r _ _)). clear H14 H15 H16. rewrite formule; try assumption. apply Rle_lt_trans with (Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) + Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) + Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) + Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x))). unfold Rminus. rewrite <- (Rabs_Ropp (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) + - f2 x) / h + - l2))) . apply Rabs_4. repeat rewrite Rabs_mult. apply Rlt_le_trans with (eps / 4 + eps / 4 + eps / 4 + eps / 4). cut (Rabs (/ f2 (x + h)) * Rabs ((f1 (x + h) - f1 x) / h - l1) < eps / 4). cut (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (f2 x - f2 (x + h)) < eps / 4). cut (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs ((f2 (x + h) - f2 x) / h - l2) < eps / 4). cut (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (f2 (x + h) - f2 x) < eps / 4). intros. 
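(* Role of the auxiliary moduli (sketch): continuity of f2 at x gives an
   alp_f2 such that |h| < alp_f2 implies |f2 (x + h) - f2 x| < |f2 x| / 2,
   hence |f2 (x + h)| > |f2 x| / 2 and / |f2 (x + h)| < 2 / |f2 x| (the cut
   named [Maj] above).  Each of the four terms is then estimated through this
   uniform bound together with the moduli alp_f1d, alp_f2d, alp_f2c, alp_f2t2
   coming from the derivability of f1 and f2 and from further continuity
   calls (lemmas maj_term1 ... maj_term4). *)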
apply Rlt_4; assumption. rewrite H8. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite H8. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite <- Rabs_mult. apply (maj_term2 x h eps l1 alp_f2 alp_f2t2 eps_f2 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term1 x h eps l1 alp_f2 eps_f2 alp_f1d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. right; symmetry ; apply quadruple_var. apply H2; assumption. repeat apply Rmin_pos. apply (cond_pos eps_f2). apply (cond_pos alp_f1d). elim H3; intros; assumption. elim H10; intros; assumption. intros. elim H10; intros. case (Req_dec a 0); intro. rewrite H14; rewrite Rplus_0_r. unfold Rminus; rewrite Rplus_opp_r. rewrite Rabs_R0. apply Rabs_pos_lt. unfold Rdiv, Rsqr; repeat rewrite Rmult_assoc. repeat apply prod_neq_R0; try assumption. red; intro; rewrite H15 in H6; elim (Rlt_irrefl _ H6). apply Rinv_neq_0_compat; repeat apply prod_neq_R0; discrR || assumption. apply H13. split. apply D_x_no_cond; assumption. replace (x + a - x) with a; [ assumption | ring ]. change (0 < Rabs (eps * Rsqr (f2 x) / (8 * l1))). apply Rabs_pos_lt; unfold Rdiv, Rsqr; repeat rewrite Rmult_assoc; repeat apply prod_neq_R0. red; intro; rewrite H11 in H6; elim (Rlt_irrefl _ H6). assumption. assumption. apply Rinv_neq_0_compat; repeat apply prod_neq_R0; [ discrR | discrR | discrR | assumption ]. (***********************************) (* Third case *) (* (f1 x)<>0 l1=0 l2=0 *) (***********************************) case (Req_dec l1 0); intro. case (Req_dec l2 0); intro. elim (H0 (Rabs (Rsqr (f2 x) * eps / (8 * f1 x)))); [ idtac | apply Rabs_pos_lt; unfold Rdiv, Rsqr; repeat rewrite Rmult_assoc; repeat apply prod_neq_R0; [ assumption | assumption | red; intro; rewrite H11 in H6; elim (Rlt_irrefl _ H6) | apply Rinv_neq_0_compat; repeat apply prod_neq_R0; discrR || assumption ] ]. intros alp_f2d H12. cut (0 < Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d alp_f2d)). intro. exists (mkposreal (Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d alp_f2d)) H11). simpl. intros. assert (H15 := Rlt_le_trans _ _ _ H14 (Rmin_l _ _)). assert (H16 := Rlt_le_trans _ _ _ H14 (Rmin_r _ _)). assert (H17 := Rlt_le_trans _ _ _ H15 (Rmin_l _ _)). assert (H18 := Rlt_le_trans _ _ _ H15 (Rmin_r _ _)). assert (H19 := Rlt_le_trans _ _ _ H16 (Rmin_l _ _)). assert (H20 := Rlt_le_trans _ _ _ H16 (Rmin_r _ _)). clear H15 H16. rewrite formule; try assumption. apply Rle_lt_trans with (Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) + Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) + Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) + Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x))). unfold Rminus. rewrite <- (Rabs_Ropp (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) + - f2 x) / h + - l2))) . apply Rabs_4. repeat rewrite Rabs_mult. apply Rlt_le_trans with (eps / 4 + eps / 4 + eps / 4 + eps / 4). cut (Rabs (/ f2 (x + h)) * Rabs ((f1 (x + h) - f1 x) / h - l1) < eps / 4). cut (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (f2 x - f2 (x + h)) < eps / 4). cut (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs ((f2 (x + h) - f2 x) / h - l2) < eps / 4). cut (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (f2 (x + h) - f2 x) < eps / 4). intros. apply Rlt_4; assumption. 
rewrite H10. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite <- Rabs_mult. apply (maj_term3 x h eps l2 alp_f2 eps_f2 alp_f2d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite H9. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite <- Rabs_mult. apply (maj_term1 x h eps l1 alp_f2 eps_f2 alp_f1d f1 f2); assumption || idtac. apply H2; assumption. apply Rmin_2; assumption. right; symmetry ; apply quadruple_var. apply H2; assumption. repeat apply Rmin_pos. apply (cond_pos eps_f2). elim H3; intros; assumption. apply (cond_pos alp_f1d). apply (cond_pos alp_f2d). (***********************************) (* Fourth case *) (* (f1 x)<>0 l1=0 l2<>0 *) (***********************************) elim (H0 (Rabs (Rsqr (f2 x) * eps / (8 * f1 x)))); [ idtac | apply Rabs_pos_lt; unfold Rsqr, Rdiv; repeat rewrite Rinv_mult_distr; repeat apply prod_neq_R0; try assumption || discrR ]. intros alp_f2d H11. assert (H12 := derivable_continuous_pt _ _ X). unfold continuity_pt in H12. unfold continue_in in H12. unfold limit1_in in H12. unfold limit_in in H12. unfold dist in H12. simpl in H12. unfold R_dist in H12. elim (H12 (Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2)))). intros alp_f2c H13. cut (0 < Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d (Rmin alp_f2d alp_f2c))). intro. exists (mkposreal (Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d (Rmin alp_f2d alp_f2c))) H14). simpl; intros. assert (H17 := Rlt_le_trans _ _ _ H16 (Rmin_l _ _)). assert (H18 := Rlt_le_trans _ _ _ H16 (Rmin_r _ _)). assert (H19 := Rlt_le_trans _ _ _ H18 (Rmin_r _ _)). assert (H20 := Rlt_le_trans _ _ _ H19 (Rmin_l _ _)). assert (H21 := Rlt_le_trans _ _ _ H19 (Rmin_r _ _)). assert (H22 := Rlt_le_trans _ _ _ H18 (Rmin_l _ _)). assert (H23 := Rlt_le_trans _ _ _ H17 (Rmin_l _ _)). assert (H24 := Rlt_le_trans _ _ _ H17 (Rmin_r _ _)). clear H16 H17 H18 H19. cut (forall a:R, Rabs a < alp_f2c -> Rabs (f2 (x + a) - f2 x) < Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))). intro. rewrite formule; try assumption. apply Rle_lt_trans with (Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) + Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) + Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) + Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x))). unfold Rminus. rewrite <- (Rabs_Ropp (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) + - f2 x) / h + - l2))) . apply Rabs_4. repeat rewrite Rabs_mult. apply Rlt_le_trans with (eps / 4 + eps / 4 + eps / 4 + eps / 4). cut (Rabs (/ f2 (x + h)) * Rabs ((f1 (x + h) - f1 x) / h - l1) < eps / 4). cut (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (f2 x - f2 (x + h)) < eps / 4). cut (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs ((f2 (x + h) - f2 x) / h - l2) < eps / 4). cut (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (f2 (x + h) - f2 x) < eps / 4). intros. apply Rlt_4; assumption. rewrite <- Rabs_mult. apply (maj_term4 x h eps l2 alp_f2 alp_f2c eps_f2 f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term3 x h eps l2 alp_f2 eps_f2 alp_f2d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite H9. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. 
apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite <- Rabs_mult. apply (maj_term1 x h eps l1 alp_f2 eps_f2 alp_f1d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. right; symmetry ; apply quadruple_var. apply H2; assumption. intros. case (Req_dec a 0); intro. rewrite H17; rewrite Rplus_0_r. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0. apply Rabs_pos_lt. unfold Rdiv, Rsqr. repeat rewrite Rinv_mult_distr; try assumption. repeat apply prod_neq_R0; try assumption. red; intro H18; rewrite H18 in H6; elim (Rlt_irrefl _ H6). apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; assumption. apply Rinv_neq_0_compat; assumption. discrR. discrR. discrR. discrR. discrR. apply prod_neq_R0; [ discrR | assumption ]. elim H13; intros. apply H19. split. apply D_x_no_cond; assumption. replace (x + a - x) with a; [ assumption | ring ]. repeat apply Rmin_pos. apply (cond_pos eps_f2). elim H3; intros; assumption. apply (cond_pos alp_f1d). apply (cond_pos alp_f2d). elim H13; intros; assumption. change (0 < Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))). apply Rabs_pos_lt. unfold Rsqr, Rdiv. repeat rewrite Rinv_mult_distr; try assumption || discrR. repeat apply prod_neq_R0; try assumption. red; intro H13; rewrite H13 in H6; elim (Rlt_irrefl _ H6). apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; assumption. apply Rinv_neq_0_compat; assumption. apply prod_neq_R0; [ discrR | assumption ]. red; intro H11; rewrite H11 in H6; elim (Rlt_irrefl _ H6). apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; discrR. apply Rinv_neq_0_compat; assumption. (***********************************) (* Fifth case *) (* (f1 x)<>0 l1<>0 l2=0 *) (***********************************) case (Req_dec l2 0); intro. assert (H11 := derivable_continuous_pt _ _ X). unfold continuity_pt in H11. unfold continue_in in H11. unfold limit1_in in H11. unfold limit_in in H11. unfold dist in H11. simpl in H11. unfold R_dist in H11. elim (H11 (Rabs (eps * Rsqr (f2 x) / (8 * l1)))). clear H11; intros alp_f2t2 H11. elim (H0 (Rabs (Rsqr (f2 x) * eps / (8 * f1 x)))). intros alp_f2d H12. cut (0 < Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d (Rmin alp_f2d alp_f2t2))). intro. exists (mkposreal (Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d (Rmin alp_f2d alp_f2t2))) H13). simpl. intros. cut (forall a:R, Rabs a < alp_f2t2 -> Rabs (f2 (x + a) - f2 x) < Rabs (eps * Rsqr (f2 x) / (8 * l1))). intro. assert (H17 := Rlt_le_trans _ _ _ H15 (Rmin_l _ _)). assert (H18 := Rlt_le_trans _ _ _ H15 (Rmin_r _ _)). assert (H19 := Rlt_le_trans _ _ _ H17 (Rmin_r _ _)). assert (H20 := Rlt_le_trans _ _ _ H17 (Rmin_l _ _)). assert (H21 := Rlt_le_trans _ _ _ H18 (Rmin_r _ _)). assert (H22 := Rlt_le_trans _ _ _ H18 (Rmin_l _ _)). assert (H23 := Rlt_le_trans _ _ _ H21 (Rmin_l _ _)). assert (H24 := Rlt_le_trans _ _ _ H21 (Rmin_r _ _)). clear H15 H17 H18 H21. rewrite formule; try assumption. apply Rle_lt_trans with (Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) + Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) + Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) + Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x))). unfold Rminus. rewrite <- (Rabs_Ropp (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) + - f2 x) / h + - l2))) . apply Rabs_4. repeat rewrite Rabs_mult. 
apply Rlt_le_trans with (eps / 4 + eps / 4 + eps / 4 + eps / 4). cut (Rabs (/ f2 (x + h)) * Rabs ((f1 (x + h) - f1 x) / h - l1) < eps / 4). cut (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (f2 x - f2 (x + h)) < eps / 4). cut (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs ((f2 (x + h) - f2 x) / h - l2) < eps / 4). cut (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (f2 (x + h) - f2 x) < eps / 4). intros. apply Rlt_4; assumption. rewrite H10. unfold Rdiv; repeat rewrite Rmult_0_r || rewrite Rmult_0_l. rewrite Rabs_R0; rewrite Rmult_0_l. apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup ]. rewrite <- Rabs_mult. apply (maj_term3 x h eps l2 alp_f2 eps_f2 alp_f2d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term2 x h eps l1 alp_f2 alp_f2t2 eps_f2 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term1 x h eps l1 alp_f2 eps_f2 alp_f1d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. right; symmetry ; apply quadruple_var. apply H2; assumption. intros. case (Req_dec a 0); intro. rewrite H17; rewrite Rplus_0_r; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0. apply Rabs_pos_lt. unfold Rdiv; rewrite Rinv_mult_distr; try discrR || assumption. unfold Rsqr. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H18; rewrite H18 in H6; elim (Rlt_irrefl _ H6)). elim H11; intros. apply H19. split. apply D_x_no_cond; assumption. replace (x + a - x) with a; [ assumption | ring ]. repeat apply Rmin_pos. apply (cond_pos eps_f2). elim H3; intros; assumption. apply (cond_pos alp_f1d). apply (cond_pos alp_f2d). elim H11; intros; assumption. apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr; try discrR || assumption. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H12; rewrite H12 in H6; elim (Rlt_irrefl _ H6)). change (0 < Rabs (eps * Rsqr (f2 x) / (8 * l1))). apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr; try discrR || assumption. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H12; rewrite H12 in H6; elim (Rlt_irrefl _ H6)). (***********************************) (* Sixth case *) (* (f1 x)<>0 l1<>0 l2<>0 *) (***********************************) elim (H0 (Rabs (Rsqr (f2 x) * eps / (8 * f1 x)))). intros alp_f2d H11. assert (H12 := derivable_continuous_pt _ _ X). unfold continuity_pt in H12. unfold continue_in in H12. unfold limit1_in in H12. unfold limit_in in H12. unfold dist in H12. simpl in H12. unfold R_dist in H12. elim (H12 (Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2)))). intros alp_f2c H13. elim (H12 (Rabs (eps * Rsqr (f2 x) / (8 * l1)))). intros alp_f2t2 H14. cut (0 < Rmin (Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d alp_f2d)) (Rmin alp_f2c alp_f2t2)). intro. exists (mkposreal (Rmin (Rmin (Rmin eps_f2 alp_f2) (Rmin alp_f1d alp_f2d)) (Rmin alp_f2c alp_f2t2)) H15). simpl. intros. assert (H18 := Rlt_le_trans _ _ _ H17 (Rmin_l _ _)). assert (H19 := Rlt_le_trans _ _ _ H17 (Rmin_r _ _)). assert (H20 := Rlt_le_trans _ _ _ H18 (Rmin_l _ _)). assert (H21 := Rlt_le_trans _ _ _ H18 (Rmin_r _ _)). assert (H22 := Rlt_le_trans _ _ _ H19 (Rmin_l _ _)). assert (H23 := Rlt_le_trans _ _ _ H19 (Rmin_r _ _)). assert (H24 := Rlt_le_trans _ _ _ H20 (Rmin_l _ _)). 
assert (H25 := Rlt_le_trans _ _ _ H20 (Rmin_r _ _)). assert (H26 := Rlt_le_trans _ _ _ H21 (Rmin_l _ _)). assert (H27 := Rlt_le_trans _ _ _ H21 (Rmin_r _ _)). clear H17 H18 H19 H20 H21. cut (forall a:R, Rabs a < alp_f2t2 -> Rabs (f2 (x + a) - f2 x) < Rabs (eps * Rsqr (f2 x) / (8 * l1))). cut (forall a:R, Rabs a < alp_f2c -> Rabs (f2 (x + a) - f2 x) < Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))). intros. rewrite formule; try assumption. apply Rle_lt_trans with (Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) + Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) + Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) + Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x))). unfold Rminus. rewrite <- (Rabs_Ropp (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) + - f2 x) / h + - l2))) . apply Rabs_4. repeat rewrite Rabs_mult. apply Rlt_le_trans with (eps / 4 + eps / 4 + eps / 4 + eps / 4). cut (Rabs (/ f2 (x + h)) * Rabs ((f1 (x + h) - f1 x) / h - l1) < eps / 4). cut (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (f2 x - f2 (x + h)) < eps / 4). cut (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs ((f2 (x + h) - f2 x) / h - l2) < eps / 4). cut (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (f2 (x + h) - f2 x) < eps / 4). intros. apply Rlt_4; assumption. rewrite <- Rabs_mult. apply (maj_term4 x h eps l2 alp_f2 alp_f2c eps_f2 f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term3 x h eps l2 alp_f2 eps_f2 alp_f2d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term2 x h eps l1 alp_f2 alp_f2t2 eps_f2 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. rewrite <- Rabs_mult. apply (maj_term1 x h eps l1 alp_f2 eps_f2 alp_f1d f1 f2); try assumption. apply H2; assumption. apply Rmin_2; assumption. right; symmetry ; apply quadruple_var. apply H2; assumption. intros. case (Req_dec a 0); intro. rewrite H18; rewrite Rplus_0_r; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H28; rewrite H28 in H6; elim (Rlt_irrefl _ H6)). apply prod_neq_R0; [ discrR | assumption ]. apply prod_neq_R0; [ discrR | assumption ]. assumption. elim H13; intros. apply H20. split. apply D_x_no_cond; assumption. replace (x + a - x) with a; [ assumption | ring ]. intros. case (Req_dec a 0); intro. rewrite H18; rewrite Rplus_0_r; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H28; rewrite H28 in H6; elim (Rlt_irrefl _ H6)). discrR. assumption. elim H14; intros. apply H20. split. unfold D_x, no_cond; split. trivial. apply Rminus_not_eq_right. replace (x + a - x) with a; [ assumption | ring ]. replace (x + a - x) with a; [ assumption | ring ]. repeat apply Rmin_pos. apply (cond_pos eps_f2). elim H3; intros; assumption. apply (cond_pos alp_f1d). apply (cond_pos alp_f2d). elim H13; intros; assumption. elim H14; intros; assumption. change (0 < Rabs (eps * Rsqr (f2 x) / (8 * l1))); apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr; try discrR || assumption. 
repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H14; rewrite H14 in H6; elim (Rlt_irrefl _ H6)). change (0 < Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))); apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H13; rewrite H13 in H6; elim (Rlt_irrefl _ H6)). apply prod_neq_R0; [ discrR | assumption ]. apply prod_neq_R0; [ discrR | assumption ]. assumption. apply Rabs_pos_lt. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr; [ idtac | discrR | assumption ]. repeat apply prod_neq_R0; assumption || (apply Rinv_neq_0_compat; assumption) || (apply Rinv_neq_0_compat; discrR) || (red; intro H11; rewrite H11 in H6; elim (Rlt_irrefl _ H6)). intros. unfold Rdiv. apply Rmult_lt_reg_l with (Rabs (f2 (x + a))). apply Rabs_pos_lt; apply H2. apply Rlt_le_trans with (Rmin eps_f2 alp_f2). assumption. apply Rmin_l. rewrite <- Rinv_r_sym. apply Rmult_lt_reg_l with (Rabs (f2 x)). apply Rabs_pos_lt; assumption. rewrite Rmult_1_r. rewrite (Rmult_comm (Rabs (f2 x))). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. apply Rmult_lt_reg_l with (/ 2). apply Rinv_0_lt_compat; prove_sup0. repeat rewrite (Rmult_comm (/ 2)). repeat rewrite Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_r. unfold Rdiv in H5; apply H5. replace (x + a - x) with a. assert (H7 := Rlt_le_trans _ _ _ H6 (Rmin_r _ _)); assumption. ring. discrR. apply Rabs_no_R0; assumption. apply Rabs_no_R0; apply H2. assert (H7 := Rlt_le_trans _ _ _ H6 (Rmin_l _ _)); assumption. intros. assert (H6 := H4 a H5). rewrite <- (Rabs_Ropp (f2 a - f2 x)) in H6. rewrite Ropp_minus_distr in H6. assert (H7 := Rle_lt_trans _ _ _ (Rabs_triang_inv _ _) H6). apply Rplus_lt_reg_r with (- Rabs (f2 a) + Rabs (f2 x) / 2). rewrite Rplus_assoc. rewrite <- double_var. do 2 rewrite (Rplus_comm (- Rabs (f2 a))). rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r. unfold Rminus in H7; assumption. intros. case (Req_dec x x0); intro. rewrite <- H5; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; unfold Rdiv; apply Rmult_lt_0_compat; [ apply Rabs_pos_lt; assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim H3; intros. apply H7. split. unfold D_x, no_cond; split. trivial. assumption. assumption. Qed. Lemma derivable_pt_div : forall (f1 f2:R -> R) (x:R), derivable_pt f1 x -> derivable_pt f2 x -> f2 x <> 0 -> derivable_pt (f1 / f2) x. Proof. unfold derivable_pt. intros f1 f2 x X X0 H. elim X; intros. elim X0; intros. exists ((x0 * f2 x - x1 * f1 x) / Rsqr (f2 x)). apply derivable_pt_lim_div; assumption. Qed. Lemma derivable_div : forall f1 f2:R -> R, derivable f1 -> derivable f2 -> (forall x:R, f2 x <> 0) -> derivable (f1 / f2). Proof. unfold derivable; intros f1 f2 X X0 H x. apply (derivable_pt_div _ _ _ (X x) (X0 x) (H x)). Qed. Lemma derive_pt_div : forall (f1 f2:R -> R) (x:R) (pr1:derivable_pt f1 x) (pr2:derivable_pt f2 x) (na:f2 x <> 0), derive_pt (f1 / f2) x (derivable_pt_div _ _ _ pr1 pr2 na) = (derive_pt f1 x pr1 * f2 x - derive_pt f2 x pr2 * f1 x) / Rsqr (f2 x). Proof. intros. assert (H := derivable_derive f1 x pr1). assert (H0 := derivable_derive f2 x pr2). assert (H1 := derivable_derive (f1 / f2)%F x (derivable_pt_div _ _ _ pr1 pr2 na)). elim H; clear H; intros l1 H. elim H0; clear H0; intros l2 H0. elim H1; clear H1; intros l H1. rewrite H; rewrite H0; apply derive_pt_eq_0. assert (H3 := proj2_sig pr1). 
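(* The projections of pr1 and pr2 are derivability witnesses for l1 and l2,
   so derivable_pt_lim_div gives the stated value of the quotient's derivative. *)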
unfold derive_pt in H; rewrite H in H3. assert (H4 := proj2_sig pr2). unfold derive_pt in H0; rewrite H0 in H4. apply derivable_pt_lim_div; assumption. Qed. coq-8.4pl2/theories/Reals/Ranalysis5.v0000640000175000001440000016006511776416522016763 0ustar notinusersRequire Import Rbase. Require Import Ranalysis_reg. Require Import Rfunctions. Require Import Rseries. Require Import Fourier. Require Import RiemannInt. Require Import SeqProp. Require Import Max. Local Open Scope R_scope. (** * Preliminaries lemmas *) Lemma f_incr_implies_g_incr_interv : forall f g:R->R, forall lb ub, lb < ub -> (forall x y, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall x, f lb <= x -> x <= f ub -> (comp f g) x = id x) -> (forall x , f lb <= x -> x <= f ub -> lb <= g x <= ub) -> (forall x y, f lb <= x -> x < y -> y <= f ub -> g x < g y). Proof. intros f g lb ub lb_lt_ub f_incr f_eq_g g_ok x y lb_le_x x_lt_y y_le_ub. assert (x_encad : f lb <= x <= f ub). split ; [assumption | apply Rle_trans with (r2:=y) ; [apply Rlt_le|] ; assumption]. assert (y_encad : f lb <= y <= f ub). split ; [apply Rle_trans with (r2:=x) ; [|apply Rlt_le] ; assumption | assumption]. assert (Temp1 : lb <= lb) by intuition ; assert (Temp2 : ub <= ub) by intuition. assert (gx_encad := g_ok _ (proj1 x_encad) (proj2 x_encad)). assert (gy_encad := g_ok _ (proj1 y_encad) (proj2 y_encad)). clear Temp1 Temp2. case (Rlt_dec (g x) (g y)). intuition. intros Hfalse. assert (Temp := Rnot_lt_le _ _ Hfalse). assert (Hcontradiction : y <= x). replace y with (id y) by intuition ; replace x with (id x) by intuition ; rewrite <- f_eq_g. rewrite <- f_eq_g. assert (f_incr2 : forall x y, lb <= x -> x <= y -> y < ub -> f x <= f y). intros m n lb_le_m m_le_n n_lt_ub. case (m_le_n). intros ; apply Rlt_le ; apply f_incr ; [| | apply Rlt_le] ; assumption. intros Hyp ; rewrite Hyp ; apply Req_le ; reflexivity. apply f_incr2. intuition. intuition. Focus 3. intuition. Focus 2. intuition. Focus 2. intuition. Focus 2. intuition. assert (Temp2 : g x <> ub). intro Hf. assert (Htemp : (comp f g) x = f ub). unfold comp ; rewrite Hf ; reflexivity. rewrite f_eq_g in Htemp ; unfold id in Htemp. assert (Htemp2 : x < f ub). apply Rlt_le_trans with (r2:=y) ; intuition. clear -Htemp Htemp2. fourier. intuition. intuition. clear -Temp2 gx_encad. case (proj2 gx_encad). intuition. intro Hfalse ; apply False_ind ; apply Temp2 ; assumption. apply False_ind. clear - Hcontradiction x_lt_y. fourier. Qed. Lemma derivable_pt_id_interv : forall (lb ub x:R), lb <= x <= ub -> derivable_pt id x. Proof. intros. reg. Qed. Lemma pr_nu_var2_interv : forall (f g : R -> R) (lb ub x : R) (pr1 : derivable_pt f x) (pr2 : derivable_pt g x), lb < ub -> lb < x < ub -> (forall h : R, lb < h < ub -> f h = g h) -> derive_pt f x pr1 = derive_pt g x pr2. Proof. intros f g lb ub x Prf Prg lb_lt_ub x_encad local_eq. assert (forall x l, lb < x < ub -> (derivable_pt_abs f x l <-> derivable_pt_abs g x l)). intros a l a_encad. unfold derivable_pt_abs, derivable_pt_lim. split. intros Hyp eps eps_pos. elim (Hyp eps eps_pos) ; intros delta Hyp2. assert (Pos_cond : Rmin delta (Rmin (ub - a) (a - lb)) > 0). clear-a lb ub a_encad delta. apply Rmin_pos ; [exact (delta.(cond_pos)) | apply Rmin_pos ] ; apply Rlt_Rminus ; intuition. exists (mkposreal (Rmin delta (Rmin (ub - a) (a - lb))) Pos_cond). intros h h_neq h_encad. replace (g (a + h) - g a) with (f (a + h) - f a). apply Hyp2 ; intuition. apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))). assumption. apply Rmin_l. 
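(* Justify the earlier replacement: f and g coincide on the open interval,
   hence the numerators of their difference quotients are equal there. *)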
assert (local_eq2 : forall h : R, lb < h < ub -> - f h = - g h). intros ; apply Ropp_eq_compat ; intuition. rewrite local_eq ; unfold Rminus. rewrite local_eq2. reflexivity. assumption. assert (Sublemma2 : forall x y, Rabs x < Rabs y -> y > 0 -> x < y). intros m n Hyp_abs y_pos. apply Rlt_le_trans with (r2:=Rabs n). apply Rle_lt_trans with (r2:=Rabs m) ; [ | assumption] ; apply RRle_abs. apply Req_le ; apply Rabs_right ; apply Rgt_ge ; assumption. split. assert (Sublemma : forall x y z, -z < y - x -> x < y + z). intros ; fourier. apply Sublemma. apply Sublemma2. rewrite Rabs_Ropp. apply Rlt_le_trans with (r2:=a-lb) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_r] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_r] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. assert (Sublemma : forall x y z, y < z - x -> x + y < z). intros ; fourier. apply Sublemma. apply Sublemma2. apply Rlt_le_trans with (r2:=ub-a) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_l] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_l] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. intros Hyp eps eps_pos. elim (Hyp eps eps_pos) ; intros delta Hyp2. assert (Pos_cond : Rmin delta (Rmin (ub - a) (a - lb)) > 0). clear-a lb ub a_encad delta. apply Rmin_pos ; [exact (delta.(cond_pos)) | apply Rmin_pos ] ; apply Rlt_Rminus ; intuition. exists (mkposreal (Rmin delta (Rmin (ub - a) (a - lb))) Pos_cond). intros h h_neq h_encad. replace (f (a + h) - f a) with (g (a + h) - g a). apply Hyp2 ; intuition. apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))). assumption. apply Rmin_l. assert (local_eq2 : forall h : R, lb < h < ub -> - f h = - g h). intros ; apply Ropp_eq_compat ; intuition. rewrite local_eq ; unfold Rminus. rewrite local_eq2. reflexivity. assumption. assert (Sublemma2 : forall x y, Rabs x < Rabs y -> y > 0 -> x < y). intros m n Hyp_abs y_pos. apply Rlt_le_trans with (r2:=Rabs n). apply Rle_lt_trans with (r2:=Rabs m) ; [ | assumption] ; apply RRle_abs. apply Req_le ; apply Rabs_right ; apply Rgt_ge ; assumption. split. assert (Sublemma : forall x y z, -z < y - x -> x < y + z). intros ; fourier. apply Sublemma. apply Sublemma2. rewrite Rabs_Ropp. apply Rlt_le_trans with (r2:=a-lb) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_r] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_r] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. assert (Sublemma : forall x y z, y < z - x -> x + y < z). intros ; fourier. apply Sublemma. apply Sublemma2. apply Rlt_le_trans with (r2:=ub-a) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_l] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. apply Rlt_le_trans with (r2:=Rmin (ub - a) (a - lb)) ; [| apply Rmin_l] ; apply Rlt_le_trans with (r2:=Rmin delta (Rmin (ub - a) (a - lb))) ; [| apply Rmin_r] ; assumption. unfold derivable_pt in Prf. 
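(* Unpack both derivability witnesses; by the equivalence just established they
   are limits of the same difference quotient at x, so uniqueness_limite
   identifies them. *)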
unfold derivable_pt in Prg. elim Prf; intros. elim Prg; intros. assert (Temp := p); rewrite H in Temp. unfold derivable_pt_abs in p. unfold derivable_pt_abs in p0. simpl in |- *. apply (uniqueness_limite g x x0 x1 Temp p0). assumption. Qed. (* begin hide *) Lemma leftinv_is_rightinv : forall (f g:R->R), (forall x y, x < y -> f x < f y) -> (forall x, (comp f g) x = id x) -> (forall x, (comp g f) x = id x). Proof. intros f g f_incr Hyp x. assert (forall x, f (g (f x)) = f x). intros ; apply Hyp. assert(f_inj : forall x y, f x = f y -> x = y). intros a b fa_eq_fb. case(total_order_T a b). intro s ; case s ; clear s. intro Hf. assert (Hfalse := f_incr a b Hf). apply False_ind. apply (Rlt_not_eq (f a) (f b)) ; assumption. intuition. intro Hf. assert (Hfalse := f_incr b a Hf). apply False_ind. apply (Rlt_not_eq (f b) (f a)) ; [|symmetry] ; assumption. apply f_inj. unfold comp. unfold comp in Hyp. rewrite Hyp. unfold id. reflexivity. Qed. (* end hide *) Lemma leftinv_is_rightinv_interv : forall (f g:R->R) (lb ub:R), (forall x y, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall y, f lb <= y -> y <= f ub -> (comp f g) y = id y) -> (forall x, f lb <= x -> x <= f ub -> lb <= g x <= ub) -> forall x, lb <= x <= ub -> (comp g f) x = id x. Proof. intros f g lb ub f_incr_interv Hyp g_wf x x_encad. assert(f_inj : forall x y, lb <= x <= ub -> lb <= y <= ub -> f x = f y -> x = y). intros a b a_encad b_encad fa_eq_fb. case(total_order_T a b). intro s ; case s ; clear s. intro Hf. assert (Hfalse := f_incr_interv a b (proj1 a_encad) Hf (proj2 b_encad)). apply False_ind. apply (Rlt_not_eq (f a) (f b)) ; assumption. intuition. intro Hf. assert (Hfalse := f_incr_interv b a (proj1 b_encad) Hf (proj2 a_encad)). apply False_ind. apply (Rlt_not_eq (f b) (f a)) ; [|symmetry] ; assumption. assert (f_incr_interv2 : forall x y, lb <= x -> x <= y -> y <= ub -> f x <= f y). intros m n cond1 cond2 cond3. case cond2. intro cond. apply Rlt_le ; apply f_incr_interv ; assumption. intro cond ; right ; rewrite cond ; reflexivity. assert (Hyp2:forall x, lb <= x <= ub -> f (g (f x)) = f x). intros ; apply Hyp. apply f_incr_interv2 ; intuition. apply f_incr_interv2 ; intuition. unfold comp ; unfold comp in Hyp. apply f_inj. apply g_wf ; apply f_incr_interv2 ; intuition. unfold id ; assumption. apply Hyp2 ; unfold id ; assumption. Qed. (** Intermediate Value Theorem on an Interval (Proof mainly taken from Reals.Rsqrt_def) and its corollary *) Lemma IVT_interv_prelim0 : forall (x y:R) (P:R->bool) (N:nat), x < y -> x <= Dichotomy_ub x y P N <= y /\ x <= Dichotomy_lb x y P N <= y. Proof. assert (Sublemma : forall x y lb ub, lb <= x <= ub /\ lb <= y <= ub -> lb <= (x+y) / 2 <= ub). intros x y lb ub Hyp. split. replace lb with ((lb + lb) * /2) by field. unfold Rdiv ; apply Rmult_le_compat_r ; intuition. replace ub with ((ub + ub) * /2) by field. unfold Rdiv ; apply Rmult_le_compat_r ; intuition. intros x y P N x_lt_y. induction N. simpl ; intuition. simpl. case (P ((Dichotomy_lb x y P N + Dichotomy_ub x y P N) / 2)). split. apply Sublemma ; intuition. intuition. split. intuition. apply Sublemma ; intuition. Qed. Lemma IVT_interv_prelim1 : forall (x y x0:R) (D : R -> bool), x < y -> Un_cv (dicho_up x y D) x0 -> x <= x0 <= y. Proof. intros x y x0 D x_lt_y bnd. assert (Main : forall n, x <= dicho_up x y D n <= y). intro n. unfold dicho_up. apply (proj1 (IVT_interv_prelim0 x y D n x_lt_y)). split. apply Rle_cv_lim with (Vn:=dicho_up x y D) (Un:=fun n => x). intro n ; exact (proj1 (Main n)). 
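(* The constant sequence at x trivially converges to x, so the inequality
   x <= dicho_up x y D n passes to the limit; the upper bound y is handled
   symmetrically below. *)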
unfold Un_cv ; intros ; exists 0%nat ; intros ; unfold R_dist ; replace (x -x) with 0 by field ; rewrite Rabs_R0 ; assumption. assumption. apply Rle_cv_lim with (Un:=dicho_up x y D) (Vn:=fun n => y). intro n ; exact (proj2 (Main n)). assumption. unfold Un_cv ; intros ; exists 0%nat ; intros ; unfold R_dist ; replace (y -y) with 0 by field ; rewrite Rabs_R0 ; assumption. Qed. Lemma IVT_interv : forall (f : R -> R) (x y : R), (forall a, x <= a <= y -> continuity_pt f a) -> x < y -> f x < 0 -> 0 < f y -> {z : R | x <= z <= y /\ f z = 0}. Proof. intros. (* f x y f_cont_interv x_lt_y fx_neg fy_pos.*) cut (x <= y). intro. generalize (dicho_lb_cv x y (fun z:R => cond_positivity (f z)) H3). generalize (dicho_up_cv x y (fun z:R => cond_positivity (f z)) H3). intros X X0. elim X; intros. elim X0; intros. assert (H4 := cv_dicho _ _ _ _ _ H3 p0 p). rewrite H4 in p0. exists x0. split. split. apply Rle_trans with (dicho_lb x y (fun z:R => cond_positivity (f z)) 0). simpl in |- *. right; reflexivity. apply growing_ineq. apply dicho_lb_growing; assumption. assumption. apply Rle_trans with (dicho_up x y (fun z:R => cond_positivity (f z)) 0). apply decreasing_ineq. apply dicho_up_decreasing; assumption. assumption. right; reflexivity. 2: left; assumption. set (Vn := fun n:nat => dicho_lb x y (fun z:R => cond_positivity (f z)) n). set (Wn := fun n:nat => dicho_up x y (fun z:R => cond_positivity (f z)) n). cut ((forall n:nat, f (Vn n) <= 0) -> f x0 <= 0). cut ((forall n:nat, 0 <= f (Wn n)) -> 0 <= f x0). intros. cut (forall n:nat, f (Vn n) <= 0). cut (forall n:nat, 0 <= f (Wn n)). intros. assert (H9 := H6 H8). assert (H10 := H5 H7). apply Rle_antisym; assumption. intro. unfold Wn in |- *. cut (forall z:R, cond_positivity z = true <-> 0 <= z). intro. assert (H8 := dicho_up_car x y (fun z:R => cond_positivity (f z)) n). elim (H7 (f (dicho_up x y (fun z:R => cond_positivity (f z)) n))); intros. apply H9. apply H8. elim (H7 (f y)); intros. apply H12. left; assumption. intro. unfold cond_positivity in |- *. case (Rle_dec 0 z); intro. split. intro; assumption. intro; reflexivity. split. intro feqt;discriminate feqt. intro. elim n0; assumption. unfold Vn in |- *. cut (forall z:R, cond_positivity z = false <-> z < 0). intros. assert (H8 := dicho_lb_car x y (fun z:R => cond_positivity (f z)) n). left. elim (H7 (f (dicho_lb x y (fun z:R => cond_positivity (f z)) n))); intros. apply H9. apply H8. elim (H7 (f x)); intros. apply H12. assumption. intro. unfold cond_positivity in |- *. case (Rle_dec 0 z); intro. split. intro feqt; discriminate feqt. intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H7)). split. intro; auto with real. intro; reflexivity. cut (Un_cv Wn x0). intros. assert (Temp : x <= x0 <= y). apply IVT_interv_prelim1 with (D:=(fun z : R => cond_positivity (f z))) ; assumption. assert (H7 := continuity_seq f Wn x0 (H x0 Temp) H5). case (total_order_T 0 (f x0)); intro. elim s; intro. left; assumption. rewrite <- b; right; reflexivity. unfold Un_cv in H7; unfold R_dist in H7. cut (0 < - f x0). intro. elim (H7 (- f x0) H8); intros. cut (x2 >= x2)%nat; [ intro | unfold ge in |- *; apply le_n ]. assert (H11 := H9 x2 H10). rewrite Rabs_right in H11. pattern (- f x0) at 1 in H11; rewrite <- Rplus_0_r in H11. unfold Rminus in H11; rewrite (Rplus_comm (f (Wn x2))) in H11. assert (H12 := Rplus_lt_reg_r _ _ _ H11). assert (H13 := H6 x2). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H13 H12)). apply Rle_ge; left; unfold Rminus in |- *; apply Rplus_le_lt_0_compat. apply H6. exact H8. apply Ropp_0_gt_lt_contravar; assumption. 
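(* The convergence of Wn is exactly the dichotomy limit already in context;
   the symmetric argument with Vn then gives f x0 <= 0, and antisymmetry
   concludes f x0 = 0. *)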
unfold Wn in |- *; assumption. cut (Un_cv Vn x0). intros. assert (Temp : x <= x0 <= y). apply IVT_interv_prelim1 with (D:=(fun z : R => cond_positivity (f z))) ; assumption. assert (H7 := continuity_seq f Vn x0 (H x0 Temp) H5). case (total_order_T 0 (f x0)); intro. elim s; intro. unfold Un_cv in H7; unfold R_dist in H7. elim (H7 (f x0) a); intros. cut (x2 >= x2)%nat; [ intro | unfold ge in |- *; apply le_n ]. assert (H10 := H8 x2 H9). rewrite Rabs_left in H10. pattern (f x0) at 2 in H10; rewrite <- Rplus_0_r in H10. rewrite Ropp_minus_distr' in H10. unfold Rminus in H10. assert (H11 := Rplus_lt_reg_r _ _ _ H10). assert (H12 := H6 x2). cut (0 < f (Vn x2)). intro. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H13 H12)). rewrite <- (Ropp_involutive (f (Vn x2))). apply Ropp_0_gt_lt_contravar; assumption. apply Rplus_lt_reg_r with (f x0 - f (Vn x2)). rewrite Rplus_0_r; replace (f x0 - f (Vn x2) + (f (Vn x2) - f x0)) with 0; [ unfold Rminus in |- *; apply Rplus_lt_le_0_compat | ring ]. assumption. apply Ropp_0_ge_le_contravar; apply Rle_ge; apply H6. right; rewrite <- b; reflexivity. left; assumption. unfold Vn in |- *; assumption. Qed. (* begin hide *) Ltac case_le H := let t := type of H in let h' := fresh in match t with ?x <= ?y => case (total_order_T x y); [intros h'; case h'; clear h' | intros h'; clear -H h'; elimtype False; fourier ] end. (* end hide *) Lemma f_interv_is_interv : forall (f:R->R) (lb ub y:R), lb < ub -> f lb <= y <= f ub -> (forall x, lb <= x <= ub -> continuity_pt f x) -> {x | lb <= x <= ub /\ f x = y}. Proof. intros f lb ub y lb_lt_ub y_encad f_cont_interv. case y_encad ; intro y_encad1. case_le y_encad1 ; intros y_encad2 y_encad3 ; case_le y_encad3. intro y_encad4. clear y_encad y_encad1 y_encad3. assert (Cont : forall a : R, lb <= a <= ub -> continuity_pt (fun x => f x - y) a). intros a a_encad. unfold continuity_pt, continue_in, limit1_in, limit_in ; simpl ; unfold R_dist. intros eps eps_pos. elim (f_cont_interv a a_encad eps eps_pos). intros alpha alpha_pos. destruct alpha_pos as (alpha_pos,Temp). exists alpha. split. assumption. intros x x_cond. replace (f x - y - (f a - y)) with (f x - f a) by field. exact (Temp x x_cond). assert (H1 : (fun x : R => f x - y) lb < 0). apply Rlt_minus. assumption. assert (H2 : 0 < (fun x : R => f x - y) ub). apply Rgt_minus ; assumption. destruct (IVT_interv (fun x => f x - y) lb ub Cont lb_lt_ub H1 H2) as (x,Hx). exists x. destruct Hx as (Hyp,Result). intuition. intro H ; exists ub ; intuition. intro H ; exists lb ; intuition. intro H ; exists ub ; intuition. Qed. (** ** The derivative of a reciprocal function *) (** * Continuity of the reciprocal function *) Lemma continuity_pt_recip_prelim : forall (f g:R->R) (lb ub : R) (Pr1:lb < ub), (forall x y, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall x, lb <= x <= ub -> (comp g f) x = id x) -> (forall a, lb <= a <= ub -> continuity_pt f a) -> forall b, f lb < b < f ub -> continuity_pt g b. Proof. assert (Sublemma : forall x y z, Rmax x y < z <-> x < z /\ y < z). intros x y z. split. unfold Rmax. case (Rle_dec x y) ; intros Hyp Hyp2. split. apply Rle_lt_trans with (r2:=y) ; assumption. assumption. split. assumption. apply Rlt_trans with (r2:=x). assert (Temp : forall x y, ~ x <= y -> x > y). intros m n Hypmn. intuition. apply Temp ; clear Temp ; assumption. assumption. intros Hyp. unfold Rmax. case (Rle_dec x y). intro ; exact (proj2 Hyp). intro ; exact (proj1 Hyp). assert (Sublemma2 : forall x y z, Rmin x y > z <-> x > z /\ y > z). intros x y z. split. unfold Rmin. 
case (Rle_dec x y) ; intros Hyp Hyp2. split. assumption. apply Rlt_le_trans with (r2:=x) ; intuition. split. apply Rlt_trans with (r2:=y). intuition. assert (Temp : forall x y, ~ x <= y -> x > y). intros m n Hypmn. intuition. apply Temp ; clear Temp ; assumption. assumption. intros Hyp. unfold Rmin. case (Rle_dec x y). intro ; exact (proj1 Hyp). intro ; exact (proj2 Hyp). assert (Sublemma3 : forall x y, x <= y /\ x <> y -> x < y). intros m n Hyp. unfold Rle in Hyp. destruct Hyp as (Hyp1,Hyp2). case Hyp1. intuition. intro Hfalse ; apply False_ind ; apply Hyp2 ; exact Hfalse. intros f g lb ub lb_lt_ub f_incr_interv f_eq_g f_cont_interv b b_encad. assert (f_incr_interv2 : forall x y, lb <= x -> x <= y -> y <= ub -> f x <= f y). intros m n cond1 cond2 cond3. case cond2. intro cond. apply Rlt_le ; apply f_incr_interv ; assumption. intro cond ; right ; rewrite cond ; reflexivity. unfold continuity_pt, continue_in, limit1_in, limit_in ; intros eps eps_pos. unfold dist ; simpl ; unfold R_dist. assert (b_encad_e : f lb <= b <= f ub) by intuition. elim (f_interv_is_interv f lb ub b lb_lt_ub b_encad_e f_cont_interv) ; intros x Temp. destruct Temp as (x_encad,f_x_b). assert (lb_lt_x : lb < x). assert (Temp : x <> lb). intro Hfalse. assert (Temp' : b = f lb). rewrite <- f_x_b ; rewrite Hfalse ; reflexivity. assert (Temp'' : b <> f lb). apply Rgt_not_eq ; exact (proj1 b_encad). apply Temp'' ; exact Temp'. apply Sublemma3. split. exact (proj1 x_encad). assert (Temp2 : forall x y:R, x <> y <-> y <> x). intros m n. split ; intuition. rewrite Temp2 ; assumption. assert (x_lt_ub : x < ub). assert (Temp : x <> ub). intro Hfalse. assert (Temp' : b = f ub). rewrite <- f_x_b ; rewrite Hfalse ; reflexivity. assert (Temp'' : b <> f ub). apply Rlt_not_eq ; exact (proj2 b_encad). apply Temp'' ; exact Temp'. apply Sublemma3. split ; [exact (proj2 x_encad) | assumption]. pose (x1 := Rmax (x - eps) lb). pose (x2 := Rmin (x + eps) ub). assert (Hx1 : x1 = Rmax (x - eps) lb) by intuition. assert (Hx2 : x2 = Rmin (x + eps) ub) by intuition. assert (x1_encad : lb <= x1 <= ub). split. apply RmaxLess2. apply Rlt_le. rewrite Hx1. rewrite Sublemma. split. apply Rlt_trans with (r2:=x) ; fourier. assumption. assert (x2_encad : lb <= x2 <= ub). split. apply Rlt_le ; rewrite Hx2 ; apply Rgt_lt ; rewrite Sublemma2. split. apply Rgt_trans with (r2:=x) ; fourier. assumption. apply Rmin_r. assert (x_lt_x2 : x < x2). rewrite Hx2. apply Rgt_lt. rewrite Sublemma2. split ; fourier. assert (x1_lt_x : x1 < x). rewrite Hx1. rewrite Sublemma. split ; fourier. exists (Rmin (f x - f x1) (f x2 - f x)). split. apply Rmin_pos ; apply Rgt_minus. apply f_incr_interv ; [apply RmaxLess2 | | ] ; fourier. apply f_incr_interv ; intuition. intros y Temp. destruct Temp as (_,y_cond). rewrite <- f_x_b in y_cond. assert (Temp : forall x y d1 d2, d1 > 0 -> d2 > 0 -> Rabs (y - x) < Rmin d1 d2 -> x - d1 <= y <= x + d2). intros. split. assert (H10 : forall x y z, x - y <= z -> x - z <= y). intuition. fourier. apply H10. apply Rle_trans with (r2:=Rabs (y0 - x0)). replace (Rabs (y0 - x0)) with (Rabs (x0 - y0)). apply RRle_abs. rewrite <- Rabs_Ropp. unfold Rminus ; rewrite Ropp_plus_distr. rewrite Ropp_involutive. intuition. apply Rle_trans with (r2:= Rmin d1 d2). apply Rlt_le ; assumption. apply Rmin_l. assert (H10 : forall x y z, x - y <= z -> x <= y + z). intuition. fourier. apply H10. apply Rle_trans with (r2:=Rabs (y0 - x0)). apply RRle_abs. apply Rle_trans with (r2:= Rmin d1 d2). apply Rlt_le ; assumption. apply Rmin_r. 
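(* Instantiate the pinching lemma at f x with radii f x - f x1 and f x2 - f x:
   since Rabs (y - f x) is below both, y lies between f x1 and f x2. *)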
assert (Temp' := Temp (f x) y (f x - f x1) (f x2 - f x)). replace (f x - (f x - f x1)) with (f x1) in Temp' by field. replace (f x + (f x2 - f x)) with (f x2) in Temp' by field. assert (T : f x - f x1 > 0). apply Rgt_minus. apply f_incr_interv ; intuition. assert (T' : f x2 - f x > 0). apply Rgt_minus. apply f_incr_interv ; intuition. assert (Main := Temp' T T' y_cond). clear Temp Temp' T T'. assert (x1_lt_x2 : x1 < x2). apply Rlt_trans with (r2:=x) ; assumption. assert (f_cont_myinterv : forall a : R, x1 <= a <= x2 -> continuity_pt f a). intros ; apply f_cont_interv ; split. apply Rle_trans with (r2 := x1) ; intuition. apply Rle_trans with (r2 := x2) ; intuition. elim (f_interv_is_interv f x1 x2 y x1_lt_x2 Main f_cont_myinterv) ; intros x' Temp. destruct Temp as (x'_encad,f_x'_y). rewrite <- f_x_b ; rewrite <- f_x'_y. unfold comp in f_eq_g. rewrite f_eq_g. rewrite f_eq_g. unfold id. assert (x'_encad2 : x - eps <= x' <= x + eps). split. apply Rle_trans with (r2:=x1) ; [ apply RmaxLess1|] ; intuition. apply Rle_trans with (r2:=x2) ; [ | apply Rmin_l] ; intuition. assert (x1_lt_x' : x1 < x'). apply Sublemma3. assert (x1_neq_x' : x1 <> x'). intro Hfalse. rewrite Hfalse, f_x'_y in y_cond. assert (Hf : Rabs (y - f x) < f x - y). apply Rlt_le_trans with (r2:=Rmin (f x - y) (f x2 - f x)). fourier. apply Rmin_l. assert(Hfin : f x - y < f x - y). apply Rle_lt_trans with (r2:=Rabs (y - f x)). replace (Rabs (y - f x)) with (Rabs (f x - y)). apply RRle_abs. rewrite <- Rabs_Ropp. replace (- (f x - y)) with (y - f x) by field ; reflexivity. fourier. apply (Rlt_irrefl (f x - y)) ; assumption. split ; intuition. assert (x'_lb : x - eps < x'). apply Sublemma3. split. intuition. apply Rlt_not_eq. apply Rle_lt_trans with (r2:=x1) ; [ apply RmaxLess1|] ; intuition. assert (x'_lt_x2 : x' < x2). apply Sublemma3. assert (x1_neq_x' : x' <> x2). intro Hfalse. rewrite <- Hfalse, f_x'_y in y_cond. assert (Hf : Rabs (y - f x) < y - f x). apply Rlt_le_trans with (r2:=Rmin (f x - f x1) (y - f x)). fourier. apply Rmin_r. assert(Hfin : y - f x < y - f x). apply Rle_lt_trans with (r2:=Rabs (y - f x)). apply RRle_abs. fourier. apply (Rlt_irrefl (y - f x)) ; assumption. split ; intuition. assert (x'_ub : x' < x + eps). apply Sublemma3. split. intuition. apply Rlt_not_eq. apply Rlt_le_trans with (r2:=x2) ; [ |rewrite Hx2 ; apply Rmin_l] ; intuition. apply Rabs_def1 ; fourier. assumption. split. apply Rle_trans with (r2:=x1) ; intuition. apply Rle_trans with (r2:=x2) ; intuition. Qed. Lemma continuity_pt_recip_interv : forall (f g:R->R) (lb ub : R) (Pr1:lb < ub), (forall x y, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall x, f lb <= x -> x <= f ub -> (comp f g) x = id x) -> (forall x, f lb <= x -> x <= f ub -> lb <= g x <= ub) -> (forall a, lb <= a <= ub -> continuity_pt f a) -> forall b, f lb < b < f ub -> continuity_pt g b. Proof. intros f g lb ub lb_lt_ub f_incr_interv f_eq_g g_wf. assert (g_eq_f_prelim := leftinv_is_rightinv_interv f g lb ub f_incr_interv f_eq_g). assert (g_eq_f : forall x, lb <= x <= ub -> (comp g f) x = id x). intro x ; apply g_eq_f_prelim ; assumption. apply (continuity_pt_recip_prelim f g lb ub lb_lt_ub f_incr_interv g_eq_f). Qed. 
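(** Continuity of the reciprocal is now available. The following lemmas show
    that g is in addition derivable at any point where the derivative of f at
    g x does not vanish, with the expected value 1 / derive_pt f (g x); the
    hypotheses mirror those of continuity_pt_recip_interv. *)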
(** * Derivability of the reciprocal function *) Lemma derivable_pt_lim_recip_interv : forall (f g:R->R) (lb ub x:R) (Prf:forall a : R, g lb <= a <= g ub -> derivable_pt f a) (Prg : continuity_pt g x), lb < ub -> lb < x < ub -> forall (Prg_incr:g lb <= g x <= g ub), (forall x, lb <= x <= ub -> (comp f g) x = id x) -> derive_pt f (g x) (Prf (g x) Prg_incr) <> 0 -> derivable_pt_lim g x (1 / derive_pt f (g x) (Prf (g x) Prg_incr)). Proof. intros f g lb ub x Prf g_cont_pur lb_lt_ub x_encad Prg_incr f_eq_g df_neq. assert (x_encad2 : lb <= x <= ub). split ; apply Rlt_le ; intuition. elim (Prf (g x)); simpl; intros l Hl. unfold derivable_pt_lim. intros eps eps_pos. pose (y := g x). assert (Hlinv := limit_inv). assert (Hf_deriv : forall eps:R, 0 < eps -> exists delta : posreal, (forall h:R, h <> 0 -> Rabs h < delta -> Rabs ((f (g x + h) - f (g x)) / h - l) < eps)). intros eps0 eps0_pos. red in Hl ; red in Hl. elim (Hl eps0 eps0_pos). intros deltatemp Htemp. exists deltatemp ; exact Htemp. elim (Hf_deriv eps eps_pos). intros deltatemp Htemp. red in Hlinv ; red in Hlinv ; simpl dist in Hlinv ; unfold R_dist in Hlinv. assert (Hlinv' := Hlinv (fun h => (f (y+h) - f y)/h) (fun h => h <>0) l 0). unfold limit1_in, limit_in, dist in Hlinv' ; simpl in Hlinv'. unfold R_dist in Hlinv'. assert (Premisse : (forall eps : R, eps > 0 -> exists alp : R, alp > 0 /\ (forall x : R, (fun h => h <>0) x /\ Rabs (x - 0) < alp -> Rabs ((f (y + x) - f y) / x - l) < eps))). intros eps0 eps0_pos. elim (Hf_deriv eps0 eps0_pos). intros deltatemp' Htemp'. exists deltatemp'. split. exact deltatemp'.(cond_pos). intros htemp cond. apply (Htemp' htemp). exact (proj1 cond). replace (htemp) with (htemp - 0). exact (proj2 cond). intuition. assert (Premisse2 : l <> 0). intro l_null. rewrite l_null in Hl. apply df_neq. rewrite derive_pt_eq. exact Hl. elim (Hlinv' Premisse Premisse2 eps eps_pos). intros alpha cond. assert (alpha_pos := proj1 cond) ; assert (inv_cont := proj2 cond) ; clear cond. unfold derivable, derivable_pt, derivable_pt_abs, derivable_pt_lim in Prf. elim (Hl eps eps_pos). intros delta f_deriv. assert (g_cont := g_cont_pur). unfold continuity_pt, continue_in, limit1_in, limit_in in g_cont. pose (mydelta := Rmin delta alpha). assert (mydelta_pos : mydelta > 0). unfold mydelta, Rmin. case (Rle_dec delta alpha). intro ; exact (delta.(cond_pos)). intro ; exact alpha_pos. elim (g_cont mydelta mydelta_pos). intros delta' new_g_cont. assert(delta'_pos := proj1 (new_g_cont)). clear g_cont ; assert (g_cont := proj2 (new_g_cont)) ; clear new_g_cont. pose (mydelta'' := Rmin delta' (Rmin (x - lb) (ub - x))). assert(mydelta''_pos : mydelta'' > 0). unfold mydelta''. apply Rmin_pos ; [intuition | apply Rmin_pos] ; apply Rgt_minus ; intuition. pose (delta'' := mkposreal mydelta'' mydelta''_pos: posreal). exists delta''. intros h h_neq h_le_delta'. assert (lb <= x +h <= ub). assert (Sublemma2 : forall x y, Rabs x < Rabs y -> y > 0 -> x < y). intros m n Hyp_abs y_pos. apply Rlt_le_trans with (r2:=Rabs n). apply Rle_lt_trans with (r2:=Rabs m) ; [ | assumption] ; apply RRle_abs. apply Req_le ; apply Rabs_right ; apply Rgt_ge ; assumption. assert (lb <= x + h <= ub). split. assert (Sublemma : forall x y z, -z <= y - x -> x <= y + z). intros ; fourier. apply Sublemma. apply Rlt_le ; apply Sublemma2. rewrite Rabs_Ropp. apply Rlt_le_trans with (r2:=x-lb) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (x - lb) (ub - x)) ; [| apply Rmin_l] ; apply Rlt_le_trans with (r2:=Rmin delta' (Rmin (x - lb) (ub - x))). 
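(* The radius delta'' was chosen below delta' and below the distances x - lb
   and ub - x, so Rabs h < delta'' keeps x + h inside the interval from lb to ub. *)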
apply Rlt_le_trans with (r2:=delta''). assumption. intuition. apply Rmin_r. apply Rgt_minus. intuition. assert (Sublemma : forall x y z, y <= z - x -> x + y <= z). intros ; fourier. apply Sublemma. apply Rlt_le ; apply Sublemma2. apply Rlt_le_trans with (r2:=ub-x) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (x - lb) (ub - x)) ; [| apply Rmin_r] ; apply Rlt_le_trans with (r2:=Rmin delta' (Rmin (x - lb) (ub - x))) ; [| apply Rmin_r] ; assumption. apply Rlt_le_trans with (r2:=delta''). assumption. apply Rle_trans with (r2:=Rmin delta' (Rmin (x - lb) (ub - x))). intuition. apply Rle_trans with (r2:=Rmin (x - lb) (ub - x)). apply Rmin_r. apply Rmin_r. replace ((g (x + h) - g x) / h) with (1/ (h / (g (x + h) - g x))). assert (Hrewr : h = (comp f g ) (x+h) - (comp f g) x). rewrite f_eq_g. rewrite f_eq_g ; unfold id. rewrite Rplus_comm ; unfold Rminus ; rewrite Rplus_assoc ; rewrite Rplus_opp_r. intuition. intuition. assumption. split ; [|intuition]. assert (Sublemma : forall x y z, - z <= y - x -> x <= y + z). intros ; fourier. apply Sublemma ; apply Rlt_le ; apply Sublemma2. rewrite Rabs_Ropp. apply Rlt_le_trans with (r2:=x-lb) ; [| apply RRle_abs] ; apply Rlt_le_trans with (r2:=Rmin (x - lb) (ub - x)) ; [| apply Rmin_l] ; apply Rlt_le_trans with (r2:=Rmin delta' (Rmin (x - lb) (ub - x))) ; [| apply Rmin_r] ; assumption. apply Rgt_minus. intuition. field. split. assumption. intro Hfalse. assert (Hf : g (x+h) = g x) by intuition. assert ((comp f g) (x+h) = (comp f g) x). unfold comp ; rewrite Hf ; intuition. assert (Main : x+h = x). replace (x +h) with (id (x+h)) by intuition. assert (Temp : x = id x) by intuition ; rewrite Temp at 2 ; clear Temp. rewrite <- f_eq_g. rewrite <- f_eq_g. assumption. intuition. assumption. assert (h = 0). apply Rplus_0_r_uniq with (r:=x) ; assumption. apply h_neq ; assumption. replace ((g (x + h) - g x) / h) with (1/ (h / (g (x + h) - g x))). assert (Hrewr : h = (comp f g ) (x+h) - (comp f g) x). rewrite f_eq_g. rewrite f_eq_g. unfold id ; rewrite Rplus_comm ; unfold Rminus ; rewrite Rplus_assoc ; rewrite Rplus_opp_r ; intuition. assumption. assumption. rewrite Hrewr at 1. unfold comp. replace (g(x+h)) with (g x + (g (x+h) - g(x))) by field. pose (h':=g (x+h) - g x). replace (g (x+h) - g x) with h' by intuition. replace (g x + h' - g x) with h' by field. assert (h'_neq : h' <> 0). unfold h'. intro Hfalse. unfold Rminus in Hfalse ; apply Rminus_diag_uniq in Hfalse. assert (Hfalse' : (comp f g) (x+h) = (comp f g) x). intros ; unfold comp ; rewrite Hfalse ; trivial. rewrite f_eq_g in Hfalse' ; rewrite f_eq_g in Hfalse'. unfold id in Hfalse'. apply Rplus_0_r_uniq in Hfalse'. apply h_neq ; exact Hfalse'. assumption. assumption. assumption. unfold Rdiv at 1 3; rewrite Rmult_1_l ; rewrite Rmult_1_l. apply inv_cont. split. exact h'_neq. rewrite Rminus_0_r. unfold continuity_pt, continue_in, limit1_in, limit_in in g_cont_pur. elim (g_cont_pur mydelta mydelta_pos). intros delta3 cond3. unfold dist in cond3 ; simpl in cond3 ; unfold R_dist in cond3. unfold h'. assert (mydelta_le_alpha : mydelta <= alpha). unfold mydelta, Rmin ; case (Rle_dec delta alpha). trivial. intro ; intuition. apply Rlt_le_trans with (r2:=mydelta). unfold dist in g_cont ; simpl in g_cont ; unfold R_dist in g_cont ; apply g_cont. split. unfold D_x ; simpl. split. unfold no_cond ; trivial. intro Hfalse ; apply h_neq. apply (Rplus_0_r_uniq x). symmetry ; assumption. replace (x + h - x) with h by field. apply Rlt_le_trans with (r2:=delta''). assumption ; unfold delta''. intuition. 
apply Rle_trans with (r2:=mydelta''). apply Req_le. unfold delta''. intuition. apply Rmin_l. assumption. field ; split. assumption. intro Hfalse ; apply h_neq. apply (Rplus_0_r_uniq x). assert (Hfin : (comp f g) (x+h) = (comp f g) x). apply Rminus_diag_uniq in Hfalse. unfold comp. rewrite Hfalse ; reflexivity. rewrite f_eq_g in Hfin. rewrite f_eq_g in Hfin. unfold id in Hfin. exact Hfin. assumption. assumption. Qed. Lemma derivable_pt_recip_interv_prelim0 : forall (f g : R -> R) (lb ub x : R) (Prf : forall a : R, g lb <= a <= g ub -> derivable_pt f a), continuity_pt g x -> lb < ub -> lb < x < ub -> forall Prg_incr : g lb <= g x <= g ub, (forall x0 : R, lb <= x0 <= ub -> comp f g x0 = id x0) -> derive_pt f (g x) (Prf (g x) Prg_incr) <> 0 -> derivable_pt g x. Proof. intros f g lb ub x Prf g_cont_pt lb_lt_ub x_encad Prg_incr f_eq_g Df_neq. unfold derivable_pt, derivable_pt_abs. exists (1 / derive_pt f (g x) (Prf (g x) Prg_incr)). apply derivable_pt_lim_recip_interv ; assumption. Qed. Lemma derivable_pt_recip_interv_prelim1 :forall (f g:R->R) (lb ub x : R), lb < ub -> f lb < x < f ub -> (forall x : R, f lb <= x -> x <= f ub -> comp f g x = id x) -> (forall x : R, f lb <= x -> x <= f ub -> lb <= g x <= ub) -> (forall x y : R, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall a : R, lb <= a <= ub -> derivable_pt f a) -> derivable_pt f (g x). Proof. intros f g lb ub x lb_lt_ub x_encad f_eq_g g_ok f_incr f_derivable. apply f_derivable. assert (Left_inv := leftinv_is_rightinv_interv f g lb ub f_incr f_eq_g g_ok). replace lb with ((comp g f) lb). replace ub with ((comp g f) ub). unfold comp. assert (Temp:= f_incr_implies_g_incr_interv f g lb ub lb_lt_ub f_incr f_eq_g g_ok). split ; apply Rlt_le ; apply Temp ; intuition. apply Left_inv ; intuition. apply Left_inv ; intuition. Qed. Lemma derivable_pt_recip_interv : forall (f g:R->R) (lb ub x : R) (lb_lt_ub:lb < ub) (x_encad:f lb < x < f ub) (f_eq_g:forall x : R, f lb <= x -> x <= f ub -> comp f g x = id x) (g_wf:forall x : R, f lb <= x -> x <= f ub -> lb <= g x <= ub) (f_incr:forall x y : R, lb <= x -> x < y -> y <= ub -> f x < f y) (f_derivable:forall a : R, lb <= a <= ub -> derivable_pt f a), derive_pt f (g x) (derivable_pt_recip_interv_prelim1 f g lb ub x lb_lt_ub x_encad f_eq_g g_wf f_incr f_derivable) <> 0 -> derivable_pt g x. Proof. intros f g lb ub x lb_lt_ub x_encad f_eq_g g_wf f_incr f_derivable Df_neq. assert(g_incr : g (f lb) < g x < g (f ub)). assert (Temp:= f_incr_implies_g_incr_interv f g lb ub lb_lt_ub f_incr f_eq_g g_wf). split ; apply Temp ; intuition. exact (proj1 x_encad). apply Rlt_le ; exact (proj2 x_encad). apply Rlt_le ; exact (proj1 x_encad). exact (proj2 x_encad). assert(g_incr2 : g (f lb) <= g x <= g (f ub)). split ; apply Rlt_le ; intuition. assert (g_eq_f := leftinv_is_rightinv_interv f g lb ub f_incr f_eq_g g_wf). unfold comp, id in g_eq_f. assert (f_derivable2 : forall a : R, g (f lb) <= a <= g (f ub) -> derivable_pt f a). intros a a_encad ; apply f_derivable. rewrite g_eq_f in a_encad ; rewrite g_eq_f in a_encad ; intuition. apply derivable_pt_recip_interv_prelim0 with (f:=f) (lb:=f lb) (ub:=f ub) (Prf:=f_derivable2) (Prg_incr:=g_incr2). apply continuity_pt_recip_interv with (f:=f) (lb:=lb) (ub:=ub) ; intuition. apply derivable_continuous_pt ; apply f_derivable ; intuition. exact (proj1 x_encad). exact (proj2 x_encad). apply f_incr ; intuition. assumption. intros x0 x0_encad ; apply f_eq_g ; intuition. 
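(* Only the nonzero-derivative hypothesis remains: derive_pt does not depend on
   the derivability witness on the open interval (pr_nu_var2_interv), so the
   hypothesis Df_neq transfers to the witness built here. *)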
rewrite pr_nu_var2_interv with (g:=f) (lb:=lb) (ub:=ub) (pr2:=derivable_pt_recip_interv_prelim1 f g lb ub x lb_lt_ub x_encad f_eq_g g_wf f_incr f_derivable) ; [| |rewrite g_eq_f in g_incr ; rewrite g_eq_f in g_incr| ] ; intuition. Qed. (****************************************************) (** * Value of the derivative of the reciprocal function *) (****************************************************) Lemma derive_pt_recip_interv_prelim0 : forall (f g:R->R) (lb ub x:R) (Prf:derivable_pt f (g x)) (Prg:derivable_pt g x), lb < ub -> lb < x < ub -> (forall x, lb < x < ub -> (comp f g) x = id x) -> derive_pt f (g x) Prf <> 0 -> derive_pt g x Prg = 1 / (derive_pt f (g x) Prf). Proof. intros f g lb ub x Prf Prg lb_lt_ub x_encad local_recip Df_neq. replace (derive_pt g x Prg) with ((derive_pt g x Prg) * (derive_pt f (g x) Prf) * / (derive_pt f (g x) Prf)). unfold Rdiv. rewrite (Rmult_comm _ (/ derive_pt f (g x) Prf)). rewrite (Rmult_comm _ (/ derive_pt f (g x) Prf)). apply Rmult_eq_compat_l. rewrite Rmult_comm. rewrite <- derive_pt_comp. assert (x_encad2 : lb <= x <= ub) by intuition. rewrite pr_nu_var2_interv with (g:=id) (pr2:= derivable_pt_id_interv lb ub x x_encad2) (lb:=lb) (ub:=ub) ; [reg| | |] ; assumption. rewrite Rmult_assoc, Rinv_r. intuition. assumption. Qed. Lemma derive_pt_recip_interv_prelim1_0 : forall (f g:R->R) (lb ub x:R), lb < ub -> f lb < x < f ub -> (forall x y : R, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall x : R, f lb <= x -> x <= f ub -> lb <= g x <= ub) -> (forall x, f lb <= x -> x <= f ub -> (comp f g) x = id x) -> lb < g x < ub. Proof. intros f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g. assert (Temp:= f_incr_implies_g_incr_interv f g lb ub lb_lt_ub f_incr f_eq_g g_wf). assert (Left_inv := leftinv_is_rightinv_interv f g lb ub f_incr f_eq_g g_wf). unfold comp, id in Left_inv. split ; [rewrite <- Left_inv with (x:=lb) | rewrite <- Left_inv ]. apply Temp ; intuition. intuition. apply Temp ; intuition. intuition. Qed. Lemma derive_pt_recip_interv_prelim1_1 : forall (f g:R->R) (lb ub x:R), lb < ub -> f lb < x < f ub -> (forall x y : R, lb <= x -> x < y -> y <= ub -> f x < f y) -> (forall x : R, f lb <= x -> x <= f ub -> lb <= g x <= ub) -> (forall x, f lb <= x -> x <= f ub -> (comp f g) x = id x) -> lb <= g x <= ub. Proof. intros f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g. assert (Temp := derive_pt_recip_interv_prelim1_0 f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g). split ; apply Rlt_le ; intuition. Qed. Lemma derive_pt_recip_interv : forall (f g:R->R) (lb ub x:R) (lb_lt_ub:lb < ub) (x_encad:f lb < x < f ub) (f_incr:forall x y : R, lb <= x -> x < y -> y <= ub -> f x < f y) (g_wf:forall x : R, f lb <= x -> x <= f ub -> lb <= g x <= ub) (Prf:forall a : R, lb <= a <= ub -> derivable_pt f a) (f_eq_g:forall x, f lb <= x -> x <= f ub -> (comp f g) x = id x) (Df_neq:derive_pt f (g x) (derivable_pt_recip_interv_prelim1 f g lb ub x lb_lt_ub x_encad f_eq_g g_wf f_incr Prf) <> 0), derive_pt g x (derivable_pt_recip_interv f g lb ub x lb_lt_ub x_encad f_eq_g g_wf f_incr Prf Df_neq) = 1 / (derive_pt f (g x) (Prf (g x) (derive_pt_recip_interv_prelim1_1 f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g))). Proof. intros. assert(g_incr := (derive_pt_recip_interv_prelim1_1 f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g)). apply derive_pt_recip_interv_prelim0 with (lb:=f lb) (ub:=f ub) ; [intuition |assumption | intuition |]. intro Hfalse ; apply Df_neq. 
rewrite pr_nu_var2_interv with (g:=f) (lb:=lb) (ub:=ub) (pr2:= (Prf (g x) (derive_pt_recip_interv_prelim1_1 f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g))) ; [intuition | intuition | | intuition]. exact (derive_pt_recip_interv_prelim1_0 f g lb ub x lb_lt_ub x_encad f_incr g_wf f_eq_g). Qed. (****************************************************) (** * Existence of the derivative of a function which is the limit of a sequence of functions *) (****************************************************) (* begin hide *) Lemma ub_lt_2_pos : forall x ub lb, lb < x -> x < ub -> 0 < (ub-lb)/2. Proof. intros x ub lb lb_lt_x x_lt_ub. assert (T : 0 < ub - lb). fourier. unfold Rdiv ; apply Rlt_mult_inv_pos ; intuition. Qed. Definition mkposreal_lb_ub (x lb ub:R) (lb_lt_x:lb Boule c2 r2 x -> {r3 : posreal | forall y, Boule x r3 y -> Boule c1 r1 y /\ Boule c2 r2 y}. intros c1 c2 [r1 r1p] [r2 r2p] x; unfold Boule; simpl; intros in1 in2. assert (Rmax (c1 - r1)(c2 - r2) < x). apply Rmax_lub_lt;[revert in1 | revert in2]; intros h; apply Rabs_def2 in h; destruct h; fourier. assert (x < Rmin (c1 + r1) (c2 + r2)). apply Rmin_glb_lt;[revert in1 | revert in2]; intros h; apply Rabs_def2 in h; destruct h; fourier. assert (t: 0 < Rmin (x - Rmax (c1 - r1) (c2 - r2)) (Rmin (c1 + r1) (c2 + r2) - x)). apply Rmin_glb_lt; fourier. exists (mkposreal _ t). apply Rabs_def2 in in1; destruct in1. apply Rabs_def2 in in2; destruct in2. assert (c1 - r1 <= Rmax (c1 - r1) (c2 - r2)) by apply Rmax_l. assert (c2 - r2 <= Rmax (c1 - r1) (c2 - r2)) by apply Rmax_r. assert (Rmin (c1 + r1) (c2 + r2) <= c1 + r1) by apply Rmin_l. assert (Rmin (c1 + r1) (c2 + r2) <= c2 + r2) by apply Rmin_r. assert (Rmin (x - Rmax (c1 - r1) (c2 - r2)) (Rmin (c1 + r1) (c2 + r2) - x) <= x - Rmax (c1 - r1) (c2 - r2)) by apply Rmin_l. assert (Rmin (x - Rmax (c1 - r1) (c2 - r2)) (Rmin (c1 + r1) (c2 + r2) - x) <= Rmin (c1 + r1) (c2 + r2) - x) by apply Rmin_r. simpl. intros y h; apply Rabs_def2 in h; destruct h;split; apply Rabs_def1; fourier. Qed. Lemma Boule_center : forall x r, Boule x r x. Proof. intros x [r rpos]; unfold Boule, Rminus; simpl; rewrite Rplus_opp_r. rewrite Rabs_pos_eq;[assumption | apply Rle_refl]. Qed. Lemma derivable_pt_lim_CVU : forall (fn fn':nat -> R -> R) (f g:R->R) (x:R) c r, Boule c r x -> (forall y n, Boule c r y -> derivable_pt_lim (fn n) y (fn' n y)) -> (forall y, Boule c r y -> Un_cv (fun n => fn n y) (f y)) -> (CVU fn' g c r) -> (forall y, Boule c r y -> continuity_pt g y) -> derivable_pt_lim f x (g x). Proof. intros fn fn' f g x c' r xinb Dfn_eq_fn' fn_CV_f fn'_CVU_g g_cont eps eps_pos. assert (eps_8_pos : 0 < eps / 8) by fourier. elim (g_cont x xinb _ eps_8_pos) ; clear g_cont ; intros delta1 (delta1_pos, g_cont). destruct (Ball_in_inter _ _ _ _ _ xinb (Boule_center x (mkposreal _ delta1_pos))) as [delta Pdelta]. exists delta; intros h hpos hinbdelta. assert (eps'_pos : 0 < (Rabs h) * eps / 4). unfold Rdiv ; rewrite Rmult_assoc ; apply Rmult_lt_0_compat. apply Rabs_pos_lt ; assumption. fourier. destruct (fn_CV_f x xinb ((Rabs h) * eps / 4) eps'_pos) as [N2 fnx_CV_fx]. assert (xhinbxdelta : Boule x delta (x + h)). clear -hinbdelta; apply Rabs_def2 in hinbdelta; unfold Boule; simpl. destruct hinbdelta; apply Rabs_def1; fourier. assert (t : Boule c' r (x + h)). apply Pdelta in xhinbxdelta; tauto. destruct (fn_CV_f (x+h) t ((Rabs h) * eps / 4) eps'_pos) as [N1 fnxh_CV_fxh]. clear fn_CV_f t. destruct (fn'_CVU_g (eps/8) eps_8_pos) as [N3 fn'c_CVU_gc]. pose (N := ((N1 + N2) + N3)%nat). 
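(* Core estimate: for N beyond N1, N2 and N3, split the error into the
   convergence defects at x + h and at x plus a mean-value term for fn N,
   and bound their sum by Rabs h * eps. *)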
assert (Main : Rabs ((f (x+h) - fn N (x+h)) - (f x - fn N x) + (fn N (x+h) - fn N x - h * (g x))) < (Rabs h)*eps). apply Rle_lt_trans with (Rabs (f (x + h) - fn N (x + h) - (f x - fn N x)) + Rabs ((fn N (x + h) - fn N x - h * g x))). solve[apply Rabs_triang]. apply Rle_lt_trans with (Rabs (f (x + h) - fn N (x + h)) + Rabs (- (f x - fn N x)) + Rabs (fn N (x + h) - fn N x - h * g x)). solve[apply Rplus_le_compat_r ; apply Rabs_triang]. rewrite Rabs_Ropp. case (Rlt_le_dec h 0) ; intro sgn_h. assert (pr1 : forall c : R, x + h < c < x -> derivable_pt (fn N) c). intros c c_encad ; unfold derivable_pt. exists (fn' N c) ; apply Dfn_eq_fn'. assert (t : Boule x delta c). apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta; destruct c_encad. apply Rabs_def2 in xinb; apply Rabs_def1; fourier. apply Pdelta in t; tauto. assert (pr2 : forall c : R, x + h < c < x -> derivable_pt id c). solve[intros; apply derivable_id]. assert (xh_x : x+h < x) by fourier. assert (pr3 : forall c : R, x + h <= c <= x -> continuity_pt (fn N) c). intros c c_encad ; apply derivable_continuous_pt. exists (fn' N c) ; apply Dfn_eq_fn' ; intuition. assert (t : Boule x delta c). apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta. apply Rabs_def2 in xinb; apply Rabs_def1; fourier. apply Pdelta in t; tauto. assert (pr4 : forall c : R, x + h <= c <= x -> continuity_pt id c). solve[intros; apply derivable_continuous ; apply derivable_id]. destruct (MVT (fn N) id (x+h) x pr1 pr2 xh_x pr3 pr4) as [c [P Hc]]. assert (Hc' : h * derive_pt (fn N) c (pr1 c P) = (fn N (x+h) - fn N x)). apply Rmult_eq_reg_l with (-1). replace (-1 * (h * derive_pt (fn N) c (pr1 c P))) with (-h * derive_pt (fn N) c (pr1 c P)) by field. replace (-1 * (fn N (x + h) - fn N x)) with (- (fn N (x + h) - fn N x)) by field. replace (-h) with (id x - id (x + h)) by (unfold id; field). rewrite <- Rmult_1_r ; replace 1 with (derive_pt id c (pr2 c P)) by reg. replace (- (fn N (x + h) - fn N x)) with (fn N x - fn N (x + h)) by field. assumption. solve[apply Rlt_not_eq ; intuition]. rewrite <- Hc'; clear Hc Hc'. replace (derive_pt (fn N) c (pr1 c P)) with (fn' N c). replace (h * fn' N c - h * g x) with (h * (fn' N c - g x)) by field. rewrite Rabs_mult. apply Rlt_trans with (Rabs h * eps / 4 + Rabs (f x - fn N x) + Rabs h * Rabs (fn' N c - g x)). apply Rplus_lt_compat_r ; apply Rplus_lt_compat_r ; unfold R_dist in fnxh_CV_fxh ; rewrite Rabs_minus_sym ; apply fnxh_CV_fxh. unfold N; omega. apply Rlt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * Rabs (fn' N c - g x)). apply Rplus_lt_compat_r ; apply Rplus_lt_compat_l. unfold R_dist in fnx_CV_fx ; rewrite Rabs_minus_sym ; apply fnx_CV_fx. unfold N ; omega. replace (fn' N c - g x) with ((fn' N c - g c) + (g c - g x)) by field. apply Rle_lt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * Rabs (fn' N c - g c) + Rabs h * Rabs (g c - g x)). rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite Rplus_assoc ; apply Rplus_le_compat_l ; apply Rplus_le_compat_l ; rewrite <- Rmult_plus_distr_l ; apply Rmult_le_compat_l. solve[apply Rabs_pos]. solve[apply Rabs_triang]. apply Rlt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * (eps / 8) + Rabs h * Rabs (g c - g x)). apply Rplus_lt_compat_r; apply Rplus_lt_compat_l; apply Rmult_lt_compat_l. apply Rabs_pos_lt ; assumption. rewrite Rabs_minus_sym ; apply fn'c_CVU_gc. unfold N ; omega. assert (t : Boule x delta c). destruct P. apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta. apply Rabs_def2 in xinb; apply Rabs_def1; fourier. apply Pdelta in t; tauto. 
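(* Last summand: c lies between x + h and x, hence within delta1 of x, so the
   continuity of g bounds Rabs (g c - g x) by eps / 8. *)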
apply Rlt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * (eps / 8) + Rabs h * (eps / 8)). rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite Rplus_assoc ; apply Rplus_lt_compat_l ; apply Rplus_lt_compat_l ; rewrite <- Rmult_plus_distr_l ; rewrite <- Rmult_plus_distr_l ; apply Rmult_lt_compat_l. apply Rabs_pos_lt ; assumption. apply Rplus_lt_compat_l ; simpl in g_cont ; apply g_cont ; split ; [unfold D_x ; split |]. solve[unfold no_cond ; intuition]. apply Rgt_not_eq ; exact (proj2 P). apply Rlt_trans with (Rabs h). apply Rabs_def1. apply Rlt_trans with 0. destruct P; fourier. apply Rabs_pos_lt ; assumption. rewrite <- Rabs_Ropp, Rabs_pos_eq, Ropp_involutive;[ | fourier]. destruct P; fourier. clear -Pdelta xhinbxdelta. apply Pdelta in xhinbxdelta; destruct xhinbxdelta as [_ P']. apply Rabs_def2 in P'; simpl in P'; destruct P'; apply Rabs_def1; fourier. rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite <- Rmult_plus_distr_l. replace (Rabs h * eps / 4 + (Rabs h * eps / 4 + Rabs h * (eps / 8 + eps / 8))) with (Rabs h * (eps / 4 + eps / 4 + eps / 8 + eps / 8)) by field. apply Rmult_lt_compat_l. apply Rabs_pos_lt ; assumption. fourier. assert (H := pr1 c P) ; elim H ; clear H ; intros l Hl. assert (Temp : l = fn' N c). assert (bc'rc : Boule c' r c). assert (t : Boule x delta c). clear - xhinbxdelta P. destruct P; apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta. apply Rabs_def1; fourier. apply Pdelta in t; tauto. assert (Hl' := Dfn_eq_fn' c N bc'rc). unfold derivable_pt_abs in Hl; clear -Hl Hl'. apply uniqueness_limite with (f:= fn N) (x:=c) ; assumption. rewrite <- Temp. assert (Hl' : derivable_pt (fn N) c). exists l ; apply Hl. rewrite pr_nu_var with (g:= fn N) (pr2:=Hl'). elim Hl' ; clear Hl' ; intros l' Hl'. assert (Main : l = l'). apply uniqueness_limite with (f:= fn N) (x:=c) ; assumption. rewrite Main ; reflexivity. reflexivity. assert (h_pos : h > 0). case sgn_h ; intro Hyp. assumption. apply False_ind ; apply hpos ; symmetry ; assumption. clear sgn_h. assert (pr1 : forall c : R, x < c < x + h -> derivable_pt (fn N) c). intros c c_encad ; unfold derivable_pt. exists (fn' N c) ; apply Dfn_eq_fn'. assert (t : Boule x delta c). apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta; destruct c_encad. apply Rabs_def2 in xinb; apply Rabs_def1; fourier. apply Pdelta in t; tauto. assert (pr2 : forall c : R, x < c < x + h -> derivable_pt id c). solve[intros; apply derivable_id]. assert (xh_x : x < x + h) by fourier. assert (pr3 : forall c : R, x <= c <= x + h -> continuity_pt (fn N) c). intros c c_encad ; apply derivable_continuous_pt. exists (fn' N c) ; apply Dfn_eq_fn' ; intuition. assert (t : Boule x delta c). apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta. apply Rabs_def2 in xinb; apply Rabs_def1; fourier. apply Pdelta in t; tauto. assert (pr4 : forall c : R, x <= c <= x + h -> continuity_pt id c). solve[intros; apply derivable_continuous ; apply derivable_id]. destruct (MVT (fn N) id x (x+h) pr1 pr2 xh_x pr3 pr4) as [c [P Hc]]. assert (Hc' : h * derive_pt (fn N) c (pr1 c P) = fn N (x+h) - fn N x). pattern h at 1; replace h with (id (x + h) - id x) by (unfold id; field). rewrite <- Rmult_1_r ; replace 1 with (derive_pt id c (pr2 c P)) by reg. assumption. rewrite <- Hc'; clear Hc Hc'. replace (derive_pt (fn N) c (pr1 c P)) with (fn' N c). replace (h * fn' N c - h * g x) with (h * (fn' N c - g x)) by field. rewrite Rabs_mult. apply Rlt_trans with (Rabs h * eps / 4 + Rabs (f x - fn N x) + Rabs h * Rabs (fn' N c - g x)). 
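(* Positive h: the same three-step bound as in the negative case, using the
   convergence at x + h and at x, the CVU hypothesis for fn' N c, and the
   continuity of g. *)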
apply Rplus_lt_compat_r ; apply Rplus_lt_compat_r ; unfold R_dist in fnxh_CV_fxh ; rewrite Rabs_minus_sym ; apply fnxh_CV_fxh. unfold N; omega. apply Rlt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * Rabs (fn' N c - g x)). apply Rplus_lt_compat_r ; apply Rplus_lt_compat_l. unfold R_dist in fnx_CV_fx ; rewrite Rabs_minus_sym ; apply fnx_CV_fx. unfold N ; omega. replace (fn' N c - g x) with ((fn' N c - g c) + (g c - g x)) by field. apply Rle_lt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * Rabs (fn' N c - g c) + Rabs h * Rabs (g c - g x)). rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite Rplus_assoc ; apply Rplus_le_compat_l ; apply Rplus_le_compat_l ; rewrite <- Rmult_plus_distr_l ; apply Rmult_le_compat_l. solve[apply Rabs_pos]. solve[apply Rabs_triang]. apply Rlt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * (eps / 8) + Rabs h * Rabs (g c - g x)). apply Rplus_lt_compat_r; apply Rplus_lt_compat_l; apply Rmult_lt_compat_l. apply Rabs_pos_lt ; assumption. rewrite Rabs_minus_sym ; apply fn'c_CVU_gc. unfold N ; omega. assert (t : Boule x delta c). destruct P. apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta. apply Rabs_def2 in xinb; apply Rabs_def1; fourier. apply Pdelta in t; tauto. apply Rlt_trans with (Rabs h * eps / 4 + Rabs h * eps / 4 + Rabs h * (eps / 8) + Rabs h * (eps / 8)). rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite Rplus_assoc ; apply Rplus_lt_compat_l ; apply Rplus_lt_compat_l ; rewrite <- Rmult_plus_distr_l ; rewrite <- Rmult_plus_distr_l ; apply Rmult_lt_compat_l. apply Rabs_pos_lt ; assumption. apply Rplus_lt_compat_l ; simpl in g_cont ; apply g_cont ; split ; [unfold D_x ; split |]. solve[unfold no_cond ; intuition]. apply Rlt_not_eq ; exact (proj1 P). apply Rlt_trans with (Rabs h). apply Rabs_def1. destruct P; rewrite Rabs_pos_eq;fourier. apply Rle_lt_trans with 0. assert (t := Rabs_pos h); clear -t; fourier. clear -P; destruct P; fourier. clear -Pdelta xhinbxdelta. apply Pdelta in xhinbxdelta; destruct xhinbxdelta as [_ P']. apply Rabs_def2 in P'; simpl in P'; destruct P'; apply Rabs_def1; fourier. rewrite Rplus_assoc ; rewrite Rplus_assoc ; rewrite <- Rmult_plus_distr_l. replace (Rabs h * eps / 4 + (Rabs h * eps / 4 + Rabs h * (eps / 8 + eps / 8))) with (Rabs h * (eps / 4 + eps / 4 + eps / 8 + eps / 8)) by field. apply Rmult_lt_compat_l. apply Rabs_pos_lt ; assumption. fourier. assert (H := pr1 c P) ; elim H ; clear H ; intros l Hl. assert (Temp : l = fn' N c). assert (bc'rc : Boule c' r c). assert (t : Boule x delta c). clear - xhinbxdelta P. destruct P; apply Rabs_def2 in xhinbxdelta; destruct xhinbxdelta. apply Rabs_def1; fourier. apply Pdelta in t; tauto. assert (Hl' := Dfn_eq_fn' c N bc'rc). unfold derivable_pt_abs in Hl; clear -Hl Hl'. apply uniqueness_limite with (f:= fn N) (x:=c) ; assumption. rewrite <- Temp. assert (Hl' : derivable_pt (fn N) c). exists l ; apply Hl. rewrite pr_nu_var with (g:= fn N) (pr2:=Hl'). elim Hl' ; clear Hl' ; intros l' Hl'. assert (Main : l = l'). apply uniqueness_limite with (f:= fn N) (x:=c) ; assumption. rewrite Main ; reflexivity. reflexivity. replace ((f (x + h) - f x) / h - g x) with ((/h) * ((f (x + h) - f x) - h * g x)). rewrite Rabs_mult ; rewrite Rabs_Rinv. replace eps with (/ Rabs h * (Rabs h * eps)). apply Rmult_lt_compat_l. apply Rinv_0_lt_compat ; apply Rabs_pos_lt ; assumption. replace (f (x + h) - f x - h * g x) with (f (x + h) - fn N (x + h) - (f x - fn N x) + (fn N (x + h) - fn N x - h * g x)) by field. assumption. 
field ; apply Rgt_not_eq ; apply Rabs_pos_lt ; assumption. assumption. field. assumption. Qed.coq-8.4pl2/theories/Reals/Rcomplete.v0000640000175000001440000001511312010532755016641 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R, Cauchy_crit Un -> { l:R | Un_cv Un l } . Proof. intros. set (Vn := sequence_minorant Un (cauchy_min Un H)). set (Wn := sequence_majorant Un (cauchy_maj Un H)). assert (H0 := maj_cv Un H). fold Wn in H0. assert (H1 := min_cv Un H). fold Vn in H1. elim H0; intros. elim H1; intros. cut (x = x0). intros. exists x. rewrite <- H2 in p0. unfold Un_cv. intros. unfold Un_cv in p; unfold Un_cv in p0. cut (0 < eps / 3). intro. elim (p (eps / 3) H4); intros. elim (p0 (eps / 3) H4); intros. exists (max x1 x2). intros. unfold R_dist. apply Rle_lt_trans with (Rabs (Un n - Vn n) + Rabs (Vn n - x)). replace (Un n - x) with (Un n - Vn n + (Vn n - x)); [ apply Rabs_triang | ring ]. apply Rle_lt_trans with (Rabs (Wn n - Vn n) + Rabs (Vn n - x)). do 2 rewrite <- (Rplus_comm (Rabs (Vn n - x))). apply Rplus_le_compat_l. repeat rewrite Rabs_right. unfold Rminus; do 2 rewrite <- (Rplus_comm (- Vn n)); apply Rplus_le_compat_l. assert (H8 := Vn_Un_Wn_order Un (cauchy_maj Un H) (cauchy_min Un H)). fold Vn Wn in H8. elim (H8 n); intros. assumption. apply Rle_ge. unfold Rminus; apply Rplus_le_reg_l with (Vn n). rewrite Rplus_0_r. replace (Vn n + (Wn n + - Vn n)) with (Wn n); [ idtac | ring ]. assert (H8 := Vn_Un_Wn_order Un (cauchy_maj Un H) (cauchy_min Un H)). fold Vn Wn in H8. elim (H8 n); intros. apply Rle_trans with (Un n); assumption. apply Rle_ge. unfold Rminus; apply Rplus_le_reg_l with (Vn n). rewrite Rplus_0_r. replace (Vn n + (Un n + - Vn n)) with (Un n); [ idtac | ring ]. assert (H8 := Vn_Un_Wn_order Un (cauchy_maj Un H) (cauchy_min Un H)). fold Vn Wn in H8. elim (H8 n); intros. assumption. apply Rle_lt_trans with (Rabs (Wn n - x) + Rabs (x - Vn n) + Rabs (Vn n - x)). do 2 rewrite <- (Rplus_comm (Rabs (Vn n - x))). apply Rplus_le_compat_l. replace (Wn n - Vn n) with (Wn n - x + (x - Vn n)); [ apply Rabs_triang | ring ]. apply Rlt_le_trans with (eps / 3 + eps / 3 + eps / 3). repeat apply Rplus_lt_compat. unfold R_dist in H5. apply H5. unfold ge; apply le_trans with (max x1 x2). apply le_max_l. assumption. rewrite <- Rabs_Ropp. replace (- (x - Vn n)) with (Vn n - x); [ idtac | ring ]. unfold R_dist in H6. apply H6. unfold ge; apply le_trans with (max x1 x2). apply le_max_r. assumption. unfold R_dist in H6. apply H6. unfold ge; apply le_trans with (max x1 x2). apply le_max_r. assumption. right. pattern eps at 4; replace eps with (3 * (eps / 3)). ring. unfold Rdiv; rewrite <- Rmult_assoc; apply Rinv_r_simpl_m; discrR. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. apply cond_eq. intros. cut (0 < eps / 5). intro. unfold Un_cv in p; unfold Un_cv in p0. unfold R_dist in p; unfold R_dist in p0. elim (p (eps / 5) H3); intros N1 H4. elim (p0 (eps / 5) H3); intros N2 H5. unfold Cauchy_crit in H. unfold R_dist in H. elim (H (eps / 5) H3); intros N3 H6. set (N := max (max N1 N2) N3). apply Rle_lt_trans with (Rabs (x - Wn N) + Rabs (Wn N - x0)). replace (x - x0) with (x - Wn N + (Wn N - x0)); [ apply Rabs_triang | ring ]. apply Rle_lt_trans with (Rabs (x - Wn N) + Rabs (Wn N - Vn N) + Rabs (Vn N - x0)). rewrite Rplus_assoc. apply Rplus_le_compat_l. replace (Wn N - x0) with (Wn N - Vn N + (Vn N - x0)); [ apply Rabs_triang | ring ]. 
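(* eps is split as eps / 5 + 3 * (eps / 5) + eps / 5: the outer pieces come from the convergence of Wn to x and of Vn to x0, while the middle piece bounds Wn N - Vn N through nearby terms of Un (approx_maj, approx_min) together with the Cauchy hypothesis. *)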
replace eps with (eps / 5 + 3 * (eps / 5) + eps / 5). repeat apply Rplus_lt_compat. rewrite <- Rabs_Ropp. replace (- (x - Wn N)) with (Wn N - x); [ apply H4 | ring ]. unfold ge, N. apply le_trans with (max N1 N2); apply le_max_l. unfold Wn, Vn. unfold sequence_majorant, sequence_minorant. assert (H7 := approx_maj (fun k:nat => Un (N + k)%nat) (maj_ss Un N (cauchy_maj Un H))). assert (H8 := approx_min (fun k:nat => Un (N + k)%nat) (min_ss Un N (cauchy_min Un H))). cut (Wn N = majorant (fun k:nat => Un (N + k)%nat) (maj_ss Un N (cauchy_maj Un H))). cut (Vn N = minorant (fun k:nat => Un (N + k)%nat) (min_ss Un N (cauchy_min Un H))). intros. rewrite <- H9; rewrite <- H10. rewrite <- H9 in H8. rewrite <- H10 in H7. elim (H7 (eps / 5) H3); intros k2 H11. elim (H8 (eps / 5) H3); intros k1 H12. apply Rle_lt_trans with (Rabs (Wn N - Un (N + k2)%nat) + Rabs (Un (N + k2)%nat - Vn N)). replace (Wn N - Vn N) with (Wn N - Un (N + k2)%nat + (Un (N + k2)%nat - Vn N)); [ apply Rabs_triang | ring ]. apply Rle_lt_trans with (Rabs (Wn N - Un (N + k2)%nat) + Rabs (Un (N + k2)%nat - Un (N + k1)%nat) + Rabs (Un (N + k1)%nat - Vn N)). rewrite Rplus_assoc. apply Rplus_le_compat_l. replace (Un (N + k2)%nat - Vn N) with (Un (N + k2)%nat - Un (N + k1)%nat + (Un (N + k1)%nat - Vn N)); [ apply Rabs_triang | ring ]. replace (3 * (eps / 5)) with (eps / 5 + eps / 5 + eps / 5); [ repeat apply Rplus_lt_compat | ring ]. assumption. apply H6. unfold ge. apply le_trans with N. unfold N; apply le_max_r. apply le_plus_l. unfold ge. apply le_trans with N. unfold N; apply le_max_r. apply le_plus_l. rewrite <- Rabs_Ropp. replace (- (Un (N + k1)%nat - Vn N)) with (Vn N - Un (N + k1)%nat); [ assumption | ring ]. reflexivity. reflexivity. apply H5. unfold ge; apply le_trans with (max N1 N2). apply le_max_r. unfold N; apply le_max_l. pattern eps at 4; replace eps with (5 * (eps / 5)). ring. unfold Rdiv; rewrite <- Rmult_assoc; apply Rinv_r_simpl_m. discrR. unfold Rdiv; apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat. prove_sup0; try apply lt_O_Sn. Qed. coq-8.4pl2/theories/Reals/Rderiv.v0000640000175000001440000005224612010532755016152 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop) (y x:R) : Prop := D x /\ y <> x. (*********) Definition continue_in (f:R -> R) (D:R -> Prop) (x0:R) : Prop := limit1_in f (D_x D x0) (f x0) x0. (*********) Definition D_in (f d:R -> R) (D:R -> Prop) (x0:R) : Prop := limit1_in (fun x:R => (f x - f x0) / (x - x0)) (D_x D x0) (d x0) x0. (*********) Lemma cont_deriv : forall (f d:R -> R) (D:R -> Prop) (x0:R), D_in f d D x0 -> continue_in f D x0. Proof. unfold continue_in; unfold D_in; unfold limit1_in; unfold limit_in; unfold Rdiv; simpl; intros; elim (H eps H0); clear H; intros; elim H; clear H; intros; elim (Req_dec (d x0) 0); intro. split with (Rmin 1 x); split. elim (Rmin_Rgt 1 x 0); intros a b; apply (b (conj Rlt_0_1 H)). intros; elim H3; clear H3; intros; generalize (let (H1, H2) := Rmin_Rgt 1 x (R_dist x1 x0) in H1); unfold Rgt; intro; elim (H5 H4); clear H5; intros; generalize (H1 x1 (conj H3 H6)); clear H1; intro; unfold D_x in H3; elim H3; intros. rewrite H2 in H1; unfold R_dist; unfold R_dist in H1; cut (Rabs (f x1 - f x0) < eps * Rabs (x1 - x0)). intro; unfold R_dist in H5; generalize (Rmult_lt_compat_l eps (Rabs (x1 - x0)) 1 H0 H5); rewrite Rmult_1_r; intro; apply Rlt_trans with (r2 := eps * Rabs (x1 - x0)); assumption. 
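(* Proof of the cut: since d x0 = 0, hypothesis H1 bounds the difference quotient itself, so |f x1 - f x0| < eps * |x1 - x0|. The case d x0 <> 0 follows, with a finer choice of neighbourhood. *)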
rewrite (Rminus_0_r ((f x1 - f x0) * / (x1 - x0))) in H1; rewrite Rabs_mult in H1; cut (x1 - x0 <> 0). intro; rewrite (Rabs_Rinv (x1 - x0) H9) in H1; generalize (Rmult_lt_compat_l (Rabs (x1 - x0)) (Rabs (f x1 - f x0) * / Rabs (x1 - x0)) eps (Rabs_pos_lt (x1 - x0) H9) H1); intro; rewrite Rmult_comm in H10; rewrite Rmult_assoc in H10; rewrite Rinv_l in H10. rewrite Rmult_1_r in H10; rewrite Rmult_comm; assumption. apply Rabs_no_R0; auto. apply Rminus_eq_contra; auto. (**) split with (Rmin (Rmin (/ 2) x) (eps * / Rabs (2 * d x0))); split. cut (Rmin (/ 2) x > 0). cut (eps * / Rabs (2 * d x0) > 0). intros; elim (Rmin_Rgt (Rmin (/ 2) x) (eps * / Rabs (2 * d x0)) 0); intros a b; apply (b (conj H4 H3)). apply Rmult_gt_0_compat; auto. unfold Rgt; apply Rinv_0_lt_compat; apply Rabs_pos_lt; apply Rmult_integral_contrapositive; split. discrR. assumption. elim (Rmin_Rgt (/ 2) x 0); intros a b; cut (0 < 2). intro; generalize (Rinv_0_lt_compat 2 H3); intro; fold (/ 2 > 0) in H4; apply (b (conj H4 H)). fourier. intros; elim H3; clear H3; intros; generalize (let (H1, H2) := Rmin_Rgt (Rmin (/ 2) x) (eps * / Rabs (2 * d x0)) (R_dist x1 x0) in H1); unfold Rgt; intro; elim (H5 H4); clear H5; intros; generalize (let (H1, H2) := Rmin_Rgt (/ 2) x (R_dist x1 x0) in H1); unfold Rgt; intro; elim (H7 H5); clear H7; intros; clear H4 H5; generalize (H1 x1 (conj H3 H8)); clear H1; intro; unfold D_x in H3; elim H3; intros; generalize (not_eq_sym H5); clear H5; intro H5; generalize (Rminus_eq_contra x1 x0 H5); intro; generalize H1; pattern (d x0) at 1; rewrite <- (let (H1, H2) := Rmult_ne (d x0) in H2); rewrite <- (Rinv_l (x1 - x0) H9); unfold R_dist; unfold Rminus at 1; rewrite (Rmult_comm (f x1 - f x0) (/ (x1 - x0))); rewrite (Rmult_comm (/ (x1 - x0) * (x1 - x0)) (d x0)); rewrite <- (Ropp_mult_distr_l_reverse (d x0) (/ (x1 - x0) * (x1 - x0))); rewrite (Rmult_comm (- d x0) (/ (x1 - x0) * (x1 - x0))); rewrite (Rmult_assoc (/ (x1 - x0)) (x1 - x0) (- d x0)); rewrite <- (Rmult_plus_distr_l (/ (x1 - x0)) (f x1 - f x0) ((x1 - x0) * - d x0)) ; rewrite (Rabs_mult (/ (x1 - x0)) (f x1 - f x0 + (x1 - x0) * - d x0)); clear H1; intro; generalize (Rmult_lt_compat_l (Rabs (x1 - x0)) (Rabs (/ (x1 - x0)) * Rabs (f x1 - f x0 + (x1 - x0) * - d x0)) eps (Rabs_pos_lt (x1 - x0) H9) H1); rewrite <- (Rmult_assoc (Rabs (x1 - x0)) (Rabs (/ (x1 - x0))) (Rabs (f x1 - f x0 + (x1 - x0) * - d x0))); rewrite (Rabs_Rinv (x1 - x0) H9); rewrite (Rinv_r (Rabs (x1 - x0)) (Rabs_no_R0 (x1 - x0) H9)); rewrite (let (H1, H2) := Rmult_ne (Rabs (f x1 - f x0 + (x1 - x0) * - d x0)) in H2) ; generalize (Rabs_triang_inv (f x1 - f x0) ((x1 - x0) * d x0)); intro; rewrite (Rmult_comm (x1 - x0) (- d x0)); rewrite (Ropp_mult_distr_l_reverse (d x0) (x1 - x0)); fold (f x1 - f x0 - d x0 * (x1 - x0)); rewrite (Rmult_comm (x1 - x0) (d x0)) in H10; clear H1; intro; generalize (Rle_lt_trans (Rabs (f x1 - f x0) - Rabs (d x0 * (x1 - x0))) (Rabs (f x1 - f x0 - d x0 * (x1 - x0))) (Rabs (x1 - x0) * eps) H10 H1); clear H1; intro; generalize (Rplus_lt_compat_l (Rabs (d x0 * (x1 - x0))) (Rabs (f x1 - f x0) - Rabs (d x0 * (x1 - x0))) ( Rabs (x1 - x0) * eps) H1); unfold Rminus at 2; rewrite (Rplus_comm (Rabs (f x1 - f x0)) (- Rabs (d x0 * (x1 - x0)))); rewrite <- (Rplus_assoc (Rabs (d x0 * (x1 - x0))) (- Rabs (d x0 * (x1 - x0))) (Rabs (f x1 - f x0))); rewrite (Rplus_opp_r (Rabs (d x0 * (x1 - x0)))); rewrite (let (H1, H2) := Rplus_ne (Rabs (f x1 - f x0)) in H2); clear H1; intro; cut (Rabs (d x0 * (x1 - x0)) + Rabs (x1 - x0) * eps < eps). 
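(* The final cut is proved by bounding each summand by eps / 2: |x1 - x0| < / 2 gives |x1 - x0| * eps < eps / 2, and |x1 - x0| < eps * / |2 * d x0| gives |d x0 * (x1 - x0)| < eps / 2. *)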
intro; apply (Rlt_trans (Rabs (f x1 - f x0)) (Rabs (d x0 * (x1 - x0)) + Rabs (x1 - x0) * eps) eps H1 H11). clear H1 H5 H3 H10; generalize (Rabs_pos_lt (d x0) H2); intro; unfold Rgt in H0; generalize (Rmult_lt_compat_l eps (R_dist x1 x0) (/ 2) H0 H7); clear H7; intro; generalize (Rmult_lt_compat_l (Rabs (d x0)) (R_dist x1 x0) ( eps * / Rabs (2 * d x0)) H1 H6); clear H6; intro; rewrite (Rmult_comm eps (R_dist x1 x0)) in H3; unfold R_dist in H3, H5; rewrite <- (Rabs_mult (d x0) (x1 - x0)) in H5; rewrite (Rabs_mult 2 (d x0)) in H5; cut (Rabs 2 <> 0). intro; fold (Rabs (d x0) > 0) in H1; rewrite (Rinv_mult_distr (Rabs 2) (Rabs (d x0)) H6 (Rlt_dichotomy_converse (Rabs (d x0)) 0 (or_intror (Rabs (d x0) < 0) H1))) in H5; rewrite (Rmult_comm (Rabs (d x0)) (eps * (/ Rabs 2 * / Rabs (d x0)))) in H5; rewrite <- (Rmult_assoc eps (/ Rabs 2) (/ Rabs (d x0))) in H5; rewrite (Rmult_assoc (eps * / Rabs 2) (/ Rabs (d x0)) (Rabs (d x0))) in H5; rewrite (Rinv_l (Rabs (d x0)) (Rlt_dichotomy_converse (Rabs (d x0)) 0 (or_intror (Rabs (d x0) < 0) H1))) in H5; rewrite (let (H1, H2) := Rmult_ne (eps * / Rabs 2) in H1) in H5; cut (Rabs 2 = 2). intro; rewrite H7 in H5; generalize (Rplus_lt_compat (Rabs (d x0 * (x1 - x0))) (eps * / 2) (Rabs (x1 - x0) * eps) (eps * / 2) H5 H3); intro; rewrite eps2 in H10; assumption. unfold Rabs; case (Rcase_abs 2); auto. intro; cut (0 < 2). intro ; elim (Rlt_asym 0 2 H7 r). fourier. apply Rabs_no_R0. discrR. Qed. (*********) Lemma Dconst : forall (D:R -> Prop) (y x0:R), D_in (fun x:R => y) (fun x:R => 0) D x0. Proof. unfold D_in; intros; unfold limit1_in; unfold limit_in; unfold Rdiv; intros; simpl; split with eps; split; auto. intros; rewrite (Rminus_diag_eq y y (eq_refl y)); rewrite Rmult_0_l; unfold R_dist; rewrite (Rminus_diag_eq 0 0 (eq_refl 0)); unfold Rabs; case (Rcase_abs 0); intro. absurd (0 < 0); auto. red; intro; apply (Rlt_irrefl 0 H1). unfold Rgt in H0; assumption. Qed. (*********) Lemma Dx : forall (D:R -> Prop) (x0:R), D_in (fun x:R => x) (fun x:R => 1) D x0. Proof. unfold D_in; unfold Rdiv; intros; unfold limit1_in; unfold limit_in; intros; simpl; split with eps; split; auto. intros; elim H0; clear H0; intros; unfold D_x in H0; elim H0; intros; rewrite (Rinv_r (x - x0) (Rminus_eq_contra x x0 (not_eq_sym H3))); unfold R_dist; rewrite (Rminus_diag_eq 1 1 (eq_refl 1)); unfold Rabs; case (Rcase_abs 0); intro. absurd (0 < 0); auto. red; intro; apply (Rlt_irrefl 0 r). unfold Rgt in H; assumption. Qed. (*********) Lemma Dadd : forall (D:R -> Prop) (df dg f g:R -> R) (x0:R), D_in f df D x0 -> D_in g dg D x0 -> D_in (fun x:R => f x + g x) (fun x:R => df x + dg x) D x0. Proof. unfold D_in; intros; generalize (limit_plus (fun x:R => (f x - f x0) * / (x - x0)) (fun x:R => (g x - g x0) * / (x - x0)) (D_x D x0) ( df x0) (dg x0) x0 H H0); clear H H0; unfold limit1_in; unfold limit_in; simpl; intros; elim (H eps H0); clear H; intros; elim H; clear H; intros; split with x; split; auto; intros; generalize (H1 x1 H2); clear H1; intro; rewrite (Rmult_comm (f x1 - f x0) (/ (x1 - x0))) in H1; rewrite (Rmult_comm (g x1 - g x0) (/ (x1 - x0))) in H1; rewrite <- (Rmult_plus_distr_l (/ (x1 - x0)) (f x1 - f x0) (g x1 - g x0)) in H1; rewrite (Rmult_comm (/ (x1 - x0)) (f x1 - f x0 + (g x1 - g x0))) in H1; cut (f x1 - f x0 + (g x1 - g x0) = f x1 + g x1 - (f x0 + g x0)). intro; rewrite H3 in H1; assumption. ring. Qed. (*********) Lemma Dmult : forall (D:R -> Prop) (df dg f g:R -> R) (x0:R), D_in f df D x0 -> D_in g dg D x0 -> D_in (fun x:R => f x * g x) (fun x:R => df x * g x + f x * dg x) D x0. 
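(* Product rule for D_in: the proof combines continuity of f at x0 (via cont_deriv) with the limit lemmas limit_mul and limit_plus. *)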
Proof. intros; unfold D_in; generalize H H0; intros; unfold D_in in H, H0; generalize (cont_deriv f df D x0 H1); unfold continue_in; intro; generalize (limit_mul (fun x:R => (g x - g x0) * / (x - x0)) ( fun x:R => f x) (D_x D x0) (dg x0) (f x0) x0 H0 H3); intro; cut (limit1_in (fun x:R => g x0) (D_x D x0) (g x0) x0). intro; generalize (limit_mul (fun x:R => (f x - f x0) * / (x - x0)) ( fun _:R => g x0) (D_x D x0) (df x0) (g x0) x0 H H5); clear H H0 H1 H2 H3 H5; intro; generalize (limit_plus (fun x:R => (f x - f x0) * / (x - x0) * g x0) (fun x:R => (g x - g x0) * / (x - x0) * f x) ( D_x D x0) (df x0 * g x0) (dg x0 * f x0) x0 H H4); clear H4 H; intro; unfold limit1_in in H; unfold limit_in in H; simpl in H; unfold limit1_in; unfold limit_in; simpl; intros; elim (H eps H0); clear H; intros; elim H; clear H; intros; split with x; split; auto; intros; generalize (H1 x1 H2); clear H1; intro; rewrite (Rmult_comm (f x1 - f x0) (/ (x1 - x0))) in H1; rewrite (Rmult_comm (g x1 - g x0) (/ (x1 - x0))) in H1; rewrite (Rmult_assoc (/ (x1 - x0)) (f x1 - f x0) (g x0)) in H1; rewrite (Rmult_assoc (/ (x1 - x0)) (g x1 - g x0) (f x1)) in H1; rewrite <- (Rmult_plus_distr_l (/ (x1 - x0)) ((f x1 - f x0) * g x0) ((g x1 - g x0) * f x1)) in H1; rewrite (Rmult_comm (/ (x1 - x0)) ((f x1 - f x0) * g x0 + (g x1 - g x0) * f x1)) in H1; rewrite (Rmult_comm (dg x0) (f x0)) in H1; cut ((f x1 - f x0) * g x0 + (g x1 - g x0) * f x1 = f x1 * g x1 - f x0 * g x0). intro; rewrite H3 in H1; assumption. ring. unfold limit1_in; unfold limit_in; simpl; intros; split with eps; split; auto; intros; elim (R_dist_refl (g x0) (g x0)); intros a b; rewrite (b (eq_refl (g x0))); unfold Rgt in H; assumption. Qed. (*********) Lemma Dmult_const : forall (D:R -> Prop) (f df:R -> R) (x0 a:R), D_in f df D x0 -> D_in (fun x:R => a * f x) (fun x:R => a * df x) D x0. Proof. intros; generalize (Dmult D (fun _:R => 0) df (fun _:R => a) f x0 (Dconst D a x0) H); unfold D_in; intros; rewrite (Rmult_0_l (f x0)) in H0; rewrite (let (H1, H2) := Rplus_ne (a * df x0) in H2) in H0; assumption. Qed. (*********) Lemma Dopp : forall (D:R -> Prop) (f df:R -> R) (x0:R), D_in f df D x0 -> D_in (fun x:R => - f x) (fun x:R => - df x) D x0. Proof. intros; generalize (Dmult_const D f df x0 (-1) H); unfold D_in; unfold limit1_in; unfold limit_in; intros; generalize (H0 eps H1); clear H0; intro; elim H0; clear H0; intros; elim H0; clear H0; simpl; intros; split with x; split; auto. intros; generalize (H2 x1 H3); clear H2; intro; rewrite Ropp_mult_distr_l_reverse in H2; rewrite Ropp_mult_distr_l_reverse in H2; rewrite Ropp_mult_distr_l_reverse in H2; rewrite (let (H1, H2) := Rmult_ne (f x1) in H2) in H2; rewrite (let (H1, H2) := Rmult_ne (f x0) in H2) in H2; rewrite (let (H1, H2) := Rmult_ne (df x0) in H2) in H2; assumption. Qed. (*********) Lemma Dminus : forall (D:R -> Prop) (df dg f g:R -> R) (x0:R), D_in f df D x0 -> D_in g dg D x0 -> D_in (fun x:R => f x - g x) (fun x:R => df x - dg x) D x0. Proof. unfold Rminus; intros; generalize (Dopp D g dg x0 H0); intro; apply (Dadd D df (fun x:R => - dg x) f (fun x:R => - g x) x0); assumption. Qed. (*********) Lemma Dx_pow_n : forall (n:nat) (D:R -> Prop) (x0:R), D_in (fun x:R => x ^ n) (fun x:R => INR n * x ^ (n - 1)) D x0. Proof. simple induction n; intros. simpl; rewrite Rmult_0_l; apply Dconst. intros; cut (n0 = (S n0 - 1)%nat); [ intro a; rewrite <- a; clear a | simpl; apply minus_n_O ]. 
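(* Inductive step: x ^ S n0 = x * x ^ n0, so Dmult applied to Dx and the induction hypothesis H yields the candidate derivative; the exponent arithmetic is finished by a case analysis on n0 = 0. *)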
generalize (Dmult D (fun _:R => 1) (fun x:R => INR n0 * x ^ (n0 - 1)) ( fun x:R => x) (fun x:R => x ^ n0) x0 (Dx D x0) ( H D x0)); unfold D_in; unfold limit1_in; unfold limit_in; simpl; intros; elim (H0 eps H1); clear H0; intros; elim H0; clear H0; intros; split with x; split; auto. intros; generalize (H2 x1 H3); clear H2 H3; intro; rewrite (let (H1, H2) := Rmult_ne (x0 ^ n0) in H2) in H2; rewrite (tech_pow_Rmult x1 n0) in H2; rewrite (tech_pow_Rmult x0 n0) in H2; rewrite (Rmult_comm (INR n0) (x0 ^ (n0 - 1))) in H2; rewrite <- (Rmult_assoc x0 (x0 ^ (n0 - 1)) (INR n0)) in H2; rewrite (tech_pow_Rmult x0 (n0 - 1)) in H2; elim (Peano_dec.eq_nat_dec n0 0) ; intros cond. rewrite cond in H2; rewrite cond; simpl in H2; simpl; cut (1 + x0 * 1 * 0 = 1 * 1); [ intro A; rewrite A in H2; assumption | ring ]. cut (n0 <> 0%nat -> S (n0 - 1) = n0); [ intro | omega ]; rewrite (H3 cond) in H2; rewrite (Rmult_comm (x0 ^ n0) (INR n0)) in H2; rewrite (tech_pow_Rplus x0 n0 n0) in H2; assumption. Qed. (*********) Lemma Dcomp : forall (Df Dg:R -> Prop) (df dg f g:R -> R) (x0:R), D_in f df Df x0 -> D_in g dg Dg (f x0) -> D_in (fun x:R => g (f x)) (fun x:R => df x * dg (f x)) (Dgf Df Dg f) x0. Proof. intros Df Dg df dg f g x0 H H0; generalize H H0; unfold D_in; unfold Rdiv; intros; generalize (limit_comp f (fun x:R => (g x - g (f x0)) * / (x - f x0)) ( D_x Df x0) (D_x Dg (f x0)) (f x0) (dg (f x0)) x0); intro; generalize (cont_deriv f df Df x0 H); intro; unfold continue_in in H4; generalize (H3 H4 H2); clear H3; intro; generalize (limit_mul (fun x:R => (g (f x) - g (f x0)) * / (f x - f x0)) (fun x:R => (f x - f x0) * / (x - x0)) (Dgf (D_x Df x0) (D_x Dg (f x0)) f) (dg (f x0)) ( df x0) x0 H3); intro; cut (limit1_in (fun x:R => (f x - f x0) * / (x - x0)) (Dgf (D_x Df x0) (D_x Dg (f x0)) f) (df x0) x0). intro; generalize (H5 H6); clear H5; intro; generalize (limit_mul (fun x:R => (f x - f x0) * / (x - x0)) ( fun x:R => dg (f x0)) (D_x Df x0) (df x0) (dg (f x0)) x0 H1 (limit_free (fun x:R => dg (f x0)) (D_x Df x0) x0 x0)); intro; unfold limit1_in; unfold limit_in; simpl; unfold limit1_in in H5, H7; unfold limit_in in H5, H7; simpl in H5, H7; intros; elim (H5 eps H8); elim (H7 eps H8); clear H5 H7; intros; elim H5; elim H7; clear H5 H7; intros; split with (Rmin x x1); split. elim (Rmin_Rgt x x1 0); intros a b; apply (b (conj H9 H5)); clear a b. intros; elim H11; clear H11; intros; elim (Rmin_Rgt x x1 (R_dist x2 x0)); intros a b; clear b; unfold Rgt in a; elim (a H12); clear H5 a; intros; unfold D_x, Dgf in H11, H7, H10; clear H12; elim (Req_dec (f x2) (f x0)); intro. elim H11; clear H11; intros; elim H11; clear H11; intros; generalize (H10 x2 (conj (conj H11 H14) H5)); intro; rewrite (Rminus_diag_eq (f x2) (f x0) H12) in H16; rewrite (Rmult_0_l (/ (x2 - x0))) in H16; rewrite (Rmult_0_l (dg (f x0))) in H16; rewrite H12; rewrite (Rminus_diag_eq (g (f x0)) (g (f x0)) (eq_refl (g (f x0)))); rewrite (Rmult_0_l (/ (x2 - x0))); assumption. clear H10 H5; elim H11; clear H11; intros; elim H5; clear H5; intros; cut (((Df x2 /\ x0 <> x2) /\ Dg (f x2) /\ f x0 <> f x2) /\ R_dist x2 x0 < x1); auto; intro; generalize (H7 x2 H14); intro; generalize (Rminus_eq_contra (f x2) (f x0) H12); intro; rewrite (Rmult_assoc (g (f x2) - g (f x0)) (/ (f x2 - f x0)) ((f x2 - f x0) * / (x2 - x0))) in H15; rewrite <- (Rmult_assoc (/ (f x2 - f x0)) (f x2 - f x0) (/ (x2 - x0))) in H15; rewrite (Rinv_l (f x2 - f x0) H16) in H15; rewrite (let (H1, H2) := Rmult_ne (/ (x2 - x0)) in H2) in H15; rewrite (Rmult_comm (df x0) (dg (f x0))); assumption. 
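(* Remaining cut: the difference quotient of f tends to df x0 on the restricted domain Dgf (D_x Df x0) (D_x Dg (f x0)) f, which follows from the hypothesis D_in f df Df x0 since that domain is included in D_x Df x0. *)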
clear H5 H3 H4 H2; unfold limit1_in; unfold limit_in; simpl; unfold limit1_in in H1; unfold limit_in in H1; simpl in H1; intros; elim (H1 eps H2); clear H1; intros; elim H1; clear H1; intros; split with x; split; auto; intros; unfold D_x, Dgf in H4, H3; elim H4; clear H4; intros; elim H4; clear H4; intros; exact (H3 x1 (conj H4 H5)). Qed. (*********) Lemma D_pow_n : forall (n:nat) (D:R -> Prop) (x0:R) (expr dexpr:R -> R), D_in expr dexpr D x0 -> D_in (fun x:R => expr x ^ n) (fun x:R => INR n * expr x ^ (n - 1) * dexpr x) ( Dgf D D expr) x0. Proof. intros n D x0 expr dexpr H; generalize (Dcomp D D dexpr (fun x:R => INR n * x ^ (n - 1)) expr ( fun x:R => x ^ n) x0 H (Dx_pow_n n D (expr x0))); intro; unfold D_in; unfold limit1_in; unfold limit_in; simpl; intros; unfold D_in in H0; unfold limit1_in in H0; unfold limit_in in H0; simpl in H0; elim (H0 eps H1); clear H0; intros; elim H0; clear H0; intros; split with x; split; intros; auto. cut (dexpr x0 * (INR n * expr x0 ^ (n - 1)) = INR n * expr x0 ^ (n - 1) * dexpr x0); [ intro Rew; rewrite <- Rew; exact (H2 x1 H3) | ring ]. Qed. coq-8.4pl2/theories/Reals/Rdefinitions.v0000640000175000001440000000411612010532755017345 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R. Parameter Rmult : R -> R -> R. Parameter Ropp : R -> R. Parameter Rinv : R -> R. Parameter Rlt : R -> R -> Prop. Parameter up : R -> Z. Infix "+" := Rplus : R_scope. Infix "*" := Rmult : R_scope. Notation "- x" := (Ropp x) : R_scope. Notation "/ x" := (Rinv x) : R_scope. Infix "<" := Rlt : R_scope. (***********************************************************) (**********) Definition Rgt (r1 r2:R) : Prop := r2 < r1. (**********) Definition Rle (r1 r2:R) : Prop := r1 < r2 \/ r1 = r2. (**********) Definition Rge (r1 r2:R) : Prop := Rgt r1 r2 \/ r1 = r2. (**********) Definition Rminus (r1 r2:R) : R := r1 + - r2. (**********) Definition Rdiv (r1 r2:R) : R := r1 * / r2. (**********) Infix "-" := Rminus : R_scope. Infix "/" := Rdiv : R_scope. Infix "<=" := Rle : R_scope. Infix ">=" := Rge : R_scope. Infix ">" := Rgt : R_scope. Notation "x <= y <= z" := (x <= y /\ y <= z) : R_scope. Notation "x <= y < z" := (x <= y /\ y < z) : R_scope. Notation "x < y < z" := (x < y /\ y < z) : R_scope. Notation "x < y <= z" := (x < y /\ y <= z) : R_scope. coq-8.4pl2/theories/Reals/Machin.v0000640000175000001440000001361211776416522016123 0ustar notinusersRequire Import Fourier. Require Import Rbase. Require Import Rtrigo1. Require Import Ranalysis_reg. Require Import Rfunctions. Require Import AltSeries. Require Import Rseries. Require Import SeqProp. Require Import PartSum. Require Import Ratan. Local Open Scope R_scope. (* Proving a few formulas in the style of John Machin to compute Pi *) Definition atan_sub u v := (u - v)/(1 + u * v). Lemma atan_sub_correct : forall u v, 1 + u * v <> 0 -> -PI/2 < atan u - atan v < PI/2 -> -PI/2 < atan (atan_sub u v) < PI/2 -> atan u = atan v + atan (atan_sub u v). intros u v pn0 uvint aint. assert (cos (atan u) <> 0). destruct (atan_bound u); apply Rgt_not_eq, cos_gt_0; auto. rewrite <- Ropp_div; assumption. assert (cos (atan v) <> 0). destruct (atan_bound v); apply Rgt_not_eq, cos_gt_0; auto. rewrite <- Ropp_div; assumption. assert (t : forall a b c, a - b = c -> a = b + c) by (intros; subst; field). apply t, tan_is_inj; clear t; try assumption. rewrite tan_minus; auto. rewrite !atan_right_inv; reflexivity. 
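(* Remaining side conditions of tan_minus: cos (atan u - atan v) <> 0 follows from the bound uvint, and 1 + tan (atan u) * tan (atan v) <> 0 reduces to the hypothesis 1 + u * v <> 0 through atan_right_inv. *)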
apply Rgt_not_eq, cos_gt_0; rewrite <- ?Ropp_div; tauto. rewrite !atan_right_inv; assumption. Qed. Lemma tech : forall x y , -1 <= x <= 1 -> -1 < y < 1 -> -PI/2 < atan x - atan y < PI/2. assert (ut := PI_RGT_0). intros x y [xm1 x1] [ym1 y1]. assert (-(PI/4) <= atan x). destruct xm1 as [xm1 | xm1]. rewrite <- atan_1, <- atan_opp; apply Rlt_le, atan_increasing. assumption. solve[rewrite <- xm1, atan_opp, atan_1; apply Rle_refl]. assert (-(PI/4) < atan y). rewrite <- atan_1, <- atan_opp; apply atan_increasing. assumption. assert (atan x <= PI/4). destruct x1 as [x1 | x1]. rewrite <- atan_1; apply Rlt_le, atan_increasing. assumption. solve[rewrite x1, atan_1; apply Rle_refl]. assert (atan y < PI/4). rewrite <- atan_1; apply atan_increasing. assumption. rewrite Ropp_div; split; fourier. Qed. (* A simple formula, reasonably efficient. *) Lemma Machin_2_3 : PI/4 = atan(/2) + atan(/3). assert (utility : 0 < PI/2) by (apply PI2_RGT_0). rewrite <- atan_1. rewrite (atan_sub_correct 1 (/2)). apply f_equal, f_equal; unfold atan_sub; field. apply Rgt_not_eq; fourier. apply tech; try split; try fourier. apply atan_bound. Qed. Lemma Machin_4_5_239 : PI/4 = 4 * atan (/5) - atan(/239). rewrite <- atan_1. rewrite (atan_sub_correct 1 (/5)); [ | apply Rgt_not_eq; fourier | apply tech; try split; fourier | apply atan_bound ]. replace (4 * atan (/5) - atan (/239)) with (atan (/5) + (atan (/5) + (atan (/5) + (atan (/5) + - atan (/239))))) by ring. apply f_equal. replace (atan_sub 1 (/5)) with (2/3) by (unfold atan_sub; field). rewrite (atan_sub_correct (2/3) (/5)); [apply f_equal | apply Rgt_not_eq; fourier | apply tech; try split; fourier | apply atan_bound ]. replace (atan_sub (2/3) (/5)) with (7/17) by (unfold atan_sub; field). rewrite (atan_sub_correct (7/17) (/5)); [apply f_equal | apply Rgt_not_eq; fourier | apply tech; try split; fourier | apply atan_bound ]. replace (atan_sub (7/17) (/5)) with (9/46) by (unfold atan_sub; field). rewrite (atan_sub_correct (9/46) (/5)); [apply f_equal | apply Rgt_not_eq; fourier | apply tech; try split; fourier | apply atan_bound ]. rewrite <- atan_opp; apply f_equal. unfold atan_sub; field. Qed. Lemma Machin_2_3_7 : PI/4 = 2 * atan(/3) + (atan (/7)). rewrite <- atan_1. rewrite (atan_sub_correct 1 (/3)); [ | apply Rgt_not_eq; fourier | apply tech; try split; fourier | apply atan_bound ]. replace (2 * atan (/3) + atan (/7)) with (atan (/3) + (atan (/3) + atan (/7))) by ring. apply f_equal. replace (atan_sub 1 (/3)) with (/2) by (unfold atan_sub; field). rewrite (atan_sub_correct (/2) (/3)); [apply f_equal | apply Rgt_not_eq; fourier | apply tech; try split; fourier | apply atan_bound ]. apply f_equal; unfold atan_sub; field. Qed. (* More efficient way to compute approximations of PI. *) Definition PI_2_3_7_tg n := 2 * Ratan_seq (/3) n + Ratan_seq (/7) n. Lemma PI_2_3_7_ineq : forall N : nat, sum_f_R0 (tg_alt PI_2_3_7_tg) (S (2 * N)) <= PI / 4 <= sum_f_R0 (tg_alt PI_2_3_7_tg) (2 * N). Proof. assert (dec3 : 0 <= /3 <= 1) by (split; fourier). assert (dec7 : 0 <= /7 <= 1) by (split; fourier). assert (decr : Un_decreasing PI_2_3_7_tg). apply Ratan_seq_decreasing in dec3. apply Ratan_seq_decreasing in dec7. intros n; apply Rplus_le_compat. apply Rmult_le_compat_l; [ fourier | exact (dec3 n)]. exact (dec7 n). assert (cv : Un_cv PI_2_3_7_tg 0). apply Ratan_seq_converging in dec3. apply Ratan_seq_converging in dec7. intros eps ep. assert (ep' : 0 < eps /3) by fourier. destruct (dec3 _ ep') as [N1 Pn1]; destruct (dec7 _ ep') as [N2 Pn2]. exists (N1 + N2)%nat; intros n Nn. 
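(* Convergence of PI_2_3_7_tg to 0: the distance is split as 2 * eps / 3 + eps / 3 over the two Ratan_seq components. The partial sums converge to 2 * v3 + v7, which is PI / 4 by Machin_2_3_7, and alternated_series_ineq gives the bracketing by consecutive partial sums. *)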
unfold PI_2_3_7_tg. rewrite <- (Rplus_0_l 0). apply Rle_lt_trans with (1 := R_dist_plus (2 * Ratan_seq (/3) n) 0 (Ratan_seq (/7) n) 0). replace eps with (2 * eps/3 + eps/3) by field. apply Rplus_lt_compat. unfold R_dist, Rminus, Rdiv. rewrite <- (Rmult_0_r 2), <- Ropp_mult_distr_r_reverse. rewrite <- Rmult_plus_distr_l, Rabs_mult, (Rabs_pos_eq 2);[|fourier]. rewrite Rmult_assoc; apply Rmult_lt_compat_l;[fourier | ]. apply (Pn1 n); omega. apply (Pn2 n); omega. rewrite Machin_2_3_7. rewrite !atan_eq_ps_atan; try (split; fourier). unfold ps_atan; destruct (in_int (/3)); destruct (in_int (/7)); try match goal with id : ~ _ |- _ => case id; split; fourier end. destruct (ps_atan_exists_1 (/3)) as [v3 Pv3]. destruct (ps_atan_exists_1 (/7)) as [v7 Pv7]. assert (main : Un_cv (sum_f_R0 (tg_alt PI_2_3_7_tg)) (2 * v3 + v7)). assert (main :Un_cv (fun n => 2 * sum_f_R0 (tg_alt (Ratan_seq (/3))) n + sum_f_R0 (tg_alt (Ratan_seq (/7))) n) (2 * v3 + v7)). apply CV_plus;[ | assumption]. apply CV_mult;[ | assumption]. exists 0%nat; intros; rewrite R_dist_eq; assumption. apply Un_cv_ext with (2 := main). intros n; rewrite scal_sum, <- plus_sum; apply sum_eq; intros. rewrite Rmult_comm; unfold PI_2_3_7_tg, tg_alt; field. intros N; apply (alternated_series_ineq _ _ _ decr cv main). Qed. coq-8.4pl2/theories/Reals/Rlimit.v0000640000175000001440000005051112010532755016150 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 -> eps * / 2 > 0. Proof. intros; fourier. Qed. (*********) Lemma eps2 : forall eps:R, eps * / 2 + eps * / 2 = eps. Proof. intro esp. assert (H := double_var esp). unfold Rdiv in H. symmetry ; exact H. Qed. (*********) Lemma eps4 : forall eps:R, eps * / (2 + 2) + eps * / (2 + 2) = eps * / 2. Proof. intro eps. replace (2 + 2) with 4. pattern eps at 3; rewrite double_var. rewrite (Rmult_plus_distr_r (eps / 2) (eps / 2) (/ 2)). unfold Rdiv. repeat rewrite Rmult_assoc. rewrite <- Rinv_mult_distr. reflexivity. discrR. discrR. ring. Qed. (*********) Lemma Rlt_eps2_eps : forall eps:R, eps > 0 -> eps * / 2 < eps. Proof. intros. pattern eps at 2; rewrite <- Rmult_1_r. repeat rewrite (Rmult_comm eps). apply Rmult_lt_compat_r. exact H. apply Rmult_lt_reg_l with 2. fourier. rewrite Rmult_1_r; rewrite <- Rinv_r_sym. fourier. discrR. Qed. (*********) Lemma Rlt_eps4_eps : forall eps:R, eps > 0 -> eps * / (2 + 2) < eps. Proof. intros. replace (2 + 2) with 4. pattern eps at 2; rewrite <- Rmult_1_r. repeat rewrite (Rmult_comm eps). apply Rmult_lt_compat_r. exact H. apply Rmult_lt_reg_l with 4. replace 4 with 4. apply Rmult_lt_0_compat; fourier. ring. rewrite Rmult_1_r; rewrite <- Rinv_r_sym. fourier. discrR. ring. Qed. (*********) Lemma prop_eps : forall r:R, (forall eps:R, eps > 0 -> r < eps) -> r <= 0. Proof. intros; elim (Rtotal_order r 0); intro. apply Rlt_le; assumption. elim H0; intro. apply Req_le; assumption. clear H0; generalize (H r H1); intro; generalize (Rlt_irrefl r); intro; exfalso; auto. Qed. (*********) Definition mul_factor (l l':R) := / (1 + (Rabs l + Rabs l')). (*********) Lemma mul_factor_wd : forall l l':R, 1 + (Rabs l + Rabs l') <> 0. Proof. intros; rewrite (Rplus_comm 1 (Rabs l + Rabs l')); apply tech_Rplus. cut (Rabs (l + l') <= Rabs l + Rabs l'). cut (0 <= Rabs (l + l')). exact (Rle_trans _ _ _). exact (Rabs_pos (l + l')). exact (Rabs_triang _ _). exact Rlt_0_1. Qed. (*********) Lemma mul_factor_gt : forall eps l l':R, eps > 0 -> eps * mul_factor l l' > 0. Proof. 
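(* eps * mul_factor l l' is positive: mul_factor l l' = / (1 + (Rabs l + Rabs l')) and 1 + (Rabs l + Rabs l') >= 1 > 0. *)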
intros; unfold Rgt; rewrite <- (Rmult_0_r eps); apply Rmult_lt_compat_l. assumption. unfold mul_factor; apply Rinv_0_lt_compat; cut (1 <= 1 + (Rabs l + Rabs l')). cut (0 < 1). exact (Rlt_le_trans _ _ _). exact Rlt_0_1. replace (1 <= 1 + (Rabs l + Rabs l')) with (1 + 0 <= 1 + (Rabs l + Rabs l')). apply Rplus_le_compat_l. cut (Rabs (l + l') <= Rabs l + Rabs l'). cut (0 <= Rabs (l + l')). exact (Rle_trans _ _ _). exact (Rabs_pos _). exact (Rabs_triang _ _). rewrite (proj1 (Rplus_ne 1)); trivial. Qed. (*********) Lemma mul_factor_gt_f : forall eps l l':R, eps > 0 -> Rmin 1 (eps * mul_factor l l') > 0. intros; apply Rmin_Rgt_r; split. exact Rlt_0_1. exact (mul_factor_gt eps l l' H). Qed. (*******************************) (** * Metric space *) (*******************************) (*********) Record Metric_Space : Type := {Base : Type; dist : Base -> Base -> R; dist_pos : forall x y:Base, dist x y >= 0; dist_sym : forall x y:Base, dist x y = dist y x; dist_refl : forall x y:Base, dist x y = 0 <-> x = y; dist_tri : forall x y z:Base, dist x y <= dist x z + dist z y}. (*******************************) (** ** Limit in Metric space *) (*******************************) (*********) Definition limit_in (X X':Metric_Space) (f:Base X -> Base X') (D:Base X -> Prop) (x0:Base X) (l:Base X') := forall eps:R, eps > 0 -> exists alp : R, alp > 0 /\ (forall x:Base X, D x /\ dist X x x0 < alp -> dist X' (f x) l < eps). (*******************************) (** ** R is a metric space *) (*******************************) (*********) Definition R_met : Metric_Space := Build_Metric_Space R R_dist R_dist_pos R_dist_sym R_dist_refl R_dist_tri. (*******************************) (** * Limit 1 arg *) (*******************************) (*********) Definition Dgf (Df Dg:R -> Prop) (f:R -> R) (x:R) := Df x /\ Dg (f x). (*********) Definition limit1_in (f:R -> R) (D:R -> Prop) (l x0:R) : Prop := limit_in R_met R_met f D x0 l. (*********) Lemma tech_limit : forall (f:R -> R) (D:R -> Prop) (l x0:R), D x0 -> limit1_in f D l x0 -> l = f x0. Proof. intros f D l x0 H H0. case (Rabs_pos (f x0 - l)); intros H1. absurd (dist R_met (f x0) l < dist R_met (f x0) l). apply Rlt_irrefl. case (H0 (dist R_met (f x0) l)); auto. intros alpha1 [H2 H3]; apply H3; auto; split; auto. case (dist_refl R_met x0 x0); intros Hr1 Hr2; rewrite Hr2; auto. case (dist_refl R_met (f x0) l); intros Hr1 Hr2; symmetry; auto. Qed. (*********) Lemma tech_limit_contr : forall (f:R -> R) (D:R -> Prop) (l x0:R), D x0 -> l <> f x0 -> ~ limit1_in f D l x0. Proof. intros; generalize (tech_limit f D l x0); tauto. Qed. (*********) Lemma lim_x : forall (D:R -> Prop) (x0:R), limit1_in (fun x:R => x) D x0 x0. Proof. unfold limit1_in; unfold limit_in; simpl; intros; split with eps; split; auto; intros; elim H0; intros; auto. Qed. (*********) Lemma limit_plus : forall (f g:R -> R) (D:R -> Prop) (l l' x0:R), limit1_in f D l x0 -> limit1_in g D l' x0 -> limit1_in (fun x:R => f x + g x) D (l + l') x0. Proof. intros; unfold limit1_in; unfold limit_in; simpl; intros; elim (H (eps * / 2) (eps2_Rgt_R0 eps H1)); elim (H0 (eps * / 2) (eps2_Rgt_R0 eps H1)); simpl; clear H H0; intros; elim H; elim H0; clear H H0; intros; split with (Rmin x1 x); split. exact (Rmin_Rgt_r x1 x 0 (conj H H2)). intros; elim H4; clear H4; intros; cut (R_dist (f x2) l + R_dist (g x2) l' < eps). cut (R_dist (f x2 + g x2) (l + l') <= R_dist (f x2) l + R_dist (g x2) l'). exact (Rle_lt_trans _ _ _). exact (R_dist_plus _ _ _ _). elim (Rmin_Rgt_l x1 x (R_dist x2 x0) H5); clear H5; intros. 
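(* Both hypotheses now apply at x2; splitting eps as eps * / 2 + eps * / 2 (lemma eps2) closes the triangle-inequality estimate. *)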
generalize (H3 x2 (conj H4 H6)); generalize (H0 x2 (conj H4 H5)); intros; replace eps with (eps * / 2 + eps * / 2). exact (Rplus_lt_compat _ _ _ _ H7 H8). exact (eps2 eps). Qed. (*********) Lemma limit_Ropp : forall (f:R -> R) (D:R -> Prop) (l x0:R), limit1_in f D l x0 -> limit1_in (fun x:R => - f x) D (- l) x0. Proof. unfold limit1_in; unfold limit_in; simpl; intros; elim (H eps H0); clear H; intros; elim H; clear H; intros; split with x; split; auto; intros; generalize (H1 x1 H2); clear H1; intro; unfold R_dist; unfold Rminus; rewrite (Ropp_involutive l); rewrite (Rplus_comm (- f x1) l); fold (l - f x1); fold (R_dist l (f x1)); rewrite R_dist_sym; assumption. Qed. (*********) Lemma limit_minus : forall (f g:R -> R) (D:R -> Prop) (l l' x0:R), limit1_in f D l x0 -> limit1_in g D l' x0 -> limit1_in (fun x:R => f x - g x) D (l - l') x0. Proof. intros; unfold Rminus; generalize (limit_Ropp g D l' x0 H0); intro; exact (limit_plus f (fun x:R => - g x) D l (- l') x0 H H1). Qed. (*********) Lemma limit_free : forall (f:R -> R) (D:R -> Prop) (x x0:R), limit1_in (fun h:R => f x) D (f x) x0. Proof. unfold limit1_in; unfold limit_in; simpl; intros; split with eps; split; auto; intros; elim (R_dist_refl (f x) (f x)); intros a b; rewrite (b (eq_refl (f x))); unfold Rgt in H; assumption. Qed. (*********) Lemma limit_mul : forall (f g:R -> R) (D:R -> Prop) (l l' x0:R), limit1_in f D l x0 -> limit1_in g D l' x0 -> limit1_in (fun x:R => f x * g x) D (l * l') x0. Proof. intros; unfold limit1_in; unfold limit_in; simpl; intros; elim (H (Rmin 1 (eps * mul_factor l l')) (mul_factor_gt_f eps l l' H1)); elim (H0 (eps * mul_factor l l') (mul_factor_gt eps l l' H1)); clear H H0; simpl; intros; elim H; elim H0; clear H H0; intros; split with (Rmin x1 x); split. exact (Rmin_Rgt_r x1 x 0 (conj H H2)). intros; elim H4; clear H4; intros; unfold R_dist; replace (f x2 * g x2 - l * l') with (f x2 * (g x2 - l') + l' * (f x2 - l)). cut (Rabs (f x2 * (g x2 - l')) + Rabs (l' * (f x2 - l)) < eps). cut (Rabs (f x2 * (g x2 - l') + l' * (f x2 - l)) <= Rabs (f x2 * (g x2 - l')) + Rabs (l' * (f x2 - l))). exact (Rle_lt_trans _ _ _). exact (Rabs_triang _ _). rewrite (Rabs_mult (f x2) (g x2 - l')); rewrite (Rabs_mult l' (f x2 - l)); cut ((1 + Rabs l) * (eps * mul_factor l l') + Rabs l' * (eps * mul_factor l l') <= eps). cut (Rabs (f x2) * Rabs (g x2 - l') + Rabs l' * Rabs (f x2 - l) < (1 + Rabs l) * (eps * mul_factor l l') + Rabs l' * (eps * mul_factor l l')). exact (Rlt_le_trans _ _ _). elim (Rmin_Rgt_l x1 x (R_dist x2 x0) H5); clear H5; intros; generalize (H0 x2 (conj H4 H5)); intro; generalize (Rmin_Rgt_l _ _ _ H7); intro; elim H8; intros; clear H0 H8; apply Rplus_lt_le_compat. apply Rmult_ge_0_gt_0_lt_compat. apply Rle_ge. exact (Rabs_pos (g x2 - l')). rewrite (Rplus_comm 1 (Rabs l)); unfold Rgt; apply Rle_lt_0_plus_1; exact (Rabs_pos l). unfold R_dist in H9; apply (Rplus_lt_reg_r (- Rabs l) (Rabs (f x2)) (1 + Rabs l)). rewrite <- (Rplus_assoc (- Rabs l) 1 (Rabs l)); rewrite (Rplus_comm (- Rabs l) 1); rewrite (Rplus_assoc 1 (- Rabs l) (Rabs l)); rewrite (Rplus_opp_l (Rabs l)); rewrite (proj1 (Rplus_ne 1)); rewrite (Rplus_comm (- Rabs l) (Rabs (f x2))); generalize H9; cut (Rabs (f x2) - Rabs l <= Rabs (f x2 - l)). exact (Rle_lt_trans _ _ _). exact (Rabs_triang_inv _ _). generalize (H3 x2 (conj H4 H6)); trivial. apply Rmult_le_compat_l. exact (Rabs_pos l'). unfold Rle; left; assumption. 
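(* By definition of mul_factor, (1 + Rabs l) * (eps * mul_factor l l') + Rabs l' * (eps * mul_factor l l') adds up to exactly eps, so the bound closes. *)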
rewrite (Rmult_comm (1 + Rabs l) (eps * mul_factor l l')); rewrite (Rmult_comm (Rabs l') (eps * mul_factor l l')); rewrite <- (Rmult_plus_distr_l (eps * mul_factor l l') (1 + Rabs l) (Rabs l')) ; rewrite (Rmult_assoc eps (mul_factor l l') (1 + Rabs l + Rabs l')); rewrite (Rplus_assoc 1 (Rabs l) (Rabs l')); unfold mul_factor; rewrite (Rinv_l (1 + (Rabs l + Rabs l')) (mul_factor_wd l l')); rewrite (proj1 (Rmult_ne eps)); apply Req_le; trivial. ring. Qed. (*********) Definition adhDa (D:R -> Prop) (a:R) : Prop := forall alp:R, alp > 0 -> exists x : R, D x /\ R_dist x a < alp. (*********) Lemma single_limit : forall (f:R -> R) (D:R -> Prop) (l l' x0:R), adhDa D x0 -> limit1_in f D l x0 -> limit1_in f D l' x0 -> l = l'. Proof. unfold limit1_in; unfold limit_in; intros. cut (forall eps:R, eps > 0 -> dist R_met l l' < 2 * eps). clear H0 H1; unfold dist; unfold R_met; unfold R_dist; unfold Rabs; case (Rcase_abs (l - l')); intros. cut (forall eps:R, eps > 0 -> - (l - l') < eps). intro; generalize (prop_eps (- (l - l')) H1); intro; generalize (Ropp_gt_lt_0_contravar (l - l') r); intro; unfold Rgt in H3; generalize (Rgt_not_le (- (l - l')) 0 H3); intro; exfalso; auto. intros; cut (eps * / 2 > 0). intro; generalize (H0 (eps * / 2) H2); rewrite (Rmult_comm eps (/ 2)); rewrite <- (Rmult_assoc 2 (/ 2) eps); rewrite (Rinv_r 2). elim (Rmult_ne eps); intros a b; rewrite b; clear a b; trivial. apply (Rlt_dichotomy_converse 2 0); right; generalize Rlt_0_1; intro; unfold Rgt; generalize (Rplus_lt_compat_l 1 0 1 H3); intro; elim (Rplus_ne 1); intros a b; rewrite a in H4; clear a b; apply (Rlt_trans 0 1 2 H3 H4). unfold Rgt; unfold Rgt in H1; rewrite (Rmult_comm eps (/ 2)); rewrite <- (Rmult_0_r (/ 2)); apply (Rmult_lt_compat_l (/ 2) 0 eps); auto. apply (Rinv_0_lt_compat 2); cut (1 < 2). intro; apply (Rlt_trans 0 1 2 Rlt_0_1 H2). generalize (Rplus_lt_compat_l 1 0 1 Rlt_0_1); elim (Rplus_ne 1); intros a b; rewrite a; clear a b; trivial. (**) cut (forall eps:R, eps > 0 -> l - l' < eps). intro; generalize (prop_eps (l - l') H1); intro; elim (Rle_le_eq (l - l') 0); intros a b; clear b; apply (Rminus_diag_uniq l l'); apply a; split. assumption. apply (Rge_le (l - l') 0 r). intros; cut (eps * / 2 > 0). intro; generalize (H0 (eps * / 2) H2); rewrite (Rmult_comm eps (/ 2)); rewrite <- (Rmult_assoc 2 (/ 2) eps); rewrite (Rinv_r 2). elim (Rmult_ne eps); intros a b; rewrite b; clear a b; trivial. apply (Rlt_dichotomy_converse 2 0); right; generalize Rlt_0_1; intro; unfold Rgt; generalize (Rplus_lt_compat_l 1 0 1 H3); intro; elim (Rplus_ne 1); intros a b; rewrite a in H4; clear a b; apply (Rlt_trans 0 1 2 H3 H4). unfold Rgt; unfold Rgt in H1; rewrite (Rmult_comm eps (/ 2)); rewrite <- (Rmult_0_r (/ 2)); apply (Rmult_lt_compat_l (/ 2) 0 eps); auto. apply (Rinv_0_lt_compat 2); cut (1 < 2). intro; apply (Rlt_trans 0 1 2 Rlt_0_1 H2). generalize (Rplus_lt_compat_l 1 0 1 Rlt_0_1); elim (Rplus_ne 1); intros a b; rewrite a; clear a b; trivial. 
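(* Final case of single_limit: adherence of x0 to D yields a point x2 of D inside both neighbourhoods, and the triangle inequality through f x2 bounds |l - l'| by eps + eps. *)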
(**) intros; unfold adhDa in H; elim (H0 eps H2); intros; elim (H1 eps H2); intros; clear H0 H1; elim H3; elim H4; clear H3 H4; intros; simpl; simpl in H1, H4; generalize (Rmin_Rgt x x1 0); intro; elim H5; intros; clear H5; elim (H (Rmin x x1) (H7 (conj H3 H0))); intros; elim H5; intros; clear H5 H H6 H7; generalize (Rmin_Rgt x x1 (R_dist x2 x0)); intro; elim H; intros; clear H H6; unfold Rgt in H5; elim (H5 H9); intros; clear H5 H9; generalize (H1 x2 (conj H8 H6)); generalize (H4 x2 (conj H8 H)); clear H8 H H6 H1 H4 H0 H3; intros; generalize (Rplus_lt_compat (R_dist (f x2) l) eps (R_dist (f x2) l') eps H H0); unfold R_dist; intros; rewrite (Rabs_minus_sym (f x2) l) in H1; rewrite (Rmult_comm 2 eps); rewrite (Rmult_plus_distr_l eps 1 1); elim (Rmult_ne eps); intros a b; rewrite a; clear a b; generalize (R_dist_tri l l' (f x2)); unfold R_dist; intros; apply (Rle_lt_trans (Rabs (l - l')) (Rabs (l - f x2) + Rabs (f x2 - l')) (eps + eps) H3 H1). Qed. (*********) Lemma limit_comp : forall (f g:R -> R) (Df Dg:R -> Prop) (l l' x0:R), limit1_in f Df l x0 -> limit1_in g Dg l' l -> limit1_in (fun x:R => g (f x)) (Dgf Df Dg f) l' x0. Proof. unfold limit1_in, limit_in, Dgf; simpl. intros f g Df Dg l l' x0 Hf Hg eps eps_pos. elim (Hg eps eps_pos). intros alpg lg. elim (Hf alpg). 2: tauto. intros alpf lf. exists alpf. intuition. Qed. (*********) Lemma limit_inv : forall (f:R -> R) (D:R -> Prop) (l x0:R), limit1_in f D l x0 -> l <> 0 -> limit1_in (fun x:R => / f x) D (/ l) x0. Proof. unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; elim (H (Rabs l / 2)). intros delta1 H2; elim (H (eps * (Rsqr l / 2))). intros delta2 H3; elim H2; elim H3; intros; exists (Rmin delta1 delta2); split. unfold Rmin; case (Rle_dec delta1 delta2); intro; assumption. intro; generalize (H5 x); clear H5; intro H5; generalize (H7 x); clear H7; intro H7; intro H10; elim H10; intros; cut (D x /\ Rabs (x - x0) < delta1). cut (D x /\ Rabs (x - x0) < delta2). intros; generalize (H5 H11); clear H5; intro H5; generalize (H7 H12); clear H7; intro H7; generalize (Rabs_triang_inv l (f x)); intro; rewrite Rabs_minus_sym in H7; generalize (Rle_lt_trans (Rabs l - Rabs (f x)) (Rabs (l - f x)) (Rabs l / 2) H13 H7); intro; generalize (Rplus_lt_compat_l (Rabs (f x) - Rabs l / 2) (Rabs l - Rabs (f x)) (Rabs l / 2) H14); replace (Rabs (f x) - Rabs l / 2 + (Rabs l - Rabs (f x))) with (Rabs l / 2). unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; intro; cut (f x <> 0). intro; replace (/ f x + - / l) with ((l - f x) * / (l * f x)). rewrite Rabs_mult; rewrite Rabs_Rinv. cut (/ Rabs (l * f x) < 2 / Rsqr l). intro; rewrite Rabs_minus_sym in H5; cut (0 <= / Rabs (l * f x)). intro; generalize (Rmult_le_0_lt_compat (Rabs (l - f x)) (eps * (Rsqr l / 2)) (/ Rabs (l * f x)) (2 / Rsqr l) (Rabs_pos (l - f x)) H18 H5 H17); replace (eps * (Rsqr l / 2) * (2 / Rsqr l)) with eps. intro; assumption. unfold Rdiv; unfold Rsqr; rewrite Rinv_mult_distr. repeat rewrite Rmult_assoc. rewrite (Rmult_comm l). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite (Rmult_comm l). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite <- Rinv_l_sym. rewrite Rmult_1_r; reflexivity. discrR. exact H0. exact H0. exact H0. exact H0. left; apply Rinv_0_lt_compat; apply Rabs_pos_lt; apply prod_neq_R0; assumption. rewrite Rmult_comm; rewrite Rabs_mult; rewrite Rinv_mult_distr. rewrite (Rsqr_abs l); unfold Rsqr; unfold Rdiv; rewrite Rinv_mult_distr. 
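(* It remains to compare / |l * f x| with 2 / |l|^2; this reduces to Rabs l / 2 < Rabs (f x), which was established above. *)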
repeat rewrite <- Rmult_assoc; apply Rmult_lt_compat_r. apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. apply Rmult_lt_reg_l with (Rabs (f x) * Rabs l * / 2). repeat apply Rmult_lt_0_compat. apply Rabs_pos_lt; assumption. apply Rabs_pos_lt; assumption. apply Rinv_0_lt_compat; cut (0%nat <> 2%nat); [ intro H17; generalize (lt_INR_0 2 (neq_O_lt 2 H17)); unfold INR; intro H18; assumption | discriminate ]. replace (Rabs (f x) * Rabs l * / 2 * / Rabs (f x)) with (Rabs l / 2). replace (Rabs (f x) * Rabs l * / 2 * (2 * / Rabs l)) with (Rabs (f x)). assumption. repeat rewrite Rmult_assoc. rewrite (Rmult_comm (Rabs l)). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite <- Rinv_l_sym. rewrite Rmult_1_r; reflexivity. discrR. apply Rabs_no_R0. assumption. unfold Rdiv. repeat rewrite Rmult_assoc. rewrite (Rmult_comm (Rabs (f x))). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. reflexivity. apply Rabs_no_R0; assumption. apply Rabs_no_R0; assumption. apply Rabs_no_R0; assumption. apply Rabs_no_R0; assumption. apply Rabs_no_R0; assumption. apply prod_neq_R0; assumption. rewrite (Rinv_mult_distr _ _ H0 H16). unfold Rminus; rewrite Rmult_plus_distr_r. rewrite <- Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_l. rewrite Ropp_mult_distr_l_reverse. rewrite (Rmult_comm (f x)). rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. reflexivity. assumption. assumption. red; intro; rewrite H16 in H15; rewrite Rabs_R0 in H15; cut (0 < Rabs l / 2). intro; elim (Rlt_irrefl 0 (Rlt_trans 0 (Rabs l / 2) 0 H17 H15)). unfold Rdiv; apply Rmult_lt_0_compat. apply Rabs_pos_lt; assumption. apply Rinv_0_lt_compat; cut (0%nat <> 2%nat); [ intro H17; generalize (lt_INR_0 2 (neq_O_lt 2 H17)); unfold INR; intro; assumption | discriminate ]. pattern (Rabs l) at 3; rewrite double_var. ring. split; [ assumption | apply Rlt_le_trans with (Rmin delta1 delta2); [ assumption | apply Rmin_r ] ]. split; [ assumption | apply Rlt_le_trans with (Rmin delta1 delta2); [ assumption | apply Rmin_l ] ]. change (0 < eps * (Rsqr l / 2)); unfold Rdiv; repeat rewrite Rmult_assoc; apply Rmult_lt_0_compat. assumption. apply Rmult_lt_0_compat. apply Rsqr_pos_lt; assumption. apply Rinv_0_lt_compat; cut (0%nat <> 2%nat); [ intro H3; generalize (lt_INR_0 2 (neq_O_lt 2 H3)); unfold INR; intro; assumption | discriminate ]. change (0 < Rabs l / 2); unfold Rdiv; apply Rmult_lt_0_compat; [ apply Rabs_pos_lt; assumption | apply Rinv_0_lt_compat; cut (0%nat <> 2%nat); [ intro H3; generalize (lt_INR_0 2 (neq_O_lt 2 H3)); unfold INR; intro; assumption | discriminate ] ]. Qed. coq-8.4pl2/theories/Reals/Cauchy_prod.v0000640000175000001440000003626712010532755017164 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (N:nat), (0 < N)%nat -> sum_f_R0 An N = sum_f_R0 An (pred N) + An N. Proof. intros. replace N with (S (pred N)). rewrite tech5. reflexivity. symmetry ; apply S_pred with 0%nat; assumption. Qed. (**********) Lemma sum_plus : forall (An Bn:nat -> R) (N:nat), sum_f_R0 (fun l:nat => An l + Bn l) N = sum_f_R0 An N + sum_f_R0 Bn N. Proof. intros. induction N as [| N HrecN]. reflexivity. do 3 rewrite tech5. rewrite HrecN; ring. Qed. 
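(* For N = 1 the identity proved below reads (A0 + A1) * (B0 + B1) = (A0 * B0 + (A0 * B1 + A1 * B0)) + A1 * B1, the last product being the remainder sum. *)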
(* The main result *) Theorem cauchy_finite : forall (An Bn:nat -> R) (N:nat), (0 < N)%nat -> sum_f_R0 An N * sum_f_R0 Bn N = sum_f_R0 (fun k:nat => sum_f_R0 (fun p:nat => An p * Bn (k - p)%nat) k) N + sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (l + k)) * Bn (N - l)%nat) (pred (N - k))) (pred N). Proof. intros; induction N as [| N HrecN]. elim (lt_irrefl _ H). cut (N = 0%nat \/ (0 < N)%nat). intro; elim H0; intro. rewrite H1; simpl; ring. replace (pred (S N)) with (S (pred N)). do 5 rewrite tech5. rewrite Rmult_plus_distr_r; rewrite Rmult_plus_distr_l; rewrite (HrecN H1). repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. replace (pred (S N - S (pred N))) with 0%nat. rewrite Rmult_plus_distr_l; replace (sum_f_R0 (fun l:nat => An (S (l + S (pred N))) * Bn (S N - l)%nat) 0) with (An (S N) * Bn (S N)). repeat rewrite <- Rplus_assoc; do 2 rewrite <- (Rplus_comm (An (S N) * Bn (S N))); repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. rewrite <- minus_n_n; cut (N = 1%nat \/ (2 <= N)%nat). intro; elim H2; intro. rewrite H3; simpl; ring. replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (l + k)) * Bn (N - l)%nat) (pred (N - k))) (pred N)) with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (pred (N - k)))) (pred (pred N)) + sum_f_R0 (fun l:nat => An (S l) * Bn (N - l)%nat) (pred N)). replace (sum_f_R0 (fun p:nat => An p * Bn (S N - p)%nat) N) with (sum_f_R0 (fun l:nat => An (S l) * Bn (N - l)%nat) (pred N) + An 0%nat * Bn (S N)). repeat rewrite <- Rplus_assoc; rewrite <- (Rplus_comm (sum_f_R0 (fun l:nat => An (S l) * Bn (N - l)%nat) (pred N))) ; repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (l + k)) * Bn (S N - l)%nat) (pred (S N - k))) (pred N)) with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (N - k))) (pred N) + Bn (S N) * sum_f_R0 (fun l:nat => An (S l)) (pred N)). rewrite (decomp_sum An N H1); rewrite Rmult_plus_distr_r; repeat rewrite <- Rplus_assoc; rewrite <- (Rplus_comm (An 0%nat * Bn (S N))); repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. repeat rewrite <- Rplus_assoc; rewrite <- (Rplus_comm (sum_f_R0 (fun i:nat => An (S i)) (pred N) * Bn (S N))) ; rewrite <- (Rplus_comm (Bn (S N) * sum_f_R0 (fun i:nat => An (S i)) (pred N))) ; rewrite (Rmult_comm (Bn (S N))); repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (N - k))) (pred N)) with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (pred (N - k)))) (pred (pred N)) + An (S N) * sum_f_R0 (fun l:nat => Bn (S l)) (pred N)). rewrite (decomp_sum Bn N H1); rewrite Rmult_plus_distr_l. set (Z := sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (pred (N - k)))) (pred (pred N))); set (Z2 := sum_f_R0 (fun i:nat => Bn (S i)) (pred N)); ring. rewrite (sum_N_predN (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (N - k))) (pred N)). replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (N - k))) (pred (pred N))) with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (pred (N - k))) + An (S N) * Bn (S k)) ( pred (pred N))). 
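(* The rest of the induction step is index bookkeeping: the double sums are re-split with sum_N_predN, decomp_sum and sum_plus, and the arithmetic on truncated subtraction over nat is settled through INR (INR_eq, minus_INR, le_INR). *)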
rewrite (sum_plus (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (pred (N - k)))) (fun k:nat => An (S N) * Bn (S k)) (pred (pred N))). repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. replace (pred (N - pred N)) with 0%nat. simpl; rewrite <- minus_n_O. replace (S (pred N)) with N. replace (sum_f_R0 (fun k:nat => An (S N) * Bn (S k)) (pred (pred N))) with (sum_f_R0 (fun k:nat => Bn (S k) * An (S N)) (pred (pred N))). rewrite <- (scal_sum (fun l:nat => Bn (S l)) (pred (pred N)) (An (S N))); rewrite (sum_N_predN (fun l:nat => Bn (S l)) (pred N)). replace (S (pred N)) with N. ring. apply S_pred with 0%nat; assumption. apply lt_pred; apply lt_le_trans with 2%nat; [ apply lt_n_Sn | assumption ]. apply sum_eq; intros; apply Rmult_comm. apply S_pred with 0%nat; assumption. replace (N - pred N)%nat with 1%nat. reflexivity. pattern N at 1; replace N with (S (pred N)). rewrite <- minus_Sn_m. rewrite <- minus_n_n; reflexivity. apply le_n. symmetry ; apply S_pred with 0%nat; assumption. apply sum_eq; intros; rewrite (sum_N_predN (fun l:nat => An (S (S (l + i))) * Bn (N - l)%nat) (pred (N - i))). replace (S (S (pred (N - i) + i))) with (S N). replace (N - pred (N - i))%nat with (S i). reflexivity. rewrite pred_of_minus; apply INR_eq; repeat rewrite minus_INR. rewrite S_INR; simpl; ring. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply INR_le; rewrite minus_INR. apply Rplus_le_reg_l with (INR i - 1). replace (INR i - 1 + INR 1) with (INR i); [ idtac | simpl; ring ]. replace (INR i - 1 + (INR N - INR i)) with (INR N - INR 1); [ idtac | simpl; ring ]. rewrite <- minus_INR. apply le_INR; apply le_trans with (pred (pred N)). assumption. rewrite <- pred_of_minus; apply le_pred_n. apply le_trans with 2%nat. apply le_n_Sn. assumption. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. rewrite <- pred_of_minus. apply le_trans with (pred N). apply le_S_n. replace (S (pred N)) with N. replace (S (pred (N - i))) with (N - i)%nat. apply (fun p n m:nat => plus_le_reg_l n m p) with i; rewrite le_plus_minus_r. apply le_plus_r. apply le_trans with (pred (pred N)); [ assumption | apply le_trans with (pred N); apply le_pred_n ]. apply S_pred with 0%nat. apply plus_lt_reg_l with i; rewrite le_plus_minus_r. replace (i + 0)%nat with i; [ idtac | ring ]. apply le_lt_trans with (pred (pred N)); [ assumption | apply lt_trans with (pred N); apply lt_pred_n_n ]. apply lt_S_n. replace (S (pred N)) with N. apply lt_le_trans with 2%nat. apply lt_n_Sn. assumption. apply S_pred with 0%nat; assumption. assumption. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply S_pred with 0%nat; assumption. apply le_pred_n. apply INR_eq; rewrite pred_of_minus; do 3 rewrite S_INR; rewrite plus_INR; repeat rewrite minus_INR. simpl; ring. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply INR_le. rewrite minus_INR. apply Rplus_le_reg_l with (INR i - 1). replace (INR i - 1 + INR 1) with (INR i); [ idtac | simpl; ring ]. replace (INR i - 1 + (INR N - INR i)) with (INR N - INR 1); [ idtac | simpl; ring ]. rewrite <- minus_INR. apply le_INR. apply le_trans with (pred (pred N)). assumption. rewrite <- pred_of_minus. apply le_pred_n. apply le_trans with 2%nat. apply le_n_Sn. assumption. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply lt_le_trans with 1%nat. apply lt_O_Sn. 
apply INR_le. rewrite pred_of_minus. repeat rewrite minus_INR. apply Rplus_le_reg_l with (INR i - 1). replace (INR i - 1 + INR 1) with (INR i); [ idtac | simpl; ring ]. replace (INR i - 1 + (INR N - INR i - INR 1)) with (INR N - INR 1 - INR 1). repeat rewrite <- minus_INR. apply le_INR. apply le_trans with (pred (pred N)). assumption. do 2 rewrite <- pred_of_minus. apply le_n. apply (fun p n m:nat => plus_le_reg_l n m p) with 1%nat. rewrite le_plus_minus_r. simpl; assumption. apply le_trans with 2%nat; [ apply le_n_Sn | assumption ]. apply le_trans with 2%nat; [ apply le_n_Sn | assumption ]. simpl; ring. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply (fun p n m:nat => plus_le_reg_l n m p) with i. rewrite le_plus_minus_r. replace (i + 1)%nat with (S i). replace N with (S (pred N)). apply le_n_S. apply le_trans with (pred (pred N)). assumption. apply le_pred_n. symmetry ; apply S_pred with 0%nat; assumption. apply INR_eq; rewrite S_INR; rewrite plus_INR; reflexivity. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply lt_le_trans with 1%nat. apply lt_O_Sn. apply le_S_n. replace (S (pred N)) with N. assumption. apply S_pred with 0%nat; assumption. replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (l + k)) * Bn (S N - l)%nat) (pred (S N - k))) (pred N)) with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (N - k)) + An (S k) * Bn (S N)) (pred N)). rewrite (sum_plus (fun k:nat => sum_f_R0 (fun l:nat => An (S (S (l + k))) * Bn (N - l)%nat) (pred (N - k))) (fun k:nat => An (S k) * Bn (S N))). apply Rplus_eq_compat_l. rewrite scal_sum; reflexivity. apply sum_eq; intros; rewrite Rplus_comm; rewrite (decomp_sum (fun l:nat => An (S (l + i)) * Bn (S N - l)%nat) (pred (S N - i))). replace (0 + i)%nat with i; [ idtac | ring ]. rewrite <- minus_n_O; apply Rplus_eq_compat_l. replace (pred (pred (S N - i))) with (pred (N - i)). apply sum_eq; intros. replace (S N - S i0)%nat with (N - i0)%nat; [ idtac | reflexivity ]. replace (S i0 + i)%nat with (S (i0 + i)). reflexivity. apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; rewrite S_INR; simpl; ring. cut ((N - i)%nat = pred (S N - i)). intro; rewrite H5; reflexivity. rewrite pred_of_minus. apply INR_eq; repeat rewrite minus_INR. rewrite S_INR; simpl; ring. apply le_trans with N. apply le_trans with (pred N). assumption. apply le_pred_n. apply le_n_Sn. apply (fun p n m:nat => plus_le_reg_l n m p) with i. rewrite le_plus_minus_r. replace (i + 1)%nat with (S i). apply le_n_S. apply le_trans with (pred N). assumption. apply le_pred_n. apply INR_eq; rewrite S_INR; rewrite plus_INR; simpl; ring. apply le_trans with N. apply le_trans with (pred N). assumption. apply le_pred_n. apply le_n_Sn. apply le_trans with (pred N). assumption. apply le_pred_n. replace (pred (S N - i)) with (S N - S i)%nat. replace (S N - S i)%nat with (N - i)%nat; [ idtac | reflexivity ]. apply plus_lt_reg_l with i. rewrite le_plus_minus_r. replace (i + 0)%nat with i; [ idtac | ring ]. apply le_lt_trans with (pred N). assumption. apply lt_pred_n_n. assumption. apply le_trans with (pred N). assumption. apply le_pred_n. rewrite pred_of_minus. apply INR_eq; repeat rewrite minus_INR. repeat rewrite S_INR; simpl; ring. apply le_trans with N. apply le_trans with (pred N). assumption. apply le_pred_n. apply le_n_Sn. apply (fun p n m:nat => plus_le_reg_l n m p) with i. rewrite le_plus_minus_r. replace (i + 1)%nat with (S i). apply le_n_S. 
apply le_trans with (pred N). assumption. apply le_pred_n. apply INR_eq; rewrite S_INR; rewrite plus_INR; simpl; ring. apply le_trans with N. apply le_trans with (pred N). assumption. apply le_pred_n. apply le_n_Sn. apply le_n_S. apply le_trans with (pred N). assumption. apply le_pred_n. rewrite Rplus_comm. rewrite (decomp_sum (fun p:nat => An p * Bn (S N - p)%nat) N). rewrite <- minus_n_O. apply Rplus_eq_compat_l. apply sum_eq; intros. reflexivity. assumption. rewrite Rplus_comm. rewrite (decomp_sum (fun k:nat => sum_f_R0 (fun l:nat => An (S (l + k)) * Bn (N - l)%nat) (pred (N - k))) (pred N)). rewrite <- minus_n_O. replace (sum_f_R0 (fun l:nat => An (S (l + 0)) * Bn (N - l)%nat) (pred N)) with (sum_f_R0 (fun l:nat => An (S l) * Bn (N - l)%nat) (pred N)). apply Rplus_eq_compat_l. apply sum_eq; intros. replace (pred (N - S i)) with (pred (pred (N - i))). apply sum_eq; intros. replace (i0 + S i)%nat with (S (i0 + i)). reflexivity. apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; rewrite S_INR; simpl; ring. cut (pred (N - i) = (N - S i)%nat). intro; rewrite H5; reflexivity. rewrite pred_of_minus. apply INR_eq. repeat rewrite minus_INR. repeat rewrite S_INR; simpl; ring. apply le_trans with (S (pred (pred N))). apply le_n_S; assumption. replace (S (pred (pred N))) with (pred N). apply le_pred_n. apply S_pred with 0%nat. apply lt_S_n. replace (S (pred N)) with N. apply lt_le_trans with 2%nat. apply lt_n_Sn. assumption. apply S_pred with 0%nat; assumption. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply (fun p n m:nat => plus_le_reg_l n m p) with i. rewrite le_plus_minus_r. replace (i + 1)%nat with (S i). replace N with (S (pred N)). apply le_n_S. apply le_trans with (pred (pred N)). assumption. apply le_pred_n. symmetry ; apply S_pred with 0%nat; assumption. apply INR_eq; rewrite S_INR; rewrite plus_INR; simpl; ring. apply le_trans with (pred (pred N)). assumption. apply le_trans with (pred N); apply le_pred_n. apply sum_eq; intros. replace (i + 0)%nat with i; [ reflexivity | trivial ]. apply lt_S_n. replace (S (pred N)) with N. apply lt_le_trans with 2%nat; [ apply lt_n_Sn | assumption ]. apply S_pred with 0%nat; assumption. inversion H1. left; reflexivity. right; apply le_n_S; assumption. simpl. replace (S (pred N)) with N. reflexivity. apply S_pred with 0%nat; assumption. simpl. cut ((N - pred N)%nat = 1%nat). intro; rewrite H2; reflexivity. rewrite pred_of_minus. apply INR_eq; repeat rewrite minus_INR. simpl; ring. apply lt_le_S; assumption. rewrite <- pred_of_minus; apply le_pred_n. simpl; symmetry ; apply S_pred with 0%nat; assumption. inversion H. left; reflexivity. right; apply lt_le_trans with 1%nat; [ apply lt_n_Sn | exact H1 ]. Qed. coq-8.4pl2/theories/Reals/SeqSeries.v0000640000175000001440000003656712010532755016632 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R) (An:nat -> R) (x l1 l2:R) (N:nat), Un_cv (fun n:nat => SP fn n x) l1 -> Un_cv (fun n:nat => sum_f_R0 An n) l2 -> (forall n:nat, Rabs (fn n x) <= An n) -> Rabs (l1 - SP fn N x) <= l2 - sum_f_R0 An N. Proof. intros; cut { l:R | Un_cv (fun n => sum_f_R0 (fun l => fn (S N + l)%nat x) n) l }. intro X; cut { l:R | Un_cv (fun n => sum_f_R0 (fun l => An (S N + l)%nat) n) l }. intro X0; elim X; intros l1N H2. elim X0; intros l2N H3. cut (l1 - SP fn N x = l1N). intro; cut (l2 - sum_f_R0 An N = l2N). intro; rewrite H4; rewrite H5. 
apply sum_cv_maj with (fun l:nat => An (S N + l)%nat) (fun (l:nat) (x:R) => fn (S N + l)%nat x) x. unfold SP; apply H2. apply H3. intros; apply H1. symmetry ; eapply UL_sequence. apply H3. unfold Un_cv in H0; unfold Un_cv; intros; elim (H0 eps H5); intros N0 H6. unfold R_dist in H6; exists N0; intros. unfold R_dist; replace (sum_f_R0 (fun l:nat => An (S N + l)%nat) n - (l2 - sum_f_R0 An N)) with (sum_f_R0 An N + sum_f_R0 (fun l:nat => An (S N + l)%nat) n - l2); [ idtac | ring ]. replace (sum_f_R0 An N + sum_f_R0 (fun l:nat => An (S N + l)%nat) n) with (sum_f_R0 An (S (N + n))). apply H6; unfold ge; apply le_trans with n. apply H7. apply le_trans with (N + n)%nat. apply le_plus_r. apply le_n_Sn. cut (0 <= N)%nat. cut (N < S (N + n))%nat. intros; assert (H10 := sigma_split An H9 H8). unfold sigma in H10. do 2 rewrite <- minus_n_O in H10. replace (sum_f_R0 An (S (N + n))) with (sum_f_R0 (fun k:nat => An (0 + k)%nat) (S (N + n))). replace (sum_f_R0 An N) with (sum_f_R0 (fun k:nat => An (0 + k)%nat) N). cut ((S (N + n) - S N)%nat = n). intro; rewrite H11 in H10. apply H10. apply INR_eq; rewrite minus_INR. do 2 rewrite S_INR; rewrite plus_INR; ring. apply le_n_S; apply le_plus_l. apply sum_eq; intros. reflexivity. apply sum_eq; intros. reflexivity. apply le_lt_n_Sm; apply le_plus_l. apply le_O_n. symmetry ; eapply UL_sequence. apply H2. unfold Un_cv in H; unfold Un_cv; intros. elim (H eps H4); intros N0 H5. unfold R_dist in H5; exists N0; intros. unfold R_dist, SP; replace (sum_f_R0 (fun l:nat => fn (S N + l)%nat x) n - (l1 - sum_f_R0 (fun k:nat => fn k x) N)) with (sum_f_R0 (fun k:nat => fn k x) N + sum_f_R0 (fun l:nat => fn (S N + l)%nat x) n - l1); [ idtac | ring ]. replace (sum_f_R0 (fun k:nat => fn k x) N + sum_f_R0 (fun l:nat => fn (S N + l)%nat x) n) with (sum_f_R0 (fun k:nat => fn k x) (S (N + n))). unfold SP in H5; apply H5; unfold ge; apply le_trans with n. apply H6. apply le_trans with (N + n)%nat. apply le_plus_r. apply le_n_Sn. cut (0 <= N)%nat. cut (N < S (N + n))%nat. intros; assert (H9 := sigma_split (fun k:nat => fn k x) H8 H7). unfold sigma in H9. do 2 rewrite <- minus_n_O in H9. replace (sum_f_R0 (fun k:nat => fn k x) (S (N + n))) with (sum_f_R0 (fun k:nat => fn (0 + k)%nat x) (S (N + n))). replace (sum_f_R0 (fun k:nat => fn k x) N) with (sum_f_R0 (fun k:nat => fn (0 + k)%nat x) N). cut ((S (N + n) - S N)%nat = n). intro; rewrite H10 in H9. apply H9. apply INR_eq; rewrite minus_INR. do 2 rewrite S_INR; rewrite plus_INR; ring. apply le_n_S; apply le_plus_l. apply sum_eq; intros. reflexivity. apply sum_eq; intros. reflexivity. apply le_lt_n_Sm. apply le_plus_l. apply le_O_n. exists (l2 - sum_f_R0 An N). unfold Un_cv in H0; unfold Un_cv; intros. elim (H0 eps H2); intros N0 H3. unfold R_dist in H3; exists N0; intros. unfold R_dist; replace (sum_f_R0 (fun l:nat => An (S N + l)%nat) n - (l2 - sum_f_R0 An N)) with (sum_f_R0 An N + sum_f_R0 (fun l:nat => An (S N + l)%nat) n - l2); [ idtac | ring ]. replace (sum_f_R0 An N + sum_f_R0 (fun l:nat => An (S N + l)%nat) n) with (sum_f_R0 An (S (N + n))). apply H3; unfold ge; apply le_trans with n. apply H4. apply le_trans with (N + n)%nat. apply le_plus_r. apply le_n_Sn. cut (0 <= N)%nat. cut (N < S (N + n))%nat. intros; assert (H7 := sigma_split An H6 H5). unfold sigma in H7. do 2 rewrite <- minus_n_O in H7. replace (sum_f_R0 An (S (N + n))) with (sum_f_R0 (fun k:nat => An (0 + k)%nat) (S (N + n))). replace (sum_f_R0 An N) with (sum_f_R0 (fun k:nat => An (0 + k)%nat) N). cut ((S (N + n) - S N)%nat = n). intro; rewrite H8 in H7. apply H7. 
apply INR_eq; rewrite minus_INR. do 2 rewrite S_INR; rewrite plus_INR; ring. apply le_n_S; apply le_plus_l. apply sum_eq; intros. reflexivity. apply sum_eq; intros. reflexivity. apply le_lt_n_Sm. apply le_plus_l. apply le_O_n. exists (l1 - SP fn N x). unfold Un_cv in H; unfold Un_cv; intros. elim (H eps H2); intros N0 H3. unfold R_dist in H3; exists N0; intros. unfold R_dist, SP. replace (sum_f_R0 (fun l:nat => fn (S N + l)%nat x) n - (l1 - sum_f_R0 (fun k:nat => fn k x) N)) with (sum_f_R0 (fun k:nat => fn k x) N + sum_f_R0 (fun l:nat => fn (S N + l)%nat x) n - l1); [ idtac | ring ]. replace (sum_f_R0 (fun k:nat => fn k x) N + sum_f_R0 (fun l:nat => fn (S N + l)%nat x) n) with (sum_f_R0 (fun k:nat => fn k x) (S (N + n))). unfold SP in H3; apply H3. unfold ge; apply le_trans with n. apply H4. apply le_trans with (N + n)%nat. apply le_plus_r. apply le_n_Sn. cut (0 <= N)%nat. cut (N < S (N + n))%nat. intros; assert (H7 := sigma_split (fun k:nat => fn k x) H6 H5). unfold sigma in H7. do 2 rewrite <- minus_n_O in H7. replace (sum_f_R0 (fun k:nat => fn k x) (S (N + n))) with (sum_f_R0 (fun k:nat => fn (0 + k)%nat x) (S (N + n))). replace (sum_f_R0 (fun k:nat => fn k x) N) with (sum_f_R0 (fun k:nat => fn (0 + k)%nat x) N). cut ((S (N + n) - S N)%nat = n). intro; rewrite H8 in H7. apply H7. apply INR_eq; rewrite minus_INR. do 2 rewrite S_INR; rewrite plus_INR; ring. apply le_n_S; apply le_plus_l. apply sum_eq; intros. reflexivity. apply sum_eq; intros. reflexivity. apply le_lt_n_Sm. apply le_plus_l. apply le_O_n. Qed. (** Comparaison of convergence for series *) Lemma Rseries_CV_comp : forall An Bn:nat -> R, (forall n:nat, 0 <= An n <= Bn n) -> { l:R | Un_cv (fun N:nat => sum_f_R0 Bn N) l } -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }. Proof. intros An Bn H X; apply cv_cauchy_2. assert (H0 := cv_cauchy_1 _ X). unfold Cauchy_crit_series; unfold Cauchy_crit. intros; elim (H0 eps H1); intros. exists x; intros. cut (R_dist (sum_f_R0 An n) (sum_f_R0 An m) <= R_dist (sum_f_R0 Bn n) (sum_f_R0 Bn m)). intro; apply Rle_lt_trans with (R_dist (sum_f_R0 Bn n) (sum_f_R0 Bn m)). assumption. apply H2; assumption. assert (H5 := lt_eq_lt_dec n m). elim H5; intro. elim a; intro. rewrite (tech2 An n m); [ idtac | assumption ]. rewrite (tech2 Bn n m); [ idtac | assumption ]. unfold R_dist; unfold Rminus; do 2 rewrite Ropp_plus_distr; do 2 rewrite <- Rplus_assoc; do 2 rewrite Rplus_opp_r; do 2 rewrite Rplus_0_l; do 2 rewrite Rabs_Ropp; repeat rewrite Rabs_right. apply sum_Rle; intros. elim (H (S n + n0)%nat); intros. apply H8. apply Rle_ge; apply cond_pos_sum; intro. elim (H (S n + n0)%nat); intros. apply Rle_trans with (An (S n + n0)%nat); assumption. apply Rle_ge; apply cond_pos_sum; intro. elim (H (S n + n0)%nat); intros; assumption. rewrite b; unfold R_dist; unfold Rminus; do 2 rewrite Rplus_opp_r; rewrite Rabs_R0; right; reflexivity. rewrite (tech2 An m n); [ idtac | assumption ]. rewrite (tech2 Bn m n); [ idtac | assumption ]. unfold R_dist; unfold Rminus; do 2 rewrite Rplus_assoc; rewrite (Rplus_comm (sum_f_R0 An m)); rewrite (Rplus_comm (sum_f_R0 Bn m)); do 2 rewrite Rplus_assoc; do 2 rewrite Rplus_opp_l; do 2 rewrite Rplus_0_r; repeat rewrite Rabs_right. apply sum_Rle; intros. elim (H (S m + n0)%nat); intros; apply H8. apply Rle_ge; apply cond_pos_sum; intro. elim (H (S m + n0)%nat); intros. apply Rle_trans with (An (S m + n0)%nat); assumption. apply Rle_ge. apply cond_pos_sum; intro. elim (H (S m + n0)%nat); intros; assumption. Qed. 
(** Cesaro's theorem *) Lemma Cesaro : forall (An Bn:nat -> R) (l:R), Un_cv Bn l -> (forall n:nat, 0 < An n) -> cv_infty (fun n:nat => sum_f_R0 An n) -> Un_cv (fun n:nat => sum_f_R0 (fun k:nat => An k * Bn k) n / sum_f_R0 An n) l. Proof with trivial. unfold Un_cv; intros; assert (H3 : forall n:nat, 0 < sum_f_R0 An n)... intro; apply tech1... assert (H4 : forall n:nat, sum_f_R0 An n <> 0)... intro; red; intro; assert (H5 := H3 n); rewrite H4 in H5; elim (Rlt_irrefl _ H5)... assert (H5 := cv_infty_cv_R0 _ H4 H1); assert (H6 : 0 < eps / 2)... unfold Rdiv; apply Rmult_lt_0_compat... apply Rinv_0_lt_compat; prove_sup... elim (H _ H6); clear H; intros N1 H; set (C := Rabs (sum_f_R0 (fun k:nat => An k * (Bn k - l)) N1)); assert (H7 : exists N : nat, (forall n:nat, (N <= n)%nat -> C / sum_f_R0 An n < eps / 2))... case (Req_dec C 0); intro... exists 0%nat; intros... rewrite H7; unfold Rdiv; rewrite Rmult_0_l; apply Rmult_lt_0_compat... apply Rinv_0_lt_compat; prove_sup... assert (H8 : 0 < eps / (2 * Rabs C))... unfold Rdiv; apply Rmult_lt_0_compat... apply Rinv_0_lt_compat; apply Rmult_lt_0_compat... prove_sup... apply Rabs_pos_lt... elim (H5 _ H8); intros; exists x; intros; assert (H11 := H9 _ H10); unfold R_dist in H11; unfold Rminus in H11; rewrite Ropp_0 in H11; rewrite Rplus_0_r in H11... apply Rle_lt_trans with (Rabs (C / sum_f_R0 An n))... apply RRle_abs... unfold Rdiv; rewrite Rabs_mult; apply Rmult_lt_reg_l with (/ Rabs C)... apply Rinv_0_lt_compat; apply Rabs_pos_lt... rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_l; replace (/ Rabs C * (eps * / 2)) with (eps / (2 * Rabs C))... unfold Rdiv; rewrite Rinv_mult_distr... ring... discrR... apply Rabs_no_R0... apply Rabs_no_R0... elim H7; clear H7; intros N2 H7; set (N := max N1 N2); exists (S N); intros; unfold R_dist; replace (sum_f_R0 (fun k:nat => An k * Bn k) n / sum_f_R0 An n - l) with (sum_f_R0 (fun k:nat => An k * (Bn k - l)) n / sum_f_R0 An n)... assert (H9 : (N1 < n)%nat)... apply lt_le_trans with (S N)... apply le_lt_n_Sm; unfold N; apply le_max_l... rewrite (tech2 (fun k:nat => An k * (Bn k - l)) _ _ H9); unfold Rdiv; rewrite Rmult_plus_distr_r; apply Rle_lt_trans with (Rabs (sum_f_R0 (fun k:nat => An k * (Bn k - l)) N1 / sum_f_R0 An n) + Rabs (sum_f_R0 (fun i:nat => An (S N1 + i)%nat * (Bn (S N1 + i)%nat - l)) (n - S N1) / sum_f_R0 An n))... apply Rabs_triang... rewrite (double_var eps); apply Rplus_lt_compat... unfold Rdiv; rewrite Rabs_mult; fold C; rewrite Rabs_right... apply (H7 n); apply le_trans with (S N)... apply le_trans with N; [ unfold N; apply le_max_r | apply le_n_Sn ]... apply Rle_ge; left; apply Rinv_0_lt_compat... unfold R_dist in H; unfold Rdiv; rewrite Rabs_mult; rewrite (Rabs_right (/ sum_f_R0 An n))... apply Rle_lt_trans with (sum_f_R0 (fun i:nat => Rabs (An (S N1 + i)%nat * (Bn (S N1 + i)%nat - l))) (n - S N1) * / sum_f_R0 An n)... do 2 rewrite <- (Rmult_comm (/ sum_f_R0 An n)); apply Rmult_le_compat_l... left; apply Rinv_0_lt_compat... apply (Rsum_abs (fun i:nat => An (S N1 + i)%nat * (Bn (S N1 + i)%nat - l)) (n - S N1))... apply Rle_lt_trans with (sum_f_R0 (fun i:nat => An (S N1 + i)%nat * (eps / 2)) (n - S N1) * / sum_f_R0 An n)... do 2 rewrite <- (Rmult_comm (/ sum_f_R0 An n)); apply Rmult_le_compat_l... left; apply Rinv_0_lt_compat... apply sum_Rle; intros; rewrite Rabs_mult; pattern (An (S N1 + n0)%nat) at 2; rewrite <- (Rabs_right (An (S N1 + n0)%nat))... apply Rmult_le_compat_l... apply Rabs_pos... 
left; apply H; unfold ge; apply le_trans with (S N1); [ apply le_n_Sn | apply le_plus_l ]... apply Rle_ge; left... rewrite <- (scal_sum (fun i:nat => An (S N1 + i)%nat) (n - S N1) (eps / 2)); unfold Rdiv; repeat rewrite Rmult_assoc; apply Rmult_lt_compat_l... pattern (/ 2) at 2; rewrite <- Rmult_1_r; apply Rmult_lt_compat_l... apply Rinv_0_lt_compat; prove_sup... rewrite Rmult_comm; apply Rmult_lt_reg_l with (sum_f_R0 An n)... rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym... rewrite Rmult_1_l; rewrite Rmult_1_r; rewrite (tech2 An N1 n)... rewrite Rplus_comm; pattern (sum_f_R0 (fun i:nat => An (S N1 + i)%nat) (n - S N1)) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l... apply Rle_ge; left; apply Rinv_0_lt_compat... replace (sum_f_R0 (fun k:nat => An k * (Bn k - l)) n) with (sum_f_R0 (fun k:nat => An k * Bn k) n + sum_f_R0 (fun k:nat => An k * - l) n)... rewrite <- (scal_sum An n (- l)); field... rewrite <- plus_sum; apply sum_eq; intros; ring... Qed. Lemma Cesaro_1 : forall (An:nat -> R) (l:R), Un_cv An l -> Un_cv (fun n:nat => sum_f_R0 An (pred n) / INR n) l. Proof with trivial. intros Bn l H; set (An := fun _:nat => 1)... assert (H0 : forall n:nat, 0 < An n)... intro; unfold An; apply Rlt_0_1... assert (H1 : forall n:nat, 0 < sum_f_R0 An n)... intro; apply tech1... assert (H2 : cv_infty (fun n:nat => sum_f_R0 An n))... unfold cv_infty; intro; case (Rle_dec M 0); intro... exists 0%nat; intros; apply Rle_lt_trans with 0... assert (H2 : 0 < M)... auto with real... clear n; set (m := up M); elim (archimed M); intros; assert (H5 : (0 <= m)%Z)... apply le_IZR; unfold m; simpl; left; apply Rlt_trans with M... elim (IZN _ H5); intros; exists x; intros; unfold An; rewrite sum_cte; rewrite Rmult_1_l; apply Rlt_trans with (IZR (up M))... apply Rle_lt_trans with (INR x)... rewrite INR_IZR_INZ; fold m; rewrite <- H6; right... apply lt_INR; apply le_lt_n_Sm... assert (H3 := Cesaro _ _ _ H H0 H2)... unfold Un_cv; unfold Un_cv in H3; intros; elim (H3 _ H4); intros; exists (S x); intros; unfold R_dist; unfold R_dist in H5; apply Rle_lt_trans with (Rabs (sum_f_R0 (fun k:nat => An k * Bn k) (pred n) / sum_f_R0 An (pred n) - l))... right; replace (sum_f_R0 Bn (pred n) / INR n - l) with (sum_f_R0 (fun k:nat => An k * Bn k) (pred n) / sum_f_R0 An (pred n) - l)... unfold Rminus; do 2 rewrite <- (Rplus_comm (- l)); apply Rplus_eq_compat_l... unfold An; replace (sum_f_R0 (fun k:nat => 1 * Bn k) (pred n)) with (sum_f_R0 Bn (pred n))... rewrite sum_cte; rewrite Rmult_1_l; replace (S (pred n)) with n... apply S_pred with 0%nat; apply lt_le_trans with (S x)... apply lt_O_Sn... apply sum_eq; intros; ring... apply H5; unfold ge; apply le_S_n; replace (S (pred n)) with n... apply S_pred with 0%nat; apply lt_le_trans with (S x)... apply lt_O_Sn... Qed. coq-8.4pl2/theories/Reals/MVT.v0000640000175000001440000006157212010532755015367 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (a b:R) (pr1:forall c:R, a < c < b -> derivable_pt f c) (pr2:forall c:R, a < c < b -> derivable_pt g c), a < b -> (forall c:R, a <= c <= b -> continuity_pt f c) -> (forall c:R, a <= c <= b -> continuity_pt g c) -> exists c : R, (exists P : a < c < b, (g b - g a) * derive_pt f c (pr1 c P) = (f b - f a) * derive_pt g c (pr2 c P)). Proof. intros; assert (H2 := Rlt_le _ _ H). set (h := fun y:R => (g b - g a) * f y - (f b - f a) * g y). cut (forall c:R, a < c < b -> derivable_pt h c). 
intro X; cut (forall c:R, a <= c <= b -> continuity_pt h c). intro; assert (H4 := continuity_ab_maj h a b H2 H3). assert (H5 := continuity_ab_min h a b H2 H3). elim H4; intros Mx H6. elim H5; intros mx H7. cut (h a = h b). intro; set (M := h Mx); set (m := h mx). cut (forall (c:R) (P:a < c < b), derive_pt h c (X c P) = (g b - g a) * derive_pt f c (pr1 c P) - (f b - f a) * derive_pt g c (pr2 c P)). intro; case (Req_dec (h a) M); intro. case (Req_dec (h a) m); intro. cut (forall c:R, a <= c <= b -> h c = M). intro; cut (a < (a + b) / 2 < b). (*** h constant ***) intro; exists ((a + b) / 2). exists H13. apply Rminus_diag_uniq; rewrite <- H9; apply deriv_constant2 with a b. elim H13; intros; assumption. elim H13; intros; assumption. intros; rewrite (H12 ((a + b) / 2)). apply H12; split; left; assumption. elim H13; intros; split; left; assumption. split. apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; apply H. discrR. apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite Rplus_comm; rewrite double; apply Rplus_lt_compat_l; apply H. discrR. intros; elim H6; intros H13 _. elim H7; intros H14 _. apply Rle_antisym. apply H13; apply H12. rewrite H10 in H11; rewrite H11; apply H14; apply H12. cut (a < mx < b). (*** h admet un minimum global sur [a,b] ***) intro; exists mx. exists H12. apply Rminus_diag_uniq; rewrite <- H9; apply deriv_minimum with a b. elim H12; intros; assumption. elim H12; intros; assumption. intros; elim H7; intros. apply H15; split; left; assumption. elim H7; intros _ H12; elim H12; intros; split. inversion H13. apply H15. rewrite H15 in H11; elim H11; reflexivity. inversion H14. apply H15. rewrite H8 in H11; rewrite <- H15 in H11; elim H11; reflexivity. cut (a < Mx < b). (*** h admet un maximum global sur [a,b] ***) intro; exists Mx. exists H11. apply Rminus_diag_uniq; rewrite <- H9; apply deriv_maximum with a b. elim H11; intros; assumption. elim H11; intros; assumption. intros; elim H6; intros; apply H14. split; left; assumption. elim H6; intros _ H11; elim H11; intros; split. inversion H12. apply H14. rewrite H14 in H10; elim H10; reflexivity. inversion H13. apply H14. rewrite H8 in H10; rewrite <- H14 in H10; elim H10; reflexivity. intros; unfold h; replace (derive_pt (fun y:R => (g b - g a) * f y - (f b - f a) * g y) c (X c P)) with (derive_pt ((fct_cte (g b - g a) * f)%F - (fct_cte (f b - f a) * g)%F) c (derivable_pt_minus _ _ _ (derivable_pt_mult _ _ _ (derivable_pt_const (g b - g a) c) (pr1 c P)) (derivable_pt_mult _ _ _ (derivable_pt_const (f b - f a) c) (pr2 c P)))); [ idtac | apply pr_nu ]. rewrite derive_pt_minus; do 2 rewrite derive_pt_mult; do 2 rewrite derive_pt_const; do 2 rewrite Rmult_0_l; do 2 rewrite Rplus_0_l; reflexivity. unfold h; ring. intros; unfold h; change (continuity_pt ((fct_cte (g b - g a) * f)%F - (fct_cte (f b - f a) * g)%F) c). apply continuity_pt_minus; apply continuity_pt_mult. apply derivable_continuous_pt; apply derivable_const. apply H0; apply H3. apply derivable_continuous_pt; apply derivable_const. apply H1; apply H3. intros; change (derivable_pt ((fct_cte (g b - g a) * f)%F - (fct_cte (f b - f a) * g)%F) c). apply derivable_pt_minus; apply derivable_pt_mult. apply derivable_pt_const. apply (pr1 _ H3). apply derivable_pt_const. apply (pr2 _ H3). Qed. (* Corollaries ... 
*) Lemma MVT_cor1 : forall (f:R -> R) (a b:R) (pr:derivable f), a < b -> exists c : R, f b - f a = derive_pt f c (pr c) * (b - a) /\ a < c < b. Proof. intros f a b pr H; cut (forall c:R, a < c < b -> derivable_pt f c); [ intro X | intros; apply pr ]. cut (forall c:R, a < c < b -> derivable_pt id c); [ intro X0 | intros; apply derivable_pt_id ]. cut (forall c:R, a <= c <= b -> continuity_pt f c); [ intro | intros; apply derivable_continuous_pt; apply pr ]. cut (forall c:R, a <= c <= b -> continuity_pt id c); [ intro | intros; apply derivable_continuous_pt; apply derivable_id ]. assert (H2 := MVT f id a b X X0 H H0 H1). elim H2; intros c H3; elim H3; intros. exists c; split. cut (derive_pt id c (X0 c x) = derive_pt id c (derivable_pt_id c)); [ intro | apply pr_nu ]. rewrite H5 in H4; rewrite (derive_pt_id c) in H4; rewrite Rmult_1_r in H4; rewrite <- H4; replace (derive_pt f c (X c x)) with (derive_pt f c (pr c)); [ idtac | apply pr_nu ]; apply Rmult_comm. apply x. Qed. Theorem MVT_cor2 : forall (f f':R -> R) (a b:R), a < b -> (forall c:R, a <= c <= b -> derivable_pt_lim f c (f' c)) -> exists c : R, f b - f a = f' c * (b - a) /\ a < c < b. Proof. intros f f' a b H H0; cut (forall c:R, a <= c <= b -> derivable_pt f c). intro X; cut (forall c:R, a < c < b -> derivable_pt f c). intro X0; cut (forall c:R, a <= c <= b -> continuity_pt f c). intro; cut (forall c:R, a <= c <= b -> derivable_pt id c). intro X1; cut (forall c:R, a < c < b -> derivable_pt id c). intro X2; cut (forall c:R, a <= c <= b -> continuity_pt id c). intro; elim (MVT f id a b X0 X2 H H1 H2); intros; elim H3; clear H3; intros; exists x; split. cut (derive_pt id x (X2 x x0) = 1). cut (derive_pt f x (X0 x x0) = f' x). intros; rewrite H4 in H3; rewrite H5 in H3; unfold id in H3; rewrite Rmult_1_r in H3; rewrite Rmult_comm; symmetry ; assumption. apply derive_pt_eq_0; apply H0; elim x0; intros; split; left; assumption. apply derive_pt_eq_0; apply derivable_pt_lim_id. assumption. intros; apply derivable_continuous_pt; apply X1; assumption. intros; apply derivable_pt_id. intros; apply derivable_pt_id. intros; apply derivable_continuous_pt; apply X; assumption. intros; elim H1; intros; apply X; split; left; assumption. intros; unfold derivable_pt; exists (f' c); apply H0; apply H1. Qed. Lemma MVT_cor3 : forall (f f':R -> R) (a b:R), a < b -> (forall x:R, a <= x -> x <= b -> derivable_pt_lim f x (f' x)) -> exists c : R, a <= c /\ c <= b /\ f b = f a + f' c * (b - a). Proof. intros f f' a b H H0; assert (H1 : exists c : R, f b - f a = f' c * (b - a) /\ a < c < b); [ apply MVT_cor2; [ apply H | intros; elim H1; intros; apply (H0 _ H2 H3) ] | elim H1; intros; exists x; elim H2; intros; elim H4; intros; split; [ left; assumption | split; [ left; assumption | rewrite <- H3; ring ] ] ]. Qed. Lemma Rolle : forall (f:R -> R) (a b:R) (pr:forall x:R, a < x < b -> derivable_pt f x), (forall x:R, a <= x <= b -> continuity_pt f x) -> a < b -> f a = f b -> exists c : R, (exists P : a < c < b, derive_pt f c (pr c P) = 0). Proof. intros; assert (H2 : forall x:R, a < x < b -> derivable_pt id x). intros; apply derivable_pt_id. assert (H3 := MVT f id a b pr H2 H0 H); assert (H4 : forall x:R, a <= x <= b -> continuity_pt id x). intros; apply derivable_continuous; apply derivable_id. 
elim (H3 H4); intros; elim H5; intros; exists x; exists x0; rewrite H1 in H6; unfold id in H6; unfold Rminus in H6; rewrite Rplus_opp_r in H6; rewrite Rmult_0_l in H6; apply Rmult_eq_reg_l with (b - a); [ rewrite Rmult_0_r; apply H6 | apply Rminus_eq_contra; red; intro; rewrite H7 in H0; elim (Rlt_irrefl _ H0) ]. Qed. (**********) Lemma nonneg_derivative_1 : forall (f:R -> R) (pr:derivable f), (forall x:R, 0 <= derive_pt f x (pr x)) -> increasing f. Proof. intros. unfold increasing. intros. case (total_order_T x y); intro. elim s; intro. apply Rplus_le_reg_l with (- f x). rewrite Rplus_opp_l; rewrite Rplus_comm. assert (H1 := MVT_cor1 f _ _ pr a). elim H1; intros. elim H2; intros. unfold Rminus in H3. rewrite H3. apply Rmult_le_pos. apply H. apply Rplus_le_reg_l with x. rewrite Rplus_0_r; replace (x + (y + - x)) with y; [ assumption | ring ]. rewrite b; right; reflexivity. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H0 r)). Qed. (**********) Lemma nonpos_derivative_0 : forall (f:R -> R) (pr:derivable f), decreasing f -> forall x:R, derive_pt f x (pr x) <= 0. Proof. intros f pr H x; assert (H0 := H); unfold decreasing in H0; generalize (derivable_derive f x (pr x)); intro; elim H1; intros l H2. rewrite H2; case (Rtotal_order l 0); intro. left; assumption. elim H3; intro. right; assumption. generalize (derive_pt_eq_1 f x l (pr x) H2); intros; cut (0 < l / 2). intro; elim (H5 (l / 2) H6); intros delta H7; cut (delta / 2 <> 0 /\ 0 < delta / 2 /\ Rabs (delta / 2) < delta). intro; decompose [and] H8; intros; generalize (H7 (delta / 2) H9 H12); cut ((f (x + delta / 2) - f x) / (delta / 2) <= 0). intro; cut (0 < - ((f (x + delta / 2) - f x) / (delta / 2) - l)). intro; unfold Rabs; case (Rcase_abs ((f (x + delta / 2) - f x) / (delta / 2) - l)). intros; generalize (Rplus_lt_compat_r (- l) (- ((f (x + delta / 2) - f x) / (delta / 2) - l)) (l / 2) H14); unfold Rminus. replace (l / 2 + - l) with (- (l / 2)). replace (- ((f (x + delta / 2) + - f x) / (delta / 2) + - l) + - l) with (- ((f (x + delta / 2) + - f x) / (delta / 2))). intro. generalize (Ropp_lt_gt_contravar (- ((f (x + delta / 2) + - f x) / (delta / 2))) (- (l / 2)) H15). repeat rewrite Ropp_involutive. intro. generalize (Rlt_trans 0 (l / 2) ((f (x + delta / 2) - f x) / (delta / 2)) H6 H16); intro. elim (Rlt_irrefl 0 (Rlt_le_trans 0 ((f (x + delta / 2) - f x) / (delta / 2)) 0 H17 H10)). ring. pattern l at 3; rewrite double_var. ring. intros. generalize (Ropp_ge_le_contravar ((f (x + delta / 2) - f x) / (delta / 2) - l) 0 r). rewrite Ropp_0. intro. elim (Rlt_irrefl 0 (Rlt_le_trans 0 (- ((f (x + delta / 2) - f x) / (delta / 2) - l)) 0 H13 H15)). replace (- ((f (x + delta / 2) - f x) / (delta / 2) - l)) with ((f x - f (x + delta / 2)) / (delta / 2) + l). unfold Rminus. apply Rplus_le_lt_0_compat. unfold Rdiv; apply Rmult_le_pos. cut (x <= x + delta * / 2). intro; generalize (H0 x (x + delta * / 2) H13); intro; generalize (Rplus_le_compat_l (- f (x + delta / 2)) (f (x + delta / 2)) (f x) H14); rewrite Rplus_opp_l; rewrite Rplus_comm; intro; assumption. pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; left; assumption. left; apply Rinv_0_lt_compat; assumption. assumption. rewrite Ropp_minus_distr. unfold Rminus. rewrite (Rplus_comm l). unfold Rdiv. rewrite <- Ropp_mult_distr_l_reverse. rewrite Ropp_plus_distr. rewrite Ropp_involutive. rewrite (Rplus_comm (f x)). reflexivity. replace ((f (x + delta / 2) - f x) / (delta / 2)) with (- ((f x - f (x + delta / 2)) / (delta / 2))). rewrite <- Ropp_0. apply Ropp_ge_le_contravar. apply Rle_ge. 
unfold Rdiv; apply Rmult_le_pos. cut (x <= x + delta * / 2). intro; generalize (H0 x (x + delta * / 2) H10); intro. generalize (Rplus_le_compat_l (- f (x + delta / 2)) (f (x + delta / 2)) (f x) H13); rewrite Rplus_opp_l; rewrite Rplus_comm; intro; assumption. pattern x at 1; rewrite <- (Rplus_0_r x); apply Rplus_le_compat_l; left; assumption. left; apply Rinv_0_lt_compat; assumption. unfold Rdiv; rewrite <- Ropp_mult_distr_l_reverse. rewrite Ropp_minus_distr. reflexivity. split. unfold Rdiv; apply prod_neq_R0. generalize (cond_pos delta); intro; red; intro H9; rewrite H9 in H8; elim (Rlt_irrefl 0 H8). apply Rinv_neq_0_compat; discrR. split. unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos delta) | apply Rinv_0_lt_compat; prove_sup0 ]. rewrite Rabs_right. unfold Rdiv; apply Rmult_lt_reg_l with 2. prove_sup0. rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite double; pattern (pos delta) at 1; rewrite <- Rplus_0_r. apply Rplus_lt_compat_l; apply (cond_pos delta). discrR. apply Rle_ge; unfold Rdiv; left; apply Rmult_lt_0_compat. apply (cond_pos delta). apply Rinv_0_lt_compat; prove_sup0. unfold Rdiv; apply Rmult_lt_0_compat; [ apply H4 | apply Rinv_0_lt_compat; prove_sup0 ]. Qed. (**********) Lemma increasing_decreasing_opp : forall f:R -> R, increasing f -> decreasing (- f)%F. Proof. unfold increasing, decreasing, opp_fct; intros; generalize (H x y H0); intro; apply Ropp_ge_le_contravar; apply Rle_ge; assumption. Qed. (**********) Lemma nonpos_derivative_1 : forall (f:R -> R) (pr:derivable f), (forall x:R, derive_pt f x (pr x) <= 0) -> decreasing f. Proof. intros. cut (forall h:R, - - f h = f h). intro. generalize (increasing_decreasing_opp (- f)%F). unfold decreasing. unfold opp_fct. intros. rewrite <- (H0 x); rewrite <- (H0 y). apply H1. cut (forall x:R, 0 <= derive_pt (- f) x (derivable_opp f pr x)). intros. replace (fun x:R => - f x) with (- f)%F; [ idtac | reflexivity ]. apply (nonneg_derivative_1 (- f)%F (derivable_opp f pr) H3). intro. assert (H3 := derive_pt_opp f x0 (pr x0)). cut (derive_pt (- f) x0 (derivable_pt_opp f x0 (pr x0)) = derive_pt (- f) x0 (derivable_opp f pr x0)). intro. rewrite <- H4. rewrite H3. rewrite <- Ropp_0; apply Ropp_ge_le_contravar; apply Rle_ge; apply (H x0). apply pr_nu. assumption. intro; ring. Qed. (**********) Lemma positive_derivative : forall (f:R -> R) (pr:derivable f), (forall x:R, 0 < derive_pt f x (pr x)) -> strict_increasing f. Proof. intros. unfold strict_increasing. intros. apply Rplus_lt_reg_r with (- f x). rewrite Rplus_opp_l; rewrite Rplus_comm. assert (H1 := MVT_cor1 f _ _ pr H0). elim H1; intros. elim H2; intros. unfold Rminus in H3. rewrite H3. apply Rmult_lt_0_compat. apply H. apply Rplus_lt_reg_r with x. rewrite Rplus_0_r; replace (x + (y + - x)) with y; [ assumption | ring ]. Qed. (**********) Lemma strictincreasing_strictdecreasing_opp : forall f:R -> R, strict_increasing f -> strict_decreasing (- f)%F. Proof. unfold strict_increasing, strict_decreasing, opp_fct; intros; generalize (H x y H0); intro; apply Ropp_lt_gt_contravar; assumption. Qed. (**********) Lemma negative_derivative : forall (f:R -> R) (pr:derivable f), (forall x:R, derive_pt f x (pr x) < 0) -> strict_decreasing f. Proof. intros. cut (forall h:R, - - f h = f h). intros. generalize (strictincreasing_strictdecreasing_opp (- f)%F). unfold strict_decreasing, opp_fct. intros. rewrite <- (H0 x). rewrite <- (H0 y). apply H1; [ idtac | assumption ]. cut (forall x:R, 0 < derive_pt (- f) x (derivable_opp f pr x)). 
intros; eapply positive_derivative; apply H3. intro. assert (H3 := derive_pt_opp f x0 (pr x0)). cut (derive_pt (- f) x0 (derivable_pt_opp f x0 (pr x0)) = derive_pt (- f) x0 (derivable_opp f pr x0)). intro. rewrite <- H4; rewrite H3. rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; apply (H x0). apply pr_nu. intro; ring. Qed. (**********) Lemma null_derivative_0 : forall (f:R -> R) (pr:derivable f), constant f -> forall x:R, derive_pt f x (pr x) = 0. Proof. intros. unfold constant in H. apply derive_pt_eq_0. intros; exists (mkposreal 1 Rlt_0_1); simpl; intros. rewrite (H x (x + h)); unfold Rminus; unfold Rdiv; rewrite Rplus_opp_r; rewrite Rmult_0_l; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. Qed. (**********) Lemma increasing_decreasing : forall f:R -> R, increasing f -> decreasing f -> constant f. Proof. unfold increasing, decreasing, constant; intros; case (Rtotal_order x y); intro. generalize (Rlt_le x y H1); intro; apply (Rle_antisym (f x) (f y) (H x y H2) (H0 x y H2)). elim H1; intro. rewrite H2; reflexivity. generalize (Rlt_le y x H2); intro; symmetry ; apply (Rle_antisym (f y) (f x) (H y x H3) (H0 y x H3)). Qed. (**********) Lemma null_derivative_1 : forall (f:R -> R) (pr:derivable f), (forall x:R, derive_pt f x (pr x) = 0) -> constant f. Proof. intros. cut (forall x:R, derive_pt f x (pr x) <= 0). cut (forall x:R, 0 <= derive_pt f x (pr x)). intros. assert (H2 := nonneg_derivative_1 f pr H0). assert (H3 := nonpos_derivative_1 f pr H1). apply increasing_decreasing; assumption. intro; right; symmetry ; apply (H x). intro; right; apply (H x). Qed. (**********) Lemma derive_increasing_interv_ax : forall (a b:R) (f:R -> R) (pr:derivable f), a < b -> ((forall t:R, a < t < b -> 0 < derive_pt f t (pr t)) -> forall x y:R, a <= x <= b -> a <= y <= b -> x < y -> f x < f y) /\ ((forall t:R, a < t < b -> 0 <= derive_pt f t (pr t)) -> forall x y:R, a <= x <= b -> a <= y <= b -> x < y -> f x <= f y). Proof. intros. split; intros. apply Rplus_lt_reg_r with (- f x). rewrite Rplus_opp_l; rewrite Rplus_comm. assert (H4 := MVT_cor1 f _ _ pr H3). elim H4; intros. elim H5; intros. unfold Rminus in H6. rewrite H6. apply Rmult_lt_0_compat. apply H0. elim H7; intros. split. elim H1; intros. apply Rle_lt_trans with x; assumption. elim H2; intros. apply Rlt_le_trans with y; assumption. apply Rplus_lt_reg_r with x. rewrite Rplus_0_r; replace (x + (y + - x)) with y; [ assumption | ring ]. apply Rplus_le_reg_l with (- f x). rewrite Rplus_opp_l; rewrite Rplus_comm. assert (H4 := MVT_cor1 f _ _ pr H3). elim H4; intros. elim H5; intros. unfold Rminus in H6. rewrite H6. apply Rmult_le_pos. apply H0. elim H7; intros. split. elim H1; intros. apply Rle_lt_trans with x; assumption. elim H2; intros. apply Rlt_le_trans with y; assumption. apply Rplus_le_reg_l with x. rewrite Rplus_0_r; replace (x + (y + - x)) with y; [ left; assumption | ring ]. Qed. (**********) Lemma derive_increasing_interv : forall (a b:R) (f:R -> R) (pr:derivable f), a < b -> (forall t:R, a < t < b -> 0 < derive_pt f t (pr t)) -> forall x y:R, a <= x <= b -> a <= y <= b -> x < y -> f x < f y. Proof. intros. generalize (derive_increasing_interv_ax a b f pr H); intro. elim H4; intros H5 _; apply (H5 H0 x y H1 H2 H3). Qed. (**********) Lemma derive_increasing_interv_var : forall (a b:R) (f:R -> R) (pr:derivable f), a < b -> (forall t:R, a < t < b -> 0 <= derive_pt f t (pr t)) -> forall x y:R, a <= x <= b -> a <= y <= b -> x < y -> f x <= f y. Proof. 
intros a b f pr H H0 x y H1 H2 H3; generalize (derive_increasing_interv_ax a b f pr H); intro; elim H4; intros _ H5; apply (H5 H0 x y H1 H2 H3). Qed. (**********) (**********) Theorem IAF : forall (f:R -> R) (a b k:R) (pr:derivable f), a <= b -> (forall c:R, a <= c <= b -> derive_pt f c (pr c) <= k) -> f b - f a <= k * (b - a). Proof. intros. case (total_order_T a b); intro. elim s; intro. assert (H1 := MVT_cor1 f _ _ pr a0). elim H1; intros. elim H2; intros. rewrite H3. do 2 rewrite <- (Rmult_comm (b - a)). apply Rmult_le_compat_l. apply Rplus_le_reg_l with a; rewrite Rplus_0_r. replace (a + (b - a)) with b; [ assumption | ring ]. apply H0. elim H4; intros. split; left; assumption. rewrite b0. unfold Rminus; do 2 rewrite Rplus_opp_r. rewrite Rmult_0_r; right; reflexivity. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). Qed. Lemma IAF_var : forall (f g:R -> R) (a b:R) (pr1:derivable f) (pr2:derivable g), a <= b -> (forall c:R, a <= c <= b -> derive_pt g c (pr2 c) <= derive_pt f c (pr1 c)) -> g b - g a <= f b - f a. Proof. intros. cut (derivable (g - f)). intro X. cut (forall c:R, a <= c <= b -> derive_pt (g - f) c (X c) <= 0). intro. assert (H2 := IAF (g - f)%F a b 0 X H H1). rewrite Rmult_0_l in H2; unfold minus_fct in H2. apply Rplus_le_reg_l with (- f b + f a). replace (- f b + f a + (f b - f a)) with 0; [ idtac | ring ]. replace (- f b + f a + (g b - g a)) with (g b - f b - (g a - f a)); [ apply H2 | ring ]. intros. cut (derive_pt (g - f) c (X c) = derive_pt (g - f) c (derivable_pt_minus _ _ _ (pr2 c) (pr1 c))). intro. rewrite H2. rewrite derive_pt_minus. apply Rplus_le_reg_l with (derive_pt f c (pr1 c)). rewrite Rplus_0_r. replace (derive_pt f c (pr1 c) + (derive_pt g c (pr2 c) - derive_pt f c (pr1 c))) with (derive_pt g c (pr2 c)); [ idtac | ring ]. apply H0; assumption. apply pr_nu. apply derivable_minus; assumption. Qed. (* If f has a null derivative in ]a,b[ and is continue in [a,b], *) (* then f is constant on [a,b] *) Lemma null_derivative_loc : forall (f:R -> R) (a b:R) (pr:forall x:R, a < x < b -> derivable_pt f x), (forall x:R, a <= x <= b -> continuity_pt f x) -> (forall (x:R) (P:a < x < b), derive_pt f x (pr x P) = 0) -> constant_D_eq f (fun x:R => a <= x <= b) (f a). Proof. intros; unfold constant_D_eq; intros; case (total_order_T a b); intro. elim s; intro. assert (H2 : forall y:R, a < y < x -> derivable_pt id y). intros; apply derivable_pt_id. assert (H3 : forall y:R, a <= y <= x -> continuity_pt id y). intros; apply derivable_continuous; apply derivable_id. assert (H4 : forall y:R, a < y < x -> derivable_pt f y). intros; apply pr; elim H4; intros; split. assumption. elim H1; intros; apply Rlt_le_trans with x; assumption. assert (H5 : forall y:R, a <= y <= x -> continuity_pt f y). intros; apply H; elim H5; intros; split. assumption. elim H1; intros; apply Rle_trans with x; assumption. elim H1; clear H1; intros; elim H1; clear H1; intro. assert (H7 := MVT f id a x H4 H2 H1 H5 H3). elim H7; intros; elim H8; intros; assert (H10 : a < x0 < b). elim x1; intros; split. assumption. apply Rlt_le_trans with x; assumption. assert (H11 : derive_pt f x0 (H4 x0 x1) = 0). replace (derive_pt f x0 (H4 x0 x1)) with (derive_pt f x0 (pr x0 H10)); [ apply H0 | apply pr_nu ]. assert (H12 : derive_pt id x0 (H2 x0 x1) = 1). apply derive_pt_eq_0; apply derivable_pt_lim_id. rewrite H11 in H9; rewrite H12 in H9; rewrite Rmult_0_r in H9; rewrite Rmult_1_r in H9; apply Rminus_diag_uniq; symmetry ; assumption. rewrite H1; reflexivity. assert (H2 : x = a). 
rewrite <- b0 in H1; elim H1; intros; apply Rle_antisym; assumption. rewrite H2; reflexivity. elim H1; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ (Rle_trans _ _ _ H2 H3) r)). Qed. (* Unicity of the antiderivative *) Lemma antiderivative_Ucte : forall (f g1 g2:R -> R) (a b:R), antiderivative f g1 a b -> antiderivative f g2 a b -> exists c : R, (forall x:R, a <= x <= b -> g1 x = g2 x + c). Proof. unfold antiderivative; intros; elim H; clear H; intros; elim H0; clear H0; intros H0 _; exists (g1 a - g2 a); intros; assert (H3 : forall x:R, a <= x <= b -> derivable_pt g1 x). intros; unfold derivable_pt; exists (f x0); elim (H x0 H3); intros; eapply derive_pt_eq_1; symmetry ; apply H4. assert (H4 : forall x:R, a <= x <= b -> derivable_pt g2 x). intros; unfold derivable_pt; exists (f x0); elim (H0 x0 H4); intros; eapply derive_pt_eq_1; symmetry ; apply H5. assert (H5 : forall x:R, a < x < b -> derivable_pt (g1 - g2) x). intros; elim H5; intros; apply derivable_pt_minus; [ apply H3; split; left; assumption | apply H4; split; left; assumption ]. assert (H6 : forall x:R, a <= x <= b -> continuity_pt (g1 - g2) x). intros; apply derivable_continuous_pt; apply derivable_pt_minus; [ apply H3 | apply H4 ]; assumption. assert (H7 : forall (x:R) (P:a < x < b), derive_pt (g1 - g2) x (H5 x P) = 0). intros; elim P; intros; apply derive_pt_eq_0; replace 0 with (f x0 - f x0); [ idtac | ring ]. assert (H9 : a <= x0 <= b). split; left; assumption. apply derivable_pt_lim_minus; [ elim (H _ H9) | elim (H0 _ H9) ]; intros; eapply derive_pt_eq_1; symmetry ; apply H10. assert (H8 := null_derivative_loc (g1 - g2)%F a b H5 H6 H7); unfold constant_D_eq in H8; assert (H9 := H8 _ H2); unfold minus_fct in H9; rewrite <- H9; ring. Qed. coq-8.4pl2/theories/Reals/ArithProp.v0000640000175000001440000001550212010532755016621 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (n - i)%nat <> 0%nat. Proof. intros; red; intro. cut (forall n m:nat, (m <= n)%nat -> (n - m)%nat = 0%nat -> n = m). intro; assert (H2 := H1 _ _ (lt_le_weak _ _ H) H0); rewrite H2 in H; elim (lt_irrefl _ H). set (R := fun n m:nat => (m <= n)%nat -> (n - m)%nat = 0%nat -> n = m). cut ((forall n m:nat, R n m) -> forall n0 m:nat, (m <= n0)%nat -> (n0 - m)%nat = 0%nat -> n0 = m). intro; apply H1. apply nat_double_ind. unfold R; intros; inversion H2; reflexivity. unfold R; intros; simpl in H3; assumption. unfold R; intros; simpl in H4; assert (H5 := le_S_n _ _ H3); assert (H6 := H2 H5 H4); rewrite H6; reflexivity. unfold R; intros; apply H1; assumption. Qed. Lemma le_minusni_n : forall n i:nat, (i <= n)%nat -> (n - i <= n)%nat. Proof. set (R := fun m n:nat => (n <= m)%nat -> (m - n <= m)%nat). cut ((forall m n:nat, R m n) -> forall n i:nat, (i <= n)%nat -> (n - i <= n)%nat). intro; apply H. apply nat_double_ind. unfold R; intros; simpl; apply le_n. unfold R; intros; simpl; apply le_n. unfold R; intros; simpl; apply le_trans with n. apply H0; apply le_S_n; assumption. apply le_n_Sn. unfold R; intros; apply H; assumption. Qed. Lemma lt_minus_O_lt : forall m n:nat, (m < n)%nat -> (0 < n - m)%nat. Proof. intros n m; pattern n, m; apply nat_double_ind; [ intros; rewrite <- minus_n_O; assumption | intros; elim (lt_n_O _ H) | intros; simpl; apply H; apply lt_S_n; assumption ]. Qed. Lemma even_odd_cor : forall n:nat, exists p : nat, n = (2 * p)%nat \/ n = S (2 * p). Proof. intro. assert (H := even_or_odd n). exists (div2 n). assert (H0 := even_odd_double n). 
elim H0; intros. elim H1; intros H3 _. elim H2; intros H4 _. replace (2 * div2 n)%nat with (double (div2 n)). elim H; intro. left. apply H3; assumption. right. apply H4; assumption. unfold double;ring. Qed. (* 2m <= 2n => m<=n *) Lemma le_double : forall m n:nat, (2 * m <= 2 * n)%nat -> (m <= n)%nat. Proof. intros; apply INR_le. assert (H1 := le_INR _ _ H). do 2 rewrite mult_INR in H1. apply Rmult_le_reg_l with (INR 2). replace (INR 2) with 2; [ prove_sup0 | reflexivity ]. assumption. Qed. (** Here, we have the euclidian division *) (** This lemma is used in the proof of sin_eq_0 : (sin x)=0<->x=kPI *) Lemma euclidian_division : forall x y:R, y <> 0 -> exists k : Z, (exists r : R, x = IZR k * y + r /\ 0 <= r < Rabs y). Proof. intros. set (k0 := match Rcase_abs y with | left _ => (1 - up (x / - y))%Z | right _ => (up (x / y) - 1)%Z end). exists k0. exists (x - IZR k0 * y). split. ring. unfold k0; case (Rcase_abs y); intro. assert (H0 := archimed (x / - y)); rewrite <- Z_R_minus; simpl; unfold Rminus. replace (- ((1 + - IZR (up (x / - y))) * y)) with ((IZR (up (x / - y)) - 1) * y); [ idtac | ring ]. split. apply Rmult_le_reg_l with (/ - y). apply Rinv_0_lt_compat; apply Ropp_0_gt_lt_contravar; exact r. rewrite Rmult_0_r; rewrite (Rmult_comm (/ - y)); rewrite Rmult_plus_distr_r; rewrite <- Ropp_inv_permute; [ idtac | assumption ]. rewrite Rmult_assoc; repeat rewrite Ropp_mult_distr_r_reverse; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_r | assumption ]. apply Rplus_le_reg_l with (IZR (up (x / - y)) - x / - y). rewrite Rplus_0_r; unfold Rdiv; pattern (/ - y) at 4; rewrite <- Ropp_inv_permute; [ idtac | assumption ]. replace (IZR (up (x * / - y)) - x * - / y + (- (x * / y) + - (IZR (up (x * / - y)) - 1))) with 1; [ idtac | ring ]. elim H0; intros _ H1; unfold Rdiv in H1; exact H1. rewrite (Rabs_left _ r); apply Rmult_lt_reg_l with (/ - y). apply Rinv_0_lt_compat; apply Ropp_0_gt_lt_contravar; exact r. rewrite <- Rinv_l_sym. rewrite (Rmult_comm (/ - y)); rewrite Rmult_plus_distr_r; rewrite <- Ropp_inv_permute; [ idtac | assumption ]. rewrite Rmult_assoc; repeat rewrite Ropp_mult_distr_r_reverse; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_r | assumption ]; apply Rplus_lt_reg_r with (IZR (up (x / - y)) - 1). replace (IZR (up (x / - y)) - 1 + 1) with (IZR (up (x / - y))); [ idtac | ring ]. replace (IZR (up (x / - y)) - 1 + (- (x * / y) + - (IZR (up (x / - y)) - 1))) with (- (x * / y)); [ idtac | ring ]. rewrite <- Ropp_mult_distr_r_reverse; rewrite (Ropp_inv_permute _ H); elim H0; unfold Rdiv; intros H1 _; exact H1. apply Ropp_neq_0_compat; assumption. assert (H0 := archimed (x / y)); rewrite <- Z_R_minus; simpl; cut (0 < y). intro; unfold Rminus; replace (- ((IZR (up (x / y)) + -1) * y)) with ((1 - IZR (up (x / y))) * y); [ idtac | ring ]. split. apply Rmult_le_reg_l with (/ y). apply Rinv_0_lt_compat; assumption. rewrite Rmult_0_r; rewrite (Rmult_comm (/ y)); rewrite Rmult_plus_distr_r; rewrite Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_r | assumption ]; apply Rplus_le_reg_l with (IZR (up (x / y)) - x / y); rewrite Rplus_0_r; unfold Rdiv; replace (IZR (up (x * / y)) - x * / y + (x * / y + (1 - IZR (up (x * / y))))) with 1; [ idtac | ring ]; elim H0; intros _ H2; unfold Rdiv in H2; exact H2. rewrite (Rabs_right _ r); apply Rmult_lt_reg_l with (/ y). apply Rinv_0_lt_compat; assumption. 
rewrite <- (Rinv_l_sym _ H); rewrite (Rmult_comm (/ y)); rewrite Rmult_plus_distr_r; rewrite Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_r | assumption ]; apply Rplus_lt_reg_r with (IZR (up (x / y)) - 1); replace (IZR (up (x / y)) - 1 + 1) with (IZR (up (x / y))); [ idtac | ring ]; replace (IZR (up (x / y)) - 1 + (x * / y + (1 - IZR (up (x / y))))) with (x * / y); [ idtac | ring ]; elim H0; unfold Rdiv; intros H2 _; exact H2. case (total_order_T 0 y); intro. elim s; intro. assumption. elim H; symmetry ; exact b. assert (H1 := Rge_le _ _ r); elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H1 r0)). Qed. Lemma tech8 : forall n i:nat, (n <= S n + i)%nat. Proof. intros; induction i as [| i Hreci]. replace (S n + 0)%nat with (S n); [ apply le_n_Sn | ring ]. replace (S n + S i)%nat with (S (S n + i)). apply le_S; assumption. apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; do 2 rewrite S_INR; ring. Qed. coq-8.4pl2/theories/Reals/Rgeom.v0000640000175000001440000001710112010532755015757 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Rsqr b = Rsqr c + Rsqr a - 2 * (a * c * cos ac). Proof. unfold dist_euc; intros; repeat rewrite Rsqr_sqrt; [ rewrite H; unfold Rsqr; ring | apply Rplus_le_le_0_compat | apply Rplus_le_le_0_compat | apply Rplus_le_le_0_compat ]; apply Rle_0_sqr. Qed. Lemma triangle : forall x0 y0 x1 y1 x2 y2:R, dist_euc x0 y0 x1 y1 <= dist_euc x0 y0 x2 y2 + dist_euc x2 y2 x1 y1. Proof. intros; unfold dist_euc; apply Rsqr_incr_0; [ rewrite Rsqr_plus; repeat rewrite Rsqr_sqrt; [ replace (Rsqr (x0 - x1)) with (Rsqr (x0 - x2) + Rsqr (x2 - x1) + 2 * (x0 - x2) * (x2 - x1)); [ replace (Rsqr (y0 - y1)) with (Rsqr (y0 - y2) + Rsqr (y2 - y1) + 2 * (y0 - y2) * (y2 - y1)); [ apply Rplus_le_reg_l with (- Rsqr (x0 - x2) - Rsqr (x2 - x1) - Rsqr (y0 - y2) - Rsqr (y2 - y1)); replace (- Rsqr (x0 - x2) - Rsqr (x2 - x1) - Rsqr (y0 - y2) - Rsqr (y2 - y1) + (Rsqr (x0 - x2) + Rsqr (x2 - x1) + 2 * (x0 - x2) * (x2 - x1) + (Rsqr (y0 - y2) + Rsqr (y2 - y1) + 2 * (y0 - y2) * (y2 - y1)))) with (2 * ((x0 - x2) * (x2 - x1) + (y0 - y2) * (y2 - y1))); [ replace (- Rsqr (x0 - x2) - Rsqr (x2 - x1) - Rsqr (y0 - y2) - Rsqr (y2 - y1) + (Rsqr (x0 - x2) + Rsqr (y0 - y2) + (Rsqr (x2 - x1) + Rsqr (y2 - y1)) + 2 * sqrt (Rsqr (x0 - x2) + Rsqr (y0 - y2)) * sqrt (Rsqr (x2 - x1) + Rsqr (y2 - y1)))) with (2 * (sqrt (Rsqr (x0 - x2) + Rsqr (y0 - y2)) * sqrt (Rsqr (x2 - x1) + Rsqr (y2 - y1)))); [ apply Rmult_le_compat_l; [ left; cut (0%nat <> 2%nat); [ intros; generalize (lt_INR_0 2 (neq_O_lt 2 H)); intro H0; assumption | discriminate ] | apply sqrt_cauchy ] | ring ] | ring ] | ring_Rsqr ] | ring_Rsqr ] | apply Rplus_le_le_0_compat; apply Rle_0_sqr | apply Rplus_le_le_0_compat; apply Rle_0_sqr | apply Rplus_le_le_0_compat; apply Rle_0_sqr ] | apply sqrt_positivity; apply Rplus_le_le_0_compat; apply Rle_0_sqr | apply Rplus_le_le_0_compat; apply sqrt_positivity; apply Rplus_le_le_0_compat; apply Rle_0_sqr ]. Qed. (******************************************************************) (** * Translation *) (******************************************************************) Definition xt (x tx:R) : R := x + tx. Definition yt (y ty:R) : R := y + ty. Lemma translation_0 : forall x y:R, xt x 0 = x /\ yt y 0 = y. Proof. intros x y; split; [ unfold xt | unfold yt ]; ring. Qed. Lemma isometric_translation : forall x1 x2 y1 y2 tx ty:R, Rsqr (x1 - x2) + Rsqr (y1 - y2) = Rsqr (xt x1 tx - xt x2 tx) + Rsqr (yt y1 ty - yt y2 ty). Proof. 
intros; unfold Rsqr, xt, yt; ring. Qed. (******************************************************************) (** * Rotation *) (******************************************************************) Definition xr (x y theta:R) : R := x * cos theta + y * sin theta. Definition yr (x y theta:R) : R := - x * sin theta + y * cos theta. Lemma rotation_0 : forall x y:R, xr x y 0 = x /\ yr x y 0 = y. Proof. intros x y; unfold xr, yr; split; rewrite cos_0; rewrite sin_0; ring. Qed. Lemma rotation_PI2 : forall x y:R, xr x y (PI / 2) = y /\ yr x y (PI / 2) = - x. Proof. intros x y; unfold xr, yr; split; rewrite cos_PI2; rewrite sin_PI2; ring. Qed. Lemma isometric_rotation_0 : forall x1 y1 x2 y2 theta:R, Rsqr (x1 - x2) + Rsqr (y1 - y2) = Rsqr (xr x1 y1 theta - xr x2 y2 theta) + Rsqr (yr x1 y1 theta - yr x2 y2 theta). Proof. intros; unfold xr, yr; replace (x1 * cos theta + y1 * sin theta - (x2 * cos theta + y2 * sin theta)) with (cos theta * (x1 - x2) + sin theta * (y1 - y2)); [ replace (- x1 * sin theta + y1 * cos theta - (- x2 * sin theta + y2 * cos theta)) with (cos theta * (y1 - y2) + sin theta * (x2 - x1)); [ repeat rewrite Rsqr_plus; repeat rewrite Rsqr_mult; repeat rewrite cos2; ring_simplify; replace (x2 - x1) with (- (x1 - x2)); [ rewrite <- Rsqr_neg; ring | ring ] | ring ] | ring ]. Qed. Lemma isometric_rotation : forall x1 y1 x2 y2 theta:R, dist_euc x1 y1 x2 y2 = dist_euc (xr x1 y1 theta) (yr x1 y1 theta) (xr x2 y2 theta) (yr x2 y2 theta). Proof. unfold dist_euc; intros; apply Rsqr_inj; [ apply sqrt_positivity; apply Rplus_le_le_0_compat | apply sqrt_positivity; apply Rplus_le_le_0_compat | repeat rewrite Rsqr_sqrt; [ apply isometric_rotation_0 | apply Rplus_le_le_0_compat | apply Rplus_le_le_0_compat ] ]; apply Rle_0_sqr. Qed. (******************************************************************) (** * Similarity *) (******************************************************************) Lemma isometric_rot_trans : forall x1 y1 x2 y2 tx ty theta:R, Rsqr (x1 - x2) + Rsqr (y1 - y2) = Rsqr (xr (xt x1 tx) (yt y1 ty) theta - xr (xt x2 tx) (yt y2 ty) theta) + Rsqr (yr (xt x1 tx) (yt y1 ty) theta - yr (xt x2 tx) (yt y2 ty) theta). Proof. intros; rewrite <- isometric_rotation_0; apply isometric_translation. Qed. Lemma isometric_trans_rot : forall x1 y1 x2 y2 tx ty theta:R, Rsqr (x1 - x2) + Rsqr (y1 - y2) = Rsqr (xt (xr x1 y1 theta) tx - xt (xr x2 y2 theta) tx) + Rsqr (yt (yr x1 y1 theta) ty - yt (yr x2 y2 theta) ty). Proof. intros; rewrite <- isometric_translation; apply isometric_rotation_0. Qed. coq-8.4pl2/theories/Reals/Rbasic_fun.v0000640000175000001440000005064212010532755016770 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* x | right _ => y end. (*********) Lemma Rmin_case : forall r1 r2 (P:R -> Type), P r1 -> P r2 -> P (Rmin r1 r2). Proof. intros r1 r2 P H1 H2; unfold Rmin; case (Rle_dec r1 r2); auto. Qed. (*********) Lemma Rmin_case_strong : forall r1 r2 (P:R -> Type), (r1 <= r2 -> P r1) -> (r2 <= r1 -> P r2) -> P (Rmin r1 r2). Proof. intros r1 r2 P H1 H2; unfold Rmin; destruct (Rle_dec r1 r2); auto with real. Qed. (*********) Lemma Rmin_Rgt_l : forall r1 r2 r, Rmin r1 r2 > r -> r1 > r /\ r2 > r. Proof. intros r1 r2 r; unfold Rmin; case (Rle_dec r1 r2); intros. split. assumption. unfold Rgt; unfold Rgt in H; exact (Rlt_le_trans r r1 r2 H r0). split. generalize (Rnot_le_lt r1 r2 n); intro; exact (Rgt_trans r1 r2 r H0 H). assumption. Qed. 
(*********) Lemma Rmin_Rgt_r : forall r1 r2 r, r1 > r /\ r2 > r -> Rmin r1 r2 > r. Proof. intros; unfold Rmin; case (Rle_dec r1 r2); elim H; clear H; intros; assumption. Qed. (*********) Lemma Rmin_Rgt : forall r1 r2 r, Rmin r1 r2 > r <-> r1 > r /\ r2 > r. Proof. intros; split. exact (Rmin_Rgt_l r1 r2 r). exact (Rmin_Rgt_r r1 r2 r). Qed. (*********) Lemma Rmin_l : forall x y:R, Rmin x y <= x. Proof. intros; unfold Rmin; case (Rle_dec x y); intro H1; [ right; reflexivity | auto with real ]. Qed. (*********) Lemma Rmin_r : forall x y:R, Rmin x y <= y. Proof. intros; unfold Rmin; case (Rle_dec x y); intro H1; [ assumption | auto with real ]. Qed. (*********) Lemma Rmin_left : forall x y, x <= y -> Rmin x y = x. Proof. intros; apply Rmin_case_strong; auto using Rle_antisym. Qed. (*********) Lemma Rmin_right : forall x y, y <= x -> Rmin x y = y. Proof. intros; apply Rmin_case_strong; auto using Rle_antisym. Qed. (*********) Lemma Rle_min_compat_r : forall x y z, x <= y -> Rmin x z <= Rmin y z. Proof. intros; do 2 (apply Rmin_case_strong; intro); eauto using Rle_trans, Rle_refl. Qed. (*********) Lemma Rle_min_compat_l : forall x y z, x <= y -> Rmin z x <= Rmin z y. Proof. intros; do 2 (apply Rmin_case_strong; intro); eauto using Rle_trans, Rle_refl. Qed. (*********) Lemma Rmin_comm : forall x y:R, Rmin x y = Rmin y x. Proof. intros; unfold Rmin; case (Rle_dec x y); case (Rle_dec y x); intros; try reflexivity || (apply Rle_antisym; assumption || auto with real). Qed. (*********) Lemma Rmin_stable_in_posreal : forall x y:posreal, 0 < Rmin x y. Proof. intros; apply Rmin_Rgt_r; split; [ apply (cond_pos x) | apply (cond_pos y) ]. Qed. (*********) Lemma Rmin_pos : forall x y:R, 0 < x -> 0 < y -> 0 < Rmin x y. Proof. intros; unfold Rmin. case (Rle_dec x y); intro; assumption. Qed. (*********) Lemma Rmin_glb : forall x y z:R, z <= x -> z <= y -> z <= Rmin x y. Proof. intros; unfold Rmin; case (Rle_dec x y); intro; assumption. Qed. (*********) Lemma Rmin_glb_lt : forall x y z:R, z < x -> z < y -> z < Rmin x y. Proof. intros; unfold Rmin; case (Rle_dec x y); intro; assumption. Qed. (*******************************) (** * Rmax *) (*******************************) (*********) Definition Rmax (x y:R) : R := match Rle_dec x y with | left _ => y | right _ => x end. (*********) Lemma Rmax_case : forall r1 r2 (P:R -> Type), P r1 -> P r2 -> P (Rmax r1 r2). Proof. intros r1 r2 P H1 H2; unfold Rmax; case (Rle_dec r1 r2); auto. Qed. (*********) Lemma Rmax_case_strong : forall r1 r2 (P:R -> Type), (r2 <= r1 -> P r1) -> (r1 <= r2 -> P r2) -> P (Rmax r1 r2). Proof. intros r1 r2 P H1 H2; unfold Rmax; case (Rle_dec r1 r2); auto with real. Qed. (*********) Lemma Rmax_Rle : forall r1 r2 r, r <= Rmax r1 r2 <-> r <= r1 \/ r <= r2. Proof. intros; split. unfold Rmax; case (Rle_dec r1 r2); intros; auto. intro; unfold Rmax; case (Rle_dec r1 r2); elim H; clear H; intros; auto. apply (Rle_trans r r1 r2); auto. generalize (Rnot_le_lt r1 r2 n); clear n; intro; unfold Rgt in H0; apply (Rlt_le r r1 (Rle_lt_trans r r2 r1 H H0)). Qed. Lemma Rmax_comm : forall x y:R, Rmax x y = Rmax y x. Proof. intros p q; unfold Rmax; case (Rle_dec p q); case (Rle_dec q p); auto; intros H1 H2; apply Rle_antisym; auto with real. Qed. (* begin hide *) Notation RmaxSym := Rmax_comm (only parsing). (* end hide *) (*********) Lemma Rmax_l : forall x y:R, x <= Rmax x y. Proof. intros; unfold Rmax; case (Rle_dec x y); intro H1; [ assumption | auto with real ]. Qed. (*********) Lemma Rmax_r : forall x y:R, y <= Rmax x y. Proof. 
intros; unfold Rmax; case (Rle_dec x y); intro H1; [ right; reflexivity | auto with real ]. Qed. (* begin hide *) Notation RmaxLess1 := Rmax_l (only parsing). Notation RmaxLess2 := Rmax_r (only parsing). (* end hide *) (*********) Lemma Rmax_left : forall x y, y <= x -> Rmax x y = x. Proof. intros; apply Rmax_case_strong; auto using Rle_antisym. Qed. (*********) Lemma Rmax_right : forall x y, x <= y -> Rmax x y = y. Proof. intros; apply Rmax_case_strong; auto using Rle_antisym. Qed. (*********) Lemma Rle_max_compat_r : forall x y z, x <= y -> Rmax x z <= Rmax y z. Proof. intros; do 2 (apply Rmax_case_strong; intro); eauto using Rle_trans, Rle_refl. Qed. (*********) Lemma Rle_max_compat_l : forall x y z, x <= y -> Rmax z x <= Rmax z y. Proof. intros; do 2 (apply Rmax_case_strong; intro); eauto using Rle_trans, Rle_refl. Qed. (*********) Lemma RmaxRmult : forall (p q:R) r, 0 <= r -> Rmax (r * p) (r * q) = r * Rmax p q. Proof. intros p q r H; unfold Rmax. case (Rle_dec p q); case (Rle_dec (r * p) (r * q)); auto; intros H1 H2; auto. case H; intros E1. case H1; auto with real. rewrite <- E1; repeat rewrite Rmult_0_l; auto. case H; intros E1. case H2; auto with real. apply Rmult_le_reg_l with (r := r); auto. rewrite <- E1; repeat rewrite Rmult_0_l; auto. Qed. (*********) Lemma Rmax_stable_in_negreal : forall x y:negreal, Rmax x y < 0. Proof. intros; unfold Rmax; case (Rle_dec x y); intro; [ apply (cond_neg y) | apply (cond_neg x) ]. Qed. (*********) Lemma Rmax_lub : forall x y z:R, x <= z -> y <= z -> Rmax x y <= z. Proof. intros; unfold Rmax; case (Rle_dec x y); intro; assumption. Qed. (*********) Lemma Rmax_lub_lt : forall x y z:R, x < z -> y < z -> Rmax x y < z. Proof. intros; unfold Rmax; case (Rle_dec x y); intro; assumption. Qed. (*********) Lemma Rmax_neg : forall x y:R, x < 0 -> y < 0 -> Rmax x y < 0. Proof. intros; unfold Rmax. case (Rle_dec x y); intro; assumption. Qed. (*******************************) (** * Rabsolu *) (*******************************) (*********) Lemma Rcase_abs : forall r, {r < 0} + {r >= 0}. Proof. intro; generalize (Rle_dec 0 r); intro X; elim X; intro; clear X. right; apply (Rle_ge 0 r a). left; fold (0 > r); apply (Rnot_le_lt 0 r b). Qed. (*********) Definition Rabs r : R := match Rcase_abs r with | left _ => - r | right _ => r end. (*********) Lemma Rabs_R0 : Rabs 0 = 0. Proof. unfold Rabs; case (Rcase_abs 0); auto; intro. generalize (Rlt_irrefl 0); intro; exfalso; auto. Qed. Lemma Rabs_R1 : Rabs 1 = 1. Proof. unfold Rabs; case (Rcase_abs 1); auto with real. intros H; absurd (1 < 0); auto with real. Qed. (*********) Lemma Rabs_no_R0 : forall r, r <> 0 -> Rabs r <> 0. Proof. intros; unfold Rabs; case (Rcase_abs r); intro; auto. apply Ropp_neq_0_compat; auto. Qed. (*********) Lemma Rabs_left : forall r, r < 0 -> Rabs r = - r. Proof. intros; unfold Rabs; case (Rcase_abs r); trivial; intro; absurd (r >= 0). exact (Rlt_not_ge r 0 H). assumption. Qed. (*********) Lemma Rabs_right : forall r, r >= 0 -> Rabs r = r. Proof. intros; unfold Rabs; case (Rcase_abs r); intro. absurd (r >= 0). exact (Rlt_not_ge r 0 r0). assumption. trivial. Qed. Lemma Rabs_left1 : forall a:R, a <= 0 -> Rabs a = - a. Proof. intros a H; case H; intros H1. apply Rabs_left; auto. rewrite H1; simpl; rewrite Rabs_right; auto with real. Qed. (*********) Lemma Rabs_pos : forall x:R, 0 <= Rabs x. Proof. intros; unfold Rabs; case (Rcase_abs x); intro. generalize (Ropp_lt_gt_contravar x 0 r); intro; unfold Rgt in H; rewrite Ropp_0 in H; unfold Rle; left; assumption. apply Rge_le; assumption. Qed. 
Lemma Rle_abs : forall x:R, x <= Rabs x. Proof. intro; unfold Rabs; case (Rcase_abs x); intros; fourier. Qed. Definition RRle_abs := Rle_abs. (*********) Lemma Rabs_pos_eq : forall x:R, 0 <= x -> Rabs x = x. Proof. intros; unfold Rabs; case (Rcase_abs x); intro; [ generalize (Rgt_not_le 0 x r); intro; exfalso; auto | trivial ]. Qed. (*********) Lemma Rabs_Rabsolu : forall x:R, Rabs (Rabs x) = Rabs x. Proof. intro; apply (Rabs_pos_eq (Rabs x) (Rabs_pos x)). Qed. (*********) Lemma Rabs_pos_lt : forall x:R, x <> 0 -> 0 < Rabs x. Proof. intros; generalize (Rabs_pos x); intro; unfold Rle in H0; elim H0; intro; auto. exfalso; clear H0; elim H; clear H; generalize H1; unfold Rabs; case (Rcase_abs x); intros; auto. clear r H1; generalize (Rplus_eq_compat_l x 0 (- x) H0); rewrite (let (H1, H2) := Rplus_ne x in H1); rewrite (Rplus_opp_r x); trivial. Qed. (*********) Lemma Rabs_minus_sym : forall x y:R, Rabs (x - y) = Rabs (y - x). Proof. intros; unfold Rabs; case (Rcase_abs (x - y)); case (Rcase_abs (y - x)); intros. generalize (Rminus_lt y x r); generalize (Rminus_lt x y r0); intros; generalize (Rlt_asym x y H); intro; exfalso; auto. rewrite (Ropp_minus_distr x y); trivial. rewrite (Ropp_minus_distr y x); trivial. unfold Rge in r, r0; elim r; elim r0; intros; clear r r0. generalize (Ropp_lt_gt_0_contravar (x - y) H); rewrite (Ropp_minus_distr x y); intro; unfold Rgt in H0; generalize (Rlt_asym 0 (y - x) H0); intro; exfalso; auto. rewrite (Rminus_diag_uniq x y H); trivial. rewrite (Rminus_diag_uniq y x H0); trivial. rewrite (Rminus_diag_uniq y x H0); trivial. Qed. (*********) Lemma Rabs_mult : forall x y:R, Rabs (x * y) = Rabs x * Rabs y. Proof. intros; unfold Rabs; case (Rcase_abs (x * y)); case (Rcase_abs x); case (Rcase_abs y); intros; auto. generalize (Rmult_lt_gt_compat_neg_l y x 0 r r0); intro; rewrite (Rmult_0_r y) in H; generalize (Rlt_asym (x * y) 0 r1); intro; unfold Rgt in H; exfalso; rewrite (Rmult_comm y x) in H; auto. rewrite (Ropp_mult_distr_l_reverse x y); trivial. rewrite (Rmult_comm x (- y)); rewrite (Ropp_mult_distr_l_reverse y x); rewrite (Rmult_comm x y); trivial. unfold Rge in r, r0; elim r; elim r0; clear r r0; intros; unfold Rgt in H, H0. generalize (Rmult_lt_compat_l x 0 y H H0); intro; rewrite (Rmult_0_r x) in H1; generalize (Rlt_asym (x * y) 0 r1); intro; exfalso; auto. rewrite H in r1; rewrite (Rmult_0_l y) in r1; generalize (Rlt_irrefl 0); intro; exfalso; auto. rewrite H0 in r1; rewrite (Rmult_0_r x) in r1; generalize (Rlt_irrefl 0); intro; exfalso; auto. rewrite H0 in r1; rewrite (Rmult_0_r x) in r1; generalize (Rlt_irrefl 0); intro; exfalso; auto. rewrite (Rmult_opp_opp x y); trivial. unfold Rge in r, r1; elim r; elim r1; clear r r1; intros; unfold Rgt in H0, H. generalize (Rmult_lt_compat_l y x 0 H0 r0); intro; rewrite (Rmult_0_r y) in H1; rewrite (Rmult_comm y x) in H1; generalize (Rlt_asym (x * y) 0 H1); intro; exfalso; auto. generalize (Rlt_dichotomy_converse x 0 (or_introl (x > 0) r0)); generalize (Rlt_dichotomy_converse y 0 (or_intror (y < 0) H0)); intros; generalize (Rmult_integral x y H); intro; elim H3; intro; exfalso; auto. rewrite H0 in H; rewrite (Rmult_0_r x) in H; unfold Rgt in H; generalize (Rlt_irrefl 0); intro; exfalso; auto. rewrite H0; rewrite (Rmult_0_r x); rewrite (Rmult_0_r (- x)); trivial. unfold Rge in r0, r1; elim r0; elim r1; clear r0 r1; intros; unfold Rgt in H0, H. generalize (Rmult_lt_compat_l x y 0 H0 r); intro; rewrite (Rmult_0_r x) in H1; generalize (Rlt_asym (x * y) 0 H1); intro; exfalso; auto. 
generalize (Rlt_dichotomy_converse y 0 (or_introl (y > 0) r)); generalize (Rlt_dichotomy_converse 0 x (or_introl (0 > x) H0)); intros; generalize (Rmult_integral x y H); intro; elim H3; intro; exfalso; auto. rewrite H0 in H; rewrite (Rmult_0_l y) in H; unfold Rgt in H; generalize (Rlt_irrefl 0); intro; exfalso; auto. rewrite H0; rewrite (Rmult_0_l y); rewrite (Rmult_0_l (- y)); trivial. Qed. (*********) Lemma Rabs_Rinv : forall r, r <> 0 -> Rabs (/ r) = / Rabs r. Proof. intro; unfold Rabs; case (Rcase_abs r); case (Rcase_abs (/ r)); auto; intros. apply Ropp_inv_permute; auto. generalize (Rinv_lt_0_compat r r1); intro; unfold Rge in r0; elim r0; intros. unfold Rgt in H1; generalize (Rlt_asym 0 (/ r) H1); intro; exfalso; auto. generalize (Rlt_dichotomy_converse (/ r) 0 (or_introl (/ r > 0) H0)); intro; exfalso; auto. unfold Rge in r1; elim r1; clear r1; intro. unfold Rgt in H0; generalize (Rlt_asym 0 (/ r) (Rinv_0_lt_compat r H0)); intro; exfalso; auto. exfalso; auto. Qed. Lemma Rabs_Ropp : forall x:R, Rabs (- x) = Rabs x. Proof. intro; cut (- x = -1 * x). intros; rewrite H. rewrite Rabs_mult. cut (Rabs (-1) = 1). intros; rewrite H0. ring. unfold Rabs; case (Rcase_abs (-1)). intro; ring. intro H0; generalize (Rge_le (-1) 0 H0); intros. generalize (Ropp_le_ge_contravar 0 (-1) H1). rewrite Ropp_involutive; rewrite Ropp_0. intro; generalize (Rgt_not_le 1 0 Rlt_0_1); intro; generalize (Rge_le 0 1 H2); intro; exfalso; auto. ring. Qed. (*********) Lemma Rabs_triang : forall a b:R, Rabs (a + b) <= Rabs a + Rabs b. Proof. intros a b; unfold Rabs; case (Rcase_abs (a + b)); case (Rcase_abs a); case (Rcase_abs b); intros. apply (Req_le (- (a + b)) (- a + - b)); rewrite (Ropp_plus_distr a b); reflexivity. (**) rewrite (Ropp_plus_distr a b); apply (Rplus_le_compat_l (- a) (- b) b); unfold Rle; unfold Rge in r; elim r; intro. left; unfold Rgt in H; generalize (Rplus_lt_compat_l (- b) 0 b H); intro; elim (Rplus_ne (- b)); intros v w; rewrite v in H0; clear v w; rewrite (Rplus_opp_l b) in H0; apply (Rlt_trans (- b) 0 b H0 H). right; rewrite H; apply Ropp_0. (**) rewrite (Ropp_plus_distr a b); rewrite (Rplus_comm (- a) (- b)); rewrite (Rplus_comm a (- b)); apply (Rplus_le_compat_l (- b) (- a) a); unfold Rle; unfold Rge in r0; elim r0; intro. left; unfold Rgt in H; generalize (Rplus_lt_compat_l (- a) 0 a H); intro; elim (Rplus_ne (- a)); intros v w; rewrite v in H0; clear v w; rewrite (Rplus_opp_l a) in H0; apply (Rlt_trans (- a) 0 a H0 H). right; rewrite H; apply Ropp_0. (**) exfalso; generalize (Rplus_ge_compat_l a b 0 r); intro; elim (Rplus_ne a); intros v w; rewrite v in H; clear v w; generalize (Rge_trans (a + b) a 0 H r0); intro; clear H; unfold Rge in H0; elim H0; intro; clear H0. unfold Rgt in H; generalize (Rlt_asym (a + b) 0 r1); intro; auto. absurd (a + b = 0); auto. apply (Rlt_dichotomy_converse (a + b) 0); left; assumption. (**) exfalso; generalize (Rplus_lt_compat_l a b 0 r); intro; elim (Rplus_ne a); intros v w; rewrite v in H; clear v w; generalize (Rlt_trans (a + b) a 0 H r0); intro; clear H; unfold Rge in r1; elim r1; clear r1; intro. unfold Rgt in H; generalize (Rlt_trans (a + b) 0 (a + b) H0 H); intro; apply (Rlt_irrefl (a + b)); assumption. rewrite H in H0; apply (Rlt_irrefl 0); assumption. 
(**) rewrite (Rplus_comm a b); rewrite (Rplus_comm (- a) b); apply (Rplus_le_compat_l b a (- a)); apply (Rminus_le a (- a)); unfold Rminus; rewrite (Ropp_involutive a); generalize (Rplus_lt_compat_l a a 0 r0); clear r r1; intro; elim (Rplus_ne a); intros v w; rewrite v in H; clear v w; generalize (Rlt_trans (a + a) a 0 H r0); intro; apply (Rlt_le (a + a) 0 H0). (**) apply (Rplus_le_compat_l a b (- b)); apply (Rminus_le b (- b)); unfold Rminus; rewrite (Ropp_involutive b); generalize (Rplus_lt_compat_l b b 0 r); clear r0 r1; intro; elim (Rplus_ne b); intros v w; rewrite v in H; clear v w; generalize (Rlt_trans (b + b) b 0 H r); intro; apply (Rlt_le (b + b) 0 H0). (**) unfold Rle; right; reflexivity. Qed. (*********) Lemma Rabs_triang_inv : forall a b:R, Rabs a - Rabs b <= Rabs (a - b). Proof. intros; apply (Rplus_le_reg_l (Rabs b) (Rabs a - Rabs b) (Rabs (a - b))); unfold Rminus; rewrite <- (Rplus_assoc (Rabs b) (Rabs a) (- Rabs b)); rewrite (Rplus_comm (Rabs b) (Rabs a)); rewrite (Rplus_assoc (Rabs a) (Rabs b) (- Rabs b)); rewrite (Rplus_opp_r (Rabs b)); rewrite (proj1 (Rplus_ne (Rabs a))); replace (Rabs a) with (Rabs (a + 0)). rewrite <- (Rplus_opp_r b); rewrite <- (Rplus_assoc a b (- b)); rewrite (Rplus_comm a b); rewrite (Rplus_assoc b a (- b)). exact (Rabs_triang b (a + - b)). rewrite (proj1 (Rplus_ne a)); trivial. Qed. (* ||a|-|b||<=|a-b| *) Lemma Rabs_triang_inv2 : forall a b:R, Rabs (Rabs a - Rabs b) <= Rabs (a - b). Proof. cut (forall a b:R, Rabs b <= Rabs a -> Rabs (Rabs a - Rabs b) <= Rabs (a - b)). intros; destruct (Rtotal_order (Rabs a) (Rabs b)) as [Hlt| [Heq| Hgt]]. rewrite <- (Rabs_Ropp (Rabs a - Rabs b)); rewrite <- (Rabs_Ropp (a - b)); do 2 rewrite Ropp_minus_distr. apply H; left; assumption. rewrite Heq; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rabs_pos. apply H; left; assumption. intros; replace (Rabs (Rabs a - Rabs b)) with (Rabs a - Rabs b). apply Rabs_triang_inv. rewrite (Rabs_right (Rabs a - Rabs b)); [ reflexivity | apply Rle_ge; apply Rplus_le_reg_l with (Rabs b); rewrite Rplus_0_r; replace (Rabs b + (Rabs a - Rabs b)) with (Rabs a); [ assumption | ring ] ]. Qed. (*********) Lemma Rabs_def1 : forall x a:R, x < a -> - a < x -> Rabs x < a. Proof. unfold Rabs; intros; case (Rcase_abs x); intro. generalize (Ropp_lt_gt_contravar (- a) x H0); unfold Rgt; rewrite Ropp_involutive; intro; assumption. assumption. Qed. (*********) Lemma Rabs_def2 : forall x a:R, Rabs x < a -> x < a /\ - a < x. Proof. unfold Rabs; intro x; case (Rcase_abs x); intros. generalize (Ropp_gt_lt_0_contravar x r); unfold Rgt; intro; generalize (Rlt_trans 0 (- x) a H0 H); intro; split. apply (Rlt_trans x 0 a r H1). generalize (Ropp_lt_gt_contravar (- x) a H); rewrite (Ropp_involutive x); unfold Rgt; trivial. fold (a > x) in H; generalize (Rgt_ge_trans a x 0 H r); intro; generalize (Ropp_lt_gt_0_contravar a H0); intro; fold (0 > - a); generalize (Rge_gt_trans x 0 (- a) r H1); unfold Rgt; intro; split; assumption. Qed. Lemma RmaxAbs : forall (p q:R) r, p <= q -> q <= r -> Rabs q <= Rmax (Rabs p) (Rabs r). Proof. intros p q r H' H'0; case (Rle_or_lt 0 p); intros H'1. repeat rewrite Rabs_right; auto with real. apply Rle_trans with r; auto with real. apply RmaxLess2; auto. apply Rge_trans with p; auto with real; apply Rge_trans with q; auto with real. apply Rge_trans with p; auto with real. rewrite (Rabs_left p); auto. case (Rle_or_lt 0 q); intros H'2. repeat rewrite Rabs_right; auto with real. apply Rle_trans with r; auto. apply RmaxLess2; auto. apply Rge_trans with q; auto with real. 
rewrite (Rabs_left q); auto. case (Rle_or_lt 0 r); intros H'3. repeat rewrite Rabs_right; auto with real. apply Rle_trans with (- p); auto with real. apply RmaxLess1; auto. rewrite (Rabs_left r); auto. apply Rle_trans with (- p); auto with real. apply RmaxLess1; auto. Qed. Lemma Rabs_Zabs : forall z:Z, Rabs (IZR z) = IZR (Z.abs z). Proof. intros z; case z; simpl; auto with real. apply Rabs_right; auto with real. intros p0; apply Rabs_right; auto with real zarith. intros p0; rewrite Rabs_Ropp. apply Rabs_right; auto with real zarith. Qed. Lemma abs_IZR : forall z, IZR (Z.abs z) = Rabs (IZR z). Proof. intros. now rewrite Rabs_Zabs. Qed. coq-8.4pl2/theories/Reals/Rtrigo_fun.v0000640000175000001440000001214112010532755017023 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Rabs (/ INR (fact (S n)) * / / INR (fact n))) 0. Proof. unfold Un_cv; intros; elim (Rgt_dec eps 1); intro. split with 0%nat; intros; rewrite (simpl_fact n); unfold R_dist; rewrite (Rminus_0_r (Rabs (/ INR (S n)))); rewrite (Rabs_Rabsolu (/ INR (S n))); cut (/ INR (S n) > 0). intro; rewrite (Rabs_pos_eq (/ INR (S n))). cut (/ eps - 1 < 0). intro; generalize (Rlt_le_trans (/ eps - 1) 0 (INR n) H2 (pos_INR n)); clear H2; intro; unfold Rminus in H2; generalize (Rplus_lt_compat_l 1 (/ eps + -1) (INR n) H2); replace (1 + (/ eps + -1)) with (/ eps); [ clear H2; intro | ring ]. rewrite (Rplus_comm 1 (INR n)) in H2; rewrite <- (S_INR n) in H2; generalize (Rmult_gt_0_compat (/ INR (S n)) eps H1 H); intro; unfold Rgt in H3; generalize (Rmult_lt_compat_l (/ INR (S n) * eps) (/ eps) (INR (S n)) H3 H2); intro; rewrite (Rmult_assoc (/ INR (S n)) eps (/ eps)) in H4; rewrite (Rinv_r eps (Rlt_dichotomy_converse eps 0 (or_intror (eps < 0) H))) in H4; rewrite (let (H1, H2) := Rmult_ne (/ INR (S n)) in H1) in H4; rewrite (Rmult_comm (/ INR (S n))) in H4; rewrite (Rmult_assoc eps (/ INR (S n)) (INR (S n))) in H4; rewrite (Rinv_l (INR (S n)) (not_O_INR (S n) (not_eq_sym (O_S n)))) in H4; rewrite (let (H1, H2) := Rmult_ne eps in H1) in H4; assumption. apply Rlt_minus; unfold Rgt in a; rewrite <- Rinv_1; apply (Rinv_lt_contravar 1 eps); auto; rewrite (let (H1, H2) := Rmult_ne eps in H2); unfold Rgt in H; assumption. unfold Rgt in H1; apply Rlt_le; assumption. unfold Rgt; apply Rinv_0_lt_compat; apply lt_INR_0; apply lt_O_Sn. (**) cut (0 <= up (/ eps - 1))%Z. intro; elim (IZN (up (/ eps - 1)) H0); intros; split with x; intros; rewrite (simpl_fact n); unfold R_dist; rewrite (Rminus_0_r (Rabs (/ INR (S n)))); rewrite (Rabs_Rabsolu (/ INR (S n))); cut (/ INR (S n) > 0). intro; rewrite (Rabs_pos_eq (/ INR (S n))). cut (/ eps - 1 < INR x). intro ; generalize (Rlt_le_trans (/ eps - 1) (INR x) (INR n) H4 (le_INR x n H2)); clear H4; intro; unfold Rminus in H4; generalize (Rplus_lt_compat_l 1 (/ eps + -1) (INR n) H4); replace (1 + (/ eps + -1)) with (/ eps); [ clear H4; intro | ring ]. 
rewrite (Rplus_comm 1 (INR n)) in H4; rewrite <- (S_INR n) in H4; generalize (Rmult_gt_0_compat (/ INR (S n)) eps H3 H); intro; unfold Rgt in H5; generalize (Rmult_lt_compat_l (/ INR (S n) * eps) (/ eps) (INR (S n)) H5 H4); intro; rewrite (Rmult_assoc (/ INR (S n)) eps (/ eps)) in H6; rewrite (Rinv_r eps (Rlt_dichotomy_converse eps 0 (or_intror (eps < 0) H))) in H6; rewrite (let (H1, H2) := Rmult_ne (/ INR (S n)) in H1) in H6; rewrite (Rmult_comm (/ INR (S n))) in H6; rewrite (Rmult_assoc eps (/ INR (S n)) (INR (S n))) in H6; rewrite (Rinv_l (INR (S n)) (not_O_INR (S n) (not_eq_sym (O_S n)))) in H6; rewrite (let (H1, H2) := Rmult_ne eps in H1) in H6; assumption. cut (IZR (up (/ eps - 1)) = IZR (Z.of_nat x)); [ intro | rewrite H1; trivial ]. elim (archimed (/ eps - 1)); intros; clear H6; unfold Rgt in H5; rewrite H4 in H5; rewrite INR_IZR_INZ; assumption. unfold Rgt in H1; apply Rlt_le; assumption. unfold Rgt; apply Rinv_0_lt_compat; apply lt_INR_0; apply lt_O_Sn. apply (le_O_IZR (up (/ eps - 1))); apply (Rle_trans 0 (/ eps - 1) (IZR (up (/ eps - 1)))). generalize (Rnot_gt_le eps 1 b); clear b; unfold Rle; intro; elim H0; clear H0; intro. left; unfold Rgt in H; generalize (Rmult_lt_compat_l (/ eps) eps 1 (Rinv_0_lt_compat eps H) H0); rewrite (Rinv_l eps (not_eq_sym (Rlt_dichotomy_converse 0 eps (or_introl (0 > eps) H)))) ; rewrite (let (H1, H2) := Rmult_ne (/ eps) in H1); intro; fold (/ eps - 1 > 0); apply Rgt_minus; unfold Rgt; assumption. right; rewrite H0; rewrite Rinv_1; symmetry; apply Rminus_diag_eq; auto. elim (archimed (/ eps - 1)); intros; clear H1; unfold Rgt in H0; apply Rlt_le; assumption. Qed. coq-8.4pl2/theories/Reals/R_sqr.v0000640000175000001440000002732312010532755016003 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* x <> 0. Proof. intros; red; intro; rewrite H0 in H; rewrite Rsqr_0 in H; elim (Rlt_irrefl 0 H). Qed. Lemma Rsqr_pos_lt : forall x:R, x <> 0 -> 0 < Rsqr x. Proof. intros; case (Rtotal_order 0 x); intro; [ unfold Rsqr; apply Rmult_lt_0_compat; assumption | elim H0; intro; [ elim H; symmetry ; exact H1 | rewrite Rsqr_neg; generalize (Ropp_lt_gt_contravar x 0 H1); rewrite Ropp_0; intro; unfold Rsqr; apply Rmult_lt_0_compat; assumption ] ]. Qed. Lemma Rsqr_div : forall x y:R, y <> 0 -> Rsqr (x / y) = Rsqr x / Rsqr y. Proof. intros; unfold Rsqr. unfold Rdiv. rewrite Rinv_mult_distr. repeat rewrite Rmult_assoc. apply Rmult_eq_compat_l. rewrite Rmult_comm. repeat rewrite Rmult_assoc. apply Rmult_eq_compat_l. reflexivity. assumption. assumption. Qed. Lemma Rsqr_eq_0 : forall x:R, Rsqr x = 0 -> x = 0. Proof. unfold Rsqr; intros; generalize (Rmult_integral x x H); intro; elim H0; intro; assumption. Qed. Lemma Rsqr_minus_plus : forall a b:R, (a - b) * (a + b) = Rsqr a - Rsqr b. Proof. intros; ring_Rsqr. Qed. Lemma Rsqr_plus_minus : forall a b:R, (a + b) * (a - b) = Rsqr a - Rsqr b. Proof. intros; ring_Rsqr. Qed. Lemma Rsqr_incr_0 : forall x y:R, Rsqr x <= Rsqr y -> 0 <= x -> 0 <= y -> x <= y. Proof. intros; case (Rle_dec x y); intro; [ assumption | cut (y < x); [ intro; unfold Rsqr in H; generalize (Rmult_le_0_lt_compat y x y x H1 H1 H2 H2); intro; generalize (Rle_lt_trans (x * x) (y * y) (x * x) H H3); intro; elim (Rlt_irrefl (x * x) H4) | auto with real ] ]. Qed. Lemma Rsqr_incr_0_var : forall x y:R, Rsqr x <= Rsqr y -> 0 <= y -> x <= y. Proof. 
intros; case (Rle_dec x y); intro; [ assumption | cut (y < x); [ intro; unfold Rsqr in H; generalize (Rmult_le_0_lt_compat y x y x H0 H0 H1 H1); intro; generalize (Rle_lt_trans (x * x) (y * y) (x * x) H H2); intro; elim (Rlt_irrefl (x * x) H3) | auto with real ] ]. Qed. Lemma Rsqr_incr_1 : forall x y:R, x <= y -> 0 <= x -> 0 <= y -> Rsqr x <= Rsqr y. Proof. intros; unfold Rsqr; apply Rmult_le_compat; assumption. Qed. Lemma Rsqr_incrst_0 : forall x y:R, Rsqr x < Rsqr y -> 0 <= x -> 0 <= y -> x < y. Proof. intros; case (Rtotal_order x y); intro; [ assumption | elim H2; intro; [ rewrite H3 in H; elim (Rlt_irrefl (Rsqr y) H) | generalize (Rmult_le_0_lt_compat y x y x H1 H1 H3 H3); intro; unfold Rsqr in H; generalize (Rlt_trans (x * x) (y * y) (x * x) H H4); intro; elim (Rlt_irrefl (x * x) H5) ] ]. Qed. Lemma Rsqr_incrst_1 : forall x y:R, x < y -> 0 <= x -> 0 <= y -> Rsqr x < Rsqr y. Proof. intros; unfold Rsqr; apply Rmult_le_0_lt_compat; assumption. Qed. Lemma Rsqr_neg_pos_le_0 : forall x y:R, Rsqr x <= Rsqr y -> 0 <= y -> - y <= x. Proof. intros; case (Rcase_abs x); intro. generalize (Ropp_lt_gt_contravar x 0 r); rewrite Ropp_0; intro; generalize (Rlt_le 0 (- x) H1); intro; rewrite (Rsqr_neg x) in H; generalize (Rsqr_incr_0 (- x) y H H2 H0); intro; rewrite <- (Ropp_involutive x); apply Ropp_ge_le_contravar; apply Rle_ge; assumption. apply Rle_trans with 0; [ rewrite <- Ropp_0; apply Ropp_ge_le_contravar; apply Rle_ge; assumption | apply Rge_le; assumption ]. Qed. Lemma Rsqr_neg_pos_le_1 : forall x y:R, - y <= x -> x <= y -> 0 <= y -> Rsqr x <= Rsqr y. Proof. intros; case (Rcase_abs x); intro. generalize (Ropp_lt_gt_contravar x 0 r); rewrite Ropp_0; intro; generalize (Rlt_le 0 (- x) H2); intro; generalize (Ropp_le_ge_contravar (- y) x H); rewrite Ropp_involutive; intro; generalize (Rge_le y (- x) H4); intro; rewrite (Rsqr_neg x); apply Rsqr_incr_1; assumption. generalize (Rge_le x 0 r); intro; apply Rsqr_incr_1; assumption. Qed. Lemma neg_pos_Rsqr_le : forall x y:R, - y <= x -> x <= y -> Rsqr x <= Rsqr y. Proof. intros; case (Rcase_abs x); intro. generalize (Ropp_lt_gt_contravar x 0 r); rewrite Ropp_0; intro; generalize (Ropp_le_ge_contravar (- y) x H); rewrite Ropp_involutive; intro; generalize (Rge_le y (- x) H2); intro; generalize (Rlt_le 0 (- x) H1); intro; generalize (Rle_trans 0 (- x) y H4 H3); intro; rewrite (Rsqr_neg x); apply Rsqr_incr_1; assumption. generalize (Rge_le x 0 r); intro; generalize (Rle_trans 0 x y H1 H0); intro; apply Rsqr_incr_1; assumption. Qed. Lemma Rsqr_abs : forall x:R, Rsqr x = Rsqr (Rabs x). Proof. intro; unfold Rabs; case (Rcase_abs x); intro; [ apply Rsqr_neg | reflexivity ]. Qed. Lemma Rsqr_le_abs_0 : forall x y:R, Rsqr x <= Rsqr y -> Rabs x <= Rabs y. Proof. intros; apply Rsqr_incr_0; repeat rewrite <- Rsqr_abs; [ assumption | apply Rabs_pos | apply Rabs_pos ]. Qed. Lemma Rsqr_le_abs_1 : forall x y:R, Rabs x <= Rabs y -> Rsqr x <= Rsqr y. Proof. intros; rewrite (Rsqr_abs x); rewrite (Rsqr_abs y); apply (Rsqr_incr_1 (Rabs x) (Rabs y) H (Rabs_pos x) (Rabs_pos y)). Qed. Lemma Rsqr_lt_abs_0 : forall x y:R, Rsqr x < Rsqr y -> Rabs x < Rabs y. Proof. intros; apply Rsqr_incrst_0; repeat rewrite <- Rsqr_abs; [ assumption | apply Rabs_pos | apply Rabs_pos ]. Qed. Lemma Rsqr_lt_abs_1 : forall x y:R, Rabs x < Rabs y -> Rsqr x < Rsqr y. Proof. intros; rewrite (Rsqr_abs x); rewrite (Rsqr_abs y); apply (Rsqr_incrst_1 (Rabs x) (Rabs y) H (Rabs_pos x) (Rabs_pos y)). Qed. Lemma Rsqr_inj : forall x y:R, 0 <= x -> 0 <= y -> Rsqr x = Rsqr y -> x = y. Proof. 
intros; generalize (Rle_le_eq (Rsqr x) (Rsqr y)); intro; elim H2; intros _ H3; generalize (H3 H1); intro; elim H4; intros; apply Rle_antisym; apply Rsqr_incr_0; assumption. Qed. Lemma Rsqr_eq_abs_0 : forall x y:R, Rsqr x = Rsqr y -> Rabs x = Rabs y. Proof. intros; unfold Rabs; case (Rcase_abs x); case (Rcase_abs y); intros. rewrite (Rsqr_neg x) in H; rewrite (Rsqr_neg y) in H; generalize (Ropp_lt_gt_contravar y 0 r); generalize (Ropp_lt_gt_contravar x 0 r0); rewrite Ropp_0; intros; generalize (Rlt_le 0 (- x) H0); generalize (Rlt_le 0 (- y) H1); intros; apply Rsqr_inj; assumption. rewrite (Rsqr_neg x) in H; generalize (Rge_le y 0 r); intro; generalize (Ropp_lt_gt_contravar x 0 r0); rewrite Ropp_0; intro; generalize (Rlt_le 0 (- x) H1); intro; apply Rsqr_inj; assumption. rewrite (Rsqr_neg y) in H; generalize (Rge_le x 0 r0); intro; generalize (Ropp_lt_gt_contravar y 0 r); rewrite Ropp_0; intro; generalize (Rlt_le 0 (- y) H1); intro; apply Rsqr_inj; assumption. generalize (Rge_le x 0 r0); generalize (Rge_le y 0 r); intros; apply Rsqr_inj; assumption. Qed. Lemma Rsqr_eq_asb_1 : forall x y:R, Rabs x = Rabs y -> Rsqr x = Rsqr y. Proof. intros; cut (Rsqr (Rabs x) = Rsqr (Rabs y)). intro; repeat rewrite <- Rsqr_abs in H0; assumption. rewrite H; reflexivity. Qed. Lemma triangle_rectangle : forall x y z:R, 0 <= z -> Rsqr x + Rsqr y <= Rsqr z -> - z <= x <= z /\ - z <= y <= z. Proof. intros; generalize (plus_le_is_le (Rsqr x) (Rsqr y) (Rsqr z) (Rle_0_sqr y) H0); rewrite Rplus_comm in H0; generalize (plus_le_is_le (Rsqr y) (Rsqr x) (Rsqr z) (Rle_0_sqr x) H0); intros; split; [ split; [ apply Rsqr_neg_pos_le_0; assumption | apply Rsqr_incr_0_var; assumption ] | split; [ apply Rsqr_neg_pos_le_0; assumption | apply Rsqr_incr_0_var; assumption ] ]. Qed. Lemma triangle_rectangle_lt : forall x y z:R, Rsqr x + Rsqr y < Rsqr z -> Rabs x < Rabs z /\ Rabs y < Rabs z. Proof. intros; split; [ generalize (plus_lt_is_lt (Rsqr x) (Rsqr y) (Rsqr z) (Rle_0_sqr y) H); intro; apply Rsqr_lt_abs_0; assumption | rewrite Rplus_comm in H; generalize (plus_lt_is_lt (Rsqr y) (Rsqr x) (Rsqr z) (Rle_0_sqr x) H); intro; apply Rsqr_lt_abs_0; assumption ]. Qed. Lemma triangle_rectangle_le : forall x y z:R, Rsqr x + Rsqr y <= Rsqr z -> Rabs x <= Rabs z /\ Rabs y <= Rabs z. Proof. intros; split; [ generalize (plus_le_is_le (Rsqr x) (Rsqr y) (Rsqr z) (Rle_0_sqr y) H); intro; apply Rsqr_le_abs_0; assumption | rewrite Rplus_comm in H; generalize (plus_le_is_le (Rsqr y) (Rsqr x) (Rsqr z) (Rle_0_sqr x) H); intro; apply Rsqr_le_abs_0; assumption ]. Qed. Lemma Rsqr_inv : forall x:R, x <> 0 -> Rsqr (/ x) = / Rsqr x. Proof. intros; unfold Rsqr. rewrite Rinv_mult_distr; try reflexivity || assumption. Qed. Lemma canonical_Rsqr : forall (a:nonzeroreal) (b c x:R), a * Rsqr x + b * x + c = a * Rsqr (x + b / (2 * a)) + (4 * a * c - Rsqr b) / (4 * a). Proof. intros. rewrite Rsqr_plus. repeat rewrite Rmult_plus_distr_l. repeat rewrite Rplus_assoc. apply Rplus_eq_compat_l. unfold Rdiv, Rminus. replace (2 * 1 + 2 * 1) with 4; [ idtac | ring ]. rewrite (Rmult_plus_distr_r (4 * a * c) (- Rsqr b) (/ (4 * a))). rewrite Rsqr_mult. repeat rewrite Rinv_mult_distr. repeat rewrite (Rmult_comm a). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite (Rmult_comm (/ 2)). rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite (Rmult_comm a). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. 
rewrite Rmult_1_r. rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r. repeat rewrite Rplus_assoc. rewrite (Rplus_comm (Rsqr b * (Rsqr (/ a * / 2) * a))). repeat rewrite Rplus_assoc. rewrite (Rmult_comm x). apply Rplus_eq_compat_l. rewrite (Rmult_comm (/ a)). unfold Rsqr; repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. ring. apply (cond_nonzero a). discrR. apply (cond_nonzero a). discrR. discrR. apply (cond_nonzero a). discrR. discrR. discrR. apply (cond_nonzero a). discrR. apply (cond_nonzero a). Qed. Lemma Rsqr_eq : forall x y:R, Rsqr x = Rsqr y -> x = y \/ x = - y. Proof. intros; unfold Rsqr in H; generalize (Rplus_eq_compat_l (- (y * y)) (x * x) (y * y) H); rewrite Rplus_opp_l; replace (- (y * y) + x * x) with ((x - y) * (x + y)). intro; generalize (Rmult_integral (x - y) (x + y) H0); intro; elim H1; intros. left; apply Rminus_diag_uniq; assumption. right; apply Rminus_diag_uniq; unfold Rminus; rewrite Ropp_involutive; assumption. ring. Qed. coq-8.4pl2/theories/Reals/Rfunctions.v0000640000175000001440000005703112010532755017046 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0. Proof. intro; red; intro; apply (not_O_INR (fact n) (fact_neq_0 n)); assumption. Qed. (*********) Lemma fact_simpl : forall n:nat, fact (S n) = (S n * fact n)%nat. Proof. intro; reflexivity. Qed. (*********) Lemma simpl_fact : forall n:nat, / INR (fact (S n)) * / / INR (fact n) = / INR (S n). Proof. intro; rewrite (Rinv_involutive (INR (fact n)) (INR_fact_neq_0 n)); unfold fact at 1; cbv beta iota; fold fact; rewrite (mult_INR (S n) (fact n)); rewrite (Rinv_mult_distr (INR (S n)) (INR (fact n))). rewrite (Rmult_assoc (/ INR (S n)) (/ INR (fact n)) (INR (fact n))); rewrite (Rinv_l (INR (fact n)) (INR_fact_neq_0 n)); apply (let (H1, H2) := Rmult_ne (/ INR (S n)) in H1). apply not_O_INR; auto. apply INR_fact_neq_0. Qed. (*******************************) (** * Power *) (*******************************) (*********) Infix "^" := pow : R_scope. Lemma pow_O : forall x:R, x ^ 0 = 1. Proof. reflexivity. Qed. Lemma pow_1 : forall x:R, x ^ 1 = x. Proof. simpl; auto with real. Qed. Lemma pow_add : forall (x:R) (n m:nat), x ^ (n + m) = x ^ n * x ^ m. Proof. intros x n; elim n; simpl; auto with real. intros n0 H' m; rewrite H'; auto with real. Qed. Lemma Rpow_mult_distr : forall (x y:R) (n:nat), (x * y) ^ n = x^n * y^n. Proof. intros x y n ; induction n. field. simpl. repeat (rewrite Rmult_assoc) ; apply Rmult_eq_compat_l. rewrite IHn ; field. Qed. Lemma pow_nonzero : forall (x:R) (n:nat), x <> 0 -> x ^ n <> 0. Proof. intro; simple induction n; simpl. intro; red; intro; apply R1_neq_R0; assumption. intros; red; intro; elim (Rmult_integral x (x ^ n0) H1). intro; auto. apply H; assumption. Qed. Hint Resolve pow_O pow_1 pow_add pow_nonzero: real. Lemma pow_RN_plus : forall (x:R) (n m:nat), x <> 0 -> x ^ n = x ^ (n + m) * / x ^ m. Proof. intros x n; elim n; simpl; auto with real. intros n0 H' m H'0. rewrite Rmult_assoc; rewrite <- H'; auto. Qed. Lemma pow_lt : forall (x:R) (n:nat), 0 < x -> 0 < x ^ n. Proof. intros x n; elim n; simpl; auto with real. intros n0 H' H'0; replace 0 with (x * 0); auto with real. Qed. Hint Resolve pow_lt: real. Lemma Rlt_pow_R1 : forall (x:R) (n:nat), 1 < x -> (0 < n)%nat -> 1 < x ^ n. Proof. intros x n; elim n; simpl; auto with real. intros H' H'0; exfalso; omega. intros n0; case n0. simpl; rewrite Rmult_1_r; auto. 
intros n1 H' H'0 H'1. replace 1 with (1 * 1); auto with real. apply Rlt_trans with (r2 := x * 1); auto with real. apply Rmult_lt_compat_l; auto with real. apply Rlt_trans with (r2 := 1); auto with real. apply H'; auto with arith. Qed. Hint Resolve Rlt_pow_R1: real. Lemma Rlt_pow : forall (x:R) (n m:nat), 1 < x -> (n < m)%nat -> x ^ n < x ^ m. Proof. intros x n m H' H'0; replace m with (m - n + n)%nat. rewrite pow_add. pattern (x ^ n) at 1; replace (x ^ n) with (1 * x ^ n); auto with real. apply Rminus_lt. repeat rewrite (fun y:R => Rmult_comm y (x ^ n)); rewrite <- Rmult_minus_distr_l. replace 0 with (x ^ n * 0); auto with real. apply Rmult_lt_compat_l; auto with real. apply pow_lt; auto with real. apply Rlt_trans with (r2 := 1); auto with real. apply Rlt_minus; auto with real. apply Rlt_pow_R1; auto with arith. apply plus_lt_reg_l with (p := n); auto with arith. rewrite le_plus_minus_r; auto with arith; rewrite <- plus_n_O; auto. rewrite plus_comm; auto with arith. Qed. Hint Resolve Rlt_pow: real. (*********) Lemma tech_pow_Rmult : forall (x:R) (n:nat), x * x ^ n = x ^ S n. Proof. simple induction n; simpl; trivial. Qed. (*********) Lemma tech_pow_Rplus : forall (x:R) (a n:nat), x ^ a + INR n * x ^ a = INR (S n) * x ^ a. Proof. intros; pattern (x ^ a) at 1; rewrite <- (let (H1, H2) := Rmult_ne (x ^ a) in H1); rewrite (Rmult_comm (INR n) (x ^ a)); rewrite <- (Rmult_plus_distr_l (x ^ a) 1 (INR n)); rewrite (Rplus_comm 1 (INR n)); rewrite <- (S_INR n); apply Rmult_comm. Qed. Lemma poly : forall (n:nat) (x:R), 0 < x -> 1 + INR n * x <= (1 + x) ^ n. Proof. intros; elim n. simpl; cut (1 + 0 * x = 1). intro; rewrite H0; unfold Rle; right; reflexivity. ring. intros; unfold pow; fold pow; apply (Rle_trans (1 + INR (S n0) * x) ((1 + x) * (1 + INR n0 * x)) ((1 + x) * (1 + x) ^ n0)). cut ((1 + x) * (1 + INR n0 * x) = 1 + INR (S n0) * x + INR n0 * (x * x)). intro; rewrite H1; pattern (1 + INR (S n0) * x) at 1; rewrite <- (let (H1, H2) := Rplus_ne (1 + INR (S n0) * x) in H1); apply Rplus_le_compat_l; elim n0; intros. simpl; rewrite Rmult_0_l; unfold Rle; right; auto. unfold Rle; left; generalize Rmult_gt_0_compat; unfold Rgt; intro; fold (Rsqr x); apply (H3 (INR (S n1)) (Rsqr x) (lt_INR_0 (S n1) (lt_O_Sn n1))); fold (x > 0) in H; apply (Rlt_0_sqr x (Rlt_dichotomy_converse x 0 (or_intror (x < 0) H))). rewrite (S_INR n0); ring. unfold Rle in H0; elim H0; intro. unfold Rle; left; apply Rmult_lt_compat_l. rewrite Rplus_comm; apply (Rle_lt_0_plus_1 x (Rlt_le 0 x H)). assumption. rewrite H1; unfold Rle; right; trivial. Qed. Lemma Power_monotonic : forall (x:R) (m n:nat), Rabs x > 1 -> (m <= n)%nat -> Rabs (x ^ m) <= Rabs (x ^ n). Proof. intros x m n H; induction n as [| n Hrecn]; intros; inversion H0. unfold Rle; right; reflexivity. unfold Rle; right; reflexivity. apply (Rle_trans (Rabs (x ^ m)) (Rabs (x ^ n)) (Rabs (x ^ S n))). apply Hrecn; assumption. simpl; rewrite Rabs_mult. pattern (Rabs (x ^ n)) at 1. rewrite <- Rmult_1_r. rewrite (Rmult_comm (Rabs x) (Rabs (x ^ n))). apply Rmult_le_compat_l. apply Rabs_pos. unfold Rgt in H. apply Rlt_le; assumption. Qed. Lemma RPow_abs : forall (x:R) (n:nat), Rabs x ^ n = Rabs (x ^ n). Proof. intro; simple induction n; simpl. symmetry; apply Rabs_pos_eq; apply Rlt_le; apply Rlt_0_1. intros; rewrite H; symmetry; apply Rabs_mult. Qed. Lemma Pow_x_infinity : forall x:R, Rabs x > 1 -> forall b:R, exists N : nat, (forall n:nat, (n >= N)%nat -> Rabs (x ^ n) >= b). Proof. 
intros; elim (archimed (b * / (Rabs x - 1))); intros; clear H1; cut (exists N : nat, INR N >= b * / (Rabs x - 1)). intro; elim H1; clear H1; intros; exists x0; intros; apply (Rge_trans (Rabs (x ^ n)) (Rabs (x ^ x0)) b). apply Rle_ge; apply Power_monotonic; assumption. rewrite <- RPow_abs; cut (Rabs x = 1 + (Rabs x - 1)). intro; rewrite H3; apply (Rge_trans ((1 + (Rabs x - 1)) ^ x0) (1 + INR x0 * (Rabs x - 1)) b). apply Rle_ge; apply poly; fold (Rabs x - 1 > 0); apply Rgt_minus; assumption. apply (Rge_trans (1 + INR x0 * (Rabs x - 1)) (INR x0 * (Rabs x - 1)) b). apply Rle_ge; apply Rlt_le; rewrite (Rplus_comm 1 (INR x0 * (Rabs x - 1))); pattern (INR x0 * (Rabs x - 1)) at 1; rewrite <- (let (H1, H2) := Rplus_ne (INR x0 * (Rabs x - 1)) in H1); apply Rplus_lt_compat_l; apply Rlt_0_1. cut (b = b * / (Rabs x - 1) * (Rabs x - 1)). intros; rewrite H4; apply Rmult_ge_compat_r. apply Rge_minus; unfold Rge; left; assumption. assumption. rewrite Rmult_assoc; rewrite Rinv_l. ring. apply Rlt_dichotomy_converse; right; apply Rgt_minus; assumption. ring. cut ((0 <= up (b * / (Rabs x - 1)))%Z \/ (up (b * / (Rabs x - 1)) <= 0)%Z). intros; elim H1; intro. elim (IZN (up (b * / (Rabs x - 1))) H2); intros; exists x0; apply (Rge_trans (INR x0) (IZR (up (b * / (Rabs x - 1)))) (b * / (Rabs x - 1))). rewrite INR_IZR_INZ; apply IZR_ge; omega. unfold Rge; left; assumption. exists 0%nat; apply (Rge_trans (INR 0) (IZR (up (b * / (Rabs x - 1)))) (b * / (Rabs x - 1))). rewrite INR_IZR_INZ; apply IZR_ge; simpl; omega. unfold Rge; left; assumption. omega. Qed. Lemma pow_ne_zero : forall n:nat, n <> 0%nat -> 0 ^ n = 0. Proof. simple induction n. simpl; auto. intros; elim H; reflexivity. intros; simpl; apply Rmult_0_l. Qed. Lemma Rinv_pow : forall (x:R) (n:nat), x <> 0 -> / x ^ n = (/ x) ^ n. Proof. intros; elim n; simpl. apply Rinv_1. intro m; intro; rewrite Rinv_mult_distr. rewrite H0; reflexivity; assumption. assumption. apply pow_nonzero; assumption. Qed. Lemma pow_lt_1_zero : forall x:R, Rabs x < 1 -> forall y:R, 0 < y -> exists N : nat, (forall n:nat, (n >= N)%nat -> Rabs (x ^ n) < y). Proof. intros; elim (Req_dec x 0); intro. exists 1%nat; rewrite H1; intros n GE; rewrite pow_ne_zero. rewrite Rabs_R0; assumption. inversion GE; auto. cut (Rabs (/ x) > 1). intros; elim (Pow_x_infinity (/ x) H2 (/ y + 1)); intros N. exists N; intros; rewrite <- (Rinv_involutive y). rewrite <- (Rinv_involutive (Rabs (x ^ n))). apply Rinv_lt_contravar. apply Rmult_lt_0_compat. apply Rinv_0_lt_compat. assumption. apply Rinv_0_lt_compat. apply Rabs_pos_lt. apply pow_nonzero. assumption. rewrite <- Rabs_Rinv. rewrite Rinv_pow. apply (Rlt_le_trans (/ y) (/ y + 1) (Rabs ((/ x) ^ n))). pattern (/ y) at 1. rewrite <- (let (H1, H2) := Rplus_ne (/ y) in H1). apply Rplus_lt_compat_l. apply Rlt_0_1. apply Rge_le. apply H3. assumption. assumption. apply pow_nonzero. assumption. apply Rabs_no_R0. apply pow_nonzero. assumption. apply Rlt_dichotomy_converse. right; unfold Rgt; assumption. rewrite <- (Rinv_involutive 1). rewrite Rabs_Rinv. unfold Rgt; apply Rinv_lt_contravar. apply Rmult_lt_0_compat. apply Rabs_pos_lt. assumption. rewrite Rinv_1; apply Rlt_0_1. rewrite Rinv_1; assumption. assumption. red; intro; apply R1_neq_R0; assumption. Qed. Lemma pow_R1 : forall (r:R) (n:nat), r ^ n = 1 -> Rabs r = 1 \/ n = 0%nat. Proof. intros r n H'. case (Req_dec (Rabs r) 1); auto; intros H'1. case (Rdichotomy _ _ H'1); intros H'2. generalize H'; case n; auto. intros n0 H'0. cut (r <> 0); [ intros Eq1 | idtac ]. 
cut (Rabs r <> 0); [ intros Eq2 | apply Rabs_no_R0 ]; auto. absurd (Rabs (/ r) ^ 0 < Rabs (/ r) ^ S n0); auto. replace (Rabs (/ r) ^ S n0) with 1. simpl; apply Rlt_irrefl; auto. rewrite Rabs_Rinv; auto. rewrite <- Rinv_pow; auto. rewrite RPow_abs; auto. rewrite H'0; rewrite Rabs_right; auto with real rorders. apply Rlt_pow; auto with arith. rewrite Rabs_Rinv; auto. apply Rmult_lt_reg_l with (r := Rabs r). case (Rabs_pos r); auto. intros H'3; case Eq2; auto. rewrite Rmult_1_r; rewrite Rinv_r; auto with real. red; intro; absurd (r ^ S n0 = 1); auto. simpl; rewrite H; rewrite Rmult_0_l; auto with real. generalize H'; case n; auto. intros n0 H'0. cut (r <> 0); [ intros Eq1 | auto with real ]. cut (Rabs r <> 0); [ intros Eq2 | apply Rabs_no_R0 ]; auto. absurd (Rabs r ^ 0 < Rabs r ^ S n0); auto with real arith. repeat rewrite RPow_abs; rewrite H'0; simpl; auto with real. red; intro; absurd (r ^ S n0 = 1); auto. simpl; rewrite H; rewrite Rmult_0_l; auto with real. Qed. Lemma pow_Rsqr : forall (x:R) (n:nat), x ^ (2 * n) = Rsqr x ^ n. Proof. intros; induction n as [| n Hrecn]. reflexivity. replace (2 * S n)%nat with (S (S (2 * n))). replace (x ^ S (S (2 * n))) with (x * x * x ^ (2 * n)). rewrite Hrecn; reflexivity. simpl; ring. ring. Qed. Lemma pow_le : forall (a:R) (n:nat), 0 <= a -> 0 <= a ^ n. Proof. intros; induction n as [| n Hrecn]. simpl; left; apply Rlt_0_1. simpl; apply Rmult_le_pos; assumption. Qed. (**********) Lemma pow_1_even : forall n:nat, (-1) ^ (2 * n) = 1. Proof. intro; induction n as [| n Hrecn]. reflexivity. replace (2 * S n)%nat with (2 + 2 * n)%nat by ring. rewrite pow_add; rewrite Hrecn; simpl; ring. Qed. (**********) Lemma pow_1_odd : forall n:nat, (-1) ^ S (2 * n) = -1. Proof. intro; replace (S (2 * n)) with (2 * n + 1)%nat by ring. rewrite pow_add; rewrite pow_1_even; simpl; ring. Qed. (**********) Lemma pow_1_abs : forall n:nat, Rabs ((-1) ^ n) = 1. Proof. intro; induction n as [| n Hrecn]. simpl; apply Rabs_R1. replace (S n) with (n + 1)%nat; [ rewrite pow_add | ring ]. rewrite Rabs_mult. rewrite Hrecn; rewrite Rmult_1_l; simpl; rewrite Rmult_1_r; rewrite Rabs_Ropp; apply Rabs_R1. Qed. Lemma pow_mult : forall (x:R) (n1 n2:nat), x ^ (n1 * n2) = (x ^ n1) ^ n2. Proof. intros; induction n2 as [| n2 Hrecn2]. simpl; replace (n1 * 0)%nat with 0%nat; [ reflexivity | ring ]. replace (n1 * S n2)%nat with (n1 * n2 + n1)%nat. replace (S n2) with (n2 + 1)%nat by ring. do 2 rewrite pow_add. rewrite Hrecn2. simpl. ring. ring. Qed. Lemma pow_incr : forall (x y:R) (n:nat), 0 <= x <= y -> x ^ n <= y ^ n. Proof. intros. induction n as [| n Hrecn]. right; reflexivity. simpl. elim H; intros. apply Rle_trans with (y * x ^ n). do 2 rewrite <- (Rmult_comm (x ^ n)). apply Rmult_le_compat_l. apply pow_le; assumption. assumption. apply Rmult_le_compat_l. apply Rle_trans with x; assumption. apply Hrecn. Qed. Lemma pow_R1_Rle : forall (x:R) (k:nat), 1 <= x -> 1 <= x ^ k. Proof. intros. induction k as [| k Hreck]. right; reflexivity. simpl. apply Rle_trans with (x * 1). rewrite Rmult_1_r; assumption. apply Rmult_le_compat_l. left; apply Rlt_le_trans with 1; [ apply Rlt_0_1 | assumption ]. exact Hreck. Qed. Lemma Rle_pow : forall (x:R) (m n:nat), 1 <= x -> (m <= n)%nat -> x ^ m <= x ^ n. Proof. intros. replace n with (n - m + m)%nat. rewrite pow_add. rewrite Rmult_comm. pattern (x ^ m) at 1; rewrite <- Rmult_1_r. apply Rmult_le_compat_l. apply pow_le; left; apply Rlt_le_trans with 1; [ apply Rlt_0_1 | assumption ]. apply pow_R1_Rle; assumption. rewrite plus_comm. 
symmetry ; apply le_plus_minus; assumption. Qed. Lemma pow1 : forall n:nat, 1 ^ n = 1. Proof. intro; induction n as [| n Hrecn]. reflexivity. simpl; rewrite Hrecn; rewrite Rmult_1_r; reflexivity. Qed. Lemma pow_Rabs : forall (x:R) (n:nat), x ^ n <= Rabs x ^ n. Proof. intros; induction n as [| n Hrecn]. right; reflexivity. simpl; case (Rcase_abs x); intro. apply Rle_trans with (Rabs (x * x ^ n)). apply RRle_abs. rewrite Rabs_mult. apply Rmult_le_compat_l. apply Rabs_pos. right; symmetry ; apply RPow_abs. pattern (Rabs x) at 1; rewrite (Rabs_right x r); apply Rmult_le_compat_l. apply Rge_le; exact r. apply Hrecn. Qed. Lemma pow_maj_Rabs : forall (x y:R) (n:nat), Rabs y <= x -> y ^ n <= x ^ n. Proof. intros; cut (0 <= x). intro; apply Rle_trans with (Rabs y ^ n). apply pow_Rabs. induction n as [| n Hrecn]. right; reflexivity. simpl; apply Rle_trans with (x * Rabs y ^ n). do 2 rewrite <- (Rmult_comm (Rabs y ^ n)). apply Rmult_le_compat_l. apply pow_le; apply Rabs_pos. assumption. apply Rmult_le_compat_l. apply H0. apply Hrecn. apply Rle_trans with (Rabs y); [ apply Rabs_pos | exact H ]. Qed. (*******************************) (** * PowerRZ *) (*******************************) (*i Due to L.Thery i*) Ltac case_eq name := generalize (eq_refl name); pattern name at -1; case name. Definition powerRZ (x:R) (n:Z) := match n with | Z0 => 1 | Zpos p => x ^ Pos.to_nat p | Zneg p => / x ^ Pos.to_nat p end. Local Infix "^Z" := powerRZ (at level 30, right associativity) : R_scope. Lemma Zpower_NR0 : forall (x:Z) (n:nat), (0 <= x)%Z -> (0 <= Zpower_nat x n)%Z. Proof. induction n; unfold Zpower_nat; simpl; auto with zarith. Qed. Lemma powerRZ_O : forall x:R, x ^Z 0 = 1. Proof. reflexivity. Qed. Lemma powerRZ_1 : forall x:R, x ^Z Z.succ 0 = x. Proof. simpl; auto with real. Qed. Lemma powerRZ_NOR : forall (x:R) (z:Z), x <> 0 -> x ^Z z <> 0. Proof. destruct z; simpl; auto with real. Qed. Lemma powerRZ_pos_sub (x:R) (n m:positive) : x <> 0 -> x ^Z (Z.pos_sub n m) = x ^ Pos.to_nat n * / x ^ Pos.to_nat m. Proof. intro Hx. rewrite Z.pos_sub_spec. case Pos.compare_spec; intro H; simpl. - subst; auto with real. - rewrite Pos2Nat.inj_sub by trivial. rewrite Pos2Nat.inj_lt in H. rewrite (pow_RN_plus x _ (Pos.to_nat n)) by auto with real. rewrite plus_comm, le_plus_minus_r by auto with real. rewrite Rinv_mult_distr, Rinv_involutive; auto with real. - rewrite Pos2Nat.inj_sub by trivial. rewrite Pos2Nat.inj_lt in H. rewrite (pow_RN_plus x _ (Pos.to_nat m)) by auto with real. rewrite plus_comm, le_plus_minus_r by auto with real. reflexivity. Qed. Lemma powerRZ_add : forall (x:R) (n m:Z), x <> 0 -> x ^Z (n + m) = x ^Z n * x ^Z m. Proof. intros x [|n|n] [|m|m]; simpl; intros; auto with real. - (* + + *) rewrite Pos2Nat.inj_add; auto with real. - (* + - *) now apply powerRZ_pos_sub. - (* - + *) rewrite Rmult_comm. now apply powerRZ_pos_sub. - (* - - *) rewrite Pos2Nat.inj_add; auto with real. rewrite pow_add; auto with real. apply Rinv_mult_distr; apply pow_nonzero; auto. Qed. Hint Resolve powerRZ_O powerRZ_1 powerRZ_NOR powerRZ_add: real. Lemma Zpower_nat_powerRZ : forall n m:nat, IZR (Zpower_nat (Z.of_nat n) m) = INR n ^Z Z.of_nat m. Proof. intros n m; elim m; simpl; auto with real. intros m1 H'; rewrite SuccNat2Pos.id_succ; simpl. replace (Zpower_nat (Z.of_nat n) (S m1)) with (Z.of_nat n * Zpower_nat (Z.of_nat n) m1)%Z. rewrite mult_IZR; auto with real. repeat rewrite <- INR_IZR_INZ; simpl. rewrite H'; simpl. case m1; simpl; auto with real. intros m2; rewrite SuccNat2Pos.id_succ; auto. unfold Zpower_nat; auto. Qed. 
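(* Illustrative examples, not part of the original library file: a minimal
   sketch of how powerRZ behaves on negative exponents.  The first equation
   holds by definition of powerRZ; the second combines it with powerRZ_add
   proved above.  The example names are chosen here and are not library
   identifiers. *)
Example powerRZ_neg_def : forall (x:R) (p:positive), x ^Z Zneg p = / x ^ Pos.to_nat p.
Proof.
  intros; reflexivity.
Qed.

Example powerRZ_cancel_3 : forall x:R, x <> 0 -> x ^Z 3 * x ^Z (-3) = 1.
Proof.
  intros x Hx; rewrite <- (powerRZ_add x 3 (-3) Hx); reflexivity.
Qed.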
Lemma Zpower_pos_powerRZ : forall n m, IZR (Z.pow_pos n m) = IZR n ^Z Zpos m. Proof. intros. rewrite Zpower_pos_nat; simpl. induction (Pos.to_nat m). easy. unfold Zpower_nat; simpl. rewrite mult_IZR. now rewrite <- IHn0. Qed. Lemma powerRZ_lt : forall (x:R) (z:Z), 0 < x -> 0 < x ^Z z. Proof. intros x z; case z; simpl; auto with real. Qed. Hint Resolve powerRZ_lt: real. Lemma powerRZ_le : forall (x:R) (z:Z), 0 < x -> 0 <= x ^Z z. Proof. intros x z H'; apply Rlt_le; auto with real. Qed. Hint Resolve powerRZ_le: real. Lemma Zpower_nat_powerRZ_absolu : forall n m:Z, (0 <= m)%Z -> IZR (Zpower_nat n (Z.abs_nat m)) = IZR n ^Z m. Proof. intros n m; case m; simpl; auto with zarith. intros p H'; elim (Pos.to_nat p); simpl; auto with zarith. intros n0 H'0; rewrite <- H'0; simpl; auto with zarith. rewrite <- mult_IZR; auto. intros p H'; absurd (0 <= Zneg p)%Z; auto with zarith. Qed. Lemma powerRZ_R1 : forall n:Z, 1 ^Z n = 1. Proof. intros n; case n; simpl; auto. intros p; elim (Pos.to_nat p); simpl; auto; intros n0 H'; rewrite H'; ring. intros p; elim (Pos.to_nat p); simpl. exact Rinv_1. intros n1 H'; rewrite Rinv_mult_distr; try rewrite Rinv_1; try rewrite H'; auto with real. Qed. (*******************************) (* For easy interface *) (*******************************) (* decimal_exp r z is defined as r 10^z *) Definition decimal_exp (r:R) (z:Z) : R := (r * 10 ^Z z). (*******************************) (** * Sum of n first naturals *) (*******************************) (*********) Fixpoint sum_nat_f_O (f:nat -> nat) (n:nat) : nat := match n with | O => f 0%nat | S n' => (sum_nat_f_O f n' + f (S n'))%nat end. (*********) Definition sum_nat_f (s n:nat) (f:nat -> nat) : nat := sum_nat_f_O (fun x:nat => f (x + s)%nat) (n - s). (*********) Definition sum_nat_O (n:nat) : nat := sum_nat_f_O (fun x:nat => x) n. (*********) Definition sum_nat (s n:nat) : nat := sum_nat_f s n (fun x:nat => x). (*******************************) (** * Sum *) (*******************************) (*********) Fixpoint sum_f_R0 (f:nat -> R) (N:nat) : R := match N with | O => f 0%nat | S i => sum_f_R0 f i + f (S i) end. (*********) Definition sum_f (s n:nat) (f:nat -> R) : R := sum_f_R0 (fun x:nat => f (x + s)%nat) (n - s). Lemma GP_finite : forall (x:R) (n:nat), sum_f_R0 (fun n:nat => x ^ n) n * (x - 1) = x ^ (n + 1) - 1. Proof. intros; induction n as [| n Hrecn]; simpl. ring. rewrite Rmult_plus_distr_r; rewrite Hrecn; cut ((n + 1)%nat = S n). intro H; rewrite H; simpl; ring. omega. Qed. Lemma sum_f_R0_triangle : forall (x:nat -> R) (n:nat), Rabs (sum_f_R0 x n) <= sum_f_R0 (fun i:nat => Rabs (x i)) n. Proof. intro; simple induction n; simpl. unfold Rle; right; reflexivity. intro m; intro; apply (Rle_trans (Rabs (sum_f_R0 x m + x (S m))) (Rabs (sum_f_R0 x m) + Rabs (x (S m))) (sum_f_R0 (fun i:nat => Rabs (x i)) m + Rabs (x (S m)))). apply Rabs_triang. rewrite Rplus_comm; rewrite (Rplus_comm (sum_f_R0 (fun i:nat => Rabs (x i)) m) (Rabs (x (S m)))); apply Rplus_le_compat_l; assumption. Qed. (*******************************) (** * Distance in R *) (*******************************) (*********) Definition R_dist (x y:R) : R := Rabs (x - y). (*********) Lemma R_dist_pos : forall x y:R, R_dist x y >= 0. Proof. intros; unfold R_dist; unfold Rabs; case (Rcase_abs (x - y)); intro l. unfold Rge; left; apply (Ropp_gt_lt_0_contravar (x - y) l). trivial. Qed. (*********) Lemma R_dist_sym : forall x y:R, R_dist x y = R_dist y x. Proof. unfold R_dist; intros; split_Rabs; try ring. 
generalize (Ropp_gt_lt_0_contravar (y - x) r); intro; rewrite (Ropp_minus_distr y x) in H; generalize (Rlt_asym (x - y) 0 r0); intro; unfold Rgt in H; exfalso; auto. generalize (minus_Rge y x r); intro; generalize (minus_Rge x y r0); intro; generalize (Rge_antisym x y H0 H); intro; rewrite H1; ring. Qed. (*********) Lemma R_dist_refl : forall x y:R, R_dist x y = 0 <-> x = y. Proof. unfold R_dist; intros; split_Rabs; split; intros. rewrite (Ropp_minus_distr x y) in H; symmetry; apply (Rminus_diag_uniq y x H). rewrite (Ropp_minus_distr x y); generalize (eq_sym H); intro; apply (Rminus_diag_eq y x H0). apply (Rminus_diag_uniq x y H). apply (Rminus_diag_eq x y H). Qed. Lemma R_dist_eq : forall x:R, R_dist x x = 0. Proof. unfold R_dist; intros; split_Rabs; intros; ring. Qed. (***********) Lemma R_dist_tri : forall x y z:R, R_dist x y <= R_dist x z + R_dist z y. Proof. intros; unfold R_dist; replace (x - y) with (x - z + (z - y)); [ apply (Rabs_triang (x - z) (z - y)) | ring ]. Qed. (*********) Lemma R_dist_plus : forall a b c d:R, R_dist (a + c) (b + d) <= R_dist a b + R_dist c d. Proof. intros; unfold R_dist; replace (a + c - (b + d)) with (a - b + (c - d)). exact (Rabs_triang (a - b) (c - d)). ring. Qed. (*******************************) (** * Infinite Sum *) (*******************************) (*********) Definition infinite_sum (s:nat -> R) (l:R) : Prop := forall eps:R, eps > 0 -> exists N : nat, (forall n:nat, (n >= N)%nat -> R_dist (sum_f_R0 s n) l < eps). (** Compatibility with previous versions *) Notation infinit_sum := infinite_sum (only parsing). coq-8.4pl2/theories/Reals/LegacyRfield.v0000640000175000001440000000227112010532755017242 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false). split. exact Rplus_comm. symmetry ; apply Rplus_assoc. exact Rmult_comm. symmetry ; apply Rmult_assoc. intro; apply Rplus_0_l. intro; apply Rmult_1_l. exact Rplus_opp_r. intros. rewrite Rmult_comm. rewrite (Rmult_comm n p). rewrite (Rmult_comm m p). apply Rmult_plus_distr_l. intros; contradiction. Defined. End LegacyRfield. Add Legacy Field R Rplus Rmult 1%R 0%R Ropp (fun x y:R => false) Rinv RLegacyTheory Rinv_l with minus := Rminus div := Rdiv. coq-8.4pl2/theories/Reals/PartSum.v0000640000175000001440000004713012010532755016306 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (N:nat), (forall n:nat, (n <= N)%nat -> 0 < An n) -> 0 < sum_f_R0 An N. Proof. intros; induction N as [| N HrecN]. simpl; apply H; apply le_n. simpl; apply Rplus_lt_0_compat. apply HrecN; intros; apply H; apply le_S; assumption. apply H; apply le_n. Qed. (* Chasles' relation *) Lemma tech2 : forall (An:nat -> R) (m n:nat), (m < n)%nat -> sum_f_R0 An n = sum_f_R0 An m + sum_f_R0 (fun i:nat => An (S m + i)%nat) (n - S m). Proof. intros; induction n as [| n Hrecn]. elim (lt_n_O _ H). cut ((m < n)%nat \/ m = n). intro; elim H0; intro. replace (sum_f_R0 An (S n)) with (sum_f_R0 An n + An (S n)); [ idtac | reflexivity ]. replace (S n - S m)%nat with (S (n - S m)). replace (sum_f_R0 (fun i:nat => An (S m + i)%nat) (S (n - S m))) with (sum_f_R0 (fun i:nat => An (S m + i)%nat) (n - S m) + An (S m + S (n - S m))%nat); [ idtac | reflexivity ]. replace (S m + S (n - S m))%nat with (S n). rewrite (Hrecn H1). ring. apply INR_eq; rewrite S_INR; rewrite plus_INR; do 2 rewrite S_INR; rewrite minus_INR. rewrite S_INR; ring. 
apply lt_le_S; assumption. apply INR_eq; rewrite S_INR; repeat rewrite minus_INR. repeat rewrite S_INR; ring. apply le_n_S; apply lt_le_weak; assumption. apply lt_le_S; assumption. rewrite H1; rewrite <- minus_n_n; simpl. replace (n + 0)%nat with n; [ reflexivity | ring ]. inversion H. right; reflexivity. left; apply lt_le_trans with (S m); [ apply lt_n_Sn | assumption ]. Qed. (* Sum of geometric sequences *) Lemma tech3 : forall (k:R) (N:nat), k <> 1 -> sum_f_R0 (fun i:nat => k ^ i) N = (1 - k ^ S N) / (1 - k). Proof. intros; cut (1 - k <> 0). intro; induction N as [| N HrecN]. simpl; rewrite Rmult_1_r; unfold Rdiv; rewrite <- Rinv_r_sym. reflexivity. apply H0. replace (sum_f_R0 (fun i:nat => k ^ i) (S N)) with (sum_f_R0 (fun i:nat => k ^ i) N + k ^ S N); [ idtac | reflexivity ]; rewrite HrecN; replace ((1 - k ^ S N) / (1 - k) + k ^ S N) with ((1 - k ^ S N + (1 - k) * k ^ S N) / (1 - k)). apply Rmult_eq_reg_l with (1 - k). unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ (1 - k))); repeat rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ do 2 rewrite Rmult_1_l; simpl; ring | apply H0 ]. apply H0. unfold Rdiv; rewrite Rmult_plus_distr_r; rewrite (Rmult_comm (1 - k)); repeat rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; reflexivity. apply H0. apply Rminus_eq_contra; red; intro; elim H; symmetry ; assumption. Qed. Lemma tech4 : forall (An:nat -> R) (k:R) (N:nat), 0 <= k -> (forall i:nat, An (S i) < k * An i) -> An N <= An 0%nat * k ^ N. Proof. intros; induction N as [| N HrecN]. simpl; right; ring. apply Rle_trans with (k * An N). left; apply (H0 N). replace (S N) with (N + 1)%nat; [ idtac | ring ]. rewrite pow_add; simpl; rewrite Rmult_1_r; replace (An 0%nat * (k ^ N * k)) with (k * (An 0%nat * k ^ N)); [ idtac | ring ]; apply Rmult_le_compat_l. assumption. apply HrecN. Qed. Lemma tech5 : forall (An:nat -> R) (N:nat), sum_f_R0 An (S N) = sum_f_R0 An N + An (S N). Proof. intros; reflexivity. Qed. Lemma tech6 : forall (An:nat -> R) (k:R) (N:nat), 0 <= k -> (forall i:nat, An (S i) < k * An i) -> sum_f_R0 An N <= An 0%nat * sum_f_R0 (fun i:nat => k ^ i) N. Proof. intros; induction N as [| N HrecN]. simpl; right; ring. apply Rle_trans with (An 0%nat * sum_f_R0 (fun i:nat => k ^ i) N + An (S N)). rewrite tech5; do 2 rewrite <- (Rplus_comm (An (S N))); apply Rplus_le_compat_l. apply HrecN. rewrite tech5; rewrite Rmult_plus_distr_l; apply Rplus_le_compat_l. apply tech4; assumption. Qed. Lemma tech7 : forall r1 r2:R, r1 <> 0 -> r2 <> 0 -> r1 <> r2 -> / r1 <> / r2. Proof. intros; red; intro. assert (H3 := Rmult_eq_compat_l r1 _ _ H2). rewrite <- Rinv_r_sym in H3; [ idtac | assumption ]. assert (H4 := Rmult_eq_compat_l r2 _ _ H3). rewrite Rmult_1_r in H4; rewrite <- Rmult_assoc in H4. rewrite Rinv_r_simpl_m in H4; [ idtac | assumption ]. elim H1; symmetry ; assumption. Qed. Lemma tech11 : forall (An Bn Cn:nat -> R) (N:nat), (forall i:nat, An i = Bn i - Cn i) -> sum_f_R0 An N = sum_f_R0 Bn N - sum_f_R0 Cn N. Proof. intros; induction N as [| N HrecN]. simpl; apply H. do 3 rewrite tech5; rewrite HrecN; rewrite (H (S N)); ring. Qed. Lemma tech12 : forall (An:nat -> R) (x l:R), Un_cv (fun N:nat => sum_f_R0 (fun i:nat => An i * x ^ i) N) l -> Pser An x l. Proof. intros; unfold Pser; unfold infinite_sum; unfold Un_cv in H; assumption. Qed. Lemma scal_sum : forall (An:nat -> R) (N:nat) (x:R), x * sum_f_R0 An N = sum_f_R0 (fun i:nat => An i * x) N. Proof. intros; induction N as [| N HrecN]. simpl; ring. do 2 rewrite tech5. rewrite Rmult_plus_distr_l; rewrite <- HrecN; ring. Qed. 
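(* Illustrative examples, not part of the original library file: two small
   sanity checks on sum_f_R0 and on scal_sum proved just above; the names are
   chosen here and are not library identifiers. *)
Example sum_f_R0_two_terms : forall An:nat -> R, sum_f_R0 An 1 = An 0%nat + An 1%nat.
Proof.
  intro An; reflexivity.
Qed.

Example scal_sum_by_two :
  forall (An:nat -> R) (N:nat), 2 * sum_f_R0 An N = sum_f_R0 (fun i:nat => An i * 2) N.
Proof.
  intros An N; exact (scal_sum An N 2).
Qed.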
Lemma decomp_sum : forall (An:nat -> R) (N:nat), (0 < N)%nat -> sum_f_R0 An N = An 0%nat + sum_f_R0 (fun i:nat => An (S i)) (pred N). Proof. intros; induction N as [| N HrecN]. elim (lt_irrefl _ H). cut ((0 < N)%nat \/ N = 0%nat). intro; elim H0; intro. cut (S (pred N) = pred (S N)). intro; rewrite <- H2. do 2 rewrite tech5. replace (S (S (pred N))) with (S N). rewrite (HrecN H1); ring. rewrite H2; simpl; reflexivity. assert (H2 := O_or_S N). elim H2; intros. elim a; intros. rewrite <- p. simpl; reflexivity. rewrite <- b in H1; elim (lt_irrefl _ H1). rewrite H1; simpl; reflexivity. inversion H. right; reflexivity. left; apply lt_le_trans with 1%nat; [ apply lt_O_Sn | assumption ]. Qed. Lemma plus_sum : forall (An Bn:nat -> R) (N:nat), sum_f_R0 (fun i:nat => An i + Bn i) N = sum_f_R0 An N + sum_f_R0 Bn N. Proof. intros; induction N as [| N HrecN]. simpl; ring. do 3 rewrite tech5; rewrite HrecN; ring. Qed. Lemma sum_eq : forall (An Bn:nat -> R) (N:nat), (forall i:nat, (i <= N)%nat -> An i = Bn i) -> sum_f_R0 An N = sum_f_R0 Bn N. Proof. intros; induction N as [| N HrecN]. simpl; apply H; apply le_n. do 2 rewrite tech5; rewrite HrecN. rewrite (H (S N)); [ reflexivity | apply le_n ]. intros; apply H; apply le_trans with N; [ assumption | apply le_n_Sn ]. Qed. (* Unicity of the limit defined by convergent series *) Lemma uniqueness_sum : forall (An:nat -> R) (l1 l2:R), infinite_sum An l1 -> infinite_sum An l2 -> l1 = l2. Proof. unfold infinite_sum; intros. case (Req_dec l1 l2); intro. assumption. cut (0 < Rabs ((l1 - l2) / 2)); [ intro | apply Rabs_pos_lt ]. elim (H (Rabs ((l1 - l2) / 2)) H2); intros. elim (H0 (Rabs ((l1 - l2) / 2)) H2); intros. set (N := max x0 x); cut (N >= x0)%nat. cut (N >= x)%nat. intros; assert (H7 := H3 N H5); assert (H8 := H4 N H6). cut (Rabs (l1 - l2) <= R_dist (sum_f_R0 An N) l1 + R_dist (sum_f_R0 An N) l2). intro; assert (H10 := Rplus_lt_compat _ _ _ _ H7 H8); assert (H11 := Rle_lt_trans _ _ _ H9 H10); unfold Rdiv in H11; rewrite Rabs_mult in H11. cut (Rabs (/ 2) = / 2). intro; rewrite H12 in H11; assert (H13 := double_var); unfold Rdiv in H13; rewrite <- H13 in H11. elim (Rlt_irrefl _ H11). apply Rabs_right; left; change (0 < / 2); apply Rinv_0_lt_compat; cut (0%nat <> 2%nat); [ intro H20; generalize (lt_INR_0 2 (neq_O_lt 2 H20)); unfold INR; intro; assumption | discriminate ]. unfold R_dist; rewrite <- (Rabs_Ropp (sum_f_R0 An N - l1)); rewrite Ropp_minus_distr'. replace (l1 - l2) with (l1 - sum_f_R0 An N + (sum_f_R0 An N - l2)); [ idtac | ring ]. apply Rabs_triang. unfold ge; unfold N; apply le_max_r. unfold ge; unfold N; apply le_max_l. unfold Rdiv; apply prod_neq_R0. apply Rminus_eq_contra; assumption. apply Rinv_neq_0_compat; discrR. Qed. Lemma minus_sum : forall (An Bn:nat -> R) (N:nat), sum_f_R0 (fun i:nat => An i - Bn i) N = sum_f_R0 An N - sum_f_R0 Bn N. Proof. intros; induction N as [| N HrecN]. simpl; ring. do 3 rewrite tech5; rewrite HrecN; ring. Qed. Lemma sum_decomposition : forall (An:nat -> R) (N:nat), sum_f_R0 (fun l:nat => An (2 * l)%nat) (S N) + sum_f_R0 (fun l:nat => An (S (2 * l))) N = sum_f_R0 An (2 * S N). Proof. intros. induction N as [| N HrecN]. simpl; ring. rewrite tech5. rewrite (tech5 (fun l:nat => An (S (2 * l))) N). replace (2 * S (S N))%nat with (S (S (2 * S N))). rewrite (tech5 An (S (2 * S N))). rewrite (tech5 An (2 * S N)). rewrite <- HrecN. ring. ring. Qed. Lemma sum_Rle : forall (An Bn:nat -> R) (N:nat), (forall n:nat, (n <= N)%nat -> An n <= Bn n) -> sum_f_R0 An N <= sum_f_R0 Bn N. Proof. intros. induction N as [| N HrecN]. 
simpl; apply H. apply le_n. do 2 rewrite tech5. apply Rle_trans with (sum_f_R0 An N + Bn (S N)). apply Rplus_le_compat_l. apply H. apply le_n. do 2 rewrite <- (Rplus_comm (Bn (S N))). apply Rplus_le_compat_l. apply HrecN. intros; apply H. apply le_trans with N; [ assumption | apply le_n_Sn ]. Qed. Lemma Rsum_abs : forall (An:nat -> R) (N:nat), Rabs (sum_f_R0 An N) <= sum_f_R0 (fun l:nat => Rabs (An l)) N. Proof. intros. induction N as [| N HrecN]. simpl. right; reflexivity. do 2 rewrite tech5. apply Rle_trans with (Rabs (sum_f_R0 An N) + Rabs (An (S N))). apply Rabs_triang. do 2 rewrite <- (Rplus_comm (Rabs (An (S N)))). apply Rplus_le_compat_l. apply HrecN. Qed. Lemma sum_cte : forall (x:R) (N:nat), sum_f_R0 (fun _:nat => x) N = x * INR (S N). Proof. intros. induction N as [| N HrecN]. simpl; ring. rewrite tech5. rewrite HrecN; repeat rewrite S_INR; ring. Qed. (**********) Lemma sum_growing : forall (An Bn:nat -> R) (N:nat), (forall n:nat, An n <= Bn n) -> sum_f_R0 An N <= sum_f_R0 Bn N. Proof. intros. induction N as [| N HrecN]. simpl; apply H. do 2 rewrite tech5. apply Rle_trans with (sum_f_R0 An N + Bn (S N)). apply Rplus_le_compat_l; apply H. do 2 rewrite <- (Rplus_comm (Bn (S N))). apply Rplus_le_compat_l; apply HrecN. Qed. (**********) Lemma Rabs_triang_gen : forall (An:nat -> R) (N:nat), Rabs (sum_f_R0 An N) <= sum_f_R0 (fun i:nat => Rabs (An i)) N. Proof. intros. induction N as [| N HrecN]. simpl. right; reflexivity. do 2 rewrite tech5. apply Rle_trans with (Rabs (sum_f_R0 An N) + Rabs (An (S N))). apply Rabs_triang. do 2 rewrite <- (Rplus_comm (Rabs (An (S N)))). apply Rplus_le_compat_l; apply HrecN. Qed. (**********) Lemma cond_pos_sum : forall (An:nat -> R) (N:nat), (forall n:nat, 0 <= An n) -> 0 <= sum_f_R0 An N. Proof. intros. induction N as [| N HrecN]. simpl; apply H. rewrite tech5. apply Rplus_le_le_0_compat. apply HrecN. apply H. Qed. (* Cauchy's criterion for series *) Definition Cauchy_crit_series (An:nat -> R) : Prop := Cauchy_crit (fun N:nat => sum_f_R0 An N). (* If (|An|) satisfies the Cauchy's criterion for series, then (An) too *) Lemma cauchy_abs : forall An:nat -> R, Cauchy_crit_series (fun i:nat => Rabs (An i)) -> Cauchy_crit_series An. Proof. unfold Cauchy_crit_series; unfold Cauchy_crit. intros. elim (H eps H0); intros. exists x. intros. cut (R_dist (sum_f_R0 An n) (sum_f_R0 An m) <= R_dist (sum_f_R0 (fun i:nat => Rabs (An i)) n) (sum_f_R0 (fun i:nat => Rabs (An i)) m)). intro. apply Rle_lt_trans with (R_dist (sum_f_R0 (fun i:nat => Rabs (An i)) n) (sum_f_R0 (fun i:nat => Rabs (An i)) m)). assumption. apply H1; assumption. assert (H4 := lt_eq_lt_dec n m). elim H4; intro. elim a; intro. rewrite (tech2 An n m); [ idtac | assumption ]. rewrite (tech2 (fun i:nat => Rabs (An i)) n m); [ idtac | assumption ]. unfold R_dist. unfold Rminus. do 2 rewrite Ropp_plus_distr. do 2 rewrite <- Rplus_assoc. do 2 rewrite Rplus_opp_r. do 2 rewrite Rplus_0_l. do 2 rewrite Rabs_Ropp. rewrite (Rabs_right (sum_f_R0 (fun i:nat => Rabs (An (S n + i)%nat)) (m - S n))) . set (Bn := fun i:nat => An (S n + i)%nat). replace (fun i:nat => Rabs (An (S n + i)%nat)) with (fun i:nat => Rabs (Bn i)). apply Rabs_triang_gen. unfold Bn; reflexivity. apply Rle_ge. apply cond_pos_sum. intro; apply Rabs_pos. rewrite b. unfold R_dist. unfold Rminus; do 2 rewrite Rplus_opp_r. rewrite Rabs_R0; right; reflexivity. rewrite (tech2 An m n); [ idtac | assumption ]. rewrite (tech2 (fun i:nat => Rabs (An i)) m n); [ idtac | assumption ]. unfold R_dist. unfold Rminus. do 2 rewrite Rplus_assoc. 
rewrite (Rplus_comm (sum_f_R0 An m)). rewrite (Rplus_comm (sum_f_R0 (fun i:nat => Rabs (An i)) m)). do 2 rewrite Rplus_assoc. do 2 rewrite Rplus_opp_l. do 2 rewrite Rplus_0_r. rewrite (Rabs_right (sum_f_R0 (fun i:nat => Rabs (An (S m + i)%nat)) (n - S m))) . set (Bn := fun i:nat => An (S m + i)%nat). replace (fun i:nat => Rabs (An (S m + i)%nat)) with (fun i:nat => Rabs (Bn i)). apply Rabs_triang_gen. unfold Bn; reflexivity. apply Rle_ge. apply cond_pos_sum. intro; apply Rabs_pos. Qed. (**********) Lemma cv_cauchy_1 : forall An:nat -> R, { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l } -> Cauchy_crit_series An. Proof. intros An X. elim X; intros. unfold Un_cv in p. unfold Cauchy_crit_series; unfold Cauchy_crit. intros. cut (0 < eps / 2). intro. elim (p (eps / 2) H0); intros. exists x0. intros. apply Rle_lt_trans with (R_dist (sum_f_R0 An n) x + R_dist (sum_f_R0 An m) x). unfold R_dist. replace (sum_f_R0 An n - sum_f_R0 An m) with (sum_f_R0 An n - x + - (sum_f_R0 An m - x)); [ idtac | ring ]. rewrite <- (Rabs_Ropp (sum_f_R0 An m - x)). apply Rabs_triang. apply Rlt_le_trans with (eps / 2 + eps / 2). apply Rplus_lt_compat. apply H1; assumption. apply H1; assumption. right; symmetry ; apply double_var. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. Qed. Lemma cv_cauchy_2 : forall An:nat -> R, Cauchy_crit_series An -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }. Proof. intros. apply R_complete. unfold Cauchy_crit_series in H. exact H. Qed. (**********) Lemma sum_eq_R0 : forall (An:nat -> R) (N:nat), (forall n:nat, (n <= N)%nat -> An n = 0) -> sum_f_R0 An N = 0. Proof. intros; induction N as [| N HrecN]. simpl; apply H; apply le_n. rewrite tech5; rewrite HrecN; [ rewrite Rplus_0_l; apply H; apply le_n | intros; apply H; apply le_trans with N; [ assumption | apply le_n_Sn ] ]. Qed. Definition SP (fn:nat -> R -> R) (N:nat) (x:R) : R := sum_f_R0 (fun k:nat => fn k x) N. (**********) Lemma sum_incr : forall (An:nat -> R) (N:nat) (l:R), Un_cv (fun n:nat => sum_f_R0 An n) l -> (forall n:nat, 0 <= An n) -> sum_f_R0 An N <= l. Proof. intros; case (total_order_T (sum_f_R0 An N) l); intro. elim s; intro. left; apply a. right; apply b. cut (Un_growing (fun n:nat => sum_f_R0 An n)). intro; set (l1 := sum_f_R0 An N) in r. unfold Un_cv in H; cut (0 < l1 - l). intro; elim (H _ H2); intros. set (N0 := max x N); cut (N0 >= x)%nat. intro; assert (H5 := H3 N0 H4). cut (l1 <= sum_f_R0 An N0). intro; unfold R_dist in H5; rewrite Rabs_right in H5. cut (sum_f_R0 An N0 < l1). intro; elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H7 H6)). apply Rplus_lt_reg_r with (- l). do 2 rewrite (Rplus_comm (- l)). apply H5. apply Rle_ge; apply Rplus_le_reg_l with l. rewrite Rplus_0_r; replace (l + (sum_f_R0 An N0 - l)) with (sum_f_R0 An N0); [ idtac | ring ]; apply Rle_trans with l1. left; apply r. apply H6. unfold l1; apply Rge_le; apply (growing_prop (fun k:nat => sum_f_R0 An k)). apply H1. unfold ge, N0; apply le_max_r. unfold ge, N0; apply le_max_l. apply Rplus_lt_reg_r with l; rewrite Rplus_0_r; replace (l + (l1 - l)) with l1; [ apply r | ring ]. unfold Un_growing; intro; simpl; pattern (sum_f_R0 An n) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; apply H0. Qed. (**********) Lemma sum_cv_maj : forall (An:nat -> R) (fn:nat -> R -> R) (x l1 l2:R), Un_cv (fun n:nat => SP fn n x) l1 -> Un_cv (fun n:nat => sum_f_R0 An n) l2 -> (forall n:nat, Rabs (fn n x) <= An n) -> Rabs l1 <= l2. Proof. intros; case (total_order_T (Rabs l1) l2); intro. elim s; intro. left; apply a. right; apply b. 
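(* Expository note, not part of the original file: the remaining case l2 < Rabs l1 is refuted. Taking N beyond both convergence ranks forces sum_f_R0 An N < (Rabs l1 + l2) / 2 < Rabs (SP fn N x), which contradicts the pointwise bound Rabs (SP fn N x) <= sum_f_R0 An N established at the end of the proof. *)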
cut (forall n0:nat, Rabs (SP fn n0 x) <= sum_f_R0 An n0). intro; cut (0 < (Rabs l1 - l2) / 2). intro; unfold Un_cv in H, H0. elim (H _ H3); intros Na H4. elim (H0 _ H3); intros Nb H5. set (N := max Na Nb). unfold R_dist in H4, H5. cut (Rabs (sum_f_R0 An N - l2) < (Rabs l1 - l2) / 2). intro; cut (Rabs (Rabs l1 - Rabs (SP fn N x)) < (Rabs l1 - l2) / 2). intro; cut (sum_f_R0 An N < (Rabs l1 + l2) / 2). intro; cut ((Rabs l1 + l2) / 2 < Rabs (SP fn N x)). intro; cut (sum_f_R0 An N < Rabs (SP fn N x)). intro; assert (H11 := H2 N). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H11 H10)). apply Rlt_trans with ((Rabs l1 + l2) / 2); assumption. case (Rcase_abs (Rabs l1 - Rabs (SP fn N x))); intro. apply Rlt_trans with (Rabs l1). apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite (Rmult_comm 2); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite double; apply Rplus_lt_compat_l; apply r. discrR. apply (Rminus_lt _ _ r0). rewrite (Rabs_right _ r0) in H7. apply Rplus_lt_reg_r with ((Rabs l1 - l2) / 2 - Rabs (SP fn N x)). replace ((Rabs l1 - l2) / 2 - Rabs (SP fn N x) + (Rabs l1 + l2) / 2) with (Rabs l1 - Rabs (SP fn N x)). unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; apply H7. unfold Rdiv; rewrite Rmult_plus_distr_r; rewrite <- (Rmult_comm (/ 2)); rewrite Rmult_minus_distr_l; repeat rewrite (Rmult_comm (/ 2)); pattern (Rabs l1) at 1; rewrite double_var; unfold Rdiv; ring. case (Rcase_abs (sum_f_R0 An N - l2)); intro. apply Rlt_trans with l2. apply (Rminus_lt _ _ r0). apply Rmult_lt_reg_l with 2. prove_sup0. rewrite (double l2); unfold Rdiv; rewrite (Rmult_comm 2); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite (Rplus_comm (Rabs l1)); apply Rplus_lt_compat_l; apply r. discrR. rewrite (Rabs_right _ r0) in H6; apply Rplus_lt_reg_r with (- l2). replace (- l2 + (Rabs l1 + l2) / 2) with ((Rabs l1 - l2) / 2). rewrite Rplus_comm; apply H6. unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite Rmult_minus_distr_l; rewrite Rmult_plus_distr_r; pattern l2 at 2; rewrite double_var; repeat rewrite (Rmult_comm (/ 2)); rewrite Ropp_plus_distr; unfold Rdiv; ring. apply Rle_lt_trans with (Rabs (SP fn N x - l1)). rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr'; apply Rabs_triang_inv2. apply H4; unfold ge, N; apply le_max_l. apply H5; unfold ge, N; apply le_max_r. unfold Rdiv; apply Rmult_lt_0_compat. apply Rplus_lt_reg_r with l2. rewrite Rplus_0_r; replace (l2 + (Rabs l1 - l2)) with (Rabs l1); [ apply r | ring ]. apply Rinv_0_lt_compat; prove_sup0. intros; induction n0 as [| n0 Hrecn0]. unfold SP; simpl; apply H1. unfold SP; simpl. apply Rle_trans with (Rabs (sum_f_R0 (fun k:nat => fn k x) n0) + Rabs (fn (S n0) x)). apply Rabs_triang. apply Rle_trans with (sum_f_R0 An n0 + Rabs (fn (S n0) x)). do 2 rewrite <- (Rplus_comm (Rabs (fn (S n0) x))). apply Rplus_le_compat_l; apply Hrecn0. apply Rplus_le_compat_l; apply H1. Qed. coq-8.4pl2/theories/Reals/Exp_prop.v0000640000175000001440000007306112010532755016511 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* / INR (fact k) * x ^ k) N. Lemma E1_cvg : forall x:R, Un_cv (E1 x) (exp x). Proof. intro; unfold exp; unfold projT1. case (exist_exp x); intro. unfold exp_in, Un_cv; unfold infinite_sum, E1; trivial. Qed. 
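(* Expository note, not part of the original file: the next definitions set up a finite Cauchy product of exponential partial sums. Informally E1 x n * E1 y n = E1 (x + y) n + Reste_E x y n, where Reste_E collects the cross terms x^i * y^j / (INR (fact i) * INR (fact j)) with i + j > n; exp_form states this identity, Reste_E_cv shows the remainder tends to 0, and exp_plus deduces exp (x + y) = exp x * exp y. *)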
Definition Reste_E (x y:R) (N:nat) : R := sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => / INR (fact (S (l + k))) * x ^ S (l + k) * (/ INR (fact (N - l)) * y ^ (N - l))) ( pred (N - k))) (pred N). Lemma exp_form : forall (x y:R) (n:nat), (0 < n)%nat -> E1 x n * E1 y n - Reste_E x y n = E1 (x + y) n. Proof. intros; unfold E1. rewrite cauchy_finite. unfold Reste_E; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; apply sum_eq; intros. rewrite binomial. rewrite scal_sum; apply sum_eq; intros. unfold C; unfold Rdiv; repeat rewrite Rmult_assoc; rewrite (Rmult_comm (INR (fact i))); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite Rinv_mult_distr. ring. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. apply H. Qed. Definition maj_Reste_E (x y:R) (N:nat) : R := 4 * (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (2 * N) / Rsqr (INR (fact (div2 (pred N))))). (**********) Lemma div2_double : forall N:nat, div2 (2 * N) = N. Proof. intro; induction N as [| N HrecN]. reflexivity. replace (2 * S N)%nat with (S (S (2 * N))). simpl; simpl in HrecN; rewrite HrecN; reflexivity. ring. Qed. Lemma div2_S_double : forall N:nat, div2 (S (2 * N)) = N. Proof. intro; induction N as [| N HrecN]. reflexivity. replace (2 * S N)%nat with (S (S (2 * N))). simpl; simpl in HrecN; rewrite HrecN; reflexivity. ring. Qed. Lemma div2_not_R0 : forall N:nat, (1 < N)%nat -> (0 < div2 N)%nat. Proof. intros; induction N as [| N HrecN]. elim (lt_n_O _ H). cut ((1 < N)%nat \/ N = 1%nat). intro; elim H0; intro. assert (H2 := even_odd_dec N). elim H2; intro. rewrite <- (even_div2 _ a); apply HrecN; assumption. rewrite <- (odd_div2 _ b); apply lt_O_Sn. rewrite H1; simpl; apply lt_O_Sn. inversion H. right; reflexivity. left; apply lt_le_trans with 2%nat; [ apply lt_n_Sn | apply H1 ]. Qed. Lemma Reste_E_maj : forall (x y:R) (N:nat), (0 < N)%nat -> Rabs (Reste_E x y N) <= maj_Reste_E x y N. Proof. intros; set (M := Rmax 1 (Rmax (Rabs x) (Rabs y))). apply Rle_trans with (M ^ (2 * N) * sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => / Rsqr (INR (fact (div2 (S N))))) (pred (N - k))) (pred N)). unfold Reste_E. apply Rle_trans with (sum_f_R0 (fun k:nat => Rabs (sum_f_R0 (fun l:nat => / INR (fact (S (l + k))) * x ^ S (l + k) * (/ INR (fact (N - l)) * y ^ (N - l))) ( pred (N - k)))) (pred N)). apply (Rsum_abs (fun k:nat => sum_f_R0 (fun l:nat => / INR (fact (S (l + k))) * x ^ S (l + k) * (/ INR (fact (N - l)) * y ^ (N - l))) ( pred (N - k))) (pred N)). apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => Rabs (/ INR (fact (S (l + k))) * x ^ S (l + k) * (/ INR (fact (N - l)) * y ^ (N - l)))) ( pred (N - k))) (pred N)). apply sum_Rle; intros. apply (Rsum_abs (fun l:nat => / INR (fact (S (l + n))) * x ^ S (l + n) * (/ INR (fact (N - l)) * y ^ (N - l)))). apply Rle_trans with (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => M ^ (2 * N) * / INR (fact (S l)) * / INR (fact (N - l))) (pred (N - k))) (pred N)). apply sum_Rle; intros. apply sum_Rle; intros. repeat rewrite Rabs_mult. do 2 rewrite <- RPow_abs. rewrite (Rabs_right (/ INR (fact (S (n0 + n))))). rewrite (Rabs_right (/ INR (fact (N - n0)))). replace (/ INR (fact (S (n0 + n))) * Rabs x ^ S (n0 + n) * (/ INR (fact (N - n0)) * Rabs y ^ (N - n0))) with (/ INR (fact (N - n0)) * / INR (fact (S (n0 + n))) * Rabs x ^ S (n0 + n) * Rabs y ^ (N - n0)); [ idtac | ring ]. rewrite <- (Rmult_comm (/ INR (fact (N - n0)))). repeat rewrite Rmult_assoc. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. 
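(* Expository note, not part of the original file: writing M for Rmax 1 (Rmax (Rabs x) (Rabs y)), each factor is now bounded separately: / INR (fact (S (n0 + n))) <= / INR (fact (S n0)) because fact is monotone, and the powers of Rabs x and Rabs y are bounded by powers of M, so every term of the double sum is at most M ^ (2 * N) * / INR (fact (S l)) * / INR (fact (N - l)). *)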
apply Rle_trans with (/ INR (fact (S n0)) * Rabs x ^ S (n0 + n) * Rabs y ^ (N - n0)). rewrite (Rmult_comm (/ INR (fact (S (n0 + n))))); rewrite (Rmult_comm (/ INR (fact (S n0)))); repeat rewrite Rmult_assoc; apply Rmult_le_compat_l. apply pow_le; apply Rabs_pos. rewrite (Rmult_comm (/ INR (fact (S n0)))); apply Rmult_le_compat_l. apply pow_le; apply Rabs_pos. apply Rle_Rinv. apply INR_fact_lt_0. apply INR_fact_lt_0. apply le_INR; apply fact_le; apply le_n_S. apply le_plus_l. rewrite (Rmult_comm (M ^ (2 * N))); rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rle_trans with (M ^ S (n0 + n) * Rabs y ^ (N - n0)). do 2 rewrite <- (Rmult_comm (Rabs y ^ (N - n0))). apply Rmult_le_compat_l. apply pow_le; apply Rabs_pos. apply pow_incr; split. apply Rabs_pos. apply Rle_trans with (Rmax (Rabs x) (Rabs y)). apply RmaxLess1. unfold M; apply RmaxLess2. apply Rle_trans with (M ^ S (n0 + n) * M ^ (N - n0)). apply Rmult_le_compat_l. apply pow_le; apply Rle_trans with 1. left; apply Rlt_0_1. unfold M; apply RmaxLess1. apply pow_incr; split. apply Rabs_pos. apply Rle_trans with (Rmax (Rabs x) (Rabs y)). apply RmaxLess2. unfold M; apply RmaxLess2. rewrite <- pow_add; replace (S (n0 + n) + (N - n0))%nat with (N + S n)%nat. apply Rle_pow. unfold M; apply RmaxLess1. replace (2 * N)%nat with (N + N)%nat; [ idtac | ring ]. apply plus_le_compat_l. replace N with (S (pred N)). apply le_n_S; apply H0. symmetry ; apply S_pred with 0%nat; apply H. apply INR_eq; do 2 rewrite plus_INR; do 2 rewrite S_INR; rewrite plus_INR; rewrite minus_INR. ring. apply le_trans with (pred (N - n)). apply H1. apply le_S_n. replace (S (pred (N - n))) with (N - n)%nat. apply le_trans with N. apply (fun p n m:nat => plus_le_reg_l n m p) with n. rewrite <- le_plus_minus. apply le_plus_r. apply le_trans with (pred N). apply H0. apply le_pred_n. apply le_n_Sn. apply S_pred with 0%nat. apply plus_lt_reg_l with n. rewrite <- le_plus_minus. replace (n + 0)%nat with n; [ idtac | ring ]. apply le_lt_trans with (pred N). apply H0. apply lt_pred_n_n. apply H. apply le_trans with (pred N). apply H0. apply le_pred_n. apply Rle_ge; left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rle_ge; left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. rewrite scal_sum. apply sum_Rle; intros. rewrite <- Rmult_comm. rewrite scal_sum. apply sum_Rle; intros. rewrite (Rmult_comm (/ Rsqr (INR (fact (div2 (S N)))))). rewrite Rmult_assoc; apply Rmult_le_compat_l. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. unfold M; apply RmaxLess1. assert (H2 := even_odd_cor N). elim H2; intros N0 H3. elim H3; intro. apply Rle_trans with (/ INR (fact n0) * / INR (fact (N - n0))). do 2 rewrite <- (Rmult_comm (/ INR (fact (N - n0)))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rle_Rinv. apply INR_fact_lt_0. apply INR_fact_lt_0. apply le_INR. apply fact_le. apply le_n_Sn. replace (/ INR (fact n0) * / INR (fact (N - n0))) with (C N n0 / INR (fact N)). pattern N at 1; rewrite H4. apply Rle_trans with (C N N0 / INR (fact N)). unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ INR (fact N))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. rewrite H4. apply C_maj. rewrite <- H4; apply le_trans with (pred (N - n)). apply H1. apply le_S_n. replace (S (pred (N - n))) with (N - n)%nat. apply le_trans with N. apply (fun p n m:nat => plus_le_reg_l n m p) with n. rewrite <- le_plus_minus. apply le_plus_r. apply le_trans with (pred N). apply H0. apply le_pred_n. 
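(* Expository note, not part of the original file: the remaining subgoals are index bookkeeping (S (pred (N - n)) = N - n, n <= pred N, and the bound required by C_maj); the analytic content above is that C N n0 / INR (fact N) is largest at the middle binomial coefficient, which yields the bound / Rsqr (INR (fact (div2 (S N)))). *)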
apply le_n_Sn. apply S_pred with 0%nat. apply plus_lt_reg_l with n. rewrite <- le_plus_minus. replace (n + 0)%nat with n; [ idtac | ring ]. apply le_lt_trans with (pred N). apply H0. apply lt_pred_n_n. apply H. apply le_trans with (pred N). apply H0. apply le_pred_n. replace (C N N0 / INR (fact N)) with (/ Rsqr (INR (fact N0))). rewrite H4; rewrite div2_S_double; right; reflexivity. unfold Rsqr, C, Rdiv. repeat rewrite Rinv_mult_distr. rewrite (Rmult_comm (INR (fact N))). repeat rewrite Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; replace (N - N0)%nat with N0. ring. replace N with (N0 + N0)%nat. symmetry ; apply minus_plus. rewrite H4. ring. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. unfold C, Rdiv. rewrite (Rmult_comm (INR (fact N))). repeat rewrite Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rinv_mult_distr. rewrite Rmult_1_r; ring. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. replace (/ INR (fact (S n0)) * / INR (fact (N - n0))) with (C (S N) (S n0) / INR (fact (S N))). apply Rle_trans with (C (S N) (S N0) / INR (fact (S N))). unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ INR (fact (S N)))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. cut (S N = (2 * S N0)%nat). intro; rewrite H5; apply C_maj. rewrite <- H5; apply le_n_S. apply le_trans with (pred (N - n)). apply H1. apply le_S_n. replace (S (pred (N - n))) with (N - n)%nat. apply le_trans with N. apply (fun p n m:nat => plus_le_reg_l n m p) with n. rewrite <- le_plus_minus. apply le_plus_r. apply le_trans with (pred N). apply H0. apply le_pred_n. apply le_n_Sn. apply S_pred with 0%nat. apply plus_lt_reg_l with n. rewrite <- le_plus_minus. replace (n + 0)%nat with n; [ idtac | ring ]. apply le_lt_trans with (pred N). apply H0. apply lt_pred_n_n. apply H. apply le_trans with (pred N). apply H0. apply le_pred_n. rewrite H4; ring. cut (S N = (2 * S N0)%nat). intro. replace (C (S N) (S N0) / INR (fact (S N))) with (/ Rsqr (INR (fact (S N0)))). rewrite H5; rewrite div2_double. right; reflexivity. unfold Rsqr, C, Rdiv. repeat rewrite Rinv_mult_distr. replace (S N - S N0)%nat with (S N0). rewrite (Rmult_comm (INR (fact (S N)))). repeat rewrite Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; reflexivity. apply INR_fact_neq_0. replace (S N) with (S N0 + S N0)%nat. symmetry ; apply minus_plus. rewrite H5; ring. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. rewrite H4; ring. unfold C, Rdiv. rewrite (Rmult_comm (INR (fact (S N)))). rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; rewrite Rinv_mult_distr. reflexivity. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. unfold maj_Reste_E. unfold Rdiv; rewrite (Rmult_comm 4). rewrite Rmult_assoc. apply Rmult_le_compat_l. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. apply RmaxLess1. apply Rle_trans with (sum_f_R0 (fun k:nat => INR (N - k) * / Rsqr (INR (fact (div2 (S N))))) (pred N)). apply sum_Rle; intros. rewrite sum_cte. replace (S (pred (N - n))) with (N - n)%nat. right; apply Rmult_comm. apply S_pred with 0%nat. apply plus_lt_reg_l with n. rewrite <- le_plus_minus. replace (n + 0)%nat with n; [ idtac | ring ]. apply le_lt_trans with (pred N). apply H0. apply lt_pred_n_n. apply H. apply le_trans with (pred N). apply H0. apply le_pred_n. apply Rle_trans with (sum_f_R0 (fun k:nat => INR N * / Rsqr (INR (fact (div2 (S N))))) (pred N)). apply sum_Rle; intros. 
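(* Expository note, not part of the original file: at most N * N terms remain, each bounded by / Rsqr (INR (fact (div2 (S N)))). Since INR N <= INR (2 * div2 (S N)) and fact (div2 (S N)) = div2 (S N) * fact (div2 (pred N)), the factor N * N is absorbed into the constant 4 appearing in maj_Reste_E. *)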
do 2 rewrite <- (Rmult_comm (/ Rsqr (INR (fact (div2 (S N)))))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply Rsqr_pos_lt. apply INR_fact_neq_0. apply le_INR. apply (fun p n m:nat => plus_le_reg_l n m p) with n. rewrite <- le_plus_minus. apply le_plus_r. apply le_trans with (pred N). apply H0. apply le_pred_n. rewrite sum_cte; replace (S (pred N)) with N. cut (div2 (S N) = S (div2 (pred N))). intro; rewrite H0. rewrite fact_simpl; rewrite mult_comm; rewrite mult_INR; rewrite Rsqr_mult. rewrite Rinv_mult_distr. rewrite (Rmult_comm (INR N)); repeat rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply Rsqr_pos_lt; apply INR_fact_neq_0. rewrite <- H0. cut (INR N <= INR (2 * div2 (S N))). intro; apply Rmult_le_reg_l with (Rsqr (INR (div2 (S N)))). apply Rsqr_pos_lt. apply not_O_INR; red; intro. cut (1 < S N)%nat. intro; assert (H4 := div2_not_R0 _ H3). rewrite H2 in H4; elim (lt_n_O _ H4). apply lt_n_S; apply H. repeat rewrite <- Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_l. replace (INR N * INR N) with (Rsqr (INR N)); [ idtac | reflexivity ]. rewrite Rmult_assoc. rewrite Rmult_comm. replace 4 with (Rsqr 2); [ idtac | ring_Rsqr ]. rewrite <- Rsqr_mult. apply Rsqr_incr_1. replace 2 with (INR 2). rewrite <- mult_INR; apply H1. reflexivity. left; apply lt_INR_0; apply H. left; apply Rmult_lt_0_compat. prove_sup0. apply lt_INR_0; apply div2_not_R0. apply lt_n_S; apply H. cut (1 < S N)%nat. intro; unfold Rsqr; apply prod_neq_R0; apply not_O_INR; intro; assert (H4 := div2_not_R0 _ H2); rewrite H3 in H4; elim (lt_n_O _ H4). apply lt_n_S; apply H. assert (H1 := even_odd_cor N). elim H1; intros N0 H2. elim H2; intro. pattern N at 2; rewrite H3. rewrite div2_S_double. right; rewrite H3; reflexivity. pattern N at 2; rewrite H3. replace (S (S (2 * N0))) with (2 * S N0)%nat. rewrite div2_double. rewrite H3. rewrite S_INR; do 2 rewrite mult_INR. rewrite (S_INR N0). rewrite Rmult_plus_distr_l. apply Rplus_le_compat_l. rewrite Rmult_1_r. simpl. pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; apply Rlt_0_1. ring. unfold Rsqr; apply prod_neq_R0; apply INR_fact_neq_0. unfold Rsqr; apply prod_neq_R0; apply not_O_INR; discriminate. assert (H0 := even_odd_cor N). elim H0; intros N0 H1. elim H1; intro. cut (0 < N0)%nat. intro; rewrite H2. rewrite div2_S_double. replace (2 * N0)%nat with (S (S (2 * pred N0))). replace (pred (S (S (2 * pred N0)))) with (S (2 * pred N0)). rewrite div2_S_double. apply S_pred with 0%nat; apply H3. reflexivity. omega. omega. rewrite H2. replace (pred (S (2 * N0))) with (2 * N0)%nat; [ idtac | reflexivity ]. replace (S (S (2 * N0))) with (2 * S N0)%nat. do 2 rewrite div2_double. reflexivity. ring. apply S_pred with 0%nat; apply H. Qed. Lemma maj_Reste_cv_R0 : forall x y:R, Un_cv (maj_Reste_E x y) 0. Proof. intros; assert (H := Majxy_cv_R0 x y). unfold Un_cv in H; unfold Un_cv; intros. cut (0 < eps / 4); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H _ H1); intros N0 H2. exists (max (2 * S N0) 2); intros. unfold R_dist in H2; unfold R_dist; rewrite Rminus_0_r; unfold Majxy in H2; unfold maj_Reste_E. rewrite Rabs_right. apply Rle_lt_trans with (4 * (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (4 * S (div2 (pred n))) / INR (fact (div2 (pred n))))). apply Rmult_le_compat_l. left; prove_sup0. unfold Rdiv, Rsqr; rewrite Rinv_mult_distr. 
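(* Expository note, not part of the original file: maj_Reste_E x y n is bounded by 4 * Majxy x y (div2 (pred n)), and Majxy x y tends to 0 by Majxy_cv_R0; the rank max (2 * S N0) 2 guarantees div2 (pred n) >= N0, so the whole majorant drops below eps. *)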
rewrite (Rmult_comm (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (2 * n))); rewrite (Rmult_comm (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (4 * S (div2 (pred n))))) ; rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply Rle_trans with (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (2 * n)). rewrite Rmult_comm; pattern (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (2 * n)) at 2; rewrite <- Rmult_1_r; apply Rmult_le_compat_l. apply pow_le; apply Rle_trans with 1. left; apply Rlt_0_1. apply RmaxLess1. apply Rmult_le_reg_l with (INR (fact (div2 (pred n)))). apply INR_fact_lt_0. rewrite Rmult_1_r; rewrite <- Rinv_r_sym. replace 1 with (INR 1); [ apply le_INR | reflexivity ]. apply lt_le_S. apply INR_lt. apply INR_fact_lt_0. apply INR_fact_neq_0. apply Rle_pow. apply RmaxLess1. assert (H4 := even_odd_cor n). elim H4; intros N1 H5. elim H5; intro. cut (0 < N1)%nat. intro. rewrite H6. replace (pred (2 * N1)) with (S (2 * pred N1)). rewrite div2_S_double. omega. omega. assert (0 < n)%nat. apply lt_le_trans with 2%nat. apply lt_O_Sn. apply le_trans with (max (2 * S N0) 2). apply le_max_r. apply H3. omega. rewrite H6. replace (pred (S (2 * N1))) with (2 * N1)%nat. rewrite div2_double. replace (4 * S N1)%nat with (2 * (2 * S N1))%nat. apply (fun m n p:nat => mult_le_compat_l p n m). replace (2 * S N1)%nat with (S (S (2 * N1))). apply le_n_Sn. ring. ring. reflexivity. apply INR_fact_neq_0. apply INR_fact_neq_0. apply Rmult_lt_reg_l with (/ 4). apply Rinv_0_lt_compat; prove_sup0. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite Rmult_comm. replace (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (4 * S (div2 (pred n))) / INR (fact (div2 (pred n)))) with (Rabs (Rmax 1 (Rmax (Rabs x) (Rabs y)) ^ (4 * S (div2 (pred n))) / INR (fact (div2 (pred n))) - 0)). apply H2; unfold ge. cut (2 * S N0 <= n)%nat. intro; apply le_S_n. apply INR_le; apply Rmult_le_reg_l with (INR 2). simpl; prove_sup0. do 2 rewrite <- mult_INR; apply le_INR. apply le_trans with n. apply H4. assert (H5 := even_odd_cor n). elim H5; intros N1 H6. elim H6; intro. cut (0 < N1)%nat. intro. rewrite H7. apply (fun m n p:nat => mult_le_compat_l p n m). replace (pred (2 * N1)) with (S (2 * pred N1)). rewrite div2_S_double. replace (S (pred N1)) with N1. apply le_n. apply S_pred with 0%nat; apply H8. replace (2 * N1)%nat with (S (S (2 * pred N1))). reflexivity. pattern N1 at 2; replace N1 with (S (pred N1)). ring. symmetry ; apply S_pred with 0%nat; apply H8. apply INR_lt. apply Rmult_lt_reg_l with (INR 2). simpl; prove_sup0. rewrite Rmult_0_r; rewrite <- mult_INR. apply lt_INR_0. rewrite <- H7. apply lt_le_trans with 2%nat. apply lt_O_Sn. apply le_trans with (max (2 * S N0) 2). apply le_max_r. apply H3. rewrite H7. replace (pred (S (2 * N1))) with (2 * N1)%nat. rewrite div2_double. replace (2 * S N1)%nat with (S (S (2 * N1))). apply le_n_Sn. ring. reflexivity. apply le_trans with (max (2 * S N0) 2). apply le_max_l. apply H3. rewrite Rminus_0_r; apply Rabs_right. apply Rle_ge. unfold Rdiv; apply Rmult_le_pos. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. apply RmaxLess1. left; apply Rinv_0_lt_compat; apply INR_fact_lt_0. discrR. apply Rle_ge. unfold Rdiv; apply Rmult_le_pos. left; prove_sup0. apply Rmult_le_pos. apply pow_le. apply Rle_trans with 1. left; apply Rlt_0_1. apply RmaxLess1. left; apply Rinv_0_lt_compat; apply Rsqr_pos_lt; apply INR_fact_neq_0. Qed. (**********) Lemma Reste_E_cv : forall x y:R, Un_cv (Reste_E x y) 0. Proof. intros; assert (H := maj_Reste_cv_R0 x y). 
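(* Expository note, not part of the original file: Reste_E x y is squeezed: 0 <= Rabs (Reste_E x y n) <= maj_Reste_E x y n for n > 0 by Reste_E_maj, and the majorant tends to 0, hence Un_cv (Reste_E x y) 0. *)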
unfold Un_cv in H; unfold Un_cv; intros; elim (H _ H0); intros. exists (max x0 1); intros. unfold R_dist; rewrite Rminus_0_r. apply Rle_lt_trans with (maj_Reste_E x y n). apply Reste_E_maj. apply lt_le_trans with 1%nat. apply lt_O_Sn. apply le_trans with (max x0 1). apply le_max_r. apply H2. replace (maj_Reste_E x y n) with (R_dist (maj_Reste_E x y n) 0). apply H1. unfold ge; apply le_trans with (max x0 1). apply le_max_l. apply H2. unfold R_dist; rewrite Rminus_0_r; apply Rabs_right. apply Rle_ge; apply Rle_trans with (Rabs (Reste_E x y n)). apply Rabs_pos. apply Reste_E_maj. apply lt_le_trans with 1%nat. apply lt_O_Sn. apply le_trans with (max x0 1). apply le_max_r. apply H2. Qed.
(**********)
Lemma exp_plus : forall x y:R, exp (x + y) = exp x * exp y. Proof. intros; assert (H0 := E1_cvg x). assert (H := E1_cvg y). assert (H1 := E1_cvg (x + y)). eapply UL_sequence. apply H1. assert (H2 := CV_mult _ _ _ _ H0 H). assert (H3 := CV_minus _ _ _ _ H2 (Reste_E_cv x y)). unfold Un_cv; unfold Un_cv in H3; intros. elim (H3 _ H4); intros. exists (S x0); intros. rewrite <- (exp_form x y n). rewrite Rminus_0_r in H5. apply H5. unfold ge; apply le_trans with (S x0). apply le_n_Sn. apply H6. apply lt_le_trans with (S x0). apply lt_O_Sn. apply H6. Qed.
(**********)
Lemma exp_pos_pos : forall x:R, 0 < x -> 0 < exp x. Proof. intros; set (An := fun N:nat => / INR (fact N) * x ^ N). cut (Un_cv (fun n:nat => sum_f_R0 An n) (exp x)). intro; apply Rlt_le_trans with (sum_f_R0 An 0). unfold An; simpl; rewrite Rinv_1; rewrite Rmult_1_r; apply Rlt_0_1. apply sum_incr. assumption. intro; unfold An; left; apply Rmult_lt_0_compat. apply Rinv_0_lt_compat; apply INR_fact_lt_0. apply (pow_lt _ n H). unfold exp; unfold projT1; case (exist_exp x); intro. unfold exp_in; unfold infinite_sum, Un_cv; trivial. Qed.
(**********)
Lemma exp_pos : forall x:R, 0 < exp x. Proof. intro; case (total_order_T 0 x); intro. elim s; intro. apply (exp_pos_pos _ a). rewrite <- b; rewrite exp_0; apply Rlt_0_1. replace (exp x) with (1 / exp (- x)). unfold Rdiv; apply Rmult_lt_0_compat. apply Rlt_0_1. apply Rinv_0_lt_compat; apply exp_pos_pos. apply (Ropp_0_gt_lt_contravar _ r). cut (exp (- x) <> 0). intro; unfold Rdiv; apply Rmult_eq_reg_l with (exp (- x)). rewrite Rmult_1_l; rewrite <- Rinv_r_sym. rewrite <- exp_plus. rewrite Rplus_opp_l; rewrite exp_0; reflexivity. apply H. apply H. assert (H := exp_plus x (- x)). rewrite Rplus_opp_r in H; rewrite exp_0 in H. red; intro; rewrite H0 in H. rewrite Rmult_0_r in H. elim R1_neq_R0; assumption. Qed.
(* ((exp h)-1)/h -> 1 as h -> 0 *)
Lemma derivable_pt_lim_exp_0 : derivable_pt_lim exp 0 1. Proof. unfold derivable_pt_lim; intros. set (fn := fun (N:nat) (x:R) => x ^ N / INR (fact (S N))). cut (CVN_R fn). intro; cut (forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }). intro cv; cut (forall n:nat, continuity (fn n)). intro; cut (continuity (SFL fn cv)). intro; unfold continuity in H1. assert (H2 := H1 0). unfold continuity_pt in H2; unfold continue_in in H2; unfold limit1_in in H2; unfold limit_in in H2; simpl in H2; unfold R_dist in H2. elim (H2 _ H); intros alp H3. elim H3; intros. exists (mkposreal _ H4); intros. rewrite Rplus_0_l; rewrite exp_0. replace ((exp h - 1) / h) with (SFL fn cv h). replace 1 with (SFL fn cv 0). apply H5. split. unfold D_x, no_cond; split. trivial. apply (not_eq_sym H6). rewrite Rminus_0_r; apply H7. unfold SFL. case (cv 0); intros. eapply UL_sequence. apply u. unfold Un_cv, SP. intros; exists 1%nat; intros. unfold R_dist; rewrite decomp_sum.
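(* Expository note, not part of the original file: at the point 0 every tail term vanishes, since fn (S i) 0 = 0 ^ S i / INR (fact (S (S i))) = 0, while fn 0 0 = 1; the decomposition below therefore identifies SFL fn cv 0 with 1. *)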
rewrite (Rplus_comm (fn 0%nat 0)). replace (fn 0%nat 0) with 1. unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r. replace (sum_f_R0 (fun i:nat => fn (S i) 0) (pred n)) with 0. rewrite Rabs_R0; apply H8. symmetry ; apply sum_eq_R0; intros. unfold fn. simpl. unfold Rdiv; do 2 rewrite Rmult_0_l; reflexivity. unfold fn; simpl. unfold Rdiv; rewrite Rinv_1; rewrite Rmult_1_r; reflexivity. apply lt_le_trans with 1%nat; [ apply lt_n_Sn | apply H9 ]. unfold SFL, exp. case (cv h); case (exist_exp h); simpl; intros. eapply UL_sequence. apply u. unfold Un_cv; intros. unfold exp_in in e. unfold infinite_sum in e. cut (0 < eps0 * Rabs h). intro; elim (e _ H9); intros N0 H10. exists N0; intros. unfold R_dist. apply Rmult_lt_reg_l with (Rabs h). apply Rabs_pos_lt; assumption. rewrite <- Rabs_mult. rewrite Rmult_minus_distr_l. replace (h * ((x - 1) / h)) with (x - 1). unfold R_dist in H10. replace (h * SP fn n h - (x - 1)) with (sum_f_R0 (fun i:nat => / INR (fact i) * h ^ i) (S n) - x). rewrite (Rmult_comm (Rabs h)). apply H10. unfold ge. apply le_trans with (S N0). apply le_n_Sn. apply le_n_S; apply H11. rewrite decomp_sum. replace (/ INR (fact 0) * h ^ 0) with 1. unfold Rminus. rewrite Ropp_plus_distr. rewrite Ropp_involutive. rewrite <- (Rplus_comm (- x)). rewrite <- (Rplus_comm (- x + 1)). rewrite Rplus_assoc; repeat apply Rplus_eq_compat_l. replace (pred (S n)) with n; [ idtac | reflexivity ]. unfold SP. rewrite scal_sum. apply sum_eq; intros. unfold fn. replace (h ^ S i) with (h * h ^ i). unfold Rdiv; ring. simpl; ring. simpl; rewrite Rinv_1; rewrite Rmult_1_r; reflexivity. apply lt_O_Sn. unfold Rdiv. rewrite <- Rmult_assoc. symmetry ; apply Rinv_r_simpl_m. assumption. apply Rmult_lt_0_compat. apply H8. apply Rabs_pos_lt; assumption. apply SFL_continuity; assumption. intro; unfold fn. replace (fun x:R => x ^ n / INR (fact (S n))) with (pow_fct n / fct_cte (INR (fact (S n))))%F; [ idtac | reflexivity ]. apply continuity_div. apply derivable_continuous; apply (derivable_pow n). apply derivable_continuous; apply derivable_const. intro; unfold fct_cte; apply INR_fact_neq_0. apply (CVN_R_CVS _ X). assert (H0 := Alembert_exp). unfold CVN_R. intro; unfold CVN_r. exists (fun N:nat => r ^ N / INR (fact (S N))). cut { l:R | Un_cv (fun n:nat => sum_f_R0 (fun k:nat => Rabs (r ^ k / INR (fact (S k)))) n) l }. intro X. elim X; intros. exists x; intros. split. apply p. unfold Boule; intros. rewrite Rminus_0_r in H1. unfold fn. unfold Rdiv; rewrite Rabs_mult. cut (0 < INR (fact (S n))). intro. rewrite (Rabs_right (/ INR (fact (S n)))). do 2 rewrite <- (Rmult_comm (/ INR (fact (S n)))). apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply H2. rewrite <- RPow_abs. apply pow_maj_Rabs. rewrite Rabs_Rabsolu; left; apply H1. apply Rle_ge; left; apply Rinv_0_lt_compat; apply H2. apply INR_fact_lt_0. cut ((r:R) <> 0). intro; apply Alembert_C2. intro; apply Rabs_no_R0. unfold Rdiv; apply prod_neq_R0. apply pow_nonzero; assumption. apply Rinv_neq_0_compat; apply INR_fact_neq_0. unfold Un_cv in H0. unfold Un_cv; intros. cut (0 < eps0 / r); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; apply (cond_pos r) ] ]. elim (H0 _ H3); intros N0 H4. exists N0; intros. cut (S n >= N0)%nat. intro hyp_sn. assert (H6 := H4 _ hyp_sn). unfold R_dist in H6; rewrite Rminus_0_r in H6. rewrite Rabs_Rabsolu in H6. unfold R_dist; rewrite Rminus_0_r. rewrite Rabs_Rabsolu. 
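(* Expository note, not part of the original file: normal convergence on the ball of radius r comes from the ratio test (Alembert_C2); the ratio of consecutive general terms computed below equals r / INR (S (S n)), which tends to 0, and Alembert_exp supplies that limit once rescaled by eps0 / r. *)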
replace (Rabs (r ^ S n / INR (fact (S (S n)))) / Rabs (r ^ n / INR (fact (S n)))) with (r * / INR (fact (S (S n))) * / / INR (fact (S n))). rewrite Rmult_assoc; rewrite Rabs_mult. rewrite (Rabs_right r). apply Rmult_lt_reg_l with (/ r). apply Rinv_0_lt_compat; apply (cond_pos r). rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite <- (Rmult_comm eps0). apply H6. assumption. apply Rle_ge; left; apply (cond_pos r). unfold Rdiv. repeat rewrite Rabs_mult. repeat rewrite Rabs_Rinv. rewrite Rinv_mult_distr. repeat rewrite Rabs_right. rewrite Rinv_involutive. rewrite (Rmult_comm r). rewrite (Rmult_comm (r ^ S n)). repeat rewrite Rmult_assoc. apply Rmult_eq_compat_l. rewrite (Rmult_comm r). rewrite <- Rmult_assoc; rewrite <- (Rmult_comm (INR (fact (S n)))). apply Rmult_eq_compat_l. simpl. rewrite Rmult_assoc; rewrite <- Rinv_r_sym. ring. apply pow_nonzero; assumption. apply INR_fact_neq_0. apply Rle_ge; left; apply INR_fact_lt_0. apply Rle_ge; left; apply pow_lt; apply (cond_pos r). apply Rle_ge; left; apply INR_fact_lt_0. apply Rle_ge; left; apply pow_lt; apply (cond_pos r). apply Rabs_no_R0; apply pow_nonzero; assumption. apply Rinv_neq_0_compat; apply Rabs_no_R0; apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. unfold ge; apply le_trans with n. apply H5. apply le_n_Sn. assert (H1 := cond_pos r); red; intro; rewrite H2 in H1; elim (Rlt_irrefl _ H1). Qed. (**********) Lemma derivable_pt_lim_exp : forall x:R, derivable_pt_lim exp x (exp x). Proof. intro; assert (H0 := derivable_pt_lim_exp_0). unfold derivable_pt_lim in H0; unfold derivable_pt_lim; intros. cut (0 < eps / exp x); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ apply H | apply Rinv_0_lt_compat; apply exp_pos ] ]. elim (H0 _ H1); intros del H2. exists del; intros. assert (H5 := H2 _ H3 H4). rewrite Rplus_0_l in H5; rewrite exp_0 in H5. replace ((exp (x + h) - exp x) / h - exp x) with (exp x * ((exp h - 1) / h - 1)). rewrite Rabs_mult; rewrite (Rabs_right (exp x)). apply Rmult_lt_reg_l with (/ exp x). apply Rinv_0_lt_compat; apply exp_pos. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite <- (Rmult_comm eps). apply H5. assert (H6 := exp_pos x); red; intro; rewrite H7 in H6; elim (Rlt_irrefl _ H6). apply Rle_ge; left; apply exp_pos. rewrite Rmult_minus_distr_l. rewrite Rmult_1_r; unfold Rdiv; rewrite <- Rmult_assoc; rewrite Rmult_minus_distr_l. rewrite Rmult_1_r; rewrite exp_plus; reflexivity. Qed. coq-8.4pl2/theories/Reals/Reals.v0000640000175000001440000000257012010532755015760 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0`` - Sup: for goals like ``?1 a <= 4 -> sin_approx a (2 * n + 1) <= sin a <= sin_approx a (2 * (n + 1)). Proof. intros; case (Req_dec a 0); intro Hyp_a. rewrite Hyp_a; rewrite sin_0; split; right; unfold sin_approx; apply sum_eq_R0 || (symmetry ; apply sum_eq_R0); intros; unfold sin_term; rewrite pow_add; simpl; unfold Rdiv; rewrite Rmult_0_l; ring. unfold sin_approx; cut (0 < a). intro Hyp_a_pos. rewrite (decomp_sum (sin_term a) (2 * n + 1)). rewrite (decomp_sum (sin_term a) (2 * (n + 1))). replace (sin_term a 0) with a. cut (sum_f_R0 (fun i:nat => sin_term a (S i)) (pred (2 * n + 1)) <= sin a - a /\ sin a - a <= sum_f_R0 (fun i:nat => sin_term a (S i)) (pred (2 * (n + 1))) -> a + sum_f_R0 (fun i:nat => sin_term a (S i)) (pred (2 * n + 1)) <= sin a /\ sin a <= a + sum_f_R0 (fun i:nat => sin_term a (S i)) (pred (2 * (n + 1)))). 
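(* Expository note, not part of the original file: after the leading term a of the sine series is split off, the remaining terms a ^ (2 * S k + 1) / INR (fact (2 * S k + 1)) form an alternating series whose terms decrease for 0 <= a <= 4 and tend to 0, so alternated_series_ineq brackets sin a - a between consecutive partial sums; this is the plan carried out below. *)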
intro; apply H1. set (Un := fun n:nat => a ^ (2 * S n + 1) / INR (fact (2 * S n + 1))). replace (pred (2 * n + 1)) with (2 * n)%nat. replace (pred (2 * (n + 1))) with (S (2 * n)). replace (sum_f_R0 (fun i:nat => sin_term a (S i)) (2 * n)) with (- sum_f_R0 (tg_alt Un) (2 * n)). replace (sum_f_R0 (fun i:nat => sin_term a (S i)) (S (2 * n))) with (- sum_f_R0 (tg_alt Un) (S (2 * n))). cut (sum_f_R0 (tg_alt Un) (S (2 * n)) <= a - sin a <= sum_f_R0 (tg_alt Un) (2 * n) -> - sum_f_R0 (tg_alt Un) (2 * n) <= sin a - a <= - sum_f_R0 (tg_alt Un) (S (2 * n))). intro; apply H2. apply alternated_series_ineq. unfold Un_decreasing, Un; intro; cut ((2 * S (S n0) + 1)%nat = S (S (2 * S n0 + 1))). intro; rewrite H3. replace (a ^ S (S (2 * S n0 + 1))) with (a ^ (2 * S n0 + 1) * (a * a)). unfold Rdiv; rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply pow_lt; assumption. apply Rmult_le_reg_l with (INR (fact (S (S (2 * S n0 + 1))))). rewrite <- H3; apply lt_INR_0; apply neq_O_lt; red; intro; assert (H5 := eq_sym H4); elim (fact_neq_0 _ H5). rewrite <- H3; rewrite (Rmult_comm (INR (fact (2 * S (S n0) + 1)))); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite H3; do 2 rewrite fact_simpl; do 2 rewrite mult_INR; repeat rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r. do 2 rewrite S_INR; rewrite plus_INR; rewrite mult_INR; repeat rewrite S_INR; simpl; replace (((0 + 1 + 1) * (INR n0 + 1) + (0 + 1) + 1 + 1) * ((0 + 1 + 1) * (INR n0 + 1) + (0 + 1) + 1)) with (4 * INR n0 * INR n0 + 18 * INR n0 + 20); [ idtac | ring ]. apply Rle_trans with 20. apply Rle_trans with 16. replace 16 with (Rsqr 4); [ idtac | ring_Rsqr ]. replace (a * a) with (Rsqr a); [ idtac | reflexivity ]. apply Rsqr_incr_1. assumption. assumption. left; prove_sup0. rewrite <- (Rplus_0_r 16); replace 20 with (16 + 4); [ apply Rplus_le_compat_l; left; prove_sup0 | ring ]. rewrite <- (Rplus_comm 20); pattern 20 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l. apply Rplus_le_le_0_compat. repeat apply Rmult_le_pos. left; prove_sup0. left; prove_sup0. replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. apply Rmult_le_pos. left; prove_sup0. replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. apply INR_fact_neq_0. apply INR_fact_neq_0. simpl; ring. ring. assert (H3 := cv_speed_pow_fact a); unfold Un; unfold Un_cv in H3; unfold R_dist in H3; unfold Un_cv; unfold R_dist; intros; elim (H3 eps H4); intros N H5. exists N; intros; apply H5. replace (2 * S n0 + 1)%nat with (S (2 * S n0)). unfold ge; apply le_trans with (2 * S n0)%nat. apply le_trans with (2 * S N)%nat. apply le_trans with (2 * N)%nat. apply le_n_2n. apply (fun m n p:nat => mult_le_compat_l p n m); apply le_n_Sn. apply (fun m n p:nat => mult_le_compat_l p n m); apply le_n_S; assumption. apply le_n_Sn. ring. assert (X := exist_sin (Rsqr a)); elim X; intros. cut (x = sin a / a). intro; rewrite H3 in p; unfold sin_in in p; unfold infinite_sum in p; unfold R_dist in p; unfold Un_cv; unfold R_dist; intros. cut (0 < eps / Rabs a). intro; elim (p _ H5); intros N H6. exists N; intros. replace (sum_f_R0 (tg_alt Un) n0) with (a * (1 - sum_f_R0 (fun i:nat => sin_n i * Rsqr a ^ i) (S n0))). 
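(* Expository note, not part of the original file: the alternating partial sums are rewritten through the defining series of sin: with x given by exist_sin (Rsqr a), one has sin a = a * x, so the partial sums of tg_alt Un converge to a - sin a, the limit needed by alternated_series_ineq. *)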
unfold Rminus; rewrite Rmult_plus_distr_l; rewrite Rmult_1_r; rewrite Ropp_plus_distr; rewrite Ropp_involutive; repeat rewrite Rplus_assoc; rewrite (Rplus_comm a); rewrite (Rplus_comm (- a)); repeat rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; apply Rmult_lt_reg_l with (/ Rabs a). apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. pattern (/ Rabs a) at 1; rewrite <- (Rabs_Rinv a Hyp_a). rewrite <- Rabs_mult; rewrite Rmult_plus_distr_l; rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym; [ rewrite Rmult_1_l | assumption ]; rewrite (Rmult_comm (/ a)); rewrite (Rmult_comm (/ Rabs a)); rewrite <- Rabs_Ropp; rewrite Ropp_plus_distr; rewrite Ropp_involutive; unfold Rminus, Rdiv in H6; apply H6; unfold ge; apply le_trans with n0; [ exact H7 | apply le_n_Sn ]. rewrite (decomp_sum (fun i:nat => sin_n i * Rsqr a ^ i) (S n0)). replace (sin_n 0) with 1. simpl; rewrite Rmult_1_r; unfold Rminus; rewrite Ropp_plus_distr; rewrite <- Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_l; rewrite Ropp_mult_distr_r_reverse; rewrite <- Ropp_mult_distr_l_reverse; rewrite scal_sum; apply sum_eq. intros; unfold sin_n, Un, tg_alt; replace ((-1) ^ S i) with (- (-1) ^ i). replace (a ^ (2 * S i + 1)) with (Rsqr a * Rsqr a ^ i * a). unfold Rdiv; ring. rewrite pow_add; rewrite pow_Rsqr; simpl; ring. simpl; ring. unfold sin_n; unfold Rdiv; simpl; rewrite Rinv_1; rewrite Rmult_1_r; reflexivity. apply lt_O_Sn. unfold Rdiv; apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. unfold sin; case (exist_sin (Rsqr a)). intros; cut (x = x0). intro; rewrite H3; unfold Rdiv. symmetry ; apply Rinv_r_simpl_m; assumption. unfold sin_in in p; unfold sin_in in s; eapply uniqueness_sum. apply p. apply s. intros; elim H2; intros. replace (sin a - a) with (- (a - sin a)); [ idtac | ring ]. split; apply Ropp_le_contravar; assumption. replace (- sum_f_R0 (tg_alt Un) (S (2 * n))) with (-1 * sum_f_R0 (tg_alt Un) (S (2 * n))); [ rewrite scal_sum | ring ]. apply sum_eq; intros; unfold sin_term, Un, tg_alt; replace ((-1) ^ S i) with (-1 * (-1) ^ i). unfold Rdiv; ring. reflexivity. replace (- sum_f_R0 (tg_alt Un) (2 * n)) with (-1 * sum_f_R0 (tg_alt Un) (2 * n)); [ rewrite scal_sum | ring ]. apply sum_eq; intros. unfold sin_term, Un, tg_alt; replace ((-1) ^ S i) with (-1 * (-1) ^ i). unfold Rdiv; ring. reflexivity. replace (2 * (n + 1))%nat with (S (S (2 * n))). reflexivity. ring. replace (2 * n + 1)%nat with (S (2 * n)). reflexivity. ring. intro; elim H1; intros. split. apply Rplus_le_reg_l with (- a). rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; rewrite (Rplus_comm (- a)); apply H2. apply Rplus_le_reg_l with (- a). rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; rewrite (Rplus_comm (- a)); apply H3. unfold sin_term; simpl; unfold Rdiv; rewrite Rinv_1; ring. replace (2 * (n + 1))%nat with (S (S (2 * n))). apply lt_O_Sn. ring. replace (2 * n + 1)%nat with (S (2 * n)). apply lt_O_Sn. ring. inversion H; [ assumption | elim Hyp_a; symmetry ; assumption ]. Qed. (**********) Lemma pre_cos_bound : forall (a:R) (n:nat), - 2 <= a -> a <= 2 -> cos_approx a (2 * n + 1) <= cos a <= cos_approx a (2 * (n + 1)). Proof. cut ((forall (a:R) (n:nat), 0 <= a -> a <= 2 -> cos_approx a (2 * n + 1) <= cos a <= cos_approx a (2 * (n + 1))) -> forall (a:R) (n:nat), - 2 <= a -> a <= 2 -> cos_approx a (2 * n + 1) <= cos a <= cos_approx a (2 * (n + 1))). intros H a n; apply H. intros; unfold cos_approx. rewrite (decomp_sum (cos_term a0) (2 * n0 + 1)). 
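(* Expository note, not part of the original file: same scheme as for pre_sin_bound: the leading term 1 of the cosine series is split off, and cos a0 - 1 is bracketed between alternating partial sums of a0 ^ (2 * S k) / INR (fact (2 * S k)), which decrease for 0 <= a0 <= 2; negative arguments are reduced to this case at the end using cos_sym and the evenness of cos_approx. *)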
rewrite (decomp_sum (cos_term a0) (2 * (n0 + 1))). replace (cos_term a0 0) with 1. cut (sum_f_R0 (fun i:nat => cos_term a0 (S i)) (pred (2 * n0 + 1)) <= cos a0 - 1 /\ cos a0 - 1 <= sum_f_R0 (fun i:nat => cos_term a0 (S i)) (pred (2 * (n0 + 1))) -> 1 + sum_f_R0 (fun i:nat => cos_term a0 (S i)) (pred (2 * n0 + 1)) <= cos a0 /\ cos a0 <= 1 + sum_f_R0 (fun i:nat => cos_term a0 (S i)) (pred (2 * (n0 + 1)))). intro; apply H2. set (Un := fun n:nat => a0 ^ (2 * S n) / INR (fact (2 * S n))). replace (pred (2 * n0 + 1)) with (2 * n0)%nat. replace (pred (2 * (n0 + 1))) with (S (2 * n0)). replace (sum_f_R0 (fun i:nat => cos_term a0 (S i)) (2 * n0)) with (- sum_f_R0 (tg_alt Un) (2 * n0)). replace (sum_f_R0 (fun i:nat => cos_term a0 (S i)) (S (2 * n0))) with (- sum_f_R0 (tg_alt Un) (S (2 * n0))). cut (sum_f_R0 (tg_alt Un) (S (2 * n0)) <= 1 - cos a0 <= sum_f_R0 (tg_alt Un) (2 * n0) -> - sum_f_R0 (tg_alt Un) (2 * n0) <= cos a0 - 1 <= - sum_f_R0 (tg_alt Un) (S (2 * n0))). intro; apply H3. apply alternated_series_ineq. unfold Un_decreasing; intro; unfold Un. cut ((2 * S (S n1))%nat = S (S (2 * S n1))). intro; rewrite H4; replace (a0 ^ S (S (2 * S n1))) with (a0 ^ (2 * S n1) * (a0 * a0)). unfold Rdiv; rewrite Rmult_assoc; apply Rmult_le_compat_l. apply pow_le; assumption. apply Rmult_le_reg_l with (INR (fact (S (S (2 * S n1))))). rewrite <- H4; apply lt_INR_0; apply neq_O_lt; red; intro; assert (H6 := eq_sym H5); elim (fact_neq_0 _ H6). rewrite <- H4; rewrite (Rmult_comm (INR (fact (2 * S (S n1))))); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite H4; do 2 rewrite fact_simpl; do 2 rewrite mult_INR; repeat rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; do 2 rewrite S_INR; rewrite mult_INR; repeat rewrite S_INR; simpl; replace (((0 + 1 + 1) * (INR n1 + 1) + 1 + 1) * ((0 + 1 + 1) * (INR n1 + 1) + 1)) with (4 * INR n1 * INR n1 + 14 * INR n1 + 12); [ idtac | ring ]. apply Rle_trans with 12. apply Rle_trans with 4. replace 4 with (Rsqr 2); [ idtac | ring_Rsqr ]. replace (a0 * a0) with (Rsqr a0); [ idtac | reflexivity ]. apply Rsqr_incr_1. assumption. discrR. assumption. left; prove_sup0. pattern 4 at 1; rewrite <- Rplus_0_r; replace 12 with (4 + 8); [ apply Rplus_le_compat_l; left; prove_sup0 | ring ]. rewrite <- (Rplus_comm 12); pattern 12 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l. apply Rplus_le_le_0_compat. repeat apply Rmult_le_pos. left; prove_sup0. left; prove_sup0. replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. apply Rmult_le_pos. left; prove_sup0. replace 0 with (INR 0); [ apply le_INR; apply le_O_n | reflexivity ]. apply INR_fact_neq_0. apply INR_fact_neq_0. simpl; ring. ring. assert (H4 := cv_speed_pow_fact a0); unfold Un; unfold Un_cv in H4; unfold R_dist in H4; unfold Un_cv; unfold R_dist; intros; elim (H4 eps H5); intros N H6; exists N; intros. apply H6; unfold ge; apply le_trans with (2 * S N)%nat. apply le_trans with (2 * N)%nat. apply le_n_2n. apply (fun m n p:nat => mult_le_compat_l p n m); apply le_n_Sn. apply (fun m n p:nat => mult_le_compat_l p n m); apply le_n_S; assumption. assert (X := exist_cos (Rsqr a0)); elim X; intros. cut (x = cos a0). intro; rewrite H4 in p; unfold cos_in in p; unfold infinite_sum in p; unfold R_dist in p; unfold Un_cv; unfold R_dist; intros. elim (p _ H5); intros N H6. exists N; intros. replace (sum_f_R0 (tg_alt Un) n1) with (1 - sum_f_R0 (fun i:nat => cos_n i * Rsqr a0 ^ i) (S n1)). 
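(* Expository note, not part of the original file: as in the sine case, the alternating partial sums are identified with 1 minus the partial sums of the series defining cos (exist_cos), so their limit is 1 - cos a0, as required by alternated_series_ineq. *)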
unfold Rminus; rewrite Ropp_plus_distr; rewrite Ropp_involutive; repeat rewrite Rplus_assoc; rewrite (Rplus_comm 1); rewrite (Rplus_comm (-1)); repeat rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; rewrite <- Rabs_Ropp; rewrite Ropp_plus_distr; rewrite Ropp_involutive; unfold Rminus in H6; apply H6. unfold ge; apply le_trans with n1. exact H7. apply le_n_Sn. rewrite (decomp_sum (fun i:nat => cos_n i * Rsqr a0 ^ i) (S n1)). replace (cos_n 0) with 1. simpl; rewrite Rmult_1_r; unfold Rminus; rewrite Ropp_plus_distr; rewrite <- Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_l; replace (- sum_f_R0 (fun i:nat => cos_n (S i) * (Rsqr a0 * Rsqr a0 ^ i)) n1) with (-1 * sum_f_R0 (fun i:nat => cos_n (S i) * (Rsqr a0 * Rsqr a0 ^ i)) n1); [ idtac | ring ]; rewrite scal_sum; apply sum_eq; intros; unfold cos_n, Un, tg_alt. replace ((-1) ^ S i) with (- (-1) ^ i). replace (a0 ^ (2 * S i)) with (Rsqr a0 * Rsqr a0 ^ i). unfold Rdiv; ring. rewrite pow_Rsqr; reflexivity. simpl; ring. unfold cos_n; unfold Rdiv; simpl; rewrite Rinv_1; rewrite Rmult_1_r; reflexivity. apply lt_O_Sn. unfold cos; case (exist_cos (Rsqr a0)); intros; unfold cos_in in p; unfold cos_in in c; eapply uniqueness_sum. apply p. apply c. intros; elim H3; intros; replace (cos a0 - 1) with (- (1 - cos a0)); [ idtac | ring ]. split; apply Ropp_le_contravar; assumption. replace (- sum_f_R0 (tg_alt Un) (S (2 * n0))) with (-1 * sum_f_R0 (tg_alt Un) (S (2 * n0))); [ rewrite scal_sum | ring ]. apply sum_eq; intros; unfold cos_term, Un, tg_alt; replace ((-1) ^ S i) with (-1 * (-1) ^ i). unfold Rdiv; ring. reflexivity. replace (- sum_f_R0 (tg_alt Un) (2 * n0)) with (-1 * sum_f_R0 (tg_alt Un) (2 * n0)); [ rewrite scal_sum | ring ]; apply sum_eq; intros; unfold cos_term, Un, tg_alt; replace ((-1) ^ S i) with (-1 * (-1) ^ i). unfold Rdiv; ring. reflexivity. replace (2 * (n0 + 1))%nat with (S (S (2 * n0))). reflexivity. ring. replace (2 * n0 + 1)%nat with (S (2 * n0)). reflexivity. ring. intro; elim H2; intros; split. apply Rplus_le_reg_l with (-1). rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; rewrite (Rplus_comm (-1)); apply H3. apply Rplus_le_reg_l with (-1). rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; rewrite (Rplus_comm (-1)); apply H4. unfold cos_term; simpl; unfold Rdiv; rewrite Rinv_1; ring. replace (2 * (n0 + 1))%nat with (S (S (2 * n0))). apply lt_O_Sn. ring. replace (2 * n0 + 1)%nat with (S (2 * n0)). apply lt_O_Sn. ring. intros; case (total_order_T 0 a); intro. elim s; intro. apply H; [ left; assumption | assumption ]. apply H; [ right; assumption | assumption ]. cut (0 < - a). intro; cut (forall (x:R) (n:nat), cos_approx x n = cos_approx (- x) n). intro; rewrite H3; rewrite (H3 a (2 * (n + 1))%nat); rewrite cos_sym; apply H. left; assumption. rewrite <- (Ropp_involutive 2); apply Ropp_le_contravar; exact H0. intros; unfold cos_approx; apply sum_eq; intros; unfold cos_term; do 2 rewrite pow_Rsqr; rewrite Rsqr_neg; unfold Rdiv; reflexivity. apply Ropp_0_gt_lt_contravar; assumption. Qed. coq-8.4pl2/theories/Reals/SplitRmult.v0000640000175000001440000000141012010532755017021 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0`` /\ ``r2<>0`` -> ``r1*r2<>0``. i*) Require Import Rbase. Ltac split_Rmult := match goal with | |- ((?X1 * ?X2)%R <> 0%R) => apply Rmult_integral_contrapositive; split; try split_Rmult end. 
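(* Expository usage sketch, not part of the original file; the goal below is made up for illustration. split_Rmult repeatedly applies Rmult_integral_contrapositive, turning a goal of the form p * q <> 0 into the subgoals p <> 0 and q <> 0, recursing on any factor that is itself a product. For instance:
     Goal forall r1 r2:R, r1 <> 0%R -> r2 <> 0%R -> (r1 * (r2 * r1))%R <> 0%R.
     Proof. intros; split_Rmult; assumption. Qed.
*)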
coq-8.4pl2/theories/Reals/Cos_rel.v0000640000175000001440000002612712010532755016304 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (-1) ^ k / INR (fact (2 * k)) * x ^ (2 * k)) N. Definition B1 (x:R) (N:nat) : R := sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * x ^ (2 * k + 1)) N. Definition C1 (x y:R) (N:nat) : R := sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k)) * (x + y) ^ (2 * k)) N. Definition Reste1 (x y:R) (N:nat) : R := sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k))) * x ^ (2 * S (l + k)) * ((-1) ^ (N - l) / INR (fact (2 * (N - l)))) * y ^ (2 * (N - l))) (pred (N - k))) (pred N). Definition Reste2 (x y:R) (N:nat) : R := sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k) + 1)) * x ^ (2 * S (l + k) + 1) * ((-1) ^ (N - l) / INR (fact (2 * (N - l) + 1))) * y ^ (2 * (N - l) + 1)) (pred (N - k))) ( pred N). Definition Reste (x y:R) (N:nat) : R := Reste2 x y N - Reste1 x y (S N). (* Here is the main result that will be used to prove that (cos (x+y))=(cos x)(cos y)-(sin x)(sin y) *) Theorem cos_plus_form : forall (x y:R) (n:nat), (0 < n)%nat -> A1 x (S n) * A1 y (S n) - B1 x n * B1 y n + Reste x y n = C1 x y (S n). intros. unfold A1, B1. rewrite (cauchy_finite (fun k:nat => (-1) ^ k / INR (fact (2 * k)) * x ^ (2 * k)) (fun k:nat => (-1) ^ k / INR (fact (2 * k)) * y ^ (2 * k)) ( S n)). rewrite (cauchy_finite (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * x ^ (2 * k + 1)) (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * y ^ (2 * k + 1)) n H) . unfold Reste. replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k))) * x ^ (2 * S (l + k)) * ((-1) ^ (S n - l) / INR (fact (2 * (S n - l))) * y ^ (2 * (S n - l)))) (pred (S n - k))) ( pred (S n))) with (Reste1 x y (S n)). replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun l:nat => (-1) ^ S (l + k) / INR (fact (2 * S (l + k) + 1)) * x ^ (2 * S (l + k) + 1) * ((-1) ^ (n - l) / INR (fact (2 * (n - l) + 1)) * y ^ (2 * (n - l) + 1))) (pred (n - k))) ( pred n)) with (Reste2 x y n). replace (sum_f_R0 (fun k:nat => sum_f_R0 (fun p:nat => (-1) ^ p / INR (fact (2 * p)) * x ^ (2 * p) * ((-1) ^ (k - p) / INR (fact (2 * (k - p))) * y ^ (2 * (k - p)))) k) (S n)) with (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k)) * sum_f_R0 (fun l:nat => C (2 * k) (2 * l) * x ^ (2 * l) * y ^ (2 * (k - l))) k) (S n)). pose (sin_nnn := fun n:nat => match n with | O => 0 | S p => (-1) ^ S p / INR (fact (2 * S p)) * sum_f_R0 (fun l:nat => C (2 * S p) (S (2 * l)) * x ^ S (2 * l) * y ^ S (2 * (p - l))) p end). ring_simplify. unfold Rminus. replace (* (- old ring compat *) (- sum_f_R0 (fun k:nat => sum_f_R0 (fun p:nat => (-1) ^ p / INR (fact (2 * p + 1)) * x ^ (2 * p + 1) * ((-1) ^ (k - p) / INR (fact (2 * (k - p) + 1)) * y ^ (2 * (k - p) + 1))) k) n) with (sum_f_R0 sin_nnn (S n)). rewrite <- sum_plus. unfold C1. apply sum_eq; intros. induction i as [| i Hreci]. simpl. unfold C; simpl. field; discrR. unfold sin_nnn. rewrite <- Rmult_plus_distr_l. apply Rmult_eq_compat_l. rewrite binomial. pose (Wn := fun i0:nat => C (2 * S i) i0 * x ^ i0 * y ^ (2 * S i - i0)). replace (sum_f_R0 (fun l:nat => C (2 * S i) (2 * l) * x ^ (2 * l) * y ^ (2 * (S i - l))) (S i)) with (sum_f_R0 (fun l:nat => Wn (2 * l)%nat) (S i)). 
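(* Expository note, not part of the original file: the binomial expansion of (x + y) ^ (2 * S i) is split into its even-indexed and odd-indexed terms (Wn (2 * l) and Wn (S (2 * l))); sum_decomposition recombines the two subsums, which is how C1 x y accounts for both the A1 * A1 (cosine) and B1 * B1 (sine) contributions. *)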
replace (sum_f_R0 (fun l:nat => C (2 * S i) (S (2 * l)) * x ^ S (2 * l) * y ^ S (2 * (i - l))) i) with (sum_f_R0 (fun l:nat => Wn (S (2 * l))) i). apply sum_decomposition. apply sum_eq; intros. unfold Wn. apply Rmult_eq_compat_l. replace (2 * S i - S (2 * i0))%nat with (S (2 * (i - i0))). reflexivity. omega. apply sum_eq; intros. unfold Wn. apply Rmult_eq_compat_l. replace (2 * S i - 2 * i0)%nat with (2 * (S i - i0))%nat. reflexivity. omega. replace (- sum_f_R0 (fun k:nat => sum_f_R0 (fun p:nat => (-1) ^ p / INR (fact (2 * p + 1)) * x ^ (2 * p + 1) * ((-1) ^ (k - p) / INR (fact (2 * (k - p) + 1)) * y ^ (2 * (k - p) + 1))) k) n) with (-1 * sum_f_R0 (fun k:nat => sum_f_R0 (fun p:nat => (-1) ^ p / INR (fact (2 * p + 1)) * x ^ (2 * p + 1) * ((-1) ^ (k - p) / INR (fact (2 * (k - p) + 1)) * y ^ (2 * (k - p) + 1))) k) n);[idtac|ring]. rewrite scal_sum. rewrite decomp_sum. replace (sin_nnn 0%nat) with 0. rewrite Rplus_0_l. change (pred (S n)) with n. (* replace (pred (S n)) with n; [ idtac | reflexivity ]. *) apply sum_eq; intros. rewrite Rmult_comm. unfold sin_nnn. rewrite scal_sum. rewrite scal_sum. apply sum_eq; intros. unfold Rdiv. (*repeat rewrite Rmult_assoc.*) (* rewrite (Rmult_comm (/ INR (fact (2 * S i)))). *) repeat rewrite <- Rmult_assoc. rewrite <- (Rmult_comm (/ INR (fact (2 * S i)))). repeat rewrite <- Rmult_assoc. replace (/ INR (fact (2 * S i)) * C (2 * S i) (S (2 * i0))) with (/ INR (fact (2 * i0 + 1)) * / INR (fact (2 * (i - i0) + 1))). replace (S (2 * i0)) with (2 * i0 + 1)%nat; [ idtac | ring ]. replace (S (2 * (i - i0))) with (2 * (i - i0) + 1)%nat; [ idtac | ring ]. replace ((-1) ^ S i) with (-1 * (-1) ^ i0 * (-1) ^ (i - i0)). ring. simpl. pattern i at 2; replace i with (i0 + (i - i0))%nat. rewrite pow_add. ring. symmetry ; apply le_plus_minus; assumption. unfold C. unfold Rdiv; repeat rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. rewrite Rinv_mult_distr. replace (S (2 * i0)) with (2 * i0 + 1)%nat; [ apply Rmult_eq_compat_l | ring ]. replace (2 * S i - (2 * i0 + 1))%nat with (2 * (i - i0) + 1)%nat. reflexivity. omega. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. reflexivity. apply lt_O_Sn. (* ring. *) apply sum_eq; intros. rewrite scal_sum. apply sum_eq; intros. unfold Rdiv. repeat rewrite <- Rmult_assoc. rewrite <- (Rmult_comm (/ INR (fact (2 * i)))). repeat rewrite <- Rmult_assoc. replace (/ INR (fact (2 * i)) * C (2 * i) (2 * i0)) with (/ INR (fact (2 * i0)) * / INR (fact (2 * (i - i0)))). replace ((-1) ^ i) with ((-1) ^ i0 * (-1) ^ (i - i0)). ring. pattern i at 2; replace i with (i0 + (i - i0))%nat. rewrite pow_add. ring. symmetry ; apply le_plus_minus; assumption. unfold C. unfold Rdiv; repeat rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. rewrite Rinv_mult_distr. replace (2 * i - 2 * i0)%nat with (2 * (i - i0))%nat. reflexivity. omega. apply INR_fact_neq_0. apply INR_fact_neq_0. apply INR_fact_neq_0. unfold Reste2; apply sum_eq; intros. apply sum_eq; intros. unfold Rdiv; ring. unfold Reste1; apply sum_eq; intros. apply sum_eq; intros. unfold Rdiv; ring. apply lt_O_Sn. Qed. Lemma pow_sqr : forall (x:R) (i:nat), x ^ (2 * i) = (x * x) ^ i. intros. assert (H := pow_Rsqr x i). unfold Rsqr in H; exact H. Qed. Lemma A1_cvg : forall x:R, Un_cv (A1 x) (cos x). intro. assert (H := exist_cos (x * x)). elim H; intros. assert (p_i := p). unfold cos_in in p. unfold cos_n, infinite_sum in p. unfold R_dist in p. cut (cos x = x0). intro. rewrite H0. unfold Un_cv; unfold R_dist; intros. elim (p eps H1); intros. 
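(* Expository note, not part of the original file: A1 x n is the n-th partial sum of the series defining cos x once x ^ (2 * i) is rewritten as (x * x) ^ i (pow_sqr), so the convergence statement is exactly the infinite_sum property provided by exist_cos, up to uniqueness of limits. *)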
exists x1; intros. unfold A1. replace (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k)) * x ^ (2 * k)) n) with (sum_f_R0 (fun i:nat => (-1) ^ i / INR (fact (2 * i)) * (x * x) ^ i) n). apply H2; assumption. apply sum_eq. intros. replace ((x * x) ^ i) with (x ^ (2 * i)). reflexivity. apply pow_sqr. unfold cos. case (exist_cos (Rsqr x)). unfold Rsqr; intros. unfold cos_in in p_i. unfold cos_in in c. apply uniqueness_sum with (fun i:nat => cos_n i * (x * x) ^ i); assumption. Qed. Lemma C1_cvg : forall x y:R, Un_cv (C1 x y) (cos (x + y)). intros. assert (H := exist_cos ((x + y) * (x + y))). elim H; intros. assert (p_i := p). unfold cos_in in p. unfold cos_n, infinite_sum in p. unfold R_dist in p. cut (cos (x + y) = x0). intro. rewrite H0. unfold Un_cv; unfold R_dist; intros. elim (p eps H1); intros. exists x1; intros. unfold C1. replace (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k)) * (x + y) ^ (2 * k)) n) with (sum_f_R0 (fun i:nat => (-1) ^ i / INR (fact (2 * i)) * ((x + y) * (x + y)) ^ i) n). apply H2; assumption. apply sum_eq. intros. replace (((x + y) * (x + y)) ^ i) with ((x + y) ^ (2 * i)). reflexivity. apply pow_sqr. unfold cos. case (exist_cos (Rsqr (x + y))). unfold Rsqr; intros. unfold cos_in in p_i. unfold cos_in in c. apply uniqueness_sum with (fun i:nat => cos_n i * ((x + y) * (x + y)) ^ i); assumption. Qed. Lemma B1_cvg : forall x:R, Un_cv (B1 x) (sin x). intro. case (Req_dec x 0); intro. rewrite H. rewrite sin_0. unfold B1. unfold Un_cv; unfold R_dist; intros; exists 0%nat; intros. replace (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * 0 ^ (2 * k + 1)) n) with 0. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. induction n as [| n Hrecn]. simpl; ring. rewrite tech5; rewrite <- Hrecn. simpl; ring. unfold ge; apply le_O_n. assert (H0 := exist_sin (x * x)). elim H0; intros. assert (p_i := p). unfold sin_in in p. unfold sin_n, infinite_sum in p. unfold R_dist in p. cut (sin x = x * x0). intro. rewrite H1. unfold Un_cv; unfold R_dist; intros. cut (0 < eps / Rabs x); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption ] ]. elim (p (eps / Rabs x) H3); intros. exists x1; intros. unfold B1. replace (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * x ^ (2 * k + 1)) n) with (x * sum_f_R0 (fun i:nat => (-1) ^ i / INR (fact (2 * i + 1)) * (x * x) ^ i) n). replace (x * sum_f_R0 (fun i:nat => (-1) ^ i / INR (fact (2 * i + 1)) * (x * x) ^ i) n - x * x0) with (x * (sum_f_R0 (fun i:nat => (-1) ^ i / INR (fact (2 * i + 1)) * (x * x) ^ i) n - x0)); [ idtac | ring ]. rewrite Rabs_mult. apply Rmult_lt_reg_l with (/ Rabs x). apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite <- (Rmult_comm eps); unfold Rdiv in H4; apply H4; assumption. apply Rabs_no_R0; assumption. rewrite scal_sum. apply sum_eq. intros. rewrite pow_add. rewrite pow_sqr. simpl. ring. unfold sin. case (exist_sin (Rsqr x)). unfold Rsqr; intros. unfold sin_in in p_i. unfold sin_in in s. assert (H1 := uniqueness_sum (fun i:nat => sin_n i * (x * x) ^ i) x0 x1 p_i s). rewrite H1; reflexivity. Qed. coq-8.4pl2/theories/Reals/Sqrt_reg.v0000640000175000001440000003067612010532755016510 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Rabs (sqrt (1 + h) - 1) <= Rabs h. Proof. intros; cut (0 <= 1 + h). 
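(* Expository note, not part of the original file: the hypothesis H : Rabs h <= 1 gives 0 <= 1 + h (discharged below); the main estimate compares sqrt (1 + h) with sqrt (Rsqr (1 + h)) = 1 + h, treating the cases h < 0, h = 0 and 0 < h separately. *)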
intro; apply Rle_trans with (Rabs (sqrt (Rsqr (1 + h)) - 1)). case (total_order_T h 0); intro. elim s; intro. repeat rewrite Rabs_left. unfold Rminus; do 2 rewrite <- (Rplus_comm (-1)). do 2 rewrite Ropp_plus_distr; rewrite Ropp_involutive; apply Rplus_le_compat_l. apply Ropp_le_contravar; apply sqrt_le_1. apply Rle_0_sqr. apply H0. pattern (1 + h) at 2; rewrite <- Rmult_1_r; unfold Rsqr; apply Rmult_le_compat_l. apply H0. pattern 1 at 2; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; assumption. apply Rplus_lt_reg_r with 1; rewrite Rplus_0_r; rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r. pattern 1 at 2; rewrite <- sqrt_1; apply sqrt_lt_1. apply Rle_0_sqr. left; apply Rlt_0_1. pattern 1 at 2; rewrite <- Rsqr_1; apply Rsqr_incrst_1. pattern 1 at 2; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; assumption. apply H0. left; apply Rlt_0_1. apply Rplus_lt_reg_r with 1; rewrite Rplus_0_r; rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r. pattern 1 at 2; rewrite <- sqrt_1; apply sqrt_lt_1. apply H0. left; apply Rlt_0_1. pattern 1 at 2; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; assumption. rewrite b; rewrite Rplus_0_r; rewrite Rsqr_1; rewrite sqrt_1; right; reflexivity. repeat rewrite Rabs_right. unfold Rminus; do 2 rewrite <- (Rplus_comm (-1)); apply Rplus_le_compat_l. apply sqrt_le_1. apply H0. apply Rle_0_sqr. pattern (1 + h) at 1; rewrite <- Rmult_1_r; unfold Rsqr; apply Rmult_le_compat_l. apply H0. pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; assumption. apply Rle_ge; apply Rplus_le_reg_l with 1. rewrite Rplus_0_r; rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r. pattern 1 at 1; rewrite <- sqrt_1; apply sqrt_le_1. left; apply Rlt_0_1. apply Rle_0_sqr. pattern 1 at 1; rewrite <- Rsqr_1; apply Rsqr_incr_1. pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; assumption. left; apply Rlt_0_1. apply H0. apply Rle_ge; left; apply Rplus_lt_reg_r with 1. rewrite Rplus_0_r; rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r. pattern 1 at 1; rewrite <- sqrt_1; apply sqrt_lt_1. left; apply Rlt_0_1. apply H0. pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; assumption. rewrite sqrt_Rsqr. replace (1 + h - 1) with h; [ right; reflexivity | ring ]. apply H0. case (total_order_T h 0); intro. elim s; intro. rewrite (Rabs_left h a) in H. apply Rplus_le_reg_l with (- h). rewrite Rplus_0_r; rewrite Rplus_comm; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; exact H. left; rewrite b; rewrite Rplus_0_r; apply Rlt_0_1. left; apply Rplus_lt_0_compat. apply Rlt_0_1. apply r. Qed. (** sqrt is continuous in 1 *) Lemma sqrt_continuity_pt_R1 : continuity_pt sqrt 1. Proof. unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. set (alpha := Rmin eps 1). exists alpha; intros. split. unfold alpha; unfold Rmin; case (Rle_dec eps 1); intro. assumption. apply Rlt_0_1. intros; elim H0; intros. rewrite sqrt_1; replace x with (1 + (x - 1)); [ idtac | ring ]; apply Rle_lt_trans with (Rabs (x - 1)). apply sqrt_var_maj. apply Rle_trans with alpha. left; apply H2. unfold alpha; apply Rmin_r. apply Rlt_le_trans with alpha; [ apply H2 | unfold alpha; apply Rmin_l ]. Qed. (** sqrt is continuous forall x>0 *) Lemma sqrt_continuity_pt : forall x:R, 0 < x -> continuity_pt sqrt x. Proof. 
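(* Aside (hedged sketch of the proof that follows, not part of the original
   script): continuity of sqrt at an arbitrary x > 0 is reduced to continuity
   at 1 through the factorisation used a few lines below,
       sqrt (x + h) - sqrt x = sqrt x * (sqrt (1 + h / x) - sqrt 1),
   so a modulus alp_1 for sqrt at 1 yields the modulus alpha := alp_1 * x at
   the point x (scaled by x because the increment fed to the point 1 is
   h / x).  The names alp_1 and alpha are the ones introduced in the script. *)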
intros; generalize sqrt_continuity_pt_R1. unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; intros. cut (0 < eps / sqrt x). intro; elim (H0 _ H2); intros alp_1 H3. elim H3; intros. set (alpha := alp_1 * x). exists (Rmin alpha x); intros. split. change (0 < Rmin alpha x); unfold Rmin; case (Rle_dec alpha x); intro. unfold alpha; apply Rmult_lt_0_compat; assumption. apply H. intros; replace x0 with (x + (x0 - x)); [ idtac | ring ]; replace (sqrt (x + (x0 - x)) - sqrt x) with (sqrt x * (sqrt (1 + (x0 - x) / x) - sqrt 1)). rewrite Rabs_mult; rewrite (Rabs_right (sqrt x)). apply Rmult_lt_reg_l with (/ sqrt x). apply Rinv_0_lt_compat; apply sqrt_lt_R0; assumption. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite Rmult_comm. unfold Rdiv in H5. case (Req_dec x x0); intro. rewrite H7; unfold Rminus, Rdiv; rewrite Rplus_opp_r; rewrite Rmult_0_l; rewrite Rplus_0_r; rewrite Rplus_opp_r; rewrite Rabs_R0. apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat; rewrite <- H7; apply sqrt_lt_R0; assumption. apply H5. split. unfold D_x, no_cond. split. trivial. red; intro. cut ((x0 - x) * / x = 0). intro. elim (Rmult_integral _ _ H9); intro. elim H7. apply (Rminus_diag_uniq_sym _ _ H10). assert (H11 := Rmult_eq_0_compat_r _ x H10). rewrite <- Rinv_l_sym in H11. elim R1_neq_R0; exact H11. red; intro; rewrite H12 in H; elim (Rlt_irrefl _ H). symmetry ; apply Rplus_eq_reg_l with 1; rewrite Rplus_0_r; unfold Rdiv in H8; exact H8. unfold Rminus; rewrite Rplus_comm; rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; elim H6; intros. unfold Rdiv; rewrite Rabs_mult. rewrite Rabs_Rinv. rewrite (Rabs_right x). rewrite Rmult_comm; apply Rmult_lt_reg_l with x. apply H. rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite Rmult_comm; fold alpha. apply Rlt_le_trans with (Rmin alpha x). apply H9. apply Rmin_l. red; intro; rewrite H10 in H; elim (Rlt_irrefl _ H). apply Rle_ge; left; apply H. red; intro; rewrite H10 in H; elim (Rlt_irrefl _ H). assert (H7 := sqrt_lt_R0 x H). red; intro; rewrite H8 in H7; elim (Rlt_irrefl _ H7). apply Rle_ge; apply sqrt_positivity. left; apply H. unfold Rminus; rewrite Rmult_plus_distr_l; rewrite Ropp_mult_distr_r_reverse; repeat rewrite <- sqrt_mult. rewrite Rmult_1_r; rewrite Rmult_plus_distr_l; rewrite Rmult_1_r; unfold Rdiv; rewrite Rmult_comm; rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; reflexivity. red; intro; rewrite H7 in H; elim (Rlt_irrefl _ H). left; apply H. left; apply Rlt_0_1. left; apply H. elim H6; intros. case (Rcase_abs (x0 - x)); intro. rewrite (Rabs_left (x0 - x) r) in H8. rewrite Rplus_comm. apply Rplus_le_reg_l with (- ((x0 - x) / x)). rewrite Rplus_0_r; rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; unfold Rdiv; rewrite <- Ropp_mult_distr_l_reverse. apply Rmult_le_reg_l with x. apply H. rewrite Rmult_1_r; rewrite Rmult_comm; rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; left; apply Rlt_le_trans with (Rmin alpha x). apply H8. apply Rmin_r. red; intro; rewrite H9 in H; elim (Rlt_irrefl _ H). apply Rplus_le_le_0_compat. left; apply Rlt_0_1. unfold Rdiv; apply Rmult_le_pos. apply Rge_le; exact r. left; apply Rinv_0_lt_compat; apply H. unfold Rdiv; apply Rmult_lt_0_compat. apply H1. apply Rinv_0_lt_compat; apply sqrt_lt_R0; apply H. Qed. (** sqrt is derivable for all x>0 *) Lemma derivable_pt_lim_sqrt : forall x:R, 0 < x -> derivable_pt_lim sqrt x (/ (2 * sqrt x)). Proof. 
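(* Aside (hedged sketch, kept entirely in a comment because nothing may be
   inserted inside the running proof): the key step below is the conjugate
   identity
       (sqrt (x + h) - sqrt x) / h = / (sqrt x + sqrt (x + h))
   for h <> 0 and 0 <= x + h, whose right-hand side tends to / (2 * sqrt x)
   by continuity of sqrt (the auxiliary function g).  Once the lemma is
   available, a minimal use could look like this, assuming only the standard
   lemma Rlt_0_1:

     Example sqrt_slope_at_1 : derivable_pt_lim sqrt 1 (/ (2 * sqrt 1)).
     Proof. apply derivable_pt_lim_sqrt; apply Rlt_0_1. Qed.
*)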
intros; set (g := fun h:R => sqrt x + sqrt (x + h)). cut (continuity_pt g 0). intro; cut (g 0 <> 0). intro; assert (H2 := continuity_pt_inv g 0 H0 H1). unfold derivable_pt_lim; intros; unfold continuity_pt in H2; unfold continue_in in H2; unfold limit1_in in H2; unfold limit_in in H2; simpl in H2; unfold R_dist in H2. elim (H2 eps H3); intros alpha H4. elim H4; intros. set (alpha1 := Rmin alpha x). cut (0 < alpha1). intro; exists (mkposreal alpha1 H7); intros. replace ((sqrt (x + h) - sqrt x) / h) with (/ (sqrt x + sqrt (x + h))). unfold inv_fct, g in H6; replace (2 * sqrt x) with (sqrt x + sqrt (x + 0)). apply H6. split. unfold D_x, no_cond. split. trivial. apply (not_eq_sym (A:=R)); exact H8. unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rlt_le_trans with alpha1. exact H9. unfold alpha1; apply Rmin_l. rewrite Rplus_0_r; ring. cut (0 <= x + h). intro; cut (0 < sqrt x + sqrt (x + h)). intro; apply Rmult_eq_reg_l with (sqrt x + sqrt (x + h)). rewrite <- Rinv_r_sym. rewrite Rplus_comm; unfold Rdiv; rewrite <- Rmult_assoc; rewrite Rsqr_plus_minus; repeat rewrite Rsqr_sqrt. rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; rewrite <- Rinv_r_sym. reflexivity. apply H8. left; apply H. assumption. red; intro; rewrite H12 in H11; elim (Rlt_irrefl _ H11). red; intro; rewrite H12 in H11; elim (Rlt_irrefl _ H11). apply Rplus_lt_le_0_compat. apply sqrt_lt_R0; apply H. apply sqrt_positivity; apply H10. case (Rcase_abs h); intro. rewrite (Rabs_left h r) in H9. apply Rplus_le_reg_l with (- h). rewrite Rplus_0_r; rewrite Rplus_comm; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; left; apply Rlt_le_trans with alpha1. apply H9. unfold alpha1; apply Rmin_r. apply Rplus_le_le_0_compat. left; assumption. apply Rge_le; apply r. unfold alpha1; unfold Rmin; case (Rle_dec alpha x); intro. apply H5. apply H. unfold g; rewrite Rplus_0_r. cut (0 < sqrt x + sqrt x). intro; red; intro; rewrite H2 in H1; elim (Rlt_irrefl _ H1). apply Rplus_lt_0_compat; apply sqrt_lt_R0; apply H. replace g with (fct_cte (sqrt x) + comp sqrt (fct_cte x + id))%F; [ idtac | reflexivity ]. apply continuity_pt_plus. apply continuity_pt_const; unfold constant, fct_cte; intro; reflexivity. apply continuity_pt_comp. apply continuity_pt_plus. apply continuity_pt_const; unfold constant, fct_cte; intro; reflexivity. apply derivable_continuous_pt; apply derivable_pt_id. apply sqrt_continuity_pt. unfold plus_fct, fct_cte, id; rewrite Rplus_0_r; apply H. Qed. (**********) Lemma derivable_pt_sqrt : forall x:R, 0 < x -> derivable_pt sqrt x. Proof. unfold derivable_pt; intros. exists (/ (2 * sqrt x)). apply derivable_pt_lim_sqrt; assumption. Qed. (**********) Lemma derive_pt_sqrt : forall (x:R) (pr:0 < x), derive_pt sqrt x (derivable_pt_sqrt _ pr) = / (2 * sqrt x). Proof. intros. apply derive_pt_eq_0. apply derivable_pt_lim_sqrt; assumption. Qed. (** We show that sqrt is continuous for all x>=0 *) (** Remark : by definition of sqrt (as extension of Rsqrt on |R), we could also show that sqrt is continuous for all x *) Lemma continuity_pt_sqrt : forall x:R, 0 <= x -> continuity_pt sqrt x. Proof. intros; case (Rtotal_order 0 x); intro. apply (sqrt_continuity_pt x H0). elim H0; intro. unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros. exists (Rsqr eps); intros. split. change (0 < Rsqr eps); apply Rsqr_pos_lt. red; intro; rewrite H3 in H2; elim (Rlt_irrefl _ H2). intros; elim H3; intros. 
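(* Aside (hedged reading aid, not part of the original script): this branch
   treats x = 0, and the modulus chosen above is Rsqr eps.  The underlying
   worked inequality is
       0 <= x0 < eps^2  ->  sqrt x0 < eps,
   obtained from Rsqr_incrst_0 together with Rsqr_sqrt (squaring is
   legitimate because both sides are nonnegative); the Rcase_abs x0 case
   distinction below only disposes of negative inputs, where sqrt is 0. *)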
rewrite <- H1; rewrite sqrt_0; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite <- H1 in H5; unfold Rminus in H5; rewrite Ropp_0 in H5; rewrite Rplus_0_r in H5. case (Rcase_abs x0); intro. unfold sqrt; case (Rcase_abs x0); intro. rewrite Rabs_R0; apply H2. assert (H6 := Rge_le _ _ r0); elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H6 r)). rewrite Rabs_right. apply Rsqr_incrst_0. rewrite Rsqr_sqrt. rewrite (Rabs_right x0 r) in H5; apply H5. apply Rge_le; exact r. apply sqrt_positivity; apply Rge_le; exact r. left; exact H2. apply Rle_ge; apply sqrt_positivity; apply Rge_le; exact r. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H1 H)). Qed. coq-8.4pl2/theories/Reals/Ranalysis2.v0000640000175000001440000004014512010532755016741 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R), h <> 0 -> f2 x <> 0 -> f2 (x + h) <> 0 -> (f1 (x + h) / f2 (x + h) - f1 x / f2 x) / h - (l1 * f2 x - l2 * f1 x) / Rsqr (f2 x) = / f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1) + l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h)) - f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2) + l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x). Proof. intros; unfold Rdiv, Rminus, Rsqr. repeat rewrite Rmult_plus_distr_r; repeat rewrite Rmult_plus_distr_l; repeat rewrite Rinv_mult_distr; try assumption. replace (l1 * f2 x * (/ f2 x * / f2 x)) with (l1 * / f2 x * (f2 x * / f2 x)); [ idtac | ring ]. replace (l1 * (/ f2 x * / f2 (x + h)) * f2 x) with (l1 * / f2 (x + h) * (f2 x * / f2 x)); [ idtac | ring ]. replace (l1 * (/ f2 x * / f2 (x + h)) * - f2 (x + h)) with (- (l1 * / f2 x * (f2 (x + h) * / f2 (x + h)))); [ idtac | ring ]. replace (f1 x * (/ f2 x * / f2 (x + h)) * (f2 (x + h) * / h)) with (f1 x * / f2 x * / h * (f2 (x + h) * / f2 (x + h))); [ idtac | ring ]. replace (f1 x * (/ f2 x * / f2 (x + h)) * (- f2 x * / h)) with (- (f1 x * / f2 (x + h) * / h * (f2 x * / f2 x))); [ idtac | ring ]. replace (l2 * f1 x * (/ f2 x * / f2 x * / f2 (x + h)) * f2 (x + h)) with (l2 * f1 x * / f2 x * / f2 x * (f2 (x + h) * / f2 (x + h))); [ idtac | ring ]. replace (l2 * f1 x * (/ f2 x * / f2 x * / f2 (x + h)) * - f2 x) with (- (l2 * f1 x * / f2 x * / f2 (x + h) * (f2 x * / f2 x))); [ idtac | ring ]. repeat rewrite <- Rinv_r_sym; try assumption || ring. apply prod_neq_R0; assumption. Qed. (* begin hide *) Notation Rmin_pos := Rmin_pos (only parsing). (* compat *) (* end hide *) Lemma maj_term1 : forall (x h eps l1 alp_f2:R) (eps_f2 alp_f1d:posreal) (f1 f2:R -> R), 0 < eps -> f2 x <> 0 -> f2 (x + h) <> 0 -> (forall h:R, h <> 0 -> Rabs h < alp_f1d -> Rabs ((f1 (x + h) - f1 x) / h - l1) < Rabs (eps * f2 x / 8)) -> (forall a:R, Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) -> h <> 0 -> Rabs h < alp_f1d -> Rabs h < Rmin eps_f2 alp_f2 -> Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) < eps / 4. Proof. intros. assert (H7 := H3 h H6). assert (H8 := H2 h H4 H5). apply Rle_lt_trans with (2 / Rabs (f2 x) * Rabs ((f1 (x + h) - f1 x) / h - l1)). rewrite Rabs_mult. apply Rmult_le_compat_r. apply Rabs_pos. rewrite Rabs_Rinv; [ left; exact H7 | assumption ]. apply Rlt_le_trans with (2 / Rabs (f2 x) * Rabs (eps * f2 x / 8)). apply Rmult_lt_compat_l. unfold Rdiv; apply Rmult_lt_0_compat; [ prove_sup0 | apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption ]. exact H8. right; unfold Rdiv. repeat rewrite Rabs_mult. rewrite Rabs_Rinv; discrR. replace (Rabs 8) with 8. replace 8 with 8; [ idtac | ring ]. 
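(* Aside (hedged summary of the strategy of this file, not part of the
   original script): the lemmas maj_term1 to maj_term4 bound, each by eps / 4,
   the four summands of the decomposition proved at the top of the file,
       (f1 (x+h) / f2 (x+h) - f1 x / f2 x) / h - (l1 * f2 x - l2 * f1 x) / (f2 x)^2
         =   / f2 (x+h) * ((f1 (x+h) - f1 x) / h - l1)
           + l1 / (f2 x * f2 (x+h)) * (f2 x - f2 (x+h))
           - f1 x / (f2 x * f2 (x+h)) * ((f2 (x+h) - f2 x) / h - l2)
           + l2 * f1 x / ((f2 x)^2 * f2 (x+h)) * (f2 (x+h) - f2 x).
   The splitting eps = eps/4 + eps/4 + eps/4 + eps/4 is quadruple_var further
   down, Rabs_4 / Rlt_4 recombine the four bounds, and the "/ 8" constants in
   the hypotheses absorb the factor 2 / |f2 x| bounding / |f2 (x + a)|. *)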
rewrite Rinv_mult_distr; [ idtac | discrR | discrR ]. replace (2 * / Rabs (f2 x) * (Rabs eps * Rabs (f2 x) * (/ 2 * / 4))) with (Rabs eps * / 4 * (2 * / 2) * (Rabs (f2 x) * / Rabs (f2 x))); [ idtac | ring ]. replace (Rabs eps) with eps. repeat rewrite <- Rinv_r_sym; try discrR || (apply Rabs_no_R0; assumption). ring. symmetry ; apply Rabs_right; left; assumption. symmetry ; apply Rabs_right; left; prove_sup. Qed. Lemma maj_term2 : forall (x h eps l1 alp_f2 alp_f2t2:R) (eps_f2:posreal) (f2:R -> R), 0 < eps -> f2 x <> 0 -> f2 (x + h) <> 0 -> (forall a:R, Rabs a < alp_f2t2 -> Rabs (f2 (x + a) - f2 x) < Rabs (eps * Rsqr (f2 x) / (8 * l1))) -> (forall a:R, Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) -> h <> 0 -> Rabs h < alp_f2t2 -> Rabs h < Rmin eps_f2 alp_f2 -> l1 <> 0 -> Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) < eps / 4. Proof. intros. assert (H8 := H3 h H6). assert (H9 := H2 h H5). apply Rle_lt_trans with (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (eps * Rsqr (f2 x) / (8 * l1))). rewrite Rabs_mult; apply Rmult_le_compat_l. apply Rabs_pos. rewrite <- (Rabs_Ropp (f2 x - f2 (x + h))); rewrite Ropp_minus_distr. left; apply H9. apply Rlt_le_trans with (Rabs (2 * (l1 / (f2 x * f2 x))) * Rabs (eps * Rsqr (f2 x) / (8 * l1))). apply Rmult_lt_compat_r. apply Rabs_pos_lt. unfold Rdiv; unfold Rsqr; repeat apply prod_neq_R0; try assumption || discrR. red; intro H10; rewrite H10 in H; elim (Rlt_irrefl _ H). apply Rinv_neq_0_compat; apply prod_neq_R0; try assumption || discrR. unfold Rdiv. repeat rewrite Rinv_mult_distr; try assumption. repeat rewrite Rabs_mult. replace (Rabs 2) with 2. rewrite (Rmult_comm 2). replace (Rabs l1 * (Rabs (/ f2 x) * Rabs (/ f2 x)) * 2) with (Rabs l1 * (Rabs (/ f2 x) * (Rabs (/ f2 x) * 2))); [ idtac | ring ]. repeat apply Rmult_lt_compat_l. apply Rabs_pos_lt; assumption. apply Rabs_pos_lt; apply Rinv_neq_0_compat; assumption. repeat rewrite Rabs_Rinv; try assumption. rewrite <- (Rmult_comm 2). unfold Rdiv in H8; exact H8. symmetry ; apply Rabs_right; left; prove_sup0. right. unfold Rsqr, Rdiv. do 1 rewrite Rinv_mult_distr; try assumption || discrR. do 1 rewrite Rinv_mult_distr; try assumption || discrR. repeat rewrite Rabs_mult. repeat rewrite Rabs_Rinv; try assumption || discrR. replace (Rabs eps) with eps. replace (Rabs 8) with 8. replace (Rabs 2) with 2. replace 8 with (4 * 2); [ idtac | ring ]. rewrite Rinv_mult_distr; discrR. replace (2 * (Rabs l1 * (/ Rabs (f2 x) * / Rabs (f2 x))) * (eps * (Rabs (f2 x) * Rabs (f2 x)) * (/ 4 * / 2 * / Rabs l1))) with (eps * / 4 * (Rabs l1 * / Rabs l1) * (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f2 x) * / Rabs (f2 x)) * (2 * / 2)); [ idtac | ring ]. repeat rewrite <- Rinv_r_sym; try (apply Rabs_no_R0; assumption) || discrR. ring. symmetry ; apply Rabs_right; left; prove_sup0. symmetry ; apply Rabs_right; left; prove_sup. symmetry ; apply Rabs_right; left; assumption. Qed. Lemma maj_term3 : forall (x h eps l2 alp_f2:R) (eps_f2 alp_f2d:posreal) (f1 f2:R -> R), 0 < eps -> f2 x <> 0 -> f2 (x + h) <> 0 -> (forall h:R, h <> 0 -> Rabs h < alp_f2d -> Rabs ((f2 (x + h) - f2 x) / h - l2) < Rabs (Rsqr (f2 x) * eps / (8 * f1 x))) -> (forall a:R, Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) -> h <> 0 -> Rabs h < alp_f2d -> Rabs h < Rmin eps_f2 alp_f2 -> f1 x <> 0 -> Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) < eps / 4. Proof. intros. assert (H8 := H2 h H4 H5). assert (H9 := H3 h H6). 
apply Rle_lt_trans with (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs (Rsqr (f2 x) * eps / (8 * f1 x))). rewrite Rabs_mult. apply Rmult_le_compat_l. apply Rabs_pos. left; apply H8. apply Rlt_le_trans with (Rabs (2 * (f1 x / (f2 x * f2 x))) * Rabs (Rsqr (f2 x) * eps / (8 * f1 x))). apply Rmult_lt_compat_r. apply Rabs_pos_lt. unfold Rdiv; unfold Rsqr; repeat apply prod_neq_R0; try assumption. red; intro H10; rewrite H10 in H; elim (Rlt_irrefl _ H). apply Rinv_neq_0_compat; apply prod_neq_R0; discrR || assumption. unfold Rdiv. repeat rewrite Rinv_mult_distr; try assumption. repeat rewrite Rabs_mult. replace (Rabs 2) with 2. rewrite (Rmult_comm 2). replace (Rabs (f1 x) * (Rabs (/ f2 x) * Rabs (/ f2 x)) * 2) with (Rabs (f1 x) * (Rabs (/ f2 x) * (Rabs (/ f2 x) * 2))); [ idtac | ring ]. repeat apply Rmult_lt_compat_l. apply Rabs_pos_lt; assumption. apply Rabs_pos_lt; apply Rinv_neq_0_compat; assumption. repeat rewrite Rabs_Rinv; assumption || idtac. rewrite <- (Rmult_comm 2). unfold Rdiv in H9; exact H9. symmetry ; apply Rabs_right; left; prove_sup0. right. unfold Rsqr, Rdiv. rewrite Rinv_mult_distr; try assumption || discrR. rewrite Rinv_mult_distr; try assumption || discrR. repeat rewrite Rabs_mult. repeat rewrite Rabs_Rinv; try assumption || discrR. replace (Rabs eps) with eps. replace (Rabs 8) with 8. replace (Rabs 2) with 2. replace 8 with (4 * 2); [ idtac | ring ]. rewrite Rinv_mult_distr; discrR. replace (2 * (Rabs (f1 x) * (/ Rabs (f2 x) * / Rabs (f2 x))) * (Rabs (f2 x) * Rabs (f2 x) * eps * (/ 4 * / 2 * / Rabs (f1 x)))) with (eps * / 4 * (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f1 x) * / Rabs (f1 x)) * (2 * / 2)); [ idtac | ring ]. repeat rewrite <- Rinv_r_sym; try discrR || (apply Rabs_no_R0; assumption). ring. symmetry ; apply Rabs_right; left; prove_sup0. symmetry ; apply Rabs_right; left; prove_sup. symmetry ; apply Rabs_right; left; assumption. Qed. Lemma maj_term4 : forall (x h eps l2 alp_f2 alp_f2c:R) (eps_f2:posreal) (f1 f2:R -> R), 0 < eps -> f2 x <> 0 -> f2 (x + h) <> 0 -> (forall a:R, Rabs a < alp_f2c -> Rabs (f2 (x + a) - f2 x) < Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))) -> (forall a:R, Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) -> h <> 0 -> Rabs h < alp_f2c -> Rabs h < Rmin eps_f2 alp_f2 -> f1 x <> 0 -> l2 <> 0 -> Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x)) < eps / 4. Proof. intros. assert (H9 := H2 h H5). assert (H10 := H3 h H6). apply Rle_lt_trans with (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) * Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))). rewrite Rabs_mult. apply Rmult_le_compat_l. apply Rabs_pos. left; apply H9. apply Rlt_le_trans with (Rabs (2 * l2 * (f1 x / (Rsqr (f2 x) * f2 x))) * Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))). apply Rmult_lt_compat_r. apply Rabs_pos_lt. unfold Rdiv; unfold Rsqr; repeat apply prod_neq_R0; assumption || idtac. red; intro H11; rewrite H11 in H; elim (Rlt_irrefl _ H). apply Rinv_neq_0_compat; apply prod_neq_R0. apply prod_neq_R0. discrR. assumption. assumption. unfold Rdiv. repeat rewrite Rinv_mult_distr; try assumption || (unfold Rsqr; apply prod_neq_R0; assumption). repeat rewrite Rabs_mult. replace (Rabs 2) with 2. replace (2 * Rabs l2 * (Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * Rabs (/ f2 x)))) with (Rabs l2 * (Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * (Rabs (/ f2 x) * 2)))); [ idtac | ring ]. 
replace (Rabs l2 * Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * Rabs (/ f2 (x + h)))) with (Rabs l2 * (Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * Rabs (/ f2 (x + h))))); [ idtac | ring ]. repeat apply Rmult_lt_compat_l. apply Rabs_pos_lt; assumption. apply Rabs_pos_lt; assumption. apply Rabs_pos_lt; apply Rinv_neq_0_compat; unfold Rsqr; apply prod_neq_R0; assumption. repeat rewrite Rabs_Rinv; [ idtac | assumption | assumption ]. rewrite <- (Rmult_comm 2). unfold Rdiv in H10; exact H10. symmetry ; apply Rabs_right; left; prove_sup0. right; unfold Rsqr, Rdiv. rewrite Rinv_mult_distr; try assumption || discrR. rewrite Rinv_mult_distr; try assumption || discrR. rewrite Rinv_mult_distr; try assumption || discrR. rewrite Rinv_mult_distr; try assumption || discrR. repeat rewrite Rabs_mult. repeat rewrite Rabs_Rinv; try assumption || discrR. replace (Rabs eps) with eps. replace (Rabs 8) with 8. replace (Rabs 2) with 2. replace 8 with (4 * 2); [ idtac | ring ]. rewrite Rinv_mult_distr; discrR. replace (2 * Rabs l2 * (Rabs (f1 x) * (/ Rabs (f2 x) * / Rabs (f2 x) * / Rabs (f2 x))) * (Rabs (f2 x) * Rabs (f2 x) * Rabs (f2 x) * eps * (/ 4 * / 2 * / Rabs (f1 x) * / Rabs l2))) with (eps * / 4 * (Rabs l2 * / Rabs l2) * (Rabs (f1 x) * / Rabs (f1 x)) * (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f2 x) * / Rabs (f2 x)) * (2 * / 2)); [ idtac | ring ]. repeat rewrite <- Rinv_r_sym; try discrR || (apply Rabs_no_R0; assumption). ring. symmetry ; apply Rabs_right; left; prove_sup0. symmetry ; apply Rabs_right; left; prove_sup. symmetry ; apply Rabs_right; left; assumption. apply prod_neq_R0; assumption || discrR. apply prod_neq_R0; assumption. Qed. Lemma D_x_no_cond : forall x a:R, a <> 0 -> D_x no_cond x (x + a). Proof. intros. unfold D_x, no_cond. split. trivial. apply Rminus_not_eq. unfold Rminus. rewrite Ropp_plus_distr. rewrite <- Rplus_assoc. rewrite Rplus_opp_r. rewrite Rplus_0_l. apply Ropp_neq_0_compat; assumption. Qed. Lemma Rabs_4 : forall a b c d:R, Rabs (a + b + c + d) <= Rabs a + Rabs b + Rabs c + Rabs d. Proof. intros. apply Rle_trans with (Rabs (a + b) + Rabs (c + d)). replace (a + b + c + d) with (a + b + (c + d)); [ apply Rabs_triang | ring ]. apply Rle_trans with (Rabs a + Rabs b + Rabs (c + d)). apply Rplus_le_compat_r. apply Rabs_triang. repeat rewrite Rplus_assoc; repeat apply Rplus_le_compat_l. apply Rabs_triang. Qed. Lemma Rlt_4 : forall a b c d e f g h:R, a < b -> c < d -> e < f -> g < h -> a + c + e + g < b + d + f + h. Proof. intros; apply Rlt_trans with (b + c + e + g). repeat apply Rplus_lt_compat_r; assumption. repeat rewrite Rplus_assoc; apply Rplus_lt_compat_l. apply Rlt_trans with (d + e + g). rewrite Rplus_assoc; apply Rplus_lt_compat_r; assumption. rewrite Rplus_assoc; apply Rplus_lt_compat_l; apply Rlt_trans with (f + g). apply Rplus_lt_compat_r; assumption. apply Rplus_lt_compat_l; assumption. Qed. (* begin hide *) Notation Rmin_2 := Rmin_glb_lt (only parsing). (* end hide *) Lemma quadruple : forall x:R, 4 * x = x + x + x + x. Proof. intro; ring. Qed. Lemma quadruple_var : forall x:R, x = x / 4 + x / 4 + x / 4 + x / 4. Proof. intro; rewrite <- quadruple. unfold Rdiv; rewrite <- Rmult_assoc; rewrite Rinv_r_simpl_m; discrR. reflexivity. Qed. (**********) Lemma continuous_neq_0 : forall (f:R -> R) (x0:R), continuity_pt f x0 -> f x0 <> 0 -> exists eps : posreal, (forall h:R, Rabs h < eps -> f (x0 + h) <> 0). Proof. intros; unfold continuity_pt in H; unfold continue_in in H; unfold limit1_in in H; unfold limit_in in H; elim (H (Rabs (f x0 / 2))). 
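(* Aside (hedged reading aid for continuous_neq_0, not part of the original
   script): the continuity hypothesis is instantiated with eps := |f x0 / 2|.
   Informally, if |f (x0 + h) - f x0| < |f x0| / 2 then f (x0 + h) cannot be
   0, because that would force |f x0| < |f x0| / 2, contradicting f x0 <> 0;
   the script below derives exactly this contradiction (the detour through
   IZR 1 < IZR 2 is just the fact 1 < 2). *)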
intros; elim H1; intros. exists (mkposreal x H2). intros; assert (H5 := H3 (x0 + h)). cut (dist R_met (x0 + h) x0 < x -> dist R_met (f (x0 + h)) (f x0) < Rabs (f x0 / 2)). unfold dist; simpl; unfold R_dist; replace (x0 + h - x0) with h. intros; assert (H7 := H6 H4). red; intro. rewrite H8 in H7; unfold Rminus in H7; rewrite Rplus_0_l in H7; rewrite Rabs_Ropp in H7; unfold Rdiv in H7; rewrite Rabs_mult in H7; pattern (Rabs (f x0)) at 1 in H7; rewrite <- Rmult_1_r in H7. cut (0 < Rabs (f x0)). intro; assert (H10 := Rmult_lt_reg_l _ _ _ H9 H7). cut (Rabs (/ 2) = / 2). assert (Hyp : 0 < 2). prove_sup0. intro; rewrite H11 in H10; assert (H12 := Rmult_lt_compat_l 2 _ _ Hyp H10); rewrite Rmult_1_r in H12; rewrite <- Rinv_r_sym in H12; [ idtac | discrR ]. cut (IZR 1 < IZR 2). unfold IZR; unfold INR, Pos.to_nat; simpl; intro; elim (Rlt_irrefl 1 (Rlt_trans _ _ _ H13 H12)). apply IZR_lt; omega. unfold Rabs; case (Rcase_abs (/ 2)); intro. assert (Hyp : 0 < 2). prove_sup0. assert (H11 := Rmult_lt_compat_l 2 _ _ Hyp r); rewrite Rmult_0_r in H11; rewrite <- Rinv_r_sym in H11; [ idtac | discrR ]. elim (Rlt_irrefl 0 (Rlt_trans _ _ _ Rlt_0_1 H11)). reflexivity. apply (Rabs_pos_lt _ H0). ring. assert (H6 := Req_dec x0 (x0 + h)); elim H6; intro. intro; rewrite <- H7; unfold dist, R_met; unfold R_dist; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rabs_pos_lt. unfold Rdiv; apply prod_neq_R0; [ assumption | apply Rinv_neq_0_compat; discrR ]. intro; apply H5. split. unfold D_x, no_cond. split; trivial || assumption. assumption. change (0 < Rabs (f x0 / 2)). apply Rabs_pos_lt; unfold Rdiv; apply prod_neq_R0. assumption. apply Rinv_neq_0_compat; discrR. Qed. coq-8.4pl2/theories/Reals/Ranalysis4.v0000640000175000001440000003055512010532755016747 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (x:R), f x <> 0 -> derivable_pt f x -> derivable_pt (/ f) x. Proof. intros f x H X; cut (derivable_pt (fct_cte 1 / f) x -> derivable_pt (/ f) x). intro X0; apply X0. apply derivable_pt_div. apply derivable_pt_const. assumption. assumption. unfold div_fct, inv_fct, fct_cte; intro X0; elim X0; intros; unfold derivable_pt; exists x0; unfold derivable_pt_abs; unfold derivable_pt_lim; unfold derivable_pt_abs in p; unfold derivable_pt_lim in p; intros; elim (p eps H0); intros; exists x1; intros; unfold Rdiv in H1; unfold Rdiv; rewrite <- (Rmult_1_l (/ f x)); rewrite <- (Rmult_1_l (/ f (x + h))). apply H1; assumption. Qed. (**********) Lemma pr_nu_var : forall (f g:R -> R) (x:R) (pr1:derivable_pt f x) (pr2:derivable_pt g x), f = g -> derive_pt f x pr1 = derive_pt g x pr2. Proof. unfold derivable_pt, derive_pt; intros. elim pr1; intros. elim pr2; intros. simpl. rewrite H in p. apply uniqueness_limite with g x; assumption. Qed. (**********) Lemma pr_nu_var2 : forall (f g:R -> R) (x:R) (pr1:derivable_pt f x) (pr2:derivable_pt g x), (forall h:R, f h = g h) -> derive_pt f x pr1 = derive_pt g x pr2. Proof. unfold derivable_pt, derive_pt; intros. elim pr1; intros. elim pr2; intros. simpl. assert (H0 := uniqueness_step2 _ _ _ p). assert (H1 := uniqueness_step2 _ _ _ p0). cut (limit1_in (fun h:R => (f (x + h) - f x) / h) (fun h:R => h <> 0) x1 0). intro; assert (H3 := uniqueness_step1 _ _ _ _ H0 H2). assumption. unfold limit1_in; unfold limit_in; unfold dist; simpl; unfold R_dist; unfold limit1_in in H1; unfold limit_in in H1; unfold dist in H1; simpl in H1; unfold R_dist in H1. intros; elim (H1 eps H2); intros. 
elim H3; intros. exists x2. split. assumption. intros; do 2 rewrite H; apply H5; assumption. Qed. (**********) Lemma derivable_inv : forall f:R -> R, (forall x:R, f x <> 0) -> derivable f -> derivable (/ f). Proof. intros f H X. unfold derivable; intro x. apply derivable_pt_inv. apply (H x). apply (X x). Qed. Lemma derive_pt_inv : forall (f:R -> R) (x:R) (pr:derivable_pt f x) (na:f x <> 0), derive_pt (/ f) x (derivable_pt_inv f x na pr) = - derive_pt f x pr / Rsqr (f x). Proof. intros; replace (derive_pt (/ f) x (derivable_pt_inv f x na pr)) with (derive_pt (fct_cte 1 / f) x (derivable_pt_div (fct_cte 1) f x (derivable_pt_const 1 x) pr na)). rewrite derive_pt_div; rewrite derive_pt_const; unfold fct_cte; rewrite Rmult_0_l; rewrite Rmult_1_r; unfold Rminus; rewrite Rplus_0_l; reflexivity. apply pr_nu_var2. intro; unfold div_fct, fct_cte, inv_fct. unfold Rdiv; ring. Qed. (** Rabsolu *) Lemma Rabs_derive_1 : forall x:R, 0 < x -> derivable_pt_lim Rabs x 1. Proof. intros. unfold derivable_pt_lim; intros. exists (mkposreal x H); intros. rewrite (Rabs_right x). rewrite (Rabs_right (x + h)). rewrite Rplus_comm. unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_r. rewrite Rplus_0_r; unfold Rdiv; rewrite <- Rinv_r_sym. rewrite Rplus_opp_r; rewrite Rabs_R0; apply H0. apply H1. apply Rle_ge. case (Rcase_abs h); intro. rewrite (Rabs_left h r) in H2. left; rewrite Rplus_comm; apply Rplus_lt_reg_r with (- h); rewrite Rplus_0_r; rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; apply H2. apply Rplus_le_le_0_compat. left; apply H. apply Rge_le; apply r. left; apply H. Qed. Lemma Rabs_derive_2 : forall x:R, x < 0 -> derivable_pt_lim Rabs x (-1). Proof. intros. unfold derivable_pt_lim; intros. cut (0 < - x). intro; exists (mkposreal (- x) H1); intros. rewrite (Rabs_left x). rewrite (Rabs_left (x + h)). rewrite Rplus_comm. rewrite Ropp_plus_distr. unfold Rminus; rewrite Ropp_involutive; rewrite Rplus_assoc; rewrite Rplus_opp_l. rewrite Rplus_0_r; unfold Rdiv. rewrite Ropp_mult_distr_l_reverse. rewrite <- Rinv_r_sym. rewrite Ropp_involutive; rewrite Rplus_opp_l; rewrite Rabs_R0; apply H0. apply H2. case (Rcase_abs h); intro. apply Ropp_lt_cancel. rewrite Ropp_0; rewrite Ropp_plus_distr; apply Rplus_lt_0_compat. apply H1. apply Ropp_0_gt_lt_contravar; apply r. rewrite (Rabs_right h r) in H3. apply Rplus_lt_reg_r with (- x); rewrite Rplus_0_r; rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; apply H3. apply H. apply Ropp_0_gt_lt_contravar; apply H. Qed. (** Rabsolu is derivable for all x <> 0 *) Lemma Rderivable_pt_abs : forall x:R, x <> 0 -> derivable_pt Rabs x. Proof. intros. case (total_order_T x 0); intro. elim s; intro. unfold derivable_pt; exists (-1). apply (Rabs_derive_2 x a). elim H; exact b. unfold derivable_pt; exists 1. apply (Rabs_derive_1 x r). Qed. (** Rabsolu is continuous for all x *) Lemma Rcontinuity_abs : continuity Rabs. Proof. unfold continuity; intro. case (Req_dec x 0); intro. unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; exists eps; split. apply H0. intros; rewrite H; rewrite Rabs_R0; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; elim H1; intros; rewrite H in H3; unfold Rminus in H3; rewrite Ropp_0 in H3; rewrite Rplus_0_r in H3; apply H3. apply derivable_continuous_pt; apply (Rderivable_pt_abs x H). Qed. (** Finite sums : Sum a_k x^k *) Lemma continuity_finite_sum : forall (An:nat -> R) (N:nat), continuity (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) N). Proof. 
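(* Aside (hedged usage sketch, kept in a comment because it cannot appear
   inside the running proof): continuity_finite_sum says that every
   polynomial function y |-> sum_f_R0 (fun k => An k * y ^ k) N is continuous.
   A direct instance, with the arbitrary illustrative coefficients
   fun k => INR k and degree 10, would be:

     Example poly_continuous :
       continuity (fun y:R => sum_f_R0 (fun k:nat => INR k * y ^ k) 10%nat).
     Proof. exact (continuity_finite_sum (fun k:nat => INR k) 10%nat). Qed.
*)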
intros; unfold continuity; intro. induction N as [| N HrecN]. simpl. apply continuity_pt_const. unfold constant; intros; reflexivity. replace (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) (S N)) with ((fun y:R => sum_f_R0 (fun k:nat => (An k * y ^ k)%R) N) + (fun y:R => (An (S N) * y ^ S N)%R))%F. apply continuity_pt_plus. apply HrecN. replace (fun y:R => An (S N) * y ^ S N) with (mult_real_fct (An (S N)) (fun y:R => y ^ S N)). apply continuity_pt_scal. apply derivable_continuous_pt. apply derivable_pt_pow. reflexivity. reflexivity. Qed. Lemma derivable_pt_lim_fs : forall (An:nat -> R) (x:R) (N:nat), (0 < N)%nat -> derivable_pt_lim (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) N) x (sum_f_R0 (fun k:nat => INR (S k) * An (S k) * x ^ k) (pred N)). Proof. intros; induction N as [| N HrecN]. elim (lt_irrefl _ H). cut (N = 0%nat \/ (0 < N)%nat). intro; elim H0; intro. rewrite H1. simpl. replace (fun y:R => An 0%nat * 1 + An 1%nat * (y * 1)) with (fct_cte (An 0%nat * 1) + mult_real_fct (An 1%nat) (id * fct_cte 1))%F. replace (1 * An 1%nat * 1) with (0 + An 1%nat * (1 * fct_cte 1 x + id x * 0)). apply derivable_pt_lim_plus. apply derivable_pt_lim_const. apply derivable_pt_lim_scal. apply derivable_pt_lim_mult. apply derivable_pt_lim_id. apply derivable_pt_lim_const. unfold fct_cte, id; ring. reflexivity. replace (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) (S N)) with ((fun y:R => sum_f_R0 (fun k:nat => (An k * y ^ k)%R) N) + (fun y:R => (An (S N) * y ^ S N)%R))%F. replace (sum_f_R0 (fun k:nat => INR (S k) * An (S k) * x ^ k) (pred (S N))) with (sum_f_R0 (fun k:nat => INR (S k) * An (S k) * x ^ k) (pred N) + An (S N) * (INR (S (pred (S N))) * x ^ pred (S N))). apply derivable_pt_lim_plus. apply HrecN. assumption. replace (fun y:R => An (S N) * y ^ S N) with (mult_real_fct (An (S N)) (fun y:R => y ^ S N)). apply derivable_pt_lim_scal. replace (pred (S N)) with N; [ idtac | reflexivity ]. pattern N at 3; replace N with (pred (S N)). apply derivable_pt_lim_pow. reflexivity. reflexivity. cut (pred (S N) = S (pred N)). intro; rewrite H2. rewrite tech5. apply Rplus_eq_compat_l. rewrite <- H2. replace (pred (S N)) with N; [ idtac | reflexivity ]. ring. simpl. apply S_pred with 0%nat; assumption. unfold plus_fct. simpl; reflexivity. inversion H. left; reflexivity. right; apply lt_le_trans with 1%nat; [ apply lt_O_Sn | assumption ]. Qed. Lemma derivable_pt_lim_finite_sum : forall (An:nat -> R) (x:R) (N:nat), derivable_pt_lim (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) N) x match N with | O => 0 | _ => sum_f_R0 (fun k:nat => INR (S k) * An (S k) * x ^ k) (pred N) end. Proof. intros. induction N as [| N HrecN]. simpl. rewrite Rmult_1_r. replace (fun _:R => An 0%nat) with (fct_cte (An 0%nat)); [ apply derivable_pt_lim_const | reflexivity ]. apply derivable_pt_lim_fs; apply lt_O_Sn. Qed. Lemma derivable_pt_finite_sum : forall (An:nat -> R) (N:nat) (x:R), derivable_pt (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) N) x. Proof. intros. unfold derivable_pt. assert (H := derivable_pt_lim_finite_sum An x N). induction N as [| N HrecN]. exists 0; apply H. exists (sum_f_R0 (fun k:nat => INR (S k) * An (S k) * x ^ k) (pred (S N))); apply H. Qed. Lemma derivable_finite_sum : forall (An:nat -> R) (N:nat), derivable (fun y:R => sum_f_R0 (fun k:nat => An k * y ^ k) N). Proof. intros; unfold derivable; intro; apply derivable_pt_finite_sum. Qed. (** Regularity of hyperbolic functions *) Lemma derivable_pt_lim_cosh : forall x:R, derivable_pt_lim cosh x (sinh x). Proof. intro. unfold cosh, sinh; unfold Rdiv. 
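(* Aside (hedged worked equation, not part of the original script): with
   cosh x = (exp x + exp (- x)) / 2 and sinh x = (exp x - exp (- x)) / 2, the
   computation below amounts to
       cosh' x = (exp x + exp (- x) * (-1)) / 2 = (exp x - exp (- x)) / 2 = sinh x,
   the factor (-1) coming from the chain rule on exp (- id)
   (derivable_pt_lim_comp with derivable_pt_lim_opp and derivable_pt_lim_id). *)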
replace (fun x0:R => (exp x0 + exp (- x0)) * / 2) with ((exp + comp exp (- id)) * fct_cte (/ 2))%F; [ idtac | reflexivity ]. replace ((exp x - exp (- x)) * / 2) with ((exp x + exp (- x) * -1) * fct_cte (/ 2) x + (exp + comp exp (- id))%F x * 0). apply derivable_pt_lim_mult. apply derivable_pt_lim_plus. apply derivable_pt_lim_exp. apply derivable_pt_lim_comp. apply derivable_pt_lim_opp. apply derivable_pt_lim_id. apply derivable_pt_lim_exp. apply derivable_pt_lim_const. unfold plus_fct, mult_real_fct, comp, opp_fct, id, fct_cte; ring. Qed. Lemma derivable_pt_lim_sinh : forall x:R, derivable_pt_lim sinh x (cosh x). Proof. intro. unfold cosh, sinh; unfold Rdiv. replace (fun x0:R => (exp x0 - exp (- x0)) * / 2) with ((exp - comp exp (- id)) * fct_cte (/ 2))%F; [ idtac | reflexivity ]. replace ((exp x + exp (- x)) * / 2) with ((exp x - exp (- x) * -1) * fct_cte (/ 2) x + (exp - comp exp (- id))%F x * 0). apply derivable_pt_lim_mult. apply derivable_pt_lim_minus. apply derivable_pt_lim_exp. apply derivable_pt_lim_comp. apply derivable_pt_lim_opp. apply derivable_pt_lim_id. apply derivable_pt_lim_exp. apply derivable_pt_lim_const. unfold plus_fct, mult_real_fct, comp, opp_fct, id, fct_cte; ring. Qed. Lemma derivable_pt_exp : forall x:R, derivable_pt exp x. Proof. intro. unfold derivable_pt. exists (exp x). apply derivable_pt_lim_exp. Qed. Lemma derivable_pt_cosh : forall x:R, derivable_pt cosh x. Proof. intro. unfold derivable_pt. exists (sinh x). apply derivable_pt_lim_cosh. Qed. Lemma derivable_pt_sinh : forall x:R, derivable_pt sinh x. Proof. intro. unfold derivable_pt. exists (cosh x). apply derivable_pt_lim_sinh. Qed. Lemma derivable_exp : derivable exp. Proof. unfold derivable; apply derivable_pt_exp. Qed. Lemma derivable_cosh : derivable cosh. Proof. unfold derivable; apply derivable_pt_cosh. Qed. Lemma derivable_sinh : derivable sinh. Proof. unfold derivable; apply derivable_pt_sinh. Qed. Lemma derive_pt_exp : forall x:R, derive_pt exp x (derivable_pt_exp x) = exp x. Proof. intro; apply derive_pt_eq_0. apply derivable_pt_lim_exp. Qed. Lemma derive_pt_cosh : forall x:R, derive_pt cosh x (derivable_pt_cosh x) = sinh x. Proof. intro; apply derive_pt_eq_0. apply derivable_pt_lim_cosh. Qed. Lemma derive_pt_sinh : forall x:R, derive_pt sinh x (derivable_pt_sinh x) = cosh x. Proof. intro; apply derive_pt_eq_0. apply derivable_pt_lim_sinh. Qed. coq-8.4pl2/theories/Reals/Binomial.v0000640000175000001440000001667312010532755016455 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* C n i = C n (n - i). Proof. intros; unfold C; replace (n - (n - i))%nat with i. rewrite Rmult_comm. reflexivity. apply plus_minus; rewrite plus_comm; apply le_plus_minus; assumption. Qed. Lemma pascal_step2 : forall n i:nat, (i <= n)%nat -> C (S n) i = INR (S n) / INR (S n - i) * C n i. Proof. intros; unfold C; replace (S n - i)%nat with (S (n - i)). cut (forall n:nat, fact (S n) = (S n * fact n)%nat). intro; repeat rewrite H0. unfold Rdiv; repeat rewrite mult_INR; repeat rewrite Rinv_mult_distr. ring. apply INR_fact_neq_0. apply INR_fact_neq_0. apply not_O_INR; discriminate. apply INR_fact_neq_0. apply INR_fact_neq_0. apply prod_neq_R0. apply not_O_INR; discriminate. apply INR_fact_neq_0. intro; reflexivity. apply minus_Sn_m; assumption. Qed. Lemma pascal_step3 : forall n i:nat, (i < n)%nat -> C n (S i) = INR (n - i) / INR (S i) * C n i. Proof. intros; unfold C. cut (forall n:nat, fact (S n) = (S n * fact n)%nat). 
intro. cut ((n - i)%nat = S (n - S i)). intro. pattern (n - i)%nat at 2; rewrite H1. repeat rewrite H0; unfold Rdiv; repeat rewrite mult_INR; repeat rewrite Rinv_mult_distr. rewrite <- H1; rewrite (Rmult_comm (/ INR (n - i))); repeat rewrite Rmult_assoc; rewrite (Rmult_comm (INR (n - i))); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. ring. apply not_O_INR; apply minus_neq_O; assumption. apply not_O_INR; discriminate. apply INR_fact_neq_0. apply INR_fact_neq_0. apply prod_neq_R0; [ apply not_O_INR; discriminate | apply INR_fact_neq_0 ]. apply not_O_INR; discriminate. apply INR_fact_neq_0. apply prod_neq_R0; [ apply not_O_INR; discriminate | apply INR_fact_neq_0 ]. apply INR_fact_neq_0. rewrite minus_Sn_m. simpl; reflexivity. apply lt_le_S; assumption. intro; reflexivity. Qed. (**********) Lemma pascal : forall n i:nat, (i < n)%nat -> C n i + C n (S i) = C (S n) (S i). Proof. intros. rewrite pascal_step3; [ idtac | assumption ]. replace (C n i + INR (n - i) / INR (S i) * C n i) with (C n i * (1 + INR (n - i) / INR (S i))); [ idtac | ring ]. replace (1 + INR (n - i) / INR (S i)) with (INR (S n) / INR (S i)). rewrite pascal_step1. rewrite Rmult_comm; replace (S i) with (S n - (n - i))%nat. rewrite <- pascal_step2. apply pascal_step1. apply le_trans with n. apply le_minusni_n. apply lt_le_weak; assumption. apply le_n_Sn. apply le_minusni_n. apply lt_le_weak; assumption. rewrite <- minus_Sn_m. cut ((n - (n - i))%nat = i). intro; rewrite H0; reflexivity. symmetry ; apply plus_minus. rewrite plus_comm; rewrite le_plus_minus_r. reflexivity. apply lt_le_weak; assumption. apply le_minusni_n; apply lt_le_weak; assumption. apply lt_le_weak; assumption. unfold Rdiv. repeat rewrite S_INR. rewrite minus_INR. cut (INR i + 1 <> 0). intro. apply Rmult_eq_reg_l with (INR i + 1); [ idtac | assumption ]. rewrite Rmult_plus_distr_l. rewrite Rmult_1_r. do 2 rewrite (Rmult_comm (INR i + 1)). repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym; [ idtac | assumption ]. ring. rewrite <- S_INR. apply not_O_INR; discriminate. apply lt_le_weak; assumption. Qed. (*********************) (*********************) Lemma binomial : forall (x y:R) (n:nat), (x + y) ^ n = sum_f_R0 (fun i:nat => C n i * x ^ i * y ^ (n - i)) n. Proof. intros; induction n as [| n Hrecn]. unfold C; simpl; unfold Rdiv; repeat rewrite Rmult_1_r; rewrite Rinv_1; ring. pattern (S n) at 1; replace (S n) with (n + 1)%nat; [ idtac | ring ]. rewrite pow_add; rewrite Hrecn. replace ((x + y) ^ 1) with (x + y); [ idtac | simpl; ring ]. rewrite tech5. cut (forall p:nat, C p p = 1). cut (forall p:nat, C p 0 = 1). intros; rewrite H0; rewrite <- minus_n_n; rewrite Rmult_1_l. replace (y ^ 0) with 1; [ rewrite Rmult_1_r | simpl; reflexivity ]. induction n as [| n Hrecn0]. simpl; do 2 rewrite H; ring. (* N >= 1 *) set (N := S n). rewrite Rmult_plus_distr_l. replace (sum_f_R0 (fun i:nat => C N i * x ^ i * y ^ (N - i)) N * x) with (sum_f_R0 (fun i:nat => C N i * x ^ S i * y ^ (N - i)) N). replace (sum_f_R0 (fun i:nat => C N i * x ^ i * y ^ (N - i)) N * y) with (sum_f_R0 (fun i:nat => C N i * x ^ i * y ^ (S N - i)) N). rewrite (decomp_sum (fun i:nat => C (S N) i * x ^ i * y ^ (S N - i)) N). rewrite H; replace (x ^ 0) with 1; [ idtac | reflexivity ]. do 2 rewrite Rmult_1_l. replace (S N - 0)%nat with (S N); [ idtac | reflexivity ]. set (An := fun i:nat => C N i * x ^ S i * y ^ (N - i)). set (Bn := fun i:nat => C N (S i) * x ^ S i * y ^ (N - i)). replace (pred N) with n. 
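(* Aside (hedged numeric sanity check, not used by the proof): with
   C n i = n! / (i! * (n - i)!) read over the reals, pascal gives for n = 4,
   i = 1 the instance C 4 1 + C 4 2 = C 5 2, i.e. 4 + 6 = 10, and the
   binomial theorem being proved here gives for n = 2
       (x + y) ^ 2 = C 2 0 * x^0 * y^2 + C 2 1 * x^1 * y^1 + C 2 2 * x^2 * y^0
                   = y^2 + 2 * x * y + x^2. *)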
replace (sum_f_R0 (fun i:nat => C (S N) (S i) * x ^ S i * y ^ (S N - S i)) n) with (sum_f_R0 (fun i:nat => An i + Bn i) n). rewrite plus_sum. replace (x ^ S N) with (An (S n)). rewrite (Rplus_comm (sum_f_R0 An n)). repeat rewrite Rplus_assoc. rewrite <- tech5. fold N. set (Cn := fun i:nat => C N i * x ^ i * y ^ (S N - i)). cut (forall i:nat, (i < N)%nat -> Cn (S i) = Bn i). intro; replace (sum_f_R0 Bn n) with (sum_f_R0 (fun i:nat => Cn (S i)) n). replace (y ^ S N) with (Cn 0%nat). rewrite <- Rplus_assoc; rewrite (decomp_sum Cn N). replace (pred N) with n. ring. unfold N; simpl; reflexivity. unfold N; apply lt_O_Sn. unfold Cn; rewrite H; simpl; ring. apply sum_eq. intros; apply H1. unfold N; apply le_lt_trans with n; [ assumption | apply lt_n_Sn ]. intros; unfold Bn, Cn. replace (S N - S i)%nat with (N - i)%nat; reflexivity. unfold An; fold N; rewrite <- minus_n_n; rewrite H0; simpl; ring. apply sum_eq. intros; unfold An, Bn; replace (S N - S i)%nat with (N - i)%nat; [ idtac | reflexivity ]. rewrite <- pascal; [ ring | apply le_lt_trans with n; [ assumption | unfold N; apply lt_n_Sn ] ]. unfold N; reflexivity. unfold N; apply lt_O_Sn. rewrite <- (Rmult_comm y); rewrite scal_sum; apply sum_eq. intros; replace (S N - i)%nat with (S (N - i)). replace (S (N - i)) with (N - i + 1)%nat; [ idtac | ring ]. rewrite pow_add; replace (y ^ 1) with y; [ idtac | simpl; ring ]; ring. apply minus_Sn_m; assumption. rewrite <- (Rmult_comm x); rewrite scal_sum; apply sum_eq. intros; replace (S i) with (i + 1)%nat; [ idtac | ring ]; rewrite pow_add; replace (x ^ 1) with x; [ idtac | simpl; ring ]; ring. intro; unfold C. replace (INR (fact 0)) with 1; [ idtac | reflexivity ]. replace (p - 0)%nat with p; [ idtac | apply minus_n_O ]. rewrite Rmult_1_l; unfold Rdiv; rewrite <- Rinv_r_sym; [ reflexivity | apply INR_fact_neq_0 ]. intro; unfold C. replace (p - p)%nat with 0%nat; [ idtac | apply minus_n_n ]. replace (INR (fact 0)) with 1; [ idtac | reflexivity ]. rewrite Rmult_1_r; unfold Rdiv; rewrite <- Rinv_r_sym; [ reflexivity | apply INR_fact_neq_0 ]. Qed. coq-8.4pl2/theories/Reals/Alembert.v0000640000175000001440000006552312010532755016454 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R, (forall n:nat, 0 < An n) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) 0 -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }. Proof. intros An H H0. cut ({ l:R | is_lub (EUn (fun N:nat => sum_f_R0 An N)) l } -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }). intro X; apply X. apply completeness. unfold Un_cv in H0; unfold bound; cut (0 < / 2); [ intro | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H0 (/ 2) H1); intros. exists (sum_f_R0 An x + 2 * An (S x)). unfold is_upper_bound; intros; unfold EUn in H3; elim H3; intros. rewrite H4; assert (H5 := lt_eq_lt_dec x1 x). elim H5; intros. elim a; intro. replace (sum_f_R0 An x) with (sum_f_R0 An x1 + sum_f_R0 (fun i:nat => An (S x1 + i)%nat) (x - S x1)). pattern (sum_f_R0 An x1) at 1; rewrite <- Rplus_0_r; rewrite Rplus_assoc; apply Rplus_le_compat_l. left; apply Rplus_lt_0_compat. apply tech1; intros; apply H. apply Rmult_lt_0_compat; [ prove_sup0 | apply H ]. symmetry ; apply tech2; assumption. rewrite b; pattern (sum_f_R0 An x) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l. left; apply Rmult_lt_0_compat; [ prove_sup0 | apply H ]. replace (sum_f_R0 An x1) with (sum_f_R0 An x + sum_f_R0 (fun i:nat => An (S x + i)%nat) (x1 - S x)). apply Rplus_le_compat_l. 
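(* Aside (hedged worked equation, not part of the original script): the upper
   bound sum_f_R0 An x + 2 * An (S x) chosen above comes from the geometric
   comparison carried out next: past the rank x given by the ratio hypothesis,
   An (S x + i) <= An (S x) * (/ 2) ^ i, and by tech3
       sum_f_R0 (fun i => (/ 2) ^ i) m = (1 - (/ 2) ^ S m) / (1 - / 2) <= 2,
   so the whole tail is bounded by 2 * An (S x). *)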
cut (sum_f_R0 (fun i:nat => An (S x + i)%nat) (x1 - S x) <= An (S x) * sum_f_R0 (fun i:nat => (/ 2) ^ i) (x1 - S x)). intro; apply Rle_trans with (An (S x) * sum_f_R0 (fun i:nat => (/ 2) ^ i) (x1 - S x)). assumption. rewrite <- (Rmult_comm (An (S x))); apply Rmult_le_compat_l. left; apply H. rewrite tech3. replace (1 - / 2) with (/ 2). unfold Rdiv; rewrite Rinv_involutive. pattern 2 at 3; rewrite <- Rmult_1_r; rewrite <- (Rmult_comm 2); apply Rmult_le_compat_l. left; prove_sup0. left; apply Rplus_lt_reg_r with ((/ 2) ^ S (x1 - S x)). replace ((/ 2) ^ S (x1 - S x) + (1 - (/ 2) ^ S (x1 - S x))) with 1; [ idtac | ring ]. rewrite <- (Rplus_comm 1); pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l. apply pow_lt; apply Rinv_0_lt_compat; prove_sup0. discrR. apply Rmult_eq_reg_l with 2. rewrite Rmult_minus_distr_l; rewrite <- Rinv_r_sym. ring. discrR. discrR. pattern 1 at 3; replace 1 with (/ 1); [ apply tech7; discrR | apply Rinv_1 ]. replace (An (S x)) with (An (S x + 0)%nat). apply (tech6 (fun i:nat => An (S x + i)%nat) (/ 2)). left; apply Rinv_0_lt_compat; prove_sup0. intro; cut (forall n:nat, (n >= x)%nat -> An (S n) < / 2 * An n). intro; replace (S x + S i)%nat with (S (S x + i)). apply H6; unfold ge; apply tech8. apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; do 2 rewrite S_INR; ring. intros; unfold R_dist in H2; apply Rmult_lt_reg_l with (/ An n). apply Rinv_0_lt_compat; apply H. do 2 rewrite (Rmult_comm (/ An n)); rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; replace (An (S n) * / An n) with (Rabs (Rabs (An (S n) / An n) - 0)). apply H2; assumption. unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; rewrite Rabs_right. unfold Rdiv; reflexivity. left; unfold Rdiv; change (0 < An (S n) * / An n); apply Rmult_lt_0_compat; [ apply H | apply Rinv_0_lt_compat; apply H ]. red; intro; assert (H8 := H n); rewrite H7 in H8; elim (Rlt_irrefl _ H8). replace (S x + 0)%nat with (S x); [ reflexivity | ring ]. symmetry ; apply tech2; assumption. exists (sum_f_R0 An 0); unfold EUn; exists 0%nat; reflexivity. intro X; elim X; intros. exists x; apply Un_cv_crit_lub; [ unfold Un_growing; intro; rewrite tech5; pattern (sum_f_R0 An n) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; apply H | apply p ]. Defined. Lemma Alembert_C2 : forall An:nat -> R, (forall n:nat, An n <> 0) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) 0 -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }. Proof. intros. set (Vn := fun i:nat => (2 * Rabs (An i) + An i) / 2). set (Wn := fun i:nat => (2 * Rabs (An i) - An i) / 2). cut (forall n:nat, 0 < Vn n). intro; cut (forall n:nat, 0 < Wn n). intro; cut (Un_cv (fun n:nat => Rabs (Vn (S n) / Vn n)) 0). intro; cut (Un_cv (fun n:nat => Rabs (Wn (S n) / Wn n)) 0). intro; assert (H5 := Alembert_C1 Vn H1 H3). assert (H6 := Alembert_C1 Wn H2 H4). elim H5; intros. elim H6; intros. exists (x - x0); unfold Un_cv; unfold Un_cv in p; unfold Un_cv in p0; intros; cut (0 < eps / 2). intro; elim (p (eps / 2) H8); clear p; intros. elim (p0 (eps / 2) H8); clear p0; intros. set (N := max x1 x2). exists N; intros; replace (sum_f_R0 An n) with (sum_f_R0 Vn n - sum_f_R0 Wn n). unfold R_dist; replace (sum_f_R0 Vn n - sum_f_R0 Wn n - (x - x0)) with (sum_f_R0 Vn n - x + - (sum_f_R0 Wn n - x0)); [ idtac | ring ]; apply Rle_lt_trans with (Rabs (sum_f_R0 Vn n - x) + Rabs (- (sum_f_R0 Wn n - x0))). apply Rabs_triang. rewrite Rabs_Ropp; apply Rlt_le_trans with (eps / 2 + eps / 2). apply Rplus_lt_compat. 
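(* Aside (hedged summary, not part of the original script): Alembert_C2
   removes the positivity hypothesis of Alembert_C1 by writing
       An n = Vn n - Wn n,  with  Vn n = (2 * |An n| + An n) / 2  and
                                  Wn n = (2 * |An n| - An n) / 2.
   Both Vn and Wn are positive, and the framing
   |An n| / 2 <= Vn n <= 3 * |An n| / 2 (and likewise for Wn), established
   further down, shows that they inherit the ratio hypothesis; Alembert_C1
   then applies to each, and the series of An is the difference of the two
   limits, which is the eps/2 + eps/2 argument in progress here. *)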
unfold R_dist in H9; apply H9; unfold ge; apply le_trans with N; [ unfold N; apply le_max_l | assumption ]. unfold R_dist in H10; apply H10; unfold ge; apply le_trans with N; [ unfold N; apply le_max_r | assumption ]. right; symmetry ; apply double_var. symmetry ; apply tech11; intro; unfold Vn, Wn; unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ 2)); apply Rmult_eq_reg_l with 2. rewrite Rmult_minus_distr_l; repeat rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. ring. discrR. discrR. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. cut (forall n:nat, / 2 * Rabs (An n) <= Wn n <= 3 * / 2 * Rabs (An n)). intro; cut (forall n:nat, / Wn n <= 2 * / Rabs (An n)). intro; cut (forall n:nat, Wn (S n) / Wn n <= 3 * Rabs (An (S n) / An n)). intro; unfold Un_cv; intros; unfold Un_cv in H0; cut (0 < eps / 3). intro; elim (H0 (eps / 3) H8); intros. exists x; intros. assert (H11 := H9 n H10). unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; unfold R_dist in H11; unfold Rminus in H11; rewrite Ropp_0 in H11; rewrite Rplus_0_r in H11; rewrite Rabs_Rabsolu in H11; rewrite Rabs_right. apply Rle_lt_trans with (3 * Rabs (An (S n) / An n)). apply H6. apply Rmult_lt_reg_l with (/ 3). apply Rinv_0_lt_compat; prove_sup0. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]; rewrite Rmult_1_l; rewrite <- (Rmult_comm eps); unfold Rdiv in H11; exact H11. left; change (0 < Wn (S n) / Wn n); unfold Rdiv; apply Rmult_lt_0_compat. apply H2. apply Rinv_0_lt_compat; apply H2. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. intro; unfold Rdiv; rewrite Rabs_mult; rewrite <- Rmult_assoc; replace 3 with (2 * (3 * / 2)); [ idtac | rewrite <- Rmult_assoc; apply Rinv_r_simpl_m; discrR ]; apply Rle_trans with (Wn (S n) * 2 * / Rabs (An n)). rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply H2. apply H5. rewrite Rabs_Rinv. replace (Wn (S n) * 2 * / Rabs (An n)) with (2 * / Rabs (An n) * Wn (S n)); [ idtac | ring ]; replace (2 * (3 * / 2) * Rabs (An (S n)) * / Rabs (An n)) with (2 * / Rabs (An n) * (3 * / 2 * Rabs (An (S n)))); [ idtac | ring ]; apply Rmult_le_compat_l. left; apply Rmult_lt_0_compat. prove_sup0. apply Rinv_0_lt_compat; apply Rabs_pos_lt; apply H. elim (H4 (S n)); intros; assumption. apply H. intro; apply Rmult_le_reg_l with (Wn n). apply H2. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with (Rabs (An n)). apply Rabs_pos_lt; apply H. rewrite Rmult_1_r; replace (Rabs (An n) * (Wn n * (2 * / Rabs (An n)))) with (2 * Wn n * (Rabs (An n) * / Rabs (An n))); [ idtac | ring ]; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; apply Rmult_le_reg_l with (/ 2). apply Rinv_0_lt_compat; prove_sup0. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; elim (H4 n); intros; assumption. discrR. apply Rabs_no_R0; apply H. red; intro; assert (H6 := H2 n); rewrite H5 in H6; elim (Rlt_irrefl _ H6). intro; split. unfold Wn; unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; prove_sup0. pattern (Rabs (An n)) at 1; rewrite <- Rplus_0_r; rewrite double; unfold Rminus; rewrite Rplus_assoc; apply Rplus_le_compat_l. apply Rplus_le_reg_l with (An n). rewrite Rplus_0_r; rewrite (Rplus_comm (An n)); rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; apply RRle_abs. unfold Wn; unfold Rdiv; repeat rewrite <- (Rmult_comm (/ 2)); repeat rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; prove_sup0. 
unfold Rminus; rewrite double; replace (3 * Rabs (An n)) with (Rabs (An n) + Rabs (An n) + Rabs (An n)); [ idtac | ring ]; repeat rewrite Rplus_assoc; repeat apply Rplus_le_compat_l. rewrite <- Rabs_Ropp; apply RRle_abs. cut (forall n:nat, / 2 * Rabs (An n) <= Vn n <= 3 * / 2 * Rabs (An n)). intro; cut (forall n:nat, / Vn n <= 2 * / Rabs (An n)). intro; cut (forall n:nat, Vn (S n) / Vn n <= 3 * Rabs (An (S n) / An n)). intro; unfold Un_cv; intros; unfold Un_cv in H1; cut (0 < eps / 3). intro; elim (H0 (eps / 3) H7); intros. exists x; intros. assert (H10 := H8 n H9). unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; unfold R_dist in H10; unfold Rminus in H10; rewrite Ropp_0 in H10; rewrite Rplus_0_r in H10; rewrite Rabs_Rabsolu in H10; rewrite Rabs_right. apply Rle_lt_trans with (3 * Rabs (An (S n) / An n)). apply H5. apply Rmult_lt_reg_l with (/ 3). apply Rinv_0_lt_compat; prove_sup0. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]; rewrite Rmult_1_l; rewrite <- (Rmult_comm eps); unfold Rdiv in H10; exact H10. left; change (0 < Vn (S n) / Vn n); unfold Rdiv; apply Rmult_lt_0_compat. apply H1. apply Rinv_0_lt_compat; apply H1. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. intro; unfold Rdiv; rewrite Rabs_mult; rewrite <- Rmult_assoc; replace 3 with (2 * (3 * / 2)); [ idtac | rewrite <- Rmult_assoc; apply Rinv_r_simpl_m; discrR ]; apply Rle_trans with (Vn (S n) * 2 * / Rabs (An n)). rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply H1. apply H4. rewrite Rabs_Rinv. replace (Vn (S n) * 2 * / Rabs (An n)) with (2 * / Rabs (An n) * Vn (S n)); [ idtac | ring ]; replace (2 * (3 * / 2) * Rabs (An (S n)) * / Rabs (An n)) with (2 * / Rabs (An n) * (3 * / 2 * Rabs (An (S n)))); [ idtac | ring ]; apply Rmult_le_compat_l. left; apply Rmult_lt_0_compat. prove_sup0. apply Rinv_0_lt_compat; apply Rabs_pos_lt; apply H. elim (H3 (S n)); intros; assumption. apply H. intro; apply Rmult_le_reg_l with (Vn n). apply H1. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with (Rabs (An n)). apply Rabs_pos_lt; apply H. rewrite Rmult_1_r; replace (Rabs (An n) * (Vn n * (2 * / Rabs (An n)))) with (2 * Vn n * (Rabs (An n) * / Rabs (An n))); [ idtac | ring ]; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; apply Rmult_le_reg_l with (/ 2). apply Rinv_0_lt_compat; prove_sup0. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; elim (H3 n); intros; assumption. discrR. apply Rabs_no_R0; apply H. red; intro; assert (H5 := H1 n); rewrite H4 in H5; elim (Rlt_irrefl _ H5). intro; split. unfold Vn; unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; prove_sup0. pattern (Rabs (An n)) at 1; rewrite <- Rplus_0_r; rewrite double; rewrite Rplus_assoc; apply Rplus_le_compat_l. apply Rplus_le_reg_l with (- An n); rewrite Rplus_0_r; rewrite <- (Rplus_comm (An n)); rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; rewrite <- Rabs_Ropp; apply RRle_abs. unfold Vn; unfold Rdiv; repeat rewrite <- (Rmult_comm (/ 2)); repeat rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; prove_sup0. unfold Rminus; rewrite double; replace (3 * Rabs (An n)) with (Rabs (An n) + Rabs (An n) + Rabs (An n)); [ idtac | ring ]; repeat rewrite Rplus_assoc; repeat apply Rplus_le_compat_l; apply RRle_abs. intro; unfold Wn; unfold Rdiv; rewrite <- (Rmult_0_r (/ 2)); rewrite <- (Rmult_comm (/ 2)); apply Rmult_lt_compat_l. apply Rinv_0_lt_compat; prove_sup0. 
apply Rplus_lt_reg_r with (An n); rewrite Rplus_0_r; unfold Rminus; rewrite (Rplus_comm (An n)); rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; apply Rle_lt_trans with (Rabs (An n)). apply RRle_abs. rewrite double; pattern (Rabs (An n)) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rabs_pos_lt; apply H. intro; unfold Vn; unfold Rdiv; rewrite <- (Rmult_0_r (/ 2)); rewrite <- (Rmult_comm (/ 2)); apply Rmult_lt_compat_l. apply Rinv_0_lt_compat; prove_sup0. apply Rplus_lt_reg_r with (- An n); rewrite Rplus_0_r; unfold Rminus; rewrite (Rplus_comm (- An n)); rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; apply Rle_lt_trans with (Rabs (An n)). rewrite <- Rabs_Ropp; apply RRle_abs. rewrite double; pattern (Rabs (An n)) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rabs_pos_lt; apply H. Defined. Lemma AlembertC3_step1 : forall (An:nat -> R) (x:R), x <> 0 -> (forall n:nat, An n <> 0) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) 0 -> { l:R | Pser An x l }. Proof. intros; set (Bn := fun i:nat => An i * x ^ i). cut (forall n:nat, Bn n <> 0). intro; cut (Un_cv (fun n:nat => Rabs (Bn (S n) / Bn n)) 0). intro; assert (H4 := Alembert_C2 Bn H2 H3). elim H4; intros. exists x0; unfold Bn in p; apply tech12; assumption. unfold Un_cv; intros; unfold Un_cv in H1; cut (0 < eps / Rabs x). intro; elim (H1 (eps / Rabs x) H4); intros. exists x0; intros; unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; unfold Bn; replace (An (S n) * x ^ S n / (An n * x ^ n)) with (An (S n) / An n * x). rewrite Rabs_mult; apply Rmult_lt_reg_l with (/ Rabs x). apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. rewrite <- (Rmult_comm (Rabs x)); rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite <- (Rmult_comm eps); unfold Rdiv in H5; replace (Rabs (An (S n) / An n)) with (R_dist (Rabs (An (S n) * / An n)) 0). apply H5; assumption. unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_Rabsolu; unfold Rdiv; reflexivity. apply Rabs_no_R0; assumption. replace (S n) with (n + 1)%nat; [ idtac | ring ]; rewrite pow_add; unfold Rdiv; rewrite Rinv_mult_distr. replace (An (n + 1)%nat * (x ^ n * x ^ 1) * (/ An n * / x ^ n)) with (An (n + 1)%nat * x ^ 1 * / An n * (x ^ n * / x ^ n)); [ idtac | ring ]; rewrite <- Rinv_r_sym. simpl; ring. apply pow_nonzero; assumption. apply H0. apply pow_nonzero; assumption. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption ]. intro; unfold Bn; apply prod_neq_R0; [ apply H0 | apply pow_nonzero; assumption ]. Defined. Lemma AlembertC3_step2 : forall (An:nat -> R) (x:R), x = 0 -> { l:R | Pser An x l }. Proof. intros; exists (An 0%nat). unfold Pser; unfold infinite_sum; intros; exists 0%nat; intros; replace (sum_f_R0 (fun n0:nat => An n0 * x ^ n0) n) with (An 0%nat). unfold R_dist; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. induction n as [| n Hrecn]. simpl; ring. rewrite tech5; rewrite Hrecn; [ rewrite H; simpl; ring | unfold ge; apply le_O_n ]. Qed. (** A useful criterion of convergence for power series *) Theorem Alembert_C3 : forall (An:nat -> R) (x:R), (forall n:nat, An n <> 0) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) 0 -> { l:R | Pser An x l }. Proof. intros; case (total_order_T x 0); intro. elim s; intro. cut (x <> 0). intro; apply AlembertC3_step1; assumption. red; intro; rewrite H1 in a; elim (Rlt_irrefl _ a). apply AlembertC3_step2; assumption. cut (x <> 0). 
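(* Aside (hedged illustration of Alembert_C3, not part of the original
   script): this is the ratio test for power series: if An n <> 0 for all n
   and |An (S n) / An n| -> 0, then sum An n * x ^ n converges for every real
   x.  For example, the exponential coefficients An n = / INR (fact n)
   satisfy both hypotheses, since the ratio there is / INR (S n), which tends
   to 0. *)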
intro; apply AlembertC3_step1; assumption. red; intro; rewrite H1 in r; elim (Rlt_irrefl _ r). Defined. Lemma Alembert_C4 : forall (An:nat -> R) (k:R), 0 <= k < 1 -> (forall n:nat, 0 < An n) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) k -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }. Proof. intros An k Hyp H H0. cut ({ l:R | is_lub (EUn (fun N:nat => sum_f_R0 An N)) l } -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }). intro X; apply X. apply completeness. assert (H1 := tech13 _ _ Hyp H0). elim H1; intros. elim H2; intros. elim H4; intros. unfold bound; exists (sum_f_R0 An x0 + / (1 - x) * An (S x0)). unfold is_upper_bound; intros; unfold EUn in H6. elim H6; intros. rewrite H7. assert (H8 := lt_eq_lt_dec x2 x0). elim H8; intros. elim a; intro. replace (sum_f_R0 An x0) with (sum_f_R0 An x2 + sum_f_R0 (fun i:nat => An (S x2 + i)%nat) (x0 - S x2)). pattern (sum_f_R0 An x2) at 1; rewrite <- Rplus_0_r. rewrite Rplus_assoc; apply Rplus_le_compat_l. left; apply Rplus_lt_0_compat. apply tech1. intros; apply H. apply Rmult_lt_0_compat. apply Rinv_0_lt_compat; apply Rplus_lt_reg_r with x; rewrite Rplus_0_r; replace (x + (1 - x)) with 1; [ elim H3; intros; assumption | ring ]. apply H. symmetry ; apply tech2; assumption. rewrite b; pattern (sum_f_R0 An x0) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l. left; apply Rmult_lt_0_compat. apply Rinv_0_lt_compat; apply Rplus_lt_reg_r with x; rewrite Rplus_0_r; replace (x + (1 - x)) with 1; [ elim H3; intros; assumption | ring ]. apply H. replace (sum_f_R0 An x2) with (sum_f_R0 An x0 + sum_f_R0 (fun i:nat => An (S x0 + i)%nat) (x2 - S x0)). apply Rplus_le_compat_l. cut (sum_f_R0 (fun i:nat => An (S x0 + i)%nat) (x2 - S x0) <= An (S x0) * sum_f_R0 (fun i:nat => x ^ i) (x2 - S x0)). intro; apply Rle_trans with (An (S x0) * sum_f_R0 (fun i:nat => x ^ i) (x2 - S x0)). assumption. rewrite <- (Rmult_comm (An (S x0))); apply Rmult_le_compat_l. left; apply H. rewrite tech3. unfold Rdiv; apply Rmult_le_reg_l with (1 - x). apply Rplus_lt_reg_r with x; rewrite Rplus_0_r. replace (x + (1 - x)) with 1; [ elim H3; intros; assumption | ring ]. do 2 rewrite (Rmult_comm (1 - x)). rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; apply Rplus_le_reg_l with (x ^ S (x2 - S x0)). replace (x ^ S (x2 - S x0) + (1 - x ^ S (x2 - S x0))) with 1; [ idtac | ring ]. rewrite <- (Rplus_comm 1); pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l. left; apply pow_lt. apply Rle_lt_trans with k. elim Hyp; intros; assumption. elim H3; intros; assumption. apply Rminus_eq_contra. red; intro. elim H3; intros. rewrite H10 in H12; elim (Rlt_irrefl _ H12). red; intro. elim H3; intros. rewrite H10 in H12; elim (Rlt_irrefl _ H12). replace (An (S x0)) with (An (S x0 + 0)%nat). apply (tech6 (fun i:nat => An (S x0 + i)%nat) x). left; apply Rle_lt_trans with k. elim Hyp; intros; assumption. elim H3; intros; assumption. intro. cut (forall n:nat, (n >= x0)%nat -> An (S n) < x * An n). intro. replace (S x0 + S i)%nat with (S (S x0 + i)). apply H9. unfold ge. apply tech8. apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; do 2 rewrite S_INR; ring. intros. apply Rmult_lt_reg_l with (/ An n). apply Rinv_0_lt_compat; apply H. do 2 rewrite (Rmult_comm (/ An n)). rewrite Rmult_assoc. rewrite <- Rinv_r_sym. rewrite Rmult_1_r. replace (An (S n) * / An n) with (Rabs (An (S n) / An n)). apply H5; assumption. rewrite Rabs_right. unfold Rdiv; reflexivity. left; unfold Rdiv; change (0 < An (S n) * / An n); apply Rmult_lt_0_compat. apply H. apply Rinv_0_lt_compat; apply H. red; intro. 
assert (H11 := H n). rewrite H10 in H11; elim (Rlt_irrefl _ H11). replace (S x0 + 0)%nat with (S x0); [ reflexivity | ring ]. symmetry ; apply tech2; assumption. exists (sum_f_R0 An 0); unfold EUn; exists 0%nat; reflexivity. intro X; elim X; intros. exists x; apply Un_cv_crit_lub; [ unfold Un_growing; intro; rewrite tech5; pattern (sum_f_R0 An n) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; apply H | apply p ]. Qed. Lemma Alembert_C5 : forall (An:nat -> R) (k:R), 0 <= k < 1 -> (forall n:nat, An n <> 0) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) k -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }. Proof. intros. cut ({ l:R | Un_cv (fun N:nat => sum_f_R0 An N) l } -> { l:R | Un_cv (fun N:nat => sum_f_R0 An N) l }). intro Hyp0; apply Hyp0. apply cv_cauchy_2. apply cauchy_abs. apply cv_cauchy_1. cut ({ l:R | Un_cv (fun N:nat => sum_f_R0 (fun i:nat => Rabs (An i)) N) l } -> { l:R | Un_cv (fun N:nat => sum_f_R0 (fun i:nat => Rabs (An i)) N) l }). intro Hyp; apply Hyp. apply (Alembert_C4 (fun i:nat => Rabs (An i)) k). assumption. intro; apply Rabs_pos_lt; apply H0. unfold Un_cv. unfold Un_cv in H1. unfold Rdiv. intros. elim (H1 eps H2); intros. exists x; intros. rewrite <- Rabs_Rinv. rewrite <- Rabs_mult. rewrite Rabs_Rabsolu. unfold Rdiv in H3; apply H3; assumption. apply H0. intro X. elim X; intros. exists x. assumption. intro X. elim X; intros. exists x. assumption. Qed. (** Convergence of power series in D(O,1/k) k=0 is described in Alembert_C3 *) Lemma Alembert_C6 : forall (An:nat -> R) (x k:R), 0 < k -> (forall n:nat, An n <> 0) -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) k -> Rabs x < / k -> { l:R | Pser An x l }. intros. cut { l:R | Un_cv (fun N:nat => sum_f_R0 (fun i:nat => An i * x ^ i) N) l }. intro X. elim X; intros. exists x0. apply tech12; assumption. case (total_order_T x 0); intro. elim s; intro. eapply Alembert_C5 with (k * Rabs x). split. unfold Rdiv; apply Rmult_le_pos. left; assumption. left; apply Rabs_pos_lt. red; intro; rewrite H3 in a; elim (Rlt_irrefl _ a). apply Rmult_lt_reg_l with (/ k). apply Rinv_0_lt_compat; assumption. rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. rewrite Rmult_1_r; assumption. red; intro; rewrite H3 in H; elim (Rlt_irrefl _ H). intro; apply prod_neq_R0. apply H0. apply pow_nonzero. red; intro; rewrite H3 in a; elim (Rlt_irrefl _ a). unfold Un_cv; unfold Un_cv in H1. intros. cut (0 < eps / Rabs x). intro. elim (H1 (eps / Rabs x) H4); intros. exists x0. intros. replace (An (S n) * x ^ S n / (An n * x ^ n)) with (An (S n) / An n * x). unfold R_dist. rewrite Rabs_mult. replace (Rabs (An (S n) / An n) * Rabs x - k * Rabs x) with (Rabs x * (Rabs (An (S n) / An n) - k)); [ idtac | ring ]. rewrite Rabs_mult. rewrite Rabs_Rabsolu. apply Rmult_lt_reg_l with (/ Rabs x). apply Rinv_0_lt_compat; apply Rabs_pos_lt. red; intro; rewrite H7 in a; elim (Rlt_irrefl _ a). rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. rewrite <- (Rmult_comm eps). unfold R_dist in H5. unfold Rdiv; unfold Rdiv in H5; apply H5; assumption. apply Rabs_no_R0. red; intro; rewrite H7 in a; elim (Rlt_irrefl _ a). unfold Rdiv; replace (S n) with (n + 1)%nat; [ idtac | ring ]. rewrite pow_add. simpl. rewrite Rmult_1_r. rewrite Rinv_mult_distr. replace (An (n + 1)%nat * (x ^ n * x) * (/ An n * / x ^ n)) with (An (n + 1)%nat * / An n * x * (x ^ n * / x ^ n)); [ idtac | ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; reflexivity. apply pow_nonzero. red; intro; rewrite H7 in a; elim (Rlt_irrefl _ a). apply H0. apply pow_nonzero. 
red; intro; rewrite H7 in a; elim (Rlt_irrefl _ a). unfold Rdiv; apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat; apply Rabs_pos_lt. red; intro H7; rewrite H7 in a; elim (Rlt_irrefl _ a). exists (An 0%nat). unfold Un_cv. intros. exists 0%nat. intros. unfold R_dist. replace (sum_f_R0 (fun i:nat => An i * x ^ i) n) with (An 0%nat). unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. induction n as [| n Hrecn]. simpl; ring. rewrite tech5. rewrite <- Hrecn. rewrite b; simpl; ring. unfold ge; apply le_O_n. eapply Alembert_C5 with (k * Rabs x). split. unfold Rdiv; apply Rmult_le_pos. left; assumption. left; apply Rabs_pos_lt. red; intro; rewrite H3 in r; elim (Rlt_irrefl _ r). apply Rmult_lt_reg_l with (/ k). apply Rinv_0_lt_compat; assumption. rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. rewrite Rmult_1_r; assumption. red; intro; rewrite H3 in H; elim (Rlt_irrefl _ H). intro; apply prod_neq_R0. apply H0. apply pow_nonzero. red; intro; rewrite H3 in r; elim (Rlt_irrefl _ r). unfold Un_cv; unfold Un_cv in H1. intros. cut (0 < eps / Rabs x). intro. elim (H1 (eps / Rabs x) H4); intros. exists x0. intros. replace (An (S n) * x ^ S n / (An n * x ^ n)) with (An (S n) / An n * x). unfold R_dist. rewrite Rabs_mult. replace (Rabs (An (S n) / An n) * Rabs x - k * Rabs x) with (Rabs x * (Rabs (An (S n) / An n) - k)); [ idtac | ring ]. rewrite Rabs_mult. rewrite Rabs_Rabsolu. apply Rmult_lt_reg_l with (/ Rabs x). apply Rinv_0_lt_compat; apply Rabs_pos_lt. red; intro; rewrite H7 in r; elim (Rlt_irrefl _ r). rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_l. rewrite <- (Rmult_comm eps). unfold R_dist in H5. unfold Rdiv; unfold Rdiv in H5; apply H5; assumption. apply Rabs_no_R0. red; intro; rewrite H7 in r; elim (Rlt_irrefl _ r). unfold Rdiv; replace (S n) with (n + 1)%nat; [ idtac | ring ]. rewrite pow_add. simpl. rewrite Rmult_1_r. rewrite Rinv_mult_distr. replace (An (n + 1)%nat * (x ^ n * x) * (/ An n * / x ^ n)) with (An (n + 1)%nat * / An n * x * (x ^ n * / x ^ n)); [ idtac | ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; reflexivity. apply pow_nonzero. red; intro; rewrite H7 in r; elim (Rlt_irrefl _ r). apply H0. apply pow_nonzero. red; intro; rewrite H7 in r; elim (Rlt_irrefl _ r). unfold Rdiv; apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat; apply Rabs_pos_lt. red; intro H7; rewrite H7 in r; elim (Rlt_irrefl _ r). Qed. coq-8.4pl2/theories/Reals/RiemannInt_SF.v0000640000175000001440000032664212010532755017357 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ... *) Definition Nbound (I:nat -> Prop) : Prop := exists n : nat, (forall i:nat, I i -> (i <= n)%nat). Lemma IZN_var : forall z:Z, (0 <= z)%Z -> {n : nat | z = Z.of_nat n}. Proof. intros; apply Z_of_nat_complete_inf; assumption. Qed. Lemma Nzorn : forall I:nat -> Prop, (exists n : nat, I n) -> Nbound I -> { n:nat | I n /\ (forall i:nat, I i -> (i <= n)%nat) }. Proof. intros I H H0; set (E := fun x:R => exists i : nat, I i /\ INR i = x); assert (H1 : bound E). unfold Nbound in H0; elim H0; intros N H1; unfold bound; exists (INR N); unfold is_upper_bound; intros; unfold E in H2; elim H2; intros; elim H3; intros; rewrite <- H5; apply le_INR; apply H1; assumption. assert (H2 : exists x : R, E x). elim H; intros; exists (INR x); unfold E; exists x; split; [ assumption | reflexivity ].
assert (H3 := completeness E H1 H2); elim H3; intros; unfold is_lub in p; elim p; clear p; intros; unfold is_upper_bound in H4, H5; assert (H6 : 0 <= x). elim H2; intros; unfold E in H6; elim H6; intros; elim H7; intros; apply Rle_trans with x0; [ rewrite <- H9; change (INR 0 <= INR x1); apply le_INR; apply le_O_n | apply H4; assumption ]. assert (H7 := archimed x); elim H7; clear H7; intros; assert (H9 : x <= IZR (up x) - 1). apply H5; intros; assert (H10 := H4 _ H9); unfold E in H9; elim H9; intros; elim H11; intros; rewrite <- H13; apply Rplus_le_reg_l with 1; replace (1 + (IZR (up x) - 1)) with (IZR (up x)); [ idtac | ring ]; replace (1 + INR x1) with (INR (S x1)); [ idtac | rewrite S_INR; ring ]. assert (H14 : (0 <= up x)%Z). apply le_IZR; apply Rle_trans with x; [ apply H6 | left; assumption ]. assert (H15 := IZN _ H14); elim H15; clear H15; intros; rewrite H15; rewrite <- INR_IZR_INZ; apply le_INR; apply lt_le_S; apply INR_lt; rewrite H13; apply Rle_lt_trans with x; [ assumption | rewrite INR_IZR_INZ; rewrite <- H15; assumption ]. assert (H10 : x = IZR (up x) - 1). apply Rle_antisym; [ assumption | apply Rplus_le_reg_l with (- x + 1); replace (- x + 1 + (IZR (up x) - 1)) with (IZR (up x) - x); [ idtac | ring ]; replace (- x + 1 + x) with 1; [ assumption | ring ] ]. assert (H11 : (0 <= up x)%Z). apply le_IZR; apply Rle_trans with x; [ apply H6 | left; assumption ]. assert (H12 := IZN_var H11); elim H12; clear H12; intros; assert (H13 : E x). elim (classic (E x)); intro; try assumption. cut (forall y:R, E y -> y <= x - 1). intro; assert (H14 := H5 _ H13); cut (x - 1 < x). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H14 H15)). apply Rminus_lt; replace (x - 1 - x) with (-1); [ idtac | ring ]; rewrite <- Ropp_0; apply Ropp_lt_gt_contravar; apply Rlt_0_1. intros; assert (H14 := H4 _ H13); elim H14; intro; unfold E in H13; elim H13; intros; elim H16; intros; apply Rplus_le_reg_l with 1. replace (1 + (x - 1)) with x; [ idtac | ring ]; rewrite <- H18; replace (1 + INR x1) with (INR (S x1)); [ idtac | rewrite S_INR; ring ]. cut (x = INR (pred x0)). intro; rewrite H19; apply le_INR; apply lt_le_S; apply INR_lt; rewrite H18; rewrite <- H19; assumption. rewrite H10; rewrite p; rewrite <- INR_IZR_INZ; replace 1 with (INR 1); [ idtac | reflexivity ]; rewrite <- minus_INR. replace (x0 - 1)%nat with (pred x0); [ reflexivity | case x0; [ reflexivity | intro; simpl; apply minus_n_O ] ]. induction x0 as [| x0 Hrecx0]; [ rewrite p in H7; rewrite <- INR_IZR_INZ in H7; simpl in H7; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H6 H7)) | apply le_n_S; apply le_O_n ]. rewrite H15 in H13; elim H12; assumption. split with (pred x0); unfold E in H13; elim H13; intros; elim H12; intros; rewrite H10 in H15; rewrite p in H15; rewrite <- INR_IZR_INZ in H15; assert (H16 : INR x0 = INR x1 + 1). rewrite H15; ring. rewrite <- S_INR in H16; assert (H17 := INR_eq _ _ H16); rewrite H17; simpl; split. assumption. intros; apply INR_le; rewrite H15; rewrite <- H15; elim H12; intros; rewrite H20; apply H4; unfold E; exists i; split; [ assumption | reflexivity ]. Qed. (*******************************************) (** * Step functions *) (*******************************************) Definition open_interval (a b x:R) : Prop := a < x < b. Definition co_interval (a b x:R) : Prop := a <= x < b. 
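(* Added illustration, not part of the original development: the proofs in
   this file repeatedly show that the midpoint of two reals a < b lies in
   [open_interval a b], each time through the same [Rmult_lt_reg_l with 2]
   computation.  The lemma below (its name, [midpoint_in_open_interval], is
   introduced here only for this example) isolates that recurring argument,
   reusing the tactic sequence employed later in StepFun_P11 and
   StepFun_P21. *)
Lemma midpoint_in_open_interval :
  forall a b:R, a < b -> open_interval a b ((a + b) / 2).
Proof.
  intros a b Hab; unfold open_interval; split.
  apply Rmult_lt_reg_l with 2;
    [ prove_sup0
    | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc;
      rewrite <- Rinv_r_sym;
      [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption
      | discrR ] ].
  apply Rmult_lt_reg_l with 2;
    [ prove_sup0
    | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc;
      rewrite <- Rinv_r_sym;
      [ rewrite Rmult_1_l; rewrite (Rplus_comm a); rewrite double;
        apply Rplus_lt_compat_l; assumption
      | discrR ] ].
Qed.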
Definition adapted_couple (f:R -> R) (a b:R) (l lf:Rlist) : Prop := ordered_Rlist l /\ pos_Rl l 0 = Rmin a b /\ pos_Rl l (pred (Rlength l)) = Rmax a b /\ Rlength l = S (Rlength lf) /\ (forall i:nat, (i < pred (Rlength l))%nat -> constant_D_eq f (open_interval (pos_Rl l i) (pos_Rl l (S i))) (pos_Rl lf i)). Definition adapted_couple_opt (f:R -> R) (a b:R) (l lf:Rlist) := adapted_couple f a b l lf /\ (forall i:nat, (i < pred (Rlength lf))%nat -> pos_Rl lf i <> pos_Rl lf (S i) \/ f (pos_Rl l (S i)) <> pos_Rl lf i) /\ (forall i:nat, (i < pred (Rlength l))%nat -> pos_Rl l i <> pos_Rl l (S i)). Definition is_subdivision (f:R -> R) (a b:R) (l:Rlist) : Type := { l0:Rlist & adapted_couple f a b l l0 }. Definition IsStepFun (f:R -> R) (a b:R) : Type := { l:Rlist & is_subdivision f a b l }. (** ** Class of step functions *) Record StepFun (a b:R) : Type := mkStepFun {fe :> R -> R; pre : IsStepFun fe a b}. Definition subdivision (a b:R) (f:StepFun a b) : Rlist := projT1 (pre f). Definition subdivision_val (a b:R) (f:StepFun a b) : Rlist := match projT2 (pre f) with | existT a b => a end. Fixpoint Int_SF (l k:Rlist) : R := match l with | nil => 0 | cons a l' => match k with | nil => 0 | cons x nil => 0 | cons x (cons y k') => a * (y - x) + Int_SF l' (cons y k') end end. (** ** Integral of step functions *) Definition RiemannInt_SF (a b:R) (f:StepFun a b) : R := match Rle_dec a b with | left _ => Int_SF (subdivision_val f) (subdivision f) | right _ => - Int_SF (subdivision_val f) (subdivision f) end. (************************************) (** ** Properties of step functions *) (************************************) Lemma StepFun_P1 : forall (a b:R) (f:StepFun a b), adapted_couple f a b (subdivision f) (subdivision_val f). Proof. intros a b f; unfold subdivision_val; case (projT2 (pre f)); intros; apply a0. Qed. Lemma StepFun_P2 : forall (a b:R) (f:R -> R) (l lf:Rlist), adapted_couple f a b l lf -> adapted_couple f b a l lf. Proof. unfold adapted_couple; intros; decompose [and] H; clear H; repeat split; try assumption. rewrite H2; unfold Rmin; case (Rle_dec a b); intro; case (Rle_dec b a); intro; try reflexivity. apply Rle_antisym; assumption. apply Rle_antisym; auto with real. rewrite H1; unfold Rmax; case (Rle_dec a b); intro; case (Rle_dec b a); intro; try reflexivity. apply Rle_antisym; assumption. apply Rle_antisym; auto with real. Qed. Lemma StepFun_P3 : forall a b c:R, a <= b -> adapted_couple (fct_cte c) a b (cons a (cons b nil)) (cons c nil). Proof. intros; unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H0; inversion H0; [ simpl; assumption | elim (le_Sn_O _ H2) ]. simpl; unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. simpl; unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. unfold constant_D_eq, open_interval; intros; simpl in H0; inversion H0; [ reflexivity | elim (le_Sn_O _ H3) ]. Qed. Lemma StepFun_P4 : forall a b c:R, IsStepFun (fct_cte c) a b. Proof. intros; unfold IsStepFun; case (Rle_dec a b); intro. apply existT with (cons a (cons b nil)); unfold is_subdivision; apply existT with (cons c nil); apply (StepFun_P3 c r). apply existT with (cons b (cons a nil)); unfold is_subdivision; apply existT with (cons c nil); apply StepFun_P2; apply StepFun_P3; auto with real. Qed. Lemma StepFun_P5 : forall (a b:R) (f:R -> R) (l:Rlist), is_subdivision f a b l -> is_subdivision f b a l. Proof. destruct 1 as (x,(H0,(H1,(H2,(H3,H4))))); exists x; repeat split; try assumption. rewrite H1; apply Rmin_comm. 
rewrite H2; apply Rmax_comm. Qed. Lemma StepFun_P6 : forall (f:R -> R) (a b:R), IsStepFun f a b -> IsStepFun f b a. Proof. unfold IsStepFun; intros; elim X; intros; apply existT with x; apply StepFun_P5; assumption. Qed. Lemma StepFun_P7 : forall (a b r1 r2 r3:R) (f:R -> R) (l lf:Rlist), a <= b -> adapted_couple f a b (cons r1 (cons r2 l)) (cons r3 lf) -> adapted_couple f r2 b (cons r2 l) lf. Proof. unfold adapted_couple; intros; decompose [and] H0; clear H0; assert (H5 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (H7 : r2 <= b). rewrite H5 in H2; rewrite <- H2; apply RList_P7; [ assumption | simpl; right; left; reflexivity ]. repeat split. apply RList_P4 with r1; assumption. rewrite H5 in H2; unfold Rmin; case (Rle_dec r2 b); intro; [ reflexivity | elim n; assumption ]. unfold Rmax; case (Rle_dec r2 b); intro; [ rewrite H5 in H2; rewrite <- H2; reflexivity | elim n; assumption ]. simpl in H4; simpl; apply INR_eq; apply Rplus_eq_reg_l with 1; do 2 rewrite (Rplus_comm 1); do 2 rewrite <- S_INR; rewrite H4; reflexivity. intros; unfold constant_D_eq, open_interval; intros; unfold constant_D_eq, open_interval in H6; assert (H9 : (S i < pred (Rlength (cons r1 (cons r2 l))))%nat). simpl; simpl in H0; apply lt_n_S; assumption. assert (H10 := H6 _ H9); apply H10; assumption. Qed. Lemma StepFun_P8 : forall (f:R -> R) (l1 lf1:Rlist) (a b:R), adapted_couple f a b l1 lf1 -> a = b -> Int_SF lf1 l1 = 0. Proof. simple induction l1. intros; induction lf1 as [| r lf1 Hreclf1]; reflexivity. simple induction r0. intros; induction lf1 as [| r1 lf1 Hreclf1]. reflexivity. unfold adapted_couple in H0; decompose [and] H0; clear H0; simpl in H5; discriminate. intros; induction lf1 as [| r3 lf1 Hreclf1]. reflexivity. simpl; cut (r = r1). intro; rewrite H3; rewrite (H0 lf1 r b). ring. rewrite H3; apply StepFun_P7 with a r r3; [ right; assumption | assumption ]. clear H H0 Hreclf1 r0; unfold adapted_couple in H1; decompose [and] H1; intros; simpl in H4; rewrite H4; unfold Rmin; case (Rle_dec a b); intro; [ assumption | reflexivity ]. unfold adapted_couple in H1; decompose [and] H1; intros; apply Rle_antisym. apply (H3 0%nat); simpl; apply lt_O_Sn. simpl in H5; rewrite H2 in H5; rewrite H5; replace (Rmin b b) with (Rmax a b); [ rewrite <- H4; apply RList_P7; [ assumption | simpl; right; left; reflexivity ] | unfold Rmin, Rmax; case (Rle_dec b b); case (Rle_dec a b); intros; try assumption || reflexivity ]. Qed. Lemma StepFun_P9 : forall (a b:R) (f:R -> R) (l lf:Rlist), adapted_couple f a b l lf -> a <> b -> (2 <= Rlength l)%nat. Proof. intros; unfold adapted_couple in H; decompose [and] H; clear H; induction l as [| r l Hrecl]; [ simpl in H4; discriminate | induction l as [| r0 l Hrecl0]; [ simpl in H3; simpl in H2; generalize H3; generalize H2; unfold Rmin, Rmax; case (Rle_dec a b); intros; elim H0; rewrite <- H5; rewrite <- H7; reflexivity | simpl; do 2 apply le_n_S; apply le_O_n ] ]. Qed. Lemma StepFun_P10 : forall (f:R -> R) (l lf:Rlist) (a b:R), a <= b -> adapted_couple f a b l lf -> exists l' : Rlist, (exists lf' : Rlist, adapted_couple_opt f a b l' lf'). Proof. simple induction l. intros; unfold adapted_couple in H0; decompose [and] H0; simpl in H4; discriminate. intros; case (Req_dec a b); intro. exists (cons a nil); exists nil; unfold adapted_couple_opt; unfold adapted_couple; unfold ordered_Rlist; repeat split; try (intros; simpl in H3; elim (lt_n_O _ H3)). simpl; rewrite <- H2; unfold Rmin; case (Rle_dec a a); intro; reflexivity. 
simpl; rewrite <- H2; unfold Rmax; case (Rle_dec a a); intro; reflexivity. elim (RList_P20 _ (StepFun_P9 H1 H2)); intros t1 [t2 [t3 H3]]; induction lf as [| r1 lf Hreclf]. unfold adapted_couple in H1; decompose [and] H1; rewrite H3 in H7; simpl in H7; discriminate. clear Hreclf; assert (H4 : adapted_couple f t2 b r0 lf). rewrite H3 in H1; assert (H4 := RList_P21 _ _ H3); simpl in H4; rewrite H4; eapply StepFun_P7; [ apply H0 | apply H1 ]. cut (t2 <= b). intro; assert (H6 := H _ _ _ H5 H4); case (Req_dec t1 t2); intro Hyp_eq. replace a with t2. apply H6. rewrite <- Hyp_eq; rewrite H3 in H1; unfold adapted_couple in H1; decompose [and] H1; clear H1; simpl in H9; rewrite H9; unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. elim H6; clear H6; intros l' [lf' H6]; case (Req_dec t2 b); intro. exists (cons a (cons b nil)); exists (cons r1 nil); unfold adapted_couple_opt; unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H8; inversion H8; [ simpl; assumption | elim (le_Sn_O _ H10) ]. simpl; unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. simpl; unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. intros; simpl in H8; inversion H8. unfold constant_D_eq, open_interval; intros; simpl; simpl in H9; rewrite H3 in H1; unfold adapted_couple in H1; decompose [and] H1; apply (H16 0%nat). simpl; apply lt_O_Sn. unfold open_interval; simpl; rewrite H7; simpl in H13; rewrite H13; unfold Rmin; case (Rle_dec a b); intro; [ assumption | elim n; assumption ]. elim (le_Sn_O _ H10). intros; simpl in H8; elim (lt_n_O _ H8). intros; simpl in H8; inversion H8; [ simpl; assumption | elim (le_Sn_O _ H10) ]. assert (Hyp_min : Rmin t2 b = t2). unfold Rmin; case (Rle_dec t2 b); intro; [ reflexivity | elim n; assumption ]. unfold adapted_couple in H6; elim H6; clear H6; intros; elim (RList_P20 _ (StepFun_P9 H6 H7)); intros s1 [s2 [s3 H9]]; induction lf' as [| r2 lf' Hreclf']. unfold adapted_couple in H6; decompose [and] H6; rewrite H9 in H13; simpl in H13; discriminate. clear Hreclf'; case (Req_dec r1 r2); intro. case (Req_dec (f t2) r1); intro. exists (cons t1 (cons s2 s3)); exists (cons r1 lf'); rewrite H3 in H1; rewrite H9 in H6; unfold adapted_couple in H6, H1; decompose [and] H1; decompose [and] H6; clear H1 H6; unfold adapted_couple_opt; unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H1; induction i as [| i Hreci]. simpl; apply Rle_trans with s1. replace s1 with t2. apply (H12 0%nat). simpl; apply lt_O_Sn. simpl in H19; rewrite H19; symmetry ; apply Hyp_min. apply (H16 0%nat); simpl; apply lt_O_Sn. change (pos_Rl (cons s2 s3) i <= pos_Rl (cons s2 s3) (S i)); apply (H16 (S i)); simpl; assumption. simpl; simpl in H14; rewrite H14; reflexivity. simpl; simpl in H18; rewrite H18; unfold Rmax; case (Rle_dec a b); case (Rle_dec t2 b); intros; reflexivity || elim n; assumption. simpl; simpl in H20; apply H20. intros; simpl in H1; unfold constant_D_eq, open_interval; intros; induction i as [| i Hreci]. simpl; simpl in H6; case (total_order_T x t2); intro. elim s; intro. apply (H17 0%nat); [ simpl; apply lt_O_Sn | unfold open_interval; simpl; elim H6; intros; split; assumption ]. rewrite b0; assumption. rewrite H10; apply (H22 0%nat); [ simpl; apply lt_O_Sn | unfold open_interval; simpl; replace s1 with t2; [ elim H6; intros; split; assumption | simpl in H19; rewrite H19; rewrite Hyp_min; reflexivity ] ]. 
simpl; simpl in H6; apply (H22 (S i)); [ simpl; assumption | unfold open_interval; simpl; apply H6 ]. intros; simpl in H1; rewrite H10; change (pos_Rl (cons r2 lf') i <> pos_Rl (cons r2 lf') (S i) \/ f (pos_Rl (cons s1 (cons s2 s3)) (S i)) <> pos_Rl (cons r2 lf') i) ; rewrite <- H9; elim H8; intros; apply H6; simpl; apply H1. intros; induction i as [| i Hreci]. simpl; red; intro; elim Hyp_eq; apply Rle_antisym. apply (H12 0%nat); simpl; apply lt_O_Sn. rewrite <- Hyp_min; rewrite H6; simpl in H19; rewrite <- H19; apply (H16 0%nat); simpl; apply lt_O_Sn. elim H8; intros; rewrite H9 in H21; apply (H21 (S i)); simpl; simpl in H1; apply H1. exists (cons t1 l'); exists (cons r1 (cons r2 lf')); rewrite H9 in H6; rewrite H3 in H1; unfold adapted_couple in H1, H6; decompose [and] H6; decompose [and] H1; clear H6 H1; unfold adapted_couple_opt; unfold adapted_couple; repeat split. rewrite H9; unfold ordered_Rlist; intros; simpl in H1; induction i as [| i Hreci]. simpl; replace s1 with t2. apply (H16 0%nat); simpl; apply lt_O_Sn. simpl in H14; rewrite H14; rewrite Hyp_min; reflexivity. change (pos_Rl (cons s1 (cons s2 s3)) i <= pos_Rl (cons s1 (cons s2 s3)) (S i)) ; apply (H12 i); simpl; apply lt_S_n; assumption. simpl; simpl in H19; apply H19. rewrite H9; simpl; simpl in H13; rewrite H13; unfold Rmax; case (Rle_dec t2 b); case (Rle_dec a b); intros; reflexivity || elim n; assumption. rewrite H9; simpl; simpl in H15; rewrite H15; reflexivity. intros; simpl in H1; unfold constant_D_eq, open_interval; intros; induction i as [| i Hreci]. simpl; rewrite H9 in H6; simpl in H6; apply (H22 0%nat). simpl; apply lt_O_Sn. unfold open_interval; simpl. replace t2 with s1. assumption. simpl in H14; rewrite H14; rewrite Hyp_min; reflexivity. change (f x = pos_Rl (cons r2 lf') i); clear Hreci; apply (H17 i). simpl; rewrite H9 in H1; simpl in H1; apply lt_S_n; apply H1. rewrite H9 in H6; unfold open_interval; apply H6. intros; simpl in H1; induction i as [| i Hreci]. simpl; rewrite H9; right; simpl; replace s1 with t2. assumption. simpl in H14; rewrite H14; rewrite Hyp_min; reflexivity. elim H8; intros; apply (H6 i). simpl; apply lt_S_n; apply H1. intros; rewrite H9; induction i as [| i Hreci]. simpl; red; intro; elim Hyp_eq; apply Rle_antisym. apply (H16 0%nat); simpl; apply lt_O_Sn. rewrite <- Hyp_min; rewrite H6; simpl in H14; rewrite <- H14; right; reflexivity. elim H8; intros; rewrite <- H9; apply (H21 i); rewrite H9; rewrite H9 in H1; simpl; simpl in H1; apply lt_S_n; apply H1. exists (cons t1 l'); exists (cons r1 (cons r2 lf')); rewrite H9 in H6; rewrite H3 in H1; unfold adapted_couple in H1, H6; decompose [and] H6; decompose [and] H1; clear H6 H1; unfold adapted_couple_opt; unfold adapted_couple; repeat split. rewrite H9; unfold ordered_Rlist; intros; simpl in H1; induction i as [| i Hreci]. simpl; replace s1 with t2. apply (H15 0%nat); simpl; apply lt_O_Sn. simpl in H13; rewrite H13; rewrite Hyp_min; reflexivity. change (pos_Rl (cons s1 (cons s2 s3)) i <= pos_Rl (cons s1 (cons s2 s3)) (S i)) ; apply (H11 i); simpl; apply lt_S_n; assumption. simpl; simpl in H18; apply H18. rewrite H9; simpl; simpl in H12; rewrite H12; unfold Rmax; case (Rle_dec t2 b); case (Rle_dec a b); intros; reflexivity || elim n; assumption. rewrite H9; simpl; simpl in H14; rewrite H14; reflexivity. intros; simpl in H1; unfold constant_D_eq, open_interval; intros; induction i as [| i Hreci]. simpl; rewrite H9 in H6; simpl in H6; apply (H21 0%nat). simpl; apply lt_O_Sn. unfold open_interval; simpl; replace t2 with s1. assumption. 
simpl in H13; rewrite H13; rewrite Hyp_min; reflexivity. change (f x = pos_Rl (cons r2 lf') i); clear Hreci; apply (H16 i). simpl; rewrite H9 in H1; simpl in H1; apply lt_S_n; apply H1. rewrite H9 in H6; unfold open_interval; apply H6. intros; simpl in H1; induction i as [| i Hreci]. simpl; left; assumption. elim H8; intros; apply (H6 i). simpl; apply lt_S_n; apply H1. intros; rewrite H9; induction i as [| i Hreci]. simpl; red; intro; elim Hyp_eq; apply Rle_antisym. apply (H15 0%nat); simpl; apply lt_O_Sn. rewrite <- Hyp_min; rewrite H6; simpl in H13; rewrite <- H13; right; reflexivity. elim H8; intros; rewrite <- H9; apply (H20 i); rewrite H9; rewrite H9 in H1; simpl; simpl in H1; apply lt_S_n; apply H1. rewrite H3 in H1; clear H4; unfold adapted_couple in H1; decompose [and] H1; clear H1; clear H H7 H9; cut (Rmax a b = b); [ intro; rewrite H in H5; rewrite <- H5; apply RList_P7; [ assumption | simpl; right; left; reflexivity ] | unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ] ]. Qed. Lemma StepFun_P11 : forall (a b r r1 r3 s1 s2 r4:R) (r2 lf1 s3 lf2:Rlist) (f:R -> R), a < b -> adapted_couple f a b (cons r (cons r1 r2)) (cons r3 lf1) -> adapted_couple_opt f a b (cons s1 (cons s2 s3)) (cons r4 lf2) -> r1 <= s2. Proof. intros; unfold adapted_couple_opt in H1; elim H1; clear H1; intros; unfold adapted_couple in H0, H1; decompose [and] H0; decompose [and] H1; clear H0 H1; assert (H12 : r = s1). simpl in H10; simpl in H5; rewrite H10; rewrite H5; reflexivity. assert (H14 := H3 0%nat (lt_O_Sn _)); simpl in H14; elim H14; intro. assert (H15 := H7 0%nat (lt_O_Sn _)); simpl in H15; elim H15; intro. rewrite <- H12 in H1; case (Rle_dec r1 s2); intro; try assumption. assert (H16 : s2 < r1); auto with real. induction s3 as [| r0 s3 Hrecs3]. simpl in H9; rewrite H9 in H16; cut (r1 <= Rmax a b). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H17 H16)). rewrite <- H4; apply RList_P7; [ assumption | simpl; right; left; reflexivity ]. clear Hrecs3; induction lf2 as [| r5 lf2 Hreclf2]. simpl in H11; discriminate. clear Hreclf2; assert (H17 : r3 = r4). set (x := (r + s2) / 2); assert (H17 := H8 0%nat (lt_O_Sn _)); assert (H18 := H13 0%nat (lt_O_Sn _)); unfold constant_D_eq, open_interval in H17, H18; simpl in H17; simpl in H18; rewrite <- (H17 x). rewrite <- (H18 x). reflexivity. rewrite <- H12; unfold x; split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite (Rplus_comm r); rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. unfold x; split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. apply Rlt_trans with s2; [ apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite (Rplus_comm r); rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ] | assumption ]. assert (H18 : f s2 = r3). apply (H8 0%nat); [ simpl; apply lt_O_Sn | unfold open_interval; simpl; split; assumption ]. assert (H19 : r3 = r5). 
assert (H19 := H7 1%nat); simpl in H19; assert (H20 := H19 (lt_n_S _ _ (lt_O_Sn _))); elim H20; intro. set (x := (s2 + Rmin r1 r0) / 2); assert (H22 := H8 0%nat); assert (H23 := H13 1%nat); simpl in H22; simpl in H23; rewrite <- (H22 (lt_O_Sn _) x). rewrite <- (H23 (lt_n_S _ _ (lt_O_Sn _)) x). reflexivity. unfold open_interval; simpl; unfold x; split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; unfold Rmin; case (Rle_dec r1 r0); intro; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rlt_le_trans with (r0 + Rmin r1 r0); [ do 2 rewrite <- (Rplus_comm (Rmin r1 r0)); apply Rplus_lt_compat_l; assumption | apply Rplus_le_compat_l; apply Rmin_r ] | discrR ] ]. unfold open_interval; simpl; unfold x; split. apply Rlt_trans with s2; [ assumption | apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; unfold Rmin; case (Rle_dec r1 r0); intro; assumption | discrR ] ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rlt_le_trans with (r1 + Rmin r1 r0); [ do 2 rewrite <- (Rplus_comm (Rmin r1 r0)); apply Rplus_lt_compat_l; assumption | apply Rplus_le_compat_l; apply Rmin_l ] | discrR ] ]. elim H2; clear H2; intros; assert (H23 := H22 1%nat); simpl in H23; assert (H24 := H23 (lt_n_S _ _ (lt_O_Sn _))); elim H24; assumption. elim H2; intros; assert (H22 := H20 0%nat); simpl in H22; assert (H23 := H22 (lt_O_Sn _)); elim H23; intro; [ elim H24; rewrite <- H17; rewrite <- H19; reflexivity | elim H24; rewrite <- H17; assumption ]. elim H2; clear H2; intros; assert (H17 := H16 0%nat); simpl in H17; elim (H17 (lt_O_Sn _)); assumption. rewrite <- H0; rewrite H12; apply (H7 0%nat); simpl; apply lt_O_Sn. Qed. Lemma StepFun_P12 : forall (a b:R) (f:R -> R) (l lf:Rlist), adapted_couple_opt f a b l lf -> adapted_couple_opt f b a l lf. Proof. unfold adapted_couple_opt; unfold adapted_couple; intros; decompose [and] H; clear H; repeat split; try assumption. rewrite H0; unfold Rmin; case (Rle_dec a b); intro; case (Rle_dec b a); intro; try reflexivity. apply Rle_antisym; assumption. apply Rle_antisym; auto with real. rewrite H3; unfold Rmax; case (Rle_dec a b); intro; case (Rle_dec b a); intro; try reflexivity. apply Rle_antisym; assumption. apply Rle_antisym; auto with real. Qed. Lemma StepFun_P13 : forall (a b r r1 r3 s1 s2 r4:R) (r2 lf1 s3 lf2:Rlist) (f:R -> R), a <> b -> adapted_couple f a b (cons r (cons r1 r2)) (cons r3 lf1) -> adapted_couple_opt f a b (cons s1 (cons s2 s3)) (cons r4 lf2) -> r1 <= s2. Proof. intros; case (total_order_T a b); intro. elim s; intro. eapply StepFun_P11; [ apply a0 | apply H0 | apply H1 ]. elim H; assumption. eapply StepFun_P11; [ apply r0 | apply StepFun_P2; apply H0 | apply StepFun_P12; apply H1 ]. Qed. Lemma StepFun_P14 : forall (f:R -> R) (l1 l2 lf1 lf2:Rlist) (a b:R), a <= b -> adapted_couple f a b l1 lf1 -> adapted_couple_opt f a b l2 lf2 -> Int_SF lf1 l1 = Int_SF lf2 l2. Proof. simple induction l1. 
intros l2 lf1 lf2 a b Hyp H H0; unfold adapted_couple in H; decompose [and] H; clear H H0 H2 H3 H1 H6; simpl in H4; discriminate. simple induction r0. intros; case (Req_dec a b); intro. unfold adapted_couple_opt in H2; elim H2; intros; rewrite (StepFun_P8 H4 H3); rewrite (StepFun_P8 H1 H3); reflexivity. assert (H4 := StepFun_P9 H1 H3); simpl in H4; elim (le_Sn_O _ (le_S_n _ _ H4)). intros; clear H; unfold adapted_couple_opt in H3; elim H3; clear H3; intros; case (Req_dec a b); intro. rewrite (StepFun_P8 H2 H4); rewrite (StepFun_P8 H H4); reflexivity. assert (Hyp_min : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (Hyp_max : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. elim (RList_P20 _ (StepFun_P9 H H4)); intros s1 [s2 [s3 H5]]; rewrite H5 in H; rewrite H5; induction lf1 as [| r3 lf1 Hreclf1]. unfold adapted_couple in H2; decompose [and] H2; clear H H2 H4 H5 H3 H6 H8 H7 H11; simpl in H9; discriminate. clear Hreclf1; induction lf2 as [| r4 lf2 Hreclf2]. unfold adapted_couple in H; decompose [and] H; clear H H2 H4 H5 H3 H6 H8 H7 H11; simpl in H9; discriminate. clear Hreclf2; assert (H6 : r = s1). unfold adapted_couple in H, H2; decompose [and] H; decompose [and] H2; clear H H2; simpl in H13; simpl in H8; rewrite H13; rewrite H8; reflexivity. assert (H7 : r3 = r4 \/ r = r1). case (Req_dec r r1); intro. right; assumption. left; cut (r1 <= s2). intro; unfold adapted_couple in H2, H; decompose [and] H; decompose [and] H2; clear H H2; set (x := (r + r1) / 2); assert (H18 := H14 0%nat); assert (H20 := H19 0%nat); unfold constant_D_eq, open_interval in H18, H20; simpl in H18; simpl in H20; rewrite <- (H18 (lt_O_Sn _) x). rewrite <- (H20 (lt_O_Sn _) x). reflexivity. assert (H21 := H13 0%nat (lt_O_Sn _)); simpl in H21; elim H21; intro; [ idtac | elim H7; assumption ]; unfold x; split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; apply H | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite <- (Rplus_comm r1); rewrite double; apply Rplus_lt_compat_l; apply H | discrR ] ]. rewrite <- H6; assert (H21 := H13 0%nat (lt_O_Sn _)); simpl in H21; elim H21; intro; [ idtac | elim H7; assumption ]; unfold x; split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; apply H | discrR ] ]. apply Rlt_le_trans with r1; [ apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite <- (Rplus_comm r1); rewrite double; apply Rplus_lt_compat_l; apply H | discrR ] ] | assumption ]. eapply StepFun_P13. apply H4. apply H2. unfold adapted_couple_opt; split. apply H. rewrite H5 in H3; apply H3. assert (H8 : r1 <= s2). eapply StepFun_P13. apply H4. apply H2. unfold adapted_couple_opt; split. apply H. rewrite H5 in H3; apply H3. elim H7; intro. simpl; elim H8; intro. replace (r4 * (s2 - s1)) with (r3 * (r1 - r) + r3 * (s2 - r1)); [ idtac | rewrite H9; rewrite H6; ring ]. 
rewrite Rplus_assoc; apply Rplus_eq_compat_l; change (Int_SF lf1 (cons r1 r2) = Int_SF (cons r3 lf2) (cons r1 (cons s2 s3))) ; apply H0 with r1 b. unfold adapted_couple in H2; decompose [and] H2; clear H2; replace b with (Rmax a b). rewrite <- H12; apply RList_P7; [ assumption | simpl; right; left; reflexivity ]. eapply StepFun_P7. apply H1. apply H2. unfold adapted_couple_opt; split. apply StepFun_P7 with a a r3. apply H1. unfold adapted_couple in H2, H; decompose [and] H2; decompose [and] H; clear H H2; assert (H20 : r = a). simpl in H13; rewrite H13; apply Hyp_min. unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H; induction i as [| i Hreci]. simpl; rewrite <- H20; apply (H11 0%nat). simpl; apply lt_O_Sn. induction i as [| i Hreci0]. simpl; assumption. change (pos_Rl (cons s2 s3) i <= pos_Rl (cons s2 s3) (S i)); apply (H15 (S i)); simpl; apply lt_S_n; assumption. simpl; symmetry ; apply Hyp_min. rewrite <- H17; reflexivity. simpl in H19; simpl; rewrite H19; reflexivity. intros; simpl in H; unfold constant_D_eq, open_interval; intros; induction i as [| i Hreci]. simpl; apply (H16 0%nat). simpl; apply lt_O_Sn. simpl in H2; rewrite <- H20 in H2; unfold open_interval; simpl; apply H2. clear Hreci; induction i as [| i Hreci]. simpl; simpl in H2; rewrite H9; apply (H21 0%nat). simpl; apply lt_O_Sn. unfold open_interval; simpl; elim H2; intros; split. apply Rle_lt_trans with r1; try assumption; rewrite <- H6; apply (H11 0%nat); simpl; apply lt_O_Sn. assumption. clear Hreci; simpl; apply (H21 (S i)). simpl; apply lt_S_n; assumption. unfold open_interval; apply H2. elim H3; clear H3; intros; split. rewrite H9; change (forall i:nat, (i < pred (Rlength (cons r4 lf2)))%nat -> pos_Rl (cons r4 lf2) i <> pos_Rl (cons r4 lf2) (S i) \/ f (pos_Rl (cons s1 (cons s2 s3)) (S i)) <> pos_Rl (cons r4 lf2) i) ; rewrite <- H5; apply H3. rewrite H5 in H11; intros; simpl in H12; induction i as [| i Hreci]. simpl; red; intro; rewrite H13 in H10; elim (Rlt_irrefl _ H10). clear Hreci; apply (H11 (S i)); simpl; apply H12. rewrite H9; rewrite H10; rewrite H6; apply Rplus_eq_compat_l; rewrite <- H10; apply H0 with r1 b. unfold adapted_couple in H2; decompose [and] H2; clear H2; replace b with (Rmax a b). rewrite <- H12; apply RList_P7; [ assumption | simpl; right; left; reflexivity ]. eapply StepFun_P7. apply H1. apply H2. unfold adapted_couple_opt; split. apply StepFun_P7 with a a r3. apply H1. unfold adapted_couple in H2, H; decompose [and] H2; decompose [and] H; clear H H2; assert (H20 : r = a). simpl in H13; rewrite H13; apply Hyp_min. unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H; induction i as [| i Hreci]. simpl; rewrite <- H20; apply (H11 0%nat); simpl; apply lt_O_Sn. rewrite H10; apply (H15 (S i)); simpl; assumption. simpl; symmetry ; apply Hyp_min. rewrite <- H17; rewrite H10; reflexivity. simpl in H19; simpl; apply H19. intros; simpl in H; unfold constant_D_eq, open_interval; intros; induction i as [| i Hreci]. simpl; apply (H16 0%nat). simpl; apply lt_O_Sn. simpl in H2; rewrite <- H20 in H2; unfold open_interval; simpl; apply H2. clear Hreci; simpl; apply (H21 (S i)). simpl; assumption. rewrite <- H10; unfold open_interval; apply H2. elim H3; clear H3; intros; split. rewrite H5 in H3; intros; apply (H3 (S i)). simpl; replace (Rlength lf2) with (S (pred (Rlength lf2))). apply lt_n_S; apply H12. symmetry ; apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H13 in H12; elim (lt_n_O _ H12). 
intros; simpl in H12; rewrite H10; rewrite H5 in H11; apply (H11 (S i)); simpl; apply lt_n_S; apply H12. simpl; rewrite H9; unfold Rminus; rewrite Rplus_opp_r; rewrite Rmult_0_r; rewrite Rplus_0_l; change (Int_SF lf1 (cons r1 r2) = Int_SF (cons r4 lf2) (cons s1 (cons s2 s3))) ; eapply H0. apply H1. 2: rewrite H5 in H3; unfold adapted_couple_opt; split; assumption. assert (H10 : r = a). unfold adapted_couple in H2; decompose [and] H2; clear H2; simpl in H12; rewrite H12; apply Hyp_min. rewrite <- H9; rewrite H10; apply StepFun_P7 with a r r3; [ apply H1 | pattern a at 2; rewrite <- H10; pattern r at 2; rewrite H9; apply H2 ]. Qed. Lemma StepFun_P15 : forall (f:R -> R) (l1 l2 lf1 lf2:Rlist) (a b:R), adapted_couple f a b l1 lf1 -> adapted_couple_opt f a b l2 lf2 -> Int_SF lf1 l1 = Int_SF lf2 l2. Proof. intros; case (Rle_dec a b); intro; [ apply (StepFun_P14 r H H0) | assert (H1 : b <= a); [ auto with real | eapply StepFun_P14; [ apply H1 | apply StepFun_P2; apply H | apply StepFun_P12; apply H0 ] ] ]. Qed. Lemma StepFun_P16 : forall (f:R -> R) (l lf:Rlist) (a b:R), adapted_couple f a b l lf -> exists l' : Rlist, (exists lf' : Rlist, adapted_couple_opt f a b l' lf'). Proof. intros; case (Rle_dec a b); intro; [ apply (StepFun_P10 r H) | assert (H1 : b <= a); [ auto with real | assert (H2 := StepFun_P10 H1 (StepFun_P2 H)); elim H2; intros l' [lf' H3]; exists l'; exists lf'; apply StepFun_P12; assumption ] ]. Qed. Lemma StepFun_P17 : forall (f:R -> R) (l1 l2 lf1 lf2:Rlist) (a b:R), adapted_couple f a b l1 lf1 -> adapted_couple f a b l2 lf2 -> Int_SF lf1 l1 = Int_SF lf2 l2. Proof. intros; elim (StepFun_P16 H); intros l' [lf' H1]; rewrite (StepFun_P15 H H1); rewrite (StepFun_P15 H0 H1); reflexivity. Qed. Lemma StepFun_P18 : forall a b c:R, RiemannInt_SF (mkStepFun (StepFun_P4 a b c)) = c * (b - a). Proof. intros; unfold RiemannInt_SF; case (Rle_dec a b); intro. replace (Int_SF (subdivision_val (mkStepFun (StepFun_P4 a b c))) (subdivision (mkStepFun (StepFun_P4 a b c)))) with (Int_SF (cons c nil) (cons a (cons b nil))); [ simpl; ring | apply StepFun_P17 with (fct_cte c) a b; [ apply StepFun_P3; assumption | apply (StepFun_P1 (mkStepFun (StepFun_P4 a b c))) ] ]. replace (Int_SF (subdivision_val (mkStepFun (StepFun_P4 a b c))) (subdivision (mkStepFun (StepFun_P4 a b c)))) with (Int_SF (cons c nil) (cons b (cons a nil))); [ simpl; ring | apply StepFun_P17 with (fct_cte c) a b; [ apply StepFun_P2; apply StepFun_P3; auto with real | apply (StepFun_P1 (mkStepFun (StepFun_P4 a b c))) ] ]. Qed. Lemma StepFun_P19 : forall (l1:Rlist) (f g:R -> R) (l:R), Int_SF (FF l1 (fun x:R => f x + l * g x)) l1 = Int_SF (FF l1 f) l1 + l * Int_SF (FF l1 g) l1. Proof. intros; induction l1 as [| r l1 Hrecl1]; [ simpl; ring | induction l1 as [| r0 l1 Hrecl0]; simpl; [ ring | simpl in Hrecl1; rewrite Hrecl1; ring ] ]. Qed. Lemma StepFun_P20 : forall (l:Rlist) (f:R -> R), (0 < Rlength l)%nat -> Rlength l = S (Rlength (FF l f)). Proof. intros l f H; induction l; [ elim (lt_irrefl _ H) | simpl; rewrite RList_P18; rewrite RList_P14; reflexivity ]. Qed. Lemma StepFun_P21 : forall (a b:R) (f:R -> R) (l:Rlist), is_subdivision f a b l -> adapted_couple f a b l (FF l f). Proof. intros; unfold adapted_couple; unfold is_subdivision in X; unfold adapted_couple in X; elim X; clear X; intros; decompose [and] p; clear p; repeat split; try assumption. apply StepFun_P20; rewrite H2; apply lt_O_Sn. intros; assert (H5 := H4 _ H3); unfold constant_D_eq, open_interval in H5; unfold constant_D_eq, open_interval; intros; induction l as [| r l Hrecl]. 
discriminate. unfold FF; rewrite RList_P12. simpl; change (f x0 = f (pos_Rl (mid_Rlist (cons r l) r) (S i))); rewrite RList_P13; try assumption; rewrite (H5 x0 H6); rewrite H5. reflexivity. split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; elim H6; intros; apply Rlt_trans with x0; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; rewrite (Rplus_comm (pos_Rl (cons r l) i)); apply Rplus_lt_compat_l; elim H6; intros; apply Rlt_trans with x0; assumption | discrR ] ]. rewrite RList_P14; simpl in H3; apply H3. Qed. Lemma StepFun_P22 : forall (a b:R) (f g:R -> R) (lf lg:Rlist), a <= b -> is_subdivision f a b lf -> is_subdivision g a b lg -> is_subdivision f a b (cons_ORlist lf lg). Proof. unfold is_subdivision; intros a b f g lf lg Hyp X X0; elim X; elim X0; clear X X0; intros lg0 p lf0 p0; assert (Hyp_min : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (Hyp_max : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. apply existT with (FF (cons_ORlist lf lg) f); unfold adapted_couple in p, p0; decompose [and] p; decompose [and] p0; clear p p0; rewrite Hyp_min in H6; rewrite Hyp_min in H1; rewrite Hyp_max in H0; rewrite Hyp_max in H5; unfold adapted_couple; repeat split. apply RList_P2; assumption. rewrite Hyp_min; symmetry ; apply Rle_antisym. induction lf as [| r lf Hreclf]. simpl; right; symmetry ; assumption. assert (H10 : In (pos_Rl (cons_ORlist (cons r lf) lg) 0) (cons_ORlist (cons r lf) lg)). elim (RList_P3 (cons_ORlist (cons r lf) lg) (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros _ H10; apply H10; exists 0%nat; split; [ reflexivity | rewrite RList_P11; simpl; apply lt_O_Sn ]. elim (RList_P9 (cons r lf) lg (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros H12 _; assert (H13 := H12 H10); elim H13; intro. elim (RList_P3 (cons r lf) (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros H11 _; assert (H14 := H11 H8); elim H14; intros; elim H15; clear H15; intros; rewrite H15; rewrite <- H6; elim (RList_P6 (cons r lf)); intros; apply H17; [ assumption | apply le_O_n | assumption ]. elim (RList_P3 lg (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros H11 _; assert (H14 := H11 H8); elim H14; intros; elim H15; clear H15; intros; rewrite H15; rewrite <- H1; elim (RList_P6 lg); intros; apply H17; [ assumption | apply le_O_n | assumption ]. induction lf as [| r lf Hreclf]. simpl; right; assumption. assert (H8 : In a (cons_ORlist (cons r lf) lg)). elim (RList_P9 (cons r lf) lg a); intros; apply H10; left; elim (RList_P3 (cons r lf) a); intros; apply H12; exists 0%nat; split; [ symmetry ; assumption | simpl; apply lt_O_Sn ]. apply RList_P5; [ apply RList_P2; assumption | assumption ]. rewrite Hyp_max; apply Rle_antisym. induction lf as [| r lf Hreclf]. simpl; right; assumption. assert (H8 : In (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg)))) (cons_ORlist (cons r lf) lg)). elim (RList_P3 (cons_ORlist (cons r lf) lg) (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros _ H10; apply H10; exists (pred (Rlength (cons_ORlist (cons r lf) lg))); split; [ reflexivity | rewrite RList_P11; simpl; apply lt_n_Sn ]. 
elim (RList_P9 (cons r lf) lg (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros H10 _. assert (H11 := H10 H8); elim H11; intro. elim (RList_P3 (cons r lf) (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros H13 _; assert (H14 := H13 H12); elim H14; intros; elim H15; clear H15; intros; rewrite H15; rewrite <- H5; elim (RList_P6 (cons r lf)); intros; apply H17; [ assumption | simpl; simpl in H14; apply lt_n_Sm_le; assumption | simpl; apply lt_n_Sn ]. elim (RList_P3 lg (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros H13 _; assert (H14 := H13 H12); elim H14; intros; elim H15; clear H15; intros. rewrite H15; assert (H17 : Rlength lg = S (pred (Rlength lg))). apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H17 in H16; elim (lt_n_O _ H16). rewrite <- H0; elim (RList_P6 lg); intros; apply H18; [ assumption | rewrite H17 in H16; apply lt_n_Sm_le; assumption | apply lt_pred_n_n; rewrite H17; apply lt_O_Sn ]. induction lf as [| r lf Hreclf]. simpl; right; symmetry ; assumption. assert (H8 : In b (cons_ORlist (cons r lf) lg)). elim (RList_P9 (cons r lf) lg b); intros; apply H10; left; elim (RList_P3 (cons r lf) b); intros; apply H12; exists (pred (Rlength (cons r lf))); split; [ symmetry ; assumption | simpl; apply lt_n_Sn ]. apply RList_P7; [ apply RList_P2; assumption | assumption ]. apply StepFun_P20; rewrite RList_P11; rewrite H2; rewrite H7; simpl; apply lt_O_Sn. intros; unfold constant_D_eq, open_interval; intros; cut (exists l : R, constant_D_eq f (open_interval (pos_Rl (cons_ORlist lf lg) i) (pos_Rl (cons_ORlist lf lg) (S i))) l). intros; elim H11; clear H11; intros; assert (H12 := H11); assert (Hyp_cons : exists r : R, (exists r0 : Rlist, cons_ORlist lf lg = cons r r0)). apply RList_P19; red; intro; rewrite H13 in H8; elim (lt_n_O _ H8). elim Hyp_cons; clear Hyp_cons; intros r [r0 Hyp_cons]; rewrite Hyp_cons; unfold FF; rewrite RList_P12. change (f x = f (pos_Rl (mid_Rlist (cons r r0) r) (S i))); rewrite <- Hyp_cons; rewrite RList_P13. assert (H13 := RList_P2 _ _ H _ H8); elim H13; intro. unfold constant_D_eq, open_interval in H11, H12; rewrite (H11 x H10); assert (H15 : pos_Rl (cons_ORlist lf lg) i < (pos_Rl (cons_ORlist lf lg) i + pos_Rl (cons_ORlist lf lg) (S i)) / 2 < pos_Rl (cons_ORlist lf lg) (S i)). split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; rewrite (Rplus_comm (pos_Rl (cons_ORlist lf lg) i)); apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite (H11 _ H15); reflexivity. elim H10; intros; rewrite H14 in H15; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H16 H15)). apply H8. rewrite RList_P14; rewrite Hyp_cons in H8; simpl in H8; apply H8. assert (H11 : a < b). apply Rle_lt_trans with (pos_Rl (cons_ORlist lf lg) i). rewrite <- H6; rewrite <- (RList_P15 lf lg). elim (RList_P6 (cons_ORlist lf lg)); intros; apply H11. apply RList_P2; assumption. apply le_O_n. apply lt_trans with (pred (Rlength (cons_ORlist lf lg))); [ assumption | apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H13 in H8; elim (lt_n_O _ H8) ]. assumption. assumption. rewrite H1; assumption. 
apply Rlt_le_trans with (pos_Rl (cons_ORlist lf lg) (S i)). elim H10; intros; apply Rlt_trans with x; assumption. rewrite <- H5; rewrite <- (RList_P16 lf lg); try assumption. elim (RList_P6 (cons_ORlist lf lg)); intros; apply H11. apply RList_P2; assumption. apply lt_n_Sm_le; apply lt_n_S; assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H13 in H8; elim (lt_n_O _ H8). rewrite H0; assumption. set (I := fun j:nat => pos_Rl lf j <= pos_Rl (cons_ORlist lf lg) i /\ (j < Rlength lf)%nat); assert (H12 : Nbound I). unfold Nbound; exists (Rlength lf); intros; unfold I in H12; elim H12; intros; apply lt_le_weak; assumption. assert (H13 : exists n : nat, I n). exists 0%nat; unfold I; split. apply Rle_trans with (pos_Rl (cons_ORlist lf lg) 0). right; symmetry . apply RList_P15; try assumption; rewrite H1; assumption. elim (RList_P6 (cons_ORlist lf lg)); intros; apply H13. apply RList_P2; assumption. apply le_O_n. apply lt_trans with (pred (Rlength (cons_ORlist lf lg))). assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H15 in H8; elim (lt_n_O _ H8). apply neq_O_lt; red; intro; rewrite <- H13 in H5; rewrite <- H6 in H11; rewrite <- H5 in H11; elim (Rlt_irrefl _ H11). assert (H14 := Nzorn H13 H12); elim H14; clear H14; intros x0 H14; exists (pos_Rl lf0 x0); unfold constant_D_eq, open_interval; intros; assert (H16 := H9 x0); assert (H17 : (x0 < pred (Rlength lf))%nat). elim H14; clear H14; intros; unfold I in H14; elim H14; clear H14; intros; apply lt_S_n; replace (S (pred (Rlength lf))) with (Rlength lf). inversion H18. 2: apply lt_n_S; assumption. cut (x0 = pred (Rlength lf)). intro; rewrite H19 in H14; rewrite H5 in H14; cut (pos_Rl (cons_ORlist lf lg) i < b). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H14 H21)). apply Rlt_le_trans with (pos_Rl (cons_ORlist lf lg) (S i)). elim H10; intros; apply Rlt_trans with x; assumption. rewrite <- H5; apply Rle_trans with (pos_Rl (cons_ORlist lf lg) (pred (Rlength (cons_ORlist lf lg)))). elim (RList_P6 (cons_ORlist lf lg)); intros; apply H21. apply RList_P2; assumption. apply lt_n_Sm_le; apply lt_n_S; assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H23 in H8; elim (lt_n_O _ H8). right; apply RList_P16; try assumption; rewrite H0; assumption. rewrite <- H20; reflexivity. apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H19 in H18; elim (lt_n_O _ H18). assert (H18 := H16 H17); unfold constant_D_eq, open_interval in H18; rewrite (H18 x1). reflexivity. elim H15; clear H15; intros; elim H14; clear H14; intros; unfold I in H14; elim H14; clear H14; intros; split. apply Rle_lt_trans with (pos_Rl (cons_ORlist lf lg) i); assumption. apply Rlt_le_trans with (pos_Rl (cons_ORlist lf lg) (S i)); try assumption. assert (H22 : (S x0 < Rlength lf)%nat). replace (Rlength lf) with (S (pred (Rlength lf))); [ apply lt_n_S; assumption | symmetry ; apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H22 in H21; elim (lt_n_O _ H21) ]. elim (Rle_dec (pos_Rl lf (S x0)) (pos_Rl (cons_ORlist lf lg) i)); intro. assert (H23 : (S x0 <= x0)%nat). apply H20; unfold I; split; assumption. elim (le_Sn_n _ H23). assert (H23 : pos_Rl (cons_ORlist lf lg) i < pos_Rl lf (S x0)). auto with real. clear b0; apply RList_P17; try assumption. apply RList_P2; assumption. elim (RList_P9 lf lg (pos_Rl lf (S x0))); intros; apply H25; left; elim (RList_P3 lf (pos_Rl lf (S x0))); intros; apply H27; exists (S x0); split; [ reflexivity | apply H22 ]. Qed. 
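(* Added note: StepFun_P22 above shows that merging a subdivision [lf]
   adapted to [f] with a subdivision [lg] adapted to [g] through
   [cons_ORlist] yields a list that is still a subdivision adapted to [f],
   under the hypothesis [a <= b].  StepFun_P23 below removes that hypothesis
   by case analysis on [Rle_dec a b], using StepFun_P5 to swap the bounds;
   StepFun_P24 and StepFun_P25 state the corresponding results where the
   merged list is adapted to [g]. *)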
Lemma StepFun_P23 : forall (a b:R) (f g:R -> R) (lf lg:Rlist), is_subdivision f a b lf -> is_subdivision g a b lg -> is_subdivision f a b (cons_ORlist lf lg). Proof. intros; case (Rle_dec a b); intro; [ apply StepFun_P22 with g; assumption | apply StepFun_P5; apply StepFun_P22 with g; [ auto with real | apply StepFun_P5; assumption | apply StepFun_P5; assumption ] ]. Qed. Lemma StepFun_P24 : forall (a b:R) (f g:R -> R) (lf lg:Rlist), a <= b -> is_subdivision f a b lf -> is_subdivision g a b lg -> is_subdivision g a b (cons_ORlist lf lg). Proof. unfold is_subdivision; intros a b f g lf lg Hyp X X0; elim X; elim X0; clear X X0; intros lg0 p lf0 p0; assert (Hyp_min : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (Hyp_max : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. apply existT with (FF (cons_ORlist lf lg) g); unfold adapted_couple in p, p0; decompose [and] p; decompose [and] p0; clear p p0; rewrite Hyp_min in H1; rewrite Hyp_min in H6; rewrite Hyp_max in H0; rewrite Hyp_max in H5; unfold adapted_couple; repeat split. apply RList_P2; assumption. rewrite Hyp_min; symmetry ; apply Rle_antisym. induction lf as [| r lf Hreclf]. simpl; right; symmetry ; assumption. assert (H10 : In (pos_Rl (cons_ORlist (cons r lf) lg) 0) (cons_ORlist (cons r lf) lg)). elim (RList_P3 (cons_ORlist (cons r lf) lg) (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros _ H10; apply H10; exists 0%nat; split; [ reflexivity | rewrite RList_P11; simpl; apply lt_O_Sn ]. elim (RList_P9 (cons r lf) lg (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros H12 _; assert (H13 := H12 H10); elim H13; intro. elim (RList_P3 (cons r lf) (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros H11 _; assert (H14 := H11 H8); elim H14; intros; elim H15; clear H15; intros; rewrite H15; rewrite <- H6; elim (RList_P6 (cons r lf)); intros; apply H17; [ assumption | apply le_O_n | assumption ]. elim (RList_P3 lg (pos_Rl (cons_ORlist (cons r lf) lg) 0)); intros H11 _; assert (H14 := H11 H8); elim H14; intros; elim H15; clear H15; intros; rewrite H15; rewrite <- H1; elim (RList_P6 lg); intros; apply H17; [ assumption | apply le_O_n | assumption ]. induction lf as [| r lf Hreclf]. simpl; right; assumption. assert (H8 : In a (cons_ORlist (cons r lf) lg)). elim (RList_P9 (cons r lf) lg a); intros; apply H10; left; elim (RList_P3 (cons r lf) a); intros; apply H12; exists 0%nat; split; [ symmetry ; assumption | simpl; apply lt_O_Sn ]. apply RList_P5; [ apply RList_P2; assumption | assumption ]. rewrite Hyp_max; apply Rle_antisym. induction lf as [| r lf Hreclf]. simpl; right; assumption. assert (H8 : In (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg)))) (cons_ORlist (cons r lf) lg)). elim (RList_P3 (cons_ORlist (cons r lf) lg) (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros _ H10; apply H10; exists (pred (Rlength (cons_ORlist (cons r lf) lg))); split; [ reflexivity | rewrite RList_P11; simpl; apply lt_n_Sn ]. elim (RList_P9 (cons r lf) lg (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros H10 _; assert (H11 := H10 H8); elim H11; intro. 
elim (RList_P3 (cons r lf) (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros H13 _; assert (H14 := H13 H12); elim H14; intros; elim H15; clear H15; intros; rewrite H15; rewrite <- H5; elim (RList_P6 (cons r lf)); intros; apply H17; [ assumption | simpl; simpl in H14; apply lt_n_Sm_le; assumption | simpl; apply lt_n_Sn ]. elim (RList_P3 lg (pos_Rl (cons_ORlist (cons r lf) lg) (pred (Rlength (cons_ORlist (cons r lf) lg))))); intros H13 _; assert (H14 := H13 H12); elim H14; intros; elim H15; clear H15; intros; rewrite H15; assert (H17 : Rlength lg = S (pred (Rlength lg))). apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H17 in H16; elim (lt_n_O _ H16). rewrite <- H0; elim (RList_P6 lg); intros; apply H18; [ assumption | rewrite H17 in H16; apply lt_n_Sm_le; assumption | apply lt_pred_n_n; rewrite H17; apply lt_O_Sn ]. induction lf as [| r lf Hreclf]. simpl; right; symmetry ; assumption. assert (H8 : In b (cons_ORlist (cons r lf) lg)). elim (RList_P9 (cons r lf) lg b); intros; apply H10; left; elim (RList_P3 (cons r lf) b); intros; apply H12; exists (pred (Rlength (cons r lf))); split; [ symmetry ; assumption | simpl; apply lt_n_Sn ]. apply RList_P7; [ apply RList_P2; assumption | assumption ]. apply StepFun_P20; rewrite RList_P11; rewrite H7; rewrite H2; simpl; apply lt_O_Sn. unfold constant_D_eq, open_interval; intros; cut (exists l : R, constant_D_eq g (open_interval (pos_Rl (cons_ORlist lf lg) i) (pos_Rl (cons_ORlist lf lg) (S i))) l). intros; elim H11; clear H11; intros; assert (H12 := H11); assert (Hyp_cons : exists r : R, (exists r0 : Rlist, cons_ORlist lf lg = cons r r0)). apply RList_P19; red; intro; rewrite H13 in H8; elim (lt_n_O _ H8). elim Hyp_cons; clear Hyp_cons; intros r [r0 Hyp_cons]; rewrite Hyp_cons; unfold FF; rewrite RList_P12. change (g x = g (pos_Rl (mid_Rlist (cons r r0) r) (S i))); rewrite <- Hyp_cons; rewrite RList_P13. assert (H13 := RList_P2 _ _ H _ H8); elim H13; intro. unfold constant_D_eq, open_interval in H11, H12; rewrite (H11 x H10); assert (H15 : pos_Rl (cons_ORlist lf lg) i < (pos_Rl (cons_ORlist lf lg) i + pos_Rl (cons_ORlist lf lg) (S i)) / 2 < pos_Rl (cons_ORlist lf lg) (S i)). split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; rewrite (Rplus_comm (pos_Rl (cons_ORlist lf lg) i)); apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite (H11 _ H15); reflexivity. elim H10; intros; rewrite H14 in H15; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H16 H15)). apply H8. rewrite RList_P14; rewrite Hyp_cons in H8; simpl in H8; apply H8. assert (H11 : a < b). apply Rle_lt_trans with (pos_Rl (cons_ORlist lf lg) i). rewrite <- H6; rewrite <- (RList_P15 lf lg); try assumption. elim (RList_P6 (cons_ORlist lf lg)); intros; apply H11. apply RList_P2; assumption. apply le_O_n. apply lt_trans with (pred (Rlength (cons_ORlist lf lg))); [ assumption | apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H13 in H8; elim (lt_n_O _ H8) ]. rewrite H1; assumption. apply Rlt_le_trans with (pos_Rl (cons_ORlist lf lg) (S i)). elim H10; intros; apply Rlt_trans with x; assumption. rewrite <- H5; rewrite <- (RList_P16 lf lg); try assumption. 
elim (RList_P6 (cons_ORlist lf lg)); intros; apply H11. apply RList_P2; assumption. apply lt_n_Sm_le; apply lt_n_S; assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H13 in H8; elim (lt_n_O _ H8). rewrite H0; assumption. set (I := fun j:nat => pos_Rl lg j <= pos_Rl (cons_ORlist lf lg) i /\ (j < Rlength lg)%nat); assert (H12 : Nbound I). unfold Nbound; exists (Rlength lg); intros; unfold I in H12; elim H12; intros; apply lt_le_weak; assumption. assert (H13 : exists n : nat, I n). exists 0%nat; unfold I; split. apply Rle_trans with (pos_Rl (cons_ORlist lf lg) 0). right; symmetry ; rewrite H1; rewrite <- H6; apply RList_P15; try assumption; rewrite H1; assumption. elim (RList_P6 (cons_ORlist lf lg)); intros; apply H13; [ apply RList_P2; assumption | apply le_O_n | apply lt_trans with (pred (Rlength (cons_ORlist lf lg))); [ assumption | apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H15 in H8; elim (lt_n_O _ H8) ] ]. apply neq_O_lt; red; intro; rewrite <- H13 in H0; rewrite <- H1 in H11; rewrite <- H0 in H11; elim (Rlt_irrefl _ H11). assert (H14 := Nzorn H13 H12); elim H14; clear H14; intros x0 H14; exists (pos_Rl lg0 x0); unfold constant_D_eq, open_interval; intros; assert (H16 := H4 x0); assert (H17 : (x0 < pred (Rlength lg))%nat). elim H14; clear H14; intros; unfold I in H14; elim H14; clear H14; intros; apply lt_S_n; replace (S (pred (Rlength lg))) with (Rlength lg). inversion H18. 2: apply lt_n_S; assumption. cut (x0 = pred (Rlength lg)). intro; rewrite H19 in H14; rewrite H0 in H14; cut (pos_Rl (cons_ORlist lf lg) i < b). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H14 H21)). apply Rlt_le_trans with (pos_Rl (cons_ORlist lf lg) (S i)). elim H10; intros; apply Rlt_trans with x; assumption. rewrite <- H0; apply Rle_trans with (pos_Rl (cons_ORlist lf lg) (pred (Rlength (cons_ORlist lf lg)))). elim (RList_P6 (cons_ORlist lf lg)); intros; apply H21. apply RList_P2; assumption. apply lt_n_Sm_le; apply lt_n_S; assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H23 in H8; elim (lt_n_O _ H8). right; rewrite H0; rewrite <- H5; apply RList_P16; try assumption. rewrite H0; assumption. rewrite <- H20; reflexivity. apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H19 in H18; elim (lt_n_O _ H18). assert (H18 := H16 H17); unfold constant_D_eq, open_interval in H18; rewrite (H18 x1). reflexivity. elim H15; clear H15; intros; elim H14; clear H14; intros; unfold I in H14; elim H14; clear H14; intros; split. apply Rle_lt_trans with (pos_Rl (cons_ORlist lf lg) i); assumption. apply Rlt_le_trans with (pos_Rl (cons_ORlist lf lg) (S i)); try assumption. assert (H22 : (S x0 < Rlength lg)%nat). replace (Rlength lg) with (S (pred (Rlength lg))). apply lt_n_S; assumption. symmetry ; apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H22 in H21; elim (lt_n_O _ H21). elim (Rle_dec (pos_Rl lg (S x0)) (pos_Rl (cons_ORlist lf lg) i)); intro. assert (H23 : (S x0 <= x0)%nat); [ apply H20; unfold I; split; assumption | elim (le_Sn_n _ H23) ]. assert (H23 : pos_Rl (cons_ORlist lf lg) i < pos_Rl lg (S x0)). auto with real. clear b0; apply RList_P17; try assumption; [ apply RList_P2; assumption | elim (RList_P9 lf lg (pos_Rl lg (S x0))); intros; apply H25; right; elim (RList_P3 lg (pos_Rl lg (S x0))); intros; apply H27; exists (S x0); split; [ reflexivity | apply H22 ] ]. Qed. Lemma StepFun_P25 : forall (a b:R) (f g:R -> R) (lf lg:Rlist), is_subdivision f a b lf -> is_subdivision g a b lg -> is_subdivision g a b (cons_ORlist lf lg). Proof. 
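(* Same argument as StepFun_P23, with the roles of [f] and [g] exchanged: when
   [a <= b] this is exactly StepFun_P24, and otherwise the interval is flipped
   with StepFun_P5 before applying StepFun_P24. *)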
intros a b f g lf lg H H0; case (Rle_dec a b); intro; [ apply StepFun_P24 with f; assumption | apply StepFun_P5; apply StepFun_P24 with f; [ auto with real | apply StepFun_P5; assumption | apply StepFun_P5; assumption ] ]. Qed. Lemma StepFun_P26 : forall (a b l:R) (f g:R -> R) (l1:Rlist), is_subdivision f a b l1 -> is_subdivision g a b l1 -> is_subdivision (fun x:R => f x + l * g x) a b l1. Proof. intros a b l f g l1 (x0,(H0,(H1,(H2,(H3,H4))))) (x,(_,(_,(_,(_,H9))))). exists (FF l1 (fun x:R => f x + l * g x)); repeat split; try assumption. apply StepFun_P20; rewrite H3; auto with arith. intros i H8 x1 H10; unfold open_interval in H10, H9, H4; rewrite (H9 _ H8 _ H10); rewrite (H4 _ H8 _ H10); assert (H11 : l1 <> nil). red; intro H11; rewrite H11 in H8; elim (lt_n_O _ H8). destruct (RList_P19 _ H11) as (r,(r0,H12)); rewrite H12; unfold FF; change (pos_Rl x0 i + l * pos_Rl x i = pos_Rl (app_Rlist (mid_Rlist (cons r r0) r) (fun x2:R => f x2 + l * g x2)) (S i)); rewrite RList_P12. rewrite RList_P13. rewrite <- H12; rewrite (H9 _ H8); try rewrite (H4 _ H8); reflexivity || (elim H10; clear H10; intros; split; [ apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; apply Rlt_trans with x1; assumption | discrR ] ] | apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; rewrite (Rplus_comm (pos_Rl l1 i)); apply Rplus_lt_compat_l; apply Rlt_trans with x1; assumption | discrR ] ] ]). rewrite <- H12; assumption. rewrite RList_P14; simpl; rewrite H12 in H8; simpl in H8; apply lt_n_S; apply H8. Qed. Lemma StepFun_P27 : forall (a b l:R) (f g:R -> R) (lf lg:Rlist), is_subdivision f a b lf -> is_subdivision g a b lg -> is_subdivision (fun x:R => f x + l * g x) a b (cons_ORlist lf lg). Proof. intros a b l f g lf lg H H0; apply StepFun_P26; [ apply StepFun_P23 with g; assumption | apply StepFun_P25 with f; assumption ]. Qed. (** The set of step functions on [a,b] is a vectorial space *) Lemma StepFun_P28 : forall (a b l:R) (f g:StepFun a b), IsStepFun (fun x:R => f x + l * g x) a b. Proof. intros a b l f g; unfold IsStepFun; assert (H := pre f); assert (H0 := pre g); unfold IsStepFun in H, H0; elim H; elim H0; intros; apply existT with (cons_ORlist x0 x); apply StepFun_P27; assumption. Qed. Lemma StepFun_P29 : forall (a b:R) (f:StepFun a b), is_subdivision f a b (subdivision f). Proof. intros a b f; unfold is_subdivision; apply existT with (subdivision_val f); apply StepFun_P1. Qed. Lemma StepFun_P30 : forall (a b l:R) (f g:StepFun a b), RiemannInt_SF (mkStepFun (StepFun_P28 l f g)) = RiemannInt_SF f + l * RiemannInt_SF g. Proof. 
intros a b l f g; unfold RiemannInt_SF; case (Rle_dec a b); (intro; replace (Int_SF (subdivision_val (mkStepFun (StepFun_P28 l f g))) (subdivision (mkStepFun (StepFun_P28 l f g)))) with (Int_SF (FF (cons_ORlist (subdivision f) (subdivision g)) (fun x:R => f x + l * g x)) (cons_ORlist (subdivision f) (subdivision g))); [ rewrite StepFun_P19; replace (Int_SF (FF (cons_ORlist (subdivision f) (subdivision g)) f) (cons_ORlist (subdivision f) (subdivision g))) with (Int_SF (subdivision_val f) (subdivision f)); [ replace (Int_SF (FF (cons_ORlist (subdivision f) (subdivision g)) g) (cons_ORlist (subdivision f) (subdivision g))) with (Int_SF (subdivision_val g) (subdivision g)); [ ring | apply StepFun_P17 with (fe g) a b; [ apply StepFun_P1 | apply StepFun_P21; apply StepFun_P25 with (fe f); apply StepFun_P29 ] ] | apply StepFun_P17 with (fe f) a b; [ apply StepFun_P1 | apply StepFun_P21; apply StepFun_P23 with (fe g); apply StepFun_P29 ] ] | apply StepFun_P17 with (fun x:R => f x + l * g x) a b; [ apply StepFun_P21; apply StepFun_P27; apply StepFun_P29 | apply (StepFun_P1 (mkStepFun (StepFun_P28 l f g))) ] ]). Qed. Lemma StepFun_P31 : forall (a b:R) (f:R -> R) (l lf:Rlist), adapted_couple f a b l lf -> adapted_couple (fun x:R => Rabs (f x)) a b l (app_Rlist lf Rabs). Proof. unfold adapted_couple; intros; decompose [and] H; clear H; repeat split; try assumption. symmetry ; rewrite H3; rewrite RList_P18; reflexivity. intros; unfold constant_D_eq, open_interval; unfold constant_D_eq, open_interval in H5; intros; rewrite (H5 _ H _ H4); rewrite RList_P12; [ reflexivity | rewrite H3 in H; simpl in H; apply H ]. Qed. Lemma StepFun_P32 : forall (a b:R) (f:StepFun a b), IsStepFun (fun x:R => Rabs (f x)) a b. Proof. intros a b f; unfold IsStepFun; apply existT with (subdivision f); unfold is_subdivision; apply existT with (app_Rlist (subdivision_val f) Rabs); apply StepFun_P31; apply StepFun_P1. Qed. Lemma StepFun_P33 : forall l2 l1:Rlist, ordered_Rlist l1 -> Rabs (Int_SF l2 l1) <= Int_SF (app_Rlist l2 Rabs) l1. Proof. simple induction l2; intros. simpl; rewrite Rabs_R0; right; reflexivity. simpl; induction l1 as [| r1 l1 Hrecl1]. rewrite Rabs_R0; right; reflexivity. induction l1 as [| r2 l1 Hrecl0]. rewrite Rabs_R0; right; reflexivity. apply Rle_trans with (Rabs (r * (r2 - r1)) + Rabs (Int_SF r0 (cons r2 l1))). apply Rabs_triang. rewrite Rabs_mult; rewrite (Rabs_right (r2 - r1)); [ apply Rplus_le_compat_l; apply H; apply RList_P4 with r1; assumption | apply Rge_minus; apply Rle_ge; apply (H0 0%nat); simpl; apply lt_O_Sn ]. Qed. Lemma StepFun_P34 : forall (a b:R) (f:StepFun a b), a <= b -> Rabs (RiemannInt_SF f) <= RiemannInt_SF (mkStepFun (StepFun_P32 f)). Proof. intros; unfold RiemannInt_SF; case (Rle_dec a b); intro. replace (Int_SF (subdivision_val (mkStepFun (StepFun_P32 f))) (subdivision (mkStepFun (StepFun_P32 f)))) with (Int_SF (app_Rlist (subdivision_val f) Rabs) (subdivision f)). apply StepFun_P33; assert (H0 := StepFun_P29 f); unfold is_subdivision in H0; elim H0; intros; unfold adapted_couple in p; decompose [and] p; assumption. apply StepFun_P17 with (fun x:R => Rabs (f x)) a b; [ apply StepFun_P31; apply StepFun_P1 | apply (StepFun_P1 (mkStepFun (StepFun_P32 f))) ]. elim n; assumption. Qed. Lemma StepFun_P35 : forall (l:Rlist) (a b:R) (f g:R -> R), ordered_Rlist l -> pos_Rl l 0 = a -> pos_Rl l (pred (Rlength l)) = b -> (forall x:R, a < x < b -> f x <= g x) -> Int_SF (FF l f) l <= Int_SF (FF l g) l. Proof. simple induction l; intros. right; reflexivity. simpl; induction r0 as [| r0 r1 Hrecr0]. 
right; reflexivity. simpl; apply Rplus_le_compat. case (Req_dec r r0); intro. rewrite H4; right; ring. do 2 rewrite <- (Rmult_comm (r0 - r)); apply Rmult_le_compat_l. apply Rge_le; apply Rge_minus; apply Rle_ge; apply (H0 0%nat); simpl; apply lt_O_Sn. apply H3; split. apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. assert (H5 : r = a). apply H1. rewrite H5; rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l. assert (H6 := H0 0%nat (lt_O_Sn _)). simpl in H6. elim H6; intro. rewrite H5 in H7; apply H7. elim H4; assumption. discrR. apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite double; assert (H5 : r0 <= b). replace b with (pos_Rl (cons r (cons r0 r1)) (pred (Rlength (cons r (cons r0 r1))))). replace r0 with (pos_Rl (cons r (cons r0 r1)) 1). elim (RList_P6 (cons r (cons r0 r1))); intros; apply H5. assumption. simpl; apply le_n_S. apply le_O_n. simpl; apply lt_n_Sn. reflexivity. apply Rle_lt_trans with (r + b). apply Rplus_le_compat_l; assumption. rewrite (Rplus_comm r); apply Rplus_lt_compat_l. apply Rlt_le_trans with r0. assert (H6 := H0 0%nat (lt_O_Sn _)). simpl in H6. elim H6; intro. apply H7. elim H4; assumption. assumption. discrR. simpl in H; apply H with r0 b. apply RList_P4 with r; assumption. reflexivity. rewrite <- H2; reflexivity. intros; apply H3; elim H4; intros; split; try assumption. apply Rle_lt_trans with r0; try assumption. rewrite <- H1. simpl; apply (H0 0%nat); simpl; apply lt_O_Sn. Qed. Lemma StepFun_P36 : forall (a b:R) (f g:StepFun a b) (l:Rlist), a <= b -> is_subdivision f a b l -> is_subdivision g a b l -> (forall x:R, a < x < b -> f x <= g x) -> RiemannInt_SF f <= RiemannInt_SF g. Proof. intros; unfold RiemannInt_SF; case (Rle_dec a b); intro. replace (Int_SF (subdivision_val f) (subdivision f)) with (Int_SF (FF l f) l). replace (Int_SF (subdivision_val g) (subdivision g)) with (Int_SF (FF l g) l). unfold is_subdivision in X; elim X; clear X; intros; unfold adapted_couple in p; decompose [and] p; clear p; assert (H5 : Rmin a b = a); [ unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ] | assert (H7 : Rmax a b = b); [ unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ] | rewrite H5 in H3; rewrite H7 in H2; eapply StepFun_P35 with a b; assumption ] ]. apply StepFun_P17 with (fe g) a b; [ apply StepFun_P21; assumption | apply StepFun_P1 ]. apply StepFun_P17 with (fe f) a b; [ apply StepFun_P21; assumption | apply StepFun_P1 ]. elim n; assumption. Qed. Lemma StepFun_P37 : forall (a b:R) (f g:StepFun a b), a <= b -> (forall x:R, a < x < b -> f x <= g x) -> RiemannInt_SF f <= RiemannInt_SF g. Proof. intros; eapply StepFun_P36; try assumption. eapply StepFun_P25; apply StepFun_P29. eapply StepFun_P23; apply StepFun_P29. Qed. Lemma StepFun_P38 : forall (l:Rlist) (a b:R) (f:R -> R), ordered_Rlist l -> pos_Rl l 0 = a -> pos_Rl l (pred (Rlength l)) = b -> { g:StepFun a b | g b = f b /\ (forall i:nat, (i < pred (Rlength l))%nat -> constant_D_eq g (co_interval (pos_Rl l i) (pos_Rl l (S i))) (f (pos_Rl l i))) }. Proof. intros l a b f; generalize a; clear a; induction l. intros a H H0 H1; simpl in H0; simpl in H1; exists (mkStepFun (StepFun_P4 a b (f b))); split. reflexivity. intros; elim (lt_n_O _ H2). intros; destruct l as [| r1 l]. simpl in H1; simpl in H0; exists (mkStepFun (StepFun_P4 a b (f b))); split. reflexivity. 
intros i H2; elim (lt_n_O _ H2). intros; assert (H2 : ordered_Rlist (cons r1 l)). apply RList_P4 with r; assumption. assert (H3 : pos_Rl (cons r1 l) 0 = r1). reflexivity. assert (H4 : pos_Rl (cons r1 l) (pred (Rlength (cons r1 l))) = b). rewrite <- H1; reflexivity. elim (IHl r1 H2 H3 H4); intros g [H5 H6]. set (g' := fun x:R => match Rle_dec r1 x with | left _ => g x | right _ => f a end). assert (H7 : r1 <= b). rewrite <- H4; apply RList_P7; [ assumption | left; reflexivity ]. assert (H8 : IsStepFun g' a b). unfold IsStepFun; assert (H8 := pre g); unfold IsStepFun in H8; elim H8; intros lg H9; unfold is_subdivision in H9; elim H9; clear H9; intros lg2 H9; split with (cons a lg); unfold is_subdivision; split with (cons (f a) lg2); unfold adapted_couple in H9; decompose [and] H9; clear H9; unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H9; induction i as [| i Hreci]. simpl; rewrite H12; replace (Rmin r1 b) with r1. simpl in H0; rewrite <- H0; apply (H 0%nat); simpl; apply lt_O_Sn. unfold Rmin; case (Rle_dec r1 b); intro; [ reflexivity | elim n; assumption ]. apply (H10 i); apply lt_S_n. replace (S (pred (Rlength lg))) with (Rlength lg). apply H9. apply S_pred with 0%nat; apply neq_O_lt; intro; rewrite <- H14 in H9; elim (lt_n_O _ H9). simpl; assert (H14 : a <= b). rewrite <- H1; simpl in H0; rewrite <- H0; apply RList_P7; [ assumption | left; reflexivity ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (H14 : a <= b). rewrite <- H1; simpl in H0; rewrite <- H0; apply RList_P7; [ assumption | left; reflexivity ]. replace (Rmax a b) with (Rmax r1 b). rewrite <- H11; induction lg as [| r0 lg Hreclg]. simpl in H13; discriminate. reflexivity. unfold Rmax; case (Rle_dec a b); case (Rle_dec r1 b); intros; reflexivity || elim n; assumption. simpl; rewrite H13; reflexivity. intros; simpl in H9; induction i as [| i Hreci]. unfold constant_D_eq, open_interval; simpl; intros; assert (H16 : Rmin r1 b = r1). unfold Rmin; case (Rle_dec r1 b); intro; [ reflexivity | elim n; assumption ]. rewrite H16 in H12; rewrite H12 in H14; elim H14; clear H14; intros _ H14; unfold g'; case (Rle_dec r1 x); intro r3. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r3 H14)). reflexivity. change (constant_D_eq g' (open_interval (pos_Rl lg i) (pos_Rl lg (S i))) (pos_Rl lg2 i)); clear Hreci; assert (H16 := H15 i); assert (H17 : (i < pred (Rlength lg))%nat). apply lt_S_n. replace (S (pred (Rlength lg))) with (Rlength lg). assumption. apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H14 in H9; elim (lt_n_O _ H9). assert (H18 := H16 H17); unfold constant_D_eq, open_interval in H18; unfold constant_D_eq, open_interval; intros; assert (H19 := H18 _ H14); rewrite <- H19; unfold g'; case (Rle_dec r1 x); intro. reflexivity. elim n; replace r1 with (Rmin r1 b). rewrite <- H12; elim H14; clear H14; intros H14 _; left; apply Rle_lt_trans with (pos_Rl lg i); try assumption. apply RList_P5. assumption. elim (RList_P3 lg (pos_Rl lg i)); intros; apply H21; exists i; split. reflexivity. apply lt_trans with (pred (Rlength lg)); try assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H22 in H17; elim (lt_n_O _ H17). unfold Rmin; case (Rle_dec r1 b); intro; [ reflexivity | elim n0; assumption ]. exists (mkStepFun H8); split. simpl; unfold g'; case (Rle_dec r1 b); intro. assumption. elim n; assumption. intros; simpl in H9; induction i as [| i Hreci]. 
unfold constant_D_eq, co_interval; simpl; intros; simpl in H0; rewrite H0; elim H10; clear H10; intros; unfold g'; case (Rle_dec r1 x); intro r3. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r3 H11)). reflexivity. clear Hreci; change (constant_D_eq (mkStepFun H8) (co_interval (pos_Rl (cons r1 l) i) (pos_Rl (cons r1 l) (S i))) (f (pos_Rl (cons r1 l) i))); assert (H10 := H6 i); assert (H11 : (i < pred (Rlength (cons r1 l)))%nat). simpl; apply lt_S_n; assumption. assert (H12 := H10 H11); unfold constant_D_eq, co_interval in H12; unfold constant_D_eq, co_interval; intros; rewrite <- (H12 _ H13); simpl; unfold g'; case (Rle_dec r1 x); intro. reflexivity. elim n; elim H13; clear H13; intros; apply Rle_trans with (pos_Rl (cons r1 l) i); try assumption; change (pos_Rl (cons r1 l) 0 <= pos_Rl (cons r1 l) i); elim (RList_P6 (cons r1 l)); intros; apply H15; [ assumption | apply le_O_n | simpl; apply lt_trans with (Rlength l); [ apply lt_S_n; assumption | apply lt_n_Sn ] ]. Qed. Lemma StepFun_P39 : forall (a b:R) (f:StepFun a b), RiemannInt_SF f = - RiemannInt_SF (mkStepFun (StepFun_P6 (pre f))). Proof. intros; unfold RiemannInt_SF; case (Rle_dec a b); case (Rle_dec b a); intros. assert (H : adapted_couple f a b (subdivision f) (subdivision_val f)); [ apply StepFun_P1 | assert (H0 : adapted_couple (mkStepFun (StepFun_P6 (pre f))) b a (subdivision (mkStepFun (StepFun_P6 (pre f)))) (subdivision_val (mkStepFun (StepFun_P6 (pre f))))); [ apply StepFun_P1 | assert (H1 : a = b); [ apply Rle_antisym; assumption | rewrite (StepFun_P8 H H1); assert (H2 : b = a); [ symmetry ; apply H1 | rewrite (StepFun_P8 H0 H2); ring ] ] ] ]. rewrite Ropp_involutive; eapply StepFun_P17; [ apply StepFun_P1 | apply StepFun_P2; set (H := StepFun_P6 (pre f)); unfold IsStepFun in H; elim H; intros; unfold is_subdivision; elim p; intros; apply p0 ]. apply Ropp_eq_compat; eapply StepFun_P17; [ apply StepFun_P1 | apply StepFun_P2; set (H := StepFun_P6 (pre f)); unfold IsStepFun in H; elim H; intros; unfold is_subdivision; elim p; intros; apply p0 ]. assert (H : a < b); [ auto with real | assert (H0 : b < a); [ auto with real | elim (Rlt_irrefl _ (Rlt_trans _ _ _ H H0)) ] ]. Qed. Lemma StepFun_P40 : forall (f:R -> R) (a b c:R) (l1 l2 lf1 lf2:Rlist), a < b -> b < c -> adapted_couple f a b l1 lf1 -> adapted_couple f b c l2 lf2 -> adapted_couple f a c (cons_Rlist l1 l2) (FF (cons_Rlist l1 l2) f). Proof. intros f a b c l1 l2 lf1 lf2 H H0 H1 H2; unfold adapted_couple in H1, H2; unfold adapted_couple; decompose [and] H1; decompose [and] H2; clear H1 H2; repeat split. apply RList_P25; try assumption. rewrite H10; rewrite H4; unfold Rmin, Rmax; case (Rle_dec a b); case (Rle_dec b c); intros; (right; reflexivity) || (elim n; left; assumption). rewrite RList_P22. rewrite H5; unfold Rmin, Rmax; case (Rle_dec a b); case (Rle_dec a c); intros; [ reflexivity | elim n; apply Rle_trans with b; left; assumption | elim n; left; assumption | elim n0; left; assumption ]. red; intro; rewrite H1 in H6; discriminate. rewrite RList_P24. rewrite H9; unfold Rmin, Rmax; case (Rle_dec b c); case (Rle_dec a c); intros; [ reflexivity | elim n; apply Rle_trans with b; left; assumption | elim n; left; assumption | elim n0; left; assumption ]. red; intro; rewrite H1 in H11; discriminate. apply StepFun_P20. rewrite RList_P23; apply neq_O_lt; red; intro. assert (H2 : (Rlength l1 + Rlength l2)%nat = 0%nat). symmetry ; apply H1. elim (plus_is_O _ _ H2); intros; rewrite H12 in H6; discriminate. 
unfold constant_D_eq, open_interval; intros; elim (le_or_lt (S (S i)) (Rlength l1)); intro. assert (H14 : pos_Rl (cons_Rlist l1 l2) i = pos_Rl l1 i). apply RList_P26; apply lt_S_n; apply le_lt_n_Sm; apply le_S_n; apply le_trans with (Rlength l1); [ assumption | apply le_n_Sn ]. assert (H15 : pos_Rl (cons_Rlist l1 l2) (S i) = pos_Rl l1 (S i)). apply RList_P26; apply lt_S_n; apply le_lt_n_Sm; assumption. rewrite H14 in H2; rewrite H15 in H2; assert (H16 : (2 <= Rlength l1)%nat). apply le_trans with (S (S i)); [ repeat apply le_n_S; apply le_O_n | assumption ]. elim (RList_P20 _ H16); intros r1 [r2 [r3 H17]]; rewrite H17; change (f x = pos_Rl (app_Rlist (mid_Rlist (cons_Rlist (cons r2 r3) l2) r1) f) i) ; rewrite RList_P12. induction i as [| i Hreci]. simpl; assert (H18 := H8 0%nat); unfold constant_D_eq, open_interval in H18; assert (H19 : (0 < pred (Rlength l1))%nat). rewrite H17; simpl; apply lt_O_Sn. assert (H20 := H18 H19); repeat rewrite H20. reflexivity. assert (H21 : r1 <= r2). rewrite H17 in H3; apply (H3 0%nat). simpl; apply lt_O_Sn. elim H21; intro. split. rewrite H17; simpl; apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite H17; simpl; apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite (Rplus_comm r1); rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. elim H2; intros; rewrite H17 in H23; rewrite H17 in H24; simpl in H24; simpl in H23; rewrite H22 in H23; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H23 H24)). assumption. clear Hreci; rewrite RList_P13. rewrite H17 in H14; rewrite H17 in H15; change (pos_Rl (cons_Rlist (cons r2 r3) l2) i = pos_Rl (cons r1 (cons r2 r3)) (S i)) in H14; rewrite H14; change (pos_Rl (cons_Rlist (cons r2 r3) l2) (S i) = pos_Rl (cons r1 (cons r2 r3)) (S (S i))) in H15; rewrite H15; assert (H18 := H8 (S i)); unfold constant_D_eq, open_interval in H18; assert (H19 : (S i < pred (Rlength l1))%nat). apply lt_pred; apply lt_S_n; apply le_lt_n_Sm; assumption. assert (H20 := H18 H19); repeat rewrite H20. reflexivity. rewrite <- H17; assert (H21 : pos_Rl l1 (S i) <= pos_Rl l1 (S (S i))). apply (H3 (S i)); apply lt_pred; apply lt_S_n; apply le_lt_n_Sm; assumption. elim H21; intro. split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite (Rplus_comm (pos_Rl l1 (S i))); rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. elim H2; intros; rewrite H22 in H23; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H23 H24)). assumption. simpl; rewrite H17 in H1; simpl in H1; apply lt_S_n; assumption. rewrite RList_P14; rewrite H17 in H1; simpl in H1; apply H1. inversion H12. assert (H16 : pos_Rl (cons_Rlist l1 l2) (S i) = b). rewrite RList_P29. rewrite H15; rewrite <- minus_n_n; rewrite H10; unfold Rmin; case (Rle_dec b c); intro; [ reflexivity | elim n; left; assumption ]. rewrite H15; apply le_n. induction l1 as [| r l1 Hrecl1]. simpl in H15; discriminate. clear Hrecl1; simpl in H1; simpl; apply lt_n_S; assumption. 
assert (H17 : pos_Rl (cons_Rlist l1 l2) i = b). rewrite RList_P26. replace i with (pred (Rlength l1)); [ rewrite H4; unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; left; assumption ] | rewrite H15; reflexivity ]. rewrite H15; apply lt_n_Sn. rewrite H16 in H2; rewrite H17 in H2; elim H2; intros; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H14 H18)). assert (H16 : pos_Rl (cons_Rlist l1 l2) i = pos_Rl l2 (i - Rlength l1)). apply RList_P29. apply le_S_n; assumption. apply lt_le_trans with (pred (Rlength (cons_Rlist l1 l2))); [ assumption | apply le_pred_n ]. assert (H17 : pos_Rl (cons_Rlist l1 l2) (S i) = pos_Rl l2 (S (i - Rlength l1))). replace (S (i - Rlength l1)) with (S i - Rlength l1)%nat. apply RList_P29. apply le_S_n; apply le_trans with (S i); [ assumption | apply le_n_Sn ]. induction l1 as [| r l1 Hrecl1]. simpl in H6; discriminate. clear Hrecl1; simpl in H1; simpl; apply lt_n_S; assumption. symmetry ; apply minus_Sn_m; apply le_S_n; assumption. assert (H18 : (2 <= Rlength l1)%nat). clear f c l2 lf2 H0 H3 H8 H7 H10 H9 H11 H13 i H1 x H2 H12 m H14 H15 H16 H17; induction l1 as [| r l1 Hrecl1]. discriminate. clear Hrecl1; induction l1 as [| r0 l1 Hrecl1]. simpl in H5; simpl in H4; assert (H0 : Rmin a b < Rmax a b). unfold Rmin, Rmax; case (Rle_dec a b); intro; [ assumption | elim n; left; assumption ]. rewrite <- H5 in H0; rewrite <- H4 in H0; elim (Rlt_irrefl _ H0). clear Hrecl1; simpl; repeat apply le_n_S; apply le_O_n. elim (RList_P20 _ H18); intros r1 [r2 [r3 H19]]; rewrite H19; change (f x = pos_Rl (app_Rlist (mid_Rlist (cons_Rlist (cons r2 r3) l2) r1) f) i) ; rewrite RList_P12. induction i as [| i Hreci]. assert (H20 := le_S_n _ _ H15); assert (H21 := le_trans _ _ _ H18 H20); elim (le_Sn_O _ H21). clear Hreci; rewrite RList_P13. rewrite H19 in H16; rewrite H19 in H17; change (pos_Rl (cons_Rlist (cons r2 r3) l2) i = pos_Rl l2 (S i - Rlength (cons r1 (cons r2 r3)))) in H16; rewrite H16; change (pos_Rl (cons_Rlist (cons r2 r3) l2) (S i) = pos_Rl l2 (S (S i - Rlength (cons r1 (cons r2 r3))))) in H17; rewrite H17; assert (H20 := H13 (S i - Rlength l1)%nat); unfold constant_D_eq, open_interval in H20; assert (H21 : (S i - Rlength l1 < pred (Rlength l2))%nat). apply lt_pred; rewrite minus_Sn_m. apply plus_lt_reg_l with (Rlength l1); rewrite <- le_plus_minus. rewrite H19 in H1; simpl in H1; rewrite H19; simpl; rewrite RList_P23 in H1; apply lt_n_S; assumption. apply le_trans with (S i); [ apply le_S_n; assumption | apply le_n_Sn ]. apply le_S_n; assumption. assert (H22 := H20 H21); repeat rewrite H22. reflexivity. rewrite <- H19; assert (H23 : pos_Rl l2 (S i - Rlength l1) <= pos_Rl l2 (S (S i - Rlength l1))). apply H7; apply lt_pred. rewrite minus_Sn_m. apply plus_lt_reg_l with (Rlength l1); rewrite <- le_plus_minus. rewrite H19 in H1; simpl in H1; rewrite H19; simpl; rewrite RList_P23 in H1; apply lt_n_S; assumption. apply le_trans with (S i); [ apply le_S_n; assumption | apply le_n_Sn ]. apply le_S_n; assumption. elim H23; intro. split. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. apply Rmult_lt_reg_l with 2; [ prove_sup0 | unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite (Rplus_comm (pos_Rl l2 (S i - Rlength l1))); rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. 
rewrite <- H19 in H16; rewrite <- H19 in H17; elim H2; intros; rewrite H19 in H25; rewrite H19 in H26; simpl in H25; simpl in H16; rewrite H16 in H25; simpl in H26; simpl in H17; rewrite H17 in H26; simpl in H24; rewrite H24 in H25; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H25 H26)). assert (H23 : pos_Rl (cons_Rlist l1 l2) (S i) = pos_Rl l2 (S i - Rlength l1)). rewrite H19; simpl; simpl in H16; apply H16. assert (H24 : pos_Rl (cons_Rlist l1 l2) (S (S i)) = pos_Rl l2 (S (S i - Rlength l1))). rewrite H19; simpl; simpl in H17; apply H17. rewrite <- H23; rewrite <- H24; assumption. simpl; rewrite H19 in H1; simpl in H1; apply lt_S_n; assumption. rewrite RList_P14; rewrite H19 in H1; simpl in H1; simpl; apply H1. Qed. Lemma StepFun_P41 : forall (f:R -> R) (a b c:R), a <= b -> b <= c -> IsStepFun f a b -> IsStepFun f b c -> IsStepFun f a c. Proof. intros f a b c H H0 (l1,(lf1,H1)) (l2,(lf2,H2)); destruct (total_order_T a b) as [[Hltab|Hab]|Hgtab]. destruct (total_order_T b c) as [[Hltbc|Hbc]|Hgtbc]. exists (cons_Rlist l1 l2); exists (FF (cons_Rlist l1 l2) f); apply StepFun_P40 with b lf1 lf2; assumption. exists l1; exists lf1; rewrite Hbc in H1; assumption. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H0 Hgtbc)). exists l2; exists lf2; rewrite <- Hab in H2; assumption. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H Hgtab)). Qed. Lemma StepFun_P42 : forall (l1 l2:Rlist) (f:R -> R), pos_Rl l1 (pred (Rlength l1)) = pos_Rl l2 0 -> Int_SF (FF (cons_Rlist l1 l2) f) (cons_Rlist l1 l2) = Int_SF (FF l1 f) l1 + Int_SF (FF l2 f) l2. Proof. intros l1 l2 f; induction l1 as [| r l1 IHl1]; intros H; [ simpl; ring | destruct l1 as [| r0 r1]; [ simpl in H; simpl; destruct l2 as [| r0 r1]; [ simpl; ring | simpl; simpl in H; rewrite H; ring ] | simpl; rewrite Rplus_assoc; apply Rplus_eq_compat_l; apply IHl1; rewrite <- H; reflexivity ] ]. Qed. Lemma StepFun_P43 : forall (f:R -> R) (a b c:R) (pr1:IsStepFun f a b) (pr2:IsStepFun f b c) (pr3:IsStepFun f a c), RiemannInt_SF (mkStepFun pr1) + RiemannInt_SF (mkStepFun pr2) = RiemannInt_SF (mkStepFun pr3). Proof. intros f; intros. pose proof pr1 as (l1,(lf1,H1)). pose proof pr2 as (l2,(lf2,H2)). pose proof pr3 as (l3,(lf3,H3)). replace (RiemannInt_SF (mkStepFun pr1)) with match Rle_dec a b with | left _ => Int_SF lf1 l1 | right _ => - Int_SF lf1 l1 end. replace (RiemannInt_SF (mkStepFun pr2)) with match Rle_dec b c with | left _ => Int_SF lf2 l2 | right _ => - Int_SF lf2 l2 end. replace (RiemannInt_SF (mkStepFun pr3)) with match Rle_dec a c with | left _ => Int_SF lf3 l3 | right _ => - Int_SF lf3 l3 end. case (Rle_dec a b); case (Rle_dec b c); case (Rle_dec a c); intros. elim r1; intro. elim r0; intro. replace (Int_SF lf3 l3) with (Int_SF (FF (cons_Rlist l1 l2) f) (cons_Rlist l1 l2)). replace (Int_SF lf1 l1) with (Int_SF (FF l1 f) l1). replace (Int_SF lf2 l2) with (Int_SF (FF l2 f) l2). symmetry ; apply StepFun_P42. unfold adapted_couple in H1, H2; decompose [and] H1; decompose [and] H2; clear H1 H2; rewrite H11; rewrite H5; unfold Rmax, Rmin; case (Rle_dec a b); case (Rle_dec b c); intros; reflexivity || elim n; assumption. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf2; apply H2; assumption | assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf1; apply H1 | assumption ]. eapply StepFun_P17; [ apply (StepFun_P40 H H0 H1 H2) | apply H3 ]. replace (Int_SF lf2 l2) with 0. rewrite Rplus_0_r; eapply StepFun_P17; [ apply H1 | rewrite <- H0 in H3; apply H3 ]. symmetry ; eapply StepFun_P8; [ apply H2 | assumption ]. 
replace (Int_SF lf1 l1) with 0. rewrite Rplus_0_l; eapply StepFun_P17; [ apply H2 | rewrite H in H3; apply H3 ]. symmetry ; eapply StepFun_P8; [ apply H1 | assumption ]. elim n; apply Rle_trans with b; assumption. apply Rplus_eq_reg_l with (Int_SF lf2 l2); replace (Int_SF lf2 l2 + (Int_SF lf1 l1 + - Int_SF lf2 l2)) with (Int_SF lf1 l1); [ idtac | ring ]. assert (H : c < b). auto with real. elim r; intro. rewrite Rplus_comm; replace (Int_SF lf1 l1) with (Int_SF (FF (cons_Rlist l3 l2) f) (cons_Rlist l3 l2)). replace (Int_SF lf3 l3) with (Int_SF (FF l3 f) l3). replace (Int_SF lf2 l2) with (Int_SF (FF l2 f) l2). apply StepFun_P42. unfold adapted_couple in H2, H3; decompose [and] H2; decompose [and] H3; clear H3 H2; rewrite H10; rewrite H6; unfold Rmax, Rmin; case (Rle_dec a c); case (Rle_dec b c); intros; [ elim n; assumption | reflexivity | elim n0; assumption | elim n1; assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf2; apply H2 | assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf3; apply H3 | assumption ]. eapply StepFun_P17; [ apply (StepFun_P40 H0 H H3 (StepFun_P2 H2)) | apply H1 ]. replace (Int_SF lf3 l3) with 0. rewrite Rplus_0_r; eapply StepFun_P17; [ apply H1 | apply StepFun_P2; rewrite <- H0 in H2; apply H2 ]. symmetry ; eapply StepFun_P8; [ apply H3 | assumption ]. replace (Int_SF lf2 l2) with (Int_SF lf3 l3 + Int_SF lf1 l1). ring. elim r; intro. replace (Int_SF lf2 l2) with (Int_SF (FF (cons_Rlist l3 l1) f) (cons_Rlist l3 l1)). replace (Int_SF lf3 l3) with (Int_SF (FF l3 f) l3). replace (Int_SF lf1 l1) with (Int_SF (FF l1 f) l1). symmetry ; apply StepFun_P42. unfold adapted_couple in H1, H3; decompose [and] H1; decompose [and] H3; clear H3 H1; rewrite H9; rewrite H5; unfold Rmax, Rmin; case (Rle_dec a c); case (Rle_dec a b); intros; [ elim n; assumption | elim n1; assumption | reflexivity | elim n1; assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf1; apply H1 | assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf3; apply H3 | assumption ]. eapply StepFun_P17. assert (H0 : c < a). auto with real. apply (StepFun_P40 H0 H (StepFun_P2 H3) H1). apply StepFun_P2; apply H2. replace (Int_SF lf1 l1) with 0. rewrite Rplus_0_r; eapply StepFun_P17; [ apply H3 | rewrite <- H in H2; apply H2 ]. symmetry ; eapply StepFun_P8; [ apply H1 | assumption ]. assert (H : b < a). auto with real. replace (Int_SF lf2 l2) with (Int_SF lf3 l3 + Int_SF lf1 l1). ring. rewrite Rplus_comm; elim r; intro. replace (Int_SF lf2 l2) with (Int_SF (FF (cons_Rlist l1 l3) f) (cons_Rlist l1 l3)). replace (Int_SF lf3 l3) with (Int_SF (FF l3 f) l3). replace (Int_SF lf1 l1) with (Int_SF (FF l1 f) l1). symmetry ; apply StepFun_P42. unfold adapted_couple in H1, H3; decompose [and] H1; decompose [and] H3; clear H3 H1; rewrite H11; rewrite H5; unfold Rmax, Rmin; case (Rle_dec a c); case (Rle_dec a b); intros; [ elim n; assumption | reflexivity | elim n0; assumption | elim n1; assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf1; apply H1 | assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf3; apply H3 | assumption ]. eapply StepFun_P17. apply (StepFun_P40 H H0 (StepFun_P2 H1) H3). apply H2. replace (Int_SF lf3 l3) with 0. rewrite Rplus_0_r; eapply StepFun_P17; [ apply H1 | rewrite <- H0 in H2; apply StepFun_P2; apply H2 ]. symmetry ; eapply StepFun_P8; [ apply H3 | assumption ]. 
assert (H : c < a). auto with real. replace (Int_SF lf1 l1) with (Int_SF lf2 l2 + Int_SF lf3 l3). ring. elim r; intro. replace (Int_SF lf1 l1) with (Int_SF (FF (cons_Rlist l2 l3) f) (cons_Rlist l2 l3)). replace (Int_SF lf3 l3) with (Int_SF (FF l3 f) l3). replace (Int_SF lf2 l2) with (Int_SF (FF l2 f) l2). symmetry ; apply StepFun_P42. unfold adapted_couple in H2, H3; decompose [and] H2; decompose [and] H3; clear H3 H2; rewrite H11; rewrite H5; unfold Rmax, Rmin; case (Rle_dec a c); case (Rle_dec b c); intros; [ elim n; assumption | elim n1; assumption | reflexivity | elim n1; assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf2; apply H2 | assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf3; apply H3 | assumption ]. eapply StepFun_P17. apply (StepFun_P40 H0 H H2 (StepFun_P2 H3)). apply StepFun_P2; apply H1. replace (Int_SF lf2 l2) with 0. rewrite Rplus_0_l; eapply StepFun_P17; [ apply H3 | rewrite H0 in H1; apply H1 ]. symmetry ; eapply StepFun_P8; [ apply H2 | assumption ]. elim n; apply Rle_trans with a; try assumption. auto with real. assert (H : c < b). auto with real. assert (H0 : b < a). auto with real. replace (Int_SF lf3 l3) with (Int_SF lf2 l2 + Int_SF lf1 l1). ring. replace (Int_SF lf3 l3) with (Int_SF (FF (cons_Rlist l2 l1) f) (cons_Rlist l2 l1)). replace (Int_SF lf1 l1) with (Int_SF (FF l1 f) l1). replace (Int_SF lf2 l2) with (Int_SF (FF l2 f) l2). symmetry ; apply StepFun_P42. unfold adapted_couple in H2, H1; decompose [and] H2; decompose [and] H1; clear H1 H2; rewrite H11; rewrite H5; unfold Rmax, Rmin; case (Rle_dec a b); case (Rle_dec b c); intros; [ elim n1; assumption | elim n1; assumption | elim n0; assumption | reflexivity ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf2; apply H2 | assumption ]. eapply StepFun_P17; [ apply StepFun_P21; unfold is_subdivision; split with lf1; apply H1 | assumption ]. eapply StepFun_P17. apply (StepFun_P40 H H0 (StepFun_P2 H2) (StepFun_P2 H1)). apply StepFun_P2; apply H3. unfold RiemannInt_SF; case (Rle_dec a c); intro. eapply StepFun_P17. apply H3. change (adapted_couple (mkStepFun pr3) a c (subdivision (mkStepFun pr3)) (subdivision_val (mkStepFun pr3))); apply StepFun_P1. apply Ropp_eq_compat; eapply StepFun_P17. apply H3. change (adapted_couple (mkStepFun pr3) a c (subdivision (mkStepFun pr3)) (subdivision_val (mkStepFun pr3))); apply StepFun_P1. unfold RiemannInt_SF; case (Rle_dec b c); intro. eapply StepFun_P17. apply H2. change (adapted_couple (mkStepFun pr2) b c (subdivision (mkStepFun pr2)) (subdivision_val (mkStepFun pr2))); apply StepFun_P1. apply Ropp_eq_compat; eapply StepFun_P17. apply H2. change (adapted_couple (mkStepFun pr2) b c (subdivision (mkStepFun pr2)) (subdivision_val (mkStepFun pr2))); apply StepFun_P1. unfold RiemannInt_SF; case (Rle_dec a b); intro. eapply StepFun_P17. apply H1. change (adapted_couple (mkStepFun pr1) a b (subdivision (mkStepFun pr1)) (subdivision_val (mkStepFun pr1))); apply StepFun_P1. apply Ropp_eq_compat; eapply StepFun_P17. apply H1. change (adapted_couple (mkStepFun pr1) a b (subdivision (mkStepFun pr1)) (subdivision_val (mkStepFun pr1))); apply StepFun_P1. Qed. Lemma StepFun_P44 : forall (f:R -> R) (a b c:R), IsStepFun f a b -> a <= c <= b -> IsStepFun f a c. Proof. intros f; intros; assert (H0 : a <= b). elim H; intros; apply Rle_trans with c; assumption. 
elim H; clear H; intros; unfold IsStepFun in X; unfold is_subdivision in X; elim X; clear X; intros l1 [lf1 H2]; cut (forall (l1 lf1:Rlist) (a b c:R) (f:R -> R), adapted_couple f a b l1 lf1 -> a <= c <= b -> { l:Rlist & { l0:Rlist & adapted_couple f a c l l0 } }). intro X; unfold IsStepFun; unfold is_subdivision; eapply X. apply H2. split; assumption. clear f a b c H0 H H1 H2 l1 lf1; simple induction l1. intros; unfold adapted_couple in H; decompose [and] H; clear H; simpl in H4; discriminate. simple induction r0. intros X lf1 a b c f H H0; assert (H1 : a = b). unfold adapted_couple in H; decompose [and] H; clear H; simpl in H3; simpl in H2; assert (H7 : a <= b). elim H0; intros; apply Rle_trans with c; assumption. replace a with (Rmin a b). pattern b at 2; replace b with (Rmax a b). rewrite <- H2; rewrite H3; reflexivity. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. split with (cons r nil); split with lf1; assert (H2 : c = b). rewrite H1 in H0; elim H0; intros; apply Rle_antisym; assumption. rewrite H2; assumption. intros r1 r2 _ X0 lf1 a b c f H H0; induction lf1 as [| r3 lf1 Hreclf1]. unfold adapted_couple in H; decompose [and] H; clear H; simpl in H4; discriminate. clear Hreclf1; assert (H1 : {c <= r1} + {r1 < c}). case (Rle_dec c r1); intro; [ left; assumption | right; auto with real ]. elim H1; intro. split with (cons r (cons c nil)); split with (cons r3 nil); unfold adapted_couple in H; decompose [and] H; clear H; assert (H6 : r = a). simpl in H4; rewrite H4; unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; elim H0; intros; apply Rle_trans with c; assumption ]. elim H0; clear H0; intros; unfold adapted_couple; repeat split. rewrite H6; unfold ordered_Rlist; intros; simpl in H8; inversion H8; [ simpl; assumption | elim (le_Sn_O _ H10) ]. simpl; unfold Rmin; case (Rle_dec a c); intro; [ assumption | elim n; assumption ]. simpl; unfold Rmax; case (Rle_dec a c); intro; [ reflexivity | elim n; assumption ]. unfold constant_D_eq, open_interval; intros; simpl in H8; inversion H8. simpl; assert (H10 := H7 0%nat); assert (H12 : (0 < pred (Rlength (cons r (cons r1 r2))))%nat). simpl; apply lt_O_Sn. apply (H10 H12); unfold open_interval; simpl; rewrite H11 in H9; simpl in H9; elim H9; clear H9; intros; split; try assumption. apply Rlt_le_trans with c; assumption. elim (le_Sn_O _ H11). cut (adapted_couple f r1 b (cons r1 r2) lf1). cut (r1 <= c <= b). intros. elim (X0 _ _ _ _ _ H3 H2); intros l1' [lf1' H4]; split with (cons r l1'); split with (cons r3 lf1'); unfold adapted_couple in H, H4; decompose [and] H; decompose [and] H4; clear H H4 X0; assert (H14 : a <= b). elim H0; intros; apply Rle_trans with c; assumption. assert (H16 : r = a). simpl in H7; rewrite H7; unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. induction l1' as [| r4 l1' Hrecl1']. simpl in H13; discriminate. clear Hrecl1'; unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H; induction i as [| i Hreci]. simpl; replace r4 with r1. apply (H5 0%nat). simpl; apply lt_O_Sn. simpl in H12; rewrite H12; unfold Rmin; case (Rle_dec r1 c); intro; [ reflexivity | elim n; left; assumption ]. apply (H9 i); simpl; apply lt_S_n; assumption. simpl; unfold Rmin; case (Rle_dec a c); intro; [ assumption | elim n; elim H0; intros; assumption ]. replace (Rmax a c) with (Rmax r1 c). rewrite <- H11; reflexivity. 
unfold Rmax; case (Rle_dec r1 c); case (Rle_dec a c); intros; [ reflexivity | elim n; elim H0; intros; assumption | elim n; left; assumption | elim n0; left; assumption ]. simpl; simpl in H13; rewrite H13; reflexivity. intros; simpl in H; unfold constant_D_eq, open_interval; intros; induction i as [| i Hreci]. simpl; assert (H17 := H10 0%nat); assert (H18 : (0 < pred (Rlength (cons r (cons r1 r2))))%nat). simpl; apply lt_O_Sn. apply (H17 H18); unfold open_interval; simpl; simpl in H4; elim H4; clear H4; intros; split; try assumption; replace r1 with r4. assumption. simpl in H12; rewrite H12; unfold Rmin; case (Rle_dec r1 c); intro; [ reflexivity | elim n; left; assumption ]. clear Hreci; simpl; apply H15. simpl; apply lt_S_n; assumption. unfold open_interval; apply H4. split. left; assumption. elim H0; intros; assumption. eapply StepFun_P7; [ elim H0; intros; apply Rle_trans with c; [ apply H2 | apply H3 ] | apply H ]. Qed. Lemma StepFun_P45 : forall (f:R -> R) (a b c:R), IsStepFun f a b -> a <= c <= b -> IsStepFun f c b. Proof. intros f; intros; assert (H0 : a <= b). elim H; intros; apply Rle_trans with c; assumption. elim H; clear H; intros; unfold IsStepFun in X; unfold is_subdivision in X; elim X; clear X; intros l1 [lf1 H2]; cut (forall (l1 lf1:Rlist) (a b c:R) (f:R -> R), adapted_couple f a b l1 lf1 -> a <= c <= b -> { l:Rlist & { l0:Rlist & adapted_couple f c b l l0 } }). intro X; unfold IsStepFun; unfold is_subdivision; eapply X; [ apply H2 | split; assumption ]. clear f a b c H0 H H1 H2 l1 lf1; simple induction l1. intros; unfold adapted_couple in H; decompose [and] H; clear H; simpl in H4; discriminate. simple induction r0. intros X lf1 a b c f H H0; assert (H1 : a = b). unfold adapted_couple in H; decompose [and] H; clear H; simpl in H3; simpl in H2; assert (H7 : a <= b). elim H0; intros; apply Rle_trans with c; assumption. replace a with (Rmin a b). pattern b at 2; replace b with (Rmax a b). rewrite <- H2; rewrite H3; reflexivity. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. split with (cons r nil); split with lf1; assert (H2 : c = b). rewrite H1 in H0; elim H0; intros; apply Rle_antisym; assumption. rewrite <- H2 in H1; rewrite <- H1; assumption. intros r1 r2 _ X0 lf1 a b c f H H0; induction lf1 as [| r3 lf1 Hreclf1]. unfold adapted_couple in H; decompose [and] H; clear H; simpl in H4; discriminate. clear Hreclf1; assert (H1 : {c <= r1} + {r1 < c}). case (Rle_dec c r1); intro; [ left; assumption | right; auto with real ]. elim H1; intro. split with (cons c (cons r1 r2)); split with (cons r3 lf1); unfold adapted_couple in H; decompose [and] H; clear H; unfold adapted_couple; repeat split. unfold ordered_Rlist; intros; simpl in H; induction i as [| i Hreci]. simpl; assumption. clear Hreci; apply (H2 (S i)); simpl; assumption. simpl; unfold Rmin; case (Rle_dec c b); intro; [ reflexivity | elim n; elim H0; intros; assumption ]. replace (Rmax c b) with (Rmax a b). rewrite <- H3; reflexivity. unfold Rmax; case (Rle_dec a b); case (Rle_dec c b); intros; [ reflexivity | elim n; elim H0; intros; assumption | elim n; elim H0; intros; apply Rle_trans with c; assumption | elim n0; elim H0; intros; apply Rle_trans with c; assumption ]. simpl; simpl in H5; apply H5. intros; simpl in H; induction i as [| i Hreci]. unfold constant_D_eq, open_interval; intros; simpl; apply (H7 0%nat). simpl; apply lt_O_Sn. 
unfold open_interval; simpl; simpl in H6; elim H6; clear H6; intros; split; try assumption; apply Rle_lt_trans with c; try assumption; replace r with a. elim H0; intros; assumption. simpl in H4; rewrite H4; unfold Rmin; case (Rle_dec a b); intros; [ reflexivity | elim n; elim H0; intros; apply Rle_trans with c; assumption ]. clear Hreci; apply (H7 (S i)); simpl; assumption. cut (adapted_couple f r1 b (cons r1 r2) lf1). cut (r1 <= c <= b). intros; elim (X0 _ _ _ _ _ H3 H2); intros l1' [lf1' H4]; split with l1'; split with lf1'; assumption. split; [ left; assumption | elim H0; intros; assumption ]. eapply StepFun_P7; [ elim H0; intros; apply Rle_trans with c; [ apply H2 | apply H3 ] | apply H ]. Qed.
Lemma StepFun_P46 : forall (f:R -> R) (a b c:R), IsStepFun f a b -> IsStepFun f b c -> IsStepFun f a c.
Proof. intros f; intros; case (Rle_dec a b); case (Rle_dec b c); intros. apply StepFun_P41 with b; assumption. case (Rle_dec a c); intro. apply StepFun_P44 with b; try assumption. split; [ assumption | auto with real ]. apply StepFun_P6; apply StepFun_P44 with b. apply StepFun_P6; assumption. split; auto with real. case (Rle_dec a c); intro. apply StepFun_P45 with b; try assumption. split; auto with real. apply StepFun_P6; apply StepFun_P45 with b. apply StepFun_P6; assumption. split; [ assumption | auto with real ]. apply StepFun_P6; apply StepFun_P41 with b; auto with real || apply StepFun_P6; assumption. Qed.

coq-8.4pl2/theories/Reals/Rtopology.v

(************************************************************************)
(*  v * The Coq Proof Assistant / The Coq Development Team              *)
(************************************************************************)

(* Basic topological notions on R: inclusion of domains, open discs,
   neighbourhoods, open and closed sets, interior and adherence. *)

Definition included (D1 D2:R -> Prop) : Prop := forall x:R, D1 x -> D2 x.
Definition disc (x:R) (delta:posreal) (y:R) : Prop := Rabs (y - x) < delta.
Definition neighbourhood (V:R -> Prop) (x:R) : Prop := exists delta : posreal, included (disc x delta) V.
Definition open_set (D:R -> Prop) : Prop := forall x:R, D x -> neighbourhood D x.
Definition complementary (D:R -> Prop) (c:R) : Prop := ~ D c.
Definition closed_set (D:R -> Prop) : Prop := open_set (complementary D).
Definition intersection_domain (D1 D2:R -> Prop) (c:R) : Prop := D1 c /\ D2 c.
Definition union_domain (D1 D2:R -> Prop) (c:R) : Prop := D1 c \/ D2 c.
Definition interior (D:R -> Prop) (x:R) : Prop := neighbourhood D x.
Lemma interior_P1 : forall D:R -> Prop, included (interior D) D.
Proof. intros; unfold included; unfold interior; intros; unfold neighbourhood in H; elim H; intros; unfold included in H0; apply H0; unfold disc; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (cond_pos x0). Qed.
Lemma interior_P2 : forall D:R -> Prop, open_set D -> included D (interior D).
Proof. intros; unfold open_set in H; unfold included; intros; assert (H1 := H _ H0); unfold interior; apply H1. Qed.
Definition point_adherent (D:R -> Prop) (x:R) : Prop := forall V:R -> Prop, neighbourhood V x -> exists y : R, intersection_domain V D y.
Definition adherence (D:R -> Prop) (x:R) : Prop := point_adherent D x.
Lemma adherence_P1 : forall D:R -> Prop, included D (adherence D).
Proof. intro; unfold included; intros; unfold adherence; unfold point_adherent; intros; exists x; unfold intersection_domain; split. unfold neighbourhood in H0; elim H0; intros; unfold included in H1; apply H1; unfold disc; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (cond_pos x0). apply H. Qed.
Lemma included_trans : forall D1 D2 D3:R -> Prop, included D1 D2 -> included D2 D3 -> included D1 D3. Proof.
unfold included; intros; apply H0; apply H; apply H1. Qed. Lemma interior_P3 : forall D:R -> Prop, open_set (interior D). Proof. intro; unfold open_set, interior; unfold neighbourhood; intros; elim H; intros. exists x0; unfold included; intros. set (del := x0 - Rabs (x - x1)). cut (0 < del). intro; exists (mkposreal del H2); intros. cut (included (disc x1 (mkposreal del H2)) (disc x x0)). intro; assert (H5 := included_trans _ _ _ H4 H0). apply H5; apply H3. unfold included; unfold disc; intros. apply Rle_lt_trans with (Rabs (x3 - x1) + Rabs (x1 - x)). replace (x3 - x) with (x3 - x1 + (x1 - x)); [ apply Rabs_triang | ring ]. replace (pos x0) with (del + Rabs (x1 - x)). do 2 rewrite <- (Rplus_comm (Rabs (x1 - x))); apply Rplus_lt_compat_l; apply H4. unfold del; rewrite <- (Rabs_Ropp (x - x1)); rewrite Ropp_minus_distr; ring. unfold del; apply Rplus_lt_reg_r with (Rabs (x - x1)); rewrite Rplus_0_r; replace (Rabs (x - x1) + (x0 - Rabs (x - x1))) with (pos x0); [ idtac | ring ]. unfold disc in H1; rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H1. Qed. Lemma complementary_P1 : forall D:R -> Prop, ~ (exists y : R, intersection_domain D (complementary D) y). Proof. intro; red; intro; elim H; intros; unfold intersection_domain, complementary in H0; elim H0; intros; elim H2; assumption. Qed. Lemma adherence_P2 : forall D:R -> Prop, closed_set D -> included (adherence D) D. Proof. unfold closed_set; unfold open_set, complementary; intros; unfold included, adherence; intros; assert (H1 := classic (D x)); elim H1; intro. assumption. assert (H3 := H _ H2); assert (H4 := H0 _ H3); elim H4; intros; unfold intersection_domain in H5; elim H5; intros; elim H6; assumption. Qed. Lemma adherence_P3 : forall D:R -> Prop, closed_set (adherence D). Proof. intro; unfold closed_set, adherence; unfold open_set, complementary, point_adherent; intros; set (P := fun V:R -> Prop => neighbourhood V x -> exists y : R, intersection_domain V D y); assert (H0 := not_all_ex_not _ P H); elim H0; intros V0 H1; unfold P in H1; assert (H2 := imply_to_and _ _ H1); unfold neighbourhood; elim H2; intros; unfold neighbourhood in H3; elim H3; intros; exists x0; unfold included; intros; red; intro. assert (H8 := H7 V0); cut (exists delta : posreal, (forall x:R, disc x1 delta x -> V0 x)). intro; assert (H10 := H8 H9); elim H4; assumption. cut (0 < x0 - Rabs (x - x1)). intro; set (del := mkposreal _ H9); exists del; intros; unfold included in H5; apply H5; unfold disc; apply Rle_lt_trans with (Rabs (x2 - x1) + Rabs (x1 - x)). replace (x2 - x) with (x2 - x1 + (x1 - x)); [ apply Rabs_triang | ring ]. replace (pos x0) with (del + Rabs (x1 - x)). do 2 rewrite <- (Rplus_comm (Rabs (x1 - x))); apply Rplus_lt_compat_l; apply H10. unfold del; simpl; rewrite <- (Rabs_Ropp (x - x1)); rewrite Ropp_minus_distr; ring. apply Rplus_lt_reg_r with (Rabs (x - x1)); rewrite Rplus_0_r; replace (Rabs (x - x1) + (x0 - Rabs (x - x1))) with (pos x0); [ rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H6 | ring ]. Qed. Definition eq_Dom (D1 D2:R -> Prop) : Prop := included D1 D2 /\ included D2 D1. Infix "=_D" := eq_Dom (at level 70, no associativity). Lemma open_set_P1 : forall D:R -> Prop, open_set D <-> D =_D interior D. Proof. intro; split. intro; unfold eq_Dom; split. apply interior_P2; assumption. apply interior_P1. intro; unfold eq_Dom in H; elim H; clear H; intros; unfold open_set; intros; unfold included, interior in H; unfold included in H0; apply (H _ H1). Qed. Lemma closed_set_P1 : forall D:R -> Prop, closed_set D <-> D =_D adherence D. 
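(* Dual of open_set_P1 just above: a set is closed exactly when it already
   contains all its adherent points, i.e. when it coincides (as a domain,
   [=_D]) with its adherence. *)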
Proof. intro; split. intro; unfold eq_Dom; split. apply adherence_P1. apply adherence_P2; assumption. unfold eq_Dom; unfold included; intros; assert (H0 := adherence_P3 D); unfold closed_set in H0; unfold closed_set; unfold open_set; unfold open_set in H0; intros; assert (H2 : complementary (adherence D) x). unfold complementary; unfold complementary in H1; red; intro; elim H; clear H; intros _ H; elim H1; apply (H _ H2). assert (H3 := H0 _ H2); unfold neighbourhood; unfold neighbourhood in H3; elim H3; intros; exists x0; unfold included; unfold included in H4; intros; assert (H6 := H4 _ H5); unfold complementary in H6; unfold complementary; red; intro; elim H; clear H; intros H _; elim H6; apply (H _ H7). Qed. Lemma neighbourhood_P1 : forall (D1 D2:R -> Prop) (x:R), included D1 D2 -> neighbourhood D1 x -> neighbourhood D2 x. Proof. unfold included, neighbourhood; intros; elim H0; intros; exists x0; intros; unfold included; unfold included in H1; intros; apply (H _ (H1 _ H2)). Qed. Lemma open_set_P2 : forall D1 D2:R -> Prop, open_set D1 -> open_set D2 -> open_set (union_domain D1 D2). Proof. unfold open_set; intros; unfold union_domain in H1; elim H1; intro. apply neighbourhood_P1 with D1. unfold included, union_domain; tauto. apply H; assumption. apply neighbourhood_P1 with D2. unfold included, union_domain; tauto. apply H0; assumption. Qed. Lemma open_set_P3 : forall D1 D2:R -> Prop, open_set D1 -> open_set D2 -> open_set (intersection_domain D1 D2). Proof. unfold open_set; intros; unfold intersection_domain in H1; elim H1; intros. assert (H4 := H _ H2); assert (H5 := H0 _ H3); unfold intersection_domain; unfold neighbourhood in H4, H5; elim H4; clear H; intros del1 H; elim H5; clear H0; intros del2 H0; cut (0 < Rmin del1 del2). intro; set (del := mkposreal _ H6). exists del; unfold included; intros; unfold included in H, H0; unfold disc in H, H0, H7. split. apply H; apply Rlt_le_trans with (pos del). apply H7. unfold del; simpl; apply Rmin_l. apply H0; apply Rlt_le_trans with (pos del). apply H7. unfold del; simpl; apply Rmin_r. unfold Rmin; case (Rle_dec del1 del2); intro. apply (cond_pos del1). apply (cond_pos del2). Qed. Lemma open_set_P4 : open_set (fun x:R => False). Proof. unfold open_set; intros; elim H. Qed. Lemma open_set_P5 : open_set (fun x:R => True). Proof. unfold open_set; intros; unfold neighbourhood. exists (mkposreal 1 Rlt_0_1); unfold included; intros; trivial. Qed. Lemma disc_P1 : forall (x:R) (del:posreal), open_set (disc x del). Proof. intros; assert (H := open_set_P1 (disc x del)). elim H; intros; apply H1. unfold eq_Dom; split. unfold included, interior, disc; intros; cut (0 < del - Rabs (x - x0)). intro; set (del2 := mkposreal _ H3). exists del2; unfold included; intros. apply Rle_lt_trans with (Rabs (x1 - x0) + Rabs (x0 - x)). replace (x1 - x) with (x1 - x0 + (x0 - x)); [ apply Rabs_triang | ring ]. replace (pos del) with (del2 + Rabs (x0 - x)). do 2 rewrite <- (Rplus_comm (Rabs (x0 - x))); apply Rplus_lt_compat_l. apply H4. unfold del2; simpl; rewrite <- (Rabs_Ropp (x - x0)); rewrite Ropp_minus_distr; ring. apply Rplus_lt_reg_r with (Rabs (x - x0)); rewrite Rplus_0_r; replace (Rabs (x - x0) + (del - Rabs (x - x0))) with (pos del); [ rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H2 | ring ]. apply interior_P1. Qed. Lemma continuity_P1 : forall (f:R -> R) (x:R), continuity_pt f x <-> (forall W:R -> Prop, neighbourhood W (f x) -> exists V : R -> Prop, neighbourhood V x /\ (forall y:R, V y -> W (f y))). Proof. intros; split. 
intros; unfold neighbourhood in H0. elim H0; intros del1 H1. unfold continuity_pt in H; unfold continue_in in H; unfold limit1_in in H; unfold limit_in in H; simpl in H; unfold R_dist in H. assert (H2 := H del1 (cond_pos del1)). elim H2; intros del2 H3. elim H3; intros. exists (disc x (mkposreal del2 H4)). intros; unfold included in H1; split. unfold neighbourhood, disc. exists (mkposreal del2 H4). unfold included; intros; assumption. intros; apply H1; unfold disc; case (Req_dec y x); intro. rewrite H7; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (cond_pos del1). apply H5; split. unfold D_x, no_cond; split. trivial. apply (not_eq_sym (A:=R)); apply H7. unfold disc in H6; apply H6. intros; unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; intros. assert (H1 := H (disc (f x) (mkposreal eps H0))). cut (neighbourhood (disc (f x) (mkposreal eps H0)) (f x)). intro; assert (H3 := H1 H2). elim H3; intros D H4; elim H4; intros; unfold neighbourhood in H5; elim H5; intros del1 H7. exists (pos del1); split. apply (cond_pos del1). intros; elim H8; intros; simpl in H10; unfold R_dist in H10; simpl; unfold R_dist; apply (H6 _ (H7 _ H10)). unfold neighbourhood, disc; exists (mkposreal eps H0); unfold included; intros; assumption. Qed. Definition image_rec (f:R -> R) (D:R -> Prop) (x:R) : Prop := D (f x). (**********) Lemma continuity_P2 : forall (f:R -> R) (D:R -> Prop), continuity f -> open_set D -> open_set (image_rec f D). Proof. intros; unfold open_set in H0; unfold open_set; intros; assert (H2 := continuity_P1 f x); elim H2; intros H3 _; assert (H4 := H3 (H x)); unfold neighbourhood, image_rec; unfold image_rec in H1; assert (H5 := H4 D (H0 (f x) H1)); elim H5; intros V0 H6; elim H6; intros; unfold neighbourhood in H7; elim H7; intros del H9; exists del; unfold included in H9; unfold included; intros; apply (H8 _ (H9 _ H10)). Qed. (**********) Lemma continuity_P3 : forall f:R -> R, continuity f <-> (forall D:R -> Prop, open_set D -> open_set (image_rec f D)). Proof. intros; split. intros; apply continuity_P2; assumption. intros; unfold continuity; unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; cut (open_set (disc (f x) (mkposreal _ H0))). intro; assert (H2 := H _ H1). unfold open_set, image_rec in H2; cut (disc (f x) (mkposreal _ H0) (f x)). intro; assert (H4 := H2 _ H3). unfold neighbourhood in H4; elim H4; intros del H5. exists (pos del); split. apply (cond_pos del). intros; unfold included in H5; apply H5; elim H6; intros; apply H8. unfold disc; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply H0. apply disc_P1. Qed. (**********) Theorem Rsepare : forall x y:R, x <> y -> exists V : R -> Prop, (exists W : R -> Prop, neighbourhood V x /\ neighbourhood W y /\ ~ (exists y : R, intersection_domain V W y)). Proof. intros x y Hsep; set (D := Rabs (x - y)). cut (0 < D / 2). intro; exists (disc x (mkposreal _ H)). exists (disc y (mkposreal _ H)); split. unfold neighbourhood; exists (mkposreal _ H); unfold included; tauto. split. unfold neighbourhood; exists (mkposreal _ H); unfold included; tauto. red; intro; elim H0; intros; unfold intersection_domain in H1; elim H1; intros. cut (D < D). intro; elim (Rlt_irrefl _ H4). change (Rabs (x - y) < D); apply Rle_lt_trans with (Rabs (x - x0) + Rabs (x0 - y)). replace (x - y) with (x - x0 + (x0 - y)); [ apply Rabs_triang | ring ]. rewrite (double_var D); apply Rplus_lt_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H2. apply H3. 
unfold Rdiv; apply Rmult_lt_0_compat. unfold D; apply Rabs_pos_lt; apply (Rminus_eq_contra _ _ Hsep). apply Rinv_0_lt_compat; prove_sup0. Qed. Record family : Type := mkfamily {ind : R -> Prop; f :> R -> R -> Prop; cond_fam : forall x:R, (exists y : R, f x y) -> ind x}. Definition family_open_set (f:family) : Prop := forall x:R, open_set (f x). Definition domain_finite (D:R -> Prop) : Prop := exists l : Rlist, (forall x:R, D x <-> In x l). Definition family_finite (f:family) : Prop := domain_finite (ind f). Definition covering (D:R -> Prop) (f:family) : Prop := forall x:R, D x -> exists y : R, f y x. Definition covering_open_set (D:R -> Prop) (f:family) : Prop := covering D f /\ family_open_set f. Definition covering_finite (D:R -> Prop) (f:family) : Prop := covering D f /\ family_finite f. Lemma restriction_family : forall (f:family) (D:R -> Prop) (x:R), (exists y : R, (fun z1 z2:R => f z1 z2 /\ D z1) x y) -> intersection_domain (ind f) D x. Proof. intros; elim H; intros; unfold intersection_domain; elim H0; intros; split. apply (cond_fam f0); exists x0; assumption. assumption. Qed. Definition subfamily (f:family) (D:R -> Prop) : family := mkfamily (intersection_domain (ind f) D) (fun x y:R => f x y /\ D x) (restriction_family f D). Definition compact (X:R -> Prop) : Prop := forall f:family, covering_open_set X f -> exists D : R -> Prop, covering_finite X (subfamily f D). (**********) Lemma family_P1 : forall (f:family) (D:R -> Prop), family_open_set f -> family_open_set (subfamily f D). Proof. unfold family_open_set; intros; unfold subfamily; simpl; assert (H0 := classic (D x)). elim H0; intro. cut (open_set (f0 x) -> open_set (fun y:R => f0 x y /\ D x)). intro; apply H2; apply H. unfold open_set; unfold neighbourhood; intros; elim H3; intros; assert (H6 := H2 _ H4); elim H6; intros; exists x1; unfold included; intros; split. apply (H7 _ H8). assumption. cut (open_set (fun y:R => False) -> open_set (fun y:R => f0 x y /\ D x)). intro; apply H2; apply open_set_P4. unfold open_set; unfold neighbourhood; intros; elim H3; intros; elim H1; assumption. Qed. Definition bounded (D:R -> Prop) : Prop := exists m : R, (exists M : R, (forall x:R, D x -> m <= x <= M)). Lemma open_set_P6 : forall D1 D2:R -> Prop, open_set D1 -> D1 =_D D2 -> open_set D2. Proof. unfold open_set; unfold neighbourhood; intros. unfold eq_Dom in H0; elim H0; intros. assert (H4 := H _ (H3 _ H1)). elim H4; intros. exists x0; apply included_trans with D1; assumption. Qed. (**********) Lemma compact_P1 : forall X:R -> Prop, compact X -> bounded X. Proof. intros; unfold compact in H; set (D := fun x:R => True); set (g := fun x y:R => Rabs y < x); cut (forall x:R, (exists y : _, g x y) -> True); [ intro | intro; trivial ]. set (f0 := mkfamily D g H0); assert (H1 := H f0); cut (covering_open_set X f0). intro; assert (H3 := H1 H2); elim H3; intros D' H4; unfold covering_finite in H4; elim H4; intros; unfold family_finite in H6; unfold domain_finite in H6; elim H6; intros l H7; unfold bounded; set (r := MaxRlist l). exists (- r); exists r; intros. unfold covering in H5; assert (H9 := H5 _ H8); elim H9; intros; unfold subfamily in H10; simpl in H10; elim H10; intros; assert (H13 := H7 x0); simpl in H13; cut (intersection_domain D D' x0). elim H13; clear H13; intros. assert (H16 := H13 H15); unfold g in H11; split. cut (x0 <= r). intro; cut (Rabs x < r). intro; assert (H19 := Rabs_def2 x r H18); elim H19; intros; left; assumption. apply Rlt_le_trans with x0; assumption. apply (MaxRlist_P1 l x0 H16). cut (x0 <= r). 
intro; apply Rle_trans with (Rabs x). apply RRle_abs. apply Rle_trans with x0. left; apply H11. assumption. apply (MaxRlist_P1 l x0 H16). unfold intersection_domain, D; tauto. unfold covering_open_set; split. unfold covering; intros; simpl; exists (Rabs x + 1); unfold g; pattern (Rabs x) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rlt_0_1. unfold family_open_set; intro; case (Rtotal_order 0 x); intro. apply open_set_P6 with (disc 0 (mkposreal _ H2)). apply disc_P1. unfold eq_Dom; unfold f0; simpl; unfold g, disc; split. unfold included; intros; unfold Rminus in H3; rewrite Ropp_0 in H3; rewrite Rplus_0_r in H3; apply H3. unfold included; intros; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply H3. apply open_set_P6 with (fun x:R => False). apply open_set_P4. unfold eq_Dom; split. unfold included; intros; elim H3. unfold included, f0; simpl; unfold g; intros; elim H2; intro; [ rewrite <- H4 in H3; assert (H5 := Rabs_pos x0); elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H5 H3)) | assert (H6 := Rabs_pos x0); assert (H7 := Rlt_trans _ _ _ H3 H4); elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H6 H7)) ]. Qed. (**********) Lemma compact_P2 : forall X:R -> Prop, compact X -> closed_set X. Proof. intros; assert (H0 := closed_set_P1 X); elim H0; clear H0; intros _ H0; apply H0; clear H0. unfold eq_Dom; split. apply adherence_P1. unfold included; unfold adherence; unfold point_adherent; intros; unfold compact in H; assert (H1 := classic (X x)); elim H1; clear H1; intro. assumption. cut (forall y:R, X y -> 0 < Rabs (y - x) / 2). intro; set (D := X); set (g := fun y z:R => Rabs (y - z) < Rabs (y - x) / 2 /\ D y); cut (forall x:R, (exists y : _, g x y) -> D x). intro; set (f0 := mkfamily D g H3); assert (H4 := H f0); cut (covering_open_set X f0). intro; assert (H6 := H4 H5); elim H6; clear H6; intros D' H6. unfold covering_finite in H6; decompose [and] H6; unfold covering, subfamily in H7; simpl in H7; unfold family_finite, subfamily in H8; simpl in H8; unfold domain_finite in H8; elim H8; clear H8; intros l H8; set (alp := MinRlist (AbsList l x)); cut (0 < alp). intro; assert (H10 := H0 (disc x (mkposreal _ H9))); cut (neighbourhood (disc x (mkposreal alp H9)) x). intro; assert (H12 := H10 H11); elim H12; clear H12; intros y H12; unfold intersection_domain in H12; elim H12; clear H12; intros; assert (H14 := H7 _ H13); elim H14; clear H14; intros y0 H14; elim H14; clear H14; intros; unfold g in H14; elim H14; clear H14; intros; unfold disc in H12; simpl in H12; cut (alp <= Rabs (y0 - x) / 2). intro; assert (H18 := Rlt_le_trans _ _ _ H12 H17); cut (Rabs (y0 - x) < Rabs (y0 - x)). intro; elim (Rlt_irrefl _ H19). apply Rle_lt_trans with (Rabs (y0 - y) + Rabs (y - x)). replace (y0 - x) with (y0 - y + (y - x)); [ apply Rabs_triang | ring ]. rewrite (double_var (Rabs (y0 - x))); apply Rplus_lt_compat; assumption. apply (MinRlist_P1 (AbsList l x) (Rabs (y0 - x) / 2)); apply AbsList_P1; elim (H8 y0); clear H8; intros; apply H8; unfold intersection_domain; split; assumption. assert (H11 := disc_P1 x (mkposreal alp H9)); unfold open_set in H11; apply H11. unfold disc; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply H9. unfold alp; apply MinRlist_P2; intros; assert (H10 := AbsList_P2 _ _ _ H9); elim H10; clear H10; intros z H10; elim H10; clear H10; intros; rewrite H11; apply H2; elim (H8 z); clear H8; intros; assert (H13 := H12 H10); unfold intersection_domain, D in H13; elim H13; clear H13; intros; assumption. unfold covering_open_set; split. 
unfold covering; intros; exists x0; simpl; unfold g; split. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; unfold Rminus in H2; apply (H2 _ H5). apply H5. unfold family_open_set; intro; simpl; unfold g; elim (classic (D x0)); intro. apply open_set_P6 with (disc x0 (mkposreal _ (H2 _ H5))). apply disc_P1. unfold eq_Dom; split. unfold included, disc; simpl; intros; split. rewrite <- (Rabs_Ropp (x0 - x1)); rewrite Ropp_minus_distr; apply H6. apply H5. unfold included, disc; simpl; intros; elim H6; intros; rewrite <- (Rabs_Ropp (x1 - x0)); rewrite Ropp_minus_distr; apply H7. apply open_set_P6 with (fun z:R => False). apply open_set_P4. unfold eq_Dom; split. unfold included; intros; elim H6. unfold included; intros; elim H6; intros; elim H5; assumption. intros; elim H3; intros; unfold g in H4; elim H4; clear H4; intros _ H4; apply H4. intros; unfold Rdiv; apply Rmult_lt_0_compat. apply Rabs_pos_lt; apply Rminus_eq_contra; red; intro; rewrite H3 in H2; elim H1; apply H2. apply Rinv_0_lt_compat; prove_sup0. Qed. (**********) Lemma compact_EMP : compact (fun _:R => False). Proof. unfold compact; intros; exists (fun x:R => False); unfold covering_finite; split. unfold covering; intros; elim H0. unfold family_finite; unfold domain_finite; exists nil; intro. split. simpl; unfold intersection_domain; intros; elim H0. elim H0; clear H0; intros _ H0; elim H0. simpl; intro; elim H0. Qed. Lemma compact_eqDom : forall X1 X2:R -> Prop, compact X1 -> X1 =_D X2 -> compact X2. Proof. unfold compact; intros; unfold eq_Dom in H0; elim H0; clear H0; unfold included; intros; assert (H3 : covering_open_set X1 f0). unfold covering_open_set; unfold covering_open_set in H1; elim H1; clear H1; intros; split. unfold covering in H1; unfold covering; intros; apply (H1 _ (H0 _ H4)). apply H3. elim (H _ H3); intros D H4; exists D; unfold covering_finite; unfold covering_finite in H4; elim H4; intros; split. unfold covering in H5; unfold covering; intros; apply (H5 _ (H2 _ H7)). apply H6. Qed. (** Borel-Lebesgue's lemma *) Lemma compact_P3 : forall a b:R, compact (fun c:R => a <= c <= b). Proof. intros; case (Rle_dec a b); intro. unfold compact; intros; set (A := fun x:R => a <= x <= b /\ (exists D : R -> Prop, covering_finite (fun c:R => a <= c <= x) (subfamily f0 D))); cut (A a). intro; cut (bound A). intro; cut (exists a0 : R, A a0). intro; assert (H3 := completeness A H1 H2); elim H3; clear H3; intros m H3; unfold is_lub in H3; cut (a <= m <= b). intro; unfold covering_open_set in H; elim H; clear H; intros; unfold covering in H; assert (H6 := H m H4); elim H6; clear H6; intros y0 H6; unfold family_open_set in H5; assert (H7 := H5 y0); unfold open_set in H7; assert (H8 := H7 m H6); unfold neighbourhood in H8; elim H8; clear H8; intros eps H8; cut (exists x : R, A x /\ m - eps < x <= m). intro; elim H9; clear H9; intros x H9; elim H9; clear H9; intros; case (Req_dec m b); intro. rewrite H11 in H10; rewrite H11 in H8; unfold A in H9; elim H9; clear H9; intros; elim H12; clear H12; intros Dx H12; set (Db := fun x:R => Dx x \/ x = y0); exists Db; unfold covering_finite; split. unfold covering; unfold covering_finite in H12; elim H12; clear H12; intros; unfold covering in H12; case (Rle_dec x0 x); intro. cut (a <= x0 <= x). intro; assert (H16 := H12 x0 H15); elim H16; clear H16; intros; exists x1; simpl in H16; simpl; unfold Db; elim H16; clear H16; intros; split; [ apply H16 | left; apply H17 ]. split. elim H14; intros; assumption. assumption. exists y0; simpl; split. 
apply H8; unfold disc; rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; rewrite Rabs_right. apply Rlt_trans with (b - x). unfold Rminus; apply Rplus_lt_compat_l; apply Ropp_lt_gt_contravar; auto with real. elim H10; intros H15 _; apply Rplus_lt_reg_r with (x - eps); replace (x - eps + (b - x)) with (b - eps); [ replace (x - eps + eps) with x; [ apply H15 | ring ] | ring ]. apply Rge_minus; apply Rle_ge; elim H14; intros _ H15; apply H15. unfold Db; right; reflexivity. unfold family_finite; unfold domain_finite; unfold covering_finite in H12; elim H12; clear H12; intros; unfold family_finite in H13; unfold domain_finite in H13; elim H13; clear H13; intros l H13; exists (cons y0 l); intro; split. intro; simpl in H14; unfold intersection_domain in H14; elim (H13 x0); clear H13; intros; case (Req_dec x0 y0); intro. simpl; left; apply H16. simpl; right; apply H13. simpl; unfold intersection_domain; unfold Db in H14; decompose [and or] H14. split; assumption. elim H16; assumption. intro; simpl in H14; elim H14; intro; simpl; unfold intersection_domain. split. apply (cond_fam f0); rewrite H15; exists m; apply H6. unfold Db; right; assumption. simpl; unfold intersection_domain; elim (H13 x0). intros _ H16; assert (H17 := H16 H15); simpl in H17; unfold intersection_domain in H17; split. elim H17; intros; assumption. unfold Db; left; elim H17; intros; assumption. set (m' := Rmin (m + eps / 2) b); cut (A m'). intro; elim H3; intros; unfold is_upper_bound in H13; assert (H15 := H13 m' H12); cut (m < m'). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H15 H16)). unfold m'; unfold Rmin; case (Rle_dec (m + eps / 2) b); intro. pattern m at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos eps) | apply Rinv_0_lt_compat; prove_sup0 ]. elim H4; intros. elim H17; intro. assumption. elim H11; assumption. unfold A; split. split. apply Rle_trans with m. elim H4; intros; assumption. unfold m'; unfold Rmin; case (Rle_dec (m + eps / 2) b); intro. pattern m at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos eps) | apply Rinv_0_lt_compat; prove_sup0 ]. elim H4; intros. elim H13; intro. assumption. elim H11; assumption. unfold m'; apply Rmin_r. unfold A in H9; elim H9; clear H9; intros; elim H12; clear H12; intros Dx H12; set (Db := fun x:R => Dx x \/ x = y0); exists Db; unfold covering_finite; split. unfold covering; unfold covering_finite in H12; elim H12; clear H12; intros; unfold covering in H12; case (Rle_dec x0 x); intro. cut (a <= x0 <= x). intro; assert (H16 := H12 x0 H15); elim H16; clear H16; intros; exists x1; simpl in H16; simpl; unfold Db. elim H16; clear H16; intros; split; [ apply H16 | left; apply H17 ]. elim H14; intros; split; assumption. exists y0; simpl; split. apply H8; unfold disc; unfold Rabs; case (Rcase_abs (x0 - m)); intro. rewrite Ropp_minus_distr; apply Rlt_trans with (m - x). unfold Rminus; apply Rplus_lt_compat_l; apply Ropp_lt_gt_contravar; auto with real. apply Rplus_lt_reg_r with (x - eps); replace (x - eps + (m - x)) with (m - eps). replace (x - eps + eps) with x. elim H10; intros; assumption. ring. ring. apply Rle_lt_trans with (m' - m). unfold Rminus; do 2 rewrite <- (Rplus_comm (- m)); apply Rplus_le_compat_l; elim H14; intros; assumption. apply Rplus_lt_reg_r with m; replace (m + (m' - m)) with m'. apply Rle_lt_trans with (m + eps / 2). unfold m'; apply Rmin_l. apply Rplus_lt_compat_l; apply Rmult_lt_reg_l with 2. prove_sup0. 
unfold Rdiv; rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; pattern (pos eps) at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; apply (cond_pos eps). discrR. ring. unfold Db; right; reflexivity. unfold family_finite; unfold domain_finite; unfold covering_finite in H12; elim H12; clear H12; intros; unfold family_finite in H13; unfold domain_finite in H13; elim H13; clear H13; intros l H13; exists (cons y0 l); intro; split. intro; simpl in H14; unfold intersection_domain in H14; elim (H13 x0); clear H13; intros; case (Req_dec x0 y0); intro. simpl; left; apply H16. simpl; right; apply H13; simpl; unfold intersection_domain; unfold Db in H14; decompose [and or] H14. split; assumption. elim H16; assumption. intro; simpl in H14; elim H14; intro; simpl; unfold intersection_domain. split. apply (cond_fam f0); rewrite H15; exists m; apply H6. unfold Db; right; assumption. elim (H13 x0); intros _ H16. assert (H17 := H16 H15). simpl in H17. unfold intersection_domain in H17. split. elim H17; intros; assumption. unfold Db; left; elim H17; intros; assumption. elim (classic (exists x : R, A x /\ m - eps < x <= m)); intro. assumption. elim H3; intros; cut (is_upper_bound A (m - eps)). intro; assert (H13 := H11 _ H12); cut (m - eps < m). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H13 H14)). pattern m at 2; rewrite <- Rplus_0_r; unfold Rminus; apply Rplus_lt_compat_l; apply Ropp_lt_cancel; rewrite Ropp_involutive; rewrite Ropp_0; apply (cond_pos eps). set (P := fun n:R => A n /\ m - eps < n <= m); assert (H12 := not_ex_all_not _ P H9); unfold P in H12; unfold is_upper_bound; intros; assert (H14 := not_and_or _ _ (H12 x)); elim H14; intro. elim H15; apply H13. elim (not_and_or _ _ H15); intro. case (Rle_dec x (m - eps)); intro. assumption. elim H16; auto with real. unfold is_upper_bound in H10; assert (H17 := H10 x H13); elim H16; apply H17. elim H3; clear H3; intros. unfold is_upper_bound in H3. split. apply (H3 _ H0). apply (H4 b); unfold is_upper_bound; intros; unfold A in H5; elim H5; clear H5; intros H5 _; elim H5; clear H5; intros _ H5; apply H5. exists a; apply H0. unfold bound; exists b; unfold is_upper_bound; intros; unfold A in H1; elim H1; clear H1; intros H1 _; elim H1; clear H1; intros _ H1; apply H1. unfold A; split. split; [ right; reflexivity | apply r ]. unfold covering_open_set in H; elim H; clear H; intros; unfold covering in H; cut (a <= a <= b). intro; elim (H _ H1); intros y0 H2; set (D' := fun x:R => x = y0); exists D'; unfold covering_finite; split. unfold covering; simpl; intros; cut (x = a). intro; exists y0; split. rewrite H4; apply H2. unfold D'; reflexivity. elim H3; intros; apply Rle_antisym; assumption. unfold family_finite; unfold domain_finite; exists (cons y0 nil); intro; split. simpl; unfold intersection_domain; intro; elim H3; clear H3; intros; unfold D' in H4; left; apply H4. simpl; unfold intersection_domain; intro; elim H3; intro. split; [ rewrite H4; apply (cond_fam f0); exists a; apply H2 | apply H4 ]. elim H4. split; [ right; reflexivity | apply r ]. apply compact_eqDom with (fun c:R => False). apply compact_EMP. unfold eq_Dom; split. unfold included; intros; elim H. unfold included; intros; elim H; clear H; intros; assert (H1 := Rle_trans _ _ _ H H0); elim n; apply H1. Qed. Lemma compact_P4 : forall X F:R -> Prop, compact X -> closed_set F -> included F X -> compact F. Proof. unfold compact; intros; elim (classic (exists z : R, F z)); intro Hyp_F_NE. 
set (D := ind f0); set (g := f f0); unfold closed_set in H0. set (g' := fun x y:R => f0 x y \/ complementary F y /\ D x). set (D' := D). cut (forall x:R, (exists y : R, g' x y) -> D' x). intro; set (f' := mkfamily D' g' H3); cut (covering_open_set X f'). intro; elim (H _ H4); intros DX H5; exists DX. unfold covering_finite; unfold covering_finite in H5; elim H5; clear H5; intros. split. unfold covering; unfold covering in H5; intros. elim (H5 _ (H1 _ H7)); intros y0 H8; exists y0; simpl in H8; simpl; elim H8; clear H8; intros. split. unfold g' in H8; elim H8; intro. apply H10. elim H10; intros H11 _; unfold complementary in H11; elim H11; apply H7. apply H9. unfold family_finite; unfold domain_finite; unfold family_finite in H6; unfold domain_finite in H6; elim H6; clear H6; intros l H6; exists l; intro; assert (H7 := H6 x); elim H7; clear H7; intros. split. intro; apply H7; simpl; unfold intersection_domain; simpl in H9; unfold intersection_domain in H9; unfold D'; apply H9. intro; assert (H10 := H8 H9); simpl in H10; unfold intersection_domain in H10; simpl; unfold intersection_domain; unfold D' in H10; apply H10. unfold covering_open_set; unfold covering_open_set in H2; elim H2; clear H2; intros. split. unfold covering; unfold covering in H2; intros. elim (classic (F x)); intro. elim (H2 _ H6); intros y0 H7; exists y0; simpl; unfold g'; left; assumption. cut (exists z : R, D z). intro; elim H7; clear H7; intros x0 H7; exists x0; simpl; unfold g'; right. split. unfold complementary; apply H6. apply H7. elim Hyp_F_NE; intros z0 H7. assert (H8 := H2 _ H7). elim H8; clear H8; intros t H8; exists t; apply (cond_fam f0); exists z0; apply H8. unfold family_open_set; intro; simpl; unfold g'; elim (classic (D x)); intro. apply open_set_P6 with (union_domain (f0 x) (complementary F)). apply open_set_P2. unfold family_open_set in H4; apply H4. apply H0. unfold eq_Dom; split. unfold included, union_domain, complementary; intros. elim H6; intro; [ left; apply H7 | right; split; assumption ]. unfold included, union_domain, complementary; intros. elim H6; intro; [ left; apply H7 | right; elim H7; intros; apply H8 ]. apply open_set_P6 with (f0 x). unfold family_open_set in H4; apply H4. unfold eq_Dom; split. unfold included, complementary; intros; left; apply H6. unfold included, complementary; intros. elim H6; intro. apply H7. elim H7; intros _ H8; elim H5; apply H8. intros; elim H3; intros y0 H4; unfold g' in H4; elim H4; intro. apply (cond_fam f0); exists y0; apply H5. elim H5; clear H5; intros _ H5; apply H5. (* Case where F is the empty set *) cut (compact F). intro; apply (H3 f0 H2). apply compact_eqDom with (fun _:R => False). apply compact_EMP. unfold eq_Dom; split. unfold included; intros; elim H3. assert (H3 := not_ex_all_not _ _ Hyp_F_NE); unfold included; intros; elim (H3 x); apply H4. Qed. (**********) Lemma compact_P5 : forall X:R -> Prop, closed_set X -> bounded X -> compact X. Proof. intros; unfold bounded in H0. elim H0; clear H0; intros m H0. elim H0; clear H0; intros M H0. assert (H1 := compact_P3 m M). apply (compact_P4 (fun c:R => m <= c <= M) X H1 H H0). Qed. (**********) Lemma compact_carac : forall X:R -> Prop, compact X <-> closed_set X /\ bounded X. Proof. intro; split. intro; split; [ apply (compact_P2 _ H) | apply (compact_P1 _ H) ]. intro; elim H; clear H; intros; apply (compact_P5 _ H H0). Qed. Definition image_dir (f:R -> R) (D:R -> Prop) (x:R) : Prop := exists y : R, x = f y /\ D y.
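(* Illustrative sketch of [image_dir] (the auxiliary lemma name
   [image_dir_id_example] is only for illustration and is not part of the
   original development): [image_dir f D] is the direct image of [D] under
   [f], dual to the preimage [image_rec f D] used above.  Under the identity
   map, every point of [D] lies in the direct image of [D]. *)
Lemma image_dir_id_example :
  forall (D:R -> Prop) (x:R), D x -> image_dir (fun y:R => y) D x.
Proof.
  intros D x Hx; unfold image_dir; exists x; split; [ reflexivity | assumption ].
Qed.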
(**********) Lemma continuity_compact : forall (f:R -> R) (X:R -> Prop), (forall x:R, continuity_pt f x) -> compact X -> compact (image_dir f X). Proof. unfold compact; intros; unfold covering_open_set in H1. elim H1; clear H1; intros. set (D := ind f1). set (g := fun x y:R => image_rec f0 (f1 x) y). cut (forall x:R, (exists y : R, g x y) -> D x). intro; set (f' := mkfamily D g H3). cut (covering_open_set X f'). intro; elim (H0 f' H4); intros D' H5; exists D'. unfold covering_finite in H5; elim H5; clear H5; intros; unfold covering_finite; split. unfold covering, image_dir; simpl; unfold covering in H5; intros; elim H7; intros y H8; elim H8; intros; assert (H11 := H5 _ H10); simpl in H11; elim H11; intros z H12; exists z; unfold g in H12; unfold image_rec in H12; rewrite H9; apply H12. unfold family_finite in H6; unfold domain_finite in H6; unfold family_finite; unfold domain_finite; elim H6; intros l H7; exists l; intro; elim (H7 x); intros; split; intro. apply H8; simpl in H10; simpl; apply H10. apply (H9 H10). unfold covering_open_set; split. unfold covering; intros; simpl; unfold covering in H1; unfold image_dir in H1; unfold g; unfold image_rec; apply H1. exists x; split; [ reflexivity | apply H4 ]. unfold family_open_set; unfold family_open_set in H2; intro; simpl; unfold g; cut ((fun y:R => image_rec f0 (f1 x) y) = image_rec f0 (f1 x)). intro; rewrite H4. apply (continuity_P2 f0 (f1 x) H (H2 x)). reflexivity. intros; apply (cond_fam f1); unfold g in H3; unfold image_rec in H3; elim H3; intros; exists (f0 x0); apply H4. Qed. Lemma Rlt_Rminus : forall a b:R, a < b -> 0 < b - a. Proof. intros; apply Rplus_lt_reg_r with a; rewrite Rplus_0_r; replace (a + (b - a)) with b; [ assumption | ring ]. Qed. Lemma prolongement_C0 : forall (f:R -> R) (a b:R), a <= b -> (forall c:R, a <= c <= b -> continuity_pt f c) -> exists g : R -> R, continuity g /\ (forall c:R, a <= c <= b -> g c = f c). Proof. intros; elim H; intro. set (h := fun x:R => match Rle_dec x a with | left _ => f0 a | right _ => match Rle_dec x b with | left _ => f0 x | right _ => f0 b end end). assert (H2 : 0 < b - a). apply Rlt_Rminus; assumption. exists h; split. unfold continuity; intro; case (Rtotal_order x a); intro. unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; exists (a - x); split. change (0 < a - x); apply Rlt_Rminus; assumption. intros; elim H5; clear H5; intros _ H5; unfold h. case (Rle_dec x a); intro. case (Rle_dec x0 a); intro. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. elim n; left; apply Rplus_lt_reg_r with (- x); do 2 rewrite (Rplus_comm (- x)); apply Rle_lt_trans with (Rabs (x0 - x)). apply RRle_abs. assumption. elim n; left; assumption. elim H3; intro. assert (H5 : a <= a <= b). split; [ right; reflexivity | left; assumption ]. assert (H6 := H0 _ H5); unfold continuity_pt in H6; unfold continue_in in H6; unfold limit1_in in H6; unfold limit_in in H6; simpl in H6; unfold R_dist in H6; unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; elim (H6 _ H7); intros; exists (Rmin x0 (b - a)); split. unfold Rmin; case (Rle_dec x0 (b - a)); intro. elim H8; intros; assumption. change (0 < b - a); apply Rlt_Rminus; assumption. intros; elim H9; clear H9; intros _ H9; cut (x1 < b). intro; unfold h; case (Rle_dec x a); intro. case (Rle_dec x1 a); intro. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. case (Rle_dec x1 b); intro. elim H8; intros; apply H12; split. 
unfold D_x, no_cond; split. trivial. red; intro; elim n; right; symmetry ; assumption. apply Rlt_le_trans with (Rmin x0 (b - a)). rewrite H4 in H9; apply H9. apply Rmin_l. elim n0; left; assumption. elim n; right; assumption. apply Rplus_lt_reg_r with (- a); do 2 rewrite (Rplus_comm (- a)); rewrite H4 in H9; apply Rle_lt_trans with (Rabs (x1 - a)). apply RRle_abs. apply Rlt_le_trans with (Rmin x0 (b - a)). assumption. apply Rmin_r. case (Rtotal_order x b); intro. assert (H6 : a <= x <= b). split; left; assumption. assert (H7 := H0 _ H6); unfold continuity_pt in H7; unfold continue_in in H7; unfold limit1_in in H7; unfold limit_in in H7; simpl in H7; unfold R_dist in H7; unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; elim (H7 _ H8); intros; elim H9; clear H9; intros. assert (H11 : 0 < x - a). apply Rlt_Rminus; assumption. assert (H12 : 0 < b - x). apply Rlt_Rminus; assumption. exists (Rmin x0 (Rmin (x - a) (b - x))); split. unfold Rmin; case (Rle_dec (x - a) (b - x)); intro. case (Rle_dec x0 (x - a)); intro. assumption. assumption. case (Rle_dec x0 (b - x)); intro. assumption. assumption. intros; elim H13; clear H13; intros; cut (a < x1 < b). intro; elim H15; clear H15; intros; unfold h; case (Rle_dec x a); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H4)). case (Rle_dec x b); intro. case (Rle_dec x1 a); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 H15)). case (Rle_dec x1 b); intro. apply H10; split. assumption. apply Rlt_le_trans with (Rmin x0 (Rmin (x - a) (b - x))). assumption. apply Rmin_l. elim n1; left; assumption. elim n0; left; assumption. split. apply Ropp_lt_cancel; apply Rplus_lt_reg_r with x; apply Rle_lt_trans with (Rabs (x1 - x)). rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply RRle_abs. apply Rlt_le_trans with (Rmin x0 (Rmin (x - a) (b - x))). assumption. apply Rle_trans with (Rmin (x - a) (b - x)). apply Rmin_r. apply Rmin_l. apply Rplus_lt_reg_r with (- x); do 2 rewrite (Rplus_comm (- x)); apply Rle_lt_trans with (Rabs (x1 - x)). apply RRle_abs. apply Rlt_le_trans with (Rmin x0 (Rmin (x - a) (b - x))). assumption. apply Rle_trans with (Rmin (x - a) (b - x)); apply Rmin_r. elim H5; intro. assert (H7 : a <= b <= b). split; [ left; assumption | right; reflexivity ]. assert (H8 := H0 _ H7); unfold continuity_pt in H8; unfold continue_in in H8; unfold limit1_in in H8; unfold limit_in in H8; simpl in H8; unfold R_dist in H8; unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; elim (H8 _ H9); intros; exists (Rmin x0 (b - a)); split. unfold Rmin; case (Rle_dec x0 (b - a)); intro. elim H10; intros; assumption. change (0 < b - a); apply Rlt_Rminus; assumption. intros; elim H11; clear H11; intros _ H11; cut (a < x1). intro; unfold h; case (Rle_dec x a); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H4)). case (Rle_dec x1 a); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H12)). case (Rle_dec x b); intro. case (Rle_dec x1 b); intro. rewrite H6; elim H10; intros; elim r0; intro. apply H14; split. unfold D_x, no_cond; split. trivial. red; intro; rewrite <- H16 in H15; elim (Rlt_irrefl _ H15). rewrite H6 in H11; apply Rlt_le_trans with (Rmin x0 (b - a)). apply H11. apply Rmin_l. rewrite H15; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. rewrite H6; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. elim n1; right; assumption. 
rewrite H6 in H11; apply Ropp_lt_cancel; apply Rplus_lt_reg_r with b; apply Rle_lt_trans with (Rabs (x1 - b)). rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply RRle_abs. apply Rlt_le_trans with (Rmin x0 (b - a)). assumption. apply Rmin_r. unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; exists (x - b); split. change (0 < x - b); apply Rlt_Rminus; assumption. intros; elim H8; clear H8; intros. assert (H10 : b < x0). apply Ropp_lt_cancel; apply Rplus_lt_reg_r with x; apply Rle_lt_trans with (Rabs (x0 - x)). rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply RRle_abs. assumption. unfold h; case (Rle_dec x a); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H4)). case (Rle_dec x b); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H6)). case (Rle_dec x0 a); intro. elim (Rlt_irrefl _ (Rlt_trans _ _ _ H1 (Rlt_le_trans _ _ _ H10 r))). case (Rle_dec x0 b); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H10)). unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. intros; elim H3; intros; unfold h; case (Rle_dec c a); intro. elim r; intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H4 H6)). rewrite H6; reflexivity. case (Rle_dec c b); intro. reflexivity. elim n0; assumption. exists (fun _:R => f0 a); split. apply derivable_continuous; apply (derivable_const (f0 a)). intros; elim H2; intros; rewrite H1 in H3; cut (b = c). intro; rewrite <- H5; rewrite H1; reflexivity. apply Rle_antisym; assumption. Qed. (**********) Lemma continuity_ab_maj : forall (f:R -> R) (a b:R), a <= b -> (forall c:R, a <= c <= b -> continuity_pt f c) -> exists Mx : R, (forall c:R, a <= c <= b -> f c <= f Mx) /\ a <= Mx <= b. Proof. intros; cut (exists g : R -> R, continuity g /\ (forall c:R, a <= c <= b -> g c = f0 c)). intro HypProl. elim HypProl; intros g Hcont_eq. elim Hcont_eq; clear Hcont_eq; intros Hcont Heq. assert (H1 := compact_P3 a b). assert (H2 := continuity_compact g (fun c:R => a <= c <= b) Hcont H1). assert (H3 := compact_P2 _ H2). assert (H4 := compact_P1 _ H2). cut (bound (image_dir g (fun c:R => a <= c <= b))). cut (exists x : R, image_dir g (fun c:R => a <= c <= b) x). intros; assert (H7 := completeness _ H6 H5). elim H7; clear H7; intros M H7; cut (image_dir g (fun c:R => a <= c <= b) M). intro; unfold image_dir in H8; elim H8; clear H8; intros Mxx H8; elim H8; clear H8; intros; exists Mxx; split. intros; rewrite <- (Heq c H10); rewrite <- (Heq Mxx H9); intros; rewrite <- H8; unfold is_lub in H7; elim H7; clear H7; intros H7 _; unfold is_upper_bound in H7; apply H7; unfold image_dir; exists c; split; [ reflexivity | apply H10 ]. apply H9. elim (classic (image_dir g (fun c:R => a <= c <= b) M)); intro. assumption. cut (exists eps : posreal, (forall y:R, ~ intersection_domain (disc M eps) (image_dir g (fun c:R => a <= c <= b)) y)). intro; elim H9; clear H9; intros eps H9; unfold is_lub in H7; elim H7; clear H7; intros; cut (is_upper_bound (image_dir g (fun c:R => a <= c <= b)) (M - eps)). intro; assert (H12 := H10 _ H11); cut (M - eps < M). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H12 H13)). pattern M at 2; rewrite <- Rplus_0_r; unfold Rminus; apply Rplus_lt_compat_l; apply Ropp_lt_cancel; rewrite Ropp_0; rewrite Ropp_involutive; apply (cond_pos eps). unfold is_upper_bound, image_dir; intros; cut (x <= M). intro; case (Rle_dec x (M - eps)); intro. apply r. elim (H9 x); unfold intersection_domain, disc, image_dir; split. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; rewrite Rabs_right. 
apply Rplus_lt_reg_r with (x - eps); replace (x - eps + (M - x)) with (M - eps). replace (x - eps + eps) with x. auto with real. ring. ring. apply Rge_minus; apply Rle_ge; apply H12. apply H11. apply H7; apply H11. cut (exists V : R -> Prop, neighbourhood V M /\ (forall y:R, ~ intersection_domain V (image_dir g (fun c:R => a <= c <= b)) y)). intro; elim H9; intros V H10; elim H10; clear H10; intros. unfold neighbourhood in H10; elim H10; intros del H12; exists del; intros; red; intro; elim (H11 y). unfold intersection_domain; unfold intersection_domain in H13; elim H13; clear H13; intros; split. apply (H12 _ H13). apply H14. cut (~ point_adherent (image_dir g (fun c:R => a <= c <= b)) M). intro; unfold point_adherent in H9. assert (H10 := not_all_ex_not _ (fun V:R -> Prop => neighbourhood V M -> exists y : R, intersection_domain V (image_dir g (fun c:R => a <= c <= b)) y) H9). elim H10; intros V0 H11; exists V0; assert (H12 := imply_to_and _ _ H11); elim H12; clear H12; intros. split. apply H12. apply (not_ex_all_not _ _ H13). red; intro; cut (adherence (image_dir g (fun c:R => a <= c <= b)) M). intro; elim (closed_set_P1 (image_dir g (fun c:R => a <= c <= b))); intros H11 _; assert (H12 := H11 H3). elim H8. unfold eq_Dom in H12; elim H12; clear H12; intros. apply (H13 _ H10). apply H9. exists (g a); unfold image_dir; exists a; split. reflexivity. split; [ right; reflexivity | apply H ]. unfold bound; unfold bounded in H4; elim H4; clear H4; intros m H4; elim H4; clear H4; intros M H4; exists M; unfold is_upper_bound; intros; elim (H4 _ H5); intros _ H6; apply H6. apply prolongement_C0; assumption. Qed. (**********) Lemma continuity_ab_min : forall (f:R -> R) (a b:R), a <= b -> (forall c:R, a <= c <= b -> continuity_pt f c) -> exists mx : R, (forall c:R, a <= c <= b -> f mx <= f c) /\ a <= mx <= b. Proof. intros. cut (forall c:R, a <= c <= b -> continuity_pt (- f0) c). intro; assert (H2 := continuity_ab_maj (- f0)%F a b H H1); elim H2; intros x0 H3; exists x0; intros; split. intros; rewrite <- (Ropp_involutive (f0 x0)); rewrite <- (Ropp_involutive (f0 c)); apply Ropp_le_contravar; elim H3; intros; unfold opp_fct in H5; apply H5; apply H4. elim H3; intros; assumption. intros. assert (H2 := H0 _ H1). apply (continuity_pt_opp _ _ H2). Qed. (********************************************************) (** * Proof of Bolzano-Weierstrass theorem *) (********************************************************) Definition ValAdh (un:nat -> R) (x:R) : Prop := forall (V:R -> Prop) (N:nat), neighbourhood V x -> exists p : nat, (N <= p)%nat /\ V (un p). Definition intersection_family (f:family) (x:R) : Prop := forall y:R, ind f y -> f y x. Lemma ValAdh_un_exists : forall (un:nat -> R) (D:=fun x:R => exists n : nat, x = INR n) (f:= fun x:R => adherence (fun y:R => (exists p : nat, y = un p /\ x <= INR p) /\ D x)) (x:R), (exists y : R, f x y) -> D x. Proof. intros; elim H; intros; unfold f in H0; unfold adherence in H0; unfold point_adherent in H0; assert (H1 : neighbourhood (disc x0 (mkposreal _ Rlt_0_1)) x0). unfold neighbourhood, disc; exists (mkposreal _ Rlt_0_1); unfold included; trivial. elim (H0 _ H1); intros; unfold intersection_domain in H2; elim H2; intros; elim H4; intros; apply H6. Qed. Definition ValAdh_un (un:nat -> R) : R -> Prop := let D := fun x:R => exists n : nat, x = INR n in let f := fun x:R => adherence (fun y:R => (exists p : nat, y = un p /\ x <= INR p) /\ D x) in intersection_family (mkfamily D f (ValAdh_un_exists un)). 
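(* Illustrative sketch of [ValAdh] (the auxiliary lemma name
   [ValAdh_const_example] is only for illustration and is not part of the
   original development): an adherence value of a sequence is a point every
   neighbourhood of which is visited beyond any given rank.  For a constant
   sequence the constant itself is such a value, since any neighbourhood of
   [l] contains [l]. *)
Lemma ValAdh_const_example : forall l:R, ValAdh (fun _:nat => l) l.
Proof.
  intro l; unfold ValAdh; intros V N H; exists N; split.
  apply le_n.
  simpl; unfold neighbourhood in H; elim H; intros del H0;
    unfold included in H0; apply H0; unfold disc; unfold Rminus;
      rewrite Rplus_opp_r; rewrite Rabs_R0; apply (cond_pos del).
Qed.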
Lemma ValAdh_un_prop : forall (un:nat -> R) (x:R), ValAdh un x <-> ValAdh_un un x. Proof. intros; split; intro. unfold ValAdh in H; unfold ValAdh_un; unfold intersection_family; simpl; intros; elim H0; intros N H1; unfold adherence; unfold point_adherent; intros; elim (H V N H2); intros; exists (un x0); unfold intersection_domain; elim H3; clear H3; intros; split. assumption. split. exists x0; split; [ reflexivity | rewrite H1; apply (le_INR _ _ H3) ]. exists N; assumption. unfold ValAdh; intros; unfold ValAdh_un in H; unfold intersection_family in H; simpl in H; assert (H1 : adherence (fun y0:R => (exists p : nat, y0 = un p /\ INR N <= INR p) /\ (exists n : nat, INR N = INR n)) x). apply H; exists N; reflexivity. unfold adherence in H1; unfold point_adherent in H1; assert (H2 := H1 _ H0); elim H2; intros; unfold intersection_domain in H3; elim H3; clear H3; intros; elim H4; clear H4; intros; elim H4; clear H4; intros; elim H4; clear H4; intros; exists x1; split. apply (INR_le _ _ H6). rewrite H4 in H3; apply H3. Qed. Lemma adherence_P4 : forall F G:R -> Prop, included F G -> included (adherence F) (adherence G). Proof. unfold adherence, included; unfold point_adherent; intros; elim (H0 _ H1); unfold intersection_domain; intros; elim H2; clear H2; intros; exists x0; split; [ assumption | apply (H _ H3) ]. Qed. Definition family_closed_set (f:family) : Prop := forall x:R, closed_set (f x). Definition intersection_vide_in (D:R -> Prop) (f:family) : Prop := forall x:R, (ind f x -> included (f x) D) /\ ~ (exists y : R, intersection_family f y). Definition intersection_vide_finite_in (D:R -> Prop) (f:family) : Prop := intersection_vide_in D f /\ family_finite f. (**********) Lemma compact_P6 : forall X:R -> Prop, compact X -> (exists z : R, X z) -> forall g:family, family_closed_set g -> intersection_vide_in X g -> exists D : R -> Prop, intersection_vide_finite_in X (subfamily g D). Proof. intros X H Hyp g H0 H1. set (D' := ind g). set (f' := fun x y:R => complementary (g x) y /\ D' x). assert (H2 : forall x:R, (exists y : R, f' x y) -> D' x). intros; elim H2; intros; unfold f' in H3; elim H3; intros; assumption. set (f0 := mkfamily D' f' H2). unfold compact in H; assert (H3 : covering_open_set X f0). unfold covering_open_set; split. unfold covering; intros; unfold intersection_vide_in in H1; elim (H1 x); intros; unfold intersection_family in H5; assert (H6 := not_ex_all_not _ (fun y:R => forall y0:R, ind g y0 -> g y0 y) H5 x); assert (H7 := not_all_ex_not _ (fun y0:R => ind g y0 -> g y0 x) H6); elim H7; intros; exists x0; elim (imply_to_and _ _ H8); intros; unfold f0; simpl; unfold f'; split; [ apply H10 | apply H9 ]. unfold family_open_set; intro; elim (classic (D' x)); intro. apply open_set_P6 with (complementary (g x)). unfold family_closed_set in H0; unfold closed_set in H0; apply H0. unfold f0; simpl; unfold f'; unfold eq_Dom; split. unfold included; intros; split; [ apply H4 | apply H3 ]. unfold included; intros; elim H4; intros; assumption. apply open_set_P6 with (fun _:R => False). apply open_set_P4. unfold eq_Dom; unfold included; split; intros; [ elim H4 | simpl in H4; unfold f' in H4; elim H4; intros; elim H3; assumption ]. elim (H _ H3); intros SF H4; exists SF; unfold intersection_vide_finite_in; split. unfold intersection_vide_in; simpl; intros; split. intros; unfold included; intros; unfold intersection_vide_in in H1; elim (H1 x); intros; elim H6; intros; apply H7. unfold intersection_domain in H5; elim H5; intros; assumption. assumption. 
elim (classic (exists y : R, intersection_domain (ind g) SF y)); intro Hyp'. red; intro; elim H5; intros; unfold intersection_family in H6; simpl in H6. cut (X x0). intro; unfold covering_finite in H4; elim H4; clear H4; intros H4 _; unfold covering in H4; elim (H4 x0 H7); intros; simpl in H8; unfold intersection_domain in H6; cut (ind g x1 /\ SF x1). intro; assert (H10 := H6 x1 H9); elim H10; clear H10; intros H10 _; elim H8; clear H8; intros H8 _; unfold f' in H8; unfold complementary in H8; elim H8; clear H8; intros H8 _; elim H8; assumption. split. apply (cond_fam f0). exists x0; elim H8; intros; assumption. elim H8; intros; assumption. unfold intersection_vide_in in H1; elim Hyp'; intros; assert (H8 := H6 _ H7); elim H8; intros; cut (ind g x1). intro; elim (H1 x1); intros; apply H12. apply H11. apply H9. apply (cond_fam g); exists x0; assumption. unfold covering_finite in H4; elim H4; clear H4; intros H4 _; cut (exists z : R, X z). intro; elim H5; clear H5; intros; unfold covering in H4; elim (H4 x0 H5); intros; simpl in H6; elim Hyp'; exists x1; elim H6; intros; unfold intersection_domain; split. apply (cond_fam f0); exists x0; apply H7. apply H8. apply Hyp. unfold covering_finite in H4; elim H4; clear H4; intros; unfold family_finite in H5; unfold domain_finite in H5; unfold family_finite; unfold domain_finite; elim H5; clear H5; intros l H5; exists l; intro; elim (H5 x); intros; split; intro; [ apply H6; simpl; simpl in H8; apply H8 | apply (H7 H8) ]. Qed. Theorem Bolzano_Weierstrass : forall (un:nat -> R) (X:R -> Prop), compact X -> (forall n:nat, X (un n)) -> exists l : R, ValAdh un l. Proof. intros; cut (exists l : R, ValAdh_un un l). intro; elim H1; intros; exists x; elim (ValAdh_un_prop un x); intros; apply (H4 H2). assert (H1 : exists z : R, X z). exists (un 0%nat); apply H0. set (D := fun x:R => exists n : nat, x = INR n). set (g := fun x:R => adherence (fun y:R => (exists p : nat, y = un p /\ x <= INR p) /\ D x)). assert (H2 : forall x:R, (exists y : R, g x y) -> D x). intros; elim H2; intros; unfold g in H3; unfold adherence in H3; unfold point_adherent in H3. assert (H4 : neighbourhood (disc x0 (mkposreal _ Rlt_0_1)) x0). unfold neighbourhood; exists (mkposreal _ Rlt_0_1); unfold included; trivial. elim (H3 _ H4); intros; unfold intersection_domain in H5; decompose [and] H5; assumption. set (f0 := mkfamily D g H2). assert (H3 := compact_P6 X H H1 f0). elim (classic (exists l : R, ValAdh_un un l)); intro. assumption. cut (family_closed_set f0). intro; cut (intersection_vide_in X f0). intro; assert (H7 := H3 H5 H6). elim H7; intros SF H8; unfold intersection_vide_finite_in in H8; elim H8; clear H8; intros; unfold intersection_vide_in in H8; elim (H8 0); intros _ H10; elim H10; unfold family_finite in H9; unfold domain_finite in H9; elim H9; clear H9; intros l H9; set (r := MaxRlist l); cut (D r). intro; unfold D in H11; elim H11; intros; exists (un x); unfold intersection_family; simpl; unfold intersection_domain; intros; split. unfold g; apply adherence_P1; split. exists x; split; [ reflexivity | rewrite <- H12; unfold r; apply MaxRlist_P1; elim (H9 y); intros; apply H14; simpl; apply H13 ]. elim H13; intros; assumption. elim H13; intros; assumption. elim (H9 r); intros. simpl in H12; unfold intersection_domain in H12; cut (In r l). intro; elim (H12 H13); intros; assumption. unfold r; apply MaxRlist_P2; cut (exists z : R, intersection_domain (ind f0) SF z). intro; elim H13; intros; elim (H9 x); intros; simpl in H15; assert (H17 := H15 H14); exists x; apply H17. 
elim (classic (exists z : R, intersection_domain (ind f0) SF z)); intro. assumption. elim (H8 0); intros _ H14; elim H1; intros; assert (H16 := not_ex_all_not _ (fun y:R => intersection_family (subfamily f0 SF) y) H14); assert (H17 := not_ex_all_not _ (fun z:R => intersection_domain (ind f0) SF z) H13); assert (H18 := H16 x); unfold intersection_family in H18; simpl in H18; assert (H19 := not_all_ex_not _ (fun y:R => intersection_domain D SF y -> g y x /\ SF y) H18); elim H19; intros; assert (H21 := imply_to_and _ _ H20); elim (H17 x0); elim H21; intros; assumption. unfold intersection_vide_in; intros; split. intro; simpl in H6; unfold f0; simpl; unfold g; apply included_trans with (adherence X). apply adherence_P4. unfold included; intros; elim H7; intros; elim H8; intros; elim H10; intros; rewrite H11; apply H0. apply adherence_P2; apply compact_P2; assumption. apply H4. unfold family_closed_set; unfold f0; simpl; unfold g; intro; apply adherence_P3. Qed. (********************************************************) (** * Proof of Heine's theorem *) (********************************************************) Definition uniform_continuity (f:R -> R) (X:R -> Prop) : Prop := forall eps:posreal, exists delta : posreal, (forall x y:R, X x -> X y -> Rabs (x - y) < delta -> Rabs (f x - f y) < eps). Lemma is_lub_u : forall (E:R -> Prop) (x y:R), is_lub E x -> is_lub E y -> x = y. Proof. unfold is_lub; intros; elim H; elim H0; intros; apply Rle_antisym; [ apply (H4 _ H1) | apply (H2 _ H3) ]. Qed. Lemma domain_P1 : forall X:R -> Prop, ~ (exists y : R, X y) \/ (exists y : R, X y /\ (forall x:R, X x -> x = y)) \/ (exists x : R, (exists y : R, X x /\ X y /\ x <> y)). Proof. intro; elim (classic (exists y : R, X y)); intro. right; elim H; intros; elim (classic (exists y : R, X y /\ y <> x)); intro. right; elim H1; intros; elim H2; intros; exists x; exists x0; intros. split; [ assumption | split; [ assumption | apply (not_eq_sym (A:=R)); assumption ] ]. left; exists x; split. assumption. intros; case (Req_dec x0 x); intro. assumption. elim H1; exists x0; split; assumption. left; assumption. Qed. Theorem Heine : forall (f:R -> R) (X:R -> Prop), compact X -> (forall x:R, X x -> continuity_pt f x) -> uniform_continuity f X. Proof. intros f0 X H0 H; elim (domain_P1 X); intro Hyp. (* X is empty *) unfold uniform_continuity; intros; exists (mkposreal _ Rlt_0_1); intros; elim Hyp; exists x; assumption. elim Hyp; clear Hyp; intro Hyp. (* X has only one element *) unfold uniform_continuity; intros; exists (mkposreal _ Rlt_0_1); intros; elim Hyp; clear Hyp; intros; elim H4; clear H4; intros; assert (H6 := H5 _ H1); assert (H7 := H5 _ H2); rewrite H6; rewrite H7; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (cond_pos eps). (* X has at least two distinct elements *) assert (X_enc : exists m : R, (exists M : R, (forall x:R, X x -> m <= x <= M) /\ m < M)). assert (H1 := compact_P1 X H0); unfold bounded in H1; elim H1; intros; elim H2; intros; exists x; exists x0; split. apply H3. elim Hyp; intros; elim H4; intros; decompose [and] H5; assert (H10 := H3 _ H6); assert (H11 := H3 _ H8); elim H10; intros; elim H11; intros; case (total_order_T x x0); intro. elim s; intro. assumption. rewrite b in H13; rewrite b in H7; elim H9; apply Rle_antisym; apply Rle_trans with x0; assumption. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ (Rle_trans _ _ _ H13 H14) r)). 
elim X_enc; clear X_enc; intros m X_enc; elim X_enc; clear X_enc; intros M X_enc; elim X_enc; clear X_enc Hyp; intros X_enc Hyp; unfold uniform_continuity; intro; assert (H1 : forall t:posreal, 0 < t / 2). intro; unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos t) | apply Rinv_0_lt_compat; prove_sup0 ]. set (g := fun x y:R => X x /\ (exists del : posreal, (forall z:R, Rabs (z - x) < del -> Rabs (f0 z - f0 x) < eps / 2) /\ is_lub (fun zeta:R => 0 < zeta <= M - m /\ (forall z:R, Rabs (z - x) < zeta -> Rabs (f0 z - f0 x) < eps / 2)) del /\ disc x (mkposreal (del / 2) (H1 del)) y)). assert (H2 : forall x:R, (exists y : R, g x y) -> X x). intros; elim H2; intros; unfold g in H3; elim H3; clear H3; intros H3 _; apply H3. set (f' := mkfamily X g H2); unfold compact in H0; assert (H3 : covering_open_set X f'). unfold covering_open_set; split. unfold covering; intros; exists x; simpl; unfold g; split. assumption. assert (H4 := H _ H3); unfold continuity_pt in H4; unfold continue_in in H4; unfold limit1_in in H4; unfold limit_in in H4; simpl in H4; unfold R_dist in H4; elim (H4 (eps / 2) (H1 eps)); intros; set (E := fun zeta:R => 0 < zeta <= M - m /\ (forall z:R, Rabs (z - x) < zeta -> Rabs (f0 z - f0 x) < eps / 2)); assert (H6 : bound E). unfold bound; exists (M - m); unfold is_upper_bound; unfold E; intros; elim H6; clear H6; intros H6 _; elim H6; clear H6; intros _ H6; apply H6. assert (H7 : exists x : R, E x). elim H5; clear H5; intros; exists (Rmin x0 (M - m)); unfold E; intros; split. split. unfold Rmin; case (Rle_dec x0 (M - m)); intro. apply H5. apply Rlt_Rminus; apply Hyp. apply Rmin_r. intros; case (Req_dec x z); intro. rewrite H9; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (H1 eps). apply H7; split. unfold D_x, no_cond; split; [ trivial | assumption ]. apply Rlt_le_trans with (Rmin x0 (M - m)); [ apply H8 | apply Rmin_l ]. assert (H8 := completeness _ H6 H7); elim H8; clear H8; intros; cut (0 < x1 <= M - m). intro; elim H8; clear H8; intros; exists (mkposreal _ H8); split. intros; cut (exists alp : R, Rabs (z - x) < alp <= x1 /\ E alp). intros; elim H11; intros; elim H12; clear H12; intros; unfold E in H13; elim H13; intros; apply H15. elim H12; intros; assumption. elim (classic (exists alp : R, Rabs (z - x) < alp <= x1 /\ E alp)); intro. assumption. assert (H12 := not_ex_all_not _ (fun alp:R => Rabs (z - x) < alp <= x1 /\ E alp) H11); unfold is_lub in p; elim p; intros; cut (is_upper_bound E (Rabs (z - x))). intro; assert (H16 := H14 _ H15); elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H10 H16)). unfold is_upper_bound; intros; unfold is_upper_bound in H13; assert (H16 := H13 _ H15); case (Rle_dec x2 (Rabs (z - x))); intro. assumption. elim (H12 x2); split; [ split; [ auto with real | assumption ] | assumption ]. split. apply p. unfold disc; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; simpl; unfold Rdiv; apply Rmult_lt_0_compat; [ apply H8 | apply Rinv_0_lt_compat; prove_sup0 ]. elim H7; intros; unfold E in H8; elim H8; intros H9 _; elim H9; intros H10 _; unfold is_lub in p; elim p; intros; unfold is_upper_bound in H12; unfold is_upper_bound in H11; split. apply Rlt_le_trans with x2; [ assumption | apply (H11 _ H8) ]. apply H12; intros; unfold E in H13; elim H13; intros; elim H14; intros; assumption. unfold family_open_set; intro; simpl; elim (classic (X x)); intro. unfold g; unfold open_set; intros; elim H4; clear H4; intros _ H4; elim H4; clear H4; intros; elim H4; clear H4; intros; unfold neighbourhood; case (Req_dec x x0); intro. 
exists (mkposreal _ (H1 x1)); rewrite <- H6; unfold included; intros; split. assumption. exists x1; split. apply H4. split. elim H5; intros; apply H8. apply H7. set (d := x1 / 2 - Rabs (x0 - x)); assert (H7 : 0 < d). unfold d; apply Rlt_Rminus; elim H5; clear H5; intros; unfold disc in H7; apply H7. exists (mkposreal _ H7); unfold included; intros; split. assumption. exists x1; split. apply H4. elim H5; intros; split. assumption. unfold disc in H8; simpl in H8; unfold disc; simpl; unfold disc in H10; simpl in H10; apply Rle_lt_trans with (Rabs (x2 - x0) + Rabs (x0 - x)). replace (x2 - x) with (x2 - x0 + (x0 - x)); [ apply Rabs_triang | ring ]. replace (x1 / 2) with (d + Rabs (x0 - x)); [ idtac | unfold d; ring ]. do 2 rewrite <- (Rplus_comm (Rabs (x0 - x))); apply Rplus_lt_compat_l; apply H8. apply open_set_P6 with (fun _:R => False). apply open_set_P4. unfold eq_Dom; unfold included; intros; split. intros; elim H4. intros; unfold g in H4; elim H4; clear H4; intros H4 _; elim H3; apply H4. elim (H0 _ H3); intros DF H4; unfold covering_finite in H4; elim H4; clear H4; intros; unfold family_finite in H5; unfold domain_finite in H5; unfold covering in H4; simpl in H4; simpl in H5; elim H5; clear H5; intros l H5; unfold intersection_domain in H5; cut (forall x:R, In x l -> exists del : R, 0 < del /\ (forall z:R, Rabs (z - x) < del -> Rabs (f0 z - f0 x) < eps / 2) /\ included (g x) (fun z:R => Rabs (z - x) < del / 2)). intros; assert (H7 := Rlist_P1 l (fun x del:R => 0 < del /\ (forall z:R, Rabs (z - x) < del -> Rabs (f0 z - f0 x) < eps / 2) /\ included (g x) (fun z:R => Rabs (z - x) < del / 2)) H6); elim H7; clear H7; intros l' H7; elim H7; clear H7; intros; set (D := MinRlist l'); cut (0 < D / 2). intro; exists (mkposreal _ H9); intros; assert (H13 := H4 _ H10); elim H13; clear H13; intros xi H13; assert (H14 : In xi l). unfold g in H13; decompose [and] H13; elim (H5 xi); intros; apply H14; split; assumption. elim (pos_Rl_P2 l xi); intros H15 _; elim (H15 H14); intros i H16; elim H16; intros; apply Rle_lt_trans with (Rabs (f0 x - f0 xi) + Rabs (f0 xi - f0 y)). replace (f0 x - f0 y) with (f0 x - f0 xi + (f0 xi - f0 y)); [ apply Rabs_triang | ring ]. rewrite (double_var eps); apply Rplus_lt_compat. assert (H19 := H8 i H17); elim H19; clear H19; intros; rewrite <- H18 in H20; elim H20; clear H20; intros; apply H20; unfold included in H21; apply Rlt_trans with (pos_Rl l' i / 2). apply H21. elim H13; clear H13; intros; assumption. unfold Rdiv; apply Rmult_lt_reg_l with 2. prove_sup0. rewrite Rmult_comm; rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; pattern (pos_Rl l' i) at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; apply H19. discrR. assert (H19 := H8 i H17); elim H19; clear H19; intros; rewrite <- H18 in H20; elim H20; clear H20; intros; rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H20; unfold included in H21; elim H13; intros; assert (H24 := H21 x H22); apply Rle_lt_trans with (Rabs (y - x) + Rabs (x - xi)). replace (y - xi) with (y - x + (x - xi)); [ apply Rabs_triang | ring ]. rewrite (double_var (pos_Rl l' i)); apply Rplus_lt_compat. apply Rlt_le_trans with (D / 2). rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H12. unfold Rdiv; do 2 rewrite <- (Rmult_comm (/ 2)); apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; prove_sup0. unfold D; apply MinRlist_P1; elim (pos_Rl_P2 l' (pos_Rl l' i)); intros; apply H26; exists i; split; [ rewrite <- H7; assumption | reflexivity ]. assumption. 
unfold Rdiv; apply Rmult_lt_0_compat; [ unfold D; apply MinRlist_P2; intros; elim (pos_Rl_P2 l' y); intros; elim (H10 H9); intros; elim H12; intros; rewrite H14; rewrite <- H7 in H13; elim (H8 x H13); intros; apply H15 | apply Rinv_0_lt_compat; prove_sup0 ]. intros; elim (H5 x); intros; elim (H8 H6); intros; set (E := fun zeta:R => 0 < zeta <= M - m /\ (forall z:R, Rabs (z - x) < zeta -> Rabs (f0 z - f0 x) < eps / 2)); assert (H11 : bound E). unfold bound; exists (M - m); unfold is_upper_bound; unfold E; intros; elim H11; clear H11; intros H11 _; elim H11; clear H11; intros _ H11; apply H11. assert (H12 : exists x : R, E x). assert (H13 := H _ H9); unfold continuity_pt in H13; unfold continue_in in H13; unfold limit1_in in H13; unfold limit_in in H13; simpl in H13; unfold R_dist in H13; elim (H13 _ (H1 eps)); intros; elim H12; clear H12; intros; exists (Rmin x0 (M - m)); unfold E; intros; split. split; [ unfold Rmin; case (Rle_dec x0 (M - m)); intro; [ apply H12 | apply Rlt_Rminus; apply Hyp ] | apply Rmin_r ]. intros; case (Req_dec x z); intro. rewrite H16; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (H1 eps). apply H14; split; [ unfold D_x, no_cond; split; [ trivial | assumption ] | apply Rlt_le_trans with (Rmin x0 (M - m)); [ apply H15 | apply Rmin_l ] ]. assert (H13 := completeness _ H11 H12); elim H13; clear H13; intros; cut (0 < x0 <= M - m). intro; elim H13; clear H13; intros; exists x0; split. assumption. split. intros; cut (exists alp : R, Rabs (z - x) < alp <= x0 /\ E alp). intros; elim H16; intros; elim H17; clear H17; intros; unfold E in H18; elim H18; intros; apply H20; elim H17; intros; assumption. elim (classic (exists alp : R, Rabs (z - x) < alp <= x0 /\ E alp)); intro. assumption. assert (H17 := not_ex_all_not _ (fun alp:R => Rabs (z - x) < alp <= x0 /\ E alp) H16); unfold is_lub in p; elim p; intros; cut (is_upper_bound E (Rabs (z - x))). intro; assert (H21 := H19 _ H20); elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H15 H21)). unfold is_upper_bound; intros; unfold is_upper_bound in H18; assert (H21 := H18 _ H20); case (Rle_dec x1 (Rabs (z - x))); intro. assumption. elim (H17 x1); split. split; [ auto with real | assumption ]. assumption. unfold included, g; intros; elim H15; intros; elim H17; intros; decompose [and] H18; cut (x0 = x2). intro; rewrite H20; apply H22. unfold E in p; eapply is_lub_u. apply p. apply H21. elim H12; intros; unfold E in H13; elim H13; intros H14 _; elim H14; intros H15 _; unfold is_lub in p; elim p; intros; unfold is_upper_bound in H16; unfold is_upper_bound in H17; split. apply Rlt_le_trans with x1; [ assumption | apply (H16 _ H13) ]. apply H17; intros; unfold E in H18; elim H18; intros; elim H19; intros; assumption. Qed. coq-8.4pl2/theories/Reals/R_Ifp.v0000640000175000001440000011505112010532755015710 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* IZR z <= r + 1 -> z = up r. Proof. intros; generalize (archimed r); intro; elim H1; intros; clear H1; unfold Rgt in H2; unfold Rminus in H3; generalize (Rplus_le_compat_l r (IZR (up r) + - r) 1 H3); intro; clear H3; rewrite (Rplus_comm (IZR (up r)) (- r)) in H1; rewrite <- (Rplus_assoc r (- r) (IZR (up r))) in H1; rewrite (Rplus_opp_r r) in H1; elim (Rplus_ne (IZR (up r))); intros a b; rewrite b in H1; clear a b; apply (single_z_r_R1 r z (up r)); auto with zarith real. Qed. (**********) Lemma up_tech : forall (r:R) (z:Z), IZR z <= r -> r < IZR (z + 1) -> (z + 1)%Z = up r. Proof. 
intros; generalize (Rplus_le_compat_l 1 (IZR z) r H); intro; clear H; rewrite (Rplus_comm 1 (IZR z)) in H1; rewrite (Rplus_comm 1 r) in H1; cut (1 = IZR 1); auto with zarith real. intro; generalize H1; pattern 1 at 1; rewrite H; intro; clear H H1; rewrite <- (plus_IZR z 1) in H2; apply (tech_up r (z + 1)); auto with zarith real. Qed. (**********) Lemma fp_R0 : frac_part 0 = 0. Proof. unfold frac_part; unfold Int_part; elim (archimed 0); intros; unfold Rminus; elim (Rplus_ne (- IZR (up 0 - 1))); intros a b; rewrite b; clear a b; rewrite <- Z_R_minus; cut (up 0 = 1%Z). intro; rewrite H1; rewrite (Rminus_diag_eq (IZR 1) (IZR 1) (eq_refl (IZR 1))); apply Ropp_0. elim (archimed 0); intros; clear H2; unfold Rgt in H1; rewrite (Rminus_0_r (IZR (up 0))) in H0; generalize (lt_O_IZR (up 0) H1); intro; clear H1; generalize (le_IZR_R1 (up 0) H0); intro; clear H H0; omega. Qed. (**********) Lemma for_base_fp : forall r:R, IZR (up r) - r > 0 /\ IZR (up r) - r <= 1. Proof. intro; split; cut (IZR (up r) > r /\ IZR (up r) - r <= 1). intro; elim H; intros. apply (Rgt_minus (IZR (up r)) r H0). apply archimed. intro; elim H; intros. exact H1. apply archimed. Qed. (**********) Lemma base_fp : forall r:R, frac_part r >= 0 /\ frac_part r < 1. Proof. intro; unfold frac_part; unfold Int_part; split. (*sup a O*) cut (r - IZR (up r) >= -1). rewrite <- Z_R_minus; simpl; intro; unfold Rminus; rewrite Ropp_plus_distr; rewrite <- Rplus_assoc; fold (r - IZR (up r)); fold (r - IZR (up r) - -1); apply Rge_minus; auto with zarith real. rewrite <- Ropp_minus_distr; apply Ropp_le_ge_contravar; elim (for_base_fp r); auto with zarith real. (*inf a 1*) cut (r - IZR (up r) < 0). rewrite <- Z_R_minus; simpl; intro; unfold Rminus; rewrite Ropp_plus_distr; rewrite <- Rplus_assoc; fold (r - IZR (up r)); rewrite Ropp_involutive; elim (Rplus_ne 1); intros a b; pattern 1 at 2; rewrite <- a; clear a b; rewrite (Rplus_comm (r - IZR (up r)) 1); apply Rplus_lt_compat_l; auto with zarith real. elim (for_base_fp r); intros; rewrite <- Ropp_0; rewrite <- Ropp_minus_distr; apply Ropp_gt_lt_contravar; auto with zarith real. Qed. (*********************************************************) (** * Properties *) (*********************************************************) (**********) Lemma base_Int_part : forall r:R, IZR (Int_part r) <= r /\ IZR (Int_part r) - r > -1. Proof. intro; unfold Int_part; elim (archimed r); intros. split; rewrite <- (Z_R_minus (up r) 1); simpl. generalize (Rle_minus (IZR (up r) - r) 1 H0); intro; unfold Rminus in H1; rewrite (Rplus_assoc (IZR (up r)) (- r) (-1)) in H1; rewrite (Rplus_comm (- r) (-1)) in H1; rewrite <- (Rplus_assoc (IZR (up r)) (-1) (- r)) in H1; fold (IZR (up r) - 1) in H1; fold (IZR (up r) - 1 - r) in H1; apply Rminus_le; auto with zarith real. generalize (Rplus_gt_compat_l (-1) (IZR (up r)) r H); intro; rewrite (Rplus_comm (-1) (IZR (up r))) in H1; generalize (Rplus_gt_compat_l (- r) (IZR (up r) + -1) (-1 + r) H1); intro; clear H H0 H1; rewrite (Rplus_comm (- r) (IZR (up r) + -1)) in H2; fold (IZR (up r) - 1) in H2; fold (IZR (up r) - 1 - r) in H2; rewrite (Rplus_comm (- r) (-1 + r)) in H2; rewrite (Rplus_assoc (-1) r (- r)) in H2; rewrite (Rplus_opp_r r) in H2; elim (Rplus_ne (-1)); intros a b; rewrite a in H2; clear a b; auto with zarith real. Qed. (**********) Lemma Int_part_INR : forall n:nat, Int_part (INR n) = Z.of_nat n. Proof. intros n; unfold Int_part. cut (up (INR n) = (Z.of_nat n + Z.of_nat 1)%Z). intros H'; rewrite H'; simpl; ring. symmetry; apply tech_up; auto. 
replace (Z.of_nat n + Z.of_nat 1)%Z with (Z.of_nat (S n)). repeat rewrite <- INR_IZR_INZ. apply lt_INR; auto. rewrite Z.add_comm; rewrite <- Znat.Nat2Z.inj_add; simpl; auto. rewrite plus_IZR; simpl; auto with real. repeat rewrite <- INR_IZR_INZ; auto with real. Qed. (**********) Lemma fp_nat : forall r:R, frac_part r = 0 -> exists c : Z, r = IZR c. Proof. unfold frac_part; intros; split with (Int_part r); apply Rminus_diag_uniq; auto with zarith real. Qed. (**********) Lemma R0_fp_O : forall r:R, 0 <> frac_part r -> 0 <> r. Proof. red; intros; rewrite <- H0 in H; generalize fp_R0; intro; auto with zarith real. Qed. (**********) Lemma Rminus_Int_part1 : forall r1 r2:R, frac_part r1 >= frac_part r2 -> Int_part (r1 - r2) = (Int_part r1 - Int_part r2)%Z. Proof. intros; elim (base_fp r1); elim (base_fp r2); intros; generalize (Rge_le (frac_part r2) 0 H0); intro; clear H0; generalize (Ropp_le_ge_contravar 0 (frac_part r2) H4); intro; clear H4; rewrite Ropp_0 in H0; generalize (Rge_le 0 (- frac_part r2) H0); intro; clear H0; generalize (Rge_le (frac_part r1) 0 H2); intro; clear H2; generalize (Ropp_lt_gt_contravar (frac_part r2) 1 H1); intro; clear H1; unfold Rgt in H2; generalize (sum_inequa_Rle_lt 0 (frac_part r1) 1 (-1) (- frac_part r2) 0 H0 H3 H2 H4); intro; elim H1; intros; clear H1; elim (Rplus_ne 1); intros a b; rewrite a in H6; clear a b H5; generalize (Rge_minus (frac_part r1) (frac_part r2) H); intro; clear H; fold (frac_part r1 - frac_part r2) in H6; generalize (Rge_le (frac_part r1 - frac_part r2) 0 H1); intro; clear H1 H3 H4 H0 H2; unfold frac_part in H6, H; unfold Rminus in H6, H; rewrite (Ropp_plus_distr r2 (- IZR (Int_part r2))) in H; rewrite (Ropp_involutive (IZR (Int_part r2))) in H; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (- r2 + IZR (Int_part r2))) in H; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- r2) (IZR (Int_part r2))) in H; rewrite (Rplus_comm (- IZR (Int_part r1)) (- r2)) in H; rewrite (Rplus_assoc (- r2) (- IZR (Int_part r1)) (IZR (Int_part r2))) in H; rewrite <- (Rplus_assoc r1 (- r2) (- IZR (Int_part r1) + IZR (Int_part r2))) in H; rewrite (Rplus_comm (- IZR (Int_part r1)) (IZR (Int_part r2))) in H; fold (r1 - r2) in H; fold (IZR (Int_part r2) - IZR (Int_part r1)) in H; generalize (Rplus_le_compat_l (IZR (Int_part r1) - IZR (Int_part r2)) 0 (r1 - r2 + (IZR (Int_part r2) - IZR (Int_part r1))) H); intro; clear H; rewrite (Rplus_comm (r1 - r2) (IZR (Int_part r2) - IZR (Int_part r1))) in H0; rewrite <- (Rplus_assoc (IZR (Int_part r1) - IZR (Int_part r2)) (IZR (Int_part r2) - IZR (Int_part r1)) (r1 - r2)) in H0; unfold Rminus in H0; fold (r1 - r2) in H0; rewrite (Rplus_assoc (IZR (Int_part r1)) (- IZR (Int_part r2)) (IZR (Int_part r2) + - IZR (Int_part r1))) in H0; rewrite <- (Rplus_assoc (- IZR (Int_part r2)) (IZR (Int_part r2)) (- IZR (Int_part r1))) in H0; rewrite (Rplus_opp_l (IZR (Int_part r2))) in H0; elim (Rplus_ne (- IZR (Int_part r1))); intros a b; rewrite b in H0; clear a b; elim (Rplus_ne (IZR (Int_part r1) + - IZR (Int_part r2))); intros a b; rewrite a in H0; clear a b; rewrite (Rplus_opp_r (IZR (Int_part r1))) in H0; elim (Rplus_ne (r1 - r2)); intros a b; rewrite b in H0; clear a b; fold (IZR (Int_part r1) - IZR (Int_part r2)) in H0; rewrite (Ropp_plus_distr r2 (- IZR (Int_part r2))) in H6; rewrite (Ropp_involutive (IZR (Int_part r2))) in H6; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (- r2 + IZR (Int_part r2))) in H6; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- r2) (IZR (Int_part r2))) in H6; rewrite (Rplus_comm (- IZR (Int_part r1)) (- r2)) 
in H6; rewrite (Rplus_assoc (- r2) (- IZR (Int_part r1)) (IZR (Int_part r2))) in H6; rewrite <- (Rplus_assoc r1 (- r2) (- IZR (Int_part r1) + IZR (Int_part r2))) in H6; rewrite (Rplus_comm (- IZR (Int_part r1)) (IZR (Int_part r2))) in H6; fold (r1 - r2) in H6; fold (IZR (Int_part r2) - IZR (Int_part r1)) in H6; generalize (Rplus_lt_compat_l (IZR (Int_part r1) - IZR (Int_part r2)) (r1 - r2 + (IZR (Int_part r2) - IZR (Int_part r1))) 1 H6); intro; clear H6; rewrite (Rplus_comm (r1 - r2) (IZR (Int_part r2) - IZR (Int_part r1))) in H; rewrite <- (Rplus_assoc (IZR (Int_part r1) - IZR (Int_part r2)) (IZR (Int_part r2) - IZR (Int_part r1)) (r1 - r2)) in H; rewrite <- (Ropp_minus_distr (IZR (Int_part r1)) (IZR (Int_part r2))) in H; rewrite (Rplus_opp_r (IZR (Int_part r1) - IZR (Int_part r2))) in H; elim (Rplus_ne (r1 - r2)); intros a b; rewrite b in H; clear a b; rewrite (Z_R_minus (Int_part r1) (Int_part r2)) in H0; rewrite (Z_R_minus (Int_part r1) (Int_part r2)) in H; cut (1 = IZR 1); auto with zarith real. intro; rewrite H1 in H; clear H1; rewrite <- (plus_IZR (Int_part r1 - Int_part r2) 1) in H; generalize (up_tech (r1 - r2) (Int_part r1 - Int_part r2) H0 H); intros; clear H H0; unfold Int_part at 1; omega. Qed. (**********) Lemma Rminus_Int_part2 : forall r1 r2:R, frac_part r1 < frac_part r2 -> Int_part (r1 - r2) = (Int_part r1 - Int_part r2 - 1)%Z. Proof. intros; elim (base_fp r1); elim (base_fp r2); intros; generalize (Rge_le (frac_part r2) 0 H0); intro; clear H0; generalize (Ropp_le_ge_contravar 0 (frac_part r2) H4); intro; clear H4; rewrite Ropp_0 in H0; generalize (Rge_le 0 (- frac_part r2) H0); intro; clear H0; generalize (Rge_le (frac_part r1) 0 H2); intro; clear H2; generalize (Ropp_lt_gt_contravar (frac_part r2) 1 H1); intro; clear H1; unfold Rgt in H2; generalize (sum_inequa_Rle_lt 0 (frac_part r1) 1 (-1) (- frac_part r2) 0 H0 H3 H2 H4); intro; elim H1; intros; clear H1; elim (Rplus_ne (-1)); intros a b; rewrite b in H5; clear a b H6; generalize (Rlt_minus (frac_part r1) (frac_part r2) H); intro; clear H; fold (frac_part r1 - frac_part r2) in H5; clear H3 H4 H0 H2; unfold frac_part in H5, H1; unfold Rminus in H5, H1; rewrite (Ropp_plus_distr r2 (- IZR (Int_part r2))) in H5; rewrite (Ropp_involutive (IZR (Int_part r2))) in H5; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (- r2 + IZR (Int_part r2))) in H5; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- r2) (IZR (Int_part r2))) in H5; rewrite (Rplus_comm (- IZR (Int_part r1)) (- r2)) in H5; rewrite (Rplus_assoc (- r2) (- IZR (Int_part r1)) (IZR (Int_part r2))) in H5; rewrite <- (Rplus_assoc r1 (- r2) (- IZR (Int_part r1) + IZR (Int_part r2))) in H5; rewrite (Rplus_comm (- IZR (Int_part r1)) (IZR (Int_part r2))) in H5; fold (r1 - r2) in H5; fold (IZR (Int_part r2) - IZR (Int_part r1)) in H5; generalize (Rplus_lt_compat_l (IZR (Int_part r1) - IZR (Int_part r2)) (-1) (r1 - r2 + (IZR (Int_part r2) - IZR (Int_part r1))) H5); intro; clear H5; rewrite (Rplus_comm (r1 - r2) (IZR (Int_part r2) - IZR (Int_part r1))) in H; rewrite <- (Rplus_assoc (IZR (Int_part r1) - IZR (Int_part r2)) (IZR (Int_part r2) - IZR (Int_part r1)) (r1 - r2)) in H; unfold Rminus in H; fold (r1 - r2) in H; rewrite (Rplus_assoc (IZR (Int_part r1)) (- IZR (Int_part r2)) (IZR (Int_part r2) + - IZR (Int_part r1))) in H; rewrite <- (Rplus_assoc (- IZR (Int_part r2)) (IZR (Int_part r2)) (- IZR (Int_part r1))) in H; rewrite (Rplus_opp_l (IZR (Int_part r2))) in H; elim (Rplus_ne (- IZR (Int_part r1))); intros a b; rewrite b in H; clear a b; rewrite (Rplus_opp_r (IZR (Int_part 
r1))) in H; elim (Rplus_ne (r1 - r2)); intros a b; rewrite b in H; clear a b; fold (IZR (Int_part r1) - IZR (Int_part r2)) in H; fold (IZR (Int_part r1) - IZR (Int_part r2) - 1) in H; rewrite (Ropp_plus_distr r2 (- IZR (Int_part r2))) in H1; rewrite (Ropp_involutive (IZR (Int_part r2))) in H1; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (- r2 + IZR (Int_part r2))) in H1; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- r2) (IZR (Int_part r2))) in H1; rewrite (Rplus_comm (- IZR (Int_part r1)) (- r2)) in H1; rewrite (Rplus_assoc (- r2) (- IZR (Int_part r1)) (IZR (Int_part r2))) in H1; rewrite <- (Rplus_assoc r1 (- r2) (- IZR (Int_part r1) + IZR (Int_part r2))) in H1; rewrite (Rplus_comm (- IZR (Int_part r1)) (IZR (Int_part r2))) in H1; fold (r1 - r2) in H1; fold (IZR (Int_part r2) - IZR (Int_part r1)) in H1; generalize (Rplus_lt_compat_l (IZR (Int_part r1) - IZR (Int_part r2)) (r1 - r2 + (IZR (Int_part r2) - IZR (Int_part r1))) 0 H1); intro; clear H1; rewrite (Rplus_comm (r1 - r2) (IZR (Int_part r2) - IZR (Int_part r1))) in H0; rewrite <- (Rplus_assoc (IZR (Int_part r1) - IZR (Int_part r2)) (IZR (Int_part r2) - IZR (Int_part r1)) (r1 - r2)) in H0; rewrite <- (Ropp_minus_distr (IZR (Int_part r1)) (IZR (Int_part r2))) in H0; rewrite (Rplus_opp_r (IZR (Int_part r1) - IZR (Int_part r2))) in H0; elim (Rplus_ne (r1 - r2)); intros a b; rewrite b in H0; clear a b; rewrite <- (Rplus_opp_l 1) in H0; rewrite <- (Rplus_assoc (IZR (Int_part r1) - IZR (Int_part r2)) (-1) 1) in H0; fold (IZR (Int_part r1) - IZR (Int_part r2) - 1) in H0; rewrite (Z_R_minus (Int_part r1) (Int_part r2)) in H0; rewrite (Z_R_minus (Int_part r1) (Int_part r2)) in H; cut (1 = IZR 1); auto with zarith real. intro; rewrite H1 in H; rewrite H1 in H0; clear H1; rewrite (Z_R_minus (Int_part r1 - Int_part r2) 1) in H; rewrite (Z_R_minus (Int_part r1 - Int_part r2) 1) in H0; rewrite <- (plus_IZR (Int_part r1 - Int_part r2 - 1) 1) in H0; generalize (Rlt_le (IZR (Int_part r1 - Int_part r2 - 1)) (r1 - r2) H); intro; clear H; generalize (up_tech (r1 - r2) (Int_part r1 - Int_part r2 - 1) H1 H0); intros; clear H0 H1; unfold Int_part at 1; omega. Qed. (**********) Lemma Rminus_fp1 : forall r1 r2:R, frac_part r1 >= frac_part r2 -> frac_part (r1 - r2) = frac_part r1 - frac_part r2. Proof. intros; unfold frac_part; generalize (Rminus_Int_part1 r1 r2 H); intro; rewrite H0; rewrite <- (Z_R_minus (Int_part r1) (Int_part r2)); unfold Rminus; rewrite (Ropp_plus_distr (IZR (Int_part r1)) (- IZR (Int_part r2))); rewrite (Ropp_plus_distr r2 (- IZR (Int_part r2))); rewrite (Ropp_involutive (IZR (Int_part r2))); rewrite (Rplus_assoc r1 (- r2) (- IZR (Int_part r1) + IZR (Int_part r2))); rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (- r2 + IZR (Int_part r2))); rewrite <- (Rplus_assoc (- r2) (- IZR (Int_part r1)) (IZR (Int_part r2))); rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- r2) (IZR (Int_part r2))); rewrite (Rplus_comm (- r2) (- IZR (Int_part r1))); auto with zarith real. Qed. (**********) Lemma Rminus_fp2 : forall r1 r2:R, frac_part r1 < frac_part r2 -> frac_part (r1 - r2) = frac_part r1 - frac_part r2 + 1. Proof. 
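(* Sanity check of the statement on a concrete pair: for r1 = 5/2 and r2 = 3/4
   one has frac_part r1 = 1/2 < 3/4 = frac_part r2, and indeed
   frac_part (r1 - r2) = frac_part (7/4) = 3/4 = 1/2 - 3/4 + 1.
   The proof below rewrites with [Rminus_Int_part2] and then rearranges the
   resulting sum with [Rplus]/[Ropp] rewrites. *)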
intros; unfold frac_part; generalize (Rminus_Int_part2 r1 r2 H); intro; rewrite H0; rewrite <- (Z_R_minus (Int_part r1 - Int_part r2) 1); rewrite <- (Z_R_minus (Int_part r1) (Int_part r2)); unfold Rminus; rewrite (Ropp_plus_distr (IZR (Int_part r1) + - IZR (Int_part r2)) (- IZR 1)) ; rewrite (Ropp_plus_distr r2 (- IZR (Int_part r2))); rewrite (Ropp_involutive (IZR 1)); rewrite (Ropp_involutive (IZR (Int_part r2))); rewrite (Ropp_plus_distr (IZR (Int_part r1))); rewrite (Ropp_involutive (IZR (Int_part r2))); simpl; rewrite <- (Rplus_assoc (r1 + - r2) (- IZR (Int_part r1) + IZR (Int_part r2)) 1) ; rewrite (Rplus_assoc r1 (- r2) (- IZR (Int_part r1) + IZR (Int_part r2))); rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (- r2 + IZR (Int_part r2))); rewrite <- (Rplus_assoc (- r2) (- IZR (Int_part r1)) (IZR (Int_part r2))); rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- r2) (IZR (Int_part r2))); rewrite (Rplus_comm (- r2) (- IZR (Int_part r1))); auto with zarith real. Qed. (**********) Lemma plus_Int_part1 : forall r1 r2:R, frac_part r1 + frac_part r2 >= 1 -> Int_part (r1 + r2) = (Int_part r1 + Int_part r2 + 1)%Z. Proof. intros; generalize (Rge_le (frac_part r1 + frac_part r2) 1 H); intro; clear H; elim (base_fp r1); elim (base_fp r2); intros; clear H H2; generalize (Rplus_lt_compat_l (frac_part r2) (frac_part r1) 1 H3); intro; clear H3; generalize (Rplus_lt_compat_l 1 (frac_part r2) 1 H1); intro; clear H1; rewrite (Rplus_comm 1 (frac_part r2)) in H2; generalize (Rlt_trans (frac_part r2 + frac_part r1) (frac_part r2 + 1) 2 H H2); intro; clear H H2; rewrite (Rplus_comm (frac_part r2) (frac_part r1)) in H1; unfold frac_part in H0, H1; unfold Rminus in H0, H1; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (r2 + - IZR (Int_part r2))) in H1; rewrite (Rplus_comm r2 (- IZR (Int_part r2))) in H1; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- IZR (Int_part r2)) r2) in H1; rewrite (Rplus_comm (- IZR (Int_part r1) + - IZR (Int_part r2)) r2) in H1; rewrite <- (Rplus_assoc r1 r2 (- IZR (Int_part r1) + - IZR (Int_part r2))) in H1; rewrite <- (Ropp_plus_distr (IZR (Int_part r1)) (IZR (Int_part r2))) in H1; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (r2 + - IZR (Int_part r2))) in H0; rewrite (Rplus_comm r2 (- IZR (Int_part r2))) in H0; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- IZR (Int_part r2)) r2) in H0; rewrite (Rplus_comm (- IZR (Int_part r1) + - IZR (Int_part r2)) r2) in H0; rewrite <- (Rplus_assoc r1 r2 (- IZR (Int_part r1) + - IZR (Int_part r2))) in H0; rewrite <- (Ropp_plus_distr (IZR (Int_part r1)) (IZR (Int_part r2))) in H0; generalize (Rplus_le_compat_l (IZR (Int_part r1) + IZR (Int_part r2)) 1 (r1 + r2 + - (IZR (Int_part r1) + IZR (Int_part r2))) H0); intro; clear H0; generalize (Rplus_lt_compat_l (IZR (Int_part r1) + IZR (Int_part r2)) (r1 + r2 + - (IZR (Int_part r1) + IZR (Int_part r2))) 2 H1); intro; clear H1; rewrite (Rplus_comm (r1 + r2) (- (IZR (Int_part r1) + IZR (Int_part r2)))) in H; rewrite <- (Rplus_assoc (IZR (Int_part r1) + IZR (Int_part r2)) (- (IZR (Int_part r1) + IZR (Int_part r2))) (r1 + r2)) in H; rewrite (Rplus_opp_r (IZR (Int_part r1) + IZR (Int_part r2))) in H; elim (Rplus_ne (r1 + r2)); intros a b; rewrite b in H; clear a b; rewrite (Rplus_comm (r1 + r2) (- (IZR (Int_part r1) + IZR (Int_part r2)))) in H0; rewrite <- (Rplus_assoc (IZR (Int_part r1) + IZR (Int_part r2)) (- (IZR (Int_part r1) + IZR (Int_part r2))) (r1 + r2)) in H0; rewrite (Rplus_opp_r (IZR (Int_part r1) + IZR (Int_part r2))) in H0; elim (Rplus_ne (r1 + r2)); intros a b; rewrite b in H0; clear a b; 
rewrite <- (Rplus_assoc (IZR (Int_part r1) + IZR (Int_part r2)) 1 1) in H0; cut (1 = IZR 1); auto with zarith real. intro; rewrite H1 in H0; rewrite H1 in H; clear H1; rewrite <- (plus_IZR (Int_part r1) (Int_part r2)) in H; rewrite <- (plus_IZR (Int_part r1) (Int_part r2)) in H0; rewrite <- (plus_IZR (Int_part r1 + Int_part r2) 1) in H; rewrite <- (plus_IZR (Int_part r1 + Int_part r2) 1) in H0; rewrite <- (plus_IZR (Int_part r1 + Int_part r2 + 1) 1) in H0; generalize (up_tech (r1 + r2) (Int_part r1 + Int_part r2 + 1) H H0); intro; clear H H0; unfold Int_part at 1; omega. Qed. (**********) Lemma plus_Int_part2 : forall r1 r2:R, frac_part r1 + frac_part r2 < 1 -> Int_part (r1 + r2) = (Int_part r1 + Int_part r2)%Z. Proof. intros; elim (base_fp r1); elim (base_fp r2); intros; clear H1 H3; generalize (Rge_le (frac_part r2) 0 H0); intro; clear H0; generalize (Rge_le (frac_part r1) 0 H2); intro; clear H2; generalize (Rplus_le_compat_l (frac_part r1) 0 (frac_part r2) H1); intro; clear H1; elim (Rplus_ne (frac_part r1)); intros a b; rewrite a in H2; clear a b; generalize (Rle_trans 0 (frac_part r1) (frac_part r1 + frac_part r2) H0 H2); intro; clear H0 H2; unfold frac_part in H, H1; unfold Rminus in H, H1; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (r2 + - IZR (Int_part r2))) in H1; rewrite (Rplus_comm r2 (- IZR (Int_part r2))) in H1; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- IZR (Int_part r2)) r2) in H1; rewrite (Rplus_comm (- IZR (Int_part r1) + - IZR (Int_part r2)) r2) in H1; rewrite <- (Rplus_assoc r1 r2 (- IZR (Int_part r1) + - IZR (Int_part r2))) in H1; rewrite <- (Ropp_plus_distr (IZR (Int_part r1)) (IZR (Int_part r2))) in H1; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (r2 + - IZR (Int_part r2))) in H; rewrite (Rplus_comm r2 (- IZR (Int_part r2))) in H; rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- IZR (Int_part r2)) r2) in H; rewrite (Rplus_comm (- IZR (Int_part r1) + - IZR (Int_part r2)) r2) in H; rewrite <- (Rplus_assoc r1 r2 (- IZR (Int_part r1) + - IZR (Int_part r2))) in H; rewrite <- (Ropp_plus_distr (IZR (Int_part r1)) (IZR (Int_part r2))) in H; generalize (Rplus_le_compat_l (IZR (Int_part r1) + IZR (Int_part r2)) 0 (r1 + r2 + - (IZR (Int_part r1) + IZR (Int_part r2))) H1); intro; clear H1; generalize (Rplus_lt_compat_l (IZR (Int_part r1) + IZR (Int_part r2)) (r1 + r2 + - (IZR (Int_part r1) + IZR (Int_part r2))) 1 H); intro; clear H; rewrite (Rplus_comm (r1 + r2) (- (IZR (Int_part r1) + IZR (Int_part r2)))) in H1; rewrite <- (Rplus_assoc (IZR (Int_part r1) + IZR (Int_part r2)) (- (IZR (Int_part r1) + IZR (Int_part r2))) (r1 + r2)) in H1; rewrite (Rplus_opp_r (IZR (Int_part r1) + IZR (Int_part r2))) in H1; elim (Rplus_ne (r1 + r2)); intros a b; rewrite b in H1; clear a b; rewrite (Rplus_comm (r1 + r2) (- (IZR (Int_part r1) + IZR (Int_part r2)))) in H0; rewrite <- (Rplus_assoc (IZR (Int_part r1) + IZR (Int_part r2)) (- (IZR (Int_part r1) + IZR (Int_part r2))) (r1 + r2)) in H0; rewrite (Rplus_opp_r (IZR (Int_part r1) + IZR (Int_part r2))) in H0; elim (Rplus_ne (IZR (Int_part r1) + IZR (Int_part r2))); intros a b; rewrite a in H0; clear a b; elim (Rplus_ne (r1 + r2)); intros a b; rewrite b in H0; clear a b; cut (1 = IZR 1); auto with zarith real. intro; rewrite H in H1; clear H; rewrite <- (plus_IZR (Int_part r1) (Int_part r2)) in H0; rewrite <- (plus_IZR (Int_part r1) (Int_part r2)) in H1; rewrite <- (plus_IZR (Int_part r1 + Int_part r2) 1) in H1; generalize (up_tech (r1 + r2) (Int_part r1 + Int_part r2) H0 H1); intro; clear H0 H1; unfold Int_part at 1; omega. Qed. 
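(**********)
(* Illustrative corollary, added as a sanity check (the name
   [frac_part_INR_example] is not part of the original development): the
   fractional part of a natural number is zero.  It only uses [Int_part_INR]
   and [INR_IZR_INZ], both available above. *)
Lemma frac_part_INR_example : forall n:nat, frac_part (INR n) = 0.
Proof.
  intro n; unfold frac_part; rewrite Int_part_INR; rewrite <- INR_IZR_INZ;
    ring.
Qed.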
(**********) Lemma plus_frac_part1 : forall r1 r2:R, frac_part r1 + frac_part r2 >= 1 -> frac_part (r1 + r2) = frac_part r1 + frac_part r2 - 1. Proof. intros; unfold frac_part; generalize (plus_Int_part1 r1 r2 H); intro; rewrite H0; rewrite (plus_IZR (Int_part r1 + Int_part r2) 1); rewrite (plus_IZR (Int_part r1) (Int_part r2)); simpl; unfold Rminus at 3 4; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (r2 + - IZR (Int_part r2))); rewrite (Rplus_comm r2 (- IZR (Int_part r2))); rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- IZR (Int_part r2)) r2); rewrite (Rplus_comm (- IZR (Int_part r1) + - IZR (Int_part r2)) r2); rewrite <- (Rplus_assoc r1 r2 (- IZR (Int_part r1) + - IZR (Int_part r2))); rewrite <- (Ropp_plus_distr (IZR (Int_part r1)) (IZR (Int_part r2))); unfold Rminus; rewrite (Rplus_assoc (r1 + r2) (- (IZR (Int_part r1) + IZR (Int_part r2))) (-1)) ; rewrite <- (Ropp_plus_distr (IZR (Int_part r1) + IZR (Int_part r2)) 1); trivial with zarith real. Qed. (**********) Lemma plus_frac_part2 : forall r1 r2:R, frac_part r1 + frac_part r2 < 1 -> frac_part (r1 + r2) = frac_part r1 + frac_part r2. Proof. intros; unfold frac_part; generalize (plus_Int_part2 r1 r2 H); intro; rewrite H0; rewrite (plus_IZR (Int_part r1) (Int_part r2)); unfold Rminus at 2 3; rewrite (Rplus_assoc r1 (- IZR (Int_part r1)) (r2 + - IZR (Int_part r2))); rewrite (Rplus_comm r2 (- IZR (Int_part r2))); rewrite <- (Rplus_assoc (- IZR (Int_part r1)) (- IZR (Int_part r2)) r2); rewrite (Rplus_comm (- IZR (Int_part r1) + - IZR (Int_part r2)) r2); rewrite <- (Rplus_assoc r1 r2 (- IZR (Int_part r1) + - IZR (Int_part r2))); rewrite <- (Ropp_plus_distr (IZR (Int_part r1)) (IZR (Int_part r2))); unfold Rminus; trivial with zarith real. Qed. coq-8.4pl2/theories/Reals/Rtrigo_reg.v0000640000175000001440000004077712010532755017030 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R, fn = (fun (N:nat) (x:R) => (-1) ^ N / INR (fact (2 * N + 1)) * x ^ (2 * N)) -> CVN_R fn. Proof. unfold CVN_R; unfold CVN_r; intros fn H r. exists (fun n:nat => / INR (fact (2 * n + 1)) * r ^ (2 * n)). cut { l:R | Un_cv (fun n:nat => sum_f_R0 (fun k:nat => Rabs (/ INR (fact (2 * k + 1)) * r ^ (2 * k))) n) l }. intro X; elim X; intros. exists x. split. apply p. intros; rewrite H; unfold Rdiv; do 2 rewrite Rabs_mult; rewrite pow_1_abs; rewrite Rmult_1_l. cut (0 < / INR (fact (2 * n + 1))). intro; rewrite (Rabs_right _ (Rle_ge _ _ (Rlt_le _ _ H1))). apply Rmult_le_compat_l. left; apply H1. rewrite <- RPow_abs; apply pow_maj_Rabs. rewrite Rabs_Rabsolu; unfold Boule in H0; rewrite Rminus_0_r in H0; left; apply H0. apply Rinv_0_lt_compat; apply INR_fact_lt_0. cut ((r:R) <> 0). intro; apply Alembert_C2. intro; apply Rabs_no_R0. apply prod_neq_R0. apply Rinv_neq_0_compat; apply INR_fact_neq_0. apply pow_nonzero; assumption. assert (H1 := Alembert_sin). unfold sin_n in H1; unfold Un_cv in H1; unfold Un_cv; intros. cut (0 < eps / Rsqr r). intro; elim (H1 _ H3); intros N0 H4. exists N0; intros. unfold R_dist; assert (H6 := H4 _ H5). unfold R_dist in H5; replace (Rabs (Rabs (/ INR (fact (2 * S n + 1)) * r ^ (2 * S n)) / Rabs (/ INR (fact (2 * n + 1)) * r ^ (2 * n)))) with (Rsqr r * Rabs ((-1) ^ S n / INR (fact (2 * S n + 1)) / ((-1) ^ n / INR (fact (2 * n + 1))))). apply Rmult_lt_reg_l with (/ Rsqr r). apply Rinv_0_lt_compat; apply Rsqr_pos_lt; assumption. pattern (/ Rsqr r) at 1; rewrite <- (Rabs_right (/ Rsqr r)). rewrite <- Rabs_mult. 
rewrite Rmult_minus_distr_l. rewrite Rmult_0_r; rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite <- (Rmult_comm eps). apply H6. unfold Rsqr; apply prod_neq_R0; assumption. apply Rle_ge; left; apply Rinv_0_lt_compat; apply Rsqr_pos_lt; assumption. unfold Rdiv; rewrite (Rmult_comm (Rsqr r)); repeat rewrite Rabs_mult; rewrite Rabs_Rabsolu; rewrite pow_1_abs. rewrite Rmult_1_l. repeat rewrite Rmult_assoc; apply Rmult_eq_compat_l. rewrite Rinv_mult_distr. rewrite Rinv_involutive. rewrite Rabs_mult. rewrite Rabs_Rinv. rewrite pow_1_abs; rewrite Rinv_1; rewrite Rmult_1_l. rewrite Rinv_mult_distr. rewrite <- Rabs_Rinv. rewrite Rinv_involutive. rewrite Rabs_mult. do 2 rewrite Rabs_Rabsolu. rewrite (Rmult_comm (Rabs (r ^ (2 * S n)))). rewrite Rmult_assoc; apply Rmult_eq_compat_l. rewrite Rabs_Rinv. rewrite Rabs_Rabsolu. repeat rewrite Rabs_right. replace (r ^ (2 * S n)) with (r ^ (2 * n) * r * r). do 2 rewrite <- Rmult_assoc. rewrite <- Rinv_l_sym. unfold Rsqr; ring. apply pow_nonzero; assumption. replace (2 * S n)%nat with (S (S (2 * n))). simpl; ring. ring. apply Rle_ge; apply pow_le; left; apply (cond_pos r). apply Rle_ge; apply pow_le; left; apply (cond_pos r). apply Rabs_no_R0; apply pow_nonzero; assumption. apply INR_fact_neq_0. apply Rinv_neq_0_compat; apply INR_fact_neq_0. apply Rabs_no_R0; apply Rinv_neq_0_compat; apply INR_fact_neq_0. apply Rabs_no_R0; apply pow_nonzero; assumption. apply pow_nonzero; discrR. apply INR_fact_neq_0. apply pow_nonzero; discrR. apply Rinv_neq_0_compat; apply INR_fact_neq_0. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; apply Rsqr_pos_lt; assumption ]. assert (H0 := cond_pos r); red; intro; rewrite H1 in H0; elim (Rlt_irrefl _ H0). Qed. (** (sin h)/h -> 1 when h -> 0 *) Lemma derivable_pt_lim_sin_0 : derivable_pt_lim sin 0 1. Proof. unfold derivable_pt_lim; intros. set (fn := fun (N:nat) (x:R) => (-1) ^ N / INR (fact (2 * N + 1)) * x ^ (2 * N)). cut (CVN_R fn). intro; cut (forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }). intro cv. set (r := mkposreal _ Rlt_0_1). cut (CVN_r fn r). intro; cut (forall (n:nat) (y:R), Boule 0 r y -> continuity_pt (fn n) y). intro; cut (Boule 0 r 0). intro; assert (H2 := SFL_continuity_pt _ cv _ X0 H0 _ H1). unfold continuity_pt in H2; unfold continue_in in H2; unfold limit1_in in H2; unfold limit_in in H2; simpl in H2; unfold R_dist in H2. elim (H2 _ H); intros alp H3. elim H3; intros. exists (mkposreal _ H4). simpl; intros. rewrite sin_0; rewrite Rplus_0_l; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r. cut (Rabs (SFL fn cv h - SFL fn cv 0) < eps). intro; cut (SFL fn cv 0 = 1). intro; cut (SFL fn cv h = sin h / h). intro; rewrite H9 in H8; rewrite H10 in H8. apply H8. unfold SFL, sin. case (cv h); intros. case (exist_sin (Rsqr h)); intros. unfold Rdiv; rewrite (Rinv_r_simpl_m h x0 H6). eapply UL_sequence. apply u. unfold sin_in in s; unfold sin_n, infinite_sum in s; unfold SP, fn, Un_cv; intros. elim (s _ H10); intros N0 H11. exists N0; intros. unfold R_dist; unfold R_dist in H11. replace (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * h ^ (2 * k)) n) with (sum_f_R0 (fun i:nat => (-1) ^ i / INR (fact (2 * i + 1)) * Rsqr h ^ i) n). apply H11; assumption. apply sum_eq; intros; apply Rmult_eq_compat_l; unfold Rsqr; rewrite pow_sqr; reflexivity. unfold SFL, sin. case (cv 0); intros. eapply UL_sequence. apply u. unfold SP, fn; unfold Un_cv; intros; exists 1%nat; intros. 
unfold R_dist; replace (sum_f_R0 (fun k:nat => (-1) ^ k / INR (fact (2 * k + 1)) * 0 ^ (2 * k)) n) with 1. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. rewrite decomp_sum. simpl; rewrite Rmult_1_r; unfold Rdiv; rewrite Rinv_1; rewrite Rmult_1_r; pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_eq_compat_l. symmetry ; apply sum_eq_R0; intros. rewrite Rmult_0_l; rewrite Rmult_0_r; reflexivity. unfold ge in H10; apply lt_le_trans with 1%nat; [ apply lt_n_Sn | apply H10 ]. apply H5. split. unfold D_x, no_cond; split. trivial. apply (not_eq_sym (A:=R)); apply H6. unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply H7. unfold Boule; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_R0; apply (cond_pos r). intros; unfold fn; replace (fun x:R => (-1) ^ n / INR (fact (2 * n + 1)) * x ^ (2 * n)) with (fct_cte ((-1) ^ n / INR (fact (2 * n + 1))) * pow_fct (2 * n))%F; [ idtac | reflexivity ]. apply continuity_pt_mult. apply derivable_continuous_pt. apply derivable_pt_const. apply derivable_continuous_pt. apply (derivable_pt_pow (2 * n) y). apply (X r). apply (CVN_R_CVS _ X). apply CVN_R_sin; unfold fn; reflexivity. Qed. (** ((cos h)-1)/h -> 0 when h -> 0 *) Lemma derivable_pt_lim_cos_0 : derivable_pt_lim cos 0 0. Proof. unfold derivable_pt_lim; intros. assert (H0 := derivable_pt_lim_sin_0). unfold derivable_pt_lim in H0. cut (0 < eps / 2). intro; elim (H0 _ H1); intros del H2. cut (continuity_pt sin 0). intro; unfold continuity_pt in H3; unfold continue_in in H3; unfold limit1_in in H3; unfold limit_in in H3; simpl in H3; unfold R_dist in H3. cut (0 < eps / 2); [ intro | assumption ]. elim (H3 _ H4); intros del_c H5. cut (0 < Rmin del del_c). intro; set (delta := mkposreal _ H6). exists delta; intros. rewrite Rplus_0_l; replace (cos h - cos 0) with (-2 * Rsqr (sin (h / 2))). unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r. unfold Rdiv; do 2 rewrite Ropp_mult_distr_l_reverse. rewrite Rabs_Ropp. replace (2 * Rsqr (sin (h * / 2)) * / h) with (sin (h / 2) * (sin (h / 2) / (h / 2) - 1) + sin (h / 2)). apply Rle_lt_trans with (Rabs (sin (h / 2) * (sin (h / 2) / (h / 2) - 1)) + Rabs (sin (h / 2))). apply Rabs_triang. rewrite (double_var eps); apply Rplus_lt_compat. apply Rle_lt_trans with (Rabs (sin (h / 2) / (h / 2) - 1)). rewrite Rabs_mult; rewrite Rmult_comm; pattern (Rabs (sin (h / 2) / (h / 2) - 1)) at 2; rewrite <- Rmult_1_r; apply Rmult_le_compat_l. apply Rabs_pos. assert (H9 := SIN_bound (h / 2)). unfold Rabs; case (Rcase_abs (sin (h / 2))); intro. pattern 1 at 3; rewrite <- (Ropp_involutive 1). apply Ropp_le_contravar. elim H9; intros; assumption. elim H9; intros; assumption. cut (Rabs (h / 2) < del). intro; cut (h / 2 <> 0). intro; assert (H11 := H2 _ H10 H9). rewrite Rplus_0_l in H11; rewrite sin_0 in H11. rewrite Rminus_0_r in H11; apply H11. unfold Rdiv; apply prod_neq_R0. apply H7. apply Rinv_neq_0_compat; discrR. apply Rlt_trans with (del / 2). unfold Rdiv; rewrite Rabs_mult. rewrite (Rabs_right (/ 2)). do 2 rewrite <- (Rmult_comm (/ 2)); apply Rmult_lt_compat_l. apply Rinv_0_lt_compat; prove_sup0. apply Rlt_le_trans with (pos delta). apply H8. unfold delta; simpl; apply Rmin_l. apply Rle_ge; left; apply Rinv_0_lt_compat; prove_sup0. rewrite <- (Rplus_0_r (del / 2)); pattern del at 1; rewrite (double_var del); apply Rplus_lt_compat_l; unfold Rdiv; apply Rmult_lt_0_compat. apply (cond_pos del). apply Rinv_0_lt_compat; prove_sup0. elim H5; intros; assert (H11 := H10 (h / 2)). rewrite sin_0 in H11; do 2 rewrite Rminus_0_r in H11. apply H11. split. 
unfold D_x, no_cond; split. trivial. apply (not_eq_sym (A:=R)); unfold Rdiv; apply prod_neq_R0. apply H7. apply Rinv_neq_0_compat; discrR. apply Rlt_trans with (del_c / 2). unfold Rdiv; rewrite Rabs_mult. rewrite (Rabs_right (/ 2)). do 2 rewrite <- (Rmult_comm (/ 2)). apply Rmult_lt_compat_l. apply Rinv_0_lt_compat; prove_sup0. apply Rlt_le_trans with (pos delta). apply H8. unfold delta; simpl; apply Rmin_r. apply Rle_ge; left; apply Rinv_0_lt_compat; prove_sup0. rewrite <- (Rplus_0_r (del_c / 2)); pattern del_c at 2; rewrite (double_var del_c); apply Rplus_lt_compat_l. unfold Rdiv; apply Rmult_lt_0_compat. apply H9. apply Rinv_0_lt_compat; prove_sup0. rewrite Rmult_minus_distr_l; rewrite Rmult_1_r; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; rewrite (Rmult_comm 2); unfold Rdiv, Rsqr. repeat rewrite Rmult_assoc. repeat apply Rmult_eq_compat_l. rewrite Rinv_mult_distr. rewrite Rinv_involutive. apply Rmult_comm. discrR. apply H7. apply Rinv_neq_0_compat; discrR. pattern h at 2; replace h with (2 * (h / 2)). rewrite (cos_2a_sin (h / 2)). rewrite cos_0; unfold Rsqr; ring. unfold Rdiv; rewrite <- Rmult_assoc; apply Rinv_r_simpl_m. discrR. unfold Rmin; case (Rle_dec del del_c); intro. apply (cond_pos del). elim H5; intros; assumption. apply continuity_sin. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. Qed. (**********) Theorem derivable_pt_lim_sin : forall x:R, derivable_pt_lim sin x (cos x). Proof. intro; assert (H0 := derivable_pt_lim_sin_0). assert (H := derivable_pt_lim_cos_0). unfold derivable_pt_lim in H0, H. unfold derivable_pt_lim; intros. cut (0 < eps / 2); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ apply H1 | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H0 _ H2); intros alp1 H3. elim (H _ H2); intros alp2 H4. set (alp := Rmin alp1 alp2). cut (0 < alp). intro; exists (mkposreal _ H5); intros. replace ((sin (x + h) - sin x) / h - cos x) with (sin x * ((cos h - 1) / h) + cos x * (sin h / h - 1)). apply Rle_lt_trans with (Rabs (sin x * ((cos h - 1) / h)) + Rabs (cos x * (sin h / h - 1))). apply Rabs_triang. rewrite (double_var eps); apply Rplus_lt_compat. apply Rle_lt_trans with (Rabs ((cos h - 1) / h)). rewrite Rabs_mult; rewrite Rmult_comm; pattern (Rabs ((cos h - 1) / h)) at 2; rewrite <- Rmult_1_r; apply Rmult_le_compat_l. apply Rabs_pos. assert (H8 := SIN_bound x); elim H8; intros. unfold Rabs; case (Rcase_abs (sin x)); intro. rewrite <- (Ropp_involutive 1). apply Ropp_le_contravar; assumption. assumption. cut (Rabs h < alp2). intro; assert (H9 := H4 _ H6 H8). rewrite cos_0 in H9; rewrite Rplus_0_l in H9; rewrite Rminus_0_r in H9; apply H9. apply Rlt_le_trans with alp. apply H7. unfold alp; apply Rmin_r. apply Rle_lt_trans with (Rabs (sin h / h - 1)). rewrite Rabs_mult; rewrite Rmult_comm; pattern (Rabs (sin h / h - 1)) at 2; rewrite <- Rmult_1_r; apply Rmult_le_compat_l. apply Rabs_pos. assert (H8 := COS_bound x); elim H8; intros. unfold Rabs; case (Rcase_abs (cos x)); intro. rewrite <- (Ropp_involutive 1); apply Ropp_le_contravar; assumption. assumption. cut (Rabs h < alp1). intro; assert (H9 := H3 _ H6 H8). rewrite sin_0 in H9; rewrite Rplus_0_l in H9; rewrite Rminus_0_r in H9; apply H9. apply Rlt_le_trans with alp. apply H7. unfold alp; apply Rmin_l. rewrite sin_plus; unfold Rminus, Rdiv; repeat rewrite Rmult_plus_distr_r; repeat rewrite Rmult_plus_distr_l; repeat rewrite Rmult_assoc; repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. 
rewrite (Rplus_comm (sin x * (-1 * / h))); repeat rewrite Rplus_assoc; apply Rplus_eq_compat_l. rewrite Ropp_mult_distr_r_reverse; rewrite Ropp_mult_distr_l_reverse; rewrite Rmult_1_r; rewrite Rmult_1_l; rewrite Ropp_mult_distr_r_reverse; rewrite <- Ropp_mult_distr_l_reverse; apply Rplus_comm. unfold alp; unfold Rmin; case (Rle_dec alp1 alp2); intro. apply (cond_pos alp1). apply (cond_pos alp2). Qed. Lemma derivable_pt_lim_cos : forall x:R, derivable_pt_lim cos x (- sin x). Proof. intro; cut (forall h:R, sin (h + PI / 2) = cos h). intro; replace (- sin x) with (cos (x + PI / 2) * (1 + 0)). generalize (derivable_pt_lim_comp (id + fct_cte (PI / 2))%F sin); intros. cut (derivable_pt_lim (id + fct_cte (PI / 2)) x (1 + 0)). cut (derivable_pt_lim sin ((id + fct_cte (PI / 2))%F x) (cos (x + PI / 2))). intros; generalize (H0 _ _ _ H2 H1); replace (comp sin (id + fct_cte (PI / 2))%F) with (fun x:R => sin (x + PI / 2)); [ idtac | reflexivity ]. unfold derivable_pt_lim; intros. elim (H3 eps H4); intros. exists x0. intros; rewrite <- (H (x + h)); rewrite <- (H x); apply H5; assumption. apply derivable_pt_lim_sin. apply derivable_pt_lim_plus. apply derivable_pt_lim_id. apply derivable_pt_lim_const. rewrite sin_cos; rewrite <- (Rplus_comm x); ring. intro; rewrite cos_sin; rewrite Rplus_comm; reflexivity. Qed. Lemma derivable_pt_sin : forall x:R, derivable_pt sin x. Proof. unfold derivable_pt; intro. exists (cos x). apply derivable_pt_lim_sin. Qed. Lemma derivable_pt_cos : forall x:R, derivable_pt cos x. Proof. unfold derivable_pt; intro. exists (- sin x). apply derivable_pt_lim_cos. Qed. Lemma derivable_sin : derivable sin. Proof. unfold derivable; intro; apply derivable_pt_sin. Qed. Lemma derivable_cos : derivable cos. Proof. unfold derivable; intro; apply derivable_pt_cos. Qed. Lemma derive_pt_sin : forall x:R, derive_pt sin x (derivable_pt_sin _) = cos x. Proof. intros; apply derive_pt_eq_0. apply derivable_pt_lim_sin. Qed. Lemma derive_pt_cos : forall x:R, derive_pt cos x (derivable_pt_cos _) = - sin x. Proof. intros; apply derive_pt_eq_0. apply derivable_pt_lim_cos. Qed. coq-8.4pl2/theories/Reals/RiemannInt.v0000640000175000001440000041314512010532755016762 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (a b:R) : Type := forall eps:posreal, { phi:StepFun a b & { psi:StepFun a b | (forall t:R, Rmin a b <= t <= Rmax a b -> Rabs (f t - phi t) <= psi t) /\ Rabs (RiemannInt_SF psi) < eps } }. Definition phi_sequence (un:nat -> posreal) (f:R -> R) (a b:R) (pr:Riemann_integrable f a b) (n:nat) := projT1 (pr (un n)). Lemma phi_sequence_prop : forall (un:nat -> posreal) (f:R -> R) (a b:R) (pr:Riemann_integrable f a b) (N:nat), { psi:StepFun a b | (forall t:R, Rmin a b <= t <= Rmax a b -> Rabs (f t - phi_sequence un pr N t) <= psi t) /\ Rabs (RiemannInt_SF psi) < un N }. Proof. intros; apply (projT2 (pr (un N))). Qed. Lemma RiemannInt_P1 : forall (f:R -> R) (a b:R), Riemann_integrable f a b -> Riemann_integrable f b a. Proof. unfold Riemann_integrable; intros; elim (X eps); clear X; intros; elim p; clear p; intros; exists (mkStepFun (StepFun_P6 (pre x))); exists (mkStepFun (StepFun_P6 (pre x0))); elim p; clear p; intros; split. 
intros; apply (H t); elim H1; clear H1; intros; split; [ apply Rle_trans with (Rmin b a); try assumption; right; unfold Rmin | apply Rle_trans with (Rmax b a); try assumption; right; unfold Rmax ]; (case (Rle_dec a b); case (Rle_dec b a); intros; try reflexivity || apply Rle_antisym; [ assumption | assumption | auto with real | auto with real ]). generalize H0; unfold RiemannInt_SF; case (Rle_dec a b); case (Rle_dec b a); intros; (replace (Int_SF (subdivision_val (mkStepFun (StepFun_P6 (pre x0)))) (subdivision (mkStepFun (StepFun_P6 (pre x0))))) with (Int_SF (subdivision_val x0) (subdivision x0)); [ idtac | apply StepFun_P17 with (fe x0) a b; [ apply StepFun_P1 | apply StepFun_P2; apply (StepFun_P1 (mkStepFun (StepFun_P6 (pre x0)))) ] ]). apply H1. rewrite Rabs_Ropp; apply H1. rewrite Rabs_Ropp in H1; apply H1. apply H1. Qed. Lemma RiemannInt_P2 : forall (f:R -> R) (a b:R) (un:nat -> posreal) (vn wn:nat -> StepFun a b), Un_cv un 0 -> a <= b -> (forall n:nat, (forall t:R, Rmin a b <= t <= Rmax a b -> Rabs (f t - vn n t) <= wn n t) /\ Rabs (RiemannInt_SF (wn n)) < un n) -> { l:R | Un_cv (fun N:nat => RiemannInt_SF (vn N)) l }. Proof. intros; apply R_complete; unfold Un_cv in H; unfold Cauchy_crit; intros; assert (H3 : 0 < eps / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H _ H3); intros N0 H4; exists N0; intros; unfold R_dist; unfold R_dist in H4; elim (H1 n); elim (H1 m); intros; replace (RiemannInt_SF (vn n) - RiemannInt_SF (vn m)) with (RiemannInt_SF (vn n) + -1 * RiemannInt_SF (vn m)); [ idtac | ring ]; rewrite <- StepFun_P30; apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (vn n) (vn m)))))). apply StepFun_P34; assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 (wn n) (wn m)))). apply StepFun_P37; try assumption. intros; simpl; apply Rle_trans with (Rabs (vn n x - f x) + Rabs (f x - vn m x)). replace (vn n x + -1 * vn m x) with (vn n x - f x + (f x - vn m x)); [ apply Rabs_triang | ring ]. assert (H12 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. assert (H13 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. rewrite <- H12 in H11; pattern b at 2 in H11; rewrite <- H13 in H11; rewrite Rmult_1_l; apply Rplus_le_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H9. elim H11; intros; split; left; assumption. apply H7. elim H11; intros; split; left; assumption. rewrite StepFun_P30; rewrite Rmult_1_l; apply Rlt_trans with (un n + un m). apply Rle_lt_trans with (Rabs (RiemannInt_SF (wn n)) + Rabs (RiemannInt_SF (wn m))). apply Rplus_le_compat; apply RRle_abs. apply Rplus_lt_compat; assumption. apply Rle_lt_trans with (Rabs (un n) + Rabs (un m)). apply Rplus_le_compat; apply RRle_abs. replace (pos (un n)) with (un n - 0); [ idtac | ring ]; replace (pos (un m)) with (un m - 0); [ idtac | ring ]; rewrite (double_var eps); apply Rplus_lt_compat; apply H4; assumption. Qed. Lemma RiemannInt_P3 : forall (f:R -> R) (a b:R) (un:nat -> posreal) (vn wn:nat -> StepFun a b), Un_cv un 0 -> (forall n:nat, (forall t:R, Rmin a b <= t <= Rmax a b -> Rabs (f t - vn n t) <= wn n t) /\ Rabs (RiemannInt_SF (wn n)) < un n) -> { l:R | Un_cv (fun N:nat => RiemannInt_SF (vn N)) l }. Proof. intros; case (Rle_dec a b); intro. apply RiemannInt_P2 with f un wn; assumption. assert (H1 : b <= a); auto with real. 
set (vn' := fun n:nat => mkStepFun (StepFun_P6 (pre (vn n)))); set (wn' := fun n:nat => mkStepFun (StepFun_P6 (pre (wn n)))); assert (H2 : forall n:nat, (forall t:R, Rmin b a <= t <= Rmax b a -> Rabs (f t - vn' n t) <= wn' n t) /\ Rabs (RiemannInt_SF (wn' n)) < un n). intro; elim (H0 n0); intros; split. intros; apply (H2 t); elim H4; clear H4; intros; split; [ apply Rle_trans with (Rmin b a); try assumption; right; unfold Rmin | apply Rle_trans with (Rmax b a); try assumption; right; unfold Rmax ]; (case (Rle_dec a b); case (Rle_dec b a); intros; try reflexivity || apply Rle_antisym; [ assumption | assumption | auto with real | auto with real ]). generalize H3; unfold RiemannInt_SF; case (Rle_dec a b); case (Rle_dec b a); unfold wn'; intros; (replace (Int_SF (subdivision_val (mkStepFun (StepFun_P6 (pre (wn n0))))) (subdivision (mkStepFun (StepFun_P6 (pre (wn n0)))))) with (Int_SF (subdivision_val (wn n0)) (subdivision (wn n0))); [ idtac | apply StepFun_P17 with (fe (wn n0)) a b; [ apply StepFun_P1 | apply StepFun_P2; apply (StepFun_P1 (mkStepFun (StepFun_P6 (pre (wn n0))))) ] ]). apply H4. rewrite Rabs_Ropp; apply H4. rewrite Rabs_Ropp in H4; apply H4. apply H4. assert (H3 := RiemannInt_P2 _ _ _ _ H H1 H2); elim H3; intros; exists (- x); unfold Un_cv; unfold Un_cv in p; intros; elim (p _ H4); intros; exists x0; intros; generalize (H5 _ H6); unfold R_dist, RiemannInt_SF; case (Rle_dec b a); case (Rle_dec a b); intros. elim n; assumption. unfold vn' in H7; replace (Int_SF (subdivision_val (vn n0)) (subdivision (vn n0))) with (Int_SF (subdivision_val (mkStepFun (StepFun_P6 (pre (vn n0))))) (subdivision (mkStepFun (StepFun_P6 (pre (vn n0)))))); [ unfold Rminus; rewrite Ropp_involutive; rewrite <- Rabs_Ropp; rewrite Ropp_plus_distr; rewrite Ropp_involutive; apply H7 | symmetry ; apply StepFun_P17 with (fe (vn n0)) a b; [ apply StepFun_P1 | apply StepFun_P2; apply (StepFun_P1 (mkStepFun (StepFun_P6 (pre (vn n0))))) ] ]. elim n1; assumption. elim n2; assumption. Qed. Lemma RiemannInt_exists : forall (f:R -> R) (a b:R) (pr:Riemann_integrable f a b) (un:nat -> posreal), Un_cv un 0 -> { l:R | Un_cv (fun N:nat => RiemannInt_SF (phi_sequence un pr N)) l }. Proof. intros f; intros; apply RiemannInt_P3 with f un (fun n:nat => proj1_sig (phi_sequence_prop un pr n)); [ apply H | intro; apply (proj2_sig (phi_sequence_prop un pr n)) ]. Qed. Lemma RiemannInt_P4 : forall (f:R -> R) (a b l:R) (pr1 pr2:Riemann_integrable f a b) (un vn:nat -> posreal), Un_cv un 0 -> Un_cv vn 0 -> Un_cv (fun N:nat => RiemannInt_SF (phi_sequence un pr1 N)) l -> Un_cv (fun N:nat => RiemannInt_SF (phi_sequence vn pr2 N)) l. Proof. unfold Un_cv; unfold R_dist; intros f; intros; assert (H3 : 0 < eps / 3). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H _ H3); clear H; intros N0 H; elim (H0 _ H3); clear H0; intros N1 H0; elim (H1 _ H3); clear H1; intros N2 H1; set (N := max (max N0 N1) N2); exists N; intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi_sequence vn pr2 n) - RiemannInt_SF (phi_sequence un pr1 n)) + Rabs (RiemannInt_SF (phi_sequence un pr1 n) - l)). replace (RiemannInt_SF (phi_sequence vn pr2 n) - l) with (RiemannInt_SF (phi_sequence vn pr2 n) - RiemannInt_SF (phi_sequence un pr1 n) + (RiemannInt_SF (phi_sequence un pr1 n) - l)); [ apply Rabs_triang | ring ]. replace eps with (2 * (eps / 3) + eps / 3). apply Rplus_lt_compat. 
elim (phi_sequence_prop vn pr2 n); intros psi_vn H5; elim (phi_sequence_prop un pr1 n); intros psi_un H6; replace (RiemannInt_SF (phi_sequence vn pr2 n) - RiemannInt_SF (phi_sequence un pr1 n)) with (RiemannInt_SF (phi_sequence vn pr2 n) + -1 * RiemannInt_SF (phi_sequence un pr1 n)); [ idtac | ring ]; rewrite <- StepFun_P30. case (Rle_dec a b); intro. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (phi_sequence vn pr2 n) (phi_sequence un pr1 n)))))). apply StepFun_P34; assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 psi_un psi_vn))). apply StepFun_P37; try assumption; intros; simpl; rewrite Rmult_1_l; apply Rle_trans with (Rabs (phi_sequence vn pr2 n x - f x) + Rabs (f x - phi_sequence un pr1 n x)). replace (phi_sequence vn pr2 n x + -1 * phi_sequence un pr1 n x) with (phi_sequence vn pr2 n x - f x + (f x - phi_sequence un pr1 n x)); [ apply Rabs_triang | ring ]. assert (H10 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. assert (H11 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. rewrite (Rplus_comm (psi_un x)); apply Rplus_le_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; elim H5; intros; apply H8. rewrite H10; rewrite H11; elim H7; intros; split; left; assumption. elim H6; intros; apply H8. rewrite H10; rewrite H11; elim H7; intros; split; left; assumption. rewrite StepFun_P30; rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat. apply Rlt_trans with (pos (un n)). elim H6; intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF psi_un)). apply RRle_abs. assumption. replace (pos (un n)) with (Rabs (un n - 0)); [ apply H; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_trans with (max N0 N1); apply le_max_l | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (un n)) ]. apply Rlt_trans with (pos (vn n)). elim H5; intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF psi_vn)). apply RRle_abs; assumption. assumption. replace (pos (vn n)) with (Rabs (vn n - 0)); [ apply H0; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_trans with (max N0 N1); [ apply le_max_r | apply le_max_l ] | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (vn n)) ]. rewrite StepFun_P39; rewrite Rabs_Ropp; apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P6 (pre (mkStepFun (StepFun_P28 (-1) (phi_sequence vn pr2 n) (phi_sequence un pr1 n))))))))). apply StepFun_P34; try auto with real. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P6 (pre (mkStepFun (StepFun_P28 1 psi_vn psi_un)))))). apply StepFun_P37. auto with real. intros; simpl; rewrite Rmult_1_l; apply Rle_trans with (Rabs (phi_sequence vn pr2 n x - f x) + Rabs (f x - phi_sequence un pr1 n x)). replace (phi_sequence vn pr2 n x + -1 * phi_sequence un pr1 n x) with (phi_sequence vn pr2 n x - f x + (f x - phi_sequence un pr1 n x)); [ apply Rabs_triang | ring ]. assert (H10 : Rmin a b = b). unfold Rmin; case (Rle_dec a b); intro; [ elim n0; assumption | reflexivity ]. assert (H11 : Rmax a b = a). unfold Rmax; case (Rle_dec a b); intro; [ elim n0; assumption | reflexivity ]. apply Rplus_le_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; elim H5; intros; apply H8. rewrite H10; rewrite H11; elim H7; intros; split; left; assumption. elim H6; intros; apply H8. 
rewrite H10; rewrite H11; elim H7; intros; split; left; assumption. rewrite <- (Ropp_involutive (RiemannInt_SF (mkStepFun (StepFun_P6 (pre (mkStepFun (StepFun_P28 1 psi_vn psi_un))))))) ; rewrite <- StepFun_P39; rewrite StepFun_P30; rewrite Rmult_1_l; rewrite double; rewrite Ropp_plus_distr; apply Rplus_lt_compat. apply Rlt_trans with (pos (vn n)). elim H5; intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF psi_vn)). rewrite <- Rabs_Ropp; apply RRle_abs. assumption. replace (pos (vn n)) with (Rabs (vn n - 0)); [ apply H0; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_trans with (max N0 N1); [ apply le_max_r | apply le_max_l ] | unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (vn n)) ]. apply Rlt_trans with (pos (un n)). elim H6; intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF psi_un)). rewrite <- Rabs_Ropp; apply RRle_abs; assumption. assumption. replace (pos (un n)) with (Rabs (un n - 0)); [ apply H; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_trans with (max N0 N1); apply le_max_l | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (un n)) ]. apply H1; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_max_r. apply Rmult_eq_reg_l with 3; [ unfold Rdiv; rewrite Rmult_plus_distr_l; do 2 rewrite (Rmult_comm 3); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. Qed. Lemma RinvN_pos : forall n:nat, 0 < / (INR n + 1). Proof. intro; apply Rinv_0_lt_compat; apply Rplus_le_lt_0_compat; [ apply pos_INR | apply Rlt_0_1 ]. Qed. Definition RinvN (N:nat) : posreal := mkposreal _ (RinvN_pos N). Lemma RinvN_cv : Un_cv RinvN 0. Proof. unfold Un_cv; intros; assert (H0 := archimed (/ eps)); elim H0; clear H0; intros; assert (H2 : (0 <= up (/ eps))%Z). apply le_IZR; left; apply Rlt_trans with (/ eps); [ apply Rinv_0_lt_compat; assumption | assumption ]. elim (IZN _ H2); intros; exists x; intros; unfold R_dist; simpl; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; assert (H5 : 0 < INR n + 1). apply Rplus_le_lt_0_compat; [ apply pos_INR | apply Rlt_0_1 ]. rewrite Rabs_right; [ idtac | left; change (0 < / (INR n + 1)); apply Rinv_0_lt_compat; assumption ]; apply Rle_lt_trans with (/ (INR x + 1)). apply Rle_Rinv. apply Rplus_le_lt_0_compat; [ apply pos_INR | apply Rlt_0_1 ]. assumption. do 2 rewrite <- (Rplus_comm 1); apply Rplus_le_compat_l; apply le_INR; apply H4. rewrite <- (Rinv_involutive eps). apply Rinv_lt_contravar. apply Rmult_lt_0_compat. apply Rinv_0_lt_compat; assumption. apply Rplus_le_lt_0_compat; [ apply pos_INR | apply Rlt_0_1 ]. apply Rlt_trans with (INR x); [ rewrite INR_IZR_INZ; rewrite <- H3; apply H0 | pattern (INR x) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rlt_0_1 ]. red; intro; rewrite H6 in H; elim (Rlt_irrefl _ H). Qed. (**********) Definition RiemannInt (f:R -> R) (a b:R) (pr:Riemann_integrable f a b) : R := let (a,_) := RiemannInt_exists pr RinvN RinvN_cv in a. Lemma RiemannInt_P5 : forall (f:R -> R) (a b:R) (pr1 pr2:Riemann_integrable f a b), RiemannInt pr1 = RiemannInt pr2. Proof. intros; unfold RiemannInt; case (RiemannInt_exists pr1 RinvN RinvN_cv); case (RiemannInt_exists pr2 RinvN RinvN_cv); intros; eapply UL_sequence; [ apply u0 | apply RiemannInt_P4 with pr2 RinvN; apply RinvN_cv || assumption ]. Qed. 
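(**********)
(* Usage sketch, added for illustration (the name [RiemannInt_P5_example] is
   not part of the original development): by [RiemannInt_P5] the value of
   [RiemannInt] does not depend on the integrability proof, so two such proofs
   may be exchanged freely under the integral sign. *)
Lemma RiemannInt_P5_example :
  forall (f:R -> R) (a b:R) (pr1 pr2:Riemann_integrable f a b),
    RiemannInt pr1 - RiemannInt pr2 = 0.
Proof.
  intros; rewrite (RiemannInt_P5 pr1 pr2); ring.
Qed.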
(***************************************) (** C°([a,b]) is included in L1([a,b]) *) (***************************************) Lemma maxN : forall (a b:R) (del:posreal), a < b -> { n:nat | a + INR n * del < b /\ b <= a + INR (S n) * del }. Proof. intros; set (I := fun n:nat => a + INR n * del < b); assert (H0 : exists n : nat, I n). exists 0%nat; unfold I; rewrite Rmult_0_l; rewrite Rplus_0_r; assumption. cut (Nbound I). intro; assert (H2 := Nzorn H0 H1); elim H2; intros; exists x; elim p; intros; split. apply H3. case (total_order_T (a + INR (S x) * del) b); intro. elim s; intro. assert (H5 := H4 (S x) a0); elim (le_Sn_n _ H5). right; symmetry ; assumption. left; apply r. assert (H1 : 0 <= (b - a) / del). unfold Rdiv; apply Rmult_le_pos; [ apply Rge_le; apply Rge_minus; apply Rle_ge; left; apply H | left; apply Rinv_0_lt_compat; apply (cond_pos del) ]. elim (archimed ((b - a) / del)); intros; assert (H4 : (0 <= up ((b - a) / del))%Z). apply le_IZR; simpl; left; apply Rle_lt_trans with ((b - a) / del); assumption. assert (H5 := IZN _ H4); elim H5; clear H5; intros N H5; unfold Nbound; exists N; intros; unfold I in H6; apply INR_le; rewrite H5 in H2; rewrite <- INR_IZR_INZ in H2; left; apply Rle_lt_trans with ((b - a) / del); try assumption; apply Rmult_le_reg_l with (pos del); [ apply (cond_pos del) | unfold Rdiv; rewrite <- (Rmult_comm (/ del)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; rewrite Rmult_comm; apply Rplus_le_reg_l with a; replace (a + (b - a)) with b; [ left; assumption | ring ] | assert (H7 := cond_pos del); red; intro; rewrite H8 in H7; elim (Rlt_irrefl _ H7) ] ]. Qed. Fixpoint SubEquiN (N:nat) (x y:R) (del:posreal) : Rlist := match N with | O => cons y nil | S p => cons x (SubEquiN p (x + del) y del) end. Definition max_N (a b:R) (del:posreal) (h:a < b) : nat := let (N,_) := maxN del h in N. Definition SubEqui (a b:R) (del:posreal) (h:a < b) : Rlist := SubEquiN (S (max_N del h)) a b del. Lemma Heine_cor1 : forall (f:R -> R) (a b:R), a < b -> (forall x:R, a <= x <= b -> continuity_pt f x) -> forall eps:posreal, { delta:posreal | delta <= b - a /\ (forall x y:R, a <= x <= b -> a <= y <= b -> Rabs (x - y) < delta -> Rabs (f x - f y) < eps) }. Proof. intro f; intros; set (E := fun l:R => 0 < l <= b - a /\ (forall x y:R, a <= x <= b -> a <= y <= b -> Rabs (x - y) < l -> Rabs (f x - f y) < eps)); assert (H1 : bound E). unfold bound; exists (b - a); unfold is_upper_bound; intros; unfold E in H1; elim H1; clear H1; intros H1 _; elim H1; intros; assumption. assert (H2 : exists x : R, E x). assert (H2 := Heine f (fun x:R => a <= x <= b) (compact_P3 a b) H0 eps); elim H2; intros; exists (Rmin x (b - a)); unfold E; split; [ split; [ unfold Rmin; case (Rle_dec x (b - a)); intro; [ apply (cond_pos x) | apply Rlt_Rminus; assumption ] | apply Rmin_r ] | intros; apply H3; try assumption; apply Rlt_le_trans with (Rmin x (b - a)); [ assumption | apply Rmin_l ] ]. assert (H3 := completeness E H1 H2); elim H3; intros; cut (0 < x <= b - a). intro; elim H4; clear H4; intros; exists (mkposreal _ H4); split. apply H5. unfold is_lub in p; elim p; intros; unfold is_upper_bound in H6; set (D := Rabs (x0 - y)); elim (classic (exists y : R, D < y /\ E y)); intro. elim H11; intros; elim H12; clear H12; intros; unfold E in H13; elim H13; intros; apply H15; assumption. assert (H12 := not_ex_all_not _ (fun y:R => D < y /\ E y) H11); assert (H13 : is_upper_bound E D). unfold is_upper_bound; intros; assert (H14 := H12 x1); elim (not_and_or (D < x1) (E x1) H14); intro. 
case (Rle_dec x1 D); intro. assumption. elim H15; auto with real. elim H15; assumption. assert (H14 := H7 _ H13); elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H14 H10)). unfold is_lub in p; unfold is_upper_bound in p; elim p; clear p; intros; split. elim H2; intros; assert (H7 := H4 _ H6); unfold E in H6; elim H6; clear H6; intros H6 _; elim H6; intros; apply Rlt_le_trans with x0; assumption. apply H5; intros; unfold E in H6; elim H6; clear H6; intros H6 _; elim H6; intros; assumption. Qed. Lemma Heine_cor2 : forall (f:R -> R) (a b:R), (forall x:R, a <= x <= b -> continuity_pt f x) -> forall eps:posreal, { delta:posreal | forall x y:R, a <= x <= b -> a <= y <= b -> Rabs (x - y) < delta -> Rabs (f x - f y) < eps }. Proof. intro f; intros; case (total_order_T a b); intro. elim s; intro. assert (H0 := Heine_cor1 a0 H eps); elim H0; intros; exists x; elim p; intros; apply H2; assumption. exists (mkposreal _ Rlt_0_1); intros; assert (H3 : x = y); [ elim H0; elim H1; intros; rewrite b0 in H3; rewrite b0 in H5; apply Rle_antisym; apply Rle_trans with b; assumption | rewrite H3; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply (cond_pos eps) ]. exists (mkposreal _ Rlt_0_1); intros; elim H0; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ (Rle_trans _ _ _ H3 H4) r)). Qed. Lemma SubEqui_P1 : forall (a b:R) (del:posreal) (h:a < b), pos_Rl (SubEqui del h) 0 = a. Proof. intros; unfold SubEqui; case (maxN del h); intros; reflexivity. Qed. Lemma SubEqui_P2 : forall (a b:R) (del:posreal) (h:a < b), pos_Rl (SubEqui del h) (pred (Rlength (SubEqui del h))) = b. Proof. intros; unfold SubEqui; case (maxN del h); intros; clear a0; cut (forall (x:nat) (a:R) (del:posreal), pos_Rl (SubEquiN (S x) a b del) (pred (Rlength (SubEquiN (S x) a b del))) = b); [ intro; apply H | simple induction x0; [ intros; reflexivity | intros; change (pos_Rl (SubEquiN (S n) (a0 + del0) b del0) (pred (Rlength (SubEquiN (S n) (a0 + del0) b del0))) = b) ; apply H ] ]. Qed. Lemma SubEqui_P3 : forall (N:nat) (a b:R) (del:posreal), Rlength (SubEquiN N a b del) = S N. Proof. simple induction N; intros; [ reflexivity | simpl; rewrite H; reflexivity ]. Qed. Lemma SubEqui_P4 : forall (N:nat) (a b:R) (del:posreal) (i:nat), (i < S N)%nat -> pos_Rl (SubEquiN (S N) a b del) i = a + INR i * del. Proof. simple induction N; [ intros; inversion H; [ simpl; ring | elim (le_Sn_O _ H1) ] | intros; induction i as [| i Hreci]; [ simpl; ring | change (pos_Rl (SubEquiN (S n) (a + del) b del) i = a + INR (S i) * del) ; rewrite H; [ rewrite S_INR; ring | apply lt_S_n; apply H0 ] ] ]. Qed. Lemma SubEqui_P5 : forall (a b:R) (del:posreal) (h:a < b), Rlength (SubEqui del h) = S (S (max_N del h)). Proof. intros; unfold SubEqui; apply SubEqui_P3. Qed. Lemma SubEqui_P6 : forall (a b:R) (del:posreal) (h:a < b) (i:nat), (i < S (max_N del h))%nat -> pos_Rl (SubEqui del h) i = a + INR i * del. Proof. intros; unfold SubEqui; apply SubEqui_P4; assumption. Qed. Lemma SubEqui_P7 : forall (a b:R) (del:posreal) (h:a < b), ordered_Rlist (SubEqui del h). Proof. intros; unfold ordered_Rlist; intros; rewrite SubEqui_P5 in H; simpl in H; inversion H. rewrite (SubEqui_P6 del h (i:=(max_N del h))). replace (S (max_N del h)) with (pred (Rlength (SubEqui del h))). rewrite SubEqui_P2; unfold max_N; case (maxN del h); intros; left; elim a0; intros; assumption. rewrite SubEqui_P5; reflexivity. apply lt_n_Sn. repeat rewrite SubEqui_P6. 3: assumption. 2: apply le_lt_n_Sm; assumption. 
apply Rplus_le_compat_l; rewrite S_INR; rewrite Rmult_plus_distr_r; pattern (INR i * del) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; rewrite Rmult_1_l; left; apply (cond_pos del). Qed. Lemma SubEqui_P8 : forall (a b:R) (del:posreal) (h:a < b) (i:nat), (i < Rlength (SubEqui del h))%nat -> a <= pos_Rl (SubEqui del h) i <= b. Proof. intros; split. pattern a at 1; rewrite <- (SubEqui_P1 del h); apply RList_P5. apply SubEqui_P7. elim (RList_P3 (SubEqui del h) (pos_Rl (SubEqui del h) i)); intros; apply H1; exists i; split; [ reflexivity | assumption ]. pattern b at 2; rewrite <- (SubEqui_P2 del h); apply RList_P7; [ apply SubEqui_P7 | elim (RList_P3 (SubEqui del h) (pos_Rl (SubEqui del h) i)); intros; apply H1; exists i; split; [ reflexivity | assumption ] ]. Qed. Lemma SubEqui_P9 : forall (a b:R) (del:posreal) (f:R -> R) (h:a < b), { g:StepFun a b | g b = f b /\ (forall i:nat, (i < pred (Rlength (SubEqui del h)))%nat -> constant_D_eq g (co_interval (pos_Rl (SubEqui del h) i) (pos_Rl (SubEqui del h) (S i))) (f (pos_Rl (SubEqui del h) i))) }. Proof. intros; apply StepFun_P38; [ apply SubEqui_P7 | apply SubEqui_P1 | apply SubEqui_P2 ]. Qed. Lemma RiemannInt_P6 : forall (f:R -> R) (a b:R), a < b -> (forall x:R, a <= x <= b -> continuity_pt f x) -> Riemann_integrable f a b. Proof. intros; unfold Riemann_integrable; intro; assert (H1 : 0 < eps / (2 * (b - a))). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos eps) | apply Rinv_0_lt_compat; apply Rmult_lt_0_compat; [ prove_sup0 | apply Rlt_Rminus; assumption ] ]. assert (H2 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; left; assumption ]. assert (H3 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; left; assumption ]. elim (Heine_cor2 H0 (mkposreal _ H1)); intros del H4; elim (SubEqui_P9 del f H); intros phi [H5 H6]; split with phi; split with (mkStepFun (StepFun_P4 a b (eps / (2 * (b - a))))); split. 2: rewrite StepFun_P18; unfold Rdiv; rewrite Rinv_mult_distr. 2: do 2 rewrite Rmult_assoc; rewrite <- Rinv_l_sym. 2: rewrite Rmult_1_r; rewrite Rabs_right. 2: apply Rmult_lt_reg_l with 2. 2: prove_sup0. 2: rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. 2: rewrite Rmult_1_l; pattern (pos eps) at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; apply (cond_pos eps). 2: discrR. 2: apply Rle_ge; left; apply Rmult_lt_0_compat. 2: apply (cond_pos eps). 2: apply Rinv_0_lt_compat; prove_sup0. 2: apply Rminus_eq_contra; red; intro; clear H6; rewrite H7 in H; elim (Rlt_irrefl _ H). 2: discrR. 2: apply Rminus_eq_contra; red; intro; clear H6; rewrite H7 in H; elim (Rlt_irrefl _ H). intros; rewrite H2 in H7; rewrite H3 in H7; simpl; unfold fct_cte; cut (forall t:R, a <= t <= b -> t = b \/ (exists i : nat, (i < pred (Rlength (SubEqui del H)))%nat /\ co_interval (pos_Rl (SubEqui del H) i) (pos_Rl (SubEqui del H) (S i)) t)). intro; elim (H8 _ H7); intro. rewrite H9; rewrite H5; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; left; assumption. elim H9; clear H9; intros I [H9 H10]; assert (H11 := H6 I H9 t H10); rewrite H11; left; apply H4. assumption. apply SubEqui_P8; apply lt_trans with (pred (Rlength (SubEqui del H))). assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H12 in H9; elim (lt_n_O _ H9). unfold co_interval in H10; elim H10; clear H10; intros; rewrite Rabs_right. rewrite SubEqui_P5 in H9; simpl in H9; inversion H9. apply Rplus_lt_reg_r with (pos_Rl (SubEqui del H) (max_N del H)). 
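(* First case of the inversion: I is the largest admissible index max_N del H; then t < b <= a + INR (S (max_N del H)) * del by the defining property of maxN, so t - pos_Rl (SubEqui del H) I < del. *)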
replace (pos_Rl (SubEqui del H) (max_N del H) + (t - pos_Rl (SubEqui del H) (max_N del H))) with t; [ idtac | ring ]; apply Rlt_le_trans with b. rewrite H14 in H12; assert (H13 : S (max_N del H) = pred (Rlength (SubEqui del H))). rewrite SubEqui_P5; reflexivity. rewrite H13 in H12; rewrite SubEqui_P2 in H12; apply H12. rewrite SubEqui_P6. 2: apply lt_n_Sn. unfold max_N; case (maxN del H); intros; elim a0; clear a0; intros _ H13; replace (a + INR x * del + del) with (a + INR (S x) * del); [ assumption | rewrite S_INR; ring ]. apply Rplus_lt_reg_r with (pos_Rl (SubEqui del H) I); replace (pos_Rl (SubEqui del H) I + (t - pos_Rl (SubEqui del H) I)) with t; [ idtac | ring ]; replace (pos_Rl (SubEqui del H) I + del) with (pos_Rl (SubEqui del H) (S I)). assumption. repeat rewrite SubEqui_P6. rewrite S_INR; ring. assumption. apply le_lt_n_Sm; assumption. apply Rge_minus; apply Rle_ge; assumption. intros; clear H0 H1 H4 phi H5 H6 t H7; case (Req_dec t0 b); intro. left; assumption. right; set (I := fun j:nat => a + INR j * del <= t0); assert (H1 : exists n : nat, I n). exists 0%nat; unfold I; rewrite Rmult_0_l; rewrite Rplus_0_r; elim H8; intros; assumption. assert (H4 : Nbound I). unfold Nbound; exists (S (max_N del H)); intros; unfold max_N; case (maxN del H); intros; elim a0; clear a0; intros _ H5; apply INR_le; apply Rmult_le_reg_l with (pos del). apply (cond_pos del). apply Rplus_le_reg_l with a; do 2 rewrite (Rmult_comm del); apply Rle_trans with t0; unfold I in H4; try assumption; apply Rle_trans with b; try assumption; elim H8; intros; assumption. elim (Nzorn H1 H4); intros N [H5 H6]; assert (H7 : (N < S (max_N del H))%nat). unfold max_N; case (maxN del H); intros; apply INR_lt; apply Rmult_lt_reg_l with (pos del). apply (cond_pos del). apply Rplus_lt_reg_r with a; do 2 rewrite (Rmult_comm del); apply Rle_lt_trans with t0; unfold I in H5; try assumption; elim a0; intros; apply Rlt_le_trans with b; try assumption; elim H8; intros. elim H11; intro. assumption. elim H0; assumption. exists N; split. rewrite SubEqui_P5; simpl; assumption. unfold co_interval; split. rewrite SubEqui_P6. apply H5. assumption. inversion H7. replace (S (max_N del H)) with (pred (Rlength (SubEqui del H))). rewrite (SubEqui_P2 del H); elim H8; intros. elim H11; intro. assumption. elim H0; assumption. rewrite SubEqui_P5; reflexivity. rewrite SubEqui_P6. case (Rle_dec (a + INR (S N) * del) t0); intro. assert (H11 := H6 (S N) r); elim (le_Sn_n _ H11). auto with real. apply le_lt_n_Sm; assumption. Qed. Lemma RiemannInt_P7 : forall (f:R -> R) (a:R), Riemann_integrable f a a. Proof. unfold Riemann_integrable; intro f; intros; split with (mkStepFun (StepFun_P4 a a (f a))); split with (mkStepFun (StepFun_P4 a a 0)); split. intros; simpl; unfold fct_cte; replace t with a. unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; right; reflexivity. generalize H; unfold Rmin, Rmax; case (Rle_dec a a); intros; elim H0; intros; apply Rle_antisym; assumption. rewrite StepFun_P18; rewrite Rmult_0_l; rewrite Rabs_R0; apply (cond_pos eps). Qed. Lemma continuity_implies_RiemannInt : forall (f:R -> R) (a b:R), a <= b -> (forall x:R, a <= x <= b -> continuity_pt f x) -> Riemann_integrable f a b. Proof. intros; case (total_order_T a b); intro; [ elim s; intro; [ apply RiemannInt_P6; assumption | rewrite b0; apply RiemannInt_P7 ] | elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)) ]. Qed. Lemma RiemannInt_P8 : forall (f:R -> R) (a b:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable f b a), RiemannInt pr1 = - RiemannInt pr2. Proof. 
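(* Both RiemannInt values are limits of integrals of the step functions phi_sequence RinvN pr1 and phi_sequence RinvN pr2; rewriting the integral over [b,a] as the opposite of an integral over [a,b] (StepFun_P39), the sum of the two step integrals is bounded by the integrals of the error functions psi1 and psi2, each smaller than RinvN n, hence eventually smaller than eps / 3. *)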
intro f; intros; eapply UL_sequence. unfold RiemannInt; case (RiemannInt_exists pr1 RinvN RinvN_cv); intros; apply u. unfold RiemannInt; case (RiemannInt_exists pr2 RinvN RinvN_cv); intros; cut (exists psi1 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi_sequence RinvN pr1 n t) <= psi1 n t) /\ Rabs (RiemannInt_SF (psi1 n)) < RinvN n)). cut (exists psi2 : nat -> StepFun b a, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi_sequence RinvN pr2 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n)). intros; elim H; clear H; intros psi2 H; elim H0; clear H0; intros psi1 H0; assert (H1 := RinvN_cv); unfold Un_cv; intros; assert (H3 : 0 < eps / 3). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. unfold Un_cv in H1; elim (H1 _ H3); clear H1; intros N0 H1; unfold R_dist in H1; simpl in H1; assert (H4 : forall n:nat, (n >= N0)%nat -> RinvN n < eps / 3). intros; assert (H5 := H1 _ H4); replace (pos (RinvN n)) with (Rabs (/ (INR n + 1) - 0)); [ assumption | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; left; apply (cond_pos (RinvN n)) ]. clear H1; unfold Un_cv in u; elim (u _ H3); clear u; intros N1 H1; exists (max N0 N1); intros; unfold R_dist; apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n)) + Rabs (RiemannInt_SF (phi_sequence RinvN pr2 n) - x)). rewrite <- (Rabs_Ropp (RiemannInt_SF (phi_sequence RinvN pr2 n) - x)); replace (RiemannInt_SF (phi_sequence RinvN pr1 n) - - x) with (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n) + - (RiemannInt_SF (phi_sequence RinvN pr2 n) - x)); [ apply Rabs_triang | ring ]. replace eps with (2 * (eps / 3) + eps / 3). apply Rplus_lt_compat. rewrite (StepFun_P39 (phi_sequence RinvN pr2 n)); replace (RiemannInt_SF (phi_sequence RinvN pr1 n) + - RiemannInt_SF (mkStepFun (StepFun_P6 (pre (phi_sequence RinvN pr2 n))))) with (RiemannInt_SF (phi_sequence RinvN pr1 n) + -1 * RiemannInt_SF (mkStepFun (StepFun_P6 (pre (phi_sequence RinvN pr2 n))))); [ idtac | ring ]; rewrite <- StepFun_P30. case (Rle_dec a b); intro. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (phi_sequence RinvN pr1 n) (mkStepFun (StepFun_P6 (pre (phi_sequence RinvN pr2 n))))))))). apply StepFun_P34; assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 (psi1 n) (mkStepFun (StepFun_P6 (pre (psi2 n))))))). apply StepFun_P37; try assumption. intros; simpl; rewrite Rmult_1_l; apply Rle_trans with (Rabs (phi_sequence RinvN pr1 n x0 - f x0) + Rabs (f x0 - phi_sequence RinvN pr2 n x0)). replace (phi_sequence RinvN pr1 n x0 + -1 * phi_sequence RinvN pr2 n x0) with (phi_sequence RinvN pr1 n x0 - f x0 + (f x0 - phi_sequence RinvN pr2 n x0)); [ apply Rabs_triang | ring ]. assert (H7 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. assert (H8 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. apply Rplus_le_compat. elim (H0 n); intros; rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H9; rewrite H7; rewrite H8. elim H6; intros; split; left; assumption. elim (H n); intros; apply H9; rewrite H7; rewrite H8. elim H6; intros; split; left; assumption. rewrite StepFun_P30; rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat. 
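(* Each of the two error terms is bounded by Rabs (RiemannInt_SF (psi1 n)), respectively Rabs (RiemannInt_SF (psi2 n)), hence by RinvN n < eps / 3 as soon as n >= N0. *)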
elim (H0 n); intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi1 n))); [ apply RRle_abs | apply Rlt_trans with (pos (RinvN n)); [ assumption | apply H4; unfold ge; apply le_trans with (max N0 N1); [ apply le_max_l | assumption ] ] ]. elim (H n); intros; rewrite <- (Ropp_involutive (RiemannInt_SF (mkStepFun (StepFun_P6 (pre (psi2 n)))))) ; rewrite <- StepFun_P39; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi2 n))); [ rewrite <- Rabs_Ropp; apply RRle_abs | apply Rlt_trans with (pos (RinvN n)); [ assumption | apply H4; unfold ge; apply le_trans with (max N0 N1); [ apply le_max_l | assumption ] ] ]. assert (Hyp : b <= a). auto with real. rewrite StepFun_P39; rewrite Rabs_Ropp; apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P6 (StepFun_P28 (-1) (phi_sequence RinvN pr1 n) (mkStepFun (StepFun_P6 (pre (phi_sequence RinvN pr2 n)))))))))). apply StepFun_P34; assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 (mkStepFun (StepFun_P6 (pre (psi1 n)))) (psi2 n)))). apply StepFun_P37; try assumption. intros; simpl; rewrite Rmult_1_l; apply Rle_trans with (Rabs (phi_sequence RinvN pr1 n x0 - f x0) + Rabs (f x0 - phi_sequence RinvN pr2 n x0)). replace (phi_sequence RinvN pr1 n x0 + -1 * phi_sequence RinvN pr2 n x0) with (phi_sequence RinvN pr1 n x0 - f x0 + (f x0 - phi_sequence RinvN pr2 n x0)); [ apply Rabs_triang | ring ]. assert (H7 : Rmin a b = b). unfold Rmin; case (Rle_dec a b); intro; [ elim n0; assumption | reflexivity ]. assert (H8 : Rmax a b = a). unfold Rmax; case (Rle_dec a b); intro; [ elim n0; assumption | reflexivity ]. apply Rplus_le_compat. elim (H0 n); intros; rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H9; rewrite H7; rewrite H8. elim H6; intros; split; left; assumption. elim (H n); intros; apply H9; rewrite H7; rewrite H8; elim H6; intros; split; left; assumption. rewrite StepFun_P30; rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat. elim (H0 n); intros; rewrite <- (Ropp_involutive (RiemannInt_SF (mkStepFun (StepFun_P6 (pre (psi1 n)))))) ; rewrite <- StepFun_P39; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi1 n))); [ rewrite <- Rabs_Ropp; apply RRle_abs | apply Rlt_trans with (pos (RinvN n)); [ assumption | apply H4; unfold ge; apply le_trans with (max N0 N1); [ apply le_max_l | assumption ] ] ]. elim (H n); intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi2 n))); [ apply RRle_abs | apply Rlt_trans with (pos (RinvN n)); [ assumption | apply H4; unfold ge; apply le_trans with (max N0 N1); [ apply le_max_l | assumption ] ] ]. unfold R_dist in H1; apply H1; unfold ge; apply le_trans with (max N0 N1); [ apply le_max_r | assumption ]. apply Rmult_eq_reg_l with 3; [ unfold Rdiv; rewrite Rmult_plus_distr_l; do 2 rewrite (Rmult_comm 3); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr2 n)); intro; rewrite Rmin_comm; rewrite RmaxSym; apply (proj2_sig (phi_sequence_prop RinvN pr2 n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr1 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr1 n)). Qed. Lemma RiemannInt_P9 : forall (f:R -> R) (a:R) (pr:Riemann_integrable f a a), RiemannInt pr = 0. Proof. intros; assert (H := RiemannInt_P8 pr pr); apply Rmult_eq_reg_l with 2; [ rewrite Rmult_0_r; rewrite double; pattern (RiemannInt pr) at 2; rewrite H; apply Rplus_opp_r | discrR ]. Qed. Lemma Req_EM_T : forall r1 r2:R, {r1 = r2} + {r1 <> r2}. Proof. 
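(* Equality of reals is decidable in the informative (sumbool) sense, by the trichotomy total_order_T. *)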
intros; elim (total_order_T r1 r2); intros; [ elim a; intro; [ right; red; intro; rewrite H in a0; elim (Rlt_irrefl r2 a0) | left; assumption ] | right; red; intro; rewrite H in b; elim (Rlt_irrefl r2 b) ]. Qed. (* L1([a,b]) is a vectorial space *) Lemma RiemannInt_P10 : forall (f g:R -> R) (a b l:R), Riemann_integrable f a b -> Riemann_integrable g a b -> Riemann_integrable (fun x:R => f x + l * g x) a b. Proof. unfold Riemann_integrable; intros f g; intros; case (Req_EM_T l 0); intro. elim (X eps); intros; split with x; elim p; intros; split with x0; elim p0; intros; split; try assumption; rewrite e; intros; rewrite Rmult_0_l; rewrite Rplus_0_r; apply H; assumption. assert (H : 0 < eps / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos eps) | apply Rinv_0_lt_compat; prove_sup0 ]. assert (H0 : 0 < eps / (2 * Rabs l)). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos eps) | apply Rinv_0_lt_compat; apply Rmult_lt_0_compat; [ prove_sup0 | apply Rabs_pos_lt; assumption ] ]. elim (X (mkposreal _ H)); intros; elim (X0 (mkposreal _ H0)); intros; split with (mkStepFun (StepFun_P28 l x x0)); elim p0; elim p; intros; split with (mkStepFun (StepFun_P28 (Rabs l) x1 x2)); elim p1; elim p2; clear p1 p2 p0 p X X0; intros; split. intros; simpl; apply Rle_trans with (Rabs (f t - x t) + Rabs (l * (g t - x0 t))). replace (f t + l * g t - (x t + l * x0 t)) with (f t - x t + l * (g t - x0 t)); [ apply Rabs_triang | ring ]. apply Rplus_le_compat; [ apply H3; assumption | rewrite Rabs_mult; apply Rmult_le_compat_l; [ apply Rabs_pos | apply H1; assumption ] ]. rewrite StepFun_P30; apply Rle_lt_trans with (Rabs (RiemannInt_SF x1) + Rabs (Rabs l * RiemannInt_SF x2)). apply Rabs_triang. rewrite (double_var eps); apply Rplus_lt_compat. apply H4. rewrite Rabs_mult; rewrite Rabs_Rabsolu; apply Rmult_lt_reg_l with (/ Rabs l). apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym; [ rewrite Rmult_1_l; replace (/ Rabs l * (eps / 2)) with (eps / (2 * Rabs l)); [ apply H2 | unfold Rdiv; rewrite Rinv_mult_distr; [ ring | discrR | apply Rabs_no_R0; assumption ] ] | apply Rabs_no_R0; assumption ]. Qed. Lemma RiemannInt_P11 : forall (f:R -> R) (a b l:R) (un:nat -> posreal) (phi1 phi2 psi1 psi2:nat -> StepFun a b), Un_cv un 0 -> (forall n:nat, (forall t:R, Rmin a b <= t <= Rmax a b -> Rabs (f t - phi1 n t) <= psi1 n t) /\ Rabs (RiemannInt_SF (psi1 n)) < un n) -> (forall n:nat, (forall t:R, Rmin a b <= t <= Rmax a b -> Rabs (f t - phi2 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < un n) -> Un_cv (fun N:nat => RiemannInt_SF (phi1 N)) l -> Un_cv (fun N:nat => RiemannInt_SF (phi2 N)) l. Proof. unfold Un_cv; intro f; intros; intros. case (Rle_dec a b); intro Hyp. assert (H4 : 0 < eps / 3). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H _ H4); clear H; intros N0 H. elim (H2 _ H4); clear H2; intros N1 H2. set (N := max N0 N1); exists N; intros; unfold R_dist. apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi2 n) - RiemannInt_SF (phi1 n)) + Rabs (RiemannInt_SF (phi1 n) - l)). replace (RiemannInt_SF (phi2 n) - l) with (RiemannInt_SF (phi2 n) - RiemannInt_SF (phi1 n) + (RiemannInt_SF (phi1 n) - l)); [ apply Rabs_triang | ring ]. replace eps with (2 * (eps / 3) + eps / 3). apply Rplus_lt_compat. replace (RiemannInt_SF (phi2 n) - RiemannInt_SF (phi1 n)) with (RiemannInt_SF (phi2 n) + -1 * RiemannInt_SF (phi1 n)); [ idtac | ring ]. rewrite <- StepFun_P30. 
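(* The difference of the two step integrals is the integral of phi2 n - phi1 n; its absolute value is bounded by the integral of the absolute value (StepFun_P34) and then, comparing pointwise on the open interval (StepFun_P37), by the integral of psi1 n + psi2 n, since both psi's dominate the pointwise distance to f. *)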
apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (phi2 n) (phi1 n)))))). apply StepFun_P34; assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 (psi1 n) (psi2 n)))). apply StepFun_P37; try assumption; intros; simpl; rewrite Rmult_1_l. apply Rle_trans with (Rabs (phi2 n x - f x) + Rabs (f x - phi1 n x)). replace (phi2 n x + -1 * phi1 n x) with (phi2 n x - f x + (f x - phi1 n x)); [ apply Rabs_triang | ring ]. rewrite (Rplus_comm (psi1 n x)); apply Rplus_le_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; elim (H1 n); intros; apply H7. assert (H10 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. assert (H11 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. rewrite H10; rewrite H11; elim H6; intros; split; left; assumption. elim (H0 n); intros; apply H7; assert (H10 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. assert (H11 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. rewrite H10; rewrite H11; elim H6; intros; split; left; assumption. rewrite StepFun_P30; rewrite Rmult_1_l; rewrite double; apply Rplus_lt_compat. apply Rlt_trans with (pos (un n)). elim (H0 n); intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi1 n))). apply RRle_abs. assumption. replace (pos (un n)) with (R_dist (un n) 0). apply H; unfold ge; apply le_trans with N; try assumption. unfold N; apply le_max_l. unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right. apply Rle_ge; left; apply (cond_pos (un n)). apply Rlt_trans with (pos (un n)). elim (H1 n); intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi2 n))). apply RRle_abs; assumption. assumption. replace (pos (un n)) with (R_dist (un n) 0). apply H; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_max_l. unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (un n)). unfold R_dist in H2; apply H2; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_max_r. apply Rmult_eq_reg_l with 3; [ unfold Rdiv; rewrite Rmult_plus_distr_l; do 2 rewrite (Rmult_comm 3); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. assert (H4 : 0 < eps / 3). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H _ H4); clear H; intros N0 H. elim (H2 _ H4); clear H2; intros N1 H2. set (N := max N0 N1); exists N; intros; unfold R_dist. apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi2 n) - RiemannInt_SF (phi1 n)) + Rabs (RiemannInt_SF (phi1 n) - l)). replace (RiemannInt_SF (phi2 n) - l) with (RiemannInt_SF (phi2 n) - RiemannInt_SF (phi1 n) + (RiemannInt_SF (phi1 n) - l)); [ apply Rabs_triang | ring ]. assert (Hyp_b : b <= a). auto with real. replace eps with (2 * (eps / 3) + eps / 3). apply Rplus_lt_compat. replace (RiemannInt_SF (phi2 n) - RiemannInt_SF (phi1 n)) with (RiemannInt_SF (phi2 n) + -1 * RiemannInt_SF (phi1 n)); [ idtac | ring ]. rewrite <- StepFun_P30. rewrite StepFun_P39. rewrite Rabs_Ropp. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P6 (pre (mkStepFun (StepFun_P28 (-1) (phi2 n) (phi1 n))))))))). apply StepFun_P34; try assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P6 (pre (mkStepFun (StepFun_P28 1 (psi1 n) (psi2 n))))))). 
apply StepFun_P37; try assumption. intros; simpl; rewrite Rmult_1_l. apply Rle_trans with (Rabs (phi2 n x - f x) + Rabs (f x - phi1 n x)). replace (phi2 n x + -1 * phi1 n x) with (phi2 n x - f x + (f x - phi1 n x)); [ apply Rabs_triang | ring ]. rewrite (Rplus_comm (psi1 n x)); apply Rplus_le_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; elim (H1 n); intros; apply H7. assert (H10 : Rmin a b = b). unfold Rmin; case (Rle_dec a b); intro; [ elim Hyp; assumption | reflexivity ]. assert (H11 : Rmax a b = a). unfold Rmax; case (Rle_dec a b); intro; [ elim Hyp; assumption | reflexivity ]. rewrite H10; rewrite H11; elim H6; intros; split; left; assumption. elim (H0 n); intros; apply H7; assert (H10 : Rmin a b = b). unfold Rmin; case (Rle_dec a b); intro; [ elim Hyp; assumption | reflexivity ]. assert (H11 : Rmax a b = a). unfold Rmax; case (Rle_dec a b); intro; [ elim Hyp; assumption | reflexivity ]. rewrite H10; rewrite H11; elim H6; intros; split; left; assumption. rewrite <- (Ropp_involutive (RiemannInt_SF (mkStepFun (StepFun_P6 (pre (mkStepFun (StepFun_P28 1 (psi1 n) (psi2 n)))))))) . rewrite <- StepFun_P39. rewrite StepFun_P30. rewrite Rmult_1_l; rewrite double. rewrite Ropp_plus_distr; apply Rplus_lt_compat. apply Rlt_trans with (pos (un n)). elim (H0 n); intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi1 n))). rewrite <- Rabs_Ropp; apply RRle_abs. assumption. replace (pos (un n)) with (R_dist (un n) 0). apply H; unfold ge; apply le_trans with N; try assumption. unfold N; apply le_max_l. unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right. apply Rle_ge; left; apply (cond_pos (un n)). apply Rlt_trans with (pos (un n)). elim (H1 n); intros; apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi2 n))). rewrite <- Rabs_Ropp; apply RRle_abs; assumption. assumption. replace (pos (un n)) with (R_dist (un n) 0). apply H; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_max_l. unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (un n)). unfold R_dist in H2; apply H2; unfold ge; apply le_trans with N; try assumption; unfold N; apply le_max_r. apply Rmult_eq_reg_l with 3; [ unfold Rdiv; rewrite Rmult_plus_distr_l; do 2 rewrite (Rmult_comm 3); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. Qed. Lemma RiemannInt_P12 : forall (f g:R -> R) (a b l:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable g a b) (pr3:Riemann_integrable (fun x:R => f x + l * g x) a b), a <= b -> RiemannInt pr3 = RiemannInt pr1 + l * RiemannInt pr2. Proof. intro f; intros; case (Req_dec l 0); intro. pattern l at 2; rewrite H0; rewrite Rmult_0_l; rewrite Rplus_0_r; unfold RiemannInt; case (RiemannInt_exists pr3 RinvN RinvN_cv); case (RiemannInt_exists pr1 RinvN RinvN_cv); intros; eapply UL_sequence; [ apply u0 | set (psi1 := fun n:nat => proj1_sig (phi_sequence_prop RinvN pr1 n)); set (psi2 := fun n:nat => proj1_sig (phi_sequence_prop RinvN pr3 n)); apply RiemannInt_P11 with f RinvN (phi_sequence RinvN pr1) psi1 psi2; [ apply RinvN_cv | intro; apply (proj2_sig (phi_sequence_prop RinvN pr1 n)) | intro; assert (H1 : (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t + l * g t - phi_sequence RinvN pr3 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n); [ apply (proj2_sig (phi_sequence_prop RinvN pr3 n)) | elim H1; intros; split; try assumption; intros; replace (f t) with (f t + l * g t); [ apply H2; assumption | rewrite H0; ring ] ] | assumption ] ]. 
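(* Case l <> 0: both sides are limits of integrals of approximating step functions; eps is split into five parts, with the bounds involving g measured against eps / (5 * Rabs l) to absorb the factor l. *)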
eapply UL_sequence. unfold RiemannInt; case (RiemannInt_exists pr3 RinvN RinvN_cv); intros; apply u. unfold Un_cv; intros; unfold RiemannInt; case (RiemannInt_exists pr1 RinvN RinvN_cv); case (RiemannInt_exists pr2 RinvN RinvN_cv); unfold Un_cv; intros; assert (H2 : 0 < eps / 5). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (u0 _ H2); clear u0; intros N0 H3; assert (H4 := RinvN_cv); unfold Un_cv in H4; elim (H4 _ H2); clear H4 H2; intros N1 H4; assert (H5 : 0 < eps / (5 * Rabs l)). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; apply Rmult_lt_0_compat; [ prove_sup0 | apply Rabs_pos_lt; assumption ] ]. elim (u _ H5); clear u; intros N2 H6; assert (H7 := RinvN_cv); unfold Un_cv in H7; elim (H7 _ H5); clear H7 H5; intros N3 H5; unfold R_dist in H3, H4, H5, H6; set (N := max (max N0 N1) (max N2 N3)). assert (H7 : forall n:nat, (n >= N1)%nat -> RinvN n < eps / 5). intros; replace (pos (RinvN n)) with (Rabs (RinvN n - 0)); [ unfold RinvN; apply H4; assumption | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; left; apply (cond_pos (RinvN n)) ]. clear H4; assert (H4 := H7); clear H7; assert (H7 : forall n:nat, (n >= N3)%nat -> RinvN n < eps / (5 * Rabs l)). intros; replace (pos (RinvN n)) with (Rabs (RinvN n - 0)); [ unfold RinvN; apply H5; assumption | unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; left; apply (cond_pos (RinvN n)) ]. clear H5; assert (H5 := H7); clear H7; exists N; intros; unfold R_dist. apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + l * RiemannInt_SF (phi_sequence RinvN pr2 n))) + Rabs (RiemannInt_SF (phi_sequence RinvN pr1 n) - x0) + Rabs l * Rabs (RiemannInt_SF (phi_sequence RinvN pr2 n) - x)). apply Rle_trans with (Rabs (RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + l * RiemannInt_SF (phi_sequence RinvN pr2 n))) + Rabs (RiemannInt_SF (phi_sequence RinvN pr1 n) - x0 + l * (RiemannInt_SF (phi_sequence RinvN pr2 n) - x))). replace (RiemannInt_SF (phi_sequence RinvN pr3 n) - (x0 + l * x)) with (RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + l * RiemannInt_SF (phi_sequence RinvN pr2 n)) + (RiemannInt_SF (phi_sequence RinvN pr1 n) - x0 + l * (RiemannInt_SF (phi_sequence RinvN pr2 n) - x))); [ apply Rabs_triang | ring ]. rewrite Rplus_assoc; apply Rplus_le_compat_l; rewrite <- Rabs_mult; replace (RiemannInt_SF (phi_sequence RinvN pr1 n) - x0 + l * (RiemannInt_SF (phi_sequence RinvN pr2 n) - x)) with (RiemannInt_SF (phi_sequence RinvN pr1 n) - x0 + l * (RiemannInt_SF (phi_sequence RinvN pr2 n) - x)); [ apply Rabs_triang | ring ]. replace eps with (3 * (eps / 5) + eps / 5 + eps / 5). repeat apply Rplus_lt_compat. assert (H7 : exists psi1 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi_sequence RinvN pr1 n t) <= psi1 n t) /\ Rabs (RiemannInt_SF (psi1 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr1 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr1 n0)). assert (H8 : exists psi2 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (g t - phi_sequence RinvN pr2 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr2 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr2 n0)). 
assert (H9 : exists psi3 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t + l * g t - phi_sequence RinvN pr3 n t) <= psi3 n t) /\ Rabs (RiemannInt_SF (psi3 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr3 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr3 n0)). elim H7; clear H7; intros psi1 H7; elim H8; clear H8; intros psi2 H8; elim H9; clear H9; intros psi3 H9; replace (RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + l * RiemannInt_SF (phi_sequence RinvN pr2 n))) with (RiemannInt_SF (phi_sequence RinvN pr3 n) + -1 * (RiemannInt_SF (phi_sequence RinvN pr1 n) + l * RiemannInt_SF (phi_sequence RinvN pr2 n))); [ idtac | ring ]; do 2 rewrite <- StepFun_P30; assert (H10 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. assert (H11 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. rewrite H10 in H7; rewrite H10 in H8; rewrite H10 in H9; rewrite H11 in H7; rewrite H11 in H8; rewrite H11 in H9; apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (phi_sequence RinvN pr3 n) (mkStepFun (StepFun_P28 l (phi_sequence RinvN pr1 n) (phi_sequence RinvN pr2 n)))))))). apply StepFun_P34; assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 (psi3 n) (mkStepFun (StepFun_P28 (Rabs l) (psi1 n) (psi2 n)))))). apply StepFun_P37; try assumption. intros; simpl; rewrite Rmult_1_l. apply Rle_trans with (Rabs (phi_sequence RinvN pr3 n x1 - (f x1 + l * g x1)) + Rabs (f x1 + l * g x1 + -1 * (phi_sequence RinvN pr1 n x1 + l * phi_sequence RinvN pr2 n x1))). replace (phi_sequence RinvN pr3 n x1 + -1 * (phi_sequence RinvN pr1 n x1 + l * phi_sequence RinvN pr2 n x1)) with (phi_sequence RinvN pr3 n x1 - (f x1 + l * g x1) + (f x1 + l * g x1 + -1 * (phi_sequence RinvN pr1 n x1 + l * phi_sequence RinvN pr2 n x1))); [ apply Rabs_triang | ring ]. rewrite Rplus_assoc; apply Rplus_le_compat. elim (H9 n); intros; rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H13. elim H12; intros; split; left; assumption. apply Rle_trans with (Rabs (f x1 - phi_sequence RinvN pr1 n x1) + Rabs l * Rabs (g x1 - phi_sequence RinvN pr2 n x1)). rewrite <- Rabs_mult; replace (f x1 + (l * g x1 + -1 * (phi_sequence RinvN pr1 n x1 + l * phi_sequence RinvN pr2 n x1))) with (f x1 - phi_sequence RinvN pr1 n x1 + l * (g x1 - phi_sequence RinvN pr2 n x1)); [ apply Rabs_triang | ring ]. apply Rplus_le_compat. elim (H7 n); intros; apply H13. elim H12; intros; split; left; assumption. apply Rmult_le_compat_l; [ apply Rabs_pos | elim (H8 n); intros; apply H13; elim H12; intros; split; left; assumption ]. do 2 rewrite StepFun_P30; rewrite Rmult_1_l; replace (3 * (eps / 5)) with (eps / 5 + (eps / 5 + eps / 5)); [ repeat apply Rplus_lt_compat | ring ]. apply Rlt_trans with (pos (RinvN n)); [ apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi3 n))); [ apply RRle_abs | elim (H9 n); intros; assumption ] | apply H4; unfold ge; apply le_trans with N; [ apply le_trans with (max N0 N1); [ apply le_max_r | unfold N; apply le_max_l ] | assumption ] ]. apply Rlt_trans with (pos (RinvN n)); [ apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi1 n))); [ apply RRle_abs | elim (H7 n); intros; assumption ] | apply H4; unfold ge; apply le_trans with N; [ apply le_trans with (max N0 N1); [ apply le_max_r | unfold N; apply le_max_l ] | assumption ] ]. apply Rmult_lt_reg_l with (/ Rabs l). 
apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; replace (/ Rabs l * (eps / 5)) with (eps / (5 * Rabs l)). apply Rlt_trans with (pos (RinvN n)); [ apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi2 n))); [ apply RRle_abs | elim (H8 n); intros; assumption ] | apply H5; unfold ge; apply le_trans with N; [ apply le_trans with (max N2 N3); [ apply le_max_r | unfold N; apply le_max_r ] | assumption ] ]. unfold Rdiv; rewrite Rinv_mult_distr; [ ring | discrR | apply Rabs_no_R0; assumption ]. apply Rabs_no_R0; assumption. apply H3; unfold ge; apply le_trans with (max N0 N1); [ apply le_max_l | apply le_trans with N; [ unfold N; apply le_max_l | assumption ] ]. apply Rmult_lt_reg_l with (/ Rabs l). apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; replace (/ Rabs l * (eps / 5)) with (eps / (5 * Rabs l)). apply H6; unfold ge; apply le_trans with (max N2 N3); [ apply le_max_l | apply le_trans with N; [ unfold N; apply le_max_r | assumption ] ]. unfold Rdiv; rewrite Rinv_mult_distr; [ ring | discrR | apply Rabs_no_R0; assumption ]. apply Rabs_no_R0; assumption. apply Rmult_eq_reg_l with 5; [ unfold Rdiv; do 2 rewrite Rmult_plus_distr_l; do 3 rewrite (Rmult_comm 5); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. Qed. Lemma RiemannInt_P13 : forall (f g:R -> R) (a b l:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable g a b) (pr3:Riemann_integrable (fun x:R => f x + l * g x) a b), RiemannInt pr3 = RiemannInt pr1 + l * RiemannInt pr2. Proof. intros; case (Rle_dec a b); intro; [ apply RiemannInt_P12; assumption | assert (H : b <= a); [ auto with real | replace (RiemannInt pr3) with (- RiemannInt (RiemannInt_P1 pr3)); [ idtac | symmetry ; apply RiemannInt_P8 ]; replace (RiemannInt pr2) with (- RiemannInt (RiemannInt_P1 pr2)); [ idtac | symmetry ; apply RiemannInt_P8 ]; replace (RiemannInt pr1) with (- RiemannInt (RiemannInt_P1 pr1)); [ idtac | symmetry ; apply RiemannInt_P8 ]; rewrite (RiemannInt_P12 (RiemannInt_P1 pr1) (RiemannInt_P1 pr2) (RiemannInt_P1 pr3) H); ring ] ]. Qed. Lemma RiemannInt_P14 : forall a b c:R, Riemann_integrable (fct_cte c) a b. Proof. unfold Riemann_integrable; intros; split with (mkStepFun (StepFun_P4 a b c)); split with (mkStepFun (StepFun_P4 a b 0)); split; [ intros; simpl; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; unfold fct_cte; right; reflexivity | rewrite StepFun_P18; rewrite Rmult_0_l; rewrite Rabs_R0; apply (cond_pos eps) ]. Qed. Lemma RiemannInt_P15 : forall (a b c:R) (pr:Riemann_integrable (fct_cte c) a b), RiemannInt pr = c * (b - a). Proof. intros; unfold RiemannInt; case (RiemannInt_exists pr RinvN RinvN_cv); intros; eapply UL_sequence. apply u. set (phi1 := fun N:nat => phi_sequence RinvN pr N); change (Un_cv (fun N:nat => RiemannInt_SF (phi1 N)) (c * (b - a))); set (f := fct_cte c); assert (H1 : exists psi1 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi_sequence RinvN pr n t) <= psi1 n t) /\ Rabs (RiemannInt_SF (psi1 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr n)). elim H1; clear H1; intros psi1 H1; set (phi2 := fun n:nat => mkStepFun (StepFun_P4 a b c)); set (psi2 := fun n:nat => mkStepFun (StepFun_P4 a b 0)); apply RiemannInt_P11 with f RinvN phi2 psi2 psi1; try assumption. apply RinvN_cv. intro; split. 
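(* Here f is the constant function c and phi2 n is the constant step function with value c, so the pointwise error is 0 and the constant 0 bound psi2 suffices; StepFun_P18 then gives RiemannInt_SF (phi2 N) = c * (b - a), the announced limit. *)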
intros; unfold f; simpl; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; unfold fct_cte; right; reflexivity. unfold psi2; rewrite StepFun_P18; rewrite Rmult_0_l; rewrite Rabs_R0; apply (cond_pos (RinvN n)). unfold Un_cv; intros; split with 0%nat; intros; unfold R_dist; unfold phi2; rewrite StepFun_P18; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply H. Qed. Lemma RiemannInt_P16 : forall (f:R -> R) (a b:R), Riemann_integrable f a b -> Riemann_integrable (fun x:R => Rabs (f x)) a b. Proof. unfold Riemann_integrable; intro f; intros; elim (X eps); clear X; intros phi [psi [H H0]]; split with (mkStepFun (StepFun_P32 phi)); split with psi; split; try assumption; intros; simpl; apply Rle_trans with (Rabs (f t - phi t)); [ apply Rabs_triang_inv2 | apply H; assumption ]. Qed. Lemma Rle_cv_lim : forall (Un Vn:nat -> R) (l1 l2:R), (forall n:nat, Un n <= Vn n) -> Un_cv Un l1 -> Un_cv Vn l2 -> l1 <= l2. Proof. intros; case (Rle_dec l1 l2); intro. assumption. assert (H2 : l2 < l1). auto with real. clear n; assert (H3 : 0 < (l1 - l2) / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ apply Rlt_Rminus; assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H1 _ H3); elim (H0 _ H3); clear H0 H1; unfold R_dist; intros; set (N := max x x0); cut (Vn N < Un N). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ (H N) H4)). apply Rlt_trans with ((l1 + l2) / 2). apply Rplus_lt_reg_r with (- l2); replace (- l2 + (l1 + l2) / 2) with ((l1 - l2) / 2). rewrite Rplus_comm; apply Rle_lt_trans with (Rabs (Vn N - l2)). apply RRle_abs. apply H1; unfold ge; unfold N; apply le_max_r. apply Rmult_eq_reg_l with 2; [ unfold Rdiv; do 2 rewrite (Rmult_comm 2); rewrite (Rmult_plus_distr_r (- l2) ((l1 + l2) * / 2) 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. apply Ropp_lt_cancel; apply Rplus_lt_reg_r with l1; replace (l1 + - ((l1 + l2) / 2)) with ((l1 - l2) / 2). apply Rle_lt_trans with (Rabs (Un N - l1)). rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply RRle_abs. apply H0; unfold ge; unfold N; apply le_max_l. apply Rmult_eq_reg_l with 2; [ unfold Rdiv; do 2 rewrite (Rmult_comm 2); rewrite (Rmult_plus_distr_r l1 (- ((l1 + l2) * / 2)) 2); rewrite <- Ropp_mult_distr_l_reverse; repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. Qed. Lemma RiemannInt_P17 : forall (f:R -> R) (a b:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable (fun x:R => Rabs (f x)) a b), a <= b -> Rabs (RiemannInt pr1) <= RiemannInt pr2. Proof. intro f; intros; unfold RiemannInt; case (RiemannInt_exists pr1 RinvN RinvN_cv); case (RiemannInt_exists pr2 RinvN RinvN_cv); intros; set (phi1 := phi_sequence RinvN pr1) in u0; set (phi2 := fun N:nat => mkStepFun (StepFun_P32 (phi1 N))); apply Rle_cv_lim with (fun N:nat => Rabs (RiemannInt_SF (phi1 N))) (fun N:nat => RiemannInt_SF (phi2 N)). intro; unfold phi2; apply StepFun_P34; assumption. apply (continuity_seq Rabs (fun N:nat => RiemannInt_SF (phi1 N)) x0); try assumption. apply Rcontinuity_abs. set (phi3 := phi_sequence RinvN pr2); assert (H0 : exists psi3 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (Rabs (f t) - phi3 n t) <= psi3 n t) /\ Rabs (RiemannInt_SF (psi3 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr2 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr2 n)). 
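(* The error bounds for phi2 N, the absolute value of phi1 N (StepFun_P32), follow from those of phi1 N through the reverse triangle inequality Rabs_triang_inv2; RiemannInt_P11 then identifies the limit of RiemannInt_SF (phi2 N) with RiemannInt pr2. *)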
assert (H1 : exists psi2 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (Rabs (f t) - phi2 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n)). assert (H1 : exists psi2 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi1 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr1 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr1 n)). elim H1; clear H1; intros psi2 H1; split with psi2; intros; elim (H1 n); clear H1; intros; split; try assumption. intros; unfold phi2; simpl; apply Rle_trans with (Rabs (f t - phi1 n t)). apply Rabs_triang_inv2. apply H1; assumption. elim H0; clear H0; intros psi3 H0; elim H1; clear H1; intros psi2 H1; apply RiemannInt_P11 with (fun x:R => Rabs (f x)) RinvN phi3 psi3 psi2; try assumption; apply RinvN_cv. Qed. Lemma RiemannInt_P18 : forall (f g:R -> R) (a b:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable g a b), a <= b -> (forall x:R, a < x < b -> f x = g x) -> RiemannInt pr1 = RiemannInt pr2. Proof. intro f; intros; unfold RiemannInt; case (RiemannInt_exists pr1 RinvN RinvN_cv); case (RiemannInt_exists pr2 RinvN RinvN_cv); intros; eapply UL_sequence. apply u0. set (phi1 := fun N:nat => phi_sequence RinvN pr1 N); change (Un_cv (fun N:nat => RiemannInt_SF (phi1 N)) x); assert (H1 : exists psi1 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi1 n t) <= psi1 n t) /\ Rabs (RiemannInt_SF (psi1 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr1 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr1 n)). elim H1; clear H1; intros psi1 H1; set (phi2 := fun N:nat => phi_sequence RinvN pr2 N). set (phi2_aux := fun (N:nat) (x:R) => match Req_EM_T x a with | left _ => f a | right _ => match Req_EM_T x b with | left _ => f b | right _ => phi2 N x end end). cut (forall N:nat, IsStepFun (phi2_aux N) a b). intro; set (phi2_m := fun N:nat => mkStepFun (X N)). assert (H2 : exists psi2 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (g t - phi2 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr2 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr2 n)). elim H2; clear H2; intros psi2 H2; apply RiemannInt_P11 with f RinvN phi2_m psi2 psi1; try assumption. apply RinvN_cv. intro; elim (H2 n); intros; split; try assumption. intros; unfold phi2_m; simpl; unfold phi2_aux; case (Req_EM_T t a); case (Req_EM_T t b); intros. rewrite e0; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rle_trans with (Rabs (g t - phi2 n t)). apply Rabs_pos. pattern a at 3; rewrite <- e0; apply H3; assumption. rewrite e; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rle_trans with (Rabs (g t - phi2 n t)). apply Rabs_pos. pattern a at 3; rewrite <- e; apply H3; assumption. rewrite e; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; apply Rle_trans with (Rabs (g t - phi2 n t)). apply Rabs_pos. pattern b at 3; rewrite <- e; apply H3; assumption. replace (f t) with (g t). apply H3; assumption. symmetry ; apply H0; elim H5; clear H5; intros. assert (H7 : Rmin a b = a). unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n2; assumption ]. assert (H8 : Rmax a b = b). unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n2; assumption ]. rewrite H7 in H5; rewrite H8 in H6; split. 
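(* t lies in [a,b] but differs from both endpoints (the two Req_EM_T tests failed), hence a < t < b and the hypothesis H0 yields f t = g t. *)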
elim H5; intro; [ assumption | elim n1; symmetry ; assumption ]. elim H6; intro; [ assumption | elim n0; assumption ]. cut (forall N:nat, RiemannInt_SF (phi2_m N) = RiemannInt_SF (phi2 N)). intro; unfold Un_cv; intros; elim (u _ H4); intros; exists x1; intros; rewrite (H3 n); apply H5; assumption. intro; apply Rle_antisym. apply StepFun_P37; try assumption. intros; unfold phi2_m; simpl; unfold phi2_aux; case (Req_EM_T x1 a); case (Req_EM_T x1 b); intros. elim H3; intros; rewrite e0 in H4; elim (Rlt_irrefl _ H4). elim H3; intros; rewrite e in H4; elim (Rlt_irrefl _ H4). elim H3; intros; rewrite e in H5; elim (Rlt_irrefl _ H5). right; reflexivity. apply StepFun_P37; try assumption. intros; unfold phi2_m; simpl; unfold phi2_aux; case (Req_EM_T x1 a); case (Req_EM_T x1 b); intros. elim H3; intros; rewrite e0 in H4; elim (Rlt_irrefl _ H4). elim H3; intros; rewrite e in H4; elim (Rlt_irrefl _ H4). elim H3; intros; rewrite e in H5; elim (Rlt_irrefl _ H5). right; reflexivity. intro; assert (H2 := pre (phi2 N)); unfold IsStepFun in H2; unfold is_subdivision in H2; elim H2; clear H2; intros l [lf H2]; split with l; split with lf; unfold adapted_couple in H2; decompose [and] H2; clear H2; unfold adapted_couple; repeat split; try assumption. intros; assert (H9 := H8 i H2); unfold constant_D_eq, open_interval in H9; unfold constant_D_eq, open_interval; intros; rewrite <- (H9 x1 H7); assert (H10 : a <= pos_Rl l i). replace a with (Rmin a b). rewrite <- H5; elim (RList_P6 l); intros; apply H10. assumption. apply le_O_n. apply lt_trans with (pred (Rlength l)); [ assumption | apply lt_pred_n_n ]. apply neq_O_lt; intro; rewrite <- H12 in H6; discriminate. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (H11 : pos_Rl l (S i) <= b). replace b with (Rmax a b). rewrite <- H4; elim (RList_P6 l); intros; apply H11. assumption. apply lt_le_S; assumption. apply lt_pred_n_n; apply neq_O_lt; intro; rewrite <- H13 in H6; discriminate. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. elim H7; clear H7; intros; unfold phi2_aux; case (Req_EM_T x1 a); case (Req_EM_T x1 b); intros. rewrite e in H12; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H11 H12)). rewrite e in H7; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H10 H7)). rewrite e in H12; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H11 H12)). reflexivity. Qed. Lemma RiemannInt_P19 : forall (f g:R -> R) (a b:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable g a b), a <= b -> (forall x:R, a < x < b -> f x <= g x) -> RiemannInt pr1 <= RiemannInt pr2. Proof. intro f; intros; apply Rplus_le_reg_l with (- RiemannInt pr1); rewrite Rplus_opp_l; rewrite Rplus_comm; apply Rle_trans with (Rabs (RiemannInt (RiemannInt_P10 (-1) pr2 pr1))). apply Rabs_pos. replace (RiemannInt pr2 + - RiemannInt pr1) with (RiemannInt (RiemannInt_P16 (RiemannInt_P10 (-1) pr2 pr1))). apply (RiemannInt_P17 (RiemannInt_P10 (-1) pr2 pr1) (RiemannInt_P16 (RiemannInt_P10 (-1) pr2 pr1))); assumption. replace (RiemannInt pr2 + - RiemannInt pr1) with (RiemannInt (RiemannInt_P10 (-1) pr2 pr1)). apply RiemannInt_P18; try assumption. intros; apply Rabs_right. apply Rle_ge; apply Rplus_le_reg_l with (f x); rewrite Rplus_0_r; replace (f x + (g x + -1 * f x)) with (g x); [ apply H0; assumption | ring ]. rewrite (RiemannInt_P12 pr2 pr1 (RiemannInt_P10 (-1) pr2 pr1)); [ ring | assumption ]. Qed. Lemma FTC_P1 : forall (f:R -> R) (a b:R), a <= b -> (forall x:R, a <= x <= b -> continuity_pt f x) -> forall x:R, a <= x -> x <= b -> Riemann_integrable f a x. Proof. 
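(* Continuity on [a,b] restricts to continuity on [a,x] whenever a <= x <= b, so continuity_implies_RiemannInt applies; this integrability witness is the one fed to RiemannInt in the definition of primitive below. *)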
intros; apply continuity_implies_RiemannInt; [ assumption | intros; apply H0; elim H3; intros; split; assumption || apply Rle_trans with x; assumption ]. Qed. Definition primitive (f:R -> R) (a b:R) (h:a <= b) (pr:forall x:R, a <= x -> x <= b -> Riemann_integrable f a x) (x:R) : R := match Rle_dec a x with | left r => match Rle_dec x b with | left r0 => RiemannInt (pr x r r0) | right _ => f b * (x - b) + RiemannInt (pr b h (Rle_refl b)) end | right _ => f a * (x - a) end. Lemma RiemannInt_P20 : forall (f:R -> R) (a b:R) (h:a <= b) (pr:forall x:R, a <= x -> x <= b -> Riemann_integrable f a x) (pr0:Riemann_integrable f a b), RiemannInt pr0 = primitive h pr b - primitive h pr a. Proof. intros; replace (primitive h pr a) with 0. replace (RiemannInt pr0) with (primitive h pr b). ring. unfold primitive; case (Rle_dec a b); case (Rle_dec b b); intros; [ apply RiemannInt_P5 | elim n; right; reflexivity | elim n; assumption | elim n0; assumption ]. symmetry ; unfold primitive; case (Rle_dec a a); case (Rle_dec a b); intros; [ apply RiemannInt_P9 | elim n; assumption | elim n; right; reflexivity | elim n0; right; reflexivity ]. Qed. Lemma RiemannInt_P21 : forall (f:R -> R) (a b c:R), a <= b -> b <= c -> Riemann_integrable f a b -> Riemann_integrable f b c -> Riemann_integrable f a c. Proof. unfold Riemann_integrable; intros f a b c Hyp1 Hyp2 X X0 eps. assert (H : 0 < eps / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ apply (cond_pos eps) | apply Rinv_0_lt_compat; prove_sup0 ]. elim (X (mkposreal _ H)); clear X; intros phi1 [psi1 H1]; elim (X0 (mkposreal _ H)); clear X0; intros phi2 [psi2 H2]. set (phi3 := fun x:R => match Rle_dec a x with | left _ => match Rle_dec x b with | left _ => phi1 x | right _ => phi2 x end | right _ => 0 end). set (psi3 := fun x:R => match Rle_dec a x with | left _ => match Rle_dec x b with | left _ => psi1 x | right _ => psi2 x end | right _ => 0 end). cut (IsStepFun phi3 a c). intro; cut (IsStepFun psi3 a b). intro; cut (IsStepFun psi3 b c). intro; cut (IsStepFun psi3 a c). intro; split with (mkStepFun X); split with (mkStepFun X2); simpl; split. intros; unfold phi3, psi3; case (Rle_dec t b); case (Rle_dec a t); intros. elim H1; intros; apply H3. replace (Rmin a b) with a. replace (Rmax a b) with b. split; assumption. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. elim n; replace a with (Rmin a c). elim H0; intros; assumption. unfold Rmin; case (Rle_dec a c); intro; [ reflexivity | elim n0; apply Rle_trans with b; assumption ]. elim H2; intros; apply H3. replace (Rmax b c) with (Rmax a c). elim H0; intros; split; try assumption. replace (Rmin b c) with b. auto with real. unfold Rmin; case (Rle_dec b c); intro; [ reflexivity | elim n0; assumption ]. unfold Rmax; case (Rle_dec a c); case (Rle_dec b c); intros; try (elim n0; assumption || elim n0; apply Rle_trans with b; assumption). reflexivity. elim n; replace a with (Rmin a c). elim H0; intros; assumption. unfold Rmin; case (Rle_dec a c); intro; [ reflexivity | elim n1; apply Rle_trans with b; assumption ]. rewrite <- (StepFun_P43 X0 X1 X2). apply Rle_lt_trans with (Rabs (RiemannInt_SF (mkStepFun X0)) + Rabs (RiemannInt_SF (mkStepFun X1))). apply Rabs_triang. rewrite (double_var eps); replace (RiemannInt_SF (mkStepFun X0)) with (RiemannInt_SF psi1). replace (RiemannInt_SF (mkStepFun X1)) with (RiemannInt_SF psi2). apply Rplus_lt_compat. elim H1; intros; assumption. elim H2; intros; assumption. apply Rle_antisym. 
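(* psi3 agrees with psi1 on the open interval (a,b) and with psi2 on (b,c), so each of the two replaced step integrals equals the original one; both equalities are proved by antisymmetry, comparing the step functions pointwise with StepFun_P37. *)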
apply StepFun_P37; try assumption. simpl; intros; unfold psi3; elim H0; clear H0; intros; case (Rle_dec a x); case (Rle_dec x b); intros; [ elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H0)) | right; reflexivity | elim n; apply Rle_trans with b; [ assumption | left; assumption ] | elim n0; apply Rle_trans with b; [ assumption | left; assumption ] ]. apply StepFun_P37; try assumption. simpl; intros; unfold psi3; elim H0; clear H0; intros; case (Rle_dec a x); case (Rle_dec x b); intros; [ elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H0)) | right; reflexivity | elim n; apply Rle_trans with b; [ assumption | left; assumption ] | elim n0; apply Rle_trans with b; [ assumption | left; assumption ] ]. apply Rle_antisym. apply StepFun_P37; try assumption. simpl; intros; unfold psi3; elim H0; clear H0; intros; case (Rle_dec a x); case (Rle_dec x b); intros; [ right; reflexivity | elim n; left; assumption | elim n; left; assumption | elim n0; left; assumption ]. apply StepFun_P37; try assumption. simpl; intros; unfold psi3; elim H0; clear H0; intros; case (Rle_dec a x); case (Rle_dec x b); intros; [ right; reflexivity | elim n; left; assumption | elim n; left; assumption | elim n0; left; assumption ]. apply StepFun_P46 with b; assumption. assert (H3 := pre psi2); unfold IsStepFun in H3; unfold is_subdivision in H3; elim H3; clear H3; intros l1 [lf1 H3]; split with l1; split with lf1; unfold adapted_couple in H3; decompose [and] H3; clear H3; unfold adapted_couple; repeat split; try assumption. intros; assert (H9 := H8 i H3); unfold constant_D_eq, open_interval; unfold constant_D_eq, open_interval in H9; intros; rewrite <- (H9 x H7); unfold psi3; assert (H10 : b < x). apply Rle_lt_trans with (pos_Rl l1 i). replace b with (Rmin b c). rewrite <- H5; elim (RList_P6 l1); intros; apply H10; try assumption. apply le_O_n. apply lt_trans with (pred (Rlength l1)); try assumption; apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H12 in H6; discriminate. unfold Rmin; case (Rle_dec b c); intro; [ reflexivity | elim n; assumption ]. elim H7; intros; assumption. case (Rle_dec a x); case (Rle_dec x b); intros; [ elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H10)) | reflexivity | elim n; apply Rle_trans with b; [ assumption | left; assumption ] | elim n0; apply Rle_trans with b; [ assumption | left; assumption ] ]. assert (H3 := pre psi1); unfold IsStepFun in H3; unfold is_subdivision in H3; elim H3; clear H3; intros l1 [lf1 H3]; split with l1; split with lf1; unfold adapted_couple in H3; decompose [and] H3; clear H3; unfold adapted_couple; repeat split; try assumption. intros; assert (H9 := H8 i H3); unfold constant_D_eq, open_interval; unfold constant_D_eq, open_interval in H9; intros; rewrite <- (H9 x H7); unfold psi3; assert (H10 : x <= b). apply Rle_trans with (pos_Rl l1 (S i)). elim H7; intros; left; assumption. replace b with (Rmax a b). rewrite <- H4; elim (RList_P6 l1); intros; apply H10; try assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H12 in H6; discriminate. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (H11 : a <= x). apply Rle_trans with (pos_Rl l1 i). replace a with (Rmin a b). rewrite <- H5; elim (RList_P6 l1); intros; apply H11; try assumption. apply le_O_n. apply lt_trans with (pred (Rlength l1)); try assumption; apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H13 in H6; discriminate. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. left; elim H7; intros; assumption. 
case (Rle_dec a x); case (Rle_dec x b); intros; reflexivity || elim n; assumption. apply StepFun_P46 with b. assert (H3 := pre phi1); unfold IsStepFun in H3; unfold is_subdivision in H3; elim H3; clear H3; intros l1 [lf1 H3]; split with l1; split with lf1; unfold adapted_couple in H3; decompose [and] H3; clear H3; unfold adapted_couple; repeat split; try assumption. intros; assert (H9 := H8 i H3); unfold constant_D_eq, open_interval; unfold constant_D_eq, open_interval in H9; intros; rewrite <- (H9 x H7); unfold psi3; assert (H10 : x <= b). apply Rle_trans with (pos_Rl l1 (S i)). elim H7; intros; left; assumption. replace b with (Rmax a b). rewrite <- H4; elim (RList_P6 l1); intros; apply H10; try assumption. apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H12 in H6; discriminate. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. assert (H11 : a <= x). apply Rle_trans with (pos_Rl l1 i). replace a with (Rmin a b). rewrite <- H5; elim (RList_P6 l1); intros; apply H11; try assumption. apply le_O_n. apply lt_trans with (pred (Rlength l1)); try assumption; apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H13 in H6; discriminate. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; assumption ]. left; elim H7; intros; assumption. unfold phi3; case (Rle_dec a x); case (Rle_dec x b); intros; reflexivity || elim n; assumption. assert (H3 := pre phi2); unfold IsStepFun in H3; unfold is_subdivision in H3; elim H3; clear H3; intros l1 [lf1 H3]; split with l1; split with lf1; unfold adapted_couple in H3; decompose [and] H3; clear H3; unfold adapted_couple; repeat split; try assumption. intros; assert (H9 := H8 i H3); unfold constant_D_eq, open_interval; unfold constant_D_eq, open_interval in H9; intros; rewrite <- (H9 x H7); unfold psi3; assert (H10 : b < x). apply Rle_lt_trans with (pos_Rl l1 i). replace b with (Rmin b c). rewrite <- H5; elim (RList_P6 l1); intros; apply H10; try assumption. apply le_O_n. apply lt_trans with (pred (Rlength l1)); try assumption; apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H12 in H6; discriminate. unfold Rmin; case (Rle_dec b c); intro; [ reflexivity | elim n; assumption ]. elim H7; intros; assumption. unfold phi3; case (Rle_dec a x); case (Rle_dec x b); intros; [ elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H10)) | reflexivity | elim n; apply Rle_trans with b; [ assumption | left; assumption ] | elim n0; apply Rle_trans with b; [ assumption | left; assumption ] ]. Qed. Lemma RiemannInt_P22 : forall (f:R -> R) (a b c:R), Riemann_integrable f a b -> a <= c <= b -> Riemann_integrable f a c. Proof. unfold Riemann_integrable; intros; elim (X eps); clear X; intros phi [psi H0]; elim H; elim H0; clear H H0; intros; assert (H3 : IsStepFun phi a c). apply StepFun_P44 with b. apply (pre phi). split; assumption. assert (H4 : IsStepFun psi a c). apply StepFun_P44 with b. apply (pre psi). split; assumption. split with (mkStepFun H3); split with (mkStepFun H4); split. simpl; intros; apply H. replace (Rmin a b) with (Rmin a c). elim H5; intros; split; try assumption. apply Rle_trans with (Rmax a c); try assumption. replace (Rmax a b) with b. replace (Rmax a c) with c. assumption. unfold Rmax; case (Rle_dec a c); intro; [ reflexivity | elim n; assumption ]. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. 
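(* With the interval inclusion established, it remains to bound the integral of the restricted psi over [a,c]: it equals the integral over [a,b] minus the integral over [c,b], and the removed part is nonnegative because psi dominates the pointwise error Rabs (f t - phi t), so the bound eps carries over. *)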
unfold Rmin; case (Rle_dec a c); case (Rle_dec a b); intros; [ reflexivity | elim n; apply Rle_trans with c; assumption | elim n; assumption | elim n0; assumption ]. rewrite Rabs_right. assert (H5 : IsStepFun psi c b). apply StepFun_P46 with a. apply StepFun_P6; assumption. apply (pre psi). replace (RiemannInt_SF (mkStepFun H4)) with (RiemannInt_SF psi - RiemannInt_SF (mkStepFun H5)). apply Rle_lt_trans with (RiemannInt_SF psi). unfold Rminus; pattern (RiemannInt_SF psi) at 2; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; rewrite <- Ropp_0; apply Ropp_ge_le_contravar; apply Rle_ge; replace 0 with (RiemannInt_SF (mkStepFun (StepFun_P4 c b 0))). apply StepFun_P37; try assumption. intros; simpl; unfold fct_cte; apply Rle_trans with (Rabs (f x - phi x)). apply Rabs_pos. apply H. replace (Rmin a b) with a. replace (Rmax a b) with b. elim H6; intros; split; left. apply Rle_lt_trans with c; assumption. assumption. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. rewrite StepFun_P18; ring. apply Rle_lt_trans with (Rabs (RiemannInt_SF psi)). apply RRle_abs. assumption. assert (H6 : IsStepFun psi a b). apply (pre psi). replace (RiemannInt_SF psi) with (RiemannInt_SF (mkStepFun H6)). rewrite <- (StepFun_P43 H4 H5 H6); ring. unfold RiemannInt_SF; case (Rle_dec a b); intro. eapply StepFun_P17. apply StepFun_P1. simpl; apply StepFun_P1. apply Ropp_eq_compat; eapply StepFun_P17. apply StepFun_P1. simpl; apply StepFun_P1. apply Rle_ge; replace 0 with (RiemannInt_SF (mkStepFun (StepFun_P4 a c 0))). apply StepFun_P37; try assumption. intros; simpl; unfold fct_cte; apply Rle_trans with (Rabs (f x - phi x)). apply Rabs_pos. apply H. replace (Rmin a b) with a. replace (Rmax a b) with b. elim H5; intros; split; left. assumption. apply Rlt_le_trans with c; assumption. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. rewrite StepFun_P18; ring. Qed. Lemma RiemannInt_P23 : forall (f:R -> R) (a b c:R), Riemann_integrable f a b -> a <= c <= b -> Riemann_integrable f c b. Proof. unfold Riemann_integrable; intros; elim (X eps); clear X; intros phi [psi H0]; elim H; elim H0; clear H H0; intros; assert (H3 : IsStepFun phi c b). apply StepFun_P45 with a. apply (pre phi). split; assumption. assert (H4 : IsStepFun psi c b). apply StepFun_P45 with a. apply (pre psi). split; assumption. split with (mkStepFun H3); split with (mkStepFun H4); split. simpl; intros; apply H. replace (Rmax a b) with (Rmax c b). elim H5; intros; split; try assumption. apply Rle_trans with (Rmin c b); try assumption. replace (Rmin a b) with a. replace (Rmin c b) with c. assumption. unfold Rmin; case (Rle_dec c b); intro; [ reflexivity | elim n; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. unfold Rmax; case (Rle_dec c b); case (Rle_dec a b); intros; [ reflexivity | elim n; apply Rle_trans with c; assumption | elim n; assumption | elim n0; assumption ]. rewrite Rabs_right. assert (H5 : IsStepFun psi a c). apply StepFun_P46 with b. apply (pre psi). apply StepFun_P6; assumption. replace (RiemannInt_SF (mkStepFun H4)) with (RiemannInt_SF psi - RiemannInt_SF (mkStepFun H5)). apply Rle_lt_trans with (RiemannInt_SF psi). 
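(* Same computation with the endpoints exchanged: the integral of psi over [c,b] is the integral over [a,b] minus the nonnegative integral over [a,c] (StepFun_P43), so it is still below eps. *)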
unfold Rminus; pattern (RiemannInt_SF psi) at 2; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; rewrite <- Ropp_0; apply Ropp_ge_le_contravar; apply Rle_ge; replace 0 with (RiemannInt_SF (mkStepFun (StepFun_P4 a c 0))). apply StepFun_P37; try assumption. intros; simpl; unfold fct_cte; apply Rle_trans with (Rabs (f x - phi x)). apply Rabs_pos. apply H. replace (Rmin a b) with a. replace (Rmax a b) with b. elim H6; intros; split; left. assumption. apply Rlt_le_trans with c; assumption. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. rewrite StepFun_P18; ring. apply Rle_lt_trans with (Rabs (RiemannInt_SF psi)). apply RRle_abs. assumption. assert (H6 : IsStepFun psi a b). apply (pre psi). replace (RiemannInt_SF psi) with (RiemannInt_SF (mkStepFun H6)). rewrite <- (StepFun_P43 H5 H4 H6); ring. unfold RiemannInt_SF; case (Rle_dec a b); intro. eapply StepFun_P17. apply StepFun_P1. simpl; apply StepFun_P1. apply Ropp_eq_compat; eapply StepFun_P17. apply StepFun_P1. simpl; apply StepFun_P1. apply Rle_ge; replace 0 with (RiemannInt_SF (mkStepFun (StepFun_P4 c b 0))). apply StepFun_P37; try assumption. intros; simpl; unfold fct_cte; apply Rle_trans with (Rabs (f x - phi x)). apply Rabs_pos. apply H. replace (Rmin a b) with a. replace (Rmax a b) with b. elim H5; intros; split; left. apply Rle_lt_trans with c; assumption. assumption. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n; apply Rle_trans with c; assumption ]. rewrite StepFun_P18; ring. Qed. Lemma RiemannInt_P24 : forall (f:R -> R) (a b c:R), Riemann_integrable f a b -> Riemann_integrable f b c -> Riemann_integrable f a c. Proof. intros; case (Rle_dec a b); case (Rle_dec b c); intros. apply RiemannInt_P21 with b; assumption. case (Rle_dec a c); intro. apply RiemannInt_P22 with b; try assumption. split; [ assumption | auto with real ]. apply RiemannInt_P1; apply RiemannInt_P22 with b. apply RiemannInt_P1; assumption. split; auto with real. case (Rle_dec a c); intro. apply RiemannInt_P23 with b; try assumption. split; auto with real. apply RiemannInt_P1; apply RiemannInt_P23 with b. apply RiemannInt_P1; assumption. split; [ assumption | auto with real ]. apply RiemannInt_P1; apply RiemannInt_P21 with b; auto with real || apply RiemannInt_P1; assumption. Qed. Lemma RiemannInt_P25 : forall (f:R -> R) (a b c:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable f b c) (pr3:Riemann_integrable f a c), a <= b -> b <= c -> RiemannInt pr1 + RiemannInt pr2 = RiemannInt pr3. Proof. intros f a b c pr1 pr2 pr3 Hyp1 Hyp2; unfold RiemannInt; case (RiemannInt_exists pr1 RinvN RinvN_cv); case (RiemannInt_exists pr2 RinvN RinvN_cv); case (RiemannInt_exists pr3 RinvN RinvN_cv); intros; symmetry ; eapply UL_sequence. apply u. unfold Un_cv; intros; assert (H0 : 0 < eps / 3). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (u1 _ H0); clear u1; intros N1 H1; elim (u0 _ H0); clear u0; intros N2 H2; cut (Un_cv (fun n:nat => RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n))) 0). 
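(* Informal reading: this is an eps/3 argument. The distance from the step integral over [a,c] to x1 + x0 (the limits associated with pr1 and pr2) is split below into three pieces, each made smaller than eps/3; the cut just above isolates the piece comparing the step integral over [a,c] with the sum of the step integrals over [a,b] and [b,c]. *)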
intro; elim (H3 _ H0); clear H3; intros N3 H3; set (N0 := max (max N1 N2) N3); exists N0; intros; unfold R_dist; apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n))) + Rabs (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n) - (x1 + x0))). replace (RiemannInt_SF (phi_sequence RinvN pr3 n) - (x1 + x0)) with (RiemannInt_SF (phi_sequence RinvN pr3 n) - (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n)) + (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n) - (x1 + x0))); [ apply Rabs_triang | ring ]. replace eps with (eps / 3 + eps / 3 + eps / 3). rewrite Rplus_assoc; apply Rplus_lt_compat. unfold R_dist in H3; cut (n >= N3)%nat. intro; assert (H6 := H3 _ H5); unfold Rminus in H6; rewrite Ropp_0 in H6; rewrite Rplus_0_r in H6; apply H6. unfold ge; apply le_trans with N0; [ unfold N0; apply le_max_r | assumption ]. apply Rle_lt_trans with (Rabs (RiemannInt_SF (phi_sequence RinvN pr1 n) - x1) + Rabs (RiemannInt_SF (phi_sequence RinvN pr2 n) - x0)). replace (RiemannInt_SF (phi_sequence RinvN pr1 n) + RiemannInt_SF (phi_sequence RinvN pr2 n) - (x1 + x0)) with (RiemannInt_SF (phi_sequence RinvN pr1 n) - x1 + (RiemannInt_SF (phi_sequence RinvN pr2 n) - x0)); [ apply Rabs_triang | ring ]. apply Rplus_lt_compat. unfold R_dist in H1; apply H1. unfold ge; apply le_trans with N0; [ apply le_trans with (max N1 N2); [ apply le_max_l | unfold N0; apply le_max_l ] | assumption ]. unfold R_dist in H2; apply H2. unfold ge; apply le_trans with N0; [ apply le_trans with (max N1 N2); [ apply le_max_r | unfold N0; apply le_max_l ] | assumption ]. apply Rmult_eq_reg_l with 3; [ unfold Rdiv; repeat rewrite Rmult_plus_distr_l; do 2 rewrite (Rmult_comm 3); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. clear x u x0 x1 eps H H0 N1 H1 N2 H2; assert (H1 : exists psi1 : nat -> StepFun a b, (forall n:nat, (forall t:R, Rmin a b <= t /\ t <= Rmax a b -> Rabs (f t - phi_sequence RinvN pr1 n t) <= psi1 n t) /\ Rabs (RiemannInt_SF (psi1 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr1 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr1 n)). assert (H2 : exists psi2 : nat -> StepFun b c, (forall n:nat, (forall t:R, Rmin b c <= t /\ t <= Rmax b c -> Rabs (f t - phi_sequence RinvN pr2 n t) <= psi2 n t) /\ Rabs (RiemannInt_SF (psi2 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr2 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr2 n)). assert (H3 : exists psi3 : nat -> StepFun a c, (forall n:nat, (forall t:R, Rmin a c <= t /\ t <= Rmax a c -> Rabs (f t - phi_sequence RinvN pr3 n t) <= psi3 n t) /\ Rabs (RiemannInt_SF (psi3 n)) < RinvN n)). split with (fun n:nat => proj1_sig (phi_sequence_prop RinvN pr3 n)); intro; apply (proj2_sig (phi_sequence_prop RinvN pr3 n)). elim H1; clear H1; intros psi1 H1; elim H2; clear H2; intros psi2 H2; elim H3; clear H3; intros psi3 H3; assert (H := RinvN_cv); unfold Un_cv; intros; assert (H4 : 0 < eps / 3). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H _ H4); clear H; intros N0 H; assert (H5 : forall n:nat, (n >= N0)%nat -> RinvN n < eps / 3). intros; replace (pos (RinvN n)) with (R_dist (mkposreal (/ (INR n + 1)) (RinvN_pos n)) 0). apply H; assumption. 
unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply Rabs_right; apply Rle_ge; left; apply (cond_pos (RinvN n)). exists N0; intros; elim (H1 n); elim (H2 n); elim (H3 n); clear H1 H2 H3; intros; unfold R_dist; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; set (phi1 := phi_sequence RinvN pr1 n) in H8 |- *; set (phi2 := phi_sequence RinvN pr2 n) in H3 |- *; set (phi3 := phi_sequence RinvN pr3 n) in H1 |- *; assert (H10 : IsStepFun phi3 a b). apply StepFun_P44 with c. apply (pre phi3). split; assumption. assert (H11 : IsStepFun (psi3 n) a b). apply StepFun_P44 with c. apply (pre (psi3 n)). split; assumption. assert (H12 : IsStepFun phi3 b c). apply StepFun_P45 with a. apply (pre phi3). split; assumption. assert (H13 : IsStepFun (psi3 n) b c). apply StepFun_P45 with a. apply (pre (psi3 n)). split; assumption. replace (RiemannInt_SF phi3) with (RiemannInt_SF (mkStepFun H10) + RiemannInt_SF (mkStepFun H12)). apply Rle_lt_trans with (Rabs (RiemannInt_SF (mkStepFun H10) - RiemannInt_SF phi1) + Rabs (RiemannInt_SF (mkStepFun H12) - RiemannInt_SF phi2)). replace (RiemannInt_SF (mkStepFun H10) + RiemannInt_SF (mkStepFun H12) + - (RiemannInt_SF phi1 + RiemannInt_SF phi2)) with (RiemannInt_SF (mkStepFun H10) - RiemannInt_SF phi1 + (RiemannInt_SF (mkStepFun H12) - RiemannInt_SF phi2)); [ apply Rabs_triang | ring ]. replace (RiemannInt_SF (mkStepFun H10) - RiemannInt_SF phi1) with (RiemannInt_SF (mkStepFun (StepFun_P28 (-1) (mkStepFun H10) phi1))). replace (RiemannInt_SF (mkStepFun H12) - RiemannInt_SF phi2) with (RiemannInt_SF (mkStepFun (StepFun_P28 (-1) (mkStepFun H12) phi2))). apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (mkStepFun H10) phi1)))) + RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (mkStepFun H12) phi2))))). apply Rle_trans with (Rabs (RiemannInt_SF (mkStepFun (StepFun_P28 (-1) (mkStepFun H10) phi1))) + RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (mkStepFun H12) phi2))))). apply Rplus_le_compat_l. apply StepFun_P34; try assumption. do 2 rewrite <- (Rplus_comm (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (mkStepFun H12) phi2)))))) ; apply Rplus_le_compat_l; apply StepFun_P34; try assumption. apply Rle_lt_trans with (RiemannInt_SF (mkStepFun (StepFun_P28 1 (mkStepFun H11) (psi1 n))) + RiemannInt_SF (mkStepFun (StepFun_P28 1 (mkStepFun H13) (psi2 n)))). apply Rle_trans with (RiemannInt_SF (mkStepFun (StepFun_P32 (mkStepFun (StepFun_P28 (-1) (mkStepFun H10) phi1)))) + RiemannInt_SF (mkStepFun (StepFun_P28 1 (mkStepFun H13) (psi2 n)))). apply Rplus_le_compat_l; apply StepFun_P37; try assumption. intros; simpl; rewrite Rmult_1_l; apply Rle_trans with (Rabs (f x - phi3 x) + Rabs (f x - phi2 x)). rewrite <- (Rabs_Ropp (f x - phi3 x)); rewrite Ropp_minus_distr; replace (phi3 x + -1 * phi2 x) with (phi3 x - f x + (f x - phi2 x)); [ apply Rabs_triang | ring ]. apply Rplus_le_compat. apply H1. elim H14; intros; split. replace (Rmin a c) with a. apply Rle_trans with b; try assumption. left; assumption. unfold Rmin; case (Rle_dec a c); intro; [ reflexivity | elim n0; apply Rle_trans with b; assumption ]. replace (Rmax a c) with c. left; assumption. unfold Rmax; case (Rle_dec a c); intro; [ reflexivity | elim n0; apply Rle_trans with b; assumption ]. apply H3. elim H14; intros; split. replace (Rmin b c) with b. left; assumption. unfold Rmin; case (Rle_dec b c); intro; [ reflexivity | elim n0; assumption ]. replace (Rmax b c) with c. left; assumption. 
unfold Rmax; case (Rle_dec b c); intro; [ reflexivity | elim n0; assumption ]. do 2 rewrite <- (Rplus_comm (RiemannInt_SF (mkStepFun (StepFun_P28 1 (mkStepFun H13) (psi2 n))))) ; apply Rplus_le_compat_l; apply StepFun_P37; try assumption. intros; simpl; rewrite Rmult_1_l; apply Rle_trans with (Rabs (f x - phi3 x) + Rabs (f x - phi1 x)). rewrite <- (Rabs_Ropp (f x - phi3 x)); rewrite Ropp_minus_distr; replace (phi3 x + -1 * phi1 x) with (phi3 x - f x + (f x - phi1 x)); [ apply Rabs_triang | ring ]. apply Rplus_le_compat. apply H1. elim H14; intros; split. replace (Rmin a c) with a. left; assumption. unfold Rmin; case (Rle_dec a c); intro; [ reflexivity | elim n0; apply Rle_trans with b; assumption ]. replace (Rmax a c) with c. apply Rle_trans with b. left; assumption. assumption. unfold Rmax; case (Rle_dec a c); intro; [ reflexivity | elim n0; apply Rle_trans with b; assumption ]. apply H8. elim H14; intros; split. replace (Rmin a b) with a. left; assumption. unfold Rmin; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. replace (Rmax a b) with b. left; assumption. unfold Rmax; case (Rle_dec a b); intro; [ reflexivity | elim n0; assumption ]. do 2 rewrite StepFun_P30. do 2 rewrite Rmult_1_l; replace (RiemannInt_SF (mkStepFun H11) + RiemannInt_SF (psi1 n) + (RiemannInt_SF (mkStepFun H13) + RiemannInt_SF (psi2 n))) with (RiemannInt_SF (psi3 n) + RiemannInt_SF (psi1 n) + RiemannInt_SF (psi2 n)). replace eps with (eps / 3 + eps / 3 + eps / 3). repeat rewrite Rplus_assoc; repeat apply Rplus_lt_compat. apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi3 n))). apply RRle_abs. apply Rlt_trans with (pos (RinvN n)). assumption. apply H5; assumption. apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi1 n))). apply RRle_abs. apply Rlt_trans with (pos (RinvN n)). assumption. apply H5; assumption. apply Rle_lt_trans with (Rabs (RiemannInt_SF (psi2 n))). apply RRle_abs. apply Rlt_trans with (pos (RinvN n)). assumption. apply H5; assumption. apply Rmult_eq_reg_l with 3; [ unfold Rdiv; repeat rewrite Rmult_plus_distr_l; do 2 rewrite (Rmult_comm 3); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | discrR ] | discrR ]. replace (RiemannInt_SF (psi3 n)) with (RiemannInt_SF (mkStepFun (pre (psi3 n)))). rewrite <- (StepFun_P43 H11 H13 (pre (psi3 n))); ring. reflexivity. rewrite StepFun_P30; ring. rewrite StepFun_P30; ring. apply (StepFun_P43 H10 H12 (pre phi3)). Qed. Lemma RiemannInt_P26 : forall (f:R -> R) (a b c:R) (pr1:Riemann_integrable f a b) (pr2:Riemann_integrable f b c) (pr3:Riemann_integrable f a c), RiemannInt pr1 + RiemannInt pr2 = RiemannInt pr3. Proof. intros; case (Rle_dec a b); case (Rle_dec b c); intros. apply RiemannInt_P25; assumption. case (Rle_dec a c); intro. assert (H : c <= b). auto with real. rewrite <- (RiemannInt_P25 pr3 (RiemannInt_P1 pr2) pr1 r0 H); rewrite (RiemannInt_P8 pr2 (RiemannInt_P1 pr2)); ring. assert (H : c <= a). auto with real. rewrite (RiemannInt_P8 pr2 (RiemannInt_P1 pr2)); rewrite <- (RiemannInt_P25 (RiemannInt_P1 pr3) pr1 (RiemannInt_P1 pr2) H r); rewrite (RiemannInt_P8 pr3 (RiemannInt_P1 pr3)); ring. assert (H : b <= a). auto with real. case (Rle_dec a c); intro. rewrite <- (RiemannInt_P25 (RiemannInt_P1 pr1) pr3 pr2 H r0); rewrite (RiemannInt_P8 pr1 (RiemannInt_P1 pr1)); ring. assert (H0 : c <= a). auto with real. rewrite (RiemannInt_P8 pr1 (RiemannInt_P1 pr1)); rewrite <- (RiemannInt_P25 pr2 (RiemannInt_P1 pr3) (RiemannInt_P1 pr1) r H0); rewrite (RiemannInt_P8 pr3 (RiemannInt_P1 pr3)); ring. 
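(* Remaining case: neither a <= b nor b <= c holds, hence c <= b <= a; all three integrals are reversed with RiemannInt_P8 and RiemannInt_P1, and RiemannInt_P25 is applied to the reversed bounds. *)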
rewrite (RiemannInt_P8 pr1 (RiemannInt_P1 pr1)); rewrite (RiemannInt_P8 pr2 (RiemannInt_P1 pr2)); rewrite (RiemannInt_P8 pr3 (RiemannInt_P1 pr3)); rewrite <- (RiemannInt_P25 (RiemannInt_P1 pr2) (RiemannInt_P1 pr1) (RiemannInt_P1 pr3)) ; [ ring | auto with real | auto with real ]. Qed. Lemma RiemannInt_P27 : forall (f:R -> R) (a b x:R) (h:a <= b) (C0:forall x:R, a <= x <= b -> continuity_pt f x), a < x < b -> derivable_pt_lim (primitive h (FTC_P1 h C0)) x (f x). Proof. intro f; intros; elim H; clear H; intros; assert (H1 : continuity_pt f x). apply C0; split; left; assumption. unfold derivable_pt_lim; intros; assert (Hyp : 0 < eps / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H1 _ Hyp); unfold dist, D_x, no_cond; simpl; unfold R_dist; intros; set (del := Rmin x0 (Rmin (b - x) (x - a))); assert (H4 : 0 < del). unfold del; unfold Rmin; case (Rle_dec (b - x) (x - a)); intro. case (Rle_dec x0 (b - x)); intro; [ elim H3; intros; assumption | apply Rlt_Rminus; assumption ]. case (Rle_dec x0 (x - a)); intro; [ elim H3; intros; assumption | apply Rlt_Rminus; assumption ]. split with (mkposreal _ H4); intros; assert (H7 : Riemann_integrable f x (x + h0)). case (Rle_dec x (x + h0)); intro. apply continuity_implies_RiemannInt; try assumption. intros; apply C0; elim H7; intros; split. apply Rle_trans with x; [ left; assumption | assumption ]. apply Rle_trans with (x + h0). assumption. left; apply Rlt_le_trans with (x + del). apply Rplus_lt_compat_l; apply Rle_lt_trans with (Rabs h0); [ apply RRle_abs | apply H6 ]. unfold del; apply Rle_trans with (x + Rmin (b - x) (x - a)). apply Rplus_le_compat_l; apply Rmin_r. pattern b at 2; replace b with (x + (b - x)); [ apply Rplus_le_compat_l; apply Rmin_l | ring ]. apply RiemannInt_P1; apply continuity_implies_RiemannInt; auto with real. intros; apply C0; elim H7; intros; split. apply Rle_trans with (x + h0). left; apply Rle_lt_trans with (x - del). unfold del; apply Rle_trans with (x - Rmin (b - x) (x - a)). pattern a at 1; replace a with (x + (a - x)); [ idtac | ring ]. unfold Rminus; apply Rplus_le_compat_l; apply Ropp_le_cancel. rewrite Ropp_involutive; rewrite Ropp_plus_distr; rewrite Ropp_involutive; rewrite (Rplus_comm x); apply Rmin_r. unfold Rminus; apply Rplus_le_compat_l; apply Ropp_le_cancel. do 2 rewrite Ropp_involutive; apply Rmin_r. unfold Rminus; apply Rplus_lt_compat_l; apply Ropp_lt_cancel. rewrite Ropp_involutive; apply Rle_lt_trans with (Rabs h0); [ rewrite <- Rabs_Ropp; apply RRle_abs | apply H6 ]. assumption. apply Rle_trans with x; [ assumption | left; assumption ]. replace (primitive h (FTC_P1 h C0) (x + h0) - primitive h (FTC_P1 h C0) x) with (RiemannInt H7). replace (f x) with (RiemannInt (RiemannInt_P14 x (x + h0) (f x)) / h0). replace (RiemannInt H7 / h0 - RiemannInt (RiemannInt_P14 x (x + h0) (f x)) / h0) with ((RiemannInt H7 - RiemannInt (RiemannInt_P14 x (x + h0) (f x))) / h0). replace (RiemannInt H7 - RiemannInt (RiemannInt_P14 x (x + h0) (f x))) with (RiemannInt (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x)))). unfold Rdiv; rewrite Rabs_mult; case (Rle_dec x (x + h0)); intro. apply Rle_lt_trans with (RiemannInt (RiemannInt_P16 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x)))) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply (RiemannInt_P17 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x))) (RiemannInt_P16 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x))))); assumption. 
apply Rle_lt_trans with (RiemannInt (RiemannInt_P14 x (x + h0) (eps / 2)) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply RiemannInt_P19; try assumption. intros; replace (f x1 + -1 * fct_cte (f x) x1) with (f x1 - f x). unfold fct_cte; case (Req_dec x x1); intro. rewrite H9; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; left; assumption. elim H3; intros; left; apply H11. repeat split. assumption. rewrite Rabs_right. apply Rplus_lt_reg_r with x; replace (x + (x1 - x)) with x1; [ idtac | ring ]. apply Rlt_le_trans with (x + h0). elim H8; intros; assumption. apply Rplus_le_compat_l; apply Rle_trans with del. left; apply Rle_lt_trans with (Rabs h0); [ apply RRle_abs | assumption ]. unfold del; apply Rmin_l. apply Rge_minus; apply Rle_ge; left; elim H8; intros; assumption. unfold fct_cte; ring. rewrite RiemannInt_P15. rewrite Rmult_assoc; replace ((x + h0 - x) * Rabs (/ h0)) with 1. rewrite Rmult_1_r; unfold Rdiv; apply Rmult_lt_reg_l with 2; [ prove_sup0 | rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; pattern eps at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite Rabs_right. replace (x + h0 - x) with h0; [ idtac | ring ]. apply Rinv_r_sym. assumption. apply Rle_ge; left; apply Rinv_0_lt_compat. elim r; intro. apply Rplus_lt_reg_r with x; rewrite Rplus_0_r; assumption. elim H5; symmetry ; apply Rplus_eq_reg_l with x; rewrite Rplus_0_r; assumption. apply Rle_lt_trans with (RiemannInt (RiemannInt_P16 (RiemannInt_P1 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x))))) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. replace (RiemannInt (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x)))) with (- RiemannInt (RiemannInt_P1 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x))))). rewrite Rabs_Ropp; apply (RiemannInt_P17 (RiemannInt_P1 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x)))) (RiemannInt_P16 (RiemannInt_P1 (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x)))))); auto with real. symmetry ; apply RiemannInt_P8. apply Rle_lt_trans with (RiemannInt (RiemannInt_P14 (x + h0) x (eps / 2)) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply RiemannInt_P19. auto with real. intros; replace (f x1 + -1 * fct_cte (f x) x1) with (f x1 - f x). unfold fct_cte; case (Req_dec x x1); intro. rewrite H9; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; left; assumption. elim H3; intros; left; apply H11. repeat split. assumption. rewrite Rabs_left. apply Rplus_lt_reg_r with (x1 - x0); replace (x1 - x0 + x0) with x1; [ idtac | ring ]. replace (x1 - x0 + - (x1 - x)) with (x - x0); [ idtac | ring ]. apply Rle_lt_trans with (x + h0). unfold Rminus; apply Rplus_le_compat_l; apply Ropp_le_cancel. rewrite Ropp_involutive; apply Rle_trans with (Rabs h0). rewrite <- Rabs_Ropp; apply RRle_abs. apply Rle_trans with del; [ left; assumption | unfold del; apply Rmin_l ]. elim H8; intros; assumption. apply Rplus_lt_reg_r with x; rewrite Rplus_0_r; replace (x + (x1 - x)) with x1; [ elim H8; intros; assumption | ring ]. unfold fct_cte; ring. rewrite RiemannInt_P15. rewrite Rmult_assoc; replace ((x - (x + h0)) * Rabs (/ h0)) with 1. 
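(* In this branch x + h0 < x, so / h0 < 0 and Rabs (/ h0) = - / h0; hence (x - (x + h0)) * Rabs (/ h0) = (- h0) * (- / h0) = 1, which is checked just below with Rabs_left and Rinv_lt_0_compat. *)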
rewrite Rmult_1_r; unfold Rdiv; apply Rmult_lt_reg_l with 2; [ prove_sup0 | rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; pattern eps at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite Rabs_left. replace (x - (x + h0)) with (- h0); [ idtac | ring ]. rewrite Ropp_mult_distr_l_reverse; rewrite Ropp_mult_distr_r_reverse; rewrite Ropp_involutive; apply Rinv_r_sym. assumption. apply Rinv_lt_0_compat. assert (H8 : x + h0 < x). auto with real. apply Rplus_lt_reg_r with x; rewrite Rplus_0_r; assumption. rewrite (RiemannInt_P13 H7 (RiemannInt_P14 x (x + h0) (f x)) (RiemannInt_P10 (-1) H7 (RiemannInt_P14 x (x + h0) (f x)))) . ring. unfold Rdiv, Rminus; rewrite Rmult_plus_distr_r; ring. rewrite RiemannInt_P15; apply Rmult_eq_reg_l with h0; [ unfold Rdiv; rewrite (Rmult_comm h0); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | assumption ] | assumption ]. cut (a <= x + h0). cut (x + h0 <= b). intros; unfold primitive. case (Rle_dec a (x + h0)); case (Rle_dec (x + h0) b); case (Rle_dec a x); case (Rle_dec x b); intros; try (elim n; assumption || left; assumption). rewrite <- (RiemannInt_P26 (FTC_P1 h C0 r0 r) H7 (FTC_P1 h C0 r2 r1)); ring. apply Rplus_le_reg_l with (- x); replace (- x + (x + h0)) with h0; [ idtac | ring ]. rewrite Rplus_comm; apply Rle_trans with (Rabs h0). apply RRle_abs. apply Rle_trans with del; [ left; assumption | unfold del; apply Rle_trans with (Rmin (b - x) (x - a)); [ apply Rmin_r | apply Rmin_l ] ]. apply Ropp_le_cancel; apply Rplus_le_reg_l with x; replace (x + - (x + h0)) with (- h0); [ idtac | ring ]. apply Rle_trans with (Rabs h0); [ rewrite <- Rabs_Ropp; apply RRle_abs | apply Rle_trans with del; [ left; assumption | unfold del; apply Rle_trans with (Rmin (b - x) (x - a)); apply Rmin_r ] ]. Qed. Lemma RiemannInt_P28 : forall (f:R -> R) (a b x:R) (h:a <= b) (C0:forall x:R, a <= x <= b -> continuity_pt f x), a <= x <= b -> derivable_pt_lim (primitive h (FTC_P1 h C0)) x (f x). Proof. intro f; intros; elim h; intro. elim H; clear H; intros; elim H; intro. elim H1; intro. apply RiemannInt_P27; split; assumption. set (f_b := fun x:R => f b * (x - b) + RiemannInt (FTC_P1 h C0 h (Rle_refl b))); rewrite H3. assert (H4 : derivable_pt_lim f_b b (f b)). unfold f_b; pattern (f b) at 2; replace (f b) with (f b + 0). change (derivable_pt_lim ((fct_cte (f b) * (id - fct_cte b))%F + fct_cte (RiemannInt (FTC_P1 h C0 h (Rle_refl b)))) b ( f b + 0)). apply derivable_pt_lim_plus. pattern (f b) at 2; replace (f b) with (0 * (id - fct_cte b)%F b + fct_cte (f b) b * 1). apply derivable_pt_lim_mult. apply derivable_pt_lim_const. replace 1 with (1 - 0); [ idtac | ring ]. apply derivable_pt_lim_minus. apply derivable_pt_lim_id. apply derivable_pt_lim_const. unfold fct_cte; ring. apply derivable_pt_lim_const. ring. unfold derivable_pt_lim; intros; elim (H4 _ H5); intros; assert (H7 : continuity_pt f b). apply C0; split; [ left; assumption | right; reflexivity ]. assert (H8 : 0 < eps / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H7 _ H8); unfold D_x, no_cond, dist; simpl; unfold R_dist; intros; set (del := Rmin x0 (Rmin x1 (b - a))); assert (H10 : 0 < del). unfold del; unfold Rmin; case (Rle_dec x1 (b - a)); intros. case (Rle_dec x0 x1); intro; [ apply (cond_pos x0) | elim H9; intros; assumption ]. case (Rle_dec x0 (b - a)); intro; [ apply (cond_pos x0) | apply Rlt_Rminus; assumption ]. 
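(* Case x = b of RiemannInt_P28: del is positive and serves as the modulus; the sign of h0 is then examined with Rcase_abs. For h0 < 0 the difference quotient is rewritten with an integral over [b + h0, b], while for h0 >= 0 the proof uses that the primitive coincides with the affine extension f_b at b and at b + h0. *)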
split with (mkposreal _ H10); intros; case (Rcase_abs h0); intro. assert (H14 : b + h0 < b). pattern b at 2; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; assumption. assert (H13 : Riemann_integrable f (b + h0) b). apply continuity_implies_RiemannInt. left; assumption. intros; apply C0; elim H13; intros; split; try assumption. apply Rle_trans with (b + h0); try assumption. apply Rplus_le_reg_l with (- a - h0). replace (- a - h0 + a) with (- h0); [ idtac | ring ]. replace (- a - h0 + (b + h0)) with (b - a); [ idtac | ring ]. apply Rle_trans with del. apply Rle_trans with (Rabs h0). rewrite <- Rabs_Ropp; apply RRle_abs. left; assumption. unfold del; apply Rle_trans with (Rmin x1 (b - a)); apply Rmin_r. replace (primitive h (FTC_P1 h C0) (b + h0) - primitive h (FTC_P1 h C0) b) with (- RiemannInt H13). replace (f b) with (- RiemannInt (RiemannInt_P14 (b + h0) b (f b)) / h0). rewrite <- Rabs_Ropp; unfold Rminus; unfold Rdiv; rewrite Ropp_mult_distr_l_reverse; rewrite Ropp_plus_distr; repeat rewrite Ropp_involutive; replace (RiemannInt H13 * / h0 + - RiemannInt (RiemannInt_P14 (b + h0) b (f b)) * / h0) with ((RiemannInt H13 - RiemannInt (RiemannInt_P14 (b + h0) b (f b))) / h0). replace (RiemannInt H13 - RiemannInt (RiemannInt_P14 (b + h0) b (f b))) with (RiemannInt (RiemannInt_P10 (-1) H13 (RiemannInt_P14 (b + h0) b (f b)))). unfold Rdiv; rewrite Rabs_mult; apply Rle_lt_trans with (RiemannInt (RiemannInt_P16 (RiemannInt_P10 (-1) H13 (RiemannInt_P14 (b + h0) b (f b)))) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply (RiemannInt_P17 (RiemannInt_P10 (-1) H13 (RiemannInt_P14 (b + h0) b (f b))) (RiemannInt_P16 (RiemannInt_P10 (-1) H13 (RiemannInt_P14 (b + h0) b (f b))))); left; assumption. apply Rle_lt_trans with (RiemannInt (RiemannInt_P14 (b + h0) b (eps / 2)) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply RiemannInt_P19. left; assumption. intros; replace (f x2 + -1 * fct_cte (f b) x2) with (f x2 - f b). unfold fct_cte; case (Req_dec b x2); intro. rewrite H16; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; left; assumption. elim H9; intros; left; apply H18. repeat split. assumption. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; rewrite Rabs_right. apply Rplus_lt_reg_r with (x2 - x1); replace (x2 - x1 + (b - x2)) with (b - x1); [ idtac | ring ]. replace (x2 - x1 + x1) with x2; [ idtac | ring ]. apply Rlt_le_trans with (b + h0). 2: elim H15; intros; left; assumption. unfold Rminus; apply Rplus_lt_compat_l; apply Ropp_lt_cancel; rewrite Ropp_involutive; apply Rle_lt_trans with (Rabs h0). rewrite <- Rabs_Ropp; apply RRle_abs. apply Rlt_le_trans with del; [ assumption | unfold del; apply Rle_trans with (Rmin x1 (b - a)); [ apply Rmin_r | apply Rmin_l ] ]. apply Rle_ge; left; apply Rlt_Rminus; elim H15; intros; assumption. unfold fct_cte; ring. rewrite RiemannInt_P15. rewrite Rmult_assoc; replace ((b - (b + h0)) * Rabs (/ h0)) with 1. rewrite Rmult_1_r; unfold Rdiv; apply Rmult_lt_reg_l with 2; [ prove_sup0 | rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; pattern eps at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite Rabs_left. apply Rmult_eq_reg_l with h0; [ do 2 rewrite (Rmult_comm h0); rewrite Rmult_assoc; rewrite Ropp_mult_distr_l_reverse; rewrite <- Rinv_l_sym; [ ring | assumption ] | assumption ]. apply Rinv_lt_0_compat; assumption. 
rewrite (RiemannInt_P13 H13 (RiemannInt_P14 (b + h0) b (f b)) (RiemannInt_P10 (-1) H13 (RiemannInt_P14 (b + h0) b (f b)))) ; ring. unfold Rdiv, Rminus; rewrite Rmult_plus_distr_r; ring. rewrite RiemannInt_P15. rewrite <- Ropp_mult_distr_l_reverse; apply Rmult_eq_reg_l with h0; [ repeat rewrite (Rmult_comm h0); unfold Rdiv; repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ ring | assumption ] | assumption ]. cut (a <= b + h0). cut (b + h0 <= b). intros; unfold primitive; case (Rle_dec a (b + h0)); case (Rle_dec (b + h0) b); case (Rle_dec a b); case (Rle_dec b b); intros; try (elim n; right; reflexivity) || (elim n; left; assumption). rewrite <- (RiemannInt_P26 (FTC_P1 h C0 r3 r2) H13 (FTC_P1 h C0 r1 r0)); ring. elim n; assumption. left; assumption. apply Rplus_le_reg_l with (- a - h0). replace (- a - h0 + a) with (- h0); [ idtac | ring ]. replace (- a - h0 + (b + h0)) with (b - a); [ idtac | ring ]. apply Rle_trans with del. apply Rle_trans with (Rabs h0). rewrite <- Rabs_Ropp; apply RRle_abs. left; assumption. unfold del; apply Rle_trans with (Rmin x1 (b - a)); apply Rmin_r. cut (primitive h (FTC_P1 h C0) b = f_b b). intro; cut (primitive h (FTC_P1 h C0) (b + h0) = f_b (b + h0)). intro; rewrite H13; rewrite H14; apply H6. assumption. apply Rlt_le_trans with del; [ assumption | unfold del; apply Rmin_l ]. assert (H14 : b < b + h0). pattern b at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l. assert (H14 := Rge_le _ _ r); elim H14; intro. assumption. elim H11; symmetry ; assumption. unfold primitive; case (Rle_dec a (b + h0)); case (Rle_dec (b + h0) b); intros; [ elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 H14)) | unfold f_b; reflexivity | elim n; left; apply Rlt_trans with b; assumption | elim n0; left; apply Rlt_trans with b; assumption ]. unfold f_b; unfold Rminus; rewrite Rplus_opp_r; rewrite Rmult_0_r; rewrite Rplus_0_l; unfold primitive; case (Rle_dec a b); case (Rle_dec b b); intros; [ apply RiemannInt_P5 | elim n; right; reflexivity | elim n; left; assumption | elim n; right; reflexivity ]. (*****) set (f_a := fun x:R => f a * (x - a)); rewrite <- H2; assert (H3 : derivable_pt_lim f_a a (f a)). unfold f_a; change (derivable_pt_lim (fct_cte (f a) * (id - fct_cte a)%F) a (f a)) ; pattern (f a) at 2; replace (f a) with (0 * (id - fct_cte a)%F a + fct_cte (f a) a * 1). apply derivable_pt_lim_mult. apply derivable_pt_lim_const. replace 1 with (1 - 0); [ idtac | ring ]. apply derivable_pt_lim_minus. apply derivable_pt_lim_id. apply derivable_pt_lim_const. unfold fct_cte; ring. unfold derivable_pt_lim; intros; elim (H3 _ H4); intros. assert (H6 : continuity_pt f a). apply C0; split; [ right; reflexivity | left; assumption ]. assert (H7 : 0 < eps / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. elim (H6 _ H7); unfold D_x, no_cond, dist; simpl; unfold R_dist; intros. set (del := Rmin x0 (Rmin x1 (b - a))). assert (H9 : 0 < del). unfold del; unfold Rmin. case (Rle_dec x1 (b - a)); intros. case (Rle_dec x0 x1); intro. apply (cond_pos x0). elim H8; intros; assumption. case (Rle_dec x0 (b - a)); intro. apply (cond_pos x0). apply Rlt_Rminus; assumption. split with (mkposreal _ H9). intros; case (Rcase_abs h0); intro. assert (H12 : a + h0 < a). pattern a at 2; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; assumption. unfold primitive. case (Rle_dec a (a + h0)); case (Rle_dec (a + h0) b); case (Rle_dec a a); case (Rle_dec a b); intros; try (elim n; left; assumption) || (elim n; right; reflexivity). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r3 H12)). 
elim n; left; apply Rlt_trans with a; assumption. rewrite RiemannInt_P9; replace 0 with (f_a a). replace (f a * (a + h0 - a)) with (f_a (a + h0)). apply H5; try assumption. apply Rlt_le_trans with del; [ assumption | unfold del; apply Rmin_l ]. unfold f_a; ring. unfold f_a; ring. elim n; left; apply Rlt_trans with a; assumption. assert (H12 : a < a + h0). pattern a at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l. assert (H12 := Rge_le _ _ r); elim H12; intro. assumption. elim H10; symmetry ; assumption. assert (H13 : Riemann_integrable f a (a + h0)). apply continuity_implies_RiemannInt. left; assumption. intros; apply C0; elim H13; intros; split; try assumption. apply Rle_trans with (a + h0); try assumption. apply Rplus_le_reg_l with (- b - h0). replace (- b - h0 + b) with (- h0); [ idtac | ring ]. replace (- b - h0 + (a + h0)) with (a - b); [ idtac | ring ]. apply Ropp_le_cancel; rewrite Ropp_involutive; rewrite Ropp_minus_distr; apply Rle_trans with del. apply Rle_trans with (Rabs h0); [ apply RRle_abs | left; assumption ]. unfold del; apply Rle_trans with (Rmin x1 (b - a)); apply Rmin_r. replace (primitive h (FTC_P1 h C0) (a + h0) - primitive h (FTC_P1 h C0) a) with (RiemannInt H13). replace (f a) with (RiemannInt (RiemannInt_P14 a (a + h0) (f a)) / h0). replace (RiemannInt H13 / h0 - RiemannInt (RiemannInt_P14 a (a + h0) (f a)) / h0) with ((RiemannInt H13 - RiemannInt (RiemannInt_P14 a (a + h0) (f a))) / h0). replace (RiemannInt H13 - RiemannInt (RiemannInt_P14 a (a + h0) (f a))) with (RiemannInt (RiemannInt_P10 (-1) H13 (RiemannInt_P14 a (a + h0) (f a)))). unfold Rdiv; rewrite Rabs_mult; apply Rle_lt_trans with (RiemannInt (RiemannInt_P16 (RiemannInt_P10 (-1) H13 (RiemannInt_P14 a (a + h0) (f a)))) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply (RiemannInt_P17 (RiemannInt_P10 (-1) H13 (RiemannInt_P14 a (a + h0) (f a))) (RiemannInt_P16 (RiemannInt_P10 (-1) H13 (RiemannInt_P14 a (a + h0) (f a))))); left; assumption. apply Rle_lt_trans with (RiemannInt (RiemannInt_P14 a (a + h0) (eps / 2)) * Rabs (/ h0)). do 2 rewrite <- (Rmult_comm (Rabs (/ h0))); apply Rmult_le_compat_l. apply Rabs_pos. apply RiemannInt_P19. left; assumption. intros; replace (f x2 + -1 * fct_cte (f a) x2) with (f x2 - f a). unfold fct_cte; case (Req_dec a x2); intro. rewrite H15; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; left; assumption. elim H8; intros; left; apply H17; repeat split. assumption. rewrite Rabs_right. apply Rplus_lt_reg_r with a; replace (a + (x2 - a)) with x2; [ idtac | ring ]. apply Rlt_le_trans with (a + h0). elim H14; intros; assumption. apply Rplus_le_compat_l; left; apply Rle_lt_trans with (Rabs h0). apply RRle_abs. apply Rlt_le_trans with del; [ assumption | unfold del; apply Rle_trans with (Rmin x1 (b - a)); [ apply Rmin_r | apply Rmin_l ] ]. apply Rle_ge; left; apply Rlt_Rminus; elim H14; intros; assumption. unfold fct_cte; ring. rewrite RiemannInt_P15. rewrite Rmult_assoc; replace ((a + h0 - a) * Rabs (/ h0)) with 1. rewrite Rmult_1_r; unfold Rdiv; apply Rmult_lt_reg_l with 2; [ prove_sup0 | rewrite <- (Rmult_comm (/ 2)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_l; pattern eps at 1; rewrite <- Rplus_0_r; rewrite double; apply Rplus_lt_compat_l; assumption | discrR ] ]. rewrite Rabs_right. rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; rewrite <- Rinv_r_sym; [ reflexivity | assumption ]. 
apply Rle_ge; left; apply Rinv_0_lt_compat; assert (H14 := Rge_le _ _ r); elim H14; intro. assumption. elim H10; symmetry ; assumption. rewrite (RiemannInt_P13 H13 (RiemannInt_P14 a (a + h0) (f a)) (RiemannInt_P10 (-1) H13 (RiemannInt_P14 a (a + h0) (f a)))) ; ring. unfold Rdiv, Rminus; rewrite Rmult_plus_distr_r; ring. rewrite RiemannInt_P15. rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_r; unfold Rdiv; rewrite Rmult_assoc; rewrite <- Rinv_r_sym; [ ring | assumption ]. cut (a <= a + h0). cut (a + h0 <= b). intros; unfold primitive; case (Rle_dec a (a + h0)); case (Rle_dec (a + h0) b); case (Rle_dec a a); case (Rle_dec a b); intros; try (elim n; right; reflexivity) || (elim n; left; assumption). rewrite RiemannInt_P9; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; apply RiemannInt_P5. elim n; assumption. elim n; assumption. 2: left; assumption. apply Rplus_le_reg_l with (- a); replace (- a + (a + h0)) with h0; [ idtac | ring ]. rewrite Rplus_comm; apply Rle_trans with del; [ apply Rle_trans with (Rabs h0); [ apply RRle_abs | left; assumption ] | unfold del; apply Rle_trans with (Rmin x1 (b - a)); apply Rmin_r ]. (*****) assert (H1 : x = a). rewrite <- H0 in H; elim H; intros; apply Rle_antisym; assumption. set (f_a := fun x:R => f a * (x - a)). assert (H2 : derivable_pt_lim f_a a (f a)). unfold f_a; change (derivable_pt_lim (fct_cte (f a) * (id - fct_cte a)%F) a (f a)) ; pattern (f a) at 2; replace (f a) with (0 * (id - fct_cte a)%F a + fct_cte (f a) a * 1). apply derivable_pt_lim_mult. apply derivable_pt_lim_const. replace 1 with (1 - 0); [ idtac | ring ]. apply derivable_pt_lim_minus. apply derivable_pt_lim_id. apply derivable_pt_lim_const. unfold fct_cte; ring. set (f_b := fun x:R => f b * (x - b) + RiemannInt (FTC_P1 h C0 h (Rle_refl b))). assert (H3 : derivable_pt_lim f_b b (f b)). unfold f_b; pattern (f b) at 2; replace (f b) with (f b + 0). change (derivable_pt_lim ((fct_cte (f b) * (id - fct_cte b))%F + fct_cte (RiemannInt (FTC_P1 h C0 h (Rle_refl b)))) b ( f b + 0)). apply derivable_pt_lim_plus. pattern (f b) at 2; replace (f b) with (0 * (id - fct_cte b)%F b + fct_cte (f b) b * 1). apply derivable_pt_lim_mult. apply derivable_pt_lim_const. replace 1 with (1 - 0); [ idtac | ring ]. apply derivable_pt_lim_minus. apply derivable_pt_lim_id. apply derivable_pt_lim_const. unfold fct_cte; ring. apply derivable_pt_lim_const. ring. unfold derivable_pt_lim; intros; elim (H2 _ H4); intros; elim (H3 _ H4); intros; set (del := Rmin x0 x1). assert (H7 : 0 < del). unfold del; unfold Rmin; case (Rle_dec x0 x1); intro. apply (cond_pos x0). apply (cond_pos x1). split with (mkposreal _ H7); intros; case (Rcase_abs h0); intro. assert (H10 : a + h0 < a). pattern a at 2; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; assumption. rewrite H1; unfold primitive; case (Rle_dec a (a + h0)); case (Rle_dec (a + h0) b); case (Rle_dec a a); case (Rle_dec a b); intros; try (elim n; right; assumption || reflexivity). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r3 H10)). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r2 H10)). rewrite RiemannInt_P9; replace 0 with (f_a a). replace (f a * (a + h0 - a)) with (f_a (a + h0)). apply H5; try assumption. apply Rlt_le_trans with del; try assumption. unfold del; apply Rmin_l. unfold f_a; ring. unfold f_a; ring. elim n; rewrite <- H0; left; assumption. assert (H10 : a < a + h0). pattern a at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l. assert (H10 := Rge_le _ _ r); elim H10; intro. assumption. elim H8; symmetry ; assumption. 
rewrite H0 in H1; rewrite H1; unfold primitive; case (Rle_dec a (b + h0)); case (Rle_dec (b + h0) b); case (Rle_dec a b); case (Rle_dec b b); intros; try (elim n; right; assumption || reflexivity). rewrite H0 in H10; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r2 H10)). repeat rewrite RiemannInt_P9. replace (RiemannInt (FTC_P1 h C0 r1 r0)) with (f_b b). fold (f_b (b + h0)). apply H6; try assumption. apply Rlt_le_trans with del; try assumption. unfold del; apply Rmin_r. unfold f_b; unfold Rminus; rewrite Rplus_opp_r; rewrite Rmult_0_r; rewrite Rplus_0_l; apply RiemannInt_P5. elim n; rewrite <- H0; left; assumption. elim n0; rewrite <- H0; left; assumption. Qed. Lemma RiemannInt_P29 : forall (f:R -> R) a b (h:a <= b) (C0:forall x:R, a <= x <= b -> continuity_pt f x), antiderivative f (primitive h (FTC_P1 h C0)) a b. Proof. intro f; intros; unfold antiderivative; split; try assumption; intros; assert (H0 := RiemannInt_P28 h C0 H); assert (H1 : derivable_pt (primitive h (FTC_P1 h C0)) x); [ unfold derivable_pt; split with (f x); apply H0 | split with H1; symmetry ; apply derive_pt_eq_0; apply H0 ]. Qed. Lemma RiemannInt_P30 : forall (f:R -> R) (a b:R), a <= b -> (forall x:R, a <= x <= b -> continuity_pt f x) -> { g:R -> R | antiderivative f g a b }. Proof. intros; split with (primitive H (FTC_P1 H H0)); apply RiemannInt_P29. Qed. Record C1_fun : Type := mkC1 {c1 :> R -> R; diff0 : derivable c1; cont1 : continuity (derive c1 diff0)}. Lemma RiemannInt_P31 : forall (f:C1_fun) (a b:R), a <= b -> antiderivative (derive f (diff0 f)) f a b. Proof. intro f; intros; unfold antiderivative; split; try assumption; intros; split with (diff0 f x); reflexivity. Qed. Lemma RiemannInt_P32 : forall (f:C1_fun) (a b:R), Riemann_integrable (derive f (diff0 f)) a b. Proof. intro f; intros; case (Rle_dec a b); intro; [ apply continuity_implies_RiemannInt; try assumption; intros; apply (cont1 f) | assert (H : b <= a); [ auto with real | apply RiemannInt_P1; apply continuity_implies_RiemannInt; try assumption; intros; apply (cont1 f) ] ]. Qed. Lemma RiemannInt_P33 : forall (f:C1_fun) (a b:R) (pr:Riemann_integrable (derive f (diff0 f)) a b), a <= b -> RiemannInt pr = f b - f a. Proof. intro f; intros; assert (H0 : forall x:R, a <= x <= b -> continuity_pt (derive f (diff0 f)) x). intros; apply (cont1 f). rewrite (RiemannInt_P20 H (FTC_P1 H H0) pr); assert (H1 := RiemannInt_P29 H H0); assert (H2 := RiemannInt_P31 f H); elim (antiderivative_Ucte (derive f (diff0 f)) _ _ _ _ H1 H2); intros C H3; repeat rewrite H3; [ ring | split; [ right; reflexivity | assumption ] | split; [ assumption | right; reflexivity ] ]. Qed. Lemma FTC_Riemann : forall (f:C1_fun) (a b:R) (pr:Riemann_integrable (derive f (diff0 f)) a b), RiemannInt pr = f b - f a. Proof. intro f; intros; case (Rle_dec a b); intro; [ apply RiemannInt_P33; assumption | assert (H : b <= a); [ auto with real | assert (H0 := RiemannInt_P1 pr); rewrite (RiemannInt_P8 pr H0); rewrite (RiemannInt_P33 _ H0 H); ring ] ]. Qed. coq-8.4pl2/theories/Reals/Ratan.v0000640000175000001440000016766011776416536016013 0ustar notinusersRequire Import Fourier. Require Import Rbase. Require Import PSeries_reg. Require Import Rtrigo1. Require Import Ranalysis_reg. Require Import Rfunctions. Require Import AltSeries. Require Import Rseries. Require Import SeqProp. Require Import Ranalysis5. Require Import SeqSeries. Require Import PartSum. Local Open Scope R_scope. (** Tools *) Lemma Ropp_div : forall x y, -x/y = -(x/y). Proof. 
intros x y; unfold Rdiv; rewrite <-Ropp_mult_distr_l_reverse; reflexivity. Qed. Definition pos_half_prf : 0 < /2. Proof. fourier. Qed. Definition pos_half := mkposreal (/2) pos_half_prf. Lemma Boule_half_to_interval : forall x , Boule (/2) pos_half x -> 0 <= x <= 1. Proof. unfold Boule, pos_half; simpl. intros x b; apply Rabs_def2 in b; destruct b; split; fourier. Qed. Lemma Boule_lt : forall c r x, Boule c r x -> Rabs x < Rabs c + r. Proof. unfold Boule; intros c r x h. apply Rabs_def2 in h; destruct h; apply Rabs_def1; (destruct (Rle_lt_dec 0 c);[rewrite Rabs_pos_eq; fourier | rewrite <- Rabs_Ropp, Rabs_pos_eq; fourier]). Qed. (* The following lemma does not belong here. *) Lemma Un_cv_ext : forall un vn, (forall n, un n = vn n) -> forall l, Un_cv un l -> Un_cv vn l. Proof. intros un vn quv l P eps ep; destruct (P eps ep) as [N Pn]; exists N. intro n; rewrite <- quv; apply Pn. Qed. (* The following two lemmas are general purposes about alternated series. They do not belong here. *) Lemma Alt_first_term_bound :forall f l N n, Un_decreasing f -> Un_cv f 0 -> Un_cv (sum_f_R0 (tg_alt f)) l -> (N <= n)%nat -> R_dist (sum_f_R0 (tg_alt f) n) l <= f N. Proof. intros f l. assert (WLOG : forall n P, (forall k, (0 < k)%nat -> P k) -> ((forall k, (0 < k)%nat -> P k) -> P 0%nat) -> P n). clear. intros [ | n] P Hs Ho;[solve[apply Ho, Hs] | apply Hs; auto with arith]. intros N; pattern N; apply WLOG; clear N. intros [ | N] Npos n decr to0 cv nN. clear -Npos; elimtype False; omega. assert (decr' : Un_decreasing (fun i => f (S N + i)%nat)). intros k; replace (S N+S k)%nat with (S (S N+k)) by ring. apply (decr (S N + k)%nat). assert (to' : Un_cv (fun i => f (S N + i)%nat) 0). intros eps ep; destruct (to0 eps ep) as [M PM]. exists M; intros k kM; apply PM; omega. assert (cv' : Un_cv (sum_f_R0 (tg_alt (fun i => ((-1) ^ S N * f(S N + i)%nat)))) (l - sum_f_R0 (tg_alt f) N)). intros eps ep; destruct (cv eps ep) as [M PM]; exists M. intros n' nM. match goal with |- ?C => set (U := C) end. assert (nM' : (n' + S N >= M)%nat) by omega. generalize (PM _ nM'); unfold R_dist. rewrite (tech2 (tg_alt f) N (n' + S N)). assert (t : forall a b c, (a + b) - c = b - (c - a)) by (intros; ring). rewrite t; clear t; unfold U, R_dist; clear U. replace (n' + S N - S N)%nat with n' by omega. rewrite <- (sum_eq (tg_alt (fun i => (-1) ^ S N * f(S N + i)%nat))). tauto. intros i _; unfold tg_alt. rewrite <- Rmult_assoc, <- pow_add, !(plus_comm i); reflexivity. omega. assert (cv'' : Un_cv (sum_f_R0 (tg_alt (fun i => f (S N + i)%nat))) ((-1) ^ S N * (l - sum_f_R0 (tg_alt f) N))). apply (Un_cv_ext (fun n => (-1) ^ S N * sum_f_R0 (tg_alt (fun i : nat => (-1) ^ S N * f (S N + i)%nat)) n)). intros n0; rewrite scal_sum; apply sum_eq; intros i _. unfold tg_alt; ring_simplify; replace (((-1) ^ S N) ^ 2) with 1. ring. rewrite <- pow_mult, mult_comm, pow_mult; replace ((-1) ^2) with 1 by ring. rewrite pow1; reflexivity. apply CV_mult. solve[intros eps ep; exists 0%nat; intros; rewrite R_dist_eq; auto]. assumption. destruct (even_odd_cor N) as [p [Neven | Nodd]]. rewrite Neven; destruct (alternated_series_ineq _ _ p decr to0 cv) as [B C]. case (even_odd_cor n) as [p' [neven | nodd]]. rewrite neven. destruct (alternated_series_ineq _ _ p' decr to0 cv) as [D E]. unfold R_dist; rewrite Rabs_pos_eq;[ | fourier]. assert (dist : (p <= p')%nat) by omega. assert (t := decreasing_prop _ _ _ (CV_ALT_step1 f decr) dist). apply Rle_trans with (sum_f_R0 (tg_alt f) (2 * p) - l). unfold Rminus; apply Rplus_le_compat_r; exact t. 
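(* In each parity case, alternated_series_ineq brackets l between two consecutive partial sums, so the distance from the n-th partial sum to l is bounded by the gap between those sums, a single term of f; the next step expresses that gap as f (S (2 * p)). *)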
match goal with _ : ?a <= l, _ : l <= ?b |- _ => replace (f (S (2 * p))) with (b - a) by (rewrite tech5; unfold tg_alt; rewrite pow_1_odd; ring); fourier end. rewrite nodd; destruct (alternated_series_ineq _ _ p' decr to0 cv) as [D E]. unfold R_dist; rewrite <- Rabs_Ropp, Rabs_pos_eq, Ropp_minus_distr; [ | fourier]. assert (dist : (p <= p')%nat) by omega. apply Rle_trans with (l - sum_f_R0 (tg_alt f) (S (2 * p))). unfold Rminus; apply Rplus_le_compat_l, Ropp_le_contravar. solve[apply Rge_le, (growing_prop _ _ _ (CV_ALT_step0 f decr) dist)]. unfold Rminus; rewrite tech5, Ropp_plus_distr, <- Rplus_assoc. unfold tg_alt at 2; rewrite pow_1_odd, Ropp_mult_distr_l_reverse; fourier. rewrite Nodd; destruct (alternated_series_ineq _ _ p decr to0 cv) as [B _]. destruct (alternated_series_ineq _ _ (S p) decr to0 cv) as [_ C]. assert (keep : (2 * S p = S (S ( 2 * p)))%nat) by ring. case (even_odd_cor n) as [p' [neven | nodd]]. rewrite neven; destruct (alternated_series_ineq _ _ p' decr to0 cv) as [D E]. unfold R_dist; rewrite Rabs_pos_eq;[ | fourier]. assert (dist : (S p < S p')%nat) by omega. apply Rle_trans with (sum_f_R0 (tg_alt f) (2 * S p) - l). unfold Rminus; apply Rplus_le_compat_r, (decreasing_prop _ _ _ (CV_ALT_step1 f decr)). omega. rewrite keep, tech5; unfold tg_alt at 2; rewrite <- keep, pow_1_even. fourier. rewrite nodd; destruct (alternated_series_ineq _ _ p' decr to0 cv) as [D E]. unfold R_dist; rewrite <- Rabs_Ropp, Rabs_pos_eq;[ | fourier]. rewrite Ropp_minus_distr. apply Rle_trans with (l - sum_f_R0 (tg_alt f) (S (2 * p))). unfold Rminus; apply Rplus_le_compat_l, Ropp_le_contravar, Rge_le, (growing_prop _ _ _ (CV_ALT_step0 f decr)); omega. generalize C; rewrite keep, tech5; unfold tg_alt. rewrite <- keep, pow_1_even. assert (t : forall a b c, a <= b + 1 * c -> a - b <= c) by (intros; fourier). solve[apply t]. clear WLOG; intros Hyp [ | n] decr to0 cv _. generalize (alternated_series_ineq f l 0 decr to0 cv). unfold R_dist, tg_alt; simpl; rewrite !Rmult_1_l, !Rmult_1_r. assert (f 1%nat <= f 0%nat) by apply decr. rewrite Ropp_mult_distr_l_reverse. intros [A B]; rewrite Rabs_pos_eq; fourier. apply Rle_trans with (f 1%nat). apply (Hyp 1%nat (le_n 1) (S n) decr to0 cv). omega. solve[apply decr]. Qed. Lemma Alt_CVU : forall (f : nat -> R -> R) g h c r, (forall x, Boule c r x -> Un_decreasing (fun n => f n x)) -> (forall x, Boule c r x -> Un_cv (fun n => f n x) 0) -> (forall x, Boule c r x -> Un_cv (sum_f_R0 (tg_alt (fun i => f i x))) (g x)) -> (forall x n, Boule c r x -> f n x <= h n) -> (Un_cv h 0) -> CVU (fun N x => sum_f_R0 (tg_alt (fun i => f i x)) N) g c r. Proof. intros f g h c r decr to0 to_g bound bound0 eps ep. assert (ep' : 0 < eps/2) by fourier. destruct (bound0 _ ep) as [N Pn]; exists N. intros n y nN dy. rewrite <- Rabs_Ropp, Ropp_minus_distr; apply Rle_lt_trans with (f n y). solve[apply (Alt_first_term_bound (fun i => f i y) (g y) n n); auto]. apply Rle_lt_trans with (h n). apply bound; assumption. clear - nN Pn. generalize (Pn _ nN); unfold R_dist; rewrite Rminus_0_r; intros t. apply Rabs_def2 in t; tauto. Qed. (* The following lemmas are general-purpose lemmas about squares. They do not belong here. *) Lemma pow2_ge_0 : forall x, 0 <= x ^ 2. Proof. intros x; destruct (Rle_lt_dec 0 x). replace (x ^ 2) with (x * x) by field. apply Rmult_le_pos; assumption. replace (x ^ 2) with ((-x) * (-x)) by field. apply Rmult_le_pos; fourier. Qed. Lemma pow2_abs : forall x, Rabs x ^ 2 = x ^ 2. Proof. intros x; destruct (Rle_lt_dec 0 x). rewrite Rabs_pos_eq;[field | assumption]. rewrite <- Rabs_Ropp, Rabs_pos_eq;[field | fourier]. Qed. (** * Properties of tangent *) Lemma derivable_pt_tan : forall x, -PI/2 < x < PI/2 -> derivable_pt tan x. Proof. intros x xint. unfold derivable_pt, tan.
apply derivable_pt_div ; [reg | reg | ]. apply Rgt_not_eq. unfold Rgt ; apply cos_gt_0; [unfold Rdiv; rewrite <- Ropp_mult_distr_l_reverse; fold (-PI/2) |];tauto. Qed. Lemma derive_pt_tan : forall (x:R), forall (Pr1: -PI/2 < x < PI/2), derive_pt tan x (derivable_pt_tan x Pr1) = 1 + (tan x)^2. Proof. intros x pr. assert (cos x <> 0). apply Rgt_not_eq, cos_gt_0; rewrite <- ?Ropp_div; tauto. unfold tan; reg; unfold pow, Rsqr; field; assumption. Qed. (** Proof that tangent is a bijection *) (* to be removed? *) Lemma derive_increasing_interv : forall (a b:R) (f:R -> R), a < b -> forall (pr:forall x, a < x < b -> derivable_pt f x), (forall t:R, forall (t_encad : a < t < b), 0 < derive_pt f t (pr t t_encad)) -> forall x y:R, a < x < b -> a < y < b -> x < y -> f x < f y. Proof. intros a b f a_lt_b pr Df_gt_0 x y x_encad y_encad x_lt_y. assert (derivable_id_interv : forall c : R, x < c < y -> derivable_pt id c). intros ; apply derivable_pt_id. assert (derivable_f_interv : forall c : R, x < c < y -> derivable_pt f c). intros c c_encad. apply pr. split. apply Rlt_trans with (r2:=x) ; [exact (proj1 x_encad) | exact (proj1 c_encad)]. apply Rlt_trans with (r2:=y) ; [exact (proj2 c_encad) | exact (proj2 y_encad)]. assert (f_cont_interv : forall c : R, x <= c <= y -> continuity_pt f c). intros c c_encad; apply derivable_continuous_pt ; apply pr. split. apply Rlt_le_trans with (r2:=x) ; [exact (proj1 x_encad) | exact (proj1 c_encad)]. apply Rle_lt_trans with (r2:=y) ; [ exact (proj2 c_encad) | exact (proj2 y_encad)]. assert (id_cont_interv : forall c : R, x <= c <= y -> continuity_pt id c). intros ; apply derivable_continuous_pt ; apply derivable_pt_id. elim (MVT f id x y derivable_f_interv derivable_id_interv x_lt_y f_cont_interv id_cont_interv). intros c Temp ; elim Temp ; clear Temp ; intros Pr eq. replace (id y - id x) with (y - x) in eq by intuition. replace (derive_pt id c (derivable_id_interv c Pr)) with 1 in eq. assert (Hyp : f y - f x > 0). rewrite Rmult_1_r in eq. rewrite <- eq. apply Rmult_gt_0_compat. apply Rgt_minus ; assumption. assert (c_encad2 : a <= c < b). split. apply Rlt_le ; apply Rlt_trans with (r2:=x) ; [exact (proj1 x_encad) | exact (proj1 Pr)]. apply Rle_lt_trans with (r2:=y) ; [apply Rlt_le ; exact (proj2 Pr) | exact (proj2 y_encad)]. assert (c_encad : a < c < b). split. apply Rlt_trans with (r2:=x) ; [exact (proj1 x_encad) | exact (proj1 Pr)]. apply Rle_lt_trans with (r2:=y) ; [apply Rlt_le ; exact (proj2 Pr) | exact (proj2 y_encad)]. assert (Temp := Df_gt_0 c c_encad). assert (Temp2 := pr_nu f c (derivable_f_interv c Pr) (pr c c_encad)). rewrite Temp2 ; apply Temp. apply Rminus_gt ; exact Hyp. symmetry ; rewrite derive_pt_eq ; apply derivable_pt_lim_id. Qed. (* begin hide *) Lemma plus_Rsqr_gt_0 : forall x, 1 + x ^ 2 > 0. Proof. intro m. replace 0 with (0+0) by intuition. apply Rplus_gt_ge_compat. intuition. elim (total_order_T m 0) ; intro s'. case s'. intros m_cond. replace 0 with (0*0) by intuition. replace (m ^ 2) with ((-m)^2). apply Rle_ge ; apply Rmult_le_compat ; intuition ; apply Rlt_le ; rewrite Rmult_1_r ; intuition. field. intro H' ; rewrite H' ; right ; field. left. intuition. Qed. (* end hide *) (* The following lemmas about PI should probably be in Rtrigo. *) Lemma PI2_lower_bound : forall x, 0 < x < 2 -> 0 < cos x -> x < PI/2. Proof. intros x [xp xlt2] cx. destruct (Rtotal_order x (PI/2)) as [xltpi2 | [xeqpi2 | xgtpi2]]. assumption. now case (Rgt_not_eq _ _ cx); rewrite xeqpi2, cos_PI2. 
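(* Remaining case x > PI/2: MVT_cor1 gives c between PI/2 and x with cos x - cos (PI/2) = - sin c * (x - PI/2); since 0 < c < 2 forces 0 < sin c, this makes cos x <= 0, contradicting the hypothesis 0 < cos x. *)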
destruct (MVT_cor1 cos (PI/2) x derivable_cos xgtpi2) as [c [Pc [cint1 cint2]]]. revert Pc; rewrite cos_PI2, Rminus_0_r. rewrite <- (pr_nu cos c (derivable_pt_cos c)), derive_pt_cos. assert (0 < c < 2) by (split; assert (t := PI2_RGT_0); fourier). assert (0 < sin c) by now apply sin_pos_tech. intros Pc. case (Rlt_not_le _ _ cx). rewrite <- (Rplus_0_l (cos x)), Pc, Ropp_mult_distr_l_reverse. apply Rle_minus, Rmult_le_pos;[apply Rlt_le; assumption | fourier ]. Qed. Lemma PI2_3_2 : 3/2 < PI/2. Proof. apply PI2_lower_bound;[split; fourier | ]. destruct (pre_cos_bound (3/2) 1) as [t _]; [fourier | fourier | ]. apply Rlt_le_trans with (2 := t); clear t. unfold cos_approx; simpl; unfold cos_term. simpl mult; replace ((-1)^ 0) with 1 by ring; replace ((-1)^2) with 1 by ring; replace ((-1)^4) with 1 by ring; replace ((-1)^1) with (-1) by ring; replace ((-1)^3) with (-1) by ring; replace 3 with (IZR 3) by (simpl; ring); replace 2 with (IZR 2) by (simpl; ring); simpl Z.of_nat; rewrite !INR_IZR_INZ, Ropp_mult_distr_l_reverse, Rmult_1_l. match goal with |- _ < ?a => replace a with ((- IZR 3 ^ 6 * IZR (Z.of_nat (fact 0)) * IZR (Z.of_nat (fact 2)) * IZR (Z.of_nat (fact 4)) + IZR 3 ^ 4 * IZR 2 ^ 2 * IZR (Z.of_nat (fact 0)) * IZR (Z.of_nat (fact 2)) * IZR (Z.of_nat (fact 6)) - IZR 3 ^ 2 * IZR 2 ^ 4 * IZR (Z.of_nat (fact 0)) * IZR (Z.of_nat (fact 4)) * IZR (Z.of_nat (fact 6)) + IZR 2 ^ 6 * IZR (Z.of_nat (fact 2)) * IZR (Z.of_nat (fact 4)) * IZR (Z.of_nat (fact 6))) / (IZR 2 ^ 6 * IZR (Z.of_nat (fact 0)) * IZR (Z.of_nat (fact 2)) * IZR (Z.of_nat (fact 4)) * IZR (Z.of_nat (fact 6))));[ | field; repeat apply conj;((rewrite <- INR_IZR_INZ; apply INR_fact_neq_0) || (apply Rgt_not_eq; apply (IZR_lt 0); reflexivity)) ] end. rewrite !fact_simpl, !Nat2Z.inj_mul; simpl Z.of_nat. unfold Rdiv; apply Rmult_lt_0_compat. unfold Rminus; rewrite !pow_IZR, <- !opp_IZR, <- !mult_IZR, <- !opp_IZR, <- !plus_IZR; apply (IZR_lt 0); reflexivity. apply Rinv_0_lt_compat; rewrite !pow_IZR, <- !mult_IZR; apply (IZR_lt 0). reflexivity. Qed. Lemma PI2_1 : 1 < PI/2. Proof. assert (t := PI2_3_2); fourier. Qed. Lemma tan_increasing : forall x y:R, -PI/2 < x -> x < y -> y < PI/2 -> tan x < tan y. Proof. intros x y Z_le_x x_lt_y y_le_1. assert (x_encad : -PI/2 < x < PI/2). split ; [assumption | apply Rlt_trans with (r2:=y) ; assumption]. assert (y_encad : -PI/2 < y < PI/2). split ; [apply Rlt_trans with (r2:=x) ; intuition | intuition ]. assert (local_derivable_pt_tan : forall x : R, -PI/2 < x < PI/2 -> derivable_pt tan x). intros ; apply derivable_pt_tan ; intuition. apply derive_increasing_interv with (a:=-PI/2) (b:=PI/2) (pr:=local_derivable_pt_tan) ; intuition. fourier. assert (Temp := pr_nu tan t (derivable_pt_tan t t_encad) (local_derivable_pt_tan t t_encad)) ; rewrite <- Temp ; clear Temp. assert (Temp := derive_pt_tan t t_encad) ; rewrite Temp ; clear Temp. apply plus_Rsqr_gt_0. Qed. Lemma tan_is_inj : forall x y, -PI/2 < x < PI/2 -> -PI/2 < y < PI/2 -> tan x = tan y -> x = y. Proof. intros a b a_encad b_encad fa_eq_fb. case(total_order_T a b). intro s ; case s ; clear s. intro Hf. assert (Hfalse := tan_increasing a b (proj1 a_encad) Hf (proj2 b_encad)). case (Rlt_not_eq (tan a) (tan b)) ; assumption. intuition. intro Hf. assert (Hfalse := tan_increasing b a (proj1 b_encad) Hf (proj2 a_encad)). case (Rlt_not_eq (tan b) (tan a)) ; [|symmetry] ; assumption. Qed. Lemma exists_atan_in_frame : forall lb ub y, lb < ub -> -PI/2 < lb -> ub < PI/2 -> tan lb < y < tan ub -> {x | lb < x < ub /\ tan x = y}. Proof. 
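(* The antecedent is obtained with the intermediate value theorem: the map sending x to tan x - y is continuous on [lb, ub] (tan is derivable there), negative at lb and positive at ub, so IVT_interv yields a zero, which is then shown to lie strictly between lb and ub. *)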
intros lb ub y lb_lt_ub lb_cond ub_cond y_encad. case y_encad ; intros y_encad1 y_encad2. assert (f_cont : forall a : R, lb <= a <= ub -> continuity_pt tan a). intros a a_encad. apply derivable_continuous_pt ; apply derivable_pt_tan. split. apply Rlt_le_trans with (r2:=lb) ; intuition. apply Rle_lt_trans with (r2:=ub) ; intuition. assert (Cont : forall a : R, lb <= a <= ub -> continuity_pt (fun x => tan x - y) a). intros a a_encad. unfold continuity_pt, continue_in, limit1_in, limit_in ; simpl ; unfold R_dist. intros eps eps_pos. elim (f_cont a a_encad eps eps_pos). intros alpha alpha_pos. destruct alpha_pos as (alpha_pos,Temp). exists alpha. split. assumption. intros x x_cond. replace (tan x - y - (tan a - y)) with (tan x - tan a) by field. exact (Temp x x_cond). assert (H1 : (fun x : R => tan x - y) lb < 0). apply Rlt_minus. assumption. assert (H2 : 0 < (fun x : R => tan x - y) ub). apply Rgt_minus. assumption. destruct (IVT_interv (fun x => tan x - y) lb ub Cont lb_lt_ub H1 H2) as (x,Hx). exists x. destruct Hx as (Hyp,Result). intuition. assert (Temp2 : x <> lb). intro Hfalse. rewrite Hfalse in Result. assert (Temp2 : y <> tan lb). apply Rgt_not_eq ; assumption. clear - Temp2 Result. apply Temp2. intuition. clear -Temp2 H3. case H3 ; intuition. apply False_ind ; apply Temp2 ; symmetry ; assumption. assert (Temp : x <> ub). intro Hfalse. rewrite Hfalse in Result. assert (Temp2 : y <> tan ub). apply Rlt_not_eq ; assumption. clear - Temp2 Result. apply Temp2. intuition. case H4 ; intuition. Qed. (** * Definition of arctangent as the reciprocal function of tangent and proof of this status *) Lemma tan_1_gt_1 : tan 1 > 1. Proof. assert (0 < cos 1) by (apply cos_gt_0; assert (t:=PI2_1); fourier). assert (t1 : cos 1 <= 1 - 1/2 + 1/24). destruct (pre_cos_bound 1 0) as [_ t]; try fourier; revert t. unfold cos_approx, cos_term; simpl; intros t; apply Rle_trans with (1:=t). clear t; apply Req_le; field. assert (t2 : 1 - 1/6 <= sin 1). destruct (pre_sin_bound 1 0) as [t _]; try fourier; revert t. unfold sin_approx, sin_term; simpl; intros t; apply Rle_trans with (2:=t). clear t; apply Req_le; field. pattern 1 at 2; replace 1 with (cos 1 / cos 1) by (field; apply Rgt_not_eq; fourier). apply Rlt_gt; apply (Rmult_lt_compat_r (/ cos 1) (cos 1) (sin 1)). apply Rinv_0_lt_compat; assumption. apply Rle_lt_trans with (1 := t1); apply Rlt_le_trans with (2 := t2). fourier. Qed. Definition frame_tan y : {x | 0 < x < PI/2 /\ Rabs y < tan x}. destruct (total_order_T (Rabs y) 1). assert (yle1 : Rabs y <= 1) by (destruct s; fourier). clear s; exists 1; split;[split; [exact Rlt_0_1 | exact PI2_1] | ]. apply Rle_lt_trans with (1 := yle1); exact tan_1_gt_1. assert (0 < / (Rabs y + 1)). apply Rinv_0_lt_compat; fourier. set (u := /2 * / (Rabs y + 1)). assert (0 < u). apply Rmult_lt_0_compat; [fourier | assumption]. assert (vlt1 : / (Rabs y + 1) < 1). apply Rmult_lt_reg_r with (Rabs y + 1). assert (t := Rabs_pos y); fourier. rewrite Rinv_l; [rewrite Rmult_1_l | apply Rgt_not_eq]; fourier. assert (vlt2 : u < 1). apply Rlt_trans with (/ (Rabs y + 1)). rewrite double_var. assert (t : forall x, 0 < x -> x < x + x) by (clear; intros; fourier). unfold u; rewrite Rmult_comm; apply t. unfold Rdiv; rewrite Rmult_comm; assumption. assumption. assert(int : 0 < PI / 2 - u < PI / 2). split. assert (t := PI2_1); apply Rlt_Rminus, Rlt_trans with (2 := t); assumption. assert (dumb : forall x y, 0 < y -> x - y < x) by (clear; intros; fourier). apply dumb; clear dumb; assumption. exists (PI/2 - u). 
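(* It remains to prove 0 < PI/2 - u < PI/2 (this is int) and Rabs y < tan (PI/2 - u). Roughly: cos (PI/2 - u) = sin u < u = /2 * /(Rabs y + 1), so Rabs y < Rabs y + 1 < /2 * /cos (PI/2 - u); and since 1 < PI/2 - u, sin (PI/2 - u) exceeds /2, whence /2 * /cos (PI/2 - u) < tan (PI/2 - u). *)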
assert (tmp : forall x y, 0 < x -> y < 1 -> x * y < x). clear; intros x y x0 y1; pattern x at 2; rewrite <- (Rmult_1_r x). apply Rmult_lt_compat_l; assumption. assert (0 < sin u). apply sin_gt_0;[ assumption | ]. assert (t := PI2_Rlt_PI); assert (t' := PI2_1). apply Rlt_trans with (2 := Rlt_trans _ _ _ t' t); assumption. split. assumption. apply Rlt_trans with (/2 * / cos(PI / 2 - u)). rewrite cos_shift. assert (sin u < u). assert (t1 : 0 <= u) by (apply Rlt_le; assumption). assert (t2 : u <= 4) by (apply Rle_trans with 1;[apply Rlt_le | fourier]; assumption). destruct (pre_sin_bound u 0 t1 t2) as [_ t]. apply Rle_lt_trans with (1 := t); clear t1 t2 t. unfold sin_approx; simpl; unfold sin_term; simpl ((-1) ^ 0); replace ((-1) ^ 2) with 1 by ring; simpl ((-1) ^ 1); rewrite !Rmult_1_r, !Rmult_1_l; simpl plus; simpl (INR (fact 1)). rewrite <- (fun x => tech_pow_Rmult x 0), <- (fun x => tech_pow_Rmult x 2), <- (fun x => tech_pow_Rmult x 4). rewrite (Rmult_comm (-1)); simpl ((/(Rabs y + 1)) ^ 0). unfold Rdiv; rewrite Rinv_1, !Rmult_assoc, <- !Rmult_plus_distr_l. apply tmp;[assumption | ]. rewrite Rplus_assoc, Rmult_1_l; pattern 1 at 3; rewrite <- Rplus_0_r. apply Rplus_lt_compat_l. rewrite <- Rmult_assoc. match goal with |- (?a * (-1)) + _ < 0 => rewrite <- (Rplus_opp_l a), Ropp_mult_distr_r_reverse, Rmult_1_r end. apply Rplus_lt_compat_l. assert (0 < u ^ 2) by (apply pow_lt; assumption). replace (u ^ 4) with (u ^ 2 * u ^ 2) by ring. rewrite Rmult_assoc; apply Rmult_lt_compat_l; auto. apply Rlt_trans with (u ^ 2 * /INR (fact 3)). apply Rmult_lt_compat_l; auto. apply Rinv_lt_contravar. solve[apply Rmult_lt_0_compat; apply INR_fact_lt_0]. rewrite !INR_IZR_INZ; apply IZR_lt; reflexivity. rewrite Rmult_comm; apply tmp. solve[apply Rinv_0_lt_compat, INR_fact_lt_0]. apply Rlt_trans with (2 := vlt2). simpl; unfold u; apply tmp; auto; rewrite Rmult_1_r; assumption. apply Rlt_trans with (Rabs y + 1);[fourier | ]. pattern (Rabs y + 1) at 1; rewrite <- (Rinv_involutive (Rabs y + 1)); [ | apply Rgt_not_eq; fourier]. rewrite <- Rinv_mult_distr. apply Rinv_lt_contravar. apply Rmult_lt_0_compat. apply Rmult_lt_0_compat;[fourier | assumption]. assumption. replace (/(Rabs y + 1)) with (2 * u). fourier. unfold u; field; apply Rgt_not_eq; clear -r; fourier. solve[discrR]. apply Rgt_not_eq; assumption. unfold tan. set (u' := PI / 2); unfold Rdiv; apply Rmult_lt_compat_r; unfold u'. apply Rinv_0_lt_compat. rewrite cos_shift; assumption. assert (vlt3 : u < /4). replace (/4) with (/2 * /2) by field. unfold u; apply Rmult_lt_compat_l;[fourier | ]. apply Rinv_lt_contravar. apply Rmult_lt_0_compat; fourier. fourier. assert (1 < PI / 2 - u) by (assert (t := PI2_3_2); fourier). apply Rlt_trans with (sin 1). assert (t' : 1 <= 4) by fourier. destruct (pre_sin_bound 1 0 (Rlt_le _ _ Rlt_0_1) t') as [t _]. apply Rlt_le_trans with (2 := t); clear t. simpl plus; replace (sin_approx 1 1) with (5/6);[fourier | ]. unfold sin_approx, sin_term; simpl; field. apply sin_increasing_1. assert (t := PI2_1); fourier. apply Rlt_le, PI2_1. assert (t := PI2_1); fourier. fourier. assumption. Qed. Lemma ub_opp : forall x, x < PI/2 -> -PI/2 < -x. Proof. intros x h; rewrite Ropp_div; apply Ropp_lt_contravar; assumption. Qed. Lemma pos_opp_lt : forall x, 0 < x -> -x < x. Proof. intros; fourier. Qed. Lemma tech_opp_tan : forall x y, -tan x < y -> tan (-x) < y. intros; rewrite tan_neg; assumption. Qed. Definition pre_atan (y : R) : {x : R | -PI/2 < x < PI/2 /\ tan x = y}. destruct (frame_tan y) as [ub [[ub0 ubpi2] Ptan_ub]]. 
set (pr := (conj (tech_opp_tan _ _ (proj2 (Rabs_def2 _ _ Ptan_ub))) (proj1 (Rabs_def2 _ _ Ptan_ub)))). destruct (exists_atan_in_frame (-ub) ub y (pos_opp_lt _ ub0) (ub_opp _ ubpi2) ubpi2 pr) as [v [[vl vu] vq]]. exists v; clear pr. split;[rewrite Ropp_div; split; fourier | assumption]. Qed. Definition atan x := let (v, _) := pre_atan x in v. Lemma atan_bound : forall x, -PI/2 < atan x < PI/2. Proof. intros x; unfold atan; destruct (pre_atan x) as [v [int _]]; exact int. Qed. Lemma atan_right_inv : forall x, tan (atan x) = x. Proof. intros x; unfold atan; destruct (pre_atan x) as [v [_ q]]; exact q. Qed. Lemma atan_opp : forall x, atan (- x) = - atan x. Proof. intros x; generalize (atan_bound (-x)); rewrite Ropp_div;intros [a b]. generalize (atan_bound x); rewrite Ropp_div; intros [c d]. apply tan_is_inj; try rewrite Ropp_div; try split; try fourier. rewrite tan_neg, !atan_right_inv; reflexivity. Qed. Lemma derivable_pt_atan : forall x, derivable_pt atan x. Proof. intros x. destruct (frame_tan x) as [ub [[ub0 ubpi] P]]. assert (lb_lt_ub : -ub < ub) by apply pos_opp_lt, ub0. assert (xint : tan(-ub) < x < tan ub). assert (xint' : x < tan ub /\ -(tan ub) < x) by apply Rabs_def2, P. rewrite tan_neg; tauto. assert (inv_p : forall x, tan(-ub) <= x -> x <= tan ub -> comp tan atan x = id x). intros; apply atan_right_inv. assert (int_tan : forall y, tan (- ub) <= y -> y <= tan ub -> -ub <= atan y <= ub). clear -ub0 ubpi; intros y lo up; split. destruct (Rle_lt_dec (-ub) (atan y)) as [h | abs]; auto. assert (y < tan (-ub)). rewrite <- (atan_right_inv y); apply tan_increasing. destruct (atan_bound y); assumption. assumption. fourier. fourier. destruct (Rle_lt_dec (atan y) ub) as [h | abs]; auto. assert (tan ub < y). rewrite <- (atan_right_inv y); apply tan_increasing. rewrite Ropp_div; fourier. assumption. destruct (atan_bound y); assumption. fourier. assert (incr : forall x y, -ub <= x -> x < y -> y <= ub -> tan x < tan y). intros y z l yz u; apply tan_increasing. rewrite Ropp_div; fourier. assumption. fourier. assert (der : forall a, -ub <= a <= ub -> derivable_pt tan a). intros a [la ua]; apply derivable_pt_tan. rewrite Ropp_div; split; fourier. assert (df_neq : derive_pt tan (atan x) (derivable_pt_recip_interv_prelim1 tan atan (- ub) ub x lb_lt_ub xint inv_p int_tan incr der) <> 0). rewrite <- (pr_nu tan (atan x) (derivable_pt_tan (atan x) (atan_bound x))). rewrite derive_pt_tan. solve[apply Rgt_not_eq, plus_Rsqr_gt_0]. apply (derivable_pt_recip_interv tan atan (-ub) ub x lb_lt_ub xint inv_p int_tan incr der). exact df_neq. Qed. Lemma atan_increasing : forall x y, x < y -> atan x < atan y. intros x y d. assert (t1 := atan_bound x). assert (t2 := atan_bound y). destruct (Rlt_le_dec (atan x) (atan y)) as [lt | bad]. assumption. apply Rlt_not_le in d. case d. rewrite <- (atan_right_inv y), <- (atan_right_inv x). destruct bad as [ylt | yx]. apply Rlt_le, tan_increasing; try tauto. solve[rewrite yx; apply Rle_refl]. Qed. Lemma atan_0 : atan 0 = 0. apply tan_is_inj; try (apply atan_bound). assert (t := PI_RGT_0); rewrite Ropp_div; split; fourier. rewrite atan_right_inv, tan_0. reflexivity. Qed. Lemma atan_1 : atan 1 = PI/4. assert (ut := PI_RGT_0). assert (-PI/2 < PI/4 < PI/2) by (rewrite Ropp_div; split; fourier). assert (t := atan_bound 1). apply tan_is_inj; auto. rewrite tan_PI4, atan_right_inv; reflexivity. Qed. (** atan's derivative value is the function 1 / (1+x²) *) Lemma derive_pt_atan : forall x, derive_pt atan x (derivable_pt_atan x) = 1 / (1 + x²). Proof. intros x. 
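(* Expository comment, not part of the original file: the computation below follows the same pattern as [derivable_pt_atan]: frame [x] between [tan (-ub)] and [tan ub] using [frame_tan], apply the reciprocal-function derivative lemma, and reduce [derive_pt tan (atan x)] to [1 + x²] via [derive_pt_tan] and [atan_right_inv]. *)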
destruct (frame_tan x) as [ub [[ub0 ubpi] Pub]]. assert (lb_lt_ub : -ub < ub) by apply pos_opp_lt, ub0. assert (xint : tan(-ub) < x < tan ub). assert (xint' : x < tan ub /\ -(tan ub) < x) by apply Rabs_def2, Pub. rewrite tan_neg; tauto. assert (inv_p : forall x, tan(-ub) <= x -> x <= tan ub -> comp tan atan x = id x). intros; apply atan_right_inv. assert (int_tan : forall y, tan (- ub) <= y -> y <= tan ub -> -ub <= atan y <= ub). clear -ub0 ubpi; intros y lo up; split. destruct (Rle_lt_dec (-ub) (atan y)) as [h | abs]; auto. assert (y < tan (-ub)). rewrite <- (atan_right_inv y); apply tan_increasing. destruct (atan_bound y); assumption. assumption. fourier. fourier. destruct (Rle_lt_dec (atan y) ub) as [h | abs]; auto. assert (tan ub < y). rewrite <- (atan_right_inv y); apply tan_increasing. rewrite Ropp_div; fourier. assumption. destruct (atan_bound y); assumption. fourier. assert (incr : forall x y, -ub <= x -> x < y -> y <= ub -> tan x < tan y). intros y z l yz u; apply tan_increasing. rewrite Ropp_div; fourier. assumption. fourier. assert (der : forall a, -ub <= a <= ub -> derivable_pt tan a). intros a [la ua]; apply derivable_pt_tan. rewrite Ropp_div; split; fourier. assert (df_neq : derive_pt tan (atan x) (derivable_pt_recip_interv_prelim1 tan atan (- ub) ub x lb_lt_ub xint inv_p int_tan incr der) <> 0). rewrite <- (pr_nu tan (atan x) (derivable_pt_tan (atan x) (atan_bound x))). rewrite derive_pt_tan. solve[apply Rgt_not_eq, plus_Rsqr_gt_0]. assert (t := derive_pt_recip_interv tan atan (-ub) ub x lb_lt_ub xint incr int_tan der inv_p df_neq). rewrite <- (pr_nu atan x (derivable_pt_recip_interv tan atan (- ub) ub x lb_lt_ub xint inv_p int_tan incr der df_neq)). rewrite t. assert (t' := atan_bound x). rewrite <- (pr_nu tan (atan x) (derivable_pt_tan _ t')). rewrite derive_pt_tan, atan_right_inv. replace (Rsqr x) with (x ^ 2) by (unfold Rsqr; ring). reflexivity. Qed. (** * Definition of the arctangent function as the sum of the arctan power series *) (* Proof taken from Guillaume Melquiond's interval package for Coq *) Definition Ratan_seq x := fun n => (x ^ (2 * n + 1) / INR (2 * n + 1))%R. Lemma Ratan_seq_decreasing : forall x, (0 <= x <= 1)%R -> Un_decreasing (Ratan_seq x). Proof. intros x Hx n. unfold Ratan_seq, Rdiv. apply Rmult_le_compat. apply pow_le. exact (proj1 Hx). apply Rlt_le. apply Rinv_0_lt_compat. apply lt_INR_0. omega. destruct (proj1 Hx) as [Hx1|Hx1]. destruct (proj2 Hx) as [Hx2|Hx2]. (* . 0 < x < 1 *) rewrite <- (Rinv_involutive x). assert (/ x <> 0)%R by auto with real. repeat rewrite <- Rinv_pow with (1 := H). apply Rlt_le. apply Rinv_lt_contravar. apply Rmult_lt_0_compat ; apply pow_lt ; auto with real. apply Rlt_pow. rewrite <- Rinv_1. apply Rinv_lt_contravar. rewrite Rmult_1_r. exact Hx1. exact Hx2. omega. apply Rgt_not_eq. exact Hx1. (* . x = 1 *) rewrite Hx2. do 2 rewrite pow1. apply Rle_refl. (* . x = 0 *) rewrite <- Hx1. do 2 (rewrite pow_i ; [ idtac | omega ]). apply Rle_refl. apply Rlt_le. apply Rinv_lt_contravar. apply Rmult_lt_0_compat ; apply lt_INR_0 ; omega. apply lt_INR. omega. Qed. Lemma Ratan_seq_converging : forall x, (0 <= x <= 1)%R -> Un_cv (Ratan_seq x) 0. Proof. intros x Hx eps Heps. destruct (archimed (/ eps)) as (HN,_). assert (0 < up (/ eps))%Z. apply lt_IZR. apply Rlt_trans with (2 := HN). apply Rinv_0_lt_compat. exact Heps. case_eq (up (/ eps)) ; intros ; rewrite H0 in H ; try discriminate H. rewrite H0 in HN. simpl in HN. pose (N := Pos.to_nat p). fold N in HN. clear H H0. exists N. intros n Hn. unfold R_dist. rewrite Rminus_0_r. 
unfold Ratan_seq. rewrite Rabs_right. apply Rle_lt_trans with (1 ^ (2 * n + 1) / INR (2 * n + 1))%R. unfold Rdiv. apply Rmult_le_compat_r. apply Rlt_le. apply Rinv_0_lt_compat. apply lt_INR_0. omega. apply pow_incr. exact Hx. rewrite pow1. apply Rle_lt_trans with (/ INR (2 * N + 1))%R. unfold Rdiv. rewrite Rmult_1_l. apply Rle_Rinv. apply lt_INR_0. omega. replace 0 with (INR 0) by intuition. apply lt_INR. omega. intuition. rewrite <- (Rinv_involutive eps). apply Rinv_lt_contravar. apply Rmult_lt_0_compat. auto with real. apply lt_INR_0. omega. apply Rlt_trans with (INR N). destruct (archimed (/ eps)) as (H,_). assert (0 < up (/ eps))%Z. apply lt_IZR. apply Rlt_trans with (2 := H). apply Rinv_0_lt_compat. exact Heps. exact HN. apply lt_INR. omega. apply Rgt_not_eq. exact Heps. apply Rle_ge. unfold Rdiv. apply Rmult_le_pos. apply pow_le. exact (proj1 Hx). apply Rlt_le. apply Rinv_0_lt_compat. apply lt_INR_0. omega. Qed. Definition ps_atan_exists_01 (x : R) (Hx:0 <= x <= 1) : {l : R | Un_cv (fun N : nat => sum_f_R0 (tg_alt (Ratan_seq x)) N) l}. exact (alternated_series (Ratan_seq x) (Ratan_seq_decreasing _ Hx) (Ratan_seq_converging _ Hx)). Defined. Lemma Ratan_seq_opp : forall x n, Ratan_seq (-x) n = -Ratan_seq x n. Proof. intros x n; unfold Ratan_seq. rewrite !pow_add, !pow_mult, !pow_1. unfold Rdiv; replace ((-x) ^ 2) with (x ^ 2) by ring; ring. Qed. Lemma sum_Ratan_seq_opp : forall x n, sum_f_R0 (tg_alt (Ratan_seq (- x))) n = - sum_f_R0 (tg_alt (Ratan_seq x)) n. Proof. intros x n; replace (-sum_f_R0 (tg_alt (Ratan_seq x)) n) with (-1 * sum_f_R0 (tg_alt (Ratan_seq x)) n) by ring. rewrite scal_sum; apply sum_eq; intros i _; unfold tg_alt. rewrite Ratan_seq_opp; ring. Qed. Definition ps_atan_exists_1 (x : R) (Hx : -1 <= x <= 1) : {l : R | Un_cv (fun N : nat => sum_f_R0 (tg_alt (Ratan_seq x)) N) l}. destruct (Rle_lt_dec 0 x). assert (pr : 0 <= x <= 1) by tauto. exact (ps_atan_exists_01 x pr). assert (pr : 0 <= -x <= 1) by (destruct Hx; split; fourier). destruct (ps_atan_exists_01 _ pr) as [v Pv]. exists (-v). apply (Un_cv_ext (fun n => (- 1) * sum_f_R0 (tg_alt (Ratan_seq (- x))) n)). intros n; rewrite sum_Ratan_seq_opp; ring. replace (-v) with (-1 * v) by ring. apply CV_mult;[ | assumption]. solve[intros; exists 0%nat; intros; rewrite R_dist_eq; auto]. Qed. Definition in_int (x : R) : {-1 <= x <= 1}+{~ -1 <= x <= 1}. destruct (Rle_lt_dec x 1). destruct (Rle_lt_dec (-1) x). left;split; auto. right;intros [a1 a2]; fourier. right;intros [a1 a2]; fourier. Qed. Definition ps_atan (x : R) : R := match in_int x with left h => let (v, _) := ps_atan_exists_1 x h in v | right h => atan x end. (** * Proof of the equivalence of the two definitions between -1 and 1 *) Lemma ps_atan0_0 : ps_atan 0 = 0. Proof. unfold ps_atan. destruct (in_int 0) as [h1 | h2]. destruct (ps_atan_exists_1 0 h1) as [v P]. apply (UL_sequence _ _ _ P). apply (Un_cv_ext (fun n => 0)). symmetry;apply sum_eq_R0. intros i _; unfold tg_alt, Ratan_seq; rewrite plus_comm; simpl. unfold Rdiv; rewrite !Rmult_0_l, Rmult_0_r; reflexivity. intros eps ep; exists 0%nat; intros n _; unfold R_dist. rewrite Rminus_0_r, Rabs_pos_eq; auto with real. case h2; split; fourier. Qed. Lemma ps_atan_exists_1_opp : forall x h h', proj1_sig (ps_atan_exists_1 (-x) h) = -(proj1_sig (ps_atan_exists_1 x h')). Proof. intros x h h'; destruct (ps_atan_exists_1 (-x) h) as [v Pv]. destruct (ps_atan_exists_1 x h') as [u Pu]; simpl. assert (Pu' : Un_cv (fun N => (-1) * sum_f_R0 (tg_alt (Ratan_seq x)) N) (-1 * u)). apply CV_mult;[ | assumption]. 
intros eps ep; exists 0%nat; intros; rewrite R_dist_eq; assumption. assert (Pv' : Un_cv (fun N : nat => -1 * sum_f_R0 (tg_alt (Ratan_seq x)) N) v). apply Un_cv_ext with (2 := Pv); intros n; rewrite sum_Ratan_seq_opp; ring. replace (-u) with (-1 * u) by ring. apply UL_sequence with (1:=Pv') (2:= Pu'). Qed. Lemma ps_atan_opp : forall x, ps_atan (-x) = -ps_atan x. Proof. intros x; unfold ps_atan. destruct (in_int (- x)) as [inside | outside]. destruct (in_int x) as [ins' | outs']. generalize (ps_atan_exists_1_opp x inside ins'). intros h; exact h. destruct inside; case outs'; split; fourier. destruct (in_int x) as [ins' | outs']. destruct outside; case ins'; split; fourier. apply atan_opp. Qed. (** atan = ps_atan *) Lemma ps_atanSeq_continuity_pt_1 : forall (N:nat) (x:R), 0 <= x -> x <= 1 -> continuity_pt (fun x => sum_f_R0 (tg_alt (Ratan_seq x)) N) x. Proof. assert (Sublemma : forall (x:R) (N:nat), sum_f_R0 (tg_alt (Ratan_seq x)) N = x * (comp (fun x => sum_f_R0 (fun n => (fun i : nat => (-1) ^ i / INR (2 * i + 1)) n * x ^ n) N) (fun x => x ^ 2) x)). intros x N. induction N. unfold tg_alt, Ratan_seq, comp ; simpl ; field. simpl sum_f_R0 at 1. rewrite IHN. replace (comp (fun x => sum_f_R0 (fun n : nat => (-1) ^ n / INR (2 * n + 1) * x ^ n) (S N)) (fun x => x ^ 2)) with (comp (fun x => sum_f_R0 (fun n : nat => (-1) ^ n / INR (2 * n + 1) * x ^ n) N + (-1) ^ (S N) / INR (2 * (S N) + 1) * x ^ (S N)) (fun x => x ^ 2)). unfold comp. rewrite Rmult_plus_distr_l. apply Rplus_eq_compat_l. unfold tg_alt, Ratan_seq. rewrite <- Rmult_assoc. case (Req_dec x 0) ; intro Hyp. rewrite Hyp ; rewrite pow_i. rewrite Rmult_0_l ; rewrite Rmult_0_l. unfold Rdiv ; rewrite Rmult_0_l ; rewrite Rmult_0_r ; reflexivity. intuition. replace (x * ((-1) ^ S N / INR (2 * S N + 1)) * (x ^ 2) ^ S N) with (x ^ (2 * S N + 1) * ((-1) ^ S N / INR (2 * S N + 1))). rewrite Rmult_comm ; unfold Rdiv at 1. rewrite Rmult_assoc ; apply Rmult_eq_compat_l. field. apply Rgt_not_eq ; intuition. rewrite Rmult_assoc. replace (x * ((-1) ^ S N / INR (2 * S N + 1) * (x ^ 2) ^ S N)) with (((-1) ^ S N / INR (2 * S N + 1) * (x ^ 2) ^ S N) * x). rewrite Rmult_assoc. replace ((x ^ 2) ^ S N * x) with (x ^ (2 * S N + 1)). rewrite Rmult_comm at 1 ; reflexivity. rewrite <- pow_mult. assert (Temp : forall x n, x ^ n * x = x ^ (n+1)). intros a n ; induction n. rewrite pow_O. simpl ; intuition. simpl ; rewrite Rmult_assoc ; rewrite IHn ; intuition. rewrite Temp ; reflexivity. rewrite Rmult_comm ; reflexivity. intuition. intros N x x_lb x_ub. intros eps eps_pos. assert (continuity_id : continuity id). apply derivable_continuous ; exact derivable_id. assert (Temp := continuity_mult id (comp (fun x1 : R => sum_f_R0 (fun n : nat => (-1) ^ n / INR (2 * n + 1) * x1 ^ n) N) (fun x1 : R => x1 ^ 2)) continuity_id). assert (Temp2 : continuity (comp (fun x1 : R => sum_f_R0 (fun n : nat => (-1) ^ n / INR (2 * n + 1) * x1 ^ n) N) (fun x1 : R => x1 ^ 2))). apply continuity_comp. reg. apply continuity_finite_sum. elim (Temp Temp2 x eps eps_pos) ; clear Temp Temp2 ; intros alpha T ; destruct T as (alpha_pos, T). exists alpha ; split. intuition. intros x0 x0_cond. rewrite Sublemma ; rewrite Sublemma. apply T. intuition. Qed. (** Definition of ps_atan's derivative *) Definition Datan_seq := fun (x:R) (n:nat) => x ^ (2*n). Lemma pow_lt_1_compat : forall x n, 0 <= x < 1 -> (0 < n)%nat -> 0 <= x ^ n < 1. Proof. intros x n hx; induction 1; simpl. rewrite Rmult_1_r; tauto. split. apply Rmult_le_pos; tauto. rewrite <- (Rmult_1_r 1); apply Rmult_le_0_lt_compat; intuition. Qed. 
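(* Expository sketch, not part of the original file: the first term of the derivative series [Datan_seq] is always [1], since [x ^ (2 * 0)] reduces to [x ^ 0]. The name [Datan_seq_0_sketch] is ours. *)
Lemma Datan_seq_0_sketch : forall x, Datan_seq x 0%nat = 1.
Proof. intros x; unfold Datan_seq; simpl; reflexivity. Qed.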
Lemma Datan_seq_Rabs : forall x n, Datan_seq (Rabs x) n = Datan_seq x n. Proof. intros x n; unfold Datan_seq; rewrite !pow_mult, pow2_abs; reflexivity. Qed. Lemma Datan_seq_pos : forall x n, 0 < x -> 0 < Datan_seq x n. Proof. intros x n x_lb ; unfold Datan_seq ; induction n. simpl ; intuition. replace (x ^ (2 * S n)) with ((x ^ 2) * (x ^ (2 * n))). apply Rmult_gt_0_compat. replace (x^2) with (x*x) by field ; apply Rmult_gt_0_compat ; assumption. assumption. replace (2 * S n)%nat with (S (S (2 * n))) by intuition. simpl ; field. Qed. Lemma Datan_sum_eq :forall x n, sum_f_R0 (tg_alt (Datan_seq x)) n = (1 - (- x ^ 2) ^ S n)/(1 + x ^ 2). Proof. intros x n. assert (dif : - x ^ 2 <> 1). apply Rlt_not_eq; apply Rle_lt_trans with 0;[ | apply Rlt_0_1]. assert (t := pow2_ge_0 x); fourier. replace (1 + x ^ 2) with (1 - - (x ^ 2)) by ring; rewrite <- (tech3 _ n dif). apply sum_eq; unfold tg_alt, Datan_seq; intros i _. rewrite pow_mult, <- Rpow_mult_distr, Ropp_mult_distr_l_reverse, Rmult_1_l. reflexivity. Qed. Lemma Datan_seq_increasing : forall x y n, (n > 0)%nat -> 0 <= x < y -> Datan_seq x n < Datan_seq y n. Proof. intros x y n n_lb x_encad ; assert (x_pos : x >= 0) by intuition. assert (y_pos : y > 0). apply Rle_lt_trans with (r2:=x) ; intuition. induction n. apply False_ind ; intuition. clear -x_encad x_pos y_pos ; induction n ; unfold Datan_seq. case x_pos ; clear x_pos ; intro x_pos. simpl ; apply Rmult_gt_0_lt_compat ; intuition. fourier. rewrite x_pos ; rewrite pow_i. replace (y ^ (2*1)) with (y*y). apply Rmult_gt_0_compat ; assumption. simpl ; field. intuition. assert (Hrew : forall a, a^(2 * S (S n)) = (a ^ 2) * (a ^ (2 * S n))). clear ; intro a ; replace (2 * S (S n))%nat with (S (S (2 * S n)))%nat by intuition. simpl ; field. case x_pos ; clear x_pos ; intro x_pos. rewrite Hrew ; rewrite Hrew. apply Rmult_gt_0_lt_compat ; intuition. apply Rmult_gt_0_lt_compat ; intuition ; fourier. rewrite x_pos. rewrite pow_i ; intuition. Qed. Lemma Datan_seq_decreasing : forall x, -1 < x -> x < 1 -> Un_decreasing (Datan_seq x). Proof. intros x x_lb x_ub n. unfold Datan_seq. replace (2 * S n)%nat with (2 + 2 * n)%nat by ring. rewrite <- (Rmult_1_l (x ^ (2 * n))). rewrite pow_add. apply Rmult_le_compat_r. rewrite pow_mult; apply pow_le, pow2_ge_0. apply Rlt_le; rewrite <- pow2_abs. assert (intabs : 0 <= Rabs x < 1). split;[apply Rabs_pos | apply Rabs_def1]; tauto. apply (pow_lt_1_compat (Rabs x) 2) in intabs. tauto. omega. Qed. Lemma Datan_seq_CV_0 : forall x, -1 < x -> x < 1 -> Un_cv (Datan_seq x) 0. Proof. intros x x_lb x_ub eps eps_pos. assert (x_ub2 : Rabs (x^2) < 1). rewrite Rabs_pos_eq;[ | apply pow2_ge_0]. rewrite <- pow2_abs. assert (H: 0 <= Rabs x < 1) by (split;[apply Rabs_pos | apply Rabs_def1; auto]). apply (pow_lt_1_compat _ 2) in H;[tauto | omega]. elim (pow_lt_1_zero (x^2) x_ub2 eps eps_pos) ; intros N HN ; exists N ; intros n Hn. unfold R_dist, Datan_seq. replace (x ^ (2 * n) - 0) with ((x ^ 2) ^ n). apply HN ; assumption. rewrite pow_mult ; field. Qed. Lemma Datan_lim : forall x, -1 < x -> x < 1 -> Un_cv (fun N : nat => sum_f_R0 (tg_alt (Datan_seq x)) N) (/ (1 + x ^ 2)). Proof. intros x x_lb x_ub eps eps_pos. assert (Tool0 : 0 <= x ^ 2) by apply pow2_ge_0. assert (Tool1 : 0 < (1 + x ^ 2)). solve[apply Rplus_lt_le_0_compat ; intuition]. assert (Tool2 : / (1 + x ^ 2) > 0). apply Rinv_0_lt_compat ; tauto. assert (x_ub2' : 0<= Rabs (x^2) < 1). rewrite Rabs_pos_eq, <- pow2_abs;[ | apply pow2_ge_0]. apply pow_lt_1_compat;[split;[apply Rabs_pos | ] | omega]. apply Rabs_def1; assumption. 
assert (x_ub2 : Rabs (x^2) < 1) by tauto. assert (eps'_pos : ((1+x^2)*eps) > 0). apply Rmult_gt_0_compat ; assumption. elim (pow_lt_1_zero _ x_ub2 _ eps'_pos) ; intros N HN ; exists N. intros n Hn. assert (H1 : - x^2 <> 1). apply Rlt_not_eq; apply Rle_lt_trans with (2 := Rlt_0_1). assert (t := pow2_ge_0 x); fourier. rewrite Datan_sum_eq. unfold R_dist. assert (tool : forall a b, a / b - /b = (-1 + a) /b). intros a b; rewrite <- (Rmult_1_l (/b)); unfold Rdiv, Rminus. rewrite <- Ropp_mult_distr_l_reverse, Rmult_plus_distr_r, Rplus_comm. reflexivity. set (u := 1 + x ^ 2); rewrite tool; unfold Rminus; rewrite <- Rplus_assoc. unfold Rdiv, u. rewrite Rplus_opp_l, Rplus_0_l, Ropp_mult_distr_l_reverse, Rabs_Ropp. rewrite Rabs_mult; clear tool u. assert (tool : forall k, Rabs ((-x ^ 2) ^ k) = Rabs ((x ^ 2) ^ k)). clear -Tool0; induction k;[simpl; rewrite Rabs_R1;tauto | ]. rewrite <- !(tech_pow_Rmult _ k), !Rabs_mult, Rabs_Ropp, IHk, Rabs_pos_eq. reflexivity. exact Tool0. rewrite tool, (Rabs_pos_eq (/ _)); clear tool;[ | apply Rlt_le; assumption]. assert (tool : forall a b c, 0 < b -> a < b * c -> a * / b < c). intros a b c bp h; replace c with (b * c * /b). apply Rmult_lt_compat_r. apply Rinv_0_lt_compat; assumption. assumption. field; apply Rgt_not_eq; exact bp. apply tool;[exact Tool1 | ]. apply HN; omega. Qed. Lemma Datan_CVU_prelim : forall c (r : posreal), Rabs c + r < 1 -> CVU (fun N x => sum_f_R0 (tg_alt (Datan_seq x)) N) (fun y : R => / (1 + y ^ 2)) c r. Proof. intros c r ub_ub eps eps_pos. apply (Alt_CVU (fun x n => Datan_seq n x) (fun x => /(1 + x ^ 2)) (Datan_seq (Rabs c + r)) c r). intros x inb; apply Datan_seq_decreasing; try (apply Boule_lt in inb; apply Rabs_def2 in inb; destruct inb; fourier). intros x inb; apply Datan_seq_CV_0; try (apply Boule_lt in inb; apply Rabs_def2 in inb; destruct inb; fourier). intros x inb; apply (Datan_lim x); try (apply Boule_lt in inb; apply Rabs_def2 in inb; destruct inb; fourier). intros x [ | n] inb. solve[unfold Datan_seq; apply Rle_refl]. rewrite <- (Datan_seq_Rabs x); apply Rlt_le, Datan_seq_increasing. omega. apply Boule_lt in inb; intuition. solve[apply Rabs_pos]. apply Datan_seq_CV_0. apply Rlt_trans with 0;[fourier | ]. apply Rplus_le_lt_0_compat. solve[apply Rabs_pos]. destruct r; assumption. assumption. assumption. Qed. Lemma Datan_is_datan : forall (N:nat) (x:R), -1 <= x -> x < 1 -> derivable_pt_lim (fun x => sum_f_R0 (tg_alt (Ratan_seq x)) N) x (sum_f_R0 (tg_alt (Datan_seq x)) N). Proof. assert (Tool : forall N, (-1) ^ (S (2 * N)) = - 1). intro n ; induction n. simpl ; field. replace ((-1) ^ S (2 * S n)) with ((-1) ^ 2 * (-1) ^ S (2*n)). rewrite IHn ; field. rewrite <- pow_add. replace (2 + S (2 * n))%nat with (S (2 * S n))%nat. reflexivity. intuition. intros N x x_lb x_ub. induction N. unfold Datan_seq, Ratan_seq, tg_alt ; simpl. intros eps eps_pos. elim (derivable_pt_lim_id x eps eps_pos) ; intros delta Hdelta ; exists delta. intros h hneq h_b. replace (1 * ((x + h) * 1 / 1) - 1 * (x * 1 / 1)) with (id (x + h) - id x). rewrite Rmult_1_r. apply Hdelta ; assumption. unfold id ; field ; assumption. intros eps eps_pos. assert (eps_3_pos : (eps/3) > 0) by fourier. elim (IHN (eps/3) eps_3_pos) ; intros delta1 Hdelta1. assert (Main : derivable_pt_lim (fun x : R =>tg_alt (Ratan_seq x) (S N)) x ((tg_alt (Datan_seq x)) (S N))). clear -Tool ; intros eps' eps'_pos. elim (derivable_pt_lim_pow x (2 * (S N) + 1) eps' eps'_pos) ; intros delta Hdelta ; exists delta. intros h h_neq h_b ; unfold tg_alt, Ratan_seq, Datan_seq. 
replace (((-1) ^ S N * ((x + h) ^ (2 * S N + 1) / INR (2 * S N + 1)) - (-1) ^ S N * (x ^ (2 * S N + 1) / INR (2 * S N + 1))) / h - (-1) ^ S N * x ^ (2 * S N)) with (((-1)^(S N)) * ((((x + h) ^ (2 * S N + 1) / INR (2 * S N + 1)) - (x ^ (2 * S N + 1) / INR (2 * S N + 1))) / h - x ^ (2 * S N))). rewrite Rabs_mult ; rewrite pow_1_abs ; rewrite Rmult_1_l. replace (((x + h) ^ (2 * S N + 1) / INR (2 * S N + 1) - x ^ (2 * S N + 1) / INR (2 * S N + 1)) / h - x ^ (2 * S N)) with ((/INR (2* S N + 1)) * (((x + h) ^ (2 * S N + 1) - x ^ (2 * S N + 1)) / h - INR (2 * S N + 1) * x ^ pred (2 * S N + 1))). rewrite Rabs_mult. case (Req_dec (((x + h) ^ (2 * S N + 1) - x ^ (2 * S N + 1)) / h - INR (2 * S N + 1) * x ^ pred (2 * S N + 1)) 0) ; intro Heq. rewrite Heq ; rewrite Rabs_R0 ; rewrite Rmult_0_r ; assumption. apply Rlt_trans with (r2:=Rabs (((x + h) ^ (2 * S N + 1) - x ^ (2 * S N + 1)) / h - INR (2 * S N + 1) * x ^ pred (2 * S N + 1))). rewrite <- Rmult_1_l ; apply Rmult_lt_compat_r. apply Rabs_pos_lt ; assumption. rewrite Rabs_right. replace 1 with (/1) by field. apply Rinv_1_lt_contravar ; intuition. apply Rgt_ge ; replace (INR (2 * S N + 1)) with (INR (2*S N) + 1) ; [apply RiemannInt.RinvN_pos | ]. replace (2 * S N + 1)%nat with (S (2 * S N))%nat by intuition ; rewrite S_INR ; reflexivity. apply Hdelta ; assumption. rewrite Rmult_minus_distr_l. replace (/ INR (2 * S N + 1) * (INR (2 * S N + 1) * x ^ pred (2 * S N + 1))) with (x ^ (2 * S N)). unfold Rminus ; rewrite Rplus_comm. replace (((x + h) ^ (2 * S N + 1) / INR (2 * S N + 1) + - (x ^ (2 * S N + 1) / INR (2 * S N + 1))) / h + - x ^ (2 * S N)) with (- x ^ (2 * S N) + (((x + h) ^ (2 * S N + 1) / INR (2 * S N + 1) + - (x ^ (2 * S N + 1) / INR (2 * S N + 1))) / h)) by intuition. apply Rplus_eq_compat_l. field. split ; [apply Rgt_not_eq|] ; intuition. clear ; replace (pred (2 * S N + 1)) with (2 * S N)%nat by intuition. field ; apply Rgt_not_eq ; intuition. field ; split ; [apply Rgt_not_eq |] ; intuition. elim (Main (eps/3) eps_3_pos) ; intros delta2 Hdelta2. destruct delta1 as (delta1, delta1_pos) ; destruct delta2 as (delta2, delta2_pos). pose (mydelta := Rmin delta1 delta2). assert (mydelta_pos : mydelta > 0). unfold mydelta ; rewrite Rmin_Rgt ; split ; assumption. pose (delta := mkposreal mydelta mydelta_pos) ; exists delta ; intros h h_neq h_b. clear Main IHN. unfold Rminus at 1. apply Rle_lt_trans with (r2:=eps/3 + eps / 3). assert (Temp : (sum_f_R0 (tg_alt (Ratan_seq (x + h))) (S N) - sum_f_R0 (tg_alt (Ratan_seq x)) (S N)) / h + - sum_f_R0 (tg_alt (Datan_seq x)) (S N) = ((sum_f_R0 (tg_alt (Ratan_seq (x + h))) N - sum_f_R0 (tg_alt (Ratan_seq x)) N) / h) + (- sum_f_R0 (tg_alt (Datan_seq x)) N) + ((tg_alt (Ratan_seq (x + h)) (S N) - tg_alt (Ratan_seq x) (S N)) / h - tg_alt (Datan_seq x) (S N))). simpl ; field ; intuition. apply Rle_trans with (r2:= Rabs ((sum_f_R0 (tg_alt (Ratan_seq (x + h))) N - sum_f_R0 (tg_alt (Ratan_seq x)) N) / h + - sum_f_R0 (tg_alt (Datan_seq x)) N) + Rabs ((tg_alt (Ratan_seq (x + h)) (S N) - tg_alt (Ratan_seq x) (S N)) / h - tg_alt (Datan_seq x) (S N))). rewrite Temp ; clear Temp ; apply Rabs_triang. apply Rplus_le_compat ; apply Rlt_le ; [apply Hdelta1 | apply Hdelta2] ; intuition ; apply Rlt_le_trans with (r2:=delta) ; intuition unfold delta, mydelta. apply Rmin_l. apply Rmin_r. fourier. Qed. Lemma Ratan_CVU' : CVU (fun N x => sum_f_R0 (tg_alt (Ratan_seq x)) N) ps_atan (/2) (mkposreal (/2) pos_half_prf). Proof. apply (Alt_CVU (fun i r => Ratan_seq r i) ps_atan PI_tg (/2) pos_half); lazy beta. 
now intros; apply Ratan_seq_decreasing, Boule_half_to_interval. now intros; apply Ratan_seq_converging, Boule_half_to_interval. intros x b; apply Boule_half_to_interval in b. unfold ps_atan; destruct (in_int x) as [inside | outside]; [ | destruct b; case outside; split; fourier]. destruct (ps_atan_exists_1 x inside) as [v Pv]. apply Un_cv_ext with (2 := Pv);[reflexivity]. intros x n b; apply Boule_half_to_interval in b. rewrite <- (Rmult_1_l (PI_tg n)); unfold Ratan_seq, PI_tg. apply Rmult_le_compat_r. apply Rlt_le, Rinv_0_lt_compat, (lt_INR 0); omega. rewrite <- (pow1 (2 * n + 1)); apply pow_incr; assumption. exact PI_tg_cv. Qed. Lemma Ratan_CVU : CVU (fun N x => sum_f_R0 (tg_alt (Ratan_seq x)) N) ps_atan 0 (mkposreal 1 Rlt_0_1). Proof. intros eps ep; destruct (Ratan_CVU' eps ep) as [N Pn]. exists N; intros n x nN b_y. case (Rtotal_order 0 x) as [xgt0 | [x0 | x0]]. assert (Boule (/2) {| pos := / 2; cond_pos := pos_half_prf|} x). revert b_y; unfold Boule; simpl; intros b_y; apply Rabs_def2 in b_y. destruct b_y; unfold Boule; simpl; apply Rabs_def1; fourier. apply Pn; assumption. rewrite <- x0, ps_atan0_0. rewrite <- (sum_eq (fun _ => 0)), sum_cte, Rmult_0_l, Rminus_0_r, Rabs_pos_eq. assumption. apply Rle_refl. intros i _; unfold tg_alt, Ratan_seq, Rdiv; rewrite plus_comm; simpl. solve[rewrite !Rmult_0_l, Rmult_0_r; auto]. replace (ps_atan x - sum_f_R0 (tg_alt (Ratan_seq x)) n) with (-(ps_atan (-x) - sum_f_R0 (tg_alt (Ratan_seq (-x))) n)). rewrite Rabs_Ropp. assert (Boule (/2) {| pos := / 2; cond_pos := pos_half_prf|} (-x)). revert b_y; unfold Boule; simpl; intros b_y; apply Rabs_def2 in b_y. destruct b_y; unfold Boule; simpl; apply Rabs_def1; fourier. apply Pn; assumption. unfold Rminus; rewrite ps_atan_opp, Ropp_plus_distr, sum_Ratan_seq_opp. rewrite !Ropp_involutive; reflexivity. Qed. Lemma Alt_PI_tg : forall n, PI_tg n = Ratan_seq 1 n. Proof. intros n; unfold PI_tg, Ratan_seq, Rdiv; rewrite pow1, Rmult_1_l. reflexivity. Qed. Lemma Ratan_is_ps_atan : forall eps, eps > 0 -> exists N, forall n, (n >= N)%nat -> forall x, -1 < x -> x < 1 -> Rabs (sum_f_R0 (tg_alt (Ratan_seq x)) n - ps_atan x) < eps. Proof. intros eps ep. destruct (Ratan_CVU _ ep) as [N1 PN1]. exists N1; intros n nN x xm1 x1; rewrite <- Rabs_Ropp, Ropp_minus_distr. apply PN1; [assumption | ]. unfold Boule; simpl; rewrite Rminus_0_r; apply Rabs_def1; assumption. Qed. Lemma Datan_continuity : continuity (fun x => /(1+x ^ 2)). Proof. apply continuity_inv. apply continuity_plus. apply continuity_const ; unfold constant ; intuition. apply derivable_continuous ; apply derivable_pow. intro x ; apply Rgt_not_eq ; apply Rge_gt_trans with (1+0) ; [|fourier] ; apply Rplus_ge_compat_l. replace (x^2) with (x²). apply Rle_ge ; apply Rle_0_sqr. unfold Rsqr ; field. Qed. Lemma derivable_pt_lim_ps_atan : forall x, -1 < x < 1 -> derivable_pt_lim ps_atan x ((fun y => /(1 + y ^ 2)) x). Proof. intros x x_encad. destruct (boule_in_interval (-1) 1 x x_encad) as [c [r [Pcr1 [P1 P2]]]]. change (/ (1 + x ^ 2)) with ((fun u => /(1 + u ^ 2)) x). assert (t := derivable_pt_lim_CVU). apply derivable_pt_lim_CVU with (fn := (fun N x => sum_f_R0 (tg_alt (Ratan_seq x)) N)) (fn' := (fun N x => sum_f_R0 (tg_alt (Datan_seq x)) N)) (c := c) (r := r). assumption. intros y N inb; apply Rabs_def2 in inb; destruct inb. apply Datan_is_datan. fourier. fourier. intros y inb; apply Rabs_def2 in inb; destruct inb. assert (y_gt_0 : -1 < y) by fourier. assert (y_lt_1 : y < 1) by fourier. intros eps eps_pos ; elim (Ratan_is_ps_atan eps eps_pos). 
intros N HN ; exists N; intros n n_lb ; apply HN ; tauto. apply Datan_CVU_prelim. replace ((c - r + (c + r)) / 2) with c by field. unfold mkposreal_lb_ub; simpl. replace ((c + r - (c - r)) / 2) with (r :R) by field. assert (Rabs c < 1 - r). unfold Boule in Pcr1; destruct r; simpl in *; apply Rabs_def1; apply Rabs_def2 in Pcr1; destruct Pcr1; fourier. fourier. intros; apply Datan_continuity. Qed. Lemma derivable_pt_ps_atan : forall x, -1 < x < 1 -> derivable_pt ps_atan x. Proof. intros x x_encad. exists (/(1+x^2)) ; apply derivable_pt_lim_ps_atan; assumption. Qed. Lemma ps_atan_continuity_pt_1 : forall eps : R, eps > 0 -> exists alp : R, alp > 0 /\ (forall x, x < 1 -> 0 < x -> R_dist x 1 < alp -> dist R_met (ps_atan x) (Alt_PI/4) < eps). Proof. intros eps eps_pos. assert (eps_3_pos : eps / 3 > 0) by fourier. elim (Ratan_is_ps_atan (eps / 3) eps_3_pos) ; intros N1 HN1. unfold Alt_PI. destruct exist_PI as [v Pv]; replace ((4 * v)/4) with v by field. assert (Pv' : Un_cv (sum_f_R0 (tg_alt (Ratan_seq 1))) v). apply Un_cv_ext with (2:= Pv). intros; apply sum_eq; intros; unfold tg_alt; rewrite Alt_PI_tg; tauto. destruct (Pv' (eps / 3) eps_3_pos) as [N2 HN2]. set (N := (N1 + N2)%nat). assert (O_lb : 0 <= 1) by intuition ; assert (O_ub : 1 <= 1) by intuition ; elim (ps_atanSeq_continuity_pt_1 N 1 O_lb O_ub (eps / 3) eps_3_pos) ; intros alpha Halpha ; clear -HN1 HN2 Halpha eps_3_pos; destruct Halpha as (alpha_pos, Halpha). exists alpha ; split;[assumption | ]. intros x x_ub x_lb x_bounds. simpl ; unfold R_dist. replace (ps_atan x - v) with ((ps_atan x - sum_f_R0 (tg_alt (Ratan_seq x)) N) + (sum_f_R0 (tg_alt (Ratan_seq x)) N - sum_f_R0 (tg_alt (Ratan_seq 1)) N) + (sum_f_R0 (tg_alt (Ratan_seq 1)) N - v)). apply Rle_lt_trans with (r2:=Rabs (ps_atan x - sum_f_R0 (tg_alt (Ratan_seq x)) N) + Rabs ((sum_f_R0 (tg_alt (Ratan_seq x)) N - sum_f_R0 (tg_alt (Ratan_seq 1)) N) + (sum_f_R0 (tg_alt (Ratan_seq 1)) N - v))). rewrite Rplus_assoc ; apply Rabs_triang. replace eps with (2 / 3 * eps + eps / 3). rewrite Rplus_comm. apply Rplus_lt_compat. apply Rle_lt_trans with (r2 := Rabs (sum_f_R0 (tg_alt (Ratan_seq x)) N - sum_f_R0 (tg_alt (Ratan_seq 1)) N) + Rabs (sum_f_R0 (tg_alt (Ratan_seq 1)) N - v)). apply Rabs_triang. apply Rlt_le_trans with (r2:= eps / 3 + eps / 3). apply Rplus_lt_compat. simpl in Halpha ; unfold R_dist in Halpha. apply Halpha ; split. unfold D_x, no_cond ; split ; [ | apply Rgt_not_eq ] ; intuition. intuition. apply HN2; unfold N; omega. fourier. rewrite <- Rabs_Ropp, Ropp_minus_distr; apply HN1. unfold N; omega. fourier. assumption. field. ring. Qed. Lemma Datan_eq_DatanSeq_interv : forall x, -1 < x < 1 -> forall (Pratan:derivable_pt ps_atan x) (Prmymeta:derivable_pt atan x), derive_pt ps_atan x Pratan = derive_pt atan x Prmymeta. Proof. assert (freq : 0 < tan 1) by apply (Rlt_trans _ _ _ Rlt_0_1 tan_1_gt_1). intros x x_encad Pratan Prmymeta. rewrite pr_nu_var2_interv with (g:=ps_atan) (lb:=-1) (ub:=tan 1) (pr2 := derivable_pt_ps_atan x x_encad). rewrite pr_nu_var2_interv with (f:=atan) (g:=atan) (lb:=-1) (ub:= 1) (pr2:=derivable_pt_atan x). assert (Temp := derivable_pt_lim_ps_atan x x_encad). assert (Hrew1 : derive_pt ps_atan x (derivable_pt_ps_atan x x_encad) = (/(1+x^2))). apply derive_pt_eq_0 ; assumption. rewrite derive_pt_atan. rewrite Hrew1. replace (Rsqr x) with (x ^ 2) by (unfold Rsqr; ring). unfold Rdiv; rewrite Rmult_1_l; reflexivity. fourier. assumption. intros; reflexivity. fourier. assert (t := tan_1_gt_1); split;destruct x_encad; fourier. intros; reflexivity. Qed. 
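(* Expository sketch, not part of the original file: before identifying [atan] and [ps_atan] on 0 < x < 1 below, note that the two definitions already agree at [0], by [atan_0] and [ps_atan0_0]. The name [atan_ps_atan_0_sketch] is ours. *)
Lemma atan_ps_atan_0_sketch : atan 0 = ps_atan 0.
Proof. rewrite atan_0, ps_atan0_0; reflexivity. Qed.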
Lemma atan_eq_ps_atan : forall x, 0 < x < 1 -> atan x = ps_atan x. Proof. intros x x_encad. assert (pr1 : forall c : R, 0 < c < x -> derivable_pt (atan - ps_atan) c). intros c c_encad. apply derivable_pt_minus. exact (derivable_pt_atan c). apply derivable_pt_ps_atan. destruct x_encad; destruct c_encad; split; fourier. assert (pr2 : forall c : R, 0 < c < x -> derivable_pt id c). intros ; apply derivable_pt_id; fourier. assert (delta_cont : forall c : R, 0 <= c <= x -> continuity_pt (atan - ps_atan) c). intros c [[c_encad1 | c_encad1 ] [c_encad2 | c_encad2]]; apply continuity_pt_minus. apply derivable_continuous_pt ; apply derivable_pt_atan. apply derivable_continuous_pt ; apply derivable_pt_ps_atan. split; destruct x_encad; fourier. apply derivable_continuous_pt, derivable_pt_atan. apply derivable_continuous_pt, derivable_pt_ps_atan. subst c; destruct x_encad; split; fourier. apply derivable_continuous_pt, derivable_pt_atan. apply derivable_continuous_pt, derivable_pt_ps_atan. subst c; split; fourier. apply derivable_continuous_pt, derivable_pt_atan. apply derivable_continuous_pt, derivable_pt_ps_atan. subst c; destruct x_encad; split; fourier. assert (id_cont : forall c : R, 0 <= c <= x -> continuity_pt id c). intros ; apply derivable_continuous ; apply derivable_id. assert (x_lb : 0 < x) by (destruct x_encad; fourier). elim (MVT (atan - ps_atan)%F id 0 x pr1 pr2 x_lb delta_cont id_cont) ; intros d Temp ; elim Temp ; intros d_encad Main. clear - Main x_encad. assert (Temp : forall (pr: derivable_pt (atan - ps_atan) d), derive_pt (atan - ps_atan) d pr = 0). intro pr. assert (d_encad3 : -1 < d < 1). destruct d_encad; destruct x_encad; split; fourier. pose (pr3 := derivable_pt_minus atan ps_atan d (derivable_pt_atan d) (derivable_pt_ps_atan d d_encad3)). rewrite <- pr_nu_var2_interv with (f:=(atan - ps_atan)%F) (g:=(atan - ps_atan)%F) (lb:=0) (ub:=x) (pr1:=pr3) (pr2:=pr). unfold pr3. rewrite derive_pt_minus. rewrite Datan_eq_DatanSeq_interv with (Prmymeta := derivable_pt_atan d). intuition. assumption. destruct d_encad; fourier. assumption. reflexivity. assert (iatan0 : atan 0 = 0). apply tan_is_inj. apply atan_bound. rewrite Ropp_div; assert (t := PI2_RGT_0); split; fourier. rewrite tan_0, atan_right_inv; reflexivity. generalize Main; rewrite Temp, Rmult_0_r. replace ((atan - ps_atan)%F x) with (atan x - ps_atan x) by intuition. replace ((atan - ps_atan)%F 0) with (atan 0 - ps_atan 0) by intuition. rewrite iatan0, ps_atan0_0, !Rminus_0_r. replace (derive_pt id d (pr2 d d_encad)) with 1. rewrite Rmult_1_r. solve[intros M; apply Rminus_diag_uniq; auto]. rewrite pr_nu_var with (g:=id) (pr2:=derivable_pt_id d). symmetry ; apply derive_pt_id. tauto. Qed. Theorem Alt_PI_eq : Alt_PI = PI. apply Rmult_eq_reg_r with (/4); fold (Alt_PI/4); fold (PI/4); [ | apply Rgt_not_eq; fourier]. assert (0 < PI/6) by (apply PI6_RGT_0). assert (t1:= PI2_1). assert (t2 := PI_4). assert (m := Alt_PI_RGT_0). assert (-PI/2 < 1 < PI/2) by (rewrite Ropp_div; split; fourier). apply cond_eq; intros eps ep. change (R_dist (Alt_PI/4) (PI/4) < eps). assert (ca : continuity_pt atan 1). apply derivable_continuous_pt, derivable_pt_atan. assert (Xe : exists eps', exists eps'', eps' + eps'' <= eps /\ 0 < eps' /\ 0 < eps''). exists (eps/2); exists (eps/2); repeat apply conj; fourier. destruct Xe as [eps' [eps'' [eps_ineq [ep' ep'']]]]. destruct (ps_atan_continuity_pt_1 _ ep') as [alpha [a0 Palpha]]. destruct (ca _ ep'') as [beta [b0 Pbeta]]. assert (Xa : exists a, 0 < a < 1 /\ R_dist a 1 < alpha /\ R_dist a 1 < beta). 
exists (Rmax (/2) (Rmax (1 - alpha /2) (1 - beta /2))). assert (/2 <= Rmax (/2) (Rmax (1 - alpha /2) (1 - beta /2))) by apply Rmax_l. assert (Rmax (1 - alpha /2) (1 - beta /2) <= Rmax (/2) (Rmax (1 - alpha /2) (1 - beta /2))) by apply Rmax_r. assert ((1 - alpha /2) <= Rmax (1 - alpha /2) (1 - beta /2)) by apply Rmax_l. assert ((1 - beta /2) <= Rmax (1 - alpha /2) (1 - beta /2)) by apply Rmax_r. assert (Rmax (1 - alpha /2) (1 - beta /2) < 1) by (apply Rmax_lub_lt; fourier). split;[split;[ | apply Rmax_lub_lt]; fourier | ]. assert (0 <= 1 - Rmax (/ 2) (Rmax (1 - alpha / 2) (1 - beta / 2))). assert (Rmax (/2) (Rmax (1 - alpha / 2) (1 - beta /2)) <= 1) by (apply Rmax_lub; fourier). fourier. split; unfold R_dist; rewrite <-Rabs_Ropp, Ropp_minus_distr, Rabs_pos_eq;fourier. destruct Xa as [a [[Pa0 Pa1] [P1 P2]]]. apply Rle_lt_trans with (1 := R_dist_tri _ _ (ps_atan a)). apply Rlt_le_trans with (2 := eps_ineq). apply Rplus_lt_compat. rewrite R_dist_sym; apply Palpha; assumption. rewrite <- atan_eq_ps_atan. rewrite <- atan_1; apply (Pbeta a); auto. split; [ | exact P2]. split;[exact I | apply Rgt_not_eq; assumption]. split; assumption. Qed. Lemma PI_ineq : forall N : nat, sum_f_R0 (tg_alt PI_tg) (S (2 * N)) <= PI / 4 <= sum_f_R0 (tg_alt PI_tg) (2 * N). Proof. intros; rewrite <- Alt_PI_eq; apply Alt_PI_ineq. Qed. coq-8.4pl2/theories/Reals/Ranalysis.v0000640000175000001440000000204012010532755016647 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* r2}. Proof. intros; generalize (total_order_T r1 r2) Rlt_dichotomy_converse; intuition eauto 3. Qed. Definition Reqb r1 r2 := if Req_dec r1 r2 then true else false. Lemma Reqb_eq : forall r1 r2, Reqb r1 r2 = true <-> r1=r2. Proof. intros; unfold Reqb; destruct Req_dec as [EQ|NEQ]; auto with *. split; try discriminate. intro EQ; elim NEQ; auto. Qed. Module R_as_UBE <: UsualBoolEq. Definition t := R. Definition eq := @eq R. Definition eqb := Reqb. Definition eqb_eq := Reqb_eq. End R_as_UBE. Module R_as_DT <: UsualDecidableTypeFull := Make_UDTF R_as_UBE. (** Note that the last module fulfills by subtyping many other interfaces, such as [DecidableType] or [EqualityType]. *) (** Note that [R_as_DT] can also be seen as a [DecidableType] and a [DecidableTypeOrig]. *) (** * OrderedType structure for binary integers *) Definition Rcompare x y := match total_order_T x y with | inleft (left _) => Lt | inleft (right _) => Eq | inright _ => Gt end. Lemma Rcompare_spec : forall x y, CompareSpec (x=y) (xLogic.eq==>iff) Rlt. Proof. repeat red; intros; subst; auto. Qed. Lemma le_lteq : forall x y, x <= y <-> x < y \/ x = y. Proof. unfold Rle; auto with *. Qed. Definition compare_spec := Rcompare_spec. End R_as_OT. (** Note that [R_as_OT] can also be seen as a [UsualOrderedType] and a [OrderedType] (and also as a [DecidableType]). *) (** * An [order] tactic for real numbers *) Module ROrder := OTF_to_OrderTac R_as_OT. Ltac r_order := ROrder.order. (** Note that [r_order] is domain-agnostic: it will not prove [1<=2] or [x<=x+x], but rather things like [x<=y -> y<=x -> x=y]. 
*) coq-8.4pl2/theories/Reals/vo.itarget0000640000175000001440000000140211776416467016544 0ustar notinusersAlembert.vo AltSeries.vo ArithProp.vo Binomial.vo Cauchy_prod.vo Cos_plus.vo Cos_rel.vo DiscrR.vo Exp_prop.vo Integration.vo LegacyRfield.vo Machin.vo MVT.vo NewtonInt.vo PartSum.vo PSeries_reg.vo Ranalysis1.vo Ranalysis2.vo Ranalysis3.vo Ranalysis4.vo Ranalysis5.vo Ranalysis.vo Ranalysis_reg.vo Ratan.vo Raxioms.vo Rbase.vo Rbasic_fun.vo Rcomplete.vo Rdefinitions.vo Rderiv.vo Reals.vo Rfunctions.vo Rgeom.vo RiemannInt_SF.vo RiemannInt.vo R_Ifp.vo RIneq.vo Rlimit.vo RList.vo Rlogic.vo Rpow_def.vo Rpower.vo Rprod.vo Rseries.vo Rsigma.vo Rsqrt_def.vo R_sqrt.vo R_sqr.vo Rtopology.vo Rtrigo_alt.vo Rtrigo_calc.vo Rtrigo_def.vo Rtrigo_fun.vo Rtrigo_reg.vo Rtrigo1.vo Rtrigo.vo SeqProp.vo SeqSeries.vo SplitAbsolu.vo SplitRmult.vo Sqrt_reg.vo ROrderedType.vo Rminmax.vo coq-8.4pl2/theories/Reals/Rpower.v0000640000175000001440000005644212010532755016177 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R->R; main properties *) (************************************************************) Require Import Rbase. Require Import Rfunctions. Require Import SeqSeries. Require Import Rtrigo1. Require Import Ranalysis1. Require Import Exp_prop. Require Import Rsqrt_def. Require Import R_sqrt. Require Import MVT. Require Import Ranalysis4. Local Open Scope R_scope. Lemma P_Rmin : forall (P:R -> Prop) (x y:R), P x -> P y -> P (Rmin x y). Proof. intros P x y H1 H2; unfold Rmin; case (Rle_dec x y); intro; assumption. Qed. Lemma exp_le_3 : exp 1 <= 3. Proof. assert (exp_1 : exp 1 <> 0). assert (H0 := exp_pos 1); red; intro; rewrite H in H0; elim (Rlt_irrefl _ H0). apply Rmult_le_reg_l with (/ exp 1). apply Rinv_0_lt_compat; apply exp_pos. rewrite <- Rinv_l_sym. apply Rmult_le_reg_l with (/ 3). apply Rinv_0_lt_compat; prove_sup0. rewrite Rmult_1_r; rewrite <- (Rmult_comm 3); rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; replace (/ exp 1) with (exp (-1)). unfold exp; case (exist_exp (-1)); intros; simpl; unfold exp_in in e; assert (H := alternated_series_ineq (fun i:nat => / INR (fact i)) x 1). cut (sum_f_R0 (tg_alt (fun i:nat => / INR (fact i))) (S (2 * 1)) <= x <= sum_f_R0 (tg_alt (fun i:nat => / INR (fact i))) (2 * 1)). intro; elim H0; clear H0; intros H0 _; simpl in H0; unfold tg_alt in H0; simpl in H0. replace (/ 3) with (1 * / 1 + -1 * 1 * / 1 + -1 * (-1 * 1) * / 2 + -1 * (-1 * (-1 * 1)) * / (2 + 1 + 1 + 1 + 1)). apply H0. repeat rewrite Rinv_1; repeat rewrite Rmult_1_r; rewrite Ropp_mult_distr_l_reverse; rewrite Rmult_1_l; rewrite Ropp_involutive; rewrite Rplus_opp_r; rewrite Rmult_1_r; rewrite Rplus_0_l; rewrite Rmult_1_l; apply Rmult_eq_reg_l with 6. rewrite Rmult_plus_distr_l; replace (2 + 1 + 1 + 1 + 1) with 6. rewrite <- (Rmult_comm (/ 6)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l; replace 6 with 6. do 2 rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; rewrite (Rmult_comm 3); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. ring. discrR. discrR. ring. discrR. ring. discrR. apply H. unfold Un_decreasing; intros; apply Rmult_le_reg_l with (INR (fact n)). apply INR_fact_lt_0. apply Rmult_le_reg_l with (INR (fact (S n))). apply INR_fact_lt_0. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; rewrite Rmult_comm; rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; apply le_INR; apply fact_le; apply le_n_Sn. apply INR_fact_neq_0. 
apply INR_fact_neq_0. assert (H0 := cv_speed_pow_fact 1); unfold Un_cv; unfold Un_cv in H0; intros; elim (H0 _ H1); intros; exists x0; intros; unfold R_dist in H2; unfold R_dist; replace (/ INR (fact n)) with (1 ^ n / INR (fact n)). apply (H2 _ H3). unfold Rdiv; rewrite pow1; rewrite Rmult_1_l; reflexivity. unfold infinite_sum in e; unfold Un_cv, tg_alt; intros; elim (e _ H0); intros; exists x0; intros; replace (sum_f_R0 (fun i:nat => (-1) ^ i * / INR (fact i)) n) with (sum_f_R0 (fun i:nat => / INR (fact i) * (-1) ^ i) n). apply (H1 _ H2). apply sum_eq; intros; apply Rmult_comm. apply Rmult_eq_reg_l with (exp 1). rewrite <- exp_plus; rewrite Rplus_opp_r; rewrite exp_0; rewrite <- Rinv_r_sym. reflexivity. assumption. assumption. discrR. assumption. Qed. (******************************************************************) (** * Properties of Exp *) (******************************************************************) Theorem exp_increasing : forall x y:R, x < y -> exp x < exp y. Proof. intros x y H. assert (H0 : derivable exp). apply derivable_exp. assert (H1 := positive_derivative _ H0). unfold strict_increasing in H1. apply H1. intro. replace (derive_pt exp x0 (H0 x0)) with (exp x0). apply exp_pos. symmetry ; apply derive_pt_eq_0. apply (derivable_pt_lim_exp x0). apply H. Qed. Theorem exp_lt_inv : forall x y:R, exp x < exp y -> x < y. Proof. intros x y H; case (Rtotal_order x y); [ intros H1 | intros [H1| H1] ]. assumption. rewrite H1 in H; elim (Rlt_irrefl _ H). assert (H2 := exp_increasing _ _ H1). elim (Rlt_irrefl _ (Rlt_trans _ _ _ H H2)). Qed. Lemma exp_ineq1 : forall x:R, 0 < x -> 1 + x < exp x. Proof. intros; apply Rplus_lt_reg_r with (- exp 0); rewrite <- (Rplus_comm (exp x)); assert (H0 := MVT_cor1 exp 0 x derivable_exp H); elim H0; intros; elim H1; intros; unfold Rminus in H2; rewrite H2; rewrite Ropp_0; rewrite Rplus_0_r; replace (derive_pt exp x0 (derivable_exp x0)) with (exp x0). rewrite exp_0; rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; pattern x at 1; rewrite <- Rmult_1_r; rewrite (Rmult_comm (exp x0)); apply Rmult_lt_compat_l. apply H. rewrite <- exp_0; apply exp_increasing; elim H3; intros; assumption. symmetry ; apply derive_pt_eq_0; apply derivable_pt_lim_exp. Qed. Lemma ln_exists1 : forall y:R, 1 <= y -> { z:R | y = exp z }. Proof. intros; set (f := fun x:R => exp x - y). assert (H0 : 0 < y) by (apply Rlt_le_trans with 1; auto with real). cut (f 0 <= 0); [intro H1|]. cut (continuity f); [intro H2|]. cut (0 <= f y); [intro H3|]. cut (f 0 * f y <= 0); [intro H4|]. pose proof (IVT_cor f 0 y H2 (Rlt_le _ _ H0) H4) as (t,(_,H7)); exists t; unfold f in H7; apply Rminus_diag_uniq_sym; exact H7. pattern 0 at 2; rewrite <- (Rmult_0_r (f y)); rewrite (Rmult_comm (f 0)); apply Rmult_le_compat_l; assumption. unfold f; apply Rplus_le_reg_l with y; left; apply Rlt_trans with (1 + y). rewrite <- (Rplus_comm y); apply Rplus_lt_compat_l; apply Rlt_0_1. replace (y + (exp y - y)) with (exp y); [ apply (exp_ineq1 y H0) | ring ]. unfold f; change (continuity (exp - fct_cte y)); apply continuity_minus; [ apply derivable_continuous; apply derivable_exp | apply derivable_continuous; apply derivable_const ]. unfold f; rewrite exp_0; apply Rplus_le_reg_l with y; rewrite Rplus_0_r; replace (y + (1 - y)) with 1; [ apply H | ring ]. Qed. (**********) Lemma ln_exists : forall y:R, 0 < y -> { z:R | y = exp z }. Proof. intros; case (Rle_dec 1 y); intro. apply (ln_exists1 _ r). assert (H0 : 1 <= / y). apply Rmult_le_reg_l with y. apply H. rewrite <- Rinv_r_sym. 
rewrite Rmult_1_r; left; apply (Rnot_le_lt _ _ n). red; intro; rewrite H0 in H; elim (Rlt_irrefl _ H). destruct (ln_exists1 _ H0) as (x,p); exists (- x); apply Rmult_eq_reg_l with (exp x / y). unfold Rdiv; rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite <- (Rmult_comm (/ y)); rewrite Rmult_assoc; rewrite <- exp_plus; rewrite Rplus_opp_r; rewrite exp_0; rewrite Rmult_1_r; symmetry ; apply p. red; intro H3; rewrite H3 in H; elim (Rlt_irrefl _ H). unfold Rdiv; apply prod_neq_R0. assert (H3 := exp_pos x); red; intro H4; rewrite H4 in H3; elim (Rlt_irrefl _ H3). apply Rinv_neq_0_compat; red; intro H3; rewrite H3 in H; elim (Rlt_irrefl _ H). Qed. (* Definition of log R+* -> R *) Definition Rln (y:posreal) : R := let (a,_) := ln_exists (pos y) (cond_pos y) in a. (* Extension on R *) Definition ln (x:R) : R := match Rlt_dec 0 x with | left a => Rln (mkposreal x a) | right a => 0 end. Lemma exp_ln : forall x:R, 0 < x -> exp (ln x) = x. Proof. intros; unfold ln; case (Rlt_dec 0 x); intro. unfold Rln; case (ln_exists (mkposreal x r) (cond_pos (mkposreal x r))); intros. simpl in e; symmetry ; apply e. elim n; apply H. Qed. Theorem exp_inv : forall x y:R, exp x = exp y -> x = y. Proof. intros x y H; case (Rtotal_order x y); [ intros H1 | intros [H1| H1] ]; auto; assert (H2 := exp_increasing _ _ H1); rewrite H in H2; elim (Rlt_irrefl _ H2). Qed. Theorem exp_Ropp : forall x:R, exp (- x) = / exp x. Proof. intros x; assert (H : exp x <> 0). assert (H := exp_pos x); red; intro; rewrite H0 in H; elim (Rlt_irrefl _ H). apply Rmult_eq_reg_l with (r := exp x). rewrite <- exp_plus; rewrite Rplus_opp_r; rewrite exp_0. apply Rinv_r_sym. apply H. apply H. Qed. (******************************************************************) (** * Properties of Ln *) (******************************************************************) Theorem ln_increasing : forall x y:R, 0 < x -> x < y -> ln x < ln y. Proof. intros x y H H0; apply exp_lt_inv. repeat rewrite exp_ln. apply H0. apply Rlt_trans with x; assumption. apply H. Qed. Theorem ln_exp : forall x:R, ln (exp x) = x. Proof. intros x; apply exp_inv. apply exp_ln. apply exp_pos. Qed. Theorem ln_1 : ln 1 = 0. Proof. rewrite <- exp_0; rewrite ln_exp; reflexivity. Qed. Theorem ln_lt_inv : forall x y:R, 0 < x -> 0 < y -> ln x < ln y -> x < y. Proof. intros x y H H0 H1; rewrite <- (exp_ln x); try rewrite <- (exp_ln y). apply exp_increasing; apply H1. assumption. assumption. Qed. Theorem ln_inv : forall x y:R, 0 < x -> 0 < y -> ln x = ln y -> x = y. Proof. intros x y H H0 H'0; case (Rtotal_order x y); [ intros H1 | intros [H1| H1] ]; auto. assert (H2 := ln_increasing _ _ H H1); rewrite H'0 in H2; elim (Rlt_irrefl _ H2). assert (H2 := ln_increasing _ _ H0 H1); rewrite H'0 in H2; elim (Rlt_irrefl _ H2). Qed. Theorem ln_mult : forall x y:R, 0 < x -> 0 < y -> ln (x * y) = ln x + ln y. Proof. intros x y H H0; apply exp_inv. rewrite exp_plus. repeat rewrite exp_ln. reflexivity. assumption. assumption. apply Rmult_lt_0_compat; assumption. Qed. Theorem ln_Rinv : forall x:R, 0 < x -> ln (/ x) = - ln x. Proof. intros x H; apply exp_inv; repeat rewrite exp_ln || rewrite exp_Ropp. reflexivity. assumption. apply Rinv_0_lt_compat; assumption. Qed. Theorem ln_continue : forall y:R, 0 < y -> continue_in ln (fun x:R => 0 < x) y. Proof. intros y H. unfold continue_in, limit1_in, limit_in; intros eps Heps. cut (1 < exp eps); [ intros H1 | idtac ]. cut (exp (- eps) < 1); [ intros H2 | idtac ]. exists (Rmin (y * (exp eps - 1)) (y * (1 - exp (- eps)))); split. red; apply P_Rmin. 
apply Rmult_lt_0_compat. assumption. apply Rplus_lt_reg_r with 1. rewrite Rplus_0_r; replace (1 + (exp eps - 1)) with (exp eps); [ apply H1 | ring ]. apply Rmult_lt_0_compat. assumption. apply Rplus_lt_reg_r with (exp (- eps)). rewrite Rplus_0_r; replace (exp (- eps) + (1 - exp (- eps))) with 1; [ apply H2 | ring ]. unfold dist, R_met, R_dist; simpl. intros x [[H3 H4] H5]. cut (y * (x * / y) = x). intro Hxyy. replace (ln x - ln y) with (ln (x * / y)). case (Rtotal_order x y); [ intros Hxy | intros [Hxy| Hxy] ]. rewrite Rabs_left. apply Ropp_lt_cancel; rewrite Ropp_involutive. apply exp_lt_inv. rewrite exp_ln. apply Rmult_lt_reg_l with (r := y). apply H. rewrite Hxyy. apply Ropp_lt_cancel. apply Rplus_lt_reg_r with (r := y). replace (y + - (y * exp (- eps))) with (y * (1 - exp (- eps))); [ idtac | ring ]. replace (y + - x) with (Rabs (x - y)). apply Rlt_le_trans with (1 := H5); apply Rmin_r. rewrite Rabs_left; [ ring | idtac ]. apply (Rlt_minus _ _ Hxy). apply Rmult_lt_0_compat; [ apply H3 | apply (Rinv_0_lt_compat _ H) ]. rewrite <- ln_1. apply ln_increasing. apply Rmult_lt_0_compat; [ apply H3 | apply (Rinv_0_lt_compat _ H) ]. apply Rmult_lt_reg_l with (r := y). apply H. rewrite Hxyy; rewrite Rmult_1_r; apply Hxy. rewrite Hxy; rewrite Rinv_r. rewrite ln_1; rewrite Rabs_R0; apply Heps. red; intro; rewrite H0 in H; elim (Rlt_irrefl _ H). rewrite Rabs_right. apply exp_lt_inv. rewrite exp_ln. apply Rmult_lt_reg_l with (r := y). apply H. rewrite Hxyy. apply Rplus_lt_reg_r with (r := - y). replace (- y + y * exp eps) with (y * (exp eps - 1)); [ idtac | ring ]. replace (- y + x) with (Rabs (x - y)). apply Rlt_le_trans with (1 := H5); apply Rmin_l. rewrite Rabs_right; [ ring | idtac ]. left; apply (Rgt_minus _ _ Hxy). apply Rmult_lt_0_compat; [ apply H3 | apply (Rinv_0_lt_compat _ H) ]. rewrite <- ln_1. apply Rgt_ge; red; apply ln_increasing. apply Rlt_0_1. apply Rmult_lt_reg_l with (r := y). apply H. rewrite Hxyy; rewrite Rmult_1_r; apply Hxy. rewrite ln_mult. rewrite ln_Rinv. ring. assumption. assumption. apply Rinv_0_lt_compat; assumption. rewrite (Rmult_comm x); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. ring. red; intro; rewrite H0 in H; elim (Rlt_irrefl _ H). apply Rmult_lt_reg_l with (exp eps). apply exp_pos. rewrite <- exp_plus; rewrite Rmult_1_r; rewrite Rplus_opp_r; rewrite exp_0; apply H1. rewrite <- exp_0. apply exp_increasing; apply Heps. Qed. (******************************************************************) (** * Definition of Rpower *) (******************************************************************) Definition Rpower (x y:R) := exp (y * ln x). Local Infix "^R" := Rpower (at level 30, right associativity) : R_scope. (******************************************************************) (** * Properties of Rpower *) (******************************************************************) (** Note: [Rpower] is prolongated to [1] on negative real numbers and it thus does not extend integer power. The next two lemmas, which hold for integer power, accidentally hold on negative real numbers as a side effect of the default value taken on negative real numbers. Contrastingly, the lemmas that do not hold for the integer power of a negative number are stated for [Rpower] on the positive numbers only (even if they accidentally hold due to the default value of [Rpower] on the negative side, as it is the case for [Rpower_O]). *) Theorem Rpower_plus : forall x y z:R, z ^R (x + y) = z ^R x * z ^R y. Proof. intros x y z; unfold Rpower. rewrite Rmult_plus_distr_r; rewrite exp_plus; auto. Qed. 
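(* Expository sketch, not part of the original file: directly from the definition [Rpower x y = exp (y * ln x)], the base [1] is neutral, using [ln_1] and [exp_0]. The name [Rpower_base_1_sketch] is ours. *)
Lemma Rpower_base_1_sketch : forall y:R, 1 ^R y = 1.
Proof. intros y; unfold Rpower; rewrite ln_1, Rmult_0_r; apply exp_0. Qed.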
Theorem Rpower_mult : forall x y z:R, (x ^R y) ^R z = x ^R (y * z). Proof. intros x y z; unfold Rpower. rewrite ln_exp. replace (z * (y * ln x)) with (y * z * ln x). reflexivity. ring. Qed. Theorem Rpower_O : forall x:R, 0 < x -> x ^R 0 = 1. Proof. intros x _; unfold Rpower. rewrite Rmult_0_l; apply exp_0. Qed. Theorem Rpower_1 : forall x:R, 0 < x -> x ^R 1 = x. Proof. intros x H; unfold Rpower. rewrite Rmult_1_l; apply exp_ln; apply H. Qed. Theorem Rpower_pow : forall (n:nat) (x:R), 0 < x -> x ^R INR n = x ^ n. Proof. intros n; elim n; simpl; auto; fold INR. intros x H; apply Rpower_O; auto. intros n1; case n1. intros H x H0; simpl; rewrite Rmult_1_r; apply Rpower_1; auto. intros n0 H x H0; rewrite Rpower_plus; rewrite H; try rewrite Rpower_1; try apply Rmult_comm || assumption. Qed. Theorem Rpower_lt : forall x y z:R, 1 < x -> 0 <= y -> y < z -> x ^R y < x ^R z. Proof. intros x y z H H0 H1. unfold Rpower. apply exp_increasing. apply Rmult_lt_compat_r. rewrite <- ln_1; apply ln_increasing. apply Rlt_0_1. apply H. apply H1. Qed. Theorem Rpower_sqrt : forall x:R, 0 < x -> x ^R (/ 2) = sqrt x. Proof. intros x H. apply ln_inv. unfold Rpower; apply exp_pos. apply sqrt_lt_R0; apply H. apply Rmult_eq_reg_l with (INR 2). apply exp_inv. fold Rpower. cut ((x ^R (/ INR 2)) ^R INR 2 = sqrt x ^R INR 2). unfold Rpower; auto. rewrite Rpower_mult. rewrite Rinv_l. replace 1 with (INR 1); auto. repeat rewrite Rpower_pow; simpl. pattern x at 1; rewrite <- (sqrt_sqrt x (Rlt_le _ _ H)). ring. apply sqrt_lt_R0; apply H. apply H. apply not_O_INR; discriminate. apply not_O_INR; discriminate. Qed. Theorem Rpower_Ropp : forall x y:R, x ^R (- y) = / x ^R y. Proof. unfold Rpower. intros x y; rewrite Ropp_mult_distr_l_reverse. apply exp_Ropp. Qed. Theorem Rle_Rpower : forall e n m:R, 1 < e -> 0 <= n -> n <= m -> e ^R n <= e ^R m. Proof. intros e n m H H0 H1; case H1. intros H2; left; apply Rpower_lt; assumption. intros H2; rewrite H2; right; reflexivity. Qed. Theorem ln_lt_2 : / 2 < ln 2. Proof. apply Rmult_lt_reg_l with (r := 2). prove_sup0. rewrite Rinv_r. apply exp_lt_inv. apply Rle_lt_trans with (1 := exp_le_3). change (3 < 2 ^R 2). repeat rewrite Rpower_plus; repeat rewrite Rpower_1. repeat rewrite Rmult_plus_distr_r; repeat rewrite Rmult_plus_distr_l; repeat rewrite Rmult_1_l. pattern 3 at 1; rewrite <- Rplus_0_r; replace (2 + 2) with (3 + 1); [ apply Rplus_lt_compat_l; apply Rlt_0_1 | ring ]. prove_sup0. discrR. Qed. (*****************************************) (** * Differentiability of Ln and Rpower *) (*****************************************) Theorem limit1_ext : forall (f g:R -> R) (D:R -> Prop) (l x:R), (forall x:R, D x -> f x = g x) -> limit1_in f D l x -> limit1_in g D l x. Proof. intros f g D l x H; unfold limit1_in, limit_in. intros H0 eps H1; case (H0 eps); auto. intros x0 [H2 H3]; exists x0; split; auto. intros x1 [H4 H5]; rewrite <- H; auto. Qed. Theorem limit1_imp : forall (f:R -> R) (D D1:R -> Prop) (l x:R), (forall x:R, D1 x -> D x) -> limit1_in f D l x -> limit1_in f D1 l x. Proof. intros f D D1 l x H; unfold limit1_in, limit_in. intros H0 eps H1; case (H0 eps H1); auto. intros alpha [H2 H3]; exists alpha; split; auto. intros d [H4 H5]; apply H3; split; auto. Qed. Theorem Rinv_Rdiv : forall x y:R, x <> 0 -> y <> 0 -> / (x / y) = y / x. Proof. intros x y H1 H2; unfold Rdiv; rewrite Rinv_mult_distr. rewrite Rinv_involutive. apply Rmult_comm. assumption. assumption. apply Rinv_neq_0_compat; assumption. Qed. Theorem Dln : forall y:R, 0 < y -> D_in ln Rinv (fun x:R => 0 < x) y. Proof. 
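(* Expository comment, not part of the original file: the proof below rewrites the difference quotient of [ln] at [y] as the inverse of the difference quotient of [exp] between [ln x] and [ln y], then concludes with [limit_inv], [limit_comp], the continuity of [ln] ([ln_continue]) and the derivative of [exp] at [ln y] ([derivable_pt_lim_exp]). *)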
intros y Hy; unfold D_in. apply limit1_ext with (f := fun x:R => / ((exp (ln x) - exp (ln y)) / (ln x - ln y))). intros x [HD1 HD2]; repeat rewrite exp_ln. unfold Rdiv; rewrite Rinv_mult_distr. rewrite Rinv_involutive. apply Rmult_comm. apply Rminus_eq_contra. red; intros H2; case HD2. symmetry ; apply (ln_inv _ _ HD1 Hy H2). apply Rminus_eq_contra; apply (not_eq_sym HD2). apply Rinv_neq_0_compat; apply Rminus_eq_contra; red; intros H2; case HD2; apply ln_inv; auto. assumption. assumption. apply limit_inv with (f := fun x:R => (exp (ln x) - exp (ln y)) / (ln x - ln y)). apply limit1_imp with (f := fun x:R => (fun x:R => (exp x - exp (ln y)) / (x - ln y)) (ln x)) (D := Dgf (D_x (fun x:R => 0 < x) y) (D_x (fun x:R => True) (ln y)) ln). intros x [H1 H2]; split. split; auto. split; auto. red; intros H3; case H2; apply ln_inv; auto. apply limit_comp with (l := ln y) (g := fun x:R => (exp x - exp (ln y)) / (x - ln y)) (f := ln). apply ln_continue; auto. assert (H0 := derivable_pt_lim_exp (ln y)); unfold derivable_pt_lim in H0; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros; elim (H0 _ H); intros; exists (pos x); split. apply (cond_pos x). intros; pattern y at 3; rewrite <- exp_ln. pattern x0 at 1; replace x0 with (ln y + (x0 - ln y)); [ idtac | ring ]. apply H1. elim H2; intros H3 _; unfold D_x in H3; elim H3; clear H3; intros _ H3; apply Rminus_eq_contra; apply (not_eq_sym (A:=R)); apply H3. elim H2; clear H2; intros _ H2; apply H2. assumption. red; intro; rewrite H in Hy; elim (Rlt_irrefl _ Hy). Qed. Lemma derivable_pt_lim_ln : forall x:R, 0 < x -> derivable_pt_lim ln x (/ x). Proof. intros; assert (H0 := Dln x H); unfold D_in in H0; unfold limit1_in in H0; unfold limit_in in H0; simpl in H0; unfold R_dist in H0; unfold derivable_pt_lim; intros; elim (H0 _ H1); intros; elim H2; clear H2; intros; set (alp := Rmin x0 (x / 2)); assert (H4 : 0 < alp). unfold alp; unfold Rmin; case (Rle_dec x0 (x / 2)); intro. apply H2. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. exists (mkposreal _ H4); intros; pattern h at 2; replace h with (x + h - x); [ idtac | ring ]. apply H3; split. unfold D_x; split. case (Rcase_abs h); intro. assert (H7 : Rabs h < x / 2). apply Rlt_le_trans with alp. apply H6. unfold alp; apply Rmin_r. apply Rlt_trans with (x / 2). unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ]. rewrite Rabs_left in H7. apply Rplus_lt_reg_r with (- h - x / 2). replace (- h - x / 2 + x / 2) with (- h); [ idtac | ring ]. pattern x at 2; rewrite double_var. replace (- h - x / 2 + (x / 2 + x / 2 + h)) with (x / 2); [ apply H7 | ring ]. apply r. apply Rplus_lt_le_0_compat; [ assumption | apply Rge_le; apply r ]. apply (not_eq_sym (A:=R)); apply Rminus_not_eq; replace (x + h - x) with h; [ apply H5 | ring ]. replace (x + h - x) with h; [ apply Rlt_le_trans with alp; [ apply H6 | unfold alp; apply Rmin_l ] | ring ]. Qed. Theorem D_in_imp : forall (f g:R -> R) (D D1:R -> Prop) (x:R), (forall x:R, D1 x -> D x) -> D_in f g D x -> D_in f g D1 x. Proof. intros f g D D1 x H; unfold D_in. intros H0; apply limit1_imp with (D := D_x D x); auto. intros x1 [H1 H2]; split; auto. Qed. Theorem D_in_ext : forall (f g h:R -> R) (D:R -> Prop) (x:R), f x = g x -> D_in h f D x -> D_in h g D x. Proof. intros f g h D x H; unfold D_in. rewrite H; auto. Qed. Theorem Dpower : forall y z:R, 0 < y -> D_in (fun x:R => x ^R z) (fun x:R => z * x ^R (z - 1)) ( fun x:R => 0 < x) y. Proof. 
intros y z H; apply D_in_imp with (D := Dgf (fun x:R => 0 < x) (fun x:R => True) ln). intros x H0; repeat split. assumption. apply D_in_ext with (f := fun x:R => / x * (z * exp (z * ln x))). unfold Rminus; rewrite Rpower_plus; rewrite Rpower_Ropp; rewrite (Rpower_1 _ H); unfold Rpower; ring. apply Dcomp with (f := ln) (g := fun x:R => exp (z * x)) (df := Rinv) (dg := fun x:R => z * exp (z * x)). apply (Dln _ H). apply D_in_imp with (D := Dgf (fun x:R => True) (fun x:R => True) (fun x:R => z * x)). intros x H1; repeat split; auto. apply (Dcomp (fun _:R => True) (fun _:R => True) (fun x => z) exp (fun x:R => z * x) exp); simpl. apply D_in_ext with (f := fun x:R => z * 1). apply Rmult_1_r. apply (Dmult_const (fun x => True) (fun x => x) (fun x => 1)); apply Dx. assert (H0 := derivable_pt_lim_D_in exp exp (z * ln y)); elim H0; clear H0; intros _ H0; apply H0; apply derivable_pt_lim_exp. Qed. Theorem derivable_pt_lim_power : forall x y:R, 0 < x -> derivable_pt_lim (fun x => x ^R y) x (y * x ^R (y - 1)). Proof. intros x y H. unfold Rminus; rewrite Rpower_plus. rewrite Rpower_Ropp. rewrite Rpower_1; auto. rewrite <- Rmult_assoc. unfold Rpower. apply derivable_pt_lim_comp with (f1 := ln) (f2 := fun x => exp (y * x)). apply derivable_pt_lim_ln; assumption. rewrite (Rmult_comm y). apply derivable_pt_lim_comp with (f1 := fun x => y * x) (f2 := exp). pattern y at 2; replace y with (0 * ln x + y * 1). apply derivable_pt_lim_mult with (f1 := fun x:R => y) (f2 := fun x:R => x). apply derivable_pt_lim_const with (a := y). apply derivable_pt_lim_id. ring. apply derivable_pt_lim_exp. Qed. coq-8.4pl2/theories/Reals/Raxioms.v0000640000175000001440000001173512010532755016337 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 -> / r * r = 1. Hint Resolve Rinv_l: real. (**********) Axiom Rmult_1_l : forall r:R, 1 * r = r. Hint Resolve Rmult_1_l: real. (**********) Axiom R1_neq_R0 : 1 <> 0. Hint Resolve R1_neq_R0: real. (*********************************************************) (** ** Distributivity *) (*********************************************************) (**********) Axiom Rmult_plus_distr_l : forall r1 r2 r3:R, r1 * (r2 + r3) = r1 * r2 + r1 * r3. Hint Resolve Rmult_plus_distr_l: real v62. (*********************************************************) (** * Order axioms *) (*********************************************************) (*********************************************************) (** ** Total Order *) (*********************************************************) (**********) Axiom total_order_T : forall r1 r2:R, {r1 < r2} + {r1 = r2} + {r1 > r2}. (*********************************************************) (** ** Lower *) (*********************************************************) (**********) Axiom Rlt_asym : forall r1 r2:R, r1 < r2 -> ~ r2 < r1. (**********) Axiom Rlt_trans : forall r1 r2 r3:R, r1 < r2 -> r2 < r3 -> r1 < r3. (**********) Axiom Rplus_lt_compat_l : forall r r1 r2:R, r1 < r2 -> r + r1 < r + r2. (**********) Axiom Rmult_lt_compat_l : forall r r1 r2:R, 0 < r -> r1 < r2 -> r * r1 < r * r2. Hint Resolve Rlt_asym Rplus_lt_compat_l Rmult_lt_compat_l: real. (**********************************************************) (** * Injection from N to R *) (**********************************************************) (**********) Fixpoint INR (n:nat) : R := match n with | O => 0 | S O => 1 | S n => INR n + 1 end. Arguments INR n%nat. 
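(* Added sanity checks (not in the original file; the names [INR_0] and
   [INR_2] are ours): [INR] computes by mere reduction on closed
   numerals, e.g. [INR 2] unfolds to [1 + 1].  Nothing beyond the
   definitions above is needed. *)
Example INR_0 : INR 0 = 0.
Proof. reflexivity. Qed.

Example INR_2 : INR 2 = 1 + 1.
Proof. reflexivity. Qed.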
(**********************************************************) (** * Injection from [Z] to [R] *) (**********************************************************) (**********) Definition IZR (z:Z) : R := match z with | Z0 => 0 | Zpos n => INR (Pos.to_nat n) | Zneg n => - INR (Pos.to_nat n) end. Arguments IZR z%Z. (**********************************************************) (** * [R] Archimedean *) (**********************************************************) (**********) Axiom archimed : forall r:R, IZR (up r) > r /\ IZR (up r) - r <= 1. (**********************************************************) (** * [R] Complete *) (**********************************************************) (**********) Definition is_upper_bound (E:R -> Prop) (m:R) := forall x:R, E x -> x <= m. (**********) Definition bound (E:R -> Prop) := exists m : R, is_upper_bound E m. (**********) Definition is_lub (E:R -> Prop) (m:R) := is_upper_bound E m /\ (forall b:R, is_upper_bound E b -> m <= b). (**********) Axiom completeness : forall E:R -> Prop, bound E -> (exists x : R, E x) -> { m:R | is_lub E m }. coq-8.4pl2/theories/Reals/Integration.v0000640000175000001440000000114112010532755017166 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (a b:R) : Type := { g:R -> R | antiderivative f g a b \/ antiderivative f g b a }. Definition NewtonInt (f:R -> R) (a b:R) (pr:Newton_integrable f a b) : R := let (g,_) := pr in g b - g a. (* If f is differentiable, then f' is Newton integrable (Tautology ?) *) Lemma FTCN_step1 : forall (f:Differential) (a b:R), Newton_integrable (fun x:R => derive_pt f x (cond_diff f x)) a b. Proof. intros f a b; unfold Newton_integrable; exists (d1 f); unfold antiderivative; intros; case (Rle_dec a b); intro; [ left; split; [ intros; exists (cond_diff f x); reflexivity | assumption ] | right; split; [ intros; exists (cond_diff f x); reflexivity | auto with real ] ]. Defined. (* By definition, we have the Fondamental Theorem of Calculus *) Lemma FTC_Newton : forall (f:Differential) (a b:R), NewtonInt (fun x:R => derive_pt f x (cond_diff f x)) a b (FTCN_step1 f a b) = f b - f a. Proof. intros; unfold NewtonInt; reflexivity. Qed. (* $\int_a^a f$ exists forall a:R and f:R->R *) Lemma NewtonInt_P1 : forall (f:R -> R) (a:R), Newton_integrable f a a. Proof. intros f a; unfold Newton_integrable; exists (fct_cte (f a) * id)%F; left; unfold antiderivative; split. intros; assert (H1 : derivable_pt (fct_cte (f a) * id) x). apply derivable_pt_mult. apply derivable_pt_const. apply derivable_pt_id. exists H1; assert (H2 : x = a). elim H; intros; apply Rle_antisym; assumption. symmetry ; apply derive_pt_eq_0; replace (f x) with (0 * id x + fct_cte (f a) x * 1); [ apply (derivable_pt_lim_mult (fct_cte (f a)) id x); [ apply derivable_pt_lim_const | apply derivable_pt_lim_id ] | unfold id, fct_cte; rewrite H2; ring ]. right; reflexivity. Defined. (* $\int_a^a f = 0$ *) Lemma NewtonInt_P2 : forall (f:R -> R) (a:R), NewtonInt f a a (NewtonInt_P1 f a) = 0. Proof. intros; unfold NewtonInt; simpl; unfold mult_fct, fct_cte, id; ring. Qed. (* If $\int_a^b f$ exists, then $\int_b^a f$ exists too *) Lemma NewtonInt_P3 : forall (f:R -> R) (a b:R) (X:Newton_integrable f a b), Newton_integrable f b a. Proof. unfold Newton_integrable; intros; elim X; intros g H; exists g; tauto. Defined. 
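(* Added usage sketch (not part of the original file; the name
   [NewtonInt_id_point] is ours): instantiating [NewtonInt_P2] on the
   identity map shows that a Newton integral over a degenerate interval
   vanishes. *)
Example NewtonInt_id_point :
  NewtonInt (fun x:R => x) 1 1 (NewtonInt_P1 (fun x:R => x) 1) = 0.
Proof.
  apply NewtonInt_P2.
Qed.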
(* $\int_a^b f = -\int_b^a f$ *) Lemma NewtonInt_P4 : forall (f:R -> R) (a b:R) (pr:Newton_integrable f a b), NewtonInt f a b pr = - NewtonInt f b a (NewtonInt_P3 f a b pr). Proof. intros; unfold Newton_integrable in pr; elim pr; intros; elim p; intro. unfold NewtonInt; case (NewtonInt_P3 f a b (exist (fun g:R -> R => antiderivative f g a b \/ antiderivative f g b a) x p)). intros; elim o; intro. unfold antiderivative in H0; elim H0; intros; elim H2; intro. unfold antiderivative in H; elim H; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H5 H3)). rewrite H3; ring. assert (H1 := antiderivative_Ucte f x x0 a b H H0); elim H1; intros; unfold antiderivative in H0; elim H0; clear H0; intros _ H0. assert (H3 : a <= a <= b). split; [ right; reflexivity | assumption ]. assert (H4 : a <= b <= b). split; [ assumption | right; reflexivity ]. assert (H5 := H2 _ H3); assert (H6 := H2 _ H4); rewrite H5; rewrite H6; ring. unfold NewtonInt; case (NewtonInt_P3 f a b (exist (fun g:R -> R => antiderivative f g a b \/ antiderivative f g b a) x p)); intros; elim o; intro. assert (H1 := antiderivative_Ucte f x x0 b a H H0); elim H1; intros; unfold antiderivative in H0; elim H0; clear H0; intros _ H0. assert (H3 : b <= a <= a). split; [ assumption | right; reflexivity ]. assert (H4 : b <= b <= a). split; [ right; reflexivity | assumption ]. assert (H5 := H2 _ H3); assert (H6 := H2 _ H4); rewrite H5; rewrite H6; ring. unfold antiderivative in H0; elim H0; intros; elim H2; intro. unfold antiderivative in H; elim H; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H5 H3)). rewrite H3; ring. Qed. (* The set of Newton integrable functions is a vectorial space *) Lemma NewtonInt_P5 : forall (f g:R -> R) (l a b:R), Newton_integrable f a b -> Newton_integrable g a b -> Newton_integrable (fun x:R => l * f x + g x) a b. Proof. unfold Newton_integrable; intros f g l a b X X0; elim X; intros; elim X0; intros; exists (fun y:R => l * x y + x0 y). elim p; intro. elim p0; intro. left; unfold antiderivative; unfold antiderivative in H, H0; elim H; clear H; intros; elim H0; clear H0; intros H0 _. split. intros; elim (H _ H2); elim (H0 _ H2); intros. assert (H5 : derivable_pt (fun y:R => l * x y + x0 y) x1). reg. exists H5; symmetry ; reg; rewrite <- H3; rewrite <- H4; reflexivity. assumption. unfold antiderivative in H, H0; elim H; elim H0; intros; elim H4; intro. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H5 H2)). left; rewrite <- H5; unfold antiderivative; split. intros; elim H6; intros; assert (H9 : x1 = a). apply Rle_antisym; assumption. assert (H10 : a <= x1 <= b). split; right; [ symmetry ; assumption | rewrite <- H5; assumption ]. assert (H11 : b <= x1 <= a). split; right; [ rewrite <- H5; symmetry ; assumption | assumption ]. assert (H12 : derivable_pt x x1). unfold derivable_pt; exists (f x1); elim (H3 _ H10); intros; eapply derive_pt_eq_1; symmetry ; apply H12. assert (H13 : derivable_pt x0 x1). unfold derivable_pt; exists (g x1); elim (H1 _ H11); intros; eapply derive_pt_eq_1; symmetry ; apply H13. assert (H14 : derivable_pt (fun y:R => l * x y + x0 y) x1). reg. exists H14; symmetry ; reg. assert (H15 : derive_pt x0 x1 H13 = g x1). elim (H1 _ H11); intros; rewrite H15; apply pr_nu. assert (H16 : derive_pt x x1 H12 = f x1). elim (H3 _ H10); intros; rewrite H16; apply pr_nu. rewrite H15; rewrite H16; ring. right; reflexivity. elim p0; intro. unfold antiderivative in H, H0; elim H; elim H0; intros; elim H4; intro. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H5 H2)). left; rewrite H5; unfold antiderivative; split. 
intros; elim H6; intros; assert (H9 : x1 = a). apply Rle_antisym; assumption. assert (H10 : a <= x1 <= b). split; right; [ symmetry ; assumption | rewrite H5; assumption ]. assert (H11 : b <= x1 <= a). split; right; [ rewrite H5; symmetry ; assumption | assumption ]. assert (H12 : derivable_pt x x1). unfold derivable_pt; exists (f x1); elim (H3 _ H11); intros; eapply derive_pt_eq_1; symmetry ; apply H12. assert (H13 : derivable_pt x0 x1). unfold derivable_pt; exists (g x1); elim (H1 _ H10); intros; eapply derive_pt_eq_1; symmetry ; apply H13. assert (H14 : derivable_pt (fun y:R => l * x y + x0 y) x1). reg. exists H14; symmetry ; reg. assert (H15 : derive_pt x0 x1 H13 = g x1). elim (H1 _ H10); intros; rewrite H15; apply pr_nu. assert (H16 : derive_pt x x1 H12 = f x1). elim (H3 _ H11); intros; rewrite H16; apply pr_nu. rewrite H15; rewrite H16; ring. right; reflexivity. right; unfold antiderivative; unfold antiderivative in H, H0; elim H; clear H; intros; elim H0; clear H0; intros H0 _; split. intros; elim (H _ H2); elim (H0 _ H2); intros. assert (H5 : derivable_pt (fun y:R => l * x y + x0 y) x1). reg. exists H5; symmetry ; reg; rewrite <- H3; rewrite <- H4; reflexivity. assumption. Defined. (**********) Lemma antiderivative_P1 : forall (f g F G:R -> R) (l a b:R), antiderivative f F a b -> antiderivative g G a b -> antiderivative (fun x:R => l * f x + g x) (fun x:R => l * F x + G x) a b. Proof. unfold antiderivative; intros; elim H; elim H0; clear H H0; intros; split. intros; elim (H _ H3); elim (H1 _ H3); intros. assert (H6 : derivable_pt (fun x:R => l * F x + G x) x). reg. exists H6; symmetry ; reg; rewrite <- H4; rewrite <- H5; ring. assumption. Qed. (* $\int_a^b \lambda f + g = \lambda \int_a^b f + \int_a^b f *) Lemma NewtonInt_P6 : forall (f g:R -> R) (l a b:R) (pr1:Newton_integrable f a b) (pr2:Newton_integrable g a b), NewtonInt (fun x:R => l * f x + g x) a b (NewtonInt_P5 f g l a b pr1 pr2) = l * NewtonInt f a b pr1 + NewtonInt g a b pr2. Proof. intros f g l a b pr1 pr2; unfold NewtonInt; case (NewtonInt_P5 f g l a b pr1 pr2); intros; case pr1; intros; case pr2; intros; case (total_order_T a b); intro. elim s; intro. elim o; intro. elim o0; intro. elim o1; intro. assert (H2 := antiderivative_P1 f g x0 x1 l a b H0 H1); assert (H3 := antiderivative_Ucte _ _ _ _ _ H H2); elim H3; intros; assert (H5 : a <= a <= b). split; [ right; reflexivity | left; assumption ]. assert (H6 : a <= b <= b). split; [ left; assumption | right; reflexivity ]. assert (H7 := H4 _ H5); assert (H8 := H4 _ H6); rewrite H7; rewrite H8; ring. unfold antiderivative in H1; elim H1; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H3 a0)). unfold antiderivative in H0; elim H0; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 a0)). unfold antiderivative in H; elim H; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H1 a0)). rewrite b0; ring. elim o; intro. unfold antiderivative in H; elim H; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H1 r)). elim o0; intro. unfold antiderivative in H0; elim H0; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 r)). elim o1; intro. unfold antiderivative in H1; elim H1; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H3 r)). assert (H2 := antiderivative_P1 f g x0 x1 l b a H0 H1); assert (H3 := antiderivative_Ucte _ _ _ _ _ H H2); elim H3; intros; assert (H5 : b <= a <= a). split; [ left; assumption | right; reflexivity ]. assert (H6 : b <= b <= a). split; [ right; reflexivity | left; assumption ]. assert (H7 := H4 _ H5); assert (H8 := H4 _ H6); rewrite H7; rewrite H8; ring. Qed. 
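(* Added corollary-style sketch (not part of the original file; the name
   [NewtonInt_P6_point] is ours): it combines the linearity lemma
   [NewtonInt_P6] with [NewtonInt_P2] on a degenerate interval, and
   illustrates how the integrability argument of [NewtonInt_P5] is
   discharged with [NewtonInt_P1]. *)
Example NewtonInt_P6_point :
  forall (f g:R -> R) (l a:R),
    NewtonInt (fun x:R => l * f x + g x) a a
      (NewtonInt_P5 f g l a a (NewtonInt_P1 f a) (NewtonInt_P1 g a)) = 0.
Proof.
  intros f g l a.
  rewrite (NewtonInt_P6 f g l a a (NewtonInt_P1 f a) (NewtonInt_P1 g a)).
  repeat rewrite NewtonInt_P2; ring.
Qed.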
Lemma antiderivative_P2 : forall (f F0 F1:R -> R) (a b c:R), antiderivative f F0 a b -> antiderivative f F1 b c -> antiderivative f (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) a c. Proof. unfold antiderivative; intros; elim H; clear H; intros; elim H0; clear H0; intros; split. 2: apply Rle_trans with b; assumption. intros; elim H3; clear H3; intros; case (total_order_T x b); intro. elim s; intro. assert (H5 : a <= x <= b). split; [ assumption | left; assumption ]. assert (H6 := H _ H5); elim H6; clear H6; intros; assert (H7 : derivable_pt_lim (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) x (f x)). unfold derivable_pt_lim; assert (H7 : derive_pt F0 x x0 = f x). symmetry ; assumption. assert (H8 := derive_pt_eq_1 F0 x (f x) x0 H7); unfold derivable_pt_lim in H8; intros; elim (H8 _ H9); intros; set (D := Rmin x1 (b - x)). assert (H11 : 0 < D). unfold D; unfold Rmin; case (Rle_dec x1 (b - x)); intro. apply (cond_pos x1). apply Rlt_Rminus; assumption. exists (mkposreal _ H11); intros; case (Rle_dec x b); intro. case (Rle_dec (x + h) b); intro. apply H10. assumption. apply Rlt_le_trans with D; [ assumption | unfold D; apply Rmin_l ]. elim n; left; apply Rlt_le_trans with (x + D). apply Rplus_lt_compat_l; apply Rle_lt_trans with (Rabs h). apply RRle_abs. apply H13. apply Rplus_le_reg_l with (- x); rewrite <- Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_l; rewrite Rplus_comm; unfold D; apply Rmin_r. elim n; left; assumption. assert (H8 : derivable_pt (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) x). unfold derivable_pt; exists (f x); apply H7. exists H8; symmetry ; apply derive_pt_eq_0; apply H7. assert (H5 : a <= x <= b). split; [ assumption | right; assumption ]. assert (H6 : b <= x <= c). split; [ right; symmetry ; assumption | assumption ]. elim (H _ H5); elim (H0 _ H6); intros; assert (H9 : derive_pt F0 x x1 = f x). symmetry ; assumption. assert (H10 : derive_pt F1 x x0 = f x). symmetry ; assumption. assert (H11 := derive_pt_eq_1 F0 x (f x) x1 H9); assert (H12 := derive_pt_eq_1 F1 x (f x) x0 H10); assert (H13 : derivable_pt_lim (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) x (f x)). unfold derivable_pt_lim; unfold derivable_pt_lim in H11, H12; intros; elim (H11 _ H13); elim (H12 _ H13); intros; set (D := Rmin x2 x3); assert (H16 : 0 < D). unfold D; unfold Rmin; case (Rle_dec x2 x3); intro. apply (cond_pos x2). apply (cond_pos x3). exists (mkposreal _ H16); intros; case (Rle_dec x b); intro. case (Rle_dec (x + h) b); intro. apply H15. assumption. apply Rlt_le_trans with D; [ assumption | unfold D; apply Rmin_r ]. replace (F1 (x + h) + (F0 b - F1 b) - F0 x) with (F1 (x + h) - F1 x). apply H14. assumption. apply Rlt_le_trans with D; [ assumption | unfold D; apply Rmin_l ]. rewrite b0; ring. elim n; right; assumption. assert (H14 : derivable_pt (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) x). unfold derivable_pt; exists (f x); apply H13. exists H14; symmetry ; apply derive_pt_eq_0; apply H13. assert (H5 : b <= x <= c). split; [ left; assumption | assumption ]. assert (H6 := H0 _ H5); elim H6; clear H6; intros; assert (H7 : derivable_pt_lim (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) x (f x)). unfold derivable_pt_lim; assert (H7 : derive_pt F1 x x0 = f x). symmetry ; assumption. 
assert (H8 := derive_pt_eq_1 F1 x (f x) x0 H7); unfold derivable_pt_lim in H8; intros; elim (H8 _ H9); intros; set (D := Rmin x1 (x - b)); assert (H11 : 0 < D). unfold D; unfold Rmin; case (Rle_dec x1 (x - b)); intro. apply (cond_pos x1). apply Rlt_Rminus; assumption. exists (mkposreal _ H11); intros; case (Rle_dec x b); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 r)). case (Rle_dec (x + h) b); intro. cut (b < x + h). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 H14)). apply Rplus_lt_reg_r with (- h - b); replace (- h - b + b) with (- h); [ idtac | ring ]; replace (- h - b + (x + h)) with (x - b); [ idtac | ring ]; apply Rle_lt_trans with (Rabs h). rewrite <- Rabs_Ropp; apply RRle_abs. apply Rlt_le_trans with D. apply H13. unfold D; apply Rmin_r. replace (F1 (x + h) + (F0 b - F1 b) - (F1 x + (F0 b - F1 b))) with (F1 (x + h) - F1 x); [ idtac | ring ]; apply H10. assumption. apply Rlt_le_trans with D. assumption. unfold D; apply Rmin_l. assert (H8 : derivable_pt (fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end) x). unfold derivable_pt; exists (f x); apply H7. exists H8; symmetry ; apply derive_pt_eq_0; apply H7. Qed. Lemma antiderivative_P3 : forall (f F0 F1:R -> R) (a b c:R), antiderivative f F0 a b -> antiderivative f F1 c b -> antiderivative f F1 c a \/ antiderivative f F0 a c. Proof. intros; unfold antiderivative in H, H0; elim H; clear H; elim H0; clear H0; intros; case (total_order_T a c); intro. elim s; intro. right; unfold antiderivative; split. intros; apply H1; elim H3; intros; split; [ assumption | apply Rle_trans with c; assumption ]. left; assumption. right; unfold antiderivative; split. intros; apply H1; elim H3; intros; split; [ assumption | apply Rle_trans with c; assumption ]. right; assumption. left; unfold antiderivative; split. intros; apply H; elim H3; intros; split; [ assumption | apply Rle_trans with a; assumption ]. left; assumption. Qed. Lemma antiderivative_P4 : forall (f F0 F1:R -> R) (a b c:R), antiderivative f F0 a b -> antiderivative f F1 a c -> antiderivative f F1 b c \/ antiderivative f F0 c b. Proof. intros; unfold antiderivative in H, H0; elim H; clear H; elim H0; clear H0; intros; case (total_order_T c b); intro. elim s; intro. right; unfold antiderivative; split. intros; apply H1; elim H3; intros; split; [ apply Rle_trans with c; assumption | assumption ]. left; assumption. right; unfold antiderivative; split. intros; apply H1; elim H3; intros; split; [ apply Rle_trans with c; assumption | assumption ]. right; assumption. left; unfold antiderivative; split. intros; apply H; elim H3; intros; split; [ apply Rle_trans with b; assumption | assumption ]. left; assumption. Qed. Lemma NewtonInt_P7 : forall (f:R -> R) (a b c:R), a < b -> b < c -> Newton_integrable f a b -> Newton_integrable f b c -> Newton_integrable f a c. Proof. unfold Newton_integrable; intros f a b c Hab Hbc X X0; elim X; clear X; intros F0 H0; elim X0; clear X0; intros F1 H1; set (g := fun x:R => match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end); exists g; left; unfold g; apply antiderivative_P2. elim H0; intro. assumption. unfold antiderivative in H; elim H; clear H; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 Hab)). elim H1; intro. assumption. unfold antiderivative in H; elim H; clear H; intros; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 Hbc)). Qed. Lemma NewtonInt_P8 : forall (f:R -> R) (a b c:R), Newton_integrable f a b -> Newton_integrable f b c -> Newton_integrable f a c. Proof. intros. 
elim X; intros F0 H0. elim X0; intros F1 H1. case (total_order_T a b); intro. elim s; intro. case (total_order_T b c); intro. elim s0; intro. (* a match Rle_dec x b with | left _ => F0 x | right _ => F1 x + (F0 b - F1 b) end). elim H0; intro. elim H1; intro. left; apply antiderivative_P2; assumption. unfold antiderivative in H2; elim H2; clear H2; intros _ H2. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 a1)). unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H a0)). (* ac *) case (total_order_T a c); intro. elim s0; intro. unfold Newton_integrable; exists F0. left. elim H1; intro. unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). elim H0; intro. assert (H3 := antiderivative_P3 f F0 F1 a b c H2 H). elim H3; intro. unfold antiderivative in H4; elim H4; clear H4; intros _ H4. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H4 a1)). assumption. unfold antiderivative in H2; elim H2; clear H2; intros _ H2. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 a0)). rewrite b0; apply NewtonInt_P1. unfold Newton_integrable; exists F1. right. elim H1; intro. unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). elim H0; intro. assert (H3 := antiderivative_P3 f F0 F1 a b c H2 H). elim H3; intro. assumption. unfold antiderivative in H4; elim H4; clear H4; intros _ H4. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H4 r0)). unfold antiderivative in H2; elim H2; clear H2; intros _ H2. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 a0)). (* a=b *) rewrite b0; apply X0. case (total_order_T b c); intro. elim s; intro. (* a>b & bb & b=c *) rewrite b0 in X; apply X. (* a>b & b>c *) assert (X1 := NewtonInt_P3 f a b X). assert (X2 := NewtonInt_P3 f b c X0). apply NewtonInt_P3. apply NewtonInt_P7 with b; assumption. Defined. (* Chasles' relation *) Lemma NewtonInt_P9 : forall (f:R -> R) (a b c:R) (pr1:Newton_integrable f a b) (pr2:Newton_integrable f b c), NewtonInt f a c (NewtonInt_P8 f a b c pr1 pr2) = NewtonInt f a b pr1 + NewtonInt f b c pr2. Proof. intros; unfold NewtonInt. case (NewtonInt_P8 f a b c pr1 pr2); intros. case pr1; intros. case pr2; intros. case (total_order_T a b); intro. elim s; intro. case (total_order_T b c); intro. elim s0; intro. (* a match Rle_dec x b with | left _ => x0 x | right _ => x1 x + (x0 b - x1 b) end) a c H1 H2). elim H3; intros. assert (H5 : a <= a <= c). split; [ right; reflexivity | left; apply Rlt_trans with b; assumption ]. assert (H6 : a <= c <= c). split; [ left; apply Rlt_trans with b; assumption | right; reflexivity ]. rewrite (H4 _ H5); rewrite (H4 _ H6). case (Rle_dec a b); intro. case (Rle_dec c b); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 a1)). ring. elim n; left; assumption. unfold antiderivative in H1; elim H1; clear H1; intros _ H1. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H1 (Rlt_trans _ _ _ a0 a1))). unfold antiderivative in H0; elim H0; clear H0; intros _ H0. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H0 a1)). unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H a0)). (* ac *) elim o1; intro. unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). elim o0; intro. elim o; intro. assert (H2 := antiderivative_P2 f x x1 a c b H1 H). assert (H3 := antiderivative_Ucte _ _ _ a b H0 H2). elim H3; intros. rewrite (H4 a). rewrite (H4 b). case (Rle_dec b c); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 r)). case (Rle_dec a c); intro. ring. 
elim n0; unfold antiderivative in H1; elim H1; intros; assumption. split; [ left; assumption | right; reflexivity ]. split; [ right; reflexivity | left; assumption ]. assert (H2 := antiderivative_P2 _ _ _ _ _ _ H1 H0). assert (H3 := antiderivative_Ucte _ _ _ c b H H2). elim H3; intros. rewrite (H4 c). rewrite (H4 b). case (Rle_dec b a); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r0 a0)). case (Rle_dec c a); intro. ring. elim n0; unfold antiderivative in H1; elim H1; intros; assumption. split; [ left; assumption | right; reflexivity ]. split; [ right; reflexivity | left; assumption ]. unfold antiderivative in H0; elim H0; clear H0; intros _ H0. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H0 a0)). (* a=b *) rewrite b0 in o; rewrite b0. elim o; intro. elim o1; intro. assert (H1 := antiderivative_Ucte _ _ _ b c H H0). elim H1; intros. assert (H3 : b <= c). unfold antiderivative in H; elim H; intros; assumption. rewrite (H2 b). rewrite (H2 c). ring. split; [ assumption | right; reflexivity ]. split; [ right; reflexivity | assumption ]. assert (H1 : b = c). unfold antiderivative in H, H0; elim H; elim H0; intros; apply Rle_antisym; assumption. rewrite H1; ring. elim o1; intro. assert (H1 : b = c). unfold antiderivative in H, H0; elim H; elim H0; intros; apply Rle_antisym; assumption. rewrite H1; ring. assert (H1 := antiderivative_Ucte _ _ _ c b H H0). elim H1; intros. assert (H3 : c <= b). unfold antiderivative in H; elim H; intros; assumption. rewrite (H2 c). rewrite (H2 b). ring. split; [ assumption | right; reflexivity ]. split; [ right; reflexivity | assumption ]. (* a>b & bb & b=c *) rewrite <- b0. unfold Rminus; rewrite Rplus_opp_r; rewrite Rplus_0_r. rewrite <- b0 in o. elim o0; intro. unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). elim o; intro. unfold antiderivative in H0; elim H0; clear H0; intros _ H0. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H0 r)). assert (H1 := antiderivative_Ucte f x x0 b a H0 H). elim H1; intros. rewrite (H2 b). rewrite (H2 a). ring. split; [ left; assumption | right; reflexivity ]. split; [ right; reflexivity | left; assumption ]. (* a>b & b>c *) elim o0; intro. unfold antiderivative in H; elim H; clear H; intros _ H. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). elim o1; intro. unfold antiderivative in H0; elim H0; clear H0; intros _ H0. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H0 r0)). elim o; intro. unfold antiderivative in H1; elim H1; clear H1; intros _ H1. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H1 (Rlt_trans _ _ _ r0 r))). assert (H2 := antiderivative_P2 _ _ _ _ _ _ H0 H). assert (H3 := antiderivative_Ucte _ _ _ c a H1 H2). elim H3; intros. assert (H5 : c <= a). unfold antiderivative in H1; elim H1; intros; assumption. rewrite (H4 c). rewrite (H4 a). case (Rle_dec a b); intro. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r1 r)). case (Rle_dec c b); intro. ring. elim n0; left; assumption. split; [ assumption | right; reflexivity ]. split; [ right; reflexivity | assumption ]. Qed. coq-8.4pl2/theories/Reals/Rtrigo_calc.v0000640000175000001440000003627212010532755017150 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0); [ discrR | unfold Rdiv; rewrite Rinv_mult_distr; try ring ]... Qed. Lemma sin_PI3_cos_PI6 : sin (PI / 3) = cos (PI / 6). Proof with trivial. replace (PI / 6) with (PI / 2 - PI / 3)... rewrite cos_shift... assert (H0 : 6 <> 0); [ discrR | idtac ]... assert (H1 : 3 <> 0); [ discrR | idtac ]... 
assert (H2 : 2 <> 0); [ discrR | idtac ]... apply Rmult_eq_reg_l with 6... rewrite Rmult_minus_distr_l; repeat rewrite (Rmult_comm 6)... unfold Rdiv; repeat rewrite Rmult_assoc... rewrite <- Rinv_l_sym... rewrite (Rmult_comm (/ 3)); repeat rewrite Rmult_assoc; rewrite <- Rinv_r_sym... rewrite (Rmult_comm PI); repeat rewrite Rmult_1_r; repeat rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym... ring... Qed. Lemma sin_PI6_cos_PI3 : cos (PI / 3) = sin (PI / 6). Proof with trivial. replace (PI / 6) with (PI / 2 - PI / 3)... rewrite sin_shift... assert (H0 : 6 <> 0); [ discrR | idtac ]... assert (H1 : 3 <> 0); [ discrR | idtac ]... assert (H2 : 2 <> 0); [ discrR | idtac ]... apply Rmult_eq_reg_l with 6... rewrite Rmult_minus_distr_l; repeat rewrite (Rmult_comm 6)... unfold Rdiv; repeat rewrite Rmult_assoc... rewrite <- Rinv_l_sym... rewrite (Rmult_comm (/ 3)); repeat rewrite Rmult_assoc; rewrite <- Rinv_r_sym... rewrite (Rmult_comm PI); repeat rewrite Rmult_1_r; repeat rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym... ring... Qed. Lemma PI6_RGT_0 : 0 < PI / 6. Proof. unfold Rdiv; apply Rmult_lt_0_compat; [ apply PI_RGT_0 | apply Rinv_0_lt_compat; prove_sup0 ]. Qed. Lemma PI6_RLT_PI2 : PI / 6 < PI / 2. Proof. unfold Rdiv; apply Rmult_lt_compat_l. apply PI_RGT_0. apply Rinv_lt_contravar; prove_sup. Qed. Lemma sin_PI6 : sin (PI / 6) = 1 / 2. Proof with trivial. assert (H : 2 <> 0); [ discrR | idtac ]... apply Rmult_eq_reg_l with (2 * cos (PI / 6))... replace (2 * cos (PI / 6) * sin (PI / 6)) with (2 * sin (PI / 6) * cos (PI / 6))... rewrite <- sin_2a; replace (2 * (PI / 6)) with (PI / 3)... rewrite sin_PI3_cos_PI6... unfold Rdiv; rewrite Rmult_1_l; rewrite Rmult_assoc; pattern 2 at 2; rewrite (Rmult_comm 2); rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r... unfold Rdiv; rewrite Rinv_mult_distr... rewrite (Rmult_comm (/ 2)); rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r... discrR... ring... apply prod_neq_R0... cut (0 < cos (PI / 6)); [ intro H1; auto with real | apply cos_gt_0; [ apply (Rlt_trans (- (PI / 2)) 0 (PI / 6) _PI2_RLT_0 PI6_RGT_0) | apply PI6_RLT_PI2 ] ]... Qed. Lemma sqrt2_neq_0 : sqrt 2 <> 0. Proof. assert (Hyp : 0 < 2); [ prove_sup0 | generalize (Rlt_le 0 2 Hyp); intro H1; red; intro H2; generalize (sqrt_eq_0 2 H1 H2); intro H; absurd (2 = 0); [ discrR | assumption ] ]. Qed. Lemma R1_sqrt2_neq_0 : 1 / sqrt 2 <> 0. Proof. generalize (Rinv_neq_0_compat (sqrt 2) sqrt2_neq_0); intro H; generalize (prod_neq_R0 1 (/ sqrt 2) R1_neq_R0 H); intro H0; assumption. Qed. Lemma sqrt3_2_neq_0 : 2 * sqrt 3 <> 0. Proof. apply prod_neq_R0; [ discrR | assert (Hyp : 0 < 3); [ prove_sup0 | generalize (Rlt_le 0 3 Hyp); intro H1; red; intro H2; generalize (sqrt_eq_0 3 H1 H2); intro H; absurd (3 = 0); [ discrR | assumption ] ] ]. Qed. Lemma Rlt_sqrt2_0 : 0 < sqrt 2. Proof. assert (Hyp : 0 < 2); [ prove_sup0 | generalize (sqrt_positivity 2 (Rlt_le 0 2 Hyp)); intro H1; elim H1; intro H2; [ assumption | absurd (0 = sqrt 2); [ apply (not_eq_sym (A:=R)); apply sqrt2_neq_0 | assumption ] ] ]. Qed. Lemma Rlt_sqrt3_0 : 0 < sqrt 3. Proof. 
cut (0%nat <> 1%nat); [ intro H0; assert (Hyp : 0 < 2); [ prove_sup0 | generalize (Rlt_le 0 2 Hyp); intro H1; assert (Hyp2 : 0 < 3); [ prove_sup0 | generalize (Rlt_le 0 3 Hyp2); intro H2; generalize (lt_INR_0 1 (neq_O_lt 1 H0)); unfold INR; intro H3; generalize (Rplus_lt_compat_l 2 0 1 H3); rewrite Rplus_comm; rewrite Rplus_0_l; replace (2 + 1) with 3; [ intro H4; generalize (sqrt_lt_1 2 3 H1 H2 H4); clear H3; intro H3; apply (Rlt_trans 0 (sqrt 2) (sqrt 3) Rlt_sqrt2_0 H3) | ring ] ] ] | discriminate ]. Qed. Lemma PI4_RGT_0 : 0 < PI / 4. Proof. unfold Rdiv; apply Rmult_lt_0_compat; [ apply PI_RGT_0 | apply Rinv_0_lt_compat; prove_sup0 ]. Qed. Lemma cos_PI4 : cos (PI / 4) = 1 / sqrt 2. Proof with trivial. apply Rsqr_inj... apply cos_ge_0... left; apply (Rlt_trans (- (PI / 2)) 0 (PI / 4) _PI2_RLT_0 PI4_RGT_0)... left; apply PI4_RLT_PI2... left; apply (Rmult_lt_0_compat 1 (/ sqrt 2))... prove_sup... apply Rinv_0_lt_compat; apply Rlt_sqrt2_0... rewrite Rsqr_div... rewrite Rsqr_1; rewrite Rsqr_sqrt... assert (H : 2 <> 0); [ discrR | idtac ]... unfold Rsqr; pattern (cos (PI / 4)) at 1; rewrite <- sin_cos_PI4; replace (sin (PI / 4) * cos (PI / 4)) with (1 / 2 * (2 * sin (PI / 4) * cos (PI / 4)))... rewrite <- sin_2a; replace (2 * (PI / 4)) with (PI / 2)... rewrite sin_PI2... apply Rmult_1_r... unfold Rdiv; rewrite (Rmult_comm 2); rewrite Rinv_mult_distr... repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r... unfold Rdiv; rewrite Rmult_1_l; repeat rewrite <- Rmult_assoc... rewrite <- Rinv_l_sym... rewrite Rmult_1_l... left; prove_sup... apply sqrt2_neq_0... Qed. Lemma sin_PI4 : sin (PI / 4) = 1 / sqrt 2. Proof. rewrite sin_cos_PI4; apply cos_PI4. Qed. Lemma tan_PI4 : tan (PI / 4) = 1. Proof. unfold tan; rewrite sin_cos_PI4. unfold Rdiv; apply Rinv_r. change (cos (PI / 4) <> 0); rewrite cos_PI4; apply R1_sqrt2_neq_0. Qed. Lemma cos3PI4 : cos (3 * (PI / 4)) = -1 / sqrt 2. Proof with trivial. replace (3 * (PI / 4)) with (PI / 2 - - (PI / 4))... rewrite cos_shift; rewrite sin_neg; rewrite sin_PI4... unfold Rdiv; rewrite Ropp_mult_distr_l_reverse... unfold Rminus; rewrite Ropp_involutive; pattern PI at 1; rewrite double_var; unfold Rdiv; rewrite Rmult_plus_distr_r; repeat rewrite Rmult_assoc; rewrite <- Rinv_mult_distr; [ ring | discrR | discrR ]... Qed. Lemma sin3PI4 : sin (3 * (PI / 4)) = 1 / sqrt 2. Proof with trivial. replace (3 * (PI / 4)) with (PI / 2 - - (PI / 4))... rewrite sin_shift; rewrite cos_neg; rewrite cos_PI4... unfold Rminus; rewrite Ropp_involutive; pattern PI at 1; rewrite double_var; unfold Rdiv; rewrite Rmult_plus_distr_r; repeat rewrite Rmult_assoc; rewrite <- Rinv_mult_distr; [ ring | discrR | discrR ]... Qed. Lemma cos_PI6 : cos (PI / 6) = sqrt 3 / 2. Proof with trivial. apply Rsqr_inj... apply cos_ge_0... left; apply (Rlt_trans (- (PI / 2)) 0 (PI / 6) _PI2_RLT_0 PI6_RGT_0)... left; apply PI6_RLT_PI2... left; apply (Rmult_lt_0_compat (sqrt 3) (/ 2))... apply Rlt_sqrt3_0... apply Rinv_0_lt_compat; prove_sup0... assert (H : 2 <> 0); [ discrR | idtac ]... assert (H1 : 4 <> 0); [ apply prod_neq_R0 | idtac ]... rewrite Rsqr_div... rewrite cos2; unfold Rsqr; rewrite sin_PI6; rewrite sqrt_def... unfold Rdiv; rewrite Rmult_1_l; apply Rmult_eq_reg_l with 4... rewrite Rmult_minus_distr_l; rewrite (Rmult_comm 3); repeat rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym... rewrite Rmult_1_l; rewrite Rmult_1_r... rewrite <- (Rmult_comm (/ 2)); repeat rewrite <- Rmult_assoc... rewrite <- Rinv_l_sym... rewrite Rmult_1_l; rewrite <- Rinv_r_sym... ring... left; prove_sup0... Qed. 
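(* Added example (not part of the original file; the name [sin_PI4_pos]
   is ours): the closed forms above make elementary sign facts
   immediate, here positivity of [sin (PI/4)] via [sin_PI4] and
   [Rlt_sqrt2_0]. *)
Example sin_PI4_pos : 0 < sin (PI / 4).
Proof.
  rewrite sin_PI4; unfold Rdiv; rewrite Rmult_1_l.
  apply Rinv_0_lt_compat; apply Rlt_sqrt2_0.
Qed.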
Lemma tan_PI6 : tan (PI / 6) = 1 / sqrt 3. Proof. unfold tan; rewrite sin_PI6; rewrite cos_PI6; unfold Rdiv; repeat rewrite Rmult_1_l; rewrite Rinv_mult_distr. rewrite Rinv_involutive. rewrite (Rmult_comm (/ 2)); rewrite Rmult_assoc; rewrite <- Rinv_r_sym. apply Rmult_1_r. discrR. discrR. red; intro; assert (H1 := Rlt_sqrt3_0); rewrite H in H1; elim (Rlt_irrefl 0 H1). apply Rinv_neq_0_compat; discrR. Qed. Lemma sin_PI3 : sin (PI / 3) = sqrt 3 / 2. Proof. rewrite sin_PI3_cos_PI6; apply cos_PI6. Qed. Lemma cos_PI3 : cos (PI / 3) = 1 / 2. Proof. rewrite sin_PI6_cos_PI3; apply sin_PI6. Qed. Lemma tan_PI3 : tan (PI / 3) = sqrt 3. Proof. unfold tan; rewrite sin_PI3; rewrite cos_PI3; unfold Rdiv; rewrite Rmult_1_l; rewrite Rinv_involutive. rewrite Rmult_assoc; rewrite <- Rinv_l_sym. apply Rmult_1_r. discrR. discrR. Qed. Lemma sin_2PI3 : sin (2 * (PI / 3)) = sqrt 3 / 2. Proof. rewrite double; rewrite sin_plus; rewrite sin_PI3; rewrite cos_PI3; unfold Rdiv; repeat rewrite Rmult_1_l; rewrite (Rmult_comm (/ 2)); repeat rewrite <- Rmult_assoc; rewrite double_var; reflexivity. Qed. Lemma cos_2PI3 : cos (2 * (PI / 3)) = -1 / 2. Proof with trivial. assert (H : 2 <> 0); [ discrR | idtac ]... assert (H0 : 4 <> 0); [ apply prod_neq_R0 | idtac ]... rewrite double; rewrite cos_plus; rewrite sin_PI3; rewrite cos_PI3; unfold Rdiv; rewrite Rmult_1_l; apply Rmult_eq_reg_l with 4... rewrite Rmult_minus_distr_l; repeat rewrite Rmult_assoc; rewrite (Rmult_comm 2)... repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r; rewrite <- Rinv_r_sym... pattern 2 at 4; rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r; rewrite Ropp_mult_distr_r_reverse; rewrite Rmult_1_r... rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r; rewrite (Rmult_comm 2); rewrite (Rmult_comm (/ 2))... repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym... rewrite Rmult_1_r; rewrite sqrt_def... ring... left; prove_sup... Qed. Lemma tan_2PI3 : tan (2 * (PI / 3)) = - sqrt 3. Proof with trivial. assert (H : 2 <> 0); [ discrR | idtac ]... unfold tan; rewrite sin_2PI3; rewrite cos_2PI3; unfold Rdiv; rewrite Ropp_mult_distr_l_reverse; rewrite Rmult_1_l; rewrite <- Ropp_inv_permute... rewrite Rinv_involutive... rewrite Rmult_assoc; rewrite Ropp_mult_distr_r_reverse; rewrite <- Rinv_l_sym... ring... apply Rinv_neq_0_compat... Qed. Lemma cos_5PI4 : cos (5 * (PI / 4)) = -1 / sqrt 2. Proof with trivial. replace (5 * (PI / 4)) with (PI / 4 + PI)... rewrite neg_cos; rewrite cos_PI4; unfold Rdiv; rewrite Ropp_mult_distr_l_reverse... pattern PI at 2; rewrite double_var; pattern PI at 2 3; rewrite double_var; assert (H : 2 <> 0); [ discrR | unfold Rdiv; repeat rewrite Rinv_mult_distr; try ring ]... Qed. Lemma sin_5PI4 : sin (5 * (PI / 4)) = -1 / sqrt 2. Proof with trivial. replace (5 * (PI / 4)) with (PI / 4 + PI)... rewrite neg_sin; rewrite sin_PI4; unfold Rdiv; rewrite Ropp_mult_distr_l_reverse... pattern PI at 2; rewrite double_var; pattern PI at 2 3; rewrite double_var; assert (H : 2 <> 0); [ discrR | unfold Rdiv; repeat rewrite Rinv_mult_distr; try ring ]... Qed. Lemma sin_cos5PI4 : cos (5 * (PI / 4)) = sin (5 * (PI / 4)). Proof. rewrite cos_5PI4; rewrite sin_5PI4; reflexivity. Qed. Lemma Rgt_3PI2_0 : 0 < 3 * (PI / 2). Proof. apply Rmult_lt_0_compat; [ prove_sup0 | unfold Rdiv; apply Rmult_lt_0_compat; [ apply PI_RGT_0 | apply Rinv_0_lt_compat; prove_sup0 ] ]. Qed. Lemma Rgt_2PI_0 : 0 < 2 * PI. Proof. apply Rmult_lt_0_compat; [ prove_sup0 | apply PI_RGT_0 ]. 
Qed. Lemma Rlt_PI_3PI2 : PI < 3 * (PI / 2). Proof. generalize PI2_RGT_0; intro H1; generalize (Rplus_lt_compat_l PI 0 (PI / 2) H1); replace (PI + PI / 2) with (3 * (PI / 2)). rewrite Rplus_0_r; intro H2; assumption. pattern PI at 2; rewrite double_var; ring. Qed. Lemma Rlt_3PI2_2PI : 3 * (PI / 2) < 2 * PI. Proof. generalize PI2_RGT_0; intro H1; generalize (Rplus_lt_compat_l (3 * (PI / 2)) 0 (PI / 2) H1); replace (3 * (PI / 2) + PI / 2) with (2 * PI). rewrite Rplus_0_r; intro H2; assumption. rewrite double; pattern PI at 1 2; rewrite double_var; ring. Qed. (***************************************************************) (** Radian -> Degree | Degree -> Radian *) (***************************************************************) Definition plat : R := 180. Definition toRad (x:R) : R := x * PI * / plat. Definition toDeg (x:R) : R := x * plat * / PI. Lemma rad_deg : forall x:R, toRad (toDeg x) = x. Proof. intro; unfold toRad, toDeg; replace (x * plat * / PI * PI * / plat) with (x * (plat * / plat) * (PI * / PI)); [ idtac | ring ]. repeat rewrite <- Rinv_r_sym. ring. apply PI_neq0. unfold plat; discrR. Qed. Lemma toRad_inj : forall x y:R, toRad x = toRad y -> x = y. Proof. intros; unfold toRad in H; apply Rmult_eq_reg_l with PI. rewrite <- (Rmult_comm x); rewrite <- (Rmult_comm y). apply Rmult_eq_reg_l with (/ plat). rewrite <- (Rmult_comm (x * PI)); rewrite <- (Rmult_comm (y * PI)); assumption. apply Rinv_neq_0_compat; unfold plat; discrR. apply PI_neq0. Qed. Lemma deg_rad : forall x:R, toDeg (toRad x) = x. Proof. intro x; apply toRad_inj; rewrite (rad_deg (toRad x)); reflexivity. Qed. Definition sind (x:R) : R := sin (toRad x). Definition cosd (x:R) : R := cos (toRad x). Definition tand (x:R) : R := tan (toRad x). Lemma Rsqr_sin_cos_d_one : forall x:R, Rsqr (sind x) + Rsqr (cosd x) = 1. Proof. intro x; unfold sind; unfold cosd; apply sin2_cos2. Qed. (***************************************************) (** Other properties *) (***************************************************) Lemma sin_lb_ge_0 : forall a:R, 0 <= a -> a <= PI / 2 -> 0 <= sin_lb a. Proof. intros; case (Rtotal_order 0 a); intro. left; apply sin_lb_gt_0; assumption. elim H1; intro. rewrite <- H2; unfold sin_lb; unfold sin_approx; unfold sum_f_R0; unfold sin_term; repeat rewrite pow_ne_zero. unfold Rdiv; repeat rewrite Rmult_0_l; repeat rewrite Rmult_0_r; repeat rewrite Rplus_0_r; right; reflexivity. discriminate. discriminate. discriminate. discriminate. elim (Rlt_irrefl 0 (Rle_lt_trans 0 a 0 H H2)). Qed. coq-8.4pl2/theories/Reals/Rsqrt_def.v0000640000175000001440000005211212010532755016640 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool) (N:nat) {struct N} : R := match N with | O => x | S n => let down := Dichotomy_lb x y P n in let up := Dichotomy_ub x y P n in let z := (down + up) / 2 in if P z then down else z end with Dichotomy_ub (x y:R) (P:R -> bool) (N:nat) {struct N} : R := match N with | O => y | S n => let down := Dichotomy_lb x y P n in let up := Dichotomy_ub x y P n in let z := (down + up) / 2 in if P z then z else up end. Definition dicho_lb (x y:R) (P:R -> bool) (N:nat) : R := Dichotomy_lb x y P N. Definition dicho_up (x y:R) (P:R -> bool) (N:nat) : R := Dichotomy_ub x y P N. (**********) Lemma dicho_comp : forall (x y:R) (P:R -> bool) (n:nat), x <= y -> dicho_lb x y P n <= dicho_up x y P n. Proof. intros. induction n as [| n Hrecn]. simpl; assumption. simpl. 
case (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2)). unfold Rdiv; apply Rmult_le_reg_l with 2. prove_sup0. pattern 2 at 1; rewrite Rmult_comm. rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]. rewrite Rmult_1_r. rewrite double. apply Rplus_le_compat_l. assumption. unfold Rdiv; apply Rmult_le_reg_l with 2. prove_sup0. rewrite Rmult_comm. rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]. rewrite Rmult_1_r. rewrite double. rewrite <- (Rplus_comm (Dichotomy_ub x y P n)). apply Rplus_le_compat_l. assumption. Qed. Lemma dicho_lb_growing : forall (x y:R) (P:R -> bool), x <= y -> Un_growing (dicho_lb x y P). Proof. intros. unfold Un_growing. intro. simpl. case (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2)). right; reflexivity. unfold Rdiv; apply Rmult_le_reg_l with 2. prove_sup0. pattern 2 at 1; rewrite Rmult_comm. rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]. rewrite Rmult_1_r. rewrite double. apply Rplus_le_compat_l. replace (Dichotomy_ub x y P n) with (dicho_up x y P n); [ apply dicho_comp; assumption | reflexivity ]. Qed. Lemma dicho_up_decreasing : forall (x y:R) (P:R -> bool), x <= y -> Un_decreasing (dicho_up x y P). Proof. intros. unfold Un_decreasing. intro. simpl. case (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2)). unfold Rdiv; apply Rmult_le_reg_l with 2. prove_sup0. rewrite Rmult_comm. rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]. rewrite Rmult_1_r. rewrite double. replace (Dichotomy_ub x y P n) with (dicho_up x y P n); [ idtac | reflexivity ]. replace (Dichotomy_lb x y P n) with (dicho_lb x y P n); [ idtac | reflexivity ]. rewrite <- (Rplus_comm (dicho_up x y P n)). apply Rplus_le_compat_l. apply dicho_comp; assumption. right; reflexivity. Qed. Lemma dicho_lb_maj_y : forall (x y:R) (P:R -> bool), x <= y -> forall n:nat, dicho_lb x y P n <= y. Proof. intros. induction n as [| n Hrecn]. simpl; assumption. simpl. case (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2)). assumption. unfold Rdiv; apply Rmult_le_reg_l with 2. prove_sup0. rewrite Rmult_comm. rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ rewrite Rmult_1_r | discrR ]. rewrite double; apply Rplus_le_compat. assumption. pattern y at 2; replace y with (Dichotomy_ub x y P 0); [ idtac | reflexivity ]. apply decreasing_prop. assert (H0 := dicho_up_decreasing x y P H). assumption. apply le_O_n. Qed. Lemma dicho_lb_maj : forall (x y:R) (P:R -> bool), x <= y -> has_ub (dicho_lb x y P). Proof. intros. cut (forall n:nat, dicho_lb x y P n <= y). intro. unfold has_ub. unfold bound. exists y. unfold is_upper_bound. intros. elim H1; intros. rewrite H2; apply H0. apply dicho_lb_maj_y; assumption. Qed. Lemma dicho_up_min_x : forall (x y:R) (P:R -> bool), x <= y -> forall n:nat, x <= dicho_up x y P n. Proof. intros. induction n as [| n Hrecn]. simpl; assumption. simpl. case (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2)). unfold Rdiv; apply Rmult_le_reg_l with 2. prove_sup0. pattern 2 at 1; rewrite Rmult_comm. rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ rewrite Rmult_1_r | discrR ]. rewrite double; apply Rplus_le_compat. pattern x at 1; replace x with (Dichotomy_lb x y P 0); [ idtac | reflexivity ]. apply tech9. assert (H0 := dicho_lb_growing x y P H). assumption. apply le_O_n. assumption. assumption. Qed. Lemma dicho_up_min : forall (x y:R) (P:R -> bool), x <= y -> has_lb (dicho_up x y P). Proof. intros. cut (forall n:nat, x <= dicho_up x y P n). intro. unfold has_lb. unfold bound. exists (- x). unfold is_upper_bound. intros. 
elim H1; intros. rewrite H2. unfold opp_seq. apply Ropp_le_contravar. apply H0. apply dicho_up_min_x; assumption. Qed. Lemma dicho_lb_cv : forall (x y:R) (P:R -> bool), x <= y -> { l:R | Un_cv (dicho_lb x y P) l }. Proof. intros. apply growing_cv. apply dicho_lb_growing; assumption. apply dicho_lb_maj; assumption. Qed. Lemma dicho_up_cv : forall (x y:R) (P:R -> bool), x <= y -> { l:R | Un_cv (dicho_up x y P) l }. Proof. intros. apply decreasing_cv. apply dicho_up_decreasing; assumption. apply dicho_up_min; assumption. Qed. Lemma dicho_lb_dicho_up : forall (x y:R) (P:R -> bool) (n:nat), x <= y -> dicho_up x y P n - dicho_lb x y P n = (y - x) / 2 ^ n. Proof. intros. induction n as [| n Hrecn]. simpl. unfold Rdiv; rewrite Rinv_1; ring. simpl. case (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2)). unfold Rdiv. replace ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) * / 2 - Dichotomy_lb x y P n) with ((dicho_up x y P n - dicho_lb x y P n) / 2). unfold Rdiv; rewrite Hrecn. unfold Rdiv. rewrite Rinv_mult_distr. ring. discrR. apply pow_nonzero; discrR. pattern (Dichotomy_lb x y P n) at 2; rewrite (double_var (Dichotomy_lb x y P n)); unfold dicho_up, dicho_lb, Rminus, Rdiv; ring. replace (Dichotomy_ub x y P n - (Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2) with ((dicho_up x y P n - dicho_lb x y P n) / 2). unfold Rdiv; rewrite Hrecn. unfold Rdiv. rewrite Rinv_mult_distr. ring. discrR. apply pow_nonzero; discrR. pattern (Dichotomy_ub x y P n) at 1; rewrite (double_var (Dichotomy_ub x y P n)); unfold dicho_up, dicho_lb, Rminus, Rdiv; ring. Qed. Definition pow_2_n (n:nat) := 2 ^ n. Lemma pow_2_n_neq_R0 : forall n:nat, pow_2_n n <> 0. Proof. intro. unfold pow_2_n. apply pow_nonzero. discrR. Qed. Lemma pow_2_n_growing : Un_growing pow_2_n. Proof. unfold Un_growing. intro. replace (S n) with (n + 1)%nat; [ unfold pow_2_n; rewrite pow_add | ring ]. pattern (2 ^ n) at 1; rewrite <- Rmult_1_r. apply Rmult_le_compat_l. left; apply pow_lt; prove_sup0. simpl. rewrite Rmult_1_r. pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; apply Rlt_0_1. Qed. Lemma pow_2_n_infty : cv_infty pow_2_n. Proof. cut (forall N:nat, INR N <= 2 ^ N). intros. unfold cv_infty. intro. case (total_order_T 0 M); intro. elim s; intro. set (N := up M). cut (0 <= N)%Z. intro. elim (IZN N H0); intros N0 H1. exists N0. intros. apply Rlt_le_trans with (INR N0). rewrite INR_IZR_INZ. rewrite <- H1. unfold N. assert (H3 := archimed M). elim H3; intros; assumption. apply Rle_trans with (pow_2_n N0). unfold pow_2_n; apply H. apply Rge_le. apply growing_prop. apply pow_2_n_growing. assumption. apply le_IZR. unfold N. simpl. assert (H0 := archimed M); elim H0; intros. left; apply Rlt_trans with M; assumption. exists 0%nat; intros. rewrite <- b. unfold pow_2_n; apply pow_lt; prove_sup0. exists 0%nat; intros. apply Rlt_trans with 0. assumption. unfold pow_2_n; apply pow_lt; prove_sup0. simple induction N. simpl. left; apply Rlt_0_1. intros. pattern (S n) at 2; replace (S n) with (n + 1)%nat; [ idtac | ring ]. rewrite S_INR; rewrite pow_add. simpl. rewrite Rmult_1_r. apply Rle_trans with (2 ^ n). rewrite <- (Rplus_comm 1). rewrite <- (Rmult_1_r (INR n)). apply (poly n 1). apply Rlt_0_1. pattern (2 ^ n) at 1; rewrite <- Rplus_0_r. rewrite <- (Rmult_comm 2). rewrite double. apply Rplus_le_compat_l. left; apply pow_lt; prove_sup0. Qed. Lemma cv_dicho : forall (x y l1 l2:R) (P:R -> bool), x <= y -> Un_cv (dicho_lb x y P) l1 -> Un_cv (dicho_up x y P) l2 -> l1 = l2. Proof. intros. assert (H2 := CV_minus _ _ _ _ H0 H1). 
cut (Un_cv (fun i:nat => dicho_lb x y P i - dicho_up x y P i) 0). intro. assert (H4 := UL_sequence _ _ _ H2 H3). symmetry ; apply Rminus_diag_uniq_sym; assumption. unfold Un_cv; unfold R_dist. intros. assert (H4 := cv_infty_cv_R0 pow_2_n pow_2_n_neq_R0 pow_2_n_infty). case (total_order_T x y); intro. elim s; intro. unfold Un_cv in H4; unfold R_dist in H4. cut (0 < y - x). intro Hyp. cut (0 < eps / (y - x)). intro. elim (H4 (eps / (y - x)) H5); intros N H6. exists N; intros. replace (dicho_lb x y P n - dicho_up x y P n - 0) with (dicho_lb x y P n - dicho_up x y P n); [ idtac | ring ]. rewrite <- Rabs_Ropp. rewrite Ropp_minus_distr'. rewrite dicho_lb_dicho_up. unfold Rdiv; rewrite Rabs_mult. rewrite (Rabs_right (y - x)). apply Rmult_lt_reg_l with (/ (y - x)). apply Rinv_0_lt_compat; assumption. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l. replace (/ 2 ^ n) with (/ 2 ^ n - 0); [ unfold pow_2_n, Rdiv in H6; rewrite <- (Rmult_comm eps); apply H6; assumption | ring ]. red; intro; rewrite H8 in Hyp; elim (Rlt_irrefl _ Hyp). apply Rle_ge. apply Rplus_le_reg_l with x; rewrite Rplus_0_r. replace (x + (y - x)) with y; [ assumption | ring ]. assumption. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; assumption ]. apply Rplus_lt_reg_r with x; rewrite Rplus_0_r. replace (x + (y - x)) with y; [ assumption | ring ]. exists 0%nat; intros. replace (dicho_lb x y P n - dicho_up x y P n - 0) with (dicho_lb x y P n - dicho_up x y P n); [ idtac | ring ]. rewrite <- Rabs_Ropp. rewrite Ropp_minus_distr'. rewrite dicho_lb_dicho_up. rewrite b. unfold Rminus, Rdiv; rewrite Rplus_opp_r; rewrite Rmult_0_l; rewrite Rabs_R0; assumption. assumption. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H r)). Qed. Definition cond_positivity (x:R) : bool := match Rle_dec 0 x with | left _ => true | right _ => false end. (** Sequential caracterisation of continuity *) Lemma continuity_seq : forall (f:R -> R) (Un:nat -> R) (l:R), continuity_pt f l -> Un_cv Un l -> Un_cv (fun i:nat => f (Un i)) (f l). Proof. unfold continuity_pt, Un_cv; unfold continue_in. unfold limit1_in. unfold limit_in. unfold dist. simpl. unfold R_dist. intros. elim (H eps H1); intros alp H2. elim H2; intros. elim (H0 alp H3); intros N H5. exists N; intros. case (Req_dec (Un n) l); intro. rewrite H7; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; assumption. apply H4. split. unfold D_x, no_cond. split. trivial. apply (not_eq_sym (A:=R)); assumption. apply H5; assumption. Qed. Lemma dicho_lb_car : forall (x y:R) (P:R -> bool) (n:nat), P x = false -> P (dicho_lb x y P n) = false. Proof. intros. induction n as [| n Hrecn]. simpl. assumption. simpl. assert (X := sumbool_of_bool (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2))). elim X; intro. rewrite a. unfold dicho_lb in Hrecn; assumption. rewrite b. assumption. Qed. Lemma dicho_up_car : forall (x y:R) (P:R -> bool) (n:nat), P y = true -> P (dicho_up x y P n) = true. Proof. intros. induction n as [| n Hrecn]. simpl. assumption. simpl. assert (X := sumbool_of_bool (P ((Dichotomy_lb x y P n + Dichotomy_ub x y P n) / 2))). elim X; intro. rewrite a. unfold dicho_lb in Hrecn; assumption. rewrite b. assumption. Qed. (** Intermediate Value Theorem *) Lemma IVT : forall (f:R -> R) (x y:R), continuity f -> x < y -> f x < 0 -> 0 < f y -> { z:R | x <= z <= y /\ f z = 0 }. Proof. intros. cut (x <= y). intro. generalize (dicho_lb_cv x y (fun z:R => cond_positivity (f z)) H3). generalize (dicho_up_cv x y (fun z:R => cond_positivity (f z)) H3). intros X X0. elim X; intros. 
elim X0; intros. assert (H4 := cv_dicho _ _ _ _ _ H3 p0 p). rewrite H4 in p0. exists x0. split. split. apply Rle_trans with (dicho_lb x y (fun z:R => cond_positivity (f z)) 0). simpl. right; reflexivity. apply growing_ineq. apply dicho_lb_growing; assumption. assumption. apply Rle_trans with (dicho_up x y (fun z:R => cond_positivity (f z)) 0). apply decreasing_ineq. apply dicho_up_decreasing; assumption. assumption. right; reflexivity. 2: left; assumption. set (Vn := fun n:nat => dicho_lb x y (fun z:R => cond_positivity (f z)) n). set (Wn := fun n:nat => dicho_up x y (fun z:R => cond_positivity (f z)) n). cut ((forall n:nat, f (Vn n) <= 0) -> f x0 <= 0). cut ((forall n:nat, 0 <= f (Wn n)) -> 0 <= f x0). intros. cut (forall n:nat, f (Vn n) <= 0). cut (forall n:nat, 0 <= f (Wn n)). intros. assert (H9 := H6 H8). assert (H10 := H5 H7). apply Rle_antisym; assumption. intro. unfold Wn. cut (forall z:R, cond_positivity z = true <-> 0 <= z). intro. assert (H8 := dicho_up_car x y (fun z:R => cond_positivity (f z)) n). elim (H7 (f (dicho_up x y (fun z:R => cond_positivity (f z)) n))); intros. apply H9. apply H8. elim (H7 (f y)); intros. apply H12. left; assumption. intro. unfold cond_positivity. case (Rle_dec 0 z); intro. split. intro; assumption. intro; reflexivity. split. intro feqt;discriminate feqt. intro. elim n0; assumption. unfold Vn. cut (forall z:R, cond_positivity z = false <-> z < 0). intros. assert (H8 := dicho_lb_car x y (fun z:R => cond_positivity (f z)) n). left. elim (H7 (f (dicho_lb x y (fun z:R => cond_positivity (f z)) n))); intros. apply H9. apply H8. elim (H7 (f x)); intros. apply H12. assumption. intro. unfold cond_positivity. case (Rle_dec 0 z); intro. split. intro feqt; discriminate feqt. intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ r H7)). split. intro; auto with real. intro; reflexivity. cut (Un_cv Wn x0). intros. assert (H7 := continuity_seq f Wn x0 (H x0) H5). case (total_order_T 0 (f x0)); intro. elim s; intro. left; assumption. rewrite <- b; right; reflexivity. unfold Un_cv in H7; unfold R_dist in H7. cut (0 < - f x0). intro. elim (H7 (- f x0) H8); intros. cut (x2 >= x2)%nat; [ intro | unfold ge; apply le_n ]. assert (H11 := H9 x2 H10). rewrite Rabs_right in H11. pattern (- f x0) at 1 in H11; rewrite <- Rplus_0_r in H11. unfold Rminus in H11; rewrite (Rplus_comm (f (Wn x2))) in H11. assert (H12 := Rplus_lt_reg_r _ _ _ H11). assert (H13 := H6 x2). elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H13 H12)). apply Rle_ge; left; unfold Rminus; apply Rplus_le_lt_0_compat. apply H6. exact H8. apply Ropp_0_gt_lt_contravar; assumption. unfold Wn; assumption. cut (Un_cv Vn x0). intros. assert (H7 := continuity_seq f Vn x0 (H x0) H5). case (total_order_T 0 (f x0)); intro. elim s; intro. unfold Un_cv in H7; unfold R_dist in H7. elim (H7 (f x0) a); intros. cut (x2 >= x2)%nat; [ intro | unfold ge; apply le_n ]. assert (H10 := H8 x2 H9). rewrite Rabs_left in H10. pattern (f x0) at 2 in H10; rewrite <- Rplus_0_r in H10. rewrite Ropp_minus_distr' in H10. unfold Rminus in H10. assert (H11 := Rplus_lt_reg_r _ _ _ H10). assert (H12 := H6 x2). cut (0 < f (Vn x2)). intro. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H13 H12)). rewrite <- (Ropp_involutive (f (Vn x2))). apply Ropp_0_gt_lt_contravar; assumption. apply Rplus_lt_reg_r with (f x0 - f (Vn x2)). rewrite Rplus_0_r; replace (f x0 - f (Vn x2) + (f (Vn x2) - f x0)) with 0; [ unfold Rminus; apply Rplus_lt_le_0_compat | ring ]. assumption. apply Ropp_0_ge_le_contravar; apply Rle_ge; apply H6. right; rewrite <- b; reflexivity. left; assumption. 
unfold Vn; assumption. Qed. Lemma IVT_cor : forall (f:R -> R) (x y:R), continuity f -> x <= y -> f x * f y <= 0 -> { z:R | x <= z <= y /\ f z = 0 }. Proof. intros. case (total_order_T 0 (f x)); intro. case (total_order_T 0 (f y)); intro. elim s; intro. elim s0; intro. cut (0 < f x * f y); [ intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H1 H2)) | apply Rmult_lt_0_compat; assumption ]. exists y. split. split; [ assumption | right; reflexivity ]. symmetry ; exact b. exists x. split. split; [ right; reflexivity | assumption ]. symmetry ; exact b. elim s; intro. cut (x < y). intro. assert (H3 := IVT (- f)%F x y (continuity_opp f H) H2). cut ((- f)%F x < 0). cut (0 < (- f)%F y). intros. elim (H3 H5 H4); intros. exists x0. elim p; intros. split. assumption. unfold opp_fct in H7. rewrite <- (Ropp_involutive (f x0)). apply Ropp_eq_0_compat; assumption. unfold opp_fct; apply Ropp_0_gt_lt_contravar; assumption. unfold opp_fct. apply Rplus_lt_reg_r with (f x); rewrite Rplus_opp_r; rewrite Rplus_0_r; assumption. inversion H0. assumption. rewrite H2 in a. elim (Rlt_irrefl _ (Rlt_trans _ _ _ r a)). exists x. split. split; [ right; reflexivity | assumption ]. symmetry ; assumption. case (total_order_T 0 (f y)); intro. elim s; intro. cut (x < y). intro. apply IVT; assumption. inversion H0. assumption. rewrite H2 in r. elim (Rlt_irrefl _ (Rlt_trans _ _ _ r a)). exists y. split. split; [ assumption | right; reflexivity ]. symmetry ; assumption. cut (0 < f x * f y). intro. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ H2 H1)). rewrite <- Rmult_opp_opp; apply Rmult_lt_0_compat; apply Ropp_0_gt_lt_contravar; assumption. Qed. (** We can now define the square root function as the reciprocal transformation of the square root function *) Lemma Rsqrt_exists : forall y:R, 0 <= y -> { z:R | 0 <= z /\ y = Rsqr z }. Proof. intros. set (f := fun x:R => Rsqr x - y). cut (f 0 <= 0). intro. cut (continuity f). intro. case (total_order_T y 1); intro. elim s; intro. cut (0 <= f 1). intro. cut (f 0 * f 1 <= 0). intro. assert (X := IVT_cor f 0 1 H1 (Rlt_le _ _ Rlt_0_1) H3). elim X; intros t H4. exists t. elim H4; intros. split. elim H5; intros; assumption. unfold f in H6. apply Rminus_diag_uniq_sym; exact H6. rewrite Rmult_comm; pattern 0 at 2; rewrite <- (Rmult_0_r (f 1)). apply Rmult_le_compat_l; assumption. unfold f. rewrite Rsqr_1. apply Rplus_le_reg_l with y. rewrite Rplus_0_r; rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r; left; assumption. exists 1. split. left; apply Rlt_0_1. rewrite b; symmetry ; apply Rsqr_1. cut (0 <= f y). intro. cut (f 0 * f y <= 0). intro. assert (X := IVT_cor f 0 y H1 H H3). elim X; intros t H4. exists t. elim H4; intros. split. elim H5; intros; assumption. unfold f in H6. apply Rminus_diag_uniq_sym; exact H6. rewrite Rmult_comm; pattern 0 at 2; rewrite <- (Rmult_0_r (f y)). apply Rmult_le_compat_l; assumption. unfold f. apply Rplus_le_reg_l with y. rewrite Rplus_0_r; rewrite Rplus_comm; unfold Rminus; rewrite Rplus_assoc; rewrite Rplus_opp_l; rewrite Rplus_0_r. pattern y at 1; rewrite <- Rmult_1_r. unfold Rsqr; apply Rmult_le_compat_l. assumption. left; exact r. replace f with (Rsqr - fct_cte y)%F. apply continuity_minus. apply derivable_continuous; apply derivable_Rsqr. apply derivable_continuous; apply derivable_const. reflexivity. unfold f; rewrite Rsqr_0. unfold Rminus; rewrite Rplus_0_l. apply Rge_le. apply Ropp_0_le_ge_contravar; assumption. Qed. 
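(* Informally: every y >= 0 has a nonnegative square root, obtained by applying
   IVT_cor to the continuous map x |-> Rsqr x - y on [0,1] or [0,y], according
   to whether y < 1.  A hypothetical use (the hypothesis name H02 is assumed
   here, it is not defined in this file):
     Definition sqrt_two : R := proj1_sig (Rsqrt_exists 2 H02).
   where H02 : 0 <= 2. *)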
(* Definition of the square root: R+->R *) Definition Rsqrt (y:nonnegreal) : R := let (a,_) := Rsqrt_exists (nonneg y) (cond_nonneg y) in a. (**********) Lemma Rsqrt_positivity : forall x:nonnegreal, 0 <= Rsqrt x. Proof. intro. assert (X := Rsqrt_exists (nonneg x) (cond_nonneg x)). elim X; intros. cut (x0 = Rsqrt x). intros. elim p; intros. rewrite H in H0; assumption. unfold Rsqrt. case (Rsqrt_exists x (cond_nonneg x)). intros. elim p; elim a; intros. apply Rsqr_inj. assumption. assumption. rewrite <- H0; rewrite <- H2; reflexivity. Qed. (**********) Lemma Rsqrt_Rsqrt : forall x:nonnegreal, Rsqrt x * Rsqrt x = x. Proof. intros. assert (X := Rsqrt_exists (nonneg x) (cond_nonneg x)). elim X; intros. cut (x0 = Rsqrt x). intros. rewrite <- H. elim p; intros. rewrite H1; reflexivity. unfold Rsqrt. case (Rsqrt_exists x (cond_nonneg x)). intros. elim p; elim a; intros. apply Rsqr_inj. assumption. assumption. rewrite <- H0; rewrite <- H2; reflexivity. Qed. coq-8.4pl2/theories/Reals/SeqProp.v0000640000175000001440000011152712010532755016306 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) : Prop := forall n:nat, Un (S n) <= Un n. Definition opp_seq (Un:nat -> R) (n:nat) : R := - Un n. Definition has_ub (Un:nat -> R) : Prop := bound (EUn Un). Definition has_lb (Un:nat -> R) : Prop := bound (EUn (opp_seq Un)). (**********) Lemma growing_cv : forall Un:nat -> R, Un_growing Un -> has_ub Un -> { l:R | Un_cv Un l }. Proof. intros Un Hug Heub. exists (projT1 (completeness (EUn Un) Heub (EUn_noempty Un))). destruct (completeness _ Heub (EUn_noempty Un)) as (l, H). now apply Un_cv_crit_lub. Qed. Lemma decreasing_growing : forall Un:nat -> R, Un_decreasing Un -> Un_growing (opp_seq Un). Proof. intro. unfold Un_growing, opp_seq, Un_decreasing. intros. apply Ropp_le_contravar. apply H. Qed. Lemma decreasing_cv : forall Un:nat -> R, Un_decreasing Un -> has_lb Un -> { l:R | Un_cv Un l }. Proof. intros. cut ({ l:R | Un_cv (opp_seq Un) l } -> { l:R | Un_cv Un l }). intro X. apply X. apply growing_cv. apply decreasing_growing; assumption. exact H0. intro X. elim X; intros. exists (- x). unfold Un_cv in p. unfold R_dist in p. unfold opp_seq in p. unfold Un_cv. unfold R_dist. intros. elim (p eps H1); intros. exists x0; intros. assert (H4 := H2 n H3). rewrite <- Rabs_Ropp. replace (- (Un n - - x)) with (- Un n - x); [ assumption | ring ]. Qed. (***********) Lemma ub_to_lub : forall Un:nat -> R, has_ub Un -> { l:R | is_lub (EUn Un) l }. Proof. intros. unfold has_ub in H. apply completeness. assumption. exists (Un 0%nat). unfold EUn. exists 0%nat; reflexivity. Qed. (**********) Lemma lb_to_glb : forall Un:nat -> R, has_lb Un -> { l:R | is_lub (EUn (opp_seq Un)) l }. Proof. intros; unfold has_lb in H. apply completeness. assumption. exists (- Un 0%nat). exists 0%nat. reflexivity. Qed. Definition lub (Un:nat -> R) (pr:has_ub Un) : R := let (a,_) := ub_to_lub Un pr in a. Definition glb (Un:nat -> R) (pr:has_lb Un) : R := let (a,_) := lb_to_glb Un pr in - a. (* Compatibility with previous unappropriate terminology *) Notation maj_sup := ub_to_lub (only parsing). Notation min_inf := lb_to_glb (only parsing). Notation majorant := lub (only parsing). Notation minorant := glb (only parsing). Lemma maj_ss : forall (Un:nat -> R) (k:nat), has_ub Un -> has_ub (fun i:nat => Un (k + i)%nat). Proof. intros. unfold has_ub in H. unfold bound in H. elim H; intros. unfold is_upper_bound in H0. unfold has_ub. exists x. 
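(* The upper bound x of the whole sequence also bounds the shifted sequence:
   any value Un (k + i) belongs to EUn Un, hence lies below x. *)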
unfold is_upper_bound. intros. apply H0. elim H1; intros. exists (k + x1)%nat; assumption. Qed. Lemma min_ss : forall (Un:nat -> R) (k:nat), has_lb Un -> has_lb (fun i:nat => Un (k + i)%nat). Proof. intros. unfold has_lb in H. unfold bound in H. elim H; intros. unfold is_upper_bound in H0. unfold has_lb. exists x. unfold is_upper_bound. intros. apply H0. elim H1; intros. exists (k + x1)%nat; assumption. Qed. Definition sequence_ub (Un:nat -> R) (pr:has_ub Un) (i:nat) : R := lub (fun k:nat => Un (i + k)%nat) (maj_ss Un i pr). Definition sequence_lb (Un:nat -> R) (pr:has_lb Un) (i:nat) : R := glb (fun k:nat => Un (i + k)%nat) (min_ss Un i pr). (* Compatibility *) Notation sequence_majorant := sequence_ub (only parsing). Notation sequence_minorant := sequence_lb (only parsing). Lemma Wn_decreasing : forall (Un:nat -> R) (pr:has_ub Un), Un_decreasing (sequence_ub Un pr). Proof. intros. unfold Un_decreasing. intro. unfold sequence_ub. assert (H := ub_to_lub (fun k:nat => Un (S n + k)%nat) (maj_ss Un (S n) pr)). assert (H0 := ub_to_lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr)). elim H; intros. elim H0; intros. cut (lub (fun k:nat => Un (S n + k)%nat) (maj_ss Un (S n) pr) = x); [ intro Maj1; rewrite Maj1 | idtac ]. cut (lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr) = x0); [ intro Maj2; rewrite Maj2 | idtac ]. unfold is_lub in p. unfold is_lub in p0. elim p; intros. apply H2. elim p0; intros. unfold is_upper_bound. intros. unfold is_upper_bound in H3. apply H3. elim H5; intros. exists (1 + x2)%nat. replace (n + (1 + x2))%nat with (S n + x2)%nat. assumption. replace (S n) with (1 + n)%nat; [ ring | ring ]. cut (is_lub (EUn (fun k:nat => Un (n + k)%nat)) (lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr))). intro. unfold is_lub in p0; unfold is_lub in H1. elim p0; intros; elim H1; intros. assert (H6 := H5 x0 H2). assert (H7 := H3 (lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr)) H4). apply Rle_antisym; assumption. unfold lub. case (ub_to_lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr)). trivial. cut (is_lub (EUn (fun k:nat => Un (S n + k)%nat)) (lub (fun k:nat => Un (S n + k)%nat) (maj_ss Un (S n) pr))). intro. unfold is_lub in p; unfold is_lub in H1. elim p; intros; elim H1; intros. assert (H6 := H5 x H2). assert (H7 := H3 (lub (fun k:nat => Un (S n + k)%nat) (maj_ss Un (S n) pr)) H4). apply Rle_antisym; assumption. unfold lub. case (ub_to_lub (fun k:nat => Un (S n + k)%nat) (maj_ss Un (S n) pr)). trivial. Qed. Lemma Vn_growing : forall (Un:nat -> R) (pr:has_lb Un), Un_growing (sequence_lb Un pr). Proof. intros. unfold Un_growing. intro. unfold sequence_lb. assert (H := lb_to_glb (fun k:nat => Un (S n + k)%nat) (min_ss Un (S n) pr)). assert (H0 := lb_to_glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr)). elim H; intros. elim H0; intros. cut (glb (fun k:nat => Un (S n + k)%nat) (min_ss Un (S n) pr) = - x); [ intro Maj1; rewrite Maj1 | idtac ]. cut (glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr) = - x0); [ intro Maj2; rewrite Maj2 | idtac ]. unfold is_lub in p. unfold is_lub in p0. elim p; intros. apply Ropp_le_contravar. apply H2. elim p0; intros. unfold is_upper_bound. intros. unfold is_upper_bound in H3. apply H3. elim H5; intros. exists (1 + x2)%nat. unfold opp_seq in H6. unfold opp_seq. replace (n + (1 + x2))%nat with (S n + x2)%nat. assumption. replace (S n) with (1 + n)%nat; [ ring | ring ]. cut (is_lub (EUn (opp_seq (fun k:nat => Un (n + k)%nat))) (- glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr))). intro. unfold is_lub in p0; unfold is_lub in H1. 
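(* Least upper bounds are unique: from the two is_lub facts, Rle_antisym yields
   the pending equality between glb ... (min_ss ...) and the negation of the
   supremum produced by lb_to_glb. *)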
elim p0; intros; elim H1; intros. assert (H6 := H5 x0 H2). assert (H7 := H3 (- glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr)) H4). rewrite <- (Ropp_involutive (glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr))) . apply Ropp_eq_compat; apply Rle_antisym; assumption. unfold glb. case (lb_to_glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr)); simpl. intro; rewrite Ropp_involutive. trivial. cut (is_lub (EUn (opp_seq (fun k:nat => Un (S n + k)%nat))) (- glb (fun k:nat => Un (S n + k)%nat) (min_ss Un (S n) pr))). intro. unfold is_lub in p; unfold is_lub in H1. elim p; intros; elim H1; intros. assert (H6 := H5 x H2). assert (H7 := H3 (- glb (fun k:nat => Un (S n + k)%nat) (min_ss Un (S n) pr)) H4). rewrite <- (Ropp_involutive (glb (fun k:nat => Un (S n + k)%nat) (min_ss Un (S n) pr))) . apply Ropp_eq_compat; apply Rle_antisym; assumption. unfold glb. case (lb_to_glb (fun k:nat => Un (S n + k)%nat) (min_ss Un (S n) pr)); simpl. intro; rewrite Ropp_involutive. trivial. Qed. (**********) Lemma Vn_Un_Wn_order : forall (Un:nat -> R) (pr1:has_ub Un) (pr2:has_lb Un) (n:nat), sequence_lb Un pr2 n <= Un n <= sequence_ub Un pr1 n. Proof. intros. split. unfold sequence_lb. cut { l:R | is_lub (EUn (opp_seq (fun i:nat => Un (n + i)%nat))) l }. intro X. elim X; intros. replace (glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr2)) with (- x). unfold is_lub in p. elim p; intros. unfold is_upper_bound in H. rewrite <- (Ropp_involutive (Un n)). apply Ropp_le_contravar. apply H. exists 0%nat. unfold opp_seq. replace (n + 0)%nat with n; [ reflexivity | ring ]. cut (is_lub (EUn (opp_seq (fun k:nat => Un (n + k)%nat))) (- glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr2))). intro. unfold is_lub in p; unfold is_lub in H. elim p; intros; elim H; intros. assert (H4 := H3 x H0). assert (H5 := H1 (- glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr2)) H2). rewrite <- (Ropp_involutive (glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr2))) . apply Ropp_eq_compat; apply Rle_antisym; assumption. unfold glb. case (lb_to_glb (fun k:nat => Un (n + k)%nat) (min_ss Un n pr2)); simpl. intro; rewrite Ropp_involutive. trivial. apply lb_to_glb. apply min_ss; assumption. unfold sequence_ub. cut { l:R | is_lub (EUn (fun i:nat => Un (n + i)%nat)) l }. intro X. elim X; intros. replace (lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr1)) with x. unfold is_lub in p. elim p; intros. unfold is_upper_bound in H. apply H. exists 0%nat. replace (n + 0)%nat with n; [ reflexivity | ring ]. cut (is_lub (EUn (fun k:nat => Un (n + k)%nat)) (lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr1))). intro. unfold is_lub in p; unfold is_lub in H. elim p; intros; elim H; intros. assert (H4 := H3 x H0). assert (H5 := H1 (lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr1)) H2). apply Rle_antisym; assumption. unfold lub. case (ub_to_lub (fun k:nat => Un (n + k)%nat) (maj_ss Un n pr1)). intro; trivial. apply ub_to_lub. apply maj_ss; assumption. Qed. Lemma min_maj : forall (Un:nat -> R) (pr1:has_ub Un) (pr2:has_lb Un), has_ub (sequence_lb Un pr2). Proof. intros. assert (H := Vn_Un_Wn_order Un pr1 pr2). unfold has_ub. unfold bound. unfold has_ub in pr1. unfold bound in pr1. elim pr1; intros. exists x. unfold is_upper_bound. intros. unfold is_upper_bound in H0. elim H1; intros. rewrite H2. apply Rle_trans with (Un x1). assert (H3 := H x1); elim H3; intros; assumption. apply H0. exists x1; reflexivity. Qed. Lemma maj_min : forall (Un:nat -> R) (pr1:has_ub Un) (pr2:has_lb Un), has_lb (sequence_ub Un pr1). Proof. intros. 
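(* Dual of min_maj: the sequence of suprema (sequence_ub Un pr1) is bounded
   below, combining the bound packaged in pr2 with the sandwich
   Vn_Un_Wn_order. *)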
assert (H := Vn_Un_Wn_order Un pr1 pr2). unfold has_lb. unfold bound. unfold has_lb in pr2. unfold bound in pr2. elim pr2; intros. exists x. unfold is_upper_bound. intros. unfold is_upper_bound in H0. elim H1; intros. rewrite H2. apply Rle_trans with (opp_seq Un x1). assert (H3 := H x1); elim H3; intros. unfold opp_seq; apply Ropp_le_contravar. assumption. apply H0. exists x1; reflexivity. Qed. (**********) Lemma cauchy_maj : forall Un:nat -> R, Cauchy_crit Un -> has_ub Un. Proof. intros. unfold has_ub. apply cauchy_bound. assumption. Qed. (**********) Lemma cauchy_opp : forall Un:nat -> R, Cauchy_crit Un -> Cauchy_crit (opp_seq Un). Proof. intro. unfold Cauchy_crit. unfold R_dist. intros. elim (H eps H0); intros. exists x; intros. unfold opp_seq. rewrite <- Rabs_Ropp. replace (- (- Un n - - Un m)) with (Un n - Un m); [ apply H1; assumption | ring ]. Qed. (**********) Lemma cauchy_min : forall Un:nat -> R, Cauchy_crit Un -> has_lb Un. Proof. intros. unfold has_lb. assert (H0 := cauchy_opp _ H). apply cauchy_bound. assumption. Qed. (**********) Lemma maj_cv : forall (Un:nat -> R) (pr:Cauchy_crit Un), { l:R | Un_cv (sequence_ub Un (cauchy_maj Un pr)) l }. Proof. intros. apply decreasing_cv. apply Wn_decreasing. apply maj_min. apply cauchy_min. assumption. Qed. (**********) Lemma min_cv : forall (Un:nat -> R) (pr:Cauchy_crit Un), { l:R | Un_cv (sequence_lb Un (cauchy_min Un pr)) l }. Proof. intros. apply growing_cv. apply Vn_growing. apply min_maj. apply cauchy_maj. assumption. Qed. Lemma cond_eq : forall x y:R, (forall eps:R, 0 < eps -> Rabs (x - y) < eps) -> x = y. Proof. intros. case (total_order_T x y); intro. elim s; intro. cut (0 < y - x). intro. assert (H1 := H (y - x) H0). rewrite <- Rabs_Ropp in H1. cut (- (x - y) = y - x); [ intro; rewrite H2 in H1 | ring ]. rewrite Rabs_right in H1. elim (Rlt_irrefl _ H1). left; assumption. apply Rplus_lt_reg_r with x. rewrite Rplus_0_r; replace (x + (y - x)) with y; [ assumption | ring ]. assumption. cut (0 < x - y). intro. assert (H1 := H (x - y) H0). rewrite Rabs_right in H1. elim (Rlt_irrefl _ H1). left; assumption. apply Rplus_lt_reg_r with y. rewrite Rplus_0_r; replace (y + (x - y)) with x; [ assumption | ring ]. Qed. Lemma not_Rlt : forall r1 r2:R, ~ r1 < r2 -> r1 >= r2. Proof. intros r1 r2; generalize (Rtotal_order r1 r2); unfold Rge. tauto. Qed. (**********) Lemma approx_maj : forall (Un:nat -> R) (pr:has_ub Un) (eps:R), 0 < eps -> exists k : nat, Rabs (lub Un pr - Un k) < eps. Proof. intros Un pr. pose (Vn := fix aux n := match n with S n' => if Rle_lt_dec (aux n') (Un n) then Un n else aux n' | O => Un O end). pose (In := fix aux n := match n with S n' => if Rle_lt_dec (Vn n) (Un n) then n else aux n' | O => O end). assert (VUI: forall n, Vn n = Un (In n)). induction n. easy. simpl. destruct (Rle_lt_dec (Vn n) (Un (S n))) as [H1|H1]. destruct (Rle_lt_dec (Un (S n)) (Un (S n))) as [H2|H2]. easy. elim (Rlt_irrefl _ H2). destruct (Rle_lt_dec (Vn n) (Un (S n))) as [H2|H2]. elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H2 H1)). exact IHn. assert (HubV : has_ub Vn). destruct pr as (ub, Hub). exists ub. intros x (n, Hn). rewrite Hn, VUI. apply Hub. now exists (In n). assert (HgrV : Un_growing Vn). intros n. induction n. simpl. destruct (Rle_lt_dec (Un O) (Un 1%nat)) as [H|_]. exact H. apply Rle_refl. simpl. destruct (Rle_lt_dec (Vn n) (Un (S n))) as [H1|H1]. destruct (Rle_lt_dec (Un (S n)) (Un (S (S n)))) as [H2|H2]. exact H2. apply Rle_refl. destruct (Rle_lt_dec (Vn n) (Un (S (S n)))) as [H2|H2]. exact H2. apply Rle_refl. 
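(* Strategy of approx_maj: Vn is the running maximum of Un (growing, with the
   same upper bounds), and In n records an index where that maximum is attained
   (VUI).  A growing bounded sequence approaches its least upper bound, and
   lub Vn = lub Un, so some Un (In n) comes within eps of lub Un pr. *)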
destruct (ub_to_lub Vn HubV) as (l, Hl). unfold lub. destruct (ub_to_lub Un pr) as (l', Hl'). replace l' with l. intros eps Heps. destruct (Un_cv_crit_lub Vn HgrV l Hl eps Heps) as (n, Hn). exists (In n). rewrite <- VUI. rewrite Rabs_minus_sym. apply Hn. apply le_refl. apply Rle_antisym. apply Hl. intros n (k, Hk). rewrite Hk, VUI. apply Hl'. now exists (In k). apply Hl'. intros n (k, Hk). rewrite Hk. apply Rle_trans with (Vn k). clear. induction k. apply Rle_refl. simpl. destruct (Rle_lt_dec (Vn k) (Un (S k))) as [H|H]. apply Rle_refl. now apply Rlt_le. apply Hl. now exists k. Qed. (**********) Lemma approx_min : forall (Un:nat -> R) (pr:has_lb Un) (eps:R), 0 < eps -> exists k : nat, Rabs (glb Un pr - Un k) < eps. Proof. intros Un pr. unfold glb. destruct lb_to_glb as (lb, Hlb). intros eps Heps. destruct (approx_maj _ pr eps Heps) as (n, Hn). exists n. unfold Rminus. rewrite <- Ropp_plus_distr, Rabs_Ropp. replace lb with (lub (opp_seq Un) pr). now rewrite <- (Ropp_involutive (Un n)). unfold lub. destruct ub_to_lub as (ub, Hub). apply Rle_antisym. apply Hub. apply Hlb. apply Hlb. apply Hub. Qed. (** Unicity of limit for convergent sequences *) Lemma UL_sequence : forall (Un:nat -> R) (l1 l2:R), Un_cv Un l1 -> Un_cv Un l2 -> l1 = l2. Proof. intros Un l1 l2; unfold Un_cv; unfold R_dist; intros. apply cond_eq. intros; cut (0 < eps / 2); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H (eps / 2) H2); intros. elim (H0 (eps / 2) H2); intros. set (N := max x x0). apply Rle_lt_trans with (Rabs (l1 - Un N) + Rabs (Un N - l2)). replace (l1 - l2) with (l1 - Un N + (Un N - l2)); [ apply Rabs_triang | ring ]. rewrite (double_var eps); apply Rplus_lt_compat. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H3; unfold ge, N; apply le_max_l. apply H4; unfold ge, N; apply le_max_r. Qed. (**********) Lemma CV_plus : forall (An Bn:nat -> R) (l1 l2:R), Un_cv An l1 -> Un_cv Bn l2 -> Un_cv (fun i:nat => An i + Bn i) (l1 + l2). Proof. unfold Un_cv; unfold R_dist; intros. cut (0 < eps / 2); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H (eps / 2) H2); intros. elim (H0 (eps / 2) H2); intros. set (N := max x x0). exists N; intros. replace (An n + Bn n - (l1 + l2)) with (An n - l1 + (Bn n - l2)); [ idtac | ring ]. apply Rle_lt_trans with (Rabs (An n - l1) + Rabs (Bn n - l2)). apply Rabs_triang. rewrite (double_var eps); apply Rplus_lt_compat. apply H3; unfold ge; apply le_trans with N; [ unfold N; apply le_max_l | assumption ]. apply H4; unfold ge; apply le_trans with N; [ unfold N; apply le_max_r | assumption ]. Qed. (**********) Lemma cv_cvabs : forall (Un:nat -> R) (l:R), Un_cv Un l -> Un_cv (fun i:nat => Rabs (Un i)) (Rabs l). Proof. unfold Un_cv; unfold R_dist; intros. elim (H eps H0); intros. exists x; intros. apply Rle_lt_trans with (Rabs (Un n - l)). apply Rabs_triang_inv2. apply H1; assumption. Qed. (**********) Lemma CV_Cauchy : forall Un:nat -> R, { l:R | Un_cv Un l } -> Cauchy_crit Un. Proof. intros Un X; elim X; intros. unfold Cauchy_crit; intros. unfold Un_cv in p; unfold R_dist in p. cut (0 < eps / 2); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (p (eps / 2) H0); intros. exists x0; intros. unfold R_dist; apply Rle_lt_trans with (Rabs (Un n - x) + Rabs (x - Un m)). replace (Un n - Un m) with (Un n - x + (x - Un m)); [ apply Rabs_triang | ring ]. rewrite (double_var eps); apply Rplus_lt_compat. 
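(* Standard eps/2 argument: |Un n - Un m| <= |Un n - x| + |x - Un m|, and each
   summand is below eps / 2 once n and m are beyond the rank x0. *)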
apply H1; assumption. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr; apply H1; assumption. Qed. (**********) Lemma maj_by_pos : forall Un:nat -> R, { l:R | Un_cv Un l } -> exists l : R, 0 < l /\ (forall n:nat, Rabs (Un n) <= l). Proof. intros Un X; elim X; intros. cut { l:R | Un_cv (fun k:nat => Rabs (Un k)) l }. intro X0. assert (H := CV_Cauchy (fun k:nat => Rabs (Un k)) X0). assert (H0 := cauchy_bound (fun k:nat => Rabs (Un k)) H). elim H0; intros. exists (x0 + 1). cut (0 <= x0). intro. split. apply Rplus_le_lt_0_compat; [ assumption | apply Rlt_0_1 ]. intros. apply Rle_trans with x0. unfold is_upper_bound in H1. apply H1. exists n; reflexivity. pattern x0 at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; left; apply Rlt_0_1. apply Rle_trans with (Rabs (Un 0%nat)). apply Rabs_pos. unfold is_upper_bound in H1. apply H1. exists 0%nat; reflexivity. exists (Rabs x). apply cv_cvabs; assumption. Qed. (**********) Lemma CV_mult : forall (An Bn:nat -> R) (l1 l2:R), Un_cv An l1 -> Un_cv Bn l2 -> Un_cv (fun i:nat => An i * Bn i) (l1 * l2). Proof. intros. cut { l:R | Un_cv An l }. intro X. assert (H1 := maj_by_pos An X). elim H1; intros M H2. elim H2; intros. unfold Un_cv; unfold R_dist; intros. cut (0 < eps / (2 * M)). intro. case (Req_dec l2 0); intro. unfold Un_cv in H0; unfold R_dist in H0. elim (H0 (eps / (2 * M)) H6); intros. exists x; intros. apply Rle_lt_trans with (Rabs (An n * Bn n - An n * l2) + Rabs (An n * l2 - l1 * l2)). replace (An n * Bn n - l1 * l2) with (An n * Bn n - An n * l2 + (An n * l2 - l1 * l2)); [ apply Rabs_triang | ring ]. replace (Rabs (An n * Bn n - An n * l2)) with (Rabs (An n) * Rabs (Bn n - l2)). replace (Rabs (An n * l2 - l1 * l2)) with 0. rewrite Rplus_0_r. apply Rle_lt_trans with (M * Rabs (Bn n - l2)). do 2 rewrite <- (Rmult_comm (Rabs (Bn n - l2))). apply Rmult_le_compat_l. apply Rabs_pos. apply H4. apply Rmult_lt_reg_l with (/ M). apply Rinv_0_lt_compat; apply H3. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite (Rmult_comm (/ M)). apply Rlt_trans with (eps / (2 * M)). apply H8; assumption. unfold Rdiv; rewrite Rinv_mult_distr. apply Rmult_lt_reg_l with 2. prove_sup0. replace (2 * (eps * (/ 2 * / M))) with (2 * / 2 * (eps * / M)); [ idtac | ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_l; rewrite double. pattern (eps * / M) at 1; rewrite <- Rplus_0_r. apply Rplus_lt_compat_l; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; assumption ]. discrR. discrR. red; intro; rewrite H10 in H3; elim (Rlt_irrefl _ H3). red; intro; rewrite H10 in H3; elim (Rlt_irrefl _ H3). rewrite H7; do 2 rewrite Rmult_0_r; unfold Rminus; rewrite Rplus_opp_r; rewrite Rabs_R0; reflexivity. replace (An n * Bn n - An n * l2) with (An n * (Bn n - l2)); [ idtac | ring ]. symmetry ; apply Rabs_mult. cut (0 < eps / (2 * Rabs l2)). intro. unfold Un_cv in H; unfold R_dist in H; unfold Un_cv in H0; unfold R_dist in H0. elim (H (eps / (2 * Rabs l2)) H8); intros N1 H9. elim (H0 (eps / (2 * M)) H6); intros N2 H10. set (N := max N1 N2). exists N; intros. apply Rle_lt_trans with (Rabs (An n * Bn n - An n * l2) + Rabs (An n * l2 - l1 * l2)). replace (An n * Bn n - l1 * l2) with (An n * Bn n - An n * l2 + (An n * l2 - l1 * l2)); [ apply Rabs_triang | ring ]. replace (Rabs (An n * Bn n - An n * l2)) with (Rabs (An n) * Rabs (Bn n - l2)). replace (Rabs (An n * l2 - l1 * l2)) with (Rabs l2 * Rabs (An n - l1)). rewrite (double_var eps); apply Rplus_lt_compat. apply Rle_lt_trans with (M * Rabs (Bn n - l2)). do 2 rewrite <- (Rmult_comm (Rabs (Bn n - l2))). 
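(* The product is split as An n * Bn n - l1 * l2 =
   An n * (Bn n - l2) + l2 * (An n - l1): the first term is controlled by the
   uniform bound M on |An|, the second by |l2|. *)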
apply Rmult_le_compat_l. apply Rabs_pos. apply H4. apply Rmult_lt_reg_l with (/ M). apply Rinv_0_lt_compat; apply H3. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; rewrite (Rmult_comm (/ M)). apply Rlt_le_trans with (eps / (2 * M)). apply H10. unfold ge; apply le_trans with N. unfold N; apply le_max_r. assumption. unfold Rdiv; rewrite Rinv_mult_distr. right; ring. discrR. red; intro; rewrite H12 in H3; elim (Rlt_irrefl _ H3). red; intro; rewrite H12 in H3; elim (Rlt_irrefl _ H3). apply Rmult_lt_reg_l with (/ Rabs l2). apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l; apply Rlt_le_trans with (eps / (2 * Rabs l2)). apply H9. unfold ge; apply le_trans with N. unfold N; apply le_max_l. assumption. unfold Rdiv; right; rewrite Rinv_mult_distr. ring. discrR. apply Rabs_no_R0; assumption. apply Rabs_no_R0; assumption. replace (An n * l2 - l1 * l2) with (l2 * (An n - l1)); [ symmetry ; apply Rabs_mult | ring ]. replace (An n * Bn n - An n * l2) with (An n * (Bn n - l2)); [ symmetry ; apply Rabs_mult | ring ]. unfold Rdiv; apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat; apply Rmult_lt_0_compat; [ prove_sup0 | apply Rabs_pos_lt; assumption ]. unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; apply Rmult_lt_0_compat; [ prove_sup0 | assumption ] ]. exists l1; assumption. Qed. Lemma tech9 : forall Un:nat -> R, Un_growing Un -> forall m n:nat, (m <= n)%nat -> Un m <= Un n. Proof. intros; unfold Un_growing in H. induction n as [| n Hrecn]. induction m as [| m Hrecm]. right; reflexivity. elim (le_Sn_O _ H0). cut ((m <= n)%nat \/ m = S n). intro; elim H1; intro. apply Rle_trans with (Un n). apply Hrecn; assumption. apply H. rewrite H2; right; reflexivity. inversion H0. right; reflexivity. left; assumption. Qed. Lemma tech13 : forall (An:nat -> R) (k:R), 0 <= k < 1 -> Un_cv (fun n:nat => Rabs (An (S n) / An n)) k -> exists k0 : R, k < k0 < 1 /\ (exists N : nat, (forall n:nat, (N <= n)%nat -> Rabs (An (S n) / An n) < k0)). Proof. intros; exists (k + (1 - k) / 2). split. split. pattern k at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l. unfold Rdiv; apply Rmult_lt_0_compat. apply Rplus_lt_reg_r with k; rewrite Rplus_0_r; replace (k + (1 - k)) with 1; [ elim H; intros; assumption | ring ]. apply Rinv_0_lt_compat; prove_sup0. apply Rmult_lt_reg_l with 2. prove_sup0. unfold Rdiv; rewrite Rmult_1_r; rewrite Rmult_plus_distr_l; pattern 2 at 1; rewrite Rmult_comm; rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ idtac | discrR ]; rewrite Rmult_1_r; replace (2 * k + (1 - k)) with (1 + k); [ idtac | ring ]. elim H; intros. apply Rplus_lt_compat_l; assumption. unfold Un_cv in H0; cut (0 < (1 - k) / 2). intro; elim (H0 ((1 - k) / 2) H1); intros. exists x; intros. assert (H4 := H2 n H3). unfold R_dist in H4; rewrite <- Rabs_Rabsolu; replace (Rabs (An (S n) / An n)) with (Rabs (An (S n) / An n) - k + k); [ idtac | ring ]; apply Rle_lt_trans with (Rabs (Rabs (An (S n) / An n) - k) + Rabs k). apply Rabs_triang. rewrite (Rabs_right k). apply Rplus_lt_reg_r with (- k); rewrite <- (Rplus_comm k); repeat rewrite <- Rplus_assoc; rewrite Rplus_opp_l; repeat rewrite Rplus_0_l; apply H4. apply Rle_ge; elim H; intros; assumption. unfold Rdiv; apply Rmult_lt_0_compat. apply Rplus_lt_reg_r with k; rewrite Rplus_0_r; elim H; intros; replace (k + (1 - k)) with 1; [ assumption | ring ]. apply Rinv_0_lt_compat; prove_sup0. Qed. 
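(* tech13 is the quantitative half of the ratio test: if |An (S n) / An n|
   tends to k < 1, the ratios are eventually below k0 := k + (1 - k) / 2, which
   still satisfies k < k0 < 1.  For instance, k = 1/2 gives k0 = 3/4. *)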
(**********) Lemma growing_ineq : forall (Un:nat -> R) (l:R), Un_growing Un -> Un_cv Un l -> forall n:nat, Un n <= l. Proof. intros; case (total_order_T (Un n) l); intro. elim s; intro. left; assumption. right; assumption. cut (0 < Un n - l). intro; unfold Un_cv in H0; unfold R_dist in H0. elim (H0 (Un n - l) H1); intros N1 H2. set (N := max n N1). cut (Un n - l <= Un N - l). intro; cut (Un N - l < Un n - l). intro; elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H3 H4)). apply Rle_lt_trans with (Rabs (Un N - l)). apply RRle_abs. apply H2. unfold ge, N; apply le_max_r. unfold Rminus; do 2 rewrite <- (Rplus_comm (- l)); apply Rplus_le_compat_l. apply tech9. assumption. unfold N; apply le_max_l. apply Rplus_lt_reg_r with l. rewrite Rplus_0_r. replace (l + (Un n - l)) with (Un n); [ assumption | ring ]. Qed. (** Un->l => (-Un) -> (-l) *) Lemma CV_opp : forall (An:nat -> R) (l:R), Un_cv An l -> Un_cv (opp_seq An) (- l). Proof. intros An l. unfold Un_cv; unfold R_dist; intros. elim (H eps H0); intros. exists x; intros. unfold opp_seq; replace (- An n - - l) with (- (An n - l)); [ rewrite Rabs_Ropp | ring ]. apply H1; assumption. Qed. (**********) Lemma decreasing_ineq : forall (Un:nat -> R) (l:R), Un_decreasing Un -> Un_cv Un l -> forall n:nat, l <= Un n. Proof. intros. assert (H1 := decreasing_growing _ H). assert (H2 := CV_opp _ _ H0). assert (H3 := growing_ineq _ _ H1 H2). apply Ropp_le_cancel. unfold opp_seq in H3; apply H3. Qed. (**********) Lemma CV_minus : forall (An Bn:nat -> R) (l1 l2:R), Un_cv An l1 -> Un_cv Bn l2 -> Un_cv (fun i:nat => An i - Bn i) (l1 - l2). Proof. intros. replace (fun i:nat => An i - Bn i) with (fun i:nat => An i + opp_seq Bn i). unfold Rminus; apply CV_plus. assumption. apply CV_opp; assumption. unfold Rminus, opp_seq; reflexivity. Qed. (** Un -> +oo *) Definition cv_infty (Un:nat -> R) : Prop := forall M:R, exists N : nat, (forall n:nat, (N <= n)%nat -> M < Un n). (** Un -> +oo => /Un -> O *) Lemma cv_infty_cv_R0 : forall Un:nat -> R, (forall n:nat, Un n <> 0) -> cv_infty Un -> Un_cv (fun n:nat => / Un n) 0. Proof. unfold cv_infty, Un_cv; unfold R_dist; intros. elim (H0 (/ eps)); intros N0 H2. exists N0; intros. unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite (Rabs_Rinv _ (H n)). apply Rmult_lt_reg_l with (Rabs (Un n)). apply Rabs_pos_lt; apply H. rewrite <- Rinv_r_sym. apply Rmult_lt_reg_l with (/ eps). apply Rinv_0_lt_compat; assumption. rewrite Rmult_1_r; rewrite (Rmult_comm (/ eps)); rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; apply Rlt_le_trans with (Un n). apply H2; assumption. apply RRle_abs. red; intro; rewrite H4 in H1; elim (Rlt_irrefl _ H1). apply Rabs_no_R0; apply H. Qed. (**********) Lemma decreasing_prop : forall (Un:nat -> R) (m n:nat), Un_decreasing Un -> (m <= n)%nat -> Un n <= Un m. Proof. unfold Un_decreasing; intros. induction n as [| n Hrecn]. induction m as [| m Hrecm]. right; reflexivity. elim (le_Sn_O _ H0). cut ((m <= n)%nat \/ m = S n). intro; elim H1; intro. apply Rle_trans with (Un n). apply H. apply Hrecn; assumption. rewrite H2; right; reflexivity. inversion H0; [ right; reflexivity | left; assumption ]. Qed. (** |x|^n/n! -> 0 *) Lemma cv_speed_pow_fact : forall x:R, Un_cv (fun n:nat => x ^ n / INR (fact n)) 0. Proof. intro; cut (Un_cv (fun n:nat => Rabs x ^ n / INR (fact n)) 0 -> Un_cv (fun n:nat => x ^ n / INR (fact n)) 0). intro; apply H. unfold Un_cv; unfold R_dist; intros; case (Req_dec x 0); intro. exists 1%nat; intros. 
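(* Case x = 0: for n >= 1 the general term is 0 ^ n / INR (fact n) = 0, so any
   eps works. *)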
rewrite H1; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_R0; rewrite pow_ne_zero; [ unfold Rdiv; rewrite Rmult_0_l; rewrite Rabs_R0; assumption | red; intro; rewrite H3 in H2; elim (le_Sn_n _ H2) ]. assert (H2 := Rabs_pos_lt x H1); set (M := up (Rabs x)); cut (0 <= M)%Z. intro; elim (IZN M H3); intros M_nat H4. set (Un := fun n:nat => Rabs x ^ (M_nat + n) / INR (fact (M_nat + n))). cut (Un_cv Un 0); unfold Un_cv; unfold R_dist; intros. elim (H5 eps H0); intros N H6. exists (M_nat + N)%nat; intros; cut (exists p : nat, (p >= N)%nat /\ n = (M_nat + p)%nat). intro; elim H8; intros p H9. elim H9; intros; rewrite H11; unfold Un in H6; apply H6; assumption. exists (n - M_nat)%nat. split. unfold ge; apply (fun p n m:nat => plus_le_reg_l n m p) with M_nat; rewrite <- le_plus_minus. assumption. apply le_trans with (M_nat + N)%nat. apply le_plus_l. assumption. apply le_plus_minus; apply le_trans with (M_nat + N)%nat; [ apply le_plus_l | assumption ]. set (Vn := fun n:nat => Rabs x * (Un 0%nat / INR (S n))). cut (1 <= M_nat)%nat. intro; cut (forall n:nat, 0 < Un n). intro; cut (Un_decreasing Un). intro; cut (forall n:nat, Un (S n) <= Vn n). intro; cut (Un_cv Vn 0). unfold Un_cv; unfold R_dist; intros. elim (H10 eps0 H5); intros N1 H11. exists (S N1); intros. cut (forall n:nat, 0 < Vn n). intro; apply Rle_lt_trans with (Rabs (Vn (pred n) - 0)). repeat rewrite Rabs_right. unfold Rminus; rewrite Ropp_0; do 2 rewrite Rplus_0_r; replace n with (S (pred n)). apply H9. inversion H12; simpl; reflexivity. apply Rle_ge; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; left; apply H13. apply Rle_ge; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; left; apply H7. apply H11; unfold ge; apply le_S_n; replace (S (pred n)) with n; [ unfold ge in H12; exact H12 | inversion H12; simpl; reflexivity ]. intro; apply Rlt_le_trans with (Un (S n0)); [ apply H7 | apply H9 ]. cut (cv_infty (fun n:nat => INR (S n))). intro; cut (Un_cv (fun n:nat => / INR (S n)) 0). unfold Un_cv, R_dist; intros; unfold Vn. cut (0 < eps1 / (Rabs x * Un 0%nat)). intro; elim (H11 _ H13); intros N H14. exists N; intros; replace (Rabs x * (Un 0%nat / INR (S n)) - 0) with (Rabs x * Un 0%nat * (/ INR (S n) - 0)); [ idtac | unfold Rdiv; ring ]. rewrite Rabs_mult; apply Rmult_lt_reg_l with (/ Rabs (Rabs x * Un 0%nat)). apply Rinv_0_lt_compat; apply Rabs_pos_lt. apply prod_neq_R0. apply Rabs_no_R0; assumption. assert (H16 := H7 0%nat); red; intro; rewrite H17 in H16; elim (Rlt_irrefl _ H16). rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_l. replace (/ Rabs (Rabs x * Un 0%nat) * eps1) with (eps1 / (Rabs x * Un 0%nat)). apply H14; assumption. unfold Rdiv; rewrite (Rabs_right (Rabs x * Un 0%nat)). apply Rmult_comm. apply Rle_ge; apply Rmult_le_pos. apply Rabs_pos. left; apply H7. apply Rabs_no_R0. apply prod_neq_R0; [ apply Rabs_no_R0; assumption | assert (H16 := H7 0%nat); red; intro; rewrite H17 in H16; elim (Rlt_irrefl _ H16) ]. unfold Rdiv; apply Rmult_lt_0_compat. assumption. apply Rinv_0_lt_compat; apply Rmult_lt_0_compat. apply Rabs_pos_lt; assumption. apply H7. apply (cv_infty_cv_R0 (fun n:nat => INR (S n))). intro; apply not_O_INR; discriminate. assumption. unfold cv_infty; intro; case (total_order_T M0 0); intro. elim s; intro. exists 0%nat; intros. apply Rlt_trans with 0; [ assumption | apply lt_INR_0; apply lt_O_Sn ]. exists 0%nat; intros; rewrite b; apply lt_INR_0; apply lt_O_Sn. set (M0_z := up M0). assert (H10 := archimed M0). cut (0 <= M0_z)%Z. intro; elim (IZN _ H11); intros M0_nat H12. 
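(* In the remaining case 0 < M0, the Archimedean property (archimed M0) gives
   an integer up M0 above M0; its natural-number form M0_nat is the rank beyond
   which M0 < INR (S n). *)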
exists M0_nat; intros. apply Rlt_le_trans with (IZR M0_z). elim H10; intros; assumption. rewrite H12; rewrite <- INR_IZR_INZ; apply le_INR. apply le_trans with n; [ assumption | apply le_n_Sn ]. apply le_IZR; left; simpl; unfold M0_z; apply Rlt_trans with M0; [ assumption | elim H10; intros; assumption ]. intro; apply Rle_trans with (Rabs x * Un n * / INR (S n)). unfold Un; replace (M_nat + S n)%nat with (M_nat + n + 1)%nat. rewrite pow_add; replace (Rabs x ^ 1) with (Rabs x); [ idtac | simpl; ring ]. unfold Rdiv; rewrite <- (Rmult_comm (Rabs x)); repeat rewrite Rmult_assoc; repeat apply Rmult_le_compat_l. apply Rabs_pos. left; apply pow_lt; assumption. replace (M_nat + n + 1)%nat with (S (M_nat + n)). rewrite fact_simpl; rewrite mult_comm; rewrite mult_INR; rewrite Rinv_mult_distr. apply Rmult_le_compat_l. left; apply Rinv_0_lt_compat; apply lt_INR_0; apply neq_O_lt; red; intro; assert (H10 := eq_sym H9); elim (fact_neq_0 _ H10). left; apply Rinv_lt_contravar. apply Rmult_lt_0_compat; apply lt_INR_0; apply lt_O_Sn. apply lt_INR; apply lt_n_S. pattern n at 1; replace n with (0 + n)%nat; [ idtac | reflexivity ]. apply plus_lt_compat_r. apply lt_le_trans with 1%nat; [ apply lt_O_Sn | assumption ]. apply INR_fact_neq_0. apply not_O_INR; discriminate. ring. ring. unfold Vn; rewrite Rmult_assoc; unfold Rdiv; rewrite (Rmult_comm (Un 0%nat)); rewrite (Rmult_comm (Un n)). repeat apply Rmult_le_compat_l. apply Rabs_pos. left; apply Rinv_0_lt_compat; apply lt_INR_0; apply lt_O_Sn. apply decreasing_prop; [ assumption | apply le_O_n ]. unfold Un_decreasing; intro; unfold Un. replace (M_nat + S n)%nat with (M_nat + n + 1)%nat. rewrite pow_add; unfold Rdiv; rewrite Rmult_assoc; apply Rmult_le_compat_l. left; apply pow_lt; assumption. replace (Rabs x ^ 1) with (Rabs x); [ idtac | simpl; ring ]. replace (M_nat + n + 1)%nat with (S (M_nat + n)). apply Rmult_le_reg_l with (INR (fact (S (M_nat + n)))). apply lt_INR_0; apply neq_O_lt; red; intro; assert (H9 := eq_sym H8); elim (fact_neq_0 _ H9). rewrite (Rmult_comm (Rabs x)); rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_l. rewrite fact_simpl; rewrite mult_INR; rewrite Rmult_assoc; rewrite <- Rinv_r_sym. rewrite Rmult_1_r; apply Rle_trans with (INR M_nat). left; rewrite INR_IZR_INZ. rewrite <- H4; assert (H8 := archimed (Rabs x)); elim H8; intros; assumption. apply le_INR; omega. apply INR_fact_neq_0. apply INR_fact_neq_0. ring. ring. intro; unfold Un; unfold Rdiv; apply Rmult_lt_0_compat. apply pow_lt; assumption. apply Rinv_0_lt_compat; apply lt_INR_0; apply neq_O_lt; red; intro; assert (H8 := eq_sym H7); elim (fact_neq_0 _ H8). clear Un Vn; apply INR_le; simpl. induction M_nat as [| M_nat HrecM_nat]. assert (H6 := archimed (Rabs x)); fold M in H6; elim H6; intros. rewrite H4 in H7; rewrite <- INR_IZR_INZ in H7. simpl in H7; elim (Rlt_irrefl _ (Rlt_trans _ _ _ H2 H7)). replace 1 with (INR 1); [ apply le_INR | reflexivity ]; apply le_n_S; apply le_O_n. apply le_IZR; simpl; left; apply Rlt_trans with (Rabs x). assumption. elim (archimed (Rabs x)); intros; assumption. unfold Un_cv; unfold R_dist; intros; elim (H eps H0); intros. exists x0; intros; apply Rle_lt_trans with (Rabs (Rabs x ^ n / INR (fact n) - 0)). unfold Rminus; rewrite Ropp_0; do 2 rewrite Rplus_0_r; rewrite (Rabs_right (Rabs x ^ n / INR (fact n))). unfold Rdiv; rewrite Rabs_mult; rewrite (Rabs_right (/ INR (fact n))). rewrite RPow_abs; right; reflexivity. 
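(* Transfer step: |x ^ n / INR (fact n)| equals Rabs x ^ n / INR (fact n) (by
   RPow_abs and nonnegativity of / INR (fact n)), so convergence of the
   absolute-value sequence yields the general statement. *)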
apply Rle_ge; left; apply Rinv_0_lt_compat; apply lt_INR_0; apply neq_O_lt; red; intro; assert (H4 := eq_sym H3); elim (fact_neq_0 _ H4). apply Rle_ge; unfold Rdiv; apply Rmult_le_pos. case (Req_dec x 0); intro. rewrite H3; rewrite Rabs_R0. induction n as [| n Hrecn]; [ simpl; left; apply Rlt_0_1 | simpl; rewrite Rmult_0_l; right; reflexivity ]. left; apply pow_lt; apply Rabs_pos_lt; assumption. left; apply Rinv_0_lt_compat; apply lt_INR_0; apply neq_O_lt; red; intro; assert (H4 := eq_sym H3); elim (fact_neq_0 _ H4). apply H1; assumption. Qed. coq-8.4pl2/theories/Reals/AltSeries.v0000640000175000001440000003642212010532755016610 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R) (i:nat) : R := (-1) ^ i * Un i. Definition positivity_seq (Un:nat -> R) : Prop := forall n:nat, 0 <= Un n. Lemma CV_ALT_step0 : forall Un:nat -> R, Un_decreasing Un -> Un_growing (fun N:nat => sum_f_R0 (tg_alt Un) (S (2 * N))). Proof. intros; unfold Un_growing; intro. cut ((2 * S n)%nat = S (S (2 * n))). intro; rewrite H0. do 4 rewrite tech5; repeat rewrite Rplus_assoc; apply Rplus_le_compat_l. pattern (tg_alt Un (S (2 * n))) at 1; rewrite <- Rplus_0_r. apply Rplus_le_compat_l. unfold tg_alt; rewrite <- H0; rewrite pow_1_odd; rewrite pow_1_even; rewrite Rmult_1_l. apply Rplus_le_reg_l with (Un (S (2 * S n))). rewrite Rplus_0_r; replace (Un (S (2 * S n)) + (Un (2 * S n)%nat + -1 * Un (S (2 * S n)))) with (Un (2 * S n)%nat); [ idtac | ring ]. apply H. cut (forall n:nat, S n = (n + 1)%nat); [ intro | intro; ring ]. rewrite (H0 n); rewrite (H0 (S (2 * n))); rewrite (H0 (2 * n)%nat); ring. Qed. Lemma CV_ALT_step1 : forall Un:nat -> R, Un_decreasing Un -> Un_decreasing (fun N:nat => sum_f_R0 (tg_alt Un) (2 * N)). Proof. intros; unfold Un_decreasing; intro. cut ((2 * S n)%nat = S (S (2 * n))). intro; rewrite H0; do 2 rewrite tech5; repeat rewrite Rplus_assoc. pattern (sum_f_R0 (tg_alt Un) (2 * n)) at 2; rewrite <- Rplus_0_r. apply Rplus_le_compat_l. unfold tg_alt; rewrite <- H0; rewrite pow_1_odd; rewrite pow_1_even; rewrite Rmult_1_l. apply Rplus_le_reg_l with (Un (S (2 * n))). rewrite Rplus_0_r; replace (Un (S (2 * n)) + (-1 * Un (S (2 * n)) + Un (2 * S n)%nat)) with (Un (2 * S n)%nat); [ idtac | ring ]. rewrite H0; apply H. cut (forall n:nat, S n = (n + 1)%nat); [ intro | intro; ring ]. rewrite (H0 n); rewrite (H0 (S (2 * n))); rewrite (H0 (2 * n)%nat); ring. Qed. (**********) Lemma CV_ALT_step2 : forall (Un:nat -> R) (N:nat), Un_decreasing Un -> positivity_seq Un -> sum_f_R0 (fun i:nat => tg_alt Un (S i)) (S (2 * N)) <= 0. Proof. intros; induction N as [| N HrecN]. simpl; unfold tg_alt; simpl; rewrite Rmult_1_r. replace (-1 * -1 * Un 2%nat) with (Un 2%nat); [ idtac | ring ]. apply Rplus_le_reg_l with (Un 1%nat); rewrite Rplus_0_r. replace (Un 1%nat + (-1 * Un 1%nat + Un 2%nat)) with (Un 2%nat); [ apply H | ring ]. cut (S (2 * S N) = S (S (S (2 * N)))). intro; rewrite H1; do 2 rewrite tech5. apply Rle_trans with (sum_f_R0 (fun i:nat => tg_alt Un (S i)) (S (2 * N))). pattern (sum_f_R0 (fun i:nat => tg_alt Un (S i)) (S (2 * N))) at 2; rewrite <- Rplus_0_r. rewrite Rplus_assoc; apply Rplus_le_compat_l. unfold tg_alt; rewrite <- H1. rewrite pow_1_odd. cut (S (S (2 * S N)) = (2 * S (S N))%nat). intro; rewrite H2; rewrite pow_1_even; rewrite Rmult_1_l; rewrite <- H2. apply Rplus_le_reg_l with (Un (S (2 * S N))). 
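(* The pair of terms added in the induction step has the form
   - Un m + Un (S m) with m odd; it is <= 0 because Un is decreasing, so the
   shifted alternating partial sums stay nonpositive. *)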
rewrite Rplus_0_r; replace (Un (S (2 * S N)) + (-1 * Un (S (2 * S N)) + Un (S (S (2 * S N))))) with (Un (S (S (2 * S N)))); [ idtac | ring ]. apply H. ring. apply HrecN. ring. Qed. (** A more general inequality *) Lemma CV_ALT_step3 : forall (Un:nat -> R) (N:nat), Un_decreasing Un -> positivity_seq Un -> sum_f_R0 (fun i:nat => tg_alt Un (S i)) N <= 0. Proof. intros; induction N as [| N HrecN]. simpl; unfold tg_alt; simpl; rewrite Rmult_1_r. apply Rplus_le_reg_l with (Un 1%nat). rewrite Rplus_0_r; replace (Un 1%nat + -1 * Un 1%nat) with 0; [ apply H0 | ring ]. assert (H1 := even_odd_cor N). elim H1; intros. elim H2; intro. rewrite H3; apply CV_ALT_step2; assumption. rewrite H3; rewrite tech5. apply Rle_trans with (sum_f_R0 (fun i:nat => tg_alt Un (S i)) (S (2 * x))). pattern (sum_f_R0 (fun i:nat => tg_alt Un (S i)) (S (2 * x))) at 2; rewrite <- Rplus_0_r. apply Rplus_le_compat_l. unfold tg_alt; simpl. replace (x + (x + 0))%nat with (2 * x)%nat; [ idtac | ring ]. rewrite pow_1_even. replace (-1 * (-1 * (-1 * 1)) * Un (S (S (S (2 * x))))) with (- Un (S (S (S (2 * x))))); [ idtac | ring ]. apply Rplus_le_reg_l with (Un (S (S (S (2 * x))))). rewrite Rplus_0_r; rewrite Rplus_opp_r. apply H0. apply CV_ALT_step2; assumption. Qed. (**********) Lemma CV_ALT_step4 : forall Un:nat -> R, Un_decreasing Un -> positivity_seq Un -> has_ub (fun N:nat => sum_f_R0 (tg_alt Un) (S (2 * N))). Proof. intros; unfold has_ub; unfold bound. exists (Un 0%nat). unfold is_upper_bound; intros; elim H1; intros. rewrite H2; rewrite decomp_sum. replace (tg_alt Un 0) with (Un 0%nat). pattern (Un 0%nat) at 2; rewrite <- Rplus_0_r. apply Rplus_le_compat_l. apply CV_ALT_step3; assumption. unfold tg_alt; simpl; ring. apply lt_O_Sn. Qed. (** This lemma gives an interesting result about alternated series *) Lemma CV_ALT : forall Un:nat -> R, Un_decreasing Un -> positivity_seq Un -> Un_cv Un 0 -> { l:R | Un_cv (fun N:nat => sum_f_R0 (tg_alt Un) N) l }. Proof. intros. assert (H2 := CV_ALT_step0 _ H). assert (H3 := CV_ALT_step4 _ H H0). assert (X := growing_cv _ H2 H3). elim X; intros. exists x. unfold Un_cv; unfold R_dist; unfold Un_cv in H1; unfold R_dist in H1; unfold Un_cv in p; unfold R_dist in p. intros; cut (0 < eps / 2); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H1 (eps / 2) H5); intros N2 H6. elim (p (eps / 2) H5); intros N1 H7. set (N := max (S (2 * N1)) N2). exists N; intros. assert (H9 := even_odd_cor n). elim H9; intros P H10. cut (N1 <= P)%nat. intro; elim H10; intro. replace (sum_f_R0 (tg_alt Un) n - x) with (sum_f_R0 (tg_alt Un) (S n) - x + - tg_alt Un (S n)). apply Rle_lt_trans with (Rabs (sum_f_R0 (tg_alt Un) (S n) - x) + Rabs (- tg_alt Un (S n))). apply Rabs_triang. rewrite (double_var eps); apply Rplus_lt_compat. rewrite H12; apply H7; assumption. rewrite Rabs_Ropp; unfold tg_alt; rewrite Rabs_mult; rewrite pow_1_abs; rewrite Rmult_1_l; unfold Rminus in H6; rewrite Ropp_0 in H6; rewrite <- (Rplus_0_r (Un (S n))); apply H6. unfold ge; apply le_trans with n. apply le_trans with N; [ unfold N; apply le_max_r | assumption ]. apply le_n_Sn. rewrite tech5; ring. rewrite H12; apply Rlt_trans with (eps / 2). apply H7; assumption. unfold Rdiv; apply Rmult_lt_reg_l with 2. prove_sup0. rewrite (Rmult_comm 2); rewrite Rmult_assoc; rewrite <- Rinv_l_sym; [ rewrite Rmult_1_r | discrR ]. rewrite double. pattern eps at 1; rewrite <- (Rplus_0_r eps); apply Rplus_lt_compat_l; assumption. elim H10; intro; apply le_double. rewrite <- H11; apply le_trans with N. 
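(* Index bookkeeping: from n >= N >= S (2 * N1) and the decomposition n = 2 * P
   or n = S (2 * P) given by even_odd_cor, one deduces N1 <= P (via le_double,
   resp. lt_n_Sm_le). *)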
unfold N; apply le_trans with (S (2 * N1)); [ apply le_n_Sn | apply le_max_l ]. assumption. apply lt_n_Sm_le. rewrite <- H11. apply lt_le_trans with N. unfold N; apply lt_le_trans with (S (2 * N1)). apply lt_n_Sn. apply le_max_l. assumption. Qed. (*************************************************) (** * Convergence of alternated series *) Theorem alternated_series : forall Un:nat -> R, Un_decreasing Un -> Un_cv Un 0 -> { l:R | Un_cv (fun N:nat => sum_f_R0 (tg_alt Un) N) l }. Proof. intros; apply CV_ALT. assumption. unfold positivity_seq; apply decreasing_ineq; assumption. assumption. Qed. Theorem alternated_series_ineq : forall (Un:nat -> R) (l:R) (N:nat), Un_decreasing Un -> Un_cv Un 0 -> Un_cv (fun N:nat => sum_f_R0 (tg_alt Un) N) l -> sum_f_R0 (tg_alt Un) (S (2 * N)) <= l <= sum_f_R0 (tg_alt Un) (2 * N). Proof. intros. cut (Un_cv (fun N:nat => sum_f_R0 (tg_alt Un) (2 * N)) l). cut (Un_cv (fun N:nat => sum_f_R0 (tg_alt Un) (S (2 * N))) l). intros; split. apply (growing_ineq (fun N:nat => sum_f_R0 (tg_alt Un) (S (2 * N)))). apply CV_ALT_step0; assumption. assumption. apply (decreasing_ineq (fun N:nat => sum_f_R0 (tg_alt Un) (2 * N))). apply CV_ALT_step1; assumption. assumption. unfold Un_cv; unfold R_dist; unfold Un_cv in H1; unfold R_dist in H1; intros. elim (H1 eps H2); intros. exists x; intros. apply H3. unfold ge; apply le_trans with (2 * n)%nat. apply le_trans with n. assumption. assert (H5 := mult_O_le n 2). elim H5; intro. cut (0%nat <> 2%nat); [ intro; elim H7; symmetry ; assumption | discriminate ]. assumption. apply le_n_Sn. unfold Un_cv; unfold R_dist; unfold Un_cv in H1; unfold R_dist in H1; intros. elim (H1 eps H2); intros. exists x; intros. apply H3. unfold ge; apply le_trans with n. assumption. assert (H5 := mult_O_le n 2). elim H5; intro. cut (0%nat <> 2%nat); [ intro; elim H7; symmetry ; assumption | discriminate ]. assumption. Qed. (***************************************) (** * Application : construction of PI *) (***************************************) Definition PI_tg (n:nat) := / INR (2 * n + 1). Lemma PI_tg_pos : forall n:nat, 0 <= PI_tg n. Proof. intro; unfold PI_tg; left; apply Rinv_0_lt_compat; apply lt_INR_0; replace (2 * n + 1)%nat with (S (2 * n)); [ apply lt_O_Sn | ring ]. Qed. Lemma PI_tg_decreasing : Un_decreasing PI_tg. Proof. unfold PI_tg, Un_decreasing; intro. apply Rmult_le_reg_l with (INR (2 * n + 1)). apply lt_INR_0. replace (2 * n + 1)%nat with (S (2 * n)); [ apply lt_O_Sn | ring ]. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with (INR (2 * S n + 1)). apply lt_INR_0. replace (2 * S n + 1)%nat with (S (2 * S n)); [ apply lt_O_Sn | ring ]. rewrite (Rmult_comm (INR (2 * S n + 1))); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. do 2 rewrite Rmult_1_r; apply le_INR. replace (2 * S n + 1)%nat with (S (S (2 * n + 1))). apply le_trans with (S (2 * n + 1)); apply le_n_Sn. ring. apply not_O_INR; discriminate. apply not_O_INR; replace (2 * n + 1)%nat with (S (2 * n)); [ discriminate | ring ]. Qed. Lemma PI_tg_cv : Un_cv PI_tg 0. Proof. unfold Un_cv; unfold R_dist; intros. cut (0 < 2 * eps); [ intro | apply Rmult_lt_0_compat; [ prove_sup0 | assumption ] ]. assert (H1 := archimed (/ (2 * eps))). cut (0 <= up (/ (2 * eps)))%Z. intro; assert (H3 := IZN (up (/ (2 * eps))) H2). elim H3; intros N H4. cut (0 < N)%nat. intro; exists N; intros. cut (0 < n)%nat. intro; unfold Rminus; rewrite Ropp_0; rewrite Rplus_0_r; rewrite Rabs_right. unfold PI_tg; apply Rlt_trans with (/ INR (2 * n)). apply Rmult_lt_reg_l with (INR (2 * n)). apply lt_INR_0. 
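(* Since PI_tg n = / INR (2 * n + 1) < / INR (2 * n) <= / INR (2 * N), it is
   enough that / INR (2 * N) < eps, which follows from the Archimedean choice
   of N above: / (2 * eps) < IZR (up (/ (2 * eps))) = INR N. *)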
replace (2 * n)%nat with (n + n)%nat; [ idtac | ring ]. apply lt_le_trans with n. assumption. apply le_plus_l. rewrite <- Rinv_r_sym. apply Rmult_lt_reg_l with (INR (2 * n + 1)). apply lt_INR_0. replace (2 * n + 1)%nat with (S (2 * n)); [ apply lt_O_Sn | ring ]. rewrite (Rmult_comm (INR (2 * n + 1))). rewrite Rmult_assoc; rewrite <- Rinv_l_sym. do 2 rewrite Rmult_1_r; apply lt_INR. replace (2 * n + 1)%nat with (S (2 * n)); [ apply lt_n_Sn | ring ]. apply not_O_INR; replace (2 * n + 1)%nat with (S (2 * n)); [ discriminate | ring ]. replace n with (S (pred n)). apply not_O_INR; discriminate. symmetry ; apply S_pred with 0%nat. assumption. apply Rle_lt_trans with (/ INR (2 * N)). apply Rmult_le_reg_l with (INR (2 * N)). rewrite mult_INR; apply Rmult_lt_0_compat; [ simpl; prove_sup0 | apply lt_INR_0; assumption ]. rewrite <- Rinv_r_sym. apply Rmult_le_reg_l with (INR (2 * n)). rewrite mult_INR; apply Rmult_lt_0_compat; [ simpl; prove_sup0 | apply lt_INR_0; assumption ]. rewrite (Rmult_comm (INR (2 * n))); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. do 2 rewrite Rmult_1_r; apply le_INR. apply (fun m n p:nat => mult_le_compat_l p n m); assumption. replace n with (S (pred n)). apply not_O_INR; discriminate. symmetry ; apply S_pred with 0%nat. assumption. replace N with (S (pred N)). apply not_O_INR; discriminate. symmetry ; apply S_pred with 0%nat. assumption. rewrite mult_INR. rewrite Rinv_mult_distr. replace (INR 2) with 2; [ idtac | reflexivity ]. apply Rmult_lt_reg_l with 2. prove_sup0. rewrite <- Rmult_assoc; rewrite <- Rinv_r_sym; [ idtac | discrR ]. rewrite Rmult_1_l; apply Rmult_lt_reg_l with (INR N). apply lt_INR_0; assumption. rewrite <- Rinv_r_sym. apply Rmult_lt_reg_l with (/ (2 * eps)). apply Rinv_0_lt_compat; assumption. rewrite Rmult_1_r; replace (/ (2 * eps) * (INR N * (2 * eps))) with (INR N * (2 * eps * / (2 * eps))); [ idtac | ring ]. rewrite <- Rinv_r_sym. rewrite Rmult_1_r; replace (INR N) with (IZR (Z.of_nat N)). rewrite <- H4. elim H1; intros; assumption. symmetry ; apply INR_IZR_INZ. apply prod_neq_R0; [ discrR | red; intro; rewrite H8 in H; elim (Rlt_irrefl _ H) ]. apply not_O_INR. red; intro; rewrite H8 in H5; elim (lt_irrefl _ H5). replace (INR 2) with 2; [ discrR | reflexivity ]. apply not_O_INR. red; intro; rewrite H8 in H5; elim (lt_irrefl _ H5). apply Rle_ge; apply PI_tg_pos. apply lt_le_trans with N; assumption. elim H1; intros H5 _. assert (H6 := lt_eq_lt_dec 0 N). elim H6; intro. elim a; intro. assumption. rewrite <- b in H4. rewrite H4 in H5. simpl in H5. cut (0 < / (2 * eps)); [ intro | apply Rinv_0_lt_compat; assumption ]. elim (Rlt_irrefl _ (Rlt_trans _ _ _ H7 H5)). elim (lt_n_O _ b). apply le_IZR. simpl. left; apply Rlt_trans with (/ (2 * eps)). apply Rinv_0_lt_compat; assumption. elim H1; intros; assumption. Qed. Lemma exist_PI : { l:R | Un_cv (fun N:nat => sum_f_R0 (tg_alt PI_tg) N) l }. Proof. apply alternated_series. apply PI_tg_decreasing. apply PI_tg_cv. Qed. (** Now, PI is defined *) Definition Alt_PI : R := 4 * (let (a,_) := exist_PI in a). (** We can get an approximation of PI with the following inequality *) Lemma Alt_PI_ineq : forall N:nat, sum_f_R0 (tg_alt PI_tg) (S (2 * N)) <= Alt_PI / 4 <= sum_f_R0 (tg_alt PI_tg) (2 * N). Proof. intro; apply alternated_series_ineq. apply PI_tg_decreasing. apply PI_tg_cv. unfold Alt_PI; case exist_PI; intro. replace (4 * x / 4) with x. trivial. unfold Rdiv; rewrite (Rmult_comm 4); rewrite Rmult_assoc; rewrite <- Rinv_r_sym; [ rewrite Rmult_1_r; reflexivity | discrR ]. Qed. Lemma Alt_PI_RGT_0 : 0 < Alt_PI. Proof. 
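(* Idea: by Alt_PI_ineq with N = 0, Alt_PI / 4 is at least the partial sum
   PI_tg 0 - PI_tg 1 = 1 - 1/3 = 2/3 > 0, hence 0 < Alt_PI. *)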
assert (H := Alt_PI_ineq 0). apply Rmult_lt_reg_l with (/ 4). apply Rinv_0_lt_compat; prove_sup0. rewrite Rmult_0_r; rewrite Rmult_comm. elim H; clear H; intros H _. unfold Rdiv in H; apply Rlt_le_trans with (sum_f_R0 (tg_alt PI_tg) (S (2 * 0))). simpl; unfold tg_alt; simpl; rewrite Rmult_1_l; rewrite Rmult_1_r; apply Rplus_lt_reg_r with (PI_tg 1). rewrite Rplus_0_r; replace (PI_tg 1 + (PI_tg 0 + -1 * PI_tg 1)) with (PI_tg 0); [ unfold PI_tg | ring ]. simpl; apply Rinv_lt_contravar. rewrite Rmult_1_l; replace (2 + 1) with 3; [ prove_sup0 | ring ]. rewrite Rplus_comm; pattern 1 at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; prove_sup0. assumption. Qed. coq-8.4pl2/theories/Reals/Rbase.v0000640000175000001440000000116012010532755015740 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Rlist -> Rlist. Fixpoint In (x:R) (l:Rlist) : Prop := match l with | nil => False | cons a l' => x = a \/ In x l' end. Fixpoint Rlength (l:Rlist) : nat := match l with | nil => 0%nat | cons a l' => S (Rlength l') end. Fixpoint MaxRlist (l:Rlist) : R := match l with | nil => 0 | cons a l1 => match l1 with | nil => a | cons a' l2 => Rmax a (MaxRlist l1) end end. Fixpoint MinRlist (l:Rlist) : R := match l with | nil => 1 | cons a l1 => match l1 with | nil => a | cons a' l2 => Rmin a (MinRlist l1) end end. Lemma MaxRlist_P1 : forall (l:Rlist) (x:R), In x l -> x <= MaxRlist l. Proof. intros; induction l as [| r l Hrecl]. simpl in H; elim H. induction l as [| r0 l Hrecl0]. simpl in H; elim H; intro. simpl; right; assumption. elim H0. replace (MaxRlist (cons r (cons r0 l))) with (Rmax r (MaxRlist (cons r0 l))). simpl in H; decompose [or] H. rewrite H0; apply RmaxLess1. unfold Rmax; case (Rle_dec r (MaxRlist (cons r0 l))); intro. apply Hrecl; simpl; tauto. apply Rle_trans with (MaxRlist (cons r0 l)); [ apply Hrecl; simpl; tauto | left; auto with real ]. unfold Rmax; case (Rle_dec r (MaxRlist (cons r0 l))); intro. apply Hrecl; simpl; tauto. apply Rle_trans with (MaxRlist (cons r0 l)); [ apply Hrecl; simpl; tauto | left; auto with real ]. reflexivity. Qed. Fixpoint AbsList (l:Rlist) (x:R) : Rlist := match l with | nil => nil | cons a l' => cons (Rabs (a - x) / 2) (AbsList l' x) end. Lemma MinRlist_P1 : forall (l:Rlist) (x:R), In x l -> MinRlist l <= x. Proof. intros; induction l as [| r l Hrecl]. simpl in H; elim H. induction l as [| r0 l Hrecl0]. simpl in H; elim H; intro. simpl; right; symmetry ; assumption. elim H0. replace (MinRlist (cons r (cons r0 l))) with (Rmin r (MinRlist (cons r0 l))). simpl in H; decompose [or] H. rewrite H0; apply Rmin_l. unfold Rmin; case (Rle_dec r (MinRlist (cons r0 l))); intro. apply Rle_trans with (MinRlist (cons r0 l)). assumption. apply Hrecl; simpl; tauto. apply Hrecl; simpl; tauto. apply Rle_trans with (MinRlist (cons r0 l)). apply Rmin_r. apply Hrecl; simpl; tauto. reflexivity. Qed. Lemma AbsList_P1 : forall (l:Rlist) (x y:R), In y l -> In (Rabs (y - x) / 2) (AbsList l x). Proof. intros; induction l as [| r l Hrecl]. elim H. simpl; simpl in H; elim H; intro. left; rewrite H0; reflexivity. right; apply Hrecl; assumption. Qed. Lemma MinRlist_P2 : forall l:Rlist, (forall y:R, In y l -> 0 < y) -> 0 < MinRlist l. Proof. intros; induction l as [| r l Hrecl]. apply Rlt_0_1. induction l as [| r0 l Hrecl0]. simpl; apply H; simpl; tauto. replace (MinRlist (cons r (cons r0 l))) with (Rmin r (MinRlist (cons r0 l))). unfold Rmin; case (Rle_dec r (MinRlist (cons r0 l))); intro. 
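(* Whichever argument Rmin selects, positivity follows: either from the
   hypothesis applied to the head r, or from the induction hypothesis on the
   tail. *)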
apply H; simpl; tauto. apply Hrecl; intros; apply H; simpl; simpl in H0; tauto. reflexivity. Qed. Lemma AbsList_P2 : forall (l:Rlist) (x y:R), In y (AbsList l x) -> exists z : R, In z l /\ y = Rabs (z - x) / 2. Proof. intros; induction l as [| r l Hrecl]. elim H. elim H; intro. exists r; split. simpl; tauto. assumption. assert (H1 := Hrecl H0); elim H1; intros; elim H2; clear H2; intros; exists x0; simpl; simpl in H2; tauto. Qed. Lemma MaxRlist_P2 : forall l:Rlist, (exists y : R, In y l) -> In (MaxRlist l) l. Proof. intros; induction l as [| r l Hrecl]. simpl in H; elim H; trivial. induction l as [| r0 l Hrecl0]. simpl; left; reflexivity. change (In (Rmax r (MaxRlist (cons r0 l))) (cons r (cons r0 l))); unfold Rmax; case (Rle_dec r (MaxRlist (cons r0 l))); intro. right; apply Hrecl; exists r0; left; reflexivity. left; reflexivity. Qed. Fixpoint pos_Rl (l:Rlist) (i:nat) : R := match l with | nil => 0 | cons a l' => match i with | O => a | S i' => pos_Rl l' i' end end. Lemma pos_Rl_P1 : forall (l:Rlist) (a:R), (0 < Rlength l)%nat -> pos_Rl (cons a l) (Rlength l) = pos_Rl l (pred (Rlength l)). Proof. intros; induction l as [| r l Hrecl]; [ elim (lt_n_O _ H) | simpl; case (Rlength l); [ reflexivity | intro; reflexivity ] ]. Qed. Lemma pos_Rl_P2 : forall (l:Rlist) (x:R), In x l <-> (exists i : nat, (i < Rlength l)%nat /\ x = pos_Rl l i). Proof. intros; induction l as [| r l Hrecl]. split; intro; [ elim H | elim H; intros; elim H0; intros; elim (lt_n_O _ H1) ]. split; intro. elim H; intro. exists 0%nat; split; [ simpl; apply lt_O_Sn | simpl; apply H0 ]. elim Hrecl; intros; assert (H3 := H1 H0); elim H3; intros; elim H4; intros; exists (S x0); split; [ simpl; apply lt_n_S; assumption | simpl; assumption ]. elim H; intros; elim H0; intros; elim (zerop x0); intro. rewrite a in H2; simpl in H2; left; assumption. right; elim Hrecl; intros; apply H4; assert (H5 : S (pred x0) = x0). symmetry ; apply S_pred with 0%nat; assumption. exists (pred x0); split; [ simpl in H1; apply lt_S_n; rewrite H5; assumption | rewrite <- H5 in H2; simpl in H2; assumption ]. Qed. Lemma Rlist_P1 : forall (l:Rlist) (P:R -> R -> Prop), (forall x:R, In x l -> exists y : R, P x y) -> exists l' : Rlist, Rlength l = Rlength l' /\ (forall i:nat, (i < Rlength l)%nat -> P (pos_Rl l i) (pos_Rl l' i)). Proof. intros; induction l as [| r l Hrecl]. exists nil; intros; split; [ reflexivity | intros; simpl in H0; elim (lt_n_O _ H0) ]. assert (H0 : In r (cons r l)). simpl; left; reflexivity. assert (H1 := H _ H0); assert (H2 : forall x:R, In x l -> exists y : R, P x y). intros; apply H; simpl; right; assumption. assert (H3 := Hrecl H2); elim H1; intros; elim H3; intros; exists (cons x x0); intros; elim H5; clear H5; intros; split. simpl; rewrite H5; reflexivity. intros; elim (zerop i); intro. rewrite a; simpl; assumption. assert (H8 : i = S (pred i)). apply S_pred with 0%nat; assumption. rewrite H8; simpl; apply H6; simpl in H7; apply lt_S_n; rewrite <- H8; assumption. Qed. Definition ordered_Rlist (l:Rlist) : Prop := forall i:nat, (i < pred (Rlength l))%nat -> pos_Rl l i <= pos_Rl l (S i). Fixpoint insert (l:Rlist) (x:R) : Rlist := match l with | nil => cons x nil | cons a l' => match Rle_dec a x with | left _ => cons a (insert l' x) | right _ => cons x l end end. Fixpoint cons_Rlist (l k:Rlist) : Rlist := match l with | nil => k | cons a l' => cons a (cons_Rlist l' k) end. Fixpoint cons_ORlist (k l:Rlist) : Rlist := match k with | nil => l | cons a k' => cons_ORlist k' (insert l a) end. 
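(* Informally, insert l x walks past the elements below x and places x in front
   of the first strictly larger one, so it preserves ordered_Rlist (RList_P1
   below); cons_ORlist k l inserts every element of k into l.  For example, up
   to the propositional behaviour of Rle_dec on constants,
     insert (cons 1 (cons 3 nil)) 2 = cons 1 (cons 2 (cons 3 nil)). *)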
Fixpoint app_Rlist (l:Rlist) (f:R -> R) : Rlist := match l with | nil => nil | cons a l' => cons (f a) (app_Rlist l' f) end. Fixpoint mid_Rlist (l:Rlist) (x:R) : Rlist := match l with | nil => nil | cons a l' => cons ((x + a) / 2) (mid_Rlist l' a) end. Definition Rtail (l:Rlist) : Rlist := match l with | nil => nil | cons a l' => l' end. Definition FF (l:Rlist) (f:R -> R) : Rlist := match l with | nil => nil | cons a l' => app_Rlist (mid_Rlist l' a) f end. Lemma RList_P0 : forall (l:Rlist) (a:R), pos_Rl (insert l a) 0 = a \/ pos_Rl (insert l a) 0 = pos_Rl l 0. Proof. intros; induction l as [| r l Hrecl]; [ left; reflexivity | simpl; case (Rle_dec r a); intro; [ right; reflexivity | left; reflexivity ] ]. Qed. Lemma RList_P1 : forall (l:Rlist) (a:R), ordered_Rlist l -> ordered_Rlist (insert l a). Proof. intros; induction l as [| r l Hrecl]. simpl; unfold ordered_Rlist; intros; simpl in H0; elim (lt_n_O _ H0). simpl; case (Rle_dec r a); intro. assert (H1 : ordered_Rlist l). unfold ordered_Rlist; unfold ordered_Rlist in H; intros; assert (H1 : (S i < pred (Rlength (cons r l)))%nat); [ simpl; replace (Rlength l) with (S (pred (Rlength l))); [ apply lt_n_S; assumption | symmetry ; apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H1 in H0; simpl in H0; elim (lt_n_O _ H0) ] | apply (H _ H1) ]. assert (H2 := Hrecl H1); unfold ordered_Rlist; intros; induction i as [| i Hreci]. simpl; assert (H3 := RList_P0 l a); elim H3; intro. rewrite H4; assumption. induction l as [| r1 l Hrecl0]; [ simpl; assumption | rewrite H4; apply (H 0%nat); simpl; apply lt_O_Sn ]. simpl; apply H2; simpl in H0; apply lt_S_n; replace (S (pred (Rlength (insert l a)))) with (Rlength (insert l a)); [ assumption | apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H3 in H0; elim (lt_n_O _ H0) ]. unfold ordered_Rlist; intros; induction i as [| i Hreci]; [ simpl; auto with real | change (pos_Rl (cons r l) i <= pos_Rl (cons r l) (S i)); apply H; simpl in H0; simpl; apply (lt_S_n _ _ H0) ]. Qed. Lemma RList_P2 : forall l1 l2:Rlist, ordered_Rlist l2 -> ordered_Rlist (cons_ORlist l1 l2). Proof. simple induction l1; [ intros; simpl; apply H | intros; simpl; apply H; apply RList_P1; assumption ]. Qed. Lemma RList_P3 : forall (l:Rlist) (x:R), In x l <-> (exists i : nat, x = pos_Rl l i /\ (i < Rlength l)%nat). Proof. intros; split; intro; [ induction l as [| r l Hrecl] | induction l as [| r l Hrecl] ]. elim H. elim H; intro; [ exists 0%nat; split; [ apply H0 | simpl; apply lt_O_Sn ] | elim (Hrecl H0); intros; elim H1; clear H1; intros; exists (S x0); split; [ apply H1 | simpl; apply lt_n_S; assumption ] ]. elim H; intros; elim H0; intros; elim (lt_n_O _ H2). simpl; elim H; intros; elim H0; clear H0; intros; induction x0 as [| x0 Hrecx0]; [ left; apply H0 | right; apply Hrecl; exists x0; split; [ apply H0 | simpl in H1; apply lt_S_n; assumption ] ]. Qed. Lemma RList_P4 : forall (l1:Rlist) (a:R), ordered_Rlist (cons a l1) -> ordered_Rlist l1. Proof. intros; unfold ordered_Rlist; intros; apply (H (S i)); simpl; replace (Rlength l1) with (S (pred (Rlength l1))); [ apply lt_n_S; assumption | symmetry ; apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H1 in H0; elim (lt_n_O _ H0) ]. Qed. Lemma RList_P5 : forall (l:Rlist) (x:R), ordered_Rlist l -> In x l -> pos_Rl l 0 <= x. Proof. 
intros; induction l as [| r l Hrecl]; [ elim H0 | simpl; elim H0; intro; [ rewrite H1; right; reflexivity | apply Rle_trans with (pos_Rl l 0); [ apply (H 0%nat); simpl; induction l as [| r0 l Hrecl0]; [ elim H1 | simpl; apply lt_O_Sn ] | apply Hrecl; [ eapply RList_P4; apply H | assumption ] ] ] ]. Qed. Lemma RList_P6 : forall l:Rlist, ordered_Rlist l <-> (forall i j:nat, (i <= j)%nat -> (j < Rlength l)%nat -> pos_Rl l i <= pos_Rl l j). Proof. simple induction l; split; intro. intros; right; reflexivity. unfold ordered_Rlist; intros; simpl in H0; elim (lt_n_O _ H0). intros; induction i as [| i Hreci]; [ induction j as [| j Hrecj]; [ right; reflexivity | simpl; apply Rle_trans with (pos_Rl r0 0); [ apply (H0 0%nat); simpl; simpl in H2; apply neq_O_lt; red; intro; rewrite <- H3 in H2; assert (H4 := lt_S_n _ _ H2); elim (lt_n_O _ H4) | elim H; intros; apply H3; [ apply RList_P4 with r; assumption | apply le_O_n | simpl in H2; apply lt_S_n; assumption ] ] ] | induction j as [| j Hrecj]; [ elim (le_Sn_O _ H1) | simpl; elim H; intros; apply H3; [ apply RList_P4 with r; assumption | apply le_S_n; assumption | simpl in H2; apply lt_S_n; assumption ] ] ]. unfold ordered_Rlist; intros; apply H0; [ apply le_n_Sn | simpl; simpl in H1; apply lt_n_S; assumption ]. Qed. Lemma RList_P7 : forall (l:Rlist) (x:R), ordered_Rlist l -> In x l -> x <= pos_Rl l (pred (Rlength l)). Proof. intros; assert (H1 := RList_P6 l); elim H1; intros H2 _; assert (H3 := H2 H); clear H1 H2; assert (H1 := RList_P3 l x); elim H1; clear H1; intros; assert (H4 := H1 H0); elim H4; clear H4; intros; elim H4; clear H4; intros; rewrite H4; assert (H6 : Rlength l = S (pred (Rlength l))). apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H6 in H5; elim (lt_n_O _ H5). apply H3; [ rewrite H6 in H5; apply lt_n_Sm_le; assumption | apply lt_pred_n_n; apply neq_O_lt; red; intro; rewrite <- H7 in H5; elim (lt_n_O _ H5) ]. Qed. Lemma RList_P8 : forall (l:Rlist) (a x:R), In x (insert l a) <-> x = a \/ In x l. Proof. simple induction l. intros; split; intro; simpl in H; apply H. intros; split; intro; [ simpl in H0; generalize H0; case (Rle_dec r a); intros; [ simpl in H1; elim H1; intro; [ right; left; assumption | elim (H a x); intros; elim (H3 H2); intro; [ left; assumption | right; right; assumption ] ] | simpl in H1; decompose [or] H1; [ left; assumption | right; left; assumption | right; right; assumption ] ] | simpl; case (Rle_dec r a); intro; [ simpl in H0; decompose [or] H0; [ right; elim (H a x); intros; apply H3; left | left | right; elim (H a x); intros; apply H3; right ] | simpl in H0; decompose [or] H0; [ left | right; left | right; right ] ]; assumption ]. Qed. Lemma RList_P9 : forall (l1 l2:Rlist) (x:R), In x (cons_ORlist l1 l2) <-> In x l1 \/ In x l2. Proof. simple induction l1. intros; split; intro; [ simpl in H; right; assumption | simpl; elim H; intro; [ elim H0 | assumption ] ]. intros; split. simpl; intros; elim (H (insert l2 r) x); intros; assert (H3 := H1 H0); elim H3; intro; [ left; right; assumption | elim (RList_P8 l2 r x); intros H5 _; assert (H6 := H5 H4); elim H6; intro; [ left; left; assumption | right; assumption ] ]. intro; simpl; elim (H (insert l2 r) x); intros _ H1; apply H1; elim H0; intro; [ elim H2; intro; [ right; elim (RList_P8 l2 r x); intros _ H4; apply H4; left; assumption | left; assumption ] | right; elim (RList_P8 l2 r x); intros _ H3; apply H3; right; assumption ]. Qed. Lemma RList_P10 : forall (l:Rlist) (a:R), Rlength (insert l a) = S (Rlength l). Proof. 
intros; induction l as [| r l Hrecl]; [ reflexivity | simpl; case (Rle_dec r a); intro; [ simpl; rewrite Hrecl; reflexivity | reflexivity ] ]. Qed. Lemma RList_P11 : forall l1 l2:Rlist, Rlength (cons_ORlist l1 l2) = (Rlength l1 + Rlength l2)%nat. Proof. simple induction l1; [ intro; reflexivity | intros; simpl; rewrite (H (insert l2 r)); rewrite RList_P10; apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; rewrite S_INR; ring ]. Qed. Lemma RList_P12 : forall (l:Rlist) (i:nat) (f:R -> R), (i < Rlength l)%nat -> pos_Rl (app_Rlist l f) i = f (pos_Rl l i). Proof. simple induction l; [ intros; elim (lt_n_O _ H) | intros; induction i as [| i Hreci]; [ reflexivity | simpl; apply H; apply lt_S_n; apply H0 ] ]. Qed. Lemma RList_P13 : forall (l:Rlist) (i:nat) (a:R), (i < pred (Rlength l))%nat -> pos_Rl (mid_Rlist l a) (S i) = (pos_Rl l i + pos_Rl l (S i)) / 2. Proof. simple induction l. intros; simpl in H; elim (lt_n_O _ H). simple induction r0. intros; simpl in H0; elim (lt_n_O _ H0). intros; simpl in H1; induction i as [| i Hreci]. reflexivity. change (pos_Rl (mid_Rlist (cons r1 r2) r) (S i) = (pos_Rl (cons r1 r2) i + pos_Rl (cons r1 r2) (S i)) / 2) ; apply H0; simpl; apply lt_S_n; assumption. Qed. Lemma RList_P14 : forall (l:Rlist) (a:R), Rlength (mid_Rlist l a) = Rlength l. Proof. simple induction l; intros; [ reflexivity | simpl; rewrite (H r); reflexivity ]. Qed. Lemma RList_P15 : forall l1 l2:Rlist, ordered_Rlist l1 -> ordered_Rlist l2 -> pos_Rl l1 0 = pos_Rl l2 0 -> pos_Rl (cons_ORlist l1 l2) 0 = pos_Rl l1 0. Proof. intros; apply Rle_antisym. induction l1 as [| r l1 Hrecl1]; [ simpl; simpl in H1; right; symmetry ; assumption | elim (RList_P9 (cons r l1) l2 (pos_Rl (cons r l1) 0)); intros; assert (H4 : In (pos_Rl (cons r l1) 0) (cons r l1) \/ In (pos_Rl (cons r l1) 0) l2); [ left; left; reflexivity | assert (H5 := H3 H4); apply RList_P5; [ apply RList_P2; assumption | assumption ] ] ]. induction l1 as [| r l1 Hrecl1]; [ simpl; simpl in H1; right; assumption | assert (H2 : In (pos_Rl (cons_ORlist (cons r l1) l2) 0) (cons_ORlist (cons r l1) l2)); [ elim (RList_P3 (cons_ORlist (cons r l1) l2) (pos_Rl (cons_ORlist (cons r l1) l2) 0)); intros; apply H3; exists 0%nat; split; [ reflexivity | rewrite RList_P11; simpl; apply lt_O_Sn ] | elim (RList_P9 (cons r l1) l2 (pos_Rl (cons_ORlist (cons r l1) l2) 0)); intros; assert (H5 := H3 H2); elim H5; intro; [ apply RList_P5; assumption | rewrite H1; apply RList_P5; assumption ] ] ]. Qed. Lemma RList_P16 : forall l1 l2:Rlist, ordered_Rlist l1 -> ordered_Rlist l2 -> pos_Rl l1 (pred (Rlength l1)) = pos_Rl l2 (pred (Rlength l2)) -> pos_Rl (cons_ORlist l1 l2) (pred (Rlength (cons_ORlist l1 l2))) = pos_Rl l1 (pred (Rlength l1)). Proof. intros; apply Rle_antisym. induction l1 as [| r l1 Hrecl1]. simpl; simpl in H1; right; symmetry ; assumption. assert (H2 : In (pos_Rl (cons_ORlist (cons r l1) l2) (pred (Rlength (cons_ORlist (cons r l1) l2)))) (cons_ORlist (cons r l1) l2)); [ elim (RList_P3 (cons_ORlist (cons r l1) l2) (pos_Rl (cons_ORlist (cons r l1) l2) (pred (Rlength (cons_ORlist (cons r l1) l2))))); intros; apply H3; exists (pred (Rlength (cons_ORlist (cons r l1) l2))); split; [ reflexivity | rewrite RList_P11; simpl; apply lt_n_Sn ] | elim (RList_P9 (cons r l1) l2 (pos_Rl (cons_ORlist (cons r l1) l2) (pred (Rlength (cons_ORlist (cons r l1) l2))))); intros; assert (H5 := H3 H2); elim H5; intro; [ apply RList_P7; assumption | rewrite H1; apply RList_P7; assumption ] ]. induction l1 as [| r l1 Hrecl1]. simpl; simpl in H1; right; assumption. 
elim (RList_P9 (cons r l1) l2 (pos_Rl (cons r l1) (pred (Rlength (cons r l1))))); intros; assert (H4 : In (pos_Rl (cons r l1) (pred (Rlength (cons r l1)))) (cons r l1) \/ In (pos_Rl (cons r l1) (pred (Rlength (cons r l1)))) l2); [ left; change (In (pos_Rl (cons r l1) (Rlength l1)) (cons r l1)); elim (RList_P3 (cons r l1) (pos_Rl (cons r l1) (Rlength l1))); intros; apply H5; exists (Rlength l1); split; [ reflexivity | simpl; apply lt_n_Sn ] | assert (H5 := H3 H4); apply RList_P7; [ apply RList_P2; assumption | elim (RList_P9 (cons r l1) l2 (pos_Rl (cons r l1) (pred (Rlength (cons r l1))))); intros; apply H7; left; elim (RList_P3 (cons r l1) (pos_Rl (cons r l1) (pred (Rlength (cons r l1))))); intros; apply H9; exists (pred (Rlength (cons r l1))); split; [ reflexivity | simpl; apply lt_n_Sn ] ] ]. Qed. Lemma RList_P17 : forall (l1:Rlist) (x:R) (i:nat), ordered_Rlist l1 -> In x l1 -> pos_Rl l1 i < x -> (i < pred (Rlength l1))%nat -> pos_Rl l1 (S i) <= x. Proof. simple induction l1. intros; elim H0. intros; induction i as [| i Hreci]. simpl; elim H1; intro; [ simpl in H2; rewrite H4 in H2; elim (Rlt_irrefl _ H2) | apply RList_P5; [ apply RList_P4 with r; assumption | assumption ] ]. simpl; simpl in H2; elim H1; intro. rewrite H4 in H2; assert (H5 : r <= pos_Rl r0 i); [ apply Rle_trans with (pos_Rl r0 0); [ apply (H0 0%nat); simpl; simpl in H3; apply neq_O_lt; red; intro; rewrite <- H5 in H3; elim (lt_n_O _ H3) | elim (RList_P6 r0); intros; apply H5; [ apply RList_P4 with r; assumption | apply le_O_n | simpl in H3; apply lt_S_n; apply lt_trans with (Rlength r0); [ apply H3 | apply lt_n_Sn ] ] ] | elim (Rlt_irrefl _ (Rle_lt_trans _ _ _ H5 H2)) ]. apply H; try assumption; [ apply RList_P4 with r; assumption | simpl in H3; apply lt_S_n; replace (S (pred (Rlength r0))) with (Rlength r0); [ apply H3 | apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H5 in H3; elim (lt_n_O _ H3) ] ]. Qed. Lemma RList_P18 : forall (l:Rlist) (f:R -> R), Rlength (app_Rlist l f) = Rlength l. Proof. simple induction l; intros; [ reflexivity | simpl; rewrite H; reflexivity ]. Qed. Lemma RList_P19 : forall l:Rlist, l <> nil -> exists r : R, (exists r0 : Rlist, l = cons r r0). Proof. intros; induction l as [| r l Hrecl]; [ elim H; reflexivity | exists r; exists l; reflexivity ]. Qed. Lemma RList_P20 : forall l:Rlist, (2 <= Rlength l)%nat -> exists r : R, (exists r1 : R, (exists l' : Rlist, l = cons r (cons r1 l'))). Proof. intros; induction l as [| r l Hrecl]; [ simpl in H; elim (le_Sn_O _ H) | induction l as [| r0 l Hrecl0]; [ simpl in H; elim (le_Sn_O _ (le_S_n _ _ H)) | exists r; exists r0; exists l; reflexivity ] ]. Qed. Lemma RList_P21 : forall l l':Rlist, l = l' -> Rtail l = Rtail l'. Proof. intros; rewrite H; reflexivity. Qed. Lemma RList_P22 : forall l1 l2:Rlist, l1 <> nil -> pos_Rl (cons_Rlist l1 l2) 0 = pos_Rl l1 0. Proof. simple induction l1; [ intros; elim H; reflexivity | intros; reflexivity ]. Qed. Lemma RList_P23 : forall l1 l2:Rlist, Rlength (cons_Rlist l1 l2) = (Rlength l1 + Rlength l2)%nat. Proof. simple induction l1; [ intro; reflexivity | intros; simpl; rewrite H; reflexivity ]. Qed. Lemma RList_P24 : forall l1 l2:Rlist, l2 <> nil -> pos_Rl (cons_Rlist l1 l2) (pred (Rlength (cons_Rlist l1 l2))) = pos_Rl l2 (pred (Rlength l2)). Proof. simple induction l1. intros; reflexivity. intros; rewrite <- (H l2 H0); induction l2 as [| r1 l2 Hrecl2]. elim H0; reflexivity. 
do 2 rewrite RList_P23; replace (Rlength (cons r r0) + Rlength (cons r1 l2))%nat with (S (S (Rlength r0 + Rlength l2))); [ replace (Rlength r0 + Rlength (cons r1 l2))%nat with (S (Rlength r0 + Rlength l2)); [ reflexivity | simpl; apply INR_eq; rewrite S_INR; do 2 rewrite plus_INR; rewrite S_INR; ring ] | simpl; apply INR_eq; do 3 rewrite S_INR; do 2 rewrite plus_INR; rewrite S_INR; ring ]. Qed. Lemma RList_P25 : forall l1 l2:Rlist, ordered_Rlist l1 -> ordered_Rlist l2 -> pos_Rl l1 (pred (Rlength l1)) <= pos_Rl l2 0 -> ordered_Rlist (cons_Rlist l1 l2). Proof. simple induction l1. intros; simpl; assumption. simple induction r0. intros; simpl; simpl in H2; unfold ordered_Rlist; intros; simpl in H3. induction i as [| i Hreci]. simpl; assumption. change (pos_Rl l2 i <= pos_Rl l2 (S i)); apply (H1 i); apply lt_S_n; replace (S (pred (Rlength l2))) with (Rlength l2); [ assumption | apply S_pred with 0%nat; apply neq_O_lt; red; intro; rewrite <- H4 in H3; elim (lt_n_O _ H3) ]. intros; clear H; assert (H : ordered_Rlist (cons_Rlist (cons r1 r2) l2)). apply H0; try assumption. apply RList_P4 with r; assumption. unfold ordered_Rlist; intros; simpl in H4; induction i as [| i Hreci]. simpl; apply (H1 0%nat); simpl; apply lt_O_Sn. change (pos_Rl (cons_Rlist (cons r1 r2) l2) i <= pos_Rl (cons_Rlist (cons r1 r2) l2) (S i)); apply (H i); simpl; apply lt_S_n; assumption. Qed. Lemma RList_P26 : forall (l1 l2:Rlist) (i:nat), (i < Rlength l1)%nat -> pos_Rl (cons_Rlist l1 l2) i = pos_Rl l1 i. Proof. simple induction l1. intros; elim (lt_n_O _ H). intros; induction i as [| i Hreci]. apply RList_P22; discriminate. apply (H l2 i); simpl in H0; apply lt_S_n; assumption. Qed. Lemma RList_P27 : forall l1 l2 l3:Rlist, cons_Rlist l1 (cons_Rlist l2 l3) = cons_Rlist (cons_Rlist l1 l2) l3. Proof. simple induction l1; intros; [ reflexivity | simpl; rewrite (H l2 l3); reflexivity ]. Qed. Lemma RList_P28 : forall l:Rlist, cons_Rlist l nil = l. Proof. simple induction l; [ reflexivity | intros; simpl; rewrite H; reflexivity ]. Qed. Lemma RList_P29 : forall (l2 l1:Rlist) (i:nat), (Rlength l1 <= i)%nat -> (i < Rlength (cons_Rlist l1 l2))%nat -> pos_Rl (cons_Rlist l1 l2) i = pos_Rl l2 (i - Rlength l1). Proof. simple induction l2. intros; rewrite RList_P28 in H0; elim (lt_irrefl _ (le_lt_trans _ _ _ H H0)). intros; replace (cons_Rlist l1 (cons r r0)) with (cons_Rlist (cons_Rlist l1 (cons r nil)) r0). inversion H0. rewrite <- minus_n_n; simpl; rewrite RList_P26. clear l2 r0 H i H0 H1 H2; induction l1 as [| r0 l1 Hrecl1]. reflexivity. simpl; assumption. rewrite RList_P23; rewrite plus_comm; simpl; apply lt_n_Sn. replace (S m - Rlength l1)%nat with (S (S m - S (Rlength l1))). rewrite H3; simpl; replace (S (Rlength l1)) with (Rlength (cons_Rlist l1 (cons r nil))). apply (H (cons_Rlist l1 (cons r nil)) i). rewrite RList_P23; rewrite plus_comm; simpl; rewrite <- H3; apply le_n_S; assumption. repeat rewrite RList_P23; simpl; rewrite RList_P23 in H1; rewrite plus_comm in H1; simpl in H1; rewrite (plus_comm (Rlength l1)); simpl; rewrite plus_comm; apply H1. rewrite RList_P23; rewrite plus_comm; reflexivity. change (S (m - Rlength l1) = (S m - Rlength l1)%nat); apply minus_Sn_m; assumption. replace (cons r r0) with (cons_Rlist (cons r nil) r0); [ symmetry ; apply RList_P27 | reflexivity ]. Qed. 
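(* Added example (illustrative, not part of the original file): in contrast
   to [insert], the functions [pos_Rl], [Rlength] and [cons_Rlist] are plain
   structural recursions, so facts about concrete lists follow by
   computation alone. *)
Example pos_Rl_cons_Rlist_example :
  pos_Rl (cons_Rlist (cons 2 nil) (cons 3 nil)) 1%nat = 3.
Proof. reflexivity. Qed.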
coq-8.4pl2/theories/Reals/SplitAbsolu.v0000640000175000001440000000157412010532755017156 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* case (Rcase_abs X1); try split_case_Rabs end. Ltac split_Rabs := match goal with | id:context [(Rabs _)] |- _ => generalize id; clear id; try split_Rabs | |- context [(Rabs ?X1)] => unfold Rabs; try split_case_Rabs; intros end. coq-8.4pl2/theories/Reals/Ranalysis_reg.v0000640000175000001440000007171612010532755017524 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* match goal with | |- (derivable _) => intro_hyp_glob X1; intro_hyp_glob X2 | |- (continuity _) => intro_hyp_glob X1; intro_hyp_glob X2 | _ => idtac end | (?X1 - ?X2)%F => match goal with | |- (derivable _) => intro_hyp_glob X1; intro_hyp_glob X2 | |- (continuity _) => intro_hyp_glob X1; intro_hyp_glob X2 | _ => idtac end | (?X1 * ?X2)%F => match goal with | |- (derivable _) => intro_hyp_glob X1; intro_hyp_glob X2 | |- (continuity _) => intro_hyp_glob X1; intro_hyp_glob X2 | _ => idtac end | (?X1 / ?X2)%F => let aux := constr:X2 in match goal with | _:(forall x0:R, aux x0 <> 0) |- (derivable _) => intro_hyp_glob X1; intro_hyp_glob X2 | _:(forall x0:R, aux x0 <> 0) |- (continuity _) => intro_hyp_glob X1; intro_hyp_glob X2 | |- (derivable _) => cut (forall x0:R, aux x0 <> 0); [ intro; intro_hyp_glob X1; intro_hyp_glob X2 | try assumption ] | |- (continuity _) => cut (forall x0:R, aux x0 <> 0); [ intro; intro_hyp_glob X1; intro_hyp_glob X2 | try assumption ] | _ => idtac end | (comp ?X1 ?X2) => match goal with | |- (derivable _) => intro_hyp_glob X1; intro_hyp_glob X2 | |- (continuity _) => intro_hyp_glob X1; intro_hyp_glob X2 | _ => idtac end | (- ?X1)%F => match goal with | |- (derivable _) => intro_hyp_glob X1 | |- (continuity _) => intro_hyp_glob X1 | _ => idtac end | (/ ?X1)%F => let aux := constr:X1 in match goal with | _:(forall x0:R, aux x0 <> 0) |- (derivable _) => intro_hyp_glob X1 | _:(forall x0:R, aux x0 <> 0) |- (continuity _) => intro_hyp_glob X1 | |- (derivable _) => cut (forall x0:R, aux x0 <> 0); [ intro; intro_hyp_glob X1 | try assumption ] | |- (continuity _) => cut (forall x0:R, aux x0 <> 0); [ intro; intro_hyp_glob X1 | try assumption ] | _ => idtac end | cos => idtac | sin => idtac | cosh => idtac | sinh => idtac | exp => idtac | Rsqr => idtac | sqrt => idtac | id => idtac | (fct_cte _) => idtac | (pow_fct _) => idtac | Rabs => idtac | ?X1 => let p := constr:X1 in match goal with | _:(derivable p) |- _ => idtac | |- (derivable p) => idtac | |- (derivable _) => cut (True -> derivable p); [ intro HYPPD; cut (derivable p); [ intro; clear HYPPD | apply HYPPD; clear HYPPD; trivial ] | idtac ] | _:(continuity p) |- _ => idtac | |- (continuity p) => idtac | |- (continuity _) => cut (True -> continuity p); [ intro HYPPD; cut (continuity p); [ intro; clear HYPPD | apply HYPPD; clear HYPPD; trivial ] | idtac ] | _ => idtac end end. 
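(* Added note (informal description, not part of the original file):
   [intro_hyp_glob] traverses the syntactic structure of a function
   expression and collects the hypotheses that the differentiation tactics
   below cannot establish on their own.  Roughly, for a goal such as
     derivable (f / g)%F
   it looks for a hypothesis (forall x0:R, g x0 <> 0) and otherwise opens it
   as a side goal, and for an atomic unknown function f it sets up a pending
   [derivable f] (or [continuity f]) obligation, to be consumed by the [reg]
   tactic defined at the end of this file. *)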
(**********) Ltac intro_hyp_pt trm pt := match constr:trm with | (?X1 + ?X2)%F => match goal with | |- (derivable_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (continuity_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (derive_pt _ _ _ = _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | _ => idtac end | (?X1 - ?X2)%F => match goal with | |- (derivable_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (continuity_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (derive_pt _ _ _ = _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | _ => idtac end | (?X1 * ?X2)%F => match goal with | |- (derivable_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (continuity_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (derive_pt _ _ _ = _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | _ => idtac end | (?X1 / ?X2)%F => let aux := constr:X2 in match goal with | _:(aux pt <> 0) |- (derivable_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | _:(aux pt <> 0) |- (continuity_pt _ _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | _:(aux pt <> 0) |- (derive_pt _ _ _ = _) => intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | id:(forall x0:R, aux x0 <> 0) |- (derivable_pt _ _) => generalize (id pt); intro; intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | id:(forall x0:R, aux x0 <> 0) |- (continuity_pt _ _) => generalize (id pt); intro; intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | id:(forall x0:R, aux x0 <> 0) |- (derive_pt _ _ _ = _) => generalize (id pt); intro; intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | |- (derivable_pt _ _) => cut (aux pt <> 0); [ intro; intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | try assumption ] | |- (continuity_pt _ _) => cut (aux pt <> 0); [ intro; intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | try assumption ] | |- (derive_pt _ _ _ = _) => cut (aux pt <> 0); [ intro; intro_hyp_pt X1 pt; intro_hyp_pt X2 pt | try assumption ] | _ => idtac end | (comp ?X1 ?X2) => match goal with | |- (derivable_pt _ _) => let pt_f1 := eval cbv beta in (X2 pt) in (intro_hyp_pt X1 pt_f1; intro_hyp_pt X2 pt) | |- (continuity_pt _ _) => let pt_f1 := eval cbv beta in (X2 pt) in (intro_hyp_pt X1 pt_f1; intro_hyp_pt X2 pt) | |- (derive_pt _ _ _ = _) => let pt_f1 := eval cbv beta in (X2 pt) in (intro_hyp_pt X1 pt_f1; intro_hyp_pt X2 pt) | _ => idtac end | (- ?X1)%F => match goal with | |- (derivable_pt _ _) => intro_hyp_pt X1 pt | |- (continuity_pt _ _) => intro_hyp_pt X1 pt | |- (derive_pt _ _ _ = _) => intro_hyp_pt X1 pt | _ => idtac end | (/ ?X1)%F => let aux := constr:X1 in match goal with | _:(aux pt <> 0) |- (derivable_pt _ _) => intro_hyp_pt X1 pt | _:(aux pt <> 0) |- (continuity_pt _ _) => intro_hyp_pt X1 pt | _:(aux pt <> 0) |- (derive_pt _ _ _ = _) => intro_hyp_pt X1 pt | id:(forall x0:R, aux x0 <> 0) |- (derivable_pt _ _) => generalize (id pt); intro; intro_hyp_pt X1 pt | id:(forall x0:R, aux x0 <> 0) |- (continuity_pt _ _) => generalize (id pt); intro; intro_hyp_pt X1 pt | id:(forall x0:R, aux x0 <> 0) |- (derive_pt _ _ _ = _) => generalize (id pt); intro; intro_hyp_pt X1 pt | |- (derivable_pt _ _) => cut (aux pt <> 0); [ intro; intro_hyp_pt X1 pt | try assumption ] | |- (continuity_pt _ _) => cut (aux pt <> 0); [ intro; intro_hyp_pt X1 pt | try assumption ] | |- (derive_pt _ _ _ = _) => cut (aux pt <> 0); [ intro; intro_hyp_pt X1 pt | try assumption ] | _ => idtac end | cos => idtac | sin => idtac | cosh => idtac | sinh => idtac | exp => idtac | Rsqr => idtac | id => idtac | (fct_cte _) => idtac | (pow_fct _) => idtac | sqrt => match goal with | |- (derivable_pt _ _) => cut (0 < pt); [ intro | try assumption ] 
| |- (continuity_pt _ _) => cut (0 <= pt); [ intro | try assumption ] | |- (derive_pt _ _ _ = _) => cut (0 < pt); [ intro | try assumption ] | _ => idtac end | Rabs => match goal with | |- (derivable_pt _ _) => cut (pt <> 0); [ intro | try assumption ] | _ => idtac end | ?X1 => let p := constr:X1 in match goal with | _:(derivable_pt p pt) |- _ => idtac | |- (derivable_pt p pt) => idtac | |- (derivable_pt _ _) => cut (True -> derivable_pt p pt); [ intro HYPPD; cut (derivable_pt p pt); [ intro; clear HYPPD | apply HYPPD; clear HYPPD; trivial ] | idtac ] | _:(continuity_pt p pt) |- _ => idtac | |- (continuity_pt p pt) => idtac | |- (continuity_pt _ _) => cut (True -> continuity_pt p pt); [ intro HYPPD; cut (continuity_pt p pt); [ intro; clear HYPPD | apply HYPPD; clear HYPPD; trivial ] | idtac ] | |- (derive_pt _ _ _ = _) => cut (True -> derivable_pt p pt); [ intro HYPPD; cut (derivable_pt p pt); [ intro; clear HYPPD | apply HYPPD; clear HYPPD; trivial ] | idtac ] | _ => idtac end end. (**********) Ltac is_diff_pt := match goal with | |- (derivable_pt Rsqr _) => (* fonctions de base *) apply derivable_pt_Rsqr | |- (derivable_pt id ?X1) => apply (derivable_pt_id X1) | |- (derivable_pt (fct_cte _) _) => apply derivable_pt_const | |- (derivable_pt sin _) => apply derivable_pt_sin | |- (derivable_pt cos _) => apply derivable_pt_cos | |- (derivable_pt sinh _) => apply derivable_pt_sinh | |- (derivable_pt cosh _) => apply derivable_pt_cosh | |- (derivable_pt exp _) => apply derivable_pt_exp | |- (derivable_pt (pow_fct _) _) => unfold pow_fct in |- *; apply derivable_pt_pow | |- (derivable_pt sqrt ?X1) => apply (derivable_pt_sqrt X1); assumption || unfold plus_fct, minus_fct, opp_fct, mult_fct, div_fct, inv_fct, comp, id, fct_cte, pow_fct in |- * | |- (derivable_pt Rabs ?X1) => apply (Rderivable_pt_abs X1); assumption || unfold plus_fct, minus_fct, opp_fct, mult_fct, div_fct, inv_fct, comp, id, fct_cte, pow_fct in |- * (* regles de differentiabilite *) (* PLUS *) | |- (derivable_pt (?X1 + ?X2) ?X3) => apply (derivable_pt_plus X1 X2 X3); is_diff_pt (* MOINS *) | |- (derivable_pt (?X1 - ?X2) ?X3) => apply (derivable_pt_minus X1 X2 X3); is_diff_pt (* OPPOSE *) | |- (derivable_pt (- ?X1) ?X2) => apply (derivable_pt_opp X1 X2); is_diff_pt (* MULTIPLICATION PAR UN SCALAIRE *) | |- (derivable_pt (mult_real_fct ?X1 ?X2) ?X3) => apply (derivable_pt_scal X2 X1 X3); is_diff_pt (* MULTIPLICATION *) | |- (derivable_pt (?X1 * ?X2) ?X3) => apply (derivable_pt_mult X1 X2 X3); is_diff_pt (* DIVISION *) | |- (derivable_pt (?X1 / ?X2) ?X3) => apply (derivable_pt_div X1 X2 X3); [ is_diff_pt | is_diff_pt | try assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, comp, pow_fct, id, fct_cte in |- * ] | |- (derivable_pt (/ ?X1) ?X2) => (* INVERSION *) apply (derivable_pt_inv X1 X2); [ assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, comp, pow_fct, id, fct_cte in |- * | is_diff_pt ] | |- (derivable_pt (comp ?X1 ?X2) ?X3) => (* COMPOSITION *) apply (derivable_pt_comp X2 X1 X3); is_diff_pt | _:(derivable_pt ?X1 ?X2) |- (derivable_pt ?X1 ?X2) => assumption | _:(derivable ?X1) |- (derivable_pt ?X1 ?X2) => cut (derivable X1); [ intro HypDDPT; apply HypDDPT | assumption ] | |- (True -> derivable_pt _ _) => intro HypTruE; clear HypTruE; is_diff_pt | _ => try unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, comp, pow_fct in |- * end. 
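(* Added usage sketch (not part of the original file): [is_diff_pt] closes
   pointwise derivability goals built from the rules above.  For instance a
   goal of the form
     derivable_pt (sin * exp)%F x
   is solved by apply (derivable_pt_mult sin exp x) followed by
   [derivable_pt_sin] and [derivable_pt_exp], which is exactly the chain of
   applications performed here; conditions such as a non-zero denominator or
   0 < x for [sqrt] are taken from the context or left as separate goals. *)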
(**********) Ltac is_diff_glob := match goal with | |- (derivable Rsqr) => (* fonctions de base *) apply derivable_Rsqr | |- (derivable id) => apply derivable_id | |- (derivable (fct_cte _)) => apply derivable_const | |- (derivable sin) => apply derivable_sin | |- (derivable cos) => apply derivable_cos | |- (derivable cosh) => apply derivable_cosh | |- (derivable sinh) => apply derivable_sinh | |- (derivable exp) => apply derivable_exp | |- (derivable (pow_fct _)) => unfold pow_fct in |- *; apply derivable_pow (* regles de differentiabilite *) (* PLUS *) | |- (derivable (?X1 + ?X2)) => apply (derivable_plus X1 X2); is_diff_glob (* MOINS *) | |- (derivable (?X1 - ?X2)) => apply (derivable_minus X1 X2); is_diff_glob (* OPPOSE *) | |- (derivable (- ?X1)) => apply (derivable_opp X1); is_diff_glob (* MULTIPLICATION PAR UN SCALAIRE *) | |- (derivable (mult_real_fct ?X1 ?X2)) => apply (derivable_scal X2 X1); is_diff_glob (* MULTIPLICATION *) | |- (derivable (?X1 * ?X2)) => apply (derivable_mult X1 X2); is_diff_glob (* DIVISION *) | |- (derivable (?X1 / ?X2)) => apply (derivable_div X1 X2); [ is_diff_glob | is_diff_glob | try assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, comp, pow_fct in |- * ] | |- (derivable (/ ?X1)) => (* INVERSION *) apply (derivable_inv X1); [ try assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, comp, pow_fct in |- * | is_diff_glob ] | |- (derivable (comp sqrt _)) => (* COMPOSITION *) unfold derivable in |- *; intro; try is_diff_pt | |- (derivable (comp Rabs _)) => unfold derivable in |- *; intro; try is_diff_pt | |- (derivable (comp ?X1 ?X2)) => apply (derivable_comp X2 X1); is_diff_glob | _:(derivable ?X1) |- (derivable ?X1) => assumption | |- (True -> derivable _) => intro HypTruE; clear HypTruE; is_diff_glob | _ => try unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, comp, pow_fct in |- * end. 
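(* Added usage sketch (not part of the original file): [is_diff_glob] is the
   global counterpart of [is_diff_pt].  A goal
     derivable (Rsqr + fct_cte 2)%F,
   for example, is closed by apply (derivable_plus Rsqr (fct_cte 2)) and the
   base lemmas [derivable_Rsqr] and [derivable_const].  Compositions headed
   by [sqrt] or [Rabs] are handled by unfolding [derivable] and falling back
   to the pointwise tactic, since those functions are only derivable away
   from particular points (0 < x for [sqrt], x <> 0 for [Rabs]). *)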
(**********) Ltac is_cont_pt := match goal with | |- (continuity_pt Rsqr _) => (* fonctions de base *) apply derivable_continuous_pt; apply derivable_pt_Rsqr | |- (continuity_pt id ?X1) => apply derivable_continuous_pt; apply (derivable_pt_id X1) | |- (continuity_pt (fct_cte _) _) => apply derivable_continuous_pt; apply derivable_pt_const | |- (continuity_pt sin _) => apply derivable_continuous_pt; apply derivable_pt_sin | |- (continuity_pt cos _) => apply derivable_continuous_pt; apply derivable_pt_cos | |- (continuity_pt sinh _) => apply derivable_continuous_pt; apply derivable_pt_sinh | |- (continuity_pt cosh _) => apply derivable_continuous_pt; apply derivable_pt_cosh | |- (continuity_pt exp _) => apply derivable_continuous_pt; apply derivable_pt_exp | |- (continuity_pt (pow_fct _) _) => unfold pow_fct in |- *; apply derivable_continuous_pt; apply derivable_pt_pow | |- (continuity_pt sqrt ?X1) => apply continuity_pt_sqrt; assumption || unfold plus_fct, minus_fct, opp_fct, mult_fct, div_fct, inv_fct, comp, id, fct_cte, pow_fct in |- * | |- (continuity_pt Rabs ?X1) => apply (Rcontinuity_abs X1) (* regles de differentiabilite *) (* PLUS *) | |- (continuity_pt (?X1 + ?X2) ?X3) => apply (continuity_pt_plus X1 X2 X3); is_cont_pt (* MOINS *) | |- (continuity_pt (?X1 - ?X2) ?X3) => apply (continuity_pt_minus X1 X2 X3); is_cont_pt (* OPPOSE *) | |- (continuity_pt (- ?X1) ?X2) => apply (continuity_pt_opp X1 X2); is_cont_pt (* MULTIPLICATION PAR UN SCALAIRE *) | |- (continuity_pt (mult_real_fct ?X1 ?X2) ?X3) => apply (continuity_pt_scal X2 X1 X3); is_cont_pt (* MULTIPLICATION *) | |- (continuity_pt (?X1 * ?X2) ?X3) => apply (continuity_pt_mult X1 X2 X3); is_cont_pt (* DIVISION *) | |- (continuity_pt (?X1 / ?X2) ?X3) => apply (continuity_pt_div X1 X2 X3); [ is_cont_pt | is_cont_pt | try assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, comp, id, fct_cte, pow_fct in |- * ] | |- (continuity_pt (/ ?X1) ?X2) => (* INVERSION *) apply (continuity_pt_inv X1 X2); [ is_cont_pt | assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, comp, id, fct_cte, pow_fct in |- * ] | |- (continuity_pt (comp ?X1 ?X2) ?X3) => (* COMPOSITION *) apply (continuity_pt_comp X2 X1 X3); is_cont_pt | _:(continuity_pt ?X1 ?X2) |- (continuity_pt ?X1 ?X2) => assumption | _:(continuity ?X1) |- (continuity_pt ?X1 ?X2) => cut (continuity X1); [ intro HypDDPT; apply HypDDPT | assumption ] | _:(derivable_pt ?X1 ?X2) |- (continuity_pt ?X1 ?X2) => apply derivable_continuous_pt; assumption | _:(derivable ?X1) |- (continuity_pt ?X1 ?X2) => cut (continuity X1); [ intro HypDDPT; apply HypDDPT | apply derivable_continuous; assumption ] | |- (True -> continuity_pt _ _) => intro HypTruE; clear HypTruE; is_cont_pt | _ => try unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, comp, pow_fct in |- * end. 
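(* Added usage sketch (not part of the original file): [is_cont_pt] mostly
   reduces continuity at a point to derivability through
   [derivable_continuous_pt]; a goal
     continuity_pt (cos + id)%F x,
   for instance, becomes two such reductions after [continuity_pt_plus].
   The genuinely continuity-specific leaves are [sqrt], handled by
   [continuity_pt_sqrt] under the side condition 0 <= pt, and [Rabs],
   handled by [Rcontinuity_abs]. *)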
(**********) Ltac is_cont_glob := match goal with | |- (continuity Rsqr) => (* fonctions de base *) apply derivable_continuous; apply derivable_Rsqr | |- (continuity id) => apply derivable_continuous; apply derivable_id | |- (continuity (fct_cte _)) => apply derivable_continuous; apply derivable_const | |- (continuity sin) => apply derivable_continuous; apply derivable_sin | |- (continuity cos) => apply derivable_continuous; apply derivable_cos | |- (continuity exp) => apply derivable_continuous; apply derivable_exp | |- (continuity (pow_fct _)) => unfold pow_fct in |- *; apply derivable_continuous; apply derivable_pow | |- (continuity sinh) => apply derivable_continuous; apply derivable_sinh | |- (continuity cosh) => apply derivable_continuous; apply derivable_cosh | |- (continuity Rabs) => apply Rcontinuity_abs (* regles de continuite *) (* PLUS *) | |- (continuity (?X1 + ?X2)) => apply (continuity_plus X1 X2); try is_cont_glob || assumption (* MOINS *) | |- (continuity (?X1 - ?X2)) => apply (continuity_minus X1 X2); try is_cont_glob || assumption (* OPPOSE *) | |- (continuity (- ?X1)) => apply (continuity_opp X1); try is_cont_glob || assumption (* INVERSE *) | |- (continuity (/ ?X1)) => apply (continuity_inv X1); try is_cont_glob || assumption (* MULTIPLICATION PAR UN SCALAIRE *) | |- (continuity (mult_real_fct ?X1 ?X2)) => apply (continuity_scal X2 X1); try is_cont_glob || assumption (* MULTIPLICATION *) | |- (continuity (?X1 * ?X2)) => apply (continuity_mult X1 X2); try is_cont_glob || assumption (* DIVISION *) | |- (continuity (?X1 / ?X2)) => apply (continuity_div X1 X2); [ try is_cont_glob || assumption | try is_cont_glob || assumption | try assumption || unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, pow_fct in |- * ] | |- (continuity (comp sqrt _)) => (* COMPOSITION *) unfold continuity_pt in |- *; intro; try is_cont_pt | |- (continuity (comp ?X1 ?X2)) => apply (continuity_comp X2 X1); try is_cont_glob || assumption | _:(continuity ?X1) |- (continuity ?X1) => assumption | |- (True -> continuity _) => intro HypTruE; clear HypTruE; is_cont_glob | _:(derivable ?X1) |- (continuity ?X1) => apply derivable_continuous; assumption | _ => try unfold plus_fct, mult_fct, div_fct, minus_fct, opp_fct, inv_fct, id, fct_cte, comp, pow_fct in |- * end. 
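(* Added usage sketch (not part of the original file): [is_cont_glob]
   follows the same strategy for global continuity goals;
     continuity (sin * cos)%F,
   say, is solved by [continuity_mult] and then [derivable_continuous]
   applied to [derivable_sin] and [derivable_cos].  Division again produces
   the side goal that the denominator never vanishes, attempted by
   [assumption] and otherwise left to the user. *)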
(**********) Ltac rew_term trm := match constr:trm with | (?X1 + ?X2) => let p1 := rew_term X1 with p2 := rew_term X2 in match constr:p1 with | (fct_cte ?X3) => match constr:p2 with | (fct_cte ?X4) => constr:(fct_cte (X3 + X4)) | _ => constr:(p1 + p2)%F end | _ => constr:(p1 + p2)%F end | (?X1 - ?X2) => let p1 := rew_term X1 with p2 := rew_term X2 in match constr:p1 with | (fct_cte ?X3) => match constr:p2 with | (fct_cte ?X4) => constr:(fct_cte (X3 - X4)) | _ => constr:(p1 - p2)%F end | _ => constr:(p1 - p2)%F end | (?X1 / ?X2) => let p1 := rew_term X1 with p2 := rew_term X2 in match constr:p1 with | (fct_cte ?X3) => match constr:p2 with | (fct_cte ?X4) => constr:(fct_cte (X3 / X4)) | _ => constr:(p1 / p2)%F end | _ => match constr:p2 with | (fct_cte ?X4) => constr:(p1 * fct_cte (/ X4))%F | _ => constr:(p1 / p2)%F end end | (?X1 * / ?X2) => let p1 := rew_term X1 with p2 := rew_term X2 in match constr:p1 with | (fct_cte ?X3) => match constr:p2 with | (fct_cte ?X4) => constr:(fct_cte (X3 / X4)) | _ => constr:(p1 / p2)%F end | _ => match constr:p2 with | (fct_cte ?X4) => constr:(p1 * fct_cte (/ X4))%F | _ => constr:(p1 / p2)%F end end | (?X1 * ?X2) => let p1 := rew_term X1 with p2 := rew_term X2 in match constr:p1 with | (fct_cte ?X3) => match constr:p2 with | (fct_cte ?X4) => constr:(fct_cte (X3 * X4)) | _ => constr:(p1 * p2)%F end | _ => constr:(p1 * p2)%F end | (- ?X1) => let p := rew_term X1 in match constr:p with | (fct_cte ?X2) => constr:(fct_cte (- X2)) | _ => constr:(- p)%F end | (/ ?X1) => let p := rew_term X1 in match constr:p with | (fct_cte ?X2) => constr:(fct_cte (/ X2)) | _ => constr:(/ p)%F end | (?X1 AppVar) => constr:X1 | (?X1 ?X2) => let p := rew_term X2 in match constr:p with | (fct_cte ?X3) => constr:(fct_cte (X1 X3)) | _ => constr:(comp X1 p) end | AppVar => constr:id | (AppVar ^ ?X1) => constr:(pow_fct X1) | (?X1 ^ ?X2) => let p := rew_term X1 in match constr:p with | (fct_cte ?X3) => constr:(fct_cte (pow_fct X2 X3)) | _ => constr:(comp (pow_fct X2) p) end | ?X1 => constr:(fct_cte X1) end. 
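(* Added illustration (not part of the original file; the sample terms are
   only examples): [rew_term] turns an R-valued expression over the formal
   variable [AppVar] back into a function expression written with the
   operators of Ranalysis1, folding constants into [fct_cte].  For instance
     rew_term (sin AppVar + 3)    produces  (sin + fct_cte 3)%F,
     rew_term (AppVar * AppVar)   produces  (id * id)%F,
     rew_term (cos (exp AppVar))  produces  comp cos exp. *)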
(**********) Ltac deriv_proof trm pt := match constr:trm with | (?X1 + ?X2)%F => let p1 := deriv_proof X1 pt with p2 := deriv_proof X2 pt in constr:(derivable_pt_plus X1 X2 pt p1 p2) | (?X1 - ?X2)%F => let p1 := deriv_proof X1 pt with p2 := deriv_proof X2 pt in constr:(derivable_pt_minus X1 X2 pt p1 p2) | (?X1 * ?X2)%F => let p1 := deriv_proof X1 pt with p2 := deriv_proof X2 pt in constr:(derivable_pt_mult X1 X2 pt p1 p2) | (?X1 / ?X2)%F => match goal with | id:(?X2 pt <> 0) |- _ => let p1 := deriv_proof X1 pt with p2 := deriv_proof X2 pt in constr:(derivable_pt_div X1 X2 pt p1 p2 id) | _ => constr:False end | (/ ?X1)%F => match goal with | id:(?X1 pt <> 0) |- _ => let p1 := deriv_proof X1 pt in constr:(derivable_pt_inv X1 pt p1 id) | _ => constr:False end | (comp ?X1 ?X2) => let pt_f1 := eval cbv beta in (X2 pt) in let p1 := deriv_proof X1 pt_f1 with p2 := deriv_proof X2 pt in constr:(derivable_pt_comp X2 X1 pt p2 p1) | (- ?X1)%F => let p1 := deriv_proof X1 pt in constr:(derivable_pt_opp X1 pt p1) | sin => constr:(derivable_pt_sin pt) | cos => constr:(derivable_pt_cos pt) | sinh => constr:(derivable_pt_sinh pt) | cosh => constr:(derivable_pt_cosh pt) | exp => constr:(derivable_pt_exp pt) | id => constr:(derivable_pt_id pt) | Rsqr => constr:(derivable_pt_Rsqr pt) | sqrt => match goal with | id:(0 < pt) |- _ => constr:(derivable_pt_sqrt pt id) | _ => constr:False end | (fct_cte ?X1) => constr:(derivable_pt_const X1 pt) | ?X1 => let aux := constr:X1 in match goal with | id:(derivable_pt aux pt) |- _ => constr:id | id:(derivable aux) |- _ => constr:(id pt) | _ => constr:False end end. (**********) Ltac simplify_derive trm pt := match constr:trm with | (?X1 + ?X2)%F => try rewrite derive_pt_plus; simplify_derive X1 pt; simplify_derive X2 pt | (?X1 - ?X2)%F => try rewrite derive_pt_minus; simplify_derive X1 pt; simplify_derive X2 pt | (?X1 * ?X2)%F => try rewrite derive_pt_mult; simplify_derive X1 pt; simplify_derive X2 pt | (?X1 / ?X2)%F => try rewrite derive_pt_div; simplify_derive X1 pt; simplify_derive X2 pt | (comp ?X1 ?X2) => let pt_f1 := eval cbv beta in (X2 pt) in (try rewrite derive_pt_comp; simplify_derive X1 pt_f1; simplify_derive X2 pt) | (- ?X1)%F => try rewrite derive_pt_opp; simplify_derive X1 pt | (/ ?X1)%F => try rewrite derive_pt_inv; simplify_derive X1 pt | (fct_cte ?X1) => try rewrite derive_pt_const | id => try rewrite derive_pt_id | sin => try rewrite derive_pt_sin | cos => try rewrite derive_pt_cos | sinh => try rewrite derive_pt_sinh | cosh => try rewrite derive_pt_cosh | exp => try rewrite derive_pt_exp | Rsqr => try rewrite derive_pt_Rsqr | sqrt => try rewrite derive_pt_sqrt | ?X1 => let aux := constr:X1 in match goal with | id:(derive_pt aux pt ?X2 = _),H:(derivable aux) |- _ => try replace (derive_pt aux pt (H pt)) with (derive_pt aux pt X2); [ rewrite id | apply pr_nu ] | id:(derive_pt aux pt ?X2 = _),H:(derivable_pt aux pt) |- _ => try replace (derive_pt aux pt H) with (derive_pt aux pt X2); [ rewrite id | apply pr_nu ] | _ => idtac end | _ => idtac end. 
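(* Added note (not part of the original file): [deriv_proof] rebuilds, from
   the hypotheses introduced by [intro_hyp_pt], an explicit derivability
   witness for a compound expression, and [simplify_derive] then rewrites
   the corresponding [derive_pt] with the derive_pt_* equations.  For
   example a term
     derive_pt (sin + id)%F x pr
   is rewritten through [derive_pt_plus], [derive_pt_sin] and
   [derive_pt_id] to cos x + 1, the mismatch between [pr] and the canonical
   proof being resolved with [pr_nu]. *)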
(**********) Ltac reg := match goal with | |- (derivable_pt ?X1 ?X2) => let trm := eval cbv beta in (X1 AppVar) in let aux := rew_term trm in (intro_hyp_pt aux X2; try (change (derivable_pt aux X2) in |- *; is_diff_pt) || is_diff_pt) | |- (derivable ?X1) => let trm := eval cbv beta in (X1 AppVar) in let aux := rew_term trm in (intro_hyp_glob aux; try (change (derivable aux) in |- *; is_diff_glob) || is_diff_glob) | |- (continuity ?X1) => let trm := eval cbv beta in (X1 AppVar) in let aux := rew_term trm in (intro_hyp_glob aux; try (change (continuity aux) in |- *; is_cont_glob) || is_cont_glob) | |- (continuity_pt ?X1 ?X2) => let trm := eval cbv beta in (X1 AppVar) in let aux := rew_term trm in (intro_hyp_pt aux X2; try (change (continuity_pt aux X2) in |- *; is_cont_pt) || is_cont_pt) | |- (derive_pt ?X1 ?X2 ?X3 = ?X4) => let trm := eval cbv beta in (X1 AppVar) in let aux := rew_term trm in intro_hyp_pt aux X2; (let aux2 := deriv_proof aux X2 in try (replace (derive_pt X1 X2 X3) with (derive_pt aux X2 aux2); [ simplify_derive aux X2; try unfold plus_fct, minus_fct, mult_fct, div_fct, id, fct_cte, inv_fct, opp_fct in |- *; ring || ring_simplify | try apply pr_nu ]) || is_diff_pt) end. coq-8.4pl2/theories/Reals/Rpow_def.v0000640000175000001440000000122112010532755016447 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R1 | S n => Rmult r (pow r n) end. coq-8.4pl2/theories/Reals/PSeries_reg.v0000640000175000001440000002202212010532755017113 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R) (f:R -> R) (x:R) (r:posreal) : Prop := forall eps:R, 0 < eps -> exists N : nat, (forall (n:nat) (y:R), (N <= n)%nat -> Boule x r y -> Rabs (f y - fn n y) < eps). (** Normal convergence *) Definition CVN_r (fn:nat -> R -> R) (r:posreal) : Type := { An:nat -> R & { l:R | Un_cv (fun n:nat => sum_f_R0 (fun k:nat => Rabs (An k)) n) l /\ (forall (n:nat) (y:R), Boule 0 r y -> Rabs (fn n y) <= An n) } }. Definition CVN_R (fn:nat -> R -> R) : Type := forall r:posreal, CVN_r fn r. Definition SFL (fn:nat -> R -> R) (cv:forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }) (y:R) : R := let (a,_) := cv y in a. (** In a complete space, normal convergence implies uniform convergence *) Lemma CVN_CVU : forall (fn:nat -> R -> R) (cv:forall x:R, {l:R | Un_cv (fun N:nat => SP fn N x) l }) (r:posreal), CVN_r fn r -> CVU (fun n:nat => SP fn n) (SFL fn cv) 0 r. Proof. intros; unfold CVU; intros. unfold CVN_r in X. elim X; intros An X0. elim X0; intros s H0. elim H0; intros. cut (Un_cv (fun n:nat => sum_f_R0 (fun k:nat => Rabs (An k)) n - s) 0). intro; unfold Un_cv in H3. elim (H3 eps H); intros N0 H4. exists N0; intros. apply Rle_lt_trans with (Rabs (sum_f_R0 (fun k:nat => Rabs (An k)) n - s)). rewrite <- (Rabs_Ropp (sum_f_R0 (fun k:nat => Rabs (An k)) n - s)); rewrite Ropp_minus_distr'; rewrite (Rabs_right (s - sum_f_R0 (fun k:nat => Rabs (An k)) n)). eapply sum_maj1. unfold SFL; case (cv y); intro. trivial. apply H1. intro; elim H0; intros. rewrite (Rabs_right (An n0)). apply H8; apply H6. apply Rle_ge; apply Rle_trans with (Rabs (fn n0 y)). apply Rabs_pos. apply H8; apply H6. apply Rle_ge; apply Rplus_le_reg_l with (sum_f_R0 (fun k:nat => Rabs (An k)) n). rewrite Rplus_0_r; unfold Rminus; rewrite (Rplus_comm s); rewrite <- Rplus_assoc; rewrite Rplus_opp_r; rewrite Rplus_0_l; apply sum_incr. apply H1. 
intro; apply Rabs_pos. unfold R_dist in H4; unfold Rminus in H4; rewrite Ropp_0 in H4. assert (H7 := H4 n H5). rewrite Rplus_0_r in H7; apply H7. unfold Un_cv in H1; unfold Un_cv; intros. elim (H1 _ H3); intros. exists x; intros. unfold R_dist; unfold R_dist in H4. rewrite Rminus_0_r; apply H4; assumption. Qed. (** Each limit of a sequence of functions which converges uniformly is continue *) Lemma CVU_continuity : forall (fn:nat -> R -> R) (f:R -> R) (x:R) (r:posreal), CVU fn f x r -> (forall (n:nat) (y:R), Boule x r y -> continuity_pt (fn n) y) -> forall y:R, Boule x r y -> continuity_pt f y. Proof. intros; unfold continuity_pt; unfold continue_in; unfold limit1_in; unfold limit_in; simpl; unfold R_dist; intros. unfold CVU in H. cut (0 < eps / 3); [ intro | unfold Rdiv; apply Rmult_lt_0_compat; [ assumption | apply Rinv_0_lt_compat; prove_sup0 ] ]. elim (H _ H3); intros N0 H4. assert (H5 := H0 N0 y H1). cut (exists del : posreal, (forall h:R, Rabs h < del -> Boule x r (y + h))). intro. elim H6; intros del1 H7. unfold continuity_pt in H5; unfold continue_in in H5; unfold limit1_in in H5; unfold limit_in in H5; simpl in H5; unfold R_dist in H5. elim (H5 _ H3); intros del2 H8. set (del := Rmin del1 del2). exists del; intros. split. unfold del; unfold Rmin; case (Rle_dec del1 del2); intro. apply (cond_pos del1). elim H8; intros; assumption. intros; apply Rle_lt_trans with (Rabs (f x0 - fn N0 x0) + Rabs (fn N0 x0 - f y)). replace (f x0 - f y) with (f x0 - fn N0 x0 + (fn N0 x0 - f y)); [ apply Rabs_triang | ring ]. apply Rle_lt_trans with (Rabs (f x0 - fn N0 x0) + Rabs (fn N0 x0 - fn N0 y) + Rabs (fn N0 y - f y)). rewrite Rplus_assoc; apply Rplus_le_compat_l. replace (fn N0 x0 - f y) with (fn N0 x0 - fn N0 y + (fn N0 y - f y)); [ apply Rabs_triang | ring ]. replace eps with (eps / 3 + eps / 3 + eps / 3). repeat apply Rplus_lt_compat. apply H4. apply le_n. replace x0 with (y + (x0 - y)); [ idtac | ring ]; apply H7. elim H9; intros. apply Rlt_le_trans with del. assumption. unfold del; apply Rmin_l. elim H8; intros. apply H11. split. elim H9; intros; assumption. elim H9; intros; apply Rlt_le_trans with del. assumption. unfold del; apply Rmin_r. rewrite <- Rabs_Ropp; rewrite Ropp_minus_distr'; apply H4. apply le_n. assumption. apply Rmult_eq_reg_l with 3. do 2 rewrite Rmult_plus_distr_l; unfold Rdiv; rewrite <- Rmult_assoc; rewrite Rinv_r_simpl_m. ring. discrR. discrR. cut (0 < r - Rabs (x - y)). intro; exists (mkposreal _ H6). simpl; intros. unfold Boule; replace (y + h - x) with (h + (y - x)); [ idtac | ring ]; apply Rle_lt_trans with (Rabs h + Rabs (y - x)). apply Rabs_triang. apply Rplus_lt_reg_r with (- Rabs (x - y)). rewrite <- (Rabs_Ropp (y - x)); rewrite Ropp_minus_distr'. replace (- Rabs (x - y) + r) with (r - Rabs (x - y)). replace (- Rabs (x - y) + (Rabs h + Rabs (x - y))) with (Rabs h). apply H7. ring. ring. unfold Boule in H1; rewrite <- (Rabs_Ropp (x - y)); rewrite Ropp_minus_distr'; apply Rplus_lt_reg_r with (Rabs (y - x)). rewrite Rplus_0_r; replace (Rabs (y - x) + (r - Rabs (y - x))) with (pos r); [ apply H1 | ring ]. Qed. (**********) Lemma continuity_pt_finite_SF : forall (fn:nat -> R -> R) (N:nat) (x:R), (forall n:nat, (n <= N)%nat -> continuity_pt (fn n) x) -> continuity_pt (fun y:R => sum_f_R0 (fun k:nat => fn k y) N) x. Proof. intros; induction N as [| N HrecN]. simpl; apply (H 0%nat); apply le_n. simpl; replace (fun y:R => sum_f_R0 (fun k:nat => fn k y) N + fn (S N) y) with ((fun y:R => sum_f_R0 (fun k:nat => fn k y) N) + (fun y:R => fn (S N) y))%F; [ idtac | reflexivity ]. 
apply continuity_pt_plus. apply HrecN. intros; apply H. apply le_trans with N; [ assumption | apply le_n_Sn ]. apply (H (S N)); apply le_n. Qed. (** Continuity and normal convergence *) Lemma SFL_continuity_pt : forall (fn:nat -> R -> R) (cv:forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }) (r:posreal), CVN_r fn r -> (forall (n:nat) (y:R), Boule 0 r y -> continuity_pt (fn n) y) -> forall y:R, Boule 0 r y -> continuity_pt (SFL fn cv) y. Proof. intros; eapply CVU_continuity. apply CVN_CVU. apply X. intros; unfold SP; apply continuity_pt_finite_SF. intros; apply H. apply H1. apply H0. Qed. Lemma SFL_continuity : forall (fn:nat -> R -> R) (cv:forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }), CVN_R fn -> (forall n:nat, continuity (fn n)) -> continuity (SFL fn cv). Proof. intros; unfold continuity; intro. cut (0 < Rabs x + 1); [ intro | apply Rplus_le_lt_0_compat; [ apply Rabs_pos | apply Rlt_0_1 ] ]. cut (Boule 0 (mkposreal _ H0) x). intro; eapply SFL_continuity_pt with (mkposreal _ H0). apply X. intros; apply (H n y). apply H1. unfold Boule; simpl; rewrite Rminus_0_r; pattern (Rabs x) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rlt_0_1. Qed. (** As R is complete, normal convergence implies that (fn) is simply-uniformly convergent *) Lemma CVN_R_CVS : forall fn:nat -> R -> R, CVN_R fn -> forall x:R, { l:R | Un_cv (fun N:nat => SP fn N x) l }. Proof. intros; apply R_complete. unfold SP; set (An := fun N:nat => fn N x). change (Cauchy_crit_series An). apply cauchy_abs. unfold Cauchy_crit_series; apply CV_Cauchy. unfold CVN_R in X; cut (0 < Rabs x + 1). intro; assert (H0 := X (mkposreal _ H)). unfold CVN_r in H0; elim H0; intros Bn H1. elim H1; intros l H2. elim H2; intros. apply Rseries_CV_comp with Bn. intro; split. apply Rabs_pos. unfold An; apply H4; unfold Boule; simpl; rewrite Rminus_0_r. pattern (Rabs x) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rlt_0_1. exists l. cut (forall n:nat, 0 <= Bn n). intro; unfold Un_cv in H3; unfold Un_cv; intros. elim (H3 _ H6); intros. exists x0; intros. replace (sum_f_R0 Bn n) with (sum_f_R0 (fun k:nat => Rabs (Bn k)) n). apply H7; assumption. apply sum_eq; intros; apply Rabs_right; apply Rle_ge; apply H5. intro; apply Rle_trans with (Rabs (An n)). apply Rabs_pos. unfold An; apply H4; unfold Boule; simpl; rewrite Rminus_0_r; pattern (Rabs x) at 1; rewrite <- Rplus_0_r; apply Rplus_lt_compat_l; apply Rlt_0_1. apply Rplus_le_lt_0_compat; [ apply Rabs_pos | apply Rlt_0_1 ]. Qed. coq-8.4pl2/theories/Reals/R_sqrt.v0000640000175000001440000003716312010532755016172 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | right a => Rsqrt (mknonnegreal x (Rge_le _ _ a)) end. Lemma sqrt_pos : forall x : R, 0 <= sqrt x. Proof. intros x. unfold sqrt. destruct (Rcase_abs x) as [H|H]. apply Rle_refl. apply Rsqrt_positivity. Qed. Lemma sqrt_positivity : forall x:R, 0 <= x -> 0 <= sqrt x. Proof. intros x _. apply sqrt_pos. Qed. Lemma sqrt_sqrt : forall x:R, 0 <= x -> sqrt x * sqrt x = x. Proof. intros. unfold sqrt. case (Rcase_abs x); intro. elim (Rlt_irrefl _ (Rlt_le_trans _ _ _ r H)). rewrite Rsqrt_Rsqrt; reflexivity. Qed. Lemma sqrt_0 : sqrt 0 = 0. Proof. apply Rsqr_eq_0; unfold Rsqr; apply sqrt_sqrt; right; reflexivity. Qed. Lemma sqrt_1 : sqrt 1 = 1. Proof. apply (Rsqr_inj (sqrt 1) 1); [ apply sqrt_positivity; left | left | unfold Rsqr; rewrite sqrt_sqrt; [ ring | left ] ]; apply Rlt_0_1. Qed. 
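(* Added remark (not part of the original file): since [sqrt] is defined by
   case analysis on [Rcase_abs], it is total and returns 0 on negative
   arguments; for instance sqrt (-1) = 0 holds by unfolding [sqrt] and
   destructing [Rcase_abs (-1)], the branch assuming -1 >= 0 being absurd.
   This is why the lemmas in this file carry explicit non-negativity
   hypotheses whenever they rely on the specification
   sqrt x * sqrt x = x. *)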
Lemma sqrt_eq_0 : forall x:R, 0 <= x -> sqrt x = 0 -> x = 0. Proof. intros; cut (Rsqr (sqrt x) = 0). intro; unfold Rsqr in H1; rewrite sqrt_sqrt in H1; assumption. rewrite H0; apply Rsqr_0. Qed. Lemma sqrt_lem_0 : forall x y:R, 0 <= x -> 0 <= y -> sqrt x = y -> y * y = x. Proof. intros; rewrite <- H1; apply (sqrt_sqrt x H). Qed. Lemma sqrt_lem_1 : forall x y:R, 0 <= x -> 0 <= y -> y * y = x -> sqrt x = y. Proof. intros; apply Rsqr_inj; [ apply (sqrt_positivity x H) | assumption | unfold Rsqr; rewrite H1; apply (sqrt_sqrt x H) ]. Qed. Lemma sqrt_def : forall x:R, 0 <= x -> sqrt x * sqrt x = x. Proof. intros; apply (sqrt_sqrt x H). Qed. Lemma sqrt_square : forall x:R, 0 <= x -> sqrt (x * x) = x. Proof. intros; apply (Rsqr_inj (sqrt (Rsqr x)) x (sqrt_positivity (Rsqr x) (Rle_0_sqr x)) H); unfold Rsqr; apply (sqrt_sqrt (Rsqr x) (Rle_0_sqr x)). Qed. Lemma sqrt_Rsqr : forall x:R, 0 <= x -> sqrt (Rsqr x) = x. Proof. intros; unfold Rsqr; apply sqrt_square; assumption. Qed. Lemma sqrt_Rsqr_abs : forall x:R, sqrt (Rsqr x) = Rabs x. Proof. intro x; rewrite Rsqr_abs; apply sqrt_Rsqr; apply Rabs_pos. Qed. Lemma Rsqr_sqrt : forall x:R, 0 <= x -> Rsqr (sqrt x) = x. Proof. intros x H1; unfold Rsqr; apply (sqrt_sqrt x H1). Qed. Lemma sqrt_mult_alt : forall x y : R, 0 <= x -> sqrt (x * y) = sqrt x * sqrt y. Proof. intros x y Hx. unfold sqrt at 3. destruct (Rcase_abs y) as [Hy|Hy]. rewrite Rmult_0_r. destruct Hx as [Hx'|Hx']. unfold sqrt. destruct (Rcase_abs (x * y)) as [Hxy|Hxy]. apply eq_refl. elim Rge_not_lt with (1 := Hxy). rewrite <- (Rmult_0_r x). now apply Rmult_lt_compat_l. rewrite <- Hx', Rmult_0_l. exact sqrt_0. apply Rsqr_inj. apply sqrt_pos. apply Rmult_le_pos. apply sqrt_pos. apply Rsqrt_positivity. rewrite Rsqr_mult, 2!Rsqr_sqrt. unfold Rsqr. now rewrite Rsqrt_Rsqrt. exact Hx. apply Rmult_le_pos. exact Hx. now apply Rge_le. Qed. Lemma sqrt_mult : forall x y:R, 0 <= x -> 0 <= y -> sqrt (x * y) = sqrt x * sqrt y. Proof. intros x y Hx _. now apply sqrt_mult_alt. Qed. Lemma sqrt_lt_R0 : forall x:R, 0 < x -> 0 < sqrt x. Proof. intros x H1; apply Rsqr_incrst_0; [ rewrite Rsqr_0; rewrite Rsqr_sqrt; [ assumption | left; assumption ] | right; reflexivity | apply (sqrt_positivity x (Rlt_le 0 x H1)) ]. Qed. Lemma sqrt_div_alt : forall x y : R, 0 < y -> sqrt (x / y) = sqrt x / sqrt y. Proof. intros x y Hy. unfold sqrt at 2. destruct (Rcase_abs x) as [Hx|Hx]. unfold Rdiv. rewrite Rmult_0_l. unfold sqrt. destruct (Rcase_abs (x * / y)) as [Hxy|Hxy]. apply eq_refl. elim Rge_not_lt with (1 := Hxy). apply Rmult_lt_reg_r with y. exact Hy. rewrite Rmult_assoc, Rinv_l, Rmult_1_r, Rmult_0_l. exact Hx. now apply Rgt_not_eq. set (Hx' := Rge_le x 0 Hx). clearbody Hx'. clear Hx. apply Rsqr_inj. apply sqrt_pos. apply Fourier_util.Rle_mult_inv_pos. apply Rsqrt_positivity. now apply sqrt_lt_R0. rewrite Rsqr_div, 2!Rsqr_sqrt. unfold Rsqr. now rewrite Rsqrt_Rsqrt. now apply Rlt_le. now apply Fourier_util.Rle_mult_inv_pos. apply Rgt_not_eq. now apply sqrt_lt_R0. Qed. Lemma sqrt_div : forall x y:R, 0 <= x -> 0 < y -> sqrt (x / y) = sqrt x / sqrt y. Proof. intros x y _ H. now apply sqrt_div_alt. Qed. Lemma sqrt_lt_0_alt : forall x y : R, sqrt x < sqrt y -> x < y. Proof. intros x y. unfold sqrt at 2. destruct (Rcase_abs y) as [Hy|Hy]. intros Hx. elim Rlt_not_le with (1 := Hx). apply sqrt_pos. set (Hy' := Rge_le y 0 Hy). clearbody Hy'. clear Hy. unfold sqrt. destruct (Rcase_abs x) as [Hx|Hx]. intros _. now apply Rlt_le_trans with R0. intros Hxy. apply Rsqr_incrst_1 in Hxy ; try apply Rsqrt_positivity. unfold Rsqr in Hxy. 
now rewrite 2!Rsqrt_Rsqrt in Hxy. Qed. Lemma sqrt_lt_0 : forall x y:R, 0 <= x -> 0 <= y -> sqrt x < sqrt y -> x < y. Proof. intros x y _ _. apply sqrt_lt_0_alt. Qed. Lemma sqrt_lt_1_alt : forall x y : R, 0 <= x < y -> sqrt x < sqrt y. Proof. intros x y (Hx, Hxy). apply Rsqr_incrst_0 ; try apply sqrt_pos. rewrite 2!Rsqr_sqrt. exact Hxy. apply Rlt_le. now apply Rle_lt_trans with x. exact Hx. Qed. Lemma sqrt_lt_1 : forall x y:R, 0 <= x -> 0 <= y -> x < y -> sqrt x < sqrt y. Proof. intros x y Hx _ Hxy. apply sqrt_lt_1_alt. now split. Qed. Lemma sqrt_le_0 : forall x y:R, 0 <= x -> 0 <= y -> sqrt x <= sqrt y -> x <= y. Proof. intros x y H1 H2 H3; generalize (Rsqr_incr_1 (sqrt x) (sqrt y) H3 (sqrt_positivity x H1) (sqrt_positivity y H2)); intro H4; rewrite (Rsqr_sqrt x H1) in H4; rewrite (Rsqr_sqrt y H2) in H4; assumption. Qed. Lemma sqrt_le_1_alt : forall x y : R, x <= y -> sqrt x <= sqrt y. Proof. intros x y [Hxy|Hxy]. destruct (Rle_or_lt 0 x) as [Hx|Hx]. apply Rlt_le. apply sqrt_lt_1_alt. now split. unfold sqrt at 1. destruct (Rcase_abs x) as [Hx'|Hx']. apply sqrt_pos. now elim Rge_not_lt with (1 := Hx'). rewrite Hxy. apply Rle_refl. Qed. Lemma sqrt_le_1 : forall x y:R, 0 <= x -> 0 <= y -> x <= y -> sqrt x <= sqrt y. Proof. intros x y _ _ Hxy. now apply sqrt_le_1_alt. Qed. Lemma sqrt_inj : forall x y:R, 0 <= x -> 0 <= y -> sqrt x = sqrt y -> x = y. Proof. intros; cut (Rsqr (sqrt x) = Rsqr (sqrt y)). intro; rewrite (Rsqr_sqrt x H) in H2; rewrite (Rsqr_sqrt y H0) in H2; assumption. rewrite H1; reflexivity. Qed. Lemma sqrt_less_alt : forall x : R, 1 < x -> sqrt x < x. Proof. intros x Hx. assert (Hx1 := Rle_lt_trans _ _ _ Rle_0_1 Hx). assert (Hx2 := Rlt_le _ _ Hx1). apply Rsqr_incrst_0 ; trivial. rewrite Rsqr_sqrt ; trivial. rewrite <- (Rmult_1_l x) at 1. now apply Rmult_lt_compat_r. apply sqrt_pos. Qed. Lemma sqrt_less : forall x:R, 0 <= x -> 1 < x -> sqrt x < x. Proof. intros x _. apply sqrt_less_alt. Qed. Lemma sqrt_more : forall x:R, 0 < x -> x < 1 -> x < sqrt x. Proof. intros x H1 H2; generalize (sqrt_lt_1 x 1 (Rlt_le 0 x H1) (Rlt_le 0 1 Rlt_0_1) H2); intro H3; rewrite sqrt_1 in H3; generalize (Rmult_ne (sqrt x)); intro H4; elim H4; intros H5 H6; rewrite <- H5; pattern x at 1; rewrite <- (sqrt_def x (Rlt_le 0 x H1)); apply (Rmult_lt_compat_l (sqrt x) (sqrt x) 1 (sqrt_lt_R0 x H1) H3). Qed. Lemma sqrt_cauchy : forall a b c d:R, a * c + b * d <= sqrt (Rsqr a + Rsqr b) * sqrt (Rsqr c + Rsqr d). Proof. intros a b c d; apply Rsqr_incr_0_var; [ rewrite Rsqr_mult; repeat rewrite Rsqr_sqrt; unfold Rsqr; [ replace ((a * c + b * d) * (a * c + b * d)) with (a * a * c * c + b * b * d * d + 2 * a * b * c * d); [ replace ((a * a + b * b) * (c * c + d * d)) with (a * a * c * c + b * b * d * d + (a * a * d * d + b * b * c * c)); [ apply Rplus_le_compat_l; replace (a * a * d * d + b * b * c * c) with (2 * a * b * c * d + (a * a * d * d + b * b * c * c - 2 * a * b * c * d)); [ pattern (2 * a * b * c * d) at 1; rewrite <- Rplus_0_r; apply Rplus_le_compat_l; replace (a * a * d * d + b * b * c * c - 2 * a * b * c * d) with (Rsqr (a * d - b * c)); [ apply Rle_0_sqr | unfold Rsqr; ring ] | ring ] | ring ] | ring ] | apply (Rplus_le_le_0_compat (Rsqr c) (Rsqr d) (Rle_0_sqr c) (Rle_0_sqr d)) | apply (Rplus_le_le_0_compat (Rsqr a) (Rsqr b) (Rle_0_sqr a) (Rle_0_sqr b)) ] | apply Rmult_le_pos; apply sqrt_positivity; apply Rplus_le_le_0_compat; apply Rle_0_sqr ]. Qed. 
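(* Added remark (not part of the original file): [sqrt_cauchy] is the
   two-dimensional Cauchy-Schwarz inequality
     a * c + b * d <= sqrt (Rsqr a + Rsqr b) * sqrt (Rsqr c + Rsqr d);
   the proof above compares the squares of the two sides and reduces their
   difference to Rsqr (a * d - b * c), which is non-negative by
   [Rle_0_sqr]. *)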
(************************************************************) (** * Resolution of [a*X^2+b*X+c=0] *) (************************************************************) Definition Delta (a:nonzeroreal) (b c:R) : R := Rsqr b - 4 * a * c. Definition Delta_is_pos (a:nonzeroreal) (b c:R) : Prop := 0 <= Delta a b c. Definition sol_x1 (a:nonzeroreal) (b c:R) : R := (- b + sqrt (Delta a b c)) / (2 * a). Definition sol_x2 (a:nonzeroreal) (b c:R) : R := (- b - sqrt (Delta a b c)) / (2 * a). Lemma Rsqr_sol_eq_0_1 : forall (a:nonzeroreal) (b c x:R), Delta_is_pos a b c -> x = sol_x1 a b c \/ x = sol_x2 a b c -> a * Rsqr x + b * x + c = 0. Proof. intros; elim H0; intro. unfold sol_x1 in H1; unfold Delta in H1; rewrite H1; unfold Rdiv; repeat rewrite Rsqr_mult; rewrite Rsqr_plus; rewrite <- Rsqr_neg; rewrite Rsqr_sqrt. rewrite Rsqr_inv. unfold Rsqr; repeat rewrite Rinv_mult_distr. repeat rewrite Rmult_assoc; rewrite (Rmult_comm a). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite Rmult_plus_distr_r. repeat rewrite Rmult_assoc. pattern 2 at 2; rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite (Rmult_plus_distr_r (- b) (sqrt (b * b - 2 * (2 * (a * c)))) (/ 2 * / a)) . rewrite Rmult_plus_distr_l; repeat rewrite Rplus_assoc. replace (- b * (sqrt (b * b - 2 * (2 * (a * c))) * (/ 2 * / a)) + (b * (- b * (/ 2 * / a)) + (b * (sqrt (b * b - 2 * (2 * (a * c))) * (/ 2 * / a)) + c))) with (b * (- b * (/ 2 * / a)) + c). unfold Rminus; repeat rewrite <- Rplus_assoc. replace (b * b + b * b) with (2 * (b * b)). rewrite Rmult_plus_distr_r; repeat rewrite Rmult_assoc. rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r. rewrite Ropp_mult_distr_l_reverse; repeat rewrite Rmult_assoc; rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite (Rmult_comm (/ 2)); repeat rewrite Rmult_assoc; rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; repeat rewrite Rmult_assoc. rewrite (Rmult_comm a); rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite <- Rmult_opp_opp. ring. apply (cond_nonzero a). discrR. discrR. discrR. ring. ring. discrR. apply (cond_nonzero a). discrR. apply (cond_nonzero a). apply prod_neq_R0; [ discrR | apply (cond_nonzero a) ]. apply prod_neq_R0; [ discrR | apply (cond_nonzero a) ]. apply prod_neq_R0; [ discrR | apply (cond_nonzero a) ]. assumption. unfold sol_x2 in H1; unfold Delta in H1; rewrite H1; unfold Rdiv; repeat rewrite Rsqr_mult; rewrite Rsqr_minus; rewrite <- Rsqr_neg; rewrite Rsqr_sqrt. rewrite Rsqr_inv. unfold Rsqr; repeat rewrite Rinv_mult_distr; repeat rewrite Rmult_assoc. rewrite (Rmult_comm a); repeat rewrite Rmult_assoc. rewrite <- Rinv_l_sym. rewrite Rmult_1_r; unfold Rminus; rewrite Rmult_plus_distr_r. rewrite Ropp_mult_distr_l_reverse; repeat rewrite Rmult_assoc; pattern 2 at 2; rewrite (Rmult_comm 2). repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite (Rmult_plus_distr_r (- b) (- sqrt (b * b + - (2 * (2 * (a * c))))) (/ 2 * / a)). rewrite Rmult_plus_distr_l; repeat rewrite Rplus_assoc. rewrite Ropp_mult_distr_l_reverse; rewrite Ropp_involutive. replace (b * (sqrt (b * b + - (2 * (2 * (a * c)))) * (/ 2 * / a)) + (b * (- b * (/ 2 * / a)) + (b * (- sqrt (b * b + - (2 * (2 * (a * c)))) * (/ 2 * / a)) + c))) with (b * (- b * (/ 2 * / a)) + c). repeat rewrite <- Rplus_assoc; replace (b * b + b * b) with (2 * (b * b)). 
rewrite Rmult_plus_distr_r; repeat rewrite Rmult_assoc; rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Ropp_mult_distr_l_reverse; repeat rewrite Rmult_assoc. rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite (Rmult_comm (/ 2)); repeat rewrite Rmult_assoc. rewrite (Rmult_comm 2); repeat rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; repeat rewrite Rmult_assoc; rewrite (Rmult_comm a); rewrite Rmult_assoc; rewrite <- Rinv_l_sym. rewrite Rmult_1_r; rewrite <- Rmult_opp_opp; ring. apply (cond_nonzero a). discrR. discrR. discrR. ring. ring. discrR. apply (cond_nonzero a). discrR. discrR. apply (cond_nonzero a). apply prod_neq_R0; discrR || apply (cond_nonzero a). apply prod_neq_R0; discrR || apply (cond_nonzero a). apply prod_neq_R0; discrR || apply (cond_nonzero a). assumption. Qed. Lemma Rsqr_sol_eq_0_0 : forall (a:nonzeroreal) (b c x:R), Delta_is_pos a b c -> a * Rsqr x + b * x + c = 0 -> x = sol_x1 a b c \/ x = sol_x2 a b c. Proof. intros; rewrite (canonical_Rsqr a b c x) in H0; rewrite Rplus_comm in H0; generalize (Rplus_opp_r_uniq ((4 * a * c - Rsqr b) / (4 * a)) (a * Rsqr (x + b / (2 * a))) H0); cut (Rsqr b - 4 * a * c = Delta a b c). intro; replace (- ((4 * a * c - Rsqr b) / (4 * a))) with ((Rsqr b - 4 * a * c) / (4 * a)). rewrite H1; intro; generalize (Rmult_eq_compat_l (/ a) (a * Rsqr (x + b / (2 * a))) (Delta a b c / (4 * a)) H2); replace (/ a * (a * Rsqr (x + b / (2 * a)))) with (Rsqr (x + b / (2 * a))). replace (/ a * (Delta a b c / (4 * a))) with (Rsqr (sqrt (Delta a b c) / (2 * a))). intro; generalize (Rsqr_eq (x + b / (2 * a)) (sqrt (Delta a b c) / (2 * a)) H3); intro; elim H4; intro. left; unfold sol_x1; generalize (Rplus_eq_compat_l (- (b / (2 * a))) (x + b / (2 * a)) (sqrt (Delta a b c) / (2 * a)) H5); replace (- (b / (2 * a)) + (x + b / (2 * a))) with x. intro; rewrite H6; unfold Rdiv; ring. ring. right; unfold sol_x2; generalize (Rplus_eq_compat_l (- (b / (2 * a))) (x + b / (2 * a)) (- (sqrt (Delta a b c) / (2 * a))) H5); replace (- (b / (2 * a)) + (x + b / (2 * a))) with x. intro; rewrite H6; unfold Rdiv; ring. ring. rewrite Rsqr_div. rewrite Rsqr_sqrt. unfold Rdiv. repeat rewrite Rmult_assoc. rewrite (Rmult_comm (/ a)). rewrite Rmult_assoc. rewrite <- Rinv_mult_distr. replace (2 * (2 * a) * a) with (Rsqr (2 * a)). reflexivity. ring_Rsqr. rewrite <- Rmult_assoc; apply prod_neq_R0; [ discrR | apply (cond_nonzero a) ]. apply (cond_nonzero a). assumption. apply prod_neq_R0; [ discrR | apply (cond_nonzero a) ]. rewrite <- Rmult_assoc; rewrite <- Rinv_l_sym. symmetry ; apply Rmult_1_l. apply (cond_nonzero a). unfold Rdiv; rewrite <- Ropp_mult_distr_l_reverse. rewrite Ropp_minus_distr. reflexivity. reflexivity. Qed. coq-8.4pl2/theories/Program/0000750000175000001440000000000012127276547015074 5ustar notinuserscoq-8.4pl2/theories/Program/Wf.v0000640000175000001440000001700512010532755015626 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Hypothesis Rwf : well_founded R. Variable P : A -> Type. Variable F_sub : forall x:A, (forall y: { y : A | R y x }, P (proj1_sig y)) -> P x. Fixpoint Fix_F_sub (x : A) (r : Acc R x) : P x := F_sub x (fun y: { y : A | R y x} => Fix_F_sub (proj1_sig y) (Acc_inv r (proj2_sig y))). Definition Fix_sub (x : A) := Fix_F_sub x (Rwf x). (* Notation Fix_F := (Fix_F_sub P F_sub) (only parsing). 
(* alias *) *) (* Definition Fix (x:A) := Fix_F_sub P F_sub x (Rwf x). *) Hypothesis F_ext : forall (x:A) (f g:forall y:{y:A | R y x}, P (`y)), (forall (y : A | R y x), f y = g y) -> F_sub x f = F_sub x g. Lemma Fix_F_eq : forall (x:A) (r:Acc R x), F_sub x (fun (y:A|R y x) => Fix_F_sub (`y) (Acc_inv r (proj2_sig y))) = Fix_F_sub x r. Proof. destruct r using Acc_inv_dep; auto. Qed. Lemma Fix_F_inv : forall (x:A) (r s:Acc R x), Fix_F_sub x r = Fix_F_sub x s. Proof. intro x; induction (Rwf x); intros. rewrite (proof_irrelevance (Acc R x) r s) ; auto. Qed. Lemma Fix_eq : forall x:A, Fix_sub x = F_sub x (fun (y:A|R y x) => Fix_sub (proj1_sig y)). Proof. intro x; unfold Fix_sub. rewrite <- (Fix_F_eq ). apply F_ext; intros. apply Fix_F_inv. Qed. Lemma fix_sub_eq : forall x : A, Fix_sub x = let f_sub := F_sub in f_sub x (fun (y : A | R y x) => Fix_sub (`y)). exact Fix_eq. Qed. End Well_founded. Extraction Inline Fix_F_sub Fix_sub. Set Implicit Arguments. (** Reasoning about well-founded fixpoints on measures. *) Section Measure_well_founded. (* Measure relations are well-founded if the underlying relation is well-founded. *) Variables T M: Type. Variable R: M -> M -> Prop. Hypothesis wf: well_founded R. Variable m: T -> M. Definition MR (x y: T): Prop := R (m x) (m y). Lemma measure_wf: well_founded MR. Proof with auto. unfold well_founded. cut (forall a: M, (fun mm: M => forall a0: T, m a0 = mm -> Acc MR a0) a). intros. apply (H (m a))... apply (@well_founded_ind M R wf (fun mm => forall a, m a = mm -> Acc MR a)). intros. apply Acc_intro. intros. unfold MR in H1. rewrite H0 in H1. apply (H (m y))... Defined. End Measure_well_founded. Hint Resolve measure_wf. Section Fix_rects. Variable A: Type. Variable P: A -> Type. Variable R : A -> A -> Prop. Variable Rwf : well_founded R. Variable f: forall (x : A), (forall y: { y: A | R y x }, P (proj1_sig y)) -> P x. Lemma F_unfold x r: Fix_F_sub A R P f x r = f (fun y => Fix_F_sub A R P f (proj1_sig y) (Acc_inv r (proj2_sig y))). Proof. intros. case r; auto. Qed. (* Fix_F_sub_rect lets one prove a property of functions defined using Fix_F_sub by showing that property to be invariant over single application of the function body (f in our case). *) Lemma Fix_F_sub_rect (Q: forall x, P x -> Type) (inv: forall x: A, (forall (y: A) (H: R y x) (a: Acc R y), Q y (Fix_F_sub A R P f y a)) -> forall (a: Acc R x), Q x (f (fun y: {y: A | R y x} => Fix_F_sub A R P f (proj1_sig y) (Acc_inv a (proj2_sig y))))) : forall x a, Q _ (Fix_F_sub A R P f x a). Proof with auto. set (R' := fun (x: A) => forall a, Q _ (Fix_F_sub A R P f x a)). cut (forall x, R' x)... apply (well_founded_induction_type Rwf). subst R'. simpl. intros. rewrite F_unfold... Qed. (* Let's call f's second parameter its "lowers" function, since it provides it access to results for inputs with a lower measure. In preparation of lemma similar to Fix_F_sub_rect, but for Fix_sub, we first need an extra hypothesis stating that the function body has the same result for different "lowers" functions (g and h below) as long as those produce the same results for lower inputs, regardless of the lt proofs. *) Hypothesis equiv_lowers: forall x0 (g h: forall x: {y: A | R y x0}, P (proj1_sig x)), (forall x p p', g (exist (fun y: A => R y x0) x p) = h (exist _ x p')) -> f g = f h. (* From equiv_lowers, it follows that [Fix_F_sub A R P f x] applications do not not depend on the Acc proofs. *) Lemma eq_Fix_F_sub x (a a': Acc R x): Fix_F_sub A R P f x a = Fix_F_sub A R P f x a'. Proof. revert a'. 
pattern x, (Fix_F_sub A R P f x a). apply Fix_F_sub_rect. intros. rewrite F_unfold. apply equiv_lowers. intros. apply H. assumption. Qed. (* Finally, Fix_F_rect lets one prove a property of functions defined using Fix_F_sub by showing that property to be invariant over single application of the function body (f). *) Lemma Fix_sub_rect (Q: forall x, P x -> Type) (inv: forall (x: A) (H: forall (y: A), R y x -> Q y (Fix_sub A R Rwf P f y)) (a: Acc R x), Q x (f (fun y: {y: A | R y x} => Fix_sub A R Rwf P f (proj1_sig y)))) : forall x, Q _ (Fix_sub A R Rwf P f x). Proof with auto. unfold Fix_sub. intros. apply Fix_F_sub_rect. intros. assert (forall y: A, R y x0 -> Q y (Fix_F_sub A R P f y (Rwf y)))... set (inv x0 X0 a). clearbody q. rewrite <- (equiv_lowers (fun y: {y: A | R y x0} => Fix_F_sub A R P f (proj1_sig y) (Rwf (proj1_sig y))) (fun y: {y: A | R y x0} => Fix_F_sub A R P f (proj1_sig y) (Acc_inv a (proj2_sig y))))... intros. apply eq_Fix_F_sub. Qed. End Fix_rects. (** Tactic to fold a definition based on [Fix_measure_sub]. *) Ltac fold_sub f := match goal with | [ |- ?T ] => match T with appcontext C [ @Fix_sub _ _ _ _ _ ?arg ] => let app := context C [ f arg ] in change app end end. (** This module provides the fixpoint equation provided one assumes functional extensionality. *) Module WfExtensionality. Require Import FunctionalExtensionality. (** The two following lemmas allow to unfold a well-founded fixpoint definition without restriction using the functional extensionality axiom. *) (** For a function defined with Program using a well-founded order. *) Program Lemma fix_sub_eq_ext : forall (A : Type) (R : A -> A -> Prop) (Rwf : well_founded R) (P : A -> Type) (F_sub : forall x : A, (forall (y : A | R y x), P y) -> P x), forall x : A, Fix_sub A R Rwf P F_sub x = F_sub x (fun (y : A | R y x) => Fix_sub A R Rwf P F_sub y). Proof. intros ; apply Fix_eq ; auto. intros. assert(f = g). extensionality y ; apply H. rewrite H0 ; auto. Qed. (** Tactic to unfold once a definition based on [Fix_sub]. *) Ltac unfold_sub f fargs := set (call:=fargs) ; unfold f in call ; unfold call ; clear call ; rewrite fix_sub_eq_ext ; repeat fold_sub f ; simpl proj1_sig. End WfExtensionality. coq-8.4pl2/theories/Program/Equality.v0000640000175000001440000004032712010532755017052 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* is_ground T end. (** Try to find a contradiction. *) Hint Extern 10 => is_ground_goal ; progress exfalso : exfalso. (** We will use the [block] definition to separate the goal from the equalities generated by the tactic. *) Definition block {A : Type} (a : A) := a. Ltac block_goal := match goal with [ |- ?T ] => change (block T) end. Ltac unblock_goal := unfold block in *. (** Notation for heterogenous equality. *) Notation " x ~= y " := (@JMeq _ x _ y) (at level 70, no associativity). (** Do something on an heterogeneous equality appearing in the context. *) Ltac on_JMeq tac := match goal with | [ H : @JMeq ?x ?X ?y ?Y |- _ ] => tac H end. (** Try to apply [JMeq_eq] to get back a regular equality when the two types are equal. *) Ltac simpl_one_JMeq := on_JMeq ltac:(fun H => apply JMeq_eq in H). (** Repeat it for every possible hypothesis. *) Ltac simpl_JMeq := repeat simpl_one_JMeq. (** Just simplify an h.eq. without clearing it. *) Ltac simpl_one_dep_JMeq := on_JMeq ltac:(fun H => let H' := fresh "H" in assert (H' := JMeq_eq H)). Require Import Eqdep. 
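(** For example (a sketch, not part of the original file), [simpl_one_JMeq] turns a
    heterogeneous equality between terms of the same type back into a Leibniz equality:
[[
Goal forall x y : nat, x ~= y -> x = y.
Proof. intros x y H. simpl_one_JMeq. assumption. Qed.
]]
*)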
(** Simplify dependent equality using sigmas to equality of the second projections if possible. Uses UIP. *) Ltac simpl_existT := match goal with [ H : existT _ ?x _ = existT _ ?x _ |- _ ] => let Hi := fresh H in assert(Hi:=inj_pairT2 _ _ _ _ _ H) ; clear H end. Ltac simpl_existTs := repeat simpl_existT. (** Tries to eliminate a call to [eq_rect] (the substitution principle) by any means available. *) Ltac elim_eq_rect := match goal with | [ |- ?t ] => match t with | context [ @eq_rect _ _ _ _ _ ?p ] => let P := fresh "P" in set (P := p); simpl in P ; ((case P ; clear P) || (clearbody P; rewrite (UIP_refl _ _ P); clear P)) | context [ @eq_rect _ _ _ _ _ ?p _ ] => let P := fresh "P" in set (P := p); simpl in P ; ((case P ; clear P) || (clearbody P; rewrite (UIP_refl _ _ P); clear P)) end end. (** Rewrite using uniqueness of indentity proofs [H = eq_refl]. *) Ltac simpl_uip := match goal with [ H : ?X = ?X |- _ ] => rewrite (UIP_refl _ _ H) in *; clear H end. (** Simplify equalities appearing in the context and goal. *) Ltac simpl_eq := simpl ; unfold eq_rec_r, eq_rec ; repeat (elim_eq_rect ; simpl) ; repeat (simpl_uip ; simpl). (** Try to abstract a proof of equality, if no proof of the same equality is present in the context. *) Ltac abstract_eq_hyp H' p := let ty := type of p in let tyred := eval simpl in ty in match tyred with ?X = ?Y => match goal with | [ H : X = Y |- _ ] => fail 1 | _ => set (H':=p) ; try (change p with H') ; clearbody H' ; simpl in H' end end. (** Apply the tactic tac to proofs of equality appearing as coercion arguments. Just redefine this tactic (using [Ltac on_coerce_proof tac ::=]) to handle custom coercion operators. *) Ltac on_coerce_proof tac T := match T with | context [ eq_rect _ _ _ _ ?p ] => tac p end. Ltac on_coerce_proof_gl tac := match goal with [ |- ?T ] => on_coerce_proof tac T end. (** Abstract proofs of equalities of coercions. *) Ltac abstract_eq_proof := on_coerce_proof_gl ltac:(fun p => let H := fresh "eqH" in abstract_eq_hyp H p). Ltac abstract_eq_proofs := repeat abstract_eq_proof. (** Factorize proofs, by using proof irrelevance so that two proofs of the same equality in the goal become convertible. *) Ltac pi_eq_proof_hyp p := let ty := type of p in let tyred := eval simpl in ty in match tyred with ?X = ?Y => match goal with | [ H : X = Y |- _ ] => match p with | H => fail 2 | _ => rewrite (proof_irrelevance (X = Y) p H) end | _ => fail " No hypothesis with same type " end end. (** Factorize proofs of equality appearing as coercion arguments. *) Ltac pi_eq_proof := on_coerce_proof_gl pi_eq_proof_hyp. Ltac pi_eq_proofs := repeat pi_eq_proof. (** The two preceding tactics in sequence. *) Ltac clear_eq_proofs := abstract_eq_proofs ; pi_eq_proofs. Hint Rewrite <- eq_rect_eq : refl_id. (** The [refl_id] database should be populated with lemmas of the form [coerce_* t eq_refl = t]. *) Lemma JMeq_eq_refl {A} (x : A) : JMeq_eq (@JMeq_refl _ x) = eq_refl. Proof. apply proof_irrelevance. Qed. Lemma UIP_refl_refl A (x : A) : Eqdep.EqdepTheory.UIP_refl A x eq_refl = eq_refl. Proof. apply UIP_refl. Qed. Lemma inj_pairT2_refl A (x : A) (P : A -> Type) (p : P x) : Eqdep.EqdepTheory.inj_pairT2 A P x p p eq_refl = eq_refl. Proof. apply UIP_refl. Qed. Hint Rewrite @JMeq_eq_refl @UIP_refl_refl @inj_pairT2_refl : refl_id. Ltac rewrite_refl_id := autorewrite with refl_id. (** Clear the context and goal of equality proofs. *) Ltac clear_eq_ctx := rewrite_refl_id ; clear_eq_proofs. (** Reapeated elimination of [eq_rect] applications. 
Abstracting equalities makes it run much faster than an naive implementation. *) Ltac simpl_eqs := repeat (elim_eq_rect ; simpl ; clear_eq_ctx). (** Clear unused reflexivity proofs. *) Ltac clear_refl_eq := match goal with [ H : ?X = ?X |- _ ] => clear H end. Ltac clear_refl_eqs := repeat clear_refl_eq. (** Clear unused equality proofs. *) Ltac clear_eq := match goal with [ H : _ = _ |- _ ] => clear H end. Ltac clear_eqs := repeat clear_eq. (** Combine all the tactics to simplify goals containing coercions. *) Ltac simplify_eqs := simpl ; simpl_eqs ; clear_eq_ctx ; clear_refl_eqs ; try subst ; simpl ; repeat simpl_uip ; rewrite_refl_id. (** A tactic that tries to remove trivial equality guards in induction hypotheses coming from [dependent induction]/[generalize_eqs] invocations. *) Ltac simplify_IH_hyps := repeat match goal with | [ hyp : context [ block _ ] |- _ ] => specialize_eqs hyp end. (** We split substitution tactics in the two directions depending on which names we want to keep corresponding to the generalization performed by the [generalize_eqs] tactic. *) Ltac subst_left_no_fail := repeat (match goal with [ H : ?X = ?Y |- _ ] => subst X end). Ltac subst_right_no_fail := repeat (match goal with [ H : ?X = ?Y |- _ ] => subst Y end). Ltac inject_left H := progress (inversion H ; subst_left_no_fail ; clear_dups) ; clear H. Ltac inject_right H := progress (inversion H ; subst_right_no_fail ; clear_dups) ; clear H. Ltac autoinjections_left := repeat autoinjection ltac:inject_left. Ltac autoinjections_right := repeat autoinjection ltac:inject_right. Ltac simpl_depind := subst_no_fail ; autoinjections ; try discriminates ; simpl_JMeq ; simpl_existTs ; simplify_IH_hyps. Ltac simpl_depind_l := subst_left_no_fail ; autoinjections_left ; try discriminates ; simpl_JMeq ; simpl_existTs ; simplify_IH_hyps. Ltac simpl_depind_r := subst_right_no_fail ; autoinjections_right ; try discriminates ; simpl_JMeq ; simpl_existTs ; simplify_IH_hyps. Ltac blocked t := block_goal ; t ; unblock_goal. (** The [DependentEliminationPackage] provides the default dependent elimination principle to be used by the [equations] resolver. It is especially useful to register the dependent elimination principles for things in [Prop] which are not automatically generated. *) Class DependentEliminationPackage (A : Type) := { elim_type : Type ; elim : elim_type }. (** A higher-order tactic to apply a registered eliminator. *) Ltac elim_tac tac p := let ty := type of p in let eliminator := eval simpl in (elim (A:=ty)) in tac p eliminator. (** Specialization to do case analysis or induction. Note: the [equations] tactic tries [case] before [elim_case]: there is no need to register generated induction principles. *) Ltac elim_case p := elim_tac ltac:(fun p el => destruct p using el) p. Ltac elim_ind p := elim_tac ltac:(fun p el => induction p using el) p. (** Lemmas used by the simplifier, mainly rephrasings of [eq_rect], [eq_ind]. *) Lemma solution_left A (B : A -> Type) (t : A) : B t -> (forall x, x = t -> B x). Proof. intros; subst; assumption. Defined. Lemma solution_right A (B : A -> Type) (t : A) : B t -> (forall x, t = x -> B x). Proof. intros; subst; assumption. Defined. Lemma deletion A B (t : A) : B -> (t = t -> B). Proof. intros; assumption. Defined. Lemma simplification_heq A B (x y : A) : (x = y -> B) -> (JMeq x y -> B). Proof. intros H J; apply H; apply (JMeq_eq J). Defined. Definition conditional_eq {A} (x y : A) := eq x y. 
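(** As an illustration only (a sketch, not part of the original file), an eliminator can
    be registered with the [DependentEliminationPackage] class defined above, e.g. for [nat]:
[[
Instance nat_elim_package : DependentEliminationPackage nat :=
  { elim_type := forall P : nat -> Type,
      P 0 -> (forall n, P n -> P (S n)) -> forall n, P n ;
    elim := nat_rect }.
]]
    After such a declaration, [elim_tac] (and hence [elim_case], [elim_ind]) resolves
    [elim (A:=nat)] to [nat_rect]. *)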
Lemma simplification_existT2 A (P : A -> Type) B (p : A) (x y : P p) : (x = y -> B) -> (conditional_eq (existT P p x) (existT P p y) -> B). Proof. intros H E. apply H. apply inj_pair2. assumption. Defined. Lemma simplification_existT1 A (P : A -> Type) B (p q : A) (x : P p) (y : P q) : (p = q -> conditional_eq (existT P p x) (existT P q y) -> B) -> (existT P p x = existT P q y -> B). Proof. injection 2. auto. Defined. Lemma simplification_K A (x : A) (B : x = x -> Type) : B eq_refl -> (forall p : x = x, B p). Proof. intros. rewrite (UIP_refl A). assumption. Defined. (** This hint database and the following tactic can be used with [autounfold] to unfold everything to [eq_rect]s. *) Hint Unfold solution_left solution_right deletion simplification_heq simplification_existT1 simplification_existT2 simplification_K eq_rect_r eq_rec eq_ind : dep_elim. (** Using these we can make a simplifier that will perform the unification steps needed to put the goal in normalised form (provided there are only constructor forms). Compare with the lemma 16 of the paper. We don't have a [noCycle] procedure yet. *) Ltac simplify_one_dep_elim_term c := match c with | @JMeq _ _ _ _ -> _ => refine (simplification_heq _ _ _ _ _) | ?t = ?t -> _ => intros _ || refine (simplification_K _ t _ _) | eq (existT _ _ _) (existT _ _ _) -> _ => refine (simplification_existT1 _ _ _ _ _ _ _ _) | conditional_eq (existT _ _ _) (existT _ _ _) -> _ => refine (simplification_existT2 _ _ _ _ _ _ _) || (unfold conditional_eq; intro) | ?x = ?y -> _ => (* variables case *) (unfold x) || (unfold y) || (let hyp := fresh in intros hyp ; move hyp before x ; revert_until hyp ; generalize dependent x ; refine (solution_left _ _ _ _)(* ; intros until 0 *)) || (let hyp := fresh in intros hyp ; move hyp before y ; revert_until hyp ; generalize dependent y ; refine (solution_right _ _ _ _)(* ; intros until 0 *)) | ?f ?x = ?g ?y -> _ => let H := fresh in progress (intros H ; injection H ; clear H) | ?t = ?u -> _ => let hyp := fresh in intros hyp ; exfalso ; discriminate | ?x = ?y -> _ => let hyp := fresh in intros hyp ; (try (clear hyp ; (* If non dependent, don't clear it! *) fail 1)) ; case hyp ; clear hyp | block ?T => fail 1 (* Do not put any part of the rhs in the hyps *) | forall x, _ => intro x || (let H := fresh x in rename x into H ; intro x) (* Try to keep original names *) | _ => intro end. Ltac simplify_one_dep_elim := match goal with | [ |- ?gl ] => simplify_one_dep_elim_term gl end. (** Repeat until no progress is possible. By construction, it should leave the goal with no remaining equalities generated by the [generalize_eqs] tactic. *) Ltac simplify_dep_elim := repeat simplify_one_dep_elim. (** Do dependent elimination of the last hypothesis, but not simplifying yet (used internally). *) Ltac destruct_last := on_last_hyp ltac:(fun id => simpl in id ; generalize_eqs id ; destruct id). Ltac introduce p := first [ match p with _ => (* Already there, generalize dependent hyps *) generalize dependent p ; intros p end | intros until p | intros until 1 | intros ]. Ltac do_case p := introduce p ; (destruct p || elim_case p || (case p ; clear p)). Ltac do_ind p := introduce p ; (induction p || elim_ind p). (** The following tactics allow to do induction on an already instantiated inductive predicate by first generalizing it and adding the proper equalities to the context, in a maner similar to the BasicElim tactic of "Elimination with a motive" by Conor McBride. 
*) (** The [do_depelim] higher-order tactic takes an elimination tactic as argument and an hypothesis and starts a dependent elimination using this tactic. *) Ltac is_introduced H := match goal with | [ H' : _ |- _ ] => match H' with H => idtac end end. Tactic Notation "intro_block" hyp(H) := (is_introduced H ; block_goal ; revert_until H ; block_goal) || (let H' := fresh H in intros until H' ; block_goal) || (intros ; block_goal). Tactic Notation "intro_block_id" ident(H) := (is_introduced H ; block_goal ; revert_until H; block_goal) || (let H' := fresh H in intros until H' ; block_goal) || (intros ; block_goal). Ltac unblock_dep_elim := match goal with | |- block ?T => match T with context [ block _ ] => change T ; intros ; unblock_goal end | _ => unblock_goal end. Ltac simpl_dep_elim := simplify_dep_elim ; simplify_IH_hyps ; unblock_dep_elim. Ltac do_intros H := (try intros until H) ; (intro_block_id H || intro_block H). Ltac do_depelim_nosimpl tac H := do_intros H ; generalize_eqs H ; tac H. Ltac do_depelim tac H := do_depelim_nosimpl tac H ; simpl_dep_elim. Ltac do_depind tac H := (try intros until H) ; intro_block H ; generalize_eqs_vars H ; tac H ; simpl_dep_elim. (** To dependent elimination on some hyp. *) Ltac depelim id := do_depelim ltac:(fun hyp => do_case hyp) id. (** Used internally. *) Ltac depelim_nosimpl id := do_depelim_nosimpl ltac:(fun hyp => do_case hyp) id. (** To dependent induction on some hyp. *) Ltac depind id := do_depind ltac:(fun hyp => do_ind hyp) id. (** A variant where generalized variables should be given by the user. *) Ltac do_depelim' rev tac H := (try intros until H) ; block_goal ; rev H ; (try revert_until H ; block_goal) ; generalize_eqs H ; tac H ; simpl_dep_elim. (** Calls [destruct] on the generalized hypothesis, results should be similar to inversion. By default, we don't try to generalize the hyp by its variable indices. *) Tactic Notation "dependent" "destruction" ident(H) := do_depelim' ltac:(fun hyp => idtac) ltac:(fun hyp => do_case hyp) H. Tactic Notation "dependent" "destruction" ident(H) "using" constr(c) := do_depelim' ltac:(fun hyp => idtac) ltac:(fun hyp => destruct hyp using c) H. (** This tactic also generalizes the goal by the given variables before the elimination. *) Tactic Notation "dependent" "destruction" ident(H) "generalizing" ne_hyp_list(l) := do_depelim' ltac:(fun hyp => revert l) ltac:(fun hyp => do_case hyp) H. Tactic Notation "dependent" "destruction" ident(H) "generalizing" ne_hyp_list(l) "using" constr(c) := do_depelim' ltac:(fun hyp => revert l) ltac:(fun hyp => destruct hyp using c) H. (** Then we have wrappers for usual calls to induction. One can customize the induction tactic by writting another wrapper calling do_depelim. We suppose the hyp has to be generalized before calling [induction]. *) Tactic Notation "dependent" "induction" ident(H) := do_depind ltac:(fun hyp => do_ind hyp) H. Tactic Notation "dependent" "induction" ident(H) "using" constr(c) := do_depind ltac:(fun hyp => induction hyp using c) H. (** This tactic also generalizes the goal by the given variables before the induction. *) Tactic Notation "dependent" "induction" ident(H) "generalizing" ne_hyp_list(l) := do_depelim' ltac:(fun hyp => revert l) ltac:(fun hyp => do_ind hyp) H. Tactic Notation "dependent" "induction" ident(H) "generalizing" ne_hyp_list(l) "using" constr(c) := do_depelim' ltac:(fun hyp => revert l) ltac:(fun hyp => induction hyp using c) H. 
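(** Usage sketch (not part of the original file), following the reference manual's
    example for [dependent induction]:
[[
Lemma le_minus : forall n:nat, n < 1 -> n = 0.
Proof.
  intros n H ; dependent induction H.
  reflexivity.
  inversion H.
Qed.
]]
    A plain [induction H] would lose the information that the upper bound is [1],
    whereas [dependent induction] keeps it as an equation and simplifies it away. *)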
coq-8.4pl2/theories/Program/vo.itarget0000640000175000001440000000013611307752066017074 0ustar notinusersBasics.vo Combinators.vo Equality.vo Program.vo Subset.vo Syntax.vo Tactics.vo Utils.vo Wf.vo coq-8.4pl2/theories/Program/Combinators.v0000640000175000001440000000415612010532755017535 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* B), id ∘ f = f. Proof. intros. unfold id, compose. symmetry. apply eta_expansion. Qed. Lemma compose_id_right : forall A B (f : A -> B), f ∘ id = f. Proof. intros. unfold id, compose. symmetry ; apply eta_expansion. Qed. Lemma compose_assoc : forall A B C D (f : A -> B) (g : B -> C) (h : C -> D), h ∘ g ∘ f = h ∘ (g ∘ f). Proof. intros. reflexivity. Qed. Hint Rewrite @compose_id_left @compose_id_right : core. Hint Rewrite <- @compose_assoc : core. (** [flip] is involutive. *) Lemma flip_flip : forall A B C, @flip A B C ∘ flip = id. Proof. unfold flip, compose. intros. extensionality x ; extensionality y ; extensionality z. reflexivity. Qed. (** [prod_curry] and [prod_uncurry] are each others inverses. *) Lemma prod_uncurry_curry : forall A B C, @prod_uncurry A B C ∘ prod_curry = id. Proof. simpl ; intros. unfold prod_uncurry, prod_curry, compose. extensionality x ; extensionality y ; extensionality z. reflexivity. Qed. Lemma prod_curry_uncurry : forall A B C, @prod_curry A B C ∘ prod_uncurry = id. Proof. simpl ; intros. unfold prod_uncurry, prod_curry, compose. extensionality x ; extensionality p. destruct p ; simpl ; reflexivity. Qed. coq-8.4pl2/theories/Program/Utils.v0000640000175000001440000000372512010532755016356 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let (x,y) := anonymous in P)) (x ident, y ident, at level 10) : type_scope. (** Generates an obligation to prove False. *) Notation " ! " := (False_rect _ _) : program_scope. Delimit Scope program_scope with prg. (** Abbreviation for first projection and hiding of proofs of subset objects. *) Notation " ` t " := (proj1_sig t) (at level 10, t at next level) : program_scope. (** Coerces objects to their support before comparing them. *) Notation " x '`=' y " := ((x :>) = (y :>)) (at level 70) : program_scope. Require Import Coq.Bool.Sumbool. (** Construct a dependent disjunction from a boolean. *) Notation dec := sumbool_of_bool. (** The notations [in_right] and [in_left] construct objects of a dependent disjunction. *) (** Hide proofs and generates obligations when put in a term. *) Notation in_left := (@left _ _ _). Notation in_right := (@right _ _ _). (** Extraction directives *) (* Extraction Inline proj1_sig. Extract Inductive unit => "unit" [ "()" ]. Extract Inductive bool => "bool" [ "true" "false" ]. Extract Inductive sumbool => "bool" [ "true" "false" ]. (* Extract Inductive prod "'a" "'b" => " 'a * 'b " [ "(,)" ]. *) (* Extract Inductive sigT => "prod" [ "" ]. *) *) coq-8.4pl2/theories/Program/Basics.v0000640000175000001440000000336512010532755016462 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* C) (f : A -> B) := fun x : A => g (f x). Hint Unfold compose. Notation " g ∘ f " := (compose g f) (at level 40, left associativity) : program_scope. Local Open Scope program_scope. 
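(** For example (a sketch, not part of the original file), composition can be used
    directly on the [nat] successor function:
[[
Definition plus2 : nat -> nat := S ∘ S.
Eval compute in plus2 3.  (* = 5 *)
]]
*)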
(** The non-dependent function space between [A] and [B]. *) Definition arrow (A B : Type) := A -> B. (** Logical implication. *) Definition impl (A B : Prop) : Prop := A -> B. (** The constant function [const a] always returns [a]. *) Definition const {A B} (a : A) := fun _ : B => a. (** The [flip] combinator reverses the first two arguments of a function. *) Definition flip {A B C} (f : A -> B -> C) x y := f y x. (** Application as a combinator. *) Definition apply {A B} (f : A -> B) (x : A) := f x. (** Curryfication of [prod] is defined in [Logic.Datatypes]. *) Arguments prod_curry {A B C} f p. Arguments prod_uncurry {A B C} f x y. coq-8.4pl2/theories/Program/Subset.v0000640000175000001440000000671412010532755016524 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* try on_subset_proof_aux tac P ; tac p end. Ltac on_subset_proof tac := match goal with [ |- ?T ] => on_subset_proof_aux tac T end. Ltac abstract_any_hyp H' p := match type of p with ?X => match goal with | [ H : X |- _ ] => fail 1 | _ => set (H':=p) ; try (change p with H') ; clearbody H' end end. Ltac abstract_subset_proof := on_subset_proof ltac:(fun p => let H := fresh "eqH" in abstract_any_hyp H p ; simpl in H). Ltac abstract_subset_proofs := repeat abstract_subset_proof. Ltac pi_subset_proof_hyp p := match type of p with ?X => match goal with | [ H : X |- _ ] => match p with | H => fail 2 | _ => rewrite (proof_irrelevance X p H) end | _ => fail " No hypothesis with same type " end end. Ltac pi_subset_proof := on_subset_proof pi_subset_proof_hyp. Ltac pi_subset_proofs := repeat pi_subset_proof. (** The two preceding tactics in sequence. *) Ltac clear_subset_proofs := abstract_subset_proofs ; simpl in * |- ; pi_subset_proofs ; clear_dups. Ltac pi := repeat progress f_equal ; apply proof_irrelevance. Lemma subset_eq : forall A (P : A -> Prop) (n m : sig P), n = m <-> `n = `m. Proof. induction n. induction m. simpl. split ; intros ; subst. inversion H. reflexivity. pi. Qed. (* Somewhat trivial definition, but not unfolded automatically hence we can match on [match_eq ?A ?B ?x ?f] in tactics. *) Definition match_eq (A B : Type) (x : A) (fn : forall (y : A | y = x), B) : B := fn (exist _ x eq_refl). (* This is what we want to be able to do: replace the originaly matched object by a new, propositionally equal one. If [fn] works on [x] it should work on any [y | y = x]. *) Lemma match_eq_rewrite : forall (A B : Type) (x : A) (fn : forall (y : A | y = x), B) (y : A | y = x), match_eq A B x fn = fn y. Proof. intros. unfold match_eq. f_equal. destruct y. (* uses proof-irrelevance *) apply <- subset_eq. symmetry. assumption. Qed. (** Now we make a tactic to be able to rewrite a term [t] which is applied to a [match_eq] using an arbitrary equality [t = u], and [u] is now the subject of the [match]. *) Ltac rewrite_match_eq H := match goal with [ |- ?T ] => match T with context [ match_eq ?A ?B ?t ?f ] => rewrite (match_eq_rewrite A B t f (exist _ _ (eq_sym H))) end end. (** Otherwise we can simply unfold [match_eq] and the term trivially reduces to the original definition. *) Ltac simpl_match_eq := unfold match_eq ; simpl. coq-8.4pl2/theories/Program/Syntax.v0000640000175000001440000000266712010532755016550 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* idtac T end. 
Ltac show_hyp id := match goal with | [ H := ?b : ?T |- _ ] => match H with | id => idtac id ":=" b ":" T end | [ H : ?T |- _ ] => match H with | id => idtac id ":" T end end. Ltac show_hyps := try match reverse goal with | [ H : ?T |- _ ] => show_hyp H ; fail end. (** The [do] tactic but using a Coq-side nat. *) Ltac do_nat n tac := match n with | 0 => idtac | S ?n' => tac ; do_nat n' tac end. (** Do something on the last hypothesis, or fail *) Ltac on_last_hyp tac := match goal with [ H : _ |- _ ] => first [ tac H | fail 1 ] end. (** Destructs one pair, without care regarding naming. *) Ltac destruct_one_pair := match goal with | [H : (_ /\ _) |- _] => destruct H | [H : prod _ _ |- _] => destruct H end. (** Repeateadly destruct pairs. *) Ltac destruct_pairs := repeat (destruct_one_pair). (** Destruct one existential package, keeping the name of the hypothesis for the first component. *) Ltac destruct_one_ex := let tac H := let ph := fresh "H" in (destruct H as [H ph]) in let tac2 H := let ph := fresh "H" in let ph' := fresh "H" in (destruct H as [H ph ph']) in let tacT H := let ph := fresh "X" in (destruct H as [H ph]) in let tacT2 H := let ph := fresh "X" in let ph' := fresh "X" in (destruct H as [H ph ph']) in match goal with | [H : (ex _) |- _] => tac H | [H : (sig ?P) |- _ ] => tac H | [H : (sigT ?P) |- _ ] => tacT H | [H : (ex2 _ _) |- _] => tac2 H | [H : (sig2 ?P _) |- _ ] => tac2 H | [H : (sigT2 ?P _) |- _ ] => tacT2 H end. (** Repeateadly destruct existentials. *) Ltac destruct_exists := repeat (destruct_one_ex). (** Repeateadly destruct conjunctions and existentials. *) Ltac destruct_conjs := repeat (destruct_one_pair || destruct_one_ex). (** Destruct an existential hypothesis [t] keeping its name for the first component and using [Ht] for the second *) Tactic Notation "destruct" "exist" ident(t) ident(Ht) := destruct t as [t Ht]. (** Destruct a disjunction keeping its name in both subgoals. *) Tactic Notation "destruct" "or" ident(H) := destruct H as [H|H]. (** Discriminate that also work on a [x <> x] hypothesis. *) Ltac discriminates := match goal with | [ H : ?x <> ?x |- _ ] => elim H ; reflexivity | _ => discriminate end. (** Revert the last hypothesis. *) Ltac revert_last := match goal with [ H : _ |- _ ] => revert H end. (** Repeatedly reverse the last hypothesis, putting everything in the goal. *) Ltac reverse := repeat revert_last. (** Reverse everything up to hypothesis id (not included). *) Ltac revert_until id := on_last_hyp ltac:(fun id' => match id' with | id => idtac | _ => revert id' ; revert_until id end). (** Clear duplicated hypotheses *) Ltac clear_dup := match goal with | [ H : ?X |- _ ] => match goal with | [ H' : ?Y |- _ ] => match H with | H' => fail 2 | _ => unify X Y ; (clear H' || clear H) end end end. Ltac clear_dups := repeat clear_dup. (** Try to clear everything except some hyp *) Ltac clear_except hyp := repeat match goal with [ H : _ |- _ ] => match H with | hyp => fail 1 | _ => clear H end end. (** A non-failing subst that substitutes as much as possible. *) Ltac subst_no_fail := repeat (match goal with [ H : ?X = ?Y |- _ ] => subst X || subst Y end). Tactic Notation "subst" "*" := subst_no_fail. 
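(** For example (a sketch, not part of the original file), [destruct_conjs] flattens
    nested existentials and conjunctions in the context:
[[
Goal forall (A : Type) (P Q : A -> Prop),
  (exists x, P x /\ Q x) -> exists x, Q x.
Proof. intros A P Q HPQ. destruct_conjs. eexists; eassumption. Qed.
]]
*)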
Ltac on_application f tac T := match T with | context [f ?x ?y ?z ?w ?v ?u ?a ?b ?c] => tac (f x y z w v u a b c) | context [f ?x ?y ?z ?w ?v ?u ?a ?b] => tac (f x y z w v u a b) | context [f ?x ?y ?z ?w ?v ?u ?a] => tac (f x y z w v u a) | context [f ?x ?y ?z ?w ?v ?u] => tac (f x y z w v u) | context [f ?x ?y ?z ?w ?v] => tac (f x y z w v) | context [f ?x ?y ?z ?w] => tac (f x y z w) | context [f ?x ?y ?z] => tac (f x y z) | context [f ?x ?y] => tac (f x y) | context [f ?x] => tac (f x) end. (** A variant of [apply] using [refine], doing as much conversion as necessary. *) Ltac rapply p := refine (p _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _ _) || refine (p _ _ _ _ _ _) || refine (p _ _ _ _ _) || refine (p _ _ _ _) || refine (p _ _ _) || refine (p _ _) || refine (p _) || refine p. (** Tactical [on_call f tac] applies [tac] on any application of [f] in the hypothesis or goal. *) Ltac on_call f tac := match goal with | |- ?T => on_application f tac T | H : ?T |- _ => on_application f tac T end. (* Destructs calls to f in hypothesis or conclusion, useful if f creates a subset object. *) Ltac destruct_call f := let tac t := (destruct t) in on_call f tac. Ltac destruct_calls f := repeat destruct_call f. Ltac destruct_call_in f H := let tac t := (destruct t) in let T := type of H in on_application f tac T. Ltac destruct_call_as f l := let tac t := (destruct t as l) in on_call f tac. Ltac destruct_call_as_in f l H := let tac t := (destruct t as l) in let T := type of H in on_application f tac T. Tactic Notation "destruct_call" constr(f) := destruct_call f. (** Permit to name the results of destructing the call to [f]. *) Tactic Notation "destruct_call" constr(f) "as" simple_intropattern(l) := destruct_call_as f l. (** Specify the hypothesis in which the call occurs as well. *) Tactic Notation "destruct_call" constr(f) "in" hyp(id) := destruct_call_in f id. Tactic Notation "destruct_call" constr(f) "as" simple_intropattern(l) "in" hyp(id) := destruct_call_as_in f l id. (** A marker for prototypes to destruct. *) Definition fix_proto {A : Type} (a : A) := a. Ltac destruct_rec_calls := match goal with | [ H : fix_proto _ |- _ ] => destruct_calls H ; clear H end. Ltac destruct_all_rec_calls := repeat destruct_rec_calls ; unfold fix_proto in *. (** Try to inject any potential constructor equality hypothesis. *) Ltac autoinjection tac := match goal with | [ H : ?f ?a = ?f' ?a' |- _ ] => tac H end. Ltac inject H := progress (inversion H ; subst*; clear_dups) ; clear H. Ltac autoinjections := repeat (clear_dups ; autoinjection ltac:inject). (** Destruct an hypothesis by first copying it to avoid dependencies. *) Ltac destruct_nondep H := let H0 := fresh "H" in assert(H0 := H); destruct H0. (** If bang appears in the goal, it means that we have a proof of False and the goal is solved. *) Ltac bang := match goal with | |- ?x => match x with | appcontext [False_rect _ ?p] => elim p end end. (** A tactic to show contradiction by first asserting an automatically provable hypothesis. *) Tactic Notation "contradiction" "by" constr(t) := let H := fresh in assert t as H by auto with * ; contradiction. (** A tactic that adds [H:=p:typeof(p)] to the context if no hypothesis of the same type appears in the goal. Useful to do saturation using tactics. 
*) Ltac add_hypothesis H' p := match type of p with ?X => match goal with | [ H : X |- _ ] => fail 1 | _ => set (H':=p) ; try (change p with H') ; clearbody H' end end. (** A tactic to replace an hypothesis by another term. *) Ltac replace_hyp H c := let H' := fresh "H" in assert(H' := c) ; clear H ; rename H' into H. (** A tactic to refine an hypothesis by supplying some of its arguments. *) Ltac refine_hyp c := let tac H := replace_hyp H c in match c with | ?H _ => tac H | ?H _ _ => tac H | ?H _ _ _ => tac H | ?H _ _ _ _ => tac H | ?H _ _ _ _ _ => tac H | ?H _ _ _ _ _ _ => tac H | ?H _ _ _ _ _ _ _ => tac H | ?H _ _ _ _ _ _ _ _ => tac H end. (** The default simplification tactic used by Program is defined by [program_simpl], sometimes [auto] is not enough, better rebind using [Obligation Tactic := tac] in this case, possibly using [program_simplify] to use standard goal-cleaning tactics. *) Ltac program_simplify := simpl; intros ; destruct_all_rec_calls ; repeat (destruct_conjs; simpl proj1_sig in * ); subst*; autoinjections ; try discriminates ; try (solve [ red ; intros ; destruct_conjs ; autoinjections ; discriminates ]). (** Restrict automation to propositional obligations. *) Ltac program_solve_wf := match goal with | |- well_founded _ => auto with * | |- ?T => match type of T with Prop => auto end end. Create HintDb program discriminated. Ltac program_simpl := program_simplify ; try typeclasses eauto with program ; try program_solve_wf. Obligation Tactic := program_simpl. Definition obligation (A : Type) {a : A} := a.coq-8.4pl2/theories/Arith/0000750000175000001440000000000012127276547014534 5ustar notinuserscoq-8.4pl2/theories/Arith/Peano_dec.v0000640000175000001440000000364112010532755016570 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* m}. Proof. induction n; destruct m; auto. elim (IHn m); auto. Defined. Hint Resolve O_or_S eq_nat_dec: arith. Theorem dec_eq_nat : forall n m, decidable (n = m). intros x y; unfold decidable; elim (eq_nat_dec x y); auto with arith. Defined. Definition UIP_nat:= Eqdep_dec.UIP_dec eq_nat_dec. Lemma le_unique: forall m n (h1 h2: m <= n), h1 = h2. Proof. fix 3. refine (fun m _ h1 => match h1 as h' in _ <= k return forall hh: m <= k, h' = hh with le_n => _ |le_S i H => _ end). refine (fun hh => match hh as h' in _ <= k return forall eq: m = k, le_n m = match eq in _ = p return m <= p -> m <= m with |eq_refl => fun bli => bli end h' with |le_n => fun eq => _ |le_S j H' => fun eq => _ end eq_refl). rewrite (UIP_nat _ _ eq eq_refl). reflexivity. subst m. destruct (Lt.lt_irrefl j H'). refine (fun hh => match hh as h' in _ <= k return match k as k' return m <= k' -> Prop with |0 => fun _ => True |S i' => fun h'' => forall H':m <= i', le_S m i' H' = h'' end h' with |le_n => _ |le_S j H2 => fun H' => _ end H). destruct m. exact I. intros; destruct (Lt.lt_irrefl m H'). f_equal. apply le_unique. Qed. coq-8.4pl2/theories/Arith/vo.itarget0000640000175000001440000000031011334317442016521 0ustar notinusersArith_base.vo Arith.vo Between.vo Bool_nat.vo Compare_dec.vo Compare.vo Div2.vo EqNat.vo Euclid.vo Even.vo Factorial.vo Gt.vo Le.vo Lt.vo Max.vo Minus.vo Min.vo Mult.vo Peano_dec.vo Plus.vo Wf_nat.vo coq-8.4pl2/theories/Arith/Wf_nat.v0000640000175000001440000001607712010532755016140 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* nat. 
Definition ltof (a b:A) := f a < f b. Definition gtof (a b:A) := f b > f a. Theorem well_founded_ltof : well_founded ltof. Proof. red. cut (forall n (a:A), f a < n -> Acc ltof a). intros H a; apply (H (S (f a))); auto with arith. induction n. intros; absurd (f a < 0); auto with arith. intros a ltSma. apply Acc_intro. unfold ltof; intros b ltfafb. apply IHn. apply lt_le_trans with (f a); auto with arith. Defined. Theorem well_founded_gtof : well_founded gtof. Proof. exact well_founded_ltof. Defined. (** It is possible to directly prove the induction principle going back to primitive recursion on natural numbers ([induction_ltof1]) or to use the previous lemmas to extract a program with a fixpoint ([induction_ltof2]) the ML-like program for [induction_ltof1] is : [[ let induction_ltof1 f F a = let rec indrec n k = match n with | O -> error | S m -> F k (indrec m) in indrec (f a + 1) a ]] the ML-like program for [induction_ltof2] is : [[ let induction_ltof2 F a = indrec a where rec indrec a = F a indrec;; ]] *) Theorem induction_ltof1 : forall P:A -> Set, (forall x:A, (forall y:A, ltof y x -> P y) -> P x) -> forall a:A, P a. Proof. intros P F; cut (forall n (a:A), f a < n -> P a). intros H a; apply (H (S (f a))); auto with arith. induction n. intros; absurd (f a < 0); auto with arith. intros a ltSma. apply F. unfold ltof; intros b ltfafb. apply IHn. apply lt_le_trans with (f a); auto with arith. Defined. Theorem induction_gtof1 : forall P:A -> Set, (forall x:A, (forall y:A, gtof y x -> P y) -> P x) -> forall a:A, P a. Proof. exact induction_ltof1. Defined. Theorem induction_ltof2 : forall P:A -> Set, (forall x:A, (forall y:A, ltof y x -> P y) -> P x) -> forall a:A, P a. Proof. exact (well_founded_induction well_founded_ltof). Defined. Theorem induction_gtof2 : forall P:A -> Set, (forall x:A, (forall y:A, gtof y x -> P y) -> P x) -> forall a:A, P a. Proof. exact induction_ltof2. Defined. (** If a relation [R] is compatible with [lt] i.e. if [x R y => f(x) < f(y)] then [R] is well-founded. *) Variable R : A -> A -> Prop. Hypothesis H_compat : forall x y:A, R x y -> f x < f y. Theorem well_founded_lt_compat : well_founded R. Proof. red. cut (forall n (a:A), f a < n -> Acc R a). intros H a; apply (H (S (f a))); auto with arith. induction n. intros; absurd (f a < 0); auto with arith. intros a ltSma. apply Acc_intro. intros b ltfafb. apply IHn. apply lt_le_trans with (f a); auto with arith. Defined. End Well_founded_Nat. Lemma lt_wf : well_founded lt. Proof. exact (well_founded_ltof nat (fun m => m)). Defined. Lemma lt_wf_rec1 : forall n (P:nat -> Set), (forall n, (forall m, m < n -> P m) -> P n) -> P n. Proof. exact (fun p P F => induction_ltof1 nat (fun m => m) P F p). Defined. Lemma lt_wf_rec : forall n (P:nat -> Set), (forall n, (forall m, m < n -> P m) -> P n) -> P n. Proof. exact (fun p P F => induction_ltof2 nat (fun m => m) P F p). Defined. Lemma lt_wf_ind : forall n (P:nat -> Prop), (forall n, (forall m, m < n -> P m) -> P n) -> P n. Proof. intro p; intros; elim (lt_wf p); auto with arith. Qed. Lemma gt_wf_rec : forall n (P:nat -> Set), (forall n, (forall m, n > m -> P m) -> P n) -> P n. Proof. exact lt_wf_rec. Defined. Lemma gt_wf_ind : forall n (P:nat -> Prop), (forall n, (forall m, n > m -> P m) -> P n) -> P n. Proof lt_wf_ind. Lemma lt_wf_double_rec : forall P:nat -> nat -> Set, (forall n m, (forall p q, p < n -> P p q) -> (forall p, p < m -> P n p) -> P n m) -> forall n m, P n m. Proof. intros P Hrec p; pattern p; apply lt_wf_rec. 
intros n H q; pattern q; apply lt_wf_rec; auto with arith. Defined. Lemma lt_wf_double_ind : forall P:nat -> nat -> Prop, (forall n m, (forall p (q:nat), p < n -> P p q) -> (forall p, p < m -> P n p) -> P n m) -> forall n m, P n m. Proof. intros P Hrec p; pattern p; apply lt_wf_ind. intros n H q; pattern q; apply lt_wf_ind; auto with arith. Qed. Hint Resolve lt_wf: arith. Hint Resolve well_founded_lt_compat: arith. Section LT_WF_REL. Variable A : Set. Variable R : A -> A -> Prop. (* Relational form of inversion *) Variable F : A -> nat -> Prop. Definition inv_lt_rel x y := exists2 n, F x n & (forall m, F y m -> n < m). Hypothesis F_compat : forall x y:A, R x y -> inv_lt_rel x y. Remark acc_lt_rel : forall x:A, (exists n, F x n) -> Acc R x. Proof. intros x [n fxn]; generalize dependent x. pattern n; apply lt_wf_ind; intros. constructor; intros. destruct (F_compat y x) as (x0,H1,H2); trivial. apply (H x0); auto. Qed. Theorem well_founded_inv_lt_rel_compat : well_founded R. Proof. constructor; intros. case (F_compat y a); trivial; intros. apply acc_lt_rel; trivial. exists x; trivial. Qed. End LT_WF_REL. Lemma well_founded_inv_rel_inv_lt_rel : forall (A:Set) (F:A -> nat -> Prop), well_founded (inv_lt_rel A F). intros; apply (well_founded_inv_lt_rel_compat A (inv_lt_rel A F) F); trivial. Qed. (** A constructive proof that any non empty decidable subset of natural numbers has a least element *) Set Implicit Arguments. Require Import Le. Require Import Compare_dec. Require Import Decidable. Definition has_unique_least_element (A:Type) (R:A->A->Prop) (P:A->Prop) := exists! x, P x /\ forall x', P x' -> R x x'. Lemma dec_inh_nat_subset_has_unique_least_element : forall P:nat->Prop, (forall n, P n \/ ~ P n) -> (exists n, P n) -> has_unique_least_element le P. Proof. intros P Pdec (n0,HPn0). assert (forall n, (exists n', n' n'<=n'') \/(forall n', P n' -> n<=n')). induction n. right. intros n' Hn'. apply le_O_n. destruct IHn. left; destruct H as (n', (Hlt', HPn')). exists n'; split. apply lt_S; assumption. assumption. destruct (Pdec n). left; exists n; split. apply lt_n_Sn. split; assumption. right. intros n' Hltn'. destruct (le_lt_eq_dec n n') as [Hltn|Heqn]. apply H; assumption. assumption. destruct H0. rewrite Heqn; assumption. destruct (H n0) as [(n,(Hltn,(Hmin,Huniqn)))|]; [exists n | exists n0]; repeat split; assumption || intros n' (HPn',Hminn'); apply le_antisym; auto. Qed. Unset Implicit Arguments. Notation iter_nat := @nat_iter (only parsing). Notation iter_nat_plus := @nat_iter_plus (only parsing). Notation iter_nat_invariant := @nat_iter_invariant (only parsing). coq-8.4pl2/theories/Arith/Lt.v0000640000175000001440000001056212010532755015272 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* > *) Require Import Le. Local Open Scope nat_scope. Implicit Types m n p : nat. (** * Irreflexivity *) Theorem lt_irrefl : forall n, ~ n < n. Proof le_Sn_n. Hint Resolve lt_irrefl: arith v62. (** * Relationship between [le] and [lt] *) Theorem lt_le_S : forall n m, n < m -> S n <= m. Proof. auto with arith. Qed. Hint Immediate lt_le_S: arith v62. Theorem lt_n_Sm_le : forall n m, n < S m -> n <= m. Proof. auto with arith. Qed. Hint Immediate lt_n_Sm_le: arith v62. Theorem le_lt_n_Sm : forall n m, n <= m -> n < S m. Proof. auto with arith. Qed. Hint Immediate le_lt_n_Sm: arith v62. Theorem le_not_lt : forall n m, n <= m -> ~ m < n. Proof. induction 1; auto with arith. Qed. 
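(** For example (a sketch, not part of the original file), [le_not_lt] refutes
    impossible strict bounds:
[[
Goal forall n, ~ S n < n.
Proof. intro n; apply le_not_lt; auto with arith. Qed.
]]
*)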
Theorem lt_not_le : forall n m, n < m -> ~ m <= n. Proof. red; intros n m Lt Le; exact (le_not_lt m n Le Lt). Qed. Hint Immediate le_not_lt lt_not_le: arith v62. (** * Asymmetry *) Theorem lt_asym : forall n m, n < m -> ~ m < n. Proof. induction 1; auto with arith. Qed. (** * Order and successor *) Theorem lt_n_Sn : forall n, n < S n. Proof. auto with arith. Qed. Hint Resolve lt_n_Sn: arith v62. Theorem lt_S : forall n m, n < m -> n < S m. Proof. auto with arith. Qed. Hint Resolve lt_S: arith v62. Theorem lt_n_S : forall n m, n < m -> S n < S m. Proof. auto with arith. Qed. Hint Resolve lt_n_S: arith v62. Theorem lt_S_n : forall n m, S n < S m -> n < m. Proof. auto with arith. Qed. Hint Immediate lt_S_n: arith v62. Theorem lt_0_Sn : forall n, 0 < S n. Proof. auto with arith. Qed. Hint Resolve lt_0_Sn: arith v62. Theorem lt_n_0 : forall n, ~ n < 0. Proof le_Sn_0. Hint Resolve lt_n_0: arith v62. (** * Predecessor *) Lemma S_pred : forall n m, m < n -> n = S (pred n). Proof. induction 1; auto with arith. Qed. Lemma lt_pred : forall n m, S n < m -> n < pred m. Proof. induction 1; simpl; auto with arith. Qed. Hint Immediate lt_pred: arith v62. Lemma lt_pred_n_n : forall n, 0 < n -> pred n < n. destruct 1; simpl; auto with arith. Qed. Hint Resolve lt_pred_n_n: arith v62. (** * Transitivity properties *) Theorem lt_trans : forall n m p, n < m -> m < p -> n < p. Proof. induction 2; auto with arith. Qed. Theorem lt_le_trans : forall n m p, n < m -> m <= p -> n < p. Proof. induction 2; auto with arith. Qed. Theorem le_lt_trans : forall n m p, n <= m -> m < p -> n < p. Proof. induction 2; auto with arith. Qed. Hint Resolve lt_trans lt_le_trans le_lt_trans: arith v62. (** * Large = strict or equal *) Theorem le_lt_or_eq : forall n m, n <= m -> n < m \/ n = m. Proof. induction 1; auto with arith. Qed. Theorem le_lt_or_eq_iff : forall n m, n <= m <-> n < m \/ n = m. Proof. split. intros; apply le_lt_or_eq; auto. destruct 1; subst; auto with arith. Qed. Theorem lt_le_weak : forall n m, n < m -> n <= m. Proof. auto with arith. Qed. Hint Immediate lt_le_weak: arith v62. (** * Dichotomy *) Theorem le_or_lt : forall n m, n <= m \/ m < n. Proof. intros n m; pattern n, m; apply nat_double_ind; auto with arith. induction 1; auto with arith. Qed. Theorem nat_total_order : forall n m, n <> m -> n < m \/ m < n. Proof. intros m n diff. elim (le_or_lt n m); [ intro H'0 | auto with arith ]. elim (le_lt_or_eq n m); auto with arith. intro H'; elim diff; auto with arith. Qed. (** * Comparison to 0 *) Theorem neq_0_lt : forall n, 0 <> n -> 0 < n. Proof. induction n; auto with arith. intros; absurd (0 = 0); trivial with arith. Qed. Hint Immediate neq_0_lt: arith v62. Theorem lt_0_neq : forall n, 0 < n -> 0 <> n. Proof. induction 1; auto with arith. Qed. Hint Immediate lt_0_neq: arith v62. (* begin hide *) Notation lt_O_Sn := lt_0_Sn (only parsing). Notation neq_O_lt := neq_0_lt (only parsing). Notation lt_O_neq := lt_0_neq (only parsing). Notation lt_n_O := lt_n_0 (only parsing). (* end hide *) coq-8.4pl2/theories/Arith/Mult.v0000640000175000001440000001533412010532755015636 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n = 0 \/ m = 0. Proof. destruct n as [| n]; simpl; intros m H. left; trivial. right; apply plus_is_O in H; destruct H; trivial. Qed. Lemma mult_is_one : forall n m, n * m = 1 -> n = 1 /\ m = 1. Proof. destruct n as [|n]; simpl; intros m H. edestruct O_S; eauto. 
destruct plus_is_one with (1:=H) as [[-> Hnm] | [-> Hnm]]. simpl in H; rewrite mult_0_r in H; elim (O_S _ H). rewrite mult_1_r in Hnm; auto. Qed. (** ** Multiplication and successor *) Lemma mult_succ_l : forall n m:nat, S n * m = n * m + m. Proof. intros; simpl. rewrite plus_comm. reflexivity. Qed. Lemma mult_succ_r : forall n m:nat, n * S m = n * m + n. Proof. induction n as [| p H]; intro m; simpl. reflexivity. rewrite H, <- plus_n_Sm; apply f_equal; rewrite plus_assoc; reflexivity. Qed. (** * Compatibility with orders *) Lemma mult_O_le : forall n m, m = 0 \/ n <= m * n. Proof. induction m; simpl; auto with arith. Qed. Hint Resolve mult_O_le: arith v62. Lemma mult_le_compat_l : forall n m p, n <= m -> p * n <= p * m. Proof. induction p as [| p IHp]; intros; simpl. apply le_n. auto using plus_le_compat. Qed. Hint Resolve mult_le_compat_l: arith. Lemma mult_le_compat_r : forall n m p, n <= m -> n * p <= m * p. Proof. intros m n p H; rewrite mult_comm, (mult_comm n); auto with arith. Qed. Lemma mult_le_compat : forall n m p (q:nat), n <= m -> p <= q -> n * p <= m * q. Proof. intros m n p q Hmn Hpq; induction Hmn. induction Hpq. (* m*p<=m*p *) apply le_n. (* m*p<=m*m0 -> m*p<=m*(S m0) *) rewrite <- mult_n_Sm; apply le_trans with (m * m0). assumption. apply le_plus_l. (* m*p<=m0*q -> m*p<=(S m0)*q *) simpl; apply le_trans with (m0 * q). assumption. apply le_plus_r. Qed. Lemma mult_S_lt_compat_l : forall n m p, m < p -> S n * m < S n * p. Proof. induction n; intros; simpl in *. rewrite <- 2 plus_n_O; assumption. auto using plus_lt_compat. Qed. Hint Resolve mult_S_lt_compat_l: arith. Lemma mult_lt_compat_l : forall n m p, n < m -> 0 < p -> p * n < p * m. Proof. intros m n p H Hp. destruct p. elim (lt_irrefl _ Hp). now apply mult_S_lt_compat_l. Qed. Lemma mult_lt_compat_r : forall n m p, n < m -> 0 < p -> n * p < m * p. Proof. intros m n p H Hp. destruct p. elim (lt_irrefl _ Hp). rewrite (mult_comm m), (mult_comm n). now apply mult_S_lt_compat_l. Qed. Lemma mult_S_le_reg_l : forall n m p, S n * m <= S n * p -> m <= p. Proof. intros m n p H; destruct (le_or_lt n p). trivial. assert (H1:S m * n < S m * n). apply le_lt_trans with (m := S m * p). assumption. apply mult_S_lt_compat_l. assumption. elim (lt_irrefl _ H1). Qed. (** * n|->2*n and n|->2n+1 have disjoint image *) Theorem odd_even_lem : forall p q, 2 * p + 1 <> 2 * q. Proof. induction p; destruct q. discriminate. simpl; rewrite plus_comm. discriminate. discriminate. intro H0; destruct (IHp q). replace (2 * q) with (2 * S q - 2). rewrite <- H0; simpl. repeat rewrite (fun x y => plus_comm x (S y)); simpl; auto. simpl; rewrite (fun y => plus_comm q (S y)); destruct q; simpl; auto. Qed. (** * Tail-recursive mult *) (** [tail_mult] is an alternative definition for [mult] which is tail-recursive, whereas [mult] is not. This can be useful when extracting programs. *) Fixpoint mult_acc (s:nat) m n : nat := match n with | O => s | S p => mult_acc (tail_plus m s) m p end. Lemma mult_acc_aux : forall n m p, m + n * p = mult_acc m p n. Proof. induction n as [| p IHp]; simpl; auto. intros s m; rewrite <- plus_tail_plus; rewrite <- IHp. rewrite <- plus_assoc_reverse; apply f_equal2; auto. rewrite plus_comm; auto. Qed. Definition tail_mult n m := mult_acc 0 m n. Lemma mult_tail_mult : forall n m, n * m = tail_mult n m. Proof. intros; unfold tail_mult; rewrite <- mult_acc_aux; auto. Qed. 
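(** For example (a sketch, not part of the original file):
[[
Eval compute in tail_mult 3 4.  (* = 12, computed with an accumulator *)
Goal tail_mult 3 4 = 3 * 4.
Proof. symmetry; apply mult_tail_mult. Qed.
]]
*)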
(** [TailSimpl] transforms any [tail_plus] and [tail_mult] into [plus] and [mult] and simplify *) Ltac tail_simpl := repeat rewrite <- plus_tail_plus; repeat rewrite <- mult_tail_mult; simpl. coq-8.4pl2/theories/Arith/Plus.v0000640000175000001440000001314511776416531015651 0ustar notinusers (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* m | S p => S (p + m) end where "n + m" := (plus n m) : nat_scope. >> *) Require Import Le. Require Import Lt. Local Open Scope nat_scope. Implicit Types m n p q : nat. (** * Zero is neutral Deprecated : Already in Init/Peano.v *) Notation plus_0_l := plus_O_n (only parsing). Definition plus_0_r n := eq_sym (plus_n_O n). (** * Commutativity *) Lemma plus_comm : forall n m, n + m = m + n. Proof. intros n m; elim n; simpl; auto with arith. intros y H; elim (plus_n_Sm m y); auto with arith. Qed. Hint Immediate plus_comm: arith v62. (** * Associativity *) Definition plus_Snm_nSm : forall n m, S n + m = n + S m:= plus_n_Sm. Lemma plus_assoc : forall n m p, n + (m + p) = n + m + p. Proof. intros n m p; elim n; simpl; auto with arith. Qed. Hint Resolve plus_assoc: arith v62. Lemma plus_permute : forall n m p, n + (m + p) = m + (n + p). Proof. intros; rewrite (plus_assoc m n p); rewrite (plus_comm m n); auto with arith. Qed. Lemma plus_assoc_reverse : forall n m p, n + m + p = n + (m + p). Proof. auto with arith. Qed. Hint Resolve plus_assoc_reverse: arith v62. (** * Simplification *) Lemma plus_reg_l : forall n m p, p + n = p + m -> n = m. Proof. intros m p n; induction n; simpl; auto with arith. Qed. Lemma plus_le_reg_l : forall n m p, p + n <= p + m -> n <= m. Proof. induction p; simpl; auto with arith. Qed. Lemma plus_lt_reg_l : forall n m p, p + n < p + m -> n < m. Proof. induction p; simpl; auto with arith. Qed. (** * Compatibility with order *) Lemma plus_le_compat_l : forall n m p, n <= m -> p + n <= p + m. Proof. induction p; simpl; auto with arith. Qed. Hint Resolve plus_le_compat_l: arith v62. Lemma plus_le_compat_r : forall n m p, n <= m -> n + p <= m + p. Proof. induction 1; simpl; auto with arith. Qed. Hint Resolve plus_le_compat_r: arith v62. Lemma le_plus_l : forall n m, n <= n + m. Proof. induction n; simpl; auto with arith. Qed. Hint Resolve le_plus_l: arith v62. Lemma le_plus_r : forall n m, m <= n + m. Proof. intros n m; elim n; simpl; auto with arith. Qed. Hint Resolve le_plus_r: arith v62. Theorem le_plus_trans : forall n m p, n <= m -> n <= m + p. Proof. intros; apply le_trans with (m := m); auto with arith. Qed. Hint Resolve le_plus_trans: arith v62. Theorem lt_plus_trans : forall n m p, n < m -> n < m + p. Proof. intros; apply lt_le_trans with (m := m); auto with arith. Qed. Hint Immediate lt_plus_trans: arith v62. Lemma plus_lt_compat_l : forall n m p, n < m -> p + n < p + m. Proof. induction p; simpl; auto with arith. Qed. Hint Resolve plus_lt_compat_l: arith v62. Lemma plus_lt_compat_r : forall n m p, n < m -> n + p < m + p. Proof. intros n m p H; rewrite (plus_comm n p); rewrite (plus_comm m p). elim p; auto with arith. Qed. Hint Resolve plus_lt_compat_r: arith v62. Lemma plus_le_compat : forall n m p q, n <= m -> p <= q -> n + p <= m + q. Proof. intros n m p q H H0. elim H; simpl; auto with arith. Qed. Lemma plus_le_lt_compat : forall n m p q, n <= m -> p < q -> n + p < m + q. Proof. unfold lt. intros. change (S n + p <= m + q). rewrite plus_Snm_nSm. apply plus_le_compat; assumption. Qed. 
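(* Illustrative usage example; the name [plus_le_lt_compat_example] is new
   here and not part of the library. A concrete instance of
   [plus_le_lt_compat]; the side conditions are closed by [auto] with the
   [arith] hints declared above. *)
Example plus_le_lt_compat_example : 1 + 0 < 2 + 1.
Proof. apply plus_le_lt_compat; auto with arith. Qed.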
Lemma plus_lt_le_compat : forall n m p q, n < m -> p <= q -> n + p < m + q. Proof. unfold lt. intros. change (S n + p <= m + q). apply plus_le_compat; assumption. Qed. Lemma plus_lt_compat : forall n m p q, n < m -> p < q -> n + p < m + q. Proof. intros. apply plus_lt_le_compat. assumption. apply lt_le_weak. assumption. Qed. (** * Inversion lemmas *) Lemma plus_is_O : forall n m, n + m = 0 -> n = 0 /\ m = 0. Proof. intro m; destruct m as [| n]; auto. intros. discriminate H. Qed. Definition plus_is_one : forall m n, m + n = 1 -> {m = 0 /\ n = 1} + {m = 1 /\ n = 0}. Proof. intro m; destruct m as [| n]; auto. destruct n; auto. intros. simpl in H. discriminate H. Defined. (** * Derived properties *) Lemma plus_permute_2_in_4 : forall n m p q, n + m + (p + q) = n + p + (m + q). Proof. intros m n p q. rewrite <- (plus_assoc m n (p + q)). rewrite (plus_assoc n p q). rewrite (plus_comm n p). rewrite <- (plus_assoc p n q). apply plus_assoc. Qed. (** * Tail-recursive plus *) (** [tail_plus] is an alternative definition for [plus] which is tail-recursive, whereas [plus] is not. This can be useful when extracting programs. *) Fixpoint tail_plus n m : nat := match n with | O => m | S n => tail_plus n (S m) end. Lemma plus_tail_plus : forall n m, n + m = tail_plus n m. induction n as [| n IHn]; simpl; auto. intro m; rewrite <- IHn; simpl; auto. Qed. (** * Discrimination *) Lemma succ_plus_discr : forall n m, n <> S (plus m n). Proof. intros n m; induction n as [|n IHn]. discriminate. intro H; apply IHn; apply eq_add_S; rewrite H; rewrite <- plus_n_Sm; reflexivity. Qed. Lemma n_SSn : forall n, n <> S (S n). Proof. intro n; exact (succ_plus_discr n 1). Qed. Lemma n_SSSn : forall n, n <> S (S (S n)). Proof. intro n; exact (succ_plus_discr n 2). Qed. Lemma n_SSSSn : forall n, n <> S (S (S (S n))). Proof. intro n; exact (succ_plus_discr n 3). Qed. coq-8.4pl2/theories/Arith/Min.v0000640000175000001440000000311412010532755015431 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n} + {n = m}. Lemma le_decide : forall n m, n <= m -> lt_or_eq n m. Proof le_lt_eq_dec. Lemma le_le_S_eq : forall n m, n <= m -> S n <= m \/ n = m. Proof le_lt_or_eq. (* By special request of G. Kahn - Used in Group Theory *) Lemma discrete_nat : forall n m, n < m -> S n = m \/ (exists r : nat, m = S (S (n + r))). Proof. intros m n H. lapply (lt_le_S m n); auto with arith. intro H'; lapply (le_lt_or_eq (S m) n); auto with arith. induction 1; auto with arith. right; exists (n - S (S m)); simpl. rewrite (plus_comm m (n - S (S m))). rewrite (plus_n_Sm (n - S (S m)) m). rewrite (plus_n_Sm (n - S (S m)) (S m)). rewrite (plus_comm (n - S (S m)) (S (S m))); auto with arith. Qed. Require Export Wf_nat. Require Export Min Max. coq-8.4pl2/theories/Arith/Between.v0000640000175000001440000001251312010532755016302 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop. Inductive between k : nat -> Prop := | bet_emp : between k k | bet_S : forall l, between k l -> P l -> between k (S l). Hint Constructors between: arith v62. Lemma bet_eq : forall k l, l = k -> between k l. Proof. induction 1; auto with arith. Qed. Hint Resolve bet_eq: arith v62. Lemma between_le : forall k l, between k l -> k <= l. Proof. induction 1; auto with arith. Qed. Hint Immediate between_le: arith v62. 
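(* Illustrative usage example; the name [between_diag_example] is new here
   and not part of the library. [between k l] holds trivially when [l]
   equals [k], by the [bet_emp] constructor. *)
Example between_diag_example : between 2 2.
Proof. apply bet_emp. Qed.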
Lemma between_Sk_l : forall k l, between k l -> S k <= l -> between (S k) l. Proof. intros k l H; induction H as [|l H]. intros; absurd (S k <= k); auto with arith. destruct H; auto with arith. Qed. Hint Resolve between_Sk_l: arith v62. Lemma between_restr : forall k l (m:nat), k <= l -> l <= m -> between k m -> between l m. Proof. induction 1; auto with arith. Qed. Inductive exists_between k : nat -> Prop := | exists_S : forall l, exists_between k l -> exists_between k (S l) | exists_le : forall l, k <= l -> Q l -> exists_between k (S l). Hint Constructors exists_between: arith v62. Lemma exists_le_S : forall k l, exists_between k l -> S k <= l. Proof. induction 1; auto with arith. Qed. Lemma exists_lt : forall k l, exists_between k l -> k < l. Proof exists_le_S. Hint Immediate exists_le_S exists_lt: arith v62. Lemma exists_S_le : forall k l, exists_between k (S l) -> k <= l. Proof. intros; apply le_S_n; auto with arith. Qed. Hint Immediate exists_S_le: arith v62. Definition in_int p q r := p <= r /\ r < q. Lemma in_int_intro : forall p q r, p <= r -> r < q -> in_int p q r. Proof. red; auto with arith. Qed. Hint Resolve in_int_intro: arith v62. Lemma in_int_lt : forall p q r, in_int p q r -> p < q. Proof. induction 1; intros. apply le_lt_trans with r; auto with arith. Qed. Lemma in_int_p_Sq : forall p q r, in_int p (S q) r -> in_int p q r \/ r = q :>nat. Proof. induction 1; intros. elim (le_lt_or_eq r q); auto with arith. Qed. Lemma in_int_S : forall p q r, in_int p q r -> in_int p (S q) r. Proof. induction 1; auto with arith. Qed. Hint Resolve in_int_S: arith v62. Lemma in_int_Sp_q : forall p q r, in_int (S p) q r -> in_int p q r. Proof. induction 1; auto with arith. Qed. Hint Immediate in_int_Sp_q: arith v62. Lemma between_in_int : forall k l, between k l -> forall r, in_int k l r -> P r. Proof. induction 1; intros. absurd (k < k); auto with arith. apply in_int_lt with r; auto with arith. elim (in_int_p_Sq k l r); intros; auto with arith. rewrite H2; trivial with arith. Qed. Lemma in_int_between : forall k l, k <= l -> (forall r, in_int k l r -> P r) -> between k l. Proof. induction 1; auto with arith. Qed. Lemma exists_in_int : forall k l, exists_between k l -> exists2 m : nat, in_int k l m & Q m. Proof. induction 1. case IHexists_between; intros p inp Qp; exists p; auto with arith. exists l; auto with arith. Qed. Lemma in_int_exists : forall k l r, in_int k l r -> Q r -> exists_between k l. Proof. destruct 1; intros. elim H0; auto with arith. Qed. Lemma between_or_exists : forall k l, k <= l -> (forall n:nat, in_int k l n -> P n \/ Q n) -> between k l \/ exists_between k l. Proof. induction 1; intros; auto with arith. elim IHle; intro; auto with arith. elim (H0 m); auto with arith. Qed. Lemma between_not_exists : forall k l, between k l -> (forall n:nat, in_int k l n -> P n -> ~ Q n) -> ~ exists_between k l. Proof. induction 1; red; intros. absurd (k < k); auto with arith. absurd (Q l); auto with arith. elim (exists_in_int k (S l)); auto with arith; intros l' inl' Ql'. replace l with l'; auto with arith. elim inl'; intros. elim (le_lt_or_eq l' l); auto with arith; intros. absurd (exists_between k l); auto with arith. apply in_int_exists with l'; auto with arith. Qed. Inductive P_nth (init:nat) : nat -> nat -> Prop := | nth_O : P_nth init init 0 | nth_S : forall k l (n:nat), P_nth init k n -> between (S k) l -> Q l -> P_nth init l (S n). Lemma nth_le : forall (init:nat) l (n:nat), P_nth init l n -> init <= l. Proof. induction 1; intros; auto with arith. 
apply le_trans with (S k); auto with arith. Qed. Definition eventually (n:nat) := exists2 k : nat, k <= n & Q k. Lemma event_O : eventually 0 -> Q 0. Proof. induction 1; intros. replace 0 with x; auto with arith. Qed. End Between. Hint Resolve nth_O bet_S bet_emp bet_eq between_Sk_l exists_S exists_le in_int_S in_int_intro: arith v62. Hint Immediate in_int_Sp_q exists_le_S exists_S_le: arith v62. coq-8.4pl2/theories/Arith/Minus.v0000640000175000001440000001044112010532755016002 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n | S k, O => S k | S k, S l => k - l end where "n - m" := (minus n m) : nat_scope. >> *) Require Import Lt. Require Import Le. Local Open Scope nat_scope. Implicit Types m n p : nat. (** * 0 is right neutral *) Lemma minus_n_O : forall n, n = n - 0. Proof. induction n; simpl; auto with arith. Qed. Hint Resolve minus_n_O: arith v62. (** * Permutation with successor *) Lemma minus_Sn_m : forall n m, m <= n -> S (n - m) = S n - m. Proof. intros n m Le; pattern m, n; apply le_elim_rel; simpl; auto with arith. Qed. Hint Resolve minus_Sn_m: arith v62. Theorem pred_of_minus : forall n, pred n = n - 1. Proof. intro x; induction x; simpl; auto with arith. Qed. (** * Diagonal *) Lemma minus_diag : forall n, n - n = 0. Proof. induction n; simpl; auto with arith. Qed. Lemma minus_diag_reverse : forall n, 0 = n - n. Proof. auto using minus_diag. Qed. Hint Resolve minus_diag_reverse: arith v62. Notation minus_n_n := minus_diag_reverse. (** * Simplification *) Lemma minus_plus_simpl_l_reverse : forall n m p, n - m = p + n - (p + m). Proof. induction p; simpl; auto with arith. Qed. Hint Resolve minus_plus_simpl_l_reverse: arith v62. (** * Relation with plus *) Lemma plus_minus : forall n m p, n = m + p -> p = n - m. Proof. intros n m p; pattern m, n; apply nat_double_ind; simpl; intros. replace (n0 - 0) with n0; auto with arith. absurd (0 = S (n0 + p)); auto with arith. auto with arith. Qed. Hint Immediate plus_minus: arith v62. Lemma minus_plus : forall n m, n + m - n = m. symmetry ; auto with arith. Qed. Hint Resolve minus_plus: arith v62. Lemma le_plus_minus : forall n m, n <= m -> m = n + (m - n). Proof. intros n m Le; pattern n, m; apply le_elim_rel; simpl; auto with arith. Qed. Hint Resolve le_plus_minus: arith v62. Lemma le_plus_minus_r : forall n m, n <= m -> n + (m - n) = m. Proof. symmetry ; auto with arith. Qed. Hint Resolve le_plus_minus_r: arith v62. (** * Relation with order *) Theorem minus_le_compat_r : forall n m p : nat, n <= m -> n - p <= m - p. Proof. intros n m p; generalize n m; clear n m; induction p as [|p HI]. intros n m; rewrite <- (minus_n_O n); rewrite <- (minus_n_O m); trivial. intros n m Hnm; apply le_elim_rel with (n:=n) (m:=m); auto with arith. intros q r H _. simpl. auto using HI. Qed. Theorem minus_le_compat_l : forall n m p : nat, n <= m -> p - m <= p - n. Proof. intros n m p; generalize n m; clear n m; induction p as [|p HI]. trivial. intros n m Hnm; apply le_elim_rel with (n:=n) (m:=m); trivial. intros q; destruct q; auto with arith. simpl. apply le_trans with (m := p - 0); [apply HI | rewrite <- minus_n_O]; auto with arith. intros q r Hqr _. simpl. auto using HI. Qed. Corollary le_minus : forall n m, n - m <= n. Proof. intros n m; rewrite minus_n_O; auto using minus_le_compat_l with arith. Qed. Lemma lt_minus : forall n m, m <= n -> 0 < m -> n - m < n. Proof. 
intros n m Le; pattern m, n; apply le_elim_rel; simpl; auto using le_minus with arith. intros; absurd (0 < 0); auto with arith. Qed. Hint Resolve lt_minus: arith v62. Lemma lt_O_minus_lt : forall n m, 0 < n - m -> m < n. Proof. intros n m; pattern n, m; apply nat_double_ind; simpl; auto with arith. intros; absurd (0 < 0); trivial with arith. Qed. Hint Immediate lt_O_minus_lt: arith v62. Theorem not_le_minus_0 : forall n m, ~ m <= n -> n - m = 0. Proof. intros y x; pattern y, x; apply nat_double_ind; [ simpl; trivial with arith | intros n H; absurd (0 <= S n); [ assumption | apply le_O_n ] | simpl; intros n m H1 H2; apply H1; unfold not; intros H3; apply H2; apply le_n_S; assumption ]. Qed. coq-8.4pl2/theories/Arith/Bool_nat.v0000640000175000001440000000277312010532755016455 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = y} := fun n m => sumbool_not _ _ (le_lt_dec m n). Definition nat_lt_ge_bool x y := bool_of_sumbool (lt_ge_dec x y). Definition nat_ge_lt_bool x y := bool_of_sumbool (sumbool_not _ _ (lt_ge_dec x y)). Definition nat_le_gt_bool x y := bool_of_sumbool (le_gt_dec x y). Definition nat_gt_le_bool x y := bool_of_sumbool (sumbool_not _ _ (le_gt_dec x y)). Definition nat_eq_bool x y := bool_of_sumbool (eq_nat_dec x y). Definition nat_noteq_bool x y := bool_of_sumbool (sumbool_not _ _ (eq_nat_dec x y)). Definition zerop_bool x := bool_of_sumbool (zerop x). Definition notzerop_bool x := bool_of_sumbool (notzerop x). coq-8.4pl2/theories/Arith/Even.v0000640000175000001440000001702012010532755015604 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := | even_O : even 0 | even_S : forall n, odd n -> even (S n) with odd : nat -> Prop := odd_S : forall n, even n -> odd (S n). Hint Constructors even: arith. Hint Constructors odd: arith. Lemma even_or_odd : forall n, even n \/ odd n. Proof. induction n. auto with arith. elim IHn; auto with arith. Qed. Lemma even_odd_dec : forall n, {even n} + {odd n}. Proof. induction n. auto with arith. elim IHn; auto with arith. Defined. Lemma not_even_and_odd : forall n, even n -> odd n -> False. Proof. induction n. intros even_0 odd_0. inversion odd_0. intros even_Sn odd_Sn. inversion even_Sn. inversion odd_Sn. auto with arith. Qed. (** * Facts about [even] & [odd] wrt. [plus] *) Lemma even_plus_split : forall n m, (even (n + m) -> even n /\ even m \/ odd n /\ odd m) with odd_plus_split : forall n m, odd (n + m) -> odd n /\ even m \/ even n /\ odd m. Proof. intros. clear even_plus_split. destruct n; simpl in *. auto with arith. inversion_clear H; apply odd_plus_split in H0 as [(H0,?)|(H0,?)]; auto with arith. intros. clear odd_plus_split. destruct n; simpl in *. auto with arith. inversion_clear H; apply even_plus_split in H0 as [(H0,?)|(H0,?)]; auto with arith. Qed. Lemma even_even_plus : forall n m, even n -> even m -> even (n + m) with odd_plus_l : forall n m, odd n -> even m -> odd (n + m). Proof. intros n m [|] ?. trivial. apply even_S, odd_plus_l; trivial. intros n m [] ?. apply odd_S, even_even_plus; trivial. Qed. Lemma odd_plus_r : forall n m, even n -> odd m -> odd (n + m) with odd_even_plus : forall n m, odd n -> odd m -> even (n + m). Proof. intros n m [|] ?. trivial. apply odd_S, odd_even_plus; trivial. intros n m [] ?. apply even_S, odd_plus_r; trivial. Qed. 
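(* Illustrative usage example; the name [even_plus_example] is new here and
   not part of the library. Parity of a concrete sum via [even_even_plus];
   the remaining parity goals are closed by the constructors. *)
Example even_plus_example : even (2 + 2).
Proof. apply even_even_plus; repeat constructor. Qed.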
Lemma even_plus_aux : forall n m, (odd (n + m) <-> odd n /\ even m \/ even n /\ odd m) /\ (even (n + m) <-> even n /\ even m \/ odd n /\ odd m). Proof. split; split; auto using odd_plus_split, even_plus_split. intros [[]|[]]; auto using odd_plus_r, odd_plus_l. intros [[]|[]]; auto using even_even_plus, odd_even_plus. Qed. Lemma even_plus_even_inv_r : forall n m, even (n + m) -> even n -> even m. Proof. intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd n); auto. Qed. Lemma even_plus_even_inv_l : forall n m, even (n + m) -> even m -> even n. Proof. intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd m); auto. Qed. Lemma even_plus_odd_inv_r : forall n m, even (n + m) -> odd n -> odd m. Proof. intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd n); auto. Qed. Lemma even_plus_odd_inv_l : forall n m, even (n + m) -> odd m -> odd n. Proof. intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd m); auto. Qed. Hint Resolve even_even_plus odd_even_plus: arith. Lemma odd_plus_even_inv_l : forall n m, odd (n + m) -> odd m -> even n. Proof. intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd m); auto. Qed. Lemma odd_plus_even_inv_r : forall n m, odd (n + m) -> odd n -> even m. Proof. intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd n); auto. Qed. Lemma odd_plus_odd_inv_l : forall n m, odd (n + m) -> even m -> odd n. Proof. intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd m); auto. Qed. Lemma odd_plus_odd_inv_r : forall n m, odd (n + m) -> even n -> odd m. Proof. intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto. intro; destruct (not_even_and_odd n); auto. Qed. Hint Resolve odd_plus_l odd_plus_r: arith. (** * Facts about [even] and [odd] wrt. [mult] *) Lemma even_mult_aux : forall n m, (odd (n * m) <-> odd n /\ odd m) /\ (even (n * m) <-> even n \/ even m). Proof. intros n; elim n; simpl; auto with arith. intros m; split; split; auto with arith. intros H'; inversion H'. intros H'; elim H'; auto. intros n0 H' m; split; split; auto with arith. intros H'0. elim (even_plus_aux m (n0 * m)); intros H'3 H'4; case H'3; intros H'1 H'2; case H'1; auto. intros H'5; elim H'5; intros H'6 H'7; auto with arith. split; auto with arith. case (H' m). intros H'8 H'9; case H'9. intros H'10; case H'10; auto with arith. intros H'11 H'12; case (not_even_and_odd m); auto with arith. intros H'5; elim H'5; intros H'6 H'7; case (not_even_and_odd (n0 * m)); auto. case (H' m). intros H'8 H'9; case H'9; auto. intros H'0; elim H'0; intros H'1 H'2; clear H'0. elim (even_plus_aux m (n0 * m)); auto. intros H'0 H'3. elim H'0. intros H'4 H'5; apply H'5; auto. left; split; auto with arith. case (H' m). intros H'6 H'7; elim H'7. intros H'8 H'9; apply H'9. left. inversion H'1; auto. intros H'0. elim (even_plus_aux m (n0 * m)); intros H'3 H'4; case H'4. intros H'1 H'2. elim H'1; auto. intros H; case H; auto. intros H'5; elim H'5; intros H'6 H'7; auto with arith. left. case (H' m). intros H'8; elim H'8. intros H'9; elim H'9; auto with arith. intros H'0; elim H'0; intros H'1. case (even_or_odd m); intros H'2. apply even_even_plus; auto. case (H' m). intros H H0; case H0; auto. apply odd_even_plus; auto. inversion H'1; case (H' m); auto. intros H1; case H1; auto. apply even_even_plus; auto. case (H' m). 
intros H H0; case H0; auto. Qed. Lemma even_mult_l : forall n m, even n -> even (n * m). Proof. intros n m; case (even_mult_aux n m); auto. intros H H0; case H0; auto. Qed. Lemma even_mult_r : forall n m, even m -> even (n * m). Proof. intros n m; case (even_mult_aux n m); auto. intros H H0; case H0; auto. Qed. Hint Resolve even_mult_l even_mult_r: arith. Lemma even_mult_inv_r : forall n m, even (n * m) -> odd n -> even m. Proof. intros n m H' H'0. case (even_mult_aux n m). intros H'1 H'2; elim H'2. intros H'3; elim H'3; auto. intros H; case (not_even_and_odd n); auto. Qed. Lemma even_mult_inv_l : forall n m, even (n * m) -> odd m -> even n. Proof. intros n m H' H'0. case (even_mult_aux n m). intros H'1 H'2; elim H'2. intros H'3; elim H'3; auto. intros H; case (not_even_and_odd m); auto. Qed. Lemma odd_mult : forall n m, odd n -> odd m -> odd (n * m). Proof. intros n m; case (even_mult_aux n m); intros H; case H; auto. Qed. Hint Resolve even_mult_l even_mult_r odd_mult: arith. Lemma odd_mult_inv_l : forall n m, odd (n * m) -> odd n. Proof. intros n m H'. case (even_mult_aux n m). intros H'1 H'2; elim H'1. intros H'3; elim H'3; auto. Qed. Lemma odd_mult_inv_r : forall n m, odd (n * m) -> odd m. Proof. intros n m H'. case (even_mult_aux n m). intros H'1 H'2; elim H'1. intros H'3; elim H'3; auto. Qed. coq-8.4pl2/theories/Arith/Le.v0000640000175000001440000000623512010532755015255 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := | le_n : n <= n | le_S : forall m:nat, n <= m -> n <= S m where "n <= m" := (le n m) : nat_scope. >> *) Local Open Scope nat_scope. Implicit Types m n p : nat. (** * [le] is a pre-order *) (** Reflexivity *) Theorem le_refl : forall n, n <= n. Proof. exact le_n. Qed. (** Transitivity *) Theorem le_trans : forall n m p, n <= m -> m <= p -> n <= p. Proof. induction 2; auto. Qed. Hint Resolve le_trans: arith v62. (** * Properties of [le] w.r.t. successor, predecessor and 0 *) (** Comparison to 0 *) Theorem le_0_n : forall n, 0 <= n. Proof. induction n; auto. Qed. Theorem le_Sn_0 : forall n, ~ S n <= 0. Proof. red; intros n H. change (IsSucc 0); elim H; simpl; auto with arith. Qed. Hint Resolve le_0_n le_Sn_0: arith v62. Theorem le_n_0_eq : forall n, n <= 0 -> 0 = n. Proof. induction n; auto with arith. intro; contradiction le_Sn_0 with n. Qed. Hint Immediate le_n_0_eq: arith v62. (** [le] and successor *) Theorem le_n_S : forall n m, n <= m -> S n <= S m. Proof. induction 1; auto. Qed. Theorem le_n_Sn : forall n, n <= S n. Proof. auto. Qed. Hint Resolve le_n_S le_n_Sn : arith v62. Theorem le_Sn_le : forall n m, S n <= m -> n <= m. Proof. intros n m H; apply le_trans with (S n); auto with arith. Qed. Hint Immediate le_Sn_le: arith v62. Theorem le_S_n : forall n m, S n <= S m -> n <= m. Proof. exact Peano.le_S_n. Qed. Hint Immediate le_S_n: arith v62. Theorem le_Sn_n : forall n, ~ S n <= n. Proof. induction n; auto with arith. Qed. Hint Resolve le_Sn_n: arith v62. (** [le] and predecessor *) Theorem le_pred_n : forall n, pred n <= n. Proof. induction n; auto with arith. Qed. Hint Resolve le_pred_n: arith v62. Theorem le_pred : forall n m, n <= m -> pred n <= pred m. Proof. exact Peano.le_pred. Qed. (** * [le] is a order on [nat] *) (** Antisymmetry *) Theorem le_antisym : forall n m, n <= m -> m <= n -> n = m. Proof. intros n m H; destruct H as [|m' H]; auto with arith. intros H1. absurd (S m' <= m'); auto with arith. apply le_trans with n; auto with arith. 
Qed. Hint Immediate le_antisym: arith v62. (** * A different elimination principle for the order on natural numbers *) Lemma le_elim_rel : forall P:nat -> nat -> Prop, (forall p, P 0 p) -> (forall p (q:nat), p <= q -> P p q -> P (S p) (S q)) -> forall n m, n <= m -> P n m. Proof. induction n; auto with arith. intros m Le. elim Le; auto with arith. Qed. (* begin hide *) Notation le_O_n := le_0_n (only parsing). Notation le_Sn_O := le_Sn_0 (only parsing). Notation le_n_O_eq := le_n_0_eq (only parsing). (* end hide *) coq-8.4pl2/theories/Arith/Compare_dec.v0000640000175000001440000001652212010532755017116 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n} + {n = m} + {n > m}. Proof. intros; apply lt_eq_lt_dec; assumption. Defined. Definition le_lt_dec n m : {n <= m} + {m < n}. Proof. induction n in m |- *. auto with arith. destruct m. auto with arith. elim (IHn m); auto with arith. Defined. Definition le_le_S_dec n m : {n <= m} + {S m <= n}. Proof. intros; exact (le_lt_dec n m). Defined. Definition le_ge_dec n m : {n <= m} + {n >= m}. Proof. intros; elim (le_lt_dec n m); auto with arith. Defined. Definition le_gt_dec n m : {n <= m} + {n > m}. Proof. intros; exact (le_lt_dec n m). Defined. Definition le_lt_eq_dec n m : n <= m -> {n < m} + {n = m}. Proof. intros; destruct (lt_eq_lt_dec n m); auto with arith. intros; absurd (m < n); auto with arith. Defined. Theorem le_dec : forall n m, {n <= m} + {~ n <= m}. Proof. intros n m. destruct (le_gt_dec n m). auto with arith. right. apply gt_not_le. assumption. Defined. Theorem lt_dec : forall n m, {n < m} + {~ n < m}. Proof. intros; apply le_dec. Defined. Theorem gt_dec : forall n m, {n > m} + {~ n > m}. Proof. intros; apply lt_dec. Defined. Theorem ge_dec : forall n m, {n >= m} + {~ n >= m}. Proof. intros; apply le_dec. Defined. (** Proofs of decidability *) Theorem dec_le : forall n m, decidable (n <= m). Proof. intros n m; destruct (le_dec n m); unfold decidable; auto. Qed. Theorem dec_lt : forall n m, decidable (n < m). Proof. intros; apply dec_le. Qed. Theorem dec_gt : forall n m, decidable (n > m). Proof. intros; apply dec_lt. Qed. Theorem dec_ge : forall n m, decidable (n >= m). Proof. intros; apply dec_le. Qed. Theorem not_eq : forall n m, n <> m -> n < m \/ m < n. Proof. intros x y H; elim (lt_eq_lt_dec x y); [ intros H1; elim H1; [ auto with arith | intros H2; absurd (x = y); assumption ] | auto with arith ]. Qed. Theorem not_le : forall n m, ~ n <= m -> n > m. Proof. intros x y H; elim (le_gt_dec x y); [ intros H1; absurd (x <= y); assumption | trivial with arith ]. Qed. Theorem not_gt : forall n m, ~ n > m -> n <= m. Proof. intros x y H; elim (le_gt_dec x y); [ trivial with arith | intros H1; absurd (x > y); assumption ]. Qed. Theorem not_ge : forall n m, ~ n >= m -> n < m. Proof. intros x y H; exact (not_le y x H). Qed. Theorem not_lt : forall n m, ~ n < m -> n >= m. Proof. intros x y H; exact (not_gt y x H). Qed. (** A ternary comparison function in the spirit of [Z.compare]. *) Fixpoint nat_compare n m := match n, m with | O, O => Eq | O, S _ => Lt | S _, O => Gt | S n', S m' => nat_compare n' m' end. Lemma nat_compare_S : forall n m, nat_compare (S n) (S m) = nat_compare n m. Proof. reflexivity. Qed. Lemma nat_compare_eq_iff : forall n m, nat_compare n m = Eq <-> n = m. Proof. induction n; destruct m; simpl; split; auto; try discriminate; destruct (IHn m); auto. Qed. 
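(* Illustrative usage example; the name [nat_compare_example] is new here
   and not part of the library. On closed arguments [nat_compare] reduces
   by computation. *)
Example nat_compare_example : nat_compare 2 3 = Lt.
Proof. reflexivity. Qed.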
Lemma nat_compare_eq : forall n m, nat_compare n m = Eq -> n = m. Proof. intros; apply -> nat_compare_eq_iff; auto. Qed. Lemma nat_compare_lt : forall n m, n nat_compare n m = Lt. Proof. induction n; destruct m; simpl; split; auto with arith; try solve [inversion 1]. destruct (IHn m); auto with arith. destruct (IHn m); auto with arith. Qed. Lemma nat_compare_gt : forall n m, n>m <-> nat_compare n m = Gt. Proof. induction n; destruct m; simpl; split; auto with arith; try solve [inversion 1]. destruct (IHn m); auto with arith. destruct (IHn m); auto with arith. Qed. Lemma nat_compare_le : forall n m, n<=m <-> nat_compare n m <> Gt. Proof. split. intros LE; contradict LE. apply lt_not_le. apply <- nat_compare_gt; auto. intros NGT. apply not_lt. contradict NGT. apply -> nat_compare_gt; auto. Qed. Lemma nat_compare_ge : forall n m, n>=m <-> nat_compare n m <> Lt. Proof. split. intros GE; contradict GE. apply lt_not_le. apply <- nat_compare_lt; auto. intros NLT. apply not_lt. contradict NLT. apply -> nat_compare_lt; auto. Qed. Lemma nat_compare_spec : forall x y, CompareSpec (x=y) (x n n>m. Proof. intros; apply <- nat_compare_gt; auto. Qed. (** A previous definition of [nat_compare] in terms of [lt_eq_lt_dec]. The new version avoids the creation of proof parts. *) Definition nat_compare_alt (n m:nat) := match lt_eq_lt_dec n m with | inleft (left _) => Lt | inleft (right _) => Eq | inright _ => Gt end. Lemma nat_compare_equiv: forall n m, nat_compare n m = nat_compare_alt n m. Proof. intros; unfold nat_compare_alt; destruct lt_eq_lt_dec as [[LT|EQ]|GT]. apply -> nat_compare_lt; auto. apply <- nat_compare_eq_iff; auto. apply -> nat_compare_gt; auto. Qed. (** A boolean version of [le] over [nat]. *) Fixpoint leb (m:nat) : nat -> bool := match m with | O => fun _:nat => true | S m' => fun n:nat => match n with | O => false | S n' => leb m' n' end end. Lemma leb_correct : forall m n, m <= n -> leb m n = true. Proof. induction m as [| m IHm]. trivial. destruct n. intro H. elim (le_Sn_O _ H). intros. simpl. apply IHm. apply le_S_n. assumption. Qed. Lemma leb_complete : forall m n, leb m n = true -> m <= n. Proof. induction m. trivial with arith. destruct n. intro H. discriminate H. auto with arith. Qed. Lemma leb_iff : forall m n, leb m n = true <-> m <= n. Proof. split; auto using leb_correct, leb_complete. Qed. Lemma leb_correct_conv : forall m n, m < n -> leb n m = false. Proof. intros. generalize (leb_complete n m). destruct (leb n m); auto. intros; elim (lt_not_le m n); auto. Qed. Lemma leb_complete_conv : forall m n, leb n m = false -> m < n. Proof. intros m n EQ. apply not_le. intro LE. apply leb_correct in LE. rewrite LE in EQ; discriminate. Qed. Lemma leb_iff_conv : forall m n, leb n m = false <-> m < n. Proof. split; auto using leb_complete_conv, leb_correct_conv. Qed. Lemma leb_compare : forall n m, leb n m = true <-> nat_compare n m <> Gt. Proof. split; intros. apply -> nat_compare_le. auto using leb_complete. apply leb_correct. apply <- nat_compare_le; auto. Qed. coq-8.4pl2/theories/Arith/Arith_base.v0000640000175000001440000000143412010532755016752 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1 | S n => S n * fact n end. Arguments fact n%nat. Lemma lt_O_fact : forall n:nat, 0 < fact n. Proof. simple induction n; unfold lt; simpl; auto with arith. Qed. Lemma fact_neq_0 : forall n:nat, fact n <> 0. Proof. intro. apply not_eq_sym. apply lt_O_neq. apply lt_O_fact. Qed. 
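(* Illustrative usage example; the name [fact_example] is new here and not
   part of the library. [fact] on a small argument, checked by computation. *)
Example fact_example : fact 4 = 24.
Proof. reflexivity. Qed.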
Lemma fact_le : forall n m:nat, n <= m -> fact n <= fact m. Proof. induction 1. apply le_n. assert (1 * fact n <= S m * fact m). apply mult_le_compat. apply lt_le_S; apply lt_O_Sn. assumption. simpl (1 * fact n) in H0. rewrite <- plus_n_O in H0. assumption. Qed. coq-8.4pl2/theories/Arith/Div2.v0000640000175000001440000001210512010532755015512 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | S O => 0 | S (S n') => S (div2 n') end. (** Since [div2] is recursively defined on [0], [1] and [(S (S n))], it is useful to prove the corresponding induction principle *) Lemma ind_0_1_SS : forall P:nat -> Prop, P 0 -> P 1 -> (forall n, P n -> P (S (S n))) -> forall n, P n. Proof. intros P H0 H1 Hn. cut (forall n, P n /\ P (S n)). intros H'n n. elim (H'n n). auto with arith. induction n. auto with arith. intros. elim IHn; auto with arith. Qed. (** [0 n/2 < n] *) Lemma lt_div2 : forall n, 0 < n -> div2 n < n. Proof. intro n. pattern n. apply ind_0_1_SS. (* n = 0 *) inversion 1. (* n=1 *) simpl; trivial. (* n=S S n' *) intro n'; case (zerop n'). intro n'_eq_0. rewrite n'_eq_0. auto with arith. auto with arith. Qed. Hint Resolve lt_div2: arith. (** Properties related to the parity *) Lemma even_div2 : forall n, even n -> div2 n = div2 (S n) with odd_div2 : forall n, odd n -> S (div2 n) = div2 (S n). Proof. destruct n; intro H. (* 0 *) trivial. (* S n *) inversion_clear H. apply odd_div2 in H0 as <-. trivial. destruct n; intro. (* 0 *) inversion H. (* S n *) inversion_clear H. apply even_div2 in H0 as <-. trivial. Qed. Lemma div2_even n : div2 n = div2 (S n) -> even n with div2_odd n : S (div2 n) = div2 (S n) -> odd n. Proof. { destruct n; intro H. - constructor. - constructor. apply div2_odd. rewrite H. trivial. } { destruct n; intro H. - discriminate. - constructor. apply div2_even. injection H as <-. trivial. } Qed. Hint Resolve even_div2 div2_even odd_div2 div2_odd: arith. Lemma even_odd_div2 n : (even n <-> div2 n = div2 (S n)) /\ (odd n <-> S (div2 n) = div2 (S n)). Proof. split; split; auto using div2_odd, div2_even, odd_div2, even_div2. Qed. (** Properties related to the double ([2n]) *) Definition double n := n + n. Hint Unfold double: arith. Lemma double_S : forall n, double (S n) = S (S (double n)). Proof. intro. unfold double. simpl. auto with arith. Qed. Lemma double_plus : forall n (m:nat), double (n + m) = double n + double m. Proof. intros m n. unfold double. do 2 rewrite plus_assoc_reverse. rewrite (plus_permute n). reflexivity. Qed. Hint Resolve double_S: arith. Lemma even_odd_double : forall n, (even n <-> n = double (div2 n)) /\ (odd n <-> n = S (double (div2 n))). Proof. intro n. pattern n. apply ind_0_1_SS. (* n = 0 *) split; split; auto with arith. intro H. inversion H. (* n = 1 *) split; split; auto with arith. intro H. inversion H. inversion H1. (* n = (S (S n')) *) intros. destruct H as ((IH1,IH2),(IH3,IH4)). split; split. intro H. inversion H. inversion H1. simpl. rewrite (double_S (div2 n0)). auto with arith. simpl. rewrite (double_S (div2 n0)). intro H. injection H. auto with arith. intro H. inversion H. inversion H1. simpl. rewrite (double_S (div2 n0)). auto with arith. simpl. rewrite (double_S (div2 n0)). intro H. injection H. auto with arith. Qed. (** Specializations *) Lemma even_double : forall n, even n -> n = double (div2 n). Proof fun n => proj1 (proj1 (even_odd_double n)). Lemma double_even : forall n, n = double (div2 n) -> even n. 
Proof fun n => proj2 (proj1 (even_odd_double n)). Lemma odd_double : forall n, odd n -> n = S (double (div2 n)). Proof fun n => proj1 (proj2 (even_odd_double n)). Lemma double_odd : forall n, n = S (double (div2 n)) -> odd n. Proof fun n => proj2 (proj2 (even_odd_double n)). Hint Resolve even_double double_even odd_double double_odd: arith. (** Application: - if [n] is even then there is a [p] such that [n = 2p] - if [n] is odd then there is a [p] such that [n = 2p+1] (Immediate: it is [n/2]) *) Lemma even_2n : forall n, even n -> {p : nat | n = double p}. Proof. intros n H. exists (div2 n). auto with arith. Defined. Lemma odd_S2n : forall n, odd n -> {p : nat | n = S (double p)}. Proof. intros n H. exists (div2 n). auto with arith. Defined. (** Doubling before dividing by two brings back to the initial number. *) Lemma div2_double : forall n:nat, div2 (2*n) = n. Proof. induction n. simpl; auto. simpl. replace (n+S(n+0)) with (S (2*n)). f_equal; auto. simpl; auto with arith. Qed. Lemma div2_double_plus_one : forall n:nat, div2 (S (2*n)) = n. Proof. induction n. simpl; auto. simpl. replace (n+S(n+0)) with (S (2*n)). f_equal; auto. simpl; auto with arith. Qed. coq-8.4pl2/theories/Arith/Euclid.v0000640000175000001440000000401712010532755016116 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* r -> a = q * b + r -> diveucl a b. Lemma eucl_dev : forall n, n > 0 -> forall m:nat, diveucl m n. Proof. intros b H a; pattern a; apply gt_wf_rec; intros n H0. elim (le_gt_dec b n). intro lebn. elim (H0 (n - b)); auto with arith. intros q r g e. apply divex with (S q) r; simpl; auto with arith. elim plus_assoc. elim e; auto with arith. intros gtbn. apply divex with 0 n; simpl; auto with arith. Defined. Lemma quotient : forall n, n > 0 -> forall m:nat, {q : nat | exists r : nat, m = q * n + r /\ n > r}. Proof. intros b H a; pattern a; apply gt_wf_rec; intros n H0. elim (le_gt_dec b n). intro lebn. elim (H0 (n - b)); auto with arith. intros q Hq; exists (S q). elim Hq; intros r Hr. exists r; simpl; elim Hr; intros. elim plus_assoc. elim H1; auto with arith. intros gtbn. exists 0; exists n; simpl; auto with arith. Defined. Lemma modulo : forall n, n > 0 -> forall m:nat, {r : nat | exists q : nat, m = q * n + r /\ n > r}. Proof. intros b H a; pattern a; apply gt_wf_rec; intros n H0. elim (le_gt_dec b n). intro lebn. elim (H0 (n - b)); auto with arith. intros r Hr; exists r. elim Hr; intros q Hq. elim Hq; intros; exists (S q); simpl. elim plus_assoc. elim H1; auto with arith. intros gtbn. exists n; exists 0; simpl; auto with arith. Defined. coq-8.4pl2/theories/Arith/Arith.v0000640000175000001440000000110312010532755015751 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* True | O, S _ => False | S _, O => False | S n1, S m1 => eq_nat n1 m1 end. Theorem eq_nat_refl : forall n, eq_nat n n. induction n; simpl; auto. Qed. Hint Resolve eq_nat_refl: arith v62. (** [eq] restricted to [nat] and [eq_nat] are equivalent *) Lemma eq_eq_nat : forall n m, n = m -> eq_nat n m. induction 1; trivial with arith. Qed. Hint Immediate eq_eq_nat: arith v62. Lemma eq_nat_eq : forall n m, eq_nat n m -> n = m. induction n; induction m; simpl; contradiction || auto with arith. Qed. Hint Immediate eq_nat_eq: arith v62. Theorem eq_nat_is_eq : forall n m, eq_nat n m <-> n = m. Proof. split; auto with arith. Qed. 
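(* Illustrative usage example; the name [eq_nat_example] is new here and
   not part of the library. On equal closed numerals, [eq_nat] computes to
   [True]. *)
Example eq_nat_example : eq_nat 3 3.
Proof. simpl. exact I. Qed.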
Theorem eq_nat_elim : forall n (P:nat -> Prop), P n -> forall m, eq_nat n m -> P m. Proof. intros; replace m with n; auto with arith. Qed. Theorem eq_nat_decide : forall n m, {eq_nat n m} + {~ eq_nat n m}. Proof. induction n. destruct m as [| n]. auto with arith. intros; right; red; trivial with arith. destruct m as [| n0]. right; red; auto with arith. intros. simpl. apply IHn. Defined. (** * Boolean equality on [nat] *) Fixpoint beq_nat n m : bool := match n, m with | O, O => true | O, S _ => false | S _, O => false | S n1, S m1 => beq_nat n1 m1 end. Lemma beq_nat_refl : forall n, true = beq_nat n n. Proof. intro x; induction x; simpl; auto. Qed. Definition beq_nat_eq : forall x y, true = beq_nat x y -> x = y. Proof. double induction x y; simpl. reflexivity. intros n H1 H2. discriminate H2. intros n H1 H2. discriminate H2. intros n H1 z H2 H3. case (H2 _ H3). reflexivity. Defined. Lemma beq_nat_true : forall x y, beq_nat x y = true -> x=y. Proof. induction x; destruct y; simpl; auto; intros; discriminate. Qed. Lemma beq_nat_false : forall x y, beq_nat x y = false -> x<>y. Proof. induction x; destruct y; simpl; auto; intros; discriminate. Qed. Lemma beq_nat_true_iff : forall x y, beq_nat x y = true <-> x=y. Proof. split. apply beq_nat_true. intros; subst; symmetry; apply beq_nat_refl. Qed. Lemma beq_nat_false_iff : forall x y, beq_nat x y = false <-> x<>y. Proof. intros x y. split. apply beq_nat_false. generalize (beq_nat_true_iff x y). destruct beq_nat; auto. intros IFF NEQ. elim NEQ. apply IFF; auto. Qed. coq-8.4pl2/theories/Arith/Gt.v0000640000175000001440000000700212010532755015260 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* > *) Require Import Le. Require Import Lt. Require Import Plus. Local Open Scope nat_scope. Implicit Types m n p : nat. (** * Order and successor *) Theorem gt_Sn_O : forall n, S n > 0. Proof. auto with arith. Qed. Hint Resolve gt_Sn_O: arith v62. Theorem gt_Sn_n : forall n, S n > n. Proof. auto with arith. Qed. Hint Resolve gt_Sn_n: arith v62. Theorem gt_n_S : forall n m, n > m -> S n > S m. Proof. auto with arith. Qed. Hint Resolve gt_n_S: arith v62. Lemma gt_S_n : forall n m, S m > S n -> m > n. Proof. auto with arith. Qed. Hint Immediate gt_S_n: arith v62. Theorem gt_S : forall n m, S n > m -> n > m \/ m = n. Proof. intros n m H; unfold gt; apply le_lt_or_eq; auto with arith. Qed. Lemma gt_pred : forall n m, m > S n -> pred m > n. Proof. auto with arith. Qed. Hint Immediate gt_pred: arith v62. (** * Irreflexivity *) Lemma gt_irrefl : forall n, ~ n > n. Proof lt_irrefl. Hint Resolve gt_irrefl: arith v62. (** * Asymmetry *) Lemma gt_asym : forall n m, n > m -> ~ m > n. Proof fun n m => lt_asym m n. Hint Resolve gt_asym: arith v62. (** * Relating strict and large orders *) Lemma le_not_gt : forall n m, n <= m -> ~ n > m. Proof le_not_lt. Hint Resolve le_not_gt: arith v62. Lemma gt_not_le : forall n m, n > m -> ~ n <= m. Proof. auto with arith. Qed. Hint Resolve gt_not_le: arith v62. Theorem le_S_gt : forall n m, S n <= m -> m > n. Proof. auto with arith. Qed. Hint Immediate le_S_gt: arith v62. Lemma gt_S_le : forall n m, S m > n -> n <= m. Proof. intros n p; exact (lt_n_Sm_le n p). Qed. Hint Immediate gt_S_le: arith v62. Lemma gt_le_S : forall n m, m > n -> S n <= m. Proof. auto with arith. Qed. Hint Resolve gt_le_S: arith v62. Lemma le_gt_S : forall n m, n <= m -> S m > n. Proof. auto with arith. Qed. Hint Resolve le_gt_S: arith v62. 
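(* Illustrative usage example; the name [gt_example] is new here and not
   part of the library. A concrete instance of [gt_Sn_O]. *)
Example gt_example : 2 > 0.
Proof. apply gt_Sn_O. Qed.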
(** * Transitivity *) Theorem le_gt_trans : forall n m p, m <= n -> m > p -> n > p. Proof. red; intros; apply lt_le_trans with m; auto with arith. Qed. Theorem gt_le_trans : forall n m p, n > m -> p <= m -> n > p. Proof. red; intros; apply le_lt_trans with m; auto with arith. Qed. Lemma gt_trans : forall n m p, n > m -> m > p -> n > p. Proof. red; intros n m p H1 H2. apply lt_trans with m; auto with arith. Qed. Theorem gt_trans_S : forall n m p, S n > m -> m > p -> n > p. Proof. red; intros; apply lt_le_trans with m; auto with arith. Qed. Hint Resolve gt_trans_S le_gt_trans gt_le_trans: arith v62. (** * Comparison to 0 *) Theorem gt_0_eq : forall n, n > 0 \/ 0 = n. Proof. intro n; apply gt_S; auto with arith. Qed. (** * Simplification and compatibility *) Lemma plus_gt_reg_l : forall n m p, p + n > p + m -> n > m. Proof. red; intros n m p H; apply plus_lt_reg_l with p; auto with arith. Qed. Lemma plus_gt_compat_l : forall n m p, n > m -> p + n > p + m. Proof. auto with arith. Qed. Hint Resolve plus_gt_compat_l: arith v62. (* begin hide *) Notation gt_O_eq := gt_0_eq (only parsing). (* end hide *) coq-8.4pl2/theories/Wellfounded/0000750000175000001440000000000012127276547015735 5ustar notinuserscoq-8.4pl2/theories/Wellfounded/Disjoint_Union.v0000640000175000001440000000302612010532755021044 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Variable leB : B -> B -> Prop. Notation Le_AsB := (le_AsB A B leA leB). Lemma acc_A_sum : forall x:A, Acc leA x -> Acc Le_AsB (inl B x). Proof. induction 1. apply Acc_intro; intros y H2. inversion_clear H2. auto with sets. Qed. Lemma acc_B_sum : well_founded leA -> forall x:B, Acc leB x -> Acc Le_AsB (inr A x). Proof. induction 2. apply Acc_intro; intros y H3. inversion_clear H3; auto with sets. apply acc_A_sum; auto with sets. Qed. Lemma wf_disjoint_sum : well_founded leA -> well_founded leB -> well_founded Le_AsB. Proof. intros. unfold well_founded. destruct a as [a| b]. apply (acc_A_sum a). apply (H a). apply (acc_B_sum H b). apply (H0 b). Qed. End Wf_Disjoint_Union. coq-8.4pl2/theories/Wellfounded/Well_Ordering.v0000640000175000001440000000362412010532755020651 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type. Inductive WO : Type := sup : forall (a:A) (f:B a -> WO), WO. Inductive le_WO : WO -> WO -> Prop := le_sup : forall (a:A) (f:B a -> WO) (v:B a), le_WO (f v) (sup a f). Theorem wf_WO : well_founded le_WO. Proof. unfold well_founded; intro. apply Acc_intro. elim a. intros. inversion H0. apply Acc_intro. generalize H4; generalize H1; generalize f0; generalize v. rewrite H3. intros. apply (H v0 y0). cut (f = f1). intros E; rewrite E; auto. symmetry . apply (inj_pair2 A (fun a0:A => B a0 -> WO) a0 f1 f H5). Qed. End WellOrdering. Section Characterisation_wf_relations. (** Wellfounded relations are the inverse image of wellordering types *) (* in course of development *) Variable A : Type. Variable leA : A -> A -> Prop. Definition B (a:A) := {x : A | leA x a}. Definition wof : well_founded leA -> A -> WO A B. Proof. intros. apply (well_founded_induction_type H (fun a:A => WO A B)); auto. intros x H1. apply (sup A B x). unfold B at 1. destruct 1 as [x0]. apply (H1 x0); auto. Qed. End Characterisation_wf_relations. 
coq-8.4pl2/theories/Wellfounded/Inverse_Image.v0000640000175000001440000000327312010532755020632 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* B -> Prop. Variable f : A -> B. Let Rof (x y:A) : Prop := R (f x) (f y). Remark Acc_lemma : forall y:B, Acc R y -> forall x:A, y = f x -> Acc Rof x. Proof. induction 1 as [y _ IHAcc]; intros x H. apply Acc_intro; intros y0 H1. apply (IHAcc (f y0)); try trivial. rewrite H; trivial. Qed. Lemma Acc_inverse_image : forall x:A, Acc R (f x) -> Acc Rof x. Proof. intros; apply (Acc_lemma (f x)); trivial. Qed. Theorem wf_inverse_image : well_founded R -> well_founded Rof. Proof. red; intros; apply Acc_inverse_image; auto. Qed. Variable F : A -> B -> Prop. Let RoF (x y:A) : Prop := exists2 b : B, F x b & (forall c:B, F y c -> R b c). Lemma Acc_inverse_rel : forall b:B, Acc R b -> forall x:A, F x b -> Acc RoF x. Proof. induction 1 as [x _ IHAcc]; intros x0 H2. constructor; intros y H3. destruct H3. apply (IHAcc x1); auto. Qed. Theorem wf_inverse_rel : well_founded R -> well_founded RoF. Proof. red; constructor; intros. case H0; intros. apply (Acc_inverse_rel x); auto. Qed. End Inverse_Image. coq-8.4pl2/theories/Wellfounded/vo.itarget0000640000175000001440000000025011307752066017732 0ustar notinusersDisjoint_Union.vo Inclusion.vo Inverse_Image.vo Lexicographic_Exponentiation.vo Lexicographic_Product.vo Transitive_Closure.vo Union.vo Wellfounded.vo Well_Ordering.vo coq-8.4pl2/theories/Wellfounded/Lexicographic_Product.v0000640000175000001440000001021512010532755022370 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Type. Variable leA : A -> A -> Prop. Variable leB : forall x:A, B x -> B x -> Prop. Notation LexProd := (lexprod A B leA leB). Lemma acc_A_B_lexprod : forall x:A, Acc leA x -> (forall x0:A, clos_trans A leA x0 x -> well_founded (leB x0)) -> forall y:B x, Acc (leB x) y -> Acc LexProd (existT B x y). Proof. induction 1 as [x _ IHAcc]; intros H2 y. induction 1 as [x0 H IHAcc0]; intros. apply Acc_intro. destruct y as [x2 y1]; intro H6. simple inversion H6; intro. cut (leA x2 x); intros. apply IHAcc; auto with sets. intros. apply H2. apply t_trans with x2; auto with sets. red in H2. apply H2. auto with sets. injection H1. destruct 2. injection H3. destruct 2; auto with sets. rewrite <- H1. injection H3; intros _ Hx1. subst x1. apply IHAcc0. elim inj_pair2 with A B x y' x0; assumption. Defined. Theorem wf_lexprod : well_founded leA -> (forall x:A, well_founded (leB x)) -> well_founded LexProd. Proof. intros wfA wfB; unfold well_founded. destruct a. apply acc_A_B_lexprod; auto with sets; intros. red in wfB. auto with sets. Defined. End WfLexicographic_Product. Section Wf_Symmetric_Product. Variable A : Type. Variable B : Type. Variable leA : A -> A -> Prop. Variable leB : B -> B -> Prop. Notation Symprod := (symprod A B leA leB). Lemma Acc_symprod : forall x:A, Acc leA x -> forall y:B, Acc leB y -> Acc Symprod (x, y). Proof. induction 1 as [x _ IHAcc]; intros y H2. induction H2 as [x1 H3 IHAcc1]. apply Acc_intro; intros y H5. inversion_clear H5; auto with sets. apply IHAcc; auto. apply Acc_intro; trivial. Defined. Lemma wf_symprod : well_founded leA -> well_founded leB -> well_founded Symprod. Proof. red. destruct a. apply Acc_symprod; auto with sets. Defined. End Wf_Symmetric_Product. Section Swap. Variable A : Type. 
Variable R : A -> A -> Prop. Notation SwapProd := (swapprod A R). Lemma swap_Acc : forall x y:A, Acc SwapProd (x, y) -> Acc SwapProd (y, x). Proof. intros. inversion_clear H. apply Acc_intro. destruct y0; intros. inversion_clear H; inversion_clear H1; apply H0. apply sp_swap. apply right_sym; auto with sets. apply sp_swap. apply left_sym; auto with sets. apply sp_noswap. apply right_sym; auto with sets. apply sp_noswap. apply left_sym; auto with sets. Defined. Lemma Acc_swapprod : forall x y:A, Acc R x -> Acc R y -> Acc SwapProd (x, y). Proof. induction 1 as [x0 _ IHAcc0]; intros H2. cut (forall y0:A, R y0 x0 -> Acc SwapProd (y0, y)). clear IHAcc0. induction H2 as [x1 _ IHAcc1]; intros H4. cut (forall y:A, R y x1 -> Acc SwapProd (x0, y)). clear IHAcc1. intro. apply Acc_intro. destruct y; intro H5. inversion_clear H5. inversion_clear H0; auto with sets. apply swap_Acc. inversion_clear H0; auto with sets. intros. apply IHAcc1; auto with sets; intros. apply Acc_inv with (y0, x1); auto with sets. apply sp_noswap. apply right_sym; auto with sets. auto with sets. Defined. Lemma wf_swapprod : well_founded R -> well_founded SwapProd. Proof. red. destruct a; intros. apply Acc_swapprod; auto with sets. Defined. End Swap. coq-8.4pl2/theories/Wellfounded/Inclusion.v0000640000175000001440000000174312010532755020060 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Lemma Acc_incl : inclusion A R1 R2 -> forall z:A, Acc R2 z -> Acc R1 z. Proof. induction 2. apply Acc_intro; auto with sets. Qed. Hint Resolve Acc_incl. Theorem wf_incl : inclusion A R1 R2 -> well_founded R2 -> well_founded R1. Proof. unfold well_founded; auto with sets. Qed. End WfInclusion. coq-8.4pl2/theories/Wellfounded/Transitive_Closure.v0000640000175000001440000000262712010532755021743 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Acc trans_clos x. induction 1 as [x0 _ H1]. apply Acc_intro. intros y H2. induction H2; auto with sets. apply Acc_inv with y; auto with sets. Defined. Hint Resolve Acc_clos_trans. Lemma Acc_inv_trans : forall x y:A, trans_clos y x -> Acc R x -> Acc R y. Proof. induction 1 as [| x y]; auto with sets. intro; apply Acc_inv with y; assumption. Qed. Theorem wf_clos_trans : well_founded R -> well_founded trans_clos. Proof. unfold well_founded; auto with sets. Defined. End Wf_Transitive_Closure. coq-8.4pl2/theories/Wellfounded/Lexicographic_Exponentiation.v0000640000175000001440000002320612010532755023760 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Notation Power := (Pow A leA). Notation Lex_Exp := (lex_exp A leA). Notation ltl := (Ltl A leA). Notation Descl := (Desc A leA). Notation List := (list A). Notation Nil := (nil (A:=A)). (* useless but symmetric *) Notation Cons := (cons (A:=A)). Notation "<< x , y >>" := (exist Descl x y) (at level 0, x, y at level 100). (* Hint Resolve d_one d_nil t_step. *) Lemma left_prefix : forall x y z:List, ltl (x ++ y) z -> ltl x z. Proof. simple induction x. simple induction z. simpl; intros H. inversion_clear H. simpl; intros; apply (Lt_nil A leA). intros a l HInd. simpl. intros. inversion_clear H. apply (Lt_hd A leA); auto with sets. apply (Lt_tl A leA). apply (HInd y y0); auto with sets. Qed. 
Lemma right_prefix : forall x y z:List, ltl x (y ++ z) -> ltl x y \/ (exists y' : List, x = y ++ y' /\ ltl y' z). Proof. intros x y; generalize x. elim y; simpl. right. exists x0; auto with sets. intros. inversion H0. left; apply (Lt_nil A leA). left; apply (Lt_hd A leA); auto with sets. generalize (H x1 z H3). simple induction 1. left; apply (Lt_tl A leA); auto with sets. simple induction 1. simple induction 1; intros. rewrite H8. right; exists x2; auto with sets. Qed. Lemma desc_prefix : forall (x:List) (a:A), Descl (x ++ Cons a Nil) -> Descl x. Proof. intros. inversion H. generalize (app_cons_not_nil _ _ _ H1); simple induction 1. cut (x ++ Cons a Nil = Cons x0 Nil); auto with sets. intro. generalize (app_eq_unit _ _ H0). simple induction 1; simple induction 1; intros. rewrite H4; auto using d_nil with sets. discriminate H5. generalize (app_inj_tail _ _ _ _ H0). simple induction 1; intros. rewrite <- H4; auto with sets. Qed. Lemma desc_tail : forall (x:List) (a b:A), Descl (Cons b (x ++ Cons a Nil)) -> clos_trans A leA a b. Proof. intro. apply rev_ind with (A := A) (P := fun x:List => forall a b:A, Descl (Cons b (x ++ Cons a Nil)) -> clos_trans A leA a b). intros. inversion H. cut (Cons b (Cons a Nil) = (Nil ++ Cons b Nil) ++ Cons a Nil); auto with sets; intro. generalize H0. intro. generalize (app_inj_tail (l ++ Cons y Nil) (Nil ++ Cons b Nil) _ _ H4); simple induction 1. intros. generalize (app_inj_tail _ _ _ _ H6); simple induction 1; intros. generalize H1. rewrite <- H10; rewrite <- H7; intro. apply (t_step A leA); auto with sets. intros. inversion H0. generalize (app_cons_not_nil _ _ _ H3); intro. elim H1. generalize H0. generalize (app_comm_cons (l ++ Cons x0 Nil) (Cons a Nil) b); simple induction 1. intro. generalize (desc_prefix (Cons b (l ++ Cons x0 Nil)) a H5); intro. generalize (H x0 b H6). intro. apply t_trans with (A := A) (y := x0); auto with sets. apply t_step. generalize H1. rewrite H4; intro. generalize (app_inj_tail _ _ _ _ H8); simple induction 1. intros. generalize H2; generalize (app_comm_cons l (Cons x0 Nil) b). intro. generalize H10. rewrite H12; intro. generalize (app_inj_tail _ _ _ _ H13); simple induction 1. intros. rewrite <- H11; rewrite <- H16; auto with sets. Qed. Lemma dist_aux : forall z:List, Descl z -> forall x y:List, z = x ++ y -> Descl x /\ Descl y. Proof. intros z D. elim D. intros. cut (x ++ y = Nil); auto with sets; intro. generalize (app_eq_nil _ _ H0); simple induction 1. intros. rewrite H2; rewrite H3; split; apply d_nil. intros. cut (x0 ++ y = Cons x Nil); auto with sets. intros E. generalize (app_eq_unit _ _ E); simple induction 1. simple induction 1; intros. rewrite H2; rewrite H3; split. apply d_nil. apply d_one. simple induction 1; intros. rewrite H2; rewrite H3; split. apply d_one. apply d_nil. do 5 intro. intros Hind. do 2 intro. generalize x0. apply rev_ind with (A := A) (P := fun y0:List => forall x0:List, (l ++ Cons y Nil) ++ Cons x Nil = x0 ++ y0 -> Descl x0 /\ Descl y0). intro. generalize (app_nil_end x1); simple induction 1; simple induction 1. split. apply d_conc; auto with sets. apply d_nil. do 3 intro. generalize x1. apply rev_ind with (A := A) (P := fun l0:List => forall (x1:A) (x0:List), (l ++ Cons y Nil) ++ Cons x Nil = x0 ++ l0 ++ Cons x1 Nil -> Descl x0 /\ Descl (l0 ++ Cons x1 Nil)). simpl. split. generalize (app_inj_tail _ _ _ _ H2); simple induction 1. simple induction 1; auto with sets. apply d_one. do 5 intro. generalize (app_ass x4 (l1 ++ Cons x2 Nil) (Cons x3 Nil)). simple induction 1. 
generalize (app_ass x4 l1 (Cons x2 Nil)); simple induction 1. intro E. generalize (app_inj_tail _ _ _ _ E). simple induction 1; intros. generalize (app_inj_tail _ _ _ _ H6); simple induction 1; intros. rewrite <- H7; rewrite <- H10; generalize H6. generalize (app_ass x4 l1 (Cons x2 Nil)); intro E1. rewrite E1. intro. generalize (Hind x4 (l1 ++ Cons x2 Nil) H11). simple induction 1; split. auto with sets. generalize H14. rewrite <- H10; intro. apply d_conc; auto with sets. Qed. Lemma dist_Desc_concat : forall x y:List, Descl (x ++ y) -> Descl x /\ Descl y. Proof. intros. apply (dist_aux (x ++ y) H x y); auto with sets. Qed. Lemma desc_end : forall (a b:A) (x:List), Descl (x ++ Cons a Nil) /\ ltl (x ++ Cons a Nil) (Cons b Nil) -> clos_trans A leA a b. Proof. intros a b x. case x. simpl. simple induction 1. intros. inversion H1; auto with sets. inversion H3. simple induction 1. generalize (app_comm_cons l (Cons a Nil) a0). intros E; rewrite <- E; intros. generalize (desc_tail l a a0 H0); intro. inversion H1. apply t_trans with (y := a0); auto with sets. inversion H4. Qed. Lemma ltl_unit : forall (x:List) (a b:A), Descl (x ++ Cons a Nil) -> ltl (x ++ Cons a Nil) (Cons b Nil) -> ltl x (Cons b Nil). Proof. intro. case x. intros; apply (Lt_nil A leA). simpl; intros. inversion_clear H0. apply (Lt_hd A leA a b); auto with sets. inversion_clear H1. Qed. Lemma acc_app : forall (x1 x2:List) (y1:Descl (x1 ++ x2)), Acc Lex_Exp << x1 ++ x2, y1 >> -> forall (x:List) (y:Descl x), ltl x (x1 ++ x2) -> Acc Lex_Exp << x, y >>. Proof. intros. apply (Acc_inv (R:=Lex_Exp) (x:=<< x1 ++ x2, y1 >>)). auto with sets. unfold lex_exp; simpl; auto with sets. Qed. Theorem wf_lex_exp : well_founded leA -> well_founded Lex_Exp. Proof. unfold well_founded at 2. simple induction a; intros x y. apply Acc_intro. simple induction y0. unfold lex_exp at 1; simpl. apply rev_ind with (A := A) (P := fun x:List => forall (x0:List) (y:Descl x0), ltl x0 x -> Acc Lex_Exp << x0, y >>). intros. inversion_clear H0. intro. generalize (well_founded_ind (wf_clos_trans A leA H)). intros GR. apply GR with (P := fun x0:A => forall l:List, (forall (x1:List) (y:Descl x1), ltl x1 l -> Acc Lex_Exp << x1, y >>) -> forall (x1:List) (y:Descl x1), ltl x1 (l ++ Cons x0 Nil) -> Acc Lex_Exp << x1, y >>). intro; intros HInd; intros. generalize (right_prefix x2 l (Cons x1 Nil) H1). simple induction 1. intro; apply (H0 x2 y1 H3). simple induction 1. intro; simple induction 1. clear H4 H2. intro; generalize y1; clear y1. rewrite H2. apply rev_ind with (A := A) (P := fun x3:List => forall y1:Descl (l ++ x3), ltl x3 (Cons x1 Nil) -> Acc Lex_Exp << l ++ x3, y1 >>). intros. generalize (app_nil_end l); intros Heq. generalize y1. clear y1. rewrite <- Heq. intro. apply Acc_intro. simple induction y2. unfold lex_exp at 1. simpl; intros x4 y3. intros. apply (H0 x4 y3); auto with sets. intros. generalize (dist_Desc_concat l (l0 ++ Cons x4 Nil) y1). simple induction 1. intros. generalize (desc_end x4 x1 l0 (conj H8 H5)); intros. generalize y1. rewrite <- (app_ass l l0 (Cons x4 Nil)); intro. generalize (HInd x4 H9 (l ++ l0)); intros HInd2. generalize (ltl_unit l0 x4 x1 H8 H5); intro. generalize (dist_Desc_concat (l ++ l0) (Cons x4 Nil) y2). simple induction 1; intros. generalize (H4 H12 H10); intro. generalize (Acc_inv H14). generalize (acc_app l l0 H12 H14). intros f g. generalize (HInd2 f); intro. apply Acc_intro. simple induction y3. unfold lex_exp at 1; simpl; intros. apply H15; auto with sets. Qed. End Wf_Lexicographic_Exponentiation. 
coq-8.4pl2/theories/Wellfounded/intro.tex0000750000175000001440000000017707265050405017610 0ustar notinusers\section{Well-founded relations}\label{Wellfounded} This library gives definitions and results about well-founded relations. coq-8.4pl2/theories/Wellfounded/Union.v0000640000175000001440000000432412010532755017203 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* forall x y:A, clos_trans A R1 y x -> forall z:A, R2 z y -> exists2 y' : A, R2 y' x & clos_trans A R1 z y'. Proof. induction 2 as [x y| x y z H0 IH1 H1 IH2]; intros. elim H with y x z; auto with sets; intros x0 H2 H3. exists x0; auto with sets. elim IH1 with z0; auto with sets; intros. elim IH2 with x0; auto with sets; intros. exists x1; auto with sets. apply t_trans with x0; auto with sets. Qed. Lemma Acc_union : commut A R1 R2 -> (forall x:A, Acc R2 x -> Acc R1 x) -> forall a:A, Acc R2 a -> Acc Union a. Proof. induction 3 as [x H1 H2]. apply Acc_intro; intros. elim H3; intros; auto with sets. cut (clos_trans A R1 y x); auto with sets. elimtype (Acc (clos_trans A R1) y); intros. apply Acc_intro; intros. elim H8; intros. apply H6; auto with sets. apply t_trans with x0; auto with sets. elim strip_commut with x x0 y0; auto with sets; intros. apply Acc_inv_trans with x1; auto with sets. unfold union. elim H11; auto with sets; intros. apply t_trans with y1; auto with sets. apply (Acc_clos_trans A). apply Acc_inv with x; auto with sets. apply H0. apply Acc_intro; auto with sets. Qed. Theorem wf_union : commut A R1 R2 -> well_founded R1 -> well_founded R2 -> well_founded Union. Proof. unfold well_founded. intros. apply Acc_union; auto with sets. Qed. End WfUnion. coq-8.4pl2/theories/Wellfounded/Wellfounded.v0000640000175000001440000000142012010532755020355 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Aeq y x. unfold Setoid_Theory in s. intros ; symmetry ; assumption. Defined. Definition Seq_trans A Aeq (s : Setoid_Theory A Aeq) : forall x y z:A, Aeq x y -> Aeq y z -> Aeq x z. unfold Setoid_Theory in s. intros ; transitivity y ; assumption. Defined. (** Some tactics for manipulating Setoid Theory not officially declared as Setoid. *) Ltac trans_st x := idtac "trans_st on Setoid_Theory is OBSOLETE"; idtac "use transitivity on Equivalence instead"; match goal with | H : Setoid_Theory _ ?eqA |- ?eqA _ _ => apply (Seq_trans _ _ H) with x; auto end. Ltac sym_st := idtac "sym_st on Setoid_Theory is OBSOLETE"; idtac "use symmetry on Equivalence instead"; match goal with | H : Setoid_Theory _ ?eqA |- ?eqA _ _ => apply (Seq_sym _ _ H); auto end. Ltac refl_st := idtac "refl_st on Setoid_Theory is OBSOLETE"; idtac "use reflexivity on Equivalence instead"; match goal with | H : Setoid_Theory _ ?eqA |- ?eqA _ _ => apply (Seq_refl _ _ H); auto end. Definition gen_st : forall A : Set, Setoid_Theory _ (@eq A). Proof. constructor; congruence. Qed. coq-8.4pl2/theories/Numbers/0000750000175000001440000000000012127276550015072 5ustar notinuserscoq-8.4pl2/theories/Numbers/BinNums.v0000640000175000001440000000430512010532755016630 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* positive | xO : positive -> positive | xH : positive. Delimit Scope positive_scope with positive. Bind Scope positive_scope with positive. 
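(** Added illustration: in a [positive], the outermost constructor carries
    the least significant bit and [xH] is the leading [1].  Thus [5]
    (binary 101) is written [xI (xO xH)] and [6] (binary 110) is written
    [xO (xI xH)]; these are what the decimal notations [5%positive] and
    [6%positive] elaborate to once numeral parsing is available. *)
Check (xI (xO xH) : positive).  (* 5, i.e. binary 101 *)
Check (xO (xI xH) : positive).  (* 6, i.e. binary 110 *)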
Arguments xO _%positive. Arguments xI _%positive. (** [N] is a datatype representing natural numbers in a binary way, by extending the [positive] datatype with a zero. Numbers in [N] can also be denoted using a decimal notation; e.g. [6%N] abbreviates [Npos (xO (xI xH))] *) Inductive N : Set := | N0 : N | Npos : positive -> N. Delimit Scope N_scope with N. Bind Scope N_scope with N. Arguments Npos _%positive. (** [Z] is a datatype representing the integers in a binary way. An integer is either zero or a strictly positive number (coded as a [positive]) or a strictly negative number (whose opposite is stored as a [positive] value). Numbers in [Z] can also be denoted using a decimal notation; e.g. [(-6)%Z] abbreviates [Zneg (xO (xI xH))] *) Inductive Z : Set := | Z0 : Z | Zpos : positive -> Z | Zneg : positive -> Z. Delimit Scope Z_scope with Z. Bind Scope Z_scope with Z. Arguments Zpos _%positive. Arguments Zneg _%positive. coq-8.4pl2/theories/Numbers/vo.itarget0000640000175000001440000000437511560537111017102 0ustar notinusersBinNums.vo BigNumPrelude.vo Cyclic/Abstract/CyclicAxioms.vo Cyclic/Abstract/NZCyclic.vo Cyclic/DoubleCyclic/DoubleAdd.vo Cyclic/DoubleCyclic/DoubleBase.vo Cyclic/DoubleCyclic/DoubleCyclic.vo Cyclic/DoubleCyclic/DoubleDivn1.vo Cyclic/DoubleCyclic/DoubleDiv.vo Cyclic/DoubleCyclic/DoubleLift.vo Cyclic/DoubleCyclic/DoubleMul.vo Cyclic/DoubleCyclic/DoubleSqrt.vo Cyclic/DoubleCyclic/DoubleSub.vo Cyclic/DoubleCyclic/DoubleType.vo Cyclic/Int31/Int31.vo Cyclic/Int31/Cyclic31.vo Cyclic/Int31/Ring31.vo Cyclic/ZModulo/ZModulo.vo Integer/Abstract/ZAddOrder.vo Integer/Abstract/ZAdd.vo Integer/Abstract/ZAxioms.vo Integer/Abstract/ZBase.vo Integer/Abstract/ZLt.vo Integer/Abstract/ZMulOrder.vo Integer/Abstract/ZMul.vo Integer/Abstract/ZSgnAbs.vo Integer/Abstract/ZDivFloor.vo Integer/Abstract/ZDivTrunc.vo Integer/Abstract/ZDivEucl.vo Integer/Abstract/ZMaxMin.vo Integer/Abstract/ZParity.vo Integer/Abstract/ZPow.vo Integer/Abstract/ZGcd.vo Integer/Abstract/ZLcm.vo Integer/Abstract/ZBits.vo Integer/Abstract/ZProperties.vo Integer/BigZ/BigZ.vo Integer/BigZ/ZMake.vo Integer/Binary/ZBinary.vo Integer/NatPairs/ZNatPairs.vo Integer/SpecViaZ/ZSig.vo Integer/SpecViaZ/ZSigZAxioms.vo NaryFunctions.vo NatInt/NZAddOrder.vo NatInt/NZAdd.vo NatInt/NZAxioms.vo NatInt/NZBase.vo NatInt/NZMulOrder.vo NatInt/NZMul.vo NatInt/NZOrder.vo NatInt/NZProperties.vo NatInt/NZDomain.vo NatInt/NZParity.vo NatInt/NZDiv.vo NatInt/NZPow.vo NatInt/NZSqrt.vo NatInt/NZLog.vo NatInt/NZGcd.vo NatInt/NZBits.vo Natural/Abstract/NAddOrder.vo Natural/Abstract/NAdd.vo Natural/Abstract/NAxioms.vo Natural/Abstract/NBase.vo Natural/Abstract/NDefOps.vo Natural/Abstract/NIso.vo Natural/Abstract/NMulOrder.vo Natural/Abstract/NOrder.vo Natural/Abstract/NStrongRec.vo Natural/Abstract/NSub.vo Natural/Abstract/NProperties.vo Natural/Abstract/NDiv.vo Natural/Abstract/NMaxMin.vo Natural/Abstract/NParity.vo Natural/Abstract/NPow.vo Natural/Abstract/NSqrt.vo Natural/Abstract/NLog.vo Natural/Abstract/NGcd.vo Natural/Abstract/NLcm.vo Natural/Abstract/NBits.vo Natural/BigN/BigN.vo Natural/BigN/Nbasic.vo Natural/BigN/NMake_gen.vo Natural/BigN/NMake.vo Natural/Binary/NBinary.vo Natural/Peano/NPeano.vo Natural/SpecViaZ/NSigNAxioms.vo Natural/SpecViaZ/NSig.vo NumPrelude.vo Rational/BigQ/BigQ.vo Rational/BigQ/QMake.vo Rational/SpecViaQ/QSig.vo coq-8.4pl2/theories/Numbers/NumPrelude.v0000640000175000001440000000224312010532755017334 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant 
/ The Coq Development Team *) (* n<>0. Proof. auto with zarith. Qed. Definition Zdiv_mult_cancel_r a b c H := Zdiv.Zdiv_mult_cancel_r a b c (Zlt0_not_eq _ H). Definition Zdiv_mult_cancel_l a b c H := Zdiv.Zdiv_mult_cancel_r a b c (Zlt0_not_eq _ H). Definition Z_div_plus_l a b c H := Zdiv.Z_div_plus_full_l a b c (Zlt0_not_eq _ H). (* Automation *) Hint Extern 2 (Z.le _ _) => (match goal with |- Zpos _ <= Zpos _ => exact (eq_refl _) | H: _ <= ?p |- _ <= ?p => apply Z.le_trans with (2 := H) | H: _ < ?p |- _ <= ?p => apply Z.lt_le_incl; apply Z.le_lt_trans with (2 := H) end). Hint Extern 2 (Z.lt _ _) => (match goal with |- Zpos _ < Zpos _ => exact (eq_refl _) | H: _ <= ?p |- _ <= ?p => apply Z.lt_le_trans with (2 := H) | H: _ < ?p |- _ <= ?p => apply Z.le_lt_trans with (2 := H) end). Hint Resolve Z.lt_gt Z.le_ge Z_div_pos: zarith. (************************************** Properties of order and product **************************************) Theorem beta_lex: forall a b c d beta, a * beta + b <= c * beta + d -> 0 <= b < beta -> 0 <= d < beta -> a <= c. Proof. intros a b c d beta H1 (H3, H4) (H5, H6). assert (a - c < 1); auto with zarith. apply Z.mul_lt_mono_pos_r with beta; auto with zarith. apply Z.le_lt_trans with (d - b); auto with zarith. rewrite Z.mul_sub_distr_r; auto with zarith. Qed. Theorem beta_lex_inv: forall a b c d beta, a < c -> 0 <= b < beta -> 0 <= d < beta -> a * beta + b < c * beta + d. Proof. intros a b c d beta H1 (H3, H4) (H5, H6). case (Z.le_gt_cases (c * beta + d) (a * beta + b)); auto with zarith. intros H7. contradict H1. apply Z.le_ngt. apply beta_lex with (1 := H7); auto. Qed. Lemma beta_mult : forall h l beta, 0 <= h < beta -> 0 <= l < beta -> 0 <= h*beta+l < beta^2. Proof. intros h l beta H1 H2;split. auto with zarith. rewrite <- (Z.add_0_r (beta^2)); rewrite Z.pow_2_r; apply beta_lex_inv;auto with zarith. Qed. Lemma Zmult_lt_b : forall b x y, 0 <= x < b -> 0 <= y < b -> 0 <= x * y <= b^2 - 2*b + 1. Proof. intros b x y (Hx1,Hx2) (Hy1,Hy2);split;auto with zarith. apply Z.le_trans with ((b-1)*(b-1)). apply Z.mul_le_mono_nonneg;auto with zarith. apply Z.eq_le_incl; ring. Qed. Lemma sum_mul_carry : forall xh xl yh yl wc cc beta, 1 < beta -> 0 <= wc < beta -> 0 <= xh < beta -> 0 <= xl < beta -> 0 <= yh < beta -> 0 <= yl < beta -> 0 <= cc < beta^2 -> wc*beta^2 + cc = xh*yl + xl*yh -> 0 <= wc <= 1. Proof. intros xh xl yh yl wc cc beta U H1 H2 H3 H4 H5 H6 H7. assert (H8 := Zmult_lt_b beta xh yl H2 H5). assert (H9 := Zmult_lt_b beta xl yh H3 H4). split;auto with zarith. apply beta_lex with (cc) (beta^2 - 2) (beta^2); auto with zarith. Qed. Theorem mult_add_ineq: forall x y cross beta, 0 <= x < beta -> 0 <= y < beta -> 0 <= cross < beta -> 0 <= x * y + cross < beta^2. Proof. intros x y cross beta HH HH1 HH2. split; auto with zarith. apply Z.le_lt_trans with ((beta-1)*(beta-1)+(beta-1)); auto with zarith. apply Z.add_le_mono; auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. rewrite ?Z.mul_sub_distr_l, ?Z.mul_sub_distr_r, Z.pow_2_r; auto with zarith. Qed. Theorem mult_add_ineq2: forall x y c cross beta, 0 <= x < beta -> 0 <= y < beta -> 0 <= c*beta + cross <= 2*beta - 2 -> 0 <= x * y + (c*beta + cross) < beta^2. Proof. intros x y c cross beta HH HH1 HH2. split; auto with zarith. apply Z.le_lt_trans with ((beta-1)*(beta-1)+(2*beta-2));auto with zarith. apply Z.add_le_mono; auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. rewrite ?Z.mul_sub_distr_l, ?Z.mul_sub_distr_r, Z.pow_2_r; auto with zarith. Qed. 
Theorem mult_add_ineq3: forall x y c cross beta, 0 <= x < beta -> 0 <= y < beta -> 0 <= cross <= beta - 2 -> 0 <= c <= 1 -> 0 <= x * y + (c*beta + cross) < beta^2. Proof. intros x y c cross beta HH HH1 HH2 HH3. apply mult_add_ineq2;auto with zarith. split;auto with zarith. apply Z.le_trans with (1*beta+cross);auto with zarith. Qed. Hint Rewrite Z.mul_1_r Z.mul_0_r Z.mul_1_l Z.mul_0_l Z.add_0_l Z.add_0_r Z.sub_0_r: rm10. (************************************** Properties of Z.div and Z.modulo **************************************) Theorem Zmod_le_first: forall a b, 0 <= a -> 0 < b -> 0 <= a mod b <= a. Proof. intros a b H H1;case (Z_mod_lt a b);auto with zarith;intros H2 H3;split;auto. case (Z.le_gt_cases b a); intros H4; auto with zarith. rewrite Zmod_small; auto with zarith. Qed. Theorem Zmod_distr: forall a b r t, 0 <= a <= b -> 0 <= r -> 0 <= t < 2 ^a -> (2 ^a * r + t) mod (2 ^ b) = (2 ^a * r) mod (2 ^ b) + t. Proof. intros a b r t (H1, H2) H3 (H4, H5). assert (t < 2 ^ b). apply Z.lt_le_trans with (1:= H5); auto with zarith. apply Zpower_le_monotone; auto with zarith. rewrite Zplus_mod; auto with zarith. rewrite Zmod_small with (a := t); auto with zarith. apply Zmod_small; auto with zarith. split; auto with zarith. assert (0 <= 2 ^a * r); auto with zarith. apply Z.add_nonneg_nonneg; auto with zarith. match goal with |- context [?X mod ?Y] => case (Z_mod_lt X Y) end; auto with zarith. pattern (2 ^ b) at 2; replace (2 ^ b) with ((2 ^ b - 2 ^a) + 2 ^ a); try ring. apply Z.add_le_lt_mono; auto with zarith. replace b with ((b - a) + a); try ring. rewrite Zpower_exp; auto with zarith. pattern (2 ^a) at 4; rewrite <- (Z.mul_1_l (2 ^a)); try rewrite <- Z.mul_sub_distr_r. rewrite (Z.mul_comm (2 ^(b - a))); rewrite Zmult_mod_distr_l; auto with zarith. rewrite (Z.mul_comm (2 ^a)); apply Z.mul_le_mono_nonneg_r; auto with zarith. match goal with |- context [?X mod ?Y] => case (Z_mod_lt X Y) end; auto with zarith. Qed. Theorem Zmod_shift_r: forall a b r t, 0 <= a <= b -> 0 <= r -> 0 <= t < 2 ^a -> (r * 2 ^a + t) mod (2 ^ b) = (r * 2 ^a) mod (2 ^ b) + t. Proof. intros a b r t (H1, H2) H3 (H4, H5). assert (t < 2 ^ b). apply Z.lt_le_trans with (1:= H5); auto with zarith. apply Zpower_le_monotone; auto with zarith. rewrite Zplus_mod; auto with zarith. rewrite Zmod_small with (a := t); auto with zarith. apply Zmod_small; auto with zarith. split; auto with zarith. assert (0 <= 2 ^a * r); auto with zarith. apply Z.add_nonneg_nonneg; auto with zarith. match goal with |- context [?X mod ?Y] => case (Z_mod_lt X Y) end; auto with zarith. pattern (2 ^ b) at 2;replace (2 ^ b) with ((2 ^ b - 2 ^a) + 2 ^ a); try ring. apply Z.add_le_lt_mono; auto with zarith. replace b with ((b - a) + a); try ring. rewrite Zpower_exp; auto with zarith. pattern (2 ^a) at 4; rewrite <- (Z.mul_1_l (2 ^a)); try rewrite <- Z.mul_sub_distr_r. repeat rewrite (fun x => Z.mul_comm x (2 ^ a)); rewrite Zmult_mod_distr_l; auto with zarith. apply Z.mul_le_mono_nonneg_l; auto with zarith. match goal with |- context [?X mod ?Y] => case (Z_mod_lt X Y) end; auto with zarith. Qed. Theorem Zdiv_shift_r: forall a b r t, 0 <= a <= b -> 0 <= r -> 0 <= t < 2 ^a -> (r * 2 ^a + t) / (2 ^ b) = (r * 2 ^a) / (2 ^ b). Proof. intros a b r t (H1, H2) H3 (H4, H5). assert (Eq: t < 2 ^ b); auto with zarith. apply Z.lt_le_trans with (1 := H5); auto with zarith. apply Zpower_le_monotone; auto with zarith. pattern (r * 2 ^ a) at 1; rewrite Z_div_mod_eq with (b := 2 ^ b); auto with zarith. rewrite <- Z.add_assoc. rewrite <- Zmod_shift_r; auto with zarith. 
rewrite (Z.mul_comm (2 ^ b)); rewrite Z_div_plus_full_l; auto with zarith. rewrite (fun x y => @Zdiv_small (x mod y)); auto with zarith. match goal with |- context [?X mod ?Y] => case (Z_mod_lt X Y) end; auto with zarith. Qed. Lemma shift_unshift_mod : forall n p a, 0 <= a < 2^n -> 0 <= p <= n -> a * 2^p = a / 2^(n - p) * 2^n + (a*2^p) mod 2^n. Proof. intros n p a H1 H2. pattern (a*2^p) at 1;replace (a*2^p) with (a*2^p/2^n * 2^n + a*2^p mod 2^n). 2:symmetry;rewrite (Z.mul_comm (a*2^p/2^n));apply Z_div_mod_eq. replace (a * 2 ^ p / 2 ^ n) with (a / 2 ^ (n - p));trivial. replace (2^n) with (2^(n-p)*2^p). symmetry;apply Zdiv_mult_cancel_r. destruct H1;trivial. cut (0 < 2^p); auto with zarith. rewrite <- Zpower_exp. replace (n-p+p) with n;trivial. ring. omega. omega. apply Z.lt_gt. apply Z.pow_pos_nonneg;auto with zarith. Qed. Lemma shift_unshift_mod_2 : forall n p a, 0 <= p <= n -> ((a * 2 ^ (n - p)) mod (2^n) / 2 ^ (n - p)) mod (2^n) = a mod 2 ^ p. Proof. intros. rewrite Zmod_small. rewrite Zmod_eq by (auto with zarith). unfold Z.sub at 1. rewrite Z_div_plus_l by (auto with zarith). assert (2^n = 2^(n-p)*2^p). rewrite <- Zpower_exp by (auto with zarith). replace (n-p+p) with n; auto with zarith. rewrite H0. rewrite <- Zdiv_Zdiv, Z_div_mult by (auto with zarith). rewrite (Z.mul_comm (2^(n-p))), Z.mul_assoc. rewrite <- Z.mul_opp_l. rewrite Z_div_mult by (auto with zarith). symmetry; apply Zmod_eq; auto with zarith. remember (a * 2 ^ (n - p)) as b. destruct (Z_mod_lt b (2^n)); auto with zarith. split. apply Z_div_pos; auto with zarith. apply Zdiv_lt_upper_bound; auto with zarith. apply Z.lt_le_trans with (2^n); auto with zarith. rewrite <- (Z.mul_1_r (2^n)) at 1. apply Z.mul_le_mono_nonneg; auto with zarith. cut (0 < 2 ^ (n-p)); auto with zarith. Qed. Lemma div_le_0 : forall p x, 0 <= x -> 0 <= x / 2 ^ p. Proof. intros p x Hle;destruct (Z_le_gt_dec 0 p). apply Zdiv_le_lower_bound;auto with zarith. replace (2^p) with 0. destruct x;compute;intro;discriminate. destruct p;trivial;discriminate. Qed. Lemma div_lt : forall p x y, 0 <= x < y -> x / 2^p < y. Proof. intros p x y H;destruct (Z_le_gt_dec 0 p). apply Zdiv_lt_upper_bound;auto with zarith. apply Z.lt_le_trans with y;auto with zarith. rewrite <- (Z.mul_1_r y);apply Z.mul_le_mono_nonneg;auto with zarith. assert (0 < 2^p);auto with zarith. replace (2^p) with 0. destruct x;change (0 0 < Z.gcd a b -> 0 < b / Z.gcd a b. Proof. intros Hb Hg. assert (H : 0 <= b / Z.gcd a b) by (apply Z.div_pos; auto with zarith). Z.le_elim H; trivial. rewrite (Zdivide_Zdiv_eq (Z.gcd a b) b), <- H, Z.mul_0_r in Hb; auto using Z.gcd_divide_r with zarith. Qed. Theorem Zdiv_neg a b: a < 0 -> 0 < b -> a / b < 0. Proof. intros Ha Hb. assert (b > 0) by omega. generalize (Z_mult_div_ge a _ H); intros. assert (b * (a / b) < 0)%Z. apply Z.le_lt_trans with a; auto with zarith. destruct b; try (compute in Hb; discriminate). destruct (a/Zpos p)%Z. compute in H1; discriminate. compute in H1; discriminate. compute; auto. Qed. Lemma Zdiv_gcd_zero : forall a b, b / Z.gcd a b = 0 -> b <> 0 -> Z.gcd a b = 0. Proof. intros. generalize (Zgcd_is_gcd a b); destruct 1. destruct H2 as (k,Hk). generalize H; rewrite Hk at 1. destruct (Z.eq_dec (Z.gcd a b) 0) as [H'|H']; auto. rewrite Z_div_mult_full; auto. intros; subst k; simpl in *; subst b; elim H0; auto. Qed. Lemma Zgcd_mult_rel_prime : forall a b c, Z.gcd a c = 1 -> Z.gcd b c = 1 -> Z.gcd (a*b) c = 1. Proof. intros. rewrite Zgcd_1_rel_prime in *. apply rel_prime_sym; apply rel_prime_mult; apply rel_prime_sym; auto. Qed. 
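(** Added worked instance of [Zgcd_mult_rel_prime]: with [a := 3],
    [b := 7] and [c := 10] we have [Z.gcd 3 10 = 1] and [Z.gcd 7 10 = 1],
    so the lemma yields [Z.gcd (3 * 7) 10 = Z.gcd 21 10 = 1]. *)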
Lemma Zcompare_gt : forall (A:Type)(a a':A)(p q:Z), match (p?=q)%Z with Gt => a | _ => a' end = if Z_le_gt_dec p q then a' else a. Proof. intros. destruct Z_le_gt_dec as [H|H]. red in H. destruct (p?=q)%Z; auto; elim H; auto. rewrite H; auto. Qed. Theorem Zbounded_induction : (forall Q : Z -> Prop, forall b : Z, Q 0 -> (forall n, 0 <= n -> n < b - 1 -> Q n -> Q (n + 1)) -> forall n, 0 <= n -> n < b -> Q n)%Z. Proof. intros Q b Q0 QS. set (Q' := fun n => (n < b /\ Q n) \/ (b <= n)). assert (H : forall n, 0 <= n -> Q' n). apply natlike_rec2; unfold Q'. destruct (Z.le_gt_cases b 0) as [H | H]. now right. left; now split. intros n H IH. destruct IH as [[IH1 IH2] | IH]. destruct (Z.le_gt_cases (b - 1) n) as [H1 | H1]. right; auto with zarith. left. split; [auto with zarith | now apply (QS n)]. right; auto with zarith. unfold Q' in *; intros n H1 H2. destruct (H n H1) as [[H3 H4] | H3]. assumption. now apply Z.le_ngt in H3. Qed. Lemma Zsquare_le x : x <= x*x. Proof. destruct (Z.lt_ge_cases 0 x). - rewrite <- Z.mul_1_l at 1. rewrite <- Z.mul_le_mono_pos_r; auto with zarith. - pose proof (Z.square_nonneg x); auto with zarith. Qed. coq-8.4pl2/theories/Numbers/Cyclic/0000750000175000001440000000000012127276547016306 5ustar notinuserscoq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/0000750000175000001440000000000012127276547020647 5ustar notinuserscoq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleDivn1.v0000640000175000001440000005005612010532755023144 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable w_head0 : w -> w. Variable w_add_mul_div : w -> w -> w -> w. Variable w_div21 : w -> w -> w -> w * w. Variable w_compare : w -> w -> comparison. Variable w_sub : w -> w -> w. (* ** For proofs ** *) Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[! n | x !]" := (double_to_Z w_digits w_to_Z n x) (at level 0, x at level 99). Notation "[[ x ]]" := (zn2z_to_Z wB w_to_Z x) (at level 0, x at level 99). Variable spec_to_Z : forall x, 0 <= [| x |] < wB. Variable spec_w_zdigits: [|w_zdigits|] = Zpos w_digits. Variable spec_0 : [|w_0|] = 0. Variable spec_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ [|w_head0 x|] * [|x|] < wB. Variable spec_add_mul_div : forall x y p, [|p|] <= Zpos w_digits -> [| w_add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos w_digits) - [|p|]))) mod wB. Variable spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := w_div21 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Variable spec_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_sub: forall x y, [|w_sub x y|] = ([|x|] - [|y|]) mod wB. Section DIVAUX. Variable b2p : w. Variable b2p_le : wB/2 <= [|b2p|]. Definition double_divn1_0_aux n (divn1: w -> word w n -> word w n * w) r h := let (hh,hl) := double_split w_0 n h in let (qh,rh) := divn1 r hh in let (ql,rl) := divn1 rh hl in (double_WW w_WW n qh ql, rl). Fixpoint double_divn1_0 (n:nat) : w -> word w n -> word w n * w := match n return w -> word w n -> word w n * w with | O => fun r x => w_div21 r x b2p | S n => double_divn1_0_aux n (double_divn1_0 n) end. Lemma spec_split : forall (n : nat) (x : zn2z (word w n)), let (h, l) := double_split w_0 n x in [!S n | x!] = [!n | h!] * double_wB w_digits n + [!n | l!]. 
Proof (spec_double_split w_0 w_digits w_to_Z spec_0). Lemma spec_double_divn1_0 : forall n r a, [|r|] < [|b2p|] -> let (q,r') := double_divn1_0 n r a in [|r|] * double_wB w_digits n + [!n|a!] = [!n|q!] * [|b2p|] + [|r'|] /\ 0 <= [|r'|] < [|b2p|]. Proof. induction n;intros. exact (spec_div21 a b2p_le H). simpl (double_divn1_0 (S n) r a); unfold double_divn1_0_aux. assert (H1 := spec_split n a);destruct (double_split w_0 n a) as (hh,hl). rewrite H1. assert (H2 := IHn r hh H);destruct (double_divn1_0 n r hh) as (qh,rh). destruct H2. assert ([|rh|] < [|b2p|]). omega. assert (H4 := IHn rh hl H3);destruct (double_divn1_0 n rh hl) as (ql,rl). destruct H4;split;trivial. rewrite spec_double_WW;trivial. rewrite <- double_wB_wwB. rewrite Z.mul_assoc;rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite H0;rewrite Z.mul_add_distr_r;rewrite <- Z.add_assoc. rewrite H4;ring. Qed. Definition double_modn1_0_aux n (modn1:w -> word w n -> w) r h := let (hh,hl) := double_split w_0 n h in modn1 (modn1 r hh) hl. Fixpoint double_modn1_0 (n:nat) : w -> word w n -> w := match n return w -> word w n -> w with | O => fun r x => snd (w_div21 r x b2p) | S n => double_modn1_0_aux n (double_modn1_0 n) end. Lemma spec_double_modn1_0 : forall n r x, double_modn1_0 n r x = snd (double_divn1_0 n r x). Proof. induction n;simpl;intros;trivial. unfold double_modn1_0_aux, double_divn1_0_aux. destruct (double_split w_0 n x) as (hh,hl). rewrite (IHn r hh). destruct (double_divn1_0 n r hh) as (qh,rh);simpl. rewrite IHn. destruct (double_divn1_0 n rh hl);trivial. Qed. Variable p : w. Variable p_bounded : [|p|] <= Zpos w_digits. Lemma spec_add_mul_divp : forall x y, [| w_add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos w_digits) - [|p|]))) mod wB. Proof. intros;apply spec_add_mul_div;auto. Qed. Definition double_divn1_p_aux n (divn1 : w -> word w n -> word w n -> word w n * w) r h l := let (hh,hl) := double_split w_0 n h in let (lh,ll) := double_split w_0 n l in let (qh,rh) := divn1 r hh hl in let (ql,rl) := divn1 rh hl lh in (double_WW w_WW n qh ql, rl). Fixpoint double_divn1_p (n:nat) : w -> word w n -> word w n -> word w n * w := match n return w -> word w n -> word w n -> word w n * w with | O => fun r h l => w_div21 r (w_add_mul_div p h l) b2p | S n => double_divn1_p_aux n (double_divn1_p n) end. Lemma p_lt_double_digits : forall n, [|p|] <= Zpos (w_digits << n). Proof. induction n;simpl. trivial. case (spec_to_Z p); rewrite Pshiftl_nat_S, Pos2Z.inj_xO;auto with zarith. Qed. Lemma spec_double_divn1_p : forall n r h l, [|r|] < [|b2p|] -> let (q,r') := double_divn1_p n r h l in [|r|] * double_wB w_digits n + ([!n|h!]*2^[|p|] + [!n|l!] / (2^(Zpos(w_digits << n) - [|p|]))) mod double_wB w_digits n = [!n|q!] * [|b2p|] + [|r'|] /\ 0 <= [|r'|] < [|b2p|]. Proof. case (spec_to_Z p); intros HH0 HH1. induction n;intros. simpl (double_divn1_p 0 r h l). unfold double_to_Z, double_wB, "<<". rewrite <- spec_add_mul_divp. exact (spec_div21 (w_add_mul_div p h l) b2p_le H). simpl (double_divn1_p (S n) r h l). unfold double_divn1_p_aux. assert (H1 := spec_split n h);destruct (double_split w_0 n h) as (hh,hl). rewrite H1. rewrite <- double_wB_wwB. assert (H2 := spec_split n l);destruct (double_split w_0 n l) as (lh,ll). rewrite H2. replace ([|r|] * (double_wB w_digits n * double_wB w_digits n) + (([!n|hh!] * double_wB w_digits n + [!n|hl!]) * 2 ^ [|p|] + ([!n|lh!] 
* double_wB w_digits n + [!n|ll!]) / 2^(Zpos (w_digits << (S n)) - [|p|])) mod (double_wB w_digits n * double_wB w_digits n)) with (([|r|] * double_wB w_digits n + ([!n|hh!] * 2^[|p|] + [!n|hl!] / 2^(Zpos (w_digits << n) - [|p|])) mod double_wB w_digits n) * double_wB w_digits n + ([!n|hl!] * 2^[|p|] + [!n|lh!] / 2^(Zpos (w_digits << n) - [|p|])) mod double_wB w_digits n). generalize (IHn r hh hl H);destruct (double_divn1_p n r hh hl) as (qh,rh); intros (H3,H4);rewrite H3. assert ([|rh|] < [|b2p|]). omega. replace (([!n|qh!] * [|b2p|] + [|rh|]) * double_wB w_digits n + ([!n|hl!] * 2 ^ [|p|] + [!n|lh!] / 2 ^ (Zpos (w_digits << n) - [|p|])) mod double_wB w_digits n) with ([!n|qh!] * [|b2p|] *double_wB w_digits n + ([|rh|]*double_wB w_digits n + ([!n|hl!] * 2 ^ [|p|] + [!n|lh!] / 2 ^ (Zpos (w_digits << n) - [|p|])) mod double_wB w_digits n)). 2:ring. generalize (IHn rh hl lh H0);destruct (double_divn1_p n rh hl lh) as (ql,rl); intros (H5,H6);rewrite H5. split;[rewrite spec_double_WW;trivial;ring|trivial]. assert (Uhh := spec_double_to_Z w_digits w_to_Z spec_to_Z n hh); unfold double_wB,base in Uhh. assert (Uhl := spec_double_to_Z w_digits w_to_Z spec_to_Z n hl); unfold double_wB,base in Uhl. assert (Ulh := spec_double_to_Z w_digits w_to_Z spec_to_Z n lh); unfold double_wB,base in Ulh. assert (Ull := spec_double_to_Z w_digits w_to_Z spec_to_Z n ll); unfold double_wB,base in Ull. unfold double_wB,base. assert (UU:=p_lt_double_digits n). rewrite Zdiv_shift_r;auto with zarith. 2:change (Zpos (w_digits << (S n))) with (2*Zpos (w_digits << n));auto with zarith. replace (2 ^ (Zpos (w_digits << (S n)) - [|p|])) with (2^(Zpos (w_digits << n) - [|p|])*2^Zpos (w_digits << n)). rewrite Zdiv_mult_cancel_r;auto with zarith. rewrite Z.mul_add_distr_r with (p:= 2^[|p|]). pattern ([!n|hl!] * 2^[|p|]) at 2; rewrite (shift_unshift_mod (Zpos(w_digits << n))([|p|])([!n|hl!])); auto with zarith. rewrite Z.add_assoc. replace ([!n|hh!] * 2^Zpos (w_digits << n)* 2^[|p|] + ([!n|hl!] / 2^(Zpos (w_digits << n)-[|p|])* 2^Zpos(w_digits << n))) with (([!n|hh!] *2^[|p|] + double_to_Z w_digits w_to_Z n hl / 2^(Zpos (w_digits << n)-[|p|])) * 2^Zpos(w_digits << n));try (ring;fail). rewrite <- Z.add_assoc. rewrite <- (Zmod_shift_r ([|p|]));auto with zarith. replace (2 ^ Zpos (w_digits << n) * 2 ^ Zpos (w_digits << n)) with (2 ^ (Zpos (w_digits << n) + Zpos (w_digits << n))). rewrite (Zmod_shift_r (Zpos (w_digits << n)));auto with zarith. replace (2 ^ (Zpos (w_digits << n) + Zpos (w_digits << n))) with (2^Zpos(w_digits << n) *2^Zpos(w_digits << n)). rewrite (Z.mul_comm (([!n|hh!] * 2 ^ [|p|] + [!n|hl!] / 2 ^ (Zpos (w_digits << n) - [|p|])))). rewrite Zmult_mod_distr_l;auto with zarith. ring. rewrite Zpower_exp;auto with zarith. assert (0 < Zpos (w_digits << n)). unfold Z.lt;reflexivity. auto with zarith. apply Z_mod_lt;auto with zarith. rewrite Zpower_exp;auto with zarith. split;auto with zarith. apply Zdiv_lt_upper_bound;auto with zarith. rewrite <- Zpower_exp;auto with zarith. replace ([|p|] + (Zpos (w_digits << n) - [|p|])) with (Zpos(w_digits << n));auto with zarith. rewrite <- Zpower_exp;auto with zarith. replace (Zpos (w_digits << (S n)) - [|p|]) with (Zpos (w_digits << n) - [|p|] + Zpos (w_digits << n));trivial. change (Zpos (w_digits << (S n))) with (2*Zpos (w_digits << n)). ring. Qed. Definition double_modn1_p_aux n (modn1 : w -> word w n -> word w n -> w) r h l:= let (hh,hl) := double_split w_0 n h in let (lh,ll) := double_split w_0 n l in modn1 (modn1 r hh hl) hl lh. 
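(** Added remark: like [double_modn1_0] above, [double_modn1_p] below
    retraces the recursion of [double_divn1_p] but keeps only the running
    remainder, so no quotient word is ever built.  The lemma
    [spec_double_modn1_p] below identifies it with
    [snd (double_divn1_p ...)]. *)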
Fixpoint double_modn1_p (n:nat) : w -> word w n -> word w n -> w := match n return w -> word w n -> word w n -> w with | O => fun r h l => snd (w_div21 r (w_add_mul_div p h l) b2p) | S n => double_modn1_p_aux n (double_modn1_p n) end. Lemma spec_double_modn1_p : forall n r h l , double_modn1_p n r h l = snd (double_divn1_p n r h l). Proof. induction n;simpl;intros;trivial. unfold double_modn1_p_aux, double_divn1_p_aux. destruct(double_split w_0 n h)as(hh,hl);destruct(double_split w_0 n l) as (lh,ll). rewrite (IHn r hh hl);destruct (double_divn1_p n r hh hl) as (qh,rh). rewrite IHn;simpl;destruct (double_divn1_p n rh hl lh);trivial. Qed. End DIVAUX. Fixpoint high (n:nat) : word w n -> w := match n return word w n -> w with | O => fun a => a | S n => fun (a:zn2z (word w n)) => match a with | W0 => w_0 | WW h l => high n h end end. Lemma spec_double_digits:forall n, Zpos w_digits <= Zpos (w_digits << n). Proof. induction n;simpl;auto with zarith. rewrite Pshiftl_nat_S. change (Zpos (xO (w_digits << n))) with (2*Zpos (w_digits << n)). assert (0 < Zpos w_digits) by reflexivity. auto with zarith. Qed. Lemma spec_high : forall n (x:word w n), [|high n x|] = [!n|x!] / 2^(Zpos (w_digits << n) - Zpos w_digits). Proof. induction n;intros. unfold high,double_to_Z. rewrite Pshiftl_nat_0. replace (Zpos w_digits - Zpos w_digits) with 0;try ring. simpl. rewrite <- (Zdiv_unique [|x|] 1 [|x|] 0);auto with zarith. assert (U2 := spec_double_digits n). assert (U3 : 0 < Zpos w_digits). exact (eq_refl Lt). destruct x;unfold high;fold high. unfold double_to_Z,zn2z_to_Z;rewrite spec_0. rewrite Zdiv_0_l;trivial. assert (U0 := spec_double_to_Z w_digits w_to_Z spec_to_Z n w0); assert (U1 := spec_double_to_Z w_digits w_to_Z spec_to_Z n w1). simpl [!S n|WW w0 w1!]. unfold double_wB,base;rewrite Zdiv_shift_r;auto with zarith. replace (2 ^ (Zpos (w_digits << (S n)) - Zpos w_digits)) with (2^(Zpos (w_digits << n) - Zpos w_digits) * 2^Zpos (w_digits << n)). rewrite Zdiv_mult_cancel_r;auto with zarith. rewrite <- Zpower_exp;auto with zarith. replace (Zpos (w_digits << n) - Zpos w_digits + Zpos (w_digits << n)) with (Zpos (w_digits << (S n)) - Zpos w_digits);trivial. change (Zpos (w_digits << (S n))) with (2*Zpos (w_digits << n));ring. change (Zpos (w_digits << (S n))) with (2*Zpos (w_digits << n)); auto with zarith. Qed. Definition double_divn1 (n:nat) (a:word w n) (b:w) := let p := w_head0 b in match w_compare p w_0 with | Gt => let b2p := w_add_mul_div p b w_0 in let ha := high n a in let k := w_sub w_zdigits p in let lsr_n := w_add_mul_div k w_0 in let r0 := w_add_mul_div p w_0 ha in let (q,r) := double_divn1_p b2p p n r0 a (double_0 w_0 n) in (q, lsr_n r) | _ => double_divn1_0 b n w_0 a end. Lemma spec_double_divn1 : forall n a b, 0 < [|b|] -> let (q,r) := double_divn1 n a b in [!n|a!] = [!n|q!] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. intros n a b H. unfold double_divn1. case (spec_head0 H); intros H0 H1. case (spec_to_Z (w_head0 b)); intros HH1 HH2. rewrite spec_compare; case Z.compare_spec; rewrite spec_0; intros H2; auto with zarith. assert (Hv1: wB/2 <= [|b|]). generalize H0; rewrite H2; rewrite Z.pow_0_r; rewrite Z.mul_1_l; auto. assert (Hv2: [|w_0|] < [|b|]). rewrite spec_0; auto. generalize (spec_double_divn1_0 Hv1 n a Hv2). rewrite spec_0;rewrite Z.mul_0_l; rewrite Z.add_0_l; auto. contradict H2; auto with zarith. assert (HHHH : 0 < [|w_head0 b|]); auto with zarith. assert ([|w_head0 b|] < Zpos w_digits). case (Z.le_gt_cases (Zpos w_digits) [|w_head0 b|]); auto; intros HH. assert (2 ^ [|w_head0 b|] < wB). 
apply Z.le_lt_trans with (2 ^ [|w_head0 b|] * [|b|]);auto with zarith. replace (2 ^ [|w_head0 b|]) with (2^[|w_head0 b|] * 1);try (ring;fail). apply Z.mul_le_mono_nonneg;auto with zarith. assert (wB <= 2^[|w_head0 b|]). unfold base;apply Zpower_le_monotone;auto with zarith. omega. assert ([|w_add_mul_div (w_head0 b) b w_0|] = 2 ^ [|w_head0 b|] * [|b|]). rewrite (spec_add_mul_div b w_0); auto with zarith. rewrite spec_0;rewrite Zdiv_0_l; try omega. rewrite Z.add_0_r; rewrite Z.mul_comm. rewrite Zmod_small; auto with zarith. assert (H5 := spec_to_Z (high n a)). assert ([|w_add_mul_div (w_head0 b) w_0 (high n a)|] <[|w_add_mul_div (w_head0 b) b w_0|]). rewrite H4. rewrite spec_add_mul_div;auto with zarith. rewrite spec_0;rewrite Z.mul_0_l;rewrite Z.add_0_l. assert (([|high n a|]/2^(Zpos w_digits - [|w_head0 b|])) < wB). apply Zdiv_lt_upper_bound;auto with zarith. apply Z.lt_le_trans with wB;auto with zarith. pattern wB at 1;replace wB with (wB*1);try ring. apply Z.mul_le_mono_nonneg;auto with zarith. assert (H6 := Z.pow_pos_nonneg 2 (Zpos w_digits - [|w_head0 b|])); auto with zarith. rewrite Zmod_small;auto with zarith. apply Zdiv_lt_upper_bound;auto with zarith. apply Z.lt_le_trans with wB;auto with zarith. apply Z.le_trans with (2 ^ [|w_head0 b|] * [|b|] * 2). rewrite <- wB_div_2; try omega. apply Z.mul_le_mono_nonneg;auto with zarith. pattern 2 at 1;rewrite <- Z.pow_1_r. apply Zpower_le_monotone;split;auto with zarith. rewrite <- H4 in H0. assert (Hb3: [|w_head0 b|] <= Zpos w_digits); auto with zarith. assert (H7:= spec_double_divn1_p H0 Hb3 n a (double_0 w_0 n) H6). destruct (double_divn1_p (w_add_mul_div (w_head0 b) b w_0) (w_head0 b) n (w_add_mul_div (w_head0 b) w_0 (high n a)) a (double_0 w_0 n)) as (q,r). assert (U:= spec_double_digits n). rewrite spec_double_0 in H7;trivial;rewrite Zdiv_0_l in H7. rewrite Z.add_0_r in H7. rewrite spec_add_mul_div in H7;auto with zarith. rewrite spec_0 in H7;rewrite Z.mul_0_l in H7;rewrite Z.add_0_l in H7. assert (([|high n a|] / 2 ^ (Zpos w_digits - [|w_head0 b|])) mod wB = [!n|a!] / 2^(Zpos (w_digits << n) - [|w_head0 b|])). rewrite Zmod_small;auto with zarith. rewrite spec_high. rewrite Zdiv_Zdiv;auto with zarith. rewrite <- Zpower_exp;auto with zarith. replace (Zpos (w_digits << n) - Zpos w_digits + (Zpos w_digits - [|w_head0 b|])) with (Zpos (w_digits << n) - [|w_head0 b|]);trivial;ring. assert (H8 := Z.pow_pos_nonneg 2 (Zpos w_digits - [|w_head0 b|]));auto with zarith. split;auto with zarith. apply Z.le_lt_trans with ([|high n a|]);auto with zarith. apply Zdiv_le_upper_bound;auto with zarith. pattern ([|high n a|]) at 1;rewrite <- Z.mul_1_r. apply Z.mul_le_mono_nonneg;auto with zarith. rewrite H8 in H7;unfold double_wB,base in H7. rewrite <- shift_unshift_mod in H7;auto with zarith. rewrite H4 in H7. assert ([|w_add_mul_div (w_sub w_zdigits (w_head0 b)) w_0 r|] = [|r|]/2^[|w_head0 b|]). rewrite spec_add_mul_div. rewrite spec_0;rewrite Z.mul_0_l;rewrite Z.add_0_l. replace (Zpos w_digits - [|w_sub w_zdigits (w_head0 b)|]) with ([|w_head0 b|]). rewrite Zmod_small;auto with zarith. assert (H9 := spec_to_Z r). split;auto with zarith. apply Z.le_lt_trans with ([|r|]);auto with zarith. apply Zdiv_le_upper_bound;auto with zarith. pattern ([|r|]) at 1;rewrite <- Z.mul_1_r. apply Z.mul_le_mono_nonneg;auto with zarith. assert (H10 := Z.pow_pos_nonneg 2 ([|w_head0 b|]));auto with zarith. rewrite spec_sub. rewrite Zmod_small; auto with zarith. split; auto with zarith. case (spec_to_Z w_zdigits); auto with zarith. rewrite spec_sub. 
rewrite Zmod_small; auto with zarith. split; auto with zarith. case (spec_to_Z w_zdigits); auto with zarith. case H7; intros H71 H72. split. rewrite <- (Z_div_mult [!n|a!] (2^[|w_head0 b|]));auto with zarith. rewrite H71;rewrite H9. replace ([!n|q!] * (2 ^ [|w_head0 b|] * [|b|])) with ([!n|q!] *[|b|] * 2^[|w_head0 b|]); try (ring;fail). rewrite Z_div_plus_l;auto with zarith. assert (H10 := spec_to_Z (w_add_mul_div (w_sub w_zdigits (w_head0 b)) w_0 r));split; auto with zarith. rewrite H9. apply Zdiv_lt_upper_bound;auto with zarith. rewrite Z.mul_comm;auto with zarith. exact (spec_double_to_Z w_digits w_to_Z spec_to_Z n a). Qed. Definition double_modn1 (n:nat) (a:word w n) (b:w) := let p := w_head0 b in match w_compare p w_0 with | Gt => let b2p := w_add_mul_div p b w_0 in let ha := high n a in let k := w_sub w_zdigits p in let lsr_n := w_add_mul_div k w_0 in let r0 := w_add_mul_div p w_0 ha in let r := double_modn1_p b2p p n r0 a (double_0 w_0 n) in lsr_n r | _ => double_modn1_0 b n w_0 a end. Lemma spec_double_modn1_aux : forall n a b, double_modn1 n a b = snd (double_divn1 n a b). Proof. intros n a b;unfold double_divn1,double_modn1. rewrite spec_compare; case Z.compare_spec; rewrite spec_0; intros H2; auto with zarith. apply spec_double_modn1_0. apply spec_double_modn1_0. rewrite spec_double_modn1_p. destruct (double_divn1_p (w_add_mul_div (w_head0 b) b w_0) (w_head0 b) n (w_add_mul_div (w_head0 b) w_0 (high n a)) a (double_0 w_0 n));simpl;trivial. Qed. Lemma spec_double_modn1 : forall n a b, 0 < [|b|] -> [|double_modn1 n a b|] = [!n|a!] mod [|b|]. Proof. intros n a b H;assert (H1 := spec_double_divn1 n a H). assert (H2 := spec_double_modn1_aux n a b). rewrite H2;destruct (double_divn1 n a b) as (q,r). simpl;apply Zmod_unique with (double_to_Z w_digits w_to_Z n q);auto with zarith. destruct H1 as (h1,h2);rewrite h1;ring. Qed. End GENDIVN1. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleType.v0000640000175000001440000000350212010532755023076 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* carry | C1 : A -> carry. Definition interp_carry (sign:Z)(B:Z)(interp:A -> Z) c := match c with | C0 x => interp x | C1 x => sign*B + interp x end. End Carry. Section Zn2Z. Variable znz : Type. (** From a type [znz] representing a cyclic structure Z/nZ, we produce a representation of Z/2nZ by pairs of elements of [znz] (plus a special case for zero). High half of the new number comes first. *) Inductive zn2z := | W0 : zn2z | WW : znz -> znz -> zn2z. Definition zn2z_to_Z (wB:Z) (w_to_Z:znz->Z) (x:zn2z) := match x with | W0 => 0 | WW xh xl => w_to_Z xh * wB + w_to_Z xl end. End Zn2Z. Arguments W0 [znz]. (** From a cyclic representation [w], we iterate the [zn2z] construct [n] times, gaining the type of binary trees of depth at most [n], whose leafs are either W0 (if depth < n) or elements of w (if depth = n). *) Fixpoint word (w:Type) (n:nat) : Type := match n with | O => w | S n => zn2z (word w n) end. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleDiv.v0000640000175000001440000015714312010532755022712 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable w_pos_mod : w -> w -> w. Variable w_compare : w -> w -> comparison. Variable ww_compare : zn2z w -> zn2z w -> comparison. Variable w_0W : w -> zn2z w. Variable low: zn2z w -> w. 
Variable ww_sub: zn2z w -> zn2z w -> zn2z w. Variable ww_zdigits : zn2z w. Definition ww_pos_mod p x := let zdigits := w_0W w_zdigits in match x with | W0 => W0 | WW xh xl => match ww_compare p zdigits with | Eq => w_WW w_0 xl | Lt => w_WW w_0 (w_pos_mod (low p) xl) | Gt => match ww_compare p ww_zdigits with | Lt => let n := low (ww_sub p zdigits) in w_WW (w_pos_mod n xh) xl | _ => x end end end. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_to_w_Z : forall x, 0 <= [[x]] < wwB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_pos_mod : forall w p, [|w_pos_mod p w|] = [|w|] mod (2 ^ [|p|]). Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_ww_compare : forall x y, ww_compare x y = Z.compare [[x]] [[y]]. Variable spec_ww_sub: forall x y, [[ww_sub x y]] = ([[x]] - [[y]]) mod wwB. Variable spec_zdigits : [| w_zdigits |] = Zpos w_digits. Variable spec_low: forall x, [| low x|] = [[x]] mod wB. Variable spec_ww_zdigits : [[ww_zdigits]] = 2 * [|w_zdigits|]. Variable spec_ww_digits : ww_digits w_digits = xO w_digits. Hint Rewrite spec_w_0 spec_w_WW : w_rewrite. Lemma spec_ww_pos_mod : forall w p, [[ww_pos_mod p w]] = [[w]] mod (2 ^ [[p]]). assert (HHHHH:= lt_0_wB w_digits). assert (F0: forall x y, x - y + y = x); auto with zarith. intros w1 p; case (spec_to_w_Z p); intros HH1 HH2. unfold ww_pos_mod; case w1. simpl; rewrite Zmod_small; split; auto with zarith. intros xh xl; rewrite spec_ww_compare. case Z.compare_spec; rewrite spec_w_0W; rewrite spec_zdigits; fold wB; intros H1. rewrite H1; simpl ww_to_Z. autorewrite with w_rewrite rm10. rewrite Zplus_mod; auto with zarith. rewrite Z_mod_mult; auto with zarith. autorewrite with rm10. rewrite Zmod_mod; auto with zarith. rewrite Zmod_small; auto with zarith. autorewrite with w_rewrite rm10. simpl ww_to_Z. rewrite spec_pos_mod. assert (HH0: [|low p|] = [[p]]). rewrite spec_low. apply Zmod_small; auto with zarith. case (spec_to_w_Z p); intros HHH1 HHH2; split; auto with zarith. apply Z.lt_le_trans with (1 := H1). unfold base; apply Zpower2_le_lin; auto with zarith. rewrite HH0. rewrite Zplus_mod; auto with zarith. unfold base. rewrite <- (F0 (Zpos w_digits) [[p]]). rewrite Zpower_exp; auto with zarith. rewrite Z.mul_assoc. rewrite Z_mod_mult; auto with zarith. autorewrite with w_rewrite rm10. rewrite Zmod_mod; auto with zarith. rewrite spec_ww_compare. case Z.compare_spec; rewrite spec_ww_zdigits; rewrite spec_zdigits; intros H2. replace (2^[[p]]) with wwB. rewrite Zmod_small; auto with zarith. unfold base; rewrite H2. rewrite spec_ww_digits; auto. assert (HH0: [|low (ww_sub p (w_0W w_zdigits))|] = [[p]] - Zpos w_digits). rewrite spec_low. rewrite spec_ww_sub. rewrite spec_w_0W; rewrite spec_zdigits. rewrite <- Zmod_div_mod; auto with zarith. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.lt_le_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_le_lin; auto with zarith. exists wB; unfold base; rewrite <- Zpower_exp; auto with zarith. rewrite spec_ww_digits; apply f_equal with (f := Z.pow 2); rewrite Pos2Z.inj_xO; auto with zarith. simpl ww_to_Z; autorewrite with w_rewrite. rewrite spec_pos_mod; rewrite HH0. 
pattern [|xh|] at 2; rewrite Z_div_mod_eq with (b := 2 ^ ([[p]] - Zpos w_digits)); auto with zarith. rewrite (fun x => (Z.mul_comm (2 ^ x))); rewrite Z.mul_add_distr_r. unfold base; rewrite <- Z.mul_assoc; rewrite <- Zpower_exp; auto with zarith. rewrite F0; auto with zarith. rewrite <- Z.add_assoc; rewrite Zplus_mod; auto with zarith. rewrite Z_mod_mult; auto with zarith. autorewrite with rm10. rewrite Zmod_mod; auto with zarith. symmetry; apply Zmod_small; auto with zarith. case (spec_to_Z xh); intros U1 U2. case (spec_to_Z xl); intros U3 U4. split; auto with zarith. apply Z.add_nonneg_nonneg; auto with zarith. apply Z.mul_nonneg_nonneg; auto with zarith. match goal with |- 0 <= ?X mod ?Y => case (Z_mod_lt X Y); auto with zarith end. match goal with |- ?X mod ?Y * ?U + ?Z < ?T => apply Z.le_lt_trans with ((Y - 1) * U + Z ); [case (Z_mod_lt X Y); auto with zarith | idtac] end. match goal with |- ?X * ?U + ?Y < ?Z => apply Z.le_lt_trans with (X * U + (U - 1)) end. apply Z.add_le_mono_l; auto with zarith. case (spec_to_Z xl); unfold base; auto with zarith. rewrite Z.mul_sub_distr_r; rewrite <- Zpower_exp; auto with zarith. rewrite F0; auto with zarith. rewrite Zmod_small; auto with zarith. case (spec_to_w_Z (WW xh xl)); intros U1 U2. split; auto with zarith. apply Z.lt_le_trans with (1:= U2). unfold base; rewrite spec_ww_digits. apply Zpower_le_monotone; auto with zarith. split; auto with zarith. rewrite Pos2Z.inj_xO; auto with zarith. Qed. End POS_MOD. Section DoubleDiv32. Variable w : Type. Variable w_0 : w. Variable w_Bm1 : w. Variable w_Bm2 : w. Variable w_WW : w -> w -> zn2z w. Variable w_compare : w -> w -> comparison. Variable w_add_c : w -> w -> carry w. Variable w_add_carry_c : w -> w -> carry w. Variable w_add : w -> w -> w. Variable w_add_carry : w -> w -> w. Variable w_pred : w -> w. Variable w_sub : w -> w -> w. Variable w_mul_c : w -> w -> zn2z w. Variable w_div21 : w -> w -> w -> w*w. Variable ww_sub_c : zn2z w -> zn2z w -> carry (zn2z w). Definition w_div32 a1 a2 a3 b1 b2 := Eval lazy beta iota delta [ww_add_c_cont ww_add] in match w_compare a1 b1 with | Lt => let (q,r) := w_div21 a1 a2 b1 in match ww_sub_c (w_WW r a3) (w_mul_c q b2) with | C0 r1 => (q,r1) | C1 r1 => let q := w_pred q in ww_add_c_cont w_WW w_add_c w_add_carry_c (fun r2=>(w_pred q, ww_add w_add_c w_add w_add_carry r2 (WW b1 b2))) (fun r2 => (q,r2)) r1 (WW b1 b2) end | Eq => ww_add_c_cont w_WW w_add_c w_add_carry_c (fun r => (w_Bm2, ww_add w_add_c w_add w_add_carry r (WW b1 b2))) (fun r => (w_Bm1,r)) (WW (w_sub a2 b2) a3) (WW b1 b2) | Gt => (w_0, W0) (* cas absurde *) end. (* Proof *) Variable w_digits : positive. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wB w_to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB w_to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_Bm1 : [|w_Bm1|] = wB - 1. Variable spec_w_Bm2 : [|w_Bm2|] = wB - 2. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_w_add_c : forall x y, [+|w_add_c x y|] = [|x|] + [|y|]. 
Variable spec_w_add_carry_c : forall x y, [+|w_add_carry_c x y|] = [|x|] + [|y|] + 1. Variable spec_w_add : forall x y, [|w_add x y|] = ([|x|] + [|y|]) mod wB. Variable spec_w_add_carry : forall x y, [|w_add_carry x y|] = ([|x|] + [|y|] + 1) mod wB. Variable spec_pred : forall x, [|w_pred x|] = ([|x|] - 1) mod wB. Variable spec_sub : forall x y, [|w_sub x y|] = ([|x|] - [|y|]) mod wB. Variable spec_mul_c : forall x y, [[ w_mul_c x y ]] = [|x|] * [|y|]. Variable spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := w_div21 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Variable spec_ww_sub_c : forall x y, [-[ww_sub_c x y]] = [[x]] - [[y]]. Ltac Spec_w_to_Z x := let H:= fresh "HH" in assert (H:= spec_to_Z x). Ltac Spec_ww_to_Z x := let H:= fresh "HH" in assert (H:= spec_ww_to_Z w_digits w_to_Z spec_to_Z x). Theorem wB_div2: forall x, wB/2 <= x -> wB <= 2 * x. intros x H; rewrite <- wB_div_2; apply Z.mul_le_mono_nonneg_l; auto with zarith. Qed. Lemma Zmult_lt_0_reg_r_2 : forall n m : Z, 0 <= n -> 0 < m * n -> 0 < m. Proof. intros n m H1 H2;apply Z.mul_pos_cancel_r with n;trivial. Z.le_elim H1; trivial. subst;rewrite Z.mul_0_r in H2;discriminate H2. Qed. Theorem spec_w_div32 : forall a1 a2 a3 b1 b2, wB/2 <= [|b1|] -> [[WW a1 a2]] < [[WW b1 b2]] -> let (q,r) := w_div32 a1 a2 a3 b1 b2 in [|a1|] * wwB + [|a2|] * wB + [|a3|] = [|q|] * ([|b1|] * wB + [|b2|]) + [[r]] /\ 0 <= [[r]] < [|b1|] * wB + [|b2|]. Proof. intros a1 a2 a3 b1 b2 Hle Hlt. assert (U:= lt_0_wB w_digits); assert (U1:= lt_0_wwB w_digits). Spec_w_to_Z a1;Spec_w_to_Z a2;Spec_w_to_Z a3;Spec_w_to_Z b1;Spec_w_to_Z b2. rewrite wwB_wBwB; rewrite Z.pow_2_r; rewrite Z.mul_assoc;rewrite <- Z.mul_add_distr_r. change (w_div32 a1 a2 a3 b1 b2) with match w_compare a1 b1 with | Lt => let (q,r) := w_div21 a1 a2 b1 in match ww_sub_c (w_WW r a3) (w_mul_c q b2) with | C0 r1 => (q,r1) | C1 r1 => let q := w_pred q in ww_add_c_cont w_WW w_add_c w_add_carry_c (fun r2=>(w_pred q, ww_add w_add_c w_add w_add_carry r2 (WW b1 b2))) (fun r2 => (q,r2)) r1 (WW b1 b2) end | Eq => ww_add_c_cont w_WW w_add_c w_add_carry_c (fun r => (w_Bm2, ww_add w_add_c w_add w_add_carry r (WW b1 b2))) (fun r => (w_Bm1,r)) (WW (w_sub a2 b2) a3) (WW b1 b2) | Gt => (w_0, W0) (* cas absurde *) end. rewrite spec_compare. case Z.compare_spec; intro Hcmp. simpl in Hlt. rewrite Hcmp in Hlt;assert ([|a2|] < [|b2|]). omega. assert ([[WW (w_sub a2 b2) a3]] = ([|a2|]-[|b2|])*wB + [|a3|] + wwB). simpl;rewrite spec_sub. assert ([|a2|] - [|b2|] = wB*(-1) + ([|a2|] - [|b2|] + wB)). ring. assert (0 <= [|a2|] - [|b2|] + wB < wB). omega. rewrite <-(Zmod_unique ([|a2|]-[|b2|]) wB (-1) ([|a2|]-[|b2|]+wB) H1 H0). rewrite wwB_wBwB;ring. assert (U2 := wB_pos w_digits). eapply spec_ww_add_c_cont with (P := fun (x y:zn2z w) (res:w*zn2z w) => let (q, r) := res in ([|a1|] * wB + [|a2|]) * wB + [|a3|] = [|q|] * ([|b1|] * wB + [|b2|]) + [[r]] /\ 0 <= [[r]] < [|b1|] * wB + [|b2|]);eauto. rewrite H0;intros r. repeat (rewrite spec_ww_add;eauto || rewrite spec_w_Bm1 || rewrite spec_w_Bm2); simpl ww_to_Z;try rewrite Z.mul_1_l;intros H1. assert (0<= ([[r]] + ([|b1|] * wB + [|b2|])) - wwB < [|b1|] * wB + [|b2|]). Spec_ww_to_Z r;split;zarith. rewrite H1. assert (H12:= wB_div2 Hle). assert (wwB <= 2 * [|b1|] * wB). rewrite wwB_wBwB; rewrite Z.pow_2_r; zarith. assert (-wwB < ([|a2|] - [|b2|]) * wB + [|a3|] < 0). split. apply Z.lt_le_trans with (([|a2|] - [|b2|]) * wB);zarith. rewrite wwB_wBwB;replace (-(wB^2)) with (-wB*wB);[zarith | ring]. 
apply Z.mul_lt_mono_pos_r;zarith. apply Z.le_lt_trans with (([|a2|] - [|b2|]) * wB + (wB -1));zarith. replace ( ([|a2|] - [|b2|]) * wB + (wB - 1)) with (([|a2|] - [|b2|] + 1) * wB + - 1);[zarith | ring]. assert (([|a2|] - [|b2|] + 1) * wB <= 0);zarith. replace 0 with (0*wB);zarith. replace (([|a2|] - [|b2|]) * wB + [|a3|] + wwB + ([|b1|] * wB + [|b2|]) + ([|b1|] * wB + [|b2|]) - wwB) with (([|a2|] - [|b2|]) * wB + [|a3|] + 2*[|b1|] * wB + 2*[|b2|]); [zarith | ring]. rewrite <- (Zmod_unique ([[r]] + ([|b1|] * wB + [|b2|])) wwB 1 ([[r]] + ([|b1|] * wB + [|b2|]) - wwB));zarith;try (ring;fail). split. rewrite H1;rewrite Hcmp;ring. trivial. Spec_ww_to_Z (WW b1 b2). simpl in HH4;zarith. rewrite H0;intros r;repeat (rewrite spec_w_Bm1 || rewrite spec_w_Bm2); simpl ww_to_Z;try rewrite Z.mul_1_l;intros H1. assert ([[r]]=([|a2|]-[|b2|])*wB+[|a3|]+([|b1|]*wB+[|b2|])). zarith. split. rewrite H2;rewrite Hcmp;ring. split. Spec_ww_to_Z r;zarith. rewrite H2. assert (([|a2|] - [|b2|]) * wB + [|a3|] < 0);zarith. apply Z.le_lt_trans with (([|a2|] - [|b2|]) * wB + (wB -1));zarith. replace ( ([|a2|] - [|b2|]) * wB + (wB - 1)) with (([|a2|] - [|b2|] + 1) * wB + - 1);[zarith|ring]. assert (([|a2|] - [|b2|] + 1) * wB <= 0);zarith. replace 0 with (0*wB);zarith. (* Cas Lt *) assert (Hdiv21 := spec_div21 a2 Hle Hcmp); destruct (w_div21 a1 a2 b1) as (q, r);destruct Hdiv21. rewrite H. assert (Hq := spec_to_Z q). generalize (spec_ww_sub_c (w_WW r a3) (w_mul_c q b2)); destruct (ww_sub_c (w_WW r a3) (w_mul_c q b2)) as [r1|r1];repeat (rewrite spec_w_WW || rewrite spec_mul_c); unfold interp_carry;intros H1. rewrite H1. split. ring. split. rewrite <- H1;destruct (spec_ww_to_Z w_digits w_to_Z spec_to_Z r1);trivial. apply Z.le_lt_trans with ([|r|] * wB + [|a3|]). assert ( 0 <= [|q|] * [|b2|]);zarith. apply beta_lex_inv;zarith. assert ([[r1]] = [|r|] * wB + [|a3|] - [|q|] * [|b2|] + wwB). rewrite <- H1;ring. Spec_ww_to_Z r1; assert (0 <= [|r|]*wB). zarith. assert (0 < [|q|] * [|b2|]). zarith. assert (0 < [|q|]). apply Zmult_lt_0_reg_r_2 with [|b2|];zarith. eapply spec_ww_add_c_cont with (P := fun (x y:zn2z w) (res:w*zn2z w) => let (q0, r0) := res in ([|q|] * [|b1|] + [|r|]) * wB + [|a3|] = [|q0|] * ([|b1|] * wB + [|b2|]) + [[r0]] /\ 0 <= [[r0]] < [|b1|] * wB + [|b2|]);eauto. intros r2;repeat (rewrite spec_pred || rewrite spec_ww_add;eauto); simpl ww_to_Z;intros H7. assert (0 < [|q|] - 1). assert (H6 : 1 <= [|q|]) by zarith. Z.le_elim H6;zarith. rewrite <- H6 in H2;rewrite H2 in H7. assert (0 < [|b1|]*wB). apply Z.mul_pos_pos;zarith. Spec_ww_to_Z r2. zarith. rewrite (Zmod_small ([|q|] -1));zarith. rewrite (Zmod_small ([|q|] -1 -1));zarith. assert ([[r2]] + ([|b1|] * wB + [|b2|]) = wwB * 1 + ([|r|] * wB + [|a3|] - [|q|] * [|b2|] + 2 * ([|b1|] * wB + [|b2|]))). rewrite H7;rewrite H2;ring. assert ([|r|]*wB + [|a3|] - [|q|]*[|b2|] + 2 * ([|b1|]*wB + [|b2|]) < [|b1|]*wB + [|b2|]). Spec_ww_to_Z r2;omega. Spec_ww_to_Z (WW b1 b2). simpl in HH5. assert (0 <= [|r|]*wB + [|a3|] - [|q|]*[|b2|] + 2 * ([|b1|]*wB + [|b2|]) < wwB). split;try omega. replace (2*([|b1|]*wB+[|b2|])) with ((2*[|b1|])*wB+2*[|b2|]). 2:ring. assert (H12:= wB_div2 Hle). assert (wwB <= 2 * [|b1|] * wB). rewrite wwB_wBwB; rewrite Z.pow_2_r; zarith. omega. rewrite <- (Zmod_unique ([[r2]] + ([|b1|] * wB + [|b2|])) wwB 1 ([|r|] * wB + [|a3|] - [|q|] * [|b2|] + 2*([|b1|] * wB + [|b2|])) H10 H8). split. ring. zarith. intros r2;repeat (rewrite spec_pred);simpl ww_to_Z;intros H7. rewrite (Zmod_small ([|q|] -1));zarith. split. 
replace [[r2]] with ([[r1]] + ([|b1|] * wB + [|b2|]) -wwB). rewrite H2; ring. rewrite <- H7; ring. Spec_ww_to_Z r2;Spec_ww_to_Z r1. omega. simpl in Hlt. assert ([|a1|] * wB + [|a2|] <= [|b1|] * wB + [|b2|]). zarith. assert (H1 := beta_lex _ _ _ _ _ H HH0 HH3). rewrite spec_w_0;simpl;zarith. Qed. End DoubleDiv32. Section DoubleDiv21. Variable w : Type. Variable w_0 : w. Variable w_0W : w -> zn2z w. Variable w_div32 : w -> w -> w -> w -> w -> w * zn2z w. Variable ww_1 : zn2z w. Variable ww_compare : zn2z w -> zn2z w -> comparison. Variable ww_sub : zn2z w -> zn2z w -> zn2z w. Definition ww_div21 a1 a2 b := match a1 with | W0 => match ww_compare a2 b with | Gt => (ww_1, ww_sub a2 b) | Eq => (ww_1, W0) | Lt => (W0, a2) end | WW a1h a1l => match a2 with | W0 => match b with | W0 => (W0,W0) (* cas absurde *) | WW b1 b2 => let (q1, r) := w_div32 a1h a1l w_0 b1 b2 in match r with | W0 => (WW q1 w_0, W0) | WW r1 r2 => let (q2, s) := w_div32 r1 r2 w_0 b1 b2 in (WW q1 q2, s) end end | WW a2h a2l => match b with | W0 => (W0,W0) (* cas absurde *) | WW b1 b2 => let (q1, r) := w_div32 a1h a1l a2h b1 b2 in match r with | W0 => (WW q1 w_0, w_0W a2l) | WW r1 r2 => let (q2, s) := w_div32 r1 r2 a2l b1 b2 in (WW q1 q2, s) end end end end. (* Proof *) Variable w_digits : positive. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_w_div32 : forall a1 a2 a3 b1 b2, wB/2 <= [|b1|] -> [[WW a1 a2]] < [[WW b1 b2]] -> let (q,r) := w_div32 a1 a2 a3 b1 b2 in [|a1|] * wwB + [|a2|] * wB + [|a3|] = [|q|] * ([|b1|] * wB + [|b2|]) + [[r]] /\ 0 <= [[r]] < [|b1|] * wB + [|b2|]. Variable spec_ww_1 : [[ww_1]] = 1. Variable spec_ww_compare : forall x y, ww_compare x y = Z.compare [[x]] [[y]]. Variable spec_ww_sub : forall x y, [[ww_sub x y]] = ([[x]] - [[y]]) mod wwB. Theorem wwB_div: wwB = 2 * (wwB / 2). Proof. rewrite wwB_div_2; rewrite Z.mul_assoc; rewrite wB_div_2; auto. rewrite <- Z.pow_2_r; apply wwB_wBwB. Qed. Ltac Spec_w_to_Z x := let H:= fresh "HH" in assert (H:= spec_to_Z x). Ltac Spec_ww_to_Z x := let H:= fresh "HH" in assert (H:= spec_ww_to_Z w_digits w_to_Z spec_to_Z x). Theorem spec_ww_div21 : forall a1 a2 b, wwB/2 <= [[b]] -> [[a1]] < [[b]] -> let (q,r) := ww_div21 a1 a2 b in [[a1]] *wwB+[[a2]] = [[q]] * [[b]] + [[r]] /\ 0 <= [[r]] < [[b]]. Proof. assert (U:= lt_0_wB w_digits). assert (U1:= lt_0_wwB w_digits). intros a1 a2 b H Hlt; unfold ww_div21. Spec_ww_to_Z b; assert (Eq: 0 < [[b]]). Spec_ww_to_Z a1;omega. generalize Hlt H ;clear Hlt H;case a1. intros H1 H2;simpl in H1;Spec_ww_to_Z a2. rewrite spec_ww_compare. case Z.compare_spec; simpl;try rewrite spec_ww_1;autorewrite with rm10; intros;zarith. rewrite spec_ww_sub;simpl. rewrite Zmod_small;zarith. split. ring. assert (wwB <= 2*[[b]]);zarith. rewrite wwB_div;zarith. intros a1h a1l. Spec_w_to_Z a1h;Spec_w_to_Z a1l. Spec_ww_to_Z a2. 
destruct a2 as [ |a3 a4]; (destruct b as [ |b1 b2];[unfold Z.le in Eq;discriminate Eq|idtac]); try (Spec_w_to_Z a3; Spec_w_to_Z a4); Spec_w_to_Z b1; Spec_w_to_Z b2; intros Hlt H; match goal with |-context [w_div32 ?X ?Y ?Z ?T ?U] => generalize (@spec_w_div32 X Y Z T U); case (w_div32 X Y Z T U); intros q1 r H0 end; (assert (Eq1: wB / 2 <= [|b1|]);[ apply (@beta_lex (wB / 2) 0 [|b1|] [|b2|] wB); auto with zarith; autorewrite with rm10;repeat rewrite (Z.mul_comm wB); rewrite <- wwB_div_2; trivial | generalize (H0 Eq1 Hlt);clear H0;destruct r as [ |r1 r2];simpl; try rewrite spec_w_0; try rewrite spec_w_0W;repeat rewrite Z.add_0_r; intros (H1,H2) ]). split;[rewrite wwB_wBwB; rewrite Z.pow_2_r | trivial]. rewrite Z.mul_assoc;rewrite Z.mul_add_distr_r;rewrite <- Z.mul_assoc; rewrite <- Z.pow_2_r; rewrite <- wwB_wBwB;rewrite H1;ring. destruct H2 as (H2,H3);match goal with |-context [w_div32 ?X ?Y ?Z ?T ?U] => generalize (@spec_w_div32 X Y Z T U); case (w_div32 X Y Z T U); intros q r H0;generalize (H0 Eq1 H3);clear H0;intros (H4,H5) end. split;[rewrite wwB_wBwB | trivial]. rewrite Z.pow_2_r. rewrite Z.mul_assoc;rewrite Z.mul_add_distr_r;rewrite <- Z.mul_assoc; rewrite <- Z.pow_2_r. rewrite <- wwB_wBwB;rewrite H1. rewrite spec_w_0 in H4;rewrite Z.add_0_r in H4. repeat rewrite Z.mul_add_distr_r. rewrite <- (Z.mul_assoc [|r1|]). rewrite <- Z.pow_2_r; rewrite <- wwB_wBwB;rewrite H4;simpl;ring. split;[rewrite wwB_wBwB | split;zarith]. replace (([|a1h|] * wB + [|a1l|]) * wB^2 + ([|a3|] * wB + [|a4|])) with (([|a1h|] * wwB + [|a1l|] * wB + [|a3|])*wB+ [|a4|]). rewrite H1;ring. rewrite wwB_wBwB;ring. change [|a4|] with (0*wB+[|a4|]);apply beta_lex_inv;zarith. assert (1 <= wB/2);zarith. assert (H_:= wB_pos w_digits);apply Zdiv_le_lower_bound;zarith. destruct H2 as (H2,H3);match goal with |-context [w_div32 ?X ?Y ?Z ?T ?U] => generalize (@spec_w_div32 X Y Z T U); case (w_div32 X Y Z T U); intros q r H0;generalize (H0 Eq1 H3);clear H0;intros (H4,H5) end. split;trivial. replace (([|a1h|] * wB + [|a1l|]) * wwB + ([|a3|] * wB + [|a4|])) with (([|a1h|] * wwB + [|a1l|] * wB + [|a3|])*wB + [|a4|]); [rewrite H1 | rewrite wwB_wBwB;ring]. replace (([|q1|]*([|b1|]*wB+[|b2|])+([|r1|]*wB+[|r2|]))*wB+[|a4|]) with (([|q1|]*([|b1|]*wB+[|b2|]))*wB+([|r1|]*wwB+[|r2|]*wB+[|a4|])); [rewrite H4;simpl|rewrite wwB_wBwB];ring. Qed. End DoubleDiv21. Section DoubleDivGt. Variable w : Type. Variable w_digits : positive. Variable w_0 : w. Variable w_WW : w -> w -> zn2z w. Variable w_0W : w -> zn2z w. Variable w_compare : w -> w -> comparison. Variable w_eq0 : w -> bool. Variable w_opp_c : w -> carry w. Variable w_opp w_opp_carry : w -> w. Variable w_sub_c : w -> w -> carry w. Variable w_sub w_sub_carry : w -> w -> w. Variable w_div_gt : w -> w -> w*w. Variable w_mod_gt : w -> w -> w. Variable w_gcd_gt : w -> w -> w. Variable w_add_mul_div : w -> w -> w -> w. Variable w_head0 : w -> w. Variable w_div21 : w -> w -> w -> w * w. Variable w_div32 : w -> w -> w -> w -> w -> w * zn2z w. Variable _ww_zdigits : zn2z w. Variable ww_1 : zn2z w. Variable ww_add_mul_div : zn2z w -> zn2z w -> zn2z w -> zn2z w. Variable w_zdigits : w. 
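(** Added overview of the normalisation performed by [ww_div_gt_aux]
    (defined just below): with [p := w_head0 bh], i.e. the number of
    leading zero bits of the divisor's high word (cf. [spec_head0]),
    divisor and dividend are shifted left by [p] bits through
    [w_add_mul_div], so that the shifted high word [b1] occupies its full
    width (at least [wB/2]), the precondition of [w_div32].  The remainder
    is then shifted back right by [p] bits via [ww_add_mul_div] applied
    with the complementary amount [_ww_zdigits - p]. *)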
Definition ww_div_gt_aux ah al bh bl :=
  Eval lazy beta iota delta [ww_sub ww_opp] in
  let p := w_head0 bh in
  match w_compare p w_0 with
  | Gt =>
    let b1 := w_add_mul_div p bh bl in
    let b2 := w_add_mul_div p bl w_0 in
    let a1 := w_add_mul_div p w_0 ah in
    let a2 := w_add_mul_div p ah al in
    let a3 := w_add_mul_div p al w_0 in
    let (q,r) := w_div32 a1 a2 a3 b1 b2 in
    (WW w_0 q, ww_add_mul_div
      (ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry
        _ww_zdigits (w_0W p)) W0 r)
  | _ => (ww_1, ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub
                 w_sub_carry (WW ah al) (WW bh bl))
  end.

Definition ww_div_gt a b :=
  Eval lazy beta iota delta [ww_div_gt_aux double_divn1
    double_divn1_p double_divn1_p_aux double_divn1_0 double_divn1_0_aux
    double_split double_0 double_WW] in
  match a, b with
  | W0, _ => (W0,W0)
  | _, W0 => (W0,W0)
  | WW ah al, WW bh bl =>
    if w_eq0 ah then
      let (q,r) := w_div_gt al bl in (WW w_0 q, w_0W r)
    else
      match w_compare w_0 bh with
      | Eq =>
        let(q,r):=
          double_divn1 w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21
            w_compare w_sub 1 a bl in
        (q, w_0W r)
      | Lt => ww_div_gt_aux ah al bh bl
      | Gt => (W0,W0) (* absurd case *)
      end
  end.

Definition ww_mod_gt_aux ah al bh bl :=
  Eval lazy beta iota delta [ww_sub ww_opp] in
  let p := w_head0 bh in
  match w_compare p w_0 with
  | Gt =>
    let b1 := w_add_mul_div p bh bl in
    let b2 := w_add_mul_div p bl w_0 in
    let a1 := w_add_mul_div p w_0 ah in
    let a2 := w_add_mul_div p ah al in
    let a3 := w_add_mul_div p al w_0 in
    let (q,r) := w_div32 a1 a2 a3 b1 b2 in
    ww_add_mul_div
      (ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry
        _ww_zdigits (w_0W p)) W0 r
  | _ =>
    ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry
      (WW ah al) (WW bh bl)
  end.

Definition ww_mod_gt a b :=
  Eval lazy beta iota delta [ww_mod_gt_aux double_modn1
    double_modn1_p double_modn1_p_aux double_modn1_0 double_modn1_0_aux
    double_split double_0 double_WW snd] in
  match a, b with
  | W0, _ => W0
  | _, W0 => W0
  | WW ah al, WW bh bl =>
    if w_eq0 ah then w_0W (w_mod_gt al bl)
    else
      match w_compare w_0 bh with
      | Eq =>
        w_0W (double_modn1 w_zdigits w_0 w_head0 w_add_mul_div w_div21
               w_compare w_sub 1 a bl)
      | Lt => ww_mod_gt_aux ah al bh bl
      | Gt => W0 (* absurd case *)
      end
  end.

Definition ww_gcd_gt_body (cont: w->w->w->w->zn2z w) (ah al bh bl: w) :=
  Eval lazy beta iota delta [ww_mod_gt_aux double_modn1
    double_modn1_p double_modn1_p_aux double_modn1_0 double_modn1_0_aux
    double_split double_0 double_WW snd] in
  match w_compare w_0 bh with
  | Eq =>
    match w_compare w_0 bl with
    | Eq => WW ah al (* normally does not happen if in normal form *)
    | Lt =>
      let m := double_modn1 w_zdigits w_0 w_head0 w_add_mul_div w_div21
                 w_compare w_sub 1 (WW ah al) bl in
      WW w_0 (w_gcd_gt bl m)
    | Gt => W0 (* absurd *)
    end
  | Lt =>
    let m := ww_mod_gt_aux ah al bh bl in
    match m with
    | W0 => WW bh bl
    | WW mh ml =>
      match w_compare w_0 mh with
      | Eq =>
        match w_compare w_0 ml with
        | Eq => WW bh bl
        | _ =>
          let r := double_modn1 w_zdigits w_0 w_head0 w_add_mul_div w_div21
                     w_compare w_sub 1 (WW bh bl) ml in
          WW w_0 (w_gcd_gt ml r)
        end
      | Lt =>
        let r := ww_mod_gt_aux bh bl mh ml in
        match r with
        | W0 => m
        | WW rh rl => cont mh ml rh rl
        end
      | Gt => W0 (* absurd *)
      end
    end
  | Gt => W0 (* absurd *)
  end.

Fixpoint ww_gcd_gt_aux
  (p:positive) (cont: w -> w -> w -> w -> zn2z w) (ah al bh bl : w)
  {struct p} : zn2z w :=
  ww_gcd_gt_body
    (fun mh ml rh rl => match p with
     | xH => cont mh ml rh rl
     | xO p => ww_gcd_gt_aux p (ww_gcd_gt_aux p cont) mh ml rh rl
     | xI p => ww_gcd_gt_aux p (ww_gcd_gt_aux p cont) mh ml rh rl
     end) ah al bh bl.
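(* ww_gcd_gt_aux uses the positive p as an unrolling counter for
   ww_gcd_gt_body: the xO and xI constructors nest two recursive unrollings,
   while xH falls back to the continuation.  Each application of the body
   performs Euclidean reduction steps that, as spec_ww_gcd_gt_aux_body below
   makes precise, at least halve the bound on the smaller operand; so
   instantiating p with the bit size of a double word (as done later with
   _ww_digits) leaves the continuation with operands of at most 1. *)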
(* Proof *) Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB w_to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_to_w_Z : forall x, 0 <= [[x]] < wwB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_eq0 : forall x, w_eq0 x = true -> [|x|] = 0. Variable spec_opp_c : forall x, [-|w_opp_c x|] = -[|x|]. Variable spec_opp : forall x, [|w_opp x|] = (-[|x|]) mod wB. Variable spec_opp_carry : forall x, [|w_opp_carry x|] = wB - [|x|] - 1. Variable spec_sub_c : forall x y, [-|w_sub_c x y|] = [|x|] - [|y|]. Variable spec_sub : forall x y, [|w_sub x y|] = ([|x|] - [|y|]) mod wB. Variable spec_sub_carry : forall x y, [|w_sub_carry x y|] = ([|x|] - [|y|] - 1) mod wB. Variable spec_div_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> let (q,r) := w_div_gt a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Variable spec_mod_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> [|w_mod_gt a b|] = [|a|] mod [|b|]. Variable spec_gcd_gt : forall a b, [|a|] > [|b|] -> Zis_gcd [|a|] [|b|] [|w_gcd_gt a b|]. Variable spec_add_mul_div : forall x y p, [|p|] <= Zpos w_digits -> [| w_add_mul_div p x y |] = ([|x|] * (2 ^ ([|p|])) + [|y|] / (2 ^ ((Zpos w_digits) - [|p|]))) mod wB. Variable spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ [|w_head0 x|] * [|x|] < wB. Variable spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := w_div21 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Variable spec_w_div32 : forall a1 a2 a3 b1 b2, wB/2 <= [|b1|] -> [[WW a1 a2]] < [[WW b1 b2]] -> let (q,r) := w_div32 a1 a2 a3 b1 b2 in [|a1|] * wwB + [|a2|] * wB + [|a3|] = [|q|] * ([|b1|] * wB + [|b2|]) + [[r]] /\ 0 <= [[r]] < [|b1|] * wB + [|b2|]. Variable spec_w_zdigits: [|w_zdigits|] = Zpos w_digits. Variable spec_ww_digits_ : [[_ww_zdigits]] = Zpos (xO w_digits). Variable spec_ww_1 : [[ww_1]] = 1. Variable spec_ww_add_mul_div : forall x y p, [[p]] <= Zpos (xO w_digits) -> [[ ww_add_mul_div p x y ]] = ([[x]] * (2^[[p]]) + [[y]] / (2^(Zpos (xO w_digits) - [[p]]))) mod wwB. Ltac Spec_w_to_Z x := let H:= fresh "HH" in assert (H:= spec_to_Z x). Ltac Spec_ww_to_Z x := let H:= fresh "HH" in assert (H:= spec_ww_to_Z w_digits w_to_Z spec_to_Z x). Lemma to_Z_div_minus_p : forall x p, 0 < [|p|] < Zpos w_digits -> 0 <= [|x|] / 2 ^ (Zpos w_digits - [|p|]) < 2 ^ [|p|]. Proof. intros x p H;Spec_w_to_Z x. split. apply Zdiv_le_lower_bound;zarith. apply Zdiv_lt_upper_bound;zarith. rewrite <- Zpower_exp;zarith. ring_simplify ([|p|] + (Zpos w_digits - [|p|])); unfold base in HH;zarith. Qed. Hint Resolve to_Z_div_minus_p : zarith. Lemma spec_ww_div_gt_aux : forall ah al bh bl, [[WW ah al]] > [[WW bh bl]] -> 0 < [|bh|] -> let (q,r) := ww_div_gt_aux ah al bh bl in [[WW ah al]] = [[q]] * [[WW bh bl]] + [[r]] /\ 0 <= [[r]] < [[WW bh bl]]. Proof. intros ah al bh bl Hgt Hpos;unfold ww_div_gt_aux. 
change (let (q, r) := let p := w_head0 bh in match w_compare p w_0 with | Gt => let b1 := w_add_mul_div p bh bl in let b2 := w_add_mul_div p bl w_0 in let a1 := w_add_mul_div p w_0 ah in let a2 := w_add_mul_div p ah al in let a3 := w_add_mul_div p al w_0 in let (q,r) := w_div32 a1 a2 a3 b1 b2 in (WW w_0 q, ww_add_mul_div (ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry _ww_zdigits (w_0W p)) W0 r) | _ => (ww_1, ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry (WW ah al) (WW bh bl)) end in [[WW ah al]]=[[q]]*[[WW bh bl]]+[[r]] /\ 0 <=[[r]]< [[WW bh bl]]). assert (Hh := spec_head0 Hpos). lazy zeta. rewrite spec_compare; case Z.compare_spec; rewrite spec_w_0; intros HH. generalize Hh; rewrite HH; simpl Z.pow; rewrite Z.mul_1_l; intros (HH1, HH2); clear HH. assert (wwB <= 2*[[WW bh bl]]). apply Z.le_trans with (2*[|bh|]*wB). rewrite wwB_wBwB; rewrite Z.pow_2_r; apply Z.mul_le_mono_nonneg_r; zarith. rewrite <- wB_div_2; apply Z.mul_le_mono_nonneg_l; zarith. simpl ww_to_Z;rewrite Z.mul_add_distr_l;rewrite Z.mul_assoc. Spec_w_to_Z bl;zarith. Spec_ww_to_Z (WW ah al). rewrite spec_ww_sub;eauto. simpl;rewrite spec_ww_1;rewrite Z.mul_1_l;simpl. simpl ww_to_Z in Hgt, H, HH;rewrite Zmod_small;split;zarith. case (spec_to_Z (w_head0 bh)); auto with zarith. assert ([|w_head0 bh|] < Zpos w_digits). destruct (Z_lt_ge_dec [|w_head0 bh|] (Zpos w_digits));trivial. exfalso. assert (2 ^ [|w_head0 bh|] * [|bh|] >= wB);auto with zarith. apply Z.le_ge; replace wB with (wB * 1);try ring. Spec_w_to_Z bh;apply Z.mul_le_mono_nonneg;zarith. unfold base;apply Zpower_le_monotone;zarith. assert (HHHH : 0 < [|w_head0 bh|] < Zpos w_digits); auto with zarith. assert (Hb:= Z.lt_le_incl _ _ H). generalize (spec_add_mul_div w_0 ah Hb) (spec_add_mul_div ah al Hb) (spec_add_mul_div al w_0 Hb) (spec_add_mul_div bh bl Hb) (spec_add_mul_div bl w_0 Hb); rewrite spec_w_0; repeat rewrite Z.mul_0_l;repeat rewrite Z.add_0_l; rewrite Zdiv_0_l;repeat rewrite Z.add_0_r. Spec_w_to_Z ah;Spec_w_to_Z bh. unfold base;repeat rewrite Zmod_shift_r;zarith. assert (H3:=to_Z_div_minus_p ah HHHH);assert(H4:=to_Z_div_minus_p al HHHH); assert (H5:=to_Z_div_minus_p bl HHHH). rewrite Z.mul_comm in Hh. assert (2^[|w_head0 bh|] < wB). unfold base;apply Zpower_lt_monotone;zarith. unfold base in H0;rewrite Zmod_small;zarith. fold wB; rewrite (Zmod_small ([|bh|] * 2 ^ [|w_head0 bh|]));zarith. intros U1 U2 U3 V1 V2. generalize (@spec_w_div32 (w_add_mul_div (w_head0 bh) w_0 ah) (w_add_mul_div (w_head0 bh) ah al) (w_add_mul_div (w_head0 bh) al w_0) (w_add_mul_div (w_head0 bh) bh bl) (w_add_mul_div (w_head0 bh) bl w_0)). destruct (w_div32 (w_add_mul_div (w_head0 bh) w_0 ah) (w_add_mul_div (w_head0 bh) ah al) (w_add_mul_div (w_head0 bh) al w_0) (w_add_mul_div (w_head0 bh) bh bl) (w_add_mul_div (w_head0 bh) bl w_0)) as (q,r). rewrite V1;rewrite V2. rewrite Z.mul_add_distr_r. rewrite <- (Z.add_assoc ([|bh|] * 2 ^ [|w_head0 bh|] * wB)). unfold base;rewrite <- shift_unshift_mod;zarith. fold wB. replace ([|bh|] * 2 ^ [|w_head0 bh|] * wB + [|bl|] * 2 ^ [|w_head0 bh|]) with ([[WW bh bl]] * 2^[|w_head0 bh|]). 2:simpl;ring. fold wwB. rewrite wwB_wBwB. rewrite Z.pow_2_r. rewrite U1;rewrite U2;rewrite U3. rewrite Z.mul_assoc. rewrite Z.mul_add_distr_r. rewrite (Z.add_assoc ([|ah|] / 2^(Zpos(w_digits) - [|w_head0 bh|])*wB * wB)). rewrite <- Z.mul_add_distr_r. rewrite <- Z.add_assoc. unfold base;repeat rewrite <- shift_unshift_mod;zarith. fold wB. 
replace ([|ah|] * 2 ^ [|w_head0 bh|] * wB + [|al|] * 2 ^ [|w_head0 bh|]) with ([[WW ah al]] * 2^[|w_head0 bh|]). 2:simpl;ring. intros Hd;destruct Hd;zarith. simpl. apply beta_lex_inv;zarith. rewrite U1;rewrite V1. assert ([|ah|] / 2 ^ (Zpos (w_digits) - [|w_head0 bh|]) < wB/2);zarith. apply Zdiv_lt_upper_bound;zarith. unfold base. replace (2^Zpos (w_digits)) with (2^(Zpos (w_digits) - 1)*2). rewrite Z_div_mult;zarith. rewrite <- Zpower_exp;zarith. apply Z.lt_le_trans with wB;zarith. unfold base;apply Zpower_le_monotone;zarith. pattern 2 at 2;replace 2 with (2^1);trivial. rewrite <- Zpower_exp;zarith. ring_simplify (Zpos (w_digits) - 1 + 1);trivial. change [[WW w_0 q]] with ([|w_0|]*wB+[|q|]);rewrite spec_w_0;rewrite Z.mul_0_l;rewrite Z.add_0_l. replace [[ww_add_mul_div (ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry _ww_zdigits (w_0W (w_head0 bh))) W0 r]] with ([[r]]/2^[|w_head0 bh|]). assert (0 < 2^[|w_head0 bh|]). apply Z.pow_pos_nonneg;zarith. split. rewrite <- (Z_div_mult [[WW ah al]] (2^[|w_head0 bh|]));zarith. rewrite H1;rewrite Z.mul_assoc;apply Z_div_plus_l;trivial. split;[apply Zdiv_le_lower_bound| apply Zdiv_lt_upper_bound];zarith. rewrite spec_ww_add_mul_div. rewrite spec_ww_sub; auto with zarith. rewrite spec_ww_digits_. change (Zpos (xO (w_digits))) with (2*Zpos (w_digits));zarith. simpl ww_to_Z;rewrite Z.mul_0_l;rewrite Z.add_0_l. rewrite spec_w_0W. rewrite (fun x y => Zmod_small (x-y)); auto with zarith. ring_simplify (2 * Zpos w_digits - (2 * Zpos w_digits - [|w_head0 bh|])). rewrite Zmod_small;zarith. split;[apply Zdiv_le_lower_bound| apply Zdiv_lt_upper_bound];zarith. Spec_ww_to_Z r. apply Z.lt_le_trans with wwB;zarith. rewrite <- (Z.mul_1_r wwB);apply Z.mul_le_mono_nonneg;zarith. split; auto with zarith. apply Z.le_lt_trans with (2 * Zpos w_digits); auto with zarith. unfold base, ww_digits; rewrite (Pos2Z.inj_xO w_digits). apply Zpower2_lt_lin; auto with zarith. rewrite spec_ww_sub; auto with zarith. rewrite spec_ww_digits_; rewrite spec_w_0W. rewrite Zmod_small;zarith. rewrite Pos2Z.inj_xO; split; auto with zarith. apply Z.le_lt_trans with (2 * Zpos w_digits); auto with zarith. unfold base, ww_digits; rewrite (Pos2Z.inj_xO w_digits). apply Zpower2_lt_lin; auto with zarith. Qed. Lemma spec_ww_div_gt : forall a b, [[a]] > [[b]] -> 0 < [[b]] -> let (q,r) := ww_div_gt a b in [[a]] = [[q]] * [[b]] + [[r]] /\ 0 <= [[r]] < [[b]]. Proof. intros a b Hgt Hpos;unfold ww_div_gt. change (let (q,r) := match a, b with | W0, _ => (W0,W0) | _, W0 => (W0,W0) | WW ah al, WW bh bl => if w_eq0 ah then let (q,r) := w_div_gt al bl in (WW w_0 q, w_0W r) else match w_compare w_0 bh with | Eq => let(q,r):= double_divn1 w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub 1 a bl in (q, w_0W r) | Lt => ww_div_gt_aux ah al bh bl | Gt => (W0,W0) (* cas absurde *) end end in [[a]] = [[q]] * [[b]] + [[r]] /\ 0 <= [[r]] < [[b]]). destruct a as [ |ah al]. simpl in Hgt;omega. destruct b as [ |bh bl]. simpl in Hpos;omega. Spec_w_to_Z ah; Spec_w_to_Z al; Spec_w_to_Z bh; Spec_w_to_Z bl. assert (H:=@spec_eq0 ah);destruct (w_eq0 ah). simpl ww_to_Z;rewrite H;trivial. simpl in Hgt;rewrite H in Hgt;trivial. assert ([|bh|] <= 0). apply beta_lex with (d:=[|al|])(b:=[|bl|]) (beta := wB);zarith. assert ([|bh|] = 0);zarith. rewrite H1 in Hgt;rewrite H1;simpl in Hgt. simpl. simpl in Hpos;rewrite H1 in Hpos;simpl in Hpos. assert (H2:=spec_div_gt Hgt Hpos);destruct (w_div_gt al bl). repeat rewrite spec_w_0W;simpl;rewrite spec_w_0;simpl;trivial. clear H. 
rewrite spec_compare; case Z.compare_spec; intros Hcmp. rewrite spec_w_0 in Hcmp. change [[WW bh bl]] with ([|bh|]*wB+[|bl|]). rewrite <- Hcmp;rewrite Z.mul_0_l;rewrite Z.add_0_l. simpl in Hpos;rewrite <- Hcmp in Hpos;simpl in Hpos. assert (H2:= @spec_double_divn1 w w_digits w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub w_to_Z spec_to_Z spec_w_zdigits spec_w_0 spec_w_WW spec_head0 spec_add_mul_div spec_div21 spec_compare spec_sub 1 (WW ah al) bl Hpos). destruct (double_divn1 w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub 1 (WW ah al) bl). rewrite spec_w_0W;unfold ww_to_Z;trivial. apply spec_ww_div_gt_aux;trivial. rewrite spec_w_0 in Hcmp;trivial. rewrite spec_w_0 in Hcmp;exfalso;omega. Qed. Lemma spec_ww_mod_gt_aux_eq : forall ah al bh bl, ww_mod_gt_aux ah al bh bl = snd (ww_div_gt_aux ah al bh bl). Proof. intros ah al bh bl. unfold ww_mod_gt_aux, ww_div_gt_aux. case w_compare; auto. case w_div32; auto. Qed. Lemma spec_ww_mod_gt_aux : forall ah al bh bl, [[WW ah al]] > [[WW bh bl]] -> 0 < [|bh|] -> [[ww_mod_gt_aux ah al bh bl]] = [[WW ah al]] mod [[WW bh bl]]. Proof. intros. rewrite spec_ww_mod_gt_aux_eq;trivial. assert (H3 := spec_ww_div_gt_aux ah al bl H H0). destruct (ww_div_gt_aux ah al bh bl) as (q,r);simpl. simpl in H,H3. destruct H3;apply Zmod_unique with [[q]];zarith. rewrite H1;ring. Qed. Lemma spec_w_mod_gt_eq : forall a b, [|a|] > [|b|] -> 0 <[|b|] -> [|w_mod_gt a b|] = [|snd (w_div_gt a b)|]. Proof. intros a b Hgt Hpos. rewrite spec_mod_gt;trivial. assert (H:=spec_div_gt Hgt Hpos). destruct (w_div_gt a b) as (q,r);simpl. rewrite Z.mul_comm in H;destruct H. symmetry;apply Zmod_unique with [|q|];trivial. Qed. Lemma spec_ww_mod_gt_eq : forall a b, [[a]] > [[b]] -> 0 < [[b]] -> [[ww_mod_gt a b]] = [[snd (ww_div_gt a b)]]. Proof. intros a b Hgt Hpos. change (ww_mod_gt a b) with (match a, b with | W0, _ => W0 | _, W0 => W0 | WW ah al, WW bh bl => if w_eq0 ah then w_0W (w_mod_gt al bl) else match w_compare w_0 bh with | Eq => w_0W (double_modn1 w_zdigits w_0 w_head0 w_add_mul_div w_div21 w_compare w_sub 1 a bl) | Lt => ww_mod_gt_aux ah al bh bl | Gt => W0 (* cas absurde *) end end). change (ww_div_gt a b) with (match a, b with | W0, _ => (W0,W0) | _, W0 => (W0,W0) | WW ah al, WW bh bl => if w_eq0 ah then let (q,r) := w_div_gt al bl in (WW w_0 q, w_0W r) else match w_compare w_0 bh with | Eq => let(q,r):= double_divn1 w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub 1 a bl in (q, w_0W r) | Lt => ww_div_gt_aux ah al bh bl | Gt => (W0,W0) (* cas absurde *) end end). destruct a as [ |ah al];trivial. destruct b as [ |bh bl];trivial. Spec_w_to_Z ah; Spec_w_to_Z al; Spec_w_to_Z bh; Spec_w_to_Z bl. assert (H:=@spec_eq0 ah);destruct (w_eq0 ah). simpl in Hgt;rewrite H in Hgt;trivial. assert ([|bh|] <= 0). apply beta_lex with (d:=[|al|])(b:=[|bl|]) (beta := wB);zarith. assert ([|bh|] = 0);zarith. rewrite H1 in Hgt;simpl in Hgt. simpl in Hpos;rewrite H1 in Hpos;simpl in Hpos. rewrite spec_w_0W;rewrite spec_w_mod_gt_eq;trivial. destruct (w_div_gt al bl);simpl;rewrite spec_w_0W;trivial. clear H. rewrite spec_compare; case Z.compare_spec; intros H2. rewrite (@spec_double_modn1_aux w w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub w_to_Z spec_w_0 spec_compare 1 (WW ah al) bl). destruct (double_divn1 w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub 1 (WW ah al) bl);simpl;trivial. rewrite spec_ww_mod_gt_aux_eq;trivial;symmetry;trivial. trivial. Qed. 
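(* The modulus lemmas recover [[a]] mod [[b]] from the Euclidean pair through
   Zmod_unique: whenever a = q * b + r with 0 <= r < b, then r = a mod b.
   For instance 23 = 3 * 7 + 2 with 0 <= 2 < 7, hence 23 mod 7 = 2. *)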
Lemma spec_ww_mod_gt : forall a b, [[a]] > [[b]] -> 0 < [[b]] -> [[ww_mod_gt a b]] = [[a]] mod [[b]]. Proof. intros a b Hgt Hpos. assert (H:= spec_ww_div_gt a b Hgt Hpos). rewrite (spec_ww_mod_gt_eq a b Hgt Hpos). destruct (ww_div_gt a b)as(q,r);destruct H. apply Zmod_unique with[[q]];simpl;trivial. rewrite Z.mul_comm;trivial. Qed. Lemma Zis_gcd_mod : forall a b d, 0 < b -> Zis_gcd b (a mod b) d -> Zis_gcd a b d. Proof. intros a b d H H1; apply Zis_gcd_for_euclid with (a/b). pattern a at 1;rewrite (Z_div_mod_eq a b). ring_simplify (b * (a / b) + a mod b - a / b * b);trivial. zarith. Qed. Lemma spec_ww_gcd_gt_aux_body : forall ah al bh bl n cont, [[WW bh bl]] <= 2^n -> [[WW ah al]] > [[WW bh bl]] -> (forall xh xl yh yl, [[WW xh xl]] > [[WW yh yl]] -> [[WW yh yl]] <= 2^(n-1) -> Zis_gcd [[WW xh xl]] [[WW yh yl]] [[cont xh xl yh yl]]) -> Zis_gcd [[WW ah al]] [[WW bh bl]] [[ww_gcd_gt_body cont ah al bh bl]]. Proof. intros ah al bh bl n cont Hlog Hgt Hcont. change (ww_gcd_gt_body cont ah al bh bl) with (match w_compare w_0 bh with | Eq => match w_compare w_0 bl with | Eq => WW ah al (* normalement n'arrive pas si forme normale *) | Lt => let m := double_modn1 w_zdigits w_0 w_head0 w_add_mul_div w_div21 w_compare w_sub 1 (WW ah al) bl in WW w_0 (w_gcd_gt bl m) | Gt => W0 (* absurde *) end | Lt => let m := ww_mod_gt_aux ah al bh bl in match m with | W0 => WW bh bl | WW mh ml => match w_compare w_0 mh with | Eq => match w_compare w_0 ml with | Eq => WW bh bl | _ => let r := double_modn1 w_zdigits w_0 w_head0 w_add_mul_div w_div21 w_compare w_sub 1 (WW bh bl) ml in WW w_0 (w_gcd_gt ml r) end | Lt => let r := ww_mod_gt_aux bh bl mh ml in match r with | W0 => m | WW rh rl => cont mh ml rh rl end | Gt => W0 (* absurde *) end end | Gt => W0 (* absurde *) end). rewrite spec_compare, spec_w_0. case Z.compare_spec; intros Hbh. simpl ww_to_Z in *. rewrite <- Hbh. rewrite Z.mul_0_l;rewrite Z.add_0_l. rewrite spec_compare, spec_w_0. case Z.compare_spec; intros Hbl. rewrite <- Hbl;apply Zis_gcd_0. simpl;rewrite spec_w_0;rewrite Z.mul_0_l;rewrite Z.add_0_l. apply Zis_gcd_mod;zarith. change ([|ah|] * wB + [|al|]) with (double_to_Z w_digits w_to_Z 1 (WW ah al)). rewrite <- (@spec_double_modn1 w w_digits w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub w_to_Z spec_to_Z spec_w_zdigits spec_w_0 spec_w_WW spec_head0 spec_add_mul_div spec_div21 spec_compare spec_sub 1 (WW ah al) bl Hbl). apply spec_gcd_gt. rewrite (@spec_double_modn1 w w_digits w_zdigits w_0 w_WW); trivial. apply Z.lt_gt;match goal with | |- ?x mod ?y < ?y => destruct (Z_mod_lt x y);zarith end. Spec_w_to_Z bl;exfalso;omega. assert (H:= spec_ww_mod_gt_aux _ _ _ Hgt Hbh). assert (H2 : 0 < [[WW bh bl]]). simpl;Spec_w_to_Z bl. apply Z.lt_le_trans with ([|bh|]*wB);zarith. apply Z.mul_pos_pos;zarith. apply Zis_gcd_mod;trivial. rewrite <- H. simpl in *;destruct (ww_mod_gt_aux ah al bh bl) as [ |mh ml]. simpl;apply Zis_gcd_0;zarith. rewrite spec_compare, spec_w_0; case Z.compare_spec; intros Hmh. simpl;rewrite <- Hmh;simpl. rewrite spec_compare, spec_w_0; case Z.compare_spec; intros Hml. rewrite <- Hml;simpl;apply Zis_gcd_0. simpl; rewrite spec_w_0; simpl. apply Zis_gcd_mod;zarith. change ([|bh|] * wB + [|bl|]) with (double_to_Z w_digits w_to_Z 1 (WW bh bl)). rewrite <- (@spec_double_modn1 w w_digits w_zdigits w_0 w_WW w_head0 w_add_mul_div w_div21 w_compare w_sub w_to_Z spec_to_Z spec_w_zdigits spec_w_0 spec_w_WW spec_head0 spec_add_mul_div spec_div21 spec_compare spec_sub 1 (WW bh bl) ml Hml). apply spec_gcd_gt. 
rewrite (@spec_double_modn1 w w_digits w_zdigits w_0 w_WW); trivial. apply Z.lt_gt;match goal with | |- ?x mod ?y < ?y => destruct (Z_mod_lt x y);zarith end. Spec_w_to_Z ml;exfalso;omega. assert ([[WW bh bl]] > [[WW mh ml]]). rewrite H;simpl; apply Z.lt_gt;match goal with | |- ?x mod ?y < ?y => destruct (Z_mod_lt x y);zarith end. assert (H1:= spec_ww_mod_gt_aux _ _ _ H0 Hmh). assert (H3 : 0 < [[WW mh ml]]). simpl;Spec_w_to_Z ml. apply Z.lt_le_trans with ([|mh|]*wB);zarith. apply Z.mul_pos_pos;zarith. apply Zis_gcd_mod;zarith. simpl in *;rewrite <- H1. destruct (ww_mod_gt_aux bh bl mh ml) as [ |rh rl]. simpl; apply Zis_gcd_0. simpl;apply Hcont. simpl in H1;rewrite H1. apply Z.lt_gt;match goal with | |- ?x mod ?y < ?y => destruct (Z_mod_lt x y);zarith end. apply Z.le_trans with (2^n/2). apply Zdiv_le_lower_bound;zarith. apply Z.le_trans with ([|bh|] * wB + [|bl|]);zarith. assert (H3' := Z_div_mod_eq [[WW bh bl]] [[WW mh ml]] (Z.lt_gt _ _ H3)). assert (H4 : 0 <= [[WW bh bl]]/[[WW mh ml]]). apply Z.ge_le;apply Z_div_ge0;zarith. simpl in *;rewrite H1. pattern ([|bh|] * wB + [|bl|]) at 2;rewrite H3'. Z.le_elim H4. assert (H6' : [[WW bh bl]] mod [[WW mh ml]] = [[WW bh bl]] - [[WW mh ml]] * ([[WW bh bl]]/[[WW mh ml]])). simpl;pattern ([|bh|] * wB + [|bl|]) at 2;rewrite H3';ring. simpl in H6'. assert ([[WW mh ml]] <= [[WW mh ml]] * ([[WW bh bl]]/[[WW mh ml]])). simpl;pattern ([|mh|]*wB+[|ml|]) at 1;rewrite <- Z.mul_1_r;zarith. simpl in *;assert (H8 := Z_mod_lt [[WW bh bl]] [[WW mh ml]]);simpl in H8; zarith. assert (H8 := Z_mod_lt [[WW bh bl]] [[WW mh ml]]);simpl in *;zarith. rewrite <- H4 in H3';rewrite Z.mul_0_r in H3';simpl in H3';zarith. pattern n at 1;replace n with (n-1+1);try ring. rewrite Zpower_exp;zarith. change (2^1) with 2. rewrite Z_div_mult;zarith. assert (2^1 <= 2^n). change (2^1) with 2;zarith. assert (H7 := @Zpower_le_monotone_inv 2 1 n);zarith. Spec_w_to_Z mh;exfalso;zarith. Spec_w_to_Z bh;exfalso;zarith. Qed. Lemma spec_ww_gcd_gt_aux : forall p cont n, (forall xh xl yh yl, [[WW xh xl]] > [[WW yh yl]] -> [[WW yh yl]] <= 2^n -> Zis_gcd [[WW xh xl]] [[WW yh yl]] [[cont xh xl yh yl]]) -> forall ah al bh bl , [[WW ah al]] > [[WW bh bl]] -> [[WW bh bl]] <= 2^(Zpos p + n) -> Zis_gcd [[WW ah al]] [[WW bh bl]] [[ww_gcd_gt_aux p cont ah al bh bl]]. Proof. induction p;intros cont n Hcont ah al bh bl Hgt Hs;simpl ww_gcd_gt_aux. assert (0 < Zpos p). unfold Z.lt;reflexivity. apply spec_ww_gcd_gt_aux_body with (n := Zpos (xI p) + n); trivial;rewrite Pos2Z.inj_xI. intros. apply IHp with (n := Zpos p + n);zarith. intros. apply IHp with (n := n );zarith. apply Z.le_trans with (2 ^ (2* Zpos p + 1+ n -1));zarith. apply Z.pow_le_mono_r;zarith. assert (0 < Zpos p). unfold Z.lt;reflexivity. apply spec_ww_gcd_gt_aux_body with (n := Zpos (xO p) + n );trivial. rewrite (Pos2Z.inj_xO p). intros. apply IHp with (n := Zpos p + n - 1);zarith. intros. apply IHp with (n := n -1 );zarith. intros;apply Hcont;zarith. apply Z.le_trans with (2^(n-1));zarith. apply Z.pow_le_mono_r;zarith. apply Z.le_trans with (2 ^ (Zpos p + n -1));zarith. apply Z.pow_le_mono_r;zarith. apply Z.le_trans with (2 ^ (2*Zpos p + n -1));zarith. apply Z.pow_le_mono_r;zarith. apply spec_ww_gcd_gt_aux_body with (n := n+1);trivial. rewrite Z.add_comm;trivial. ring_simplify (n + 1 - 1);trivial. Qed. End DoubleDivGt. Section DoubleDiv. Variable w : Type. Variable w_digits : positive. Variable ww_1 : zn2z w. Variable ww_compare : zn2z w -> zn2z w -> comparison. Variable ww_div_gt : zn2z w -> zn2z w -> zn2z w * zn2z w. 
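(* The general ww_div and ww_mod defined below require only 0 < [[b]]:
   a single comparison dispatches to the _gt versions when [[a]] > [[b]],
   and the remaining cases are immediate (quotient 1 and remainder 0 when the
   operands are equal, quotient 0 and remainder a when [[a]] < [[b]]). *)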
Variable ww_mod_gt : zn2z w -> zn2z w -> zn2z w. Definition ww_div a b := match ww_compare a b with | Gt => ww_div_gt a b | Eq => (ww_1, W0) | Lt => (W0, a) end. Definition ww_mod a b := match ww_compare a b with | Gt => ww_mod_gt a b | Eq => W0 | Lt => a end. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_ww_1 : [[ww_1]] = 1. Variable spec_ww_compare : forall x y, ww_compare x y = Z.compare [[x]] [[y]]. Variable spec_ww_div_gt : forall a b, [[a]] > [[b]] -> 0 < [[b]] -> let (q,r) := ww_div_gt a b in [[a]] = [[q]] * [[b]] + [[r]] /\ 0 <= [[r]] < [[b]]. Variable spec_ww_mod_gt : forall a b, [[a]] > [[b]] -> 0 < [[b]] -> [[ww_mod_gt a b]] = [[a]] mod [[b]]. Ltac Spec_w_to_Z x := let H:= fresh "HH" in assert (H:= spec_to_Z x). Ltac Spec_ww_to_Z x := let H:= fresh "HH" in assert (H:= spec_ww_to_Z w_digits w_to_Z spec_to_Z x). Lemma spec_ww_div : forall a b, 0 < [[b]] -> let (q,r) := ww_div a b in [[a]] = [[q]] * [[b]] + [[r]] /\ 0 <= [[r]] < [[b]]. Proof. intros a b Hpos;unfold ww_div. rewrite spec_ww_compare; case Z.compare_spec; intros. simpl;rewrite spec_ww_1;split;zarith. simpl;split;[ring|Spec_ww_to_Z a;zarith]. apply spec_ww_div_gt;auto with zarith. Qed. Lemma spec_ww_mod : forall a b, 0 < [[b]] -> [[ww_mod a b]] = [[a]] mod [[b]]. Proof. intros a b Hpos;unfold ww_mod. rewrite spec_ww_compare; case Z.compare_spec; intros. simpl;apply Zmod_unique with 1;try rewrite H;zarith. Spec_ww_to_Z a;symmetry;apply Zmod_small;zarith. apply spec_ww_mod_gt;auto with zarith. Qed. Variable w_0 : w. Variable w_1 : w. Variable w_compare : w -> w -> comparison. Variable w_eq0 : w -> bool. Variable w_gcd_gt : w -> w -> w. Variable _ww_digits : positive. Variable spec_ww_digits_ : _ww_digits = xO w_digits. Variable ww_gcd_gt_fix : positive -> (w -> w -> w -> w -> zn2z w) -> w -> w -> w -> w -> zn2z w. Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_1 : [|w_1|] = 1. Variable spec_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_eq0 : forall x, w_eq0 x = true -> [|x|] = 0. Variable spec_gcd_gt : forall a b, [|a|] > [|b|] -> Zis_gcd [|a|] [|b|] [|w_gcd_gt a b|]. Variable spec_gcd_gt_fix : forall p cont n, (forall xh xl yh yl, [[WW xh xl]] > [[WW yh yl]] -> [[WW yh yl]] <= 2^n -> Zis_gcd [[WW xh xl]] [[WW yh yl]] [[cont xh xl yh yl]]) -> forall ah al bh bl , [[WW ah al]] > [[WW bh bl]] -> [[WW bh bl]] <= 2^(Zpos p + n) -> Zis_gcd [[WW ah al]] [[WW bh bl]] [[ww_gcd_gt_fix p cont ah al bh bl]]. Definition gcd_cont (xh xl yh yl:w) := match w_compare w_1 yl with | Eq => ww_1 | _ => WW xh xl end. Lemma spec_gcd_cont : forall xh xl yh yl, [[WW xh xl]] > [[WW yh yl]] -> [[WW yh yl]] <= 1 -> Zis_gcd [[WW xh xl]] [[WW yh yl]] [[gcd_cont xh xl yh yl]]. Proof. intros xh xl yh yl Hgt' Hle. simpl in Hle. assert ([|yh|] = 0). change 1 with (0*wB+1) in Hle. assert (0 <= 1 < wB). split;zarith. apply wB_pos. assert (H1:= beta_lex _ _ _ _ _ Hle (spec_to_Z yl) H). Spec_w_to_Z yh;zarith. unfold gcd_cont; rewrite spec_compare, spec_w_1. case Z.compare_spec; intros Hcmpy. simpl;rewrite H;simpl; rewrite spec_ww_1;rewrite <- Hcmpy;apply Zis_gcd_mod;zarith. rewrite <- (Zmod_unique ([|xh|]*wB+[|xl|]) 1 ([|xh|]*wB+[|xl|]) 0);zarith. rewrite H in Hle; exfalso;zarith. assert (H0 : [|yl|] = 0) by (Spec_w_to_Z yl;zarith). simpl. 
rewrite H0, H;simpl;apply Zis_gcd_0;trivial. Qed. Variable cont : w -> w -> w -> w -> zn2z w. Variable spec_cont : forall xh xl yh yl, [[WW xh xl]] > [[WW yh yl]] -> [[WW yh yl]] <= 1 -> Zis_gcd [[WW xh xl]] [[WW yh yl]] [[cont xh xl yh yl]]. Definition ww_gcd_gt a b := match a, b with | W0, _ => b | _, W0 => a | WW ah al, WW bh bl => if w_eq0 ah then (WW w_0 (w_gcd_gt al bl)) else ww_gcd_gt_fix _ww_digits cont ah al bh bl end. Definition ww_gcd a b := Eval lazy beta delta [ww_gcd_gt] in match ww_compare a b with | Gt => ww_gcd_gt a b | Eq => a | Lt => ww_gcd_gt b a end. Lemma spec_ww_gcd_gt : forall a b, [[a]] > [[b]] -> Zis_gcd [[a]] [[b]] [[ww_gcd_gt a b]]. Proof. intros a b Hgt;unfold ww_gcd_gt. destruct a as [ |ah al]. simpl;apply Zis_gcd_sym;apply Zis_gcd_0. destruct b as [ |bh bl]. simpl;apply Zis_gcd_0. simpl in Hgt. generalize (@spec_eq0 ah);destruct (w_eq0 ah);intros. simpl;rewrite H in Hgt;trivial;rewrite H;trivial;rewrite spec_w_0;simpl. assert ([|bh|] <= 0). apply beta_lex with (d:=[|al|])(b:=[|bl|]) (beta := wB);zarith. Spec_w_to_Z bh;assert ([|bh|] = 0);zarith. rewrite H1 in Hgt;simpl in Hgt. rewrite H1;simpl;auto. clear H. apply spec_gcd_gt_fix with (n:= 0);trivial. rewrite Z.add_0_r;rewrite spec_ww_digits_. change (2 ^ Zpos (xO w_digits)) with wwB. Spec_ww_to_Z (WW bh bl);zarith. Qed. Lemma spec_ww_gcd : forall a b, Zis_gcd [[a]] [[b]] [[ww_gcd a b]]. Proof. intros a b. change (ww_gcd a b) with (match ww_compare a b with | Gt => ww_gcd_gt a b | Eq => a | Lt => ww_gcd_gt b a end). rewrite spec_ww_compare; case Z.compare_spec; intros Hcmp. Spec_ww_to_Z b;rewrite Hcmp. apply Zis_gcd_for_euclid with 1;zarith. ring_simplify ([[b]] - 1 * [[b]]). apply Zis_gcd_0;zarith. apply Zis_gcd_sym;apply spec_ww_gcd_gt;zarith. apply spec_ww_gcd_gt;zarith. Qed. End DoubleDiv. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleCyclic.v0000640000175000001440000006166212010532755023376 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* WW w_0 p | C1 p => WW w_1 p end. Let _ww_digits := xO w_digits. Let _ww_zdigits := w_add2 w_zdigits w_zdigits. Let to_Z := zn2z_to_Z wB w_to_Z. Let w_W0 := ZnZ.WO. Let w_0W := ZnZ.OW. Let w_WW := ZnZ.WW. Let ww_of_pos p := match w_of_pos p with | (N0, l) => (N0, WW w_0 l) | (Npos ph,l) => let (n,h) := w_of_pos ph in (n, w_WW h l) end. Let head0 := Eval lazy beta delta [ww_head0] in ww_head0 w_0 w_0W w_compare w_head0 w_add2 w_zdigits _ww_zdigits. Let tail0 := Eval lazy beta delta [ww_tail0] in ww_tail0 w_0 w_0W w_compare w_tail0 w_add2 w_zdigits _ww_zdigits. Let ww_WW := Eval lazy beta delta [ww_WW] in (@ww_WW t). Let ww_0W := Eval lazy beta delta [ww_0W] in (@ww_0W t). Let ww_W0 := Eval lazy beta delta [ww_W0] in (@ww_W0 t). (* ** Comparison ** *) Let compare := Eval lazy beta delta[ww_compare] in ww_compare w_0 w_compare. Let eq0 (x:zn2z t) := match x with | W0 => true | _ => false end. (* ** Opposites ** *) Let opp_c := Eval lazy beta delta [ww_opp_c] in ww_opp_c w_0 w_opp_c w_opp_carry. Let opp := Eval lazy beta delta [ww_opp] in ww_opp w_0 w_opp_c w_opp_carry w_opp. Let opp_carry := Eval lazy beta delta [ww_opp_carry] in ww_opp_carry w_WW ww_Bm1 w_opp_carry. (* ** Additions ** *) Let succ_c := Eval lazy beta delta [ww_succ_c] in ww_succ_c w_0 ww_1 w_succ_c. Let add_c := Eval lazy beta delta [ww_add_c] in ww_add_c w_WW w_add_c w_add_carry_c. 
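(* Remark: the Let bindings of this section all follow the same pattern:
   Eval lazy beta (iota) delta [...] pre-expands the listed generic Double*
   definitions when the instance is built, so that later reduction and
   extraction work on specialised two-word code rather than on the generic
   functors. *)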
Let add_carry_c := Eval lazy beta iota delta [ww_add_carry_c ww_succ_c] in ww_add_carry_c w_0 w_WW ww_1 w_succ_c w_add_c w_add_carry_c. Let succ := Eval lazy beta delta [ww_succ] in ww_succ w_W0 ww_1 w_succ_c w_succ. Let add := Eval lazy beta delta [ww_add] in ww_add w_add_c w_add w_add_carry. Let add_carry := Eval lazy beta iota delta [ww_add_carry ww_succ] in ww_add_carry w_W0 ww_1 w_succ_c w_add_carry_c w_succ w_add w_add_carry. (* ** Subtractions ** *) Let pred_c := Eval lazy beta delta [ww_pred_c] in ww_pred_c w_Bm1 w_WW ww_Bm1 w_pred_c. Let sub_c := Eval lazy beta iota delta [ww_sub_c ww_opp_c] in ww_sub_c w_0 w_WW w_opp_c w_opp_carry w_sub_c w_sub_carry_c. Let sub_carry_c := Eval lazy beta iota delta [ww_sub_carry_c ww_pred_c ww_opp_carry] in ww_sub_carry_c w_Bm1 w_WW ww_Bm1 w_opp_carry w_pred_c w_sub_c w_sub_carry_c. Let pred := Eval lazy beta delta [ww_pred] in ww_pred w_Bm1 w_WW ww_Bm1 w_pred_c w_pred. Let sub := Eval lazy beta iota delta [ww_sub ww_opp] in ww_sub w_0 w_WW w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry. Let sub_carry := Eval lazy beta iota delta [ww_sub_carry ww_pred ww_opp_carry] in ww_sub_carry w_Bm1 w_WW ww_Bm1 w_opp_carry w_pred_c w_sub_carry_c w_pred w_sub w_sub_carry. (* ** Multiplication ** *) Let mul_c := Eval lazy beta iota delta [ww_mul_c double_mul_c] in ww_mul_c w_0 w_1 w_WW w_W0 w_mul_c add_c add add_carry. Let karatsuba_c := Eval lazy beta iota delta [ww_karatsuba_c double_mul_c kara_prod] in ww_karatsuba_c w_0 w_1 w_WW w_W0 w_compare w_add w_sub w_mul_c add_c add add_carry sub_c sub. Let mul := Eval lazy beta delta [ww_mul] in ww_mul w_W0 w_add w_mul_c w_mul add. Let square_c := Eval lazy beta delta [ww_square_c] in ww_square_c w_0 w_1 w_WW w_W0 w_mul_c w_square_c add_c add add_carry. (* Division operation *) Let div32 := Eval lazy beta iota delta [w_div32] in w_div32 w_0 w_Bm1 w_Bm2 w_WW w_compare w_add_c w_add_carry_c w_add w_add_carry w_pred w_sub w_mul_c w_div21 sub_c. Let div21 := Eval lazy beta iota delta [ww_div21] in ww_div21 w_0 w_0W div32 ww_1 compare sub. Let low (p: zn2z t) := match p with WW _ p1 => p1 | _ => w_0 end. Let add_mul_div := Eval lazy beta delta [ww_add_mul_div] in ww_add_mul_div w_0 w_WW w_W0 w_0W compare w_add_mul_div sub w_zdigits low. Let div_gt := Eval lazy beta delta [ww_div_gt] in ww_div_gt w_0 w_WW w_0W w_compare w_eq0 w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_div_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits ww_1 add_mul_div w_zdigits. Let div := Eval lazy beta delta [ww_div] in ww_div ww_1 compare div_gt. Let mod_gt := Eval lazy beta delta [ww_mod_gt] in ww_mod_gt w_0 w_WW w_0W w_compare w_eq0 w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_mod_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits add_mul_div w_zdigits. Let mod_ := Eval lazy beta delta [ww_mod] in ww_mod compare mod_gt. Let pos_mod := Eval lazy beta delta [ww_pos_mod] in ww_pos_mod w_0 w_zdigits w_WW w_pos_mod compare w_0W low sub _ww_zdigits. Let is_even := Eval lazy beta delta [ww_is_even] in ww_is_even w_is_even. Let sqrt2 := Eval lazy beta delta [ww_sqrt2] in ww_sqrt2 w_is_even w_compare w_0 w_1 w_Bm1 w_0W w_sub w_square_c w_div21 w_add_mul_div w_zdigits w_add_c w_sqrt2 w_pred pred_c pred add_c add sub_c add_mul_div. Let sqrt := Eval lazy beta delta [ww_sqrt] in ww_sqrt w_is_even w_0 w_sub w_add_mul_div w_zdigits _ww_zdigits w_sqrt2 pred add_mul_div head0 compare low. 
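(* The GCD operations below instantiate the generic loop ww_gcd_gt_aux with
   _ww_digits as unrolling counter (see gcd_gt and gcd); by
   spec_ww_gcd_gt_aux this is enough fuel to reduce any pair of double words
   down to the small case handled by gcd_cont. *)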
Let gcd_gt_fix := Eval cbv beta delta [ww_gcd_gt_aux ww_gcd_gt_body] in ww_gcd_gt_aux w_0 w_WW w_0W w_compare w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_gcd_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits add_mul_div w_zdigits. Let gcd_cont := Eval lazy beta delta [gcd_cont] in gcd_cont ww_1 w_1 w_compare. Let gcd_gt := Eval lazy beta delta [ww_gcd_gt] in ww_gcd_gt w_0 w_eq0 w_gcd_gt _ww_digits gcd_gt_fix gcd_cont. Let gcd := Eval lazy beta delta [ww_gcd] in ww_gcd compare w_0 w_eq0 w_gcd_gt _ww_digits gcd_gt_fix gcd_cont. (* ** Record of operators on 2 words *) Global Instance mk_zn2z_ops : ZnZ.Ops (zn2z t) | 1 := ZnZ.MkOps _ww_digits _ww_zdigits to_Z ww_of_pos head0 tail0 W0 ww_1 ww_Bm1 compare eq0 opp_c opp opp_carry succ_c add_c add_carry_c succ add add_carry pred_c sub_c sub_carry_c pred sub sub_carry mul_c mul square_c div21 div_gt div mod_gt mod_ gcd_gt gcd add_mul_div pos_mod is_even sqrt2 sqrt. Global Instance mk_zn2z_ops_karatsuba : ZnZ.Ops (zn2z t) | 2 := ZnZ.MkOps _ww_digits _ww_zdigits to_Z ww_of_pos head0 tail0 W0 ww_1 ww_Bm1 compare eq0 opp_c opp opp_carry succ_c add_c add_carry_c succ add add_carry pred_c sub_c sub_carry_c pred sub sub_carry karatsuba_c mul square_c div21 div_gt div mod_gt mod_ gcd_gt gcd add_mul_div pos_mod is_even sqrt2 sqrt. (* Proof *) Context {specs : ZnZ.Specs ops}. Hint Resolve ZnZ.spec_to_Z ZnZ.spec_of_pos ZnZ.spec_0 ZnZ.spec_1 ZnZ.spec_m1 ZnZ.spec_compare ZnZ.spec_eq0 ZnZ.spec_opp_c ZnZ.spec_opp ZnZ.spec_opp_carry ZnZ.spec_succ_c ZnZ.spec_add_c ZnZ.spec_add_carry_c ZnZ.spec_succ ZnZ.spec_add ZnZ.spec_add_carry ZnZ.spec_pred_c ZnZ.spec_sub_c ZnZ.spec_sub_carry_c ZnZ.spec_pred ZnZ.spec_sub ZnZ.spec_sub_carry ZnZ.spec_mul_c ZnZ.spec_mul ZnZ.spec_square_c ZnZ.spec_div21 ZnZ.spec_div_gt ZnZ.spec_div ZnZ.spec_modulo_gt ZnZ.spec_modulo ZnZ.spec_gcd_gt ZnZ.spec_gcd ZnZ.spec_head0 ZnZ.spec_tail0 ZnZ.spec_add_mul_div ZnZ.spec_pos_mod ZnZ.spec_is_even ZnZ.spec_sqrt2 ZnZ.spec_sqrt ZnZ.spec_WO ZnZ.spec_OW ZnZ.spec_WW. Ltac wwauto := unfold ww_to_Z; auto. Let wwB := base _ww_digits. Notation "[| x |]" := (to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wwB to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wwB to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (zn2z_to_Z wwB to_Z x) (at level 0, x at level 99). Let spec_ww_to_Z : forall x, 0 <= [| x |] < wwB. Proof. refine (spec_ww_to_Z w_digits w_to_Z _);auto. Qed. Let spec_ww_of_pos : forall p, Zpos p = (Z.of_N (fst (ww_of_pos p)))*wwB + [|(snd (ww_of_pos p))|]. Proof. unfold ww_of_pos;intros. rewrite (ZnZ.spec_of_pos p). unfold w_of_pos. case (ZnZ.of_pos p); intros. simpl. destruct n; simpl ZnZ.to_Z. simpl;unfold w_to_Z,w_0; rewrite ZnZ.spec_0;trivial. unfold Z.of_N. rewrite (ZnZ.spec_of_pos p0). case (ZnZ.of_pos p0); intros. simpl. unfold fst, snd,Z.of_N, to_Z, wB, w_digits, w_to_Z, w_WW. rewrite ZnZ.spec_WW. replace wwB with (wB*wB). unfold wB,w_to_Z,w_digits;destruct n;ring. symmetry. rewrite <- Z.pow_2_r; exact (wwB_wBwB w_digits). Qed. Let spec_ww_0 : [|W0|] = 0. Proof. reflexivity. Qed. Let spec_ww_1 : [|ww_1|] = 1. Proof. refine (spec_ww_1 w_0 w_1 w_digits w_to_Z _ _);auto. Qed. Let spec_ww_Bm1 : [|ww_Bm1|] = wwB - 1. Proof. refine (spec_ww_Bm1 w_Bm1 w_digits w_to_Z _);auto. Qed. Let spec_ww_compare : forall x y, compare x y = Z.compare [|x|] [|y|]. Proof. refine (spec_ww_compare w_0 w_digits w_to_Z w_compare _ _ _);auto. Qed. Let spec_ww_eq0 : forall x, eq0 x = true -> [|x|] = 0. Proof. 
destruct x;simpl;intros;trivial;discriminate. Qed. Let spec_ww_opp_c : forall x, [-|opp_c x|] = -[|x|]. Proof. refine(spec_ww_opp_c w_0 w_0 W0 w_opp_c w_opp_carry w_digits w_to_Z _ _ _ _); auto. Qed. Let spec_ww_opp : forall x, [|opp x|] = (-[|x|]) mod wwB. Proof. refine(spec_ww_opp w_0 w_0 W0 w_opp_c w_opp_carry w_opp w_digits w_to_Z _ _ _ _ _); auto. Qed. Let spec_ww_opp_carry : forall x, [|opp_carry x|] = wwB - [|x|] - 1. Proof. refine (spec_ww_opp_carry w_WW ww_Bm1 w_opp_carry w_digits w_to_Z _ _ _); wwauto. Qed. Let spec_ww_succ_c : forall x, [+|succ_c x|] = [|x|] + 1. Proof. refine (spec_ww_succ_c w_0 w_0 ww_1 w_succ_c w_digits w_to_Z _ _ _ _);auto. Qed. Let spec_ww_add_c : forall x y, [+|add_c x y|] = [|x|] + [|y|]. Proof. refine (spec_ww_add_c w_WW w_add_c w_add_carry_c w_digits w_to_Z _ _ _);wwauto. Qed. Let spec_ww_add_carry_c : forall x y, [+|add_carry_c x y|] = [|x|]+[|y|]+1. Proof. refine (spec_ww_add_carry_c w_0 w_0 w_WW ww_1 w_succ_c w_add_c w_add_carry_c w_digits w_to_Z _ _ _ _ _ _ _);wwauto. Qed. Let spec_ww_succ : forall x, [|succ x|] = ([|x|] + 1) mod wwB. Proof. refine (spec_ww_succ w_W0 ww_1 w_succ_c w_succ w_digits w_to_Z _ _ _ _ _); wwauto. Qed. Let spec_ww_add : forall x y, [|add x y|] = ([|x|] + [|y|]) mod wwB. Proof. refine (spec_ww_add w_add_c w_add w_add_carry w_digits w_to_Z _ _ _ _);auto. Qed. Let spec_ww_add_carry : forall x y, [|add_carry x y|]=([|x|]+[|y|]+1)mod wwB. Proof. refine (spec_ww_add_carry w_W0 ww_1 w_succ_c w_add_carry_c w_succ w_add w_add_carry w_digits w_to_Z _ _ _ _ _ _ _ _);wwauto. Qed. Let spec_ww_pred_c : forall x, [-|pred_c x|] = [|x|] - 1. Proof. refine (spec_ww_pred_c w_0 w_Bm1 w_WW ww_Bm1 w_pred_c w_digits w_to_Z _ _ _ _ _);wwauto. Qed. Let spec_ww_sub_c : forall x y, [-|sub_c x y|] = [|x|] - [|y|]. Proof. refine (spec_ww_sub_c w_0 w_0 w_WW W0 w_opp_c w_opp_carry w_sub_c w_sub_carry_c w_digits w_to_Z _ _ _ _ _ _ _);wwauto. Qed. Let spec_ww_sub_carry_c : forall x y, [-|sub_carry_c x y|] = [|x|]-[|y|]-1. Proof. refine (spec_ww_sub_carry_c w_0 w_Bm1 w_WW ww_Bm1 w_opp_carry w_pred_c w_sub_c w_sub_carry_c w_digits w_to_Z _ _ _ _ _ _ _ _);wwauto. Qed. Let spec_ww_pred : forall x, [|pred x|] = ([|x|] - 1) mod wwB. Proof. refine (spec_ww_pred w_0 w_Bm1 w_WW ww_Bm1 w_pred_c w_pred w_digits w_to_Z _ _ _ _ _ _);wwauto. Qed. Let spec_ww_sub : forall x y, [|sub x y|] = ([|x|] - [|y|]) mod wwB. Proof. refine (spec_ww_sub w_0 w_0 w_WW W0 w_opp_c w_opp_carry w_sub_c w_opp w_sub w_sub_carry w_digits w_to_Z _ _ _ _ _ _ _ _ _);wwauto. Qed. Let spec_ww_sub_carry : forall x y, [|sub_carry x y|]=([|x|]-[|y|]-1) mod wwB. Proof. refine (spec_ww_sub_carry w_0 w_Bm1 w_WW ww_Bm1 w_opp_carry w_pred_c w_sub_carry_c w_pred w_sub w_sub_carry w_digits w_to_Z _ _ _ _ _ _ _ _ _ _); wwauto. Qed. Let spec_ww_mul_c : forall x y, [[mul_c x y ]] = [|x|] * [|y|]. Proof. refine (spec_ww_mul_c w_0 w_1 w_WW w_W0 w_mul_c add_c add add_carry w_digits w_to_Z _ _ _ _ _ _ _ _ _);wwauto. Qed. Let spec_ww_karatsuba_c : forall x y, [[karatsuba_c x y ]] = [|x|] * [|y|]. Proof. refine (spec_ww_karatsuba_c _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _); wwauto. unfold w_digits; apply ZnZ.spec_more_than_1_digit; auto. Qed. Let spec_ww_mul : forall x y, [|mul x y|] = ([|x|] * [|y|]) mod wwB. Proof. refine (spec_ww_mul w_W0 w_add w_mul_c w_mul add w_digits w_to_Z _ _ _ _ _); wwauto. Qed. Let spec_ww_square_c : forall x, [[square_c x]] = [|x|] * [|x|]. Proof. 
refine (spec_ww_square_c w_0 w_1 w_WW w_W0 w_mul_c w_square_c add_c add add_carry w_digits w_to_Z _ _ _ _ _ _ _ _ _ _);wwauto. Qed. Let spec_w_div32 : forall a1 a2 a3 b1 b2, wB / 2 <= (w_to_Z b1) -> [|WW a1 a2|] < [|WW b1 b2|] -> let (q, r) := div32 a1 a2 a3 b1 b2 in (w_to_Z a1) * wwB + (w_to_Z a2) * wB + (w_to_Z a3) = (w_to_Z q) * ((w_to_Z b1)*wB + (w_to_Z b2)) + [|r|] /\ 0 <= [|r|] < (w_to_Z b1)*wB + w_to_Z b2. Proof. refine (spec_w_div32 w_0 w_Bm1 w_Bm2 w_WW w_compare w_add_c w_add_carry_c w_add w_add_carry w_pred w_sub w_mul_c w_div21 sub_c w_digits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _);wwauto. unfold w_Bm2, w_to_Z, w_pred, w_Bm1. rewrite ZnZ.spec_pred, ZnZ.spec_m1. unfold w_digits;rewrite Zmod_small. ring. assert (H:= wB_pos(ZnZ.digits)). omega. exact ZnZ.spec_div21. Qed. Let spec_ww_div21 : forall a1 a2 b, wwB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := div21 a1 a2 b in [|a1|] *wwB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. refine (spec_ww_div21 w_0 w_0W div32 ww_1 compare sub w_digits w_to_Z _ _ _ _ _ _ _);wwauto. Qed. Let spec_add2: forall x y, [|w_add2 x y|] = w_to_Z x + w_to_Z y. unfold w_add2. intros xh xl; generalize (ZnZ.spec_add_c xh xl). unfold w_add_c; case ZnZ.add_c; unfold interp_carry; simpl ww_to_Z. intros w0 Hw0; simpl; unfold w_to_Z; rewrite Hw0. unfold w_0; rewrite ZnZ.spec_0; simpl; auto with zarith. intros w0; rewrite Z.mul_1_l; simpl. unfold w_to_Z, w_1; rewrite ZnZ.spec_1; auto with zarith. rewrite Z.mul_1_l; auto. Qed. Let spec_low: forall x, w_to_Z (low x) = [|x|] mod wB. intros x; case x; simpl low. unfold ww_to_Z, w_to_Z, w_0; rewrite ZnZ.spec_0; simpl; auto. intros xh xl; simpl. rewrite Z.add_comm; rewrite Z_mod_plus; auto with zarith. rewrite Zmod_small; auto with zarith. unfold wB, base; auto with zarith. Qed. Let spec_ww_digits: [|_ww_zdigits|] = Zpos (xO w_digits). Proof. unfold w_to_Z, _ww_zdigits. rewrite spec_add2. unfold w_to_Z, w_zdigits, w_digits. rewrite ZnZ.spec_zdigits; auto. rewrite Pos2Z.inj_xO; auto with zarith. Qed. Let spec_ww_head00 : forall x, [|x|] = 0 -> [|head0 x|] = Zpos _ww_digits. Proof. refine (spec_ww_head00 w_0 w_0W w_compare w_head0 w_add2 w_zdigits _ww_zdigits w_to_Z _ _ _ (eq_refl _ww_digits) _ _ _ _); auto. exact ZnZ.spec_head00. exact ZnZ.spec_zdigits. Qed. Let spec_ww_head0 : forall x, 0 < [|x|] -> wwB/ 2 <= 2 ^ [|head0 x|] * [|x|] < wwB. Proof. refine (spec_ww_head0 w_0 w_0W w_compare w_head0 w_add2 w_zdigits _ww_zdigits w_to_Z _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_zdigits. Qed. Let spec_ww_tail00 : forall x, [|x|] = 0 -> [|tail0 x|] = Zpos _ww_digits. Proof. refine (spec_ww_tail00 w_0 w_0W w_compare w_tail0 w_add2 w_zdigits _ww_zdigits w_to_Z _ _ _ (eq_refl _ww_digits) _ _ _ _); wwauto. exact ZnZ.spec_tail00. exact ZnZ.spec_zdigits. Qed. Let spec_ww_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2 * y + 1) * 2 ^ [|tail0 x|]. Proof. refine (spec_ww_tail0 (w_digits := w_digits) w_0 w_0W w_compare w_tail0 w_add2 w_zdigits _ww_zdigits w_to_Z _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_zdigits. Qed. Lemma spec_ww_add_mul_div : forall x y p, [|p|] <= Zpos _ww_digits -> [| add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos _ww_digits) - [|p|]))) mod wwB. Proof. refine (@spec_ww_add_mul_div t w_0 w_WW w_W0 w_0W compare w_add_mul_div sub w_digits w_zdigits low w_to_Z _ _ _ _ _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_zdigits. Qed. Let spec_ww_div_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> let (q,r) := div_gt a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. 
refine (@spec_ww_div_gt t w_digits w_0 w_WW w_0W w_compare w_eq0 w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_div_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits ww_1 add_mul_div w_zdigits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ). exact ZnZ.spec_0. exact ZnZ.spec_to_Z. wwauto. wwauto. exact ZnZ.spec_compare. exact ZnZ.spec_eq0. exact ZnZ.spec_opp_c. exact ZnZ.spec_opp. exact ZnZ.spec_opp_carry. exact ZnZ.spec_sub_c. exact ZnZ.spec_sub. exact ZnZ.spec_sub_carry. exact ZnZ.spec_div_gt. exact ZnZ.spec_add_mul_div. exact ZnZ.spec_head0. exact ZnZ.spec_div21. exact spec_w_div32. exact ZnZ.spec_zdigits. exact spec_ww_digits. exact spec_ww_1. exact spec_ww_add_mul_div. Qed. Let spec_ww_div : forall a b, 0 < [|b|] -> let (q,r) := div a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. refine (spec_ww_div w_digits ww_1 compare div_gt w_to_Z _ _ _ _);auto. Qed. Let spec_ww_mod_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> [|mod_gt a b|] = [|a|] mod [|b|]. Proof. refine (@spec_ww_mod_gt t w_digits w_0 w_WW w_0W w_compare w_eq0 w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_div_gt w_mod_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits ww_1 add_mul_div w_zdigits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_div_gt. exact ZnZ.spec_div21. exact ZnZ.spec_zdigits. exact spec_ww_add_mul_div. Qed. Let spec_ww_mod : forall a b, 0 < [|b|] -> [|mod_ a b|] = [|a|] mod [|b|]. Proof. refine (spec_ww_mod w_digits W0 compare mod_gt w_to_Z _ _ _);auto. Qed. Let spec_ww_gcd_gt : forall a b, [|a|] > [|b|] -> Zis_gcd [|a|] [|b|] [|gcd_gt a b|]. Proof. refine (@spec_ww_gcd_gt t w_digits W0 w_to_Z _ w_0 w_0 w_eq0 w_gcd_gt _ww_digits _ gcd_gt_fix _ _ _ _ gcd_cont _);auto. refine (@spec_ww_gcd_gt_aux t w_digits w_0 w_WW w_0W w_compare w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_gcd_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits ww_1 add_mul_div w_zdigits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_div21. exact ZnZ.spec_zdigits. exact spec_ww_add_mul_div. refine (@spec_gcd_cont t w_digits ww_1 w_to_Z _ _ w_0 w_1 w_compare _ _);auto. Qed. Let spec_ww_gcd : forall a b, Zis_gcd [|a|] [|b|] [|gcd a b|]. Proof. refine (@spec_ww_gcd t w_digits W0 compare w_to_Z _ _ w_0 w_0 w_eq0 w_gcd_gt _ww_digits _ gcd_gt_fix _ _ _ _ gcd_cont _);auto. refine (@spec_ww_gcd_gt_aux t w_digits w_0 w_WW w_0W w_compare w_opp_c w_opp w_opp_carry w_sub_c w_sub w_sub_carry w_gcd_gt w_add_mul_div w_head0 w_div21 div32 _ww_zdigits ww_1 add_mul_div w_zdigits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_div21. exact ZnZ.spec_zdigits. exact spec_ww_add_mul_div. refine (@spec_gcd_cont t w_digits ww_1 w_to_Z _ _ w_0 w_1 w_compare _ _);auto. Qed. Let spec_ww_is_even : forall x, match is_even x with true => [|x|] mod 2 = 0 | false => [|x|] mod 2 = 1 end. Proof. refine (@spec_ww_is_even t w_is_even w_digits _ _ ). exact ZnZ.spec_is_even. Qed. Let spec_ww_sqrt2 : forall x y, wwB/ 4 <= [|x|] -> let (s,r) := sqrt2 x y in [[WW x y]] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]. Proof. intros x y H. refine (@spec_ww_sqrt2 t w_is_even w_compare w_0 w_1 w_Bm1 w_0W w_sub w_square_c w_div21 w_add_mul_div w_digits w_zdigits _ww_zdigits w_add_c w_sqrt2 w_pred pred_c pred add_c add sub_c add_mul_div _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _); wwauto. exact ZnZ.spec_zdigits. exact ZnZ.spec_more_than_1_digit. exact ZnZ.spec_is_even. exact ZnZ.spec_div21. exact spec_ww_add_mul_div. exact ZnZ.spec_sqrt2. Qed. 
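(* Illustration of the square-root specifications: sqrt2 returns s and a
   carry r with n = s^2 + [+|r|] and [+|r|] <= 2 * s, which characterises s
   as the integer square root since s^2 <= n < s^2 + 2*s + 1 = (s+1)^2;
   e.g. 23 = 4^2 + 7 with 7 <= 2 * 4.  The spec_ww_sqrt statement below is
   exactly this floor property. *)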
Let spec_ww_sqrt : forall x, [|sqrt x|] ^ 2 <= [|x|] < ([|sqrt x|] + 1) ^ 2. Proof. refine (@spec_ww_sqrt t w_is_even w_0 w_1 w_Bm1 w_sub w_add_mul_div w_digits w_zdigits _ww_zdigits w_sqrt2 pred add_mul_div head0 compare _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _); wwauto. exact ZnZ.spec_zdigits. exact ZnZ.spec_more_than_1_digit. exact ZnZ.spec_is_even. exact spec_ww_add_mul_div. exact ZnZ.spec_sqrt2. Qed. Global Instance mk_zn2z_specs : ZnZ.Specs mk_zn2z_ops. Proof. apply ZnZ.MkSpecs; auto. exact spec_ww_add_mul_div. refine (@spec_ww_pos_mod t w_0 w_digits w_zdigits w_WW w_pos_mod compare w_0W low sub _ww_zdigits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_zdigits. unfold w_to_Z, w_zdigits. rewrite ZnZ.spec_zdigits. rewrite <- Pos2Z.inj_xO; exact spec_ww_digits. Qed. Global Instance mk_zn2z_specs_karatsuba : ZnZ.Specs mk_zn2z_ops_karatsuba. Proof. apply ZnZ.MkSpecs; auto. exact spec_ww_add_mul_div. refine (@spec_ww_pos_mod t w_0 w_digits w_zdigits w_WW w_pos_mod compare w_0W low sub _ww_zdigits w_to_Z _ _ _ _ _ _ _ _ _ _ _ _);wwauto. exact ZnZ.spec_zdigits. unfold w_to_Z, w_zdigits. rewrite ZnZ.spec_zdigits. rewrite <- Pos2Z.inj_xO; exact spec_ww_digits. Qed. End Z_2nZ. Section MulAdd. Context {t : Type}{ops : ZnZ.Ops t}{specs : ZnZ.Specs ops}. Definition mul_add:= w_mul_add ZnZ.zero ZnZ.succ ZnZ.add_c ZnZ.mul_c. Notation "[| x |]" := (ZnZ.to_Z x) (at level 0, x at level 99). Notation "[|| x ||]" := (zn2z_to_Z (base ZnZ.digits) ZnZ.to_Z x) (at level 0, x at level 99). Lemma spec_mul_add: forall x y z, let (zh, zl) := mul_add x y z in [||WW zh zl||] = [|x|] * [|y|] + [|z|]. Proof. intros x y z. refine (spec_w_mul_add _ _ _ _ _ _ _ _ _ _ _ _ x y z); auto. exact ZnZ.spec_0. exact ZnZ.spec_to_Z. exact ZnZ.spec_succ. exact ZnZ.spec_add_c. exact ZnZ.spec_mul_c. Qed. End MulAdd. (** Modular versions of DoubleCyclic *) Module DoubleCyclic (C:CyclicType) <: CyclicType. Definition t := zn2z C.t. Instance ops : ZnZ.Ops t := mk_zn2z_ops. Instance specs : ZnZ.Specs ops := mk_zn2z_specs. End DoubleCyclic. Module DoubleCyclicKaratsuba (C:CyclicType) <: CyclicType. Definition t := zn2z C.t. Definition ops : ZnZ.Ops t := mk_zn2z_ops_karatsuba. Definition specs : ZnZ.Specs ops := mk_zn2z_specs_karatsuba. End DoubleCyclicKaratsuba. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleAdd.v0000640000175000001440000002625712010532755022661 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable w_W0 : w -> zn2z w. Variable ww_1 : zn2z w. Variable w_succ_c : w -> carry w. Variable w_add_c : w -> w -> carry w. Variable w_add_carry_c : w -> w -> carry w. Variable w_succ : w -> w. Variable w_add : w -> w -> w. Variable w_add_carry : w -> w -> w. Definition ww_succ_c x := match x with | W0 => C0 ww_1 | WW xh xl => match w_succ_c xl with | C0 l => C0 (WW xh l) | C1 l => match w_succ_c xh with | C0 h => C0 (WW h w_0) | C1 h => C1 W0 end end end. Definition ww_succ x := match x with | W0 => ww_1 | WW xh xl => match w_succ_c xl with | C0 l => WW xh l | C1 l => w_W0 (w_succ xh) end end. Definition ww_add_c x y := match x, y with | W0, _ => C0 y | _, W0 => C0 x | WW xh xl, WW yh yl => match w_add_c xl yl with | C0 l => match w_add_c xh yh with | C0 h => C0 (WW h l) | C1 h => C1 (w_WW h l) end | C1 l => match w_add_carry_c xh yh with | C0 h => C0 (WW h l) | C1 h => C1 (w_WW h l) end end end. Variable R : Type. Variable f0 f1 : zn2z w -> R. 
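(* ww_add_c_cont below is a continuation-passing variant of ww_add_c:
   instead of materialising a carry constructor, it hands the low part of the
   sum to f0 when no overflow occurred and to f1 when the sum overflowed by
   wwB (see spec_ww_add_c_cont further down). *)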
Definition ww_add_c_cont x y := match x, y with | W0, _ => f0 y | _, W0 => f0 x | WW xh xl, WW yh yl => match w_add_c xl yl with | C0 l => match w_add_c xh yh with | C0 h => f0 (WW h l) | C1 h => f1 (w_WW h l) end | C1 l => match w_add_carry_c xh yh with | C0 h => f0 (WW h l) | C1 h => f1 (w_WW h l) end end end. (* ww_add et ww_add_carry conserve la forme normale s'il n'y a pas de debordement *) Definition ww_add x y := match x, y with | W0, _ => y | _, W0 => x | WW xh xl, WW yh yl => match w_add_c xl yl with | C0 l => WW (w_add xh yh) l | C1 l => WW (w_add_carry xh yh) l end end. Definition ww_add_carry_c x y := match x, y with | W0, W0 => C0 ww_1 | W0, WW yh yl => ww_succ_c (WW yh yl) | WW xh xl, W0 => ww_succ_c (WW xh xl) | WW xh xl, WW yh yl => match w_add_carry_c xl yl with | C0 l => match w_add_c xh yh with | C0 h => C0 (WW h l) | C1 h => C1 (WW h l) end | C1 l => match w_add_carry_c xh yh with | C0 h => C0 (WW h l) | C1 h => C1 (w_WW h l) end end end. Definition ww_add_carry x y := match x, y with | W0, W0 => ww_1 | W0, WW yh yl => ww_succ (WW yh yl) | WW xh xl, W0 => ww_succ (WW xh xl) | WW xh xl, WW yh yl => match w_add_carry_c xl yl with | C0 l => WW (w_add xh yh) l | C1 l => WW (w_add_carry xh yh) l end end. (*Section DoubleProof.*) Variable w_digits : positive. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wB w_to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB w_to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Notation "[+[ c ]]" := (interp_carry 1 wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_1 : [|w_1|] = 1. Variable spec_ww_1 : [[ww_1]] = 1. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_w_W0 : forall h, [[w_W0 h]] = [|h|] * wB. Variable spec_w_succ_c : forall x, [+|w_succ_c x|] = [|x|] + 1. Variable spec_w_add_c : forall x y, [+|w_add_c x y|] = [|x|] + [|y|]. Variable spec_w_add_carry_c : forall x y, [+|w_add_carry_c x y|] = [|x|] + [|y|] + 1. Variable spec_w_succ : forall x, [|w_succ x|] = ([|x|] + 1) mod wB. Variable spec_w_add : forall x y, [|w_add x y|] = ([|x|] + [|y|]) mod wB. Variable spec_w_add_carry : forall x y, [|w_add_carry x y|] = ([|x|] + [|y|] + 1) mod wB. Lemma spec_ww_succ_c : forall x, [+[ww_succ_c x]] = [[x]] + 1. Proof. destruct x as [ |xh xl];simpl. apply spec_ww_1. generalize (spec_w_succ_c xl);destruct (w_succ_c xl) as [l|l]; intro H;unfold interp_carry in H. simpl;rewrite H;ring. rewrite <- Z.add_assoc;rewrite <- H;rewrite Z.mul_1_l. assert ([|l|] = 0). generalize (spec_to_Z xl)(spec_to_Z l);omega. rewrite H0;generalize (spec_w_succ_c xh);destruct (w_succ_c xh) as [h|h]; intro H1;unfold interp_carry in H1. simpl;rewrite H1;rewrite spec_w_0;ring. unfold interp_carry;simpl ww_to_Z;rewrite wwB_wBwB. assert ([|xh|] = wB - 1). generalize (spec_to_Z xh)(spec_to_Z h);omega. rewrite H2;ring. Qed. Lemma spec_ww_add_c : forall x y, [+[ww_add_c x y]] = [[x]] + [[y]]. Proof. destruct x as [ |xh xl];simpl;trivial. destruct y as [ |yh yl];simpl. rewrite Z.add_0_r;trivial. 
replace ([|xh|] * wB + [|xl|] + ([|yh|] * wB + [|yl|])) with (([|xh|]+[|yh|])*wB + ([|xl|]+[|yl|])). 2:ring. generalize (spec_w_add_c xl yl);destruct (w_add_c xl yl) as [l|l]; intros H;unfold interp_carry in H;rewrite <- H. generalize (spec_w_add_c xh yh);destruct (w_add_c xh yh) as [h|h]; intros H1;unfold interp_carry in *;rewrite <- H1. trivial. repeat rewrite Z.mul_1_l;rewrite spec_w_WW;rewrite wwB_wBwB; ring. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. generalize (spec_w_add_carry_c xh yh);destruct (w_add_carry_c xh yh) as [h|h]; intros H1;unfold interp_carry in *;rewrite <- H1. simpl;ring. repeat rewrite Z.mul_1_l;rewrite wwB_wBwB;rewrite spec_w_WW;ring. Qed. Section Cont. Variable P : zn2z w -> zn2z w -> R -> Prop. Variable x y : zn2z w. Variable spec_f0 : forall r, [[r]] = [[x]] + [[y]] -> P x y (f0 r). Variable spec_f1 : forall r, wwB + [[r]] = [[x]] + [[y]] -> P x y (f1 r). Lemma spec_ww_add_c_cont : P x y (ww_add_c_cont x y). Proof. destruct x as [ |xh xl];simpl;trivial. apply spec_f0;trivial. destruct y as [ |yh yl];simpl. apply spec_f0;simpl;rewrite Z.add_0_r;trivial. generalize (spec_w_add_c xl yl);destruct (w_add_c xl yl) as [l|l]; intros H;unfold interp_carry in H. generalize (spec_w_add_c xh yh);destruct (w_add_c xh yh) as [h|h]; intros H1;unfold interp_carry in *. apply spec_f0. simpl;rewrite H;rewrite H1;ring. apply spec_f1. simpl;rewrite spec_w_WW;rewrite H. rewrite Z.add_assoc;rewrite wwB_wBwB. rewrite Z.pow_2_r; rewrite <- Z.mul_add_distr_r. rewrite Z.mul_1_l in H1;rewrite H1;ring. generalize (spec_w_add_carry_c xh yh);destruct (w_add_carry_c xh yh) as [h|h]; intros H1;unfold interp_carry in *. apply spec_f0;simpl;rewrite H1. rewrite Z.mul_add_distr_r. rewrite <- Z.add_assoc;rewrite H;ring. apply spec_f1. simpl;rewrite spec_w_WW;rewrite wwB_wBwB. rewrite Z.add_assoc; rewrite Z.pow_2_r; rewrite <- Z.mul_add_distr_r. rewrite Z.mul_1_l in H1;rewrite H1. rewrite Z.mul_add_distr_r. rewrite <- Z.add_assoc;rewrite H;ring. Qed. End Cont. Lemma spec_ww_add_carry_c : forall x y, [+[ww_add_carry_c x y]] = [[x]] + [[y]] + 1. Proof. destruct x as [ |xh xl];intro y;simpl. exact (spec_ww_succ_c y). destruct y as [ |yh yl];simpl. rewrite Z.add_0_r;exact (spec_ww_succ_c (WW xh xl)). replace ([|xh|] * wB + [|xl|] + ([|yh|] * wB + [|yl|]) + 1) with (([|xh|]+[|yh|])*wB + ([|xl|]+[|yl|]+1)). 2:ring. generalize (spec_w_add_carry_c xl yl);destruct (w_add_carry_c xl yl) as [l|l];intros H;unfold interp_carry in H;rewrite <- H. generalize (spec_w_add_c xh yh);destruct (w_add_c xh yh) as [h|h]; intros H1;unfold interp_carry in H1;rewrite <- H1. trivial. unfold interp_carry;repeat rewrite Z.mul_1_l;simpl;rewrite wwB_wBwB;ring. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. generalize (spec_w_add_carry_c xh yh);destruct (w_add_carry_c xh yh) as [h|h];intros H1;unfold interp_carry in H1;rewrite <- H1. trivial. unfold interp_carry;rewrite spec_w_WW; repeat rewrite Z.mul_1_l;simpl;rewrite wwB_wBwB;ring. Qed. Lemma spec_ww_succ : forall x, [[ww_succ x]] = ([[x]] + 1) mod wwB. Proof. destruct x as [ |xh xl];simpl. rewrite spec_ww_1;rewrite Zmod_small;trivial. split;[intro;discriminate|apply wwB_pos]. rewrite <- Z.add_assoc;generalize (spec_w_succ_c xl); destruct (w_succ_c xl) as[l|l];intro H;unfold interp_carry in H;rewrite <-H. rewrite Zmod_small;trivial. rewrite wwB_wBwB;apply beta_mult;apply spec_to_Z. assert ([|l|] = 0). clear spec_ww_1 spec_w_1 spec_w_0. assert (H1:= spec_to_Z l); assert (H2:= spec_to_Z xl); omega. 
rewrite H0;rewrite Z.add_0_r;rewrite <- Z.mul_add_distr_r;rewrite wwB_wBwB. rewrite Z.pow_2_r; rewrite Zmult_mod_distr_r;try apply lt_0_wB. rewrite spec_w_W0;rewrite spec_w_succ;trivial. Qed. Lemma spec_ww_add : forall x y, [[ww_add x y]] = ([[x]] + [[y]]) mod wwB. Proof. destruct x as [ |xh xl];intros y;simpl. rewrite Zmod_small;trivial. apply spec_ww_to_Z;trivial. destruct y as [ |yh yl]. change [[W0]] with 0;rewrite Z.add_0_r. rewrite Zmod_small;trivial. exact (spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW xh xl)). simpl. replace ([|xh|] * wB + [|xl|] + ([|yh|] * wB + [|yl|])) with (([|xh|]+[|yh|])*wB + ([|xl|]+[|yl|])). 2:ring. generalize (spec_w_add_c xl yl);destruct (w_add_c xl yl) as [l|l]; unfold interp_carry;intros H;simpl;rewrite <- H. rewrite (mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_w_add;trivial. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite(mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_w_add_carry;trivial. Qed. Lemma spec_ww_add_carry : forall x y, [[ww_add_carry x y]] = ([[x]] + [[y]] + 1) mod wwB. Proof. destruct x as [ |xh xl];intros y;simpl. exact (spec_ww_succ y). destruct y as [ |yh yl]. change [[W0]] with 0;rewrite Z.add_0_r. exact (spec_ww_succ (WW xh xl)). simpl;replace ([|xh|] * wB + [|xl|] + ([|yh|] * wB + [|yl|]) + 1) with (([|xh|]+[|yh|])*wB + ([|xl|]+[|yl|]+1)). 2:ring. generalize (spec_w_add_carry_c xl yl);destruct (w_add_carry_c xl yl) as [l|l];unfold interp_carry;intros H;rewrite <- H;simpl ww_to_Z. rewrite(mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_w_add;trivial. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite(mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_w_add_carry;trivial. Qed. (* End DoubleProof. *) End DoubleAdd. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleSqrt.v0000640000175000001440000014035612010532755023117 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool. Variable w_compare : w -> w -> comparison. Variable w_0 : w. Variable w_1 : w. Variable w_Bm1 : w. Variable w_WW : w -> w -> zn2z w. Variable w_W0 : w -> zn2z w. Variable w_0W : w -> zn2z w. Variable w_sub : w -> w -> w. Variable w_sub_c : w -> w -> carry w. Variable w_square_c : w -> zn2z w. Variable w_div21 : w -> w -> w -> w * w. Variable w_add_mul_div : w -> w -> w -> w. Variable w_digits : positive. Variable w_zdigits : w. Variable ww_zdigits : zn2z w. Variable w_add_c : w -> w -> carry w. Variable w_sqrt2 : w -> w -> w * carry w. Variable w_pred : w -> w. Variable ww_pred_c : zn2z w -> carry (zn2z w). Variable ww_pred : zn2z w -> zn2z w. Variable ww_add_c : zn2z w -> zn2z w -> carry (zn2z w). Variable ww_add : zn2z w -> zn2z w -> zn2z w. Variable ww_sub_c : zn2z w -> zn2z w -> carry (zn2z w). Variable ww_add_mul_div : zn2z w -> zn2z w -> zn2z w -> zn2z w. Variable ww_head0 : zn2z w -> zn2z w. Variable ww_compare : zn2z w -> zn2z w -> comparison. Variable low : zn2z w -> w. Let wwBm1 := ww_Bm1 w_Bm1. Definition ww_is_even x := match x with | W0 => true | WW xh xl => w_is_even xl end. Let w_div21c x y z := match w_compare x z with | Eq => match w_compare y z with Eq => (C1 w_1, w_0) | Gt => (C1 w_1, w_sub y z) | Lt => (C1 w_0, y) end | Gt => let x1 := w_sub x z in let (q, r) := w_div21 x1 y z in (C1 q, r) | Lt => let (q, r) := w_div21 x y z in (C0 q, r) end. 
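(* Informal reading of [w_div21c] (the formal statement is [spec_w_div21c]
   below): assuming wB/2 <= [|z|], it divides the two-word value
   [|x|] * wB + [|y|] by [|z|].  Unlike [w_div21], the high word [x] may
   reach or exceed [z], so the quotient is returned as a carry, [C1 q]
   standing for wB + [|q|].  In the [Gt] branch, for instance, the high
   word is first reduced by [z] (so that [w_div21] applies) and the
   removed wB * [|z|] is accounted for by the [C1] constructor. *)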
Let w_div2s x y s := match x with C1 x1 => let x2 := w_sub x1 s in let (q, r) := w_div21c x2 y s in match q with C0 q1 => if w_is_even q1 then (C0 (w_add_mul_div (w_pred w_zdigits) w_1 q1), C0 r) else (C0 (w_add_mul_div (w_pred w_zdigits) w_1 q1), w_add_c r s) | C1 q1 => if w_is_even q1 then (C1 (w_add_mul_div (w_pred w_zdigits) w_0 q1), C0 r) else (C1 (w_add_mul_div (w_pred w_zdigits) w_0 q1), w_add_c r s) end | C0 x1 => let (q, r) := w_div21c x1 y s in match q with C0 q1 => if w_is_even q1 then (C0 (w_add_mul_div (w_pred w_zdigits) w_0 q1), C0 r) else (C0 (w_add_mul_div (w_pred w_zdigits) w_0 q1), w_add_c r s) | C1 q1 => if w_is_even q1 then (C0 (w_add_mul_div (w_pred w_zdigits) w_1 q1), C0 r) else (C0 (w_add_mul_div (w_pred w_zdigits) w_1 q1), w_add_c r s) end end. Definition split x := match x with | W0 => (w_0,w_0) | WW h l => (h,l) end. Definition ww_sqrt2 x y := let (x1, x2) := split x in let (y1, y2) := split y in let ( q, r) := w_sqrt2 x1 x2 in let (q1, r1) := w_div2s r y1 q in match q1 with C0 q1 => let q2 := w_square_c q1 in let a := WW q q1 in match r1 with C1 r2 => match ww_sub_c (WW r2 y2) q2 with C0 r3 => (a, C1 r3) | C1 r3 => (a, C0 r3) end | C0 r2 => match ww_sub_c (WW r2 y2) q2 with C0 r3 => (a, C0 r3) | C1 r3 => let a2 := ww_add_mul_div (w_0W w_1) a W0 in match ww_pred_c a2 with C0 a3 => (ww_pred a, ww_add_c a3 r3) | C1 a3 => (ww_pred a, C0 (ww_add a3 r3)) end end end | C1 q1 => let a1 := WW q w_Bm1 in let a2 := ww_add_mul_div (w_0W w_1) a1 wwBm1 in (a1, ww_add_c a2 y) end. Definition ww_is_zero x := match ww_compare W0 x with Eq => true | _ => false end. Definition ww_head1 x := let p := ww_head0 x in if (ww_is_even p) then p else ww_pred p. Definition ww_sqrt x := if (ww_is_zero x) then W0 else let p := ww_head1 x in match ww_compare p W0 with | Gt => match ww_add_mul_div p x W0 with W0 => W0 | WW x1 x2 => let (r, _) := w_sqrt2 x1 x2 in WW w_0 (w_add_mul_div (w_sub w_zdigits (low (ww_add_mul_div (ww_pred ww_zdigits) W0 p))) w_0 r) end | _ => match x with W0 => W0 | WW x1 x2 => WW w_0 (fst (w_sqrt2 x1 x2)) end end. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wB w_to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB w_to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Notation "[+[ c ]]" := (interp_carry 1 wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Notation "[|| x ||]" := (zn2z_to_Z wwB (ww_to_Z w_digits w_to_Z) x) (at level 0, x at level 99). Notation "[! n | x !]" := (double_to_Z w_digits w_to_Z n x) (at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_1 : [|w_1|] = 1. Variable spec_w_Bm1 : [|w_Bm1|] = wB - 1. Variable spec_w_zdigits : [|w_zdigits|] = Zpos w_digits. Variable spec_more_than_1_digit: 1 < Zpos w_digits. Variable spec_ww_zdigits : [[ww_zdigits]] = Zpos (xO w_digits). Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_to_w_Z : forall x, 0 <= [[x]] < wwB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_w_W0 : forall h, [[w_W0 h]] = [|h|] * wB. Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_w_is_even : forall x, if w_is_even x then [|x|] mod 2 = 0 else [|x|] mod 2 = 1. 
Variable spec_w_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_w_sub : forall x y, [|w_sub x y|] = ([|x|] - [|y|]) mod wB. Variable spec_w_square_c : forall x, [[ w_square_c x]] = [|x|] * [|x|]. Variable spec_w_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := w_div21 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Variable spec_w_add_mul_div : forall x y p, [|p|] <= Zpos w_digits -> [| w_add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (Z.pow 2 ((Zpos w_digits) - [|p|]))) mod wB. Variable spec_ww_add_mul_div : forall x y p, [[p]] <= Zpos (xO w_digits) -> [[ ww_add_mul_div p x y ]] = ([[x]] * (2^ [[p]]) + [[y]] / (2^ (Zpos (xO w_digits) - [[p]]))) mod wwB. Variable spec_w_add_c : forall x y, [+|w_add_c x y|] = [|x|] + [|y|]. Variable spec_ww_add : forall x y, [[ww_add x y]] = ([[x]] + [[y]]) mod wwB. Variable spec_w_sqrt2 : forall x y, wB/ 4 <= [|x|] -> let (s,r) := w_sqrt2 x y in [[WW x y]] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]. Variable spec_ww_sub_c : forall x y, [-[ww_sub_c x y]] = [[x]] - [[y]]. Variable spec_ww_pred_c : forall x, [-[ww_pred_c x]] = [[x]] - 1. Variable spec_pred : forall x, [|w_pred x|] = ([|x|] - 1) mod wB. Variable spec_ww_pred : forall x, [[ww_pred x]] = ([[x]] - 1) mod wwB. Variable spec_ww_add_c : forall x y, [+[ww_add_c x y]] = [[x]] + [[y]]. Variable spec_ww_compare : forall x y, ww_compare x y = Z.compare [[x]] [[y]]. Variable spec_ww_head0 : forall x, 0 < [[x]] -> wwB/ 2 <= 2 ^ [[ww_head0 x]] * [[x]] < wwB. Variable spec_low: forall x, [|low x|] = [[x]] mod wB. Let spec_ww_Bm1 : [[wwBm1]] = wwB - 1. Proof. refine (spec_ww_Bm1 w_Bm1 w_digits w_to_Z _);auto. Qed. Hint Rewrite spec_w_0 spec_w_1 spec_w_WW spec_w_sub spec_w_add_mul_div spec_ww_Bm1 spec_w_add_c : w_rewrite. Lemma spec_ww_is_even : forall x, if ww_is_even x then [[x]] mod 2 = 0 else [[x]] mod 2 = 1. clear spec_more_than_1_digit. intros x; case x; simpl ww_is_even. simpl. rewrite Zmod_small; auto with zarith. intros w1 w2; simpl. unfold base. rewrite Zplus_mod; auto with zarith. rewrite (fun x y => (Zdivide_mod (x * y))); auto with zarith. rewrite Z.add_0_l; rewrite Zmod_mod; auto with zarith. apply spec_w_is_even; auto with zarith. apply Z.divide_mul_r; apply Zpower_divide; auto with zarith. Qed. Theorem spec_w_div21c : forall a1 a2 b, wB/2 <= [|b|] -> let (q,r) := w_div21c a1 a2 b in [|a1|] * wB + [|a2|] = [+|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. intros a1 a2 b Hb; unfold w_div21c. assert (H: 0 < [|b|]); auto with zarith. assert (U := wB_pos w_digits). apply Z.lt_le_trans with (2 := Hb); auto with zarith. apply Z.lt_le_trans with 1; auto with zarith. apply Zdiv_le_lower_bound; auto with zarith. rewrite !spec_w_compare. repeat case Z.compare_spec. intros H1 H2; split. unfold interp_carry; autorewrite with w_rewrite rm10; auto with zarith. rewrite H1; rewrite H2; ring. autorewrite with w_rewrite; auto with zarith. intros H1 H2; split. unfold interp_carry; autorewrite with w_rewrite rm10; auto with zarith. rewrite H2; ring. destruct (spec_to_Z a2);auto with zarith. intros H1 H2; split. unfold interp_carry; autorewrite with w_rewrite rm10; auto with zarith. rewrite H2; rewrite Zmod_small; auto with zarith. ring. destruct (spec_to_Z a2);auto with zarith. rewrite spec_w_sub; auto with zarith. destruct (spec_to_Z a2) as [H3 H4];auto with zarith. rewrite Zmod_small; auto with zarith. split; auto with zarith. assert ([|a2|] < 2 * [|b|]); auto with zarith. apply Z.lt_le_trans with (2 * (wB / 2)); auto with zarith. 
rewrite wB_div_2; auto. intros H1. match goal with |- context[w_div21 ?y ?z ?t] => generalize (@spec_w_div21 y z t Hb H1); case (w_div21 y z t); simpl; autorewrite with w_rewrite; auto end. intros H1. assert (H2: [|w_sub a1 b|] < [|b|]). rewrite spec_w_sub; auto with zarith. rewrite Zmod_small; auto with zarith. assert ([|a1|] < 2 * [|b|]); auto with zarith. apply Z.lt_le_trans with (2 * (wB / 2)); auto with zarith. rewrite wB_div_2; auto. destruct (spec_to_Z a1);auto with zarith. destruct (spec_to_Z a1);auto with zarith. match goal with |- context[w_div21 ?y ?z ?t] => generalize (@spec_w_div21 y z t Hb H2); case (w_div21 y z t); autorewrite with w_rewrite; auto end. intros w0 w1; replace [+|C1 w0|] with (wB + [|w0|]). rewrite Zmod_small; auto with zarith. intros (H3, H4); split; auto. rewrite Z.mul_add_distr_r. rewrite <- Z.add_assoc; rewrite <- H3; ring. split; auto with zarith. assert ([|a1|] < 2 * [|b|]); auto with zarith. apply Z.lt_le_trans with (2 * (wB / 2)); auto with zarith. rewrite wB_div_2; auto. destruct (spec_to_Z a1);auto with zarith. destruct (spec_to_Z a1);auto with zarith. simpl; case wB; auto. Qed. Theorem C0_id: forall p, [+|C0 p|] = [|p|]. intros p; simpl; auto. Qed. Theorem add_mult_div_2: forall w, [|w_add_mul_div (w_pred w_zdigits) w_0 w|] = [|w|] / 2. intros w1. assert (Hp: [|w_pred w_zdigits|] = Zpos w_digits - 1). rewrite spec_pred; rewrite spec_w_zdigits. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.lt_le_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_le_lin; auto with zarith. rewrite spec_w_add_mul_div; auto with zarith. autorewrite with w_rewrite rm10. match goal with |- context[?X - ?Y] => replace (X - Y) with 1 end. rewrite Z.pow_1_r; rewrite Zmod_small; auto with zarith. destruct (spec_to_Z w1) as [H1 H2];auto with zarith. split; auto with zarith. apply Zdiv_lt_upper_bound; auto with zarith. rewrite Hp; ring. Qed. Theorem add_mult_div_2_plus_1: forall w, [|w_add_mul_div (w_pred w_zdigits) w_1 w|] = [|w|] / 2 + 2 ^ Zpos (w_digits - 1). intros w1. assert (Hp: [|w_pred w_zdigits|] = Zpos w_digits - 1). rewrite spec_pred; rewrite spec_w_zdigits. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.lt_le_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_le_lin; auto with zarith. autorewrite with w_rewrite rm10; auto with zarith. match goal with |- context[?X - ?Y] => replace (X - Y) with 1 end; rewrite Hp; try ring. rewrite Pos2Z.inj_sub_max; auto with zarith. rewrite Z.max_r; auto with zarith. rewrite Z.pow_1_r; rewrite Zmod_small; auto with zarith. destruct (spec_to_Z w1) as [H1 H2];auto with zarith. split; auto with zarith. unfold base. match goal with |- _ < _ ^ ?X => assert (tmp: forall p, 1 + (p - 1) = p); auto with zarith; rewrite <- (tmp X); clear tmp end. rewrite Zpower_exp; try rewrite Z.pow_1_r; auto with zarith. assert (tmp: forall p, 1 + (p -1) - 1 = p - 1); auto with zarith; rewrite tmp; clear tmp; auto with zarith. match goal with |- ?X + ?Y < _ => assert (Y < X); auto with zarith end. apply Zdiv_lt_upper_bound; auto with zarith. pattern 2 at 2; rewrite <- Z.pow_1_r; rewrite <- Zpower_exp; auto with zarith. assert (tmp: forall p, (p - 1) + 1 = p); auto with zarith; rewrite tmp; clear tmp; auto with zarith. Qed. Theorem add_mult_mult_2: forall w, [|w_add_mul_div w_1 w w_0|] = 2 * [|w|] mod wB. intros w1. autorewrite with w_rewrite rm10; auto with zarith. rewrite Z.pow_1_r; auto with zarith. rewrite Z.mul_comm; auto. Qed. 
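(* The lemmas above read the shift operator [w_add_mul_div] arithmetically:
   a right shift by one with a zero incoming bit ([add_mult_div_2]) halves
   a word, a right shift by one with a set incoming bit
   ([add_mult_div_2_plus_1]) yields [|w|] / 2 plus the top bit
   2 ^ Zpos (w_digits - 1), and a left shift by one ([add_mult_mult_2])
   doubles modulo wB.  Purely as an illustration (an assumed 8-bit word,
   not tied to any concrete instance of [w]): with wB = 256, halving 200
   gives 100, and doubling 200 gives 400 mod 256 = 144.  The next two
   lemmas lift the doubling operations to double words. *)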
Theorem ww_add_mult_mult_2: forall w, [[ww_add_mul_div (w_0W w_1) w W0]] = 2 * [[w]] mod wwB. intros w1. rewrite spec_ww_add_mul_div; auto with zarith. autorewrite with w_rewrite rm10. rewrite spec_w_0W; rewrite spec_w_1. rewrite Z.pow_1_r; auto with zarith. rewrite Z.mul_comm; auto. rewrite spec_w_0W; rewrite spec_w_1; auto with zarith. red; simpl; intros; discriminate. Qed. Theorem ww_add_mult_mult_2_plus_1: forall w, [[ww_add_mul_div (w_0W w_1) w wwBm1]] = (2 * [[w]] + 1) mod wwB. intros w1. rewrite spec_ww_add_mul_div; auto with zarith. rewrite spec_w_0W; rewrite spec_w_1; auto with zarith. rewrite Z.pow_1_r; auto with zarith. f_equal; auto. rewrite Z.mul_comm; f_equal; auto. autorewrite with w_rewrite rm10. unfold ww_digits, base. symmetry; apply Zdiv_unique with (r := 2 ^ (Zpos (ww_digits w_digits) - 1) -1); auto with zarith. unfold ww_digits; split; auto with zarith. match goal with |- 0 <= ?X - 1 => assert (0 < X); auto with zarith end. apply Z.pow_pos_nonneg; auto with zarith. match goal with |- 0 <= ?X - 1 => assert (0 < X); auto with zarith; red; reflexivity end. unfold ww_digits; autorewrite with rm10. assert (tmp: forall p q r, p + (q - r) = p + q - r); auto with zarith; rewrite tmp; clear tmp. assert (tmp: forall p, p + p = 2 * p); auto with zarith; rewrite tmp; clear tmp. f_equal; auto. pattern 2 at 2; rewrite <- Z.pow_1_r; rewrite <- Zpower_exp; auto with zarith. assert (tmp: forall p, 1 + (p - 1) = p); auto with zarith; rewrite tmp; clear tmp; auto. match goal with |- ?X - 1 >= 0 => assert (0 < X); auto with zarith; red; reflexivity end. rewrite spec_w_0W; rewrite spec_w_1; auto with zarith. red; simpl; intros; discriminate. Qed. Theorem Zplus_mod_one: forall a1 b1, 0 < b1 -> (a1 + b1) mod b1 = a1 mod b1. intros a1 b1 H; rewrite Zplus_mod; auto with zarith. rewrite Z_mod_same; try rewrite Z.add_0_r; auto with zarith. apply Zmod_mod; auto. Qed. Lemma C1_plus_wB: forall x, [+|C1 x|] = wB + [|x|]. unfold interp_carry; auto with zarith. Qed. Theorem spec_w_div2s : forall a1 a2 b, wB/2 <= [|b|] -> [+|a1|] <= 2 * [|b|] -> let (q,r) := w_div2s a1 a2 b in [+|a1|] * wB + [|a2|] = [+|q|] * (2 * [|b|]) + [+|r|] /\ 0 <= [+|r|] < 2 * [|b|]. intros a1 a2 b H. assert (HH: 0 < [|b|]); auto with zarith. assert (U := wB_pos w_digits). apply Z.lt_le_trans with (2 := H); auto with zarith. apply Z.lt_le_trans with 1; auto with zarith. apply Zdiv_le_lower_bound; auto with zarith. unfold w_div2s; case a1; intros w0 H0. match goal with |- context[w_div21c ?y ?z ?t] => generalize (@spec_w_div21c y z t H); case (w_div21c y z t); autorewrite with w_rewrite; auto end. intros c w1; case c. simpl interp_carry; intros w2 (Hw1, Hw2). match goal with |- context[w_is_even ?y] => generalize (spec_w_is_even y); case (w_is_even y) end. repeat rewrite C0_id. rewrite add_mult_div_2. intros H1; split; auto with zarith. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1; ring. repeat rewrite C0_id. rewrite add_mult_div_2. rewrite spec_w_add_c; auto with zarith. intros H1; split; auto with zarith. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1; ring. intros w2; rewrite C1_plus_wB. intros (Hw1, Hw2). match goal with |- context[w_is_even ?y] => generalize (spec_w_is_even y); case (w_is_even y) end. repeat rewrite C0_id. intros H1; split; auto with zarith. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1. repeat rewrite C0_id. rewrite add_mult_div_2_plus_1; unfold base. 
match goal with |- context[_ ^ ?X] => assert (tmp: forall p, 1 + (p - 1) = p); auto with zarith; rewrite <- (tmp X); clear tmp; rewrite Zpower_exp; try rewrite Z.pow_1_r; auto with zarith end. rewrite Pos2Z.inj_sub_max; auto with zarith. rewrite Z.max_r; auto with zarith. ring. repeat rewrite C0_id. rewrite spec_w_add_c; auto with zarith. intros H1; split; auto with zarith. rewrite add_mult_div_2_plus_1. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1. unfold base. match goal with |- context[_ ^ ?X] => assert (tmp: forall p, 1 + (p - 1) = p); auto with zarith; rewrite <- (tmp X); clear tmp; rewrite Zpower_exp; try rewrite Z.pow_1_r; auto with zarith end. rewrite Pos2Z.inj_sub_max; auto with zarith. rewrite Z.max_r; auto with zarith. ring. repeat rewrite C1_plus_wB in H0. rewrite C1_plus_wB. match goal with |- context[w_div21c ?y ?z ?t] => generalize (@spec_w_div21c y z t H); case (w_div21c y z t); autorewrite with w_rewrite; auto end. intros c w1; case c. intros w2 (Hw1, Hw2); rewrite C0_id in Hw1. rewrite <- Zplus_mod_one in Hw1; auto with zarith. rewrite Zmod_small in Hw1; auto with zarith. match goal with |- context[w_is_even ?y] => generalize (spec_w_is_even y); case (w_is_even y) end. repeat rewrite C0_id. intros H1; split; auto with zarith. rewrite add_mult_div_2_plus_1. replace (wB + [|w0|]) with ([|b|] + ([|w0|] - [|b|] + wB)); auto with zarith. rewrite Z.mul_add_distr_r; rewrite <- Z.add_assoc. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1; unfold base. match goal with |- context[_ ^ ?X] => assert (tmp: forall p, 1 + (p - 1) = p); auto with zarith; rewrite <- (tmp X); clear tmp; rewrite Zpower_exp; try rewrite Z.pow_1_r; auto with zarith end. rewrite Pos2Z.inj_sub_max; auto with zarith. rewrite Z.max_r; auto with zarith. ring. repeat rewrite C0_id. rewrite add_mult_div_2_plus_1. rewrite spec_w_add_c; auto with zarith. intros H1; split; auto with zarith. replace (wB + [|w0|]) with ([|b|] + ([|w0|] - [|b|] + wB)); auto with zarith. rewrite Z.mul_add_distr_r; rewrite <- Z.add_assoc. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1; unfold base. match goal with |- context[_ ^ ?X] => assert (tmp: forall p, 1 + (p - 1) = p); auto with zarith; rewrite <- (tmp X); clear tmp; rewrite Zpower_exp; try rewrite Z.pow_1_r; auto with zarith end. rewrite Pos2Z.inj_sub_max; auto with zarith. rewrite Z.max_r; auto with zarith. ring. split; auto with zarith. destruct (spec_to_Z b);auto with zarith. destruct (spec_to_Z w0);auto with zarith. destruct (spec_to_Z b);auto with zarith. destruct (spec_to_Z b);auto with zarith. intros w2; rewrite C1_plus_wB. rewrite <- Zplus_mod_one; auto with zarith. rewrite Zmod_small; auto with zarith. intros (Hw1, Hw2). match goal with |- context[w_is_even ?y] => generalize (spec_w_is_even y); case (w_is_even y) end. repeat (rewrite C0_id || rewrite C1_plus_wB). intros H1; split; auto with zarith. rewrite add_mult_div_2. replace (wB + [|w0|]) with ([|b|] + ([|w0|] - [|b|] + wB)); auto with zarith. rewrite Z.mul_add_distr_r; rewrite <- Z.add_assoc. rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1; ring. repeat (rewrite C0_id || rewrite C1_plus_wB). rewrite spec_w_add_c; auto with zarith. intros H1; split; auto with zarith. rewrite add_mult_div_2. replace (wB + [|w0|]) with ([|b|] + ([|w0|] - [|b|] + wB)); auto with zarith. rewrite Z.mul_add_distr_r; rewrite <- Z.add_assoc. 
rewrite Hw1. pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] 2); auto with zarith. rewrite H1; ring. split; auto with zarith. destruct (spec_to_Z b);auto with zarith. destruct (spec_to_Z w0);auto with zarith. destruct (spec_to_Z b);auto with zarith. destruct (spec_to_Z b);auto with zarith. Qed. Theorem wB_div_4: 4 * (wB / 4) = wB. Proof. unfold base. assert (2 ^ Zpos w_digits = 4 * (2 ^ (Zpos w_digits - 2))). change 4 with (2 ^ 2). rewrite <- Zpower_exp; auto with zarith. f_equal; auto with zarith. rewrite H. rewrite (fun x => (Z.mul_comm 4 (2 ^x))). rewrite Z_div_mult; auto with zarith. Qed. Theorem Zsquare_mult: forall p, p ^ 2 = p * p. intros p; change 2 with (1 + 1); rewrite Zpower_exp; try rewrite Z.pow_1_r; auto with zarith. Qed. Theorem Zsquare_pos: forall p, 0 <= p ^ 2. intros p; case (Z.le_gt_cases 0 p); intros H1. rewrite Zsquare_mult; apply Z.mul_nonneg_nonneg; auto with zarith. rewrite Zsquare_mult; replace (p * p) with ((- p) * (- p)); try ring. apply Z.mul_nonneg_nonneg; auto with zarith. Qed. Lemma spec_split: forall x, [|fst (split x)|] * wB + [|snd (split x)|] = [[x]]. intros x; case x; simpl; autorewrite with w_rewrite; auto with zarith. Qed. Theorem mult_wwB: forall x y, [|x|] * [|y|] < wwB. Proof. intros x y; rewrite wwB_wBwB; rewrite Z.pow_2_r. generalize (spec_to_Z x); intros U. generalize (spec_to_Z y); intros U1. apply Z.le_lt_trans with ((wB -1 ) * (wB - 1)); auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. rewrite ?Z.mul_sub_distr_l, ?Z.mul_sub_distr_r; auto with zarith. Qed. Hint Resolve mult_wwB. Lemma spec_ww_sqrt2 : forall x y, wwB/ 4 <= [[x]] -> let (s,r) := ww_sqrt2 x y in [||WW x y||] = [[s]] ^ 2 + [+[r]] /\ [+[r]] <= 2 * [[s]]. intros x y H; unfold ww_sqrt2. repeat match goal with |- context[split ?x] => generalize (spec_split x); case (split x) end; simpl fst; simpl snd. intros w0 w1 Hw0 w2 w3 Hw1. assert (U: wB/4 <= [|w2|]). case (Z.le_gt_cases (wB / 4) [|w2|]); auto; intros H1. contradict H; apply Z.lt_nge. rewrite wwB_wBwB; rewrite Z.pow_2_r. pattern wB at 1; rewrite <- wB_div_4; rewrite <- Z.mul_assoc; rewrite Z.mul_comm. rewrite Z_div_mult; auto with zarith. rewrite <- Hw1. match goal with |- _ < ?X => pattern X; rewrite <- Z.add_0_r; apply beta_lex_inv; auto with zarith end. destruct (spec_to_Z w3);auto with zarith. generalize (@spec_w_sqrt2 w2 w3 U); case (w_sqrt2 w2 w3). intros w4 c (H1, H2). assert (U1: wB/2 <= [|w4|]). case (Z.le_gt_cases (wB/2) [|w4|]); auto with zarith. intros U1. assert (U2 : [|w4|] <= wB/2 -1); auto with zarith. assert (U3 : [|w4|] ^ 2 <= wB/4 * wB - wB + 1); auto with zarith. match goal with |- ?X ^ 2 <= ?Y => rewrite Zsquare_mult; replace Y with ((wB/2 - 1) * (wB/2 -1)) end. apply Z.mul_le_mono_nonneg; auto with zarith. destruct (spec_to_Z w4);auto with zarith. destruct (spec_to_Z w4);auto with zarith. pattern wB at 4 5; rewrite <- wB_div_2. rewrite Z.mul_assoc. replace ((wB / 4) * 2) with (wB / 2). ring. pattern wB at 1; rewrite <- wB_div_4. change 4 with (2 * 2). rewrite <- Z.mul_assoc; rewrite (Z.mul_comm 2). rewrite Z_div_mult; try ring; auto with zarith. assert (U4 : [+|c|] <= wB -2); auto with zarith. apply Z.le_trans with (1 := H2). match goal with |- ?X <= ?Y => replace Y with (2 * (wB/ 2 - 1)); auto with zarith end. pattern wB at 2; rewrite <- wB_div_2; auto with zarith. match type of H1 with ?X = _ => assert (U5: X < wB / 4 * wB) end. rewrite H1; auto with zarith. contradict U; apply Z.lt_nge. apply Z.mul_lt_mono_pos_r with wB; auto with zarith. destruct (spec_to_Z w4);auto with zarith. 
apply Z.le_lt_trans with (2 := U5). unfold ww_to_Z, zn2z_to_Z. destruct (spec_to_Z w3);auto with zarith. generalize (@spec_w_div2s c w0 w4 U1 H2). case (w_div2s c w0 w4). intros c0; case c0; intros w5; repeat (rewrite C0_id || rewrite C1_plus_wB). intros c1; case c1; intros w6; repeat (rewrite C0_id || rewrite C1_plus_wB). intros (H3, H4). match goal with |- context [ww_sub_c ?y ?z] => generalize (spec_ww_sub_c y z); case (ww_sub_c y z) end. intros z; change [-[C0 z]] with ([[z]]). change [+[C0 z]] with ([[z]]). intros H5; rewrite spec_w_square_c in H5; auto. split. unfold zn2z_to_Z; rewrite <- Hw1. unfold ww_to_Z, zn2z_to_Z in H1; rewrite H1. rewrite <- Hw0. match goal with |- (?X ^2 + ?Y) * wwB + (?Z * wB + ?T) = ?U => transitivity ((X * wB) ^ 2 + (Y * wB + Z) * wB + T) end. repeat rewrite Zsquare_mult. rewrite wwB_wBwB; ring. rewrite H3. rewrite H5. unfold ww_to_Z, zn2z_to_Z. repeat rewrite Zsquare_mult; ring. rewrite H5. unfold ww_to_Z, zn2z_to_Z. match goal with |- ?X - ?Y * ?Y <= _ => assert (V := Zsquare_pos Y); rewrite Zsquare_mult in V; apply Z.le_trans with X; auto with zarith; clear V end. match goal with |- ?X * wB + ?Y <= 2 * (?Z * wB + ?T) => apply Z.le_trans with ((2 * Z - 1) * wB + wB); auto with zarith end. destruct (spec_to_Z w1);auto with zarith. match goal with |- ?X <= _ => replace X with (2 * [|w4|] * wB); auto with zarith end. rewrite Z.mul_add_distr_l; rewrite Z.mul_assoc. destruct (spec_to_Z w5); auto with zarith. ring. intros z; replace [-[C1 z]] with (- wwB + [[z]]). 2: simpl; case wwB; auto with zarith. intros H5; rewrite spec_w_square_c in H5; auto. match goal with |- context [ww_pred_c ?y] => generalize (spec_ww_pred_c y); case (ww_pred_c y) end. intros z1; change [-[C0 z1]] with ([[z1]]). rewrite ww_add_mult_mult_2. rewrite spec_ww_add_c. rewrite spec_ww_pred. rewrite <- Zmod_unique with (q := 1) (r := -wwB + 2 * [[WW w4 w5]]); auto with zarith. intros Hz1; rewrite Zmod_small; auto with zarith. match type of H5 with -?X + ?Y = ?Z => assert (V: Y = Z + X); try (rewrite <- H5; ring) end. split. unfold zn2z_to_Z; rewrite <- Hw1. unfold ww_to_Z, zn2z_to_Z in H1; rewrite H1. rewrite <- Hw0. match goal with |- (?X ^2 + ?Y) * wwB + (?Z * wB + ?T) = ?U => transitivity ((X * wB) ^ 2 + (Y * wB + Z) * wB + T) end. repeat rewrite Zsquare_mult. rewrite wwB_wBwB; ring. rewrite H3. rewrite V. rewrite Hz1. unfold ww_to_Z; simpl zn2z_to_Z. repeat rewrite Zsquare_mult; ring. rewrite Hz1. destruct (spec_ww_to_Z w_digits w_to_Z spec_to_Z z);auto with zarith. assert (V1 := spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW w4 w5)). assert (0 < [[WW w4 w5]]); auto with zarith. apply Z.lt_le_trans with (wB/ 2 * wB + 0); auto with zarith. autorewrite with rm10; apply Z.mul_pos_pos; auto with zarith. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. autorewrite with rm10. rewrite Z.mul_comm; rewrite wB_div_2; auto with zarith. case (spec_to_Z w5);auto with zarith. case (spec_to_Z w5);auto with zarith. simpl. assert (V2 := spec_to_Z w5);auto with zarith. assert (V1 := spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW w4 w5)); auto with zarith. split; auto with zarith. assert (wwB <= 2 * [[WW w4 w5]]); auto with zarith. apply Z.le_trans with (2 * ([|w4|] * wB)). rewrite wwB_wBwB; rewrite Z.pow_2_r. rewrite Z.mul_assoc; apply Z.mul_le_mono_nonneg_r; auto with zarith. assert (V2 := spec_to_Z w5);auto with zarith. rewrite <- wB_div_2; auto with zarith. simpl ww_to_Z; assert (V2 := spec_to_Z w5);auto with zarith. assert (V1 := spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW w4 w5)); auto with zarith. 
intros z1; change [-[C1 z1]] with (-wwB + [[z1]]). match goal with |- context[([+[C0 ?z]])] => change [+[C0 z]] with ([[z]]) end. rewrite spec_ww_add; auto with zarith. rewrite spec_ww_pred; auto with zarith. rewrite ww_add_mult_mult_2. rename V1 into VV1. assert (VV2: 0 < [[WW w4 w5]]); auto with zarith. apply Z.lt_le_trans with (wB/ 2 * wB + 0); auto with zarith. autorewrite with rm10; apply Z.mul_pos_pos; auto with zarith. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. autorewrite with rm10. rewrite Z.mul_comm; rewrite wB_div_2; auto with zarith. assert (VV3 := spec_to_Z w5);auto with zarith. assert (VV3 := spec_to_Z w5);auto with zarith. simpl. assert (VV3 := spec_to_Z w5);auto with zarith. assert (VV3: wwB <= 2 * [[WW w4 w5]]); auto with zarith. apply Z.le_trans with (2 * ([|w4|] * wB)). rewrite wwB_wBwB; rewrite Z.pow_2_r. rewrite Z.mul_assoc; apply Z.mul_le_mono_nonneg_r; auto with zarith. case (spec_to_Z w5);auto with zarith. rewrite <- wB_div_2; auto with zarith. simpl ww_to_Z; assert (V4 := spec_to_Z w5);auto with zarith. rewrite <- Zmod_unique with (q := 1) (r := -wwB + 2 * [[WW w4 w5]]); auto with zarith. intros Hz1; rewrite Zmod_small; auto with zarith. match type of H5 with -?X + ?Y = ?Z => assert (V: Y = Z + X); try (rewrite <- H5; ring) end. match type of Hz1 with -?X + ?Y = -?X + ?Z - 1 => assert (V1: Y = Z - 1); [replace (Z - 1) with (X + (-X + Z -1)); [rewrite <- Hz1 | idtac]; ring | idtac] end. rewrite <- Zmod_unique with (q := 1) (r := -wwB + [[z1]] + [[z]]); auto with zarith. unfold zn2z_to_Z; rewrite <- Hw1. unfold ww_to_Z, zn2z_to_Z in H1; rewrite H1. rewrite <- Hw0. split. match goal with |- (?X ^2 + ?Y) * wwB + (?Z * wB + ?T) = ?U => transitivity ((X * wB) ^ 2 + (Y * wB + Z) * wB + T) end. repeat rewrite Zsquare_mult. rewrite wwB_wBwB; ring. rewrite H3. rewrite V. rewrite Hz1. unfold ww_to_Z; simpl zn2z_to_Z. repeat rewrite Zsquare_mult; ring. assert (V2 := spec_ww_to_Z w_digits w_to_Z spec_to_Z z);auto with zarith. assert (V2 := spec_ww_to_Z w_digits w_to_Z spec_to_Z z);auto with zarith. assert (V3 := spec_ww_to_Z w_digits w_to_Z spec_to_Z z1);auto with zarith. split; auto with zarith. rewrite (Z.add_comm (-wwB)); rewrite <- Z.add_assoc. rewrite H5. match goal with |- 0 <= ?X + (?Y - ?Z) => apply Z.le_trans with (X - Z); auto with zarith end. 2: generalize (spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW w6 w1)); unfold ww_to_Z; auto with zarith. rewrite V1. match goal with |- 0 <= ?X - 1 - ?Y => assert (Y < X); auto with zarith end. apply Z.lt_le_trans with wwB; auto with zarith. intros (H3, H4). match goal with |- context [ww_sub_c ?y ?z] => generalize (spec_ww_sub_c y z); case (ww_sub_c y z) end. intros z; change [-[C0 z]] with ([[z]]). match goal with |- context[([+[C1 ?z]])] => replace [+[C1 z]] with (wwB + [[z]]) end. 2: simpl; case wwB; auto. intros H5; rewrite spec_w_square_c in H5; auto. split. change ([||WW x y||]) with ([[x]] * wwB + [[y]]). rewrite <- Hw1. unfold ww_to_Z, zn2z_to_Z in H1; rewrite H1. rewrite <- Hw0. match goal with |- (?X ^2 + ?Y) * wwB + (?Z * wB + ?T) = ?U => transitivity ((X * wB) ^ 2 + (Y * wB + Z) * wB + T) end. repeat rewrite Zsquare_mult. rewrite wwB_wBwB; ring. rewrite H3. rewrite H5. unfold ww_to_Z; simpl zn2z_to_Z. rewrite wwB_wBwB. repeat rewrite Zsquare_mult; ring. simpl ww_to_Z. rewrite H5. simpl ww_to_Z. rewrite wwB_wBwB; rewrite Z.pow_2_r. match goal with |- ?X * ?Y + (?Z * ?Y + ?T - ?U) <= _ => apply Z.le_trans with (X * Y + (Z * Y + T - 0)); auto with zarith end. 
assert (V := Zsquare_pos [|w5|]); rewrite Zsquare_mult in V; auto with zarith. autorewrite with rm10. match goal with |- _ <= 2 * (?U * ?V + ?W) => apply Z.le_trans with (2 * U * V + 0); auto with zarith end. match goal with |- ?X * ?Y + (?Z * ?Y + ?T) <= _ => replace (X * Y + (Z * Y + T)) with ((X + Z) * Y + T); try ring end. apply Z.lt_le_incl; apply beta_lex_inv; auto with zarith. destruct (spec_to_Z w1);auto with zarith. destruct (spec_to_Z w5);auto with zarith. rewrite Z.mul_add_distr_l; auto with zarith. rewrite Z.mul_assoc; auto with zarith. intros z; replace [-[C1 z]] with (- wwB + [[z]]). 2: simpl; case wwB; auto with zarith. intros H5; rewrite spec_w_square_c in H5; auto. match goal with |- context[([+[C0 ?z]])] => change [+[C0 z]] with ([[z]]) end. match type of H5 with -?X + ?Y = ?Z => assert (V: Y = Z + X); try (rewrite <- H5; ring) end. change ([||WW x y||]) with ([[x]] * wwB + [[y]]). simpl ww_to_Z. rewrite <- Hw1. simpl ww_to_Z in H1; rewrite H1. rewrite <- Hw0. split. match goal with |- (?X ^2 + ?Y) * wwB + (?Z * wB + ?T) = ?U => transitivity ((X * wB) ^ 2 + (Y * wB + Z) * wB + T) end. repeat rewrite Zsquare_mult. rewrite wwB_wBwB; ring. rewrite H3. rewrite V. simpl ww_to_Z. rewrite wwB_wBwB. repeat rewrite Zsquare_mult; ring. rewrite V. simpl ww_to_Z. rewrite wwB_wBwB; rewrite Z.pow_2_r. match goal with |- (?Z * ?Y + ?T - ?U) + ?X * ?Y <= _ => apply Z.le_trans with ((Z * Y + T - 0) + X * Y); auto with zarith end. assert (V1 := Zsquare_pos [|w5|]); rewrite Zsquare_mult in V1; auto with zarith. autorewrite with rm10. match goal with |- _ <= 2 * (?U * ?V + ?W) => apply Z.le_trans with (2 * U * V + 0); auto with zarith end. match goal with |- (?Z * ?Y + ?T) + ?X * ?Y <= _ => replace ((Z * Y + T) + X * Y) with ((X + Z) * Y + T); try ring end. apply Z.lt_le_incl; apply beta_lex_inv; auto with zarith. destruct (spec_to_Z w1);auto with zarith. destruct (spec_to_Z w5);auto with zarith. rewrite Z.mul_add_distr_l; auto with zarith. rewrite Z.mul_assoc; auto with zarith. Z.le_elim H2. intros c1 (H3, H4). match type of H3 with ?X = ?Y => absurd (X < Y) end. apply Z.le_ngt; rewrite <- H3; auto with zarith. rewrite Z.mul_add_distr_r. apply Z.lt_le_trans with ((2 * [|w4|]) * wB + 0); auto with zarith. apply beta_lex_inv; auto with zarith. destruct (spec_to_Z w0);auto with zarith. assert (V1 := spec_to_Z w5);auto with zarith. rewrite (Z.mul_comm wB); auto with zarith. assert (0 <= [|w5|] * (2 * [|w4|])); auto with zarith. intros c1 (H3, H4); rewrite H2 in H3. match type of H3 with ?X + ?Y = (?Z + ?T) * ?U + ?V => assert (VV: (Y = (T * U) + V)); [replace Y with ((X + Y) - X); [rewrite H3; ring | ring] | idtac] end. assert (V1 := spec_to_Z w0);auto with zarith. assert (V2 := spec_to_Z w5);auto with zarith. case V2; intros V3 _. Z.le_elim V3; auto with zarith. match type of VV with ?X = ?Y => absurd (X < Y) end. apply Z.le_ngt; rewrite <- VV; auto with zarith. apply Z.lt_le_trans with wB; auto with zarith. match goal with |- _ <= ?X + _ => apply Z.le_trans with X; auto with zarith end. match goal with |- _ <= _ * ?X => apply Z.le_trans with (1 * X); auto with zarith end. autorewrite with rm10. rewrite <- wB_div_2; apply Z.mul_le_mono_nonneg_l; auto with zarith. rewrite <- V3 in VV; generalize VV; autorewrite with rm10; clear VV; intros VV. rewrite spec_ww_add_c; auto with zarith. rewrite ww_add_mult_mult_2_plus_1. match goal with |- context[?X mod wwB] => rewrite <- Zmod_unique with (q := 1) (r := -wwB + X) end; auto with zarith. simpl ww_to_Z. rewrite spec_w_Bm1; auto with zarith. split. 
change ([||WW x y||]) with ([[x]] * wwB + [[y]]). rewrite <- Hw1. simpl ww_to_Z in H1; rewrite H1. rewrite <- Hw0. match goal with |- (?X ^2 + ?Y) * wwB + (?Z * wB + ?T) = ?U => transitivity ((X * wB) ^ 2 + (Y * wB + Z) * wB + T) end. repeat rewrite Zsquare_mult. rewrite wwB_wBwB; ring. rewrite H2. rewrite wwB_wBwB. repeat rewrite Zsquare_mult; ring. assert (V4 := spec_ww_to_Z w_digits w_to_Z spec_to_Z y);auto with zarith. assert (V4 := spec_ww_to_Z w_digits w_to_Z spec_to_Z y);auto with zarith. simpl ww_to_Z; unfold ww_to_Z. rewrite spec_w_Bm1; auto with zarith. split. rewrite wwB_wBwB; rewrite Z.pow_2_r. match goal with |- _ <= -?X + (2 * (?Z * ?T + ?U) + ?V) => assert (X <= 2 * Z * T); auto with zarith end. apply Z.mul_le_mono_nonneg_r; auto with zarith. rewrite <- wB_div_2; apply Z.mul_le_mono_nonneg_l; auto with zarith. rewrite Z.mul_add_distr_l; auto with zarith. rewrite Z.mul_assoc; auto with zarith. match goal with |- _ + ?X < _ => replace X with ((2 * (([|w4|]) + 1) * wB) - 1); try ring end. assert (2 * ([|w4|] + 1) * wB <= 2 * wwB); auto with zarith. rewrite <- Z.mul_assoc; apply Z.mul_le_mono_nonneg_l; auto with zarith. rewrite wwB_wBwB; rewrite Z.pow_2_r. apply Z.mul_le_mono_nonneg_r; auto with zarith. case (spec_to_Z w4);auto with zarith. Qed. Lemma spec_ww_is_zero: forall x, if ww_is_zero x then [[x]] = 0 else 0 < [[x]]. intro x; unfold ww_is_zero. rewrite spec_ww_compare. case Z.compare_spec; auto with zarith. simpl ww_to_Z. assert (V4 := spec_ww_to_Z w_digits w_to_Z spec_to_Z x);auto with zarith. Qed. Lemma wwB_4_2: 2 * (wwB / 4) = wwB/ 2. pattern wwB at 1; rewrite wwB_wBwB; rewrite Z.pow_2_r. rewrite <- wB_div_2. match goal with |- context[(2 * ?X) * (2 * ?Z)] => replace ((2 * X) * (2 * Z)) with ((X * Z) * 4); try ring end. rewrite Z_div_mult; auto with zarith. rewrite Z.mul_assoc; rewrite wB_div_2. rewrite wwB_div_2; ring. Qed. Lemma spec_ww_head1 : forall x : zn2z w, (ww_is_even (ww_head1 x) = true) /\ (0 < [[x]] -> wwB / 4 <= 2 ^ [[ww_head1 x]] * [[x]] < wwB). assert (U := wB_pos w_digits). intros x; unfold ww_head1. generalize (spec_ww_is_even (ww_head0 x)); case_eq (ww_is_even (ww_head0 x)). intros HH H1; rewrite HH; split; auto. intros H2. generalize (spec_ww_head0 x H2); case (ww_head0 x); autorewrite with rm10. intros (H3, H4); split; auto with zarith. apply Z.le_trans with (2 := H3). apply Zdiv_le_compat_l; auto with zarith. intros xh xl (H3, H4); split; auto with zarith. apply Z.le_trans with (2 := H3). apply Zdiv_le_compat_l; auto with zarith. intros H1. case (spec_to_w_Z (ww_head0 x)); intros Hv1 Hv2. assert (Hp0: 0 < [[ww_head0 x]]). generalize (spec_ww_is_even (ww_head0 x)); rewrite H1. generalize Hv1; case [[ww_head0 x]]. rewrite Zmod_small; auto with zarith. intros; assert (0 < Zpos p); auto with zarith. red; simpl; auto. intros p H2; case H2; auto. assert (Hp: [[ww_pred (ww_head0 x)]] = [[ww_head0 x]] - 1). rewrite spec_ww_pred. rewrite Zmod_small; auto with zarith. intros H2; split. generalize (spec_ww_is_even (ww_pred (ww_head0 x))); case ww_is_even; auto. rewrite Hp. rewrite Zminus_mod; auto with zarith. rewrite H2; repeat rewrite Zmod_small; auto with zarith. intros H3; rewrite Hp. case (spec_ww_head0 x); auto; intros Hv3 Hv4. assert (Hu: forall u, 0 < u -> 2 * 2 ^ (u - 1) = 2 ^u). intros u Hu. pattern 2 at 1; rewrite <- Z.pow_1_r. rewrite <- Zpower_exp; auto with zarith. ring_simplify (1 + (u - 1)); auto with zarith. split; auto with zarith. apply Z.mul_le_mono_pos_r with 2; auto with zarith. repeat rewrite (fun x => Z.mul_comm x 2). rewrite wwB_4_2. 
rewrite Z.mul_assoc; rewrite Hu; auto with zarith. apply Z.le_lt_trans with (2 * 2 ^ ([[ww_head0 x]] - 1) * [[x]]); auto with zarith; rewrite Hu; auto with zarith. apply Z.mul_le_mono_nonneg_r; auto with zarith. apply Zpower_le_monotone; auto with zarith. Qed. Theorem wwB_4_wB_4: wwB / 4 = wB / 4 * wB. Proof. symmetry; apply Zdiv_unique with 0; auto with zarith. rewrite Z.mul_assoc; rewrite wB_div_4; auto with zarith. rewrite wwB_wBwB; ring. Qed. Lemma spec_ww_sqrt : forall x, [[ww_sqrt x]] ^ 2 <= [[x]] < ([[ww_sqrt x]] + 1) ^ 2. assert (U := wB_pos w_digits). intro x; unfold ww_sqrt. generalize (spec_ww_is_zero x); case (ww_is_zero x). simpl ww_to_Z; simpl Z.pow; unfold Z.pow_pos; simpl; auto with zarith. intros H1. rewrite spec_ww_compare. case Z.compare_spec; simpl ww_to_Z; autorewrite with rm10. generalize H1; case x. intros HH; contradict HH; simpl ww_to_Z; auto with zarith. intros w0 w1; simpl ww_to_Z; autorewrite with w_rewrite rm10. intros H2; case (spec_ww_head1 (WW w0 w1)); intros H3 H4 H5. generalize (H4 H2); clear H4; rewrite H5; clear H5; autorewrite with rm10. intros (H4, H5). assert (V: wB/4 <= [|w0|]). apply beta_lex with 0 [|w1|] wB; auto with zarith; autorewrite with rm10. rewrite <- wwB_4_wB_4; auto. generalize (@spec_w_sqrt2 w0 w1 V);auto with zarith. case (w_sqrt2 w0 w1); intros w2 c. simpl ww_to_Z; simpl fst. case c; unfold interp_carry; autorewrite with rm10. intros w3 (H6, H7); rewrite H6. assert (V1 := spec_to_Z w3);auto with zarith. split; auto with zarith. apply Z.le_lt_trans with ([|w2|] ^2 + 2 * [|w2|]); auto with zarith. match goal with |- ?X < ?Z => replace Z with (X + 1); auto with zarith end. repeat rewrite Zsquare_mult; ring. intros w3 (H6, H7); rewrite H6. assert (V1 := spec_to_Z w3);auto with zarith. split; auto with zarith. apply Z.le_lt_trans with ([|w2|] ^2 + 2 * [|w2|]); auto with zarith. match goal with |- ?X < ?Z => replace Z with (X + 1); auto with zarith end. repeat rewrite Zsquare_mult; ring. intros HH; case (spec_to_w_Z (ww_head1 x)); auto with zarith. intros Hv1. case (spec_ww_head1 x); intros Hp1 Hp2. generalize (Hp2 H1); clear Hp2; intros Hp2. assert (Hv2: [[ww_head1 x]] <= Zpos (xO w_digits)). case (Z.le_gt_cases (Zpos (xO w_digits)) [[ww_head1 x]]); auto with zarith; intros HH1. case Hp2; intros _ HH2; contradict HH2. apply Z.le_ngt; unfold base. apply Z.le_trans with (2 ^ [[ww_head1 x]]). apply Zpower_le_monotone; auto with zarith. pattern (2 ^ [[ww_head1 x]]) at 1; rewrite <- (Z.mul_1_r (2 ^ [[ww_head1 x]])). apply Z.mul_le_mono_nonneg_l; auto with zarith. generalize (spec_ww_add_mul_div x W0 (ww_head1 x) Hv2); case ww_add_mul_div. simpl ww_to_Z; autorewrite with w_rewrite rm10. rewrite Zmod_small; auto with zarith. intros H2. symmetry in H2. rewrite Z.mul_eq_0 in H2. destruct H2 as [H2|H2]. rewrite H2; unfold Z.pow, Z.pow_pos; simpl; auto with zarith. match type of H2 with ?X = ?Y => absurd (Y < X); try (rewrite H2; auto with zarith; fail) end. apply Z.pow_pos_nonneg; auto with zarith. split; auto with zarith. case Hp2; intros _ tmp; apply Z.le_lt_trans with (2 := tmp); clear tmp. rewrite Z.mul_comm; apply Z.mul_le_mono_nonneg_r; auto with zarith. assert (Hv0: [[ww_head1 x]] = 2 * ([[ww_head1 x]]/2)). pattern [[ww_head1 x]] at 1; rewrite (Z_div_mod_eq [[ww_head1 x]] 2); auto with zarith. generalize (spec_ww_is_even (ww_head1 x)); rewrite Hp1; intros tmp; rewrite tmp; rewrite Z.add_0_r; auto. intros w0 w1; autorewrite with w_rewrite rm10. rewrite Zmod_small; auto with zarith. 2: rewrite Z.mul_comm; auto with zarith. intros H2. 
assert (V: wB/4 <= [|w0|]). apply beta_lex with 0 [|w1|] wB; auto with zarith; autorewrite with rm10. simpl ww_to_Z in H2; rewrite H2. rewrite <- wwB_4_wB_4; auto with zarith. rewrite Z.mul_comm; auto with zarith. assert (V1 := spec_to_Z w1);auto with zarith. generalize (@spec_w_sqrt2 w0 w1 V);auto with zarith. case (w_sqrt2 w0 w1); intros w2 c. case (spec_to_Z w2); intros HH1 HH2. simpl ww_to_Z; simpl fst. assert (Hv3: [[ww_pred ww_zdigits]] = Zpos (xO w_digits) - 1). rewrite spec_ww_pred; rewrite spec_ww_zdigits. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.lt_le_trans with (Zpos (xO w_digits)); auto with zarith. unfold base; apply Zpower2_le_lin; auto with zarith. assert (Hv4: [[ww_head1 x]]/2 < wB). apply Z.le_lt_trans with (Zpos w_digits). apply Z.mul_le_mono_pos_r with 2; auto with zarith. repeat rewrite (fun x => Z.mul_comm x 2). rewrite <- Hv0; rewrite <- Pos2Z.inj_xO; auto. unfold base; apply Zpower2_lt_lin; auto with zarith. assert (Hv5: [[(ww_add_mul_div (ww_pred ww_zdigits) W0 (ww_head1 x))]] = [[ww_head1 x]]/2). rewrite spec_ww_add_mul_div. simpl ww_to_Z; autorewrite with rm10. rewrite Hv3. ring_simplify (Zpos (xO w_digits) - (Zpos (xO w_digits) - 1)). rewrite Z.pow_1_r. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.lt_le_trans with (1 := Hv4); auto with zarith. unfold base; apply Zpower_le_monotone; auto with zarith. split; unfold ww_digits; try rewrite Pos2Z.inj_xO; auto with zarith. rewrite Hv3; auto with zarith. assert (Hv6: [|low(ww_add_mul_div (ww_pred ww_zdigits) W0 (ww_head1 x))|] = [[ww_head1 x]]/2). rewrite spec_low. rewrite Hv5; rewrite Zmod_small; auto with zarith. rewrite spec_w_add_mul_div; auto with zarith. rewrite spec_w_sub; auto with zarith. rewrite spec_w_0. simpl ww_to_Z; autorewrite with rm10. rewrite Hv6; rewrite spec_w_zdigits. rewrite (fun x y => Zmod_small (x - y)). ring_simplify (Zpos w_digits - (Zpos w_digits - [[ww_head1 x]] / 2)). rewrite Zmod_small. simpl ww_to_Z in H2; rewrite H2; auto with zarith. intros (H4, H5); split. apply Z.mul_le_mono_pos_r with (2 ^ [[ww_head1 x]]); auto with zarith. rewrite H4. apply Z.le_trans with ([|w2|] ^ 2); auto with zarith. rewrite Z.mul_comm. pattern [[ww_head1 x]] at 1; rewrite Hv0; auto with zarith. rewrite (Z.mul_comm 2); rewrite Z.pow_mul_r; auto with zarith. assert (tmp: forall p q, p ^ 2 * q ^ 2 = (p * q) ^2); try (intros; repeat rewrite Zsquare_mult; ring); rewrite tmp; clear tmp. apply Zpower_le_monotone3; auto with zarith. split; auto with zarith. pattern [|w2|] at 2; rewrite (Z_div_mod_eq [|w2|] (2 ^ ([[ww_head1 x]] / 2))); auto with zarith. match goal with |- ?X <= ?X + ?Y => assert (0 <= Y); auto with zarith end. case (Z_mod_lt [|w2|] (2 ^ ([[ww_head1 x]] / 2))); auto with zarith. case c; unfold interp_carry; autorewrite with rm10; intros w3; assert (V3 := spec_to_Z w3);auto with zarith. apply Z.mul_lt_mono_pos_r with (2 ^ [[ww_head1 x]]); auto with zarith. rewrite H4. apply Z.le_lt_trans with ([|w2|] ^ 2 + 2 * [|w2|]); auto with zarith. apply Z.lt_le_trans with (([|w2|] + 1) ^ 2); auto with zarith. match goal with |- ?X < ?Y => replace Y with (X + 1); auto with zarith end. repeat rewrite (Zsquare_mult); ring. rewrite Z.mul_comm. pattern [[ww_head1 x]] at 1; rewrite Hv0. rewrite (Z.mul_comm 2); rewrite Z.pow_mul_r; auto with zarith. assert (tmp: forall p q, p ^ 2 * q ^ 2 = (p * q) ^2); try (intros; repeat rewrite Zsquare_mult; ring); rewrite tmp; clear tmp. apply Zpower_le_monotone3; auto with zarith. split; auto with zarith. 
pattern [|w2|] at 1; rewrite (Z_div_mod_eq [|w2|] (2 ^ ([[ww_head1 x]]/2))); auto with zarith. rewrite <- Z.add_assoc; rewrite Z.mul_add_distr_l. autorewrite with rm10; apply Z.add_le_mono_l; auto with zarith. case (Z_mod_lt [|w2|] (2 ^ ([[ww_head1 x]]/2))); auto with zarith. split; auto with zarith. apply Z.le_lt_trans with ([|w2|]); auto with zarith. apply Zdiv_le_upper_bound; auto with zarith. pattern [|w2|] at 1; replace [|w2|] with ([|w2|] * 2 ^0); auto with zarith. apply Z.mul_le_mono_nonneg_l; auto with zarith. apply Zpower_le_monotone; auto with zarith. rewrite Z.pow_0_r; autorewrite with rm10; auto. split; auto with zarith. rewrite Hv0 in Hv2; rewrite (Pos2Z.inj_xO w_digits) in Hv2; auto with zarith. apply Z.le_lt_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_lt_lin; auto with zarith. rewrite spec_w_sub; auto with zarith. rewrite Hv6; rewrite spec_w_zdigits; auto with zarith. assert (Hv7: 0 < [[ww_head1 x]]/2); auto with zarith. rewrite Zmod_small; auto with zarith. split; auto with zarith. assert ([[ww_head1 x]]/2 <= Zpos w_digits); auto with zarith. apply Z.mul_le_mono_pos_r with 2; auto with zarith. repeat rewrite (fun x => Z.mul_comm x 2). rewrite <- Hv0; rewrite <- Pos2Z.inj_xO; auto with zarith. apply Z.le_lt_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_lt_lin; auto with zarith. Qed. End DoubleSqrt. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleSub.v0000640000175000001440000003044312010532755022712 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable ww_Bm1 : zn2z w. Variable w_opp_c : w -> carry w. Variable w_opp_carry : w -> w. Variable w_pred_c : w -> carry w. Variable w_sub_c : w -> w -> carry w. Variable w_sub_carry_c : w -> w -> carry w. Variable w_opp : w -> w. Variable w_pred : w -> w. Variable w_sub : w -> w -> w. Variable w_sub_carry : w -> w -> w. (* ** Opposites ** *) Definition ww_opp_c x := match x with | W0 => C0 W0 | WW xh xl => match w_opp_c xl with | C0 _ => match w_opp_c xh with | C0 h => C0 W0 | C1 h => C1 (WW h w_0) end | C1 l => C1 (WW (w_opp_carry xh) l) end end. Definition ww_opp x := match x with | W0 => W0 | WW xh xl => match w_opp_c xl with | C0 _ => WW (w_opp xh) w_0 | C1 l => WW (w_opp_carry xh) l end end. Definition ww_opp_carry x := match x with | W0 => ww_Bm1 | WW xh xl => w_WW (w_opp_carry xh) (w_opp_carry xl) end. Definition ww_pred_c x := match x with | W0 => C1 ww_Bm1 | WW xh xl => match w_pred_c xl with | C0 l => C0 (w_WW xh l) | C1 _ => match w_pred_c xh with | C0 h => C0 (WW h w_Bm1) | C1 _ => C1 ww_Bm1 end end end. Definition ww_pred x := match x with | W0 => ww_Bm1 | WW xh xl => match w_pred_c xl with | C0 l => w_WW xh l | C1 l => WW (w_pred xh) w_Bm1 end end. Definition ww_sub_c x y := match y, x with | W0, _ => C0 x | WW yh yl, W0 => ww_opp_c (WW yh yl) | WW yh yl, WW xh xl => match w_sub_c xl yl with | C0 l => match w_sub_c xh yh with | C0 h => C0 (w_WW h l) | C1 h => C1 (WW h l) end | C1 l => match w_sub_carry_c xh yh with | C0 h => C0 (WW h l) | C1 h => C1 (WW h l) end end end. Definition ww_sub x y := match y, x with | W0, _ => x | WW yh yl, W0 => ww_opp (WW yh yl) | WW yh yl, WW xh xl => match w_sub_c xl yl with | C0 l => w_WW (w_sub xh yh) l | C1 l => WW (w_sub_carry xh yh) l end end. 
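(* Subtraction follows the same pattern as the opposite and predecessor
   above: the low words are subtracted first, and the resulting borrow
   selects [w_sub] or [w_sub_carry] for the high words.  [ww_sub_c]
   reports the borrow explicitly (under the [[-[ _ ]]] interpretation,
   [C1 z] stands for [[z]] - wwB), while [ww_sub] only computes the result
   modulo wwB; see [spec_ww_sub_c] and [spec_ww_sub] proved later in this
   section. *)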
Definition ww_sub_carry_c x y := match y, x with | W0, W0 => C1 ww_Bm1 | W0, WW xh xl => ww_pred_c (WW xh xl) | WW yh yl, W0 => C1 (ww_opp_carry (WW yh yl)) | WW yh yl, WW xh xl => match w_sub_carry_c xl yl with | C0 l => match w_sub_c xh yh with | C0 h => C0 (w_WW h l) | C1 h => C1 (WW h l) end | C1 l => match w_sub_carry_c xh yh with | C0 h => C0 (w_WW h l) | C1 h => C1 (w_WW h l) end end end. Definition ww_sub_carry x y := match y, x with | W0, W0 => ww_Bm1 | W0, WW xh xl => ww_pred (WW xh xl) | WW yh yl, W0 => ww_opp_carry (WW yh yl) | WW yh yl, WW xh xl => match w_sub_carry_c xl yl with | C0 l => w_WW (w_sub xh yh) l | C1 l => w_WW (w_sub_carry xh yh) l end end. (*Section DoubleProof.*) Variable w_digits : positive. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wB w_to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB w_to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Notation "[+[ c ]]" := (interp_carry 1 wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_Bm1 : [|w_Bm1|] = wB - 1. Variable spec_ww_Bm1 : [[ww_Bm1]] = wwB - 1. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_opp_c : forall x, [-|w_opp_c x|] = -[|x|]. Variable spec_opp : forall x, [|w_opp x|] = (-[|x|]) mod wB. Variable spec_opp_carry : forall x, [|w_opp_carry x|] = wB - [|x|] - 1. Variable spec_pred_c : forall x, [-|w_pred_c x|] = [|x|] - 1. Variable spec_sub_c : forall x y, [-|w_sub_c x y|] = [|x|] - [|y|]. Variable spec_sub_carry_c : forall x y, [-|w_sub_carry_c x y|] = [|x|] - [|y|] - 1. Variable spec_pred : forall x, [|w_pred x|] = ([|x|] - 1) mod wB. Variable spec_sub : forall x y, [|w_sub x y|] = ([|x|] - [|y|]) mod wB. Variable spec_sub_carry : forall x y, [|w_sub_carry x y|] = ([|x|] - [|y|] - 1) mod wB. Lemma spec_ww_opp_c : forall x, [-[ww_opp_c x]] = -[[x]]. Proof. destruct x as [ |xh xl];simpl. reflexivity. rewrite Z.opp_add_distr;generalize (spec_opp_c xl);destruct (w_opp_c xl) as [l|l];intros H;unfold interp_carry in H;rewrite <- H; rewrite <- Z.mul_opp_l. assert ([|l|] = 0). assert (H1:= spec_to_Z l);assert (H2 := spec_to_Z xl);omega. rewrite H0;generalize (spec_opp_c xh);destruct (w_opp_c xh) as [h|h];intros H1;unfold interp_carry in *;rewrite <- H1. assert ([|h|] = 0). assert (H3:= spec_to_Z h);assert (H2 := spec_to_Z xh);omega. rewrite H2;reflexivity. simpl ww_to_Z;rewrite wwB_wBwB;rewrite spec_w_0;ring. unfold interp_carry;simpl ww_to_Z;rewrite wwB_wBwB;rewrite spec_opp_carry; ring. Qed. Lemma spec_ww_opp : forall x, [[ww_opp x]] = (-[[x]]) mod wwB. Proof. destruct x as [ |xh xl];simpl. reflexivity. rewrite Z.opp_add_distr, <- Z.mul_opp_l. generalize (spec_opp_c xl);destruct (w_opp_c xl) as [l|l];intros H;unfold interp_carry in H;rewrite <- H;simpl ww_to_Z. rewrite spec_w_0;rewrite Z.add_0_r;rewrite wwB_wBwB. assert ([|l|] = 0). assert (H1:= spec_to_Z l);assert (H2 := spec_to_Z xl);omega. rewrite H0;rewrite Z.add_0_r; rewrite Z.pow_2_r; rewrite Zmult_mod_distr_r;try apply lt_0_wB. rewrite spec_opp;trivial. apply Zmod_unique with (q:= -1). exact (spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW (w_opp_carry xh) l)). 
rewrite spec_opp_carry;rewrite wwB_wBwB;ring. Qed. Lemma spec_ww_opp_carry : forall x, [[ww_opp_carry x]] = wwB - [[x]] - 1. Proof. destruct x as [ |xh xl];simpl. rewrite spec_ww_Bm1;ring. rewrite spec_w_WW;simpl;repeat rewrite spec_opp_carry;rewrite wwB_wBwB;ring. Qed. Lemma spec_ww_pred_c : forall x, [-[ww_pred_c x]] = [[x]] - 1. Proof. destruct x as [ |xh xl];unfold ww_pred_c. unfold interp_carry;rewrite spec_ww_Bm1;simpl ww_to_Z;ring. simpl ww_to_Z;replace (([|xh|]*wB+[|xl|])-1) with ([|xh|]*wB+([|xl|]-1)). 2:ring. generalize (spec_pred_c xl);destruct (w_pred_c xl) as [l|l]; intros H;unfold interp_carry in H;rewrite <- H. simpl;apply spec_w_WW. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. assert ([|l|] = wB - 1). assert (H1:= spec_to_Z l);assert (H2 := spec_to_Z xl);omega. rewrite H0;change ([|xh|] + -1) with ([|xh|] - 1). generalize (spec_pred_c xh);destruct (w_pred_c xh) as [h|h]; intros H1;unfold interp_carry in H1;rewrite <- H1. simpl;rewrite spec_w_Bm1;ring. assert ([|h|] = wB - 1). assert (H3:= spec_to_Z h);assert (H2 := spec_to_Z xh);omega. rewrite H2;unfold interp_carry;rewrite spec_ww_Bm1;rewrite wwB_wBwB;ring. Qed. Lemma spec_ww_sub_c : forall x y, [-[ww_sub_c x y]] = [[x]] - [[y]]. Proof. destruct y as [ |yh yl];simpl. ring. destruct x as [ |xh xl];simpl. exact (spec_ww_opp_c (WW yh yl)). replace ([|xh|] * wB + [|xl|] - ([|yh|] * wB + [|yl|])) with (([|xh|]-[|yh|])*wB + ([|xl|]-[|yl|])). 2:ring. generalize (spec_sub_c xl yl);destruct (w_sub_c xl yl) as [l|l];intros H; unfold interp_carry in H;rewrite <- H. generalize (spec_sub_c xh yh);destruct (w_sub_c xh yh) as [h|h];intros H1; unfold interp_carry in H1;rewrite <- H1;unfold interp_carry; try rewrite spec_w_WW;simpl ww_to_Z;try rewrite wwB_wBwB;ring. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. change ([|xh|] - [|yh|] + -1) with ([|xh|] - [|yh|] - 1). generalize (spec_sub_carry_c xh yh);destruct (w_sub_carry_c xh yh) as [h|h]; intros H1;unfold interp_carry in *;rewrite <- H1;simpl ww_to_Z; try rewrite wwB_wBwB;ring. Qed. Lemma spec_ww_sub_carry_c : forall x y, [-[ww_sub_carry_c x y]] = [[x]] - [[y]] - 1. Proof. destruct y as [ |yh yl];simpl. unfold Z.sub;simpl;rewrite Z.add_0_r;exact (spec_ww_pred_c x). destruct x as [ |xh xl]. unfold interp_carry;rewrite spec_w_WW;simpl ww_to_Z;rewrite wwB_wBwB; repeat rewrite spec_opp_carry;ring. simpl ww_to_Z. replace ([|xh|] * wB + [|xl|] - ([|yh|] * wB + [|yl|]) - 1) with (([|xh|]-[|yh|])*wB + ([|xl|]-[|yl|]-1)). 2:ring. generalize (spec_sub_carry_c xl yl);destruct (w_sub_carry_c xl yl) as [l|l];intros H;unfold interp_carry in H;rewrite <- H. generalize (spec_sub_c xh yh);destruct (w_sub_c xh yh) as [h|h];intros H1; unfold interp_carry in H1;rewrite <- H1;unfold interp_carry; try rewrite spec_w_WW;simpl ww_to_Z;try rewrite wwB_wBwB;ring. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. change ([|xh|] - [|yh|] + -1) with ([|xh|] - [|yh|] - 1). generalize (spec_sub_carry_c xh yh);destruct (w_sub_carry_c xh yh) as [h|h]; intros H1;unfold interp_carry in *;rewrite <- H1;try rewrite spec_w_WW; simpl ww_to_Z; try rewrite wwB_wBwB;ring. Qed. Lemma spec_ww_pred : forall x, [[ww_pred x]] = ([[x]] - 1) mod wwB. Proof. destruct x as [ |xh xl];simpl. apply Zmod_unique with (-1). apply spec_ww_to_Z;trivial. rewrite spec_ww_Bm1;ring. replace ([|xh|]*wB + [|xl|] - 1) with ([|xh|]*wB + ([|xl|] - 1)). 2:ring. generalize (spec_pred_c xl);destruct (w_pred_c xl) as [l|l];intro H; unfold interp_carry in H;rewrite <- H;simpl ww_to_Z. rewrite Zmod_small. apply spec_w_WW. 
exact (spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW xh l)). rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. change ([|xh|] + -1) with ([|xh|] - 1). assert ([|l|] = wB - 1). assert (H1:= spec_to_Z l);assert (H2:= spec_to_Z xl);omega. rewrite (mod_wwB w_digits w_to_Z);trivial. rewrite spec_pred;rewrite spec_w_Bm1;rewrite <- H0;trivial. Qed. Lemma spec_ww_sub : forall x y, [[ww_sub x y]] = ([[x]] - [[y]]) mod wwB. Proof. destruct y as [ |yh yl];simpl. ring_simplify ([[x]] - 0);rewrite Zmod_small;trivial. apply spec_ww_to_Z;trivial. destruct x as [ |xh xl];simpl. exact (spec_ww_opp (WW yh yl)). replace ([|xh|] * wB + [|xl|] - ([|yh|] * wB + [|yl|])) with (([|xh|] - [|yh|]) * wB + ([|xl|] - [|yl|])). 2:ring. generalize (spec_sub_c xl yl);destruct (w_sub_c xl yl)as[l|l];intros H; unfold interp_carry in H;rewrite <- H. rewrite spec_w_WW;rewrite (mod_wwB w_digits w_to_Z spec_to_Z). rewrite spec_sub;trivial. simpl ww_to_Z;rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite (mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_sub_carry;trivial. Qed. Lemma spec_ww_sub_carry : forall x y, [[ww_sub_carry x y]] = ([[x]] - [[y]] - 1) mod wwB. Proof. destruct y as [ |yh yl];simpl. ring_simplify ([[x]] - 0);exact (spec_ww_pred x). destruct x as [ |xh xl];simpl. apply Zmod_unique with (-1). apply spec_ww_to_Z;trivial. fold (ww_opp_carry (WW yh yl)). rewrite (spec_ww_opp_carry (WW yh yl));simpl ww_to_Z;ring. replace ([|xh|] * wB + [|xl|] - ([|yh|] * wB + [|yl|]) - 1) with (([|xh|] - [|yh|]) * wB + ([|xl|] - [|yl|] - 1)). 2:ring. generalize (spec_sub_carry_c xl yl);destruct (w_sub_carry_c xl yl)as[l|l]; intros H;unfold interp_carry in H;rewrite <- H;rewrite spec_w_WW. rewrite (mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_sub;trivial. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite (mod_wwB w_digits w_to_Z spec_to_Z);rewrite spec_sub_carry;trivial. Qed. (* End DoubleProof. *) End DoubleSub. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleBase.v0000640000175000001440000003161112010532755023031 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable w_0W : w -> zn2z w. Variable w_digits : positive. Variable w_zdigits: w. Variable w_add: w -> w -> zn2z w. Variable w_to_Z : w -> Z. Variable w_compare : w -> w -> comparison. Definition ww_digits := xO w_digits. Definition ww_zdigits := w_add w_zdigits w_zdigits. Definition ww_to_Z := zn2z_to_Z (base w_digits) w_to_Z. Definition ww_1 := WW w_0 w_1. Definition ww_Bm1 := WW w_Bm1 w_Bm1. Definition ww_WW xh xl : zn2z (zn2z w) := match xh, xl with | W0, W0 => W0 | _, _ => WW xh xl end. Definition ww_W0 h : zn2z (zn2z w) := match h with | W0 => W0 | _ => WW h W0 end. Definition ww_0W l : zn2z (zn2z w) := match l with | W0 => W0 | _ => WW W0 l end. Definition double_WW (n:nat) := match n return word w n -> word w n -> word w (S n) with | O => w_WW | S n => fun (h l : zn2z (word w n)) => match h, l with | W0, W0 => W0 | _, _ => WW h l end end. Definition double_wB n := base (w_digits << n). Fixpoint double_to_Z (n:nat) : word w n -> Z := match n return word w n -> Z with | O => w_to_Z | S n => zn2z_to_Z (double_wB n) (double_to_Z n) end. Fixpoint extend_aux (n:nat) (x:zn2z w) {struct n}: word w (S n) := match n return word w (S n) with | O => x | S n1 => WW W0 (extend_aux n1 x) end. Definition extend (n:nat) (x:w) : word w (S n) := let r := w_0W x in match r with | W0 => W0 | _ => extend_aux n r end. 
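(* [extend_aux] and [extend] inject values into larger words: [extend n x]
   turns the base word [x] into a [word w (S n)] whose value is still
   [|x|] (by [spec_w_0W]), all higher positions being padded with [W0].
   Matching on [w_0W x] first presumably keeps a zero argument in the
   canonical [W0] representation instead of wrapping it in a tower of
   [WW W0 _] nodes. *)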
Definition double_0 n : word w n := match n return word w n with | O => w_0 | S _ => W0 end. Definition double_split (n:nat) (x:zn2z (word w n)) := match x with | W0 => match n return word w n * word w n with | O => (w_0,w_0) | S _ => (W0, W0) end | WW h l => (h,l) end. Definition ww_compare x y := match x, y with | W0, W0 => Eq | W0, WW yh yl => match w_compare w_0 yh with | Eq => w_compare w_0 yl | _ => Lt end | WW xh xl, W0 => match w_compare xh w_0 with | Eq => w_compare xl w_0 | _ => Gt end | WW xh xl, WW yh yl => match w_compare xh yh with | Eq => w_compare xl yl | Lt => Lt | Gt => Gt end end. (* Return the low part of the composed word*) Fixpoint get_low (n : nat) {struct n}: word w n -> w := match n return (word w n -> w) with | 0%nat => fun x => x | S n1 => fun x => match x with | W0 => w_0 | WW _ x1 => get_low n1 x1 end end. Section DoubleProof. Notation wB := (base w_digits). Notation wwB := (base ww_digits). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z x) (at level 0, x at level 99). Notation "[+[ c ]]" := (interp_carry 1 wwB ww_to_Z c) (at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB ww_to_Z c) (at level 0, x at level 99). Notation "[! n | x !]" := (double_to_Z n x) (at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_1 : [|w_1|] = 1. Variable spec_w_Bm1 : [|w_Bm1|] = wB - 1. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_w_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Lemma wwB_wBwB : wwB = wB^2. Proof. unfold base, ww_digits;rewrite Z.pow_2_r; rewrite (Pos2Z.inj_xO w_digits). replace (2 * Zpos w_digits) with (Zpos w_digits + Zpos w_digits). apply Zpower_exp; unfold Z.ge;simpl;intros;discriminate. ring. Qed. Lemma spec_ww_1 : [[ww_1]] = 1. Proof. simpl;rewrite spec_w_0;rewrite spec_w_1;ring. Qed. Lemma spec_ww_Bm1 : [[ww_Bm1]] = wwB - 1. Proof. simpl;rewrite spec_w_Bm1;rewrite wwB_wBwB;ring. Qed. Lemma lt_0_wB : 0 < wB. Proof. unfold base;apply Z.pow_pos_nonneg. unfold Z.lt;reflexivity. unfold Z.le;intros H;discriminate H. Qed. Lemma lt_0_wwB : 0 < wwB. Proof. rewrite wwB_wBwB; rewrite Z.pow_2_r; apply Z.mul_pos_pos;apply lt_0_wB. Qed. Lemma wB_pos: 1 < wB. Proof. unfold base;apply Z.lt_le_trans with (2^1). unfold Z.lt;reflexivity. apply Zpower_le_monotone. unfold Z.lt;reflexivity. split;unfold Z.le;intros H. discriminate H. clear spec_w_0W w_0W spec_w_Bm1 spec_to_Z spec_w_WW w_WW. destruct w_digits; discriminate H. Qed. Lemma wwB_pos: 1 < wwB. Proof. assert (H:= wB_pos);rewrite wwB_wBwB;rewrite <-(Z.mul_1_r 1). rewrite Z.pow_2_r. apply Zmult_lt_compat2;(split;[unfold Z.lt;reflexivity|trivial]). apply Z.lt_le_incl;trivial. Qed. Theorem wB_div_2: 2 * (wB / 2) = wB. Proof. clear spec_w_0 w_0 spec_w_1 w_1 spec_w_Bm1 w_Bm1 spec_w_WW spec_w_0W spec_to_Z;unfold base. assert (2 ^ Zpos w_digits = 2 * (2 ^ (Zpos w_digits - 1))). pattern 2 at 2; rewrite <- Z.pow_1_r. rewrite <- Zpower_exp; auto with zarith. f_equal; auto with zarith. case w_digits; compute; intros; discriminate. rewrite H; f_equal; auto with zarith. rewrite Z.mul_comm; apply Z_div_mult; auto with zarith. Qed. Theorem wwB_div_2 : wwB / 2 = wB / 2 * wB. Proof. clear spec_w_0 w_0 spec_w_1 w_1 spec_w_Bm1 w_Bm1 spec_w_WW spec_w_0W spec_to_Z. rewrite wwB_wBwB; rewrite Z.pow_2_r. pattern wB at 1; rewrite <- wB_div_2; auto. rewrite <- Z.mul_assoc. 
repeat (rewrite (Z.mul_comm 2); rewrite Z_div_mult); auto with zarith. Qed. Lemma mod_wwB : forall z x, (z*wB + [|x|]) mod wwB = (z mod wB)*wB + [|x|]. Proof. intros z x. rewrite Zplus_mod. pattern wwB at 1;rewrite wwB_wBwB; rewrite Z.pow_2_r. rewrite Zmult_mod_distr_r;try apply lt_0_wB. rewrite (Zmod_small [|x|]). apply Zmod_small;rewrite wwB_wBwB;apply beta_mult;try apply spec_to_Z. apply Z_mod_lt;apply Z.lt_gt;apply lt_0_wB. destruct (spec_to_Z x);split;trivial. change [|x|] with (0*wB+[|x|]). rewrite wwB_wBwB. rewrite Z.pow_2_r;rewrite <- (Z.add_0_r (wB*wB));apply beta_lex_inv. apply lt_0_wB. apply spec_to_Z. split;[apply Z.le_refl | apply lt_0_wB]. Qed. Lemma wB_div : forall x y, ([|x|] * wB + [|y|]) / wB = [|x|]. Proof. clear spec_w_0 spec_w_1 spec_w_Bm1 w_0 w_1 w_Bm1. intros x y;unfold base;rewrite Zdiv_shift_r;auto with zarith. rewrite Z_div_mult;auto with zarith. destruct (spec_to_Z x);trivial. Qed. Lemma wB_div_plus : forall x y p, 0 <= p -> ([|x|]*wB + [|y|]) / 2^(Zpos w_digits + p) = [|x|] / 2^p. Proof. clear spec_w_0 spec_w_1 spec_w_Bm1 w_0 w_1 w_Bm1. intros x y p Hp;rewrite Zpower_exp;auto with zarith. rewrite <- Zdiv_Zdiv;auto with zarith. rewrite wB_div;trivial. Qed. Lemma lt_wB_wwB : wB < wwB. Proof. clear spec_w_0 spec_w_1 spec_w_Bm1 w_0 w_1 w_Bm1. unfold base;apply Zpower_lt_monotone;auto with zarith. assert (0 < Zpos w_digits). compute;reflexivity. unfold ww_digits;rewrite Pos2Z.inj_xO;auto with zarith. Qed. Lemma w_to_Z_wwB : forall x, x < wB -> x < wwB. Proof. intros x H;apply Z.lt_trans with wB;trivial;apply lt_wB_wwB. Qed. Lemma spec_ww_to_Z : forall x, 0 <= [[x]] < wwB. Proof. clear spec_w_0 spec_w_1 spec_w_Bm1 w_0 w_1 w_Bm1. destruct x as [ |h l];simpl. split;[apply Z.le_refl|apply lt_0_wwB]. assert (H:=spec_to_Z h);assert (L:=spec_to_Z l);split. apply Z.add_nonneg_nonneg;auto with zarith. rewrite <- (Z.add_0_r wwB);rewrite wwB_wBwB; rewrite Z.pow_2_r; apply beta_lex_inv;auto with zarith. Qed. Lemma double_wB_wwB : forall n, double_wB n * double_wB n = double_wB (S n). Proof. intros n;unfold double_wB;simpl. unfold base. rewrite Pshiftl_nat_S, (Pos2Z.inj_xO (_ << _)). replace (2 * Zpos (w_digits << n)) with (Zpos (w_digits << n) + Zpos (w_digits << n)) by ring. symmetry; apply Zpower_exp;intro;discriminate. Qed. Lemma double_wB_pos: forall n, 0 <= double_wB n. Proof. intros n; unfold double_wB, base; auto with zarith. Qed. Lemma double_wB_more_digits: forall n, wB <= double_wB n. Proof. clear spec_w_0 spec_w_1 spec_w_Bm1 w_0 w_1 w_Bm1. intros n; elim n; clear n; auto. unfold double_wB, "<<"; auto with zarith. intros n H1; rewrite <- double_wB_wwB. apply Z.le_trans with (wB * 1). rewrite Z.mul_1_r; apply Z.le_refl. unfold base; auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. apply Z.le_trans with wB; auto with zarith. unfold base. rewrite <- (Z.pow_0_r 2). apply Z.pow_le_mono_r; auto with zarith. Qed. Lemma spec_double_to_Z : forall n (x:word w n), 0 <= [!n | x!] < double_wB n. Proof. clear spec_w_0 spec_w_1 spec_w_Bm1 w_0 w_1 w_Bm1. induction n;intros. exact (spec_to_Z x). unfold double_to_Z;fold double_to_Z. destruct x;unfold zn2z_to_Z. unfold double_wB,base;split;auto with zarith. assert (U0:= IHn w0);assert (U1:= IHn w1). split;auto with zarith. apply Z.lt_le_trans with ((double_wB n - 1) * double_wB n + double_wB n). assert (double_to_Z n w0*double_wB n <= (double_wB n - 1)*double_wB n). apply Z.mul_le_mono_nonneg_r;auto with zarith. auto with zarith. rewrite <- double_wB_wwB. 
replace ((double_wB n - 1) * double_wB n + double_wB n) with (double_wB n * double_wB n); [auto with zarith | ring]. Qed. Lemma spec_get_low: forall n x, [!n | x!] < wB -> [|get_low n x|] = [!n | x!]. Proof. clear spec_w_1 spec_w_Bm1. intros n; elim n; auto; clear n. intros n Hrec x; case x; clear x; auto. intros xx yy; simpl. destruct (spec_double_to_Z n xx) as [F1 _]. Z.le_elim F1. - (* 0 < [!n | xx!] *) intros; exfalso. assert (F3 := double_wB_more_digits n). destruct (spec_double_to_Z n yy) as [F4 _]. assert (F5: 1 * wB <= [!n | xx!] * double_wB n); auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. unfold base; auto with zarith. - (* 0 = [!n | xx!] *) rewrite <- F1; rewrite Z.mul_0_l, Z.add_0_l. intros; apply Hrec; auto. Qed. Lemma spec_double_WW : forall n (h l : word w n), [!S n|double_WW n h l!] = [!n|h!] * double_wB n + [!n|l!]. Proof. induction n;simpl;intros;trivial. destruct h;auto. destruct l;auto. Qed. Lemma spec_extend_aux : forall n x, [!S n|extend_aux n x!] = [[x]]. Proof. induction n;simpl;trivial. Qed. Lemma spec_extend : forall n x, [!S n|extend n x!] = [|x|]. Proof. intros n x;assert (H:= spec_w_0W x);unfold extend. destruct (w_0W x);simpl;trivial. rewrite <- H;exact (spec_extend_aux n (WW w0 w1)). Qed. Lemma spec_double_0 : forall n, [!n|double_0 n!] = 0. Proof. destruct n;trivial. Qed. Lemma spec_double_split : forall n x, let (h,l) := double_split n x in [!S n|x!] = [!n|h!] * double_wB n + [!n|l!]. Proof. destruct x;simpl;auto. destruct n;simpl;trivial. rewrite spec_w_0;trivial. Qed. Lemma wB_lex_inv: forall a b c d, a < c -> a * wB + [|b|] < c * wB + [|d|]. Proof. intros a b c d H1; apply beta_lex_inv with (1 := H1); auto. Qed. Ltac comp2ord := match goal with | |- Lt = (?x ?= ?y) => symmetry; change (x < y) | |- Gt = (?x ?= ?y) => symmetry; change (x > y); apply Z.lt_gt end. Lemma spec_ww_compare : forall x y, ww_compare x y = Z.compare [[x]] [[y]]. Proof. destruct x as [ |xh xl];destruct y as [ |yh yl];simpl;trivial. (* 1st case *) rewrite 2 spec_w_compare, spec_w_0. destruct (Z.compare_spec 0 [|yh|]) as [H|H|H]. rewrite <- H;simpl. reflexivity. symmetry. change (0 < [|yh|]*wB+[|yl|]). change 0 with (0*wB+0). rewrite <- spec_w_0 at 2. apply wB_lex_inv;trivial. absurd (0 <= [|yh|]). apply Z.lt_nge; trivial. destruct (spec_to_Z yh);trivial. (* 2nd case *) rewrite 2 spec_w_compare, spec_w_0. destruct (Z.compare_spec [|xh|] 0) as [H|H|H]. rewrite H;simpl;reflexivity. absurd (0 <= [|xh|]). apply Z.lt_nge; trivial. destruct (spec_to_Z xh);trivial. comp2ord. change 0 with (0*wB+0). rewrite <- spec_w_0 at 2. apply wB_lex_inv;trivial. (* 3rd case *) rewrite 2 spec_w_compare. destruct (Z.compare_spec [|xh|] [|yh|]) as [H|H|H]. rewrite H. symmetry. apply Z.add_compare_mono_l. comp2ord. apply wB_lex_inv;trivial. comp2ord. apply wB_lex_inv;trivial. Qed. End DoubleProof. End DoubleBase. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleMul.v0000640000175000001440000005714612010532755022727 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable w_W0 : w -> zn2z w. Variable w_0W : w -> zn2z w. Variable w_compare : w -> w -> comparison. Variable w_succ : w -> w. Variable w_add_c : w -> w -> carry w. Variable w_add : w -> w -> w. Variable w_sub: w -> w -> w. Variable w_mul_c : w -> w -> zn2z w. Variable w_mul : w -> w -> w. Variable w_square_c : w -> zn2z w. Variable ww_add_c : zn2z w -> zn2z w -> carry (zn2z w). 
Variable ww_add : zn2z w -> zn2z w -> zn2z w. Variable ww_add_carry : zn2z w -> zn2z w -> zn2z w. Variable ww_sub_c : zn2z w -> zn2z w -> carry (zn2z w). Variable ww_sub : zn2z w -> zn2z w -> zn2z w. (* ** Multiplication ** *) (* (xh*B+xl) (yh*B + yl) xh*yh = hh = |hhh|hhl|B2 xh*yl +xl*yh = cc = |cch|ccl|B xl*yl = ll = |llh|lll *) Definition double_mul_c (cross:w->w->w->w->zn2z w -> zn2z w -> w*zn2z w) x y := match x, y with | W0, _ => W0 | _, W0 => W0 | WW xh xl, WW yh yl => let hh := w_mul_c xh yh in let ll := w_mul_c xl yl in let (wc,cc) := cross xh xl yh yl hh ll in match cc with | W0 => WW (ww_add hh (w_W0 wc)) ll | WW cch ccl => match ww_add_c (w_W0 ccl) ll with | C0 l => WW (ww_add hh (w_WW wc cch)) l | C1 l => WW (ww_add_carry hh (w_WW wc cch)) l end end end. Definition ww_mul_c := double_mul_c (fun xh xl yh yl hh ll=> match ww_add_c (w_mul_c xh yl) (w_mul_c xl yh) with | C0 cc => (w_0, cc) | C1 cc => (w_1, cc) end). Definition w_2 := w_add w_1 w_1. Definition kara_prod xh xl yh yl hh ll := match ww_add_c hh ll with C0 m => match w_compare xl xh with Eq => (w_0, m) | Lt => match w_compare yl yh with Eq => (w_0, m) | Lt => (w_0, ww_sub m (w_mul_c (w_sub xh xl) (w_sub yh yl))) | Gt => match ww_add_c m (w_mul_c (w_sub xh xl) (w_sub yl yh)) with C1 m1 => (w_1, m1) | C0 m1 => (w_0, m1) end end | Gt => match w_compare yl yh with Eq => (w_0, m) | Lt => match ww_add_c m (w_mul_c (w_sub xl xh) (w_sub yh yl)) with C1 m1 => (w_1, m1) | C0 m1 => (w_0, m1) end | Gt => (w_0, ww_sub m (w_mul_c (w_sub xl xh) (w_sub yl yh))) end end | C1 m => match w_compare xl xh with Eq => (w_1, m) | Lt => match w_compare yl yh with Eq => (w_1, m) | Lt => match ww_sub_c m (w_mul_c (w_sub xh xl) (w_sub yh yl)) with C0 m1 => (w_1, m1) | C1 m1 => (w_0, m1) end | Gt => match ww_add_c m (w_mul_c (w_sub xh xl) (w_sub yl yh)) with C1 m1 => (w_2, m1) | C0 m1 => (w_1, m1) end end | Gt => match w_compare yl yh with Eq => (w_1, m) | Lt => match ww_add_c m (w_mul_c (w_sub xl xh) (w_sub yh yl)) with C1 m1 => (w_2, m1) | C0 m1 => (w_1, m1) end | Gt => match ww_sub_c m (w_mul_c (w_sub xl xh) (w_sub yl yh)) with C1 m1 => (w_0, m1) | C0 m1 => (w_1, m1) end end end end. Definition ww_karatsuba_c := double_mul_c kara_prod. Definition ww_mul x y := match x, y with | W0, _ => W0 | _, W0 => W0 | WW xh xl, WW yh yl => let ccl := w_add (w_mul xh yl) (w_mul xl yh) in ww_add (w_W0 ccl) (w_mul_c xl yl) end. Definition ww_square_c x := match x with | W0 => W0 | WW xh xl => let hh := w_square_c xh in let ll := w_square_c xl in let xhxl := w_mul_c xh xl in let (wc,cc) := match ww_add_c xhxl xhxl with | C0 cc => (w_0, cc) | C1 cc => (w_1, cc) end in match cc with | W0 => WW (ww_add hh (w_W0 wc)) ll | WW cch ccl => match ww_add_c (w_W0 ccl) ll with | C0 l => WW (ww_add hh (w_WW wc cch)) l | C1 l => WW (ww_add_carry hh (w_WW wc cch)) l end end end. Section DoubleMulAddn1. Variable w_mul_add : w -> w -> w -> w * w. Fixpoint double_mul_add_n1 (n:nat) : word w n -> w -> w -> w * word w n := match n return word w n -> w -> w -> w * word w n with | O => w_mul_add | S n1 => let mul_add := double_mul_add_n1 n1 in fun x y r => match x with | W0 => (w_0,extend w_0W n1 r) | WW xh xl => let (rl,l) := mul_add xl y r in let (rh,h) := mul_add xh y rl in (rh, double_WW w_WW n1 h l) end end. End DoubleMulAddn1. Section DoubleMulAddmn1. Variable wn: Type. Variable extend_n : w -> wn. Variable wn_0W : wn -> zn2z wn. Variable wn_WW : wn -> wn -> zn2z wn. Variable w_mul_add_n1 : wn -> w -> w -> w*wn. 
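(* Worked illustration of the splitting used by double_mul_c and kara_prod
   above, assuming a hypothetical base B = 256:
     x = WW 2 3 = 2*256 + 3 = 515        y = WW 4 5 = 4*256 + 5 = 1029
     hh = xh*yh = 8    ll = xl*yl = 15   cross = xh*yl + xl*yh = 22
   schoolbook:  x*y = hh*B^2 + cross*B + ll = 8*65536 + 22*256 + 15 = 529935
   Karatsuba (cf. kara_prod_aux below): cross = hh + ll - (xh-xl)*(yh-yl)
                                              = 8 + 15 - (2-3)*(4-5) = 22
   so only three single-word products are needed instead of four. *)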
Fixpoint double_mul_add_mn1 (m:nat) : word wn m -> w -> w -> w*word wn m := match m return word wn m -> w -> w -> w*word wn m with | O => w_mul_add_n1 | S m1 => let mul_add := double_mul_add_mn1 m1 in fun x y r => match x with | W0 => (w_0,extend wn_0W m1 (extend_n r)) | WW xh xl => let (rl,l) := mul_add xl y r in let (rh,h) := mul_add xh y rl in (rh, double_WW wn_WW m1 h l) end end. End DoubleMulAddmn1. Definition w_mul_add x y r := match w_mul_c x y with | W0 => (w_0, r) | WW h l => match w_add_c l r with | C0 lr => (h,lr) | C1 lr => (w_succ h, lr) end end. (*Section DoubleProof. *) Variable w_digits : positive. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wB w_to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB w_to_Z c) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Notation "[+[ c ]]" := (interp_carry 1 wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Notation "[-[ c ]]" := (interp_carry (-1) wwB (ww_to_Z w_digits w_to_Z) c) (at level 0, x at level 99). Notation "[|| x ||]" := (zn2z_to_Z wwB (ww_to_Z w_digits w_to_Z) x) (at level 0, x at level 99). Notation "[! n | x !]" := (double_to_Z w_digits w_to_Z n x) (at level 0, x at level 99). Variable spec_more_than_1_digit: 1 < Zpos w_digits. Variable spec_w_0 : [|w_0|] = 0. Variable spec_w_1 : [|w_1|] = 1. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_w_W0 : forall h, [[w_W0 h]] = [|h|] * wB. Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_w_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_w_succ : forall x, [|w_succ x|] = ([|x|] + 1) mod wB. Variable spec_w_add_c : forall x y, [+|w_add_c x y|] = [|x|] + [|y|]. Variable spec_w_add : forall x y, [|w_add x y|] = ([|x|] + [|y|]) mod wB. Variable spec_w_sub : forall x y, [|w_sub x y|] = ([|x|] - [|y|]) mod wB. Variable spec_w_mul_c : forall x y, [[ w_mul_c x y ]] = [|x|] * [|y|]. Variable spec_w_mul : forall x y, [|w_mul x y|] = ([|x|] * [|y|]) mod wB. Variable spec_w_square_c : forall x, [[ w_square_c x]] = [|x|] * [|x|]. Variable spec_ww_add_c : forall x y, [+[ww_add_c x y]] = [[x]] + [[y]]. Variable spec_ww_add : forall x y, [[ww_add x y]] = ([[x]] + [[y]]) mod wwB. Variable spec_ww_add_carry : forall x y, [[ww_add_carry x y]] = ([[x]] + [[y]] + 1) mod wwB. Variable spec_ww_sub : forall x y, [[ww_sub x y]] = ([[x]] - [[y]]) mod wwB. Variable spec_ww_sub_c : forall x y, [-[ww_sub_c x y]] = [[x]] - [[y]]. Lemma spec_ww_to_Z : forall x, 0 <= [[x]] < wwB. Proof. intros x;apply spec_ww_to_Z;auto. Qed. Lemma spec_ww_to_Z_wBwB : forall x, 0 <= [[x]] < wB^2. Proof. rewrite <- wwB_wBwB;apply spec_ww_to_Z. Qed. Hint Resolve spec_ww_to_Z spec_ww_to_Z_wBwB : mult. Ltac zarith := auto with zarith mult. Lemma wBwB_lex: forall a b c d, a * wB^2 + [[b]] <= c * wB^2 + [[d]] -> a <= c. Proof. intros a b c d H; apply beta_lex with [[b]] [[d]] (wB^2);zarith. Qed. Lemma wBwB_lex_inv: forall a b c d, a < c -> a * wB^2 + [[b]] < c * wB^2 + [[d]]. Proof. intros a b c d H; apply beta_lex_inv; zarith. Qed. Lemma sum_mul_carry : forall xh xl yh yl wc cc, [|wc|]*wB^2 + [[cc]] = [|xh|] * [|yl|] + [|xl|] * [|yh|] -> 0 <= [|wc|] <= 1. Proof. intros. apply (sum_mul_carry [|xh|] [|xl|] [|yh|] [|yl|] [|wc|][[cc]] wB);zarith. apply wB_pos. 
Qed. Theorem mult_add_ineq: forall xH yH crossH, 0 <= [|xH|] * [|yH|] + [|crossH|] < wwB. Proof. intros;rewrite wwB_wBwB;apply mult_add_ineq;zarith. Qed. Hint Resolve mult_add_ineq : mult. Lemma spec_mul_aux : forall xh xl yh yl wc (cc:zn2z w) hh ll, [[hh]] = [|xh|] * [|yh|] -> [[ll]] = [|xl|] * [|yl|] -> [|wc|]*wB^2 + [[cc]] = [|xh|] * [|yl|] + [|xl|] * [|yh|] -> [||match cc with | W0 => WW (ww_add hh (w_W0 wc)) ll | WW cch ccl => match ww_add_c (w_W0 ccl) ll with | C0 l => WW (ww_add hh (w_WW wc cch)) l | C1 l => WW (ww_add_carry hh (w_WW wc cch)) l end end||] = ([|xh|] * wB + [|xl|]) * ([|yh|] * wB + [|yl|]). Proof. intros;assert (U1 := wB_pos w_digits). replace (([|xh|] * wB + [|xl|]) * ([|yh|] * wB + [|yl|])) with ([|xh|]*[|yh|]*wB^2 + ([|xh|]*[|yl|] + [|xl|]*[|yh|])*wB + [|xl|]*[|yl|]). 2:ring. rewrite <- H1;rewrite <- H;rewrite <- H0. assert (H2 := sum_mul_carry _ _ _ _ _ _ H1). destruct cc as [ | cch ccl]; simpl zn2z_to_Z; simpl ww_to_Z. rewrite spec_ww_add;rewrite spec_w_W0;rewrite Zmod_small; rewrite wwB_wBwB. ring. rewrite <- (Z.add_0_r ([|wc|]*wB));rewrite H;apply mult_add_ineq3;zarith. simpl ww_to_Z in H1. assert (U:=spec_to_Z cch). assert ([|wc|]*wB + [|cch|] <= 2*wB - 3). destruct (Z_le_gt_dec ([|wc|]*wB + [|cch|]) (2*wB - 3));trivial. assert ([|xh|] * [|yl|] + [|xl|] * [|yh|] <= (2*wB - 4)*wB + 2). ring_simplify ((2*wB - 4)*wB + 2). assert (H4 := Zmult_lt_b _ _ _ (spec_to_Z xh) (spec_to_Z yl)). assert (H5 := Zmult_lt_b _ _ _ (spec_to_Z xl) (spec_to_Z yh)). omega. generalize H3;clear H3;rewrite <- H1. rewrite Z.add_assoc; rewrite Z.pow_2_r; rewrite Z.mul_assoc; rewrite <- Z.mul_add_distr_r. assert (((2 * wB - 4) + 2)*wB <= ([|wc|] * wB + [|cch|])*wB). apply Z.mul_le_mono_nonneg;zarith. rewrite Z.mul_add_distr_r in H3. intros. assert (U2 := spec_to_Z ccl);omega. generalize (spec_ww_add_c (w_W0 ccl) ll);destruct (ww_add_c (w_W0 ccl) ll) as [l|l];unfold interp_carry;rewrite spec_w_W0;try rewrite Z.mul_1_l; simpl zn2z_to_Z; try rewrite spec_ww_add;try rewrite spec_ww_add_carry;rewrite spec_w_WW; rewrite Zmod_small;rewrite wwB_wBwB;intros. rewrite H4;ring. rewrite H;apply mult_add_ineq2;zarith. rewrite Z.add_assoc;rewrite Z.mul_add_distr_r. rewrite Z.mul_1_l;rewrite <- Z.add_assoc;rewrite H4;ring. repeat rewrite <- Z.add_assoc;rewrite H;apply mult_add_ineq2;zarith. Qed. Lemma spec_double_mul_c : forall cross:w->w->w->w->zn2z w -> zn2z w -> w*zn2z w, (forall xh xl yh yl hh ll, [[hh]] = [|xh|]*[|yh|] -> [[ll]] = [|xl|]*[|yl|] -> let (wc,cc) := cross xh xl yh yl hh ll in [|wc|]*wwB + [[cc]] = [|xh|]*[|yl|] + [|xl|]*[|yh|]) -> forall x y, [||double_mul_c cross x y||] = [[x]] * [[y]]. Proof. intros cross Hcross x y;destruct x as [ |xh xl];simpl;trivial. destruct y as [ |yh yl];simpl. rewrite Z.mul_0_r;trivial. assert (H1:= spec_w_mul_c xh yh);assert (H2:= spec_w_mul_c xl yl). generalize (Hcross _ _ _ _ _ _ H1 H2). destruct (cross xh xl yh yl (w_mul_c xh yh) (w_mul_c xl yl)) as (wc,cc). intros;apply spec_mul_aux;trivial. rewrite <- wwB_wBwB;trivial. Qed. Lemma spec_ww_mul_c : forall x y, [||ww_mul_c x y||] = [[x]] * [[y]]. Proof. intros x y;unfold ww_mul_c;apply spec_double_mul_c. intros xh xl yh yl hh ll H1 H2. generalize (spec_ww_add_c (w_mul_c xh yl) (w_mul_c xl yh)); destruct (ww_add_c (w_mul_c xh yl) (w_mul_c xl yh)) as [c|c]; unfold interp_carry;repeat rewrite spec_w_mul_c;intros H; (rewrite spec_w_0 || rewrite spec_w_1);rewrite H;ring. Qed. Lemma spec_w_2: [|w_2|] = 2. unfold w_2; rewrite spec_w_add; rewrite spec_w_1; simpl. apply Zmod_small; split; auto with zarith. 
rewrite <- (Z.pow_1_r 2); unfold base; apply Zpower_lt_monotone; auto with zarith. Qed. Lemma kara_prod_aux : forall xh xl yh yl, xh*yh + xl*yl - (xh-xl)*(yh-yl) = xh*yl + xl*yh. Proof. intros;ring. Qed. Lemma spec_kara_prod : forall xh xl yh yl hh ll, [[hh]] = [|xh|]*[|yh|] -> [[ll]] = [|xl|]*[|yl|] -> let (wc,cc) := kara_prod xh xl yh yl hh ll in [|wc|]*wwB + [[cc]] = [|xh|]*[|yl|] + [|xl|]*[|yh|]. Proof. intros xh xl yh yl hh ll H H0; rewrite <- kara_prod_aux; rewrite <- H; rewrite <- H0; unfold kara_prod. assert (Hxh := (spec_to_Z xh)); assert (Hxl := (spec_to_Z xl)); assert (Hyh := (spec_to_Z yh)); assert (Hyl := (spec_to_Z yl)). generalize (spec_ww_add_c hh ll); case (ww_add_c hh ll); intros z Hz; rewrite <- Hz; unfold interp_carry; assert (Hz1 := (spec_ww_to_Z z)). rewrite spec_w_compare; case Z.compare_spec; intros Hxlh; try rewrite Hxlh; try rewrite spec_w_0; try (ring; fail). rewrite spec_w_compare; case Z.compare_spec; intros Hylh. rewrite Hylh; rewrite spec_w_0; try (ring; fail). rewrite spec_w_0; try (ring; fail). repeat (rewrite spec_ww_sub || rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). split; auto with zarith. simpl in Hz; rewrite Hz; rewrite H; rewrite H0. rewrite kara_prod_aux; apply Z.add_nonneg_nonneg; apply Z.mul_nonneg_nonneg; auto with zarith. apply Z.le_lt_trans with ([[z]]-0); auto with zarith. unfold Z.sub; apply Z.add_le_mono_l; apply Z.le_0_sub; simpl; rewrite Z.opp_involutive. apply Z.mul_nonneg_nonneg; auto with zarith. match goal with |- context[ww_add_c ?x ?y] => generalize (spec_ww_add_c x y); case (ww_add_c x y); try rewrite spec_w_0; intros z1 Hz2 end. simpl in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_1; unfold interp_carry in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_compare; case Z.compare_spec; intros Hylh. rewrite Hylh; rewrite spec_w_0; try (ring; fail). match goal with |- context[ww_add_c ?x ?y] => generalize (spec_ww_add_c x y); case (ww_add_c x y); try rewrite spec_w_0; intros z1 Hz2 end. simpl in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_1; unfold interp_carry in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_0; try (ring; fail). repeat (rewrite spec_ww_sub || rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). split. match goal with |- context[(?x - ?y) * (?z - ?t)] => replace ((x - y) * (z - t)) with ((y - x) * (t - z)); [idtac | ring] end. simpl in Hz; rewrite Hz; rewrite H; rewrite H0. rewrite kara_prod_aux; apply Z.add_nonneg_nonneg; apply Z.mul_nonneg_nonneg; auto with zarith. apply Z.le_lt_trans with ([[z]]-0); auto with zarith. unfold Z.sub; apply Z.add_le_mono_l; apply Z.le_0_sub; simpl; rewrite Z.opp_involutive. apply Z.mul_nonneg_nonneg; auto with zarith. (** there is a carry in hh + ll **) rewrite Z.mul_1_l. rewrite spec_w_compare; case Z.compare_spec; intros Hxlh; try rewrite Hxlh; try rewrite spec_w_1; try (ring; fail). rewrite spec_w_compare; case Z.compare_spec; intros Hylh; try rewrite Hylh; try rewrite spec_w_1; try (ring; fail). 
match goal with |- context[ww_sub_c ?x ?y] => generalize (spec_ww_sub_c x y); case (ww_sub_c x y); try rewrite spec_w_1; intros z1 Hz2 end. simpl in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_0; rewrite Z.mul_0_l; rewrite Z.add_0_l. generalize Hz2; clear Hz2; unfold interp_carry. repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). match goal with |- context[ww_add_c ?x ?y] => generalize (spec_ww_add_c x y); case (ww_add_c x y); try rewrite spec_w_1; intros z1 Hz2 end. simpl in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_2; unfold interp_carry in Hz2. transitivity (wwB + (1 * wwB + [[z1]])). ring. rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_compare; case Z.compare_spec; intros Hylh; try rewrite Hylh; try rewrite spec_w_1; try (ring; fail). match goal with |- context[ww_add_c ?x ?y] => generalize (spec_ww_add_c x y); case (ww_add_c x y); try rewrite spec_w_1; intros z1 Hz2 end. simpl in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_2; unfold interp_carry in Hz2. transitivity (wwB + (1 * wwB + [[z1]])). ring. rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). match goal with |- context[ww_sub_c ?x ?y] => generalize (spec_ww_sub_c x y); case (ww_sub_c x y); try rewrite spec_w_1; intros z1 Hz2 end. simpl in Hz2; rewrite Hz2; repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). rewrite spec_w_0; rewrite Z.mul_0_l; rewrite Z.add_0_l. match goal with |- context[(?x - ?y) * (?z - ?t)] => replace ((x - y) * (z - t)) with ((y - x) * (t - z)); [idtac | ring] end. generalize Hz2; clear Hz2; unfold interp_carry. repeat (rewrite spec_w_sub || rewrite spec_w_mul_c). repeat rewrite Zmod_small; auto with zarith; try (ring; fail). Qed. Lemma sub_carry : forall xh xl yh yl z, 0 <= z -> [|xh|]*[|yl|] + [|xl|]*[|yh|] = wwB + z -> z < wwB. Proof. intros xh xl yh yl z Hle Heq. destruct (Z_le_gt_dec wwB z);auto with zarith. generalize (Zmult_lt_b _ _ _ (spec_to_Z xh) (spec_to_Z yl)). generalize (Zmult_lt_b _ _ _ (spec_to_Z xl) (spec_to_Z yh)). rewrite <- wwB_wBwB;intros H1 H2. assert (H3 := wB_pos w_digits). assert (2*wB <= wwB). rewrite wwB_wBwB; rewrite Z.pow_2_r; apply Z.mul_le_mono_nonneg;zarith. omega. Qed. Ltac Spec_ww_to_Z x := let H:= fresh "H" in assert (H:= spec_ww_to_Z x). Ltac Zmult_lt_b x y := let H := fresh "H" in assert (H := Zmult_lt_b _ _ _ (spec_to_Z x) (spec_to_Z y)). Lemma spec_ww_karatsuba_c : forall x y, [||ww_karatsuba_c x y||]=[[x]]*[[y]]. Proof. intros x y; unfold ww_karatsuba_c;apply spec_double_mul_c. intros; apply spec_kara_prod; auto. Qed. Lemma spec_ww_mul : forall x y, [[ww_mul x y]] = [[x]]*[[y]] mod wwB. Proof. assert (U:= lt_0_wB w_digits). assert (U1:= lt_0_wwB w_digits). intros x y; case x; auto; intros xh xl. case y; auto. simpl; rewrite Z.mul_0_r; rewrite Zmod_small; auto with zarith. intros yh yl;simpl. repeat (rewrite spec_ww_add || rewrite spec_w_W0 || rewrite spec_w_mul_c || rewrite spec_w_add || rewrite spec_w_mul). rewrite <- Zplus_mod; auto with zarith. 
repeat (rewrite Z.mul_add_distr_r || rewrite Z.mul_add_distr_l). rewrite <- Zmult_mod_distr_r; auto with zarith. rewrite <- Z.pow_2_r; rewrite <- wwB_wBwB; auto with zarith. rewrite Zplus_mod; auto with zarith. rewrite Zmod_mod; auto with zarith. rewrite <- Zplus_mod; auto with zarith. match goal with |- ?X mod _ = _ => rewrite <- Z_mod_plus with (a := X) (b := [|xh|] * [|yh|]) end; auto with zarith. f_equal; auto; rewrite wwB_wBwB; ring. Qed. Lemma spec_ww_square_c : forall x, [||ww_square_c x||] = [[x]]*[[x]]. Proof. destruct x as [ |xh xl];simpl;trivial. case_eq match ww_add_c (w_mul_c xh xl) (w_mul_c xh xl) with | C0 cc => (w_0, cc) | C1 cc => (w_1, cc) end;intros wc cc Heq. apply (spec_mul_aux xh xl xh xl wc cc);trivial. generalize Heq (spec_ww_add_c (w_mul_c xh xl) (w_mul_c xh xl));clear Heq. rewrite spec_w_mul_c;destruct (ww_add_c (w_mul_c xh xl) (w_mul_c xh xl)); unfold interp_carry;try rewrite Z.mul_1_l;intros Heq Heq';inversion Heq; rewrite (Z.mul_comm [|xl|]);subst. rewrite spec_w_0;rewrite Z.mul_0_l;rewrite Z.add_0_l;trivial. rewrite spec_w_1;rewrite Z.mul_1_l;rewrite <- wwB_wBwB;trivial. Qed. Section DoubleMulAddn1Proof. Variable w_mul_add : w -> w -> w -> w * w. Variable spec_w_mul_add : forall x y r, let (h,l):= w_mul_add x y r in [|h|]*wB+[|l|] = [|x|]*[|y|] + [|r|]. Lemma spec_double_mul_add_n1 : forall n x y r, let (h,l) := double_mul_add_n1 w_mul_add n x y r in [|h|]*double_wB w_digits n + [!n|l!] = [!n|x!]*[|y|]+[|r|]. Proof. induction n;intros x y r;trivial. exact (spec_w_mul_add x y r). unfold double_mul_add_n1;destruct x as[ |xh xl]; fold(double_mul_add_n1 w_mul_add). rewrite spec_w_0;rewrite spec_extend;simpl;trivial. assert(H:=IHn xl y r);destruct (double_mul_add_n1 w_mul_add n xl y r)as(rl,l). assert(U:=IHn xh y rl);destruct(double_mul_add_n1 w_mul_add n xh y rl)as(rh,h). rewrite <- double_wB_wwB. rewrite spec_double_WW;simpl;trivial. rewrite Z.mul_add_distr_r;rewrite <- Z.add_assoc;rewrite <- H. rewrite Z.mul_assoc;rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite U;ring. Qed. End DoubleMulAddn1Proof. Lemma spec_w_mul_add : forall x y r, let (h,l):= w_mul_add x y r in [|h|]*wB+[|l|] = [|x|]*[|y|] + [|r|]. Proof. intros x y r;unfold w_mul_add;assert (H:=spec_w_mul_c x y); destruct (w_mul_c x y) as [ |h l];simpl;rewrite <- H. rewrite spec_w_0;trivial. assert (U:=spec_w_add_c l r);destruct (w_add_c l r) as [lr|lr];unfold interp_carry in U;try rewrite Z.mul_1_l in H;simpl. rewrite U;ring. rewrite spec_w_succ. rewrite Zmod_small. rewrite <- Z.add_assoc;rewrite <- U;ring. simpl in H;assert (H1:= Zmult_lt_b _ _ _ (spec_to_Z x) (spec_to_Z y)). rewrite <- H in H1. assert (H2:=spec_to_Z h);split;zarith. case H1;clear H1;intro H1;clear H1. replace (wB ^ 2 - 2 * wB) with ((wB - 2)*wB). 2:ring. intros H0;assert (U1:= wB_pos w_digits). assert (H1 := beta_lex _ _ _ _ _ H0 (spec_to_Z l));zarith. Qed. (* End DoubleProof. *) End DoubleMul. coq-8.4pl2/theories/Numbers/Cyclic/DoubleCyclic/DoubleLift.v0000640000175000001440000004603012010532755023056 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* w -> zn2z w. Variable w_W0 : w -> zn2z w. Variable w_0W : w -> zn2z w. Variable w_compare : w -> w -> comparison. Variable ww_compare : zn2z w -> zn2z w -> comparison. Variable w_head0 : w -> w. Variable w_tail0 : w -> w. Variable w_add: w -> w -> zn2z w. Variable w_add_mul_div : w -> w -> w -> w. Variable ww_sub: zn2z w -> zn2z w -> zn2z w. Variable w_digits : positive. 
Variable ww_Digits : positive. Variable w_zdigits : w. Variable ww_zdigits : zn2z w. Variable low: zn2z w -> w. Definition ww_head0 x := match x with | W0 => ww_zdigits | WW xh xl => match w_compare w_0 xh with | Eq => w_add w_zdigits (w_head0 xl) | _ => w_0W (w_head0 xh) end end. Definition ww_tail0 x := match x with | W0 => ww_zdigits | WW xh xl => match w_compare w_0 xl with | Eq => w_add w_zdigits (w_tail0 xh) | _ => w_0W (w_tail0 xl) end end. (* 0 < p < ww_digits *) Definition ww_add_mul_div p x y := let zdigits := w_0W w_zdigits in match x, y with | W0, W0 => W0 | W0, WW yh yl => match ww_compare p zdigits with | Eq => w_0W yh | Lt => w_0W (w_add_mul_div (low p) w_0 yh) | Gt => let n := low (ww_sub p zdigits) in w_WW (w_add_mul_div n w_0 yh) (w_add_mul_div n yh yl) end | WW xh xl, W0 => match ww_compare p zdigits with | Eq => w_W0 xl | Lt => w_WW (w_add_mul_div (low p) xh xl) (w_add_mul_div (low p) xl w_0) | Gt => let n := low (ww_sub p zdigits) in w_W0 (w_add_mul_div n xl w_0) end | WW xh xl, WW yh yl => match ww_compare p zdigits with | Eq => w_WW xl yh | Lt => w_WW (w_add_mul_div (low p) xh xl) (w_add_mul_div (low p) xl yh) | Gt => let n := low (ww_sub p zdigits) in w_WW (w_add_mul_div n xl yh) (w_add_mul_div n yh yl) end end. Section DoubleProof. Variable w_to_Z : w -> Z. Notation wB := (base w_digits). Notation wwB := (base (ww_digits w_digits)). Notation "[| x |]" := (w_to_Z x) (at level 0, x at level 99). Notation "[[ x ]]" := (ww_to_Z w_digits w_to_Z x)(at level 0, x at level 99). Variable spec_w_0 : [|w_0|] = 0. Variable spec_to_Z : forall x, 0 <= [|x|] < wB. Variable spec_to_w_Z : forall x, 0 <= [[x]] < wwB. Variable spec_w_WW : forall h l, [[w_WW h l]] = [|h|] * wB + [|l|]. Variable spec_w_W0 : forall h, [[w_W0 h]] = [|h|] * wB. Variable spec_w_0W : forall l, [[w_0W l]] = [|l|]. Variable spec_compare : forall x y, w_compare x y = Z.compare [|x|] [|y|]. Variable spec_ww_compare : forall x y, ww_compare x y = Z.compare [[x]] [[y]]. Variable spec_ww_digits : ww_Digits = xO w_digits. Variable spec_w_head00 : forall x, [|x|] = 0 -> [|w_head0 x|] = Zpos w_digits. Variable spec_w_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ ([|w_head0 x|]) * [|x|] < wB. Variable spec_w_tail00 : forall x, [|x|] = 0 -> [|w_tail0 x|] = Zpos w_digits. Variable spec_w_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2* y + 1) * (2 ^ [|w_tail0 x|]). Variable spec_w_add_mul_div : forall x y p, [|p|] <= Zpos w_digits -> [| w_add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos w_digits) - [|p|]))) mod wB. Variable spec_w_add: forall x y, [[w_add x y]] = [|x|] + [|y|]. Variable spec_ww_sub: forall x y, [[ww_sub x y]] = ([[x]] - [[y]]) mod wwB. Variable spec_zdigits : [| w_zdigits |] = Zpos w_digits. Variable spec_low: forall x, [| low x|] = [[x]] mod wB. Variable spec_ww_zdigits : [[ww_zdigits]] = Zpos ww_Digits. Hint Resolve div_le_0 div_lt w_to_Z_wwB: lift. Ltac zarith := auto with zarith lift. Lemma spec_ww_head00 : forall x, [[x]] = 0 -> [[ww_head0 x]] = Zpos ww_Digits. Proof. intros x; case x; unfold ww_head0. intros HH; rewrite spec_ww_zdigits; auto. intros xh xl; simpl; intros Hx. case (spec_to_Z xh); intros Hx1 Hx2. case (spec_to_Z xl); intros Hy1 Hy2. assert (F1: [|xh|] = 0). { Z.le_elim Hy1; auto. - absurd (0 < [|xh|] * wB + [|xl|]); auto with zarith. apply Z.lt_le_trans with (1 := Hy1); auto with zarith. pattern [|xl|] at 1; rewrite <- (Z.add_0_l [|xl|]). apply Z.add_le_mono_r; auto with zarith. - Z.le_elim Hx1; auto. absurd (0 < [|xh|] * wB + [|xl|]); auto with zarith. 
rewrite <- Hy1; rewrite Z.add_0_r; auto with zarith. apply Z.mul_pos_pos; auto with zarith. } rewrite spec_compare. case Z.compare_spec. intros H; simpl. rewrite spec_w_add; rewrite spec_w_head00. rewrite spec_zdigits; rewrite spec_ww_digits. rewrite Pos2Z.inj_xO; auto with zarith. rewrite F1 in Hx; auto with zarith. rewrite spec_w_0; auto with zarith. rewrite spec_w_0; auto with zarith. Qed. Lemma spec_ww_head0 : forall x, 0 < [[x]] -> wwB/ 2 <= 2 ^ [[ww_head0 x]] * [[x]] < wwB. Proof. clear spec_ww_zdigits. rewrite wwB_div_2;rewrite Z.mul_comm;rewrite wwB_wBwB. assert (U:= lt_0_wB w_digits); destruct x as [ |xh xl];simpl ww_to_Z;intros H. unfold Z.lt in H;discriminate H. rewrite spec_compare, spec_w_0. case Z.compare_spec; intros H0. rewrite <- H0 in *. simpl Z.add. simpl in H. case (spec_to_Z w_zdigits); case (spec_to_Z (w_head0 xl)); intros HH1 HH2 HH3 HH4. rewrite spec_w_add. rewrite spec_zdigits; rewrite Zpower_exp; auto with zarith. case (spec_w_head0 H); intros H1 H2. rewrite Z.pow_2_r; fold wB; rewrite <- Z.mul_assoc; split. apply Z.mul_le_mono_nonneg_l; auto with zarith. apply Z.mul_lt_mono_pos_l; auto with zarith. assert (H1 := spec_w_head0 H0). rewrite spec_w_0W. split. rewrite Z.mul_add_distr_l;rewrite Z.mul_assoc. apply Z.le_trans with (2 ^ [|w_head0 xh|] * [|xh|] * wB). rewrite Z.mul_comm; zarith. assert (0 <= 2 ^ [|w_head0 xh|] * [|xl|]);zarith. assert (H2:=spec_to_Z xl);apply Z.mul_nonneg_nonneg;zarith. case (spec_to_Z (w_head0 xh)); intros H2 _. generalize ([|w_head0 xh|]) H1 H2;clear H1 H2; intros p H1 H2. assert (Eq1 : 2^p < wB). rewrite <- (Z.mul_1_r (2^p));apply Z.le_lt_trans with (2^p*[|xh|]);zarith. assert (Eq2: p < Zpos w_digits). destruct (Z.le_gt_cases (Zpos w_digits) p);trivial;contradict Eq1. apply Z.le_ngt;unfold base;apply Zpower_le_monotone;zarith. assert (Zpos w_digits = p + (Zpos w_digits - p)). ring. rewrite Z.pow_2_r. unfold base at 2;rewrite H3;rewrite Zpower_exp;zarith. rewrite <- Z.mul_assoc; apply Z.mul_lt_mono_pos_l; zarith. rewrite <- (Z.add_0_r (2^(Zpos w_digits - p)*wB));apply beta_lex_inv;zarith. apply Z.mul_lt_mono_pos_r with (2 ^ p); zarith. rewrite <- Zpower_exp;zarith. rewrite Z.mul_comm;ring_simplify (Zpos w_digits - p + p);fold wB;zarith. assert (H1 := spec_to_Z xh);zarith. Qed. Lemma spec_ww_tail00 : forall x, [[x]] = 0 -> [[ww_tail0 x]] = Zpos ww_Digits. Proof. intros x; case x; unfold ww_tail0. intros HH; rewrite spec_ww_zdigits; auto. intros xh xl; simpl; intros Hx. case (spec_to_Z xh); intros Hx1 Hx2. case (spec_to_Z xl); intros Hy1 Hy2. assert (F1: [|xh|] = 0). { Z.le_elim Hy1; auto. - absurd (0 < [|xh|] * wB + [|xl|]); auto with zarith. apply Z.lt_le_trans with (1 := Hy1); auto with zarith. pattern [|xl|] at 1; rewrite <- (Z.add_0_l [|xl|]). apply Z.add_le_mono_r; auto with zarith. - Z.le_elim Hx1; auto. absurd (0 < [|xh|] * wB + [|xl|]); auto with zarith. rewrite <- Hy1; rewrite Z.add_0_r; auto with zarith. apply Z.mul_pos_pos; auto with zarith. } assert (F2: [|xl|] = 0). rewrite F1 in Hx; auto with zarith. rewrite spec_compare; case Z.compare_spec. intros H; simpl. rewrite spec_w_add; rewrite spec_w_tail00; auto. rewrite spec_zdigits; rewrite spec_ww_digits. rewrite Pos2Z.inj_xO; auto with zarith. rewrite spec_w_0; auto with zarith. rewrite spec_w_0; auto with zarith. Qed. Lemma spec_ww_tail0 : forall x, 0 < [[x]] -> exists y, 0 <= y /\ [[x]] = (2 * y + 1) * 2 ^ [[ww_tail0 x]]. Proof. clear spec_ww_zdigits. destruct x as [ |xh xl];simpl ww_to_Z;intros H. unfold Z.lt in H;discriminate H. rewrite spec_compare, spec_w_0. 
case Z.compare_spec; intros H0. rewrite <- H0; rewrite Z.add_0_r. case (spec_to_Z (w_tail0 xh)); intros HH1 HH2. generalize H; rewrite <- H0; rewrite Z.add_0_r; clear H; intros H. case (@spec_w_tail0 xh). apply Z.mul_lt_mono_pos_r with wB; auto with zarith. unfold base; auto with zarith. intros z (Hz1, Hz2); exists z; split; auto. rewrite spec_w_add; rewrite (fun x => Z.add_comm [|x|]). rewrite spec_zdigits; rewrite Zpower_exp; auto with zarith. rewrite Z.mul_assoc; rewrite <- Hz2; auto. case (spec_to_Z (w_tail0 xh)); intros HH1 HH2. case (spec_w_tail0 H0); intros z (Hz1, Hz2). assert (Hp: [|w_tail0 xl|] < Zpos w_digits). case (Z.le_gt_cases (Zpos w_digits) [|w_tail0 xl|]); auto; intros H1. absurd (2 ^ (Zpos w_digits) <= 2 ^ [|w_tail0 xl|]). apply Z.lt_nge. case (spec_to_Z xl); intros HH3 HH4. apply Z.le_lt_trans with (2 := HH4). apply Z.le_trans with (1 * 2 ^ [|w_tail0 xl|]); auto with zarith. rewrite Hz2. apply Z.mul_le_mono_nonneg_r; auto with zarith. apply Zpower_le_monotone; auto with zarith. exists ([|xh|] * (2 ^ ((Zpos w_digits - [|w_tail0 xl|]) - 1)) + z); split. apply Z.add_nonneg_nonneg; auto. apply Z.mul_nonneg_nonneg; auto with zarith. case (spec_to_Z xh); auto. rewrite spec_w_0W. rewrite (Z.mul_add_distr_l 2); rewrite <- Z.add_assoc. rewrite Z.mul_add_distr_r; rewrite <- Hz2. apply f_equal2 with (f := Z.add); auto. rewrite (Z.mul_comm 2). repeat rewrite <- Z.mul_assoc. apply f_equal2 with (f := Z.mul); auto. case (spec_to_Z (w_tail0 xl)); intros HH3 HH4. pattern 2 at 2; rewrite <- Z.pow_1_r. lazy beta; repeat rewrite <- Zpower_exp; auto with zarith. unfold base; apply f_equal with (f := Z.pow 2); auto with zarith. contradict H0; case (spec_to_Z xl); auto with zarith. Qed. Hint Rewrite Zdiv_0_l Z.mul_0_l Z.add_0_l Z.mul_0_r Z.add_0_r spec_w_W0 spec_w_0W spec_w_WW spec_w_0 (wB_div w_digits w_to_Z spec_to_Z) (wB_div_plus w_digits w_to_Z spec_to_Z) : w_rewrite. Ltac w_rewrite := autorewrite with w_rewrite;trivial. Lemma spec_ww_add_mul_div_aux : forall xh xl yh yl p, let zdigits := w_0W w_zdigits in [[p]] <= Zpos (xO w_digits) -> [[match ww_compare p zdigits with | Eq => w_WW xl yh | Lt => w_WW (w_add_mul_div (low p) xh xl) (w_add_mul_div (low p) xl yh) | Gt => let n := low (ww_sub p zdigits) in w_WW (w_add_mul_div n xl yh) (w_add_mul_div n yh yl) end]] = ([[WW xh xl]] * (2^[[p]]) + [[WW yh yl]] / (2^(Zpos (xO w_digits) - [[p]]))) mod wwB. Proof. clear spec_ww_zdigits. intros xh xl yh yl p zdigits;assert (HwwB := wwB_pos w_digits). case (spec_to_w_Z p); intros Hv1 Hv2. replace (Zpos (xO w_digits)) with (Zpos w_digits + Zpos w_digits). 2 : rewrite Pos2Z.inj_xO;ring. replace (Zpos w_digits + Zpos w_digits - [[p]]) with (Zpos w_digits + (Zpos w_digits - [[p]])). 2:ring. intros Hp; assert (Hxh := spec_to_Z xh);assert (Hxl:=spec_to_Z xl); assert (Hx := spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW xh xl)); simpl in Hx;assert (Hyh := spec_to_Z yh);assert (Hyl:=spec_to_Z yl); assert (Hy:=spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW yh yl));simpl in Hy. rewrite spec_ww_compare; case Z.compare_spec; intros H1. rewrite H1; unfold zdigits; rewrite spec_w_0W. rewrite spec_zdigits; rewrite Z.sub_diag; rewrite Z.add_0_r. simpl ww_to_Z; w_rewrite;zarith. fold wB. rewrite Z.mul_add_distr_r;rewrite <- Z.mul_assoc;rewrite <- Z.add_assoc. rewrite <- Z.pow_2_r. rewrite <- wwB_wBwB;apply Zmod_unique with [|xh|]. exact (spec_ww_to_Z w_digits w_to_Z spec_to_Z (WW xl yh)). ring. simpl ww_to_Z; w_rewrite;zarith. assert (HH0: [|low p|] = [[p]]). rewrite spec_low. apply Zmod_small. 
case (spec_to_w_Z p); intros HH1 HH2; split; auto. generalize H1; unfold zdigits; rewrite spec_w_0W; rewrite spec_zdigits; intros tmp. apply Z.lt_le_trans with (1 := tmp). unfold base. apply Zpower2_le_lin; auto with zarith. 2: generalize H1; unfold zdigits; rewrite spec_w_0W; rewrite spec_zdigits; auto with zarith. generalize H1; unfold zdigits; rewrite spec_w_0W; rewrite spec_zdigits; auto; clear H1; intros H1. assert (HH: [|low p|] <= Zpos w_digits). rewrite HH0; auto with zarith. repeat rewrite spec_w_add_mul_div with (1 := HH). rewrite HH0. rewrite Z.mul_add_distr_r. pattern ([|xl|] * 2 ^ [[p]]) at 2; rewrite shift_unshift_mod with (n:= Zpos w_digits);fold wB;zarith. replace ([|xh|] * wB * 2^[[p]]) with ([|xh|] * 2^[[p]] * wB). 2:ring. rewrite Z.add_assoc;rewrite <- Z.mul_add_distr_r. rewrite <- Z.add_assoc. unfold base at 5;rewrite <- Zmod_shift_r;zarith. unfold base;rewrite Zmod_shift_r with (b:= Zpos (ww_digits w_digits)); fold wB;fold wwB;zarith. rewrite wwB_wBwB;rewrite Z.pow_2_r; rewrite Zmult_mod_distr_r;zarith. unfold ww_digits;rewrite Pos2Z.inj_xO;zarith. apply Z_mod_lt;zarith. split;zarith. apply Zdiv_lt_upper_bound;zarith. rewrite <- Zpower_exp;zarith. ring_simplify ([[p]] + (Zpos w_digits - [[p]]));fold wB;zarith. assert (Hv: [[p]] > Zpos w_digits). generalize H1; clear H1. unfold zdigits; rewrite spec_w_0W; rewrite spec_zdigits; auto with zarith. clear H1. assert (HH0: [|low (ww_sub p zdigits)|] = [[p]] - Zpos w_digits). rewrite spec_low. rewrite spec_ww_sub. unfold zdigits; rewrite spec_w_0W; rewrite spec_zdigits. rewrite <- Zmod_div_mod; auto with zarith. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.le_lt_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_lt_lin; auto with zarith. exists wB; unfold base. unfold ww_digits; rewrite (Pos2Z.inj_xO w_digits). rewrite <- Zpower_exp; auto with zarith. apply f_equal with (f := fun x => 2 ^ x); auto with zarith. assert (HH: [|low (ww_sub p zdigits)|] <= Zpos w_digits). rewrite HH0; auto with zarith. replace (Zpos w_digits + (Zpos w_digits - [[p]])) with (Zpos w_digits - ([[p]] - Zpos w_digits)); zarith. lazy zeta; simpl ww_to_Z; w_rewrite;zarith. repeat rewrite spec_w_add_mul_div;zarith. rewrite HH0. pattern wB at 5;replace wB with (2^(([[p]] - Zpos w_digits) + (Zpos w_digits - ([[p]] - Zpos w_digits)))). rewrite Zpower_exp;zarith. rewrite Z.mul_assoc. rewrite Z_div_plus_l;zarith. rewrite shift_unshift_mod with (a:= [|yh|]) (p:= [[p]] - Zpos w_digits) (n := Zpos w_digits);zarith. fold wB. set (u := [[p]] - Zpos w_digits). replace [[p]] with (u + Zpos w_digits);zarith. rewrite Zpower_exp;zarith. rewrite Z.mul_assoc. fold wB. repeat rewrite Z.add_assoc. rewrite <- Z.mul_add_distr_r. repeat rewrite <- Z.add_assoc. unfold base;rewrite Zmod_shift_r with (b:= Zpos (ww_digits w_digits)); fold wB;fold wwB;zarith. unfold base;rewrite Zmod_shift_r with (a:= Zpos w_digits) (b:= Zpos w_digits);fold wB;fold wwB;zarith. rewrite wwB_wBwB; rewrite Z.pow_2_r; rewrite Zmult_mod_distr_r;zarith. rewrite Z.mul_add_distr_r. replace ([|xh|] * wB * 2 ^ u) with ([|xh|]*2^u*wB). 2:ring. repeat rewrite <- Z.add_assoc. rewrite (Z.add_comm ([|xh|] * 2 ^ u * wB)). rewrite Z_mod_plus;zarith. rewrite Z_mod_mult;zarith. unfold base;rewrite <- Zmod_shift_r;zarith. fold base;apply Z_mod_lt;zarith. unfold u; split;zarith. split;zarith. unfold u; apply Zdiv_lt_upper_bound;zarith. rewrite <- Zpower_exp;zarith. fold u. ring_simplify (u + (Zpos w_digits - u)); fold wB;zarith. unfold ww_digits;rewrite Pos2Z.inj_xO;zarith. 
unfold base;rewrite <- Zmod_shift_r;zarith. fold base;apply Z_mod_lt;zarith. unfold u; split;zarith. unfold u; split;zarith. apply Zdiv_lt_upper_bound;zarith. rewrite <- Zpower_exp;zarith. fold u. ring_simplify (u + (Zpos w_digits - u)); fold wB; auto with zarith. unfold u;zarith. unfold u;zarith. set (u := [[p]] - Zpos w_digits). ring_simplify (u + (Zpos w_digits - u)); fold wB; auto with zarith. Qed. Lemma spec_ww_add_mul_div : forall x y p, [[p]] <= Zpos (xO w_digits) -> [[ ww_add_mul_div p x y ]] = ([[x]] * (2^[[p]]) + [[y]] / (2^(Zpos (xO w_digits) - [[p]]))) mod wwB. Proof. clear spec_ww_zdigits. intros x y p H. destruct x as [ |xh xl]; [assert (H1 := @spec_ww_add_mul_div_aux w_0 w_0) |assert (H1 := @spec_ww_add_mul_div_aux xh xl)]; (destruct y as [ |yh yl]; [generalize (H1 w_0 w_0 p H) | generalize (H1 yh yl p H)]; clear H1;w_rewrite);simpl ww_add_mul_div. replace [[WW w_0 w_0]] with 0;[w_rewrite|simpl;w_rewrite;trivial]. intros Heq;rewrite <- Heq;clear Heq; auto. rewrite spec_ww_compare. case Z.compare_spec; intros H1; w_rewrite. rewrite (spec_w_add_mul_div w_0 w_0);w_rewrite;zarith. generalize H1; w_rewrite; rewrite spec_zdigits; clear H1; intros H1. assert (HH0: [|low p|] = [[p]]). rewrite spec_low. apply Zmod_small. case (spec_to_w_Z p); intros HH1 HH2; split; auto. apply Z.lt_le_trans with (1 := H1). unfold base; apply Zpower2_le_lin; auto with zarith. rewrite HH0; auto with zarith. replace [[WW w_0 w_0]] with 0;[w_rewrite|simpl;w_rewrite;trivial]. intros Heq;rewrite <- Heq;clear Heq. generalize (spec_ww_compare p (w_0W w_zdigits)); case ww_compare; intros H1; w_rewrite. rewrite (spec_w_add_mul_div w_0 w_0);w_rewrite;zarith. rewrite Pos2Z.inj_xO in H;zarith. assert (HH: [|low (ww_sub p (w_0W w_zdigits)) |] = [[p]] - Zpos w_digits). symmetry in H1; change ([[p]] > [[w_0W w_zdigits]]) in H1. revert H1. rewrite spec_low. rewrite spec_ww_sub; w_rewrite; intros H1. rewrite <- Zmod_div_mod; auto with zarith. rewrite Zmod_small; auto with zarith. split; auto with zarith. apply Z.le_lt_trans with (Zpos w_digits); auto with zarith. unfold base; apply Zpower2_lt_lin; auto with zarith. unfold base; auto with zarith. unfold base; auto with zarith. exists wB; unfold base. unfold ww_digits; rewrite (Pos2Z.inj_xO w_digits). rewrite <- Zpower_exp; auto with zarith. apply f_equal with (f := fun x => 2 ^ x); auto with zarith. case (spec_to_Z xh); auto with zarith. Qed. End DoubleProof. End DoubleLift. coq-8.4pl2/theories/Numbers/Cyclic/Abstract/0000750000175000001440000000000012127276547020051 5ustar notinuserscoq-8.4pl2/theories/Numbers/Cyclic/Abstract/NZCyclic.v0000640000175000001440000001262312010532755021706 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* eq) succ. Program Instance pred_wd : Proper (eq ==> eq) pred. Program Instance add_wd : Proper (eq ==> eq ==> eq) add. Program Instance sub_wd : Proper (eq ==> eq ==> eq) sub. Program Instance mul_wd : Proper (eq ==> eq ==> eq) mul. Theorem gt_wB_1 : 1 < wB. Proof. unfold base. apply Zpower_gt_1; unfold Z.lt; auto with zarith. Qed. Theorem gt_wB_0 : 0 < wB. Proof. pose proof gt_wB_1; auto with zarith. Qed. Lemma one_mod_wB : 1 mod wB = 1. Proof. rewrite Zmod_small. reflexivity. split. auto with zarith. apply gt_wB_1. Qed. Lemma succ_mod_wB : forall n : Z, (n + 1) mod wB = ((n mod wB) + 1) mod wB. Proof. intro n. rewrite <- one_mod_wB at 2. now rewrite <- Zplus_mod. Qed. 
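(* Illustration of the wrap-around captured by these mod-rewriting lemmas,
   assuming a hypothetical wB = 256:  (255 + 1) mod 256 = 0 and likewise
   ((255 mod 256) + 1) mod 256 = 0, i.e. the successor of the largest
   element of the cyclic structure is zero. *)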
Lemma pred_mod_wB : forall n : Z, (n - 1) mod wB = ((n mod wB) - 1) mod wB. Proof. intro n. rewrite <- one_mod_wB at 2. now rewrite Zminus_mod. Qed. Lemma NZ_to_Z_mod : forall n, [| n |] mod wB = [| n |]. Proof. intro n; rewrite Zmod_small. reflexivity. apply ZnZ.spec_to_Z. Qed. Theorem pred_succ : forall n, P (S n) == n. Proof. intro n. zify. rewrite <- pred_mod_wB. replace ([| n |] + 1 - 1)%Z with [| n |] by ring. apply NZ_to_Z_mod. Qed. Theorem one_succ : one == succ zero. Proof. zify; simpl. now rewrite one_mod_wB. Qed. Theorem two_succ : two == succ one. Proof. reflexivity. Qed. Section Induction. Variable A : t -> Prop. Hypothesis A_wd : Proper (eq ==> iff) A. Hypothesis A0 : A 0. Hypothesis AS : forall n, A n <-> A (S n). (* Below, we use only -> direction *) Let B (n : Z) := A (ZnZ.of_Z n). Lemma B0 : B 0. Proof. unfold B. setoid_replace (ZnZ.of_Z 0) with zero. assumption. red; zify. apply ZnZ.of_Z_correct. auto using gt_wB_0 with zarith. Qed. Lemma BS : forall n : Z, 0 <= n -> n < wB - 1 -> B n -> B (n + 1). Proof. intros n H1 H2 H3. unfold B in *. apply AS in H3. setoid_replace (ZnZ.of_Z (n + 1)) with (S (ZnZ.of_Z n)). assumption. zify. rewrite 2 ZnZ.of_Z_correct; auto with zarith. symmetry; apply Zmod_small; auto with zarith. Qed. Lemma B_holds : forall n : Z, 0 <= n < wB -> B n. Proof. intros n [H1 H2]. apply Zbounded_induction with wB. apply B0. apply BS. assumption. assumption. Qed. Theorem bi_induction : forall n, A n. Proof. intro n. setoid_replace n with (ZnZ.of_Z (ZnZ.to_Z n)). apply B_holds. apply ZnZ.spec_to_Z. red. symmetry. apply ZnZ.of_Z_correct. apply ZnZ.spec_to_Z. Qed. End Induction. Theorem add_0_l : forall n, 0 + n == n. Proof. intro n. zify. rewrite Z.add_0_l. apply Zmod_small. apply ZnZ.spec_to_Z. Qed. Theorem add_succ_l : forall n m, (S n) + m == S (n + m). Proof. intros n m. zify. rewrite succ_mod_wB. repeat rewrite Zplus_mod_idemp_l; try apply gt_wB_0. rewrite <- (Z.add_assoc ([| n |] mod wB) 1 [| m |]). rewrite Zplus_mod_idemp_l. rewrite (Z.add_comm 1 [| m |]); now rewrite Z.add_assoc. Qed. Theorem sub_0_r : forall n, n - 0 == n. Proof. intro n. zify. rewrite Z.sub_0_r. apply NZ_to_Z_mod. Qed. Theorem sub_succ_r : forall n m, n - (S m) == P (n - m). Proof. intros n m. zify. rewrite Zminus_mod_idemp_r, Zminus_mod_idemp_l. now replace ([|n|] - ([|m|] + 1))%Z with ([|n|] - [|m|] - 1)%Z by ring. Qed. Theorem mul_0_l : forall n, 0 * n == 0. Proof. intro n. now zify. Qed. Theorem mul_succ_l : forall n m, (S n) * m == n * m + m. Proof. intros n m. zify. rewrite Zplus_mod_idemp_l, Zmult_mod_idemp_l. now rewrite Z.mul_add_distr_r, Z.mul_1_l. Qed. Definition t := t. End NZCyclicAxiomsMod. 
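(* Usage note: NZCyclicAxiomsMod is a functor, so the construction above can
   be instantiated with any concrete cyclic word type (for instance the
   31-bit words defined elsewhere in this library) to obtain a model of the
   abstract NZ axioms. *)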
coq-8.4pl2/theories/Numbers/Cyclic/Abstract/CyclicAxioms.v0000640000175000001440000003051512010532755022617 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z; of_pos : positive -> N * t; (* Euclidean division by [2^digits] *) head0 : t -> t; (* number of digits 0 in front of the number *) tail0 : t -> t; (* number of digits 0 at the bottom of the number *) (* Basic numbers *) zero : t; one : t; minus_one : t; (* [2^digits-1], which is equivalent to [-1] *) (* Comparison *) compare : t -> t -> comparison; eq0 : t -> bool; (* Basic arithmetic operations *) opp_c : t -> carry t; opp : t -> t; opp_carry : t -> t; (* the carry is known to be -1 *) succ_c : t -> carry t; add_c : t -> t -> carry t; add_carry_c : t -> t -> carry t; succ : t -> t; add : t -> t -> t; add_carry : t -> t -> t; pred_c : t -> carry t; sub_c : t -> t -> carry t; sub_carry_c : t -> t -> carry t; pred : t -> t; sub : t -> t -> t; sub_carry : t -> t -> t; mul_c : t -> t -> zn2z t; mul : t -> t -> t; square_c : t -> zn2z t; (* Special divisions operations *) div21 : t -> t -> t -> t*t; div_gt : t -> t -> t * t; (* specialized version of [div] *) div : t -> t -> t * t; modulo_gt : t -> t -> t; (* specialized version of [mod] *) modulo : t -> t -> t; gcd_gt : t -> t -> t; (* specialized version of [gcd] *) gcd : t -> t -> t; (* [add_mul_div p i j] is a combination of the [(digits-p)] low bits of [i] above the [p] high bits of [j]: [add_mul_div p i j = i*2^p+j/2^(digits-p)] *) add_mul_div : t -> t -> t -> t; (* [pos_mod p i] is [i mod 2^p] *) pos_mod : t -> t -> t; is_even : t -> bool; (* square root *) sqrt2 : t -> t -> t * carry t; sqrt : t -> t }. Section Specs. Context {t : Type}{ops : Ops t}. Notation "[| x |]" := (to_Z x) (at level 0, x at level 99). Let wB := base digits. Notation "[+| c |]" := (interp_carry 1 wB to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB to_Z c) (at level 0, x at level 99). Notation "[|| x ||]" := (zn2z_to_Z wB to_Z x) (at level 0, x at level 99). 
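(* Worked illustration of add_mul_div as documented in the Ops record above,
   assuming hypothetical values digits = 8 (hence wB = 256), p = 3,
   i = 177 (0b10110001) and j = 224 (0b11100000):
     add_mul_div p i j = (i * 2^3 + j / 2^(8-3)) mod 256
                       = (1416 + 7) mod 256 = 143 = 0b10001111,
   i.e. the 5 low bits of i placed above the 3 high bits of j. *)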
Class Specs := MkSpecs { (* Conversion functions with Z *) spec_to_Z : forall x, 0 <= [| x |] < wB; spec_of_pos : forall p, Zpos p = (Z.of_N (fst (of_pos p)))*wB + [|(snd (of_pos p))|]; spec_zdigits : [| zdigits |] = Zpos digits; spec_more_than_1_digit: 1 < Zpos digits; (* Basic numbers *) spec_0 : [|zero|] = 0; spec_1 : [|one|] = 1; spec_m1 : [|minus_one|] = wB - 1; (* Comparison *) spec_compare : forall x y, compare x y = ([|x|] ?= [|y|]); (* NB: the spec of [eq0] is deliberately partial, see DoubleCyclic where [eq0 x = true <-> x = W0] *) spec_eq0 : forall x, eq0 x = true -> [|x|] = 0; (* Basic arithmetic operations *) spec_opp_c : forall x, [-|opp_c x|] = -[|x|]; spec_opp : forall x, [|opp x|] = (-[|x|]) mod wB; spec_opp_carry : forall x, [|opp_carry x|] = wB - [|x|] - 1; spec_succ_c : forall x, [+|succ_c x|] = [|x|] + 1; spec_add_c : forall x y, [+|add_c x y|] = [|x|] + [|y|]; spec_add_carry_c : forall x y, [+|add_carry_c x y|] = [|x|] + [|y|] + 1; spec_succ : forall x, [|succ x|] = ([|x|] + 1) mod wB; spec_add : forall x y, [|add x y|] = ([|x|] + [|y|]) mod wB; spec_add_carry : forall x y, [|add_carry x y|] = ([|x|] + [|y|] + 1) mod wB; spec_pred_c : forall x, [-|pred_c x|] = [|x|] - 1; spec_sub_c : forall x y, [-|sub_c x y|] = [|x|] - [|y|]; spec_sub_carry_c : forall x y, [-|sub_carry_c x y|] = [|x|] - [|y|] - 1; spec_pred : forall x, [|pred x|] = ([|x|] - 1) mod wB; spec_sub : forall x y, [|sub x y|] = ([|x|] - [|y|]) mod wB; spec_sub_carry : forall x y, [|sub_carry x y|] = ([|x|] - [|y|] - 1) mod wB; spec_mul_c : forall x y, [|| mul_c x y ||] = [|x|] * [|y|]; spec_mul : forall x y, [|mul x y|] = ([|x|] * [|y|]) mod wB; spec_square_c : forall x, [|| square_c x||] = [|x|] * [|x|]; (* Special divisions operations *) spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := div21 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]; spec_div_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> let (q,r) := div_gt a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]; spec_div : forall a b, 0 < [|b|] -> let (q,r) := div a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]; spec_modulo_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> [|modulo_gt a b|] = [|a|] mod [|b|]; spec_modulo : forall a b, 0 < [|b|] -> [|modulo a b|] = [|a|] mod [|b|]; spec_gcd_gt : forall a b, [|a|] > [|b|] -> Zis_gcd [|a|] [|b|] [|gcd_gt a b|]; spec_gcd : forall a b, Zis_gcd [|a|] [|b|] [|gcd a b|]; (* shift operations *) spec_head00: forall x, [|x|] = 0 -> [|head0 x|] = Zpos digits; spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ ([|head0 x|]) * [|x|] < wB; spec_tail00: forall x, [|x|] = 0 -> [|tail0 x|] = Zpos digits; spec_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2 * y + 1) * (2 ^ [|tail0 x|]) ; spec_add_mul_div : forall x y p, [|p|] <= Zpos digits -> [| add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos digits) - [|p|]))) mod wB; spec_pos_mod : forall w p, [|pos_mod p w|] = [|w|] mod (2 ^ [|p|]); (* sqrt *) spec_is_even : forall x, if is_even x then [|x|] mod 2 = 0 else [|x|] mod 2 = 1; spec_sqrt2 : forall x y, wB/ 4 <= [|x|] -> let (s,r) := sqrt2 x y in [||WW x y||] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]; spec_sqrt : forall x, [|sqrt x|] ^ 2 <= [|x|] < ([|sqrt x|] + 1) ^ 2 }. End Specs. Arguments Specs {t} ops. (** Generic construction of double words *) Section WW. Context {t : Type}{ops : Ops t}{specs : Specs ops}. Let wB := base digits. Definition WO' (eq0:t->bool) zero h := if eq0 h then W0 else WW h zero. 
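(* Note on WO': it builds the double word whose value is [|h|] * wB, but
   first runs the zero test so that the canonical constructor W0 is used
   whenever eq0 h succeeds (in which case [|h|] = 0); otherwise the result
   is WW h zero.  See spec_WO below. *)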
Definition WO := Eval lazy beta delta [WO'] in let eq0 := ZnZ.eq0 in let zero := ZnZ.zero in WO' eq0 zero. Definition OW' (eq0:t->bool) zero l := if eq0 l then W0 else WW zero l. Definition OW := Eval lazy beta delta [OW'] in let eq0 := ZnZ.eq0 in let zero := ZnZ.zero in OW' eq0 zero. Definition WW' (eq0:t->bool) zero h l := if eq0 h then OW' eq0 zero l else WW h l. Definition WW := Eval lazy beta delta [WW' OW'] in let eq0 := ZnZ.eq0 in let zero := ZnZ.zero in WW' eq0 zero. Lemma spec_WO : forall h, zn2z_to_Z wB to_Z (WO h) = (to_Z h)*wB. Proof. unfold zn2z_to_Z, WO; simpl; intros. case_eq (eq0 h); intros. rewrite (spec_eq0 _ H); auto. rewrite spec_0; auto with zarith. Qed. Lemma spec_OW : forall l, zn2z_to_Z wB to_Z (OW l) = to_Z l. Proof. unfold zn2z_to_Z, OW; simpl; intros. case_eq (eq0 l); intros. rewrite (spec_eq0 _ H); auto. rewrite spec_0; auto with zarith. Qed. Lemma spec_WW : forall h l, zn2z_to_Z wB to_Z (WW h l) = (to_Z h)*wB + to_Z l. Proof. unfold WW; simpl; intros. case_eq (eq0 h); intros. rewrite (spec_eq0 _ H); auto. fold (OW l). rewrite spec_OW; auto. simpl; auto. Qed. End WW. (** Injecting [Z] numbers into a cyclic structure *) Section Of_Z. Context {t : Type}{ops : Ops t}{specs : Specs ops}. Notation "[| x |]" := (to_Z x) (at level 0, x at level 99). Theorem of_pos_correct: forall p, Zpos p < base digits -> [|(snd (of_pos p))|] = Zpos p. Proof. intros p Hp. generalize (spec_of_pos p). case (of_pos p); intros n w1; simpl. case n; simpl Npos; auto with zarith. intros p1 Hp1; contradict Hp; apply Z.le_ngt. replace (base digits) with (1 * base digits + 0) by ring. rewrite Hp1. apply Z.add_le_mono. apply Z.mul_le_mono_nonneg; auto with zarith. case p1; simpl; intros; red; simpl; intros; discriminate. unfold base; auto with zarith. case (spec_to_Z w1); auto with zarith. Qed. Definition of_Z z := match z with | Zpos p => snd (of_pos p) | _ => zero end. Theorem of_Z_correct: forall p, 0 <= p < base digits -> [|of_Z p|] = p. Proof. intros p; case p; simpl; try rewrite spec_0; auto. intros; rewrite of_pos_correct; auto with zarith. intros p1 (H1, _); contradict H1; apply Z.lt_nge; red; simpl; auto. Qed. End Of_Z. End ZnZ. (** A modular specification grouping the earlier records. *) Module Type CyclicType. Parameter t : Type. Declare Instance ops : ZnZ.Ops t. Declare Instance specs : ZnZ.Specs ops. End CyclicType. (** A Cyclic structure can be seen as a ring *) Module CyclicRing (Import Cyclic : CyclicType). Local Notation "[| x |]" := (ZnZ.to_Z x) (at level 0, x at level 99). Definition eq (n m : t) := [| n |] = [| m |]. Local Infix "==" := eq (at level 70). Local Notation "0" := ZnZ.zero. Local Notation "1" := ZnZ.one. Local Infix "+" := ZnZ.add. Local Infix "-" := ZnZ.sub. Local Notation "- x" := (ZnZ.opp x). Local Infix "*" := ZnZ.mul. Local Notation wB := (base ZnZ.digits). Hint Rewrite ZnZ.spec_0 ZnZ.spec_1 ZnZ.spec_add ZnZ.spec_mul ZnZ.spec_opp ZnZ.spec_sub : cyclic. Ltac zify := unfold eq in *; autorewrite with cyclic. Lemma add_0_l : forall x, 0 + x == x. Proof. intros. zify. rewrite Z.add_0_l. apply Zmod_small. apply ZnZ.spec_to_Z. Qed. Lemma add_comm : forall x y, x + y == y + x. Proof. intros. zify. now rewrite Z.add_comm. Qed. Lemma add_assoc : forall x y z, x + (y + z) == x + y + z. Proof. intros. zify. now rewrite Zplus_mod_idemp_r, Zplus_mod_idemp_l, Z.add_assoc. Qed. Lemma mul_1_l : forall x, 1 * x == x. Proof. intros. zify. rewrite Z.mul_1_l. apply Zmod_small. apply ZnZ.spec_to_Z. Qed. Lemma mul_comm : forall x y, x * y == y * x. Proof. intros. zify. 
now rewrite Z.mul_comm. Qed. Lemma mul_assoc : forall x y z, x * (y * z) == x * y * z. Proof. intros. zify. now rewrite Zmult_mod_idemp_r, Zmult_mod_idemp_l, Z.mul_assoc. Qed. Lemma mul_add_distr_r : forall x y z, (x+y)*z == x*z + y*z. Proof. intros. zify. now rewrite <- Zplus_mod, Zmult_mod_idemp_l, Z.mul_add_distr_r. Qed. Lemma add_opp_r : forall x y, x + - y == x-y. Proof. intros. zify. rewrite <- Zminus_mod_idemp_r. unfold Z.sub. destruct (Z.eq_dec ([|y|] mod wB) 0) as [EQ|NEQ]. rewrite Z_mod_zero_opp_full, EQ, 2 Z.add_0_r; auto. rewrite Z_mod_nz_opp_full by auto. rewrite <- Zplus_mod_idemp_r, <- Zminus_mod_idemp_l. rewrite Z_mod_same_full. simpl. now rewrite Zplus_mod_idemp_r. Qed. Lemma add_opp_diag_r : forall x, x + - x == 0. Proof. intros. red. rewrite add_opp_r. zify. now rewrite Z.sub_diag, Zmod_0_l. Qed. Lemma CyclicRing : ring_theory 0 1 ZnZ.add ZnZ.mul ZnZ.sub ZnZ.opp eq. Proof. constructor. exact add_0_l. exact add_comm. exact add_assoc. exact mul_1_l. exact mul_comm. exact mul_assoc. exact mul_add_distr_r. symmetry. apply add_opp_r. exact add_opp_diag_r. Qed. Definition eqb x y := match ZnZ.compare x y with Eq => true | _ => false end. Lemma eqb_eq : forall x y, eqb x y = true <-> x == y. Proof. intros. unfold eqb, eq. rewrite ZnZ.spec_compare. case Z.compare_spec; intuition; try discriminate. Qed. Lemma eqb_correct : forall x y, eqb x y = true -> x==y. Proof. now apply eqb_eq. Qed. End CyclicRing. coq-8.4pl2/theories/Numbers/Cyclic/ZModulo/0000750000175000001440000000000012127276547017677 5ustar notinuserscoq-8.4pl2/theories/Numbers/Cyclic/ZModulo/ZModulo.v0000640000175000001440000006171312010532755021453 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1%positive. Definition wB := base digits. Definition t := Z. Definition zdigits := Zpos digits. Definition to_Z x := x mod wB. Notation "[| x |]" := (to_Z x) (at level 0, x at level 99). Notation "[+| c |]" := (interp_carry 1 wB to_Z c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB to_Z c) (at level 0, x at level 99). Notation "[|| x ||]" := (zn2z_to_Z wB to_Z x) (at level 0, x at level 99). Lemma spec_more_than_1_digit: 1 < Zpos digits. Proof. generalize digits_ne_1; destruct digits; auto. destruct 1; auto. Qed. Let digits_gt_1 := spec_more_than_1_digit. Lemma wB_pos : wB > 0. Proof. unfold wB, base; auto with zarith. Qed. Hint Resolve wB_pos. Lemma spec_to_Z_1 : forall x, 0 <= [|x|]. Proof. unfold to_Z; intros; destruct (Z_mod_lt x wB wB_pos); auto. Qed. Lemma spec_to_Z_2 : forall x, [|x|] < wB. Proof. unfold to_Z; intros; destruct (Z_mod_lt x wB wB_pos); auto. Qed. Hint Resolve spec_to_Z_1 spec_to_Z_2. Lemma spec_to_Z : forall x, 0 <= [|x|] < wB. Proof. auto. Qed. Definition of_pos x := let (q,r) := Z.pos_div_eucl x wB in (N_of_Z q, r). Lemma spec_of_pos : forall p, Zpos p = (Z.of_N (fst (of_pos p)))*wB + [|(snd (of_pos p))|]. Proof. intros; unfold of_pos; simpl. generalize (Z_div_mod_POS wB wB_pos p). destruct (Z.pos_div_eucl p wB); simpl; destruct 1. unfold to_Z; rewrite Zmod_small; auto. assert (0 <= z). replace z with (Zpos p / wB) by (symmetry; apply Zdiv_unique with z0; auto). apply Z_div_pos; auto with zarith. replace (Z.of_N (N_of_Z z)) with z by (destruct z; simpl; auto; elim H1; auto). rewrite Z.mul_comm; auto. Qed. Lemma spec_zdigits : [|zdigits|] = Zpos digits. Proof. unfold to_Z, zdigits. apply Zmod_small. unfold wB, base. split; auto with zarith. 
apply Zpower2_lt_lin; auto with zarith. Qed. Definition zero := 0. Definition one := 1. Definition minus_one := wB - 1. Lemma spec_0 : [|zero|] = 0. Proof. unfold to_Z, zero. apply Zmod_small; generalize wB_pos; auto with zarith. Qed. Lemma spec_1 : [|one|] = 1. Proof. unfold to_Z, one. apply Zmod_small; split; auto with zarith. unfold wB, base. apply Z.lt_trans with (Zpos digits); auto. apply Zpower2_lt_lin; auto with zarith. Qed. Lemma spec_Bm1 : [|minus_one|] = wB - 1. Proof. unfold to_Z, minus_one. apply Zmod_small; split; auto with zarith. unfold wB, base. cut (1 <= 2 ^ Zpos digits); auto with zarith. apply Z.le_trans with (Zpos digits); auto with zarith. apply Zpower2_le_lin; auto with zarith. Qed. Definition compare x y := Z.compare [|x|] [|y|]. Lemma spec_compare : forall x y, compare x y = Z.compare [|x|] [|y|]. Proof. reflexivity. Qed. Definition eq0 x := match [|x|] with Z0 => true | _ => false end. Lemma spec_eq0 : forall x, eq0 x = true -> [|x|] = 0. Proof. unfold eq0; intros; now destruct [|x|]. Qed. Definition opp_c x := if eq0 x then C0 0 else C1 (- x). Definition opp x := - x. Definition opp_carry x := - x - 1. Lemma spec_opp_c : forall x, [-|opp_c x|] = -[|x|]. Proof. intros; unfold opp_c, to_Z; auto. case_eq (eq0 x); intros; unfold interp_carry. fold [|x|]; rewrite (spec_eq0 x H); auto. assert (x mod wB <> 0). unfold eq0, to_Z in H. intro H0; rewrite H0 in H; discriminate. rewrite Z_mod_nz_opp_full; auto with zarith. Qed. Lemma spec_opp : forall x, [|opp x|] = (-[|x|]) mod wB. Proof. intros; unfold opp, to_Z; auto. change ((- x) mod wB = (0 - (x mod wB)) mod wB). rewrite Zminus_mod_idemp_r; simpl; auto. Qed. Lemma spec_opp_carry : forall x, [|opp_carry x|] = wB - [|x|] - 1. Proof. intros; unfold opp_carry, to_Z; auto. replace (- x - 1) with (- 1 - x) by omega. rewrite <- Zminus_mod_idemp_r. replace ( -1 - x mod wB) with (0 + ( -1 - x mod wB)) by omega. rewrite <- (Z_mod_same_full wB). rewrite Zplus_mod_idemp_l. replace (wB + (-1 - x mod wB)) with (wB - x mod wB -1) by omega. apply Zmod_small. generalize (Z_mod_lt x wB wB_pos); omega. Qed. Definition succ_c x := let y := Z.succ x in if eq0 y then C1 0 else C0 y. Definition add_c x y := let z := [|x|] + [|y|] in if Z_lt_le_dec z wB then C0 z else C1 (z-wB). Definition add_carry_c x y := let z := [|x|]+[|y|]+1 in if Z_lt_le_dec z wB then C0 z else C1 (z-wB). Definition succ := Z.succ. Definition add := Z.add. Definition add_carry x y := x + y + 1. Lemma Zmod_equal : forall x y z, z>0 -> (x-y) mod z = 0 -> x mod z = y mod z. Proof. intros. generalize (Z_div_mod_eq (x-y) z H); rewrite H0, Z.add_0_r. remember ((x-y)/z) as k. rewrite Z.sub_move_r, Z.add_comm, Z.mul_comm. intros ->. now apply Z_mod_plus. Qed. Lemma spec_succ_c : forall x, [+|succ_c x|] = [|x|] + 1. Proof. intros; unfold succ_c, to_Z, Z.succ. case_eq (eq0 (x+1)); intros; unfold interp_carry. rewrite Z.mul_1_l. replace (wB + 0 mod wB) with wB by auto with zarith. symmetry. rewrite Z.add_move_r. assert ((x+1) mod wB = 0) by (apply spec_eq0; auto). replace (wB-1) with ((wB-1) mod wB) by (apply Zmod_small; generalize wB_pos; omega). rewrite <- Zminus_mod_idemp_l; rewrite Z_mod_same; simpl; auto. apply Zmod_equal; auto. assert ((x+1) mod wB <> 0). unfold eq0, to_Z in *; now destruct ((x+1) mod wB). assert (x mod wB + 1 <> wB). contradict H0. rewrite Z.add_move_r in H0; simpl in H0. rewrite <- Zplus_mod_idemp_l; rewrite H0. replace (wB-1+1) with wB; auto with zarith; apply Z_mod_same; auto. rewrite <- Zplus_mod_idemp_l. apply Zmod_small. 
generalize (Z_mod_lt x wB wB_pos); omega. Qed. Lemma spec_add_c : forall x y, [+|add_c x y|] = [|x|] + [|y|]. Proof. intros; unfold add_c, to_Z, interp_carry. destruct Z_lt_le_dec. apply Zmod_small; generalize (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. rewrite Z.mul_1_l, Z.add_comm, Z.add_move_r. apply Zmod_small; generalize (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. Qed. Lemma spec_add_carry_c : forall x y, [+|add_carry_c x y|] = [|x|] + [|y|] + 1. Proof. intros; unfold add_carry_c, to_Z, interp_carry. destruct Z_lt_le_dec. apply Zmod_small; generalize (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. rewrite Z.mul_1_l, Z.add_comm, Z.add_move_r. apply Zmod_small; generalize (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. Qed. Lemma spec_succ : forall x, [|succ x|] = ([|x|] + 1) mod wB. Proof. intros; unfold succ, to_Z, Z.succ. symmetry; apply Zplus_mod_idemp_l. Qed. Lemma spec_add : forall x y, [|add x y|] = ([|x|] + [|y|]) mod wB. Proof. intros; unfold add, to_Z; apply Zplus_mod. Qed. Lemma spec_add_carry : forall x y, [|add_carry x y|] = ([|x|] + [|y|] + 1) mod wB. Proof. intros; unfold add_carry, to_Z. rewrite <- Zplus_mod_idemp_l. rewrite (Zplus_mod x y). rewrite Zplus_mod_idemp_l; auto. Qed. Definition pred_c x := if eq0 x then C1 (wB-1) else C0 (x-1). Definition sub_c x y := let z := [|x|]-[|y|] in if Z_lt_le_dec z 0 then C1 (wB+z) else C0 z. Definition sub_carry_c x y := let z := [|x|]-[|y|]-1 in if Z_lt_le_dec z 0 then C1 (wB+z) else C0 z. Definition pred := Z.pred. Definition sub := Z.sub. Definition sub_carry x y := x - y - 1. Lemma spec_pred_c : forall x, [-|pred_c x|] = [|x|] - 1. Proof. intros; unfold pred_c, to_Z, interp_carry. case_eq (eq0 x); intros. fold [|x|]; rewrite spec_eq0; auto. replace ((wB-1) mod wB) with (wB-1); auto with zarith. symmetry; apply Zmod_small; generalize wB_pos; omega. assert (x mod wB <> 0). unfold eq0, to_Z in *; now destruct (x mod wB). rewrite <- Zminus_mod_idemp_l. apply Zmod_small. generalize (Z_mod_lt x wB wB_pos); omega. Qed. Lemma spec_sub_c : forall x y, [-|sub_c x y|] = [|x|] - [|y|]. Proof. intros; unfold sub_c, to_Z, interp_carry. destruct Z_lt_le_dec. replace ((wB + (x mod wB - y mod wB)) mod wB) with (wB + (x mod wB - y mod wB)). omega. symmetry; apply Zmod_small. generalize wB_pos (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. apply Zmod_small. generalize wB_pos (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. Qed. Lemma spec_sub_carry_c : forall x y, [-|sub_carry_c x y|] = [|x|] - [|y|] - 1. Proof. intros; unfold sub_carry_c, to_Z, interp_carry. destruct Z_lt_le_dec. replace ((wB + (x mod wB - y mod wB - 1)) mod wB) with (wB + (x mod wB - y mod wB -1)). omega. symmetry; apply Zmod_small. generalize wB_pos (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. apply Zmod_small. generalize wB_pos (Z_mod_lt x wB wB_pos) (Z_mod_lt y wB wB_pos); omega. Qed. Lemma spec_pred : forall x, [|pred x|] = ([|x|] - 1) mod wB. Proof. intros; unfold pred, to_Z, Z.pred. rewrite <- Zplus_mod_idemp_l; auto. Qed. Lemma spec_sub : forall x y, [|sub x y|] = ([|x|] - [|y|]) mod wB. Proof. intros; unfold sub, to_Z; apply Zminus_mod. Qed. Lemma spec_sub_carry : forall x y, [|sub_carry x y|] = ([|x|] - [|y|] - 1) mod wB. Proof. intros; unfold sub_carry, to_Z. rewrite <- Zminus_mod_idemp_l. rewrite (Zminus_mod x y). rewrite Zminus_mod_idemp_l. auto. Qed. Definition mul_c x y := let (h,l) := Z.div_eucl ([|x|]*[|y|]) wB in if eq0 h then if eq0 l then W0 else WW h l else WW h l. Definition mul := Z.mul. 
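(** Worked example (illustration only, instantiating [digits] with 8, so
    [wB] = 256): [ [|200|] * [|200|] = 40000 = 156 * 256 + 64 ], hence
    [mul_c 200 200] is the double word [WW 156 64], whose interpretation
    is 40000, while the single-word product only keeps the low part:
    [ [|mul 200 200|] = 40000 mod 256 = 64 ]. *)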
Definition square_c x := mul_c x x. Lemma spec_mul_c : forall x y, [|| mul_c x y ||] = [|x|] * [|y|]. Proof. intros; unfold mul_c, zn2z_to_Z. assert (Z.div_eucl ([|x|]*[|y|]) wB = (([|x|]*[|y|])/wB,([|x|]*[|y|]) mod wB)). unfold Z.modulo, Z.div; destruct Z.div_eucl; auto. generalize (Z_div_mod ([|x|]*[|y|]) wB wB_pos); destruct Z.div_eucl as (h,l). destruct 1; injection H; clear H; intros. rewrite H0. assert ([|l|] = l). apply Zmod_small; auto. assert ([|h|] = h). apply Zmod_small. subst h. split. apply Z_div_pos; auto with zarith. apply Zdiv_lt_upper_bound; auto with zarith. apply Z.mul_lt_mono_nonneg; auto with zarith. clear H H0 H1 H2. case_eq (eq0 h); simpl; intros. case_eq (eq0 l); simpl; intros. rewrite <- H3, <- H4, (spec_eq0 h), (spec_eq0 l); auto with zarith. rewrite H3, H4; auto with zarith. rewrite H3, H4; auto with zarith. Qed. Lemma spec_mul : forall x y, [|mul x y|] = ([|x|] * [|y|]) mod wB. Proof. intros; unfold mul, to_Z; apply Zmult_mod. Qed. Lemma spec_square_c : forall x, [|| square_c x||] = [|x|] * [|x|]. Proof. intros x; exact (spec_mul_c x x). Qed. Definition div x y := Z.div_eucl [|x|] [|y|]. Lemma spec_div : forall a b, 0 < [|b|] -> let (q,r) := div a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. intros; unfold div. assert ([|b|]>0) by auto with zarith. assert (Z.div_eucl [|a|] [|b|] = ([|a|]/[|b|], [|a|] mod [|b|])). unfold Z.modulo, Z.div; destruct Z.div_eucl; auto. generalize (Z_div_mod [|a|] [|b|] H0). destruct Z.div_eucl as (q,r); destruct 1; intros. injection H1; clear H1; intros. assert ([|r|]=r). apply Zmod_small; generalize (Z_mod_lt b wB wB_pos); fold [|b|]; auto with zarith. assert ([|q|]=q). apply Zmod_small. subst q. split. apply Z_div_pos; auto with zarith. apply Zdiv_lt_upper_bound; auto with zarith. apply Z.lt_le_trans with (wB*1). rewrite Z.mul_1_r; auto with zarith. apply Z.mul_le_mono_nonneg; generalize wB_pos; auto with zarith. rewrite H5, H6; rewrite Z.mul_comm; auto with zarith. Qed. Definition div_gt := div. Lemma spec_div_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> let (q,r) := div_gt a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. intros. apply spec_div; auto. Qed. Definition modulo x y := [|x|] mod [|y|]. Definition modulo_gt x y := [|x|] mod [|y|]. Lemma spec_modulo : forall a b, 0 < [|b|] -> [|modulo a b|] = [|a|] mod [|b|]. Proof. intros; unfold modulo. apply Zmod_small. assert ([|b|]>0) by auto with zarith. generalize (Z_mod_lt [|a|] [|b|] H0) (Z_mod_lt b wB wB_pos). fold [|b|]; omega. Qed. Lemma spec_modulo_gt : forall a b, [|a|] > [|b|] -> 0 < [|b|] -> [|modulo_gt a b|] = [|a|] mod [|b|]. Proof. intros; apply spec_modulo; auto. Qed. Definition gcd x y := Z.gcd [|x|] [|y|]. Definition gcd_gt x y := Z.gcd [|x|] [|y|]. Lemma Zgcd_bound : forall a b, 0<=a -> 0<=b -> Z.gcd a b <= Z.max a b. Proof. intros. generalize (Zgcd_is_gcd a b); inversion_clear 1. destruct H2 as (q,H2); destruct H3 as (q',H3); clear H4. assert (H4:=Z.gcd_nonneg a b). destruct (Z.eq_dec (Z.gcd a b) 0). rewrite e; generalize (Zmax_spec a b); omega. assert (0 <= q). apply Z.mul_le_mono_pos_r with (Z.gcd a b); auto with zarith. destruct (Z.eq_dec q 0). subst q; simpl in *; subst a; simpl; auto. generalize (Zmax_spec 0 b) (Zabs_spec b); omega. apply Z.le_trans with a. rewrite H2 at 2. rewrite <- (Z.mul_1_l (Z.gcd a b)) at 1. apply Z.mul_le_mono_nonneg; auto with zarith. generalize (Zmax_spec a b); omega. Qed. Lemma spec_gcd : forall a b, Zis_gcd [|a|] [|b|] [|gcd a b|]. Proof. intros; unfold gcd. 
generalize (Z_mod_lt a wB wB_pos)(Z_mod_lt b wB wB_pos); intros. fold [|a|] in *; fold [|b|] in *. replace ([|Z.gcd [|a|] [|b|]|]) with (Z.gcd [|a|] [|b|]). apply Zgcd_is_gcd. symmetry; apply Zmod_small. split. apply Z.gcd_nonneg. apply Z.le_lt_trans with (Z.max [|a|] [|b|]). apply Zgcd_bound; auto with zarith. generalize (Zmax_spec [|a|] [|b|]); omega. Qed. Lemma spec_gcd_gt : forall a b, [|a|] > [|b|] -> Zis_gcd [|a|] [|b|] [|gcd_gt a b|]. Proof. intros. apply spec_gcd; auto. Qed. Definition div21 a1 a2 b := Z.div_eucl ([|a1|]*wB+[|a2|]) [|b|]. Lemma spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := div21 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. intros; unfold div21. generalize (Z_mod_lt a1 wB wB_pos); fold [|a1|]; intros. generalize (Z_mod_lt a2 wB wB_pos); fold [|a2|]; intros. assert ([|b|]>0) by auto with zarith. remember ([|a1|]*wB+[|a2|]) as a. assert (Z.div_eucl a [|b|] = (a/[|b|], a mod [|b|])). unfold Z.modulo, Z.div; destruct Z.div_eucl; auto. generalize (Z_div_mod a [|b|] H3). destruct Z.div_eucl as (q,r); destruct 1; intros. injection H4; clear H4; intros. assert ([|r|]=r). apply Zmod_small; generalize (Z_mod_lt b wB wB_pos); fold [|b|]; auto with zarith. assert ([|q|]=q). apply Zmod_small. subst q. split. apply Z_div_pos; auto with zarith. subst a; auto with zarith. apply Zdiv_lt_upper_bound; auto with zarith. subst a. replace (wB*[|b|]) with (([|b|]-1)*wB + wB) by ring. apply Z.lt_le_trans with ([|a1|]*wB+wB); auto with zarith. rewrite H8, H9; rewrite Z.mul_comm; auto with zarith. Qed. Definition add_mul_div p x y := ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos digits) - [|p|]))). Lemma spec_add_mul_div : forall x y p, [|p|] <= Zpos digits -> [| add_mul_div p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos digits) - [|p|]))) mod wB. Proof. intros; unfold add_mul_div; auto. Qed. Definition pos_mod p w := [|w|] mod (2 ^ [|p|]). Lemma spec_pos_mod : forall w p, [|pos_mod p w|] = [|w|] mod (2 ^ [|p|]). Proof. intros; unfold pos_mod. apply Zmod_small. generalize (Z_mod_lt [|w|] (2 ^ [|p|])); intros. split. destruct H; auto with zarith. apply Z.le_lt_trans with [|w|]; auto with zarith. apply Zmod_le; auto with zarith. Qed. Definition is_even x := if Z.eq_dec ([|x|] mod 2) 0 then true else false. Lemma spec_is_even : forall x, if is_even x then [|x|] mod 2 = 0 else [|x|] mod 2 = 1. Proof. intros; unfold is_even; destruct Z.eq_dec; auto. generalize (Z_mod_lt [|x|] 2); omega. Qed. Definition sqrt x := Z.sqrt [|x|]. Lemma spec_sqrt : forall x, [|sqrt x|] ^ 2 <= [|x|] < ([|sqrt x|] + 1) ^ 2. Proof. intros. unfold sqrt. repeat rewrite Z.pow_2_r. replace [|Z.sqrt [|x|]|] with (Z.sqrt [|x|]). apply Z.sqrt_spec; auto with zarith. symmetry; apply Zmod_small. split. apply Z.sqrt_nonneg; auto. apply Z.le_lt_trans with [|x|]; auto. apply Z.sqrt_le_lin; auto. Qed. Definition sqrt2 x y := let z := [|x|]*wB+[|y|] in match z with | Z0 => (0, C0 0) | Zpos p => let (s,r) := Z.sqrtrem (Zpos p) in (s, if Z_lt_le_dec r wB then C0 r else C1 (r-wB)) | Zneg _ => (0, C0 0) end. Lemma spec_sqrt2 : forall x y, wB/ 4 <= [|x|] -> let (s,r) := sqrt2 x y in [||WW x y||] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]. Proof. intros; unfold sqrt2. simpl zn2z_to_Z. remember ([|x|]*wB+[|y|]) as z. destruct z. auto with zarith. generalize (Z.sqrtrem_spec (Zpos p)). destruct Z.sqrtrem as (s,r); intros [U V]; auto with zarith. assert (s < wB). destruct (Z_lt_le_dec s wB); auto. assert (wB * wB <= Zpos p). rewrite U. 
apply Z.le_trans with (s*s); try omega. apply Z.mul_le_mono_nonneg; generalize wB_pos; auto with zarith. assert (Zpos p < wB*wB). rewrite Heqz. replace (wB*wB) with ((wB-1)*wB+wB) by ring. apply Z.add_le_lt_mono; auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. generalize (spec_to_Z x); auto with zarith. generalize wB_pos; auto with zarith. omega. replace [|s|] with s by (symmetry; apply Zmod_small; auto with zarith). destruct Z_lt_le_dec; unfold interp_carry. replace [|r|] with r by (symmetry; apply Zmod_small; auto with zarith). rewrite Z.pow_2_r; auto with zarith. replace [|r-wB|] with (r-wB) by (symmetry; apply Zmod_small; auto with zarith). rewrite Z.pow_2_r; omega. assert (0<=Zneg p). rewrite Heqz; generalize wB_pos; auto with zarith. compute in H0; elim H0; auto. Qed. Lemma two_p_power2 : forall x, x>=0 -> two_p x = 2 ^ x. Proof. intros. unfold two_p. destruct x; simpl; auto. apply two_power_pos_correct. Qed. Definition head0 x := match [|x|] with | Z0 => zdigits | Zpos p => zdigits - log_inf p - 1 | _ => 0 end. Lemma spec_head00: forall x, [|x|] = 0 -> [|head0 x|] = Zpos digits. Proof. unfold head0; intros. rewrite H; simpl. apply spec_zdigits. Qed. Lemma log_inf_bounded : forall x p, Zpos x < 2^p -> log_inf x < p. Proof. induction x; simpl; intros. assert (0 < p) by (destruct p; compute; auto with zarith; discriminate). cut (log_inf x < p - 1); [omega| ]. apply IHx. change (Zpos x~1) with (2*(Zpos x)+1) in H. replace p with (Z.succ (p-1)) in H; auto with zarith. rewrite Z.pow_succ_r in H; auto with zarith. assert (0 < p) by (destruct p; compute; auto with zarith; discriminate). cut (log_inf x < p - 1); [omega| ]. apply IHx. change (Zpos x~0) with (2*(Zpos x)) in H. replace p with (Z.succ (p-1)) in H; auto with zarith. rewrite Z.pow_succ_r in H; auto with zarith. simpl; intros; destruct p; compute; auto with zarith. Qed. Lemma spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ ([|head0 x|]) * [|x|] < wB. Proof. intros; unfold head0. generalize (spec_to_Z x). destruct [|x|]; try discriminate. intros. destruct (log_inf_correct p). rewrite 2 two_p_power2 in H2; auto with zarith. assert (0 <= zdigits - log_inf p - 1 < wB). split. cut (log_inf p < zdigits); try omega. unfold zdigits. unfold wB, base in *. apply log_inf_bounded; auto with zarith. apply Z.lt_trans with zdigits. omega. unfold zdigits, wB, base; apply Zpower2_lt_lin; auto with zarith. unfold to_Z; rewrite (Zmod_small _ _ H3). destruct H2. split. apply Z.le_trans with (2^(zdigits - log_inf p - 1)*(2^log_inf p)). apply Zdiv_le_upper_bound; auto with zarith. rewrite <- Zpower_exp; auto with zarith. rewrite Z.mul_comm; rewrite <- Z.pow_succ_r; auto with zarith. replace (Z.succ (zdigits - log_inf p -1 +log_inf p)) with zdigits by ring. unfold wB, base, zdigits; auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. apply Z.lt_le_trans with (2^(zdigits - log_inf p - 1)*(2^(Z.succ (log_inf p)))). apply Z.mul_lt_mono_pos_l; auto with zarith. rewrite <- Zpower_exp; auto with zarith. replace (zdigits - log_inf p -1 +Z.succ (log_inf p)) with zdigits by ring. unfold wB, base, zdigits; auto with zarith. Qed. Fixpoint Ptail p := match p with | xO p => (Ptail p)+1 | _ => 0 end. Lemma Ptail_pos : forall p, 0 <= Ptail p. Proof. induction p; simpl; auto with zarith. Qed. Hint Resolve Ptail_pos. Lemma Ptail_bounded : forall p d, Zpos p < 2^(Zpos d) -> Ptail p < Zpos d. Proof. induction p; try (compute; auto; fail). intros; simpl. assert (d <> xH). intro; subst. compute in H; destruct p; discriminate. 
assert (Z.succ (Zpos (Pos.pred d)) = Zpos d). simpl; f_equal. rewrite Pos.add_1_r. destruct (Pos.succ_pred_or d); auto. rewrite H1 in H0; elim H0; auto. assert (Ptail p < Zpos (Pos.pred d)). apply IHp. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. rewrite (Z.mul_comm (Zpos p)). change (2 * Zpos p) with (Zpos p~0). rewrite Z.mul_comm. rewrite <- Z.pow_succ_r; auto with zarith. rewrite H1; auto. rewrite <- H1; omega. Qed. Definition tail0 x := match [|x|] with | Z0 => zdigits | Zpos p => Ptail p | Zneg _ => 0 end. Lemma spec_tail00: forall x, [|x|] = 0 -> [|tail0 x|] = Zpos digits. Proof. unfold tail0; intros. rewrite H; simpl. apply spec_zdigits. Qed. Lemma spec_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2 * y + 1) * (2 ^ [|tail0 x|]). Proof. intros; unfold tail0. generalize (spec_to_Z x). destruct [|x|]; try discriminate; intros. assert ([|Ptail p|] = Ptail p). apply Zmod_small. split; auto. unfold wB, base in *. apply Z.lt_trans with (Zpos digits). apply Ptail_bounded; auto with zarith. apply Zpower2_lt_lin; auto with zarith. rewrite H1. clear; induction p. exists (Zpos p); simpl; rewrite Pos.mul_1_r; auto with zarith. destruct IHp as (y & Yp & Ye). exists y. split; auto. change (Zpos p~0) with (2*Zpos p). rewrite Ye. change (Ptail p~0) with (Z.succ (Ptail p)). rewrite Z.pow_succ_r; auto; ring. exists 0; simpl; auto with zarith. Qed. (** Let's now group everything in two records *) Instance zmod_ops : ZnZ.Ops Z := ZnZ.MkOps (digits : positive) (zdigits: t) (to_Z : t -> Z) (of_pos : positive -> N * t) (head0 : t -> t) (tail0 : t -> t) (zero : t) (one : t) (minus_one : t) (compare : t -> t -> comparison) (eq0 : t -> bool) (opp_c : t -> carry t) (opp : t -> t) (opp_carry : t -> t) (succ_c : t -> carry t) (add_c : t -> t -> carry t) (add_carry_c : t -> t -> carry t) (succ : t -> t) (add : t -> t -> t) (add_carry : t -> t -> t) (pred_c : t -> carry t) (sub_c : t -> t -> carry t) (sub_carry_c : t -> t -> carry t) (pred : t -> t) (sub : t -> t -> t) (sub_carry : t -> t -> t) (mul_c : t -> t -> zn2z t) (mul : t -> t -> t) (square_c : t -> zn2z t) (div21 : t -> t -> t -> t*t) (div_gt : t -> t -> t * t) (div : t -> t -> t * t) (modulo_gt : t -> t -> t) (modulo : t -> t -> t) (gcd_gt : t -> t -> t) (gcd : t -> t -> t) (add_mul_div : t -> t -> t -> t) (pos_mod : t -> t -> t) (is_even : t -> bool) (sqrt2 : t -> t -> t * carry t) (sqrt : t -> t). Instance zmod_specs : ZnZ.Specs zmod_ops := ZnZ.MkSpecs spec_to_Z spec_of_pos spec_zdigits spec_more_than_1_digit spec_0 spec_1 spec_Bm1 spec_compare spec_eq0 spec_opp_c spec_opp spec_opp_carry spec_succ_c spec_add_c spec_add_carry_c spec_succ spec_add spec_add_carry spec_pred_c spec_sub_c spec_sub_carry_c spec_pred spec_sub spec_sub_carry spec_mul_c spec_mul spec_square_c spec_div21 spec_div_gt spec_div spec_modulo_gt spec_modulo spec_gcd_gt spec_gcd spec_head00 spec_head0 spec_tail00 spec_tail0 spec_add_mul_div spec_pos_mod spec_is_even spec_sqrt2 spec_sqrt. End ZModulo. (** A modular version of the previous construction. *) Module Type PositiveNotOne. Parameter p : positive. Axiom not_one : p <> 1%positive. End PositiveNotOne. Module ZModuloCyclicType (P:PositiveNotOne) <: CyclicType. Definition t := Z. Instance ops : ZnZ.Ops t := zmod_ops P.p. Instance specs : ZnZ.Specs ops := zmod_specs P.not_one. End ZModuloCyclicType. 
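(** Example instantiation (an illustrative sketch, not part of the
    original development; the names [Positive8] and [Int8Cyclic] are made
    up for this example): taking [p = 8] yields a cyclic structure of
    8-bit words, i.e. arithmetic modulo [2^8]. *)

Module Positive8 <: PositiveNotOne.
 Definition p := 8%positive.
 Lemma not_one : p <> 1%positive.
 Proof. discriminate. Qed.
End Positive8.

Module Int8Cyclic := ZModuloCyclicType Positive8.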
coq-8.4pl2/theories/Numbers/Cyclic/Int31/0000750000175000001440000000000012127276550017176 5ustar notinuserscoq-8.4pl2/theories/Numbers/Cyclic/Int31/Int31.v0000640000175000001440000003352612010532755020266 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* I31 b d0 ... d(N-1) end ] *) Definition sneakr : digits -> int31 -> int31 := Eval compute in fun b => int31_rect _ (napply_except_last _ _ (size-1) (I31 b)). (** [sneakl b x] shifts [x] to the left by one bit. Leftmost digit is lost while rightmost digit becomes [b]. Pseudo-code is [ match x with (I31 d0 ... dN) => I31 d1 ... dN b end ] *) Definition sneakl : digits -> int31 -> int31 := Eval compute in fun b => int31_rect _ (fun _ => napply_then_last _ _ b (size-1) I31). (** [shiftl], [shiftr], [twice] and [twice_plus_one] are direct consequences of [sneakl] and [sneakr]. *) Definition shiftl := sneakl D0. Definition shiftr := sneakr D0. Definition twice := sneakl D0. Definition twice_plus_one := sneakl D1. (** [firstl x] returns the leftmost digit of number [x]. Pseudo-code is [ match x with (I31 d0 ... dN) => d0 end ] *) Definition firstl : int31 -> digits := Eval compute in int31_rect _ (fun d => napply_discard _ _ d (size-1)). (** [firstr x] returns the rightmost digit of number [x]. Pseudo-code is [ match x with (I31 d0 ... dN) => dN end ] *) Definition firstr : int31 -> digits := Eval compute in int31_rect _ (napply_discard _ _ (fun d=>d) (size-1)). (** [iszero x] is true iff [x = I31 D0 ... D0]. Pseudo-code is [ match x with (I31 D0 ... D0) => true | _ => false end ] *) Definition iszero : int31 -> bool := Eval compute in let f d b := match d with D0 => b | D1 => false end in int31_rect _ (nfold_bis _ _ f true size). (* NB: DO NOT transform the above match in a nicer (if then else). It seems to work, but later "unfold iszero" takes forever. *) (** [base] is [2^31], obtained via iterations of [Z.double]. It can also be seen as the smallest b > 0 s.t. phi_inv b = 0 (see below) *) Definition base := Eval compute in iter_nat size Z Z.double 1%Z. (** * Recursors *) Fixpoint recl_aux (n:nat)(A:Type)(case0:A)(caserec:digits->int31->A->A) (i:int31) : A := match n with | O => case0 | S next => if iszero i then case0 else let si := shiftl i in caserec (firstl i) si (recl_aux next A case0 caserec si) end. Fixpoint recr_aux (n:nat)(A:Type)(case0:A)(caserec:digits->int31->A->A) (i:int31) : A := match n with | O => case0 | S next => if iszero i then case0 else let si := shiftr i in caserec (firstr i) si (recr_aux next A case0 caserec si) end. Definition recl := recl_aux size. Definition recr := recr_aux size. (** * Conversions *) (** From int31 to Z, we simply iterates [Z.double] or [Z.succ_double]. *) Definition phi : int31 -> Z := recr Z (0%Z) (fun b _ => match b with D0 => Z.double | D1 => Z.succ_double end). (** From positive to int31. An abstract definition could be : [ phi_inv (2n) = 2*(phi_inv n) /\ phi_inv 2n+1 = 2*(phi_inv n) + 1 ] *) Fixpoint phi_inv_positive p := match p with | xI q => twice_plus_one (phi_inv_positive q) | xO q => twice (phi_inv_positive q) | xH => In end. (** The negative part : 2-complement *) Fixpoint complement_negative p := match p with | xI q => twice (complement_negative q) | xO q => twice_plus_one (complement_negative q) | xH => twice Tn end. 
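(** For instance (illustrative remark): [complement_negative 1] flips
    every bit of [phi_inv_positive 1], yielding [twice Tn], i.e. the
    word [1...10] in binary; incrementing it (with [incr] below) gives
    [Tn], the all-ones word, which indeed represents [-1] modulo [2^31],
    namely [2^31 - 1]. *)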
(** A simple incrementation function *) Definition incr : int31 -> int31 := recr int31 In (fun b si rec => match b with | D0 => sneakl D1 si | D1 => sneakl D0 rec end). (** We can now define the conversion from Z to int31. *) Definition phi_inv : Z -> int31 := fun n => match n with | Z0 => On | Zpos p => phi_inv_positive p | Zneg p => incr (complement_negative p) end. (** [phi_inv2] is similar to [phi_inv] but returns a double word [zn2z int31] *) Definition phi_inv2 n := match n with | Z0 => W0 | _ => WW (phi_inv (n/base)%Z) (phi_inv n) end. (** [phi2] is similar to [phi] but takes a double word (two args) *) Definition phi2 nh nl := ((phi nh)*base+(phi nl))%Z. (** * Addition *) (** Addition modulo [2^31] *) Definition add31 (n m : int31) := phi_inv ((phi n)+(phi m)). Notation "n + m" := (add31 n m) : int31_scope. (** Addition with carry (the result is thus exact) *) (* spiwack : when executed in non-compiled*) (* mode, (phi n)+(phi m) is computed twice*) (* it may be considered to optimize it *) Definition add31c (n m : int31) := let npm := n+m in match (phi npm ?= (phi n)+(phi m))%Z with | Eq => C0 npm | _ => C1 npm end. Notation "n '+c' m" := (add31c n m) (at level 50, no associativity) : int31_scope. (** Addition plus one with carry (the result is thus exact) *) Definition add31carryc (n m : int31) := let npmpone_exact := ((phi n)+(phi m)+1)%Z in let npmpone := phi_inv npmpone_exact in match (phi npmpone ?= npmpone_exact)%Z with | Eq => C0 npmpone | _ => C1 npmpone end. (** * Substraction *) (** Subtraction modulo [2^31] *) Definition sub31 (n m : int31) := phi_inv ((phi n)-(phi m)). Notation "n - m" := (sub31 n m) : int31_scope. (** Subtraction with carry (thus exact) *) Definition sub31c (n m : int31) := let nmm := n-m in match (phi nmm ?= (phi n)-(phi m))%Z with | Eq => C0 nmm | _ => C1 nmm end. Notation "n '-c' m" := (sub31c n m) (at level 50, no associativity) : int31_scope. (** subtraction minus one with carry (thus exact) *) Definition sub31carryc (n m : int31) := let nmmmone_exact := ((phi n)-(phi m)-1)%Z in let nmmmone := phi_inv nmmmone_exact in match (phi nmmmone ?= nmmmone_exact)%Z with | Eq => C0 nmmmone | _ => C1 nmmmone end. (** Opposite *) Definition opp31 x := On - x. Notation "- x" := (opp31 x) : int31_scope. (** Multiplication *) (** multiplication modulo [2^31] *) Definition mul31 (n m : int31) := phi_inv ((phi n)*(phi m)). Notation "n * m" := (mul31 n m) : int31_scope. (** multiplication with double word result (thus exact) *) Definition mul31c (n m : int31) := phi_inv2 ((phi n)*(phi m)). Notation "n '*c' m" := (mul31c n m) (at level 40, no associativity) : int31_scope. (** * Division *) (** Division of a double size word modulo [2^31] *) Definition div3121 (nh nl m : int31) := let (q,r) := Z.div_eucl (phi2 nh nl) (phi m) in (phi_inv q, phi_inv r). (** Division modulo [2^31] *) Definition div31 (n m : int31) := let (q,r) := Z.div_eucl (phi n) (phi m) in (phi_inv q, phi_inv r). Notation "n / m" := (div31 n m) : int31_scope. (** * Unsigned comparison *) Definition compare31 (n m : int31) := ((phi n)?=(phi m))%Z. Notation "n ?= m" := (compare31 n m) (at level 70, no associativity) : int31_scope. Definition eqb31 (n m : int31) := match n ?= m with Eq => true | _ => false end. (** Computing the [i]-th iterate of a function: [iter_int31 i A f = f^i] *) Definition iter_int31 i A f := recr (A->A) (fun x => x) (fun b si rec => match b with | D0 => fun x => rec (rec x) | D1 => fun x => f (rec (rec x)) end) i. 
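(** In other words, [iter_int31] performs a binary-exponentiation-style
    recursion over the bits of [i], least significant bit first: at each
    step the partial iterator is squared, and [f] is applied once more
    when the current bit is [D1]. For example (informal check), when [i]
    represents 5 (binary 101) the result is [fun x => f (f (f (f (f x))))],
    i.e. [f^5]. *)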
(** Combining the [(31-p)] low bits of [i] above the [p] high bits of [j]: [addmuldiv31 p i j = i*2^p+j/2^(31-p)] (modulo [2^31]) *) Definition addmuldiv31 p i j := let (res, _ ) := iter_int31 p (int31*int31) (fun ij => let (i,j) := ij in (sneakl (firstl j) i, shiftl j)) (i,j) in res. Register add31 as int31 plus in "coq_int31" by True. Register add31c as int31 plusc in "coq_int31" by True. Register add31carryc as int31 pluscarryc in "coq_int31" by True. Register sub31 as int31 minus in "coq_int31" by True. Register sub31c as int31 minusc in "coq_int31" by True. Register sub31carryc as int31 minuscarryc in "coq_int31" by True. Register mul31 as int31 times in "coq_int31" by True. Register mul31c as int31 timesc in "coq_int31" by True. Register div3121 as int31 div21 in "coq_int31" by True. Register div31 as int31 div in "coq_int31" by True. Register compare31 as int31 compare in "coq_int31" by True. Register addmuldiv31 as int31 addmuldiv in "coq_int31" by True. Fixpoint euler (guard:nat) (i j:int31) {struct guard} := match guard with | O => In | S p => match j ?= On with | Eq => i | _ => euler p j (let (_, r ) := i/j in r) end end. Definition gcd31 (i j:int31) := euler (2*size)%nat i j. (** Square root functions using newton iteration we use a very naive upper-bound on the iteration 2^31 instead of the usual 31. **) Definition sqrt31_step (rec: int31 -> int31 -> int31) (i j: int31) := Eval lazy delta [Twon] in let (quo,_) := i/j in match quo ?= j with Lt => rec i (fst ((j + quo)/Twon)) | _ => j end. Fixpoint iter31_sqrt (n: nat) (rec: int31 -> int31 -> int31) (i j: int31) {struct n} : int31 := sqrt31_step (match n with O => rec | S n => (iter31_sqrt n (iter31_sqrt n rec)) end) i j. Definition sqrt31 i := Eval lazy delta [On In Twon] in match compare31 In i with Gt => On | Eq => In | Lt => iter31_sqrt 31 (fun i j => j) i (fst (i/Twon)) end. Definition v30 := Eval compute in (addmuldiv31 (phi_inv (Z.of_nat size - 1)) In On). Definition sqrt312_step (rec: int31 -> int31 -> int31 -> int31) (ih il j: int31) := Eval lazy delta [Twon v30] in match ih ?= j with Eq => j | Gt => j | _ => let (quo,_) := div3121 ih il j in match quo ?= j with Lt => let m := match j +c quo with C0 m1 => fst (m1/Twon) | C1 m1 => fst (m1/Twon) + v30 end in rec ih il m | _ => j end end. Fixpoint iter312_sqrt (n: nat) (rec: int31 -> int31 -> int31 -> int31) (ih il j: int31) {struct n} : int31 := sqrt312_step (match n with O => rec | S n => (iter312_sqrt n (iter312_sqrt n rec)) end) ih il j. Definition sqrt312 ih il := Eval lazy delta [On In] in let s := iter312_sqrt 31 (fun ih il j => j) ih il Tn in match s *c s with W0 => (On, C0 On) (* impossible *) | WW ih1 il1 => match il -c il1 with C0 il2 => match ih ?= ih1 with Gt => (s, C1 il2) | _ => (s, C0 il2) end | C1 il2 => match (ih - In) ?= ih1 with (* we could parametrize ih - 1 *) Gt => (s, C1 il2) | _ => (s, C0 il2) end end end. Fixpoint p2i n p : (N*int31)%type := match n with | O => (Npos p, On) | S n => match p with | xO p => let (r,i) := p2i n p in (r, Twon*i) | xI p => let (r,i) := p2i n p in (r, Twon*i+In) | xH => (N0, In) end end. Definition positive_to_int31 (p:positive) := p2i size p. (** Constant 31 converted into type int31. It is used as default answer for numbers of zeros in [head0] and [tail0] *) Definition T31 : int31 := Eval compute in phi_inv (Z.of_nat size). Definition head031 (i:int31) := recl _ (fun _ => T31) (fun b si rec n => match b with | D0 => rec (add31 n In) | D1 => n end) i On. 
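(** For example (illustrative): [head031 On] returns the default answer
    [T31] (i.e. 31), since the all-zero word has no [D1] bit, while
    [head031 In] returns 30, the number of [D0] digits in front of the
    single [D1] bit in the representation of 1. *)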
Definition tail031 (i:int31) := recr _ (fun _ => T31) (fun b si rec n => match b with | D0 => rec (add31 n In) | D1 => n end) i On. Register head031 as int31 head0 in "coq_int31" by True. Register tail031 as int31 tail0 in "coq_int31" by True. coq-8.4pl2/theories/Numbers/Cyclic/Int31/Ring31.v0000640000175000001440000000601112010532755020420 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr:true | ?t::?l => match t with | D1 => isInt31cst_lst l | D0 => isInt31cst_lst l | _ => constr:false end | _ => constr:false end. Ltac isInt31cst t := match t with | I31 ?i0 ?i1 ?i2 ?i3 ?i4 ?i5 ?i6 ?i7 ?i8 ?i9 ?i10 ?i11 ?i12 ?i13 ?i14 ?i15 ?i16 ?i17 ?i18 ?i19 ?i20 ?i21 ?i22 ?i23 ?i24 ?i25 ?i26 ?i27 ?i28 ?i29 ?i30 => let l := constr:(i0::i1::i2::i3::i4::i5::i6::i7::i8::i9::i10 ::i11::i12::i13::i14::i15::i16::i17::i18::i19::i20 ::i21::i22::i23::i24::i25::i26::i27::i28::i29::i30::nil) in isInt31cst_lst l | Int31.On => constr:true | Int31.In => constr:true | Int31.Tn => constr:true | Int31.Twon => constr:true | _ => constr:false end. Ltac Int31cst t := match isInt31cst t with | true => constr:t | false => constr:NotConstant end. (** The generic ring structure inferred from the Cyclic structure *) Module Int31ring := CyclicRing Int31Cyclic. (** Unlike in the generic [CyclicRing], we can use Leibniz here. *) Lemma Int31_canonic : forall x y, phi x = phi y -> x = y. Proof. intros x y EQ. now rewrite <- (phi_inv_phi x), <- (phi_inv_phi y), EQ. Qed. Lemma ring_theory_switch_eq : forall A (R R':A->A->Prop) zero one add mul sub opp, (forall x y : A, R x y -> R' x y) -> ring_theory zero one add mul sub opp R -> ring_theory zero one add mul sub opp R'. Proof. intros A R R' zero one add mul sub opp Impl Ring. constructor; intros; apply Impl; apply Ring. Qed. Lemma Int31Ring : ring_theory 0 1 add31 mul31 sub31 opp31 Logic.eq. Proof. exact (ring_theory_switch_eq _ _ _ _ _ _ _ _ _ Int31_canonic Int31ring.CyclicRing). Qed. Lemma eqb31_eq : forall x y, eqb31 x y = true <-> x=y. Proof. unfold eqb31. intros x y. rewrite Cyclic31.spec_compare. case Z.compare_spec. intuition. apply Int31_canonic; auto. intuition; subst; auto with zarith; try discriminate. intuition; subst; auto with zarith; try discriminate. Qed. Lemma eqb31_correct : forall x y, eqb31 x y = true -> x=y. Proof. now apply eqb31_eq. Qed. Add Ring Int31Ring : Int31Ring (decidable eqb31_correct, constants [Int31cst]). Section TestRing. Let test : forall x y, 1 + x*y + x*x + 1 = 1*1 + 1 + y*x + 1*x*x. intros. ring. Qed. End TestRing. coq-8.4pl2/theories/Numbers/Cyclic/Int31/Cyclic31.v0000640000175000001440000021761612010532755020746 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* x=0. Proof. destruct x; simpl; intros. repeat match goal with H:(if ?d then _ else _) = true |- _ => destruct d; try discriminate end. reflexivity. Qed. Lemma iszero_not_eq0 : forall x, iszero x = false -> x<>0. Proof. intros x H Eq; rewrite Eq in H; simpl in *; discriminate. Qed. Lemma sneakl_shiftr : forall x, x = sneakl (firstr x) (shiftr x). Proof. destruct x; simpl; auto. Qed. Lemma sneakr_shiftl : forall x, x = sneakr (firstl x) (shiftl x). Proof. destruct x; simpl; auto. Qed. Lemma twice_zero : forall x, twice x = 0 <-> twice_plus_one x = 1. Proof. destruct x; simpl in *; split; intro H; injection H; intros; subst; auto. Qed. 
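(** The next lemma is the basic case analysis used throughout this file:
    every [x] is either [twice (shiftr x)] or [twice_plus_one (shiftr x)],
    depending on [firstr x]. On the [Z] side this reads, informally, as
    [phi x = 2 * phi (shiftr x)] or [phi x = 2 * phi (shiftr x) + 1]
    (the precise statements are [phi_eqn1] and [phi_eqn2] below). *)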
Lemma twice_or_twice_plus_one : forall x, x = twice (shiftr x) \/ x = twice_plus_one (shiftr x). Proof. intros; case_eq (firstr x); intros. destruct x; simpl in *; rewrite H; auto. destruct x; simpl in *; rewrite H; auto. Qed. (** * Iterated shift to the right *) Definition nshiftr n x := iter_nat n _ shiftr x. Lemma nshiftr_S : forall n x, nshiftr (S n) x = shiftr (nshiftr n x). Proof. reflexivity. Qed. Lemma nshiftr_S_tail : forall n x, nshiftr (S n) x = nshiftr n (shiftr x). Proof. induction n; simpl; auto. intros; rewrite nshiftr_S, IHn, nshiftr_S; auto. Qed. Lemma nshiftr_n_0 : forall n, nshiftr n 0 = 0. Proof. induction n; simpl; auto. rewrite nshiftr_S, IHn; auto. Qed. Lemma nshiftr_size : forall x, nshiftr size x = 0. Proof. destruct x; simpl; auto. Qed. Lemma nshiftr_above_size : forall k x, size<=k -> nshiftr k x = 0. Proof. intros. replace k with ((k-size)+size)%nat by omega. induction (k-size)%nat; auto. rewrite nshiftr_size; auto. simpl; rewrite nshiftr_S, IHn; auto. Qed. (** * Iterated shift to the left *) Definition nshiftl n x := iter_nat n _ shiftl x. Lemma nshiftl_S : forall n x, nshiftl (S n) x = shiftl (nshiftl n x). Proof. reflexivity. Qed. Lemma nshiftl_S_tail : forall n x, nshiftl (S n) x = nshiftl n (shiftl x). Proof. induction n; simpl; auto. intros; rewrite nshiftl_S, IHn, nshiftl_S; auto. Qed. Lemma nshiftl_n_0 : forall n, nshiftl n 0 = 0. Proof. induction n; simpl; auto. rewrite nshiftl_S, IHn; auto. Qed. Lemma nshiftl_size : forall x, nshiftl size x = 0. Proof. destruct x; simpl; auto. Qed. Lemma nshiftl_above_size : forall k x, size<=k -> nshiftl k x = 0. Proof. intros. replace k with ((k-size)+size)%nat by omega. induction (k-size)%nat; auto. rewrite nshiftl_size; auto. simpl; rewrite nshiftl_S, IHn; auto. Qed. Lemma firstr_firstl : forall x, firstr x = firstl (nshiftl (pred size) x). Proof. destruct x; simpl; auto. Qed. Lemma firstl_firstr : forall x, firstl x = firstr (nshiftr (pred size) x). Proof. destruct x; simpl; auto. Qed. (** More advanced results about [nshiftr] *) Lemma nshiftr_predsize_0_firstl : forall x, nshiftr (pred size) x = 0 -> firstl x = D0. Proof. destruct x; compute; intros H; injection H; intros; subst; auto. Qed. Lemma nshiftr_0_propagates : forall n p x, n <= p -> nshiftr n x = 0 -> nshiftr p x = 0. Proof. intros. replace p with ((p-n)+n)%nat by omega. induction (p-n)%nat. simpl; auto. simpl; rewrite nshiftr_S; rewrite IHn0; auto. Qed. Lemma nshiftr_0_firstl : forall n x, n < size -> nshiftr n x = 0 -> firstl x = D0. Proof. intros. apply nshiftr_predsize_0_firstl. apply nshiftr_0_propagates with n; auto; omega. Qed. (** * Some induction principles over [int31] *) (** Not used for the moment. Are they really useful ? *) Lemma int31_ind_sneakl : forall P : int31->Prop, P 0 -> (forall x d, P x -> P (sneakl d x)) -> forall x, P x. Proof. intros. assert (forall n, n<=size -> P (nshiftr (size - n) x)). induction n; intros. rewrite nshiftr_size; auto. rewrite sneakl_shiftr. apply H0. change (P (nshiftr (S (size - S n)) x)). replace (S (size - S n))%nat with (size - n)%nat by omega. apply IHn; omega. change x with (nshiftr (size-size) x); auto. Qed. Lemma int31_ind_twice : forall P : int31->Prop, P 0 -> (forall x, P x -> P (twice x)) -> (forall x, P x -> P (twice_plus_one x)) -> forall x, P x. Proof. induction x using int31_ind_sneakl; auto. destruct d; auto. Qed. (** * Some generic results about [recr] *) Section Recr. (** [recr] satisfies the fixpoint equation used for its definition. 
*) Variable (A:Type)(case0:A)(caserec:digits->int31->A->A). Lemma recr_aux_eqn : forall n x, iszero x = false -> recr_aux (S n) A case0 caserec x = caserec (firstr x) (shiftr x) (recr_aux n A case0 caserec (shiftr x)). Proof. intros; simpl; rewrite H; auto. Qed. Lemma recr_aux_converges : forall n p x, n <= size -> n <= p -> recr_aux n A case0 caserec (nshiftr (size - n) x) = recr_aux p A case0 caserec (nshiftr (size - n) x). Proof. induction n. simpl; intros. rewrite nshiftr_size; destruct p; simpl; auto. intros. destruct p. inversion H0. unfold recr_aux; fold recr_aux. destruct (iszero (nshiftr (size - S n) x)); auto. f_equal. change (shiftr (nshiftr (size - S n) x)) with (nshiftr (S (size - S n)) x). replace (S (size - S n))%nat with (size - n)%nat by omega. apply IHn; auto with arith. Qed. Lemma recr_eqn : forall x, iszero x = false -> recr A case0 caserec x = caserec (firstr x) (shiftr x) (recr A case0 caserec (shiftr x)). Proof. intros. unfold recr. change x with (nshiftr (size - size) x). rewrite (recr_aux_converges size (S size)); auto with arith. rewrite recr_aux_eqn; auto. Qed. (** [recr] is usually equivalent to a variant [recrbis] written without [iszero] check. *) Fixpoint recrbis_aux (n:nat)(A:Type)(case0:A)(caserec:digits->int31->A->A) (i:int31) : A := match n with | O => case0 | S next => let si := shiftr i in caserec (firstr i) si (recrbis_aux next A case0 caserec si) end. Definition recrbis := recrbis_aux size. Hypothesis case0_caserec : caserec D0 0 case0 = case0. Lemma recrbis_aux_equiv : forall n x, recrbis_aux n A case0 caserec x = recr_aux n A case0 caserec x. Proof. induction n; simpl; auto; intros. case_eq (iszero x); intros; [ | f_equal; auto ]. rewrite (iszero_eq0 _ H); simpl; auto. replace (recrbis_aux n A case0 caserec 0) with case0; auto. clear H IHn; induction n; simpl; congruence. Qed. Lemma recrbis_equiv : forall x, recrbis A case0 caserec x = recr A case0 caserec x. Proof. intros; apply recrbis_aux_equiv; auto. Qed. End Recr. (** * Incrementation *) Section Incr. (** Variant of [incr] via [recrbis] *) Let Incr (b : digits) (si rec : int31) := match b with | D0 => sneakl D1 si | D1 => sneakl D0 rec end. Definition incrbis_aux n x := recrbis_aux n _ In Incr x. Lemma incrbis_aux_equiv : forall x, incrbis_aux size x = incr x. Proof. unfold incr, recr, incrbis_aux; fold Incr; intros. apply recrbis_aux_equiv; auto. Qed. (** Recursive equations satisfied by [incr] *) Lemma incr_eqn1 : forall x, firstr x = D0 -> incr x = twice_plus_one (shiftr x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0); simpl; auto. unfold incr; rewrite recr_eqn; fold incr; auto. rewrite H; auto. Qed. Lemma incr_eqn2 : forall x, firstr x = D1 -> incr x = twice (incr (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in H; simpl in H; discriminate. unfold incr; rewrite recr_eqn; fold incr; auto. rewrite H; auto. Qed. Lemma incr_twice : forall x, incr (twice x) = twice_plus_one x. Proof. intros. rewrite incr_eqn1; destruct x; simpl; auto. Qed. Lemma incr_twice_plus_one_firstl : forall x, firstl x = D0 -> incr (twice_plus_one x) = twice (incr x). Proof. intros. rewrite incr_eqn2; [ | destruct x; simpl; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. (** The previous result is actually true even without the constraint on [firstl], but this is harder to prove (see later). *) End Incr. (** * Conversion to [Z] : the [phi] function *) Section Phi. 
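(** Reminder of how [phi] computes (an illustration): bits are read from
    the least significant one, so for a word whose three low bits are
    [D1], [D0], [D1] (all remaining bits being [D0]) we get
    [Z.succ_double (Z.double (Z.succ_double 0)) = 5], as expected for the
    binary number 101. *)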
(** Variant of [phi] via [recrbis] *) Let Phi := fun b (_:int31) => match b with D0 => Z.double | D1 => Z.succ_double end. Definition phibis_aux n x := recrbis_aux n _ Z0 Phi x. Lemma phibis_aux_equiv : forall x, phibis_aux size x = phi x. Proof. unfold phi, recr, phibis_aux; fold Phi; intros. apply recrbis_aux_equiv; auto. Qed. (** Recursive equations satisfied by [phi] *) Lemma phi_eqn1 : forall x, firstr x = D0 -> phi x = Z.double (phi (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0); simpl; auto. intros; unfold phi; rewrite recr_eqn; fold phi; auto. rewrite H; auto. Qed. Lemma phi_eqn2 : forall x, firstr x = D1 -> phi x = Z.succ_double (phi (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in H; simpl in H; discriminate. intros; unfold phi; rewrite recr_eqn; fold phi; auto. rewrite H; auto. Qed. Lemma phi_twice_firstl : forall x, firstl x = D0 -> phi (twice x) = Z.double (phi x). Proof. intros. rewrite phi_eqn1; auto; [ | destruct x; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. Lemma phi_twice_plus_one_firstl : forall x, firstl x = D0 -> phi (twice_plus_one x) = Z.succ_double (phi x). Proof. intros. rewrite phi_eqn2; auto; [ | destruct x; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. End Phi. (** [phi x] is positive and lower than [2^31] *) Lemma phibis_aux_pos : forall n x, (0 <= phibis_aux n x)%Z. Proof. induction n. simpl; unfold phibis_aux; simpl; auto with zarith. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr x)). destruct (firstr x). specialize IHn with (shiftr x); rewrite Z.double_spec; omega. specialize IHn with (shiftr x); rewrite Z.succ_double_spec; omega. Qed. Lemma phibis_aux_bounded : forall n x, n <= size -> (phibis_aux n (nshiftr (size-n) x) < 2 ^ (Z.of_nat n))%Z. Proof. induction n. simpl; unfold phibis_aux; simpl; auto with zarith. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr (nshiftr (size - S n) x))). assert (shiftr (nshiftr (size - S n) x) = nshiftr (size-n) x). replace (size - n)%nat with (S (size - (S n))) by omega. simpl; auto. rewrite H0. assert (H1 : n <= size) by omega. specialize (IHn x H1). set (y:=phibis_aux n (nshiftr (size - n) x)) in *. rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. case_eq (firstr (nshiftr (size - S n) x)); intros. rewrite Z.double_spec; auto with zarith. rewrite Z.succ_double_spec; auto with zarith. Qed. Lemma phi_bounded : forall x, (0 <= phi x < 2 ^ (Z.of_nat size))%Z. Proof. intros. rewrite <- phibis_aux_equiv. split. apply phibis_aux_pos. change x with (nshiftr (size-size) x). apply phibis_aux_bounded; auto. Qed. Lemma phibis_aux_lowerbound : forall n x, firstr (nshiftr n x) = D1 -> (2 ^ Z.of_nat n <= phibis_aux (S n) x)%Z. Proof. induction n. intros. unfold nshiftr in H; simpl in *. unfold phibis_aux, recrbis_aux. rewrite H, Z.succ_double_spec; omega. intros. remember (S n) as m. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux m (shiftr x)). subst m. rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. assert (2^(Z.of_nat n) <= phibis_aux (S n) (shiftr x))%Z. apply IHn. rewrite <- nshiftr_S_tail; auto. destruct (firstr x). change (Z.double (phibis_aux (S n) (shiftr x))) with (2*(phibis_aux (S n) (shiftr x)))%Z. omega. rewrite Z.succ_double_spec; omega. Qed. Lemma phi_lowerbound : forall x, firstl x = D1 -> (2^(Z.of_nat (pred size)) <= phi x)%Z. Proof. intros. 
generalize (phibis_aux_lowerbound (pred size) x). rewrite <- firstl_firstr. change (S (pred size)) with size; auto. rewrite phibis_aux_equiv; auto. Qed. (** * Equivalence modulo [2^n] *) Section EqShiftL. (** After killing [n] bits at the left, are the numbers equal ?*) Definition EqShiftL n x y := nshiftl n x = nshiftl n y. Lemma EqShiftL_zero : forall x y, EqShiftL O x y <-> x = y. Proof. unfold EqShiftL; intros; unfold nshiftl; simpl; split; auto. Qed. Lemma EqShiftL_size : forall k x y, size<=k -> EqShiftL k x y. Proof. red; intros; rewrite 2 nshiftl_above_size; auto. Qed. Lemma EqShiftL_le : forall k k' x y, k <= k' -> EqShiftL k x y -> EqShiftL k' x y. Proof. unfold EqShiftL; intros. replace k' with ((k'-k)+k)%nat by omega. remember (k'-k)%nat as n. clear Heqn H k'. induction n; simpl; auto. rewrite 2 nshiftl_S; f_equal; auto. Qed. Lemma EqShiftL_firstr : forall k x y, k < size -> EqShiftL k x y -> firstr x = firstr y. Proof. intros. rewrite 2 firstr_firstl. f_equal. apply EqShiftL_le with k; auto. unfold size. auto with arith. Qed. Lemma EqShiftL_twice : forall k x y, EqShiftL k (twice x) (twice y) <-> EqShiftL (S k) x y. Proof. intros; unfold EqShiftL. rewrite 2 nshiftl_S_tail; split; auto. Qed. (** * From int31 to list of digits. *) (** Lower (=rightmost) bits comes first. *) Definition i2l := recrbis _ nil (fun d _ rec => d::rec). Lemma i2l_length : forall x, length (i2l x) = size. Proof. intros; reflexivity. Qed. Fixpoint lshiftl l x := match l with | nil => x | d::l => sneakl d (lshiftl l x) end. Definition l2i l := lshiftl l On. Lemma l2i_i2l : forall x, l2i (i2l x) = x. Proof. destruct x; compute; auto. Qed. Lemma i2l_sneakr : forall x d, i2l (sneakr d x) = tail (i2l x) ++ d::nil. Proof. destruct x; compute; auto. Qed. Lemma i2l_sneakl : forall x d, i2l (sneakl d x) = d :: removelast (i2l x). Proof. destruct x; compute; auto. Qed. Lemma i2l_l2i : forall l, length l = size -> i2l (l2i l) = l. Proof. repeat (destruct l as [ |? l]; [intros; discriminate | ]). destruct l; [ | intros; discriminate]. intros _; compute; auto. Qed. Fixpoint cstlist (A:Type)(a:A) n := match n with | O => nil | S n => a::cstlist _ a n end. Lemma i2l_nshiftl : forall n x, n<=size -> i2l (nshiftl n x) = cstlist _ D0 n ++ firstn (size-n) (i2l x). Proof. induction n. intros. assert (firstn (size-0) (i2l x) = i2l x). rewrite <- minus_n_O, <- (i2l_length x). induction (i2l x); simpl; f_equal; auto. rewrite H0; clear H0. reflexivity. intros. rewrite nshiftl_S. unfold shiftl; rewrite i2l_sneakl. simpl cstlist. rewrite <- app_comm_cons; f_equal. rewrite IHn; [ | omega]. rewrite removelast_app. f_equal. replace (size-n)%nat with (S (size - S n))%nat by omega. rewrite removelast_firstn; auto. rewrite i2l_length; omega. generalize (firstn_length (size-n) (i2l x)). rewrite i2l_length. intros H0 H1; rewrite H1 in H0. rewrite min_l in H0 by omega. simpl length in H0. omega. Qed. (** [i2l] can be used to define a relation equivalent to [EqShiftL] *) Lemma EqShiftL_i2l : forall k x y, EqShiftL k x y <-> firstn (size-k) (i2l x) = firstn (size-k) (i2l y). Proof. intros. destruct (le_lt_dec size k). split; intros. replace (size-k)%nat with O by omega. unfold firstn; auto. apply EqShiftL_size; auto. unfold EqShiftL. assert (k <= size) by omega. split; intros. assert (i2l (nshiftl k x) = i2l (nshiftl k y)) by (f_equal; auto). rewrite 2 i2l_nshiftl in H1; auto. eapply app_inv_head; eauto. assert (i2l (nshiftl k x) = i2l (nshiftl k y)). rewrite 2 i2l_nshiftl; auto. f_equal; auto. 
rewrite <- (l2i_i2l (nshiftl k x)), <- (l2i_i2l (nshiftl k y)). f_equal; auto. Qed. (** This equivalence allows to prove easily the following delicate result *) Lemma EqShiftL_twice_plus_one : forall k x y, EqShiftL k (twice_plus_one x) (twice_plus_one y) <-> EqShiftL (S k) x y. Proof. intros. destruct (le_lt_dec size k). split; intros; apply EqShiftL_size; auto. rewrite 2 EqShiftL_i2l. unfold twice_plus_one. rewrite 2 i2l_sneakl. replace (size-k)%nat with (S (size - S k))%nat by omega. remember (size - S k)%nat as n. remember (i2l x) as lx. remember (i2l y) as ly. simpl. rewrite 2 firstn_removelast. split; intros. injection H; auto. f_equal; auto. subst ly n; rewrite i2l_length; omega. subst lx n; rewrite i2l_length; omega. Qed. Lemma EqShiftL_shiftr : forall k x y, EqShiftL k x y -> EqShiftL (S k) (shiftr x) (shiftr y). Proof. intros. destruct (le_lt_dec size (S k)). apply EqShiftL_size; auto. case_eq (firstr x); intros. rewrite <- EqShiftL_twice. unfold twice; rewrite <- H0. rewrite <- sneakl_shiftr. rewrite (EqShiftL_firstr k x y); auto. rewrite <- sneakl_shiftr; auto. omega. rewrite <- EqShiftL_twice_plus_one. unfold twice_plus_one; rewrite <- H0. rewrite <- sneakl_shiftr. rewrite (EqShiftL_firstr k x y); auto. rewrite <- sneakl_shiftr; auto. omega. Qed. Lemma EqShiftL_incrbis : forall n k x y, n<=size -> (n+k=S size)%nat -> EqShiftL k x y -> EqShiftL k (incrbis_aux n x) (incrbis_aux n y). Proof. induction n; simpl; intros. red; auto. destruct (eq_nat_dec k size). subst k; apply EqShiftL_size; auto. unfold incrbis_aux; simpl; fold (incrbis_aux n (shiftr x)); fold (incrbis_aux n (shiftr y)). rewrite (EqShiftL_firstr k x y); auto; try omega. case_eq (firstr y); intros. rewrite EqShiftL_twice_plus_one. apply EqShiftL_shiftr; auto. rewrite EqShiftL_twice. apply IHn; try omega. apply EqShiftL_shiftr; auto. Qed. Lemma EqShiftL_incr : forall x y, EqShiftL 1 x y -> EqShiftL 1 (incr x) (incr y). Proof. intros. rewrite <- 2 incrbis_aux_equiv. apply EqShiftL_incrbis; auto. Qed. End EqShiftL. (** * More equations about [incr] *) Lemma incr_twice_plus_one : forall x, incr (twice_plus_one x) = twice (incr x). Proof. intros. rewrite incr_eqn2; [ | destruct x; simpl; auto]. apply EqShiftL_incr. red; destruct x; simpl; auto. Qed. Lemma incr_firstr : forall x, firstr (incr x) <> firstr x. Proof. intros. case_eq (firstr x); intros. rewrite incr_eqn1; auto. destruct (shiftr x); simpl; discriminate. rewrite incr_eqn2; auto. destruct (incr (shiftr x)); simpl; discriminate. Qed. Lemma incr_inv : forall x y, incr x = twice_plus_one y -> x = twice y. Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in *; simpl in *. change (incr 0) with 1 in H. symmetry; rewrite twice_zero; auto. case_eq (firstr x); intros. rewrite incr_eqn1 in H; auto. clear H0; destruct x; destruct y; simpl in *. injection H; intros; subst; auto. elim (incr_firstr x). rewrite H1, H; destruct y; simpl; auto. Qed. (** * Conversion from [Z] : the [phi_inv] function *) (** First, recursive equations *) Lemma phi_inv_double_plus_one : forall z, phi_inv (Z.succ_double z) = twice_plus_one (phi_inv z). Proof. destruct z; simpl; auto. induction p; simpl. rewrite 2 incr_twice; auto. rewrite incr_twice, incr_twice_plus_one. f_equal. apply incr_inv; auto. auto. Qed. Lemma phi_inv_double : forall z, phi_inv (Z.double z) = twice (phi_inv z). Proof. destruct z; simpl; auto. rewrite incr_twice_plus_one; auto. Qed. Lemma phi_inv_incr : forall z, phi_inv (Z.succ z) = incr (phi_inv z). Proof. destruct z. simpl; auto. simpl; auto. 
induction p; simpl; auto. rewrite <- Pos.add_1_r, IHp, incr_twice_plus_one; auto. rewrite incr_twice; auto. simpl; auto. destruct p; simpl; auto. rewrite incr_twice; auto. f_equal. rewrite incr_twice_plus_one; auto. induction p; simpl; auto. rewrite incr_twice; auto. f_equal. rewrite incr_twice_plus_one; auto. Qed. (** [phi_inv o phi], the always-exact and easy-to-prove trip: from int31 to Z and then back to int31. *) Lemma phi_inv_phi_aux : forall n x, n <= size -> phi_inv (phibis_aux n (nshiftr (size-n) x)) = nshiftr (size-n) x. Proof. induction n. intros; simpl. rewrite nshiftr_size; auto. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr (nshiftr (size-S n) x))). assert (shiftr (nshiftr (size - S n) x) = nshiftr (size-n) x). replace (size - n)%nat with (S (size - (S n))); auto; omega. rewrite H0. case_eq (firstr (nshiftr (size - S n) x)); intros. rewrite phi_inv_double. rewrite IHn by omega. rewrite <- H0. remember (nshiftr (size - S n) x) as y. destruct y; simpl in H1; rewrite H1; auto. rewrite phi_inv_double_plus_one. rewrite IHn by omega. rewrite <- H0. remember (nshiftr (size - S n) x) as y. destruct y; simpl in H1; rewrite H1; auto. Qed. Lemma phi_inv_phi : forall x, phi_inv (phi x) = x. Proof. intros. rewrite <- phibis_aux_equiv. replace x with (nshiftr (size - size) x) by auto. apply phi_inv_phi_aux; auto. Qed. (** The other composition [phi o phi_inv] is harder to prove correct. In particular, an overflow can happen, so a modulo is needed. For the moment, we proceed via several steps, the first one being a detour through [positive_to_int31]. *) (** * [positive_to_int31] *) (** A variant of [p2i] with [twice] and [twice_plus_one] instead of [2*i] and [2*i+1] *) Fixpoint p2ibis n p : (N*int31)%type := match n with | O => (Npos p, On) | S n => match p with | xO p => let (r,i) := p2ibis n p in (r, twice i) | xI p => let (r,i) := p2ibis n p in (r, twice_plus_one i) | xH => (N0, In) end end. Lemma p2ibis_bounded : forall n p, nshiftr n (snd (p2ibis n p)) = 0. Proof. induction n. simpl; intros; auto. simpl; intros. destruct p; simpl. specialize IHn with p. destruct (p2ibis n p); simpl in *. rewrite nshiftr_S_tail. destruct (le_lt_dec size n). rewrite nshiftr_above_size; auto. assert (H:=nshiftr_0_firstl _ _ l IHn). replace (shiftr (twice_plus_one i)) with i; auto. destruct i; simpl in *; rewrite H; auto. specialize IHn with p. destruct (p2ibis n p); simpl in *. rewrite nshiftr_S_tail. destruct (le_lt_dec size n). rewrite nshiftr_above_size; auto. assert (H:=nshiftr_0_firstl _ _ l IHn). replace (shiftr (twice i)) with i; auto. destruct i; simpl in *; rewrite H; auto. rewrite nshiftr_S_tail; auto. replace (shiftr In) with 0; auto. apply nshiftr_n_0. Qed. Local Open Scope Z_scope. Lemma p2ibis_spec : forall n p, (n<=size)%nat -> Zpos p = (Z.of_N (fst (p2ibis n p)))*2^(Z.of_nat n) + phi (snd (p2ibis n p)). Proof. induction n; intros. simpl; rewrite Pos.mul_1_r; auto. replace (2^(Z.of_nat (S n)))%Z with (2*2^(Z.of_nat n))%Z by (rewrite <- Z.pow_succ_r, <- Zpos_P_of_succ_nat; auto with zarith). rewrite (Z.mul_comm 2). assert (n<=size)%nat by omega. destruct p; simpl; [ | | auto]; specialize (IHn p H0); generalize (p2ibis_bounded n p); destruct (p2ibis n p) as (r,i); simpl in *; intros. change (Zpos p~1) with (2*Zpos p + 1)%Z. rewrite phi_twice_plus_one_firstl, Z.succ_double_spec. rewrite IHn; ring. apply (nshiftr_0_firstl n); auto; try omega. change (Zpos p~0) with (2*Zpos p)%Z. rewrite phi_twice_firstl. change (Z.double (phi i)) with (2*(phi i))%Z. 
rewrite IHn; ring. apply (nshiftr_0_firstl n); auto; try omega. Qed. (** We now prove that this [p2ibis] is related to [phi_inv_positive] *) Lemma phi_inv_positive_p2ibis : forall n p, (n<=size)%nat -> EqShiftL (size-n) (phi_inv_positive p) (snd (p2ibis n p)). Proof. induction n. intros. apply EqShiftL_size; auto. intros. simpl p2ibis; destruct p; [ | | red; auto]; specialize IHn with p; destruct (p2ibis n p); simpl snd in *; simpl phi_inv_positive; rewrite ?EqShiftL_twice_plus_one, ?EqShiftL_twice; replace (S (size - S n))%nat with (size - n)%nat by omega; apply IHn; omega. Qed. (** This gives the expected result about [phi o phi_inv], at least for the positive case. *) Lemma phi_phi_inv_positive : forall p, phi (phi_inv_positive p) = (Zpos p) mod (2^(Z.of_nat size)). Proof. intros. replace (phi_inv_positive p) with (snd (p2ibis size p)). rewrite (p2ibis_spec size p) by auto. rewrite Z.add_comm, Z_mod_plus. symmetry; apply Zmod_small. apply phi_bounded. auto with zarith. symmetry. rewrite <- EqShiftL_zero. apply (phi_inv_positive_p2ibis size p); auto. Qed. (** Moreover, [p2ibis] is also related to [p2i] and hence to [positive_to_int31]. *) Lemma double_twice_firstl : forall x, firstl x = D0 -> (Twon*x = twice x)%int31. Proof. intros. unfold mul31. rewrite <- Z.double_spec, <- phi_twice_firstl, phi_inv_phi; auto. Qed. Lemma double_twice_plus_one_firstl : forall x, firstl x = D0 -> (Twon*x+In = twice_plus_one x)%int31. Proof. intros. rewrite double_twice_firstl; auto. unfold add31. rewrite phi_twice_firstl, <- Z.succ_double_spec, <- phi_twice_plus_one_firstl, phi_inv_phi; auto. Qed. Lemma p2i_p2ibis : forall n p, (n<=size)%nat -> p2i n p = p2ibis n p. Proof. induction n; simpl; auto; intros. destruct p; auto; specialize IHn with p; generalize (p2ibis_bounded n p); rewrite IHn; try omega; destruct (p2ibis n p); simpl; intros; f_equal; auto. apply double_twice_plus_one_firstl. apply (nshiftr_0_firstl n); auto; omega. apply double_twice_firstl. apply (nshiftr_0_firstl n); auto; omega. Qed. Lemma positive_to_int31_phi_inv_positive : forall p, snd (positive_to_int31 p) = phi_inv_positive p. Proof. intros; unfold positive_to_int31. rewrite p2i_p2ibis; auto. symmetry. rewrite <- EqShiftL_zero. apply (phi_inv_positive_p2ibis size); auto. Qed. Lemma positive_to_int31_spec : forall p, Zpos p = (Z.of_N (fst (positive_to_int31 p)))*2^(Z.of_nat size) + phi (snd (positive_to_int31 p)). Proof. unfold positive_to_int31. intros; rewrite p2i_p2ibis; auto. apply p2ibis_spec; auto. Qed. (** Thanks to the result about [phi o phi_inv_positive], we can now easily establish the most general results about [phi o twice] and so on. *) Lemma phi_twice : forall x, phi (twice x) = (Z.double (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_double. assert (0 <= Z.double (phi x)). rewrite Z.double_spec; generalize (phi_bounded x); omega. destruct (Z.double (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. Lemma phi_twice_plus_one : forall x, phi (twice_plus_one x) = (Z.succ_double (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_double_plus_one. assert (0 <= Z.succ_double (phi x)). rewrite Z.succ_double_spec; generalize (phi_bounded x); omega. destruct (Z.succ_double (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. Lemma phi_incr : forall x, phi (incr x) = (Z.succ (phi x)) mod 2^(Z.of_nat size). Proof. intros. 
pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_incr. assert (0 <= Z.succ (phi x)). change (Z.succ (phi x)) with ((phi x)+1)%Z; generalize (phi_bounded x); omega. destruct (Z.succ (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. (** With the previous results, we can deal with [phi o phi_inv] even in the negative case *) Lemma phi_phi_inv_negative : forall p, phi (incr (complement_negative p)) = (Zneg p) mod 2^(Z.of_nat size). Proof. induction p. simpl complement_negative. rewrite phi_incr in IHp. rewrite incr_twice, phi_twice_plus_one. remember (phi (complement_negative p)) as q. rewrite Z.succ_double_spec. replace (2*q+1) with (2*(Z.succ q)-1) by omega. rewrite <- Zminus_mod_idemp_l, <- Zmult_mod_idemp_r, IHp. rewrite Zmult_mod_idemp_r, Zminus_mod_idemp_l; auto with zarith. simpl complement_negative. rewrite incr_twice_plus_one, phi_twice. remember (phi (incr (complement_negative p))) as q. rewrite Z.double_spec, IHp, Zmult_mod_idemp_r; auto with zarith. simpl; auto. Qed. Lemma phi_phi_inv : forall z, phi (phi_inv z) = z mod 2 ^ (Z.of_nat size). Proof. destruct z. simpl; auto. apply phi_phi_inv_positive. apply phi_phi_inv_negative. Qed. End Basics. Instance int31_ops : ZnZ.Ops int31 := { digits := 31%positive; (* number of digits *) zdigits := 31; (* number of digits *) to_Z := phi; (* conversion to Z *) of_pos := positive_to_int31; (* positive -> N*int31 : p => N,i where p = N*2^31+phi i *) head0 := head031; (* number of head 0 *) tail0 := tail031; (* number of tail 0 *) zero := 0; one := 1; minus_one := Tn; (* 2^31 - 1 *) compare := compare31; eq0 := fun i => match i ?= 0 with Eq => true | _ => false end; opp_c := fun i => 0 -c i; opp := opp31; opp_carry := fun i => 0-i-1; succ_c := fun i => i +c 1; add_c := add31c; add_carry_c := add31carryc; succ := fun i => i + 1; add := add31; add_carry := fun i j => i + j + 1; pred_c := fun i => i -c 1; sub_c := sub31c; sub_carry_c := sub31carryc; pred := fun i => i - 1; sub := sub31; sub_carry := fun i j => i - j - 1; mul_c := mul31c; mul := mul31; square_c := fun x => x *c x; div21 := div3121; div_gt := div31; (* this is supposed to be the special case of division a/b where a > b *) div := div31; modulo_gt := fun i j => let (_,r) := i/j in r; modulo := fun i j => let (_,r) := i/j in r; gcd_gt := gcd31; gcd := gcd31; add_mul_div := addmuldiv31; pos_mod := (* modulo 2^p *) fun p i => match p ?= 31 with | Lt => addmuldiv31 p 0 (addmuldiv31 (31-p) i 0) | _ => i end; is_even := fun i => let (_,r) := i/2 in match r ?= 0 with Eq => true | _ => false end; sqrt2 := sqrt312; sqrt := sqrt31 }. Section Int31_Specs. Local Open Scope Z_scope. Notation "[| x |]" := (phi x) (at level 0, x at level 99). Local Notation wB := (2 ^ (Z.of_nat size)). Lemma wB_pos : wB > 0. Proof. auto with zarith. Qed. Notation "[+| c |]" := (interp_carry 1 wB phi c) (at level 0, x at level 99). Notation "[-| c |]" := (interp_carry (-1) wB phi c) (at level 0, x at level 99). Notation "[|| x ||]" := (zn2z_to_Z wB phi x) (at level 0, x at level 99). Lemma spec_zdigits : [| 31 |] = 31. Proof. reflexivity. Qed. Lemma spec_more_than_1_digit: 1 < 31. Proof. auto with zarith. Qed. Lemma spec_0 : [| 0 |] = 0. Proof. reflexivity. Qed. Lemma spec_1 : [| 1 |] = 1. Proof. reflexivity. Qed. Lemma spec_m1 : [| Tn |] = wB - 1. Proof. reflexivity. Qed. Lemma spec_compare : forall x y, (x ?= y)%int31 = ([|x|] ?= [|y|]). Proof. reflexivity. Qed. (** Addition *) Lemma spec_add_c : forall x y, [+|add31c x y|] = [|x|] + [|y|]. Proof. 
intros; unfold add31c, add31, interp_carry; rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X+Y) mod wB ?= X+Y <> Eq -> [+|C1 (phi_inv (X+Y))|] = X+Y). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X+Y) wB). contradict H1; auto using Zmod_small with zarith. rewrite <- (Z_mod_plus_full (X+Y) (-1) wB). rewrite Zmod_small; romega. generalize (Z.compare_eq ((X+Y) mod wB) (X+Y)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_succ_c : forall x, [+|add31c x 1|] = [|x|] + 1. Proof. intros; apply spec_add_c. Qed. Lemma spec_add_carry_c : forall x y, [+|add31carryc x y|] = [|x|] + [|y|] + 1. Proof. intros. unfold add31carryc, interp_carry; rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X+Y+1) mod wB ?= X+Y+1 <> Eq -> [+|C1 (phi_inv (X+Y+1))|] = X+Y+1). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X+Y+1) wB). contradict H1; auto using Zmod_small with zarith. rewrite <- (Z_mod_plus_full (X+Y+1) (-1) wB). rewrite Zmod_small; romega. generalize (Z.compare_eq ((X+Y+1) mod wB) (X+Y+1)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_add : forall x y, [|x+y|] = ([|x|] + [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_add_carry : forall x y, [|x+y+1|] = ([|x|] + [|y|] + 1) mod wB. Proof. unfold add31; intros. repeat rewrite phi_phi_inv. apply Zplus_mod_idemp_l. Qed. Lemma spec_succ : forall x, [|x+1|] = ([|x|] + 1) mod wB. Proof. intros; rewrite <- spec_1; apply spec_add. Qed. (** Subtraction *) Lemma spec_sub_c : forall x y, [-|sub31c x y|] = [|x|] - [|y|]. Proof. unfold sub31c, sub31, interp_carry; intros. rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X-Y) mod wB ?= X-Y <> Eq -> [-|C1 (phi_inv (X-Y))|] = X-Y). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X-Y) 0). rewrite <- (Z_mod_plus_full (X-Y) 1 wB). rewrite Zmod_small; romega. contradict H1; apply Zmod_small; romega. generalize (Z.compare_eq ((X-Y) mod wB) (X-Y)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_sub_carry_c : forall x y, [-|sub31carryc x y|] = [|x|] - [|y|] - 1. Proof. unfold sub31carryc, sub31, interp_carry; intros. rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X-Y-1) mod wB ?= X-Y-1 <> Eq -> [-|C1 (phi_inv (X-Y-1))|] = X-Y-1). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X-Y-1) 0). rewrite <- (Z_mod_plus_full (X-Y-1) 1 wB). rewrite Zmod_small; romega. contradict H1; apply Zmod_small; romega. generalize (Z.compare_eq ((X-Y-1) mod wB) (X-Y-1)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_sub : forall x y, [|x-y|] = ([|x|] - [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_sub_carry : forall x y, [|x-y-1|] = ([|x|] - [|y|] - 1) mod wB. Proof. unfold sub31; intros. repeat rewrite phi_phi_inv. apply Zminus_mod_idemp_l. Qed. Lemma spec_opp_c : forall x, [-|sub31c 0 x|] = -[|x|]. Proof. 
intros; apply spec_sub_c. Qed. Lemma spec_opp : forall x, [|0 - x|] = (-[|x|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_opp_carry : forall x, [|0 - x - 1|] = wB - [|x|] - 1. Proof. unfold sub31; intros. repeat rewrite phi_phi_inv. change [|1|] with 1; change [|0|] with 0. rewrite <- (Z_mod_plus_full (0-[|x|]) 1 wB). rewrite Zminus_mod_idemp_l. rewrite Zmod_small; generalize (phi_bounded x); romega. Qed. Lemma spec_pred_c : forall x, [-|sub31c x 1|] = [|x|] - 1. Proof. intros; apply spec_sub_c. Qed. Lemma spec_pred : forall x, [|x-1|] = ([|x|] - 1) mod wB. Proof. intros; apply spec_sub. Qed. (** Multiplication *) Lemma phi2_phi_inv2 : forall x, [||phi_inv2 x||] = x mod (wB^2). Proof. assert (forall z, (z / wB) mod wB * wB + z mod wB = z mod wB ^ 2). intros. assert ((z/wB) mod wB = z/wB - (z/wB/wB)*wB). rewrite (Z_div_mod_eq (z/wB) wB wB_pos) at 2; ring. assert (z mod wB = z - (z/wB)*wB). rewrite (Z_div_mod_eq z wB wB_pos) at 2; ring. rewrite H. rewrite H0 at 1. ring_simplify. rewrite Zdiv_Zdiv; auto with zarith. rewrite (Z_div_mod_eq z (wB*wB)) at 2; auto with zarith. change (wB*wB) with (wB^2); ring. unfold phi_inv2. destruct x; unfold zn2z_to_Z; rewrite ?phi_phi_inv; change base with wB; auto. Qed. Lemma spec_mul_c : forall x y, [|| mul31c x y ||] = [|x|] * [|y|]. Proof. unfold mul31c; intros. rewrite phi2_phi_inv2. apply Zmod_small. generalize (phi_bounded x)(phi_bounded y); intros. change (wB^2) with (wB * wB). auto using Z.mul_lt_mono_nonneg with zarith. Qed. Lemma spec_mul : forall x y, [|x*y|] = ([|x|] * [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_square_c : forall x, [|| mul31c x x ||] = [|x|] * [|x|]. Proof. intros; apply spec_mul_c. Qed. (** Division *) Lemma spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := div3121 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. unfold div3121; intros. generalize (phi_bounded a1)(phi_bounded a2)(phi_bounded b); intros. assert ([|b|]>0) by (auto with zarith). generalize (Z_div_mod (phi2 a1 a2) [|b|] H4) (Z_div_pos (phi2 a1 a2) [|b|] H4). unfold Z.div; destruct (Z.div_eucl (phi2 a1 a2) [|b|]); simpl. rewrite ?phi_phi_inv. destruct 1; intros. unfold phi2 in *. change base with wB; change base with wB in H5. change (Z.pow_pos 2 31) with wB; change (Z.pow_pos 2 31) with wB in H. rewrite H5, Z.mul_comm. replace (z0 mod wB) with z0 by (symmetry; apply Zmod_small; omega). replace (z mod wB) with z; auto with zarith. symmetry; apply Zmod_small. split. apply H7; change base with wB; auto with zarith. apply Z.mul_lt_mono_pos_r with [|b|]; [omega| ]. rewrite Z.mul_comm. apply Z.le_lt_trans with ([|b|]*z+z0); [omega| ]. rewrite <- H5. apply Z.le_lt_trans with ([|a1|]*wB+(wB-1)); [omega | ]. replace ([|a1|]*wB+(wB-1)) with (wB*([|a1|]+1)-1) by ring. assert (wB*([|a1|]+1) <= wB*[|b|]); try omega. apply Z.mul_le_mono_nonneg; omega. Qed. Lemma spec_div : forall a b, 0 < [|b|] -> let (q,r) := div31 a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. unfold div31; intros. assert ([|b|]>0) by (auto with zarith). generalize (Z_div_mod [|a|] [|b|] H0) (Z_div_pos [|a|] [|b|] H0). unfold Z.div; destruct (Z.div_eucl [|a|] [|b|]); simpl. rewrite ?phi_phi_inv. destruct 1; intros. rewrite H1, Z.mul_comm. generalize (phi_bounded a)(phi_bounded b); intros. replace (z0 mod wB) with z0 by (symmetry; apply Zmod_small; omega). replace (z mod wB) with z; auto with zarith. symmetry; apply Zmod_small. split; auto with zarith. 
apply Z.le_lt_trans with [|a|]; auto with zarith. rewrite H1. apply Z.le_trans with ([|b|]*z); try omega. rewrite <- (Z.mul_1_l z) at 1. apply Z.mul_le_mono_nonneg; auto with zarith. Qed. Lemma spec_mod : forall a b, 0 < [|b|] -> [|let (_,r) := (a/b)%int31 in r|] = [|a|] mod [|b|]. Proof. unfold div31; intros. assert ([|b|]>0) by (auto with zarith). unfold Z.modulo. generalize (Z_div_mod [|a|] [|b|] H0). destruct (Z.div_eucl [|a|] [|b|]); simpl. rewrite ?phi_phi_inv. destruct 1; intros. generalize (phi_bounded b); intros. apply Zmod_small; omega. Qed. Lemma phi_gcd : forall i j, [|gcd31 i j|] = Zgcdn (2*size) [|j|] [|i|]. Proof. unfold gcd31. induction (2*size)%nat; intros. reflexivity. simpl. unfold compare31. change [|On|] with 0. generalize (phi_bounded j)(phi_bounded i); intros. case_eq [|j|]; intros. simpl; intros. generalize (Zabs_spec [|i|]); omega. simpl. rewrite IHn, H1; f_equal. rewrite spec_mod, H1; auto. rewrite H1; compute; auto. rewrite H1 in H; destruct H as [H _]; compute in H; elim H; auto. Qed. Lemma spec_gcd : forall a b, Zis_gcd [|a|] [|b|] [|gcd31 a b|]. Proof. intros. rewrite phi_gcd. apply Zis_gcd_sym. apply Zgcdn_is_gcd. unfold Zgcd_bound. generalize (phi_bounded b). destruct [|b|]. unfold size; auto with zarith. intros (_,H). cut (Pos.size_nat p <= size)%nat; [ omega | rewrite <- Zpower2_Psize; auto]. intros (H,_); compute in H; elim H; auto. Qed. Lemma iter_int31_iter_nat : forall A f i a, iter_int31 i A f a = iter_nat (Z.abs_nat [|i|]) A f a. Proof. intros. unfold iter_int31. rewrite <- recrbis_equiv; auto; unfold recrbis. rewrite <- phibis_aux_equiv. revert i a; induction size. simpl; auto. simpl; intros. case_eq (firstr i); intros H; rewrite 2 IHn; unfold phibis_aux; simpl; rewrite H; fold (phibis_aux n (shiftr i)); generalize (phibis_aux_pos n (shiftr i)); intros; set (z := phibis_aux n (shiftr i)) in *; clearbody z; rewrite <- iter_nat_plus. f_equal. rewrite Z.double_spec, <- Z.add_diag. symmetry; apply Zabs2Nat.inj_add; auto with zarith. change (iter_nat (S (Z.abs_nat z + Z.abs_nat z)) A f a = iter_nat (Z.abs_nat (Z.succ_double z)) A f a); f_equal. rewrite Z.succ_double_spec, <- Z.add_diag. rewrite Zabs2Nat.inj_add; auto with zarith. rewrite Zabs2Nat.inj_add; auto with zarith. change (Z.abs_nat 1) with 1%nat; omega. Qed. Fixpoint addmuldiv31_alt n i j := match n with | O => i | S n => addmuldiv31_alt n (sneakl (firstl j) i) (shiftl j) end. Lemma addmuldiv31_equiv : forall p x y, addmuldiv31 p x y = addmuldiv31_alt (Z.abs_nat [|p|]) x y. Proof. intros. unfold addmuldiv31. rewrite iter_int31_iter_nat. set (n:=Z.abs_nat [|p|]); clearbody n; clear p. revert x y; induction n. simpl; auto. intros. simpl addmuldiv31_alt. replace (S n) with (n+1)%nat by (rewrite plus_comm; auto). rewrite iter_nat_plus; simpl; auto. Qed. Lemma spec_add_mul_div : forall x y p, [|p|] <= Zpos 31 -> [| addmuldiv31 p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos 31) - [|p|]))) mod wB. Proof. intros. rewrite addmuldiv31_equiv. assert ([|p|] = Z.of_nat (Z.abs_nat [|p|])). rewrite Zabs2Nat.id_abs; symmetry; apply Z.abs_eq. destruct (phi_bounded p); auto. rewrite H0; rewrite H0 in H; clear H0; rewrite Zabs2Nat.id. set (n := Z.abs_nat [|p|]) in *; clearbody n. assert (n <= 31)%nat. rewrite Nat2Z.inj_le; auto with zarith. clear p H; revert x y. induction n. simpl; intros. change (Z.pow_pos 2 31) with (2^31). rewrite Z.mul_1_r. replace ([|y|] / 2^31) with 0. rewrite Z.add_0_r. symmetry; apply Zmod_small; apply phi_bounded. symmetry; apply Zdiv_small; apply phi_bounded. 
simpl addmuldiv31_alt; intros. rewrite IHn; [ | omega ]. case_eq (firstl y); intros. rewrite phi_twice, Z.double_spec. rewrite phi_twice_firstl; auto. change (Z.double [|y|]) with (2*[|y|]). rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. rewrite Zplus_mod; rewrite Zmult_mod_idemp_l; rewrite <- Zplus_mod. f_equal. f_equal. ring. replace (31-Z.of_nat n) with (Z.succ(31-Z.succ(Z.of_nat n))) by ring. rewrite Z.pow_succ_r, <- Zdiv_Zdiv; auto with zarith. rewrite Z.mul_comm, Z_div_mult; auto with zarith. rewrite phi_twice_plus_one, Z.succ_double_spec. rewrite phi_twice; auto. change (Z.double [|y|]) with (2*[|y|]). rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. rewrite Zplus_mod; rewrite Zmult_mod_idemp_l; rewrite <- Zplus_mod. rewrite Z.mul_add_distr_r, Z.mul_1_l, <- Z.add_assoc. f_equal. f_equal. ring. assert ((2*[|y|]) mod wB = 2*[|y|] - wB). clear - H. symmetry. apply Zmod_unique with 1; [ | ring ]. generalize (phi_lowerbound _ H) (phi_bounded y). set (wB' := 2^Z.of_nat (pred size)). replace wB with (2*wB'); [ omega | ]. unfold wB'. rewrite <- Z.pow_succ_r, <- Nat2Z.inj_succ by (auto with zarith). f_equal. rewrite H1. replace wB with (2^(Z.of_nat n)*2^(31-Z.of_nat n)) by (rewrite <- Zpower_exp; auto with zarith; f_equal; unfold size; ring). unfold Z.sub; rewrite <- Z.mul_opp_l. rewrite Z_div_plus; auto with zarith. ring_simplify. replace (31+-Z.of_nat n) with (Z.succ(31-Z.succ(Z.of_nat n))) by ring. rewrite Z.pow_succ_r, <- Zdiv_Zdiv; auto with zarith. rewrite Z.mul_comm, Z_div_mult; auto with zarith. Qed. Lemma spec_pos_mod : forall w p, [|ZnZ.pos_mod p w|] = [|w|] mod (2 ^ [|p|]). Proof. unfold ZnZ.pos_mod, int31_ops, compare31. change [|31|] with 31%Z. assert (forall w p, 31<=p -> [|w|] = [|w|] mod 2^p). intros. generalize (phi_bounded w). symmetry; apply Zmod_small. split; auto with zarith. apply Z.lt_le_trans with wB; auto with zarith. apply Zpower_le_monotone; auto with zarith. intros. case_eq ([|p|] ?= 31); intros; [ apply H; rewrite (Z.compare_eq _ _ H0); auto with zarith | | apply H; change ([|p|]>31)%Z in H0; auto with zarith ]. change ([|p|]<31) in H0. rewrite spec_add_mul_div by auto with zarith. change [|0|] with 0%Z; rewrite Z.mul_0_l, Z.add_0_l. generalize (phi_bounded p)(phi_bounded w); intros. assert (31-[|p|] [|head031 x|] = Zpos 31. Proof. intros. generalize (phi_inv_phi x). rewrite H; simpl. intros H'; rewrite <- H'. simpl; auto. Qed. Fixpoint head031_alt n x := match n with | O => 0%nat | S n => match firstl x with | D0 => S (head031_alt n (shiftl x)) | D1 => 0%nat end end. Lemma head031_equiv : forall x, [|head031 x|] = Z.of_nat (head031_alt size x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H). simpl; auto. unfold head031, recl. change On with (phi_inv (Z.of_nat (31-size))). replace (head031_alt size x) with (head031_alt size x + (31 - size))%nat by auto. assert (size <= 31)%nat by auto with arith. revert x H; induction size; intros. simpl; auto. unfold recl_aux; fold recl_aux. unfold head031_alt; fold head031_alt. rewrite H. assert ([|phi_inv (Z.of_nat (31-S n))|] = Z.of_nat (31 - S n)). rewrite phi_phi_inv. apply Zmod_small. split. change 0 with (Z.of_nat O); apply inj_le; omega. apply Z.le_lt_trans with (Z.of_nat 31). apply inj_le; omega. compute; auto. case_eq (firstl x); intros; auto. rewrite plus_Sn_m, plus_n_Sm. replace (S (31 - S n)) with (31 - n)%nat by omega. rewrite <- IHn; [ | omega | ]. f_equal; f_equal. unfold add31. rewrite H1. f_equal. change [|In|] with 1. replace (31-n)%nat with (S (31 - S n))%nat by omega. 
rewrite Nat2Z.inj_succ; ring. clear - H H2. rewrite (sneakr_shiftl x) in H. rewrite H2 in H. case_eq (iszero (shiftl x)); intros; auto. rewrite (iszero_eq0 _ H0) in H; discriminate. Qed. Lemma phi_nz : forall x, 0 < [|x|] <-> x <> 0%int31. Proof. split; intros. red; intro; subst x; discriminate. assert ([|x|]<>0%Z). contradict H. rewrite <- (phi_inv_phi x); rewrite H; auto. generalize (phi_bounded x); auto with zarith. Qed. Lemma spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ ([|head031 x|]) * [|x|] < wB. Proof. intros. rewrite head031_equiv. assert (nshiftl size x = 0%int31). apply nshiftl_size. revert x H H0. unfold size at 2 5. induction size. simpl Z.of_nat. intros. compute in H0; rewrite H0 in H; discriminate. intros. simpl head031_alt. case_eq (firstl x); intros. rewrite (Nat2Z.inj_succ (head031_alt n (shiftl x))), Z.pow_succ_r; auto with zarith. rewrite <- Z.mul_assoc, Z.mul_comm, <- Z.mul_assoc, <-(Z.mul_comm 2). rewrite <- Z.double_spec, <- (phi_twice_firstl _ H1). apply IHn. rewrite phi_nz; rewrite phi_nz in H; contradict H. change twice with shiftl in H. rewrite (sneakr_shiftl x), H1, H; auto. rewrite <- nshiftl_S_tail; auto. change (2^(Z.of_nat 0)) with 1; rewrite Z.mul_1_l. generalize (phi_bounded x); unfold size; split; auto with zarith. change (2^(Z.of_nat 31)/2) with (2^(Z.of_nat (pred size))). apply phi_lowerbound; auto. Qed. Lemma spec_tail00: forall x, [|x|] = 0 -> [|tail031 x|] = Zpos 31. Proof. intros. generalize (phi_inv_phi x). rewrite H; simpl. intros H'; rewrite <- H'. simpl; auto. Qed. Fixpoint tail031_alt n x := match n with | O => 0%nat | S n => match firstr x with | D0 => S (tail031_alt n (shiftr x)) | D1 => 0%nat end end. Lemma tail031_equiv : forall x, [|tail031 x|] = Z.of_nat (tail031_alt size x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H). simpl; auto. unfold tail031, recr. change On with (phi_inv (Z.of_nat (31-size))). replace (tail031_alt size x) with (tail031_alt size x + (31 - size))%nat by auto. assert (size <= 31)%nat by auto with arith. revert x H; induction size; intros. simpl; auto. unfold recr_aux; fold recr_aux. unfold tail031_alt; fold tail031_alt. rewrite H. assert ([|phi_inv (Z.of_nat (31-S n))|] = Z.of_nat (31 - S n)). rewrite phi_phi_inv. apply Zmod_small. split. change 0 with (Z.of_nat O); apply inj_le; omega. apply Z.le_lt_trans with (Z.of_nat 31). apply inj_le; omega. compute; auto. case_eq (firstr x); intros; auto. rewrite plus_Sn_m, plus_n_Sm. replace (S (31 - S n)) with (31 - n)%nat by omega. rewrite <- IHn; [ | omega | ]. f_equal; f_equal. unfold add31. rewrite H1. f_equal. change [|In|] with 1. replace (31-n)%nat with (S (31 - S n))%nat by omega. rewrite Nat2Z.inj_succ; ring. clear - H H2. rewrite (sneakl_shiftr x) in H. rewrite H2 in H. case_eq (iszero (shiftr x)); intros; auto. rewrite (iszero_eq0 _ H0) in H; discriminate. Qed. Lemma spec_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2 * y + 1) * (2 ^ [|tail031 x|]). Proof. intros. rewrite tail031_equiv. assert (nshiftr size x = 0%int31). apply nshiftr_size. revert x H H0. induction size. simpl Z.of_nat. intros. compute in H0; rewrite H0 in H; discriminate. intros. simpl tail031_alt. case_eq (firstr x); intros. rewrite (Nat2Z.inj_succ (tail031_alt n (shiftr x))), Z.pow_succ_r; auto with zarith. destruct (IHn (shiftr x)) as (y & Hy1 & Hy2). rewrite phi_nz; rewrite phi_nz in H; contradict H. rewrite (sneakl_shiftr x), H1, H; auto. rewrite <- nshiftr_S_tail; auto. exists y; split; auto. rewrite phi_eqn1; auto. rewrite Z.double_spec, Hy2; ring. 
exists [|shiftr x|]. split. generalize (phi_bounded (shiftr x)); auto with zarith. rewrite phi_eqn2; auto. rewrite Z.succ_double_spec; simpl; ring. Qed. (* Sqrt *) (* Direct transcription of an old proof of a fortran program in boyer-moore *) Lemma quotient_by_2 a: a - 1 <= (a/2) + (a/2). Proof. case (Z_mod_lt a 2); auto with zarith. intros H1; rewrite Zmod_eq_full; auto with zarith. Qed. Lemma sqrt_main_trick j k: 0 <= j -> 0 <= k -> (j * k) + j <= ((j + k)/2 + 1) ^ 2. Proof. intros Hj; generalize Hj k; pattern j; apply natlike_ind; auto; clear k j Hj. intros _ k Hk; repeat rewrite Z.add_0_l. apply Z.mul_nonneg_nonneg; generalize (Z_div_pos k 2); auto with zarith. intros j Hj Hrec _ k Hk; pattern k; apply natlike_ind; auto; clear k Hk. rewrite Z.mul_0_r, Z.add_0_r, Z.add_0_l. generalize (sqr_pos (Z.succ j / 2)) (quotient_by_2 (Z.succ j)); unfold Z.succ. rewrite Z.pow_2_r, Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. auto with zarith. intros k Hk _. replace ((Z.succ j + Z.succ k) / 2) with ((j + k)/2 + 1). generalize (Hrec Hj k Hk) (quotient_by_2 (j + k)). unfold Z.succ; repeat rewrite Z.pow_2_r; repeat rewrite Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. repeat rewrite Z.mul_1_l; repeat rewrite Z.mul_1_r. auto with zarith. rewrite Z.add_comm, <- Z_div_plus_full_l; auto with zarith. apply f_equal2 with (f := Z.div); auto with zarith. Qed. Lemma sqrt_main i j: 0 <= i -> 0 < j -> i < ((j + (i/j))/2 + 1) ^ 2. Proof. intros Hi Hj. assert (Hij: 0 <= i/j) by (apply Z_div_pos; auto with zarith). apply Z.lt_le_trans with (2 := sqrt_main_trick _ _ (Z.lt_le_incl _ _ Hj) Hij). pattern i at 1; rewrite (Z_div_mod_eq i j); case (Z_mod_lt i j); auto with zarith. Qed. Lemma sqrt_init i: 1 < i -> i < (i/2 + 1) ^ 2. Proof. intros Hi. assert (H1: 0 <= i - 2) by auto with zarith. assert (H2: 1 <= (i / 2) ^ 2); auto with zarith. replace i with (1* 2 + (i - 2)); auto with zarith. rewrite Z.pow_2_r, Z_div_plus_full_l; auto with zarith. generalize (sqr_pos ((i - 2)/ 2)) (Z_div_pos (i - 2) 2). rewrite Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. auto with zarith. generalize (quotient_by_2 i). rewrite Z.pow_2_r in H2 |- *; repeat (rewrite Z.mul_add_distr_r || rewrite Z.mul_add_distr_l || rewrite Z.mul_1_l || rewrite Z.mul_1_r). auto with zarith. Qed. Lemma sqrt_test_true i j: 0 <= i -> 0 < j -> i/j >= j -> j ^ 2 <= i. Proof. intros Hi Hj Hd; rewrite Z.pow_2_r. apply Z.le_trans with (j * (i/j)); auto with zarith. apply Z_mult_div_ge; auto with zarith. Qed. Lemma sqrt_test_false i j: 0 <= i -> 0 < j -> i/j < j -> (j + (i/j))/2 < j. Proof. intros Hi Hj H; case (Z.le_gt_cases j ((j + (i/j))/2)); auto. intros H1; contradict H; apply Z.le_ngt. assert (2 * j <= j + (i/j)); auto with zarith. apply Z.le_trans with (2 * ((j + (i/j))/2)); auto with zarith. apply Z_mult_div_ge; auto with zarith. Qed. Lemma sqrt31_step_def rec i j: sqrt31_step rec i j = match (fst (i/j) ?= j)%int31 with Lt => rec i (fst ((j + fst(i/j))/2))%int31 | _ => j end. Proof. unfold sqrt31_step; case div31; intros. simpl; case compare31; auto. Qed. Lemma div31_phi i j: 0 < [|j|] -> [|fst (i/j)%int31|] = [|i|]/[|j|]. intros Hj; generalize (spec_div i j Hj). case div31; intros q r; simpl fst. intros (H1,H2); apply Zdiv_unique with [|r|]; auto with zarith. rewrite H1; ring. Qed. 
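(** Illustrative addition (not part of the original development): a concrete instance of [sqrt_main] and [sqrt_test_false] above, for [i = 30] and [j = 7], where [i/j = 4], hence [(j + i/j)/2 = 5] and [((j + i/j)/2 + 1)^2 = 36]. *)
Example sqrt_main_instance : 30 < ((7 + 30/7)/2 + 1) ^ 2.
Proof. exact (eq_refl Lt). Qed.
Example sqrt_test_false_instance : (7 + 30/7)/2 < 7.
Proof. exact (eq_refl Lt). Qed.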
Lemma sqrt31_step_correct rec i j: 0 < [|i|] -> 0 < [|j|] -> [|i|] < ([|j|] + 1) ^ 2 -> 2 * [|j|] < wB -> (forall j1 : int31, 0 < [|j1|] < [|j|] -> [|i|] < ([|j1|] + 1) ^ 2 -> [|rec i j1|] ^ 2 <= [|i|] < ([|rec i j1|] + 1) ^ 2) -> [|sqrt31_step rec i j|] ^ 2 <= [|i|] < ([|sqrt31_step rec i j|] + 1) ^ 2. Proof. assert (Hp2: 0 < [|2|]) by exact (eq_refl Lt). intros Hi Hj Hij H31 Hrec; rewrite sqrt31_step_def. rewrite spec_compare, div31_phi; auto. case Z.compare_spec; auto; intros Hc; try (split; auto; apply sqrt_test_true; auto with zarith; fail). apply Hrec; repeat rewrite div31_phi; auto with zarith. replace [|(j + fst (i / j)%int31)|] with ([|j|] + [|i|] / [|j|]). split. apply Z.le_succ_l in Hj. change (1 <= [|j|]) in Hj. Z.le_elim Hj. replace ([|j|] + [|i|]/[|j|]) with (1 * 2 + (([|j|] - 2) + [|i|] / [|j|])); try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= [|i|]/ [|j|]) by (apply Z_div_pos; auto with zarith). assert (0 <= ([|j|] - 2 + [|i|] / [|j|]) / [|2|]) ; auto with zarith. rewrite <- Hj, Zdiv_1_r. replace (1 + [|i|])%Z with (1 * 2 + ([|i|] - 1))%Z; try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= ([|i|] - 1) /2)%Z by (apply Z_div_pos; auto with zarith). change ([|2|]) with 2%Z; auto with zarith. apply sqrt_test_false; auto with zarith. rewrite spec_add, div31_phi; auto. symmetry; apply Zmod_small. split; auto with zarith. replace [|j + fst (i / j)%int31|] with ([|j|] + [|i|] / [|j|]). apply sqrt_main; auto with zarith. rewrite spec_add, div31_phi; auto. symmetry; apply Zmod_small. split; auto with zarith. Qed. Lemma iter31_sqrt_correct n rec i j: 0 < [|i|] -> 0 < [|j|] -> [|i|] < ([|j|] + 1) ^ 2 -> 2 * [|j|] < 2 ^ (Z.of_nat size) -> (forall j1, 0 < [|j1|] -> 2^(Z.of_nat n) + [|j1|] <= [|j|] -> [|i|] < ([|j1|] + 1) ^ 2 -> 2 * [|j1|] < 2 ^ (Z.of_nat size) -> [|rec i j1|] ^ 2 <= [|i|] < ([|rec i j1|] + 1) ^ 2) -> [|iter31_sqrt n rec i j|] ^ 2 <= [|i|] < ([|iter31_sqrt n rec i j|] + 1) ^ 2. Proof. revert rec i j; elim n; unfold iter31_sqrt; fold iter31_sqrt; clear n. intros rec i j Hi Hj Hij H31 Hrec; apply sqrt31_step_correct; auto with zarith. intros; apply Hrec; auto with zarith. rewrite Z.pow_0_r; auto with zarith. intros n Hrec rec i j Hi Hj Hij H31 HHrec. apply sqrt31_step_correct; auto. intros j1 Hj1 Hjp1; apply Hrec; auto with zarith. intros j2 Hj2 H2j2 Hjp2 Hj31; apply Hrec; auto with zarith. intros j3 Hj3 Hpj3. apply HHrec; auto. rewrite Nat2Z.inj_succ, Z.pow_succ_r. apply Z.le_trans with (2 ^Z.of_nat n + [|j2|]); auto with zarith. apply Nat2Z.is_nonneg. Qed. Lemma spec_sqrt : forall x, [|sqrt31 x|] ^ 2 <= [|x|] < ([|sqrt31 x|] + 1) ^ 2. Proof. intros i; unfold sqrt31. rewrite spec_compare. case Z.compare_spec; change [|1|] with 1; intros Hi; auto with zarith. repeat rewrite Z.pow_2_r; auto with zarith. apply iter31_sqrt_correct; auto with zarith. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. replace ([|i|]) with (1 * 2 + ([|i|] - 2))%Z; try ring. assert (0 <= ([|i|] - 2)/2)%Z by (apply Z_div_pos; auto with zarith). rewrite Z_div_plus_full_l; auto with zarith. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply sqrt_init; auto. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply Z.le_lt_trans with ([|i|]). apply Z_mult_div_ge; auto with zarith. case (phi_bounded i); auto. intros j2 H1 H2; contradict H2; apply Z.lt_nge. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply Z.le_lt_trans with ([|i|]); auto with zarith. assert (0 <= [|i|]/2)%Z by (apply Z_div_pos; auto with zarith). 
apply Z.le_trans with (2 * ([|i|]/2)); auto with zarith. apply Z_mult_div_ge; auto with zarith. case (phi_bounded i); unfold size; auto with zarith. change [|0|] with 0; auto with zarith. case (phi_bounded i); repeat rewrite Z.pow_2_r; auto with zarith. Qed. Lemma sqrt312_step_def rec ih il j: sqrt312_step rec ih il j = match (ih ?= j)%int31 with Eq => j | Gt => j | _ => match (fst (div3121 ih il j) ?= j)%int31 with Lt => let m := match j +c fst (div3121 ih il j) with C0 m1 => fst (m1/2)%int31 | C1 m1 => (fst (m1/2) + v30)%int31 end in rec ih il m | _ => j end end. Proof. unfold sqrt312_step; case div3121; intros. simpl; case compare31; auto. Qed. Lemma sqrt312_lower_bound ih il j: phi2 ih il < ([|j|] + 1) ^ 2 -> [|ih|] <= [|j|]. Proof. intros H1. case (phi_bounded j); intros Hbj _. case (phi_bounded il); intros Hbil _. case (phi_bounded ih); intros Hbih Hbih1. assert (([|ih|] < [|j|] + 1)%Z); auto with zarith. apply Z.square_lt_simpl_nonneg; auto with zarith. repeat rewrite <-Z.pow_2_r; apply Z.le_lt_trans with (2 := H1). apply Z.le_trans with ([|ih|] * base)%Z; unfold phi2, base; try rewrite Z.pow_2_r; auto with zarith. Qed. Lemma div312_phi ih il j: (2^30 <= [|j|] -> [|ih|] < [|j|] -> [|fst (div3121 ih il j)|] = phi2 ih il/[|j|])%Z. Proof. intros Hj Hj1. generalize (spec_div21 ih il j Hj Hj1). case div3121; intros q r (Hq, Hr). apply Zdiv_unique with (phi r); auto with zarith. simpl fst; apply eq_trans with (1 := Hq); ring. Qed. Lemma sqrt312_step_correct rec ih il j: 2 ^ 29 <= [|ih|] -> 0 < [|j|] -> phi2 ih il < ([|j|] + 1) ^ 2 -> (forall j1, 0 < [|j1|] < [|j|] -> phi2 ih il < ([|j1|] + 1) ^ 2 -> [|rec ih il j1|] ^ 2 <= phi2 ih il < ([|rec ih il j1|] + 1) ^ 2) -> [|sqrt312_step rec ih il j|] ^ 2 <= phi2 ih il < ([|sqrt312_step rec ih il j|] + 1) ^ 2. Proof. assert (Hp2: (0 < [|2|])%Z) by exact (eq_refl Lt). intros Hih Hj Hij Hrec; rewrite sqrt312_step_def. assert (H1: ([|ih|] <= [|j|])%Z) by (apply sqrt312_lower_bound with il; auto). case (phi_bounded ih); intros Hih1 _. case (phi_bounded il); intros Hil1 _. case (phi_bounded j); intros _ Hj1. assert (Hp3: (0 < phi2 ih il)). unfold phi2; apply Z.lt_le_trans with ([|ih|] * base)%Z; auto with zarith. apply Z.mul_pos_pos; auto with zarith. apply Z.lt_le_trans with (2:= Hih); auto with zarith. rewrite spec_compare. case Z.compare_spec; intros Hc1. split; auto. apply sqrt_test_true; auto. unfold phi2, base; auto with zarith. unfold phi2; rewrite Hc1. assert (0 <= [|il|]/[|j|]) by (apply Z_div_pos; auto with zarith). rewrite Z.mul_comm, Z_div_plus_full_l; unfold base; auto with zarith. unfold Z.pow, Z.pow_pos in Hj1; simpl in Hj1; auto with zarith. case (Z.le_gt_cases (2 ^ 30) [|j|]); intros Hjj. rewrite spec_compare; case Z.compare_spec; rewrite div312_phi; auto; intros Hc; try (split; auto; apply sqrt_test_true; auto with zarith; fail). apply Hrec. assert (Hf1: 0 <= phi2 ih il/ [|j|]) by (apply Z_div_pos; auto with zarith). apply Z.le_succ_l in Hj. change (1 <= [|j|]) in Hj. Z.le_elim Hj. 2: contradict Hc; apply Z.le_ngt; rewrite <- Hj, Zdiv_1_r; auto with zarith. assert (Hf3: 0 < ([|j|] + phi2 ih il / [|j|]) / 2). replace ([|j|] + phi2 ih il/ [|j|])%Z with (1 * 2 + (([|j|] - 2) + phi2 ih il / [|j|])); try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= ([|j|] - 2 + phi2 ih il / [|j|]) / 2) ; auto with zarith. assert (Hf4: ([|j|] + phi2 ih il / [|j|]) / 2 < [|j|]). apply sqrt_test_false; auto with zarith. generalize (spec_add_c j (fst (div3121 ih il j))). 
unfold interp_carry; case add31c; intros r; rewrite div312_phi; auto with zarith. rewrite div31_phi; change [|2|] with 2%Z; auto with zarith. intros HH; rewrite HH; clear HH; auto with zarith. rewrite spec_add, div31_phi; change [|2|] with 2%Z; auto. rewrite Z.mul_1_l; intros HH. rewrite Z.add_comm, <- Z_div_plus_full_l; auto with zarith. change (phi v30 * 2) with (2 ^ Z.of_nat size). rewrite HH, Zmod_small; auto with zarith. replace (phi match j +c fst (div3121 ih il j) with | C0 m1 => fst (m1 / 2)%int31 | C1 m1 => fst (m1 / 2)%int31 + v30 end) with ((([|j|] + (phi2 ih il)/([|j|]))/2)). apply sqrt_main; auto with zarith. generalize (spec_add_c j (fst (div3121 ih il j))). unfold interp_carry; case add31c; intros r; rewrite div312_phi; auto with zarith. rewrite div31_phi; auto with zarith. intros HH; rewrite HH; auto with zarith. intros HH; rewrite <- HH. change (1 * 2 ^ Z.of_nat size) with (phi (v30) * 2). rewrite Z_div_plus_full_l; auto with zarith. rewrite Z.add_comm. rewrite spec_add, Zmod_small. rewrite div31_phi; auto. split; auto with zarith. case (phi_bounded (fst (r/2)%int31)); case (phi_bounded v30); auto with zarith. rewrite div31_phi; change (phi 2) with 2%Z; auto. change (2 ^Z.of_nat size) with (base/2 + phi v30). assert (phi r / 2 < base/2); auto with zarith. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. change (base/2 * 2) with base. apply Z.le_lt_trans with (phi r). rewrite Z.mul_comm; apply Z_mult_div_ge; auto with zarith. case (phi_bounded r); auto with zarith. contradict Hij; apply Z.le_ngt. assert ((1 + [|j|]) <= 2 ^ 30); auto with zarith. apply Z.le_trans with ((2 ^ 30) * (2 ^ 30)); auto with zarith. assert (0 <= 1 + [|j|]); auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. change ((2 ^ 30) * (2 ^ 30)) with ((2 ^ 29) * base). apply Z.le_trans with ([|ih|] * base); auto with zarith. unfold phi2, base; auto with zarith. split; auto. apply sqrt_test_true; auto. unfold phi2, base; auto with zarith. apply Z.le_ge; apply Z.le_trans with (([|j|] * base)/[|j|]). rewrite Z.mul_comm, Z_div_mult; auto with zarith. apply Z.ge_le; apply Z_div_ge; auto with zarith. Qed. Lemma iter312_sqrt_correct n rec ih il j: 2^29 <= [|ih|] -> 0 < [|j|] -> phi2 ih il < ([|j|] + 1) ^ 2 -> (forall j1, 0 < [|j1|] -> 2^(Z.of_nat n) + [|j1|] <= [|j|] -> phi2 ih il < ([|j1|] + 1) ^ 2 -> [|rec ih il j1|] ^ 2 <= phi2 ih il < ([|rec ih il j1|] + 1) ^ 2) -> [|iter312_sqrt n rec ih il j|] ^ 2 <= phi2 ih il < ([|iter312_sqrt n rec ih il j|] + 1) ^ 2. Proof. revert rec ih il j; elim n; unfold iter312_sqrt; fold iter312_sqrt; clear n. intros rec ih il j Hi Hj Hij Hrec; apply sqrt312_step_correct; auto with zarith. intros; apply Hrec; auto with zarith. rewrite Z.pow_0_r; auto with zarith. intros n Hrec rec ih il j Hi Hj Hij HHrec. apply sqrt312_step_correct; auto. intros j1 Hj1 Hjp1; apply Hrec; auto with zarith. intros j2 Hj2 H2j2 Hjp2; apply Hrec; auto with zarith. intros j3 Hj3 Hpj3. apply HHrec; auto. rewrite Nat2Z.inj_succ, Z.pow_succ_r. apply Z.le_trans with (2 ^Z.of_nat n + [|j2|])%Z; auto with zarith. apply Nat2Z.is_nonneg. Qed. Lemma spec_sqrt2 : forall x y, wB/ 4 <= [|x|] -> let (s,r) := sqrt312 x y in [||WW x y||] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]. Proof. intros ih il Hih; unfold sqrt312. change [||WW ih il||] with (phi2 ih il). assert (Hbin: forall s, s * s + 2* s + 1 = (s + 1) ^ 2) by (intros s; ring). assert (Hb: 0 <= base) by (red; intros HH; discriminate). assert (Hi2: phi2 ih il < (phi Tn + 1) ^ 2). { change ((phi Tn + 1) ^ 2) with (2^62). 
apply Z.le_lt_trans with ((2^31 -1) * base + (2^31 - 1)); auto with zarith. 2: simpl; unfold Z.pow_pos; simpl; auto with zarith. case (phi_bounded ih); case (phi_bounded il); intros H1 H2 H3 H4. unfold base, Z.pow, Z.pow_pos in H2,H4; simpl in H2,H4. unfold phi2,Z.pow, Z.pow_pos. simpl Pos.iter; auto with zarith. } case (iter312_sqrt_correct 31 (fun _ _ j => j) ih il Tn); auto with zarith. change [|Tn|] with 2147483647; auto with zarith. intros j1 _ HH; contradict HH. apply Z.lt_nge. change [|Tn|] with 2147483647; auto with zarith. change (2 ^ Z.of_nat 31) with 2147483648; auto with zarith. case (phi_bounded j1); auto with zarith. set (s := iter312_sqrt 31 (fun _ _ j : int31 => j) ih il Tn). intros Hs1 Hs2. generalize (spec_mul_c s s); case mul31c. simpl zn2z_to_Z; intros HH. assert ([|s|] = 0). { symmetry in HH. rewrite Z.mul_eq_0 in HH. destruct HH; auto. } contradict Hs2; apply Z.le_ngt; rewrite H. change ((0 + 1) ^ 2) with 1. apply Z.le_trans with (2 ^ Z.of_nat size / 4 * base). simpl; auto with zarith. apply Z.le_trans with ([|ih|] * base); auto with zarith. unfold phi2; case (phi_bounded il); auto with zarith. intros ih1 il1. change [||WW ih1 il1||] with (phi2 ih1 il1). intros Hihl1. generalize (spec_sub_c il il1). case sub31c; intros il2 Hil2. simpl interp_carry in Hil2. rewrite spec_compare; case Z.compare_spec. unfold interp_carry. intros H1; split. rewrite Z.pow_2_r, <- Hihl1. unfold phi2; ring[Hil2 H1]. replace [|il2|] with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2; rewrite H1, Hil2; ring. unfold interp_carry. intros H1; contradict Hs1. apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2. case (phi_bounded il); intros _ H2. apply Z.lt_le_trans with (([|ih|] + 1) * base + 0). rewrite Z.mul_add_distr_r, Z.add_0_r; auto with zarith. case (phi_bounded il1); intros H3 _. apply Z.add_le_mono; auto with zarith. unfold interp_carry; change (1 * 2 ^ Z.of_nat size) with base. rewrite Z.pow_2_r, <- Hihl1, Hil2. intros H1. rewrite <- Z.le_succ_l, <- Z.add_1_r in H1. Z.le_elim H1. contradict Hs2; apply Z.le_ngt. replace (([|s|] + 1) ^ 2) with (phi2 ih1 il1 + 2 * [|s|] + 1). unfold phi2. case (phi_bounded il); intros Hpil _. assert (Hl1l: [|il1|] <= [|il|]). { case (phi_bounded il2); rewrite Hil2; auto with zarith. } assert ([|ih1|] * base + 2 * [|s|] + 1 <= [|ih|] * base); auto with zarith. case (phi_bounded s); change (2 ^ Z.of_nat size) with base; intros _ Hps. case (phi_bounded ih1); intros Hpih1 _; auto with zarith. apply Z.le_trans with (([|ih1|] + 2) * base); auto with zarith. rewrite Z.mul_add_distr_r. assert (2 * [|s|] + 1 <= 2 * base); auto with zarith. rewrite Hihl1, Hbin; auto. split. unfold phi2; rewrite <- H1; ring. replace (base + ([|il|] - [|il1|])) with (phi2 ih il - ([|s|] * [|s|])). rewrite <-Hbin in Hs2; auto with zarith. rewrite <- Hihl1; unfold phi2; rewrite <- H1; ring. unfold interp_carry in Hil2 |- *. unfold interp_carry; change (1 * 2 ^ Z.of_nat size) with base. assert (Hsih: [|ih - 1|] = [|ih|] - 1). { rewrite spec_sub, Zmod_small; auto; change [|1|] with 1. case (phi_bounded ih); intros H1 H2. generalize Hih; change (2 ^ Z.of_nat size / 4) with 536870912. split; auto with zarith. } rewrite spec_compare; case Z.compare_spec. rewrite Hsih. intros H1; split. rewrite Z.pow_2_r, <- Hihl1. unfold phi2; rewrite <-H1. transitivity ([|ih|] * base + [|il1|] + ([|il|] - [|il1|])). ring. rewrite <-Hil2. change (2 ^ Z.of_nat size) with base; ring. replace [|il2|] with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. 
rewrite <-Hbin in Hs2; auto with zarith. unfold phi2. rewrite <-H1. ring_simplify. transitivity (base + ([|il|] - [|il1|])). ring. rewrite <-Hil2. change (2 ^ Z.of_nat size) with base; ring. rewrite Hsih; intros H1. assert (He: [|ih|] = [|ih1|]). { apply Z.le_antisymm; auto with zarith. case (Z.le_gt_cases [|ih1|] [|ih|]); auto; intros H2. contradict Hs1; apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2. case (phi_bounded il); change (2 ^ Z.of_nat size) with base; intros _ Hpil1. apply Z.lt_le_trans with (([|ih|] + 1) * base). rewrite Z.mul_add_distr_r, Z.mul_1_l; auto with zarith. case (phi_bounded il1); intros Hpil2 _. apply Z.le_trans with (([|ih1|]) * base); auto with zarith. } rewrite Z.pow_2_r, <-Hihl1; unfold phi2; rewrite <-He. contradict Hs1; apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2; rewrite He. assert (phi il - phi il1 < 0); auto with zarith. rewrite <-Hil2. case (phi_bounded il2); auto with zarith. intros H1. rewrite Z.pow_2_r, <-Hihl1. assert (H2 : [|ih1|]+2 <= [|ih|]); auto with zarith. Z.le_elim H2. contradict Hs2; apply Z.le_ngt. replace (([|s|] + 1) ^ 2) with (phi2 ih1 il1 + 2 * [|s|] + 1). unfold phi2. assert ([|ih1|] * base + 2 * phi s + 1 <= [|ih|] * base + ([|il|] - [|il1|])); auto with zarith. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base). case (phi_bounded il2); intros Hpil2 _. apply Z.le_trans with ([|ih|] * base + - base); auto with zarith. case (phi_bounded s); change (2 ^ Z.of_nat size) with base; intros _ Hps. assert (2 * [|s|] + 1 <= 2 * base); auto with zarith. apply Z.le_trans with ([|ih1|] * base + 2 * base); auto with zarith. assert (Hi: ([|ih1|] + 3) * base <= [|ih|] * base); auto with zarith. rewrite Z.mul_add_distr_r in Hi; auto with zarith. rewrite Hihl1, Hbin; auto. unfold phi2; rewrite <-H2. split. replace [|il|] with (([|il|] - [|il1|]) + [|il1|]); try ring. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base); ring. replace (base + [|il2|]) with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2; rewrite <-H2. replace [|il|] with (([|il|] - [|il1|]) + [|il1|]); try ring. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base); ring. Qed. (** [iszero] *) Lemma spec_eq0 : forall x, ZnZ.eq0 x = true -> [|x|] = 0. Proof. clear; unfold ZnZ.eq0; simpl. unfold compare31; simpl; intros. change [|0|] with 0 in H. apply Z.compare_eq. now destruct ([|x|] ?= 0). Qed. (* Even *) Lemma spec_is_even : forall x, if ZnZ.is_even x then [|x|] mod 2 = 0 else [|x|] mod 2 = 1. Proof. unfold ZnZ.is_even; simpl; intros. generalize (spec_div x 2). destruct (x/2)%int31 as (q,r); intros. unfold compare31. change [|2|] with 2 in H. change [|0|] with 0. destruct H; auto with zarith. replace ([|x|] mod 2) with [|r|]. destruct H; auto with zarith. case Z.compare_spec; auto with zarith. apply Zmod_unique with [|q|]; auto with zarith. Qed. 
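(** Illustrative addition (not in the original file): the specifications above describe arithmetic modulo [wB = 2^31]; for instance, adding [1] to the largest digit [Tn] wraps around to [0], in accordance with [spec_add] and [spec_m1]. *)
Example add31_Tn_wraps : [| (Tn + 1)%int31 |] = 0.
Proof. reflexivity. Qed.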
Global Instance int31_specs : ZnZ.Specs int31_ops := { spec_to_Z := phi_bounded; spec_of_pos := positive_to_int31_spec; spec_zdigits := spec_zdigits; spec_more_than_1_digit := spec_more_than_1_digit; spec_0 := spec_0; spec_1 := spec_1; spec_m1 := spec_m1; spec_compare := spec_compare; spec_eq0 := spec_eq0; spec_opp_c := spec_opp_c; spec_opp := spec_opp; spec_opp_carry := spec_opp_carry; spec_succ_c := spec_succ_c; spec_add_c := spec_add_c; spec_add_carry_c := spec_add_carry_c; spec_succ := spec_succ; spec_add := spec_add; spec_add_carry := spec_add_carry; spec_pred_c := spec_pred_c; spec_sub_c := spec_sub_c; spec_sub_carry_c := spec_sub_carry_c; spec_pred := spec_pred; spec_sub := spec_sub; spec_sub_carry := spec_sub_carry; spec_mul_c := spec_mul_c; spec_mul := spec_mul; spec_square_c := spec_square_c; spec_div21 := spec_div21; spec_div_gt := fun a b _ => spec_div a b; spec_div := spec_div; spec_modulo_gt := fun a b _ => spec_mod a b; spec_modulo := spec_mod; spec_gcd_gt := fun a b _ => spec_gcd a b; spec_gcd := spec_gcd; spec_head00 := spec_head00; spec_head0 := spec_head0; spec_tail00 := spec_tail00; spec_tail0 := spec_tail0; spec_add_mul_div := spec_add_mul_div; spec_pos_mod := spec_pos_mod; spec_is_even := spec_is_even; spec_sqrt2 := spec_sqrt2; spec_sqrt := spec_sqrt }. End Int31_Specs. Module Int31Cyclic <: CyclicType. Definition t := int31. Definition ops := int31_ops. Definition specs := int31_specs. End Int31Cyclic. coq-8.4pl2/theories/Numbers/NaryFunctions.v0000640000175000001440000001033612010532755020060 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ... -> A -> B] with [n] occurrences of [A] in this type. *) Fixpoint nfun A n B := match n with | O => B | S n => A -> (nfun A n B) end. Notation " A ^^ n --> B " := (nfun A n B) (at level 50, n at next level) : type_scope. (** [napply_cst _ _ a n f] iterates [n] times the application of a particular constant [a] to the [n]-ary function [f]. *) Fixpoint napply_cst (A B:Type)(a:A) n : (A^^n-->B) -> B := match n return (A^^n-->B) -> B with | O => fun x => x | S n => fun x => napply_cst _ _ a n (x a) end. (** A generic transformation from an n-ary function to another one. *) Fixpoint nfun_to_nfun (A B C:Type)(f:B -> C) n : (A^^n-->B) -> (A^^n-->C) := match n return (A^^n-->B) -> (A^^n-->C) with | O => f | S n => fun g a => nfun_to_nfun _ _ _ f n (g a) end. (** [napply_except_last _ _ n f] expects [n] arguments of type [A], applies [n-1] of them to [f] and discards the last one. *) Definition napply_except_last (A B:Type) := nfun_to_nfun A B (A->B) (fun b a => b). (** [napply_then_last _ _ a n f] expects [n] arguments of type [A], applies them to [f] and then applies [a] to the result. *) Definition napply_then_last (A B:Type)(a:A) := nfun_to_nfun A (A->B) B (fun fab => fab a). (** [napply_discard _ b n] expects [n] arguments, discards them, and returns [b]. *) Fixpoint napply_discard (A B:Type)(b:B) n : A^^n-->B := match n return A^^n-->B with | O => b | S n => fun _ => napply_discard _ _ b n end. (** A fold function *) Fixpoint nfold A B (f:A->B->B)(b:B) n : (A^^n-->B) := match n return (A^^n-->B) with | O => b | S n => fun a => (nfold _ _ f (f a b) n) end. (** [n]-ary products: [nprod A n] is [A*...*A*unit], with [n] occurrences of [A] in this type. *) Fixpoint nprod A n : Type := match n with | O => unit | S n => (A * nprod A n)%type end. Notation "A ^ n" := (nprod A n) : type_scope. 
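(** Illustrative additions (not part of the original interface): for [n = 2], [A^^2-->B] unfolds to [A -> A -> B] and [A^2] to [(A * (A * unit))%type], so the combinators above can be checked on small instances such as the following. *)
Example napply_cst_2 : napply_cst nat nat 5 2 plus = 10.
Proof. reflexivity. Qed.
Example nfold_plus_2 : nfold nat nat plus 0 2 1 2 = 3.
Proof. reflexivity. Qed.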
(** [n]-ary currying / uncurrying *) Fixpoint ncurry (A B:Type) n : (A^n -> B) -> (A^^n-->B) := match n return (A^n -> B) -> (A^^n-->B) with | O => fun x => x tt | S n => fun f a => ncurry _ _ n (fun p => f (a,p)) end. Fixpoint nuncurry (A B:Type) n : (A^^n-->B) -> (A^n -> B) := match n return (A^^n-->B) -> (A^n -> B) with | O => fun x _ => x | S n => fun f p => let (x,p) := p in nuncurry _ _ n (f x) p end. (** Earlier functions can also be defined via [ncurry/nuncurry]. For instance: *) Definition nfun_to_nfun_bis A B C (f:B->C) n : (A^^n-->B) -> (A^^n-->C) := fun anb => ncurry _ _ n (fun an => f ((nuncurry _ _ n anb) an)). (** We can also use it to obtain another [fold] function, equivalent to the previous one, but with a nicer expansion (see for instance Int31.iszero). *) Fixpoint nfold_bis A B (f:A->B->B)(b:B) n : (A^^n-->B) := match n return (A^^n-->B) with | O => b | S n => fun a => nfun_to_nfun_bis _ _ _ (f a) n (nfold_bis _ _ f b n) end. (** From [nprod] to [list] *) Fixpoint nprod_to_list (A:Type) n : A^n -> list A := match n with | O => fun _ => nil | S n => fun p => let (x,p) := p in x::(nprod_to_list _ n p) end. (** From [list] to [nprod] *) Fixpoint nprod_of_list (A:Type)(l:list A) : A^(length l) := match l return A^(length l) with | nil => tt | x::l => (x, nprod_of_list _ l) end. (** This gives an additional way to write the fold *) Definition nfold_list (A B:Type)(f:A->B->B)(b:B) n : (A^^n-->B) := ncurry _ _ n (fun p => fold_right f b (nprod_to_list _ _ p)). coq-8.4pl2/theories/Numbers/Natural/0000750000175000001440000000000012127276550016500 5ustar notinuserscoq-8.4pl2/theories/Numbers/Natural/BigN/0000750000175000001440000000000012127276550017317 5ustar notinuserscoq-8.4pl2/theories/Numbers/Natural/BigN/NMake.v0000640000175000001440000014551312010532755020504 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let v := (eval red_t in u) in change v end. (** * Generic results *) Tactic Notation "destr_t" constr(x) "as" simple_intropattern(pat) := destruct (destr_t x) as pat; cbv zeta; rewrite ?iter_mk_t, ?spec_mk_t, ?spec_reduce. Lemma spec_same_level : forall A (P:Z->Z->A->Prop) (f : forall n, dom_t n -> dom_t n -> A), (forall n x y, P (ZnZ.to_Z x) (ZnZ.to_Z y) (f n x y)) -> forall x y, P [x] [y] (same_level f x y). Proof. intros. apply spec_same_level_dep with (P:=fun _ => P); auto. Qed. Theorem spec_pos: forall x, 0 <= [x]. Proof. intros x. destr_t x as (n,x). now case (ZnZ.spec_to_Z x). Qed. Lemma digits_dom_op_incr : forall n m, (n<=m)%nat -> (ZnZ.digits (dom_op n) <= ZnZ.digits (dom_op m))%positive. Proof. intros. change (Zpos (ZnZ.digits (dom_op n)) <= Zpos (ZnZ.digits (dom_op m))). rewrite !digits_dom_op, !Pshiftl_nat_Zpower. apply Z.mul_le_mono_nonneg_l; auto with zarith. apply Z.pow_le_mono_r; auto with zarith. Qed. Definition to_N (x : t) := Z.to_N (to_Z x). (** * Zero, One *) Definition zero := mk_t O ZnZ.zero. Definition one := mk_t O ZnZ.one. Theorem spec_0: [zero] = 0. Proof. unfold zero. rewrite spec_mk_t. exact ZnZ.spec_0. Qed. Theorem spec_1: [one] = 1. Proof. unfold one. rewrite spec_mk_t. exact ZnZ.spec_1. Qed. (** * Successor *) (** NB: it is crucial here and for the rest of this file to preserve the let-in's. They make it possible to pre-compute, once and for all, the field accesses to the Z/nZ initial structures (when n=0..6). 
*) Local Notation succn := (fun n => let op := dom_op n in let succ_c := ZnZ.succ_c in let one := ZnZ.one in fun x => match succ_c x with | C0 r => mk_t n r | C1 r => mk_t_S n (WW one r) end). Definition succ : t -> t := Eval red_t in iter_t succn. Lemma succ_fold : succ = iter_t succn. Proof. red_t; reflexivity. Qed. Theorem spec_succ: forall n, [succ n] = [n] + 1. Proof. intros x. rewrite succ_fold. destr_t x as (n,x). generalize (ZnZ.spec_succ_c x); case ZnZ.succ_c. intros. rewrite spec_mk_t. assumption. intros. unfold interp_carry in *. rewrite spec_mk_t_S. simpl. rewrite ZnZ.spec_1. assumption. Qed. (** Two *) (** Not really pretty, but since W0 might be Z/2Z, we're not sure there's a proper 2 there. *) Definition two := succ one. Lemma spec_2 : [two] = 2. Proof. unfold two. now rewrite spec_succ, spec_1. Qed. (** * Addition *) Local Notation addn := (fun n => let op := dom_op n in let add_c := ZnZ.add_c in let one := ZnZ.one in fun x y =>match add_c x y with | C0 r => mk_t n r | C1 r => mk_t_S n (WW one r) end). Definition add : t -> t -> t := Eval red_t in same_level addn. Lemma add_fold : add = same_level addn. Proof. red_t; reflexivity. Qed. Theorem spec_add: forall x y, [add x y] = [x] + [y]. Proof. intros x y. rewrite add_fold. apply spec_same_level; clear x y. intros n x y. simpl. generalize (ZnZ.spec_add_c x y); case ZnZ.add_c; intros z H. rewrite spec_mk_t. assumption. rewrite spec_mk_t_S. unfold interp_carry in H. simpl. rewrite ZnZ.spec_1. assumption. Qed. (** * Predecessor *) Local Notation predn := (fun n => let pred_c := ZnZ.pred_c in fun x => match pred_c x with | C0 r => reduce n r | C1 _ => zero end). Definition pred : t -> t := Eval red_t in iter_t predn. Lemma pred_fold : pred = iter_t predn. Proof. red_t; reflexivity. Qed. Theorem spec_pred_pos : forall x, 0 < [x] -> [pred x] = [x] - 1. Proof. intros x. rewrite pred_fold. destr_t x as (n,x). intros H. generalize (ZnZ.spec_pred_c x); case ZnZ.pred_c; intros y H'. rewrite spec_reduce. assumption. exfalso. unfold interp_carry in *. generalize (ZnZ.spec_to_Z x) (ZnZ.spec_to_Z y); auto with zarith. Qed. Theorem spec_pred0 : forall x, [x] = 0 -> [pred x] = 0. Proof. intros x. rewrite pred_fold. destr_t x as (n,x). intros H. generalize (ZnZ.spec_pred_c x); case ZnZ.pred_c; intros y H'. rewrite spec_reduce. unfold interp_carry in H'. generalize (ZnZ.spec_to_Z y); auto with zarith. exact spec_0. Qed. Lemma spec_pred x : [pred x] = Z.max 0 ([x]-1). Proof. rewrite Z.max_comm. destruct (Z.max_spec ([x]-1) 0) as [(H,->)|(H,->)]. - apply spec_pred0; generalize (spec_pos x); auto with zarith. - apply spec_pred_pos; auto with zarith. Qed. (** * Subtraction *) Local Notation subn := (fun n => let sub_c := ZnZ.sub_c in fun x y => match sub_c x y with | C0 r => reduce n r | C1 r => zero end). Definition sub : t -> t -> t := Eval red_t in same_level subn. Lemma sub_fold : sub = same_level subn. Proof. red_t; reflexivity. Qed. Theorem spec_sub_pos : forall x y, [y] <= [x] -> [sub x y] = [x] - [y]. Proof. intros x y. rewrite sub_fold. apply spec_same_level. clear x y. intros n x y. simpl. generalize (ZnZ.spec_sub_c x y); case ZnZ.sub_c; intros z H LE. rewrite spec_reduce. assumption. unfold interp_carry in H. exfalso. generalize (ZnZ.spec_to_Z z); auto with zarith. Qed. Theorem spec_sub0 : forall x y, [x] < [y] -> [sub x y] = 0. Proof. intros x y. rewrite sub_fold. apply spec_same_level. clear x y. intros n x y. simpl. generalize (ZnZ.spec_sub_c x y); case ZnZ.sub_c; intros z H LE. rewrite spec_reduce. unfold interp_carry in H. 
generalize (ZnZ.spec_to_Z z); auto with zarith. exact spec_0. Qed. Lemma spec_sub : forall x y, [sub x y] = Z.max 0 ([x]-[y]). Proof. intros. destruct (Z.le_gt_cases [y] [x]). rewrite Z.max_r; auto with zarith. apply spec_sub_pos; auto. rewrite Z.max_l; auto with zarith. apply spec_sub0; auto. Qed. (** * Comparison *) Definition comparen_m n : forall m, word (dom_t n) (S m) -> dom_t n -> comparison := let op := dom_op n in let zero := @ZnZ.zero _ op in let compare := @ZnZ.compare _ op in let compare0 := compare zero in fun m => compare_mn_1 (dom_t n) (dom_t n) zero compare compare0 compare (S m). Let spec_comparen_m: forall n m (x : word (dom_t n) (S m)) (y : dom_t n), comparen_m n m x y = Z.compare (eval n (S m) x) (ZnZ.to_Z y). Proof. intros n m x y. unfold comparen_m, eval. rewrite nmake_double. apply spec_compare_mn_1. exact ZnZ.spec_0. intros. apply ZnZ.spec_compare. exact ZnZ.spec_to_Z. exact ZnZ.spec_compare. exact ZnZ.spec_compare. exact ZnZ.spec_to_Z. Qed. Definition comparenm n m wx wy := let mn := Max.max n m in let d := diff n m in let op := make_op mn in ZnZ.compare (castm (diff_r n m) (extend_tr wx (snd d))) (castm (diff_l n m) (extend_tr wy (fst d))). Local Notation compare_folded := (iter_sym _ (fun n => @ZnZ.compare _ (dom_op n)) comparen_m comparenm CompOpp). Definition compare : t -> t -> comparison := Eval lazy beta iota delta [iter_sym dom_op dom_t comparen_m] in compare_folded. Lemma compare_fold : compare = compare_folded. Proof. lazy beta iota delta [iter_sym dom_op dom_t comparen_m]. reflexivity. Qed. Theorem spec_compare : forall x y, compare x y = Z.compare [x] [y]. Proof. intros x y. rewrite compare_fold. apply spec_iter_sym; clear x y. intros. apply ZnZ.spec_compare. intros. cbv beta zeta. apply spec_comparen_m. intros n m x y; unfold comparenm. rewrite (spec_cast_l n m x), (spec_cast_r n m y). unfold to_Z; apply ZnZ.spec_compare. intros. subst. now rewrite <- Z.compare_antisym. Qed. Definition eqb (x y : t) : bool := match compare x y with | Eq => true | _ => false end. Theorem spec_eqb x y : eqb x y = Z.eqb [x] [y]. Proof. apply eq_iff_eq_true. unfold eqb. rewrite Z.eqb_eq, <- Z.compare_eq_iff, spec_compare. split; [now destruct Z.compare | now intros ->]. Qed. Definition lt (n m : t) := [n] < [m]. Definition le (n m : t) := [n] <= [m]. Definition ltb (x y : t) : bool := match compare x y with | Lt => true | _ => false end. Theorem spec_ltb x y : ltb x y = Z.ltb [x] [y]. Proof. apply eq_iff_eq_true. rewrite Z.ltb_lt. unfold Z.lt, ltb. rewrite spec_compare. split; [now destruct Z.compare | now intros ->]. Qed. Definition leb (x y : t) : bool := match compare x y with | Gt => false | _ => true end. Theorem spec_leb x y : leb x y = Z.leb [x] [y]. Proof. apply eq_iff_eq_true. rewrite Z.leb_le. unfold Z.le, leb. rewrite spec_compare. destruct Z.compare; split; try easy. now destruct 1. Qed. Definition min (n m : t) : t := match compare n m with Gt => m | _ => n end. Definition max (n m : t) : t := match compare n m with Lt => m | _ => n end. Theorem spec_max : forall n m, [max n m] = Z.max [n] [m]. Proof. intros. unfold max, Z.max. rewrite spec_compare; destruct Z.compare; reflexivity. Qed. Theorem spec_min : forall n m, [min n m] = Z.min [n] [m]. Proof. intros. unfold min, Z.min. rewrite spec_compare; destruct Z.compare; reflexivity. Qed. 
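 (** Hedged illustrative addition (not part of the original interface):
     combining [spec_min] and [spec_max] proved above with the standard
     [ZArith] facts [Z.le_min_l], [Z.le_max_l] and [Z.le_trans] (assumed
     available from the [Z] module), the minimum never exceeds the maximum. *)

 Lemma spec_min_le_max : forall x y, [min x y] <= [max x y].
 Proof.
  intros. rewrite spec_min, spec_max.
  apply Z.le_trans with ([x]); [ apply Z.le_min_l | apply Z.le_max_l ].
 Qed.
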
(** * Multiplication *) Definition wn_mul n : forall m, word (dom_t n) (S m) -> dom_t n -> t := let op := dom_op n in let zero := @ZnZ.zero _ op in let succ := @ZnZ.succ _ op in let add_c := @ZnZ.add_c _ op in let mul_c := @ZnZ.mul_c _ op in let ww := @ZnZ.WW _ op in let ow := @ZnZ.OW _ op in let eq0 := @ZnZ.eq0 _ op in let mul_add := @DoubleMul.w_mul_add _ zero succ add_c mul_c in let mul_add_n1 := @DoubleMul.double_mul_add_n1 _ zero ww ow mul_add in fun m x y => let (w,r) := mul_add_n1 (S m) x y zero in if eq0 w then mk_t_w' n m r else mk_t_w' n (S m) (WW (extend n m w) r). Definition mulnm n m x y := let mn := Max.max n m in let d := diff n m in let op := make_op mn in reduce_n (S mn) (ZnZ.mul_c (castm (diff_r n m) (extend_tr x (snd d))) (castm (diff_l n m) (extend_tr y (fst d)))). Local Notation mul_folded := (iter_sym _ (fun n => let mul_c := ZnZ.mul_c in fun x y => reduce (S n) (succ_t _ (mul_c x y))) wn_mul mulnm (fun x => x)). Definition mul : t -> t -> t := Eval lazy beta iota delta [iter_sym dom_op dom_t reduce succ_t extend zeron wn_mul DoubleMul.w_mul_add mk_t_w'] in mul_folded. Lemma mul_fold : mul = mul_folded. Proof. lazy beta iota delta [iter_sym dom_op dom_t reduce succ_t extend zeron wn_mul DoubleMul.w_mul_add mk_t_w']. reflexivity. Qed. Lemma spec_muln: forall n (x: word _ (S n)) y, [Nn (S n) (ZnZ.mul_c (Ops:=make_op n) x y)] = [Nn n x] * [Nn n y]. Proof. intros n x y; unfold to_Z. rewrite <- ZnZ.spec_mul_c. rewrite make_op_S. case ZnZ.mul_c; auto. Qed. Lemma spec_mul_add_n1: forall n m x y z, let (q,r) := DoubleMul.double_mul_add_n1 ZnZ.zero ZnZ.WW ZnZ.OW (DoubleMul.w_mul_add ZnZ.zero ZnZ.succ ZnZ.add_c ZnZ.mul_c) (S m) x y z in ZnZ.to_Z q * (base (ZnZ.digits (nmake_op _ (dom_op n) (S m)))) + eval n (S m) r = eval n (S m) x * ZnZ.to_Z y + ZnZ.to_Z z. Proof. intros n m x y z. rewrite digits_nmake. unfold eval. rewrite nmake_double. apply DoubleMul.spec_double_mul_add_n1. apply ZnZ.spec_0. exact ZnZ.spec_WW. exact ZnZ.spec_OW. apply DoubleCyclic.spec_mul_add. Qed. Lemma spec_wn_mul : forall n m x y, [wn_mul n m x y] = (eval n (S m) x) * ZnZ.to_Z y. Proof. intros; unfold wn_mul. generalize (spec_mul_add_n1 n m x y ZnZ.zero). case DoubleMul.double_mul_add_n1; intros q r Hqr. rewrite ZnZ.spec_0, Z.add_0_r in Hqr. rewrite <- Hqr. generalize (ZnZ.spec_eq0 q); case ZnZ.eq0; intros HH. rewrite HH; auto. simpl. apply spec_mk_t_w'. clear. rewrite spec_mk_t_w'. set (m' := S m) in *. unfold eval. rewrite nmake_WW. f_equal. f_equal. rewrite <- spec_mk_t. symmetry. apply spec_extend. Qed. Theorem spec_mul : forall x y, [mul x y] = [x] * [y]. Proof. intros x y. rewrite mul_fold. apply spec_iter_sym; clear x y. intros n x y. cbv zeta beta. rewrite spec_reduce, spec_succ_t, <- ZnZ.spec_mul_c; auto. apply spec_wn_mul. intros n m x y; unfold mulnm. rewrite spec_reduce_n. rewrite (spec_cast_l n m x), (spec_cast_r n m y). apply spec_muln. intros. rewrite Z.mul_comm; auto. Qed. (** * Division by a smaller number *) Definition wn_divn1 n := let op := dom_op n in let zd := ZnZ.zdigits op in let zero := @ZnZ.zero _ op in let ww := @ZnZ.WW _ op in let head0 := @ZnZ.head0 _ op in let add_mul_div := @ZnZ.add_mul_div _ op in let div21 := @ZnZ.div21 _ op in let compare := @ZnZ.compare _ op in let sub := @ZnZ.sub _ op in let ddivn1 := DoubleDivn1.double_divn1 zd zero ww head0 add_mul_div div21 compare sub in fun m x y => let (u,v) := ddivn1 (S m) x y in (mk_t_w' n m u, mk_t n v). 
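 (** Before continuing with division, a hedged corollary of [spec_mul]
     (added here for illustration, not in the original file): squaring via
     [mul] matches [Z.pow] at exponent 2, using only the standard lemma
     [Z.pow_2_r] already relied upon elsewhere in this development. *)

 Lemma spec_mul_square : forall x, [mul x x] = [x] ^ 2.
 Proof.
  intros. rewrite spec_mul. symmetry. apply Z.pow_2_r.
 Qed.
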
Let div_gtnm n m wx wy := let mn := Max.max n m in let d := diff n m in let op := make_op mn in let (q, r):= ZnZ.div_gt (castm (diff_r n m) (extend_tr wx (snd d))) (castm (diff_l n m) (extend_tr wy (fst d))) in (reduce_n mn q, reduce_n mn r). Local Notation div_gt_folded := (iter _ (fun n => let div_gt := ZnZ.div_gt in fun x y => let (u,v) := div_gt x y in (reduce n u, reduce n v)) (fun n => let div_gt := ZnZ.div_gt in fun m x y => let y' := DoubleBase.get_low (zeron n) (S m) y in let (u,v) := div_gt x y' in (reduce n u, reduce n v)) wn_divn1 div_gtnm). Definition div_gt := Eval lazy beta iota delta [iter dom_op dom_t reduce zeron wn_divn1 mk_t_w' mk_t] in div_gt_folded. Lemma div_gt_fold : div_gt = div_gt_folded. Proof. lazy beta iota delta [iter dom_op dom_t reduce zeron wn_divn1 mk_t_w' mk_t]. reflexivity. Qed. Lemma spec_get_endn: forall n m x y, eval n m x <= [mk_t n y] -> [mk_t n (DoubleBase.get_low (zeron n) m x)] = eval n m x. Proof. intros n m x y H. unfold eval. rewrite nmake_double. rewrite spec_mk_t in *. apply DoubleBase.spec_get_low. apply spec_zeron. exact ZnZ.spec_to_Z. apply Z.le_lt_trans with (ZnZ.to_Z y); auto. rewrite <- nmake_double; auto. case (ZnZ.spec_to_Z y); auto. Qed. Let spec_divn1 n := DoubleDivn1.spec_double_divn1 (ZnZ.zdigits (dom_op n)) (ZnZ.zero:dom_t n) ZnZ.WW ZnZ.head0 ZnZ.add_mul_div ZnZ.div21 ZnZ.compare ZnZ.sub ZnZ.to_Z ZnZ.spec_to_Z ZnZ.spec_zdigits ZnZ.spec_0 ZnZ.spec_WW ZnZ.spec_head0 ZnZ.spec_add_mul_div ZnZ.spec_div21 ZnZ.spec_compare ZnZ.spec_sub. Lemma spec_div_gt_aux : forall x y, [x] > [y] -> 0 < [y] -> let (q,r) := div_gt x y in [x] = [q] * [y] + [r] /\ 0 <= [r] < [y]. Proof. intros x y. rewrite div_gt_fold. apply spec_iter; clear x y. intros n x y H1 H2. simpl. generalize (ZnZ.spec_div_gt x y H1 H2); case ZnZ.div_gt. intros u v. rewrite 2 spec_reduce. auto. intros n m x y H1 H2. cbv zeta beta. generalize (ZnZ.spec_div_gt x (DoubleBase.get_low (zeron n) (S m) y)). case ZnZ.div_gt. intros u v H3; repeat rewrite spec_reduce. generalize (spec_get_endn n (S m) y x). rewrite !spec_mk_t. intros H4. rewrite H4 in H3; auto with zarith. intros n m x y H1 H2. generalize (spec_divn1 n (S m) x y H2). unfold wn_divn1; case DoubleDivn1.double_divn1. intros u v H3. rewrite spec_mk_t_w', spec_mk_t. rewrite <- !nmake_double in H3; auto. intros n m x y H1 H2; unfold div_gtnm. generalize (ZnZ.spec_div_gt (castm (diff_r n m) (extend_tr x (snd (diff n m)))) (castm (diff_l n m) (extend_tr y (fst (diff n m))))). case ZnZ.div_gt. intros xx yy HH. repeat rewrite spec_reduce_n. rewrite (spec_cast_l n m x), (spec_cast_r n m y). unfold to_Z; apply HH. rewrite (spec_cast_l n m x) in H1; auto. rewrite (spec_cast_r n m y) in H1; auto. rewrite (spec_cast_r n m y) in H2; auto. Qed. Theorem spec_div_gt: forall x y, [x] > [y] -> 0 < [y] -> let (q,r) := div_gt x y in [q] = [x] / [y] /\ [r] = [x] mod [y]. Proof. intros x y H1 H2; generalize (spec_div_gt_aux x y H1 H2); case div_gt. intros q r (H3, H4); split. apply (Zdiv_unique [x] [y] [q] [r]); auto. rewrite Z.mul_comm; auto. apply (Zmod_unique [x] [y] [q] [r]); auto. rewrite Z.mul_comm; auto. Qed. (** * General Division *) Definition div_eucl (x y : t) : t * t := if eqb y zero then (zero,zero) else match compare x y with | Eq => (one, zero) | Lt => (zero, x) | Gt => div_gt x y end. Theorem spec_div_eucl: forall x y, let (q,r) := div_eucl x y in ([q], [r]) = Z.div_eucl [x] [y]. Proof. intros x y. unfold div_eucl. rewrite spec_eqb, spec_compare, spec_0. case Z.eqb_spec. intros ->. rewrite spec_0. destruct [x]; auto. intros H'. 
assert (H : 0 < [y]) by (generalize (spec_pos y); auto with zarith). clear H'. case Z.compare_spec; intros Cmp; rewrite ?spec_0, ?spec_1; intros; auto with zarith. rewrite Cmp; generalize (Z_div_same [y] (Z.lt_gt _ _ H)) (Z_mod_same [y] (Z.lt_gt _ _ H)); unfold Z.div, Z.modulo; case Z.div_eucl; intros; subst; auto. assert (LeLt: 0 <= [x] < [y]) by (generalize (spec_pos x); auto). generalize (Zdiv_small _ _ LeLt) (Zmod_small _ _ LeLt); unfold Z.div, Z.modulo; case Z.div_eucl; intros; subst; auto. generalize (spec_div_gt _ _ (Z.lt_gt _ _ Cmp) H); auto. unfold Z.div, Z.modulo; case Z.div_eucl; case div_gt. intros a b c d (H1, H2); subst; auto. Qed. Definition div (x y : t) : t := fst (div_eucl x y). Theorem spec_div: forall x y, [div x y] = [x] / [y]. Proof. intros x y; unfold div; generalize (spec_div_eucl x y); case div_eucl; simpl fst. intros xx yy; unfold Z.div; case Z.div_eucl; intros qq rr H; injection H; auto. Qed. (** * Modulo by a smaller number *) Definition wn_modn1 n := let op := dom_op n in let zd := ZnZ.zdigits op in let zero := @ZnZ.zero _ op in let head0 := @ZnZ.head0 _ op in let add_mul_div := @ZnZ.add_mul_div _ op in let div21 := @ZnZ.div21 _ op in let compare := @ZnZ.compare _ op in let sub := @ZnZ.sub _ op in let dmodn1 := DoubleDivn1.double_modn1 zd zero head0 add_mul_div div21 compare sub in fun m x y => reduce n (dmodn1 (S m) x y). Let mod_gtnm n m wx wy := let mn := Max.max n m in let d := diff n m in let op := make_op mn in reduce_n mn (ZnZ.modulo_gt (castm (diff_r n m) (extend_tr wx (snd d))) (castm (diff_l n m) (extend_tr wy (fst d)))). Local Notation mod_gt_folded := (iter _ (fun n => let modulo_gt := ZnZ.modulo_gt in fun x y => reduce n (modulo_gt x y)) (fun n => let modulo_gt := ZnZ.modulo_gt in fun m x y => reduce n (modulo_gt x (DoubleBase.get_low (zeron n) (S m) y))) wn_modn1 mod_gtnm). Definition mod_gt := Eval lazy beta iota delta [iter dom_op dom_t reduce wn_modn1 zeron] in mod_gt_folded. Lemma mod_gt_fold : mod_gt = mod_gt_folded. Proof. lazy beta iota delta [iter dom_op dom_t reduce wn_modn1 zeron]. reflexivity. Qed. Let spec_modn1 n := DoubleDivn1.spec_double_modn1 (ZnZ.zdigits (dom_op n)) (ZnZ.zero:dom_t n) ZnZ.WW ZnZ.head0 ZnZ.add_mul_div ZnZ.div21 ZnZ.compare ZnZ.sub ZnZ.to_Z ZnZ.spec_to_Z ZnZ.spec_zdigits ZnZ.spec_0 ZnZ.spec_WW ZnZ.spec_head0 ZnZ.spec_add_mul_div ZnZ.spec_div21 ZnZ.spec_compare ZnZ.spec_sub. Theorem spec_mod_gt: forall x y, [x] > [y] -> 0 < [y] -> [mod_gt x y] = [x] mod [y]. Proof. intros x y. rewrite mod_gt_fold. apply spec_iter; clear x y. intros n x y H1 H2. simpl. rewrite spec_reduce. exact (ZnZ.spec_modulo_gt x y H1 H2). intros n m x y H1 H2. cbv zeta beta. rewrite spec_reduce. rewrite <- spec_mk_t in H1. rewrite <- (spec_get_endn n (S m) y x); auto with zarith. rewrite spec_mk_t. apply ZnZ.spec_modulo_gt; auto. rewrite <- (spec_get_endn n (S m) y x), !spec_mk_t in H1; auto with zarith. rewrite <- (spec_get_endn n (S m) y x), !spec_mk_t in H2; auto with zarith. intros n m x y H1 H2. unfold wn_modn1. rewrite spec_reduce. unfold eval; rewrite nmake_double. apply (spec_modn1 n); auto. intros n m x y H1 H2; unfold mod_gtnm. repeat rewrite spec_reduce_n. rewrite (spec_cast_l n m x), (spec_cast_r n m y). unfold to_Z; apply ZnZ.spec_modulo_gt. rewrite (spec_cast_l n m x) in H1; auto. rewrite (spec_cast_r n m y) in H1; auto. rewrite (spec_cast_r n m y) in H2; auto. Qed. (** * General Modulo *) Definition modulo (x y : t) : t := if eqb y zero then zero else match compare x y with | Eq => zero | Lt => x | Gt => mod_gt x y end. 
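 (** Hedged remark (added): since [div] is by definition the first
     projection of [div_eucl], its specification [spec_div] transfers
     definitionally to that projection. This is only a restatement,
     kept here as an example. *)

 Lemma spec_div_eucl_fst : forall x y, [fst (div_eucl x y)] = [x] / [y].
 Proof.
  exact spec_div.
 Qed.
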
Theorem spec_modulo: forall x y, [modulo x y] = [x] mod [y]. Proof. intros x y. unfold modulo. rewrite spec_eqb, spec_compare, spec_0. case Z.eqb_spec. intros ->; rewrite spec_0. destruct [x]; auto. intro H'. assert (H : 0 < [y]) by (generalize (spec_pos y); auto with zarith). clear H'. case Z.compare_spec; rewrite ?spec_0, ?spec_1; intros; try split; auto with zarith. rewrite H0; symmetry; apply Z_mod_same; auto with zarith. symmetry; apply Zmod_small; auto with zarith. generalize (spec_pos x); auto with zarith. apply spec_mod_gt; auto with zarith. Qed. (** * Square *) Local Notation squaren := (fun n => let square_c := ZnZ.square_c in fun x => reduce (S n) (succ_t _ (square_c x))). Definition square : t -> t := Eval red_t in iter_t squaren. Lemma square_fold : square = iter_t squaren. Proof. red_t; reflexivity. Qed. Theorem spec_square: forall x, [square x] = [x] * [x]. Proof. intros x. rewrite square_fold. destr_t x as (n,x). rewrite spec_succ_t. exact (ZnZ.spec_square_c x). Qed. (** * Square Root *) Local Notation sqrtn := (fun n => let sqrt := ZnZ.sqrt in fun x => reduce n (sqrt x)). Definition sqrt : t -> t := Eval red_t in iter_t sqrtn. Lemma sqrt_fold : sqrt = iter_t sqrtn. Proof. red_t; reflexivity. Qed. Theorem spec_sqrt_aux: forall x, [sqrt x] ^ 2 <= [x] < ([sqrt x] + 1) ^ 2. Proof. intros x. rewrite sqrt_fold. destr_t x as (n,x). exact (ZnZ.spec_sqrt x). Qed. Theorem spec_sqrt: forall x, [sqrt x] = Z.sqrt [x]. Proof. intros x. symmetry. apply Z.sqrt_unique. rewrite <- ! Z.pow_2_r. apply spec_sqrt_aux. Qed. (** * Power *) Fixpoint pow_pos (x:t)(p:positive) : t := match p with | xH => x | xO p => square (pow_pos x p) | xI p => mul (square (pow_pos x p)) x end. Theorem spec_pow_pos: forall x n, [pow_pos x n] = [x] ^ Zpos n. Proof. intros x n; generalize x; elim n; clear n x; simpl pow_pos. intros; rewrite spec_mul; rewrite spec_square; rewrite H. rewrite Pos2Z.inj_xI; rewrite Zpower_exp; auto with zarith. rewrite (Z.mul_comm 2); rewrite Z.pow_mul_r; auto with zarith. rewrite Z.pow_2_r; rewrite Z.pow_1_r; auto. intros; rewrite spec_square; rewrite H. rewrite Pos2Z.inj_xO; auto with zarith. rewrite (Z.mul_comm 2); rewrite Z.pow_mul_r; auto with zarith. rewrite Z.pow_2_r; auto. intros; rewrite Z.pow_1_r; auto. Qed. Definition pow_N (x:t)(n:N) : t := match n with | BinNat.N0 => one | BinNat.Npos p => pow_pos x p end. Theorem spec_pow_N: forall x n, [pow_N x n] = [x] ^ Z.of_N n. Proof. destruct n; simpl. apply spec_1. apply spec_pow_pos. Qed. Definition pow (x y:t) : t := pow_N x (to_N y). Theorem spec_pow : forall x y, [pow x y] = [x] ^ [y]. Proof. intros. unfold pow, to_N. now rewrite spec_pow_N, Z2N.id by apply spec_pos. Qed. (** * digits Number of digits in the representation of a numbers (including head zero's). NB: This function isn't a morphism for setoid [eq]. *) Local Notation digitsn := (fun n => let digits := ZnZ.digits (dom_op n) in fun _ => digits). Definition digits : t -> positive := Eval red_t in iter_t digitsn. Lemma digits_fold : digits = iter_t digitsn. Proof. red_t; reflexivity. Qed. Theorem spec_digits: forall x, 0 <= [x] < 2 ^ Zpos (digits x). Proof. intros x. rewrite digits_fold. destr_t x as (n,x). exact (ZnZ.spec_to_Z x). Qed. Lemma digits_level : forall x, digits x = ZnZ.digits (dom_op (level x)). Proof. intros x. rewrite digits_fold. unfold level. destr_t x as (n,x). reflexivity. Qed. 
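 (** Hedged example (added, not part of the original interface): at exponent
     [two], [pow] agrees with [square] at the level of [to_Z]. The proof only
     uses [spec_pow], [spec_square] and [spec_2] proved above, together with
     the standard lemma [Z.pow_2_r]. *)

 Lemma spec_pow_2 : forall x, [pow x two] = [square x].
 Proof.
  intros. rewrite spec_pow, spec_square, spec_2. apply Z.pow_2_r.
 Qed.
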
(** * Gcd *) Definition gcd_gt_body a b cont := match compare b zero with | Gt => let r := mod_gt a b in match compare r zero with | Gt => cont r (mod_gt b r) | _ => b end | _ => a end. Theorem Zspec_gcd_gt_body: forall a b cont p, [a] > [b] -> [a] < 2 ^ p -> (forall a1 b1, [a1] < 2 ^ (p - 1) -> [a1] > [b1] -> Zis_gcd [a1] [b1] [cont a1 b1]) -> Zis_gcd [a] [b] [gcd_gt_body a b cont]. Proof. intros a b cont p H2 H3 H4; unfold gcd_gt_body. rewrite ! spec_compare, spec_0. case Z.compare_spec. intros ->; apply Zis_gcd_0. intros HH; absurd (0 <= [b]); auto with zarith. case (spec_digits b); auto with zarith. intros H5; case Z.compare_spec. intros H6; rewrite <- (Z.mul_1_r [b]). rewrite (Z_div_mod_eq [a] [b]); auto with zarith. rewrite <- spec_mod_gt; auto with zarith. rewrite H6; rewrite Z.add_0_r. apply Zis_gcd_mult; apply Zis_gcd_1. intros; apply False_ind. case (spec_digits (mod_gt a b)); auto with zarith. intros H6; apply DoubleDiv.Zis_gcd_mod; auto with zarith. apply DoubleDiv.Zis_gcd_mod; auto with zarith. rewrite <- spec_mod_gt; auto with zarith. assert (F2: [b] > [mod_gt a b]). case (Z_mod_lt [a] [b]); auto with zarith. repeat rewrite <- spec_mod_gt; auto with zarith. assert (F3: [mod_gt a b] > [mod_gt b (mod_gt a b)]). case (Z_mod_lt [b] [mod_gt a b]); auto with zarith. rewrite <- spec_mod_gt; auto with zarith. repeat rewrite <- spec_mod_gt; auto with zarith. apply H4; auto with zarith. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. apply Z.le_lt_trans with ([b] + [mod_gt a b]); auto with zarith. apply Z.le_lt_trans with (([a]/[b]) * [b] + [mod_gt a b]); auto with zarith. - apply Z.add_le_mono_r. rewrite <- (Z.mul_1_l [b]) at 1. apply Z.mul_le_mono_nonneg_r; auto with zarith. change 1 with (Z.succ 0). apply Z.le_succ_l. apply Z.div_str_pos; auto with zarith. - rewrite Z.mul_comm; rewrite spec_mod_gt; auto with zarith. rewrite <- Z_div_mod_eq; auto with zarith. rewrite Z.mul_comm, <- Z.pow_succ_r, Z.sub_1_r, Z.succ_pred; auto. apply Z.le_0_sub. change 1 with (Z.succ 0). apply Z.le_succ_l. destruct p; simpl in H3; auto with zarith. Qed. Fixpoint gcd_gt_aux (p:positive) (cont:t->t->t) (a b:t) : t := gcd_gt_body a b (fun a b => match p with | xH => cont a b | xO p => gcd_gt_aux p (gcd_gt_aux p cont) a b | xI p => gcd_gt_aux p (gcd_gt_aux p cont) a b end). Theorem Zspec_gcd_gt_aux: forall p n a b cont, [a] > [b] -> [a] < 2 ^ (Zpos p + n) -> (forall a1 b1, [a1] < 2 ^ n -> [a1] > [b1] -> Zis_gcd [a1] [b1] [cont a1 b1]) -> Zis_gcd [a] [b] [gcd_gt_aux p cont a b]. intros p; elim p; clear p. intros p Hrec n a b cont H2 H3 H4. unfold gcd_gt_aux; apply Zspec_gcd_gt_body with (Zpos (xI p) + n); auto. intros a1 b1 H6 H7. apply Hrec with (Zpos p + n); auto. replace (Zpos p + (Zpos p + n)) with (Zpos (xI p) + n - 1); auto. rewrite Pos2Z.inj_xI; ring. intros a2 b2 H9 H10. apply Hrec with n; auto. intros p Hrec n a b cont H2 H3 H4. unfold gcd_gt_aux; apply Zspec_gcd_gt_body with (Zpos (xO p) + n); auto. intros a1 b1 H6 H7. apply Hrec with (Zpos p + n - 1); auto. replace (Zpos p + (Zpos p + n - 1)) with (Zpos (xO p) + n - 1); auto. rewrite Pos2Z.inj_xO; ring. intros a2 b2 H9 H10. apply Hrec with (n - 1); auto. replace (Zpos p + (n - 1)) with (Zpos p + n - 1); auto with zarith. intros a3 b3 H12 H13; apply H4; auto with zarith. apply Z.lt_le_trans with (1 := H12). apply Z.pow_le_mono_r; auto with zarith. intros n a b cont H H2 H3. simpl gcd_gt_aux. apply Zspec_gcd_gt_body with (n + 1); auto with zarith. rewrite Z.add_comm; auto. intros a1 b1 H5 H6; apply H3; auto. 
replace n with (n + 1 - 1); auto; try ring. Qed. Definition gcd_cont a b := match compare one b with | Eq => one | _ => a end. Definition gcd_gt a b := gcd_gt_aux (digits a) gcd_cont a b. Theorem spec_gcd_gt: forall a b, [a] > [b] -> [gcd_gt a b] = Z.gcd [a] [b]. Proof. intros a b H2. case (spec_digits (gcd_gt a b)); intros H3 H4. case (spec_digits a); intros H5 H6. symmetry; apply Zis_gcd_gcd; auto with zarith. unfold gcd_gt; apply Zspec_gcd_gt_aux with 0; auto with zarith. intros a1 a2; rewrite Z.pow_0_r. case (spec_digits a2); intros H7 H8; intros; apply False_ind; auto with zarith. Qed. Definition gcd (a b : t) : t := match compare a b with | Eq => a | Lt => gcd_gt b a | Gt => gcd_gt a b end. Theorem spec_gcd: forall a b, [gcd a b] = Z.gcd [a] [b]. Proof. intros a b. case (spec_digits a); intros H1 H2. case (spec_digits b); intros H3 H4. unfold gcd. rewrite spec_compare. case Z.compare_spec. intros HH; rewrite HH; symmetry; apply Zis_gcd_gcd; auto. apply Zis_gcd_refl. intros; transitivity (Z.gcd [b] [a]). apply spec_gcd_gt; auto with zarith. apply Zis_gcd_gcd; auto with zarith. apply Z.gcd_nonneg. apply Zis_gcd_sym; apply Zgcd_is_gcd. intros; apply spec_gcd_gt; auto with zarith. Qed. (** * Parity test *) Definition even : t -> bool := Eval red_t in iter_t (fun n x => ZnZ.is_even x). Definition odd x := negb (even x). Lemma even_fold : even = iter_t (fun n x => ZnZ.is_even x). Proof. red_t; reflexivity. Qed. Theorem spec_even_aux: forall x, if even x then [x] mod 2 = 0 else [x] mod 2 = 1. Proof. intros x. rewrite even_fold. destr_t x as (n,x). exact (ZnZ.spec_is_even x). Qed. Theorem spec_even: forall x, even x = Z.even [x]. Proof. intros x. assert (H := spec_even_aux x). symmetry. rewrite (Z.div_mod [x] 2); auto with zarith. destruct (even x); rewrite H, ?Z.add_0_r. rewrite Zeven_bool_iff. apply Zeven_2p. apply not_true_is_false. rewrite Zeven_bool_iff. apply Zodd_not_Zeven. apply Zodd_2p_plus_1. Qed. Theorem spec_odd: forall x, odd x = Z.odd [x]. Proof. intros x. unfold odd. assert (H := spec_even_aux x). symmetry. rewrite (Z.div_mod [x] 2); auto with zarith. destruct (even x); rewrite H, ?Z.add_0_r; simpl negb. apply not_true_is_false. rewrite Zodd_bool_iff. apply Zeven_not_Zodd. apply Zeven_2p. apply Zodd_bool_iff. apply Zodd_2p_plus_1. Qed. (** * Conversion *) Definition pheight p := Peano.pred (Pos.to_nat (get_height (ZnZ.digits (dom_op 0)) (plength p))). Theorem pheight_correct: forall p, Zpos p < 2 ^ (Zpos (ZnZ.digits (dom_op 0)) * 2 ^ (Z.of_nat (pheight p))). Proof. intros p; unfold pheight. rewrite Nat2Z.inj_pred by apply Pos2Nat.is_pos. rewrite positive_nat_Z. rewrite <- Z.sub_1_r. assert (F2:= (get_height_correct (ZnZ.digits (dom_op 0)) (plength p))). apply Z.lt_le_trans with (Zpos (Pos.succ p)). rewrite Pos2Z.inj_succ; auto with zarith. apply Z.le_trans with (1 := plength_pred_correct (Pos.succ p)). rewrite Pos.pred_succ. apply Z.pow_le_mono_r; auto with zarith. Qed. Definition of_pos (x:positive) : t := let n := pheight x in reduce n (snd (ZnZ.of_pos x)). Theorem spec_of_pos: forall x, [of_pos x] = Zpos x. Proof. intros x; unfold of_pos. rewrite spec_reduce. simpl. apply ZnZ.of_pos_correct. unfold base. apply Z.lt_le_trans with (1 := pheight_correct x). apply Z.pow_le_mono_r; auto with zarith. rewrite (digits_dom_op (_ _)), Pshiftl_nat_Zpower. auto with zarith. Qed. Definition of_N (x:N) : t := match x with | BinNat.N0 => zero | Npos p => of_pos p end. Theorem spec_of_N: forall x, [of_N x] = Z.of_N x. Proof. intros x; case x. simpl of_N. exact spec_0. 
intros p; exact (spec_of_pos p). Qed. (** * [head0] and [tail0] Number of zero at the beginning and at the end of the representation of the number. NB: these functions are not morphism for setoid [eq]. *) Local Notation head0n := (fun n => let head0 := ZnZ.head0 in fun x => reduce n (head0 x)). Definition head0 : t -> t := Eval red_t in iter_t head0n. Lemma head0_fold : head0 = iter_t head0n. Proof. red_t; reflexivity. Qed. Theorem spec_head00: forall x, [x] = 0 -> [head0 x] = Zpos (digits x). Proof. intros x. rewrite head0_fold, digits_fold. destr_t x as (n,x). exact (ZnZ.spec_head00 x). Qed. Lemma pow2_pos_minus_1 : forall z, 0 2^(z-1) = 2^z / 2. Proof. intros. apply Zdiv_unique with 0; auto with zarith. change 2 with (2^1) at 2. rewrite <- Zpower_exp; auto with zarith. rewrite Z.add_0_r. f_equal. auto with zarith. Qed. Theorem spec_head0: forall x, 0 < [x] -> 2 ^ (Zpos (digits x) - 1) <= 2 ^ [head0 x] * [x] < 2 ^ Zpos (digits x). Proof. intros x. rewrite pow2_pos_minus_1 by (red; auto). rewrite head0_fold, digits_fold. destr_t x as (n,x). exact (ZnZ.spec_head0 x). Qed. Local Notation tail0n := (fun n => let tail0 := ZnZ.tail0 in fun x => reduce n (tail0 x)). Definition tail0 : t -> t := Eval red_t in iter_t tail0n. Lemma tail0_fold : tail0 = iter_t tail0n. Proof. red_t; reflexivity. Qed. Theorem spec_tail00: forall x, [x] = 0 -> [tail0 x] = Zpos (digits x). Proof. intros x. rewrite tail0_fold, digits_fold. destr_t x as (n,x). exact (ZnZ.spec_tail00 x). Qed. Theorem spec_tail0: forall x, 0 < [x] -> exists y, 0 <= y /\ [x] = (2 * y + 1) * 2 ^ [tail0 x]. Proof. intros x. rewrite tail0_fold. destr_t x as (n,x). exact (ZnZ.spec_tail0 x). Qed. (** * [Ndigits] Same as [digits] but encoded using large integers NB: this function is not a morphism for setoid [eq]. *) Local Notation Ndigitsn := (fun n => let d := reduce n (ZnZ.zdigits (dom_op n)) in fun _ => d). Definition Ndigits : t -> t := Eval red_t in iter_t Ndigitsn. Lemma Ndigits_fold : Ndigits = iter_t Ndigitsn. Proof. red_t; reflexivity. Qed. Theorem spec_Ndigits: forall x, [Ndigits x] = Zpos (digits x). Proof. intros x. rewrite Ndigits_fold, digits_fold. destr_t x as (n,x). apply ZnZ.spec_zdigits. Qed. (** * Binary logarithm *) Local Notation log2n := (fun n => let op := dom_op n in let zdigits := ZnZ.zdigits op in let head0 := ZnZ.head0 in let sub_carry := ZnZ.sub_carry in fun x => reduce n (sub_carry zdigits (head0 x))). Definition log2 : t -> t := Eval red_t in let log2 := iter_t log2n in fun x => if eqb x zero then zero else log2 x. Lemma log2_fold : log2 = fun x => if eqb x zero then zero else iter_t log2n x. Proof. red_t; reflexivity. Qed. Lemma spec_log2_0 : forall x, [x] = 0 -> [log2 x] = 0. Proof. intros x H. rewrite log2_fold. rewrite spec_eqb, H. rewrite spec_0. simpl. exact spec_0. Qed. Lemma head0_zdigits : forall n (x : dom_t n), 0 < ZnZ.to_Z x -> ZnZ.to_Z (ZnZ.head0 x) < ZnZ.to_Z (ZnZ.zdigits (dom_op n)). Proof. intros n x H. destruct (ZnZ.spec_head0 x H) as (_,H0). intros. assert (H1 := ZnZ.spec_to_Z (ZnZ.head0 x)). assert (H2 := ZnZ.spec_to_Z (ZnZ.zdigits (dom_op n))). unfold base in *. rewrite ZnZ.spec_zdigits in H2 |- *. set (h := ZnZ.to_Z (ZnZ.head0 x)) in *; clearbody h. set (d := ZnZ.digits (dom_op n)) in *; clearbody d. destruct (Z_lt_le_dec h (Zpos d)); auto. exfalso. assert (1 * 2^Zpos d <= ZnZ.to_Z x * 2^h). apply Z.mul_le_mono_nonneg; auto with zarith. apply Z.pow_le_mono_r; auto with zarith. rewrite Z.mul_comm in H0. auto with zarith. Qed. 
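 (** Hedged restatement (added for illustration): the bound given by
     [spec_digits] can also be phrased through [Ndigits], thanks to
     [spec_Ndigits] proved above. *)

 Lemma spec_digits_Ndigits : forall x, 0 <= [x] < 2 ^ [Ndigits x].
 Proof.
  intros. rewrite spec_Ndigits. apply spec_digits.
 Qed.
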
Lemma spec_log2_pos : forall x, [x]<>0 -> 2^[log2 x] <= [x] < 2^([log2 x]+1). Proof. intros x H. rewrite log2_fold. rewrite spec_eqb. rewrite spec_0. case Z.eqb_spec. auto with zarith. clear H. destr_t x as (n,x). intros H. rewrite ZnZ.spec_sub_carry. assert (H0 := ZnZ.spec_to_Z x). assert (H1 := ZnZ.spec_to_Z (ZnZ.head0 x)). assert (H2 := ZnZ.spec_to_Z (ZnZ.zdigits (dom_op n))). assert (H3 := head0_zdigits n x). rewrite Zmod_small by auto with zarith. rewrite Z.sub_simpl_r. rewrite (Z.mul_lt_mono_pos_l (2^(ZnZ.to_Z (ZnZ.head0 x)))); auto with zarith. rewrite (Z.mul_le_mono_pos_l _ _ (2^(ZnZ.to_Z (ZnZ.head0 x)))); auto with zarith. rewrite <- 2 Zpower_exp; auto with zarith. rewrite !Z.add_sub_assoc, !Z.add_simpl_l. rewrite ZnZ.spec_zdigits. rewrite pow2_pos_minus_1 by (red; auto). apply ZnZ.spec_head0; auto with zarith. Qed. Lemma spec_log2 : forall x, [log2 x] = Z.log2 [x]. Proof. intros. destruct (Z_lt_ge_dec 0 [x]). symmetry. apply Z.log2_unique. apply spec_pos. apply spec_log2_pos. intro EQ; rewrite EQ in *; auto with zarith. rewrite spec_log2_0. rewrite Z.log2_nonpos; auto with zarith. generalize (spec_pos x); auto with zarith. Qed. Lemma log2_digits_head0 : forall x, 0 < [x] -> [log2 x] = Zpos (digits x) - [head0 x] - 1. Proof. intros. rewrite log2_fold. rewrite spec_eqb. rewrite spec_0. case Z.eqb_spec. auto with zarith. intros _. revert H. rewrite digits_fold, head0_fold. destr_t x as (n,x). rewrite ZnZ.spec_sub_carry. intros. generalize (head0_zdigits n x H). generalize (ZnZ.spec_to_Z (ZnZ.head0 x)). generalize (ZnZ.spec_to_Z (ZnZ.zdigits (dom_op n))). rewrite ZnZ.spec_zdigits. intros. apply Zmod_small. auto with zarith. Qed. (** * Right shift *) Local Notation shiftrn := (fun n => let op := dom_op n in let zdigits := ZnZ.zdigits op in let sub_c := ZnZ.sub_c in let add_mul_div := ZnZ.add_mul_div in let zzero := ZnZ.zero in fun x p => match sub_c zdigits p with | C0 d => reduce n (add_mul_div d zzero x) | C1 _ => zero end). Definition shiftr : t -> t -> t := Eval red_t in same_level shiftrn. Lemma shiftr_fold : shiftr = same_level shiftrn. Proof. red_t; reflexivity. Qed. Lemma div_pow2_bound :forall x y z, 0 <= x -> 0 <= y -> x < z -> 0 <= x / 2 ^ y < z. Proof. intros x y z HH HH1 HH2. split; auto with zarith. apply Z.le_lt_trans with (2 := HH2); auto with zarith. apply Zdiv_le_upper_bound; auto with zarith. pattern x at 1; replace x with (x * 2 ^ 0); auto with zarith. apply Z.mul_le_mono_nonneg_l; auto. apply Z.pow_le_mono_r; auto with zarith. rewrite Z.pow_0_r; ring. Qed. Theorem spec_shiftr_pow2 : forall x n, [shiftr x n] = [x] / 2 ^ [n]. Proof. intros x y. rewrite shiftr_fold. apply spec_same_level. clear x y. intros n x p. simpl. assert (Hx := ZnZ.spec_to_Z x). assert (Hy := ZnZ.spec_to_Z p). generalize (ZnZ.spec_sub_c (ZnZ.zdigits (dom_op n)) p). case ZnZ.sub_c; intros d H; unfold interp_carry in *; simpl. (** Subtraction without underflow : [ p <= digits ] *) rewrite spec_reduce. rewrite ZnZ.spec_zdigits in H. rewrite ZnZ.spec_add_mul_div by auto with zarith. rewrite ZnZ.spec_0, Z.mul_0_l, Z.add_0_l. rewrite Zmod_small. f_equal. f_equal. auto with zarith. split. auto with zarith. apply div_pow2_bound; auto with zarith. (** Subtraction with underflow : [ digits < p ] *) rewrite ZnZ.spec_0. symmetry. apply Zdiv_small. split; auto with zarith. apply Z.lt_le_trans with (base (ZnZ.digits (dom_op n))); auto with zarith. unfold base. apply Z.pow_le_mono_r; auto with zarith. rewrite ZnZ.spec_zdigits in H. generalize (ZnZ.spec_to_Z d); auto with zarith. Qed. 
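 (** Hedged corollary of [spec_shiftr_pow2] (added): shifting right by
     [zero] is the identity on values. The proof assumes the standard
     [ZArith] lemmas [Z.pow_0_r] and [Zdiv_1_r]. *)

 Lemma spec_shiftr_zero : forall x, [shiftr x zero] = [x].
 Proof.
  intros. rewrite spec_shiftr_pow2, spec_0, Z.pow_0_r. apply Zdiv_1_r.
 Qed.
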
Lemma spec_shiftr: forall x p, [shiftr x p] = Z.shiftr [x] [p]. Proof. intros. now rewrite spec_shiftr_pow2, Z.shiftr_div_pow2 by apply spec_pos. Qed. (** * Left shift *) (** First an unsafe version, working correctly only if the representation is large enough *) Local Notation unsafe_shiftln := (fun n => let op := dom_op n in let add_mul_div := ZnZ.add_mul_div in let zero := ZnZ.zero in fun x p => reduce n (add_mul_div p x zero)). Definition unsafe_shiftl : t -> t -> t := Eval red_t in same_level unsafe_shiftln. Lemma unsafe_shiftl_fold : unsafe_shiftl = same_level unsafe_shiftln. Proof. red_t; reflexivity. Qed. Theorem spec_unsafe_shiftl_aux : forall x p K, 0 <= K -> [x] < 2^K -> [p] + K <= Zpos (digits x) -> [unsafe_shiftl x p] = [x] * 2 ^ [p]. Proof. intros x p. rewrite unsafe_shiftl_fold. rewrite digits_level. apply spec_same_level_dep. intros n m z z' r LE H K HK H1 H2. apply (H K); auto. transitivity (Zpos (ZnZ.digits (dom_op n))); auto. apply digits_dom_op_incr; auto. clear x p. intros n x p K HK Hx Hp. simpl. rewrite spec_reduce. destruct (ZnZ.spec_to_Z x). destruct (ZnZ.spec_to_Z p). rewrite ZnZ.spec_add_mul_div by (omega with *). rewrite ZnZ.spec_0, Zdiv_0_l, Z.add_0_r. apply Zmod_small. unfold base. split; auto with zarith. rewrite Z.mul_comm. apply Z.lt_le_trans with (2^(ZnZ.to_Z p + K)). rewrite Zpower_exp; auto with zarith. apply Z.mul_lt_mono_pos_l; auto with zarith. apply Z.pow_le_mono_r; auto with zarith. Qed. Theorem spec_unsafe_shiftl: forall x p, [p] <= [head0 x] -> [unsafe_shiftl x p] = [x] * 2 ^ [p]. Proof. intros. destruct (Z.eq_dec [x] 0) as [EQ|NEQ]. (* [x] = 0 *) apply spec_unsafe_shiftl_aux with 0; auto with zarith. now rewrite EQ. rewrite spec_head00 in *; auto with zarith. (* [x] <> 0 *) apply spec_unsafe_shiftl_aux with ([log2 x] + 1); auto with zarith. generalize (spec_pos (log2 x)); auto with zarith. destruct (spec_log2_pos x); auto with zarith. rewrite log2_digits_head0; auto with zarith. generalize (spec_pos x); auto with zarith. Qed. (** Then we define a function doubling the size of the representation but without changing the value of the number. *) Local Notation double_size_n := (fun n => let zero := ZnZ.zero in fun x => mk_t_S n (WW zero x)). Definition double_size : t -> t := Eval red_t in iter_t double_size_n. Lemma double_size_fold : double_size = iter_t double_size_n. Proof. red_t; reflexivity. Qed. Lemma double_size_level : forall x, level (double_size x) = S (level x). Proof. intros x. rewrite double_size_fold; unfold level at 2. destr_t x as (n,x). apply mk_t_S_level. Qed. Theorem spec_double_size_digits: forall x, Zpos (digits (double_size x)) = 2 * (Zpos (digits x)). Proof. intros x. rewrite ! digits_level, double_size_level. rewrite 2 digits_dom_op, 2 Pshiftl_nat_Zpower, Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. ring. Qed. Theorem spec_double_size: forall x, [double_size x] = [x]. Proof. intros x. rewrite double_size_fold. destr_t x as (n,x). rewrite spec_mk_t_S. simpl. rewrite ZnZ.spec_0. auto with zarith. Qed. Theorem spec_double_size_head0: forall x, 2 * [head0 x] <= [head0 (double_size x)]. Proof. intros x. assert (F1:= spec_pos (head0 x)). assert (F2: 0 < Zpos (digits x)). red; auto. assert (HH := spec_pos x). Z.le_elim HH. generalize HH; rewrite <- (spec_double_size x); intros HH1. case (spec_head0 x HH); intros _ HH2. case (spec_head0 _ HH1). rewrite (spec_double_size x); rewrite (spec_double_size_digits x). intros HH3 _. case (Z.le_gt_cases ([head0 (double_size x)]) (2 * [head0 x])); auto; intros HH4. 
absurd (2 ^ (2 * [head0 x] )* [x] < 2 ^ [head0 (double_size x)] * [x]); auto. apply Z.le_ngt. apply Z.mul_le_mono_nonneg_r; auto with zarith. apply Z.pow_le_mono_r; auto; auto with zarith. assert (HH5: 2 ^[head0 x] <= 2 ^(Zpos (digits x) - 1)). { apply Z.le_succ_l in HH. change (1 <= [x]) in HH. Z.le_elim HH. - apply Z.mul_le_mono_pos_r with (2 ^ 1); auto with zarith. rewrite <- (fun x y z => Z.pow_add_r x (y - z)); auto with zarith. rewrite Z.sub_add. apply Z.le_trans with (2 := Z.lt_le_incl _ _ HH2). apply Z.mul_le_mono_nonneg_l; auto with zarith. rewrite Z.pow_1_r; auto with zarith. - apply Z.pow_le_mono_r; auto with zarith. case (Z.le_gt_cases (Zpos (digits x)) [head0 x]); auto with zarith; intros HH6. absurd (2 ^ Zpos (digits x) <= 2 ^ [head0 x] * [x]); auto with zarith. rewrite <- HH; rewrite Z.mul_1_r. apply Z.pow_le_mono_r; auto with zarith. } rewrite (Z.mul_comm 2). rewrite Z.pow_mul_r; auto with zarith. rewrite Z.pow_2_r. apply Z.lt_le_trans with (2 := HH3). rewrite <- Z.mul_assoc. replace (2 * Zpos (digits x) - 1) with ((Zpos (digits x) - 1) + (Zpos (digits x))). rewrite Zpower_exp; auto with zarith. apply Zmult_lt_compat2; auto with zarith. split; auto with zarith. apply Z.mul_pos_pos; auto with zarith. rewrite Pos2Z.inj_xO; ring. apply Z.lt_le_incl; auto. repeat rewrite spec_head00; auto. rewrite spec_double_size_digits. rewrite Pos2Z.inj_xO; auto with zarith. rewrite spec_double_size; auto. Qed. Theorem spec_double_size_head0_pos: forall x, 0 < [head0 (double_size x)]. Proof. intros x. assert (F := Pos2Z.is_pos (digits x)). assert (F0 := spec_pos (head0 (double_size x))). Z.le_elim F0; auto. assert (F1 := spec_pos (head0 x)). Z.le_elim F1. apply Z.lt_le_trans with (2 := (spec_double_size_head0 x)); auto with zarith. assert (F3 := spec_pos x). Z.le_elim F3. generalize F3; rewrite <- (spec_double_size x); intros F4. absurd (2 ^ (Zpos (xO (digits x)) - 1) < 2 ^ (Zpos (digits x))). { apply Z.le_ngt. apply Z.pow_le_mono_r; auto with zarith. rewrite Pos2Z.inj_xO; auto with zarith. } case (spec_head0 x F3). rewrite <- F1; rewrite Z.pow_0_r; rewrite Z.mul_1_l; intros _ HH. apply Z.le_lt_trans with (2 := HH). case (spec_head0 _ F4). rewrite (spec_double_size x); rewrite (spec_double_size_digits x). rewrite <- F0; rewrite Z.pow_0_r; rewrite Z.mul_1_l; auto. generalize F1; rewrite (spec_head00 _ (eq_sym F3)); auto with zarith. Qed. (** Finally we iterate [double_size] enough before [unsafe_shiftl] in order to get a fully correct [shiftl]. *) Definition shiftl_aux_body cont x n := match compare n (head0 x) with Gt => cont (double_size x) n | _ => unsafe_shiftl x n end. Theorem spec_shiftl_aux_body: forall n x p cont, 2^ Zpos p <= [head0 x] -> (forall x, 2 ^ (Zpos p + 1) <= [head0 x]-> [cont x n] = [x] * 2 ^ [n]) -> [shiftl_aux_body cont x n] = [x] * 2 ^ [n]. Proof. intros n x p cont H1 H2; unfold shiftl_aux_body. rewrite spec_compare; case Z.compare_spec; intros H. apply spec_unsafe_shiftl; auto with zarith. apply spec_unsafe_shiftl; auto with zarith. rewrite H2. rewrite spec_double_size; auto. rewrite Z.add_comm; rewrite Zpower_exp; auto with zarith. apply Z.le_trans with (2 := spec_double_size_head0 x). rewrite Z.pow_1_r; apply Z.mul_le_mono_nonneg_l; auto with zarith. Qed. Fixpoint shiftl_aux p cont x n := shiftl_aux_body (fun x n => match p with | xH => cont x n | xO p => shiftl_aux p (shiftl_aux p cont) x n | xI p => shiftl_aux p (shiftl_aux p cont) x n end) x n. 
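 (** Hedged remark (added): [double_size] can be iterated freely without
     changing the represented value, as a direct consequence of
     [spec_double_size] proved above. *)

 Lemma spec_double_size_twice : forall x, [double_size (double_size x)] = [x].
 Proof.
  intros. now rewrite 2 spec_double_size.
 Qed.
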
Theorem spec_shiftl_aux: forall p q x n cont, 2 ^ (Zpos q) <= [head0 x] -> (forall x, 2 ^ (Zpos p + Zpos q) <= [head0 x] -> [cont x n] = [x] * 2 ^ [n]) -> [shiftl_aux p cont x n] = [x] * 2 ^ [n]. Proof. intros p; elim p; unfold shiftl_aux; fold shiftl_aux; clear p. intros p Hrec q x n cont H1 H2. apply spec_shiftl_aux_body with (q); auto. intros x1 H3; apply Hrec with (q + 1)%positive; auto. intros x2 H4; apply Hrec with (p + q + 1)%positive; auto. rewrite <- Pos.add_assoc. rewrite Pos2Z.inj_add; auto. intros x3 H5; apply H2. rewrite Pos2Z.inj_xI. replace (2 * Zpos p + 1 + Zpos q) with (Zpos p + Zpos (p + q + 1)); auto. rewrite !Pos2Z.inj_add; ring. intros p Hrec q n x cont H1 H2. apply spec_shiftl_aux_body with (q); auto. intros x1 H3; apply Hrec with (q); auto. apply Z.le_trans with (2 := H3); auto with zarith. apply Z.pow_le_mono_r; auto with zarith. intros x2 H4; apply Hrec with (p + q)%positive; auto. intros x3 H5; apply H2. rewrite (Pos2Z.inj_xO p). replace (2 * Zpos p + Zpos q) with (Zpos p + Zpos (p + q)); auto. rewrite Pos2Z.inj_add; ring. intros q n x cont H1 H2. apply spec_shiftl_aux_body with (q); auto. rewrite Z.add_comm; auto. Qed. Definition shiftl x n := shiftl_aux_body (shiftl_aux_body (shiftl_aux (digits n) unsafe_shiftl)) x n. Theorem spec_shiftl_pow2 : forall x n, [shiftl x n] = [x] * 2 ^ [n]. Proof. intros x n; unfold shiftl, shiftl_aux_body. rewrite spec_compare; case Z.compare_spec; intros H. apply spec_unsafe_shiftl; auto with zarith. apply spec_unsafe_shiftl; auto with zarith. rewrite <- (spec_double_size x). rewrite spec_compare; case Z.compare_spec; intros H1. apply spec_unsafe_shiftl; auto with zarith. apply spec_unsafe_shiftl; auto with zarith. rewrite <- (spec_double_size (double_size x)). apply spec_shiftl_aux with 1%positive. apply Z.le_trans with (2 := spec_double_size_head0 (double_size x)). replace (2 ^ 1) with (2 * 1). apply Z.mul_le_mono_nonneg_l; auto with zarith. generalize (spec_double_size_head0_pos x); auto with zarith. rewrite Z.pow_1_r; ring. intros x1 H2; apply spec_unsafe_shiftl. apply Z.le_trans with (2 := H2). apply Z.le_trans with (2 ^ Zpos (digits n)); auto with zarith. case (spec_digits n); auto with zarith. apply Z.pow_le_mono_r; auto with zarith. Qed. Lemma spec_shiftl: forall x p, [shiftl x p] = Z.shiftl [x] [p]. Proof. intros. now rewrite spec_shiftl_pow2, Z.shiftl_mul_pow2 by apply spec_pos. Qed. (** Other bitwise operations *) Definition testbit x n := odd (shiftr x n). Lemma spec_testbit: forall x p, testbit x p = Z.testbit [x] [p]. Proof. intros. unfold testbit. symmetry. rewrite spec_odd, spec_shiftr. apply Z.testbit_odd. Qed. Definition div2 x := shiftr x one. Lemma spec_div2: forall x, [div2 x] = Z.div2 [x]. Proof. intros. unfold div2. symmetry. rewrite spec_shiftr, spec_1. apply Z.div2_spec. Qed. (** TODO : provide efficient versions instead of just converting from/to N (see with Laurent) *) Definition lor x y := of_N (N.lor (to_N x) (to_N y)). Definition land x y := of_N (N.land (to_N x) (to_N y)). Definition ldiff x y := of_N (N.ldiff (to_N x) (to_N y)). Definition lxor x y := of_N (N.lxor (to_N x) (to_N y)). Lemma spec_land: forall x y, [land x y] = Z.land [x] [y]. Proof. intros x y. unfold land. rewrite spec_of_N. unfold to_N. generalize (spec_pos x), (spec_pos y). destruct [x], [y]; trivial; (now destruct 1) || (now destruct 2). Qed. Lemma spec_lor: forall x y, [lor x y] = Z.lor [x] [y]. Proof. intros x y. unfold lor. rewrite spec_of_N. unfold to_N. generalize (spec_pos x), (spec_pos y). 
destruct [x], [y]; trivial; (now destruct 1) || (now destruct 2). Qed. Lemma spec_ldiff: forall x y, [ldiff x y] = Z.ldiff [x] [y]. Proof. intros x y. unfold ldiff. rewrite spec_of_N. unfold to_N. generalize (spec_pos x), (spec_pos y). destruct [x], [y]; trivial; (now destruct 1) || (now destruct 2). Qed. Lemma spec_lxor: forall x y, [lxor x y] = Z.lxor [x] [y]. Proof. intros x y. unfold lxor. rewrite spec_of_N. unfold to_N. generalize (spec_pos x), (spec_pos y). destruct [x], [y]; trivial; (now destruct 1) || (now destruct 2). Qed. End Make. coq-8.4pl2/theories/Numbers/Natural/BigN/BigN.v0000640000175000001440000001454712010532755020332 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y" := (y < x) (only parsing) : bigN_scope. Notation "x >= y" := (y <= x) (only parsing) : bigN_scope. Notation "x < y < z" := (x BigN.succ (BigN.pred q) == q. Proof. intros; apply BigN.succ_pred. intro H'; rewrite H' in H; discriminate. Qed. (** [BigN] is a semi-ring *) Lemma BigNring : semi_ring_theory 0 1 BigN.add BigN.mul BigN.eq. Proof. constructor. exact BigN.add_0_l. exact BigN.add_comm. exact BigN.add_assoc. exact BigN.mul_1_l. exact BigN.mul_0_l. exact BigN.mul_comm. exact BigN.mul_assoc. exact BigN.mul_add_distr_r. Qed. Lemma BigNeqb_correct : forall x y, (x =? y) = true -> x==y. Proof. now apply BigN.eqb_eq. Qed. Lemma BigNpower : power_theory 1 BigN.mul BigN.eq BigN.of_N BigN.pow. Proof. constructor. intros. red. rewrite BigN.spec_pow, BigN.spec_of_N. rewrite Zpower_theory.(rpow_pow_N). destruct n; simpl. reflexivity. induction p; simpl; intros; BigN.zify; rewrite ?IHp; auto. Qed. Lemma BigNdiv : div_theory BigN.eq BigN.add BigN.mul (@id _) (fun a b => if b =? 0 then (0,a) else BigN.div_eucl a b). Proof. constructor. unfold id. intros a b. BigN.zify. case Z.eqb_spec. BigN.zify. auto with zarith. intros NEQ. generalize (BigN.spec_div_eucl a b). generalize (Z_div_mod_full [a] [b] NEQ). destruct BigN.div_eucl as (q,r), Z.div_eucl as (q',r'). intros (EQ,_). injection 1. intros EQr EQq. BigN.zify. rewrite EQr, EQq; auto. Qed. (** Detection of constants *) Ltac isStaticWordCst t := match t with | W0 => constr:true | WW ?t1 ?t2 => match isStaticWordCst t1 with | false => constr:false | true => isStaticWordCst t2 end | _ => isInt31cst t end. Ltac isBigNcst t := match t with | BigN.N0 ?t => isStaticWordCst t | BigN.N1 ?t => isStaticWordCst t | BigN.N2 ?t => isStaticWordCst t | BigN.N3 ?t => isStaticWordCst t | BigN.N4 ?t => isStaticWordCst t | BigN.N5 ?t => isStaticWordCst t | BigN.N6 ?t => isStaticWordCst t | BigN.Nn ?n ?t => match isnatcst n with | true => isStaticWordCst t | false => constr:false end | BigN.zero => constr:true | BigN.one => constr:true | BigN.two => constr:true | _ => constr:false end. Ltac BigNcst t := match isBigNcst t with | true => constr:t | false => constr:NotConstant end. Ltac BigN_to_N t := match isBigNcst t with | true => eval vm_compute in (BigN.to_N t) | false => constr:NotConstant end. Ltac Ncst t := match isNcst t with | true => constr:t | false => constr:NotConstant end. (** Registration for the "ring" tactic *) Add Ring BigNr : BigNring (decidable BigNeqb_correct, constants [BigNcst], power_tac BigNpower [BigN_to_N], div BigNdiv). Section TestRing. Let test : forall x y, 1 + x*y^1 + x^2 + 1 == 1*1 + 1 + y*x + 1*x*x. intros. ring_simplify. reflexivity. Qed. End TestRing. (** We benefit also from an "order" tactic *) Ltac bigN_order := BigN.order. Section TestOrder. 
Let test : forall x y : bigN, x<=y -> y<=x -> x==y. Proof. bigN_order. Qed. End TestOrder. (** We can use at least a bit of (r)omega by translating to [Z]. *) Section TestOmega. Let test : forall x y : bigN, x<=y -> y<=x -> x==y. Proof. intros x y. BigN.zify. omega. Qed. End TestOmega. (** Todo: micromega *) coq-8.4pl2/theories/Numbers/Natural/BigN/Nbasic.v0000640000175000001440000004032212010532755020700 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* xH | xO p1 => Pos.succ (plength p1) | xI p1 => Pos.succ (plength p1) end. Theorem plength_correct: forall p, (Zpos p < 2 ^ Zpos (plength p))%Z. assert (F: (forall p, 2 ^ (Zpos (Pos.succ p)) = 2 * 2 ^ Zpos p)%Z). intros p; replace (Zpos (Pos.succ p)) with (1 + Zpos p)%Z. rewrite Zpower_exp; auto with zarith. rewrite Pos2Z.inj_succ; unfold Z.succ; auto with zarith. intros p; elim p; simpl plength; auto. intros p1 Hp1; rewrite F; repeat rewrite Pos2Z.inj_xI. assert (tmp: (forall p, 2 * p = p + p)%Z); try repeat rewrite tmp; auto with zarith. intros p1 Hp1; rewrite F; rewrite (Pos2Z.inj_xO p1). assert (tmp: (forall p, 2 * p = p + p)%Z); try repeat rewrite tmp; auto with zarith. rewrite Z.pow_1_r; auto with zarith. Qed. Theorem plength_pred_correct: forall p, (Zpos p <= 2 ^ Zpos (plength (Pos.pred p)))%Z. intros p; case (Pos.succ_pred_or p); intros H1. subst; simpl plength. rewrite Z.pow_1_r; auto with zarith. pattern p at 1; rewrite <- H1. rewrite Pos2Z.inj_succ; unfold Z.succ; auto with zarith. generalize (plength_correct (Pos.pred p)); auto with zarith. Qed. Definition Pdiv p q := match Z.div (Zpos p) (Zpos q) with Zpos q1 => match (Zpos p) - (Zpos q) * (Zpos q1) with Z0 => q1 | _ => (Pos.succ q1) end | _ => xH end. Theorem Pdiv_le: forall p q, Zpos p <= Zpos q * Zpos (Pdiv p q). intros p q. unfold Pdiv. assert (H1: Zpos q > 0); auto with zarith. assert (H1b: Zpos p >= 0); auto with zarith. generalize (Z_div_ge0 (Zpos p) (Zpos q) H1 H1b). generalize (Z_div_mod_eq (Zpos p) (Zpos q) H1); case Z.div. intros HH _; rewrite HH; rewrite Z.mul_0_r; rewrite Z.mul_1_r; simpl. case (Z_mod_lt (Zpos p) (Zpos q) H1); auto with zarith. intros q1 H2. replace (Zpos p - Zpos q * Zpos q1) with (Zpos p mod Zpos q). 2: pattern (Zpos p) at 2; rewrite H2; auto with zarith. generalize H2 (Z_mod_lt (Zpos p) (Zpos q) H1); clear H2; case Z.modulo. intros HH _; rewrite HH; auto with zarith. intros r1 HH (_,HH1); rewrite HH; rewrite Pos2Z.inj_succ. unfold Z.succ; rewrite Z.mul_add_distr_l; auto with zarith. intros r1 _ (HH,_); case HH; auto. intros q1 HH; rewrite HH. unfold Z.ge; simpl Z.compare; intros HH1; case HH1; auto. Qed. Definition is_one p := match p with xH => true | _ => false end. Theorem is_one_one: forall p, is_one p = true -> p = xH. intros p; case p; auto; intros p1 H1; discriminate H1. Qed. Definition get_height digits p := let r := Pdiv p digits in if is_one r then xH else Pos.succ (plength (Pos.pred r)). Theorem get_height_correct: forall digits N, Zpos N <= Zpos digits * (2 ^ (Zpos (get_height digits N) -1)). intros digits N. unfold get_height. assert (H1 := Pdiv_le N digits). case_eq (is_one (Pdiv N digits)); intros H2. rewrite (is_one_one _ H2) in H1. rewrite Z.mul_1_r in H1. change (2^(1-1))%Z with 1; rewrite Z.mul_1_r; auto. clear H2. apply Z.le_trans with (1 := H1). apply Z.mul_le_mono_nonneg_l; auto with zarith. rewrite Pos2Z.inj_succ; unfold Z.succ. rewrite Z.add_comm; rewrite Z.add_simpl_l. apply plength_pred_correct. Qed. 
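(** Hedged concrete check (added, not in the original file): on the literal
    [6], [plength] computes to [3], and [plength_correct] instantiates to
    the expected bound [6 < 2^3]. *)

Lemma plength_6 : plength 6 = 3%positive.
Proof. reflexivity. Qed.

Lemma plength_6_bound : (6 < 2 ^ Zpos (plength 6))%Z.
Proof. apply plength_correct. Qed.
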
Definition zn2z_word_comm : forall w n, zn2z (word w n) = word (zn2z w) n. fix zn2z_word_comm 2. intros w n; case n. reflexivity. intros n0;simpl. case (zn2z_word_comm w n0). reflexivity. Defined. Fixpoint extend (n:nat) {struct n} : forall w:Type, zn2z w -> word w (S n) := match n return forall w:Type, zn2z w -> word w (S n) with | O => fun w x => x | S m => let aux := extend m in fun w x => WW W0 (aux w x) end. Section ExtendMax. Open Scope nat_scope. Fixpoint plusnS (n m: nat) {struct n} : (n + S m = S (n + m))%nat := match n return (n + S m = S (n + m))%nat with | 0 => eq_refl (S m) | S n1 => let v := S (S n1 + m) in eq_ind_r (fun n => S n = v) (eq_refl v) (plusnS n1 m) end. Fixpoint plusn0 n : n + 0 = n := match n return (n + 0 = n) with | 0 => eq_refl 0 | S n1 => let v := S n1 in eq_ind_r (fun n : nat => S n = v) (eq_refl v) (plusn0 n1) end. Fixpoint diff (m n: nat) {struct m}: nat * nat := match m, n with O, n => (O, n) | m, O => (m, O) | S m1, S n1 => diff m1 n1 end. Fixpoint diff_l (m n : nat) {struct m} : fst (diff m n) + n = max m n := match m return fst (diff m n) + n = max m n with | 0 => match n return (n = max 0 n) with | 0 => eq_refl _ | S n0 => eq_refl _ end | S m1 => match n return (fst (diff (S m1) n) + n = max (S m1) n) with | 0 => plusn0 _ | S n1 => let v := fst (diff m1 n1) + n1 in let v1 := fst (diff m1 n1) + S n1 in eq_ind v (fun n => v1 = S n) (eq_ind v1 (fun n => v1 = n) (eq_refl v1) (S v) (plusnS _ _)) _ (diff_l _ _) end end. Fixpoint diff_r (m n: nat) {struct m}: snd (diff m n) + m = max m n := match m return (snd (diff m n) + m = max m n) with | 0 => match n return (snd (diff 0 n) + 0 = max 0 n) with | 0 => eq_refl _ | S _ => plusn0 _ end | S m => match n return (snd (diff (S m) n) + S m = max (S m) n) with | 0 => eq_refl (snd (diff (S m) 0) + S m) | S n1 => let v := S (max m n1) in eq_ind_r (fun n => n = v) (eq_ind_r (fun n => S n = v) (eq_refl v) (diff_r _ _)) (plusnS _ _) end end. Variable w: Type. Definition castm (m n: nat) (H: m = n) (x: word w (S m)): (word w (S n)) := match H in (_ = y) return (word w (S y)) with | eq_refl => x end. Variable m: nat. Variable v: (word w (S m)). Fixpoint extend_tr (n : nat) {struct n}: (word w (S (n + m))) := match n return (word w (S (n + m))) with | O => v | S n1 => WW W0 (extend_tr n1) end. End ExtendMax. Arguments extend_tr [w m] v n. Arguments castm [w m n] H x. Section Reduce. Variable w : Type. Variable nT : Type. Variable N0 : nT. Variable eq0 : w -> bool. Variable reduce_n : w -> nT. Variable zn2z_to_Nt : zn2z w -> nT. Definition reduce_n1 (x:zn2z w) := match x with | W0 => N0 | WW xh xl => if eq0 xh then reduce_n xl else zn2z_to_Nt x end. End Reduce. Section ReduceRec. Variable w : Type. Variable nT : Type. Variable N0 : nT. Variable reduce_1n : zn2z w -> nT. Variable c : forall n, word w (S n) -> nT. Fixpoint reduce_n (n:nat) : word w (S n) -> nT := match n return word w (S n) -> nT with | O => reduce_1n | S m => fun x => match x with | W0 => N0 | WW xh xl => match xh with | W0 => @reduce_n m xl | _ => @c (S m) x end end end. End ReduceRec. Section CompareRec. Variable wm w : Type. Variable w_0 : w. Variable compare : w -> w -> comparison. Variable compare0_m : wm -> comparison. Variable compare_m : wm -> w -> comparison. Fixpoint compare0_mn (n:nat) : word wm n -> comparison := match n return word wm n -> comparison with | O => compare0_m | S m => fun x => match x with | W0 => Eq | WW xh xl => match compare0_mn m xh with | Eq => compare0_mn m xl | r => Lt end end end. Variable wm_base: positive. 
Variable wm_to_Z: wm -> Z. Variable w_to_Z: w -> Z. Variable w_to_Z_0: w_to_Z w_0 = 0. Variable spec_compare0_m: forall x, compare0_m x = (w_to_Z w_0 ?= wm_to_Z x). Variable wm_to_Z_pos: forall x, 0 <= wm_to_Z x < base wm_base. Let double_to_Z := double_to_Z wm_base wm_to_Z. Let double_wB := double_wB wm_base. Lemma base_xO: forall n, base (xO n) = (base n)^2. Proof. intros n1; unfold base. rewrite (Pos2Z.inj_xO n1); rewrite Z.mul_comm; rewrite Z.pow_mul_r; auto with zarith. Qed. Let double_to_Z_pos: forall n x, 0 <= double_to_Z n x < double_wB n := (spec_double_to_Z wm_base wm_to_Z wm_to_Z_pos). Lemma spec_compare0_mn: forall n x, compare0_mn n x = (0 ?= double_to_Z n x). Proof. intros n; elim n; clear n; auto. intros x; rewrite spec_compare0_m; rewrite w_to_Z_0; auto. intros n Hrec x; case x; unfold compare0_mn; fold compare0_mn; auto. fold word in *. intros xh xl. rewrite 2 Hrec. simpl double_to_Z. set (wB := DoubleBase.double_wB wm_base n). case Z.compare_spec; intros Cmp. rewrite <- Cmp. reflexivity. symmetry. apply Z.gt_lt, Z.lt_gt. (* ;-) *) assert (0 < wB). unfold wB, DoubleBase.double_wB, base; auto with zarith. change 0 with (0 + 0); apply Z.add_lt_le_mono; auto with zarith. apply Z.mul_pos_pos; auto with zarith. case (double_to_Z_pos n xl); auto with zarith. case (double_to_Z_pos n xh); intros; exfalso; omega. Qed. Fixpoint compare_mn_1 (n:nat) : word wm n -> w -> comparison := match n return word wm n -> w -> comparison with | O => compare_m | S m => fun x y => match x with | W0 => compare w_0 y | WW xh xl => match compare0_mn m xh with | Eq => compare_mn_1 m xl y | r => Gt end end end. Variable spec_compare: forall x y, compare x y = Z.compare (w_to_Z x) (w_to_Z y). Variable spec_compare_m: forall x y, compare_m x y = Z.compare (wm_to_Z x) (w_to_Z y). Variable wm_base_lt: forall x, 0 <= w_to_Z x < base (wm_base). Let double_wB_lt: forall n x, 0 <= w_to_Z x < (double_wB n). Proof. intros n x; elim n; simpl; auto; clear n. intros n (H0, H); split; auto. apply Z.lt_le_trans with (1:= H). unfold double_wB, DoubleBase.double_wB; simpl. rewrite Pshiftl_nat_S, base_xO. set (u := base (Pos.shiftl_nat wm_base n)). assert (0 < u). unfold u, base; auto with zarith. replace (u^2) with (u * u); simpl; auto with zarith. apply Z.le_trans with (1 * u); auto with zarith. unfold Z.pow_pos; simpl; ring. Qed. Lemma spec_compare_mn_1: forall n x y, compare_mn_1 n x y = Z.compare (double_to_Z n x) (w_to_Z y). Proof. intros n; elim n; simpl; auto; clear n. intros n Hrec x; case x; clear x; auto. intros y; rewrite spec_compare; rewrite w_to_Z_0. reflexivity. intros xh xl y; simpl; rewrite spec_compare0_mn, Hrec. case Z.compare_spec. intros H1b. rewrite <- H1b; rewrite Z.mul_0_l; rewrite Z.add_0_l; auto. symmetry. apply Z.lt_gt. case (double_wB_lt n y); intros _ H0. apply Z.lt_le_trans with (1:= H0). fold double_wB. case (double_to_Z_pos n xl); intros H1 H2. apply Z.le_trans with (double_to_Z n xh * double_wB n); auto with zarith. apply Z.le_trans with (1 * double_wB n); auto with zarith. case (double_to_Z_pos n xh); intros; exfalso; omega. Qed. End CompareRec. Section AddS. Variable w wm : Type. Variable incr : wm -> carry wm. Variable addr : w -> wm -> carry wm. Variable injr : w -> zn2z wm. Variable w_0 u: w. Fixpoint injs (n:nat): word w (S n) := match n return (word w (S n)) with O => WW w_0 u | S n1 => (WW W0 (injs n1)) end. 
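 (** Hedged unit check (added): at depth [0], [injs] is simply the pair
     [WW w_0 u]; this holds by computation. *)

 Lemma injs_0 : injs 0 = WW w_0 u.
 Proof. reflexivity. Qed.
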
Definition adds x y := match y with W0 => C0 (injr x) | WW hy ly => match addr x ly with C0 z => C0 (WW hy z) | C1 z => match incr hy with C0 z1 => C0 (WW z1 z) | C1 z1 => C1 (WW z1 z) end end end. End AddS. Fixpoint length_pos x := match x with xH => O | xO x1 => S (length_pos x1) | xI x1 => S (length_pos x1) end. Theorem length_pos_lt: forall x y, (length_pos x < length_pos y)%nat -> Zpos x < Zpos y. Proof. intros x; elim x; clear x; [intros x1 Hrec | intros x1 Hrec | idtac]; intros y; case y; clear y; intros y1 H || intros H; simpl length_pos; try (rewrite (Pos2Z.inj_xI x1) || rewrite (Pos2Z.inj_xO x1)); try (rewrite (Pos2Z.inj_xI y1) || rewrite (Pos2Z.inj_xO y1)); try (inversion H; fail); try (assert (Zpos x1 < Zpos y1); [apply Hrec; apply lt_S_n | idtac]; auto with zarith); assert (0 < Zpos y1); auto with zarith; red; auto. Qed. Theorem cancel_app: forall A B (f g: A -> B) x, f = g -> f x = g x. Proof. intros A B f g x H; rewrite H; auto. Qed. Section SimplOp. Variable w: Type. Theorem digits_zop: forall t (ops : ZnZ.Ops t), ZnZ.digits (mk_zn2z_ops ops) = xO (ZnZ.digits ops). Proof. intros ww x; auto. Qed. Theorem digits_kzop: forall t (ops : ZnZ.Ops t), ZnZ.digits (mk_zn2z_ops_karatsuba ops) = xO (ZnZ.digits ops). Proof. intros ww x; auto. Qed. Theorem make_zop: forall t (ops : ZnZ.Ops t), @ZnZ.to_Z _ (mk_zn2z_ops ops) = fun z => match z with | W0 => 0 | WW xh xl => ZnZ.to_Z xh * base (ZnZ.digits ops) + ZnZ.to_Z xl end. Proof. intros ww x; auto. Qed. Theorem make_kzop: forall t (ops: ZnZ.Ops t), @ZnZ.to_Z _ (mk_zn2z_ops_karatsuba ops) = fun z => match z with | W0 => 0 | WW xh xl => ZnZ.to_Z xh * base (ZnZ.digits ops) + ZnZ.to_Z xl end. Proof. intros ww x; auto. Qed. End SimplOp. (** Abstract vision of a datatype of arbitrary-large numbers. Concrete operations can be derived from these generic fonctions, in particular from [iter_t] and [same_level]. *) Module Type NAbstract. (** The domains: a sequence of [Z/nZ] structures *) Parameter dom_t : nat -> Type. Declare Instance dom_op n : ZnZ.Ops (dom_t n). Declare Instance dom_spec n : ZnZ.Specs (dom_op n). Axiom digits_dom_op : forall n, ZnZ.digits (dom_op n) = Pos.shiftl_nat (ZnZ.digits (dom_op 0)) n. (** The type [t] of arbitrary-large numbers, with abstract constructor [mk_t] and destructor [destr_t] and iterator [iter_t] *) Parameter t : Type. Parameter mk_t : forall (n:nat), dom_t n -> t. Inductive View_t : t -> Prop := Mk_t : forall n (x : dom_t n), View_t (mk_t n x). Axiom destr_t : forall x, View_t x. (* i.e. every x is a (mk_t n xw) *) Parameter iter_t : forall {A:Type}(f : forall n, dom_t n -> A), t -> A. Axiom iter_mk_t : forall A (f:forall n, dom_t n -> A), forall n x, iter_t f (mk_t n x) = f n x. (** Conversion to [ZArith] *) Parameter to_Z : t -> Z. Local Notation "[ x ]" := (to_Z x). Axiom spec_mk_t : forall n x, [mk_t n x] = ZnZ.to_Z x. (** [reduce] is like [mk_t], but try to minimise the level of the number *) Parameter reduce : forall (n:nat), dom_t n -> t. Axiom spec_reduce : forall n x, [reduce n x] = ZnZ.to_Z x. (** Number of level in the tree representation of a number. NB: This function isn't a morphism for setoid [eq]. *) Definition level := iter_t (fun n _ => n). (** [same_level] and its rich specification, indexed by [level] *) Parameter same_level : forall {A:Type} (f : forall n, dom_t n -> dom_t n -> A), t -> t -> A. 
Axiom spec_same_level_dep : forall res (P : nat -> Z -> Z -> res -> Prop) (Pantimon : forall n m z z' r, (n <= m)%nat -> P m z z' r -> P n z z' r) (f : forall n, dom_t n -> dom_t n -> res) (Pf: forall n x y, P n (ZnZ.to_Z x) (ZnZ.to_Z y) (f n x y)), forall x y, P (level x) [x] [y] (same_level f x y). (** [mk_t_S] : building a number of the next level *) Parameter mk_t_S : forall (n:nat), zn2z (dom_t n) -> t. Axiom spec_mk_t_S : forall n (x:zn2z (dom_t n)), [mk_t_S n x] = zn2z_to_Z (base (ZnZ.digits (dom_op n))) ZnZ.to_Z x. Axiom mk_t_S_level : forall n x, level (mk_t_S n x) = S n. End NAbstract. coq-8.4pl2/theories/Numbers/Natural/BigN/NMake_gen.ml0000640000175000001440000007013412010532755021474 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z/2nZ process before relying on a generic construct *) (*s Some utilities *) let rec iter_str n s = if n = 0 then "" else (iter_str (n-1) s) ^ s let rec iter_str_gen n f = if n < 0 then "" else (iter_str_gen (n-1) f) ^ (f n) let rec iter_name i j base sep = if i >= j then base^(string_of_int i) else (iter_name i (j-1) base sep)^sep^" "^base^(string_of_int j) let pr s = Printf.printf (s^^"\n") (*s The actual printing *) let _ = pr "(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ZnZ.Ops (zn2z w')."; pr ""; pr " Fixpoint make_op_aux (n:nat) : ZnZ.Ops (word w%i (S n)):=" size; pr " match n return ZnZ.Ops (word w%i (S n)) with" size; pr " | O => w%i_op" (size+1); pr " | S n1 =>"; pr " match n1 return ZnZ.Ops (word w%i (S (S n1))) with" size; pr " | O => w%i_op" (size+2); pr " | S n2 =>"; pr " match n2 return ZnZ.Ops (word w%i (S (S (S n2)))) with" size; pr " | O => w%i_op" (size+3); pr " | S n3 => mk _ (mk _ (mk _ (make_op_aux n3)))"; pr " end"; pr " end"; pr " end."; pr ""; pr " End Make_op."; pr ""; pr " Definition omake_op := make_op_aux mk_zn2z_ops_karatsuba."; pr ""; pr ""; pr " Definition make_op_list := dmemo_list _ omake_op."; pr ""; pr " Instance make_op n : ZnZ.Ops (word w%i (S n))" size; pr " := dmemo_get _ omake_op n make_op_list."; pr ""; pr " Ltac unfold_ops := unfold omake_op, make_op_aux, w%i_op, w%i_op." (size+3) (size+2); pr " Lemma make_op_omake: forall n, make_op n = omake_op n. Proof. intros n; unfold make_op, make_op_list. refine (dmemo_get_correct _ _ _). Qed. Theorem make_op_S: forall n, make_op (S n) = mk_zn2z_ops_karatsuba (make_op n). Proof. intros n. do 2 rewrite make_op_omake. revert n. fix IHn 1. do 3 (destruct n; [unfold_ops; reflexivity|]). simpl mk_zn2z_ops_karatsuba. simpl word in *. rewrite <- (IHn n). auto. Qed. (** * The main type [t], isomorphic with [exists n, word w0 n] *) "; pr " Inductive t' :="; for i = 0 to size do pr " | N%i : w%i -> t'" i i done; pr " | Nn : forall n, word w%i (S n) -> t'." size; pr ""; pr " Definition t := t'."; pr ""; pr " Bind Scope abstract_scope with t t'."; pr ""; pr " (** * A generic toolbox for building and deconstructing [t] *)"; pr ""; pr " Local Notation SizePlus n := %sn%s." (iter_str size "(S ") (iter_str size ")"); pr " Local Notation Size := (SizePlus O)."; pr ""; pr " Tactic Notation \"do_size\" tactic(t) := do %i t." (size+1); pr ""; pr " Definition dom_t n := match n with"; for i = 0 to size do pr " | %i => w%i" i i; done; pr " | %sn => word w%i n" (if size=0 then "" else "SizePlus ") size; pr " end."; pr ""; pr " Instance dom_op n : ZnZ.Ops (dom_t n) | 10. Proof. 
do_size (destruct n; [simpl;auto with *|]). unfold dom_t. auto with *. Defined. "; pr " Definition iter_t {A:Type}(f : forall n, dom_t n -> A) : t -> A :="; for i = 0 to size do pr " let f%i := f %i in" i i; done; pr " let fn n := f (SizePlus (S n)) in"; pr " fun x => match x with"; for i = 0 to size do pr " | N%i wx => f%i wx" i i; done; pr " | Nn n wx => fn n wx"; pr " end."; pr ""; pr " Definition mk_t (n:nat) : dom_t n -> t :="; pr " match n as n' return dom_t n' -> t with"; for i = 0 to size do pr " | %i => N%i" i i; done; pr " | %s(S n) => Nn n" (if size=0 then "" else "SizePlus "); pr " end."; pr ""; pr " Definition level := iter_t (fun n _ => n). Inductive View_t : t -> Prop := Mk_t : forall n (x : dom_t n), View_t (mk_t n x). Lemma destr_t : forall x, View_t x. Proof. intros x. generalize (Mk_t (level x)). destruct x; simpl; auto. Defined. Lemma iter_mk_t : forall A (f:forall n, dom_t n -> A), forall n x, iter_t f (mk_t n x) = f n x. Proof. do_size (destruct n; try reflexivity). Qed. (** * Projection to ZArith *) Definition to_Z : t -> Z := Eval lazy beta iota delta [iter_t dom_t dom_op] in iter_t (fun _ x => ZnZ.to_Z x). Notation \"[ x ]\" := (to_Z x). Theorem spec_mk_t : forall n (x:dom_t n), [mk_t n x] = ZnZ.to_Z x. Proof. intros. change to_Z with (iter_t (fun _ x => ZnZ.to_Z x)). rewrite iter_mk_t; auto. Qed. (** * Regular make op, without memoization or karatsuba This will normally never be used for actual computations, but only for specification purpose when using [word (dom_t n) m] intermediate values. *) Fixpoint nmake_op (ww:Type) (ww_op: ZnZ.Ops ww) (n: nat) : ZnZ.Ops (word ww n) := match n return ZnZ.Ops (word ww n) with O => ww_op | S n1 => mk_zn2z_ops (nmake_op ww ww_op n1) end. Let eval n m := ZnZ.to_Z (Ops:=nmake_op _ (dom_op n) m). Theorem nmake_op_S: forall ww (w_op: ZnZ.Ops ww) x, nmake_op _ w_op (S x) = mk_zn2z_ops (nmake_op _ w_op x). Proof. auto. Qed. Theorem digits_nmake_S :forall n ww (w_op: ZnZ.Ops ww), ZnZ.digits (nmake_op _ w_op (S n)) = xO (ZnZ.digits (nmake_op _ w_op n)). Proof. auto. Qed. Theorem digits_nmake : forall n ww (w_op: ZnZ.Ops ww), ZnZ.digits (nmake_op _ w_op n) = Pos.shiftl_nat (ZnZ.digits w_op) n. Proof. induction n. auto. intros ww ww_op. rewrite Pshiftl_nat_S, <- IHn; auto. Qed. Theorem nmake_double: forall n ww (w_op: ZnZ.Ops ww), ZnZ.to_Z (Ops:=nmake_op _ w_op n) = @DoubleBase.double_to_Z _ (ZnZ.digits w_op) (ZnZ.to_Z (Ops:=w_op)) n. Proof. intros n; elim n; auto; clear n. intros n Hrec ww ww_op; simpl DoubleBase.double_to_Z; unfold zn2z_to_Z. rewrite <- Hrec; auto. unfold DoubleBase.double_wB; rewrite <- digits_nmake; auto. Qed. Theorem nmake_WW: forall ww ww_op n xh xl, (ZnZ.to_Z (Ops:=nmake_op ww ww_op (S n)) (WW xh xl) = ZnZ.to_Z (Ops:=nmake_op ww ww_op n) xh * base (ZnZ.digits (nmake_op ww ww_op n)) + ZnZ.to_Z (Ops:=nmake_op ww ww_op n) xl)%%Z. Proof. auto. Qed. (** * The specification proofs for the word operators *) "; if size <> 0 then pr " Typeclasses Opaque %s." (iter_name 1 size "w" ""); pr ""; pr " Instance w0_spec: ZnZ.Specs w0_op := W0.specs."; for i = 1 to min 3 size do pr " Instance w%i_spec: ZnZ.Specs w%i_op := mk_zn2z_specs w%i_spec." i i (i-1) done; for i = 4 to size do pr " Instance w%i_spec: ZnZ.Specs w%i_op := mk_zn2z_specs_karatsuba w%i_spec." i i (i-1) done; pr " Instance w%i_spec: ZnZ.Specs w%i_op := mk_zn2z_specs_karatsuba w%i_spec." (size+1) (size+1) size; pr " Instance wn_spec (n:nat) : ZnZ.Specs (make_op n). Proof. induction n. rewrite make_op_omake; simpl; auto with *. rewrite make_op_S. 
exact (mk_zn2z_specs_karatsuba IHn). Qed. Instance dom_spec n : ZnZ.Specs (dom_op n) | 10. Proof. do_size (destruct n; auto with *). apply wn_spec. Qed. Let make_op_WW : forall n x y, (ZnZ.to_Z (Ops:=make_op (S n)) (WW x y) = ZnZ.to_Z (Ops:=make_op n) x * base (ZnZ.digits (make_op n)) + ZnZ.to_Z (Ops:=make_op n) y)%%Z. Proof. intros n x y; rewrite make_op_S; auto. Qed. (** * Zero *) Definition zero0 : w0 := ZnZ.zero. Definition zeron n : dom_t n := match n with | O => zero0 | SizePlus (S n) => W0 | _ => W0 end. Lemma spec_zeron : forall n, ZnZ.to_Z (zeron n) = 0%%Z. Proof. do_size (destruct n; [exact ZnZ.spec_0|]). destruct n; auto. simpl. rewrite make_op_S. exact ZnZ.spec_0. Qed. (** * Digits *) Lemma digits_make_op_0 : forall n, ZnZ.digits (make_op n) = Pos.shiftl_nat (ZnZ.digits (dom_op Size)) (S n). Proof. induction n. auto. replace (ZnZ.digits (make_op (S n))) with (xO (ZnZ.digits (make_op n))). rewrite IHn; auto. rewrite make_op_S; auto. Qed. Lemma digits_make_op : forall n, ZnZ.digits (make_op n) = Pos.shiftl_nat (ZnZ.digits w0_op) (SizePlus (S n)). Proof. intros. rewrite digits_make_op_0. replace (SizePlus (S n)) with (S n + Size) by (rewrite <- plus_comm; auto). rewrite Pshiftl_nat_plus. auto. Qed. Lemma digits_dom_op : forall n, ZnZ.digits (dom_op n) = Pos.shiftl_nat (ZnZ.digits w0_op) n. Proof. do_size (destruct n; try reflexivity). exact (digits_make_op n). Qed. Lemma digits_dom_op_nmake : forall n m, ZnZ.digits (dom_op (m+n)) = ZnZ.digits (nmake_op _ (dom_op n) m). Proof. intros. rewrite digits_nmake, 2 digits_dom_op. apply Pshiftl_nat_plus. Qed. (** * Conversion between [zn2z (dom_t n)] and [dom_t (S n)]. These two types are provably equal, but not convertible, hence we need some work. We now avoid using generic casts (i.e. rewrite via proof of equalities in types), since proving things with them is a mess. *) Definition succ_t n : zn2z (dom_t n) -> dom_t (S n) := match n with | SizePlus (S _) => fun x => x | _ => fun x => x end. Lemma spec_succ_t : forall n x, ZnZ.to_Z (succ_t n x) = zn2z_to_Z (base (ZnZ.digits (dom_op n))) ZnZ.to_Z x. Proof. do_size (destruct n ; [reflexivity|]). intros. simpl. rewrite make_op_S. simpl. auto. Qed. Definition pred_t n : dom_t (S n) -> zn2z (dom_t n) := match n with | SizePlus (S _) => fun x => x | _ => fun x => x end. Lemma succ_pred_t : forall n x, succ_t n (pred_t n x) = x. Proof. do_size (destruct n ; [reflexivity|]). reflexivity. Qed. (** We can hence project from [zn2z (dom_t n)] to [t] : *) Definition mk_t_S n (x : zn2z (dom_t n)) : t := mk_t (S n) (succ_t n x). Lemma spec_mk_t_S : forall n x, [mk_t_S n x] = zn2z_to_Z (base (ZnZ.digits (dom_op n))) ZnZ.to_Z x. Proof. intros. unfold mk_t_S. rewrite spec_mk_t. apply spec_succ_t. Qed. Lemma mk_t_S_level : forall n x, level (mk_t_S n x) = S n. Proof. intros. unfold mk_t_S, level. rewrite iter_mk_t; auto. Qed. (** * Conversion from [word (dom_t n) m] to [dom_t (m+n)]. Things are more complex here. We start with a naive version that breaks zn2z-trees and reconstruct them. Doing this is quite unfortunate, but I don't know how to fully avoid that. (cast someday ?). Then we build an optimized version where all basic cases (n<=6 or m<=7) are nicely handled. *) Definition zn2z_map {A} {B} (f:A->B) (x:zn2z A) : zn2z B := match x with | W0 => W0 | WW h l => WW (f h) (f l) end. Lemma zn2z_map_id : forall A f (x:zn2z A), (forall u, f u = u) -> zn2z_map f x = x. Proof. destruct x; auto; intros. simpl; f_equal; auto. Qed. 
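(* Illustrative note: [zn2z_map] applies a function to both components of a
   [WW] pair and leaves [W0] unchanged, and [zn2z_map_id] records that
   mapping a pointwise identity is the identity. This is the key fact used
   below to show that the optimised conversion [plus_t'] agrees with the
   naive [plus_t] in all the cases where it merely returns its argument
   unchanged. *)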
(** The naive version *) Fixpoint plus_t n m : word (dom_t n) m -> dom_t (m+n) := match m as m' return word (dom_t n) m' -> dom_t (m'+n) with | O => fun x => x | S m => fun x => succ_t _ (zn2z_map (plus_t n m) x) end. Theorem spec_plus_t : forall n m (x:word (dom_t n) m), ZnZ.to_Z (plus_t n m x) = eval n m x. Proof. unfold eval. induction m. simpl; auto. intros. simpl plus_t; simpl plus. rewrite spec_succ_t. destruct x. simpl; auto. fold word in w, w0. simpl. rewrite 2 IHm. f_equal. f_equal. f_equal. apply digits_dom_op_nmake. Qed. Definition mk_t_w n m (x:word (dom_t n) m) : t := mk_t (m+n) (plus_t n m x). Theorem spec_mk_t_w : forall n m (x:word (dom_t n) m), [mk_t_w n m x] = eval n m x. Proof. intros. unfold mk_t_w. rewrite spec_mk_t. apply spec_plus_t. Qed. (** The optimized version. NB: the last particular case for m could depend on n, but it's simplier to just expand everywhere up to m=7 (cf [mk_t_w'] later). *) Definition plus_t' n : forall m, word (dom_t n) m -> dom_t (m+n) := match n return (forall m, word (dom_t n) m -> dom_t (m+n)) with | SizePlus (S n') as n => plus_t n | _ as n => fun m => match m return (word (dom_t n) m -> dom_t (m+n)) with | SizePlus (S (S m')) as m => plus_t n m | _ => fun x => x end end. Lemma plus_t_equiv : forall n m x, plus_t' n m x = plus_t n m x. Proof. (do_size try destruct n); try reflexivity; (do_size try destruct m); try destruct m; try reflexivity; simpl; symmetry; repeat (intros; apply zn2z_map_id; trivial). Qed. Lemma spec_plus_t' : forall n m x, ZnZ.to_Z (plus_t' n m x) = eval n m x. Proof. intros; rewrite plus_t_equiv. apply spec_plus_t. Qed. (** Particular cases [Nk x] = eval i j x with specific k,i,j can be solved by the following tactic *) Ltac solve_eval := intros; rewrite <- spec_plus_t'; unfold to_Z; simpl dom_op; reflexivity. (** The last particular case that remains useful *) Lemma spec_eval_size : forall n x, [Nn n x] = eval Size (S n) x. Proof. induction n. solve_eval. destruct x as [ | xh xl ]. simpl. unfold eval. rewrite make_op_S. rewrite nmake_op_S. auto. simpl word in xh, xl |- *. unfold to_Z in *. rewrite make_op_WW. unfold eval in *. rewrite nmake_WW. f_equal; auto. f_equal; auto. f_equal. rewrite <- digits_dom_op_nmake. rewrite plus_comm; auto. Qed. (** An optimized [mk_t_w]. We could say mk_t_w' := mk_t _ (plus_t' n m x) (TODO: WHY NOT, BTW ??). Instead we directly define functions for all intersting [n], reverting to naive [mk_t_w] at places that should normally never be used (see [mul] and [div_gt]). *) "; for i = 0 to size-1 do let pattern = (iter_str (size+1-i) "(S ") ^ "_" ^ (iter_str (size+1-i) ")") in pr " Let mk_t_%iw m := Eval cbv beta zeta iota delta [ mk_t plus ] in match m return word w%i (S m) -> t with | %s as p => mk_t_w %i (S p) | p => mk_t (%i+p) end. " i i pattern i (i+1) done; pr " Let mk_t_w' n : forall m, word (dom_t n) (S m) -> t := match n return (forall m, word (dom_t n) (S m) -> t) with"; for i = 0 to size-1 do pr " | %i => mk_t_%iw" i i done; pr " | Size => Nn | _ as n' => fun m => mk_t_w n' (S m) end. "; pr " Ltac solve_spec_mk_t_w' := rewrite <- spec_plus_t'; match goal with _ : word (dom_t ?n) ?m |- _ => apply (spec_mk_t (n+m)) end. Theorem spec_mk_t_w' : forall n m x, [mk_t_w' n m x] = eval n (S m) x. Proof. intros. repeat (apply spec_mk_t_w || (destruct n; [repeat (apply spec_mk_t_w || (destruct m; [solve_spec_mk_t_w'|]))|])). apply spec_eval_size. Qed. 
(** * Extend : injecting [dom_t n] into [word (dom_t n) (S m)] *) Definition extend n m (x:dom_t n) : word (dom_t n) (S m) := DoubleBase.extend_aux m (WW (zeron n) x). Lemma spec_extend : forall n m x, [mk_t n x] = eval n (S m) (extend n m x). Proof. intros. unfold eval, extend. rewrite spec_mk_t. assert (H : forall (x:dom_t n), (ZnZ.to_Z (zeron n) * base (ZnZ.digits (dom_op n)) + ZnZ.to_Z x = ZnZ.to_Z x)%%Z). clear; intros; rewrite spec_zeron; auto. rewrite <- (@DoubleBase.spec_extend _ (WW (zeron n)) (ZnZ.digits (dom_op n)) ZnZ.to_Z H m x). simpl. rewrite digits_nmake, <- nmake_double. auto. Qed. (** A particular case of extend, used in [same_level]: [extend_size] is [extend Size] *) Definition extend_size := DoubleBase.extend (WW (W0:dom_t Size)). Lemma spec_extend_size : forall n x, [mk_t Size x] = [Nn n (extend_size n x)]. Proof. intros. rewrite spec_eval_size. apply (spec_extend Size n). Qed. (** Misc results about extensions *) Let spec_extend_WW : forall n x, [Nn (S n) (WW W0 x)] = [Nn n x]. Proof. intros n x. set (N:=SizePlus (S n)). change ([Nn (S n) (extend N 0 x)]=[mk_t N x]). rewrite (spec_extend N 0). solve_eval. Qed. Let spec_extend_tr: forall m n w, [Nn (m + n) (extend_tr w m)] = [Nn n w]. Proof. induction m; auto. intros n x; simpl extend_tr. simpl plus; rewrite spec_extend_WW; auto. Qed. Let spec_cast_l: forall n m x1, [Nn n x1] = [Nn (Max.max n m) (castm (diff_r n m) (extend_tr x1 (snd (diff n m))))]. Proof. intros n m x1; case (diff_r n m); simpl castm. rewrite spec_extend_tr; auto. Qed. Let spec_cast_r: forall n m x1, [Nn m x1] = [Nn (Max.max n m) (castm (diff_l n m) (extend_tr x1 (fst (diff n m))))]. Proof. intros n m x1; case (diff_l n m); simpl castm. rewrite spec_extend_tr; auto. Qed. Ltac unfold_lets := match goal with | h : _ |- _ => unfold h; clear h; unfold_lets | _ => idtac end. (** * [same_level] Generic binary operator construction, by extending the smaller argument to the level of the other. *) Section SameLevel. Variable res: Type. Variable P : Z -> Z -> res -> Prop. Variable f : forall n, dom_t n -> dom_t n -> res. Variable Pf : forall n x y, P (ZnZ.to_Z x) (ZnZ.to_Z y) (f n x y). "; for i = 0 to size do pr " Let f%i : w%i -> w%i -> res := f %i." i i i i done; pr " Let fn n := f (SizePlus (S n)). Let Pf' : forall n x y u v, u = [mk_t n x] -> v = [mk_t n y] -> P u v (f n x y). Proof. intros. subst. rewrite 2 spec_mk_t. apply Pf. Qed. "; let ext i j s = if j <= i then s else Printf.sprintf "(extend %i %i %s)" i (j-i-1) s in pr " Notation same_level_folded := (fun x y => match x, y with"; for i = 0 to size do for j = 0 to size do pr " | N%i wx, N%i wy => f%i %s %s" i j (max i j) (ext i j "wx") (ext j i "wy") done; pr " | N%i wx, Nn m wy => fn m (extend_size m %s) wy" i (ext i size "wx") done; for i = 0 to size do pr " | Nn n wx, N%i wy => fn n wx (extend_size n %s)" i (ext i size "wy") done; pr " | Nn n wx, Nn m wy => let mn := Max.max n m in let d := diff n m in fn mn (castm (diff_r n m) (extend_tr wx (snd d))) (castm (diff_l n m) (extend_tr wy (fst d))) end). "; pr " Definition same_level := Eval lazy beta iota delta [ DoubleBase.extend DoubleBase.extend_aux extend zeron ] in same_level_folded. Lemma spec_same_level_0: forall x y, P [x] [y] (same_level x y). Proof. change same_level with same_level_folded. unfold_lets. destruct x, y; apply Pf'; simpl mk_t; rewrite <- ?spec_extend_size; match goal with | |- context [ extend ?n ?m _ ] => apply (spec_extend n m) | |- context [ castm _ _ ] => apply spec_cast_l || apply spec_cast_r | _ => reflexivity end. Qed. 
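(* Illustrative note: the [Eval lazy ... in] above partially evaluates
   [same_level_folded], inlining [zeron] and the extension functions, so
   that the resulting [same_level] dispatches directly on the constructors
   [N0] .. [Nn] with all the padding code already expanded;
   [spec_same_level_0] then transfers any predicate [P] compatible with [f]
   to this unfolded form. *)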
End SameLevel. Arguments same_level [res] f x y. Theorem spec_same_level_dep : forall res (P : nat -> Z -> Z -> res -> Prop) (Pantimon : forall n m z z' r, n <= m -> P m z z' r -> P n z z' r) (f : forall n, dom_t n -> dom_t n -> res) (Pf: forall n x y, P n (ZnZ.to_Z x) (ZnZ.to_Z y) (f n x y)), forall x y, P (level x) [x] [y] (same_level f x y). Proof. intros res P Pantimon f Pf. set (f' := fun n x y => (n, f n x y)). set (P' := fun z z' r => P (fst r) z z' (snd r)). assert (FST : forall x y, level x <= fst (same_level f' x y)) by (destruct x, y; simpl; omega with * ). assert (SND : forall x y, same_level f x y = snd (same_level f' x y)) by (destruct x, y; reflexivity). intros. eapply Pantimon; [eapply FST|]. rewrite SND. eapply (@spec_same_level_0 _ P' f'); eauto. Qed. (** * [iter] Generic binary operator construction, by splitting the larger argument in blocks and applying the smaller argument to them. *) Section Iter. Variable res: Type. Variable P: Z -> Z -> res -> Prop. Variable f : forall n, dom_t n -> dom_t n -> res. Variable Pf : forall n x y, P (ZnZ.to_Z x) (ZnZ.to_Z y) (f n x y). Variable fd : forall n m, dom_t n -> word (dom_t n) (S m) -> res. Variable fg : forall n m, word (dom_t n) (S m) -> dom_t n -> res. Variable Pfd : forall n m x y, P (ZnZ.to_Z x) (eval n (S m) y) (fd n m x y). Variable Pfg : forall n m x y, P (eval n (S m) x) (ZnZ.to_Z y) (fg n m x y). Variable fnm: forall n m, word (dom_t Size) (S n) -> word (dom_t Size) (S m) -> res. Variable Pfnm: forall n m x y, P [Nn n x] [Nn m y] (fnm n m x y). Let Pf' : forall n x y u v, u = [mk_t n x] -> v = [mk_t n y] -> P u v (f n x y). Proof. intros. subst. rewrite 2 spec_mk_t. apply Pf. Qed. Let Pfd' : forall n m x y u v, u = [mk_t n x] -> v = eval n (S m) y -> P u v (fd n m x y). Proof. intros. subst. rewrite spec_mk_t. apply Pfd. Qed. Let Pfg' : forall n m x y u v, u = eval n (S m) x -> v = [mk_t n y] -> P u v (fg n m x y). Proof. intros. subst. rewrite spec_mk_t. apply Pfg. Qed. "; for i = 0 to size do pr " Let f%i := f %i." i i done; for i = 0 to size do pr " Let f%in := fd %i." i i; pr " Let fn%i := fg %i." i i; done; pr " Notation iter_folded := (fun x y => match x, y with"; for i = 0 to size do for j = 0 to size do pr " | N%i wx, N%i wy => f%s wx wy" i j (if i = j then string_of_int i else if i < j then string_of_int i ^ "n " ^ string_of_int (j-i-1) else "n" ^ string_of_int j ^ " " ^ string_of_int (i-j-1)) done; pr " | N%i wx, Nn m wy => f%in m %s wy" i size (ext i size "wx") done; for i = 0 to size do pr " | Nn n wx, N%i wy => fn%i n wx %s" i size (ext i size "wy") done; pr " | Nn n wx, Nn m wy => fnm n m wx wy end). "; pr " Definition iter := Eval lazy beta iota delta [extend DoubleBase.extend DoubleBase.extend_aux zeron] in iter_folded. Lemma spec_iter: forall x y, P [x] [y] (iter x y). Proof. change iter with iter_folded; unfold_lets. destruct x; destruct y; apply Pf' || apply Pfd' || apply Pfg' || apply Pfnm; simpl mk_t; match goal with | |- ?x = ?x => reflexivity | |- [Nn _ _] = _ => apply spec_eval_size | |- context [extend ?n ?m _] => apply (spec_extend n m) | _ => idtac end; unfold to_Z; rewrite <- spec_plus_t'; simpl dom_op; reflexivity. Qed. End Iter. "; pr " Definition switch (P:nat->Type)%s (fn:forall n, P n) n := match n return P n with" (iter_str_gen size (fun i -> Printf.sprintf "(f%i:P %i)" i i)); for i = 0 to size do pr " | %i => f%i" i i done; pr " | n => fn n end. "; pr " Lemma spec_switch : forall P (f:forall n, P n) n, switch P %sf n = f n. Proof. repeat (destruct n; try reflexivity). Qed. 
" (iter_str_gen size (fun i -> Printf.sprintf "(f %i) " i)); pr " (** * [iter_sym] A variant of [iter] for symmetric functions, or pseudo-symmetric functions (when f y x can be deduced from f x y). *) Section IterSym. Variable res: Type. Variable P: Z -> Z -> res -> Prop. Variable f : forall n, dom_t n -> dom_t n -> res. Variable Pf : forall n x y, P (ZnZ.to_Z x) (ZnZ.to_Z y) (f n x y). Variable fg : forall n m, word (dom_t n) (S m) -> dom_t n -> res. Variable Pfg : forall n m x y, P (eval n (S m) x) (ZnZ.to_Z y) (fg n m x y). Variable fnm: forall n m, word (dom_t Size) (S n) -> word (dom_t Size) (S m) -> res. Variable Pfnm: forall n m x y, P [Nn n x] [Nn m y] (fnm n m x y). Variable opp: res -> res. Variable Popp : forall u v r, P u v r -> P v u (opp r). "; for i = 0 to size do pr " Let f%i := f %i." i i done; for i = 0 to size do pr " Let fn%i := fg %i." i i; done; pr " Let f' := switch _ %s f." (iter_name 0 size "f" ""); pr " Let fg' := switch _ %s fg." (iter_name 0 size "fn" ""); pr " Local Notation iter_sym_folded := (iter res f' (fun n m x y => opp (fg' n m y x)) fg' fnm). Definition iter_sym := Eval lazy beta zeta iota delta [iter f' fg' switch] in iter_sym_folded. Lemma spec_iter_sym: forall x y, P [x] [y] (iter_sym x y). Proof. intros. change iter_sym with iter_sym_folded. apply spec_iter; clear x y. unfold_lets. intros. rewrite spec_switch. auto. intros. apply Popp. unfold_lets. rewrite spec_switch; auto. intros. unfold_lets. rewrite spec_switch; auto. auto. Qed. End IterSym. (** * Reduction [reduce] can be used instead of [mk_t], it will choose the lowest possible level. NB: We only search and remove leftmost W0's via ZnZ.eq0, any non-W0 block ends the process, even if its value is 0. *) (** First, a direct version ... *) Fixpoint red_t n : dom_t n -> t := match n return dom_t n -> t with | O => N0 | S n => fun x => let x' := pred_t n x in reduce_n1 _ _ (N0 zero0) ZnZ.eq0 (red_t n) (mk_t_S n) x' end. Lemma spec_red_t : forall n x, [red_t n x] = [mk_t n x]. Proof. induction n. reflexivity. intros. simpl red_t. unfold reduce_n1. rewrite <- (succ_pred_t n x) at 2. remember (pred_t n x) as x'. rewrite spec_mk_t, spec_succ_t. destruct x' as [ | xh xl]. simpl. apply ZnZ.spec_0. generalize (ZnZ.spec_eq0 xh); case ZnZ.eq0; intros H. rewrite IHn, spec_mk_t. simpl. rewrite H; auto. apply spec_mk_t_S. Qed. (** ... then a specialized one *) "; for i = 0 to size do pr " Definition eq0%i := @ZnZ.eq0 _ w%i_op." i i; done; pr " Definition reduce_0 := N0."; for i = 1 to size do pr " Definition reduce_%i :=" i; pr " Eval lazy beta iota delta [reduce_n1] in"; pr " reduce_n1 _ _ (N0 zero0) eq0%i reduce_%i N%i." (i-1) (i-1) i done; pr " Definition reduce_%i :=" (size+1); pr " Eval lazy beta iota delta [reduce_n1] in"; pr " reduce_n1 _ _ (N0 zero0) eq0%i reduce_%i (Nn 0)." size size; pr " Definition reduce_n n :="; pr " Eval lazy beta iota delta [reduce_n] in"; pr " reduce_n _ _ (N0 zero0) reduce_%i Nn n." (size + 1); pr ""; pr " Definition reduce n : dom_t n -> t :="; pr " match n with"; for i = 0 to size do pr " | %i => reduce_%i" i i; done; pr " | %s(S n) => reduce_n n" (if size=0 then "" else "SizePlus "); pr " end."; pr ""; pr " Ltac unfold_red := unfold reduce, %s." 
(iter_name 1 size "reduce_" ","); pr " Ltac solve_red := let H := fresh in let G := fresh in match goal with | |- ?P (S ?n) => assert (H:P n) by solve_red | _ => idtac end; intros n G x; destruct (le_lt_eq_dec _ _ G) as [LT|EQ]; solve [ apply (H _ (lt_n_Sm_le _ _ LT)) | inversion LT | subst; change (reduce 0 x = red_t 0 x); reflexivity | specialize (H (pred n)); subst; destruct x; [|unfold_red; rewrite H; auto]; reflexivity ]. Lemma reduce_equiv : forall n x, n <= Size -> reduce n x = red_t n x. Proof. set (P N := forall n, n <= N -> forall x, reduce n x = red_t n x). intros n x H. revert n H x. change (P Size). solve_red. Qed. Lemma spec_reduce_n : forall n x, [reduce_n n x] = [Nn n x]. Proof. assert (H : forall x, reduce_%i x = red_t (SizePlus 1) x). destruct x; [|unfold reduce_%i; rewrite (reduce_equiv Size)]; auto. induction n. intros. rewrite H. apply spec_red_t. destruct x as [|xh xl]. simpl. rewrite make_op_S. exact ZnZ.spec_0. fold word in *. destruct xh; auto. simpl reduce_n. rewrite IHn. rewrite spec_extend_WW; auto. Qed. " (size+1) (size+1); pr " Lemma spec_reduce : forall n x, [reduce n x] = ZnZ.to_Z x. Proof. do_size (destruct n; [intros; rewrite reduce_equiv;[apply spec_red_t|auto with arith]|]). apply spec_reduce_n. Qed. End Make. "; coq-8.4pl2/theories/Numbers/Natural/Peano/0000750000175000001440000000000012127276550017542 5ustar notinuserscoq-8.4pl2/theories/Numbers/Natural/Peano/NPeano.v0000640000175000001440000005353412010532755021115 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _, O => false | S n', S m' => leb n' m' end. Definition ltb n m := leb (S n) m. Infix "<=?" := leb (at level 70) : nat_scope. Infix " n <= m. Proof. revert m. induction n. split; auto with arith. destruct m; simpl. now split. rewrite IHn. split; auto with arith. Qed. Lemma ltb_lt n m : (n n < m. Proof. unfold ltb, lt. apply leb_le. Qed. Fixpoint pow n m := match m with | O => 1 | S m => n * (pow n m) end. Infix "^" := pow : nat_scope. Lemma pow_0_r : forall a, a^0 = 1. Proof. reflexivity. Qed. Lemma pow_succ_r : forall a b, 0<=b -> a^(S b) = a * a^b. Proof. reflexivity. Qed. Definition square n := n * n. Lemma square_spec n : square n = n * n. Proof. reflexivity. Qed. Definition Even n := exists m, n = 2*m. Definition Odd n := exists m, n = 2*m+1. Fixpoint even n := match n with | O => true | 1 => false | S (S n') => even n' end. Definition odd n := negb (even n). Lemma even_spec : forall n, even n = true <-> Even n. Proof. fix 1. destruct n as [|[|n]]; simpl; try rewrite even_spec; split. now exists 0. trivial. discriminate. intros (m,H). destruct m. discriminate. simpl in H. rewrite <- plus_n_Sm in H. discriminate. intros (m,H). exists (S m). rewrite H. simpl. now rewrite plus_n_Sm. intros (m,H). destruct m. discriminate. exists m. simpl in H. rewrite <- plus_n_Sm in H. inversion H. reflexivity. Qed. Lemma odd_spec : forall n, odd n = true <-> Odd n. Proof. unfold odd. fix 1. destruct n as [|[|n]]; simpl; try rewrite odd_spec; split. discriminate. intros (m,H). rewrite <- plus_n_Sm in H; discriminate. now exists 0. trivial. intros (m,H). exists (S m). rewrite H. simpl. now rewrite <- (plus_n_Sm m). intros (m,H). destruct m. discriminate. exists m. simpl in H. rewrite <- plus_n_Sm in H. inversion H. simpl. now rewrite <- !plus_n_Sm, <- !plus_n_O. Qed. Lemma Even_equiv : forall n, Even n <-> Even.even n. Proof. split. intros (p,->). apply Even.even_mult_l. do 3 constructor. 
intros H. destruct (even_2n n H) as (p,->). exists p. unfold double. simpl. now rewrite <- plus_n_O. Qed. Lemma Odd_equiv : forall n, Odd n <-> Even.odd n. Proof. split. intros (p,->). rewrite <- plus_n_Sm, <- plus_n_O. apply Even.odd_S. apply Even.even_mult_l. do 3 constructor. intros H. destruct (odd_S2n n H) as (p,->). exists p. unfold double. simpl. now rewrite <- plus_n_Sm, <- !plus_n_O. Qed. (* A linear, tail-recursive, division for nat. In [divmod], [y] is the predecessor of the actual divisor, and [u] is [y] minus the real remainder *) Fixpoint divmod x y q u := match x with | 0 => (q,u) | S x' => match u with | 0 => divmod x' y (S q) y | S u' => divmod x' y q u' end end. Definition div x y := match y with | 0 => y | S y' => fst (divmod x y' 0 y') end. Definition modulo x y := match y with | 0 => y | S y' => y' - snd (divmod x y' 0 y') end. Infix "/" := div : nat_scope. Infix "mod" := modulo (at level 40, no associativity) : nat_scope. Lemma divmod_spec : forall x y q u, u <= y -> let (q',u') := divmod x y q u in x + (S y)*q + (y-u) = (S y)*q' + (y-u') /\ u' <= y. Proof. induction x. simpl. intuition. intros y q u H. destruct u; simpl divmod. generalize (IHx y (S q) y (le_n y)). destruct divmod as (q',u'). intros (EQ,LE); split; trivial. rewrite <- EQ, <- minus_n_O, minus_diag, <- plus_n_O. now rewrite !plus_Sn_m, plus_n_Sm, <- plus_assoc, mult_n_Sm. generalize (IHx y q u (le_Sn_le _ _ H)). destruct divmod as (q',u'). intros (EQ,LE); split; trivial. rewrite <- EQ. rewrite !plus_Sn_m, plus_n_Sm. f_equal. now apply minus_Sn_m. Qed. Lemma div_mod : forall x y, y<>0 -> x = y*(x/y) + x mod y. Proof. intros x y Hy. destruct y; [ now elim Hy | clear Hy ]. unfold div, modulo. generalize (divmod_spec x y 0 y (le_n y)). destruct divmod as (q,u). intros (U,V). simpl in *. now rewrite <- mult_n_O, minus_diag, <- !plus_n_O in U. Qed. Lemma mod_bound_pos : forall x y, 0<=x -> 0 0 <= x mod y < y. Proof. intros x y Hx Hy. split. auto with arith. destruct y; [ now elim Hy | clear Hy ]. unfold modulo. apply le_n_S, le_minus. Qed. (** Square root *) (** The following square root function is linear (and tail-recursive). With Peano representation, we can't do better. For faster algorithm, see Psqrt/Zsqrt/Nsqrt... We search the square root of n = k + p^2 + (q - r) with q = 2p and 0<=r<=q. We start with p=q=r=0, hence looking for the square root of n = k. Then we progressively decrease k and r. When k = S k' and r=0, it means we can use (S p) as new sqrt candidate, since (S k')+p^2+2p = k'+(S p)^2. When k reaches 0, we have found the biggest p^2 square contained in n, hence the square root of n is p. *) Fixpoint sqrt_iter k p q r := match k with | O => p | S k' => match r with | O => sqrt_iter k' (S p) (S (S q)) (S (S q)) | S r' => sqrt_iter k' p q r' end end. Definition sqrt n := sqrt_iter n 0 0 0. Lemma sqrt_iter_spec : forall k p q r, q = p+p -> r<=q -> let s := sqrt_iter k p q r in s*s <= k + p*p + (q - r) < (S s)*(S s). Proof. induction k. (* k = 0 *) simpl; intros p q r Hq Hr. split. apply le_plus_l. apply le_lt_n_Sm. rewrite <- mult_n_Sm. rewrite plus_assoc, (plus_comm p), <- plus_assoc. apply plus_le_compat; trivial. rewrite <- Hq. apply le_minus. (* k = S k' *) destruct r. (* r = 0 *) intros Hq _. replace (S k + p*p + (q-0)) with (k + (S p)*(S p) + (S (S q) - S (S q))). apply IHk. simpl. rewrite <- plus_n_Sm. congruence. auto with arith. rewrite minus_diag, <- minus_n_O, <- plus_n_O. simpl. rewrite <- plus_n_Sm; f_equal. rewrite <- plus_assoc; f_equal. 
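(* Illustrative note: the rewrites around this point establish the identity
   justifying the [replace] above, namely
   S k + p*p + (q - 0) = k + (S p)*(S p) + (S (S q) - S (S q)),
   which holds because q = p + p and (S p)*(S p) = p*p + p + p + 1. *)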
rewrite <- mult_n_Sm, (plus_comm p), <- plus_assoc. congruence. (* r = S r' *) intros Hq Hr. replace (S k + p*p + (q-S r)) with (k + p*p + (q - r)). apply IHk; auto with arith. simpl. rewrite plus_n_Sm. f_equal. rewrite minus_Sn_m; auto. Qed. Lemma sqrt_spec : forall n, (sqrt n)*(sqrt n) <= n < S (sqrt n) * S (sqrt n). Proof. intros. set (s:=sqrt n). replace n with (n + 0*0 + (0-0)). apply sqrt_iter_spec; auto. simpl. now rewrite <- 2 plus_n_O. Qed. (** A linear tail-recursive base-2 logarithm In [log2_iter], we maintain the logarithm [p] of the counter [q], while [r] is the distance between [q] and the next power of 2, more precisely [q + S r = 2^(S p)] and [r<2^p]. At each recursive call, [q] goes up while [r] goes down. When [r] is 0, we know that [q] has almost reached a power of 2, and we increase [p] at the next call, while resetting [r] to [q]. Graphically (numbers are [q], stars are [r]) : << 10 9 8 7 * 6 * 5 ... 4 3 * 2 * 1 * * 0 * * * >> We stop when [k], the global downward counter reaches 0. At that moment, [q] is the number we're considering (since [k+q] is invariant), and [p] its logarithm. *) Fixpoint log2_iter k p q r := match k with | O => p | S k' => match r with | O => log2_iter k' (S p) (S q) q | S r' => log2_iter k' p (S q) r' end end. Definition log2 n := log2_iter (pred n) 0 1 0. Lemma log2_iter_spec : forall k p q r, 2^(S p) = q + S r -> r < 2^p -> let s := log2_iter k p q r in 2^s <= k + q < 2^(S s). Proof. induction k. (* k = 0 *) intros p q r EQ LT. simpl log2_iter. cbv zeta. split. rewrite plus_O_n. apply plus_le_reg_l with (2^p). simpl pow in EQ. rewrite <- plus_n_O in EQ. rewrite EQ. rewrite plus_comm. apply plus_le_compat_r. now apply lt_le_S. rewrite EQ, plus_comm. apply plus_lt_compat_l. apply lt_0_Sn. (* k = S k' *) intros p q r EQ LT. destruct r. (* r = 0 *) rewrite <- plus_n_Sm, <- plus_n_O in EQ. rewrite plus_Sn_m, plus_n_Sm. apply IHk. rewrite <- EQ. remember (S p) as p'; simpl. now rewrite <- plus_n_O. unfold lt. now rewrite EQ. (* r = S r' *) rewrite plus_Sn_m, plus_n_Sm. apply IHk. now rewrite plus_Sn_m, plus_n_Sm. unfold lt. now apply lt_le_weak. Qed. Lemma log2_spec : forall n, 0 2^(log2 n) <= n < 2^(S (log2 n)). Proof. intros. set (s:=log2 n). replace n with (pred n + 1). apply log2_iter_spec; auto. rewrite <- plus_n_Sm, <- plus_n_O. symmetry. now apply S_pred with 0. Qed. Lemma log2_nonpos : forall n, n<=0 -> log2 n = 0. Proof. inversion 1; now subst. Qed. (** * Gcd *) (** We use Euclid algorithm, which is normally not structural, but Coq is now clever enough to accept this (behind modulo there is a subtraction, which now preserves being a subterm) *) Fixpoint gcd a b := match a with | O => b | S a' => gcd (b mod (S a')) (S a') end. Definition divide x y := exists z, y=z*x. Notation "( x | y )" := (divide x y) (at level 0) : nat_scope. Lemma gcd_divide : forall a b, (gcd a b | a) /\ (gcd a b | b). Proof. fix 1. intros [|a] b; simpl. split. now exists 0. exists 1. simpl. now rewrite <- plus_n_O. fold (b mod (S a)). destruct (gcd_divide (b mod (S a)) (S a)) as (H,H'). set (a':=S a) in *. split; auto. rewrite (div_mod b a') at 2 by discriminate. destruct H as (u,Hu), H' as (v,Hv). rewrite mult_comm. exists ((b/a')*v + u). rewrite mult_plus_distr_r. now rewrite <- mult_assoc, <- Hv, <- Hu. Qed. Lemma gcd_divide_l : forall a b, (gcd a b | a). Proof. intros. apply gcd_divide. Qed. Lemma gcd_divide_r : forall a b, (gcd a b | b). Proof. intros. apply gcd_divide. Qed. Lemma gcd_greatest : forall a b c, (c|a) -> (c|b) -> (c|gcd a b). Proof. fix 1. 
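(* Illustrative note: as for [gcd_divide] above, the [fix] tactic makes the
   proof term structurally recursive on [a], mirroring the recursion of
   [gcd] itself; the recursive use of [gcd_greatest] below is on
   [b mod (S a)], which the guard condition accepts for the same reason the
   definition of [gcd] is accepted. *)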
intros [|a] b; simpl; auto. fold (b mod (S a)). intros c H H'. apply gcd_greatest; auto. set (a':=S a) in *. rewrite (div_mod b a') in H' by discriminate. destruct H as (u,Hu), H' as (v,Hv). exists (v - (b/a')*u). rewrite mult_comm in Hv. now rewrite mult_minus_distr_r, <- Hv, <-mult_assoc, <-Hu, minus_plus. Qed. (** * Bitwise operations *) (** We provide here some bitwise operations for unary numbers. Some might be really naive, they are just there for fullfiling the same interface as other for natural representations. As soon as binary representations such as NArith are available, it is clearly better to convert to/from them and use their ops. *) Fixpoint testbit a n := match n with | O => odd a | S n => testbit (div2 a) n end. Definition shiftl a n := iter_nat n _ double a. Definition shiftr a n := iter_nat n _ div2 a. Fixpoint bitwise (op:bool->bool->bool) n a b := match n with | O => O | S n' => (if op (odd a) (odd b) then 1 else 0) + 2*(bitwise op n' (div2 a) (div2 b)) end. Definition land a b := bitwise andb a a b. Definition lor a b := bitwise orb (max a b) a b. Definition ldiff a b := bitwise (fun b b' => b && negb b') a a b. Definition lxor a b := bitwise xorb (max a b) a b. Lemma double_twice : forall n, double n = 2*n. Proof. simpl; intros. now rewrite <- plus_n_O. Qed. Lemma testbit_0_l : forall n, testbit 0 n = false. Proof. now induction n. Qed. Lemma testbit_odd_0 a : testbit (2*a+1) 0 = true. Proof. unfold testbit. rewrite odd_spec. now exists a. Qed. Lemma testbit_even_0 a : testbit (2*a) 0 = false. Proof. unfold testbit, odd. rewrite (proj2 (even_spec _)); trivial. now exists a. Qed. Lemma testbit_odd_succ a n : testbit (2*a+1) (S n) = testbit a n. Proof. unfold testbit; fold testbit. rewrite <- plus_n_Sm, <- plus_n_O. f_equal. apply div2_double_plus_one. Qed. Lemma testbit_even_succ a n : testbit (2*a) (S n) = testbit a n. Proof. unfold testbit; fold testbit. f_equal. apply div2_double. Qed. Lemma shiftr_spec : forall a n m, testbit (shiftr a n) m = testbit a (m+n). Proof. induction n; intros m. trivial. now rewrite <- plus_n_O. now rewrite <- plus_n_Sm, <- plus_Sn_m, <- IHn. Qed. Lemma shiftl_spec_high : forall a n m, n<=m -> testbit (shiftl a n) m = testbit a (m-n). Proof. induction n; intros m H. trivial. now rewrite <- minus_n_O. destruct m. inversion H. simpl. apply le_S_n in H. change (shiftl a (S n)) with (double (shiftl a n)). rewrite double_twice, div2_double. now apply IHn. Qed. Lemma shiftl_spec_low : forall a n m, m testbit (shiftl a n) m = false. Proof. induction n; intros m H. inversion H. change (shiftl a (S n)) with (double (shiftl a n)). destruct m; simpl. unfold odd. apply negb_false_iff. apply even_spec. exists (shiftl a n). apply double_twice. rewrite double_twice, div2_double. apply IHn. now apply lt_S_n. Qed. Lemma div2_bitwise : forall op n a b, div2 (bitwise op (S n) a b) = bitwise op n (div2 a) (div2 b). Proof. intros. unfold bitwise; fold bitwise. destruct (op (odd a) (odd b)). now rewrite div2_double_plus_one. now rewrite plus_O_n, div2_double. Qed. Lemma odd_bitwise : forall op n a b, odd (bitwise op (S n) a b) = op (odd a) (odd b). Proof. intros. unfold bitwise; fold bitwise. destruct (op (odd a) (odd b)). apply odd_spec. rewrite plus_comm. eexists; eauto. unfold odd. apply negb_false_iff. apply even_spec. rewrite plus_O_n; eexists; eauto. Qed. Lemma div2_decr : forall a n, a <= S n -> div2 a <= n. Proof. destruct a; intros. apply le_0_n. apply le_trans with a. apply lt_n_Sm_le, lt_div2, lt_0_Sn. now apply le_S_n. Qed. 
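(* Illustrative note: the fuel given to [bitwise] differs among the
   operations above. For [land] and [ldiff], fueling with [a] itself is
   enough: above that many bit positions [a] only has zero bits, and the
   boolean operator returns [false] whenever its first argument is [false],
   which is exactly what lemma [testbit_bitwise_1] below requires (together
   with [a<=n]). [lor] and [lxor] instead need [max a b], since either
   argument may still contribute bits (lemma [testbit_bitwise_2]).
   The following sanity checks are illustrative additions, proved by pure
   computation, with 6 = 0b110 and 3 = 0b011. *)
Example land_6_3 : land 6 3 = 2.
Proof. reflexivity. Qed.
Example lor_6_3 : lor 6 3 = 7.
Proof. reflexivity. Qed.
Example lxor_6_3 : lxor 6 3 = 5.
Proof. reflexivity. Qed.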
Lemma testbit_bitwise_1 : forall op, (forall b, op false b = false) -> forall n m a b, a<=n -> testbit (bitwise op n a b) m = op (testbit a m) (testbit b m). Proof. intros op Hop. induction n; intros m a b Ha. simpl. inversion Ha; subst. now rewrite testbit_0_l. destruct m. apply odd_bitwise. unfold testbit; fold testbit. rewrite div2_bitwise. apply IHn; now apply div2_decr. Qed. Lemma testbit_bitwise_2 : forall op, op false false = false -> forall n m a b, a<=n -> b<=n -> testbit (bitwise op n a b) m = op (testbit a m) (testbit b m). Proof. intros op Hop. induction n; intros m a b Ha Hb. simpl. inversion Ha; inversion Hb; subst. now rewrite testbit_0_l. destruct m. apply odd_bitwise. unfold testbit; fold testbit. rewrite div2_bitwise. apply IHn; now apply div2_decr. Qed. Lemma land_spec : forall a b n, testbit (land a b) n = testbit a n && testbit b n. Proof. intros. unfold land. apply testbit_bitwise_1; trivial. Qed. Lemma ldiff_spec : forall a b n, testbit (ldiff a b) n = testbit a n && negb (testbit b n). Proof. intros. unfold ldiff. apply testbit_bitwise_1; trivial. Qed. Lemma lor_spec : forall a b n, testbit (lor a b) n = testbit a n || testbit b n. Proof. intros. unfold lor. apply testbit_bitwise_2. trivial. destruct (le_ge_dec a b). now rewrite max_r. now rewrite max_l. destruct (le_ge_dec a b). now rewrite max_r. now rewrite max_l. Qed. Lemma lxor_spec : forall a b n, testbit (lxor a b) n = xorb (testbit a n) (testbit b n). Proof. intros. unfold lxor. apply testbit_bitwise_2. trivial. destruct (le_ge_dec a b). now rewrite max_r. now rewrite max_l. destruct (le_ge_dec a b). now rewrite max_r. now rewrite max_l. Qed. (** * Implementation of [NAxiomsSig] by [nat] *) Module Nat <: NAxiomsSig <: UsualDecidableTypeFull <: OrderedTypeFull <: TotalOrder. (** Bi-directional induction. *) Theorem bi_induction : forall A : nat -> Prop, Proper (eq==>iff) A -> A 0 -> (forall n : nat, A n <-> A (S n)) -> forall n : nat, A n. Proof. intros A A_wd A0 AS. apply nat_ind. assumption. intros; now apply -> AS. Qed. (** Basic operations. *) Definition eq_equiv : Equivalence (@eq nat) := eq_equivalence. Local Obligation Tactic := simpl_relation. Program Instance succ_wd : Proper (eq==>eq) S. Program Instance pred_wd : Proper (eq==>eq) pred. Program Instance add_wd : Proper (eq==>eq==>eq) plus. Program Instance sub_wd : Proper (eq==>eq==>eq) minus. Program Instance mul_wd : Proper (eq==>eq==>eq) mult. Theorem pred_succ : forall n : nat, pred (S n) = n. Proof. reflexivity. Qed. Theorem one_succ : 1 = S 0. Proof. reflexivity. Qed. Theorem two_succ : 2 = S 1. Proof. reflexivity. Qed. Theorem add_0_l : forall n : nat, 0 + n = n. Proof. reflexivity. Qed. Theorem add_succ_l : forall n m : nat, (S n) + m = S (n + m). Proof. reflexivity. Qed. Theorem sub_0_r : forall n : nat, n - 0 = n. Proof. intro n; now destruct n. Qed. Theorem sub_succ_r : forall n m : nat, n - (S m) = pred (n - m). Proof. induction n; destruct m; simpl; auto. apply sub_0_r. Qed. Theorem mul_0_l : forall n : nat, 0 * n = 0. Proof. reflexivity. Qed. Theorem mul_succ_l : forall n m : nat, S n * m = n * m + m. Proof. assert (add_S_r : forall n m, n+S m = S(n+m)) by (induction n; auto). assert (add_comm : forall n m, n+m = m+n). induction n; simpl; auto. intros; rewrite add_S_r; auto. intros n m; now rewrite add_comm. Qed. (** Order on natural numbers *) Program Instance lt_wd : Proper (eq==>eq==>iff) lt. Theorem lt_succ_r : forall n m : nat, n < S m <-> n <= m. Proof. unfold lt; split. apply le_S_n. induction 1; auto. Qed. 
Theorem lt_eq_cases : forall n m : nat, n <= m <-> n < m \/ n = m. Proof. split. inversion 1; auto. rewrite lt_succ_r; auto. destruct 1; [|subst; auto]. rewrite <- lt_succ_r; auto. Qed. Theorem lt_irrefl : forall n : nat, ~ (n < n). Proof. induction n. intro H; inversion H. rewrite lt_succ_r; auto. Qed. (** Facts specific to natural numbers, not integers. *) Theorem pred_0 : pred 0 = 0. Proof. reflexivity. Qed. (** Recursion fonction *) Definition recursion {A} : A -> (nat -> A -> A) -> nat -> A := nat_rect (fun _ => A). Instance recursion_wd {A} (Aeq : relation A) : Proper (Aeq ==> (eq==>Aeq==>Aeq) ==> eq ==> Aeq) recursion. Proof. intros a a' Ha f f' Hf n n' Hn. subst n'. induction n; simpl; auto. apply Hf; auto. Qed. Theorem recursion_0 : forall {A} (a : A) (f : nat -> A -> A), recursion a f 0 = a. Proof. reflexivity. Qed. Theorem recursion_succ : forall {A} (Aeq : relation A) (a : A) (f : nat -> A -> A), Aeq a a -> Proper (eq==>Aeq==>Aeq) f -> forall n : nat, Aeq (recursion a f (S n)) (f n (recursion a f n)). Proof. unfold Proper, respectful in *; induction n; simpl; auto. Qed. (** The instantiation of operations. Placing them at the very end avoids having indirections in above lemmas. *) Definition t := nat. Definition eq := @eq nat. Definition eqb := beq_nat. Definition compare := nat_compare. Definition zero := 0. Definition one := 1. Definition two := 2. Definition succ := S. Definition pred := pred. Definition add := plus. Definition sub := minus. Definition mul := mult. Definition lt := lt. Definition le := le. Definition ltb := ltb. Definition leb := leb. Definition min := min. Definition max := max. Definition max_l := max_l. Definition max_r := max_r. Definition min_l := min_l. Definition min_r := min_r. Definition eqb_eq := beq_nat_true_iff. Definition compare_spec := nat_compare_spec. Definition eq_dec := eq_nat_dec. Definition leb_le := leb_le. Definition ltb_lt := ltb_lt. Definition Even := Even. Definition Odd := Odd. Definition even := even. Definition odd := odd. Definition even_spec := even_spec. Definition odd_spec := odd_spec. Program Instance pow_wd : Proper (eq==>eq==>eq) pow. Definition pow_0_r := pow_0_r. Definition pow_succ_r := pow_succ_r. Lemma pow_neg_r : forall a b, b<0 -> a^b = 0. inversion 1. Qed. Definition pow := pow. Definition square := square. Definition square_spec := square_spec. Definition log2_spec := log2_spec. Definition log2_nonpos := log2_nonpos. Definition log2 := log2. Definition sqrt_spec a (Ha:0<=a) := sqrt_spec a. Lemma sqrt_neg : forall a, a<0 -> sqrt a = 0. inversion 1. Qed. Definition sqrt := sqrt. Definition div := div. Definition modulo := modulo. Program Instance div_wd : Proper (eq==>eq==>eq) div. Program Instance mod_wd : Proper (eq==>eq==>eq) modulo. Definition div_mod := div_mod. Definition mod_bound_pos := mod_bound_pos. Definition divide := divide. Definition gcd := gcd. Definition gcd_divide_l := gcd_divide_l. Definition gcd_divide_r := gcd_divide_r. Definition gcd_greatest := gcd_greatest. Lemma gcd_nonneg : forall a b, 0<=gcd a b. Proof. intros. apply le_O_n. Qed. Definition testbit := testbit. Definition shiftl := shiftl. Definition shiftr := shiftr. Definition lxor := lxor. Definition land := land. Definition lor := lor. Definition ldiff := ldiff. Definition div2 := div2. Program Instance testbit_wd : Proper (eq==>eq==>Logic.eq) testbit. Definition testbit_odd_0 := testbit_odd_0. Definition testbit_even_0 := testbit_even_0. Definition testbit_odd_succ a n (_:0<=n) := testbit_odd_succ a n. 
Definition testbit_even_succ a n (_:0<=n) := testbit_even_succ a n. Lemma testbit_neg_r a n (H:n<0) : testbit a n = false. Proof. inversion H. Qed. Definition shiftl_spec_low := shiftl_spec_low. Definition shiftl_spec_high a n m (_:0<=m) := shiftl_spec_high a n m. Definition shiftr_spec a n m (_:0<=m) := shiftr_spec a n m. Definition lxor_spec := lxor_spec. Definition land_spec := land_spec. Definition lor_spec := lor_spec. Definition ldiff_spec := ldiff_spec. Definition div2_spec a : div2 a = shiftr a 1 := eq_refl _. (** Generic Properties *) Include NProp <+ UsualMinMaxLogicalProperties <+ UsualMinMaxDecProperties. End Nat. (** [Nat] contains an [order] tactic for natural numbers *) (** Note that [Nat.order] is domain-agnostic: it will not prove [1<=2] or [x<=x+x], but rather things like [x<=y -> y<=x -> x=y]. *) Section TestOrder. Let test : forall x y, x<=y -> y<=x -> x=y. Proof. Nat.order. Qed. End TestOrder. coq-8.4pl2/theories/Numbers/Natural/Abstract/0000750000175000001440000000000012127276550020243 5ustar notinuserscoq-8.4pl2/theories/Numbers/Natural/Abstract/NGcd.v0000640000175000001440000001567312010532755021253 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n == 1 := divide_1_r_nonneg n (le_0_l n). Definition divide_antisym n m : (n | m) -> (m | n) -> n == m := divide_antisym_nonneg n m (le_0_l n) (le_0_l m). Lemma divide_add_cancel_r : forall n m p, (n | m) -> (n | m + p) -> (n | p). Proof. intros n m p (q,Hq) (r,Hr). exists (r-q). rewrite mul_sub_distr_r, <- Hq, <- Hr. now rewrite add_comm, add_sub. Qed. Lemma divide_sub_r : forall n m p, (n | m) -> (n | p) -> (n | m - p). Proof. intros n m p H H'. destruct (le_ge_cases m p) as [LE|LE]. apply sub_0_le in LE. rewrite LE. apply divide_0_r. apply divide_add_cancel_r with p; trivial. now rewrite add_comm, sub_add. Qed. (** Properties of gcd *) Definition gcd_0_l n : gcd 0 n == n := gcd_0_l_nonneg n (le_0_l n). Definition gcd_0_r n : gcd n 0 == n := gcd_0_r_nonneg n (le_0_l n). Definition gcd_diag n : gcd n n == n := gcd_diag_nonneg n (le_0_l n). Definition gcd_unique' n m p := gcd_unique n m p (le_0_l p). Definition gcd_unique_alt' n m p := gcd_unique_alt n m p (le_0_l p). Definition divide_gcd_iff' n m := divide_gcd_iff n m (le_0_l n). Lemma gcd_add_mult_diag_r : forall n m p, gcd n (m+p*n) == gcd n m. Proof. intros. apply gcd_unique_alt'. intros. rewrite gcd_divide_iff. split; intros (U,V); split; trivial. apply divide_add_r; trivial. now apply divide_mul_r. apply divide_add_cancel_r with (p*n); trivial. now apply divide_mul_r. now rewrite add_comm. Qed. Lemma gcd_add_diag_r : forall n m, gcd n (m+n) == gcd n m. Proof. intros n m. rewrite <- (mul_1_l n) at 2. apply gcd_add_mult_diag_r. Qed. Lemma gcd_sub_diag_r : forall n m, n<=m -> gcd n (m-n) == gcd n m. Proof. intros n m H. symmetry. rewrite <- (sub_add n m H) at 1. apply gcd_add_diag_r. Qed. (** On natural numbers, we should use a particular form for the Bezout identity, since we don't have full subtraction. *) Definition Bezout n m p := exists a b, a*n == p + b*m. Instance Bezout_wd : Proper (eq==>eq==>eq==>iff) Bezout. Proof. unfold Bezout. intros x x' Hx y y' Hy z z' Hz. setoid_rewrite Hx. setoid_rewrite Hy. now setoid_rewrite Hz. Qed. Lemma bezout_1_gcd : forall n m, Bezout n m 1 -> gcd n m == 1. Proof. intros n m (q & r & H). apply gcd_unique; trivial using divide_1_l, le_0_1. intros p Hn Hm. apply divide_add_cancel_r with (r*m). now apply divide_mul_r. 
rewrite add_comm, <- H. now apply divide_mul_r. Qed. (** For strictly positive numbers, we have Bezout in the two directions. *) Lemma gcd_bezout_pos_pos : forall n, 0 forall m, 0 Bezout n m (gcd n m) /\ Bezout m n (gcd n m). Proof. intros n Hn. rewrite <- le_succ_l, <- one_succ in Hn. pattern n. apply strong_right_induction with (z:=1); trivial. unfold Bezout. solve_proper. clear n Hn. intros n Hn IHn. intros m Hm. rewrite <- le_succ_l, <- one_succ in Hm. pattern m. apply strong_right_induction with (z:=1); trivial. unfold Bezout. solve_proper. clear m Hm. intros m Hm IHm. destruct (lt_trichotomy n m) as [LT|[EQ|LT]]. (* n < m *) destruct (IHm (m-n)) as ((a & b & EQ), (a' & b' & EQ')). rewrite one_succ, le_succ_l. apply lt_add_lt_sub_l; now nzsimpl. apply sub_lt; order'. split. exists (a+b). exists b. rewrite mul_add_distr_r, EQ, mul_sub_distr_l, <- add_assoc. rewrite gcd_sub_diag_r by order. rewrite sub_add. reflexivity. apply mul_le_mono_l; order. exists a'. exists (a'+b'). rewrite gcd_sub_diag_r in EQ' by order. rewrite (add_comm a'), mul_add_distr_r, add_assoc, <- EQ'. rewrite mul_sub_distr_l, sub_add. reflexivity. apply mul_le_mono_l; order. (* n = m *) rewrite EQ. rewrite gcd_diag. split. exists 1. exists 0. now nzsimpl. exists 1. exists 0. now nzsimpl. (* m < n *) rewrite gcd_comm, and_comm. apply IHn; trivial. now rewrite <- le_succ_l, <- one_succ. Qed. Lemma gcd_bezout_pos : forall n m, 0 Bezout n m (gcd n m). Proof. intros n m Hn. destruct (eq_0_gt_0_cases m) as [EQ|LT]. rewrite EQ, gcd_0_r. exists 1. exists 0. now nzsimpl. now apply gcd_bezout_pos_pos. Qed. (** For arbitrary natural numbers, we could only say that at least one of the Bezout identities holds. *) Lemma gcd_bezout : forall n m, Bezout n m (gcd n m) \/ Bezout m n (gcd n m). Proof. intros n m. destruct (eq_0_gt_0_cases n) as [EQ|LT]. right. rewrite EQ, gcd_0_l. exists 1. exists 0. now nzsimpl. left. now apply gcd_bezout_pos. Qed. Lemma gcd_mul_mono_l : forall n m p, gcd (p * n) (p * m) == p * gcd n m. Proof. intros n m p. apply gcd_unique'. apply mul_divide_mono_l, gcd_divide_l. apply mul_divide_mono_l, gcd_divide_r. intros q H H'. destruct (eq_0_gt_0_cases n) as [EQ|LT]. rewrite EQ in *. now rewrite gcd_0_l. destruct (gcd_bezout_pos n m) as (a & b & EQ); trivial. apply divide_add_cancel_r with (p*m*b). now apply divide_mul_l. rewrite <- mul_assoc, <- mul_add_distr_l, add_comm, (mul_comm m), <- EQ. rewrite (mul_comm a), mul_assoc. now apply divide_mul_l. Qed. Lemma gcd_mul_mono_r : forall n m p, gcd (n*p) (m*p) == gcd n m * p. Proof. intros. rewrite !(mul_comm _ p). apply gcd_mul_mono_l. Qed. Lemma gauss : forall n m p, (n | m * p) -> gcd n m == 1 -> (n | p). Proof. intros n m p H G. destruct (eq_0_gt_0_cases n) as [EQ|LT]. rewrite EQ in *. rewrite gcd_0_l in G. now rewrite <- (mul_1_l p), <- G. destruct (gcd_bezout_pos n m) as (a & b & EQ); trivial. rewrite G in EQ. apply divide_add_cancel_r with (m*p*b). now apply divide_mul_l. rewrite (mul_comm _ b), mul_assoc. rewrite <- (mul_1_l p) at 2. rewrite <- mul_add_distr_r, add_comm, <- EQ. now apply divide_mul_l, divide_factor_r. Qed. Lemma divide_mul_split : forall n m p, n ~= 0 -> (n | m * p) -> exists q r, n == q*r /\ (q | m) /\ (r | p). Proof. intros n m p Hn H. assert (G := gcd_nonneg n m). le_elim G. destruct (gcd_divide_l n m) as (q,Hq). exists (gcd n m). exists q. split. now rewrite mul_comm. split. apply gcd_divide_r. destruct (gcd_divide_r n m) as (r,Hr). rewrite Hr in H. rewrite Hq in H at 1. rewrite mul_shuffle0 in H. 
apply mul_divide_cancel_r in H; [|order]. apply gauss with r; trivial. apply mul_cancel_r with (gcd n m); [order|]. rewrite mul_1_l. rewrite <- gcd_mul_mono_r, <- Hq, <- Hr; order. symmetry in G. apply gcd_eq_0 in G. destruct G as (Hn',_); order. Qed. (** TODO : relation between gcd and division and modulo *) (** TODO : more about rel_prime (i.e. gcd == 1), about prime ... *) End NGcdProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NAxioms.v0000640000175000001440000000526712010532755022014 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* a mod b < b. End NDivSpecific. (** For all other functions, the NZ axiomatizations are enough. *) (** We now group everything together. *) Module Type NAxiomsSig := NAxiomsMiniSig <+ OrderFunctions <+ NZParity.NZParity <+ NZPow.NZPow <+ NZSqrt.NZSqrt <+ NZLog.NZLog2 <+ NZGcd.NZGcd <+ NZDiv.NZDiv <+ NZBits.NZBits <+ NZSquare. Module Type NAxiomsSig' := NAxiomsMiniSig' <+ OrderFunctions' <+ NZParity.NZParity <+ NZPow.NZPow' <+ NZSqrt.NZSqrt' <+ NZLog.NZLog2 <+ NZGcd.NZGcd' <+ NZDiv.NZDiv' <+ NZBits.NZBits' <+ NZSquare. (** It could also be interesting to have a constructive recursor function. *) Module Type NAxiomsRec (Import NZ : NZDomainSig'). Parameter Inline recursion : forall {A : Type}, A -> (t -> A -> A) -> t -> A. Declare Instance recursion_wd {A : Type} (Aeq : relation A) : Proper (Aeq ==> (eq==>Aeq==>Aeq) ==> eq ==> Aeq) recursion. Axiom recursion_0 : forall {A} (a : A) (f : t -> A -> A), recursion a f 0 = a. Axiom recursion_succ : forall {A} (Aeq : relation A) (a : A) (f : t -> A -> A), Aeq a a -> Proper (eq==>Aeq==>Aeq) f -> forall n, Aeq (recursion a f (S n)) (f n (recursion a f n)). End NAxiomsRec. Module Type NAxiomsRecSig := NAxiomsMiniSig <+ NAxiomsRec. Module Type NAxiomsRecSig' := NAxiomsMiniSig' <+ NAxiomsRec. Module Type NAxiomsFullSig := NAxiomsSig <+ NAxiomsRec. Module Type NAxiomsFullSig' := NAxiomsSig' <+ NAxiomsRec. coq-8.4pl2/theories/Numbers/Natural/Abstract/NBits.v0000640000175000001440000012065112010532755021450 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* c<=b -> a^(b-c) == a^b / a^c. Proof. intros a b c Ha H. apply div_unique with 0. generalize (pow_nonzero a c Ha) (le_0_l (a^c)); order'. nzsimpl. now rewrite <- pow_add_r, add_comm, sub_add. Qed. Lemma pow_div_l : forall a b c, b~=0 -> a mod b == 0 -> (a/b)^c == a^c / b^c. Proof. intros a b c Hb H. apply div_unique with 0. generalize (pow_nonzero b c Hb) (le_0_l (b^c)); order'. nzsimpl. rewrite <- pow_mul_l. f_equiv. now apply div_exact. Qed. (** An injection from bits [true] and [false] to numbers 1 and 0. We declare it as a (local) coercion for shorter statements. *) Definition b2n (b:bool) := if b then 1 else 0. Local Coercion b2n : bool >-> t. Instance b2n_proper : Proper (Logic.eq ==> eq) b2n. Proof. solve_proper. Qed. Lemma exists_div2 a : exists a' (b:bool), a == 2*a' + b. Proof. elim (Even_or_Odd a); [intros (a',H)| intros (a',H)]. exists a'. exists false. now nzsimpl. exists a'. exists true. now simpl. Qed. (** We can compact [testbit_odd_0] [testbit_even_0] [testbit_even_succ] [testbit_odd_succ] in only two lemmas. *) Lemma testbit_0_r a (b:bool) : testbit (2*a+b) 0 = b. Proof. destruct b; simpl; rewrite ?add_0_r. apply testbit_odd_0. apply testbit_even_0. Qed. Lemma testbit_succ_r a (b:bool) n : testbit (2*a+b) (succ n) = testbit a n. Proof. 
destruct b; simpl; rewrite ?add_0_r. apply testbit_odd_succ, le_0_l. apply testbit_even_succ, le_0_l. Qed. (** Alternative caracterisations of [testbit] *) (** This concise equation could have been taken as specification for testbit in the interface, but it would have been hard to implement with little initial knowledge about div and mod *) Lemma testbit_spec' a n : a.[n] == (a / 2^n) mod 2. Proof. revert a. induct n. intros a. nzsimpl. destruct (exists_div2 a) as (a' & b & H). rewrite H at 1. rewrite testbit_0_r. apply mod_unique with a'; trivial. destruct b; order'. intros n IH a. destruct (exists_div2 a) as (a' & b & H). rewrite H at 1. rewrite testbit_succ_r, IH. f_equiv. rewrite pow_succ_r', <- div_div by order_nz. f_equiv. apply div_unique with b; trivial. destruct b; order'. Qed. (** This caracterisation that uses only basic operations and power was initially taken as specification for testbit. We describe [a] as having a low part and a high part, with the corresponding bit in the middle. This caracterisation is moderatly complex to implement, but also moderately usable... *) Lemma testbit_spec a n : exists l h, 0<=l<2^n /\ a == l + (a.[n] + 2*h)*2^n. Proof. exists (a mod 2^n). exists (a / 2^n / 2). split. split; [apply le_0_l | apply mod_upper_bound; order_nz]. rewrite add_comm, mul_comm, (add_comm a.[n]). rewrite (div_mod a (2^n)) at 1 by order_nz. do 2 f_equiv. rewrite testbit_spec'. apply div_mod. order'. Qed. Lemma testbit_true : forall a n, a.[n] = true <-> (a / 2^n) mod 2 == 1. Proof. intros a n. rewrite <- testbit_spec'; destruct a.[n]; split; simpl; now try order'. Qed. Lemma testbit_false : forall a n, a.[n] = false <-> (a / 2^n) mod 2 == 0. Proof. intros a n. rewrite <- testbit_spec'; destruct a.[n]; split; simpl; now try order'. Qed. Lemma testbit_eqb : forall a n, a.[n] = eqb ((a / 2^n) mod 2) 1. Proof. intros a n. apply eq_true_iff_eq. now rewrite testbit_true, eqb_eq. Qed. (** Results about the injection [b2n] *) Lemma b2n_inj : forall (a0 b0:bool), a0 == b0 -> a0 = b0. Proof. intros [|] [|]; simpl; trivial; order'. Qed. Lemma add_b2n_double_div2 : forall (a0:bool) a, (a0+2*a)/2 == a. Proof. intros a0 a. rewrite mul_comm, div_add by order'. now rewrite div_small, add_0_l by (destruct a0; order'). Qed. Lemma add_b2n_double_bit0 : forall (a0:bool) a, (a0+2*a).[0] = a0. Proof. intros a0 a. apply b2n_inj. rewrite testbit_spec'. nzsimpl. rewrite mul_comm, mod_add by order'. now rewrite mod_small by (destruct a0; order'). Qed. Lemma b2n_div2 : forall (a0:bool), a0/2 == 0. Proof. intros a0. rewrite <- (add_b2n_double_div2 a0 0). now nzsimpl. Qed. Lemma b2n_bit0 : forall (a0:bool), a0.[0] = a0. Proof. intros a0. rewrite <- (add_b2n_double_bit0 a0 0) at 2. now nzsimpl. Qed. (** The specification of testbit by low and high parts is complete *) Lemma testbit_unique : forall a n (a0:bool) l h, l<2^n -> a == l + (a0 + 2*h)*2^n -> a.[n] = a0. Proof. intros a n a0 l h Hl EQ. apply b2n_inj. rewrite testbit_spec' by trivial. symmetry. apply mod_unique with h. destruct a0; simpl; order'. symmetry. apply div_unique with l; trivial. now rewrite add_comm, (add_comm _ a0), mul_comm. Qed. (** All bits of number 0 are 0 *) Lemma bits_0 : forall n, 0.[n] = false. Proof. intros n. apply testbit_false. nzsimpl; order_nz. Qed. (** Various ways to refer to the lowest bit of a number *) Lemma bit0_odd : forall a, a.[0] = odd a. Proof. intros. symmetry. destruct (exists_div2 a) as (a' & b & EQ). rewrite EQ, testbit_0_r, add_comm, odd_add_mul_2. destruct b; simpl; apply odd_1 || apply odd_0. Qed. 
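(* Illustrative note: as a concrete reading of [testbit], take a number
   whose value is 13 = 1 + 2*(0 + 2*(1 + 2*1)); then bit 0 is [true]
   (13 is odd, cf. [bit0_odd]), bit 1 is [false], bits 2 and 3 are [true],
   and every bit beyond [log2 13 = 3] is [false]
   (cf. [bits_above_log2] below). *)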
Lemma bit0_eqb : forall a, a.[0] = eqb (a mod 2) 1. Proof. intros a. rewrite testbit_eqb. now nzsimpl. Qed. Lemma bit0_mod : forall a, a.[0] == a mod 2. Proof. intros a. rewrite testbit_spec'. now nzsimpl. Qed. (** Hence testing a bit is equivalent to shifting and testing parity *) Lemma testbit_odd : forall a n, a.[n] = odd (a>>n). Proof. intros. now rewrite <- bit0_odd, shiftr_spec, add_0_l. Qed. (** [log2] gives the highest nonzero bit *) Lemma bit_log2 : forall a, a~=0 -> a.[log2 a] = true. Proof. intros a Ha. assert (Ha' : 0 < a) by (generalize (le_0_l a); order). destruct (log2_spec_alt a Ha') as (r & EQ & (_,Hr)). rewrite EQ at 1. rewrite testbit_true, add_comm. rewrite <- (mul_1_l (2^log2 a)) at 1. rewrite div_add by order_nz. rewrite div_small by trivial. rewrite add_0_l. apply mod_small. order'. Qed. Lemma bits_above_log2 : forall a n, log2 a < n -> a.[n] = false. Proof. intros a n H. rewrite testbit_false. rewrite div_small. nzsimpl; order'. apply log2_lt_cancel. rewrite log2_pow2; trivial using le_0_l. Qed. (** Hence the number of bits of [a] is [1+log2 a] (see [Pos.size_nat] and [Pos.size]). *) (** Testing bits after division or multiplication by a power of two *) Lemma div2_bits : forall a n, (a/2).[n] = a.[S n]. Proof. intros. apply eq_true_iff_eq. rewrite 2 testbit_true. rewrite pow_succ_r by apply le_0_l. now rewrite div_div by order_nz. Qed. Lemma div_pow2_bits : forall a n m, (a/2^n).[m] = a.[m+n]. Proof. intros a n. revert a. induct n. intros a m. now nzsimpl. intros n IH a m. nzsimpl; try apply le_0_l. rewrite <- div_div by order_nz. now rewrite IH, div2_bits. Qed. Lemma double_bits_succ : forall a n, (2*a).[S n] = a.[n]. Proof. intros. rewrite <- div2_bits. now rewrite mul_comm, div_mul by order'. Qed. Lemma mul_pow2_bits_add : forall a n m, (a*2^n).[m+n] = a.[m]. Proof. intros. rewrite <- div_pow2_bits. now rewrite div_mul by order_nz. Qed. Lemma mul_pow2_bits_high : forall a n m, n<=m -> (a*2^n).[m] = a.[m-n]. Proof. intros. rewrite <- (sub_add n m) at 1 by order'. now rewrite mul_pow2_bits_add. Qed. Lemma mul_pow2_bits_low : forall a n m, m (a*2^n).[m] = false. Proof. intros. apply testbit_false. rewrite <- (sub_add m n) by order'. rewrite pow_add_r, mul_assoc. rewrite div_mul by order_nz. rewrite <- (succ_pred (n-m)). rewrite pow_succ_r. now rewrite (mul_comm 2), mul_assoc, mod_mul by order'. apply lt_le_pred. apply sub_gt in H. generalize (le_0_l (n-m)); order. now apply sub_gt. Qed. (** Selecting the low part of a number can be done by a modulo *) Lemma mod_pow2_bits_high : forall a n m, n<=m -> (a mod 2^n).[m] = false. Proof. intros a n m H. destruct (eq_0_gt_0_cases (a mod 2^n)) as [EQ|LT]. now rewrite EQ, bits_0. apply bits_above_log2. apply lt_le_trans with n; trivial. apply log2_lt_pow2; trivial. apply mod_upper_bound; order_nz. Qed. Lemma mod_pow2_bits_low : forall a n m, m (a mod 2^n).[m] = a.[m]. Proof. intros a n m H. rewrite testbit_eqb. rewrite <- (mod_add _ (2^(P (n-m))*(a/2^n))) by order'. rewrite <- div_add by order_nz. rewrite (mul_comm _ 2), mul_assoc, <- pow_succ_r', succ_pred by now apply sub_gt. rewrite mul_comm, mul_assoc, <- pow_add_r, (add_comm m), sub_add by order. rewrite add_comm, <- div_mod by order_nz. symmetry. apply testbit_eqb. Qed. (** We now prove that having the same bits implies equality. For that we use a notion of equality over functional streams of bits. *) Definition eqf (f g:t -> bool) := forall n:t, f n = g n. Instance eqf_equiv : Equivalence eqf. Proof. split; congruence. Qed. 
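(** Illustrative note (not part of the original development): [eqf] is just
    pointwise (extensional) equality of bit streams.  The lemma [bits_inj]
    proved below turns it into an extensionality principle for numbers: for
    example, any [a] whose stream of bits is [true], [true], [false], [false],
    ... (all further bits [false]) must satisfy [a == 3].  The proof peels off
    the low bit with [bit0_mod] and recurses on [a/2]. *)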
Local Infix "===" := eqf (at level 70, no associativity). Instance testbit_eqf : Proper (eq==>eqf) testbit. Proof. intros a a' Ha n. now rewrite Ha. Qed. (** Only zero corresponds to the always-false stream. *) Lemma bits_inj_0 : forall a, (forall n, a.[n] = false) -> a == 0. Proof. intros a H. destruct (eq_decidable a 0) as [EQ|NEQ]; trivial. apply bit_log2 in NEQ. now rewrite H in NEQ. Qed. (** If two numbers produce the same stream of bits, they are equal. *) Lemma bits_inj : forall a b, testbit a === testbit b -> a == b. Proof. intros a. pattern a. apply strong_right_induction with 0;[solve_proper|clear a|apply le_0_l]. intros a _ IH b H. destruct (eq_0_gt_0_cases a) as [EQ|LT]. rewrite EQ in H |- *. symmetry. apply bits_inj_0. intros n. now rewrite <- H, bits_0. rewrite (div_mod a 2), (div_mod b 2) by order'. f_equiv; [ | now rewrite <- 2 bit0_mod, H]. f_equiv. apply IH; trivial using le_0_l. apply div_lt; order'. intro n. rewrite 2 div2_bits. apply H. Qed. Lemma bits_inj_iff : forall a b, testbit a === testbit b <-> a == b. Proof. split. apply bits_inj. intros EQ; now rewrite EQ. Qed. Hint Rewrite lxor_spec lor_spec land_spec ldiff_spec bits_0 : bitwise. Ltac bitwise := apply bits_inj; intros ?m; autorewrite with bitwise. (** The streams of bits that correspond to a natural numbers are exactly the ones that are always 0 after some point *) Lemma are_bits : forall (f:t->bool), Proper (eq==>Logic.eq) f -> ((exists n, f === testbit n) <-> (exists k, forall m, k<=m -> f m = false)). Proof. intros f Hf. split. intros (a,H). exists (S (log2 a)). intros m Hm. apply le_succ_l in Hm. rewrite H, bits_above_log2; trivial using lt_succ_diag_r. intros (k,Hk). revert f Hf Hk. induct k. intros f Hf H0. exists 0. intros m. rewrite bits_0, H0; trivial. apply le_0_l. intros k IH f Hf Hk. destruct (IH (fun m => f (S m))) as (n, Hn). solve_proper. intros m Hm. apply Hk. now rewrite <- succ_le_mono. exists (f 0 + 2*n). intros m. destruct (zero_or_succ m) as [Hm|(m', Hm)]; rewrite Hm. symmetry. apply add_b2n_double_bit0. rewrite Hn, <- div2_bits. rewrite mul_comm, div_add, b2n_div2, add_0_l; trivial. order'. Qed. (** Properties of shifts *) Lemma shiftr_spec' : forall a n m, (a >> n).[m] = a.[m+n]. Proof. intros. apply shiftr_spec. apply le_0_l. Qed. Lemma shiftl_spec_high' : forall a n m, n<=m -> (a << n).[m] = a.[m-n]. Proof. intros. apply shiftl_spec_high; trivial. apply le_0_l. Qed. Lemma shiftr_div_pow2 : forall a n, a >> n == a / 2^n. Proof. intros. bitwise. rewrite shiftr_spec'. symmetry. apply div_pow2_bits. Qed. Lemma shiftl_mul_pow2 : forall a n, a << n == a * 2^n. Proof. intros. bitwise. destruct (le_gt_cases n m) as [H|H]. now rewrite shiftl_spec_high', mul_pow2_bits_high. now rewrite shiftl_spec_low, mul_pow2_bits_low. Qed. Lemma shiftl_spec_alt : forall a n m, (a << n).[m+n] = a.[m]. Proof. intros. now rewrite shiftl_mul_pow2, mul_pow2_bits_add. Qed. Instance shiftr_wd : Proper (eq==>eq==>eq) shiftr. Proof. intros a a' Ha b b' Hb. now rewrite 2 shiftr_div_pow2, Ha, Hb. Qed. Instance shiftl_wd : Proper (eq==>eq==>eq) shiftl. Proof. intros a a' Ha b b' Hb. now rewrite 2 shiftl_mul_pow2, Ha, Hb. Qed. Lemma shiftl_shiftl : forall a n m, (a << n) << m == a << (n+m). Proof. intros. now rewrite !shiftl_mul_pow2, pow_add_r, mul_assoc. Qed. Lemma shiftr_shiftr : forall a n m, (a >> n) >> m == a >> (n+m). Proof. intros. now rewrite !shiftr_div_pow2, pow_add_r, div_div by order_nz. Qed. Lemma shiftr_shiftl_l : forall a n m, m<=n -> (a << n) >> m == a << (n-m). Proof. intros. 
rewrite shiftr_div_pow2, !shiftl_mul_pow2. rewrite <- (sub_add m n) at 1 by trivial. now rewrite pow_add_r, mul_assoc, div_mul by order_nz. Qed. Lemma shiftr_shiftl_r : forall a n m, n<=m -> (a << n) >> m == a >> (m-n). Proof. intros. rewrite !shiftr_div_pow2, shiftl_mul_pow2. rewrite <- (sub_add n m) at 1 by trivial. rewrite pow_add_r, (mul_comm (2^(m-n))). now rewrite <- div_div, div_mul by order_nz. Qed. (** shifts and constants *) Lemma shiftl_1_l : forall n, 1 << n == 2^n. Proof. intros. now rewrite shiftl_mul_pow2, mul_1_l. Qed. Lemma shiftl_0_r : forall a, a << 0 == a. Proof. intros. rewrite shiftl_mul_pow2. now nzsimpl. Qed. Lemma shiftr_0_r : forall a, a >> 0 == a. Proof. intros. rewrite shiftr_div_pow2. now nzsimpl. Qed. Lemma shiftl_0_l : forall n, 0 << n == 0. Proof. intros. rewrite shiftl_mul_pow2. now nzsimpl. Qed. Lemma shiftr_0_l : forall n, 0 >> n == 0. Proof. intros. rewrite shiftr_div_pow2. nzsimpl; order_nz. Qed. Lemma shiftl_eq_0_iff : forall a n, a << n == 0 <-> a == 0. Proof. intros a n. rewrite shiftl_mul_pow2. rewrite eq_mul_0. split. intros [H | H]; trivial. contradict H; order_nz. intros H. now left. Qed. Lemma shiftr_eq_0_iff : forall a n, a >> n == 0 <-> a==0 \/ (0 a >> n == 0. Proof. intros a n H. rewrite shiftr_eq_0_iff. destruct (eq_0_gt_0_cases a) as [EQ|LT]. now left. right; now split. Qed. (** Properties of [div2]. *) Lemma div2_div : forall a, div2 a == a/2. Proof. intros. rewrite div2_spec, shiftr_div_pow2. now nzsimpl. Qed. Instance div2_wd : Proper (eq==>eq) div2. Proof. intros a a' Ha. now rewrite 2 div2_div, Ha. Qed. Lemma div2_odd : forall a, a == 2*(div2 a) + odd a. Proof. intros a. rewrite div2_div, <- bit0_odd, bit0_mod. apply div_mod. order'. Qed. (** Properties of [lxor] and others, directly deduced from properties of [xorb] and others. *) Instance lxor_wd : Proper (eq ==> eq ==> eq) lxor. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Instance land_wd : Proper (eq ==> eq ==> eq) land. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Instance lor_wd : Proper (eq ==> eq ==> eq) lor. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Instance ldiff_wd : Proper (eq ==> eq ==> eq) ldiff. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Lemma lxor_eq : forall a a', lxor a a' == 0 -> a == a'. Proof. intros a a' H. bitwise. apply xorb_eq. now rewrite <- lxor_spec, H, bits_0. Qed. Lemma lxor_nilpotent : forall a, lxor a a == 0. Proof. intros. bitwise. apply xorb_nilpotent. Qed. Lemma lxor_eq_0_iff : forall a a', lxor a a' == 0 <-> a == a'. Proof. split. apply lxor_eq. intros EQ; rewrite EQ; apply lxor_nilpotent. Qed. Lemma lxor_0_l : forall a, lxor 0 a == a. Proof. intros. bitwise. apply xorb_false_l. Qed. Lemma lxor_0_r : forall a, lxor a 0 == a. Proof. intros. bitwise. apply xorb_false_r. Qed. Lemma lxor_comm : forall a b, lxor a b == lxor b a. Proof. intros. bitwise. apply xorb_comm. Qed. Lemma lxor_assoc : forall a b c, lxor (lxor a b) c == lxor a (lxor b c). Proof. intros. bitwise. apply xorb_assoc. Qed. Lemma lor_0_l : forall a, lor 0 a == a. Proof. intros. bitwise. trivial. Qed. Lemma lor_0_r : forall a, lor a 0 == a. Proof. intros. bitwise. apply orb_false_r. Qed. Lemma lor_comm : forall a b, lor a b == lor b a. Proof. intros. bitwise. apply orb_comm. Qed. Lemma lor_assoc : forall a b c, lor a (lor b c) == lor (lor a b) c. Proof. intros. bitwise. apply orb_assoc. Qed. Lemma lor_diag : forall a, lor a a == a. Proof. intros. bitwise. apply orb_diag. Qed. 
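(** Illustrative note (not part of the original development): since these
    operations are defined bitwise, the familiar truth-table computations
    apply digit by digit.  For instance with 12 = 0b1100 and 10 = 0b1010:
    [land 12 10 == 8] (0b1000), [lor 12 10 == 14] (0b1110) and
    [lxor 12 10 == 6] (0b0110); likewise [lxor 10 10 == 0] is an instance
    of [lxor_nilpotent] above. *)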
Lemma lor_eq_0_l : forall a b, lor a b == 0 -> a == 0. Proof. intros a b H. bitwise. apply (orb_false_iff a.[m] b.[m]). now rewrite <- lor_spec, H, bits_0. Qed. Lemma lor_eq_0_iff : forall a b, lor a b == 0 <-> a == 0 /\ b == 0. Proof. intros a b. split. split. now apply lor_eq_0_l in H. rewrite lor_comm in H. now apply lor_eq_0_l in H. intros (EQ,EQ'). now rewrite EQ, lor_0_l. Qed. Lemma land_0_l : forall a, land 0 a == 0. Proof. intros. bitwise. trivial. Qed. Lemma land_0_r : forall a, land a 0 == 0. Proof. intros. bitwise. apply andb_false_r. Qed. Lemma land_comm : forall a b, land a b == land b a. Proof. intros. bitwise. apply andb_comm. Qed. Lemma land_assoc : forall a b c, land a (land b c) == land (land a b) c. Proof. intros. bitwise. apply andb_assoc. Qed. Lemma land_diag : forall a, land a a == a. Proof. intros. bitwise. apply andb_diag. Qed. Lemma ldiff_0_l : forall a, ldiff 0 a == 0. Proof. intros. bitwise. trivial. Qed. Lemma ldiff_0_r : forall a, ldiff a 0 == a. Proof. intros. bitwise. now rewrite andb_true_r. Qed. Lemma ldiff_diag : forall a, ldiff a a == 0. Proof. intros. bitwise. apply andb_negb_r. Qed. Lemma lor_land_distr_l : forall a b c, lor (land a b) c == land (lor a c) (lor b c). Proof. intros. bitwise. apply orb_andb_distrib_l. Qed. Lemma lor_land_distr_r : forall a b c, lor a (land b c) == land (lor a b) (lor a c). Proof. intros. bitwise. apply orb_andb_distrib_r. Qed. Lemma land_lor_distr_l : forall a b c, land (lor a b) c == lor (land a c) (land b c). Proof. intros. bitwise. apply andb_orb_distrib_l. Qed. Lemma land_lor_distr_r : forall a b c, land a (lor b c) == lor (land a b) (land a c). Proof. intros. bitwise. apply andb_orb_distrib_r. Qed. Lemma ldiff_ldiff_l : forall a b c, ldiff (ldiff a b) c == ldiff a (lor b c). Proof. intros. bitwise. now rewrite negb_orb, andb_assoc. Qed. Lemma lor_ldiff_and : forall a b, lor (ldiff a b) (land a b) == a. Proof. intros. bitwise. now rewrite <- andb_orb_distrib_r, orb_comm, orb_negb_r, andb_true_r. Qed. Lemma land_ldiff : forall a b, land (ldiff a b) b == 0. Proof. intros. bitwise. now rewrite <-andb_assoc, (andb_comm (negb _)), andb_negb_r, andb_false_r. Qed. (** Properties of [setbit] and [clearbit] *) Definition setbit a n := lor a (1<eq==>eq) setbit. Proof. unfold setbit. solve_proper. Qed. Instance clearbit_wd : Proper (eq==>eq==>eq) clearbit. Proof. unfold clearbit. solve_proper. Qed. Lemma pow2_bits_true : forall n, (2^n).[n] = true. Proof. intros. rewrite <- (mul_1_l (2^n)). rewrite <- (add_0_l n) at 2. now rewrite mul_pow2_bits_add, bit0_odd, odd_1. Qed. Lemma pow2_bits_false : forall n m, n~=m -> (2^n).[m] = false. Proof. intros. rewrite <- (mul_1_l (2^n)). destruct (le_gt_cases n m). rewrite mul_pow2_bits_high; trivial. rewrite <- (succ_pred (m-n)) by (apply sub_gt; order). now rewrite <- div2_bits, div_small, bits_0 by order'. rewrite mul_pow2_bits_low; trivial. Qed. Lemma pow2_bits_eqb : forall n m, (2^n).[m] = eqb n m. Proof. intros. apply eq_true_iff_eq. rewrite eqb_eq. split. destruct (eq_decidable n m) as [H|H]. trivial. now rewrite (pow2_bits_false _ _ H). intros EQ. rewrite EQ. apply pow2_bits_true. Qed. Lemma setbit_eqb : forall a n m, (setbit a n).[m] = eqb n m || a.[m]. Proof. intros. now rewrite setbit_spec', lor_spec, pow2_bits_eqb, orb_comm. Qed. Lemma setbit_iff : forall a n m, (setbit a n).[m] = true <-> n==m \/ a.[m] = true. Proof. intros. now rewrite setbit_eqb, orb_true_iff, eqb_eq. Qed. Lemma setbit_eq : forall a n, (setbit a n).[n] = true. Proof. intros. apply setbit_iff. now left. Qed. 
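(** Illustrative note (not part of the original development): on concrete
    values, [setbit] and [clearbit] behave as their names suggest.
    With 9 = 0b1001: [setbit 9 1 == 11] (0b1011, i.e. 9 + 2^1) and
    [clearbit 9 0 == 8] (0b1000), while [setbit 9 0 == 9] and
    [clearbit 9 1 == 9] leave the number unchanged, since the targeted bit
    already has the requested value. *)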
Lemma setbit_neq : forall a n m, n~=m -> (setbit a n).[m] = a.[m]. Proof. intros a n m H. rewrite setbit_eqb. rewrite <- eqb_eq in H. apply not_true_is_false in H. now rewrite H. Qed. Lemma clearbit_eqb : forall a n m, (clearbit a n).[m] = a.[m] && negb (eqb n m). Proof. intros. now rewrite clearbit_spec', ldiff_spec, pow2_bits_eqb. Qed. Lemma clearbit_iff : forall a n m, (clearbit a n).[m] = true <-> a.[m] = true /\ n~=m. Proof. intros. rewrite clearbit_eqb, andb_true_iff, <- eqb_eq. now rewrite negb_true_iff, not_true_iff_false. Qed. Lemma clearbit_eq : forall a n, (clearbit a n).[n] = false. Proof. intros. rewrite clearbit_eqb, (proj2 (eqb_eq _ _) (eq_refl n)). apply andb_false_r. Qed. Lemma clearbit_neq : forall a n m, n~=m -> (clearbit a n).[m] = a.[m]. Proof. intros a n m H. rewrite clearbit_eqb. rewrite <- eqb_eq in H. apply not_true_is_false in H. rewrite H. apply andb_true_r. Qed. (** Shifts of bitwise operations *) Lemma shiftl_lxor : forall a b n, (lxor a b) << n == lxor (a << n) (b << n). Proof. intros. bitwise. destruct (le_gt_cases n m). now rewrite !shiftl_spec_high', lxor_spec. now rewrite !shiftl_spec_low. Qed. Lemma shiftr_lxor : forall a b n, (lxor a b) >> n == lxor (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec', lxor_spec. Qed. Lemma shiftl_land : forall a b n, (land a b) << n == land (a << n) (b << n). Proof. intros. bitwise. destruct (le_gt_cases n m). now rewrite !shiftl_spec_high', land_spec. now rewrite !shiftl_spec_low. Qed. Lemma shiftr_land : forall a b n, (land a b) >> n == land (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec', land_spec. Qed. Lemma shiftl_lor : forall a b n, (lor a b) << n == lor (a << n) (b << n). Proof. intros. bitwise. destruct (le_gt_cases n m). now rewrite !shiftl_spec_high', lor_spec. now rewrite !shiftl_spec_low. Qed. Lemma shiftr_lor : forall a b n, (lor a b) >> n == lor (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec', lor_spec. Qed. Lemma shiftl_ldiff : forall a b n, (ldiff a b) << n == ldiff (a << n) (b << n). Proof. intros. bitwise. destruct (le_gt_cases n m). now rewrite !shiftl_spec_high', ldiff_spec. now rewrite !shiftl_spec_low. Qed. Lemma shiftr_ldiff : forall a b n, (ldiff a b) >> n == ldiff (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec', ldiff_spec. Qed. (** We cannot have a function complementing all bits of a number, otherwise it would have an infinity of bit 1. Nonetheless, we can design a bounded complement *) Definition ones n := P (1 << n). Definition lnot a n := lxor a (ones n). Instance ones_wd : Proper (eq==>eq) ones. Proof. unfold ones. solve_proper. Qed. Instance lnot_wd : Proper (eq==>eq==>eq) lnot. Proof. unfold lnot. solve_proper. Qed. Lemma ones_equiv : forall n, ones n == P (2^n). Proof. intros; unfold ones; now rewrite shiftl_1_l. Qed. Lemma ones_add : forall n m, ones (m+n) == 2^m * ones n + ones m. Proof. intros n m. rewrite !ones_equiv. rewrite <- !sub_1_r, mul_sub_distr_l, mul_1_r, <- pow_add_r. rewrite add_sub_assoc, sub_add. reflexivity. apply pow_le_mono_r. order'. rewrite <- (add_0_r m) at 1. apply add_le_mono_l, le_0_l. rewrite <- (pow_0_r 2). apply pow_le_mono_r. order'. apply le_0_l. Qed. Lemma ones_div_pow2 : forall n m, m<=n -> ones n / 2^m == ones (n-m). Proof. intros n m H. symmetry. apply div_unique with (ones m). rewrite ones_equiv. apply le_succ_l. rewrite succ_pred; order_nz. rewrite <- (sub_add m n H) at 1. rewrite (add_comm _ m). apply ones_add. Qed. 
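(** Illustrative note (not part of the original development): [ones n] is the
    number whose [n] low bits are all set, i.e. [2^n - 1].  For example
    [ones 4 == 15] (0b1111), and the bounded complement gives
    [lnot 9 4 == lxor 9 15 == 6] (0b1001 xor 0b1111 = 0b0110).
    [ones_div_pow2] above is also easy to check on this instance:
    [ones 4 / 2^2 == 15 / 4 == 3 == ones 2]. *)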
Lemma ones_mod_pow2 : forall n m, m<=n -> (ones n) mod (2^m) == ones m. Proof. intros n m H. symmetry. apply mod_unique with (ones (n-m)). rewrite ones_equiv. apply le_succ_l. rewrite succ_pred; order_nz. rewrite <- (sub_add m n H) at 1. rewrite (add_comm _ m). apply ones_add. Qed. Lemma ones_spec_low : forall n m, m (ones n).[m] = true. Proof. intros. apply testbit_true. rewrite ones_div_pow2 by order. rewrite <- (pow_1_r 2). rewrite ones_mod_pow2. rewrite ones_equiv. now nzsimpl'. apply le_add_le_sub_r. nzsimpl. now apply le_succ_l. Qed. Lemma ones_spec_high : forall n m, n<=m -> (ones n).[m] = false. Proof. intros. destruct (eq_0_gt_0_cases n) as [EQ|LT]; rewrite ones_equiv. now rewrite EQ, pow_0_r, one_succ, pred_succ, bits_0. apply bits_above_log2. rewrite log2_pred_pow2; trivial. rewrite <-le_succ_l, succ_pred; order. Qed. Lemma ones_spec_iff : forall n m, (ones n).[m] = true <-> m (lnot a n).[m] = negb a.[m]. Proof. intros. unfold lnot. now rewrite lxor_spec, ones_spec_low. Qed. Lemma lnot_spec_high : forall a n m, n<=m -> (lnot a n).[m] = a.[m]. Proof. intros. unfold lnot. now rewrite lxor_spec, ones_spec_high, xorb_false_r. Qed. Lemma lnot_involutive : forall a n, lnot (lnot a n) n == a. Proof. intros a n. bitwise. destruct (le_gt_cases n m). now rewrite 2 lnot_spec_high. now rewrite 2 lnot_spec_low, negb_involutive. Qed. Lemma lnot_0_l : forall n, lnot 0 n == ones n. Proof. intros. unfold lnot. apply lxor_0_l. Qed. Lemma lnot_ones : forall n, lnot (ones n) n == 0. Proof. intros. unfold lnot. apply lxor_nilpotent. Qed. (** Bounded complement and other operations *) Lemma lor_ones_low : forall a n, log2 a < n -> lor a (ones n) == ones n. Proof. intros a n H. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, bits_above_log2; trivial. now apply lt_le_trans with n. now rewrite ones_spec_low, orb_true_r. Qed. Lemma land_ones : forall a n, land a (ones n) == a mod 2^n. Proof. intros a n. bitwise. destruct (le_gt_cases n m). now rewrite ones_spec_high, mod_pow2_bits_high, andb_false_r. now rewrite ones_spec_low, mod_pow2_bits_low, andb_true_r. Qed. Lemma land_ones_low : forall a n, log2 a < n -> land a (ones n) == a. Proof. intros; rewrite land_ones. apply mod_small. apply log2_lt_cancel. rewrite log2_pow2; trivial using le_0_l. Qed. Lemma ldiff_ones_r : forall a n, ldiff a (ones n) == (a >> n) << n. Proof. intros a n. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, shiftl_spec_high', shiftr_spec'; trivial. rewrite sub_add; trivial. apply andb_true_r. now rewrite ones_spec_low, shiftl_spec_low, andb_false_r. Qed. Lemma ldiff_ones_r_low : forall a n, log2 a < n -> ldiff a (ones n) == 0. Proof. intros a n H. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, bits_above_log2; trivial. now apply lt_le_trans with n. now rewrite ones_spec_low, andb_false_r. Qed. Lemma ldiff_ones_l_low : forall a n, log2 a < n -> ldiff (ones n) a == lnot a n. Proof. intros a n H. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, lnot_spec_high, bits_above_log2; trivial. now apply lt_le_trans with n. now rewrite ones_spec_low, lnot_spec_low. Qed. Lemma lor_lnot_diag : forall a n, lor a (lnot a n) == lor a (ones n). Proof. intros a n. bitwise. destruct (le_gt_cases n m). rewrite lnot_spec_high, ones_spec_high; trivial. now destruct a.[m]. rewrite lnot_spec_low, ones_spec_low; trivial. now destruct a.[m]. Qed. Lemma lor_lnot_diag_low : forall a n, log2 a < n -> lor a (lnot a n) == ones n. Proof. intros a n H. now rewrite lor_lnot_diag, lor_ones_low. Qed. 
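(** Illustrative note (not part of the original development): [land_ones]
    expresses the usual "masking" trick: keeping the [n] low bits of [a] is
    the same as reducing modulo [2^n].  For instance with 13 = 0b1101 and
    [n = 2]: [land 13 (ones 2) == land 13 3 == 1 == 13 mod 4], while
    [ldiff_ones_r] recovers the discarded high part:
    [ldiff 13 (ones 2) == 12 == (13 >> 2) << 2]. *)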
Lemma land_lnot_diag : forall a n, land a (lnot a n) == ldiff a (ones n). Proof. intros a n. bitwise. destruct (le_gt_cases n m). rewrite lnot_spec_high, ones_spec_high; trivial. now destruct a.[m]. rewrite lnot_spec_low, ones_spec_low; trivial. now destruct a.[m]. Qed. Lemma land_lnot_diag_low : forall a n, log2 a < n -> land a (lnot a n) == 0. Proof. intros. now rewrite land_lnot_diag, ldiff_ones_r_low. Qed. Lemma lnot_lor_low : forall a b n, log2 a < n -> log2 b < n -> lnot (lor a b) n == land (lnot a n) (lnot b n). Proof. intros a b n Ha Hb. bitwise. destruct (le_gt_cases n m). rewrite !lnot_spec_high, lor_spec, !bits_above_log2; trivial. now apply lt_le_trans with n. now apply lt_le_trans with n. now rewrite !lnot_spec_low, lor_spec, negb_orb. Qed. Lemma lnot_land_low : forall a b n, log2 a < n -> log2 b < n -> lnot (land a b) n == lor (lnot a n) (lnot b n). Proof. intros a b n Ha Hb. bitwise. destruct (le_gt_cases n m). rewrite !lnot_spec_high, land_spec, !bits_above_log2; trivial. now apply lt_le_trans with n. now apply lt_le_trans with n. now rewrite !lnot_spec_low, land_spec, negb_andb. Qed. Lemma ldiff_land_low : forall a b n, log2 a < n -> ldiff a b == land a (lnot b n). Proof. intros a b n Ha. bitwise. destruct (le_gt_cases n m). rewrite (bits_above_log2 a m). trivial. now apply lt_le_trans with n. rewrite !lnot_spec_low; trivial. Qed. Lemma lnot_ldiff_low : forall a b n, log2 a < n -> log2 b < n -> lnot (ldiff a b) n == lor (lnot a n) b. Proof. intros a b n Ha Hb. bitwise. destruct (le_gt_cases n m). rewrite !lnot_spec_high, ldiff_spec, !bits_above_log2; trivial. now apply lt_le_trans with n. now apply lt_le_trans with n. now rewrite !lnot_spec_low, ldiff_spec, negb_andb, negb_involutive. Qed. Lemma lxor_lnot_lnot : forall a b n, lxor (lnot a n) (lnot b n) == lxor a b. Proof. intros a b n. bitwise. destruct (le_gt_cases n m). rewrite !lnot_spec_high; trivial. rewrite !lnot_spec_low, xorb_negb_negb; trivial. Qed. Lemma lnot_lxor_l : forall a b n, lnot (lxor a b) n == lxor (lnot a n) b. Proof. intros a b n. bitwise. destruct (le_gt_cases n m). rewrite !lnot_spec_high, lxor_spec; trivial. rewrite !lnot_spec_low, lxor_spec, negb_xorb_l; trivial. Qed. Lemma lnot_lxor_r : forall a b n, lnot (lxor a b) n == lxor a (lnot b n). Proof. intros a b n. bitwise. destruct (le_gt_cases n m). rewrite !lnot_spec_high, lxor_spec; trivial. rewrite !lnot_spec_low, lxor_spec, negb_xorb_r; trivial. Qed. Lemma lxor_lor : forall a b, land a b == 0 -> lxor a b == lor a b. Proof. intros a b H. bitwise. assert (a.[m] && b.[m] = false) by now rewrite <- land_spec, H, bits_0. now destruct a.[m], b.[m]. Qed. (** Bitwise operations and log2 *) Lemma log2_bits_unique : forall a n, a.[n] = true -> (forall m, n a.[m] = false) -> log2 a == n. Proof. intros a n H H'. destruct (eq_0_gt_0_cases a) as [Ha|Ha]. now rewrite Ha, bits_0 in H. apply le_antisymm; apply le_ngt; intros LT. specialize (H' _ LT). now rewrite bit_log2 in H' by order. now rewrite bits_above_log2 in H by order. Qed. Lemma log2_shiftr : forall a n, log2 (a >> n) == log2 a - n. Proof. intros a n. destruct (eq_0_gt_0_cases a) as [Ha|Ha]. now rewrite Ha, shiftr_0_l, log2_nonpos, sub_0_l by order. destruct (lt_ge_cases (log2 a) n). rewrite shiftr_eq_0, log2_nonpos by order. symmetry. rewrite sub_0_le; order. apply log2_bits_unique. now rewrite shiftr_spec', sub_add, bit_log2 by order. intros m Hm. rewrite shiftr_spec'; trivial. apply bits_above_log2; try order. now apply lt_sub_lt_add_r. Qed. 
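(** Illustrative note (not part of the original development): [log2_shiftr]
    can be checked on a small instance.  With [a = 20] (0b10100, so
    [log2 20 == 4]) and [n = 2]: [20 >> 2 == 5] and [log2 5 == 2 == 4 - 2].
    When the shift exceeds [log2 a], both sides collapse to 0 thanks to
    truncated subtraction: for [n = 6], [20 >> 6 == 0], [log2 0 == 0]
    and [4 - 6 == 0]. *)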
Lemma log2_shiftl : forall a n, a~=0 -> log2 (a << n) == log2 a + n. Proof. intros a n Ha. rewrite shiftl_mul_pow2, add_comm by trivial. apply log2_mul_pow2. generalize (le_0_l a); order. apply le_0_l. Qed. Lemma log2_lor : forall a b, log2 (lor a b) == max (log2 a) (log2 b). Proof. assert (AUX : forall a b, a<=b -> log2 (lor a b) == log2 b). intros a b H. destruct (eq_0_gt_0_cases a) as [Ha|Ha]. now rewrite Ha, lor_0_l. apply log2_bits_unique. now rewrite lor_spec, bit_log2, orb_true_r by order. intros m Hm. assert (H' := log2_le_mono _ _ H). now rewrite lor_spec, 2 bits_above_log2 by order. (* main *) intros a b. destruct (le_ge_cases a b) as [H|H]. rewrite max_r by now apply log2_le_mono. now apply AUX. rewrite max_l by now apply log2_le_mono. rewrite lor_comm. now apply AUX. Qed. Lemma log2_land : forall a b, log2 (land a b) <= min (log2 a) (log2 b). Proof. assert (AUX : forall a b, a<=b -> log2 (land a b) <= log2 a). intros a b H. apply le_ngt. intros H'. destruct (eq_decidable (land a b) 0) as [EQ|NEQ]. rewrite EQ in H'. apply log2_lt_cancel in H'. generalize (le_0_l a); order. generalize (bit_log2 (land a b) NEQ). now rewrite land_spec, bits_above_log2. (* main *) intros a b. destruct (le_ge_cases a b) as [H|H]. rewrite min_l by now apply log2_le_mono. now apply AUX. rewrite min_r by now apply log2_le_mono. rewrite land_comm. now apply AUX. Qed. Lemma log2_lxor : forall a b, log2 (lxor a b) <= max (log2 a) (log2 b). Proof. assert (AUX : forall a b, a<=b -> log2 (lxor a b) <= log2 b). intros a b H. apply le_ngt. intros H'. destruct (eq_decidable (lxor a b) 0) as [EQ|NEQ]. rewrite EQ in H'. apply log2_lt_cancel in H'. generalize (le_0_l a); order. generalize (bit_log2 (lxor a b) NEQ). rewrite lxor_spec, 2 bits_above_log2; try order. discriminate. apply le_lt_trans with (log2 b); trivial. now apply log2_le_mono. (* main *) intros a b. destruct (le_ge_cases a b) as [H|H]. rewrite max_r by now apply log2_le_mono. now apply AUX. rewrite max_l by now apply log2_le_mono. rewrite lxor_comm. now apply AUX. Qed. (** Bitwise operations and arithmetical operations *) Local Notation xor3 a b c := (xorb (xorb a b) c). Local Notation lxor3 a b c := (lxor (lxor a b) c). Local Notation nextcarry a b c := ((a&&b) || (c && (a||b))). Local Notation lnextcarry a b c := (lor (land a b) (land c (lor a b))). Lemma add_bit0 : forall a b, (a+b).[0] = xorb a.[0] b.[0]. Proof. intros. now rewrite !bit0_odd, odd_add. Qed. Lemma add3_bit0 : forall a b c, (a+b+c).[0] = xor3 a.[0] b.[0] c.[0]. Proof. intros. now rewrite !add_bit0. Qed. Lemma add3_bits_div2 : forall (a0 b0 c0 : bool), (a0 + b0 + c0)/2 == nextcarry a0 b0 c0. Proof. assert (H : 1+1 == 2) by now nzsimpl'. intros [|] [|] [|]; simpl; rewrite ?add_0_l, ?add_0_r, ?H; (apply div_same; order') || (apply div_small; order') || idtac. symmetry. apply div_unique with 1. order'. now nzsimpl'. Qed. Lemma add_carry_div2 : forall a b (c0:bool), (a + b + c0)/2 == a/2 + b/2 + nextcarry a.[0] b.[0] c0. Proof. intros. rewrite <- add3_bits_div2. rewrite (add_comm ((a/2)+_)). rewrite <- div_add by order'. f_equiv. rewrite <- !div2_div, mul_comm, mul_add_distr_l. rewrite (div2_odd a), <- bit0_odd at 1. fold (b2n a.[0]). rewrite (div2_odd b), <- bit0_odd at 1. fold (b2n b.[0]). rewrite add_shuffle1. rewrite <-(add_assoc _ _ c0). apply add_comm. Qed. (** The main result concerning addition: we express the bits of the sum in term of bits of [a] and [b] and of some carry stream which is also recursively determined by another equation. 
*) Lemma add_carry_bits : forall a b (c0:bool), exists c, a+b+c0 == lxor3 a b c /\ c/2 == lnextcarry a b c /\ c.[0] = c0. Proof. intros a b c0. (* induction over some n such that [a<2^n] and [b<2^n] *) set (n:=max a b). assert (Ha : a<2^n). apply lt_le_trans with (2^a). apply pow_gt_lin_r, lt_1_2. apply pow_le_mono_r. order'. unfold n. destruct (le_ge_cases a b); [rewrite max_r|rewrite max_l]; order'. assert (Hb : b<2^n). apply lt_le_trans with (2^b). apply pow_gt_lin_r, lt_1_2. apply pow_le_mono_r. order'. unfold n. destruct (le_ge_cases a b); [rewrite max_r|rewrite max_l]; order'. clearbody n. revert a b c0 Ha Hb. induct n. (*base*) intros a b c0. rewrite !pow_0_r, !one_succ, !lt_succ_r. intros Ha Hb. exists c0. setoid_replace a with 0 by (generalize (le_0_l a); order'). setoid_replace b with 0 by (generalize (le_0_l b); order'). rewrite !add_0_l, !lxor_0_l, !lor_0_r, !land_0_r, !lor_0_r. rewrite b2n_div2, b2n_bit0; now repeat split. (*step*) intros n IH a b c0 Ha Hb. set (c1:=nextcarry a.[0] b.[0] c0). destruct (IH (a/2) (b/2) c1) as (c & IH1 & IH2 & Hc); clear IH. apply div_lt_upper_bound; trivial. order'. now rewrite <- pow_succ_r'. apply div_lt_upper_bound; trivial. order'. now rewrite <- pow_succ_r'. exists (c0 + 2*c). repeat split. (* - add *) bitwise. destruct (zero_or_succ m) as [EQ|[m' EQ]]; rewrite EQ; clear EQ. now rewrite add_b2n_double_bit0, add3_bit0, b2n_bit0. rewrite <- !div2_bits, <- 2 lxor_spec. f_equiv. rewrite add_b2n_double_div2, <- IH1. apply add_carry_div2. (* - carry *) rewrite add_b2n_double_div2. bitwise. destruct (zero_or_succ m) as [EQ|[m' EQ]]; rewrite EQ; clear EQ. now rewrite add_b2n_double_bit0. rewrite <- !div2_bits, IH2. autorewrite with bitwise. now rewrite add_b2n_double_div2. (* - carry0 *) apply add_b2n_double_bit0. Qed. (** Particular case : the second bit of an addition *) Lemma add_bit1 : forall a b, (a+b).[1] = xor3 a.[1] b.[1] (a.[0] && b.[0]). Proof. intros a b. destruct (add_carry_bits a b false) as (c & EQ1 & EQ2 & Hc). simpl in EQ1; rewrite add_0_r in EQ1. rewrite EQ1. autorewrite with bitwise. f_equal. rewrite one_succ, <- div2_bits, EQ2. autorewrite with bitwise. rewrite Hc. simpl. apply orb_false_r. Qed. (** In an addition, there will be no carries iff there is no common bits in the numbers to add *) Lemma nocarry_equiv : forall a b c, c/2 == lnextcarry a b c -> c.[0] = false -> (c == 0 <-> land a b == 0). Proof. intros a b c H H'. split. intros EQ; rewrite EQ in *. rewrite div_0_l in H by order'. symmetry in H. now apply lor_eq_0_l in H. intros EQ. rewrite EQ, lor_0_l in H. apply bits_inj_0. induct n. trivial. intros n IH. rewrite <- div2_bits, H. autorewrite with bitwise. now rewrite IH. Qed. (** When there is no common bits, the addition is just a xor *) Lemma add_nocarry_lxor : forall a b, land a b == 0 -> a+b == lxor a b. Proof. intros a b H. destruct (add_carry_bits a b false) as (c & EQ1 & EQ2 & Hc). simpl in EQ1; rewrite add_0_r in EQ1. rewrite EQ1. apply (nocarry_equiv a b c) in H; trivial. rewrite H. now rewrite lxor_0_r. Qed. (** A null [ldiff] implies being smaller *) Lemma ldiff_le : forall a b, ldiff a b == 0 -> a <= b. Proof. cut (forall n a b, a < 2^n -> ldiff a b == 0 -> a <= b). intros H a b. apply (H a), pow_gt_lin_r; order'. induct n. intros a b Ha _. rewrite pow_0_r, one_succ, lt_succ_r in Ha. assert (Ha' : a == 0) by (generalize (le_0_l a); order'). rewrite Ha'. apply le_0_l. intros n IH a b Ha H. assert (NEQ : 2 ~= 0) by order'. rewrite (div_mod a 2 NEQ), (div_mod b 2 NEQ). apply add_le_mono. apply mul_le_mono_l. 
apply IH. apply div_lt_upper_bound; trivial. now rewrite <- pow_succ_r'. rewrite <- (pow_1_r 2), <- 2 shiftr_div_pow2. now rewrite <- shiftr_ldiff, H, shiftr_div_pow2, pow_1_r, div_0_l. rewrite <- 2 bit0_mod. apply bits_inj_iff in H. specialize (H 0). rewrite ldiff_spec, bits_0 in H. destruct a.[0], b.[0]; try discriminate; simpl; order'. Qed. (** Subtraction can be a ldiff when the opposite ldiff is null. *) Lemma sub_nocarry_ldiff : forall a b, ldiff b a == 0 -> a-b == ldiff a b. Proof. intros a b H. apply add_cancel_r with b. rewrite sub_add. symmetry. rewrite add_nocarry_lxor. bitwise. apply bits_inj_iff in H. specialize (H m). rewrite ldiff_spec, bits_0 in H. now destruct a.[m], b.[m]. apply land_ldiff. now apply ldiff_le. Qed. (** We can express lnot in term of subtraction *) Lemma add_lnot_diag_low : forall a n, log2 a < n -> a + lnot a n == ones n. Proof. intros a n H. assert (H' := land_lnot_diag_low a n H). rewrite add_nocarry_lxor, lxor_lor by trivial. now apply lor_lnot_diag_low. Qed. Lemma lnot_sub_low : forall a n, log2 a < n -> lnot a n == ones n - a. Proof. intros a n H. now rewrite <- (add_lnot_diag_low a n H), add_comm, add_sub. Qed. (** Adding numbers with no common bits cannot lead to a much bigger number *) Lemma add_nocarry_lt_pow2 : forall a b n, land a b == 0 -> a < 2^n -> b < 2^n -> a+b < 2^n. Proof. intros a b n H Ha Hb. rewrite add_nocarry_lxor by trivial. apply div_small_iff. order_nz. rewrite <- shiftr_div_pow2, shiftr_lxor, !shiftr_div_pow2. rewrite 2 div_small by trivial. apply lxor_0_l. Qed. Lemma add_nocarry_mod_lt_pow2 : forall a b n, land a b == 0 -> a mod 2^n + b mod 2^n < 2^n. Proof. intros a b n H. apply add_nocarry_lt_pow2. bitwise. destruct (le_gt_cases n m). now rewrite mod_pow2_bits_high. now rewrite !mod_pow2_bits_low, <- land_spec, H, bits_0. apply mod_upper_bound; order_nz. apply mod_upper_bound; order_nz. Qed. End NBitsProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NPow.v0000640000175000001440000001075512010532755021317 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0^a == 0. Proof. wrap pow_0_l. Qed. Definition pow_1_r : forall a, a^1 == a := pow_1_r. Lemma pow_1_l : forall a, 1^a == 1. Proof. wrap pow_1_l. Qed. Definition pow_2_r : forall a, a^2 == a*a := pow_2_r. (** Power and addition, multiplication *) Lemma pow_add_r : forall a b c, a^(b+c) == a^b * a^c. Proof. wrap pow_add_r. Qed. Lemma pow_mul_l : forall a b c, (a*b)^c == a^c * b^c. Proof. wrap pow_mul_l. Qed. Lemma pow_mul_r : forall a b c, a^(b*c) == (a^b)^c. Proof. wrap pow_mul_r. Qed. (** Power and nullity *) Lemma pow_eq_0 : forall a b, b~=0 -> a^b == 0 -> a == 0. Proof. intros. apply (pow_eq_0 a b); trivial. auto'. Qed. Lemma pow_nonzero : forall a b, a~=0 -> a^b ~= 0. Proof. wrap pow_nonzero. Qed. Lemma pow_eq_0_iff : forall a b, a^b == 0 <-> b~=0 /\ a==0. Proof. intros a b. split. rewrite pow_eq_0_iff. intros [H |[H H']]. generalize (le_0_l b); order. split; order. intros (Hb,Ha). rewrite Ha. now apply pow_0_l'. Qed. (** Monotonicity *) Lemma pow_lt_mono_l : forall a b c, c~=0 -> a a^c < b^c. Proof. wrap pow_lt_mono_l. Qed. Lemma pow_le_mono_l : forall a b c, a<=b -> a^c <= b^c. Proof. wrap pow_le_mono_l. Qed. Lemma pow_gt_1 : forall a b, 1 b~=0 -> 1 b a^b < a^c. Proof. wrap pow_lt_mono_r. Qed. (** NB: since 0^0 > 0^1, the following result isn't valid with a=0 *) Lemma pow_le_mono_r : forall a b c, a~=0 -> b<=c -> a^b <= a^c. Proof. wrap pow_le_mono_r. Qed. 
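(** Illustrative note (not part of the original development): the side
    condition [a~=0] above is really needed.  Since [0^0 == 1] while
    [0^1 == 0], taking [a == 0], [b == 0], [c == 1] gives [b <= c] yet
    [a^b == 1 > 0 == a^c].  For any [a ~= 0] the statement behaves as
    expected, e.g. [2^3 == 8 <= 32 == 2^5]. *)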
Lemma pow_le_mono : forall a b c d, a~=0 -> a<=c -> b<=d -> a^b <= c^d. Proof. wrap pow_le_mono. Qed. Definition pow_lt_mono : forall a b c d, 0 0 a^b < c^d := pow_lt_mono. (** Injectivity *) Lemma pow_inj_l : forall a b c, c~=0 -> a^c == b^c -> a == b. Proof. intros; eapply pow_inj_l; eauto; auto'. Qed. Lemma pow_inj_r : forall a b c, 1 a^b == a^c -> b == c. Proof. intros; eapply pow_inj_r; eauto; auto'. Qed. (** Monotonicity results, both ways *) Lemma pow_lt_mono_l_iff : forall a b c, c~=0 -> (a a^c < b^c). Proof. wrap pow_lt_mono_l_iff. Qed. Lemma pow_le_mono_l_iff : forall a b c, c~=0 -> (a<=b <-> a^c <= b^c). Proof. wrap pow_le_mono_l_iff. Qed. Lemma pow_lt_mono_r_iff : forall a b c, 1 (b a^b < a^c). Proof. wrap pow_lt_mono_r_iff. Qed. Lemma pow_le_mono_r_iff : forall a b c, 1 (b<=c <-> a^b <= a^c). Proof. wrap pow_le_mono_r_iff. Qed. (** For any a>1, the a^x function is above the identity function *) Lemma pow_gt_lin_r : forall a b, 1 b < a^b. Proof. wrap pow_gt_lin_r. Qed. (** Someday, we should say something about the full Newton formula. In the meantime, we can at least provide some inequalities about (a+b)^c. *) Lemma pow_add_lower : forall a b c, c~=0 -> a^c + b^c <= (a+b)^c. Proof. wrap pow_add_lower. Qed. (** This upper bound can also be seen as a convexity proof for x^c : image of (a+b)/2 is below the middle of the images of a and b *) Lemma pow_add_upper : forall a b c, c~=0 -> (a+b)^c <= 2^(pred c) * (a^c + b^c). Proof. wrap pow_add_upper. Qed. (** Power and parity *) Lemma even_pow : forall a b, b~=0 -> even (a^b) = even a. Proof. intros a b Hb. rewrite neq_0_lt_0 in Hb. apply lt_ind with (4:=Hb). solve_proper. now nzsimpl. clear b Hb. intros b Hb IH. rewrite pow_succ_r', even_mul, IH. now destruct (even a). Qed. Lemma odd_pow : forall a b, b~=0 -> odd (a^b) = odd a. Proof. intros. now rewrite <- !negb_even, even_pow. Qed. End NPowProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NStrongRec.v0000640000175000001440000001271412010532755022455 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A)->N->A] is the step function: [F f n] should return [phi(n)] when [f] is a function that coincide with [phi] for numbers strictly less than [n]. *) Definition strong_rec (a : A) (f : (N.t -> A) -> N.t -> A) (n : N.t) : A := recursion (fun _ => a) (fun _ => f) (S n) n. (** For convenience, we use in proofs an intermediate definition between [recursion] and [strong_rec]. *) Definition strong_rec0 (a : A) (f : (N.t -> A) -> N.t -> A) : N.t -> N.t -> A := recursion (fun _ => a) (fun _ => f). Lemma strong_rec_alt : forall a f n, strong_rec a f n = strong_rec0 a f (S n) n. Proof. reflexivity. Qed. Instance strong_rec0_wd : Proper (Aeq ==> ((N.eq ==> Aeq) ==> N.eq ==> Aeq) ==> N.eq ==> N.eq ==> Aeq) strong_rec0. Proof. unfold strong_rec0; f_equiv'. Qed. Instance strong_rec_wd : Proper (Aeq ==> ((N.eq ==> Aeq) ==> N.eq ==> Aeq) ==> N.eq ==> Aeq) strong_rec. Proof. intros a a' Eaa' f f' Eff' n n' Enn'. rewrite !strong_rec_alt; f_equiv'. Qed. Section FixPoint. Variable f : (N.t -> A) -> N.t -> A. Variable f_wd : Proper ((N.eq==>Aeq)==>N.eq==>Aeq) f. Lemma strong_rec0_0 : forall a m, (strong_rec0 a f 0 m) = a. Proof. intros. unfold strong_rec0. rewrite recursion_0; auto. Qed. Lemma strong_rec0_succ : forall a n m, Aeq (strong_rec0 a f (S n) m) (f (strong_rec0 a f n) m). Proof. intros. unfold strong_rec0. f_equiv. rewrite recursion_succ; f_equiv'. reflexivity. Qed. 
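(** Illustrative note (not part of the original development): once
    [strong_rec_fixpoint] below is established, [strong_rec] supports the
    usual course-of-values definitions.  For instance a hypothetical step
    function [fun h n => if n is 0 then 1 else n * h (P n)] (written here
    informally, assuming a way to test for 0) would turn [strong_rec 1 f]
    into the factorial function, the [step_good] hypothesis being satisfied
    because the step only consults [h] at [P n < n]. *)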
Lemma strong_rec_0 : forall a, Aeq (strong_rec a f 0) (f (fun _ => a) 0). Proof. intros. rewrite strong_rec_alt, strong_rec0_succ; f_equiv'. rewrite strong_rec0_0. reflexivity. Qed. (* We need an assumption saying that for every n, the step function (f h n) calls h only on the segment [0 ... n - 1]. This means that if h1 and h2 coincide on values < n, then (f h1 n) coincides with (f h2 n) *) Hypothesis step_good : forall (n : N.t) (h1 h2 : N.t -> A), (forall m : N.t, m < n -> Aeq (h1 m) (h2 m)) -> Aeq (f h1 n) (f h2 n). Lemma strong_rec0_more_steps : forall a k n m, m < n -> Aeq (strong_rec0 a f n m) (strong_rec0 a f (n+k) m). Proof. intros a k n. pattern n. apply induction; clear n. intros n n' Hn; setoid_rewrite Hn; auto with *. intros m Hm. destruct (nlt_0_r _ Hm). intros n IH m Hm. rewrite lt_succ_r in Hm. rewrite add_succ_l. rewrite 2 strong_rec0_succ. apply step_good. intros m' Hm'. apply IH. apply lt_le_trans with m; auto. Qed. Lemma strong_rec0_fixpoint : forall (a : A) (n : N.t), Aeq (strong_rec0 a f (S n) n) (f (fun n => strong_rec0 a f (S n) n) n). Proof. intros. rewrite strong_rec0_succ. apply step_good. intros m Hm. symmetry. setoid_replace n with (S m + (n - S m)). apply strong_rec0_more_steps. apply lt_succ_diag_r. rewrite add_comm. symmetry. apply sub_add. rewrite le_succ_l; auto. Qed. Theorem strong_rec_fixpoint : forall (a : A) (n : N.t), Aeq (strong_rec a f n) (f (strong_rec a f) n). Proof. intros. transitivity (f (fun n => strong_rec0 a f (S n) n) n). rewrite strong_rec_alt. apply strong_rec0_fixpoint. f_equiv. intros x x' Hx; rewrite strong_rec_alt, Hx; auto with *. Qed. (** NB: without the [step_good] hypothesis, we have proved that [strong_rec a f 0] is [f (fun _ => a) 0]. Now we can prove that the first argument of [f] is arbitrary in this case... *) Theorem strong_rec_0_any : forall (a : A)(any : N.t->A), Aeq (strong_rec a f 0) (f any 0). Proof. intros. rewrite strong_rec_fixpoint. apply step_good. intros m Hm. destruct (nlt_0_r _ Hm). Qed. (** ... and that first argument of [strong_rec] is always arbitrary. *) Lemma strong_rec_any_fst_arg : forall a a' n, Aeq (strong_rec a f n) (strong_rec a' f n). Proof. intros a a' n. generalize (le_refl n). set (k:=n) at -2. clearbody k. revert k. pattern n. apply induction; clear n. (* compat *) intros n n' Hn. setoid_rewrite Hn; auto with *. (* 0 *) intros k Hk. rewrite le_0_r in Hk. rewrite Hk, strong_rec_0. symmetry. apply strong_rec_0_any. (* S *) intros n IH k Hk. rewrite 2 strong_rec_fixpoint. apply step_good. intros m Hm. apply IH. rewrite succ_le_mono. apply le_trans with k; auto. rewrite le_succ_l; auto. Qed. End FixPoint. End StrongRecursion. Arguments strong_rec [A] a f n. End NStrongRecProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NParity.v0000640000175000001440000000446212010532755022020 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* odd (P n) = even n. Proof. intros. rewrite <- (succ_pred n) at 2 by trivial. symmetry. apply even_succ. Qed. Lemma even_pred : forall n, n~=0 -> even (P n) = odd n. Proof. intros. rewrite <- (succ_pred n) at 2 by trivial. symmetry. apply odd_succ. Qed. Lemma even_sub : forall n m, m<=n -> even (n-m) = Bool.eqb (even n) (even m). Proof. intros. case_eq (even n); case_eq (even m); rewrite <- ?negb_true_iff, ?negb_even, ?odd_spec, ?even_spec; intros (m',Hm) (n',Hn). exists (n'-m'). now rewrite mul_sub_distr_l, Hn, Hm. exists (n'-m'-1). 
rewrite !mul_sub_distr_l, Hn, Hm, sub_add_distr, mul_1_r. rewrite two_succ at 5. rewrite <- (add_1_l 1). rewrite sub_add_distr. symmetry. apply sub_add. apply le_add_le_sub_l. rewrite add_1_l, <- two_succ, <- (mul_1_r 2) at 1. rewrite <- mul_sub_distr_l. rewrite <- mul_le_mono_pos_l by order'. rewrite one_succ, le_succ_l. rewrite <- lt_add_lt_sub_l, add_0_r. destruct (le_gt_cases n' m') as [LE|GT]; trivial. generalize (double_below _ _ LE). order. exists (n'-m'). rewrite mul_sub_distr_l, Hn, Hm. apply add_sub_swap. apply mul_le_mono_pos_l; try order'. destruct (le_gt_cases m' n') as [LE|GT]; trivial. generalize (double_above _ _ GT). order. exists (n'-m'). rewrite Hm,Hn, mul_sub_distr_l. rewrite sub_add_distr. rewrite add_sub_swap. apply add_sub. apply succ_le_mono. rewrite add_1_r in Hm,Hn. order. Qed. Lemma odd_sub : forall n m, m<=n -> odd (n-m) = xorb (odd n) (odd m). Proof. intros. rewrite <- !negb_even. rewrite even_sub by trivial. now destruct (even n), (even m). Qed. End NParityProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NBase.v0000640000175000001440000001200012010532755021405 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop, Proper (N.eq==>iff) A -> A 0 -> (forall n, A n -> A (S n)) -> forall n, A n. Proof. intros A A_wd A0 AS n; apply right_induction with 0; try assumption. intros; auto; apply le_0_l. apply le_0_l. Qed. (** The theorems [bi_induction], [central_induction] and the tactic [nzinduct] refer to bidirectional induction, which is not useful on natural numbers. Therefore, we define a new induction tactic for natural numbers. We do not have to call "Declare Left Step" and "Declare Right Step" commands again, since the data for stepl and stepr tactics is inherited from NZ. *) Ltac induct n := induction_maker n ltac:(apply induction). Theorem case_analysis : forall A : N.t -> Prop, Proper (N.eq==>iff) A -> A 0 -> (forall n, A (S n)) -> forall n, A n. Proof. intros; apply induction; auto. Qed. Ltac cases n := induction_maker n ltac:(apply case_analysis). Theorem neq_0 : ~ forall n, n == 0. Proof. intro H; apply (neq_succ_0 0). apply H. Qed. Theorem neq_0_r : forall n, n ~= 0 <-> exists m, n == S m. Proof. cases n. split; intro H; [now elim H | destruct H as [m H]; symmetry in H; false_hyp H neq_succ_0]. intro n; split; intro H; [now exists n | apply neq_succ_0]. Qed. Theorem zero_or_succ : forall n, n == 0 \/ exists m, n == S m. Proof. cases n. now left. intro n; right; now exists n. Qed. Theorem eq_pred_0 : forall n, P n == 0 <-> n == 0 \/ n == 1. Proof. cases n. rewrite pred_0. now split; [left|]. intro n. rewrite pred_succ. split. intros H; right. now rewrite H, one_succ. intros [H|H]. elim (neq_succ_0 _ H). apply succ_inj_wd. now rewrite <- one_succ. Qed. Theorem succ_pred : forall n, n ~= 0 -> S (P n) == n. Proof. cases n. intro H; exfalso; now apply H. intros; now rewrite pred_succ. Qed. Theorem pred_inj : forall n m, n ~= 0 -> m ~= 0 -> P n == P m -> n == m. Proof. intros n m; cases n. intros H; exfalso; now apply H. intros n _; cases m. intros H; exfalso; now apply H. intros m H2 H3. do 2 rewrite pred_succ in H3. now rewrite H3. Qed. (** The following induction principle is useful for reasoning about, e.g., Fibonacci numbers *) Section PairInduction. Variable A : N.t -> Prop. Hypothesis A_wd : Proper (N.eq==>iff) A. Theorem pair_induction : A 0 -> A 1 -> (forall n, A n -> A (S n) -> A (S (S n))) -> forall n, A n. Proof. rewrite one_succ. 
intros until 3. assert (D : forall n, A n /\ A (S n)); [ |intro n; exact (proj1 (D n))]. induct n; [ | intros n [IH1 IH2]]; auto. Qed. End PairInduction. (** The following is useful for reasoning about, e.g., Ackermann function *) Section TwoDimensionalInduction. Variable R : N.t -> N.t -> Prop. Hypothesis R_wd : Proper (N.eq==>N.eq==>iff) R. Theorem two_dim_induction : R 0 0 -> (forall n m, R n m -> R n (S m)) -> (forall n, (forall m, R n m) -> R (S n) 0) -> forall n m, R n m. Proof. intros H1 H2 H3. induct n. induct m. exact H1. exact (H2 0). intros n IH. induct m. now apply H3. exact (H2 (S n)). Qed. End TwoDimensionalInduction. Section DoubleInduction. Variable R : N.t -> N.t -> Prop. Hypothesis R_wd : Proper (N.eq==>N.eq==>iff) R. Theorem double_induction : (forall m, R 0 m) -> (forall n, R (S n) 0) -> (forall n m, R n m -> R (S n) (S m)) -> forall n m, R n m. Proof. intros H1 H2 H3; induct n; auto. intros n H; cases m; auto. Qed. End DoubleInduction. Ltac double_induct n m := try intros until n; try intros until m; pattern n, m; apply double_induction; clear n m; [solve_proper | | | ]. End NBaseProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NLcm.v0000640000175000001440000002024112010532755021254 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (a mod b == 0 <-> (b|a)). Proof. intros a b Hb. split. intros Hab. exists (a/b). rewrite mul_comm. rewrite (div_mod a b Hb) at 1. rewrite Hab; now nzsimpl. intros (c,Hc). rewrite Hc. now apply mod_mul. Qed. Lemma divide_div_mul_exact : forall a b c, b~=0 -> (b|a) -> (c*a)/b == c*(a/b). Proof. intros a b c Hb H. apply mul_cancel_l with b; trivial. rewrite mul_assoc, mul_shuffle0. assert (H':=H). apply mod_divide, div_exact in H'; trivial. rewrite <- H', (mul_comm a c). symmetry. apply div_exact; trivial. apply mod_divide; trivial. now apply divide_mul_r. Qed. (** Gcd of divided elements, for exact divisions *) Lemma gcd_div_factor : forall a b c, c~=0 -> (c|a) -> (c|b) -> gcd (a/c) (b/c) == (gcd a b)/c. Proof. intros a b c Hc Ha Hb. apply mul_cancel_l with c; try order. assert (H:=gcd_greatest _ _ _ Ha Hb). apply mod_divide, div_exact in H; try order. rewrite <- H. rewrite <- gcd_mul_mono_l; try order. f_equiv; symmetry; apply div_exact; try order; apply mod_divide; trivial; try order. Qed. Lemma gcd_div_gcd : forall a b g, g~=0 -> g == gcd a b -> gcd (a/g) (b/g) == 1. Proof. intros a b g NZ EQ. rewrite gcd_div_factor. now rewrite <- EQ, div_same. generalize (gcd_nonneg a b); order. rewrite EQ; apply gcd_divide_l. rewrite EQ; apply gcd_divide_r. Qed. (** The following equality is crucial for Euclid algorithm *) Lemma gcd_mod : forall a b, b~=0 -> gcd (a mod b) b == gcd b a. Proof. intros a b Hb. rewrite (gcd_comm _ b). rewrite <- (gcd_add_mult_diag_r b (a mod b) (a/b)). now rewrite add_comm, mul_comm, <- div_mod. Qed. (** We now define lcm thanks to gcd: lcm a b = a * (b / gcd a b) = (a / gcd a b) * b = (a*b) / gcd a b Nota: [lcm 0 0] should be 0, which isn't garantee with the third equation above. *) Definition lcm a b := a*(b/gcd a b). Instance lcm_wd : Proper (eq==>eq==>eq) lcm. Proof. unfold lcm. solve_proper. Qed. Lemma lcm_equiv1 : forall a b, gcd a b ~= 0 -> a * (b / gcd a b) == (a*b)/gcd a b. Proof. intros a b H. rewrite divide_div_mul_exact; try easy. apply gcd_divide_r. Qed. Lemma lcm_equiv2 : forall a b, gcd a b ~= 0 -> (a / gcd a b) * b == (a*b)/gcd a b. Proof. intros a b H. rewrite 2 (mul_comm _ b). 
rewrite divide_div_mul_exact; try easy. apply gcd_divide_l. Qed. Lemma gcd_div_swap : forall a b, (a / gcd a b) * b == a * (b / gcd a b). Proof. intros a b. destruct (eq_decidable (gcd a b) 0) as [EQ|NEQ]. apply gcd_eq_0 in EQ. destruct EQ as (EQ,EQ'). rewrite EQ, EQ'. now nzsimpl. now rewrite lcm_equiv1, <-lcm_equiv2. Qed. Lemma divide_lcm_l : forall a b, (a | lcm a b). Proof. unfold lcm. intros a b. apply divide_factor_l. Qed. Lemma divide_lcm_r : forall a b, (b | lcm a b). Proof. unfold lcm. intros a b. rewrite <- gcd_div_swap. apply divide_factor_r. Qed. Lemma divide_div : forall a b c, a~=0 -> (a|b) -> (b|c) -> (b/a|c/a). Proof. intros a b c Ha Hb (c',Hc). exists c'. now rewrite <- divide_div_mul_exact, Hc. Qed. Lemma lcm_least : forall a b c, (a | c) -> (b | c) -> (lcm a b | c). Proof. intros a b c Ha Hb. unfold lcm. destruct (eq_decidable (gcd a b) 0) as [EQ|NEQ]. apply gcd_eq_0 in EQ. destruct EQ as (EQ,EQ'). rewrite EQ in *. now nzsimpl. assert (Ga := gcd_divide_l a b). assert (Gb := gcd_divide_r a b). set (g:=gcd a b) in *. assert (Ha' := divide_div g a c NEQ Ga Ha). assert (Hb' := divide_div g b c NEQ Gb Hb). destruct Ha' as (a',Ha'). rewrite Ha', mul_comm in Hb'. apply gauss in Hb'; [|apply gcd_div_gcd; unfold g; trivial using gcd_comm]. destruct Hb' as (b',Hb'). exists b'. rewrite mul_shuffle3, <- Hb'. rewrite (proj2 (div_exact c g NEQ)). rewrite Ha', mul_shuffle3, (mul_comm a a'). f_equiv. symmetry. apply div_exact; trivial. apply mod_divide; trivial. apply mod_divide; trivial. transitivity a; trivial. Qed. Lemma lcm_comm : forall a b, lcm a b == lcm b a. Proof. intros a b. unfold lcm. rewrite (gcd_comm b), (mul_comm b). now rewrite <- gcd_div_swap. Qed. Lemma lcm_divide_iff : forall n m p, (lcm n m | p) <-> (n | p) /\ (m | p). Proof. intros. split. split. transitivity (lcm n m); trivial using divide_lcm_l. transitivity (lcm n m); trivial using divide_lcm_r. intros (H,H'). now apply lcm_least. Qed. Lemma lcm_unique : forall n m p, 0<=p -> (n|p) -> (m|p) -> (forall q, (n|q) -> (m|q) -> (p|q)) -> lcm n m == p. Proof. intros n m p Hp Hn Hm H. apply divide_antisym; trivial. now apply lcm_least. apply H. apply divide_lcm_l. apply divide_lcm_r. Qed. Lemma lcm_unique_alt : forall n m p, 0<=p -> (forall q, (p|q) <-> (n|q) /\ (m|q)) -> lcm n m == p. Proof. intros n m p Hp H. apply lcm_unique; trivial. apply H, divide_refl. apply H, divide_refl. intros. apply H. now split. Qed. Lemma lcm_assoc : forall n m p, lcm n (lcm m p) == lcm (lcm n m) p. Proof. intros. apply lcm_unique_alt. apply le_0_l. intros. now rewrite !lcm_divide_iff, and_assoc. Qed. Lemma lcm_0_l : forall n, lcm 0 n == 0. Proof. intros. apply lcm_unique; trivial. order. apply divide_refl. apply divide_0_r. Qed. Lemma lcm_0_r : forall n, lcm n 0 == 0. Proof. intros. now rewrite lcm_comm, lcm_0_l. Qed. Lemma lcm_1_l : forall n, lcm 1 n == n. Proof. intros. apply lcm_unique; trivial using divide_1_l, le_0_l, divide_refl. Qed. Lemma lcm_1_r : forall n, lcm n 1 == n. Proof. intros. now rewrite lcm_comm, lcm_1_l. Qed. Lemma lcm_diag : forall n, lcm n n == n. Proof. intros. apply lcm_unique; trivial using divide_refl, le_0_l. Qed. Lemma lcm_eq_0 : forall n m, lcm n m == 0 <-> n == 0 \/ m == 0. Proof. intros. split. intros EQ. apply eq_mul_0. apply divide_0_l. rewrite <- EQ. apply lcm_least. apply divide_factor_l. apply divide_factor_r. destruct 1 as [EQ|EQ]; rewrite EQ. apply lcm_0_l. apply lcm_0_r. Qed. Lemma divide_lcm_eq_r : forall n m, (n|m) -> lcm n m == m. Proof. intros n m H. apply lcm_unique_alt; trivial using le_0_l. 
intros q. split. split; trivial. now transitivity m. now destruct 1. Qed. Lemma divide_lcm_iff : forall n m, (n|m) <-> lcm n m == m. Proof. intros n m. split. now apply divide_lcm_eq_r. intros EQ. rewrite <- EQ. apply divide_lcm_l. Qed. Lemma lcm_mul_mono_l : forall n m p, lcm (p * n) (p * m) == p * lcm n m. Proof. intros n m p. destruct (eq_decidable p 0) as [Hp|Hp]. rewrite Hp. nzsimpl. rewrite lcm_0_l. now nzsimpl. destruct (eq_decidable (gcd n m) 0) as [Hg|Hg]. apply gcd_eq_0 in Hg. destruct Hg as (Hn,Hm); rewrite Hn, Hm. nzsimpl. rewrite lcm_0_l. now nzsimpl. unfold lcm. rewrite gcd_mul_mono_l. rewrite mul_assoc. f_equiv. now rewrite div_mul_cancel_l. Qed. Lemma lcm_mul_mono_r : forall n m p, lcm (n * p) (m * p) == lcm n m * p. Proof. intros n m p. now rewrite !(mul_comm _ p), lcm_mul_mono_l, mul_comm. Qed. Lemma gcd_1_lcm_mul : forall n m, n~=0 -> m~=0 -> (gcd n m == 1 <-> lcm n m == n*m). Proof. intros n m Hn Hm. split; intros H. unfold lcm. rewrite H. now rewrite div_1_r. unfold lcm in *. apply mul_cancel_l in H; trivial. assert (Hg : gcd n m ~= 0) by (red; rewrite gcd_eq_0; destruct 1; order). assert (H' := gcd_divide_r n m). apply mod_divide in H'; trivial. apply div_exact in H'; trivial. rewrite H in H'. rewrite <- (mul_1_l m) in H' at 1. now apply mul_cancel_r in H'. Qed. End NLcmProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NOrder.v0000640000175000001440000001522212010532755021617 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 <= n < m). apply lt_wf. intros x y; split. intro H; split; [apply le_0_l | assumption]. now intros [_ H]. Defined. (* "le_0_l : forall n : N, 0 <= n" was proved in NBase.v *) Theorem nlt_0_r : forall n, ~ n < 0. Proof. intro n; apply le_ngt. apply le_0_l. Qed. Theorem nle_succ_0 : forall n, ~ (S n <= 0). Proof. intros n H; apply le_succ_l in H; false_hyp H nlt_0_r. Qed. Theorem le_0_r : forall n, n <= 0 <-> n == 0. Proof. intros n; split; intro H. le_elim H; [false_hyp H nlt_0_r | assumption]. now apply eq_le_incl. Qed. Theorem lt_0_succ : forall n, 0 < S n. Proof. induct n; [apply lt_succ_diag_r | intros n H; now apply lt_lt_succ_r]. Qed. Theorem neq_0_lt_0 : forall n, n ~= 0 <-> 0 < n. Proof. cases n. split; intro H; [now elim H | intro; now apply lt_irrefl with 0]. intro n; split; intro H; [apply lt_0_succ | apply neq_succ_0]. Qed. Theorem eq_0_gt_0_cases : forall n, n == 0 \/ 0 < n. Proof. cases n. now left. intro; right; apply lt_0_succ. Qed. Theorem zero_one : forall n, n == 0 \/ n == 1 \/ 1 < n. Proof. setoid_rewrite one_succ. induct n. now left. cases n. intros; right; now left. intros n IH. destruct IH as [H | [H | H]]. false_hyp H neq_succ_0. right; right. rewrite H. apply lt_succ_diag_r. right; right. now apply lt_lt_succ_r. Qed. Theorem lt_1_r : forall n, n < 1 <-> n == 0. Proof. setoid_rewrite one_succ. cases n. split; intro; [reflexivity | apply lt_succ_diag_r]. intros n. rewrite <- succ_lt_mono. split; intro H; [false_hyp H nlt_0_r | false_hyp H neq_succ_0]. Qed. Theorem le_1_r : forall n, n <= 1 <-> n == 0 \/ n == 1. Proof. setoid_rewrite one_succ. cases n. split; intro; [now left | apply le_succ_diag_r]. intro n. rewrite <- succ_le_mono, le_0_r, succ_inj_wd. split; [intro; now right | intros [H | H]; [false_hyp H neq_succ_0 | assumption]]. Qed. Theorem lt_lt_0 : forall n m, n < m -> 0 < m. Proof. intros n m; induct n. trivial. intros n IH H. apply IH; now apply lt_succ_l. Qed. 
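(** Illustrative note (not part of the original development): on natural
    numbers the general trichotomy is usually replaced by the simpler case
    analyses above, [eq_0_gt_0_cases] ("zero or positive") and [zero_one]
    ("zero, one, or greater than one"); these are the forms used repeatedly
    elsewhere in this development, e.g. in [NBitsProp]. *)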
Theorem lt_1_l' : forall n m p, n < m -> m < p -> 1 < p. Proof. intros. apply lt_1_l with m; auto. apply le_lt_trans with n; auto. now apply le_0_l. Qed. (** Elimination principlies for < and <= for relations *) Section RelElim. Variable R : relation N.t. Hypothesis R_wd : Proper (N.eq==>N.eq==>iff) R. Theorem le_ind_rel : (forall m, R 0 m) -> (forall n m, n <= m -> R n m -> R (S n) (S m)) -> forall n m, n <= m -> R n m. Proof. intros Base Step; induct n. intros; apply Base. intros n IH m H. elim H using le_ind. solve_proper. apply Step; [| apply IH]; now apply eq_le_incl. intros k H1 H2. apply le_succ_l in H1. apply lt_le_incl in H1. auto. Qed. Theorem lt_ind_rel : (forall m, R 0 (S m)) -> (forall n m, n < m -> R n m -> R (S n) (S m)) -> forall n m, n < m -> R n m. Proof. intros Base Step; induct n. intros m H. apply lt_exists_pred in H; destruct H as [m' [H _]]. rewrite H; apply Base. intros n IH m H. elim H using lt_ind. solve_proper. apply Step; [| apply IH]; now apply lt_succ_diag_r. intros k H1 H2. apply lt_succ_l in H1. auto. Qed. End RelElim. (** Predecessor and order *) Theorem succ_pred_pos : forall n, 0 < n -> S (P n) == n. Proof. intros n H; apply succ_pred; intro H1; rewrite H1 in H. false_hyp H lt_irrefl. Qed. Theorem le_pred_l : forall n, P n <= n. Proof. cases n. rewrite pred_0; now apply eq_le_incl. intros; rewrite pred_succ; apply le_succ_diag_r. Qed. Theorem lt_pred_l : forall n, n ~= 0 -> P n < n. Proof. cases n. intro H; exfalso; now apply H. intros; rewrite pred_succ; apply lt_succ_diag_r. Qed. Theorem le_le_pred : forall n m, n <= m -> P n <= m. Proof. intros n m H; apply le_trans with n. apply le_pred_l. assumption. Qed. Theorem lt_lt_pred : forall n m, n < m -> P n < m. Proof. intros n m H; apply le_lt_trans with n. apply le_pred_l. assumption. Qed. Theorem lt_le_pred : forall n m, n < m -> n <= P m. (* Converse is false for n == m == 0 *) Proof. intro n; cases m. intro H; false_hyp H nlt_0_r. intros m IH. rewrite pred_succ; now apply lt_succ_r. Qed. Theorem lt_pred_le : forall n m, P n < m -> n <= m. (* Converse is false for n == m == 0 *) Proof. intros n m; cases n. rewrite pred_0; intro H; now apply lt_le_incl. intros n IH. rewrite pred_succ in IH. now apply le_succ_l. Qed. Theorem lt_pred_lt : forall n m, n < P m -> n < m. Proof. intros n m H; apply lt_le_trans with (P m); [assumption | apply le_pred_l]. Qed. Theorem le_pred_le : forall n m, n <= P m -> n <= m. Proof. intros n m H; apply le_trans with (P m); [assumption | apply le_pred_l]. Qed. Theorem pred_le_mono : forall n m, n <= m -> P n <= P m. (* Converse is false for n == 1, m == 0 *) Proof. intros n m H; elim H using le_ind_rel. solve_proper. intro; rewrite pred_0; apply le_0_l. intros p q H1 _; now do 2 rewrite pred_succ. Qed. Theorem pred_lt_mono : forall n m, n ~= 0 -> (n < m <-> P n < P m). Proof. intros n m H1; split; intro H2. assert (m ~= 0). apply neq_0_lt_0. now apply lt_lt_0 with n. now rewrite <- (succ_pred n) in H2; rewrite <- (succ_pred m) in H2 ; [apply succ_lt_mono | | |]. assert (m ~= 0). apply neq_0_lt_0. apply lt_lt_0 with (P n). apply lt_le_trans with (P m). assumption. apply le_pred_l. apply succ_lt_mono in H2. now do 2 rewrite succ_pred in H2. Qed. Theorem lt_succ_lt_pred : forall n m, S n < m <-> n < P m. Proof. intros n m. rewrite pred_lt_mono by apply neq_succ_0. now rewrite pred_succ. Qed. Theorem le_succ_le_pred : forall n m, S n <= m -> n <= P m. (* Converse is false for n == m == 0 *) Proof. intros n m H. apply lt_le_pred. now apply le_succ_l. Qed. 
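(** Illustrative note (not part of the original development): the
    "Converse is false for n == m == 0" remarks can be made concrete.
    Taking [n == m == 0] in [lt_le_pred]: the conclusion [0 <= P 0] holds
    (indeed [P 0 == 0]), yet the hypothesis [0 < 0] does not, so the
    implication cannot be reversed.  The same degenerate instance refutes
    the converses of [lt_pred_le] and [le_succ_le_pred]. *)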
Theorem lt_pred_lt_succ : forall n m, P n < m -> n < S m. (* Converse is false for n == m == 0 *) Proof. intros n m H. apply lt_succ_r. now apply lt_pred_le. Qed. Theorem le_pred_le_succ : forall n m, P n <= m <-> n <= S m. Proof. intros n m; cases n. rewrite pred_0. split; intro H; apply le_0_l. intro n. rewrite pred_succ. apply succ_le_mono. Qed. End NOrderProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NDefOps.v0000640000175000001440000002665212010532755021735 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* b) n. Arguments if_zero [A] a b n. Instance if_zero_wd (A : Type) : Proper (Logic.eq ==> Logic.eq ==> N.eq ==> Logic.eq) (@if_zero A). Proof. unfold if_zero. (* TODO : solve_proper : SLOW + BUG *) f_equiv'. Qed. Theorem if_zero_0 : forall (A : Type) (a b : A), if_zero a b 0 = a. Proof. unfold if_zero; intros; now rewrite recursion_0. Qed. Theorem if_zero_succ : forall (A : Type) (a b : A) (n : N.t), if_zero a b (S n) = b. Proof. intros; unfold if_zero. now rewrite recursion_succ. Qed. (*****************************************************) (** Addition *) Definition def_add (x y : N.t) := recursion y (fun _ => S) x. Local Infix "+++" := def_add (at level 50, left associativity). Instance def_add_wd : Proper (N.eq ==> N.eq ==> N.eq) def_add. Proof. unfold def_add. f_equiv'. Qed. Theorem def_add_0_l : forall y, 0 +++ y == y. Proof. intro y. unfold def_add. now rewrite recursion_0. Qed. Theorem def_add_succ_l : forall x y, S x +++ y == S (x +++ y). Proof. intros x y; unfold def_add. rewrite recursion_succ; f_equiv'. Qed. Theorem def_add_add : forall n m, n +++ m == n + m. Proof. intros n m; induct n. now rewrite def_add_0_l, add_0_l. intros n H. now rewrite def_add_succ_l, add_succ_l, H. Qed. (*****************************************************) (** Multiplication *) Definition def_mul (x y : N.t) := recursion 0 (fun _ p => p +++ x) y. Local Infix "**" := def_mul (at level 40, left associativity). Instance def_mul_wd : Proper (N.eq ==> N.eq ==> N.eq) def_mul. Proof. unfold def_mul. (* TODO : solve_proper SLOW + BUG *) f_equiv'. Qed. Theorem def_mul_0_r : forall x, x ** 0 == 0. Proof. intro. unfold def_mul. now rewrite recursion_0. Qed. Theorem def_mul_succ_r : forall x y, x ** S y == x ** y +++ x. Proof. intros x y; unfold def_mul. rewrite recursion_succ; auto with *. f_equiv'. Qed. Theorem def_mul_mul : forall n m, n ** m == n * m. Proof. intros n m; induct m. now rewrite def_mul_0_r, mul_0_r. intros m IH; now rewrite def_mul_succ_r, mul_succ_r, def_add_add, IH. Qed. (*****************************************************) (** Order *) Definition ltb (m : N.t) : N.t -> bool := recursion (if_zero false true) (fun _ f n => recursion false (fun n' _ => f n') n) m. Local Infix "<<" := ltb (at level 70, no associativity). Instance ltb_wd : Proper (N.eq ==> N.eq ==> Logic.eq) ltb. Proof. unfold ltb. f_equiv'. Qed. Theorem ltb_base : forall n, 0 << n = if_zero false true n. Proof. intro n; unfold ltb; now rewrite recursion_0. Qed. Theorem ltb_step : forall m n, S m << n = recursion false (fun n' _ => m << n') n. Proof. intros m n; unfold ltb at 1. f_equiv. rewrite recursion_succ; f_equiv'. reflexivity. Qed. (* Above, we rewrite applications of function. Is it possible to rewrite functions themselves, i.e., rewrite (recursion lt_base lt_step (S n)) to lt_step n (recursion lt_base lt_step n)? *) Theorem ltb_0 : forall n, n << 0 = false. Proof. cases n. 
rewrite ltb_base; now rewrite if_zero_0. intro n; rewrite ltb_step. now rewrite recursion_0. Qed. Theorem ltb_0_succ : forall n, 0 << S n = true. Proof. intro n; rewrite ltb_base; now rewrite if_zero_succ. Qed. Theorem succ_ltb_mono : forall n m, (S n << S m) = (n << m). Proof. intros n m. rewrite ltb_step. rewrite recursion_succ; f_equiv'. Qed. Theorem ltb_lt : forall n m, n << m = true <-> n < m. Proof. double_induct n m. cases m. rewrite ltb_0. split; intro H; [discriminate H | false_hyp H nlt_0_r]. intro n. rewrite ltb_0_succ. split; intro; [apply lt_0_succ | reflexivity]. intro n. rewrite ltb_0. split; intro H; [discriminate | false_hyp H nlt_0_r]. intros n m. rewrite succ_ltb_mono. now rewrite <- succ_lt_mono. Qed. Theorem ltb_ge : forall n m, n << m = false <-> n >= m. Proof. intros. rewrite <- not_true_iff_false, ltb_lt. apply nlt_ge. Qed. (*****************************************************) (** Even *) Definition even (x : N.t) := recursion true (fun _ p => negb p) x. Instance even_wd : Proper (N.eq==>Logic.eq) even. Proof. unfold even. f_equiv'. Qed. Theorem even_0 : even 0 = true. Proof. unfold even. now rewrite recursion_0. Qed. Theorem even_succ : forall x, even (S x) = negb (even x). Proof. unfold even. intro x; rewrite recursion_succ; f_equiv'. Qed. (*****************************************************) (** Division by 2 *) Definition half_aux (x : N.t) : N.t * N.t := recursion (0, 0) (fun _ p => let (x1, x2) := p in (S x2, x1)) x. Definition half (x : N.t) := snd (half_aux x). Instance half_aux_wd : Proper (N.eq ==> N.eq*N.eq) half_aux. Proof. intros x x' Hx. unfold half_aux. f_equiv; trivial. intros y y' Hy (u,v) (u',v') (Hu,Hv). compute in *. rewrite Hu, Hv; auto with *. Qed. Instance half_wd : Proper (N.eq==>N.eq) half. Proof. unfold half. f_equiv'. Qed. Lemma half_aux_0 : half_aux 0 = (0,0). Proof. unfold half_aux. rewrite recursion_0; auto. Qed. Lemma half_aux_succ : forall x, half_aux (S x) = (S (snd (half_aux x)), fst (half_aux x)). Proof. intros. remember (half_aux x) as h. destruct h as (f,s); simpl in *. unfold half_aux in *. rewrite recursion_succ, <- Heqh; simpl; f_equiv'. Qed. Theorem half_aux_spec : forall n, n == fst (half_aux n) + snd (half_aux n). Proof. apply induction. intros x x' Hx. setoid_rewrite Hx; auto with *. rewrite half_aux_0; simpl; rewrite add_0_l; auto with *. intros. rewrite half_aux_succ. simpl. rewrite add_succ_l, add_comm; auto. now f_equiv. Qed. Theorem half_aux_spec2 : forall n, fst (half_aux n) == snd (half_aux n) \/ fst (half_aux n) == S (snd (half_aux n)). Proof. apply induction. intros x x' Hx. setoid_rewrite Hx; auto with *. rewrite half_aux_0; simpl. auto with *. intros. rewrite half_aux_succ; simpl. destruct H; auto with *. right; now f_equiv. Qed. Theorem half_0 : half 0 == 0. Proof. unfold half. rewrite half_aux_0; simpl; auto with *. Qed. Theorem half_1 : half 1 == 0. Proof. unfold half. rewrite one_succ, half_aux_succ, half_aux_0; simpl; auto with *. Qed. Theorem half_double : forall n, n == 2 * half n \/ n == 1 + 2 * half n. Proof. intros. unfold half. nzsimpl'. destruct (half_aux_spec2 n) as [H|H]; [left|right]. rewrite <- H at 1. apply half_aux_spec. rewrite <- add_succ_l. rewrite <- H at 1. apply half_aux_spec. Qed. Theorem half_upper_bound : forall n, 2 * half n <= n. Proof. intros. destruct (half_double n) as [E|E]; rewrite E at 2. apply le_refl. nzsimpl. apply le_le_succ_r, le_refl. Qed. Theorem half_lower_bound : forall n, n <= 1 + 2 * half n. Proof. intros. destruct (half_double n) as [E|E]; rewrite E at 1. 
nzsimpl. apply le_le_succ_r, le_refl. apply le_refl. Qed. Theorem half_nz : forall n, 1 < n -> 0 < half n. Proof. intros n LT. assert (LE : 0 <= half n) by apply le_0_l. le_elim LE; auto. destruct (half_double n) as [E|E]; rewrite <- LE, mul_0_r, ?add_0_r in E; rewrite E in LT. order'. order. Qed. Theorem half_decrease : forall n, 0 < n -> half n < n. Proof. intros n LT. destruct (half_double n) as [E|E]; rewrite E at 2; nzsimpl'. rewrite <- add_0_l at 1. rewrite <- add_lt_mono_r. assert (LE : 0 <= half n) by apply le_0_l. le_elim LE; auto. rewrite <- LE, mul_0_r in E. rewrite E in LT. destruct (nlt_0_r _ LT). rewrite <- add_succ_l. rewrite <- add_0_l at 1. rewrite <- add_lt_mono_r. apply lt_0_succ. Qed. (*****************************************************) (** Power *) Definition pow (n m : N.t) := recursion 1 (fun _ r => n*r) m. Local Infix "^^" := pow (at level 30, right associativity). Instance pow_wd : Proper (N.eq==>N.eq==>N.eq) pow. Proof. unfold pow. f_equiv'. Qed. Lemma pow_0 : forall n, n^^0 == 1. Proof. intros. unfold pow. rewrite recursion_0. auto with *. Qed. Lemma pow_succ : forall n m, n^^(S m) == n*(n^^m). Proof. intros. unfold pow. rewrite recursion_succ; f_equiv'. Qed. (*****************************************************) (** Logarithm for the base 2 *) Definition log (x : N.t) : N.t := strong_rec 0 (fun g x => if x << 2 then 0 else S (g (half x))) x. Instance log_prewd : Proper ((N.eq==>N.eq)==>N.eq==>N.eq) (fun g x => if x<<2 then 0 else S (g (half x))). Proof. intros g g' Hg n n' Hn. rewrite Hn. destruct (n' << 2); auto with *. f_equiv. apply Hg. now f_equiv. Qed. Instance log_wd : Proper (N.eq==>N.eq) log. Proof. intros x x' Exx'. unfold log. apply strong_rec_wd; f_equiv'. Qed. Lemma log_good_step : forall n h1 h2, (forall m, m < n -> h1 m == h2 m) -> (if n << 2 then 0 else S (h1 (half n))) == (if n << 2 then 0 else S (h2 (half n))). Proof. intros n h1 h2 E. destruct (n<<2) eqn:H. auto with *. f_equiv. apply E, half_decrease. rewrite two_succ, <- not_true_iff_false, ltb_lt, nlt_ge, le_succ_l in H. order'. Qed. Hint Resolve log_good_step. Theorem log_init : forall n, n < 2 -> log n == 0. Proof. intros n Hn. unfold log. rewrite strong_rec_fixpoint; auto with *. replace (n << 2) with true; auto with *. symmetry. now rewrite ltb_lt. Qed. Theorem log_step : forall n, 2 <= n -> log n == S (log (half n)). Proof. intros n Hn. unfold log. rewrite strong_rec_fixpoint; auto with *. replace (n << 2) with false; auto with *. symmetry. rewrite <- not_true_iff_false, ltb_lt, nlt_ge; auto. Qed. Theorem pow2_log : forall n, 0 < n -> half n < 2^^(log n) <= n. Proof. intro n; generalize (le_refl n). set (k:=n) at -2. clearbody k. revert k. pattern n. apply induction; clear n. intros n n' Hn; setoid_rewrite Hn; auto with *. intros k Hk1 Hk2. le_elim Hk1. destruct (nlt_0_r _ Hk1). rewrite Hk1 in Hk2. destruct (nlt_0_r _ Hk2). intros n IH k Hk1 Hk2. destruct (lt_ge_cases k 2) as [LT|LE]. (* base *) rewrite log_init, pow_0 by auto. rewrite <- le_succ_l, <- one_succ in Hk2. le_elim Hk2. rewrite two_succ, <- nle_gt, le_succ_l in LT. destruct LT; auto. rewrite <- Hk2. rewrite half_1; auto using lt_0_1, le_refl. (* step *) rewrite log_step, pow_succ by auto. rewrite two_succ, le_succ_l in LE. destruct (IH (half k)) as (IH1,IH2). rewrite <- lt_succ_r. apply lt_le_trans with k; auto. now apply half_decrease. apply half_nz; auto. set (K:=2^^log (half k)) in *; clearbody K. split. rewrite <- le_succ_l in IH1. apply mul_le_mono_l with (p:=2) in IH1. eapply lt_le_trans; eauto. nzsimpl'. 
rewrite lt_succ_r. eapply le_trans; [ eapply half_lower_bound | ]. nzsimpl'; apply le_refl. eapply le_trans; [ | eapply half_upper_bound ]. apply mul_le_mono_l; auto. Qed. End NdefOpsProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NLog.v0000640000175000001440000000155512010532755021271 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* a mod b < b. Proof. intros. apply mod_bound_pos; auto'. Qed. (** Another formulation of the main equation *) Lemma mod_eq : forall a b, b~=0 -> a mod b == a - b*(a/b). Proof. intros. symmetry. apply add_sub_eq_l. symmetry. now apply div_mod. Qed. (** Uniqueness theorems *) Theorem div_mod_unique : forall b q1 q2 r1 r2, r1 r2 b*q1+r1 == b*q2+r2 -> q1 == q2 /\ r1 == r2. Proof. intros. apply div_mod_unique with b; auto'. Qed. Theorem div_unique: forall a b q r, r a == b*q + r -> q == a/b. Proof. intros; apply div_unique with r; auto'. Qed. Theorem mod_unique: forall a b q r, r a == b*q + r -> r == a mod b. Proof. intros. apply mod_unique with q; auto'. Qed. Theorem div_unique_exact: forall a b q, b~=0 -> a == b*q -> q == a/b. Proof. intros. apply div_unique_exact; auto'. Qed. (** A division by itself returns 1 *) Lemma div_same : forall a, a~=0 -> a/a == 1. Proof. intros. apply div_same; auto'. Qed. Lemma mod_same : forall a, a~=0 -> a mod a == 0. Proof. intros. apply mod_same; auto'. Qed. (** A division of a small number by a bigger one yields zero. *) Theorem div_small: forall a b, a a/b == 0. Proof. intros. apply div_small; auto'. Qed. (** Same situation, in term of modulo: *) Theorem mod_small: forall a b, a a mod b == a. Proof. intros. apply mod_small; auto'. Qed. (** * Basic values of divisions and modulo. *) Lemma div_0_l: forall a, a~=0 -> 0/a == 0. Proof. intros. apply div_0_l; auto'. Qed. Lemma mod_0_l: forall a, a~=0 -> 0 mod a == 0. Proof. intros. apply mod_0_l; auto'. Qed. Lemma div_1_r: forall a, a/1 == a. Proof. intros. apply div_1_r; auto'. Qed. Lemma mod_1_r: forall a, a mod 1 == 0. Proof. intros. apply mod_1_r; auto'. Qed. Lemma div_1_l: forall a, 1 1/a == 0. Proof. exact div_1_l. Qed. Lemma mod_1_l: forall a, 1 1 mod a == 1. Proof. exact mod_1_l. Qed. Lemma div_mul : forall a b, b~=0 -> (a*b)/b == a. Proof. intros. apply div_mul; auto'. Qed. Lemma mod_mul : forall a b, b~=0 -> (a*b) mod b == 0. Proof. intros. apply mod_mul; auto'. Qed. (** * Order results about mod and div *) (** A modulo cannot grow beyond its starting point. *) Theorem mod_le: forall a b, b~=0 -> a mod b <= a. Proof. intros. apply mod_le; auto'. Qed. Lemma div_str_pos : forall a b, 0 0 < a/b. Proof. exact div_str_pos. Qed. Lemma div_small_iff : forall a b, b~=0 -> (a/b==0 <-> a (a mod b == a <-> a (0 b<=a). Proof. intros. apply div_str_pos_iff; auto'. Qed. (** As soon as the divisor is strictly greater than 1, the division is strictly decreasing. *) Lemma div_lt : forall a b, 0 1 a/b < a. Proof. exact div_lt. Qed. (** [le] is compatible with a positive division. *) Lemma div_le_mono : forall a b c, c~=0 -> a<=b -> a/c <= b/c. Proof. intros. apply div_le_mono; auto'. Qed. Lemma mul_div_le : forall a b, b~=0 -> b*(a/b) <= a. Proof. intros. apply mul_div_le; auto'. Qed. Lemma mul_succ_div_gt: forall a b, b~=0 -> a < b*(S (a/b)). Proof. intros; apply mul_succ_div_gt; auto'. Qed. (** The previous inequality is exact iff the modulo is zero. *) Lemma div_exact : forall a b, b~=0 -> (a == b*(a/b) <-> a mod b == 0). Proof. intros. apply div_exact; auto'. Qed. 
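(* Illustrative sketch, not part of the original file (the name is ours): for an
   exact multiple, the equation characterized by [div_exact] can also be obtained
   directly from [div_mul] and commutativity of multiplication. *)
Example div_exact_mul_example : forall a b, b~=0 -> a*b == b*((a*b)/b).
Proof.
intros a b Hb. rewrite div_mul by assumption. apply mul_comm.
Qed.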
(** Some additionnal inequalities about div. *) Theorem div_lt_upper_bound: forall a b q, b~=0 -> a < b*q -> a/b < q. Proof. intros. apply div_lt_upper_bound; auto'. Qed. Theorem div_le_upper_bound: forall a b q, b~=0 -> a <= b*q -> a/b <= q. Proof. intros; apply div_le_upper_bound; auto'. Qed. Theorem div_le_lower_bound: forall a b q, b~=0 -> b*q <= a -> q <= a/b. Proof. intros; apply div_le_lower_bound; auto'. Qed. (** A division respects opposite monotonicity for the divisor *) Lemma div_le_compat_l: forall p q r, 0 p/r <= p/q. Proof. intros. apply div_le_compat_l. auto'. auto. Qed. (** * Relations between usual operations and mod and div *) Lemma mod_add : forall a b c, c~=0 -> (a + b * c) mod c == a mod c. Proof. intros. apply mod_add; auto'. Qed. Lemma div_add : forall a b c, c~=0 -> (a + b * c) / c == a / c + b. Proof. intros. apply div_add; auto'. Qed. Lemma div_add_l: forall a b c, b~=0 -> (a * b + c) / b == a + c / b. Proof. intros. apply div_add_l; auto'. Qed. (** Cancellations. *) Lemma div_mul_cancel_r : forall a b c, b~=0 -> c~=0 -> (a*c)/(b*c) == a/b. Proof. intros. apply div_mul_cancel_r; auto'. Qed. Lemma div_mul_cancel_l : forall a b c, b~=0 -> c~=0 -> (c*a)/(c*b) == a/b. Proof. intros. apply div_mul_cancel_l; auto'. Qed. Lemma mul_mod_distr_r: forall a b c, b~=0 -> c~=0 -> (a*c) mod (b*c) == (a mod b) * c. Proof. intros. apply mul_mod_distr_r; auto'. Qed. Lemma mul_mod_distr_l: forall a b c, b~=0 -> c~=0 -> (c*a) mod (c*b) == c * (a mod b). Proof. intros. apply mul_mod_distr_l; auto'. Qed. (** Operations modulo. *) Theorem mod_mod: forall a n, n~=0 -> (a mod n) mod n == a mod n. Proof. intros. apply mod_mod; auto'. Qed. Lemma mul_mod_idemp_l : forall a b n, n~=0 -> ((a mod n)*b) mod n == (a*b) mod n. Proof. intros. apply mul_mod_idemp_l; auto'. Qed. Lemma mul_mod_idemp_r : forall a b n, n~=0 -> (a*(b mod n)) mod n == (a*b) mod n. Proof. intros. apply mul_mod_idemp_r; auto'. Qed. Theorem mul_mod: forall a b n, n~=0 -> (a * b) mod n == ((a mod n) * (b mod n)) mod n. Proof. intros. apply mul_mod; auto'. Qed. Lemma add_mod_idemp_l : forall a b n, n~=0 -> ((a mod n)+b) mod n == (a+b) mod n. Proof. intros. apply add_mod_idemp_l; auto'. Qed. Lemma add_mod_idemp_r : forall a b n, n~=0 -> (a+(b mod n)) mod n == (a+b) mod n. Proof. intros. apply add_mod_idemp_r; auto'. Qed. Theorem add_mod: forall a b n, n~=0 -> (a+b) mod n == (a mod n + b mod n) mod n. Proof. intros. apply add_mod; auto'. Qed. Lemma div_div : forall a b c, b~=0 -> c~=0 -> (a/b)/c == a/(b*c). Proof. intros. apply div_div; auto'. Qed. Lemma mod_mul_r : forall a b c, b~=0 -> c~=0 -> a mod (b*c) == a mod b + b*((a/b) mod c). Proof. intros. apply mod_mul_r; auto'. Qed. (** A last inequality: *) Theorem div_mul_le: forall a b c, b~=0 -> c*(a/b) <= (c*a)/b. Proof. intros. apply div_mul_le; auto'. Qed. (** mod is related to divisibility *) Lemma mod_divides : forall a b, b~=0 -> (a mod b == 0 <-> exists c, a == b*c). Proof. intros. apply mod_divides; auto'. Qed. End NDivProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NAddOrder.v0000640000175000001440000000301212010532755022222 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n < m + p. Proof. intros n m p H; rewrite <- (add_0_r n). apply add_lt_le_mono; [assumption | apply le_0_l]. Qed. Theorem lt_lt_add_l : forall n m p, n < m -> n < p + m. Proof. intros n m p; rewrite add_comm; apply lt_lt_add_r. Qed. Theorem add_pos_l : forall n m, 0 < n -> 0 < n + m. Proof. 
intros; apply add_pos_nonneg. assumption. apply le_0_l. Qed. Theorem add_pos_r : forall n m, 0 < m -> 0 < n + m. Proof. intros; apply add_nonneg_pos. apply le_0_l. assumption. Qed. End NAddOrderProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NAdd.v0000640000175000001440000000531712010532755021240 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n == 0 /\ m == 0. Proof. intros n m; induct n. nzsimpl; intuition. intros n IH. nzsimpl. setoid_replace (S (n + m) == 0) with False by (apply neg_false; apply neq_succ_0). setoid_replace (S n == 0) with False by (apply neg_false; apply neq_succ_0). tauto. Qed. Theorem eq_add_succ : forall n m, (exists p, n + m == S p) <-> (exists n', n == S n') \/ (exists m', m == S m'). Proof. intros n m; cases n. split; intro H. destruct H as [p H]. rewrite add_0_l in H; right; now exists p. destruct H as [[n' H] | [m' H]]. symmetry in H; false_hyp H neq_succ_0. exists m'; now rewrite add_0_l. intro n; split; intro H. left; now exists n. exists (n + m); now rewrite add_succ_l. Qed. Theorem eq_add_1 : forall n m, n + m == 1 -> n == 1 /\ m == 0 \/ n == 0 /\ m == 1. Proof. intros n m. rewrite one_succ. intro H. assert (H1 : exists p, n + m == S p) by now exists 0. apply eq_add_succ in H1. destruct H1 as [[n' H1] | [m' H1]]. left. rewrite H1 in H; rewrite add_succ_l in H; apply succ_inj in H. apply eq_add_0 in H. destruct H as [H2 H3]; rewrite H2 in H1; now split. right. rewrite H1 in H; rewrite add_succ_r in H; apply succ_inj in H. apply eq_add_0 in H. destruct H as [H2 H3]; rewrite H3 in H1; now split. Qed. Theorem succ_add_discr : forall n m, m ~= S (n + m). Proof. intro n; induct m. apply neq_sym. apply neq_succ_0. intros m IH H. apply succ_inj in H. rewrite add_succ_r in H. unfold not in IH; now apply IH. Qed. Theorem add_pred_l : forall n m, n ~= 0 -> P n + m == P (n + m). Proof. intros n m; cases n. intro H; now elim H. intros n IH; rewrite add_succ_l; now do 2 rewrite pred_succ. Qed. Theorem add_pred_r : forall n m, m ~= 0 -> n + P m == P (n + m). Proof. intros n m H; rewrite (add_comm n (P m)); rewrite (add_comm n m); now apply add_pred_l. Qed. End NAddProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NMaxMin.v0000640000175000001440000001000112010532755021723 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n * n < m * m. Proof. intros n m; split; intro; [apply square_lt_mono_nonneg | apply square_lt_simpl_nonneg]; try assumption; apply le_0_l. Qed. Theorem square_le_mono : forall n m, n <= m <-> n * n <= m * m. Proof. intros n m; split; intro; [apply square_le_mono_nonneg | apply square_le_simpl_nonneg]; try assumption; apply le_0_l. Qed. Theorem mul_le_mono_l : forall n m p, n <= m -> p * n <= p * m. Proof. intros; apply mul_le_mono_nonneg_l. apply le_0_l. assumption. Qed. Theorem mul_le_mono_r : forall n m p, n <= m -> n * p <= m * p. Proof. intros; apply mul_le_mono_nonneg_r. apply le_0_l. assumption. Qed. Theorem mul_lt_mono : forall n m p q, n < m -> p < q -> n * p < m * q. Proof. intros; apply mul_lt_mono_nonneg; try assumption; apply le_0_l. Qed. Theorem mul_le_mono : forall n m p q, n <= m -> p <= q -> n * p <= m * q. Proof. intros; apply mul_le_mono_nonneg; try assumption; apply le_0_l. Qed. Theorem lt_0_mul' : forall n m, n * m > 0 <-> n > 0 /\ m > 0. Proof. intros n m; split; [intro H | intros [H1 H2]]. apply lt_0_mul in H. 
destruct H as [[H1 H2] | [H1 H2]]. now split. false_hyp H1 nlt_0_r. now apply mul_pos_pos. Qed. Notation mul_pos := lt_0_mul' (only parsing). Theorem eq_mul_1 : forall n m, n * m == 1 <-> n == 1 /\ m == 1. Proof. intros n m. split; [| intros [H1 H2]; now rewrite H1, H2, mul_1_l]. intro H; destruct (lt_trichotomy n 1) as [H1 | [H1 | H1]]. apply lt_1_r in H1. rewrite H1, mul_0_l in H. order'. rewrite H1, mul_1_l in H; now split. destruct (eq_0_gt_0_cases m) as [H2 | H2]. rewrite H2, mul_0_r in H. order'. apply (mul_lt_mono_pos_r m) in H1; [| assumption]. rewrite mul_1_l in H1. assert (H3 : 1 < n * m) by now apply (lt_1_l m). rewrite H in H3; false_hyp H3 lt_irrefl. Qed. (** Alternative name : *) Definition mul_eq_1 := eq_mul_1. End NMulOrderProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NSqrt.v0000640000175000001440000000456312010532755021503 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* √a == b := sqrt_unique. Lemma sqrt_square : forall a, √(a*a) == a. Proof. wrap sqrt_square. Qed. Definition sqrt_le_mono : forall a b, a<=b -> √a <= √b := sqrt_le_mono. Definition sqrt_lt_cancel : forall a b, √a < √b -> a < b := sqrt_lt_cancel. Lemma sqrt_le_square : forall a b, b*b<=a <-> b <= √a. Proof. wrap sqrt_le_square. Qed. Lemma sqrt_lt_square : forall a b, a √a < b. Proof. wrap sqrt_lt_square. Qed. Definition sqrt_0 := sqrt_0. Definition sqrt_1 := sqrt_1. Definition sqrt_2 := sqrt_2. Definition sqrt_lt_lin : forall a, 1 √a m -> n - m ~= 0. Proof. intros n m H; elim H using lt_ind_rel; clear n m H. solve_proper. intro; rewrite sub_0_r; apply neq_succ_0. intros; now rewrite sub_succ. Qed. Theorem add_sub_assoc : forall n m p, p <= m -> n + (m - p) == (n + m) - p. Proof. intros n m p; induct p. intro; now do 2 rewrite sub_0_r. intros p IH H. do 2 rewrite sub_succ_r. rewrite <- IH by (apply lt_le_incl; now apply le_succ_l). rewrite add_pred_r by (apply sub_gt; now apply le_succ_l). reflexivity. Qed. Theorem sub_succ_l : forall n m, n <= m -> S m - n == S (m - n). Proof. intros n m H. rewrite <- (add_1_l m). rewrite <- (add_1_l (m - n)). symmetry; now apply add_sub_assoc. Qed. Theorem add_sub : forall n m, (n + m) - m == n. Proof. intros n m. rewrite <- add_sub_assoc by (apply le_refl). rewrite sub_diag; now rewrite add_0_r. Qed. Theorem sub_add : forall n m, n <= m -> (m - n) + n == m. Proof. intros n m H. rewrite add_comm. rewrite add_sub_assoc by assumption. rewrite add_comm. apply add_sub. Qed. Theorem add_sub_eq_l : forall n m p, m + p == n -> n - m == p. Proof. intros n m p H. symmetry. assert (H1 : m + p - m == n - m) by now rewrite H. rewrite add_comm in H1. now rewrite add_sub in H1. Qed. Theorem add_sub_eq_r : forall n m p, m + p == n -> n - p == m. Proof. intros n m p H; rewrite add_comm in H; now apply add_sub_eq_l. Qed. (* This could be proved by adding m to both sides. Then the proof would use add_sub_assoc and sub_0_le, which is proven below. *) Theorem add_sub_eq_nz : forall n m p, p ~= 0 -> n - m == p -> m + p == n. Proof. intros n m p H; double_induct n m. intros m H1; rewrite sub_0_l in H1. symmetry in H1; false_hyp H1 H. intro n; rewrite sub_0_r; now rewrite add_0_l. intros n m IH H1. rewrite sub_succ in H1. apply IH in H1. rewrite add_succ_l; now rewrite H1. Qed. Theorem sub_add_distr : forall n m p, n - (m + p) == (n - m) - p. Proof. intros n m; induct p. rewrite add_0_r; now rewrite sub_0_r. intros p IH. rewrite add_succ_r; do 2 rewrite sub_succ_r. now rewrite IH. Qed. 
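(* Illustrative corollary, not part of the original file (the name is ours): two
   successive subtractions commute, by [sub_add_distr] and [add_comm]. *)
Example sub_sub_comm_example : forall n m p, (n - m) - p == (n - p) - m.
Proof.
intros n m p. do 2 rewrite <- sub_add_distr. now rewrite add_comm.
Qed.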
Theorem add_sub_swap : forall n m p, p <= n -> n + m - p == n - p + m. Proof. intros n m p H. rewrite (add_comm n m). rewrite <- add_sub_assoc by assumption. now rewrite (add_comm m (n - p)). Qed. (** Sub and order *) Theorem le_sub_l : forall n m, n - m <= n. Proof. intro n; induct m. rewrite sub_0_r; now apply eq_le_incl. intros m IH. rewrite sub_succ_r. apply le_trans with (n - m); [apply le_pred_l | assumption]. Qed. Theorem sub_0_le : forall n m, n - m == 0 <-> n <= m. Proof. double_induct n m. intro m; split; intro; [apply le_0_l | apply sub_0_l]. intro m; rewrite sub_0_r; split; intro H; [false_hyp H neq_succ_0 | false_hyp H nle_succ_0]. intros n m H. rewrite <- succ_le_mono. now rewrite sub_succ. Qed. Theorem sub_add_le : forall n m, n <= n - m + m. Proof. intros. destruct (le_ge_cases n m) as [LE|GE]. rewrite <- sub_0_le in LE. rewrite LE; nzsimpl. now rewrite <- sub_0_le. rewrite sub_add by assumption. apply le_refl. Qed. Theorem le_sub_le_add_r : forall n m p, n - p <= m <-> n <= m + p. Proof. intros n m p. split; intros LE. rewrite (add_le_mono_r _ _ p) in LE. apply le_trans with (n-p+p); auto using sub_add_le. destruct (le_ge_cases n p) as [LE'|GE]. rewrite <- sub_0_le in LE'. rewrite LE'. apply le_0_l. rewrite (add_le_mono_r _ _ p). now rewrite sub_add. Qed. Theorem le_sub_le_add_l : forall n m p, n - m <= p <-> n <= m + p. Proof. intros n m p. rewrite add_comm; apply le_sub_le_add_r. Qed. Theorem lt_sub_lt_add_r : forall n m p, n - p < m -> n < m + p. Proof. intros n m p LT. rewrite (add_lt_mono_r _ _ p) in LT. apply le_lt_trans with (n-p+p); auto using sub_add_le. Qed. (** Unfortunately, we do not have [n < m + p -> n - p < m]. For instance [1<0+2] but not [1-2<0]. *) Theorem lt_sub_lt_add_l : forall n m p, n - m < p -> n < m + p. Proof. intros n m p. rewrite add_comm; apply lt_sub_lt_add_r. Qed. Theorem le_add_le_sub_r : forall n m p, n + p <= m -> n <= m - p. Proof. intros n m p LE. apply (add_le_mono_r _ _ p). rewrite sub_add. assumption. apply le_trans with (n+p); trivial. rewrite <- (add_0_l p) at 1. rewrite <- add_le_mono_r. apply le_0_l. Qed. (** Unfortunately, we do not have [n <= m - p -> n + p <= m]. For instance [0<=1-2] but not [2+0<=1]. *) Theorem le_add_le_sub_l : forall n m p, n + p <= m -> p <= m - n. Proof. intros n m p. rewrite add_comm; apply le_add_le_sub_r. Qed. Theorem lt_add_lt_sub_r : forall n m p, n + p < m <-> n < m - p. Proof. intros n m p. destruct (le_ge_cases p m) as [LE|GE]. rewrite <- (sub_add p m) at 1 by assumption. now rewrite <- add_lt_mono_r. assert (GE' := GE). rewrite <- sub_0_le in GE'; rewrite GE'. split; intros LT. elim (lt_irrefl m). apply le_lt_trans with (n+p); trivial. rewrite <- (add_0_l m). apply add_le_mono. apply le_0_l. assumption. now elim (nlt_0_r n). Qed. Theorem lt_add_lt_sub_l : forall n m p, n + p < m <-> p < m - n. Proof. intros n m p. rewrite add_comm; apply lt_add_lt_sub_r. Qed. Theorem sub_lt : forall n m, m <= n -> 0 < m -> n - m < n. Proof. intros n m LE LT. assert (LE' := le_sub_l n m). rewrite lt_eq_cases in LE'. destruct LE' as [LT'|EQ]. assumption. apply add_sub_eq_nz in EQ; [|order]. rewrite (add_lt_mono_r _ _ n), add_0_l in LT. order. Qed. Lemma sub_le_mono_r : forall n m p, n <= m -> n-p <= m-p. Proof. intros. rewrite le_sub_le_add_r. transitivity m. assumption. apply sub_add_le. Qed. Lemma sub_le_mono_l : forall n m p, n <= m -> p-m <= p-n. Proof. intros. rewrite le_sub_le_add_r. transitivity (p-n+n); [ apply sub_add_le | now apply add_le_mono_l]. Qed. 
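(* Illustrative combination, not part of the original file (the name is ours):
   monotonicity of subtraction in both arguments, from the two lemmas above. *)
Example sub_le_mono_example : forall n m p q, n <= m -> q <= p -> n - p <= m - q.
Proof.
intros n m p q H1 H2. apply le_trans with (m - p).
now apply sub_le_mono_r. now apply sub_le_mono_l.
Qed.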
(** Sub and mul *) Theorem mul_pred_r : forall n m, n * (P m) == n * m - n. Proof. intros n m; cases m. now rewrite pred_0, mul_0_r, sub_0_l. intro m; rewrite pred_succ, mul_succ_r, <- add_sub_assoc. now rewrite sub_diag, add_0_r. now apply eq_le_incl. Qed. Theorem mul_sub_distr_r : forall n m p, (n - m) * p == n * p - m * p. Proof. intros n m p; induct n. now rewrite sub_0_l, mul_0_l, sub_0_l. intros n IH. destruct (le_gt_cases m n) as [H | H]. rewrite sub_succ_l by assumption. do 2 rewrite mul_succ_l. rewrite (add_comm ((n - m) * p) p), (add_comm (n * p) p). rewrite <- (add_sub_assoc p (n * p) (m * p)) by now apply mul_le_mono_r. now apply add_cancel_l. assert (H1 : S n <= m); [now apply le_succ_l |]. setoid_replace (S n - m) with 0 by now apply sub_0_le. setoid_replace ((S n * p) - m * p) with 0 by (apply sub_0_le; now apply mul_le_mono_r). apply mul_0_l. Qed. Theorem mul_sub_distr_l : forall n m p, p * (n - m) == p * n - p * m. Proof. intros n m p; rewrite (mul_comm p (n - m)), (mul_comm p n), (mul_comm p m). apply mul_sub_distr_r. Qed. (** Alternative definitions of [<=] and [<] based on [+] *) Definition le_alt n m := exists p, p + n == m. Definition lt_alt n m := exists p, S p + n == m. Lemma le_equiv : forall n m, le_alt n m <-> n <= m. Proof. split. intros (p,H). rewrite <- H, add_comm. apply le_add_r. intro H. exists (m-n). now apply sub_add. Qed. Lemma lt_equiv : forall n m, lt_alt n m <-> n < m. Proof. split. intros (p,H). rewrite <- H, add_succ_l, lt_succ_r, add_comm. apply le_add_r. intro H. exists (m-S n). rewrite add_succ_l, <- add_succ_r. apply sub_add. now rewrite le_succ_l. Qed. Instance le_alt_wd : Proper (eq==>eq==>iff) le_alt. Proof. intros x x' Hx y y' Hy; unfold le_alt. setoid_rewrite Hx. setoid_rewrite Hy. auto with *. Qed. Instance lt_alt_wd : Proper (eq==>eq==>iff) lt_alt. Proof. intros x x' Hx y y' Hy; unfold lt_alt. setoid_rewrite Hx. setoid_rewrite Hy. auto with *. Qed. (** With these alternative definition, the dichotomy: [forall n m, n <= m \/ m <= n] becomes: [forall n m, (exists p, p + n == m) \/ (exists p, p + m == n)] We will need this in the proof of induction principle for integers constructed as pairs of natural numbers. This formula can be proved from know properties of [<=]. However, it can also be done directly. *) Theorem le_alt_dichotomy : forall n m, le_alt n m \/ le_alt m n. Proof. intros n m; induct n. left; exists m; apply add_0_r. intros n IH. destruct IH as [[p H] | [p H]]. destruct (zero_or_succ p) as [H1 | [p' H1]]; rewrite H1 in H. rewrite add_0_l in H. right; exists (S 0); rewrite H, add_succ_l; now rewrite add_0_l. left; exists p'; rewrite add_succ_r; now rewrite add_succ_l in H. right; exists (S p). rewrite add_succ_l; now rewrite H. Qed. Theorem add_dichotomy : forall n m, (exists p, p + n == m) \/ (exists p, p + m == n). Proof. exact le_alt_dichotomy. Qed. End NSubProp. coq-8.4pl2/theories/Numbers/Natural/Abstract/NIso.v0000640000175000001440000000630112010532755021274 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* N2.t) : Prop := f N1.zero == N2.zero /\ forall n, f (N1.succ n) == N2.succ (f n). Definition natural_isomorphism : N1.t -> N2.t := N1.recursion N2.zero (fun (n : N1.t) (p : N2.t) => N2.succ p). Instance natural_isomorphism_wd : Proper (N1.eq ==> N2.eq) natural_isomorphism. Proof. unfold natural_isomorphism. repeat red; intros. f_equiv; trivial. repeat red; intros. now f_equiv. Qed. 
Theorem natural_isomorphism_0 : natural_isomorphism N1.zero == N2.zero. Proof. unfold natural_isomorphism; now rewrite N1.recursion_0. Qed. Theorem natural_isomorphism_succ : forall n : N1.t, natural_isomorphism (N1.succ n) == N2.succ (natural_isomorphism n). Proof. unfold natural_isomorphism. intro n. rewrite N1.recursion_succ; auto with *. repeat red; intros. now f_equiv. Qed. Theorem hom_nat_iso : homomorphism natural_isomorphism. Proof. unfold homomorphism, natural_isomorphism; split; [exact natural_isomorphism_0 | exact natural_isomorphism_succ]. Qed. End Homomorphism. Module Inverse (N1 N2 : NAxiomsRecSig). Module Import NBasePropMod1 := NBaseProp N1. (* This makes the tactic induct available. Since it is taken from (NBasePropFunct NAxiomsMod1), it refers to induction on N1. *) Module Hom12 := Homomorphism N1 N2. Module Hom21 := Homomorphism N2 N1. Local Notation h12 := Hom12.natural_isomorphism. Local Notation h21 := Hom21.natural_isomorphism. Local Notation "n == m" := (N1.eq n m) (at level 70, no associativity). Lemma inverse_nat_iso : forall n : N1.t, h21 (h12 n) == n. Proof. induct n. now rewrite Hom12.natural_isomorphism_0, Hom21.natural_isomorphism_0. intros n IH. now rewrite Hom12.natural_isomorphism_succ, Hom21.natural_isomorphism_succ, IH. Qed. End Inverse. Module Isomorphism (N1 N2 : NAxiomsRecSig). Module Hom12 := Homomorphism N1 N2. Module Hom21 := Homomorphism N2 N1. Module Inverse12 := Inverse N1 N2. Module Inverse21 := Inverse N2 N1. Local Notation h12 := Hom12.natural_isomorphism. Local Notation h21 := Hom21.natural_isomorphism. Definition isomorphism (f1 : N1.t -> N2.t) (f2 : N2.t -> N1.t) : Prop := Hom12.homomorphism f1 /\ Hom21.homomorphism f2 /\ forall n, N1.eq (f2 (f1 n)) n /\ forall n, N2.eq (f1 (f2 n)) n. Theorem iso_nat_iso : isomorphism h12 h21. Proof. unfold isomorphism. split. apply Hom12.hom_nat_iso. split. apply Hom21.hom_nat_iso. split. apply Inverse12.inverse_nat_iso. apply Inverse21.inverse_nat_iso. Qed. End Isomorphism. coq-8.4pl2/theories/Numbers/Natural/Abstract/NProperties.v0000640000175000001440000000152312010532755022677 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 | xO p' => N.succ (binposlog p') | xI p' => N.succ (binposlog p') end. Definition binlog (n : N) : N := match n with | 0 => 0 | Npos p => binposlog p end. Time Eval vm_compute in (binlog 500000). (* 0 sec *) Time Eval vm_compute in (binlog 1000000000000000000000000000000). (* 0 sec *) *) coq-8.4pl2/theories/Numbers/Natural/SpecViaZ/0000750000175000001440000000000012127276550020164 5ustar notinuserscoq-8.4pl2/theories/Numbers/Natural/SpecViaZ/NSigNAxioms.v0000640000175000001440000002702612010532755022513 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* eq) succ. Program Instance pred_wd : Proper (eq==>eq) pred. Program Instance add_wd : Proper (eq==>eq==>eq) add. Program Instance sub_wd : Proper (eq==>eq==>eq) sub. Program Instance mul_wd : Proper (eq==>eq==>eq) mul. Theorem pred_succ : forall n, pred (succ n) == n. Proof. intros. zify. omega_pos n. Qed. Theorem one_succ : 1 == succ 0. Proof. now zify. Qed. Theorem two_succ : 2 == succ 1. Proof. now zify. Qed. Definition N_of_Z z := of_N (Z.to_N z). Lemma spec_N_of_Z z : (0<=z)%Z -> [N_of_Z z] = z. Proof. unfold N_of_Z. zify. apply Z2N.id. Qed. Section Induction. Variable A : NN.t -> Prop. 
Hypothesis A_wd : Proper (eq==>iff) A. Hypothesis A0 : A 0. Hypothesis AS : forall n, A n <-> A (succ n). Let B (z : Z) := A (N_of_Z z). Lemma B0 : B 0. Proof. unfold B, N_of_Z; simpl. rewrite <- (A_wd 0); auto. red; rewrite spec_0, spec_of_N; auto. Qed. Lemma BS : forall z : Z, (0 <= z)%Z -> B z -> B (z + 1). Proof. intros z H1 H2. unfold B in *. apply -> AS in H2. setoid_replace (N_of_Z (z + 1)) with (succ (N_of_Z z)); auto. unfold eq. rewrite spec_succ, 2 spec_N_of_Z; auto with zarith. Qed. Lemma B_holds : forall z : Z, (0 <= z)%Z -> B z. Proof. exact (natlike_ind B B0 BS). Qed. Theorem bi_induction : forall n, A n. Proof. intro n. setoid_replace n with (N_of_Z (to_Z n)). apply B_holds. apply spec_pos. red. now rewrite spec_N_of_Z by apply spec_pos. Qed. End Induction. Theorem add_0_l : forall n, 0 + n == n. Proof. intros. zify. auto with zarith. Qed. Theorem add_succ_l : forall n m, (succ n) + m == succ (n + m). Proof. intros. zify. auto with zarith. Qed. Theorem sub_0_r : forall n, n - 0 == n. Proof. intros. zify. omega_pos n. Qed. Theorem sub_succ_r : forall n m, n - (succ m) == pred (n - m). Proof. intros. zify. omega with *. Qed. Theorem mul_0_l : forall n, 0 * n == 0. Proof. intros. zify. auto with zarith. Qed. Theorem mul_succ_l : forall n m, (succ n) * m == n * m + m. Proof. intros. zify. ring. Qed. (** Order *) Lemma eqb_eq x y : eqb x y = true <-> x == y. Proof. zify. apply Z.eqb_eq. Qed. Lemma leb_le x y : leb x y = true <-> x <= y. Proof. zify. apply Z.leb_le. Qed. Lemma ltb_lt x y : ltb x y = true <-> x < y. Proof. zify. apply Z.ltb_lt. Qed. Lemma compare_eq_iff n m : compare n m = Eq <-> n == m. Proof. intros. zify. apply Z.compare_eq_iff. Qed. Lemma compare_lt_iff n m : compare n m = Lt <-> n < m. Proof. intros. zify. reflexivity. Qed. Lemma compare_le_iff n m : compare n m <> Gt <-> n <= m. Proof. intros. zify. reflexivity. Qed. Lemma compare_antisym n m : compare m n = CompOpp (compare n m). Proof. intros. zify. apply Z.compare_antisym. Qed. Include BoolOrderFacts NN NN NN [no inline]. Instance compare_wd : Proper (eq ==> eq ==> Logic.eq) compare. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance eqb_wd : Proper (eq ==> eq ==> Logic.eq) eqb. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance ltb_wd : Proper (eq ==> eq ==> Logic.eq) ltb. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance leb_wd : Proper (eq ==> eq ==> Logic.eq) leb. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance lt_wd : Proper (eq ==> eq ==> iff) lt. Proof. intros x x' Hx y y' Hy; unfold lt; rewrite Hx, Hy; intuition. Qed. Theorem lt_succ_r : forall n m, n < succ m <-> n <= m. Proof. intros. zify. omega. Qed. Theorem min_l : forall n m, n <= m -> min n m == n. Proof. intros n m. zify. omega with *. Qed. Theorem min_r : forall n m, m <= n -> min n m == m. Proof. intros n m. zify. omega with *. Qed. Theorem max_l : forall n m, m <= n -> max n m == n. Proof. intros n m. zify. omega with *. Qed. Theorem max_r : forall n m, n <= m -> max n m == m. Proof. intros n m. zify. omega with *. Qed. (** Properties specific to natural numbers, not integers. *) Theorem pred_0 : pred 0 == 0. Proof. zify. auto. Qed. (** Power *) Program Instance pow_wd : Proper (eq==>eq==>eq) pow. Lemma pow_0_r : forall a, a^0 == 1. Proof. intros. now zify. Qed. Lemma pow_succ_r : forall a b, 0<=b -> a^(succ b) == a * a^b. Proof. intros a b. zify. intros. now Z.nzsimpl. Qed. Lemma pow_neg_r : forall a b, b<0 -> a^b == 0. Proof. intros a b. zify. intro Hb. 
exfalso. omega_pos b. Qed. Lemma pow_pow_N : forall a b, a^b == pow_N a (to_N b). Proof. intros. zify. f_equal. now rewrite Z2N.id by apply spec_pos. Qed. Lemma pow_N_pow : forall a b, pow_N a b == a^(of_N b). Proof. intros. now zify. Qed. Lemma pow_pos_N : forall a p, pow_pos a p == pow_N a (Npos p). Proof. intros. now zify. Qed. (** Square *) Lemma square_spec n : square n == n * n. Proof. now zify. Qed. (** Sqrt *) Lemma sqrt_spec : forall n, 0<=n -> (sqrt n)*(sqrt n) <= n /\ n < (succ (sqrt n))*(succ (sqrt n)). Proof. intros n. zify. apply Z.sqrt_spec. Qed. Lemma sqrt_neg : forall n, n<0 -> sqrt n == 0. Proof. intros n. zify. intro H. exfalso. omega_pos n. Qed. (** Log2 *) Lemma log2_spec : forall n, 0 2^(log2 n) <= n /\ n < 2^(succ (log2 n)). Proof. intros n. zify. change (Z.log2 [n]+1)%Z with (Z.succ (Z.log2 [n])). apply Z.log2_spec. Qed. Lemma log2_nonpos : forall n, n<=0 -> log2 n == 0. Proof. intros n. zify. apply Z.log2_nonpos. Qed. (** Even / Odd *) Definition Even n := exists m, n == 2*m. Definition Odd n := exists m, n == 2*m+1. Lemma even_spec n : even n = true <-> Even n. Proof. unfold Even. zify. rewrite Z.even_spec. split; intros (m,Hm). - exists (N_of_Z m). zify. rewrite spec_N_of_Z; trivial. omega_pos n. - exists [m]. revert Hm; now zify. Qed. Lemma odd_spec n : odd n = true <-> Odd n. Proof. unfold Odd. zify. rewrite Z.odd_spec. split; intros (m,Hm). - exists (N_of_Z m). zify. rewrite spec_N_of_Z; trivial. omega_pos n. - exists [m]. revert Hm; now zify. Qed. (** Div / Mod *) Program Instance div_wd : Proper (eq==>eq==>eq) div. Program Instance mod_wd : Proper (eq==>eq==>eq) modulo. Theorem div_mod : forall a b, ~b==0 -> a == b*(div a b) + (modulo a b). Proof. intros a b. zify. intros. apply Z.div_mod; auto. Qed. Theorem mod_bound_pos : forall a b, 0<=a -> 0 0 <= modulo a b /\ modulo a b < b. Proof. intros a b. zify. apply Z.mod_bound_pos. Qed. (** Gcd *) Definition divide n m := exists p, m == p*n. Local Notation "( x | y )" := (divide x y) (at level 0). Lemma spec_divide : forall n m, (n|m) <-> Z.divide [n] [m]. Proof. intros n m. split. - intros (p,H). exists [p]. revert H; now zify. - intros (z,H). exists (of_N (Z.abs_N z)). zify. rewrite N2Z.inj_abs_N. rewrite <- (Z.abs_eq [m]), <- (Z.abs_eq [n]) by apply spec_pos. now rewrite H, Z.abs_mul. Qed. Lemma gcd_divide_l : forall n m, (gcd n m | n). Proof. intros n m. apply spec_divide. zify. apply Z.gcd_divide_l. Qed. Lemma gcd_divide_r : forall n m, (gcd n m | m). Proof. intros n m. apply spec_divide. zify. apply Z.gcd_divide_r. Qed. Lemma gcd_greatest : forall n m p, (p|n) -> (p|m) -> (p|gcd n m). Proof. intros n m p. rewrite !spec_divide. zify. apply Z.gcd_greatest. Qed. Lemma gcd_nonneg : forall n m, 0 <= gcd n m. Proof. intros. zify. apply Z.gcd_nonneg. Qed. (** Bitwise operations *) Program Instance testbit_wd : Proper (eq==>eq==>Logic.eq) testbit. Lemma testbit_odd_0 : forall a, testbit (2*a+1) 0 = true. Proof. intros. zify. apply Z.testbit_odd_0. Qed. Lemma testbit_even_0 : forall a, testbit (2*a) 0 = false. Proof. intros. zify. apply Z.testbit_even_0. Qed. Lemma testbit_odd_succ : forall a n, 0<=n -> testbit (2*a+1) (succ n) = testbit a n. Proof. intros a n. zify. apply Z.testbit_odd_succ. Qed. Lemma testbit_even_succ : forall a n, 0<=n -> testbit (2*a) (succ n) = testbit a n. Proof. intros a n. zify. apply Z.testbit_even_succ. Qed. Lemma testbit_neg_r : forall a n, n<0 -> testbit a n = false. Proof. intros a n. zify. apply Z.testbit_neg_r. Qed. 
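(* Illustrative instance, not part of the original file (the name is ours): the
   [Even] predicate defined above holds of 0, with witness 0; the [zify]-based
   proof style used throughout this functor settles the arithmetic side. *)
Example Even_0_example : Even 0.
Proof.
exists 0. zify. ring.
Qed.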
Lemma shiftr_spec : forall a n m, 0<=m -> testbit (shiftr a n) m = testbit a (m+n). Proof. intros a n m. zify. apply Z.shiftr_spec. Qed. Lemma shiftl_spec_high : forall a n m, 0<=m -> n<=m -> testbit (shiftl a n) m = testbit a (m-n). Proof. intros a n m. zify. intros Hn H. rewrite Z.max_r by auto with zarith. now apply Z.shiftl_spec_high. Qed. Lemma shiftl_spec_low : forall a n m, m testbit (shiftl a n) m = false. Proof. intros a n m. zify. intros H. now apply Z.shiftl_spec_low. Qed. Lemma land_spec : forall a b n, testbit (land a b) n = testbit a n && testbit b n. Proof. intros a n m. zify. now apply Z.land_spec. Qed. Lemma lor_spec : forall a b n, testbit (lor a b) n = testbit a n || testbit b n. Proof. intros a n m. zify. now apply Z.lor_spec. Qed. Lemma ldiff_spec : forall a b n, testbit (ldiff a b) n = testbit a n && negb (testbit b n). Proof. intros a n m. zify. now apply Z.ldiff_spec. Qed. Lemma lxor_spec : forall a b n, testbit (lxor a b) n = xorb (testbit a n) (testbit b n). Proof. intros a n m. zify. now apply Z.lxor_spec. Qed. Lemma div2_spec : forall a, div2 a == shiftr a 1. Proof. intros a. zify. now apply Z.div2_spec. Qed. (** Recursion *) Definition recursion (A : Type) (a : A) (f : NN.t -> A -> A) (n : NN.t) := N.peano_rect (fun _ => A) a (fun n a => f (NN.of_N n) a) (NN.to_N n). Arguments recursion [A] a f n. Instance recursion_wd (A : Type) (Aeq : relation A) : Proper (Aeq ==> (eq==>Aeq==>Aeq) ==> eq ==> Aeq) (@recursion A). Proof. unfold eq. intros a a' Eaa' f f' Eff' x x' Exx'. unfold recursion. unfold NN.to_N. rewrite <- Exx'; clear x' Exx'. induction (Z.to_N [x]) using N.peano_ind. simpl; auto. rewrite 2 N.peano_rect_succ. now apply Eff'. Qed. Theorem recursion_0 : forall (A : Type) (a : A) (f : NN.t -> A -> A), recursion a f 0 = a. Proof. intros A a f; unfold recursion, NN.to_N; rewrite NN.spec_0; simpl; auto. Qed. Theorem recursion_succ : forall (A : Type) (Aeq : relation A) (a : A) (f : NN.t -> A -> A), Aeq a a -> Proper (eq==>Aeq==>Aeq) f -> forall n, Aeq (recursion a f (succ n)) (f n (recursion a f n)). Proof. unfold eq, recursion; intros A Aeq a f EAaa f_wd n. replace (to_N (succ n)) with (N.succ (to_N n)) by (zify; now rewrite <- Z2N.inj_succ by apply spec_pos). rewrite N.peano_rect_succ. apply f_wd; auto. zify. now rewrite Z2N.id by apply spec_pos. fold (recursion a f n). apply recursion_wd; auto. red; auto. Qed. End NTypeIsNAxioms. Module NType_NAxioms (NN : NType) <: NAxiomsSig <: OrderFunctions NN <: HasMinMax NN := NN <+ NTypeIsNAxioms. coq-8.4pl2/theories/Numbers/Natural/SpecViaZ/NSig.v0000640000175000001440000001102112010532755021200 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z. Local Notation "[ x ]" := (to_Z x). Parameter spec_pos: forall x, 0 <= [x]. Parameter of_N : N -> t. Parameter spec_of_N: forall x, to_Z (of_N x) = Z.of_N x. Definition to_N n := Z.to_N (to_Z n). Definition eq n m := [n] = [m]. Definition lt n m := [n] < [m]. Definition le n m := [n] <= [m]. Parameter compare : t -> t -> comparison. Parameter eqb : t -> t -> bool. Parameter ltb : t -> t -> bool. Parameter leb : t -> t -> bool. Parameter max : t -> t -> t. Parameter min : t -> t -> t. Parameter zero : t. Parameter one : t. Parameter two : t. Parameter succ : t -> t. Parameter pred : t -> t. Parameter add : t -> t -> t. Parameter sub : t -> t -> t. Parameter mul : t -> t -> t. Parameter square : t -> t. Parameter pow_pos : t -> positive -> t. 
Parameter pow_N : t -> N -> t. Parameter pow : t -> t -> t. Parameter sqrt : t -> t. Parameter log2 : t -> t. Parameter div_eucl : t -> t -> t * t. Parameter div : t -> t -> t. Parameter modulo : t -> t -> t. Parameter gcd : t -> t -> t. Parameter even : t -> bool. Parameter odd : t -> bool. Parameter testbit : t -> t -> bool. Parameter shiftr : t -> t -> t. Parameter shiftl : t -> t -> t. Parameter land : t -> t -> t. Parameter lor : t -> t -> t. Parameter ldiff : t -> t -> t. Parameter lxor : t -> t -> t. Parameter div2 : t -> t. Parameter spec_compare: forall x y, compare x y = ([x] ?= [y]). Parameter spec_eqb : forall x y, eqb x y = ([x] =? [y]). Parameter spec_ltb : forall x y, ltb x y = ([x] ZZ.t. Parameter spec_Z_of_N : forall n, ZZ.to_Z (Z_of_N n) = NN.to_Z n. Parameter Zabs_N : ZZ.t -> NN.t. Parameter spec_Zabs_N : forall z, NN.to_Z (Zabs_N z) = Z.abs (ZZ.to_Z z). End NType_ZType. Module Make (NN:NType)(ZZ:ZType)(Import NZ:NType_ZType NN ZZ) <: QType. (** The notation of a rational number is either an integer x, interpreted as itself or a pair (x,y) of an integer x and a natural number y interpreted as x/y. The pairs (x,0) and (0,y) are all interpreted as 0. *) Inductive t_ := | Qz : ZZ.t -> t_ | Qq : ZZ.t -> NN.t -> t_. Definition t := t_. Bind Scope abstract_scope with t t_. (** Specification with respect to [QArith] *) Local Open Scope Q_scope. Definition of_Z x: t := Qz (ZZ.of_Z x). Definition of_Q (q:Q) : t := let (x,y) := q in match y with | 1%positive => Qz (ZZ.of_Z x) | _ => Qq (ZZ.of_Z x) (NN.of_N (Npos y)) end. Definition to_Q (q: t) := match q with | Qz x => ZZ.to_Z x # 1 | Qq x y => if NN.eqb y NN.zero then 0 else ZZ.to_Z x # Z.to_pos (NN.to_Z y) end. Notation "[ x ]" := (to_Q x). Lemma N_to_Z_pos : forall x, (NN.to_Z x <> NN.to_Z NN.zero)%Z -> (0 < NN.to_Z x)%Z. Proof. intros x; rewrite NN.spec_0; generalize (NN.spec_pos x). romega. Qed. Ltac destr_zcompare := case Z.compare_spec; intros ?H. Ltac destr_eqb := match goal with | |- context [ZZ.eqb ?x ?y] => rewrite (ZZ.spec_eqb x y); case (Z.eqb_spec (ZZ.to_Z x) (ZZ.to_Z y)); destr_eqb | |- context [NN.eqb ?x ?y] => rewrite (NN.spec_eqb x y); case (Z.eqb_spec (NN.to_Z x) (NN.to_Z y)); [ | let H:=fresh "H" in try (intro H;generalize (N_to_Z_pos _ H); clear H)]; destr_eqb | _ => idtac end. Hint Rewrite Z.add_0_r Z.add_0_l Z.mul_0_r Z.mul_0_l Z.mul_1_r Z.mul_1_l ZZ.spec_0 NN.spec_0 ZZ.spec_1 NN.spec_1 ZZ.spec_m1 ZZ.spec_opp ZZ.spec_compare NN.spec_compare ZZ.spec_add NN.spec_add ZZ.spec_mul NN.spec_mul ZZ.spec_div NN.spec_div ZZ.spec_gcd NN.spec_gcd Z.gcd_abs_l Z.gcd_1_r spec_Z_of_N spec_Zabs_N : nz. Ltac nzsimpl := autorewrite with nz in *. Ltac qsimpl := try red; unfold to_Q; simpl; intros; destr_eqb; simpl; nzsimpl; intros; rewrite ?Z2Pos.id by auto; auto. Theorem strong_spec_of_Q: forall q: Q, [of_Q q] = q. Proof. intros(x,y); destruct y; simpl; rewrite ?ZZ.spec_of_Z; auto; destr_eqb; now rewrite ?NN.spec_0, ?NN.spec_of_N. Qed. Theorem spec_of_Q: forall q: Q, [of_Q q] == q. Proof. intros; rewrite strong_spec_of_Q; red; auto. Qed. Definition eq x y := [x] == [y]. Definition zero: t := Qz ZZ.zero. Definition one: t := Qz ZZ.one. Definition minus_one: t := Qz ZZ.minus_one. Lemma spec_0: [zero] == 0. Proof. simpl. nzsimpl. reflexivity. Qed. Lemma spec_1: [one] == 1. Proof. simpl. nzsimpl. reflexivity. Qed. Lemma spec_m1: [minus_one] == -(1). Proof. simpl. nzsimpl. reflexivity. Qed. 
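(* Illustrative specification, not part of the original file (the name is ours):
   embedding an integer through [of_Z] yields the expected rational z#1, in the
   same style as [spec_0] and [spec_1] above. *)
Lemma spec_of_Z_example : forall z, [of_Z z] == z # 1.
Proof.
intros z. simpl. rewrite ZZ.spec_of_Z. reflexivity.
Qed.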
Definition compare (x y: t) := match x, y with | Qz zx, Qz zy => ZZ.compare zx zy | Qz zx, Qq ny dy => if NN.eqb dy NN.zero then ZZ.compare zx ZZ.zero else ZZ.compare (ZZ.mul zx (Z_of_N dy)) ny | Qq nx dx, Qz zy => if NN.eqb dx NN.zero then ZZ.compare ZZ.zero zy else ZZ.compare nx (ZZ.mul zy (Z_of_N dx)) | Qq nx dx, Qq ny dy => match NN.eqb dx NN.zero, NN.eqb dy NN.zero with | true, true => Eq | true, false => ZZ.compare ZZ.zero ny | false, true => ZZ.compare nx ZZ.zero | false, false => ZZ.compare (ZZ.mul nx (Z_of_N dy)) (ZZ.mul ny (Z_of_N dx)) end end. Theorem spec_compare: forall q1 q2, (compare q1 q2) = ([q1] ?= [q2]). Proof. intros [z1 | x1 y1] [z2 | x2 y2]; unfold Qcompare, compare; qsimpl. Qed. Definition lt n m := [n] < [m]. Definition le n m := [n] <= [m]. Definition min n m := match compare n m with Gt => m | _ => n end. Definition max n m := match compare n m with Lt => m | _ => n end. Lemma spec_min : forall n m, [min n m] == Qmin [n] [m]. Proof. unfold min, Qmin, GenericMinMax.gmin. intros. rewrite spec_compare; destruct Qcompare; auto with qarith. Qed. Lemma spec_max : forall n m, [max n m] == Qmax [n] [m]. Proof. unfold max, Qmax, GenericMinMax.gmax. intros. rewrite spec_compare; destruct Qcompare; auto with qarith. Qed. Definition eq_bool n m := match compare n m with Eq => true | _ => false end. Theorem spec_eq_bool: forall x y, eq_bool x y = Qeq_bool [x] [y]. Proof. intros. unfold eq_bool. rewrite spec_compare. reflexivity. Qed. (** [check_int] : is a reduced fraction [n/d] in fact a integer ? *) Definition check_int n d := match NN.compare NN.one d with | Lt => Qq n d | Eq => Qz n | Gt => zero (* n/0 encodes 0 *) end. Theorem strong_spec_check_int : forall n d, [check_int n d] = [Qq n d]. Proof. intros; unfold check_int. nzsimpl. destr_zcompare. simpl. rewrite <- H; qsimpl. congruence. reflexivity. qsimpl. exfalso; romega. Qed. (** Normalisation function *) Definition norm n d : t := let gcd := NN.gcd (Zabs_N n) d in match NN.compare NN.one gcd with | Lt => check_int (ZZ.div n (Z_of_N gcd)) (NN.div d gcd) | Eq => check_int n d | Gt => zero (* gcd = 0 => both numbers are 0 *) end. Theorem spec_norm: forall n q, [norm n q] == [Qq n q]. Proof. intros p q; unfold norm. assert (Hp := NN.spec_pos (Zabs_N p)). assert (Hq := NN.spec_pos q). nzsimpl. destr_zcompare. (* Eq *) rewrite strong_spec_check_int; reflexivity. (* Lt *) rewrite strong_spec_check_int. qsimpl. generalize (Zgcd_div_pos (ZZ.to_Z p) (NN.to_Z q)). romega. replace (NN.to_Z q) with 0%Z in * by assumption. rewrite Zdiv_0_l in *; auto with zarith. apply Zgcd_div_swap0; romega. (* Gt *) qsimpl. assert (H' : Z.gcd (ZZ.to_Z p) (NN.to_Z q) = 0%Z). generalize (Z.gcd_nonneg (ZZ.to_Z p) (NN.to_Z q)); romega. symmetry; apply (Z.gcd_eq_0_l _ _ H'); auto. Qed. Theorem strong_spec_norm : forall p q, [norm p q] = Qred [Qq p q]. Proof. intros. replace (Qred [Qq p q]) with (Qred [norm p q]) by (apply Qred_complete; apply spec_norm). symmetry; apply Qred_identity. unfold norm. assert (Hp := NN.spec_pos (Zabs_N p)). assert (Hq := NN.spec_pos q). nzsimpl. destr_zcompare; rewrite ?strong_spec_check_int. (* Eq *) qsimpl. (* Lt *) qsimpl. rewrite Zgcd_1_rel_prime. destruct (Z_lt_le_dec 0 (NN.to_Z q)). apply Zis_gcd_rel_prime; auto with zarith. apply Zgcd_is_gcd. replace (NN.to_Z q) with 0%Z in * by romega. rewrite Zdiv_0_l in *; romega. (* Gt *) simpl; auto with zarith. Qed. (** Reduction function : producing irreducible fractions *) Definition red (x : t) : t := match x with | Qz z => x | Qq n d => norm n d end. 
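(* Illustrative remark, not part of the original file (the name is ours): on an
   integer representative [red] is the identity, by computation. *)
Example red_Qz_example : forall z, red (Qz z) = Qz z.
Proof.
intros z. reflexivity.
Qed.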
Class Reduced x := is_reduced : [red x] = [x]. Theorem spec_red : forall x, [red x] == [x]. Proof. intros [ z | n d ]. auto with qarith. unfold red. apply spec_norm. Qed. Theorem strong_spec_red : forall x, [red x] = Qred [x]. Proof. intros [ z | n d ]. unfold red. symmetry; apply Qred_identity; simpl; auto with zarith. unfold red; apply strong_spec_norm. Qed. Definition add (x y: t): t := match x with | Qz zx => match y with | Qz zy => Qz (ZZ.add zx zy) | Qq ny dy => if NN.eqb dy NN.zero then x else Qq (ZZ.add (ZZ.mul zx (Z_of_N dy)) ny) dy end | Qq nx dx => if NN.eqb dx NN.zero then y else match y with | Qz zy => Qq (ZZ.add nx (ZZ.mul zy (Z_of_N dx))) dx | Qq ny dy => if NN.eqb dy NN.zero then x else let n := ZZ.add (ZZ.mul nx (Z_of_N dy)) (ZZ.mul ny (Z_of_N dx)) in let d := NN.mul dx dy in Qq n d end end. Theorem spec_add : forall x y, [add x y] == [x] + [y]. Proof. intros [x | nx dx] [y | ny dy]; unfold Qplus; qsimpl; auto with zarith. rewrite Pos.mul_1_r, Z2Pos.id; auto. rewrite Pos.mul_1_r, Z2Pos.id; auto. rewrite Z.mul_eq_0 in *; intuition. rewrite Pos2Z.inj_mul, 2 Z2Pos.id; auto. Qed. Definition add_norm (x y: t): t := match x with | Qz zx => match y with | Qz zy => Qz (ZZ.add zx zy) | Qq ny dy => if NN.eqb dy NN.zero then x else norm (ZZ.add (ZZ.mul zx (Z_of_N dy)) ny) dy end | Qq nx dx => if NN.eqb dx NN.zero then y else match y with | Qz zy => norm (ZZ.add nx (ZZ.mul zy (Z_of_N dx))) dx | Qq ny dy => if NN.eqb dy NN.zero then x else let n := ZZ.add (ZZ.mul nx (Z_of_N dy)) (ZZ.mul ny (Z_of_N dx)) in let d := NN.mul dx dy in norm n d end end. Theorem spec_add_norm : forall x y, [add_norm x y] == [x] + [y]. Proof. intros x y; rewrite <- spec_add. destruct x; destruct y; unfold add_norm, add; destr_eqb; auto using Qeq_refl, spec_norm. Qed. Instance strong_spec_add_norm x y `(Reduced x, Reduced y) : Reduced (add_norm x y). Proof. unfold Reduced; intros. rewrite strong_spec_red. rewrite <- (Qred_complete [add x y]); [ | rewrite spec_add, spec_add_norm; apply Qeq_refl ]. rewrite <- strong_spec_red. destruct x as [zx|nx dx]; destruct y as [zy|ny dy]; simpl; destr_eqb; nzsimpl; simpl; auto. Qed. Definition opp (x: t): t := match x with | Qz zx => Qz (ZZ.opp zx) | Qq nx dx => Qq (ZZ.opp nx) dx end. Theorem strong_spec_opp: forall q, [opp q] = -[q]. Proof. intros [z | x y]; simpl. rewrite ZZ.spec_opp; auto. match goal with |- context[NN.eqb ?X ?Y] => generalize (NN.spec_eqb X Y); case NN.eqb end; auto; rewrite NN.spec_0. rewrite ZZ.spec_opp; auto. Qed. Theorem spec_opp : forall q, [opp q] == -[q]. Proof. intros; rewrite strong_spec_opp; red; auto. Qed. Instance strong_spec_opp_norm q `(Reduced q) : Reduced (opp q). Proof. unfold Reduced; intros. rewrite strong_spec_opp, <- H, !strong_spec_red, <- Qred_opp. apply Qred_complete; apply spec_opp. Qed. Definition sub x y := add x (opp y). Theorem spec_sub : forall x y, [sub x y] == [x] - [y]. Proof. intros x y; unfold sub; rewrite spec_add; auto. rewrite spec_opp; ring. Qed. Definition sub_norm x y := add_norm x (opp y). Theorem spec_sub_norm : forall x y, [sub_norm x y] == [x] - [y]. Proof. intros x y; unfold sub_norm; rewrite spec_add_norm; auto. rewrite spec_opp; ring. Qed. Instance strong_spec_sub_norm x y `(Reduced x, Reduced y) : Reduced (sub_norm x y). Proof. intros. unfold sub_norm. apply strong_spec_add_norm; auto. apply strong_spec_opp_norm; auto. Qed. 
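(* Illustrative consequence, not part of the original file (the name is ours):
   subtracting an element from itself is a zero of Q, via [spec_sub]. *)
Lemma spec_sub_diag_example : forall x, [sub x x] == 0.
Proof.
intros x. rewrite spec_sub. ring.
Qed.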
Definition mul (x y: t): t := match x, y with | Qz zx, Qz zy => Qz (ZZ.mul zx zy) | Qz zx, Qq ny dy => Qq (ZZ.mul zx ny) dy | Qq nx dx, Qz zy => Qq (ZZ.mul nx zy) dx | Qq nx dx, Qq ny dy => Qq (ZZ.mul nx ny) (NN.mul dx dy) end. Ltac nsubst := match goal with E : NN.to_Z _ = _ |- _ => rewrite E in * end. Theorem spec_mul : forall x y, [mul x y] == [x] * [y]. Proof. intros [x | nx dx] [y | ny dy]; unfold Qmult; simpl; qsimpl. rewrite Pos.mul_1_r, Z2Pos.id; auto. rewrite Z.mul_eq_0 in *; intuition. nsubst; auto with zarith. nsubst; auto with zarith. nsubst; nzsimpl; auto with zarith. rewrite Pos2Z.inj_mul, 2 Z2Pos.id; auto. Qed. Definition norm_denum n d := if NN.eqb d NN.one then Qz n else Qq n d. Lemma spec_norm_denum : forall n d, [norm_denum n d] == [Qq n d]. Proof. unfold norm_denum; intros; simpl; qsimpl. congruence. nsubst; auto with zarith. Qed. Definition irred n d := let gcd := NN.gcd (Zabs_N n) d in match NN.compare gcd NN.one with | Gt => (ZZ.div n (Z_of_N gcd), NN.div d gcd) | _ => (n, d) end. Lemma spec_irred : forall n d, exists g, let (n',d') := irred n d in (ZZ.to_Z n' * g = ZZ.to_Z n)%Z /\ (NN.to_Z d' * g = NN.to_Z d)%Z. Proof. intros. unfold irred; nzsimpl; simpl. destr_zcompare. exists 1%Z; nzsimpl; auto. exists 0%Z; nzsimpl. assert (Z.gcd (ZZ.to_Z n) (NN.to_Z d) = 0%Z). generalize (Z.gcd_nonneg (ZZ.to_Z n) (NN.to_Z d)); romega. clear H. split. symmetry; apply (Z.gcd_eq_0_l _ _ H0). symmetry; apply (Z.gcd_eq_0_r _ _ H0). exists (Z.gcd (ZZ.to_Z n) (NN.to_Z d)). simpl. split. nzsimpl. destruct (Zgcd_is_gcd (ZZ.to_Z n) (NN.to_Z d)). rewrite Z.mul_comm; symmetry; apply Zdivide_Zdiv_eq; auto with zarith. nzsimpl. destruct (Zgcd_is_gcd (ZZ.to_Z n) (NN.to_Z d)). rewrite Z.mul_comm; symmetry; apply Zdivide_Zdiv_eq; auto with zarith. Qed. Lemma spec_irred_zero : forall n d, (NN.to_Z d = 0)%Z <-> (NN.to_Z (snd (irred n d)) = 0)%Z. Proof. intros. unfold irred. split. nzsimpl; intros. destr_zcompare; auto. simpl. nzsimpl. rewrite H, Zdiv_0_l; auto. nzsimpl; destr_zcompare; simpl; auto. nzsimpl. intros. generalize (NN.spec_pos d); intros. destruct (NN.to_Z d); auto. assert (0 < 0)%Z. rewrite <- H0 at 2. apply Zgcd_div_pos; auto with zarith. compute; auto. discriminate. compute in H1; elim H1; auto. Qed. Lemma strong_spec_irred : forall n d, (NN.to_Z d <> 0%Z) -> let (n',d') := irred n d in Z.gcd (ZZ.to_Z n') (NN.to_Z d') = 1%Z. Proof. unfold irred; intros. nzsimpl. destr_zcompare; simpl; auto. elim H. apply (Z.gcd_eq_0_r (ZZ.to_Z n)). generalize (Z.gcd_nonneg (ZZ.to_Z n) (NN.to_Z d)); romega. nzsimpl. rewrite Zgcd_1_rel_prime. apply Zis_gcd_rel_prime. generalize (NN.spec_pos d); romega. generalize (Z.gcd_nonneg (ZZ.to_Z n) (NN.to_Z d)); romega. apply Zgcd_is_gcd; auto. Qed. Definition mul_norm_Qz_Qq z n d := if ZZ.eqb z ZZ.zero then zero else let gcd := NN.gcd (Zabs_N z) d in match NN.compare gcd NN.one with | Gt => let z := ZZ.div z (Z_of_N gcd) in let d := NN.div d gcd in norm_denum (ZZ.mul z n) d | _ => Qq (ZZ.mul z n) d end. Definition mul_norm (x y: t): t := match x, y with | Qz zx, Qz zy => Qz (ZZ.mul zx zy) | Qz zx, Qq ny dy => mul_norm_Qz_Qq zx ny dy | Qq nx dx, Qz zy => mul_norm_Qz_Qq zy nx dx | Qq nx dx, Qq ny dy => let (nx, dy) := irred nx dy in let (ny, dx) := irred ny dx in norm_denum (ZZ.mul ny nx) (NN.mul dx dy) end. Lemma spec_mul_norm_Qz_Qq : forall z n d, [mul_norm_Qz_Qq z n d] == [Qq (ZZ.mul z n) d]. Proof. intros z n d; unfold mul_norm_Qz_Qq; nzsimpl; rewrite Zcompare_gt. destr_eqb; nzsimpl; intros Hz. qsimpl; rewrite Hz; auto. destruct Z_le_gt_dec as [LE|GT]. 
qsimpl. rewrite spec_norm_denum. qsimpl. rewrite Zdiv_gcd_zero in GT; auto with zarith. nsubst. rewrite Zdiv_0_l in *; discriminate. rewrite <- Z.mul_assoc, (Z.mul_comm (ZZ.to_Z n)), Z.mul_assoc. rewrite Zgcd_div_swap0; try romega. ring. Qed. Instance strong_spec_mul_norm_Qz_Qq z n d : forall `(Reduced (Qq n d)), Reduced (mul_norm_Qz_Qq z n d). Proof. unfold Reduced. rewrite 2 strong_spec_red, 2 Qred_iff. simpl; nzsimpl. destr_eqb; intros Hd H; simpl in *; nzsimpl. unfold mul_norm_Qz_Qq; nzsimpl; rewrite Zcompare_gt. destr_eqb; intros Hz; simpl; nzsimpl; simpl; auto. destruct Z_le_gt_dec. simpl; nzsimpl. destr_eqb; simpl; nzsimpl; auto with zarith. unfold norm_denum. destr_eqb; simpl; nzsimpl. rewrite Hd, Zdiv_0_l; discriminate. intros _. destr_eqb; simpl; nzsimpl; auto. nzsimpl; rewrite Hd, Zdiv_0_l; auto with zarith. rewrite Z2Pos.id in H; auto. unfold mul_norm_Qz_Qq; nzsimpl; rewrite Zcompare_gt. destr_eqb; intros Hz; simpl; nzsimpl; simpl; auto. destruct Z_le_gt_dec as [H'|H']. simpl; nzsimpl. destr_eqb; simpl; nzsimpl; auto. intros. rewrite Z2Pos.id; auto. apply Zgcd_mult_rel_prime; auto. generalize (Z.gcd_eq_0_l (ZZ.to_Z z) (NN.to_Z d)) (Z.gcd_nonneg (ZZ.to_Z z) (NN.to_Z d)); romega. destr_eqb; simpl; nzsimpl; auto. unfold norm_denum. destr_eqb; nzsimpl; simpl; destr_eqb; simpl; auto. intros; nzsimpl. rewrite Z2Pos.id; auto. apply Zgcd_mult_rel_prime. rewrite Zgcd_1_rel_prime. apply Zis_gcd_rel_prime. generalize (NN.spec_pos d); romega. generalize (Z.gcd_nonneg (ZZ.to_Z z) (NN.to_Z d)); romega. apply Zgcd_is_gcd. destruct (Zgcd_is_gcd (ZZ.to_Z z) (NN.to_Z d)) as [ (z0,Hz0) (d0,Hd0) Hzd]. replace (NN.to_Z d / Z.gcd (ZZ.to_Z z) (NN.to_Z d))%Z with d0. rewrite Zgcd_1_rel_prime in *. apply bezout_rel_prime. destruct (rel_prime_bezout _ _ H) as [u v Huv]. apply Bezout_intro with u (v*(Z.gcd (ZZ.to_Z z) (NN.to_Z d)))%Z. rewrite <- Huv; rewrite Hd0 at 2; ring. rewrite Hd0 at 1. symmetry; apply Z_div_mult_full; auto with zarith. Qed. Theorem spec_mul_norm : forall x y, [mul_norm x y] == [x] * [y]. Proof. intros x y; rewrite <- spec_mul; auto. unfold mul_norm, mul; destruct x; destruct y. apply Qeq_refl. apply spec_mul_norm_Qz_Qq. rewrite spec_mul_norm_Qz_Qq; qsimpl; ring. rename t0 into nx, t3 into dy, t2 into ny, t1 into dx. destruct (spec_irred nx dy) as (g & Hg). destruct (spec_irred ny dx) as (g' & Hg'). assert (Hz := spec_irred_zero nx dy). assert (Hz':= spec_irred_zero ny dx). destruct irred as (n1,d1); destruct irred as (n2,d2). simpl snd in *; destruct Hg as [Hg1 Hg2]; destruct Hg' as [Hg1' Hg2']. rewrite spec_norm_denum. qsimpl. match goal with E : (_ * _ = 0)%Z |- _ => rewrite Z.mul_eq_0 in E; destruct E as [Eq|Eq] end. rewrite Eq in *; simpl in *. rewrite <- Hg2' in *; auto with zarith. rewrite Eq in *; simpl in *. rewrite <- Hg2 in *; auto with zarith. match goal with E : (_ * _ = 0)%Z |- _ => rewrite Z.mul_eq_0 in E; destruct E as [Eq|Eq] end. rewrite Hz' in Eq; rewrite Eq in *; auto with zarith. rewrite Hz in Eq; rewrite Eq in *; auto with zarith. rewrite <- Hg1, <- Hg2, <- Hg1', <- Hg2'; ring. Qed. Instance strong_spec_mul_norm x y : forall `(Reduced x, Reduced y), Reduced (mul_norm x y). Proof. unfold Reduced; intros. rewrite strong_spec_red, Qred_iff. destruct x as [zx|nx dx]; destruct y as [zy|ny dy]. simpl in *; auto with zarith. simpl. rewrite <- Qred_iff, <- strong_spec_red, strong_spec_mul_norm_Qz_Qq; auto. simpl. rewrite <- Qred_iff, <- strong_spec_red, strong_spec_mul_norm_Qz_Qq; auto. simpl. destruct (spec_irred nx dy) as [g Hg]. 
destruct (spec_irred ny dx) as [g' Hg']. assert (Hz := spec_irred_zero nx dy). assert (Hz':= spec_irred_zero ny dx). assert (Hgc := strong_spec_irred nx dy). assert (Hgc' := strong_spec_irred ny dx). destruct irred as (n1,d1); destruct irred as (n2,d2). simpl snd in *; destruct Hg as [Hg1 Hg2]; destruct Hg' as [Hg1' Hg2']. unfold norm_denum; qsimpl. assert (NEQ : NN.to_Z dy <> 0%Z) by (rewrite Hz; intros EQ; rewrite EQ in *; romega). specialize (Hgc NEQ). assert (NEQ' : NN.to_Z dx <> 0%Z) by (rewrite Hz'; intro EQ; rewrite EQ in *; romega). specialize (Hgc' NEQ'). revert H H0. rewrite 2 strong_spec_red, 2 Qred_iff; simpl. destr_eqb; simpl; nzsimpl; try romega; intros. rewrite Z2Pos.id in *; auto. apply Zgcd_mult_rel_prime; rewrite Z.gcd_comm; apply Zgcd_mult_rel_prime; rewrite Z.gcd_comm; auto. rewrite Zgcd_1_rel_prime in *. apply bezout_rel_prime. destruct (rel_prime_bezout (ZZ.to_Z ny) (NN.to_Z dy)) as [u v Huv]; trivial. apply Bezout_intro with (u*g')%Z (v*g)%Z. rewrite <- Huv, <- Hg1', <- Hg2. ring. rewrite Zgcd_1_rel_prime in *. apply bezout_rel_prime. destruct (rel_prime_bezout (ZZ.to_Z nx) (NN.to_Z dx)) as [u v Huv]; trivial. apply Bezout_intro with (u*g)%Z (v*g')%Z. rewrite <- Huv, <- Hg2', <- Hg1. ring. Qed. Definition inv (x: t): t := match x with | Qz z => match ZZ.compare ZZ.zero z with | Eq => zero | Lt => Qq ZZ.one (Zabs_N z) | Gt => Qq ZZ.minus_one (Zabs_N z) end | Qq n d => match ZZ.compare ZZ.zero n with | Eq => zero | Lt => Qq (Z_of_N d) (Zabs_N n) | Gt => Qq (ZZ.opp (Z_of_N d)) (Zabs_N n) end end. Theorem spec_inv : forall x, [inv x] == /[x]. Proof. destruct x as [ z | n d ]. (* Qz z *) simpl. rewrite ZZ.spec_compare; destr_zcompare. (* 0 = z *) rewrite <- H. simpl; nzsimpl; compute; auto. (* 0 < z *) simpl. destr_eqb; nzsimpl; [ intros; rewrite Z.abs_eq in *; romega | intros _ ]. set (z':=ZZ.to_Z z) in *; clearbody z'. red; simpl. rewrite Z.abs_eq by romega. rewrite Z2Pos.id by auto. unfold Qinv; simpl; destruct z'; simpl; auto; discriminate. (* 0 > z *) simpl. destr_eqb; nzsimpl; [ intros; rewrite Z.abs_neq in *; romega | intros _ ]. set (z':=ZZ.to_Z z) in *; clearbody z'. red; simpl. rewrite Z.abs_neq by romega. rewrite Z2Pos.id by romega. unfold Qinv; simpl; destruct z'; simpl; auto; discriminate. (* Qq n d *) simpl. rewrite ZZ.spec_compare; destr_zcompare. (* 0 = n *) rewrite <- H. simpl; nzsimpl. destr_eqb; intros; compute; auto. (* 0 < n *) simpl. destr_eqb; nzsimpl; intros. intros; rewrite Z.abs_eq in *; romega. intros; rewrite Z.abs_eq in *; romega. nsubst; compute; auto. set (n':=ZZ.to_Z n) in *; clearbody n'. rewrite Z.abs_eq by romega. red; simpl. rewrite Z2Pos.id by auto. unfold Qinv; simpl; destruct n'; simpl; auto; try discriminate. rewrite Pos2Z.inj_mul, Z2Pos.id; auto. (* 0 > n *) simpl. destr_eqb; nzsimpl; intros. intros; rewrite Z.abs_neq in *; romega. intros; rewrite Z.abs_neq in *; romega. nsubst; compute; auto. set (n':=ZZ.to_Z n) in *; clearbody n'. red; simpl; nzsimpl. rewrite Z.abs_neq by romega. rewrite Z2Pos.id by romega. unfold Qinv; simpl; destruct n'; simpl; auto; try discriminate. assert (T : forall x, Zneg x = Z.opp (Zpos x)) by auto. rewrite T, Pos2Z.inj_mul, Z2Pos.id; auto; ring. Qed. 
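(** Informal illustration (added remark): [inv] exchanges numerator and
    denominator, keeping the sign on the numerator. A representation of
    3/4 is thus mapped to one of 4/3, a representation of -3/4 to one
    of -4/3, and [Qz ZZ.zero] is mapped to [zero], in accordance with
    the QArith convention that the inverse of 0 is 0. *)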
Definition inv_norm (x: t): t := match x with | Qz z => match ZZ.compare ZZ.zero z with | Eq => zero | Lt => Qq ZZ.one (Zabs_N z) | Gt => Qq ZZ.minus_one (Zabs_N z) end | Qq n d => if NN.eqb d NN.zero then zero else match ZZ.compare ZZ.zero n with | Eq => zero | Lt => match ZZ.compare n ZZ.one with | Gt => Qq (Z_of_N d) (Zabs_N n) | _ => Qz (Z_of_N d) end | Gt => match ZZ.compare n ZZ.minus_one with | Lt => Qq (ZZ.opp (Z_of_N d)) (Zabs_N n) | _ => Qz (ZZ.opp (Z_of_N d)) end end end. Theorem spec_inv_norm : forall x, [inv_norm x] == /[x]. Proof. intros. rewrite <- spec_inv. destruct x as [ z | n d ]. (* Qz z *) simpl. rewrite ZZ.spec_compare; destr_zcompare; auto with qarith. (* Qq n d *) simpl; nzsimpl; destr_eqb. destr_zcompare; simpl; auto with qarith. destr_eqb; nzsimpl; auto with qarith. intros _ Hd; rewrite Hd; auto with qarith. destr_eqb; nzsimpl; auto with qarith. intros _ Hd; rewrite Hd; auto with qarith. (* 0 < n *) destr_zcompare; auto with qarith. destr_zcompare; nzsimpl; simpl; auto with qarith; intros. destr_eqb; nzsimpl; [ intros; rewrite Z.abs_eq in *; romega | intros _ ]. rewrite H0; auto with qarith. romega. (* 0 > n *) destr_zcompare; nzsimpl; simpl; auto with qarith. destr_eqb; nzsimpl; [ intros; rewrite Z.abs_neq in *; romega | intros _ ]. rewrite H0; auto with qarith. romega. Qed. Instance strong_spec_inv_norm x : Reduced x -> Reduced (inv_norm x). Proof. unfold Reduced. intros. destruct x as [ z | n d ]. (* Qz *) simpl; nzsimpl. rewrite strong_spec_red, Qred_iff. destr_zcompare; simpl; nzsimpl; auto. destr_eqb; nzsimpl; simpl; auto. destr_eqb; nzsimpl; simpl; auto. (* Qq n d *) rewrite strong_spec_red, Qred_iff in H; revert H. simpl; nzsimpl. destr_eqb; nzsimpl; auto with qarith. destr_zcompare; simpl; nzsimpl; auto; intros. (* 0 < n *) destr_zcompare; simpl; nzsimpl; auto. destr_eqb; nzsimpl; simpl; auto. rewrite Z.abs_eq; romega. intros _. rewrite strong_spec_norm; simpl; nzsimpl. destr_eqb; nzsimpl. rewrite Z.abs_eq; romega. intros _. rewrite Qred_iff. simpl. rewrite Z.abs_eq; auto with zarith. rewrite Z2Pos.id in *; auto. rewrite Z.gcd_comm; auto. (* 0 > n *) destr_eqb; nzsimpl; simpl; auto; intros. destr_zcompare; simpl; nzsimpl; auto. destr_eqb; nzsimpl. rewrite Z.abs_neq; romega. intros _. rewrite strong_spec_norm; simpl; nzsimpl. destr_eqb; nzsimpl. rewrite Z.abs_neq; romega. intros _. rewrite Qred_iff. simpl. rewrite Z2Pos.id in *; auto. intros. rewrite Z.gcd_comm, Z.gcd_abs_l, Z.gcd_comm. apply Zis_gcd_gcd; auto with zarith. apply Zis_gcd_minus. rewrite Z.opp_involutive, <- H1; apply Zgcd_is_gcd. rewrite Z.abs_neq; romega. Qed. Definition div x y := mul x (inv y). Theorem spec_div x y: [div x y] == [x] / [y]. Proof. unfold div; rewrite spec_mul; auto. unfold Qdiv; apply Qmult_comp. apply Qeq_refl. apply spec_inv; auto. Qed. Definition div_norm x y := mul_norm x (inv_norm y). Theorem spec_div_norm x y: [div_norm x y] == [x] / [y]. Proof. unfold div_norm; rewrite spec_mul_norm; auto. unfold Qdiv; apply Qmult_comp. apply Qeq_refl. apply spec_inv_norm; auto. Qed. Instance strong_spec_div_norm x y `(Reduced x, Reduced y) : Reduced (div_norm x y). Proof. intros; unfold div_norm. apply strong_spec_mul_norm; auto. apply strong_spec_inv_norm; auto. Qed. Definition square (x: t): t := match x with | Qz zx => Qz (ZZ.square zx) | Qq nx dx => Qq (ZZ.square nx) (NN.square dx) end. Theorem spec_square : forall x, [square x] == [x] ^ 2. Proof. destruct x as [ z | n d ]. simpl; rewrite ZZ.spec_square; red; auto. simpl. destr_eqb; nzsimpl; intros. apply Qeq_refl. 
rewrite NN.spec_square in *; nzsimpl. rewrite Z.mul_eq_0 in *; romega. rewrite NN.spec_square in *; nzsimpl; nsubst; romega. rewrite ZZ.spec_square, NN.spec_square. red; simpl. rewrite Pos2Z.inj_mul; rewrite !Z2Pos.id; auto. apply Z.mul_pos_pos; auto. Qed. Definition power_pos (x : t) p : t := match x with | Qz zx => Qz (ZZ.pow_pos zx p) | Qq nx dx => Qq (ZZ.pow_pos nx p) (NN.pow_pos dx p) end. Theorem spec_power_pos : forall x p, [power_pos x p] == [x] ^ Zpos p. Proof. intros [ z | n d ] p; unfold power_pos. (* Qz *) simpl. rewrite ZZ.spec_pow_pos, Qpower_decomp. red; simpl; f_equal. now rewrite Pos2Z.inj_pow, Z.pow_1_l. (* Qq *) simpl. rewrite ZZ.spec_pow_pos. destr_eqb; nzsimpl; intros. - apply Qeq_sym; apply Qpower_positive_0. - rewrite NN.spec_pow_pos in *. assert (0 < NN.to_Z d ^ ' p)%Z by (apply Z.pow_pos_nonneg; auto with zarith). romega. - exfalso. rewrite NN.spec_pow_pos in *. nsubst. rewrite Z.pow_0_l' in *; [romega|discriminate]. - rewrite Qpower_decomp. red; simpl; do 3 f_equal. apply Pos2Z.inj. rewrite Pos2Z.inj_pow. rewrite 2 Z2Pos.id by (generalize (NN.spec_pos d); romega). now rewrite NN.spec_pow_pos. Qed. Instance strong_spec_power_pos x p `(Reduced x) : Reduced (power_pos x p). Proof. destruct x as [z | n d]; simpl; intros. red; simpl; auto. red; simpl; intros. rewrite strong_spec_norm; simpl. destr_eqb; nzsimpl; intros. simpl; auto. rewrite Qred_iff. revert H. unfold Reduced; rewrite strong_spec_red, Qred_iff; simpl. destr_eqb; nzsimpl; simpl; intros. exfalso. rewrite NN.spec_pow_pos in *. nsubst. rewrite Z.pow_0_l' in *; [romega|discriminate]. rewrite Z2Pos.id in *; auto. rewrite NN.spec_pow_pos, ZZ.spec_pow_pos; auto. rewrite Zgcd_1_rel_prime in *. apply rel_prime_Zpower; auto with zarith. Qed. Definition power (x : t) (z : Z) : t := match z with | Z0 => one | Zpos p => power_pos x p | Zneg p => inv (power_pos x p) end. Theorem spec_power : forall x z, [power x z] == [x]^z. Proof. destruct z. simpl; nzsimpl; red; auto. apply spec_power_pos. simpl. rewrite spec_inv, spec_power_pos; apply Qeq_refl. Qed. Definition power_norm (x : t) (z : Z) : t := match z with | Z0 => one | Zpos p => power_pos x p | Zneg p => inv_norm (power_pos x p) end. Theorem spec_power_norm : forall x z, [power_norm x z] == [x]^z. Proof. destruct z. simpl; nzsimpl; red; auto. apply spec_power_pos. simpl. rewrite spec_inv_norm, spec_power_pos; apply Qeq_refl. Qed. Instance strong_spec_power_norm x z : Reduced x -> Reduced (power_norm x z). Proof. destruct z; simpl. intros _; unfold Reduced; rewrite strong_spec_red. unfold one. simpl to_Q; nzsimpl; auto. intros; apply strong_spec_power_pos; auto. intros; apply strong_spec_inv_norm; apply strong_spec_power_pos; auto. Qed. (** Interaction with [Qcanon.Qc] *) Open Scope Qc_scope. Definition of_Qc q := of_Q (this q). Definition to_Qc q := !! [q]. Notation "[[ x ]]" := (to_Qc x). Theorem strong_spec_of_Qc : forall q, [of_Qc q] = q. Proof. intros (q,Hq); intros. unfold of_Qc; rewrite strong_spec_of_Q; auto. Qed. Instance strong_spec_of_Qc_bis q : Reduced (of_Qc q). Proof. intros; red; rewrite strong_spec_red, strong_spec_of_Qc. destruct q; simpl; auto. Qed. Theorem spec_of_Qc: forall q, [[of_Qc q]] = q. Proof. intros; apply Qc_decomp; simpl; intros. rewrite strong_spec_of_Qc; auto. Qed. Theorem spec_oppc: forall q, [[opp q]] = -[[q]]. Proof. intros q; unfold Qcopp, to_Qc, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. rewrite spec_opp, <- Qred_opp, Qred_correct. apply Qeq_refl. Qed. 
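(** Added remark (informal): [to_Qc] sends a value to its canonical
    representative in [Qc], so [[x]] is always reduced even when [x]
    itself is not. For instance, if [x] denotes the unreduced fraction
    16/60, then [[x]] is the canonical rational 4/15, since [Q2Qc]
    normalizes through [Qred]. *)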
Theorem spec_oppc_bis : forall q : Qc, [opp (of_Qc q)] = - q. Proof. intros. rewrite <- strong_spec_opp_norm by apply strong_spec_of_Qc_bis. rewrite strong_spec_red. symmetry; apply (Qred_complete (-q)%Q). rewrite spec_opp, strong_spec_of_Qc; auto with qarith. Qed. Theorem spec_comparec: forall q1 q2, compare q1 q2 = ([[q1]] ?= [[q2]]). Proof. unfold Qccompare, to_Qc. intros q1 q2; rewrite spec_compare; simpl; auto. apply Qcompare_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_addc x y: [[add x y]] = [[x]] + [[y]]. Proof. unfold to_Qc. transitivity (!! ([x] + [y])). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_add; auto. unfold Qcplus, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qplus_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_add_normc x y: [[add_norm x y]] = [[x]] + [[y]]. Proof. unfold to_Qc. transitivity (!! ([x] + [y])). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_add_norm; auto. unfold Qcplus, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qplus_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_add_normc_bis : forall x y : Qc, [add_norm (of_Qc x) (of_Qc y)] = x+y. Proof. intros. rewrite <- strong_spec_add_norm by apply strong_spec_of_Qc_bis. rewrite strong_spec_red. symmetry; apply (Qred_complete (x+y)%Q). rewrite spec_add_norm, ! strong_spec_of_Qc; auto with qarith. Qed. Theorem spec_subc x y: [[sub x y]] = [[x]] - [[y]]. Proof. unfold sub; rewrite spec_addc; auto. rewrite spec_oppc; ring. Qed. Theorem spec_sub_normc x y: [[sub_norm x y]] = [[x]] - [[y]]. Proof. unfold sub_norm; rewrite spec_add_normc; auto. rewrite spec_oppc; ring. Qed. Theorem spec_sub_normc_bis : forall x y : Qc, [sub_norm (of_Qc x) (of_Qc y)] = x-y. Proof. intros. rewrite <- strong_spec_sub_norm by apply strong_spec_of_Qc_bis. rewrite strong_spec_red. symmetry; apply (Qred_complete (x+(-y)%Qc)%Q). rewrite spec_sub_norm, ! strong_spec_of_Qc. unfold Qcopp, Q2Qc, this. rewrite Qred_correct ; auto with qarith. Qed. Theorem spec_mulc x y: [[mul x y]] = [[x]] * [[y]]. Proof. unfold to_Qc. transitivity (!! ([x] * [y])). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_mul; auto. unfold Qcmult, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qmult_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_mul_normc x y: [[mul_norm x y]] = [[x]] * [[y]]. Proof. unfold to_Qc. transitivity (!! ([x] * [y])). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_mul_norm; auto. unfold Qcmult, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qmult_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_mul_normc_bis : forall x y : Qc, [mul_norm (of_Qc x) (of_Qc y)] = x*y. Proof. intros. rewrite <- strong_spec_mul_norm by apply strong_spec_of_Qc_bis. rewrite strong_spec_red. symmetry; apply (Qred_complete (x*y)%Q). rewrite spec_mul_norm, ! strong_spec_of_Qc; auto with qarith. Qed. Theorem spec_invc x: [[inv x]] = /[[x]]. Proof. unfold to_Qc. transitivity (!! (/[x])). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_inv; auto. unfold Qcinv, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qinv_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_inv_normc x: [[inv_norm x]] = /[[x]]. Proof. unfold to_Qc. transitivity (!! (/[x])). unfold Q2Qc. 
apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_inv_norm; auto. unfold Qcinv, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qinv_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_inv_normc_bis : forall x : Qc, [inv_norm (of_Qc x)] = /x. Proof. intros. rewrite <- strong_spec_inv_norm by apply strong_spec_of_Qc_bis. rewrite strong_spec_red. symmetry; apply (Qred_complete (/x)%Q). rewrite spec_inv_norm, ! strong_spec_of_Qc; auto with qarith. Qed. Theorem spec_divc x y: [[div x y]] = [[x]] / [[y]]. Proof. unfold div; rewrite spec_mulc; auto. unfold Qcdiv; apply f_equal2 with (f := Qcmult); auto. apply spec_invc; auto. Qed. Theorem spec_div_normc x y: [[div_norm x y]] = [[x]] / [[y]]. Proof. unfold div_norm; rewrite spec_mul_normc; auto. unfold Qcdiv; apply f_equal2 with (f := Qcmult); auto. apply spec_inv_normc; auto. Qed. Theorem spec_div_normc_bis : forall x y : Qc, [div_norm (of_Qc x) (of_Qc y)] = x/y. Proof. intros. rewrite <- strong_spec_div_norm by apply strong_spec_of_Qc_bis. rewrite strong_spec_red. symmetry; apply (Qred_complete (x*(/y)%Qc)%Q). rewrite spec_div_norm, ! strong_spec_of_Qc. unfold Qcinv, Q2Qc, this; rewrite Qred_correct; auto with qarith. Qed. Theorem spec_squarec x: [[square x]] = [[x]]^2. Proof. unfold to_Qc. transitivity (!! ([x]^2)). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_square; auto. simpl Qcpower. replace (!! [x] * 1) with (!![x]); try ring. simpl. unfold Qcmult, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. apply Qmult_comp; apply Qeq_sym; apply Qred_correct. Qed. Theorem spec_power_posc x p: [[power_pos x p]] = [[x]] ^ Pos.to_nat p. Proof. unfold to_Qc. transitivity (!! ([x]^Zpos p)). unfold Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete; apply spec_power_pos; auto. induction p using Pos.peano_ind. simpl; ring. rewrite Pos2Nat.inj_succ; simpl Qcpower. rewrite <- IHp; clear IHp. unfold Qcmult, Q2Qc. apply Qc_decomp; intros _ _; unfold this. apply Qred_complete. setoid_replace ([x] ^ ' Pos.succ p)%Q with ([x] * [x] ^ ' p)%Q. apply Qmult_comp; apply Qeq_sym; apply Qred_correct. simpl. rewrite <- Pos.add_1_l. rewrite Qpower_plus_positive; simpl; apply Qeq_refl. Qed. End Make. coq-8.4pl2/theories/Numbers/Rational/BigQ/BigQ.v0000640000175000001440000001140412064143020020460 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y" := (BigQ.lt y x) (only parsing) : bigQ_scope. Notation "x >= y" := (BigQ.le y x) (only parsing) : bigQ_scope. Notation "x < y < z" := (x isBigZcst t | BigQ.Qq ?n ?d => match isBigZcst n with | true => isBigNcst d | false => constr:false end | BigQ.zero => constr:true | BigQ.one => constr:true | BigQ.minus_one => constr:true | _ => constr:false end. Ltac BigQcst t := match isBigQcst t with | true => constr:t | false => constr:NotConstant end. Add Field BigQfield : BigQfieldth (decidable BigQ.eqb_correct, completeness BigQ.eqb_complete, constants [BigQcst], power_tac BigQpowerth [Qpow_tac]). Section TestField. Let ex1 : forall x y z, (x+y)*z == (x*z)+(y*z). intros. ring. Qed. Let ex8 : forall x, x ^ 2 == x*x. intro. ring. Qed. Let ex10 : forall x y, y!=0 -> (x/y)*y == x. intros. field. auto. Qed. End TestField. (** [BigQ] can also benefit from an "order" tactic *) Ltac bigQ_order := BigQ.order. Section TestOrder. Let test : forall x y : bigQ, x<=y -> y<=x -> x==y. Proof. bigQ_order. Qed. 
End TestOrder. (** We can also reason by switching to QArith thanks to tactic BigQ.qify. *) Section TestQify. Let test : forall x : bigQ, 0+x == 1*x. Proof. intro x. BigQ.qify. ring. Qed. End TestQify. coq-8.4pl2/theories/Numbers/Rational/SpecViaQ/0000750000175000001440000000000012127276550020316 5ustar notinuserscoq-8.4pl2/theories/Numbers/Rational/SpecViaQ/QSig.v0000640000175000001440000001621012010532755021342 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Q. Local Notation "[ x ]" := (to_Q x). Definition eq x y := [x] == [y]. Definition lt x y := [x] < [y]. Definition le x y := [x] <= [y]. Parameter of_Q : Q -> t. Parameter spec_of_Q: forall x, to_Q (of_Q x) == x. Parameter red : t -> t. Parameter compare : t -> t -> comparison. Parameter eq_bool : t -> t -> bool. Parameter max : t -> t -> t. Parameter min : t -> t -> t. Parameter zero : t. Parameter one : t. Parameter minus_one : t. Parameter add : t -> t -> t. Parameter sub : t -> t -> t. Parameter opp : t -> t. Parameter mul : t -> t -> t. Parameter square : t -> t. Parameter inv : t -> t. Parameter div : t -> t -> t. Parameter power : t -> Z -> t. Parameter spec_red : forall x, [red x] == [x]. Parameter strong_spec_red : forall x, [red x] = Qred [x]. Parameter spec_compare : forall x y, compare x y = ([x] ?= [y]). Parameter spec_eq_bool : forall x y, eq_bool x y = Qeq_bool [x] [y]. Parameter spec_max : forall x y, [max x y] == Qmax [x] [y]. Parameter spec_min : forall x y, [min x y] == Qmin [x] [y]. Parameter spec_0: [zero] == 0. Parameter spec_1: [one] == 1. Parameter spec_m1: [minus_one] == -(1). Parameter spec_add: forall x y, [add x y] == [x] + [y]. Parameter spec_sub: forall x y, [sub x y] == [x] - [y]. Parameter spec_opp: forall x, [opp x] == - [x]. Parameter spec_mul: forall x y, [mul x y] == [x] * [y]. Parameter spec_square: forall x, [square x] == [x] ^ 2. Parameter spec_inv : forall x, [inv x] == / [x]. Parameter spec_div: forall x y, [div x y] == [x] / [y]. Parameter spec_power: forall x z, [power x z] == [x] ^ z. End QType. (** NB: several of the above functions come with [..._norm] variants that expect reduced arguments and return reduced results. *) (** TODO : also speak of specifications via Qcanon ... *) Module Type QType_Notation (Import Q : QType). Notation "[ x ]" := (to_Q x). Infix "==" := eq (at level 70). Notation "x != y" := (~x==y) (at level 70). Infix "<=" := le. Infix "<" := lt. Notation "0" := zero. Notation "1" := one. Infix "+" := add. Infix "-" := sub. Infix "*" := mul. Notation "- x" := (opp x). Infix "/" := div. Notation "/ x" := (inv x). Infix "^" := power. End QType_Notation. Module Type QType' := QType <+ QType_Notation. Module QProperties (Import Q : QType'). (** Conversion to Q *) Hint Rewrite spec_red spec_compare spec_eq_bool spec_min spec_max spec_add spec_sub spec_opp spec_mul spec_square spec_inv spec_div spec_power : qsimpl. Ltac qify := unfold eq, lt, le in *; autorewrite with qsimpl; try rewrite spec_0 in *; try rewrite spec_1 in *; try rewrite spec_m1 in *. (** NB: do not add [spec_0] in the autorewrite database. Otherwise, after instanciation in BigQ, this lemma become convertible to 0=0, and autorewrite loops. Idem for [spec_1] and [spec_m1] *) (** Morphisms *) Ltac solve_wd1 := intros x x' Hx; qify; now rewrite Hx. Ltac solve_wd2 := intros x x' Hx y y' Hy; qify; now rewrite Hx, Hy. Local Obligation Tactic := solve_wd2 || solve_wd1. Instance : Measure to_Q. 
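(** Added remark (informal sketch): the [Program Instance] declarations
    below generate compatibility obligations that the [Local Obligation
    Tactic] above discharges via [qify]. For example, the obligation for
    [add_wd] amounts to: from [x]==[x'] and [y]==[y'] deduce
    [add x y]==[add x' y'], which after [qify] reduces to congruence of
    [Qplus] on [Q], roughly [now rewrite Hx, Hy]. *)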
Instance eq_equiv : Equivalence eq := {}. Program Instance lt_wd : Proper (eq==>eq==>iff) lt. Program Instance le_wd : Proper (eq==>eq==>iff) le. Program Instance red_wd : Proper (eq==>eq) red. Program Instance compare_wd : Proper (eq==>eq==>Logic.eq) compare. Program Instance eq_bool_wd : Proper (eq==>eq==>Logic.eq) eq_bool. Program Instance min_wd : Proper (eq==>eq==>eq) min. Program Instance max_wd : Proper (eq==>eq==>eq) max. Program Instance add_wd : Proper (eq==>eq==>eq) add. Program Instance sub_wd : Proper (eq==>eq==>eq) sub. Program Instance opp_wd : Proper (eq==>eq) opp. Program Instance mul_wd : Proper (eq==>eq==>eq) mul. Program Instance square_wd : Proper (eq==>eq) square. Program Instance inv_wd : Proper (eq==>eq) inv. Program Instance div_wd : Proper (eq==>eq==>eq) div. Program Instance power_wd : Proper (eq==>Logic.eq==>eq) power. (** Let's implement [HasCompare] *) Lemma compare_spec : forall x y, CompareSpec (x==y) (x x x == y. Proof. intros. qify. apply Qeq_bool_iff. Qed. Lemma eqb_correct : forall x y, eq_bool x y = true -> x == y. Proof. now apply eqb_eq. Qed. Lemma eqb_complete : forall x y, x == y -> eq_bool x y = true. Proof. now apply eqb_eq. Qed. (** Let's implement [HasMinMax] *) Lemma max_l : forall x y, y<=x -> max x y == x. Proof. intros x y. qify. apply Qminmax.Q.max_l. Qed. Lemma max_r : forall x y, x<=y -> max x y == y. Proof. intros x y. qify. apply Qminmax.Q.max_r. Qed. Lemma min_l : forall x y, x<=y -> min x y == x. Proof. intros x y. qify. apply Qminmax.Q.min_l. Qed. Lemma min_r : forall x y, y<=x -> min x y == y. Proof. intros x y. qify. apply Qminmax.Q.min_r. Qed. (** Q is a ring *) Lemma add_0_l : forall x, 0+x == x. Proof. intros. qify. apply Qplus_0_l. Qed. Lemma add_comm : forall x y, x+y == y+x. Proof. intros. qify. apply Qplus_comm. Qed. Lemma add_assoc : forall x y z, x+(y+z) == x+y+z. Proof. intros. qify. apply Qplus_assoc. Qed. Lemma mul_1_l : forall x, 1*x == x. Proof. intros. qify. apply Qmult_1_l. Qed. Lemma mul_comm : forall x y, x*y == y*x. Proof. intros. qify. apply Qmult_comm. Qed. Lemma mul_assoc : forall x y z, x*(y*z) == x*y*z. Proof. intros. qify. apply Qmult_assoc. Qed. Lemma mul_add_distr_r : forall x y z, (x+y)*z == x*z + y*z. Proof. intros. qify. apply Qmult_plus_distr_l. Qed. Lemma sub_add_opp : forall x y, x-y == x+(-y). Proof. intros. qify. now unfold Qminus. Qed. Lemma add_opp_diag_r : forall x, x+(-x) == 0. Proof. intros. qify. apply Qplus_opp_r. Qed. (** Q is a field *) Lemma neq_1_0 : 1!=0. Proof. intros. qify. apply Q_apart_0_1. Qed. Lemma div_mul_inv : forall x y, x/y == x*(/y). Proof. intros. qify. now unfold Qdiv. Qed. Lemma mul_inv_diag_l : forall x, x!=0 -> /x * x == 1. Proof. intros x. qify. rewrite Qmult_comm. apply Qmult_inv_r. Qed. End QProperties. Module QTypeExt (Q : QType) <: QType <: TotalOrder <: HasCompare Q <: HasMinMax Q <: HasEqBool Q := Q <+ QProperties. coq-8.4pl2/theories/Numbers/Integer/0000750000175000001440000000000012127276550016467 5ustar notinuserscoq-8.4pl2/theories/Numbers/Integer/NatPairs/0000750000175000001440000000000012127276550020210 5ustar notinuserscoq-8.4pl2/theories/Numbers/Integer/NatPairs/ZNatPairs.v0000640000175000001440000002404712010532755022253 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* a mod b == a - b*(a/b). Proof. intros. rewrite <- add_move_l. symmetry. now apply div_mod. Qed. 
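(** Added numeric illustration (informal): the properties below follow
    the rounding-toward-bottom (floor) convention, in which a nonzero
    remainder takes the sign of the divisor. Concretely,
    (-7)/2 == -4 with (-7) mod 2 == 1, and 7/(-2) == -4 with
    7 mod (-2) == -1, so that in both cases
    a == b*(a/b) + (a mod b), as stated by [div_mod]. *)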
(** We have a general bound for absolute values *) Lemma mod_bound_abs : forall a b, b~=0 -> abs (a mod b) < abs b. Proof. intros. destruct (abs_spec b) as [(LE,EQ)|(LE,EQ)]; rewrite EQ. destruct (mod_pos_bound a b). order. now rewrite abs_eq. destruct (mod_neg_bound a b). order. rewrite abs_neq; trivial. now rewrite <- opp_lt_mono. Qed. (** Uniqueness theorems *) Theorem div_mod_unique : forall b q1 q2 r1 r2 : t, (0<=r1 (0<=r2 b*q1+r1 == b*q2+r2 -> q1 == q2 /\ r1 == r2. Proof. intros b q1 q2 r1 r2 Hr1 Hr2 EQ. destruct Hr1; destruct Hr2; try (intuition; order). apply div_mod_unique with b; trivial. rewrite <- (opp_inj_wd r1 r2). apply div_mod_unique with (-b); trivial. rewrite <- opp_lt_mono, opp_nonneg_nonpos; tauto. rewrite <- opp_lt_mono, opp_nonneg_nonpos; tauto. now rewrite 2 mul_opp_l, <- 2 opp_add_distr, opp_inj_wd. Qed. Theorem div_unique: forall a b q r, (0<=r a == b*q + r -> q == a/b. Proof. intros a b q r Hr EQ. assert (Hb : b~=0) by (destruct Hr; intuition; order). destruct (div_mod_unique b q (a/b) r (a mod b)); trivial. destruct Hr; [left; apply mod_pos_bound|right; apply mod_neg_bound]; intuition order. now rewrite <- div_mod. Qed. Theorem div_unique_pos: forall a b q r, 0<=r a == b*q + r -> q == a/b. Proof. intros; apply div_unique with r; auto. Qed. Theorem div_unique_neg: forall a b q r, b a == b*q + r -> q == a/b. Proof. intros; apply div_unique with r; auto. Qed. Theorem mod_unique: forall a b q r, (0<=r a == b*q + r -> r == a mod b. Proof. intros a b q r Hr EQ. assert (Hb : b~=0) by (destruct Hr; intuition; order). destruct (div_mod_unique b q (a/b) r (a mod b)); trivial. destruct Hr; [left; apply mod_pos_bound|right; apply mod_neg_bound]; intuition order. now rewrite <- div_mod. Qed. Theorem mod_unique_pos: forall a b q r, 0<=r a == b*q + r -> r == a mod b. Proof. intros; apply mod_unique with q; auto. Qed. Theorem mod_unique_neg: forall a b q r, b a == b*q + r -> r == a mod b. Proof. intros; apply mod_unique with q; auto. Qed. (** Sign rules *) Ltac pos_or_neg a := let LT := fresh "LT" in let LE := fresh "LE" in destruct (le_gt_cases 0 a) as [LE|LT]; [|rewrite <- opp_pos_neg in LT]. Fact mod_bound_or : forall a b, b~=0 -> 0<=a mod b 0 <= -(a mod b) < -b \/ -b < -(a mod b) <= 0. Proof. intros. destruct (lt_ge_cases 0 b); [right|left]. rewrite <- opp_lt_mono, opp_nonpos_nonneg. destruct (mod_pos_bound a b); intuition; order. rewrite <- opp_lt_mono, opp_nonneg_nonpos. destruct (mod_neg_bound a b); intuition; order. Qed. Lemma div_opp_opp : forall a b, b~=0 -> -a/-b == a/b. Proof. intros. symmetry. apply div_unique with (- (a mod b)). now apply opp_mod_bound_or. rewrite mul_opp_l, <- opp_add_distr, <- div_mod; order. Qed. Lemma mod_opp_opp : forall a b, b~=0 -> (-a) mod (-b) == - (a mod b). Proof. intros. symmetry. apply mod_unique with (a/b). now apply opp_mod_bound_or. rewrite mul_opp_l, <- opp_add_distr, <- div_mod; order. Qed. (** With the current conventions, the other sign rules are rather complex. *) Lemma div_opp_l_z : forall a b, b~=0 -> a mod b == 0 -> (-a)/b == -(a/b). Proof. intros a b Hb H. symmetry. apply div_unique with 0. destruct (lt_ge_cases 0 b); [left|right]; intuition; order. rewrite <- opp_0, <- H. rewrite mul_opp_r, <- opp_add_distr, <- div_mod; order. Qed. Lemma div_opp_l_nz : forall a b, b~=0 -> a mod b ~= 0 -> (-a)/b == -(a/b)-1. Proof. intros a b Hb H. symmetry. apply div_unique with (b - a mod b). destruct (lt_ge_cases 0 b); [left|right]. rewrite le_0_sub. rewrite <- (sub_0_r b) at 5. rewrite <- sub_lt_mono_l. 
destruct (mod_pos_bound a b); intuition; order. rewrite le_sub_0. rewrite <- (sub_0_r b) at 1. rewrite <- sub_lt_mono_l. destruct (mod_neg_bound a b); intuition; order. rewrite <- (add_opp_r b), mul_sub_distr_l, mul_1_r, sub_add_simpl_r_l. rewrite mul_opp_r, <-opp_add_distr, <-div_mod; order. Qed. Lemma mod_opp_l_z : forall a b, b~=0 -> a mod b == 0 -> (-a) mod b == 0. Proof. intros a b Hb H. symmetry. apply mod_unique with (-(a/b)). destruct (lt_ge_cases 0 b); [left|right]; intuition; order. rewrite <- opp_0, <- H. rewrite mul_opp_r, <- opp_add_distr, <- div_mod; order. Qed. Lemma mod_opp_l_nz : forall a b, b~=0 -> a mod b ~= 0 -> (-a) mod b == b - a mod b. Proof. intros a b Hb H. symmetry. apply mod_unique with (-(a/b)-1). destruct (lt_ge_cases 0 b); [left|right]. rewrite le_0_sub. rewrite <- (sub_0_r b) at 5. rewrite <- sub_lt_mono_l. destruct (mod_pos_bound a b); intuition; order. rewrite le_sub_0. rewrite <- (sub_0_r b) at 1. rewrite <- sub_lt_mono_l. destruct (mod_neg_bound a b); intuition; order. rewrite <- (add_opp_r b), mul_sub_distr_l, mul_1_r, sub_add_simpl_r_l. rewrite mul_opp_r, <-opp_add_distr, <-div_mod; order. Qed. Lemma div_opp_r_z : forall a b, b~=0 -> a mod b == 0 -> a/(-b) == -(a/b). Proof. intros. rewrite <- (opp_involutive a) at 1. rewrite div_opp_opp; auto using div_opp_l_z. Qed. Lemma div_opp_r_nz : forall a b, b~=0 -> a mod b ~= 0 -> a/(-b) == -(a/b)-1. Proof. intros. rewrite <- (opp_involutive a) at 1. rewrite div_opp_opp; auto using div_opp_l_nz. Qed. Lemma mod_opp_r_z : forall a b, b~=0 -> a mod b == 0 -> a mod (-b) == 0. Proof. intros. rewrite <- (opp_involutive a) at 1. now rewrite mod_opp_opp, mod_opp_l_z, opp_0. Qed. Lemma mod_opp_r_nz : forall a b, b~=0 -> a mod b ~= 0 -> a mod (-b) == (a mod b) - b. Proof. intros. rewrite <- (opp_involutive a) at 1. rewrite mod_opp_opp, mod_opp_l_nz by trivial. now rewrite opp_sub_distr, add_comm, add_opp_r. Qed. (** The sign of [a mod b] is the one of [b] (when it isn't null) *) Lemma mod_sign_nz : forall a b, b~=0 -> a mod b ~= 0 -> sgn (a mod b) == sgn b. Proof. intros a b Hb H. destruct (lt_ge_cases 0 b) as [Hb'|Hb']. destruct (mod_pos_bound a b Hb'). rewrite 2 sgn_pos; order. destruct (mod_neg_bound a b). order. rewrite 2 sgn_neg; order. Qed. Lemma mod_sign : forall a b, b~=0 -> sgn (a mod b) ~= -sgn b. Proof. intros a b Hb H. destruct (eq_decidable (a mod b) 0) as [EQ|NEQ]. apply Hb, sgn_null_iff, opp_inj. now rewrite <- H, opp_0, EQ, sgn_0. apply Hb, sgn_null_iff. apply eq_mul_0_l with 2; try order'. nzsimpl'. apply add_move_0_l. rewrite <- H. symmetry. now apply mod_sign_nz. Qed. Lemma mod_sign_mul : forall a b, b~=0 -> 0 <= (a mod b) * b. Proof. intros. destruct (lt_ge_cases 0 b). apply mul_nonneg_nonneg; destruct (mod_pos_bound a b); order. apply mul_nonpos_nonpos; destruct (mod_neg_bound a b); order. Qed. (** A division by itself returns 1 *) Lemma div_same : forall a, a~=0 -> a/a == 1. Proof. intros. pos_or_neg a. apply div_same; order. rewrite <- div_opp_opp by trivial. now apply div_same. Qed. Lemma mod_same : forall a, a~=0 -> a mod a == 0. Proof. intros. rewrite mod_eq, div_same by trivial. nzsimpl. apply sub_diag. Qed. (** A division of a small number by a bigger one yields zero. *) Theorem div_small: forall a b, 0<=a a/b == 0. Proof. exact div_small. Qed. (** Same situation, in term of modulo: *) Theorem mod_small: forall a b, 0<=a a mod b == a. Proof. exact mod_small. Qed. (** * Basic values of divisions and modulo. *) Lemma div_0_l: forall a, a~=0 -> 0/a == 0. Proof. intros. pos_or_neg a. 
apply div_0_l; order. rewrite <- div_opp_opp, opp_0 by trivial. now apply div_0_l. Qed. Lemma mod_0_l: forall a, a~=0 -> 0 mod a == 0. Proof. intros; rewrite mod_eq, div_0_l; now nzsimpl. Qed. Lemma div_1_r: forall a, a/1 == a. Proof. intros. symmetry. apply div_unique with 0. left. split; order || apply lt_0_1. now nzsimpl. Qed. Lemma mod_1_r: forall a, a mod 1 == 0. Proof. intros. rewrite mod_eq, div_1_r; nzsimpl; auto using sub_diag. intro EQ; symmetry in EQ; revert EQ; apply lt_neq; apply lt_0_1. Qed. Lemma div_1_l: forall a, 1 1/a == 0. Proof. exact div_1_l. Qed. Lemma mod_1_l: forall a, 1 1 mod a == 1. Proof. exact mod_1_l. Qed. Lemma div_mul : forall a b, b~=0 -> (a*b)/b == a. Proof. intros. symmetry. apply div_unique with 0. destruct (lt_ge_cases 0 b); [left|right]; split; order. nzsimpl; apply mul_comm. Qed. Lemma mod_mul : forall a b, b~=0 -> (a*b) mod b == 0. Proof. intros. rewrite mod_eq, div_mul by trivial. rewrite mul_comm; apply sub_diag. Qed. Theorem div_unique_exact a b q: b~=0 -> a == b*q -> q == a/b. Proof. intros Hb H. rewrite H, mul_comm. symmetry. now apply div_mul. Qed. (** * Order results about mod and div *) (** A modulo cannot grow beyond its starting point. *) Theorem mod_le: forall a b, 0<=a -> 0 a mod b <= a. Proof. exact mod_le. Qed. Theorem div_pos : forall a b, 0<=a -> 0 0<= a/b. Proof. exact div_pos. Qed. Lemma div_str_pos : forall a b, 0 0 < a/b. Proof. exact div_str_pos. Qed. Lemma div_small_iff : forall a b, b~=0 -> (a/b==0 <-> 0<=a (a mod b == a <-> 0<=a 1 a/b < a. Proof. exact div_lt. Qed. (** [le] is compatible with a positive division. *) Lemma div_le_mono : forall a b c, 0 a<=b -> a/c <= b/c. Proof. intros a b c Hc Hab. rewrite lt_eq_cases in Hab. destruct Hab as [LT|EQ]; [|rewrite EQ; order]. rewrite <- lt_succ_r. rewrite (mul_lt_mono_pos_l c) by order. nzsimpl. rewrite (add_lt_mono_r _ _ (a mod c)). rewrite <- div_mod by order. apply lt_le_trans with b; trivial. rewrite (div_mod b c) at 1 by order. rewrite <- add_assoc, <- add_le_mono_l. apply le_trans with (c+0). nzsimpl; destruct (mod_pos_bound b c); order. rewrite <- add_le_mono_l. destruct (mod_pos_bound a c); order. Qed. (** In this convention, [div] performs Rounding-Toward-Bottom. Since we cannot speak of rational values here, we express this fact by multiplying back by [b], and this leads to separates statements according to the sign of [b]. First, [a/b] is below the exact fraction ... *) Lemma mul_div_le : forall a b, 0 b*(a/b) <= a. Proof. intros. rewrite (div_mod a b) at 2; try order. rewrite <- (add_0_r (b*(a/b))) at 1. rewrite <- add_le_mono_l. now destruct (mod_pos_bound a b). Qed. Lemma mul_div_ge : forall a b, b<0 -> a <= b*(a/b). Proof. intros. rewrite <- div_opp_opp, opp_le_mono, <-mul_opp_l by order. apply mul_div_le. now rewrite opp_pos_neg. Qed. (** ... and moreover it is the larger such integer, since [S(a/b)] is strictly above the exact fraction. *) Lemma mul_succ_div_gt: forall a b, 0 a < b*(S (a/b)). Proof. intros. nzsimpl. rewrite (div_mod a b) at 1; try order. rewrite <- add_lt_mono_l. destruct (mod_pos_bound a b); order. Qed. Lemma mul_succ_div_lt: forall a b, b<0 -> b*(S (a/b)) < a. Proof. intros. rewrite <- div_opp_opp, opp_lt_mono, <-mul_opp_l by order. apply mul_succ_div_gt. now rewrite opp_pos_neg. Qed. (** NB: The four previous properties could be used as specifications for [div]. *) (** Inequality [mul_div_le] is exact iff the modulo is zero. *) Lemma div_exact : forall a b, b~=0 -> (a == b*(a/b) <-> a mod b == 0). Proof. intros. 
rewrite (div_mod a b) at 1; try order. rewrite <- (add_0_r (b*(a/b))) at 2. apply add_cancel_l. Qed. (** Some additionnal inequalities about div. *) Theorem div_lt_upper_bound: forall a b q, 0 a < b*q -> a/b < q. Proof. intros. rewrite (mul_lt_mono_pos_l b) by trivial. apply le_lt_trans with a; trivial. now apply mul_div_le. Qed. Theorem div_le_upper_bound: forall a b q, 0 a <= b*q -> a/b <= q. Proof. intros. rewrite <- (div_mul q b) by order. apply div_le_mono; trivial. now rewrite mul_comm. Qed. Theorem div_le_lower_bound: forall a b q, 0 b*q <= a -> q <= a/b. Proof. intros. rewrite <- (div_mul q b) by order. apply div_le_mono; trivial. now rewrite mul_comm. Qed. (** A division respects opposite monotonicity for the divisor *) Lemma div_le_compat_l: forall p q r, 0<=p -> 0 p/r <= p/q. Proof. exact div_le_compat_l. Qed. (** * Relations between usual operations and mod and div *) Lemma mod_add : forall a b c, c~=0 -> (a + b * c) mod c == a mod c. Proof. intros. symmetry. apply mod_unique with (a/c+b); trivial. now apply mod_bound_or. rewrite mul_add_distr_l, add_shuffle0, <- div_mod by order. now rewrite mul_comm. Qed. Lemma div_add : forall a b c, c~=0 -> (a + b * c) / c == a / c + b. Proof. intros. apply (mul_cancel_l _ _ c); try order. apply (add_cancel_r _ _ ((a+b*c) mod c)). rewrite <- div_mod, mod_add by order. rewrite mul_add_distr_l, add_shuffle0, <- div_mod by order. now rewrite mul_comm. Qed. Lemma div_add_l: forall a b c, b~=0 -> (a * b + c) / b == a + c / b. Proof. intros a b c. rewrite (add_comm _ c), (add_comm a). now apply div_add. Qed. (** Cancellations. *) Lemma div_mul_cancel_r : forall a b c, b~=0 -> c~=0 -> (a*c)/(b*c) == a/b. Proof. intros. symmetry. apply div_unique with ((a mod b)*c). (* ineqs *) destruct (lt_ge_cases 0 c). rewrite <-(mul_0_l c), <-2mul_lt_mono_pos_r, <-2mul_le_mono_pos_r by trivial. now apply mod_bound_or. rewrite <-(mul_0_l c), <-2mul_lt_mono_neg_r, <-2mul_le_mono_neg_r by order. destruct (mod_bound_or a b); tauto. (* equation *) rewrite (div_mod a b) at 1 by order. rewrite mul_add_distr_r. rewrite add_cancel_r. rewrite <- 2 mul_assoc. now rewrite (mul_comm c). Qed. Lemma div_mul_cancel_l : forall a b c, b~=0 -> c~=0 -> (c*a)/(c*b) == a/b. Proof. intros. rewrite !(mul_comm c); now apply div_mul_cancel_r. Qed. Lemma mul_mod_distr_l: forall a b c, b~=0 -> c~=0 -> (c*a) mod (c*b) == c * (a mod b). Proof. intros. rewrite <- (add_cancel_l _ _ ((c*b)* ((c*a)/(c*b)))). rewrite <- div_mod. rewrite div_mul_cancel_l by trivial. rewrite <- mul_assoc, <- mul_add_distr_l, mul_cancel_l by order. apply div_mod; order. rewrite <- neq_mul_0; auto. Qed. Lemma mul_mod_distr_r: forall a b c, b~=0 -> c~=0 -> (a*c) mod (b*c) == (a mod b) * c. Proof. intros. rewrite !(mul_comm _ c); now rewrite mul_mod_distr_l. Qed. (** Operations modulo. *) Theorem mod_mod: forall a n, n~=0 -> (a mod n) mod n == a mod n. Proof. intros. rewrite mod_small_iff by trivial. now apply mod_bound_or. Qed. Lemma mul_mod_idemp_l : forall a b n, n~=0 -> ((a mod n)*b) mod n == (a*b) mod n. Proof. intros a b n Hn. symmetry. rewrite (div_mod a n) at 1 by order. rewrite add_comm, (mul_comm n), (mul_comm _ b). rewrite mul_add_distr_l, mul_assoc. intros. rewrite mod_add by trivial. now rewrite mul_comm. Qed. Lemma mul_mod_idemp_r : forall a b n, n~=0 -> (a*(b mod n)) mod n == (a*b) mod n. Proof. intros. rewrite !(mul_comm a). now apply mul_mod_idemp_l. Qed. Theorem mul_mod: forall a b n, n~=0 -> (a * b) mod n == ((a mod n) * (b mod n)) mod n. Proof. intros. now rewrite mul_mod_idemp_l, mul_mod_idemp_r. 
Qed. Lemma add_mod_idemp_l : forall a b n, n~=0 -> ((a mod n)+b) mod n == (a+b) mod n. Proof. intros a b n Hn. symmetry. rewrite (div_mod a n) at 1 by order. rewrite <- add_assoc, add_comm, mul_comm. intros. now rewrite mod_add. Qed. Lemma add_mod_idemp_r : forall a b n, n~=0 -> (a+(b mod n)) mod n == (a+b) mod n. Proof. intros. rewrite !(add_comm a). now apply add_mod_idemp_l. Qed. Theorem add_mod: forall a b n, n~=0 -> (a+b) mod n == (a mod n + b mod n) mod n. Proof. intros. now rewrite add_mod_idemp_l, add_mod_idemp_r. Qed. (** With the current convention, the following result isn't always true with a negative last divisor. For instance [ 3/(-2)/(-2) = 1 <> 0 = 3 / (-2*-2) ], or [ 5/2/(-2) = -1 <> -2 = 5 / (2*-2) ]. *) Lemma div_div : forall a b c, b~=0 -> 0 (a/b)/c == a/(b*c). Proof. intros a b c Hb Hc. apply div_unique with (b*((a/b) mod c) + a mod b). (* begin 0<= ... 0 a mod (b*c) == a mod b + b*((a/b) mod c). Proof. intros a b c Hb Hc. apply add_cancel_l with (b*c*(a/(b*c))). rewrite <- div_mod by (apply neq_mul_0; split; order). rewrite <- div_div by trivial. rewrite add_assoc, add_shuffle0, <- mul_assoc, <- mul_add_distr_l. rewrite <- div_mod by order. apply div_mod; order. Qed. (** A last inequality: *) Theorem div_mul_le: forall a b c, 0<=a -> 0 0<=c -> c*(a/b) <= (c*a)/b. Proof. exact div_mul_le. Qed. End ZDivProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZAdd.v0000640000175000001440000002006712010532755021242 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n == m. Proof. intros n m H. apply opp_wd in H. now rewrite 2 opp_involutive in H. Qed. Theorem opp_inj_wd : forall n m, - n == - m <-> n == m. Proof. intros n m; split; [apply opp_inj | intros; now f_equiv]. Qed. Theorem eq_opp_l : forall n m, - n == m <-> n == - m. Proof. intros n m. now rewrite <- (opp_inj_wd (- n) m), opp_involutive. Qed. Theorem eq_opp_r : forall n m, n == - m <-> - n == m. Proof. symmetry; apply eq_opp_l. Qed. Theorem sub_add_distr : forall n m p, n - (m + p) == (n - m) - p. Proof. intros n m p; rewrite <- add_opp_r, opp_add_distr, add_assoc. now rewrite 2 add_opp_r. Qed. Theorem sub_sub_distr : forall n m p, n - (m - p) == (n - m) + p. Proof. intros n m p; rewrite <- add_opp_r, opp_sub_distr, add_assoc. now rewrite add_opp_r. Qed. Theorem sub_opp_l : forall n m, - n - m == - m - n. Proof. intros n m. rewrite <- 2 add_opp_r. now rewrite add_comm. Qed. Theorem sub_opp_r : forall n m, n - (- m) == n + m. Proof. intros n m; rewrite <- add_opp_r; now rewrite opp_involutive. Qed. Theorem add_sub_swap : forall n m p, n + m - p == n - p + m. Proof. intros n m p. rewrite <- add_sub_assoc, <- (add_opp_r n p), <- add_assoc. now rewrite add_opp_l. Qed. Theorem sub_cancel_l : forall n m p, n - m == n - p <-> m == p. Proof. intros n m p. rewrite <- (add_cancel_l (n - m) (n - p) (- n)). rewrite 2 add_sub_assoc. rewrite add_opp_diag_l; rewrite 2 sub_0_l. apply opp_inj_wd. Qed. Theorem sub_cancel_r : forall n m p, n - p == m - p <-> n == m. Proof. intros n m p. stepl (n - p + p == m - p + p) by apply add_cancel_r. now do 2 rewrite <- sub_sub_distr, sub_diag, sub_0_r. Qed. (** The next several theorems are devoted to moving terms from one side of an equation to the other. The name contains the operation in the original equation ([add] or [sub]) and the indication whether the left or right term is moved. *) Theorem add_move_l : forall n m p, n + m == p <-> m == p - n. Proof. intros n m p. 
stepl (n + m - n == p - n) by apply sub_cancel_r. now rewrite add_comm, <- add_sub_assoc, sub_diag, add_0_r. Qed. Theorem add_move_r : forall n m p, n + m == p <-> n == p - m. Proof. intros n m p; rewrite add_comm; now apply add_move_l. Qed. (** The two theorems above do not allow rewriting subformulas of the form [n - m == p] to [n == p + m] since subtraction is in the right-hand side of the equation. Hence the following two theorems. *) Theorem sub_move_l : forall n m p, n - m == p <-> - m == p - n. Proof. intros n m p; rewrite <- (add_opp_r n m); apply add_move_l. Qed. Theorem sub_move_r : forall n m p, n - m == p <-> n == p + m. Proof. intros n m p; rewrite <- (add_opp_r n m). now rewrite add_move_r, sub_opp_r. Qed. Theorem add_move_0_l : forall n m, n + m == 0 <-> m == - n. Proof. intros n m; now rewrite add_move_l, sub_0_l. Qed. Theorem add_move_0_r : forall n m, n + m == 0 <-> n == - m. Proof. intros n m; now rewrite add_move_r, sub_0_l. Qed. Theorem sub_move_0_l : forall n m, n - m == 0 <-> - m == - n. Proof. intros n m. now rewrite sub_move_l, sub_0_l. Qed. Theorem sub_move_0_r : forall n m, n - m == 0 <-> n == m. Proof. intros n m. now rewrite sub_move_r, add_0_l. Qed. (** The following section is devoted to cancellation of like terms. The name includes the first operator and the position of the term being canceled. *) Theorem add_simpl_l : forall n m, n + m - n == m. Proof. intros; now rewrite add_sub_swap, sub_diag, add_0_l. Qed. Theorem add_simpl_r : forall n m, n + m - m == n. Proof. intros; now rewrite <- add_sub_assoc, sub_diag, add_0_r. Qed. Theorem sub_simpl_l : forall n m, - n - m + n == - m. Proof. intros; now rewrite <- add_sub_swap, add_opp_diag_l, sub_0_l. Qed. Theorem sub_simpl_r : forall n m, n - m + m == n. Proof. intros; now rewrite <- sub_sub_distr, sub_diag, sub_0_r. Qed. Theorem sub_add : forall n m, m - n + n == m. Proof. intros. now rewrite <- add_sub_swap, add_simpl_r. Qed. (** Now we have two sums or differences; the name includes the two operators and the position of the terms being canceled *) Theorem add_add_simpl_l_l : forall n m p, (n + m) - (n + p) == m - p. Proof. intros n m p. now rewrite (add_comm n m), <- add_sub_assoc, sub_add_distr, sub_diag, sub_0_l, add_opp_r. Qed. Theorem add_add_simpl_l_r : forall n m p, (n + m) - (p + n) == m - p. Proof. intros n m p. rewrite (add_comm p n); apply add_add_simpl_l_l. Qed. Theorem add_add_simpl_r_l : forall n m p, (n + m) - (m + p) == n - p. Proof. intros n m p. rewrite (add_comm n m); apply add_add_simpl_l_l. Qed. Theorem add_add_simpl_r_r : forall n m p, (n + m) - (p + m) == n - p. Proof. intros n m p. rewrite (add_comm p m); apply add_add_simpl_r_l. Qed. Theorem sub_add_simpl_r_l : forall n m p, (n - m) + (m + p) == n + p. Proof. intros n m p. now rewrite <- sub_sub_distr, sub_add_distr, sub_diag, sub_0_l, sub_opp_r. Qed. Theorem sub_add_simpl_r_r : forall n m p, (n - m) + (p + m) == n + p. Proof. intros n m p. rewrite (add_comm p m); apply sub_add_simpl_r_l. Qed. (** Of course, there are many other variants *) End ZAddProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZMaxMin.v0000640000175000001440000001365112010532755021744 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* max (p * n) (p * m) == p * max n m. Proof. intros. destruct (le_ge_cases n m); [rewrite 2 max_r | rewrite 2 max_l]; try order; now apply mul_le_mono_nonneg_l. Qed. 
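(** Added remark (informal): multiplying by a nonnegative factor
    distributes over [max] and [min] as such, while a nonpositive factor
    exchanges them, as the [_nonpos_] lemmas below state. For instance
    with p = -1: max (-n) (-m) == - (min n m), e.g.
    max (-2) (-5) = -2 = - (min 2 5). *)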
Lemma mul_max_distr_nonneg_r : forall n m p, 0 <= p -> max (n * p) (m * p) == max n m * p. Proof. intros. destruct (le_ge_cases n m); [rewrite 2 max_r | rewrite 2 max_l]; try order; now apply mul_le_mono_nonneg_r. Qed. Lemma mul_min_distr_nonneg_l : forall n m p, 0 <= p -> min (p * n) (p * m) == p * min n m. Proof. intros. destruct (le_ge_cases n m); [rewrite 2 min_l | rewrite 2 min_r]; try order; now apply mul_le_mono_nonneg_l. Qed. Lemma mul_min_distr_nonneg_r : forall n m p, 0 <= p -> min (n * p) (m * p) == min n m * p. Proof. intros. destruct (le_ge_cases n m); [rewrite 2 min_l | rewrite 2 min_r]; try order; now apply mul_le_mono_nonneg_r. Qed. Lemma mul_max_distr_nonpos_l : forall n m p, p <= 0 -> max (p * n) (p * m) == p * min n m. Proof. intros. destruct (le_ge_cases n m). rewrite min_l by trivial. rewrite max_l. reflexivity. now apply mul_le_mono_nonpos_l. rewrite min_r by trivial. rewrite max_r. reflexivity. now apply mul_le_mono_nonpos_l. Qed. Lemma mul_max_distr_nonpos_r : forall n m p, p <= 0 -> max (n * p) (m * p) == min n m * p. Proof. intros. destruct (le_ge_cases n m). rewrite min_l by trivial. rewrite max_l. reflexivity. now apply mul_le_mono_nonpos_r. rewrite min_r by trivial. rewrite max_r. reflexivity. now apply mul_le_mono_nonpos_r. Qed. Lemma mul_min_distr_nonpos_l : forall n m p, p <= 0 -> min (p * n) (p * m) == p * max n m. Proof. intros. destruct (le_ge_cases n m). rewrite max_r by trivial. rewrite min_r. reflexivity. now apply mul_le_mono_nonpos_l. rewrite max_l by trivial. rewrite min_l. reflexivity. now apply mul_le_mono_nonpos_l. Qed. Lemma mul_min_distr_nonpos_r : forall n m p, p <= 0 -> min (n * p) (m * p) == max n m * p. Proof. intros. destruct (le_ge_cases n m). rewrite max_r by trivial. rewrite min_r. reflexivity. now apply mul_le_mono_nonpos_r. rewrite max_l by trivial. rewrite min_l. reflexivity. now apply mul_le_mono_nonpos_r. Qed. End ZMaxMinProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZMulOrder.v0000640000175000001440000001535612010532755022310 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n < m -> q <= 0 -> p < q -> m * q < n * p. Proof. intros n m p q H1 H2 H3 H4. apply le_lt_trans with (m * p). apply mul_le_mono_nonpos_l; [assumption | now apply lt_le_incl]. apply -> mul_lt_mono_neg_r; [assumption | now apply lt_le_trans with q]. Qed. Theorem mul_le_mono_nonpos : forall n m p q, m <= 0 -> n <= m -> q <= 0 -> p <= q -> m * q <= n * p. Proof. intros n m p q H1 H2 H3 H4. apply le_trans with (m * p). now apply mul_le_mono_nonpos_l. apply mul_le_mono_nonpos_r; [now apply le_trans with q | assumption]. Qed. Theorem mul_nonpos_nonpos : forall n m, n <= 0 -> m <= 0 -> 0 <= n * m. Proof. intros n m H1 H2. rewrite <- (mul_0_l m). now apply mul_le_mono_nonpos_r. Qed. Theorem mul_nonneg_nonpos : forall n m, 0 <= n -> m <= 0 -> n * m <= 0. Proof. intros n m H1 H2. rewrite <- (mul_0_l m). now apply mul_le_mono_nonpos_r. Qed. Theorem mul_nonpos_nonneg : forall n m, n <= 0 -> 0 <= m -> n * m <= 0. Proof. intros; rewrite mul_comm; now apply mul_nonneg_nonpos. Qed. Notation mul_pos := lt_0_mul (only parsing). Theorem lt_mul_0 : forall n m, n * m < 0 <-> n < 0 /\ m > 0 \/ n > 0 /\ m < 0. Proof. intros n m; split; [intro H | intros [[H1 H2] | [H1 H2]]]. 
destruct (lt_trichotomy n 0) as [H1 | [H1 | H1]]; [| rewrite H1 in H; rewrite mul_0_l in H; false_hyp H lt_irrefl |]; (destruct (lt_trichotomy m 0) as [H2 | [H2 | H2]]; [| rewrite H2 in H; rewrite mul_0_r in H; false_hyp H lt_irrefl |]); try (left; now split); try (right; now split). assert (H3 : n * m > 0) by now apply mul_neg_neg. exfalso; now apply (lt_asymm (n * m) 0). assert (H3 : n * m > 0) by now apply mul_pos_pos. exfalso; now apply (lt_asymm (n * m) 0). now apply mul_neg_pos. now apply mul_pos_neg. Qed. Notation mul_neg := lt_mul_0 (only parsing). Theorem le_0_mul : forall n m, 0 <= n * m -> 0 <= n /\ 0 <= m \/ n <= 0 /\ m <= 0. Proof. assert (R : forall n, 0 == n <-> n == 0) by (intros; split; apply eq_sym). intros n m. repeat rewrite lt_eq_cases. repeat rewrite R. rewrite lt_0_mul, eq_mul_0. pose proof (lt_trichotomy n 0); pose proof (lt_trichotomy m 0). tauto. Qed. Notation mul_nonneg := le_0_mul (only parsing). Theorem le_mul_0 : forall n m, n * m <= 0 -> 0 <= n /\ m <= 0 \/ n <= 0 /\ 0 <= m. Proof. assert (R : forall n, 0 == n <-> n == 0) by (intros; split; apply eq_sym). intros n m. repeat rewrite lt_eq_cases. repeat rewrite R. rewrite lt_mul_0, eq_mul_0. pose proof (lt_trichotomy n 0); pose proof (lt_trichotomy m 0). tauto. Qed. Notation mul_nonpos := le_mul_0 (only parsing). Notation le_0_square := square_nonneg (only parsing). Theorem nlt_square_0 : forall n, ~ n * n < 0. Proof. intros n H. apply lt_nge in H. apply H. apply square_nonneg. Qed. Theorem square_lt_mono_nonpos : forall n m, n <= 0 -> m < n -> n * n < m * m. Proof. intros n m H1 H2. now apply mul_lt_mono_nonpos. Qed. Theorem square_le_mono_nonpos : forall n m, n <= 0 -> m <= n -> n * n <= m * m. Proof. intros n m H1 H2. now apply mul_le_mono_nonpos. Qed. Theorem square_lt_simpl_nonpos : forall n m, m <= 0 -> n * n < m * m -> m < n. Proof. intros n m H1 H2. destruct (le_gt_cases n 0); [|order]. destruct (lt_ge_cases m n) as [LE|GT]; trivial. apply square_le_mono_nonpos in GT; order. Qed. Theorem square_le_simpl_nonpos : forall n m, m <= 0 -> n * n <= m * m -> m <= n. Proof. intros n m H1 H2. destruct (le_gt_cases n 0); [|order]. destruct (le_gt_cases m n) as [LE|GT]; trivial. apply square_lt_mono_nonpos in GT; order. Qed. Theorem lt_1_mul_neg : forall n m, n < -1 -> m < 0 -> 1 < n * m. Proof. intros n m H1 H2. apply (mul_lt_mono_neg_r m) in H1. apply opp_pos_neg in H2. rewrite mul_opp_l, mul_1_l in H1. now apply lt_1_l with (- m). assumption. Qed. Theorem lt_mul_m1_neg : forall n m, 1 < n -> m < 0 -> n * m < -1. Proof. intros n m H1 H2. apply (mul_lt_mono_neg_r m) in H1. rewrite mul_1_l in H1. now apply lt_m1_r with m. assumption. Qed. Theorem lt_mul_m1_pos : forall n m, n < -1 -> 0 < m -> n * m < -1. Proof. intros n m H1 H2. apply (mul_lt_mono_pos_r m) in H1. rewrite mul_opp_l, mul_1_l in H1. apply opp_neg_pos in H2. now apply lt_m1_r with (- m). assumption. Qed. Theorem lt_1_mul_l : forall n m, 1 < n -> n * m < -1 \/ n * m == 0 \/ 1 < n * m. Proof. intros n m H; destruct (lt_trichotomy m 0) as [H1 | [H1 | H1]]. left. now apply lt_mul_m1_neg. right; left; now rewrite H1, mul_0_r. right; right; now apply lt_1_mul_pos. Qed. Theorem lt_m1_mul_r : forall n m, n < -1 -> n * m < -1 \/ n * m == 0 \/ 1 < n * m. Proof. intros n m H; destruct (lt_trichotomy m 0) as [H1 | [H1 | H1]]. right; right. now apply lt_1_mul_neg. right; left; now rewrite H1, mul_0_r. left. now apply lt_mul_m1_pos. Qed. Theorem eq_mul_1 : forall n m, n * m == 1 -> n == 1 \/ n == -1. Proof. assert (F := lt_m1_0). zero_pos_neg n. 
(* n = 0 *) intros m. nzsimpl. now left. (* 0 < n, proving P n /\ P (-n) *) intros n Hn. rewrite <- le_succ_l, <- one_succ in Hn. le_elim Hn; split; intros m H. destruct (lt_1_mul_l n m) as [|[|]]; order'. rewrite mul_opp_l, eq_opp_l in H. destruct (lt_1_mul_l n m) as [|[|]]; order'. now left. intros; right. now f_equiv. Qed. Theorem lt_mul_diag_l : forall n m, n < 0 -> (1 < m <-> n * m < n). Proof. intros n m H. stepr (n * m < n * 1) by now rewrite mul_1_r. now apply mul_lt_mono_neg_l. Qed. Theorem lt_mul_diag_r : forall n m, 0 < n -> (1 < m <-> n < n * m). Proof. intros n m H. stepr (n * 1 < n * m) by now rewrite mul_1_r. now apply mul_lt_mono_pos_l. Qed. Theorem le_mul_diag_l : forall n m, n < 0 -> (1 <= m <-> n * m <= n). Proof. intros n m H. stepr (n * m <= n * 1) by now rewrite mul_1_r. now apply mul_le_mono_neg_l. Qed. Theorem le_mul_diag_r : forall n m, 0 < n -> (1 <= m <-> n <= n * m). Proof. intros n m H. stepr (n * 1 <= n * m) by now rewrite mul_1_r. now apply mul_le_mono_pos_l. Qed. Theorem lt_mul_r : forall n m p, 0 < n -> 1 < p -> n < m -> n < m * p. Proof. intros. stepl (n * 1) by now rewrite mul_1_r. apply mul_lt_mono_nonneg. now apply lt_le_incl. assumption. apply le_0_1. assumption. Qed. (** Alternative name : *) Definition mul_eq_1 := eq_mul_1. End ZMulOrderProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZDivEucl.v0000640000175000001440000004156412010532755022112 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 -> exists b q, a = b*q+r /\ 0 < r < |b| ] The outcome of the modulo function is hence always positive. This corresponds to convention "E" in the following paper: R. Boute, "The Euclidean definition of the functions div and mod", ACM Transactions on Programming Languages and Systems, Vol. 14, No.2, pp. 127-144, April 1992. See files [ZDivTrunc] and [ZDivFloor] for others conventions. We simply extend NZDiv with a bound for modulo that holds regardless of the sign of a and b. This new specification subsume mod_bound_pos, which nonetheless stays there for subtyping. Note also that ZAxiomSig now already contain a div and a modulo (that follow the Floor convention). We just ignore them here. *) Module Type EuclidSpec (Import A : ZAxiomsSig')(Import B : DivMod' A). Axiom mod_always_pos : forall a b, b ~= 0 -> 0 <= a mod b < abs b. End EuclidSpec. Module Type ZEuclid (Z:ZAxiomsSig) := NZDiv.NZDiv Z <+ EuclidSpec Z. Module Type ZEuclid' (Z:ZAxiomsSig) := NZDiv.NZDiv' Z <+ EuclidSpec Z. Module ZEuclidProp (Import A : ZAxiomsSig') (Import B : ZMulOrderProp A) (Import C : ZSgnAbsProp A B) (Import D : ZEuclid' A). Module Import Private_NZDiv := Nop <+ NZDivProp A D B. (** Another formulation of the main equation *) Lemma mod_eq : forall a b, b~=0 -> a mod b == a - b*(a/b). Proof. intros. rewrite <- add_move_l. symmetry. now apply div_mod. Qed. Ltac pos_or_neg a := let LT := fresh "LT" in let LE := fresh "LE" in destruct (le_gt_cases 0 a) as [LE|LT]; [|rewrite <- opp_pos_neg in LT]. (** Uniqueness theorems *) Theorem div_mod_unique : forall b q1 q2 r1 r2 : t, 0<=r1 0<=r2 b*q1+r1 == b*q2+r2 -> q1 == q2 /\ r1 == r2. Proof. intros b q1 q2 r1 r2 Hr1 Hr2 EQ. pos_or_neg b. rewrite abs_eq in * by trivial. apply div_mod_unique with b; trivial. rewrite abs_neq' in * by auto using lt_le_incl. rewrite eq_sym_iff. apply div_mod_unique with (-b); trivial. rewrite 2 mul_opp_l. rewrite add_move_l, sub_opp_r. rewrite <-add_assoc. symmetry. rewrite add_move_l, sub_opp_r. 
now rewrite (add_comm r2), (add_comm r1). Qed. Theorem div_unique: forall a b q r, 0<=r a == b*q + r -> q == a/b. Proof. intros a b q r Hr EQ. assert (Hb : b~=0). pos_or_neg b. rewrite abs_eq in Hr; intuition; order. rewrite <- opp_0, eq_opp_r. rewrite abs_neq' in Hr; intuition; order. destruct (div_mod_unique b q (a/b) r (a mod b)); trivial. now apply mod_always_pos. now rewrite <- div_mod. Qed. Theorem mod_unique: forall a b q r, 0<=r a == b*q + r -> r == a mod b. Proof. intros a b q r Hr EQ. assert (Hb : b~=0). pos_or_neg b. rewrite abs_eq in Hr; intuition; order. rewrite <- opp_0, eq_opp_r. rewrite abs_neq' in Hr; intuition; order. destruct (div_mod_unique b q (a/b) r (a mod b)); trivial. now apply mod_always_pos. now rewrite <- div_mod. Qed. (** Sign rules *) Lemma div_opp_r : forall a b, b~=0 -> a/(-b) == -(a/b). Proof. intros. symmetry. apply div_unique with (a mod b). rewrite abs_opp; now apply mod_always_pos. rewrite mul_opp_opp; now apply div_mod. Qed. Lemma mod_opp_r : forall a b, b~=0 -> a mod (-b) == a mod b. Proof. intros. symmetry. apply mod_unique with (-(a/b)). rewrite abs_opp; now apply mod_always_pos. rewrite mul_opp_opp; now apply div_mod. Qed. Lemma div_opp_l_z : forall a b, b~=0 -> a mod b == 0 -> (-a)/b == -(a/b). Proof. intros a b Hb Hab. symmetry. apply div_unique with (-(a mod b)). rewrite Hab, opp_0. split; [order|]. pos_or_neg b; [rewrite abs_eq | rewrite abs_neq']; order. now rewrite mul_opp_r, <-opp_add_distr, <-div_mod. Qed. Lemma div_opp_l_nz : forall a b, b~=0 -> a mod b ~= 0 -> (-a)/b == -(a/b)-sgn b. Proof. intros a b Hb Hab. symmetry. apply div_unique with (abs b -(a mod b)). rewrite lt_sub_lt_add_l. rewrite <- le_add_le_sub_l. nzsimpl. rewrite <- (add_0_l (abs b)) at 2. rewrite <- add_lt_mono_r. destruct (mod_always_pos a b); intuition order. rewrite <- 2 add_opp_r, mul_add_distr_l, 2 mul_opp_r. rewrite sgn_abs. rewrite add_shuffle2, add_opp_diag_l; nzsimpl. rewrite <-opp_add_distr, <-div_mod; order. Qed. Lemma mod_opp_l_z : forall a b, b~=0 -> a mod b == 0 -> (-a) mod b == 0. Proof. intros a b Hb Hab. symmetry. apply mod_unique with (-(a/b)). split; [order|now rewrite abs_pos]. now rewrite <-opp_0, <-Hab, mul_opp_r, <-opp_add_distr, <-div_mod. Qed. Lemma mod_opp_l_nz : forall a b, b~=0 -> a mod b ~= 0 -> (-a) mod b == abs b - (a mod b). Proof. intros a b Hb Hab. symmetry. apply mod_unique with (-(a/b)-sgn b). rewrite lt_sub_lt_add_l. rewrite <- le_add_le_sub_l. nzsimpl. rewrite <- (add_0_l (abs b)) at 2. rewrite <- add_lt_mono_r. destruct (mod_always_pos a b); intuition order. rewrite <- 2 add_opp_r, mul_add_distr_l, 2 mul_opp_r. rewrite sgn_abs. rewrite add_shuffle2, add_opp_diag_l; nzsimpl. rewrite <-opp_add_distr, <-div_mod; order. Qed. Lemma div_opp_opp_z : forall a b, b~=0 -> a mod b == 0 -> (-a)/(-b) == a/b. Proof. intros. now rewrite div_opp_r, div_opp_l_z, opp_involutive. Qed. Lemma div_opp_opp_nz : forall a b, b~=0 -> a mod b ~= 0 -> (-a)/(-b) == a/b + sgn(b). Proof. intros. rewrite div_opp_r, div_opp_l_nz by trivial. now rewrite opp_sub_distr, opp_involutive. Qed. Lemma mod_opp_opp_z : forall a b, b~=0 -> a mod b == 0 -> (-a) mod (-b) == 0. Proof. intros. now rewrite mod_opp_r, mod_opp_l_z. Qed. Lemma mod_opp_opp_nz : forall a b, b~=0 -> a mod b ~= 0 -> (-a) mod (-b) == abs b - a mod b. Proof. intros. now rewrite mod_opp_r, mod_opp_l_nz. Qed. (** A division by itself returns 1 *) Lemma div_same : forall a, a~=0 -> a/a == 1. Proof. intros. symmetry. apply div_unique with 0. split; [order|now rewrite abs_pos]. now nzsimpl. Qed. 
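(** As a concrete illustration of the sign rules above (reading the
    numerals in any model of these axioms, e.g. [Z]), take [a=7] and
    [b=3], so that [7/3 == 2] and [7 mod 3 == 1]. Convention "E"
    forces [0 <= r < abs b] in every case, hence:
    [(-7)/3 == -3] and [(-7) mod 3 == 2]
      (matching [div_opp_l_nz] : [-(7/3) - sgn 3 == -2 - 1], and
       [mod_opp_l_nz] : [abs 3 - 7 mod 3 == 3 - 1]);
    [7/(-3) == -2] and [7 mod (-3) == 1]
      (matching [div_opp_r] and [mod_opp_r]);
    [(-7)/(-3) == 3] and [(-7) mod (-3) == 2]
      (matching [div_opp_opp_nz] : [7/3 + sgn 3 == 2 + 1]). *)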
Lemma mod_same : forall a, a~=0 -> a mod a == 0. Proof. intros. rewrite mod_eq, div_same by trivial. nzsimpl. apply sub_diag. Qed. (** A division of a small number by a bigger one yields zero. *) Theorem div_small: forall a b, 0<=a a/b == 0. Proof. exact div_small. Qed. (** Same situation, in term of modulo: *) Theorem mod_small: forall a b, 0<=a a mod b == a. Proof. exact mod_small. Qed. (** * Basic values of divisions and modulo. *) Lemma div_0_l: forall a, a~=0 -> 0/a == 0. Proof. intros. pos_or_neg a. apply div_0_l; order. apply opp_inj. rewrite <- div_opp_r, opp_0 by trivial. now apply div_0_l. Qed. Lemma mod_0_l: forall a, a~=0 -> 0 mod a == 0. Proof. intros; rewrite mod_eq, div_0_l; now nzsimpl. Qed. Lemma div_1_r: forall a, a/1 == a. Proof. intros. symmetry. apply div_unique with 0. assert (H:=lt_0_1); rewrite abs_pos; intuition; order. now nzsimpl. Qed. Lemma mod_1_r: forall a, a mod 1 == 0. Proof. intros. rewrite mod_eq, div_1_r; nzsimpl; auto using sub_diag. apply neq_sym, lt_neq; apply lt_0_1. Qed. Lemma div_1_l: forall a, 1 1/a == 0. Proof. exact div_1_l. Qed. Lemma mod_1_l: forall a, 1 1 mod a == 1. Proof. exact mod_1_l. Qed. Lemma div_mul : forall a b, b~=0 -> (a*b)/b == a. Proof. intros. symmetry. apply div_unique with 0. split; [order|now rewrite abs_pos]. nzsimpl; apply mul_comm. Qed. Lemma mod_mul : forall a b, b~=0 -> (a*b) mod b == 0. Proof. intros. rewrite mod_eq, div_mul by trivial. rewrite mul_comm; apply sub_diag. Qed. Theorem div_unique_exact a b q: b~=0 -> a == b*q -> q == a/b. Proof. intros Hb H. rewrite H, mul_comm. symmetry. now apply div_mul. Qed. (** * Order results about mod and div *) (** A modulo cannot grow beyond its starting point. *) Theorem mod_le: forall a b, 0<=a -> b~=0 -> a mod b <= a. Proof. intros. pos_or_neg b. apply mod_le; order. rewrite <- mod_opp_r by trivial. apply mod_le; order. Qed. Theorem div_pos : forall a b, 0<=a -> 0 0<= a/b. Proof. exact div_pos. Qed. Lemma div_str_pos : forall a b, 0 0 < a/b. Proof. exact div_str_pos. Qed. Lemma div_small_iff : forall a b, b~=0 -> (a/b==0 <-> 0<=a (a mod b == a <-> 0<=a 1 a/b < a. Proof. exact div_lt. Qed. (** [le] is compatible with a positive division. *) Lemma div_le_mono : forall a b c, 0 a<=b -> a/c <= b/c. Proof. intros a b c Hc Hab. rewrite lt_eq_cases in Hab. destruct Hab as [LT|EQ]; [|rewrite EQ; order]. rewrite <- lt_succ_r. rewrite (mul_lt_mono_pos_l c) by order. nzsimpl. rewrite (add_lt_mono_r _ _ (a mod c)). rewrite <- div_mod by order. apply lt_le_trans with b; trivial. rewrite (div_mod b c) at 1 by order. rewrite <- add_assoc, <- add_le_mono_l. apply le_trans with (c+0). nzsimpl; destruct (mod_always_pos b c); try order. rewrite abs_eq in *; order. rewrite <- add_le_mono_l. destruct (mod_always_pos a c); order. Qed. (** In this convention, [div] performs Rounding-Toward-Bottom when divisor is positive, and Rounding-Toward-Top otherwise. Since we cannot speak of rational values here, we express this fact by multiplying back by [b], and this leads to a nice unique statement. *) Lemma mul_div_le : forall a b, b~=0 -> b*(a/b) <= a. Proof. intros. rewrite (div_mod a b) at 2; trivial. rewrite <- (add_0_r (b*(a/b))) at 1. rewrite <- add_le_mono_l. now destruct (mod_always_pos a b). Qed. (** Giving a reversed bound is slightly more complex *) Lemma mul_succ_div_gt: forall a b, 0 a < b*(S (a/b)). Proof. intros. nzsimpl. rewrite (div_mod a b) at 1; try order. rewrite <- add_lt_mono_l. destruct (mod_always_pos a b). order. rewrite abs_eq in *; order. Qed. 
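(** Concretely (numerals read in e.g. [Z]) : [7/3 == 2] and
    [(-7)/3 == -3] round toward bottom since the divisor [3] is
    positive, while [7/(-3) == -2] and [(-7)/(-3) == 3] round toward
    top since the divisor is negative. In all four cases
    [b*(a/b) <= a] as stated by [mul_div_le], e.g.
    [3*(-3) == -9 <= -7], and [-7 < 3*(S (-3)) == -6] as in
    [mul_succ_div_gt]. *)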
Lemma mul_pred_div_gt: forall a b, b<0 -> a < b*(P (a/b)). Proof. intros a b Hb. rewrite mul_pred_r, <- add_opp_r. rewrite (div_mod a b) at 1; try order. rewrite <- add_lt_mono_l. destruct (mod_always_pos a b). order. rewrite <- opp_pos_neg in Hb. rewrite abs_neq' in *; order. Qed. (** NB: The three previous properties could be used as specifications for [div]. *) (** Inequality [mul_div_le] is exact iff the modulo is zero. *) Lemma div_exact : forall a b, b~=0 -> (a == b*(a/b) <-> a mod b == 0). Proof. intros. rewrite (div_mod a b) at 1; try order. rewrite <- (add_0_r (b*(a/b))) at 2. apply add_cancel_l. Qed. (** Some additionnal inequalities about div. *) Theorem div_lt_upper_bound: forall a b q, 0 a < b*q -> a/b < q. Proof. intros. rewrite (mul_lt_mono_pos_l b) by trivial. apply le_lt_trans with a; trivial. apply mul_div_le; order. Qed. Theorem div_le_upper_bound: forall a b q, 0 a <= b*q -> a/b <= q. Proof. intros. rewrite <- (div_mul q b) by order. apply div_le_mono; trivial. now rewrite mul_comm. Qed. Theorem div_le_lower_bound: forall a b q, 0 b*q <= a -> q <= a/b. Proof. intros. rewrite <- (div_mul q b) by order. apply div_le_mono; trivial. now rewrite mul_comm. Qed. (** A division respects opposite monotonicity for the divisor *) Lemma div_le_compat_l: forall p q r, 0<=p -> 0 p/r <= p/q. Proof. exact div_le_compat_l. Qed. (** * Relations between usual operations and mod and div *) Lemma mod_add : forall a b c, c~=0 -> (a + b * c) mod c == a mod c. Proof. intros. symmetry. apply mod_unique with (a/c+b); trivial. now apply mod_always_pos. rewrite mul_add_distr_l, add_shuffle0, <- div_mod by order. now rewrite mul_comm. Qed. Lemma div_add : forall a b c, c~=0 -> (a + b * c) / c == a / c + b. Proof. intros. apply (mul_cancel_l _ _ c); try order. apply (add_cancel_r _ _ ((a+b*c) mod c)). rewrite <- div_mod, mod_add by order. rewrite mul_add_distr_l, add_shuffle0, <- div_mod by order. now rewrite mul_comm. Qed. Lemma div_add_l: forall a b c, b~=0 -> (a * b + c) / b == a + c / b. Proof. intros a b c. rewrite (add_comm _ c), (add_comm a). now apply div_add. Qed. (** Cancellations. *) (** With the current convention, the following isn't always true when [c<0]: [-3*-1 / -2*-1 = 3/2 = 1] while [-3/-2 = 2] *) Lemma div_mul_cancel_r : forall a b c, b~=0 -> 0 (a*c)/(b*c) == a/b. Proof. intros. symmetry. apply div_unique with ((a mod b)*c). (* ineqs *) rewrite abs_mul, (abs_eq c) by order. rewrite <-(mul_0_l c), <-mul_lt_mono_pos_r, <-mul_le_mono_pos_r by trivial. now apply mod_always_pos. (* equation *) rewrite (div_mod a b) at 1 by order. rewrite mul_add_distr_r. rewrite add_cancel_r. rewrite <- 2 mul_assoc. now rewrite (mul_comm c). Qed. Lemma div_mul_cancel_l : forall a b c, b~=0 -> 0 (c*a)/(c*b) == a/b. Proof. intros. rewrite !(mul_comm c); now apply div_mul_cancel_r. Qed. Lemma mul_mod_distr_l: forall a b c, b~=0 -> 0 (c*a) mod (c*b) == c * (a mod b). Proof. intros. rewrite <- (add_cancel_l _ _ ((c*b)* ((c*a)/(c*b)))). rewrite <- div_mod. rewrite div_mul_cancel_l by trivial. rewrite <- mul_assoc, <- mul_add_distr_l, mul_cancel_l by order. apply div_mod; order. rewrite <- neq_mul_0; intuition; order. Qed. Lemma mul_mod_distr_r: forall a b c, b~=0 -> 0 (a*c) mod (b*c) == (a mod b) * c. Proof. intros. rewrite !(mul_comm _ c); now rewrite mul_mod_distr_l. Qed. (** Operations modulo. *) Theorem mod_mod: forall a n, n~=0 -> (a mod n) mod n == a mod n. Proof. intros. rewrite mod_small_iff by trivial. now apply mod_always_pos. Qed. 
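(** For instance (numerals read in e.g. [Z]), with [a=5], [b=7],
    [c=3] : [(5 + 7*3) mod 3 == 26 mod 3 == 2 == 5 mod 3] as stated
    by [mod_add], and [(5 + 7*3)/3 == 26/3 == 8 == 5/3 + 7] as stated
    by [div_add]. The positivity hypothesis [0<c] in the cancellation
    lemmas is really needed, as the counterexample above shows :
    [(-3)*(-1) / ((-2)*(-1)) == 3/2 == 1] while [(-3)/(-2) == 2]. *)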
Lemma mul_mod_idemp_l : forall a b n, n~=0 -> ((a mod n)*b) mod n == (a*b) mod n. Proof. intros a b n Hn. symmetry. rewrite (div_mod a n) at 1 by order. rewrite add_comm, (mul_comm n), (mul_comm _ b). rewrite mul_add_distr_l, mul_assoc. rewrite mod_add by trivial. now rewrite mul_comm. Qed. Lemma mul_mod_idemp_r : forall a b n, n~=0 -> (a*(b mod n)) mod n == (a*b) mod n. Proof. intros. rewrite !(mul_comm a). now apply mul_mod_idemp_l. Qed. Theorem mul_mod: forall a b n, n~=0 -> (a * b) mod n == ((a mod n) * (b mod n)) mod n. Proof. intros. now rewrite mul_mod_idemp_l, mul_mod_idemp_r. Qed. Lemma add_mod_idemp_l : forall a b n, n~=0 -> ((a mod n)+b) mod n == (a+b) mod n. Proof. intros a b n Hn. symmetry. rewrite (div_mod a n) at 1 by order. rewrite <- add_assoc, add_comm, mul_comm. now rewrite mod_add. Qed. Lemma add_mod_idemp_r : forall a b n, n~=0 -> (a+(b mod n)) mod n == (a+b) mod n. Proof. intros. rewrite !(add_comm a). now apply add_mod_idemp_l. Qed. Theorem add_mod: forall a b n, n~=0 -> (a+b) mod n == (a mod n + b mod n) mod n. Proof. intros. now rewrite add_mod_idemp_l, add_mod_idemp_r. Qed. (** With the current convention, the following result isn't always true with a negative intermediate divisor. For instance [ 3/(-2)/(-2) = 1 <> 0 = 3 / (-2*-2) ] and [ 3/(-2)/2 = -1 <> 0 = 3 / (-2*2) ]. *) Lemma div_div : forall a b c, 0 c~=0 -> (a/b)/c == a/(b*c). Proof. intros a b c Hb Hc. apply div_unique with (b*((a/b) mod c) + a mod b). (* begin 0<= ... c~=0 -> a mod (b*c) == a mod b + b*((a/b) mod c). Proof. intros a b c Hb Hc. apply add_cancel_l with (b*c*(a/(b*c))). rewrite <- div_mod by (apply neq_mul_0; split; order). rewrite <- div_div by trivial. rewrite add_assoc, add_shuffle0, <- mul_assoc, <- mul_add_distr_l. rewrite <- div_mod by order. apply div_mod; order. Qed. (** A last inequality: *) Theorem div_mul_le: forall a b c, 0<=a -> 0 0<=c -> c*(a/b) <= (c*a)/b. Proof. exact div_mul_le. Qed. (** mod is related to divisibility *) Lemma mod_divides : forall a b, b~=0 -> (a mod b == 0 <-> (b|a)). Proof. intros a b Hb. split. intros Hab. exists (a/b). rewrite mul_comm. rewrite (div_mod a b Hb) at 1. rewrite Hab; now nzsimpl. intros (c,Hc). rewrite Hc. now apply mod_mul. Qed. End ZEuclidProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZProperties.v0000640000175000001440000000165712010532755022712 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (n | m). Proof. intros n m. split; intros (p,Hp); exists (-p); rewrite Hp. now rewrite mul_opp_l, mul_opp_r. now rewrite mul_opp_opp. Qed. Lemma divide_opp_r : forall n m, (n | -m) <-> (n | m). Proof. intros n m. split; intros (p,Hp); exists (-p). now rewrite mul_opp_l, <- Hp, opp_involutive. now rewrite Hp, mul_opp_l. Qed. Lemma divide_abs_l : forall n m, (abs n | m) <-> (n | m). Proof. intros n m. destruct (abs_eq_or_opp n) as [H|H]; rewrite H. easy. apply divide_opp_l. Qed. Lemma divide_abs_r : forall n m, (n | abs m) <-> (n | m). Proof. intros n m. destruct (abs_eq_or_opp m) as [H|H]; rewrite H. easy. apply divide_opp_r. Qed. Lemma divide_1_r_abs : forall n, (n | 1) -> abs n == 1. Proof. intros n Hn. apply divide_1_r_nonneg. apply abs_nonneg. now apply divide_abs_l. Qed. Lemma divide_1_r : forall n, (n | 1) -> n==1 \/ n==-1. Proof. intros n (m,H). rewrite mul_comm in H. now apply eq_mul_1 with m. Qed. Lemma divide_antisym_abs : forall n m, (n | m) -> (m | n) -> abs n == abs m. Proof. intros. 
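(* For instance (numerals read in e.g. [Z]) : [(4 | -4)] and
   [(-4 | 4)] both hold, and indeed [abs 4 == abs (-4)]; at the level
   of [divide_antisym] below this becomes [4 == -(-4)]. More
   generally, [divide_opp_l], [divide_opp_r] and the [divide_abs_*]
   lemmas say that divisibility only depends on absolute values. *)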
apply divide_antisym_nonneg; try apply abs_nonneg. now apply divide_abs_l, divide_abs_r. now apply divide_abs_l, divide_abs_r. Qed. Lemma divide_antisym : forall n m, (n | m) -> (m | n) -> n == m \/ n == -m. Proof. intros. now apply abs_eq_cases, divide_antisym_abs. Qed. Lemma divide_sub_r : forall n m p, (n | m) -> (n | p) -> (n | m - p). Proof. intros n m p H H'. rewrite <- add_opp_r. apply divide_add_r; trivial. now apply divide_opp_r. Qed. Lemma divide_add_cancel_r : forall n m p, (n | m) -> (n | m + p) -> (n | p). Proof. intros n m p H H'. rewrite <- (add_simpl_l m p). now apply divide_sub_r. Qed. (** Properties of gcd *) Lemma gcd_opp_l : forall n m, gcd (-n) m == gcd n m. Proof. intros. apply gcd_unique_alt; try apply gcd_nonneg. intros. rewrite divide_opp_r. apply gcd_divide_iff. Qed. Lemma gcd_opp_r : forall n m, gcd n (-m) == gcd n m. Proof. intros. now rewrite gcd_comm, gcd_opp_l, gcd_comm. Qed. Lemma gcd_abs_l : forall n m, gcd (abs n) m == gcd n m. Proof. intros. destruct (abs_eq_or_opp n) as [H|H]; rewrite H. easy. apply gcd_opp_l. Qed. Lemma gcd_abs_r : forall n m, gcd n (abs m) == gcd n m. Proof. intros. now rewrite gcd_comm, gcd_abs_l, gcd_comm. Qed. Lemma gcd_0_l : forall n, gcd 0 n == abs n. Proof. intros. rewrite <- gcd_abs_r. apply gcd_0_l_nonneg, abs_nonneg. Qed. Lemma gcd_0_r : forall n, gcd n 0 == abs n. Proof. intros. now rewrite gcd_comm, gcd_0_l. Qed. Lemma gcd_diag : forall n, gcd n n == abs n. Proof. intros. rewrite <- gcd_abs_l, <- gcd_abs_r. apply gcd_diag_nonneg, abs_nonneg. Qed. Lemma gcd_add_mult_diag_r : forall n m p, gcd n (m+p*n) == gcd n m. Proof. intros. apply gcd_unique_alt; try apply gcd_nonneg. intros. rewrite gcd_divide_iff. split; intros (U,V); split; trivial. apply divide_add_r; trivial. now apply divide_mul_r. apply divide_add_cancel_r with (p*n); trivial. now apply divide_mul_r. now rewrite add_comm. Qed. Lemma gcd_add_diag_r : forall n m, gcd n (m+n) == gcd n m. Proof. intros n m. rewrite <- (mul_1_l n) at 2. apply gcd_add_mult_diag_r. Qed. Lemma gcd_sub_diag_r : forall n m, gcd n (m-n) == gcd n m. Proof. intros n m. rewrite <- (mul_1_l n) at 2. rewrite <- add_opp_r, <- mul_opp_l. apply gcd_add_mult_diag_r. Qed. Definition Bezout n m p := exists a b, a*n + b*m == p. Instance Bezout_wd : Proper (eq==>eq==>eq==>iff) Bezout. Proof. unfold Bezout. intros x x' Hx y y' Hy z z' Hz. setoid_rewrite Hx. setoid_rewrite Hy. now setoid_rewrite Hz. Qed. Lemma bezout_1_gcd : forall n m, Bezout n m 1 -> gcd n m == 1. Proof. intros n m (q & r & H). apply gcd_unique; trivial using divide_1_l, le_0_1. intros p Hn Hm. rewrite <- H. apply divide_add_r; now apply divide_mul_r. Qed. Lemma gcd_bezout : forall n m p, gcd n m == p -> Bezout n m p. Proof. (* First, a version restricted to natural numbers *) assert (aux : forall n, 0<=n -> forall m, 0<=m -> Bezout n m (gcd n m)). intros n Hn; pattern n. apply strong_right_induction with (z:=0); trivial. unfold Bezout. solve_proper. clear n Hn. intros n Hn IHn. apply le_lteq in Hn; destruct Hn as [Hn|Hn]. intros m Hm; pattern m. apply strong_right_induction with (z:=0); trivial. unfold Bezout. solve_proper. clear m Hm. intros m Hm IHm. destruct (lt_trichotomy n m) as [LT|[EQ|LT]]. (* n < m *) destruct (IHm (m-n)) as (a & b & EQ). apply sub_nonneg; order. now apply lt_sub_pos. exists (a-b). exists b. rewrite gcd_sub_diag_r in EQ. rewrite <- EQ. rewrite mul_sub_distr_r, mul_sub_distr_l. now rewrite add_sub_assoc, add_sub_swap. (* n = m *) rewrite EQ. rewrite gcd_diag_nonneg; trivial. exists 1. exists 0. now nzsimpl. 
(* m < n *) destruct (IHn m Hm LT n) as (a & b & EQ). order. exists b. exists a. now rewrite gcd_comm, <- EQ, add_comm. (* n = 0 *) intros m Hm. rewrite <- Hn, gcd_0_l_nonneg; trivial. exists 0. exists 1. now nzsimpl. (* Then we relax the positivity condition on n *) assert (aux' : forall n m, 0<=m -> Bezout n m (gcd n m)). intros n m Hm. destruct (le_ge_cases 0 n). now apply aux. assert (Hn' : 0 <= -n) by now apply opp_nonneg_nonpos. destruct (aux (-n) Hn' m Hm) as (a & b & EQ). exists (-a). exists b. now rewrite <- gcd_opp_l, <- EQ, mul_opp_r, mul_opp_l. (* And finally we do the same for m *) intros n m p Hp. rewrite <- Hp; clear Hp. destruct (le_ge_cases 0 m). now apply aux'. assert (Hm' : 0 <= -m) by now apply opp_nonneg_nonpos. destruct (aux' n (-m) Hm') as (a & b & EQ). exists a. exists (-b). now rewrite <- gcd_opp_r, <- EQ, mul_opp_r, mul_opp_l. Qed. Lemma gcd_mul_mono_l : forall n m p, gcd (p * n) (p * m) == abs p * gcd n m. Proof. intros n m p. apply gcd_unique. apply mul_nonneg_nonneg; trivial using gcd_nonneg, abs_nonneg. destruct (gcd_divide_l n m) as (q,Hq). rewrite Hq at 2. rewrite mul_assoc. apply mul_divide_mono_r. rewrite <- (abs_sgn p) at 2. rewrite <- mul_assoc. apply divide_factor_l. destruct (gcd_divide_r n m) as (q,Hq). rewrite Hq at 2. rewrite mul_assoc. apply mul_divide_mono_r. rewrite <- (abs_sgn p) at 2. rewrite <- mul_assoc. apply divide_factor_l. intros q H H'. destruct (gcd_bezout n m (gcd n m) (eq_refl _)) as (a & b & EQ). rewrite <- EQ, <- sgn_abs, mul_add_distr_l. apply divide_add_r. rewrite mul_shuffle2. now apply divide_mul_l. rewrite mul_shuffle2. now apply divide_mul_l. Qed. Lemma gcd_mul_mono_l_nonneg : forall n m p, 0<=p -> gcd (p*n) (p*m) == p * gcd n m. Proof. intros. rewrite <- (abs_eq p) at 3; trivial. apply gcd_mul_mono_l. Qed. Lemma gcd_mul_mono_r : forall n m p, gcd (n * p) (m * p) == gcd n m * abs p. Proof. intros n m p. now rewrite !(mul_comm _ p), gcd_mul_mono_l, mul_comm. Qed. Lemma gcd_mul_mono_r_nonneg : forall n m p, 0<=p -> gcd (n*p) (m*p) == gcd n m * p. Proof. intros. rewrite <- (abs_eq p) at 3; trivial. apply gcd_mul_mono_r. Qed. Lemma gauss : forall n m p, (n | m * p) -> gcd n m == 1 -> (n | p). Proof. intros n m p H G. destruct (gcd_bezout n m 1 G) as (a & b & EQ). rewrite <- (mul_1_l p), <- EQ, mul_add_distr_r. apply divide_add_r. rewrite mul_shuffle0. apply divide_factor_r. rewrite <- mul_assoc. now apply divide_mul_r. Qed. Lemma divide_mul_split : forall n m p, n ~= 0 -> (n | m * p) -> exists q r, n == q*r /\ (q | m) /\ (r | p). Proof. intros n m p Hn H. assert (G := gcd_nonneg n m). apply le_lteq in G; destruct G as [G|G]. destruct (gcd_divide_l n m) as (q,Hq). exists (gcd n m). exists q. split. now rewrite mul_comm. split. apply gcd_divide_r. destruct (gcd_divide_r n m) as (r,Hr). rewrite Hr in H. rewrite Hq in H at 1. rewrite mul_shuffle0 in H. apply mul_divide_cancel_r in H; [|order]. apply gauss with r; trivial. apply mul_cancel_r with (gcd n m); [order|]. rewrite mul_1_l. rewrite <- gcd_mul_mono_r_nonneg, <- Hq, <- Hr; order. symmetry in G. apply gcd_eq_0 in G. destruct G as (Hn',_); order. Qed. (** TODO : more about rel_prime (i.e. gcd == 1), about prime ... *) End ZGcdProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZMul.v0000640000175000001440000000501212010532755021300 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* a rem b == a - b*(a÷b). Proof. intros. rewrite <- add_move_l. symmetry. now apply quot_rem. Qed. 
(** A few sign rules (simple ones) *) Lemma rem_opp_opp : forall a b, b ~= 0 -> (-a) rem (-b) == - (a rem b). Proof. intros. now rewrite rem_opp_r, rem_opp_l. Qed. Lemma quot_opp_l : forall a b, b ~= 0 -> (-a)÷b == -(a÷b). Proof. intros. rewrite <- (mul_cancel_l _ _ b) by trivial. rewrite <- (add_cancel_r _ _ ((-a) rem b)). now rewrite <- quot_rem, rem_opp_l, mul_opp_r, <- opp_add_distr, <- quot_rem. Qed. Lemma quot_opp_r : forall a b, b ~= 0 -> a÷(-b) == -(a÷b). Proof. intros. assert (-b ~= 0) by (now rewrite eq_opp_l, opp_0). rewrite <- (mul_cancel_l _ _ (-b)) by trivial. rewrite <- (add_cancel_r _ _ (a rem (-b))). now rewrite <- quot_rem, rem_opp_r, mul_opp_opp, <- quot_rem. Qed. Lemma quot_opp_opp : forall a b, b ~= 0 -> (-a)÷(-b) == a÷b. Proof. intros. now rewrite quot_opp_r, quot_opp_l, opp_involutive. Qed. (** Uniqueness theorems *) Theorem quot_rem_unique : forall b q1 q2 r1 r2 : t, (0<=r1 (0<=r2 b*q1+r1 == b*q2+r2 -> q1 == q2 /\ r1 == r2. Proof. intros b q1 q2 r1 r2 Hr1 Hr2 EQ. destruct Hr1; destruct Hr2; try (intuition; order). apply NZQuot.div_mod_unique with b; trivial. rewrite <- (opp_inj_wd r1 r2). apply NZQuot.div_mod_unique with (-b); trivial. rewrite <- opp_lt_mono, opp_nonneg_nonpos; tauto. rewrite <- opp_lt_mono, opp_nonneg_nonpos; tauto. now rewrite 2 mul_opp_l, <- 2 opp_add_distr, opp_inj_wd. Qed. Theorem quot_unique: forall a b q r, 0<=a -> 0<=r a == b*q + r -> q == a÷b. Proof. intros; now apply NZQuot.div_unique with r. Qed. Theorem rem_unique: forall a b q r, 0<=a -> 0<=r a == b*q + r -> r == a rem b. Proof. intros; now apply NZQuot.mod_unique with q. Qed. (** A division by itself returns 1 *) Lemma quot_same : forall a, a~=0 -> a÷a == 1. Proof. intros. pos_or_neg a. apply NZQuot.div_same; order. rewrite <- quot_opp_opp by trivial. now apply NZQuot.div_same. Qed. Lemma rem_same : forall a, a~=0 -> a rem a == 0. Proof. intros. rewrite rem_eq, quot_same by trivial. nzsimpl. apply sub_diag. Qed. (** A division of a small number by a bigger one yields zero. *) Theorem quot_small: forall a b, 0<=a a÷b == 0. Proof. exact NZQuot.div_small. Qed. (** Same situation, in term of remulo: *) Theorem rem_small: forall a b, 0<=a a rem b == a. Proof. exact NZQuot.mod_small. Qed. (** * Basic values of divisions and modulo. *) Lemma quot_0_l: forall a, a~=0 -> 0÷a == 0. Proof. intros. pos_or_neg a. apply NZQuot.div_0_l; order. rewrite <- quot_opp_opp, opp_0 by trivial. now apply NZQuot.div_0_l. Qed. Lemma rem_0_l: forall a, a~=0 -> 0 rem a == 0. Proof. intros; rewrite rem_eq, quot_0_l; now nzsimpl. Qed. Lemma quot_1_r: forall a, a÷1 == a. Proof. intros. pos_or_neg a. now apply NZQuot.div_1_r. apply opp_inj. rewrite <- quot_opp_l. apply NZQuot.div_1_r; order. intro EQ; symmetry in EQ; revert EQ; apply lt_neq, lt_0_1. Qed. Lemma rem_1_r: forall a, a rem 1 == 0. Proof. intros. rewrite rem_eq, quot_1_r; nzsimpl; auto using sub_diag. intro EQ; symmetry in EQ; revert EQ; apply lt_neq; apply lt_0_1. Qed. Lemma quot_1_l: forall a, 1 1÷a == 0. Proof. exact NZQuot.div_1_l. Qed. Lemma rem_1_l: forall a, 1 1 rem a == 1. Proof. exact NZQuot.mod_1_l. Qed. Lemma quot_mul : forall a b, b~=0 -> (a*b)÷b == a. Proof. intros. pos_or_neg a; pos_or_neg b. apply NZQuot.div_mul; order. rewrite <- quot_opp_opp, <- mul_opp_r by order. apply NZQuot.div_mul; order. rewrite <- opp_inj_wd, <- quot_opp_l, <- mul_opp_l by order. apply NZQuot.div_mul; order. rewrite <- opp_inj_wd, <- quot_opp_r, <- mul_opp_opp by order. apply NZQuot.div_mul; order. Qed. Lemma rem_mul : forall a b, b~=0 -> (a*b) rem b == 0. Proof. 
intros. rewrite rem_eq, quot_mul by trivial. rewrite mul_comm; apply sub_diag. Qed. Theorem quot_unique_exact a b q: b~=0 -> a == b*q -> q == a÷b. Proof. intros Hb H. rewrite H, mul_comm. symmetry. now apply quot_mul. Qed. (** The sign of [a rem b] is the one of [a] (when it's not null) *) Lemma rem_nonneg : forall a b, b~=0 -> 0 <= a -> 0 <= a rem b. Proof. intros. pos_or_neg b. destruct (rem_bound_pos a b); order. rewrite <- rem_opp_r; trivial. destruct (rem_bound_pos a (-b)); trivial. Qed. Lemma rem_nonpos : forall a b, b~=0 -> a <= 0 -> a rem b <= 0. Proof. intros a b Hb Ha. apply opp_nonneg_nonpos. apply opp_nonneg_nonpos in Ha. rewrite <- rem_opp_l by trivial. now apply rem_nonneg. Qed. Lemma rem_sign_mul : forall a b, b~=0 -> 0 <= (a rem b) * a. Proof. intros a b Hb. destruct (le_ge_cases 0 a). apply mul_nonneg_nonneg; trivial. now apply rem_nonneg. apply mul_nonpos_nonpos; trivial. now apply rem_nonpos. Qed. Lemma rem_sign_nz : forall a b, b~=0 -> a rem b ~= 0 -> sgn (a rem b) == sgn a. Proof. intros a b Hb H. destruct (lt_trichotomy 0 a) as [LT|[EQ|LT]]. rewrite 2 sgn_pos; try easy. generalize (rem_nonneg a b Hb (lt_le_incl _ _ LT)). order. now rewrite <- EQ, rem_0_l, sgn_0. rewrite 2 sgn_neg; try easy. generalize (rem_nonpos a b Hb (lt_le_incl _ _ LT)). order. Qed. Lemma rem_sign : forall a b, a~=0 -> b~=0 -> sgn (a rem b) ~= -sgn a. Proof. intros a b Ha Hb H. destruct (eq_decidable (a rem b) 0) as [EQ|NEQ]. apply Ha, sgn_null_iff, opp_inj. now rewrite <- H, opp_0, EQ, sgn_0. apply Ha, sgn_null_iff. apply eq_mul_0_l with 2; try order'. nzsimpl'. apply add_move_0_l. rewrite <- H. symmetry. now apply rem_sign_nz. Qed. (** Operations and absolute value *) Lemma rem_abs_l : forall a b, b ~= 0 -> (abs a) rem b == abs (a rem b). Proof. intros a b Hb. destruct (le_ge_cases 0 a) as [LE|LE]. rewrite 2 abs_eq; try easy. now apply rem_nonneg. rewrite 2 abs_neq, rem_opp_l; try easy. now apply rem_nonpos. Qed. Lemma rem_abs_r : forall a b, b ~= 0 -> a rem (abs b) == a rem b. Proof. intros a b Hb. destruct (le_ge_cases 0 b). now rewrite abs_eq. now rewrite abs_neq, ?rem_opp_r. Qed. Lemma rem_abs : forall a b, b ~= 0 -> (abs a) rem (abs b) == abs (a rem b). Proof. intros. now rewrite rem_abs_r, rem_abs_l. Qed. Lemma quot_abs_l : forall a b, b ~= 0 -> (abs a)÷b == (sgn a)*(a÷b). Proof. intros a b Hb. destruct (lt_trichotomy 0 a) as [LT|[EQ|LT]]. rewrite abs_eq, sgn_pos by order. now nzsimpl. rewrite <- EQ, abs_0, quot_0_l; trivial. now nzsimpl. rewrite abs_neq, quot_opp_l, sgn_neg by order. rewrite mul_opp_l. now nzsimpl. Qed. Lemma quot_abs_r : forall a b, b ~= 0 -> a÷(abs b) == (sgn b)*(a÷b). Proof. intros a b Hb. destruct (lt_trichotomy 0 b) as [LT|[EQ|LT]]. rewrite abs_eq, sgn_pos by order. now nzsimpl. order. rewrite abs_neq, quot_opp_r, sgn_neg by order. rewrite mul_opp_l. now nzsimpl. Qed. Lemma quot_abs : forall a b, b ~= 0 -> (abs a)÷(abs b) == abs (a÷b). Proof. intros a b Hb. pos_or_neg a; [rewrite (abs_eq a)|rewrite (abs_neq a)]; try apply opp_nonneg_nonpos; try order. pos_or_neg b; [rewrite (abs_eq b)|rewrite (abs_neq b)]; try apply opp_nonneg_nonpos; try order. rewrite abs_eq; try easy. apply NZQuot.div_pos; order. rewrite <- abs_opp, <- quot_opp_r, abs_eq; try easy. apply NZQuot.div_pos; order. pos_or_neg b; [rewrite (abs_eq b)|rewrite (abs_neq b)]; try apply opp_nonneg_nonpos; try order. rewrite <- (abs_opp (_÷_)), <- quot_opp_l, abs_eq; try easy. apply NZQuot.div_pos; order. rewrite <- (quot_opp_opp a b), abs_eq; try easy. apply NZQuot.div_pos; order. Qed. 
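(** Summarizing the lemmas above on a concrete case (numerals read in
    e.g. [Z]) : [(-7) rem 3 == -1] is nonpositive because the
    dividend is, [7 rem (-3) == 1] is nonnegative, and in both cases
    [abs (a rem b) < abs b], as [rem_bound_abs] just below makes
    precise. For [abs] and [÷] :
    [abs (-7) ÷ abs 3 == 7 ÷ 3 == 2 == abs ((-7) ÷ 3)]. *)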
(** We have a general bound for absolute values *) Lemma rem_bound_abs : forall a b, b~=0 -> abs (a rem b) < abs b. Proof. intros. rewrite <- rem_abs; trivial. apply rem_bound_pos. apply abs_nonneg. now apply abs_pos. Qed. (** * Order results about rem and quot *) (** A modulo cannot grow beyond its starting point. *) Theorem rem_le: forall a b, 0<=a -> 0 a rem b <= a. Proof. exact NZQuot.mod_le. Qed. Theorem quot_pos : forall a b, 0<=a -> 0 0<= a÷b. Proof. exact NZQuot.div_pos. Qed. Lemma quot_str_pos : forall a b, 0 0 < a÷b. Proof. exact NZQuot.div_str_pos. Qed. Lemma quot_small_iff : forall a b, b~=0 -> (a÷b==0 <-> abs a < abs b). Proof. intros. pos_or_neg a; pos_or_neg b. rewrite NZQuot.div_small_iff; try order. rewrite 2 abs_eq; intuition; order. rewrite <- opp_inj_wd, opp_0, <- quot_opp_r, NZQuot.div_small_iff by order. rewrite (abs_eq a), (abs_neq' b); intuition; order. rewrite <- opp_inj_wd, opp_0, <- quot_opp_l, NZQuot.div_small_iff by order. rewrite (abs_neq' a), (abs_eq b); intuition; order. rewrite <- quot_opp_opp, NZQuot.div_small_iff by order. rewrite (abs_neq' a), (abs_neq' b); intuition; order. Qed. Lemma rem_small_iff : forall a b, b~=0 -> (a rem b == a <-> abs a < abs b). Proof. intros. rewrite rem_eq, <- quot_small_iff by order. rewrite sub_move_r, <- (add_0_r a) at 1. rewrite add_cancel_l. rewrite eq_sym_iff, eq_mul_0. tauto. Qed. (** As soon as the divisor is strictly greater than 1, the division is strictly decreasing. *) Lemma quot_lt : forall a b, 0 1 a÷b < a. Proof. exact NZQuot.div_lt. Qed. (** [le] is compatible with a positive division. *) Lemma quot_le_mono : forall a b c, 0 a<=b -> a÷c <= b÷c. Proof. intros. pos_or_neg a. apply NZQuot.div_le_mono; auto. pos_or_neg b. apply le_trans with 0. rewrite <- opp_nonneg_nonpos, <- quot_opp_l by order. apply quot_pos; order. apply quot_pos; order. rewrite opp_le_mono in *. rewrite <- 2 quot_opp_l by order. apply NZQuot.div_le_mono; intuition; order. Qed. (** With this choice of division, rounding of quot is always done toward zero: *) Lemma mul_quot_le : forall a b, 0<=a -> b~=0 -> 0 <= b*(a÷b) <= a. Proof. intros. pos_or_neg b. split. apply mul_nonneg_nonneg; [|apply quot_pos]; order. apply NZQuot.mul_div_le; order. rewrite <- mul_opp_opp, <- quot_opp_r by order. split. apply mul_nonneg_nonneg; [|apply quot_pos]; order. apply NZQuot.mul_div_le; order. Qed. Lemma mul_quot_ge : forall a b, a<=0 -> b~=0 -> a <= b*(a÷b) <= 0. Proof. intros. rewrite <- opp_nonneg_nonpos, opp_le_mono, <-mul_opp_r, <-quot_opp_l by order. rewrite <- opp_nonneg_nonpos in *. destruct (mul_quot_le (-a) b); tauto. Qed. (** For positive numbers, considering [S (a÷b)] leads to an upper bound for [a] *) Lemma mul_succ_quot_gt: forall a b, 0<=a -> 0 a < b*(S (a÷b)). Proof. exact NZQuot.mul_succ_div_gt. Qed. (** Similar results with negative numbers *) Lemma mul_pred_quot_lt: forall a b, a<=0 -> 0 b*(P (a÷b)) < a. Proof. intros. rewrite opp_lt_mono, <- mul_opp_r, opp_pred, <- quot_opp_l by order. rewrite <- opp_nonneg_nonpos in *. now apply mul_succ_quot_gt. Qed. Lemma mul_pred_quot_gt: forall a b, 0<=a -> b<0 -> a < b*(P (a÷b)). Proof. intros. rewrite <- mul_opp_opp, opp_pred, <- quot_opp_r by order. rewrite <- opp_pos_neg in *. now apply mul_succ_quot_gt. Qed. Lemma mul_succ_quot_lt: forall a b, a<=0 -> b<0 -> b*(S (a÷b)) < a. Proof. intros. rewrite opp_lt_mono, <- mul_opp_l, <- quot_opp_opp by order. rewrite <- opp_nonneg_nonpos, <- opp_pos_neg in *. now apply mul_succ_quot_gt. Qed. (** Inequality [mul_quot_le] is exact iff the modulo is zero. 
*) Lemma quot_exact : forall a b, b~=0 -> (a == b*(a÷b) <-> a rem b == 0). Proof. intros. rewrite rem_eq by order. rewrite sub_move_r; nzsimpl; tauto. Qed. (** Some additionnal inequalities about quot. *) Theorem quot_lt_upper_bound: forall a b q, 0<=a -> 0 a < b*q -> a÷b < q. Proof. exact NZQuot.div_lt_upper_bound. Qed. Theorem quot_le_upper_bound: forall a b q, 0 a <= b*q -> a÷b <= q. Proof. intros. rewrite <- (quot_mul q b) by order. apply quot_le_mono; trivial. now rewrite mul_comm. Qed. Theorem quot_le_lower_bound: forall a b q, 0 b*q <= a -> q <= a÷b. Proof. intros. rewrite <- (quot_mul q b) by order. apply quot_le_mono; trivial. now rewrite mul_comm. Qed. (** A division respects opposite monotonicity for the divisor *) Lemma quot_le_compat_l: forall p q r, 0<=p -> 0 p÷r <= p÷q. Proof. exact NZQuot.div_le_compat_l. Qed. (** * Relations between usual operations and rem and quot *) (** Unlike with other division conventions, some results here aren't always valid, and need to be restricted. For instance [(a+b*c) rem c <> a rem c] for [a=9,b=-5,c=2] *) Lemma rem_add : forall a b c, c~=0 -> 0 <= (a+b*c)*a -> (a + b * c) rem c == a rem c. Proof. assert (forall a b c, c~=0 -> 0<=a -> 0<=a+b*c -> (a+b*c) rem c == a rem c). intros. pos_or_neg c. apply NZQuot.mod_add; order. rewrite <- (rem_opp_r a), <- (rem_opp_r (a+b*c)) by order. rewrite <- mul_opp_opp in *. apply NZQuot.mod_add; order. intros a b c Hc Habc. destruct (le_0_mul _ _ Habc) as [(Habc',Ha)|(Habc',Ha)]. auto. apply opp_inj. revert Ha Habc'. rewrite <- 2 opp_nonneg_nonpos. rewrite <- 2 rem_opp_l, opp_add_distr, <- mul_opp_l by order. auto. Qed. Lemma quot_add : forall a b c, c~=0 -> 0 <= (a+b*c)*a -> (a + b * c) ÷ c == a ÷ c + b. Proof. intros. rewrite <- (mul_cancel_l _ _ c) by trivial. rewrite <- (add_cancel_r _ _ ((a+b*c) rem c)). rewrite <- quot_rem, rem_add by trivial. now rewrite mul_add_distr_l, add_shuffle0, <-quot_rem, mul_comm. Qed. Lemma quot_add_l: forall a b c, b~=0 -> 0 <= (a*b+c)*c -> (a * b + c) ÷ b == a + c ÷ b. Proof. intros a b c. rewrite add_comm, (add_comm a). now apply quot_add. Qed. (** Cancellations. *) Lemma quot_mul_cancel_r : forall a b c, b~=0 -> c~=0 -> (a*c)÷(b*c) == a÷b. Proof. assert (Aux1 : forall a b c, 0<=a -> 0 c~=0 -> (a*c)÷(b*c) == a÷b). intros. pos_or_neg c. apply NZQuot.div_mul_cancel_r; order. rewrite <- quot_opp_opp, <- 2 mul_opp_r. apply NZQuot.div_mul_cancel_r; order. rewrite <- neq_mul_0; intuition order. assert (Aux2 : forall a b c, 0<=a -> b~=0 -> c~=0 -> (a*c)÷(b*c) == a÷b). intros. pos_or_neg b. apply Aux1; order. apply opp_inj. rewrite <- 2 quot_opp_r, <- mul_opp_l; try order. apply Aux1; order. rewrite <- neq_mul_0; intuition order. intros. pos_or_neg a. apply Aux2; order. apply opp_inj. rewrite <- 2 quot_opp_l, <- mul_opp_l; try order. apply Aux2; order. rewrite <- neq_mul_0; intuition order. Qed. Lemma quot_mul_cancel_l : forall a b c, b~=0 -> c~=0 -> (c*a)÷(c*b) == a÷b. Proof. intros. rewrite !(mul_comm c); now apply quot_mul_cancel_r. Qed. Lemma mul_rem_distr_r: forall a b c, b~=0 -> c~=0 -> (a*c) rem (b*c) == (a rem b) * c. Proof. intros. assert (b*c ~= 0) by (rewrite <- neq_mul_0; tauto). rewrite ! rem_eq by trivial. rewrite quot_mul_cancel_r by order. now rewrite mul_sub_distr_r, <- !mul_assoc, (mul_comm (a÷b) c). Qed. Lemma mul_rem_distr_l: forall a b c, b~=0 -> c~=0 -> (c*a) rem (c*b) == c * (a rem b). Proof. intros; rewrite !(mul_comm c); now apply mul_rem_distr_r. Qed. (** Operations modulo. *) Theorem rem_rem: forall a n, n~=0 -> (a rem n) rem n == a rem n. Proof. 
intros. pos_or_neg a; pos_or_neg n. apply NZQuot.mod_mod; order. rewrite <- ! (rem_opp_r _ n) by trivial. apply NZQuot.mod_mod; order. apply opp_inj. rewrite <- !rem_opp_l by order. apply NZQuot.mod_mod; order. apply opp_inj. rewrite <- !rem_opp_opp by order. apply NZQuot.mod_mod; order. Qed. Lemma mul_rem_idemp_l : forall a b n, n~=0 -> ((a rem n)*b) rem n == (a*b) rem n. Proof. assert (Aux1 : forall a b n, 0<=a -> 0<=b -> n~=0 -> ((a rem n)*b) rem n == (a*b) rem n). intros. pos_or_neg n. apply NZQuot.mul_mod_idemp_l; order. rewrite <- ! (rem_opp_r _ n) by order. apply NZQuot.mul_mod_idemp_l; order. assert (Aux2 : forall a b n, 0<=a -> n~=0 -> ((a rem n)*b) rem n == (a*b) rem n). intros. pos_or_neg b. now apply Aux1. apply opp_inj. rewrite <-2 rem_opp_l, <-2 mul_opp_r by order. apply Aux1; order. intros a b n Hn. pos_or_neg a. now apply Aux2. apply opp_inj. rewrite <-2 rem_opp_l, <-2 mul_opp_l, <-rem_opp_l by order. apply Aux2; order. Qed. Lemma mul_rem_idemp_r : forall a b n, n~=0 -> (a*(b rem n)) rem n == (a*b) rem n. Proof. intros. rewrite !(mul_comm a). now apply mul_rem_idemp_l. Qed. Theorem mul_rem: forall a b n, n~=0 -> (a * b) rem n == ((a rem n) * (b rem n)) rem n. Proof. intros. now rewrite mul_rem_idemp_l, mul_rem_idemp_r. Qed. (** addition and modulo Generally speaking, unlike with other conventions, we don't have [(a+b) rem n = (a rem n + b rem n) rem n] for any a and b. For instance, take (8 + (-10)) rem 3 = -2 whereas (8 rem 3 + (-10 rem 3)) rem 3 = 1. *) Lemma add_rem_idemp_l : forall a b n, n~=0 -> 0 <= a*b -> ((a rem n)+b) rem n == (a+b) rem n. Proof. assert (Aux : forall a b n, 0<=a -> 0<=b -> n~=0 -> ((a rem n)+b) rem n == (a+b) rem n). intros. pos_or_neg n. apply NZQuot.add_mod_idemp_l; order. rewrite <- ! (rem_opp_r _ n) by order. apply NZQuot.add_mod_idemp_l; order. intros a b n Hn Hab. destruct (le_0_mul _ _ Hab) as [(Ha,Hb)|(Ha,Hb)]. now apply Aux. apply opp_inj. rewrite <-2 rem_opp_l, 2 opp_add_distr, <-rem_opp_l by order. rewrite <- opp_nonneg_nonpos in *. now apply Aux. Qed. Lemma add_rem_idemp_r : forall a b n, n~=0 -> 0 <= a*b -> (a+(b rem n)) rem n == (a+b) rem n. Proof. intros. rewrite !(add_comm a). apply add_rem_idemp_l; trivial. now rewrite mul_comm. Qed. Theorem add_rem: forall a b n, n~=0 -> 0 <= a*b -> (a+b) rem n == (a rem n + b rem n) rem n. Proof. intros a b n Hn Hab. rewrite add_rem_idemp_l, add_rem_idemp_r; trivial. reflexivity. destruct (le_0_mul _ _ Hab) as [(Ha,Hb)|(Ha,Hb)]; destruct (le_0_mul _ _ (rem_sign_mul b n Hn)) as [(Hb',Hm)|(Hb',Hm)]; auto using mul_nonneg_nonneg, mul_nonpos_nonpos. setoid_replace b with 0 by order. rewrite rem_0_l by order. nzsimpl; order. setoid_replace b with 0 by order. rewrite rem_0_l by order. nzsimpl; order. Qed. (** Conversely, the following results need less restrictions here. *) Lemma quot_quot : forall a b c, b~=0 -> c~=0 -> (a÷b)÷c == a÷(b*c). Proof. assert (Aux1 : forall a b c, 0<=a -> 0 c~=0 -> (a÷b)÷c == a÷(b*c)). intros. pos_or_neg c. apply NZQuot.div_div; order. apply opp_inj. rewrite <- 2 quot_opp_r, <- mul_opp_r; trivial. apply NZQuot.div_div; order. rewrite <- neq_mul_0; intuition order. assert (Aux2 : forall a b c, 0<=a -> b~=0 -> c~=0 -> (a÷b)÷c == a÷(b*c)). intros. pos_or_neg b. apply Aux1; order. apply opp_inj. rewrite <- quot_opp_l, <- 2 quot_opp_r, <- mul_opp_l; trivial. apply Aux1; trivial. rewrite <- neq_mul_0; intuition order. intros. pos_or_neg a. apply Aux2; order. apply opp_inj. rewrite <- 3 quot_opp_l; try order. apply Aux2; order. rewrite <- neq_mul_0. tauto. Qed. 
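(** In the counterexample above, the hypothesis [0 <= a*b] is indeed
    violated : [8 * (-10) < 0]. When the sign condition holds the
    identity does hold, e.g. [(8 + 10) rem 3 == 0 == (2 + 1) rem 3].
    For [quot_quot] (numerals read in e.g. [Z]) :
    [((-29)÷3)÷2 == (-9)÷2 == -4 == (-29)÷6]. *)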
Lemma mod_mul_r : forall a b c, b~=0 -> c~=0 -> a rem (b*c) == a rem b + b*((a÷b) rem c). Proof. intros a b c Hb Hc. apply add_cancel_l with (b*c*(a÷(b*c))). rewrite <- quot_rem by (apply neq_mul_0; split; order). rewrite <- quot_quot by trivial. rewrite add_assoc, add_shuffle0, <- mul_assoc, <- mul_add_distr_l. rewrite <- quot_rem by order. apply quot_rem; order. Qed. (** A last inequality: *) Theorem quot_mul_le: forall a b c, 0<=a -> 0 0<=c -> c*(a÷b) <= (c*a)÷b. Proof. exact NZQuot.div_mul_le. Qed. End ZQuotProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZAxioms.v0000640000175000001440000001070312010532755022006 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t. End Opp. Module Type OppNotation (T:Typ)(Import O : Opp T). Notation "- x" := (opp x) (at level 35, right associativity). End OppNotation. Module Type Opp' (T:Typ) := Opp T <+ OppNotation T. Module Type IsOpp (Import Z : NZAxiomsSig')(Import O : Opp' Z). Declare Instance opp_wd : Proper (eq==>eq) opp. Axiom opp_0 : - 0 == 0. Axiom opp_succ : forall n, - (S n) == P (- n). End IsOpp. Module Type OppCstNotation (Import A : NZAxiomsSig)(Import B : Opp A). Notation "- 1" := (opp one). Notation "- 2" := (opp two). End OppCstNotation. Module Type ZAxiomsMiniSig := NZOrdAxiomsSig <+ ZAxiom <+ Opp <+ IsOpp. Module Type ZAxiomsMiniSig' := NZOrdAxiomsSig' <+ ZAxiom <+ Opp' <+ IsOpp <+ OppCstNotation. (** Other functions and their specifications *) (** Absolute value *) Module Type HasAbs(Import Z : ZAxiomsMiniSig'). Parameter Inline abs : t -> t. Axiom abs_eq : forall n, 0<=n -> abs n == n. Axiom abs_neq : forall n, n<=0 -> abs n == -n. End HasAbs. (** A sign function *) Module Type HasSgn (Import Z : ZAxiomsMiniSig'). Parameter Inline sgn : t -> t. Axiom sgn_null : forall n, n==0 -> sgn n == 0. Axiom sgn_pos : forall n, 0 sgn n == 1. Axiom sgn_neg : forall n, n<0 -> sgn n == -1. End HasSgn. (** Divisions *) (** First, the usual Coq convention of Truncated-Toward-Bottom (a.k.a Floor). We simply extend the NZ signature. *) Module Type ZDivSpecific (Import A:ZAxiomsMiniSig')(Import B : DivMod' A). Axiom mod_pos_bound : forall a b, 0 < b -> 0 <= a mod b < b. Axiom mod_neg_bound : forall a b, b < 0 -> b < a mod b <= 0. End ZDivSpecific. Module Type ZDiv (Z:ZAxiomsMiniSig) := NZDiv.NZDiv Z <+ ZDivSpecific Z. Module Type ZDiv' (Z:ZAxiomsMiniSig) := NZDiv.NZDiv' Z <+ ZDivSpecific Z. (** Then, the Truncated-Toward-Zero convention. For not colliding with Floor operations, we use different names *) Module Type QuotRem (Import A : Typ). Parameters Inline quot rem : t -> t -> t. End QuotRem. Module Type QuotRemNotation (A : Typ)(Import B : QuotRem A). Infix "÷" := quot (at level 40, left associativity). Infix "rem" := rem (at level 40, no associativity). End QuotRemNotation. Module Type QuotRem' (A : Typ) := QuotRem A <+ QuotRemNotation A. Module Type QuotRemSpec (Import A : ZAxiomsMiniSig')(Import B : QuotRem' A). Declare Instance quot_wd : Proper (eq==>eq==>eq) quot. Declare Instance rem_wd : Proper (eq==>eq==>eq) B.rem. Axiom quot_rem : forall a b, b ~= 0 -> a == b*(a÷b) + (a rem b). Axiom rem_bound_pos : forall a b, 0<=a -> 0 0 <= a rem b < b. Axiom rem_opp_l : forall a b, b ~= 0 -> (-a) rem b == - (a rem b). Axiom rem_opp_r : forall a b, b ~= 0 -> a rem (-b) == a rem b. End QuotRemSpec. Module Type ZQuot (Z:ZAxiomsMiniSig) := QuotRem Z <+ QuotRemSpec Z. Module Type ZQuot' (Z:ZAxiomsMiniSig) := QuotRem' Z <+ QuotRemSpec Z. 
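(** Thus two divisions are axiomatized side by side : [/] and [mod]
    follow the Floor convention ([mod_pos_bound], [mod_neg_bound]),
    while [÷] and [rem] follow the Truncated-Toward-Zero convention
    ([rem_bound_pos], [rem_opp_l], [rem_opp_r]). They differ on
    negative arguments, e.g. (numerals read in e.g. [Z]) :
    [(-7)/3 == -3] and [(-7) mod 3 == 2], whereas
    [(-7)÷3 == -2] and [(-7) rem 3 == -1]; similarly
    [7/(-3) == -3] and [7 mod (-3) == -2], whereas
    [7÷(-3) == -2] and [7 rem (-3) == 1]. *)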
(** For all other functions, the NZ axiomatizations are enough. *) (** Let's group everything *) Module Type ZAxiomsSig := ZAxiomsMiniSig <+ OrderFunctions <+ HasAbs <+ HasSgn <+ NZParity.NZParity <+ NZPow.NZPow <+ NZSqrt.NZSqrt <+ NZLog.NZLog2 <+ NZGcd.NZGcd <+ ZDiv <+ ZQuot <+ NZBits.NZBits <+ NZSquare. Module Type ZAxiomsSig' := ZAxiomsMiniSig' <+ OrderFunctions' <+ HasAbs <+ HasSgn <+ NZParity.NZParity <+ NZPow.NZPow' <+ NZSqrt.NZSqrt' <+ NZLog.NZLog2 <+ NZGcd.NZGcd' <+ ZDiv' <+ ZQuot' <+ NZBits.NZBits' <+ NZSquare. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZBits.v0000640000175000001440000016177712010532755021471 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0<=c<=b -> a^(b-c) == a^b / a^c. Proof. intros a b c Ha (H,H'). rewrite <- (sub_simpl_r b c) at 2. rewrite pow_add_r; trivial. rewrite div_mul. reflexivity. now apply pow_nonzero. now apply le_0_sub. Qed. Lemma pow_div_l : forall a b c, b~=0 -> 0<=c -> a mod b == 0 -> (a/b)^c == a^c / b^c. Proof. intros a b c Hb Hc H. rewrite (div_mod a b Hb) at 2. rewrite H, add_0_r, pow_mul_l, mul_comm, div_mul. reflexivity. now apply pow_nonzero. Qed. (** An injection from bits [true] and [false] to numbers 1 and 0. We declare it as a (local) coercion for shorter statements. *) Definition b2z (b:bool) := if b then 1 else 0. Local Coercion b2z : bool >-> t. Instance b2z_wd : Proper (Logic.eq ==> eq) b2z := _. Lemma exists_div2 a : exists a' (b:bool), a == 2*a' + b. Proof. elim (Even_or_Odd a); [intros (a',H)| intros (a',H)]. exists a'. exists false. now nzsimpl. exists a'. exists true. now simpl. Qed. (** We can compact [testbit_odd_0] [testbit_even_0] [testbit_even_succ] [testbit_odd_succ] in only two lemmas. *) Lemma testbit_0_r a (b:bool) : testbit (2*a+b) 0 = b. Proof. destruct b; simpl; rewrite ?add_0_r. apply testbit_odd_0. apply testbit_even_0. Qed. Lemma testbit_succ_r a (b:bool) n : 0<=n -> testbit (2*a+b) (succ n) = testbit a n. Proof. destruct b; simpl; rewrite ?add_0_r. now apply testbit_odd_succ. now apply testbit_even_succ. Qed. (** Alternative caracterisations of [testbit] *) (** This concise equation could have been taken as specification for testbit in the interface, but it would have been hard to implement with little initial knowledge about div and mod *) Lemma testbit_spec' a n : 0<=n -> a.[n] == (a / 2^n) mod 2. Proof. intro Hn. revert a. apply le_ind with (4:=Hn). solve_proper. intros a. nzsimpl. destruct (exists_div2 a) as (a' & b & H). rewrite H at 1. rewrite testbit_0_r. apply mod_unique with a'; trivial. left. destruct b; split; simpl; order'. clear n Hn. intros n Hn IH a. destruct (exists_div2 a) as (a' & b & H). rewrite H at 1. rewrite testbit_succ_r, IH by trivial. f_equiv. rewrite pow_succ_r, <- div_div by order_pos. f_equiv. apply div_unique with b; trivial. left. destruct b; split; simpl; order'. Qed. (** This caracterisation that uses only basic operations and power was initially taken as specification for testbit. We describe [a] as having a low part and a high part, with the corresponding bit in the middle. This caracterisation is moderatly complex to implement, but also moderately usable... *) Lemma testbit_spec a n : 0<=n -> exists l h, 0<=l<2^n /\ a == l + (a.[n] + 2*h)*2^n. Proof. intro Hn. exists (a mod 2^n). exists (a / 2^n / 2). split. apply mod_pos_bound; order_pos. rewrite add_comm, mul_comm, (add_comm a.[n]). rewrite (div_mod a (2^n)) at 1 by order_nz. do 2 f_equiv. 
rewrite testbit_spec' by trivial. apply div_mod. order'. Qed. Lemma testbit_true : forall a n, 0<=n -> (a.[n] = true <-> (a / 2^n) mod 2 == 1). Proof. intros a n Hn. rewrite <- testbit_spec' by trivial. destruct a.[n]; split; simpl; now try order'. Qed. Lemma testbit_false : forall a n, 0<=n -> (a.[n] = false <-> (a / 2^n) mod 2 == 0). Proof. intros a n Hn. rewrite <- testbit_spec' by trivial. destruct a.[n]; split; simpl; now try order'. Qed. Lemma testbit_eqb : forall a n, 0<=n -> a.[n] = eqb ((a / 2^n) mod 2) 1. Proof. intros a n Hn. apply eq_true_iff_eq. now rewrite testbit_true, eqb_eq. Qed. (** Results about the injection [b2z] *) Lemma b2z_inj : forall (a0 b0:bool), a0 == b0 -> a0 = b0. Proof. intros [|] [|]; simpl; trivial; order'. Qed. Lemma add_b2z_double_div2 : forall (a0:bool) a, (a0+2*a)/2 == a. Proof. intros a0 a. rewrite mul_comm, div_add by order'. now rewrite div_small, add_0_l by (destruct a0; split; simpl; order'). Qed. Lemma add_b2z_double_bit0 : forall (a0:bool) a, (a0+2*a).[0] = a0. Proof. intros a0 a. apply b2z_inj. rewrite testbit_spec' by order. nzsimpl. rewrite mul_comm, mod_add by order'. now rewrite mod_small by (destruct a0; split; simpl; order'). Qed. Lemma b2z_div2 : forall (a0:bool), a0/2 == 0. Proof. intros a0. rewrite <- (add_b2z_double_div2 a0 0). now nzsimpl. Qed. Lemma b2z_bit0 : forall (a0:bool), a0.[0] = a0. Proof. intros a0. rewrite <- (add_b2z_double_bit0 a0 0) at 2. now nzsimpl. Qed. (** The specification of testbit by low and high parts is complete *) Lemma testbit_unique : forall a n (a0:bool) l h, 0<=l<2^n -> a == l + (a0 + 2*h)*2^n -> a.[n] = a0. Proof. intros a n a0 l h Hl EQ. assert (0<=n). destruct (le_gt_cases 0 n) as [Hn|Hn]; trivial. rewrite pow_neg_r in Hl by trivial. destruct Hl; order. apply b2z_inj. rewrite testbit_spec' by trivial. symmetry. apply mod_unique with h. left; destruct a0; simpl; split; order'. symmetry. apply div_unique with l. now left. now rewrite add_comm, (add_comm _ a0), mul_comm. Qed. (** All bits of number 0 are 0 *) Lemma bits_0 : forall n, 0.[n] = false. Proof. intros n. destruct (le_gt_cases 0 n). apply testbit_false; trivial. nzsimpl; order_nz. now apply testbit_neg_r. Qed. (** For negative numbers, we are actually doing two's complement *) Lemma bits_opp : forall a n, 0<=n -> (-a).[n] = negb (P a).[n]. Proof. intros a n Hn. destruct (testbit_spec (-a) n Hn) as (l & h & Hl & EQ). fold (b2z (-a).[n]) in EQ. apply negb_sym. apply testbit_unique with (2^n-l-1) (-h-1). split. apply lt_succ_r. rewrite sub_1_r, succ_pred. now apply lt_0_sub. apply le_succ_l. rewrite sub_1_r, succ_pred. apply le_sub_le_add_r. rewrite <- (add_0_r (2^n)) at 1. now apply add_le_mono_l. rewrite <- add_sub_swap, sub_1_r. f_equiv. apply opp_inj. rewrite opp_add_distr, opp_sub_distr. rewrite (add_comm _ l), <- add_assoc. rewrite EQ at 1. apply add_cancel_l. rewrite <- opp_add_distr. rewrite <- (mul_1_l (2^n)) at 2. rewrite <- mul_add_distr_r. rewrite <- mul_opp_l. f_equiv. rewrite !opp_add_distr. rewrite <- mul_opp_r. rewrite opp_sub_distr, opp_involutive. rewrite (add_comm h). rewrite mul_add_distr_l. rewrite !add_assoc. apply add_cancel_r. rewrite mul_1_r. rewrite add_comm, add_assoc, !add_opp_r, sub_1_r, two_succ, pred_succ. destruct (-a).[n]; simpl. now rewrite sub_0_r. now nzsimpl'. Qed. (** All bits of number (-1) are 1 *) Lemma bits_m1 : forall n, 0<=n -> (-1).[n] = true. Proof. intros. now rewrite bits_opp, one_succ, pred_succ, bits_0. Qed. 
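(** Worked example (numerals read in e.g. [Z]) : with [a == 13],
    [testbit_spec'] gives [13.[0] = true], [13.[1] = false],
    [13.[2] = true], [13.[3] = true] and [13.[n] = false] for
    [3 < n], i.e. the binary digits of [13 = 8+4+1]. For a negative
    number, [bits_opp] yields the two's complement stream :
    [(-13).[n] = negb (12.[n])], hence [(-13).[0] = true],
    [(-13).[1] = true], [(-13).[2] = false], [(-13).[3] = false] and
    [(-13).[n] = true] for [3 < n], consistent with
    [((-13) / 2^n) mod 2] computed with Floor division. *)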
(** Various ways to refer to the lowest bit of a number *) Lemma bit0_odd : forall a, a.[0] = odd a. Proof. intros. symmetry. destruct (exists_div2 a) as (a' & b & EQ). rewrite EQ, testbit_0_r, add_comm, odd_add_mul_2. destruct b; simpl; apply odd_1 || apply odd_0. Qed. Lemma bit0_eqb : forall a, a.[0] = eqb (a mod 2) 1. Proof. intros a. rewrite testbit_eqb by order. now nzsimpl. Qed. Lemma bit0_mod : forall a, a.[0] == a mod 2. Proof. intros a. rewrite testbit_spec' by order. now nzsimpl. Qed. (** Hence testing a bit is equivalent to shifting and testing parity *) Lemma testbit_odd : forall a n, a.[n] = odd (a>>n). Proof. intros. now rewrite <- bit0_odd, shiftr_spec, add_0_l. Qed. (** [log2] gives the highest nonzero bit of positive numbers *) Lemma bit_log2 : forall a, 0 a.[log2 a] = true. Proof. intros a Ha. assert (Ha' := log2_nonneg a). destruct (log2_spec_alt a Ha) as (r & EQ & Hr). rewrite EQ at 1. rewrite testbit_true, add_comm by trivial. rewrite <- (mul_1_l (2^log2 a)) at 1. rewrite div_add by order_nz. rewrite div_small; trivial. rewrite add_0_l. apply mod_small. split; order'. Qed. Lemma bits_above_log2 : forall a n, 0<=a -> log2 a < n -> a.[n] = false. Proof. intros a n Ha H. assert (Hn : 0<=n). transitivity (log2 a). apply log2_nonneg. order'. rewrite testbit_false by trivial. rewrite div_small. nzsimpl; order'. split. order. apply log2_lt_cancel. now rewrite log2_pow2. Qed. (** Hence the number of bits of [a] is [1+log2 a] (see [Pos.size_nat] and [Pos.size]). *) (** For negative numbers, things are the other ways around: log2 gives the highest zero bit (for numbers below -1). *) Lemma bit_log2_neg : forall a, a < -1 -> a.[log2 (P (-a))] = false. Proof. intros a Ha. rewrite <- (opp_involutive a) at 1. rewrite bits_opp. apply negb_false_iff. apply bit_log2. apply opp_lt_mono in Ha. rewrite opp_involutive in Ha. apply lt_succ_lt_pred. now rewrite <- one_succ. apply log2_nonneg. Qed. Lemma bits_above_log2_neg : forall a n, a < 0 -> log2 (P (-a)) < n -> a.[n] = true. Proof. intros a n Ha H. assert (Hn : 0<=n). transitivity (log2 (P (-a))). apply log2_nonneg. order'. rewrite <- (opp_involutive a), bits_opp, negb_true_iff by trivial. apply bits_above_log2; trivial. now rewrite <- opp_succ, opp_nonneg_nonpos, le_succ_l. Qed. (** Accesing a high enough bit of a number gives its sign *) Lemma bits_iff_nonneg : forall a n, log2 (abs a) < n -> (0<=a <-> a.[n] = false). Proof. intros a n Hn. split; intros H. rewrite abs_eq in Hn; trivial. now apply bits_above_log2. destruct (le_gt_cases 0 a); trivial. rewrite abs_neq in Hn by order. rewrite bits_above_log2_neg in H; try easy. apply le_lt_trans with (log2 (-a)); trivial. apply log2_le_mono. apply le_pred_l. Qed. Lemma bits_iff_nonneg' : forall a, 0<=a <-> a.[S (log2 (abs a))] = false. Proof. intros. apply bits_iff_nonneg. apply lt_succ_diag_r. Qed. Lemma bits_iff_nonneg_ex : forall a, 0<=a <-> (exists k, forall m, k a.[m] = false). Proof. intros a. split. intros Ha. exists (log2 a). intros m Hm. now apply bits_above_log2. intros (k,Hk). destruct (le_gt_cases k (log2 (abs a))). now apply bits_iff_nonneg', Hk, lt_succ_r. apply (bits_iff_nonneg a (S k)). now apply lt_succ_r, lt_le_incl. apply Hk. apply lt_succ_diag_r. Qed. Lemma bits_iff_neg : forall a n, log2 (abs a) < n -> (a<0 <-> a.[n] = true). Proof. intros a n Hn. now rewrite lt_nge, <- not_false_iff_true, (bits_iff_nonneg a n). Qed. Lemma bits_iff_neg' : forall a, a<0 <-> a.[S (log2 (abs a))] = true. Proof. intros. apply bits_iff_neg. apply lt_succ_diag_r. Qed. 
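(** Continuing with [13] and [-13] (numerals read in e.g. [Z]) :
    [log2 13 == 3], [13.[3] = true] by [bit_log2] and
    [13.[n] = false] for [3 < n] by [bits_above_log2]; symmetrically
    [log2 (P 13) == log2 12 == 3], [(-13).[3] = false] by
    [bit_log2_neg] and [(-13).[n] = true] for [3 < n] by
    [bits_above_log2_neg]. Testing bit [S (log2 (abs a)) == 4]
    therefore reads off the sign : [13.[4] = false] while
    [(-13).[4] = true]. *)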
Lemma bits_iff_neg_ex : forall a, a<0 <-> (exists k, forall m, k a.[m] = true). Proof. intros a. split. intros Ha. exists (log2 (P (-a))). intros m Hm. now apply bits_above_log2_neg. intros (k,Hk). destruct (le_gt_cases k (log2 (abs a))). now apply bits_iff_neg', Hk, lt_succ_r. apply (bits_iff_neg a (S k)). now apply lt_succ_r, lt_le_incl. apply Hk. apply lt_succ_diag_r. Qed. (** Testing bits after division or multiplication by a power of two *) Lemma div2_bits : forall a n, 0<=n -> (a/2).[n] = a.[S n]. Proof. intros a n Hn. apply eq_true_iff_eq. rewrite 2 testbit_true by order_pos. rewrite pow_succ_r by trivial. now rewrite div_div by order_pos. Qed. Lemma div_pow2_bits : forall a n m, 0<=n -> 0<=m -> (a/2^n).[m] = a.[m+n]. Proof. intros a n m Hn. revert a m. apply le_ind with (4:=Hn). solve_proper. intros a m Hm. now nzsimpl. clear n Hn. intros n Hn IH a m Hm. nzsimpl; trivial. rewrite <- div_div by order_pos. now rewrite IH, div2_bits by order_pos. Qed. Lemma double_bits_succ : forall a n, (2*a).[S n] = a.[n]. Proof. intros a n. destruct (le_gt_cases 0 n) as [Hn|Hn]. now rewrite <- div2_bits, mul_comm, div_mul by order'. rewrite (testbit_neg_r a n Hn). apply le_succ_l in Hn. le_elim Hn. now rewrite testbit_neg_r. now rewrite Hn, bit0_odd, odd_mul, odd_2. Qed. Lemma double_bits : forall a n, (2*a).[n] = a.[P n]. Proof. intros a n. rewrite <- (succ_pred n) at 1. apply double_bits_succ. Qed. Lemma mul_pow2_bits_add : forall a n m, 0<=n -> (a*2^n).[n+m] = a.[m]. Proof. intros a n m Hn. revert a m. apply le_ind with (4:=Hn). solve_proper. intros a m. now nzsimpl. clear n Hn. intros n Hn IH a m. nzsimpl; trivial. rewrite mul_assoc, (mul_comm _ 2), <- mul_assoc. now rewrite double_bits_succ. Qed. Lemma mul_pow2_bits : forall a n m, 0<=n -> (a*2^n).[m] = a.[m-n]. Proof. intros. rewrite <- (add_simpl_r m n) at 1. rewrite add_sub_swap, add_comm. now apply mul_pow2_bits_add. Qed. Lemma mul_pow2_bits_low : forall a n m, m (a*2^n).[m] = false. Proof. intros. destruct (le_gt_cases 0 n). rewrite mul_pow2_bits by trivial. apply testbit_neg_r. now apply lt_sub_0. now rewrite pow_neg_r, mul_0_r, bits_0. Qed. (** Selecting the low part of a number can be done by a modulo *) Lemma mod_pow2_bits_high : forall a n m, 0<=n<=m -> (a mod 2^n).[m] = false. Proof. intros a n m (Hn,H). destruct (mod_pos_bound a (2^n)) as [LE LT]. order_pos. le_elim LE. apply bits_above_log2; try order. apply lt_le_trans with n; trivial. apply log2_lt_pow2; trivial. now rewrite <- LE, bits_0. Qed. Lemma mod_pow2_bits_low : forall a n m, m (a mod 2^n).[m] = a.[m]. Proof. intros a n m H. destruct (le_gt_cases 0 m) as [Hm|Hm]; [|now rewrite !testbit_neg_r]. rewrite testbit_eqb; trivial. rewrite <- (mod_add _ (2^(P (n-m))*(a/2^n))) by order'. rewrite <- div_add by order_nz. rewrite (mul_comm _ 2), mul_assoc, <- pow_succ_r, succ_pred. rewrite mul_comm, mul_assoc, <- pow_add_r, (add_comm m), sub_add; trivial. rewrite add_comm, <- div_mod by order_nz. symmetry. apply testbit_eqb; trivial. apply le_0_sub; order. now apply lt_le_pred, lt_0_sub. Qed. (** We now prove that having the same bits implies equality. For that we use a notion of equality over functional streams of bits. *) Definition eqf (f g:t -> bool) := forall n:t, f n = g n. Instance eqf_equiv : Equivalence eqf. Proof. split; congruence. Qed. Local Infix "===" := eqf (at level 70, no associativity). Instance testbit_eqf : Proper (eq==>eqf) testbit. Proof. intros a a' Ha n. now rewrite Ha. Qed. (** Only zero corresponds to the always-false stream. 
*) Lemma bits_inj_0 : forall a, (forall n, a.[n] = false) -> a == 0. Proof. intros a H. destruct (lt_trichotomy a 0) as [Ha|[Ha|Ha]]; trivial. apply (bits_above_log2_neg a (S (log2 (P (-a))))) in Ha. now rewrite H in Ha. apply lt_succ_diag_r. apply bit_log2 in Ha. now rewrite H in Ha. Qed. (** If two numbers produce the same stream of bits, they are equal. *) Lemma bits_inj : forall a b, testbit a === testbit b -> a == b. Proof. assert (AUX : forall n, 0<=n -> forall a b, 0<=a<2^n -> testbit a === testbit b -> a == b). intros n Hn. apply le_ind with (4:=Hn). solve_proper. intros a b Ha H. rewrite pow_0_r, one_succ, lt_succ_r in Ha. assert (Ha' : a == 0) by (destruct Ha; order). rewrite Ha' in *. symmetry. apply bits_inj_0. intros m. now rewrite <- H, bits_0. clear n Hn. intros n Hn IH a b (Ha,Ha') H. rewrite (div_mod a 2), (div_mod b 2) by order'. f_equiv; [ | now rewrite <- 2 bit0_mod, H]. f_equiv. apply IH. split. apply div_pos; order'. apply div_lt_upper_bound. order'. now rewrite <- pow_succ_r. intros m. destruct (le_gt_cases 0 m). rewrite 2 div2_bits by trivial. apply H. now rewrite 2 testbit_neg_r. intros a b H. destruct (le_gt_cases 0 a) as [Ha|Ha]. apply (AUX a); trivial. split; trivial. apply pow_gt_lin_r; order'. apply succ_inj, opp_inj. assert (0 <= - S a). apply opp_le_mono. now rewrite opp_involutive, opp_0, le_succ_l. apply (AUX (-(S a))); trivial. split; trivial. apply pow_gt_lin_r; order'. intros m. destruct (le_gt_cases 0 m). now rewrite 2 bits_opp, 2 pred_succ, H. now rewrite 2 testbit_neg_r. Qed. Lemma bits_inj_iff : forall a b, testbit a === testbit b <-> a == b. Proof. split. apply bits_inj. intros EQ; now rewrite EQ. Qed. (** In fact, checking the bits at positive indexes is enough. *) Lemma bits_inj' : forall a b, (forall n, 0<=n -> a.[n] = b.[n]) -> a == b. Proof. intros a b H. apply bits_inj. intros n. destruct (le_gt_cases 0 n). now apply H. now rewrite 2 testbit_neg_r. Qed. Lemma bits_inj_iff' : forall a b, (forall n, 0<=n -> a.[n] = b.[n]) <-> a == b. Proof. split. apply bits_inj'. intros EQ n Hn; now rewrite EQ. Qed. Ltac bitwise := apply bits_inj'; intros ?m ?Hm; autorewrite with bitwise. Hint Rewrite lxor_spec lor_spec land_spec ldiff_spec bits_0 : bitwise. (** The streams of bits that correspond to a numbers are exactly the ones which are stationary after some point. *) Lemma are_bits : forall (f:t->bool), Proper (eq==>Logic.eq) f -> ((exists n, forall m, 0<=m -> f m = n.[m]) <-> (exists k, forall m, k<=m -> f m = f k)). Proof. intros f Hf. split. intros (a,H). destruct (le_gt_cases 0 a). exists (S (log2 a)). intros m Hm. apply le_succ_l in Hm. rewrite 2 H, 2 bits_above_log2; trivial using lt_succ_diag_r. order_pos. apply le_trans with (log2 a); order_pos. exists (S (log2 (P (-a)))). intros m Hm. apply le_succ_l in Hm. rewrite 2 H, 2 bits_above_log2_neg; trivial using lt_succ_diag_r. order_pos. apply le_trans with (log2 (P (-a))); order_pos. intros (k,Hk). destruct (lt_ge_cases k 0) as [LT|LE]. case_eq (f 0); intros H0. exists (-1). intros m Hm. rewrite bits_m1, Hk by order. symmetry; rewrite <- H0. apply Hk; order. exists 0. intros m Hm. rewrite bits_0, Hk by order. symmetry; rewrite <- H0. apply Hk; order. revert f Hf Hk. apply le_ind with (4:=LE). (* compat : solve_proper fails here *) apply proper_sym_impl_iff. exact eq_sym. clear k LE. intros k k' Hk IH f Hf H. apply IH; trivial. now setoid_rewrite Hk. (* /compat *) intros f Hf H0. destruct (f 0). exists (-1). intros m Hm. now rewrite bits_m1, H0. exists 0. intros m Hm. now rewrite bits_0, H0. 
clear k LE. intros k LE IH f Hf Hk. destruct (IH (fun m => f (S m))) as (n, Hn). solve_proper. intros m Hm. apply Hk. now rewrite <- succ_le_mono. exists (f 0 + 2*n). intros m Hm. le_elim Hm. rewrite <- (succ_pred m), Hn, <- div2_bits. rewrite mul_comm, div_add, b2z_div2, add_0_l; trivial. order'. now rewrite <- lt_succ_r, succ_pred. now rewrite <- lt_succ_r, succ_pred. rewrite <- Hm. symmetry. apply add_b2z_double_bit0. Qed. (** * Properties of shifts *) (** First, a unified specification for [shiftl] : the [shiftl_spec] below (combined with [testbit_neg_r]) is equivalent to [shiftl_spec_low] and [shiftl_spec_high]. *) Lemma shiftl_spec : forall a n m, 0<=m -> (a << n).[m] = a.[m-n]. Proof. intros. destruct (le_gt_cases n m). now apply shiftl_spec_high. rewrite shiftl_spec_low, testbit_neg_r; trivial. now apply lt_sub_0. Qed. (** A shiftl by a negative number is a shiftr, and vice-versa *) Lemma shiftr_opp_r : forall a n, a >> (-n) == a << n. Proof. intros. bitwise. now rewrite shiftr_spec, shiftl_spec, add_opp_r. Qed. Lemma shiftl_opp_r : forall a n, a << (-n) == a >> n. Proof. intros. bitwise. now rewrite shiftr_spec, shiftl_spec, sub_opp_r. Qed. (** Shifts correspond to multiplication or division by a power of two *) Lemma shiftr_div_pow2 : forall a n, 0<=n -> a >> n == a / 2^n. Proof. intros. bitwise. now rewrite shiftr_spec, div_pow2_bits. Qed. Lemma shiftr_mul_pow2 : forall a n, n<=0 -> a >> n == a * 2^(-n). Proof. intros. bitwise. rewrite shiftr_spec, mul_pow2_bits; trivial. now rewrite sub_opp_r. now apply opp_nonneg_nonpos. Qed. Lemma shiftl_mul_pow2 : forall a n, 0<=n -> a << n == a * 2^n. Proof. intros. bitwise. now rewrite shiftl_spec, mul_pow2_bits. Qed. Lemma shiftl_div_pow2 : forall a n, n<=0 -> a << n == a / 2^(-n). Proof. intros. bitwise. rewrite shiftl_spec, div_pow2_bits; trivial. now rewrite add_opp_r. now apply opp_nonneg_nonpos. Qed. (** Shifts are morphisms *) Instance shiftr_wd : Proper (eq==>eq==>eq) shiftr. Proof. intros a a' Ha n n' Hn. destruct (le_ge_cases n 0) as [H|H]; assert (H':=H); rewrite Hn in H'. now rewrite 2 shiftr_mul_pow2, Ha, Hn. now rewrite 2 shiftr_div_pow2, Ha, Hn. Qed. Instance shiftl_wd : Proper (eq==>eq==>eq) shiftl. Proof. intros a a' Ha n n' Hn. now rewrite <- 2 shiftr_opp_r, Ha, Hn. Qed. (** We could also have specified shiftl with an addition on the left. *) Lemma shiftl_spec_alt : forall a n m, 0<=n -> (a << n).[m+n] = a.[m]. Proof. intros. now rewrite shiftl_mul_pow2, mul_pow2_bits, add_simpl_r. Qed. (** Chaining several shifts. The only case for which there isn't any simple expression is a true shiftr followed by a true shiftl. *) Lemma shiftl_shiftl : forall a n m, 0<=n -> (a << n) << m == a << (n+m). Proof. intros a n p Hn. bitwise. rewrite 2 (shiftl_spec _ _ m) by trivial. rewrite add_comm, sub_add_distr. destruct (le_gt_cases 0 (m-p)) as [H|H]. now rewrite shiftl_spec. rewrite 2 testbit_neg_r; trivial. apply lt_sub_0. now apply lt_le_trans with 0. Qed. Lemma shiftr_shiftl_l : forall a n m, 0<=n -> (a << n) >> m == a << (n-m). Proof. intros. now rewrite <- shiftl_opp_r, shiftl_shiftl, add_opp_r. Qed. Lemma shiftr_shiftl_r : forall a n m, 0<=n -> (a << n) >> m == a >> (m-n). Proof. intros. now rewrite <- 2 shiftl_opp_r, shiftl_shiftl, opp_sub_distr, add_comm. Qed. Lemma shiftr_shiftr : forall a n m, 0<=m -> (a >> n) >> m == a >> (n+m). Proof. intros a n p Hn. bitwise. rewrite 3 shiftr_spec; trivial. now rewrite (add_comm n p), add_assoc. now apply add_nonneg_nonneg. Qed. 
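(** Added illustration (not part of the original development): on the
    concrete implementation [BinInt.Z] of these axioms, the lemmas
    [shiftr_div_pow2], [shiftl_mul_pow2] and [shiftr_shiftr] above can be
    checked on small constants. The use of [ZArith] below is an assumption
    of this sketch only; the present file is purely abstract.
<<
Require Import ZArith. Open Scope Z_scope.
Goal Z.shiftr 12 2 = 12 / 2 ^ 2.                   (* shiftr_div_pow2 : 3 = 3 *)
Proof. reflexivity. Qed.
Goal Z.shiftl 3 2 = 3 * 2 ^ 2.                     (* shiftl_mul_pow2 : 12 = 12 *)
Proof. reflexivity. Qed.
Goal Z.shiftr (Z.shiftr 12 1) 2 = Z.shiftr 12 3.   (* shiftr_shiftr *)
Proof. reflexivity. Qed.
>>
*)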
(** shifts and constants *) Lemma shiftl_1_l : forall n, 1 << n == 2^n. Proof. intros n. destruct (le_gt_cases 0 n). now rewrite shiftl_mul_pow2, mul_1_l. rewrite shiftl_div_pow2, div_1_l, pow_neg_r; try order. apply pow_gt_1. order'. now apply opp_pos_neg. Qed. Lemma shiftl_0_r : forall a, a << 0 == a. Proof. intros. rewrite shiftl_mul_pow2 by order. now nzsimpl. Qed. Lemma shiftr_0_r : forall a, a >> 0 == a. Proof. intros. now rewrite <- shiftl_opp_r, opp_0, shiftl_0_r. Qed. Lemma shiftl_0_l : forall n, 0 << n == 0. Proof. intros. destruct (le_ge_cases 0 n). rewrite shiftl_mul_pow2 by trivial. now nzsimpl. rewrite shiftl_div_pow2 by trivial. rewrite <- opp_nonneg_nonpos in H. nzsimpl; order_nz. Qed. Lemma shiftr_0_l : forall n, 0 >> n == 0. Proof. intros. now rewrite <- shiftl_opp_r, shiftl_0_l. Qed. Lemma shiftl_eq_0_iff : forall a n, 0<=n -> (a << n == 0 <-> a == 0). Proof. intros a n Hn. rewrite shiftl_mul_pow2 by trivial. rewrite eq_mul_0. split. intros [H | H]; trivial. contradict H; order_nz. intros H. now left. Qed. Lemma shiftr_eq_0_iff : forall a n, a >> n == 0 <-> a==0 \/ (0 log2 a < n -> a >> n == 0. Proof. intros a n Ha H. apply shiftr_eq_0_iff. le_elim Ha. right. now split. now left. Qed. (** Properties of [div2]. *) Lemma div2_div : forall a, div2 a == a/2. Proof. intros. rewrite div2_spec, shiftr_div_pow2. now nzsimpl. order'. Qed. Instance div2_wd : Proper (eq==>eq) div2. Proof. intros a a' Ha. now rewrite 2 div2_div, Ha. Qed. Lemma div2_odd : forall a, a == 2*(div2 a) + odd a. Proof. intros a. rewrite div2_div, <- bit0_odd, bit0_mod. apply div_mod. order'. Qed. (** Properties of [lxor] and others, directly deduced from properties of [xorb] and others. *) Instance lxor_wd : Proper (eq ==> eq ==> eq) lxor. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Instance land_wd : Proper (eq ==> eq ==> eq) land. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Instance lor_wd : Proper (eq ==> eq ==> eq) lor. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Instance ldiff_wd : Proper (eq ==> eq ==> eq) ldiff. Proof. intros a a' Ha b b' Hb. bitwise. now rewrite Ha, Hb. Qed. Lemma lxor_eq : forall a a', lxor a a' == 0 -> a == a'. Proof. intros a a' H. bitwise. apply xorb_eq. now rewrite <- lxor_spec, H, bits_0. Qed. Lemma lxor_nilpotent : forall a, lxor a a == 0. Proof. intros. bitwise. apply xorb_nilpotent. Qed. Lemma lxor_eq_0_iff : forall a a', lxor a a' == 0 <-> a == a'. Proof. split. apply lxor_eq. intros EQ; rewrite EQ; apply lxor_nilpotent. Qed. Lemma lxor_0_l : forall a, lxor 0 a == a. Proof. intros. bitwise. apply xorb_false_l. Qed. Lemma lxor_0_r : forall a, lxor a 0 == a. Proof. intros. bitwise. apply xorb_false_r. Qed. Lemma lxor_comm : forall a b, lxor a b == lxor b a. Proof. intros. bitwise. apply xorb_comm. Qed. Lemma lxor_assoc : forall a b c, lxor (lxor a b) c == lxor a (lxor b c). Proof. intros. bitwise. apply xorb_assoc. Qed. Lemma lor_0_l : forall a, lor 0 a == a. Proof. intros. bitwise. trivial. Qed. Lemma lor_0_r : forall a, lor a 0 == a. Proof. intros. bitwise. apply orb_false_r. Qed. Lemma lor_comm : forall a b, lor a b == lor b a. Proof. intros. bitwise. apply orb_comm. Qed. Lemma lor_assoc : forall a b c, lor a (lor b c) == lor (lor a b) c. Proof. intros. bitwise. apply orb_assoc. Qed. Lemma lor_diag : forall a, lor a a == a. Proof. intros. bitwise. apply orb_diag. Qed. Lemma lor_eq_0_l : forall a b, lor a b == 0 -> a == 0. Proof. intros a b H. bitwise. apply (orb_false_iff a.[m] b.[m]). 
now rewrite <- lor_spec, H, bits_0. Qed. Lemma lor_eq_0_iff : forall a b, lor a b == 0 <-> a == 0 /\ b == 0. Proof. intros a b. split. split. now apply lor_eq_0_l in H. rewrite lor_comm in H. now apply lor_eq_0_l in H. intros (EQ,EQ'). now rewrite EQ, lor_0_l. Qed. Lemma land_0_l : forall a, land 0 a == 0. Proof. intros. bitwise. trivial. Qed. Lemma land_0_r : forall a, land a 0 == 0. Proof. intros. bitwise. apply andb_false_r. Qed. Lemma land_comm : forall a b, land a b == land b a. Proof. intros. bitwise. apply andb_comm. Qed. Lemma land_assoc : forall a b c, land a (land b c) == land (land a b) c. Proof. intros. bitwise. apply andb_assoc. Qed. Lemma land_diag : forall a, land a a == a. Proof. intros. bitwise. apply andb_diag. Qed. Lemma ldiff_0_l : forall a, ldiff 0 a == 0. Proof. intros. bitwise. trivial. Qed. Lemma ldiff_0_r : forall a, ldiff a 0 == a. Proof. intros. bitwise. now rewrite andb_true_r. Qed. Lemma ldiff_diag : forall a, ldiff a a == 0. Proof. intros. bitwise. apply andb_negb_r. Qed. Lemma lor_land_distr_l : forall a b c, lor (land a b) c == land (lor a c) (lor b c). Proof. intros. bitwise. apply orb_andb_distrib_l. Qed. Lemma lor_land_distr_r : forall a b c, lor a (land b c) == land (lor a b) (lor a c). Proof. intros. bitwise. apply orb_andb_distrib_r. Qed. Lemma land_lor_distr_l : forall a b c, land (lor a b) c == lor (land a c) (land b c). Proof. intros. bitwise. apply andb_orb_distrib_l. Qed. Lemma land_lor_distr_r : forall a b c, land a (lor b c) == lor (land a b) (land a c). Proof. intros. bitwise. apply andb_orb_distrib_r. Qed. Lemma ldiff_ldiff_l : forall a b c, ldiff (ldiff a b) c == ldiff a (lor b c). Proof. intros. bitwise. now rewrite negb_orb, andb_assoc. Qed. Lemma lor_ldiff_and : forall a b, lor (ldiff a b) (land a b) == a. Proof. intros. bitwise. now rewrite <- andb_orb_distrib_r, orb_comm, orb_negb_r, andb_true_r. Qed. Lemma land_ldiff : forall a b, land (ldiff a b) b == 0. Proof. intros. bitwise. now rewrite <-andb_assoc, (andb_comm (negb _)), andb_negb_r, andb_false_r. Qed. (** Properties of [setbit] and [clearbit] *) Definition setbit a n := lor a (1 << n). Definition clearbit a n := ldiff a (1 << n). Lemma setbit_spec' : forall a n, setbit a n == lor a (2^n). Proof. intros. unfold setbit. now rewrite shiftl_1_l. Qed. Lemma clearbit_spec' : forall a n, clearbit a n == ldiff a (2^n). Proof. intros. unfold clearbit. now rewrite shiftl_1_l. Qed. Instance setbit_wd : Proper (eq==>eq==>eq) setbit. Proof. unfold setbit. solve_proper. Qed. Instance clearbit_wd : Proper (eq==>eq==>eq) clearbit. Proof. unfold clearbit. solve_proper. Qed. Lemma pow2_bits_true : forall n, 0<=n -> (2^n).[n] = true. Proof. intros. rewrite <- (mul_1_l (2^n)). now rewrite mul_pow2_bits, sub_diag, bit0_odd, odd_1. Qed. Lemma pow2_bits_false : forall n m, n~=m -> (2^n).[m] = false. Proof. intros. destruct (le_gt_cases 0 n); [|now rewrite pow_neg_r, bits_0]. destruct (le_gt_cases n m). rewrite <- (mul_1_l (2^n)), mul_pow2_bits; trivial. rewrite <- (succ_pred (m-n)), <- div2_bits. now rewrite div_small, bits_0 by (split; order'). rewrite <- lt_succ_r, succ_pred, lt_0_sub. order. rewrite <- (mul_1_l (2^n)), mul_pow2_bits_low; trivial. Qed. Lemma pow2_bits_eqb : forall n m, 0<=n -> (2^n).[m] = eqb n m. Proof. intros n m Hn. apply eq_true_iff_eq. rewrite eqb_eq. split. destruct (eq_decidable n m) as [H|H]. trivial. now rewrite (pow2_bits_false _ _ H). intros EQ. rewrite EQ. apply pow2_bits_true; order. Qed. Lemma setbit_eqb : forall a n m, 0<=n -> (setbit a n).[m] = eqb n m || a.[m]. Proof. 
intros. now rewrite setbit_spec', lor_spec, pow2_bits_eqb, orb_comm. Qed. Lemma setbit_iff : forall a n m, 0<=n -> ((setbit a n).[m] = true <-> n==m \/ a.[m] = true). Proof. intros. now rewrite setbit_eqb, orb_true_iff, eqb_eq. Qed. Lemma setbit_eq : forall a n, 0<=n -> (setbit a n).[n] = true. Proof. intros. apply setbit_iff; trivial. now left. Qed. Lemma setbit_neq : forall a n m, 0<=n -> n~=m -> (setbit a n).[m] = a.[m]. Proof. intros a n m Hn H. rewrite setbit_eqb; trivial. rewrite <- eqb_eq in H. apply not_true_is_false in H. now rewrite H. Qed. Lemma clearbit_eqb : forall a n m, (clearbit a n).[m] = a.[m] && negb (eqb n m). Proof. intros. destruct (le_gt_cases 0 m); [| now rewrite 2 testbit_neg_r]. rewrite clearbit_spec', ldiff_spec. f_equal. f_equal. destruct (le_gt_cases 0 n) as [Hn|Hn]. now apply pow2_bits_eqb. symmetry. rewrite pow_neg_r, bits_0, <- not_true_iff_false, eqb_eq; order. Qed. Lemma clearbit_iff : forall a n m, (clearbit a n).[m] = true <-> a.[m] = true /\ n~=m. Proof. intros. rewrite clearbit_eqb, andb_true_iff, <- eqb_eq. now rewrite negb_true_iff, not_true_iff_false. Qed. Lemma clearbit_eq : forall a n, (clearbit a n).[n] = false. Proof. intros. rewrite clearbit_eqb, (proj2 (eqb_eq _ _) (eq_refl n)). apply andb_false_r. Qed. Lemma clearbit_neq : forall a n m, n~=m -> (clearbit a n).[m] = a.[m]. Proof. intros a n m H. rewrite clearbit_eqb. rewrite <- eqb_eq in H. apply not_true_is_false in H. rewrite H. apply andb_true_r. Qed. (** Shifts of bitwise operations *) Lemma shiftl_lxor : forall a b n, (lxor a b) << n == lxor (a << n) (b << n). Proof. intros. bitwise. now rewrite !shiftl_spec, lxor_spec. Qed. Lemma shiftr_lxor : forall a b n, (lxor a b) >> n == lxor (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec, lxor_spec. Qed. Lemma shiftl_land : forall a b n, (land a b) << n == land (a << n) (b << n). Proof. intros. bitwise. now rewrite !shiftl_spec, land_spec. Qed. Lemma shiftr_land : forall a b n, (land a b) >> n == land (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec, land_spec. Qed. Lemma shiftl_lor : forall a b n, (lor a b) << n == lor (a << n) (b << n). Proof. intros. bitwise. now rewrite !shiftl_spec, lor_spec. Qed. Lemma shiftr_lor : forall a b n, (lor a b) >> n == lor (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec, lor_spec. Qed. Lemma shiftl_ldiff : forall a b n, (ldiff a b) << n == ldiff (a << n) (b << n). Proof. intros. bitwise. now rewrite !shiftl_spec, ldiff_spec. Qed. Lemma shiftr_ldiff : forall a b n, (ldiff a b) >> n == ldiff (a >> n) (b >> n). Proof. intros. bitwise. now rewrite !shiftr_spec, ldiff_spec. Qed. (** For integers, we do have a binary complement function *) Definition lnot a := P (-a). Instance lnot_wd : Proper (eq==>eq) lnot. Proof. unfold lnot. solve_proper. Qed. Lemma lnot_spec : forall a n, 0<=n -> (lnot a).[n] = negb a.[n]. Proof. intros. unfold lnot. rewrite <- (opp_involutive a) at 2. rewrite bits_opp, negb_involutive; trivial. Qed. Lemma lnot_involutive : forall a, lnot (lnot a) == a. Proof. intros a. bitwise. now rewrite 2 lnot_spec, negb_involutive. Qed. Lemma lnot_0 : lnot 0 == -1. Proof. unfold lnot. now rewrite opp_0, <- sub_1_r, sub_0_l. Qed. Lemma lnot_m1 : lnot (-1) == 0. Proof. unfold lnot. now rewrite opp_involutive, one_succ, pred_succ. Qed. (** Complement and other operations *) Lemma lor_m1_r : forall a, lor a (-1) == -1. Proof. intros. bitwise. now rewrite bits_m1, orb_true_r. Qed. Lemma lor_m1_l : forall a, lor (-1) a == -1. Proof. intros. 
now rewrite lor_comm, lor_m1_r. Qed. Lemma land_m1_r : forall a, land a (-1) == a. Proof. intros. bitwise. now rewrite bits_m1, andb_true_r. Qed. Lemma land_m1_l : forall a, land (-1) a == a. Proof. intros. now rewrite land_comm, land_m1_r. Qed. Lemma ldiff_m1_r : forall a, ldiff a (-1) == 0. Proof. intros. bitwise. now rewrite bits_m1, andb_false_r. Qed. Lemma ldiff_m1_l : forall a, ldiff (-1) a == lnot a. Proof. intros. bitwise. now rewrite lnot_spec, bits_m1. Qed. Lemma lor_lnot_diag : forall a, lor a (lnot a) == -1. Proof. intros a. bitwise. rewrite lnot_spec, bits_m1; trivial. now destruct a.[m]. Qed. Lemma add_lnot_diag : forall a, a + lnot a == -1. Proof. intros a. unfold lnot. now rewrite add_pred_r, add_opp_r, sub_diag, one_succ, opp_succ, opp_0. Qed. Lemma ldiff_land : forall a b, ldiff a b == land a (lnot b). Proof. intros. bitwise. now rewrite lnot_spec. Qed. Lemma land_lnot_diag : forall a, land a (lnot a) == 0. Proof. intros. now rewrite <- ldiff_land, ldiff_diag. Qed. Lemma lnot_lor : forall a b, lnot (lor a b) == land (lnot a) (lnot b). Proof. intros a b. bitwise. now rewrite !lnot_spec, lor_spec, negb_orb. Qed. Lemma lnot_land : forall a b, lnot (land a b) == lor (lnot a) (lnot b). Proof. intros a b. bitwise. now rewrite !lnot_spec, land_spec, negb_andb. Qed. Lemma lnot_ldiff : forall a b, lnot (ldiff a b) == lor (lnot a) b. Proof. intros a b. bitwise. now rewrite !lnot_spec, ldiff_spec, negb_andb, negb_involutive. Qed. Lemma lxor_lnot_lnot : forall a b, lxor (lnot a) (lnot b) == lxor a b. Proof. intros a b. bitwise. now rewrite !lnot_spec, xorb_negb_negb. Qed. Lemma lnot_lxor_l : forall a b, lnot (lxor a b) == lxor (lnot a) b. Proof. intros a b. bitwise. now rewrite !lnot_spec, !lxor_spec, negb_xorb_l. Qed. Lemma lnot_lxor_r : forall a b, lnot (lxor a b) == lxor a (lnot b). Proof. intros a b. bitwise. now rewrite !lnot_spec, !lxor_spec, negb_xorb_r. Qed. Lemma lxor_m1_r : forall a, lxor a (-1) == lnot a. Proof. intros. now rewrite <- (lxor_0_r (lnot a)), <- lnot_m1, lxor_lnot_lnot. Qed. Lemma lxor_m1_l : forall a, lxor (-1) a == lnot a. Proof. intros. now rewrite lxor_comm, lxor_m1_r. Qed. Lemma lxor_lor : forall a b, land a b == 0 -> lxor a b == lor a b. Proof. intros a b H. bitwise. assert (a.[m] && b.[m] = false) by now rewrite <- land_spec, H, bits_0. now destruct a.[m], b.[m]. Qed. Lemma lnot_shiftr : forall a n, 0<=n -> lnot (a >> n) == (lnot a) >> n. Proof. intros a n Hn. bitwise. now rewrite lnot_spec, 2 shiftr_spec, lnot_spec by order_pos. Qed. (** [(ones n)] is [2^n-1], the number with [n] digits 1 *) Definition ones n := P (1<eq) ones. Proof. unfold ones. solve_proper. Qed. Lemma ones_equiv : forall n, ones n == P (2^n). Proof. intros. unfold ones. destruct (le_gt_cases 0 n). now rewrite shiftl_mul_pow2, mul_1_l. f_equiv. rewrite pow_neg_r; trivial. rewrite <- shiftr_opp_r. apply shiftr_eq_0_iff. right; split. order'. rewrite log2_1. now apply opp_pos_neg. Qed. Lemma ones_add : forall n m, 0<=n -> 0<=m -> ones (m+n) == 2^m * ones n + ones m. Proof. intros n m Hn Hm. rewrite !ones_equiv. rewrite <- !sub_1_r, mul_sub_distr_l, mul_1_r, <- pow_add_r by trivial. rewrite add_sub_assoc, sub_add. reflexivity. Qed. Lemma ones_div_pow2 : forall n m, 0<=m<=n -> ones n / 2^m == ones (n-m). Proof. intros n m (Hm,H). symmetry. apply div_unique with (ones m). left. rewrite ones_equiv. split. rewrite <- lt_succ_r, succ_pred. order_pos. now rewrite <- le_succ_l, succ_pred. rewrite <- (sub_add m n) at 1. rewrite (add_comm _ m). apply ones_add; trivial. now apply le_0_sub. Qed. 
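(** Added illustration (not part of the original development): the comment
    above describes [ones n] as [2^n - 1], the number whose [n] low bits are
    all set. On the concrete [ZArith] implementation (an assumption of this
    sketch, not used by this abstract file), with [n = 4]:
<<
Require Import ZArith. Open Scope Z_scope.
Goal Z.pred (Z.shiftl 1 4) = 15.                       (* ones 4 = 0b1111 *)
Proof. reflexivity. Qed.
Goal Z.pred (Z.shiftl 1 4) / 2 ^ 1 = Z.pred (2 ^ 3).   (* ones_div_pow2 *)
Proof. reflexivity. Qed.
>>
*)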
Lemma ones_mod_pow2 : forall n m, 0<=m<=n -> (ones n) mod (2^m) == ones m. Proof. intros n m (Hm,H). symmetry. apply mod_unique with (ones (n-m)). left. rewrite ones_equiv. split. rewrite <- lt_succ_r, succ_pred. order_pos. now rewrite <- le_succ_l, succ_pred. rewrite <- (sub_add m n) at 1. rewrite (add_comm _ m). apply ones_add; trivial. now apply le_0_sub. Qed. Lemma ones_spec_low : forall n m, 0<=m (ones n).[m] = true. Proof. intros n m (Hm,H). apply testbit_true; trivial. rewrite ones_div_pow2 by (split; order). rewrite <- (pow_1_r 2). rewrite ones_mod_pow2. rewrite ones_equiv. now nzsimpl'. split. order'. apply le_add_le_sub_r. nzsimpl. now apply le_succ_l. Qed. Lemma ones_spec_high : forall n m, 0<=n<=m -> (ones n).[m] = false. Proof. intros n m (Hn,H). le_elim Hn. apply bits_above_log2; rewrite ones_equiv. rewrite <-lt_succ_r, succ_pred; order_pos. rewrite log2_pred_pow2; trivial. now rewrite <-le_succ_l, succ_pred. rewrite ones_equiv. now rewrite <- Hn, pow_0_r, one_succ, pred_succ, bits_0. Qed. Lemma ones_spec_iff : forall n m, 0<=n -> ((ones n).[m] = true <-> 0<=m log2 a < n -> lor a (ones n) == ones n. Proof. intros a n Ha H. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, bits_above_log2; try split; trivial. now apply lt_le_trans with n. apply le_trans with (log2 a); order_pos. rewrite ones_spec_low, orb_true_r; try split; trivial. Qed. Lemma land_ones : forall a n, 0<=n -> land a (ones n) == a mod 2^n. Proof. intros a n Hn. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, mod_pow2_bits_high, andb_false_r; try split; trivial. rewrite ones_spec_low, mod_pow2_bits_low, andb_true_r; try split; trivial. Qed. Lemma land_ones_low : forall a n, 0<=a -> log2 a < n -> land a (ones n) == a. Proof. intros a n Ha H. assert (Hn : 0<=n) by (generalize (log2_nonneg a); order). rewrite land_ones; trivial. apply mod_small. split; trivial. apply log2_lt_cancel. now rewrite log2_pow2. Qed. Lemma ldiff_ones_r : forall a n, 0<=n -> ldiff a (ones n) == (a >> n) << n. Proof. intros a n Hn. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, shiftl_spec_high, shiftr_spec; trivial. rewrite sub_add; trivial. apply andb_true_r. now apply le_0_sub. now split. rewrite ones_spec_low, shiftl_spec_low, andb_false_r; try split; trivial. Qed. Lemma ldiff_ones_r_low : forall a n, 0<=a -> log2 a < n -> ldiff a (ones n) == 0. Proof. intros a n Ha H. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, bits_above_log2; trivial. now apply lt_le_trans with n. split; trivial. now apply le_trans with (log2 a); order_pos. rewrite ones_spec_low, andb_false_r; try split; trivial. Qed. Lemma ldiff_ones_l_low : forall a n, 0<=a -> log2 a < n -> ldiff (ones n) a == lxor a (ones n). Proof. intros a n Ha H. bitwise. destruct (le_gt_cases n m). rewrite ones_spec_high, bits_above_log2; trivial. now apply lt_le_trans with n. split; trivial. now apply le_trans with (log2 a); order_pos. rewrite ones_spec_low, xorb_true_r; try split; trivial. Qed. (** Bitwise operations and sign *) Lemma shiftl_nonneg : forall a n, 0 <= (a << n) <-> 0 <= a. Proof. intros a n. destruct (le_ge_cases 0 n) as [Hn|Hn]. (* 0<=n *) rewrite 2 bits_iff_nonneg_ex. split; intros (k,Hk). exists (k-n). intros m Hm. destruct (le_gt_cases 0 m); [|now rewrite testbit_neg_r]. rewrite <- (add_simpl_r m n), <- (shiftl_spec a n) by order_pos. apply Hk. now apply lt_sub_lt_add_r. exists (k+n). intros m Hm. destruct (le_gt_cases 0 m); [|now rewrite testbit_neg_r]. rewrite shiftl_spec by trivial. apply Hk. 
now apply lt_add_lt_sub_r. (* n<=0*) rewrite <- shiftr_opp_r, 2 bits_iff_nonneg_ex. split; intros (k,Hk). destruct (le_gt_cases 0 k). exists (k-n). intros m Hm. apply lt_sub_lt_add_r in Hm. rewrite <- (add_simpl_r m n), <- add_opp_r, <- (shiftr_spec a (-n)). now apply Hk. order. assert (EQ : a >> (-n) == 0). apply bits_inj'. intros m Hm. rewrite bits_0. apply Hk; order. apply shiftr_eq_0_iff in EQ. rewrite <- bits_iff_nonneg_ex. destruct EQ as [EQ|[LT _]]; order. exists (k+n). intros m Hm. destruct (le_gt_cases 0 m); [|now rewrite testbit_neg_r]. rewrite shiftr_spec by trivial. apply Hk. rewrite add_opp_r. now apply lt_add_lt_sub_r. Qed. Lemma shiftl_neg : forall a n, (a << n) < 0 <-> a < 0. Proof. intros a n. now rewrite 2 lt_nge, shiftl_nonneg. Qed. Lemma shiftr_nonneg : forall a n, 0 <= (a >> n) <-> 0 <= a. Proof. intros. rewrite <- shiftl_opp_r. apply shiftl_nonneg. Qed. Lemma shiftr_neg : forall a n, (a >> n) < 0 <-> a < 0. Proof. intros a n. now rewrite 2 lt_nge, shiftr_nonneg. Qed. Lemma div2_nonneg : forall a, 0 <= div2 a <-> 0 <= a. Proof. intros. rewrite div2_spec. apply shiftr_nonneg. Qed. Lemma div2_neg : forall a, div2 a < 0 <-> a < 0. Proof. intros a. now rewrite 2 lt_nge, div2_nonneg. Qed. Lemma lor_nonneg : forall a b, 0 <= lor a b <-> 0<=a /\ 0<=b. Proof. intros a b. rewrite 3 bits_iff_nonneg_ex. split. intros (k,Hk). split; exists k; intros m Hm; apply (orb_false_elim a.[m] b.[m]); rewrite <- lor_spec; now apply Hk. intros ((k,Hk),(k',Hk')). destruct (le_ge_cases k k'); [ exists k' | exists k ]; intros m Hm; rewrite lor_spec, Hk, Hk'; trivial; order. Qed. Lemma lor_neg : forall a b, lor a b < 0 <-> a < 0 \/ b < 0. Proof. intros a b. rewrite 3 lt_nge, lor_nonneg. split. apply not_and. apply le_decidable. now intros [H|H] (H',H''). Qed. Lemma lnot_nonneg : forall a, 0 <= lnot a <-> a < 0. Proof. intros a; unfold lnot. now rewrite <- opp_succ, opp_nonneg_nonpos, le_succ_l. Qed. Lemma lnot_neg : forall a, lnot a < 0 <-> 0 <= a. Proof. intros a. now rewrite le_ngt, lt_nge, lnot_nonneg. Qed. Lemma land_nonneg : forall a b, 0 <= land a b <-> 0<=a \/ 0<=b. Proof. intros a b. now rewrite <- (lnot_involutive (land a b)), lnot_land, lnot_nonneg, lor_neg, !lnot_neg. Qed. Lemma land_neg : forall a b, land a b < 0 <-> a < 0 /\ b < 0. Proof. intros a b. now rewrite <- (lnot_involutive (land a b)), lnot_land, lnot_neg, lor_nonneg, !lnot_nonneg. Qed. Lemma ldiff_nonneg : forall a b, 0 <= ldiff a b <-> 0<=a \/ b<0. Proof. intros. now rewrite ldiff_land, land_nonneg, lnot_nonneg. Qed. Lemma ldiff_neg : forall a b, ldiff a b < 0 <-> a<0 /\ 0<=b. Proof. intros. now rewrite ldiff_land, land_neg, lnot_neg. Qed. Lemma lxor_nonneg : forall a b, 0 <= lxor a b <-> (0<=a <-> 0<=b). Proof. assert (H : forall a b, 0<=a -> 0<=b -> 0<=lxor a b). intros a b. rewrite 3 bits_iff_nonneg_ex. intros (k,Hk) (k', Hk'). destruct (le_ge_cases k k'); [ exists k' | exists k]; intros m Hm; rewrite lxor_spec, Hk, Hk'; trivial; order. assert (H' : forall a b, 0<=a -> b<0 -> lxor a b<0). intros a b. rewrite bits_iff_nonneg_ex, 2 bits_iff_neg_ex. intros (k,Hk) (k', Hk'). destruct (le_ge_cases k k'); [ exists k' | exists k]; intros m Hm; rewrite lxor_spec, Hk, Hk'; trivial; order. intros a b. split. intros Hab. split. intros Ha. destruct (le_gt_cases 0 b) as [Hb|Hb]; trivial. generalize (H' _ _ Ha Hb). order. intros Hb. destruct (le_gt_cases 0 a) as [Ha|Ha]; trivial. generalize (H' _ _ Hb Ha). rewrite lxor_comm. order. intros E. destruct (le_gt_cases 0 a) as [Ha|Ha]. apply H; trivial. apply E; trivial. 
destruct (le_gt_cases 0 b) as [Hb|Hb]. apply H; trivial. apply E; trivial. rewrite <- lxor_lnot_lnot. apply H; now apply lnot_nonneg. Qed. (** Bitwise operations and log2 *) Lemma log2_bits_unique : forall a n, a.[n] = true -> (forall m, n a.[m] = false) -> log2 a == n. Proof. intros a n H H'. destruct (lt_trichotomy a 0) as [Ha|[Ha|Ha]]. (* a < 0 *) destruct (proj1 (bits_iff_neg_ex a) Ha) as (k,Hk). destruct (le_gt_cases n k). specialize (Hk (S k) (lt_succ_diag_r _)). rewrite H' in Hk. discriminate. apply lt_succ_r; order. specialize (H' (S n) (lt_succ_diag_r _)). rewrite Hk in H'. discriminate. apply lt_succ_r; order. (* a = 0 *) now rewrite Ha, bits_0 in H. (* 0 < a *) apply le_antisymm; apply le_ngt; intros LT. specialize (H' _ LT). now rewrite bit_log2 in H'. now rewrite bits_above_log2 in H by order. Qed. Lemma log2_shiftr : forall a n, 0 log2 (a >> n) == max 0 (log2 a - n). Proof. intros a n Ha. destruct (le_gt_cases 0 (log2 a - n)); [rewrite max_r | rewrite max_l]; try order. apply log2_bits_unique. now rewrite shiftr_spec, sub_add, bit_log2. intros m Hm. destruct (le_gt_cases 0 m); [|now rewrite testbit_neg_r]. rewrite shiftr_spec; trivial. apply bits_above_log2; try order. now apply lt_sub_lt_add_r. rewrite lt_sub_lt_add_r, add_0_l in H. apply log2_nonpos. apply le_lteq; right. apply shiftr_eq_0_iff. right. now split. Qed. Lemma log2_shiftl : forall a n, 0 0<=n -> log2 (a << n) == log2 a + n. Proof. intros a n Ha Hn. rewrite shiftl_mul_pow2, add_comm by trivial. now apply log2_mul_pow2. Qed. Lemma log2_shiftl' : forall a n, 0 log2 (a << n) == max 0 (log2 a + n). Proof. intros a n Ha. rewrite <- shiftr_opp_r, log2_shiftr by trivial. destruct (le_gt_cases 0 (log2 a + n)); [rewrite 2 max_r | rewrite 2 max_l]; rewrite ?sub_opp_r; try order. Qed. Lemma log2_lor : forall a b, 0<=a -> 0<=b -> log2 (lor a b) == max (log2 a) (log2 b). Proof. assert (AUX : forall a b, 0<=a -> a<=b -> log2 (lor a b) == log2 b). intros a b Ha H. le_elim Ha; [|now rewrite <- Ha, lor_0_l]. apply log2_bits_unique. now rewrite lor_spec, bit_log2, orb_true_r by order. intros m Hm. assert (H' := log2_le_mono _ _ H). now rewrite lor_spec, 2 bits_above_log2 by order. (* main *) intros a b Ha Hb. destruct (le_ge_cases a b) as [H|H]. rewrite max_r by now apply log2_le_mono. now apply AUX. rewrite max_l by now apply log2_le_mono. rewrite lor_comm. now apply AUX. Qed. Lemma log2_land : forall a b, 0<=a -> 0<=b -> log2 (land a b) <= min (log2 a) (log2 b). Proof. assert (AUX : forall a b, 0<=a -> a<=b -> log2 (land a b) <= log2 a). intros a b Ha Hb. apply le_ngt. intros LT. assert (H : 0 <= land a b) by (apply land_nonneg; now left). le_elim H. generalize (bit_log2 (land a b) H). now rewrite land_spec, bits_above_log2. rewrite <- H in LT. apply log2_lt_cancel in LT; order. (* main *) intros a b Ha Hb. destruct (le_ge_cases a b) as [H|H]. rewrite min_l by now apply log2_le_mono. now apply AUX. rewrite min_r by now apply log2_le_mono. rewrite land_comm. now apply AUX. Qed. Lemma log2_lxor : forall a b, 0<=a -> 0<=b -> log2 (lxor a b) <= max (log2 a) (log2 b). Proof. assert (AUX : forall a b, 0<=a -> a<=b -> log2 (lxor a b) <= log2 b). intros a b Ha Hb. apply le_ngt. intros LT. assert (H : 0 <= lxor a b) by (apply lxor_nonneg; split; order). le_elim H. generalize (bit_log2 (lxor a b) H). rewrite lxor_spec, 2 bits_above_log2; try order. discriminate. apply le_lt_trans with (log2 b); trivial. now apply log2_le_mono. rewrite <- H in LT. apply log2_lt_cancel in LT; order. (* main *) intros a b Ha Hb. 
destruct (le_ge_cases a b) as [H|H]. rewrite max_r by now apply log2_le_mono. now apply AUX. rewrite max_l by now apply log2_le_mono. rewrite lxor_comm. now apply AUX. Qed. (** Bitwise operations and arithmetical operations *) Local Notation xor3 a b c := (xorb (xorb a b) c). Local Notation lxor3 a b c := (lxor (lxor a b) c). Local Notation nextcarry a b c := ((a&&b) || (c && (a||b))). Local Notation lnextcarry a b c := (lor (land a b) (land c (lor a b))). Lemma add_bit0 : forall a b, (a+b).[0] = xorb a.[0] b.[0]. Proof. intros. now rewrite !bit0_odd, odd_add. Qed. Lemma add3_bit0 : forall a b c, (a+b+c).[0] = xor3 a.[0] b.[0] c.[0]. Proof. intros. now rewrite !add_bit0. Qed. Lemma add3_bits_div2 : forall (a0 b0 c0 : bool), (a0 + b0 + c0)/2 == nextcarry a0 b0 c0. Proof. assert (H : 1+1 == 2) by now nzsimpl'. intros [|] [|] [|]; simpl; rewrite ?add_0_l, ?add_0_r, ?H; (apply div_same; order') || (apply div_small; split; order') || idtac. symmetry. apply div_unique with 1. left; split; order'. now nzsimpl'. Qed. Lemma add_carry_div2 : forall a b (c0:bool), (a + b + c0)/2 == a/2 + b/2 + nextcarry a.[0] b.[0] c0. Proof. intros. rewrite <- add3_bits_div2. rewrite (add_comm ((a/2)+_)). rewrite <- div_add by order'. f_equiv. rewrite <- !div2_div, mul_comm, mul_add_distr_l. rewrite (div2_odd a), <- bit0_odd at 1. rewrite (div2_odd b), <- bit0_odd at 1. rewrite add_shuffle1. rewrite <-(add_assoc _ _ c0). apply add_comm. Qed. (** The main result concerning addition: we express the bits of the sum in term of bits of [a] and [b] and of some carry stream which is also recursively determined by another equation. *) Lemma add_carry_bits_aux : forall n, 0<=n -> forall a b (c0:bool), -(2^n) <= a < 2^n -> -(2^n) <= b < 2^n -> exists c, a+b+c0 == lxor3 a b c /\ c/2 == lnextcarry a b c /\ c.[0] = c0. Proof. intros n Hn. apply le_ind with (4:=Hn). solve_proper. (* base *) intros a b c0. rewrite !pow_0_r, !one_succ, !lt_succ_r, <- !one_succ. intros (Ha1,Ha2) (Hb1,Hb2). le_elim Ha1; rewrite <- ?le_succ_l, ?succ_m1 in Ha1; le_elim Hb1; rewrite <- ?le_succ_l, ?succ_m1 in Hb1. (* base, a = 0, b = 0 *) exists c0. rewrite (le_antisymm _ _ Ha2 Ha1), (le_antisymm _ _ Hb2 Hb1). rewrite !add_0_l, !lxor_0_l, !lor_0_r, !land_0_r, !lor_0_r. rewrite b2z_div2, b2z_bit0; now repeat split. (* base, a = 0, b = -1 *) exists (-c0). rewrite <- Hb1, (le_antisymm _ _ Ha2 Ha1). repeat split. rewrite add_0_l, lxor_0_l, lxor_m1_l. unfold lnot. now rewrite opp_involutive, add_comm, add_opp_r, sub_1_r. rewrite land_0_l, !lor_0_l, land_m1_r. symmetry. apply div_unique with c0. left; destruct c0; simpl; split; order'. now rewrite two_succ, mul_succ_l, mul_1_l, add_opp_r, sub_add. rewrite bit0_odd, odd_opp; destruct c0; simpl; apply odd_1 || apply odd_0. (* base, a = -1, b = 0 *) exists (-c0). rewrite <- Ha1, (le_antisymm _ _ Hb2 Hb1). repeat split. rewrite add_0_r, lxor_0_r, lxor_m1_l. unfold lnot. now rewrite opp_involutive, add_comm, add_opp_r, sub_1_r. rewrite land_0_r, lor_0_r, lor_0_l, land_m1_r. symmetry. apply div_unique with c0. left; destruct c0; simpl; split; order'. now rewrite two_succ, mul_succ_l, mul_1_l, add_opp_r, sub_add. rewrite bit0_odd, odd_opp; destruct c0; simpl; apply odd_1 || apply odd_0. (* base, a = -1, b = -1 *) exists (c0 + 2*(-1)). rewrite <- Ha1, <- Hb1. repeat split. rewrite lxor_m1_l, lnot_m1, lxor_0_l. now rewrite two_succ, mul_succ_l, mul_1_l, add_comm, add_assoc. rewrite land_m1_l, lor_m1_l. apply add_b2z_double_div2. apply add_b2z_double_bit0. (* step *) clear n Hn. intros n Hn IH a b c0 Ha Hb. 
set (c1:=nextcarry a.[0] b.[0] c0). destruct (IH (a/2) (b/2) c1) as (c & IH1 & IH2 & Hc); clear IH. split. apply div_le_lower_bound. order'. now rewrite mul_opp_r, <- pow_succ_r. apply div_lt_upper_bound. order'. now rewrite <- pow_succ_r. split. apply div_le_lower_bound. order'. now rewrite mul_opp_r, <- pow_succ_r. apply div_lt_upper_bound. order'. now rewrite <- pow_succ_r. exists (c0 + 2*c). repeat split. (* step, add *) bitwise. le_elim Hm. rewrite <- (succ_pred m), lt_succ_r in Hm. rewrite <- (succ_pred m), <- !div2_bits, <- 2 lxor_spec by trivial. f_equiv. rewrite add_b2z_double_div2, <- IH1. apply add_carry_div2. rewrite <- Hm. now rewrite add_b2z_double_bit0, add3_bit0, b2z_bit0. (* step, carry *) rewrite add_b2z_double_div2. bitwise. le_elim Hm. rewrite <- (succ_pred m), lt_succ_r in Hm. rewrite <- (succ_pred m), <- !div2_bits, IH2 by trivial. autorewrite with bitwise. now rewrite add_b2z_double_div2. rewrite <- Hm. now rewrite add_b2z_double_bit0. (* step, carry0 *) apply add_b2z_double_bit0. Qed. Lemma add_carry_bits : forall a b (c0:bool), exists c, a+b+c0 == lxor3 a b c /\ c/2 == lnextcarry a b c /\ c.[0] = c0. Proof. intros a b c0. set (n := max (abs a) (abs b)). apply (add_carry_bits_aux n). (* positivity *) unfold n. destruct (le_ge_cases (abs a) (abs b)); [rewrite max_r|rewrite max_l]; order_pos'. (* bound for a *) assert (Ha : abs a < 2^n). apply lt_le_trans with (2^(abs a)). apply pow_gt_lin_r; order_pos'. apply pow_le_mono_r. order'. unfold n. destruct (le_ge_cases (abs a) (abs b)); [rewrite max_r|rewrite max_l]; try order. apply abs_lt in Ha. destruct Ha; split; order. (* bound for b *) assert (Hb : abs b < 2^n). apply lt_le_trans with (2^(abs b)). apply pow_gt_lin_r; order_pos'. apply pow_le_mono_r. order'. unfold n. destruct (le_ge_cases (abs a) (abs b)); [rewrite max_r|rewrite max_l]; try order. apply abs_lt in Hb. destruct Hb; split; order. Qed. (** Particular case : the second bit of an addition *) Lemma add_bit1 : forall a b, (a+b).[1] = xor3 a.[1] b.[1] (a.[0] && b.[0]). Proof. intros a b. destruct (add_carry_bits a b false) as (c & EQ1 & EQ2 & Hc). simpl in EQ1; rewrite add_0_r in EQ1. rewrite EQ1. autorewrite with bitwise. f_equal. rewrite one_succ, <- div2_bits, EQ2 by order. autorewrite with bitwise. rewrite Hc. simpl. apply orb_false_r. Qed. (** In an addition, there will be no carries iff there is no common bits in the numbers to add *) Lemma nocarry_equiv : forall a b c, c/2 == lnextcarry a b c -> c.[0] = false -> (c == 0 <-> land a b == 0). Proof. intros a b c H H'. split. intros EQ; rewrite EQ in *. rewrite div_0_l in H by order'. symmetry in H. now apply lor_eq_0_l in H. intros EQ. rewrite EQ, lor_0_l in H. apply bits_inj'. intros n Hn. rewrite bits_0. apply le_ind with (4:=Hn). solve_proper. trivial. clear n Hn. intros n Hn IH. rewrite <- div2_bits, H; trivial. autorewrite with bitwise. now rewrite IH. Qed. (** When there is no common bits, the addition is just a xor *) Lemma add_nocarry_lxor : forall a b, land a b == 0 -> a+b == lxor a b. Proof. intros a b H. destruct (add_carry_bits a b false) as (c & EQ1 & EQ2 & Hc). simpl in EQ1; rewrite add_0_r in EQ1. rewrite EQ1. apply (nocarry_equiv a b c) in H; trivial. rewrite H. now rewrite lxor_0_r. Qed. (** A null [ldiff] implies being smaller *) Lemma ldiff_le : forall a b, 0<=b -> ldiff a b == 0 -> 0 <= a <= b. Proof. assert (AUX : forall n, 0<=n -> forall a b, 0 <= a < 2^n -> 0<=b -> ldiff a b == 0 -> a <= b). intros n Hn. apply le_ind with (4:=Hn); clear n Hn. solve_proper. intros a b Ha Hb _. 
rewrite pow_0_r, one_succ, lt_succ_r in Ha. setoid_replace a with 0 by (destruct Ha; order'); trivial. intros n Hn IH a b (Ha,Ha') Hb H. assert (NEQ : 2 ~= 0) by order'. rewrite (div_mod a 2 NEQ), (div_mod b 2 NEQ). apply add_le_mono. apply mul_le_mono_pos_l; try order'. apply IH. split. apply div_pos; order'. apply div_lt_upper_bound; try order'. now rewrite <- pow_succ_r. apply div_pos; order'. rewrite <- (pow_1_r 2), <- 2 shiftr_div_pow2 by order'. rewrite <- shiftr_ldiff, H, shiftr_div_pow2, pow_1_r, div_0_l; order'. rewrite <- 2 bit0_mod. apply bits_inj_iff in H. specialize (H 0). rewrite ldiff_spec, bits_0 in H. destruct a.[0], b.[0]; try discriminate; simpl; order'. (* main *) intros a b Hb Hd. assert (Ha : 0<=a). apply le_ngt; intros Ha'. apply (lt_irrefl 0). rewrite <- Hd at 1. apply ldiff_neg. now split. split; trivial. apply (AUX a); try split; trivial. apply pow_gt_lin_r; order'. Qed. (** Subtraction can be a ldiff when the opposite ldiff is null. *) Lemma sub_nocarry_ldiff : forall a b, ldiff b a == 0 -> a-b == ldiff a b. Proof. intros a b H. apply add_cancel_r with b. rewrite sub_add. symmetry. rewrite add_nocarry_lxor; trivial. bitwise. apply bits_inj_iff in H. specialize (H m). rewrite ldiff_spec, bits_0 in H. now destruct a.[m], b.[m]. apply land_ldiff. Qed. (** Adding numbers with no common bits cannot lead to a much bigger number *) Lemma add_nocarry_lt_pow2 : forall a b n, land a b == 0 -> a < 2^n -> b < 2^n -> a+b < 2^n. Proof. intros a b n H Ha Hb. destruct (le_gt_cases a 0) as [Ha'|Ha']. apply le_lt_trans with (0+b). now apply add_le_mono_r. now nzsimpl. destruct (le_gt_cases b 0) as [Hb'|Hb']. apply le_lt_trans with (a+0). now apply add_le_mono_l. now nzsimpl. rewrite add_nocarry_lxor by order. destruct (lt_ge_cases 0 (lxor a b)); [|apply le_lt_trans with 0; order_pos]. apply log2_lt_pow2; trivial. apply log2_lt_pow2 in Ha; trivial. apply log2_lt_pow2 in Hb; trivial. apply le_lt_trans with (max (log2 a) (log2 b)). apply log2_lxor; order. destruct (le_ge_cases (log2 a) (log2 b)); [rewrite max_r|rewrite max_l]; order. Qed. Lemma add_nocarry_mod_lt_pow2 : forall a b n, 0<=n -> land a b == 0 -> a mod 2^n + b mod 2^n < 2^n. Proof. intros a b n Hn H. apply add_nocarry_lt_pow2. bitwise. destruct (le_gt_cases n m). rewrite mod_pow2_bits_high; now split. now rewrite !mod_pow2_bits_low, <- land_spec, H, bits_0. apply mod_pos_bound; order_pos. apply mod_pos_bound; order_pos. Qed. End ZBitsProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZPow.v0000640000175000001440000001000012010532755021301 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* even (a^b) = even a. Proof. intros a b Hb. apply lt_ind with (4:=Hb). solve_proper. now nzsimpl. clear b Hb. intros b Hb IH. nzsimpl; [|order]. rewrite even_mul, IH. now destruct (even a). Qed. Lemma odd_pow : forall a b, 0 odd (a^b) = odd a. Proof. intros. now rewrite <- !negb_even, even_pow. Qed. (** Properties of power of negative numbers *) Lemma pow_opp_even : forall a b, Even b -> (-a)^b == a^b. Proof. intros a b (c,H). rewrite H. destruct (le_gt_cases 0 c). rewrite 2 pow_mul_r by order'. rewrite 2 pow_2_r. now rewrite mul_opp_opp. assert (2*c < 0) by (apply mul_pos_neg; order'). now rewrite !pow_neg_r. Qed. Lemma pow_opp_odd : forall a b, Odd b -> (-a)^b == -(a^b). Proof. intros a b (c,H). rewrite H. destruct (le_gt_cases 0 c) as [LE|GT]. assert (0 <= 2*c) by (apply mul_nonneg_nonneg; order'). 
rewrite add_1_r, !pow_succ_r; trivial. rewrite pow_opp_even by (now exists c). apply mul_opp_l. apply double_above in GT. rewrite mul_0_r in GT. rewrite !pow_neg_r by trivial. now rewrite opp_0. Qed. Lemma pow_even_abs : forall a b, Even b -> a^b == (abs a)^b. Proof. intros. destruct (abs_eq_or_opp a) as [EQ|EQ]; rewrite EQ. reflexivity. symmetry. now apply pow_opp_even. Qed. Lemma pow_even_nonneg : forall a b, Even b -> 0 <= a^b. Proof. intros. rewrite pow_even_abs by trivial. apply pow_nonneg, abs_nonneg. Qed. Lemma pow_odd_abs_sgn : forall a b, Odd b -> a^b == sgn a * (abs a)^b. Proof. intros a b H. destruct (sgn_spec a) as [(LT,EQ)|[(EQ',EQ)|(LT,EQ)]]; rewrite EQ. nzsimpl. rewrite abs_eq; order. rewrite <- EQ'. nzsimpl. destruct (le_gt_cases 0 b). apply pow_0_l. assert (b~=0) by (contradict H; now rewrite H, <-odd_spec, odd_0). order. now rewrite pow_neg_r. rewrite abs_neq by order. rewrite pow_opp_odd; trivial. now rewrite mul_opp_opp, mul_1_l. Qed. Lemma pow_odd_sgn : forall a b, 0<=b -> Odd b -> sgn (a^b) == sgn a. Proof. intros a b Hb H. destruct (sgn_spec a) as [(LT,EQ)|[(EQ',EQ)|(LT,EQ)]]; rewrite EQ. apply sgn_pos. apply pow_pos_nonneg; trivial. rewrite <- EQ'. rewrite pow_0_l. apply sgn_0. assert (b~=0) by (contradict H; now rewrite H, <-odd_spec, odd_0). order. apply sgn_neg. rewrite <- (opp_involutive a). rewrite pow_opp_odd by trivial. apply opp_neg_pos. apply pow_pos_nonneg; trivial. now apply opp_pos_neg. Qed. Lemma abs_pow : forall a b, abs (a^b) == (abs a)^b. Proof. intros a b. destruct (Even_or_Odd b). rewrite pow_even_abs by trivial. apply abs_eq, pow_nonneg, abs_nonneg. rewrite pow_odd_abs_sgn by trivial. rewrite abs_mul. destruct (lt_trichotomy 0 a) as [Ha|[Ha|Ha]]. rewrite (sgn_pos a), (abs_eq 1), mul_1_l by order'. apply abs_eq, pow_nonneg, abs_nonneg. rewrite <- Ha, sgn_0, abs_0, mul_0_l. symmetry. apply pow_0_l'. intro Hb. rewrite Hb in H. apply (Even_Odd_False 0); trivial. exists 0; now nzsimpl. rewrite (sgn_neg a), abs_opp, (abs_eq 1), mul_1_l by order'. apply abs_eq, pow_nonneg, abs_nonneg. Qed. End ZPowProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZParity.v0000640000175000001440000000321412010532755022015 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Even (-n)). intros n (m,H). exists (-m). rewrite mul_opp_r. now f_equiv. intros. rewrite eq_iff_eq_true, !even_spec. split. rewrite <- (opp_involutive n) at 2. apply H. apply H. Qed. Lemma odd_opp : forall n, odd (-n) = odd n. Proof. intros. rewrite <- !negb_even. now rewrite even_opp. Qed. Lemma even_sub : forall n m, even (n-m) = Bool.eqb (even n) (even m). Proof. intros. now rewrite <- add_opp_r, even_add, even_opp. Qed. Lemma odd_sub : forall n m, odd (n-m) = xorb (odd n) (odd m). Proof. intros. now rewrite <- add_opp_r, odd_add, odd_opp. Qed. End ZParityProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZBase.v0000640000175000001440000000236512010532755021425 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n == m. Proof. intros n m H. apply succ_wd in H. now rewrite 2 succ_pred in H. Qed. Theorem pred_inj_wd : forall n1 n2, P n1 == P n2 <-> n1 == n2. Proof. intros n1 n2; split; [apply pred_inj | intros; now f_equiv]. Qed. Lemma succ_m1 : S (-1) == 0. Proof. now rewrite one_succ, opp_succ, opp_0, succ_pred. Qed. End ZBaseProp. 
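(** Added illustration (not part of the original development): in the
    concrete [ZArith] instance (an assumption of this sketch), the basic
    facts of [ZBaseProp] above are direct computations:
<<
Require Import ZArith. Open Scope Z_scope.
Goal Z.succ (-1) = 0.          (* succ_m1 *)
Proof. reflexivity. Qed.
Goal Z.succ (Z.pred 5) = 5.    (* succ_pred, used in the proof of pred_inj *)
Proof. reflexivity. Qed.
>>
*)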
coq-8.4pl2/theories/Numbers/Integer/Abstract/ZLcm.v0000640000175000001440000003362112010532755021265 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 a÷b == a/b. Proof. intros. apply div_unique_pos with (a rem b). now apply rem_bound_pos. apply quot_rem. order. Qed. Lemma rem_mod_nonneg : forall a b, 0<=a -> 0 a rem b == a mod b. Proof. intros. apply mod_unique_pos with (a÷b). now apply rem_bound_pos. apply quot_rem. order. Qed. (** We can use the sign rule to have an relation between divisions. *) Lemma quot_div : forall a b, b~=0 -> a÷b == (sgn a)*(sgn b)*(abs a / abs b). Proof. assert (AUX : forall a b, 0 a÷b == (sgn a)*(sgn b)*(abs a / abs b)). intros a b Hb. rewrite (sgn_pos b), (abs_eq b), mul_1_r by order. destruct (lt_trichotomy 0 a) as [Ha|[Ha|Ha]]. rewrite sgn_pos, abs_eq, mul_1_l, quot_div_nonneg; order. rewrite <- Ha, abs_0, sgn_0, quot_0_l, div_0_l, mul_0_l; order. rewrite sgn_neg, abs_neq, mul_opp_l, mul_1_l, eq_opp_r, <-quot_opp_l by order. apply quot_div_nonneg; trivial. apply opp_nonneg_nonpos; order. (* main *) intros a b Hb. apply neg_pos_cases in Hb. destruct Hb as [Hb|Hb]; [|now apply AUX]. rewrite <- (opp_involutive b) at 1. rewrite quot_opp_r. rewrite AUX, abs_opp, sgn_opp, mul_opp_r, mul_opp_l, opp_involutive. reflexivity. now apply opp_pos_neg. rewrite eq_opp_l, opp_0; order. Qed. Lemma rem_mod : forall a b, b~=0 -> a rem b == (sgn a) * ((abs a) mod (abs b)). Proof. intros a b Hb. rewrite <- rem_abs_r by trivial. assert (Hb' := proj2 (abs_pos b) Hb). destruct (lt_trichotomy 0 a) as [Ha|[Ha|Ha]]. rewrite (abs_eq a), sgn_pos, mul_1_l, rem_mod_nonneg; order. rewrite <- Ha, abs_0, sgn_0, mod_0_l, rem_0_l, mul_0_l; order. rewrite sgn_neg, (abs_neq a), mul_opp_l, mul_1_l, eq_opp_r, <-rem_opp_l by order. apply rem_mod_nonneg; trivial. apply opp_nonneg_nonpos; order. Qed. (** Modulo and remainder are null at the same place, and this correspond to the divisibility relation. *) Lemma mod_divide : forall a b, b~=0 -> (a mod b == 0 <-> (b|a)). Proof. intros a b Hb. split. intros Hab. exists (a/b). rewrite mul_comm. rewrite (div_mod a b Hb) at 1. rewrite Hab; now nzsimpl. intros (c,Hc). rewrite Hc. now apply mod_mul. Qed. Lemma rem_divide : forall a b, b~=0 -> (a rem b == 0 <-> (b|a)). Proof. intros a b Hb. split. intros Hab. exists (a÷b). rewrite mul_comm. rewrite (quot_rem a b Hb) at 1. rewrite Hab; now nzsimpl. intros (c,Hc). rewrite Hc. now apply rem_mul. Qed. Lemma rem_mod_eq_0 : forall a b, b~=0 -> (a rem b == 0 <-> a mod b == 0). Proof. intros a b Hb. now rewrite mod_divide, rem_divide. Qed. (** When division is exact, div and quot agree *) Lemma quot_div_exact : forall a b, b~=0 -> (b|a) -> a÷b == a/b. Proof. intros a b Hb H. apply mul_cancel_l with b; trivial. assert (H':=H). apply rem_divide, quot_exact in H; trivial. apply mod_divide, div_exact in H'; trivial. now rewrite <-H,<-H'. Qed. Lemma divide_div_mul_exact : forall a b c, b~=0 -> (b|a) -> (c*a)/b == c*(a/b). Proof. intros a b c Hb H. apply mul_cancel_l with b; trivial. rewrite mul_assoc, mul_shuffle0. assert (H':=H). apply mod_divide, div_exact in H'; trivial. rewrite <- H', (mul_comm a c). symmetry. apply div_exact; trivial. apply mod_divide; trivial. now apply divide_mul_r. Qed. Lemma divide_quot_mul_exact : forall a b c, b~=0 -> (b|a) -> (c*a)÷b == c*(a÷b). Proof. intros a b c Hb H. rewrite 2 quot_div_exact; trivial. apply divide_div_mul_exact; trivial. now apply divide_mul_r. Qed. 
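(** Added illustration (not part of the original development): the sign
    rule [quot_div] above relates truncated and floor division. On the
    concrete [ZArith] implementation (an assumption of this sketch) the
    difference shows up on negative dividends:
<<
Require Import ZArith. Open Scope Z_scope.
Goal Z.quot (-7) 2 = -3 /\ (-7) / 2 = -4.    (* truncated vs floor quotient *)
Proof. split; reflexivity. Qed.
Goal Z.rem (-7) 2 = -1 /\ (-7) mod 2 = 1.    (* rem vs mod *)
Proof. split; reflexivity. Qed.
>>
*)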
(** Gcd of divided elements, for exact divisions *) Lemma gcd_div_factor : forall a b c, 0 (c|a) -> (c|b) -> gcd (a/c) (b/c) == (gcd a b)/c. Proof. intros a b c Hc Ha Hb. apply mul_cancel_l with c; try order. assert (H:=gcd_greatest _ _ _ Ha Hb). apply mod_divide, div_exact in H; try order. rewrite <- H. rewrite <- gcd_mul_mono_l_nonneg; try order. f_equiv; symmetry; apply div_exact; try order; apply mod_divide; trivial; try order. Qed. Lemma gcd_quot_factor : forall a b c, 0 (c|a) -> (c|b) -> gcd (a÷c) (b÷c) == (gcd a b)÷c. Proof. intros a b c Hc Ha Hb. rewrite !quot_div_exact; trivial; try order. now apply gcd_div_factor. now apply gcd_greatest. Qed. Lemma gcd_div_gcd : forall a b g, g~=0 -> g == gcd a b -> gcd (a/g) (b/g) == 1. Proof. intros a b g NZ EQ. rewrite gcd_div_factor. now rewrite <- EQ, div_same. generalize (gcd_nonneg a b); order. rewrite EQ; apply gcd_divide_l. rewrite EQ; apply gcd_divide_r. Qed. Lemma gcd_quot_gcd : forall a b g, g~=0 -> g == gcd a b -> gcd (a÷g) (b÷g) == 1. Proof. intros a b g NZ EQ. rewrite !quot_div_exact; trivial. now apply gcd_div_gcd. rewrite EQ; apply gcd_divide_r. rewrite EQ; apply gcd_divide_l. Qed. (** The following equality is crucial for Euclid algorithm *) Lemma gcd_mod : forall a b, b~=0 -> gcd (a mod b) b == gcd b a. Proof. intros a b Hb. rewrite mod_eq; trivial. rewrite <- add_opp_r, mul_comm, <- mul_opp_l. rewrite (gcd_comm _ b). apply gcd_add_mult_diag_r. Qed. Lemma gcd_rem : forall a b, b~=0 -> gcd (a rem b) b == gcd b a. Proof. intros a b Hb. rewrite rem_eq; trivial. rewrite <- add_opp_r, mul_comm, <- mul_opp_l. rewrite (gcd_comm _ b). apply gcd_add_mult_diag_r. Qed. (** We now define lcm thanks to gcd: lcm a b = a * (b / gcd a b) = (a / gcd a b) * b = (a*b) / gcd a b We had an abs in order to have an always-nonnegative lcm, in the spirit of gcd. Nota: [lcm 0 0] should be 0, which isn't garantee with the third equation above. *) Definition lcm a b := abs (a*(b/gcd a b)). Instance lcm_wd : Proper (eq==>eq==>eq) lcm. Proof. unfold lcm. solve_proper. Qed. Lemma lcm_equiv1 : forall a b, gcd a b ~= 0 -> a * (b / gcd a b) == (a*b)/gcd a b. Proof. intros a b H. rewrite divide_div_mul_exact; try easy. apply gcd_divide_r. Qed. Lemma lcm_equiv2 : forall a b, gcd a b ~= 0 -> (a / gcd a b) * b == (a*b)/gcd a b. Proof. intros a b H. rewrite 2 (mul_comm _ b). rewrite divide_div_mul_exact; try easy. apply gcd_divide_l. Qed. Lemma gcd_div_swap : forall a b, (a / gcd a b) * b == a * (b / gcd a b). Proof. intros a b. destruct (eq_decidable (gcd a b) 0) as [EQ|NEQ]. apply gcd_eq_0 in EQ. destruct EQ as (EQ,EQ'). rewrite EQ, EQ'. now nzsimpl. now rewrite lcm_equiv1, <-lcm_equiv2. Qed. Lemma divide_lcm_l : forall a b, (a | lcm a b). Proof. unfold lcm. intros a b. apply divide_abs_r, divide_factor_l. Qed. Lemma divide_lcm_r : forall a b, (b | lcm a b). Proof. unfold lcm. intros a b. apply divide_abs_r. rewrite <- gcd_div_swap. apply divide_factor_r. Qed. Lemma divide_div : forall a b c, a~=0 -> (a|b) -> (b|c) -> (b/a|c/a). Proof. intros a b c Ha Hb (c',Hc). exists c'. now rewrite <- divide_div_mul_exact, <- Hc. Qed. Lemma lcm_least : forall a b c, (a | c) -> (b | c) -> (lcm a b | c). Proof. intros a b c Ha Hb. unfold lcm. apply divide_abs_l. destruct (eq_decidable (gcd a b) 0) as [EQ|NEQ]. apply gcd_eq_0 in EQ. destruct EQ as (EQ,EQ'). rewrite EQ in *. now nzsimpl. assert (Ga := gcd_divide_l a b). assert (Gb := gcd_divide_r a b). set (g:=gcd a b) in *. assert (Ha' := divide_div g a c NEQ Ga Ha). assert (Hb' := divide_div g b c NEQ Gb Hb). 
destruct Ha' as (a',Ha'). rewrite Ha', mul_comm in Hb'. apply gauss in Hb'; [|apply gcd_div_gcd; unfold g; trivial using gcd_comm]. destruct Hb' as (b',Hb'). exists b'. rewrite mul_shuffle3, <- Hb'. rewrite (proj2 (div_exact c g NEQ)). rewrite Ha', mul_shuffle3, (mul_comm a a'). f_equiv. symmetry. apply div_exact; trivial. apply mod_divide; trivial. apply mod_divide; trivial. transitivity a; trivial. Qed. Lemma lcm_nonneg : forall a b, 0 <= lcm a b. Proof. intros a b. unfold lcm. apply abs_nonneg. Qed. Lemma lcm_comm : forall a b, lcm a b == lcm b a. Proof. intros a b. unfold lcm. rewrite (gcd_comm b), (mul_comm b). now rewrite <- gcd_div_swap. Qed. Lemma lcm_divide_iff : forall n m p, (lcm n m | p) <-> (n | p) /\ (m | p). Proof. intros. split. split. transitivity (lcm n m); trivial using divide_lcm_l. transitivity (lcm n m); trivial using divide_lcm_r. intros (H,H'). now apply lcm_least. Qed. Lemma lcm_unique : forall n m p, 0<=p -> (n|p) -> (m|p) -> (forall q, (n|q) -> (m|q) -> (p|q)) -> lcm n m == p. Proof. intros n m p Hp Hn Hm H. apply divide_antisym_nonneg; trivial. apply lcm_nonneg. now apply lcm_least. apply H. apply divide_lcm_l. apply divide_lcm_r. Qed. Lemma lcm_unique_alt : forall n m p, 0<=p -> (forall q, (p|q) <-> (n|q) /\ (m|q)) -> lcm n m == p. Proof. intros n m p Hp H. apply lcm_unique; trivial. apply H, divide_refl. apply H, divide_refl. intros. apply H. now split. Qed. Lemma lcm_assoc : forall n m p, lcm n (lcm m p) == lcm (lcm n m) p. Proof. intros. apply lcm_unique_alt; try apply lcm_nonneg. intros. now rewrite !lcm_divide_iff, and_assoc. Qed. Lemma lcm_0_l : forall n, lcm 0 n == 0. Proof. intros. apply lcm_unique; trivial. order. apply divide_refl. apply divide_0_r. Qed. Lemma lcm_0_r : forall n, lcm n 0 == 0. Proof. intros. now rewrite lcm_comm, lcm_0_l. Qed. Lemma lcm_1_l_nonneg : forall n, 0<=n -> lcm 1 n == n. Proof. intros. apply lcm_unique; trivial using divide_1_l, le_0_1, divide_refl. Qed. Lemma lcm_1_r_nonneg : forall n, 0<=n -> lcm n 1 == n. Proof. intros. now rewrite lcm_comm, lcm_1_l_nonneg. Qed. Lemma lcm_diag_nonneg : forall n, 0<=n -> lcm n n == n. Proof. intros. apply lcm_unique; trivial using divide_refl. Qed. Lemma lcm_eq_0 : forall n m, lcm n m == 0 <-> n == 0 \/ m == 0. Proof. intros. split. intros EQ. apply eq_mul_0. apply divide_0_l. rewrite <- EQ. apply lcm_least. apply divide_factor_l. apply divide_factor_r. destruct 1 as [EQ|EQ]; rewrite EQ. apply lcm_0_l. apply lcm_0_r. Qed. Lemma divide_lcm_eq_r : forall n m, 0<=m -> (n|m) -> lcm n m == m. Proof. intros n m Hm H. apply lcm_unique_alt; trivial. intros q. split. split; trivial. now transitivity m. now destruct 1. Qed. Lemma divide_lcm_iff : forall n m, 0<=m -> ((n|m) <-> lcm n m == m). Proof. intros n m Hn. split. now apply divide_lcm_eq_r. intros EQ. rewrite <- EQ. apply divide_lcm_l. Qed. Lemma lcm_opp_l : forall n m, lcm (-n) m == lcm n m. Proof. intros. apply lcm_unique_alt; try apply lcm_nonneg. intros. rewrite divide_opp_l. apply lcm_divide_iff. Qed. Lemma lcm_opp_r : forall n m, lcm n (-m) == lcm n m. Proof. intros. now rewrite lcm_comm, lcm_opp_l, lcm_comm. Qed. Lemma lcm_abs_l : forall n m, lcm (abs n) m == lcm n m. Proof. intros. destruct (abs_eq_or_opp n) as [H|H]; rewrite H. easy. apply lcm_opp_l. Qed. Lemma lcm_abs_r : forall n m, lcm n (abs m) == lcm n m. Proof. intros. now rewrite lcm_comm, lcm_abs_l, lcm_comm. Qed. Lemma lcm_1_l : forall n, lcm 1 n == abs n. Proof. intros. rewrite <- lcm_abs_r. apply lcm_1_l_nonneg, abs_nonneg. Qed. Lemma lcm_1_r : forall n, lcm n 1 == abs n. 
Proof. intros. now rewrite lcm_comm, lcm_1_l. Qed. Lemma lcm_diag : forall n, lcm n n == abs n. Proof. intros. rewrite <- lcm_abs_l, <- lcm_abs_r. apply lcm_diag_nonneg, abs_nonneg. Qed. Lemma lcm_mul_mono_l : forall n m p, lcm (p * n) (p * m) == abs p * lcm n m. Proof. intros n m p. destruct (eq_decidable p 0) as [Hp|Hp]. rewrite Hp. nzsimpl. rewrite lcm_0_l, abs_0. now nzsimpl. destruct (eq_decidable (gcd n m) 0) as [Hg|Hg]. apply gcd_eq_0 in Hg. destruct Hg as (Hn,Hm); rewrite Hn, Hm. nzsimpl. rewrite lcm_0_l. now nzsimpl. unfold lcm. rewrite gcd_mul_mono_l. rewrite !abs_mul, mul_assoc. f_equiv. rewrite <- (abs_sgn p) at 1. rewrite <- mul_assoc. rewrite div_mul_cancel_l; trivial. rewrite divide_div_mul_exact; trivial. rewrite abs_mul. rewrite <- (sgn_abs (sgn p)), sgn_sgn. destruct (sgn_spec p) as [(_,EQ)|[(EQ,_)|(_,EQ)]]. rewrite EQ. now nzsimpl. order. rewrite EQ. rewrite mul_opp_l, mul_opp_r, opp_involutive. now nzsimpl. apply gcd_divide_r. contradict Hp. now apply abs_0_iff. Qed. Lemma lcm_mul_mono_l_nonneg : forall n m p, 0<=p -> lcm (p*n) (p*m) == p * lcm n m. Proof. intros. rewrite <- (abs_eq p) at 3; trivial. apply lcm_mul_mono_l. Qed. Lemma lcm_mul_mono_r : forall n m p, lcm (n * p) (m * p) == lcm n m * abs p. Proof. intros n m p. now rewrite !(mul_comm _ p), lcm_mul_mono_l, mul_comm. Qed. Lemma lcm_mul_mono_r_nonneg : forall n m p, 0<=p -> lcm (n*p) (m*p) == lcm n m * p. Proof. intros. rewrite <- (abs_eq p) at 3; trivial. apply lcm_mul_mono_r. Qed. Lemma gcd_1_lcm_mul : forall n m, n~=0 -> m~=0 -> (gcd n m == 1 <-> lcm n m == abs (n*m)). Proof. intros n m Hn Hm. split; intros H. unfold lcm. rewrite H. now rewrite div_1_r. unfold lcm in *. rewrite !abs_mul in H. apply mul_cancel_l in H; [|now rewrite abs_0_iff]. assert (H' := gcd_divide_r n m). assert (Hg : gcd n m ~= 0) by (red; rewrite gcd_eq_0; destruct 1; order). apply mod_divide in H'; trivial. apply div_exact in H'; trivial. assert (m / gcd n m ~=0) by (contradict Hm; rewrite H', Hm; now nzsimpl). rewrite <- (mul_1_l (abs (_/_))) in H. rewrite H' in H at 3. rewrite abs_mul in H. apply mul_cancel_r in H; [|now rewrite abs_0_iff]. rewrite abs_eq in H. order. apply gcd_nonneg. Qed. End ZLcmProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZSgnAbs.v0000640000175000001440000002555112010532755021732 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* abs n == n. Proof. intros. unfold abs. apply max_l. apply le_trans with 0; auto. rewrite opp_nonpos_nonneg; auto. Qed. Lemma abs_neq : forall n, n<=0 -> abs n == -n. Proof. intros. unfold abs. apply max_r. apply le_trans with 0; auto. rewrite opp_nonneg_nonpos; auto. Qed. End GenericAbs. (** We can deduce a [sgn] function from a [compare] function *) Module Type ZDecAxiomsSig := ZAxiomsMiniSig <+ HasCompare. Module Type ZDecAxiomsSig' := ZAxiomsMiniSig' <+ HasCompare. Module Type GenericSgn (Import Z : ZDecAxiomsSig') (Import ZP : ZMulOrderProp Z) <: HasSgn Z. Definition sgn n := match compare 0 n with Eq => 0 | Lt => 1 | Gt => -1 end. Lemma sgn_null : forall n, n==0 -> sgn n == 0. Proof. unfold sgn; intros. destruct (compare_spec 0 n); order. Qed. Lemma sgn_pos : forall n, 0 sgn n == 1. Proof. unfold sgn; intros. destruct (compare_spec 0 n); order. Qed. Lemma sgn_neg : forall n, n<0 -> sgn n == -1. Proof. unfold sgn; intros. destruct (compare_spec 0 n); order. Qed. End GenericSgn. 
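(** Added illustration (not part of the original development): the generic
    constructions above define [abs] through [max] and [sgn] through
    [compare]. On the concrete [ZArith] implementation (an assumption of
    this sketch) both agree with direct computation:
<<
Require Import ZArith. Open Scope Z_scope.
Goal Z.abs (-5) = Z.max (-5) (- (-5)).    (* abs n = max n (-n) *)
Proof. reflexivity. Qed.
Goal Z.sgn (-5) = -1 /\ Z.sgn 5 = 1.      (* sgn via the sign of the comparison *)
Proof. split; reflexivity. Qed.
>>
*)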
(** Derived properties of [abs] and [sgn] *) Module Type ZSgnAbsProp (Import Z : ZAxiomsSig') (Import ZP : ZMulOrderProp Z). Ltac destruct_max n := destruct (le_ge_cases 0 n); [rewrite (abs_eq n) by auto | rewrite (abs_neq n) by auto]. Instance abs_wd : Proper (eq==>eq) abs. Proof. intros x y EQ. destruct_max x. rewrite abs_eq; trivial. now rewrite <- EQ. rewrite abs_neq; try order. now rewrite opp_inj_wd. Qed. Lemma abs_max : forall n, abs n == max n (-n). Proof. intros n. destruct_max n. rewrite max_l; auto with relations. apply le_trans with 0; auto. rewrite opp_nonpos_nonneg; auto. rewrite max_r; auto with relations. apply le_trans with 0; auto. rewrite opp_nonneg_nonpos; auto. Qed. Lemma abs_neq' : forall n, 0<=-n -> abs n == -n. Proof. intros. apply abs_neq. now rewrite <- opp_nonneg_nonpos. Qed. Lemma abs_nonneg : forall n, 0 <= abs n. Proof. intros n. destruct_max n; auto. now rewrite opp_nonneg_nonpos. Qed. Lemma abs_eq_iff : forall n, abs n == n <-> 0<=n. Proof. split; try apply abs_eq. intros EQ. rewrite <- EQ. apply abs_nonneg. Qed. Lemma abs_neq_iff : forall n, abs n == -n <-> n<=0. Proof. split; try apply abs_neq. intros EQ. rewrite <- opp_nonneg_nonpos, <- EQ. apply abs_nonneg. Qed. Lemma abs_opp : forall n, abs (-n) == abs n. Proof. intros. destruct_max n. rewrite (abs_neq (-n)), opp_involutive. reflexivity. now rewrite opp_nonpos_nonneg. rewrite (abs_eq (-n)). reflexivity. now rewrite opp_nonneg_nonpos. Qed. Lemma abs_0 : abs 0 == 0. Proof. apply abs_eq. apply le_refl. Qed. Lemma abs_0_iff : forall n, abs n == 0 <-> n==0. Proof. split. destruct_max n; auto. now rewrite eq_opp_l, opp_0. intros EQ; rewrite EQ. rewrite abs_eq; auto using eq_refl, le_refl. Qed. Lemma abs_pos : forall n, 0 < abs n <-> n~=0. Proof. intros. rewrite <- abs_0_iff. split; [intros LT| intros NEQ]. intro EQ. rewrite EQ in LT. now elim (lt_irrefl 0). assert (LE : 0 <= abs n) by apply abs_nonneg. rewrite lt_eq_cases in LE; destruct LE; auto. elim NEQ; auto with relations. Qed. Lemma abs_eq_or_opp : forall n, abs n == n \/ abs n == -n. Proof. intros. destruct_max n; auto with relations. Qed. Lemma abs_or_opp_abs : forall n, n == abs n \/ n == - abs n. Proof. intros. destruct_max n; rewrite ? opp_involutive; auto with relations. Qed. Lemma abs_involutive : forall n, abs (abs n) == abs n. Proof. intros. apply abs_eq. apply abs_nonneg. Qed. Lemma abs_spec : forall n, (0 <= n /\ abs n == n) \/ (n < 0 /\ abs n == -n). Proof. intros. destruct (le_gt_cases 0 n). left; split; auto. now apply abs_eq. right; split; auto. apply abs_neq. now apply lt_le_incl. Qed. Lemma abs_case_strong : forall (P:t->Prop) n, Proper (eq==>iff) P -> (0<=n -> P n) -> (n<=0 -> P (-n)) -> P (abs n). Proof. intros. destruct_max n; auto. Qed. Lemma abs_case : forall (P:t->Prop) n, Proper (eq==>iff) P -> P n -> P (-n) -> P (abs n). Proof. intros. now apply abs_case_strong. Qed. Lemma abs_eq_cases : forall n m, abs n == abs m -> n == m \/ n == - m. Proof. intros n m EQ. destruct (abs_or_opp_abs n) as [EQn|EQn]. rewrite EQn, EQ. apply abs_eq_or_opp. rewrite EQn, EQ, opp_inj_wd, eq_opp_l, or_comm. apply abs_eq_or_opp. Qed. Lemma abs_lt : forall a b, abs a < b <-> -b < a < b. Proof. intros a b. destruct (abs_spec a) as [[LE EQ]|[LT EQ]]; rewrite EQ; clear EQ. split; try split; try destruct 1; try order. apply lt_le_trans with 0; trivial. apply opp_neg_pos; order. rewrite opp_lt_mono, opp_involutive. split; try split; try destruct 1; try order. apply lt_le_trans with 0; trivial. apply opp_nonpos_nonneg; order. Qed. 
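(** Added illustration (not part of the original development): [abs_opp]
    and [abs_lt] above, checked on small constants in the concrete
    [ZArith] implementation (an assumption of this sketch):
<<
Require Import ZArith. Open Scope Z_scope.
Goal Z.abs (-7) = Z.abs 7.                                   (* abs_opp *)
Proof. reflexivity. Qed.
Goal (Z.abs (-3) <? 5) = andb (-5 <? -3) (-3 <? 5).          (* abs_lt, both sides true *)
Proof. reflexivity. Qed.
>>
*)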
Lemma abs_le : forall a b, abs a <= b <-> -b <= a <= b. Proof. intros a b. destruct (abs_spec a) as [[LE EQ]|[LT EQ]]; rewrite EQ; clear EQ. split; try split; try destruct 1; try order. apply le_trans with 0; trivial. apply opp_nonpos_nonneg; order. rewrite opp_le_mono, opp_involutive. split; try split; try destruct 1; try order. apply le_trans with 0. order. apply opp_nonpos_nonneg; order. Qed. (** Triangular inequality *) Lemma abs_triangle : forall n m, abs (n + m) <= abs n + abs m. Proof. intros. destruct_max n; destruct_max m. rewrite abs_eq. apply le_refl. now apply add_nonneg_nonneg. destruct_max (n+m); try rewrite opp_add_distr; apply add_le_mono_l || apply add_le_mono_r. apply le_trans with 0; auto. now rewrite opp_nonneg_nonpos. apply le_trans with 0; auto. now rewrite opp_nonpos_nonneg. destruct_max (n+m); try rewrite opp_add_distr; apply add_le_mono_l || apply add_le_mono_r. apply le_trans with 0; auto. now rewrite opp_nonneg_nonpos. apply le_trans with 0; auto. now rewrite opp_nonpos_nonneg. rewrite abs_neq, opp_add_distr. apply le_refl. now apply add_nonpos_nonpos. Qed. Lemma abs_sub_triangle : forall n m, abs n - abs m <= abs (n-m). Proof. intros. rewrite le_sub_le_add_l, add_comm. rewrite <- (sub_simpl_r n m) at 1. apply abs_triangle. Qed. (** Absolute value and multiplication *) Lemma abs_mul : forall n m, abs (n * m) == abs n * abs m. Proof. assert (H : forall n m, 0<=n -> abs (n*m) == n * abs m). intros. destruct_max m. rewrite abs_eq. apply eq_refl. now apply mul_nonneg_nonneg. rewrite abs_neq, mul_opp_r. reflexivity. now apply mul_nonneg_nonpos . intros. destruct_max n. now apply H. rewrite <- mul_opp_opp, H, abs_opp. reflexivity. now apply opp_nonneg_nonpos. Qed. Lemma abs_square : forall n, abs n * abs n == n * n. Proof. intros. rewrite <- abs_mul. apply abs_eq. apply le_0_square. Qed. (** Some results about the sign function. *) Ltac destruct_sgn n := let LT := fresh "LT" in let EQ := fresh "EQ" in let GT := fresh "GT" in destruct (lt_trichotomy 0 n) as [LT|[EQ|GT]]; [rewrite (sgn_pos n) by auto| rewrite (sgn_null n) by auto with relations| rewrite (sgn_neg n) by auto]. Instance sgn_wd : Proper (eq==>eq) sgn. Proof. intros x y Hxy. destruct_sgn x. rewrite sgn_pos; auto with relations. rewrite <- Hxy; auto. rewrite sgn_null; auto with relations. rewrite <- Hxy; auto with relations. rewrite sgn_neg; auto with relations. rewrite <- Hxy; auto. Qed. Lemma sgn_spec : forall n, 0 < n /\ sgn n == 1 \/ 0 == n /\ sgn n == 0 \/ 0 > n /\ sgn n == -1. Proof. intros n. destruct_sgn n; [left|right;left|right;right]; auto with relations. Qed. Lemma sgn_0 : sgn 0 == 0. Proof. now apply sgn_null. Qed. Lemma sgn_pos_iff : forall n, sgn n == 1 <-> 0 n==0. Proof. split; try apply sgn_null. destruct_sgn n; auto with relations. intros. elim (lt_neq 0 1); auto with relations. apply lt_0_1. intros. elim (lt_neq (-1) 0); auto. rewrite opp_neg_pos. apply lt_0_1. Qed. Lemma sgn_neg_iff : forall n, sgn n == -1 <-> n<0. Proof. split; try apply sgn_neg. destruct_sgn n; auto with relations. intros. elim (lt_neq (-1) 1); auto with relations. apply lt_trans with 0. rewrite opp_neg_pos. apply lt_0_1. apply lt_0_1. intros. elim (lt_neq (-1) 0); auto with relations. rewrite opp_neg_pos. apply lt_0_1. Qed. Lemma sgn_opp : forall n, sgn (-n) == - sgn n. Proof. intros. destruct_sgn n. apply sgn_neg. now rewrite opp_neg_pos. setoid_replace n with 0 by auto with relations. rewrite opp_0. apply sgn_0. rewrite opp_involutive. apply sgn_pos. now rewrite opp_pos_neg. Qed. 
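(* Worked example, added as a hedged note and not part of the original file:
   the lemmas above on concrete values of an integer instance.
   [abs_triangle] with n = 3, m = -5:  abs (3 + (-5)) = 2 <= abs 3 + abs (-5) = 8.
   [abs_mul] with the same values:     abs (3 * (-5)) = 15 = abs 3 * abs (-5).
   [sgn_opp] with n = 7:               sgn (-7) = - sgn 7 = -1. *)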
Lemma sgn_nonneg : forall n, 0 <= sgn n <-> 0 <= n. Proof. split. destruct_sgn n; intros. now apply lt_le_incl. order. elim (lt_irrefl 0). apply lt_le_trans with 1; auto using lt_0_1. now rewrite <- opp_nonneg_nonpos. rewrite lt_eq_cases; destruct 1. rewrite sgn_pos by auto. apply lt_le_incl, lt_0_1. rewrite sgn_null by auto with relations. apply le_refl. Qed. Lemma sgn_nonpos : forall n, sgn n <= 0 <-> n <= 0. Proof. intros. rewrite <- 2 opp_nonneg_nonpos, <- sgn_opp. apply sgn_nonneg. Qed. Lemma sgn_mul : forall n m, sgn (n*m) == sgn n * sgn m. Proof. intros. destruct_sgn n; nzsimpl. destruct_sgn m. apply sgn_pos. now apply mul_pos_pos. apply sgn_null. rewrite eq_mul_0; auto with relations. apply sgn_neg. now apply mul_pos_neg. apply sgn_null. rewrite eq_mul_0; auto with relations. destruct_sgn m; try rewrite mul_opp_opp; nzsimpl. apply sgn_neg. now apply mul_neg_pos. apply sgn_null. rewrite eq_mul_0; auto with relations. apply sgn_pos. now apply mul_neg_neg. Qed. Lemma sgn_abs : forall n, n * sgn n == abs n. Proof. intros. symmetry. destruct_sgn n; try rewrite mul_opp_r; nzsimpl. apply abs_eq. now apply lt_le_incl. rewrite abs_0_iff; auto with relations. apply abs_neq. now apply lt_le_incl. Qed. Lemma abs_sgn : forall n, abs n * sgn n == n. Proof. intros. destruct_sgn n; try rewrite mul_opp_r; nzsimpl; auto. apply abs_eq. now apply lt_le_incl. rewrite eq_opp_l. apply abs_neq. now apply lt_le_incl. Qed. Lemma sgn_sgn : forall x, sgn (sgn x) == sgn x. Proof. intros. destruct (sgn_spec x) as [(LT,EQ)|[(EQ',EQ)|(LT,EQ)]]; rewrite EQ. apply sgn_pos, lt_0_1. now apply sgn_null. apply sgn_neg. rewrite opp_neg_pos. apply lt_0_1. Qed. End ZSgnAbsProp. coq-8.4pl2/theories/Numbers/Integer/Abstract/ZLt.v0000640000175000001440000000672312010532755021134 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n < 0 \/ n > 0. Proof. intro; apply lt_gt_cases. Qed. Theorem nonpos_pos_cases : forall n, n <= 0 \/ n > 0. Proof. intro; apply le_gt_cases. Qed. Theorem neg_nonneg_cases : forall n, n < 0 \/ n >= 0. Proof. intro; apply lt_ge_cases. Qed. Theorem nonpos_nonneg_cases : forall n, n <= 0 \/ n >= 0. Proof. intro; apply le_ge_cases. Qed. Ltac zinduct n := induction_maker n ltac:(apply order_induction_0). (** Theorems that are either not valid on N or have different proofs on N and Z *) Theorem lt_pred_l : forall n, P n < n. Proof. intro n; rewrite <- (succ_pred n) at 2; apply lt_succ_diag_r. Qed. Theorem le_pred_l : forall n, P n <= n. Proof. intro; apply lt_le_incl; apply lt_pred_l. Qed. Theorem lt_le_pred : forall n m, n < m <-> n <= P m. Proof. intros n m; rewrite <- (succ_pred m); rewrite pred_succ. apply lt_succ_r. Qed. Theorem nle_pred_r : forall n, ~ n <= P n. Proof. intro; rewrite <- lt_le_pred; apply lt_irrefl. Qed. Theorem lt_pred_le : forall n m, P n < m <-> n <= m. Proof. intros n m; rewrite <- (succ_pred n) at 2. symmetry; apply le_succ_l. Qed. Theorem lt_lt_pred : forall n m, n < m -> P n < m. Proof. intros; apply lt_pred_le; now apply lt_le_incl. Qed. Theorem le_le_pred : forall n m, n <= m -> P n <= m. Proof. intros; apply lt_le_incl; now apply lt_pred_le. Qed. Theorem lt_pred_lt : forall n m, n < P m -> n < m. Proof. intros n m H; apply lt_trans with (P m); [assumption | apply lt_pred_l]. Qed. Theorem le_pred_lt : forall n m, n <= P m -> n <= m. Proof. intros; apply lt_le_incl; now apply lt_le_pred. Qed. Theorem pred_lt_mono : forall n m, n < m <-> P n < P m. Proof. 
intros; rewrite lt_le_pred; symmetry; apply lt_pred_le. Qed. Theorem pred_le_mono : forall n m, n <= m <-> P n <= P m. Proof. intros; rewrite <- lt_pred_le; now rewrite lt_le_pred. Qed. Theorem lt_succ_lt_pred : forall n m, S n < m <-> n < P m. Proof. intros n m; now rewrite (pred_lt_mono (S n) m), pred_succ. Qed. Theorem le_succ_le_pred : forall n m, S n <= m <-> n <= P m. Proof. intros n m; now rewrite (pred_le_mono (S n) m), pred_succ. Qed. Theorem lt_pred_lt_succ : forall n m, P n < m <-> n < S m. Proof. intros; rewrite lt_pred_le; symmetry; apply lt_succ_r. Qed. Theorem le_pred_lt_succ : forall n m, P n <= m <-> n <= S m. Proof. intros n m; now rewrite (pred_le_mono n (S m)), pred_succ. Qed. Theorem neq_pred_l : forall n, P n ~= n. Proof. intro; apply lt_neq; apply lt_pred_l. Qed. Theorem lt_m1_r : forall n m, n < m -> m < 0 -> n < -1. Proof. intros n m H1 H2. apply lt_le_pred in H2. setoid_replace (P 0) with (-1) in H2. now apply lt_le_trans with m. apply eq_opp_r. now rewrite one_succ, opp_pred, opp_0. Qed. End ZOrderProp. coq-8.4pl2/theories/Numbers/Integer/Binary/0000750000175000001440000000000012127276550017713 5ustar notinuserscoq-8.4pl2/theories/Numbers/Integer/Binary/ZBinary.v0000640000175000001440000000277312010532755021463 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y<=x -> x=y]. *) Section TestOrder. Let test : forall x y, x<=y -> y<=x -> x=y. Proof. z_order. Qed. End TestOrder. (** Z forms a ring *) (*Lemma Zring : ring_theory 0 1 NZadd NZmul NZsub Z.opp NZeq. Proof. constructor. exact Zadd_0_l. exact Zadd_comm. exact Zadd_assoc. exact Zmul_1_l. exact Zmul_comm. exact Zmul_assoc. exact Zmul_add_distr_r. intros; now rewrite Zadd_opp_minus. exact Zadd_opp_r. Qed. Add Ring ZR : Zring.*) coq-8.4pl2/theories/Numbers/Integer/SpecViaZ/0000750000175000001440000000000012127276550020153 5ustar notinuserscoq-8.4pl2/theories/Numbers/Integer/SpecViaZ/ZSig.v0000640000175000001440000001163112010532755021212 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Z. Local Notation "[ x ]" := (to_Z x). Definition eq x y := [x] = [y]. Definition lt x y := [x] < [y]. Definition le x y := [x] <= [y]. Parameter of_Z : Z -> t. Parameter spec_of_Z: forall x, to_Z (of_Z x) = x. Parameter compare : t -> t -> comparison. Parameter eqb : t -> t -> bool. Parameter ltb : t -> t -> bool. Parameter leb : t -> t -> bool. Parameter min : t -> t -> t. Parameter max : t -> t -> t. Parameter zero : t. Parameter one : t. Parameter two : t. Parameter minus_one : t. Parameter succ : t -> t. Parameter add : t -> t -> t. Parameter pred : t -> t. Parameter sub : t -> t -> t. Parameter opp : t -> t. Parameter mul : t -> t -> t. Parameter square : t -> t. Parameter pow_pos : t -> positive -> t. Parameter pow_N : t -> N -> t. Parameter pow : t -> t -> t. Parameter sqrt : t -> t. Parameter log2 : t -> t. Parameter div_eucl : t -> t -> t * t. Parameter div : t -> t -> t. Parameter modulo : t -> t -> t. Parameter quot : t -> t -> t. Parameter rem : t -> t -> t. Parameter gcd : t -> t -> t. Parameter sgn : t -> t. Parameter abs : t -> t. Parameter even : t -> bool. Parameter odd : t -> bool. Parameter testbit : t -> t -> bool. Parameter shiftr : t -> t -> t. Parameter shiftl : t -> t -> t. Parameter land : t -> t -> t. Parameter lor : t -> t -> t. Parameter ldiff : t -> t -> t. Parameter lxor : t -> t -> t. 
Parameter div2 : t -> t. Parameter spec_compare: forall x y, compare x y = ([x] ?= [y]). Parameter spec_eqb : forall x y, eqb x y = ([x] =? [y]). Parameter spec_ltb : forall x y, ltb x y = ([x] eq) succ. Program Instance pred_wd : Proper (eq ==> eq) pred. Program Instance add_wd : Proper (eq ==> eq ==> eq) add. Program Instance sub_wd : Proper (eq ==> eq ==> eq) sub. Program Instance mul_wd : Proper (eq ==> eq ==> eq) mul. Theorem pred_succ : forall n, pred (succ n) == n. Proof. intros. zify. auto with zarith. Qed. Theorem one_succ : 1 == succ 0. Proof. now zify. Qed. Theorem two_succ : 2 == succ 1. Proof. now zify. Qed. Section Induction. Variable A : ZZ.t -> Prop. Hypothesis A_wd : Proper (eq==>iff) A. Hypothesis A0 : A 0. Hypothesis AS : forall n, A n <-> A (succ n). Let B (z : Z) := A (of_Z z). Lemma B0 : B 0. Proof. unfold B; simpl. rewrite <- (A_wd 0); auto. zify. auto. Qed. Lemma BS : forall z : Z, B z -> B (z + 1). Proof. intros z H. unfold B in *. apply -> AS in H. setoid_replace (of_Z (z + 1)) with (succ (of_Z z)); auto. zify. auto. Qed. Lemma BP : forall z : Z, B z -> B (z - 1). Proof. intros z H. unfold B in *. rewrite AS. setoid_replace (succ (of_Z (z - 1))) with (of_Z z); auto. zify. auto with zarith. Qed. Lemma B_holds : forall z : Z, B z. Proof. intros; destruct (Z_lt_le_dec 0 z). apply natlike_ind; auto with zarith. apply B0. intros; apply BS; auto. replace z with (-(-z))%Z in * by (auto with zarith). remember (-z)%Z as z'. pattern z'; apply natlike_ind. apply B0. intros; rewrite Z.opp_succ; unfold Z.pred; apply BP; auto. subst z'; auto with zarith. Qed. Theorem bi_induction : forall n, A n. Proof. intro n. setoid_replace n with (of_Z (to_Z n)). apply B_holds. zify. auto. Qed. End Induction. Theorem add_0_l : forall n, 0 + n == n. Proof. intros. zify. auto with zarith. Qed. Theorem add_succ_l : forall n m, (succ n) + m == succ (n + m). Proof. intros. zify. auto with zarith. Qed. Theorem sub_0_r : forall n, n - 0 == n. Proof. intros. zify. auto with zarith. Qed. Theorem sub_succ_r : forall n m, n - (succ m) == pred (n - m). Proof. intros. zify. auto with zarith. Qed. Theorem mul_0_l : forall n, 0 * n == 0. Proof. intros. zify. auto with zarith. Qed. Theorem mul_succ_l : forall n m, (succ n) * m == n * m + m. Proof. intros. zify. ring. Qed. (** Order *) Lemma eqb_eq x y : eqb x y = true <-> x == y. Proof. zify. apply Z.eqb_eq. Qed. Lemma leb_le x y : leb x y = true <-> x <= y. Proof. zify. apply Z.leb_le. Qed. Lemma ltb_lt x y : ltb x y = true <-> x < y. Proof. zify. apply Z.ltb_lt. Qed. Lemma compare_eq_iff n m : compare n m = Eq <-> n == m. Proof. intros. zify. apply Z.compare_eq_iff. Qed. Lemma compare_lt_iff n m : compare n m = Lt <-> n < m. Proof. intros. zify. reflexivity. Qed. Lemma compare_le_iff n m : compare n m <> Gt <-> n <= m. Proof. intros. zify. reflexivity. Qed. Lemma compare_antisym n m : compare m n = CompOpp (compare n m). Proof. intros. zify. apply Z.compare_antisym. Qed. Include BoolOrderFacts ZZ ZZ ZZ [no inline]. Instance compare_wd : Proper (eq ==> eq ==> Logic.eq) compare. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance eqb_wd : Proper (eq ==> eq ==> Logic.eq) eqb. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance ltb_wd : Proper (eq ==> eq ==> Logic.eq) ltb. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance leb_wd : Proper (eq ==> eq ==> Logic.eq) leb. Proof. intros x x' Hx y y' Hy. zify. now rewrite Hx, Hy. Qed. Instance lt_wd : Proper (eq ==> eq ==> iff) lt. Proof. 
intros x x' Hx y y' Hy; unfold lt; rewrite Hx, Hy; intuition. Qed. Theorem lt_succ_r : forall n m, n < (succ m) <-> n <= m. Proof. intros. zify. omega. Qed. Theorem min_l : forall n m, n <= m -> min n m == n. Proof. intros n m. zify. omega with *. Qed. Theorem min_r : forall n m, m <= n -> min n m == m. Proof. intros n m. zify. omega with *. Qed. Theorem max_l : forall n m, m <= n -> max n m == n. Proof. intros n m. zify. omega with *. Qed. Theorem max_r : forall n m, n <= m -> max n m == m. Proof. intros n m. zify. omega with *. Qed. (** Part specific to integers, not natural numbers *) Theorem succ_pred : forall n, succ (pred n) == n. Proof. intros. zify. auto with zarith. Qed. (** Opp *) Program Instance opp_wd : Proper (eq ==> eq) opp. Theorem opp_0 : - 0 == 0. Proof. intros. zify. auto with zarith. Qed. Theorem opp_succ : forall n, - (succ n) == pred (- n). Proof. intros. zify. auto with zarith. Qed. (** Abs / Sgn *) Theorem abs_eq : forall n, 0 <= n -> abs n == n. Proof. intros n. zify. omega with *. Qed. Theorem abs_neq : forall n, n <= 0 -> abs n == -n. Proof. intros n. zify. omega with *. Qed. Theorem sgn_null : forall n, n==0 -> sgn n == 0. Proof. intros n. zify. omega with *. Qed. Theorem sgn_pos : forall n, 0 sgn n == 1. Proof. intros n. zify. omega with *. Qed. Theorem sgn_neg : forall n, n<0 -> sgn n == opp 1. Proof. intros n. zify. omega with *. Qed. (** Power *) Program Instance pow_wd : Proper (eq==>eq==>eq) pow. Lemma pow_0_r : forall a, a^0 == 1. Proof. intros. now zify. Qed. Lemma pow_succ_r : forall a b, 0<=b -> a^(succ b) == a * a^b. Proof. intros a b. zify. intros. now rewrite Z.add_1_r, Z.pow_succ_r. Qed. Lemma pow_neg_r : forall a b, b<0 -> a^b == 0. Proof. intros a b. zify. intros Hb. destruct [b]; reflexivity || discriminate. Qed. Lemma pow_pow_N : forall a b, 0<=b -> a^b == pow_N a (Z.to_N (to_Z b)). Proof. intros a b. zify. intros Hb. now rewrite spec_pow_N, Z2N.id. Qed. Lemma pow_pos_N : forall a p, pow_pos a p == pow_N a (Npos p). Proof. intros a b. red. now rewrite spec_pow_N, spec_pow_pos. Qed. (** Square *) Lemma square_spec n : square n == n * n. Proof. now zify. Qed. (** Sqrt *) Lemma sqrt_spec : forall n, 0<=n -> (sqrt n)*(sqrt n) <= n /\ n < (succ (sqrt n))*(succ (sqrt n)). Proof. intros n. zify. apply Z.sqrt_spec. Qed. Lemma sqrt_neg : forall n, n<0 -> sqrt n == 0. Proof. intros n. zify. apply Z.sqrt_neg. Qed. (** Log2 *) Lemma log2_spec : forall n, 0 2^(log2 n) <= n /\ n < 2^(succ (log2 n)). Proof. intros n. zify. apply Z.log2_spec. Qed. Lemma log2_nonpos : forall n, n<=0 -> log2 n == 0. Proof. intros n. zify. apply Z.log2_nonpos. Qed. (** Even / Odd *) Definition Even n := exists m, n == 2*m. Definition Odd n := exists m, n == 2*m+1. Lemma even_spec n : even n = true <-> Even n. Proof. unfold Even. zify. rewrite Z.even_spec. split; intros (m,Hm). - exists (of_Z m). now zify. - exists [m]. revert Hm. now zify. Qed. Lemma odd_spec n : odd n = true <-> Odd n. Proof. unfold Odd. zify. rewrite Z.odd_spec. split; intros (m,Hm). - exists (of_Z m). now zify. - exists [m]. revert Hm. now zify. Qed. (** Div / Mod *) Program Instance div_wd : Proper (eq==>eq==>eq) div. Program Instance mod_wd : Proper (eq==>eq==>eq) modulo. Theorem div_mod : forall a b, ~b==0 -> a == b*(div a b) + (modulo a b). Proof. intros a b. zify. intros. apply Z.div_mod; auto. Qed. Theorem mod_pos_bound : forall a b, 0 < b -> 0 <= modulo a b /\ modulo a b < b. Proof. intros a b. zify. intros. apply Z_mod_lt; auto with zarith. Qed. 
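(* Worked example, added as a hedged note and not part of the original file:
   since [div] and [modulo] are specified through [Z] (the proofs above all go
   by [zify] followed by standard [Z] lemmas), they follow the floor
   convention.  With a = 7, b = 3: 7 == 3*2 + 1 and 0 <= 1 < 3, as stated by
   [div_mod] and [mod_pos_bound]; with a = -7, b = 3: -7 == 3*(-3) + 2 and
   again 0 <= 2 < 3. *)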
Theorem mod_neg_bound : forall a b, b < 0 -> b < modulo a b /\ modulo a b <= 0. Proof. intros a b. zify. intros. apply Z_mod_neg; auto with zarith. Qed. Definition mod_bound_pos : forall a b, 0<=a -> 0 0 <= modulo a b /\ modulo a b < b := fun a b _ H => mod_pos_bound a b H. (** Quot / Rem *) Program Instance quot_wd : Proper (eq==>eq==>eq) quot. Program Instance rem_wd : Proper (eq==>eq==>eq) rem. Theorem quot_rem : forall a b, ~b==0 -> a == b*(quot a b) + rem a b. Proof. intros a b. zify. apply Z.quot_rem. Qed. Theorem rem_bound_pos : forall a b, 0<=a -> 0 0 <= rem a b /\ rem a b < b. Proof. intros a b. zify. apply Z.rem_bound_pos. Qed. Theorem rem_opp_l : forall a b, ~b==0 -> rem (-a) b == -(rem a b). Proof. intros a b. zify. apply Z.rem_opp_l. Qed. Theorem rem_opp_r : forall a b, ~b==0 -> rem a (-b) == rem a b. Proof. intros a b. zify. apply Z.rem_opp_r. Qed. (** Gcd *) Definition divide n m := exists p, m == p*n. Local Notation "( x | y )" := (divide x y) (at level 0). Lemma spec_divide : forall n m, (n|m) <-> Z.divide [n] [m]. Proof. intros n m. split. - intros (p,H). exists [p]. revert H; now zify. - intros (z,H). exists (of_Z z). now zify. Qed. Lemma gcd_divide_l : forall n m, (gcd n m | n). Proof. intros n m. apply spec_divide. zify. apply Z.gcd_divide_l. Qed. Lemma gcd_divide_r : forall n m, (gcd n m | m). Proof. intros n m. apply spec_divide. zify. apply Z.gcd_divide_r. Qed. Lemma gcd_greatest : forall n m p, (p|n) -> (p|m) -> (p|gcd n m). Proof. intros n m p. rewrite !spec_divide. zify. apply Z.gcd_greatest. Qed. Lemma gcd_nonneg : forall n m, 0 <= gcd n m. Proof. intros. zify. apply Z.gcd_nonneg. Qed. (** Bitwise operations *) Program Instance testbit_wd : Proper (eq==>eq==>Logic.eq) testbit. Lemma testbit_odd_0 : forall a, testbit (2*a+1) 0 = true. Proof. intros. zify. apply Z.testbit_odd_0. Qed. Lemma testbit_even_0 : forall a, testbit (2*a) 0 = false. Proof. intros. zify. apply Z.testbit_even_0. Qed. Lemma testbit_odd_succ : forall a n, 0<=n -> testbit (2*a+1) (succ n) = testbit a n. Proof. intros a n. zify. apply Z.testbit_odd_succ. Qed. Lemma testbit_even_succ : forall a n, 0<=n -> testbit (2*a) (succ n) = testbit a n. Proof. intros a n. zify. apply Z.testbit_even_succ. Qed. Lemma testbit_neg_r : forall a n, n<0 -> testbit a n = false. Proof. intros a n. zify. apply Z.testbit_neg_r. Qed. Lemma shiftr_spec : forall a n m, 0<=m -> testbit (shiftr a n) m = testbit a (m+n). Proof. intros a n m. zify. apply Z.shiftr_spec. Qed. Lemma shiftl_spec_high : forall a n m, 0<=m -> n<=m -> testbit (shiftl a n) m = testbit a (m-n). Proof. intros a n m. zify. intros Hn H. now apply Z.shiftl_spec_high. Qed. Lemma shiftl_spec_low : forall a n m, m testbit (shiftl a n) m = false. Proof. intros a n m. zify. intros H. now apply Z.shiftl_spec_low. Qed. Lemma land_spec : forall a b n, testbit (land a b) n = testbit a n && testbit b n. Proof. intros a n m. zify. now apply Z.land_spec. Qed. Lemma lor_spec : forall a b n, testbit (lor a b) n = testbit a n || testbit b n. Proof. intros a n m. zify. now apply Z.lor_spec. Qed. Lemma ldiff_spec : forall a b n, testbit (ldiff a b) n = testbit a n && negb (testbit b n). Proof. intros a n m. zify. now apply Z.ldiff_spec. Qed. Lemma lxor_spec : forall a b n, testbit (lxor a b) n = xorb (testbit a n) (testbit b n). Proof. intros a n m. zify. now apply Z.lxor_spec. Qed. Lemma div2_spec : forall a, div2 a == shiftr a 1. Proof. intros a. zify. now apply Z.div2_spec. Qed. End ZTypeIsZAxioms. 
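(* Added note, hedged and not part of the original file: the functor above
   exposes two division pairs that differ only on negative arguments.
   [div]/[modulo] follow the floor convention of [Z.div]:
   (-7) / 3 = -3 and (-7) mod 3 = 2, the remainder taking the sign of the
   divisor.  [quot]/[rem] follow the truncated convention of [Z.quot]:
   (-7) ÷ 3 = -2 and Z.rem (-7) 3 = -1, the remainder taking the sign of the
   dividend.  Both pairs satisfy a = b*q + r. *)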
Module ZType_ZAxioms (ZZ : ZType) <: ZAxiomsSig <: OrderFunctions ZZ <: HasMinMax ZZ := ZZ <+ ZTypeIsZAxioms. coq-8.4pl2/theories/Numbers/Integer/BigZ/0000750000175000001440000000000012127276550017322 5ustar notinuserscoq-8.4pl2/theories/Numbers/Integer/BigZ/BigZ.v0000640000175000001440000001470012010532755020340 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y" := (y < x) (only parsing) : bigZ_scope. Notation "x >= y" := (y <= x) (only parsing) : bigZ_scope. Notation "x < y < z" := (x BigN.to_Z (BigZ.to_N n) = [n]. Proof. intros n; case n; simpl; intros p; generalize (BigN.spec_pos p); case (BigN.to_Z p); auto. intros p1 _ H1; case H1; auto. intros p1 H1; case H1; auto. Qed. (** [BigZ] is a ring *) Lemma BigZring : ring_theory 0 1 BigZ.add BigZ.mul BigZ.sub BigZ.opp BigZ.eq. Proof. constructor. exact BigZ.add_0_l. exact BigZ.add_comm. exact BigZ.add_assoc. exact BigZ.mul_1_l. exact BigZ.mul_comm. exact BigZ.mul_assoc. exact BigZ.mul_add_distr_r. symmetry. apply BigZ.add_opp_r. exact BigZ.add_opp_diag_r. Qed. Lemma BigZeqb_correct : forall x y, (x =? y) = true -> x==y. Proof. now apply BigZ.eqb_eq. Qed. Definition BigZ_of_N n := BigZ.of_Z (Z.of_N n). Lemma BigZpower : power_theory 1 BigZ.mul BigZ.eq BigZ_of_N BigZ.pow. Proof. constructor. intros. unfold BigZ.eq, BigZ_of_N. rewrite BigZ.spec_pow, BigZ.spec_of_Z. rewrite Zpower_theory.(rpow_pow_N). destruct n; simpl. reflexivity. induction p; simpl; intros; BigZ.zify; rewrite ?IHp; auto. Qed. Lemma BigZdiv : div_theory BigZ.eq BigZ.add BigZ.mul (@id _) (fun a b => if b =? 0 then (0,a) else BigZ.div_eucl a b). Proof. constructor. unfold id. intros a b. BigZ.zify. case Z.eqb_spec. BigZ.zify. auto with zarith. intros NEQ. generalize (BigZ.spec_div_eucl a b). generalize (Z_div_mod_full [a] [b] NEQ). destruct BigZ.div_eucl as (q,r), Z.div_eucl as (q',r'). intros (EQ,_). injection 1. intros EQr EQq. BigZ.zify. rewrite EQr, EQq; auto. Qed. (** Detection of constants *) Ltac isBigZcst t := match t with | BigZ.Pos ?t => isBigNcst t | BigZ.Neg ?t => isBigNcst t | BigZ.zero => constr:true | BigZ.one => constr:true | BigZ.two => constr:true | BigZ.minus_one => constr:true | _ => constr:false end. Ltac BigZcst t := match isBigZcst t with | true => constr:t | false => constr:NotConstant end. Ltac BigZ_to_N t := match t with | BigZ.Pos ?t => BigN_to_N t | BigZ.zero => constr:0%N | BigZ.one => constr:1%N | BigZ.two => constr:2%N | _ => constr:NotConstant end. (** Registration for the "ring" tactic *) Add Ring BigZr : BigZring (decidable BigZeqb_correct, constants [BigZcst], power_tac BigZpower [BigZ_to_N], div BigZdiv). Section TestRing. Let test : forall x y, 1 + x*y + x^2 + 1 == 1*1 + 1 + (y + 1*x)*x. Proof. intros. ring_simplify. reflexivity. Qed. Let test' : forall x y, 1 + x*y + x^2 - 1*1 - y*x + 1*(-x)*x == 0. Proof. intros. ring_simplify. reflexivity. Qed. End TestRing. (** [BigZ] also benefits from an "order" tactic *) Ltac bigZ_order := BigZ.order. Section TestOrder. Let test : forall x y : bigZ, x<=y -> y<=x -> x==y. Proof. bigZ_order. Qed. End TestOrder. (** We can use at least a bit of (r)omega by translating to [Z]. *) Section TestOmega. Let test : forall x y : bigZ, x<=y -> y<=x -> x==y. Proof. intros x y. BigZ.zify. omega. Qed. End TestOmega. 
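(* Added illustration, a hedged sketch that is not part of the original test
   suite: a further goal in the style of [TestRing] above, checking that the
   registered [BigZr] ring structure also handles subtraction and opposite. *)
Section TestRingSub.
 Let test_sub : forall x y : bigZ, (x - y) * (x + y) == x*x - y*y.
 Proof. intros. ring. Qed.
End TestRingSub.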
(** Todo: micromega *) coq-8.4pl2/theories/Numbers/Integer/BigZ/ZMake.v0000640000175000001440000005305512010532755020522 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t_ | Neg : NN.t -> t_. Definition t := t_. Bind Scope abstract_scope with t t_. Definition zero := Pos NN.zero. Definition one := Pos NN.one. Definition two := Pos NN.two. Definition minus_one := Neg NN.one. Definition of_Z x := match x with | Zpos x => Pos (NN.of_N (Npos x)) | Z0 => zero | Zneg x => Neg (NN.of_N (Npos x)) end. Definition to_Z x := match x with | Pos nx => NN.to_Z nx | Neg nx => Z.opp (NN.to_Z nx) end. Theorem spec_of_Z: forall x, to_Z (of_Z x) = x. Proof. intros x; case x; unfold to_Z, of_Z, zero. exact NN.spec_0. intros; rewrite NN.spec_of_N; auto. intros; rewrite NN.spec_of_N; auto. Qed. Definition eq x y := (to_Z x = to_Z y). Theorem spec_0: to_Z zero = 0. exact NN.spec_0. Qed. Theorem spec_1: to_Z one = 1. exact NN.spec_1. Qed. Theorem spec_2: to_Z two = 2. exact NN.spec_2. Qed. Theorem spec_m1: to_Z minus_one = -1. simpl; rewrite NN.spec_1; auto. Qed. Definition compare x y := match x, y with | Pos nx, Pos ny => NN.compare nx ny | Pos nx, Neg ny => match NN.compare nx NN.zero with | Gt => Gt | _ => NN.compare ny NN.zero end | Neg nx, Pos ny => match NN.compare NN.zero nx with | Lt => Lt | _ => NN.compare NN.zero ny end | Neg nx, Neg ny => NN.compare ny nx end. Theorem spec_compare : forall x y, compare x y = Z.compare (to_Z x) (to_Z y). Proof. unfold compare, to_Z. destruct x as [x|x], y as [y|y]; rewrite ?NN.spec_compare, ?NN.spec_0, ?Z.compare_opp; auto; assert (Hx:=NN.spec_pos x); assert (Hy:=NN.spec_pos y); set (X:=NN.to_Z x) in *; set (Y:=NN.to_Z y) in *; clearbody X Y. - destruct (Z.compare_spec X 0) as [EQ|LT|GT]. + rewrite <- Z.opp_0 in EQ. now rewrite EQ, Z.compare_opp. + exfalso. omega. + symmetry. change (X > -Y). omega. - destruct (Z.compare_spec 0 X) as [EQ|LT|GT]. + rewrite <- EQ, Z.opp_0; auto. + symmetry. change (-X < Y). omega. + exfalso. omega. Qed. Definition eqb x y := match compare x y with | Eq => true | _ => false end. Theorem spec_eqb x y : eqb x y = Z.eqb (to_Z x) (to_Z y). Proof. apply Bool.eq_iff_eq_true. unfold eqb. rewrite Z.eqb_eq, <- Z.compare_eq_iff, spec_compare. split; [now destruct Z.compare | now intros ->]. Qed. Definition lt n m := to_Z n < to_Z m. Definition le n m := to_Z n <= to_Z m. Definition ltb (x y : t) : bool := match compare x y with | Lt => true | _ => false end. Theorem spec_ltb x y : ltb x y = Z.ltb (to_Z x) (to_Z y). Proof. apply Bool.eq_iff_eq_true. rewrite Z.ltb_lt. unfold Z.lt, ltb. rewrite spec_compare. split; [now destruct Z.compare | now intros ->]. Qed. Definition leb (x y : t) : bool := match compare x y with | Gt => false | _ => true end. Theorem spec_leb x y : leb x y = Z.leb (to_Z x) (to_Z y). Proof. apply Bool.eq_iff_eq_true. rewrite Z.leb_le. unfold Z.le, leb. rewrite spec_compare. destruct Z.compare; split; try easy. now destruct 1. Qed. Definition min n m := match compare n m with Gt => m | _ => n end. Definition max n m := match compare n m with Lt => m | _ => n end. Theorem spec_min : forall n m, to_Z (min n m) = Z.min (to_Z n) (to_Z m). Proof. unfold min, Z.min. intros. rewrite spec_compare. destruct Z.compare; auto. Qed. Theorem spec_max : forall n m, to_Z (max n m) = Z.max (to_Z n) (to_Z m). Proof. unfold max, Z.max. intros. rewrite spec_compare. destruct Z.compare; auto. Qed. 
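(* Added note, hedged and not part of the original file: in this construction
   an integer is a sign constructor around an element of [NN], so [Pos x]
   denotes [NN.to_Z x] and [Neg x] denotes [- NN.to_Z x]; in particular
   [Pos NN.zero] and [Neg NN.zero] both represent 0, which is why several
   operations compare with [NN.zero] first.  For instance, writing 3 and 5 for
   the corresponding [NN] values, [compare (Pos 3) (Neg 5)] first checks
   [NN.compare 3 NN.zero = Gt] and answers [Gt], matching 3 > -5 on the [Z]
   side. *)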
Definition to_N x := match x with | Pos nx => nx | Neg nx => nx end. Definition abs x := Pos (to_N x). Theorem spec_abs: forall x, to_Z (abs x) = Z.abs (to_Z x). Proof. intros x; case x; clear x; intros x; assert (F:=NN.spec_pos x). simpl; rewrite Z.abs_eq; auto. simpl; rewrite Z.abs_neq; simpl; auto with zarith. Qed. Definition opp x := match x with | Pos nx => Neg nx | Neg nx => Pos nx end. Theorem spec_opp: forall x, to_Z (opp x) = - to_Z x. Proof. intros x; case x; simpl; auto with zarith. Qed. Definition succ x := match x with | Pos n => Pos (NN.succ n) | Neg n => match NN.compare NN.zero n with | Lt => Neg (NN.pred n) | _ => one end end. Theorem spec_succ: forall n, to_Z (succ n) = to_Z n + 1. Proof. intros x; case x; clear x; intros x. exact (NN.spec_succ x). simpl. rewrite NN.spec_compare. case Z.compare_spec; rewrite ?NN.spec_0; simpl. intros HH; rewrite <- HH; rewrite NN.spec_1; ring. intros HH; rewrite NN.spec_pred, Z.max_r; auto with zarith. generalize (NN.spec_pos x); auto with zarith. Qed. Definition add x y := match x, y with | Pos nx, Pos ny => Pos (NN.add nx ny) | Pos nx, Neg ny => match NN.compare nx ny with | Gt => Pos (NN.sub nx ny) | Eq => zero | Lt => Neg (NN.sub ny nx) end | Neg nx, Pos ny => match NN.compare nx ny with | Gt => Neg (NN.sub nx ny) | Eq => zero | Lt => Pos (NN.sub ny nx) end | Neg nx, Neg ny => Neg (NN.add nx ny) end. Theorem spec_add: forall x y, to_Z (add x y) = to_Z x + to_Z y. Proof. unfold add, to_Z; intros [x | x] [y | y]; try (rewrite NN.spec_add; auto with zarith); rewrite NN.spec_compare; case Z.compare_spec; unfold zero; rewrite ?NN.spec_0, ?NN.spec_sub; omega with *. Qed. Definition pred x := match x with | Pos nx => match NN.compare NN.zero nx with | Lt => Pos (NN.pred nx) | _ => minus_one end | Neg nx => Neg (NN.succ nx) end. Theorem spec_pred: forall x, to_Z (pred x) = to_Z x - 1. Proof. unfold pred, to_Z, minus_one; intros [x | x]; try (rewrite NN.spec_succ; ring). rewrite NN.spec_compare; case Z.compare_spec; rewrite ?NN.spec_0, ?NN.spec_1, ?NN.spec_pred; generalize (NN.spec_pos x); omega with *. Qed. Definition sub x y := match x, y with | Pos nx, Pos ny => match NN.compare nx ny with | Gt => Pos (NN.sub nx ny) | Eq => zero | Lt => Neg (NN.sub ny nx) end | Pos nx, Neg ny => Pos (NN.add nx ny) | Neg nx, Pos ny => Neg (NN.add nx ny) | Neg nx, Neg ny => match NN.compare nx ny with | Gt => Neg (NN.sub nx ny) | Eq => zero | Lt => Pos (NN.sub ny nx) end end. Theorem spec_sub: forall x y, to_Z (sub x y) = to_Z x - to_Z y. Proof. unfold sub, to_Z; intros [x | x] [y | y]; try (rewrite NN.spec_add; auto with zarith); rewrite NN.spec_compare; case Z.compare_spec; unfold zero; rewrite ?NN.spec_0, ?NN.spec_sub; omega with *. Qed. Definition mul x y := match x, y with | Pos nx, Pos ny => Pos (NN.mul nx ny) | Pos nx, Neg ny => Neg (NN.mul nx ny) | Neg nx, Pos ny => Neg (NN.mul nx ny) | Neg nx, Neg ny => Pos (NN.mul nx ny) end. Theorem spec_mul: forall x y, to_Z (mul x y) = to_Z x * to_Z y. Proof. unfold mul, to_Z; intros [x | x] [y | y]; rewrite NN.spec_mul; ring. Qed. Definition square x := match x with | Pos nx => Pos (NN.square nx) | Neg nx => Pos (NN.square nx) end. Theorem spec_square: forall x, to_Z (square x) = to_Z x * to_Z x. Proof. unfold square, to_Z; intros [x | x]; rewrite NN.spec_square; ring. Qed. Definition pow_pos x p := match x with | Pos nx => Pos (NN.pow_pos nx p) | Neg nx => match p with | xH => x | xO _ => Pos (NN.pow_pos nx p) | xI _ => Neg (NN.pow_pos nx p) end end. 
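(* Worked example, added as a hedged note (the numerals stand for the
   corresponding [NN] values): the sign analyses above behave as expected on
   small inputs.  [add (Pos 3) (Neg 5)]: since [NN.compare 3 5 = Lt], the
   result is [Neg (NN.sub 5 3)], i.e. -2.  [pow_pos (Neg 2) p]: for an odd
   exponent such as p = 3 (an [xI _] case) the result is
   [Neg (NN.pow_pos 2 3)], i.e. -8, while for an even exponent such as p = 2
   (an [xO _] case) it is [Pos (NN.pow_pos 2 2)], i.e. 4. *)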
Theorem spec_pow_pos: forall x n, to_Z (pow_pos x n) = to_Z x ^ Zpos n. Proof. assert (F0: forall x, (-x)^2 = x^2). intros x; rewrite Z.pow_2_r; ring. unfold pow_pos, to_Z; intros [x | x] [p | p |]; try rewrite NN.spec_pow_pos; try ring. assert (F: 0 <= 2 * Zpos p). assert (0 <= Zpos p); auto with zarith. rewrite Pos2Z.inj_xI; repeat rewrite Zpower_exp; auto with zarith. repeat rewrite Z.pow_mul_r; auto with zarith. rewrite F0; ring. assert (F: 0 <= 2 * Zpos p). assert (0 <= Zpos p); auto with zarith. rewrite Pos2Z.inj_xO; repeat rewrite Zpower_exp; auto with zarith. repeat rewrite Z.pow_mul_r; auto with zarith. rewrite F0; ring. Qed. Definition pow_N x n := match n with | N0 => one | Npos p => pow_pos x p end. Theorem spec_pow_N: forall x n, to_Z (pow_N x n) = to_Z x ^ Z.of_N n. Proof. destruct n; simpl. apply NN.spec_1. apply spec_pow_pos. Qed. Definition pow x y := match to_Z y with | Z0 => one | Zpos p => pow_pos x p | Zneg p => zero end. Theorem spec_pow: forall x y, to_Z (pow x y) = to_Z x ^ to_Z y. Proof. intros. unfold pow. destruct (to_Z y); simpl. apply NN.spec_1. apply spec_pow_pos. apply NN.spec_0. Qed. Definition log2 x := match x with | Pos nx => Pos (NN.log2 nx) | Neg nx => zero end. Theorem spec_log2: forall x, to_Z (log2 x) = Z.log2 (to_Z x). Proof. intros. destruct x as [p|p]; simpl. apply NN.spec_log2. rewrite NN.spec_0. destruct (Z_le_lt_eq_dec _ _ (NN.spec_pos p)) as [LT|EQ]. rewrite Z.log2_nonpos; auto with zarith. now rewrite <- EQ. Qed. Definition sqrt x := match x with | Pos nx => Pos (NN.sqrt nx) | Neg nx => Neg NN.zero end. Theorem spec_sqrt: forall x, to_Z (sqrt x) = Z.sqrt (to_Z x). Proof. destruct x as [p|p]; simpl. apply NN.spec_sqrt. rewrite NN.spec_0. destruct (Z_le_lt_eq_dec _ _ (NN.spec_pos p)) as [LT|EQ]. rewrite Z.sqrt_neg; auto with zarith. now rewrite <- EQ. Qed. Definition div_eucl x y := match x, y with | Pos nx, Pos ny => let (q, r) := NN.div_eucl nx ny in (Pos q, Pos r) | Pos nx, Neg ny => let (q, r) := NN.div_eucl nx ny in if NN.eqb NN.zero r then (Neg q, zero) else (Neg (NN.succ q), Neg (NN.sub ny r)) | Neg nx, Pos ny => let (q, r) := NN.div_eucl nx ny in if NN.eqb NN.zero r then (Neg q, zero) else (Neg (NN.succ q), Pos (NN.sub ny r)) | Neg nx, Neg ny => let (q, r) := NN.div_eucl nx ny in (Pos q, Neg r) end. Ltac break_nonneg x px EQx := let H := fresh "H" in assert (H:=NN.spec_pos x); destruct (NN.to_Z x) as [|px|px] eqn:EQx; [clear H|clear H|elim H; reflexivity]. Theorem spec_div_eucl: forall x y, let (q,r) := div_eucl x y in (to_Z q, to_Z r) = Z.div_eucl (to_Z x) (to_Z y). Proof. unfold div_eucl, to_Z. intros [x | x] [y | y]. (* Pos Pos *) generalize (NN.spec_div_eucl x y); destruct (NN.div_eucl x y); auto. (* Pos Neg *) generalize (NN.spec_div_eucl x y); destruct (NN.div_eucl x y) as (q,r). break_nonneg x px EQx; break_nonneg y py EQy; try (injection 1; intros Hr Hq; rewrite NN.spec_eqb, NN.spec_0, Hr; simpl; rewrite Hq, NN.spec_0; auto). change (- Zpos py) with (Zneg py). assert (GT : Zpos py > 0) by (compute; auto). generalize (Z_div_mod (Zpos px) (Zpos py) GT). unfold Z.div_eucl. destruct (Z.pos_div_eucl px (Zpos py)) as (q',r'). intros (EQ,MOD). injection 1. intros Hr' Hq'. rewrite NN.spec_eqb, NN.spec_0, Hr'. break_nonneg r pr EQr. subst; simpl. rewrite NN.spec_0; auto. subst. lazy iota beta delta [Z.eqb]. rewrite NN.spec_sub, NN.spec_succ, EQy, EQr. f_equal. omega with *. (* Neg Pos *) generalize (NN.spec_div_eucl x y); destruct (NN.div_eucl x y) as (q,r). 
break_nonneg x px EQx; break_nonneg y py EQy; try (injection 1; intros Hr Hq; rewrite NN.spec_eqb, NN.spec_0, Hr; simpl; rewrite Hq, NN.spec_0; auto). change (- Zpos px) with (Zneg px). assert (GT : Zpos py > 0) by (compute; auto). generalize (Z_div_mod (Zpos px) (Zpos py) GT). unfold Z.div_eucl. destruct (Z.pos_div_eucl px (Zpos py)) as (q',r'). intros (EQ,MOD). injection 1. intros Hr' Hq'. rewrite NN.spec_eqb, NN.spec_0, Hr'. break_nonneg r pr EQr. subst; simpl. rewrite NN.spec_0; auto. subst. lazy iota beta delta [Z.eqb]. rewrite NN.spec_sub, NN.spec_succ, EQy, EQr. f_equal. omega with *. (* Neg Neg *) generalize (NN.spec_div_eucl x y); destruct (NN.div_eucl x y) as (q,r). break_nonneg x px EQx; break_nonneg y py EQy; try (injection 1; intros Hr Hq; rewrite Hr, Hq; auto). simpl. intros <-; auto. Qed. Definition div x y := fst (div_eucl x y). Definition spec_div: forall x y, to_Z (div x y) = to_Z x / to_Z y. Proof. intros x y; generalize (spec_div_eucl x y); unfold div, Z.div. case div_eucl; case Z.div_eucl; simpl; auto. intros q r q11 r1 H; injection H; auto. Qed. Definition modulo x y := snd (div_eucl x y). Theorem spec_modulo: forall x y, to_Z (modulo x y) = to_Z x mod to_Z y. Proof. intros x y; generalize (spec_div_eucl x y); unfold modulo, Z.modulo. case div_eucl; case Z.div_eucl; simpl; auto. intros q r q11 r1 H; injection H; auto. Qed. Definition quot x y := match x, y with | Pos nx, Pos ny => Pos (NN.div nx ny) | Pos nx, Neg ny => Neg (NN.div nx ny) | Neg nx, Pos ny => Neg (NN.div nx ny) | Neg nx, Neg ny => Pos (NN.div nx ny) end. Definition rem x y := if eqb y zero then x else match x, y with | Pos nx, Pos ny => Pos (NN.modulo nx ny) | Pos nx, Neg ny => Pos (NN.modulo nx ny) | Neg nx, Pos ny => Neg (NN.modulo nx ny) | Neg nx, Neg ny => Neg (NN.modulo nx ny) end. Lemma spec_quot : forall x y, to_Z (quot x y) = (to_Z x) ÷ (to_Z y). Proof. intros [x|x] [y|y]; simpl; symmetry; rewrite NN.spec_div; (* Nota: we rely here on [forall a b, a ÷ 0 = b / 0] *) destruct (Z.eq_dec (NN.to_Z y) 0) as [EQ|NEQ]; try (rewrite EQ; now destruct (NN.to_Z x)); rewrite ?Z.quot_opp_r, ?Z.quot_opp_l, ?Z.opp_involutive, ?Z.opp_inj_wd; trivial; apply Z.quot_div_nonneg; generalize (NN.spec_pos x) (NN.spec_pos y); Z.order. Qed. Lemma spec_rem : forall x y, to_Z (rem x y) = Z.rem (to_Z x) (to_Z y). Proof. intros x y. unfold rem. rewrite spec_eqb, spec_0. case Z.eqb_spec; intros Hy. (* Nota: we rely here on [Z.rem a 0 = a] *) rewrite Hy. now destruct (to_Z x). destruct x as [x|x], y as [y|y]; simpl in *; symmetry; rewrite ?Z.eq_opp_l, ?Z.opp_0 in Hy; rewrite NN.spec_modulo, ?Z.rem_opp_r, ?Z.rem_opp_l, ?Z.opp_involutive, ?Z.opp_inj_wd; trivial; apply Z.rem_mod_nonneg; generalize (NN.spec_pos x) (NN.spec_pos y); Z.order. Qed. Definition gcd x y := match x, y with | Pos nx, Pos ny => Pos (NN.gcd nx ny) | Pos nx, Neg ny => Pos (NN.gcd nx ny) | Neg nx, Pos ny => Pos (NN.gcd nx ny) | Neg nx, Neg ny => Pos (NN.gcd nx ny) end. Theorem spec_gcd: forall a b, to_Z (gcd a b) = Z.gcd (to_Z a) (to_Z b). Proof. unfold gcd, Z.gcd, to_Z; intros [x | x] [y | y]; rewrite NN.spec_gcd; unfold Z.gcd; auto; case NN.to_Z; simpl; auto with zarith; try rewrite Z.abs_opp; auto; case NN.to_Z; simpl; auto with zarith. Qed. Definition sgn x := match compare zero x with | Lt => one | Eq => zero | Gt => minus_one end. Lemma spec_sgn : forall x, to_Z (sgn x) = Z.sgn (to_Z x). Proof. intros. unfold sgn. rewrite spec_compare. case Z.compare_spec. rewrite spec_0. intros <-; auto. rewrite spec_0, spec_1. symmetry. rewrite Z.sgn_pos_iff; auto. 
rewrite spec_0, spec_m1. symmetry. rewrite Z.sgn_neg_iff; auto with zarith. Qed. Definition even z := match z with | Pos n => NN.even n | Neg n => NN.even n end. Definition odd z := match z with | Pos n => NN.odd n | Neg n => NN.odd n end. Lemma spec_even : forall z, even z = Z.even (to_Z z). Proof. intros [n|n]; simpl; rewrite NN.spec_even; trivial. destruct (NN.to_Z n) as [|p|p]; now try destruct p. Qed. Lemma spec_odd : forall z, odd z = Z.odd (to_Z z). Proof. intros [n|n]; simpl; rewrite NN.spec_odd; trivial. destruct (NN.to_Z n) as [|p|p]; now try destruct p. Qed. Definition norm_pos z := match z with | Pos _ => z | Neg n => if NN.eqb n NN.zero then Pos n else z end. Definition testbit a n := match norm_pos n, norm_pos a with | Pos p, Pos a => NN.testbit a p | Pos p, Neg a => negb (NN.testbit (NN.pred a) p) | Neg p, _ => false end. Definition shiftl a n := match norm_pos a, n with | Pos a, Pos n => Pos (NN.shiftl a n) | Pos a, Neg n => Pos (NN.shiftr a n) | Neg a, Pos n => Neg (NN.shiftl a n) | Neg a, Neg n => Neg (NN.succ (NN.shiftr (NN.pred a) n)) end. Definition shiftr a n := shiftl a (opp n). Definition lor a b := match norm_pos a, norm_pos b with | Pos a, Pos b => Pos (NN.lor a b) | Neg a, Pos b => Neg (NN.succ (NN.ldiff (NN.pred a) b)) | Pos a, Neg b => Neg (NN.succ (NN.ldiff (NN.pred b) a)) | Neg a, Neg b => Neg (NN.succ (NN.land (NN.pred a) (NN.pred b))) end. Definition land a b := match norm_pos a, norm_pos b with | Pos a, Pos b => Pos (NN.land a b) | Neg a, Pos b => Pos (NN.ldiff b (NN.pred a)) | Pos a, Neg b => Pos (NN.ldiff a (NN.pred b)) | Neg a, Neg b => Neg (NN.succ (NN.lor (NN.pred a) (NN.pred b))) end. Definition ldiff a b := match norm_pos a, norm_pos b with | Pos a, Pos b => Pos (NN.ldiff a b) | Neg a, Pos b => Neg (NN.succ (NN.lor (NN.pred a) b)) | Pos a, Neg b => Pos (NN.land a (NN.pred b)) | Neg a, Neg b => Pos (NN.ldiff (NN.pred b) (NN.pred a)) end. Definition lxor a b := match norm_pos a, norm_pos b with | Pos a, Pos b => Pos (NN.lxor a b) | Neg a, Pos b => Neg (NN.succ (NN.lxor (NN.pred a) b)) | Pos a, Neg b => Neg (NN.succ (NN.lxor a (NN.pred b))) | Neg a, Neg b => Pos (NN.lxor (NN.pred a) (NN.pred b)) end. Definition div2 x := shiftr x one. Lemma Zlnot_alt1 : forall x, -(x+1) = Z.lnot x. Proof. unfold Z.lnot, Z.pred; auto with zarith. Qed. Lemma Zlnot_alt2 : forall x, Z.lnot (x-1) = -x. Proof. unfold Z.lnot, Z.pred; auto with zarith. Qed. Lemma Zlnot_alt3 : forall x, Z.lnot (-x) = x-1. Proof. unfold Z.lnot, Z.pred; auto with zarith. Qed. Lemma spec_norm_pos : forall x, to_Z (norm_pos x) = to_Z x. Proof. intros [x|x]; simpl; trivial. rewrite NN.spec_eqb, NN.spec_0. case Z.eqb_spec; simpl; auto with zarith. Qed. Lemma spec_norm_pos_pos : forall x y, norm_pos x = Neg y -> 0 < NN.to_Z y. Proof. intros [x|x] y; simpl; try easy. rewrite NN.spec_eqb, NN.spec_0. case Z.eqb_spec; simpl; try easy. inversion 2. subst. generalize (NN.spec_pos y); auto with zarith. Qed. Ltac destr_norm_pos x := rewrite <- (spec_norm_pos x); let H := fresh in let x' := fresh x in assert (H := spec_norm_pos_pos x); destruct (norm_pos x) as [x'|x']; specialize (H x' (eq_refl _)) || clear H. Lemma spec_testbit: forall x p, testbit x p = Z.testbit (to_Z x) (to_Z p). Proof. intros x p. unfold testbit. destr_norm_pos p; simpl. destr_norm_pos x; simpl. apply NN.spec_testbit. rewrite NN.spec_testbit, NN.spec_pred, Z.max_r by auto with zarith. symmetry. apply Z.bits_opp. apply NN.spec_pos. symmetry. apply Z.testbit_neg_r; auto with zarith. Qed. 
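(* Added note, hedged and not part of the original file: the bitwise
   operations treat [Neg a] through the identity -a = lnot (a-1)
   (cf. [Zlnot_alt1] and [Zlnot_alt2] above), i.e. the bits of a negative
   number are the complement of the bits of the predecessor of its absolute
   value.  For instance, for the integer -3 (so a = 3 and NN.pred a = 2):
     bit 0 of -3 = negb (bit 0 of 2) = negb false = true,
     bit 1 of -3 = negb (bit 1 of 2) = negb true  = false,
   matching the usual two's-complement picture ...11101 of -3. *)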
Lemma spec_shiftl: forall x p, to_Z (shiftl x p) = Z.shiftl (to_Z x) (to_Z p). Proof. intros x p. unfold shiftl. destr_norm_pos x; destruct p as [p|p]; simpl; assert (Hp := NN.spec_pos p). apply NN.spec_shiftl. rewrite Z.shiftl_opp_r. apply NN.spec_shiftr. rewrite !NN.spec_shiftl. rewrite !Z.shiftl_mul_pow2 by apply NN.spec_pos. symmetry. apply Z.mul_opp_l. rewrite Z.shiftl_opp_r, NN.spec_succ, NN.spec_shiftr, NN.spec_pred, Z.max_r by auto with zarith. now rewrite Zlnot_alt1, Z.lnot_shiftr, Zlnot_alt2. Qed. Lemma spec_shiftr: forall x p, to_Z (shiftr x p) = Z.shiftr (to_Z x) (to_Z p). Proof. intros. unfold shiftr. rewrite spec_shiftl, spec_opp. apply Z.shiftl_opp_r. Qed. Lemma spec_land: forall x y, to_Z (land x y) = Z.land (to_Z x) (to_Z y). Proof. intros x y. unfold land. destr_norm_pos x; destr_norm_pos y; simpl; rewrite ?NN.spec_succ, ?NN.spec_land, ?NN.spec_ldiff, ?NN.spec_lor, ?NN.spec_pred, ?Z.max_r, ?Zlnot_alt1; auto with zarith. now rewrite Z.ldiff_land, Zlnot_alt2. now rewrite Z.ldiff_land, Z.land_comm, Zlnot_alt2. now rewrite Z.lnot_lor, !Zlnot_alt2. Qed. Lemma spec_lor: forall x y, to_Z (lor x y) = Z.lor (to_Z x) (to_Z y). Proof. intros x y. unfold lor. destr_norm_pos x; destr_norm_pos y; simpl; rewrite ?NN.spec_succ, ?NN.spec_land, ?NN.spec_ldiff, ?NN.spec_lor, ?NN.spec_pred, ?Z.max_r, ?Zlnot_alt1; auto with zarith. now rewrite Z.lnot_ldiff, Z.lor_comm, Zlnot_alt2. now rewrite Z.lnot_ldiff, Zlnot_alt2. now rewrite Z.lnot_land, !Zlnot_alt2. Qed. Lemma spec_ldiff: forall x y, to_Z (ldiff x y) = Z.ldiff (to_Z x) (to_Z y). Proof. intros x y. unfold ldiff. destr_norm_pos x; destr_norm_pos y; simpl; rewrite ?NN.spec_succ, ?NN.spec_land, ?NN.spec_ldiff, ?NN.spec_lor, ?NN.spec_pred, ?Z.max_r, ?Zlnot_alt1; auto with zarith. now rewrite Z.ldiff_land, Zlnot_alt3. now rewrite Z.lnot_lor, Z.ldiff_land, <- Zlnot_alt2. now rewrite 2 Z.ldiff_land, Zlnot_alt2, Z.land_comm, Zlnot_alt3. Qed. Lemma spec_lxor: forall x y, to_Z (lxor x y) = Z.lxor (to_Z x) (to_Z y). Proof. intros x y. unfold lxor. destr_norm_pos x; destr_norm_pos y; simpl; rewrite ?NN.spec_succ, ?NN.spec_lxor, ?NN.spec_pred, ?Z.max_r, ?Zlnot_alt1; auto with zarith. now rewrite !Z.lnot_lxor_r, Zlnot_alt2. now rewrite !Z.lnot_lxor_l, Zlnot_alt2. now rewrite <- Z.lxor_lnot_lnot, !Zlnot_alt2. Qed. Lemma spec_div2: forall x, to_Z (div2 x) = Z.div2 (to_Z x). Proof. intros x. unfold div2. now rewrite spec_shiftr, Z.div2_spec, spec_1. Qed. End Make. coq-8.4pl2/theories/Numbers/NatInt/0000750000175000001440000000000012127276551016270 5ustar notinuserscoq-8.4pl2/theories/Numbers/NatInt/NZPow.v0000640000175000001440000002732112010532755017472 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> t. End Pow. Module Type PowNotation (A : Typ)(Import B : Pow A). Infix "^" := pow. End PowNotation. Module Type Pow' (A : Typ) := Pow A <+ PowNotation A. Module Type NZPowSpec (Import A : NZOrdAxiomsSig')(Import B : Pow' A). Declare Instance pow_wd : Proper (eq==>eq==>eq) pow. Axiom pow_0_r : forall a, a^0 == 1. Axiom pow_succ_r : forall a b, 0<=b -> a^(succ b) == a * a^b. Axiom pow_neg_r : forall a b, b<0 -> a^b == 0. End NZPowSpec. (** The above [pow_neg_r] specification is useless (and trivially provable) for N. Having it here allows to already derive some slightly more general statements. *) Module Type NZPow (A : NZOrdAxiomsSig) := Pow A <+ NZPowSpec A. Module Type NZPow' (A : NZOrdAxiomsSig) := Pow' A <+ NZPowSpec A. 
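(* Worked example, added as a hedged note and not part of the original file:
   the two rules [pow_0_r] and [pow_succ_r] determine powers at non-negative
   exponents by unfolding, e.g.
     a^3 == a * a^2 == a * (a * a^1) == a * (a * (a * a^0)) == a * (a * (a * 1)),
   while [pow_neg_r] additionally forces a^b == 0 whenever b < 0 (a vacuous
   requirement on N, where no such b exists). *)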
(** Derived properties of power *) Module Type NZPowProp (Import A : NZOrdAxiomsSig') (Import B : NZPow' A) (Import C : NZMulOrderProp A). Hint Rewrite pow_0_r pow_succ_r : nz. (** Power and basic constants *) Lemma pow_0_l : forall a, 0 0^a == 0. Proof. intros a Ha. destruct (lt_exists_pred _ _ Ha) as (a' & EQ & Ha'). rewrite EQ. now nzsimpl. Qed. Lemma pow_0_l' : forall a, a~=0 -> 0^a == 0. Proof. intros a Ha. destruct (lt_trichotomy a 0) as [LT|[EQ|GT]]; try order. now rewrite pow_neg_r. now apply pow_0_l. Qed. Lemma pow_1_r : forall a, a^1 == a. Proof. intros. now nzsimpl'. Qed. Lemma pow_1_l : forall a, 0<=a -> 1^a == 1. Proof. apply le_ind; intros. solve_proper. now nzsimpl. now nzsimpl. Qed. Hint Rewrite pow_1_r pow_1_l : nz. Lemma pow_2_r : forall a, a^2 == a*a. Proof. intros. rewrite two_succ. nzsimpl; order'. Qed. Hint Rewrite pow_2_r : nz. (** Power and nullity *) Lemma pow_eq_0 : forall a b, 0<=b -> a^b == 0 -> a == 0. Proof. intros a b Hb. apply le_ind with (4:=Hb). solve_proper. rewrite pow_0_r. order'. clear b Hb. intros b Hb IH. rewrite pow_succ_r by trivial. intros H. apply eq_mul_0 in H. destruct H; trivial. now apply IH. Qed. Lemma pow_nonzero : forall a b, a~=0 -> 0<=b -> a^b ~= 0. Proof. intros a b Ha Hb. contradict Ha. now apply pow_eq_0 with b. Qed. Lemma pow_eq_0_iff : forall a b, a^b == 0 <-> b<0 \/ (0 0<=c -> a^(b+c) == a^b * a^c. Proof. intros a b c Hb. apply le_ind with (4:=Hb). solve_proper. now nzsimpl. clear b Hb. intros b Hb IH Hc. nzsimpl; trivial. rewrite IH; trivial. apply mul_assoc. now apply add_nonneg_nonneg. Qed. Lemma pow_mul_l : forall a b c, (a*b)^c == a^c * b^c. Proof. intros a b c. destruct (lt_ge_cases c 0) as [Hc|Hc]. rewrite !(pow_neg_r _ _ Hc). now nzsimpl. apply le_ind with (4:=Hc). solve_proper. now nzsimpl. clear c Hc. intros c Hc IH. nzsimpl; trivial. rewrite IH; trivial. apply mul_shuffle1. Qed. Lemma pow_mul_r : forall a b c, 0<=b -> 0<=c -> a^(b*c) == (a^b)^c. Proof. intros a b c Hb. apply le_ind with (4:=Hb). solve_proper. intros. now nzsimpl. clear b Hb. intros b Hb IH Hc. nzsimpl; trivial. rewrite pow_add_r, IH, pow_mul_l; trivial. apply mul_comm. now apply mul_nonneg_nonneg. Qed. (** Positivity *) Lemma pow_nonneg : forall a b, 0<=a -> 0<=a^b. Proof. intros a b Ha. destruct (lt_ge_cases b 0) as [Hb|Hb]. now rewrite !(pow_neg_r _ _ Hb). apply le_ind with (4:=Hb). solve_proper. nzsimpl; order'. clear b Hb. intros b Hb IH. nzsimpl; trivial. now apply mul_nonneg_nonneg. Qed. Lemma pow_pos_nonneg : forall a b, 0 0<=b -> 0 0<=a a^c < b^c. Proof. intros a b c Hc. apply lt_ind with (4:=Hc). solve_proper. intros (Ha,H). nzsimpl; trivial; order. clear c Hc. intros c Hc IH (Ha,H). nzsimpl; try order. apply mul_lt_mono_nonneg; trivial. apply pow_nonneg; try order. apply IH. now split. Qed. Lemma pow_le_mono_l : forall a b c, 0<=a<=b -> a^c <= b^c. Proof. intros a b c (Ha,H). destruct (lt_trichotomy c 0) as [Hc|[Hc|Hc]]. rewrite !(pow_neg_r _ _ Hc); now nzsimpl. rewrite Hc; now nzsimpl. apply lt_eq_cases in H. destruct H as [H|H]; [|now rewrite <- H]. apply lt_le_incl, pow_lt_mono_l; now try split. Qed. Lemma pow_gt_1 : forall a b, 1 (0 1 0<=c -> b a^b < a^c. Proof. intros a b c Ha Hc H. destruct (lt_ge_cases b 0) as [Hb|Hb]. rewrite pow_neg_r by trivial. apply pow_pos_nonneg; order'. assert (H' : b<=c) by order. destruct (le_exists_sub _ _ H') as (d & EQ & Hd). rewrite EQ, pow_add_r; trivial. rewrite <- (mul_1_l (a^b)) at 1. apply mul_lt_mono_pos_r. apply pow_pos_nonneg; order'. apply pow_gt_1; trivial. 
apply lt_eq_cases in Hd; destruct Hd as [LT|EQ']; trivial. rewrite <- EQ' in *. rewrite add_0_l in EQ. order. Qed. (** NB: since 0^0 > 0^1, the following result isn't valid with a=0 *) Lemma pow_le_mono_r : forall a b c, 0 b<=c -> a^b <= a^c. Proof. intros a b c Ha H. destruct (lt_ge_cases b 0) as [Hb|Hb]. rewrite (pow_neg_r _ _ Hb). apply pow_nonneg; order. apply le_succ_l in Ha; rewrite <- one_succ in Ha. apply lt_eq_cases in Ha; destruct Ha as [Ha|Ha]; [|rewrite <- Ha]. apply lt_eq_cases in H; destruct H as [H|H]; [|now rewrite <- H]. apply lt_le_incl, pow_lt_mono_r; order. nzsimpl; order. Qed. Lemma pow_le_mono : forall a b c d, 0 b<=d -> a^b <= c^d. Proof. intros. transitivity (a^d). apply pow_le_mono_r; intuition order. apply pow_le_mono_l; intuition order. Qed. Lemma pow_lt_mono : forall a b c d, 0 0 a^b < c^d. Proof. intros a b c d (Ha,Hac) (Hb,Hbd). apply le_succ_l in Ha; rewrite <- one_succ in Ha. apply lt_eq_cases in Ha; destruct Ha as [Ha|Ha]; [|rewrite <- Ha]. transitivity (a^d). apply pow_lt_mono_r; intuition order. apply pow_lt_mono_l; try split; order'. nzsimpl; try order. apply pow_gt_1; order. Qed. (** Injectivity *) Lemma pow_inj_l : forall a b c, 0<=a -> 0<=b -> 0 a^c == b^c -> a == b. Proof. intros a b c Ha Hb Hc EQ. destruct (lt_trichotomy a b) as [LT|[EQ'|GT]]; trivial. assert (a^c < b^c) by (apply pow_lt_mono_l; try split; trivial). order. assert (b^c < a^c) by (apply pow_lt_mono_l; try split; trivial). order. Qed. Lemma pow_inj_r : forall a b c, 1 0<=b -> 0<=c -> a^b == a^c -> b == c. Proof. intros a b c Ha Hb Hc EQ. destruct (lt_trichotomy b c) as [LT|[EQ'|GT]]; trivial. assert (a^b < a^c) by (apply pow_lt_mono_r; try split; trivial). order. assert (a^c < a^b) by (apply pow_lt_mono_r; try split; trivial). order. Qed. (** Monotonicity results, both ways *) Lemma pow_lt_mono_l_iff : forall a b c, 0<=a -> 0<=b -> 0 (a a^c < b^c). Proof. intros a b c Ha Hb Hc. split; intro LT. apply pow_lt_mono_l; try split; trivial. destruct (le_gt_cases b a) as [LE|GT]; trivial. assert (b^c <= a^c) by (apply pow_le_mono_l; try split; order). order. Qed. Lemma pow_le_mono_l_iff : forall a b c, 0<=a -> 0<=b -> 0 (a<=b <-> a^c <= b^c). Proof. intros a b c Ha Hb Hc. split; intro LE. apply pow_le_mono_l; try split; trivial. destruct (le_gt_cases a b) as [LE'|GT]; trivial. assert (b^c < a^c) by (apply pow_lt_mono_l; try split; trivial). order. Qed. Lemma pow_lt_mono_r_iff : forall a b c, 1 0<=c -> (b a^b < a^c). Proof. intros a b c Ha Hc. split; intro LT. now apply pow_lt_mono_r. destruct (le_gt_cases c b) as [LE|GT]; trivial. assert (a^c <= a^b) by (apply pow_le_mono_r; order'). order. Qed. Lemma pow_le_mono_r_iff : forall a b c, 1 0<=c -> (b<=c <-> a^b <= a^c). Proof. intros a b c Ha Hc. split; intro LE. apply pow_le_mono_r; order'. destruct (le_gt_cases b c) as [LE'|GT]; trivial. assert (a^c < a^b) by (apply pow_lt_mono_r; order'). order. Qed. (** For any a>1, the a^x function is above the identity function *) Lemma pow_gt_lin_r : forall a b, 1 0<=b -> b < a^b. Proof. intros a b Ha Hb. apply le_ind with (4:=Hb). solve_proper. nzsimpl. order'. clear b Hb. intros b Hb IH. nzsimpl; trivial. rewrite <- !le_succ_l in *. rewrite <- two_succ in Ha. transitivity (2*(S b)). nzsimpl'. rewrite <- 2 succ_le_mono. rewrite <- (add_0_l b) at 1. apply add_le_mono; order. apply mul_le_mono_nonneg; trivial. order'. now apply lt_le_incl, lt_succ_r. Qed. (** Someday, we should say something about the full Newton formula. In the meantime, we can at least provide some inequalities about (a+b)^c. 
*) Lemma pow_add_lower : forall a b c, 0<=a -> 0<=b -> 0 a^c + b^c <= (a+b)^c. Proof. intros a b c Ha Hb Hc. apply lt_ind with (4:=Hc). solve_proper. nzsimpl; order. clear c Hc. intros c Hc IH. assert (0<=c) by order'. nzsimpl; trivial. transitivity ((a+b)*(a^c + b^c)). rewrite mul_add_distr_r, !mul_add_distr_l. apply add_le_mono. rewrite <- add_0_r at 1. apply add_le_mono_l. apply mul_nonneg_nonneg; trivial. apply pow_nonneg; trivial. rewrite <- add_0_l at 1. apply add_le_mono_r. apply mul_nonneg_nonneg; trivial. apply pow_nonneg; trivial. apply mul_le_mono_nonneg_l; trivial. now apply add_nonneg_nonneg. Qed. (** This upper bound can also be seen as a convexity proof for x^c : image of (a+b)/2 is below the middle of the images of a and b *) Lemma pow_add_upper : forall a b c, 0<=a -> 0<=b -> 0 (a+b)^c <= 2^(pred c) * (a^c + b^c). Proof. assert (aux : forall a b c, 0<=a<=b -> 0 (a + b) * (a ^ c + b ^ c) <= 2 * (a * a ^ c + b * b ^ c)). (* begin *) intros a b c (Ha,H) Hc. rewrite !mul_add_distr_l, !mul_add_distr_r. nzsimpl'. rewrite <- !add_assoc. apply add_le_mono_l. rewrite !add_assoc. apply add_le_mono_r. destruct (le_exists_sub _ _ H) as (d & EQ & Hd). rewrite EQ. rewrite 2 mul_add_distr_r. rewrite !add_assoc. apply add_le_mono_r. rewrite add_comm. apply add_le_mono_l. apply mul_le_mono_nonneg_l; trivial. apply pow_le_mono_l; try split; order. (* end *) intros a b c Ha Hb Hc. apply lt_ind with (4:=Hc). solve_proper. nzsimpl; order. clear c Hc. intros c Hc IH. assert (0<=c) by order. nzsimpl; trivial. transitivity ((a+b)*(2^(pred c) * (a^c + b^c))). apply mul_le_mono_nonneg_l; trivial. now apply add_nonneg_nonneg. rewrite mul_assoc. rewrite (mul_comm (a+b)). assert (EQ : S (P c) == c) by (apply lt_succ_pred with 0; order'). assert (LE : 0 <= P c) by (now rewrite succ_le_mono, EQ, le_succ_l). assert (EQ' : 2^c == 2^(P c) * 2) by (rewrite <- EQ at 1; nzsimpl'; order). rewrite EQ', <- !mul_assoc. apply mul_le_mono_nonneg_l. apply pow_nonneg; order'. destruct (le_gt_cases a b). apply aux; try split; order'. rewrite (add_comm a), (add_comm (a^c)), (add_comm (a*a^c)). apply aux; try split; order'. Qed. End NZPowProp. coq-8.4pl2/theories/Numbers/NatInt/NZDomain.v0000640000175000001440000002356612010532755020143 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R)==>R==>R) (nat_iter n). Proof. intros f f' Hf. induction n; simpl; red; auto. Qed. Module NZDomainProp (Import NZ:NZDomainSig'). Include NZBaseProp NZ. (** * Relationship between points thanks to [succ] and [pred]. *) (** For any two points, one is an iterated successor of the other. *) Lemma itersucc_or_itersucc n m : exists k, n == (S^k) m \/ m == (S^k) n. Proof. nzinduct n m. exists 0%nat. now left. intros n. split; intros [k [L|R]]. exists (Datatypes.S k). left. now apply succ_wd. destruct k as [|k]. simpl in R. exists 1%nat. left. now apply succ_wd. rewrite nat_iter_succ_r in R. exists k. now right. destruct k as [|k]; simpl in L. exists 1%nat. now right. apply succ_inj in L. exists k. now left. exists (Datatypes.S k). right. now rewrite nat_iter_succ_r. Qed. (** Generalized version of [pred_succ] when iterating *) Lemma succ_swap_pred : forall k n m, n == (S^k) m -> m == (P^k) n. Proof. induction k. simpl; auto with *. simpl; intros. apply pred_wd in H. rewrite pred_succ in H. apply IHk in H; auto. rewrite <- nat_iter_succ_r in H; auto. Qed. (** From a given point, all others are iterated successors or iterated predecessors. 
*) Lemma itersucc_or_iterpred : forall n m, exists k, n == (S^k) m \/ n == (P^k) m. Proof. intros n m. destruct (itersucc_or_itersucc n m) as (k,[H|H]). exists k; left; auto. exists k; right. apply succ_swap_pred; auto. Qed. (** In particular, all points are either iterated successors of [0] or iterated predecessors of [0] (or both). *) Lemma itersucc0_or_iterpred0 : forall n, exists p:nat, n == (S^p) 0 \/ n == (P^p) 0. Proof. intros n. exact (itersucc_or_iterpred n 0). Qed. (** * Study of initial point w.r.t. [succ] (if any). *) Definition initial n := forall m, n ~= S m. Lemma initial_alt : forall n, initial n <-> S (P n) ~= n. Proof. split. intros Bn EQ. symmetry in EQ. destruct (Bn _ EQ). intros NEQ m EQ. apply NEQ. rewrite EQ, pred_succ; auto with *. Qed. Lemma initial_alt2 : forall n, initial n <-> ~exists m, n == S m. Proof. firstorder. Qed. (** First case: let's assume such an initial point exists (i.e. [S] isn't surjective)... *) Section InitialExists. Hypothesis init : t. Hypothesis Initial : initial init. (** ... then we have unicity of this initial point. *) Lemma initial_unique : forall m, initial m -> m == init. Proof. intros m Im. destruct (itersucc_or_itersucc init m) as (p,[H|H]). destruct p. now simpl in *. destruct (Initial _ H). destruct p. now simpl in *. destruct (Im _ H). Qed. (** ... then all other points are descendant of it. *) Lemma initial_ancestor : forall m, exists p, m == (S^p) init. Proof. intros m. destruct (itersucc_or_itersucc init m) as (p,[H|H]). destruct p; simpl in *; auto. exists O; auto with *. destruct (Initial _ H). exists p; auto. Qed. (** NB : We would like to have [pred n == n] for the initial element, but nothing forces that. For instance we can have -3 as initial point, and P(-3) = 2. A bit odd indeed, but legal according to [NZDomainSig]. We can hence have [n == (P^k) m] without [exists k', m == (S^k') n]. *) (** We need decidability of [eq] (or classical reasoning) for this: *) Section SuccPred. Hypothesis eq_decidable : forall n m, n==m \/ n~=m. Lemma succ_pred_approx : forall n, ~initial n -> S (P n) == n. Proof. intros n NB. rewrite initial_alt in NB. destruct (eq_decidable (S (P n)) n); auto. elim NB; auto. Qed. End SuccPred. End InitialExists. (** Second case : let's suppose now [S] surjective, i.e. no initial point. *) Section InitialDontExists. Hypothesis succ_onto : forall n, exists m, n == S m. Lemma succ_onto_gives_succ_pred : forall n, S (P n) == n. Proof. intros n. destruct (succ_onto n) as (m,H). rewrite H, pred_succ; auto with *. Qed. Lemma succ_onto_pred_injective : forall n m, P n == P m -> n == m. Proof. intros n m. intros H; apply succ_wd in H. rewrite !succ_onto_gives_succ_pred in H; auto. Qed. End InitialDontExists. (** To summarize: S is always injective, P is always surjective (thanks to [pred_succ]). I) If S is not surjective, we have an initial point, which is unique. This bottom is below zero: we have N shifted (or not) to the left. P cannot be injective: P init = P (S (P init)). (P init) can be arbitrary. II) If S is surjective, we have [forall n, S (P n) = n], S and P are bijective and reciprocal. IIa) if [exists k<>O, 0 == S^k 0], then we have a cyclic structure Z/nZ IIb) otherwise, we have Z *) (** * An alternative induction principle using [S] and [P]. *) (** It is weaker than [bi_induction]. For instance it cannot prove that we can go from one point by many [S] _or_ many [P], but only by many [S] mixed with many [P]. Think of a model with two copies of N: 0, 1=S 0, 2=S 1, ... 0', 1'=S 0', 2'=S 1', ... 
and P 0 = 0' and P 0' = 0. *) Lemma bi_induction_pred : forall A : t -> Prop, Proper (eq==>iff) A -> A 0 -> (forall n, A n -> A (S n)) -> (forall n, A n -> A (P n)) -> forall n, A n. Proof. intros. apply bi_induction; auto. clear n. intros n; split; auto. intros G; apply H2 in G. rewrite pred_succ in G; auto. Qed. Lemma central_induction_pred : forall A : t -> Prop, Proper (eq==>iff) A -> forall n0, A n0 -> (forall n, A n -> A (S n)) -> (forall n, A n -> A (P n)) -> forall n, A n. Proof. intros. assert (A 0). destruct (itersucc_or_iterpred 0 n0) as (k,[Hk|Hk]); rewrite Hk; clear Hk. clear H2. induction k; simpl in *; auto. clear H1. induction k; simpl in *; auto. apply bi_induction_pred; auto. Qed. End NZDomainProp. (** We now focus on the translation from [nat] into [NZ]. First, relationship with [0], [succ], [pred]. *) Module NZOfNat (Import NZ:NZDomainSig'). Definition ofnat (n : nat) : t := (S^n) 0. Notation "[ n ]" := (ofnat n) (at level 7) : ofnat. Local Open Scope ofnat. Lemma ofnat_zero : [O] == 0. Proof. reflexivity. Qed. Lemma ofnat_succ : forall n, [Datatypes.S n] == succ [n]. Proof. now unfold ofnat. Qed. Lemma ofnat_pred : forall n, n<>O -> [Peano.pred n] == P [n]. Proof. unfold ofnat. destruct n. destruct 1; auto. intros _. simpl. symmetry. apply pred_succ. Qed. (** Since [P 0] can be anything in NZ (either [-1], [0], or even other numbers, we cannot state previous lemma for [n=O]. *) End NZOfNat. (** If we require in addition a strict order on NZ, we can prove that [ofnat] is injective, and hence that NZ is infinite (i.e. we ban Z/nZ models) *) Module NZOfNatOrd (Import NZ:NZOrdSig'). Include NZOfNat NZ. Include NZBaseProp NZ <+ NZOrderProp NZ. Local Open Scope ofnat. Theorem ofnat_S_gt_0 : forall n : nat, 0 < [Datatypes.S n]. Proof. unfold ofnat. intros n; induction n as [| n IH]; simpl in *. apply lt_succ_diag_r. apply lt_trans with (S 0). apply lt_succ_diag_r. now rewrite <- succ_lt_mono. Qed. Theorem ofnat_S_neq_0 : forall n : nat, 0 ~= [Datatypes.S n]. Proof. intros. apply lt_neq, ofnat_S_gt_0. Qed. Lemma ofnat_injective : forall n m, [n]==[m] -> n = m. Proof. induction n as [|n IH]; destruct m; auto. intros H; elim (ofnat_S_neq_0 _ H). intros H; symmetry in H; elim (ofnat_S_neq_0 _ H). intros. f_equal. apply IH. now rewrite <- succ_inj_wd. Qed. Lemma ofnat_eq : forall n m, [n]==[m] <-> n = m. Proof. split. apply ofnat_injective. intros; now subst. Qed. (* In addition, we can prove that [ofnat] preserves order. *) Lemma ofnat_lt : forall n m : nat, [n]<[m] <-> (n (n<=m)%nat. Proof. intros. rewrite lt_eq_cases, ofnat_lt, ofnat_eq. split. destruct 1; subst; auto with arith. apply Lt.le_lt_or_eq. Qed. End NZOfNatOrd. (** For basic operations, we can prove correspondance with their counterpart in [nat]. *) Module NZOfNatOps (Import NZ:NZAxiomsSig'). Include NZOfNat NZ. Local Open Scope ofnat. Lemma ofnat_add_l : forall n m, [n]+m == (S^n) m. Proof. induction n; intros. apply add_0_l. rewrite ofnat_succ, add_succ_l. simpl. now f_equiv. Qed. Lemma ofnat_add : forall n m, [n+m] == [n]+[m]. Proof. intros. rewrite ofnat_add_l. induction n; simpl. reflexivity. rewrite ofnat_succ. now f_equiv. Qed. Lemma ofnat_mul : forall n m, [n*m] == [n]*[m]. Proof. induction n; simpl; intros. symmetry. apply mul_0_l. rewrite plus_comm. rewrite ofnat_succ, ofnat_add, mul_succ_l. now f_equiv. Qed. Lemma ofnat_sub_r : forall n m, n-[m] == (P^m) n. Proof. induction m; simpl; intros. rewrite ofnat_zero. apply sub_0_r. rewrite ofnat_succ, sub_succ_r. now f_equiv. Qed. 
Lemma ofnat_sub : forall n m, m<=n -> [n-m] == [n]-[m]. Proof. intros n m H. rewrite ofnat_sub_r. revert n H. induction m. intros. rewrite <- minus_n_O. now simpl. intros. destruct n. inversion H. rewrite nat_iter_succ_r. simpl. rewrite ofnat_succ, pred_succ; auto with arith. Qed. End NZOfNatOps. coq-8.4pl2/theories/Numbers/NatInt/NZParity.v0000640000175000001440000001606212010532755020175 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool. Definition Even n := exists m, n == 2*m. Definition Odd n := exists m, n == 2*m+1. Axiom even_spec : forall n, even n = true <-> Even n. Axiom odd_spec : forall n, odd n = true <-> Odd n. End NZParity. Module Type NZParityProp (Import A : NZOrdAxiomsSig') (Import B : NZParity A) (Import C : NZMulOrderProp A). (** Morphisms *) Instance Even_wd : Proper (eq==>iff) Even. Proof. unfold Even. solve_proper. Qed. Instance Odd_wd : Proper (eq==>iff) Odd. Proof. unfold Odd. solve_proper. Qed. Instance even_wd : Proper (eq==>Logic.eq) even. Proof. intros x x' EQ. rewrite eq_iff_eq_true, 2 even_spec. now f_equiv. Qed. Instance odd_wd : Proper (eq==>Logic.eq) odd. Proof. intros x x' EQ. rewrite eq_iff_eq_true, 2 odd_spec. now f_equiv. Qed. (** Evenness and oddity are dual notions *) Lemma Even_or_Odd : forall x, Even x \/ Odd x. Proof. nzinduct x. left. exists 0. now nzsimpl. intros x. split; intros [(y,H)|(y,H)]. right. exists y. rewrite H. now nzsimpl. left. exists (S y). rewrite H. now nzsimpl'. right. assert (LT : exists z, z 2*n < 2*m+1. Proof. intros. nzsimpl'. apply lt_succ_r. now apply add_le_mono. Qed. Lemma double_above : forall n m, n 2*n+1 < 2*m. Proof. intros. nzsimpl'. rewrite <- le_succ_l, <- add_succ_l, <- add_succ_r. apply add_le_mono; now apply le_succ_l. Qed. Lemma Even_Odd_False : forall x, Even x -> Odd x -> False. Proof. intros x (y,E) (z,O). rewrite O in E; clear O. destruct (le_gt_cases y z) as [LE|GT]. generalize (double_below _ _ LE); order. generalize (double_above _ _ GT); order. Qed. Lemma orb_even_odd : forall n, orb (even n) (odd n) = true. Proof. intros. destruct (Even_or_Odd n) as [H|H]. rewrite <- even_spec in H. now rewrite H. rewrite <- odd_spec in H. now rewrite H, orb_true_r. Qed. Lemma negb_odd : forall n, negb (odd n) = even n. Proof. intros. generalize (Even_or_Odd n) (Even_Odd_False n). rewrite <- even_spec, <- odd_spec. destruct (odd n), (even n); simpl; intuition. Qed. Lemma negb_even : forall n, negb (even n) = odd n. Proof. intros. rewrite <- negb_odd. apply negb_involutive. Qed. (** Constants *) Lemma even_0 : even 0 = true. Proof. rewrite even_spec. exists 0. now nzsimpl. Qed. Lemma odd_0 : odd 0 = false. Proof. now rewrite <- negb_even, even_0. Qed. Lemma odd_1 : odd 1 = true. Proof. rewrite odd_spec. exists 0. now nzsimpl'. Qed. Lemma even_1 : even 1 = false. Proof. now rewrite <- negb_odd, odd_1. Qed. Lemma even_2 : even 2 = true. Proof. rewrite even_spec. exists 1. now nzsimpl'. Qed. Lemma odd_2 : odd 2 = false. Proof. now rewrite <- negb_even, even_2. Qed. (** Parity and successor *) Lemma Odd_succ : forall n, Odd (S n) <-> Even n. Proof. split; intros (m,H). exists m. apply succ_inj. now rewrite add_1_r in H. exists m. rewrite add_1_r. now f_equiv. Qed. Lemma odd_succ : forall n, odd (S n) = even n. Proof. intros. apply eq_iff_eq_true. rewrite even_spec, odd_spec. apply Odd_succ. Qed. Lemma even_succ : forall n, even (S n) = odd n. Proof. intros. now rewrite <- negb_odd, odd_succ, negb_even. Qed. 
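(** An illustrative instance, for exposition only (the name is ad hoc and not
    part of the original interface): [even_succ] combined with the constant
    lemma [odd_2] computes the parity of [S 2]. *)
Lemma even_succ_2_example : even (S 2) = false.
Proof.
 now rewrite even_succ, odd_2.
Qed.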
Lemma Even_succ : forall n, Even (S n) <-> Odd n. Proof. intros. now rewrite <- even_spec, even_succ, odd_spec. Qed. (** Parity and successor of successor *) Lemma Even_succ_succ : forall n, Even (S (S n)) <-> Even n. Proof. intros. now rewrite Even_succ, Odd_succ. Qed. Lemma Odd_succ_succ : forall n, Odd (S (S n)) <-> Odd n. Proof. intros. now rewrite Odd_succ, Even_succ. Qed. Lemma even_succ_succ : forall n, even (S (S n)) = even n. Proof. intros. now rewrite even_succ, odd_succ. Qed. Lemma odd_succ_succ : forall n, odd (S (S n)) = odd n. Proof. intros. now rewrite odd_succ, even_succ. Qed. (** Parity and addition *) Lemma even_add : forall n m, even (n+m) = Bool.eqb (even n) (even m). Proof. intros. case_eq (even n); case_eq (even m); rewrite <- ?negb_true_iff, ?negb_even, ?odd_spec, ?even_spec; intros (m',Hm) (n',Hn). exists (n'+m'). now rewrite mul_add_distr_l, Hn, Hm. exists (n'+m'). now rewrite mul_add_distr_l, Hn, Hm, add_assoc. exists (n'+m'). now rewrite mul_add_distr_l, Hn, Hm, add_shuffle0. exists (n'+m'+1). rewrite Hm,Hn. nzsimpl'. now rewrite add_shuffle1. Qed. Lemma odd_add : forall n m, odd (n+m) = xorb (odd n) (odd m). Proof. intros. rewrite <- !negb_even. rewrite even_add. now destruct (even n), (even m). Qed. (** Parity and multiplication *) Lemma even_mul : forall n m, even (mul n m) = even n || even m. Proof. intros. case_eq (even n); simpl; rewrite ?even_spec. intros (n',Hn). exists (n'*m). now rewrite Hn, mul_assoc. case_eq (even m); simpl; rewrite ?even_spec. intros (m',Hm). exists (n*m'). now rewrite Hm, !mul_assoc, (mul_comm 2). (* odd / odd *) rewrite <- !negb_true_iff, !negb_even, !odd_spec. intros (m',Hm) (n',Hn). exists (n'*2*m' +n'+m'). rewrite Hn,Hm, !mul_add_distr_l, !mul_add_distr_r, !mul_1_l, !mul_1_r. now rewrite add_shuffle1, add_assoc, !mul_assoc. Qed. Lemma odd_mul : forall n m, odd (mul n m) = odd n && odd m. Proof. intros. rewrite <- !negb_even. rewrite even_mul. now destruct (even n), (even m). Qed. (** A particular case : adding by an even number *) Lemma even_add_even : forall n m, Even m -> even (n+m) = even n. Proof. intros n m Hm. apply even_spec in Hm. rewrite even_add, Hm. now destruct (even n). Qed. Lemma odd_add_even : forall n m, Even m -> odd (n+m) = odd n. Proof. intros n m Hm. apply even_spec in Hm. rewrite odd_add, <- (negb_even m), Hm. now destruct (odd n). Qed. Lemma even_add_mul_even : forall n m p, Even m -> even (n+m*p) = even n. Proof. intros n m p Hm. apply even_spec in Hm. apply even_add_even. apply even_spec. now rewrite even_mul, Hm. Qed. Lemma odd_add_mul_even : forall n m p, Even m -> odd (n+m*p) = odd n. Proof. intros n m p Hm. apply even_spec in Hm. apply odd_add_even. apply even_spec. now rewrite even_mul, Hm. Qed. Lemma even_add_mul_2 : forall n m, even (n+2*m) = even n. Proof. intros. apply even_add_mul_even. apply even_spec, even_2. Qed. Lemma odd_add_mul_2 : forall n m, odd (n+2*m) = odd n. Proof. intros. apply odd_add_mul_even. apply even_spec, even_2. Qed. End NZParityProp.coq-8.4pl2/theories/Numbers/NatInt/NZBase.v0000640000175000001440000000523212010532755017574 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y==x. Proof. intros; split; symmetry; auto. Qed. (* TODO: how register ~= (which is just a notation) as a Symmetric relation, hence allowing "symmetry" tac ? *) Theorem neq_sym : forall n m, n ~= m -> m ~= n. Proof. intros n m H1 H2; symmetry in H2; false_hyp H2 H1. Qed. 
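(** An illustrative corollary, for exposition only (the name is ad hoc and not
    part of the original interface): [neq_sym] packaged as an equivalence,
    in the spirit of [eq_sym_iff] above. *)
Lemma neq_sym_iff_example : forall n m, n ~= m <-> m ~= n.
Proof.
 intros n m; split; apply neq_sym.
Qed.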
Theorem eq_stepl : forall x y z, x == y -> x == z -> z == y. Proof. intros x y z H1 H2; now rewrite <- H1. Qed. Declare Left Step eq_stepl. (* The right step lemma is just the transitivity of eq *) Declare Right Step (@Equivalence_Transitive _ _ eq_equiv). Theorem succ_inj : forall n1 n2, S n1 == S n2 -> n1 == n2. Proof. intros n1 n2 H. apply pred_wd in H. now do 2 rewrite pred_succ in H. Qed. (* The following theorem is useful as an equivalence for proving bidirectional induction steps *) Theorem succ_inj_wd : forall n1 n2, S n1 == S n2 <-> n1 == n2. Proof. intros; split. apply succ_inj. intros. now f_equiv. Qed. Theorem succ_inj_wd_neg : forall n m, S n ~= S m <-> n ~= m. Proof. intros; now rewrite succ_inj_wd. Qed. (* We cannot prove that the predecessor is injective, nor that it is left-inverse to the successor at this point *) Section CentralInduction. Variable A : t -> Prop. Hypothesis A_wd : Proper (eq==>iff) A. Theorem central_induction : forall z, A z -> (forall n, A n <-> A (S n)) -> forall n, A n. Proof. intros z Base Step; revert Base; pattern z; apply bi_induction. solve_proper. intro; now apply bi_induction. intro; pose proof (Step n); tauto. Qed. End CentralInduction. Tactic Notation "nzinduct" ident(n) := induction_maker n ltac:(apply bi_induction). Tactic Notation "nzinduct" ident(n) constr(u) := induction_maker n ltac:(apply central_induction with (z := u)). End NZBaseProp. coq-8.4pl2/theories/Numbers/NatInt/NZOrder.v0000640000175000001440000004231312106745424020003 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* eq==>iff) le. Proof. intros n n' Hn m m' Hm. now rewrite <- !lt_succ_r, Hn, Hm. Qed. Ltac le_elim H := rewrite lt_eq_cases in H; destruct H as [H | H]. Theorem lt_le_incl : forall n m, n < m -> n <= m. Proof. intros. apply lt_eq_cases. now left. Qed. Theorem le_refl : forall n, n <= n. Proof. intro. apply lt_eq_cases. now right. Qed. Theorem lt_succ_diag_r : forall n, n < S n. Proof. intro n. rewrite lt_succ_r. apply le_refl. Qed. Theorem le_succ_diag_r : forall n, n <= S n. Proof. intro; apply lt_le_incl; apply lt_succ_diag_r. Qed. Theorem neq_succ_diag_l : forall n, S n ~= n. Proof. intros n H. apply (lt_irrefl n). rewrite <- H at 2. apply lt_succ_diag_r. Qed. Theorem neq_succ_diag_r : forall n, n ~= S n. Proof. intro n; apply neq_sym, neq_succ_diag_l. Qed. Theorem nlt_succ_diag_l : forall n, ~ S n < n. Proof. intros n H. apply (lt_irrefl (S n)). rewrite lt_succ_r. now apply lt_le_incl. Qed. Theorem nle_succ_diag_l : forall n, ~ S n <= n. Proof. intros n H; le_elim H. false_hyp H nlt_succ_diag_l. false_hyp H neq_succ_diag_l. Qed. Theorem le_succ_l : forall n m, S n <= m <-> n < m. Proof. intro n; nzinduct m n. split; intro H. false_hyp H nle_succ_diag_l. false_hyp H lt_irrefl. intro m. rewrite (lt_eq_cases (S n) (S m)), !lt_succ_r, (lt_eq_cases n m), succ_inj_wd. rewrite or_cancel_r. reflexivity. intros LE EQ; rewrite EQ in LE; false_hyp LE nle_succ_diag_l. intros LT EQ; rewrite EQ in LT; false_hyp LT lt_irrefl. Qed. (** Trichotomy *) Theorem le_gt_cases : forall n m, n <= m \/ n > m. Proof. intros n m; nzinduct n m. left; apply le_refl. intro n. rewrite lt_succ_r, le_succ_l, !lt_eq_cases. intuition. Qed. Theorem lt_trichotomy : forall n m, n < m \/ n == m \/ m < n. Proof. intros n m. generalize (le_gt_cases n m); rewrite lt_eq_cases; tauto. Qed. Notation lt_eq_gt_cases := lt_trichotomy (only parsing). (** Asymmetry and transitivity. 
*) Theorem lt_asymm : forall n m, n < m -> ~ m < n. Proof. intros n m; nzinduct n m. intros H; false_hyp H lt_irrefl. intro n; split; intros H H1 H2. apply lt_succ_r in H2. le_elim H2. apply H; auto. apply le_succ_l. now apply lt_le_incl. rewrite H2 in H1. false_hyp H1 nlt_succ_diag_l. apply le_succ_l in H1. le_elim H1. apply H; auto. rewrite lt_succ_r. now apply lt_le_incl. rewrite <- H1 in H2. false_hyp H2 nlt_succ_diag_l. Qed. Notation lt_ngt := lt_asymm (only parsing). Theorem lt_trans : forall n m p, n < m -> m < p -> n < p. Proof. intros n m p; nzinduct p m. intros _ H; false_hyp H lt_irrefl. intro p. rewrite 2 lt_succ_r. split; intros H H1 H2. apply lt_le_incl; le_elim H2; [now apply H | now rewrite H2 in H1]. assert (n <= p) as H3 by (auto using lt_le_incl). le_elim H3. assumption. rewrite <- H3 in H2. elim (lt_asymm n m); auto. Qed. Theorem le_trans : forall n m p, n <= m -> m <= p -> n <= p. Proof. intros n m p. rewrite 3 lt_eq_cases. intros [LT|EQ] [LT'|EQ']; try rewrite EQ; try rewrite <- EQ'; generalize (lt_trans n m p); auto with relations. Qed. (** Some type classes about order *) Instance lt_strorder : StrictOrder lt. Proof. split. exact lt_irrefl. exact lt_trans. Qed. Instance le_preorder : PreOrder le. Proof. split. exact le_refl. exact le_trans. Qed. Instance le_partialorder : PartialOrder _ le. Proof. intros x y. compute. split. intro EQ; now rewrite EQ. rewrite 2 lt_eq_cases. intuition. elim (lt_irrefl x). now transitivity y. Qed. (** We know enough now to benefit from the generic [order] tactic. *) Definition lt_compat := lt_wd. Definition lt_total := lt_trichotomy. Definition le_lteq := lt_eq_cases. Module Private_OrderTac. Module IsTotal. Definition eq_equiv := eq_equiv. Definition lt_strorder := lt_strorder. Definition lt_compat := lt_compat. Definition lt_total := lt_total. Definition le_lteq := le_lteq. End IsTotal. Module Tac := !MakeOrderTac NZ IsTotal. End Private_OrderTac. Ltac order := Private_OrderTac.Tac.order. (** Some direct consequences of [order]. *) Theorem lt_neq : forall n m, n < m -> n ~= m. Proof. order. Qed. Theorem le_neq : forall n m, n < m <-> n <= m /\ n ~= m. Proof. intuition order. Qed. Theorem eq_le_incl : forall n m, n == m -> n <= m. Proof. order. Qed. Lemma lt_stepl : forall x y z, x < y -> x == z -> z < y. Proof. order. Qed. Lemma lt_stepr : forall x y z, x < y -> y == z -> x < z. Proof. order. Qed. Lemma le_stepl : forall x y z, x <= y -> x == z -> z <= y. Proof. order. Qed. Lemma le_stepr : forall x y z, x <= y -> y == z -> x <= z. Proof. order. Qed. Declare Left Step lt_stepl. Declare Right Step lt_stepr. Declare Left Step le_stepl. Declare Right Step le_stepr. Theorem le_lt_trans : forall n m p, n <= m -> m < p -> n < p. Proof. order. Qed. Theorem lt_le_trans : forall n m p, n < m -> m <= p -> n < p. Proof. order. Qed. Theorem le_antisymm : forall n m, n <= m -> m <= n -> n == m. Proof. order. Qed. (** More properties of [<] and [<=] with respect to [S] and [0]. *) Theorem le_succ_r : forall n m, n <= S m <-> n <= m \/ n == S m. Proof. intros n m; rewrite lt_eq_cases. now rewrite lt_succ_r. Qed. Theorem lt_succ_l : forall n m, S n < m -> n < m. Proof. intros n m H; apply le_succ_l; order. Qed. Theorem le_le_succ_r : forall n m, n <= m -> n <= S m. Proof. intros n m LE. apply lt_succ_r in LE. order. Qed. Theorem lt_lt_succ_r : forall n m, n < m -> n < S m. Proof. intros. rewrite lt_succ_r. order. Qed. Theorem succ_lt_mono : forall n m, n < m <-> S n < S m. Proof. intros n m. rewrite <- le_succ_l. symmetry. apply lt_succ_r. Qed. 
Theorem succ_le_mono : forall n m, n <= m <-> S n <= S m. Proof. intros n m. now rewrite 2 lt_eq_cases, <- succ_lt_mono, succ_inj_wd. Qed. Theorem lt_0_1 : 0 < 1. Proof. rewrite one_succ. apply lt_succ_diag_r. Qed. Theorem le_0_1 : 0 <= 1. Proof. apply lt_le_incl, lt_0_1. Qed. Theorem lt_1_2 : 1 < 2. Proof. rewrite two_succ. apply lt_succ_diag_r. Qed. Theorem lt_0_2 : 0 < 2. Proof. transitivity 1. apply lt_0_1. apply lt_1_2. Qed. Theorem le_0_2 : 0 <= 2. Proof. apply lt_le_incl, lt_0_2. Qed. (** The order tactic enriched with some knowledge of 0,1,2 *) Ltac order' := generalize lt_0_1 lt_1_2; order. Theorem lt_1_l : forall n m, 0 < n -> n < m -> 1 < m. Proof. intros n m H1 H2. rewrite <- le_succ_l, <- one_succ in H1. order. Qed. (** More Trichotomy, decidability and double negation elimination. *) (** The following theorem is cleary redundant, but helps not to remember whether one has to say le_gt_cases or lt_ge_cases *) Theorem lt_ge_cases : forall n m, n < m \/ n >= m. Proof. intros n m; destruct (le_gt_cases m n); intuition order. Qed. Theorem le_ge_cases : forall n m, n <= m \/ n >= m. Proof. intros n m; destruct (le_gt_cases n m); intuition order. Qed. Theorem lt_gt_cases : forall n m, n ~= m <-> n < m \/ n > m. Proof. intros n m; destruct (lt_trichotomy n m); intuition order. Qed. (** Decidability of equality, even though true in each finite ring, does not have a uniform proof. Otherwise, the proof for two fixed numbers would reduce to a normal form that will say if the numbers are equal or not, which cannot be true in all finite rings. Therefore, we prove decidability in the presence of order. *) Theorem eq_decidable : forall n m, decidable (n == m). Proof. intros n m; destruct (lt_trichotomy n m) as [ | [ | ]]; (right; order) || (left; order). Qed. (** DNE stands for double-negation elimination *) Theorem eq_dne : forall n m, ~ ~ n == m <-> n == m. Proof. intros n m; split; intro H. destruct (eq_decidable n m) as [H1 | H1]. assumption. false_hyp H1 H. intro H1; now apply H1. Qed. Theorem le_ngt : forall n m, n <= m <-> ~ n > m. Proof. intuition order. Qed. (** Redundant but useful *) Theorem nlt_ge : forall n m, ~ n < m <-> n >= m. Proof. intuition order. Qed. Theorem lt_decidable : forall n m, decidable (n < m). Proof. intros n m; destruct (le_gt_cases m n); [right|left]; order. Qed. Theorem lt_dne : forall n m, ~ ~ n < m <-> n < m. Proof. intros n m; split; intro H. destruct (lt_decidable n m) as [H1 | H1]; [assumption | false_hyp H1 H]. intro H1; false_hyp H H1. Qed. Theorem nle_gt : forall n m, ~ n <= m <-> n > m. Proof. intuition order. Qed. (** Redundant but useful *) Theorem lt_nge : forall n m, n < m <-> ~ n >= m. Proof. intuition order. Qed. Theorem le_decidable : forall n m, decidable (n <= m). Proof. intros n m; destruct (le_gt_cases n m); [left|right]; order. Qed. Theorem le_dne : forall n m, ~ ~ n <= m <-> n <= m. Proof. intros n m; split; intro H. destruct (le_decidable n m) as [H1 | H1]; [assumption | false_hyp H1 H]. intro H1; false_hyp H H1. Qed. Theorem nlt_succ_r : forall n m, ~ m < S n <-> n < m. Proof. intros n m; rewrite lt_succ_r. intuition order. Qed. (** The difference between integers and natural numbers is that for every integer there is a predecessor, which is not true for natural numbers. However, for both classes, every number that is bigger than some other number has a predecessor. The proof of this fact by regular induction does not go through, so we need to use strong (course-of-value) induction. 
*) Lemma lt_exists_pred_strong : forall z n m, z < m -> m <= n -> exists k, m == S k /\ z <= k. Proof. intro z; nzinduct n z. order. intro n; split; intros IH m H1 H2. apply le_succ_r in H2. destruct H2 as [H2 | H2]. now apply IH. exists n. now split; [| rewrite <- lt_succ_r; rewrite <- H2]. apply IH. assumption. now apply le_le_succ_r. Qed. Theorem lt_exists_pred : forall z n, z < n -> exists k, n == S k /\ z <= k. Proof. intros z n H; apply lt_exists_pred_strong with (z := z) (n := n). assumption. apply le_refl. Qed. Lemma lt_succ_pred : forall z n, z < n -> S (P n) == n. Proof. intros z n H. destruct (lt_exists_pred _ _ H) as (n' & EQ & LE). rewrite EQ. now rewrite pred_succ. Qed. (** Stronger variant of induction with assumptions n >= 0 (n < 0) in the induction step *) Section Induction. Variable A : t -> Prop. Hypothesis A_wd : Proper (eq==>iff) A. Section Center. Variable z : t. (* A z is the basis of induction *) Section RightInduction. Let A' (n : t) := forall m, z <= m -> m < n -> A m. Let right_step := forall n, z <= n -> A n -> A (S n). Let right_step' := forall n, z <= n -> A' n -> A n. Let right_step'' := forall n, A' n <-> A' (S n). Lemma rs_rs' : A z -> right_step -> right_step'. Proof. intros Az RS n H1 H2. le_elim H1. apply lt_exists_pred in H1. destruct H1 as [k [H3 H4]]. rewrite H3. apply RS; trivial. apply H2; trivial. rewrite H3; apply lt_succ_diag_r. rewrite <- H1; apply Az. Qed. Lemma rs'_rs'' : right_step' -> right_step''. Proof. intros RS' n; split; intros H1 m H2 H3. apply lt_succ_r in H3; le_elim H3; [now apply H1 | rewrite H3 in *; now apply RS']. apply H1; [assumption | now apply lt_lt_succ_r]. Qed. Lemma rbase : A' z. Proof. intros m H1 H2. apply le_ngt in H1. false_hyp H2 H1. Qed. Lemma A'A_right : (forall n, A' n) -> forall n, z <= n -> A n. Proof. intros H1 n H2. apply H1 with (n := S n); [assumption | apply lt_succ_diag_r]. Qed. Theorem strong_right_induction: right_step' -> forall n, z <= n -> A n. Proof. intro RS'; apply A'A_right; unfold A'; nzinduct n z; [apply rbase | apply rs'_rs''; apply RS']. Qed. Theorem right_induction : A z -> right_step -> forall n, z <= n -> A n. Proof. intros Az RS; apply strong_right_induction; now apply rs_rs'. Qed. Theorem right_induction' : (forall n, n <= z -> A n) -> right_step -> forall n, A n. Proof. intros L R n. destruct (lt_trichotomy n z) as [H | [H | H]]. apply L; now apply lt_le_incl. apply L; now apply eq_le_incl. apply right_induction. apply L; now apply eq_le_incl. assumption. now apply lt_le_incl. Qed. Theorem strong_right_induction' : (forall n, n <= z -> A n) -> right_step' -> forall n, A n. Proof. intros L R n. destruct (lt_trichotomy n z) as [H | [H | H]]. apply L; now apply lt_le_incl. apply L; now apply eq_le_incl. apply strong_right_induction. assumption. now apply lt_le_incl. Qed. End RightInduction. Section LeftInduction. Let A' (n : t) := forall m, m <= z -> n <= m -> A m. Let left_step := forall n, n < z -> A (S n) -> A n. Let left_step' := forall n, n <= z -> A' (S n) -> A n. Let left_step'' := forall n, A' n <-> A' (S n). Lemma ls_ls' : A z -> left_step -> left_step'. Proof. intros Az LS n H1 H2. le_elim H1. apply LS; trivial. apply H2; [now apply le_succ_l | now apply eq_le_incl]. rewrite H1; apply Az. Qed. Lemma ls'_ls'' : left_step' -> left_step''. Proof. intros LS' n; split; intros H1 m H2 H3. apply le_succ_l in H3. apply lt_le_incl in H3. now apply H1. le_elim H3. apply le_succ_l in H3. now apply H1. rewrite <- H3 in *; now apply LS'. Qed. Lemma lbase : A' (S z). Proof. intros m H1 H2. 
apply le_succ_l in H2. apply le_ngt in H1. false_hyp H2 H1. Qed. Lemma A'A_left : (forall n, A' n) -> forall n, n <= z -> A n. Proof. intros H1 n H2. apply (H1 n); [assumption | now apply eq_le_incl]. Qed. Theorem strong_left_induction: left_step' -> forall n, n <= z -> A n. Proof. intro LS'; apply A'A_left; unfold A'; nzinduct n (S z); [apply lbase | apply ls'_ls''; apply LS']. Qed. Theorem left_induction : A z -> left_step -> forall n, n <= z -> A n. Proof. intros Az LS; apply strong_left_induction; now apply ls_ls'. Qed. Theorem left_induction' : (forall n, z <= n -> A n) -> left_step -> forall n, A n. Proof. intros R L n. destruct (lt_trichotomy n z) as [H | [H | H]]. apply left_induction. apply R. now apply eq_le_incl. assumption. now apply lt_le_incl. rewrite H; apply R; now apply eq_le_incl. apply R; now apply lt_le_incl. Qed. Theorem strong_left_induction' : (forall n, z <= n -> A n) -> left_step' -> forall n, A n. Proof. intros R L n. destruct (lt_trichotomy n z) as [H | [H | H]]. apply strong_left_induction; auto. now apply lt_le_incl. rewrite H; apply R; now apply eq_le_incl. apply R; now apply lt_le_incl. Qed. End LeftInduction. Theorem order_induction : A z -> (forall n, z <= n -> A n -> A (S n)) -> (forall n, n < z -> A (S n) -> A n) -> forall n, A n. Proof. intros Az RS LS n. destruct (lt_trichotomy n z) as [H | [H | H]]. now apply left_induction; [| | apply lt_le_incl]. now rewrite H. now apply right_induction; [| | apply lt_le_incl]. Qed. Theorem order_induction' : A z -> (forall n, z <= n -> A n -> A (S n)) -> (forall n, n <= z -> A n -> A (P n)) -> forall n, A n. Proof. intros Az AS AP n; apply order_induction; try assumption. intros m H1 H2. apply AP in H2; [|now apply le_succ_l]. now rewrite pred_succ in H2. Qed. End Center. Theorem order_induction_0 : A 0 -> (forall n, 0 <= n -> A n -> A (S n)) -> (forall n, n < 0 -> A (S n) -> A n) -> forall n, A n. Proof (order_induction 0). Theorem order_induction'_0 : A 0 -> (forall n, 0 <= n -> A n -> A (S n)) -> (forall n, n <= 0 -> A n -> A (P n)) -> forall n, A n. Proof (order_induction' 0). (** Elimintation principle for < *) Theorem lt_ind : forall (n : t), A (S n) -> (forall m, n < m -> A m -> A (S m)) -> forall m, n < m -> A m. Proof. intros n H1 H2 m H3. apply right_induction with (S n); [assumption | | now apply le_succ_l]. intros; apply H2; try assumption. now apply le_succ_l. Qed. (** Elimination principle for <= *) Theorem le_ind : forall (n : t), A n -> (forall m, n <= m -> A m -> A (S m)) -> forall m, n <= m -> A m. Proof. intros n H1 H2 m H3. now apply right_induction with n. Qed. End Induction. Tactic Notation "nzord_induct" ident(n) := induction_maker n ltac:(apply order_induction_0). Tactic Notation "nzord_induct" ident(n) constr(z) := induction_maker n ltac:(apply order_induction with z). Section WF. Variable z : t. Let Rlt (n m : t) := z <= n < m. Let Rgt (n m : t) := m < n <= z. Instance Rlt_wd : Proper (eq ==> eq ==> iff) Rlt. Proof. intros x1 x2 H1 x3 x4 H2; unfold Rlt. rewrite H1; now rewrite H2. Qed. Instance Rgt_wd : Proper (eq ==> eq ==> iff) Rgt. Proof. intros x1 x2 H1 x3 x4 H2; unfold Rgt; rewrite H1; now rewrite H2. Qed. Theorem lt_wf : well_founded Rlt. Proof. unfold well_founded. apply strong_right_induction' with (z := z). auto with typeclass_instances. intros n H; constructor; intros y [H1 H2]. apply nle_gt in H2. elim H2. now apply le_trans with z. intros n H1 H2; constructor; intros m [H3 H4]. now apply H2. Qed. Theorem gt_wf : well_founded Rgt. Proof. unfold well_founded. 
apply strong_left_induction' with (z := z). auto with typeclass_instances. intros n H; constructor; intros y [H1 H2]. apply nle_gt in H2. elim H2. now apply le_lt_trans with n. intros n H1 H2; constructor; intros m [H3 H4]. apply H2. assumption. now apply le_succ_l. Qed. End WF. End NZOrderProp. (** If we have moreover a [compare] function, we can build an [OrderedType] structure. *) Module NZOrderedType (NZ : NZDecOrdSig') <: DecidableTypeFull <: OrderedTypeFull := NZ <+ NZBaseProp <+ NZOrderProp NZ <+ Compare2EqBool <+ HasEqBool2Dec. coq-8.4pl2/theories/Numbers/NatInt/NZLog.v0000640000175000001440000006704112010532755017451 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t. End Log2. Module Type NZLog2Spec (A : NZOrdAxiomsSig')(B : Pow' A)(C : Log2 A). Import A B C. Axiom log2_spec : forall a, 0 2^(log2 a) <= a < 2^(S (log2 a)). Axiom log2_nonpos : forall a, a<=0 -> log2 a == 0. End NZLog2Spec. Module Type NZLog2 (A : NZOrdAxiomsSig)(B : Pow A) := Log2 A <+ NZLog2Spec A B. (** Derived properties of logarithm *) Module Type NZLog2Prop (Import A : NZOrdAxiomsSig') (Import B : NZPow' A) (Import C : NZLog2 A B) (Import D : NZMulOrderProp A) (Import E : NZPowProp A B D). (** log2 is always non-negative *) Lemma log2_nonneg : forall a, 0 <= log2 a. Proof. intros a. destruct (le_gt_cases a 0) as [Ha|Ha]. now rewrite log2_nonpos. destruct (log2_spec a Ha) as (_,LT). apply lt_succ_r, (pow_gt_1 2). order'. rewrite <- le_succ_l, <- one_succ in Ha. order. Qed. (** A tactic for proving positivity and non-negativity *) Ltac order_pos := ((apply add_pos_pos || apply add_nonneg_nonneg || apply mul_pos_pos || apply mul_nonneg_nonneg || apply pow_nonneg || apply pow_pos_nonneg || apply log2_nonneg || apply (le_le_succ_r 0)); order_pos) (* in case of success of an apply, we recurse *) || order'. (* otherwise *) (** The spec of log2 indeed determines it *) Lemma log2_unique : forall a b, 0<=b -> 2^b<=a<2^(S b) -> log2 a == b. Proof. intros a b Hb (LEb,LTb). assert (Ha : 0 < a). apply lt_le_trans with (2^b); trivial. apply pow_pos_nonneg; order'. assert (Hc := log2_nonneg a). destruct (log2_spec a Ha) as (LEc,LTc). assert (log2 a <= b). apply lt_succ_r, (pow_lt_mono_r_iff 2); try order'. now apply le_le_succ_r. assert (b <= log2 a). apply lt_succ_r, (pow_lt_mono_r_iff 2); try order'. now apply le_le_succ_r. order. Qed. (** Hence log2 is a morphism. *) Instance log2_wd : Proper (eq==>eq) log2. Proof. intros x x' Hx. destruct (le_gt_cases x 0). rewrite 2 log2_nonpos; trivial. reflexivity. now rewrite <- Hx. apply log2_unique. apply log2_nonneg. rewrite Hx in *. now apply log2_spec. Qed. (** An alternate specification *) Lemma log2_spec_alt : forall a, 0 exists r, a == 2^(log2 a) + r /\ 0 <= r < 2^(log2 a). Proof. intros a Ha. destruct (log2_spec _ Ha) as (LE,LT). destruct (le_exists_sub _ _ LE) as (r & Hr & Hr'). exists r. split. now rewrite add_comm. split. trivial. apply (add_lt_mono_r _ _ (2^log2 a)). rewrite <- Hr. generalize LT. rewrite pow_succ_r by order_pos. rewrite two_succ at 1. now nzsimpl. Qed. Lemma log2_unique' : forall a b c, 0<=b -> 0<=c<2^b -> a == 2^b + c -> log2 a == b. Proof. intros a b c Hb (Hc,H) EQ. apply log2_unique. trivial. rewrite EQ. split. rewrite <- add_0_r at 1. now apply add_le_mono_l. rewrite pow_succ_r by order. rewrite two_succ at 2. nzsimpl. now apply add_lt_mono_l. Qed. (** log2 is exact on powers of 2 *) Lemma log2_pow2 : forall a, 0<=a -> log2 (2^a) == a. Proof. 
intros a Ha. apply log2_unique' with 0; trivial. split; order_pos. now nzsimpl. Qed. (** log2 and predecessors of powers of 2 *) Lemma log2_pred_pow2 : forall a, 0 log2 (P (2^a)) == P a. Proof. intros a Ha. assert (Ha' : S (P a) == a) by (now rewrite lt_succ_pred with 0). apply log2_unique. apply lt_succ_r; order. rewrite <-le_succ_l, <-lt_succ_r, Ha'. rewrite lt_succ_pred with 0. split; try easy. apply pow_lt_mono_r_iff; try order'. rewrite succ_lt_mono, Ha'. apply lt_succ_diag_r. apply pow_pos_nonneg; order'. Qed. (** log2 and basic constants *) Lemma log2_1 : log2 1 == 0. Proof. rewrite <- (pow_0_r 2). now apply log2_pow2. Qed. Lemma log2_2 : log2 2 == 1. Proof. rewrite <- (pow_1_r 2). apply log2_pow2; order'. Qed. (** log2 n is strictly positive for 1 0 < log2 a. Proof. intros a Ha. assert (Ha' : 0 < a) by order'. assert (H := log2_nonneg a). le_elim H; trivial. generalize (log2_spec a Ha'). rewrite <- H in *. nzsimpl; try order. intros (_,H'). rewrite two_succ in H'. apply lt_succ_r in H'; order. Qed. (** Said otherwise, log2 is null only below 1 *) Lemma log2_null : forall a, log2 a == 0 <-> a <= 1. Proof. intros a. split; intros H. destruct (le_gt_cases a 1) as [Ha|Ha]; trivial. generalize (log2_pos a Ha); order. le_elim H. apply log2_nonpos. apply lt_succ_r. now rewrite <- one_succ. rewrite H. apply log2_1. Qed. (** log2 is a monotone function (but not a strict one) *) Lemma log2_le_mono : forall a b, a<=b -> log2 a <= log2 b. Proof. intros a b H. destruct (le_gt_cases a 0) as [Ha|Ha]. rewrite log2_nonpos; order_pos. assert (Hb : 0 < b) by order. destruct (log2_spec a Ha) as (LEa,_). destruct (log2_spec b Hb) as (_,LTb). apply lt_succ_r, (pow_lt_mono_r_iff 2); order_pos. Qed. (** No reverse result for <=, consider for instance log2 3 <= log2 2 *) Lemma log2_lt_cancel : forall a b, log2 a < log2 b -> a < b. Proof. intros a b H. destruct (le_gt_cases b 0) as [Hb|Hb]. rewrite (log2_nonpos b) in H; trivial. generalize (log2_nonneg a); order. destruct (le_gt_cases a 0) as [Ha|Ha]. order. destruct (log2_spec a Ha) as (_,LTa). destruct (log2_spec b Hb) as (LEb,_). apply le_succ_l in H. apply (pow_le_mono_r_iff 2) in H; order_pos. Qed. (** When left side is a power of 2, we have an equivalence for <= *) Lemma log2_le_pow2 : forall a b, 0 (2^b<=a <-> b <= log2 a). Proof. intros a b Ha. split; intros H. destruct (lt_ge_cases b 0) as [Hb|Hb]. generalize (log2_nonneg a); order. rewrite <- (log2_pow2 b); trivial. now apply log2_le_mono. transitivity (2^(log2 a)). apply pow_le_mono_r; order'. now destruct (log2_spec a Ha). Qed. (** When right side is a square, we have an equivalence for < *) Lemma log2_lt_pow2 : forall a b, 0 (a<2^b <-> log2 a < b). Proof. intros a b Ha. split; intros H. destruct (lt_ge_cases b 0) as [Hb|Hb]. rewrite pow_neg_r in H; order. apply (pow_lt_mono_r_iff 2); try order_pos. apply le_lt_trans with a; trivial. now destruct (log2_spec a Ha). destruct (lt_ge_cases b 0) as [Hb|Hb]. generalize (log2_nonneg a); order. apply log2_lt_cancel; try order. now rewrite log2_pow2. Qed. (** Comparing log2 and identity *) Lemma log2_lt_lin : forall a, 0 log2 a < a. Proof. intros a Ha. apply (pow_lt_mono_r_iff 2); try order_pos. apply le_lt_trans with a. now destruct (log2_spec a Ha). apply pow_gt_lin_r; order'. Qed. Lemma log2_le_lin : forall a, 0<=a -> log2 a <= a. Proof. intros a Ha. le_elim Ha. now apply lt_le_incl, log2_lt_lin. rewrite <- Ha, log2_nonpos; order. Qed. (** Log2 and multiplication. 
*) (** Due to rounding error, we don't have the usual [log2 (a*b) = log2 a + log2 b] but we may be off by 1 at most *) Lemma log2_mul_below : forall a b, 0 0 log2 a + log2 b <= log2 (a*b). Proof. intros a b Ha Hb. apply log2_le_pow2; try order_pos. rewrite pow_add_r by order_pos. apply mul_le_mono_nonneg; try apply log2_spec; order_pos. Qed. Lemma log2_mul_above : forall a b, 0<=a -> 0<=b -> log2 (a*b) <= log2 a + log2 b + 1. Proof. intros a b Ha Hb. le_elim Ha. le_elim Hb. apply lt_succ_r. rewrite add_1_r, <- add_succ_r, <- add_succ_l. apply log2_lt_pow2; try order_pos. rewrite pow_add_r by order_pos. apply mul_lt_mono_nonneg; try order; now apply log2_spec. rewrite <- Hb. nzsimpl. rewrite log2_nonpos; order_pos. rewrite <- Ha. nzsimpl. rewrite log2_nonpos; order_pos. Qed. (** And we can't find better approximations in general. - The lower bound is exact for powers of 2. - Concerning the upper bound, for any c>1, take a=b=2^c-1, then log2 (a*b) = c+c -1 while (log2 a) = (log2 b) = c-1 *) (** At least, we get back the usual equation when we multiply by 2 (or 2^k) *) Lemma log2_mul_pow2 : forall a b, 0 0<=b -> log2 (a*2^b) == b + log2 a. Proof. intros a b Ha Hb. apply log2_unique; try order_pos. split. rewrite pow_add_r, mul_comm; try order_pos. apply mul_le_mono_nonneg_r. order_pos. now apply log2_spec. rewrite <-add_succ_r, pow_add_r, mul_comm; try order_pos. apply mul_lt_mono_pos_l. order_pos. now apply log2_spec. Qed. Lemma log2_double : forall a, 0 log2 (2*a) == S (log2 a). Proof. intros a Ha. generalize (log2_mul_pow2 a 1 Ha le_0_1). now nzsimpl'. Qed. (** Two numbers with same log2 cannot be far away. *) Lemma log2_same : forall a b, 0 0 log2 a == log2 b -> a < 2*b. Proof. intros a b Ha Hb H. apply log2_lt_cancel. rewrite log2_double, H by trivial. apply lt_succ_diag_r. Qed. (** Log2 and successor : - the log2 function climbs by at most 1 at a time - otherwise it stays at the same value - the +1 steps occur for powers of two *) Lemma log2_succ_le : forall a, log2 (S a) <= S (log2 a). Proof. intros a. destruct (lt_trichotomy 0 a) as [LT|[EQ|LT]]. apply (pow_le_mono_r_iff 2); try order_pos. transitivity (S a). apply log2_spec. apply lt_succ_r; order. now apply le_succ_l, log2_spec. rewrite <- EQ, <- one_succ, log2_1; order_pos. rewrite 2 log2_nonpos. order_pos. order'. now rewrite le_succ_l. Qed. Lemma log2_succ_or : forall a, log2 (S a) == S (log2 a) \/ log2 (S a) == log2 a. Proof. intros. destruct (le_gt_cases (log2 (S a)) (log2 a)) as [H|H]. right. generalize (log2_le_mono _ _ (le_succ_diag_r a)); order. left. apply le_succ_l in H. generalize (log2_succ_le a); order. Qed. Lemma log2_eq_succ_is_pow2 : forall a, log2 (S a) == S (log2 a) -> exists b, S a == 2^b. Proof. intros a H. destruct (le_gt_cases a 0) as [Ha|Ha]. rewrite 2 (proj2 (log2_null _)) in H. generalize (lt_succ_diag_r 0); order. order'. apply le_succ_l. order'. assert (Ha' : 0 < S a) by (apply lt_succ_r; order). exists (log2 (S a)). generalize (proj1 (log2_spec (S a) Ha')) (proj2 (log2_spec a Ha)). rewrite <- le_succ_l, <- H. order. Qed. Lemma log2_eq_succ_iff_pow2 : forall a, 0 (log2 (S a) == S (log2 a) <-> exists b, S a == 2^b). Proof. intros a Ha. split. apply log2_eq_succ_is_pow2. intros (b,Hb). assert (Hb' : 0 < b). apply (pow_gt_1 2); try order'; now rewrite <- Hb, one_succ, <- succ_lt_mono. rewrite Hb, log2_pow2; try order'. setoid_replace a with (P (2^b)). rewrite log2_pred_pow2; trivial. symmetry; now apply lt_succ_pred with 0. apply succ_inj. rewrite Hb. symmetry. apply lt_succ_pred with 0. 
rewrite <- Hb, lt_succ_r; order. Qed. Lemma log2_succ_double : forall a, 0 log2 (2*a+1) == S (log2 a). Proof. intros a Ha. rewrite add_1_r. destruct (log2_succ_or (2*a)) as [H|H]; [exfalso|now rewrite H, log2_double]. apply log2_eq_succ_is_pow2 in H. destruct H as (b,H). destruct (lt_trichotomy b 0) as [LT|[EQ|LT]]. rewrite pow_neg_r in H; trivial. apply (mul_pos_pos 2), succ_lt_mono in Ha; try order'. rewrite <- one_succ in Ha. order'. rewrite EQ, pow_0_r in H. apply (mul_pos_pos 2), succ_lt_mono in Ha; try order'. rewrite <- one_succ in Ha. order'. assert (EQ:=lt_succ_pred 0 b LT). rewrite <- EQ, pow_succ_r in H; [|now rewrite <- lt_succ_r, EQ]. destruct (lt_ge_cases a (2^(P b))) as [LT'|LE']. generalize (mul_2_mono_l _ _ LT'). rewrite add_1_l. order. rewrite (mul_le_mono_pos_l _ _ 2) in LE'; try order'. rewrite <- H in LE'. apply le_succ_l in LE'. order. Qed. (** Log2 and addition *) Lemma log2_add_le : forall a b, a~=1 -> b~=1 -> log2 (a+b) <= log2 a + log2 b. Proof. intros a b Ha Hb. destruct (lt_trichotomy a 1) as [Ha'|[Ha'|Ha']]; [|order|]. rewrite one_succ, lt_succ_r in Ha'. rewrite (log2_nonpos a); trivial. nzsimpl. apply log2_le_mono. rewrite <- (add_0_l b) at 2. now apply add_le_mono. destruct (lt_trichotomy b 1) as [Hb'|[Hb'|Hb']]; [|order|]. rewrite one_succ, lt_succ_r in Hb'. rewrite (log2_nonpos b); trivial. nzsimpl. apply log2_le_mono. rewrite <- (add_0_r a) at 2. now apply add_le_mono. clear Ha Hb. apply lt_succ_r. apply log2_lt_pow2; try order_pos. rewrite pow_succ_r by order_pos. rewrite two_succ, one_succ at 1. nzsimpl. apply add_lt_mono. apply lt_le_trans with (2^(S (log2 a))). apply log2_spec; order'. apply pow_le_mono_r. order'. rewrite <- add_1_r. apply add_le_mono_l. rewrite one_succ; now apply le_succ_l, log2_pos. apply lt_le_trans with (2^(S (log2 b))). apply log2_spec; order'. apply pow_le_mono_r. order'. rewrite <- add_1_l. apply add_le_mono_r. rewrite one_succ; now apply le_succ_l, log2_pos. Qed. (** The sum of two log2 is less than twice the log2 of the sum. The large inequality is obvious thanks to monotonicity. The strict one requires some more work. This is almost a convexity inequality for points [2a], [2b] and their middle [a+b] : ideally, we would have [2*log(a+b) >= log(2a)+log(2b) = 2+log a+log b]. Here, we cannot do better: consider for instance a=2 b=4, then 1+2<2*2 *) Lemma add_log2_lt : forall a b, 0 0 log2 a + log2 b < 2 * log2 (a+b). Proof. intros a b Ha Hb. nzsimpl'. assert (H : log2 a <= log2 (a+b)). apply log2_le_mono. rewrite <- (add_0_r a) at 1. apply add_le_mono; order. assert (H' : log2 b <= log2 (a+b)). apply log2_le_mono. rewrite <- (add_0_l b) at 1. apply add_le_mono; order. le_elim H. apply lt_le_trans with (log2 (a+b) + log2 b). now apply add_lt_mono_r. now apply add_le_mono_l. rewrite <- H at 1. apply add_lt_mono_l. le_elim H'; trivial. symmetry in H. apply log2_same in H; try order_pos. symmetry in H'. apply log2_same in H'; try order_pos. revert H H'. nzsimpl'. rewrite <- add_lt_mono_l, <- add_lt_mono_r; order. Qed. End NZLog2Prop. Module NZLog2UpProp (Import A : NZDecOrdAxiomsSig') (Import B : NZPow' A) (Import C : NZLog2 A B) (Import D : NZMulOrderProp A) (Import E : NZPowProp A B D) (Import F : NZLog2Prop A B C D E). (** * [log2_up] : a binary logarithm that rounds up instead of down *) (** For once, we define instead of axiomatizing, thanks to log2 *) Definition log2_up a := match compare 1 a with | Lt => S (log2 (P a)) | _ => 0 end. Lemma log2_up_eqn0 : forall a, a<=1 -> log2_up a == 0. Proof. intros a Ha. unfold log2_up. 
case compare_spec; try order. Qed. Lemma log2_up_eqn : forall a, 1 log2_up a == S (log2 (P a)). Proof. intros a Ha. unfold log2_up. case compare_spec; try order. Qed. Lemma log2_up_spec : forall a, 1 2^(P (log2_up a)) < a <= 2^(log2_up a). Proof. intros a Ha. rewrite log2_up_eqn; trivial. rewrite pred_succ. rewrite <- (lt_succ_pred 1 a Ha) at 2 3. rewrite lt_succ_r, le_succ_l. apply log2_spec. apply succ_lt_mono. now rewrite (lt_succ_pred 1 a Ha), <- one_succ. Qed. Lemma log2_up_nonpos : forall a, a<=0 -> log2_up a == 0. Proof. intros. apply log2_up_eqn0. order'. Qed. Instance log2_up_wd : Proper (eq==>eq) log2_up. Proof. assert (Proper (eq==>eq==>Logic.eq) compare). repeat red; intros; do 2 case compare_spec; trivial; order. intros a a' Ha. unfold log2_up. rewrite Ha at 1. case compare; now rewrite ?Ha. Qed. (** [log2_up] is always non-negative *) Lemma log2_up_nonneg : forall a, 0 <= log2_up a. Proof. intros a. unfold log2_up. case compare_spec; try order. intros. apply le_le_succ_r, log2_nonneg. Qed. (** The spec of [log2_up] indeed determines it *) Lemma log2_up_unique : forall a b, 0 2^(P b) log2_up a == b. Proof. intros a b Hb (LEb,LTb). assert (Ha : 1 < a). apply le_lt_trans with (2^(P b)); trivial. rewrite one_succ. apply le_succ_l. apply pow_pos_nonneg. order'. apply lt_succ_r. now rewrite (lt_succ_pred 0 b Hb). assert (Hc := log2_up_nonneg a). destruct (log2_up_spec a Ha) as (LTc,LEc). assert (b <= log2_up a). apply lt_succ_r. rewrite <- (lt_succ_pred 0 b Hb). rewrite <- succ_lt_mono. apply (pow_lt_mono_r_iff 2); try order'. assert (Hc' : 0 < log2_up a) by order. assert (log2_up a <= b). apply lt_succ_r. rewrite <- (lt_succ_pred 0 _ Hc'). rewrite <- succ_lt_mono. apply (pow_lt_mono_r_iff 2); try order'. order. Qed. (** [log2_up] is exact on powers of 2 *) Lemma log2_up_pow2 : forall a, 0<=a -> log2_up (2^a) == a. Proof. intros a Ha. le_elim Ha. apply log2_up_unique; trivial. split; try order. apply pow_lt_mono_r; try order'. rewrite <- (lt_succ_pred 0 a Ha) at 2. now apply lt_succ_r. now rewrite <- Ha, pow_0_r, log2_up_eqn0. Qed. (** [log2_up] and successors of powers of 2 *) Lemma log2_up_succ_pow2 : forall a, 0<=a -> log2_up (S (2^a)) == S a. Proof. intros a Ha. rewrite log2_up_eqn, pred_succ, log2_pow2; try easy. rewrite one_succ, <- succ_lt_mono. apply pow_pos_nonneg; order'. Qed. (** Basic constants *) Lemma log2_up_1 : log2_up 1 == 0. Proof. now apply log2_up_eqn0. Qed. Lemma log2_up_2 : log2_up 2 == 1. Proof. rewrite <- (pow_1_r 2). apply log2_up_pow2; order'. Qed. (** Links between log2 and [log2_up] *) Lemma le_log2_log2_up : forall a, log2 a <= log2_up a. Proof. intros a. unfold log2_up. case compare_spec; intros H. rewrite <- H, log2_1. order. rewrite <- (lt_succ_pred 1 a H) at 1. apply log2_succ_le. rewrite log2_nonpos. order. now rewrite <-lt_succ_r, <-one_succ. Qed. Lemma le_log2_up_succ_log2 : forall a, log2_up a <= S (log2 a). Proof. intros a. unfold log2_up. case compare_spec; intros H; try order_pos. rewrite <- succ_le_mono. apply log2_le_mono. rewrite <- (lt_succ_pred 1 a H) at 2. apply le_succ_diag_r. Qed. Lemma log2_log2_up_spec : forall a, 0 2^log2 a <= a <= 2^log2_up a. Proof. intros a H. split. now apply log2_spec. rewrite <-le_succ_l, <-one_succ in H. le_elim H. now apply log2_up_spec. now rewrite <-H, log2_up_1, pow_0_r. Qed. Lemma log2_log2_up_exact : forall a, 0 (log2 a == log2_up a <-> exists b, a == 2^b). Proof. intros a Ha. split. intros. exists (log2 a). generalize (log2_log2_up_spec a Ha). rewrite <-H. destruct 1; order. intros (b,Hb). rewrite Hb. 
destruct (le_gt_cases 0 b). now rewrite log2_pow2, log2_up_pow2. rewrite pow_neg_r; trivial. now rewrite log2_nonpos, log2_up_nonpos. Qed. (** [log2_up] n is strictly positive for 1 0 < log2_up a. Proof. intros. rewrite log2_up_eqn; trivial. apply lt_succ_r; order_pos. Qed. (** Said otherwise, [log2_up] is null only below 1 *) Lemma log2_up_null : forall a, log2_up a == 0 <-> a <= 1. Proof. intros a. split; intros H. destruct (le_gt_cases a 1) as [Ha|Ha]; trivial. generalize (log2_up_pos a Ha); order. now apply log2_up_eqn0. Qed. (** [log2_up] is a monotone function (but not a strict one) *) Lemma log2_up_le_mono : forall a b, a<=b -> log2_up a <= log2_up b. Proof. intros a b H. destruct (le_gt_cases a 1) as [Ha|Ha]. rewrite log2_up_eqn0; trivial. apply log2_up_nonneg. rewrite 2 log2_up_eqn; try order. rewrite <- succ_le_mono. apply log2_le_mono, succ_le_mono. rewrite 2 lt_succ_pred with 1; order. Qed. (** No reverse result for <=, consider for instance log2_up 4 <= log2_up 3 *) Lemma log2_up_lt_cancel : forall a b, log2_up a < log2_up b -> a < b. Proof. intros a b H. destruct (le_gt_cases b 1) as [Hb|Hb]. rewrite (log2_up_eqn0 b) in H; trivial. generalize (log2_up_nonneg a); order. destruct (le_gt_cases a 1) as [Ha|Ha]. order. rewrite 2 log2_up_eqn in H; try order. rewrite <- succ_lt_mono in H. apply log2_lt_cancel, succ_lt_mono in H. rewrite 2 lt_succ_pred with 1 in H; order. Qed. (** When left side is a power of 2, we have an equivalence for < *) Lemma log2_up_lt_pow2 : forall a b, 0 (2^b b < log2_up a). Proof. intros a b Ha. split; intros H. destruct (lt_ge_cases b 0) as [Hb|Hb]. generalize (log2_up_nonneg a); order. apply (pow_lt_mono_r_iff 2). order'. apply log2_up_nonneg. apply lt_le_trans with a; trivial. apply (log2_up_spec a). apply le_lt_trans with (2^b); trivial. rewrite one_succ, le_succ_l. apply pow_pos_nonneg; order'. destruct (lt_ge_cases b 0) as [Hb|Hb]. now rewrite pow_neg_r. rewrite <- (log2_up_pow2 b) in H; trivial. now apply log2_up_lt_cancel. Qed. (** When right side is a square, we have an equivalence for <= *) Lemma log2_up_le_pow2 : forall a b, 0 (a<=2^b <-> log2_up a <= b). Proof. intros a b Ha. split; intros H. destruct (lt_ge_cases b 0) as [Hb|Hb]. rewrite pow_neg_r in H; order. rewrite <- (log2_up_pow2 b); trivial. now apply log2_up_le_mono. transitivity (2^(log2_up a)). now apply log2_log2_up_spec. apply pow_le_mono_r; order'. Qed. (** Comparing [log2_up] and identity *) Lemma log2_up_lt_lin : forall a, 0 log2_up a < a. Proof. intros a Ha. assert (H : S (P a) == a) by (now apply lt_succ_pred with 0). rewrite <- H at 2. apply lt_succ_r. apply log2_up_le_pow2; trivial. rewrite <- H at 1. apply le_succ_l. apply pow_gt_lin_r. order'. apply lt_succ_r; order. Qed. Lemma log2_up_le_lin : forall a, 0<=a -> log2_up a <= a. Proof. intros a Ha. le_elim Ha. now apply lt_le_incl, log2_up_lt_lin. rewrite <- Ha, log2_up_nonpos; order. Qed. (** [log2_up] and multiplication. *) (** Due to rounding error, we don't have the usual [log2_up (a*b) = log2_up a + log2_up b] but we may be off by 1 at most *) Lemma log2_up_mul_above : forall a b, 0<=a -> 0<=b -> log2_up (a*b) <= log2_up a + log2_up b. Proof. intros a b Ha Hb. assert (Ha':=log2_up_nonneg a). assert (Hb':=log2_up_nonneg b). le_elim Ha. le_elim Hb. apply log2_up_le_pow2; try order_pos. rewrite pow_add_r; trivial. apply mul_le_mono_nonneg; try apply log2_log2_up_spec; order'. rewrite <- Hb. nzsimpl. rewrite log2_up_nonpos; order_pos. rewrite <- Ha. nzsimpl. rewrite log2_up_nonpos; order_pos. Qed. 
Lemma log2_up_mul_below : forall a b, 0 0 log2_up a + log2_up b <= S (log2_up (a*b)). Proof. intros a b Ha Hb. rewrite <-le_succ_l, <-one_succ in Ha. le_elim Ha. rewrite <-le_succ_l, <-one_succ in Hb. le_elim Hb. assert (Ha' : 0 < log2_up a) by (apply log2_up_pos; trivial). assert (Hb' : 0 < log2_up b) by (apply log2_up_pos; trivial). rewrite <- (lt_succ_pred 0 (log2_up a)); trivial. rewrite <- (lt_succ_pred 0 (log2_up b)); trivial. nzsimpl. rewrite <- succ_le_mono, le_succ_l. apply (pow_lt_mono_r_iff 2). order'. apply log2_up_nonneg. rewrite pow_add_r; try (apply lt_succ_r; rewrite (lt_succ_pred 0); trivial). apply lt_le_trans with (a*b). apply mul_lt_mono_nonneg; try order_pos; try now apply log2_up_spec. apply log2_up_spec. setoid_replace 1 with (1*1) by now nzsimpl. apply mul_lt_mono_nonneg; order'. rewrite <- Hb, log2_up_1; nzsimpl. apply le_succ_diag_r. rewrite <- Ha, log2_up_1; nzsimpl. apply le_succ_diag_r. Qed. (** And we can't find better approximations in general. - The upper bound is exact for powers of 2. - Concerning the lower bound, for any c>1, take a=b=2^c+1, then [log2_up (a*b) = c+c +1] while [(log2_up a) = (log2_up b) = c+1] *) (** At least, we get back the usual equation when we multiply by 2 (or 2^k) *) Lemma log2_up_mul_pow2 : forall a b, 0 0<=b -> log2_up (a*2^b) == b + log2_up a. Proof. intros a b Ha Hb. rewrite <- le_succ_l, <- one_succ in Ha; le_elim Ha. apply log2_up_unique. apply add_nonneg_pos; trivial. now apply log2_up_pos. split. assert (EQ := lt_succ_pred 0 _ (log2_up_pos _ Ha)). rewrite <- EQ. nzsimpl. rewrite pow_add_r, mul_comm; trivial. apply mul_lt_mono_pos_r. order_pos. now apply log2_up_spec. rewrite <- lt_succ_r, EQ. now apply log2_up_pos. rewrite pow_add_r, mul_comm; trivial. apply mul_le_mono_nonneg_l. order_pos. now apply log2_up_spec. apply log2_up_nonneg. now rewrite <- Ha, mul_1_l, log2_up_1, add_0_r, log2_up_pow2. Qed. Lemma log2_up_double : forall a, 0 log2_up (2*a) == S (log2_up a). Proof. intros a Ha. generalize (log2_up_mul_pow2 a 1 Ha le_0_1). now nzsimpl'. Qed. (** Two numbers with same [log2_up] cannot be far away. *) Lemma log2_up_same : forall a b, 0 0 log2_up a == log2_up b -> a < 2*b. Proof. intros a b Ha Hb H. apply log2_up_lt_cancel. rewrite log2_up_double, H by trivial. apply lt_succ_diag_r. Qed. (** [log2_up] and successor : - the [log2_up] function climbs by at most 1 at a time - otherwise it stays at the same value - the +1 steps occur after powers of two *) Lemma log2_up_succ_le : forall a, log2_up (S a) <= S (log2_up a). Proof. intros a. destruct (lt_trichotomy 1 a) as [LT|[EQ|LT]]. rewrite 2 log2_up_eqn; trivial. rewrite pred_succ, <- succ_le_mono. rewrite <-(lt_succ_pred 1 a LT) at 1. apply log2_succ_le. apply lt_succ_r; order. rewrite <- EQ, <- two_succ, log2_up_1, log2_up_2. now nzsimpl'. rewrite 2 log2_up_eqn0. order_pos. order'. now rewrite le_succ_l. Qed. Lemma log2_up_succ_or : forall a, log2_up (S a) == S (log2_up a) \/ log2_up (S a) == log2_up a. Proof. intros. destruct (le_gt_cases (log2_up (S a)) (log2_up a)). right. generalize (log2_up_le_mono _ _ (le_succ_diag_r a)); order. left. apply le_succ_l in H. generalize (log2_up_succ_le a); order. Qed. Lemma log2_up_eq_succ_is_pow2 : forall a, log2_up (S a) == S (log2_up a) -> exists b, a == 2^b. Proof. intros a H. destruct (le_gt_cases a 0) as [Ha|Ha]. rewrite 2 (proj2 (log2_up_null _)) in H. generalize (lt_succ_diag_r 0); order. order'. apply le_succ_l. order'. assert (Ha' : 1 < S a) by (now rewrite one_succ, <- succ_lt_mono). exists (log2_up a). 
generalize (proj1 (log2_up_spec (S a) Ha')) (proj2 (log2_log2_up_spec a Ha)). rewrite H, pred_succ, lt_succ_r. order. Qed. Lemma log2_up_eq_succ_iff_pow2 : forall a, 0 (log2_up (S a) == S (log2_up a) <-> exists b, a == 2^b). Proof. intros a Ha. split. apply log2_up_eq_succ_is_pow2. intros (b,Hb). destruct (lt_ge_cases b 0) as [Hb'|Hb']. rewrite pow_neg_r in Hb; order. rewrite Hb, log2_up_pow2; try order'. now rewrite log2_up_succ_pow2. Qed. Lemma log2_up_succ_double : forall a, 0 log2_up (2*a+1) == 2 + log2 a. Proof. intros a Ha. rewrite log2_up_eqn. rewrite add_1_r, pred_succ, log2_double; now nzsimpl'. apply le_lt_trans with (0+1). now nzsimpl'. apply add_lt_mono_r. order_pos. Qed. (** [log2_up] and addition *) Lemma log2_up_add_le : forall a b, a~=1 -> b~=1 -> log2_up (a+b) <= log2_up a + log2_up b. Proof. intros a b Ha Hb. destruct (lt_trichotomy a 1) as [Ha'|[Ha'|Ha']]; [|order|]. rewrite (log2_up_eqn0 a) by order. nzsimpl. apply log2_up_le_mono. rewrite one_succ, lt_succ_r in Ha'. rewrite <- (add_0_l b) at 2. now apply add_le_mono. destruct (lt_trichotomy b 1) as [Hb'|[Hb'|Hb']]; [|order|]. rewrite (log2_up_eqn0 b) by order. nzsimpl. apply log2_up_le_mono. rewrite one_succ, lt_succ_r in Hb'. rewrite <- (add_0_r a) at 2. now apply add_le_mono. clear Ha Hb. transitivity (log2_up (a*b)). now apply log2_up_le_mono, add_le_mul. apply log2_up_mul_above; order'. Qed. (** The sum of two [log2_up] is less than twice the [log2_up] of the sum. The large inequality is obvious thanks to monotonicity. The strict one requires some more work. This is almost a convexity inequality for points [2a], [2b] and their middle [a+b] : ideally, we would have [2*log(a+b) >= log(2a)+log(2b) = 2+log a+log b]. Here, we cannot do better: consider for instance a=3 b=5, then 2+3<2*3 *) Lemma add_log2_up_lt : forall a b, 0 0 log2_up a + log2_up b < 2 * log2_up (a+b). Proof. intros a b Ha Hb. nzsimpl'. assert (H : log2_up a <= log2_up (a+b)). apply log2_up_le_mono. rewrite <- (add_0_r a) at 1. apply add_le_mono; order. assert (H' : log2_up b <= log2_up (a+b)). apply log2_up_le_mono. rewrite <- (add_0_l b) at 1. apply add_le_mono; order. le_elim H. apply lt_le_trans with (log2_up (a+b) + log2_up b). now apply add_lt_mono_r. now apply add_le_mono_l. rewrite <- H at 1. apply add_lt_mono_l. le_elim H'. trivial. symmetry in H. apply log2_up_same in H; try order_pos. symmetry in H'. apply log2_up_same in H'; try order_pos. revert H H'. nzsimpl'. rewrite <- add_lt_mono_l, <- add_lt_mono_r; order. Qed. End NZLog2UpProp. coq-8.4pl2/theories/Numbers/NatInt/NZDiv.v0000640000175000001440000003575412010532755017460 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> t. End DivMod. Module Type DivModNotation (A : Typ)(Import B : DivMod A). Infix "/" := div. Infix "mod" := modulo (at level 40, no associativity). End DivModNotation. Module Type DivMod' (A : Typ) := DivMod A <+ DivModNotation A. Module Type NZDivSpec (Import A : NZOrdAxiomsSig')(Import B : DivMod' A). Declare Instance div_wd : Proper (eq==>eq==>eq) div. Declare Instance mod_wd : Proper (eq==>eq==>eq) modulo. Axiom div_mod : forall a b, b ~= 0 -> a == b*(a/b) + (a mod b). Axiom mod_bound_pos : forall a b, 0<=a -> 0 0 <= a mod b < b. End NZDivSpec. (** The different divisions will only differ in the conditions they impose on [modulo]. For NZ, we have only described the behavior on positive numbers. 
*) Module Type NZDiv (A : NZOrdAxiomsSig) := DivMod A <+ NZDivSpec A. Module Type NZDiv' (A : NZOrdAxiomsSig) := NZDiv A <+ DivModNotation A. Module Type NZDivProp (Import A : NZOrdAxiomsSig') (Import B : NZDiv' A) (Import C : NZMulOrderProp A). (** Uniqueness theorems *) Theorem div_mod_unique : forall b q1 q2 r1 r2, 0<=r1 0<=r2 b*q1+r1 == b*q2+r2 -> q1 == q2 /\ r1 == r2. Proof. intros b. assert (U : forall q1 q2 r1 r2, b*q1+r1 == b*q2+r2 -> 0<=r1 0<=r2 -> q1 False). intros q1 q2 r1 r2 EQ LT Hr1 Hr2. contradict EQ. apply lt_neq. apply lt_le_trans with (b*q1+b). rewrite <- add_lt_mono_l. tauto. apply le_trans with (b*q2). rewrite mul_comm, <- mul_succ_l, mul_comm. apply mul_le_mono_nonneg_l; intuition; try order. rewrite le_succ_l; auto. rewrite <- (add_0_r (b*q2)) at 1. rewrite <- add_le_mono_l. tauto. intros q1 q2 r1 r2 Hr1 Hr2 EQ; destruct (lt_trichotomy q1 q2) as [LT|[EQ'|GT]]. elim (U q1 q2 r1 r2); intuition. split; auto. rewrite EQ' in EQ. rewrite add_cancel_l in EQ; auto. elim (U q2 q1 r2 r1); intuition. Qed. Theorem div_unique: forall a b q r, 0<=a -> 0<=r a == b*q + r -> q == a/b. Proof. intros a b q r Ha (Hb,Hr) EQ. destruct (div_mod_unique b q (a/b) r (a mod b)); auto. apply mod_bound_pos; order. rewrite <- div_mod; order. Qed. Theorem mod_unique: forall a b q r, 0<=a -> 0<=r a == b*q + r -> r == a mod b. Proof. intros a b q r Ha (Hb,Hr) EQ. destruct (div_mod_unique b q (a/b) r (a mod b)); auto. apply mod_bound_pos; order. rewrite <- div_mod; order. Qed. Theorem div_unique_exact a b q: 0<=a -> 0 a == b*q -> q == a/b. Proof. intros Ha Hb H. apply div_unique with 0; nzsimpl; now try split. Qed. (** A division by itself returns 1 *) Lemma div_same : forall a, 0 a/a == 1. Proof. intros. symmetry. apply div_unique_exact; nzsimpl; order. Qed. Lemma mod_same : forall a, 0 a mod a == 0. Proof. intros. symmetry. apply mod_unique with 1; intuition; try order. now nzsimpl. Qed. (** A division of a small number by a bigger one yields zero. *) Theorem div_small: forall a b, 0<=a a/b == 0. Proof. intros. symmetry. apply div_unique with a; intuition; try order. now nzsimpl. Qed. (** Same situation, in term of modulo: *) Theorem mod_small: forall a b, 0<=a a mod b == a. Proof. intros. symmetry. apply mod_unique with 0; intuition; try order. now nzsimpl. Qed. (** * Basic values of divisions and modulo. *) Lemma div_0_l: forall a, 0 0/a == 0. Proof. intros; apply div_small; split; order. Qed. Lemma mod_0_l: forall a, 0 0 mod a == 0. Proof. intros; apply mod_small; split; order. Qed. Lemma div_1_r: forall a, 0<=a -> a/1 == a. Proof. intros. symmetry. apply div_unique_exact; nzsimpl; order'. Qed. Lemma mod_1_r: forall a, 0<=a -> a mod 1 == 0. Proof. intros. symmetry. apply mod_unique with a; try split; try order; try apply lt_0_1. now nzsimpl. Qed. Lemma div_1_l: forall a, 1 1/a == 0. Proof. intros; apply div_small; split; auto. apply le_0_1. Qed. Lemma mod_1_l: forall a, 1 1 mod a == 1. Proof. intros; apply mod_small; split; auto. apply le_0_1. Qed. Lemma div_mul : forall a b, 0<=a -> 0 (a*b)/b == a. Proof. intros; symmetry. apply div_unique_exact; trivial. apply mul_nonneg_nonneg; order. apply mul_comm. Qed. Lemma mod_mul : forall a b, 0<=a -> 0 (a*b) mod b == 0. Proof. intros; symmetry. apply mod_unique with a; try split; try order. apply mul_nonneg_nonneg; order. nzsimpl; apply mul_comm. Qed. (** * Order results about mod and div *) (** A modulo cannot grow beyond its starting point. *) Theorem mod_le: forall a b, 0<=a -> 0 a mod b <= a. Proof. intros. destruct (le_gt_cases b a). 
apply le_trans with b; auto. apply lt_le_incl. destruct (mod_bound_pos a b); auto. rewrite lt_eq_cases; right. apply mod_small; auto. Qed. (* Division of positive numbers is positive. *) Lemma div_pos: forall a b, 0<=a -> 0 0 <= a/b. Proof. intros. rewrite (mul_le_mono_pos_l _ _ b); auto; nzsimpl. rewrite (add_le_mono_r _ _ (a mod b)). rewrite <- div_mod by order. nzsimpl. apply mod_le; auto. Qed. Lemma div_str_pos : forall a b, 0 0 < a/b. Proof. intros a b (Hb,Hab). assert (LE : 0 <= a/b) by (apply div_pos; order). assert (MOD : a mod b < b) by (destruct (mod_bound_pos a b); order). rewrite lt_eq_cases in LE; destruct LE as [LT|EQ]; auto. exfalso; revert Hab. rewrite (div_mod a b), <-EQ; nzsimpl; order. Qed. Lemma div_small_iff : forall a b, 0<=a -> 0 (a/b==0 <-> a 0 (a mod b == a <-> a 0 (0 b<=a). Proof. intros a b Ha Hb; split; intros Hab. destruct (lt_ge_cases a b) as [LT|LE]; auto. rewrite <- div_small_iff in LT; order. apply div_str_pos; auto. Qed. (** As soon as the divisor is strictly greater than 1, the division is strictly decreasing. *) Lemma div_lt : forall a b, 0 1 a/b < a. Proof. intros. assert (0 < b) by (apply lt_trans with 1; auto using lt_0_1). destruct (lt_ge_cases a b). rewrite div_small; try split; order. rewrite (div_mod a b) at 2 by order. apply lt_le_trans with (b*(a/b)). rewrite <- (mul_1_l (a/b)) at 1. rewrite <- mul_lt_mono_pos_r; auto. apply div_str_pos; auto. rewrite <- (add_0_r (b*(a/b))) at 1. rewrite <- add_le_mono_l. destruct (mod_bound_pos a b); order. Qed. (** [le] is compatible with a positive division. *) Lemma div_le_mono : forall a b c, 0 0<=a<=b -> a/c <= b/c. Proof. intros a b c Hc (Ha,Hab). rewrite lt_eq_cases in Hab. destruct Hab as [LT|EQ]; [|rewrite EQ; order]. rewrite <- lt_succ_r. rewrite (mul_lt_mono_pos_l c) by order. nzsimpl. rewrite (add_lt_mono_r _ _ (a mod c)). rewrite <- div_mod by order. apply lt_le_trans with b; auto. rewrite (div_mod b c) at 1 by order. rewrite <- add_assoc, <- add_le_mono_l. apply le_trans with (c+0). nzsimpl; destruct (mod_bound_pos b c); order. rewrite <- add_le_mono_l. destruct (mod_bound_pos a c); order. Qed. (** The following two properties could be used as specification of div *) Lemma mul_div_le : forall a b, 0<=a -> 0 b*(a/b) <= a. Proof. intros. rewrite (add_le_mono_r _ _ (a mod b)), <- div_mod by order. rewrite <- (add_0_r a) at 1. rewrite <- add_le_mono_l. destruct (mod_bound_pos a b); order. Qed. Lemma mul_succ_div_gt : forall a b, 0<=a -> 0 a < b*(S (a/b)). Proof. intros. rewrite (div_mod a b) at 1 by order. rewrite (mul_succ_r). rewrite <- add_lt_mono_l. destruct (mod_bound_pos a b); auto. Qed. (** The previous inequality is exact iff the modulo is zero. *) Lemma div_exact : forall a b, 0<=a -> 0 (a == b*(a/b) <-> a mod b == 0). Proof. intros. rewrite (div_mod a b) at 1 by order. rewrite <- (add_0_r (b*(a/b))) at 2. apply add_cancel_l. Qed. (** Some additionnal inequalities about div. *) Theorem div_lt_upper_bound: forall a b q, 0<=a -> 0 a < b*q -> a/b < q. Proof. intros. rewrite (mul_lt_mono_pos_l b) by order. apply le_lt_trans with a; auto. apply mul_div_le; auto. Qed. Theorem div_le_upper_bound: forall a b q, 0<=a -> 0 a <= b*q -> a/b <= q. Proof. intros. rewrite (mul_le_mono_pos_l _ _ b) by order. apply le_trans with a; auto. apply mul_div_le; auto. Qed. Theorem div_le_lower_bound: forall a b q, 0<=a -> 0 b*q <= a -> q <= a/b. Proof. intros a b q Ha Hb H. destruct (lt_ge_cases 0 q). rewrite <- (div_mul q b); try order. apply div_le_mono; auto. rewrite mul_comm; split; auto. 
apply lt_le_incl, mul_pos_pos; auto. apply le_trans with 0; auto; apply div_pos; auto. Qed. (** A division respects opposite monotonicity for the divisor *) Lemma div_le_compat_l: forall p q r, 0<=p -> 0 p/r <= p/q. Proof. intros p q r Hp (Hq,Hqr). apply div_le_lower_bound; auto. rewrite (div_mod p r) at 2 by order. apply le_trans with (r*(p/r)). apply mul_le_mono_nonneg_r; try order. apply div_pos; order. rewrite <- (add_0_r (r*(p/r))) at 1. rewrite <- add_le_mono_l. destruct (mod_bound_pos p r); order. Qed. (** * Relations between usual operations and mod and div *) Lemma mod_add : forall a b c, 0<=a -> 0<=a+b*c -> 0 (a + b * c) mod c == a mod c. Proof. intros. symmetry. apply mod_unique with (a/c+b); auto. apply mod_bound_pos; auto. rewrite mul_add_distr_l, add_shuffle0, <- div_mod by order. now rewrite mul_comm. Qed. Lemma div_add : forall a b c, 0<=a -> 0<=a+b*c -> 0 (a + b * c) / c == a / c + b. Proof. intros. apply (mul_cancel_l _ _ c); try order. apply (add_cancel_r _ _ ((a+b*c) mod c)). rewrite <- div_mod, mod_add by order. rewrite mul_add_distr_l, add_shuffle0, <- div_mod by order. now rewrite mul_comm. Qed. Lemma div_add_l: forall a b c, 0<=c -> 0<=a*b+c -> 0 (a * b + c) / b == a + c / b. Proof. intros a b c. rewrite (add_comm _ c), (add_comm a). intros. apply div_add; auto. Qed. (** Cancellations. *) Lemma div_mul_cancel_r : forall a b c, 0<=a -> 0 0 (a*c)/(b*c) == a/b. Proof. intros. symmetry. apply div_unique with ((a mod b)*c). apply mul_nonneg_nonneg; order. split. apply mul_nonneg_nonneg; destruct (mod_bound_pos a b); order. rewrite <- mul_lt_mono_pos_r; auto. destruct (mod_bound_pos a b); auto. rewrite (div_mod a b) at 1 by order. rewrite mul_add_distr_r. rewrite add_cancel_r. rewrite <- 2 mul_assoc. now rewrite (mul_comm c). Qed. Lemma div_mul_cancel_l : forall a b c, 0<=a -> 0 0 (c*a)/(c*b) == a/b. Proof. intros. rewrite !(mul_comm c); apply div_mul_cancel_r; auto. Qed. Lemma mul_mod_distr_l: forall a b c, 0<=a -> 0 0 (c*a) mod (c*b) == c * (a mod b). Proof. intros. rewrite <- (add_cancel_l _ _ ((c*b)* ((c*a)/(c*b)))). rewrite <- div_mod. rewrite div_mul_cancel_l; auto. rewrite <- mul_assoc, <- mul_add_distr_l, mul_cancel_l by order. apply div_mod; order. rewrite <- neq_mul_0; intuition; order. Qed. Lemma mul_mod_distr_r: forall a b c, 0<=a -> 0 0 (a*c) mod (b*c) == (a mod b) * c. Proof. intros. rewrite !(mul_comm _ c); now rewrite mul_mod_distr_l. Qed. (** Operations modulo. *) Theorem mod_mod: forall a n, 0<=a -> 0 (a mod n) mod n == a mod n. Proof. intros. destruct (mod_bound_pos a n); auto. now rewrite mod_small_iff. Qed. Lemma mul_mod_idemp_l : forall a b n, 0<=a -> 0<=b -> 0 ((a mod n)*b) mod n == (a*b) mod n. Proof. intros a b n Ha Hb Hn. symmetry. generalize (mul_nonneg_nonneg _ _ Ha Hb). rewrite (div_mod a n) at 1 2 by order. rewrite add_comm, (mul_comm n), (mul_comm _ b). rewrite mul_add_distr_l, mul_assoc. intros. rewrite mod_add; auto. now rewrite mul_comm. apply mul_nonneg_nonneg; destruct (mod_bound_pos a n); auto. Qed. Lemma mul_mod_idemp_r : forall a b n, 0<=a -> 0<=b -> 0 (a*(b mod n)) mod n == (a*b) mod n. Proof. intros. rewrite !(mul_comm a). apply mul_mod_idemp_l; auto. Qed. Theorem mul_mod: forall a b n, 0<=a -> 0<=b -> 0 (a * b) mod n == ((a mod n) * (b mod n)) mod n. Proof. intros. rewrite mul_mod_idemp_l, mul_mod_idemp_r; trivial. reflexivity. now destruct (mod_bound_pos b n). Qed. Lemma add_mod_idemp_l : forall a b n, 0<=a -> 0<=b -> 0 ((a mod n)+b) mod n == (a+b) mod n. Proof. intros a b n Ha Hb Hn. symmetry. 
generalize (add_nonneg_nonneg _ _ Ha Hb). rewrite (div_mod a n) at 1 2 by order. rewrite <- add_assoc, add_comm, mul_comm. intros. rewrite mod_add; trivial. reflexivity. apply add_nonneg_nonneg; auto. destruct (mod_bound_pos a n); auto. Qed. Lemma add_mod_idemp_r : forall a b n, 0<=a -> 0<=b -> 0 (a+(b mod n)) mod n == (a+b) mod n. Proof. intros. rewrite !(add_comm a). apply add_mod_idemp_l; auto. Qed. Theorem add_mod: forall a b n, 0<=a -> 0<=b -> 0 (a+b) mod n == (a mod n + b mod n) mod n. Proof. intros. rewrite add_mod_idemp_l, add_mod_idemp_r; trivial. reflexivity. now destruct (mod_bound_pos b n). Qed. Lemma div_div : forall a b c, 0<=a -> 0 0 (a/b)/c == a/(b*c). Proof. intros a b c Ha Hb Hc. apply div_unique with (b*((a/b) mod c) + a mod b); trivial. (* begin 0<= ... 0 0 a mod (b*c) == a mod b + b*((a/b) mod c). Proof. intros a b c Ha Hb Hc. apply add_cancel_l with (b*c*(a/(b*c))). rewrite <- div_mod by (apply neq_mul_0; split; order). rewrite <- div_div by trivial. rewrite add_assoc, add_shuffle0, <- mul_assoc, <- mul_add_distr_l. rewrite <- div_mod by order. apply div_mod; order. Qed. (** A last inequality: *) Theorem div_mul_le: forall a b c, 0<=a -> 0 0<=c -> c*(a/b) <= (c*a)/b. Proof. intros. apply div_le_lower_bound; auto. apply mul_nonneg_nonneg; auto. rewrite mul_assoc, (mul_comm b c), <- mul_assoc. apply mul_le_mono_nonneg_l; auto. apply mul_div_le; auto. Qed. (** mod is related to divisibility *) Lemma mod_divides : forall a b, 0<=a -> 0 (a mod b == 0 <-> exists c, a == b*c). Proof. split. intros. exists (a/b). rewrite div_exact; auto. intros (c,Hc). rewrite Hc, mul_comm. apply mod_mul; auto. rewrite (mul_le_mono_pos_l _ _ b); auto. nzsimpl. order. Qed. End NZDivProp. coq-8.4pl2/theories/Numbers/NatInt/NZAddOrder.v0000640000175000001440000001217312010532755020410 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* p + n < p + m. Proof. intros n m p; nzinduct p. now nzsimpl. intro p. nzsimpl. now rewrite <- succ_lt_mono. Qed. Theorem add_lt_mono_r : forall n m p, n < m <-> n + p < m + p. Proof. intros n m p. rewrite (add_comm n p), (add_comm m p); apply add_lt_mono_l. Qed. Theorem add_lt_mono : forall n m p q, n < m -> p < q -> n + p < m + q. Proof. intros n m p q H1 H2. apply lt_trans with (m + p); [now apply add_lt_mono_r | now apply add_lt_mono_l]. Qed. Theorem add_le_mono_l : forall n m p, n <= m <-> p + n <= p + m. Proof. intros n m p; nzinduct p. now nzsimpl. intro p. nzsimpl. now rewrite <- succ_le_mono. Qed. Theorem add_le_mono_r : forall n m p, n <= m <-> n + p <= m + p. Proof. intros n m p. rewrite (add_comm n p), (add_comm m p); apply add_le_mono_l. Qed. Theorem add_le_mono : forall n m p q, n <= m -> p <= q -> n + p <= m + q. Proof. intros n m p q H1 H2. apply le_trans with (m + p); [now apply add_le_mono_r | now apply add_le_mono_l]. Qed. Theorem add_lt_le_mono : forall n m p q, n < m -> p <= q -> n + p < m + q. Proof. intros n m p q H1 H2. apply lt_le_trans with (m + p); [now apply add_lt_mono_r | now apply add_le_mono_l]. Qed. Theorem add_le_lt_mono : forall n m p q, n <= m -> p < q -> n + p < m + q. Proof. intros n m p q H1 H2. apply le_lt_trans with (m + p); [now apply add_le_mono_r | now apply add_lt_mono_l]. Qed. Theorem add_pos_pos : forall n m, 0 < n -> 0 < m -> 0 < n + m. Proof. intros n m H1 H2. rewrite <- (add_0_l 0). now apply add_lt_mono. Qed. Theorem add_pos_nonneg : forall n m, 0 < n -> 0 <= m -> 0 < n + m. Proof. intros n m H1 H2. 
rewrite <- (add_0_l 0). now apply add_lt_le_mono. Qed. Theorem add_nonneg_pos : forall n m, 0 <= n -> 0 < m -> 0 < n + m. Proof. intros n m H1 H2. rewrite <- (add_0_l 0). now apply add_le_lt_mono. Qed. Theorem add_nonneg_nonneg : forall n m, 0 <= n -> 0 <= m -> 0 <= n + m. Proof. intros n m H1 H2. rewrite <- (add_0_l 0). now apply add_le_mono. Qed. Theorem lt_add_pos_l : forall n m, 0 < n -> m < n + m. Proof. intros n m. rewrite (add_lt_mono_r 0 n m). now nzsimpl. Qed. Theorem lt_add_pos_r : forall n m, 0 < n -> m < m + n. Proof. intros; rewrite add_comm; now apply lt_add_pos_l. Qed. Theorem le_lt_add_lt : forall n m p q, n <= m -> p + m < q + n -> p < q. Proof. intros n m p q H1 H2. destruct (le_gt_cases q p); [| assumption]. contradict H2. rewrite nlt_ge. now apply add_le_mono. Qed. Theorem lt_le_add_lt : forall n m p q, n < m -> p + m <= q + n -> p < q. Proof. intros n m p q H1 H2. destruct (le_gt_cases q p); [| assumption]. contradict H2. rewrite nle_gt. now apply add_le_lt_mono. Qed. Theorem le_le_add_le : forall n m p q, n <= m -> p + m <= q + n -> p <= q. Proof. intros n m p q H1 H2. destruct (le_gt_cases p q); [assumption |]. contradict H2. rewrite nle_gt. now apply add_lt_le_mono. Qed. Theorem add_lt_cases : forall n m p q, n + m < p + q -> n < p \/ m < q. Proof. intros n m p q H; destruct (le_gt_cases p n) as [H1 | H1]; [| now left]. destruct (le_gt_cases q m) as [H2 | H2]; [| now right]. contradict H; rewrite nlt_ge. now apply add_le_mono. Qed. Theorem add_le_cases : forall n m p q, n + m <= p + q -> n <= p \/ m <= q. Proof. intros n m p q H. destruct (le_gt_cases n p) as [H1 | H1]. now left. destruct (le_gt_cases m q) as [H2 | H2]. now right. contradict H; rewrite nle_gt. now apply add_lt_mono. Qed. Theorem add_neg_cases : forall n m, n + m < 0 -> n < 0 \/ m < 0. Proof. intros n m H; apply add_lt_cases; now nzsimpl. Qed. Theorem add_pos_cases : forall n m, 0 < n + m -> 0 < n \/ 0 < m. Proof. intros n m H; apply add_lt_cases; now nzsimpl. Qed. Theorem add_nonpos_cases : forall n m, n + m <= 0 -> n <= 0 \/ m <= 0. Proof. intros n m H; apply add_le_cases; now nzsimpl. Qed. Theorem add_nonneg_cases : forall n m, 0 <= n + m -> 0 <= n \/ 0 <= m. Proof. intros n m H; apply add_le_cases; now nzsimpl. Qed. (** Substraction *) (** We can prove the existence of a subtraction of any number by a smaller one *) Lemma le_exists_sub : forall n m, n<=m -> exists p, m == p+n /\ 0<=p. Proof. intros n m H. apply le_ind with (4:=H). solve_proper. exists 0; nzsimpl; split; order. clear m H. intros m H (p & EQ & LE). exists (S p). split. nzsimpl. now f_equiv. now apply le_le_succ_r. Qed. (** For the moment, it doesn't seem possible to relate this existing subtraction with [sub]. *) End NZAddOrderProp. coq-8.4pl2/theories/Numbers/NatInt/NZAdd.v0000640000175000001440000000555212010532755017417 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n == m. Proof. intros n m p; nzinduct p. now nzsimpl. intro p. nzsimpl. now rewrite succ_inj_wd. Qed. Theorem add_cancel_r : forall n m p, n + p == m + p <-> n == m. Proof. intros n m p. rewrite (add_comm n p), (add_comm m p). apply add_cancel_l. Qed. Theorem add_shuffle0 : forall n m p, n+m+p == n+p+m. Proof. intros n m p. rewrite <- 2 add_assoc, add_cancel_l. apply add_comm. Qed. Theorem add_shuffle1 : forall n m p q, (n + m) + (p + q) == (n + p) + (m + q). Proof. intros n m p q. rewrite 2 add_assoc, add_cancel_r. apply add_shuffle0. Qed. 
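(** Illustration added for exposition (not part of the original file): the
    shuffle lemmas above let us reorder a four-term sum without unfolding
    associativity and commutativity by hand. The lemma name below is ours. *)

Lemma add_shuffle1_example : forall n m p q,
  (n + m) + (p + q) == (p + n) + (q + m).
Proof.
  intros n m p q. rewrite add_shuffle1.
  now rewrite (add_comm n p), (add_comm m q).
Qed.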
Theorem add_shuffle2 : forall n m p q, (n + m) + (p + q) == (n + q) + (m + p). Proof. intros n m p q. rewrite (add_comm p). apply add_shuffle1. Qed. Theorem add_shuffle3 : forall n m p, n + (m + p) == m + (n + p). Proof. intros n m p. now rewrite add_comm, <- add_assoc, (add_comm p). Qed. Theorem sub_1_r : forall n, n - 1 == P n. Proof. intro n; now nzsimpl'. Qed. Hint Rewrite sub_1_r : nz. End NZAddProp. coq-8.4pl2/theories/Numbers/NatInt/NZMulOrder.v0000640000175000001440000003127112010532755020455 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (p * n < p * m <-> q * n + m < q * m + n). Proof. intros p q n m H. rewrite <- H. nzsimpl. rewrite <- ! add_assoc, (add_comm n m). now rewrite <- add_lt_mono_r. Qed. Theorem mul_lt_mono_pos_l : forall p n m, 0 < p -> (n < m <-> p * n < p * m). Proof. intros p n m Hp. revert n m. apply lt_ind with (4:=Hp). solve_proper. intros. now nzsimpl. clear p Hp. intros p Hp IH n m. nzsimpl. assert (LR : forall n m, n < m -> p * n + n < p * m + m) by (intros n1 m1 H; apply add_lt_mono; trivial; now rewrite <- IH). split; intros H. now apply LR. destruct (lt_trichotomy n m) as [LT|[EQ|GT]]; trivial. rewrite EQ in H. order. apply LR in GT. order. Qed. Theorem mul_lt_mono_pos_r : forall p n m, 0 < p -> (n < m <-> n * p < m * p). Proof. intros p n m. rewrite (mul_comm n p), (mul_comm m p). now apply mul_lt_mono_pos_l. Qed. Theorem mul_lt_mono_neg_l : forall p n m, p < 0 -> (n < m <-> p * m < p * n). Proof. nzord_induct p. order. intros p Hp _ n m Hp'. apply lt_succ_l in Hp'. order. intros p Hp IH n m _. apply le_succ_l in Hp. le_elim Hp. assert (LR : forall n m, n < m -> p * m < p * n). intros n1 m1 H. apply (le_lt_add_lt n1 m1). now apply lt_le_incl. rewrite <- 2 mul_succ_l. now rewrite <- IH. split; intros H. now apply LR. destruct (lt_trichotomy n m) as [LT|[EQ|GT]]; trivial. rewrite EQ in H. order. apply LR in GT. order. rewrite (mul_lt_pred p (S p)), Hp; now nzsimpl. Qed. Theorem mul_lt_mono_neg_r : forall p n m, p < 0 -> (n < m <-> m * p < n * p). Proof. intros p n m. rewrite (mul_comm n p), (mul_comm m p). now apply mul_lt_mono_neg_l. Qed. Theorem mul_le_mono_nonneg_l : forall n m p, 0 <= p -> n <= m -> p * n <= p * m. Proof. intros n m p H1 H2. le_elim H1. le_elim H2. apply lt_le_incl. now apply mul_lt_mono_pos_l. apply eq_le_incl; now rewrite H2. apply eq_le_incl; rewrite <- H1; now do 2 rewrite mul_0_l. Qed. Theorem mul_le_mono_nonpos_l : forall n m p, p <= 0 -> n <= m -> p * m <= p * n. Proof. intros n m p H1 H2. le_elim H1. le_elim H2. apply lt_le_incl. now apply mul_lt_mono_neg_l. apply eq_le_incl; now rewrite H2. apply eq_le_incl; rewrite H1; now do 2 rewrite mul_0_l. Qed. Theorem mul_le_mono_nonneg_r : forall n m p, 0 <= p -> n <= m -> n * p <= m * p. Proof. intros n m p H1 H2; rewrite (mul_comm n p), (mul_comm m p); now apply mul_le_mono_nonneg_l. Qed. Theorem mul_le_mono_nonpos_r : forall n m p, p <= 0 -> n <= m -> m * p <= n * p. Proof. intros n m p H1 H2; rewrite (mul_comm n p), (mul_comm m p); now apply mul_le_mono_nonpos_l. Qed. Theorem mul_cancel_l : forall n m p, p ~= 0 -> (p * n == p * m <-> n == m). Proof. intros n m p Hp; split; intro H; [|now f_equiv]. apply lt_gt_cases in Hp; destruct Hp as [Hp|Hp]; destruct (lt_trichotomy n m) as [LT|[EQ|GT]]; trivial. apply (mul_lt_mono_neg_l p) in LT; order. apply (mul_lt_mono_neg_l p) in GT; order. apply (mul_lt_mono_pos_l p) in LT; order. apply (mul_lt_mono_pos_l p) in GT; order. Qed. 
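(** Illustration added for exposition (not part of the original file):
    [mul_cancel_l] specialised to a doubling on both sides. We simply assume
    [2 ~= 0] here instead of deriving it; the lemma name is ours. *)

Lemma mul_cancel_l_example : forall n m,
  2 ~= 0 -> (2 * n == 2 * m <-> n == m).
Proof.
  intros n m H2. now apply mul_cancel_l.
Qed.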
Theorem mul_cancel_r : forall n m p, p ~= 0 -> (n * p == m * p <-> n == m). Proof. intros n m p. rewrite (mul_comm n p), (mul_comm m p); apply mul_cancel_l. Qed. Theorem mul_id_l : forall n m, m ~= 0 -> (n * m == m <-> n == 1). Proof. intros n m H. stepl (n * m == 1 * m) by now rewrite mul_1_l. now apply mul_cancel_r. Qed. Theorem mul_id_r : forall n m, n ~= 0 -> (n * m == n <-> m == 1). Proof. intros n m; rewrite mul_comm; apply mul_id_l. Qed. Theorem mul_le_mono_pos_l : forall n m p, 0 < p -> (n <= m <-> p * n <= p * m). Proof. intros n m p H; do 2 rewrite lt_eq_cases. rewrite (mul_lt_mono_pos_l p n m) by assumption. now rewrite -> (mul_cancel_l n m p) by (intro H1; rewrite H1 in H; false_hyp H lt_irrefl). Qed. Theorem mul_le_mono_pos_r : forall n m p, 0 < p -> (n <= m <-> n * p <= m * p). Proof. intros n m p. rewrite (mul_comm n p), (mul_comm m p); apply mul_le_mono_pos_l. Qed. Theorem mul_le_mono_neg_l : forall n m p, p < 0 -> (n <= m <-> p * m <= p * n). Proof. intros n m p H; do 2 rewrite lt_eq_cases. rewrite (mul_lt_mono_neg_l p n m); [| assumption]. rewrite -> (mul_cancel_l m n p) by (intro H1; rewrite H1 in H; false_hyp H lt_irrefl). now setoid_replace (n == m) with (m == n) by (split; now intro). Qed. Theorem mul_le_mono_neg_r : forall n m p, p < 0 -> (n <= m <-> m * p <= n * p). Proof. intros n m p. rewrite (mul_comm n p), (mul_comm m p); apply mul_le_mono_neg_l. Qed. Theorem mul_lt_mono_nonneg : forall n m p q, 0 <= n -> n < m -> 0 <= p -> p < q -> n * p < m * q. Proof. intros n m p q H1 H2 H3 H4. apply le_lt_trans with (m * p). apply mul_le_mono_nonneg_r; [assumption | now apply lt_le_incl]. apply -> mul_lt_mono_pos_l; [assumption | now apply le_lt_trans with n]. Qed. (* There are still many variants of the theorem above. One can assume 0 < n or 0 < p or n <= m or p <= q. *) Theorem mul_le_mono_nonneg : forall n m p q, 0 <= n -> n <= m -> 0 <= p -> p <= q -> n * p <= m * q. Proof. intros n m p q H1 H2 H3 H4. le_elim H2; le_elim H4. apply lt_le_incl; now apply mul_lt_mono_nonneg. rewrite <- H4; apply mul_le_mono_nonneg_r; [assumption | now apply lt_le_incl]. rewrite <- H2; apply mul_le_mono_nonneg_l; [assumption | now apply lt_le_incl]. rewrite H2; rewrite H4; now apply eq_le_incl. Qed. Theorem mul_pos_pos : forall n m, 0 < n -> 0 < m -> 0 < n * m. Proof. intros n m H1 H2. rewrite <- (mul_0_l m). now apply mul_lt_mono_pos_r. Qed. Theorem mul_neg_neg : forall n m, n < 0 -> m < 0 -> 0 < n * m. Proof. intros n m H1 H2. rewrite <- (mul_0_l m). now apply mul_lt_mono_neg_r. Qed. Theorem mul_pos_neg : forall n m, 0 < n -> m < 0 -> n * m < 0. Proof. intros n m H1 H2. rewrite <- (mul_0_l m). now apply mul_lt_mono_neg_r. Qed. Theorem mul_neg_pos : forall n m, n < 0 -> 0 < m -> n * m < 0. Proof. intros; rewrite mul_comm; now apply mul_pos_neg. Qed. Theorem mul_nonneg_nonneg : forall n m, 0 <= n -> 0 <= m -> 0 <= n*m. Proof. intros. rewrite <- (mul_0_l m). apply mul_le_mono_nonneg; order. Qed. Theorem mul_pos_cancel_l : forall n m, 0 < n -> (0 < n*m <-> 0 < m). Proof. intros n m Hn. rewrite <- (mul_0_r n) at 1. symmetry. now apply mul_lt_mono_pos_l. Qed. Theorem mul_pos_cancel_r : forall n m, 0 < m -> (0 < n*m <-> 0 < n). Proof. intros n m Hn. rewrite <- (mul_0_l m) at 1. symmetry. now apply mul_lt_mono_pos_r. Qed. Theorem mul_nonneg_cancel_l : forall n m, 0 < n -> (0 <= n*m <-> 0 <= m). Proof. intros n m Hn. rewrite <- (mul_0_r n) at 1. symmetry. now apply mul_le_mono_pos_l. Qed. Theorem mul_nonneg_cancel_r : forall n m, 0 < m -> (0 <= n*m <-> 0 <= n). Proof. intros n m Hn. 
rewrite <- (mul_0_l m) at 1. symmetry. now apply mul_le_mono_pos_r. Qed. Theorem lt_1_mul_pos : forall n m, 1 < n -> 0 < m -> 1 < n * m. Proof. intros n m H1 H2. apply (mul_lt_mono_pos_r m) in H1. rewrite mul_1_l in H1. now apply lt_1_l with m. assumption. Qed. Theorem eq_mul_0 : forall n m, n * m == 0 <-> n == 0 \/ m == 0. Proof. intros n m; split. intro H; destruct (lt_trichotomy n 0) as [H1 | [H1 | H1]]; destruct (lt_trichotomy m 0) as [H2 | [H2 | H2]]; try (now right); try (now left). exfalso; now apply (lt_neq 0 (n * m)); [apply mul_neg_neg |]. exfalso; now apply (lt_neq (n * m) 0); [apply mul_neg_pos |]. exfalso; now apply (lt_neq (n * m) 0); [apply mul_pos_neg |]. exfalso; now apply (lt_neq 0 (n * m)); [apply mul_pos_pos |]. intros [H | H]. now rewrite H, mul_0_l. now rewrite H, mul_0_r. Qed. Theorem neq_mul_0 : forall n m, n ~= 0 /\ m ~= 0 <-> n * m ~= 0. Proof. intros n m; split; intro H. intro H1; apply eq_mul_0 in H1. tauto. split; intro H1; rewrite H1 in H; (rewrite mul_0_l in H || rewrite mul_0_r in H); now apply H. Qed. Theorem eq_square_0 : forall n, n * n == 0 <-> n == 0. Proof. intro n; rewrite eq_mul_0; tauto. Qed. Theorem eq_mul_0_l : forall n m, n * m == 0 -> m ~= 0 -> n == 0. Proof. intros n m H1 H2. apply eq_mul_0 in H1. destruct H1 as [H1 | H1]. assumption. false_hyp H1 H2. Qed. Theorem eq_mul_0_r : forall n m, n * m == 0 -> n ~= 0 -> m == 0. Proof. intros n m H1 H2; apply eq_mul_0 in H1. destruct H1 as [H1 | H1]. false_hyp H1 H2. assumption. Qed. (** Some alternative names: *) Definition mul_eq_0 := eq_mul_0. Definition mul_eq_0_l := eq_mul_0_l. Definition mul_eq_0_r := eq_mul_0_r. Theorem lt_0_mul n m : 0 < n * m <-> (0 < n /\ 0 < m) \/ (m < 0 /\ n < 0). Proof. split; [intro H | intros [[H1 H2] | [H1 H2]]]. destruct (lt_trichotomy n 0) as [H1 | [H1 | H1]]; [| rewrite H1 in H; rewrite mul_0_l in H; false_hyp H lt_irrefl |]; (destruct (lt_trichotomy m 0) as [H2 | [H2 | H2]]; [| rewrite H2 in H; rewrite mul_0_r in H; false_hyp H lt_irrefl |]); try (left; now split); try (right; now split). assert (H3 : n * m < 0) by now apply mul_neg_pos. exfalso; now apply (lt_asymm (n * m) 0). assert (H3 : n * m < 0) by now apply mul_pos_neg. exfalso; now apply (lt_asymm (n * m) 0). now apply mul_pos_pos. now apply mul_neg_neg. Qed. Theorem square_lt_mono_nonneg : forall n m, 0 <= n -> n < m -> n * n < m * m. Proof. intros n m H1 H2. now apply mul_lt_mono_nonneg. Qed. Theorem square_le_mono_nonneg : forall n m, 0 <= n -> n <= m -> n * n <= m * m. Proof. intros n m H1 H2. now apply mul_le_mono_nonneg. Qed. (* The converse theorems require nonnegativity (or nonpositivity) of the other variable *) Theorem square_lt_simpl_nonneg : forall n m, 0 <= m -> n * n < m * m -> n < m. Proof. intros n m H1 H2. destruct (lt_ge_cases n 0). now apply lt_le_trans with 0. destruct (lt_ge_cases n m) as [LT|LE]; trivial. apply square_le_mono_nonneg in LE; order. Qed. Theorem square_le_simpl_nonneg : forall n m, 0 <= m -> n * n <= m * m -> n <= m. Proof. intros n m H1 H2. destruct (lt_ge_cases n 0). apply lt_le_incl; now apply lt_le_trans with 0. destruct (le_gt_cases n m) as [LE|LT]; trivial. apply square_lt_mono_nonneg in LT; order. Qed. Theorem mul_2_mono_l : forall n m, n < m -> 1 + 2 * n < 2 * m. Proof. intros n m. rewrite <- le_succ_l, (mul_le_mono_pos_l (S n) m two). rewrite two_succ. nzsimpl. now rewrite le_succ_l. order'. Qed. Lemma add_le_mul : forall a b, 1 1 a+b <= a*b. Proof. assert (AUX : forall a b, 0 0 (S a)+(S b) <= (S a)*(S b)). intros a b Ha Hb. nzsimpl. rewrite <- succ_le_mono. 
apply le_succ_l. rewrite <- add_assoc, <- (add_0_l (a+b)), (add_comm b). apply add_lt_mono_r. now apply mul_pos_pos. intros a b Ha Hb. assert (Ha' := lt_succ_pred 1 a Ha). assert (Hb' := lt_succ_pred 1 b Hb). rewrite <- Ha', <- Hb'. apply AUX; rewrite succ_lt_mono, <- one_succ; order. Qed. (** A few results about squares *) Lemma square_nonneg : forall a, 0 <= a * a. Proof. intros. rewrite <- (mul_0_r a). destruct (le_gt_cases a 0). now apply mul_le_mono_nonpos_l. apply mul_le_mono_nonneg_l; order. Qed. Lemma crossmul_le_addsquare : forall a b, 0<=a -> 0<=b -> b*a+a*b <= a*a+b*b. Proof. assert (AUX : forall a b, 0<=a<=b -> b*a+a*b <= a*a+b*b). intros a b (Ha,H). destruct (le_exists_sub _ _ H) as (d & EQ & Hd). rewrite EQ. rewrite 2 mul_add_distr_r. rewrite !add_assoc. apply add_le_mono_r. rewrite add_comm. apply add_le_mono_l. apply mul_le_mono_nonneg_l; trivial. order. intros a b Ha Hb. destruct (le_gt_cases a b). apply AUX; split; order. rewrite (add_comm (b*a)), (add_comm (a*a)). apply AUX; split; order. Qed. Lemma add_square_le : forall a b, 0<=a -> 0<=b -> a*a + b*b <= (a+b)*(a+b). Proof. intros a b Ha Hb. rewrite mul_add_distr_r, !mul_add_distr_l. rewrite add_assoc. apply add_le_mono_r. rewrite <- add_assoc. rewrite <- (add_0_r (a*a)) at 1. apply add_le_mono_l. apply add_nonneg_nonneg; now apply mul_nonneg_nonneg. Qed. Lemma square_add_le : forall a b, 0<=a -> 0<=b -> (a+b)*(a+b) <= 2*(a*a + b*b). Proof. intros a b Ha Hb. rewrite !mul_add_distr_l, !mul_add_distr_r. nzsimpl'. rewrite <- !add_assoc. apply add_le_mono_l. rewrite !add_assoc. apply add_le_mono_r. apply crossmul_le_addsquare; order. Qed. Lemma quadmul_le_squareadd : forall a b, 0<=a -> 0<=b -> 2*2*a*b <= (a+b)*(a+b). Proof. intros. nzsimpl'. rewrite !mul_add_distr_l, !mul_add_distr_r. rewrite (add_comm _ (b*b)), add_assoc. apply add_le_mono_r. rewrite (add_shuffle0 (a*a)), (mul_comm b a). apply add_le_mono_r. rewrite (mul_comm a b) at 1. now apply crossmul_le_addsquare. Qed. End NZMulOrderProp. coq-8.4pl2/theories/Numbers/NatInt/NZSqrt.v0000640000175000001440000005366312010532755017666 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t. End Sqrt. Module Type SqrtNotation (A : Typ)(Import B : Sqrt A). Notation "√ x" := (sqrt x) (at level 6). End SqrtNotation. Module Type Sqrt' (A : Typ) := Sqrt A <+ SqrtNotation A. Module Type NZSqrtSpec (Import A : NZOrdAxiomsSig')(Import B : Sqrt' A). Axiom sqrt_spec : forall a, 0<=a -> √a * √a <= a < S (√a) * S (√a). Axiom sqrt_neg : forall a, a<0 -> √a == 0. End NZSqrtSpec. Module Type NZSqrt (A : NZOrdAxiomsSig) := Sqrt A <+ NZSqrtSpec A. Module Type NZSqrt' (A : NZOrdAxiomsSig) := Sqrt' A <+ NZSqrtSpec A. (** Derived properties of power *) Module Type NZSqrtProp (Import A : NZOrdAxiomsSig') (Import B : NZSqrt' A) (Import C : NZMulOrderProp A). Local Notation "a ²" := (a*a) (at level 5, no associativity, format "a ²"). (** First, sqrt is non-negative *) Lemma sqrt_spec_nonneg : forall b, b² < (S b)² -> 0 <= b. Proof. intros b LT. destruct (le_gt_cases 0 b) as [Hb|Hb]; trivial. exfalso. assert ((S b)² < b²). rewrite mul_succ_l, <- (add_0_r b²). apply add_lt_le_mono. apply mul_lt_mono_neg_l; trivial. apply lt_succ_diag_r. now apply le_succ_l. order. Qed. Lemma sqrt_nonneg : forall a, 0<=√a. Proof. intros. destruct (lt_ge_cases a 0) as [Ha|Ha]. now rewrite (sqrt_neg _ Ha). apply sqrt_spec_nonneg. destruct (sqrt_spec a Ha). order. Qed. 
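(** Illustration added for exposition (not part of the original file): the
    non-negativity of [√] combines with [order] to bound anything lying
    above a square root. The lemma name is ours. *)

Lemma sqrt_nonneg_example : forall a b, √a <= b -> 0 <= b.
Proof.
  intros a b H. assert (Ha := sqrt_nonneg a). order.
Qed.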
(** The spec of sqrt indeed determines it *) Lemma sqrt_unique : forall a b, b² <= a < (S b)² -> √a == b. Proof. intros a b (LEb,LTb). assert (Ha : 0<=a) by (transitivity b²; trivial using square_nonneg). assert (Hb : 0<=b) by (apply sqrt_spec_nonneg; order). assert (Ha': 0<=√a) by now apply sqrt_nonneg. destruct (sqrt_spec a Ha) as (LEa,LTa). assert (b <= √a). apply lt_succ_r, square_lt_simpl_nonneg; [|order]. now apply lt_le_incl, lt_succ_r. assert (√a <= b). apply lt_succ_r, square_lt_simpl_nonneg; [|order]. now apply lt_le_incl, lt_succ_r. order. Qed. (** Hence sqrt is a morphism *) Instance sqrt_wd : Proper (eq==>eq) sqrt. Proof. intros x x' Hx. destruct (lt_ge_cases x 0) as [H|H]. rewrite 2 sqrt_neg; trivial. reflexivity. now rewrite <- Hx. apply sqrt_unique. rewrite Hx in *. now apply sqrt_spec. Qed. (** An alternate specification *) Lemma sqrt_spec_alt : forall a, 0<=a -> exists r, a == (√a)² + r /\ 0 <= r <= 2*√a. Proof. intros a Ha. destruct (sqrt_spec _ Ha) as (LE,LT). destruct (le_exists_sub _ _ LE) as (r & Hr & Hr'). exists r. split. now rewrite add_comm. split. trivial. apply (add_le_mono_r _ _ (√a)²). rewrite <- Hr, add_comm. generalize LT. nzsimpl'. now rewrite lt_succ_r, add_assoc. Qed. Lemma sqrt_unique' : forall a b c, 0<=c<=2*b -> a == b² + c -> √a == b. Proof. intros a b c (Hc,H) EQ. apply sqrt_unique. rewrite EQ. split. rewrite <- add_0_r at 1. now apply add_le_mono_l. nzsimpl. apply lt_succ_r. rewrite <- add_assoc. apply add_le_mono_l. generalize H; now nzsimpl'. Qed. (** Sqrt is exact on squares *) Lemma sqrt_square : forall a, 0<=a -> √(a²) == a. Proof. intros a Ha. apply sqrt_unique' with 0. split. order. apply mul_nonneg_nonneg; order'. now nzsimpl. Qed. (** Sqrt and predecessors of squares *) Lemma sqrt_pred_square : forall a, 0 √(P a²) == P a. Proof. intros a Ha. apply sqrt_unique. assert (EQ := lt_succ_pred 0 a Ha). rewrite EQ. split. apply lt_succ_r. rewrite (lt_succ_pred 0). assert (0 <= P a) by (now rewrite <- lt_succ_r, EQ). assert (P a < a) by (now rewrite <- le_succ_l, EQ). apply mul_lt_mono_nonneg; trivial. now apply mul_pos_pos. apply le_succ_l. rewrite (lt_succ_pred 0). reflexivity. now apply mul_pos_pos. Qed. (** Sqrt is a monotone function (but not a strict one) *) Lemma sqrt_le_mono : forall a b, a <= b -> √a <= √b. Proof. intros a b Hab. destruct (lt_ge_cases a 0) as [Ha|Ha]. rewrite (sqrt_neg _ Ha). apply sqrt_nonneg. assert (Hb : 0 <= b) by order. destruct (sqrt_spec a Ha) as (LE,_). destruct (sqrt_spec b Hb) as (_,LT). apply lt_succ_r. apply square_lt_simpl_nonneg; try order. now apply lt_le_incl, lt_succ_r, sqrt_nonneg. Qed. (** No reverse result for <=, consider for instance √2 <= √1 *) Lemma sqrt_lt_cancel : forall a b, √a < √b -> a < b. Proof. intros a b H. destruct (lt_ge_cases b 0) as [Hb|Hb]. rewrite (sqrt_neg b Hb) in H; generalize (sqrt_nonneg a); order. destruct (lt_ge_cases a 0) as [Ha|Ha]; [order|]. destruct (sqrt_spec a Ha) as (_,LT). destruct (sqrt_spec b Hb) as (LE,_). apply le_succ_l in H. assert ((S (√a))² <= (√b)²). apply mul_le_mono_nonneg; trivial. now apply lt_le_incl, lt_succ_r, sqrt_nonneg. now apply lt_le_incl, lt_succ_r, sqrt_nonneg. order. Qed. (** When left side is a square, we have an equivalence for <= *) Lemma sqrt_le_square : forall a b, 0<=a -> 0<=b -> (b²<=a <-> b <= √a). Proof. intros a b Ha Hb. split; intros H. rewrite <- (sqrt_square b); trivial. now apply sqrt_le_mono. destruct (sqrt_spec a Ha) as (LE,LT). transitivity (√a)²; trivial. now apply mul_le_mono_nonneg. Qed. 
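(** Illustration added for exposition (not part of the original file):
    [sqrt_square] computes the square root of a literal square, here [2*2].
    The lemma name is ours. *)

Lemma sqrt_2_2_example : √(2*2) == 2.
Proof.
  apply sqrt_square. order'.
Qed.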
(** When right side is a square, we have an equivalence for < *) Lemma sqrt_lt_square : forall a b, 0<=a -> 0<=b -> (a √a < b). Proof. intros a b Ha Hb. split; intros H. destruct (sqrt_spec a Ha) as (LE,_). apply square_lt_simpl_nonneg; try order. rewrite <- (sqrt_square b Hb) in H. now apply sqrt_lt_cancel. Qed. (** Sqrt and basic constants *) Lemma sqrt_0 : √0 == 0. Proof. rewrite <- (mul_0_l 0) at 1. now apply sqrt_square. Qed. Lemma sqrt_1 : √1 == 1. Proof. rewrite <- (mul_1_l 1) at 1. apply sqrt_square. order'. Qed. Lemma sqrt_2 : √2 == 1. Proof. apply sqrt_unique' with 1. nzsimpl; split; order'. now nzsimpl'. Qed. Lemma sqrt_pos : forall a, 0 < √a <-> 0 < a. Proof. intros a. split; intros Ha. apply sqrt_lt_cancel. now rewrite sqrt_0. rewrite <- le_succ_l, <- one_succ, <- sqrt_1. apply sqrt_le_mono. now rewrite one_succ, le_succ_l. Qed. Lemma sqrt_lt_lin : forall a, 1 √a √a<=a. Proof. intros a Ha. destruct (le_gt_cases a 0) as [H|H]. setoid_replace a with 0 by order. now rewrite sqrt_0. destruct (le_gt_cases a 1) as [H'|H']. rewrite <- le_succ_l, <- one_succ in H. setoid_replace a with 1 by order. now rewrite sqrt_1. now apply lt_le_incl, sqrt_lt_lin. Qed. (** Sqrt and multiplication. *) (** Due to rounding error, we don't have the usual √(a*b) = √a*√b but only lower and upper bounds. *) Lemma sqrt_mul_below : forall a b, √a * √b <= √(a*b). Proof. intros a b. destruct (lt_ge_cases a 0) as [Ha|Ha]. rewrite (sqrt_neg a Ha). nzsimpl. apply sqrt_nonneg. destruct (lt_ge_cases b 0) as [Hb|Hb]. rewrite (sqrt_neg b Hb). nzsimpl. apply sqrt_nonneg. assert (Ha':=sqrt_nonneg a). assert (Hb':=sqrt_nonneg b). apply sqrt_le_square; try now apply mul_nonneg_nonneg. rewrite mul_shuffle1. apply mul_le_mono_nonneg; try now apply mul_nonneg_nonneg. now apply sqrt_spec. now apply sqrt_spec. Qed. Lemma sqrt_mul_above : forall a b, 0<=a -> 0<=b -> √(a*b) < S (√a) * S (√b). Proof. intros a b Ha Hb. apply sqrt_lt_square. now apply mul_nonneg_nonneg. apply mul_nonneg_nonneg. now apply lt_le_incl, lt_succ_r, sqrt_nonneg. now apply lt_le_incl, lt_succ_r, sqrt_nonneg. rewrite mul_shuffle1. apply mul_lt_mono_nonneg; trivial; now apply sqrt_spec. Qed. (** And we can't find better approximations in general. - The lower bound is exact for squares - Concerning the upper bound, for any c>0, take a=b=c²-1, then √(a*b) = c² -1 while S √a = S √b = c *) (** Sqrt and successor : - the sqrt function climbs by at most 1 at a time - otherwise it stays at the same value - the +1 steps occur for squares *) Lemma sqrt_succ_le : forall a, 0<=a -> √(S a) <= S (√a). Proof. intros a Ha. apply lt_succ_r. apply sqrt_lt_square. now apply le_le_succ_r. apply le_le_succ_r, le_le_succ_r, sqrt_nonneg. rewrite <- (add_1_l (S (√a))). apply lt_le_trans with (1²+(S (√a))²). rewrite mul_1_l, add_1_l, <- succ_lt_mono. now apply sqrt_spec. apply add_square_le. order'. apply le_le_succ_r, sqrt_nonneg. Qed. Lemma sqrt_succ_or : forall a, 0<=a -> √(S a) == S (√a) \/ √(S a) == √a. Proof. intros a Ha. destruct (le_gt_cases (√(S a)) (√a)) as [H|H]. right. generalize (sqrt_le_mono _ _ (le_succ_diag_r a)); order. left. apply le_succ_l in H. generalize (sqrt_succ_le a Ha); order. Qed. Lemma sqrt_eq_succ_iff_square : forall a, 0<=a -> (√(S a) == S (√a) <-> exists b, 0 √(a+b) <= √a + √b). intros a b Ha. rewrite (sqrt_neg a Ha). nzsimpl. apply sqrt_le_mono. rewrite <- (add_0_l b) at 2. apply add_le_mono_r; order. intros a b. destruct (lt_ge_cases a 0) as [Ha|Ha]. now apply AUX. destruct (lt_ge_cases b 0) as [Hb|Hb]. 
rewrite (add_comm a), (add_comm (√a)); now apply AUX. assert (Ha':=sqrt_nonneg a). assert (Hb':=sqrt_nonneg b). rewrite <- lt_succ_r. apply sqrt_lt_square. now apply add_nonneg_nonneg. now apply lt_le_incl, lt_succ_r, add_nonneg_nonneg. destruct (sqrt_spec a Ha) as (_,LTa). destruct (sqrt_spec b Hb) as (_,LTb). revert LTa LTb. nzsimpl. rewrite 3 lt_succ_r. intros LTa LTb. assert (H:=add_le_mono _ _ _ _ LTa LTb). etransitivity; [eexact H|]. clear LTa LTb H. rewrite <- (add_assoc _ (√a) (√a)). rewrite <- (add_assoc _ (√b) (√b)). rewrite add_shuffle1. rewrite <- (add_assoc _ (√a + √b)). rewrite (add_shuffle1 (√a) (√b)). apply add_le_mono_r. now apply add_square_le. Qed. (** convexity inequality for sqrt: sqrt of middle is above middle of square roots. *) Lemma add_sqrt_le : forall a b, 0<=a -> 0<=b -> √a + √b <= √(2*(a+b)). Proof. intros a b Ha Hb. assert (Ha':=sqrt_nonneg a). assert (Hb':=sqrt_nonneg b). apply sqrt_le_square. apply mul_nonneg_nonneg. order'. now apply add_nonneg_nonneg. now apply add_nonneg_nonneg. transitivity (2*((√a)² + (√b)²)). now apply square_add_le. apply mul_le_mono_nonneg_l. order'. apply add_le_mono; now apply sqrt_spec. Qed. End NZSqrtProp. Module Type NZSqrtUpProp (Import A : NZDecOrdAxiomsSig') (Import B : NZSqrt' A) (Import C : NZMulOrderProp A) (Import D : NZSqrtProp A B C). (** * [sqrt_up] : a square root that rounds up instead of down *) Local Notation "a ²" := (a*a) (at level 5, no associativity, format "a ²"). (** For once, we define instead of axiomatizing, thanks to sqrt *) Definition sqrt_up a := match compare 0 a with | Lt => S √(P a) | _ => 0 end. Local Notation "√° a" := (sqrt_up a) (at level 6, no associativity). Lemma sqrt_up_eqn0 : forall a, a<=0 -> √°a == 0. Proof. intros a Ha. unfold sqrt_up. case compare_spec; try order. Qed. Lemma sqrt_up_eqn : forall a, 0 √°a == S √(P a). Proof. intros a Ha. unfold sqrt_up. case compare_spec; try order. Qed. Lemma sqrt_up_spec : forall a, 0 (P √°a)² < a <= (√°a)². Proof. intros a Ha. rewrite sqrt_up_eqn, pred_succ; trivial. assert (Ha' := lt_succ_pred 0 a Ha). rewrite <- Ha' at 3 4. rewrite le_succ_l, lt_succ_r. apply sqrt_spec. now rewrite <- lt_succ_r, Ha'. Qed. (** First, [sqrt_up] is non-negative *) Lemma sqrt_up_nonneg : forall a, 0<=√°a. Proof. intros. destruct (le_gt_cases a 0) as [Ha|Ha]. now rewrite sqrt_up_eqn0. rewrite sqrt_up_eqn; trivial. apply le_le_succ_r, sqrt_nonneg. Qed. (** [sqrt_up] is a morphism *) Instance sqrt_up_wd : Proper (eq==>eq) sqrt_up. Proof. assert (Proper (eq==>eq==>Logic.eq) compare). intros x x' Hx y y' Hy. do 2 case compare_spec; trivial; order. intros x x' Hx. unfold sqrt_up. rewrite Hx. case compare; now rewrite ?Hx. Qed. (** The spec of [sqrt_up] indeed determines it *) Lemma sqrt_up_unique : forall a b, 0 (P b)² < a <= b² -> √°a == b. Proof. intros a b Hb (LEb,LTb). assert (Ha : 0 √°(a²) == a. Proof. intros a Ha. le_elim Ha. rewrite sqrt_up_eqn by (now apply mul_pos_pos). rewrite sqrt_pred_square; trivial. apply (lt_succ_pred 0); trivial. rewrite sqrt_up_eqn0; trivial. rewrite <- Ha. now nzsimpl. Qed. (** [sqrt_up] and successors of squares *) Lemma sqrt_up_succ_square : forall a, 0<=a -> √°(S a²) == S a. Proof. intros a Ha. rewrite sqrt_up_eqn by (now apply lt_succ_r, mul_nonneg_nonneg). now rewrite pred_succ, sqrt_square. Qed. (** Basic constants *) Lemma sqrt_up_0 : √°0 == 0. Proof. rewrite <- (mul_0_l 0) at 1. now apply sqrt_up_square. Qed. Lemma sqrt_up_1 : √°1 == 1. Proof. rewrite <- (mul_1_l 1) at 1. apply sqrt_up_square. order'. Qed. Lemma sqrt_up_2 : √°2 == 2. 
Proof. rewrite sqrt_up_eqn by order'. now rewrite two_succ, pred_succ, sqrt_1. Qed. (** Links between sqrt and [sqrt_up] *) Lemma le_sqrt_sqrt_up : forall a, √a <= √°a. Proof. intros a. unfold sqrt_up. case compare_spec; intros H. rewrite <- H, sqrt_0. order. rewrite <- (lt_succ_pred 0 a H) at 1. apply sqrt_succ_le. apply lt_succ_r. now rewrite (lt_succ_pred 0 a H). now rewrite sqrt_neg. Qed. Lemma le_sqrt_up_succ_sqrt : forall a, √°a <= S (√a). Proof. intros a. unfold sqrt_up. case compare_spec; intros H; try apply le_le_succ_r, sqrt_nonneg. rewrite <- succ_le_mono. apply sqrt_le_mono. rewrite <- (lt_succ_pred 0 a H) at 2. apply le_succ_diag_r. Qed. Lemma sqrt_sqrt_up_spec : forall a, 0<=a -> (√a)² <= a <= (√°a)². Proof. intros a H. split. now apply sqrt_spec. le_elim H. now apply sqrt_up_spec. now rewrite <-H, sqrt_up_0, mul_0_l. Qed. Lemma sqrt_sqrt_up_exact : forall a, 0<=a -> (√a == √°a <-> exists b, 0<=b /\ a == b²). Proof. intros a Ha. split. intros. exists √a. split. apply sqrt_nonneg. generalize (sqrt_sqrt_up_spec a Ha). rewrite <-H. destruct 1; order. intros (b & Hb & Hb'). rewrite Hb'. now rewrite sqrt_square, sqrt_up_square. Qed. (** [sqrt_up] is a monotone function (but not a strict one) *) Lemma sqrt_up_le_mono : forall a b, a <= b -> √°a <= √°b. Proof. intros a b H. destruct (le_gt_cases a 0) as [Ha|Ha]. rewrite (sqrt_up_eqn0 _ Ha). apply sqrt_up_nonneg. rewrite 2 sqrt_up_eqn by order. rewrite <- succ_le_mono. apply sqrt_le_mono, succ_le_mono. rewrite 2 (lt_succ_pred 0); order. Qed. (** No reverse result for <=, consider for instance √°3 <= √°2 *) Lemma sqrt_up_lt_cancel : forall a b, √°a < √°b -> a < b. Proof. intros a b H. destruct (le_gt_cases b 0) as [Hb|Hb]. rewrite (sqrt_up_eqn0 _ Hb) in H; generalize (sqrt_up_nonneg a); order. destruct (le_gt_cases a 0) as [Ha|Ha]; [order|]. rewrite <- (lt_succ_pred 0 a Ha), <- (lt_succ_pred 0 b Hb), <- succ_lt_mono. apply sqrt_lt_cancel, succ_lt_mono. now rewrite <- 2 sqrt_up_eqn. Qed. (** When left side is a square, we have an equivalence for < *) Lemma sqrt_up_lt_square : forall a b, 0<=a -> 0<=b -> (b² < a <-> b < √°a). Proof. intros a b Ha Hb. split; intros H. destruct (sqrt_up_spec a) as (LE,LT). apply le_lt_trans with b²; trivial using square_nonneg. apply square_lt_simpl_nonneg; try order. apply sqrt_up_nonneg. apply sqrt_up_lt_cancel. now rewrite sqrt_up_square. Qed. (** When right side is a square, we have an equivalence for <= *) Lemma sqrt_up_le_square : forall a b, 0<=a -> 0<=b -> (a <= b² <-> √°a <= b). Proof. intros a b Ha Hb. split; intros H. rewrite <- (sqrt_up_square b Hb). now apply sqrt_up_le_mono. apply square_le_mono_nonneg in H; [|now apply sqrt_up_nonneg]. transitivity (√°a)²; trivial. now apply sqrt_sqrt_up_spec. Qed. Lemma sqrt_up_pos : forall a, 0 < √°a <-> 0 < a. Proof. intros a. split; intros Ha. apply sqrt_up_lt_cancel. now rewrite sqrt_up_0. rewrite <- le_succ_l, <- one_succ, <- sqrt_up_1. apply sqrt_up_le_mono. now rewrite one_succ, le_succ_l. Qed. Lemma sqrt_up_lt_lin : forall a, 2 √°a < a. Proof. intros a Ha. rewrite sqrt_up_eqn by order'. assert (Ha' := lt_succ_pred 2 a Ha). rewrite <- Ha' at 2. rewrite <- succ_lt_mono. apply sqrt_lt_lin. rewrite succ_lt_mono. now rewrite Ha', <- two_succ. Qed. Lemma sqrt_up_le_lin : forall a, 0<=a -> √°a<=a. Proof. intros a Ha. le_elim Ha. rewrite sqrt_up_eqn; trivial. apply le_succ_l. apply le_lt_trans with (P a). apply sqrt_le_lin. now rewrite <- lt_succ_r, (lt_succ_pred 0). rewrite <- (lt_succ_pred 0 a) at 2; trivial. apply lt_succ_diag_r. 
now rewrite <- Ha, sqrt_up_0. Qed. (** [sqrt_up] and multiplication. *) (** Due to rounding error, we don't have the usual [√(a*b) = √a*√b] but only lower and upper bounds. *) Lemma sqrt_up_mul_above : forall a b, 0<=a -> 0<=b -> √°(a*b) <= √°a * √°b. Proof. intros a b Ha Hb. apply sqrt_up_le_square. now apply mul_nonneg_nonneg. apply mul_nonneg_nonneg; apply sqrt_up_nonneg. rewrite mul_shuffle1. apply mul_le_mono_nonneg; trivial; now apply sqrt_sqrt_up_spec. Qed. Lemma sqrt_up_mul_below : forall a b, 0 0 (P √°a)*(P √°b) < √°(a*b). Proof. intros a b Ha Hb. apply sqrt_up_lt_square. apply mul_nonneg_nonneg; order. apply mul_nonneg_nonneg; apply lt_succ_r. rewrite (lt_succ_pred 0); now rewrite sqrt_up_pos. rewrite (lt_succ_pred 0); now rewrite sqrt_up_pos. rewrite mul_shuffle1. apply mul_lt_mono_nonneg; trivial using square_nonneg; now apply sqrt_up_spec. Qed. (** And we can't find better approximations in general. - The upper bound is exact for squares - Concerning the lower bound, for any c>0, take [a=b=c²+1], then [√°(a*b) = c²+1] while [P √°a = P √°b = c] *) (** [sqrt_up] and successor : - the [sqrt_up] function climbs by at most 1 at a time - otherwise it stays at the same value - the +1 steps occur after squares *) Lemma sqrt_up_succ_le : forall a, 0<=a -> √°(S a) <= S (√°a). Proof. intros a Ha. apply sqrt_up_le_square. now apply le_le_succ_r. apply le_le_succ_r, sqrt_up_nonneg. rewrite <- (add_1_l (√°a)). apply le_trans with (1²+(√°a)²). rewrite mul_1_l, add_1_l, <- succ_le_mono. now apply sqrt_sqrt_up_spec. apply add_square_le. order'. apply sqrt_up_nonneg. Qed. Lemma sqrt_up_succ_or : forall a, 0<=a -> √°(S a) == S (√°a) \/ √°(S a) == √°a. Proof. intros a Ha. destruct (le_gt_cases (√°(S a)) (√°a)) as [H|H]. right. generalize (sqrt_up_le_mono _ _ (le_succ_diag_r a)); order. left. apply le_succ_l in H. generalize (sqrt_up_succ_le a Ha); order. Qed. Lemma sqrt_up_eq_succ_iff_square : forall a, 0<=a -> (√°(S a) == S (√°a) <-> exists b, 0<=b /\ a == b²). Proof. intros a Ha. split. intros EQ. le_elim Ha. exists (√°a). split. apply sqrt_up_nonneg. generalize (proj2 (sqrt_up_spec a Ha)). assert (Ha' : 0 < S a) by (apply lt_succ_r; order'). generalize (proj1 (sqrt_up_spec (S a) Ha')). rewrite EQ, pred_succ, lt_succ_r. order. exists 0. nzsimpl. now split. intros (b & Hb & H). now rewrite H, sqrt_up_succ_square, sqrt_up_square. Qed. (** [sqrt_up] and addition *) Lemma sqrt_up_add_le : forall a b, √°(a+b) <= √°a + √°b. Proof. assert (AUX : forall a b, a<=0 -> √°(a+b) <= √°a + √°b). intros a b Ha. rewrite (sqrt_up_eqn0 a Ha). nzsimpl. apply sqrt_up_le_mono. rewrite <- (add_0_l b) at 2. apply add_le_mono_r; order. intros a b. destruct (le_gt_cases a 0) as [Ha|Ha]. now apply AUX. destruct (le_gt_cases b 0) as [Hb|Hb]. rewrite (add_comm a), (add_comm (√°a)); now apply AUX. rewrite 2 sqrt_up_eqn; trivial. nzsimpl. rewrite <- succ_le_mono. transitivity (√(P a) + √b). rewrite <- (lt_succ_pred 0 a Ha) at 1. nzsimpl. apply sqrt_add_le. apply add_le_mono_l. apply le_sqrt_sqrt_up. now apply add_pos_pos. Qed. (** Convexity-like inequality for [sqrt_up]: [sqrt_up] of middle is above middle of square roots. We cannot say more, for instance take a=b=2, then 2+2 <= S 3 *) Lemma add_sqrt_up_le : forall a b, 0<=a -> 0<=b -> √°a + √°b <= S √°(2*(a+b)). Proof. intros a b Ha Hb. le_elim Ha. le_elim Hb. rewrite 3 sqrt_up_eqn; trivial. nzsimpl. rewrite <- 2 succ_le_mono. etransitivity; [eapply add_sqrt_le|]. apply lt_succ_r. now rewrite (lt_succ_pred 0 a Ha). apply lt_succ_r. now rewrite (lt_succ_pred 0 b Hb). 
apply sqrt_le_mono. apply lt_succ_r. rewrite (lt_succ_pred 0). apply mul_lt_mono_pos_l. order'. apply add_lt_mono. apply le_succ_l. now rewrite (lt_succ_pred 0). apply le_succ_l. now rewrite (lt_succ_pred 0). apply mul_pos_pos. order'. now apply add_pos_pos. apply mul_pos_pos. order'. now apply add_pos_pos. rewrite <- Hb, sqrt_up_0. nzsimpl. apply le_le_succ_r, sqrt_up_le_mono. rewrite <- (mul_1_l a) at 1. apply mul_le_mono_nonneg_r; order'. rewrite <- Ha, sqrt_up_0. nzsimpl. apply le_le_succ_r, sqrt_up_le_mono. rewrite <- (mul_1_l b) at 1. apply mul_le_mono_nonneg_r; order'. Qed. End NZSqrtUpProp. coq-8.4pl2/theories/Numbers/NatInt/NZProperties.v0000640000175000001440000000160412010532755021055 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> t. End Gcd. Module Type NZGcdSpec (A : NZOrdAxiomsSig')(B : Gcd A). Import A B. Definition divide n m := exists p, m == p*n. Local Notation "( n | m )" := (divide n m) (at level 0). Axiom gcd_divide_l : forall n m, (gcd n m | n). Axiom gcd_divide_r : forall n m, (gcd n m | m). Axiom gcd_greatest : forall n m p, (p | n) -> (p | m) -> (p | gcd n m). Axiom gcd_nonneg : forall n m, 0 <= gcd n m. End NZGcdSpec. Module Type DivideNotation (A:NZOrdAxiomsSig')(B:Gcd A)(C:NZGcdSpec A B). Import A B C. Notation "( n | m )" := (divide n m) (at level 0). End DivideNotation. Module Type NZGcd (A : NZOrdAxiomsSig) := Gcd A <+ NZGcdSpec A. Module Type NZGcd' (A : NZOrdAxiomsSig) := Gcd A <+ NZGcdSpec A <+ DivideNotation A. (** Derived properties of gcd *) Module NZGcdProp (Import A : NZOrdAxiomsSig') (Import B : NZGcd' A) (Import C : NZMulOrderProp A). (** Results concerning divisibility*) Instance divide_wd : Proper (eq==>eq==>iff) divide. Proof. unfold divide. intros x x' Hx y y' Hy. setoid_rewrite Hx. setoid_rewrite Hy. easy. Qed. Lemma divide_1_l : forall n, (1 | n). Proof. intros n. exists n. now nzsimpl. Qed. Lemma divide_0_r : forall n, (n | 0). Proof. intros n. exists 0. now nzsimpl. Qed. Hint Rewrite divide_1_l divide_0_r : nz. Lemma divide_0_l : forall n, (0 | n) -> n==0. Proof. intros n (m,Hm). revert Hm. now nzsimpl. Qed. Lemma eq_mul_1_nonneg : forall n m, 0<=n -> n*m == 1 -> n==1 /\ m==1. Proof. intros n m Hn H. le_elim Hn. destruct (lt_ge_cases m 0) as [Hm|Hm]. generalize (mul_pos_neg n m Hn Hm). order'. le_elim Hm. apply le_succ_l in Hn. rewrite <- one_succ in Hn. le_elim Hn. generalize (lt_1_mul_pos n m Hn Hm). order. rewrite <- Hn, mul_1_l in H. now split. rewrite <- Hm, mul_0_r in H. order'. rewrite <- Hn, mul_0_l in H. order'. Qed. Lemma eq_mul_1_nonneg' : forall n m, 0<=m -> n*m == 1 -> n==1 /\ m==1. Proof. intros n m Hm H. rewrite mul_comm in H. now apply and_comm, eq_mul_1_nonneg. Qed. Lemma divide_1_r_nonneg : forall n, 0<=n -> (n | 1) -> n==1. Proof. intros n Hn (m,Hm). symmetry in Hm. now apply (eq_mul_1_nonneg' m n). Qed. Lemma divide_refl : forall n, (n | n). Proof. intros n. exists 1. now nzsimpl. Qed. Lemma divide_trans : forall n m p, (n | m) -> (m | p) -> (n | p). Proof. intros n m p (q,Hq) (r,Hr). exists (r*q). now rewrite Hr, Hq, mul_assoc. Qed. Instance divide_reflexive : Reflexive divide := divide_refl. Instance divide_transitive : Transitive divide := divide_trans. (** Due to sign, no general antisymmetry result *) Lemma divide_antisym_nonneg : forall n m, 0<=n -> 0<=m -> (n | m) -> (m | n) -> n == m. Proof. intros n m Hn Hm (q,Hq) (r,Hr). le_elim Hn. destruct (lt_ge_cases q 0) as [Hq'|Hq']. 
generalize (mul_neg_pos q n Hq' Hn). order. rewrite Hq, mul_assoc in Hr. symmetry in Hr. apply mul_id_l in Hr; [|order]. destruct (eq_mul_1_nonneg' r q) as [_ H]; trivial. now rewrite H, mul_1_l in Hq. rewrite <- Hn, mul_0_r in Hq. now rewrite <- Hn. Qed. Lemma mul_divide_mono_l : forall n m p, (n | m) -> (p * n | p * m). Proof. intros n m p (q,Hq). exists q. now rewrite mul_shuffle3, Hq. Qed. Lemma mul_divide_mono_r : forall n m p, (n | m) -> (n * p | m * p). Proof. intros n m p (q,Hq). exists q. now rewrite mul_assoc, Hq. Qed. Lemma mul_divide_cancel_l : forall n m p, p ~= 0 -> ((p * n | p * m) <-> (n | m)). Proof. intros n m p Hp. split. intros (q,Hq). exists q. now rewrite mul_shuffle3, mul_cancel_l in Hq. apply mul_divide_mono_l. Qed. Lemma mul_divide_cancel_r : forall n m p, p ~= 0 -> ((n * p | m * p) <-> (n | m)). Proof. intros. rewrite 2 (mul_comm _ p). now apply mul_divide_cancel_l. Qed. Lemma divide_add_r : forall n m p, (n | m) -> (n | p) -> (n | m + p). Proof. intros n m p (q,Hq) (r,Hr). exists (q+r). now rewrite mul_add_distr_r, Hq, Hr. Qed. Lemma divide_mul_l : forall n m p, (n | m) -> (n | m * p). Proof. intros n m p (q,Hq). exists (q*p). now rewrite mul_shuffle0, Hq. Qed. Lemma divide_mul_r : forall n m p, (n | p) -> (n | m * p). Proof. intros n m p. rewrite mul_comm. apply divide_mul_l. Qed. Lemma divide_factor_l : forall n m, (n | n * m). Proof. intros. apply divide_mul_l, divide_refl. Qed. Lemma divide_factor_r : forall n m, (n | m * n). Proof. intros. apply divide_mul_r, divide_refl. Qed. Lemma divide_pos_le : forall n m, 0 < m -> (n | m) -> n <= m. Proof. intros n m Hm (q,Hq). destruct (le_gt_cases n 0) as [Hn|Hn]. order. rewrite Hq. destruct (lt_ge_cases q 0) as [Hq'|Hq']. generalize (mul_neg_pos q n Hq' Hn). order. le_elim Hq'. rewrite <- (mul_1_l n) at 1. apply mul_le_mono_pos_r; trivial. now rewrite one_succ, le_succ_l. rewrite <- Hq', mul_0_l in Hq. order. Qed. (** Basic properties of gcd *) Lemma gcd_unique : forall n m p, 0<=p -> (p|n) -> (p|m) -> (forall q, (q|n) -> (q|m) -> (q|p)) -> gcd n m == p. Proof. intros n m p Hp Hn Hm H. apply divide_antisym_nonneg; trivial. apply gcd_nonneg. apply H. apply gcd_divide_l. apply gcd_divide_r. now apply gcd_greatest. Qed. Instance gcd_wd : Proper (eq==>eq==>eq) gcd. Proof. intros x x' Hx y y' Hy. apply gcd_unique. apply gcd_nonneg. rewrite Hx. apply gcd_divide_l. rewrite Hy. apply gcd_divide_r. intro. rewrite Hx, Hy. apply gcd_greatest. Qed. Lemma gcd_divide_iff : forall n m p, (p | gcd n m) <-> (p | n) /\ (p | m). Proof. intros. split. split. transitivity (gcd n m); trivial using gcd_divide_l. transitivity (gcd n m); trivial using gcd_divide_r. intros (H,H'). now apply gcd_greatest. Qed. Lemma gcd_unique_alt : forall n m p, 0<=p -> (forall q, (q|p) <-> (q|n) /\ (q|m)) -> gcd n m == p. Proof. intros n m p Hp H. apply gcd_unique; trivial. apply H. apply divide_refl. apply H. apply divide_refl. intros. apply H. now split. Qed. Lemma gcd_comm : forall n m, gcd n m == gcd m n. Proof. intros. apply gcd_unique_alt; try apply gcd_nonneg. intros. rewrite and_comm. apply gcd_divide_iff. Qed. Lemma gcd_assoc : forall n m p, gcd n (gcd m p) == gcd (gcd n m) p. Proof. intros. apply gcd_unique_alt; try apply gcd_nonneg. intros. now rewrite !gcd_divide_iff, and_assoc. Qed. Lemma gcd_0_l_nonneg : forall n, 0<=n -> gcd 0 n == n. Proof. intros. apply gcd_unique; trivial. apply divide_0_r. apply divide_refl. Qed. Lemma gcd_0_r_nonneg : forall n, 0<=n -> gcd n 0 == n. Proof. intros. now rewrite gcd_comm, gcd_0_l_nonneg. Qed. 
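(** Illustration added for exposition (not part of the original file):
    with [gcd_0_l_nonneg], the gcd of [0] with itself is [0]. The lemma
    name is ours. *)

Lemma gcd_0_0_example : gcd 0 0 == 0.
Proof.
  apply gcd_0_l_nonneg. order.
Qed.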
Lemma gcd_1_l : forall n, gcd 1 n == 1. Proof. intros. apply gcd_unique; trivial using divide_1_l, le_0_1. Qed. Lemma gcd_1_r : forall n, gcd n 1 == 1. Proof. intros. now rewrite gcd_comm, gcd_1_l. Qed. Lemma gcd_diag_nonneg : forall n, 0<=n -> gcd n n == n. Proof. intros. apply gcd_unique; trivial using divide_refl. Qed. Lemma gcd_eq_0_l : forall n m, gcd n m == 0 -> n == 0. Proof. intros. generalize (gcd_divide_l n m). rewrite H. apply divide_0_l. Qed. Lemma gcd_eq_0_r : forall n m, gcd n m == 0 -> m == 0. Proof. intros. apply gcd_eq_0_l with n. now rewrite gcd_comm. Qed. Lemma gcd_eq_0 : forall n m, gcd n m == 0 <-> n == 0 /\ m == 0. Proof. intros. split. split. now apply gcd_eq_0_l with m. now apply gcd_eq_0_r with n. intros (EQ,EQ'). rewrite EQ, EQ'. now apply gcd_0_r_nonneg. Qed. Lemma gcd_mul_diag_l : forall n m, 0<=n -> gcd n (n*m) == n. Proof. intros n m Hn. apply gcd_unique_alt; trivial. intros q. split. split; trivial. now apply divide_mul_l. now destruct 1. Qed. Lemma divide_gcd_iff : forall n m, 0<=n -> ((n|m) <-> gcd n m == n). Proof. intros n m Hn. split. intros (q,Hq). rewrite Hq. rewrite mul_comm. now apply gcd_mul_diag_l. intros EQ. rewrite <- EQ. apply gcd_divide_r. Qed. End NZGcdProp. coq-8.4pl2/theories/Numbers/NatInt/NZMul.v0000640000175000001440000000513512010532755017461 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t. End ZeroSuccPred. Module Type ZeroSuccPredNotation (T:Typ)(Import NZ:ZeroSuccPred T). Notation "0" := zero. Notation S := succ. Notation P := pred. End ZeroSuccPredNotation. Module Type ZeroSuccPred' (T:Typ) := ZeroSuccPred T <+ ZeroSuccPredNotation T. Module Type IsNZDomain (Import E:Eq')(Import NZ:ZeroSuccPred' E). Declare Instance succ_wd : Proper (eq ==> eq) S. Declare Instance pred_wd : Proper (eq ==> eq) P. Axiom pred_succ : forall n, P (S n) == n. Axiom bi_induction : forall A : t -> Prop, Proper (eq==>iff) A -> A 0 -> (forall n, A n <-> A (S n)) -> forall n, A n. End IsNZDomain. (** Axiomatization of some more constants Simply denoting "1" for (S 0) and so on works ok when implementing by nat, but leaves some (N.succ N0) when implementing by N. *) Module Type OneTwo (Import T:Typ). Parameter Inline(20) one two : t. End OneTwo. Module Type OneTwoNotation (T:Typ)(Import NZ:OneTwo T). Notation "1" := one. Notation "2" := two. End OneTwoNotation. Module Type OneTwo' (T:Typ) := OneTwo T <+ OneTwoNotation T. Module Type IsOneTwo (E:Eq')(Z:ZeroSuccPred' E)(O:OneTwo' E). Import E Z O. Axiom one_succ : 1 == S 0. Axiom two_succ : 2 == S 1. End IsOneTwo. Module Type NZDomainSig := EqualityType <+ ZeroSuccPred <+ IsNZDomain <+ OneTwo <+ IsOneTwo. Module Type NZDomainSig' := EqualityType' <+ ZeroSuccPred' <+ IsNZDomain <+ OneTwo' <+ IsOneTwo. (** Axiomatization of basic operations : [+] [-] [*] *) Module Type AddSubMul (Import T:Typ). Parameters Inline add sub mul : t -> t -> t. End AddSubMul. Module Type AddSubMulNotation (T:Typ)(Import NZ:AddSubMul T). Notation "x + y" := (add x y). Notation "x - y" := (sub x y). Notation "x * y" := (mul x y). End AddSubMulNotation. Module Type AddSubMul' (T:Typ) := AddSubMul T <+ AddSubMulNotation T. Module Type IsAddSubMul (Import E:NZDomainSig')(Import NZ:AddSubMul' E). Declare Instance add_wd : Proper (eq ==> eq ==> eq) add. Declare Instance sub_wd : Proper (eq ==> eq ==> eq) sub. Declare Instance mul_wd : Proper (eq ==> eq ==> eq) mul. Axiom add_0_l : forall n, 0 + n == n. 
Axiom add_succ_l : forall n m, (S n) + m == S (n + m). Axiom sub_0_r : forall n, n - 0 == n. Axiom sub_succ_r : forall n m, n - (S m) == P (n - m). Axiom mul_0_l : forall n, 0 * n == 0. Axiom mul_succ_l : forall n m, S n * m == n * m + m. End IsAddSubMul. Module Type NZBasicFunsSig := NZDomainSig <+ AddSubMul <+ IsAddSubMul. Module Type NZBasicFunsSig' := NZDomainSig' <+ AddSubMul' <+IsAddSubMul. (** Old name for the same interface: *) Module Type NZAxiomsSig := NZBasicFunsSig. Module Type NZAxiomsSig' := NZBasicFunsSig'. (** Axiomatization of order *) Module Type NZOrd := NZDomainSig <+ HasLt <+ HasLe. Module Type NZOrd' := NZDomainSig' <+ HasLt <+ HasLe <+ LtNotation <+ LeNotation <+ LtLeNotation. Module Type IsNZOrd (Import NZ : NZOrd'). Declare Instance lt_wd : Proper (eq ==> eq ==> iff) lt. Axiom lt_eq_cases : forall n m, n <= m <-> n < m \/ n == m. Axiom lt_irrefl : forall n, ~ (n < n). Axiom lt_succ_r : forall n m, n < S m <-> n <= m. End IsNZOrd. (** NB: the compatibility of [le] can be proved later from [lt_wd] and [lt_eq_cases] *) Module Type NZOrdSig := NZOrd <+ IsNZOrd. Module Type NZOrdSig' := NZOrd' <+ IsNZOrd. (** Everything together : *) Module Type NZOrdAxiomsSig <: NZBasicFunsSig <: NZOrdSig := NZOrdSig <+ AddSubMul <+ IsAddSubMul <+ HasMinMax. Module Type NZOrdAxiomsSig' <: NZOrdAxiomsSig := NZOrdSig' <+ AddSubMul' <+ IsAddSubMul <+ HasMinMax. (** Same, plus a comparison function. *) Module Type NZDecOrdSig := NZOrdSig <+ HasCompare. Module Type NZDecOrdSig' := NZOrdSig' <+ HasCompare. Module Type NZDecOrdAxiomsSig := NZOrdAxiomsSig <+ HasCompare. Module Type NZDecOrdAxiomsSig' := NZOrdAxiomsSig' <+ HasCompare. (** A square function *) Module Type NZSquare (Import NZ : NZBasicFunsSig'). Parameter Inline square : t -> t. Axiom square_spec : forall n, square n == n * n. End NZSquare. coq-8.4pl2/theories/Numbers/NatInt/NZBits.v0000640000175000001440000000551412010532755017626 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> bool. Parameters Inline shiftl shiftr land lor ldiff lxor : t -> t -> t. Parameter Inline div2 : t -> t. End Bits. Module Type BitsNotation (Import A : Typ)(Import B : Bits A). Notation "a .[ n ]" := (testbit a n) (at level 5, format "a .[ n ]"). Infix ">>" := shiftr (at level 30, no associativity). Infix "<<" := shiftl (at level 30, no associativity). End BitsNotation. Module Type Bits' (A:Typ) := Bits A <+ BitsNotation A. Module Type NZBitsSpec (Import A : NZOrdAxiomsSig')(Import B : Bits' A). Declare Instance testbit_wd : Proper (eq==>eq==>Logic.eq) testbit. Axiom testbit_odd_0 : forall a, (2*a+1).[0] = true. Axiom testbit_even_0 : forall a, (2*a).[0] = false. Axiom testbit_odd_succ : forall a n, 0<=n -> (2*a+1).[S n] = a.[n]. Axiom testbit_even_succ : forall a n, 0<=n -> (2*a).[S n] = a.[n]. Axiom testbit_neg_r : forall a n, n<0 -> a.[n] = false. Axiom shiftr_spec : forall a n m, 0<=m -> (a >> n).[m] = a.[m+n]. Axiom shiftl_spec_high : forall a n m, 0<=m -> n<=m -> (a << n).[m] = a.[m-n]. Axiom shiftl_spec_low : forall a n m, m (a << n).[m] = false. Axiom land_spec : forall a b n, (land a b).[n] = a.[n] && b.[n]. Axiom lor_spec : forall a b n, (lor a b).[n] = a.[n] || b.[n]. Axiom ldiff_spec : forall a b n, (ldiff a b).[n] = a.[n] && negb b.[n]. Axiom lxor_spec : forall a b n, (lxor a b).[n] = xorb a.[n] b.[n]. Axiom div2_spec : forall a, div2 a == a >> 1. End NZBitsSpec. Module Type NZBits (A:NZOrdAxiomsSig) := Bits A <+ NZBitsSpec A. 
Module Type NZBits' (A:NZOrdAxiomsSig) := Bits' A <+ NZBitsSpec A. (** In the functor of properties will also be defined: - [setbit : t -> t -> t ] defined as [lor a (1< t -> t ] defined as [ldiff a (1< t], the number with [n] initial true bits, corresponding to [2^n - 1]. - a logical complement [lnot]. For integer numbers it will be a [t->t], doing a swap of all bits, while on natural numbers, it will be a bounded complement [t->t->t], swapping only the first [n] bits. *) (** For the moment, no shared properties about NZ here, since properties and proofs for N and Z are quite different *) coq-8.4pl2/theories/Sets/0000750000175000001440000000000012127276551014376 5ustar notinuserscoq-8.4pl2/theories/Sets/Finite_sets.v0000640000175000001440000000563612010532755017044 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := | Empty_is_finite : Finite (Empty_set U) | Union_is_finite : forall A:Ensemble U, Finite A -> forall x:U, ~ In U A x -> Finite (Add U A x). Inductive cardinal : Ensemble U -> nat -> Prop := | card_empty : cardinal (Empty_set U) 0 | card_add : forall (A:Ensemble U) (n:nat), cardinal A n -> forall x:U, ~ In U A x -> cardinal (Add U A x) (S n). End Ensembles_finis. Hint Resolve Empty_is_finite Union_is_finite: sets v62. Hint Resolve card_empty card_add: sets v62. Require Import Constructive_sets. Section Ensembles_finis_facts. Variable U : Type. Lemma cardinal_invert : forall (X:Ensemble U) (p:nat), cardinal U X p -> match p with | O => X = Empty_set U | S n => exists A : _, (exists x : _, X = Add U A x /\ ~ In U A x /\ cardinal U A n) end. Proof. induction 1; simpl; auto. exists A; exists x; auto. Qed. Lemma cardinal_elim : forall (X:Ensemble U) (p:nat), cardinal U X p -> match p with | O => X = Empty_set U | S n => Inhabited U X end. Proof. intros X p C; elim C; simpl; trivial with sets. Qed. End Ensembles_finis_facts. coq-8.4pl2/theories/Sets/Permut.v0000640000175000001440000000555512010532755016044 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* U -> U. Variable cong : U -> U -> Prop. Hypothesis op_comm : forall x y:U, cong (op x y) (op y x). Hypothesis op_ass : forall x y z:U, cong (op (op x y) z) (op x (op y z)). Hypothesis cong_left : forall x y z:U, cong x y -> cong (op x z) (op y z). Hypothesis cong_right : forall x y z:U, cong x y -> cong (op z x) (op z y). Hypothesis cong_trans : forall x y z:U, cong x y -> cong y z -> cong x z. Hypothesis cong_sym : forall x y:U, cong x y -> cong y x. (** Remark. we do not need: [Hypothesis cong_refl : (x:U)(cong x x)]. *) Lemma cong_congr : forall x y z t:U, cong x y -> cong z t -> cong (op x z) (op y t). Proof. intros; apply cong_trans with (op y z). apply cong_left; trivial. apply cong_right; trivial. Qed. Lemma comm_right : forall x y z:U, cong (op x (op y z)) (op x (op z y)). Proof. intros; apply cong_right; apply op_comm. Qed. Lemma comm_left : forall x y z:U, cong (op (op x y) z) (op (op y x) z). Proof. intros; apply cong_left; apply op_comm. Qed. Lemma perm_right : forall x y z:U, cong (op (op x y) z) (op (op x z) y). Proof. intros. apply cong_trans with (op x (op y z)). apply op_ass. apply cong_trans with (op x (op z y)). apply cong_right; apply op_comm. apply cong_sym; apply op_ass. Qed. Lemma perm_left : forall x y z:U, cong (op x (op y z)) (op y (op x z)). Proof. intros. 
apply cong_trans with (op (op x y) z). apply cong_sym; apply op_ass. apply cong_trans with (op (op y x) z). apply cong_left; apply op_comm. apply op_ass. Qed. Lemma op_rotate : forall x y z t:U, cong (op x (op y z)) (op z (op x y)). Proof. intros; apply cong_trans with (op (op x y) z). apply cong_sym; apply op_ass. apply op_comm. Qed. (** Needed for treesort ... *) Lemma twist : forall x y z t:U, cong (op x (op (op y z) t)) (op (op y (op x t)) z). Proof. intros. apply cong_trans with (op x (op (op y t) z)). apply cong_right; apply perm_right. apply cong_trans with (op (op x (op y t)) z). apply cong_sym; apply op_ass. apply cong_left; apply perm_left. Qed. End Axiomatisation. coq-8.4pl2/theories/Sets/Ensembles.v0000640000175000001440000000766012010532755016504 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop. Definition In (A:Ensemble) (x:U) : Prop := A x. Definition Included (B C:Ensemble) : Prop := forall x:U, In B x -> In C x. Inductive Empty_set : Ensemble :=. Inductive Full_set : Ensemble := Full_intro : forall x:U, In Full_set x. (** NB: The following definition builds-in equality of elements in [U] as Leibniz equality. This may have to be changed if we replace [U] by a Setoid on [U] with its own equality [eqs], with [In_singleton: (y: U)(eqs x y) -> (In (Singleton x) y)]. *) Inductive Singleton (x:U) : Ensemble := In_singleton : In (Singleton x) x. Inductive Union (B C:Ensemble) : Ensemble := | Union_introl : forall x:U, In B x -> In (Union B C) x | Union_intror : forall x:U, In C x -> In (Union B C) x. Definition Add (B:Ensemble) (x:U) : Ensemble := Union B (Singleton x). Inductive Intersection (B C:Ensemble) : Ensemble := Intersection_intro : forall x:U, In B x -> In C x -> In (Intersection B C) x. Inductive Couple (x y:U) : Ensemble := | Couple_l : In (Couple x y) x | Couple_r : In (Couple x y) y. Inductive Triple (x y z:U) : Ensemble := | Triple_l : In (Triple x y z) x | Triple_m : In (Triple x y z) y | Triple_r : In (Triple x y z) z. Definition Complement (A:Ensemble) : Ensemble := fun x:U => ~ In A x. Definition Setminus (B C:Ensemble) : Ensemble := fun x:U => In B x /\ ~ In C x. Definition Subtract (B:Ensemble) (x:U) : Ensemble := Setminus B (Singleton x). Inductive Disjoint (B C:Ensemble) : Prop := Disjoint_intro : (forall x:U, ~ In (Intersection B C) x) -> Disjoint B C. Inductive Inhabited (B:Ensemble) : Prop := Inhabited_intro : forall x:U, In B x -> Inhabited B. Definition Strict_Included (B C:Ensemble) : Prop := Included B C /\ B <> C. Definition Same_set (B C:Ensemble) : Prop := Included B C /\ Included C B. (** Extensionality Axiom *) Axiom Extensionality_Ensembles : forall A B:Ensemble, Same_set A B -> A = B. End Ensembles. Hint Unfold In Included Same_set Strict_Included Add Setminus Subtract: sets v62. Hint Resolve Union_introl Union_intror Intersection_intro In_singleton Couple_l Couple_r Triple_l Triple_m Triple_r Disjoint_intro Extensionality_Ensembles: sets v62. 
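(* Illustrative example (editorial addition, not part of the original
   file): the element added by [Add] belongs to the resulting ensemble,
   directly from the constructors above.  Constructive_sets.v states this
   in general as [Add_intro2]. *)
Lemma Add_example : In nat (Add nat (Empty_set nat) 0) 0.
Proof.
  unfold Add; apply Union_intror; apply In_singleton.
Qed.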
coq-8.4pl2/theories/Sets/vo.itarget0000640000175000001440000000053411307752066016405 0ustar notinusersClassical_sets.vo Constructive_sets.vo Cpo.vo Ensembles.vo Finite_sets_facts.vo Finite_sets.vo Image.vo Infinite_sets.vo Integers.vo Multiset.vo Partial_Order.vo Permut.vo Powerset_Classical_facts.vo Powerset_facts.vo Powerset.vo Relations_1_facts.vo Relations_1.vo Relations_2_facts.vo Relations_2.vo Relations_3_facts.vo Relations_3.vo Uniset.vo coq-8.4pl2/theories/Sets/Integers.v0000640000175000001440000001232012010532755016334 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* exists m : nat, Upper_Bound nat nat_po X m. Proof. intros X H'; elim H'. exists 0. apply Upper_Bound_definition. unfold nat_po. simpl. apply triv_nat. intros y H'0; elim H'0; auto with sets arith. intros A H'0 H'1 x H'2; try assumption. elim H'1; intros x0 H'3; clear H'1. elim le_total_order. simpl. intro H'1; try assumption. lapply H'1; [ intro H'4; idtac | try assumption ]; auto with sets arith. generalize (H'4 x0 x). clear H'4. clear H'1. intro H'1; lapply H'1; [ intro H'4; elim H'4; [ intro H'5; try exact H'5; clear H'4 H'1 | intro H'5; clear H'4 H'1 ] | clear H'1 ]. exists x. apply Upper_Bound_definition. simpl. apply triv_nat. intros y H'1; elim H'1. generalize le_trans. intro H'4; red in H'4. intros x1 H'6; try assumption. apply H'4 with (y := x0). elim H'3; simpl; auto with sets arith. trivial. intros x1 H'4; elim H'4. unfold nat_po; simpl; trivial. exists x0. apply Upper_Bound_definition. unfold nat_po. simpl. apply triv_nat. intros y H'1; elim H'1. intros x1 H'4; try assumption. elim H'3; simpl; auto with sets arith. intros x1 H'4; elim H'4; auto with sets arith. red. intros x1 H'1; elim H'1; apply triv_nat. Qed. Lemma Integers_has_no_ub : ~ (exists m : nat, Upper_Bound nat nat_po Integers m). Proof. red; intro H'; elim H'. intros x H'0. elim H'0; intros H'1 H'2. cut (In nat Integers (S x)). intro H'3. specialize H'2 with (y := S x); lapply H'2; [ intro H'5; clear H'2 | try assumption; clear H'2 ]. simpl in H'5. absurd (S x <= x); auto with arith. apply triv_nat. Qed. Lemma Integers_infinite : ~ Finite nat Integers. Proof. generalize Integers_has_no_ub. intro H'; red; intro H'0; try exact H'0. apply H'. apply Finite_subset_has_lub; auto with sets arith. Qed. End Integers_sect. coq-8.4pl2/theories/Sets/Relations_1.v0000640000175000001440000000552612010532755016746 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* U -> Prop. Variable R : Relation. Definition Reflexive : Prop := forall x:U, R x x. Definition Transitive : Prop := forall x y z:U, R x y -> R y z -> R x z. Definition Symmetric : Prop := forall x y:U, R x y -> R y x. Definition Antisymmetric : Prop := forall x y:U, R x y -> R y x -> x = y. Definition contains (R R':Relation) : Prop := forall x y:U, R' x y -> R x y. Definition same_relation (R R':Relation) : Prop := contains R R' /\ contains R' R. Inductive Preorder : Prop := Definition_of_preorder : Reflexive -> Transitive -> Preorder. Inductive Order : Prop := Definition_of_order : Reflexive -> Transitive -> Antisymmetric -> Order. Inductive Equivalence : Prop := Definition_of_equivalence : Reflexive -> Transitive -> Symmetric -> Equivalence. Inductive PER : Prop := Definition_of_PER : Symmetric -> Transitive -> PER. End Relations_1. 
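(* Illustrative example (editorial addition, not part of the original
   file): Leibniz equality on an arbitrary type satisfies the
   [Equivalence] predicate defined above. *)
Lemma eq_equivalence_example : forall A:Type, Equivalence A (@eq A).
Proof.
  intro A; apply Definition_of_equivalence.
  red; intros; reflexivity.
  red; intros x y z H1 H2; rewrite H1; exact H2.
  red; intros x y H; rewrite H; reflexivity.
Qed.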
Hint Unfold Reflexive Transitive Antisymmetric Symmetric contains same_relation: sets v62. Hint Resolve Definition_of_preorder Definition_of_order Definition_of_equivalence Definition_of_PER: sets v62. coq-8.4pl2/theories/Sets/Relations_2.v0000640000175000001440000000465112010532755016745 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Prop := | Rstar_0 : Rstar x x | Rstar_n : forall y z:U, R x y -> Rstar y z -> Rstar x z. Inductive Rstar1 (x:U) : U -> Prop := | Rstar1_0 : Rstar1 x x | Rstar1_1 : forall y:U, R x y -> Rstar1 x y | Rstar1_n : forall y z:U, Rstar1 x y -> Rstar1 y z -> Rstar1 x z. Inductive Rplus (x:U) : U -> Prop := | Rplus_0 : forall y:U, R x y -> Rplus x y | Rplus_n : forall y z:U, R x y -> Rplus y z -> Rplus x z. Definition Strongly_confluent : Prop := forall x a b:U, R x a -> R x b -> ex (fun z:U => R a z /\ R b z). End Relations_2. Hint Resolve Rstar_0: sets v62. Hint Resolve Rstar1_0: sets v62. Hint Resolve Rstar1_1: sets v62. Hint Resolve Rplus_0: sets v62. coq-8.4pl2/theories/Sets/Cpo.v0000640000175000001440000001013612010532755015300 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (forall y:U, In U B y -> R y x) -> Upper_Bound B x. Inductive Lower_Bound (B:Ensemble U) (x:U) : Prop := Lower_Bound_definition : In U C x -> (forall y:U, In U B y -> R x y) -> Lower_Bound B x. Inductive Lub (B:Ensemble U) (x:U) : Prop := Lub_definition : Upper_Bound B x -> (forall y:U, Upper_Bound B y -> R x y) -> Lub B x. Inductive Glb (B:Ensemble U) (x:U) : Prop := Glb_definition : Lower_Bound B x -> (forall y:U, Lower_Bound B y -> R y x) -> Glb B x. Inductive Bottom (bot:U) : Prop := Bottom_definition : In U C bot -> (forall y:U, In U C y -> R bot y) -> Bottom bot. Inductive Totally_ordered (B:Ensemble U) : Prop := Totally_ordered_definition : (Included U B C -> forall x y:U, Included U (Couple U x y) B -> R x y \/ R y x) -> Totally_ordered B. Definition Compatible : Relation U := fun x y:U => In U C x -> In U C y -> exists z : _, In U C z /\ Upper_Bound (Couple U x y) z. Inductive Directed (X:Ensemble U) : Prop := Definition_of_Directed : Included U X C -> Inhabited U X -> (forall x1 x2:U, Included U (Couple U x1 x2) X -> exists x3 : _, In U X x3 /\ Upper_Bound (Couple U x1 x2) x3) -> Directed X. Inductive Complete : Prop := Definition_of_Complete : (exists bot : _, Bottom bot) -> (forall X:Ensemble U, Directed X -> exists bsup : _, Lub X bsup) -> Complete. Inductive Conditionally_complete : Prop := Definition_of_Conditionally_complete : (forall X:Ensemble U, Included U X C -> (exists maj : _, Upper_Bound X maj) -> exists bsup : _, Lub X bsup) -> Conditionally_complete. End Bounds. Hint Resolve Totally_ordered_definition Upper_Bound_definition Lower_Bound_definition Lub_definition Glb_definition Bottom_definition Definition_of_Complete Definition_of_Complete Definition_of_Conditionally_complete. Section Specific_orders. Variable U : Type. Record Cpo : Type := Definition_of_cpo {PO_of_cpo : PO U; Cpo_cond : Complete U PO_of_cpo}. Record Chain : Type := Definition_of_chain {PO_of_chain : PO U; Chain_cond : Totally_ordered U PO_of_chain (Carrier_of U PO_of_chain)}. End Specific_orders. 
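(* Illustrative remark (editorial note, not part of the original file): a
   [Cpo] packages a partial order on [U] together with a proof that it is
   [Complete], via the constructor [Definition_of_cpo]; a [Chain] likewise
   packages a partial order whose carrier is totally ordered.  Integers.v
   above uses the same vocabulary ([Upper_Bound], [Lub]) for the usual
   order on nat. *)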
coq-8.4pl2/theories/Sets/Relations_3.v0000640000175000001440000000517012010532755016743 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R x z -> coherent y z. Definition Locally_confluent : Prop := forall x:U, locally_confluent x. Definition confluent (x:U) : Prop := forall y z:U, Rstar U R x y -> Rstar U R x z -> coherent y z. Definition Confluent : Prop := forall x:U, confluent x. Inductive noetherian (x: U) : Prop := definition_of_noetherian : (forall y:U, R x y -> noetherian y) -> noetherian x. Definition Noetherian : Prop := forall x:U, noetherian x. End Relations_3. Hint Unfold coherent: sets v62. Hint Unfold locally_confluent: sets v62. Hint Unfold confluent: sets v62. Hint Unfold Confluent: sets v62. Hint Resolve definition_of_noetherian: sets v62. Hint Unfold Noetherian: sets v62. coq-8.4pl2/theories/Sets/Image.v0000640000175000001440000001602012010532755015577 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* V) : Ensemble V := Im_intro : forall x:U, In _ X x -> forall y:V, y = f x -> In _ (Im X f) y. Lemma Im_def : forall (X:Ensemble U) (f:U -> V) (x:U), In _ X x -> In _ (Im X f) (f x). Proof. intros X f x H'; try assumption. apply Im_intro with (x := x); auto with sets. Qed. Lemma Im_add : forall (X:Ensemble U) (x:U) (f:U -> V), Im (Add _ X x) f = Add _ (Im X f) (f x). Proof. intros X x f. apply Extensionality_Ensembles. split; red; intros x0 H'. elim H'; intros. rewrite H0. elim Add_inv with U X x x1; auto using Im_def with sets. destruct 1; auto using Im_def with sets. elim Add_inv with V (Im X f) (f x) x0. destruct 1 as [x0 H y H0]. rewrite H0; auto using Im_def with sets. destruct 1; auto using Im_def with sets. trivial. Qed. Lemma image_empty : forall f:U -> V, Im (Empty_set U) f = Empty_set V. Proof. intro f; try assumption. apply Extensionality_Ensembles. split; auto with sets. red. intros x H'; elim H'. intros x0 H'0; elim H'0; auto with sets. Qed. Lemma finite_image : forall (X:Ensemble U) (f:U -> V), Finite _ X -> Finite _ (Im X f). Proof. intros X f H'; elim H'. rewrite (image_empty f); auto with sets. intros A H'0 H'1 x H'2; clear H' X. rewrite (Im_add A x f); auto with sets. apply Add_preserves_Finite; auto with sets. Qed. Lemma Im_inv : forall (X:Ensemble U) (f:U -> V) (y:V), In _ (Im X f) y -> exists x : U, In _ X x /\ f x = y. Proof. intros X f y H'; elim H'. intros x H'0 y0 H'1; rewrite H'1. exists x; auto with sets. Qed. Definition injective (f:U -> V) := forall x y:U, f x = f y -> x = y. Lemma not_injective_elim : forall f:U -> V, ~ injective f -> exists x : _, (exists y : _, f x = f y /\ x <> y). Proof. unfold injective; intros f H. cut (exists x : _, ~ (forall y:U, f x = f y -> x = y)). 2: apply not_all_ex_not with (P := fun x:U => forall y:U, f x = f y -> x = y); trivial with sets. destruct 1 as [x C]; exists x. cut (exists y : _, ~ (f x = f y -> x = y)). 2: apply not_all_ex_not with (P := fun y:U => f x = f y -> x = y); trivial with sets. destruct 1 as [y D]; exists y. apply imply_to_and; trivial with sets. Qed. Lemma cardinal_Im_intro : forall (A:Ensemble U) (f:U -> V) (n:nat), cardinal _ A n -> exists p : nat, cardinal _ (Im A f) p. Proof. intros. apply finite_cardinal; apply finite_image. apply cardinal_finite with n; trivial with sets. Qed. 
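(* Illustrative example (editorial addition, not part of the original
   file): a direct instance of [Im_def] above, specialised to a
   singleton. *)
Lemma Im_singleton_example :
  forall (x:U) (f:U -> V), In _ (Im (Singleton U x) f) (f x).
Proof.
  intros x f; apply Im_def; apply In_singleton.
Qed.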
Lemma In_Image_elim : forall (A:Ensemble U) (f:U -> V), injective f -> forall x:U, In _ (Im A f) (f x) -> In _ A x. Proof. intros. elim Im_inv with A f (f x); trivial with sets. intros z C; elim C; intros InAz E. elim (H z x E); trivial with sets. Qed. Lemma injective_preserves_cardinal : forall (A:Ensemble U) (f:U -> V) (n:nat), injective f -> cardinal _ A n -> forall n':nat, cardinal _ (Im A f) n' -> n' = n. Proof. induction 2 as [| A n H'0 H'1 x H'2]; auto with sets. rewrite (image_empty f). intros n' CE. apply cardinal_unicity with V (Empty_set V); auto with sets. intro n'. rewrite (Im_add A x f). intro H'3. elim cardinal_Im_intro with A f n; trivial with sets. intros i CI. lapply (H'1 i); trivial with sets. cut (~ In _ (Im A f) (f x)). intros H0 H1. apply cardinal_unicity with V (Add _ (Im A f) (f x)); trivial with sets. apply card_add; auto with sets. rewrite <- H1; trivial with sets. red; intro; apply H'2. apply In_Image_elim with f; trivial with sets. Qed. Lemma cardinal_decreases : forall (A:Ensemble U) (f:U -> V) (n:nat), cardinal U A n -> forall n':nat, cardinal V (Im A f) n' -> n' <= n. Proof. induction 1 as [| A n H'0 H'1 x H'2]; auto with sets. rewrite (image_empty f); intros. cut (n' = 0). intro E; rewrite E; trivial with sets. apply cardinal_unicity with V (Empty_set V); auto with sets. intro n'. rewrite (Im_add A x f). elim cardinal_Im_intro with A f n; trivial with sets. intros p C H'3. apply le_trans with (S p). apply card_Add_gen with V (Im A f) (f x); trivial with sets. apply le_n_S; auto with sets. Qed. Theorem Pigeonhole : forall (A:Ensemble U) (f:U -> V) (n:nat), cardinal U A n -> forall n':nat, cardinal V (Im A f) n' -> n' < n -> ~ injective f. Proof. unfold not; intros A f n CAn n' CIfn' ltn'n I. cut (n' = n). intro E; generalize ltn'n; rewrite E; exact (lt_irrefl n). apply injective_preserves_cardinal with (A := A) (f := f) (n := n); trivial with sets. Qed. Lemma Pigeonhole_principle : forall (A:Ensemble U) (f:U -> V) (n:nat), cardinal _ A n -> forall n':nat, cardinal _ (Im A f) n' -> n' < n -> exists x : _, (exists y : _, f x = f y /\ x <> y). Proof. intros; apply not_injective_elim. apply Pigeonhole with A n n'; trivial with sets. Qed. End Image. Hint Resolve Im_def image_empty finite_image: sets v62. coq-8.4pl2/theories/Sets/intro.tex0000750000175000001440000000133507265050405016253 0ustar notinusers\section{Sets}\label{Sets} This is a library on sets defined by their characteristic predicate. It contains the following modules: \begin{itemize} \item {\tt Ensembles.v} \item {\tt Constructive\_sets.v}, {\tt Classical\_sets.v} \item {\tt Relations\_1.v}, {\tt Relations\_2.v}, {\tt Relations\_3.v}, {\tt Relations\_1\_facts.v}, \\ {\tt Relations\_2\_facts.v}, {\tt Relations\_3\_facts.v} \item {\tt Partial\_Order.v}, {\tt Cpo.v} \item {\tt Powerset.v}, {\tt Powerset\_facts.v}, {\tt Powerset\_Classical\_facts.v} \item {\tt Finite\_sets.v}, {\tt Finite\_sets\_facts.v} \item {\tt Image.v} \item {\tt Infinite\_sets.v} \item {\tt Integers.v} \end{itemize} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: coq-8.4pl2/theories/Sets/Constructive_sets.v0000640000175000001440000001153212010532755020306 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Same_set U B C. Proof. intros B C H'; rewrite H'; auto with sets. Qed. Lemma Noone_in_empty : forall x:U, ~ In U (Empty_set U) x. Proof. red; destruct 1. Qed. 
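(* Illustrative example (editorial addition, not part of the original
   file): an immediate consequence of [Noone_in_empty] above. *)
Lemma Empty_not_Inhabited_example : ~ Inhabited U (Empty_set U).
Proof.
  red; intro H; elim H; intros x Hx; exact (Noone_in_empty x Hx).
Qed.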
Lemma Included_Empty : forall A:Ensemble U, Included U (Empty_set U) A. Proof. intro; red. intros x H; elim (Noone_in_empty x); auto with sets. Qed. Lemma Add_intro1 : forall (A:Ensemble U) (x y:U), In U A y -> In U (Add U A x) y. Proof. unfold Add at 1; auto with sets. Qed. Lemma Add_intro2 : forall (A:Ensemble U) (x:U), In U (Add U A x) x. Proof. unfold Add at 1; auto with sets. Qed. Lemma Inhabited_add : forall (A:Ensemble U) (x:U), Inhabited U (Add U A x). Proof. intros A x. apply Inhabited_intro with (x := x); auto using Add_intro2 with sets. Qed. Lemma Inhabited_not_empty : forall X:Ensemble U, Inhabited U X -> X <> Empty_set U. Proof. intros X H'; elim H'. intros x H'0; red; intro H'1. absurd (In U X x); auto with sets. rewrite H'1; auto using Noone_in_empty with sets. Qed. Lemma Add_not_Empty : forall (A:Ensemble U) (x:U), Add U A x <> Empty_set U. Proof. intros A x; apply Inhabited_not_empty; apply Inhabited_add. Qed. Lemma not_Empty_Add : forall (A:Ensemble U) (x:U), Empty_set U <> Add U A x. Proof. intros; red; intro H; generalize (Add_not_Empty A x); auto with sets. Qed. Lemma Singleton_inv : forall x y:U, In U (Singleton U x) y -> x = y. Proof. intros x y H'; elim H'; trivial with sets. Qed. Lemma Singleton_intro : forall x y:U, x = y -> In U (Singleton U x) y. Proof. intros x y H'; rewrite H'; trivial with sets. Qed. Lemma Union_inv : forall (B C:Ensemble U) (x:U), In U (Union U B C) x -> In U B x \/ In U C x. Proof. intros B C x H'; elim H'; auto with sets. Qed. Lemma Add_inv : forall (A:Ensemble U) (x y:U), In U (Add U A x) y -> In U A y \/ x = y. Proof. intros A x y H'; induction H'. left; assumption. right; apply Singleton_inv; assumption. Qed. Lemma Intersection_inv : forall (B C:Ensemble U) (x:U), In U (Intersection U B C) x -> In U B x /\ In U C x. Proof. intros B C x H'; elim H'; auto with sets. Qed. Lemma Couple_inv : forall x y z:U, In U (Couple U x y) z -> z = x \/ z = y. Proof. intros x y z H'; elim H'; auto with sets. Qed. Lemma Setminus_intro : forall (A B:Ensemble U) (x:U), In U A x -> ~ In U B x -> In U (Setminus U A B) x. Proof. unfold Setminus at 1; red; auto with sets. Qed. Lemma Strict_Included_intro : forall X Y:Ensemble U, Included U X Y /\ X <> Y -> Strict_Included U X Y. Proof. auto with sets. Qed. Lemma Strict_Included_strict : forall X:Ensemble U, ~ Strict_Included U X X. Proof. intro X; red; intro H'; elim H'. intros H'0 H'1; elim H'1; auto with sets. Qed. End Ensembles_facts. Hint Resolve Singleton_inv Singleton_intro Add_intro1 Add_intro2 Intersection_inv Couple_inv Setminus_intro Strict_Included_intro Strict_Included_strict Noone_in_empty Inhabited_not_empty Add_not_Empty not_Empty_Add Inhabited_add Included_Empty: sets v62. coq-8.4pl2/theories/Sets/Partial_Order.v0000640000175000001440000000704112010532755017307 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Rel_of p x y /\ x <> y. Inductive covers (y x:U) : Prop := Definition_of_covers : Strict_Rel_of x y -> ~ (exists z : _, Strict_Rel_of x z /\ Strict_Rel_of z y) -> covers y x. End Partial_orders. Hint Unfold Carrier_of Rel_of Strict_Rel_of: sets v62. Hint Resolve Definition_of_covers: sets v62. Section Partial_order_facts. Variable U : Type. Variable D : PO U. Lemma Strict_Rel_Transitive_with_Rel : forall x y z:U, Strict_Rel_of U D x y -> Rel_of U D y z -> Strict_Rel_of U D x z. Proof. unfold Strict_Rel_of at 1. red. elim D; simpl. intros C R H' H'0; elim H'0. 
intros H'1 H'2 H'3 x y z H'4 H'5; split. apply H'2 with (y := y); tauto. red; intro H'6. elim H'4; intros H'7 H'8; apply H'8; clear H'4. apply H'3; auto. rewrite H'6; tauto. Qed. Lemma Strict_Rel_Transitive_with_Rel_left : forall x y z:U, Rel_of U D x y -> Strict_Rel_of U D y z -> Strict_Rel_of U D x z. Proof. unfold Strict_Rel_of at 1. red. elim D; simpl. intros C R H' H'0; elim H'0. intros H'1 H'2 H'3 x y z H'4 H'5; split. apply H'2 with (y := y); tauto. red; intro H'6. elim H'5; intros H'7 H'8; apply H'8; clear H'5. apply H'3; auto. rewrite <- H'6; auto. Qed. Lemma Strict_Rel_Transitive : Transitive U (Strict_Rel_of U D). red. intros x y z H' H'0. apply Strict_Rel_Transitive_with_Rel with (y := y); [ intuition | unfold Strict_Rel_of in H', H'0; intuition ]. Qed. End Partial_order_facts. coq-8.4pl2/theories/Sets/Powerset_facts.v0000640000175000001440000002046512010532755017555 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* X = Empty_set U. Proof. auto with sets. Qed. Theorem Union_commutative : forall A B:Ensemble U, Union U A B = Union U B A. Proof. auto with sets. Qed. Theorem Union_associative : forall A B C:Ensemble U, Union U (Union U A B) C = Union U A (Union U B C). Proof. auto 9 with sets. Qed. Theorem Union_idempotent : forall A:Ensemble U, Union U A A = A. Proof. auto 7 with sets. Qed. Lemma Union_absorbs : forall A B:Ensemble U, Included U B A -> Union U A B = A. Proof. auto 7 with sets. Qed. Theorem Couple_as_union : forall x y:U, Union U (Singleton U x) (Singleton U y) = Couple U x y. Proof. intros x y; apply Extensionality_Ensembles; split; red. intros x0 H'; elim H'; (intros x1 H'0; elim H'0; auto with sets). intros x0 H'; elim H'; auto with sets. Qed. Theorem Triple_as_union : forall x y z:U, Union U (Union U (Singleton U x) (Singleton U y)) (Singleton U z) = Triple U x y z. Proof. intros x y z; apply Extensionality_Ensembles; split; red. intros x0 H'; elim H'. intros x1 H'0; elim H'0; (intros x2 H'1; elim H'1; auto with sets). intros x1 H'0; elim H'0; auto with sets. intros x0 H'; elim H'; auto with sets. Qed. Theorem Triple_as_Couple : forall x y:U, Couple U x y = Triple U x x y. Proof. intros x y. rewrite <- (Couple_as_union x y). rewrite <- (Union_idempotent (Singleton U x)). apply Triple_as_union. Qed. Theorem Triple_as_Couple_Singleton : forall x y z:U, Triple U x y z = Union U (Couple U x y) (Singleton U z). Proof. intros x y z. rewrite <- (Triple_as_union x y z). rewrite <- (Couple_as_union x y); auto with sets. Qed. Theorem Intersection_commutative : forall A B:Ensemble U, Intersection U A B = Intersection U B A. Proof. intros A B. apply Extensionality_Ensembles. split; red; intros x H'; elim H'; auto with sets. Qed. Theorem Distributivity : forall A B C:Ensemble U, Intersection U A (Union U B C) = Union U (Intersection U A B) (Intersection U A C). Proof. intros A B C. apply Extensionality_Ensembles. split; red; intros x H'. elim H'. intros x0 H'0 H'1; generalize H'0. elim H'1; auto with sets. elim H'; intros x0 H'0; elim H'0; auto with sets. Qed. Theorem Distributivity' : forall A B C:Ensemble U, Union U A (Intersection U B C) = Intersection U (Union U A B) (Union U A C). Proof. intros A B C. apply Extensionality_Ensembles. split; red; intros x H'. elim H'; auto with sets. intros x0 H'0; elim H'0; auto with sets. elim H'. intros x0 H'0; elim H'0; auto with sets. intros x1 H'1 H'2; try exact H'2. generalize H'1. elim H'2; auto with sets. Qed. 
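(* Illustrative example (editorial addition, not part of the original
   file): the right-unit law for [Union], obtained from [Empty_set_zero]
   by commutativity. *)
Lemma Empty_set_zero_right_example :
  forall X:Ensemble U, Union U X (Empty_set U) = X.
Proof.
  intro X; rewrite Union_commutative; apply Empty_set_zero.
Qed.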
Theorem Union_add : forall (A B:Ensemble U) (x:U), Add U (Union U A B) x = Union U A (Add U B x). Proof. unfold Add; auto using Union_associative with sets. Qed. Theorem Non_disjoint_union : forall (X:Ensemble U) (x:U), In U X x -> Add U X x = X. Proof. intros X x H'; unfold Add. apply Extensionality_Ensembles; red. split; red; auto with sets. intros x0 H'0; elim H'0; auto with sets. intros t H'1; elim H'1; auto with sets. Qed. Theorem Non_disjoint_union' : forall (X:Ensemble U) (x:U), ~ In U X x -> Subtract U X x = X. Proof. intros X x H'; unfold Subtract. apply Extensionality_Ensembles. split; red; auto with sets. intros x0 H'0; elim H'0; auto with sets. intros x0 H'0; apply Setminus_intro; auto with sets. red; intro H'1; elim H'1. lapply (Singleton_inv U x x0); auto with sets. intro H'4; apply H'; rewrite H'4; auto with sets. Qed. Lemma singlx : forall x y:U, In U (Add U (Empty_set U) x) y -> x = y. Proof. intro x; rewrite (Empty_set_zero' x); auto with sets. Qed. Lemma incl_add : forall (A B:Ensemble U) (x:U), Included U A B -> Included U (Add U A x) (Add U B x). Proof. intros A B x H'; red; auto with sets. intros x0 H'0. lapply (Add_inv U A x x0); auto with sets. intro H'1; elim H'1; [ intro H'2; clear H'1 | intro H'2; rewrite <- H'2; clear H'1 ]; auto with sets. Qed. Lemma incl_add_x : forall (A B:Ensemble U) (x:U), ~ In U A x -> Included U (Add U A x) (Add U B x) -> Included U A B. Proof. unfold Included. intros A B x H' H'0 x0 H'1. lapply (H'0 x0); auto with sets. intro H'2; lapply (Add_inv U B x x0); auto with sets. intro H'3; elim H'3; [ intro H'4; try exact H'4; clear H'3 | intro H'4; clear H'3 ]. absurd (In U A x0); auto with sets. rewrite <- H'4; auto with sets. Qed. Lemma Add_commutative : forall (A:Ensemble U) (x y:U), Add U (Add U A x) y = Add U (Add U A y) x. Proof. intros A x y. unfold Add. rewrite (Union_associative A (Singleton U x) (Singleton U y)). rewrite (Union_commutative (Singleton U x) (Singleton U y)). rewrite <- (Union_associative A (Singleton U y) (Singleton U x)); auto with sets. Qed. Lemma Add_commutative' : forall (A:Ensemble U) (x y z:U), Add U (Add U (Add U A x) y) z = Add U (Add U (Add U A z) x) y. Proof. intros A x y z. rewrite (Add_commutative (Add U A x) y z). rewrite (Add_commutative A x z); auto with sets. Qed. Lemma Add_distributes : forall (A B:Ensemble U) (x y:U), Included U B A -> Add U (Add U A x) y = Union U (Add U A x) (Add U B y). Proof. intros A B x y H'; try assumption. rewrite <- (Union_add (Add U A x) B y). unfold Add at 4. rewrite (Union_commutative A (Singleton U x)). rewrite Union_associative. rewrite (Union_absorbs A B H'). rewrite (Union_commutative (Singleton U x) A). auto with sets. Qed. Lemma setcover_intro : forall (U:Type) (A x y:Ensemble U), Strict_Included U x y -> ~ (exists z : _, Strict_Included U x z /\ Strict_Included U z y) -> covers (Ensemble U) (Power_set_PO U A) y x. Proof. intros; apply Definition_of_covers; auto with sets. Qed. End Sets_as_an_algebra. Hint Resolve Empty_set_zero Empty_set_zero' Union_associative Union_add singlx incl_add: sets v62. coq-8.4pl2/theories/Sets/Classical_sets.v0000640000175000001440000001074712010532755017523 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Inhabited U A. Proof. intros A NI. elim (not_all_ex_not U (fun x:U => ~ In U A x)). intros x H; apply Inhabited_intro with x. apply NNPP; auto with sets. red; intro. apply NI; red. intros x H'; elim (H x); trivial with sets. Qed. 
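(* Illustrative example (editorial addition, not part of the original
   file): with classical logic, every element lies either in an ensemble
   or in its complement. *)
Lemma In_or_Complement_example :
  forall (A:Ensemble U) (x:U), In U A x \/ In U (Complement U A) x.
Proof.
  intros A x; elim (classic (In U A x)).
  intro H; left; exact H.
  intro H; right; exact H.
Qed.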
Lemma not_empty_Inhabited : forall A:Ensemble U, A <> Empty_set U -> Inhabited U A. Proof. intros; apply not_included_empty_Inhabited. red; auto with sets. Qed. Lemma Inhabited_Setminus : forall X Y:Ensemble U, Included U X Y -> ~ Included U Y X -> Inhabited U (Setminus U Y X). Proof. intros X Y I NI. elim (not_all_ex_not U (fun x:U => In U Y x -> In U X x) NI). intros x YX. apply Inhabited_intro with x. apply Setminus_intro. apply not_imply_elim with (In U X x); trivial with sets. auto with sets. Qed. Lemma Strict_super_set_contains_new_element : forall X Y:Ensemble U, Included U X Y -> X <> Y -> Inhabited U (Setminus U Y X). Proof. auto 7 using Inhabited_Setminus with sets. Qed. Lemma Subtract_intro : forall (A:Ensemble U) (x y:U), In U A y -> x <> y -> In U (Subtract U A x) y. Proof. unfold Subtract at 1; auto with sets. Qed. Hint Resolve Subtract_intro : sets. Lemma Subtract_inv : forall (A:Ensemble U) (x y:U), In U (Subtract U A x) y -> In U A y /\ x <> y. Proof. intros A x y H'; elim H'; auto with sets. Qed. Lemma Included_Strict_Included : forall X Y:Ensemble U, Included U X Y -> Strict_Included U X Y \/ X = Y. Proof. intros X Y H'; try assumption. elim (classic (X = Y)); auto with sets. Qed. Lemma Strict_Included_inv : forall X Y:Ensemble U, Strict_Included U X Y -> Included U X Y /\ Inhabited U (Setminus U Y X). Proof. intros X Y H'; red in H'. split; [ tauto | idtac ]. elim H'; intros H'0 H'1; try exact H'1; clear H'. apply Strict_super_set_contains_new_element; auto with sets. Qed. Lemma not_SIncl_empty : forall X:Ensemble U, ~ Strict_Included U X (Empty_set U). Proof. intro X; red; intro H'; try exact H'. lapply (Strict_Included_inv X (Empty_set U)); auto with sets. intro H'0; elim H'0; intros H'1 H'2; elim H'2; clear H'0. intros x H'0; elim H'0. intro H'3; elim H'3. Qed. Lemma Complement_Complement : forall A:Ensemble U, Complement U (Complement U A) = A. Proof. unfold Complement; intros; apply Extensionality_Ensembles; auto with sets. red; split; auto with sets. red; intros; apply NNPP; auto with sets. Qed. End Ensembles_classical. Hint Resolve Strict_super_set_contains_new_element Subtract_intro not_SIncl_empty: sets v62. coq-8.4pl2/theories/Sets/Finite_sets_facts.v0000640000175000001440000002757312010532755020230 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* exists n : nat, cardinal U X n. Proof. induction 1 as [| A _ [n H]]. exists 0; auto with sets. exists (S n); auto with sets. Qed. Lemma cardinal_finite : forall (X:Ensemble U) (n:nat), cardinal U X n -> Finite U X. Proof. induction 1; auto with sets. Qed. Theorem Add_preserves_Finite : forall (X:Ensemble U) (x:U), Finite U X -> Finite U (Add U X x). Proof. intros X x H'. elim (classic (In U X x)); intro H'0; auto with sets. rewrite (Non_disjoint_union U X x); auto with sets. Qed. Theorem Singleton_is_finite : forall x:U, Finite U (Singleton U x). Proof. intro x; rewrite <- (Empty_set_zero U (Singleton U x)). change (Finite U (Add U (Empty_set U) x)); auto with sets. Qed. Theorem Union_preserves_Finite : forall X Y:Ensemble U, Finite U X -> Finite U Y -> Finite U (Union U X Y). Proof. intros X Y H; induction H as [|A Fin_A Hind x]. rewrite (Empty_set_zero U Y). trivial. intros. rewrite (Union_commutative U (Add U A x) Y). rewrite <- (Union_add U Y A x). rewrite (Union_commutative U Y A). apply Add_preserves_Finite. apply Hind. assumption. Qed. 
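(* Illustrative example (editorial addition, not part of the original
   file): an unordered pair is finite, combining [Singleton_is_finite] and
   [Union_preserves_Finite] through [Couple_as_union]. *)
Lemma Couple_is_finite_example : forall x y:U, Finite U (Couple U x y).
Proof.
  intros x y; rewrite <- (Couple_as_union U x y).
  apply Union_preserves_Finite; apply Singleton_is_finite.
Qed.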
Lemma Finite_downward_closed : forall A:Ensemble U, Finite U A -> forall X:Ensemble U, Included U X A -> Finite U X. Proof. intros A H'; elim H'; auto with sets. intros X H'0. rewrite (less_than_empty U X H'0); auto with sets. intros; elim Included_Add with U X A0 x; auto with sets. destruct 1 as [A' [H5 H6]]. rewrite H5; auto with sets. Qed. Lemma Intersection_preserves_finite : forall A:Ensemble U, Finite U A -> forall X:Ensemble U, Finite U (Intersection U X A). Proof. intros A H' X; apply Finite_downward_closed with A; auto with sets. Qed. Lemma cardinalO_empty : forall X:Ensemble U, cardinal U X 0 -> X = Empty_set U. Proof. intros X H; apply (cardinal_invert U X 0); trivial with sets. Qed. Lemma inh_card_gt_O : forall X:Ensemble U, Inhabited U X -> forall n:nat, cardinal U X n -> n > 0. Proof. induction 1 as [x H']. intros n H'0. elim (gt_O_eq n); auto with sets. intro H'1; generalize H'; generalize H'0. rewrite <- H'1; intro H'2. rewrite (cardinalO_empty X); auto with sets. intro H'3; elim H'3. Qed. Lemma card_soustr_1 : forall (X:Ensemble U) (n:nat), cardinal U X n -> forall x:U, In U X x -> cardinal U (Subtract U X x) (pred n). Proof. intros X n H'; elim H'. intros x H'0; elim H'0. clear H' n X. intros X n H' H'0 x H'1 x0 H'2. elim (classic (In U X x0)). intro H'4; rewrite (add_soustr_xy U X x x0). elim (classic (x = x0)). intro H'5. absurd (In U X x0); auto with sets. rewrite <- H'5; auto with sets. intro H'3; try assumption. cut (S (pred n) = pred (S n)). intro H'5; rewrite <- H'5. apply card_add; auto with sets. red; intro H'6; elim H'6. intros H'7 H'8; try assumption. elim H'1; auto with sets. unfold pred at 2; symmetry . apply S_pred with (m := 0). change (n > 0). apply inh_card_gt_O with (X := X); auto with sets. apply Inhabited_intro with (x := x0); auto with sets. red; intro H'3. apply H'1. elim H'3; auto with sets. rewrite H'3; auto with sets. elim (classic (x = x0)). intro H'3; rewrite <- H'3. cut (Subtract U (Add U X x) x = X); auto with sets. intro H'4; rewrite H'4; auto with sets. intros H'3 H'4; try assumption. absurd (In U (Add U X x) x0); auto with sets. red; intro H'5; try exact H'5. lapply (Add_inv U X x x0); tauto. Qed. Lemma cardinal_is_functional : forall (X:Ensemble U) (c1:nat), cardinal U X c1 -> forall (Y:Ensemble U) (c2:nat), cardinal U Y c2 -> X = Y -> c1 = c2. Proof. intros X c1 H'; elim H'. intros Y c2 H'0; elim H'0; auto with sets. intros A n H'1 H'2 x H'3 H'5. elim (not_Empty_Add U A x); auto with sets. clear H' c1 X. intros X n H' H'0 x H'1 Y c2 H'2. elim H'2. intro H'3. elim (not_Empty_Add U X x); auto with sets. clear H'2 c2 Y. intros X0 c2 H'2 H'3 x0 H'4 H'5. elim (classic (In U X0 x)). intro H'6; apply f_equal. apply H'0 with (Y := Subtract U (Add U X0 x0) x). elimtype (pred (S c2) = c2); auto with sets. apply card_soustr_1; auto with sets. rewrite <- H'5. apply Sub_Add_new; auto with sets. elim (classic (x = x0)). intros H'6 H'7; apply f_equal. apply H'0 with (Y := X0); auto with sets. apply Simplify_add with (x := x); auto with sets. pattern x at 2; rewrite H'6; auto with sets. intros H'6 H'7. absurd (Add U X x = Add U X0 x0); auto with sets. clear H'0 H' H'3 n H'5 H'4 H'2 H'1 c2. red; intro H'. lapply (Extension U (Add U X x) (Add U X0 x0)); auto with sets. clear H'. intro H'; red in H'. elim H'; intros H'0 H'1; red in H'0; clear H' H'1. absurd (In U (Add U X0 x0) x); auto with sets. lapply (Add_inv U X0 x0 x); [ intuition | apply (H'0 x); apply Add_intro2 ]. Qed. Lemma cardinal_Empty : forall m:nat, cardinal U (Empty_set U) m -> 0 = m. Proof. 
intros m Cm; generalize (cardinal_invert U (Empty_set U) m Cm). elim m; auto with sets. intros; elim H0; intros; elim H1; intros; elim H2; intros. elim (not_Empty_Add U x x0 H3). Qed. Lemma cardinal_unicity : forall (X:Ensemble U) (n:nat), cardinal U X n -> forall m:nat, cardinal U X m -> n = m. Proof. intros; apply cardinal_is_functional with X X; auto with sets. Qed. Lemma card_Add_gen : forall (A:Ensemble U) (x:U) (n n':nat), cardinal U A n -> cardinal U (Add U A x) n' -> n' <= S n. Proof. intros A x n n' H'. elim (classic (In U A x)). intro H'0. rewrite (Non_disjoint_union U A x H'0). intro H'1; cut (n = n'). intro E; rewrite E; auto with sets. apply cardinal_unicity with A; auto with sets. intros H'0 H'1. cut (n' = S n). intro E; rewrite E; auto with sets. apply cardinal_unicity with (Add U A x); auto with sets. Qed. Lemma incl_st_card_lt : forall (X:Ensemble U) (c1:nat), cardinal U X c1 -> forall (Y:Ensemble U) (c2:nat), cardinal U Y c2 -> Strict_Included U X Y -> c2 > c1. Proof. intros X c1 H'; elim H'. intros Y c2 H'0; elim H'0; auto with sets arith. intro H'1. elim (Strict_Included_strict U (Empty_set U)); auto with sets arith. clear H' c1 X. intros X n H' H'0 x H'1 Y c2 H'2. elim H'2. intro H'3; elim (not_SIncl_empty U (Add U X x)); auto with sets arith. clear H'2 c2 Y. intros X0 c2 H'2 H'3 x0 H'4 H'5; elim (classic (In U X0 x)). intro H'6; apply gt_n_S. apply H'0 with (Y := Subtract U (Add U X0 x0) x). elimtype (pred (S c2) = c2); auto with sets arith. apply card_soustr_1; auto with sets arith. apply incl_st_add_soustr; auto with sets arith. elim (classic (x = x0)). intros H'6 H'7; apply gt_n_S. apply H'0 with (Y := X0); auto with sets arith. apply sincl_add_x with (x := x0). rewrite <- H'6; auto with sets arith. pattern x0 at 1; rewrite <- H'6; trivial with sets arith. intros H'6 H'7; red in H'5. elim H'5; intros H'8 H'9; try exact H'8; clear H'5. red in H'8. generalize (H'8 x). intro H'5; lapply H'5; auto with sets arith. intro H; elim Add_inv with U X0 x0 x; auto with sets arith. intro; absurd (In U X0 x); auto with sets arith. intro; absurd (x = x0); auto with sets arith. Qed. Lemma incl_card_le : forall (X Y:Ensemble U) (n m:nat), cardinal U X n -> cardinal U Y m -> Included U X Y -> n <= m. Proof. intros; elim Included_Strict_Included with U X Y; auto with sets arith; intro. cut (m > n); auto with sets arith. apply incl_st_card_lt with (X := X) (Y := Y); auto with sets arith. generalize H0; rewrite <- H2; intro. cut (n = m). intro E; rewrite E; auto with sets arith. apply cardinal_unicity with X; auto with sets arith. Qed. Lemma G_aux : forall P:Ensemble U -> Prop, (forall X:Ensemble U, Finite U X -> (forall Y:Ensemble U, Strict_Included U Y X -> P Y) -> P X) -> P (Empty_set U). Proof. intros P H'; try assumption. apply H'; auto with sets. clear H'; auto with sets. intros Y H'; try assumption. red in H'. elim H'; intros H'0 H'1; try exact H'1; clear H'. lapply (less_than_empty U Y); [ intro H'3; try exact H'3 | assumption ]. elim H'1; auto with sets. Qed. Lemma Generalized_induction_on_finite_sets : forall P:Ensemble U -> Prop, (forall X:Ensemble U, Finite U X -> (forall Y:Ensemble U, Strict_Included U Y X -> P Y) -> P X) -> forall X:Ensemble U, Finite U X -> P X. Proof. intros P H'0 X H'1. generalize P H'0; clear H'0 P. elim H'1. intros P H'0. apply G_aux; auto with sets. clear H'1 X. intros A H' H'0 x H'1 P H'3. cut (forall Y:Ensemble U, Included U Y (Add U A x) -> P Y); auto with sets. generalize H'1. apply H'0. intros X K H'5 L Y H'6; apply H'3; auto with sets. 
apply Finite_downward_closed with (A := Add U X x); auto with sets. intros Y0 H'7. elim (Strict_inclusion_is_transitive_with_inclusion U Y0 Y (Add U X x)); auto with sets. intros H'2 H'4. elim (Included_Add U Y0 X x); [ intro H'14 | intro H'14; elim H'14; intros A' E; elim E; intros H'15 H'16; clear E H'14 | idtac ]; auto with sets. elim (Included_Strict_Included U Y0 X); auto with sets. intro H'9; apply H'5 with (Y := Y0); auto with sets. intro H'9; rewrite H'9. apply H'3; auto with sets. intros Y1 H'8; elim H'8. intros H'10 H'11; apply H'5 with (Y := Y1); auto with sets. elim (Included_Strict_Included U A' X); auto with sets. intro H'8; apply H'5 with (Y := A'); auto with sets. rewrite <- H'15; auto with sets. intro H'8. elim H'7. intros H'9 H'10; apply H'10 || elim H'10; try assumption. generalize H'6. rewrite <- H'8. rewrite <- H'15; auto with sets. Qed. End Finite_sets_facts. coq-8.4pl2/theories/Sets/Relations_1_facts.v0000640000175000001440000000770012010532755020122 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ~ R x y. Theorem Rsym_imp_notRsym : forall (U:Type) (R:Relation U), Symmetric U R -> Symmetric U (Complement U R). Proof. unfold Symmetric, Complement. intros U R H' x y H'0; red; intro H'1; apply H'0; auto with sets. Qed. Theorem Equiv_from_preorder : forall (U:Type) (R:Relation U), Preorder U R -> Equivalence U (fun x y:U => R x y /\ R y x). Proof. intros U R H'; elim H'; intros H'0 H'1. apply Definition_of_equivalence. red in H'0; auto 10 with sets. 2: red; intros x y h; elim h; intros H'3 H'4; auto 10 with sets. red in H'1; red; auto 10 with sets. intros x y z h; elim h; intros H'3 H'4; clear h. intro h; elim h; intros H'5 H'6; clear h. split; apply H'1 with y; auto 10 with sets. Qed. Hint Resolve Equiv_from_preorder. Theorem Equiv_from_order : forall (U:Type) (R:Relation U), Order U R -> Equivalence U (fun x y:U => R x y /\ R y x). Proof. intros U R H'; elim H'; auto 10 with sets. Qed. Hint Resolve Equiv_from_order. Theorem contains_is_preorder : forall U:Type, Preorder (Relation U) (contains U). Proof. auto 10 with sets. Qed. Hint Resolve contains_is_preorder. Theorem same_relation_is_equivalence : forall U:Type, Equivalence (Relation U) (same_relation U). Proof. unfold same_relation at 1; auto 10 with sets. Qed. Hint Resolve same_relation_is_equivalence. Theorem cong_reflexive_same_relation : forall (U:Type) (R R':Relation U), same_relation U R R' -> Reflexive U R -> Reflexive U R'. Proof. unfold same_relation; intuition. Qed. Theorem cong_symmetric_same_relation : forall (U:Type) (R R':Relation U), same_relation U R R' -> Symmetric U R -> Symmetric U R'. Proof. compute; intros; elim H; intros; clear H; apply (H3 y x (H0 x y (H2 x y H1))). (*Intuition.*) Qed. Theorem cong_antisymmetric_same_relation : forall (U:Type) (R R':Relation U), same_relation U R R' -> Antisymmetric U R -> Antisymmetric U R'. Proof. compute; intros; elim H; intros; clear H; apply (H0 x y (H3 x y H1) (H3 y x H2)). (*Intuition.*) Qed. Theorem cong_transitive_same_relation : forall (U:Type) (R R':Relation U), same_relation U R R' -> Transitive U R -> Transitive U R'. Proof. intros U R R' H' H'0; red. elim H'. intros H'1 H'2 x y z H'3 H'4; apply H'2. apply H'0 with y; auto with sets. Qed. 
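(* Illustrative example (editorial addition, not part of the original
   file): [same_relation] is reflexive, a special case of
   [same_relation_is_equivalence] above. *)
Lemma same_relation_refl_example :
  forall (U:Type) (R:Relation U), same_relation U R R.
Proof.
  intros U R; red; split; red; intros x y H; exact H.
Qed.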
coq-8.4pl2/theories/Sets/Relations_2_facts.v0000640000175000001440000001264512010532755020127 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* x = y \/ (exists u : _, R x u /\ Rstar U R u y). Proof. intros U R x y H'; elim H'; auto with sets. intros x0 y0 z H'0 H'1 H'2; right; exists y0; auto with sets. Qed. Theorem Rstar_equiv_Rstar1 : forall (U:Type) (R:Relation U), same_relation U (Rstar U R) (Rstar1 U R). Proof. generalize Rstar_contains_R; intro T; red in T. intros U R; unfold same_relation, contains. split; intros x y H'; elim H'; auto with sets. generalize Rstar_transitive; intro T1; red in T1. intros x0 y0 z H'0 H'1 H'2 H'3; apply T1 with y0; auto with sets. intros x0 y0 z H'0 H'1 H'2; apply Rstar1_n with y0; auto with sets. Qed. Theorem Rsym_imp_Rstarsym : forall (U:Type) (R:Relation U), Symmetric U R -> Symmetric U (Rstar U R). Proof. intros U R H'; red. intros x y H'0; elim H'0; auto with sets. intros x0 y0 z H'1 H'2 H'3. generalize Rstar_transitive; intro T1; red in T1. apply T1 with y0; auto with sets. apply Rstar_n with x0; auto with sets. Qed. Theorem Sstar_contains_Rstar : forall (U:Type) (R S:Relation U), contains U (Rstar U S) R -> contains U (Rstar U S) (Rstar U R). Proof. unfold contains. intros U R S H' x y H'0; elim H'0; auto with sets. generalize Rstar_transitive; intro T1; red in T1. intros x0 y0 z H'1 H'2 H'3; apply T1 with y0; auto with sets. Qed. Theorem star_monotone : forall (U:Type) (R S:Relation U), contains U S R -> contains U (Rstar U S) (Rstar U R). Proof. intros U R S H'. apply Sstar_contains_Rstar; auto with sets. generalize (Rstar_contains_R U S); auto with sets. Qed. Theorem RstarRplus_RRstar : forall (U:Type) (R:Relation U) (x y z:U), Rstar U R x y -> Rplus U R y z -> exists u : _, R x u /\ Rstar U R u z. Proof. generalize Rstar_contains_Rplus; intro T; red in T. generalize Rstar_transitive; intro T1; red in T1. intros U R x y z H'; elim H'. intros x0 H'0; elim H'0. intros x1 y0 H'1; exists y0; auto with sets. intros x1 y0 z0 H'1 H'2 H'3; exists y0; auto with sets. intros x0 y0 z0 H'0 H'1 H'2 H'3; exists y0. split; [ try assumption | idtac ]. apply T1 with z0; auto with sets. Qed. Theorem Lemma1 : forall (U:Type) (R:Relation U), Strongly_confluent U R -> forall x b:U, Rstar U R x b -> forall a:U, R x a -> exists z : _, Rstar U R a z /\ R b z. Proof. intros U R H' x b H'0; elim H'0. intros x0 a H'1; exists a; auto with sets. intros x0 y z H'1 H'2 H'3 a H'4. red in H'. specialize H' with (x := x0) (a := a) (b := y); lapply H'; [ intro H'8; lapply H'8; [ intro H'9; try exact H'9; clear H'8 H' | clear H'8 H' ] | clear H' ]; auto with sets. elim H'9. intros t H'5; elim H'5; intros H'6 H'7; try exact H'6; clear H'5. elim (H'3 t); auto with sets. intros z1 H'5; elim H'5; intros H'8 H'10; try exact H'8; clear H'5. exists z1; split; [ idtac | assumption ]. apply Rstar_n with t; auto with sets. Qed. coq-8.4pl2/theories/Sets/Relations_3_facts.v0000640000175000001440000001430512010532755020123 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* coherent U R x y. Proof. intros U R x y H'; red. exists y; auto with sets. Qed. Hint Resolve Rstar_imp_coherent. Theorem coherent_symmetric : forall (U:Type) (R:Relation U), Symmetric U (coherent U R). Proof. unfold coherent at 1. intros U R; red. intros x y H'; elim H'. intros z H'0; exists z; tauto. Qed. 
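(* Illustrative example (editorial addition, not part of the original
   file): [coherent] is reflexive, an immediate instance of
   [Rstar_imp_coherent] above. *)
Lemma coherent_reflexive_example :
  forall (U:Type) (R:Relation U) (x:U), coherent U R x x.
Proof.
  intros U R x; apply Rstar_imp_coherent; apply Rstar_0.
Qed.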
Theorem Strong_confluence : forall (U:Type) (R:Relation U), Strongly_confluent U R -> Confluent U R. Proof. intros U R H'; red. intro x; red; intros a b H'0. unfold coherent at 1. generalize b; clear b. elim H'0; clear H'0. intros x0 b H'1; exists b; auto with sets. intros x0 y z H'1 H'2 H'3 b H'4. generalize (Lemma1 U R); intro h; lapply h; [ intro H'0; generalize (H'0 x0 b); intro h0; lapply h0; [ intro H'5; generalize (H'5 y); intro h1; lapply h1; [ intro h2; elim h2; intros z0 h3; elim h3; intros H'6 H'7; clear h h0 h1 h2 h3 | clear h h0 h1 ] | clear h h0 ] | clear h ]; auto with sets. generalize (H'3 z0); intro h; lapply h; [ intro h0; elim h0; intros z1 h1; elim h1; intros H'8 H'9; clear h h0 h1 | clear h ]; auto with sets. exists z1; split; auto with sets. apply Rstar_n with z0; auto with sets. Qed. Theorem Strong_confluence_direct : forall (U:Type) (R:Relation U), Strongly_confluent U R -> Confluent U R. Proof. intros U R H'; red. intro x; red; intros a b H'0. unfold coherent at 1. generalize b; clear b. elim H'0; clear H'0. intros x0 b H'1; exists b; auto with sets. intros x0 y z H'1 H'2 H'3 b H'4. cut (ex (fun t:U => Rstar U R y t /\ R b t)). intro h; elim h; intros t h0; elim h0; intros H'0 H'5; clear h h0. generalize (H'3 t); intro h; lapply h; [ intro h0; elim h0; intros z0 h1; elim h1; intros H'6 H'7; clear h h0 h1 | clear h ]; auto with sets. exists z0; split; [ assumption | idtac ]. apply Rstar_n with t; auto with sets. generalize H'1; generalize y; clear H'1. elim H'4. intros x1 y0 H'0; exists y0; auto with sets. intros x1 y0 z0 H'0 H'1 H'5 y1 H'6. red in H'. generalize (H' x1 y0 y1); intro h; lapply h; [ intro H'7; lapply H'7; [ intro h0; elim h0; intros z1 h1; elim h1; intros H'8 H'9; clear h H'7 h0 h1 | clear h ] | clear h ]; auto with sets. generalize (H'5 z1); intro h; lapply h; [ intro h0; elim h0; intros t h1; elim h1; intros H'7 H'10; clear h h0 h1 | clear h ]; auto with sets. exists t; split; auto with sets. apply Rstar_n with z1; auto with sets. Qed. Theorem Noetherian_contains_Noetherian : forall (U:Type) (R R':Relation U), Noetherian U R -> contains U R R' -> Noetherian U R'. Proof. unfold Noetherian at 2. intros U R R' H' H'0 x. elim (H' x); auto with sets. Qed. Theorem Newman : forall (U:Type) (R:Relation U), Noetherian U R -> Locally_confluent U R -> Confluent U R. Proof. intros U R H' H'0; red; intro x. elim (H' x); unfold confluent. intros x0 H'1 H'2 y z H'3 H'4. generalize (Rstar_cases U R x0 y); intro h; lapply h; [ intro h0; elim h0; [ clear h h0; intro h1 | intro h1; elim h1; intros u h2; elim h2; intros H'5 H'6; clear h h0 h1 h2 ] | clear h ]; auto with sets. elim h1; auto with sets. generalize (Rstar_cases U R x0 z); intro h; lapply h; [ intro h0; elim h0; [ clear h h0; intro h1 | intro h1; elim h1; intros v h2; elim h2; intros H'7 H'8; clear h h0 h1 h2 ] | clear h ]; auto with sets. elim h1; generalize coherent_symmetric; intro t; red in t; auto with sets. unfold Locally_confluent, locally_confluent, coherent in H'0. generalize (H'0 x0 u v); intro h; lapply h; [ intro H'9; lapply H'9; [ intro h0; elim h0; intros t h1; elim h1; intros H'10 H'11; clear h H'9 h0 h1 | clear h ] | clear h ]; auto with sets. clear H'0. unfold coherent at 1 in H'2. generalize (H'2 u); intro h; lapply h; [ intro H'0; generalize (H'0 y t); intro h0; lapply h0; [ intro H'9; lapply H'9; [ intro h1; elim h1; intros y1 h2; elim h2; intros H'12 H'13; clear h h0 H'9 h1 h2 | clear h h0 ] | clear h h0 ] | clear h ]; auto with sets. generalize Rstar_transitive; intro T; red in T. 
generalize (H'2 v); intro h; lapply h; [ intro H'9; generalize (H'9 y1 z); intro h0; lapply h0; [ intro H'14; lapply H'14; [ intro h1; elim h1; intros z1 h2; elim h2; intros H'15 H'16; clear h h0 H'14 h1 h2 | clear h h0 ] | clear h h0 ] | clear h ]; auto with sets. red; (exists z1; split); auto with sets. apply T with y1; auto with sets. apply T with t; auto with sets. Qed. coq-8.4pl2/theories/Sets/Infinite_sets.v0000640000175000001440000002123212010532755017361 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Included U X A -> Approximant A X. End Approx. Hint Resolve Defn_of_Approximant. Section Infinite_sets. Variable U : Type. Lemma make_new_approximant : forall A X:Ensemble U, ~ Finite U A -> Approximant U A X -> Inhabited U (Setminus U A X). Proof. intros A X H' H'0. elim H'0; intros H'1 H'2. apply Strict_super_set_contains_new_element; auto with sets. red; intro H'3; apply H'. rewrite <- H'3; auto with sets. Qed. Lemma approximants_grow : forall A X:Ensemble U, ~ Finite U A -> forall n:nat, cardinal U X n -> Included U X A -> exists Y : _, cardinal U Y (S n) /\ Included U Y A. Proof. intros A X H' n H'0; elim H'0; auto with sets. intro H'1. cut (Inhabited U (Setminus U A (Empty_set U))). intro H'2; elim H'2. intros x H'3. exists (Add U (Empty_set U) x); auto with sets. split. apply card_add; auto with sets. cut (In U A x). intro H'4; red; auto with sets. intros x0 H'5; elim H'5; auto with sets. intros x1 H'6; elim H'6; auto with sets. elim H'3; auto with sets. apply make_new_approximant; auto with sets. intros A0 n0 H'1 H'2 x H'3 H'5. lapply H'2; [ intro H'6; elim H'6; clear H'2 | clear H'2 ]; auto with sets. intros x0 H'2; try assumption. elim H'2; intros H'7 H'8; try exact H'8; clear H'2. elim (make_new_approximant A x0); auto with sets. intros x1 H'2; try assumption. exists (Add U x0 x1); auto with sets. split. apply card_add; auto with sets. elim H'2; auto with sets. red. intros x2 H'9; elim H'9; auto with sets. intros x3 H'10; elim H'10; auto with sets. elim H'2; auto with sets. auto with sets. apply Defn_of_Approximant; auto with sets. apply cardinal_finite with (n := S n0); auto with sets. Qed. Lemma approximants_grow' : forall A X:Ensemble U, ~ Finite U A -> forall n:nat, cardinal U X n -> Approximant U A X -> exists Y : _, cardinal U Y (S n) /\ Approximant U A Y. Proof. intros A X H' n H'0 H'1; try assumption. elim H'1. intros H'2 H'3. elimtype (exists Y : _, cardinal U Y (S n) /\ Included U Y A). intros x H'4; elim H'4; intros H'5 H'6; try exact H'5; clear H'4. exists x; auto with sets. split; [ auto with sets | idtac ]. apply Defn_of_Approximant; auto with sets. apply cardinal_finite with (n := S n); auto with sets. apply approximants_grow with (X := X); auto with sets. Qed. Lemma approximant_can_be_any_size : forall A X:Ensemble U, ~ Finite U A -> forall n:nat, exists Y : _, cardinal U Y n /\ Approximant U A Y. Proof. intros A H' H'0 n; elim n. exists (Empty_set U); auto with sets. intros n0 H'1; elim H'1. intros x H'2. apply approximants_grow' with (X := x); tauto. Qed. Variable V : Type. Theorem Image_set_continuous : forall (A:Ensemble U) (f:U -> V) (X:Ensemble V), Finite V X -> Included V X (Im U V A f) -> exists n : _, (exists Y : _, (cardinal U Y n /\ Included U Y A) /\ Im U V Y f = X). Proof. intros A f X H'; elim H'. intro H'0; exists 0. exists (Empty_set U); auto with sets. intros A0 H'0 H'1 x H'2 H'3; try assumption. 
lapply H'1; [ intro H'4; elim H'4; intros n E; elim E; clear H'4 H'1 | clear H'1 ]; auto with sets. intros x0 H'1; try assumption. exists (S n); try assumption. elim H'1; intros H'4 H'5; elim H'4; intros H'6 H'7; try exact H'6; clear H'4 H'1. clear E. generalize H'2. rewrite <- H'5. intro H'1; try assumption. red in H'3. generalize (H'3 x). intro H'4; lapply H'4; [ intro H'8; try exact H'8; clear H'4 | clear H'4 ]; auto with sets. specialize Im_inv with (U := U) (V := V) (X := A) (f := f) (y := x); intro H'11; lapply H'11; [ intro H'13; elim H'11; clear H'11 | clear H'11 ]; auto with sets. intros x1 H'4; try assumption. apply ex_intro with (x := Add U x0 x1). split; [ split; [ try assumption | idtac ] | idtac ]. apply card_add; auto with sets. red; intro H'9; try exact H'9. apply H'1. elim H'4; intros H'10 H'11; rewrite <- H'11; clear H'4; auto with sets. elim H'4; intros H'9 H'10; try exact H'9; clear H'4; auto with sets. red; auto with sets. intros x2 H'4; elim H'4; auto with sets. intros x3 H'11; elim H'11; auto with sets. elim H'4; intros H'9 H'10; rewrite <- H'10; clear H'4; auto with sets. apply Im_add; auto with sets. Qed. Theorem Image_set_continuous' : forall (A:Ensemble U) (f:U -> V) (X:Ensemble V), Approximant V (Im U V A f) X -> exists Y : _, Approximant U A Y /\ Im U V Y f = X. Proof. intros A f X H'; try assumption. cut (exists n : _, (exists Y : _, (cardinal U Y n /\ Included U Y A) /\ Im U V Y f = X)). intro H'0; elim H'0; intros n E; elim E; clear H'0. intros x H'0; try assumption. elim H'0; intros H'1 H'2; elim H'1; intros H'3 H'4; try exact H'3; clear H'1 H'0; auto with sets. exists x. split; [ idtac | try assumption ]. apply Defn_of_Approximant; auto with sets. apply cardinal_finite with (n := n); auto with sets. apply Image_set_continuous; auto with sets. elim H'; auto with sets. elim H'; auto with sets. Qed. Theorem Pigeonhole_bis : forall (A:Ensemble U) (f:U -> V), ~ Finite U A -> Finite V (Im U V A f) -> ~ injective U V f. Proof. intros A f H'0 H'1; try assumption. elim (Image_set_continuous' A f (Im U V A f)); auto with sets. intros x H'2; elim H'2; intros H'3 H'4; try exact H'3; clear H'2. elim (make_new_approximant A x); auto with sets. intros x0 H'2; elim H'2. intros H'5 H'6. elim (finite_cardinal V (Im U V A f)); auto with sets. intros n E. elim (finite_cardinal U x); auto with sets. intros n0 E0. apply Pigeonhole with (A := Add U x x0) (n := S n0) (n' := n). apply card_add; auto with sets. rewrite (Im_add U V x x0 f); auto with sets. cut (In V (Im U V x f) (f x0)). intro H'8. rewrite (Non_disjoint_union V (Im U V x f) (f x0)); auto with sets. rewrite H'4; auto with sets. elim (Extension V (Im U V x f) (Im U V A f)); auto with sets. apply le_lt_n_Sm. apply cardinal_decreases with (U := U) (V := V) (A := x) (f := f); auto with sets. rewrite H'4; auto with sets. elim H'3; auto with sets. Qed. Theorem Pigeonhole_ter : forall (A:Ensemble U) (f:U -> V) (n:nat), injective U V f -> Finite V (Im U V A f) -> Finite U A. Proof. intros A f H' H'0 H'1. apply NNPP. red; intro H'2. elim (Pigeonhole_bis A f); auto with sets. Qed. End Infinite_sets. coq-8.4pl2/theories/Sets/Powerset_Classical_facts.v0000640000175000001440000002637512010532755021541 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Strict_Included U (Add U A x) (Add U B x) -> Strict_Included U A B. Proof. intros A B x H' H'0; red. lapply (Strict_Included_inv U (Add U A x) (Add U B x)); auto with sets. 
clear H'0; intro H'0; split. apply incl_add_x with (x := x); tauto. elim H'0; intros H'1 H'2; elim H'2; clear H'0 H'2. intros x0 H'0. red; intro H'2. elim H'0; clear H'0. rewrite <- H'2; auto with sets. Qed. Lemma incl_soustr_in : forall (X:Ensemble U) (x:U), In U X x -> Included U (Subtract U X x) X. Proof. intros X x H'; red. intros x0 H'0; elim H'0; auto with sets. Qed. Lemma incl_soustr : forall (X Y:Ensemble U) (x:U), Included U X Y -> Included U (Subtract U X x) (Subtract U Y x). Proof. intros X Y x H'; red. intros x0 H'0; elim H'0. intros H'1 H'2. apply Subtract_intro; auto with sets. Qed. Lemma incl_soustr_add_l : forall (X:Ensemble U) (x:U), Included U (Subtract U (Add U X x) x) X. Proof. intros X x; red. intros x0 H'; elim H'; auto with sets. intro H'0; elim H'0; auto with sets. intros t H'1 H'2; elim H'2; auto with sets. Qed. Lemma incl_soustr_add_r : forall (X:Ensemble U) (x:U), ~ In U X x -> Included U X (Subtract U (Add U X x) x). Proof. intros X x H'; red. intros x0 H'0; try assumption. apply Subtract_intro; auto with sets. red; intro H'1; apply H'; rewrite H'1; auto with sets. Qed. Hint Resolve incl_soustr_add_r: sets v62. Lemma add_soustr_2 : forall (X:Ensemble U) (x:U), In U X x -> Included U X (Add U (Subtract U X x) x). Proof. intros X x H'; red. intros x0 H'0; try assumption. elim (classic (x = x0)); intro K; auto with sets. elim K; auto with sets. Qed. Lemma add_soustr_1 : forall (X:Ensemble U) (x:U), In U X x -> Included U (Add U (Subtract U X x) x) X. Proof. intros X x H'; red. intros x0 H'0; elim H'0; auto with sets. intros y H'1; elim H'1; auto with sets. intros t H'1; try assumption. rewrite <- (Singleton_inv U x t); auto with sets. Qed. Lemma add_soustr_xy : forall (X:Ensemble U) (x y:U), x <> y -> Subtract U (Add U X x) y = Add U (Subtract U X y) x. Proof. intros X x y H'; apply Extensionality_Ensembles. split; red. intros x0 H'0; elim H'0; auto with sets. intro H'1; elim H'1. intros u H'2 H'3; try assumption. apply Add_intro1. apply Subtract_intro; auto with sets. intros t H'2 H'3; try assumption. elim (Singleton_inv U x t); auto with sets. intros u H'2; try assumption. elim (Add_inv U (Subtract U X y) x u); auto with sets. intro H'0; elim H'0; auto with sets. intro H'0; rewrite <- H'0; auto with sets. Qed. Lemma incl_st_add_soustr : forall (X Y:Ensemble U) (x:U), ~ In U X x -> Strict_Included U (Add U X x) Y -> Strict_Included U X (Subtract U Y x). Proof. intros X Y x H' H'0; apply sincl_add_x with (x := x); auto using add_soustr_1 with sets. split. elim H'0. intros H'1 H'2. generalize (Inclusion_is_transitive U). intro H'4; red in H'4. apply H'4 with (y := Y); auto using add_soustr_2 with sets. red in H'0. elim H'0; intros H'1 H'2; try exact H'1; clear H'0. (* PB *) red; intro H'0; apply H'2. rewrite H'0; auto 8 using add_soustr_xy, add_soustr_1, add_soustr_2 with sets. Qed. Lemma Sub_Add_new : forall (X:Ensemble U) (x:U), ~ In U X x -> X = Subtract U (Add U X x) x. Proof. auto using incl_soustr_add_l with sets. Qed. Lemma Simplify_add : forall (X X0:Ensemble U) (x:U), ~ In U X x -> ~ In U X0 x -> Add U X x = Add U X0 x -> X = X0. Proof. intros X X0 x H' H'0 H'1; try assumption. rewrite (Sub_Add_new X x); auto with sets. rewrite (Sub_Add_new X0 x); auto with sets. rewrite H'1; auto with sets. Qed. Lemma Included_Add : forall (X A:Ensemble U) (x:U), Included U X (Add U A x) -> Included U X A \/ (exists A' : _, X = Add U A' x /\ Included U A' A). Proof. intros X A x H'0; try assumption. elim (classic (In U X x)). intro H'1; right; try assumption. 
exists (Subtract U X x). split; auto using incl_soustr_in, add_soustr_xy, add_soustr_1, add_soustr_2 with sets. red in H'0. red. intros x0 H'2; try assumption. lapply (Subtract_inv U X x x0); auto with sets. intro H'3; elim H'3; intros K K'; clear H'3. lapply (H'0 x0); auto with sets. intro H'3; try assumption. lapply (Add_inv U A x x0); auto with sets. intro H'4; elim H'4; [ intro H'5; try exact H'5; clear H'4 | intro H'5; clear H'4 ]. elim K'; auto with sets. intro H'1; left; try assumption. red in H'0. red. intros x0 H'2; try assumption. lapply (H'0 x0); auto with sets. intro H'3; try assumption. lapply (Add_inv U A x x0); auto with sets. intro H'4; elim H'4; [ intro H'5; try exact H'5; clear H'4 | intro H'5; clear H'4 ]. absurd (In U X x0); auto with sets. rewrite <- H'5; auto with sets. Qed. Lemma setcover_inv : forall A x y:Ensemble U, covers (Ensemble U) (Power_set_PO U A) y x -> Strict_Included U x y /\ (forall z:Ensemble U, Included U x z -> Included U z y -> x = z \/ z = y). Proof. intros A x y H'; elim H'. unfold Strict_Rel_of; simpl. intros H'0 H'1; split; [ auto with sets | idtac ]. intros z H'2 H'3; try assumption. elim (classic (x = z)); auto with sets. intro H'4; right; try assumption. elim (classic (z = y)); auto with sets. intro H'5; try assumption. elim H'1. exists z; auto with sets. Qed. Theorem Add_covers : forall A a:Ensemble U, Included U a A -> forall x:U, In U A x -> ~ In U a x -> covers (Ensemble U) (Power_set_PO U A) (Add U a x) a. Proof. intros A a H' x H'0 H'1; try assumption. apply setcover_intro; auto with sets. red. split; [ idtac | red; intro H'2; try exact H'2 ]; auto with sets. apply H'1. rewrite H'2; auto with sets. red; intro H'2; elim H'2; clear H'2. intros z H'2; elim H'2; intros H'3 H'4; try exact H'3; clear H'2. lapply (Strict_Included_inv U a z); auto with sets; clear H'3. intro H'2; elim H'2; intros H'3 H'5; elim H'5; clear H'2 H'5. intros x0 H'2; elim H'2. intros H'5 H'6; try assumption. generalize H'4; intro K. red in H'4. elim H'4; intros H'8 H'9; red in H'8; clear H'4. lapply (H'8 x0); auto with sets. intro H'7; try assumption. elim (Add_inv U a x x0); auto with sets. intro H'15. cut (Included U (Add U a x) z). intro H'10; try assumption. red in K. elim K; intros H'11 H'12; apply H'12; clear K; auto with sets. rewrite H'15. red. intros x1 H'10; elim H'10; auto with sets. intros x2 H'11; elim H'11; auto with sets. Qed. Theorem covers_Add : forall A a a':Ensemble U, Included U a A -> Included U a' A -> covers (Ensemble U) (Power_set_PO U A) a' a -> exists x : _, a' = Add U a x /\ In U A x /\ ~ In U a x. Proof. intros A a a' H' H'0 H'1; try assumption. elim (setcover_inv A a a'); auto with sets. intros H'6 H'7. clear H'1. elim (Strict_Included_inv U a a'); auto with sets. intros H'5 H'8; elim H'8. intros x H'1; elim H'1. intros H'2 H'3; try assumption. exists x. split; [ try assumption | idtac ]. clear H'8 H'1. elim (H'7 (Add U a x)); auto with sets. intro H'1. absurd (a = Add U a x); auto with sets. red; intro H'8; try exact H'8. apply H'3. rewrite H'8; auto with sets. auto with sets. red. intros x0 H'1; elim H'1; auto with sets. intros x1 H'8; elim H'8; auto with sets. split; [ idtac | try assumption ]. red in H'0; auto with sets. Qed. Theorem covers_is_Add : forall A a a':Ensemble U, Included U a A -> Included U a' A -> (covers (Ensemble U) (Power_set_PO U A) a' a <-> (exists x : _, a' = Add U a x /\ In U A x /\ ~ In U a x)). Proof. intros A a a' H' H'0; split; intro K. apply covers_Add with (A := A); auto with sets. elim K. 
intros x H'1; elim H'1; intros H'2 H'3; rewrite H'2; clear H'1. apply Add_covers; intuition. Qed. Theorem Singleton_atomic : forall (x:U) (A:Ensemble U), In U A x -> covers (Ensemble U) (Power_set_PO U A) (Singleton U x) (Empty_set U). Proof. intros x A H'. rewrite <- (Empty_set_zero' U x). apply Add_covers; auto with sets. Qed. Lemma less_than_singleton : forall (X:Ensemble U) (x:U), Strict_Included U X (Singleton U x) -> X = Empty_set U. Proof. intros X x H'; try assumption. red in H'. lapply (Singleton_atomic x (Full_set U)); [ intro H'2; try exact H'2 | apply Full_intro ]. elim H'; intros H'0 H'1; try exact H'1; clear H'. elim (setcover_inv (Full_set U) (Empty_set U) (Singleton U x)); [ intros H'6 H'7; try exact H'7 | idtac ]; auto with sets. elim (H'7 X); [ intro H'5; try exact H'5 | intro H'5 | idtac | idtac ]; auto with sets. elim H'1; auto with sets. Qed. End Sets_as_an_algebra. Hint Resolve incl_soustr_in: sets v62. Hint Resolve incl_soustr: sets v62. Hint Resolve incl_soustr_add_l: sets v62. Hint Resolve incl_soustr_add_r: sets v62. Hint Resolve add_soustr_1 add_soustr_2: sets v62. Hint Resolve add_soustr_xy: sets v62. coq-8.4pl2/theories/Sets/Multiset.v0000640000175000001440000001336112010532755016370 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Hypothesis eqA_equiv : Equivalence eqA. Hypothesis Aeq_dec : forall x y:A, {eqA x y} + {~ eqA x y}. Inductive multiset : Type := Bag : (A -> nat) -> multiset. Definition EmptyBag := Bag (fun a:A => 0). Definition SingletonBag (a:A) := Bag (fun a':A => match Aeq_dec a a' with | left _ => 1 | right _ => 0 end). Definition multiplicity (m:multiset) (a:A) : nat := let (f) := m in f a. (** multiset equality *) Definition meq (m1 m2:multiset) := forall a:A, multiplicity m1 a = multiplicity m2 a. Lemma meq_refl : forall x:multiset, meq x x. Proof. destruct x; unfold meq; reflexivity. Qed. Lemma meq_trans : forall x y z:multiset, meq x y -> meq y z -> meq x z. Proof. unfold meq. destruct x; destruct y; destruct z. intros; rewrite H; auto. Qed. Lemma meq_sym : forall x y:multiset, meq x y -> meq y x. Proof. unfold meq. destruct x; destruct y; auto. Qed. (** multiset union *) Definition munion (m1 m2:multiset) := Bag (fun a:A => multiplicity m1 a + multiplicity m2 a). Lemma munion_empty_left : forall x:multiset, meq x (munion EmptyBag x). Proof. unfold meq; unfold munion; simpl; auto. Qed. Lemma munion_empty_right : forall x:multiset, meq x (munion x EmptyBag). Proof. unfold meq; unfold munion; simpl; auto. Qed. Require Plus. (* comm. and ass. of plus *) Lemma munion_comm : forall x y:multiset, meq (munion x y) (munion y x). Proof. unfold meq; unfold multiplicity; unfold munion. destruct x; destruct y; auto with arith. Qed. Lemma munion_ass : forall x y z:multiset, meq (munion (munion x y) z) (munion x (munion y z)). Proof. unfold meq; unfold munion; unfold multiplicity. destruct x; destruct y; destruct z; auto with arith. Qed. Lemma meq_left : forall x y z:multiset, meq x y -> meq (munion x z) (munion y z). Proof. unfold meq; unfold munion; unfold multiplicity. destruct x; destruct y; destruct z. intros; elim H; auto with arith. Qed. Lemma meq_right : forall x y z:multiset, meq x y -> meq (munion z x) (munion z y). Proof. unfold meq; unfold munion; unfold multiplicity. destruct x; destruct y; destruct z. intros; elim H; auto. Qed. 
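(* Illustrative addition, not part of the original file (the lemma name is
   ours): it uses only [meq_trans], [meq_sym] and the two [munion_empty_*]
   lemmas above, i.e. it reasons about [munion] purely through [meq], in the
   abstract style advocated by the remark that follows. *)
Lemma munion_empty_comm_example :
 forall x:multiset, meq (munion EmptyBag x) (munion x EmptyBag).
Proof.
intro x; apply meq_trans with x.
apply meq_sym; apply munion_empty_left.
apply munion_empty_right.
Qed.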
(** Here we should make multiset an abstract datatype, by hiding [Bag], [munion], [multiplicity]; all further properties are proved abstractly *) Lemma munion_rotate : forall x y z:multiset, meq (munion x (munion y z)) (munion z (munion x y)). Proof. intros; apply (op_rotate multiset munion meq). apply munion_comm. apply munion_ass. exact meq_trans. exact meq_sym. trivial. Qed. Lemma meq_congr : forall x y z t:multiset, meq x y -> meq z t -> meq (munion x z) (munion y t). Proof. intros; apply (cong_congr multiset munion meq); auto using meq_left, meq_right. exact meq_trans. Qed. Lemma munion_perm_left : forall x y z:multiset, meq (munion x (munion y z)) (munion y (munion x z)). Proof. intros; apply (perm_left multiset munion meq); auto using munion_comm, munion_ass, meq_left, meq_right, meq_sym. exact meq_trans. Qed. Lemma multiset_twist1 : forall x y z t:multiset, meq (munion x (munion (munion y z) t)) (munion (munion y (munion x t)) z). Proof. intros; apply (twist multiset munion meq); auto using munion_comm, munion_ass, meq_sym, meq_left, meq_right. exact meq_trans. Qed. Lemma multiset_twist2 : forall x y z t:multiset, meq (munion x (munion (munion y z) t)) (munion (munion y (munion x z)) t). Proof. intros; apply meq_trans with (munion (munion x (munion y z)) t). apply meq_sym; apply munion_ass. apply meq_left; apply munion_perm_left. Qed. (** specific for treesort *) Lemma treesort_twist1 : forall x y z t u:multiset, meq u (munion y z) -> meq (munion x (munion u t)) (munion (munion y (munion x t)) z). Proof. intros; apply meq_trans with (munion x (munion (munion y z) t)). apply meq_right; apply meq_left; trivial. apply multiset_twist1. Qed. Lemma treesort_twist2 : forall x y z t u:multiset, meq u (munion y z) -> meq (munion x (munion u t)) (munion (munion y (munion x z)) t). Proof. intros; apply meq_trans with (munion x (munion (munion y z) t)). apply meq_right; apply meq_left; trivial. apply multiset_twist2. Qed. (** SingletonBag *) Lemma meq_singleton : forall a a', eqA a a' -> meq (SingletonBag a) (SingletonBag a'). Proof. intros; red; simpl; intro a0. destruct (Aeq_dec a a0) as [Ha|Ha]; rewrite H in Ha; decide (Aeq_dec a' a0) with Ha; reflexivity. Qed. (*i theory of minter to do similarly Require Min. (* multiset intersection *) Definition minter := [m1,m2:multiset] (Bag [a:A](min (multiplicity m1 a)(multiplicity m2 a))). i*) End multiset_defs. Unset Implicit Arguments. Hint Unfold meq multiplicity: v62 datatypes. Hint Resolve munion_empty_right munion_comm munion_ass meq_left meq_right munion_empty_left: v62 datatypes. Hint Immediate meq_sym: v62 datatypes. coq-8.4pl2/theories/Sets/Uniset.v0000640000175000001440000001261712010532755016034 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Hypothesis eqA_dec : forall x y:A, {eqA x y} + {~ eqA x y}. Inductive uniset : Set := Charac : (A -> bool) -> uniset. Definition charac (s:uniset) (a:A) : bool := let (f) := s in f a. Definition Emptyset := Charac (fun a:A => false). Definition Fullset := Charac (fun a:A => true). Definition Singleton (a:A) := Charac (fun a':A => match eqA_dec a a' with | left h => true | right h => false end). Definition In (s:uniset) (a:A) : Prop := charac s a = true. Hint Unfold In. (** uniset inclusion *) Definition incl (s1 s2:uniset) := forall a:A, leb (charac s1 a) (charac s2 a). Hint Unfold incl. (** uniset equality *) Definition seq (s1 s2:uniset) := forall a:A, charac s1 a = charac s2 a. 
Hint Unfold seq. Lemma leb_refl : forall b:bool, leb b b. Proof. destruct b; simpl; auto. Qed. Hint Resolve leb_refl. Lemma incl_left : forall s1 s2:uniset, seq s1 s2 -> incl s1 s2. Proof. unfold incl; intros s1 s2 E a; elim (E a); auto. Qed. Lemma incl_right : forall s1 s2:uniset, seq s1 s2 -> incl s2 s1. Proof. unfold incl; intros s1 s2 E a; elim (E a); auto. Qed. Lemma seq_refl : forall x:uniset, seq x x. Proof. destruct x; unfold seq; auto. Qed. Hint Resolve seq_refl. Lemma seq_trans : forall x y z:uniset, seq x y -> seq y z -> seq x z. Proof. unfold seq. destruct x; destruct y; destruct z; simpl; intros. rewrite H; auto. Qed. Lemma seq_sym : forall x y:uniset, seq x y -> seq y x. Proof. unfold seq. destruct x; destruct y; simpl; auto. Qed. (** uniset union *) Definition union (m1 m2:uniset) := Charac (fun a:A => orb (charac m1 a) (charac m2 a)). Lemma union_empty_left : forall x:uniset, seq x (union Emptyset x). Proof. unfold seq; unfold union; simpl; auto. Qed. Hint Resolve union_empty_left. Lemma union_empty_right : forall x:uniset, seq x (union x Emptyset). Proof. unfold seq; unfold union; simpl. intros x a; rewrite (orb_b_false (charac x a)); auto. Qed. Hint Resolve union_empty_right. Lemma union_comm : forall x y:uniset, seq (union x y) (union y x). Proof. unfold seq; unfold charac; unfold union. destruct x; destruct y; auto with bool. Qed. Hint Resolve union_comm. Lemma union_ass : forall x y z:uniset, seq (union (union x y) z) (union x (union y z)). Proof. unfold seq; unfold union; unfold charac. destruct x; destruct y; destruct z; auto with bool. Qed. Hint Resolve union_ass. Lemma seq_left : forall x y z:uniset, seq x y -> seq (union x z) (union y z). Proof. unfold seq; unfold union; unfold charac. destruct x; destruct y; destruct z. intros; elim H; auto. Qed. Hint Resolve seq_left. Lemma seq_right : forall x y z:uniset, seq x y -> seq (union z x) (union z y). Proof. unfold seq; unfold union; unfold charac. destruct x; destruct y; destruct z. intros; elim H; auto. Qed. Hint Resolve seq_right. (** All the proofs that follow duplicate [Multiset_of_A] *) (** Here we should make uniset an abstract datatype, by hiding [Charac], [union], [charac]; all further properties are proved abstractly *) Require Import Permut. Lemma union_rotate : forall x y z:uniset, seq (union x (union y z)) (union z (union x y)). Proof. intros; apply (op_rotate uniset union seq); auto. exact seq_trans. Qed. Lemma seq_congr : forall x y z t:uniset, seq x y -> seq z t -> seq (union x z) (union y t). Proof. intros; apply (cong_congr uniset union seq); auto. exact seq_trans. Qed. Lemma union_perm_left : forall x y z:uniset, seq (union x (union y z)) (union y (union x z)). Proof. intros; apply (perm_left uniset union seq); auto. exact seq_trans. Qed. Lemma uniset_twist1 : forall x y z t:uniset, seq (union x (union (union y z) t)) (union (union y (union x t)) z). Proof. intros; apply (twist uniset union seq); auto. exact seq_trans. Qed. Lemma uniset_twist2 : forall x y z t:uniset, seq (union x (union (union y z) t)) (union (union y (union x z)) t). Proof. intros; apply seq_trans with (union (union x (union y z)) t). apply seq_sym; apply union_ass. apply seq_left; apply union_perm_left. Qed. (** specific for treesort *) Lemma treesort_twist1 : forall x y z t u:uniset, seq u (union y z) -> seq (union x (union u t)) (union (union y (union x t)) z). Proof. intros; apply seq_trans with (union x (union (union y z) t)). apply seq_right; apply seq_left; trivial. apply uniset_twist1. Qed. 
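(* Illustrative addition, not part of the original file (the lemma name is
   ours): [union_perm_left] above already lets unions be reordered without
   unfolding [charac], in the abstract style suggested by the remark on
   hiding [Charac], [union] and [charac]. *)
Lemma union_empty_swap_example :
 forall x y:uniset,
  seq (union x (union Emptyset y)) (union Emptyset (union x y)).
Proof.
intros; apply union_perm_left.
Qed.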
Lemma treesort_twist2 : forall x y z t u:uniset, seq u (union y z) -> seq (union x (union u t)) (union (union y (union x z)) t). Proof. intros; apply seq_trans with (union x (union (union y z) t)). apply seq_right; apply seq_left; trivial. apply uniset_twist2. Qed. (*i theory of minter to do similarly Require Min. (* uniset intersection *) Definition minter := [m1,m2:uniset] (Charac [a:A](andb (charac m1 a)(charac m2 a))). i*) End defs. Unset Implicit Arguments. coq-8.4pl2/theories/Sets/Powerset.v0000640000175000001440000001500612010532755016370 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* In (Ensemble U) (Power_set A) X. Hint Resolve Definition_of_Power_set. Theorem Empty_set_minimal : forall X:Ensemble U, Included U (Empty_set U) X. intro X; red. intros x H'; elim H'. Qed. Hint Resolve Empty_set_minimal. Theorem Power_set_Inhabited : forall X:Ensemble U, Inhabited (Ensemble U) (Power_set X). intro X. apply Inhabited_intro with (Empty_set U); auto with sets. Qed. Hint Resolve Power_set_Inhabited. Theorem Inclusion_is_an_order : Order (Ensemble U) (Included U). auto 6 with sets. Qed. Hint Resolve Inclusion_is_an_order. Theorem Inclusion_is_transitive : Transitive (Ensemble U) (Included U). elim Inclusion_is_an_order; auto with sets. Qed. Hint Resolve Inclusion_is_transitive. Definition Power_set_PO : Ensemble U -> PO (Ensemble U). intro A; try assumption. apply Definition_of_PO with (Power_set A) (Included U); auto with sets. Defined. Hint Unfold Power_set_PO. Theorem Strict_Rel_is_Strict_Included : same_relation (Ensemble U) (Strict_Included U) (Strict_Rel_of (Ensemble U) (Power_set_PO (Full_set U))). auto with sets. Qed. Hint Resolve Strict_Rel_Transitive Strict_Rel_is_Strict_Included. Lemma Strict_inclusion_is_transitive_with_inclusion : forall x y z:Ensemble U, Strict_Included U x y -> Included U y z -> Strict_Included U x z. intros x y z H' H'0; try assumption. elim Strict_Rel_is_Strict_Included. unfold contains. intros H'1 H'2; try assumption. apply H'1. apply Strict_Rel_Transitive_with_Rel with (y := y); auto with sets. Qed. Lemma Strict_inclusion_is_transitive_with_inclusion_left : forall x y z:Ensemble U, Included U x y -> Strict_Included U y z -> Strict_Included U x z. intros x y z H' H'0; try assumption. elim Strict_Rel_is_Strict_Included. unfold contains. intros H'1 H'2; try assumption. apply H'1. apply Strict_Rel_Transitive_with_Rel_left with (y := y); auto with sets. Qed. Lemma Strict_inclusion_is_transitive : Transitive (Ensemble U) (Strict_Included U). apply cong_transitive_same_relation with (R := Strict_Rel_of (Ensemble U) (Power_set_PO (Full_set U))); auto with sets. Qed. Theorem Empty_set_is_Bottom : forall A:Ensemble U, Bottom (Ensemble U) (Power_set_PO A) (Empty_set U). intro A; apply Bottom_definition; simpl; auto with sets. Qed. Hint Resolve Empty_set_is_Bottom. Theorem Union_minimal : forall a b X:Ensemble U, Included U a X -> Included U b X -> Included U (Union U a b) X. intros a b X H' H'0; red. intros x H'1; elim H'1; auto with sets. Qed. Hint Resolve Union_minimal. Theorem Intersection_maximal : forall a b X:Ensemble U, Included U X a -> Included U X b -> Included U X (Intersection U a b). auto with sets. Qed. Theorem Union_increases_l : forall a b:Ensemble U, Included U a (Union U a b). auto with sets. Qed. Theorem Union_increases_r : forall a b:Ensemble U, Included U b (Union U a b). auto with sets. Qed. 
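(* Illustrative addition, not part of the original file (the lemma name is
   ours): a direct use of the elimination scheme of [Union], in the same
   style as [Union_minimal] above; [auto with sets] closes both branches. *)
Lemma Union_idempotent_example :
 forall a:Ensemble U, Included U (Union U a a) a.
intro a; red.
intros x H'; elim H'; auto with sets.
Qed.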
Theorem Intersection_decreases_l : forall a b:Ensemble U, Included U (Intersection U a b) a. intros a b; red. intros x H'; elim H'; auto with sets. Qed. Theorem Intersection_decreases_r : forall a b:Ensemble U, Included U (Intersection U a b) b. intros a b; red. intros x H'; elim H'; auto with sets. Qed. Hint Resolve Union_increases_l Union_increases_r Intersection_decreases_l Intersection_decreases_r. Theorem Union_is_Lub : forall A a b:Ensemble U, Included U a A -> Included U b A -> Lub (Ensemble U) (Power_set_PO A) (Couple (Ensemble U) a b) (Union U a b). intros A a b H' H'0. apply Lub_definition; simpl. apply Upper_Bound_definition; simpl; auto with sets. intros y H'1; elim H'1; auto with sets. intros y H'1; elim H'1; simpl; auto with sets. Qed. Theorem Intersection_is_Glb : forall A a b:Ensemble U, Included U a A -> Included U b A -> Glb (Ensemble U) (Power_set_PO A) (Couple (Ensemble U) a b) (Intersection U a b). intros A a b H' H'0. apply Glb_definition; simpl. apply Lower_Bound_definition; simpl; auto with sets. apply Definition_of_Power_set. generalize Inclusion_is_transitive; intro IT; red in IT; apply IT with a; auto with sets. intros y H'1; elim H'1; auto with sets. intros y H'1; elim H'1; simpl; auto with sets. Qed. End The_power_set_partial_order. Hint Resolve Empty_set_minimal: sets v62. Hint Resolve Power_set_Inhabited: sets v62. Hint Resolve Inclusion_is_an_order: sets v62. Hint Resolve Inclusion_is_transitive: sets v62. Hint Resolve Union_minimal: sets v62. Hint Resolve Union_increases_l: sets v62. Hint Resolve Union_increases_r: sets v62. Hint Resolve Intersection_decreases_l: sets v62. Hint Resolve Intersection_decreases_r: sets v62. Hint Resolve Empty_set_is_Bottom: sets v62. Hint Resolve Strict_inclusion_is_transitive: sets v62. coq-8.4pl2/LICENSE0000640000175000001440000005747510334351275012661 0ustar notinusers GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. 
To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. 
b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. 
If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. 
Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS coq-8.4pl2/myocamlbuild.ml0000640000175000001440000004316312122347656014661 0ustar notinusers(** * Plugin for building Coq via Ocamlbuild *) open Ocamlbuild_plugin open Ocamlbuild_pack open Printf open Scanf (** WARNING !! this is preliminary stuff. It should allows you to build coq and its libraries if everything goes right. Support for all the build rules and configuration options is progressively added. Tested only on linux + ocaml 3.11 + local + natdynlink for now. Usage: ./configure -local -opt ./build (which launches ocamlbuild coq.otarget) Then you can (hopefully) launch bin/coqtop, bin/coqide and so on. Apart from the links in bin, every created files are in _build. A "./build clean" should give you back a clean source tree *) (** F.A.Q about ocamlbuild: * P / Px ? 
Same, except that the second can be use to signal the main target of a rule, in order to get a nicer log (otherwise the full command is used as target name) *) (** Generic file reader, which produces a list of strings, one per line *) let read_file f = let ic = open_in f and l = ref [] in (try while true do l := (input_line ic)::!l done with End_of_file -> ()); close_in ic; List.rev !l (** Configuration *) (** First, we access coq_config.ml indirectly : we symlink it to myocamlbuild_config.ml, which is linked with this myocamlbuild.ml *) module Coq_config = struct include Myocamlbuild_config end let _ = begin Options.ocamlc := A Coq_config.ocamlc; Options.ocamlopt := A Coq_config.ocamlopt; Options.ocamlmklib := A Coq_config.ocamlmklib; Options.ocamldep := A Coq_config.ocamldep; Options.ocamldoc := A Coq_config.ocamldoc; Options.ocamlyacc := A Coq_config.ocamlyacc; Options.ocamllex := A Coq_config.ocamllex; end let w32 = (Coq_config.arch = "win32") let w32pref = "i586-mingw32msvc" let w32ocamlc = w32pref^"-ocamlc" let w32ocamlopt = w32pref^"-ocamlopt" let w32ocamlmklib = w32pref^"-ocamlmklib" let w32res = w32pref^"-windres" let w32lib = "/usr/"^w32pref^"/lib/" let w32bin = "/usr/"^w32pref^"/bin/" let w32ico = "ide/coq_icon.o" let _ = if w32 then begin Options.ocamlopt := A w32ocamlopt; Options.ocamlmklib := A w32ocamlmklib; end let use_camlp5 = (Coq_config.camlp4 = "camlp5") let camlp4args = if use_camlp5 then [A "pa_extend.cmo";A "q_MLast.cmo";A "pa_macro.cmo"] else [] let ocaml = A Coq_config.ocaml let camlp4o = S ((A Coq_config.camlp4o) :: camlp4args) let camlp4incl = S[A"-I"; A Coq_config.camlp4lib] let camlp4compat = Sh Coq_config.camlp4compat let opt = (Coq_config.best = "opt") let ide = Coq_config.has_coqide let hasdynlink = Coq_config.has_natdynlink let os5fix = (Coq_config.natdynlinkflag = "os5fixme") let flag_dynlink = if hasdynlink then A"-DHasDynlink" else N let dep_dynlink = if hasdynlink then N else Sh"-natdynlink no" let lablgtkincl = Sh Coq_config.coqideincl let local = Coq_config.local let cflags = S[A"-ccopt";A Coq_config.cflags] (** Do we want to inspect .ml generated from .ml4 ? 
*) let readable_genml = false let readable_flag = if readable_genml then A"pr_o.cmo" else N let _build = Options.build_dir (** Abbreviations about files *) let core_libs = ["lib/lib"; "kernel/kernel"; "library/library"; "pretyping/pretyping"; "interp/interp"; "proofs/proofs"; "parsing/parsing"; "tactics/tactics"; "toplevel/toplevel"; "parsing/highparsing"; "tactics/hightactics"] let core_cma = List.map (fun s -> s^".cma") core_libs let core_cmxa = List.map (fun s -> s^".cmxa") core_libs let core_mllib = List.map (fun s -> s^".mllib") core_libs let tolink = "scripts/tolink.ml" let c_headers_base = ["coq_fix_code.h";"coq_instruct.h"; "coq_memory.h"; "int64_emul.h"; "coq_gc.h"; "coq_interp.h"; "coq_values.h"; "int64_native.h"; "coq_jumptbl.h"] let c_headers = List.map ((^) "kernel/byterun/") c_headers_base let coqinstrs = "kernel/byterun/coq_instruct.h" let coqjumps = "kernel/byterun/coq_jumptbl.h" let copcodes = "kernel/copcodes.ml" let libcoqrun = "kernel/byterun/libcoqrun.a" let initialcoq = "states/initial.coq" let init_vo = ["theories/Init/Prelude.vo";"theories/Init/Logic_Type.vo"] let makeinitial = "states/MakeInitial.v" let nmake = "theories/Numbers/Natural/BigN/NMake_gen.v" let nmakegen = "theories/Numbers/Natural/BigN/NMake_gen.ml" let adapt_name (pref,oldsuf,newsuf) f = pref ^ (Filename.chop_suffix f oldsuf) ^ newsuf let get_names (oldsuf,newsuf) s = let pref = Filename.dirname s ^ "/" in List.map (adapt_name (pref,oldsuf,newsuf)) (string_list_of_file s) let get_vo_itargets f = let vo_itargets = get_names (".otarget",".itarget") f in List.flatten (List.map (get_names (".vo",".v")) vo_itargets) let theoriesv = get_vo_itargets "theories/theories.itarget" let pluginsv = get_vo_itargets "plugins/pluginsvo.itarget" let pluginsmllib = get_names (".cma",".mllib") "plugins/pluginsbyte.itarget" (** for correct execution of coqdep_boot, source files should have been imported in _build (and NMake_gen.v should have been created). 
*) let coqdepdeps = theoriesv @ pluginsv @ pluginsmllib let coqtop = "toplevel/coqtop" let coqide = "ide/coqide" let coqdepboot = "tools/coqdep_boot" let coqmktop = "scripts/coqmktop" (** The list of binaries to build: (name of link in bin/, name in _build, install both or only best) *) type links = Both | Best | BestInPlace | Ide let all_binaries = (if w32 then [ "mkwinapp", "tools/mkwinapp", Best ] else []) @ [ "coqtop", coqtop, Both; "coqide", "ide/coqide_main", Ide; "coqmktop", coqmktop, Both; "coqc", "scripts/coqc", Both; "coqchk", "checker/main", Both; "coqdep_boot", coqdepboot, Best; "coqdep", "tools/coqdep", Best; "coqdoc", "tools/coqdoc/main", Best; "coqwc", "tools/coqwc", Best; "coq_makefile", "tools/coq_makefile", Best; "coq-tex", "tools/coq_tex", Best; "gallina", "tools/gallina", Best; "csdpcert", "plugins/micromega/csdpcert", BestInPlace; "fake_ide", "tools/fake_ide", Best; ] let best_oext = if opt then ".native" else ".byte" let best_ext = if opt then ".opt" else ".byte" let best_iext = if ide = "opt" then ".opt" else ".byte" let coqtopbest = coqtop^best_oext (* For inner needs, we rather use the bytecode versions of coqdep and coqmktop: slightly slower but compile quickly, and ok with w32 cross-compilation *) let coqdep_boot = coqdepboot^".byte" let coqmktop_boot = coqmktop^".byte" let binariesopt_deps = let addext b = b ^ ".native" in let rec deps = function | [] -> [] | (_,b,Ide)::l -> if ide="opt" then addext b :: deps l else deps l | (_,b,_)::l -> if opt then addext b :: deps l else deps l in deps all_binaries let binariesbyte_deps = let addext b = b ^ ".byte" in let rec deps = function | [] -> [] | (_,b,Ide)::l -> if ide<>"no" then addext b :: deps l else deps l | (_,b,Both)::l -> addext b :: deps l | (_,b,_)::l -> if not opt then addext b :: deps l else deps l in deps all_binaries let ln_sf toward f = Command.execute ~quiet:true (Cmd (S [A"ln";A"-sf";P toward;P f])) let rec make_bin_links = function | [] -> () | (b,ob,kind)::l -> make_bin_links l; let obd = "../"^ !_build^"/"^ob and bd = "bin/"^b in match kind with | Ide when ide <> "no" -> ln_sf (obd^".byte") (bd^".byte"); if ide = "opt" then ln_sf (obd^".native") (bd^".opt"); ln_sf (b^best_iext) bd | Ide (* when ide = "no" *) -> () | Both -> ln_sf (obd^".byte") (bd^".byte"); if opt then ln_sf (obd^".native") (bd^".opt"); ln_sf (b^best_ext) bd | Best -> ln_sf (obd^best_oext) bd | BestInPlace -> ln_sf (b^best_oext) (!_build^"/"^ob) let incl f = Ocaml_utils.ocaml_include_flags f let cmd cl = (fun _ _ -> (Cmd (S cl))) let initial_actions () = begin (** We "pre-create" a few subdirs in _build *) Shell.mkdir_p (!_build^"/dev"); Shell.mkdir_p (!_build^"/bin"); Shell.mkdir_p (!_build^"/plugins/micromega"); make_bin_links all_binaries; end let extra_rules () = begin (** Virtual target for building all binaries *) rule "binariesopt" ~stamp:"binariesopt" ~deps:binariesopt_deps (fun _ _ -> Nop); rule "binariesbyte" ~stamp:"binariesbyte" ~deps:binariesbyte_deps (fun _ _ -> Nop); rule "binaries" ~stamp:"binaries" ~deps:["binariesbyte";"binariesopt"] (fun _ _ -> Nop); (** We create a special coq_config which mentions _build *) rule "coq_config.ml" ~prod:"coq_config.ml" ~dep:"config/coq_config.ml" (fun _ _ -> if w32 then cp "config/coq_config.ml" "coq_config.ml" else let lines = read_file "config/coq_config.ml" in let lines = List.map (fun s -> s^"\n") lines in let line0 = "\n(* Adapted variables for ocamlbuild *)\n" in (* TODO : line2 isn't completely accurate with respect to ./configure: the case of -local -coqrunbyteflags foo 
isn't supported *) let line1 = "let coqrunbyteflags = \"-dllib -lcoqrun\"\n" in Echo (lines @ (if local then [line0;line1] else []), "coq_config.ml")); (** Camlp4 extensions *) rule ".ml4.ml" ~dep:"%.ml4" ~prod:"%.ml" (fun env _ -> let ml4 = env "%.ml4" and ml = env "%.ml" in Cmd (S[camlp4o;T(tags_of_pathname ml4 ++ "p4mod");readable_flag; T(tags_of_pathname ml4 ++ "p4option"); camlp4compat; A"-o"; Px ml; A"-impl"; P ml4])); flag_and_dep ["p4mod"; "use_grammar"] (P "parsing/grammar.cma"); flag_and_dep ["p4mod"; "use_constr"] (P "parsing/q_constr.cmo"); flag_and_dep ["p4mod"; "use_compat5"] (P "tools/compat5.cmo"); flag_and_dep ["p4mod"; "use_compat5b"] (P "tools/compat5b.cmo"); if w32 then begin flag ["p4mod"] (A "-DWIN32"); dep ["ocaml"; "link"; "ide"] ["ide/ide_win32_stubs.o"]; end; if not use_camlp5 then begin let mlp_cmo s = let src=s^".mlp" and dst=s^".cmo" in rule (src^".cmo") ~dep:src ~prod:dst ~insert:`top (fun env _ -> Cmd (S [!Options.ocamlc; A"-c"; A"-pp"; Quote (S [camlp4o;A"-impl"]); camlp4incl; A"-impl"; P src])) in mlp_cmo "tools/compat5"; mlp_cmo "tools/compat5b"; end; ocaml_lib ~extern:true ~dir:Coq_config.camlp4lib ~tag_name:"use_camlpX" ~byte:true ~native:true (if use_camlp5 then "gramlib" else "camlp4lib"); (** Special case of toplevel/mltop.ml4: - mltop.ml will be the old mltop.optml and be used to obtain mltop.cmx - we add a special mltop.ml4 --> mltop.cmo rule, before all the others *) flag ["is_mltop"; "p4option"] flag_dynlink; (*TODO: this is rather ugly for a simple file, we should try to benefit more from predefined rules *) let mltop = "toplevel/mltop" in let ml4 = mltop^".ml4" and mlo = mltop^".cmo" and ml = mltop^".ml" and mld = mltop^".ml.depends" in rule "mltop_byte" ~deps:[ml4;mld] ~prod:mlo ~insert:`top (fun env build -> Ocaml_compiler.prepare_compile build ml; Cmd (S [!Options.ocamlc; A"-c"; A"-pp"; Quote (S [camlp4o; T(tags_of_pathname ml4 ++ "p4mod"); A"-DByte";A"-DHasDynlink";camlp4compat;A"-impl"]); A"-rectypes"; camlp4incl; incl ml4; A"-impl"; P ml4])); (** All caml files are compiled with -rectypes and +camlp4/5 and ide files need +lablgtk2 *) flag ["compile"; "ocaml"] (S [A"-rectypes"; camlp4incl]); flag ["link"; "ocaml"] (S [A"-rectypes"; camlp4incl]); flag ["ocaml"; "ide"; "compile"] lablgtkincl; flag ["ocaml"; "ide"; "link"] lablgtkincl; flag ["ocaml"; "ide"; "link"; "byte"] (S [A"lablgtk.cma"; A"gtkThread.cmo"]); flag ["ocaml"; "ide"; "link"; "native"] (S [A"lablgtk.cmxa"; A"gtkThread.cmx"]); (** C code for the VM *) dep ["compile"; "c"] c_headers; flag ["compile"; "c"] cflags; dep ["ocaml"; "use_libcoqrun"; "compile"] [libcoqrun]; dep ["ocaml"; "use_libcoqrun"; "link"; "native"] [libcoqrun]; flag ["ocaml"; "use_libcoqrun"; "link"; "byte"] (Sh Coq_config.coqrunbyteflags); (* we need to use a different ocamlc. 
For now we copy the rule *) if w32 then rule ".c.o" ~deps:("%.c"::c_headers) ~prod:"%.o" ~insert:`top (fun env _ -> let c = env "%.c" in let o = env "%.o" in Seq [Cmd (S [P w32ocamlc;cflags;A"-c";Px c]); mv (Filename.basename o) o]); (** VM: Generation of coq_jumbtbl.h and copcodes.ml from coq_instruct.h *) rule "coqinstrs" ~dep:coqinstrs ~prods:[coqjumps;copcodes] (fun _ _ -> let jmps = ref [] and ops = ref [] and i = ref 0 in let add_instr instr comma = if instr = "" then failwith "Empty" else begin jmps:=sprintf "&&coq_lbl_%s%s \n" instr comma :: !jmps; ops:=sprintf "let op%s = %d\n" instr !i :: !ops; incr i end in (** we recognize comma-separated uppercase instruction names *) let parse_line s = let b = Scanning.from_string s in try while true do bscanf b " %[A-Z0-9_]%[,]" add_instr done with _ -> () in List.iter parse_line (read_file coqinstrs); Seq [Echo (List.rev !jmps, coqjumps); Echo (List.rev !ops, copcodes)]); (** Generation of tolink.ml *) rule tolink ~deps:core_mllib ~prod:tolink (fun _ _ -> let cat s = String.concat " " (string_list_of_file s) in let core_mods = String.concat " " (List.map cat core_mllib) in let core_cmas = String.concat " " core_cma in Echo (["let copts = \"-cclib -lcoqrun\"\n"; "let core_libs = \"coq_config.cmo "^core_cmas^"\"\n"; "let core_objs = \"Coq_config "^core_mods^"\"\n"], tolink)); (** For windows, building coff object file from a .rc (for the icon) *) if w32 then rule ".rc.o" ~deps:["%.rc";"ide/coq.ico"] ~prod:"%.o" (fun env _ -> let rc = env "%.rc" and o = env "%.o" in Cmd (S [P w32res;A "--input-format";A "rc";A "--input";P rc; A "--output-format";A "coff";A "--output"; Px o])); (** Embed the Coq icon inside the windows version of Coqide *) if w32 then dep ["link"; "ocaml"; "program"; "ide"] [w32ico]; if w32 then flag ["link"; "ocaml"; "program"; "ide"] (P w32ico); (** Earlier we tried to make Coqide a console-free win32 app, but that was troublesome (unavailable stdout/stderr, issues with the stop button,...). If somebody really wants to try again, the extra args to add are: [A "-ccopt"; A "-link -Wl,-subsystem,windows"] Another solution: use the mkwinapp tool. *) (** The mingw32-ocaml cross-compiler currently uses Filename.dir_sep="/". Let's tweak that... *) if w32 then begin ocaml_lib "tools/win32hack"; List.iter (fun (_,s,_) -> tag_file (s^".native") ["use_win32hack"]) all_binaries end; (** Coqtop *) let () = let fo = coqtop^".native" and fb = coqtop^".byte" in let depsall = (if w32 then [w32ico] else [])@[coqmktop_boot;libcoqrun] in let depso = "coq_config.cmx" :: core_cmxa in let depsb = "coq_config.cmo" :: core_cma in let w32flag = if not w32 then N else S ([A"-camlbin";A w32bin;A "-ccopt";P w32ico]) in if opt then rule fo ~prod:fo ~deps:(depsall@depso) ~insert:`top (cmd [P coqmktop_boot;w32flag;A"-boot";A"-opt";incl fo;camlp4incl;A"-o";Px fo]); rule fb ~prod:fb ~deps:(depsall@depsb) ~insert:`top (cmd [P coqmktop_boot;w32flag;A"-boot";A"-top";incl fb;camlp4incl;A"-o";Px fb]); in (** Coq files dependencies *) rule "coqdepready" ~stamp:"coqdepready" ~deps:coqdepdeps (fun _ _ -> Nop); rule ".v.d" ~prod:"%.v.depends" ~deps:["%.v";coqdep_boot;"coqdepready"] (fun env _ -> let v = env "%.v" and vd = env "%.v.depends" in (** NB: this relies on all .v files being already in _build. *) Cmd (S [P coqdep_boot;dep_dynlink;A"-slash";P v;Sh">";Px vd])); (** Coq files compilation *) let coq_build_dep f build = (** NB: this relies on coqdep producing a single Makefile line for one .v file, with some specific shape "f.vo ...: f.v deps.vo ..." 
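For instance (a hypothetical illustration, not an excerpt from an actual dependency file), such a line could look like "theories/Init/Peano.vo ...: theories/Init/Peano.v theories/Init/Logic.vo ...". The [get_deps] function below keeps only the dependencies appearing after the ":", skipping the .v file itself.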
*) let src = f^".v" in let depends = f^".v.depends" in let rec get_deps keep = function | [] -> [] | d::deps when d = src -> get_deps keep deps | d::deps when keep -> [d] :: get_deps keep deps | d::deps -> get_deps (String.contains d ':') deps in let d = get_deps false (string_list_of_file depends) in List.iter Outcome.ignore_good (build d) in let coq_v_rule d init = let bootflag = if init then A"-nois" else N in let gendep = if init then coqtopbest else initialcoq in rule (d^".v.vo") ~prods:[d^"%.vo";d^"%.glob"] ~deps:[gendep;d^"%.v";d^"%.v.depends"] (fun env build -> let f = env (d^"%") in coq_build_dep f build; Cmd (S [P coqtopbest;A"-boot";bootflag;A"-compile";Px f])) in coq_v_rule "theories/Init/" true; coq_v_rule "" false; (** Initial state *) rule "initial.coq" ~prod:initialcoq ~deps:(makeinitial::init_vo) (cmd [P coqtopbest;A"-boot";A"-batch";A"-nois";A"-notop";A"-silent"; A"-l";P makeinitial; A"-outputstate";Px initialcoq]); (** Generation of _plugin_mod.ml files *) rule "_mod.ml" ~prod:"%_plugin_mod.ml" ~dep:"%_plugin.mllib" (fun env _ -> let line s = "let _ = Mltop.add_known_module \""^s^"\"\n" in let mods = string_list_of_file (env "%_plugin.mllib") @ [Filename.basename (env "%_plugin")] in Echo (List.map line mods, env "%_plugin_mod.ml")); (** Rule for native dynlinkable plugins *) rule ".cmxa.cmxs" ~prod:"%.cmxs" ~dep:"%.cmxa" (fun env _ -> let cmxs = Px (env "%.cmxs") and cmxa = P (env "%.cmxa") in if os5fix then Cmd (S [A"../dev/ocamlopt_shared_os5fix.sh"; !Options.ocamlopt; cmxs]) else Cmd (S [!Options.ocamlopt;A"-linkall";A"-shared";A"-o";cmxs;cmxa])); (** Generation of NMake.v from NMake_gen.ml *) rule "NMake" ~prod:nmake ~dep:nmakegen (cmd [ocaml;P nmakegen;Sh ">";Px nmake]); end (** Registration of our rules (after the standard ones) *) let _ = dispatch begin function | After_rules -> initial_actions (); extra_rules () | _ -> () end (** TODO / Remarques: * Apres un premier build, le second prend du temps, meme cached: 1 min 25 pour les 2662 targets en cache. Etonnement, refaire coqtop.byte ne prend que ~4s, au lieu des ~40s pour coqtop.opt. A comprendre ... 
* Parallelisation: vraiment pas top *) coq-8.4pl2/toplevel/0000750000175000001440000000000012127276552013471 5ustar notinuserscoq-8.4pl2/toplevel/vernacinterp.mli0000640000175000001440000000156312010532755016671 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* exn val vinterp_add : string -> (raw_generic_argument list -> unit -> unit) -> unit val overwriting_vinterp_add : string -> (raw_generic_argument list -> unit -> unit) -> unit val vinterp_init : unit -> unit val call : string * raw_generic_argument list -> unit coq-8.4pl2/toplevel/ind_tables.mli0000640000175000001440000000363212010532755016274 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr array type individual_scheme_object_function = inductive -> constr (** Main functions to register a scheme builder *) val declare_mutual_scheme_object : string -> ?aux:string -> mutual_scheme_object_function -> mutual scheme_kind val declare_individual_scheme_object : string -> ?aux:string -> individual_scheme_object_function -> individual scheme_kind (* val declare_scheme : 'a scheme_kind -> (inductive * constant) array -> unit *) (** Force generation of a (mutually) scheme with possibly user-level names *) val define_individual_scheme : individual scheme_kind -> Declare.internal_flag (** internal *) -> identifier option -> inductive -> constant val define_mutual_scheme : mutual scheme_kind -> Declare.internal_flag (** internal *) -> (int * identifier) list -> mutual_inductive -> constant array (** Main function to retrieve a scheme in the cache or to generate it *) val find_scheme : 'a scheme_kind -> inductive -> constant val check_scheme : 'a scheme_kind -> inductive -> bool coq-8.4pl2/toplevel/usage.mli0000640000175000001440000000170112010532755015267 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a (** {6 Prints the usage on the error output, preceeded by a user-provided message. } *) val print_usage : string -> unit (** {6 Prints the usage on the error output. } *) val print_usage_coqtop : unit -> unit val print_usage_coqc : unit -> unit (** {6 Prints the configuration information } *) val print_config : unit -> unit coq-8.4pl2/toplevel/lemmas.ml0000640000175000001440000003221712111141101015255 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (pi2 (Global.lookup_named id),variable_opacity id) | ConstRef cst -> let cb = Global.lookup_constant cst in (Option.map Declarations.force (body_of_constant cb), is_opaque cb) | _ -> assert false let adjust_guardness_conditions const = function | [] -> const (* Not a recursive statement *) | possible_indexes -> (* Try all combinations... 
not optimal *) match kind_of_term const.const_entry_body with | Fix ((nv,0),(_,_,fixdefs as fixdecls)) -> (* let possible_indexes = List.map2 (fun i c -> match i with Some i -> i | None -> interval 0 (List.length ((lam_assum c)))) lemma_guard (Array.to_list fixdefs) in *) let indexes = search_guard dummy_loc (Global.env()) possible_indexes fixdecls in { const with const_entry_body = mkFix ((indexes,0),fixdecls) } | c -> const let find_mutually_recursive_statements thms = let n = List.length thms in let inds = List.map (fun (id,(t,impls,annot)) -> let (hyps,ccl) = decompose_prod_assum t in let x = (id,(t,impls)) in match annot with (* Explicit fixpoint decreasing argument is given *) | Some (Some (_,id),CStructRec) -> let i,b,typ = lookup_rel_id id hyps in (match kind_of_term t with | Ind (kn,_ as ind) when let mind = Global.lookup_mind kn in mind.mind_finite & b = None -> [ind,x,i],[] | _ -> error "Decreasing argument is not an inductive assumption.") (* Unsupported cases *) | Some (_,(CWfRec _|CMeasureRec _)) -> error "Only structural decreasing is supported for mutual statements." (* Cofixpoint or fixpoint w/o explicit decreasing argument *) | None | Some (None, CStructRec) -> let whnf_hyp_hds = map_rel_context_in_env (fun env c -> fst (whd_betadeltaiota_stack env Evd.empty c)) (Global.env()) hyps in let ind_hyps = List.flatten (list_map_i (fun i (_,b,t) -> match kind_of_term t with | Ind (kn,_ as ind) when let mind = Global.lookup_mind kn in mind.mind_finite & b = None -> [ind,x,i] | _ -> []) 0 (List.rev whnf_hyp_hds)) in let ind_ccl = let cclenv = push_rel_context hyps (Global.env()) in let whnf_ccl,_ = whd_betadeltaiota_stack cclenv Evd.empty ccl in match kind_of_term whnf_ccl with | Ind (kn,_ as ind) when let mind = Global.lookup_mind kn in mind.mind_ntypes = n & not mind.mind_finite -> [ind,x,0] | _ -> [] in ind_hyps,ind_ccl) thms in let inds_hyps,ind_ccls = List.split inds in let of_same_mutind ((kn,_),_,_) = function ((kn',_),_,_) -> kn = kn' in (* Check if all conclusions are coinductive in the same type *) (* (degenerated cartesian product since there is at most one coind ccl) *) let same_indccl = list_cartesians_filter (fun hyp oks -> if List.for_all (of_same_mutind hyp) oks then Some (hyp::oks) else None) [] ind_ccls in let ordered_same_indccl = List.filter (list_for_all_i (fun i ((kn,j),_,_) -> i=j) 0) same_indccl in (* Check if some hypotheses are inductive in the same type *) let common_same_indhyp = list_cartesians_filter (fun hyp oks -> if List.for_all (of_same_mutind hyp) oks then Some (hyp::oks) else None) [] inds_hyps in let ordered_inds,finite,guard = match ordered_same_indccl, common_same_indhyp with | indccl::rest, _ -> assert (rest=[]); (* One occ. 
of common coind ccls and no common inductive hyps *) if common_same_indhyp <> [] then if_verbose msgnl (str "Assuming mutual coinductive statements."); flush_all (); indccl, true, [] | [], _::_ -> if same_indccl <> [] && list_distinct (List.map pi1 (List.hd same_indccl)) then if_verbose msgnl (strbrk "Coinductive statements do not follow the order of definition, assuming the proof to be by induction."); flush_all (); let possible_guards = List.map (List.map pi3) inds_hyps in (* assume the largest indices as possible *) list_last common_same_indhyp, false, possible_guards | _, [] -> error ("Cannot find common (mutual) inductive premises or coinductive" ^ " conclusions in the statements.") in (finite,guard,None), ordered_inds let look_for_possibly_mutual_statements = function | [id,(t,impls,None)] -> (* One non recursively proved theorem *) None,[id,(t,impls)],None | _::_ as thms -> (* More than one statement and/or an explicit decreasing mark: *) (* we look for a common inductive hyp or a common coinductive conclusion *) let recguard,ordered_inds = find_mutually_recursive_statements thms in let thms = List.map pi2 ordered_inds in Some recguard,thms, Some (List.map (fun (_,_,i) -> succ i) ordered_inds) | [] -> anomaly "Empty list of theorems." (* Saving a goal *) let save id const do_guard (locality,kind) hook = let const = adjust_guardness_conditions const do_guard in let {const_entry_body = pft; const_entry_type = tpo; const_entry_opaque = opacity } = const in let k = logical_kind_of_goal_kind kind in let l,r = match locality with | Local when Lib.sections_are_opened () -> let c = SectionLocalDef (pft, tpo, opacity) in let _ = declare_variable id (Lib.cwd(), c, k) in (Local, VarRef id) | Local | Global -> let kn = declare_constant id (DefinitionEntry const, k) in Autoinstance.search_declaration (ConstRef kn); (Global, ConstRef kn) in Pfedit.delete_current_proof (); definition_message id; hook l r let default_thm_id = id_of_string "Unnamed_thm" let compute_proof_name locality = function | Some (loc,id) -> (* We check existence here: it's a bit late at Qed time *) if Nametab.exists_cci (Lib.make_path id) || is_section_variable id || locality=Global && Nametab.exists_cci (Lib.make_path_except_section id) then user_err_loc (loc,"",pr_id id ++ str " already exists."); id | None -> next_global_ident_away default_thm_id (Pfedit.get_all_proof_names ()) let save_remaining_recthms (local,kind) body opaq i (id,(t_i,(_,imps))) = match body with | None -> (match local with | Local -> let impl=false in (* copy values from Vernacentries *) let k = IsAssumption Conjectural in let c = SectionLocalAssum (t_i,impl) in let _ = declare_variable id (Lib.cwd(),c,k) in (Local,VarRef id,imps) | Global -> let k = IsAssumption Conjectural in let kn = declare_constant id (ParameterEntry (None,t_i,None), k) in (Global,ConstRef kn,imps)) | Some body -> let k = logical_kind_of_goal_kind kind in let body_i = match kind_of_term body with | Fix ((nv,0),decls) -> mkFix ((nv,i),decls) | CoFix (0,decls) -> mkCoFix (i,decls) | _ -> anomaly "Not a proof by induction" in match local with | Local -> let c = SectionLocalDef (body_i, Some t_i, opaq) in let _ = declare_variable id (Lib.cwd(), c, k) in (Local,VarRef id,imps) | Global -> let const = { const_entry_body = body_i; const_entry_secctx = None; const_entry_type = Some t_i; const_entry_opaque = opaq } in let kn = declare_constant id (DefinitionEntry const, k) in (Global,ConstRef kn,imps) let save_hook = ref ignore let set_save_hook f = save_hook := f let get_proof opacity = 
let id,(const,do_guard,persistence,hook) = Pfedit.cook_proof !save_hook in id,{const with const_entry_opaque = opacity},do_guard,persistence,hook let save_named opacity = let p = Proof_global.give_me_the_proof () in Proof.transaction p begin fun () -> let id,const,do_guard,persistence,hook = get_proof opacity in save id const do_guard persistence hook end let check_anonymity id save_ident = if atompart_of_id id <> string_of_id (default_thm_id) then error "This command can only be used for unnamed theorem." let save_anonymous opacity save_ident = let p = Proof_global.give_me_the_proof () in Proof.transaction p begin fun () -> let id,const,do_guard,persistence,hook = get_proof opacity in check_anonymity id save_ident; save save_ident const do_guard persistence hook end let save_anonymous_with_strength kind opacity save_ident = let p = Proof_global.give_me_the_proof () in Proof.transaction p begin fun () -> let id,const,do_guard,_,hook = get_proof opacity in check_anonymity id save_ident; (* we consider that non opaque behaves as local for discharge *) save save_ident const do_guard (Global, Proof kind) hook end (* Starting a goal *) let start_hook = ref ignore let set_start_hook = (:=) start_hook let start_proof id kind c ?init_tac ?(compute_guard=[]) hook = let sign = initialize_named_context_for_proof () in !start_hook c; Pfedit.start_proof id kind sign c ?init_tac ~compute_guard hook let rec_tac_initializer finite guard thms snl = if finite then match List.map (fun (id,(t,_)) -> (id,t)) thms with | (id,_)::l -> Hiddentac.h_mutual_cofix true id l | _ -> assert false else (* nl is dummy: it will be recomputed at Qed-time *) let nl = match snl with | None -> List.map succ (List.map list_last guard) | Some nl -> nl in match List.map2 (fun (id,(t,_)) n -> (id,n,t)) thms nl with | (id,n,_)::l -> Hiddentac.h_mutual_fix true id n l | _ -> assert false let start_proof_with_initialization kind recguard thms snl hook = let intro_tac (_, (_, (ids, _))) = Refiner.tclMAP (function | Name id -> Tactics.intro_mustbe_force id | Anonymous -> Tactics.intro) (List.rev ids) in let init_tac,guard = match recguard with | Some (finite,guard,init_tac) -> let rec_tac = rec_tac_initializer finite guard thms snl in Some (match init_tac with | None -> if Flags.is_auto_intros () then tclTHENS rec_tac (List.map intro_tac thms) else rec_tac | Some tacl -> tclTHENS rec_tac (if Flags.is_auto_intros () then List.map2 (fun tac thm -> tclTHEN tac (intro_tac thm)) tacl thms else tacl)),guard | None -> assert (List.length thms = 1); (if Flags.is_auto_intros () then Some (intro_tac (List.hd thms)) else None), [] in match thms with | [] -> anomaly "No proof to start" | (id,(t,(_,imps)))::other_thms -> let hook strength ref = let other_thms_data = if other_thms = [] then [] else (* there are several theorems defined mutually *) let body,opaq = retrieve_first_recthm ref in list_map_i (save_remaining_recthms kind body opaq) 1 other_thms in let thms_data = (strength,ref,imps)::other_thms_data in List.iter (fun (strength,ref,imps) -> maybe_declare_manual_implicits false ref imps; hook strength ref) thms_data in start_proof id kind t ?init_tac hook ~compute_guard:guard let start_proof_com kind thms hook = let evdref = ref Evd.empty in let env0 = Global.env () in let thms = List.map (fun (sopt,(bl,t,guard)) -> let impls, ((env, ctx), imps) = interp_context_evars evdref env0 bl in let t', imps' = interp_type_evars_impls ~impls ~evdref env t in Sign.iter_rel_context (check_evars env Evd.empty !evdref) ctx; let ids = List.map pi1 ctx in 
(compute_proof_name (fst kind) sopt, (nf_evar !evdref (it_mkProd_or_LetIn t' ctx), (ids, imps @ lift_implicits (List.length ids) imps'), guard))) thms in let recguard,thms,snl = look_for_possibly_mutual_statements thms in start_proof_with_initialization kind recguard thms snl hook (* Admitted *) let admit () = let (id,k,typ,hook) = Pfedit.current_proof_statement () in let e = Pfedit.get_used_variables(), typ, None in let kn = declare_constant id (ParameterEntry e,IsAssumption Conjectural) in Pfedit.delete_current_proof (); assumption_message id; hook Global (ConstRef kn) (* Miscellaneous *) let get_current_context () = try Pfedit.get_current_goal_context () with e when Logic.catchable_exception e -> (Evd.empty, Global.env()) coq-8.4pl2/toplevel/search.mli0000640000175000001440000000440412010532755015433 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* dir_path list * bool -> unit val search_rewrite : constr -> dir_path list * bool -> unit val search_pattern : constr -> dir_path list * bool -> unit val search_about : (bool * glob_search_about_item) list -> dir_path list * bool -> unit (** The filtering function that is by standard search facilities. It can be passed as argument to the raw search functions. It is used in pcoq. *) val filter_by_module_from_list : dir_path list * bool -> global_reference -> env -> 'a -> bool val filter_blacklist : global_reference -> env -> constr -> bool (** raw search functions can be used for various extensions. They are also used for pcoq. *) val gen_filtered_search : (global_reference -> env -> constr -> bool) -> (global_reference -> env -> constr -> unit) -> unit val filtered_search : (global_reference -> env -> constr -> bool) -> (global_reference -> env -> constr -> unit) -> global_reference -> unit val raw_pattern_search : (global_reference -> env -> constr -> bool) -> (global_reference -> env -> constr -> unit) -> constr_pattern -> unit val raw_search_rewrite : (global_reference -> env -> constr -> bool) -> (global_reference -> env -> constr -> unit) -> constr_pattern -> unit val raw_search_about : (global_reference -> env -> constr -> bool) -> (global_reference -> env -> constr -> unit) -> (bool * glob_search_about_item) list -> unit val raw_search_by_head : (global_reference -> env -> constr -> bool) -> (global_reference -> env -> constr -> unit) -> constr_pattern -> unit coq-8.4pl2/toplevel/cerrors.ml0000640000175000001440000001133212010532755015472 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ") else let loc = unloc loc in (int (fst loc) ++ str"-" ++ int (snd loc)) let guill s = "\""^s^"\"" exception EvaluatedError of std_ppcmds * exn option (** Registration of generic errors Nota: explain_exn does NOT end with a newline anymore! 
*) let explain_exn_default = function (* Basic interaction exceptions *) | Stream.Error txt -> hov 0 (str ("Syntax error: " ^ txt ^ ".")) | Token.Error txt -> hov 0 (str ("Syntax error: " ^ txt ^ ".")) | Lexer.Error.E err -> hov 0 (str (Lexer.Error.to_string err)) | Sys_error msg -> hov 0 (str ("System error: " ^ guill msg)) | Out_of_memory -> hov 0 (str "Out of memory.") | Stack_overflow -> hov 0 (str "Stack overflow.") | Timeout -> hov 0 (str "Timeout!") | Sys.Break -> hov 0 (fnl () ++ str "User interrupt.") (* Meta-exceptions *) | Loc.Exc_located (loc,exc) -> hov 0 ((if loc = dummy_loc then (mt ()) else (str"At location " ++ print_loc loc ++ str":" ++ fnl ())) ++ Errors.print_no_anomaly exc) | EvaluatedError (msg,None) -> msg | EvaluatedError (msg,Some reraise) -> msg ++ Errors.print_no_anomaly reraise (* Otherwise, not handled here *) | _ -> raise Errors.Unhandled let _ = Errors.register_handler explain_exn_default (** Pre-explain a vernac interpretation error *) let wrap_vernac_error strm = EvaluatedError (hov 0 (str "Error:" ++ spc () ++ strm), None) let rec process_vernac_interp_error = function | Univ.UniverseInconsistency (o,u,v) -> let msg = if !Constrextern.print_universes then spc() ++ str "(cannot enforce" ++ spc() ++ Univ.pr_uni u ++ spc() ++ str (match o with Univ.Lt -> "<" | Univ.Le -> "<=" | Univ.Eq -> "=") ++ spc() ++ Univ.pr_uni v ++ str")" else mt() in wrap_vernac_error (str "Universe inconsistency" ++ msg ++ str ".") | TypeError(ctx,te) -> wrap_vernac_error (Himsg.explain_type_error ctx Evd.empty te) | PretypeError(ctx,sigma,te) -> wrap_vernac_error (Himsg.explain_pretype_error ctx sigma te) | Typeclasses_errors.TypeClassError(env, te) -> wrap_vernac_error (Himsg.explain_typeclass_error env te) | InductiveError e -> wrap_vernac_error (Himsg.explain_inductive_error e) | Modops.ModuleTypingError e -> wrap_vernac_error (Himsg.explain_module_error e) | Modintern.ModuleInternalizationError e -> wrap_vernac_error (Himsg.explain_module_internalization_error e) | RecursionSchemeError e -> wrap_vernac_error (Himsg.explain_recursion_scheme_error e) | Cases.PatternMatchingError (env,e) -> wrap_vernac_error (Himsg.explain_pattern_matching_error env e) | Tacred.ReductionTacticError e -> wrap_vernac_error (Himsg.explain_reduction_tactic_error e) | Logic.RefinerError e -> wrap_vernac_error (Himsg.explain_refiner_error e) | Nametab.GlobalizationError q -> wrap_vernac_error (str "The reference" ++ spc () ++ Libnames.pr_qualid q ++ spc () ++ str "was not found" ++ spc () ++ str "in the current" ++ spc () ++ str "environment.") | Nametab.GlobalizationConstantError q -> wrap_vernac_error (str "No constant of this name:" ++ spc () ++ Libnames.pr_qualid q ++ str ".") | Refiner.FailError (i,s) -> wrap_vernac_error (str "Tactic failure" ++ (if Lazy.force s <> mt() then str ":" ++ Lazy.force s else mt ()) ++ if i=0 then str "." 
else str " (level " ++ int i ++ str").") | AlreadyDeclared msg -> wrap_vernac_error (msg ++ str ".") | Proof_type.LtacLocated (_,(Refiner.FailError (i,s) as exc)) when Lazy.force s <> mt () -> process_vernac_interp_error exc | Proof_type.LtacLocated (s,exc) -> EvaluatedError (hov 0 (Himsg.explain_ltac_call_trace s ++ fnl()), Some (process_vernac_interp_error exc)) | Loc.Exc_located (loc,exc) -> Loc.Exc_located (loc,process_vernac_interp_error exc) | exc -> exc let _ = Tactic_debug.explain_logic_error := (fun e -> Errors.print (process_vernac_interp_error e)) let _ = Tactic_debug.explain_logic_error_no_anomaly := (fun e -> Errors.print_no_report (process_vernac_interp_error e)) coq-8.4pl2/toplevel/coqinit.mli0000640000175000001440000000166312010532755015640 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val set_rcfile : string -> unit val no_load_rc : unit -> unit val load_rcfile : unit -> unit val push_include : string * Names.dir_path -> unit val push_rec_include : string * Names.dir_path -> unit val init_load_path : unit -> unit val init_library_roots : unit -> unit val init_ocaml_path : unit -> unit val get_compat_version : string -> Flags.compat_version coq-8.4pl2/toplevel/mltop.ml40000640000175000001440000002523212121620060015224 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* canonical_path_name path') in coq_mlpath_copy := path :: List.filter filter !coq_mlpath_copy (* If there is a toplevel under Coq *) type toplevel = { load_obj : string -> unit; use_file : string -> unit; add_dir : string -> unit; ml_loop : unit -> unit } (* Determines the behaviour of Coq with respect to ML files (compiled or not) *) type kind_load = | WithTop of toplevel | WithoutTop (* Must be always initialized *) let load = ref WithoutTop (* Are we in a native version of Coq? *) let is_native = IFDEF Byte THEN false ELSE true END (* Sets and initializes a toplevel (if any) *) let set_top toplevel = load := WithTop toplevel (* Removes the toplevel (if any) *) let remove ()= load := WithoutTop (* Tests if an Ocaml toplevel runs under Coq *) let is_ocaml_top () = match !load with | WithTop _ -> true |_ -> false (* Tests if we can load ML files *) let has_dynlink = IFDEF HasDynlink THEN true ELSE false END (* Runs the toplevel loop of Ocaml *) let ocaml_toploop () = match !load with | WithTop t -> Printexc.catch t.ml_loop () | _ -> () (* Dynamic loading of .cmo/.cma *) let dir_ml_load s = match !load with | WithTop t -> (try t.load_obj s with | (UserError _ | Failure _ | Anomaly _ | Not_found as u) -> raise u | e when Errors.noncritical e -> errorlabstrm "Mltop.load_object" (str"Cannot link ml-object " ++ str s ++ str" to Coq code.")) (* TO DO: .cma loading without toplevel *) | WithoutTop -> IFDEF HasDynlink THEN (* WARNING * if this code section starts to use a module not used elsewhere * in this file, the Makefile dependency logic needs to be updated. 
*) let warn = Flags.is_verbose() in let _,gname = find_file_in_path ~warn !coq_mlpath_copy s in try Dynlink.loadfile gname; with | Dynlink.Error a -> errorlabstrm "Mltop.load_object" (str (Dynlink.error_message a)) ELSE errorlabstrm "Mltop.no_load_object" (str"Loading of ML object file forbidden in a native Coq.") END (* Dynamic interpretation of .ml *) let dir_ml_use s = match !load with | WithTop t -> t.use_file s | _ -> warning "Cannot access the ML compiler" (* Adds a path to the ML paths *) let add_ml_dir s = match !load with | WithTop t -> t.add_dir s; keep_copy_mlpath s | WithoutTop when has_dynlink -> keep_copy_mlpath s | _ -> () (* For Rec Add ML Path *) let add_rec_ml_dir unix_path = List.iter (fun (lp,_) -> add_ml_dir lp) (all_subdirs ~unix_path) (* Adding files to Coq and ML loadpath *) let add_path ~unix_path:dir ~coq_root:coq_dirpath = if exists_dir dir then begin add_ml_dir dir; Library.add_load_path true (dir,coq_dirpath) end else msg_warning (str ("Cannot open " ^ dir)) let convert_string d = try Names.id_of_string d with e when Errors.noncritical e -> if_warn msg_warning (str ("Directory "^d^" cannot be used as a Coq identifier (skipped)")); flush_all (); failwith "caught" let add_rec_path ~unix_path ~coq_root = if exists_dir unix_path then let dirs = all_subdirs ~unix_path in let prefix = Names.repr_dirpath coq_root in let convert_dirs (lp,cp) = (lp,Names.make_dirpath (List.map convert_string (List.rev cp)@prefix)) in let dirs = map_succeed convert_dirs dirs in List.iter (fun lpe -> add_ml_dir (fst lpe)) dirs; add_ml_dir unix_path; List.iter (Library.add_load_path false) dirs; Library.add_load_path true (unix_path, coq_root) else msg_warning (str ("Cannot open " ^ unix_path)) (* convertit un nom quelconque en nom de fichier ou de module *) let mod_of_name name = let base = if Filename.check_suffix name ".cmo" then Filename.chop_suffix name ".cmo" else name in String.capitalize base let get_ml_object_suffix name = if Filename.check_suffix name ".cmo" then Some ".cmo" else if Filename.check_suffix name ".cma" then Some ".cma" else if Filename.check_suffix name ".cmxs" then Some ".cmxs" else None let file_of_name name = let name = String.uncapitalize name in let suffix = get_ml_object_suffix name in let fail s = errorlabstrm "Mltop.load_object" (str"File not found on loadpath : " ++ str s) in if is_native then let name = match suffix with | Some ((".cmo"|".cma") as suffix) -> (Filename.chop_suffix name suffix) ^ ".cmxs" | Some ".cmxs" -> name | _ -> name ^ ".cmxs" in if is_in_path !coq_mlpath_copy name then name else fail name else let (full, base) = match suffix with | Some ".cmo" | Some ".cma" -> true, name | Some ".cmxs" -> false, Filename.chop_suffix name ".cmxs" | _ -> false, name in if full then if is_in_path !coq_mlpath_copy base then base else fail base else let name = base ^ ".cmo" in if is_in_path !coq_mlpath_copy name then name else let name = base ^ ".cma" in if is_in_path !coq_mlpath_copy name then name else fail (base ^ ".cm[oa]") (** Is the ML code of the standard library placed into loadable plugins or statically compiled into coqtop ? For the moment this choice is made according to the presence of native dynlink : even if bytecode coqtop could always load plugins, we prefer to have uniformity between bytecode and native versions. *) (* [known_loaded_module] contains the names of the loaded ML modules * (linked or loaded with load_object). It is used not to load a * module twice. It is NOT the list of ML modules Coq knows. 
*) let known_loaded_modules = ref Stringset.empty let add_known_module mname = let mname = String.capitalize mname in known_loaded_modules := Stringset.add mname !known_loaded_modules let module_is_known mname = Stringset.mem (String.capitalize mname) !known_loaded_modules (** A plugin is just an ML module with an initialization function. *) let known_loaded_plugins = ref Stringmap.empty let add_known_plugin init name = let name = String.capitalize name in add_known_module name; known_loaded_plugins := Stringmap.add name init !known_loaded_plugins let init_known_plugins () = Stringmap.iter (fun _ f -> f()) !known_loaded_plugins (** ml object = ml module or plugin *) let init_ml_object mname = try Stringmap.find mname !known_loaded_plugins () with Not_found -> () let load_ml_object mname fname= dir_ml_load fname; add_known_module mname; init_ml_object mname (* Summary of declared ML Modules *) (* List and not Stringset because order is important: most recent first. *) let loaded_modules = ref [] let get_loaded_modules () = List.rev !loaded_modules let add_loaded_module md = loaded_modules := md :: !loaded_modules let reset_loaded_modules () = loaded_modules := [] let if_verbose_load verb f name fname = if not verb then f name fname else let info = "[Loading ML file "^fname^" ..." in try f name fname; msgnl (str (info^" done]")); with reraise -> msgnl (str (info^" failed]")); raise reraise (** Load a module for the first time (i.e. dynlink it) or simulate its reload (i.e. doing nothing except maybe an initialization function). *) let cache_ml_object verb reinit name = begin if module_is_known name then (if reinit then init_ml_object name) else if not has_dynlink then error ("Dynamic link not supported (module "^name^")") else if_verbose_load (verb && is_verbose ()) load_ml_object name (file_of_name name) end; add_loaded_module name let unfreeze_ml_modules x = reset_loaded_modules (); List.iter (cache_ml_object false false) x let _ = Summary.declare_summary "ML-MODULES" { Summary.freeze_function = get_loaded_modules; Summary.unfreeze_function = unfreeze_ml_modules; Summary.init_function = reset_loaded_modules } (* Liboject entries of declared ML Modules *) type ml_module_object = { mlocal : Vernacexpr.locality_flag; mnames : string list } let cache_ml_objects (_,{mnames=mnames}) = List.iter (cache_ml_object true true) mnames let classify_ml_objects ({mlocal=mlocal} as o) = if mlocal then Dispose else Substitute o let inMLModule : ml_module_object -> obj = declare_object {(default_object "ML-MODULE") with load_function = (fun _ -> cache_ml_objects); cache_function = cache_ml_objects; subst_function = (fun (_,o) -> o); classify_function = classify_ml_objects } let declare_ml_modules local l = let l = List.map mod_of_name l in Lib.add_anonymous_leaf (inMLModule {mlocal=local; mnames=l}) let print_ml_path () = let l = !coq_mlpath_copy in ppnl (str"ML Load Path:" ++ fnl () ++ str" " ++ hv 0 (prlist_with_sep pr_fnl pr_str l)) (* Printing of loaded ML modules *) let print_ml_modules () = let l = get_loaded_modules () in pp (str"Loaded ML Modules: " ++ pr_vertical_list pr_str l) coq-8.4pl2/toplevel/himsg.mli0000640000175000001440000000310412010532755015271 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Evd.evar_map -> type_error -> std_ppcmds val explain_pretype_error : env -> Evd.evar_map -> pretype_error -> std_ppcmds val explain_inductive_error : inductive_error -> std_ppcmds val 
explain_typeclass_error : env -> typeclass_error -> Pp.std_ppcmds val explain_recursion_scheme_error : recursion_scheme_error -> std_ppcmds val explain_refiner_error : refiner_error -> std_ppcmds val explain_pattern_matching_error : env -> pattern_matching_error -> std_ppcmds val explain_reduction_tactic_error : Tacred.reduction_tactic_error -> std_ppcmds val explain_ltac_call_trace : int * Proof_type.ltac_call_kind * Proof_type.ltac_trace * Util.loc -> std_ppcmds val explain_module_error : Modops.module_typing_error -> std_ppcmds val explain_module_internalization_error : Modintern.module_internalization_error -> std_ppcmds coq-8.4pl2/toplevel/ide_intf.ml0000640000175000001440000004442512121620060015573 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string; rewind : int -> int; goals : unit -> goals option; evars : unit -> evar list option; hints : unit -> (hint list * hint) option; status : unit -> status; search : search_flags -> string coq_object list; get_options : unit -> (option_name * option_state) list; set_options : (option_name * option_value) list -> unit; inloadpath : string -> bool; mkcases : string -> string list list; quit : unit -> unit; about : unit -> coq_info; handle_exn : exn -> location * string; } (** The actual calls *) let interp (r,b,s) : string call = Interp (r,b,s) let rewind i : int call = Rewind i let goals : goals option call = Goal let evars : evar list option call = Evars let hints : (hint list * hint) option call = Hints let status : status call = Status let search flags : string coq_object list call = Search flags let get_options : (option_name * option_state) list call = GetOptions let set_options l : unit call = SetOptions l let inloadpath s : bool call = InLoadPath s let mkcases s : string list list call = MkCases s let quit : unit call = Quit (** * Coq answers to CoqIde *) let abstract_eval_call handler c = try let res = match c with | Interp (r,b,s) -> Obj.magic (handler.interp (r,b,s) : string) | Rewind i -> Obj.magic (handler.rewind i : int) | Goal -> Obj.magic (handler.goals () : goals option) | Evars -> Obj.magic (handler.evars () : evar list option) | Hints -> Obj.magic (handler.hints () : (hint list * hint) option) | Status -> Obj.magic (handler.status () : status) | Search flags -> Obj.magic (handler.search flags : string coq_object list) | GetOptions -> Obj.magic (handler.get_options () : (option_name * option_state) list) | SetOptions opts -> Obj.magic (handler.set_options opts : unit) | InLoadPath s -> Obj.magic (handler.inloadpath s : bool) | MkCases s -> Obj.magic (handler.mkcases s : string list list) | Quit -> Obj.magic (handler.quit () : unit) | About -> Obj.magic (handler.about () : coq_info) in Good res with any -> let (l, str) = handler.handle_exn any in Fail (l,str) (** * XML data marshalling *) exception Marshal_error (** Utility functions *) let massoc x l = try List.assoc x l with Not_found -> raise Marshal_error let constructor t c args = Element (t, ["val", c], args) let do_match constr t mf = match constr with | Element (s, attrs, args) -> if s = t then let c = massoc "val" attrs in mf c args else raise Marshal_error | _ -> raise Marshal_error let pcdata = function | PCData s -> s | _ -> raise Marshal_error let singleton = function | [x] -> x | _ -> raise Marshal_error let raw_string = function | [] -> "" | [PCData s] -> s | _ -> raise Marshal_error let bool_arg tag b = if b then [tag, ""] else [] (** Base types *) let 
of_unit () = Element ("unit", [], []) let to_unit = function | Element ("unit", [], []) -> () | _ -> raise Marshal_error let of_bool b = if b then constructor "bool" "true" [] else constructor "bool" "false" [] let to_bool xml = do_match xml "bool" (fun s _ -> match s with | "true" -> true | "false" -> false | _ -> raise Marshal_error) let of_list f l = Element ("list", [], List.map f l) let to_list f = function | Element ("list", [], l) -> List.map f l | _ -> raise Marshal_error let of_option f = function | None -> Element ("option", ["val", "none"], []) | Some x -> Element ("option", ["val", "some"], [f x]) let to_option f = function | Element ("option", ["val", "none"], []) -> None | Element ("option", ["val", "some"], [x]) -> Some (f x) | _ -> raise Marshal_error let of_string s = Element ("string", [], [PCData s]) let to_string = function | Element ("string", [], l) -> raw_string l | _ -> raise Marshal_error let of_int i = Element ("int", [], [PCData (string_of_int i)]) let to_int = function | Element ("int", [], [PCData s]) -> (try int_of_string s with Failure _ -> raise Marshal_error) | _ -> raise Marshal_error let of_pair f g (x, y) = Element ("pair", [], [f x; g y]) let to_pair f g = function | Element ("pair", [], [x; y]) -> (f x, g y) | _ -> raise Marshal_error (** More elaborate types *) let of_option_value = function | IntValue i -> constructor "option_value" "intvalue" [of_option of_int i] | BoolValue b -> constructor "option_value" "boolvalue" [of_bool b] | StringValue s -> constructor "option_value" "stringvalue" [of_string s] let to_option_value xml = do_match xml "option_value" (fun s args -> match s with | "intvalue" -> IntValue (to_option to_int (singleton args)) | "boolvalue" -> BoolValue (to_bool (singleton args)) | "stringvalue" -> StringValue (to_string (singleton args)) | _ -> raise Marshal_error ) let of_option_state s = Element ("option_state", [], [ of_bool s.opt_sync; of_bool s.opt_depr; of_string s.opt_name; of_option_value s.opt_value] ) let to_option_state = function | Element ("option_state", [], [sync; depr; name; value]) -> { opt_sync = to_bool sync; opt_depr = to_bool depr; opt_name = to_string name; opt_value = to_option_value value; } | _ -> raise Marshal_error let of_search_constraint = function | Name_Pattern s -> constructor "search_constraint" "name_pattern" [of_string s] | Type_Pattern s -> constructor "search_constraint" "type_pattern" [of_string s] | SubType_Pattern s -> constructor "search_constraint" "subtype_pattern" [of_string s] | In_Module m -> constructor "search_constraint" "in_module" [of_list of_string m] | Include_Blacklist -> constructor "search_constraint" "include_blacklist" [] let to_search_constraint xml = do_match xml "search_constraint" (fun s args -> match s with | "name_pattern" -> Name_Pattern (to_string (singleton args)) | "type_pattern" -> Type_Pattern (to_string (singleton args)) | "subtype_pattern" -> SubType_Pattern (to_string (singleton args)) | "in_module" -> In_Module (to_list to_string (singleton args)) | "include_blacklist" -> Include_Blacklist | _ -> raise Marshal_error) let of_coq_object f ans = let prefix = of_list of_string ans.coq_object_prefix in let qualid = of_list of_string ans.coq_object_qualid in let obj = f ans.coq_object_object in Element ("coq_object", [], [prefix; qualid; obj]) let to_coq_object f = function | Element ("coq_object", [], [prefix; qualid; obj]) -> let prefix = to_list to_string prefix in let qualid = to_list to_string qualid in let obj = f obj in { coq_object_prefix = prefix; 
coq_object_qualid = qualid; coq_object_object = obj; } | _ -> raise Marshal_error let of_value f = function | Good x -> Element ("value", ["val", "good"], [f x]) | Fail (loc, msg) -> let loc = match loc with | None -> [] | Some (s, e) -> [("loc_s", string_of_int s); ("loc_e", string_of_int e)] in Element ("value", ["val", "fail"] @ loc, [PCData msg]) let to_value f = function | Element ("value", attrs, l) -> let ans = massoc "val" attrs in if ans = "good" then Good (f (singleton l)) else if ans = "fail" then let loc = try let loc_s = int_of_string (List.assoc "loc_s" attrs) in let loc_e = int_of_string (List.assoc "loc_e" attrs) in Some (loc_s, loc_e) with e when e <> Sys.Break -> None in let msg = raw_string l in Fail (loc, msg) else raise Marshal_error | _ -> raise Marshal_error let of_call = function | Interp (raw, vrb, cmd) -> let flags = (bool_arg "raw" raw) @ (bool_arg "verbose" vrb) in Element ("call", ("val", "interp") :: flags, [PCData cmd]) | Rewind n -> Element ("call", ("val", "rewind") :: ["steps", string_of_int n], []) | Goal -> Element ("call", ["val", "goal"], []) | Evars -> Element ("call", ["val", "evars"], []) | Hints -> Element ("call", ["val", "hints"], []) | Status -> Element ("call", ["val", "status"], []) | Search flags -> let args = List.map (of_pair of_search_constraint of_bool) flags in Element ("call", ["val", "search"], args) | GetOptions -> Element ("call", ["val", "getoptions"], []) | SetOptions opts -> let args = List.map (of_pair (of_list of_string) of_option_value) opts in Element ("call", ["val", "setoptions"], args) | InLoadPath file -> Element ("call", ["val", "inloadpath"], [PCData file]) | MkCases ind -> Element ("call", ["val", "mkcases"], [PCData ind]) | Quit -> Element ("call", ["val", "quit"], []) | About -> Element ("call", ["val", "about"], []) let to_call = function | Element ("call", attrs, l) -> let ans = massoc "val" attrs in begin match ans with | "interp" -> let raw = List.mem_assoc "raw" attrs in let vrb = List.mem_assoc "verbose" attrs in Interp (raw, vrb, raw_string l) | "rewind" -> let steps = int_of_string (massoc "steps" attrs) in Rewind steps | "goal" -> Goal | "evars" -> Evars | "status" -> Status | "search" -> let args = List.map (to_pair to_search_constraint to_bool) l in Search args | "getoptions" -> GetOptions | "setoptions" -> let args = List.map (to_pair (to_list to_string) to_option_value) l in SetOptions args | "inloadpath" -> InLoadPath (raw_string l) | "mkcases" -> MkCases (raw_string l) | "hints" -> Hints | "quit" -> Quit | "about" -> About | _ -> raise Marshal_error end | _ -> raise Marshal_error let of_status s = let of_so = of_option of_string in let of_sl = of_list of_string in Element ("status", [], [ of_sl s.status_path; of_so s.status_proofname; of_sl s.status_allproofs; of_int s.status_statenum; of_int s.status_proofnum; ] ) let to_status = function | Element ("status", [], [path; name; prfs; snum; pnum]) -> { status_path = to_list to_string path; status_proofname = to_option to_string name; status_allproofs = to_list to_string prfs; status_statenum = to_int snum; status_proofnum = to_int pnum; } | _ -> raise Marshal_error let of_evar s = Element ("evar", [], [PCData s.evar_info]) let to_evar = function | Element ("evar", [], data) -> { evar_info = raw_string data; } | _ -> raise Marshal_error let of_goal g = let hyp = of_list of_string g.goal_hyp in let ccl = of_string g.goal_ccl in let id = of_string g.goal_id in Element ("goal", [], [id; hyp; ccl]) let to_goal = function | Element ("goal", [], [id; hyp; ccl]) 
-> let hyp = to_list to_string hyp in let ccl = to_string ccl in let id = to_string id in { goal_hyp = hyp; goal_ccl = ccl; goal_id = id; } | _ -> raise Marshal_error let of_goals g = let of_glist = of_list of_goal in let fg = of_list of_goal g.fg_goals in let bg = of_list (of_pair of_glist of_glist) g.bg_goals in Element ("goals", [], [fg; bg]) let to_goals = function | Element ("goals", [], [fg; bg]) -> let to_glist = to_list to_goal in let fg = to_list to_goal fg in let bg = to_list (to_pair to_glist to_glist) bg in { fg_goals = fg; bg_goals = bg; } | _ -> raise Marshal_error let of_coq_info info = let version = of_string info.coqtop_version in let protocol = of_string info.protocol_version in let release = of_string info.release_date in let compile = of_string info.compile_date in Element ("coq_info", [], [version; protocol; release; compile]) let to_coq_info = function | Element ("coq_info", [], [version; protocol; release; compile]) -> { coqtop_version = to_string version; protocol_version = to_string protocol; release_date = to_string release; compile_date = to_string compile; } | _ -> raise Marshal_error (** Conversions between ['a value] and xml answers When decoding an xml answer, we dynamically check that it is compatible with the original call. For that we now rely on the fact that all sub-fonctions [to_xxx : xml -> xxx] check that the current xml element is "xxx", and raise [Marshal_error] if anything goes wrong. *) type value_type = | Unit | String | Int | Bool | Goals | Evar | State | Option_state | Coq_info | Option of value_type | List of value_type | Coq_object of value_type | Pair of value_type * value_type let hint = List (Pair (String, String)) let option_name = List String let expected_answer_type = function | Interp _ -> String | Rewind _ -> Int | Goal -> Option Goals | Evars -> Option (List Evar) | Hints -> Option (Pair (List hint, hint)) | Status -> State | Search _ -> List (Coq_object String) | GetOptions -> List (Pair (option_name, Option_state)) | SetOptions _ -> Unit | InLoadPath _ -> Bool | MkCases _ -> List (List String) | Quit -> Unit | About -> Coq_info let of_answer (q : 'a call) (r : 'a value) : xml = let rec convert ty : 'a -> xml = match ty with | Unit -> Obj.magic of_unit | Bool -> Obj.magic of_bool | String -> Obj.magic of_string | Int -> Obj.magic of_int | State -> Obj.magic of_status | Option_state -> Obj.magic of_option_state | Coq_info -> Obj.magic of_coq_info | Goals -> Obj.magic of_goals | Evar -> Obj.magic of_evar | List t -> Obj.magic (of_list (convert t)) | Option t -> Obj.magic (of_option (convert t)) | Coq_object t -> Obj.magic (of_coq_object (convert t)) | Pair (t1,t2) -> Obj.magic (of_pair (convert t1) (convert t2)) in of_value (convert (expected_answer_type q)) r let to_answer xml (c : 'a call) : 'a value = let rec convert ty : xml -> 'a = match ty with | Unit -> Obj.magic to_unit | Bool -> Obj.magic to_bool | String -> Obj.magic to_string | Int -> Obj.magic to_int | State -> Obj.magic to_status | Option_state -> Obj.magic to_option_state | Coq_info -> Obj.magic to_coq_info | Goals -> Obj.magic to_goals | Evar -> Obj.magic to_evar | List t -> Obj.magic (to_list (convert t)) | Option t -> Obj.magic (to_option (convert t)) | Coq_object t -> Obj.magic (to_coq_object (convert t)) | Pair (t1,t2) -> Obj.magic (to_pair (convert t1) (convert t2)) in to_value (convert (expected_answer_type c)) xml (** * Debug printing *) let pr_option_value = function | IntValue None -> "none" | IntValue (Some i) -> string_of_int i | StringValue s -> s | 
BoolValue b -> if b then "true" else "false" let rec pr_setoptions opts = let map (key, v) = let key = String.concat " " key in key ^ " := " ^ (pr_option_value v) in String.concat "; " (List.map map opts) let pr_getoptions opts = let map (key, s) = let key = String.concat " " key in Printf.sprintf "%s: sync := %b; depr := %b; name := %s; value := %s\n" key s.opt_sync s.opt_depr s.opt_name (pr_option_value s.opt_value) in "\n" ^ String.concat "" (List.map map opts) let pr_call = function | Interp (r,b,s) -> let raw = if r then "RAW" else "" in let verb = if b then "" else "SILENT" in "INTERP"^raw^verb^" ["^s^"]" | Rewind i -> "REWIND "^(string_of_int i) | Goal -> "GOALS" | Evars -> "EVARS" | Hints -> "HINTS" | Status -> "STATUS" | Search _ -> "SEARCH" | GetOptions -> "GETOPTIONS" | SetOptions l -> "SETOPTIONS" ^ " [" ^ pr_setoptions l ^ "]" | InLoadPath s -> "INLOADPATH "^s | MkCases s -> "MKCASES "^s | Quit -> "QUIT" | About -> "ABOUT" let pr_value_gen pr = function | Good v -> "GOOD " ^ pr v | Fail (_,str) -> "FAIL ["^str^"]" let pr_value v = pr_value_gen (fun _ -> "") v let pr_string s = "["^s^"]" let pr_bool b = if b then "true" else "false" let pr_status s = let path = let l = String.concat "." s.status_path in "path=" ^ l ^ ";" in let name = match s.status_proofname with | None -> "no proof;" | Some n -> "proof = " ^ n ^ ";" in "Status: " ^ path ^ name let pr_mkcases l = let l = List.map (String.concat " ") l in "[" ^ String.concat " | " l ^ "]" let pr_goals_aux g = if g.fg_goals = [] then if g.bg_goals = [] then "Proof completed." else let rec pr_focus _ = function | [] -> assert false | [lg, rg] -> Printf.sprintf "%i" (List.length lg + List.length rg) | (lg, rg) :: l -> Printf.sprintf "%i:%a" (List.length lg + List.length rg) pr_focus l in Printf.sprintf "Still focussed: [%a]." pr_focus g.bg_goals else let pr_menu s = s in let pr_goal { goal_hyp = hyps; goal_ccl = goal } = "[" ^ String.concat "; " (List.map pr_menu hyps) ^ " |- " ^ pr_menu goal ^ "]" in String.concat " " (List.map pr_goal g.fg_goals) let pr_goals = function | None -> "No proof in progress." | Some g -> pr_goals_aux g let pr_evar ev = "[" ^ ev.evar_info ^ "]" let pr_evars = function | None -> "No proof in progress." 
| Some evars -> String.concat " " (List.map pr_evar evars) let pr_full_value call value = match call with | Interp _ -> pr_value_gen pr_string (Obj.magic value : string value) | Rewind i -> pr_value_gen string_of_int (Obj.magic value : int value) | Goal -> pr_value_gen pr_goals (Obj.magic value : goals option value) | Evars -> pr_value_gen pr_evars (Obj.magic value : evar list option value) | Hints -> pr_value value | Status -> pr_value_gen pr_status (Obj.magic value : status value) | Search _ -> pr_value value | GetOptions -> pr_value_gen pr_getoptions (Obj.magic value : (option_name * option_state) list value) | SetOptions _ -> pr_value value | InLoadPath s -> pr_value_gen pr_bool (Obj.magic value : bool value) | MkCases s -> pr_value_gen pr_mkcases (Obj.magic value : string list list value) | Quit -> pr_value value | About -> pr_value value coq-8.4pl2/toplevel/toplevel.mllib0000640000175000001440000000036011741616002016331 0ustar notinusersHimsg Cerrors Class Vernacexpr Metasyntax Auto_ind_decl Libtypes Search Autoinstance Lemmas Indschemes Command Classes Record Ppvernac Backtrack Vernacinterp Mltop Vernacentries Whelp Vernac Ide_intf Ide_slave Toplevel Usage Coqinit Coqtop coq-8.4pl2/toplevel/vernacentries.mli0000640000175000001440000000461012010532755017035 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit (** Vernacular entries *) val show_script : unit -> unit val show_prooftree : unit -> unit val show_node : unit -> unit (** This function can be used by any command that want to observe terms in the context of the current goal, as for instance in pcoq *) val get_current_context_of_args : int option -> Evd.evar_map * Environ.env type pcoq_hook = { start_proof : unit -> unit; solve : int -> unit; abort : string -> unit; search : searchable -> dir_path list * bool -> unit; print_name : Libnames.reference Genarg.or_by_notation -> unit; print_check : Environ.env -> Environ.unsafe_judgment -> unit; print_eval : Reductionops.reduction_function -> Environ.env -> Evd.evar_map -> constr_expr -> Environ.unsafe_judgment -> unit; show_goal : goal_reference -> unit } val set_pcoq_hook : pcoq_hook -> unit (** The main interpretation function of vernacular expressions *) val interp : Vernacexpr.vernac_expr -> unit (** Print subgoals when the verbose flag is on. Meant to be used inside vernac commands from plugins. *) val print_subgoals : unit -> unit (** The printing of goals via [print_subgoals] or during [interp] can be controlled by the following flag. Used for instance by coqide, since it has its own goal-fetching mechanism. *) val enable_goal_printing : bool ref (** Should Qed try to display the proof script ? True by default, but false in ProofGeneral and coqIDE *) val qed_display_script : bool ref (** Prepare a "match" template for a given inductive type. For each branch of the match, we list the constructor name followed by enough pattern variables. [Not_found] is raised if the given string isn't the qualid of a known inductive type. 
*) val make_cases : string -> string list list coq-8.4pl2/toplevel/command.mli0000640000175000001440000001161212010532755015603 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit) -> unit val get_declare_definition_hook : unit -> (definition_entry -> unit) val set_declare_assumptions_hook : (types -> unit) -> unit (** {6 Definitions/Let} *) val interp_definition : local_binder list -> red_expr option -> constr_expr -> constr_expr option -> definition_entry * Impargs.manual_implicits val declare_definition : identifier -> locality * definition_object_kind -> definition_entry -> Impargs.manual_implicits -> declaration_hook -> unit (** {6 Parameters/Assumptions} *) val interp_assumption : local_binder list -> constr_expr -> types * Impargs.manual_implicits val declare_assumption : coercion_flag -> assumption_kind -> types -> Impargs.manual_implicits -> bool (** implicit *) -> Entries.inline -> variable located -> unit val declare_assumptions : variable located list -> coercion_flag -> assumption_kind -> types -> Impargs.manual_implicits -> bool -> Entries.inline -> unit (** {6 Inductive and coinductive types} *) (** Extracting the semantical components out of the raw syntax of mutual inductive declarations *) type structured_one_inductive_expr = { ind_name : identifier; ind_arity : constr_expr; ind_lc : (identifier * constr_expr) list } type structured_inductive_expr = local_binder list * structured_one_inductive_expr list val extract_mutual_inductive_declaration_components : (one_inductive_expr * decl_notation list) list -> structured_inductive_expr * (*coercions:*) qualid list * decl_notation list (** Typing mutual inductive definitions *) type one_inductive_impls = Impargs.manual_implicits (** for inds *)* Impargs.manual_implicits list (** for constrs *) val interp_mutual_inductive : structured_inductive_expr -> decl_notation list -> bool -> mutual_inductive_entry * one_inductive_impls list (** Registering a mutual inductive definition together with its associated schemes *) val declare_mutual_inductive_with_eliminations : Declare.internal_flag -> mutual_inductive_entry -> one_inductive_impls list -> mutual_inductive (** Entry points for the vernacular commands Inductive and CoInductive *) val do_mutual_inductive : (one_inductive_expr * decl_notation list) list -> bool -> unit (** {6 Fixpoints and cofixpoints} *) type structured_fixpoint_expr = { fix_name : identifier; fix_annot : identifier located option; fix_binders : local_binder list; fix_body : constr_expr option; fix_type : constr_expr } (** Extracting the semantical components out of the raw syntax of (co)fixpoints declarations *) val extract_fixpoint_components : bool -> (fixpoint_expr * decl_notation list) list -> structured_fixpoint_expr list * decl_notation list val extract_cofixpoint_components : (cofixpoint_expr * decl_notation list) list -> structured_fixpoint_expr list * decl_notation list (** Typing global fixpoints and cofixpoint_expr *) type recursive_preentry = identifier list * constr option list * types list val interp_fixpoint : structured_fixpoint_expr list -> decl_notation list -> recursive_preentry * (name list * Impargs.manual_implicits * int option) list val interp_cofixpoint : structured_fixpoint_expr list -> decl_notation list -> recursive_preentry * (name list * Impargs.manual_implicits * int option) list (** Registering fixpoints and cofixpoints in the environment *) val declare_fixpoint : 
recursive_preentry * (name list * Impargs.manual_implicits * int option) list -> lemma_possible_guards -> decl_notation list -> unit val declare_cofixpoint : recursive_preentry * (name list * Impargs.manual_implicits * int option) list -> decl_notation list -> unit (** Entry points for the vernacular commands Fixpoint and CoFixpoint *) val do_fixpoint : (fixpoint_expr * decl_notation list) list -> unit val do_cofixpoint : (cofixpoint_expr * decl_notation list) list -> unit (** Utils *) val check_mutuality : Environ.env -> bool -> (identifier * types) list -> unit val declare_fix : definition_object_kind -> identifier -> constr -> types -> Impargs.manual_implicits -> global_reference coq-8.4pl2/toplevel/record.mli0000640000175000001440000000334412010532755015446 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ?kind:Decl_kinds.definition_object_kind -> ?name:identifier -> coercion_flag list -> manual_explicitation list list -> rel_context -> (name * bool) list * constant option list val declare_structure : Decl_kinds.recursivity_kind -> bool (**infer?*) -> identifier -> identifier -> manual_explicitation list -> rel_context -> (** params *) constr -> (** arity *) Impargs.manual_explicitation list list -> rel_context -> (** fields *) ?kind:Decl_kinds.definition_object_kind -> ?name:identifier -> bool -> (** coercion? *) bool list -> (** field coercions *) Evd.evar_map -> inductive val definition_structure : inductive_kind * Decl_kinds.recursivity_kind * bool(**infer?*)* lident with_coercion * local_binder list * (local_decl_expr with_instance with_priority with_notation) list * identifier * constr_expr option -> global_reference coq-8.4pl2/toplevel/vernac.mli0000640000175000001440000000346612010532755015453 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Util.loc * Vernacexpr.vernac_expr (** Reads and executes vernac commands from a stream. The boolean [just_parsing] disables interpretation of commands. *) exception DuringCommandInterp of Util.loc * exn exception End_of_input val just_parsing : bool ref (** [eval_expr] executes one vernacular command. By default the command is considered as non-state-preserving, in which case we add it to the Backtrack stack (triggering a save of a frozen state and the generation of a new state label). An example of state-preserving command is one coming from the query panel of Coqide. *) val eval_expr : ?preserving:bool -> Util.loc * Vernacexpr.vernac_expr -> unit val raw_do_vernac : Pcoq.Gram.parsable -> unit (** Set XML hooks *) val set_xml_start_library : (unit -> unit) -> unit val set_xml_end_library : (unit -> unit) -> unit (** Load a vernac file, verbosely or not. 
Errors are annotated with file and location *) val load_vernac : bool -> string -> unit (** Compile a vernac file, verbosely or not (f is assumed without .v suffix) *) val compile : bool -> string -> unit coq-8.4pl2/toplevel/whelp.mli0000640000175000001440000000143312010532755015304 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit coq-8.4pl2/toplevel/cerrors.mli0000640000175000001440000000153712010532755015651 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* std_ppcmds (** Pre-explain a vernac interpretation error *) val process_vernac_interp_error : exn -> exn (** General explain function. Should not be used directly now, see instead function [Errors.print] and variants *) val explain_exn_default : exn -> std_ppcmds coq-8.4pl2/toplevel/discharge.ml0000640000175000001440000000625512010532755015754 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* id, Entries.LocalAssum p | (Name id,Some p,_) -> id, Entries.LocalDef p | (Anonymous,_,_) -> anomaly"Unnamed inductive local variable" (* Replace Var(y1)..Var(yq):C1..Cq |- Ij:Bj Var(y1)..Var(yq):C1..Cq; I1..Ip:B1..Bp |- ci : Ti by |- Ij: (y1..yq:C1..Cq)Bj I1..Ip:(B1 y1..yq)..(Bp y1..yq) |- ci : (y1..yq:C1..Cq)Ti[Ij:=(Ij y1..yq)] *) let abstract_inductive hyps nparams inds = let ntyp = List.length inds in let nhyp = named_context_length hyps in let args = instance_from_named_context (List.rev hyps) in let subs = list_tabulate (fun k -> lift nhyp (mkApp(mkRel (k+1),args))) ntyp in let inds' = List.map (function (tname,arity,cnames,lc) -> let lc' = List.map (substl subs) lc in let lc'' = List.map (fun b -> Termops.it_mkNamedProd_wo_LetIn b hyps) lc' in let arity' = Termops.it_mkNamedProd_wo_LetIn arity hyps in (tname,arity',cnames,lc'')) inds in let nparams' = nparams + Array.length args in (* To be sure to be the same as before, should probably be moved to process_inductive *) let params' = let (_,arity,_,_) = List.hd inds' in let (params,_) = decompose_prod_n_assum nparams' arity in List.map detype_param params in let ind'' = List.map (fun (a,arity,c,lc) -> let _, short_arity = decompose_prod_n_assum nparams' arity in let shortlc = List.map (fun c -> snd (decompose_prod_n_assum nparams' c)) lc in { mind_entry_typename = a; mind_entry_arity = short_arity; mind_entry_consnames = c; mind_entry_lc = shortlc }) inds' in (params',ind'') let refresh_polymorphic_type_of_inductive (_,mip) = match mip.mind_arity with | Monomorphic s -> s.mind_user_arity | Polymorphic ar -> let ctx = List.rev mip.mind_arity_ctxt in mkArity (List.rev ctx,Termops.new_Type_sort()) let process_inductive sechyps modlist mib = let nparams = mib.mind_nparams in let inds = array_map_to_list (fun mip -> let arity = expmod_constr modlist (refresh_polymorphic_type_of_inductive (mib,mip)) in let lc = Array.map (expmod_constr modlist) mip.mind_user_lc in (mip.mind_typename, arity, Array.to_list mip.mind_consnames, Array.to_list lc)) mib.mind_packets in let sechyps' = map_named_context (expmod_constr modlist) sechyps in let (params',inds') = abstract_inductive sechyps' nparams inds in { mind_entry_record = mib.mind_record; mind_entry_finite = mib.mind_finite; mind_entry_params = params'; mind_entry_inds = inds' } 
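(* Illustrative example of the transformation above (added annotation, not
   part of the original sources): for a section

       Section S. Variable A : Type.
       Inductive list := nil : list | cons : A -> list -> list.
       End S.

   [process_inductive] re-expresses the block outside the section, roughly as

       Inductive list (A : Type) := nil : list A
                                  | cons : A -> list A -> list A.

   i.e. the section variable [A] becomes an extra parameter of the inductive
   type and of each constructor, as described in the comment of
   [abstract_inductive]. *)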
coq-8.4pl2/toplevel/ide_intf.mli0000640000175000001440000000770712052731210015751 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string call (** Backtracking by at least a certain number of phrases. No finished proofs will be re-opened. Instead, we continue backtracking until before these proofs, and answer the amount of extra backtracking performed. Backtracking by more than the number of phrases already interpreted successfully (and not yet undone) will fail. *) val rewind : int -> int call (** Fetching the list of current goals. Return [None] if no proof is in progress, [Some gl] otherwise. *) val goals : goals option call (** Retrieving the tactics applicable to the current goal. [None] if there is no proof in progress. *) val hints : (hint list * hint) option call (** The status, for instance "Ready in SomeSection, proving Foo" *) val status : status call (** Is a directory part of Coq's loadpath ? *) val inloadpath : string -> bool call (** Create a "match" template for a given inductive type. For each branch of the match, we list the constructor name followed by enough pattern variables. *) val mkcases : string -> string list list call (** Retrieve the list of unintantiated evars in the current proof. [None] if no proof is in progress. *) val evars : evar list option call (** Retrieve the list of options of the current toplevel, together with their state. *) val get_options : (option_name * option_state) list call (** Set the options to the given value. Warning: this is not atomic, so whenever the call fails, the option state can be messed up... This is the caller duty to check that everything is correct. *) val set_options : (option_name * option_value) list -> unit call (** Quit gracefully the interpreter. 
*) val quit : unit call (** The structure that coqtop should implement *) type handler = { interp : raw * verbose * string -> string; rewind : int -> int; goals : unit -> goals option; evars : unit -> evar list option; hints : unit -> (hint list * hint) option; status : unit -> status; search : search_flags -> string coq_object list; get_options : unit -> (option_name * option_state) list; set_options : (option_name * option_value) list -> unit; inloadpath : string -> bool; mkcases : string -> string list list; quit : unit -> unit; about : unit -> coq_info; handle_exn : exn -> location * string; } val abstract_eval_call : handler -> 'a call -> 'a value (** * Protocol version *) val protocol_version : string (** * XML data marshalling *) exception Marshal_error val of_value : ('a -> xml) -> 'a value -> xml val to_value : (xml -> 'a) -> xml -> 'a value val of_call : 'a call -> xml val to_call : xml -> 'a call val of_answer : 'a call -> 'a value -> xml val to_answer : xml -> 'a call -> 'a value (** * Debug printing *) val pr_call : 'a call -> string val pr_value : 'a value -> string val pr_full_value : 'a call -> 'a value -> string coq-8.4pl2/toplevel/libtypes.ml0000640000175000001440000000573712010532755015662 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* obj = declare_object { (default_object "LIBTYPES") with load_function = (fun _ -> load); subst_function = (fun (s,t) -> subst s t); classify_function = (fun x -> Substitute x) } let update () = Lib.add_anonymous_leaf (input !defined_types) (* * Search interface *) let search_pattern pat = TypeDnet.search_pattern !all_types pat let search_concl pat = TypeDnet.search_concl !all_types pat let search_head_concl pat = TypeDnet.search_head_concl !all_types pat let search_eq_concl eq pat = TypeDnet.search_eq_concl !all_types eq pat let add typ gr = defined_types := TypeDnet.add typ gr !defined_types; all_types := TypeDnet.add typ gr !all_types (* let add_key = Profile.declare_profile "add" let add a b = Profile.profile1 add_key add a b *) (* * Hooks declaration *) let _ = Declare.add_cache_hook ( fun sp -> let gr = Nametab.global_of_path sp in let ty = Global.type_of_global gr in add ty gr ) let _ = Declaremods.set_end_library_hook update coq-8.4pl2/toplevel/indschemes.mli0000640000175000001440000000331412010532755016307 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val declare_eq_decidability : mutual_inductive -> unit (** Build and register a congruence scheme for an equality-like inductive type *) val declare_congr_scheme : inductive -> unit (** Build and register rewriting schemes for an equality-like inductive type *) val declare_rewriting_schemes : inductive -> unit (** Mutual Minimality/Induction scheme *) val do_mutual_induction_scheme : (identifier located * bool * inductive * glob_sort) list -> unit (** Main calls to interpret the Scheme command *) val do_scheme : (identifier located option * scheme) list -> unit (** Combine a list of schemes into a conjunction of them *) val build_combined_scheme : env -> constant list -> constr * types val do_combined_scheme : identifier located -> identifier located list -> unit (** Hook called at each inductive type definition *) val declare_default_schemes : mutual_inductive -> unit coq-8.4pl2/toplevel/metasyntax.ml0000640000175000001440000012714112121620060016204 0ustar 
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* obj = declare_object {(default_object "TOKEN") with open_function = (fun i o -> if i=1 then cache_token o); cache_function = cache_token; subst_function = Libobject.ident_subst_function; classify_function = (fun o -> Substitute o)} let add_token_obj s = Lib.add_anonymous_leaf (inToken s) (**********************************************************************) (* Tactic Notation *) let interp_prod_item lev = function | TacTerm s -> GramTerminal s | TacNonTerm (loc, nt, po) -> let sep = match po with Some (_,sep) -> sep | _ -> "" in let (etyp, e) = interp_entry_name true (Some lev) nt sep in GramNonTerminal (loc, etyp, e, Option.map fst po) let make_terminal_status = function | GramTerminal s -> Some s | GramNonTerminal _ -> None let rec make_tags = function | GramTerminal s :: l -> make_tags l | GramNonTerminal (loc, etyp, _, po) :: l -> etyp :: make_tags l | [] -> [] let cache_tactic_notation (_,(pa,pp)) = Egrammar.extend_grammar (Egrammar.TacticGrammar pa); Pptactic.declare_extra_tactic_pprule pp let subst_tactic_parule subst (key,n,p,(d,tac)) = (key,n,p,(d,Tacinterp.subst_tactic subst tac)) let subst_tactic_notation (subst,(pa,pp)) = (subst_tactic_parule subst pa,pp) type tactic_grammar_obj = (string * int * grammar_prod_item list * (dir_path * Tacexpr.glob_tactic_expr)) * (string * Genarg.argument_type list * (int * Pptactic.grammar_terminals)) let inTacticGrammar : tactic_grammar_obj -> obj = declare_object {(default_object "TacticGrammar") with open_function = (fun i o -> if i=1 then cache_tactic_notation o); cache_function = cache_tactic_notation; subst_function = subst_tactic_notation; classify_function = (fun o -> Substitute o)} let cons_production_parameter l = function | GramTerminal _ -> l | GramNonTerminal (_,_,_,ido) -> Option.List.cons ido l let rec tactic_notation_key = function | GramTerminal id :: _ -> id | _ :: l -> tactic_notation_key l | [] -> "terminal_free_notation" let rec next_key_away key t = if Pptactic.exists_extra_tactic_pprule key t then next_key_away (key^"'") t else key let add_tactic_notation (n,prods,e) = let prods = List.map (interp_prod_item n) prods in let tags = make_tags prods in let key = next_key_away (tactic_notation_key prods) tags in let pprule = (key,tags,(n,List.map make_terminal_status prods)) in let ids = List.fold_left cons_production_parameter [] prods in let tac = Tacinterp.glob_tactic_env ids (Global.env()) e in let parule = (key,n,prods,(Lib.cwd(),tac)) in Lib.add_anonymous_leaf (inTacticGrammar (parule,pprule)) (**********************************************************************) (* Printing grammar entries *) let print_grammar = function | "constr" | "operconstr" | "binder_constr" -> msgnl (str "Entry constr is"); Gram.entry_print Pcoq.Constr.constr; msgnl (str "and lconstr is"); Gram.entry_print Pcoq.Constr.lconstr; msgnl (str "where binder_constr is"); Gram.entry_print Pcoq.Constr.binder_constr; msgnl (str "and operconstr is"); Gram.entry_print Pcoq.Constr.operconstr; | "pattern" -> Gram.entry_print Pcoq.Constr.pattern | "tactic" -> msgnl (str "Entry tactic_expr is"); Gram.entry_print Pcoq.Tactic.tactic_expr; msgnl (str "Entry binder_tactic is"); Gram.entry_print Pcoq.Tactic.binder_tactic; msgnl (str "Entry simple_tactic is"); Gram.entry_print Pcoq.Tactic.simple_tactic; | "vernac" -> msgnl (str "Entry vernac is"); Gram.entry_print Pcoq.Vernac_.vernac; msgnl (str "Entry command is"); 
Gram.entry_print Pcoq.Vernac_.command; msgnl (str "Entry syntax is"); Gram.entry_print Pcoq.Vernac_.syntax; msgnl (str "Entry gallina is"); Gram.entry_print Pcoq.Vernac_.gallina; msgnl (str "Entry gallina_ext is"); Gram.entry_print Pcoq.Vernac_.gallina_ext; | _ -> error "Unknown or unprintable grammar entry." (**********************************************************************) (* Parse a format (every terminal starting with a letter or a single quote (except a single quote alone) must be quoted) *) let parse_format (loc,str) = let str = " "^str in let l = String.length str in let push_token a = function | cur::l -> (a::cur)::l | [] -> [[a]] in let push_white n l = if n = 0 then l else push_token (UnpTerminal (String.make n ' ')) l in let close_box i b = function | a::(_::_ as l) -> push_token (UnpBox (b,a)) l | _ -> error "Non terminated box in format." in let close_quotation i = if i < String.length str & str.[i] = '\'' & (i+1 = l or str.[i+1] = ' ') then i+1 else error "Incorrectly terminated quoted expression." in let rec spaces n i = if i < String.length str & str.[i] = ' ' then spaces (n+1) (i+1) else n in let rec nonspaces quoted n i = if i < String.length str & str.[i] <> ' ' then if str.[i] = '\'' & quoted & (i+1 >= String.length str or str.[i+1] = ' ') then if n=0 then error "Empty quoted token." else n else nonspaces quoted (n+1) (i+1) else if quoted then error "Spaces are not allowed in (quoted) symbols." else n in let rec parse_non_format i = let n = nonspaces false 0 i in push_token (UnpTerminal (String.sub str i n)) (parse_token (i+n)) and parse_quoted n i = if i < String.length str then match str.[i] with (* Parse " // " *) | '/' when i <= String.length str & str.[i+1] = '/' -> (* We forget the useless n spaces... *) push_token (UnpCut PpFnl) (parse_token (close_quotation (i+2))) (* Parse " .. / .. " *) | '/' when i <= String.length str -> let p = spaces 0 (i+1) in push_token (UnpCut (PpBrk (n,p))) (parse_token (close_quotation (i+p+1))) | c -> (* The spaces are real spaces *) push_white n (match c with | '[' -> if i <= String.length str then match str.[i+1] with (* Parse " [h .. ", *) | 'h' when i+1 <= String.length str & str.[i+2] = 'v' -> (parse_box (fun n -> PpHVB n) (i+3)) (* Parse " [v .. ", *) | 'v' -> parse_box (fun n -> PpVB n) (i+2) (* Parse " [ .. ", *) | ' ' | '\'' -> parse_box (fun n -> PpHOVB n) (i+1) | _ -> error "\"v\", \"hv\", \" \" expected after \"[\" in format." else error "\"v\", \"hv\" or \" \" expected after \"[\" in format." (* Parse "]" *) | ']' -> ([] :: parse_token (close_quotation (i+1))) (* Parse a non formatting token *) | c -> let n = nonspaces true 0 i in push_token (UnpTerminal (String.sub str (i-1) (n+2))) (parse_token (close_quotation (i+n)))) else if n = 0 then [] else error "Ending spaces non part of a format annotation." and parse_box box i = let n = spaces 0 i in close_box i (box n) (parse_token (close_quotation (i+n))) and parse_token i = let n = spaces 0 i in let i = i+n in if i < l then match str.[i] with (* Parse a ' *) | '\'' when i+1 >= String.length str or str.[i+1] = ' ' -> push_white (n-1) (push_token (UnpTerminal "'") (parse_token (i+1))) (* Parse the beginning of a quoted expression *) | '\'' -> parse_quoted (n-1) (i+1) (* Otherwise *) | _ -> push_white (n-1) (parse_non_format i) else push_white n [[]] in try if str <> "" then match parse_token 0 with | [l] -> l | _ -> error "Box closed without being opened in format." else error "Empty format." 
with e when Errors.noncritical e -> Loc.raise loc e (***********************) (* Analyzing notations *) type symbol_token = WhiteSpace of int | String of string let split_notation_string str = let push_token beg i l = if beg = i then l else let s = String.sub str beg (i - beg) in String s :: l in let push_whitespace beg i l = if beg = i then l else WhiteSpace (i-beg) :: l in let rec loop beg i = if i < String.length str then if str.[i] = ' ' then push_token beg i (loop_on_whitespace (i+1) (i+1)) else loop beg (i+1) else push_token beg i [] and loop_on_whitespace beg i = if i < String.length str then if str.[i] <> ' ' then push_whitespace beg i (loop i (i+1)) else loop_on_whitespace beg (i+1) else push_whitespace beg i [] in loop 0 0 (* Interpret notations with a recursive component *) let out_nt = function NonTerminal x -> x | _ -> assert false let msg_expected_form_of_recursive_notation = "In the notation, the special symbol \"..\" must occur in\na configuration of the form \"x symbs .. symbs y\"." let rec find_pattern nt xl = function | Break n as x :: l, Break n' :: l' when n=n' -> find_pattern nt (x::xl) (l,l') | Terminal s as x :: l, Terminal s' :: l' when s = s' -> find_pattern nt (x::xl) (l,l') | [], NonTerminal x' :: l' -> (out_nt nt,x',List.rev xl),l' | _, Terminal s :: _ | Terminal s :: _, _ -> error ("The token \""^s^"\" occurs on one side of \"..\" but not on the other side.") | _, Break s :: _ | Break s :: _, _ -> error ("A break occurs on one side of \"..\" but not on the other side.") | _, [] -> error msg_expected_form_of_recursive_notation | ((SProdList _ | NonTerminal _) :: _), _ | _, (SProdList _ :: _) -> anomaly "Only Terminal or Break expected on left, non-SProdList on right" let rec interp_list_parser hd = function | [] -> [], List.rev hd | NonTerminal id :: tl when id = ldots_var -> if hd = [] then error msg_expected_form_of_recursive_notation; let hd = List.rev hd in let ((x,y,sl),tl') = find_pattern (List.hd hd) [] (List.tl hd,tl) in let xyl,tl'' = interp_list_parser [] tl' in (* We remember each pair of variable denoting a recursive part to *) (* remove the second copy of it afterwards *) (x,y)::xyl, SProdList (x,sl) :: tl'' | (Terminal _ | Break _) as s :: tl -> if hd = [] then let yl,tl' = interp_list_parser [] tl in yl, s :: tl' else interp_list_parser (s::hd) tl | NonTerminal _ as x :: tl -> let xyl,tl' = interp_list_parser [x] tl in xyl, List.rev_append hd tl' | SProdList _ :: _ -> anomaly "Unexpected SProdList in interp_list_parser" (* Find non-terminal tokens of notation *) (* To protect alphabetic tokens and quotes from being seen as variables *) let quote_notation_token x = let n = String.length x in let norm = is_ident x in if (n > 0 & norm) or (n > 2 & x.[0] = '\'') then "'"^x^"'" else x let rec raw_analyze_notation_tokens = function | [] -> [] | String ".." :: sl -> NonTerminal ldots_var :: raw_analyze_notation_tokens sl | String "_" :: _ -> error "_ must be quoted." 
| String x :: sl when is_ident x -> NonTerminal (Names.id_of_string x) :: raw_analyze_notation_tokens sl | String s :: sl -> Terminal (drop_simple_quotes s) :: raw_analyze_notation_tokens sl | WhiteSpace n :: sl -> Break n :: raw_analyze_notation_tokens sl let is_numeral symbs = match List.filter (function Break _ -> false | _ -> true) symbs with | ([Terminal "-"; Terminal x] | [Terminal x]) -> (try let _ = Bigint.of_string x in true with e when Errors.noncritical e -> false) | _ -> false let rec get_notation_vars = function | [] -> [] | NonTerminal id :: sl -> let vars = get_notation_vars sl in if id = ldots_var then vars else if List.mem id vars then error ("Variable "^string_of_id id^" occurs more than once.") else id::vars | (Terminal _ | Break _) :: sl -> get_notation_vars sl | SProdList _ :: _ -> assert false let analyze_notation_tokens l = let l = raw_analyze_notation_tokens l in let vars = get_notation_vars l in let recvars,l = interp_list_parser [] l in recvars, list_subtract vars (List.map snd recvars), l let error_not_same_scope x y = error ("Variables "^string_of_id x^" and "^string_of_id y^ " must be in the same scope.") (**********************************************************************) (* Build pretty-printing rules *) type printing_precedence = int * parenRelation type parsing_precedence = int option let prec_assoc = function | RightA -> (L,E) | LeftA -> (E,L) | NonA -> (L,L) let precedence_of_entry_type from = function | ETConstr (NumLevel n,BorderProd (_,None)) -> n, Prec n | ETConstr (NumLevel n,BorderProd (b,Some a)) -> n, let (lp,rp) = prec_assoc a in if b=Left then lp else rp | ETConstr (NumLevel n,InternalProd) -> n, Prec n | ETConstr (NextLevel,_) -> from, L | _ -> 0, E (* ?? *) (* Some breaking examples *) (* "x = y" : "x /1 = y" (breaks before any symbol) *) (* "x =S y" : "x /1 =S /1 y" (protect from confusion; each side for symmetry)*) (* "+ {" : "+ {" may breaks reversibility without space but oth. 
not elegant *) (* "x y" : "x spc y" *) (* "{ x } + { y }" : "{ x } / + { y }" *) (* "< x , y > { z , t }" : "< x , / y > / { z , / t }" *) let is_left_bracket s = let l = String.length s in l <> 0 & (s.[0] = '{' or s.[0] = '[' or s.[0] = '(') let is_right_bracket s = let l = String.length s in l <> 0 & (s.[l-1] = '}' or s.[l-1] = ']' or s.[l-1] = ')') let is_comma s = let l = String.length s in l <> 0 & (s.[0] = ',' or s.[0] = ';') let is_operator s = let l = String.length s in l <> 0 & (s.[0] = '+' or s.[0] = '*' or s.[0] = '=' or s.[0] = '-' or s.[0] = '/' or s.[0] = '<' or s.[0] = '>' or s.[0] = '@' or s.[0] = '\\' or s.[0] = '&' or s.[0] = '~' or s.[0] = '$') let is_prod_ident = function | Terminal s when is_letter s.[0] or s.[0] = '_' -> true | _ -> false let rec is_non_terminal = function | NonTerminal _ | SProdList _ -> true | _ -> false let add_break n l = UnpCut (PpBrk(n,0)) :: l let check_open_binder isopen sl m = if isopen & sl <> [] then errorlabstrm "" (str "as " ++ pr_id m ++ str " is a non-closed binder, no such \"" ++ prlist_with_sep spc (function Terminal s -> str s | _ -> assert false) sl ++ strbrk "\" is allowed to occur.") (* Heuristics for building default printing rules *) type previous_prod_status = NoBreak | CanBreak let make_hunks etyps symbols from = let vars,typs = List.split etyps in let rec make ws = function | NonTerminal m :: prods -> let i = list_index m vars in let _,prec = precedence_of_entry_type from (List.nth typs (i-1)) in let u = UnpMetaVar (i,prec) in if prods <> [] && is_non_terminal (List.hd prods) then u :: add_break 1 (make CanBreak prods) else u :: make CanBreak prods | Terminal s :: prods when List.exists is_non_terminal prods -> if is_comma s then UnpTerminal s :: add_break 1 (make NoBreak prods) else if is_right_bracket s then UnpTerminal s :: add_break 0 (make NoBreak prods) else if is_left_bracket s then if ws = CanBreak then add_break 1 (UnpTerminal s :: make CanBreak prods) else UnpTerminal s :: make CanBreak prods else if is_operator s then if ws = CanBreak then UnpTerminal (" "^s) :: add_break 1 (make NoBreak prods) else UnpTerminal s :: add_break 1 (make NoBreak prods) else if is_ident_tail s.[String.length s - 1] then let sep = if is_prod_ident (List.hd prods) then "" else " " in if ws = CanBreak then add_break 1 (UnpTerminal (s^sep) :: make CanBreak prods) else UnpTerminal (s^sep) :: make CanBreak prods else if ws = CanBreak then add_break 1 (UnpTerminal (s^" ") :: make CanBreak prods) else UnpTerminal s :: make CanBreak prods | Terminal s :: prods -> if is_right_bracket s then UnpTerminal s :: make NoBreak prods else if ws = CanBreak then add_break 1 (UnpTerminal s :: make NoBreak prods) else UnpTerminal s :: make NoBreak prods | Break n :: prods -> add_break n (make NoBreak prods) | SProdList (m,sl) :: prods -> let i = list_index m vars in let typ = List.nth typs (i-1) in let _,prec = precedence_of_entry_type from typ in let sl' = (* If no separator: add a break *) if sl = [] then add_break 1 [] (* We add NonTerminal for simulation but remove it afterwards *) else snd (list_sep_last (make NoBreak (sl@[NonTerminal m]))) in let hunk = match typ with | ETConstr _ -> UnpListMetaVar (i,prec,sl') | ETBinder isopen -> check_open_binder isopen sl m; UnpBinderListMetaVar (i,isopen,sl') | _ -> assert false in hunk :: make CanBreak prods | [] -> [] in make NoBreak symbols (* Build default printing rules from explicit format *) let error_format () = error "The format does not match the notation." 
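(* Illustrative sketch (added annotation, not part of the original sources):
   a user-provided format string such as

       format "x  =  '/'  y"

   is turned by [parse_format] above into a list of unparsing hunks:
   terminals ([UnpTerminal]) for "x", "=" and "y", and a cut ([UnpCut])
   for the quoted '/'.  [hunks_of_format] below then walks the notation
   symbols and this hunk list in parallel, replacing the hunks that
   correspond to the notation variables x and y by [UnpMetaVar] entries
   carrying their printing precedences. *)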
let rec split_format_at_ldots hd = function | UnpTerminal s :: fmt when s = string_of_id ldots_var -> List.rev hd, fmt | u :: fmt -> check_no_ldots_in_box u; split_format_at_ldots (u::hd) fmt | [] -> raise Exit and check_no_ldots_in_box = function | UnpBox (_,fmt) -> (try let _ = split_format_at_ldots [] fmt in error ("The special symbol \"..\" must occur at the same formatting depth than the variables of which it is the ellipse.") with Exit -> ()) | _ -> () let skip_var_in_recursive_format = function | UnpTerminal _ :: sl (* skip first var *) -> (* To do, though not so important: check that the names match the names in the notation *) sl | _ -> error_format () let read_recursive_format sl fmt = let get_head fmt = let sl = skip_var_in_recursive_format fmt in try split_format_at_ldots [] sl with Exit -> error_format () in let rec get_tail = function | a :: sepfmt, b :: fmt when a = b -> get_tail (sepfmt, fmt) | [], tail -> skip_var_in_recursive_format tail | _ -> error "The format is not the same on the right and left hand side of the special token \"..\"." in let slfmt, fmt = get_head fmt in slfmt, get_tail (slfmt, fmt) let hunks_of_format (from,(vars,typs)) symfmt = let rec aux = function | symbs, (UnpTerminal s' as u) :: fmt when s' = String.make (String.length s') ' ' -> let symbs, l = aux (symbs,fmt) in symbs, u :: l | Terminal s :: symbs, (UnpTerminal s') :: fmt when s = drop_simple_quotes s' -> let symbs, l = aux (symbs,fmt) in symbs, UnpTerminal s :: l | NonTerminal s :: symbs, UnpTerminal s' :: fmt when s = id_of_string s' -> let i = list_index s vars in let _,prec = precedence_of_entry_type from (List.nth typs (i-1)) in let symbs, l = aux (symbs,fmt) in symbs, UnpMetaVar (i,prec) :: l | symbs, UnpBox (a,b) :: fmt -> let symbs', b' = aux (symbs,b) in let symbs', l = aux (symbs',fmt) in symbs', UnpBox (a,b') :: l | symbs, (UnpCut _ as u) :: fmt -> let symbs, l = aux (symbs,fmt) in symbs, u :: l | SProdList (m,sl) :: symbs, fmt -> let i = list_index m vars in let typ = List.nth typs (i-1) in let _,prec = precedence_of_entry_type from typ in let slfmt,fmt = read_recursive_format sl fmt in let sl, slfmt = aux (sl,slfmt) in if sl <> [] then error_format (); let symbs, l = aux (symbs,fmt) in let hunk = match typ with | ETConstr _ -> UnpListMetaVar (i,prec,slfmt) | ETBinder isopen -> check_open_binder isopen sl m; UnpBinderListMetaVar (i,isopen,slfmt) | _ -> assert false in symbs, hunk :: l | symbs, [] -> symbs, [] | _, _ -> error_format () in match aux symfmt with | [], l -> l | _ -> error_format () (**********************************************************************) (* Build parsing rules *) let assoc_of_type n (_,typ) = precedence_of_entry_type n typ let is_not_small_constr = function ETConstr _ -> true | ETOther("constr","binder_constr") -> true | _ -> false let rec define_keywords_aux = function | GramConstrNonTerminal(e,Some _) as n1 :: GramConstrTerminal(IDENT k) :: l when is_not_small_constr e -> message ("Identifier '"^k^"' now a keyword"); Lexer.add_keyword k; n1 :: GramConstrTerminal(KEYWORD k) :: define_keywords_aux l | n :: l -> n :: define_keywords_aux l | [] -> [] (* Ensure that IDENT articulation terminal symbols are keywords *) let define_keywords = function | GramConstrTerminal(IDENT k)::l -> message ("Identifier '"^k^"' now a keyword"); Lexer.add_keyword k; GramConstrTerminal(KEYWORD k) :: define_keywords_aux l | l -> define_keywords_aux l let distribute a ll = List.map (fun l -> a @ l) ll (* Expand LIST1(t,sep) into the combination of t and t;sep;LIST1(t,sep) as 
many times as expected in [n] argument *) let rec expand_list_rule typ tkl x n i hds ll = if i = n then let hds = GramConstrListMark (n,true) :: hds @ [GramConstrNonTerminal (ETConstrList (typ,tkl), Some x)] in distribute hds ll else let camlp4_message_name = Some (add_suffix x ("_"^string_of_int n)) in let main = GramConstrNonTerminal (ETConstr typ, camlp4_message_name) in let tks = List.map (fun x -> GramConstrTerminal x) tkl in distribute (GramConstrListMark (i+1,false) :: hds @ [main]) ll @ expand_list_rule typ tkl x n (i+1) (main :: tks @ hds) ll let make_production etyps symbols = let prod = List.fold_right (fun t ll -> match t with | NonTerminal m -> let typ = List.assoc m etyps in distribute [GramConstrNonTerminal (typ, Some m)] ll | Terminal s -> distribute [GramConstrTerminal (terminal s)] ll | Break _ -> ll | SProdList (x,sl) -> let tkl = List.flatten (List.map (function Terminal s -> [terminal s] | Break _ -> [] | _ -> anomaly "Found a non terminal token in recursive notation separator") sl) in match List.assoc x etyps with | ETConstr typ -> expand_list_rule typ tkl x 1 0 [] ll | ETBinder o -> distribute [GramConstrNonTerminal (ETBinderList (o,tkl), Some x)] ll | _ -> error "Components of recursive patterns in notation must be terms or binders.") symbols [[]] in List.map define_keywords prod let rec find_symbols c_current c_next c_last = function | [] -> [] | NonTerminal id :: sl -> let prec = if sl <> [] then c_current else c_last in (id, prec) :: (find_symbols c_next c_next c_last sl) | Terminal s :: sl -> find_symbols c_next c_next c_last sl | Break n :: sl -> find_symbols c_current c_next c_last sl | SProdList (x,_) :: sl' -> (x,c_next)::(find_symbols c_next c_next c_last sl') let border = function | (_,ETConstr(_,BorderProd (_,a))) :: _ -> a | _ -> None let recompute_assoc typs = match border typs, border (List.rev typs) with | Some LeftA, Some RightA -> assert false | Some LeftA, _ -> Some LeftA | _, Some RightA -> Some RightA | _ -> None (**************************************************************************) (* Registration of syntax extensions (parsing/printing, no interpretation)*) let pr_arg_level from = function | (n,L) when n=from -> str "at next level" | (n,E) -> str "at level " ++ int n | (n,L) -> str "at level below " ++ int n | (n,Prec m) when m=n -> str "at level " ++ int n | (n,_) -> str "Unknown level" let pr_level ntn (from,args) = str "at level " ++ int from ++ spc () ++ str "with arguments" ++ spc() ++ prlist_with_sep pr_comma (pr_arg_level from) args let error_incompatible_level ntn oldprec prec = errorlabstrm "" (str ("Notation "^ntn^" is already defined") ++ spc() ++ pr_level ntn oldprec ++ spc() ++ str "while it is now required to be" ++ spc() ++ pr_level ntn prec ++ str ".") let cache_one_syntax_extension (typs,prec,ntn,gr,pp) = try let oldprec = Notation.level_of_notation ntn in if prec <> oldprec then error_incompatible_level ntn oldprec prec with Not_found -> (* Reserve the notation level *) Notation.declare_notation_level ntn prec; (* Declare the parsing rule *) Egrammar.extend_grammar (Egrammar.Notation (prec,typs,gr)); (* Declare the printing rule *) Notation.declare_notation_printing_rule ntn (pp,fst prec) let cache_syntax_extension (_,(_,sy_rules)) = List.iter cache_one_syntax_extension sy_rules let subst_parsing_rule subst x = x let subst_printing_rule subst x = x let subst_syntax_extension (subst,(local,sy)) = (local, List.map (fun (typs,prec,ntn,gr,pp) -> (typs,prec,ntn,subst_parsing_rule subst gr,subst_printing_rule subst pp)) sy) let 
classify_syntax_definition (local,_ as o) = if local then Dispose else Substitute o type syntax_extension_obj = bool * (notation_var_internalization_type list * Notation.level * notation * notation_grammar * unparsing list) list let inSyntaxExtension : syntax_extension_obj -> obj = declare_object {(default_object "SYNTAX-EXTENSION") with open_function = (fun i o -> if i=1 then cache_syntax_extension o); cache_function = cache_syntax_extension; subst_function = subst_syntax_extension; classify_function = classify_syntax_definition} (**************************************************************************) (* Precedences *) (* Interpreting user-provided modifiers *) let interp_modifiers modl = let onlyparsing = ref false in let rec interp assoc level etyps format = function | [] -> (assoc,level,etyps,!onlyparsing,format) | SetEntryType (s,typ) :: l -> let id = id_of_string s in if List.mem_assoc id etyps then error (s^" is already assigned to an entry or constr level."); interp assoc level ((id,typ)::etyps) format l | SetItemLevel ([],n) :: l -> interp assoc level etyps format l | SetItemLevel (s::idl,n) :: l -> let id = id_of_string s in if List.mem_assoc id etyps then error (s^" is already assigned to an entry or constr level."); let typ = ETConstr (n,()) in interp assoc level ((id,typ)::etyps) format (SetItemLevel (idl,n)::l) | SetLevel n :: l -> if level <> None then error "A level is given more than once."; interp assoc (Some n) etyps format l | SetAssoc a :: l -> if assoc <> None then error"An associativity is given more than once."; interp (Some a) level etyps format l | SetOnlyParsing _ :: l -> onlyparsing := true; interp assoc level etyps format l | SetFormat s :: l -> if format <> None then error "A format is given more than once."; interp assoc level etyps (Some s) l in interp None None [] None modl let check_infix_modifiers modifiers = let (assoc,level,t,b,fmt) = interp_modifiers modifiers in if t <> [] then error "Explicit entry level or type unexpected in infix notation." let no_syntax_modifiers = function | [] | [SetOnlyParsing _] -> true | _ -> false let is_only_parsing = function | [SetOnlyParsing _] -> true | _ -> false (* Compute precedences from modifiers (or find default ones) *) let set_entry_type etyps (x,typ) = let typ = try match List.assoc x etyps, typ with | ETConstr (n,()), (_,BorderProd (left,_)) -> ETConstr (n,BorderProd (left,None)) | ETConstr (n,()), (_,InternalProd) -> ETConstr (n,InternalProd) | (ETPattern | ETName | ETBigint | ETOther _ | ETReference | ETBinder _ as t), _ -> t | (ETBinderList _ |ETConstrList _), _ -> assert false with Not_found -> ETConstr typ in (x,typ) let join_auxiliary_recursive_types recvars etyps = List.fold_right (fun (x,y) typs -> let xtyp = try Some (List.assoc x etyps) with Not_found -> None in let ytyp = try Some (List.assoc y etyps) with Not_found -> None in match xtyp,ytyp with | None, None -> typs | Some _, None -> typs | None, Some ytyp -> (x,ytyp)::typs | Some xtyp, Some ytyp when xtyp = ytyp -> typs | Some xtyp, Some ytyp -> errorlabstrm "" (strbrk "In " ++ pr_id x ++ str " .. " ++ pr_id y ++ strbrk ", both ends have incompatible types.")) recvars etyps let internalization_type_of_entry_type = function | ETConstr _ -> NtnInternTypeConstr | ETBigint | ETReference -> NtnInternTypeConstr | ETBinder _ -> NtnInternTypeBinder | ETName -> NtnInternTypeIdent | ETPattern | ETOther _ -> error "Not supported." 
| ETBinderList _ | ETConstrList _ -> assert false let set_internalization_type typs = List.map (down_snd internalization_type_of_entry_type) typs let make_internalization_vars recvars mainvars typs = let maintyps = List.combine mainvars typs in let extratyps = List.map (fun (x,y) -> (y,List.assoc x maintyps)) recvars in maintyps@extratyps let make_interpretation_type isrec = function | NtnInternTypeConstr when isrec -> NtnTypeConstrList | NtnInternTypeConstr | NtnInternTypeIdent -> NtnTypeConstr | NtnInternTypeBinder when isrec -> NtnTypeBinderList | NtnInternTypeBinder -> error "Type not allowed in recursive notation." let make_interpretation_vars recvars allvars = List.iter (fun (x,y) -> if fst (List.assoc x allvars) <> fst (List.assoc y allvars) then error_not_same_scope x y) recvars; let useless_recvars = List.map snd recvars in let mainvars = List.filter (fun (x,_) -> not (List.mem x useless_recvars)) allvars in List.map (fun (x,(sc,typ)) -> (x,(sc,make_interpretation_type (List.mem_assoc x recvars) typ))) mainvars let check_rule_productivity l = if List.for_all (function NonTerminal _ -> true | _ -> false) l then error "A notation must include at least one symbol."; if (match l with SProdList _ :: _ -> true | _ -> false) then error "A recursive notation must start with at least one symbol." let is_not_printable = function | AVar _ -> warning "This notation will not be used for printing as it is bound to a \nsingle variable"; true | _ -> false let find_precedence lev etyps symbols = match symbols with | NonTerminal x :: _ -> (try match List.assoc x etyps with | ETConstr _ -> error "The level of the leftmost non-terminal cannot be changed." | ETName | ETBigint | ETReference -> if lev = None then ([msgnl,str "Setting notation at level 0."],0) else if lev <> Some 0 then error "A notation starting with an atomic expression must be at level 0." else ([],0) | ETPattern | ETBinder _ | ETOther _ -> (* Give a default ? *) if lev = None then error "Need an explicit level." else [],Option.get lev | ETConstrList _ | ETBinderList _ -> assert false (* internally used in grammar only *) with Not_found -> if lev = None then error "A left-recursive notation must have an explicit level." else [],Option.get lev) | Terminal _ ::l when (match list_last symbols with Terminal _ -> true |_ -> false) -> if lev = None then ([msgnl,str "Setting notation at level 0."], 0) else [],Option.get lev | _ -> if lev = None then error "Cannot determine the level."; [],Option.get lev let check_curly_brackets_notation_exists () = try let _ = Notation.level_of_notation "{ _ }" in () with Not_found -> error "Notations involving patterns of the form \"{ _ }\" are treated \n\ specially and require that the notation \"{ _ }\" is already reserved." 
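(* Illustrative example (added annotation, not part of the original sources):
   the standard library notation "{ A } + { B }" (sumbool) contains two
   occurrences of the sub-pattern "{ _ }".  [remove_curly_brackets] below
   rewrites its symbol list so that the grammar rule is generated for
   "_ + _", parsing of the braces themselves being delegated to the
   separately reserved "{ _ }" notation, whose existence is enforced by
   [check_curly_brackets_notation_exists] above. *)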
(* Remove patterns of the form "{ _ }", unless it is the "{ _ }" notation *) let remove_curly_brackets l = let rec skip_break acc = function | Break _ as br :: l -> skip_break (br::acc) l | l -> List.rev acc, l in let rec aux deb = function | [] -> [] | Terminal "{" as t1 :: l -> let br,next = skip_break [] l in (match next with | NonTerminal _ as x :: l' as l0 -> let br',next' = skip_break [] l' in (match next' with | Terminal "}" as t2 :: l'' as l1 -> if l <> l0 or l' <> l1 then warning "Skipping spaces inside curly brackets"; if deb & l'' = [] then [t1;x;t2] else begin check_curly_brackets_notation_exists (); x :: aux false l'' end | l1 -> t1 :: br @ x :: br' @ aux false l1) | l0 -> t1 :: aux false l0) | x :: l -> x :: aux false l in aux true l let compute_syntax_data (df,modifiers) = let (assoc,n,etyps,onlyparse,fmt) = interp_modifiers modifiers in let assoc = match assoc with None -> (* default *) Some NonA | a -> a in let toks = split_notation_string df in let (recvars,mainvars,symbols) = analyze_notation_tokens toks in let ntn_for_interp = make_notation_key symbols in let symbols' = remove_curly_brackets symbols in let need_squash = (symbols <> symbols') in let ntn_for_grammar = make_notation_key symbols' in check_rule_productivity symbols'; let msgs,n = find_precedence n etyps symbols' in let innerlevel = NumLevel 200 in let typs = find_symbols (NumLevel n,BorderProd(Left,assoc)) (innerlevel,InternalProd) (NumLevel n,BorderProd(Right,assoc)) symbols' in (* To globalize... *) let etyps = join_auxiliary_recursive_types recvars etyps in let sy_typs = List.map (set_entry_type etyps) typs in let prec = (n,List.map (assoc_of_type n) sy_typs) in let i_typs = set_internalization_type sy_typs in let sy_data = (n,sy_typs,symbols',fmt) in let sy_fulldata = (i_typs,ntn_for_grammar,prec,need_squash,sy_data) in let df' = ((Lib.library_dp(),Lib.current_dirpath true),df) in let i_data = (onlyparse,recvars,mainvars,(ntn_for_interp,df')) in (* Return relevant data for interpretation and for parsing/printing *) (msgs,i_data,i_typs,sy_fulldata) let compute_pure_syntax_data (df,mods) = let (msgs,(onlyparse,_,_,_),_,sy_data) = compute_syntax_data (df,mods) in let msgs = if onlyparse then (msg_warning, str "The only parsing modifier has no effect in Reserved Notation.")::msgs else msgs in msgs, sy_data (**********************************************************************) (* Registration of notations interpretation *) let load_notation _ (_,(_,scope,pat,onlyparse,_)) = Option.iter Notation.declare_scope scope let open_notation i (_,(_,scope,pat,onlyparse,(ntn,df))) = if i=1 & not (Notation.exists_notation_in_scope scope ntn pat) then begin (* Declare the interpretation *) Notation.declare_notation_interpretation ntn scope pat df; (* Declare the uninterpretation *) if not onlyparse then Notation.declare_uninterpretation (NotationRule (scope,ntn)) pat end let cache_notation o = load_notation 1 o; open_notation 1 o let subst_notation (subst,(lc,scope,pat,b,ndf)) = (lc,scope,subst_interpretation subst pat,b,ndf) let classify_notation (local,_,_,_,_ as o) = if local then Dispose else Substitute o type notation_obj = bool * scope_name option * interpretation * bool * (notation * notation_location) let inNotation : notation_obj -> obj = declare_object {(default_object "NOTATION") with open_function = open_notation; cache_function = cache_notation; subst_function = subst_notation; load_function = load_notation; classify_function = classify_notation} 
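(* Summary note (added annotation, not part of the original sources):
   a [Notation] command therefore results in at most two library objects:
   an [inSyntaxExtension] leaf carrying the parsing and printing rules,
   and an [inNotation] leaf carrying the interpretation; see
   [add_notation_in_scope] and [add_notation_interpretation_core] below.
   Local notations are classified as [Dispose] (they are not exported),
   while non-local ones are [Substitute]d when the enclosing module is
   substituted. *)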
(**********************************************************************) let with_lib_stk_protection f x = let fs = Lib.freeze () in try let a = f x in Lib.unfreeze fs; a with reraise -> Lib.unfreeze fs; raise reraise let with_syntax_protection f x = with_lib_stk_protection (with_grammar_rule_protection (with_notation_protection f)) x (**********************************************************************) (* Recovering existing syntax *) let contract_notation ntn = if ntn = "{ _ }" then ntn else let rec aux ntn i = if i <= String.length ntn - 5 then let ntn' = if String.sub ntn i 5 = "{ _ }" then String.sub ntn 0 i ^ "_" ^ String.sub ntn (i+5) (String.length ntn -i-5) else ntn in aux ntn' (i+1) else ntn in aux ntn 0 exception NoSyntaxRule let recover_syntax ntn = try let prec = Notation.level_of_notation ntn in let pp_rule,_ = Notation.find_notation_printing_rule ntn in let typs,pa_rule = Egrammar.recover_notation_grammar ntn prec in (typs,prec,ntn,pa_rule,pp_rule) with Not_found -> raise NoSyntaxRule let recover_squash_syntax () = recover_syntax "{ _ }" let recover_notation_syntax rawntn = let ntn = contract_notation rawntn in let (typs,_,_,_,_ as sy_rule) = recover_syntax ntn in let need_squash = ntn<>rawntn in typs,if need_squash then [sy_rule; recover_squash_syntax ()] else [sy_rule] (**********************************************************************) (* Main entry point for building parsing and printing rules *) let make_pa_rule (n,typs,symbols,_) ntn = let assoc = recompute_assoc typs in let prod = make_production typs symbols in (n,assoc,ntn,prod) let make_pp_rule (n,typs,symbols,fmt) = match fmt with | None -> [UnpBox (PpHOVB 0, make_hunks typs symbols n)] | Some fmt -> hunks_of_format (n,List.split typs) (symbols,parse_format fmt) let make_syntax_rules (i_typs,ntn,prec,need_squash,sy_data) = let pa_rule = make_pa_rule sy_data ntn in let pp_rule = make_pp_rule sy_data in let sy_rule = (i_typs,prec,ntn,pa_rule,pp_rule) in (* By construction, the rule for "{ _ }" is declared, but we need to redeclare it because the file where it is declared needs not be open when the current file opens (especially in presence of -nois) *) if need_squash then [sy_rule; recover_squash_syntax ()] else [sy_rule] (**********************************************************************) (* Main functions about notations *) let add_notation_in_scope local df c mods scope = let (msgs,i_data,i_typs,sy_data) = compute_syntax_data (df,mods) in (* Prepare the parsing and printing rules *) let sy_rules = make_syntax_rules sy_data in (* Prepare the interpretation *) let (onlyparse,recvars,mainvars,df') = i_data in let i_vars = make_internalization_vars recvars mainvars i_typs in let (acvars,ac) = interp_aconstr i_vars recvars c in let a = (make_interpretation_vars recvars acvars,ac) in let onlyparse = onlyparse or is_not_printable ac in (* Ready to change the global state *) Flags.if_verbose (List.iter (fun (f,x) -> f x)) msgs; Lib.add_anonymous_leaf (inSyntaxExtension(local,sy_rules)); Lib.add_anonymous_leaf (inNotation (local,scope,a,onlyparse,df')); df' let add_notation_interpretation_core local df ?(impls=empty_internalization_env) c scope onlyparse = let dfs = split_notation_string df in let (recvars,mainvars,symbs) = analyze_notation_tokens dfs in (* Recover types of variables and pa/pp rules; redeclare them if needed *) let i_typs = if not (is_numeral symbs) then begin let i_typs,sy_rules = recover_notation_syntax (make_notation_key symbs) in Lib.add_anonymous_leaf (inSyntaxExtension (local,sy_rules)); 
i_typs end else [] in (* Declare interpretation *) let path = (Lib.library_dp(),Lib.current_dirpath true) in let df' = (make_notation_key symbs,(path,df)) in let i_vars = make_internalization_vars recvars mainvars i_typs in let (acvars,ac) = interp_aconstr ~impls i_vars recvars c in let a = (make_interpretation_vars recvars acvars,ac) in let onlyparse = onlyparse or is_not_printable ac in Lib.add_anonymous_leaf (inNotation (local,scope,a,onlyparse,df')); df' (* Notations without interpretation (Reserved Notation) *) let add_syntax_extension local ((loc,df),mods) = let msgs,sy_data = compute_pure_syntax_data (df,mods) in let sy_rules = make_syntax_rules sy_data in Flags.if_verbose (List.iter (fun (f,x) -> f x)) msgs; Lib.add_anonymous_leaf (inSyntaxExtension(local,sy_rules)) (* Notations with only interpretation *) let add_notation_interpretation ((loc,df),c,sc) = let df' = add_notation_interpretation_core false df c sc false in Dumpglob.dump_notation (loc,df') sc true let set_notation_for_interpretation impls ((_,df),c,sc) = (try ignore (silently (add_notation_interpretation_core false df ~impls c sc) false); with NoSyntaxRule -> error "Parsing rule for this notation has to be previously declared."); Option.iter (fun sc -> Notation.open_close_scope (false,true,sc)) sc (* Main entry point *) let add_notation local c ((loc,df),modifiers) sc = let df' = if no_syntax_modifiers modifiers then (* No syntax data: try to rely on a previously declared rule *) let onlyparse = is_only_parsing modifiers in try add_notation_interpretation_core local df c sc onlyparse with NoSyntaxRule -> (* Try to determine a default syntax rule *) add_notation_in_scope local df c modifiers sc else (* Declare both syntax and interpretation *) add_notation_in_scope local df c modifiers sc in Dumpglob.dump_notation (loc,df') sc true (* Infix notations *) let inject_var x = CRef (Ident (dummy_loc, id_of_string x)) let add_infix local ((loc,inf),modifiers) pr sc = check_infix_modifiers modifiers; (* check the precedence *) let metas = [inject_var "x"; inject_var "y"] in let c = mkAppC (pr,metas) in let df = "x "^(quote_notation_token inf)^" y" in add_notation local c ((loc,df),modifiers) sc (**********************************************************************) (* Delimiters and classes bound to scopes *) type scope_command = ScopeDelim of string | ScopeClasses of Classops.cl_typ let load_scope_command _ (_,(scope,dlm)) = Notation.declare_scope scope let open_scope_command i (_,(scope,o)) = if i=1 then match o with | ScopeDelim dlm -> Notation.declare_delimiters scope dlm | ScopeClasses cl -> Notation.declare_class_scope scope cl let cache_scope_command o = load_scope_command 1 o; open_scope_command 1 o let subst_scope_command (subst,(scope,o as x)) = match o with | ScopeClasses cl -> let cl' = Classops.subst_cl_typ subst cl in if cl'==cl then x else scope, ScopeClasses cl' | _ -> x let inScopeCommand : scope_name * scope_command -> obj = declare_object {(default_object "DELIMITERS") with cache_function = cache_scope_command; open_function = open_scope_command; load_function = load_scope_command; subst_function = subst_scope_command; classify_function = (fun obj -> Substitute obj)} let add_delimiters scope key = Lib.add_anonymous_leaf (inScopeCommand(scope,ScopeDelim key)) let add_class_scope scope cl = Lib.add_anonymous_leaf (inScopeCommand(scope,ScopeClasses cl)) (* Check if abbreviation to a name and avoid early insertion of maximal implicit arguments *) let try_interp_name_alias = function | [], CRef ref -> 
intern_reference ref | _ -> raise Not_found let add_syntactic_definition ident (vars,c) local onlyparse = let vars,pat = try [], ARef (try_interp_name_alias (vars,c)) with Not_found -> let i_vars = List.map (fun id -> (id,NtnInternTypeConstr)) vars in let vars,pat = interp_aconstr i_vars [] c in List.map (fun (id,(sc,kind)) -> (id,sc)) vars, pat in let onlyparse = match onlyparse with | None when (is_not_printable pat) -> Some Flags.Current | p -> p in Syntax_def.declare_syntactic_definition local ident onlyparse (vars,pat) coq-8.4pl2/toplevel/classes.ml0000640000175000001440000002700612121620060015443 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let path = try Auto.PathHints [global_of_constr inst] with e when Errors.noncritical e -> Auto.PathAny in Flags.silently (fun () -> Auto.add_hints local [typeclasses_db] (Auto.HintsResolveEntry [pri, false, path, inst])) ()); Typeclasses.register_set_typeclass_transparency set_typeclass_transparency; Typeclasses.register_classes_transparent_state (fun () -> Auto.Hint_db.transparent_state (Auto.searchtable_map typeclasses_db)) let declare_class g = match global g with | ConstRef x -> Typeclasses.add_constant_class x | IndRef x -> Typeclasses.add_inductive_class x | _ -> user_err_loc (loc_of_reference g, "declare_class", Pp.str"Unsupported class type, only constants and inductives are allowed") (** TODO: add subinstances *) let existing_instance glob g = let c = global g in let instance = Typing.type_of (Global.env ()) Evd.empty (constr_of_global c) in let _, r = decompose_prod_assum instance in match class_of_constr r with | Some (_, (tc, _)) -> add_instance (new_instance tc None glob c) | None -> user_err_loc (loc_of_reference g, "declare_instance", Pp.str "Constant does not build instances of a declared type class.") let mismatched_params env n m = mismatched_ctx_inst env Parameters n m let mismatched_props env n m = mismatched_ctx_inst env Properties n m type binder_list = (identifier located * bool * constr_expr) list (* Declare everything in the parameters as implicit, and the class instance as well *) open Topconstr let type_ctx_instance evars env ctx inst subst = let rec aux (subst, instctx) l = function (na, b, t) :: ctx -> let t' = substl subst t in let c', l = match b with | None -> interp_casted_constr_evars evars env (List.hd l) t', List.tl l | Some b -> substl subst b, l in let d = na, Some c', t' in aux (c' :: subst, d :: instctx) l ctx | [] -> subst in aux (subst, []) inst (List.rev ctx) let refine_ref = ref (fun _ -> assert(false)) let id_of_class cl = match cl.cl_impl with | ConstRef kn -> let _,_,l = repr_con kn in id_of_label l | IndRef (kn,i) -> let mip = (Environ.lookup_mind kn (Global.env ())).Declarations.mind_packets in mip.(0).Declarations.mind_typename | _ -> assert false open Pp let ($$) g f = fun x -> g (f x) let instance_hook k pri global imps ?hook cst = Impargs.maybe_declare_manual_implicits false cst ~enriching:false imps; Typeclasses.declare_instance pri (not global) cst; (match hook with Some h -> h cst | None -> ()) let declare_instance_constant k pri global imps ?hook id term termtype = let cdecl = let kind = IsDefinition Instance in let entry = { const_entry_body = term; const_entry_secctx = None; const_entry_type = Some termtype; const_entry_opaque = false } in DefinitionEntry entry, kind in let kn = Declare.declare_constant id cdecl in Declare.definition_message id; instance_hook k pri global imps 
?hook (ConstRef kn); id let new_instance ?(abstract=false) ?(global=false) ctx (instid, bk, cl) props ?(generalize=true) ?(tac:Proof_type.tactic option) ?(hook:(global_reference -> unit) option) pri = let env = Global.env() in let evars = ref Evd.empty in let tclass, ids = match bk with | Implicit -> Implicit_quantifiers.implicit_application Idset.empty ~allow_partial:false (fun avoid (clname, (id, _, t)) -> match clname with | Some (cl, b) -> let t = CHole (Util.dummy_loc, None) in t, avoid | None -> failwith ("new instance: under-applied typeclass")) cl | Explicit -> cl, Idset.empty in let tclass = if generalize then CGeneralization (dummy_loc, Implicit, Some AbsPi, tclass) else tclass in let k, cty, ctx', ctx, len, imps, subst = let impls, ((env', ctx), imps) = interp_context_evars evars env ctx in let c', imps' = interp_type_evars_impls ~impls ~evdref:evars ~fail_evar:false env' tclass in let len = List.length ctx in let imps = imps @ Impargs.lift_implicits len imps' in let ctx', c = decompose_prod_assum c' in let ctx'' = ctx' @ ctx in let cl, args = Typeclasses.dest_class_app (push_rel_context ctx'' env) c in let _, args = List.fold_right (fun (na, b, t) (args, args') -> match b with | None -> (List.tl args, List.hd args :: args') | Some b -> (args, substl args' b :: args')) (snd cl.cl_context) (args, []) in cl, c', ctx', ctx, len, imps, args in let id = match snd instid with Name id -> let sp = Lib.make_path id in if Nametab.exists_cci sp then errorlabstrm "new_instance" (Nameops.pr_id id ++ Pp.str " already exists."); id | Anonymous -> let i = Nameops.add_suffix (id_of_class k) "_instance_0" in Namegen.next_global_ident_away i (Termops.ids_of_context env) in let env' = push_rel_context ctx env in evars := Evarutil.nf_evar_map !evars; evars := resolve_typeclasses env !evars; let sigma = !evars in let subst = List.map (Evarutil.nf_evar sigma) subst in if abstract then begin if not (Lib.is_modtype ()) then error "Declare Instance while not in Module Type."; let _, ty_constr = instance_constructor k (List.rev subst) in let termtype = let t = it_mkProd_or_LetIn ty_constr (ctx' @ ctx) in Evarutil.nf_evar !evars t in Evarutil.check_evars env Evd.empty !evars termtype; let cst = Declare.declare_constant ~internal:Declare.KernelSilent id (Entries.ParameterEntry (None,termtype,None), Decl_kinds.IsAssumption Decl_kinds.Logical) in instance_hook k None global imps ?hook (ConstRef cst); id end else begin let props = match props with | Some (CRecord (loc, _, fs)) -> if List.length fs > List.length k.cl_props then mismatched_props env' (List.map snd fs) k.cl_props; Some (Inl fs) | Some t -> Some (Inr t) | None -> None in let subst = match props with | None -> if k.cl_props = [] then Some (Inl subst) else None | Some (Inr term) -> let c = interp_casted_constr_evars evars env' term cty in Some (Inr (c, subst)) | Some (Inl props) -> let get_id = function | Ident id' -> id' | _ -> errorlabstrm "new_instance" (Pp.str "Only local structures are handled") in let props, rest = List.fold_left (fun (props, rest) (id,b,_) -> if b = None then try let (loc_mid, c) = List.find (fun (id', _) -> Name (snd (get_id id')) = id) rest in let rest' = List.filter (fun (id', _) -> Name (snd (get_id id')) <> id) rest in let (loc, mid) = get_id loc_mid in List.iter (fun (n, _, x) -> if n = Name mid then Option.iter (fun x -> Dumpglob.add_glob loc (ConstRef x)) x) k.cl_projs; c :: props, rest' with Not_found -> (CHole (Util.dummy_loc, None) :: props), rest else props, rest) ([], props) k.cl_props in if rest <> [] then 
unbound_method env' k.cl_impl (get_id (fst (List.hd rest))) else Some (Inl (type_ctx_instance evars (push_rel_context ctx' env') k.cl_props props subst)) in evars := Evarutil.nf_evar_map !evars; let term, termtype = match subst with | None -> let termtype = it_mkProd_or_LetIn cty ctx in None, termtype | Some (Inl subst) -> let subst = List.fold_left2 (fun subst' s (_, b, _) -> if b = None then s :: subst' else subst') [] subst (k.cl_props @ snd k.cl_context) in let app, ty_constr = instance_constructor k subst in let termtype = it_mkProd_or_LetIn ty_constr (ctx' @ ctx) in let term = Termops.it_mkLambda_or_LetIn (Option.get app) (ctx' @ ctx) in Some term, termtype | Some (Inr (def, subst)) -> let termtype = it_mkProd_or_LetIn cty ctx in let term = Termops.it_mkLambda_or_LetIn def ctx in Some term, termtype in let _ = evars := Evarutil.nf_evar_map !evars; evars := Typeclasses.resolve_typeclasses ~filter:Typeclasses.no_goals ~fail:true env !evars; (* Try resolving fields that are typeclasses automatically. *) evars := Typeclasses.resolve_typeclasses ~filter:Typeclasses.all_evars ~fail:false env !evars in let termtype = Evarutil.nf_evar !evars termtype in let term = Option.map (Evarutil.nf_evar !evars) term in let evm = undefined_evars !evars in Evarutil.check_evars env Evd.empty !evars termtype; if Evd.is_empty evm && term <> None then declare_instance_constant k pri global imps ?hook id (Option.get term) termtype else begin let kind = Decl_kinds.Global, Decl_kinds.DefinitionBody Decl_kinds.Instance in Flags.silently (fun () -> Lemmas.start_proof id kind termtype (fun _ -> instance_hook k pri global imps ?hook); if term <> None then Pfedit.by (!refine_ref (evm, Option.get term)) else if Flags.is_auto_intros () then Pfedit.by (Refiner.tclDO len Tactics.intro); (match tac with Some tac -> Pfedit.by tac | None -> ())) (); Flags.if_verbose (msg $$ Printer.pr_open_subgoals) (); id end end let named_of_rel_context l = let acc, ctx = List.fold_right (fun (na, b, t) (subst, ctx) -> let id = match na with Anonymous -> raise (Invalid_argument "named_of_rel_context") | Name id -> id in let d = (id, Option.map (substl subst) b, substl subst t) in (mkVar id :: subst, d :: ctx)) l ([], []) in ctx let string_of_global r = string_of_qualid (Nametab.shortest_qualid_of_global Idset.empty r) let context l = let env = Global.env() in let evars = ref Evd.empty in let _, ((env', fullctx), impls) = interp_context_evars evars env l in let fullctx = Evarutil.nf_rel_context_evar !evars fullctx in let ce t = Evarutil.check_evars env Evd.empty !evars t in List.iter (fun (n, b, t) -> Option.iter ce b; ce t) fullctx; let ctx = try named_of_rel_context fullctx with e when Errors.noncritical e -> error "Anonymous variables not allowed in contexts." 
in let fn (id, _, t) = if Lib.is_modtype () && not (Lib.sections_are_opened ()) then let cst = Declare.declare_constant ~internal:Declare.KernelSilent id (ParameterEntry (None,t,None), IsAssumption Logical) in match class_of_constr t with | Some (rels, (tc, args) as _cl) -> add_instance (Typeclasses.new_instance tc None false (ConstRef cst)) (* declare_subclasses (ConstRef cst) cl *) | None -> () else ( let impl = List.exists (fun (x,_) -> match x with ExplByPos (_, Some id') -> id = id' | _ -> false) impls in Command.declare_assumption false (Local (* global *), Definitional) t [] impl (* implicit *) None (* inline *) (dummy_loc, id)) in List.iter fn (List.rev ctx) coq-8.4pl2/toplevel/autoinstance.ml0000640000175000001440000002744412121620060016511 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* rel_context -> constr list -> unit (* * Search algorithm *) let rec subst_evar evar def n c = match kind_of_term c with | Evar (e,_) when e=evar -> lift n def | _ -> map_constr_with_binders (fun n->n+1) (subst_evar evar def) n c let subst_evar_in_evm evar def evm = Evd.fold (fun ev evi acc -> let evar_body = match evi.evar_body with | Evd.Evar_empty -> Evd.Evar_empty | Evd.Evar_defined c -> Evd.Evar_defined (subst_evar evar def 0 c) in let evar_concl = subst_evar evar def 0 evi.evar_concl in Evd.add acc ev {evi with evar_body=evar_body; evar_concl=evar_concl} ) evm empty (* Tries to define ev by c in evd. Fails if ev := c1 and c1 /= c ev : * T1, c : T2 and T1 /= T2. Defines recursively all evars instantiated * by this definition. *) let rec safe_define evm ev c = if not (closedn (-1) c) then raise Termops.CannotFilter else (* msgnl(str"safe_define "++pr_evar_map evm++spc()++str" |- ?"++Util.pr_int ev++str" := "++pr_constr c);*) let evi = (Evd.find evm ev) in let define_subst evm sigma = Util.Intmap.fold ( fun ev (e,c) evm -> match kind_of_term c with Evar (i,_) when i=ev -> evm | _ -> safe_define evm ev (lift (-List.length e) c) ) sigma evm in match evi.evar_body with | Evd.Evar_defined def -> define_subst evm (Termops.filtering [] Reduction.CUMUL def c) | Evd.Evar_empty -> let t = Libtypes.reduce (Typing.type_of (Global.env()) evm c) in let u = Libtypes.reduce (evar_concl evi) in let evm = subst_evar_in_evm ev c evm in define_subst (Evd.define ev c evm) (Termops.filtering [] Reduction.CUMUL t u) let add_gen_ctx (cl,gen,evm) ctx : signature * constr list = let rec really_new_evar () = let ev = Evarutil.new_untyped_evar() in if Evd.is_evar evm ev then really_new_evar() else ev in let add_gen_evar (cl,gen,evm) ev ty : signature = let evm = Evd.add evm ev (Evd.make_evar Environ.empty_named_context_val ty) in (cl,ev::gen,evm) in let rec mksubst b = function | [] -> [] | a::tl -> b::(mksubst (a::b) tl) in let evl = List.map (fun _ -> really_new_evar()) ctx in let evcl = List.map (fun i -> mkEvar (i,[||])) evl in let substl = List.rev (mksubst [] (evcl)) in let ctx = List.map2 (fun s t -> substnl s 0 t) substl ctx in let sign = List.fold_left2 add_gen_evar (cl,gen,evm) (List.rev evl) ctx in sign,evcl (* TODO : for full proof-irrelevance in the search, provide a real compare function for constr instead of Pervasive's one! *) module SubstSet : Set.S with type elt = Termops.subst = Set.Make (struct type t = Termops.subst let compare = Util.Intmap.compare (Pervasives.compare) end) (* searches instatiations in the library for just one evar [ev] of a signature. 
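Candidate instantiations come from Libtypes.search_concl applied to the reduced type of [ev]; each candidate global is applied to a freshly generalized argument context and tentatively assigned to [ev] with safe_define, and candidates rejected by the filtering phase (Termops.CannotFilter) are simply skipped.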
[k] is called on each resulting signature *) let complete_evar (cl,gen,evm:signature) (ev,evi) (k:signature -> unit) = let ev_typ = Libtypes.reduce (evar_concl evi) in let sort_is_prop = is_Prop (Typing.type_of (Global.env()) evm (evar_concl evi)) in (* msgnl(str"cherche "++pr_constr ev_typ++str" pour "++Util.pr_int ev);*) let substs = ref SubstSet.empty in try List.iter ( fun (gr,(pat,_),s) -> let (_,genl,_) = Termops.decompose_prod_letin pat in let genl = List.map (fun (_,_,t) -> t) genl in let ((cl,gen,evm),argl) = add_gen_ctx (cl,gen,evm) genl in let def = applistc (Libnames.constr_of_global gr) argl in (* msgnl(str"essayons ?"++Util.pr_int ev++spc()++str":="++spc() ++pr_constr def++spc()++str":"++spc()++pr_constr (Global.type_of_global gr)*) (*++spc()++str"dans"++spc()++pr_evar_map evm++spc());*) try let evm = safe_define evm ev def in k (cl,gen,evm); if sort_is_prop && SubstSet.mem s !substs then raise Exit; substs := SubstSet.add s !substs with Termops.CannotFilter -> () ) (Libtypes.search_concl ev_typ) with Exit -> () let evm_fold_rev f evm acc = let l = Evd.fold (fun ev evi acc -> (ev,evi)::acc) evm [] in List.fold_left (fun acc (ev,evi) -> f ev evi acc) acc l exception Continue of Evd.evar * Evd.evar_info (* searches matches for all the uninstantiated evars of evd in the context. For each totally instantiated evar_map found, apply k. *) let rec complete_signature (k:signature -> unit) (cl,gen,evm:signature) = try evm_fold_rev ( fun ev evi _ -> if not (is_defined evm ev) && not (List.mem ev gen) then raise (Continue (ev,evi)) ) evm (); k (cl,gen,evm) with Continue (ev,evi) -> complete_evar (cl,gen,evm) (ev,evi) (complete_signature k) (* define all permutations of the evars to evd and call k on the resulting evd *) let complete_with_evars_permut (cl,gen,evm:signature) evl c (k:signature -> unit) : unit = let rec aux evm = List.iter ( fun (ctx,ev) -> let tyl = List.map (fun (_,_,t) -> t) ctx in let ((cl,gen,evm),argl) = add_gen_ctx (cl,gen,evm) tyl in let def = applistc c argl in (* msgnl(str"trouvé def ?"++Util.pr_int ev++str" := "++pr_constr def++str " dans "++pr_evar_map evm);*) try if not (Evd.is_defined evm ev) then let evm = safe_define evm ev def in aux evm; k (cl,gen,evm) with Termops.CannotFilter -> () ) evl in aux evm let new_inst_no = let cnt = ref 0 in fun () -> incr cnt; string_of_int !cnt let make_instance_ident gr = Nameops.add_suffix (Nametab.basename_of_global gr) ("_autoinstance_"^new_inst_no()) let new_instance_message ident typ def = Flags.if_verbose msgnl (str"new instance"++spc() ++Nameops.pr_id ident++spc()++str":"++spc() ++pr_constr typ++spc()++str":="++spc() ++pr_constr def) open Entries let rec deep_refresh_universes c = match kind_of_term c with | Sort (Type _) -> Termops.new_Type() | _ -> map_constr deep_refresh_universes c let declare_record_instance gr ctx params = let ident = make_instance_ident gr in let def = it_mkLambda_or_LetIn (applistc (constr_of_global gr) params) ctx in let def = deep_refresh_universes def in let ce = { const_entry_body= def; const_entry_secctx = None; const_entry_type=None; const_entry_opaque=false } in let cst = Declare.declare_constant ident (DefinitionEntry ce,Decl_kinds.IsDefinition Decl_kinds.StructureComponent) in new_instance_message ident (Typeops.type_of_constant (Global.env()) cst) def let declare_class_instance gr ctx params = let ident = make_instance_ident gr in let cl = Typeclasses.class_info gr in let (def,typ) = Typeclasses.instance_constructor cl params in let (def,typ) = it_mkLambda_or_LetIn (Option.get def) 
ctx, it_mkProd_or_LetIn typ ctx in let def = deep_refresh_universes def in let typ = deep_refresh_universes typ in let ce = Entries.DefinitionEntry { const_entry_type = Some typ; const_entry_secctx = None; const_entry_body= def; const_entry_opaque=false } in try let cst = Declare.declare_constant ident (ce,Decl_kinds.IsDefinition Decl_kinds.Instance) in Typeclasses.add_instance (Typeclasses.new_instance cl (Some 100) true (ConstRef cst)); new_instance_message ident typ def with e when Errors.noncritical e -> msgnl (str"Error defining instance := "++pr_constr def++str" : "++pr_constr typ++str" "++Errors.print e) let rec iter_under_prod (f:rel_context->constr->unit) (ctx:rel_context) t = f ctx t; match kind_of_term t with | Prod (n,t,c) -> iter_under_prod f ((n,None,t)::ctx) c | _ -> () (* main search function: search for total instances containing gr, and apply k to each of them *) let complete_signature_with_def gr deftyp (k:instance_decl_function -> signature -> unit) : unit = let gr_c = Libnames.constr_of_global gr in let (smap:(Libnames.global_reference * Evd.evar_map, ('a * 'b * Term.constr) list * Evd.evar) Gmapl.t ref) = ref Gmapl.empty in iter_under_prod ( fun ctx typ -> List.iter (fun ((cl,ev,evm),_,_) -> (* msgnl(pr_global gr++str" : "++pr_constr typ++str" matche ?"++Util.pr_int ev++str " dans "++pr_evar_map evm);*) smap := Gmapl.add (cl,evm) (ctx,ev) !smap) (Recordops.methods_matching typ) ) [] deftyp; Gmapl.iter ( fun (cl,evm) evl -> let f = if Typeclasses.is_class cl then declare_class_instance else declare_record_instance in complete_with_evars_permut (cl,[],evm) evl gr_c (fun sign -> complete_signature (k f) sign) ) !smap (* * Interface with other parts: hooks & declaration *) let evar_definition evi = match evar_body evi with Evar_empty -> assert false | Evar_defined c -> c let gen_sort_topo l evm = let iter_evar f ev = let rec aux c = match kind_of_term c with Evar (e,_) -> f e | _ -> iter_constr aux c in aux (Evd.evar_concl (Evd.find evm ev)); if Evd.is_defined evm ev then aux (evar_definition (Evd.find evm ev)) in let r = ref [] in let rec dfs ev = iter_evar dfs ev; if not(List.mem ev !r) then r := ev::!r in List.iter dfs l; List.rev !r (* register real typeclass instance given a totally defined evd *) let declare_instance (k:global_reference -> rel_context -> constr list -> unit) (cl,gen,evm:signature) = let evm = Evarutil.nf_evar_map evm in let gen = gen_sort_topo gen evm in let (evm,gen) = List.fold_right (fun ev (evm,gen) -> if Evd.is_defined evm ev then Evd.remove evm ev,gen else evm,(ev::gen)) gen (evm,[]) in (* msgnl(str"instance complète : ["++Util.prlist_with_sep (fun _ -> str";") Util.pr_int gen++str"] : "++spc()++pr_evar_map evm);*) let ngen = List.length gen in let (_,ctx,evm) = List.fold_left ( fun (i,ctx,evm) ev -> let ctx = (Anonymous,None,lift (-i) (Evd.evar_concl(Evd.find evm ev)))::ctx in let evm = subst_evar_in_evm ev (mkRel i) (Evd.remove evm ev) in (i-1,ctx,evm) ) (ngen,[],evm) gen in let fields = List.rev (Evd.fold ( fun ev evi l -> evar_definition evi::l ) evm []) in k cl ctx fields let autoinstance_opt = ref true let search_declaration gr = if !autoinstance_opt && not (Lib.is_modtype()) then let deftyp = Global.type_of_global gr in complete_signature_with_def gr deftyp declare_instance let search_record k cons sign = if !autoinstance_opt && not (Lib.is_modtype()) then complete_signature (declare_instance k) (cons,[],sign) (* let dh_key = Profile.declare_profile "declaration_hook" let ch_key = Profile.declare_profile "class_decl_hook" let 
declaration_hook = Profile.profile1 dh_key declaration_hook let class_decl_hook = Profile.profile1 ch_key class_decl_hook *) (* * Options and bookeeping *) let begin_autoinstance () = if not !autoinstance_opt then ( autoinstance_opt := true; ) let end_autoinstance () = if !autoinstance_opt then ( autoinstance_opt := false; ) let _ = Goptions.declare_bool_option { Goptions.optsync=true; Goptions.optdepr=false; Goptions.optkey=["Autoinstance"]; Goptions.optname="automatic typeclass instance recognition"; Goptions.optread=(fun () -> !autoinstance_opt); Goptions.optwrite=(fun b -> if b then begin_autoinstance() else end_autoinstance()) } coq-8.4pl2/toplevel/vernacinterp.ml0000640000175000001440000000354512121620060016510 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Drop then e else UserError("Vernac.disable_drop",(str"Drop is forbidden.")) (* Table of vernac entries *) let vernac_tab = (Hashtbl.create 51 : (string, Tacexpr.raw_generic_argument list -> unit -> unit) Hashtbl.t) let vinterp_add s f = try Hashtbl.add vernac_tab s f with Failure _ -> errorlabstrm "vinterp_add" (str"Cannot add the vernac command " ++ str s ++ str" twice.") let overwriting_vinterp_add s f = begin try let _ = Hashtbl.find vernac_tab s in Hashtbl.remove vernac_tab s with Not_found -> () end; Hashtbl.add vernac_tab s f let vinterp_map s = try Hashtbl.find vernac_tab s with Not_found -> errorlabstrm "Vernac Interpreter" (str"Cannot find vernac command " ++ str s ++ str".") let vinterp_init () = Hashtbl.clear vernac_tab (* Interpretation of a vernac command *) let call (opn,converted_args) = let loc = ref "Looking up command" in try let callback = vinterp_map opn in loc:= "Checking arguments"; let hunk = callback converted_args in loc:= "Executing command"; hunk() with | Drop -> raise Drop | reraise -> if !Flags.debug then msgnl (str"Vernac Interpreter " ++ str !loc); raise reraise coq-8.4pl2/toplevel/discharge.mli0000640000175000001440000000125512010532755016120 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* work_list -> mutual_inductive_body -> mutual_inductive_entry coq-8.4pl2/toplevel/whelp.ml40000640000175000001440000001711312010532755015221 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !whelp_server_name); optwrite = (fun s -> whelp_server_name := s) } let _ = declare_string_option { optsync = false; optdepr = false; optname = "Whelp getter"; optkey = ["Whelp";"Getter"]; optread = (fun () -> !getter_server_name); optwrite = (fun s -> getter_server_name := s) } let make_whelp_request req c = !whelp_server_name ^ "/apply?xmluri=" ^ !getter_server_name ^ "/getempty¶m.profile=firewall&profile=firewall¶m.keys=d_c%2CC1%2CHC2%2CL¶m.embedkeys=d_c%2CTC1%2CHC2%2CL¶m.thkeys=T1%2CT2%2CL%2CE¶m.prooftreekeys=HAT%2CG%2CHAO%2CL¶m.media-type=text%2Fhtml¶m.thmedia-type=&prooftreemedia-type=¶m.doctype-public=¶m.encoding=¶m.thencoding=¶m.prooftreeencoding=&advanced=no&keys=S%2CT1%2CT2%2CL%2CRT%2CE¶m.expression=" ^ c ^ "¶m.action=" ^ req let b = Buffer.create 16 let url_char c = if 'A' <= c & c <= 'Z' or 'a' <= c & c <= 'z' or '0' <= c & c <= '9' or c ='.' 
then Buffer.add_char b c else Buffer.add_string b (Printf.sprintf "%%%2X" (Char.code c)) let url_string s = String.iter url_char s let rec url_list_with_sep sep f = function | [] -> () | [a] -> f a | a::l -> f a; url_string sep; url_list_with_sep sep f l let url_id id = url_string (string_of_id id) let uri_of_dirpath dir = url_string "cic:/"; url_list_with_sep "/" url_id (List.rev dir) let error_whelp_unknown_reference ref = let qid = Nametab.shortest_qualid_of_global Idset.empty ref in errorlabstrm "" (strbrk "Definitions of the current session, like " ++ pr_qualid qid ++ strbrk ", are not supported in Whelp.") let uri_of_repr_kn ref (mp,dir,l) = match mp with | MPfile sl -> uri_of_dirpath (id_of_label l :: repr_dirpath dir @ repr_dirpath sl) | _ -> error_whelp_unknown_reference ref let url_paren f l = url_char '('; f l; url_char ')' let url_bracket f l = url_char '['; f l; url_char ']' let whelp_of_glob_sort = function | GProp Null -> "Prop" | GProp Pos -> "Set" | GType _ -> "Type" let uri_int n = Buffer.add_string b (string_of_int n) let uri_of_ind_pointer l = url_string ".ind#xpointer"; url_paren (url_list_with_sep "/" uri_int) l let uri_of_global ref = match ref with | VarRef id -> error ("Unknown Whelp reference: "^(string_of_id id)^".") | ConstRef cst -> uri_of_repr_kn ref (repr_con cst); url_string ".con" | IndRef (kn,i) -> uri_of_repr_kn ref (repr_mind kn); uri_of_ind_pointer [1;i+1] | ConstructRef ((kn,i),j) -> uri_of_repr_kn ref (repr_mind kn); uri_of_ind_pointer [1;i+1;j] let whelm_special = id_of_string "WHELM_ANON_VAR" let url_of_name = function | Name id -> url_id id | Anonymous -> url_id whelm_special (* No anon id in Whelp *) let uri_of_binding f (id,c) = url_id id; url_string "\\Assign "; f c let uri_params f = function | [] -> () | l -> url_string "\\subst"; url_bracket (url_list_with_sep ";" (uri_of_binding f)) l let get_discharged_hyp_names sp = List.map basename (get_discharged_hyps sp) let section_parameters = function | GRef (_,(ConstructRef ((induri,_),_) | IndRef (induri,_))) -> get_discharged_hyp_names (path_of_global (IndRef(induri,0))) | GRef (_,(ConstRef cst as ref)) -> get_discharged_hyp_names (path_of_global ref) | _ -> [] let merge vl al = let rec aux acc = function | ([],l) | (_,([] as l)) -> List.rev acc, l | (v::vl,a::al) -> aux ((v,a)::acc) (vl,al) in aux [] (vl,al) let rec uri_of_constr c = match c with | GVar (_,id) -> url_id id | GRef (_,ref) -> uri_of_global ref | GHole _ | GEvar _ -> url_string "?" | GSort (_,s) -> url_string (whelp_of_glob_sort s) | _ -> url_paren (fun () -> match c with | GApp (_,f,args) -> let inst,rest = merge (section_parameters f) args in uri_of_constr f; url_char ' '; uri_params uri_of_constr inst; url_list_with_sep " " uri_of_constr rest | GLambda (_,na,k,ty,c) -> url_string "\\lambda "; url_of_name na; url_string ":"; uri_of_constr ty; url_string "."; uri_of_constr c | GProd (_,Anonymous,k,ty,c) -> uri_of_constr ty; url_string "\\to "; uri_of_constr c | GProd (_,Name id,k,ty,c) -> url_string "\\forall "; url_id id; url_string ":"; uri_of_constr ty; url_string "."; uri_of_constr c | GLetIn (_,na,b,c) -> url_string "let "; url_of_name na; url_string "\\def "; uri_of_constr b; url_string " in "; uri_of_constr c | GCast (_,c, CastConv (_,t)) -> uri_of_constr c; url_string ":"; uri_of_constr t | GRec _ | GIf _ | GLetTuple _ | GCases _ -> error "Whelp does not support pattern-matching and (co-)fixpoint." 
| GVar _ | GRef _ | GHole _ | GEvar _ | GSort _ | GCast (_,_, CastCoerce) -> anomaly "Written w/o parenthesis" | GPatVar _ -> anomaly "Found constructors not supported in constr") () let make_string f x = Buffer.reset b; f x; Buffer.contents b let send_whelp req s = let url = make_whelp_request req s in let command = subst_command_placeholder browser_cmd_fmt url in let _ = run_command (fun x -> x) print_string command in () let whelp_constr req c = let c = detype false [whelm_special] [] c in send_whelp req (make_string uri_of_constr c) let whelp_constr_expr req c = let (sigma,env)= Lemmas.get_current_context () in let _,c = interp_open_constr sigma env c in whelp_constr req c let whelp_locate s = send_whelp "locate" s let whelp_elim ind = send_whelp "elim" (make_string uri_of_global (IndRef ind)) let on_goal f = let { Evd.it=goals ; sigma=sigma } = Proof.V82.subgoals (get_pftreestate ()) in let gls = { Evd.it=List.hd goals ; sigma = sigma } in f (Termops.it_mkNamedProd_or_LetIn (pf_concl gls) (pf_hyps gls)) type whelp_request = | Locate of string | Elim of inductive | Constr of string * constr let whelp = function | Locate s -> whelp_locate s | Elim ind -> whelp_elim ind | Constr (s,c) -> whelp_constr s c VERNAC ARGUMENT EXTEND whelp_constr_request | [ "Match" ] -> [ "match" ] | [ "Instance" ] -> [ "instance" ] END VERNAC COMMAND EXTEND Whelp | [ "Whelp" "Locate" string(s) ] -> [ whelp_locate s ] | [ "Whelp" "Locate" preident(s) ] -> [ whelp_locate s ] | [ "Whelp" "Elim" global(r) ] -> [ whelp_elim (Smartlocate.global_inductive_with_alias r) ] | [ "Whelp" whelp_constr_request(req) constr(c) ] -> [ whelp_constr_expr req c] END VERNAC COMMAND EXTEND WhelpHint | [ "Whelp" "Hint" constr(c) ] -> [ whelp_constr_expr "hint" c ] | [ "Whelp" "Hint" ] -> [ on_goal (whelp_constr "hint") ] END coq-8.4pl2/toplevel/toplevel.mli0000640000175000001440000000311212010532755016013 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string; mutable str : string; (** buffer of already read characters *) mutable len : int; (** number of chars in the buffer *) mutable bols : int list; (** offsets in str of begining of lines *) mutable tokens : Pcoq.Gram.parsable; (** stream of tokens *) mutable start : int } (** stream count of the first char of the buffer *) (** The input buffer of stdin. *) val top_buffer : input_buffer val set_prompt : (unit -> string) -> unit (** Toplevel error explanation, dealing with locations, Drop, Ctrl-D May raise only the following exceptions: [Drop] and [End_of_input], meaning we get out of the Coq loop. *) val print_toplevel_error : exn -> std_ppcmds (** Parse and execute a vernac command. *) val do_vernac : unit -> unit (** Main entry point of Coq: read and execute vernac commands. 
*) val loop : unit -> unit coq-8.4pl2/toplevel/usage.ml0000640000175000001440000001214712010532755015124 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* \n\n" let print_usage_coqc () = print_usage "Usage: coqc file...\n\ \noptions are:\ \n -verbose compile verbosely\ \n -image f specify an alternative executable for Coq\ \n -t keep temporary files\n\n" (* Print the configuration information *) let print_config () = if Coq_config.local then Printf.printf "LOCAL=1\n" else Printf.printf "LOCAL=0\n"; Printf.printf "COQLIB=%s/\n" (Envars.coqlib ()); Printf.printf "DOCDIR=%s/\n" (Envars.docdir ()); Printf.printf "OCAMLDEP=%s\n" Coq_config.ocamldep; Printf.printf "OCAMLC=%s\n" Coq_config.ocamlc; Printf.printf "OCAMLOPT=%s\n" Coq_config.ocamlopt; Printf.printf "OCAMLDOC=%s\n" Coq_config.ocamldoc; Printf.printf "CAMLBIN=%s/\n" (Envars.camlbin ()); Printf.printf "CAMLLIB=%s/\n" (Envars.camllib ()); Printf.printf "CAMLP4=%s\n" Coq_config.camlp4; Printf.printf "CAMLP4BIN=%s/\n" (Envars.camlp4bin ()); Printf.printf "CAMLP4LIB=%s\n" (Envars.camlp4lib ()); Printf.printf "HASNATDYNLINK=%s\n" (if Coq_config.has_natdynlink then "true" else "false") coq-8.4pl2/toplevel/libtypes.mli0000640000175000001440000000226612010532755016025 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* types (** The different types of search available. See term_dnet.mli for more explanations *) val search_pattern : types -> result list val search_concl : types -> result list val search_head_concl : types -> result list val search_eq_concl : constr -> types -> result list coq-8.4pl2/toplevel/ind_tables.ml0000640000175000001440000001453712121620060016117 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr array type individual_scheme_object_function = inductive -> constr type 'a scheme_kind = string let scheme_map = ref Indmap.empty let cache_one_scheme kind (ind,const) = let map = try Indmap.find ind !scheme_map with Not_found -> Stringmap.empty in scheme_map := Indmap.add ind (Stringmap.add kind const map) !scheme_map let cache_scheme (_,(kind,l)) = Array.iter (cache_one_scheme kind) l let subst_one_scheme subst ((mind,i),const) = (* Remark: const is a def: the result of substitution is a constant *) ((subst_ind subst mind,i),fst (subst_con subst const)) let subst_scheme (subst,(kind,l)) = (kind,Array.map (subst_one_scheme subst) l) let discharge_scheme (_,(kind,l)) = Some (kind,Array.map (fun (ind,const) -> (Lib.discharge_inductive ind,Lib.discharge_con const)) l) let inScheme : string * (inductive * constant) array -> obj = declare_object {(default_object "SCHEME") with cache_function = cache_scheme; load_function = (fun _ -> cache_scheme); subst_function = subst_scheme; classify_function = (fun obj -> Substitute obj); discharge_function = discharge_scheme} (**********************************************************************) (* Saving/restoring the table of scheme *) let freeze_schemes () = !scheme_map let unfreeze_schemes sch = scheme_map := sch let init_schemes () = scheme_map := Indmap.empty let _ = Summary.declare_summary "Schemes" { Summary.freeze_function = freeze_schemes; Summary.unfreeze_function = unfreeze_schemes; Summary.init_function = init_schemes } 
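(* Note: the scheme table above maps each inductive (an Indmap) to a Stringmap keyed by scheme kind, whose values are the constants implementing the corresponding schemes; the Summary registration keeps this table consistent with the rest of the global state whenever states are frozen and restored. *)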
(**********************************************************************) (* The table of scheme building functions *) type individual type mutual type scheme_object_function = | MutualSchemeFunction of (mutual_inductive -> constr array) | IndividualSchemeFunction of (inductive -> constr) let scheme_object_table = (Hashtbl.create 17 : (string, string * scheme_object_function) Hashtbl.t) let declare_scheme_object s aux f = (try check_ident ("ind"^s) with e when Errors.noncritical e -> error ("Illegal induction scheme suffix: "^s)); let key = if aux = "" then s else aux in try let _ = Hashtbl.find scheme_object_table key in (* let aux_msg = if aux="" then "" else " (with key "^aux^")" in*) error ("Scheme object "^key^" already declared.") with Not_found -> Hashtbl.add scheme_object_table key (s,f); key let declare_mutual_scheme_object s ?(aux="") f = declare_scheme_object s aux (MutualSchemeFunction f) let declare_individual_scheme_object s ?(aux="") f = declare_scheme_object s aux (IndividualSchemeFunction f) (**********************************************************************) (* Defining/retrieving schemes *) let declare_scheme kind indcl = Lib.add_anonymous_leaf (inScheme (kind,indcl)) let is_visible_name id = try ignore (Nametab.locate (Libnames.qualid_of_ident id)); true with Not_found -> false let compute_name internal id = match internal with | KernelVerbose | UserVerbose -> id | KernelSilent -> Namegen.next_ident_away_from (add_prefix "internal_" id) is_visible_name let define internal id c = let fd = declare_constant ~internal in let id = compute_name internal id in let kn = fd id (DefinitionEntry { const_entry_body = c; const_entry_secctx = None; const_entry_type = None; const_entry_opaque = false }, Decl_kinds.IsDefinition Scheme) in (match internal with | KernelSilent -> () | _-> definition_message id); kn let define_individual_scheme_base kind suff f internal idopt (mind,i as ind) = let c = f ind in let mib = Global.lookup_mind mind in let id = match idopt with | Some id -> id | None -> add_suffix mib.mind_packets.(i).mind_typename suff in let const = define internal id c in declare_scheme kind [|ind,const|]; const let define_individual_scheme kind internal names (mind,i as ind) = match Hashtbl.find scheme_object_table kind with | _,MutualSchemeFunction f -> assert false | s,IndividualSchemeFunction f -> define_individual_scheme_base kind s f internal names ind let define_mutual_scheme_base kind suff f internal names mind = let cl = f mind in let mib = Global.lookup_mind mind in let ids = Array.init (Array.length mib.mind_packets) (fun i -> try List.assoc i names with Not_found -> add_suffix mib.mind_packets.(i).mind_typename suff) in let consts = array_map2 (define internal) ids cl in declare_scheme kind (Array.mapi (fun i cst -> ((mind,i),cst)) consts); consts let define_mutual_scheme kind internal names mind = match Hashtbl.find scheme_object_table kind with | _,IndividualSchemeFunction _ -> assert false | s,MutualSchemeFunction f -> define_mutual_scheme_base kind s f internal names mind let find_scheme kind (mind,i as ind) = try Stringmap.find kind (Indmap.find ind !scheme_map) with Not_found -> match Hashtbl.find scheme_object_table kind with | s,IndividualSchemeFunction f -> define_individual_scheme_base kind s f KernelSilent None ind | s,MutualSchemeFunction f -> (define_mutual_scheme_base kind s f KernelSilent [] mind).(i) let check_scheme kind ind = try let _ = Stringmap.find kind (Indmap.find ind !scheme_map) in true with Not_found -> false 
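(* Illustrative sketch, not part of the original file: a client of this module
   (e.g. a plugin) would typically register a building function once, obtaining
   a scheme kind, and later request schemes through find_scheme, along the
   lines of

     let my_scheme_kind =
       declare_individual_scheme_object "_my_scheme"
         (fun ind -> build_my_scheme ind)

     let get_my_scheme ind = find_scheme my_scheme_kind ind

   where my_scheme_kind, build_my_scheme and get_my_scheme are hypothetical
   names and build_my_scheme is assumed to return the scheme body as a constr;
   find_scheme reuses the constant already recorded for ind when there is one
   and otherwise builds and declares it on the fly. *)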
coq-8.4pl2/toplevel/coqtop.ml0000640000175000001440000003061212121620060015310 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (Coq_config.version,Coq_config.date) let print_header () = let (ver,rev) = (get_version_date ()) in Printf.printf "Welcome to Coq %s (%s)\n" ver rev; flush stdout let output_context = ref false let memory_stat = ref false let print_memory_stat () = if !memory_stat then Format.printf "total heap size = %d kbytes\n" (heap_size_kb ()) let _ = at_exit print_memory_stat let engagement = ref None let set_engagement c = engagement := Some c let engage () = match !engagement with Some c -> Global.set_engagement c | None -> () let set_batch_mode () = batch_mode := true let toplevel_default_name = make_dirpath [id_of_string "Top"] let toplevel_name = ref (Some toplevel_default_name) let set_toplevel_name dir = if dir = empty_dirpath then error "Need a non empty toplevel module name"; toplevel_name := Some dir let unset_toplevel_name () = toplevel_name := None let remove_top_ml () = Mltop.remove () let inputstate = ref None let set_inputstate s = inputstate:= Some s let inputstate () = match !inputstate with | Some "" -> () | Some s -> intern_state s | None -> intern_state "initial.coq" let outputstate = ref "" let set_outputstate s = outputstate:=s let outputstate () = if !outputstate <> "" then extern_state !outputstate let set_default_include d = push_include (d,Nameops.default_root_prefix) let set_include d p = let p = dirpath_of_string p in push_include (d,p) let set_rec_include d p = let p = dirpath_of_string p in push_rec_include(d,p) let load_vernacular_list = ref ([] : (string * bool) list) let add_load_vernacular verb s = load_vernacular_list := ((make_suffix s ".v"),verb) :: !load_vernacular_list let load_vernacular () = List.iter (fun (s,b) -> if Flags.do_beautify () then with_option beautify_file (Vernac.load_vernac b) s else Vernac.load_vernac b s) (List.rev !load_vernacular_list) let load_vernacular_obj = ref ([] : string list) let add_vernac_obj s = load_vernacular_obj := s :: !load_vernacular_obj let load_vernac_obj () = List.iter (fun f -> Library.require_library_from_file None f None) (List.rev !load_vernacular_obj) let require_list = ref ([] : string list) let add_require s = require_list := s :: !require_list let require () = List.iter (fun s -> Library.require_library_from_file None s (Some false)) (List.rev !require_list) let compile_list = ref ([] : (bool * string) list) let add_compile verbose s = set_batch_mode (); Flags.make_silent true; compile_list := (verbose,s) :: !compile_list let compile_files () = let init_state = States.freeze() in let coqdoc_init_state = Dumpglob.coqdoc_freeze () in List.iter (fun (v,f) -> States.unfreeze init_state; Dumpglob.coqdoc_unfreeze coqdoc_init_state; if Flags.do_beautify () then with_option beautify_file (Vernac.compile v) f else Vernac.compile v f) (List.rev !compile_list) (*s options for the virtual machine *) let boxed_val = ref false let use_vm = ref false let set_vm_opt () = Vm.set_transp_values (not !boxed_val); Vconv.set_use_vm !use_vm (*s Parsing of the command line. We no longer use [Arg.parse], in order to use share [Usage.print_usage] between coqtop and coqc. 
*) let usage () = if !batch_mode then Usage.print_usage_coqc () else Usage.print_usage_coqtop () ; flush stderr ; exit 1 let warning s = msg_warning (str s) let ide_slave = ref false let filter_opts = ref false let verb_compat_ntn = ref false let no_compat_ntn = ref false let parse_args arglist = let glob_opt = ref false in let rec parse = function | [] -> [] | "-with-geoproof" :: s :: rem -> if s = "yes" then Coq_config.with_geoproof := true else if s = "no" then Coq_config.with_geoproof := false else usage (); parse rem | "-impredicative-set" :: rem -> set_engagement Declarations.ImpredicativeSet; parse rem | ("-I"|"-include") :: d :: "-as" :: p :: rem -> set_include d p; parse rem | ("-I"|"-include") :: d :: "-as" :: [] -> usage () | ("-I"|"-include") :: d :: rem -> set_default_include d; parse rem | ("-I"|"-include") :: [] -> usage () | "-R" :: d :: "-as" :: p :: rem -> set_rec_include d p;parse rem | "-R" :: d :: "-as" :: [] -> usage () | "-R" :: d :: p :: rem -> set_rec_include d p;parse rem | "-R" :: ([] | [_]) -> usage () | "-top" :: d :: rem -> set_toplevel_name (dirpath_of_string d); parse rem | "-top" :: [] -> usage () | "-exclude-dir" :: f :: rem -> exclude_search_in_dirname f; parse rem | "-exclude-dir" :: [] -> usage () | "-notop" :: rem -> unset_toplevel_name (); parse rem | "-q" :: rem -> no_load_rc (); parse rem | "-opt" :: rem -> warning "option -opt deprecated, call with .opt suffix\n"; parse rem | "-byte" :: rem -> warning "option -byte deprecated, call with .byte suffix\n"; parse rem | "-full" :: rem -> warning "option -full deprecated\n"; parse rem | "-batch" :: rem -> set_batch_mode (); parse rem | "-boot" :: rem -> boot := true; no_load_rc (); parse rem | "-quality" :: rem -> term_quality := true; no_load_rc (); parse rem | "-outputstate" :: s :: rem -> Flags.load_proofs := Flags.Force; set_outputstate s; parse rem | "-outputstate" :: [] -> usage () | "-nois" :: rem -> set_inputstate ""; parse rem | ("-inputstate"|"-is") :: s :: rem -> set_inputstate s; parse rem | ("-inputstate"|"-is") :: [] -> usage () | "-load-ml-object" :: f :: rem -> Mltop.dir_ml_load f; parse rem | "-load-ml-object" :: [] -> usage () | "-load-ml-source" :: f :: rem -> Mltop.dir_ml_use f; parse rem | "-load-ml-source" :: [] -> usage () | ("-load-vernac-source"|"-l") :: f :: rem -> add_load_vernacular false f; parse rem | ("-load-vernac-source"|"-l") :: [] -> usage () | ("-load-vernac-source-verbose"|"-lv") :: f :: rem -> add_load_vernacular true f; parse rem | ("-load-vernac-source-verbose"|"-lv") :: [] -> usage () | "-load-vernac-object" :: f :: rem -> add_vernac_obj f; parse rem | "-load-vernac-object" :: [] -> usage () | "-dump-glob" :: "stdout" :: rem -> Dumpglob.dump_to_stdout (); glob_opt := true; parse rem (* À ne pas documenter : l'option 'stdout' n'étant éventuellement utile que pour le debugging... 
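(in English: not to be documented, since the special stdout target is at best useful for debugging)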
*) | "-dump-glob" :: f :: rem -> Dumpglob.dump_into_file f; glob_opt := true; parse rem | "-dump-glob" :: [] -> usage () | ("-no-glob" | "-noglob") :: rem -> Dumpglob.noglob (); glob_opt := true; parse rem | "-require" :: f :: rem -> add_require f; parse rem | "-require" :: [] -> usage () | "-compile" :: f :: rem -> add_compile false f; if not !glob_opt then Dumpglob.dump_to_dotglob (); parse rem | "-compile" :: [] -> usage () | "-compile-verbose" :: f :: rem -> add_compile true f; if not !glob_opt then Dumpglob.dump_to_dotglob (); parse rem | "-compile-verbose" :: [] -> usage () | "-force-load-proofs" :: rem -> Flags.load_proofs := Flags.Force; parse rem | "-lazy-load-proofs" :: rem -> Flags.load_proofs := Flags.Lazy; parse rem | "-dont-load-proofs" :: rem -> Flags.load_proofs := Flags.Dont; parse rem | "-beautify" :: rem -> make_beautify true; parse rem | "-unsafe" :: f :: rem -> add_unsafe f; parse rem | "-unsafe" :: [] -> usage () | "-debug" :: rem -> set_debug (); parse rem | "-compat" :: v :: rem -> Flags.compat_version := get_compat_version v; parse rem | "-compat" :: [] -> usage () | "-verbose-compat-notations" :: rem -> verb_compat_ntn := true; parse rem | "-no-compat-notations" :: rem -> no_compat_ntn := true; parse rem | "-vm" :: rem -> use_vm := true; parse rem | "-emacs" :: rem -> Flags.print_emacs := true; Pp.make_pp_emacs(); Vernacentries.qed_display_script := false; parse rem | "-emacs-U" :: rem -> warning "Obsolete option \"-emacs-U\", use -emacs instead."; parse ("-emacs" :: rem) | "-unicode" :: rem -> add_require "Utf8_core"; parse rem | "-coqlib" :: d :: rem -> Flags.coqlib_spec:=true; Flags.coqlib:=d; parse rem | "-coqlib" :: [] -> usage () | "-where" :: _ -> print_endline (Envars.coqlib ()); exit (if !filter_opts then 2 else 0) | ("-config"|"--config") :: _ -> Usage.print_config (); exit (if !filter_opts then 2 else 0) | ("-quiet"|"-silent") :: rem -> Flags.make_silent true; parse rem | ("-?"|"-h"|"-H"|"-help"|"--help") :: _ -> usage () | ("-v"|"--version") :: _ -> Usage.version (if !filter_opts then 2 else 0) | "-init-file" :: f :: rem -> set_rcfile f; parse rem | "-init-file" :: [] -> usage () | "-notactics" :: rem -> warning "Obsolete option \"-notactics\"."; remove_top_ml (); parse rem | "-just-parsing" :: rem -> Vernac.just_parsing := true; parse rem | ("-m" | "--memory") :: rem -> memory_stat := true; parse rem | "-xml" :: rem -> Flags.xml_export := true; parse rem | "-output-context" :: rem -> output_context := true; parse rem (* Scanned in Flags. *) | "-v7" :: rem -> error "This version of Coq does not support v7 syntax" | "-v8" :: rem -> parse rem | "-no-hash-consing" :: rem -> Flags.hash_cons_proofs := false; parse rem | "-ideslave" :: rem -> ide_slave := true; parse rem | "-filteropts" :: rem -> filter_opts := true; parse rem | s :: rem -> if !filter_opts then s :: (parse rem) else (prerr_endline ("Don't know what to do with " ^ s); usage ()) in try parse arglist with | UserError(_,s) as e -> begin try Stream.empty s; exit 1 with Stream.Failure -> msgnl (Errors.print e); exit 1 end | any -> begin msgnl (Errors.print any); exit 1 end let init arglist = Sys.catch_break false; (* Ctrl-C is fatal during the initialisation *) Lib.init(); (* Default Proofb Mode starts with an alternative default. 
*) Goptions.set_string_option_value ["Default";"Proof";"Mode"] "Classic"; begin try let foreign_args = parse_args arglist in if !filter_opts then (print_string (String.concat "\n" foreign_args); exit 0); if !ide_slave then begin Flags.make_silent true; Ide_slave.init_stdout () end; if_verbose print_header (); init_load_path (); inputstate (); Mltop.init_known_plugins (); set_vm_opt (); engage (); (* Be careful to set these variables after the inputstate *) Syntax_def.set_verbose_compat_notations !verb_compat_ntn; Syntax_def.set_compat_notations (not !no_compat_ntn); if (not !batch_mode|| !compile_list=[]) && Global.env_is_empty() then Option.iter Declaremods.start_library !toplevel_name; init_library_roots (); load_vernac_obj (); require (); load_rcfile(); load_vernacular (); compile_files (); outputstate () with any -> flush_all(); if not !batch_mode then message "Error during initialization:"; msgnl (Toplevel.print_toplevel_error any); exit 1 end; if !batch_mode then (flush_all(); if !output_context then Pp.ppnl (with_option raw_print Prettyp.print_full_pure_context ()); Profile.print_profile (); exit 0); (* We initialize the command history stack with a first entry *) Backtrack.mark_command Vernacexpr.VernacNop let init_toplevel = init let start () = init_toplevel (List.tl (Array.to_list Sys.argv)); if !ide_slave then Ide_slave.loop () else Toplevel.loop(); (* Initialise and launch the Ocaml toplevel *) Coqinit.init_ocaml_path(); Mltop.ocaml_toploop(); exit 1 (* [Coqtop.start] will be called by the code produced by coqmktop *) coq-8.4pl2/toplevel/class.ml0000640000175000001440000002142412010532755015123 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (Printer.pr_global g ++ str" is already a coercion") | NotAFunction -> (Printer.pr_global g ++ str" is not a function") | NoSource (Some cl) -> (str "Cannot recognize " ++ pr_class cl ++ str " as a source class of " ++ Printer.pr_global g) | NoSource None -> (str ": cannot find the source class of " ++ Printer.pr_global g) | ForbiddenSourceClass cl -> pr_class cl ++ str " cannot be a source class" | NotUniform -> (Printer.pr_global g ++ str" does not respect the uniform inheritance condition"); | NoTarget -> (str"Cannot find the target class") | WrongTarget (clt,cl) -> (str"Found target class " ++ pr_class cl ++ str " instead of " ++ pr_class clt) | NotAClass ref -> (str "Type of " ++ Printer.pr_global ref ++ str " does not end with a sort") | NotEnoughClassArgs cl -> (str"Wrong number of parameters for " ++ pr_class cl) (* Verifications pour l'ajout d'une classe *) let check_reference_arity ref = if not (Reductionops.is_arity (Global.env()) Evd.empty (Global.type_of_global ref)) then raise (CoercionError (NotAClass ref)) let check_arity = function | CL_FUN | CL_SORT -> () | CL_CONST cst -> check_reference_arity (ConstRef cst) | CL_SECVAR id -> check_reference_arity (VarRef id) | CL_IND kn -> check_reference_arity (IndRef kn) (* Coercions *) (* check that the computed target is the provided one *) let check_target clt = function | Some cl when cl <> clt -> raise (CoercionError (WrongTarget(clt,cl))) | _ -> () (* condition d'heritage uniforme *) let uniform_cond nargs lt = let rec aux = function | (0,[]) -> true | (n,t::l) -> let t = strip_outer_cast t in isRel t && destRel t = n && aux ((n-1),l) | _ -> false in aux (nargs,lt) let class_of_global = function | ConstRef sp -> CL_CONST sp | IndRef sp -> CL_IND sp | VarRef id -> CL_SECVAR 
id | ConstructRef _ as c -> errorlabstrm "class_of_global" (str "Constructors, such as " ++ Printer.pr_global c ++ str ", cannot be used as a class.") (* lp est la liste (inverse'e) des arguments de la coercion ids est le nom de la classe source sps_opt est le sp de la classe source dans le cas des structures retourne: la classe source nbre d'arguments de la classe le constr de la class la liste des variables dont depend la classe source l'indice de la classe source dans la liste lp *) let get_source lp source = match source with | None -> let (cl1,lv1) = match lp with | [] -> raise Not_found | t1::_ -> find_class_type Evd.empty t1 in (cl1,lv1,1) | Some cl -> let rec aux = function | [] -> raise Not_found | t1::lt -> try let cl1,lv1 = find_class_type Evd.empty t1 in if cl = cl1 then cl1,lv1,(List.length lt+1) else raise Not_found with Not_found -> aux lt in aux (List.rev lp) let get_target t ind = if (ind > 1) then CL_FUN else fst (find_class_type Evd.empty t) let prods_of t = let rec aux acc d = match kind_of_term d with | Prod (_,c1,c2) -> aux (c1::acc) c2 | Cast (c,_,_) -> aux acc c | _ -> (d,acc) in aux [] t let strength_of_cl = function | CL_CONST kn -> Global | CL_SECVAR id -> Local | _ -> Global let get_strength stre ref cls clt = let stres = strength_of_cl cls in let stret = strength_of_cl clt in let stref = strength_of_global ref in strength_min [stre;stres;stret;stref] let ident_key_of_class = function | CL_FUN -> "Funclass" | CL_SORT -> "Sortclass" | CL_CONST sp -> string_of_label (con_label sp) | CL_IND (sp,_) -> string_of_label (mind_label sp) | CL_SECVAR id -> string_of_id id (* coercion identit *) let error_not_transparent source = errorlabstrm "build_id_coercion" (pr_class source ++ str " must be a transparent constant.") let build_id_coercion idf_opt source = let env = Global.env () in let vs = match source with | CL_CONST sp -> mkConst sp | _ -> error_not_transparent source in let c = match constant_opt_value env (destConst vs) with | Some c -> c | None -> error_not_transparent source in let lams,t = decompose_lam_assum c in let val_f = it_mkLambda_or_LetIn (mkLambda (Name (id_of_string "x"), applistc vs (extended_rel_list 0 lams), mkRel 1)) lams in let typ_f = it_mkProd_wo_LetIn (mkProd (Anonymous, applistc vs (extended_rel_list 0 lams), lift 1 t)) lams in (* juste pour verification *) let _ = if not (Reductionops.is_conv_leq env Evd.empty (Typing.type_of env Evd.empty val_f) typ_f) then errorlabstrm "" (strbrk "Cannot be defined as coercion (maybe a bad number of arguments).") in let idf = match idf_opt with | Some idf -> idf | None -> let cl,_ = find_class_type Evd.empty t in id_of_string ("Id_"^(ident_key_of_class source)^"_"^ (ident_key_of_class cl)) in let constr_entry = (* Cast is necessary to express [val_f] is identity *) DefinitionEntry { const_entry_body = mkCast (val_f, DEFAULTcast, typ_f); const_entry_secctx = None; const_entry_type = Some typ_f; const_entry_opaque = false } in let kn = declare_constant idf (constr_entry,IsDefinition IdentityCoercion) in ConstRef kn let check_source = function | Some (CL_FUN|CL_SORT as s) -> raise (CoercionError (ForbiddenSourceClass s)) | _ -> () (* nom de la fonction coercion strength de f nom de la classe source (optionnel) sp de la classe source (dans le cas des structures) nom de la classe target (optionnel) booleen "coercion identite'?" lorque source est None alors target est None aussi. 
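(in English: name of the coercion function, strength of f, name of the source class (optional), sp of the source class (in the case of structures), name of the target class (optional), and a boolean asking whether this is an identity coercion; when source is None, target is None as well)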
*) let add_new_coercion_core coef stre source target isid = check_source source; let t = Global.type_of_global coef in if coercion_exists coef then raise (CoercionError AlreadyExists); let tg,lp = prods_of t in let llp = List.length lp in if llp = 0 then raise (CoercionError NotAFunction); let (cls,lvs,ind) = try get_source lp source with Not_found -> raise (CoercionError (NoSource source)) in check_source (Some cls); if not (uniform_cond (llp-ind) lvs) then warning (Pp.string_of_ppcmds (explain_coercion_error coef NotUniform)); let clt = try get_target tg ind with Not_found -> raise (CoercionError NoTarget) in check_target clt target; check_arity cls; check_arity clt; let stre' = get_strength stre coef cls clt in declare_coercion coef stre' ~isid ~src:cls ~target:clt ~params:(List.length lvs) let try_add_new_coercion_core ref b c d e = try add_new_coercion_core ref b c d e with CoercionError e -> errorlabstrm "try_add_new_coercion_core" (explain_coercion_error ref e ++ str ".") let try_add_new_coercion ref stre = try_add_new_coercion_core ref stre None None false let try_add_new_coercion_subclass cl stre = let coe_ref = build_id_coercion None cl in try_add_new_coercion_core coe_ref stre (Some cl) None true let try_add_new_coercion_with_target ref stre ~source ~target = try_add_new_coercion_core ref stre (Some source) (Some target) false let try_add_new_identity_coercion id stre ~source ~target = let ref = build_id_coercion (Some id) source in try_add_new_coercion_core ref stre (Some source) (Some target) true let try_add_new_coercion_with_source ref stre ~source = try_add_new_coercion_core ref stre (Some source) None false let add_coercion_hook stre ref = try_add_new_coercion ref stre; Flags.if_verbose message (string_of_qualid (shortest_qualid_of_global Idset.empty ref) ^ " is now a coercion") let add_subclass_hook stre ref = let cl = class_of_global ref in try_add_new_coercion_subclass cl stre coq-8.4pl2/toplevel/autoinstance.mli0000640000175000001440000000275112010532755016666 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* rel_context -> constr list -> unit (** [search_declaration gr] Search in the library if the (new) * declaration gr can form an instance of a registered record/class *) val search_declaration : global_reference -> unit (** [search_record declf gr evm] Search the library for instances of the (new) record/class declaration [gr], and register them using [declf]. [evm] is the signature of the record (to avoid recomputing it) *) val search_record : instance_decl_function -> global_reference -> evar_map -> unit (** Instance declaration for both scenarios *) val declare_record_instance : instance_decl_function val declare_class_instance : instance_decl_function coq-8.4pl2/toplevel/classes.mli0000640000175000001440000000372712010532755015632 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr_expr list -> rel_context -> 'a val mismatched_props : env -> constr_expr list -> rel_context -> 'a (** Post-hoc class declaration. 
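It turns an already declared constant or inductive type into a type class (other kinds of references are rejected).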
*) val declare_class : reference -> unit (** Instance declaration *) val existing_instance : bool -> reference -> unit val declare_instance_constant : typeclass -> int option -> (** priority *) bool -> (** globality *) Impargs.manual_explicitation list -> (** implicits *) ?hook:(Libnames.global_reference -> unit) -> identifier -> (** name *) Term.constr -> (** body *) Term.types -> (** type *) Names.identifier val new_instance : ?abstract:bool -> (** Not abstract by default. *) ?global:bool -> (** Not global by default. *) local_binder list -> typeclass_constraint -> constr_expr option -> ?generalize:bool -> ?tac:Proof_type.tactic -> ?hook:(Libnames.global_reference -> unit) -> int option -> identifier (** Setting opacity *) val set_typeclass_transparency : evaluable_global_reference -> bool -> bool -> unit (** For generation on names based on classes only *) val id_of_class : typeclass -> identifier (** Context command *) val context : local_binder list -> unit (** Forward ref for refine *) val refine_ref : (open_constr -> Proof_type.tactic) ref coq-8.4pl2/toplevel/coqinit.ml0000640000175000001440000001276312121620060015460 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* () (* Flags.if_verbose mSGNL (str ("No coqrc or coqrc."^Coq_config.version^ " found. Skipping rcfile loading.")) *) with reraise -> (msgnl (str"Load of rcfile failed."); raise reraise) else Flags.if_verbose msgnl (str"Skipping rcfile loading.") (* Puts dir in the path of ML and in the LoadPath *) let coq_add_path unix_path s = Mltop.add_path ~unix_path ~coq_root:(Names.make_dirpath [Nameops.coq_root;Names.id_of_string s]) let coq_add_rec_path unix_path = Mltop.add_rec_path ~unix_path ~coq_root:(Names.make_dirpath [Nameops.coq_root]) (* By the option -include -I or -R of the command line *) let includes = ref [] let push_include (s, alias) = includes := (s,alias,false) :: !includes let push_rec_include (s, alias) = includes := (s,alias,true) :: !includes (* The list of all theories in the standard library /!\ order does matter *) let theories_dirs_map = [ "theories/Unicode", "Unicode" ; "theories/Classes", "Classes" ; "theories/Program", "Program" ; "theories/MSets", "MSets" ; "theories/FSets", "FSets" ; "theories/Reals", "Reals" ; "theories/Strings", "Strings" ; "theories/Sorting", "Sorting" ; "theories/Setoids", "Setoids" ; "theories/Sets", "Sets" ; "theories/Structures", "Structures" ; "theories/Lists", "Lists" ; "theories/Vectors", "Vectors" ; "theories/Wellfounded", "Wellfounded" ; "theories/Relations", "Relations" ; "theories/Numbers", "Numbers" ; "theories/QArith", "QArith" ; "theories/PArith", "PArith" ; "theories/NArith", "NArith" ; "theories/ZArith", "ZArith" ; "theories/Arith", "Arith" ; "theories/Bool", "Bool" ; "theories/Logic", "Logic" ; "theories/Init", "Init" ] (* Initializes the LoadPath *) let init_load_path () = let coqlib = Envars.coqlib () in let user_contrib = coqlib/"user-contrib" in let xdg_dirs = Envars.xdg_dirs in let coqpath = Envars.coqpath in let dirs = ["states";"plugins"] in (* NOTE: These directories are searched from last to first *) (* first, developer specific directory to open *) if Coq_config.local then coq_add_path (coqlib/"dev") "dev"; (* then standard library *) List.iter (fun (s,alias) -> Mltop.add_rec_path ~unix_path:(coqlib/s) ~coq_root:(Names.make_dirpath [Names.id_of_string alias; Nameops.coq_root])) theories_dirs_map; (* then states and plugins *) List.iter (fun s -> 
coq_add_rec_path (coqlib/s)) dirs; (* then user-contrib *) if Sys.file_exists user_contrib then Mltop.add_rec_path ~unix_path:user_contrib ~coq_root:Nameops.default_root_prefix; (* then directories in XDG_DATA_DIRS and XDG_DATA_HOME *) List.iter (fun s -> Mltop.add_rec_path ~unix_path:s ~coq_root:Nameops.default_root_prefix) xdg_dirs; (* then directories in COQPATH *) List.iter (fun s -> Mltop.add_rec_path ~unix_path:s ~coq_root:Nameops.default_root_prefix) coqpath; (* then current directory *) Mltop.add_path ~unix_path:"." ~coq_root:Nameops.default_root_prefix; (* additional loadpath, given with -I -include -R options *) List.iter (fun (unix_path, coq_root, reci) -> if reci then Mltop.add_rec_path ~unix_path ~coq_root else Mltop.add_path ~unix_path ~coq_root) (List.rev !includes) let init_library_roots () = includes := [] (* Initialises the Ocaml toplevel before launching it, so that it can find the "include" file in the *source* directory *) let init_ocaml_path () = let add_subdir dl = Mltop.add_ml_dir (List.fold_left (/) Envars.coqroot dl) in Mltop.add_ml_dir (Envars.coqlib ()); List.iter add_subdir [ [ "config" ]; [ "dev" ]; [ "lib" ]; [ "kernel" ]; [ "library" ]; [ "pretyping" ]; [ "interp" ]; [ "parsing" ]; [ "proofs" ]; [ "tactics" ]; [ "toplevel" ]; [ "translate" ]; [ "ide" ] ] let get_compat_version = function | "8.3" -> Flags.V8_3 | "8.2" -> Flags.V8_2 | ("8.1" | "8.0") as s -> warning ("Compatibility with version "^s^" not supported."); Flags.V8_2 | s -> Util.error ("Unknown compatibility version \""^s^"\".") coq-8.4pl2/toplevel/metasyntax.mli0000640000175000001440000000416712010532755016371 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit (** Adding a tactic notation in the environment *) val add_tactic_notation : int * grammar_tactic_prod_item_expr list * raw_tactic_expr -> unit (** Adding a (constr) notation in the environment*) val add_infix : locality_flag -> (lstring * syntax_modifier list) -> constr_expr -> scope_name option -> unit val add_notation : locality_flag -> constr_expr -> (lstring * syntax_modifier list) -> scope_name option -> unit (** Declaring delimiter keys and default scopes *) val add_delimiters : scope_name -> string -> unit val add_class_scope : scope_name -> Classops.cl_typ -> unit (** Add only the interpretation of a notation that already has pa/pp rules *) val add_notation_interpretation : (lstring * constr_expr * scope_name option) -> unit (** Add a notation interpretation for supporting the "where" clause *) val set_notation_for_interpretation : Constrintern.internalization_env -> (lstring * constr_expr * scope_name option) -> unit (** Add only the parsing/printing rule of a notation *) val add_syntax_extension : locality_flag -> (lstring * syntax_modifier list) -> unit (** Add a syntactic definition (as in "Notation f := ...") *) val add_syntactic_definition : identifier -> identifier list * constr_expr -> bool -> Flags.compat_version option -> unit (** Print the Camlp4 state of a grammar *) val print_grammar : string -> unit val check_infix_modifiers : syntax_modifier list -> unit val with_syntax_protection : ('a -> 'b) -> 'a -> 'b coq-8.4pl2/toplevel/vernacentries.ml0000640000175000001440000017010712122623646016675 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit; solve : int -> unit; abort : string -> unit; search : 
searchable -> dir_path list * bool -> unit; print_name : reference Genarg.or_by_notation -> unit; print_check : Environ.env -> Environ.unsafe_judgment -> unit; print_eval : Reductionops.reduction_function -> Environ.env -> Evd.evar_map -> constr_expr -> Environ.unsafe_judgment -> unit; show_goal : goal_reference -> unit } let pcoq = ref None let set_pcoq_hook f = pcoq := Some f (* Misc *) let cl_of_qualid = function | FunClass -> Classops.CL_FUN | SortClass -> Classops.CL_SORT | RefClass r -> Class.class_of_global (Smartlocate.smart_global r) (*******************) (* "Show" commands *) let show_proof () = (* spiwack: this would probably be cooler with a bit of polishing. *) let p = Proof_global.give_me_the_proof () in let pprf = Proof.partial_proof p in msgnl (Util.prlist_with_sep Pp.fnl Printer.pr_constr pprf) let show_node () = (* spiwack: I'm have little clue what this function used to do. I deactivated it, could, possibly, be cleaned away. (Feb. 2010) *) () (* indentation code for Show Script, initially contributed by D. de Rauglaudre *) let indent_script_item ((ng1,ngl1),nl,beginend,ppl) (cmd,ng) = (* ng1 : number of goals remaining at the current level (before cmd) ngl1 : stack of previous levels with their remaining goals ng : number of goals after the execution of cmd beginend : special indentation stack for { } *) let ngprev = List.fold_left (+) ng1 ngl1 in let new_ngl = if ng > ngprev then (* We've branched *) (ng - ngprev + 1, ng1 - 1 :: ngl1) else if ng < ngprev then (* A subgoal have been solved. Let's compute the new current level by discarding all levels with 0 remaining goals. *) let _ = assert (ng = ngprev - 1) in let rec loop = function | (0, ng2::ngl2) -> loop (ng2,ngl2) | p -> p in loop (ng1-1, ngl1) else (* Standard case, same goal number as before *) (ng1, ngl1) in (* When a subgoal have been solved, separate this block by an empty line *) let new_nl = (ng < ngprev) in (* Indentation depth *) let ind = List.length ngl1 in (* Some special handling of bullets and { }, to get a nicer display *) let pred n = max 0 (n-1) in let ind, nl, new_beginend = match cmd with | VernacSubproof _ -> pred ind, nl, (pred ind)::beginend | VernacEndSubproof -> List.hd beginend, false, List.tl beginend | VernacBullet _ -> pred ind, nl, beginend | _ -> ind, nl, beginend in let pp = (if nl then fnl () else mt ()) ++ (hov (ind+1) (str (String.make ind ' ') ++ Ppvernac.pr_vernac cmd)) in (new_ngl, new_nl, new_beginend, pp :: ppl) let show_script () = let prf = Pfedit.get_current_proof_name () in let cmds = Backtrack.get_script prf in let _,_,_,indented_cmds = List.fold_left indent_script_item ((1,[]),false,[],[]) cmds in let indented_cmds = List.rev (indented_cmds) in msgnl (v 0 (Util.prlist_with_sep Pp.fnl (fun x -> x) indented_cmds)) let show_thesis () = msgnl (anomaly "TODO" ) let show_top_evars () = (* spiwack: new as of Feb. 2010: shows goal evars in addition to non-goal evars. 
*) let pfts = get_pftreestate () in let gls = Proof.V82.subgoals pfts in let sigma = gls.Evd.sigma in msg (pr_evars_int 1 (Evarutil.non_instantiated sigma)) let show_prooftree () = (* Spiwack: proof tree is currently not working *) () let enable_goal_printing = ref true let print_subgoals () = if !enable_goal_printing && is_verbose () then msg (pr_open_subgoals ()) let try_print_subgoals () = Pp.flush_all(); try print_subgoals () with Proof_global.NoCurrentProof | UserError _ -> () (* Simulate the Intro(s) tactic *) let show_intro all = let pf = get_pftreestate() in let {Evd.it=gls ; sigma=sigma} = Proof.V82.subgoals pf in let gl = {Evd.it=List.hd gls ; sigma = sigma} in let l,_= decompose_prod_assum (strip_outer_cast (pf_concl gl)) in if all then let lid = Tactics.find_intro_names l gl in msgnl (hov 0 (prlist_with_sep spc pr_id lid)) else try let n = list_last l in msgnl (pr_id (List.hd (Tactics.find_intro_names [n] gl))) with Failure "list_last" -> message "" (** Prepare a "match" template for a given inductive type. For each branch of the match, we list the constructor name followed by enough pattern variables. [Not_found] is raised if the given string isn't the qualid of a known inductive type. *) let make_cases s = let qualified_name = Libnames.qualid_of_string s in let glob_ref = Nametab.locate qualified_name in match glob_ref with | Libnames.IndRef i -> let {Declarations.mind_nparams = np} , {Declarations.mind_consnames = carr ; Declarations.mind_nf_lc = tarr } = Global.lookup_inductive i in Util.array_fold_right2 (fun consname typ l -> let al = List.rev (fst (Term.decompose_prod typ)) in let al = Util.list_skipn np al in let rec rename avoid = function | [] -> [] | (n,_)::l -> let n' = Namegen.next_name_away_in_cases_pattern n avoid in string_of_id n' :: rename (n'::avoid) l in let al' = rename [] al in (string_of_id consname :: al') :: l) carr tarr [] | _ -> raise Not_found (** Textual display of a generic "match" template *) let show_match id = let patterns = try make_cases (string_of_id (snd id)) with Not_found -> error "Unknown inductive type." in let pr_branch l = str "| " ++ hov 1 (prlist_with_sep spc str l) ++ str " =>" in msg (v 1 (str "match # with" ++ fnl () ++ prlist_with_sep fnl pr_branch patterns ++ fnl () ++ str "end" ++ fnl ())) (* "Print" commands *) let print_path_entry (s,l) = (str (string_of_dirpath l) ++ str " " ++ tbrk (0,0) ++ str s) let print_loadpath dir = let l = Library.get_full_load_paths () in let l = match dir with | None -> l | Some dir -> List.filter (fun (s,l) -> is_dirpath_prefix_of dir l) l in msgnl (Pp.t (str "Logical Path: " ++ tab () ++ str "Physical path:" ++ fnl () ++ prlist_with_sep pr_fnl print_path_entry l)) let print_modules () = let opened = Library.opened_libraries () and loaded = Library.loaded_libraries () in (* we intersect over opened to preserve the order of opened since *) (* non-commutative operations (e.g. 
visibility) are done at import time *) let loaded_opened = list_intersect opened loaded and only_loaded = list_subtract loaded opened in str"Loaded and imported library files: " ++ pr_vertical_list pr_dirpath loaded_opened ++ fnl () ++ str"Loaded and not imported library files: " ++ pr_vertical_list pr_dirpath only_loaded let print_module r = let (loc,qid) = qualid_of_reference r in try let globdir = Nametab.locate_dir qid in match globdir with DirModule (dirpath,(mp,_)) -> msgnl (Printmod.print_module (Printmod.printable_body dirpath) mp) | _ -> raise Not_found with Not_found -> msgnl (str"Unknown Module " ++ pr_qualid qid) let print_modtype r = let (loc,qid) = qualid_of_reference r in try let kn = Nametab.locate_modtype qid in msgnl (Printmod.print_modtype kn) with Not_found -> (* Is there a module of this name ? If yes we display its type *) try let mp = Nametab.locate_module qid in msgnl (Printmod.print_module false mp) with Not_found -> msgnl (str"Unknown Module Type or Module " ++ pr_qualid qid) let dump_universes_gen g s = let output = open_out s in let output_constraint, close = if Filename.check_suffix s ".dot" || Filename.check_suffix s ".gv" then begin (* the lazy unit is to handle errors while printing the first line *) let init = lazy (Printf.fprintf output "digraph universes {\n") in begin fun kind left right -> let () = Lazy.force init in match kind with | Univ.Lt -> Printf.fprintf output " \"%s\" -> \"%s\" [style=bold];\n" right left | Univ.Le -> Printf.fprintf output " \"%s\" -> \"%s\" [style=solid];\n" right left | Univ.Eq -> Printf.fprintf output " \"%s\" -> \"%s\" [style=dashed];\n" left right end, begin fun () -> if Lazy.lazy_is_val init then Printf.fprintf output "}\n"; close_out output end end else begin begin fun kind left right -> let kind = match kind with | Univ.Lt -> "<" | Univ.Le -> "<=" | Univ.Eq -> "=" in Printf.fprintf output "%s %s %s ;\n" left kind right end, (fun () -> close_out output) end in try Univ.dump_universes output_constraint g; close (); msgnl (str ("Universes written to file \""^s^"\".")) with reraise -> close (); raise reraise let dump_universes sorted s = let g = Global.universes () in let g = if sorted then Univ.sort_universes g else g in dump_universes_gen g s (*********************) (* "Locate" commands *) let locate_file f = let _,file = System.find_file_in_path ~warn:false (Library.get_load_paths ()) f in msgnl (str file) let msg_found_library = function | Library.LibLoaded, fulldir, file -> msgnl (hov 0 (pr_dirpath fulldir ++ strbrk " has been loaded from file " ++ str file)) | Library.LibInPath, fulldir, file -> msgnl (hov 0 (pr_dirpath fulldir ++ strbrk " is bound to file " ++ str file)) let msg_notfound_library loc qid = function | Library.LibUnmappedDir -> let dir = fst (repr_qualid qid) in user_err_loc (loc,"locate_library", strbrk "Cannot find a physical path bound to logical path " ++ pr_dirpath dir ++ str".") | Library.LibNotFound -> msgnl (hov 0 (strbrk "Unable to locate library " ++ pr_qualid qid ++ str".")) | e -> assert false let print_located_library r = let (loc,qid) = qualid_of_reference r in try msg_found_library (Library.locate_qualified_library false qid) with e when Errors.noncritical e -> msg_notfound_library loc qid e let print_located_module r = let (loc,qid) = qualid_of_reference r in let msg = try let dir = Nametab.full_name_module qid in str "Module " ++ pr_dirpath dir with Not_found -> (if fst (repr_qualid qid) = empty_dirpath then str "No module is referred to by basename " else str "No module is referred to 
by name ") ++ pr_qualid qid in msgnl msg let print_located_tactic r = let (loc,qid) = qualid_of_reference r in msgnl (try str "Ltac " ++ pr_path (Nametab.path_of_tactic (Nametab.locate_tactic qid)) with Not_found -> str "No Ltac definition is referred to by " ++ pr_qualid qid) let smart_global r = let gr = Smartlocate.smart_global r in Dumpglob.add_glob (Genarg.loc_of_or_by_notation loc_of_reference r) gr; gr let dump_global r = try let gr = Smartlocate.smart_global r in Dumpglob.add_glob (Genarg.loc_of_or_by_notation loc_of_reference r) gr with e when Errors.noncritical e -> () (**********) (* Syntax *) let vernac_syntax_extension = Metasyntax.add_syntax_extension let vernac_delimiters = Metasyntax.add_delimiters let vernac_bind_scope sc cll = List.iter (fun cl -> Metasyntax.add_class_scope sc (cl_of_qualid cl)) cll let vernac_open_close_scope = Notation.open_close_scope let vernac_arguments_scope local r scl = Notation.declare_arguments_scope local (smart_global r) scl let vernac_infix = Metasyntax.add_infix let vernac_notation = Metasyntax.add_notation (***********) (* Gallina *) let start_proof_and_print k l hook = check_locality (); (* early check, cf #2975 *) start_proof_com k l hook; print_subgoals (); if !pcoq <> None then (Option.get !pcoq).start_proof () let vernac_definition (local,k) (loc,id as lid) def hook = if local = Local then Dumpglob.dump_definition lid true "var" else Dumpglob.dump_definition lid false "def"; (match def with | ProveBody (bl,t) -> (* local binders, typ *) let hook _ _ = () in start_proof_and_print (local,DefinitionBody Definition) [Some lid, (bl,t,None)] hook | DefineBody (bl,red_option,c,typ_opt) -> let red_option = match red_option with | None -> None | Some r -> let (evc,env)= get_current_context () in Some (snd (interp_redexp env evc r)) in let ce,imps = interp_definition bl red_option c typ_opt in declare_definition id (local,k) ce imps hook) let vernac_start_proof kind l lettop hook = if Dumpglob.dump () then List.iter (fun (id, _) -> match id with | Some lid -> Dumpglob.dump_definition lid false "prf" | None -> ()) l; if not(refining ()) then if lettop then errorlabstrm "Vernacentries.StartProof" (str "Let declarations can only be used in proof editing mode."); start_proof_and_print (Global, Proof kind) l hook let qed_display_script = ref true let vernac_end_proof = function | Admitted -> Backtrack.mark_unreachable [Pfedit.get_current_proof_name ()]; admit () | Proved (is_opaque,idopt) -> let prf = Pfedit.get_current_proof_name () in if is_verbose () && !qed_display_script then (show_script (); msg (fnl())); begin match idopt with | None -> save_named is_opaque | Some ((_,id),None) -> save_anonymous is_opaque id | Some ((_,id),Some kind) -> save_anonymous_with_strength kind is_opaque id end; Backtrack.mark_unreachable [prf] (* A stupid macro that should be replaced by ``Exact c. Save.'' all along the theories [??] *) let vernac_exact_proof c = (* spiwack: for simplicity I do not enforce that "Proof proof_term" is called only at the begining of a proof. 
*) let prf = Pfedit.get_current_proof_name () in by (Tactics.exact_proof c); save_named true; Backtrack.mark_unreachable [prf] let vernac_assumption kind l nl= let global = fst kind = Global in List.iter (fun (is_coe,(idl,c)) -> if Dumpglob.dump () then List.iter (fun lid -> if global then Dumpglob.dump_definition lid false "ax" else Dumpglob.dump_definition lid true "var") idl; let t,imps = interp_assumption [] c in declare_assumptions idl is_coe kind t imps false nl) l let vernac_record k finite infer struc binders sort nameopt cfs = let const = match nameopt with | None -> add_prefix "Build_" (snd (snd struc)) | Some (_,id as lid) -> Dumpglob.dump_definition lid false "constr"; id in if Dumpglob.dump () then ( Dumpglob.dump_definition (snd struc) false "rec"; List.iter (fun (((_, x), _), _) -> match x with | Vernacexpr.AssumExpr ((loc, Name id), _) -> Dumpglob.dump_definition (loc,id) false "proj" | _ -> ()) cfs); ignore(Record.definition_structure (k,finite,infer,struc,binders,cfs,const,sort)) let vernac_inductive finite infer indl = if Dumpglob.dump () then List.iter (fun (((coe,lid), _, _, _, cstrs), _) -> match cstrs with | Constructors cstrs -> Dumpglob.dump_definition lid false "ind"; List.iter (fun (_, (lid, _)) -> Dumpglob.dump_definition lid false "constr") cstrs | _ -> () (* dumping is done by vernac_record (called below) *) ) indl; match indl with | [ ( id , bl , c , b, RecordDecl (oc,fs) ), [] ] -> vernac_record (match b with Class true -> Class false | _ -> b) finite infer id bl c oc fs | [ ( id , bl , c , Class true, Constructors [l]), _ ] -> let f = let (coe, ((loc, id), ce)) = l in let coe' = if coe then Some true else None in (((coe', AssumExpr ((loc, Name id), ce)), None), []) in vernac_record (Class true) finite infer id bl c None [f] | [ ( id , bl , c , Class true, _), _ ] -> Util.error "Definitional classes must have a single method" | [ ( id , bl , c , Class false, Constructors _), _ ] -> Util.error "Inductive classes not supported" | [ ( _ , _ , _ , _, RecordDecl _ ) , _ ] -> Util.error "where clause not supported for (co)inductive records" | _ -> let unpack = function | ( (_, id) , bl , c , _ , Constructors l ) , ntn -> ( id , bl , c , l ) , ntn | _ -> Util.error "Cannot handle mutually (co)inductive records." 
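(* Illustrative note: the record and class forms of an inductive declaration are handled by the earlier branches (via [vernac_record]) or rejected, so [unpack] only ever sees plain [Constructors] lists; e.g. a mutual declaration such as [Inductive tree := Node : forest -> tree with forest := Leaf | Cons : tree -> forest -> forest] is unpacked here and then handed to [do_mutual_inductive] below. *)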
in let indl = List.map unpack indl in do_mutual_inductive indl (recursivity_flag_of_kind finite) let vernac_fixpoint l = if Dumpglob.dump () then List.iter (fun ((lid, _, _, _, _), _) -> Dumpglob.dump_definition lid false "def") l; do_fixpoint l let vernac_cofixpoint l = if Dumpglob.dump () then List.iter (fun ((lid, _, _, _), _) -> Dumpglob.dump_definition lid false "def") l; do_cofixpoint l let vernac_scheme l = if Dumpglob.dump () then List.iter (fun (lid, s) -> Option.iter (fun lid -> Dumpglob.dump_definition lid false "def") lid; match s with | InductionScheme (_, r, _) | CaseScheme (_, r, _) | EqualityScheme r -> dump_global r) l; Indschemes.do_scheme l let vernac_combined_scheme lid l = if Dumpglob.dump () then (Dumpglob.dump_definition lid false "def"; List.iter (fun lid -> dump_global (Genarg.AN (Ident lid))) l); Indschemes.do_combined_scheme lid l (**********************) (* Modules *) let vernac_import export refl = let import ref = Library.import_module export (qualid_of_reference ref) in List.iter import refl; Lib.add_frozen_state () let vernac_declare_module export (loc, id) binders_ast mty_ast = (* We check the state of the system (in section, in module type) and what module information is supplied *) if Lib.sections_are_opened () then error "Modules and Module Types are not allowed inside sections."; let binders_ast = List.map (fun (export,idl,ty) -> if export <> None then error ("Arguments of a functor declaration cannot be exported. " ^ "Remove the \"Export\" and \"Import\" keywords from every functor " ^ "argument.") else (idl,ty)) binders_ast in let mp = Declaremods.declare_module Modintern.interp_modtype Modintern.interp_modexpr Modintern.interp_modexpr_or_modtype id binders_ast (Enforce mty_ast) [] in Dumpglob.dump_moddef loc mp "mod"; if_verbose message ("Module "^ string_of_id id ^" is declared"); Option.iter (fun export -> vernac_import export [Ident (dummy_loc,id)]) export let vernac_define_module export (loc, id) binders_ast mty_ast_o mexpr_ast_l = (* We check the state of the system (in section, in module type) and what module information is supplied *) if Lib.sections_are_opened () then error "Modules and Module Types are not allowed inside sections."; match mexpr_ast_l with | [] -> check_no_pending_proofs (); let binders_ast,argsexport = List.fold_right (fun (export,idl,ty) (args,argsexport) -> (idl,ty)::args, (List.map (fun (_,i) -> export,i)idl)@argsexport) binders_ast ([],[]) in let mp = Declaremods.start_module Modintern.interp_modtype export id binders_ast mty_ast_o in Dumpglob.dump_moddef loc mp "mod"; if_verbose message ("Interactive Module "^ string_of_id id ^" started") ; List.iter (fun (export,id) -> Option.iter (fun export -> vernac_import export [Ident (dummy_loc,id)]) export ) argsexport | _::_ -> let binders_ast = List.map (fun (export,idl,ty) -> if export <> None then error ("Arguments of a functor definition can be imported only if" ^ " the definition is interactive. 
Remove the \"Export\" and " ^ "\"Import\" keywords from every functor argument.") else (idl,ty)) binders_ast in let mp = Declaremods.declare_module Modintern.interp_modtype Modintern.interp_modexpr Modintern.interp_modexpr_or_modtype id binders_ast mty_ast_o mexpr_ast_l in Dumpglob.dump_moddef loc mp "mod"; if_verbose message ("Module "^ string_of_id id ^" is defined"); Option.iter (fun export -> vernac_import export [Ident (dummy_loc,id)]) export let vernac_end_module export (loc,id as lid) = let mp = Declaremods.end_module () in Dumpglob.dump_modref loc mp "mod"; if_verbose message ("Module "^ string_of_id id ^" is defined") ; Option.iter (fun export -> vernac_import export [Ident lid]) export let vernac_declare_module_type (loc,id) binders_ast mty_sign mty_ast_l = if Lib.sections_are_opened () then error "Modules and Module Types are not allowed inside sections."; match mty_ast_l with | [] -> check_no_pending_proofs (); let binders_ast,argsexport = List.fold_right (fun (export,idl,ty) (args,argsexport) -> (idl,ty)::args, (List.map (fun (_,i) -> export,i)idl)@argsexport) binders_ast ([],[]) in let mp = Declaremods.start_modtype Modintern.interp_modtype id binders_ast mty_sign in Dumpglob.dump_moddef loc mp "modtype"; if_verbose message ("Interactive Module Type "^ string_of_id id ^" started"); List.iter (fun (export,id) -> Option.iter (fun export -> vernac_import export [Ident (dummy_loc,id)]) export ) argsexport | _ :: _ -> let binders_ast = List.map (fun (export,idl,ty) -> if export <> None then error ("Arguments of a functor definition can be imported only if" ^ " the definition is interactive. Remove the \"Export\" " ^ "and \"Import\" keywords from every functor argument.") else (idl,ty)) binders_ast in let mp = Declaremods.declare_modtype Modintern.interp_modtype Modintern.interp_modexpr_or_modtype id binders_ast mty_sign mty_ast_l in Dumpglob.dump_moddef loc mp "modtype"; if_verbose message ("Module Type "^ string_of_id id ^" is defined") let vernac_end_modtype (loc,id) = let mp = Declaremods.end_modtype () in Dumpglob.dump_modref loc mp "modtype"; if_verbose message ("Module Type "^ string_of_id id ^" is defined") let vernac_include l = Declaremods.declare_include Modintern.interp_modexpr_or_modtype l (**********************) (* Gallina extensions *) (* Sections *) let vernac_begin_section (_, id as lid) = check_no_pending_proofs (); Dumpglob.dump_definition lid true "sec"; Lib.open_section id let vernac_end_section (loc,_) = Dumpglob.dump_reference loc (string_of_dirpath (Lib.current_dirpath true)) "<>" "sec"; Lib.close_section () (* Dispatcher of the "End" command *) let vernac_end_segment (_,id as lid) = check_no_pending_proofs (); match Lib.find_opening_node id with | Lib.OpenedModule (false,export,_,_) -> vernac_end_module export lid | Lib.OpenedModule (true,_,_,_) -> vernac_end_modtype lid | Lib.OpenedSection _ -> vernac_end_section lid | _ -> assert false (* Libraries *) let vernac_require import _ qidl = let qidl = List.map qualid_of_reference qidl in let modrefl = Flags.silently (List.map Library.try_locate_qualified_library) qidl in if Dumpglob.dump () then List.iter2 (fun (loc, _) dp -> Dumpglob.dump_libref loc dp "lib") qidl (List.map fst modrefl); Library.require_library_from_dirpath modrefl import (* Coercions and canonical structures *) let vernac_canonical r = Recordops.declare_canonical_structure (smart_global r) let vernac_coercion stre ref qids qidt = let target = cl_of_qualid qidt in let source = cl_of_qualid qids in let ref' = smart_global ref in 
Class.try_add_new_coercion_with_target ref' stre ~source ~target; if_verbose msgnl (pr_global ref' ++ str " is now a coercion") let vernac_identity_coercion stre id qids qidt = let target = cl_of_qualid qidt in let source = cl_of_qualid qids in Class.try_add_new_identity_coercion id stre ~source ~target (* Type classes *) let vernac_instance abst glob sup inst props pri = Dumpglob.dump_constraint inst false "inst"; ignore(Classes.new_instance ~abstract:abst ~global:glob sup inst props pri) let vernac_context l = Classes.context l let vernac_declare_instances glob ids = List.iter (fun (id) -> Classes.existing_instance glob id) ids let vernac_declare_class id = Classes.declare_class id (***********) (* Solving *) let command_focus = Proof.new_focus_kind () let focus_command_cond = Proof.no_cond command_focus let vernac_solve n tcom b = if not (refining ()) then error "Unknown command of the non proof-editing mode."; let p = Proof_global.give_me_the_proof () in Proof.transaction p begin fun () -> solve_nth n (Tacinterp.hide_interp tcom None) ~with_end_tac:b; (* in case a strict subtree was completed, go back to the top of the prooftree *) Proof_global.maximal_unfocus command_focus p; print_subgoals(); if !pcoq <> None then (Option.get !pcoq).solve n end (* A command which should be a tactic. It has been added by Christine to patch an error in the design of the proof machine, and enables to instantiate existential variables when there are no more goals to solve. It cannot be a tactic since all tactics fail if there are no further goals to prove. *) let vernac_solve_existential = instantiate_nth_evar_com let vernac_set_end_tac tac = if not (refining ()) then error "Unknown command of the non proof-editing mode."; if tac <> (Tacexpr.TacId []) then set_end_tac (Tacinterp.interp tac) else () (* TO DO verifier s'il faut pas mettre exist s | TacId s ici*) let vernac_set_used_variables l = let l = List.map snd l in if not (list_distinct l) then error "Used variables list contains duplicates"; let vars = Environ.named_context (Global.env ()) in List.iter (fun id -> if not (List.exists (fun (id',_,_) -> id = id') vars) then error ("Unknown variable: " ^ string_of_id id)) l; set_used_variables l (*****************************) (* Auxiliary file management *) let vernac_require_from export spec filename = Library.require_library_from_file None (System.expand_path_macros filename) export let vernac_add_loadpath isrec pdir ldiropt = let pdir = System.expand_path_macros pdir in let alias = match ldiropt with | None -> Nameops.default_root_prefix | Some ldir -> ldir in (if isrec then Mltop.add_rec_path else Mltop.add_path) ~unix_path:pdir ~coq_root:alias let vernac_remove_loadpath path = Library.remove_load_path (System.expand_path_macros path) (* Coq syntax for ML or system commands *) let vernac_add_ml_path isrec path = (if isrec then Mltop.add_rec_ml_dir else Mltop.add_ml_dir) (System.expand_path_macros path) let vernac_declare_ml_module local l = Mltop.declare_ml_modules local (List.map System.expand_path_macros l) let vernac_chdir = function | None -> message (Sys.getcwd()) | Some path -> begin try Sys.chdir (System.expand_path_macros path) with Sys_error str -> warning ("Cd failed: " ^ str) end; if_verbose message (Sys.getcwd()) (********************) (* State management *) let vernac_write_state file = Pfedit.delete_all_proofs (); States.extern_state file let vernac_restore_state file = Pfedit.delete_all_proofs (); States.intern_state file (************) (* Commands *) let 
vernac_declare_tactic_definition (local,x,def) = Tacinterp.add_tacdef local x def let vernac_create_hintdb local id b = Auto.create_hint_db local id full_transparent_state b let vernac_remove_hints local dbs ids = Auto.remove_hints local dbs (List.map Smartlocate.global_with_alias ids) let vernac_hints local lb h = Auto.add_hints local lb (Auto.interp_hints h) let vernac_syntactic_definition lid = Dumpglob.dump_definition lid false "syndef"; Metasyntax.add_syntactic_definition (snd lid) let vernac_declare_implicits local r = function | [] -> Impargs.declare_implicits local (smart_global r) | _::_ as imps -> Impargs.declare_manual_implicits local (smart_global r) ~enriching:false (List.map (List.map (fun (ex,b,f) -> ex, (b,true,f))) imps) let vernac_declare_arguments local r l nargs flags = let extra_scope_flag = List.mem `ExtraScopes flags in let names = List.map (List.map (fun (id, _,_,_,_) -> id)) l in let names, rest = List.hd names, List.tl names in let scopes = List.map (List.map (fun (_,_, s, _,_) -> s)) l in if List.exists ((<>) names) rest then error "All arguments lists must declare the same names."; if not (Util.list_distinct (List.filter ((<>) Anonymous) names)) then error "Arguments names must be distinct."; let sr = smart_global r in let inf_names = Impargs.compute_implicits_names (Global.env()) (Global.type_of_global sr) in let string_of_name = function Anonymous -> "_" | Name id -> string_of_id id in let rec check li ld ls = match li, ld, ls with | [], [], [] -> () | [], Anonymous::ld, (Some _)::ls when extra_scope_flag -> check li ld ls | [], _::_, (Some _)::ls when extra_scope_flag -> error "Extra notation scopes can be set on anonymous arguments only" | [], x::_, _ -> error ("Extra argument " ^ string_of_name x ^ ".") | l, [], _ -> error ("The following arguments are not declared: " ^ (String.concat ", " (List.map string_of_name l)) ^ ".") | _::li, _::ld, _::ls -> check li ld ls | _ -> assert false in if l <> [[]] then List.iter2 (fun l -> check inf_names l) (names :: rest) scopes; (* we take extra scopes apart, and we check they are consistent *) let l, scopes = let scopes, rest = List.hd scopes, List.tl scopes in if List.exists (List.exists ((<>) None)) rest then error "Notation scopes can be given only once"; if not extra_scope_flag then l, scopes else let l, _ = List.split (List.map (list_chop (List.length inf_names)) l) in l, scopes in (* we interpret _ as the inferred names *) let l = if l = [[]] then l else let name_anons = function | (Anonymous, a,b,c,d), Name id -> Name id, a,b,c,d | x, _ -> x in List.map (fun ns -> List.map name_anons (List.combine ns inf_names)) l in let names_decl = List.map (List.map (fun (id, _,_,_,_) -> id)) l in let some_renaming_specified = try Arguments_renaming.arguments_names sr <> names_decl with Not_found -> false in let some_renaming_specified, implicits = if l = [[]] then false, [[]] else Util.list_fold_map (fun sr il -> let sr', impl = Util.list_fold_map (fun b -> function | (Anonymous, _,_, true, max), Name id -> assert false | (Name x, _,_, true, _), Anonymous -> error ("Argument "^string_of_id x^" cannot be declared implicit.") | (Name iid, _,_, true, max), Name id -> b || iid <> id, Some (ExplByName id, max, false) | (Name iid, _,_, _, _), Name id -> b || iid <> id, None | _ -> b, None) sr (List.combine il inf_names) in sr || sr', Util.list_map_filter (fun x -> x) impl) some_renaming_specified l in if some_renaming_specified then if not (List.mem `Rename flags) then error "To rename arguments the \"rename\" flag must be 
specified." else Arguments_renaming.rename_arguments local sr names_decl; (* All other infos are in the first item of l *) let l = List.hd l in let some_implicits_specified = implicits <> [[]] in let scopes = List.map (function | None -> None | Some (o, k) -> try Some(ignore(Notation.find_scope k); k) with e when Errors.noncritical e -> Some (Notation.find_delimiters_scope o k)) scopes in let some_scopes_specified = List.exists ((<>) None) scopes in let rargs = Util.list_map_filter (function (n, true) -> Some n | _ -> None) (Util.list_map_i (fun i (_, b, _,_,_) -> i, b) 0 l) in if some_scopes_specified || List.mem `ClearScopes flags then vernac_arguments_scope local r scopes; if not some_implicits_specified && List.mem `DefaultImplicits flags then vernac_declare_implicits local r [] else if some_implicits_specified || List.mem `ClearImplicits flags then vernac_declare_implicits local r implicits; if nargs >= 0 && nargs < List.fold_left max 0 rargs then error "The \"/\" option must be placed after the last \"!\"."; let rec narrow = function | #Tacred.simpl_flag as x :: tl -> x :: narrow tl | [] -> [] | _ :: tl -> narrow tl in let flags = narrow flags in if rargs <> [] || nargs >= 0 || flags <> [] then match sr with | ConstRef _ as c -> Tacred.set_simpl_behaviour local c (rargs, nargs, flags) | _ -> errorlabstrm "" (strbrk "Modifiers of the behavior of the simpl tactic are relevant for constants only.") let vernac_reserve bl = let sb_decl = (fun (idl,c) -> let t = Constrintern.interp_type Evd.empty (Global.env()) c in let t = Detyping.detype false [] [] t in let t = aconstr_of_glob_constr [] [] t in Reserve.declare_reserved_type idl t) in List.iter sb_decl bl let vernac_generalizable = Implicit_quantifiers.declare_generalizable let make_silent_if_not_pcoq b = if !pcoq <> None then error "Turning on/off silent flag is not supported in Pcoq mode." 
else make_silent b let _ = declare_bool_option { optsync = false; optdepr = false; optname = "silent"; optkey = ["Silent"]; optread = is_silent; optwrite = make_silent_if_not_pcoq } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "implicit arguments"; optkey = ["Implicit";"Arguments"]; optread = Impargs.is_implicit_args; optwrite = Impargs.make_implicit_args } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "strict implicit arguments"; optkey = ["Strict";"Implicit"]; optread = Impargs.is_strict_implicit_args; optwrite = Impargs.make_strict_implicit_args } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "strong strict implicit arguments"; optkey = ["Strongly";"Strict";"Implicit"]; optread = Impargs.is_strongly_strict_implicit_args; optwrite = Impargs.make_strongly_strict_implicit_args } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "contextual implicit arguments"; optkey = ["Contextual";"Implicit"]; optread = Impargs.is_contextual_implicit_args; optwrite = Impargs.make_contextual_implicit_args } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "implicit status of reversible patterns"; optkey = ["Reversible";"Pattern";"Implicit"]; optread = Impargs.is_reversible_pattern_implicit_args; optwrite = Impargs.make_reversible_pattern_implicit_args } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "maximal insertion of implicit"; optkey = ["Maximal";"Implicit";"Insertion"]; optread = Impargs.is_maximal_implicit_args; optwrite = Impargs.make_maximal_implicit_args } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "automatic introduction of variables"; optkey = ["Automatic";"Introduction"]; optread = Flags.is_auto_intros; optwrite = make_auto_intros } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "coercion printing"; optkey = ["Printing";"Coercions"]; optread = (fun () -> !Constrextern.print_coercions); optwrite = (fun b -> Constrextern.print_coercions := b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "printing of existential variable instances"; optkey = ["Printing";"Existential";"Instances"]; optread = (fun () -> !Constrextern.print_evar_arguments); optwrite = (:=) Constrextern.print_evar_arguments } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "implicit arguments printing"; optkey = ["Printing";"Implicit"]; optread = (fun () -> !Constrextern.print_implicits); optwrite = (fun b -> Constrextern.print_implicits := b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "implicit arguments defensive printing"; optkey = ["Printing";"Implicit";"Defensive"]; optread = (fun () -> !Constrextern.print_implicits_defensive); optwrite = (fun b -> Constrextern.print_implicits_defensive := b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "projection printing using dot notation"; optkey = ["Printing";"Projections"]; optread = (fun () -> !Constrextern.print_projections); optwrite = (fun b -> Constrextern.print_projections := b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "notations printing"; optkey = ["Printing";"Notations"]; optread = (fun () -> not !Constrextern.print_no_symbol); optwrite = (fun b -> Constrextern.print_no_symbol := not b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "raw printing"; optkey = ["Printing";"All"]; optread = (fun () 
-> !Flags.raw_print); optwrite = (fun b -> Flags.raw_print := b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "record printing"; optkey = ["Printing";"Records"]; optread = (fun () -> !Flags.record_print); optwrite = (fun b -> Flags.record_print := b) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "use of virtual machine inside the kernel"; optkey = ["Virtual";"Machine"]; optread = (fun () -> Vconv.use_vm ()); optwrite = (fun b -> Vconv.set_use_vm b) } let _ = declare_int_option { optsync = true; optdepr = false; optname = "the level of inling duging functor application"; optkey = ["Inline";"Level"]; optread = (fun () -> Some (Flags.get_inline_level ())); optwrite = (fun o -> let lev = Option.default Flags.default_inline_level o in Flags.set_inline_level lev) } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "use of boxed values"; optkey = ["Boxed";"Values"]; optread = (fun _ -> not (Vm.transp_values ())); optwrite = (fun b -> Vm.set_transp_values (not b)) } (* No more undo limit in the new proof engine. The command still exists for compatibility (e.g. with ProofGeneral) *) let _ = declare_int_option { optsync = false; optdepr = true; optname = "the undo limit (OBSOLETE)"; optkey = ["Undo"]; optread = (fun _ -> None); optwrite = (fun _ -> ()) } let _ = declare_int_option { optsync = false; optdepr = false; optname = "the hypotheses limit"; optkey = ["Hyps";"Limit"]; optread = Flags.print_hyps_limit; optwrite = Flags.set_print_hyps_limit } let _ = declare_int_option { optsync = true; optdepr = false; optname = "the printing depth"; optkey = ["Printing";"Depth"]; optread = Pp_control.get_depth_boxes; optwrite = Pp_control.set_depth_boxes } let _ = declare_int_option { optsync = true; optdepr = false; optname = "the printing width"; optkey = ["Printing";"Width"]; optread = Pp_control.get_margin; optwrite = Pp_control.set_margin } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "printing of universes"; optkey = ["Printing";"Universes"]; optread = (fun () -> !Constrextern.print_universes); optwrite = (fun b -> Constrextern.print_universes:=b) } let vernac_debug b = set_debug (if b then Tactic_debug.DebugOn 0 else Tactic_debug.DebugOff) let _ = declare_bool_option { optsync = false; optdepr = false; optname = "Ltac debug"; optkey = ["Ltac";"Debug"]; optread = (fun () -> get_debug () <> Tactic_debug.DebugOff); optwrite = vernac_debug } let _ = declare_bool_option { optsync = true; optdepr = false; optname = "explicitly parsing implicit arguments"; optkey = ["Parsing";"Explicit"]; optread = (fun () -> !Constrintern.parsing_explicit); optwrite = (fun b -> Constrintern.parsing_explicit := b) } let vernac_set_opacity local str = let glob_ref r = match smart_global r with | ConstRef sp -> EvalConstRef sp | VarRef id -> EvalVarRef id | _ -> error "cannot set an inductive type or a constructor as transparent" in let str = List.map (fun (lev,ql) -> (lev,List.map glob_ref ql)) str in Redexpr.set_strategy local str let vernac_set_option locality key = function | StringValue s -> set_string_option_value_gen locality key s | IntValue n -> set_int_option_value_gen locality key n | BoolValue b -> set_bool_option_value_gen locality key b let vernac_unset_option locality key = unset_option_value_gen locality key let vernac_add_option key lv = let f = function | StringRefValue s -> (get_string_table key)#add s | QualidRefValue locqid -> (get_ref_table key)#add locqid in try List.iter f lv with Not_found -> 
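(* Note: [Not_found], raised when [key] does not name a declared option table, is reported as an undeclared option key. *)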
error_undeclared_key key let vernac_remove_option key lv = let f = function | StringRefValue s -> (get_string_table key)#remove s | QualidRefValue locqid -> (get_ref_table key)#remove locqid in try List.iter f lv with Not_found -> error_undeclared_key key let vernac_mem_option key lv = let f = function | StringRefValue s -> (get_string_table key)#mem s | QualidRefValue locqid -> (get_ref_table key)#mem locqid in try List.iter f lv with Not_found -> error_undeclared_key key let vernac_print_option key = try (get_ref_table key)#print with Not_found -> try (get_string_table key)#print with Not_found -> try print_option_value key with Not_found -> error_undeclared_key key let get_current_context_of_args = function | Some n -> get_goal_context n | None -> get_current_context () let vernac_check_may_eval redexp glopt rc = let module P = Pretype_errors in let (sigma, env) = get_current_context_of_args glopt in let sigma', c = interp_open_constr sigma env rc in let sigma' = Evarconv.consider_remaining_unif_problems env sigma' in let j = try Evarutil.check_evars env sigma sigma' c; Arguments_renaming.rename_typing env c with P.PretypeError (_,_,P.UnsolvableImplicit _) | Compat.Loc.Exc_located (_,P.PretypeError (_,_,P.UnsolvableImplicit _)) -> Evarutil.j_nf_evar sigma' (Retyping.get_judgment_of env sigma' c) in match redexp with | None -> if !pcoq <> None then (Option.get !pcoq).print_check env j else msg (print_judgment env j) | Some r -> Tacinterp.dump_glob_red_expr r; let (sigma',r_interp) = interp_redexp env sigma' r in let redfun = fst (reduction_of_red_expr r_interp) in if !pcoq <> None then (Option.get !pcoq).print_eval redfun env sigma' rc j else msg (print_eval redfun env sigma' rc j) let vernac_declare_reduction locality s r = declare_red_expr locality s (snd (interp_redexp (Global.env()) Evd.empty r)) (* The same but avoiding the current goal context if any *) let vernac_global_check c = let evmap = Evd.empty in let env = Global.env() in let c = interp_constr evmap env c in let senv = Global.safe_env() in let j = Safe_typing.typing senv c in msg (print_safe_judgment env j) let vernac_print = function | PrintTables -> print_tables () | PrintFullContext-> msg (print_full_context_typ ()) | PrintSectionContext qid -> msg (print_sec_context_typ qid) | PrintInspect n -> msg (inspect n) | PrintGrammar ent -> Metasyntax.print_grammar ent | PrintLoadPath dir -> (* For compatibility ? 
*) print_loadpath dir | PrintModules -> msg (print_modules ()) | PrintModule qid -> print_module qid | PrintModuleType qid -> print_modtype qid | PrintMLLoadPath -> Mltop.print_ml_path () | PrintMLModules -> Mltop.print_ml_modules () | PrintName qid -> if !pcoq <> None then (Option.get !pcoq).print_name qid else msg (print_name qid) | PrintGraph -> ppnl (Prettyp.print_graph()) | PrintClasses -> ppnl (Prettyp.print_classes()) | PrintTypeClasses -> ppnl (Prettyp.print_typeclasses()) | PrintInstances c -> ppnl (Prettyp.print_instances (smart_global c)) | PrintLtac qid -> ppnl (Tacinterp.print_ltac (snd (qualid_of_reference qid))) | PrintCoercions -> ppnl (Prettyp.print_coercions()) | PrintCoercionPaths (cls,clt) -> ppnl (Prettyp.print_path_between (cl_of_qualid cls) (cl_of_qualid clt)) | PrintCanonicalConversions -> ppnl (Prettyp.print_canonical_projections ()) | PrintUniverses (b, None) -> let univ = Global.universes () in let univ = if b then Univ.sort_universes univ else univ in pp (Univ.pr_universes univ) | PrintUniverses (b, Some s) -> dump_universes b s | PrintHint r -> Auto.print_hint_ref (smart_global r) | PrintHintGoal -> Auto.print_applicable_hint () | PrintHintDbName s -> Auto.print_hint_db_by_name s | PrintRewriteHintDbName s -> Autorewrite.print_rewrite_hintdb s | PrintHintDb -> Auto.print_searchtable () | PrintScopes -> pp (Notation.pr_scopes (Constrextern.without_symbols pr_lglob_constr)) | PrintScope s -> pp (Notation.pr_scope (Constrextern.without_symbols pr_lglob_constr) s) | PrintVisibility s -> pp (Notation.pr_visibility (Constrextern.without_symbols pr_lglob_constr) s) | PrintAbout qid -> msg (print_about qid) | PrintImplicit qid -> dump_global qid; msg (print_impargs qid) | PrintAssumptions (o,r) -> (* Prints all the axioms and section variables used by a term *) let cstr = constr_of_global (smart_global r) in let st = Conv_oracle.get_transp_state () in let nassums = Assumptions.assumptions st ~add_opaque:o cstr in msg (Printer.pr_assumptionset (Global.env ()) nassums) let global_module r = let (loc,qid) = qualid_of_reference r in try Nametab.full_name_module qid with Not_found -> user_err_loc (loc, "global_module", str "Module/section " ++ pr_qualid qid ++ str " not found.") let interp_search_restriction = function | SearchOutside l -> (List.map global_module l, true) | SearchInside l -> (List.map global_module l, false) open Search let is_ident s = try ignore (check_ident s); true with UserError _ -> false let interp_search_about_item = function | SearchSubPattern pat -> let _,pat = intern_constr_pattern Evd.empty (Global.env()) pat in GlobSearchSubPattern pat | SearchString (s,None) when is_ident s -> GlobSearchString s | SearchString (s,sc) -> try let ref = Notation.interp_notation_as_global_reference dummy_loc (fun _ -> true) s sc in GlobSearchSubPattern (Pattern.PRef ref) with UserError _ -> error ("Unable to interp \""^s^"\" either as a reference or \ as an identifier component") let vernac_search s r = let r = interp_search_restriction r in if !pcoq <> None then (Option.get !pcoq).search s r else match s with | SearchPattern c -> let (_,c) = interp_open_constr_patvar Evd.empty (Global.env()) c in Search.search_pattern c r | SearchRewrite c -> let _,pat = interp_open_constr_patvar Evd.empty (Global.env()) c in Search.search_rewrite pat r | SearchHead c -> let _,pat = interp_open_constr_patvar Evd.empty (Global.env()) c in Search.search_by_head pat r | SearchAbout sl -> Search.search_about (List.map (on_snd interp_search_about_item) sl) r let vernac_locate = 
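(* Dispatcher for the Locate family of commands: terms and notations, libraries, modules, Ltac definitions and files. *)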
function | LocateTerm (Genarg.AN qid) -> msgnl (print_located_qualid qid) | LocateTerm (Genarg.ByNotation (_,ntn,sc)) -> ppnl (Notation.locate_notation (Constrextern.without_symbols pr_lglob_constr) ntn sc) | LocateLibrary qid -> print_located_library qid | LocateModule qid -> print_located_module qid | LocateTactic qid -> print_located_tactic qid | LocateFile f -> locate_file f (****************) (* Backtracking *) (** NB: these commands are now forbidden in non-interactive use, e.g. inside VernacLoad, VernacList, ... *) let vernac_backto lbl = try let lbl' = Backtrack.backto lbl in if lbl <> lbl' then Pp.msg_warning (str "Actually back to state "++ Pp.int lbl' ++ str "."); try_print_subgoals () with Backtrack.Invalid -> error "Invalid backtrack." let vernac_back n = try let extra = Backtrack.back n in if extra <> 0 then Pp.msg_warning (str "Actually back by " ++ Pp.int (extra+n) ++ str " steps."); try_print_subgoals () with Backtrack.Invalid -> error "Invalid backtrack." let vernac_reset_name id = try let globalized = try let gr = Smartlocate.global_with_alias (Ident id) in Dumpglob.add_glob (fst id) gr; true with e when Errors.noncritical e -> false in if not globalized then begin try begin match Lib.find_opening_node (snd id) with | Lib.OpenedSection _ -> Dumpglob.dump_reference (fst id) (string_of_dirpath (Lib.current_dirpath true)) "<>" "sec"; (* Might be nice to implement module cases, too.... *) | _ -> () end with UserError _ -> () end; if Backtrack.is_active () then (Backtrack.reset_name id; try_print_subgoals ()) else (* When compiling files, Reset is now allowed again as asked by A. Chlipala. we emulate a simple reset, that discards all proofs. *) let lbl = Lib.label_before_name id in Pfedit.delete_all_proofs (); Pp.msg_warning (str "Reset command occurred in non-interactive mode."); Lib.reset_label lbl with Backtrack.Invalid | Not_found -> error "Invalid Reset." let vernac_reset_initial () = if Backtrack.is_active () then Backtrack.reset_initial () else begin Pp.msg_warning (str "Reset command occurred in non-interactive mode."); Lib.reset_label Lib.first_command_label end (* For compatibility with ProofGeneral: *) let vernac_backtrack snum pnum naborts = Backtrack.backtrack snum pnum naborts; try_print_subgoals () (********************) (* Proof management *) let vernac_abort = function | None -> Backtrack.mark_unreachable [Pfedit.get_current_proof_name ()]; delete_current_proof (); if_verbose message "Current goal aborted"; if !pcoq <> None then (Option.get !pcoq).abort "" | Some id -> Backtrack.mark_unreachable [snd id]; delete_proof id; let s = string_of_id (snd id) in if_verbose message ("Goal "^s^" aborted"); if !pcoq <> None then (Option.get !pcoq).abort s let vernac_abort_all () = if refining() then begin Backtrack.mark_unreachable (Pfedit.get_all_proof_names ()); delete_all_proofs (); message "Current goals aborted" end else error "No proof-editing in progress." let vernac_restart () = Backtrack.mark_unreachable [Pfedit.get_current_proof_name ()]; restart_proof(); print_subgoals () let vernac_undo n = let d = Pfedit.current_proof_depth () - n in Backtrack.mark_unreachable ~after:d [Pfedit.get_current_proof_name ()]; Pfedit.undo n; print_subgoals () let vernac_undoto n = Backtrack.mark_unreachable ~after:n [Pfedit.get_current_proof_name ()]; Pfedit.undo_todepth n; print_subgoals () let vernac_focus gln = let p = Proof_global.give_me_the_proof () in let n = match gln with None -> 1 | Some n -> n in if n = 0 then Util.error "Invalid goal number: 0. 
Goal numbering starts with 1." else Proof.focus focus_command_cond () n p; print_subgoals () (* Unfocuses one step in the focus stack. *) let vernac_unfocus () = let p = Proof_global.give_me_the_proof () in Proof.unfocus command_focus p; print_subgoals () (* Checks that a proof is fully unfocused. Raises an error if not. *) let vernac_unfocused () = let p = Proof_global.give_me_the_proof () in if Proof.unfocused p then msg (str"The proof is indeed fully unfocused.") else error "The proof is not fully unfocused." (* BeginSubproof / EndSubproof. BeginSubproof (vernac_subproof) focuses on the first goal, or the goal given as argument. EndSubproof (vernac_end_subproof) unfocuses from a BeginSubproof, provided that the proof of the goal has been completed. *) let subproof_kind = Proof.new_focus_kind () let subproof_cond = Proof.done_cond subproof_kind let vernac_subproof gln = let p = Proof_global.give_me_the_proof () in begin match gln with | None -> Proof.focus subproof_cond () 1 p | Some n -> Proof.focus subproof_cond () n p end ; print_subgoals () let vernac_end_subproof () = let p = Proof_global.give_me_the_proof () in Proof.unfocus subproof_kind p ; print_subgoals () let vernac_bullet (bullet:Proof_global.Bullet.t) = let p = Proof_global.give_me_the_proof () in Proof.transaction p (fun () -> Proof_global.Bullet.put p bullet); (* Makes the focus visible in emacs by re-printing the goal. *) if !Flags.print_emacs then print_subgoals () let vernac_show = function | ShowGoal goalref -> if !pcoq <> None then (Option.get !pcoq).show_goal goalref else msg (match goalref with | OpenSubgoals -> pr_open_subgoals () | NthGoal n -> pr_nth_open_subgoal n | GoalId id -> pr_goal_by_id id) | ShowGoalImplicitly None -> Constrextern.with_implicits msg (pr_open_subgoals ()) | ShowGoalImplicitly (Some n) -> Constrextern.with_implicits msg (pr_nth_open_subgoal n) | ShowProof -> show_proof () | ShowNode -> show_node () | ShowScript -> show_script () | ShowExistentials -> show_top_evars () | ShowTree -> show_prooftree () | ShowProofNames -> msgnl (prlist_with_sep pr_spc pr_id (Pfedit.get_all_proof_names())) | ShowIntros all -> show_intro all | ShowMatch id -> show_match id | ShowThesis -> show_thesis () let vernac_check_guard () = let pts = get_pftreestate () in let pfterm = List.hd (Proof.partial_proof pts) in let message = try let { Evd.it=gl ; sigma=sigma } = Proof.V82.top_goal pts in Inductiveops.control_only_guard (Goal.V82.env sigma gl) pfterm; (str "The condition holds up to here") with UserError(_,s) -> (str ("Condition violated: ") ++s) in msgnl message let interp c = match c with (* Control (done in vernac) *) | (VernacTime _|VernacList _|VernacLoad _|VernacTimeout _|VernacFail _) -> assert false (* Syntax *) | VernacTacticNotation (n,r,e) -> Metasyntax.add_tactic_notation (n,r,e) | VernacSyntaxExtension (lcl,sl) -> vernac_syntax_extension lcl sl | VernacDelimiters (sc,lr) -> vernac_delimiters sc lr | VernacBindScope (sc,rl) -> vernac_bind_scope sc rl | VernacOpenCloseScope sc -> vernac_open_close_scope sc | VernacArgumentsScope (lcl,qid,scl) -> vernac_arguments_scope lcl qid scl | VernacInfix (local,mv,qid,sc) -> vernac_infix local mv qid sc | VernacNotation (local,c,infpl,sc) -> vernac_notation local c infpl sc (* Gallina *) | VernacDefinition (k,lid,d,f) -> vernac_definition k lid d f | VernacStartTheoremProof (k,l,top,f) -> vernac_start_proof k l top f | VernacEndProof e -> vernac_end_proof e | VernacExactProof c -> vernac_exact_proof c | VernacAssumption (stre,nl,l) -> vernac_assumption stre l nl 
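(* Note the argument order: the AST carries [(kind, inline, list)] while [vernac_assumption] expects [(kind, list, inline)], hence [stre l nl]. *)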
| VernacInductive (finite,infer,l) -> vernac_inductive finite infer l | VernacFixpoint l -> vernac_fixpoint l | VernacCoFixpoint l -> vernac_cofixpoint l | VernacScheme l -> vernac_scheme l | VernacCombinedScheme (id, l) -> vernac_combined_scheme id l (* Modules *) | VernacDeclareModule (export,lid,bl,mtyo) -> vernac_declare_module export lid bl mtyo | VernacDefineModule (export,lid,bl,mtys,mexprl) -> vernac_define_module export lid bl mtys mexprl | VernacDeclareModuleType (lid,bl,mtys,mtyo) -> vernac_declare_module_type lid bl mtys mtyo | VernacInclude in_asts -> vernac_include in_asts (* Gallina extensions *) | VernacBeginSection lid -> vernac_begin_section lid | VernacEndSegment lid -> vernac_end_segment lid | VernacRequire (export,spec,qidl) -> vernac_require export spec qidl | VernacImport (export,qidl) -> vernac_import export qidl | VernacCanonical qid -> vernac_canonical qid | VernacCoercion (str,r,s,t) -> vernac_coercion str r s t | VernacIdentityCoercion (str,(_,id),s,t) -> vernac_identity_coercion str id s t (* Type classes *) | VernacInstance (abst, glob, sup, inst, props, pri) -> vernac_instance abst glob sup inst props pri | VernacContext sup -> vernac_context sup | VernacDeclareInstances (glob, ids) -> vernac_declare_instances glob ids | VernacDeclareClass id -> vernac_declare_class id (* Solving *) | VernacSolve (n,tac,b) -> vernac_solve n tac b | VernacSolveExistential (n,c) -> vernac_solve_existential n c (* Auxiliary file and library management *) | VernacRequireFrom (exp,spec,f) -> vernac_require_from exp spec f | VernacAddLoadPath (isrec,s,alias) -> vernac_add_loadpath isrec s alias | VernacRemoveLoadPath s -> vernac_remove_loadpath s | VernacAddMLPath (isrec,s) -> vernac_add_ml_path isrec s | VernacDeclareMLModule (local, l) -> vernac_declare_ml_module local l | VernacChdir s -> vernac_chdir s (* State management *) | VernacWriteState s -> vernac_write_state s | VernacRestoreState s -> vernac_restore_state s (* Resetting *) | VernacResetName id -> vernac_reset_name id | VernacResetInitial -> vernac_reset_initial () | VernacBack n -> vernac_back n | VernacBackTo n -> vernac_backto n (* Commands *) | VernacDeclareTacticDefinition def -> vernac_declare_tactic_definition def | VernacCreateHintDb (local,dbname,b) -> vernac_create_hintdb local dbname b | VernacRemoveHints (local,dbnames,ids) -> vernac_remove_hints local dbnames ids | VernacHints (local,dbnames,hints) -> vernac_hints local dbnames hints | VernacSyntacticDefinition (id,c,l,b) ->vernac_syntactic_definition id c l b | VernacDeclareImplicits (local,qid,l) ->vernac_declare_implicits local qid l | VernacArguments (local, qid, l, narg, flags) -> vernac_declare_arguments local qid l narg flags | VernacReserve bl -> vernac_reserve bl | VernacGeneralizable (local,gen) -> vernac_generalizable local gen | VernacSetOpacity (local,qidl) -> vernac_set_opacity local qidl | VernacSetOption (locality,key,v) -> vernac_set_option locality key v | VernacUnsetOption (locality,key) -> vernac_unset_option locality key | VernacRemoveOption (key,v) -> vernac_remove_option key v | VernacAddOption (key,v) -> vernac_add_option key v | VernacMemOption (key,v) -> vernac_mem_option key v | VernacPrintOption key -> vernac_print_option key | VernacCheckMayEval (r,g,c) -> vernac_check_may_eval r g c | VernacDeclareReduction (b,s,r) -> vernac_declare_reduction b s r | VernacGlobalCheck c -> vernac_global_check c | VernacPrint p -> vernac_print p | VernacSearch (s,r) -> vernac_search s r | VernacLocate l -> vernac_locate l | VernacComments l -> 
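(* [VernacComments] only acknowledges the comment in verbose mode; its content is otherwise dropped by the interpreter. *)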
if_verbose message ("Comments ok\n") | VernacNop -> () (* Proof management *) | VernacGoal t -> vernac_start_proof Theorem [None,([],t,None)] false (fun _ _->()) | VernacAbort id -> vernac_abort id | VernacAbortAll -> vernac_abort_all () | VernacRestart -> vernac_restart () | VernacUndo n -> vernac_undo n | VernacUndoTo n -> vernac_undoto n | VernacBacktrack (snum,pnum,naborts) -> vernac_backtrack snum pnum naborts | VernacFocus n -> vernac_focus n | VernacUnfocus -> vernac_unfocus () | VernacUnfocused -> vernac_unfocused () | VernacBullet b -> vernac_bullet b | VernacSubproof n -> vernac_subproof n | VernacEndSubproof -> vernac_end_subproof () | VernacShow s -> vernac_show s | VernacCheckGuard -> vernac_check_guard () | VernacProof (None, None) -> print_subgoals () | VernacProof (Some tac, None) -> vernac_set_end_tac tac ; print_subgoals () | VernacProof (None, Some l) -> vernac_set_used_variables l ; print_subgoals () | VernacProof (Some tac, Some l) -> vernac_set_end_tac tac; vernac_set_used_variables l ; print_subgoals () | VernacProofMode mn -> Proof_global.set_proof_mode mn (* Toplevel control *) | VernacToplevelControl e -> raise e (* Extensions *) | VernacExtend (opn,args) -> Vernacinterp.call (opn,args) let interp c = interp c ; check_locality () coq-8.4pl2/toplevel/interface.mli0000640000175000001440000000670112023354106016124 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Current, it contains the name of the coq version which this notation is trying to be compatible with *) type option_value = Goptionstyp.option_value = | BoolValue of bool | IntValue of int option | StringValue of string type option_ref_value = | StringRefValue of string | QualidRefValue of reference type sort_expr = Glob_term.glob_sort type definition_expr = | ProveBody of local_binder list * constr_expr | DefineBody of local_binder list * raw_red_expr option * constr_expr * constr_expr option type fixpoint_expr = identifier located * (identifier located option * recursion_order_expr) * local_binder list * constr_expr * constr_expr option type cofixpoint_expr = identifier located * local_binder list * constr_expr * constr_expr option type local_decl_expr = | AssumExpr of lname * constr_expr | DefExpr of lname * constr_expr * constr_expr option type inductive_kind = Inductive_kw | CoInductive | Record | Structure | Class of bool (* true = definitional, false = inductive *) type decl_notation = lstring * constr_expr * scope_name option type simple_binder = lident list * constr_expr type class_binder = lident * constr_expr list type 'a with_coercion = coercion_flag * 'a type 'a with_instance = instance_flag * 'a type 'a with_notation = 'a * decl_notation list type 'a with_priority = 'a * int option type constructor_expr = (lident * constr_expr) with_coercion type constructor_list_or_record_decl_expr = | Constructors of constructor_expr list | RecordDecl of lident option * local_decl_expr with_instance with_priority with_notation list type inductive_expr = lident with_coercion * local_binder list * constr_expr option * inductive_kind * constructor_list_or_record_decl_expr type one_inductive_expr = lident * local_binder list * constr_expr option * constructor_expr list type module_ast_inl = module_ast annotated type module_binder = bool option * lident list * module_ast_inl type grammar_tactic_prod_item_expr = | TacTerm of string | TacNonTerm of loc * string * (Names.identifier * string) option type 
syntax_modifier = | SetItemLevel of string list * production_level | SetLevel of int | SetAssoc of gram_assoc | SetEntryType of string * simple_constr_prod_entry_key | SetOnlyParsing of Flags.compat_version | SetFormat of string located type proof_end = | Admitted | Proved of opacity_flag * (lident * theorem_kind option) option type scheme = | InductionScheme of bool * reference or_by_notation * sort_expr | CaseScheme of bool * reference or_by_notation * sort_expr | EqualityScheme of reference or_by_notation type inline = int option (* inlining level, none for no inlining *) type bullet = | Dash | Star | Plus type vernac_expr = (* Control *) | VernacList of located_vernac_expr list | VernacLoad of verbose_flag * string | VernacTime of vernac_expr | VernacTimeout of int * vernac_expr | VernacFail of vernac_expr (* Syntax *) | VernacTacticNotation of int * grammar_tactic_prod_item_expr list * raw_tactic_expr | VernacSyntaxExtension of locality_flag * (lstring * syntax_modifier list) | VernacOpenCloseScope of (locality_flag * bool * scope_name) | VernacDelimiters of scope_name * string | VernacBindScope of scope_name * class_rawexpr list | VernacInfix of locality_flag * (lstring * syntax_modifier list) * constr_expr * scope_name option | VernacNotation of locality_flag * constr_expr * (lstring * syntax_modifier list) * scope_name option (* Gallina *) | VernacDefinition of definition_kind * lident * definition_expr * declaration_hook | VernacStartTheoremProof of theorem_kind * (lident option * (local_binder list * constr_expr * (lident option * recursion_order_expr) option)) list * bool * declaration_hook | VernacEndProof of proof_end | VernacExactProof of constr_expr | VernacAssumption of assumption_kind * inline * simple_binder with_coercion list | VernacInductive of inductive_flag * infer_flag * (inductive_expr * decl_notation list) list | VernacFixpoint of (fixpoint_expr * decl_notation list) list | VernacCoFixpoint of (cofixpoint_expr * decl_notation list) list | VernacScheme of (lident option * scheme) list | VernacCombinedScheme of lident * lident list (* Gallina extensions *) | VernacBeginSection of lident | VernacEndSegment of lident | VernacRequire of export_flag option * specif_flag option * lreference list | VernacImport of export_flag * lreference list | VernacCanonical of reference or_by_notation | VernacCoercion of locality * reference or_by_notation * class_rawexpr * class_rawexpr | VernacIdentityCoercion of locality * lident * class_rawexpr * class_rawexpr (* Type classes *) | VernacInstance of bool * (* abstract instance *) bool * (* global *) local_binder list * (* super *) typeclass_constraint * (* instance name, class name, params *) constr_expr option * (* props *) int option (* Priority *) | VernacContext of local_binder list | VernacDeclareInstances of bool (* global *) * reference list (* instance names *) | VernacDeclareClass of reference (* inductive or definition name *) (* Modules and Module Types *) | VernacDeclareModule of bool option * lident * module_binder list * module_ast_inl | VernacDefineModule of bool option * lident * module_binder list * module_ast_inl module_signature * module_ast_inl list | VernacDeclareModuleType of lident * module_binder list * module_ast_inl list * module_ast_inl list | VernacInclude of module_ast_inl list (* Solving *) | VernacSolve of int * raw_tactic_expr * bool | VernacSolveExistential of int * constr_expr (* Auxiliary file and library management *) | VernacRequireFrom of export_flag option * specif_flag option * string | 
VernacAddLoadPath of rec_flag * string * dir_path option | VernacRemoveLoadPath of string | VernacAddMLPath of rec_flag * string | VernacDeclareMLModule of locality_flag * string list | VernacChdir of string option (* State management *) | VernacWriteState of string | VernacRestoreState of string (* Resetting *) | VernacResetName of lident | VernacResetInitial | VernacBack of int | VernacBackTo of int (* Commands *) | VernacDeclareTacticDefinition of (locality_flag * rec_flag * (reference * bool * raw_tactic_expr) list) | VernacCreateHintDb of locality_flag * string * bool | VernacRemoveHints of locality_flag * string list * reference list | VernacHints of locality_flag * string list * hints_expr | VernacSyntacticDefinition of identifier located * (identifier list * constr_expr) * locality_flag * onlyparsing_flag | VernacDeclareImplicits of locality_flag * reference or_by_notation * (explicitation * bool * bool) list list | VernacArguments of locality_flag * reference or_by_notation * ((name * bool * (loc * string) option * bool * bool) list) list * int * [ `SimplDontExposeCase | `SimplNeverUnfold | `Rename | `ExtraScopes | `ClearImplicits | `ClearScopes | `DefaultImplicits ] list | VernacArgumentsScope of locality_flag * reference or_by_notation * scope_name option list | VernacReserve of simple_binder list | VernacGeneralizable of locality_flag * (lident list) option | VernacSetOpacity of locality_flag * (Conv_oracle.level * reference or_by_notation list) list | VernacUnsetOption of full_locality_flag * Goptions.option_name | VernacSetOption of full_locality_flag * Goptions.option_name * option_value | VernacAddOption of Goptions.option_name * option_ref_value list | VernacRemoveOption of Goptions.option_name * option_ref_value list | VernacMemOption of Goptions.option_name * option_ref_value list | VernacPrintOption of Goptions.option_name | VernacCheckMayEval of raw_red_expr option * int option * constr_expr | VernacGlobalCheck of constr_expr | VernacDeclareReduction of locality_flag * string * raw_red_expr | VernacPrint of printable | VernacSearch of searchable * search_restriction | VernacLocate of locatable | VernacComments of comment list | VernacNop (* Proof management *) | VernacGoal of constr_expr | VernacAbort of lident option | VernacAbortAll | VernacRestart | VernacUndo of int | VernacUndoTo of int | VernacBacktrack of int*int*int | VernacFocus of int option | VernacUnfocus | VernacUnfocused | VernacBullet of bullet | VernacSubproof of int option | VernacEndSubproof | VernacShow of showable | VernacCheckGuard | VernacProof of raw_tactic_expr option * lident list option | VernacProofMode of string (* Toplevel control *) | VernacToplevelControl of exn (* For extension *) | VernacExtend of string * raw_generic_argument list and located_vernac_expr = loc * vernac_expr (** Categories of [vernac_expr] *) let rec strip_vernac = function | VernacTime c | VernacTimeout(_,c) | VernacFail c -> strip_vernac c | c -> c (* TODO: what about VernacList ? *) let rec is_navigation_vernac = function | VernacResetInitial | VernacResetName _ | VernacBacktrack _ | VernacBackTo _ | VernacBack _ -> true | VernacTime c -> is_navigation_vernac c (* Time Back* is harmless *) | c -> is_deep_navigation_vernac c and is_deep_navigation_vernac = function | VernacTimeout (_,c) | VernacFail c -> is_navigation_vernac c | VernacList l -> List.exists (fun (_,c) -> is_navigation_vernac c) l | _ -> false (* NB: Reset is now allowed again as asked by A. 
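(* Illustrative sketch, not from the original source: the classification above
   first peels control wrappers before deciding whether a command is a
   navigation command, with Time treated as harmless and Timeout/Fail handled
   as "deep" wrappers.  The same peel-then-classify idea on a minimal, made-up
   AST: *)
type cmd =
  | Back of int                 (* a navigation command *)
  | Definition of string        (* an ordinary command *)
  | Time of cmd                 (* control wrappers *)
  | Timeout of int * cmd
  | Fail of cmd

let rec strip = function
  | Time c | Timeout (_, c) | Fail c -> strip c
  | c -> c

let is_navigation c =
  match strip c with Back _ -> true | _ -> false

(* Note that the real is_navigation_vernac is finer-grained: only Time is
   stripped unconditionally, while navigation under Timeout, Fail or a command
   list is reported separately as "deep" navigation. *)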
Chlipala *) let is_reset = function | VernacResetInitial | VernacResetName _ -> true | _ -> false (* Locating errors raised just after the dot is parsed but before the interpretation phase *) let syntax_checking_error loc s = user_err_loc (loc,"",Pp.str s) (**********************************************************************) (* Managing locality *) let locality_flag = ref None let local_of_bool = function true -> Local | false -> Global let check_locality () = match !locality_flag with | Some (loc,true) -> syntax_checking_error loc "This command does not support the \"Local\" prefix."; | Some (loc,false) -> syntax_checking_error loc "This command does not support the \"Global\" prefix." | None -> () (** Extracting the locality flag *) (* Commands which supported an inlined Local flag *) let enforce_locality_full local = let local = match !locality_flag with | Some (_,false) when local -> error "Cannot be simultaneously Local and Global." | Some (_,true) when local -> error "Use only prefix \"Local\"." | None -> if local then begin Flags.if_warn Pp.msg_warning (Pp.str"Obsolete syntax: use \"Local\" as a prefix."); Some true end else None | Some (_,b) -> Some b in locality_flag := None; local (* Commands which did not supported an inlined Local flag (synonym of [enforce_locality_full false]) *) let use_locality_full () = let r = Option.map snd !locality_flag in locality_flag := None; r (** Positioning locality for commands supporting discharging and export outside of modules *) (* For commands whose default is to discharge and export: Global is the default and is neutral; Local in a section deactivates discharge, Local not in a section deactivates export *) let make_locality = function Some true -> true | _ -> false let use_locality () = make_locality (use_locality_full ()) let use_locality_exp () = local_of_bool (use_locality ()) let enforce_locality local = make_locality (enforce_locality_full local) let enforce_locality_exp local = local_of_bool (enforce_locality local) (* For commands whose default is not to discharge and not to export: Global forces discharge and export; Local is the default and is neutral *) let use_non_locality () = match use_locality_full () with Some false -> false | _ -> true (* For commands whose default is to not discharge but to export: Global in sections forces discharge, Global not in section is the default; Local in sections is the default, Local not in section forces non-export *) let make_section_locality = function Some b -> b | None -> Lib.sections_are_opened () let use_section_locality () = make_section_locality (use_locality_full ()) let enforce_section_locality local = make_section_locality (enforce_locality_full local) (** Positioning locality for commands supporting export but not discharge *) (* For commands whose default is to export (if not in section): Global in sections is forbidden, Global not in section is neutral; Local in sections is the default, Local not in section forces non-export *) let make_module_locality = function | Some false -> if Lib.sections_are_opened () then error "This command does not support the Global option in sections."; false | Some true -> true | None -> false let use_module_locality () = make_module_locality (use_locality_full ()) let enforce_module_locality local = make_module_locality (enforce_locality_full local) (**********************************************************************) coq-8.4pl2/toplevel/coqtop.mli0000640000175000001440000000160112010532755015467 0ustar 
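(* Illustrative sketch, not from the original source: the make_*_locality
   functions above resolve an optional Local/Global prefix against a
   per-command default.  A self-contained rendering of that decision, where
   sections_opened stands for Lib.sections_are_opened () and prefix for the
   parsed flag (the extra error cases, e.g. Global inside a section for
   module-level commands, are omitted): *)
let resolve_locality ~discharges_by_default ~sections_opened prefix =
  match prefix with
  | Some explicitly_local -> explicitly_local
  | None ->
      (* no prefix: commands that discharge in sections behave like
         make_section_locality (Local inside a section, Global outside);
         the others default to Global as in make_locality and
         make_module_locality *)
      discharges_by_default && sections_opened

(* e.g. resolve_locality ~discharges_by_default:true ~sections_opened:true None
   is true, matching make_section_locality None inside an open section. *)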
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val start : unit -> unit coq-8.4pl2/toplevel/class.mli0000640000175000001440000000376312010532755015302 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* locality -> source:cl_typ -> target:cl_typ -> unit (** [try_add_new_coercion ref s] declares [ref], assumed to be of type [(x1:T1)...(xn:Tn)src->tg], as a coercion from [src] to [tg] *) val try_add_new_coercion : global_reference -> locality -> unit (** [try_add_new_coercion_subclass cst s] expects that [cst] denotes a transparent constant which unfolds to some class [tg]; it declares an identity coercion from [cst] to [tg], named something like ["Id_cst_tg"] *) val try_add_new_coercion_subclass : cl_typ -> locality -> unit (** [try_add_new_coercion_with_source ref s src] declares [ref] as a coercion from [src] to [tg] where the target is inferred from the type of [ref] *) val try_add_new_coercion_with_source : global_reference -> locality -> source:cl_typ -> unit (** [try_add_new_identity_coercion id s src tg] enriches the environment with a new definition of name [id] declared as an identity coercion from [src] to [tg] *) val try_add_new_identity_coercion : identifier -> locality -> source:cl_typ -> target:cl_typ -> unit val add_coercion_hook : Tacexpr.declaration_hook val add_subclass_hook : Tacexpr.declaration_hook val class_of_global : global_reference -> cl_typ coq-8.4pl2/toplevel/command.ml0000640000175000001440000006111012010532755015430 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mkLambda (x,t,under_binders (push_rel (x,None,t) env) f (n-1) c) | LetIn (x,b,t,c) -> mkLetIn (x,b,t,under_binders (push_rel (x,Some b,t) env) f (n-1) c) | _ -> assert false let rec complete_conclusion a cs = function | CProdN (loc,bl,c) -> CProdN (loc,bl,complete_conclusion a cs c) | CLetIn (loc,b,t,c) -> CLetIn (loc,b,t,complete_conclusion a cs c) | CHole (loc, k) -> let (has_no_args,name,params) = a in if not has_no_args then user_err_loc (loc,"", strbrk"Cannot infer the non constant arguments of the conclusion of " ++ pr_id cs ++ str "."); let args = List.map (fun id -> CRef(Ident(loc,id))) params in CAppExpl (loc,(None,Ident(loc,name)),List.rev args) | c -> c (* Commands of the interface *) (* 1| Constant definitions *) let red_constant_entry n ce = function | None -> ce | Some red -> let body = ce.const_entry_body in { ce with const_entry_body = under_binders (Global.env()) (fst (reduction_of_red_expr red)) n body } let interp_definition bl red_option c ctypopt = let env = Global.env() in let evdref = ref Evd.empty in let impls, ((env_bl, ctx), imps1) = interp_context_evars evdref env bl in let nb_args = List.length ctx in let imps,ce = match ctypopt with None -> let c, imps2 = interp_constr_evars_impls ~impls ~evdref ~fail_evar:false env_bl c in let body = nf_evar !evdref (it_mkLambda_or_LetIn c ctx) in check_evars env Evd.empty !evdref body; imps1@(Impargs.lift_implicits nb_args imps2), { const_entry_body = body; const_entry_secctx = None; const_entry_type = None; const_entry_opaque = false } | Some ctyp -> let ty, impsty = interp_type_evars_impls ~impls ~evdref ~fail_evar:false env_bl ctyp in let c, imps2 = interp_casted_constr_evars_impls ~impls ~evdref ~fail_evar:false env_bl c 
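(* Illustrative sketch, not from the original source: the under_binders /
   red_constant_entry pair above applies the requested reduction under the n
   binders of a definition, so the Eval annotation acts on the body rather
   than on the lambdas created from the binder list.  The same traversal on a
   minimal, made-up term type: *)
type term = Atom of string | Lambda of string * term

let rec under_n_binders n f = function
  | t when n = 0 -> f t
  | Lambda (x, body) -> Lambda (x, under_n_binders (n - 1) f body)
  | _ -> invalid_arg "under_n_binders: fewer binders than expected"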
ty in let body = nf_evar !evdref (it_mkLambda_or_LetIn c ctx) in let typ = nf_evar !evdref (it_mkProd_or_LetIn ty ctx) in check_evars env Evd.empty !evdref body; check_evars env Evd.empty !evdref typ; (* Check that all implicit arguments inferable from the term is inferable from the type *) if not (try List.for_all (fun (key,va) -> List.assoc key impsty = va) imps2 with Not_found -> false) then warn (str "Implicit arguments declaration relies on type." ++ spc () ++ str "The term declares more implicits than the type here."); imps1@(Impargs.lift_implicits nb_args impsty), { const_entry_body = body; const_entry_secctx = None; const_entry_type = Some typ; const_entry_opaque = false } in red_constant_entry (rel_context_length ctx) ce red_option, imps let declare_global_definition ident ce local k imps = let kn = declare_constant ident (DefinitionEntry ce,IsDefinition k) in let gr = ConstRef kn in maybe_declare_manual_implicits false gr imps; if local = Local && Flags.is_verbose() then msg_warning (pr_id ident ++ str" is declared as a global definition"); definition_message ident; Autoinstance.search_declaration (ConstRef kn); gr let declare_definition_hook = ref ignore let set_declare_definition_hook = (:=) declare_definition_hook let get_declare_definition_hook () = !declare_definition_hook let declare_definition ident (local,k) ce imps hook = !declare_definition_hook ce; let r = match local with | Local when Lib.sections_are_opened () -> let c = SectionLocalDef(ce.const_entry_body ,ce.const_entry_type,false) in let _ = declare_variable ident (Lib.cwd(),c,IsDefinition k) in definition_message ident; if Pfedit.refining () then Flags.if_warn msg_warning (str"Local definition " ++ pr_id ident ++ str" is not visible from current goals"); VarRef ident | (Global|Local) -> declare_global_definition ident ce local k imps in hook local r (* 2| Variable/Hypothesis/Parameter/Axiom declarations *) let declare_assumption is_coe (local,kind) c imps impl nl (_,ident) = let r = match local with | Local when Lib.sections_are_opened () -> let _ = declare_variable ident (Lib.cwd(), SectionLocalAssum (c,impl), IsAssumption kind) in assumption_message ident; if is_verbose () & Pfedit.refining () then msgerrnl (str"Warning: Variable " ++ pr_id ident ++ str" is not visible from current goals"); let r = VarRef ident in Typeclasses.declare_instance None true r; r | (Global|Local) -> let kn = declare_constant ident (ParameterEntry (None,c,nl), IsAssumption kind) in let gr = ConstRef kn in maybe_declare_manual_implicits false gr imps; assumption_message ident; if local=Local & Flags.is_verbose () then msg_warning (pr_id ident ++ str" is declared as a parameter" ++ str" because it is at a global level"); Autoinstance.search_declaration (ConstRef kn); Typeclasses.declare_instance None false gr; gr in if is_coe then Class.try_add_new_coercion r local let declare_assumptions_hook = ref ignore let set_declare_assumptions_hook = (:=) declare_assumptions_hook let interp_assumption bl c = let c = prod_constr_expr c bl in let env = Global.env () in interp_type_evars_impls env c let declare_assumptions idl is_coe k c imps impl_is_on nl = !declare_assumptions_hook c; List.iter (declare_assumption is_coe k c imps impl_is_on nl) idl (* 3a| Elimination schemes for mutual inductive definitions *) (* 3b| Mutual inductive definitions *) let push_named_types env idl tl = List.fold_left2 (fun env id t -> Environ.push_named (id,None,t) env) env idl tl let push_types env idl tl = List.fold_left2 (fun env id t -> Environ.push_rel (Name 
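(* Illustrative sketch, not from the original source: the check above warns
   when the term of a definition makes Coq infer implicit arguments that do
   not also follow from the declared type, i.e. when the term's implicit list
   is not contained in the type's.  The containment test by itself, on plain
   association lists: *)
let implicits_included ~from_term ~from_type =
  List.for_all
    (fun (key, status) ->
       try List.assoc key from_type = status with Not_found -> false)
    from_term

(* The original wraps the whole List.for_all in a single try ... with
   Not_found, which gives the same result: a key missing from the type's
   list makes the test fail. *)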
id,None,t) env) env idl tl type structured_one_inductive_expr = { ind_name : identifier; ind_arity : constr_expr; ind_lc : (identifier * constr_expr) list } type structured_inductive_expr = local_binder list * structured_one_inductive_expr list let minductive_message = function | [] -> error "No inductive definition." | [x] -> (pr_id x ++ str " is defined") | l -> hov 0 (prlist_with_sep pr_comma pr_id l ++ spc () ++ str "are defined") let check_all_names_different indl = let ind_names = List.map (fun ind -> ind.ind_name) indl in let cstr_names = list_map_append (fun ind -> List.map fst ind.ind_lc) indl in let l = list_duplicates ind_names in if l <> [] then raise (InductiveError (SameNamesTypes (List.hd l))); let l = list_duplicates cstr_names in if l <> [] then raise (InductiveError (SameNamesConstructors (List.hd l))); let l = list_intersect ind_names cstr_names in if l <> [] then raise (InductiveError (SameNamesOverlap l)) let mk_mltype_data evdref env assums arity indname = let is_ml_type = is_sort env !evdref arity in (is_ml_type,indname,assums) let prepare_param = function | (na,None,t) -> out_name na, LocalAssum t | (na,Some b,_) -> out_name na, LocalDef b let interp_ind_arity evdref env ind = interp_type_evars_impls ~evdref env ind.ind_arity let interp_cstrs evdref env impls mldata arity ind = let cnames,ctyps = List.split ind.ind_lc in (* Complete conclusions of constructor types if given in ML-style syntax *) let ctyps' = List.map2 (complete_conclusion mldata) cnames ctyps in (* Interpret the constructor types *) let ctyps'', cimpls = List.split (List.map (interp_type_evars_impls ~evdref env ~impls) ctyps') in (cnames, ctyps'', cimpls) let interp_mutual_inductive (paramsl,indl) notations finite = check_all_names_different indl; let env0 = Global.env() in let evdref = ref Evd.empty in let _, ((env_params, ctx_params), userimpls) = interp_context_evars evdref env0 paramsl in let indnames = List.map (fun ind -> ind.ind_name) indl in (* Names of parameters as arguments of the inductive type (defs removed) *) let assums = List.filter(fun (_,b,_) -> b=None) ctx_params in let params = List.map (fun (na,_,_) -> out_name na) assums in (* Interpret the arities *) let arities = List.map (interp_ind_arity evdref env_params) indl in let fullarities = List.map (fun (c, _) -> it_mkProd_or_LetIn c ctx_params) arities in let env_ar = push_types env0 indnames fullarities in let env_ar_params = push_rel_context ctx_params env_ar in (* Compute interpretation metadatas *) let indimpls = List.map (fun (_, impls) -> userimpls @ lift_implicits (rel_context_nhyps ctx_params) impls) arities in let arities = List.map fst arities in let impls = compute_internalization_env env0 (Inductive params) indnames fullarities indimpls in let mldatas = List.map2 (mk_mltype_data evdref env_params params) arities indnames in let constructors = Metasyntax.with_syntax_protection (fun () -> (* Temporary declaration of notations and scopes *) List.iter (Metasyntax.set_notation_for_interpretation impls) notations; (* Interpret the constructor types *) list_map3 (interp_cstrs evdref env_ar_params impls) mldatas arities indl) () in (* Instantiate evars and check all are resolved *) let evd = consider_remaining_unif_problems env_params !evdref in let evd = Typeclasses.resolve_typeclasses ~filter:(Typeclasses.no_goals) ~fail:true env_params evd in let sigma = evd in let constructors = List.map (fun (idl,cl,impsl) -> (idl,List.map (nf_evar sigma) cl,impsl)) constructors in let ctx_params = Sign.map_rel_context (nf_evar sigma) 
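(* Illustrative sketch, not from the original source: check_all_names_different
   above rejects a mutual inductive block when two inductive types, two
   constructors, or a type and a constructor share a name.  The same three
   checks with plain standard-library lists (the duplicate and intersection
   helpers are written out because the original uses Coq's Util functions): *)
let duplicates l =
  let rec aux seen = function
    | [] -> []
    | x :: rest ->
        if List.mem x seen then x :: aux seen rest else aux (x :: seen) rest
  in aux [] l

let intersect l1 l2 = List.filter (fun x -> List.mem x l2) l1

let check_names ind_names cstr_names =
  match duplicates ind_names, duplicates cstr_names,
        intersect ind_names cstr_names with
  | d :: _, _, _ -> failwith ("two inductive types are both named " ^ d)
  | _, d :: _, _ -> failwith ("two constructors are both named " ^ d)
  | _, _, d :: _ -> failwith ("a type and a constructor are both named " ^ d)
  | [], [], [] -> ()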
ctx_params in let arities = List.map (nf_evar sigma) arities in List.iter (check_evars env_params Evd.empty evd) arities; Sign.iter_rel_context (check_evars env0 Evd.empty evd) ctx_params; List.iter (fun (_,ctyps,_) -> List.iter (check_evars env_ar_params Evd.empty evd) ctyps) constructors; (* Build the inductive entries *) let entries = list_map3 (fun ind arity (cnames,ctypes,cimpls) -> { mind_entry_typename = ind.ind_name; mind_entry_arity = arity; mind_entry_consnames = cnames; mind_entry_lc = ctypes }) indl arities constructors in let impls = let len = rel_context_nhyps ctx_params in List.map2 (fun indimpls (_,_,cimpls) -> indimpls, List.map (fun impls -> userimpls @ (lift_implicits len impls)) cimpls) indimpls constructors in (* Build the mutual inductive entry *) { mind_entry_params = List.map prepare_param ctx_params; mind_entry_record = false; mind_entry_finite = finite; mind_entry_inds = entries }, impls (* Very syntactical equality *) let eq_local_binder d1 d2 = match d1,d2 with | LocalRawAssum (nal1,k1,c1), LocalRawAssum (nal2,k2,c2) -> List.length nal1 = List.length nal2 && k1 = k2 && List.for_all2 (fun (_,na1) (_,na2) -> na1 = na2) nal1 nal2 && Constrextern.is_same_type c1 c2 | LocalRawDef ((_,id1),c1), LocalRawDef ((_,id2),c2) -> id1 = id2 && Constrextern.is_same_type c1 c2 | _ -> false let eq_local_binders bl1 bl2 = List.length bl1 = List.length bl2 && List.for_all2 eq_local_binder bl1 bl2 let extract_coercions indl = let mkqid (_,((_,id),_)) = qualid_of_ident id in let extract lc = List.filter (fun (iscoe,_) -> iscoe) lc in List.map mkqid (List.flatten(List.map (fun (_,_,_,lc) -> extract lc) indl)) let extract_params indl = let paramsl = List.map (fun (_,params,_,_) -> params) indl in match paramsl with | [] -> anomaly "empty list of inductive types" | params::paramsl -> if not (List.for_all (eq_local_binders params) paramsl) then error "Parameters should be syntactically the same for each inductive type."; params let extract_inductive indl = List.map (fun ((_,indname),_,ar,lc) -> { ind_name = indname; ind_arity = Option.cata (fun x -> x) (CSort (dummy_loc, Glob_term.GType None)) ar; ind_lc = List.map (fun (_,((_,id),t)) -> (id,t)) lc }) indl let extract_mutual_inductive_declaration_components indl = let indl,ntnl = List.split indl in let params = extract_params indl in let coes = extract_coercions indl in let indl = extract_inductive indl in (params,indl), coes, List.flatten ntnl let declare_mutual_inductive_with_eliminations isrecord mie impls = let names = List.map (fun e -> e.mind_entry_typename) mie.mind_entry_inds in let (_,kn) = declare_mind isrecord mie in let mind = Global.mind_of_delta_kn kn in list_iter_i (fun i (indimpls, constrimpls) -> let ind = (mind,i) in Autoinstance.search_declaration (IndRef ind); maybe_declare_manual_implicits false (IndRef ind) indimpls; list_iter_i (fun j impls -> (* Autoinstance.search_declaration (ConstructRef (ind,j));*) maybe_declare_manual_implicits false (ConstructRef (ind, succ j)) impls) constrimpls) impls; if_verbose ppnl (minductive_message names); declare_default_schemes mind; mind open Vernacexpr type one_inductive_impls = Impargs.manual_explicitation list (* for inds *)* Impargs.manual_explicitation list list (* for constrs *) type one_inductive_expr = lident * local_binder list * constr_expr option * constructor_expr list let do_mutual_inductive indl finite = let indl,coes,ntns = extract_mutual_inductive_declaration_components indl in (* Interpret the types *) let mie,impls = interp_mutual_inductive indl ntns finite in (* 
Declare the mutual inductive block with its associated schemes *) ignore (declare_mutual_inductive_with_eliminations UserVerbose mie impls); (* Declare the possible notations of inductive types *) List.iter Metasyntax.add_notation_interpretation ntns; (* Declare the coercions *) List.iter (fun qid -> Class.try_add_new_coercion (locate qid) Global) coes (* 3c| Fixpoints and co-fixpoints *) (* An (unoptimized) function that maps preorders to partial orders... Input: a list of associations (x,[y1;...;yn]), all yi distincts and different of x, meaning x<=y1, ..., x<=yn Output: a list of associations (x,Inr [y1;...;yn]), collecting all distincts yi greater than x, _or_, (x, Inl y) meaning that x is in the same class as y (in which case, x occurs nowhere else in the association map) partial_order : ('a * 'a list) list -> ('a * ('a,'a list) union) list *) let rec partial_order = function | [] -> [] | (x,xge)::rest -> let rec browse res xge' = function | [] -> let res = List.map (function | (z, Inr zge) when List.mem x zge -> (z, Inr (list_union zge xge')) | r -> r) res in (x,Inr xge')::res | y::xge -> let rec link y = try match List.assoc y res with | Inl z -> link z | Inr yge -> if List.mem x yge then let res = List.remove_assoc y res in let res = List.map (function | (z, Inl t) -> if t = y then (z, Inl x) else (z, Inl t) | (z, Inr zge) -> if List.mem y zge then (z, Inr (list_add_set x (list_remove y zge))) else (z, Inr zge)) res in browse ((y,Inl x)::res) xge' (list_union xge (list_remove x yge)) else browse res (list_add_set y (list_union xge' yge)) xge with Not_found -> browse res (list_add_set y xge') xge in link y in browse (partial_order rest) [] xge let non_full_mutual_message x xge y yge isfix rest = let reason = if List.mem x yge then string_of_id y^" depends on "^string_of_id x^" but not conversely" else if List.mem y xge then string_of_id x^" depends on "^string_of_id y^" but not conversely" else string_of_id y^" and "^string_of_id x^" are not mutually dependent" in let e = if rest <> [] then "e.g.: "^reason else reason in let k = if isfix then "fixpoint" else "cofixpoint" in let w = if isfix then strbrk "Well-foundedness check may fail unexpectedly." 
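(* Illustrative sketch, not from the original source: partial_order above
   collapses the dependency preorder of a fixpoint block into classes, and
   check_mutuality warns when more than one class remains, i.e. when some
   definitions are not transitively mutually dependent.  A naive,
   self-contained version of that test using plain reachability (the original
   is the incremental partial_order computation above): *)
let reachable deps x y =
  (* is y reachable from x by following the dependency edges? *)
  let next z = try List.assoc z deps with Not_found -> [] in
  let rec go visited = function
    | [] -> false
    | z :: rest ->
        if z = y then true
        else if List.mem z visited then go visited rest
        else go (z :: visited) (rest @ next z)
  in
  go [] (next x)

let fully_mutual deps names =
  List.for_all
    (fun x ->
       List.for_all
         (fun y -> x = y || (reachable deps x y && reachable deps y x)) names)
    names

(* e.g. fully_mutual [ "f", ["g"]; "g", ["f"] ] ["f"; "g"] holds, while
   [ "f", ["g"]; "g", [] ] is the situation that triggers the warning. *)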
++ fnl() else mt () in strbrk ("Not a fully mutually defined "^k) ++ fnl () ++ strbrk ("("^e^").") ++ fnl () ++ w let check_mutuality env isfix fixl = let names = List.map fst fixl in let preorder = List.map (fun (id,def) -> (id, List.filter (fun id' -> id<>id' & occur_var env id' def) names)) fixl in let po = partial_order preorder in match List.filter (function (_,Inr _) -> true | _ -> false) po with | (x,Inr xge)::(y,Inr yge)::rest -> if_warn msg_warning (non_full_mutual_message x xge y yge isfix rest) | _ -> () type structured_fixpoint_expr = { fix_name : identifier; fix_annot : identifier located option; fix_binders : local_binder list; fix_body : constr_expr option; fix_type : constr_expr } let interp_fix_context evdref env isfix fix = let before, after = if isfix then split_at_annot fix.fix_binders fix.fix_annot else [], fix.fix_binders in let impl_env, ((env', ctx), imps) = interp_context_evars evdref env before in let impl_env', ((env'', ctx'), imps') = interp_context_evars ~impl_env evdref env' after in let annot = Option.map (fun _ -> List.length (assums_of_rel_context ctx)) fix.fix_annot in ((env'', ctx' @ ctx), (impl_env',imps @ imps'), annot) let interp_fix_ccl evdref impls (env,_) fix = interp_type_evars_impls ~impls ~evdref ~fail_evar:false env fix.fix_type let interp_fix_body evdref env_rec impls (_,ctx) fix ccl = Option.map (fun body -> let env = push_rel_context ctx env_rec in let body = interp_casted_constr_evars evdref env ~impls body ccl in it_mkLambda_or_LetIn body ctx) fix.fix_body let build_fix_type (_,ctx) ccl = it_mkProd_or_LetIn ccl ctx let declare_fix kind f def t imps = let ce = { const_entry_body = def; const_entry_secctx = None; const_entry_type = Some t; const_entry_opaque = false } in let kn = declare_constant f (DefinitionEntry ce,IsDefinition kind) in let gr = ConstRef kn in Autoinstance.search_declaration (ConstRef kn); maybe_declare_manual_implicits false gr imps; gr let prepare_recursive_declaration fixnames fixtypes fixdefs = let defs = List.map (subst_vars (List.rev fixnames)) fixdefs in let names = List.map (fun id -> Name id) fixnames in (Array.of_list names, Array.of_list fixtypes, Array.of_list defs) (* Jump over let-bindings. *) let compute_possible_guardness_evidences (ids,_,na) = match na with | Some i -> [i] | None -> (* If recursive argument was not given by user, we try all args. An earlier approach was to look only for inductive arguments, but doing it properly involves delta-reduction, and it finally doesn't seem to worth the effort (except for huge mutual fixpoints ?) 
*) interval 0 (List.length ids - 1) type recursive_preentry = identifier list * constr option list * types list let interp_recursive isfix fixl notations = let env = Global.env() in let fixnames = List.map (fun fix -> fix.fix_name) fixl in (* Interp arities allowing for unresolved types *) let evdref = ref Evd.empty in let fixctxs, fiximppairs, fixannots = list_split3 (List.map (interp_fix_context evdref env isfix) fixl) in let fixctximpenvs, fixctximps = List.split fiximppairs in let fixccls,fixcclimps = List.split (list_map3 (interp_fix_ccl evdref) fixctximpenvs fixctxs fixl) in let fixtypes = List.map2 build_fix_type fixctxs fixccls in let fixtypes = List.map (nf_evar !evdref) fixtypes in let fiximps = list_map3 (fun ctximps cclimps (_,ctx) -> ctximps@(Impargs.lift_implicits (List.length ctx) cclimps)) fixctximps fixcclimps fixctxs in let env_rec = push_named_types env fixnames fixtypes in (* Get interpretation metadatas *) let impls = compute_internalization_env env Recursive fixnames fixtypes fiximps in (* Interp bodies with rollback because temp use of notations/implicit *) let fixdefs = Metasyntax.with_syntax_protection (fun () -> List.iter (Metasyntax.set_notation_for_interpretation impls) notations; list_map4 (fun fixctximpenv -> interp_fix_body evdref env_rec (Idmap.fold Idmap.add fixctximpenv impls)) fixctximpenvs fixctxs fixl fixccls) () in (* Instantiate evars and check all are resolved *) let evd = consider_remaining_unif_problems env_rec !evdref in let fixdefs = List.map (Option.map (nf_evar evd)) fixdefs in let fixtypes = List.map (nf_evar evd) fixtypes in let fixctxnames = List.map (fun (_,ctx) -> List.map pi1 ctx) fixctxs in let evd = Typeclasses.resolve_typeclasses ~filter:Typeclasses.no_goals ~fail:true env evd in List.iter (Option.iter (check_evars env_rec Evd.empty evd)) fixdefs; List.iter (check_evars env Evd.empty evd) fixtypes; if not (List.mem None fixdefs) then begin let fixdefs = List.map Option.get fixdefs in check_mutuality env isfix (List.combine fixnames fixdefs) end; (* Build the fix declaration block *) (fixnames,fixdefs,fixtypes), list_combine3 fixctxnames fiximps fixannots let interp_fixpoint = interp_recursive true let interp_cofixpoint = interp_recursive false let declare_fixpoint ((fixnames,fixdefs,fixtypes),fiximps) indexes ntns = if List.mem None fixdefs then (* Some bodies to define by proof *) let thms = list_map3 (fun id t (len,imps,_) -> (id,(t,(len,imps)))) fixnames fixtypes fiximps in let init_tac = Some (List.map (Option.cata Tacmach.refine_no_check Tacticals.tclIDTAC) fixdefs) in Lemmas.start_proof_with_initialization (Global,DefinitionBody Fixpoint) (Some(false,indexes,init_tac)) thms None (fun _ _ -> ()) else begin (* We shortcut the proof process *) let fixdefs = List.map Option.get fixdefs in let fixdecls = prepare_recursive_declaration fixnames fixtypes fixdefs in let indexes = search_guard dummy_loc (Global.env()) indexes fixdecls in let fiximps = List.map (fun (n,r,p) -> r) fiximps in let fixdecls = list_map_i (fun i _ -> mkFix ((indexes,i),fixdecls)) 0 fixnames in ignore (list_map4 (declare_fix Fixpoint) fixnames fixdecls fixtypes fiximps); (* Declare the recursive definitions *) fixpoint_message (Some indexes) fixnames; end; (* Declare notations *) List.iter Metasyntax.add_notation_interpretation ntns let declare_cofixpoint ((fixnames,fixdefs,fixtypes),fiximps) ntns = if List.mem None fixdefs then (* Some bodies to define by proof *) let thms = list_map3 (fun id t (len,imps,_) -> (id,(t,(len,imps)))) fixnames fixtypes fiximps in let 
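(* Illustrative sketch, not from the original source: the bodies above are
   interpreted under Metasyntax.with_syntax_protection so that notations and
   implicit-argument settings installed temporarily for the block are rolled
   back afterwards, even if interpretation fails.  The generic shape of such a
   combinator, written against a hypothetical freeze/unfreeze pair: *)
let with_rollback ~freeze ~unfreeze f arg =
  let saved = freeze () in
  try
    let result = f arg in
    unfreeze saved;            (* drop the temporary declarations *)
    result
  with e ->
    unfreeze saved;            (* restore the state before re-raising *)
    raise e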
init_tac = Some (List.map (Option.cata Tacmach.refine_no_check Tacticals.tclIDTAC) fixdefs) in Lemmas.start_proof_with_initialization (Global,DefinitionBody CoFixpoint) (Some(true,[],init_tac)) thms None (fun _ _ -> ()) else begin (* We shortcut the proof process *) let fixdefs = List.map Option.get fixdefs in let fixdecls = prepare_recursive_declaration fixnames fixtypes fixdefs in let fixdecls = list_map_i (fun i _ -> mkCoFix (i,fixdecls)) 0 fixnames in let fiximps = List.map (fun (len,imps,idx) -> imps) fiximps in ignore (list_map4 (declare_fix CoFixpoint) fixnames fixdecls fixtypes fiximps); (* Declare the recursive definitions *) cofixpoint_message fixnames end; (* Declare notations *) List.iter Metasyntax.add_notation_interpretation ntns let extract_decreasing_argument limit = function | (na,CStructRec) -> na | (na,_) when not limit -> na | _ -> error "Only structural decreasing is supported for a non-Program Fixpoint" let extract_fixpoint_components limit l = let fixl, ntnl = List.split l in let fixl = List.map (fun ((_,id),ann,bl,typ,def) -> let ann = extract_decreasing_argument limit ann in {fix_name = id; fix_annot = ann; fix_binders = bl; fix_body = def; fix_type = typ}) fixl in fixl, List.flatten ntnl let extract_cofixpoint_components l = let fixl, ntnl = List.split l in List.map (fun ((_,id),bl,typ,def) -> {fix_name = id; fix_annot = None; fix_binders = bl; fix_body = def; fix_type = typ}) fixl, List.flatten ntnl let do_fixpoint l = let fixl,ntns = extract_fixpoint_components true l in let fix = interp_fixpoint fixl ntns in let possible_indexes = List.map compute_possible_guardness_evidences (snd fix) in declare_fixpoint fix possible_indexes ntns let do_cofixpoint l = let fixl,ntns = extract_cofixpoint_components l in declare_cofixpoint (interp_cofixpoint fixl ntns) ntns coq-8.4pl2/toplevel/record.ml0000640000175000001440000004216712010532755015303 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let impl, t' = interp_evars evars env impls Pretyping.IsType t in let b' = Option.map (fun x -> snd (interp_evars evars env impls (Pretyping.OfType (Some t')) x)) b in let impls = match i with | Anonymous -> impls | Name id -> Idmap.add id (compute_internalization_data env Constrintern.Method t' impl) impls in let d = (i,b',t') in List.iter (Metasyntax.set_notation_for_interpretation impls) no; (push_rel d env, impl :: uimpls, d::params, impls)) (env, [], [], impls_env) nots l let binder_of_decl = function | Vernacexpr.AssumExpr(n,t) -> (n,None,t) | Vernacexpr.DefExpr(n,c,t) -> (n,Some c, match t with Some c -> c | None -> CHole (fst n, None)) let binders_of_decls = List.map binder_of_decl let typecheck_params_and_fields id t ps nots fs = let env0 = Global.env () in let evars = ref Evd.empty in let _ = let error bk (loc, name) = match bk with | Default _ -> if name = Anonymous then user_err_loc (loc, "record", str "Record parameters must be named") | _ -> () in List.iter (function LocalRawDef (b, _) -> error default_binder_kind b | LocalRawAssum (ls, bk, ce) -> List.iter (error bk) ls) ps in let impls_env, ((env1,newps), imps) = interp_context_evars evars env0 ps in let fullarity = it_mkProd_or_LetIn (Option.cata (fun x -> x) (Termops.new_Type ()) t) newps in let env_ar = push_rel_context newps (push_rel (Name id,None,fullarity) env0) in let env2,impls,newfs,data = interp_fields_evars evars env_ar impls_env nots (binders_of_decls fs) in let evars = 
Evarconv.consider_remaining_unif_problems env_ar !evars in let evars = Typeclasses.resolve_typeclasses env_ar evars in let sigma = evars in let newps = Evarutil.nf_rel_context_evar sigma newps in let newfs = Evarutil.nf_rel_context_evar sigma newfs in let ce t = Evarutil.check_evars env0 Evd.empty evars t in List.iter (fun (n, b, t) -> Option.iter ce b; ce t) (List.rev newps); List.iter (fun (n, b, t) -> Option.iter ce b; ce t) (List.rev newfs); imps, newps, impls, newfs let degenerate_decl (na,b,t) = let id = match na with | Name id -> id | Anonymous -> anomaly "Unnamed record variable" in match b with | None -> (id, Entries.LocalAssum t) | Some b -> (id, Entries.LocalDef b) type record_error = | MissingProj of identifier * identifier list | BadTypedProj of identifier * env * Type_errors.type_error let warning_or_error coe indsp err = let st = match err with | MissingProj (fi,projs) -> let s,have = if List.length projs > 1 then "s","were" else "","was" in (str(string_of_id fi) ++ strbrk" cannot be defined because the projection" ++ str s ++ spc () ++ prlist_with_sep pr_comma pr_id projs ++ spc () ++ str have ++ strbrk " not defined.") | BadTypedProj (fi,ctx,te) -> match te with | ElimArity (_,_,_,_,Some (_,_,NonInformativeToInformative)) -> (pr_id fi ++ strbrk" cannot be defined because it is informative and " ++ Printer.pr_inductive (Global.env()) indsp ++ strbrk " is not.") | ElimArity (_,_,_,_,Some (_,_,StrongEliminationOnNonSmallType)) -> (pr_id fi ++ strbrk" cannot be defined because it is large and " ++ Printer.pr_inductive (Global.env()) indsp ++ strbrk " is not.") | _ -> (pr_id fi ++ strbrk " cannot be defined because it is not typable.") in if coe then errorlabstrm "structure" st; Flags.if_verbose ppnl (hov 0 (str"Warning: " ++ st)) type field_status = | NoProjection of name | Projection of constr exception NotDefinable of record_error (* This replaces previous projection bodies in current projection *) (* Undefined projs are collected and, at least one undefined proj occurs *) (* in the body of current projection then the latter can not be defined *) (* [c] is defined in ctxt [[params;fields]] and [l] is an instance of *) (* [[fields]] defined in ctxt [[params;x:ind]] *) let subst_projection fid l c = let lv = List.length l in let bad_projs = ref [] in let rec substrec depth c = match kind_of_term c with | Rel k -> (* We are in context [[params;fields;x:ind;...depth...]] *) if k <= depth+1 then c else if k-depth-1 <= lv then match List.nth l (k-depth-2) with | Projection t -> lift depth t | NoProjection (Name id) -> bad_projs := id :: !bad_projs; mkRel k | NoProjection Anonymous -> errorlabstrm "" (str "Field " ++ pr_id fid ++ str " depends on the " ++ str (ordinal (k-depth-1)) ++ str " field which has no name.") else mkRel (k-lv) | _ -> map_constr_with_binders succ substrec depth c in let c' = lift 1 c in (* to get [c] defined in ctxt [[params;fields;x:ind]] *) let c'' = substrec 0 c' in if !bad_projs <> [] then raise (NotDefinable (MissingProj (fid,List.rev !bad_projs))); c'' let instantiate_possibly_recursive_type indsp paramdecls fields = let subst = list_map_i (fun i _ -> mkRel i) 1 paramdecls in Termops.substl_rel_context (subst@[mkInd indsp]) fields (* We build projections *) let declare_projections indsp ?(kind=StructureComponent) ?name coers fieldimpls fields = let env = Global.env() in let (mib,mip) = Global.lookup_inductive indsp in let paramdecls = mib.mind_params_ctxt in let r = mkInd indsp in let rp = applist (r, Termops.extended_rel_list 0 paramdecls) in let 
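(* Illustrative sketch, not from the original source: subst_projection above is
   a de Bruijn substitution: under depth extra binders, indices up to depth+1
   are untouched (the +1 accounts for the record variable x), the next lv
   indices are replaced by the already-built projections lifted by depth, and
   the remaining indices are shifted down by lv.  The same index arithmetic on
   a minimal, made-up lambda-term type (without the extra record variable): *)
type tm = Var of int | Lam of tm | App of tm * tm   (* de Bruijn indices start at 1 *)

let rec lift_above cutoff n = function
  | Var k -> if k <= cutoff then Var k else Var (k + n)
  | Lam b -> Lam (lift_above (cutoff + 1) n b)
  | App (f, a) -> App (lift_above cutoff n f, lift_above cutoff n a)

(* replace free variables depth+1 .. depth+lv by the terms of l and shift the
   rest down, as the Rel case of substrec does *)
let subst_outer l t =
  let lv = List.length l in
  let rec go depth = function
    | Var k ->
        if k <= depth then Var k
        else if k - depth <= lv then lift_above 0 depth (List.nth l (k - depth - 1))
        else Var (k - lv)
    | Lam b -> Lam (go (depth + 1) b)
    | App (f, a) -> App (go depth f, go depth a)
  in
  go 0 t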
paramargs = Termops.extended_rel_list 1 paramdecls in (*def in [[params;x:rp]]*) let x = match name with Some n -> Name n | None -> Namegen.named_hd (Global.env()) r Anonymous in let fields = instantiate_possibly_recursive_type indsp paramdecls fields in let lifted_fields = Termops.lift_rel_context 1 fields in let (_,kinds,sp_projs,_) = list_fold_left3 (fun (nfi,kinds,sp_projs,subst) coe (fi,optci,ti) impls -> let (sp_projs,subst) = match fi with | Anonymous -> (None::sp_projs,NoProjection fi::subst) | Name fid -> try let ccl = subst_projection fid subst ti in let body = match optci with | Some ci -> subst_projection fid subst ci | None -> (* [ccl] is defined in context [params;x:rp] *) (* [ccl'] is defined in context [params;x:rp;x:rp] *) let ccl' = liftn 1 2 ccl in let p = mkLambda (x, lift 1 rp, ccl') in let branch = it_mkLambda_or_LetIn (mkRel nfi) lifted_fields in let ci = Inductiveops.make_case_info env indsp LetStyle in mkCase (ci, p, mkRel 1, [|branch|]) in let proj = it_mkLambda_or_LetIn (mkLambda (x,rp,body)) paramdecls in let projtyp = it_mkProd_or_LetIn (mkProd (x,rp,ccl)) paramdecls in let kn = try let cie = { const_entry_body = proj; const_entry_secctx = None; const_entry_type = Some projtyp; const_entry_opaque = false } in let k = (DefinitionEntry cie,IsDefinition kind) in let kn = declare_constant ~internal:KernelSilent fid k in Flags.if_verbose message (string_of_id fid ^" is defined"); kn with Type_errors.TypeError (ctx,te) -> raise (NotDefinable (BadTypedProj (fid,ctx,te))) in let refi = ConstRef kn in let constr_fi = mkConst kn in Impargs.maybe_declare_manual_implicits false refi impls; if coe then begin let cl = Class.class_of_global (IndRef indsp) in Class.try_add_new_coercion_with_source refi Global ~source:cl end; let proj_args = (*Rel 1 refers to "x"*) paramargs@[mkRel 1] in let constr_fip = applist (constr_fi,proj_args) in (Some kn::sp_projs, Projection constr_fip::subst) with NotDefinable why -> warning_or_error coe indsp why; (None::sp_projs,NoProjection fi::subst) in (nfi-1,(fi, optci=None)::kinds,sp_projs,subst)) (List.length fields,[],[],[]) coers (List.rev fields) (List.rev fieldimpls) in (kinds,sp_projs) let structure_signature ctx = let rec deps_to_evar evm l = match l with [] -> Evd.empty | [(_,_,typ)] -> Evd.add evm (Evarutil.new_untyped_evar()) (Evd.make_evar Environ.empty_named_context_val typ) | (_,_,typ)::tl -> let ev = Evarutil.new_untyped_evar() in let evm = Evd.add evm ev (Evd.make_evar Environ.empty_named_context_val typ) in let new_tl = Util.list_map_i (fun pos (n,c,t) -> n,c, Termops.replace_term (mkRel pos) (mkEvar(ev,[||])) t) 1 tl in deps_to_evar evm new_tl in deps_to_evar Evd.empty (List.rev ctx) open Typeclasses let declare_structure finite infer id idbuild paramimpls params arity fieldimpls fields ?(kind=StructureComponent) ?name is_coe coers sign = let nparams = List.length params and nfields = List.length fields in let args = Termops.extended_rel_list nfields params in let ind = applist (mkRel (1+nparams+nfields), args) in let type_constructor = it_mkProd_or_LetIn ind fields in let mie_ind = { mind_entry_typename = id; mind_entry_arity = arity; mind_entry_consnames = [idbuild]; mind_entry_lc = [type_constructor] } in (* spiwack: raises an error if the structure is supposed to be non-recursive, but isn't *) (* there is probably a way to push this to "declare_mutual" *) begin match finite with | BiFinite -> if Termops.dependent (mkRel (nparams+1)) (it_mkProd_or_LetIn mkProp fields) then error "Records declared with the keyword Record or 
Structure cannot be recursive. You can, however, define recursive records using the Inductive or CoInductive command." | _ -> () end; let mie = { mind_entry_params = List.map degenerate_decl params; mind_entry_record = true; mind_entry_finite = recursivity_flag_of_kind finite; mind_entry_inds = [mie_ind] } in let kn = Command.declare_mutual_inductive_with_eliminations KernelVerbose mie [(paramimpls,[])] in let rsp = (kn,0) in (* This is ind path of idstruc *) let cstr = (rsp,1) in let kinds,sp_projs = declare_projections rsp ~kind ?name coers fieldimpls fields in let build = ConstructRef cstr in if is_coe then Class.try_add_new_coercion build Global; Recordops.declare_structure(rsp,cstr,List.rev kinds,List.rev sp_projs); if infer then Evd.fold (fun ev evi () -> Recordops.declare_method (ConstructRef cstr) ev sign) sign (); rsp let implicits_of_context ctx = list_map_i (fun i name -> let explname = match name with | Name n -> Some n | Anonymous -> None in ExplByPos (i, explname), (true, true, true)) 1 (List.rev (Anonymous :: (List.map pi1 ctx))) let declare_instance_cst glob con pri = let instance = Typeops.type_of_constant (Global.env ()) con in let _, r = decompose_prod_assum instance in match class_of_constr r with | Some (_, (tc, _)) -> add_instance (new_instance tc pri glob (ConstRef con)) | None -> errorlabstrm "" (Pp.strbrk "Constant does not build instances of a declared type class.") let declare_class finite def infer id idbuild paramimpls params arity fieldimpls fields ?(kind=StructureComponent) ?name is_coe coers priorities sign = let fieldimpls = (* Make the class and all params implicits in the projections *) let ctx_impls = implicits_of_context params in let len = succ (List.length params) in List.map (fun x -> ctx_impls @ Impargs.lift_implicits len x) fieldimpls in let impl, projs = match fields with | [(Name proj_name, _, field)] when def -> let class_body = it_mkLambda_or_LetIn field params in let class_type = Option.map (fun ar -> it_mkProd_or_LetIn ar params) arity in let class_entry = { const_entry_body = class_body; const_entry_secctx = None; const_entry_type = class_type; const_entry_opaque = false } in let cst = Declare.declare_constant (snd id) (DefinitionEntry class_entry, IsDefinition Definition) in let inst_type = appvectc (mkConst cst) (Termops.rel_vect 0 (List.length params)) in let proj_type = it_mkProd_or_LetIn (mkProd(Name (snd id), inst_type, lift 1 field)) params in let proj_body = it_mkLambda_or_LetIn (mkLambda (Name (snd id), inst_type, mkRel 1)) params in let proj_entry = { const_entry_body = proj_body; const_entry_secctx = None; const_entry_type = Some proj_type; const_entry_opaque = false } in let proj_cst = Declare.declare_constant proj_name (DefinitionEntry proj_entry, IsDefinition Definition) in let cref = ConstRef cst in Impargs.declare_manual_implicits false cref [paramimpls]; Impargs.declare_manual_implicits false (ConstRef proj_cst) [List.hd fieldimpls]; Classes.set_typeclass_transparency (EvalConstRef cst) false false; if infer then Evd.fold (fun ev evi _ -> Recordops.declare_method (ConstRef cst) ev sign) sign (); let sub = match List.hd coers with Some b -> Some ((if b then Backward else Forward), List.hd priorities) | None -> None in cref, [Name proj_name, sub, Some proj_cst] | _ -> let idarg = Namegen.next_ident_away (snd id) (Termops.ids_of_context (Global.env())) in let ind = declare_structure BiFinite infer (snd id) idbuild paramimpls params (Option.default (Termops.new_Type ()) arity) fieldimpls fields ~kind:Method ~name:idarg false 
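(* Illustrative sketch, not from the original source: the error above is
   triggered by an occurs-check, asking whether the de Bruijn variable that
   stands for the record type itself appears in the product of its fields
   (Termops.dependent applied to mkRel (nparams+1)).  An occurs-check of that
   shape on a minimal term type, where crossing a binder shifts the index
   being looked for: *)
type ty = Rel of int | Prod of ty * ty | Apply of ty * ty

let rec occurs n = function
  | Rel k -> k = n
  | Prod (dom, codom) -> occurs n dom || occurs (n + 1) codom
  | Apply (f, a) -> occurs n f || occurs n a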
(List.map (fun _ -> false) fields) sign in let coers = List.map2 (fun coe pri -> Option.map (fun b -> if b then Backward, pri else Forward, pri) coe) coers priorities in IndRef ind, (list_map3 (fun (id, _, _) b y -> (id, b, y)) (List.rev fields) coers (Recordops.lookup_projections ind)) in let ctx_context = List.map (fun (na, b, t) -> match Typeclasses.class_of_constr t with | Some (_, (cl, _)) -> Some (cl.cl_impl, true) (*List.exists (fun (_, n) -> n = na) supnames)*) | None -> None) params, params in let k = { cl_impl = impl; cl_context = ctx_context; cl_props = fields; cl_projs = projs } in (* list_iter3 (fun p sub pri -> *) (* if sub then match p with (_, _, Some p) -> declare_instance_cst true p pri | _ -> ()) *) (* k.cl_projs coers priorities; *) add_class k; impl let interp_and_check_sort sort = Option.map (fun sort -> let env = Global.env() and sigma = Evd.empty in let s = interp_constr sigma env sort in if isSort (Reductionops.whd_betadeltaiota env sigma s) then s else user_err_loc (constr_loc sort,"", str"Sort expected.")) sort open Vernacexpr open Autoinstance (* [fs] corresponds to fields and [ps] to parameters; [coers] is a list telling if the corresponding fields must me declared as coercions or subinstances *) let definition_structure (kind,finite,infer,(is_coe,(loc,idstruc)),ps,cfs,idbuild,s) = let cfs,notations = List.split cfs in let cfs,priorities = List.split cfs in let coers,fs = List.split cfs in let extract_name acc = function Vernacexpr.AssumExpr((_,Name id),_) -> id::acc | Vernacexpr.DefExpr ((_,Name id),_,_) -> id::acc | _ -> acc in let allnames = idstruc::(List.fold_left extract_name [] fs) in if not (list_distinct allnames) then error "Two objects have the same name"; if not (kind = Class false) && List.exists ((<>) None) priorities then error "Priorities only allowed for type class substructures"; (* Now, younger decl in params and fields is on top *) let sc = interp_and_check_sort s in let implpars, params, implfs, fields = States.with_state_protection (fun () -> typecheck_params_and_fields idstruc sc ps notations fs) () in let sign = structure_signature (fields@params) in match kind with | Class def -> let gr = declare_class finite def infer (loc,idstruc) idbuild implpars params sc implfs fields is_coe coers priorities sign in if infer then search_record declare_class_instance gr sign; gr | _ -> let arity = Option.default (Termops.new_Type ()) sc in let implfs = List.map (fun impls -> implpars @ Impargs.lift_implicits (succ (List.length params)) impls) implfs in let ind = declare_structure finite infer idstruc idbuild implpars params arity implfs fields is_coe (List.map (fun coe -> coe <> None) coers) sign in if infer then search_record declare_record_instance (ConstructRef (ind,1)) sign; IndRef ind coq-8.4pl2/toplevel/vernac.ml0000640000175000001440000003055512121620060015267 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !atomic_load); Goptions.optwrite = ((:=) atomic_load) } (* Specifies which file is read. The intermediate file names are discarded here. The Drop exception becomes an error. 
We forget if the error ocurred during interpretation or not *) let raise_with_file file exc = let (cmdloc,re) = match exc with | DuringCommandInterp(loc,e) -> (loc,e) | e -> (dummy_loc,e) in let (inner,inex) = match re with | Error_in_file (_, (b,f,loc), e) when loc <> dummy_loc -> ((b, f, loc), e) | Loc.Exc_located (loc, e) when loc <> dummy_loc -> ((false,file, loc), e) | Loc.Exc_located (_, e) | e -> ((false,file,cmdloc), e) in raise (Error_in_file (file, inner, disable_drop inex)) let real_error = function | Loc.Exc_located (_, e) -> e | Error_in_file (_, _, e) -> e | e -> e let user_error loc s = Util.user_err_loc (loc,"_",str s) (** Timeout handling *) (** A global default timeout, controled by option "Set Default Timeout n". Use "Unset Default Timeout" to deactivate it (or set it to 0). *) let default_timeout = ref None let _ = Goptions.declare_int_option { Goptions.optsync = true; Goptions.optdepr = false; Goptions.optname = "the default timeout"; Goptions.optkey = ["Default";"Timeout"]; Goptions.optread = (fun () -> !default_timeout); Goptions.optwrite = ((:=) default_timeout) } (** When interpreting a command, the current timeout is initially the default one, but may be modified locally by a Timeout command. *) let current_timeout = ref None (** Installing and de-installing a timer. Note: according to ocaml documentation, Unix.alarm isn't available for native win32. *) let timeout_handler _ = raise Timeout let set_timeout n = let psh = Sys.signal Sys.sigalrm (Sys.Signal_handle timeout_handler) in ignore (Unix.alarm n); Some psh let default_set_timeout () = match !current_timeout with | Some n -> set_timeout n | None -> None let restore_timeout = function | None -> () | Some psh -> (* stop alarm *) ignore(Unix.alarm 0); (* restore handler *) Sys.set_signal Sys.sigalrm psh (* Open an utf-8 encoded file and skip the byte-order mark if any *) let open_utf8_file_in fname = let is_bom s = Char.code s.[0] = 0xEF && Char.code s.[1] = 0xBB && Char.code s.[2] = 0xBF in let in_chan = open_in fname in let s = " " in if input in_chan s 0 3 < 3 || not (is_bom s) then seek_in in_chan 0; in_chan (* Opening and closing a channel. Open it twice when verbose: the first channel is used to read the commands, and the second one to print them. Note: we could use only one thanks to seek_in, but seeking on and on in the file we parse seems a bit risky to me. B.B. 
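(* Illustrative sketch, not from the original source: the three functions above
   implement command timeouts by installing a SIGALRM handler that raises an
   exception, arming Unix.alarm, and later disarming the alarm and restoring
   the previous handler.  Packaged as one combinator (Unix-only, like the
   original; the exception name is invented here): *)
exception Timed_out

let with_timeout seconds f x =
  let previous =
    Sys.signal Sys.sigalrm (Sys.Signal_handle (fun _ -> raise Timed_out)) in
  let cleanup () =
    ignore (Unix.alarm 0);               (* disarm any pending alarm *)
    Sys.set_signal Sys.sigalrm previous  (* restore the saved handler *)
  in
  try
    ignore (Unix.alarm seconds);
    let result = f x in
    cleanup (); result
  with e -> cleanup (); raise e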
*) let open_file_twice_if verbosely fname = let paths = Library.get_load_paths () in let _,longfname = find_file_in_path ~warn:(Flags.is_verbose()) paths fname in let in_chan = open_utf8_file_in longfname in let verb_ch = if verbosely then Some (open_utf8_file_in longfname) else None in let po = Pcoq.Gram.parsable (Stream.of_channel in_chan) in (in_chan, longfname, (po, verb_ch)) let close_input in_chan (_,verb) = try close_in in_chan; match verb with | Some verb_ch -> close_in verb_ch | _ -> () with e when Errors.noncritical e -> () let verbose_phrase verbch loc = let loc = unloc loc in match verbch with | Some ch -> let len = snd loc - fst loc in let s = String.create len in seek_in ch (fst loc); really_input ch s 0 len; message s; pp_flush() | _ -> () exception End_of_input let parse_sentence (po, verbch) = match Pcoq.Gram.entry_parse Pcoq.main_entry po with | Some (loc,_ as com) -> verbose_phrase verbch loc; com | None -> raise End_of_input (* vernac parses the given stream, executes interpfun on the syntax tree it * parses, and is verbose on "primitives" commands if verbosely is true *) let just_parsing = ref false let chan_beautify = ref stdout let beautify_suffix = ".beautified" let set_formatter_translator() = let ch = !chan_beautify in let out s b e = output ch s b e in Format.set_formatter_output_functions out (fun () -> flush ch); Format.set_max_boxes max_int let pr_new_syntax loc ocom = let loc = unloc loc in if !beautify_file then set_formatter_translator(); let fs = States.freeze () in let com = match ocom with | Some VernacNop -> mt() | Some com -> pr_vernac com | None -> mt() in if !beautify_file then msg (hov 0 (comment (fst loc) ++ com ++ comment (snd loc))) else msgnl (hov 4 (str"New Syntax:" ++ fnl() ++ (hov 0 com))); States.unfreeze fs; Format.set_formatter_out_channel stdout let rec vernac_com interpfun checknav (loc,com) = let rec interp = function | VernacLoad (verbosely, fname) -> let fname = expand_path_macros fname in (* translator state *) let ch = !chan_beautify in let cs = Lexer.com_state() in let cl = !Pp.comments in (* end translator state *) (* coqdoc state *) let cds = Dumpglob.coqdoc_freeze() in if !Flags.beautify_file then begin let _,f = find_file_in_path ~warn:(Flags.is_verbose()) (Library.get_load_paths ()) (make_suffix fname ".v") in chan_beautify := open_out (f^beautify_suffix); Pp.comments := [] end; begin try read_vernac_file verbosely (make_suffix fname ".v"); if !Flags.beautify_file then close_out !chan_beautify; chan_beautify := ch; Lexer.restore_com_state cs; Pp.comments := cl; Dumpglob.coqdoc_unfreeze cds with reraise -> if !Flags.beautify_file then close_out !chan_beautify; chan_beautify := ch; Lexer.restore_com_state cs; Pp.comments := cl; Dumpglob.coqdoc_unfreeze cds; raise reraise end | VernacList l -> List.iter (fun (_,v) -> interp v) l | v when !just_parsing -> () | VernacFail v -> begin try (* If the command actually works, ignore its effects on the state *) States.with_state_protection (fun v -> interp v; raise HasNotFailed) v with e when Errors.noncritical e -> match real_error e with | HasNotFailed -> errorlabstrm "Fail" (str "The command has not failed !") | e -> (* Anomalies are re-raised by the next line *) let msg = Errors.print_no_anomaly e in if_verbose msgnl (str "The command has indeed failed with message:" ++ fnl () ++ str "=> " ++ hov 0 msg) end | VernacTime v -> let tstart = System.get_time() in interp v; let tend = System.get_time() in msgnl (str"Finished transaction in " ++ System.fmt_time_difference tstart tend) | 
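(* Illustrative sketch, not from the original source: the VernacFail branch
   above runs the command under state protection, turns an unexpected success
   into an error, and reports (then swallows) the expected failure.  The
   control-flow skeleton with Coq-specific parts abstracted away (protect and
   report are hypothetical parameters): *)
exception Has_not_failed

let expect_failure ~protect ~report run cmd =
  try
    (* if run succeeds, protect discards its effects on the state and we
       signal that the expected failure never happened *)
    protect (fun c -> run c; raise Has_not_failed) cmd
  with
  | Has_not_failed -> failwith "the command has not failed"
  | e -> report e   (* the command did fail: print its message and continue *)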
VernacTimeout(n,v) -> current_timeout := Some n; interp v | v -> let psh = default_set_timeout () in try States.with_heavy_rollback interpfun Cerrors.process_vernac_interp_error v; restore_timeout psh with reraise -> restore_timeout psh; raise reraise in try checknav loc com; current_timeout := !default_timeout; if do_beautify () then pr_new_syntax loc (Some com); interp com with any -> Format.set_formatter_out_channel stdout; raise (DuringCommandInterp (loc, any)) and read_vernac_file verbosely s = Flags.make_warn verbosely; let interpfun = if verbosely then Vernacentries.interp else Flags.silently Vernacentries.interp in let checknav loc cmd = if is_navigation_vernac cmd && not (is_reset cmd) then user_error loc "Navigation commands forbidden in files" in let end_inner_command cmd = if !atomic_load || is_reset cmd then Lib.mark_end_of_command () (* for Reset in coqc or coqtop -l *) else Backtrack.mark_command cmd; (* for Show Script, cf bug #2820 *) in let (in_chan, fname, input) = open_file_twice_if verbosely s in try (* we go out of the following infinite loop when a End_of_input is * raised, which means that we raised the end of the file being loaded *) while true do let loc_ast = parse_sentence input in vernac_com interpfun checknav loc_ast; end_inner_command (snd loc_ast); pp_flush () done with reraise -> (* whatever the exception *) Format.set_formatter_out_channel stdout; close_input in_chan input; (* we must close the file first *) match real_error reraise with | End_of_input -> if do_beautify () then pr_new_syntax (make_loc (max_int,max_int)) None | _ -> raise_with_file fname reraise (** [eval_expr : ?preserving:bool -> Pp.loc * Vernacexpr.vernac_expr -> unit] It executes one vernacular command. By default the command is considered as non-state-preserving, in which case we add it to the Backtrack stack (triggering a save of a frozen state and the generation of a new state label). An example of state-preserving command is one coming from the query panel of Coqide. *) let checknav loc ast = if is_deep_navigation_vernac ast then user_error loc "Navigation commands forbidden in nested commands" let eval_expr ?(preserving=false) loc_ast = vernac_com Vernacentries.interp checknav loc_ast; if not preserving && not (is_navigation_vernac (snd loc_ast)) then Backtrack.mark_command (snd loc_ast) (* raw_do_vernac : Pcoq.Gram.parsable -> unit * vernac_step . parse_sentence *) let raw_do_vernac po = eval_expr (parse_sentence (po,None)) (* XML output hooks *) let xml_start_library = ref (fun _ -> ()) let xml_end_library = ref (fun _ -> ()) let set_xml_start_library f = xml_start_library := f let set_xml_end_library f = xml_end_library := f (* Load a vernac file. 
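(* Illustrative sketch, not from the original source: read_vernac_file above is
   an intentionally infinite parse/interpret loop, terminated by the
   End_of_input exception, that closes its channel on the way out and
   re-annotates other errors with the file being read.  The skeleton of that
   loop (parse, interp and annotate are hypothetical parameters): *)
exception End_of_stream

let run_file ~parse ~interp ~annotate fname =
  let ic = open_in fname in
  try
    while true do
      interp (parse ic)    (* parse raises End_of_stream at end of file *)
    done
  with
  | End_of_stream -> close_in ic
  | e -> close_in ic; raise (annotate fname e)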
Errors are annotated with file and location *) let load_vernac verb file = chan_beautify := if !Flags.beautify_file then open_out (file^beautify_suffix) else stdout; try Lib.mark_end_of_command (); (* in case we're still in coqtop init *) read_vernac_file verb file; if !Flags.beautify_file then close_out !chan_beautify; with reraise -> if !Flags.beautify_file then close_out !chan_beautify; raise_with_file file reraise (* Compile a vernac file (f is assumed without .v suffix) *) let compile verbosely f = let ldir,long_f_dot_v = Flags.verbosely Library.start_library f in Dumpglob.start_dump_glob long_f_dot_v; Dumpglob.dump_string ("F" ^ Names.string_of_dirpath ldir ^ "\n"); if !Flags.xml_export then !xml_start_library (); let _ = load_vernac verbosely long_f_dot_v in if Pfedit.get_all_proof_names () <> [] then (message "Error: There are pending proofs"; exit 1); if !Flags.xml_export then !xml_end_library (); Dumpglob.end_dump_glob (); Library.save_library_to ldir (long_f_dot_v ^ "o") coq-8.4pl2/toplevel/indschemes.ml0000640000175000001440000003721612121620060016134 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !elim_flag) ; optwrite = (fun b -> elim_flag := b) } let case_flag = ref false let _ = declare_bool_option { optsync = true; optdepr = false; optname = "automatic declaration of case analysis schemes"; optkey = ["Case";"Analysis";"Schemes"]; optread = (fun () -> !case_flag) ; optwrite = (fun b -> case_flag := b) } let eq_flag = ref false let _ = declare_bool_option { optsync = true; optdepr = false; optname = "automatic declaration of boolean equality"; optkey = ["Boolean";"Equality";"Schemes"]; optread = (fun () -> !eq_flag) ; optwrite = (fun b -> eq_flag := b) } let _ = (* compatibility *) declare_bool_option { optsync = true; optdepr = true; optname = "automatic declaration of boolean equality"; optkey = ["Equality";"Scheme"]; optread = (fun () -> !eq_flag) ; optwrite = (fun b -> eq_flag := b) } let is_eq_flag () = !eq_flag && Flags.version_strictly_greater Flags.V8_2 let eq_dec_flag = ref false let _ = declare_bool_option { optsync = true; optdepr = false; optname = "automatic declaration of decidable equality"; optkey = ["Decidable";"Equality";"Schemes"]; optread = (fun () -> !eq_dec_flag) ; optwrite = (fun b -> eq_dec_flag := b) } let rewriting_flag = ref false let _ = declare_bool_option { optsync = true; optdepr = false; optname ="automatic declaration of rewriting schemes for equality types"; optkey = ["Rewriting";"Schemes"]; optread = (fun () -> !rewriting_flag) ; optwrite = (fun b -> rewriting_flag := b) } (* Util *) let define id internal c t = let f = declare_constant ~internal in let kn = f id (DefinitionEntry { const_entry_body = c; const_entry_secctx = None; const_entry_type = t; const_entry_opaque = false }, Decl_kinds.IsDefinition Scheme) in definition_message id; kn (* Boolean equality *) let declare_beq_scheme_gen internal names kn = ignore (define_mutual_scheme beq_scheme_kind internal names kn) let alarm what internal msg = let debug = false in match internal with | KernelVerbose | KernelSilent -> (if debug then Flags.if_warn Pp.msg_warning (hov 0 msg ++ fnl () ++ what ++ str " not defined.")) | _ -> errorlabstrm "" msg let try_declare_scheme what f internal names kn = try f internal names kn with | ParameterWithoutEquality cst -> alarm what internal (str "Boolean equality not found for parameter " ++ pr_con cst ++ str".") | InductiveWithProduct -> alarm 
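(* Illustrative sketch, not from the original source: the alarm and
   try_declare_scheme functions above downgrade scheme-generation failures to
   (optional) warnings when the scheme was requested internally, and keep them
   as hard errors when the user asked for it explicitly.  The severity routing
   in isolation, with Failure standing for the specific exceptions caught in
   the original: *)
type origin = Internal | UserRequested

let report_failure origin what msg =
  match origin with
  | Internal -> prerr_endline ("Warning: " ^ msg ^ " " ^ what ^ " not defined.")
  | UserRequested -> failwith msg

let try_declare origin what declare arg =
  try declare arg
  with Failure msg -> report_failure origin what msg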
what internal (str "Unable to decide equality of functional arguments.") | InductiveWithSort -> alarm what internal (str "Unable to decide equality of type arguments.") | NonSingletonProp ind -> alarm what internal (str "Cannot extract computational content from proposition " ++ quote (Printer.pr_inductive (Global.env()) ind) ++ str ".") | EqNotFound (ind',ind) -> alarm what internal (str "Boolean equality on " ++ quote (Printer.pr_inductive (Global.env()) ind') ++ strbrk " is missing.") | UndefinedCst s -> alarm what internal (strbrk "Required constant " ++ str s ++ str " undefined.") | AlreadyDeclared msg -> alarm what internal (msg ++ str ".") | e when Errors.noncritical e -> alarm what internal (str "Unknown exception during scheme creation.") let beq_scheme_msg mind = let mib = Global.lookup_mind mind in (* TODO: mutual inductive case *) str "Boolean equality on " ++ pr_enum (fun ind -> quote (Printer.pr_inductive (Global.env()) ind)) (list_tabulate (fun i -> (mind,i)) (Array.length mib.mind_packets)) let declare_beq_scheme_with l kn = try_declare_scheme (beq_scheme_msg kn) declare_beq_scheme_gen UserVerbose l kn let try_declare_beq_scheme kn = (* TODO: handle Fix, eventually handle proof-irrelevance; improve decidability by depending on decidability for the parameters rather than on the bl and lb properties *) try_declare_scheme (beq_scheme_msg kn) declare_beq_scheme_gen KernelVerbose [] kn let declare_beq_scheme = declare_beq_scheme_with [] (* Case analysis schemes *) let declare_one_case_analysis_scheme ind = let (mib,mip) = Global.lookup_inductive ind in let kind = inductive_sort_family mip in let dep = if kind = InProp then case_scheme_kind_from_prop else case_dep_scheme_kind_from_type in let kelim = elim_sorts (mib,mip) in (* in case the inductive has a type elimination, generates only one induction scheme, the other ones share the same code with the apropriate type *) if List.mem InType kelim then ignore (define_individual_scheme dep KernelVerbose None ind) (* Induction/recursion schemes *) let kinds_from_prop = [InType,rect_scheme_kind_from_prop; InProp,ind_scheme_kind_from_prop; InSet,rec_scheme_kind_from_prop] let kinds_from_type = [InType,rect_dep_scheme_kind_from_type; InProp,ind_dep_scheme_kind_from_type; InSet,rec_dep_scheme_kind_from_type] let declare_one_induction_scheme ind = let (mib,mip) = Global.lookup_inductive ind in let kind = inductive_sort_family mip in let from_prop = kind = InProp in let kelim = elim_sorts (mib,mip) in let elims = list_map_filter (fun (sort,kind) -> if List.mem sort kelim then Some kind else None) (if from_prop then kinds_from_prop else kinds_from_type) in List.iter (fun kind -> ignore (define_individual_scheme kind KernelVerbose None ind)) elims let declare_induction_schemes kn = let mib = Global.lookup_mind kn in if mib.mind_finite then begin for i = 0 to Array.length mib.mind_packets - 1 do declare_one_induction_scheme (kn,i); done; end (* Decidable equality *) let declare_eq_decidability_gen internal names kn = let mib = Global.lookup_mind kn in if mib.mind_finite then ignore (define_mutual_scheme eq_dec_scheme_kind internal names kn) let eq_dec_scheme_msg ind = (* TODO: mutual inductive case *) str "Decidable equality on " ++ quote (Printer.pr_inductive (Global.env()) ind) let declare_eq_decidability_scheme_with l kn = try_declare_scheme (eq_dec_scheme_msg (kn,0)) declare_eq_decidability_gen UserVerbose l kn let try_declare_eq_decidability kn = try_declare_scheme (eq_dec_scheme_msg (kn,0)) declare_eq_decidability_gen KernelVerbose [] kn 
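(* Illustrative note (not part of the original file): each scheme kind
   comes with two wrappers around [try_declare_scheme], one for explicit
   user commands ([UserVerbose]: failures are reported as errors by
   [alarm]) and one for the automatic declarations triggered by the
   option flags ([KernelVerbose]: failures are not turned into errors,
   cf. [alarm]).  A hypothetical new scheme kind would follow the same
   pattern, where [foo_scheme_msg] and [declare_foo_scheme_gen] are
   assumed helpers:

   let declare_foo_scheme_with l kn =
     try_declare_scheme (foo_scheme_msg kn) declare_foo_scheme_gen UserVerbose l kn
   let try_declare_foo_scheme kn =
     try_declare_scheme (foo_scheme_msg kn) declare_foo_scheme_gen KernelVerbose [] kn
*)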
let declare_eq_decidability = declare_eq_decidability_scheme_with [] let ignore_error f x = try ignore (f x) with e when Errors.noncritical e -> () let declare_rewriting_schemes ind = if Hipattern.is_inductive_equality ind then begin ignore (define_individual_scheme rew_r2l_scheme_kind KernelVerbose None ind); ignore (define_individual_scheme rew_r2l_dep_scheme_kind KernelVerbose None ind); ignore (define_individual_scheme rew_r2l_forward_dep_scheme_kind KernelVerbose None ind); (* These ones expect the equality to be symmetric; the first one also *) (* needs eq *) ignore_error (define_individual_scheme rew_l2r_scheme_kind KernelVerbose None) ind; ignore_error (define_individual_scheme rew_l2r_dep_scheme_kind KernelVerbose None) ind; ignore_error (define_individual_scheme rew_l2r_forward_dep_scheme_kind KernelVerbose None) ind end let declare_congr_scheme ind = if Hipattern.is_equality_type (mkInd ind) then begin if try Coqlib.check_required_library Coqlib.logic_module_name; true with e when Errors.noncritical e -> false then ignore (define_individual_scheme congr_scheme_kind KernelVerbose None ind) else warning "Cannot build congruence scheme because eq is not found" end let declare_sym_scheme ind = if Hipattern.is_inductive_equality ind then (* Expect the equality to be symmetric *) ignore_error (define_individual_scheme sym_scheme_kind KernelVerbose None) ind (* Scheme command *) let rec split_scheme l = let env = Global.env() in match l with | [] -> [],[] | (Some id,t)::q -> let l1,l2 = split_scheme q in ( match t with | InductionScheme (x,y,z) -> ((id,x,smart_global_inductive y,z)::l1),l2 | CaseScheme (x,y,z) -> ((id,x,smart_global_inductive y,z)::l1),l2 | EqualityScheme x -> l1,((Some id,smart_global_inductive x)::l2) ) (* if no name has been provided, we build one from the types of the ind requested *) | (None,t)::q -> let l1,l2 = split_scheme q in let names inds recs isdep y z = let ind = smart_global_inductive y in let sort_of_ind = inductive_sort_family (snd (lookup_mind_specif env ind)) in let z' = family_of_sort (interp_sort z) in let suffix = ( match sort_of_ind with | InProp -> if isdep then (match z' with | InProp -> inds ^ "_dep" | InSet -> recs ^ "_dep" | InType -> recs ^ "t_dep") else ( match z' with | InProp -> inds | InSet -> recs | InType -> recs ^ "t" ) | _ -> if isdep then (match z' with | InProp -> inds | InSet -> recs | InType -> recs ^ "t" ) else (match z' with | InProp -> inds ^ "_nodep" | InSet -> recs ^ "_nodep" | InType -> recs ^ "t_nodep") ) in let newid = add_suffix (basename_of_global (IndRef ind)) suffix in let newref = (dummy_loc,newid) in ((newref,isdep,ind,z)::l1),l2 in match t with | CaseScheme (x,y,z) -> names "_case" "_case" x y z | InductionScheme (x,y,z) -> names "_ind" "_rec" x y z | EqualityScheme x -> l1,((None,smart_global_inductive x)::l2) let do_mutual_induction_scheme lnamedepindsort = let lrecnames = List.map (fun ((_,f),_,_,_) -> f) lnamedepindsort and sigma = Evd.empty and env0 = Global.env() in let lrecspec = List.map (fun (_,dep,ind,sort) -> (ind,dep,interp_elimination_sort sort)) lnamedepindsort in let listdecl = Indrec.build_mutual_induction_scheme env0 sigma lrecspec in let rec declare decl fi lrecref = let decltype = Retyping.get_type_of env0 Evd.empty decl in let decltype = refresh_universes decltype in let cst = define fi UserVerbose decl (Some decltype) in ConstRef cst :: lrecref in let _ = List.fold_right2 declare listdecl lrecnames [] in fixpoint_message None lrecnames let get_common_underlying_mutual_inductive = function | [] 
-> assert false | (id,(mind,i as ind))::l as all -> match List.filter (fun (_,(mind',_)) -> mind <> mind') l with | (_,ind')::_ -> raise (RecursionSchemeError (NotMutualInScheme (ind,ind'))) | [] -> if not (list_distinct (List.map snd (List.map snd all))) then error "A type occurs twice"; mind, list_map_filter (function (Some id,(_,i)) -> Some (i,snd id) | (None,_) -> None) all let do_scheme l = let ischeme,escheme = split_scheme l in (* we want 1 kind of scheme at a time so we check if the user tried to declare different schemes at once *) if (ischeme <> []) && (escheme <> []) then error "Do not declare equality and induction scheme at the same time." else ( if ischeme <> [] then do_mutual_induction_scheme ischeme else let mind,l = get_common_underlying_mutual_inductive escheme in declare_beq_scheme_with l mind; declare_eq_decidability_scheme_with l mind ) (**********************************************************************) (* Combined scheme *) (* Matthieu Sozeau, Dec 2006 *) let list_split_rev_at index l = let rec aux i acc = function hd :: tl when i = index -> acc, tl | hd :: tl -> aux (succ i) (hd :: acc) tl | [] -> failwith "list_split_when: Invalid argument" in aux 0 [] l let fold_left' f = function [] -> raise (Invalid_argument "fold_left'") | hd :: tl -> List.fold_left f hd tl let build_combined_scheme env schemes = let defs = List.map (fun cst -> (cst, Typeops.type_of_constant env cst)) schemes in (* let nschemes = List.length schemes in *) let find_inductive ty = let (ctx, arity) = decompose_prod ty in let (_, last) = List.hd ctx in match kind_of_term last with | App (ind, args) -> let ind = destInd ind in let (_,spec) = Inductive.lookup_mind_specif env ind in ctx, ind, spec.mind_nrealargs | _ -> ctx, destInd last, 0 in let (c, t) = List.hd defs in let ctx, ind, nargs = find_inductive t in (* Number of clauses, including the predicates quantification *) let prods = nb_prod t - (nargs + 1) in let coqand = Coqlib.build_coq_and () and coqconj = Coqlib.build_coq_conj () in let relargs = rel_vect 0 prods in let concls = List.rev_map (fun (cst, t) -> mkApp(mkConst cst, relargs), snd (decompose_prod_n prods t)) defs in let concl_bod, concl_typ = fold_left' (fun (accb, acct) (cst, x) -> mkApp (coqconj, [| x; acct; cst; accb |]), mkApp (coqand, [| x; acct |])) concls in let ctx, _ = list_split_rev_at prods (List.rev_map (fun (x, y) -> x, None, y) ctx) in let typ = it_mkProd_wo_LetIn concl_typ ctx in let body = it_mkLambda_or_LetIn concl_bod ctx in (body, typ) let do_combined_scheme name schemes = let csts = List.map (fun x -> let refe = Ident x in let qualid = qualid_of_reference refe in try Nametab.locate_constant (snd qualid) with Not_found -> error ((string_of_qualid (snd qualid))^" is not declared.")) schemes in let body,typ = build_combined_scheme (Global.env ()) csts in ignore (define (snd name) UserVerbose body (Some typ)); fixpoint_message None [snd name] (**********************************************************************) let map_inductive_block f kn n = for i=0 to n-1 do f (kn,i) done let mutual_inductive_size kn = Array.length (Global.lookup_mind kn).mind_packets let declare_default_schemes kn = let n = mutual_inductive_size kn in if !elim_flag then declare_induction_schemes kn; if !case_flag then map_inductive_block declare_one_case_analysis_scheme kn n; if is_eq_flag() then try_declare_beq_scheme kn; if !eq_dec_flag then try_declare_eq_decidability kn; if !rewriting_flag then map_inductive_block declare_congr_scheme kn n; if !rewriting_flag then map_inductive_block 
declare_sym_scheme kn n; if !rewriting_flag then map_inductive_block declare_rewriting_schemes kn n coq-8.4pl2/toplevel/backtrack.ml0000640000175000001440000002063012121620060015727 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* l:=i::!l) history; !l (** Basic manipulation of the command history stack *) exception Invalid let pop () = ignore (Stack.pop history) let npop n = (* Since our history stack always contains an initial entry, it's invalid to try to completely empty it *) if n < 0 || n >= Stack.length history then raise Invalid else for i = 1 to n do pop () done let top () = try Stack.top history with Stack.Empty -> raise Invalid (** Search the history stack for a suitable location. We perform first a non-destructive search: in case of search failure, the stack is unchanged. *) exception Found of info let search test = try Stack.iter (fun i -> if test i then raise (Found i)) history; raise Invalid with Found i -> while i != Stack.top history do pop () done (** An auxiliary function to retrieve the number of remaining subgoals *) let get_ngoals () = try let prf = Proof_global.give_me_the_proof () in List.length (Evd.sig_it (Proof.V82.background_subgoals prf)) with Proof_global.NoCurrentProof -> 0 (** Register the end of a command and store the current state *) let mark_command ast = Lib.add_frozen_state(); Lib.mark_end_of_command(); Stack.push { label = Lib.current_command_label (); nproofs = List.length (Pfedit.get_all_proof_names ()); prfname = (try Some (Pfedit.get_current_proof_name ()) with Proof_global.NoCurrentProof -> None); prfdepth = max 0 (Pfedit.current_proof_depth ()); reachable = true; ngoals = get_ngoals (); cmd = ast } history (** Backtrack by aborting [naborts] proofs, then setting proof-depth back to [pnum] and finally going to state number [snum]. *) let raw_backtrack snum pnum naborts = for i = 1 to naborts do Pfedit.delete_current_proof () done; Pfedit.undo_todepth pnum; Lib.reset_label snum (** Re-sync the state of the system (label, proofs) with the top of the history stack. We may end on some earlier state to avoid re-opening proofs. This function will return the final label and the number of extra backtracking steps performed. *) let sync nb_opened_proofs = (* Backtrack by enough additional steps to avoid re-opening proofs. Typically, when a Qed has been crossed, we backtrack to the proof start. NB: We cannot reach the empty stack, since the first entry in the stack has no opened proofs and is tagged as reachable. *) let extra = ref 0 in while not (top()).reachable do incr extra; pop () done; let target = top () in (* Now the opened proofs at target is a subset of the opened proofs before the backtrack, we simply abort the extra proofs (if any). NB: It is critical here that proofs are nested in a regular way (i.e. no more Resume or Suspend commands as earlier). This way, we can simply count the extra proofs to abort instead of taking care of their names. *) let naborts = nb_opened_proofs - target.nproofs in (* We are now ready to do a low-level backtrack *) raw_backtrack target.label target.prfdepth naborts; (target.label, !extra) (** Backtracking by a certain number of (non-state-preserving) commands. This is used by Coqide. It may actually undo more commands than asked : for instance instead of jumping back in the middle of a finished proof, we jump back before this proof. The number of extra backtracked command is returned at the end. 
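   An illustrative usage sketch (hypothetical, not from the original
   sources): asking to undo three commands with

     let extra = back 3 in ...

   may pop more than three history entries, e.g. when the third one lies
   inside an already-closed proof; [extra] then counts the steps undone
   beyond the three that were requested.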
*) let back count = if count = 0 then 0 else let nb_opened_proofs = List.length (Pfedit.get_all_proof_names ()) in npop count; snd (sync nb_opened_proofs) (** Backtracking to a certain state number, and reset proofs accordingly. We may end on some earlier state if needed to avoid re-opening proofs. Return the final state number. *) let backto snum = if snum = Lib.current_command_label () then snum else let nb_opened_proofs = List.length (Pfedit.get_all_proof_names ()) in search (fun i -> i.label = snum); fst (sync nb_opened_proofs) (** Old [Backtrack] code with corresponding update of the history stack. [Backtrack] is now deprecated (in favor of [BackTo]) but is kept for compatibility with ProofGeneral. It's completely up to ProofGeneral to decide where to go and how to adapt proofs. Note that the choices of ProofGeneral are currently not always perfect (for instance when backtracking an Undo). *) let backtrack snum pnum naborts = raw_backtrack snum pnum naborts; search (fun i -> i.label = snum) (** [reset_initial] resets the system and clears the command history stack, only pushing back the initial entry. It should be equivalent to [backto Lib.first_command_label], but sligthly more efficient. *) let reset_initial () = let init_label = Lib.first_command_label in if Lib.current_command_label () = init_label then () else begin Pfedit.delete_all_proofs (); Lib.reset_label init_label; Stack.clear history; Stack.push { label = init_label; nproofs = 0; prfname = None; prfdepth = 0; ngoals = 0; reachable = true; cmd = VernacNop } history end (** Reset to the last known state just before defining [id] *) let reset_name id = let lbl = try Lib.label_before_name id with Not_found -> raise Invalid in ignore (backto lbl) (** When a proof is ended (via either Qed/Admitted/Restart/Abort), old proof steps should be marked differently to avoid jumping back to them: - either this proof isn't there anymore in the proof engine - either it's there but it's a more recent attempt after a Restart, so we shouldn't mix the two. We also mark as unreachable the proof steps cancelled via a Undo. 
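   A hypothetical example (illustrative only): when an Undo brings the
   current proof [p] back to depth [d], the cancelled steps can be
   invalidated with [mark_unreachable ~after:d [p]]; when [p] is ended or
   aborted altogether, [mark_unreachable [p]] (with the default
   [after = 0]) hides all of its recorded steps.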
*) let mark_unreachable ?(after=0) prf_lst = let fix i = match i.prfname with | None -> raise Not_found (* stop hacking the history outside of proofs *) | Some p -> if List.mem p prf_lst && i.prfdepth > after then i.reachable <- false in try Stack.iter fix history with Not_found -> () (** Parse the history stack for printing the script of a proof *) let get_script prf = let script = ref [] in let select i = match i.prfname with | None -> raise Not_found | Some p when p=prf && i.reachable -> script := i :: !script | _ -> () in (try Stack.iter select history with Not_found -> ()); (* Get rid of intermediate commands which don't grow the proof depth *) let rec filter n = function | [] -> [] | {prfdepth=d; cmd=c; ngoals=ng}::l when n < d -> (c,ng) :: filter d l | {prfdepth=d}::l -> filter d l in (* initial proof depth (after entering the lemma statement) is 1 *) filter 1 !script coq-8.4pl2/toplevel/lemmas.mli0000640000175000001440000000460412010532755015446 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit) -> unit val start_proof : identifier -> goal_kind -> types -> ?init_tac:tactic -> ?compute_guard:lemma_possible_guards -> declaration_hook -> unit val start_proof_com : goal_kind -> (lident option * (local_binder list * constr_expr * (lident option * recursion_order_expr) option)) list -> declaration_hook -> unit val start_proof_with_initialization : goal_kind -> (bool * lemma_possible_guards * tactic list option) option -> (identifier * (types * (name list * Impargs.manual_explicitation list))) list -> int list option -> declaration_hook -> unit (** A hook the next three functions pass to cook_proof *) val set_save_hook : (Proof.proof -> unit) -> unit (** {6 ... 
} *) (** [save_named b] saves the current completed proof under the name it was started; boolean [b] tells if the theorem is declared opaque; it fails if the proof is not completed *) val save_named : bool -> unit (** [save_anonymous b name] behaves as [save_named] but declares the theorem under the name [name] and respects the strength of the declaration *) val save_anonymous : bool -> identifier -> unit (** [save_anonymous_with_strength s b name] behaves as [save_anonymous] but declares the theorem under the name [name] and gives it the strength [strength] *) val save_anonymous_with_strength : theorem_kind -> bool -> identifier -> unit (** [admit ()] aborts the current goal and save it as an assmumption *) val admit : unit -> unit (** [get_current_context ()] returns the evar context and env of the current open proof if any, otherwise returns the empty evar context and the current global env *) val get_current_context : unit -> Evd.evar_map * Environ.env coq-8.4pl2/toplevel/auto_ind_decl.ml0000640000175000001440000010267112121620060016601 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [] | t::q -> t::(kick_last q) | [] -> failwith "kick_last" and aux = function | (0,l') -> l' | (n,h::t) -> aux (n-1,t) | _ -> failwith "quick_chop" in if n > (List.length l) then failwith "quick_chop args" else kick_last (aux (n,l) ) let rec deconstruct_type t = let l,r = decompose_prod t in (List.map (fun (_,b) -> b) (List.rev l))@[r] exception EqNotFound of inductive * inductive exception EqUnknown of string exception UndefinedCst of string exception InductiveWithProduct exception InductiveWithSort exception ParameterWithoutEquality of constant exception NonSingletonProp of inductive let dl = dummy_loc (* Some pre declaration of constant we are going to use *) let bb = constr_of_global Coqlib.glob_bool let andb_prop = fun _ -> (Coqlib.build_bool_type()).Coqlib.andb_prop let andb_true_intro = fun _ -> (Coqlib.build_bool_type()).Coqlib.andb_true_intro let tt = constr_of_global Coqlib.glob_true let ff = constr_of_global Coqlib.glob_false let eq = constr_of_global Coqlib.glob_eq let sumbool = Coqlib.build_coq_sumbool let andb = fun _ -> (Coqlib.build_bool_type()).Coqlib.andb let induct_on c = new_induct false [Tacexpr.ElimOnConstr (Evd.empty,(c,Glob_term.NoBindings))] None (None,None) None let destruct_on_using c id = new_destruct false [Tacexpr.ElimOnConstr (Evd.empty,(c,Glob_term.NoBindings))] None (None,Some (dl,Genarg.IntroOrAndPattern [ [dl,Genarg.IntroAnonymous]; [dl,Genarg.IntroIdentifier id]])) None let destruct_on c = new_destruct false [Tacexpr.ElimOnConstr (Evd.empty,(c,Glob_term.NoBindings))] None (None,None) None (* reconstruct the inductive with the correct deBruijn indexes *) let mkFullInd ind n = let mib = Global.lookup_mind (fst ind) in let nparams = mib.mind_nparams in let nparrec = mib.mind_nparams_rec in (* params context divided *) let lnonparrec,lnamesparrec = context_chop (nparams-nparrec) mib.mind_params_ctxt in if nparrec > 0 then mkApp (mkInd ind, Array.of_list(extended_rel_list (nparrec+n) lnamesparrec)) else mkInd ind let check_bool_is_defined () = try let _ = Global.type_of_global Coqlib.glob_bool in () with e when Errors.noncritical e -> raise (UndefinedCst "bool") let beq_scheme_kind_aux = ref (fun _ -> failwith "Undefined") let build_beq_scheme kn = check_bool_is_defined (); (* fetching global env *) let env = Global.env() in (* fetching the mutual inductive body *) let 
mib = Global.lookup_mind kn in (* number of inductives in the mutual *) let nb_ind = Array.length mib.mind_packets in (* number of params in the type *) let nparams = mib.mind_nparams in let nparrec = mib.mind_nparams_rec in (* params context divided *) let lnonparrec,lnamesparrec = context_chop (nparams-nparrec) mib.mind_params_ctxt in (* predef coq's boolean type *) (* rec name *) let rec_name i =(string_of_id (Array.get mib.mind_packets i).mind_typename)^ "_eqrec" in (* construct the "fun A B ... N, eqA eqB eqC ... N => fixpoint" part *) let create_input c = let myArrow u v = mkArrow u (lift 1 v) and eqName = function | Name s -> id_of_string ("eq_"^(string_of_id s)) | Anonymous -> id_of_string "eq_A" in let ext_rel_list = extended_rel_list 0 lnamesparrec in let lift_cnt = ref 0 in let eqs_typ = List.map (fun aa -> let a = lift !lift_cnt aa in incr lift_cnt; myArrow a (myArrow a bb) ) ext_rel_list in let eq_input = List.fold_left2 ( fun a b (n,_,_) -> (* mkLambda(n,b,a) ) *) (* here I leave the Naming thingy so that the type of the function is more readable for the user *) mkNamedLambda (eqName n) b a ) c (List.rev eqs_typ) lnamesparrec in List.fold_left (fun a (n,_,t) ->(* mkLambda(n,t,a)) eq_input rel_list *) (* Same here , hoping the auto renaming will do something good ;) *) mkNamedLambda (match n with Name s -> s | Anonymous -> id_of_string "A") t a) eq_input lnamesparrec in let make_one_eq cur = let ind = kn,cur in (* current inductive we are working on *) let cur_packet = mib.mind_packets.(snd ind) in (* Inductive toto : [rettyp] := *) let rettyp = Inductive.type_of_inductive env (mib,cur_packet) in (* split rettyp in a list without the non rec params and the last -> e.g. Inductive vec (A:Set) : nat -> Set := ... will do [nat] *) let rettyp_l = quick_chop nparrec (deconstruct_type rettyp) in (* give a type A, this function tries to find the equality on A declared previously *) (* nlist = the number of args (A , B , ... 
) eqA = the deBruijn index of the first eq param ndx = how much to translate due to the 2nd Case *) let compute_A_equality rel_list nlist eqA ndx t = let lifti = ndx in let rec aux c = let (c,a) = Reductionops.whd_betaiota_stack Evd.empty c in match kind_of_term c with | Rel x -> mkRel (x-nlist+ndx) | Var x -> mkVar (id_of_string ("eq_"^(string_of_id x))) | Cast (x,_,_) -> aux (applist (x,a)) | App _ -> assert false | Ind (kn',i as ind') -> if eq_mind kn kn' then mkRel(eqA-nlist-i+nb_ind-1) else ( try let a = Array.of_list a in let eq = mkConst (find_scheme (!beq_scheme_kind_aux()) (kn',i)) and eqa = Array.map aux a in let args = Array.append (Array.map (fun x->lift lifti x) a) eqa in if args = [||] then eq else mkApp (eq,Array.append (Array.map (fun x->lift lifti x) a) eqa) with Not_found -> raise(EqNotFound (ind',ind)) ) | Sort _ -> raise InductiveWithSort | Prod _ -> raise InductiveWithProduct | Lambda _-> raise (EqUnknown "Lambda") | LetIn _ -> raise (EqUnknown "LetIn") | Const kn -> (match Environ.constant_opt_value env kn with | None -> raise (ParameterWithoutEquality kn) | Some c -> aux (applist (c,a))) | Construct _ -> raise (EqUnknown "Construct") | Case _ -> raise (EqUnknown "Case") | CoFix _ -> raise (EqUnknown "CoFix") | Fix _ -> raise (EqUnknown "Fix") | Meta _ -> raise (EqUnknown "Meta") | Evar _ -> raise (EqUnknown "Evar") in aux t in (* construct the predicate for the Case part*) let do_predicate rel_list n = List.fold_left (fun a b -> mkLambda(Anonymous,b,a)) (mkLambda (Anonymous, mkFullInd ind (n+3+(List.length rettyp_l)+nb_ind-1), bb)) (List.rev rettyp_l) in (* make_one_eq *) (* do the [| C1 ... => match Y with ... end ... Cn => match Y with ... end |] part *) let ci = make_case_info env ind MatchStyle in let constrs n = get_constructors env (make_ind_family (ind, extended_rel_list (n+nb_ind-1) mib.mind_params_ctxt)) in let constrsi = constrs (3+nparrec) in let n = Array.length constrsi in let ar = Array.create n ff in for i=0 to n-1 do let nb_cstr_args = List.length constrsi.(i).cs_args in let ar2 = Array.create n ff in let constrsj = constrs (3+nparrec+nb_cstr_args) in for j=0 to n-1 do if (i=j) then ar2.(j) <- let cc = (match nb_cstr_args with | 0 -> tt | _ -> let eqs = Array.make nb_cstr_args tt in for ndx = 0 to nb_cstr_args-1 do let _,_,cc = List.nth constrsi.(i).cs_args ndx in let eqA = compute_A_equality rel_list nparrec (nparrec+3+2*nb_cstr_args) (nb_cstr_args+ndx+1) cc in Array.set eqs ndx (mkApp (eqA, [|mkRel (ndx+1+nb_cstr_args);mkRel (ndx+1)|] )) done; Array.fold_left (fun a b -> mkApp (andb(),[|b;a|])) (eqs.(0)) (Array.sub eqs 1 (nb_cstr_args - 1)) ) in (List.fold_left (fun a (p,q,r) -> mkLambda (p,r,a)) cc (constrsj.(j).cs_args) ) else ar2.(j) <- (List.fold_left (fun a (p,q,r) -> mkLambda (p,r,a)) ff (constrsj.(j).cs_args) ) done; ar.(i) <- (List.fold_left (fun a (p,q,r) -> mkLambda (p,r,a)) (mkCase (ci,do_predicate rel_list nb_cstr_args, mkVar (id_of_string "Y") ,ar2)) (constrsi.(i).cs_args)) done; mkNamedLambda (id_of_string "X") (mkFullInd ind (nb_ind-1+1)) ( mkNamedLambda (id_of_string "Y") (mkFullInd ind (nb_ind-1+2)) ( mkCase (ci, do_predicate rel_list 0,mkVar (id_of_string "X"),ar))) in (* build_beq_scheme *) let names = Array.make nb_ind Anonymous and types = Array.make nb_ind mkSet and cores = Array.make nb_ind mkSet in for i=0 to (nb_ind-1) do names.(i) <- Name (id_of_string (rec_name i)); types.(i) <- mkArrow (mkFullInd (kn,i) 0) (mkArrow (mkFullInd (kn,i) 1) bb); cores.(i) <- make_one_eq i done; Array.init nb_ind (fun i -> let kelim = 
Inductive.elim_sorts (mib,mib.mind_packets.(i)) in if not (List.mem InSet kelim) then raise (NonSingletonProp (kn,i)); let fix = mkFix (((Array.make nb_ind 0),i),(names,types,cores)) in create_input fix) let beq_scheme_kind = declare_mutual_scheme_object "_beq" build_beq_scheme let _ = beq_scheme_kind_aux := fun () -> beq_scheme_kind (* This function tryies to get the [inductive] between a constr the constr should be Ind i or App(Ind i,[|args|]) *) let destruct_ind c = try let u,v = destApp c in let indc = destInd u in indc,v with e when Errors.noncritical e -> let indc = destInd c in indc,[||] (* In the following, avoid is the list of names to avoid. If the args of the Inductive type are A1 ... An then avoid should be [| lb_An ... lb _A1 (resp. bl_An ... bl_A1) eq_An .... eq_A1 An ... A1 |] so from Ai we can find the the correct eq_Ai bl_ai or lb_ai *) (* used in the leib -> bool side*) let do_replace_lb lb_scheme_key aavoid narg gls p q = let avoid = Array.of_list aavoid in let do_arg v offset = try let x = narg*offset in let s = destVar v in let n = Array.length avoid in let rec find i = if avoid.(n-i) = s then avoid.(n-i-x) else (if i (* if this happen then the args have to be already declared as a Parameter*) ( let mp,dir,lbl = repr_con (destConst v) in mkConst (make_con mp dir (mk_label ( if offset=1 then ("eq_"^(string_of_label lbl)) else ((string_of_label lbl)^"_lb") ))) ) in let type_of_pq = pf_type_of gls p in let u,v = destruct_ind type_of_pq in let lb_type_of_p = try mkConst (find_scheme lb_scheme_key u) with Not_found -> (* spiwack: the format of this error message should probably be improved. *) let err_msg = string_of_ppcmds (str "Leibniz->boolean:" ++ str "You have to declare the" ++ str "decidability over " ++ Printer.pr_constr type_of_pq ++ str " first.") in error err_msg in let lb_args = Array.append (Array.append (Array.map (fun x -> x) v) (Array.map (fun x -> do_arg x 1) v)) (Array.map (fun x -> do_arg x 2) v) in let app = if lb_args = [||] then lb_type_of_p else mkApp (lb_type_of_p,lb_args) in [Equality.replace p q ; apply app ; Auto.default_auto] (* used in the bool -> leib side *) let do_replace_bl bl_scheme_key ind gls aavoid narg lft rgt = let avoid = Array.of_list aavoid in let do_arg v offset = try let x = narg*offset in let s = destVar v in let n = Array.length avoid in let rec find i = if avoid.(n-i) = s then avoid.(n-i-x) else (if i (* if this happen then the args have to be already declared as a Parameter*) ( let mp,dir,lbl = repr_con (destConst v) in mkConst (make_con mp dir (mk_label ( if offset=1 then ("eq_"^(string_of_label lbl)) else ((string_of_label lbl)^"_bl") ))) ) in let rec aux l1 l2 = match (l1,l2) with | (t1::q1,t2::q2) -> let tt1 = pf_type_of gls t1 in if t1=t2 then aux q1 q2 else ( let u,v = try destruct_ind tt1 (* trick so that the good sequence is returned*) with e when Errors.noncritical e -> ind,[||] in if u = ind then (Equality.replace t1 t2)::(Auto.default_auto)::(aux q1 q2) else ( let bl_t1 = try mkConst (find_scheme bl_scheme_key u) with Not_found -> (* spiwack: the format of this error message should probably be improved. 
*) let err_msg = string_of_ppcmds (str "boolean->Leibniz:" ++ str "You have to declare the" ++ str "decidability over " ++ Printer.pr_constr tt1 ++ str " first.") in error err_msg in let bl_args = Array.append (Array.append (Array.map (fun x -> x) v) (Array.map (fun x -> do_arg x 1) v)) (Array.map (fun x -> do_arg x 2) v ) in let app = if bl_args = [||] then bl_t1 else mkApp (bl_t1,bl_args) in (Equality.replace_by t1 t2 (tclTHEN (apply app) (Auto.default_auto)))::(aux q1 q2) ) ) | ([],[]) -> [] | _ -> error "Both side of the equality must have the same arity." in let (ind1,ca1) = try destApp lft with e when Errors.noncritical e -> error "replace failed." and (ind2,ca2) = try destApp rgt with e when Errors.noncritical e -> error "replace failed." in let (sp1,i1) = try destInd ind1 with e when Errors.noncritical e -> try fst (destConstruct ind1) with e when Errors.noncritical e -> error "The expected type is an inductive one." and (sp2,i2) = try destInd ind2 with e when Errors.noncritical e -> try fst (destConstruct ind2) with e when Errors.noncritical e -> error "The expected type is an inductive one." in if (sp1 <> sp2) || (i1 <> i2) then (error "Eq should be on the same type") else (aux (Array.to_list ca1) (Array.to_list ca2)) (* create, from a list of ids [i1,i2,...,in] the list [(in,eq_in,in_bl,in_al),,...,(i1,eq_i1,i1_bl_i1_al )] *) let list_id l = List.fold_left ( fun a (n,_,t) -> let s' = match n with Name s -> string_of_id s | Anonymous -> "A" in (id_of_string s',id_of_string ("eq_"^s'), id_of_string (s'^"_bl"), id_of_string (s'^"_lb")) ::a ) [] l (* build the right eq_I A B.. N eq_A .. eq_N *) let eqI ind l = let list_id = list_id l in let eA = Array.of_list((List.map (fun (s,_,_,_) -> mkVar s) list_id)@ (List.map (fun (_,seq,_,_)-> mkVar seq) list_id )) and e = try mkConst (find_scheme beq_scheme_kind ind) with Not_found -> error ("The boolean equality on "^(string_of_mind (fst ind))^" is needed."); in (if eA = [||] then e else mkApp(e,eA)) (**********************************************************************) (* Boolean->Leibniz *) let compute_bl_goal ind lnamesparrec nparrec = let eqI = eqI ind lnamesparrec in let list_id = list_id lnamesparrec in let create_input c = let x = id_of_string "x" and y = id_of_string "y" in let bl_typ = List.map (fun (s,seq,_,_) -> mkNamedProd x (mkVar s) ( mkNamedProd y (mkVar s) ( mkArrow ( mkApp(eq,[|bb;mkApp(mkVar seq,[|mkVar x;mkVar y|]);tt|])) ( mkApp(eq,[|mkVar s;mkVar x;mkVar y|])) )) ) list_id in let bl_input = List.fold_left2 ( fun a (s,_,sbl,_) b -> mkNamedProd sbl b a ) c (List.rev list_id) (List.rev bl_typ) in let eqs_typ = List.map (fun (s,_,_,_) -> mkProd(Anonymous,mkVar s,mkProd(Anonymous,mkVar s,bb)) ) list_id in let eq_input = List.fold_left2 ( fun a (s,seq,_,_) b -> mkNamedProd seq b a ) bl_input (List.rev list_id) (List.rev eqs_typ) in List.fold_left (fun a (n,_,t) -> mkNamedProd (match n with Name s -> s | Anonymous -> id_of_string "A") t a) eq_input lnamesparrec in let n = id_of_string "x" and m = id_of_string "y" in create_input ( mkNamedProd n (mkFullInd ind nparrec) ( mkNamedProd m (mkFullInd ind (nparrec+1)) ( mkArrow (mkApp(eq,[|bb;mkApp(eqI,[|mkVar n;mkVar m|]);tt|])) (mkApp(eq,[|mkFullInd ind (nparrec+3);mkVar n;mkVar m|])) ))) let compute_bl_tact bl_scheme_key ind lnamesparrec nparrec gsig = let list_id = list_id lnamesparrec in let avoid = ref [] in let first_intros = ( List.map (fun (s,_,_,_) -> s ) list_id ) @ ( List.map (fun (_,seq,_,_ ) -> seq) list_id ) @ ( List.map (fun (_,_,sbl,_ ) -> sbl) list_id ) in let 
fresh_first_intros = List.map ( fun s -> let fresh = fresh_id (!avoid) s gsig in avoid := fresh::(!avoid); fresh ) first_intros in let freshn = fresh_id (!avoid) (id_of_string "x") gsig in let freshm = avoid := freshn::(!avoid); fresh_id (!avoid) (id_of_string "y") gsig in let freshz = avoid := freshm::(!avoid); fresh_id (!avoid) (id_of_string "Z") gsig in (* try with *) avoid := freshz::(!avoid); tclTHENSEQ [ intros_using fresh_first_intros; intro_using freshn ; induct_on (mkVar freshn); intro_using freshm; destruct_on (mkVar freshm); intro_using freshz; intros; tclTRY ( tclORELSE reflexivity (Equality.discr_tac false None) ); simpl_in_hyp (freshz,InHyp); (* repeat ( apply andb_prop in z;let z1:= fresh "Z" in destruct z as [z1 z]). *) tclREPEAT ( tclTHENSEQ [ simple_apply_in freshz (andb_prop()); fun gl -> let fresht = fresh_id (!avoid) (id_of_string "Z") gsig in avoid := fresht::(!avoid); (new_destruct false [Tacexpr.ElimOnConstr (Evd.empty,((mkVar freshz,Glob_term.NoBindings)))] None (None, Some (dl,Genarg.IntroOrAndPattern [[ dl,Genarg.IntroIdentifier fresht; dl,Genarg.IntroIdentifier freshz]])) None) gl ]); (* Ci a1 ... an = Ci b1 ... bn replace bi with ai; auto || replace bi with ai by apply typeofbi_prod ; auto *) fun gls-> let gl = pf_concl gls in match (kind_of_term gl) with | App (c,ca) -> ( match (kind_of_term c) with | Ind indeq -> if IndRef indeq = Coqlib.glob_eq then ( tclTHENSEQ ((do_replace_bl bl_scheme_key ind gls (!avoid) nparrec (ca.(2)) (ca.(1)))@[Auto.default_auto]) gls ) else (error "Failure while solving Boolean->Leibniz.") | _ -> error "Failure while solving Boolean->Leibniz." ) | _ -> error "Failure while solving Boolean->Leibniz." ] gsig let bl_scheme_kind_aux = ref (fun _ -> failwith "Undefined") let make_bl_scheme mind = let mib = Global.lookup_mind mind in if Array.length mib.mind_packets <> 1 then errorlabstrm "" (str "Automatic building of boolean->Leibniz lemmas not supported"); let ind = (mind,0) in let nparams = mib.mind_nparams in let nparrec = mib.mind_nparams_rec in let lnonparrec,lnamesparrec = context_chop (nparams-nparrec) mib.mind_params_ctxt in [|Pfedit.build_by_tactic (Global.env()) (compute_bl_goal ind lnamesparrec nparrec) (compute_bl_tact (!bl_scheme_kind_aux()) ind lnamesparrec nparrec)|] let bl_scheme_kind = declare_mutual_scheme_object "_dec_bl" make_bl_scheme let _ = bl_scheme_kind_aux := fun () -> bl_scheme_kind (**********************************************************************) (* Leibniz->Boolean *) let compute_lb_goal ind lnamesparrec nparrec = let list_id = list_id lnamesparrec in let eqI = eqI ind lnamesparrec in let create_input c = let x = id_of_string "x" and y = id_of_string "y" in let lb_typ = List.map (fun (s,seq,_,_) -> mkNamedProd x (mkVar s) ( mkNamedProd y (mkVar s) ( mkArrow ( mkApp(eq,[|mkVar s;mkVar x;mkVar y|])) ( mkApp(eq,[|bb;mkApp(mkVar seq,[|mkVar x;mkVar y|]);tt|])) )) ) list_id in let lb_input = List.fold_left2 ( fun a (s,_,_,slb) b -> mkNamedProd slb b a ) c (List.rev list_id) (List.rev lb_typ) in let eqs_typ = List.map (fun (s,_,_,_) -> mkProd(Anonymous,mkVar s,mkProd(Anonymous,mkVar s,bb)) ) list_id in let eq_input = List.fold_left2 ( fun a (s,seq,_,_) b -> mkNamedProd seq b a ) lb_input (List.rev list_id) (List.rev eqs_typ) in List.fold_left (fun a (n,_,t) -> mkNamedProd (match n with Name s -> s | Anonymous -> id_of_string "A") t a) eq_input lnamesparrec in let n = id_of_string "x" and m = id_of_string "y" in create_input ( mkNamedProd n (mkFullInd ind nparrec) ( mkNamedProd m (mkFullInd ind (nparrec+1)) 
( mkArrow (mkApp(eq,[|mkFullInd ind (nparrec+2);mkVar n;mkVar m|])) (mkApp(eq,[|bb;mkApp(eqI,[|mkVar n;mkVar m|]);tt|])) ))) let compute_lb_tact lb_scheme_key ind lnamesparrec nparrec gsig = let list_id = list_id lnamesparrec in let avoid = ref [] in let first_intros = ( List.map (fun (s,_,_,_) -> s ) list_id ) @ ( List.map (fun (_,seq,_,_) -> seq) list_id ) @ ( List.map (fun (_,_,_,slb) -> slb) list_id ) in let fresh_first_intros = List.map ( fun s -> let fresh = fresh_id (!avoid) s gsig in avoid := fresh::(!avoid); fresh ) first_intros in let freshn = fresh_id (!avoid) (id_of_string "x") gsig in let freshm = avoid := freshn::(!avoid); fresh_id (!avoid) (id_of_string "y") gsig in let freshz = avoid := freshm::(!avoid); fresh_id (!avoid) (id_of_string "Z") gsig in (* try with *) avoid := freshz::(!avoid); tclTHENSEQ [ intros_using fresh_first_intros; intro_using freshn ; induct_on (mkVar freshn); intro_using freshm; destruct_on (mkVar freshm); intro_using freshz; intros; tclTRY ( tclORELSE reflexivity (Equality.discr_tac false None) ); Equality.inj [] false (mkVar freshz,Glob_term.NoBindings); intros; simpl_in_concl; Auto.default_auto; tclREPEAT ( tclTHENSEQ [apply (andb_true_intro()); simplest_split ;Auto.default_auto ] ); fun gls -> let gl = pf_concl gls in (* assume the goal to be eq (eq_type ...) = true *) match (kind_of_term gl) with | App(c,ca) -> (match (kind_of_term ca.(1)) with | App(c',ca') -> let n = Array.length ca' in tclTHENSEQ (do_replace_lb lb_scheme_key (!avoid) nparrec gls ca'.(n-2) ca'.(n-1)) gls | _ -> error "Failure while solving Leibniz->Boolean." ) | _ -> error "Failure while solving Leibniz->Boolean." ] gsig let lb_scheme_kind_aux = ref (fun () -> failwith "Undefined") let make_lb_scheme mind = let mib = Global.lookup_mind mind in if Array.length mib.mind_packets <> 1 then errorlabstrm "" (str "Automatic building of Leibniz->boolean lemmas not supported"); let ind = (mind,0) in let nparams = mib.mind_nparams in let nparrec = mib.mind_nparams_rec in let lnonparrec,lnamesparrec = context_chop (nparams-nparrec) mib.mind_params_ctxt in [|Pfedit.build_by_tactic (Global.env()) (compute_lb_goal ind lnamesparrec nparrec) (compute_lb_tact (!lb_scheme_kind_aux()) ind lnamesparrec nparrec)|] let lb_scheme_kind = declare_mutual_scheme_object "_dec_lb" make_lb_scheme let _ = lb_scheme_kind_aux := fun () -> lb_scheme_kind (**********************************************************************) (* Decidable equality *) let check_not_is_defined () = try ignore (Coqlib.build_coq_not ()) with e when Errors.noncritical e -> raise (UndefinedCst "not") (* {n=m}+{n<>m} part *) let compute_dec_goal ind lnamesparrec nparrec = check_not_is_defined (); let list_id = list_id lnamesparrec in let create_input c = let x = id_of_string "x" and y = id_of_string "y" in let lb_typ = List.map (fun (s,seq,_,_) -> mkNamedProd x (mkVar s) ( mkNamedProd y (mkVar s) ( mkArrow ( mkApp(eq,[|mkVar s;mkVar x;mkVar y|])) ( mkApp(eq,[|bb;mkApp(mkVar seq,[|mkVar x;mkVar y|]);tt|])) )) ) list_id in let bl_typ = List.map (fun (s,seq,_,_) -> mkNamedProd x (mkVar s) ( mkNamedProd y (mkVar s) ( mkArrow ( mkApp(eq,[|bb;mkApp(mkVar seq,[|mkVar x;mkVar y|]);tt|])) ( mkApp(eq,[|mkVar s;mkVar x;mkVar y|])) )) ) list_id in let lb_input = List.fold_left2 ( fun a (s,_,_,slb) b -> mkNamedProd slb b a ) c (List.rev list_id) (List.rev lb_typ) in let bl_input = List.fold_left2 ( fun a (s,_,sbl,_) b -> mkNamedProd sbl b a ) lb_input (List.rev list_id) (List.rev bl_typ) in let eqs_typ = List.map (fun (s,_,_,_) -> 
mkProd(Anonymous,mkVar s,mkProd(Anonymous,mkVar s,bb)) ) list_id in let eq_input = List.fold_left2 ( fun a (s,seq,_,_) b -> mkNamedProd seq b a ) bl_input (List.rev list_id) (List.rev eqs_typ) in List.fold_left (fun a (n,_,t) -> mkNamedProd (match n with Name s -> s | Anonymous -> id_of_string "A") t a) eq_input lnamesparrec in let n = id_of_string "x" and m = id_of_string "y" in let eqnm = mkApp(eq,[|mkFullInd ind (2*nparrec+2);mkVar n;mkVar m|]) in create_input ( mkNamedProd n (mkFullInd ind (2*nparrec)) ( mkNamedProd m (mkFullInd ind (2*nparrec+1)) ( mkApp(sumbool(),[|eqnm;mkApp (Coqlib.build_coq_not(),[|eqnm|])|]) ) ) ) let compute_dec_tact ind lnamesparrec nparrec gsig = let list_id = list_id lnamesparrec in let eqI = eqI ind lnamesparrec in let avoid = ref [] in let eqtrue x = mkApp(eq,[|bb;x;tt|]) in let eqfalse x = mkApp(eq,[|bb;x;ff|]) in let first_intros = ( List.map (fun (s,_,_,_) -> s ) list_id ) @ ( List.map (fun (_,seq,_,_) -> seq) list_id ) @ ( List.map (fun (_,_,sbl,_) -> sbl) list_id ) @ ( List.map (fun (_,_,_,slb) -> slb) list_id ) in let fresh_first_intros = List.map ( fun s -> let fresh = fresh_id (!avoid) s gsig in avoid := fresh::(!avoid); fresh ) first_intros in let freshn = fresh_id (!avoid) (id_of_string "x") gsig in let freshm = avoid := freshn::(!avoid); fresh_id (!avoid) (id_of_string "y") gsig in let freshH = avoid := freshm::(!avoid); fresh_id (!avoid) (id_of_string "H") gsig in let eqbnm = mkApp(eqI,[|mkVar freshn;mkVar freshm|]) in avoid := freshH::(!avoid); let arfresh = Array.of_list fresh_first_intros in let xargs = Array.sub arfresh 0 (2*nparrec) in let blI = try mkConst (find_scheme bl_scheme_kind ind) with Not_found -> error ( "Error during the decidability part, boolean to leibniz"^ " equality is required.") in let lbI = try mkConst (find_scheme lb_scheme_kind ind) with Not_found -> error ( "Error during the decidability part, leibniz to boolean"^ " equality is required.") in tclTHENSEQ [ intros_using fresh_first_intros; intros_using [freshn;freshm]; (*we do this so we don't have to prove the same goal twice *) assert_by (Name freshH) ( mkApp(sumbool(),[|eqtrue eqbnm; eqfalse eqbnm|]) ) (tclTHEN (destruct_on eqbnm) Auto.default_auto); (fun gsig -> let freshH2 = fresh_id (!avoid) (id_of_string "H") gsig in avoid := freshH2::(!avoid); tclTHENS (destruct_on_using (mkVar freshH) freshH2) [ (* left *) tclTHENSEQ [ simplest_left; apply (mkApp(blI,Array.map(fun x->mkVar x) xargs)); Auto.default_auto ]; (*right *) (fun gsig -> let freshH3 = fresh_id (!avoid) (id_of_string "H") gsig in avoid := freshH3::(!avoid); tclTHENSEQ [ simplest_right ; unfold_constr (Lazy.force Coqlib.coq_not_ref); intro; Equality.subst_all; assert_by (Name freshH3) (mkApp(eq,[|bb;mkApp(eqI,[|mkVar freshm;mkVar freshm|]);tt|])) (tclTHENSEQ [ apply (mkApp(lbI,Array.map (fun x->mkVar x) xargs)); Auto.default_auto ]); Equality.general_rewrite_bindings_in true all_occurrences true false (List.hd !avoid) ((mkVar (List.hd (List.tl !avoid))), Glob_term.NoBindings ) true; Equality.discr_tac false None ] gsig) ] gsig) ] gsig let make_eq_decidability mind = let mib = Global.lookup_mind mind in if Array.length mib.mind_packets <> 1 then anomaly "Decidability lemma for mutual inductive types not supported"; let ind = (mind,0) in let nparams = mib.mind_nparams in let nparrec = mib.mind_nparams_rec in let lnonparrec,lnamesparrec = context_chop (nparams-nparrec) mib.mind_params_ctxt in [|Pfedit.build_by_tactic (Global.env()) (compute_dec_goal ind lnamesparrec nparrec) (compute_dec_tact ind 
lnamesparrec nparrec)|] let eq_dec_scheme_kind = declare_mutual_scheme_object "_eq_dec" make_eq_decidability (* The eq_dec_scheme proofs depend on the equality and discr tactics but the inj tactics, that comes with discr, depends on the eq_dec_scheme... *) let _ = Equality.set_eq_dec_scheme_kind eq_dec_scheme_kind coq-8.4pl2/toplevel/utils/0000750000175000001440000000000012127276552014631 5ustar notinuserscoq-8.4pl2/toplevel/toplevel.ml0000640000175000001440000003062612121620060015642 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string; mutable str : string; (* buffer of already read characters *) mutable len : int; (* number of chars in the buffer *) mutable bols : int list; (* offsets in str of begining of lines *) mutable tokens : Gram.parsable; (* stream of tokens *) mutable start : int } (* stream count of the first char of the buffer *) (* Double the size of the buffer. *) let resize_buffer ibuf = let nstr = String.create (2 * String.length ibuf.str + 1) in String.blit ibuf.str 0 nstr 0 (String.length ibuf.str); ibuf.str <- nstr (* Delete all irrelevent lines of the input buffer. Keep the last line in the buffer (useful when there are several commands on the same line. *) let resynch_buffer ibuf = match ibuf.bols with | ll::_ -> let new_len = ibuf.len - ll in String.blit ibuf.str ll ibuf.str 0 new_len; ibuf.len <- new_len; ibuf.bols <- []; ibuf.start <- ibuf.start + ll | _ -> () (* emacs special prompt tag for easy detection. No special character, to avoid interfering with utf8. Compatibility code removed. *) let emacs_prompt_startstring() = Printer.emacs_str "" let emacs_prompt_endstring() = Printer.emacs_str "" (* Read a char in an input channel, displaying a prompt at every beginning of line. *) let prompt_char ic ibuf count = let bol = match ibuf.bols with | ll::_ -> ibuf.len == ll | [] -> ibuf.len == 0 in if bol && not !print_emacs then msgerr (str (ibuf.prompt())); try let c = input_char ic in if c == '\n' then ibuf.bols <- (ibuf.len+1) :: ibuf.bols; if ibuf.len == String.length ibuf.str then resize_buffer ibuf; ibuf.str.[ibuf.len] <- c; ibuf.len <- ibuf.len + 1; Some c with End_of_file -> None (* Reinitialize the char stream (after a Drop) *) let reset_input_buffer ic ibuf = ibuf.str <- ""; ibuf.len <- 0; ibuf.bols <- []; ibuf.tokens <- Gram.parsable (Stream.from (prompt_char ic ibuf)); ibuf.start <- 0 (* Functions to print underlined locations from an input buffer. *) (* Given a location, returns the list of locations of each line. The last line is returned separately. It also checks the location bounds. 
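   For instance (an illustrative reading of the result, not from the
   original sources): a location spanning three lines of the buffer
   yields ([l1; l2], l3), where [l1] and [l2] are the begin/end offsets
   of the first two lines and [l3] those of the last one, while a
   location contained in a single line yields ([], l).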
*) let get_bols_of_loc ibuf (bp,ep) = let add_line (b,e) lines = if b < 0 or e < b then anomaly "Bad location"; match lines with | ([],None) -> ([], Some (b,e)) | (fl,oe) -> ((b,e)::fl, oe) in let rec lines_rec ba after = function | [] -> add_line (0,ba) after | ll::_ when ll <= bp -> add_line (ll,ba) after | ll::fl -> let nafter = if ll < ep then add_line (ll,ba) after else after in lines_rec ll nafter fl in let (fl,ll) = lines_rec ibuf.len ([],None) ibuf.bols in (fl,Option.get ll) let dotted_location (b,e) = if e-b < 3 then ("", String.make (e-b) ' ') else (String.make (e-b-1) '.', " ") let blanch_utf8_string s bp ep = let s' = String.make (ep-bp) ' ' in let j = ref 0 in for i = bp to ep - 1 do let n = Char.code s.[i] in (* Heuristic: assume utf-8 chars are printed using a single fixed-size char and therefore contract all utf-8 code into one space; in any case, preserve tabulation so that its effective interpretation in terms of spacing is preserved *) if s.[i] = '\t' then s'.[!j] <- '\t'; if n < 0x80 || 0xC0 <= n then incr j done; String.sub s' 0 !j let print_highlight_location ib loc = let (bp,ep) = unloc loc in let bp = bp - ib.start and ep = ep - ib.start in let highlight_lines = match get_bols_of_loc ib (bp,ep) with | ([],(bl,el)) -> let shift = blanch_utf8_string ib.str bl bp in let span = String.length (blanch_utf8_string ib.str bp ep) in (str"> " ++ str(String.sub ib.str bl (el-bl-1)) ++ fnl () ++ str"> " ++ str(shift) ++ str(String.make span '^')) | ((b1,e1)::ml,(bn,en)) -> let (d1,s1) = dotted_location (b1,bp) in let (dn,sn) = dotted_location (ep,en) in let l1 = (str"> " ++ str d1 ++ str s1 ++ str(String.sub ib.str bp (e1-bp))) in let li = prlist (fun (bi,ei) -> (str"> " ++ str(String.sub ib.str bi (ei-bi)))) ml in let ln = (str"> " ++ str(String.sub ib.str bn (ep-bn)) ++ str sn ++ str dn) in (l1 ++ li ++ ln) in let loc = make_loc (bp,ep) in (str"Toplevel input, characters " ++ Cerrors.print_loc loc ++ str":" ++ fnl () ++ highlight_lines ++ fnl ()) (* Functions to report located errors in a file. 
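   The message built below is kept on a single line so that Emacs can
   parse it; for an error located in a regular file it looks roughly like
   (illustrative example): File "foo.v", line 3, characters 12-17:
   An Error-while-reading header is prepended when the reported name
   differs from the file actually read, and locations inside a compiled
   library are reported with a Module header instead of line numbers.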
*) let print_location_in_file s inlibrary fname loc = let errstrm = str"Error while reading " ++ str s in if loc = dummy_loc then hov 1 (errstrm ++ spc() ++ str" (unknown location):") ++ fnl () else let errstrm = if s = fname then mt() else errstrm ++ str":" ++ fnl() in if inlibrary then hov 0 (errstrm ++ str"Module " ++ str ("\""^fname^"\"") ++ spc() ++ str"characters " ++ Cerrors.print_loc loc) ++ fnl () else let (bp,ep) = unloc loc in let ic = open_in fname in let rec line_of_pos lin bol cnt = if cnt < bp then if input_char ic == '\n' then line_of_pos (lin + 1) (cnt +1) (cnt+1) else line_of_pos lin bol (cnt+1) else (lin, bol) in try let (line, bol) = line_of_pos 1 0 0 in close_in ic; hov 0 (* No line break so as to follow emacs error message format *) (errstrm ++ str"File " ++ str ("\""^fname^"\"") ++ str", line " ++ int line ++ str", characters " ++ Cerrors.print_loc (make_loc (bp-bol,ep-bol))) ++ str":" ++ fnl () with e when Errors.noncritical e -> (close_in ic; hov 1 (errstrm ++ spc() ++ str"(invalid location):") ++ fnl ()) let print_command_location ib dloc = match dloc with | Some (bp,ep) -> (str"Error during interpretation of command:" ++ fnl () ++ str(String.sub ib.str (bp-ib.start) (ep-bp)) ++ fnl ()) | None -> (mt ()) let valid_loc dloc loc = loc <> dummy_loc & match dloc with | Some dloc -> let (bd,ed) = unloc dloc in let (b,e) = unloc loc in bd<=b & e<=ed | _ -> true let valid_buffer_loc ib dloc loc = valid_loc dloc loc & let (b,e) = unloc loc in b-ib.start >= 0 & e-ib.start < ib.len & b<=e (*s The Coq prompt is the name of the focused proof, if any, and "Coq" otherwise. We trap all exceptions to prevent the error message printing from cycling. *) let make_prompt () = try (Names.string_of_id (Pfedit.get_current_proof_name ())) ^ " < " with Proof_global.NoCurrentProof -> "Coq < " (*let build_pending_list l = let pl = ref ">" in let l' = ref l in let res = while List.length !l' > 1 do pl := !pl ^ "|" Names.string_of_id x; l':=List.tl !l' done in let last = try List.hd !l' with _ -> in "<"^l' *) (* the coq prompt added to the default one when in emacs mode The prompt contains the current state label [n] (for global backtracking) and the current proof state [p] (for proof backtracking) plus the list of open (nested) proofs (for proof aborting when backtracking). It looks like: "n |lem1|lem2|lem3| p < " *) let make_emacs_prompt() = let statnum = string_of_int (Lib.current_command_label ()) in let dpth = Pfedit.current_proof_depth() in let pending = Pfedit.get_all_proof_names() in let pendingprompt = List.fold_left (fun acc x -> acc ^ (if acc <> "" then "|" else "") ^ Names.string_of_id x) "" pending in let proof_info = if dpth >= 0 then string_of_int dpth else "0" in if !Flags.print_emacs then statnum ^ " |" ^ pendingprompt ^ "| " ^ proof_info ^ " < " else "" (* A buffer to store the current command read on stdin. It is * initialized when a vernac command is immediately followed by "\n", * or after a Drop. *) let top_buffer = let pr() = emacs_prompt_startstring() ^ make_prompt() ^ make_emacs_prompt() ^ emacs_prompt_endstring() in { prompt = pr; str = ""; len = 0; bols = []; tokens = Gram.parsable (Stream.of_list []); start = 0 } let set_prompt prompt = top_buffer.prompt <- (fun () -> emacs_prompt_startstring() ^ prompt () ^ emacs_prompt_endstring()) (* Removes and prints the location of the error. The following exceptions need not be located. 
*) let rec is_pervasive_exn = function | Out_of_memory | Stack_overflow | Sys.Break -> true | Error_in_file (_,_,e) -> is_pervasive_exn e | Loc.Exc_located (_,e) -> is_pervasive_exn e | DuringCommandInterp (_,e) -> is_pervasive_exn e | _ -> false (* Toplevel error explanation, dealing with locations, Drop, Ctrl-D May raise only the following exceptions: Drop and End_of_input, meaning we get out of the Coq loop *) let print_toplevel_error exc = let (dloc,exc) = match exc with | DuringCommandInterp (loc,ie) -> if loc = dummy_loc then (None,ie) else (Some loc, ie) | _ -> (None, exc) in let (locstrm,exc) = match exc with | Loc.Exc_located (loc, ie) -> if valid_buffer_loc top_buffer dloc loc then (print_highlight_location top_buffer loc, ie) else ((mt ()) (* print_command_location top_buffer dloc *), ie) | Error_in_file (s, (inlibrary, fname, loc), ie) -> (print_location_in_file s inlibrary fname loc, ie) | _ -> ((mt ()) (* print_command_location top_buffer dloc *), exc) in match exc with | End_of_input -> msgerrnl (mt ()); pp_flush(); exit 0 | Vernacexpr.Drop -> (* Last chance *) if Mltop.is_ocaml_top() then raise Vernacexpr.Drop; (str"Error: There is no ML toplevel." ++ fnl ()) | Vernacexpr.Quit -> raise Vernacexpr.Quit | _ -> (if is_pervasive_exn exc then (mt ()) else locstrm) ++ Errors.print exc (* Read the input stream until a dot is encountered *) let parse_to_dot = let rec dot st = match get_tok (Stream.next st) with | Tok.KEYWORD "." -> () | Tok.EOI -> raise End_of_input | _ -> dot st in Gram.Entry.of_parser "Coqtoplevel.dot" dot (* We assume that when a lexer error occurs, at least one char was eaten *) let rec discard_to_dot () = try Gram.entry_parse parse_to_dot top_buffer.tokens with Loc.Exc_located(_,(Token.Error _|Lexer.Error.E _)) -> discard_to_dot() (* If the error occured while parsing, we read the input until a dot token * in encountered. *) let process_error = function | DuringCommandInterp _ as e -> e | e -> if is_pervasive_exn e then e else try discard_to_dot (); e with | End_of_input -> End_of_input | any -> if is_pervasive_exn any then any else e (* do_vernac reads and executes a toplevel phrase, and print error messages when an exception is raised, except for the following: Drop: kill the Coq toplevel, going down to the Caml toplevel if it exists. Otherwise, exit. End_of_input: Ctrl-D was typed in, we will quit *) let do_vernac () = msgerrnl (mt ()); if !print_emacs then msgerr (str (top_buffer.prompt())); resynch_buffer top_buffer; begin try raw_do_vernac top_buffer.tokens with any -> msgnl (print_toplevel_error (process_error any)) end; flush_all() (* coq and go read vernacular expressions until Drop is entered. * Ctrl-C will raise the exception Break instead of aborting Coq. * Here we catch the exceptions terminating the Coq loop, and decide * if we really must quit. *) let rec loop () = Sys.catch_break true; try reset_input_buffer stdin top_buffer; while true do do_vernac() done with | Vernacexpr.Drop -> () | End_of_input -> msgerrnl (mt ()); pp_flush(); exit 0 | Vernacexpr.Quit -> exit 0 | any -> msgerrnl (str"Anomaly. 
Please report."); loop () coq-8.4pl2/toplevel/ide_slave.ml0000640000175000001440000003735512121620060015751 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* flush_all (); orig_stdout := Unix.out_channel_of_descr (Unix.dup Unix.stdout); Unix.dup2 Unix.stderr Unix.stdout; Pp_control.std_ft := out_ft; Pp_control.err_ft := out_ft; Pp_control.deep_ft := deep_out_ft; set_binary_mode_out !orig_stdout true; set_binary_mode_in stdin true; ), (fun () -> Format.pp_print_flush out_ft (); let r = Buffer.contents out_buff in Buffer.clear out_buff; r) let pr_debug s = if !Flags.debug then Printf.eprintf "[pid %d] %s\n%!" (Unix.getpid ()) s (** Categories of commands *) let coqide_known_option table = List.mem table [ ["Printing";"Implicit"]; ["Printing";"Coercions"]; ["Printing";"Matching"]; ["Printing";"Synth"]; ["Printing";"Notations"]; ["Printing";"All"]; ["Printing";"Records"]; ["Printing";"Existential";"Instances"]; ["Printing";"Universes"]] let is_known_option cmd = match cmd with | VernacSetOption (_,o,BoolValue true) | VernacUnsetOption (_,o) -> coqide_known_option o | _ -> false let is_debug cmd = match cmd with | VernacSetOption (_,["Ltac";"Debug"], _) -> true | _ -> false let is_query cmd = match cmd with | VernacChdir None | VernacMemOption _ | VernacPrintOption _ | VernacCheckMayEval _ | VernacGlobalCheck _ | VernacPrint _ | VernacSearch _ | VernacLocate _ -> true | _ -> false let is_undo cmd = match cmd with | VernacUndo _ | VernacUndoTo _ -> true | _ -> false (** Check whether a command is forbidden by CoqIDE *) let coqide_cmd_checks (loc,ast) = let user_error s = raise (Loc.Exc_located (loc, Util.UserError ("CoqIde", str s))) in if is_debug ast then user_error "Debug mode not available within CoqIDE"; if is_known_option ast then user_error "Use CoqIDE display menu instead"; if is_navigation_vernac ast then user_error "Use CoqIDE navigation instead"; if is_undo ast then msgerrnl (str "Warning: rather use CoqIDE navigation instead"); if is_query ast then msgerrnl (str "Warning: query commands should not be inserted in scripts") (** Interpretation (cf. 
[Ide_intf.interp]) *) let interp (raw,verbosely,s) = let pa = Pcoq.Gram.parsable (Stream.of_string s) in let loc_ast = Vernac.parse_sentence (pa,None) in if not raw then coqide_cmd_checks loc_ast; Flags.make_silent (not verbosely); Vernac.eval_expr ~preserving:raw loc_ast; Flags.make_silent true; read_stdout () (** Goal display *) let hyp_next_tac sigma env (id,_,ast) = let id_s = Names.string_of_id id in let type_s = string_of_ppcmds (pr_ltype_env env ast) in [ ("clear "^id_s),("clear "^id_s^"."); ("apply "^id_s),("apply "^id_s^"."); ("exact "^id_s),("exact "^id_s^"."); ("generalize "^id_s),("generalize "^id_s^"."); ("absurd <"^id_s^">"),("absurd "^type_s^".") ] @ [ ("discriminate "^id_s),("discriminate "^id_s^"."); ("injection "^id_s),("injection "^id_s^".") ] @ [ ("rewrite "^id_s),("rewrite "^id_s^"."); ("rewrite <- "^id_s),("rewrite <- "^id_s^".") ] @ [ ("elim "^id_s), ("elim "^id_s^"."); ("inversion "^id_s), ("inversion "^id_s^"."); ("inversion clear "^id_s), ("inversion_clear "^id_s^".") ] let concl_next_tac sigma concl = let expand s = (s,s^".") in List.map expand ([ "intro"; "intros"; "intuition" ] @ [ "reflexivity"; "discriminate"; "symmetry" ] @ [ "assumption"; "omega"; "ring"; "auto"; "eauto"; "tauto"; "trivial"; "decide equality"; "simpl"; "subst"; "red"; "split"; "left"; "right" ]) let process_goal sigma g = let env = Goal.V82.env sigma g in let id = Goal.uid g in let ccl = let norm_constr = Reductionops.nf_evar sigma (Goal.V82.concl sigma g) in string_of_ppcmds (pr_goal_concl_style_env env norm_constr) in let process_hyp h_env d acc = let d = Term.map_named_declaration (Reductionops.nf_evar sigma) d in (string_of_ppcmds (pr_var_decl h_env d)) :: acc in let hyps = List.rev (Environ.fold_named_context process_hyp env ~init: []) in { Interface.goal_hyp = hyps; Interface.goal_ccl = ccl; Interface.goal_id = id; } let goals () = try let pfts = Proof_global.give_me_the_proof () in let (goals, zipper, sigma) = Proof.proof pfts in let fg = List.map (process_goal sigma) goals in let map_zip (lg, rg) = let lg = List.map (process_goal sigma) lg in let rg = List.map (process_goal sigma) rg in (lg, rg) in let bg = List.map map_zip zipper in Some { Interface.fg_goals = fg; Interface.bg_goals = bg; } with Proof_global.NoCurrentProof -> None let evars () = try let pfts = Proof_global.give_me_the_proof () in let { Evd.it = all_goals ; sigma = sigma } = Proof.V82.subgoals pfts in let exl = Evarutil.non_instantiated sigma in let map_evar ev = { Interface.evar_info = string_of_ppcmds (pr_evar ev); } in let el = List.map map_evar exl in Some el with Proof_global.NoCurrentProof -> None let hints () = try let pfts = Proof_global.give_me_the_proof () in let { Evd.it = all_goals ; sigma = sigma } = Proof.V82.subgoals pfts in match all_goals with | [] -> None | g :: _ -> let env = Goal.V82.env sigma g in let hint_goal = concl_next_tac sigma g in let get_hint_hyp env d accu = hyp_next_tac sigma env d :: accu in let hint_hyps = List.rev (Environ.fold_named_context get_hint_hyp env ~init: []) in Some (hint_hyps, hint_goal) with Proof_global.NoCurrentProof -> None (** Other API calls *) let inloadpath dir = Library.is_in_load_paths (System.physical_path_of_string dir) let status () = (** We remove the initial part of the current [dir_path] (usually Top in an interactive session, cf "coqtop -top"), and display the other parts (opened sections and modules) *) let path = let l = Names.repr_dirpath (Lib.cwd ()) in List.rev_map Names.string_of_id l in let proof = try Some (Names.string_of_id 
(Proof_global.get_current_proof_name ())) with Proof_global.NoCurrentProof -> None in let allproofs = let l = Proof_global.get_all_proof_names () in List.map Names.string_of_id l in { Interface.status_path = path; Interface.status_proofname = proof; Interface.status_allproofs = allproofs; Interface.status_statenum = Lib.current_command_label (); Interface.status_proofnum = Pfedit.current_proof_depth (); } (** This should be elsewhere... *) let search flags = let env = Global.env () in let rec extract_flags name tpe subtpe mods blacklist = function | [] -> (name, tpe, subtpe, mods, blacklist) | (Interface.Name_Pattern s, b) :: l -> let regexp = try Str.regexp s with e when Errors.noncritical e -> Util.error ("Invalid regexp: " ^ s) in extract_flags ((regexp, b) :: name) tpe subtpe mods blacklist l | (Interface.Type_Pattern s, b) :: l -> let constr = Pcoq.parse_string Pcoq.Constr.lconstr_pattern s in let (_, pat) = Constrintern.intern_constr_pattern Evd.empty env constr in extract_flags name ((pat, b) :: tpe) subtpe mods blacklist l | (Interface.SubType_Pattern s, b) :: l -> let constr = Pcoq.parse_string Pcoq.Constr.lconstr_pattern s in let (_, pat) = Constrintern.intern_constr_pattern Evd.empty env constr in extract_flags name tpe ((pat, b) :: subtpe) mods blacklist l | (Interface.In_Module m, b) :: l -> let path = String.concat "." m in let m = Pcoq.parse_string Pcoq.Constr.global path in let (_, qid) = Libnames.qualid_of_reference m in let id = try Nametab.full_name_module qid with Not_found -> Util.error ("Module " ^ path ^ " not found.") in extract_flags name tpe subtpe ((id, b) :: mods) blacklist l | (Interface.Include_Blacklist, b) :: l -> extract_flags name tpe subtpe mods b l in let (name, tpe, subtpe, mods, blacklist) = extract_flags [] [] [] [] false flags in let filter_function ref env constr = let id = Names.string_of_id (Nametab.basename_of_global ref) in let path = Libnames.dirpath (Nametab.path_of_global ref) in let toggle x b = if x then b else not b in let match_name (regexp, flag) = toggle (Str.string_match regexp id 0) flag in let match_type (pat, flag) = toggle (Matching.is_matching pat constr) flag in let match_subtype (pat, flag) = toggle (Matching.is_matching_appsubterm ~closed:false pat constr) flag in let match_module (mdl, flag) = toggle (Libnames.is_dirpath_prefix_of mdl path) flag in let in_blacklist = blacklist || (Search.filter_blacklist ref env constr) in List.for_all match_name name && List.for_all match_type tpe && List.for_all match_subtype subtpe && List.for_all match_module mods && in_blacklist in let ans = ref [] in let print_function ref env constr = let fullpath = repr_dirpath (Nametab.dirpath_of_global ref) in let qualid = Nametab.shortest_qualid_of_global Idset.empty ref in let (shortpath, basename) = Libnames.repr_qualid qualid in let shortpath = repr_dirpath shortpath in (* [shortpath] is a suffix of [fullpath] and we're looking for the missing prefix *) let rec prefix full short accu = match full, short with | _, [] -> let full = List.rev_map string_of_id full in (full, accu) | _ :: full, m :: short -> prefix full short (string_of_id m :: accu) | _ -> assert false in let (prefix, qualid) = prefix fullpath shortpath [string_of_id basename] in let answer = { Interface.coq_object_prefix = prefix; Interface.coq_object_qualid = qualid; Interface.coq_object_object = string_of_ppcmds (pr_lconstr_env env constr); } in ans := answer :: !ans; in let () = Search.gen_filtered_search filter_function print_function in !ans let get_options () = let table = 
Goptions.get_tables () in let fold key state accu = (key, state) :: accu in Goptions.OptionMap.fold fold table [] let set_options options = let iter (name, value) = match value with | BoolValue b -> Goptions.set_bool_option_value name b | IntValue i -> Goptions.set_int_option_value name i | StringValue s -> Goptions.set_string_option_value name s in List.iter iter options let about () = { Interface.coqtop_version = Coq_config.version; Interface.protocol_version = Ide_intf.protocol_version; Interface.release_date = Coq_config.date; Interface.compile_date = Coq_config.compile_date; } (** Grouping all call handlers together + error handling *) exception Quit let eval_call c = let rec handle_exn e = catch_break := false; let pr_exn e = (read_stdout ())^("\n"^(string_of_ppcmds (Errors.print e))) in match e with | Quit -> (* Here we do send an acknowledgement message to prove everything went OK. *) let dummy = Interface.Good () in let xml_answer = Ide_intf.of_answer Ide_intf.quit dummy in let () = Xml_utils.print_xml !orig_stdout xml_answer in let () = flush !orig_stdout in let () = pr_debug "Exiting gracefully." in exit 0 | Vernacexpr.Drop -> None, "Drop is not allowed by coqide!" | Vernacexpr.Quit -> None, "Quit is not allowed by coqide!" | Vernac.DuringCommandInterp (_,inner) -> handle_exn inner | Error_in_file (_,_,inner) -> None, pr_exn inner | Loc.Exc_located (loc, inner) when loc = dummy_loc -> None, pr_exn inner | Loc.Exc_located (loc, inner) -> Some (Util.unloc loc), pr_exn inner | e -> None, pr_exn e in let interruptible f x = catch_break := true; Util.check_for_interrupt (); let r = f x in catch_break := false; r in let handler = { Ide_intf.interp = interruptible interp; Ide_intf.rewind = interruptible Backtrack.back; Ide_intf.goals = interruptible goals; Ide_intf.evars = interruptible evars; Ide_intf.hints = interruptible hints; Ide_intf.status = interruptible status; Ide_intf.search = interruptible search; Ide_intf.inloadpath = interruptible inloadpath; Ide_intf.get_options = interruptible get_options; Ide_intf.set_options = interruptible set_options; Ide_intf.mkcases = interruptible Vernacentries.make_cases; Ide_intf.quit = (fun () -> raise Quit); Ide_intf.about = interruptible about; Ide_intf.handle_exn = handle_exn; } in (* If the messages of last command are still there, we remove them *) ignore (read_stdout ()); Ide_intf.abstract_eval_call handler c (** The main loop *) (** Exceptions during eval_call should be converted into [Interface.Fail] messages by [handle_exn] above. Otherwise, we die badly, after having tried to send a last message to the ide: trying to recover from errors with the current protocol would most probably bring desynchronisation between coqtop and ide. With marshalling, reading an answer to a different request could hang the ide... 
*) let fail err = Ide_intf.of_value (fun _ -> assert false) (Interface.Fail (None, err)) let loop () = let p = Xml_parser.make () in let () = Xml_parser.check_eof p false in init_signal_handler (); catch_break := false; (* We'll handle goal fetching and display in our own way *) Vernacentries.enable_goal_printing := false; Vernacentries.qed_display_script := false; try while true do let xml_answer = try let xml_query = Xml_parser.parse p (Xml_parser.SChannel stdin) in let q = Ide_intf.to_call xml_query in let () = pr_debug ("<-- " ^ Ide_intf.pr_call q) in let r = eval_call q in let () = pr_debug ("--> " ^ Ide_intf.pr_full_value q r) in Ide_intf.of_answer q r with | Xml_parser.Error (Xml_parser.Empty, _) -> pr_debug ("End of input, exiting"); exit 0 | Xml_parser.Error (err, loc) -> let msg = "Syntax error in query: " ^ Xml_parser.error_msg err in fail msg | Ide_intf.Marshal_error -> fail "Incorrect query." in Xml_utils.print_xml !orig_stdout xml_answer; flush !orig_stdout done with any -> let msg = Printexc.to_string any in let r = "Fatal exception in coqtop:\n" ^ msg in pr_debug ("==> " ^ r); (try Xml_utils.print_xml !orig_stdout (fail r); flush !orig_stdout with any -> ()); exit 1 coq-8.4pl2/toplevel/doc.tex0000640000175000001440000000036607044425616014765 0ustar notinusers \newpage \section*{The Coq toplevel} \ocwsection \label{toplevel} This chapter describes the highest modules of the \Coq\ system. They are organized as follows: \bigskip \begin{center}\epsfig{file=toplevel.dep.ps,width=\linewidth}\end{center} coq-8.4pl2/toplevel/backtrack.mli0000640000175000001440000000725112010532755016116 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit (** Is this history stack active (i.e. nonempty) ? The stack is currently inactive when compiling files (coqc). *) val is_active : unit -> bool (** The [Invalid] exception is raised when one of the following function tries to empty the history stack, or reach an unknown states, etc. The stack is preserved in these cases. *) exception Invalid (** Nota Bene: it is critical for the following functions that proofs are nested in a regular way (i.e. no more Resume or Suspend commands as earlier). *) (** Backtracking by a certain number of (non-state-preserving) commands. This is used by Coqide. It may actually undo more commands than asked : for instance instead of jumping back in the middle of a finished proof, we jump back before this proof. The number of extra backtracked command is returned at the end. *) val back : int -> int (** Backtracking to a certain state number, and reset proofs accordingly. We may end on some earlier state if needed to avoid re-opening proofs. Return the state number on which we finally end. *) val backto : int -> int (** Old [Backtrack] code with corresponding update of the history stack. [Backtrack] is now deprecated (in favor of [BackTo]) but is kept for compatibility with ProofGeneral. It's completely up to ProofGeneral to decide where to go and how to adapt proofs. Note that the choices of ProofGeneral are currently not always perfect (for instance when backtracking an Undo). *) val backtrack : int -> int -> int -> unit (** [reset_initial] resets the system and clears the command history stack, only pushing back the initial entry. It should be equivalent to [backto Lib.first_command_label], but sligthly more efficient. 
*) val reset_initial : unit -> unit (** Reset to the last known state just before defining [id] *) val reset_name : Names.identifier Util.located -> unit (** When a proof is ended (via either Qed/Admitted/Restart/Abort), old proof steps should be marked differently to avoid jumping back to them: - either this proof isn't there anymore in the proof engine - either a proof with the same name is there, but it's a more recent attempt after a Restart/Abort, we shouldn't mix the two. We also mark as unreachable the proof steps cancelled via a Undo. *) val mark_unreachable : ?after:int -> Names.identifier list -> unit (** Parse the history stack for printing the script of a proof *) val get_script : Names.identifier -> (Vernacexpr.vernac_expr * int) list (** For debug purpose, a dump of the history *) type info = { label : int; nproofs : int; prfname : Names.identifier option; prfdepth : int; ngoals : int; cmd : Vernacexpr.vernac_expr; mutable reachable : bool; } val dump_history : unit -> info list coq-8.4pl2/toplevel/auto_ind_decl.mli0000640000175000001440000000307112010532755016756 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr array (** {6 Build equivalence between boolean equality and Leibniz equality } *) val lb_scheme_kind : mutual scheme_kind val make_lb_scheme : mutual_inductive -> constr array val bl_scheme_kind : mutual scheme_kind val make_bl_scheme : mutual_inductive -> constr array (** {6 Build decidability of equality } *) val eq_dec_scheme_kind : mutual scheme_kind val make_eq_decidability : mutual_inductive -> constr array coq-8.4pl2/toplevel/ide_slave.mli0000640000175000001440000000163112010532755016120 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val loop : unit -> unit coq-8.4pl2/toplevel/mltop.mli0000640000175000001440000000454512010532755015327 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit; use_file : string -> unit; add_dir : string -> unit; ml_loop : unit -> unit } (** Sets and initializes a toplevel (if any) *) val set_top : toplevel -> unit (** Are we in a native version of Coq? *) val is_native : bool (** Removes the toplevel (if any) *) val remove : unit -> unit (** Tests if an Ocaml toplevel runs under Coq *) val is_ocaml_top : unit -> bool (** Tests if we can load ML files *) val has_dynlink : bool (** Starts the Ocaml toplevel loop *) val ocaml_toploop : unit -> unit (** Dynamic loading of .cmo *) val dir_ml_load : string -> unit (** Dynamic interpretation of .ml *) val dir_ml_use : string -> unit (** Adds a path to the ML paths *) val add_ml_dir : string -> unit val add_rec_ml_dir : string -> unit (** Adds a path to the Coq and ML paths *) val add_path : unix_path:string -> coq_root:Names.dir_path -> unit val add_rec_path : unix_path:string -> coq_root:Names.dir_path -> unit (** List of modules linked to the toplevel *) val add_known_module : string -> unit val module_is_known : string -> bool val load_ml_object : string -> string -> unit (** Declare a plugin and its initialization function. A plugin is just an ML module with an initialization function. Adding a known plugin implies adding it as a known ML module. 
The initialization function is granted to be called after Coq is fully bootstrapped, even if the plugin is statically linked with the toplevel *) val add_known_plugin : (unit -> unit) -> string -> unit (** Calls all initialization functions in a non-specified order *) val init_known_plugins : unit -> unit val declare_ml_modules : Vernacexpr.locality_flag -> string list -> unit val print_ml_path : unit -> unit val print_ml_modules : unit -> unit coq-8.4pl2/toplevel/search.ml0000640000175000001440000001726012121620060015254 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* head_const d | LetIn (_,_,_,d) -> head_const d | App (f,_) -> head_const f | Cast (d,_,_) -> head_const d | _ -> c (* General search, restricted to head constant if [only_head] *) let gen_crible refopt (fn : global_reference -> env -> constr -> unit) = let env = Global.env () in let crible_rec (sp,kn) lobj = match object_tag lobj with | "VARIABLE" -> (try let (id,_,typ) = Global.lookup_named (basename sp) in if refopt = None || head_const typ = constr_of_global (Option.get refopt) then fn (VarRef id) env typ with Not_found -> (* we are in a section *) ()) | "CONSTANT" -> let cst = Global.constant_of_delta_kn kn in let typ = Typeops.type_of_constant env cst in if refopt = None || head_const typ = constr_of_global (Option.get refopt) then fn (ConstRef cst) env typ | "INDUCTIVE" -> let mind = Global.mind_of_delta_kn kn in let mib = Global.lookup_mind mind in (match refopt with | Some (IndRef ((kn',tyi) as ind)) when eq_mind mind kn' -> print_constructors ind fn env (Array.length (mib.mind_packets.(tyi).mind_user_lc)) | Some _ -> () | _ -> Array.iteri (fun i mip -> print_constructors (mind,i) fn env (Array.length mip.mind_user_lc)) mib.mind_packets) | _ -> () in try Declaremods.iter_all_segments crible_rec with Not_found -> () let crible ref = gen_crible (Some ref) (* Fine Search. By Yves Bertot. *) exception No_full_path let rec head c = let c = strip_outer_cast c in match kind_of_term c with | Prod (_,_,c) -> head c | LetIn (_,_,_,c) -> head c | _ -> c let xor a b = (a or b) & (not (a & b)) let plain_display ref a c = let pc = pr_lconstr_env a c in let pr = pr_global ref in msg (hov 2 (pr ++ str":" ++ spc () ++ pc) ++ fnl ()) let filter_by_module (module_list:dir_path list) (accept:bool) (ref:global_reference) _ _ = try let sp = path_of_global ref in let sl = dirpath sp in let rec filter_aux = function | m :: tl -> (not (is_dirpath_prefix_of m sl)) && (filter_aux tl) | [] -> true in xor accept (filter_aux module_list) with No_full_path -> false let ref_eq = Libnames.encode_mind Coqlib.logic_module (id_of_string "eq"), 0 let c_eq = mkInd ref_eq let gref_eq = IndRef ref_eq let mk_rewrite_pattern1 eq pattern = PApp (PRef eq, [| PMeta None; pattern; PMeta None |]) let mk_rewrite_pattern2 eq pattern = PApp (PRef eq, [| PMeta None; PMeta None; pattern |]) let pattern_filter pat _ a c = try try is_matching pat (head c) with e when Errors.noncritical e -> is_matching pat (head (Typing.type_of (Global.env()) Evd.empty c)) with UserError _ -> false let filtered_search filter_function display_function ref = crible ref (fun s a c -> if filter_function s a c then display_function s a c) let rec id_from_pattern = function | PRef gr -> gr (* should be appear as a PRef (VarRef sp) !! | PVar id -> Nametab.locate (make_qualid [] (string_of_id id)) *) | PApp (p,_) -> id_from_pattern p | _ -> error "The pattern is not simple enough." 
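(* Illustrative sketch (not part of the original file): how the low-level machinery
   defined above composes.  [filtered_search] runs the crible restricted to objects
   whose head constant is the given global reference; every identifier used below
   comes from the definitions above, while [print_everything_about] and
   [print_about_in] are hypothetical helper names added only for illustration. *)
let print_everything_about gr =
  (* accept every candidate and print it with the default display function *)
  filtered_search (fun _ _ _ -> true) plain_display gr

let print_about_in dirs gr =
  (* same search, but keep only objects living under one of the module paths [dirs] *)
  filtered_search (filter_by_module dirs true) plain_display gr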
let raw_pattern_search extra_filter display_function pat = let name = id_from_pattern pat in filtered_search (fun s a c -> (pattern_filter pat s a c) & extra_filter s a c) display_function name let raw_search_rewrite extra_filter display_function pattern = filtered_search (fun s a c -> ((pattern_filter (mk_rewrite_pattern1 gref_eq pattern) s a c) || (pattern_filter (mk_rewrite_pattern2 gref_eq pattern) s a c)) && extra_filter s a c) display_function gref_eq let raw_search_by_head extra_filter display_function pattern = Util.todo "raw_search_by_head" let name_of_reference ref = string_of_id (basename_of_global ref) let full_name_of_reference ref = let (dir,id) = repr_path (path_of_global ref) in string_of_dirpath dir ^ "." ^ string_of_id id (* * functions to use the new Libtypes facility *) let raw_search search_function extra_filter display_function pat = let env = Global.env() in List.iter (fun (gr,_,_) -> let typ = Global.type_of_global gr in if extra_filter gr env typ then display_function gr env typ ) (search_function pat) let text_pattern_search extra_filter = raw_search Libtypes.search_concl extra_filter plain_display let text_search_rewrite extra_filter = raw_search (Libtypes.search_eq_concl c_eq) extra_filter plain_display let text_search_by_head extra_filter = raw_search Libtypes.search_head_concl extra_filter plain_display let filter_by_module_from_list = function | [], _ -> (fun _ _ _ -> true) | l, outside -> filter_by_module l (not outside) let filter_blacklist gr _ _ = let name = full_name_of_reference gr in let l = SearchBlacklist.elements () in List.for_all (fun str -> not (string_string_contains ~where:name ~what:str)) l let (&&&&&) f g x y z = f x y z && g x y z let search_by_head pat inout = text_search_by_head (filter_by_module_from_list inout &&&&& filter_blacklist) pat let search_rewrite pat inout = text_search_rewrite (filter_by_module_from_list inout &&&&& filter_blacklist) pat let search_pattern pat inout = text_pattern_search (filter_by_module_from_list inout &&&&& filter_blacklist) pat let gen_filtered_search filter_function display_function = gen_crible None (fun s a c -> if filter_function s a c then display_function s a c) (** SearchAbout *) type glob_search_about_item = | GlobSearchSubPattern of constr_pattern | GlobSearchString of string let search_about_item (itemref,typ) = function | GlobSearchSubPattern pat -> is_matching_appsubterm ~closed:false pat typ | GlobSearchString s -> string_string_contains ~where:(name_of_reference itemref) ~what:s let raw_search_about filter_modules display_function l = let filter ref' env typ = filter_modules ref' env typ && List.for_all (fun (b,i) -> b = search_about_item (ref',typ) i) l && filter_blacklist ref' () () in gen_filtered_search filter display_function let search_about ref inout = raw_search_about (filter_by_module_from_list inout) plain_display ref coq-8.4pl2/toplevel/himsg.ml0000640000175000001440000011762112122674544015141 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* pr_id id | Anonymous, _, _ -> str "<>" with Not_found -> str "UNBOUND_REL_" ++ int i let explain_unbound_rel env n = let pe = pr_ne_context_of (str "In environment") env in str "Unbound reference: " ++ pe ++ str "The reference " ++ int n ++ str " is free." let explain_unbound_var env v = let var = pr_id v in str "No such section variable or assumption: " ++ var ++ str "." 
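(* Illustrative sketch (hypothetical helper, not in the original sources): invoking
   the SearchAbout entry point of the Search module shown above programmatically.
   [GlobSearchString] and [search_about] are the definitions given earlier;
   [([], false)] means "no module restriction". *)
let search_mentioning s =
  Search.search_about [ (true, Search.GlobSearchString s) ] ([], false)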
let explain_not_type env sigma j = let j = j_nf_evar sigma j in let pe = pr_ne_context_of (str "In environment") env in let pc,pt = pr_ljudge_env env j in pe ++ str "The term" ++ brk(1,1) ++ pc ++ spc () ++ str "has type" ++ spc () ++ pt ++ spc () ++ str "which should be Set, Prop or Type." let explain_bad_assumption env j = let pe = pr_ne_context_of (str "In environment") env in let pc,pt = pr_ljudge_env env j in pe ++ str "Cannot declare a variable or hypothesis over the term" ++ brk(1,1) ++ pc ++ spc () ++ str "of type" ++ spc () ++ pt ++ spc () ++ str "because this term is not a type." let explain_reference_variables c = let pc = pr_lconstr c in str "The constant" ++ spc () ++ pc ++ spc () ++ str "refers to variables which are not in the context." let rec pr_disjunction pr = function | [a] -> pr a | [a;b] -> pr a ++ str " or" ++ spc () ++ pr b | a::l -> pr a ++ str "," ++ spc () ++ pr_disjunction pr l | [] -> assert false let explain_elim_arity env ind sorts c pj okinds = let env = make_all_name_different env in let pi = pr_inductive env ind in let pc = pr_lconstr_env env c in let msg = match okinds with | Some(kp,ki,explanation) -> let pki = pr_sort_family ki in let pkp = pr_sort_family kp in let explanation = match explanation with | NonInformativeToInformative -> "proofs can be eliminated only to build proofs" | StrongEliminationOnNonSmallType -> "strong elimination on non-small inductive types leads to paradoxes" | WrongArity -> "wrong arity" in let ppar = pr_disjunction (fun s -> quote (pr_sort_family s)) sorts in let ppt = pr_lconstr_env env ((strip_prod_assum pj.uj_type)) in hov 0 (str "the return type has sort" ++ spc () ++ ppt ++ spc () ++ str "while it" ++ spc () ++ str "should be " ++ ppar ++ str ".") ++ fnl () ++ hov 0 (str "Elimination of an inductive object of sort " ++ pki ++ brk(1,0) ++ str "is not allowed on a predicate in sort " ++ pkp ++ fnl () ++ str "because" ++ spc () ++ str explanation ++ str ".") | None -> str "ill-formed elimination predicate." in hov 0 ( str "Incorrect elimination of" ++ spc () ++ pc ++ spc () ++ str "in the inductive type" ++ spc () ++ quote pi ++ str ":") ++ fnl () ++ msg let explain_case_not_inductive env sigma cj = let cj = j_nf_evar sigma cj in let env = make_all_name_different env in let pc = pr_lconstr_env env cj.uj_val in let pct = pr_lconstr_env env cj.uj_type in match kind_of_term cj.uj_type with | Evar _ -> str "Cannot infer a type for this expression." | _ -> str "The term" ++ brk(1,1) ++ pc ++ spc () ++ str "has type" ++ brk(1,1) ++ pct ++ spc () ++ str "which is not a (co-)inductive type." let explain_number_branches env sigma cj expn = let cj = j_nf_evar sigma cj in let env = make_all_name_different env in let pc = pr_lconstr_env env cj.uj_val in let pct = pr_lconstr_env env cj.uj_type in str "Matching on term" ++ brk(1,1) ++ pc ++ spc () ++ str "of type" ++ brk(1,1) ++ pct ++ spc () ++ str "expects " ++ int expn ++ str " branches." let explain_ill_formed_branch env sigma c ci actty expty = let simp t = Reduction.nf_betaiota (nf_evar sigma t) in let c = nf_evar sigma c in let env = make_all_name_different env in let pc = pr_lconstr_env env c in let pa = pr_lconstr_env env (simp actty) in let pe = pr_lconstr_env env (simp expty) in strbrk "In pattern-matching on term" ++ brk(1,1) ++ pc ++ spc () ++ strbrk "the branch for constructor" ++ spc () ++ quote (pr_constructor env ci) ++ spc () ++ str "has type" ++ brk(1,1) ++ pa ++ spc () ++ str "which should be" ++ brk(1,1) ++ pe ++ str "." 
let explain_generalization env (name,var) j = let pe = pr_ne_context_of (str "In environment") env in let pv = pr_ltype_env env var in let (pc,pt) = pr_ljudge_env (push_rel_assum (name,var) env) j in pe ++ str "Cannot generalize" ++ brk(1,1) ++ pv ++ spc () ++ str "over" ++ brk(1,1) ++ pc ++ str "," ++ spc () ++ str "it has type" ++ spc () ++ pt ++ spc () ++ str "which should be Set, Prop or Type." let explain_actual_type env sigma j pt = let j = j_nf_betaiotaevar sigma j in let pt = Reductionops.nf_betaiota sigma pt in let pe = pr_ne_context_of (str "In environment") env in let (pc,pct) = pr_ljudge_env env j in let pt = pr_lconstr_env env pt in pe ++ str "The term" ++ brk(1,1) ++ pc ++ spc () ++ str "has type" ++ brk(1,1) ++ pct ++ brk(1,1) ++ str "while it is expected to have type" ++ brk(1,1) ++ pt ++ str "." let explain_cant_apply_bad_type env sigma (n,exptyp,actualtyp) rator randl = let randl = jv_nf_betaiotaevar sigma randl in let exptyp = nf_evar sigma exptyp in let actualtyp = Reductionops.nf_betaiota sigma actualtyp in let rator = j_nf_evar sigma rator in let env = make_all_name_different env in let nargs = Array.length randl in (* let pe = pr_ne_context_of (str "in environment") env in*) let pr,prt = pr_ljudge_env env rator in let term_string1 = str (plural nargs "term") in let term_string2 = if nargs>1 then str "The " ++ nth n ++ str " term" else str "This term" in let appl = prvect_with_sep pr_fnl (fun c -> let pc,pct = pr_ljudge_env env c in hov 2 (pc ++ spc () ++ str ": " ++ pct)) randl in str "Illegal application (Type Error): " ++ (* pe ++ *) fnl () ++ str "The term" ++ brk(1,1) ++ pr ++ spc () ++ str "of type" ++ brk(1,1) ++ prt ++ spc () ++ str "cannot be applied to the " ++ term_string1 ++ fnl () ++ str " " ++ v 0 appl ++ fnl () ++ term_string2 ++ str " has type" ++ brk(1,1) ++ pr_lconstr_env env actualtyp ++ spc () ++ str "which should be coercible to" ++ brk(1,1) ++ pr_lconstr_env env exptyp ++ str "." let explain_cant_apply_not_functional env sigma rator randl = let randl = jv_nf_evar sigma randl in let rator = j_nf_evar sigma rator in let env = make_all_name_different env in let nargs = Array.length randl in (* let pe = pr_ne_context_of (str "in environment") env in*) let pr = pr_lconstr_env env rator.uj_val in let prt = pr_lconstr_env env rator.uj_type in let appl = prvect_with_sep pr_fnl (fun c -> let pc = pr_lconstr_env env c.uj_val in let pct = pr_lconstr_env env c.uj_type in hov 2 (pc ++ spc () ++ str ": " ++ pct)) randl in str "Illegal application (Non-functional construction): " ++ (* pe ++ *) fnl () ++ str "The expression" ++ brk(1,1) ++ pr ++ spc () ++ str "of type" ++ brk(1,1) ++ prt ++ spc () ++ str "cannot be applied to the " ++ str (plural nargs "term") ++ fnl () ++ str " " ++ v 0 appl let explain_unexpected_type env sigma actual_type expected_type = let actual_type = nf_evar sigma actual_type in let expected_type = nf_evar sigma expected_type in let pract = pr_lconstr_env env actual_type in let prexp = pr_lconstr_env env expected_type in str "Found type" ++ spc () ++ pract ++ spc () ++ str "where" ++ spc () ++ prexp ++ str " was expected." let explain_not_product env sigma c = let c = nf_evar sigma c in let pr = pr_lconstr_env env c in str "The type of this term is a product" ++ spc () ++ str "while it is expected to be" ++ (if is_Type c then str " a sort" else (brk(1,1) ++ pr)) ++ str "." 
(* TODO: use the names *) (* (co)fixpoints *) let explain_ill_formed_rec_body env err names i fixenv vdefj = let prt_name i = match names.(i) with Name id -> str "Recursive definition of " ++ pr_id id | Anonymous -> str "The " ++ nth i ++ str " definition" in let st = match err with (* Fixpoint guard errors *) | NotEnoughAbstractionInFixBody -> str "Not enough abstractions in the definition" | RecursionNotOnInductiveType c -> str "Recursive definition on" ++ spc () ++ pr_lconstr_env env c ++ spc () ++ str "which should be an inductive type" | RecursionOnIllegalTerm(j,(arg_env, arg),le,lt) -> let arg_env = make_all_name_different arg_env in let called = match names.(j) with Name id -> pr_id id | Anonymous -> str "the " ++ nth i ++ str " definition" in let pr_db x = quote (pr_db env x) in let vars = match (lt,le) with ([],[]) -> assert false | ([],[x]) -> str "a subterm of " ++ pr_db x | ([],_) -> str "a subterm of the following variables: " ++ prlist_with_sep pr_spc pr_db le | ([x],_) -> pr_db x | _ -> str "one of the following variables: " ++ prlist_with_sep pr_spc pr_db lt in str "Recursive call to " ++ called ++ spc () ++ strbrk "has principal argument equal to" ++ spc () ++ pr_lconstr_env arg_env arg ++ strbrk " instead of " ++ vars | NotEnoughArgumentsForFixCall j -> let called = match names.(j) with Name id -> pr_id id | Anonymous -> str "the " ++ nth i ++ str " definition" in str "Recursive call to " ++ called ++ str " has not enough arguments" (* CoFixpoint guard errors *) | CodomainNotInductiveType c -> str "The codomain is" ++ spc () ++ pr_lconstr_env env c ++ spc () ++ str "which should be a coinductive type" | NestedRecursiveOccurrences -> str "Nested recursive occurrences" | UnguardedRecursiveCall c -> str "Unguarded recursive call in" ++ spc () ++ pr_lconstr_env env c | RecCallInTypeOfAbstraction c -> str "Recursive call forbidden in the domain of an abstraction:" ++ spc () ++ pr_lconstr_env env c | RecCallInNonRecArgOfConstructor c -> str "Recursive call on a non-recursive argument of constructor" ++ spc () ++ pr_lconstr_env env c | RecCallInTypeOfDef c -> str "Recursive call forbidden in the type of a recursive definition" ++ spc () ++ pr_lconstr_env env c | RecCallInCaseFun c -> str "Invalid recursive call in a branch of" ++ spc () ++ pr_lconstr_env env c | RecCallInCaseArg c -> str "Invalid recursive call in the argument of \"match\" in" ++ spc () ++ pr_lconstr_env env c | RecCallInCasePred c -> str "Invalid recursive call in the \"return\" clause of \"match\" in" ++ spc () ++ pr_lconstr_env env c | NotGuardedForm c -> str "Sub-expression " ++ pr_lconstr_env env c ++ strbrk " not in guarded form (should be a constructor," ++ strbrk " an abstraction, a match, a cofix or a recursive call)" in prt_name i ++ str " is ill-formed." ++ fnl () ++ pr_ne_context_of (str "In environment") env ++ st ++ str "." ++ fnl () ++ (try (* May fail with unresolved globals. *) let fixenv = make_all_name_different fixenv in let pvd = pr_lconstr_env fixenv vdefj.(i).uj_val in str"Recursive definition is:" ++ spc () ++ pvd ++ str "." 
with e when Errors.noncritical e -> mt ()) let explain_ill_typed_rec_body env sigma i names vdefj vargs = let vdefj = jv_nf_evar sigma vdefj in let vargs = Array.map (nf_evar sigma) vargs in let env = make_all_name_different env in let pvd,pvdt = pr_ljudge_env env (vdefj.(i)) in let pv = pr_lconstr_env env vargs.(i) in str "The " ++ (if Array.length vdefj = 1 then mt () else nth (i+1) ++ spc ()) ++ str "recursive definition" ++ spc () ++ pvd ++ spc () ++ str "has type" ++ spc () ++ pvdt ++ spc () ++ str "while it should be" ++ spc () ++ pv ++ str "." let explain_cant_find_case_type env sigma c = let c = nf_evar sigma c in let env = make_all_name_different env in let pe = pr_lconstr_env env c in str "Cannot infer type of pattern-matching on" ++ ws 1 ++ pe ++ str "." let explain_occur_check env sigma ev rhs = let rhs = nf_evar sigma rhs in let env = make_all_name_different env in let id = Evd.string_of_existential ev in let pt = pr_lconstr_env env rhs in str "Cannot define " ++ str id ++ str " with term" ++ brk(1,1) ++ pt ++ spc () ++ str "that would depend on itself." let pr_ne_context_of header footer env = if Environ.rel_context env = empty_rel_context & Environ.named_context env = empty_named_context then footer else pr_ne_context_of header env let explain_hole_kind env evi = function | QuestionMark _ -> str "this placeholder" | CasesType -> str "the type of this pattern-matching problem" | BinderType (Name id) -> str "the type of " ++ Nameops.pr_id id | BinderType Anonymous -> str "the type of this anonymous binder" | ImplicitArg (c,(n,ido),b) -> let id = Option.get ido in str "the implicit parameter " ++ pr_id id ++ spc () ++ str "of" ++ spc () ++ Nametab.pr_global_env Idset.empty c | InternalHole -> str "an internal placeholder" ++ Option.cata (fun evi -> let env = Evd.evar_env evi in str " of type " ++ pr_lconstr_env env evi.evar_concl ++ pr_ne_context_of (str " in environment:"++ fnl ()) (mt ()) env) (mt ()) evi | TomatchTypeParameter (tyi,n) -> str "the " ++ nth n ++ str " argument of the inductive type (" ++ pr_inductive env tyi ++ str ") of this term" | GoalEvar -> str "an existential variable" | ImpossibleCase -> str "the type of an impossible pattern-matching clause" | MatchingVar _ -> assert false let explain_not_clean env sigma ev t k = let t = nf_evar sigma t in let env = make_all_name_different env in let id = Evd.string_of_existential ev in let var = pr_lconstr_env env t in str "Tried to instantiate " ++ explain_hole_kind env None k ++ str " (" ++ str id ++ str ")" ++ spc () ++ str "with a term using variable " ++ var ++ spc () ++ str "which is not in its scope." let explain_unsolvability = function | None -> mt() | Some (SeveralInstancesFound n) -> strbrk " (several distinct possible instances found)" let explain_typeclass_resolution env evi k = match Typeclasses.class_of_constr evi.evar_concl with | Some c -> let env = Evd.evar_env evi in fnl () ++ str "Could not find an instance for " ++ pr_lconstr_env env evi.evar_concl ++ pr_ne_context_of (str " in environment:"++ fnl ()) (str ".") env | _ -> mt() let explain_unsolvable_implicit env evi k explain = str "Cannot infer " ++ explain_hole_kind env (Some evi) k ++ explain_unsolvability explain ++ str "." ++ explain_typeclass_resolution env evi k let explain_var_not_found env id = str "The variable" ++ spc () ++ pr_id id ++ spc () ++ str "was not found" ++ spc () ++ str "in the current" ++ spc () ++ str "environment" ++ str "." 
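(* Illustrative sketch (hypothetical helper, not in the original file): reporting a
   single unresolved evar with the helpers defined above, passing [None] since no
   extra unsolvability information is available. *)
let pr_unresolved_evar env evi k =
  hov 0 (explain_unsolvable_implicit env evi k None)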
let explain_wrong_case_info env ind ci = let pi = pr_inductive (Global.env()) ind in if ci.ci_ind = ind then str "Pattern-matching expression on an object of inductive type" ++ spc () ++ pi ++ spc () ++ str "has invalid information." else let pc = pr_inductive (Global.env()) ci.ci_ind in str "A term of inductive type" ++ spc () ++ pi ++ spc () ++ str "was given to a pattern-matching expression on the inductive type" ++ spc () ++ pc ++ str "." let explain_cannot_unify env sigma m n = let m = nf_evar sigma m in let n = nf_evar sigma n in let pm = pr_lconstr_env env m in let pn = pr_lconstr_env env n in str "Impossible to unify" ++ brk(1,1) ++ pm ++ spc () ++ str "with" ++ brk(1,1) ++ pn ++ str "." let explain_cannot_unify_local env sigma m n subn = let pm = pr_lconstr_env env m in let pn = pr_lconstr_env env n in let psubn = pr_lconstr_env env subn in str "Impossible to unify" ++ brk(1,1) ++ pm ++ spc () ++ str "with" ++ brk(1,1) ++ pn ++ spc () ++ str "as" ++ brk(1,1) ++ psubn ++ str " contains local variables." let explain_refiner_cannot_generalize env ty = str "Cannot find a well-typed generalisation of the goal with type: " ++ pr_lconstr_env env ty ++ str "." let explain_no_occurrence_found env c id = str "Found no subterm matching " ++ pr_lconstr_env env c ++ str " in " ++ (match id with | Some id -> pr_id id | None -> str"the current goal") ++ str "." let explain_cannot_unify_binding_type env m n = let pm = pr_lconstr_env env m in let pn = pr_lconstr_env env n in str "This binding has type" ++ brk(1,1) ++ pm ++ spc () ++ str "which should be unifiable with" ++ brk(1,1) ++ pn ++ str "." let explain_cannot_find_well_typed_abstraction env p l = str "Abstracting over the " ++ str (plural (List.length l) "term") ++ spc () ++ hov 0 (pr_enum (pr_lconstr_env env) l) ++ spc () ++ str "leads to a term" ++ spc () ++ pr_lconstr_env env p ++ spc () ++ str "which is ill-typed." let explain_abstraction_over_meta _ m n = strbrk "Too complex unification problem: cannot find a solution for both " ++ pr_name m ++ spc () ++ str "and " ++ pr_name n ++ str "." let explain_non_linear_unification env m t = strbrk "Cannot unambiguously instantiate " ++ pr_name m ++ str ":" ++ strbrk " which would require to abstract twice on " ++ pr_lconstr_env env t ++ str "." 
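(* Illustrative sketch (not part of the original file): the explainers above all
   follow the same recipe -- normalise evars in the offending terms, rebuild a
   printing environment with pairwise distinct names, then assemble a [std_ppcmds]
   document.  A hedged example of that shape, for a hypothetical error: *)
let explain_cannot_compare env sigma m n =
  let m = nf_evar sigma m and n = nf_evar sigma n in
  let env = make_all_name_different env in
  str "Cannot compare" ++ brk(1,1) ++ pr_lconstr_env env m ++ spc () ++
  str "with" ++ brk(1,1) ++ pr_lconstr_env env n ++ str "."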
let explain_type_error env sigma err = let env = make_all_name_different env in match err with | UnboundRel n -> explain_unbound_rel env n | UnboundVar v -> explain_unbound_var env v | NotAType j -> explain_not_type env sigma j | BadAssumption c -> explain_bad_assumption env c | ReferenceVariables id -> explain_reference_variables id | ElimArity (ind, aritylst, c, pj, okinds) -> explain_elim_arity env ind aritylst c pj okinds | CaseNotInductive cj -> explain_case_not_inductive env sigma cj | NumberBranches (cj, n) -> explain_number_branches env sigma cj n | IllFormedBranch (c, i, actty, expty) -> explain_ill_formed_branch env sigma c i actty expty | Generalization (nvar, c) -> explain_generalization env nvar c | ActualType (j, pt) -> explain_actual_type env sigma j pt | CantApplyBadType (t, rator, randl) -> explain_cant_apply_bad_type env sigma t rator randl | CantApplyNonFunctional (rator, randl) -> explain_cant_apply_not_functional env sigma rator randl | IllFormedRecBody (err, lna, i, fixenv, vdefj) -> explain_ill_formed_rec_body env err lna i fixenv vdefj | IllTypedRecBody (i, lna, vdefj, vargs) -> explain_ill_typed_rec_body env sigma i lna vdefj vargs | WrongCaseInfo (ind,ci) -> explain_wrong_case_info env ind ci let explain_pretype_error env sigma err = let env = env_nf_betaiotaevar sigma env in let env = make_all_name_different env in match err with | CantFindCaseType c -> explain_cant_find_case_type env sigma c | OccurCheck (n,c) -> explain_occur_check env sigma n c | NotClean (n,c,k) -> explain_not_clean env sigma n c k | UnsolvableImplicit (evi,k,exp) -> explain_unsolvable_implicit env evi k exp | VarNotFound id -> explain_var_not_found env id | UnexpectedType (actual,expect) -> explain_unexpected_type env sigma actual expect | NotProduct c -> explain_not_product env sigma c | CannotUnify (m,n) -> explain_cannot_unify env sigma m n | CannotUnifyLocal (m,n,sn) -> explain_cannot_unify_local env sigma m n sn | CannotGeneralize ty -> explain_refiner_cannot_generalize env ty | NoOccurrenceFound (c, id) -> explain_no_occurrence_found env c id | CannotUnifyBindingType (m,n) -> explain_cannot_unify_binding_type env m n | CannotFindWellTypedAbstraction (p,l) -> explain_cannot_find_well_typed_abstraction env p l | AbstractionOverMeta (m,n) -> explain_abstraction_over_meta env m n | NonLinearUnification (m,c) -> explain_non_linear_unification env m c | TypingError t -> explain_type_error env sigma t (* Module errors *) open Modops let explain_not_match_error = function | InductiveFieldExpected _ -> strbrk "an inductive definition is expected" | DefinitionFieldExpected -> strbrk "a definition is expected" | ModuleFieldExpected -> strbrk "a module is expected" | ModuleTypeFieldExpected -> strbrk "a module type is expected" | NotConvertibleInductiveField id | NotConvertibleConstructorField id -> str "types given to " ++ str (string_of_id id) ++ str " differ" | NotConvertibleBodyField -> str "the body of definitions differs" | NotConvertibleTypeField (env, typ1, typ2) -> str "expected type" ++ spc () ++ quote (Printer.safe_pr_lconstr_env env typ2) ++ spc () ++ str "but found type" ++ spc () ++ quote (Printer.safe_pr_lconstr_env env typ1) | NotSameConstructorNamesField -> str "constructor names differ" | NotSameInductiveNameInBlockField -> str "inductive types names differ" | FiniteInductiveFieldExpected isfinite -> str "type is expected to be " ++ str (if isfinite then "coinductive" else "inductive") | InductiveNumbersFieldExpected n -> str "number of inductive types differs" | 
InductiveParamsNumberField n -> str "inductive type has not the right number of parameters" | RecordFieldExpected isrecord -> str "type is expected " ++ str (if isrecord then "" else "not ") ++ str "to be a record" | RecordProjectionsExpected nal -> (if List.length nal >= 2 then str "expected projection names are " else str "expected projection name is ") ++ pr_enum (function Name id -> str (string_of_id id) | _ -> str "_") nal | NotEqualInductiveAliases -> str "Aliases to inductive types do not match" | NoTypeConstraintExpected -> strbrk "a definition whose type is constrained can only be subtype of a definition whose type is itself constrained" let explain_signature_mismatch l spec why = str "Signature components for label " ++ str (string_of_label l) ++ str " do not match:" ++ spc () ++ explain_not_match_error why ++ str "." let explain_label_already_declared l = str ("The label "^string_of_label l^" is already declared.") let explain_application_to_not_path _ = str "Application of modules is restricted to paths." let explain_not_a_functor mtb = str "Application of not a functor." let explain_incompatible_module_types mexpr1 mexpr2 = str "Incompatible module types." let explain_not_equal_module_paths mp1 mp2 = str "Non equal modules." let explain_no_such_label l = str "No such label " ++ str (string_of_label l) ++ str "." let explain_incompatible_labels l l' = str "Opening and closing labels are not the same: " ++ str (string_of_label l) ++ str " <> " ++ str (string_of_label l') ++ str "!" let explain_signature_expected mtb = str "Signature expected." let explain_no_module_to_end () = str "No open module to end." let explain_no_module_type_to_end () = str "No open module type to end." let explain_not_a_module s = quote (str s) ++ str " is not a module." let explain_not_a_module_type s = quote (str s) ++ str " is not a module type." let explain_not_a_constant l = quote (pr_label l) ++ str " is not a constant." let explain_incorrect_label_constraint l = str "Incorrect constraint for label " ++ quote (pr_label l) ++ str "." let explain_generative_module_expected l = str "The module " ++ str (string_of_label l) ++ strbrk " is not generative. Only components of generative modules can be changed using the \"with\" construct." let explain_non_empty_local_context = function | None -> str "The local context is not empty." | Some l -> str "The local context of the component " ++ str (string_of_label l) ++ str " is not empty." let explain_label_missing l s = str "The field " ++ str (string_of_label l) ++ str " is missing in " ++ str s ++ str "." 
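(* Illustrative sketch (not part of the original file): how a caller could render a
   kernel type-checking failure with the [explain_type_error] dispatcher defined
   above, just before the module-error section.  It assumes the kernel exception
   [Type_errors.TypeError (env, err)]; [report_type_error] is a hypothetical helper. *)
let report_type_error f x =
  try f x
  with Type_errors.TypeError (env, err) ->
    (* print the explanation, then abort in the usual way *)
    msgnl (explain_type_error env Evd.empty err);
    raise Exit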
let explain_module_error = function | SignatureMismatch (l,spec,err) -> explain_signature_mismatch l spec err | LabelAlreadyDeclared l -> explain_label_already_declared l | ApplicationToNotPath mexpr -> explain_application_to_not_path mexpr | NotAFunctor mtb -> explain_not_a_functor mtb | IncompatibleModuleTypes (m1,m2) -> explain_incompatible_module_types m1 m2 | NotEqualModulePaths (mp1,mp2) -> explain_not_equal_module_paths mp1 mp2 | NoSuchLabel l -> explain_no_such_label l | IncompatibleLabels (l1,l2) -> explain_incompatible_labels l1 l2 | SignatureExpected mtb -> explain_signature_expected mtb | NoModuleToEnd -> explain_no_module_to_end () | NoModuleTypeToEnd -> explain_no_module_type_to_end () | NotAModule s -> explain_not_a_module s | NotAModuleType s -> explain_not_a_module_type s | NotAConstant l -> explain_not_a_constant l | IncorrectWithConstraint l -> explain_incorrect_label_constraint l | GenerativeModuleExpected l -> explain_generative_module_expected l | NonEmptyLocalContect lopt -> explain_non_empty_local_context lopt | LabelMissing (l,s) -> explain_label_missing l s (* Module internalization errors *) (* let explain_declaration_not_path _ = str "Declaration is not a path." *) let explain_not_module_nor_modtype s = quote (str s) ++ str " is not a module or module type." let explain_incorrect_with_in_module () = str "The syntax \"with\" is not allowed for modules." let explain_incorrect_module_application () = str "Illegal application to a module type." open Modintern let explain_module_internalization_error = function | NotAModuleNorModtype s -> explain_not_module_nor_modtype s | IncorrectWithInModule -> explain_incorrect_with_in_module () | IncorrectModuleApplication -> explain_incorrect_module_application () (* Typeclass errors *) let explain_not_a_class env c = pr_constr_env env c ++ str" is not a declared type class." let explain_unbound_method env cid id = str "Unbound method name " ++ Nameops.pr_id (snd id) ++ spc () ++ str"of class" ++ spc () ++ pr_global cid ++ str "." 
let pr_constr_exprs exprs = hv 0 (List.fold_right (fun d pps -> ws 2 ++ Ppconstr.pr_constr_expr d ++ pps) exprs (mt ())) let explain_no_instance env (_,id) l = str "No instance found for class " ++ Nameops.pr_id id ++ spc () ++ str "applied to arguments" ++ spc () ++ prlist_with_sep pr_spc (pr_lconstr_env env) l let is_goal_evar evi = match evi.evar_source with (_, GoalEvar) -> true | _ -> false let pr_constraints printenv env evm = let l = Evd.to_list evm in assert(l <> []); let (ev, evi) = List.hd l in if List.for_all (fun (ev', evi') -> eq_named_context_val evi.evar_hyps evi'.evar_hyps) l then let pe = pr_ne_context_of (str "In environment:") (mt ()) (reset_with_named_context evi.evar_hyps env) in (if printenv then pe ++ fnl () else mt ()) ++ prlist_with_sep (fun () -> fnl ()) (fun (ev, evi) -> str(string_of_existential ev) ++ str " : " ++ pr_lconstr evi.evar_concl) l ++ fnl() ++ pr_evar_map_constraints evm else pr_evar_map None evm let explain_unsatisfiable_constraints env evd constr = let evm = Evd.undefined_evars (Evarutil.nf_evar_map_undefined evd) in (* Remove goal evars *) let undef = fold_undefined (fun ev evi evm' -> if is_goal_evar evi then Evd.remove evm' ev else evm') evm evm in match constr with | None -> str"Unable to satisfy the following constraints:" ++ fnl() ++ pr_constraints true env undef | Some (ev, k) -> explain_typeclass_resolution env (Evd.find evm ev) k ++ fnl () ++ (let remaining = Evd.remove undef ev in if Evd.has_undefined remaining then str"With the following constraints:" ++ fnl() ++ pr_constraints false env remaining else mt ()) let explain_mismatched_contexts env c i j = str"Mismatched contexts while declaring instance: " ++ brk (1,1) ++ hov 1 (str"Expected:" ++ brk (1, 1) ++ pr_rel_context env j) ++ fnl () ++ brk (1,1) ++ hov 1 (str"Found:" ++ brk (1, 1) ++ pr_constr_exprs i) let explain_typeclass_error env err = match err with | NotAClass c -> explain_not_a_class env c | UnboundMethod (cid, id) -> explain_unbound_method env cid id | NoInstance (id, l) -> explain_no_instance env id l | UnsatisfiableConstraints (evd, c) -> explain_unsatisfiable_constraints env evd c | MismatchedContextInstance (c, i, j) -> explain_mismatched_contexts env c i j (* Refiner errors *) let explain_refiner_bad_type arg ty conclty = str "Refiner was given an argument" ++ brk(1,1) ++ pr_lconstr arg ++ spc () ++ str "of type" ++ brk(1,1) ++ pr_lconstr ty ++ spc () ++ str "instead of" ++ brk(1,1) ++ pr_lconstr conclty ++ str "." let explain_refiner_unresolved_bindings l = str "Unable to find an instance for the " ++ str (plural (List.length l) "variable") ++ spc () ++ prlist_with_sep pr_comma pr_name l ++ str"." let explain_refiner_cannot_apply t harg = str "In refiner, a term of type" ++ brk(1,1) ++ pr_lconstr t ++ spc () ++ str "could not be applied to" ++ brk(1,1) ++ pr_lconstr harg ++ str "." let explain_refiner_not_well_typed c = str "The term " ++ pr_lconstr c ++ str " is not well-typed." let explain_intro_needs_product () = str "Introduction tactics needs products." let explain_does_not_occur_in c hyp = str "The term" ++ spc () ++ pr_lconstr c ++ spc () ++ str "does not occur in" ++ spc () ++ pr_id hyp ++ str "." let explain_non_linear_proof c = str "Cannot refine with term" ++ brk(1,1) ++ pr_lconstr c ++ spc () ++ str "because a metavariable has several occurrences." 
let explain_meta_in_type c = str "In refiner, a meta appears in the type " ++ brk(1,1) ++ pr_lconstr c ++ str " of another meta" let explain_refiner_error = function | BadType (arg,ty,conclty) -> explain_refiner_bad_type arg ty conclty | UnresolvedBindings t -> explain_refiner_unresolved_bindings t | CannotApply (t,harg) -> explain_refiner_cannot_apply t harg | NotWellTyped c -> explain_refiner_not_well_typed c | IntroNeedsProduct -> explain_intro_needs_product () | DoesNotOccurIn (c,hyp) -> explain_does_not_occur_in c hyp | NonLinearProof c -> explain_non_linear_proof c | MetaInType c -> explain_meta_in_type c (* Inductive errors *) let error_non_strictly_positive env c v = let pc = pr_lconstr_env env c in let pv = pr_lconstr_env env v in str "Non strictly positive occurrence of " ++ pv ++ str " in" ++ brk(1,1) ++ pc ++ str "." let error_ill_formed_inductive env c v = let pc = pr_lconstr_env env c in let pv = pr_lconstr_env env v in str "Not enough arguments applied to the " ++ pv ++ str " in" ++ brk(1,1) ++ pc ++ str "." let error_ill_formed_constructor env id c v nparams nargs = let pv = pr_lconstr_env env v in let atomic = (nb_prod c = 0) in str "The type of constructor" ++ brk(1,1) ++ pr_id id ++ brk(1,1) ++ str "is not valid;" ++ brk(1,1) ++ strbrk (if atomic then "it must be " else "its conclusion must be ") ++ pv ++ (* warning: because of implicit arguments it is difficult to say which parameters must be explicitly given *) (if nparams<>0 then strbrk " applied to its " ++ str (plural nparams "parameter") else mt()) ++ (if nargs<>0 then str (if nparams<>0 then " and" else " applied") ++ strbrk " to some " ++ str (plural nargs "argument") else mt()) ++ str "." let pr_ltype_using_barendregt_convention_env env c = (* Use goal_concl_style as an approximation of Barendregt's convention (?) *) quote (pr_goal_concl_style_env env c) let error_bad_ind_parameters env c n v1 v2 = let pc = pr_ltype_using_barendregt_convention_env env c in let pv1 = pr_lconstr_env env v1 in let pv2 = pr_lconstr_env env v2 in str "Last occurrence of " ++ pv2 ++ str " must have " ++ pv1 ++ str " as " ++ nth n ++ str " argument in " ++ brk(1,1) ++ pc ++ str "." let error_same_names_types id = str "The name" ++ spc () ++ pr_id id ++ spc () ++ str "is used more than once." let error_same_names_constructors id = str "The constructor name" ++ spc () ++ pr_id id ++ spc () ++ str "is used more than once." let error_same_names_overlap idl = strbrk "The following names are used both as type names and constructor " ++ str "names:" ++ spc () ++ prlist_with_sep pr_comma pr_id idl ++ str "." let error_not_an_arity id = str "The type of" ++ spc () ++ pr_id id ++ spc () ++ str "is not an arity." let error_bad_entry () = str "Bad inductive definition." let error_large_non_prop_inductive_not_in_type () = str "Large non-propositional inductive types must be in Type." (* Recursion schemes errors *) let error_not_allowed_case_analysis isrec kind i = str (if isrec then "Induction" else "Case analysis") ++ strbrk " on sort " ++ pr_sort kind ++ strbrk " is not allowed for inductive definition " ++ pr_inductive (Global.env()) i ++ str "." let error_not_mutual_in_scheme ind ind' = if ind = ind' then str "The inductive type " ++ pr_inductive (Global.env()) ind ++ str " occurs twice." else str "The inductive types " ++ pr_inductive (Global.env()) ind ++ spc () ++ str "and" ++ spc () ++ pr_inductive (Global.env()) ind' ++ spc () ++ str "are not mutually defined." 
(* Inductive constructions errors *) let explain_inductive_error = function | NonPos (env,c,v) -> error_non_strictly_positive env c v | NotEnoughArgs (env,c,v) -> error_ill_formed_inductive env c v | NotConstructor (env,id,c,v,n,m) -> error_ill_formed_constructor env id c v n m | NonPar (env,c,n,v1,v2) -> error_bad_ind_parameters env c n v1 v2 | SameNamesTypes id -> error_same_names_types id | SameNamesConstructors id -> error_same_names_constructors id | SameNamesOverlap idl -> error_same_names_overlap idl | NotAnArity id -> error_not_an_arity id | BadEntry -> error_bad_entry () | LargeNonPropInductiveNotInType -> error_large_non_prop_inductive_not_in_type () (* Recursion schemes errors *) let explain_recursion_scheme_error = function | NotAllowedCaseAnalysis (isrec,k,i) -> error_not_allowed_case_analysis isrec k i | NotMutualInScheme (ind,ind')-> error_not_mutual_in_scheme ind ind' (* Pattern-matching errors *) let explain_bad_pattern env cstr ty = let env = make_all_name_different env in let pt = pr_lconstr_env env ty in let pc = pr_constructor env cstr in str "Found the constructor " ++ pc ++ brk(1,1) ++ str "while matching a term of type " ++ pt ++ brk(1,1) ++ str "which is not an inductive type." let explain_bad_constructor env cstr ind = let pi = pr_inductive env ind in (* let pc = pr_constructor env cstr in*) let pt = pr_inductive env (inductive_of_constructor cstr) in str "Found a constructor of inductive type " ++ pt ++ brk(1,1) ++ str "while a constructor of " ++ pi ++ brk(1,1) ++ str "is expected." let decline_string n s = if n = 0 then "no "^s^"s" else if n = 1 then "1 "^s else (string_of_int n^" "^s^"s") let explain_wrong_numarg_constructor env cstr n = str "The constructor " ++ pr_constructor env cstr ++ str " expects " ++ str (decline_string n "argument") ++ str "." let explain_wrong_numarg_inductive env ind n = str "The inductive type " ++ pr_inductive env ind ++ str " expects " ++ str (decline_string n "argument") ++ str "." let explain_wrong_predicate_arity env pred nondep_arity dep_arity= let env = make_all_name_different env in let pp = pr_lconstr_env env pred in str "The elimination predicate " ++ spc () ++ pp ++ fnl () ++ str "should be of arity" ++ spc () ++ pr_lconstr_env env nondep_arity ++ spc () ++ str "(for non dependent case) or" ++ spc () ++ pr_lconstr_env env dep_arity ++ spc () ++ str "(for dependent case)." let explain_needs_inversion env x t = let env = make_all_name_different env in let px = pr_lconstr_env env x in let pt = pr_lconstr_env env t in str "Sorry, I need inversion to compile pattern matching on term " ++ px ++ str " of type: " ++ pt ++ str "." let explain_unused_clause env pats = (* Without localisation let s = if List.length pats > 1 then "s" else "" in (str ("Unused clause with pattern"^s) ++ spc () ++ hov 0 (prlist_with_sep pr_spc pr_cases_pattern pats) ++ str ")") *) str "This clause is redundant." 
let explain_non_exhaustive env pats = str "Non exhaustive pattern-matching: no clause found for " ++ str (plural (List.length pats) "pattern") ++ spc () ++ hov 0 (prlist_with_sep pr_spc pr_cases_pattern pats) let explain_cannot_infer_predicate env typs = let env = make_all_name_different env in let pr_branch (cstr,typ) = let cstr,_ = decompose_app cstr in str "For " ++ pr_lconstr_env env cstr ++ str ": " ++ pr_lconstr_env env typ in str "Unable to unify the types found in the branches:" ++ spc () ++ hov 0 (prlist_with_sep pr_fnl pr_branch (Array.to_list typs)) let explain_pattern_matching_error env = function | BadPattern (c,t) -> explain_bad_pattern env c t | BadConstructor (c,ind) -> explain_bad_constructor env c ind | WrongNumargConstructor (c,n) -> explain_wrong_numarg_constructor env c n | WrongNumargInductive (c,n) -> explain_wrong_numarg_inductive env c n | WrongPredicateArity (pred,n,dep) -> explain_wrong_predicate_arity env pred n dep | NeedsInversion (x,t) -> explain_needs_inversion env x t | UnusedClause tms -> explain_unused_clause env tms | NonExhaustive tms -> explain_non_exhaustive env tms | CannotInferPredicate typs -> explain_cannot_infer_predicate env typs let explain_reduction_tactic_error = function | Tacred.InvalidAbstraction (env,c,(env',e)) -> str "The abstracted term" ++ spc () ++ quote (pr_goal_concl_style_env env c) ++ spc () ++ str "is not well typed." ++ fnl () ++ explain_type_error env' Evd.empty e let explain_ltac_call_trace (nrep,last,trace,loc) = let calls = (nrep,last) :: List.rev (List.map(fun(n,_,ck)->(n,ck))trace) in let tacexpr_differ te te' = (* NB: The following comparison may raise an exception since a tacexpr may embed a functional part via a TacExtend *) try te <> te' with Invalid_argument _ -> false in let pr_call (n,ck) = (match ck with | Proof_type.LtacNotationCall s -> quote (str s) | Proof_type.LtacNameCall cst -> quote (Pptactic.pr_ltac_constant cst) | Proof_type.LtacVarCall (id,t) -> quote (Nameops.pr_id id) ++ strbrk " (bound to " ++ Pptactic.pr_glob_tactic (Global.env()) t ++ str ")" | Proof_type.LtacAtomCall (te,otac) -> quote (Pptactic.pr_glob_tactic (Global.env()) (Tacexpr.TacAtom (dummy_loc,te))) ++ (match !otac with | Some te' when tacexpr_differ (Obj.magic te') te -> strbrk " (expanded to " ++ quote (Pptactic.pr_tactic (Global.env()) (Tacexpr.TacAtom (dummy_loc,te'))) ++ str ")" | _ -> mt ()) | Proof_type.LtacConstrInterp (c,(vars,unboundvars)) -> let filter = function (id,None) -> None | (id,Some id') -> Some(id,([],mkVar id')) in let unboundvars = list_map_filter filter unboundvars in quote (pr_glob_constr_env (Global.env()) c) ++ (if unboundvars <> [] or vars <> [] then strbrk " (with " ++ prlist_with_sep pr_comma (fun (id,c) -> pr_id id ++ str ":=" ++ Printer.pr_lconstr_under_binders c) (List.rev vars @ unboundvars) ++ str ")" else mt())) ++ (if n=2 then str " (repeated twice)" else if n>2 then str " (repeated "++int n++str" times)" else mt()) in if calls <> [] then let kind_of_last_call = match list_last calls with | (_,Proof_type.LtacConstrInterp _) -> ", last term evaluation failed." | _ -> ", last call failed." 
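(* Illustrative rendering, with hypothetical tactic names: the message
   assembled just below reads roughly
   "In nested Ltac calls to my_tac and my_aux (repeated twice), last call failed." *)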
in hov 0 (str "In nested Ltac calls to " ++ pr_enum pr_call calls ++ strbrk kind_of_last_call) else mt () coq-8.4pl2/tactics/0000750000175000001440000000000012127276553013272 5ustar notinuserscoq-8.4pl2/tactics/refine.ml0000640000175000001440000003134712010532755015073 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* exist nat [y:nat]((minus y x)=x) (plus x x) ? * ce qui engendre le but * (minus (plus x x) x) = x *) (* Pour cela, on procde de la manire suivante : * * 1. Un terme de preuve incomplet est un terme contenant des variables * existentielles Evar i.e. "_" en syntaxe concrte. * La rsolution de ces variables n'est plus ncessairement totale * (ise_resolve called with fail_evar=false) et les variables * existentielles restantes sont remplaces par des mta-variables * castes par leur types (celui est connu : soit donn, soit trouv * pendant la phase de rsolution). * * 2. On met ensuite le terme " plat" i.e. on n'autorise des MV qu'au * permier niveau et pour chacune d'elles, si ncessaire, on donne * son tour un terme de preuve incomplet pour la rsoudre. * Exemple: le terme (f a _ (fun (x:nat) => e _)) donne * (f a ?1 ?2) avec: * - ?2 := fun (x:nat) => ?3 * - ?3 := e ?4 * ?1 et ?4 donneront des buts * * 3. On crit ensuite une tactique tcc qui engendre les sous-buts * partir d'une preuve incomplte. *) open Pp open Util open Names open Term open Termops open Namegen open Tacmach open Sign open Environ open Reduction open Typing open Tactics open Tacticals open Printer type term_with_holes = TH of constr * meta_type_map * sg_proofs and sg_proofs = (term_with_holes option) list (* pour debugger *) let rec pp_th (TH(c,mm,sg)) = (str"TH=[ " ++ hov 0 (pr_lconstr c ++ fnl () ++ (* pp_mm mm ++ fnl () ++ *) pp_sg sg) ++ str "]") and pp_mm l = hov 0 (prlist_with_sep (fun _ -> (fnl ())) (fun (n,c) -> (int n ++ str" --> " ++ pr_lconstr c)) l) and pp_sg sg = hov 0 (prlist_with_sep (fun _ -> (fnl ())) (function None -> (str"None") | Some th -> (pp_th th)) sg) (* compute_metamap : constr -> 'a evar_map -> term_with_holes * ralise le 2. ci-dessus * * Pour cela, on renvoie une meta_map qui indique pour chaque meta-variable * si elle correspond un but (None) ou si elle rduite son tour * par un terme de preuve incomplet (Some c). * * On a donc l'INVARIANT suivant : le terme c rendu est "de niveau 1" * -- i.e. plat -- et la meta_map contient autant d'lments qu'il y * a de meta-variables dans c. On suppose de plus que l'ordre dans la * meta_map correspond celui des buts qui seront engendrs par le refine. *) let replace_by_meta env sigma = function | TH (m, mm, sgp) when isMeta (strip_outer_cast m) -> m,mm,sgp | (TH (c,mm,_)) as th -> let n = Evarutil.new_meta() in let m = mkMeta n in (* quand on introduit une mv on calcule son type *) let ty = match kind_of_term c with | Lambda (Name id,c1,c2) when isCast c2 -> let _,_,t = destCast c2 in mkNamedProd id c1 t | Lambda (Anonymous,c1,c2) when isCast c2 -> let _,_,t = destCast c2 in mkArrow c1 t | _ -> (* (App _ | Case _) -> *) let sigma' = List.fold_right (fun (m,t) sigma -> Evd.meta_declare m t sigma) mm sigma in Retyping.get_type_of env sigma' c (* | Fix ((_,j),(v,_,_)) -> v.(j) (* en pleine confiance ! 
*) | _ -> invalid_arg "Tcc.replace_by_meta (TO DO)" *) in mkCast (m,DEFAULTcast, ty),[n,ty],[Some th] exception NoMeta let replace_in_array keep_length env sigma a = if array_for_all (function (TH (_,_,[])) -> true | _ -> false) a then raise NoMeta; let a' = Array.map (function | (TH (c,mm,[])) when not keep_length -> c,mm,[] | th -> replace_by_meta env sigma th) a in let v' = Array.map pi1 a' in let mm = Array.fold_left (@) [] (Array.map pi2 a') in let sgp = Array.fold_left (@) [] (Array.map pi3 a') in v',mm,sgp let fresh env n = let id = match n with Name x -> x | _ -> id_of_string "_H" in next_ident_away_in_goal id (ids_of_named_context (named_context env)) let rec compute_metamap env sigma c = match kind_of_term c with (* le terme est directement une preuve *) | (Const _ | Evar _ | Ind _ | Construct _ | Sort _ | Var _ | Rel _) -> TH (c,[],[]) (* le terme est une mv => un but *) | Meta n -> TH (c,[],[None]) | Cast (m,_, ty) when isMeta m -> TH (c,[destMeta m,ty],[None]) (* abstraction => il faut dcomposer si le terme dessous n'est pas pur * attention : dans ce cas il faut remplacer (Rel 1) par (Var x) * o x est une variable FRAICHE *) | Lambda (name,c1,c2) -> let v = fresh env name in let env' = push_named (v,None,c1) env in begin match compute_metamap env' sigma (subst1 (mkVar v) c2) with (* terme de preuve complet *) | TH (_,_,[]) -> TH (c,[],[]) (* terme de preuve incomplet *) | th -> let m,mm,sgp = replace_by_meta env' sigma th in TH (mkLambda (Name v,c1,m), mm, sgp) end | LetIn (name, c1, t1, c2) -> let v = fresh env name in let th1 = compute_metamap env sigma c1 in let env' = push_named (v,Some c1,t1) env in let th2 = compute_metamap env' sigma (subst1 (mkVar v) c2) in begin match th1,th2 with (* terme de preuve complet *) | TH (_,_,[]), TH (_,_,[]) -> TH (c,[],[]) (* terme de preuve incomplet *) | TH (c1,mm1,sgp1), TH (c2,mm2,sgp2) -> let m1,mm1,sgp1 = if sgp1=[] then (c1,mm1,[]) else replace_by_meta env sigma th1 in let m2,mm2,sgp2 = if sgp2=[] then (c2,mm2,[]) else replace_by_meta env' sigma th2 in TH (mkNamedLetIn v m1 t1 m2, mm1@mm2, sgp1@sgp2) end (* 4. Application *) | App (f,v) -> let a = Array.map (compute_metamap env sigma) (Array.append [|f|] v) in begin try let v',mm,sgp = replace_in_array false env sigma a in let v'' = Array.sub v' 1 (Array.length v) in TH (mkApp(v'.(0), v''),mm,sgp) with NoMeta -> TH (c,[],[]) end | Case (ci,p,cc,v) -> (* bof... *) let nbr = Array.length v in let v = Array.append [|p;cc|] v in let a = Array.map (compute_metamap env sigma) v in begin try let v',mm,sgp = replace_in_array false env sigma a in let v'' = Array.sub v' 2 nbr in TH (mkCase (ci,v'.(0),v'.(1),v''),mm,sgp) with NoMeta -> TH (c,[],[]) end (* 5. Fix. *) | Fix ((ni,i),(fi,ai,v)) -> (* TODO: use a fold *) let vi = Array.map (fresh env) fi in let fi' = Array.map (fun id -> Name id) vi in let env' = push_named_rec_types (fi',ai,v) env in let a = Array.map (compute_metamap env' sigma) (Array.map (substl (List.map mkVar (Array.to_list vi))) v) in begin try let v',mm,sgp = replace_in_array true env' sigma a in let fix = mkFix ((ni,i),(fi',ai,v')) in TH (fix,mm,sgp) with NoMeta -> TH (c,[],[]) end (* Cast. Est-ce bien exact ? *) | Cast (c,_,t) -> compute_metamap env sigma c (*let TH (c',mm,sgp) = compute_metamap sign c in TH (mkCast (c',t),mm,sgp) *) (* Produit. Est-ce bien exact ? *) | Prod (_,_,_) -> if occur_meta c then error "refine: proof term contains metas in a product." else TH (c,[],[]) (* Cofix. 
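Handled like the Fix case above, except that cofixpoints carry no decreasing
   argument indices: fresh names are pushed for the mutually defined bodies and
   each of them is decomposed recursively.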
*) | CoFix (i,(fi,ai,v)) -> let vi = Array.map (fresh env) fi in let fi' = Array.map (fun id -> Name id) vi in let env' = push_named_rec_types (fi',ai,v) env in let a = Array.map (compute_metamap env' sigma) (Array.map (substl (List.map mkVar (Array.to_list vi))) v) in begin try let v',mm,sgp = replace_in_array true env' sigma a in let cofix = mkCoFix (i,(fi',ai,v')) in TH (cofix,mm,sgp) with NoMeta -> TH (c,[],[]) end (* tcc_aux : term_with_holes -> tactic * * Ralise le 3. ci-dessus *) let ensure_products n = let p = ref 0 in let rec aux n gl = if n = 0 then tclFAIL 0 (mt()) gl else tclTHEN (tclORELSE intro (fun gl -> incr p; introf gl)) (aux (n-1)) gl in tclORELSE (aux n) (* Now we know how many red are needed *) (fun gl -> tclDO !p red_in_concl gl) let rec tcc_aux subst (TH (c,mm,sgp) as _th) gl = let c = substl subst c in match (kind_of_term c,sgp) with (* mv => sous-but : on ne fait rien *) | Meta _ , _ -> tclIDTAC gl | Cast (c,_,_), _ when isMeta c -> tclIDTAC gl (* terme pur => refine *) | _,[] -> refine c gl (* abstraction => intro *) | Lambda (Name id,_,m), _ -> assert (isMeta (strip_outer_cast m)); begin match sgp with | [None] -> intro_mustbe_force id gl | [Some th] -> tclTHEN (introduction id) (onLastHypId (fun id -> tcc_aux (mkVar id::subst) th)) gl | _ -> assert false end | Lambda (Anonymous,_,m), _ -> (* if anon vars are allowed in evars *) assert (isMeta (strip_outer_cast m)); begin match sgp with | [None] -> tclTHEN intro (onLastHypId (fun id -> clear [id])) gl | [Some th] -> tclTHEN intro (onLastHypId (fun id -> tclTHEN (clear [id]) (tcc_aux (mkVar (*dummy*) id::subst) th))) gl | _ -> assert false end (* let in without holes in the body => possibly dependent intro *) | LetIn (Name id,c1,t1,c2), _ when not (isMeta (strip_outer_cast c1)) -> let c = pf_concl gl in let newc = mkNamedLetIn id c1 t1 c in tclTHEN (change_in_concl None newc) (match sgp with | [None] -> introduction id | [Some th] -> tclTHEN (introduction id) (onLastHypId (fun id -> tcc_aux (mkVar id::subst) th)) | _ -> assert false) gl (* let in with holes in the body => unable to handle dependency because of evars limitation, use non dependent assert instead *) | LetIn (Name id,c1,t1,c2), _ -> tclTHENS (assert_tac (Name id) t1) [(match List.hd sgp with | None -> tclIDTAC | Some th -> onLastHypId (fun id -> tcc_aux (mkVar id::subst) th)); (match List.tl sgp with | [] -> refine (subst1 (mkVar id) c2) (* a complete proof *) | [None] -> tclIDTAC (* a meta *) | [Some th] -> (* a partial proof *) onLastHypId (fun id -> tcc_aux (mkVar id::subst) th) | _ -> assert false)] gl (* fix => tactique Fix *) | Fix ((ni,j),(fi,ai,_)) , _ -> let out_name = function | Name id -> id | _ -> error "Recursive functions must have names." in let fixes = array_map3 (fun f n c -> (out_name f,succ n,c)) fi ni ai in let firsts,lasts = list_chop j (Array.to_list fixes) in tclTHENS (tclTHEN (ensure_products (succ ni.(j))) (mutual_fix (out_name fi.(j)) (succ ni.(j)) (firsts@List.tl lasts) j)) (List.map (function | None -> tclIDTAC | Some th -> tcc_aux subst th) sgp) gl (* cofix => tactique CoFix *) | CoFix (j,(fi,ai,_)) , _ -> let out_name = function | Name id -> id | _ -> error "Recursive functions must have names." in let cofixes = array_map2 (fun f c -> (out_name f,c)) fi ai in let firsts,lasts = list_chop j (Array.to_list cofixes) in tclTHENS (mutual_cofix (out_name fi.(j)) (firsts@List.tl lasts) j) (List.map (function | None -> tclIDTAC | Some th -> tcc_aux subst th) sgp) gl (* sinon on fait refine du terme puis appels rec. 
sur les sous-buts. * c'est le cas pour App et MutCase. *) | _ -> tclTHENS (refine c) (List.map (function None -> tclIDTAC | Some th -> tcc_aux subst th) sgp) gl (* Et finalement la tactique refine elle-mme : *) let refine (evd,c) gl = let sigma = project gl in let evd = Typeclasses.resolve_typeclasses ~filter:Typeclasses.no_goals (pf_env gl) evd in let c = Evarutil.nf_evar evd c in let (evd,c) = Evarutil.evars_to_metas sigma (evd,c) in (* Relies on Cast's put on Meta's by evars_to_metas, because it is otherwise complicated to update meta types when passing through a binder *) let th = compute_metamap (pf_env gl) evd c in tclTHEN (Refiner.tclEVARS evd) (tcc_aux [] th) gl coq-8.4pl2/tactics/extraargs.mli0000640000175000001440000000551712010532755015774 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Pp.std_ppcmds val occurrences : (int list or_var) Pcoq.Gram.entry val rawwit_occurrences : (int list or_var) raw_abstract_argument_type val wit_occurrences : (int list) typed_abstract_argument_type val pr_occurrences : int list Glob_term.or_var -> Pp.std_ppcmds val rawwit_glob : constr_expr raw_abstract_argument_type val wit_glob : (Tacinterp.interp_sign * glob_constr) typed_abstract_argument_type val glob : constr_expr Pcoq.Gram.entry type 'id gen_place= ('id * hyp_location_flag,unit) location type loc_place = identifier Util.located gen_place type place = identifier gen_place val rawwit_hloc : loc_place raw_abstract_argument_type val wit_hloc : place typed_abstract_argument_type val hloc : loc_place Pcoq.Gram.entry val pr_hloc : loc_place -> Pp.std_ppcmds val in_arg_hyp: (Names.identifier Util.located list option * bool) Pcoq.Gram.entry val globwit_in_arg_hyp : (Names.identifier Util.located list option * bool) glob_abstract_argument_type val rawwit_in_arg_hyp : (Names.identifier Util.located list option * bool) raw_abstract_argument_type val wit_in_arg_hyp : (Names.identifier list option * bool) typed_abstract_argument_type val raw_in_arg_hyp_to_clause : (Names.identifier Util.located list option * bool) -> Tacticals.clause val glob_in_arg_hyp_to_clause : (Names.identifier list option * bool) -> Tacticals.clause val pr_in_arg_hyp : (Names.identifier list option * bool) -> Pp.std_ppcmds val by_arg_tac : Tacexpr.raw_tactic_expr option Pcoq.Gram.entry val rawwit_by_arg_tac : raw_tactic_expr option raw_abstract_argument_type val wit_by_arg_tac : glob_tactic_expr option typed_abstract_argument_type val pr_by_arg_tac : (int * Ppextend.parenRelation -> raw_tactic_expr -> Pp.std_ppcmds) -> raw_tactic_expr option -> Pp.std_ppcmds (** Spiwack: Primitive for retroknowledge registration *) val retroknowledge_field : Retroknowledge.field Pcoq.Gram.entry val rawwit_retroknowledge_field : Retroknowledge.field raw_abstract_argument_type val wit_retroknowledge_field : Retroknowledge.field typed_abstract_argument_type coq-8.4pl2/tactics/nbtermdn.ml0000640000175000001440000001074212010532755015430 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* struct module X = struct type t = constr_pattern*int let compare = Pervasives.compare end module Term_dn = Termdn.Make(Y) open Term_dn module Z = struct type t = Term_dn.term_label let compare x y = let make_name n = match n with | GRLabel(ConstRef con) -> GRLabel(ConstRef(constant_of_kn(canonical_con con))) | GRLabel(IndRef (kn,i)) -> 
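(* Labels are rebuilt from the canonical kernel name of the underlying global
   reference before comparison, so the same constant, inductive or constructor
   reached through different module paths compares equal. *)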
GRLabel(IndRef(mind_of_kn(canonical_mind kn),i)) | GRLabel(ConstructRef ((kn,i),j ))-> GRLabel(ConstructRef((mind_of_kn(canonical_mind kn),i),j)) | k -> k in Pervasives.compare (make_name x) (make_name y) end module Dn = Dn.Make(X)(Z)(Y) module Bounded_net = Btermdn.Make(Y) type 'na t = { mutable table : ('na,constr_pattern * Y.t) Gmap.t; mutable patterns : (Term_dn.term_label option,Bounded_net.t) Gmap.t } type 'na frozen_t = ('na,constr_pattern * Y.t) Gmap.t * (Term_dn.term_label option, Bounded_net.t) Gmap.t let create () = { table = Gmap.empty; patterns = Gmap.empty } let get_dn dnm hkey = try Gmap.find hkey dnm with Not_found -> Bounded_net.create () let add dn (na,(pat,valu)) = let hkey = Option.map fst (Term_dn.constr_pat_discr pat) in dn.table <- Gmap.add na (pat,valu) dn.table; let dnm = dn.patterns in dn.patterns <- Gmap.add hkey (Bounded_net.add None (get_dn dnm hkey) (pat,valu)) dnm let rmv dn na = let (pat,valu) = Gmap.find na dn.table in let hkey = Option.map fst (Term_dn.constr_pat_discr pat) in dn.table <- Gmap.remove na dn.table; let dnm = dn.patterns in dn.patterns <- Gmap.add hkey (Bounded_net.rmv None (get_dn dnm hkey) (pat,valu)) dnm let in_dn dn na = Gmap.mem na dn.table let remap ndn na (pat,valu) = rmv ndn na; add ndn (na,(pat,valu)) let decomp = let rec decrec acc c = match kind_of_term c with | App (f,l) -> decrec (Array.fold_right (fun a l -> a::l) l acc) f | Cast (c1,_,_) -> decrec acc c1 | _ -> (c,acc) in decrec [] let constr_val_discr t = let c, l = decomp t in match kind_of_term c with | Ind ind_sp -> Dn.Label(Term_dn.GRLabel (IndRef ind_sp),l) | Construct cstr_sp -> Dn.Label(Term_dn.GRLabel (ConstructRef cstr_sp),l) | Var id -> Dn.Label(Term_dn.GRLabel (VarRef id),l) | Const _ -> Dn.Everything | _ -> Dn.Nothing let constr_val_discr_st (idpred,cpred) t = let c, l = decomp t in match kind_of_term c with | Const c -> if Cpred.mem c cpred then Dn.Everything else Dn.Label(Term_dn.GRLabel (ConstRef c),l) | Ind ind_sp -> Dn.Label(Term_dn.GRLabel (IndRef ind_sp),l) | Construct cstr_sp -> Dn.Label(Term_dn.GRLabel (ConstructRef cstr_sp),l) | Var id when not (Idpred.mem id idpred) -> Dn.Label(Term_dn.GRLabel (VarRef id),l) | Prod (n, d, c) -> Dn.Label(Term_dn.ProdLabel, [d; c]) | Lambda (n, d, c) -> Dn.Label(Term_dn.LambdaLabel, [d; c] @ l) | Sort _ -> Dn.Label(Term_dn.SortLabel, []) | Evar _ -> Dn.Everything | _ -> Dn.Nothing let lookup dn valu = let hkey = match (constr_val_discr valu) with | Dn.Label(l,_) -> Some l | _ -> None in try Bounded_net.lookup None (Gmap.find hkey dn.patterns) valu with Not_found -> [] let app f dn = Gmap.iter f dn.table let dnet_depth = Btermdn.dnet_depth let freeze dn = (dn.table, dn.patterns) let unfreeze (fnm,fdnm) dn = dn.table <- fnm; dn.patterns <- fdnm let empty dn = dn.table <- Gmap.empty; dn.patterns <- Gmap.empty let to2lists dn = (Gmap.to_list dn.table, Gmap.to_list dn.patterns) end coq-8.4pl2/tactics/dn.mli0000640000175000001440000000245211366253725014402 0ustar notinusers module Make : functor (X : Set.OrderedType) -> functor (Y : Map.OrderedType) -> functor (Z : Map.OrderedType) -> sig type decompose_fun = X.t -> (Y.t * X.t list) option type t val create : unit -> t (** [add t f (tree,inf)] adds a structured object [tree] together with the associated information [inf] to the table [t]; the function [f] is used to translated [tree] into its prefix decomposition: [f] must decompose any tree into a label characterizing its root node and the list of its subtree *) val add : t -> decompose_fun -> X.t * Z.t -> t val rmv : t -> 
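(* Illustrative sketch with a hypothetical tree type: for
   [type tree = Hole | Node of string * tree list], a suitable decomposition
   function to pass to [add] and [lookup] is
   [function Hole -> None | Node (l, ts) -> Some (l, ts)], where [None] marks
   nodes carrying no discriminating label. *)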
decompose_fun -> X.t * Z.t -> t type 'res lookup_res = Label of 'res | Nothing | Everything type 'tree lookup_fun = 'tree -> (Y.t * 'tree list) lookup_res (** [lookup t f tree] looks for trees (and their associated information) in table [t] such that the structured object [tree] matches against them; [f] is used to translated [tree] into its prefix decomposition: [f] must decompose any tree into a label characterizing its root node and the list of its subtree *) val lookup : t -> 'term lookup_fun -> 'term -> (X.t * Z.t) list val app : ((X.t * Z.t) -> unit) -> t -> unit val skip_arg : int -> t -> (t * bool) list end coq-8.4pl2/tactics/evar_tactics.mli0000640000175000001440000000136412010532755016437 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Tacinterp.interp_sign * Glob_term.glob_constr -> (identifier * hyp_location_flag, unit) location -> tactic val let_evar : name -> Term.types -> tactic coq-8.4pl2/tactics/tauto.ml40000640000175000001440000002242512010532755015040 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* c | _ -> failwith "tauto: anomaly" (** Parametrization of tauto *) (* Whether conjunction and disjunction are restricted to binary connectives *) (* (this is the compatibility mode) *) let binary_mode = true (* Whether conjunction and disjunction are restricted to the connectives *) (* having the structure of "and" and "or" (up to the choice of sorts) in *) (* contravariant position in an hypothesis (this is the compatibility mode) *) let strict_in_contravariant_hyp = true (* Whether conjunction and disjunction are restricted to the connectives *) (* having the structure of "and" and "or" (up to the choice of sorts) in *) (* an hypothesis and in the conclusion *) let strict_in_hyp_and_ccl = false (* Whether unit type includes equality types *) let strict_unit = false (* Whether inner iff are unfolded *) let iff_unfolding = ref false let unfold_iff () = !iff_unfolding || Flags.version_less_or_equal Flags.V8_2 open Goptions let _ = declare_bool_option { optsync = true; optdepr = false; optname = "unfolding of iff and not in intuition"; optkey = ["Intuition";"Iff";"Unfolding"]; optread = (fun () -> !iff_unfolding); optwrite = (:=) iff_unfolding } (** Test *) let is_empty ist = if is_empty_type (assoc_var "X1" ist) then <:tactic> else <:tactic> (* Strictly speaking, this exceeds the propositional fragment as it matches also equality types (and solves them if a reflexivity) *) let is_unit_or_eq ist = let test = if strict_unit then is_unit_type else is_unit_or_eq_type in if test (assoc_var "X1" ist) then <:tactic> else <:tactic> let is_record t = let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in mib.Declarations.mind_record | _ -> false let is_binary t = isApp t && let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in mib.Declarations.mind_nparams = 2 | _ -> false let iter_tac tacl = List.fold_right (fun tac tacs -> <:tactic< $tac; $tacs >>) tacl (** Dealing with conjunction *) let is_conj ist = let ind = assoc_var "X1" ist in if (not binary_mode || is_binary ind) (* && not (is_record ind) *) && is_conjunction ~strict:strict_in_hyp_and_ccl ind then <:tactic> else <:tactic> let flatten_contravariant_conj ist = let typ 
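(* Rewrites a hypothesis [id : (A /\ B) -> C] into [A -> B -> C], as used by
   the corresponding rule of [simplif] below: X1 is bound to the conjunctive
   domain, X2 to the codomain and id to the hypothesis being flattened. *)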
= assoc_var "X1" ist in let c = assoc_var "X2" ist in let hyp = assoc_var "id" ist in match match_with_conjunction ~strict:strict_in_contravariant_hyp typ with | Some (_,args) -> let i = List.length args in if not binary_mode || i = 2 then let newtyp = valueIn (VConstr ([],List.fold_right mkArrow args c)) in let hyp = valueIn (VConstr ([],hyp)) in let intros = iter_tac (List.map (fun _ -> <:tactic< intro >>) args) <:tactic< idtac >> in <:tactic< let newtyp := $newtyp in let hyp := $hyp in assert newtyp by ($intros; apply hyp; split; assumption); clear hyp >> else <:tactic> | _ -> <:tactic> (** Dealing with disjunction *) let is_disj ist = let t = assoc_var "X1" ist in if (not binary_mode || is_binary t) && is_disjunction ~strict:strict_in_hyp_and_ccl t then <:tactic> else <:tactic> let flatten_contravariant_disj ist = let typ = assoc_var "X1" ist in let c = assoc_var "X2" ist in let hyp = assoc_var "id" ist in match match_with_disjunction ~strict:strict_in_contravariant_hyp typ with | Some (_,args) -> let i = List.length args in if not binary_mode || i = 2 then let hyp = valueIn (VConstr ([],hyp)) in iter_tac (list_map_i (fun i arg -> let typ = valueIn (VConstr ([],mkArrow arg c)) in let i = Tacexpr.Integer i in <:tactic< let typ := $typ in let hyp := $hyp in let i := $i in assert typ by (intro; apply hyp; constructor i; assumption) >>) 1 args) <:tactic< let hyp := $hyp in clear hyp >> else <:tactic> | _ -> <:tactic> (** Main tactic *) let not_dep_intros ist = <:tactic< repeat match goal with | |- (?X1 -> ?X2) => intro | |- (Coq.Init.Logic.not _) => unfold Coq.Init.Logic.not at 1 | H:(Coq.Init.Logic.not _)|-_ => unfold Coq.Init.Logic.not at 1 in H | H:(Coq.Init.Logic.not _)->_|-_ => unfold Coq.Init.Logic.not at 1 in H end >> let axioms ist = let t_is_unit_or_eq = tacticIn is_unit_or_eq and t_is_empty = tacticIn is_empty in <:tactic< match reverse goal with | |- ?X1 => $t_is_unit_or_eq; constructor 1 | _:?X1 |- _ => $t_is_empty; elimtype X1; assumption | _:?X1 |- ?X1 => assumption end >> let simplif ist = let t_is_unit_or_eq = tacticIn is_unit_or_eq and t_is_conj = tacticIn is_conj and t_flatten_contravariant_conj = tacticIn flatten_contravariant_conj and t_flatten_contravariant_disj = tacticIn flatten_contravariant_disj and t_is_disj = tacticIn is_disj and t_not_dep_intros = tacticIn not_dep_intros in <:tactic< $t_not_dep_intros; repeat (match reverse goal with | id: ?X1 |- _ => $t_is_conj; elim id; do 2 intro; clear id | id: (Coq.Init.Logic.iff _ _) |- _ => elim id; do 2 intro; clear id | id: (Coq.Init.Logic.not _) |- _ => red in id | id: ?X1 |- _ => $t_is_disj; elim id; intro; clear id | id0: ?X1 -> ?X2, id1: ?X1|- _ => (* generalize (id0 id1); intro; clear id0 does not work (see Marco Maggiesi's bug PR#301) so we instead use Assert and exact. 
*) assert X2; [exact (id0 id1) | clear id0] | id: ?X1 -> ?X2|- _ => $t_is_unit_or_eq; cut X2; [ intro; clear id | (* id : ?X1 -> ?X2 |- ?X2 *) cut X1; [exact id| constructor 1; fail] ] | id: ?X1 -> ?X2|- _ => $t_flatten_contravariant_conj (* moved from "id:(?A/\?B)->?X2|-" to "?A->?B->?X2|-" *) | id: (Coq.Init.Logic.iff ?X1 ?X2) -> ?X3|- _ => assert ((X1 -> X2) -> (X2 -> X1) -> X3) by (do 2 intro; apply id; split; assumption); clear id | id: ?X1 -> ?X2|- _ => $t_flatten_contravariant_disj (* moved from "id:(?A\/?B)->?X2|-" to "?A->?X2,?B->?X2|-" *) | |- ?X1 => $t_is_conj; split | |- (Coq.Init.Logic.iff _ _) => split | |- (Coq.Init.Logic.not _) => red end; $t_not_dep_intros) >> let rec tauto_intuit t_reduce solver ist = let t_axioms = tacticIn axioms and t_simplif = tacticIn simplif and t_is_disj = tacticIn is_disj and t_tauto_intuit = tacticIn (tauto_intuit t_reduce solver) in let t_solver = globTacticIn (fun _ist -> solver) in <:tactic< ($t_simplif;$t_axioms || match reverse goal with | id:(?X1 -> ?X2)-> ?X3|- _ => cut X3; [ intro; clear id; $t_tauto_intuit | cut (X1 -> X2); [ exact id | generalize (fun y:X2 => id (fun x:X1 => y)); intro; clear id; solve [ $t_tauto_intuit ]]] | |- ?X1 => $t_is_disj; solve [left;$t_tauto_intuit | right;$t_tauto_intuit] end || (* NB: [|- _ -> _] matches any product *) match goal with | |- _ -> _ => intro; $t_tauto_intuit | |- _ => $t_reduce;$t_solver end || $t_solver ) >> let reduction_not _ist = if unfold_iff () then <:tactic< unfold Coq.Init.Logic.not, Coq.Init.Logic.iff in * >> else <:tactic< unfold Coq.Init.Logic.not in * >> let t_reduction_not = tacticIn reduction_not let intuition_gen tac = interp (tacticIn (tauto_intuit t_reduction_not tac)) let tauto_intuitionistic g = try intuition_gen <:tactic> g with Refiner.FailError _ | UserError _ -> errorlabstrm "tauto" (str "tauto failed.") let coq_nnpp_path = let dir = List.map id_of_string ["Classical_Prop";"Logic";"Coq"] in Libnames.make_path (make_dirpath dir) (id_of_string "NNPP") let tauto_classical nnpp g = try tclTHEN (apply nnpp) tauto_intuitionistic g with UserError _ -> errorlabstrm "tauto" (str "Classical tauto failed.") let tauto g = try let nnpp = constr_of_global (Nametab.global_of_path coq_nnpp_path) in (* try intuitionistic version first to avoid an axiom if possible *) tclORELSE tauto_intuitionistic (tauto_classical nnpp) g with Not_found -> tauto_intuitionistic g let default_intuition_tac = <:tactic< auto with * >> TACTIC EXTEND tauto | [ "tauto" ] -> [ tauto ] END TACTIC EXTEND intuition | [ "intuition" ] -> [ intuition_gen default_intuition_tac ] | [ "intuition" tactic(t) ] -> [ intuition_gen t ] END coq-8.4pl2/tactics/hightactics.mllib0000640000175000001440000000011111166736124016574 0ustar notinusersRefine Extraargs Extratactics Eauto Class_tactics Rewrite Tauto Eqdecide coq-8.4pl2/tactics/auto.mli0000640000175000001440000002141012010532755014732 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* hints_path val path_matches : hints_path -> hints_path_atom list -> bool val path_derivate : hints_path -> hints_path_atom -> hints_path val pp_hints_path : hints_path -> Pp.std_ppcmds module Hint_db : sig type t val empty : transparent_state -> bool -> t val find : global_reference -> t -> search_entry val map_none : t -> pri_auto_tactic list val map_all : global_reference -> t -> pri_auto_tactic list val map_auto : global_reference * constr -> t -> pri_auto_tactic list val add_one : 
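(* [add_one] adds a single hint entry to the database; [add_list] below does
   the same for a list of entries. *)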
hint_entry -> t -> t val add_list : (hint_entry) list -> t -> t val remove_one : global_reference -> t -> t val remove_list : global_reference list -> t -> t val iter : (global_reference option -> pri_auto_tactic list -> unit) -> t -> unit val use_dn : t -> bool val transparent_state : t -> transparent_state val set_transparent_state : t -> transparent_state -> t val add_cut : hints_path -> t -> t val cut : t -> hints_path val unfolds : t -> Idset.t * Cset.t end type hint_db_name = string type hint_db = Hint_db.t type hints_entry = | HintsResolveEntry of (int option * bool * hints_path_atom * constr) list | HintsImmediateEntry of (hints_path_atom * constr) list | HintsCutEntry of hints_path | HintsUnfoldEntry of evaluable_global_reference list | HintsTransparencyEntry of evaluable_global_reference list * bool | HintsExternEntry of int * (patvar list * constr_pattern) option * Tacexpr.glob_tactic_expr val searchtable_map : hint_db_name -> hint_db val searchtable_add : (hint_db_name * hint_db) -> unit (** [create_hint_db local name st use_dn]. [st] is a transparency state for unification using this db [use_dn] switches the use of the discrimination net for all hints and patterns. *) val create_hint_db : bool -> hint_db_name -> transparent_state -> bool -> unit val remove_hints : bool -> hint_db_name list -> global_reference list -> unit val current_db_names : unit -> hint_db_name list val interp_hints : hints_expr -> hints_entry val add_hints : locality_flag -> hint_db_name list -> hints_entry -> unit val prepare_hint : env -> open_constr -> constr val print_searchtable : unit -> unit val print_applicable_hint : unit -> unit val print_hint_ref : global_reference -> unit val print_hint_db_by_name : hint_db_name -> unit val print_hint_db : Hint_db.t -> unit (** [make_exact_entry pri (c, ctyp)]. [c] is the term given as an exact proof to solve the goal; [ctyp] is the type of [c]. *) val make_exact_entry : evar_map -> int option -> ?name:hints_path_atom -> constr * constr -> hint_entry (** [make_apply_entry (eapply,hnf,verbose) pri (c,cty)]. [eapply] is true if this hint will be used only with EApply; [hnf] should be true if we should expand the head of cty before searching for products; [c] is the term given as an exact proof to solve the goal; [cty] is the type of [c]. *) val make_apply_entry : env -> evar_map -> bool * bool * bool -> int option -> ?name:hints_path_atom -> constr * constr -> hint_entry (** A constr which is Hint'ed will be: - (1) used as an Exact, if it does not start with a product - (2) used as an Apply, if its HNF starts with a product, and has no missing arguments. - (3) used as an EApply, if its HNF starts with a product, and has missing arguments. *) val make_resolves : env -> evar_map -> bool * bool * bool -> int option -> ?name:hints_path_atom -> constr -> hint_entry list (** [make_resolve_hyp hname htyp]. used to add an hypothesis to the local hint database; Never raises a user exception; If the hyp cannot be used as a Hint, the empty list is returned. 
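Illustratively, with a hypothetical hypothesis name, [H : forall x, P x -> Q x]
   typically yields apply-style entries while [H : Q 0] yields an exact-style
   entry, following the rules stated for [make_resolves] above.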
*) val make_resolve_hyp : env -> evar_map -> named_declaration -> hint_entry list (** [make_extern pri pattern tactic_expr] *) val make_extern : int -> constr_pattern option -> Tacexpr.glob_tactic_expr -> hint_entry val set_extern_interp : (patvar_map -> Tacexpr.glob_tactic_expr -> tactic) -> unit val set_extern_intern_tac : (patvar list -> Tacexpr.raw_tactic_expr -> Tacexpr.glob_tactic_expr) -> unit val set_extern_subst_tactic : (substitution -> Tacexpr.glob_tactic_expr -> Tacexpr.glob_tactic_expr) -> unit (** Create a Hint database from the pairs (name, constr). Useful to take the current goal hypotheses as hints; Boolean tells if lemmas with evars are allowed *) val make_local_hint_db : ?ts:transparent_state -> bool -> open_constr list -> goal sigma -> hint_db val priority : ('a * pri_auto_tactic) list -> ('a * pri_auto_tactic) list val default_search_depth : int ref val auto_unif_flags : Unification.unify_flags (** Try unification with the precompiled clause, then use registered Apply *) val unify_resolve_nodelta : (constr * clausenv) -> tactic val unify_resolve : Unification.unify_flags -> (constr * clausenv) -> tactic (** [ConclPattern concl pat tacast]: if the term concl matches the pattern pat, (in sense of [Pattern.somatches], then replace [?1] [?2] metavars in tacast by the right values to build a tactic *) val conclPattern : constr -> constr_pattern option -> Tacexpr.glob_tactic_expr -> tactic (** The Auto tactic *) (** The use of the "core" database can be de-activated by passing "nocore" amongst the databases. *) val make_db_list : hint_db_name list -> hint_db list val auto : ?debug:Tacexpr.debug -> int -> open_constr list -> hint_db_name list -> tactic (** Auto with more delta. *) val new_auto : ?debug:Tacexpr.debug -> int -> open_constr list -> hint_db_name list -> tactic (** auto with default search depth and with the hint database "core" *) val default_auto : tactic (** auto with all hint databases except the "v62" compatibility database *) val full_auto : ?debug:Tacexpr.debug -> int -> open_constr list -> tactic (** auto with all hint databases except the "v62" compatibility database and doing delta *) val new_full_auto : ?debug:Tacexpr.debug -> int -> open_constr list -> tactic (** auto with default search depth and with all hint databases except the "v62" compatibility database *) val default_full_auto : tactic (** The generic form of auto (second arg [None] means all bases) *) val gen_auto : ?debug:Tacexpr.debug -> int option -> open_constr list -> hint_db_name list option -> tactic (** The hidden version of auto *) val h_auto : ?debug:Tacexpr.debug -> int option -> open_constr list -> hint_db_name list option -> tactic (** Trivial *) val trivial : ?debug:Tacexpr.debug -> open_constr list -> hint_db_name list -> tactic val gen_trivial : ?debug:Tacexpr.debug -> open_constr list -> hint_db_name list option -> tactic val full_trivial : ?debug:Tacexpr.debug -> open_constr list -> tactic val h_trivial : ?debug:Tacexpr.debug -> open_constr list -> hint_db_name list option -> tactic val pr_autotactic : 'a auto_tactic -> Pp.std_ppcmds (** Hook for changing the initialization of auto *) val add_auto_init : (unit -> unit) -> unit coq-8.4pl2/tactics/tactics.mllib0000640000175000001440000000027711741616026015746 0ustar notinusersDn Termdn Btermdn Nbtermdn Tacticals Hipattern Ind_tables Eqschemes Elimschemes Tactics Hiddentac Elim Auto Equality Contradiction Inv Leminv Tacinterp Evar_tactics Autorewrite Tactic_option 
coq-8.4pl2/tactics/extratactics.ml40000640000175000001440000006452512121620060016377 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ admit_as_an_axiom ] END let replace_in_clause_maybe_by (sigma1,c1) c2 in_hyp tac = Refiner.tclWITHHOLES false (replace_in_clause_maybe_by c1 c2 (glob_in_arg_hyp_to_clause in_hyp)) sigma1 (Option.map Tacinterp.eval_tactic tac) let replace_multi_term dir_opt (sigma,c) in_hyp = Refiner.tclWITHHOLES false (replace_multi_term dir_opt c) sigma (glob_in_arg_hyp_to_clause in_hyp) TACTIC EXTEND replace ["replace" open_constr(c1) "with" constr(c2) in_arg_hyp(in_hyp) by_arg_tac(tac) ] -> [ replace_in_clause_maybe_by c1 c2 in_hyp tac ] END TACTIC EXTEND replace_term_left [ "replace" "->" open_constr(c) in_arg_hyp(in_hyp) ] -> [ replace_multi_term (Some true) c in_hyp] END TACTIC EXTEND replace_term_right [ "replace" "<-" open_constr(c) in_arg_hyp(in_hyp) ] -> [replace_multi_term (Some false) c in_hyp] END TACTIC EXTEND replace_term [ "replace" open_constr(c) in_arg_hyp(in_hyp) ] -> [ replace_multi_term None c in_hyp ] END let induction_arg_of_quantified_hyp = function | AnonHyp n -> ElimOnAnonHyp n | NamedHyp id -> ElimOnIdent (Util.dummy_loc,id) (* Versions *_main must come first!! so that "1" is interpreted as a ElimOnAnonHyp and not as a "constr", and "id" is interpreted as a ElimOnIdent and not as "constr" *) let elimOnConstrWithHoles tac with_evars c = Refiner.tclWITHHOLES with_evars (tac with_evars) c.sigma (Some (ElimOnConstr c.it)) TACTIC EXTEND simplify_eq_main | [ "simplify_eq" constr_with_bindings(c) ] -> [ elimOnConstrWithHoles dEq false c ] END TACTIC EXTEND simplify_eq [ "simplify_eq" ] -> [ dEq false None ] | [ "simplify_eq" quantified_hypothesis(h) ] -> [ dEq false (Some (induction_arg_of_quantified_hyp h)) ] END TACTIC EXTEND esimplify_eq_main | [ "esimplify_eq" constr_with_bindings(c) ] -> [ elimOnConstrWithHoles dEq true c ] END TACTIC EXTEND esimplify_eq | [ "esimplify_eq" ] -> [ dEq true None ] | [ "esimplify_eq" quantified_hypothesis(h) ] -> [ dEq true (Some (induction_arg_of_quantified_hyp h)) ] END TACTIC EXTEND discriminate_main | [ "discriminate" constr_with_bindings(c) ] -> [ elimOnConstrWithHoles discr_tac false c ] END TACTIC EXTEND discriminate | [ "discriminate" ] -> [ discr_tac false None ] | [ "discriminate" quantified_hypothesis(h) ] -> [ discr_tac false (Some (induction_arg_of_quantified_hyp h)) ] END TACTIC EXTEND ediscriminate_main | [ "ediscriminate" constr_with_bindings(c) ] -> [ elimOnConstrWithHoles discr_tac true c ] END TACTIC EXTEND ediscriminate | [ "ediscriminate" ] -> [ discr_tac true None ] | [ "ediscriminate" quantified_hypothesis(h) ] -> [ discr_tac true (Some (induction_arg_of_quantified_hyp h)) ] END let h_discrHyp id gl = h_discriminate_main {it = Term.mkVar id,NoBindings; sigma = Refiner.project gl} gl TACTIC EXTEND injection_main | [ "injection" constr_with_bindings(c) ] -> [ elimOnConstrWithHoles (injClause []) false c ] END TACTIC EXTEND injection | [ "injection" ] -> [ injClause [] false None ] | [ "injection" quantified_hypothesis(h) ] -> [ injClause [] false (Some (induction_arg_of_quantified_hyp h)) ] END TACTIC EXTEND einjection_main | [ "einjection" constr_with_bindings(c) ] -> [ elimOnConstrWithHoles (injClause []) true c ] END TACTIC EXTEND einjection | [ "einjection" ] -> [ injClause [] true None ] | [ "einjection" quantified_hypothesis(h) ] -> [ injClause [] true (Some 
(induction_arg_of_quantified_hyp h)) ] END TACTIC EXTEND injection_as_main | [ "injection" constr_with_bindings(c) "as" simple_intropattern_list(ipat)] -> [ elimOnConstrWithHoles (injClause ipat) false c ] END TACTIC EXTEND injection_as | [ "injection" "as" simple_intropattern_list(ipat)] -> [ injClause ipat false None ] | [ "injection" quantified_hypothesis(h) "as" simple_intropattern_list(ipat) ] -> [ injClause ipat false (Some (induction_arg_of_quantified_hyp h)) ] END TACTIC EXTEND einjection_as_main | [ "einjection" constr_with_bindings(c) "as" simple_intropattern_list(ipat)] -> [ elimOnConstrWithHoles (injClause ipat) true c ] END TACTIC EXTEND einjection_as | [ "einjection" "as" simple_intropattern_list(ipat)] -> [ injClause ipat true None ] | [ "einjection" quantified_hypothesis(h) "as" simple_intropattern_list(ipat) ] -> [ injClause ipat true (Some (induction_arg_of_quantified_hyp h)) ] END let h_injHyp id gl = h_injection_main { it = Term.mkVar id,NoBindings; sigma = Refiner.project gl } gl TACTIC EXTEND dependent_rewrite | [ "dependent" "rewrite" orient(b) constr(c) ] -> [ rewriteInConcl b c ] | [ "dependent" "rewrite" orient(b) constr(c) "in" hyp(id) ] -> [ rewriteInHyp b c id ] END TACTIC EXTEND cut_rewrite | [ "cutrewrite" orient(b) constr(eqn) ] -> [ cutRewriteInConcl b eqn ] | [ "cutrewrite" orient(b) constr(eqn) "in" hyp(id) ] -> [ cutRewriteInHyp b eqn id ] END (**********************************************************************) (* Contradiction *) open Contradiction TACTIC EXTEND absurd [ "absurd" constr(c) ] -> [ absurd c ] END let onSomeWithHoles tac = function | None -> tac None | Some c -> Refiner.tclWITHHOLES false tac c.sigma (Some c.it) TACTIC EXTEND contradiction [ "contradiction" constr_with_bindings_opt(c) ] -> [ onSomeWithHoles contradiction c ] END (**********************************************************************) (* AutoRewrite *) open Autorewrite let pr_orient _prc _prlc _prt = function | true -> Pp.mt () | false -> Pp.str " <-" let pr_orient_string _prc _prlc _prt (orient, s) = pr_orient _prc _prlc _prt orient ++ Pp.spc () ++ Pp.str s ARGUMENT EXTEND orient_string TYPED AS (bool * string) PRINTED BY pr_orient_string | [ orient(r) preident(i) ] -> [ r, i ] END TACTIC EXTEND autorewrite | [ "autorewrite" "with" ne_preident_list(l) in_arg_hyp(cl) ] -> [ auto_multi_rewrite l (glob_in_arg_hyp_to_clause cl) ] | [ "autorewrite" "with" ne_preident_list(l) in_arg_hyp(cl) "using" tactic(t) ] -> [ let cl = glob_in_arg_hyp_to_clause cl in auto_multi_rewrite_with (Tacinterp.eval_tactic t) l cl ] END TACTIC EXTEND autorewrite_star | [ "autorewrite" "*" "with" ne_preident_list(l) in_arg_hyp(cl) ] -> [ auto_multi_rewrite ~conds:AllMatches l (glob_in_arg_hyp_to_clause cl) ] | [ "autorewrite" "*" "with" ne_preident_list(l) in_arg_hyp(cl) "using" tactic(t) ] -> [ let cl = glob_in_arg_hyp_to_clause cl in auto_multi_rewrite_with ~conds:AllMatches (Tacinterp.eval_tactic t) l cl ] END (**********************************************************************) (* Rewrite star *) let rewrite_star clause orient occs (sigma,c) (tac : glob_tactic_expr option) = let tac' = Option.map (fun t -> Tacinterp.eval_tactic t, FirstSolved) tac in Refiner. 
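(* [rewrite_star] backs the [rewrite *] tactic entries below: it rewrites with
   the given orientation at the selected occurrences, in the conclusion or in a
   named hypothesis, optionally running a tactic on the generated side goals. *)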
tclWITHHOLES false (general_rewrite_ebindings_clause clause orient occs ?tac:tac' true true (c,NoBindings)) sigma true let occurrences_of = function | n::_ as nl when n < 0 -> (false,List.map abs nl) | nl -> if List.exists (fun n -> n < 0) nl then error "Illegal negative occurrence number."; (true,nl) TACTIC EXTEND rewrite_star | [ "rewrite" "*" orient(o) open_constr(c) "in" hyp(id) "at" occurrences(occ) by_arg_tac(tac) ] -> [ rewrite_star (Some id) o (occurrences_of occ) c tac ] | [ "rewrite" "*" orient(o) open_constr(c) "at" occurrences(occ) "in" hyp(id) by_arg_tac(tac) ] -> [ rewrite_star (Some id) o (occurrences_of occ) c tac ] | [ "rewrite" "*" orient(o) open_constr(c) "in" hyp(id) by_arg_tac(tac) ] -> [ rewrite_star (Some id) o Termops.all_occurrences c tac ] | [ "rewrite" "*" orient(o) open_constr(c) "at" occurrences(occ) by_arg_tac(tac) ] -> [ rewrite_star None o (occurrences_of occ) c tac ] | [ "rewrite" "*" orient(o) open_constr(c) by_arg_tac(tac) ] -> [ rewrite_star None o Termops.all_occurrences c tac ] END (**********************************************************************) (* Hint Rewrite *) let add_rewrite_hint name ort t lcsr = let env = Global.env() and sigma = Evd.empty in let f c = Topconstr.constr_loc c, Constrintern.interp_constr sigma env c, ort, t in add_rew_rules name (List.map f lcsr) VERNAC COMMAND EXTEND HintRewrite [ "Hint" "Rewrite" orient(o) ne_constr_list(l) ":" preident(b) ] -> [ add_rewrite_hint b o (Tacexpr.TacId []) l ] | [ "Hint" "Rewrite" orient(o) ne_constr_list(l) "using" tactic(t) ":" preident(b) ] -> [ add_rewrite_hint b o t l ] | [ "Hint" "Rewrite" orient(o) ne_constr_list(l) ] -> [ add_rewrite_hint "core" o (Tacexpr.TacId []) l ] | [ "Hint" "Rewrite" orient(o) ne_constr_list(l) "using" tactic(t) ] -> [ add_rewrite_hint "core" o t l ] END (**********************************************************************) (* Hint Resolve *) open Term open Coqlib let project_hint pri l2r c = let env = Global.env() in let c = Constrintern.interp_constr Evd.empty env c in let t = Retyping.get_type_of env Evd.empty c in let t = Tacred.reduce_to_quantified_ref env Evd.empty (Lazy.force coq_iff_ref) t in let sign,ccl = decompose_prod_assum t in let (a,b) = match snd (decompose_app ccl) with | [a;b] -> (a,b) | _ -> assert false in let p = if l2r then build_coq_iff_left_proj () else build_coq_iff_right_proj () in let c = Reductionops.whd_beta Evd.empty (mkApp (c,Termops.extended_rel_vect 0 sign)) in let c = it_mkLambda_or_LetIn (mkApp (p,[|mkArrow a (lift 1 b);mkArrow b (lift 1 a);c|])) sign in (pri,true,Auto.PathAny,c) let add_hints_iff l2r lc n bl = Auto.add_hints true bl (Auto.HintsResolveEntry (List.map (project_hint n l2r) lc)) VERNAC COMMAND EXTEND HintResolveIffLR [ "Hint" "Resolve" "->" ne_constr_list(lc) natural_opt(n) ":" preident_list(bl) ] -> [ add_hints_iff true lc n bl ] | [ "Hint" "Resolve" "->" ne_constr_list(lc) natural_opt(n) ] -> [ add_hints_iff true lc n ["core"] ] END VERNAC COMMAND EXTEND HintResolveIffRL [ "Hint" "Resolve" "<-" ne_constr_list(lc) natural_opt(n) ":" preident_list(bl) ] -> [ add_hints_iff false lc n bl ] | [ "Hint" "Resolve" "<-" ne_constr_list(lc) natural_opt(n) ] -> [ add_hints_iff false lc n ["core"] ] END (**********************************************************************) (* Refine *) open Refine TACTIC EXTEND refine [ "refine" casted_open_constr(c) ] -> [ refine c ] END let refine_tac = h_refine (**********************************************************************) (* Inversion lemmas (Leminv) *) open Inv open 
Leminv VERNAC COMMAND EXTEND DeriveInversionClear [ "Derive" "Inversion_clear" ident(na) hyp(id) ] -> [ inversion_lemma_from_goal 1 na id Term.prop_sort false inv_clear_tac ] | [ "Derive" "Inversion_clear" natural(n) ident(na) hyp(id) ] -> [ inversion_lemma_from_goal n na id Term.prop_sort false inv_clear_tac ] | [ "Derive" "Inversion_clear" ident(na) "with" constr(c) "Sort" sort(s) ] -> [ add_inversion_lemma_exn na c s false inv_clear_tac ] | [ "Derive" "Inversion_clear" ident(na) "with" constr(c) ] -> [ add_inversion_lemma_exn na c (Glob_term.GProp Term.Null) false inv_clear_tac ] END open Term open Glob_term VERNAC COMMAND EXTEND DeriveInversion | [ "Derive" "Inversion" ident(na) "with" constr(c) "Sort" sort(s) ] -> [ add_inversion_lemma_exn na c s false inv_tac ] | [ "Derive" "Inversion" ident(na) "with" constr(c) ] -> [ add_inversion_lemma_exn na c (GProp Null) false inv_tac ] | [ "Derive" "Inversion" ident(na) hyp(id) ] -> [ inversion_lemma_from_goal 1 na id Term.prop_sort false inv_tac ] | [ "Derive" "Inversion" natural(n) ident(na) hyp(id) ] -> [ inversion_lemma_from_goal n na id Term.prop_sort false inv_tac ] END VERNAC COMMAND EXTEND DeriveDependentInversion | [ "Derive" "Dependent" "Inversion" ident(na) "with" constr(c) "Sort" sort(s) ] -> [ add_inversion_lemma_exn na c s true dinv_tac ] END VERNAC COMMAND EXTEND DeriveDependentInversionClear | [ "Derive" "Dependent" "Inversion_clear" ident(na) "with" constr(c) "Sort" sort(s) ] -> [ add_inversion_lemma_exn na c s true dinv_clear_tac ] END (**********************************************************************) (* Subst *) TACTIC EXTEND subst | [ "subst" ne_var_list(l) ] -> [ subst l ] | [ "subst" ] -> [ fun gl -> subst_all gl ] END let simple_subst_tactic_flags = { only_leibniz = true; rewrite_dependent_proof = false } TACTIC EXTEND simple_subst | [ "simple" "subst" ] -> [ subst_all ~flags:simple_subst_tactic_flags ] END open Evar_tactics (**********************************************************************) (* Evar creation *) TACTIC EXTEND evar [ "evar" "(" ident(id) ":" lconstr(typ) ")" ] -> [ let_evar (Name id) typ ] | [ "evar" constr(typ) ] -> [ let_evar Anonymous typ ] END open Tacexpr open Tacticals TACTIC EXTEND instantiate [ "instantiate" "(" integer(i) ":=" glob(c) ")" hloc(hl) ] -> [instantiate i c hl ] | [ "instantiate" ] -> [ tclNORMEVAR ] END (**********************************************************************) (** Nijmegen "step" tactic for setoid rewriting *) open Tactics open Tactics open Libnames open Glob_term open Summary open Libobject open Lib (* Registered lemmas are expected to be of the form x R y -> y == z -> x R z (in the right table) x R y -> x == z -> z R y (in the left table) *) let transitivity_right_table = ref [] let transitivity_left_table = ref [] (* [step] tries to apply a rewriting lemma; then apply [tac] intended to complete to proof of the last hypothesis (assumed to state an equality) *) let step left x tac = let l = List.map (fun lem -> tclTHENLAST (apply_with_bindings (lem, ImplicitBindings [x])) tac) !(if left then transitivity_left_table else transitivity_right_table) in tclFIRST l (* Main function to push lemmas in persistent environment *) let cache_transitivity_lemma (_,(left,lem)) = if left then transitivity_left_table := lem :: !transitivity_left_table else transitivity_right_table := lem :: !transitivity_right_table let subst_transitivity_lemma (subst,(b,ref)) = (b,subst_mps subst ref) let inTransitivity : bool * constr -> obj = declare_object {(default_object 
"TRANSITIVITY-STEPS") with cache_function = cache_transitivity_lemma; open_function = (fun i o -> if i=1 then cache_transitivity_lemma o); subst_function = subst_transitivity_lemma; classify_function = (fun o -> Substitute o) } (* Synchronisation with reset *) let freeze () = !transitivity_left_table, !transitivity_right_table let unfreeze (l,r) = transitivity_left_table := l; transitivity_right_table := r let init () = transitivity_left_table := []; transitivity_right_table := [] let _ = declare_summary "transitivity-steps" { freeze_function = freeze; unfreeze_function = unfreeze; init_function = init } (* Main entry points *) let add_transitivity_lemma left lem = let lem' = Constrintern.interp_constr Evd.empty (Global.env ()) lem in add_anonymous_leaf (inTransitivity (left,lem')) (* Vernacular syntax *) TACTIC EXTEND stepl | ["stepl" constr(c) "by" tactic(tac) ] -> [ step true c (Tacinterp.eval_tactic tac) ] | ["stepl" constr(c) ] -> [ step true c tclIDTAC ] END TACTIC EXTEND stepr | ["stepr" constr(c) "by" tactic(tac) ] -> [ step false c (Tacinterp.eval_tactic tac) ] | ["stepr" constr(c) ] -> [ step false c tclIDTAC ] END VERNAC COMMAND EXTEND AddStepl | [ "Declare" "Left" "Step" constr(t) ] -> [ add_transitivity_lemma true t ] END VERNAC COMMAND EXTEND AddStepr | [ "Declare" "Right" "Step" constr(t) ] -> [ add_transitivity_lemma false t ] END VERNAC COMMAND EXTEND ImplicitTactic | [ "Declare" "Implicit" "Tactic" tactic(tac) ] -> [ Pfedit.declare_implicit_tactic (Tacinterp.interp tac) ] END (**********************************************************************) (*spiwack : Vernac commands for retroknowledge *) VERNAC COMMAND EXTEND RetroknowledgeRegister | [ "Register" constr(c) "as" retroknowledge_field(f) "by" constr(b)] -> [ let tc = Constrintern.interp_constr Evd.empty (Global.env ()) c in let tb = Constrintern.interp_constr Evd.empty (Global.env ()) b in Global.register f tc tb ] END (**********************************************************************) (* sozeau: abs/gen for induction on instantiated dependent inductives, using "Ford" induction as defined by Conor McBride *) TACTIC EXTEND generalize_eqs | ["generalize_eqs" hyp(id) ] -> [ abstract_generalize ~generalize_vars:false id ] END TACTIC EXTEND dep_generalize_eqs | ["dependent" "generalize_eqs" hyp(id) ] -> [ abstract_generalize ~generalize_vars:false ~force_dep:true id ] END TACTIC EXTEND generalize_eqs_vars | ["generalize_eqs_vars" hyp(id) ] -> [ abstract_generalize ~generalize_vars:true id ] END TACTIC EXTEND dep_generalize_eqs_vars | ["dependent" "generalize_eqs_vars" hyp(id) ] -> [ abstract_generalize ~force_dep:true ~generalize_vars:true id ] END (** Tactic to automatically simplify hypotheses of the form [Π Δ, x_i = t_i -> T] where [t_i] is closed w.r.t. Δ. Such hypotheses are automatically generated during dependent induction. For internal use. 
*) TACTIC EXTEND specialize_eqs [ "specialize_eqs" hyp(id) ] -> [ specialize_eqs id ] END (**********************************************************************) (* A tactic that considers a given occurrence of [c] in [t] and *) (* abstract the minimal set of all the occurrences of [c] so that the *) (* abstraction [fun x -> t[x/c]] is well-typed *) (* *) (* Contributed by Chung-Kil Hur (Winter 2009) *) (**********************************************************************) let subst_var_with_hole occ tid t = let occref = if occ > 0 then ref occ else Termops.error_invalid_occurrence [occ] in let locref = ref 0 in let rec substrec = function | GVar (_,id) as x -> if id = tid then (decr occref; if !occref = 0 then x else (incr locref; GHole (make_loc (!locref,0),Evd.QuestionMark(Evd.Define true)))) else x | c -> map_glob_constr_left_to_right substrec c in let t' = substrec t in if !occref > 0 then Termops.error_invalid_occurrence [occ] else t' let subst_hole_with_term occ tc t = let locref = ref 0 in let occref = ref occ in let rec substrec = function | GHole (_,Evd.QuestionMark(Evd.Define true)) -> decr occref; if !occref = 0 then tc else (incr locref; GHole (make_loc (!locref,0),Evd.QuestionMark(Evd.Define true))) | c -> map_glob_constr_left_to_right substrec c in substrec t open Tacmach let out_arg = function | ArgVar _ -> anomaly "Unevaluated or_var variable" | ArgArg x -> x let hResolve id c occ t gl = let sigma = project gl in let env = Termops.clear_named_body id (pf_env gl) in let env_ids = Termops.ids_of_context env in let env_names = Termops.names_of_rel_context env in let c_raw = Detyping.detype true env_ids env_names c in let t_raw = Detyping.detype true env_ids env_names t in let rec resolve_hole t_hole = try Pretyping.Default.understand sigma env t_hole with | Loc.Exc_located (loc,Pretype_errors.PretypeError (_,_,Pretype_errors.UnsolvableImplicit _)) -> resolve_hole (subst_hole_with_term (fst (unloc loc)) c_raw t_hole) in let t_constr = resolve_hole (subst_var_with_hole occ id t_raw) in let t_constr_type = Retyping.get_type_of env sigma t_constr in change_in_concl None (mkLetIn (Anonymous,t_constr,t_constr_type,pf_concl gl)) gl let hResolve_auto id c t gl = let rec resolve_auto n = try hResolve id c n t gl with | UserError _ as e -> raise e | e when Errors.noncritical e -> resolve_auto (n+1) in resolve_auto 1 TACTIC EXTEND hresolve_core | [ "hresolve_core" "(" ident(id) ":=" constr(c) ")" "at" int_or_var(occ) "in" constr(t) ] -> [ hResolve id c (out_arg occ) t ] | [ "hresolve_core" "(" ident(id) ":=" constr(c) ")" "in" constr(t) ] -> [ hResolve_auto id c t ] END (** hget_evar *) open Evar_refiner open Sign let hget_evar n gl = let sigma = project gl in let evl = evar_list sigma (pf_concl gl) in if List.length evl < n then error "Not enough uninstantiated existential variables."; if n <= 0 then error "Incorrect existential variable index."; let ev = List.nth evl (n-1) in let ev_type = existential_type sigma ev in change_in_concl None (mkLetIn (Anonymous,mkEvar ev,ev_type,pf_concl gl)) gl TACTIC EXTEND hget_evar | [ "hget_evar" int_or_var(n) ] -> [ hget_evar (out_arg n) ] END (**********************************************************************) (**********************************************************************) (* A tactic that reduces one match t with ... by doing destruct t. *) (* if t is not a variable, the tactic does *) (* case_eq t;intros ... heq;rewrite heq in *|-. (but heq itself is *) (* preserved). 
*) (* Contributed by Julien Forest and Pierre Courtieu (july 2010) *) (**********************************************************************) exception Found of tactic let rewrite_except h g = tclMAP (fun id -> if id = h then tclIDTAC else tclTRY (Equality.general_rewrite_in true Termops.all_occurrences true true id (mkVar h) false)) (Tacmach.pf_ids_of_hyps g) g let refl_equal = let coq_base_constant s = Coqlib.gen_constant_in_modules "RecursiveDefinition" (Coqlib.init_modules @ [["Coq";"Arith";"Le"];["Coq";"Arith";"Lt"]]) s in function () -> (coq_base_constant "eq_refl") (* This is simply an implementation of the case_eq tactic. this code should be replaced by a call to the tactic but I don't know how to call it before it is defined. *) let mkCaseEq a : tactic = (fun g -> let type_of_a = Tacmach.pf_type_of g a in tclTHENLIST [Hiddentac.h_generalize [mkApp(delayed_force refl_equal, [| type_of_a; a|])]; (fun g2 -> change_in_concl None (Tacred.pattern_occs [((false,[1]), a)] (Tacmach.pf_env g2) Evd.empty (Tacmach.pf_concl g2)) g2); simplest_case a] g);; let case_eq_intros_rewrite x g = let n = nb_prod (Tacmach.pf_concl g) in Pp.msgnl (Printer.pr_lconstr x); tclTHENLIST [ mkCaseEq x; (fun g -> let n' = nb_prod (Tacmach.pf_concl g) in let h = fresh_id (Tacmach.pf_ids_of_hyps g) (id_of_string "heq") g in tclTHENLIST [ (tclDO (n'-n-1) intro); Tacmach.introduction h; rewrite_except h] g ) ] g let rec find_a_destructable_match t = match kind_of_term t with | Case (_,_,x,_) when closed0 x -> if isVar x then (* TODO check there is no rel n. *) raise (Found (Tacinterp.eval_tactic(<:tactic>))) else let _ = Pp.msgnl (Printer.pr_lconstr x) in raise (Found (case_eq_intros_rewrite x)) | _ -> iter_constr find_a_destructable_match t let destauto t = try find_a_destructable_match t; error "No destructable match found" with Found tac -> tac let destauto_in id g = let ctype = Tacmach.pf_type_of g (mkVar id) in Pp.msgnl (Printer.pr_lconstr (mkVar id)); Pp.msgnl (Printer.pr_lconstr (ctype)); destauto ctype g TACTIC EXTEND destauto | [ "destauto" ] -> [ (fun g -> destauto (Tacmach.pf_concl g) g) ] | [ "destauto" "in" hyp(id) ] -> [ destauto_in id ] END (* ********************************************************************* *) TACTIC EXTEND constr_eq | [ "constr_eq" constr(x) constr(y) ] -> [ if eq_constr x y then tclIDTAC else tclFAIL 0 (str "Not equal") ] END TACTIC EXTEND is_evar | [ "is_evar" constr(x) ] -> [ match kind_of_term x with | Evar _ -> tclIDTAC | _ -> tclFAIL 0 (str "Not an evar") ] END let rec has_evar x = match kind_of_term x with | Evar _ -> true | Rel _ | Var _ | Meta _ | Sort _ | Const _ | Ind _ | Construct _ -> false | Cast (t1, _, t2) | Prod (_, t1, t2) | Lambda (_, t1, t2) -> has_evar t1 || has_evar t2 | LetIn (_, t1, t2, t3) -> has_evar t1 || has_evar t2 || has_evar t3 | App (t1, ts) -> has_evar t1 || has_evar_array ts | Case (_, t1, t2, ts) -> has_evar t1 || has_evar t2 || has_evar_array ts | Fix ((_, tr)) | CoFix ((_, tr)) -> has_evar_prec tr and has_evar_array x = array_exists has_evar x and has_evar_prec (_, ts1, ts2) = array_exists has_evar ts1 || array_exists has_evar ts2 TACTIC EXTEND has_evar | [ "has_evar" constr(x) ] -> [ if has_evar x then tclIDTAC else tclFAIL 0 (str "No evars") ] END TACTIC EXTEND is_hyp | [ "is_var" constr(x) ] -> [ match kind_of_term x with | Var _ -> tclIDTAC | _ -> tclFAIL 0 (str "Not a variable or hypothesis") ] END TACTIC EXTEND is_fix | [ "is_fix" constr(x) ] -> [ match kind_of_term x with | Fix _ -> Tacticals.tclIDTAC | _ -> Tacticals.tclFAIL 0 
(Pp.str "not a fix definition") ] END;; (* Command to grab the evars left unresolved at the end of a proof. *) (* spiwack: I put it in extratactics because it is somewhat tied with the semantics of the LCF-style tactics, hence with the classic tactic mode. *) VERNAC COMMAND EXTEND GrabEvars [ "Grab" "Existential" "Variables" ] -> [ let p = Proof_global.give_me_the_proof () in Proof.V82.grab_evars p; Flags.if_verbose (fun () -> Pp.msg (Printer.pr_open_subgoals ())) () ] END coq-8.4pl2/tactics/hipattern.ml40000640000175000001440000004027412122663343015706 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a option type testing_function = constr -> bool let mkmeta n = Nameops.make_ident "X" (Some n) let meta1 = mkmeta 1 let meta2 = mkmeta 2 let meta3 = mkmeta 3 let meta4 = mkmeta 4 let op2bool = function Some _ -> true | None -> false let match_with_non_recursive_type t = match kind_of_term t with | App _ -> let (hdapp,args) = decompose_app t in (match kind_of_term hdapp with | Ind ind -> if not (Global.lookup_mind (fst ind)).mind_finite then Some (hdapp,args) else None | _ -> None) | _ -> None let is_non_recursive_type t = op2bool (match_with_non_recursive_type t) (* Test dependencies *) (* NB: we consider also the let-in case in the following function, since they may appear in types of inductive constructors (see #2629) *) let rec has_nodep_prod_after n c = match kind_of_term c with | Prod (_,_,b) | LetIn (_,_,_,b) -> ( n>0 || not (dependent (mkRel 1) b)) && (has_nodep_prod_after (n-1) b) | _ -> true let has_nodep_prod = has_nodep_prod_after 0 (* A general conjunctive type is a non-recursive with-no-indices inductive type with only one constructor and no dependencies between argument; it is strict if it has the form "Inductive I A1 ... An := C (_:A1) ... 
(_:An)" *) (* style: None = record; Some false = conjunction; Some true = strict conj *) let match_with_one_constructor style allow_rec t = let (hdapp,args) = decompose_app t in match kind_of_term hdapp with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in if (Array.length mip.mind_consnames = 1) && (allow_rec or not (mis_is_recursive (ind,mib,mip))) && (mip.mind_nrealargs = 0) then if style = Some true (* strict conjunction *) then let ctx = (prod_assum (snd (decompose_prod_n_assum mib.mind_nparams mip.mind_nf_lc.(0)))) in if List.for_all (fun (_,b,c) -> b=None && isRel c && destRel c = mib.mind_nparams) ctx then Some (hdapp,args) else None else let ctyp = prod_applist mip.mind_nf_lc.(0) args in let cargs = List.map pi3 ((prod_assum ctyp)) in if style <> Some false || has_nodep_prod ctyp then (* Record or non strict conjunction *) Some (hdapp,List.rev cargs) else None else None | _ -> None let match_with_conjunction ?(strict=false) t = match_with_one_constructor (Some strict) false t let match_with_record t = match_with_one_constructor None false t let is_conjunction ?(strict=false) t = op2bool (match_with_conjunction ~strict t) let is_record t = op2bool (match_with_record t) let match_with_tuple t = let t = match_with_one_constructor None true t in Option.map (fun (hd,l) -> let ind = destInd hd in let (mib,mip) = Global.lookup_inductive ind in let isrec = mis_is_recursive (ind,mib,mip) in (hd,l,isrec)) t let is_tuple t = op2bool (match_with_tuple t) (* A general disjunction type is a non-recursive with-no-indices inductive type with of which all constructors have a single argument; it is strict if it has the form "Inductive I A1 ... An := C1 (_:A1) | ... | Cn : (_:An)" *) let test_strict_disjunction n lc = array_for_all_i (fun i c -> match (prod_assum (snd (decompose_prod_n_assum n c))) with | [_,None,c] -> isRel c && destRel c = (n - i) | _ -> false) 0 lc let match_with_disjunction ?(strict=false) t = let (hdapp,args) = decompose_app t in match kind_of_term hdapp with | Ind ind -> let car = mis_constr_nargs ind in let (mib,mip) = Global.lookup_inductive ind in if array_for_all (fun ar -> ar = 1) car && not (mis_is_recursive (ind,mib,mip)) && (mip.mind_nrealargs = 0) then if strict then if test_strict_disjunction mib.mind_nparams mip.mind_nf_lc then Some (hdapp,args) else None else let cargs = Array.map (fun ar -> pi2 (destProd (prod_applist ar args))) mip.mind_nf_lc in Some (hdapp,Array.to_list cargs) else None | _ -> None let is_disjunction ?(strict=false) t = op2bool (match_with_disjunction ~strict t) (* An empty type is an inductive type, possible with indices, that has no constructors *) let match_with_empty_type t = let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in let nconstr = Array.length mip.mind_consnames in if nconstr = 0 then Some hdapp else None | _ -> None let is_empty_type t = op2bool (match_with_empty_type t) (* This filters inductive types with one constructor with no arguments; Parameters and indices are allowed *) let match_with_unit_or_eq_type t = let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in let constr_types = mip.mind_nf_lc in let nconstr = Array.length mip.mind_consnames in let zero_args c = nb_prod c = mib.mind_nparams in if nconstr = 1 && zero_args constr_types.(0) then Some hdapp else None | _ -> None let is_unit_or_eq_type t = op2bool (match_with_unit_or_eq_type t) (* A unit type is an 
inductive type with no indices but possibly (useless) parameters, and that has no arguments in its unique constructor *) let is_unit_type t = match match_with_conjunction t with | Some (_,t) when List.length t = 0 -> true | _ -> false (* Checks if a given term is an application of an inductive binary relation R, so that R has only one constructor establishing its reflexivity. *) type equation_kind = | MonomorphicLeibnizEq of constr * constr | PolymorphicLeibnizEq of constr * constr * constr | HeterogenousEq of constr * constr * constr * constr exception NoEquationFound let coq_refl_leibniz1_pattern = PATTERN [ forall x:_, _ x x ] let coq_refl_leibniz2_pattern = PATTERN [ forall A:_, forall x:A, _ A x x ] let coq_refl_jm_pattern = PATTERN [ forall A:_, forall x:A, _ A x A x ] open Libnames let match_with_equation t = if not (isApp t) then raise NoEquationFound; let (hdapp,args) = destApp t in match kind_of_term hdapp with | Ind ind -> if IndRef ind = glob_eq then Some (build_coq_eq_data()),hdapp, PolymorphicLeibnizEq(args.(0),args.(1),args.(2)) else if IndRef ind = glob_identity then Some (build_coq_identity_data()),hdapp, PolymorphicLeibnizEq(args.(0),args.(1),args.(2)) else if IndRef ind = glob_jmeq then Some (build_coq_jmeq_data()),hdapp, HeterogenousEq(args.(0),args.(1),args.(2),args.(3)) else let (mib,mip) = Global.lookup_inductive ind in let constr_types = mip.mind_nf_lc in let nconstr = Array.length mip.mind_consnames in if nconstr = 1 then if is_matching coq_refl_leibniz1_pattern constr_types.(0) then None, hdapp, MonomorphicLeibnizEq(args.(0),args.(1)) else if is_matching coq_refl_leibniz2_pattern constr_types.(0) then None, hdapp, PolymorphicLeibnizEq(args.(0),args.(1),args.(2)) else if is_matching coq_refl_jm_pattern constr_types.(0) then None, hdapp, HeterogenousEq(args.(0),args.(1),args.(2),args.(3)) else raise NoEquationFound else raise NoEquationFound | _ -> raise NoEquationFound let is_inductive_equality ind = let (mib,mip) = Global.lookup_inductive ind in let nconstr = Array.length mip.mind_consnames in nconstr = 1 && constructor_nrealargs (Global.env()) (ind,1) = 0 let match_with_equality_type t = let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind when is_inductive_equality ind -> Some (hdapp,args) | _ -> None let is_equality_type t = op2bool (match_with_equality_type t) let coq_arrow_pattern = PATTERN [ ?X1 -> ?X2 ] let match_arrow_pattern t = match matches coq_arrow_pattern t with | [(m1,arg);(m2,mind)] -> assert (m1=meta1 & m2=meta2); (arg, mind) | _ -> anomaly "Incorrect pattern matching" let match_with_nottype t = try let (arg,mind) = match_arrow_pattern t in if is_empty_type mind then Some (mind,arg) else None with PatternMatchingFailure -> None let is_nottype t = op2bool (match_with_nottype t) let match_with_forall_term c= match kind_of_term c with | Prod (nam,a,b) -> Some (nam,a,b) | _ -> None let is_forall_term c = op2bool (match_with_forall_term c) let match_with_imp_term c= match kind_of_term c with | Prod (_,a,b) when not (dependent (mkRel 1) b) ->Some (a,b) | _ -> None let is_imp_term c = op2bool (match_with_imp_term c) let match_with_nodep_ind t = let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in if Array.length (mib.mind_packets)>1 then None else let nodep_constr = has_nodep_prod_after mib.mind_nparams in if array_for_all nodep_constr mip.mind_nf_lc then let params= if mip.mind_nrealargs=0 then args else fst (list_chop mib.mind_nparams args) in Some 
(hdapp,params,mip.mind_nrealargs) else None | _ -> None let is_nodep_ind t=op2bool (match_with_nodep_ind t) let match_with_sigma_type t= let (hdapp,args) = decompose_app t in match (kind_of_term hdapp) with | Ind ind -> let (mib,mip) = Global.lookup_inductive ind in if (Array.length (mib.mind_packets)=1) && (mip.mind_nrealargs=0) && (Array.length mip.mind_consnames=1) && has_nodep_prod_after (mib.mind_nparams+1) mip.mind_nf_lc.(0) then (*allowing only 1 existential*) Some (hdapp,args) else None | _ -> None let is_sigma_type t=op2bool (match_with_sigma_type t) (***** Destructing patterns bound to some theory *) let rec first_match matcher = function | [] -> raise PatternMatchingFailure | (pat,build_set)::l -> try (build_set (),matcher pat) with PatternMatchingFailure -> first_match matcher l (*** Equality *) (* Patterns "(eq ?1 ?2 ?3)" and "(identity ?1 ?2 ?3)" *) let coq_eq_pattern_gen eq = lazy PATTERN [ %eq ?X1 ?X2 ?X3 ] let coq_eq_pattern = coq_eq_pattern_gen coq_eq_ref let coq_identity_pattern = coq_eq_pattern_gen coq_identity_ref let coq_jmeq_pattern = lazy PATTERN [ %coq_jmeq_ref ?X1 ?X2 ?X3 ?X4 ] let coq_eq_true_pattern = lazy PATTERN [ %coq_eq_true_ref ?X1 ] let match_eq eqn eq_pat = let pat = try Lazy.force eq_pat with e when Errors.noncritical e -> raise PatternMatchingFailure in match matches pat eqn with | [(m1,t);(m2,x);(m3,y)] -> assert (m1 = meta1 & m2 = meta2 & m3 = meta3); PolymorphicLeibnizEq (t,x,y) | [(m1,t);(m2,x);(m3,t');(m4,x')] -> assert (m1 = meta1 & m2 = meta2 & m3 = meta3 & m4 = meta4); HeterogenousEq (t,x,t',x') | _ -> anomaly "match_eq: an eq pattern should match 3 or 4 terms" let equalities = [coq_eq_pattern, build_coq_eq_data; coq_jmeq_pattern, build_coq_jmeq_data; coq_identity_pattern, build_coq_identity_data] let find_eq_data eqn = (* fails with PatternMatchingFailure *) first_match (match_eq eqn) equalities let extract_eq_args gl = function | MonomorphicLeibnizEq (e1,e2) -> let t = Tacmach.pf_type_of gl e1 in (t,e1,e2) | PolymorphicLeibnizEq (t,e1,e2) -> (t,e1,e2) | HeterogenousEq (t1,e1,t2,e2) -> if Tacmach.pf_conv_x gl t1 t2 then (t1,e1,e2) else raise PatternMatchingFailure let find_eq_data_decompose gl eqn = let (lbeq,eq_args) = find_eq_data eqn in (lbeq,extract_eq_args gl eq_args) let inversible_equalities = [coq_eq_pattern, build_coq_inversion_eq_data; coq_jmeq_pattern, build_coq_inversion_jmeq_data; coq_identity_pattern, build_coq_inversion_identity_data; coq_eq_true_pattern, build_coq_inversion_eq_true_data] let find_this_eq_data_decompose gl eqn = let (lbeq,eq_args) = try (*first_match (match_eq eqn) inversible_equalities*) find_eq_data eqn with PatternMatchingFailure -> errorlabstrm "" (str "No primitive equality found.") in let eq_args = try extract_eq_args gl eq_args with PatternMatchingFailure -> error "Don't know what to do with JMeq on arguments not of same type." in (lbeq,eq_args) open Tacmach open Tacticals let match_eq_nf gls eqn eq_pat = match pf_matches gls (Lazy.force eq_pat) eqn with | [(m1,t);(m2,x);(m3,y)] -> assert (m1 = meta1 & m2 = meta2 & m3 = meta3); (t,pf_whd_betadeltaiota gls x,pf_whd_betadeltaiota gls y) | _ -> anomaly "match_eq: an eq pattern should match 3 terms" let dest_nf_eq gls eqn = try snd (first_match (match_eq_nf gls eqn) equalities) with PatternMatchingFailure -> error "Not an equality." 
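(* Added illustrative sketch, not part of the original file: a client of this
   interface typically decomposes an equality hypothesis type [eqn] in a goal
   [gl] along these lines:

     let lbeq, (ty, lhs, rhs) = find_eq_data_decompose gl eqn in
     ...

   where [lbeq] packages the constants of the recognized equality (eq,
   identity or JMeq), [ty] is the carrier type and [lhs]/[rhs] are the equated
   terms; [PatternMatchingFailure] escapes when [eqn] is not such an equality. *)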
(*** Sigma-types *) (* Patterns "(existS ?1 ?2 ?3 ?4)" and "(existT ?1 ?2 ?3 ?4)" *) let coq_ex_pattern_gen ex = lazy PATTERN [ %ex ?X1 ?X2 ?X3 ?X4 ] let coq_existT_pattern = coq_ex_pattern_gen coq_existT_ref let coq_exist_pattern = coq_ex_pattern_gen coq_exist_ref let match_sigma ex ex_pat = match matches (Lazy.force ex_pat) ex with | [(m1,a);(m2,p);(m3,car);(m4,cdr)] -> assert (m1=meta1 & m2=meta2 & m3=meta3 & m4=meta4); (a,p,car,cdr) | _ -> anomaly "match_sigma: a successful sigma pattern should match 4 terms" let find_sigma_data_decompose ex = (* fails with PatternMatchingFailure *) first_match (match_sigma ex) [coq_existT_pattern, build_sigma_type; coq_exist_pattern, build_sigma] (* Pattern "(sig ?1 ?2)" *) let coq_sig_pattern = lazy PATTERN [ %coq_sig_ref ?X1 ?X2 ] let match_sigma t = match matches (Lazy.force coq_sig_pattern) t with | [(_,a); (_,p)] -> (a,p) | _ -> anomaly "Unexpected pattern" let is_matching_sigma t = is_matching (Lazy.force coq_sig_pattern) t (*** Decidable equalities *) (* The expected form of the goal for the tactic Decide Equality *) (* Pattern "{x=y}+{~(x=y)}" *) (* i.e. "(sumbool (eq ?1 x y) ~(eq ?1 x y))" *) let coq_eqdec_inf_pattern = lazy PATTERN [ { ?X2 = ?X3 :> ?X1 } + { ~ ?X2 = ?X3 :> ?X1 } ] let coq_eqdec_inf_rev_pattern = lazy PATTERN [ { ~ ?X2 = ?X3 :> ?X1 } + { ?X2 = ?X3 :> ?X1 } ] let coq_eqdec_pattern = lazy PATTERN [ %coq_or_ref (?X2 = ?X3 :> ?X1) (~ ?X2 = ?X3 :> ?X1) ] let coq_eqdec_rev_pattern = lazy PATTERN [ %coq_or_ref (~ ?X2 = ?X3 :> ?X1) (?X2 = ?X3 :> ?X1) ] let op_or = coq_or_ref let op_sum = coq_sumbool_ref let match_eqdec t = let eqonleft,op,subst = try true,op_sum,matches (Lazy.force coq_eqdec_inf_pattern) t with PatternMatchingFailure -> try false,op_sum,matches (Lazy.force coq_eqdec_inf_rev_pattern) t with PatternMatchingFailure -> try true,op_or,matches (Lazy.force coq_eqdec_pattern) t with PatternMatchingFailure -> false,op_or,matches (Lazy.force coq_eqdec_rev_pattern) t in match subst with | [(_,typ);(_,c1);(_,c2)] -> eqonleft, Libnames.constr_of_global (Lazy.force op), c1, c2, typ | _ -> anomaly "Unexpected pattern" (* Patterns "~ ?" and "? -> False" *) let coq_not_pattern = lazy PATTERN [ ~ _ ] let coq_imp_False_pattern = lazy PATTERN [ _ -> %coq_False_ref ] let is_matching_not t = is_matching (Lazy.force coq_not_pattern) t let is_matching_imp_False t = is_matching (Lazy.force coq_imp_False_pattern) t (* Remark: patterns that have references to the standard library must be evaluated lazily (i.e. 
at the time they are used, not a the time coqtop starts) *) coq-8.4pl2/tactics/nbtermdn.mli0000640000175000001440000000273612010532755015605 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* sig module Term_dn : sig type term_label = | GRLabel of global_reference | ProdLabel | LambdaLabel | SortLabel end type 'na t type 'na frozen_t val create : unit -> 'na t val add : 'na t -> ('na * (constr_pattern * Y.t)) -> unit val rmv : 'na t -> 'na -> unit val in_dn : 'na t -> 'na -> bool val remap : 'na t -> 'na -> (constr_pattern * Y.t) -> unit val lookup : 'na t -> constr -> (constr_pattern * Y.t) list val app : ('na -> (constr_pattern * Y.t) -> unit) -> 'na t -> unit val dnet_depth : int ref val freeze : 'na t -> 'na frozen_t val unfreeze : 'na frozen_t -> 'na t -> unit val empty : 'na t -> unit val to2lists : 'na t -> ('na * (constr_pattern * Y.t)) list * (Term_dn.term_label option * Btermdn.Make(Y).t) list end coq-8.4pl2/tactics/hiddentac.ml0000640000175000001440000001333012010532755015536 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ((all_occurrences_expr,c),Names.Anonymous)) cl) let h_generalize_dep c = abstract_tactic (TacGeneralizeDep c) (generalize_dep c) let h_let_tac b na c cl eqpat = let id = Option.default (dummy_loc,IntroAnonymous) eqpat in let with_eq = if b then None else Some (true,id) in abstract_tactic (TacLetTac (na,c,cl,b,eqpat)) (letin_tac with_eq na c None cl) let h_let_pat_tac b na c cl eqpat = let id = Option.default (dummy_loc,IntroAnonymous) eqpat in let with_eq = if b then None else Some (true,id) in abstract_tactic (TacLetTac (na,snd c,cl,b,eqpat)) (letin_pat_tac with_eq na c None cl) (* Derived basic tactics *) let h_simple_induction_destruct isrec h = abstract_tactic (TacSimpleInductionDestruct (isrec,h)) (if isrec then (simple_induct h) else (simple_destruct h)) let h_simple_induction = h_simple_induction_destruct true let h_simple_destruct = h_simple_induction_destruct false let out_indarg = function | ElimOnConstr (_,c) -> ElimOnConstr c | ElimOnIdent id -> ElimOnIdent id | ElimOnAnonHyp n -> ElimOnAnonHyp n let h_induction_destruct isrec ev lcl = let lcl' = on_pi1 (List.map (fun (a,b) ->(out_indarg a,b))) lcl in abstract_tactic (TacInductionDestruct (isrec,ev,lcl')) (induction_destruct isrec ev lcl) let h_new_induction ev c idl e cl = h_induction_destruct true ev ([c,idl],e,cl) let h_new_destruct ev c idl e cl = h_induction_destruct false ev ([c,idl],e,cl) let h_specialize n d = abstract_tactic (TacSpecialize (n,d)) (specialize n d) let h_lapply c = abstract_tactic (TacLApply c) (cut_and_apply c) (* Context management *) let h_clear b l = abstract_tactic (TacClear (b,l)) ((if b then keep else clear) l) let h_clear_body l = abstract_tactic (TacClearBody l) (clear_body l) let h_move dep id1 id2 = abstract_tactic (TacMove (dep,id1,id2)) (move_hyp dep id1 id2) let h_rename l = abstract_tactic (TacRename l) (rename_hyp l) let h_revert l = abstract_tactic (TacRevert l) (revert l) (* Constructors *) let h_left ev l = abstract_tactic (TacLeft (ev,l)) (left_with_bindings ev l) let h_right ev l = abstract_tactic (TacRight (ev,l)) (right_with_bindings ev l) let h_split ev l = abstract_tactic (TacSplit (ev,false,l)) (split_with_bindings ev l) (* Moved to tacinterp because of dependencies in Tacinterp.interp let h_any_constructor t = abstract_tactic 
(TacAnyConstructor t) (any_constructor t) *) let h_constructor ev n l = abstract_tactic (TacConstructor(ev,ArgArg n,l))(constructor_tac ev None n l) let h_one_constructor n = abstract_tactic (TacConstructor(false,ArgArg n,NoBindings)) (one_constructor n NoBindings) let h_simplest_left = h_left false NoBindings let h_simplest_right = h_right false NoBindings (* Conversion *) let h_reduce r cl = abstract_tactic (TacReduce (r,cl)) (reduce r cl) let h_change op c cl = abstract_tactic (TacChange (op,c,cl)) (change op c cl) (* Equivalence relations *) let h_reflexivity = abstract_tactic TacReflexivity intros_reflexivity let h_symmetry c = abstract_tactic (TacSymmetry c) (intros_symmetry c) let h_transitivity c = abstract_tactic (TacTransitivity c) (intros_transitivity c) let h_simplest_apply c = h_apply false false [dummy_loc,(c,NoBindings)] let h_simplest_eapply c = h_apply false true [dummy_loc,(c,NoBindings)] let h_simplest_elim c = h_elim false (c,NoBindings) None let h_simplest_case c = h_case false (c,NoBindings) let h_intro_patterns l = abstract_tactic (TacIntroPattern l) (intro_patterns l) coq-8.4pl2/tactics/autorewrite.mli0000640000175000001440000000372712010532755016347 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* raw_rew_rule list -> unit (** The AutoRewrite tactic. The optional conditions tell rewrite how to handle matching and side-condition solving. Default is Naive: first match in the clause, don't look at the side-conditions to tell if the rewrite succeeded. *) val autorewrite : ?conds:conditions -> tactic -> string list -> tactic val autorewrite_in : ?conds:conditions -> Names.identifier -> tactic -> string list -> tactic (** Rewriting rules *) type rew_rule = { rew_lemma: constr; rew_type: types; rew_pat: constr; rew_l2r: bool; rew_tac: glob_tactic_expr } val find_rewrites : string -> rew_rule list val find_matches : string -> constr -> rew_rule list val auto_multi_rewrite : ?conds:conditions -> string list -> Tacticals.clause -> tactic val auto_multi_rewrite_with : ?conds:conditions -> tactic -> string list -> Tacticals.clause -> tactic val print_rewrite_hintdb : string -> unit open Clenv type hypinfo = { hyp_cl : clausenv; hyp_prf : constr; hyp_ty : types; hyp_car : constr; hyp_rel : constr; hyp_l2r : bool; hyp_left : constr; hyp_right : constr; } val find_applied_relation : bool -> Util.loc -> Environ.env -> Evd.evar_map -> Term.constr -> bool -> hypinfo coq-8.4pl2/tactics/extraargs.ml40000640000175000001440000002665112026600571015710 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* " let pr_orient _prc _prlc _prt = function | true -> Pp.mt () | false -> Pp.str " <-" ARGUMENT EXTEND orient TYPED AS bool PRINTED BY pr_orient | [ "->" ] -> [ true ] | [ "<-" ] -> [ false ] | [ ] -> [ true ] END let pr_orient = pr_orient () () () let pr_int_list = Util.pr_sequence Pp.int let pr_int_list_full _prc _prlc _prt l = pr_int_list l open Glob_term let pr_occurrences _prc _prlc _prt l = match l with | ArgArg x -> pr_int_list x | ArgVar (loc, id) -> Nameops.pr_id id let coerce_to_int = function | VInteger n -> n | v -> raise (CannotCoerceTo "an integer") let int_list_of_VList = function | VList l -> List.map (fun n -> coerce_to_int n) l | _ -> raise Not_found let interp_occs ist gl l = match l with | ArgArg x -> x | ArgVar (_,id as locid) -> (try int_list_of_VList 
(List.assoc id ist.lfun) with Not_found | CannotCoerceTo _ -> [interp_int ist locid]) let interp_occs ist gl l = Tacmach.project gl , interp_occs ist gl l let glob_occs ist l = l let subst_occs evm l = l type occurrences_or_var = int list or_var type occurrences = int list ARGUMENT EXTEND occurrences PRINTED BY pr_int_list_full INTERPRETED BY interp_occs GLOBALIZED BY glob_occs SUBSTITUTED BY subst_occs RAW_TYPED AS occurrences_or_var RAW_PRINTED BY pr_occurrences GLOB_TYPED AS occurrences_or_var GLOB_PRINTED BY pr_occurrences | [ ne_integer_list(l) ] -> [ ArgArg l ] | [ var(id) ] -> [ ArgVar id ] END let pr_occurrences = pr_occurrences () () () let pr_gen prc _prlc _prtac c = prc c let pr_globc _prc _prlc _prtac (_,glob) = Printer.pr_glob_constr glob let interp_glob ist gl (t,_) = Tacmach.project gl , (ist,t) let glob_glob = Tacinterp.intern_constr let subst_glob = Tacinterp.subst_glob_constr_and_expr ARGUMENT EXTEND glob PRINTED BY pr_globc INTERPRETED BY interp_glob GLOBALIZED BY glob_glob SUBSTITUTED BY subst_glob RAW_TYPED AS constr_expr RAW_PRINTED BY pr_gen GLOB_TYPED AS glob_constr_and_expr GLOB_PRINTED BY pr_gen [ lconstr(c) ] -> [ c ] END type 'id gen_place= ('id * hyp_location_flag,unit) location type loc_place = identifier Util.located gen_place type place = identifier gen_place let pr_gen_place pr_id = function ConclLocation () -> Pp.mt () | HypLocation (id,InHyp) -> str "in " ++ pr_id id | HypLocation (id,InHypTypeOnly) -> str "in (Type of " ++ pr_id id ++ str ")" | HypLocation (id,InHypValueOnly) -> str "in (Value of " ++ pr_id id ++ str ")" let pr_loc_place _ _ _ = pr_gen_place (fun (_,id) -> Nameops.pr_id id) let pr_place _ _ _ = pr_gen_place Nameops.pr_id let pr_hloc = pr_loc_place () () () let intern_place ist = function ConclLocation () -> ConclLocation () | HypLocation (id,hl) -> HypLocation (intern_hyp ist id,hl) let interp_place ist gl = function ConclLocation () -> ConclLocation () | HypLocation (id,hl) -> HypLocation (interp_hyp ist gl id,hl) let interp_place ist gl p = Tacmach.project gl , interp_place ist gl p let subst_place subst pl = pl ARGUMENT EXTEND hloc PRINTED BY pr_place INTERPRETED BY interp_place GLOBALIZED BY intern_place SUBSTITUTED BY subst_place RAW_TYPED AS loc_place RAW_PRINTED BY pr_loc_place GLOB_TYPED AS loc_place GLOB_PRINTED BY pr_loc_place [ ] -> [ ConclLocation () ] | [ "in" "|-" "*" ] -> [ ConclLocation () ] | [ "in" ident(id) ] -> [ HypLocation ((Util.dummy_loc,id),InHyp) ] | [ "in" "(" "Type" "of" ident(id) ")" ] -> [ HypLocation ((Util.dummy_loc,id),InHypTypeOnly) ] | [ "in" "(" "Value" "of" ident(id) ")" ] -> [ HypLocation ((Util.dummy_loc,id),InHypValueOnly) ] END (* Julien: Mise en commun des differentes version de replace with in by *) let pr_by_arg_tac _prc _prlc prtac opt_c = match opt_c with | None -> mt () | Some t -> hov 2 (str "by" ++ spc () ++ prtac (3,Ppextend.E) t) ARGUMENT EXTEND by_arg_tac TYPED AS tactic_opt PRINTED BY pr_by_arg_tac | [ "by" tactic3(c) ] -> [ Some c ] | [ ] -> [ None ] END let pr_by_arg_tac prtac opt_c = pr_by_arg_tac () () prtac opt_c let pr_in_hyp pr_id (lo,concl) : Pp.std_ppcmds = match lo,concl with | Some [],true -> mt () | None,true -> str "in" ++ spc () ++ str "*" | None,false -> str "in" ++ spc () ++ str "*" ++ spc () ++ str "|-" | Some l,_ -> str "in" ++ spc () ++ Util.prlist_with_sep Util.pr_comma pr_id l ++ match concl with | true -> spc () ++ str "|-" ++ spc () ++ str "*" | _ -> mt () let pr_in_arg_hyp _ _ _ = pr_in_hyp (fun (_,id) -> Ppconstr.pr_id id) let pr_in_arg_hyp_typed _ _ _ = 
pr_in_hyp Ppconstr.pr_id let pr_var_list_gen pr_id = Util.prlist_with_sep (fun () -> str ",") pr_id let pr_var_list_typed _ _ _ = pr_var_list_gen Ppconstr.pr_id let pr_var_list _ _ _ = pr_var_list_gen (fun (_,id) -> Ppconstr.pr_id id) ARGUMENT EXTEND comma_var_lne PRINTED BY pr_var_list_typed RAW_TYPED AS var list RAW_PRINTED BY pr_var_list GLOB_TYPED AS var list GLOB_PRINTED BY pr_var_list | [ var(x) ] -> [ [x] ] | [ var(x) "," comma_var_lne(l) ] -> [x::l] END ARGUMENT EXTEND comma_var_l PRINTED BY pr_var_list_typed RAW_TYPED AS var list RAW_PRINTED BY pr_var_list GLOB_TYPED AS var list GLOB_PRINTED BY pr_var_list | [ comma_var_lne(l) ] -> [l] | [] -> [ [] ] END let pr_in_concl _ _ _ = function true -> str "|- " ++ spc () ++ str "*" | _ -> str "|-" ARGUMENT EXTEND inconcl TYPED AS bool PRINTED BY pr_in_concl | [ "|-" "*" ] -> [ true ] | [ "|-" ] -> [ false ] END ARGUMENT EXTEND in_arg_hyp PRINTED BY pr_in_arg_hyp_typed RAW_TYPED AS var list option * bool RAW_PRINTED BY pr_in_arg_hyp GLOB_TYPED AS var list option * bool GLOB_PRINTED BY pr_in_arg_hyp | [ "in" "*" ] -> [(None,true)] | [ "in" "*" inconcl_opt(b) ] -> [let onconcl = match b with Some b -> b | None -> true in (None,onconcl)] | [ "in" comma_var_l(l) inconcl_opt(b) ] -> [ let onconcl = match b with Some b -> b | None -> false in Some l, onconcl ] | [ ] -> [ (Some [],true) ] END let pr_in_arg_hyp = pr_in_arg_hyp_typed () () () let gen_in_arg_hyp_to_clause trad_id (hyps ,concl) : Tacticals.clause = {Tacexpr.onhyps= Option.map (fun l -> List.map (fun id -> ( (all_occurrences_expr,trad_id id),InHyp)) l ) hyps; Tacexpr.concl_occs = if concl then all_occurrences_expr else no_occurrences_expr} let raw_in_arg_hyp_to_clause = gen_in_arg_hyp_to_clause snd let glob_in_arg_hyp_to_clause = gen_in_arg_hyp_to_clause (fun x -> x) (* spiwack argument for the commands of the retroknowledge *) let (wit_r_nat_field, globwit_r_nat_field, rawwit_r_nat_field) = Genarg.create_arg None "r_nat_field" let (wit_r_n_field, globwit_r_n_field, rawwit_r_n_field) = Genarg.create_arg None "r_n_field" let (wit_r_int31_field, globwit_r_int31_field, rawwit_r_int31_field) = Genarg.create_arg None "r_int31_field" let (wit_r_field, globwit_r_field, rawwit_r_field) = Genarg.create_arg None "r_field" (* spiwack: the print functions are incomplete, but I don't know what they are used for *) let pr_r_nat_field _ _ _ natf = str "nat " ++ match natf with | Retroknowledge.NatType -> str "type" | Retroknowledge.NatPlus -> str "plus" | Retroknowledge.NatTimes -> str "times" let pr_r_n_field _ _ _ nf = str "binary N " ++ match nf with | Retroknowledge.NPositive -> str "positive" | Retroknowledge.NType -> str "type" | Retroknowledge.NTwice -> str "twice" | Retroknowledge.NTwicePlusOne -> str "twice plus one" | Retroknowledge.NPhi -> str "phi" | Retroknowledge.NPhiInv -> str "phi inv" | Retroknowledge.NPlus -> str "plus" | Retroknowledge.NTimes -> str "times" let pr_r_int31_field _ _ _ i31f = str "int31 " ++ match i31f with | Retroknowledge.Int31Bits -> str "bits" | Retroknowledge.Int31Type -> str "type" | Retroknowledge.Int31Twice -> str "twice" | Retroknowledge.Int31TwicePlusOne -> str "twice plus one" | Retroknowledge.Int31Phi -> str "phi" | Retroknowledge.Int31PhiInv -> str "phi inv" | Retroknowledge.Int31Plus -> str "plus" | Retroknowledge.Int31Times -> str "times" | _ -> assert false let pr_retroknowledge_field _ _ _ f = match f with (* | Retroknowledge.KEq -> str "equality" | Retroknowledge.KNat natf -> pr_r_nat_field () () () natf | Retroknowledge.KN nf -> pr_r_n_field () 
() () nf *) | Retroknowledge.KInt31 (group, i31f) -> (pr_r_int31_field () () () i31f) ++ str "in " ++ str group ARGUMENT EXTEND retroknowledge_nat TYPED AS r_nat_field PRINTED BY pr_r_nat_field | [ "nat" "type" ] -> [ Retroknowledge.NatType ] | [ "nat" "plus" ] -> [ Retroknowledge.NatPlus ] | [ "nat" "times" ] -> [ Retroknowledge.NatTimes ] END ARGUMENT EXTEND retroknowledge_binary_n TYPED AS r_n_field PRINTED BY pr_r_n_field | [ "binary" "N" "positive" ] -> [ Retroknowledge.NPositive ] | [ "binary" "N" "type" ] -> [ Retroknowledge.NType ] | [ "binary" "N" "twice" ] -> [ Retroknowledge.NTwice ] | [ "binary" "N" "twice" "plus" "one" ] -> [ Retroknowledge.NTwicePlusOne ] | [ "binary" "N" "phi" ] -> [ Retroknowledge.NPhi ] | [ "binary" "N" "phi" "inv" ] -> [ Retroknowledge.NPhiInv ] | [ "binary" "N" "plus" ] -> [ Retroknowledge.NPlus ] | [ "binary" "N" "times" ] -> [ Retroknowledge.NTimes ] END ARGUMENT EXTEND retroknowledge_int31 TYPED AS r_int31_field PRINTED BY pr_r_int31_field | [ "int31" "bits" ] -> [ Retroknowledge.Int31Bits ] | [ "int31" "type" ] -> [ Retroknowledge.Int31Type ] | [ "int31" "twice" ] -> [ Retroknowledge.Int31Twice ] | [ "int31" "twice" "plus" "one" ] -> [ Retroknowledge.Int31TwicePlusOne ] | [ "int31" "phi" ] -> [ Retroknowledge.Int31Phi ] | [ "int31" "phi" "inv" ] -> [ Retroknowledge.Int31PhiInv ] | [ "int31" "plus" ] -> [ Retroknowledge.Int31Plus ] | [ "int31" "plusc" ] -> [ Retroknowledge.Int31PlusC ] | [ "int31" "pluscarryc" ] -> [ Retroknowledge.Int31PlusCarryC ] | [ "int31" "minus" ] -> [ Retroknowledge.Int31Minus ] | [ "int31" "minusc" ] -> [ Retroknowledge.Int31MinusC ] | [ "int31" "minuscarryc" ] -> [ Retroknowledge.Int31MinusCarryC ] | [ "int31" "times" ] -> [ Retroknowledge.Int31Times ] | [ "int31" "timesc" ] -> [ Retroknowledge.Int31TimesC ] | [ "int31" "div21" ] -> [ Retroknowledge.Int31Div21 ] | [ "int31" "div" ] -> [ Retroknowledge.Int31Div ] | [ "int31" "addmuldiv" ] -> [ Retroknowledge.Int31AddMulDiv ] | [ "int31" "compare" ] -> [ Retroknowledge.Int31Compare ] | [ "int31" "head0" ] -> [ Retroknowledge.Int31Head0 ] | [ "int31" "tail0" ] -> [ Retroknowledge.Int31Tail0 ] END ARGUMENT EXTEND retroknowledge_field TYPED AS r_field PRINTED BY pr_retroknowledge_field (*| [ "equality" ] -> [ Retroknowledge.KEq ] | [ retroknowledge_nat(n)] -> [ Retroknowledge.KNat n ] | [ retroknowledge_binary_n (n)] -> [ Retroknowledge.KN n ]*) | [ retroknowledge_int31 (i) "in" string(g)] -> [ Retroknowledge.KInt31(g,i) ] END coq-8.4pl2/tactics/btermdn.ml0000640000175000001440000001074312010532755015253 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* struct module Term_dn = Termdn.Make(Z) module X = struct type t = constr_pattern*int let compare = Pervasives.compare end module Y = struct type t = Term_dn.term_label let compare x y = let make_name n = match n with | Term_dn.GRLabel(ConstRef con) -> Term_dn.GRLabel(ConstRef(constant_of_kn(canonical_con con))) | Term_dn.GRLabel(IndRef (kn,i)) -> Term_dn.GRLabel(IndRef(mind_of_kn(canonical_mind kn),i)) | Term_dn.GRLabel(ConstructRef ((kn,i),j ))-> Term_dn.GRLabel(ConstructRef((mind_of_kn(canonical_mind kn),i),j)) | k -> k in Pervasives.compare (make_name x) (make_name y) end module Dn = Dn.Make(X)(Y)(Z) type t = Dn.t let create = Dn.create let decomp = let rec decrec acc c = match kind_of_term c with | App (f,l) -> decrec (Array.fold_right (fun a l -> a::l) l acc) f | Cast (c1,_,_) -> decrec acc c1 | _ -> (c,acc) in decrec [] 
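(* Added note (illustration, not from the original source): [decomp] strips
   casts and flattens nested applications, so on a term representing the
   application (f a b) it returns the pair (f, [a; b]); on an atomic term [t]
   it returns (t, []). *)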
let constr_val_discr t = let c, l = decomp t in match kind_of_term c with | Ind ind_sp -> Dn.Label(Term_dn.GRLabel (IndRef ind_sp),l) | Construct cstr_sp -> Dn.Label(Term_dn.GRLabel (ConstructRef cstr_sp),l) | Var id -> Dn.Label(Term_dn.GRLabel (VarRef id),l) | Const _ -> Dn.Everything | _ -> Dn.Nothing let constr_val_discr_st (idpred,cpred) t = let c, l = decomp t in match kind_of_term c with | Const c -> if Cpred.mem c cpred then Dn.Everything else Dn.Label(Term_dn.GRLabel (ConstRef c),l) | Ind ind_sp -> Dn.Label(Term_dn.GRLabel (IndRef ind_sp),l) | Construct cstr_sp -> Dn.Label(Term_dn.GRLabel (ConstructRef cstr_sp),l) | Var id when not (Idpred.mem id idpred) -> Dn.Label(Term_dn.GRLabel (VarRef id),l) | Prod (n, d, c) -> Dn.Label(Term_dn.ProdLabel, [d; c]) | Lambda (n, d, c) -> Dn.Label(Term_dn.LambdaLabel, [d; c] @ l) | Sort _ -> Dn.Label(Term_dn.SortLabel, []) | Evar _ -> Dn.Everything | _ -> Dn.Nothing let bounded_constr_pat_discr_st st (t,depth) = if depth = 0 then None else match Term_dn.constr_pat_discr_st st t with | None -> None | Some (c,l) -> Some(c,List.map (fun c -> (c,depth-1)) l) let bounded_constr_val_discr_st st (t,depth) = if depth = 0 then Dn.Nothing else match constr_val_discr_st st t with | Dn.Label (c,l) -> Dn.Label(c,List.map (fun c -> (c,depth-1)) l) | Dn.Nothing -> Dn.Nothing | Dn.Everything -> Dn.Everything let bounded_constr_pat_discr (t,depth) = if depth = 0 then None else match Term_dn.constr_pat_discr t with | None -> None | Some (c,l) -> Some(c,List.map (fun c -> (c,depth-1)) l) let bounded_constr_val_discr (t,depth) = if depth = 0 then Dn.Nothing else match constr_val_discr t with | Dn.Label (c,l) -> Dn.Label(c,List.map (fun c -> (c,depth-1)) l) | Dn.Nothing -> Dn.Nothing | Dn.Everything -> Dn.Everything let add = function | None -> (fun dn (c,v) -> Dn.add dn bounded_constr_pat_discr ((c,!dnet_depth),v)) | Some st -> (fun dn (c,v) -> Dn.add dn (bounded_constr_pat_discr_st st) ((c,!dnet_depth),v)) let rmv = function | None -> (fun dn (c,v) -> Dn.rmv dn bounded_constr_pat_discr ((c,!dnet_depth),v)) | Some st -> (fun dn (c,v) -> Dn.rmv dn (bounded_constr_pat_discr_st st) ((c,!dnet_depth),v)) let lookup = function | None -> (fun dn t -> List.map (fun ((c,_),v) -> (c,v)) (Dn.lookup dn bounded_constr_val_discr (t,!dnet_depth))) | Some st -> (fun dn t -> List.map (fun ((c,_),v) -> (c,v)) (Dn.lookup dn (bounded_constr_val_discr_st st) (t,!dnet_depth))) let app f dn = Dn.app (fun ((c,_),v) -> f(c,v)) dn end coq-8.4pl2/tactics/inv.ml0000640000175000001440000004435312121620060014406 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mv::acc | _ -> fold_constr collrec acc c in collrec [] c let check_no_metas clenv ccl = if occur_meta ccl then let metas = List.filter (fun m -> not (Evd.meta_defined clenv.evd m)) (collect_meta_variables ccl) in let metas = List.map (Evd.meta_name clenv.evd) metas in errorlabstrm "inversion" (str ("Cannot find an instantiation for variable"^ (if List.length metas = 1 then " " else "s ")) ++ prlist_with_sep pr_comma pr_name metas (* ajouter "in " ++ pr_lconstr ccl mais il faut le bon contexte *)) let var_occurs_in_pf gl id = let env = pf_env gl in occur_var env id (pf_concl gl) or List.exists (occur_var_in_decl env id) (pf_hyps gl) (* [make_inv_predicate (ity,args) C] is given the inductive type, its arguments, both the global parameters and its local arguments, and is expected to produce a predicate P such that if largs is the "local" 
part of the arguments, then (P largs) will be convertible with a conclusion of the form: a1=a1->a2=a2 ... -> C Algorithm: suppose length(largs)=n (1) Push the entire arity, [xbar:Abar], carrying along largs and the conclusion (2) Pair up each ai with its respective Rel version: a1==(Rel n), a2==(Rel n-1), etc. (3) For each pair, ai,Rel j, if the Ai is dependent - that is, the type of [Rel j] is an open term, then we construct the iterated tuple, [make_iterated_tuple] does it, and use that for our equation Otherwise, we just use ai=Rel j *) type inversion_status = Dep of constr option | NoDep let compute_eqn env sigma n i ai = (ai, (mkRel (n-i),get_type_of env sigma (mkRel (n-i)))) let make_inv_predicate env sigma indf realargs id status concl = let nrealargs = List.length realargs in let (hyps,concl) = match status with | NoDep -> (* We push the arity and leave concl unchanged *) let hyps_arity,_ = get_arity env indf in (hyps_arity,concl) | Dep dflt_concl -> if not (occur_var env id concl) then errorlabstrm "make_inv_predicate" (str "Current goal does not depend on " ++ pr_id id ++ str"."); (* We abstract the conclusion of goal with respect to realargs and c to * be concl in order to rewrite and have c also rewritten when the case * will be done *) let pred = match dflt_concl with | Some concl -> concl (*assumed it's some [x1..xn,H:I(x1..xn)]C*) | None -> let sort = get_sort_family_of env sigma concl in let p = make_arity env true indf (new_sort_in_family sort) in Unification.abstract_list_all env (Evd.create_evar_defs sigma) p concl (realargs@[mkVar id]) in let hyps,bodypred = decompose_lam_n_assum (nrealargs+1) pred in (* We lift to make room for the equations *) (hyps,lift nrealargs bodypred) in let nhyps = rel_context_length hyps in let env' = push_rel_context hyps env in let realargs' = List.map (lift nhyps) realargs in let pairs = list_map_i (compute_eqn env' sigma nhyps) 0 realargs' in (* Now the arity is pushed, and we need to construct the pairs * ai,mkRel(n-i+1) *) (* Now, we can recurse down this list, for each ai,(mkRel k) whether to push (mkRel k)=ai (when Ai is closed). In any case, we carry along the rest of pairs *) let rec build_concl eqns n = function | [] -> (it_mkProd concl eqns,n) | (ai,(xi,ti))::restlist -> let (lhs,eqnty,rhs) = if closed0 ti then (xi,ti,ai) else make_iterated_tuple env' sigma ai (xi,ti) in let eq_term = Coqlib.build_coq_eq () in let eqn = applist (eq_term ,[eqnty;lhs;rhs]) in build_concl ((Anonymous,lift n eqn)::eqns) (n+1) restlist in let (newconcl,neqns) = build_concl [] 0 pairs in let predicate = it_mkLambda_or_LetIn_name env newconcl hyps in (* OK - this predicate should now be usable by res_elimination_then to do elimination on the conclusion. *) (predicate,neqns) (* The result of the elimination is a bunch of goals like: |- (cibar:Cibar)Equands->C where the cibar are either dependent or not. We are fed a signature, with "true" for every recursive argument, and false for every non-recursive one. So we need to do the sign_branch_len(sign) intros, thinning out all recursive assumptions. This leaves us with exactly length(sign) assumptions. We save their names, and then do introductions for all the equands (there are some number of them, which is the other argument of the tactic) This gives us the #neqns equations, whose names we get also, and the #length(sign) arguments. Suppose that #nodep of these arguments are non-dependent. Generalize and thin them. This gives us #dep = #length(sign)-#nodep arguments which are dependent. 
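(Added worked instance, for concreteness: with length(sign) = 3, neqns = 2 and nodep = 1, we would introduce the 3 assumptions, then the 2 equations, generalize and thin the 1 non-dependent argument, and keep dep = 3 - 1 = 2 dependent ones.)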
Now, we want to take each of the equations, and do all possible injections to get the left-hand-side to be a variable. At the same time, if we find a lhs/rhs pair which are different, we can discriminate them to prove false and finish the branch. Then, we thin away the equations, and do the introductions for the #nodep arguments which we generalized before. *) (* Called after the case-assumptions have been killed off, and all the intros have been done. Given that the clause in question is an equality (if it isn't we fail), we are responsible for projecting the equality, using Injection and Discriminate, and applying it to the concusion *) (* Computes the subset of hypothesis in the local context whose type depends on t (should be of the form (mkVar id)), then it generalizes them, applies tac to rewrite all occurrencies of t, and introduces generalized hypotheis. Precondition: t=(mkVar id) *) let rec dependent_hyps id idlist gl = let rec dep_rec =function | [] -> [] | (id1,_,_)::l -> (* Update the type of id1: it may have been subject to rewriting *) let d = pf_get_hyp gl id1 in if occur_var_in_decl (Global.env()) id d then d :: dep_rec l else dep_rec l in dep_rec idlist let split_dep_and_nodep hyps gl = List.fold_right (fun (id,_,_ as d) (l1,l2) -> if var_occurs_in_pf gl id then (d::l1,l2) else (l1,d::l2)) hyps ([],[]) open Coqlib (* Computation of dids is late; must have been done in rewrite_equations*) (* Will keep generalizing and introducing back and forth... *) (* Moreover, others hyps depending of dids should have been *) (* generalized; in such a way that [dids] can endly be cleared *) (* Consider for instance this case extracted from Well_Ordering.v A : Set B : A ->Set a0 : A f : (B a0) ->WO y : WO H0 : (le_WO y (sup a0 f)) ============================ (Acc WO le_WO y) Inversion H0 gives A : Set B : A ->Set a0 : A f : (B a0) ->WO y : WO H0 : (le_WO y (sup a0 f)) a1 : A f0 : (B a1) ->WO v : (B a1) H1 : (f0 v)=y H3 : a1=a0 f1 : (B a0) ->WO v0 : (B a0) H4 : (existS A [a:A](B a) ->WO a0 f1)=(existS A [a:A](B a) ->WO a0 f) ============================ (Acc WO le_WO (f1 v0)) while, ideally, we would have expected A : Set B : A ->Set a0 : A f0 : (B a0)->WO v : (B a0) ============================ (Acc WO le_WO (f0 v)) obtained from destruction with equalities A : Set B : A ->Set a0 : A f : (B a0) ->WO y : WO H0 : (le_WO y (sup a0 f)) a1 : A f0 : (B a1)->WO v : (B a1) H1 : (f0 v)=y H2 : (sup a1 f0)=(sup a0 f) ============================ (Acc WO le_WO (f0 v)) by clearing initial hypothesis H0 and its dependency y, clearing H1 (in fact H1 can be avoided using the same trick as for newdestruct), decomposing H2 to get a1=a0 and (a1,f0)=(a0,f), replacing a1 by a0 everywhere and removing a1 and a1=a0 (in fact it would have been more regular to replace a0 by a1, avoiding f1 and v0 cannot replace f0 and v), finally removing H4 (here because f is not used, more generally after using eq_dep and replacing f by f0) [and finally rename a0, f0 into a,f]. Summary: nine useless hypotheses! Nota: with Inversion_clear, only four useless hypotheses *) let generalizeRewriteIntros tac depids id gls = let dids = dependent_hyps id depids gls in (tclTHENSEQ [bring_hyps dids; tac; (* may actually fail to replace if dependent in a previous eq *) intros_replacing (ids_of_named_context dids)]) gls let rec tclMAP_i n tacfun = function | [] -> tclDO n (tacfun None) | a::l -> if n=0 then error "Too many names." 
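(* Added note: [tclMAP_i n tacfun l] applies [tacfun (Some a)] to each name [a] supplied in [l], pads the remaining iterations (up to [n] in total) with [tacfun None], and fails when more than [n] names are given. *)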
else tclTHEN (tacfun (Some a)) (tclMAP_i (n-1) tacfun l) let remember_first_eq id x = if !x = no_move then x := MoveAfter id (* invariant: ProjectAndApply is responsible for erasing the clause which it is given as input It simplifies the clause (an equality) to use it as a rewrite rule and then erases the result of the simplification. *) (* invariant: ProjectAndApplyNoThining simplifies the clause (an equality) . If it can discriminate then the goal is proved, if not tries to use it as a rewrite rule. It erases the clause which is given as input *) let projectAndApply thin id eqname names depids gls = let subst_hyp l2r id = tclTHEN (tclTRY(rewriteInConcl l2r (mkVar id))) (if thin then clear [id] else (remember_first_eq id eqname; tclIDTAC)) in let substHypIfVariable tac id gls = let (t,t1,t2) = Hipattern.dest_nf_eq gls (pf_get_hyp_typ gls id) in match (kind_of_term t1, kind_of_term t2) with | Var id1, _ -> generalizeRewriteIntros (subst_hyp true id) depids id1 gls | _, Var id2 -> generalizeRewriteIntros (subst_hyp false id) depids id2 gls | _ -> tac id gls in let deq_trailer id neqns = tclTHENSEQ [(if names <> [] then clear [id] else tclIDTAC); (tclMAP_i neqns (fun idopt -> tclTRY (tclTHEN (intro_move idopt no_move) (* try again to substitute and if still not a variable after *) (* decomposition, arbitrarily try to rewrite RL !? *) (tclTRY (onLastHypId (substHypIfVariable (subst_hyp false)))))) names); (if names = [] then clear [id] else tclIDTAC)] in substHypIfVariable (* If no immediate variable in the equation, try to decompose it *) (* and apply a trailer which again try to substitute *) (fun id -> dEqThen false (deq_trailer id) (Some (ElimOnConstr (mkVar id,NoBindings)))) id gls (* Inversion qui n'introduit pas les hypotheses, afin de pouvoir les nommer soi-meme (proposition de Valerie). *) let rewrite_equations_gene othin neqns ba gl = let (depids,nodepids) = split_dep_and_nodep ba.assums gl in let rewrite_eqns = match othin with | Some thin -> onLastHypId (fun last -> tclTHENSEQ [tclDO neqns (tclTHEN intro (onLastHypId (fun id -> tclTRY (projectAndApply thin id (ref no_move) [] depids)))); onHyps (compose List.rev (afterHyp last)) bring_hyps; onHyps (afterHyp last) (compose clear ids_of_named_context)]) | None -> tclIDTAC in (tclTHENSEQ [tclDO neqns intro; bring_hyps nodepids; clear (ids_of_named_context nodepids); onHyps (compose List.rev (nLastDecls neqns)) bring_hyps; onHyps (nLastDecls neqns) (compose clear ids_of_named_context); rewrite_eqns; tclMAP (fun (id,_,_ as d) -> (tclORELSE (clear [id]) (tclTHEN (bring_hyps [d]) (clear [id])))) depids]) gl (* Introduction of the equations on arguments othin: discriminates Simple Inversion, Inversion and Inversion_clear None: the equations are introduced, but not rewritten Some thin: the equations are rewritten, and cleared if thin is true *) let rec get_names allow_conj (loc,pat) = match pat with | IntroWildcard -> error "Discarding pattern not allowed for inversion equations." | IntroAnonymous | IntroForthcoming _ -> error "Anonymous pattern not allowed for inversion equations." | IntroFresh _ -> error "Fresh pattern not allowed for inversion equations." | IntroRewrite _-> error "Rewriting pattern not allowed for inversion equations." | IntroOrAndPattern [l] -> if allow_conj then if l = [] then (None,[]) else let l = List.map (fun id -> Option.get (fst (get_names false id))) l in (Some (List.hd l), l) else error"Nested conjunctive patterns not allowed for inversion equations." 
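(* Added note (illustration, not in the original source): when [allow_conj] holds, a single conjunctive pattern such as [x e1 e2] is accepted above and its components name the parts of one inversion equation; genuinely disjunctive patterns are rejected by the case below. *)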
| IntroOrAndPattern l -> error "Disjunctive patterns not allowed for inversion equations." | IntroIdentifier id -> (Some id,[id]) let extract_eqn_names = function | None -> None,[] | Some x -> x let rewrite_equations othin neqns names ba gl = let names = List.map (get_names true) names in let (depids,nodepids) = split_dep_and_nodep ba.assums gl in let rewrite_eqns = let first_eq = ref no_move in match othin with | Some thin -> tclTHENSEQ [onHyps (compose List.rev (nLastDecls neqns)) bring_hyps; onHyps (nLastDecls neqns) (compose clear ids_of_named_context); tclMAP_i neqns (fun o -> let idopt,names = extract_eqn_names o in (tclTHEN (intro_move idopt no_move) (onLastHypId (fun id -> tclTRY (projectAndApply thin id first_eq names depids))))) names; tclMAP (fun (id,_,_) gl -> intro_move None (if thin then no_move else !first_eq) gl) nodepids; tclMAP (fun (id,_,_) -> tclTRY (clear [id])) depids] | None -> tclIDTAC in (tclTHENSEQ [tclDO neqns intro; bring_hyps nodepids; clear (ids_of_named_context nodepids); rewrite_eqns]) gl let interp_inversion_kind = function | SimpleInversion -> None | FullInversion -> Some false | FullInversionClear -> Some true let rewrite_equations_tac (gene, othin) id neqns names ba = let othin = interp_inversion_kind othin in let tac = if gene then rewrite_equations_gene othin neqns ba else rewrite_equations othin neqns names ba in if othin = Some true (* if Inversion_clear, clear the hypothesis *) then tclTHEN tac (tclTRY (clear [id])) else tac let raw_inversion inv_kind id status names gl = let env = pf_env gl and sigma = project gl in let c = mkVar id in let (ind,t) = try pf_reduce_to_atomic_ind gl (pf_type_of gl c) with UserError _ -> errorlabstrm "raw_inversion" (str ("The type of "^(string_of_id id)^" is not inductive.")) in let indclause = mk_clenv_from gl (c,t) in let ccl = clenv_type indclause in check_no_metas indclause ccl; let IndType (indf,realargs) = find_rectype env sigma ccl in let (elim_predicate,neqns) = make_inv_predicate env sigma indf realargs id status (pf_concl gl) in let (cut_concl,case_tac) = if status <> NoDep & (dependent c (pf_concl gl)) then Reduction.beta_appvect elim_predicate (Array.of_list (realargs@[c])), case_then_using else Reduction.beta_appvect elim_predicate (Array.of_list realargs), case_nodep_then_using in (tclTHENS (assert_tac Anonymous cut_concl) [case_tac names (introCaseAssumsThen (rewrite_equations_tac inv_kind id neqns)) (Some elim_predicate) ([],[]) ind indclause; onLastHypId (fun id -> (tclTHEN (apply_term (mkVar id) (list_tabulate (fun _ -> Evarutil.mk_new_meta()) neqns)) reflexivity))]) gl (* Error messages of the inversion tactics *) let wrap_inv_error id = function | Indrec.RecursionSchemeError (Indrec.NotAllowedCaseAnalysis (_,(Type _ | Prop Pos as k),i)) -> errorlabstrm "" (strbrk "Inversion would require case analysis on sort " ++ pr_sort k ++ strbrk " which is not allowed for inductive definition " ++ pr_inductive (Global.env()) i ++ str ".") | e -> raise e (* The most general inversion tactic *) let inversion inv_kind status names id gls = try (raw_inversion inv_kind id status names) gls with e when Errors.noncritical e -> wrap_inv_error id e (* Specializing it... 
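to the usual entry points: the derived tactics below fix the inversion kind and dependency status; for instance [inv_clear_tac id] corresponds to the user-level [inversion_clear id], while the [dinv_*] variants build a dependent inversion predicate. (Added clarifying note, not in the original comment.)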
*) let inv_gen gene thin status names = try_intros_until (inversion (gene,thin) status names) open Tacexpr let inv k = inv_gen false k NoDep let half_inv_tac id = inv SimpleInversion None (NamedHyp id) let inv_tac id = inv FullInversion None (NamedHyp id) let inv_clear_tac id = inv FullInversionClear None (NamedHyp id) let dinv k c = inv_gen false k (Dep c) let half_dinv_tac id = dinv SimpleInversion None None (NamedHyp id) let dinv_tac id = dinv FullInversion None None (NamedHyp id) let dinv_clear_tac id = dinv FullInversionClear None None (NamedHyp id) (* InvIn will bring the specified clauses into the conclusion, and then * perform inversion on the named hypothesis. After, it will intro them * back to their places in the hyp-list. *) let invIn k names ids id gls = let hyps = List.map (pf_get_hyp gls) ids in let nb_prod_init = nb_prod (pf_concl gls) in let intros_replace_ids gls = let nb_of_new_hyp = nb_prod (pf_concl gls) - (List.length hyps + nb_prod_init) in if nb_of_new_hyp < 1 then intros_replacing ids gls else tclTHEN (tclDO nb_of_new_hyp intro) (intros_replacing ids) gls in try (tclTHENSEQ [bring_hyps hyps; inversion (false,k) NoDep names id; intros_replace_ids]) gls with e when Errors.noncritical e -> wrap_inv_error id e let invIn_gen k names idl = try_intros_until (invIn k names idl) let inv_clause k names = function | [] -> inv k names | idl -> invIn_gen k names idl coq-8.4pl2/tactics/hiddentac.mli0000640000175000001440000001150312010532755015707 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* identifier move_location -> tactic val h_intro : identifier -> tactic val h_intros_until : quantified_hypothesis -> tactic val h_assumption : tactic val h_exact : constr -> tactic val h_exact_no_check : constr -> tactic val h_vm_cast_no_check : constr -> tactic val h_apply : advanced_flag -> evars_flag -> constr with_bindings located list -> tactic val h_apply_in : advanced_flag -> evars_flag -> constr with_bindings located list -> identifier * intro_pattern_expr located option -> tactic val h_elim : evars_flag -> constr with_bindings -> constr with_bindings option -> tactic val h_elim_type : constr -> tactic val h_case : evars_flag -> constr with_bindings -> tactic val h_case_type : constr -> tactic val h_mutual_fix : hidden_flag -> identifier -> int -> (identifier * int * constr) list -> tactic val h_fix : identifier option -> int -> tactic val h_mutual_cofix : hidden_flag -> identifier -> (identifier * constr) list -> tactic val h_cofix : identifier option -> tactic val h_cut : constr -> tactic val h_generalize : constr list -> tactic val h_generalize_gen : (constr with_occurrences * name) list -> tactic val h_generalize_dep : constr -> tactic val h_let_tac : letin_flag -> name -> constr -> Tacticals.clause -> intro_pattern_expr located option -> tactic val h_let_pat_tac : letin_flag -> name -> evar_map * constr -> Tacticals.clause -> intro_pattern_expr located option -> tactic (** Derived basic tactics *) val h_simple_induction : quantified_hypothesis -> tactic val h_simple_destruct : quantified_hypothesis -> tactic val h_simple_induction_destruct : rec_flag -> quantified_hypothesis -> tactic val h_new_induction : evars_flag -> (evar_map * constr with_bindings) induction_arg -> intro_pattern_expr located option * intro_pattern_expr located option -> constr with_bindings option -> Tacticals.clause option -> tactic val h_new_destruct : evars_flag -> (evar_map * constr 
with_bindings) induction_arg -> intro_pattern_expr located option * intro_pattern_expr located option -> constr with_bindings option -> Tacticals.clause option -> tactic val h_induction_destruct : rec_flag -> evars_flag -> ((evar_map * constr with_bindings) induction_arg * (intro_pattern_expr located option * intro_pattern_expr located option)) list * constr with_bindings option * Tacticals.clause option -> tactic val h_specialize : int option -> constr with_bindings -> tactic val h_lapply : constr -> tactic (** Automation tactic : see Auto *) (** Context management *) val h_clear : bool -> identifier list -> tactic val h_clear_body : identifier list -> tactic val h_move : bool -> identifier -> identifier move_location -> tactic val h_rename : (identifier*identifier) list -> tactic val h_revert : identifier list -> tactic (** Constructors *) val h_constructor : evars_flag -> int -> constr bindings -> tactic val h_left : evars_flag -> constr bindings -> tactic val h_right : evars_flag -> constr bindings -> tactic val h_split : evars_flag -> constr bindings list -> tactic val h_one_constructor : int -> tactic val h_simplest_left : tactic val h_simplest_right : tactic (** Conversion *) val h_reduce : Redexpr.red_expr -> Tacticals.clause -> tactic val h_change : Pattern.constr_pattern option -> constr -> Tacticals.clause -> tactic (** Equivalence relations *) val h_reflexivity : tactic val h_symmetry : Tacticals.clause -> tactic val h_transitivity : constr option -> tactic val h_simplest_apply : constr -> tactic val h_simplest_eapply : constr -> tactic val h_simplest_elim : constr -> tactic val h_simplest_case : constr -> tactic val h_intro_patterns : intro_pattern_expr located list -> tactic coq-8.4pl2/tactics/equality.ml0000640000175000001440000016126012121620060015444 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !discriminate_introduction); optwrite = (:=) discriminate_introduction } (* Rewriting tactics *) type dep_proof_flag = bool (* true = support rewriting dependent proofs *) type freeze_evars_flag = bool (* true = don't instantiate existing evars *) type orientation = bool type conditions = | Naive (* Only try the first occurence of the lemma (default) *) | FirstSolved (* Use the first match whose side-conditions are solved *) | AllMatches (* Rewrite all matches whose side-conditions are solved *) (* Warning : rewriting from left to right only works if there exists in the context a theorem named __r with type (A:)(x:A)(P:A->Prop)(P x)->(y:A)(eqname A y x)->(P y). If another equality myeq is introduced, then corresponding theorems myeq_ind_r, myeq_rec_r and myeq_rect_r have to be proven. See below. 
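(Illustration added here, not part of the original note: for Logic.eq the reversed lemma is
 eq_ind_r : forall (A : Type) (x : A) (P : A -> Prop), P x -> forall y : A, y = x -> P y,
 and a user-defined myeq would need the analogous myeq_ind_r, myeq_rec_r and myeq_rect_r.)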
-- Eduardo (19/8/97) *) let rewrite_unif_flags = { Unification.modulo_conv_on_closed_terms = None; Unification.use_metas_eagerly_in_conv_on_closed_terms = true; Unification.modulo_delta = empty_transparent_state; Unification.modulo_delta_types = empty_transparent_state; Unification.modulo_delta_in_merge = None; Unification.check_applied_meta_types = true; Unification.resolve_evars = true; Unification.use_pattern_unification = true; Unification.use_meta_bound_pattern_unification = true; Unification.frozen_evars = ExistentialSet.empty; Unification.restrict_conv_on_strict_subterms = false; Unification.modulo_betaiota = false; Unification.modulo_eta = true; Unification.allow_K_in_toplevel_higher_order_unification = false (* allow_K does not matter in practice because calls w_typed_unify *) } let freeze_initial_evars sigma flags clause = (* We take evars of the type: this may include old evars! For excluding *) (* all old evars, including the ones occurring in the rewriting lemma, *) (* we would have to take the clenv_value *) let newevars = Evd.collect_evars (clenv_type clause) in let evars = fold_undefined (fun evk _ evars -> if ExistentialSet.mem evk newevars then evars else ExistentialSet.add evk evars) sigma ExistentialSet.empty in { flags with Unification.frozen_evars = evars } let make_flags frzevars sigma flags clause = if frzevars then freeze_initial_evars sigma flags clause else flags let side_tac tac sidetac = match sidetac with | None -> tac | Some sidetac -> tclTHENSFIRSTn tac [|tclIDTAC|] sidetac let instantiate_lemma_all frzevars env sigma gl c ty l l2r concl = let eqclause = Clenv.make_clenv_binding { gl with sigma = sigma } (c,ty) l in let (equiv, args) = decompose_app (Clenv.clenv_type eqclause) in let rec split_last_two = function | [c1;c2] -> [],(c1, c2) | x::y::z -> let l,res = split_last_two (y::z) in x::l, res | _ -> error "The term provided is not an applied relation." in let others,(c1,c2) = split_last_two args in let try_occ (evd', c') = clenv_pose_dependent_evars true {eqclause with evd = evd'} in let flags = make_flags frzevars sigma rewrite_unif_flags eqclause in let occs = Unification.w_unify_to_subterm_all ~flags env eqclause.evd ((if l2r then c1 else c2),concl) in List.map try_occ occs let instantiate_lemma env sigma gl c ty l l2r concl = let gl = { gl with sigma = sigma } in let ct = pf_type_of gl c in let t = try snd (pf_reduce_to_quantified_ind gl ct) with UserError _ -> ct in let eqclause = Clenv.make_clenv_binding gl (c,t) l in [eqclause] let rewrite_conv_closed_unif_flags = { Unification.modulo_conv_on_closed_terms = Some full_transparent_state; (* We have this flag for historical reasons, it has e.g. the consequence *) (* to rewrite "?x+2" in "y+(1+1)=0" or to rewrite "?x+?x" in "2+(1+1)=0" *) Unification.use_metas_eagerly_in_conv_on_closed_terms = true; (* Combined with modulo_conv_on_closed_terms, this flag allows since 8.2 *) (* to rewrite e.g. 
"?x+(2+?x)" in "1+(1+2)=0" *) Unification.modulo_delta = empty_transparent_state; Unification.modulo_delta_types = full_transparent_state; Unification.modulo_delta_in_merge = None; Unification.check_applied_meta_types = true; Unification.resolve_evars = false; Unification.use_pattern_unification = true; (* To rewrite "?n x y" in "y+x=0" when ?n is *) (* a preexisting evar of the goal*) Unification.use_meta_bound_pattern_unification = true; Unification.frozen_evars = ExistentialSet.empty; (* This is set dynamically *) Unification.restrict_conv_on_strict_subterms = false; Unification.modulo_betaiota = false; Unification.modulo_eta = true; Unification.allow_K_in_toplevel_higher_order_unification = false } let rewrite_elim with_evars frzevars c e gl = let flags = make_flags frzevars (project gl) rewrite_conv_closed_unif_flags c in general_elim_clause_gen (elimination_clause_scheme with_evars ~flags) c e gl let rewrite_elim_in with_evars frzevars id c e gl = let flags = make_flags frzevars (project gl) rewrite_conv_closed_unif_flags c in general_elim_clause_gen (elimination_in_clause_scheme with_evars ~flags id) c e gl (* Ad hoc asymmetric general_elim_clause *) let general_elim_clause with_evars frzevars cls rew elim = try (match cls with | None -> (* was tclWEAK_PROGRESS which only fails for tactics generating one subgoal and did not fail for useless conditional rewritings generating an extra condition *) tclNOTSAMEGOAL (rewrite_elim with_evars frzevars rew elim) | Some id -> rewrite_elim_in with_evars frzevars id rew elim) with Pretype_errors.PretypeError (env,evd, Pretype_errors.NoOccurrenceFound (c', _)) -> raise (Pretype_errors.PretypeError (env,evd,Pretype_errors.NoOccurrenceFound (c', cls))) let general_elim_clause with_evars frzevars tac cls sigma c t l l2r elim gl = let all, firstonly, tac = match tac with | None -> false, false, None | Some (tac, Naive) -> false, false, Some tac | Some (tac, FirstSolved) -> true, true, Some (tclCOMPLETE tac) | Some (tac, AllMatches) -> true, false, Some (tclCOMPLETE tac) in let cs = (if not all then instantiate_lemma else instantiate_lemma_all frzevars) (pf_env gl) sigma gl c t l l2r (match cls with None -> pf_concl gl | Some id -> pf_get_hyp_typ gl id) in let try_clause c = side_tac (tclTHEN (Refiner.tclEVARS c.evd) (general_elim_clause with_evars frzevars cls c elim)) tac in if firstonly then tclFIRST (List.map try_clause cs) gl else tclMAP try_clause cs gl (* The next function decides in particular whether to try a regular rewrite or a generalized rewrite. Approach is to break everything, if [eq] appears in head position then regular rewrite else try general rewrite. If occurrences are set, use general rewrite. *) let general_rewrite_clause = ref (fun _ -> assert false) let register_general_rewrite_clause = (:=) general_rewrite_clause let is_applied_rewrite_relation = ref (fun _ _ _ _ -> None) let register_is_applied_rewrite_relation = (:=) is_applied_rewrite_relation (* find_elim determines which elimination principle is necessary to eliminate lbeq on sort_of_gl. 
*) let find_elim hdcncl lft2rgt dep cls args gl = let inccl = (cls = None) in if (eq_constr hdcncl (constr_of_reference (Coqlib.glob_eq)) || eq_constr hdcncl (constr_of_reference (Coqlib.glob_jmeq)) && pf_conv_x gl (List.nth args 0) (List.nth args 2)) && not dep || Flags.version_less_or_equal Flags.V8_2 then match kind_of_term hdcncl with | Ind ind_sp -> let pr1 = lookup_eliminator ind_sp (elimination_sort_of_clause cls gl) in if lft2rgt = Some (cls=None) then let c1 = destConst pr1 in let mp,dp,l = repr_con (constant_of_kn (canonical_con c1)) in let l' = label_of_id (add_suffix (id_of_label l) "_r") in let c1' = Global.constant_of_delta_kn (make_kn mp dp l') in begin try let _ = Global.lookup_constant c1' in mkConst c1' with Not_found -> let rwr_thm = string_of_label l' in error ("Cannot find rewrite principle "^rwr_thm^".") end else pr1 | _ -> (* cannot occur since we checked that we are in presence of Logic.eq or Jmeq just before *) assert false else let scheme_name = match dep, lft2rgt, inccl with (* Non dependent case *) | false, Some true, true -> rew_l2r_scheme_kind | false, Some true, false -> rew_r2l_scheme_kind | false, _, false -> rew_l2r_scheme_kind | false, _, true -> rew_r2l_scheme_kind (* Dependent case *) | true, Some true, true -> rew_l2r_dep_scheme_kind | true, Some true, false -> rew_l2r_forward_dep_scheme_kind | true, _, true -> rew_r2l_dep_scheme_kind | true, _, false -> rew_r2l_forward_dep_scheme_kind in match kind_of_term hdcncl with | Ind ind -> mkConst (find_scheme scheme_name ind) | _ -> assert false let type_of_clause gl = function | None -> pf_concl gl | Some id -> pf_get_hyp_typ gl id let leibniz_rewrite_ebindings_clause cls lft2rgt tac sigma c t l with_evars frzevars dep_proof_ok gl hdcncl = let isatomic = isProd (whd_zeta hdcncl) in let dep_fun = if isatomic then dependent else dependent_no_evar in let dep = dep_proof_ok && dep_fun c (type_of_clause gl cls) in let elim = find_elim hdcncl lft2rgt dep cls (snd (decompose_app t)) gl in general_elim_clause with_evars frzevars tac cls sigma c t l (match lft2rgt with None -> false | Some b -> b) {elimindex = None; elimbody = (elim,NoBindings)} gl let adjust_rewriting_direction args lft2rgt = if List.length args = 1 then begin (* equality to a constant, like in eq_true *) (* more natural to see -> as the rewriting to the constant *) if not lft2rgt then error "Rewriting non-symmetric equality not allowed from right-to-left."; None end else (* other equality *) Some lft2rgt let rewrite_side_tac tac sidetac = side_tac tac (Option.map fst sidetac) (* Main function for dispatching which kind of rewriting it is about *) let general_rewrite_ebindings_clause cls lft2rgt occs frzevars dep_proof_ok ?tac ((c,l) : constr with_bindings) with_evars gl = if occs <> all_occurrences then ( rewrite_side_tac (!general_rewrite_clause cls lft2rgt occs (c,l) ~new_goals:[]) tac gl) else let env = pf_env gl in let sigma = project gl in let ctype = get_type_of env sigma c in let rels, t = decompose_prod_assum (whd_betaiotazeta sigma ctype) in match match_with_equality_type t with | Some (hdcncl,args) -> (* Fast path: direct leibniz-like rewrite *) let lft2rgt = adjust_rewriting_direction args lft2rgt in leibniz_rewrite_ebindings_clause cls lft2rgt tac sigma c (it_mkProd_or_LetIn t rels) l with_evars frzevars dep_proof_ok gl hdcncl | None -> try rewrite_side_tac (!general_rewrite_clause cls lft2rgt occs (c,l) ~new_goals:[]) tac gl with e when Errors.noncritical e -> (* Try to see if there's an equality hidden *) let env' = push_rel_context 
rels env in let rels',t' = splay_prod_assum env' sigma t in (* Search for underlying eq *) match match_with_equality_type t' with | Some (hdcncl,args) -> let lft2rgt = adjust_rewriting_direction args lft2rgt in leibniz_rewrite_ebindings_clause cls lft2rgt tac sigma c (it_mkProd_or_LetIn t' (rels' @ rels)) l with_evars frzevars dep_proof_ok gl hdcncl | None -> raise e (* error "The provided term does not end with an equality or a declared rewrite relation." *) let general_rewrite_ebindings = general_rewrite_ebindings_clause None let general_rewrite_bindings l2r occs frzevars dep_proof_ok ?tac (c,bl) = general_rewrite_ebindings_clause None l2r occs frzevars dep_proof_ok ?tac (c,bl) let general_rewrite l2r occs frzevars dep_proof_ok ?tac c = general_rewrite_bindings l2r occs frzevars dep_proof_ok ?tac (c,NoBindings) false let general_rewrite_ebindings_in l2r occs frzevars dep_proof_ok ?tac id = general_rewrite_ebindings_clause (Some id) l2r occs frzevars dep_proof_ok ?tac let general_rewrite_bindings_in l2r occs frzevars dep_proof_ok ?tac id (c,bl) = general_rewrite_ebindings_clause (Some id) l2r occs frzevars dep_proof_ok ?tac (c,bl) let general_rewrite_in l2r occs frzevars dep_proof_ok ?tac id c = general_rewrite_ebindings_clause (Some id) l2r occs frzevars dep_proof_ok ?tac (c,NoBindings) let general_multi_rewrite l2r with_evars ?tac c cl = let occs_of = on_snd (List.fold_left (fun acc -> function ArgArg x -> x :: acc | ArgVar _ -> acc) []) in match cl.onhyps with | Some l -> (* If a precise list of locations is given, success is mandatory for each of these locations. *) let rec do_hyps = function | [] -> tclIDTAC | ((occs,id),_) :: l -> tclTHENFIRST (general_rewrite_ebindings_in l2r (occs_of occs) false true ?tac id c with_evars) (do_hyps l) in if cl.concl_occs = no_occurrences_expr then do_hyps l else tclTHENFIRST (general_rewrite_ebindings l2r (occs_of cl.concl_occs) false true ?tac c with_evars) (do_hyps l) | None -> (* Otherwise, if we are told to rewrite in all hypothesis via the syntax "* |-", we fail iff all the different rewrites fail *) let rec do_hyps_atleastonce = function | [] -> (fun gl -> error "Nothing to rewrite.") | id :: l -> tclIFTHENTRYELSEMUST (general_rewrite_ebindings_in l2r all_occurrences false true ?tac id c with_evars) (do_hyps_atleastonce l) in let do_hyps gl = (* If the term to rewrite uses an hypothesis H, don't rewrite in H *) let ids = let ids_in_c = Environ.global_vars_set (Global.env()) (fst c) in Idset.fold (fun id l -> list_remove id l) ids_in_c (pf_ids_of_hyps gl) in do_hyps_atleastonce ids gl in if cl.concl_occs = no_occurrences_expr then do_hyps else tclIFTHENTRYELSEMUST (general_rewrite_ebindings l2r (occs_of cl.concl_occs) false true ?tac c with_evars) do_hyps type delayed_open_constr_with_bindings = env -> evar_map -> evar_map * constr with_bindings let general_multi_multi_rewrite with_evars l cl tac = let do1 l2r f gl = let sigma,c = f (pf_env gl) (project gl) in Refiner.tclWITHHOLES with_evars (general_multi_rewrite l2r with_evars ?tac c) sigma cl gl in let rec doN l2r c = function | Precisely n when n <= 0 -> tclIDTAC | Precisely 1 -> do1 l2r c | Precisely n -> tclTHENFIRST (do1 l2r c) (doN l2r c (Precisely (n-1))) | RepeatStar -> tclREPEAT_MAIN (do1 l2r c) | RepeatPlus -> tclTHENFIRST (do1 l2r c) (doN l2r c RepeatStar) | UpTo n when n<=0 -> tclIDTAC | UpTo n -> tclTHENFIRST (tclTRY (do1 l2r c)) (doN l2r c (UpTo (n-1))) in let rec loop = function | [] -> tclIDTAC | (l2r,m,c)::l -> tclTHENFIRST (doN l2r c m) (loop l) in loop l let rewriteLR = 
general_rewrite true all_occurrences true true let rewriteRL = general_rewrite false all_occurrences true true (* Replacing tactics *) (* eq,sym_eq : equality on Type and its symmetry theorem c2 c1 : c1 is to be replaced by c2 unsafe : If true, do not check that c1 and c2 are convertible tac : Used to prove the equality c1 = c2 gl : goal *) let multi_replace clause c2 c1 unsafe try_prove_eq_opt gl = let try_prove_eq = match try_prove_eq_opt with | None -> tclIDTAC | Some tac -> tclCOMPLETE tac in let t1 = pf_apply get_type_of gl c1 and t2 = pf_apply get_type_of gl c2 in if unsafe or (pf_conv_x gl t1 t2) then let e = build_coq_eq () in let sym = build_coq_eq_sym () in let eq = applist (e, [t1;c1;c2]) in tclTHENS (assert_as false None eq) [onLastHypId (fun id -> tclTHEN (tclTRY (general_multi_rewrite false false (mkVar id,NoBindings) clause)) (clear [id])); tclFIRST [assumption; tclTHEN (apply sym) assumption; try_prove_eq ] ] gl else error "Terms do not have convertible types." let replace c2 c1 gl = multi_replace onConcl c2 c1 false None gl let replace_in id c2 c1 gl = multi_replace (onHyp id) c2 c1 false None gl let replace_by c2 c1 tac gl = multi_replace onConcl c2 c1 false (Some tac) gl let replace_in_by id c2 c1 tac gl = multi_replace (onHyp id) c2 c1 false (Some tac) gl let replace_in_clause_maybe_by c2 c1 cl tac_opt gl = multi_replace cl c2 c1 false tac_opt gl (* End of Eduardo's code. The rest of this file could be improved using the functions match_with_equation, etc that I defined in Pattern.ml. -- Eduardo (19/8/97) *) (* Tactics for equality reasoning with the "eq" relation. This code will work with any equivalence relation which is substitutive *) (* [find_positions t1 t2] will find the positions in the two terms which are suitable for discrimination, or for injection. Obviously, if there is a position which is suitable for discrimination, then we want to exploit it, and not bother with injection. So when we find a position which is suitable for discrimination, we will just raise an exception with that position. So the algorithm goes like this: if [t1] and [t2] start with the same constructor, then we can continue to try to find positions in the arguments of [t1] and [t2]. if [t1] and [t2] do not start with the same constructor, then we have found a discrimination position if one [t1] or [t2] do not start with a constructor and the two terms are not already convertible, then we have found an injection position. A discriminating position consists of a constructor-path and a pair of operators. The constructor-path tells us how to get down to the place where the two operators, which must differ, can be found. An injecting position has two terms instead of the two operators, since these terms are different, but not manifestly so. A constructor-path is a list of pairs of (operator * int), where the int (based at 0) tells us which argument of the operator we descended into. *) exception DiscrFound of (constructor * int) list * constructor * constructor let find_positions env sigma t1 t2 = let rec findrec sorts posn t1 t2 = let hd1,args1 = whd_betadeltaiota_stack env sigma t1 in let hd2,args2 = whd_betadeltaiota_stack env sigma t2 in match (kind_of_term hd1, kind_of_term hd2) with | Construct sp1, Construct sp2 when List.length args1 = mis_constructor_nargs_env env sp1 -> let sorts = list_intersect sorts (allowed_sorts env (fst sp1)) in (* both sides are fully applied constructors, so either we descend, or we can discriminate here. 
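(Worked example, added for illustration: comparing S (S O) with S O, the outer head
 constructors agree, so we descend into the single argument; there the heads S and O differ,
 which yields a discriminating position consisting of the first argument of S.)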
*) if is_conv env sigma hd1 hd2 then let nrealargs = constructor_nrealargs env sp1 in let rargs1 = list_lastn nrealargs args1 in let rargs2 = list_lastn nrealargs args2 in List.flatten (list_map2_i (fun i -> findrec sorts ((sp1,i)::posn)) 0 rargs1 rargs2) else if List.mem InType sorts then (* see build_discriminator *) raise (DiscrFound (List.rev posn,sp1,sp2)) else [] | _ -> let t1_0 = applist (hd1,args1) and t2_0 = applist (hd2,args2) in if is_conv env sigma t1_0 t2_0 then [] else let ty1_0 = get_type_of env sigma t1_0 in let s = get_sort_family_of env sigma ty1_0 in if List.mem s sorts then [(List.rev posn,t1_0,t2_0)] else [] in try (* Rem: to allow injection on proofs objects, just add InProp *) Inr (findrec [InSet;InType] [] t1 t2) with DiscrFound (path,c1,c2) -> Inl (path,c1,c2) let discriminable env sigma t1 t2 = match find_positions env sigma t1 t2 with | Inl _ -> true | _ -> false let injectable env sigma t1 t2 = match find_positions env sigma t1 t2 with | Inl _ | Inr [] | Inr [([],_,_)] -> false | Inr _ -> true (* Once we have found a position, we need to project down to it. If we are discriminating, then we need to produce False on one of the branches of the discriminator, and True on the other one. So the result type of the case-expressions is always Prop. If we are injecting, then we need to discover the result-type. This can be difficult, since the type of the two terms at the injection-position can be different, and we need to find a dependent sigma-type which generalizes them both. We can get an approximation to the right type to choose by: (0) Before beginning, we reserve a patvar for the default value of the match, to be used in all the bogus branches. (1) perform the case-splits, down to the site of the injection. At each step, we have a term which is the "head" of the next case-split. At the point when we actually reach the end of our path, the "head" is the term to return. We compute its type, and then, backwards, make a sigma-type with every free debruijn reference in that type. We can be finer, and first do a S(TRONG)NF on the type, so that we get the fewest number of references possible. (2) This gives us a closed type for the head, which we use for the types of all the case-splits. (3) Now, we can compute the type of one of T1, T2, and then unify it with the type of the last component of the result-type, and this will give us the bindings for the other arguments of the tuple. *) (* The algorithm, then is to perform successive case-splits. We have the result-type of the case-split, and also the type of that result-type. We have a "direction" we want to follow, i.e. a constructor-number, and in all other "directions", we want to juse use the default-value. After doing the case-split, we call the afterfun, with the updated environment, to produce the term for the desired "direction". The assumption is made here that the result-type is not manifestly functional, so we can just use the length of the branch-type to know how many lambda's to stick in. *) (* [descend_then sigma env head dirn] returns the number of products introduced, and the environment which is active, in the body of the case-branch given by [dirn], along with a continuation, which expects to be fed: (1) the value of the body of the branch given by [dirn] (2) the default-value (3) the type of the default-value, which must also be the type of the body of the [dirn] branch the continuation then constructs the case-split. 
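(Illustration, assuming the standard nat type: with head : nat and dirn = 2, the continuation
 builds, schematically, match head with O => default | S p => body end, where body may refer
 to the bound p.)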
*) let descend_then sigma env head dirn = let IndType (indf,_) = try find_rectype env sigma (get_type_of env sigma head) with Not_found -> error "Cannot project on an inductive type derived from a dependency." in let ind,_ = dest_ind_family indf in let (mib,mip) = lookup_mind_specif env ind in let cstr = get_constructors env indf in let dirn_nlams = cstr.(dirn-1).cs_nargs in let dirn_env = push_rel_context cstr.(dirn-1).cs_args env in (dirn_nlams, dirn_env, (fun dirnval (dfltval,resty) -> let deparsign = make_arity_signature env true indf in let p = it_mkLambda_or_LetIn (lift (mip.mind_nrealargs+1) resty) deparsign in let build_branch i = let result = if i = dirn then dirnval else dfltval in it_mkLambda_or_LetIn_name env result cstr.(i-1).cs_args in let brl = List.map build_branch (interval 1 (Array.length mip.mind_consnames)) in let ci = make_case_info env ind RegularStyle in mkCase (ci, p, head, Array.of_list brl))) (* Now we need to construct the discriminator, given a discriminable position. This boils down to: (1) If the position is directly beneath us, then we need to do a case-split, with result-type Prop, and stick True and False into the branches, as is convenient. (2) If the position is not directly beneath us, then we need to call descend_then, to descend one step, and then recursively construct the discriminator. *) (* [construct_discriminator env dirn headval] constructs a case-split on [headval], with the [dirn]-th branch giving [True], and all the rest giving False. *) let construct_discriminator sigma env dirn c sort = let IndType(indf,_) = try find_rectype env sigma (get_type_of env sigma c) with Not_found -> (* one can find Rel(k) in case of dependent constructors like T := c : (A:Set)A->T and a discrimination on (c bool true) = (c bool false) CP : changed assert false in a more informative error *) errorlabstrm "Equality.construct_discriminator" (str "Cannot discriminate on inductive constructors with \ dependent types.") in let (ind,_) = dest_ind_family indf in let (mib,mip) = lookup_mind_specif env ind in let (true_0,false_0,sort_0) = build_coq_True(),build_coq_False(),Prop Null in let deparsign = make_arity_signature env true indf in let p = it_mkLambda_or_LetIn (mkSort sort_0) deparsign in let cstrs = get_constructors env indf in let build_branch i = let endpt = if i = dirn then true_0 else false_0 in it_mkLambda_or_LetIn endpt cstrs.(i-1).cs_args in let brl = List.map build_branch(interval 1 (Array.length mip.mind_consnames)) in let ci = make_case_info env ind RegularStyle in mkCase (ci, p, c, Array.of_list brl) let rec build_discriminator sigma env dirn c sort = function | [] -> construct_discriminator sigma env dirn c sort | ((sp,cnum),argnum)::l -> let (cnum_nlams,cnum_env,kont) = descend_then sigma env c cnum in let newc = mkRel(cnum_nlams-argnum) in let subval = build_discriminator sigma cnum_env dirn newc sort l in kont subval (build_coq_False (),mkSort (Prop Null)) (* Note: discrimination could be more clever: if some elimination is not allowed because of a large impredicative constructor in the path (see allowed_sorts in find_positions), the positions could still be discrimated by projecting first instead of putting the discrimination combinator inside the projecting combinator. Example of relevant situation: Inductive t:Set := c : forall A:Set, A -> nat -> t. Goal ~ c _ 0 0 = c _ 0 1. intro. discriminate H. 
*) let gen_absurdity id gl = if is_empty_type (pf_get_hyp_typ gl id) then simplest_elim (mkVar id) gl else errorlabstrm "Equality.gen_absurdity" (str "Not the negation of an equality.") (* Precondition: eq is leibniz equality returns ((eq_elim t t1 P i t2), absurd_term) where P=[e:t]discriminator absurd_term=False *) let ind_scheme_of_eq lbeq = let (mib,mip) = Global.lookup_inductive (destInd lbeq.eq) in let kind = inductive_sort_family mip in (* use ind rather than case by compatibility *) let kind = if kind = InProp then Elimschemes.ind_scheme_kind_from_prop else Elimschemes.ind_scheme_kind_from_type in mkConst (find_scheme kind (destInd lbeq.eq)) let discrimination_pf e (t,t1,t2) discriminator lbeq = let i = build_coq_I () in let absurd_term = build_coq_False () in let eq_elim = ind_scheme_of_eq lbeq in (applist (eq_elim, [t;t1;mkNamedLambda e t discriminator;i;t2]), absurd_term) exception NotDiscriminable let eq_baseid = id_of_string "e" let apply_on_clause (f,t) clause = let sigma = clause.evd in let f_clause = mk_clenv_from_env clause.env sigma None (f,t) in let argmv = (match kind_of_term (last_arg f_clause.templval.Evd.rebus) with | Meta mv -> mv | _ -> errorlabstrm "" (str "Ill-formed clause applicator.")) in clenv_fchain argmv f_clause clause let discr_positions env sigma (lbeq,eqn,(t,t1,t2)) eq_clause cpath dirn sort = let e = next_ident_away eq_baseid (ids_of_context env) in let e_env = push_named (e,None,t) env in let discriminator = build_discriminator sigma e_env dirn (mkVar e) sort cpath in let (pf, absurd_term) = discrimination_pf e (t,t1,t2) discriminator lbeq in let pf_ty = mkArrow eqn absurd_term in let absurd_clause = apply_on_clause (pf,pf_ty) eq_clause in let pf = clenv_value_cast_meta absurd_clause in tclTHENS (cut_intro absurd_term) [onLastHypId gen_absurdity; refine pf] let discrEq (lbeq,_,(t,t1,t2) as u) eq_clause gls = let sigma = eq_clause.evd in let env = pf_env gls in match find_positions env sigma t1 t2 with | Inr _ -> errorlabstrm "discr" (str"Not a discriminable equality.") | Inl (cpath, (_,dirn), _) -> let sort = pf_apply get_type_of gls (pf_concl gls) in discr_positions env sigma u eq_clause cpath dirn sort gls let onEquality with_evars tac (c,lbindc) gls = let t = pf_type_of gls c in let t' = try snd (pf_reduce_to_quantified_ind gls t) with UserError _ -> t in let eq_clause = make_clenv_binding gls (c,t') lbindc in let eq_clause' = clenv_pose_dependent_evars with_evars eq_clause in let eqn = clenv_type eq_clause' in let eq,eq_args = find_this_eq_data_decompose gls eqn in tclTHEN (Refiner.tclEVARS eq_clause'.evd) (tac (eq,eqn,eq_args) eq_clause') gls let onNegatedEquality with_evars tac gls = let ccl = pf_concl gls in match kind_of_term (hnf_constr (pf_env gls) (project gls) ccl) with | Prod (_,t,u) when is_empty_type u -> tclTHEN introf (onLastHypId (fun id -> onEquality with_evars tac (mkVar id,NoBindings))) gls | _ -> errorlabstrm "" (str "Not a negated primitive equality.") let discrSimpleClause with_evars = function | None -> onNegatedEquality with_evars discrEq | Some id -> onEquality with_evars discrEq (mkVar id,NoBindings) let discr with_evars = onEquality with_evars discrEq let discrClause with_evars = onClause (discrSimpleClause with_evars) let discrEverywhere with_evars = (* tclORELSE *) (if discr_do_intro () then (tclTHEN (tclREPEAT introf) (Tacticals.tryAllHyps (fun id -> tclCOMPLETE (discr with_evars (mkVar id,NoBindings))))) else (* <= 8.2 compat *) Tacticals.tryAllHypsAndConcl (discrSimpleClause with_evars)) (* (fun gls -> errorlabstrm 
"DiscrEverywhere" (str"No discriminable equalities.")) *) let discr_tac with_evars = function | None -> discrEverywhere with_evars | Some c -> onInductionArg (discr with_evars) c let discrConcl gls = discrClause false onConcl gls let discrHyp id gls = discrClause false (onHyp id) gls (* returns the sigma type (sigS, sigT) with the respective constructor depending on the sort *) (* J.F.: correction du bug #1167 en accord avec Hugo. *) let find_sigma_data s = build_sigma_type () (* [make_tuple env sigma (rterm,rty) lind] assumes [lind] is the lesser index bound in [rty] Then we build the term [(existT A P (mkRel lind) rterm)] of type [(sigS A P)] where [A] is the type of [mkRel lind] and [P] is [\na:A.rty{1/lind}] *) let make_tuple env sigma (rterm,rty) lind = assert (dependent (mkRel lind) rty); let {intro = exist_term; typ = sig_term} = find_sigma_data (get_sort_of env sigma rty) in let a = type_of env sigma (mkRel lind) in let (na,_,_) = lookup_rel lind env in (* We move [lind] to [1] and lift other rels > [lind] by 1 *) let rty = lift (1-lind) (liftn lind (lind+1) rty) in (* Now [lind] is [mkRel 1] and we abstract on (na:a) *) let p = mkLambda (na, a, rty) in (applist(exist_term,[a;p;(mkRel lind);rterm]), applist(sig_term,[a;p])) (* check that the free-references of the type of [c] are contained in the free-references of the normal-form of that type. Strictly computing the exact set of free rels would require full normalization but this is not reasonable (e.g. in presence of records that contains proofs). We restrict ourself to a "simpl" normalization *) let minimal_free_rels env sigma (c,cty) = let cty_rels = free_rels cty in let cty' = simpl env sigma cty in let rels' = free_rels cty' in if Intset.subset cty_rels rels' then (cty,cty_rels) else (cty',rels') (* Like the above, but recurse over all the rels found until there are no more rels to be found *) let minimal_free_rels_rec env sigma = let rec minimalrec_free_rels_rec prev_rels (c,cty) = let (cty,direct_rels) = minimal_free_rels env sigma (c,cty) in let combined_rels = Intset.union prev_rels direct_rels in let folder rels i = snd (minimalrec_free_rels_rec rels (c, type_of env sigma (mkRel i))) in (cty, List.fold_left folder combined_rels (Intset.elements (Intset.diff direct_rels prev_rels))) in minimalrec_free_rels_rec Intset.empty (* [sig_clausal_form siglen ty] Will explode [siglen] [sigS,sigT ]'s on [ty] (depending on the type of ty), and return: (1) a pattern, with meta-variables in it for various arguments, which, when the metavariables are replaced with appropriate terms, will have type [ty] (2) an integer, which is the last argument - the one which we just returned. (3) a pattern, for the type of that last meta (4) a typing for each patvar WARNING: No checking is done to make sure that the sigS(or sigT)'s are actually there. - Only homogeneous pairs are built i.e. pairs where all the dependencies are of the same sort [sig_clausal_form] proceed as follows: the default tuple is constructed by taking the tuple-type, exploding the first [tuplen] [sigS]'s, and replacing at each step the binder in the right-hand-type by a fresh metavariable. In addition, on the way back out, we will construct the pattern for the tuple which uses these meta-vars. This gives us a pattern, which we use to match against the type of [dflt]; if that fails, then against the S(TRONG)NF of that type. If both fail, then we just cannot construct our tuple. If one of those succeed, then we can construct our value easily - we just use the tuple-pattern. 
*) let sig_clausal_form env sigma sort_of_ty siglen ty dflt = let { intro = exist_term } = find_sigma_data sort_of_ty in let evdref = ref (Evd.create_goal_evar_defs sigma) in let rec sigrec_clausal_form siglen p_i = if siglen = 0 then (* is the default value typable with the expected type *) let dflt_typ = type_of env sigma dflt in if Evarconv.e_cumul env evdref dflt_typ p_i then (* the_conv_x had a side-effect on evdref *) dflt else error "Cannot solve a unification problem." else let (a,p_i_minus_1) = match whd_beta_stack !evdref p_i with | (_sigS,[a;p]) -> (a,p) | _ -> anomaly "sig_clausal_form: should be a sigma type" in let ev = Evarutil.e_new_evar evdref env a in let rty = beta_applist(p_i_minus_1,[ev]) in let tuple_tail = sigrec_clausal_form (siglen-1) rty in match Evd.existential_opt_value !evdref (destEvar ev) with | Some w -> let w_type = type_of env sigma w in if Evarconv.e_cumul env evdref w_type a then applist(exist_term,[w_type;p_i_minus_1;w;tuple_tail]) else error "Cannot solve a unification problem." | None -> anomaly "Not enough components to build the dependent tuple" in let scf = sigrec_clausal_form siglen ty in Evarutil.nf_evar !evdref scf (* The problem is to build a destructor (a generalization of the predecessor) which, when applied to a term made of constructors (say [Ci(e1,Cj(e2,Ck(...,term,...),...),...)]), returns a given subterm of the term (say [term]). Let [typ] be the type of [term]. If [term] has no dependencies in the [e1], [e2], etc, then all is simple. If not, then we need to encapsulated the dependencies into a dependent tuple in such a way that the destructor has not a dependent type and rewriting can then be applied. The destructor has the form [e]Cases e of | ... | Ci (x1,x2,...) => Cases x2 of | ... | Cj (y1,y2,...) => Cases y2 of | ... | Ck (...,z,...) => z | ... end | ... end | ... end and the dependencies is expressed by the fact that [z] has a type dependent in the x1, y1, ... Assume [z] is typed as follows: env |- z:zty If [zty] has no dependencies, this is simple. Otherwise, assume [zty] has free (de Bruijn) variables in,...i1 then the role of [make_iterated_tuple sigma env (term,typ) (z,zty)] is to build the tuple [existT [xn]Pn Rel(in) .. (existT [x2]P2 Rel(i2) (existT [x1]P1 Rel(i1) z))] where P1 is zty[i1/x1], P2 is {x1 | P1[i2/x2]} etc. To do this, we find the free (relative) references of the strong NF of [z]'s type, gather them together in left-to-right order (i.e. highest-numbered is farthest-left), and construct a big iterated pair out of it. This only works when the references are all themselves to members of [Set]s, because we use [sigS] to construct the tuple. Suppose now that our constructed tuple is of length [tuplen]. We need also to construct a default value for the other branches of the destructor. As default value, we take a tuple of the form [existT [xn]Pn ?n (... existT [x2]P2 ?2 (existT [x1]P1 ?1 term))] but for this we have to solve the following unification problem: typ = zty[i1/?1;...;in/?n] This is done by [sig_clausal_form]. 
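(Illustration, not from the original text: if z has type n = 0 where n : nat is its only free
 reference, the iterated tuple packs n together with z into a dependent pair of type
 sigT (fun n : nat => n = 0), and sig_clausal_form then builds the matching default value
 from dflt as sketched above.)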
*) let make_iterated_tuple env sigma dflt (z,zty) = let (zty,rels) = minimal_free_rels_rec env sigma (z,zty) in let sort_of_zty = get_sort_of env sigma zty in let sorted_rels = Sort.list (<) (Intset.elements rels) in let (tuple,tuplety) = List.fold_left (make_tuple env sigma) (z,zty) sorted_rels in assert (closed0 tuplety); let n = List.length sorted_rels in let dfltval = sig_clausal_form env sigma sort_of_zty n tuplety dflt in (tuple,tuplety,dfltval) let rec build_injrec sigma env dflt c = function | [] -> make_iterated_tuple env sigma dflt (c,type_of env sigma c) | ((sp,cnum),argnum)::l -> try let (cnum_nlams,cnum_env,kont) = descend_then sigma env c cnum in let newc = mkRel(cnum_nlams-argnum) in let (subval,tuplety,dfltval) = build_injrec sigma cnum_env dflt newc l in (kont subval (dfltval,tuplety), tuplety,dfltval) with UserError _ -> failwith "caught" let build_injector sigma env dflt c cpath = let (injcode,resty,_) = build_injrec sigma env dflt c cpath in (injcode,resty) (* let try_delta_expand env sigma t = let whdt = whd_betadeltaiota env sigma t in let rec hd_rec c = match kind_of_term c with | Construct _ -> whdt | App (f,_) -> hd_rec f | Cast (c,_,_) -> hd_rec c | _ -> t in hd_rec whdt *) (* Given t1=t2 Inj calculates the whd normal forms of t1 and t2 and it expands then only when the whdnf has a constructor of an inductive type in hd position, otherwise delta expansion is not done *) let simplify_args env sigma t = (* Quick hack to reduce in arguments of eq only *) match decompose_app t with | eq, [t;c1;c2] -> applist (eq,[t;simpl env sigma c1;simpl env sigma c2]) | eq, [t1;c1;t2;c2] -> applist (eq,[t1;simpl env sigma c1;t2;simpl env sigma c2]) | _ -> t let inject_at_positions env sigma (eq,_,(t,t1,t2)) eq_clause posns tac = let e = next_ident_away eq_baseid (ids_of_context env) in let e_env = push_named (e,None,t) env in let injectors = map_succeed (fun (cpath,t1',t2') -> (* arbitrarily take t1' as the injector default value *) let (injbody,resty) = build_injector sigma e_env t1' (mkVar e) cpath in let injfun = mkNamedLambda e t injbody in let pf = applist(eq.congr,[t;resty;injfun;t1;t2]) in let pf_typ = get_type_of env sigma pf in let inj_clause = apply_on_clause (pf,pf_typ) eq_clause in let pf = clenv_value_cast_meta inj_clause in let ty = simplify_args env sigma (clenv_type inj_clause) in (pf,ty)) posns in if injectors = [] then errorlabstrm "Equality.inj" (str "Failed to decompose the equality."); tclTHEN (tclMAP (fun (pf,ty) -> tclTHENS (cut ty) [tclIDTAC; refine pf]) injectors) (tac (List.length injectors)) exception Not_dep_pair let eq_dec_scheme_kind_name = ref (fun _ -> failwith "eq_dec_scheme undefined") let set_eq_dec_scheme_kind k = eq_dec_scheme_kind_name := (fun _ -> k) let injEq ipats (eq,_,(t,t1,t2) as u) eq_clause = let sigma = eq_clause.evd in let env = eq_clause.env in match find_positions env sigma t1 t2 with | Inl _ -> errorlabstrm "Inj" (str"Not a projectable equality but a discriminable one.") | Inr [] -> errorlabstrm "Equality.inj" (str"Nothing to do, it is an equality between convertible terms.") | Inr [([],_,_)] when Flags.version_strictly_greater Flags.V8_3 -> errorlabstrm "Equality.inj" (str"Nothing to inject.") | Inr posns -> (* Est-ce utile à partir du moment où les arguments projetés subissent "nf" ? 
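(English rendering of the question above, added as a gloss: is this still useful now that the
 projected arguments already go through the nf simplification?)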
let t1 = try_delta_expand env sigma t1 in let t2 = try_delta_expand env sigma t2 in *) try ( (* fetch the informations of the pair *) let ceq = constr_of_global Coqlib.glob_eq in let sigTconstr () = (Coqlib.build_sigma_type()).Coqlib.typ in let eqTypeDest = fst (destApp t) in let _,ar1 = destApp t1 and _,ar2 = destApp t2 in let ind = destInd ar1.(0) in let inj2 = Coqlib.coq_constant "inj_pair2_eq_dec is missing" ["Logic";"Eqdep_dec"] "inj_pair2_eq_dec" in (* check whether the equality deals with dep pairs or not *) (* if yes, check if the user has declared the dec principle *) (* and compare the fst arguments of the dep pair *) let new_eq_args = [|type_of env sigma (ar1.(3));ar1.(3);ar2.(3)|] in if ( (eqTypeDest = sigTconstr()) && (Ind_tables.check_scheme (!eq_dec_scheme_kind_name()) ind=true) && (is_conv env sigma (ar1.(2)) (ar2.(2)) = true)) then ( (* Require Import Eqdec_dec copied from vernac_require in vernacentries.ml*) let qidl = qualid_of_reference (Ident (dummy_loc,id_of_string "Eqdep_dec")) in Library.require_library [qidl] (Some false); (* cut with the good equality and prove the requested goal *) tclTHENS (cut (mkApp (ceq,new_eq_args)) ) [tclIDTAC; tclTHEN (apply ( mkApp(inj2, [|ar1.(0);mkConst (find_scheme (!eq_dec_scheme_kind_name()) ind); ar1.(1);ar1.(2);ar1.(3);ar2.(3)|]) )) (Auto.trivial [] []) ] (* not a dep eq or no decidable type found *) ) else (raise Not_dep_pair) ) with e when Errors.noncritical e -> inject_at_positions env sigma u eq_clause posns (fun _ -> intros_pattern no_move ipats) let inj ipats with_evars = onEquality with_evars (injEq ipats) let injClause ipats with_evars = function | None -> onNegatedEquality with_evars (injEq ipats) | Some c -> onInductionArg (inj ipats with_evars) c let injConcl gls = injClause [] false None gls let injHyp id gls = injClause [] false (Some (ElimOnIdent (dummy_loc,id))) gls let decompEqThen ntac (lbeq,_,(t,t1,t2) as u) clause gls = let sort = pf_apply get_type_of gls (pf_concl gls) in let sigma = clause.evd in let env = pf_env gls in match find_positions env sigma t1 t2 with | Inl (cpath, (_,dirn), _) -> discr_positions env sigma u clause cpath dirn sort gls | Inr [] -> (* Change: do not fail, simplify clear this trivial hyp *) ntac 0 gls | Inr posns -> inject_at_positions env sigma u clause (List.rev posns) ntac gls let dEqThen with_evars ntac = function | None -> onNegatedEquality with_evars (decompEqThen ntac) | Some c -> onInductionArg (onEquality with_evars (decompEqThen ntac)) c let dEq with_evars = dEqThen with_evars (fun x -> tclIDTAC) let swap_equality_args = function | MonomorphicLeibnizEq (e1,e2) -> [e2;e1] | PolymorphicLeibnizEq (t,e1,e2) -> [t;e2;e1] | HeterogenousEq (t1,e1,t2,e2) -> [t2;e2;t1;e1] let swap_equands gls eqn = let (lbeq,eq_args) = find_eq_data eqn in applist(lbeq.eq,swap_equality_args eq_args) let swapEquandsInConcl gls = let (lbeq,eq_args) = find_eq_data (pf_concl gls) in let sym_equal = lbeq.sym in refine (applist(sym_equal,(swap_equality_args eq_args@[Evarutil.mk_new_meta()]))) gls (* Refine from [|- P e2] to [|- P e1] and [|- e1=e2:>t] (body is P (Rel 1)) *) let bareRevSubstInConcl lbeq body (t,e1,e2) gls = (* find substitution scheme *) let eq_elim = find_elim lbeq.eq (Some false) false None [e1;e2] gls in (* build substitution predicate *) let p = lambda_create (pf_env gls) (t,body) in (* apply substitution scheme *) refine (applist(eq_elim,[t;e1;p;Evarutil.mk_new_meta(); e2;Evarutil.mk_new_meta()])) gls (* [subst_tuple_term dep_pair B] Given that dep_pair looks like: (existT e1 (existT e2 
... (existT en en+1) ... )) of type {x1:T1 & {x2:T2(x1) & ... {xn:Tn(x1..xn-1) & en+1 } } } and B might contain instances of the ei, we will return the term: ([x1:ty1]...[xn+1:tyn+1]B (projT1 (mkRel 1)) (projT1 (projT2 (mkRel 1))) ... (projT1 (projT2^n (mkRel 1))) (projT2 (projT2^n (mkRel 1))) That is, we will abstract out the terms e1...en+1 of types t1 (=_beta T1), ..., tn+1 (=_beta Tn+1(e1..en)) as usual, but will then produce a term in which the abstraction is on a single term - the debruijn index [mkRel 1], which will be of the same type as dep_pair (note that the abstracted body may not be typable!). ALGORITHM for abstraction: We have a list of terms, [e1]...[en+1], which we want to abstract out of [B]. For each term [ei], going backwards from [n+1], we just do a [subst_term], and then do a lambda-abstraction to the type of the [ei]. *) let decomp_tuple_term env c t = let rec decomprec inner_code ex exty = let iterated_decomp = try let {proj1=p1; proj2=p2},(a,p,car,cdr) = find_sigma_data_decompose ex in let car_code = applist (p1,[a;p;inner_code]) and cdr_code = applist (p2,[a;p;inner_code]) in let cdrtyp = beta_applist (p,[car]) in List.map (fun l -> ((car,a),car_code)::l) (decomprec cdr_code cdr cdrtyp) with PatternMatchingFailure -> [] in [((ex,exty),inner_code)]::iterated_decomp in decomprec (mkRel 1) c t let subst_tuple_term env sigma dep_pair1 dep_pair2 b = let typ = get_type_of env sigma dep_pair1 in (* We find all possible decompositions *) let decomps1 = decomp_tuple_term env dep_pair1 typ in let decomps2 = decomp_tuple_term env dep_pair2 typ in (* We adjust to the shortest decomposition *) let n = min (List.length decomps1) (List.length decomps2) in let decomp1 = List.nth decomps1 (n-1) in let decomp2 = List.nth decomps2 (n-1) in (* We rewrite dep_pair1 ... *) let e1_list,proj_list = List.split decomp1 in (* ... 
and use dep_pair2 to compute the expected goal *) let e2_list,_ = List.split decomp2 in (* We build the expected goal *) let abst_B = List.fold_right (fun (e,t) body -> lambda_create env (t,subst_term e body)) e1_list b in let pred_body = beta_applist(abst_B,proj_list) in let expected_goal = beta_applist (abst_B,List.map fst e2_list) in (* Simulate now the normalisation treatment made by Logic.mk_refgoals *) let expected_goal = nf_betaiota sigma expected_goal in pred_body,expected_goal (* Like "replace" but decompose dependent equalities *) exception NothingToRewrite let cutSubstInConcl_RL eqn gls = let (lbeq,(t,e1,e2 as eq)) = find_eq_data_decompose gls eqn in let body,expected_goal = pf_apply subst_tuple_term gls e2 e1 (pf_concl gls) in if not (dependent (mkRel 1) body) then raise NothingToRewrite; tclTHENFIRST (bareRevSubstInConcl lbeq body eq) (convert_concl expected_goal DEFAULTcast) gls (* |- (P e1) BY CutSubstInConcl_LR (eq T e1 e2) |- (P e2) |- (eq T e1 e2) *) let cutSubstInConcl_LR eqn gls = (tclTHENS (cutSubstInConcl_RL (swap_equands gls eqn)) ([tclIDTAC; swapEquandsInConcl])) gls let cutSubstInConcl l2r =if l2r then cutSubstInConcl_LR else cutSubstInConcl_RL let cutSubstInHyp_LR eqn id gls = let (lbeq,(t,e1,e2 as eq)) = find_eq_data_decompose gls eqn in let idtyp = pf_get_hyp_typ gls id in let body,expected_goal = pf_apply subst_tuple_term gls e1 e2 idtyp in if not (dependent (mkRel 1) body) then raise NothingToRewrite; cut_replacing id expected_goal (tclTHENFIRST (bareRevSubstInConcl lbeq body eq) (refine_no_check (mkVar id))) gls let cutSubstInHyp_RL eqn id gls = (tclTHENS (cutSubstInHyp_LR (swap_equands gls eqn) id) ([tclIDTAC; swapEquandsInConcl])) gls let cutSubstInHyp l2r = if l2r then cutSubstInHyp_LR else cutSubstInHyp_RL let try_rewrite tac gls = try tac gls with | PatternMatchingFailure -> errorlabstrm "try_rewrite" (str "Not a primitive equality here.") | e when catchable_exception e -> errorlabstrm "try_rewrite" (strbrk "Cannot find a well-typed generalization of the goal that makes the proof progress.") | NothingToRewrite -> errorlabstrm "try_rewrite" (strbrk "Nothing to rewrite.") let cutSubstClause l2r eqn cls gls = match cls with | None -> cutSubstInConcl l2r eqn gls | Some id -> cutSubstInHyp l2r eqn id gls let cutRewriteClause l2r eqn cls = try_rewrite (cutSubstClause l2r eqn cls) let cutRewriteInHyp l2r eqn id = cutRewriteClause l2r eqn (Some id) let cutRewriteInConcl l2r eqn = cutRewriteClause l2r eqn None let substClause l2r c cls gls = let eq = pf_apply get_type_of gls c in tclTHENS (cutSubstClause l2r eq cls) [tclIDTAC; exact_no_check c] gls let rewriteClause l2r c cls = try_rewrite (substClause l2r c cls) let rewriteInHyp l2r c id = rewriteClause l2r c (Some id) let rewriteInConcl l2r c = rewriteClause l2r c None (* Naming scheme for rewrite and cutrewrite tactics give equality give proof of equality / cutSubstClause substClause raw | cutSubstInHyp substInHyp \ cutSubstInConcl substInConcl / cutRewriteClause rewriteClause user| cutRewriteInHyp rewriteInHyp \ cutRewriteInConcl rewriteInConcl raw = raise typing error or PatternMatchingFailure user = raise user error specific to rewrite *) (**********************************************************************) (* Substitutions tactics (JCF) *) let unfold_body x gl = let hyps = pf_hyps gl in let xval = match Sign.lookup_named x hyps with (_,Some xval,_) -> xval | _ -> errorlabstrm "unfold_body" (pr_id x ++ str" is not a defined hypothesis.") in let aft = afterHyp x gl in let hl = List.fold_right (fun 
(y,yval,_) cl -> (y,InHyp) :: cl) aft [] in let xvar = mkVar x in let rfun _ _ c = replace_term xvar xval c in tclTHENLIST [tclMAP (fun h -> reduct_in_hyp rfun h) hl; reduct_in_concl (rfun,DEFAULTcast)] gl let restrict_to_eq_and_identity eq = (* compatibility *) if eq <> constr_of_global glob_eq && eq <> constr_of_global glob_identity then raise PatternMatchingFailure exception FoundHyp of (identifier * constr * bool) (* tests whether hyp [c] is [x = t] or [t = x], [x] not occuring in [t] *) let is_eq_x gl x (id,_,c) = try let (_,lhs,rhs) = snd (find_eq_data_decompose gl c) in if (eq_constr x lhs) && not (occur_term x rhs) then raise (FoundHyp (id,rhs,true)); if (eq_constr x rhs) && not (occur_term x lhs) then raise (FoundHyp (id,lhs,false)) with PatternMatchingFailure -> () (* Rewrite "hyp:x=rhs" or "hyp:rhs=x" (if dir=false) everywhere and erase hyp and x; proceed by generalizing all dep hyps *) let subst_one dep_proof_ok x (hyp,rhs,dir) gl = (* The set of hypotheses using x *) let depdecls = let test (id,_,c as dcl) = if id <> hyp && occur_var_in_decl (pf_env gl) x dcl then dcl else failwith "caught" in List.rev (map_succeed test (pf_hyps gl)) in let dephyps = List.map (fun (id,_,_) -> id) depdecls in (* Decides if x appears in conclusion *) let depconcl = occur_var (pf_env gl) x (pf_concl gl) in (* The set of non-defined hypothesis: they must be abstracted, rewritten and reintroduced *) let abshyps = map_succeed (fun (id,v,_) -> if v=None then mkVar id else failwith "caught") depdecls in (* a tactic that either introduce an abstracted and rewritten hyp, or introduce a definition where x was replaced *) let introtac = function (id,None,_) -> intro_using id | (id,Some hval,htyp) -> letin_tac None (Name id) (replace_term (mkVar x) rhs hval) (Some (replace_term (mkVar x) rhs htyp)) nowhere in let need_rewrite = dephyps <> [] || depconcl in tclTHENLIST ((if need_rewrite then [generalize abshyps; general_rewrite dir all_occurrences true dep_proof_ok (mkVar hyp); thin dephyps; tclMAP introtac depdecls] else [tclIDTAC]) @ [tclTRY (clear [x;hyp])]) gl (* Look for an hypothesis hyp of the form "x=rhs" or "rhs=x", rewrite it everywhere, and erase hyp and x; proceed by generalizing all dep hyps *) let subst_one_var dep_proof_ok x gl = let hyps = pf_hyps gl in let (_,xval,_) = pf_get_hyp gl x in (* If x has a body, simply replace x with body and clear x *) if xval <> None then tclTHEN (unfold_body x) (clear [x]) gl else (* x is a variable: *) let varx = mkVar x in (* Find a non-recursive definition for x *) let (hyp,rhs,dir) = try let test hyp _ = is_eq_x gl varx hyp in Sign.fold_named_context test ~init:() hyps; errorlabstrm "Subst" (str "Cannot find any non-recursive equality over " ++ pr_id x ++ str".") with FoundHyp res -> res in subst_one dep_proof_ok x (hyp,rhs,dir) gl let subst_gen dep_proof_ok ids = tclTHEN tclNORMEVAR (tclMAP (subst_one_var dep_proof_ok) ids) (* For every x, look for an hypothesis hyp of the form "x=rhs" or "rhs=x", rewrite it everywhere, and erase hyp and x; proceed by generalizing all dep hyps *) let subst = subst_gen true type subst_tactic_flags = { only_leibniz : bool; rewrite_dependent_proof : bool } let default_subst_tactic_flags () = if Flags.version_strictly_greater Flags.V8_2 then { only_leibniz = false; rewrite_dependent_proof = true } else { only_leibniz = true; rewrite_dependent_proof = false } let subst_all ?(flags=default_subst_tactic_flags ()) gl = let test (_,c) = try let lbeq,(_,x,y) = find_eq_data_decompose gl c in if flags.only_leibniz then 
restrict_to_eq_and_identity lbeq.eq; (* J.F.: added to prevent failure on goal containing x=x as an hyp *) if eq_constr x y then failwith "caught"; match kind_of_term x with Var x -> x | _ -> match kind_of_term y with Var y -> y | _ -> failwith "caught" with PatternMatchingFailure -> failwith "caught" in let ids = map_succeed test (pf_hyps_types gl) in let ids = list_uniquize ids in subst_gen flags.rewrite_dependent_proof ids gl (* Rewrite the first assumption for which the condition faildir does not fail and gives the direction of the rewrite *) let cond_eq_term_left c t gl = try let (_,x,_) = snd (find_eq_data_decompose gl t) in if pf_conv_x gl c x then true else failwith "not convertible" with PatternMatchingFailure -> failwith "not an equality" let cond_eq_term_right c t gl = try let (_,_,x) = snd (find_eq_data_decompose gl t) in if pf_conv_x gl c x then false else failwith "not convertible" with PatternMatchingFailure -> failwith "not an equality" let cond_eq_term c t gl = try let (_,x,y) = snd (find_eq_data_decompose gl t) in if pf_conv_x gl c x then true else if pf_conv_x gl c y then false else failwith "not convertible" with PatternMatchingFailure -> failwith "not an equality" let rewrite_multi_assumption_cond cond_eq_term cl gl = let rec arec = function | [] -> error "No such assumption." | (id,_,t) ::rest -> begin try let dir = cond_eq_term t gl in general_multi_rewrite dir false (mkVar id,NoBindings) cl gl with | Failure _ | UserError _ -> arec rest end in arec (pf_hyps gl) let replace_multi_term dir_opt c = let cond_eq_fun = match dir_opt with | None -> cond_eq_term c | Some true -> cond_eq_term_left c | Some false -> cond_eq_term_right c in rewrite_multi_assumption_cond cond_eq_fun let _ = Tactics.register_general_multi_rewrite (fun b evars t cls -> general_multi_rewrite b evars t cls) let _ = Tactics.register_subst_one (fun b -> subst_one b) coq-8.4pl2/tactics/elimschemes.ml0000640000175000001440000001144112010532755016112 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* obj = declare_object { (default_object name) with cache_function = cache; load_function = (fun _ -> load); open_function = (fun _ -> load); classify_function = (fun (local, tac) -> if local then Dispose else Substitute (local, tac)); subst_function = subst} in let put local tac = set_default_tactic local tac; Lib.add_anonymous_leaf (input (local, tac)) in let get () = !locality, !default_tactic in let print () = Pptactic.pr_glob_tactic (Global.env ()) !default_tactic_expr ++ (if !locality then str" (locally defined)" else str" (globally defined)") in let freeze () = !locality, !default_tactic_expr in let unfreeze (local, t) = set_default_tactic local t in let init () = () in Summary.declare_summary name { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init }; put, get, print coq-8.4pl2/tactics/elim.mli0000640000175000001440000000254512010532755014720 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tactic) -> branch_args -> tactic val introCaseAssumsThen : (intro_pattern_expr Util.located list -> branch_assumptions -> tactic) -> branch_args -> tactic val general_decompose : (identifier * constr -> bool) -> constr -> tactic val decompose_nonrec : constr -> tactic val decompose_and : constr -> tactic val decompose_or : constr -> tactic val h_decompose : 
inductive list -> constr -> tactic val h_decompose_or : constr -> tactic val h_decompose_and : constr -> tactic val double_ind : Glob_term.quantified_hypothesis -> Glob_term.quantified_hypothesis -> tactic val h_double_induction : Glob_term.quantified_hypothesis -> Glob_term.quantified_hypothesis->tactic coq-8.4pl2/tactics/tactics.mli0000640000175000001440000003507312010532755015426 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string val head_constr : constr -> constr * constr list val head_constr_bound : constr -> constr * constr list val is_quantified_hypothesis : identifier -> goal sigma -> bool exception Bound (** {6 Primitive tactics. } *) val introduction : identifier -> tactic val refine : constr -> tactic val convert_concl : constr -> cast_kind -> tactic val convert_hyp : named_declaration -> tactic val thin : identifier list -> tactic val mutual_fix : identifier -> int -> (identifier * int * constr) list -> int -> tactic val fix : identifier option -> int -> tactic val mutual_cofix : identifier -> (identifier * constr) list -> int -> tactic val cofix : identifier option -> tactic (** {6 Introduction tactics. } *) val fresh_id_in_env : identifier list -> identifier -> env -> identifier val fresh_id : identifier list -> identifier -> goal sigma -> identifier val find_intro_names : rel_context -> goal sigma -> identifier list val intro : tactic val introf : tactic val intro_move : identifier option -> identifier move_location -> tactic (** [intro_avoiding idl] acts as intro but prevents the new identifier to belong to [idl] *) val intro_avoiding : identifier list -> tactic val intro_replacing : identifier -> tactic val intro_using : identifier -> tactic val intro_mustbe_force : identifier -> tactic val intro_then : (identifier -> tactic) -> tactic val intros_using : identifier list -> tactic val intro_erasing : identifier -> tactic val intros_replacing : identifier list -> tactic val intros : tactic (** [depth_of_quantified_hypothesis b h g] returns the index of [h] in the conclusion of goal [g], up to head-reduction if [b] is [true] *) val depth_of_quantified_hypothesis : bool -> quantified_hypothesis -> goal sigma -> int val intros_until_n_wored : int -> tactic val intros_until : quantified_hypothesis -> tactic val intros_clearing : bool list -> tactic (** Assuming a tactic [tac] depending on an hypothesis identifier, [try_intros_until tac arg] first assumes that arg denotes a quantified hypothesis (denoted by name or by index) and try to introduce it in context before to apply [tac], otherwise assume the hypothesis is already in context and directly apply [tac] *) val try_intros_until : (identifier -> tactic) -> quantified_hypothesis -> tactic (** Apply a tactic on a quantified hypothesis, an hypothesis in context or a term with bindings *) val onInductionArg : (constr with_bindings -> tactic) -> constr with_bindings induction_arg -> tactic (** {6 Introduction tactics with eliminations. } *) val intro_pattern : identifier move_location -> intro_pattern_expr -> tactic val intro_patterns : intro_pattern_expr located list -> tactic val intros_pattern : identifier move_location -> intro_pattern_expr located list -> tactic (** {6 Exact tactics. } *) val assumption : tactic val exact_no_check : constr -> tactic val vm_cast_no_check : constr -> tactic val exact_check : constr -> tactic val exact_proof : Topconstr.constr_expr -> tactic (** {6 Reduction tactics. 
} *) type tactic_reduction = env -> evar_map -> constr -> constr val reduct_in_hyp : tactic_reduction -> hyp_location -> tactic val reduct_option : tactic_reduction * cast_kind -> goal_location -> tactic val reduct_in_concl : tactic_reduction * cast_kind -> tactic val change_in_concl : (occurrences * constr_pattern) option -> constr -> tactic val change_in_hyp : (occurrences * constr_pattern) option -> constr -> hyp_location -> tactic val red_in_concl : tactic val red_in_hyp : hyp_location -> tactic val red_option : goal_location -> tactic val hnf_in_concl : tactic val hnf_in_hyp : hyp_location -> tactic val hnf_option : goal_location -> tactic val simpl_in_concl : tactic val simpl_in_hyp : hyp_location -> tactic val simpl_option : goal_location -> tactic val normalise_in_concl : tactic val normalise_in_hyp : hyp_location -> tactic val normalise_option : goal_location -> tactic val normalise_vm_in_concl : tactic val unfold_in_concl : (occurrences * evaluable_global_reference) list -> tactic val unfold_in_hyp : (occurrences * evaluable_global_reference) list -> hyp_location -> tactic val unfold_option : (occurrences * evaluable_global_reference) list -> goal_location -> tactic val change : constr_pattern option -> constr -> clause -> tactic val pattern_option : (occurrences * constr) list -> goal_location -> tactic val reduce : red_expr -> clause -> tactic val unfold_constr : global_reference -> tactic (** {6 Modification of the local context. } *) val clear : identifier list -> tactic val clear_body : identifier list -> tactic val keep : identifier list -> tactic val specialize : int option -> constr with_bindings -> tactic val move_hyp : bool -> identifier -> identifier move_location -> tactic val rename_hyp : (identifier * identifier) list -> tactic val revert : identifier list -> tactic (** {6 Resolution tactics. } *) val apply_type : constr -> constr list -> tactic val apply_term : constr -> constr list -> tactic val bring_hyps : named_context -> tactic val apply : constr -> tactic val eapply : constr -> tactic val apply_with_bindings_gen : advanced_flag -> evars_flag -> constr with_bindings located list -> tactic val apply_with_bindings : constr with_bindings -> tactic val eapply_with_bindings : constr with_bindings -> tactic val cut_and_apply : constr -> tactic val apply_in : advanced_flag -> evars_flag -> identifier -> constr with_bindings located list -> intro_pattern_expr located option -> tactic val simple_apply_in : identifier -> constr -> tactic (** {6 Elimination tactics. } *) (* The general form of an induction principle is the following: forall prm1 prm2 ... prmp, (induction parameters) forall Q1...,(Qi:Ti_1 -> Ti_2 ->...-> Ti_ni),...Qq, (predicates) branch1, branch2, ... , branchr, (branches of the principle) forall (x1:Ti_1) (x2:Ti_2) ... (xni:Ti_ni), (induction arguments) (HI: I prm1..prmp x1...xni) (optional main induction arg) -> (Qi x1...xni HI (f prm1...prmp x1...xni)).(conclusion) ^^ ^^^^^^^^^^^^^^^^^^^^^^^^ optional optional even if HI argument added if principle present above generated by functional induction [indarg] [farg] HI is not present when the induction principle does not come directly from an inductive type (like when it is generated by functional induction for example). HI is present otherwise BUT may not appear in the conclusion (dependent principle). HI and (f...) cannot be both present. Principles taken from functional induction have the final (f...). 
*) (** [rel_contexts] and [rel_declaration] actually contain triples, and lists are actually in reverse order to fit [compose_prod]. *) type elim_scheme = { elimc: constr with_bindings option; elimt: types; indref: global_reference option; index: int; (** index of the elimination type in the scheme *) params: rel_context; (** (prm1,tprm1);(prm2,tprm2)...(prmp,tprmp) *) nparams: int; (** number of parameters *) predicates: rel_context; (** (Qq, (Tq_1 -> Tq_2 ->...-> Tq_nq)), (Q1,...) *) npredicates: int; (** Number of predicates *) branches: rel_context; (** branchr,...,branch1 *) nbranches: int; (** Number of branches *) args: rel_context; (** (xni, Ti_ni) ... (x1, Ti_1) *) nargs: int; (** number of arguments *) indarg: rel_declaration option; (** Some (H,I prm1..prmp x1...xni) if HI is in premisses, None otherwise *) concl: types; (** Qi x1...xni HI (f...), HI and (f...) are optional and mutually exclusive *) indarg_in_concl: bool; (** true if HI appears at the end of conclusion *) farg_in_concl: bool; (** true if (f...) appears at the end of conclusion *) } val compute_elim_sig : ?elimc: constr with_bindings -> types -> elim_scheme val rebuild_elimtype_from_scheme: elim_scheme -> types (** elim principle with the index of its inductive arg *) type eliminator = { elimindex : int option; (** None = find it automatically *) elimbody : constr with_bindings } val elimination_clause_scheme : evars_flag -> ?flags:unify_flags -> int -> clausenv -> clausenv -> tactic val elimination_in_clause_scheme : evars_flag -> ?flags:unify_flags -> identifier -> int -> clausenv -> clausenv -> tactic val general_elim_clause_gen : (int -> Clenv.clausenv -> 'a -> tactic) -> 'a -> eliminator -> tactic val general_elim : evars_flag -> constr with_bindings -> eliminator -> tactic val general_elim_in : evars_flag -> identifier -> constr with_bindings -> eliminator -> tactic val default_elim : evars_flag -> constr with_bindings -> tactic val simplest_elim : constr -> tactic val elim : evars_flag -> constr with_bindings -> constr with_bindings option -> tactic val simple_induct : quantified_hypothesis -> tactic val new_induct : evars_flag -> (evar_map * constr with_bindings) induction_arg list -> constr with_bindings option -> intro_pattern_expr located option * intro_pattern_expr located option -> clause option -> tactic (** {6 Case analysis tactics. } *) val general_case_analysis : evars_flag -> constr with_bindings -> tactic val simplest_case : constr -> tactic val simple_destruct : quantified_hypothesis -> tactic val new_destruct : evars_flag -> (evar_map * constr with_bindings) induction_arg list -> constr with_bindings option -> intro_pattern_expr located option * intro_pattern_expr located option -> clause option -> tactic (** {6 Generic case analysis / induction tactics. } *) val induction_destruct : rec_flag -> evars_flag -> ((evar_map * constr with_bindings) induction_arg * (intro_pattern_expr located option * intro_pattern_expr located option)) list * constr with_bindings option * clause option -> tactic (** {6 Eliminations giving the type instead of the proof. } *) val case_type : constr -> tactic val elim_type : constr -> tactic (** {6 Some eliminations which are frequently used. } *) val impE : identifier -> tactic val andE : identifier -> tactic val orE : identifier -> tactic val dImp : clause -> tactic val dAnd : clause -> tactic val dorE : bool -> clause ->tactic (** {6 Introduction tactics. 
} *) val constructor_tac : evars_flag -> int option -> int -> constr bindings -> tactic val any_constructor : evars_flag -> tactic option -> tactic val one_constructor : int -> constr bindings -> tactic val left : constr bindings -> tactic val right : constr bindings -> tactic val split : constr bindings -> tactic val left_with_bindings : evars_flag -> constr bindings -> tactic val right_with_bindings : evars_flag -> constr bindings -> tactic val split_with_bindings : evars_flag -> constr bindings list -> tactic val simplest_left : tactic val simplest_right : tactic val simplest_split : tactic (** {6 Logical connective tactics. } *) val register_setoid_reflexivity : tactic -> unit val reflexivity_red : bool -> tactic val reflexivity : tactic val intros_reflexivity : tactic val register_setoid_symmetry : tactic -> unit val symmetry_red : bool -> tactic val symmetry : tactic val register_setoid_symmetry_in : (identifier -> tactic) -> unit val symmetry_in : identifier -> tactic val intros_symmetry : clause -> tactic val register_setoid_transitivity : (constr option -> tactic) -> unit val transitivity_red : bool -> constr option -> tactic val transitivity : constr -> tactic val etransitivity : tactic val intros_transitivity : constr option -> tactic val cut : constr -> tactic val cut_intro : constr -> tactic val assert_replacing : identifier -> types -> tactic -> tactic val cut_replacing : identifier -> types -> tactic -> tactic val cut_in_parallel : constr list -> tactic val assert_as : bool -> intro_pattern_expr located option -> constr -> tactic val forward : tactic option -> intro_pattern_expr located option -> constr -> tactic val letin_tac : (bool * intro_pattern_expr located) option -> name -> constr -> types option -> clause -> tactic val letin_pat_tac : (bool * intro_pattern_expr located) option -> name -> evar_map * constr -> types option -> clause -> tactic val assert_tac : name -> types -> tactic val assert_by : name -> types -> tactic -> tactic val pose_proof : name -> constr -> tactic val generalize : constr list -> tactic val generalize_gen : ((occurrences * constr) * name) list -> tactic val generalize_dep : ?with_let:bool (** Don't lose let bindings *) -> constr -> tactic val unify : ?state:Names.transparent_state -> constr -> constr -> tactic val resolve_classes : tactic val tclABSTRACT : identifier option -> tactic -> tactic val admit_as_an_axiom : tactic val abstract_generalize : ?generalize_vars:bool -> ?force_dep:bool -> identifier -> tactic val specialize_eqs : identifier -> tactic val register_general_multi_rewrite : (bool -> evars_flag -> constr with_bindings -> clause -> tactic) -> unit val register_subst_one : (bool -> identifier -> identifier * constr * bool -> tactic) -> unit coq-8.4pl2/tactics/leminv.mli0000640000175000001440000000104411505230601015246 0ustar notinusersopen Util open Names open Term open Glob_term open Proof_type open Topconstr val lemInv_gen : quantified_hypothesis -> constr -> tactic val lemInvIn_gen : quantified_hypothesis -> constr -> identifier list -> tactic val lemInv_clause : quantified_hypothesis -> constr -> identifier list -> tactic val inversion_lemma_from_goal : int -> identifier -> identifier located -> sorts -> bool -> (identifier -> tactic) -> unit val add_inversion_lemma_exn : identifier -> constr_expr -> glob_sort -> bool -> (identifier -> tactic) -> unit coq-8.4pl2/tactics/class_tactics.ml40000640000175000001440000006656112121620060016522 0ustar notinusers(************************************************************************) 
(* v * The Coq Proof Assistant / The Coq Development Team *) (* Auto.create_hint_db false typeclasses_db full_transparent_state true) exception Found of evar_map (** We transform the evars that are concerned by this resolution (according to predicate p) into goals. Invariant: function p only manipulates undefined evars *) let evars_to_goals p evm = let goals, evm' = Evd.fold_undefined (fun ev evi (gls, evm') -> let evi', goal = p evm ev evi in let gls' = if goal then (ev,Goal.V82.build ev) :: gls else gls in (gls', Evd.add evm' ev evi')) evm ([], Evd.defined_evars evm) in if goals = [] then None else Some (List.rev goals, evm') (** Typeclasses instance search tactic / eauto *) open Auto let e_give_exact flags c gl = let t1 = (pf_type_of gl c) in tclTHEN (Clenvtac.unify ~flags t1) (exact_no_check c) gl open Unification let auto_unif_flags = { modulo_conv_on_closed_terms = Some full_transparent_state; use_metas_eagerly_in_conv_on_closed_terms = true; modulo_delta = var_full_transparent_state; modulo_delta_types = full_transparent_state; modulo_delta_in_merge = None; check_applied_meta_types = false; resolve_evars = false; use_pattern_unification = true; use_meta_bound_pattern_unification = true; frozen_evars = ExistentialSet.empty; restrict_conv_on_strict_subterms = false; (* ? *) modulo_betaiota = true; modulo_eta = true; allow_K_in_toplevel_higher_order_unification = false } let rec eq_constr_mod_evars x y = match kind_of_term x, kind_of_term y with | Evar (e1, l1), Evar (e2, l2) when e1 <> e2 -> true | _, _ -> compare_constr eq_constr_mod_evars x y let progress_evars t gl = let concl = pf_concl gl in let check gl' = let newconcl = pf_concl gl' in if eq_constr_mod_evars concl newconcl then tclFAIL 0 (str"No progress made (modulo evars)") gl' else tclIDTAC gl' in tclTHEN t check gl TACTIC EXTEND progress_evars [ "progress_evars" tactic(t) ] -> [ progress_evars (Tacinterp.eval_tactic t) ] END let unify_e_resolve flags (c,clenv) gls = let clenv' = connect_clenv gls clenv in let clenv' = clenv_unique_resolver ~flags clenv' gls in Clenvtac.clenv_refine true ~with_classes:false clenv' gls let unify_resolve flags (c,clenv) gls = let clenv' = connect_clenv gls clenv in let clenv' = clenv_unique_resolver ~flags clenv' gls in Clenvtac.clenv_refine false ~with_classes:false clenv' gls let clenv_of_prods nprods (c, clenv) gls = if nprods = 0 then Some clenv else let ty = pf_type_of gls c in let diff = nb_prod ty - nprods in if diff >= 0 then Some (mk_clenv_from_n gls (Some diff) (c,ty)) else None let with_prods nprods (c, clenv) f gls = match clenv_of_prods nprods (c, clenv) gls with | None -> tclFAIL 0 (str"Not enough premisses") gls | Some clenv' -> f (c, clenv') gls (** Hack to properly solve dependent evars that are typeclasses *) let flags_of_state st = {auto_unif_flags with modulo_conv_on_closed_terms = Some st; modulo_delta = st; modulo_delta_types = st; modulo_eta = false} let rec e_trivial_fail_db db_list local_db goal = let tacl = Eauto.registered_e_assumption :: (tclTHEN Tactics.intro (function g'-> let d = pf_last_hyp g' in let hintl = make_resolve_hyp (pf_env g') (project g') d in (e_trivial_fail_db db_list (Hint_db.add_list hintl local_db) g'))) :: (List.map (fun (x,_,_,_,_) -> x) (e_trivial_resolve db_list local_db (pf_concl goal))) in tclFIRST (List.map tclCOMPLETE tacl) goal and e_my_find_search db_list local_db hdc complete concl = let hdc = head_of_constr_reference hdc in let prods, concl = decompose_prod_assum concl in let nprods = List.length prods in let hintl = list_map_append 
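(* For each hint database (the goal-local one first, then [db_list]), collect the hints indexed by the head constant [hdc] -- filtering with the discrimination net on the conclusion when the database allows it -- and pair every hint with unification flags derived from that database's transparency state. *)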
(fun db -> if Hint_db.use_dn db then let flags = flags_of_state (Hint_db.transparent_state db) in List.map (fun x -> (flags, x)) (Hint_db.map_auto (hdc,concl) db) else let flags = flags_of_state (Hint_db.transparent_state db) in List.map (fun x -> (flags, x)) (Hint_db.map_all hdc db)) (local_db::db_list) in let tac_of_hint = fun (flags, {pri = b; pat = p; code = t; name = name}) -> let tac = match t with | Res_pf (term,cl) -> with_prods nprods (term,cl) (unify_resolve flags) | ERes_pf (term,cl) -> with_prods nprods (term,cl) (unify_e_resolve flags) | Give_exact (c) -> e_give_exact flags c | Res_pf_THEN_trivial_fail (term,cl) -> tclTHEN (with_prods nprods (term,cl) (unify_e_resolve flags)) (if complete then tclIDTAC else e_trivial_fail_db db_list local_db) | Unfold_nth c -> tclWEAK_PROGRESS (unfold_in_concl [all_occurrences,c]) | Extern tacast -> (* tclTHEN *) (* (fun gl -> Refiner.tclEVARS (mark_unresolvables (project gl)) gl) *) (conclPattern concl p tacast) in let tac = if complete then tclCOMPLETE tac else tac in match t with | Extern _ -> (tac,b,true, name, lazy (pr_autotactic t)) | _ -> (* let tac gl = with_pattern (pf_env gl) (project gl) flags p concl tac gl in *) (tac,b,false, name, lazy (pr_autotactic t)) in List.map tac_of_hint hintl and e_trivial_resolve db_list local_db gl = try e_my_find_search db_list local_db (fst (head_constr_bound gl)) true gl with Bound | Not_found -> [] let e_possible_resolve db_list local_db gl = try e_my_find_search db_list local_db (fst (head_constr_bound gl)) false gl with Bound | Not_found -> [] let rec catchable = function | Refiner.FailError _ -> true | Loc.Exc_located (_, e) -> catchable e | e -> Logic.catchable_exception e let nb_empty_evars s = Evd.fold_undefined (fun ev evi acc -> succ acc) s 0 let pr_ev evs ev = Printer.pr_constr_env (Goal.V82.env evs ev) (Evarutil.nf_evar evs (Goal.V82.concl evs ev)) let pr_depth l = prlist_with_sep (fun () -> str ".") pr_int (List.rev l) type autoinfo = { hints : Auto.hint_db; is_evar: existential_key option; only_classes: bool; auto_depth: int list; auto_last_tac: std_ppcmds Lazy.t; auto_path : global_reference option list; auto_cut : hints_path } type autogoal = goal * autoinfo type 'ans fk = unit -> 'ans type ('a,'ans) sk = 'a -> 'ans fk -> 'ans type 'a tac = { skft : 'ans. 
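(* [skft] is the tactic in continuation-passing style: it receives a success continuation (applied to the produced goals together with a failure continuation that backtracks into this tactic) and a failure continuation, and runs them against an [autogoal sigma]. *)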
('a,'ans) sk -> 'ans fk -> autogoal sigma -> 'ans } type auto_result = autogoal list sigma type atac = auto_result tac let make_resolve_hyp env sigma st flags only_classes pri (id, _, cty) = let cty = Evarutil.nf_evar sigma cty in let rec iscl env ty = let ctx, ar = decompose_prod_assum ty in match kind_of_term (fst (decompose_app ar)) with | Const c -> is_class (ConstRef c) | Ind i -> is_class (IndRef i) | _ -> let env' = Environ.push_rel_context ctx env in let ty' = whd_betadeltaiota env' ar in if not (eq_constr ty' ar) then iscl env' ty' else false in let is_class = iscl env cty in let keep = not only_classes || is_class in if keep then let c = mkVar id in let name = PathHints [VarRef id] in let hints = if is_class then let hints = build_subclasses ~check:false env sigma (VarRef id) None in (list_map_append (fun (pri, c) -> make_resolves env sigma (true,false,Flags.is_verbose()) pri c) hints) else [] in (hints @ map_succeed (fun f -> try f (c,cty) with UserError _ -> failwith "") [make_exact_entry ~name sigma pri; make_apply_entry ~name env sigma flags pri]) else [] let pf_filtered_hyps gls = Goal.V82.hyps gls.Evd.sigma (sig_it gls) let make_hints g st only_classes sign = let paths, hintlist = List.fold_left (fun (paths, hints) hyp -> if is_section_variable (pi1 hyp) then (paths, hints) else let path, hint = PathEmpty, pf_apply make_resolve_hyp g st (true,false,false) only_classes None hyp in (PathOr (paths, path), hint @ hints)) (PathEmpty, []) sign in Hint_db.add_list hintlist (Hint_db.empty st true) let autogoal_hints_cache : (Environ.named_context_val * hint_db) option ref = ref None let freeze () = !autogoal_hints_cache let unfreeze v = autogoal_hints_cache := v let init () = autogoal_hints_cache := None let _ = init () let _ = Summary.declare_summary "autogoal-hints-cache" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } let make_autogoal_hints = fun only_classes ?(st=full_transparent_state) g -> let sign = pf_filtered_hyps g in match freeze () with | Some (sign', hints) when Environ.eq_named_context_val sign sign' -> hints | _ -> let hints = make_hints g st only_classes (Environ.named_context_of_val sign) in unfreeze (Some (sign, hints)); hints let lift_tactic tac (f : goal list sigma -> autoinfo -> autogoal list sigma) : 'a tac = { skft = fun sk fk {it = gl,hints; sigma=s} -> let res = try Some (tac {it=gl; sigma=s}) with e when catchable e -> None in match res with | Some gls -> sk (f gls hints) fk | None -> fk () } let intro_tac : atac = lift_tactic Tactics.intro (fun {it = gls; sigma = s} info -> let gls' = List.map (fun g' -> let env = Goal.V82.env s g' in let context = Environ.named_context_of_val (Goal.V82.hyps s g') in let hint = make_resolve_hyp env s (Hint_db.transparent_state info.hints) (true,false,false) info.only_classes None (List.hd context) in let ldb = Hint_db.add_list hint info.hints in (g', { info with is_evar = None; hints = ldb; auto_last_tac = lazy (str"intro") })) gls in {it = gls'; sigma = s}) let normevars_tac : atac = { skft = fun sk fk {it = (gl, info); sigma = s} -> let gl', sigma' = Goal.V82.nf_evar s gl in let info' = { info with auto_last_tac = lazy (str"normevars") } in sk {it = [gl', info']; sigma = sigma'} fk } (* Ordering of states is lexicographic on the number of remaining goals. 
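More precisely, hint priorities are compared first; ties are broken by the number of remaining goals plus the number of undefined evars.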
*) let compare (pri, _, _, res) (pri', _, _, res') = let nbgoals s = List.length (sig_it s) + nb_empty_evars (sig_sig s) in let pri = pri - pri' in if pri <> 0 then pri else nbgoals res - nbgoals res' let or_tac (x : 'a tac) (y : 'a tac) : 'a tac = { skft = fun sk fk gls -> x.skft sk (fun () -> y.skft sk fk gls) gls } let hints_tac hints = { skft = fun sk fk {it = gl,info; sigma = s} -> let concl = Goal.V82.concl s gl in let tacgl = {it = gl; sigma = s} in let poss = e_possible_resolve hints info.hints concl in let rec aux i foundone = function | (tac, _, b, name, pp) :: tl -> let derivs = path_derivate info.auto_cut name in let res = try if path_matches derivs [] then None else Some (tac tacgl) with e when catchable e -> None in (match res with | None -> aux i foundone tl | Some {it = gls; sigma = s'} -> if !typeclasses_debug then msgnl (pr_depth (i :: info.auto_depth) ++ str": " ++ Lazy.force pp ++ str" on" ++ spc () ++ pr_ev s gl); let fk = (fun () -> if !typeclasses_debug then msgnl (str"backtracked after " ++ Lazy.force pp); aux (succ i) true tl) in let sgls = evars_to_goals (fun evm ev evi -> if Typeclasses.is_resolvable evi && (not info.only_classes || Typeclasses.is_class_evar evm evi) then Typeclasses.mark_unresolvable evi, true else evi, false) s' in let newgls, s' = let gls' = List.map (fun g -> (None, g)) gls in match sgls with | None -> gls', s' | Some (evgls, s') -> (* Reorder with dependent subgoals. *) (gls' @ List.map (fun (ev, x) -> Some ev, x) evgls, s') in let gls' = list_map_i (fun j (evar, g) -> let info = { info with auto_depth = j :: i :: info.auto_depth; auto_last_tac = pp; is_evar = evar; hints = if b && not (Environ.eq_named_context_val (Goal.V82.hyps s' g) (Goal.V82.hyps s' gl)) then make_autogoal_hints info.only_classes ~st:(Hint_db.transparent_state info.hints) {it = g; sigma = s'} else info.hints; auto_cut = derivs } in g, info) 1 newgls in let glsv = {it = gls'; sigma = s'} in sk glsv fk) | [] -> if not foundone && !typeclasses_debug then msgnl (pr_depth info.auto_depth ++ str": no match for " ++ Printer.pr_constr_env (Goal.V82.env s gl) concl ++ spc () ++ int (List.length poss) ++ str" possibilities"); fk () in aux 1 false poss } let isProp env sigma concl = let ty = Retyping.get_type_of env sigma concl in kind_of_term ty = Sort (Prop Null) let needs_backtrack only_classes env evd oev concl = if oev = None || isProp env evd concl then not (Intset.is_empty (Evarutil.evars_of_term concl)) else true let then_list (second : atac) (sk : (auto_result, 'a) sk) : (auto_result, 'a) sk = let rec aux s (acc : autogoal list list) fk = function | (gl,info) :: gls -> (match info.is_evar with | Some ev when Evd.is_defined s ev -> aux s acc fk gls | _ -> second.skft (fun {it=gls';sigma=s'} fk' -> let needs_backtrack = if gls' = [] then needs_backtrack info.only_classes (Goal.V82.env s gl) s' info.is_evar (Goal.V82.concl s gl) else true in let fk'' = if not needs_backtrack then (if !typeclasses_debug then msgnl (str"no backtrack on " ++ pr_ev s gl ++ str " after " ++ Lazy.force info.auto_last_tac); fk) else fk' in aux s' (gls'::acc) fk'' gls) fk {it = (gl,info); sigma = s}) | [] -> Some (List.rev acc, s, fk) in fun {it = gls; sigma = s} fk -> let rec aux' = function | None -> fk () | Some (res, s', fk') -> let goals' = List.concat res in sk {it = goals'; sigma = s'} (fun () -> aux' (fk' ())) in aux' (aux s [] (fun () -> None) gls) let then_tac (first : atac) (second : atac) : atac = { skft = fun sk fk -> first.skft (then_list second sk) fk } let run_tac (t : 'a tac) (gl : 
autogoal sigma) : auto_result option = t.skft (fun x _ -> Some x) (fun _ -> None) gl type run_list_res = (auto_result * run_list_res fk) option let run_list_tac (t : 'a tac) p goals (gl : autogoal list sigma) : run_list_res = (then_list t (fun x fk -> Some (x, fk))) gl (fun _ -> None) let fail_tac : atac = { skft = fun sk fk _ -> fk () } let rec fix (t : 'a tac) : 'a tac = then_tac t { skft = fun sk fk -> (fix t).skft sk fk } let rec fix_limit limit (t : 'a tac) : 'a tac = if limit = 0 then fail_tac else then_tac t { skft = fun sk fk -> (fix_limit (pred limit) t).skft sk fk } let make_autogoal ?(only_classes=true) ?(st=full_transparent_state) cut ev g = let hints = make_autogoal_hints only_classes ~st g in (g.it, { hints = hints ; is_evar = ev; only_classes = only_classes; auto_depth = []; auto_last_tac = lazy (str"none"); auto_path = []; auto_cut = cut }) let cut_of_hints h = List.fold_left (fun cut db -> PathOr (Hint_db.cut db, cut)) PathEmpty h let make_autogoals ?(only_classes=true) ?(st=full_transparent_state) hints gs evm' = let cut = cut_of_hints hints in { it = list_map_i (fun i g -> let (gl, auto) = make_autogoal ~only_classes ~st cut (Some (fst g)) {it = snd g; sigma = evm'} in (gl, { auto with auto_depth = [i]})) 1 gs; sigma = evm' } let get_result r = match r with | None -> None | Some (gls, fk) -> Some (gls.sigma,fk) let run_on_evars ?(only_classes=true) ?(st=full_transparent_state) p evm hints tac = match evars_to_goals p evm with | None -> None (* This happens only because there's no evar having p *) | Some (goals, evm') -> let res = run_list_tac tac p goals (make_autogoals ~only_classes ~st hints goals evm') in match get_result res with | None -> raise Not_found | Some (evm', fk) -> Some (evars_reset_evd ~with_conv_pbs:true evm' evm, fk) let eauto_tac hints = then_tac normevars_tac (or_tac (hints_tac hints) intro_tac) let eauto_tac ?limit hints = match limit with | None -> fix (eauto_tac hints) | Some limit -> fix_limit limit (eauto_tac hints) let eauto ?(only_classes=true) ?st ?limit hints g = let gl = { it = make_autogoal ~only_classes ?st (cut_of_hints hints) None g; sigma = project g } in match run_tac (eauto_tac ?limit hints) gl with | None -> raise Not_found | Some {it = goals; sigma = s} -> {it = List.map fst goals; sigma = s} let real_eauto st ?limit hints p evd = let rec aux evd fails = let res, fails = try run_on_evars ~st p evd hints (eauto_tac ?limit hints), fails with Not_found -> List.fold_right (fun fk (res, fails) -> match res with | Some r -> res, fk :: fails | None -> get_result (fk ()), fails) fails (None, []) in match res with | None -> evd | Some (evd', fk) -> aux evd' (fk :: fails) in aux evd [] let resolve_all_evars_once debug limit p evd = let db = searchtable_map typeclasses_db in real_eauto ?limit (Hint_db.transparent_state db) [db] p evd (** We compute dependencies via a union-find algorithm. Beware of the imperative effects on the partition structure, it should not be shared, but only used locally. 
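Two undefined evars are put in the same class whenever one occurs in the other's type or context, or when they appear on the two sides of a remaining unification constraint; each resulting class can then be resolved independently.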
*) module Intpart = Unionfind.Make(Intset)(Intmap) let deps_of_constraints cstrs evm p = List.iter (fun (_, _, x, y) -> let evx = Evarutil.undefined_evars_of_term evm x in let evy = Evarutil.undefined_evars_of_term evm y in Intpart.union_set (Intset.union evx evy) p) cstrs let evar_dependencies evm p = Evd.fold_undefined (fun ev evi _ -> let evars = Intset.add ev (Evarutil.undefined_evars_of_evar_info evm evi) in Intpart.union_set evars p) evm () let resolve_one_typeclass env ?(sigma=Evd.empty) gl = let nc, gl, subst, _ = Evarutil.push_rel_context_to_named_context env gl in let (gl,t,sigma) = Goal.V82.mk_goal sigma nc gl Store.empty in let gls = { it = gl ; sigma = sigma } in let hints = searchtable_map typeclasses_db in let gls' = eauto ?limit:!typeclasses_depth ~st:(Hint_db.transparent_state hints) [hints] gls in let evd = sig_sig gls' in let t' = let (ev, inst) = destEvar t in mkEvar (ev, Array.of_list subst) in let term = Evarutil.nf_evar evd t' in evd, term let _ = Typeclasses.solve_instanciation_problem := (fun x y z -> resolve_one_typeclass x ~sigma:y z) (** [split_evars] returns groups of undefined evars according to dependencies *) let split_evars evm = let p = Intpart.create () in evar_dependencies evm p; deps_of_constraints (snd (extract_all_conv_pbs evm)) evm p; Intpart.partition p (** [evars_in_comp] filters an [evar_map], keeping only evars that belongs to a certain component *) let evars_in_comp comp evm = try evars_reset_evd (Intset.fold (fun ev acc -> Evd.add acc ev (Evd.find_undefined evm ev)) comp Evd.empty) evm with Not_found -> assert false let is_inference_forced p evd ev = try let evi = Evd.find_undefined evd ev in if Typeclasses.is_resolvable evi && snd (p ev evi) then let (loc, k) = evar_source ev evd in match k with | ImplicitArg (_, _, b) -> b | QuestionMark _ -> false | _ -> true else true with Not_found -> assert false let is_mandatory p comp evd = Intset.exists (is_inference_forced p evd) comp (** In case of unsatisfiable constraints, build a nice error message *) let error_unresolvable env comp do_split evd = let evd = Evarutil.nf_evar_map_undefined evd in let evm = if do_split then evars_in_comp comp evd else evd in let _, ev = Evd.fold_undefined (fun ev evi (b,acc) -> (* focus on one instance if only one was searched for *) if class_of_constr evi.evar_concl <> None then if not b (* || do_split *) then true, Some ev else b, None else b, acc) evm (false, None) in Typeclasses_errors.unsatisfiable_constraints (Evarutil.nf_env_evar evm env) evm ev (** Check if an evar is concerned by the current resolution attempt, (and in particular is in the current component), and also update its evar_info. Invariant : this should only be applied to undefined evars, and return undefined evar_info *) let select_and_update_evars p oevd in_comp evd ev evi = assert (evi.evar_body = Evar_empty); try let oevi = Evd.find_undefined oevd ev in if Typeclasses.is_resolvable oevi then Typeclasses.mark_unresolvable evi, (in_comp ev && p evd ev evi) else evi, false with Not_found -> Typeclasses.mark_unresolvable evi, p evd ev evi (** Do we still have unresolved evars that should be resolved ? *) let has_undefined p oevd evd = Evd.fold_undefined (fun ev evi has -> has || snd (p oevd ev evi)) evd false (** Revert the resolvability status of evars after resolution, potentially unprotecting some evars that were set unresolvable just for this call to resolution. 
*) let revert_resolvability oevd evd = Evd.fold_undefined (fun ev evi evm -> try if not (Typeclasses.is_resolvable evi) then let evi' = Evd.find_undefined oevd ev in if Typeclasses.is_resolvable evi' then Evd.add evm ev (Typeclasses.mark_resolvable evi) else evm else evm with Not_found -> evm) evd evd (** If [do_split] is [true], we try to separate the problem in several components and then solve them separately *) exception Unresolved let resolve_all_evars debug m env p oevd do_split fail = let split = if do_split then split_evars oevd else [Intset.empty] in let in_comp comp ev = if do_split then Intset.mem ev comp else true in let rec docomp evd = function | [] -> revert_resolvability oevd evd | comp :: comps -> let p = select_and_update_evars p oevd (in_comp comp) in try let evd' = resolve_all_evars_once debug m p evd in if has_undefined p oevd evd' then raise Unresolved; docomp evd' comps with Unresolved | Not_found -> if fail && (not do_split || is_mandatory (p evd) comp evd) then (* Unable to satisfy the constraints. *) error_unresolvable env comp do_split evd else (* Best effort: do nothing on this component *) docomp evd comps in docomp oevd split let initial_select_evars filter evd ev evi = filter (snd evi.Evd.evar_source) && Typeclasses.is_class_evar evd evi let resolve_typeclass_evars debug m env evd filter split fail = let evd = try Evarconv.consider_remaining_unif_problems ~ts:(Typeclasses.classes_transparent_state ()) env evd with e when Errors.noncritical e -> evd in resolve_all_evars debug m env (initial_select_evars filter) evd split fail let solve_inst debug depth env evd filter split fail = resolve_typeclass_evars debug depth env evd filter split fail let _ = Typeclasses.solve_instanciations_problem := solve_inst false !typeclasses_depth (** Options: depth, debug and transparency settings. 
*) open Goptions let set_typeclasses_debug d = (:=) typeclasses_debug d; Typeclasses.solve_instanciations_problem := solve_inst d !typeclasses_depth let get_typeclasses_debug () = !typeclasses_debug let set_typeclasses_debug = declare_bool_option { optsync = true; optdepr = false; optname = "debug output for typeclasses proof search"; optkey = ["Typeclasses";"Debug"]; optread = get_typeclasses_debug; optwrite = set_typeclasses_debug; } let set_typeclasses_depth d = (:=) typeclasses_depth d; Typeclasses.solve_instanciations_problem := solve_inst !typeclasses_debug !typeclasses_depth let get_typeclasses_depth () = !typeclasses_depth let set_typeclasses_depth = declare_int_option { optsync = true; optdepr = false; optname = "depth for typeclasses proof search"; optkey = ["Typeclasses";"Depth"]; optread = get_typeclasses_depth; optwrite = set_typeclasses_depth; } let set_transparency cl b = List.iter (fun r -> let gr = Smartlocate.global_with_alias r in let ev = Tacred.evaluable_of_global_reference (Global.env ()) gr in Classes.set_typeclass_transparency ev false b) cl VERNAC COMMAND EXTEND Typeclasses_Unfold_Settings | [ "Typeclasses" "Transparent" reference_list(cl) ] -> [ set_transparency cl true ] END VERNAC COMMAND EXTEND Typeclasses_Rigid_Settings | [ "Typeclasses" "Opaque" reference_list(cl) ] -> [ set_transparency cl false ] END open Genarg open Extraargs let pr_debug _prc _prlc _prt b = if b then Pp.str "debug" else Pp.mt() ARGUMENT EXTEND debug TYPED AS bool PRINTED BY pr_debug | [ "debug" ] -> [ true ] | [ ] -> [ false ] END let pr_depth _prc _prlc _prt = function Some i -> Util.pr_int i | None -> Pp.mt() ARGUMENT EXTEND depth TYPED AS int option PRINTED BY pr_depth | [ int_or_var_opt(v) ] -> [ match v with Some (ArgArg i) -> Some i | _ -> None ] END (* true = All transparent, false = Opaque if possible *) VERNAC COMMAND EXTEND Typeclasses_Settings | [ "Typeclasses" "eauto" ":=" debug(d) depth(depth) ] -> [ set_typeclasses_debug d; set_typeclasses_depth depth ] END let typeclasses_eauto ?(only_classes=false) ?(st=full_transparent_state) dbs gl = try let dbs = list_map_filter (fun db -> try Some (Auto.searchtable_map db) with e when Errors.noncritical e -> None) dbs in let st = match dbs with x :: _ -> Hint_db.transparent_state x | _ -> st in eauto ?limit:!typeclasses_depth ~only_classes ~st dbs gl with Not_found -> tclFAIL 0 (str" typeclasses eauto failed on: " ++ Printer.pr_goal gl) gl TACTIC EXTEND typeclasses_eauto | [ "typeclasses" "eauto" "with" ne_preident_list(l) ] -> [ typeclasses_eauto l ] | [ "typeclasses" "eauto" ] -> [ typeclasses_eauto ~only_classes:true [typeclasses_db] ] END let _ = Classes.refine_ref := Refine.refine (** Take the head of the arity of a constr. Used in the partial application tactic. 
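The traversal strips casts, collapses applications, and recurses through the codomain of products, the body of let-ins, and the head of applications until an atomic head is reached.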
*) let rec head_of_constr t = let t = strip_outer_cast(collapse_appl t) in match kind_of_term t with | Prod (_,_,c2) -> head_of_constr c2 | LetIn (_,_,_,c2) -> head_of_constr c2 | App (f,args) -> head_of_constr f | _ -> t TACTIC EXTEND head_of_constr [ "head_of_constr" ident(h) constr(c) ] -> [ let c = head_of_constr c in letin_tac None (Name h) c None allHyps ] END TACTIC EXTEND not_evar [ "not_evar" constr(ty) ] -> [ match kind_of_term ty with | Evar _ -> tclFAIL 0 (str"Evar") | _ -> tclIDTAC ] END TACTIC EXTEND is_ground [ "is_ground" constr(ty) ] -> [ fun gl -> if Evarutil.is_ground_term (project gl) ty then tclIDTAC gl else tclFAIL 0 (str"Not ground") gl ] END TACTIC EXTEND autoapply [ "autoapply" constr(c) "using" preident(i) ] -> [ fun gl -> let flags = flags_of_state (Auto.Hint_db.transparent_state (Auto.searchtable_map i)) in let cty = pf_type_of gl c in let ce = mk_clenv_from gl (c,cty) in unify_e_resolve flags (c,ce) gl ] END coq-8.4pl2/tactics/btermdn.mli0000640000175000001440000000200012010532755015407 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* sig type t val create : unit -> t val add : transparent_state option -> t -> (constr_pattern * Z.t) -> t val rmv : transparent_state option -> t -> (constr_pattern * Z.t) -> t val lookup : transparent_state option -> t -> constr -> (constr_pattern * Z.t) list val app : ((constr_pattern * Z.t) -> unit) -> t -> unit end val dnet_depth : int ref coq-8.4pl2/tactics/rewrite.ml40000640000175000001440000023762212123064012015363 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* = 2 -> let head = if isApp c then fst (destApp c) else c in if eq_constr (Lazy.force coq_eq) head then None else (try let params, args = array_chop (Array.length args - 2) args in let env' = Environ.push_rel_context rels env in let evd, evar = Evarutil.new_evar sigma env' (new_Type ()) in let inst = mkApp (Lazy.force rewrite_relation_class, [| evar; mkApp (c, params) |]) in let _ = Typeclasses.resolve_one_typeclass env' evd inst in Some (it_mkProd_or_LetIn t rels) with e when Errors.noncritical e -> None) | _ -> None let _ = Equality.register_is_applied_rewrite_relation is_applied_rewrite_relation let split_head = function hd :: tl -> hd, tl | [] -> assert(false) let new_cstr_evar (goal,cstr) env t = let cstr', t = Evarutil.new_evar cstr env t in (goal, cstr'), t let new_goal_evar (goal,cstr) env t = let goal', t = Evarutil.new_evar goal env t in (goal', cstr), t let build_signature evars env m (cstrs : (types * types option) option list) (finalcstr : (types * types option) option) = let new_evar evars env t = new_cstr_evar evars env (* ~src:(dummy_loc, ImplicitArg (ConstRef (Lazy.force respectful), (n, Some na))) *) t in let mk_relty evars newenv ty obj = match obj with | None | Some (_, None) -> let relty = mk_relation ty in if closed0 ty then let env' = Environ.reset_with_named_context (Environ.named_context_val env) env in new_evar evars env' relty else new_evar evars newenv relty | Some (x, Some rel) -> evars, rel in let rec aux env evars ty l = let t = Reductionops.whd_betadeltaiota env (fst evars) ty in match kind_of_term t, l with | Prod (na, ty, b), obj :: cstrs -> if noccurn 1 b (* non-dependent product *) then let ty = Reductionops.nf_betaiota (fst evars) ty in let (evars, b', arg, cstrs) = aux env evars (subst1 mkProp b) cstrs in let evars, 
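(* [mk_relty] either reuses the relation supplied for this argument or creates a fresh constraint evar standing for a relation on [ty]. *)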
relty = mk_relty evars env ty obj in let newarg = mkApp (Lazy.force respectful, [| ty ; b' ; relty ; arg |]) in evars, mkProd(na, ty, b), newarg, (ty, Some relty) :: cstrs else let (evars, b, arg, cstrs) = aux (Environ.push_rel (na, None, ty) env) evars b cstrs in let ty = Reductionops.nf_betaiota (fst evars) ty in let pred = mkLambda (na, ty, b) in let liftarg = mkLambda (na, ty, arg) in let arg' = mkApp (Lazy.force forall_relation, [| ty ; pred ; liftarg |]) in if obj = None then evars, mkProd(na, ty, b), arg', (ty, None) :: cstrs else error "build_signature: no constraint can apply on a dependent argument" | _, obj :: _ -> anomaly "build_signature: not enough products" | _, [] -> (match finalcstr with | None | Some (_, None) -> let t = Reductionops.nf_betaiota (fst evars) ty in let evars, rel = mk_relty evars env t None in evars, t, rel, [t, Some rel] | Some (t, Some rel) -> evars, t, rel, [t, Some rel]) in aux env evars m cstrs let proper_proof env evars carrier relation x = let goal = mkApp (Lazy.force proper_proxy_type, [| carrier ; relation; x |]) in new_cstr_evar evars env goal let extends_undefined evars evars' = let f ev evi found = found || not (Evd.mem evars ev) in fold_undefined f evars' false let find_class_proof proof_type proof_method env evars carrier relation = try let goal = mkApp (Lazy.force proof_type, [| carrier ; relation |]) in let evars', c = Typeclasses.resolve_one_typeclass env evars goal in if extends_undefined evars evars' then raise Not_found else mkApp (Lazy.force proof_method, [| carrier; relation; c |]) with e when Logic.catchable_exception e -> raise Not_found let get_reflexive_proof env = find_class_proof reflexive_type reflexive_proof env let get_symmetric_proof env = find_class_proof symmetric_type symmetric_proof env let get_transitive_proof env = find_class_proof transitive_type transitive_proof env exception FoundInt of int let array_find (arr: 'a array) (pred: int -> 'a -> bool): int = try for i=0 to Array.length arr - 1 do if pred i (arr.(i)) then raise (FoundInt i) done; raise Not_found with FoundInt i -> i type hypinfo = { cl : clausenv; prf : constr; car : constr; rel : constr; l2r : bool; c1 : constr; c2 : constr; c : (Tacinterp.interp_sign * Genarg.glob_constr_and_expr with_bindings) option; abs : (constr * types) option; flags : Unification.unify_flags; } let goalevars evars = fst evars let cstrevars evars = snd evars let evd_convertible env evd x y = try ignore(Evarconv.the_conv_x env x y evd); true with e when Errors.noncritical e -> false let rec decompose_app_rel env evd t = match kind_of_term t with | App (f, args) -> if Array.length args > 1 then let fargs, args = array_chop (Array.length args - 2) args in mkApp (f, fargs), args else let (f', args) = decompose_app_rel env evd args.(0) in let ty = Typing.type_of env evd args.(0) in let f'' = mkLambda (Name (id_of_string "x"), ty, mkLambda (Name (id_of_string "y"), lift 1 ty, mkApp (lift 2 f, [| mkApp (lift 2 f', [| mkRel 2; mkRel 1 |]) |]))) in (f'', args) | _ -> error "The term provided is not an applied relation." 
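(* [decompose_applied_relation] type-checks the lemma [c], builds a clause environment for it applied to the bindings [l], and splits the resulting conclusion with [decompose_app_rel] into a relation and its two arguments, checking that both sides have convertible types. If the type of [c] does not immediately have that shape, its products are split off and the search is retried on the reconstructed type before failing. *)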
(* let nc, c', cl = push_rel_context_to_named_context env c in *) (* let env' = reset_with_named_context nc env in *) let decompose_applied_relation env sigma flags orig (c,l) left2right = let c' = c in let ctype = Typing.type_of env sigma c' in let find_rel ty = let eqclause = Clenv.make_clenv_binding_env_apply env sigma None (c',ty) l in let (equiv, args) = decompose_app_rel env eqclause.evd (Clenv.clenv_type eqclause) in let c1 = args.(0) and c2 = args.(1) in let ty1, ty2 = Typing.type_of env eqclause.evd c1, Typing.type_of env eqclause.evd c2 in if not (evd_convertible env eqclause.evd ty1 ty2) then None else Some { cl=eqclause; prf=(Clenv.clenv_value eqclause); car=ty1; rel = equiv; l2r=left2right; c1=c1; c2=c2; c=orig; abs=None; flags = flags } in match find_rel ctype with | Some c -> c | None -> let ctx,t' = Reductionops.splay_prod_assum env sigma ctype in (* Search for underlying eq *) match find_rel (it_mkProd_or_LetIn t' ctx) with | Some c -> c | None -> error "The term does not end with an applied homogeneous relation." open Tacinterp let decompose_applied_relation_expr env sigma flags (is, (c,l)) left2right = let sigma, cbl = Tacinterp.interp_open_constr_with_bindings is env sigma (c,l) in decompose_applied_relation env sigma flags (Some (is, (c,l))) cbl left2right let rewrite_db = "rewrite" let conv_transparent_state = (Idpred.empty, Cpred.full) let _ = Auto.add_auto_init (fun () -> Auto.create_hint_db false rewrite_db conv_transparent_state true) let rewrite_transparent_state () = Auto.Hint_db.transparent_state (Auto.searchtable_map rewrite_db) let rewrite_unif_flags = { Unification.modulo_conv_on_closed_terms = None; Unification.use_metas_eagerly_in_conv_on_closed_terms = true; Unification.modulo_delta = empty_transparent_state; Unification.modulo_delta_types = full_transparent_state; Unification.modulo_delta_in_merge = None; Unification.check_applied_meta_types = true; Unification.resolve_evars = true; Unification.use_pattern_unification = true; Unification.use_meta_bound_pattern_unification = true; Unification.frozen_evars = ExistentialSet.empty; Unification.restrict_conv_on_strict_subterms = false; Unification.modulo_betaiota = false; Unification.modulo_eta = true; Unification.allow_K_in_toplevel_higher_order_unification = true } let rewrite2_unif_flags = { Unification.modulo_conv_on_closed_terms = Some conv_transparent_state; Unification.use_metas_eagerly_in_conv_on_closed_terms = true; Unification.modulo_delta = empty_transparent_state; Unification.modulo_delta_types = conv_transparent_state; Unification.modulo_delta_in_merge = None; Unification.check_applied_meta_types = true; Unification.resolve_evars = true; Unification.use_pattern_unification = true; Unification.use_meta_bound_pattern_unification = true; Unification.frozen_evars = ExistentialSet.empty; Unification.restrict_conv_on_strict_subterms = false; Unification.modulo_betaiota = true; Unification.modulo_eta = true; Unification.allow_K_in_toplevel_higher_order_unification = true } let general_rewrite_unif_flags () = let ts = rewrite_transparent_state () in { Unification.modulo_conv_on_closed_terms = Some ts; Unification.use_metas_eagerly_in_conv_on_closed_terms = true; Unification.modulo_delta = ts; Unification.modulo_delta_types = ts; Unification.modulo_delta_in_merge = None; Unification.check_applied_meta_types = true; Unification.resolve_evars = true; Unification.use_pattern_unification = true; Unification.use_meta_bound_pattern_unification = true; Unification.frozen_evars = ExistentialSet.empty; 
Unification.restrict_conv_on_strict_subterms = false; Unification.modulo_betaiota = true; Unification.modulo_eta = true; Unification.allow_K_in_toplevel_higher_order_unification = true } let convertible env evd x y = Reductionops.is_conv env evd x y let refresh_hypinfo env sigma hypinfo = if hypinfo.abs = None then let {l2r=l2r; c=c;cl=cl;flags=flags} = hypinfo in match c with | Some c -> (* Refresh the clausenv to not get the same meta twice in the goal. *) decompose_applied_relation_expr env sigma flags c l2r; | _ -> hypinfo else hypinfo let unify_eqn env sigma hypinfo t = if isEvar t then None else try let {cl=cl; prf=prf; car=car; rel=rel; l2r=l2r; c1=c1; c2=c2; c=c; abs=abs} = !hypinfo in let left = if l2r then c1 else c2 in let env', prf, c1, c2, car, rel = match abs with | Some (absprf, absprfty) -> let env' = clenv_unify ~flags:rewrite_unif_flags CONV left t cl in env', prf, c1, c2, car, rel | None -> let env' = clenv_unify ~flags:!hypinfo.flags CONV left t cl in let env' = Clenvtac.clenv_pose_dependent_evars true env' in (* let env' = Clenv.clenv_pose_metas_as_evars env' (Evd.undefined_metas env'.evd) in *) let evd' = Typeclasses.resolve_typeclasses ~fail:true env'.env env'.evd in let env' = { env' with evd = evd' } in let nf c = Evarutil.nf_evar evd' (Clenv.clenv_nf_meta env' c) in let c1 = nf c1 and c2 = nf c2 and car = nf car and rel = nf rel and prf = nf (Clenv.clenv_value env') in let ty1 = Typing.type_of env'.env env'.evd c1 and ty2 = Typing.type_of env'.env env'.evd c2 in if convertible env env'.evd ty1 ty2 then ( if occur_meta_or_existential prf then hypinfo := refresh_hypinfo env env'.evd !hypinfo; env', prf, c1, c2, car, rel) else raise Reduction.NotConvertible in let res = if l2r then (prf, (car, rel, c1, c2)) else try (mkApp (get_symmetric_proof env env'.evd car rel, [| c1 ; c2 ; prf |]), (car, rel, c2, c1)) with Not_found -> (prf, (car, inverse car rel, c2, c1)) in Some (env'.evd, res) with e when Class_tactics.catchable e -> None (* let unify_eqn env sigma hypinfo t = *) (* if isEvar t then None *) (* else try *) (* let {cl=cl; prf=prf; car=car; rel=rel; l2r=l2r; c1=c1; c2=c2; c=c; abs=abs} = !hypinfo in *) (* let left = if l2r then c1 else c2 in *) (* let evd', prf, c1, c2, car, rel = *) (* match abs with *) (* | Some (absprf, absprfty) -> *) (* let env' = clenv_unify allowK ~flags:rewrite_unif_flags CONV left t cl in *) (* env'.evd, prf, c1, c2, car, rel *) (* | None -> *) (* let cl' = Clenv.clenv_pose_metas_as_evars cl (Evd.undefined_metas cl.evd) in *) (* let sigma = cl'.evd in *) (* let c1 = Clenv.clenv_nf_meta cl' c1 *) (* and c2 = Clenv.clenv_nf_meta cl' c2 *) (* and prf = Clenv.clenv_nf_meta cl' prf *) (* and car = Clenv.clenv_nf_meta cl' car *) (* and rel = Clenv.clenv_nf_meta cl' rel *) (* in *) (* let sigma' = *) (* try Evarconv.the_conv_x ~ts:empty_transparent_state env t c1 sigma *) (* with Reduction.NotConvertible _ -> *) (* Evarconv.the_conv_x ~ts:conv_transparent_state env t c1 sigma *) (* in *) (* let sigma' = Evarconv.consider_remaining_unif_problems ~ts:conv_transparent_state env sigma' in *) (* let evd' = Typeclasses.resolve_typeclasses ~fail:true env sigma' in *) (* let nf c = Evarutil.nf_evar evd' c in *) (* let c1 = nf c1 and c2 = nf c2 *) (* and car = nf car and rel = nf rel *) (* and prf' = nf prf in *) (* if occur_meta_or_existential prf then *) (* hypinfo := refresh_hypinfo env evd' !hypinfo; *) (* evd', prf', c1, c2, car, rel *) (* in *) (* let res = *) (* if l2r then (prf, (car, rel, c1, c2)) *) (* else *) (* try (mkApp 
(get_symmetric_proof env Evd.empty car rel, *) (* [| c1 ; c2 ; prf |]), *) (* (car, rel, c2, c1)) *) (* with Not_found -> *) (* (prf, (car, inverse car rel, c2, c1)) *) (* in Some (evd', res) *) (* with Reduction.NotConvertible -> None *) (* | e when Class_tactics.catchable e -> None *) let unfold_impl t = match kind_of_term t with | App (arrow, [| a; b |])(* when eq_constr arrow (Lazy.force impl) *) -> mkProd (Anonymous, a, lift 1 b) | _ -> assert false let unfold_all t = match kind_of_term t with | App (id, [| a; b |]) (* when eq_constr id (Lazy.force coq_all) *) -> (match kind_of_term b with | Lambda (n, ty, b) -> mkProd (n, ty, b) | _ -> assert false) | _ -> assert false let unfold_forall t = match kind_of_term t with | App (id, [| a; b |]) (* when eq_constr id (Lazy.force coq_all) *) -> (match kind_of_term b with | Lambda (n, ty, b) -> mkProd (n, ty, b) | _ -> assert false) | _ -> assert false let arrow_morphism ta tb a b = let ap = is_Prop ta and bp = is_Prop tb in if ap && bp then mkApp (Lazy.force impl, [| a; b |]), unfold_impl else if ap then (* Domain in Prop, CoDomain in Type *) mkProd (Anonymous, a, b), (fun x -> x) else if bp then (* Dummy forall *) mkApp (Lazy.force coq_all, [| a; mkLambda (Anonymous, a, b) |]), unfold_forall else (* None in Prop, use arrow *) mkApp (Lazy.force arrow, [| a; b |]), unfold_impl let rec decomp_pointwise n c = if n = 0 then c else match kind_of_term c with | App (f, [| a; b; relb |]) when eq_constr f (Lazy.force pointwise_relation) -> decomp_pointwise (pred n) relb | App (f, [| a; b; arelb |]) when eq_constr f (Lazy.force forall_relation) -> decomp_pointwise (pred n) (Reductionops.beta_applist (arelb, [mkRel 1])) | _ -> raise (Invalid_argument "decomp_pointwise") let rec apply_pointwise rel = function | arg :: args -> (match kind_of_term rel with | App (f, [| a; b; relb |]) when eq_constr f (Lazy.force pointwise_relation) -> apply_pointwise relb args | App (f, [| a; b; arelb |]) when eq_constr f (Lazy.force forall_relation) -> apply_pointwise (Reductionops.beta_applist (arelb, [arg])) args | _ -> raise (Invalid_argument "apply_pointwise")) | [] -> rel let pointwise_or_dep_relation n t car rel = if noccurn 1 car && noccurn 1 rel then mkApp (Lazy.force pointwise_relation, [| t; lift (-1) car; lift (-1) rel |]) else mkApp (Lazy.force forall_relation, [| t; mkLambda (n, t, car); mkLambda (n, t, rel) |]) let lift_cstr env sigma evars (args : constr list) c ty cstr = let start env car = match cstr with | None | Some (_, None) -> Evarutil.e_new_evar evars env (mk_relation car) | Some (ty, Some rel) -> rel in let rec aux env prod n = if n = 0 then start env prod else match kind_of_term (Reduction.whd_betadeltaiota env prod) with | Prod (na, ty, b) -> if noccurn 1 b then let b' = lift (-1) b in let rb = aux env b' (pred n) in mkApp (Lazy.force pointwise_relation, [| ty; b'; rb |]) else let rb = aux (Environ.push_rel (na, None, ty) env) b (pred n) in mkApp (Lazy.force forall_relation, [| ty; mkLambda (na, ty, b); mkLambda (na, ty, rb) |]) | _ -> raise Not_found in let rec find env c ty = function | [] -> None | arg :: args -> try Some (aux env ty (succ (List.length args)), c, ty, arg :: args) with Not_found -> find env (mkApp (c, [| arg |])) (prod_applist ty [arg]) args in find env c ty args let unlift_cstr env sigma = function | None -> None | Some codom -> Some (decomp_pointwise 1 codom) type rewrite_flags = { under_lambdas : bool; on_morphisms : bool } let default_flags = { under_lambdas = true; on_morphisms = true; } type evars = evar_map * evar_map (* 
goal evars, constraint evars *) type rewrite_proof = | RewPrf of constr * constr | RewCast of cast_kind let get_opt_rew_rel = function RewPrf (rel, prf) -> Some rel | _ -> None type rewrite_result_info = { rew_car : constr; rew_from : constr; rew_to : constr; rew_prf : rewrite_proof; rew_evars : evars; } type rewrite_result = rewrite_result_info option type strategy = Environ.env -> identifier list -> constr -> types -> constr option -> evars -> rewrite_result option let get_rew_rel r = match r.rew_prf with | RewPrf (rel, prf) -> rel | RewCast c -> mkApp (Coqlib.build_coq_eq (), [| r.rew_car; r.rew_from; r.rew_to |]) let get_rew_prf r = match r.rew_prf with | RewPrf (rel, prf) -> rel, prf | RewCast c -> let rel = mkApp (Coqlib.build_coq_eq (), [| r.rew_car |]) in rel, mkCast (mkApp (Coqlib.build_coq_eq_refl (), [| r.rew_car; r.rew_from |]), c, mkApp (rel, [| r.rew_from; r.rew_to |])) let resolve_subrelation env avoid car rel prf rel' res = if eq_constr rel rel' then res else (* try let evd' = Evarconv.the_conv_x env rel rel' res.rew_evars in *) (* { res with rew_evars = evd' } *) (* with NotConvertible -> *) let app = mkApp (Lazy.force subrelation, [|car; rel; rel'|]) in let evars, subrel = new_cstr_evar res.rew_evars env app in let appsub = mkApp (subrel, [| res.rew_from ; res.rew_to ; prf |]) in { res with rew_prf = RewPrf (rel', appsub); rew_evars = evars } let resolve_morphism env avoid oldt m ?(fnewt=fun x -> x) args args' cstr evars = let evars, morph_instance, proj, sigargs, m', args, args' = let first = try (array_find args' (fun i b -> b <> None)) with Not_found -> raise (Invalid_argument "resolve_morphism") in let morphargs, morphobjs = array_chop first args in let morphargs', morphobjs' = array_chop first args' in let appm = mkApp(m, morphargs) in let appmtype = Typing.type_of env (goalevars evars) appm in let cstrs = List.map (Option.map (fun r -> r.rew_car, get_opt_rew_rel r.rew_prf)) (Array.to_list morphobjs') in (* Desired signature *) let evars, appmtype', signature, sigargs = build_signature evars env appmtype cstrs cstr in (* Actual signature found *) let cl_args = [| appmtype' ; signature ; appm |] in let app = mkApp (Lazy.force proper_type, cl_args) in let env' = Environ.push_named (id_of_string "do_subrelation", Some (Lazy.force do_subrelation), Lazy.force apply_subrelation) env in let evars, morph = new_cstr_evar evars env' app in evars, morph, morph, sigargs, appm, morphobjs, morphobjs' in let projargs, subst, evars, respars, typeargs = array_fold_left2 (fun (acc, subst, evars, sigargs, typeargs') x y -> let (carrier, relation), sigargs = split_head sigargs in match relation with | Some relation -> let carrier = substl subst carrier and relation = substl subst relation in (match y with | None -> let evars, proof = proper_proof env evars carrier relation x in [ proof ; x ; x ] @ acc, subst, evars, sigargs, x :: typeargs' | Some r -> [ snd (get_rew_prf r); r.rew_to; x ] @ acc, subst, evars, sigargs, r.rew_to :: typeargs') | None -> if y <> None then error "Cannot rewrite the argument of a dependent function"; x :: acc, x :: subst, evars, sigargs, x :: typeargs') ([], [], evars, sigargs, []) args args' in let proof = applistc proj (List.rev projargs) in let newt = applistc m' (List.rev typeargs) in match respars with [ a, Some r ] -> evars, proof, a, r, oldt, fnewt newt | _ -> assert(false) let apply_constraint env avoid car rel prf cstr res = match cstr with | None -> res | Some r -> resolve_subrelation env avoid car rel prf r res let eq_env x y = x == y let apply_rule 
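(* [apply_rule hypinfo loccs] is the strategy that rewrites with the equation described by [hypinfo] at the occurrences selected by [loccs]: every successful unification of the rule's left-hand side against the current subterm increments an occurrence counter, and only the selected occurrences yield a rewrite result (further constrained by [cstr] when one is given). *)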
hypinfo loccs : strategy = let (nowhere_except_in,occs) = loccs in let is_occ occ = if nowhere_except_in then List.mem occ occs else not (List.mem occ occs) in let occ = ref 0 in fun env avoid t ty cstr evars -> if not (eq_env !hypinfo.cl.env env) then hypinfo := refresh_hypinfo env (goalevars evars) !hypinfo; let unif = unify_eqn env (goalevars evars) hypinfo t in if unif <> None then incr occ; match unif with | Some (evd', (prf, (car, rel, c1, c2))) when is_occ !occ -> begin if eq_constr t c2 then Some None else let res = { rew_car = ty; rew_from = c1; rew_to = c2; rew_prf = RewPrf (rel, prf); rew_evars = evd', cstrevars evars } in Some (Some (apply_constraint env avoid car rel prf cstr res)) end | _ -> None let apply_lemma flags (evm,c) left2right loccs : strategy = fun env avoid t ty cstr evars -> let hypinfo = ref (decompose_applied_relation env (goalevars evars) flags None c left2right) in apply_rule hypinfo loccs env avoid t ty cstr evars let make_leibniz_proof c ty r = let prf = match r.rew_prf with | RewPrf (rel, prf) -> let rel = mkApp (Lazy.force coq_eq, [| ty |]) in let prf = mkApp (Lazy.force coq_f_equal, [| r.rew_car; ty; mkLambda (Anonymous, r.rew_car, c); r.rew_from; r.rew_to; prf |]) in RewPrf (rel, prf) | RewCast k -> r.rew_prf in { r with rew_car = ty; rew_from = subst1 r.rew_from c; rew_to = subst1 r.rew_to c; rew_prf = prf } open Elimschemes let reset_env env = let env' = Global.env_of_context (Environ.named_context_val env) in Environ.push_rel_context (Environ.rel_context env) env' let fold_match ?(force=false) env sigma c = let (ci, p, c, brs) = destCase c in let cty = Retyping.get_type_of env sigma c in let dep, pred, exists, sk = let env', ctx, body = let ctx, pred = decompose_lam_assum p in let env' = Environ.push_rel_context ctx env in env', ctx, pred in let sortp = Retyping.get_sort_family_of env' sigma body in let sortc = Retyping.get_sort_family_of env sigma cty in let dep = not (noccurn 1 body) in let pred = if dep then p else it_mkProd_or_LetIn (subst1 mkProp body) (List.tl ctx) in let sk = if sortp = InProp then if sortc = InProp then if dep then case_dep_scheme_kind_from_prop else case_scheme_kind_from_prop else ( if dep then case_dep_scheme_kind_from_type_in_prop else case_scheme_kind_from_type) else ((* sortc <> InProp by typing *) if dep then case_dep_scheme_kind_from_type else case_scheme_kind_from_type) in let exists = Ind_tables.check_scheme sk ci.ci_ind in if exists || force then dep, pred, exists, Ind_tables.find_scheme sk ci.ci_ind else raise Not_found in let app = let ind, args = Inductive.find_rectype env cty in let pars, args = list_chop ci.ci_npar args in let meths = List.map (fun br -> br) (Array.to_list brs) in applist (mkConst sk, pars @ [pred] @ meths @ args @ [c]) in sk, (if exists then env else reset_env env), app let unfold_match env sigma sk app = match kind_of_term app with | App (f', args) when f' = mkConst sk -> let v = Environ.constant_value (Global.env ()) sk in Reductionops.whd_beta sigma (mkApp (v, args)) | _ -> app let is_rew_cast = function RewCast _ -> true | _ -> false let coerce env avoid cstr res = let rel, prf = get_rew_prf res in apply_constraint env avoid res.rew_car rel prf cstr res let subterm all flags (s : strategy) : strategy = let rec aux env avoid t ty cstr evars = let cstr' = Option.map (fun c -> (ty, Some c)) cstr in match kind_of_term t with | App (m, args) -> let rewrite_args success = let args', evars', progress = Array.fold_left (fun (acc, evars, progress) arg -> if progress <> None && not all then (None :: 
acc, evars, progress) else let res = s env avoid arg (Typing.type_of env (goalevars evars) arg) None evars in match res with | Some None -> (None :: acc, evars, if progress = None then Some false else progress) | Some (Some r) -> (Some r :: acc, r.rew_evars, Some true) | None -> (None :: acc, evars, progress)) ([], evars, success) args in match progress with | None -> None | Some false -> Some None | Some true -> let args' = Array.of_list (List.rev args') in if array_exists (function | None -> false | Some r -> not (is_rew_cast r.rew_prf)) args' then let evars', prf, car, rel, c1, c2 = resolve_morphism env avoid t m args args' cstr' evars' in let res = { rew_car = ty; rew_from = c1; rew_to = c2; rew_prf = RewPrf (rel, prf); rew_evars = evars' } in Some (Some res) else let args' = array_map2 (fun aorig anew -> match anew with None -> aorig | Some r -> r.rew_to) args args' in let res = { rew_car = ty; rew_from = t; rew_to = mkApp (m, args'); rew_prf = RewCast DEFAULTcast; rew_evars = evars' } in Some (Some res) in if flags.on_morphisms then let evarsref = ref (snd evars) in let mty = Typing.type_of env (goalevars evars) m in let cstr', m, mty, argsl, args = let argsl = Array.to_list args in match lift_cstr env (goalevars evars) evarsref argsl m mty None with | Some (cstr', m, mty, args) -> Some cstr', m, mty, args, Array.of_list args | None -> None, m, mty, argsl, args in let m' = s env avoid m mty cstr' (fst evars, !evarsref) in match m' with | None -> rewrite_args None (* Standard path, try rewrite on arguments *) | Some None -> rewrite_args (Some false) | Some (Some r) -> (* We rewrote the function and get a proof of pointwise rel for the arguments. We just apply it. *) let prf = match r.rew_prf with | RewPrf (rel, prf) -> RewPrf (apply_pointwise rel argsl, mkApp (prf, args)) | x -> x in let res = { rew_car = prod_appvect r.rew_car args; rew_from = mkApp(r.rew_from, args); rew_to = mkApp(r.rew_to, args); rew_prf = prf; rew_evars = r.rew_evars } in match prf with | RewPrf (rel, prf) -> Some (Some (apply_constraint env avoid res.rew_car rel prf cstr res)) | _ -> Some (Some res) else rewrite_args None | Prod (n, x, b) when noccurn 1 b -> let b = subst1 mkProp b in let tx = Typing.type_of env (goalevars evars) x and tb = Typing.type_of env (goalevars evars) b in let mor, unfold = arrow_morphism tx tb x b in let res = aux env avoid mor ty cstr evars in (match res with | Some (Some r) -> Some (Some { r with rew_to = unfold r.rew_to }) | _ -> res) (* if x' = None && flags.under_lambdas then *) (* let lam = mkLambda (n, x, b) in *) (* let lam', occ = aux env lam occ None in *) (* let res = *) (* match lam' with *) (* | None -> None *) (* | Some (prf, (car, rel, c1, c2)) -> *) (* Some (resolve_morphism env sigma t *) (* ~fnewt:unfold_all *) (* (Lazy.force coq_all) [| x ; lam |] [| None; lam' |] *) (* cstr evars) *) (* in res, occ *) (* else *) | Prod (n, dom, codom) -> let lam = mkLambda (n, dom, codom) in let app, unfold = if eq_constr ty mkProp then mkApp (Lazy.force coq_all, [| dom; lam |]), unfold_all else mkApp (Lazy.force coq_forall, [| dom; lam |]), unfold_forall in let res = aux env avoid app ty cstr evars in (match res with | Some (Some r) -> Some (Some { r with rew_to = unfold r.rew_to }) | _ -> res) | Lambda (n, t, b) when flags.under_lambdas -> let n' = name_app (fun id -> Tactics.fresh_id_in_env avoid id env) n in let env' = Environ.push_rel (n', None, t) env in let b' = s env' avoid b (Typing.type_of env' (goalevars evars) b) (unlift_cstr env (goalevars evars) cstr) evars in (match 
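(* Rewriting under a binder: the proof obtained for the body is
   re-abstracted over the bound variable and its relation is lifted to a
   pointwise (or dependent pointwise) relation via
   [pointwise_or_dep_relation]. *)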
b' with | Some (Some r) -> let prf = match r.rew_prf with | RewPrf (rel, prf) -> let rel = pointwise_or_dep_relation n' t r.rew_car rel in let prf = mkLambda (n', t, prf) in RewPrf (rel, prf) | x -> x in Some (Some { r with rew_prf = prf; rew_car = mkProd (n, t, r.rew_car); rew_from = mkLambda(n, t, r.rew_from); rew_to = mkLambda (n, t, r.rew_to) }) | _ -> b') | Case (ci, p, c, brs) -> let cty = Typing.type_of env (goalevars evars) c in let cstr' = Some (mkApp (Lazy.force coq_eq, [| cty |])) in let c' = s env avoid c cty cstr' evars in let res = match c' with | Some (Some r) -> let res = make_leibniz_proof (mkCase (ci, lift 1 p, mkRel 1, Array.map (lift 1) brs)) ty r in Some (Some (coerce env avoid cstr res)) | x -> if array_for_all ((=) 0) ci.ci_cstr_ndecls then let cstr = Some (mkApp (Lazy.force coq_eq, [| ty |])) in let found, brs' = Array.fold_left (fun (found, acc) br -> if found <> None then (found, fun x -> lift 1 br :: acc x) else match s env avoid br ty cstr evars with | Some (Some r) -> (Some r, fun x -> mkRel 1 :: acc x) | _ -> (None, fun x -> lift 1 br :: acc x)) (None, fun x -> []) brs in match found with | Some r -> let ctxc = mkCase (ci, lift 1 p, lift 1 c, Array.of_list (List.rev (brs' x))) in Some (Some (make_leibniz_proof ctxc ty r)) | None -> x else match try Some (fold_match env (goalevars evars) t) with Not_found -> None with | None -> x | Some (cst, _, t') -> match aux env avoid t' ty cstr evars with | Some (Some prf) -> Some (Some { prf with rew_from = t; rew_to = unfold_match env (goalevars evars) cst prf.rew_to }) | x' -> x in (match res with | Some (Some r) -> let rel, prf = get_rew_prf r in Some (Some (apply_constraint env avoid r.rew_car rel prf cstr r)) | x -> x) | _ -> None in aux let all_subterms = subterm true default_flags let one_subterm = subterm false default_flags (** Requires transitivity of the rewrite step, if not a reduction. Not tail-recursive. *) let transitivity env avoid (res : rewrite_result_info) (next : strategy) : rewrite_result option = match next env avoid res.rew_to res.rew_car (get_opt_rew_rel res.rew_prf) res.rew_evars with | None -> None | Some None -> Some (Some res) | Some (Some res') -> match res.rew_prf with | RewCast c -> Some (Some { res' with rew_from = res.rew_from }) | RewPrf (rew_rel, rew_prf) -> match res'.rew_prf with | RewCast _ -> Some (Some ({ res with rew_to = res'.rew_to })) | RewPrf (res'_rel, res'_prf) -> let prfty = mkApp (Lazy.force transitive_type, [| res.rew_car; rew_rel |]) in let evars, prf = new_cstr_evar res'.rew_evars env prfty in let prf = mkApp (prf, [|res.rew_from; res'.rew_from; res'.rew_to; rew_prf; res'_prf |]) in Some (Some { res' with rew_from = res.rew_from; rew_evars = evars; rew_prf = RewPrf (res'_rel, prf) }) (** Rewriting strategies. 
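A strategy receives the environment, the identifiers to avoid, the term to
    rewrite, its type, an optional relation constraint and the pair of evar
    maps; it returns [None] on failure, [Some None] on success without any
    progress, and [Some (Some res)] when an actual rewrite was found.  As a
    rough sketch, exhaustively rewriting with an already-interpreted lemma
    [lem] (a placeholder name) in direction [l2r] could be written
    [any (apply_lemma rewrite_unif_flags lem l2r (false, []))], where
    [(false, [])] selects all occurrences as in [lemmas] below.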
Inspired by ELAN's rewriting strategies: http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.4049 *) module Strategies = struct let fail : strategy = fun env avoid t ty cstr evars -> None let id : strategy = fun env avoid t ty cstr evars -> Some None let refl : strategy = fun env avoid t ty cstr evars -> let evars, rel = match cstr with | None -> new_cstr_evar evars env (mk_relation ty) | Some r -> evars, r in let evars, proof = let mty = mkApp (Lazy.force proper_proxy_type, [| ty ; rel; t |]) in new_cstr_evar evars env mty in Some (Some { rew_car = ty; rew_from = t; rew_to = t; rew_prf = RewPrf (rel, proof); rew_evars = evars }) let progress (s : strategy) : strategy = fun env avoid t ty cstr evars -> match s env avoid t ty cstr evars with | None -> None | Some None -> None | r -> r let seq fst snd : strategy = fun env avoid t ty cstr evars -> match fst env avoid t ty cstr evars with | None -> None | Some None -> snd env avoid t ty cstr evars | Some (Some res) -> transitivity env avoid res snd let choice fst snd : strategy = fun env avoid t ty cstr evars -> match fst env avoid t ty cstr evars with | None -> snd env avoid t ty cstr evars | res -> res let try_ str : strategy = choice str id let fix (f : strategy -> strategy) : strategy = let rec aux env = f (fun env -> aux env) env in aux let any (s : strategy) : strategy = fix (fun any -> try_ (seq s any)) let repeat (s : strategy) : strategy = seq s (any s) let bu (s : strategy) : strategy = fix (fun s' -> seq (choice (progress (all_subterms s')) s) (try_ s')) let td (s : strategy) : strategy = fix (fun s' -> seq (choice s (progress (all_subterms s'))) (try_ s')) let innermost (s : strategy) : strategy = fix (fun ins -> choice (one_subterm ins) s) let outermost (s : strategy) : strategy = fix (fun out -> choice s (one_subterm out)) let lemmas flags cs : strategy = List.fold_left (fun tac (l,l2r) -> choice tac (apply_lemma flags l l2r (false,[]))) fail cs let inj_open c = (Evd.empty,c) let old_hints (db : string) : strategy = let rules = Autorewrite.find_rewrites db in lemmas rewrite_unif_flags (List.map (fun hint -> (inj_open (hint.Autorewrite.rew_lemma, NoBindings), hint.Autorewrite.rew_l2r)) rules) let hints (db : string) : strategy = fun env avoid t ty cstr evars -> let rules = Autorewrite.find_matches db t in let lemma hint = (inj_open (hint.Autorewrite.rew_lemma, NoBindings), hint.Autorewrite.rew_l2r) in let lems = List.map lemma rules in lemmas rewrite_unif_flags lems env avoid t ty cstr evars let reduce (r : Redexpr.red_expr) : strategy = let rfn, ckind = Redexpr.reduction_of_red_expr r in fun env avoid t ty cstr evars -> let t' = rfn env (goalevars evars) t in if eq_constr t' t then Some None else Some (Some { rew_car = ty; rew_from = t; rew_to = t'; rew_prf = RewCast ckind; rew_evars = evars }) let fold c : strategy = fun env avoid t ty cstr evars -> (* let sigma, (c,_) = Tacinterp.interp_open_constr_with_bindings is env (goalevars evars) c in *) let sigma, c = Constrintern.interp_open_constr (goalevars evars) env c in let unfolded = try Tacred.try_red_product env sigma c with e when Errors.noncritical e -> error "fold: the term is not unfoldable !" 
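(* The [fold] strategy: [try_red_product] computes the unfolded form of [c],
   which is then unified (up to conversion) with the current term [t]; on
   success [t] is replaced by the instantiated [c], using a mere cast as
   rewrite proof. *)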
in try let sigma = Unification.w_unify env sigma CONV ~flags:Unification.elim_flags unfolded t in let c' = Evarutil.nf_evar sigma c in Some (Some { rew_car = ty; rew_from = t; rew_to = c'; rew_prf = RewCast DEFAULTcast; rew_evars = sigma, cstrevars evars }) with e when Errors.noncritical e -> None let fold_glob c : strategy = fun env avoid t ty cstr evars -> (* let sigma, (c,_) = Tacinterp.interp_open_constr_with_bindings is env (goalevars evars) c in *) let sigma, c = Pretyping.Default.understand_tcc (goalevars evars) env c in let unfolded = try Tacred.try_red_product env sigma c with e when Errors.noncritical e -> error "fold: the term is not unfoldable !" in try let sigma = Unification.w_unify env sigma CONV ~flags:Unification.elim_flags unfolded t in let c' = Evarutil.nf_evar sigma c in Some (Some { rew_car = ty; rew_from = t; rew_to = c'; rew_prf = RewCast DEFAULTcast; rew_evars = sigma, cstrevars evars }) with e when Errors.noncritical e -> None end (** The strategy for a single rewrite, dealing with occurences. *) let rewrite_strat flags occs hyp = let app = apply_rule hyp occs in let rec aux () = Strategies.choice app (subterm true flags (fun env -> aux () env)) in aux () let get_hypinfo_ids {c = opt} = match opt with | None -> [] | Some (is, gc) -> List.map fst is.lfun @ is.avoid_ids let rewrite_with flags c left2right loccs : strategy = fun env avoid t ty cstr evars -> let gevars = goalevars evars in let hypinfo = ref (decompose_applied_relation_expr env gevars flags c left2right) in let avoid = get_hypinfo_ids !hypinfo @ avoid in rewrite_strat default_flags loccs hypinfo env avoid t ty cstr (gevars, cstrevars evars) let apply_strategy (s : strategy) env avoid concl cstr evars = let res = s env avoid concl (Typing.type_of env (goalevars evars) concl) (Option.map snd cstr) evars in match res with | None -> None | Some None -> Some None | Some (Some res) -> Some (Some (res.rew_prf, res.rew_evars, res.rew_car, res.rew_from, res.rew_to)) let merge_evars (goal,cstr) = Evd.merge goal cstr let solve_constraints env evars = Typeclasses.resolve_typeclasses env ~split:false ~fail:true (merge_evars evars) let nf_zeta = Reductionops.clos_norm_flags (Closure.RedFlags.mkflags [Closure.RedFlags.fZETA]) let map_rewprf f = function | RewPrf (rel, prf) -> RewPrf (f rel, f prf) | RewCast c -> RewCast c type result = (evar_map * constr option * types) option option let cl_rewrite_clause_aux ?(abs=None) strat env avoid sigma concl is_hyp : result = let cstr = let sort = mkProp in let impl = Lazy.force impl in match is_hyp with | None -> (sort, inverse sort impl) | Some _ -> (sort, impl) in let evars = (sigma, Evd.empty) in let eq = apply_strategy strat env avoid concl (Some cstr) evars in match eq with | Some (Some (p, evars, car, oldt, newt)) -> let evars' = solve_constraints env evars in let p = map_rewprf (fun p -> nf_zeta env evars' (Evarutil.nf_evar evars' p)) p in let newt = Evarutil.nf_evar evars' newt in let abs = Option.map (fun (x, y) -> Evarutil.nf_evar evars' x, Evarutil.nf_evar evars' y) abs in let evars = (* Keep only original evars (potentially instantiated) and goal evars, the rest has been defined and substituted already. 
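Concretely, only the evars of [evars'] that already occur in the original
       goal map are kept; the constraint evars created while rewriting have
       been solved by [solve_constraints] above, so they need not survive.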
*) (* let cstrs = cstrevars evars in *) (* cstrs is small *) let gevars = goalevars evars in Evd.fold (fun ev evi acc -> if Evd.mem gevars ev then Evd.add acc ev evi else acc) evars' Evd.empty (* Evd.fold (fun ev evi acc -> Evd.remove acc ev) cstrs evars' *) in let res = match is_hyp with | Some id -> (match p with | RewPrf (rel, p) -> let term = match abs with | None -> p | Some (t, ty) -> mkApp (mkLambda (Name (id_of_string "lemma"), ty, p), [| t |]) in Some (evars, Some (mkApp (term, [| mkVar id |])), newt) | RewCast c -> Some (evars, None, newt)) | None -> (match p with | RewPrf (rel, p) -> (match abs with | None -> Some (evars, Some p, newt) | Some (t, ty) -> let proof = mkApp (mkLambda (Name (id_of_string "lemma"), ty, p), [| t |]) in Some (evars, Some proof, newt)) | RewCast c -> Some (evars, None, newt)) in Some res | Some None -> Some None | None -> None let rewrite_refine (evd,c) = Tacmach.refine c let cl_rewrite_clause_tac ?abs strat meta clause gl = let evartac evd = Refiner.tclEVARS evd in let treat res = match res with | None -> tclFAIL 0 (str "Nothing to rewrite") | Some None -> tclFAIL 0 (str"No progress made") | Some (Some (undef, p, newt)) -> let tac = match clause, p with | Some id, Some p -> cut_replacing id newt (Tacmach.refine p) | Some id, None -> change_in_hyp None newt (id, InHypTypeOnly) | None, Some p -> let name = next_name_away_with_default "H" Anonymous (pf_ids_of_hyps gl) in tclTHENLAST (Tacmach.internal_cut_no_check false name newt) (tclTHEN (Tactics.revert [name]) (Tacmach.refine p)) | None, None -> change_in_concl None newt in tclTHEN (evartac undef) tac in let tac = try let concl, is_hyp = match clause with | Some id -> pf_get_hyp_typ gl id, Some id | None -> pf_concl gl, None in let sigma = project gl in let concl = Evarutil.nf_evar sigma concl in let res = cl_rewrite_clause_aux ?abs strat (pf_env gl) [] sigma concl is_hyp in treat res with | Loc.Exc_located (_, TypeClassError (env, (UnsatisfiableConstraints _ as e))) | TypeClassError (env, (UnsatisfiableConstraints _ as e)) -> Refiner.tclFAIL_lazy 0 (lazy (str"Unable to satisfy the rewriting constraints." 
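(* raised when typeclass resolution cannot discharge the [Proper]/relation
   side conditions generated by the rewrite *)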
++ fnl () ++ Himsg.explain_typeclass_error env e)) in tac gl open Goal open Environ let bind_gl_info f = bind concl (fun c -> bind env (fun v -> bind defs (fun ev -> f c v ev))) let fail l s = raise (Refiner.FailError (l, lazy s)) let new_refine c : Goal.subgoals Goal.sensitive = let refable = Goal.Refinable.make (fun handle -> Goal.Refinable.constr_of_open_constr handle true c) in Goal.bind refable Goal.refine let assert_replacing id newt tac = let sens = bind_gl_info (fun concl env sigma -> let nc' = Environ.fold_named_context (fun _ (n, b, t as decl) nc' -> if n = id then (n, b, newt) :: nc' else decl :: nc') env ~init:[] in let reft = Refinable.make (fun h -> Goal.bind (Refinable.mkEvar h (Environ.reset_with_named_context (val_of_named_context nc') env) concl) (fun ev -> Goal.bind (Refinable.mkEvar h env newt) (fun ev' -> let inst = fold_named_context (fun _ (n, b, t) inst -> if n = id then ev' :: inst else if b = None then mkVar n :: inst else inst) env ~init:[] in let (e, args) = destEvar ev in Goal.return (mkEvar (e, Array.of_list inst))))) in Goal.bind reft Goal.refine) in Proofview.tclTHEN (Proofview.tclSENSITIVE sens) (Proofview.tclFOCUS 2 2 tac) let newfail n s = Proofview.tclZERO (Refiner.FailError (n, lazy s)) let cl_rewrite_clause_newtac ?abs strat clause = let treat (res, is_hyp) = match res with | None -> newfail 0 (str "Nothing to rewrite") | Some None -> newfail 0 (str"No progress made") | Some (Some res) -> match is_hyp, res with | Some id, (undef, Some p, newt) -> assert_replacing id newt (Proofview.tclSENSITIVE (new_refine (undef, p))) | Some id, (undef, None, newt) -> Proofview.tclSENSITIVE (Goal.convert_hyp false (id, None, newt)) | None, (undef, Some p, newt) -> let refable = Goal.Refinable.make (fun handle -> Goal.bind env (fun env -> Goal.bind (Refinable.mkEvar handle env newt) (fun ev -> Goal.Refinable.constr_of_open_constr handle true (undef, mkApp (p, [| ev |]))))) in Proofview.tclSENSITIVE (Goal.bind refable Goal.refine) | None, (undef, None, newt) -> Proofview.tclSENSITIVE (Goal.convert_concl false newt) in let info = bind_gl_info (fun concl env sigma -> let ty, is_hyp = match clause with | Some id -> Environ.named_type id env, Some id | None -> concl, None in let res = try cl_rewrite_clause_aux ?abs strat env [] sigma ty is_hyp with | Loc.Exc_located (_, TypeClassError (env, (UnsatisfiableConstraints _ as e))) | TypeClassError (env, (UnsatisfiableConstraints _ as e)) -> fail 0 (str"setoid rewrite failed: unable to satisfy the rewriting constraints." 
++ fnl () ++ Himsg.explain_typeclass_error env e) in return (res, is_hyp)) in Proofview.tclGOALBINDU info (fun i -> treat i) let cl_rewrite_clause_new_strat ?abs strat clause = init_setoid (); cl_rewrite_clause_newtac ?abs strat clause let cl_rewrite_clause_newtac' l left2right occs clause = Proof_global.run_tactic (Proofview.tclFOCUS 1 1 (cl_rewrite_clause_new_strat (rewrite_with rewrite_unif_flags l left2right occs) clause)) let tactic_init_setoid () = init_setoid (); tclIDTAC let cl_rewrite_clause_strat strat clause = tclTHEN (tactic_init_setoid ()) (fun gl -> let meta = Evarutil.new_meta() in try cl_rewrite_clause_tac strat (mkMeta meta) clause gl with | Refiner.FailError (n, pp) -> tclFAIL n (str"setoid rewrite failed: " ++ Lazy.force pp) gl) let cl_rewrite_clause l left2right occs clause gl = cl_rewrite_clause_strat (rewrite_with (general_rewrite_unif_flags ()) l left2right occs) clause gl open Pp open Pcoq open Names open Tacexpr open Tacinterp open Termops open Genarg open Extraargs let occurrences_of = function | n::_ as nl when n < 0 -> (false,List.map abs nl) | nl -> if List.exists (fun n -> n < 0) nl then error "Illegal negative occurrence number."; (true,nl) let apply_constr_expr c l2r occs = fun env avoid t ty cstr evars -> let evd, c = Constrintern.interp_open_constr (goalevars evars) env c in apply_lemma (general_rewrite_unif_flags ()) (evd, (c, NoBindings)) l2r occs env avoid t ty cstr (evd, cstrevars evars) let apply_glob_constr c l2r occs = fun env avoid t ty cstr evars -> let evd, c = (Pretyping.Default.understand_tcc (goalevars evars) env c) in apply_lemma (general_rewrite_unif_flags ()) (evd, (c, NoBindings)) l2r occs env avoid t ty cstr (evd, cstrevars evars) let interp_constr_list env sigma = List.map (fun c -> let evd, c = Constrintern.interp_open_constr sigma env c in (evd, (c, NoBindings)), true) let interp_glob_constr_list env sigma = List.map (fun c -> let evd, c = Pretyping.Default.understand_tcc sigma env c in (evd, (c, NoBindings)), true) open Pcoq (* Syntax for rewriting with strategies *) type constr_expr_with_bindings = constr_expr with_bindings type glob_constr_with_bindings = glob_constr_and_expr with_bindings type glob_constr_with_bindings_sign = interp_sign * glob_constr_and_expr with_bindings let pr_glob_constr_with_bindings_sign _ _ _ (ge : glob_constr_with_bindings_sign) = Printer.pr_glob_constr (fst (fst (snd ge))) let pr_glob_constr_with_bindings _ _ _ (ge : glob_constr_with_bindings) = Printer.pr_glob_constr (fst (fst ge)) let pr_constr_expr_with_bindings prc _ _ (ge : constr_expr_with_bindings) = prc (fst ge) let interp_glob_constr_with_bindings ist gl c = Tacmach.project gl , (ist, c) let glob_glob_constr_with_bindings ist l = Tacinterp.intern_constr_with_bindings ist l let subst_glob_constr_with_bindings s c = subst_glob_with_bindings s c ARGUMENT EXTEND glob_constr_with_bindings PRINTED BY pr_glob_constr_with_bindings_sign INTERPRETED BY interp_glob_constr_with_bindings GLOBALIZED BY glob_glob_constr_with_bindings SUBSTITUTED BY subst_glob_constr_with_bindings RAW_TYPED AS constr_expr_with_bindings RAW_PRINTED BY pr_constr_expr_with_bindings GLOB_TYPED AS glob_constr_with_bindings GLOB_PRINTED BY pr_glob_constr_with_bindings [ constr_with_bindings(bl) ] -> [ bl ] END type ('constr,'redexpr) strategy_ast = | StratId | StratFail | StratRefl | StratUnary of string * ('constr,'redexpr) strategy_ast | StratBinary of string * ('constr,'redexpr) strategy_ast * ('constr,'redexpr) strategy_ast | StratConstr of 'constr * bool | StratTerms of 'constr 
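(* The strategy AST mirrors the [rewstrategy] grammar defined below; for
   instance "topdown (hints db); try <- H" (with [H] some lemma) should
   roughly correspond to
   [StratBinary ("compose",
      StratUnary ("topdown", StratHints (false, "db")),
      StratUnary ("try", StratConstr (H, false)))].
   Note that the "subterms"/"subterm" grammar rules below appear to emit
   [StratUnary ("all_subterms", _)] / [StratUnary ("one_subterm", _)],
   strings that [strategy_of_ast] does not recognize. *)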
list | StratHints of bool * string | StratEval of 'redexpr | StratFold of 'constr let rec map_strategy (f : 'a -> 'a2) (g : 'b -> 'b2) : ('a,'b) strategy_ast -> ('a2,'b2) strategy_ast = function | StratId | StratFail | StratRefl as s -> s | StratUnary (s, str) -> StratUnary (s, map_strategy f g str) | StratBinary (s, str, str') -> StratBinary (s, map_strategy f g str, map_strategy f g str') | StratConstr (c, b) -> StratConstr (f c, b) | StratTerms l -> StratTerms (List.map f l) | StratHints (b, id) -> StratHints (b, id) | StratEval r -> StratEval (g r) | StratFold c -> StratFold (f c) let rec strategy_of_ast = function | StratId -> Strategies.id | StratFail -> Strategies.fail | StratRefl -> Strategies.refl | StratUnary (f, s) -> let s' = strategy_of_ast s in let f' = match f with | "subterms" -> all_subterms | "subterm" -> one_subterm | "innermost" -> Strategies.innermost | "outermost" -> Strategies.outermost | "bottomup" -> Strategies.bu | "topdown" -> Strategies.td | "progress" -> Strategies.progress | "try" -> Strategies.try_ | "any" -> Strategies.any | "repeat" -> Strategies.repeat | _ -> anomalylabstrm "strategy_of_ast" (str"Unkwnon strategy: " ++ str f) in f' s' | StratBinary (f, s, t) -> let s' = strategy_of_ast s in let t' = strategy_of_ast t in let f' = match f with | "compose" -> Strategies.seq | "choice" -> Strategies.choice | _ -> anomalylabstrm "strategy_of_ast" (str"Unkwnon strategy: " ++ str f) in f' s' t' | StratConstr (c, b) -> apply_glob_constr (fst c) b all_occurrences | StratHints (old, id) -> if old then Strategies.old_hints id else Strategies.hints id | StratTerms l -> (fun env avoid t ty cstr evars -> let l' = interp_glob_constr_list env (goalevars evars) (List.map fst l) in Strategies.lemmas rewrite_unif_flags l' env avoid t ty cstr evars) | StratEval r -> (fun env avoid t ty cstr evars -> let (sigma,r_interp) = Tacinterp.interp_redexp env (goalevars evars) r in Strategies.reduce r_interp env avoid t ty cstr (sigma,cstrevars evars)) | StratFold c -> Strategies.fold_glob (fst c) type raw_strategy = (constr_expr, Tacexpr.raw_red_expr) strategy_ast type glob_strategy = (glob_constr_and_expr, Tacexpr.raw_red_expr) strategy_ast let interp_strategy ist gl s = let sigma = project gl in sigma, strategy_of_ast s let glob_strategy ist s = map_strategy (Tacinterp.intern_constr ist) (fun c -> c) s let subst_strategy s str = str let pr_strategy _ _ _ (s : strategy) = Pp.str "" let pr_raw_strategy _ _ _ (s : raw_strategy) = Pp.str "" let pr_glob_strategy _ _ _ (s : glob_strategy) = Pp.str "" ARGUMENT EXTEND rewstrategy PRINTED BY pr_strategy INTERPRETED BY interp_strategy GLOBALIZED BY glob_strategy SUBSTITUTED BY subst_strategy RAW_TYPED AS raw_strategy RAW_PRINTED BY pr_raw_strategy GLOB_TYPED AS glob_strategy GLOB_PRINTED BY pr_glob_strategy [ glob(c) ] -> [ StratConstr (c, true) ] | [ "<-" constr(c) ] -> [ StratConstr (c, false) ] | [ "subterms" rewstrategy(h) ] -> [ StratUnary ("all_subterms", h) ] | [ "subterm" rewstrategy(h) ] -> [ StratUnary ("one_subterm", h) ] | [ "innermost" rewstrategy(h) ] -> [ StratUnary("innermost", h) ] | [ "outermost" rewstrategy(h) ] -> [ StratUnary("outermost", h) ] | [ "bottomup" rewstrategy(h) ] -> [ StratUnary("bottomup", h) ] | [ "topdown" rewstrategy(h) ] -> [ StratUnary("topdown", h) ] | [ "id" ] -> [ StratId ] | [ "fail" ] -> [ StratFail ] | [ "refl" ] -> [ StratRefl ] | [ "progress" rewstrategy(h) ] -> [ StratUnary ("progress", h) ] | [ "try" rewstrategy(h) ] -> [ StratUnary ("try", h) ] | [ "any" rewstrategy(h) ] -> [ StratUnary 
("any", h) ] | [ "repeat" rewstrategy(h) ] -> [ StratUnary ("repeat", h) ] | [ rewstrategy(h) ";" rewstrategy(h') ] -> [ StratBinary ("compose", h, h') ] | [ "(" rewstrategy(h) ")" ] -> [ h ] | [ "choice" rewstrategy(h) rewstrategy(h') ] -> [ StratBinary ("choice", h, h') ] | [ "old_hints" preident(h) ] -> [ StratHints (true, h) ] | [ "hints" preident(h) ] -> [ StratHints (false, h) ] | [ "terms" constr_list(h) ] -> [ StratTerms h ] | [ "eval" red_expr(r) ] -> [ StratEval r ] | [ "fold" constr(c) ] -> [ StratFold c ] END (* By default the strategy for "rewrite_db" is top-down *) let db_strat db = Strategies.td (Strategies.hints db) let cl_rewrite_clause_db db cl = cl_rewrite_clause_strat (db_strat db) cl TACTIC EXTEND rewrite_strat | [ "rewrite_strat" rewstrategy(s) "in" hyp(id) ] -> [ cl_rewrite_clause_strat s (Some id) ] | [ "rewrite_strat" rewstrategy(s) ] -> [ cl_rewrite_clause_strat s None ] | [ "rewrite_db" preident(db) "in" hyp(id) ] -> [ cl_rewrite_clause_db db (Some id) ] | [ "rewrite_db" preident(db) ] -> [ cl_rewrite_clause_db db None ] END let clsubstitute o c = let is_tac id = match fst (fst (snd c)) with GVar (_, id') when id' = id -> true | _ -> false in Tacticals.onAllHypsAndConcl (fun cl -> match cl with | Some id when is_tac id -> tclIDTAC | _ -> cl_rewrite_clause c o all_occurrences cl) open Extraargs TACTIC EXTEND substitute | [ "substitute" orient(o) glob_constr_with_bindings(c) ] -> [ clsubstitute o c ] END (* Compatibility with old Setoids *) TACTIC EXTEND setoid_rewrite [ "setoid_rewrite" orient(o) glob_constr_with_bindings(c) ] -> [ cl_rewrite_clause c o all_occurrences None ] | [ "setoid_rewrite" orient(o) glob_constr_with_bindings(c) "in" hyp(id) ] -> [ cl_rewrite_clause c o all_occurrences (Some id)] | [ "setoid_rewrite" orient(o) glob_constr_with_bindings(c) "at" occurrences(occ) ] -> [ cl_rewrite_clause c o (occurrences_of occ) None] | [ "setoid_rewrite" orient(o) glob_constr_with_bindings(c) "at" occurrences(occ) "in" hyp(id)] -> [ cl_rewrite_clause c o (occurrences_of occ) (Some id)] | [ "setoid_rewrite" orient(o) glob_constr_with_bindings(c) "in" hyp(id) "at" occurrences(occ)] -> [ cl_rewrite_clause c o (occurrences_of occ) (Some id)] END let cl_rewrite_clause_newtac_tac c o occ cl gl = cl_rewrite_clause_newtac' c o occ cl; tclIDTAC gl TACTIC EXTEND GenRew | [ "rew" orient(o) glob_constr_with_bindings(c) "in" hyp(id) "at" occurrences(occ) ] -> [ cl_rewrite_clause_newtac_tac c o (occurrences_of occ) (Some id) ] | [ "rew" orient(o) glob_constr_with_bindings(c) "at" occurrences(occ) "in" hyp(id) ] -> [ cl_rewrite_clause_newtac_tac c o (occurrences_of occ) (Some id) ] | [ "rew" orient(o) glob_constr_with_bindings(c) "in" hyp(id) ] -> [ cl_rewrite_clause_newtac_tac c o all_occurrences (Some id) ] | [ "rew" orient(o) glob_constr_with_bindings(c) "at" occurrences(occ) ] -> [ cl_rewrite_clause_newtac_tac c o (occurrences_of occ) None ] | [ "rew" orient(o) glob_constr_with_bindings(c) ] -> [ cl_rewrite_clause_newtac_tac c o all_occurrences None ] END let mkappc s l = CAppExpl (dummy_loc,(None,(Libnames.Ident (dummy_loc,id_of_string s))),l) let declare_an_instance n s args = ((dummy_loc,Name n), Explicit, CAppExpl (dummy_loc, (None, Qualid (dummy_loc, qualid_of_string s)), args)) let declare_instance a aeq n s = declare_an_instance n s [a;aeq] let anew_instance global binders instance fields = new_instance binders instance (Some (CRecord (dummy_loc,None,fields))) ~global:(not (Vernacexpr.use_section_locality ())) ~generalize:false None let declare_instance_refl 
global binders a aeq n lemma = let instance = declare_instance a aeq (add_suffix n "_Reflexive") "Coq.Classes.RelationClasses.Reflexive" in anew_instance global binders instance [(Ident (dummy_loc,id_of_string "reflexivity"),lemma)] let declare_instance_sym global binders a aeq n lemma = let instance = declare_instance a aeq (add_suffix n "_Symmetric") "Coq.Classes.RelationClasses.Symmetric" in anew_instance global binders instance [(Ident (dummy_loc,id_of_string "symmetry"),lemma)] let declare_instance_trans global binders a aeq n lemma = let instance = declare_instance a aeq (add_suffix n "_Transitive") "Coq.Classes.RelationClasses.Transitive" in anew_instance global binders instance [(Ident (dummy_loc,id_of_string "transitivity"),lemma)] let declare_relation ?(binders=[]) a aeq n refl symm trans = init_setoid (); let global = not (Vernacexpr.use_section_locality ()) in let instance = declare_instance a aeq (add_suffix n "_relation") "Coq.Classes.RelationClasses.RewriteRelation" in ignore(anew_instance global binders instance []); match (refl,symm,trans) with (None, None, None) -> () | (Some lemma1, None, None) -> ignore (declare_instance_refl global binders a aeq n lemma1) | (None, Some lemma2, None) -> ignore (declare_instance_sym global binders a aeq n lemma2) | (None, None, Some lemma3) -> ignore (declare_instance_trans global binders a aeq n lemma3) | (Some lemma1, Some lemma2, None) -> ignore (declare_instance_refl global binders a aeq n lemma1); ignore (declare_instance_sym global binders a aeq n lemma2) | (Some lemma1, None, Some lemma3) -> let _lemma_refl = declare_instance_refl global binders a aeq n lemma1 in let _lemma_trans = declare_instance_trans global binders a aeq n lemma3 in let instance = declare_instance a aeq n "Coq.Classes.RelationClasses.PreOrder" in ignore( anew_instance global binders instance [(Ident (dummy_loc,id_of_string "PreOrder_Reflexive"), lemma1); (Ident (dummy_loc,id_of_string "PreOrder_Transitive"),lemma3)]) | (None, Some lemma2, Some lemma3) -> let _lemma_sym = declare_instance_sym global binders a aeq n lemma2 in let _lemma_trans = declare_instance_trans global binders a aeq n lemma3 in let instance = declare_instance a aeq n "Coq.Classes.RelationClasses.PER" in ignore( anew_instance global binders instance [(Ident (dummy_loc,id_of_string "PER_Symmetric"), lemma2); (Ident (dummy_loc,id_of_string "PER_Transitive"),lemma3)]) | (Some lemma1, Some lemma2, Some lemma3) -> let _lemma_refl = declare_instance_refl global binders a aeq n lemma1 in let _lemma_sym = declare_instance_sym global binders a aeq n lemma2 in let _lemma_trans = declare_instance_trans global binders a aeq n lemma3 in let instance = declare_instance a aeq n "Coq.Classes.RelationClasses.Equivalence" in ignore( anew_instance global binders instance [(Ident (dummy_loc,id_of_string "Equivalence_Reflexive"), lemma1); (Ident (dummy_loc,id_of_string "Equivalence_Symmetric"), lemma2); (Ident (dummy_loc,id_of_string "Equivalence_Transitive"), lemma3)]) type 'a binders_argtype = (local_binder list, 'a) Genarg.abstract_argument_type let _, _, rawwit_binders = (Genarg.create_arg None "binders" : Genarg.tlevel binders_argtype * Genarg.glevel binders_argtype * Genarg.rlevel binders_argtype) open Pcoq.Constr VERNAC COMMAND EXTEND AddRelation | [ "Add" "Relation" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "symmetry" "proved" "by" constr(lemma2) "as" ident(n) ] -> [ declare_relation a aeq n (Some lemma1) (Some lemma2) None ] | [ "Add" "Relation" constr(a) constr(aeq) "reflexivity" 
"proved" "by" constr(lemma1) "as" ident(n) ] -> [ declare_relation a aeq n (Some lemma1) None None ] | [ "Add" "Relation" constr(a) constr(aeq) "as" ident(n) ] -> [ declare_relation a aeq n None None None ] END VERNAC COMMAND EXTEND AddRelation2 [ "Add" "Relation" constr(a) constr(aeq) "symmetry" "proved" "by" constr(lemma2) "as" ident(n) ] -> [ declare_relation a aeq n None (Some lemma2) None ] | [ "Add" "Relation" constr(a) constr(aeq) "symmetry" "proved" "by" constr(lemma2) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation a aeq n None (Some lemma2) (Some lemma3) ] END VERNAC COMMAND EXTEND AddRelation3 [ "Add" "Relation" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation a aeq n (Some lemma1) None (Some lemma3) ] | [ "Add" "Relation" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "symmetry" "proved" "by" constr(lemma2) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation a aeq n (Some lemma1) (Some lemma2) (Some lemma3) ] | [ "Add" "Relation" constr(a) constr(aeq) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation a aeq n None None (Some lemma3) ] END VERNAC COMMAND EXTEND AddParametricRelation | [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "symmetry" "proved" "by" constr(lemma2) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n (Some lemma1) (Some lemma2) None ] | [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n (Some lemma1) None None ] | [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n None None None ] END VERNAC COMMAND EXTEND AddParametricRelation2 [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "symmetry" "proved" "by" constr(lemma2) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n None (Some lemma2) None ] | [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "symmetry" "proved" "by" constr(lemma2) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n None (Some lemma2) (Some lemma3) ] END VERNAC COMMAND EXTEND AddParametricRelation3 [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n (Some lemma1) None (Some lemma3) ] | [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "reflexivity" "proved" "by" constr(lemma1) "symmetry" "proved" "by" constr(lemma2) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n (Some lemma1) (Some lemma2) (Some lemma3) ] | [ "Add" "Parametric" "Relation" binders(b) ":" constr(a) constr(aeq) "transitivity" "proved" "by" constr(lemma3) "as" ident(n) ] -> [ declare_relation ~binders:b a aeq n None None (Some lemma3) ] END let cHole = CHole (dummy_loc, None) open Entries open Libnames let proper_projection r ty = let ctx, inst = decompose_prod_assum ty in let mor, args = destApp inst in let instarg = mkApp (r, rel_vect 0 (List.length ctx)) in let app = mkApp (Lazy.force proper_proj, Array.append args [| instarg |]) in it_mkLambda_or_LetIn app ctx let declare_projection n 
instance_id r = let ty = Global.type_of_global r in let c = constr_of_global r in let term = proper_projection c ty in let typ = Typing.type_of (Global.env ()) Evd.empty term in let ctx, typ = decompose_prod_assum typ in let typ = let n = let rec aux t = match kind_of_term t with App (f, [| a ; a' ; rel; rel' |]) when eq_constr f (Lazy.force respectful) -> succ (aux rel') | _ -> 0 in let init = match kind_of_term typ with App (f, args) when eq_constr f (Lazy.force respectful) -> mkApp (f, fst (array_chop (Array.length args - 2) args)) | _ -> typ in aux init in let ctx,ccl = Reductionops.splay_prod_n (Global.env()) Evd.empty (3 * n) typ in it_mkProd_or_LetIn ccl ctx in let typ = it_mkProd_or_LetIn typ ctx in let cst = { const_entry_body = term; const_entry_secctx = None; const_entry_type = Some typ; const_entry_opaque = false } in ignore(Declare.declare_constant n (Entries.DefinitionEntry cst, Decl_kinds.IsDefinition Decl_kinds.Definition)) let build_morphism_signature m = let env = Global.env () in let m = Constrintern.interp_constr Evd.empty env m in let t = Typing.type_of env Evd.empty m in let isevars = ref (Evd.empty, Evd.empty) in let cstrs = let rec aux t = match kind_of_term t with | Prod (na, a, b) -> None :: aux b | _ -> [] in aux t in let evars, t', sig_, cstrs = build_signature !isevars env t cstrs None in let _ = isevars := evars in let _ = List.iter (fun (ty, rel) -> Option.iter (fun rel -> let default = mkApp (Lazy.force default_relation, [| ty; rel |]) in let evars,c = new_cstr_evar !isevars env default in isevars := evars) rel) cstrs in let morph = mkApp (Lazy.force proper_type, [| t; sig_; m |]) in let evd = solve_constraints env !isevars in let m = Evarutil.nf_evar evd morph in Evarutil.check_evars env Evd.empty evd m; m let default_morphism sign m = let env = Global.env () in let t = Typing.type_of env Evd.empty m in let evars, _, sign, cstrs = build_signature (Evd.empty,Evd.empty) env t (fst sign) (snd sign) in let morph = mkApp (Lazy.force proper_type, [| t; sign; m |]) in let evars, mor = resolve_one_typeclass env (merge_evars evars) morph in mor, proper_projection mor morph let add_setoid global binders a aeq t n = init_setoid (); let _lemma_refl = declare_instance_refl global binders a aeq n (mkappc "Seq_refl" [a;aeq;t]) in let _lemma_sym = declare_instance_sym global binders a aeq n (mkappc "Seq_sym" [a;aeq;t]) in let _lemma_trans = declare_instance_trans global binders a aeq n (mkappc "Seq_trans" [a;aeq;t]) in let instance = declare_instance a aeq n "Coq.Classes.RelationClasses.Equivalence" in ignore( anew_instance global binders instance [(Ident (dummy_loc,id_of_string "Equivalence_Reflexive"), mkappc "Seq_refl" [a;aeq;t]); (Ident (dummy_loc,id_of_string "Equivalence_Symmetric"), mkappc "Seq_sym" [a;aeq;t]); (Ident (dummy_loc,id_of_string "Equivalence_Transitive"), mkappc "Seq_trans" [a;aeq;t])]) let add_morphism_infer glob m n = init_setoid (); let instance_id = add_suffix n "_Proper" in let instance = build_morphism_signature m in if Lib.is_modtype () then let cst = Declare.declare_constant ~internal:Declare.KernelSilent instance_id (Entries.ParameterEntry (None,instance,None), Decl_kinds.IsAssumption Decl_kinds.Logical) in add_instance (Typeclasses.new_instance (Lazy.force proper_class) None glob (ConstRef cst)); declare_projection n instance_id (ConstRef cst) else let kind = Decl_kinds.Global, Decl_kinds.DefinitionBody Decl_kinds.Instance in Flags.silently (fun () -> Lemmas.start_proof instance_id kind instance (fun _ -> function Libnames.ConstRef cst -> 
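(* hook run once the interactive [Proper] proof is complete: register the
   resulting constant as a typeclass instance and declare its projection *)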
add_instance (Typeclasses.new_instance (Lazy.force proper_class) None glob (ConstRef cst)); declare_projection n instance_id (ConstRef cst) | _ -> assert false); Pfedit.by (Tacinterp.interp <:tactic< Coq.Classes.SetoidTactics.add_morphism_tactic>>)) (); Flags.if_verbose (fun x -> msg (Printer.pr_open_subgoals x)) () let add_morphism glob binders m s n = init_setoid (); let instance_id = add_suffix n "_Proper" in let instance = ((dummy_loc,Name instance_id), Explicit, CAppExpl (dummy_loc, (None, Qualid (dummy_loc, Libnames.qualid_of_string "Coq.Classes.Morphisms.Proper")), [cHole; s; m])) in let tac = Tacinterp.interp <:tactic> in ignore(new_instance ~global:glob binders instance (Some (CRecord (dummy_loc,None,[]))) ~generalize:false ~tac ~hook:(declare_projection n instance_id) None) VERNAC COMMAND EXTEND AddSetoid1 [ "Add" "Setoid" constr(a) constr(aeq) constr(t) "as" ident(n) ] -> [ add_setoid (not (Vernacexpr.use_section_locality ())) [] a aeq t n ] | [ "Add" "Parametric" "Setoid" binders(binders) ":" constr(a) constr(aeq) constr(t) "as" ident(n) ] -> [ add_setoid (not (Vernacexpr.use_section_locality ())) binders a aeq t n ] | [ "Add" "Morphism" constr(m) ":" ident(n) ] -> [ add_morphism_infer (not (Vernacexpr.use_section_locality ())) m n ] | [ "Add" "Morphism" constr(m) "with" "signature" lconstr(s) "as" ident(n) ] -> [ add_morphism (not (Vernacexpr.use_section_locality ())) [] m s n ] | [ "Add" "Parametric" "Morphism" binders(binders) ":" constr(m) "with" "signature" lconstr(s) "as" ident(n) ] -> [ add_morphism (not (Vernacexpr.use_section_locality ())) binders m s n ] END (** Bind to "rewrite" too *) (** Taken from original setoid_replace, to emulate the old rewrite semantics where lemmas are first instantiated and then rewrite proceeds. *) let check_evar_map_of_evars_defs evd = let metas = Evd.meta_list evd in let check_freemetas_is_empty rebus = Evd.Metaset.iter (fun m -> if Evd.meta_defined evd m then () else raise (Logic.RefinerError (Logic.UnresolvedBindings [Evd.meta_name evd m]))) in List.iter (fun (_,binding) -> match binding with Evd.Cltyp (_,{Evd.rebus=rebus; Evd.freemetas=freemetas}) -> check_freemetas_is_empty rebus freemetas | Evd.Clval (_,({Evd.rebus=rebus1; Evd.freemetas=freemetas1},_), {Evd.rebus=rebus2; Evd.freemetas=freemetas2}) -> check_freemetas_is_empty rebus1 freemetas1 ; check_freemetas_is_empty rebus2 freemetas2 ) metas let unification_rewrite flags l2r c1 c2 cl car rel but gl = let env = pf_env gl in let (evd',c') = try (* ~flags:(false,true) to allow to mark occurrences that must not be rewritten simply by replacing them with let-defined definitions in the context *) Unification.w_unify_to_subterm ~flags:rewrite_unif_flags env cl.evd ((if l2r then c1 else c2),but) with Pretype_errors.PretypeError _ -> (* ~flags:(true,true) to make Ring work (since it really exploits conversion) *) Unification.w_unify_to_subterm ~flags:flags env cl.evd ((if l2r then c1 else c2),but) in let evd' = Typeclasses.resolve_typeclasses ~fail:false env evd' in let cl' = {cl with evd = evd'} in let cl' = Clenvtac.clenv_pose_dependent_evars true cl' in let nf c = Evarutil.nf_evar cl'.evd (Clenv.clenv_nf_meta cl' c) in let c1 = if l2r then nf c' else nf c1 and c2 = if l2r then nf c2 else nf c' and car = nf car and rel = nf rel in check_evar_map_of_evars_defs cl'.evd; let prf = nf (Clenv.clenv_value cl') and prfty = nf (Clenv.clenv_type cl') in let cl' = { cl' with templval = mk_freelisted prf ; templtyp = mk_freelisted prfty } in {cl=cl'; prf=(mkRel 1); car=car; rel=rel; l2r=l2r; 
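(* The instantiated proof term is kept aside in [abs]; [prf = mkRel 1] is a
   placeholder that [cl_rewrite_clause_aux] later binds through a beta-redex
   over the abstracted lemma. *)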
c1=c1; c2=c2; c=None; abs=Some (prf, prfty); flags = flags} let get_hyp gl evars (c,l) clause l2r = let flags = rewrite2_unif_flags in let hi = decompose_applied_relation (pf_env gl) evars flags None (c,l) l2r in let but = match clause with | Some id -> pf_get_hyp_typ gl id | None -> Evarutil.nf_evar evars (pf_concl gl) in { unification_rewrite flags hi.l2r hi.c1 hi.c2 hi.cl hi.car hi.rel but gl with flags = rewrite_unif_flags } let general_rewrite_flags = { under_lambdas = false; on_morphisms = true } let apply_lemma gl (c,l) cl l2r occs = let sigma = project gl in let hypinfo = ref (get_hyp gl sigma (c,l) cl l2r) in let app = apply_rule hypinfo occs in let rec aux () = Strategies.choice app (subterm true general_rewrite_flags (fun env -> aux () env)) in !hypinfo, aux () let general_s_rewrite cl l2r occs (c,l) ~new_goals gl = let meta = Evarutil.new_meta() in let hypinfo, strat = apply_lemma gl (c,l) cl l2r occs in tclWEAK_PROGRESS (tclTHEN (Refiner.tclEVARS hypinfo.cl.evd) (cl_rewrite_clause_tac ~abs:hypinfo.abs strat (mkMeta meta) cl)) gl let general_s_rewrite_clause x = init_setoid (); match x with | None -> general_s_rewrite None | Some id -> general_s_rewrite (Some id) let _ = Equality.register_general_rewrite_clause general_s_rewrite_clause (** [setoid_]{reflexivity,symmetry,transitivity} tactics *) let not_declared env ty rel = tclFAIL 0 (str" The relation " ++ Printer.pr_constr_env env rel ++ str" is not a declared " ++ str ty ++ str" relation. Maybe you need to require the Setoid library") let setoid_proof gl ty fn fallback = let env = pf_env gl in try let rel, args = decompose_app_rel env (project gl) (pf_concl gl) in let evm = project gl in let car = pi3 (List.hd (fst (Reduction.dest_prod env (Typing.type_of env evm rel)))) in fn env evm car rel gl with e when Errors.noncritical e -> try fallback gl with Hipattern.NoEquationFound -> match e with | Not_found -> let rel, args = decompose_app_rel env (project gl) (pf_concl gl) in not_declared env ty rel gl | _ -> raise e let setoid_reflexivity gl = setoid_proof gl "reflexive" (fun env evm car rel -> apply (get_reflexive_proof env evm car rel)) (reflexivity_red true) let setoid_symmetry gl = setoid_proof gl "symmetric" (fun env evm car rel -> apply (get_symmetric_proof env evm car rel)) (symmetry_red true) let setoid_transitivity c gl = setoid_proof gl "transitive" (fun env evm car rel -> let proof = get_transitive_proof env evm car rel in match c with | None -> eapply proof | Some c -> apply_with_bindings (proof,Glob_term.ImplicitBindings [ c ])) (transitivity_red true c) let setoid_symmetry_in id gl = let ctype = pf_type_of gl (mkVar id) in let binders,concl = decompose_prod_assum ctype in let (equiv, args) = decompose_app concl in let rec split_last_two = function | [c1;c2] -> [],(c1, c2) | x::y::z -> let l,res = split_last_two (y::z) in x::l, res | _ -> error "The term provided is not an equivalence." 
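(* [setoid_symmetry_in]: the hypothesis ends in an application [R ... c1 c2];
   [split_last_two] isolates the two related arguments, and the symmetric
   statement [R ... c2 c1] is cut in and proved from the original hypothesis
   using [setoid_symmetry]. *)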
in let others,(c1,c2) = split_last_two args in let he,c1,c2 = mkApp (equiv, Array.of_list others),c1,c2 in let new_hyp' = mkApp (he, [| c2 ; c1 |]) in let new_hyp = it_mkProd_or_LetIn new_hyp' binders in tclTHENS (Tactics.cut new_hyp) [ intro_replacing id; tclTHENLIST [ intros; setoid_symmetry; apply (mkVar id); Tactics.assumption ] ] gl let _ = Tactics.register_setoid_reflexivity setoid_reflexivity let _ = Tactics.register_setoid_symmetry setoid_symmetry let _ = Tactics.register_setoid_symmetry_in setoid_symmetry_in let _ = Tactics.register_setoid_transitivity setoid_transitivity TACTIC EXTEND setoid_symmetry [ "setoid_symmetry" ] -> [ setoid_symmetry ] | [ "setoid_symmetry" "in" hyp(n) ] -> [ setoid_symmetry_in n ] END TACTIC EXTEND setoid_reflexivity [ "setoid_reflexivity" ] -> [ setoid_reflexivity ] END TACTIC EXTEND setoid_transitivity [ "setoid_transitivity" constr(t) ] -> [ setoid_transitivity (Some t) ] | [ "setoid_etransitivity" ] -> [ setoid_transitivity None ] END let implify id gl = let (_, b, ctype) = pf_get_hyp gl id in let binders,concl = decompose_prod_assum ctype in let ctype' = match binders with | (_, None, ty as hd) :: tl when noccurn 1 concl -> let env = Environ.push_rel_context tl (pf_env gl) in let sigma = project gl in let tyhd = Typing.type_of env sigma ty and tyconcl = Typing.type_of (Environ.push_rel hd env) sigma concl in let app, unfold = arrow_morphism tyhd (subst1 mkProp tyconcl) ty (subst1 mkProp concl) in it_mkProd_or_LetIn app tl | _ -> ctype in convert_hyp_no_check (id, b, ctype') gl TACTIC EXTEND implify [ "implify" hyp(n) ] -> [ implify n ] END let rec fold_matches env sigma c = map_constr_with_full_binders Environ.push_rel (fun env c -> match kind_of_term c with | Case _ -> let cst, env, c' = fold_match ~force:true env sigma c in fold_matches env sigma c' | _ -> fold_matches env sigma c) env c TACTIC EXTEND fold_match [ "fold_match" constr(c) ] -> [ fun gl -> let _, _, c' = fold_match ~force:true (pf_env gl) (project gl) c in change (Some (snd (pattern_of_constr (project gl) c))) c' onConcl gl ] END TACTIC EXTEND fold_matches | [ "fold_matches" constr(c) ] -> [ fun gl -> let c' = fold_matches (pf_env gl) (project gl) c in change (Some (snd (pattern_of_constr (project gl) c))) c' onConcl gl ] END TACTIC EXTEND myapply | [ "myapply" global(id) constr_list(l) ] -> [ fun gl -> let gr = id in let _, impls = List.hd (Impargs.implicits_of_global gr) in let ty = Global.type_of_global gr in let env = pf_env gl in let evars = ref (project gl) in let app = let rec aux ty impls args args' = match impls, kind_of_term ty with | Some (_, _, (_, _)) :: impls, Prod (n, t, t') -> let arg = Evarutil.e_new_evar evars env t in aux (subst1 arg t') impls args (arg :: args') | None :: impls, Prod (n, t, t') -> (match args with | [] -> if dependent (mkRel 1) t' then let arg = Evarutil.e_new_evar evars env t in aux (subst1 arg t') impls args (arg :: args') else let arg = Evarutil.mk_new_meta () in evars := meta_declare (destMeta arg) t !evars; aux (subst1 arg t') impls args (arg :: args') | arg :: args -> aux (subst1 arg t') impls args (arg :: args')) | _, _ -> mkApp (constr_of_global gr, Array.of_list (List.rev args')) in aux ty impls l [] in tclTHEN (Refiner.tclEVARS !evars) (apply app) gl ] END coq-8.4pl2/tactics/equality.mli0000640000175000001440000001242112010532755015621 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* occurrences -> freeze_evars_flag -> 
dep_proof_flag -> ?tac:(tactic * conditions) -> constr with_bindings -> evars_flag -> tactic val general_rewrite : orientation -> occurrences -> freeze_evars_flag -> dep_proof_flag -> ?tac:(tactic * conditions) -> constr -> tactic (* Equivalent to [general_rewrite l2r] *) val rewriteLR : ?tac:(tactic * conditions) -> constr -> tactic val rewriteRL : ?tac:(tactic * conditions) -> constr -> tactic (* Warning: old [general_rewrite_in] is now [general_rewrite_bindings_in] *) val register_general_rewrite_clause : (identifier option -> orientation -> occurrences -> constr with_bindings -> new_goals:constr list -> tactic) -> unit val register_is_applied_rewrite_relation : (env -> evar_map -> rel_context -> constr -> constr option) -> unit val general_rewrite_ebindings_clause : identifier option -> orientation -> occurrences -> freeze_evars_flag -> dep_proof_flag -> ?tac:(tactic * conditions) -> constr with_bindings -> evars_flag -> tactic val general_rewrite_bindings_in : orientation -> occurrences -> freeze_evars_flag -> dep_proof_flag -> ?tac:(tactic * conditions) -> identifier -> constr with_bindings -> evars_flag -> tactic val general_rewrite_in : orientation -> occurrences -> freeze_evars_flag -> dep_proof_flag -> ?tac:(tactic * conditions) -> identifier -> constr -> evars_flag -> tactic val general_multi_rewrite : orientation -> evars_flag -> ?tac:(tactic * conditions) -> constr with_bindings -> clause -> tactic type delayed_open_constr_with_bindings = env -> evar_map -> evar_map * constr with_bindings val general_multi_multi_rewrite : evars_flag -> (bool * multi * delayed_open_constr_with_bindings) list -> clause -> (tactic * conditions) option -> tactic val replace_in_clause_maybe_by : constr -> constr -> clause -> tactic option -> tactic val replace : constr -> constr -> tactic val replace_in : identifier -> constr -> constr -> tactic val replace_by : constr -> constr -> tactic -> tactic val replace_in_by : identifier -> constr -> constr -> tactic -> tactic val discr : evars_flag -> constr with_bindings -> tactic val discrConcl : tactic val discrClause : evars_flag -> clause -> tactic val discrHyp : identifier -> tactic val discrEverywhere : evars_flag -> tactic val discr_tac : evars_flag -> constr with_bindings induction_arg option -> tactic val inj : intro_pattern_expr located list -> evars_flag -> constr with_bindings -> tactic val injClause : intro_pattern_expr located list -> evars_flag -> constr with_bindings induction_arg option -> tactic val injHyp : identifier -> tactic val injConcl : tactic val dEq : evars_flag -> constr with_bindings induction_arg option -> tactic val dEqThen : evars_flag -> (int -> tactic) -> constr with_bindings induction_arg option -> tactic val make_iterated_tuple : env -> evar_map -> constr -> (constr * types) -> constr * constr * constr (* The family cutRewriteIn expect an equality statement *) val cutRewriteInHyp : bool -> types -> identifier -> tactic val cutRewriteInConcl : bool -> constr -> tactic (* The family rewriteIn expect the proof of an equality *) val rewriteInHyp : bool -> constr -> identifier -> tactic val rewriteInConcl : bool -> constr -> tactic (* Expect the proof of an equality; fails with raw internal errors *) val substClause : bool -> constr -> identifier option -> tactic val discriminable : env -> evar_map -> constr -> constr -> bool val injectable : env -> evar_map -> constr -> constr -> bool (* Subst *) val unfold_body : identifier -> tactic type subst_tactic_flags = { only_leibniz : bool; rewrite_dependent_proof : bool } val 
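(* [subst] and [subst_all] eliminate hypotheses of the shape [id = t] or
   [t = id] by substituting [id] throughout the goal; the flags presumably
   control whether only Leibniz equalities are considered and whether proofs
   of dependent hypotheses are rewritten as well. *)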
subst_gen : bool -> identifier list -> tactic val subst : identifier list -> tactic val subst_all : ?flags:subst_tactic_flags -> tactic (* Replace term *) (* [replace_multi_term dir_opt c cl] perfoms replacement of [c] by the first value found in context (according to [dir] if given to get the rewrite direction) in the clause [cl] *) val replace_multi_term : bool option -> constr -> clause -> tactic val set_eq_dec_scheme_kind : mutual scheme_kind -> unit coq-8.4pl2/tactics/tacticals.ml0000640000175000001440000003713612010532755015574 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tclFAIL 0 (str "No applicable tactic") | [a] -> tac a (* so that returned failure is the one from last item *) | a::tl -> tclORELSE (tac a) (tclFIRST_PROGRESS_ON tac tl) (************************************************************************) (* Tacticals applying on hypotheses *) (************************************************************************) let nthDecl m gl = try List.nth (pf_hyps gl) (m-1) with Failure _ -> error "No such assumption." let nthHypId m gl = pi1 (nthDecl m gl) let nthHyp m gl = mkVar (nthHypId m gl) let lastDecl gl = nthDecl 1 gl let lastHypId gl = nthHypId 1 gl let lastHyp gl = nthHyp 1 gl let nLastDecls n gl = try list_firstn n (pf_hyps gl) with Failure _ -> error "Not enough hypotheses in the goal." let nLastHypsId n gl = List.map pi1 (nLastDecls n gl) let nLastHyps n gl = List.map mkVar (nLastHypsId n gl) let onNthDecl m tac gl = tac (nthDecl m gl) gl let onNthHypId m tac gl = tac (nthHypId m gl) gl let onNthHyp m tac gl = tac (nthHyp m gl) gl let onLastDecl = onNthDecl 1 let onLastHypId = onNthHypId 1 let onLastHyp = onNthHyp 1 let onHyps find tac gl = tac (find gl) gl let onNLastDecls n tac = onHyps (nLastDecls n) tac let onNLastHypsId n tac = onHyps (nLastHypsId n) tac let onNLastHyps n tac = onHyps (nLastHyps n) tac let afterHyp id gl = fst (list_split_when (fun (hyp,_,_) -> hyp = id) (pf_hyps gl)) (***************************************) (* Clause Tacticals *) (***************************************) (* The following functions introduce several tactic combinators and functions useful for working with clauses. A clause is either None or (Some id), where id is an identifier. This type is useful for defining tactics that may be used either to transform the conclusion (None) or to transform a hypothesis id (Some id). 
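For example, [onClause tac allHypsAndConcl] (see below) applies [tac] to
   [Some id] for every hypothesis [id] and to [None] for the conclusion,
   whereas the [onConcl] clause restricts it to the conclusion alone.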
-- --Eduardo (8/8/97) *) (* A [simple_clause] is a set of hypotheses, possibly extended with the conclusion (conclusion is represented by None) *) type simple_clause = identifier option list (* An [clause] is the algebraic form of a [concrete_clause]; it may refer to all hypotheses independently of the effective contents of the current goal *) type clause = identifier gclause let allHypsAndConcl = { onhyps=None; concl_occs=all_occurrences_expr } let allHyps = { onhyps=None; concl_occs=no_occurrences_expr } let onConcl = { onhyps=Some[]; concl_occs=all_occurrences_expr } let onHyp id = { onhyps=Some[((all_occurrences_expr,id),InHyp)]; concl_occs=no_occurrences_expr } let simple_clause_of cl gls = let error_occurrences () = error "This tactic does not support occurrences selection" in let error_body_selection () = error "This tactic does not support body selection" in let hyps = match cl.onhyps with | None -> List.map Option.make (pf_ids_of_hyps gls) | Some l -> List.map (fun ((occs,id),w) -> if occs <> all_occurrences_expr then error_occurrences (); if w = InHypValueOnly then error_body_selection (); Some id) l in if cl.concl_occs = no_occurrences_expr then hyps else if cl.concl_occs <> all_occurrences_expr then error_occurrences () else None :: hyps let fullGoal gl = None :: List.map Option.make (pf_ids_of_hyps gl) let onAllHyps tac gl = tclMAP tac (pf_ids_of_hyps gl) gl let onAllHypsAndConcl tac gl = tclMAP tac (fullGoal gl) gl let tryAllHyps tac gl = tclFIRST_PROGRESS_ON tac (pf_ids_of_hyps gl) gl let tryAllHypsAndConcl tac gl = tclFIRST_PROGRESS_ON tac (fullGoal gl) gl let onClause tac cl gls = tclMAP tac (simple_clause_of cl gls) gls let onClauseLR tac cl gls = tclMAP tac (List.rev (simple_clause_of cl gls)) gls let ifOnHyp pred tac1 tac2 id gl = if pred (id,pf_get_hyp_typ gl id) then tac1 id gl else tac2 id gl (************************************************************************) (* An intermediate form of occurrence clause that select components *) (* of a definition, hypotheses and possibly the goal *) (* (used for reduction tactics) *) (************************************************************************) (* A [hyp_location] is an hypothesis together with a position, in body if any, in type or in both *) type hyp_location = identifier * hyp_location_flag (* A [goal_location] is either an hypothesis (together with a position, in body if any, in type or in both) or the goal *) type goal_location = hyp_location option (************************************************************************) (* An intermediate structure for dealing with occurrence clauses *) (************************************************************************) (* [clause_atom] refers either to an hypothesis location (i.e. 
an hypothesis with occurrences and a position, in body if any, in type or in both) or to some occurrences of the conclusion *) type clause_atom = | OnHyp of identifier * occurrences_expr * hyp_location_flag | OnConcl of occurrences_expr (* A [concrete_clause] is an effective collection of occurrences in the hypotheses and the conclusion *) type concrete_clause = clause_atom list let concrete_clause_of cl gls = let hyps = match cl.onhyps with | None -> let f id = OnHyp (id,all_occurrences_expr,InHyp) in List.map f (pf_ids_of_hyps gls) | Some l -> List.map (fun ((occs,id),w) -> OnHyp (id,occs,w)) l in if cl.concl_occs = no_occurrences_expr then hyps else OnConcl cl.concl_occs :: hyps (************************************************************************) (* Elimination Tacticals *) (************************************************************************) (* The following tacticals allow to apply a tactic to the branches generated by the application of an elimination tactic. Two auxiliary types --branch_args and branch_assumptions-- are used to keep track of some information about the ``branches'' of the elimination. *) type branch_args = { ity : inductive; (* the type we were eliminating on *) largs : constr list; (* its arguments *) branchnum : int; (* the branch number *) pred : constr; (* the predicate we used *) nassums : int; (* the number of assumptions to be introduced *) branchsign : bool list; (* the signature of the branch. true=recursive argument, false=constant *) branchnames : intro_pattern_expr located list} type branch_assumptions = { ba : branch_args; (* the branch args *) assums : named_context} (* the list of assumptions introduced *) let fix_empty_or_and_pattern nv l = (* 1- The syntax does not distinguish between "[ ]" for one clause with no names and "[ ]" for no clause at all *) (* 2- More generally, we admit "[ ]" for any disjunctive pattern of arbitrary length *) if l = [[]] then list_make nv [] else l let check_or_and_pattern_size loc names n = if List.length names <> n then if n = 1 then user_err_loc (loc,"",str "Expects a conjunctive pattern.") else user_err_loc (loc,"",str "Expects a disjunctive pattern with " ++ int n ++ str " branches.") let compute_induction_names n = function | None -> Array.make n [] | Some (loc,IntroOrAndPattern names) -> let names = fix_empty_or_and_pattern n names in check_or_and_pattern_size loc names n; Array.of_list names | Some (loc,_) -> user_err_loc (loc,"",str "Disjunctive/conjunctive introduction pattern expected.") let compute_construtor_signatures isrec (_,k as ity) = let rec analrec c recargs = match kind_of_term c, recargs with | Prod (_,_,c), recarg::rest -> let b = match dest_recarg recarg with | Norec | Imbr _ -> false | Mrec (_,j) -> isrec & j=k in b :: (analrec c rest) | LetIn (_,_,_,c), rest -> false :: (analrec c rest) | _, [] -> [] | _ -> anomaly "compute_construtor_signatures" in let (mib,mip) = Global.lookup_inductive ity in let n = mib.mind_nparams in let lc = Array.map (fun c -> snd (decompose_prod_n_assum n c)) mip.mind_nf_lc in let lrecargs = dest_subterms mip.mind_recargs in array_map2 analrec lc lrecargs let elimination_sort_of_goal gl = pf_apply Retyping.get_sort_family_of gl (pf_concl gl) let elimination_sort_of_hyp id gl = pf_apply Retyping.get_sort_family_of gl (pf_get_hyp_typ gl id) let elimination_sort_of_clause = function | None -> elimination_sort_of_goal | Some id -> elimination_sort_of_hyp id (* Find the right elimination suffix corresponding to the sort of the goal *) (* c should be of type A1->.. 
An->B with B an inductive definition *) let general_elim_then_using mk_elim isrec allnames tac predicate (indbindings,elimbindings) ind indclause gl = let elim = mk_elim ind gl in (* applying elimination_scheme just a little modified *) let indclause' = clenv_match_args indbindings indclause in let elimclause = mk_clenv_from gl (elim,pf_type_of gl elim) in let indmv = match kind_of_term (last_arg elimclause.templval.Evd.rebus) with | Meta mv -> mv | _ -> anomaly "elimination" in let pmv = let p, _ = decompose_app elimclause.templtyp.Evd.rebus in match kind_of_term p with | Meta p -> p | _ -> let name_elim = match kind_of_term elim with | Const kn -> string_of_con kn | Var id -> string_of_id id | _ -> "\b" in error ("The elimination combinator " ^ name_elim ^ " is unknown.") in let elimclause' = clenv_fchain indmv elimclause indclause' in let elimclause' = clenv_match_args elimbindings elimclause' in let branchsigns = compute_construtor_signatures isrec ind in let brnames = compute_induction_names (Array.length branchsigns) allnames in let after_tac ce i gl = let (hd,largs) = decompose_app ce.templtyp.Evd.rebus in let ba = { branchsign = branchsigns.(i); branchnames = brnames.(i); nassums = List.fold_left (fun acc b -> if b then acc+2 else acc+1) 0 branchsigns.(i); branchnum = i+1; ity = ind; largs = List.map (clenv_nf_meta ce) largs; pred = clenv_nf_meta ce hd } in tac ba gl in let branchtacs ce = Array.init (Array.length branchsigns) (after_tac ce) in let elimclause' = match predicate with | None -> elimclause' | Some p -> clenv_unify ~flags:Unification.elim_flags Reduction.CONV (mkMeta pmv) p elimclause' in elim_res_pf_THEN_i elimclause' branchtacs gl (* computing the case/elim combinators *) let gl_make_elim ind gl = Indrec.lookup_eliminator ind (elimination_sort_of_goal gl) let gl_make_case_dep ind gl = pf_apply Indrec.build_case_analysis_scheme gl ind true (elimination_sort_of_goal gl) let gl_make_case_nodep ind gl = pf_apply Indrec.build_case_analysis_scheme gl ind false (elimination_sort_of_goal gl) let elimination_then_using tac predicate bindings c gl = let (ind,t) = pf_reduce_to_quantified_ind gl (pf_type_of gl c) in let indclause = mk_clenv_from gl (c,t) in general_elim_then_using gl_make_elim true None tac predicate bindings ind indclause gl let case_then_using = general_elim_then_using gl_make_case_dep false let case_nodep_then_using = general_elim_then_using gl_make_case_nodep false let elimination_then tac = elimination_then_using tac None let simple_elimination_then tac = elimination_then tac ([],[]) let make_elim_branch_assumptions ba gl = let rec makerec (assums,cargs,constargs,recargs,indargs) lb lc = match lb,lc with | ([], _) -> { ba = ba; assums = assums} | ((true::tl), ((idrec,_,_ as recarg)::(idind,_,_ as indarg)::idtl)) -> makerec (recarg::indarg::assums, idrec::cargs, idrec::recargs, constargs, idind::indargs) tl idtl | ((false::tl), ((id,_,_ as constarg)::idtl)) -> makerec (constarg::assums, id::cargs, id::constargs, recargs, indargs) tl idtl | (_, _) -> anomaly "make_elim_branch_assumptions" in makerec ([],[],[],[],[]) ba.branchsign (try list_firstn ba.nassums (pf_hyps gl) with Failure _ -> anomaly "make_elim_branch_assumptions") let elim_on_ba tac ba gl = tac (make_elim_branch_assumptions ba gl) gl let make_case_branch_assumptions ba gl = let rec makerec (assums,cargs,constargs,recargs) p_0 p_1 = match p_0,p_1 with | ([], _) -> { ba = ba; assums = assums} | ((true::tl), ((idrec,_,_ as recarg)::idtl)) -> makerec (recarg::assums, idrec::cargs, idrec::recargs, 
constargs) tl idtl | ((false::tl), ((id,_,_ as constarg)::idtl)) -> makerec (constarg::assums, id::cargs, recargs, id::constargs) tl idtl | (_, _) -> anomaly "make_case_branch_assumptions" in makerec ([],[],[],[]) ba.branchsign (try list_firstn ba.nassums (pf_hyps gl) with Failure _ -> anomaly "make_case_branch_assumptions") let case_on_ba tac ba gl = tac (make_case_branch_assumptions ba gl) gl coq-8.4pl2/tactics/tactic_option.mli0000640000175000001440000000142712010532755016627 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string -> (* put *) (locality_flag -> glob_tactic_expr -> unit) * (* get *) (unit -> locality_flag * tactic) * (* print *) (unit -> Pp.std_ppcmds) coq-8.4pl2/tactics/auto.ml0000640000175000001440000014372312121620060014563 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [v] | h::tl -> if pri_order v h then v::h::tl else h::(insrec tl) in insrec l (* Nov 98 -- Papageno *) (* Les Hints sont ré-organisés en plusieurs databases. La table impérative "searchtable", de type "hint_db_table", associe une database (hint_db) à chaque nom. Une hint_db est une table d'association fonctionelle constr -> search_entry Le constr correspond à la constante de tête de la conclusion. Une search_entry est un triplet comprenant : - la liste des tactiques qui n'ont pas de pattern associé - la liste des tactiques qui ont un pattern - un discrimination net borné (Btermdn.t) constitué de tous les patterns de la seconde liste de tactiques *) type stored_data = int * pri_auto_tactic (* First component is the index of insertion in the table, to keep most recent first semantics. 
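Hints are otherwise ordered by priority: [insert] above and the [Sort.merge pri_order] calls below keep the lists sorted on [pri], and the insertion index serves to break ties between hints of equal priority, most recently added first.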
*) let auto_tactic_ord code1 code2 = match code1, code2 with | Res_pf (c1, _), Res_pf (c2, _) | ERes_pf (c1, _), ERes_pf (c2, _) | Give_exact c1, Give_exact c2 | Res_pf_THEN_trivial_fail (c1, _), Res_pf_THEN_trivial_fail (c2, _) -> constr_ord c1 c2 | Unfold_nth (EvalVarRef i1), Unfold_nth (EvalVarRef i2) -> Pervasives.compare i1 i2 | Unfold_nth (EvalConstRef c1), Unfold_nth (EvalConstRef c2) -> kn_ord (canonical_con c1) (canonical_con c2) | Extern t1, Extern t2 -> Pervasives.compare t1 t2 | _ -> Pervasives.compare code1 code2 module Bounded_net = Btermdn.Make(struct type t = stored_data let compare = pri_order_int end) type search_entry = stored_data list * stored_data list * Bounded_net.t let empty_se = ([],[],Bounded_net.create ()) let eq_pri_auto_tactic (_, x) (_, y) = if x.pri = y.pri && x.pat = y.pat then match x.code,y.code with | Res_pf(cstr,_),Res_pf(cstr1,_) -> eq_constr cstr cstr1 | ERes_pf(cstr,_),ERes_pf(cstr1,_) -> eq_constr cstr cstr1 | Give_exact cstr,Give_exact cstr1 -> eq_constr cstr cstr1 | Res_pf_THEN_trivial_fail(cstr,_) ,Res_pf_THEN_trivial_fail(cstr1,_) -> eq_constr cstr cstr1 | _,_ -> false else false let add_tac pat t st (l,l',dn) = match pat with | None -> if not (List.exists (eq_pri_auto_tactic t) l) then (insert t l, l', dn) else (l, l', dn) | Some pat -> if not (List.exists (eq_pri_auto_tactic t) l') then (l, insert t l', Bounded_net.add st dn (pat,t)) else (l, l', dn) let rebuild_dn st ((l,l',dn) : search_entry) = (l, l', List.fold_left (fun dn (id, t) -> Bounded_net.add (Some st) dn (Option.get t.pat, (id, t))) (Bounded_net.create ()) l') let lookup_tacs (hdc,c) st (l,l',dn) = let l' = List.map snd (Bounded_net.lookup st dn c) in let sl' = List.stable_sort pri_order_int l' in Sort.merge pri_order l sl' module Constr_map = Map.Make(RefOrdered) let is_transparent_gr (ids, csts) = function | VarRef id -> Idpred.mem id ids | ConstRef cst -> Cpred.mem cst csts | IndRef _ | ConstructRef _ -> false let dummy_goal = Goal.V82.dummy_goal let translate_hint (go,p) = let mk_clenv (c,t) = let cl = mk_clenv_from dummy_goal (c,t) in {cl with env = empty_env } in let code = match p.code with | Res_pf (c,t) -> Res_pf (c, mk_clenv (c,t)) | ERes_pf (c,t) -> ERes_pf (c, mk_clenv (c,t)) | Res_pf_THEN_trivial_fail (c,t) -> Res_pf_THEN_trivial_fail (c, mk_clenv (c,t)) | Give_exact c -> Give_exact c | Unfold_nth e -> Unfold_nth e | Extern t -> Extern t in (go,{ p with code = code }) let path_matches hp hints = let rec aux hp hints k = match hp, hints with | PathAtom _, [] -> false | PathAtom PathAny, (_ :: hints') -> k hints' | PathAtom p, (h :: hints') -> if p = h then k hints' else false | PathStar hp', hints -> k hints || aux hp' hints (fun hints' -> aux hp hints' k) | PathSeq (hp, hp'), hints -> aux hp hints (fun hints' -> aux hp' hints' k) | PathOr (hp, hp'), hints -> aux hp hints k || aux hp' hints k | PathEmpty, _ -> false | PathEpsilon, hints -> k hints in aux hp hints (fun hints' -> true) let rec matches_epsilon = function | PathAtom _ -> false | PathStar _ -> true | PathSeq (p, p') -> matches_epsilon p && matches_epsilon p' | PathOr (p, p') -> matches_epsilon p || matches_epsilon p' | PathEmpty -> false | PathEpsilon -> true let rec is_empty = function | PathAtom _ -> false | PathStar _ -> false | PathSeq (p, p') -> is_empty p || is_empty p' | PathOr (p, p') -> matches_epsilon p && matches_epsilon p' | PathEmpty -> true | PathEpsilon -> false let rec path_derivate hp hint = let rec derivate_atoms hints hints' = match hints, hints' with | gr :: grs, gr' :: grs' when gr = gr' 
-> derivate_atoms grs grs' | [], [] -> PathEpsilon | [], hints -> PathEmpty | grs, [] -> PathAtom (PathHints grs) | _, _ -> PathEmpty in match hp with | PathAtom PathAny -> PathEpsilon | PathAtom (PathHints grs) -> (match grs, hint with | h :: hints, PathAny -> PathEmpty | hints, PathHints hints' -> derivate_atoms hints hints' | _, _ -> assert false) | PathStar p -> if path_matches p [hint] then hp else PathEpsilon | PathSeq (hp, hp') -> let hpder = path_derivate hp hint in if matches_epsilon hp then PathOr (PathSeq (hpder, hp'), path_derivate hp' hint) else if is_empty hpder then PathEmpty else PathSeq (hpder, hp') | PathOr (hp, hp') -> PathOr (path_derivate hp hint, path_derivate hp' hint) | PathEmpty -> PathEmpty | PathEpsilon -> PathEmpty let rec normalize_path h = match h with | PathStar PathEpsilon -> PathEpsilon | PathSeq (PathEmpty, _) | PathSeq (_, PathEmpty) -> PathEmpty | PathSeq (PathEpsilon, p) | PathSeq (p, PathEpsilon) -> normalize_path p | PathOr (PathEmpty, p) | PathOr (p, PathEmpty) -> normalize_path p | PathOr (p, q) -> let p', q' = normalize_path p, normalize_path q in if p = p' && q = q' then h else normalize_path (PathOr (p', q')) | PathSeq (p, q) -> let p', q' = normalize_path p, normalize_path q in if p = p' && q = q' then h else normalize_path (PathSeq (p', q')) | _ -> h let path_derivate hp hint = normalize_path (path_derivate hp hint) let rec pp_hints_path = function | PathAtom (PathAny) -> str"." | PathAtom (PathHints grs) -> prlist_with_sep pr_spc pr_global grs | PathStar p -> str "(" ++ pp_hints_path p ++ str")*" | PathSeq (p, p') -> pp_hints_path p ++ str" ; " ++ pp_hints_path p' | PathOr (p, p') -> str "(" ++ pp_hints_path p ++ spc () ++ str"|" ++ spc () ++ pp_hints_path p' ++ str ")" | PathEmpty -> str"Ø" | PathEpsilon -> str"ε" let subst_path_atom subst p = match p with | PathAny -> p | PathHints grs -> let gr' gr = fst (subst_global subst gr) in let grs' = list_smartmap gr' grs in if grs' == grs then p else PathHints grs' let rec subst_hints_path subst hp = match hp with | PathAtom p -> let p' = subst_path_atom subst p in if p' == p then hp else PathAtom p' | PathStar p -> let p' = subst_hints_path subst p in if p' == p then hp else PathStar p' | PathSeq (p, q) -> let p' = subst_hints_path subst p in let q' = subst_hints_path subst q in if p' == p && q' == q then hp else PathSeq (p', q') | PathOr (p, q) -> let p' = subst_hints_path subst p in let q' = subst_hints_path subst q in if p' == p && q' == q then hp else PathOr (p', q') | _ -> hp module Hint_db = struct type t = { hintdb_state : Names.transparent_state; hintdb_cut : hints_path; hintdb_unfolds : Idset.t * Cset.t; mutable hintdb_max_id : int; use_dn : bool; hintdb_map : search_entry Constr_map.t; (* A list of unindexed entries starting with an unfoldable constant or with no associated pattern. 
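Such entries are candidates for any goal: [map_none], [map_all] and [map_auto] below always merge them, in priority order, with whatever the pattern index [hintdb_map] returns.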
*) hintdb_nopat : (global_reference option * stored_data) list } let next_hint_id t = let h = t.hintdb_max_id in t.hintdb_max_id <- succ t.hintdb_max_id; h let empty st use_dn = { hintdb_state = st; hintdb_cut = PathEmpty; hintdb_unfolds = (Idset.empty, Cset.empty); hintdb_max_id = 0; use_dn = use_dn; hintdb_map = Constr_map.empty; hintdb_nopat = [] } let find key db = try Constr_map.find key db.hintdb_map with Not_found -> empty_se let map_none db = List.map snd (Sort.merge pri_order (List.map snd db.hintdb_nopat) []) let map_all k db = let (l,l',_) = find k db in List.map snd (Sort.merge pri_order (List.map snd db.hintdb_nopat @ l) l') let map_auto (k,c) db = let st = if db.use_dn then Some db.hintdb_state else None in let l' = lookup_tacs (k,c) st (find k db) in List.map snd (Sort.merge pri_order (List.map snd db.hintdb_nopat) l') let is_exact = function | Give_exact _ -> true | _ -> false let is_unfold = function | Unfold_nth _ -> true | _ -> false let addkv gr id v db = let idv = id, v in let k = match gr with | Some gr -> if db.use_dn && is_transparent_gr db.hintdb_state gr && is_unfold v.code then None else Some gr | None -> None in let dnst = if db.use_dn then Some db.hintdb_state else None in let pat = if not db.use_dn && is_exact v.code then None else v.pat in match k with | None -> if not (List.exists (fun (_, (_, v')) -> v = v') db.hintdb_nopat) then { db with hintdb_nopat = (gr,idv) :: db.hintdb_nopat } else db | Some gr -> let oval = find gr db in { db with hintdb_map = Constr_map.add gr (add_tac pat idv dnst oval) db.hintdb_map } let rebuild_db st' db = let db' = { db with hintdb_map = Constr_map.map (rebuild_dn st') db.hintdb_map; hintdb_state = st'; hintdb_nopat = [] } in List.fold_left (fun db (gr,(id,v)) -> addkv gr id v db) db' db.hintdb_nopat let add_one kv db = let (k,v) = translate_hint kv in let st',db,rebuild = match v.code with | Unfold_nth egr -> let addunf (ids,csts) (ids',csts') = match egr with | EvalVarRef id -> (Idpred.add id ids, csts), (Idset.add id ids', csts') | EvalConstRef cst -> (ids, Cpred.add cst csts), (ids', Cset.add cst csts') in let state, unfs = addunf db.hintdb_state db.hintdb_unfolds in state, { db with hintdb_unfolds = unfs }, true | _ -> db.hintdb_state, db, false in let db = if db.use_dn && rebuild then rebuild_db st' db else db in addkv k (next_hint_id db) v db let add_list l db = List.fold_left (fun db k -> add_one k db) db l let remove_sdl p sdl = list_smartfilter p sdl let remove_he st p (sl1, sl2, dn as he) = let sl1' = remove_sdl p sl1 and sl2' = remove_sdl p sl2 in if sl1' == sl1 && sl2' == sl2 then he else rebuild_dn st (sl1', sl2', dn) let remove_list grs db = let filter (_, h) = match h.name with PathHints [gr] -> not (List.mem gr grs) | _ -> true in let hintmap = Constr_map.map (remove_he db.hintdb_state filter) db.hintdb_map in let hintnopat = list_smartfilter (fun (ge, sd) -> filter sd) db.hintdb_nopat in { db with hintdb_map = hintmap; hintdb_nopat = hintnopat } let remove_one gr db = remove_list [gr] db let iter f db = f None (List.map (fun x -> snd (snd x)) db.hintdb_nopat); Constr_map.iter (fun k (l,l',_) -> f (Some k) (List.map snd (l@l'))) db.hintdb_map let transparent_state db = db.hintdb_state let set_transparent_state db st = if db.use_dn then rebuild_db st db else { db with hintdb_state = st } let add_cut path db = { db with hintdb_cut = normalize_path (PathOr (db.hintdb_cut, path)) } let cut db = db.hintdb_cut let unfolds db = db.hintdb_unfolds let use_dn db = db.use_dn end module Hintdbmap = Gmap type hint_db = 
Hint_db.t type frozen_hint_db_table = (string,hint_db) Hintdbmap.t type hint_db_table = (string,hint_db) Hintdbmap.t ref type hint_db_name = string let searchtable = (ref Hintdbmap.empty : hint_db_table) let searchtable_map name = Hintdbmap.find name !searchtable let searchtable_add (name,db) = searchtable := Hintdbmap.add name db !searchtable let current_db_names () = Hintdbmap.dom !searchtable (**************************************************************************) (* Definition of the summary *) (**************************************************************************) let auto_init : (unit -> unit) ref = ref (fun () -> ()) let add_auto_init f = let init = !auto_init in auto_init := (fun () -> init (); f ()) let init () = searchtable := Hintdbmap.empty; !auto_init () let freeze () = !searchtable let unfreeze fs = searchtable := fs let _ = Summary.declare_summary "search" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } (**************************************************************************) (* Auxiliary functions to prepare AUTOHINT objects *) (**************************************************************************) let rec nb_hyp c = match kind_of_term c with | Prod(_,_,c2) -> if noccurn 1 c2 then 1+(nb_hyp c2) else nb_hyp c2 | _ -> 0 (* adding and removing tactics in the search table *) let try_head_pattern c = try head_pattern_bound c with BoundPattern -> error "Bound head variable." let name_of_constr c = try Some (global_of_constr c) with Not_found -> None let make_exact_entry sigma pri ?(name=PathAny) (c,cty) = let cty = strip_outer_cast cty in match kind_of_term cty with | Prod _ -> failwith "make_exact_entry" | _ -> let pat = snd (Pattern.pattern_of_constr sigma cty) in let hd = try head_pattern_bound pat with BoundPattern -> failwith "make_exact_entry" in (Some hd, { pri = (match pri with None -> 0 | Some p -> p); pat = Some pat; name = name; code = Give_exact c }) let make_apply_entry env sigma (eapply,hnf,verbose) pri ?(name=PathAny) (c,cty) = let cty = if hnf then hnf_constr env sigma cty else cty in match kind_of_term cty with | Prod _ -> let ce = mk_clenv_from dummy_goal (c,cty) in let c' = clenv_type (* ~reduce:false *) ce in let pat = snd (Pattern.pattern_of_constr sigma c') in let hd = try head_pattern_bound pat with BoundPattern -> failwith "make_apply_entry" in let nmiss = List.length (clenv_missing ce) in if nmiss = 0 then (Some hd, { pri = (match pri with None -> nb_hyp cty | Some p -> p); pat = Some pat; name = name; code = Res_pf(c,cty) }) else begin if not eapply then failwith "make_apply_entry"; if verbose then warn (str "the hint: eapply " ++ pr_lconstr c ++ str " will only be used by eauto"); (Some hd, { pri = (match pri with None -> nb_hyp cty + nmiss | Some p -> p); pat = Some pat; name = name; code = ERes_pf(c,cty) }) end | _ -> failwith "make_apply_entry" (* flags is (e,h,v) with e=true if eapply and h=true if hnf and v=true if verbose c is a constr cty is the type of constr *) let make_resolves env sigma flags pri ?name c = let cty = Retyping.get_type_of env sigma c in let ents = map_succeed (fun f -> f (c,cty)) [make_exact_entry sigma pri ?name; make_apply_entry env sigma flags pri ?name] in if ents = [] then errorlabstrm "Hint" (pr_lconstr c ++ spc() ++ (if pi1 flags then str"cannot be used as a hint." 
else str "can be used as a hint only for eauto.")); ents (* used to add an hypothesis to the local hint database *) let make_resolve_hyp env sigma (hname,_,htyp) = try [make_apply_entry env sigma (true, true, false) None ~name:(PathHints [VarRef hname]) (mkVar hname, htyp)] with | Failure _ -> [] | e when Logic.catchable_exception e -> anomaly "make_resolve_hyp" (* REM : in most cases hintname = id *) let make_unfold eref = let g = global_of_evaluable_reference eref in (Some g, { pri = 4; pat = None; name = PathHints [g]; code = Unfold_nth eref }) let make_extern pri pat tacast = let hdconstr = Option.map try_head_pattern pat in (hdconstr, { pri = pri; pat = pat; name = PathAny; code = Extern tacast }) let make_trivial env sigma ?(name=PathAny) c = let t = hnf_constr env sigma (type_of env sigma c) in let hd = head_of_constr_reference (fst (head_constr t)) in let ce = mk_clenv_from dummy_goal (c,t) in (Some hd, { pri=1; pat = Some (snd (Pattern.pattern_of_constr sigma (clenv_type ce))); name = name; code=Res_pf_THEN_trivial_fail(c,t) }) open Vernacexpr (**************************************************************************) (* declaration of the AUTOHINT library object *) (**************************************************************************) (* If the database does not exist, it is created *) (* TODO: should a warning be printed in this case ?? *) let get_db dbname = try searchtable_map dbname with Not_found -> Hint_db.empty empty_transparent_state false let add_hint dbname hintlist = let db = get_db dbname in let db' = Hint_db.add_list hintlist db in searchtable_add (dbname,db') let add_transparency dbname grs b = let db = get_db dbname in let st = Hint_db.transparent_state db in let st' = List.fold_left (fun (ids, csts) gr -> match gr with | EvalConstRef c -> (ids, (if b then Cpred.add else Cpred.remove) c csts) | EvalVarRef v -> (if b then Idpred.add else Idpred.remove) v ids, csts) st grs in searchtable_add (dbname, Hint_db.set_transparent_state db st') let remove_hint dbname grs = let db = get_db dbname in let db' = Hint_db.remove_list grs db in searchtable_add (dbname, db') type hint_action = | CreateDB of bool * transparent_state | AddTransparency of evaluable_global_reference list * bool | AddHints of hint_entry list | RemoveHints of global_reference list | AddCut of hints_path let add_cut dbname path = let db = get_db dbname in let db' = Hint_db.add_cut path db in searchtable_add (dbname, db') type hint_obj = bool * string * hint_action (* locality, name, action *) let cache_autohint (_,(local,name,hints)) = match hints with | CreateDB (b, st) -> searchtable_add (name, Hint_db.empty st b) | AddTransparency (grs, b) -> add_transparency name grs b | AddHints hints -> add_hint name hints | RemoveHints grs -> remove_hint name grs | AddCut path -> add_cut name path let forward_subst_tactic = ref (fun _ -> failwith "subst_tactic is not installed for auto") let set_extern_subst_tactic f = forward_subst_tactic := f let subst_autohint (subst,(local,name,hintlist as obj)) = let subst_key gr = let (lab'', elab') = subst_global subst gr in let gr' = (try head_of_constr_reference (fst (head_constr_bound elab')) with Tactics.Bound -> lab'') in if gr' == gr then gr else gr' in let subst_hint (k,data as hint) = let k' = Option.smartmap subst_key k in let pat' = Option.smartmap (subst_pattern subst) data.pat in let code' = match data.code with | Res_pf (c,t) -> let c' = subst_mps subst c in let t' = subst_mps subst t in if c==c' && t'==t then data.code else Res_pf (c', t') | ERes_pf 
(c,t) -> let c' = subst_mps subst c in let t' = subst_mps subst t in if c==c' && t'==t then data.code else ERes_pf (c',t') | Give_exact c -> let c' = subst_mps subst c in if c==c' then data.code else Give_exact c' | Res_pf_THEN_trivial_fail (c,t) -> let c' = subst_mps subst c in let t' = subst_mps subst t in if c==c' && t==t' then data.code else Res_pf_THEN_trivial_fail (c',t') | Unfold_nth ref -> let ref' = subst_evaluable_reference subst ref in if ref==ref' then data.code else Unfold_nth ref' | Extern tac -> let tac' = !forward_subst_tactic subst tac in if tac==tac' then data.code else Extern tac' in let name' = subst_path_atom subst data.name in let data' = if data.pat==pat' && data.name == name' && data.code==code' then data else { data with pat = pat'; name = name'; code = code' } in if k' == k && data' == data then hint else (k',data') in match hintlist with | CreateDB _ -> obj | AddTransparency (grs, b) -> let grs' = list_smartmap (subst_evaluable_reference subst) grs in if grs==grs' then obj else (local, name, AddTransparency (grs', b)) | AddHints hintlist -> let hintlist' = list_smartmap subst_hint hintlist in if hintlist' == hintlist then obj else (local,name,AddHints hintlist') | RemoveHints grs -> let grs' = list_smartmap (fun x -> fst (subst_global subst x)) grs in if grs==grs' then obj else (local, name, RemoveHints grs') | AddCut path -> let path' = subst_hints_path subst path in if path' == path then obj else (local, name, AddCut path') let classify_autohint ((local,name,hintlist) as obj) = if local or hintlist = (AddHints []) then Dispose else Substitute obj let inAutoHint : hint_obj -> obj = declare_object {(default_object "AUTOHINT") with cache_function = cache_autohint; load_function = (fun _ -> cache_autohint); subst_function = subst_autohint; classify_function = classify_autohint; } let create_hint_db l n st b = Lib.add_anonymous_leaf (inAutoHint (l,n,CreateDB (b, st))) let remove_hints local dbnames grs = let dbnames = if dbnames = [] then ["core"] else dbnames in List.iter (fun dbname -> Lib.add_anonymous_leaf (inAutoHint(local, dbname, RemoveHints grs))) dbnames (**************************************************************************) (* The "Hint" vernacular command *) (**************************************************************************) let add_resolves env sigma clist local dbnames = List.iter (fun dbname -> Lib.add_anonymous_leaf (inAutoHint (local,dbname, AddHints (List.flatten (List.map (fun (x, hnf, path, y) -> make_resolves env sigma (true,hnf,Flags.is_verbose()) x ~name:path y) clist))))) dbnames let add_unfolds l local dbnames = List.iter (fun dbname -> Lib.add_anonymous_leaf (inAutoHint (local,dbname, AddHints (List.map make_unfold l)))) dbnames let add_cuts l local dbnames = List.iter (fun dbname -> Lib.add_anonymous_leaf (inAutoHint (local,dbname, AddCut l))) dbnames let add_transparency l b local dbnames = List.iter (fun dbname -> Lib.add_anonymous_leaf (inAutoHint (local,dbname, AddTransparency (l, b)))) dbnames let add_extern pri pat tacast local dbname = (* We check that all metas that appear in tacast have at least one occurence in the left pattern pat *) let tacmetas = [] in match pat with | Some (patmetas,pat) -> (match (list_subtract tacmetas patmetas) with | i::_ -> errorlabstrm "add_extern" (str "The meta-variable ?" 
++ Ppconstr.pr_patvar i ++ str" is not bound.") | [] -> Lib.add_anonymous_leaf (inAutoHint(local,dbname, AddHints [make_extern pri (Some pat) tacast]))) | None -> Lib.add_anonymous_leaf (inAutoHint(local,dbname, AddHints [make_extern pri None tacast])) let add_externs pri pat tacast local dbnames = List.iter (add_extern pri pat tacast local) dbnames let add_trivials env sigma l local dbnames = List.iter (fun dbname -> Lib.add_anonymous_leaf ( inAutoHint(local,dbname, AddHints (List.map (fun (name, c) -> make_trivial env sigma ~name c) l)))) dbnames let forward_intern_tac = ref (fun _ -> failwith "intern_tac is not installed for auto") let set_extern_intern_tac f = forward_intern_tac := f type hints_entry = | HintsResolveEntry of (int option * bool * hints_path_atom * constr) list | HintsImmediateEntry of (hints_path_atom * constr) list | HintsCutEntry of hints_path | HintsUnfoldEntry of evaluable_global_reference list | HintsTransparencyEntry of evaluable_global_reference list * bool | HintsExternEntry of int * (patvar list * constr_pattern) option * glob_tactic_expr let h = id_of_string "H" exception Found of constr * types let prepare_hint env (sigma,c) = let sigma = Typeclasses.resolve_typeclasses ~fail:false env sigma in (* We re-abstract over uninstantiated evars. It is actually a bit stupid to generalize over evars since the first thing make_resolves will do is to re-instantiate the products *) let c = drop_extra_implicit_args (Evarutil.nf_evar sigma c) in let vars = ref (collect_vars c) in let subst = ref [] in let rec find_next_evar c = match kind_of_term c with | Evar (evk,args as ev) -> (* We skip the test whether args is the identity or not *) let t = Evarutil.nf_evar sigma (existential_type sigma ev) in let t = List.fold_right (fun (e,id) c -> replace_term e id c) !subst t in if free_rels t <> Intset.empty then error "Hints with holes dependent on a bound variable not supported."; if occur_existential t then (* Not clever enough to construct dependency graph of evars *) error "Not clever enough to deal with evars dependent in other evars."; raise (Found (c,t)) | _ -> iter_constr find_next_evar c in let rec iter c = try find_next_evar c; c with Found (evar,t) -> let id = next_ident_away_from h (fun id -> Idset.mem id !vars) in vars := Idset.add id !vars; subst := (evar,mkVar id)::!subst; mkNamedLambda id t (iter (replace_term evar (mkVar id) c)) in iter c let path_of_constr_expr c = match c with | Topconstr.CRef r -> (try PathHints [global r] with e when Errors.noncritical e -> PathAny) | _ -> PathAny let interp_hints h = let f c = let evd,c = Constrintern.interp_open_constr Evd.empty (Global.env()) c in let c = prepare_hint (Global.env()) (evd,c) in Evarutil.check_evars (Global.env()) Evd.empty evd c; c in let fr r = let gr = global_with_alias r in let r' = evaluable_of_global_reference (Global.env()) gr in Dumpglob.add_glob (loc_of_reference r) gr; r' in let fres (o, b, c) = (o, b, path_of_constr_expr c, f c) in let fi c = path_of_constr_expr c, f c in let fp = Constrintern.intern_constr_pattern Evd.empty (Global.env()) in match h with | HintsResolve lhints -> HintsResolveEntry (List.map fres lhints) | HintsImmediate lhints -> HintsImmediateEntry (List.map fi lhints) | HintsUnfold lhints -> HintsUnfoldEntry (List.map fr lhints) | HintsTransparency (lhints, b) -> HintsTransparencyEntry (List.map fr lhints, b) | HintsConstructors lqid -> let constr_hints_of_ind qid = let ind = global_inductive_with_alias qid in Dumpglob.dump_reference (fst (qualid_of_reference qid)) "<>" 
(string_of_reference qid) "ind"; list_tabulate (fun i -> let c = (ind,i+1) in None, true, PathHints [ConstructRef c], mkConstruct c) (nconstructors ind) in HintsResolveEntry (List.flatten (List.map constr_hints_of_ind lqid)) | HintsExtern (pri, patcom, tacexp) -> let pat = Option.map fp patcom in let tacexp = !forward_intern_tac (match pat with None -> [] | Some (l, _) -> l) tacexp in HintsExternEntry (pri, pat, tacexp) let add_hints local dbnames0 h = if List.mem "nocore" dbnames0 then error "The hint database \"nocore\" is meant to stay empty."; let dbnames = if dbnames0 = [] then ["core"] else dbnames0 in let env = Global.env() and sigma = Evd.empty in match h with | HintsResolveEntry lhints -> add_resolves env sigma lhints local dbnames | HintsImmediateEntry lhints -> add_trivials env sigma lhints local dbnames | HintsCutEntry lhints -> add_cuts lhints local dbnames | HintsUnfoldEntry lhints -> add_unfolds lhints local dbnames | HintsTransparencyEntry (lhints, b) -> add_transparency lhints b local dbnames | HintsExternEntry (pri, pat, tacexp) -> add_externs pri pat tacexp local dbnames (**************************************************************************) (* Functions for printing the hints *) (**************************************************************************) let pr_autotactic = function | Res_pf (c,clenv) -> (str"apply " ++ pr_constr c) | ERes_pf (c,clenv) -> (str"eapply " ++ pr_constr c) | Give_exact c -> (str"exact " ++ pr_constr c) | Res_pf_THEN_trivial_fail (c,clenv) -> (str"apply " ++ pr_constr c ++ str" ; trivial") | Unfold_nth c -> (str"unfold " ++ pr_evaluable_reference c) | Extern tac -> (str "(*external*) " ++ Pptactic.pr_glob_tactic (Global.env()) tac) let pr_hint (id, v) = (pr_autotactic v.code ++ str"(level " ++ int v.pri ++ str", id " ++ int id ++ str ")" ++ spc ()) let pr_hint_list hintlist = (str " " ++ hov 0 (prlist pr_hint hintlist) ++ fnl ()) let pr_hints_db (name,db,hintlist) = (str "In the database " ++ str name ++ str ":" ++ if hintlist = [] then (str " nothing" ++ fnl ()) else (fnl () ++ pr_hint_list hintlist)) (* Print all hints associated to head c in any database *) let pr_hint_list_for_head c = let dbs = Hintdbmap.to_list !searchtable in let valid_dbs = map_succeed (fun (name,db) -> (name,db, List.map (fun v -> 0, v) (Hint_db.map_all c db))) dbs in if valid_dbs = [] then (str "No hint declared for :" ++ pr_global c) else hov 0 (str"For " ++ pr_global c ++ str" -> " ++ fnl () ++ hov 0 (prlist pr_hints_db valid_dbs)) let pr_hint_ref ref = pr_hint_list_for_head ref (* Print all hints associated to head id in any database *) let print_hint_ref ref = ppnl(pr_hint_ref ref) let pr_hint_term cl = try let dbs = Hintdbmap.to_list !searchtable in let valid_dbs = let fn = try let (hdc,args) = head_constr_bound cl in let hd = head_of_constr_reference hdc in if occur_existential cl then Hint_db.map_all hd else Hint_db.map_auto (hd, applist (hdc,args)) with Bound -> Hint_db.map_none in let fn db = List.map (fun x -> 0, x) (fn db) in map_succeed (fun (name, db) -> (name, db, fn db)) dbs in if valid_dbs = [] then (str "No hint applicable for current goal") else (str "Applicable Hints :" ++ fnl () ++ hov 0 (prlist pr_hints_db valid_dbs)) with Match_failure _ | Failure _ -> (str "No hint applicable for current goal") let error_no_such_hint_database x = error ("No such Hint database: "^x^".") let print_hint_term cl = ppnl (pr_hint_term cl) (* print all hints that apply to the concl of the current goal *) let print_applicable_hint () = let pts = get_pftreestate 
() in let glss = Proof.V82.subgoals pts in match glss.Evd.it with | [] -> Util.error "No focused goal." | g::_ -> let gl = { Evd.it = g; sigma = glss.Evd.sigma } in print_hint_term (pf_concl gl) (* displays the whole hint database db *) let print_hint_db db = let (ids, csts) = Hint_db.transparent_state db in msgnl (hov 0 ((if Hint_db.use_dn db then str"Discriminated database" else str"Non-discriminated database"))); msgnl (hov 2 (str"Unfoldable variable definitions: " ++ pr_idpred ids)); msgnl (hov 2 (str"Unfoldable constant definitions: " ++ pr_cpred csts)); msgnl (hov 2 (str"Cut: " ++ pp_hints_path (Hint_db.cut db))); Hint_db.iter (fun head hintlist -> match head with | Some head -> msg (hov 0 (str "For " ++ pr_global head ++ str " -> " ++ pr_hint_list (List.map (fun x -> (0,x)) hintlist))) | None -> msg (hov 0 (str "For any goal -> " ++ pr_hint_list (List.map (fun x -> (0, x)) hintlist)))) db let print_hint_db_by_name dbname = try let db = searchtable_map dbname in print_hint_db db with Not_found -> error_no_such_hint_database dbname (* displays all the hints of all databases *) let print_searchtable () = Hintdbmap.iter (fun name db -> msg (str "In the database " ++ str name ++ str ":" ++ fnl ()); print_hint_db db) !searchtable (**************************************************************************) (* Automatic tactics *) (**************************************************************************) (**************************************************************************) (* tactics with a trace mechanism for automatic search *) (**************************************************************************) let priority l = List.filter (fun (_, hint) -> hint.pri = 0) l (* tell auto not to reuse already instantiated metas in unification (for compatibility, since otherwise, apply succeeds oftener) *) open Unification let auto_unif_flags = { modulo_conv_on_closed_terms = Some full_transparent_state; use_metas_eagerly_in_conv_on_closed_terms = false; modulo_delta = empty_transparent_state; modulo_delta_types = full_transparent_state; modulo_delta_in_merge = None; check_applied_meta_types = false; resolve_evars = true; use_pattern_unification = false; use_meta_bound_pattern_unification = true; frozen_evars = ExistentialSet.empty; restrict_conv_on_strict_subterms = false; (* Compat *) modulo_betaiota = false; modulo_eta = true; allow_K_in_toplevel_higher_order_unification = false } (* Try unification with the precompiled clause, then use registered Apply *) let h_clenv_refine ev c clenv = Refiner.abstract_tactic (TacApply (true,ev,[c,NoBindings],None)) (Clenvtac.clenv_refine ev clenv) let unify_resolve_nodelta (c,clenv) gl = let clenv' = connect_clenv gl clenv in let clenv'' = clenv_unique_resolver ~flags:auto_unif_flags clenv' gl in h_clenv_refine false c clenv'' gl let unify_resolve flags (c,clenv) gl = let clenv' = connect_clenv gl clenv in let clenv'' = clenv_unique_resolver ~flags clenv' gl in h_clenv_refine false c clenv'' gl let unify_resolve_gen = function | None -> unify_resolve_nodelta | Some flags -> unify_resolve flags (* Util *) let expand_constructor_hints env lems = list_map_append (fun (sigma,lem) -> match kind_of_term lem with | Ind ind -> list_tabulate (fun i -> mkConstruct (ind,i+1)) (nconstructors ind) | _ -> [prepare_hint env (sigma,lem)]) lems (* builds a hint database from a constr signature *) (* typically used with (lid, ltyp) = pf_hyps_types *) let add_hint_lemmas eapply lems hint_db gl = let lems = expand_constructor_hints (pf_env gl) lems in let hintlist' = 
list_map_append (pf_apply make_resolves gl (eapply,true,false) None) lems in Hint_db.add_list hintlist' hint_db let make_local_hint_db ?ts eapply lems gl = let sign = pf_hyps gl in let ts = match ts with | None -> Hint_db.transparent_state (searchtable_map "core") | Some ts -> ts in let hintlist = list_map_append (pf_apply make_resolve_hyp gl) sign in add_hint_lemmas eapply lems (Hint_db.add_list hintlist (Hint_db.empty ts false)) gl (* Serait-ce possible de compiler d'abord la tactique puis de faire la substitution sans passer par bdize dont l'objectif est de préparer un terme pour l'affichage ? (HH) *) (* Si on enlève le dernier argument (gl) conclPattern est calculé une fois pour toutes : en particulier si Pattern.somatch produit une UserError Ce qui fait que si la conclusion ne matche pas le pattern, Auto échoue, même si après Intros la conclusion matche le pattern. *) (* conclPattern doit échouer avec error car il est rattraper par tclFIRST *) let forward_interp_tactic = ref (fun _ -> failwith "interp_tactic is not installed for auto") let set_extern_interp f = forward_interp_tactic := f let conclPattern concl pat tac gl = let constr_bindings = match pat with | None -> [] | Some pat -> try matches pat concl with PatternMatchingFailure -> error "conclPattern" in !forward_interp_tactic constr_bindings tac gl (***********************************************************) (** A debugging / verbosity framework for trivial and auto *) (***********************************************************) (** The following options allow to trigger debugging/verbosity without having to adapt the scripts. Note: if Debug and Info are both activated, Debug take precedence. *) let global_debug_trivial = ref false let global_debug_auto = ref false let global_info_trivial = ref false let global_info_auto = ref false let add_option ls refe = let _ = Goptions.declare_bool_option { Goptions.optsync = true; Goptions.optdepr = false; Goptions.optname = String.concat " " ls; Goptions.optkey = ls; Goptions.optread = (fun () -> !refe); Goptions.optwrite = (:=) refe } in () let _ = add_option ["Debug";"Trivial"] global_debug_trivial; add_option ["Debug";"Auto"] global_debug_auto; add_option ["Info";"Trivial"] global_info_trivial; add_option ["Info";"Auto"] global_info_auto let no_dbg () = (Off,0,ref []) let mk_trivial_dbg debug = let d = if debug = Debug || !global_debug_trivial then Debug else if debug = Info || !global_info_trivial then Info else Off in (d,0,ref []) (** Note : we start the debug depth of auto at 1 to distinguish it for trivial (whose depth is 0). *) let mk_auto_dbg debug = let d = if debug = Debug || !global_debug_auto then Debug else if debug = Info || !global_info_auto then Info else Off in (d,1,ref []) let incr_dbg = function (dbg,depth,trace) -> (dbg,depth+1,trace) (** A tracing tactic for debug/info trivial/auto *) let tclLOG (dbg,depth,trace) pp tac = match dbg with | Off -> tac | Debug -> (* For "debug (trivial/auto)", we directly output messages *) let s = String.make depth '*' in begin fun gl -> try let out = tac gl in msg_debug (str s ++ spc () ++ pp () ++ str ". (*success*)"); out with reraise -> msg_debug (str s ++ spc () ++ pp () ++ str ". 
(*fail*)"); raise reraise end | Info -> (* For "info (trivial/auto)", we store a log trace *) begin fun gl -> try let out = tac gl in trace := (depth, Some pp) :: !trace; out with reraise -> trace := (depth, None) :: !trace; raise reraise end (** For info, from the linear trace information, we reconstitute the part of the proof tree we're interested in. The last executed tactic comes first in the trace (and it should be a successful one). [depth] is the root depth of the tree fragment we're visiting. [keep] means we're in a successful tree fragment (the very last tactic has been successful). *) let rec cleanup_info_trace depth acc = function | [] -> acc | (d,Some pp) :: l -> cleanup_info_trace d ((d,pp)::acc) l | l -> cleanup_info_trace depth acc (erase_subtree depth l) and erase_subtree depth = function | [] -> [] | (d,_) :: l -> if d = depth then l else erase_subtree depth l let pr_info_atom (d,pp) = msg_debug (str (String.make d ' ') ++ pp () ++ str ".") let pr_info_trace = function | (Info,_,{contents=(d,Some pp)::l}) -> List.iter pr_info_atom (cleanup_info_trace d [(d,pp)] l) | _ -> () let pr_info_nop = function | (Info,_,_) -> msg_debug (str "idtac.") | _ -> () let pr_dbg_header = function | (Off,_,_) -> () | (Debug,0,_) -> msg_debug (str "(* debug trivial : *)") | (Debug,_,_) -> msg_debug (str "(* debug auto : *)") | (Info,0,_) -> msg_debug (str "(* info trivial : *)") | (Info,_,_) -> msg_debug (str "(* info auto : *)") let tclTRY_dbg d tac = tclORELSE0 (fun gl -> pr_dbg_header d; let out = tac gl in pr_info_trace d; out) (fun gl -> pr_info_nop d; tclIDTAC gl) (**************************************************************************) (* The Trivial tactic *) (**************************************************************************) (* local_db is a Hint database containing the hypotheses of current goal *) (* Papageno : cette fonction a été pas mal simplifiée depuis que la base de Hint impérative a été remplacée par plusieurs bases fonctionnelles *) let flags_of_state st = {auto_unif_flags with modulo_conv_on_closed_terms = Some st; modulo_delta = st} let hintmap_of hdc concl = match hdc with | None -> Hint_db.map_none | Some hdc -> if occur_existential concl then Hint_db.map_all hdc else Hint_db.map_auto (hdc,concl) let exists_evaluable_reference env = function | EvalConstRef _ -> true | EvalVarRef v -> try ignore(lookup_named v env); true with Not_found -> false let dbg_intro dbg = tclLOG dbg (fun () -> str "intro") intro let dbg_assumption dbg = tclLOG dbg (fun () -> str "assumption") assumption let rec trivial_fail_db dbg mod_delta db_list local_db gl = let intro_tac = tclTHEN (dbg_intro dbg) (fun g'-> let hintl = make_resolve_hyp (pf_env g') (project g') (pf_last_hyp g') in trivial_fail_db dbg mod_delta db_list (Hint_db.add_list hintl local_db) g') in tclFIRST ((dbg_assumption dbg)::intro_tac:: (List.map tclCOMPLETE (trivial_resolve dbg mod_delta db_list local_db (pf_concl gl)))) gl and my_find_search_nodelta db_list local_db hdc concl = List.map (fun hint -> (None,hint)) (list_map_append (hintmap_of hdc concl) (local_db::db_list)) and my_find_search mod_delta = if mod_delta then my_find_search_delta else my_find_search_nodelta and my_find_search_delta db_list local_db hdc concl = let flags = {auto_unif_flags with use_metas_eagerly_in_conv_on_closed_terms = true} in let f = hintmap_of hdc concl in if occur_existential concl then list_map_append (fun db -> if Hint_db.use_dn db then let flags = flags_of_state (Hint_db.transparent_state db) in List.map (fun x -> (Some 
flags,x)) (f db) else let flags = {flags with modulo_delta = Hint_db.transparent_state db} in List.map (fun x -> (Some flags,x)) (f db)) (local_db::db_list) else list_map_append (fun db -> if Hint_db.use_dn db then let flags = flags_of_state (Hint_db.transparent_state db) in List.map (fun x -> (Some flags, x)) (f db) else let (ids, csts as st) = Hint_db.transparent_state db in let flags, l = let l = match hdc with None -> Hint_db.map_none db | Some hdc -> if (Idpred.is_empty ids && Cpred.is_empty csts) then Hint_db.map_auto (hdc,concl) db else Hint_db.map_all hdc db in {flags with modulo_delta = st}, l in List.map (fun x -> (Some flags,x)) l) (local_db::db_list) and tac_of_hint dbg db_list local_db concl (flags, ({pat=p; code=t})) = let tactic = match t with | Res_pf (c,cl) -> unify_resolve_gen flags (c,cl) | ERes_pf _ -> (fun gl -> error "eres_pf") | Give_exact c -> exact_check c | Res_pf_THEN_trivial_fail (c,cl) -> tclTHEN (unify_resolve_gen flags (c,cl)) (* With "(debug) trivial", we shouldn't end here, and with "debug auto" we don't display the details of inner trivial *) (trivial_fail_db (no_dbg ()) (flags <> None) db_list local_db) | Unfold_nth c -> (fun gl -> if exists_evaluable_reference (pf_env gl) c then tclPROGRESS (h_reduce (Unfold [all_occurrences_expr,c]) onConcl) gl else tclFAIL 0 (str"Unbound reference") gl) | Extern tacast -> conclPattern concl p tacast in tclLOG dbg (fun () -> pr_autotactic t) tactic and trivial_resolve dbg mod_delta db_list local_db cl = try let head = try let hdconstr,_ = head_constr_bound cl in Some (head_of_constr_reference hdconstr) with Bound -> None in List.map (tac_of_hint dbg db_list local_db cl) (priority (my_find_search mod_delta db_list local_db head cl)) with Not_found -> [] (** The use of the "core" database can be de-activated by passing "nocore" amongst the databases. 
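[make_db_list] below removes the pseudo-name "nocore" from the list and prepends "core" only when "nocore" was absent; thus e.g. [auto with nocore db] uses the hints of [db] (and the local hypotheses) but none of the "core" hints.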
*) let make_db_list dbnames = let use_core = not (List.mem "nocore" dbnames) in let dbnames = list_remove "nocore" dbnames in let dbnames = if use_core then "core"::dbnames else dbnames in let lookup db = try searchtable_map db with Not_found -> error_no_such_hint_database db in List.map lookup dbnames let trivial ?(debug=Off) lems dbnames gl = let db_list = make_db_list dbnames in let d = mk_trivial_dbg debug in tclTRY_dbg d (trivial_fail_db d false db_list (make_local_hint_db false lems gl)) gl let full_trivial ?(debug=Off) lems gl = let dbnames = Hintdbmap.dom !searchtable in let dbnames = list_remove "v62" dbnames in let db_list = List.map (fun x -> searchtable_map x) dbnames in let d = mk_trivial_dbg debug in tclTRY_dbg d (trivial_fail_db d false db_list (make_local_hint_db false lems gl)) gl let gen_trivial ?(debug=Off) lems = function | None -> full_trivial ~debug lems | Some l -> trivial ~debug lems l let h_trivial ?(debug=Off) lems l = Refiner.abstract_tactic (TacTrivial (debug,List.map snd lems,l)) (gen_trivial ~debug lems l) (**************************************************************************) (* The classical Auto tactic *) (**************************************************************************) let possible_resolve dbg mod_delta db_list local_db cl = try let head = try let hdconstr,_ = head_constr_bound cl in Some (head_of_constr_reference hdconstr) with Bound -> None in List.map (tac_of_hint dbg db_list local_db cl) (my_find_search mod_delta db_list local_db head cl) with Not_found -> [] let dbg_case dbg id = tclLOG dbg (fun () -> str "case " ++ pr_id id) (simplest_case (mkVar id)) let decomp_unary_term_then dbg (id,_,typc) kont1 kont2 gl = try let ccl = applist (head_constr typc) in match Hipattern.match_with_conjunction ccl with | Some (_,args) -> tclTHEN (dbg_case dbg id) (kont1 (List.length args)) gl | None -> kont2 gl with UserError _ -> kont2 gl let decomp_empty_term dbg (id,_,typc) gl = if Hipattern.is_empty_type typc then dbg_case dbg id gl else errorlabstrm "Auto.decomp_empty_term" (str "Not an empty type.") let extend_local_db gl decl db = Hint_db.add_list (make_resolve_hyp (pf_env gl) (project gl) decl) db (* Introduce an hypothesis, then call the continuation tactic [kont] with the hint db extended with the so-obtained hypothesis *) let intro_register dbg kont db = tclTHEN (dbg_intro dbg) (onLastDecl (fun decl gl -> kont (extend_local_db gl decl db) gl)) (* n is the max depth of search *) (* local_db contains the local Hypotheses *) exception Uplift of tactic list let search d n mod_delta db_list local_db = let rec search d n local_db = if n=0 then (fun gl -> error "BOUND 2") else tclORELSE0 (dbg_assumption d) (tclORELSE0 (intro_register d (search d n) local_db) (fun gl -> let d' = incr_dbg d in tclFIRST (List.map (fun ntac -> tclTHEN ntac (search d' (n-1) local_db)) (possible_resolve d mod_delta db_list local_db (pf_concl gl))) gl)) in search d n local_db let default_search_depth = ref 5 let delta_auto ?(debug=Off) mod_delta n lems dbnames gl = let db_list = make_db_list dbnames in let d = mk_auto_dbg debug in tclTRY_dbg d (search d n mod_delta db_list (make_local_hint_db false lems gl)) gl let auto ?(debug=Off) n = delta_auto ~debug false n let new_auto ?(debug=Off) n = delta_auto ~debug true n let default_auto = auto !default_search_depth [] [] let delta_full_auto ?(debug=Off) mod_delta n lems gl = let dbnames = Hintdbmap.dom !searchtable in let dbnames = list_remove "v62" dbnames in let db_list = List.map (fun x -> searchtable_map x) dbnames in let 
d = mk_auto_dbg debug in tclTRY_dbg d (search d n mod_delta db_list (make_local_hint_db false lems gl)) gl let full_auto ?(debug=Off) n = delta_full_auto ~debug false n let new_full_auto ?(debug=Off) n = delta_full_auto ~debug true n let default_full_auto gl = full_auto !default_search_depth [] gl let gen_auto ?(debug=Off) n lems dbnames = let n = match n with None -> !default_search_depth | Some n -> n in match dbnames with | None -> full_auto ~debug n lems | Some l -> auto ~debug n lems l let inj_or_var = Option.map (fun n -> ArgArg n) let h_auto ?(debug=Off) n lems l = Refiner.abstract_tactic (TacAuto (debug,inj_or_var n,List.map snd lems,l)) (gen_auto ~debug n lems l) coq-8.4pl2/tactics/contradiction.mli0000640000175000001440000000131212010532755016621 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tactic val contradiction : constr with_bindings option -> tactic coq-8.4pl2/tactics/autorewrite.ml0000640000175000001440000002436112121620060016161 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* errorlabstrm "AutoRewrite" (str ("Rewriting base "^(bas)^" does not exist.")) let find_rewrites bas = List.rev_map snd (HintDN.find_all (find_base bas)) let find_matches bas pat = let base = find_base bas in let res = HintDN.search_pattern base pat in List.map (fun ((_,rew), esubst, subst) -> rew) res let print_rewrite_hintdb bas = ppnl (str "Database " ++ str bas ++ (Pp.cut ()) ++ prlist_with_sep Pp.cut (fun h -> str (if h.rew_l2r then "rewrite -> " else "rewrite <- ") ++ Printer.pr_lconstr h.rew_lemma ++ str " of type " ++ Printer.pr_lconstr h.rew_type ++ str " then use tactic " ++ Pptactic.pr_glob_tactic (Global.env()) h.rew_tac) (find_rewrites bas)) type raw_rew_rule = loc * constr * bool * raw_tactic_expr (* Applies all the rules of one base *) let one_base general_rewrite_maybe_in tac_main bas = let lrul = find_rewrites bas in let lrul = List.map (fun h -> (h.rew_lemma,h.rew_l2r,Tacinterp.eval_tactic h.rew_tac)) lrul in tclREPEAT_MAIN (tclPROGRESS (List.fold_left (fun tac (csr,dir,tc) -> tclTHEN tac (tclREPEAT_MAIN (tclTHENFIRST (general_rewrite_maybe_in dir csr tc) tac_main))) tclIDTAC lrul)) (* The AutoRewrite tactic *) let autorewrite ?(conds=Naive) tac_main lbas = tclREPEAT_MAIN (tclPROGRESS (List.fold_left (fun tac bas -> tclTHEN tac (one_base (fun dir c tac -> let tac = tac, conds in general_rewrite dir all_occurrences true false ~tac c) tac_main bas)) tclIDTAC lbas)) let autorewrite_multi_in ?(conds=Naive) idl tac_main lbas : tactic = fun gl -> (* let's check at once if id exists (to raise the appropriate error) *) let _ = List.map (Tacmach.pf_get_hyp gl) idl in let general_rewrite_in id = let id = ref id in let to_be_cleared = ref false in fun dir cstr tac gl -> let last_hyp_id = match Tacmach.pf_hyps gl with (last_hyp_id,_,_)::_ -> last_hyp_id | _ -> (* even the hypothesis id is missing *) error ("No such hypothesis: " ^ (string_of_id !id) ^".") in let gl' = general_rewrite_in dir all_occurrences true ~tac:(tac, conds) false !id cstr false gl in let gls = gl'.Evd.it in match gls with g::_ -> (match Environ.named_context_of_val (Goal.V82.hyps gl'.Evd.sigma g) with (lastid,_,_)::_ -> if last_hyp_id <> lastid then begin let gl'' = if !to_be_cleared then tclTHEN (fun _ -> gl') (tclTRY (clear [!id])) gl else gl' in id := lastid ; to_be_cleared := true ; gl'' end else begin 
to_be_cleared := false ; gl' end | _ -> assert false) (* there must be at least an hypothesis *) | _ -> assert false (* rewriting cannot complete a proof *) in tclMAP (fun id -> tclREPEAT_MAIN (tclPROGRESS (List.fold_left (fun tac bas -> tclTHEN tac (one_base (general_rewrite_in id) tac_main bas)) tclIDTAC lbas))) idl gl let autorewrite_in ?(conds=Naive) id = autorewrite_multi_in ~conds [id] let gen_auto_multi_rewrite conds tac_main lbas cl = let try_do_hyps treat_id l = autorewrite_multi_in ~conds (List.map treat_id l) tac_main lbas in if cl.concl_occs <> all_occurrences_expr & cl.concl_occs <> no_occurrences_expr then error "The \"at\" syntax isn't available yet for the autorewrite tactic." else let compose_tac t1 t2 = match cl.onhyps with | Some [] -> t1 | _ -> tclTHENFIRST t1 t2 in compose_tac (if cl.concl_occs <> no_occurrences_expr then autorewrite ~conds tac_main lbas else tclIDTAC) (match cl.onhyps with | Some l -> try_do_hyps (fun ((_,id),_) -> id) l | None -> fun gl -> (* try to rewrite in all hypothesis (except maybe the rewritten one) *) let ids = Tacmach.pf_ids_of_hyps gl in try_do_hyps (fun id -> id) ids gl) let auto_multi_rewrite ?(conds=Naive) = gen_auto_multi_rewrite conds Refiner.tclIDTAC let auto_multi_rewrite_with ?(conds=Naive) tac_main lbas cl gl = let onconcl = cl.Tacexpr.concl_occs <> no_occurrences_expr in match onconcl,cl.Tacexpr.onhyps with | false,Some [_] | true,Some [] | false,Some [] -> (* autorewrite with .... in clause using tac n'est sur que si clause represente soit le but soit UNE hypothese *) gen_auto_multi_rewrite conds tac_main lbas cl gl | _ -> Util.errorlabstrm "autorewrite" (strbrk "autorewrite .. in .. using can only be used either with a unique hypothesis or on the conclusion.") (* Functions necessary to the library object declaration *) let cache_hintrewrite (_,(rbase,lrl)) = let base = try find_base rbase with e when Errors.noncritical e -> HintDN.empty in let max = try fst (Util.list_last (HintDN.find_all base)) with e when Errors.noncritical e -> 0 in let lrl = HintDN.map (fun (i,h) -> (i + max, h)) lrl in rewtab:=Stringmap.add rbase (HintDN.union lrl base) !rewtab let subst_hintrewrite (subst,(rbase,list as node)) = let list' = HintDN.subst subst list in if list' == list then node else (rbase,list') let classify_hintrewrite x = Libobject.Substitute x (* Declaration of the Hint Rewrite library object *) let inHintRewrite : string * HintDN.t -> Libobject.obj = Libobject.declare_object {(Libobject.default_object "HINT_REWRITE") with Libobject.cache_function = cache_hintrewrite; Libobject.load_function = (fun _ -> cache_hintrewrite); Libobject.subst_function = subst_hintrewrite; Libobject.classify_function = classify_hintrewrite } open Clenv type hypinfo = { hyp_cl : clausenv; hyp_prf : constr; hyp_ty : types; hyp_car : constr; hyp_rel : constr; hyp_l2r : bool; hyp_left : constr; hyp_right : constr; } let evd_convertible env evd x y = try ignore(Unification.w_unify ~flags:Unification.elim_flags env evd Reduction.CONV x y); true (* try ignore(Evarconv.the_conv_x env x y evd); true *) with e when Errors.noncritical e -> false let decompose_applied_relation metas env sigma c ctype left2right = let find_rel ty = let eqclause = Clenv.mk_clenv_from_env env sigma None (c,ty) in let eqclause = if metas then eqclause else clenv_pose_metas_as_evars eqclause (Evd.undefined_metas eqclause.evd) in let (equiv, args) = decompose_app (Clenv.clenv_type eqclause) in let rec split_last_two = function | [c1;c2] -> [],(c1, c2) | x::y::z -> let l,res = split_last_two 
(y::z) in x::l, res | _ -> raise Not_found in try let others,(c1,c2) = split_last_two args in let ty1, ty2 = Typing.type_of env eqclause.evd c1, Typing.type_of env eqclause.evd c2 in (* if not (evd_convertible env eqclause.evd ty1 ty2) then None *) (* else *) Some { hyp_cl=eqclause; hyp_prf=(Clenv.clenv_value eqclause); hyp_ty = ty; hyp_car=ty1; hyp_rel=mkApp (equiv, Array.of_list others); hyp_l2r=left2right; hyp_left=c1; hyp_right=c2; } with Not_found -> None in match find_rel ctype with | Some c -> Some c | None -> let ctx,t' = Reductionops.splay_prod_assum env sigma ctype in (* Search for underlying eq *) match find_rel (it_mkProd_or_LetIn t' ctx) with | Some c -> Some c | None -> None let find_applied_relation metas loc env sigma c left2right = let ctype = Typing.type_of env sigma c in match decompose_applied_relation metas env sigma c ctype left2right with | Some c -> c | None -> user_err_loc (loc, "decompose_applied_relation", str"The type" ++ spc () ++ Printer.pr_constr_env env ctype ++ spc () ++ str"of this term does not end with an applied relation.") (* To add rewriting rules to a base *) let add_rew_rules base lrul = let counter = ref 0 in let lrul = List.fold_left (fun dn (loc,c,b,t) -> let info = find_applied_relation false loc (Global.env ()) Evd.empty c b in let pat = if b then info.hyp_left else info.hyp_right in let rul = { rew_lemma = c; rew_type = info.hyp_ty; rew_pat = pat; rew_l2r = b; rew_tac = Tacinterp.glob_tactic t} in incr counter; HintDN.add pat (!counter, rul) dn) HintDN.empty lrul in Lib.add_anonymous_leaf (inHintRewrite (base,lrul)) coq-8.4pl2/tactics/eqschemes.ml0000640000175000001440000011175512010532755015602 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* hid | InType -> xid let fresh env id = next_global_ident_away id [] let build_dependent_inductive ind (mib,mip) = let realargs,_ = list_chop mip.mind_nrealargs_ctxt mip.mind_arity_ctxt in applist (mkInd ind, extended_rel_list mip.mind_nrealargs_ctxt mib.mind_params_ctxt @ extended_rel_list 0 realargs) let my_it_mkLambda_or_LetIn s c = it_mkLambda_or_LetIn c s let my_it_mkProd_or_LetIn s c = it_mkProd_or_LetIn c s let my_it_mkLambda_or_LetIn_name s c = it_mkLambda_or_LetIn_name (Global.env()) c s let get_coq_eq () = try let eq = Libnames.destIndRef Coqlib.glob_eq in let _ = Global.lookup_inductive eq in (* Do not force the lazy if they are not defined *) mkInd eq, Coqlib.build_coq_eq_refl () with Not_found -> error "eq not found." 
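(* Illustration: the schemes in this file target one-constructor inductive
   types of the shape described in the comments below.  Coq's standard
   equality is the prototypical instance:
       Inductive eq (A:Type) (x:A) : A -> Prop := eq_refl : eq A x x
   i.e. q1 = A, p1 = x, and a single real argument a1 (the index of type A),
   which the constructor eq_refl instantiates with p1, matching the pattern
   "C : I q1..qm,p1..pn p1..pn" used below. *)
(* Minimal sketch of retrieving that inductive, mirroring [get_coq_eq] above;
   the helper name is illustrative only and assumes the prelude defining
   [Logic.eq] is available: *)
let _eq_inductive_sketch () =
  Libnames.destIndRef Coqlib.glob_eq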
(**********************************************************************) (* Check if an inductive type [ind] has the form *) (* *) (* I q1..qm,p1..pn a1..an with one constructor *) (* C : I q1..qm,p1..pn p1..pn *) (* *) (* in which case, a symmetry lemma is definable *) (**********************************************************************) let get_sym_eq_data env ind = let (mib,mip as specif) = lookup_mind_specif env ind in if Array.length mib.mind_packets <> 1 or Array.length mip.mind_nf_lc <> 1 then error "Not an inductive type with a single constructor."; let realsign,_ = list_chop mip.mind_nrealargs_ctxt mip.mind_arity_ctxt in if List.exists (fun (_,b,_) -> b <> None) realsign then error "Inductive equalities with local definitions in arity not supported."; let constrsign,ccl = decompose_prod_assum mip.mind_nf_lc.(0) in let _,constrargs = decompose_app ccl in if rel_context_length constrsign<>rel_context_length mib.mind_params_ctxt then error "Constructor must have no arguments"; (* This can be relaxed... *) let params,constrargs = list_chop mib.mind_nparams constrargs in if mip.mind_nrealargs > mib.mind_nparams then error "Constructors arguments must repeat the parameters."; let _,params2 = list_chop (mib.mind_nparams-mip.mind_nrealargs) params in let paramsctxt1,_ = list_chop (mib.mind_nparams-mip.mind_nrealargs) mib.mind_params_ctxt in if not (list_equal eq_constr params2 constrargs) then error "Constructors arguments must repeat the parameters."; (* nrealargs_ctxt and nrealargs are the same here *) (specif,mip.mind_nrealargs,realsign,mib.mind_params_ctxt,paramsctxt1) (**********************************************************************) (* Check if an inductive type [ind] has the form *) (* *) (* I q1..qm a1..an with one constructor *) (* C : I q1..qm b1..bn *) (* *) (* in which case it expresses the equalities ai=bi, but not in a way *) (* such that symmetry is a priori definable *) (**********************************************************************) let get_non_sym_eq_data env ind = let (mib,mip as specif) = lookup_mind_specif env ind in if Array.length mib.mind_packets <> 1 or Array.length mip.mind_nf_lc <> 1 then error "Not an inductive type with a single constructor."; let realsign,_ = list_chop mip.mind_nrealargs_ctxt mip.mind_arity_ctxt in if List.exists (fun (_,b,_) -> b <> None) realsign then error "Inductive equalities with local definitions in arity not supported"; let constrsign,ccl = decompose_prod_assum mip.mind_nf_lc.(0) in let _,constrargs = decompose_app ccl in if rel_context_length constrsign<>rel_context_length mib.mind_params_ctxt then error "Constructor must have no arguments"; let _,constrargs = list_chop mib.mind_nparams constrargs in (specif,constrargs,realsign,mip.mind_nrealargs) (**********************************************************************) (* Build the symmetry lemma associated to an inductive type *) (* I q1..qm,p1..pn a1..an with one constructor *) (* C : I q1..qm,p1..pn p1..pn *) (* *) (* sym := fun q1..qn p1..pn a1..an (H:I q1..qm p1..pn a1..an) => *) (* match H in I _.._ a1..an return I q1..qm a1..an p1..pn with *) (* C => C *) (* end *) (* : forall q1..qm p1..pn a1..an I q1..qm p1..pn a1..an -> *) (* I q1..qm a1..an p1..pn *) (* *) (**********************************************************************) let build_sym_scheme env ind = let (mib,mip as specif),nrealargs,realsign,paramsctxt,paramsctxt1 = get_sym_eq_data env ind in let cstr n = mkApp (mkConstruct(ind,1),extended_rel_vect n mib.mind_params_ctxt) in let varH = fresh env 
(default_id_of_sort (snd (mind_arity mip))) in let applied_ind = build_dependent_inductive ind specif in let realsign_ind = name_context env ((Name varH,None,applied_ind)::realsign) in let ci = make_case_info (Global.env()) ind RegularStyle in (my_it_mkLambda_or_LetIn mib.mind_params_ctxt (my_it_mkLambda_or_LetIn_name realsign_ind (mkCase (ci, my_it_mkLambda_or_LetIn_name (lift_rel_context (nrealargs+1) realsign_ind) (mkApp (mkInd ind,Array.concat [extended_rel_vect (3*nrealargs+2) paramsctxt1; rel_vect 1 nrealargs; rel_vect (2*nrealargs+2) nrealargs])), mkRel 1 (* varH *), [|cstr (nrealargs+1)|])))) let sym_scheme_kind = declare_individual_scheme_object "_sym_internal" (fun ind -> build_sym_scheme (Global.env() (* side-effect! *)) ind) (**********************************************************************) (* Build the involutivity of symmetry for an inductive type *) (* I q1..qm,p1..pn a1..an with one constructor *) (* C : I q1..qm,p1..pn p1..pn *) (* *) (* inv := fun q1..qn p1..pn a1..an (H:I q1..qm p1..pn a1..an) => *) (* match H in I _.._ a1..an return *) (* sym q1..qm p1..pn a1..an (sym q1..qm a1..an p1..pn H) = H *) (* with *) (* C => refl_equal C *) (* end *) (* : forall q1..qm p1..pn a1..an (H:I q1..qm a1..an p1..pn), *) (* sym q1..qm p1..pn a1..an (sym q1..qm a1..an p1..pn H) = H *) (* *) (**********************************************************************) let build_sym_involutive_scheme env ind = let (mib,mip as specif),nrealargs,realsign,paramsctxt,paramsctxt1 = get_sym_eq_data env ind in let sym = mkConst (find_scheme sym_scheme_kind ind) in let (eq,eqrefl) = get_coq_eq () in let cstr n = mkApp (mkConstruct(ind,1),extended_rel_vect n paramsctxt) in let varH = fresh env (default_id_of_sort (snd (mind_arity mip))) in let applied_ind = build_dependent_inductive ind specif in let applied_ind_C = mkApp (mkInd ind, Array.append (extended_rel_vect (nrealargs+1) mib.mind_params_ctxt) (rel_vect (nrealargs+1) nrealargs)) in let realsign_ind = name_context env ((Name varH,None,applied_ind)::realsign) in let ci = make_case_info (Global.env()) ind RegularStyle in (my_it_mkLambda_or_LetIn paramsctxt (my_it_mkLambda_or_LetIn_name realsign_ind (mkCase (ci, my_it_mkLambda_or_LetIn_name (lift_rel_context (nrealargs+1) realsign_ind) (mkApp (eq,[| mkApp (mkInd ind, Array.concat [extended_rel_vect (3*nrealargs+2) paramsctxt1; rel_vect (2*nrealargs+2) nrealargs; rel_vect 1 nrealargs]); mkApp (sym,Array.concat [extended_rel_vect (3*nrealargs+2) paramsctxt1; rel_vect 1 nrealargs; rel_vect (2*nrealargs+2) nrealargs; [|mkApp (sym,Array.concat [extended_rel_vect (3*nrealargs+2) paramsctxt1; rel_vect (2*nrealargs+2) nrealargs; rel_vect 1 nrealargs; [|mkRel 1|]])|]]); mkRel 1|])), mkRel 1 (* varH *), [|mkApp(eqrefl,[|applied_ind_C;cstr (nrealargs+1)|])|])))) let sym_involutive_scheme_kind = declare_individual_scheme_object "_sym_involutive" (fun ind -> build_sym_involutive_scheme (Global.env() (* side-effect! 
*)) ind) (**********************************************************************) (* Build the left-to-right rewriting lemma for conclusion associated *) (* to an inductive type I q1..qm,p1..pn a1..an with one constructor *) (* C : I q1..qm,p1..pn p1..pn *) (* (symmetric equality in non-dependent and dependent cases) *) (* *) (* We could have defined the scheme in one match over a generalized *) (* type but this behaves badly wrt the guard condition, so we use *) (* symmetry instead; with commutative-cuts-aware guard condition a *) (* proof in the style of l2r_forward is also possible (see below) *) (* *) (* rew := fun q1..qm p1..pn a1..an *) (* (P:forall p1..pn, I q1..qm p1..pn a1..an -> kind) *) (* (HC:P a1..an C) *) (* (H:I q1..qm p1..pn a1..an) => *) (* match sym_involutive q1..qm p1..pn a1..an H as Heq *) (* in _ = H return P p1..pn H *) (* with *) (* refl => *) (* match sym q1..qm p1..pn a1..an H as H *) (* in I _.._ p1..pn *) (* return P p1..pn (sym q1..qm a1..an p1..pn H) *) (* with *) (* C => HC *) (* end *) (* end *) (* : forall q1..qn p1..pn a1..an *) (* (P:forall p1..pn, I q1..qm p1..pn a1..an -> kind), *) (* P a1..an C -> *) (* forall (H:I q1..qm p1..pn a1..an), P p1..pn H *) (* *) (* where A1..An are the common types of p1..pn and a1..an *) (* *) (* Note: the symmetry is needed in the dependent case since the *) (* dependency is on the inner arguments (the indices in C) and these *) (* inner arguments need to be visible as parameters to be able to *) (* abstract over them in P. *) (**********************************************************************) (**********************************************************************) (* For information, the alternative proof of dependent l2r_rew scheme *) (* that would use commutative cuts is the following *) (* *) (* rew := fun q1..qm p1..pn a1..an *) (* (P:forall p1..pn, I q1..qm p1..pn a1..an -> kind) *) (* (HC:P a1..an C) *) (* (H:I q1..qm p1..pn a1..an) => *) (* match H in I .._.. 
a1..an return *) (* forall p1..pn, I q1..qm p1..pn a1..an -> kind), *) (* P a1..an C -> P p1..pn H *) (* with *) (* C => fun P HC => HC *) (* end P HC *) (* : forall q1..qn p1..pn a1..an *) (* (P:forall p1..pn, I q1..qm p1..pn a1..an -> kind), *) (* P a1..an C -> *) (* forall (H:I q1..qm p1..pn a1..an), P p1..pn H *) (* *) (**********************************************************************) let build_l2r_rew_scheme dep env ind kind = let (mib,mip as specif),nrealargs,realsign,paramsctxt,paramsctxt1 = get_sym_eq_data env ind in let sym = mkConst (find_scheme sym_scheme_kind ind) in let sym_involutive = mkConst (find_scheme sym_involutive_scheme_kind ind) in let (eq,eqrefl) = get_coq_eq () in let cstr n p = mkApp (mkConstruct(ind,1), Array.concat [extended_rel_vect n paramsctxt1; rel_vect p nrealargs]) in let varH = fresh env (default_id_of_sort (snd (mind_arity mip))) in let varHC = fresh env (id_of_string "HC") in let varP = fresh env (id_of_string "P") in let applied_ind = build_dependent_inductive ind specif in let applied_ind_P = mkApp (mkInd ind, Array.concat [extended_rel_vect (3*nrealargs) paramsctxt1; rel_vect 0 nrealargs; rel_vect nrealargs nrealargs]) in let applied_ind_G = mkApp (mkInd ind, Array.concat [extended_rel_vect (3*nrealargs+3) paramsctxt1; rel_vect (nrealargs+3) nrealargs; rel_vect 0 nrealargs]) in let realsign_P = lift_rel_context nrealargs realsign in let realsign_ind_P = name_context env ((Name varH,None,applied_ind_P)::realsign_P) in let realsign_ind_G = name_context env ((Name varH,None,applied_ind_G):: lift_rel_context (nrealargs+3) realsign) in let applied_sym_C n = mkApp(sym, Array.append (extended_rel_vect n mip.mind_arity_ctxt) [|mkVar varH|]) in let applied_sym_G = mkApp(sym, Array.concat [extended_rel_vect (nrealargs*3+4) paramsctxt1; rel_vect (nrealargs+4) nrealargs; rel_vect 1 nrealargs; [|mkRel 1|]]) in let s = mkSort (new_sort_in_family kind) in let ci = make_case_info (Global.env()) ind RegularStyle in let cieq = make_case_info (Global.env()) (destInd eq) RegularStyle in let applied_PC = mkApp (mkVar varP,Array.append (extended_rel_vect 1 realsign) (if dep then [|cstr (2*nrealargs+1) 1|] else [||])) in let applied_PG = mkApp (mkVar varP,Array.append (rel_vect 1 nrealargs) (if dep then [|applied_sym_G|] else [||])) in let applied_PR = mkApp (mkVar varP,Array.append (rel_vect (nrealargs+5) nrealargs) (if dep then [|mkRel 2|] else [||])) in let applied_sym_sym = mkApp (sym,Array.concat [extended_rel_vect (2*nrealargs+4) paramsctxt1; rel_vect 4 nrealargs; rel_vect (nrealargs+4) nrealargs; [|mkApp (sym,Array.concat [extended_rel_vect (2*nrealargs+4) paramsctxt1; rel_vect (nrealargs+4) nrealargs; rel_vect 4 nrealargs; [|mkRel 2|]])|]]) in let main_body = mkCase (ci, my_it_mkLambda_or_LetIn_name realsign_ind_G applied_PG, applied_sym_C 3, [|mkVar varHC|]) in (my_it_mkLambda_or_LetIn mib.mind_params_ctxt (my_it_mkLambda_or_LetIn_name realsign (mkNamedLambda varP (my_it_mkProd_or_LetIn (if dep then realsign_ind_P else realsign_P) s) (mkNamedLambda varHC applied_PC (mkNamedLambda varH (lift 2 applied_ind) (if dep then (* we need a coercion *) mkCase (cieq, mkLambda (Name varH,lift 3 applied_ind, mkLambda (Anonymous, mkApp (eq,[|lift 4 applied_ind;applied_sym_sym;mkRel 1|]), applied_PR)), mkApp (sym_involutive, Array.append (extended_rel_vect 3 mip.mind_arity_ctxt) [|mkVar varH|]), [|main_body|]) else main_body)))))) (**********************************************************************) (* Build the left-to-right rewriting lemma for hypotheses associated *) 
(* to an inductive type I q1..qm,p1..pn a1..an with one constructor *) (* C : I q1..qm,p1..pn p1..pn *) (* (symmetric equality in non dependent and dependent cases) *) (* *) (* rew := fun q1..qm p1..pn a1..an (H:I q1..qm p1..pn a1..an) *) (* match H in I _.._ a1..an *) (* return forall *) (* (P:forall p1..pn, I q1..qm p1..pn a1..an -> kind) *) (* (HC:P p1..pn H) => *) (* P a1..an C *) (* with *) (* C => fun P HC => HC *) (* end *) (* : forall q1..qm p1..pn a1..an *) (* (H:I q1..qm p1..pn a1..an) *) (* (P:forall p1..pn, I q1..qm p1..pn a1..an ->kind), *) (* P p1..pn H -> P a1..an C *) (* *) (* Note: the symmetry is needed in the dependent case since the *) (* dependency is on the inner arguments (the indices in C) and these *) (* inner arguments need to be visible as parameters to be able to *) (* abstract over them in P. *) (**********************************************************************) let build_l2r_forward_rew_scheme dep env ind kind = let (mib,mip as specif),nrealargs,realsign,paramsctxt,paramsctxt1 = get_sym_eq_data env ind in let cstr n p = mkApp (mkConstruct(ind,1), Array.concat [extended_rel_vect n paramsctxt1; rel_vect p nrealargs]) in let varH = fresh env (default_id_of_sort (snd (mind_arity mip))) in let varHC = fresh env (id_of_string "HC") in let varP = fresh env (id_of_string "P") in let applied_ind = build_dependent_inductive ind specif in let applied_ind_P = mkApp (mkInd ind, Array.concat [extended_rel_vect (4*nrealargs+2) paramsctxt1; rel_vect 0 nrealargs; rel_vect (nrealargs+1) nrealargs]) in let applied_ind_P' = mkApp (mkInd ind, Array.concat [extended_rel_vect (3*nrealargs+1) paramsctxt1; rel_vect 0 nrealargs; rel_vect (2*nrealargs+1) nrealargs]) in let realsign_P n = lift_rel_context (nrealargs*n+n) realsign in let realsign_ind = name_context env ((Name varH,None,applied_ind)::realsign) in let realsign_ind_P n aP = name_context env ((Name varH,None,aP)::realsign_P n) in let s = mkSort (new_sort_in_family kind) in let ci = make_case_info (Global.env()) ind RegularStyle in let applied_PC = mkApp (mkVar varP,Array.append (rel_vect (nrealargs*2+3) nrealargs) (if dep then [|mkRel 2|] else [||])) in let applied_PC' = mkApp (mkVar varP,Array.append (rel_vect (nrealargs+2) nrealargs) (if dep then [|cstr (2*nrealargs+2) (nrealargs+2)|] else [||])) in let applied_PG = mkApp (mkVar varP,Array.append (rel_vect 3 nrealargs) (if dep then [|cstr (3*nrealargs+4) 3|] else [||])) in (my_it_mkLambda_or_LetIn mib.mind_params_ctxt (my_it_mkLambda_or_LetIn_name realsign (mkNamedLambda varH applied_ind (mkCase (ci, my_it_mkLambda_or_LetIn_name (lift_rel_context (nrealargs+1) realsign_ind) (mkNamedProd varP (my_it_mkProd_or_LetIn (if dep then realsign_ind_P 2 applied_ind_P else realsign_P 2) s) (mkNamedProd varHC applied_PC applied_PG)), (mkVar varH), [|mkNamedLambda varP (my_it_mkProd_or_LetIn (if dep then realsign_ind_P 1 applied_ind_P' else realsign_P 2) s) (mkNamedLambda varHC applied_PC' (mkVar varHC))|]))))) (**********************************************************************) (* Build the right-to-left rewriting lemma for hypotheses associated *) (* to an inductive type I q1..qm a1..an with one constructor *) (* C : I q1..qm b1..bn *) (* (arbitrary equality in non-dependent and dependent cases) *) (* *) (* rew := fun q1..qm a1..an (H:I q1..qm a1..an) *) (* (P:forall a1..an, I q1..qm a1..an -> kind) *) (* (HC:P a1..an H) => *) (* match H in I _.._ a1..an return P a1..an H -> P b1..bn C *) (* with *) (* C => fun x => x *) (* end HC *) (* : forall q1..pm a1..an (H:I q1..qm 
a1..an) *) (* (P:forall a1..an, I q1..qm a1..an -> kind), *) (* P a1..an H -> P b1..bn C *) (* *) (* Note that the dependent elimination here is not a dependency *) (* in the conclusion of the scheme but a dependency in the premise of *) (* the scheme. This is unfortunately incompatible with the standard *) (* pattern for schemes in Coq which expects that the eliminated *) (* object is the last premise of the scheme. We then have no choice *) (* than following the more liberal pattern of having the eliminated *) (* object coming before the premises. *) (* *) (* Note that in the non-dependent case, this scheme (up to the order *) (* of premises) generalizes the (backward) l2r scheme above: same *) (* statement but no need for symmetry of the equality. *) (**********************************************************************) let build_r2l_forward_rew_scheme dep env ind kind = let ((mib,mip as specif),constrargs,realsign,nrealargs) = get_non_sym_eq_data env ind in let cstr n = mkApp (mkConstruct(ind,1),extended_rel_vect n mib.mind_params_ctxt) in let constrargs_cstr = constrargs@[cstr 0] in let varH = fresh env (default_id_of_sort (snd (mind_arity mip))) in let varHC = fresh env (id_of_string "HC") in let varP = fresh env (id_of_string "P") in let applied_ind = build_dependent_inductive ind specif in let realsign_ind = name_context env ((Name varH,None,applied_ind)::realsign) in let s = mkSort (new_sort_in_family kind) in let ci = make_case_info (Global.env()) ind RegularStyle in let applied_PC = applist (mkVar varP,if dep then constrargs_cstr else constrargs) in let applied_PG = mkApp (mkVar varP, if dep then extended_rel_vect 0 realsign_ind else extended_rel_vect 1 realsign) in (my_it_mkLambda_or_LetIn mib.mind_params_ctxt (my_it_mkLambda_or_LetIn_name realsign_ind (mkNamedLambda varP (my_it_mkProd_or_LetIn (lift_rel_context (nrealargs+1) (if dep then realsign_ind else realsign)) s) (mkNamedLambda varHC (lift 1 applied_PG) (mkApp (mkCase (ci, my_it_mkLambda_or_LetIn_name (lift_rel_context (nrealargs+3) realsign_ind) (mkArrow applied_PG (lift (2*nrealargs+5) applied_PC)), mkRel 3 (* varH *), [|mkLambda (Name varHC, lift (nrealargs+3) applied_PC, mkRel 1)|]), [|mkVar varHC|])))))) (**********************************************************************) (* This function "repairs" the non-dependent r2l forward rewriting *) (* scheme by making it comply with the standard pattern of schemes *) (* in Coq. 
Otherwise said, it turns a scheme of type *) (* *) (* forall q1..pm a1..an, I q1..qm a1..an -> *) (* forall (P: forall a1..an, kind), *) (* P a1..an -> P b1..bn *) (* *) (* into a scheme of type *) (* *) (* forall q1..pm (P:forall a1..an, kind), *) (* P a1..an -> forall a1..an, I q1..qm a1..an -> P b1..bn *) (* *) (**********************************************************************) let fix_r2l_forward_rew_scheme c = let t = Retyping.get_type_of (Global.env()) Evd.empty c in let ctx,_ = decompose_prod_assum t in match ctx with | hp :: p :: ind :: indargs -> my_it_mkLambda_or_LetIn indargs (mkLambda_or_LetIn (map_rel_declaration (liftn (-1) 1) p) (mkLambda_or_LetIn (map_rel_declaration (liftn (-1) 2) hp) (mkLambda_or_LetIn (map_rel_declaration (lift 2) ind) (Reductionops.whd_beta Evd.empty (applist (c, extended_rel_list 3 indargs @ [mkRel 1;mkRel 3;mkRel 2])))))) | _ -> anomaly "Ill-formed non-dependent left-to-right rewriting scheme" (**********************************************************************) (* Build the right-to-left rewriting lemma for conclusion associated *) (* to an inductive type I q1..qm a1..an with one constructor *) (* C : I q1..qm b1..bn *) (* (arbitrary equality in non-dependent and dependent case) *) (* *) (* This is actually the standard case analysis scheme *) (* *) (* rew := fun q1..qm a1..an *) (* (P:forall a1..an, I q1..qm a1..an -> kind) *) (* (H:I q1..qm a1..an) *) (* (HC:P b1..bn C) => *) (* match H in I _.._ a1..an return P a1..an H with *) (* C => HC *) (* end *) (* : forall q1..pm a1..an *) (* (P:forall a1..an, I q1..qm a1..an -> kind) *) (* (H:I q1..qm a1..an), *) (* P b1..bn C -> P a1..an H *) (**********************************************************************) let build_r2l_rew_scheme dep env ind k = build_case_analysis_scheme env Evd.empty ind dep k (**********************************************************************) (* Register the rewriting schemes *) (**********************************************************************) (**********************************************************************) (* Dependent rewrite from left-to-right in conclusion *) (* (symmetrical equality type only) *) (* Gamma |- P p1..pn H ==> Gamma |- P a1..an C *) (* with H:I p1..pn a1..an in Gamma *) (**********************************************************************) let rew_l2r_dep_scheme_kind = declare_individual_scheme_object "_rew_r_dep" (fun ind -> build_l2r_rew_scheme true (Global.env()) ind InType) (**********************************************************************) (* Dependent rewrite from right-to-left in conclusion *) (* Gamma |- P a1..an H ==> Gamma |- P b1..bn C *) (* with H:I a1..an in Gamma (non symmetric case) *) (* or H:I b1..bn a1..an in Gamma (symmetric case) *) (**********************************************************************) let rew_r2l_dep_scheme_kind = declare_individual_scheme_object "_rew_dep" (fun ind -> build_r2l_rew_scheme true (Global.env()) ind InType) (**********************************************************************) (* Dependent rewrite from right-to-left in hypotheses *) (* Gamma, P a1..an H |- D ==> Gamma, P b1..bn C |- D *) (* with H:I a1..an in Gamma (non symmetric case) *) (* or H:I b1..bn a1..an in Gamma (symmetric case) *) (**********************************************************************) let rew_r2l_forward_dep_scheme_kind = declare_individual_scheme_object "_rew_fwd_dep" (fun ind -> build_r2l_forward_rew_scheme true (Global.env()) ind InType) 
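(* Hedged usage sketch (the helper name below is illustrative only): a
   registered scheme kind is consumed through [find_scheme], which returns
   the constant implementing the scheme for a given inductive type,
   generating it on demand; this is exactly how [sym_scheme_kind] is used
   earlier in this file.  For the dependent forward rewriting scheme just
   declared: *)
let _rew_fwd_dep_constant ind =
  mkConst (find_scheme rew_r2l_forward_dep_scheme_kind ind)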
(**********************************************************************) (* Dependent rewrite from left-to-right in hypotheses *) (* (symmetrical equality type only) *) (* Gamma, P p1..pn H |- D ==> Gamma, P a1..an C |- D *) (* with H:I p1..pn a1..an in Gamma *) (**********************************************************************) let rew_l2r_forward_dep_scheme_kind = declare_individual_scheme_object "_rew_fwd_r_dep" (fun ind -> build_l2r_forward_rew_scheme true (Global.env()) ind InType) (**********************************************************************) (* Non-dependent rewrite from either left-to-right in conclusion or *) (* right-to-left in hypotheses: both l2r_rew and r2l_forward_rew are *) (* potential candidates. Since l2r_rew needs a symmetrical equality, *) (* we adopt r2l_forward_rew (this one introduces a blocked beta- *) (* expansion but since the guard condition supports commutative cuts *) (* this is not a problem; we need though a fix to adjust it to the *) (* standard form of schemes in Coq) *) (**********************************************************************) let rew_l2r_scheme_kind = declare_individual_scheme_object "_rew_r" (fun ind -> fix_r2l_forward_rew_scheme (build_r2l_forward_rew_scheme false (Global.env()) ind InType)) (**********************************************************************) (* Non-dependent rewrite from either right-to-left in conclusion or *) (* left-to-right in hypotheses: both r2l_rew and l2r_forward_rew but *) (* since r2l_rew works in the non-symmetric case as well as without *) (* introducing commutative cuts, we adopt it *) (**********************************************************************) let rew_r2l_scheme_kind = declare_individual_scheme_object "_rew" (fun ind -> build_r2l_rew_scheme false (Global.env()) ind InType) (* End of rewriting schemes *) (**********************************************************************) (* Build the congruence lemma associated to an inductive type *) (* I p1..pn a with one constructor C : I q1..qn b *) (* *) (* congr := fun p1..pn (B:Type) (f:A->B) a (H:I p1..pn a) => *) (* match H in I _.._ a' return f b = f a' with *) (* C => eq_refl (f b) *) (* end *) (* : forall p1..pn (B:Type) (f:A->B) a, I p1..pn a -> f b = f a *) (* *) (* where A is the common type of a and b *) (**********************************************************************) (* TODO: extend it to types with more than one index *) let build_congr env (eq,refl) ind = let (mib,mip) = lookup_mind_specif env ind in if Array.length mib.mind_packets <> 1 or Array.length mip.mind_nf_lc <> 1 then error "Not an inductive type with a single constructor."; if mip.mind_nrealargs <> 1 then error "Expect an inductive type with one predicate parameter."; let i = 1 in let realsign,_ = list_chop mip.mind_nrealargs_ctxt mip.mind_arity_ctxt in if List.exists (fun (_,b,_) -> b <> None) realsign then error "Inductive equalities with local definitions in arity not supported."; let env_with_arity = push_rel_context mip.mind_arity_ctxt env in let (_,_,ty) = lookup_rel (mip.mind_nrealargs - i + 1) env_with_arity in let constrsign,ccl = decompose_prod_assum mip.mind_nf_lc.(0) in let _,constrargs = decompose_app ccl in if rel_context_length constrsign<>rel_context_length mib.mind_params_ctxt then error "Constructor must have no arguments"; let b = List.nth constrargs (i + mib.mind_nparams - 1) in let varB = fresh env (id_of_string "B") in let varH = fresh env (id_of_string "H") in let varf = fresh env (id_of_string "f") in let ci = make_case_info 
(Global.env()) ind RegularStyle in my_it_mkLambda_or_LetIn mib.mind_params_ctxt (mkNamedLambda varB (new_Type ()) (mkNamedLambda varf (mkArrow (lift 1 ty) (mkVar varB)) (my_it_mkLambda_or_LetIn_name (lift_rel_context 2 realsign) (mkNamedLambda varH (applist (mkInd ind, extended_rel_list (mip.mind_nrealargs+2) mib.mind_params_ctxt @ extended_rel_list 0 realsign)) (mkCase (ci, my_it_mkLambda_or_LetIn_name (lift_rel_context (mip.mind_nrealargs+3) realsign) (mkLambda (Anonymous, applist (mkInd ind, extended_rel_list (2*mip.mind_nrealargs_ctxt+3) mib.mind_params_ctxt @ extended_rel_list 0 realsign), mkApp (eq, [|mkVar varB; mkApp (mkVar varf, [|lift (2*mip.mind_nrealargs_ctxt+4) b|]); mkApp (mkVar varf, [|mkRel (mip.mind_nrealargs - i + 2)|])|]))), mkVar varH, [|mkApp (refl, [|mkVar varB; mkApp (mkVar varf, [|lift (mip.mind_nrealargs+3) b|])|])|])))))) let congr_scheme_kind = declare_individual_scheme_object "_congr" (fun ind -> (* May fail if equality is not defined *) build_congr (Global.env()) (get_coq_eq ()) ind) coq-8.4pl2/tactics/tacticals.mli0000640000175000001440000002034112010532755015733 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tactic val tclORELSE0 : tactic -> tactic -> tactic val tclORELSE : tactic -> tactic -> tactic val tclTHEN : tactic -> tactic -> tactic val tclTHENSEQ : tactic list -> tactic val tclTHENLIST : tactic list -> tactic val tclTHEN_i : tactic -> (int -> tactic) -> tactic val tclTHENFIRST : tactic -> tactic -> tactic val tclTHENLAST : tactic -> tactic -> tactic val tclTHENS : tactic -> tactic list -> tactic val tclTHENSV : tactic -> tactic array -> tactic val tclTHENSLASTn : tactic -> tactic -> tactic array -> tactic val tclTHENLASTn : tactic -> tactic array -> tactic val tclTHENSFIRSTn : tactic -> tactic array -> tactic -> tactic val tclTHENFIRSTn : tactic -> tactic array -> tactic val tclREPEAT : tactic -> tactic val tclREPEAT_MAIN : tactic -> tactic val tclFIRST : tactic list -> tactic val tclSOLVE : tactic list -> tactic val tclTRY : tactic -> tactic val tclCOMPLETE : tactic -> tactic val tclAT_LEAST_ONCE : tactic -> tactic val tclFAIL : int -> std_ppcmds -> tactic val tclFAIL_lazy : int -> std_ppcmds Lazy.t -> tactic val tclDO : int -> tactic -> tactic val tclWEAK_PROGRESS : tactic -> tactic val tclPROGRESS : tactic -> tactic val tclNOTSAMEGOAL : tactic -> tactic val tclTHENTRY : tactic -> tactic -> tactic val tclMAP : ('a -> tactic) -> 'a list -> tactic val tclIFTHENELSE : tactic -> tactic -> tactic -> tactic val tclIFTHENSELSE : tactic -> tactic list -> tactic -> tactic val tclIFTHENSVELSE : tactic -> tactic array -> tactic -> tactic val tclIFTHENTRYELSEMUST : tactic -> tactic -> tactic val tclFIRST_PROGRESS_ON : ('a -> tactic) -> 'a list -> tactic (** {6 Tacticals applying to hypotheses } *) val onNthHypId : int -> (identifier -> tactic) -> tactic val onNthHyp : int -> (constr -> tactic) -> tactic val onNthDecl : int -> (named_declaration -> tactic) -> tactic val onLastHypId : (identifier -> tactic) -> tactic val onLastHyp : (constr -> tactic) -> tactic val onLastDecl : (named_declaration -> tactic) -> tactic val onNLastHypsId : int -> (identifier list -> tactic) -> tactic val onNLastHyps : int -> (constr list -> tactic) -> tactic val onNLastDecls : int -> (named_context -> tactic) -> tactic val lastHypId : goal sigma -> identifier val lastHyp : goal sigma -> constr val lastDecl : goal sigma -> named_declaration val nLastHypsId : int -> goal sigma 
-> identifier list val nLastHyps : int -> goal sigma -> constr list val nLastDecls : int -> goal sigma -> named_context val afterHyp : identifier -> goal sigma -> named_context val ifOnHyp : (identifier * types -> bool) -> (identifier -> tactic) -> (identifier -> tactic) -> identifier -> tactic val onHyps : (goal sigma -> named_context) -> (named_context -> tactic) -> tactic (** {6 Tacticals applying to goal components } *) (** A [simple_clause] is a set of hypotheses, possibly extended with the conclusion (conclusion is represented by None) *) type simple_clause = identifier option list (** A [clause] denotes occurrences and hypotheses in a goal; in particular, it can abstractly refer to the set of hypotheses independently of the effective contents of the current goal *) type clause = identifier gclause val simple_clause_of : clause -> goal sigma -> simple_clause val allHypsAndConcl : clause val allHyps : clause val onHyp : identifier -> clause val onConcl : clause val tryAllHyps : (identifier -> tactic) -> tactic val tryAllHypsAndConcl : (identifier option -> tactic) -> tactic val onAllHyps : (identifier -> tactic) -> tactic val onAllHypsAndConcl : (identifier option -> tactic) -> tactic val onClause : (identifier option -> tactic) -> clause -> tactic val onClauseLR : (identifier option -> tactic) -> clause -> tactic (** {6 An intermediate form of occurrence clause with no mention of occurrences } *) (** A [hyp_location] is an hypothesis together with a position, in body if any, in type or in both *) type hyp_location = identifier * hyp_location_flag (** A [goal_location] is either an hypothesis (together with a position, in body if any, in type or in both) or the goal *) type goal_location = hyp_location option (** {6 A concrete view of occurrence clauses } *) (** [clause_atom] refers either to an hypothesis location (i.e. an hypothesis with occurrences and a position, in body if any, in type or in both) or to some occurrences of the conclusion *) type clause_atom = | OnHyp of identifier * occurrences_expr * hyp_location_flag | OnConcl of occurrences_expr (** A [concrete_clause] is an effective collection of occurrences in the hypotheses and the conclusion *) type concrete_clause = clause_atom list (** This interprets an [clause] in a given [goal] context *) val concrete_clause_of : clause -> goal sigma -> concrete_clause (** {6 Elimination tacticals. } *) type branch_args = { ity : inductive; (** the type we were eliminating on *) largs : constr list; (** its arguments *) branchnum : int; (** the branch number *) pred : constr; (** the predicate we used *) nassums : int; (** the number of assumptions to be introduced *) branchsign : bool list; (** the signature of the branch. 
true=recursive argument, false=constant *) branchnames : intro_pattern_expr located list} type branch_assumptions = { ba : branch_args; (** the branch args *) assums : named_context} (** the list of assumptions introduced *) (** [check_disjunctive_pattern_size loc pats n] returns an appropriate error message if |pats| <> n *) val check_or_and_pattern_size : Util.loc -> or_and_intro_pattern_expr -> int -> unit (** Tolerate "[]" to mean a disjunctive pattern of any length *) val fix_empty_or_and_pattern : int -> or_and_intro_pattern_expr -> or_and_intro_pattern_expr (** Useful for [as intro_pattern] modifier *) val compute_induction_names : int -> intro_pattern_expr located option -> intro_pattern_expr located list array val elimination_sort_of_goal : goal sigma -> sorts_family val elimination_sort_of_hyp : identifier -> goal sigma -> sorts_family val elimination_sort_of_clause : identifier option -> goal sigma -> sorts_family val general_elim_then_using : (inductive -> goal sigma -> constr) -> rec_flag -> intro_pattern_expr located option -> (branch_args -> tactic) -> constr option -> (arg_bindings * arg_bindings) -> inductive -> clausenv -> tactic val elimination_then_using : (branch_args -> tactic) -> constr option -> (arg_bindings * arg_bindings) -> constr -> tactic val elimination_then : (branch_args -> tactic) -> (arg_bindings * arg_bindings) -> constr -> tactic val case_then_using : intro_pattern_expr located option -> (branch_args -> tactic) -> constr option -> (arg_bindings * arg_bindings) -> inductive -> clausenv -> tactic val case_nodep_then_using : intro_pattern_expr located option -> (branch_args -> tactic) -> constr option -> (arg_bindings * arg_bindings) -> inductive -> clausenv -> tactic val simple_elimination_then : (branch_args -> tactic) -> constr -> tactic val elim_on_ba : (branch_assumptions -> tactic) -> branch_args -> tactic val case_on_ba : (branch_assumptions -> tactic) -> branch_args -> tactic coq-8.4pl2/tactics/refine.mli0000640000175000001440000000110312010532755015227 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tactic coq-8.4pl2/tactics/eqschemes.mli0000640000175000001440000000343612010532755015747 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* env -> inductive -> sorts_family -> constr val build_l2r_rew_scheme : bool -> env -> inductive -> sorts_family -> constr val build_r2l_forward_rew_scheme : bool -> env -> inductive -> sorts_family -> constr val build_l2r_forward_rew_scheme : bool -> env -> inductive -> sorts_family -> constr (** Builds a symmetry scheme for a symmetrical equality type *) val build_sym_scheme : env -> inductive -> constr val sym_scheme_kind : individual scheme_kind val build_sym_involutive_scheme : env -> inductive -> constr val sym_involutive_scheme_kind : individual scheme_kind (** Builds a congruence scheme for an equality type *) val congr_scheme_kind : individual scheme_kind val build_congr : env -> constr * constr -> inductive -> constr coq-8.4pl2/tactics/eauto.mli0000640000175000001440000000251212010532755015101 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr -> tactic val gen_eauto : ?debug:Tacexpr.debug -> bool * int -> open_constr list -> hint_db_name list option -> tactic val eauto_with_bases : 
?debug:Tacexpr.debug -> bool * int -> open_constr list -> Auto.hint_db list -> Proof_type.tactic val autounfold : hint_db_name list -> Tacticals.clause -> tactic coq-8.4pl2/tactics/elim.ml0000640000175000001440000001246312010532755014547 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* if b then acc+2 else acc+1) 0 ba.branchsign in let introElimAssums = tclDO nassums intro in (tclTHEN introElimAssums (elim_on_ba tac ba)) let introCaseAssumsThen tac ba = let case_thin_sign = List.flatten (List.map (function b -> if b then [false;true] else [false]) ba.branchsign) in let n1 = List.length case_thin_sign in let n2 = List.length ba.branchnames in let (l1,l2),l3 = if n1 < n2 then list_chop n1 ba.branchnames, [] else (ba.branchnames, []), if n1 > n2 then snd (list_chop n2 case_thin_sign) else [] in let introCaseAssums = tclTHEN (intros_pattern no_move l1) (intros_clearing l3) in (tclTHEN introCaseAssums (case_on_ba (tac l2) ba)) (* The following tactic Decompose repeatedly applies the elimination(s) rule(s) of the types satisfying the predicate ``recognizer'' onto a certain hypothesis. For example : Require Elim. Require Le. Goal (y:nat){x:nat | (le O x)/\(le x y)}->{x:nat | (le O x)}. Intros y H. Decompose [sig and] H;EAuto. Qed. Another example : Goal (A,B,C:Prop)(A/\B/\C \/ B/\C \/ C/\A) -> C. Intros A B C H; Decompose [and or] H; Assumption. Qed. *) let elimHypThen tac id gl = elimination_then tac ([],[]) (mkVar id) gl let rec general_decompose_on_hyp recognizer = ifOnHyp recognizer (general_decompose_aux recognizer) (fun _ -> tclIDTAC) and general_decompose_aux recognizer id = elimHypThen (introElimAssumsThen (fun bas -> tclTHEN (clear [id]) (tclMAP (general_decompose_on_hyp recognizer) (ids_of_named_context bas.assums)))) id (* Faudrait ajouter un COMPLETE pour que l'hypothse cre ne reste pas si aucune limination n'est possible *) (* Meilleures stratgies mais perte de compatibilit *) let tmphyp_name = id_of_string "_TmpHyp" let up_to_delta = ref false (* true *) let general_decompose recognizer c gl = let typc = pf_type_of gl c in tclTHENSV (cut typc) [| tclTHEN (intro_using tmphyp_name) (onLastHypId (ifOnHyp recognizer (general_decompose_aux recognizer) (fun id -> clear [id]))); exact_no_check c |] gl let head_in gls indl t = try let ity,_ = if !up_to_delta then find_mrectype (pf_env gls) (project gls) t else extract_mrectype t in List.mem ity indl with Not_found -> false let decompose_these c l gls = let indl = (*List.map inductive_of*) l in general_decompose (fun (_,t) -> head_in gls indl t) c gls let decompose_nonrec c gls = general_decompose (fun (_,t) -> is_non_recursive_type t) c gls let decompose_and c gls = general_decompose (fun (_,t) -> is_record t) c gls let decompose_or c gls = general_decompose (fun (_,t) -> is_disjunction t) c gls let h_decompose l c = Refiner.abstract_tactic (TacDecompose (l,c)) (decompose_these c l) let h_decompose_or c = Refiner.abstract_tactic (TacDecomposeOr c) (decompose_or c) let h_decompose_and c = Refiner.abstract_tactic (TacDecomposeAnd c) (decompose_and c) (* The tactic Double performs a double induction *) let simple_elimination c gls = simple_elimination_then (fun _ -> tclIDTAC) c gls let induction_trailer abs_i abs_j bargs = tclTHEN (tclDO (abs_j - abs_i) intro) (onLastHypId (fun id gls -> let idty = pf_type_of gls (mkVar id) in let fvty = global_vars (pf_env gls) idty in let possible_bring_hyps = (List.tl (nLastDecls (abs_j - abs_i) gls)) 
@ bargs.assums in let (hyps,_) = List.fold_left (fun (bring_ids,leave_ids) (cid,_,cidty as d) -> if not (List.mem cid leave_ids) then (d::bring_ids,leave_ids) else (bring_ids,cid::leave_ids)) ([],fvty) possible_bring_hyps in let ids = List.rev (ids_of_named_context hyps) in (tclTHENSEQ [bring_hyps hyps; tclTRY (clear ids); simple_elimination (mkVar id)]) gls)) let double_ind h1 h2 gls = let abs_i = depth_of_quantified_hypothesis true h1 gls in let abs_j = depth_of_quantified_hypothesis true h2 gls in let (abs_i,abs_j) = if abs_i < abs_j then (abs_i,abs_j) else if abs_i > abs_j then (abs_j,abs_i) else error "Both hypotheses are the same." in (tclTHEN (tclDO abs_i intro) (onLastHypId (fun id -> elimination_then (introElimAssumsThen (induction_trailer abs_i abs_j)) ([],[]) (mkVar id)))) gls let h_double_induction h1 h2 = Refiner.abstract_tactic (TacDoubleInduction (h1,h2)) (double_ind h1 h2) coq-8.4pl2/tactics/eqdecide.ml40000640000175000001440000001402112010532755015440 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (clear [destVar c]))) let choose_eq eqonleft = if eqonleft then h_simplest_left else h_simplest_right let choose_noteq eqonleft = if eqonleft then h_simplest_right else h_simplest_left let mkBranches c1 c2 = tclTHENSEQ [generalize [c2]; h_simplest_elim c1; intros; onLastHyp h_simplest_case; clear_last; intros] let solveNoteqBranch side = tclTHEN (choose_noteq side) (tclTHEN introf (onLastHypId (fun id -> Extratactics.h_discrHyp id))) let h_solveNoteqBranch side = Refiner.abstract_extended_tactic "solveNoteqBranch" [] (solveNoteqBranch side) (* Constructs the type {c1=c2}+{~c1=c2} *) let mkDecideEqGoal eqonleft op rectype c1 c2 g = let equality = mkApp(build_coq_eq(), [|rectype; c1; c2|]) in let disequality = mkApp(build_coq_not (), [|equality|]) in if eqonleft then mkApp(op, [|equality; disequality |]) else mkApp(op, [|disequality; equality |]) (* Constructs the type (x1,x2:R){x1=x2}+{~x1=x2} *) let mkGenDecideEqGoal rectype g = let hypnames = pf_ids_of_hyps g in let xname = next_ident_away (id_of_string "x") hypnames and yname = next_ident_away (id_of_string "y") hypnames in (mkNamedProd xname rectype (mkNamedProd yname rectype (mkDecideEqGoal true (build_coq_sumbool ()) rectype (mkVar xname) (mkVar yname) g))) let eqCase tac = (tclTHEN intro (tclTHEN (onLastHyp Equality.rewriteLR) (tclTHEN clear_last tac))) let diseqCase eqonleft = let diseq = id_of_string "diseq" in let absurd = id_of_string "absurd" in (tclTHEN (intro_using diseq) (tclTHEN (choose_noteq eqonleft) (tclTHEN red_in_concl (tclTHEN (intro_using absurd) (tclTHEN (h_simplest_apply (mkVar diseq)) (tclTHEN (Extratactics.h_injHyp absurd) (full_trivial []))))))) let solveArg eqonleft op a1 a2 tac g = let rectype = pf_type_of g a1 in let decide = mkDecideEqGoal eqonleft op rectype a1 a2 g in let subtacs = if eqonleft then [eqCase tac;diseqCase eqonleft;default_auto] else [diseqCase eqonleft;eqCase tac;default_auto] in (tclTHENS (h_elim_type decide) subtacs) g let solveEqBranch rectype g = try let (eqonleft,op,lhs,rhs,_) = match_eqdec (pf_concl g) in let (mib,mip) = Global.lookup_inductive rectype in let nparams = mib.mind_nparams in let getargs l = list_skipn nparams (snd (decompose_app l)) in let rargs = getargs rhs and largs = getargs lhs in List.fold_right2 (solveArg eqonleft op) largs rargs (tclTHEN (choose_eq eqonleft) h_reflexivity) g with PatternMatchingFailure -> error "Unexpected conclusion!" 
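(* Hedged sketch (helper name illustrative only): the decidability statement
   manipulated throughout this file is built by [mkDecideEqGoal] above.
   Given two terms [c1] and [c2] of type [ty] in goal [g], the following
   returns the constr for {c1=c2}+{~c1=c2}, with the equality on the left as
   expected by "decide equality": *)
let _decidability_goal ty c1 c2 g =
  mkDecideEqGoal true (build_coq_sumbool ()) ty c1 c2 g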
(* The tactic Decide Equality *) let hd_app c = match kind_of_term c with | App (h,_) -> h | _ -> c let decideGralEquality g = try let eqonleft,_,c1,c2,typ = match_eqdec (pf_concl g) in let headtyp = hd_app (pf_compute g typ) in let rectype = match kind_of_term headtyp with | Ind mi -> mi | _ -> error"This decision procedure only works for inductive objects." in (tclTHEN (mkBranches c1 c2) (tclORELSE (h_solveNoteqBranch eqonleft) (solveEqBranch rectype))) g with PatternMatchingFailure -> error "The goal must be of the form {x<>y}+{x=y} or {x=y}+{x<>y}." let decideEqualityGoal = tclTHEN intros decideGralEquality let decideEquality rectype g = let decide = mkGenDecideEqGoal rectype g in (tclTHENS (cut decide) [default_auto;decideEqualityGoal]) g (* The tactic Compare *) let compare c1 c2 g = let rectype = pf_type_of g c1 in let decide = mkDecideEqGoal true (build_coq_sumbool ()) rectype c1 c2 g in (tclTHENS (cut decide) [(tclTHEN intro (tclTHEN (onLastHyp simplest_case) clear_last)); decideEquality (pf_type_of g c1)]) g (* User syntax *) TACTIC EXTEND decide_equality | [ "decide" "equality" ] -> [ decideEqualityGoal ] END TACTIC EXTEND compare | [ "compare" constr(c1) constr(c2) ] -> [ compare c1 c2 ] END coq-8.4pl2/tactics/tacinterp.ml0000640000175000001440000036076012121620060015606 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* msgnl (str "bug in the debugger: " ++ str "an exception is raised while printing debug information") let error_syntactic_metavariables_not_allowed loc = user_err_loc (loc,"out_ident", str "Syntactic metavariables allowed only in quotations.") let error_tactic_expected loc = user_err_loc (loc,"",str "Tactic expected.") let error_global_not_found_loc (loc,qid) = error_global_not_found_loc loc qid let skip_metaid = function | AI x -> x | MetaId (loc,_) -> error_syntactic_metavariables_not_allowed loc (* Values for interpretation *) type value = | VRTactic of (goal list sigma) (* For Match results *) (* Not a true value *) | VFun of ltac_trace * (identifier*value) list * identifier option list * glob_tactic_expr | VVoid | VInteger of int | VIntroPattern of intro_pattern_expr (* includes idents which are not *) (* bound as in "Intro H" but which may be bound *) (* later, as in "tac" in "Intro H; tac" *) | VConstr of constr_under_binders (* includes idents known to be bound and references *) | VConstr_context of constr | VList of value list | VRec of (identifier*value) list ref * glob_tactic_expr let dloc = dummy_loc let catch_error call_trace tac g = if call_trace = [] then tac g else try tac g with | LtacLocated _ as e -> raise e | Loc.Exc_located (_,LtacLocated _) as e -> raise e | e when Errors.noncritical e -> let (nrep,loc',c),tail = list_sep_last call_trace in let loc,e' = match e with Loc.Exc_located(loc,e) -> loc,e | _ ->dloc,e in if tail = [] then let loc = if loc = dloc then loc' else loc in raise (Loc.Exc_located(loc,e')) else raise (Loc.Exc_located(loc',LtacLocated((nrep,c,tail,loc),e'))) (* Signature for interpretation: val_interp and interpretation functions *) type interp_sign = { lfun : (identifier * value) list; avoid_ids : identifier list; (* ids inherited from the call context (needed to get fresh ids) *) debug : debug_info; trace : ltac_trace } let check_is_value = function | VRTactic _ -> (* These are goals produced by Match *) error "Immediate match producing tactics not allowed in local definitions." 
| _ -> () (* Gives the constr corresponding to a Constr_context tactic_arg *) let constr_of_VConstr_context = function | VConstr_context c -> c | _ -> errorlabstrm "constr_of_VConstr_context" (str "Not a context variable.") (* Displays a value *) let rec pr_value env = function | VVoid -> str "()" | VInteger n -> int n | VIntroPattern ipat -> pr_intro_pattern (dloc,ipat) | VConstr c -> (match env with Some env -> pr_lconstr_under_binders_env env c | _ -> str "a term") | VConstr_context c -> (match env with Some env -> pr_lconstr_env env c | _ -> str "a term") | (VRTactic _ | VFun _ | VRec _) -> str "a tactic" | VList [] -> str "an empty list" | VList (a::_) -> str "a list (first element is " ++ pr_value env a ++ str")" (* Transforms an id into a constr if possible, or fails with Not_found *) let constr_of_id env id = Term.mkVar (let _ = Environ.lookup_named id env in id) (* To embed tactics *) let ((tactic_in : (interp_sign -> glob_tactic_expr) -> Dyn.t), (tactic_out : Dyn.t -> (interp_sign -> glob_tactic_expr))) = Dyn.create "tactic" let ((value_in : value -> Dyn.t), (value_out : Dyn.t -> value)) = Dyn.create "value" let valueIn t = TacDynamic (dummy_loc,value_in t) let valueOut = function | TacDynamic (_,d) -> if (Dyn.tag d) = "value" then value_out d else anomalylabstrm "valueOut" (str "Dynamic tag should be value") | ast -> anomalylabstrm "valueOut" (str "Not a Dynamic ast: ") (* Table of "pervasives" macros tactics (e.g. auto, simpl, etc.) *) let atomic_mactab = ref Idmap.empty let add_primitive_tactic s tac = let id = id_of_string s in atomic_mactab := Idmap.add id tac !atomic_mactab let _ = let nocl = {onhyps=Some[];concl_occs=all_occurrences_expr} in List.iter (fun (s,t) -> add_primitive_tactic s (TacAtom(dloc,t))) [ "red", TacReduce(Red false,nocl); "hnf", TacReduce(Hnf,nocl); "simpl", TacReduce(Simpl None,nocl); "compute", TacReduce(Cbv all_flags,nocl); "intro", TacIntroMove(None,no_move); "intros", TacIntroPattern []; "assumption", TacAssumption; "cofix", TacCofix None; "trivial", TacTrivial (Off,[],None); "auto", TacAuto(Off,None,[],None); "left", TacLeft(false,NoBindings); "eleft", TacLeft(true,NoBindings); "right", TacRight(false,NoBindings); "eright", TacRight(true,NoBindings); "split", TacSplit(false,false,[NoBindings]); "esplit", TacSplit(true,false,[NoBindings]); "constructor", TacAnyConstructor (false,None); "econstructor", TacAnyConstructor (true,None); "reflexivity", TacReflexivity; "symmetry", TacSymmetry nocl ]; List.iter (fun (s,t) -> add_primitive_tactic s t) [ "idtac",TacId []; "fail", TacFail(ArgArg 0,[]); "fresh", TacArg(dloc,TacFreshId []) ] let lookup_atomic id = Idmap.find id !atomic_mactab let is_atomic_kn kn = let (_,_,l) = repr_kn kn in Idmap.mem (id_of_label l) !atomic_mactab (* Summary and Object declaration *) let mactab = ref Gmap.empty let lookup r = Gmap.find r !mactab let _ = let init () = mactab := Gmap.empty in let freeze () = !mactab in let unfreeze fs = mactab := fs in Summary.declare_summary "tactic-definition" { Summary.freeze_function = freeze; Summary.unfreeze_function = unfreeze; Summary.init_function = init } (* Tactics table (TacExtend). 
*) let tac_tab = Hashtbl.create 17 let add_tactic s t = if Hashtbl.mem tac_tab s then errorlabstrm ("Refiner.add_tactic: ") (str ("Cannot redeclare tactic "^s^".")); Hashtbl.add tac_tab s t let overwriting_add_tactic s t = if Hashtbl.mem tac_tab s then begin Hashtbl.remove tac_tab s; warning ("Overwriting definition of tactic "^s) end; Hashtbl.add tac_tab s t let lookup_tactic s = try Hashtbl.find tac_tab s with Not_found -> errorlabstrm "Refiner.lookup_tactic" (str"The tactic " ++ str s ++ str" is not installed.") (* let vernac_tactic (s,args) = let tacfun = lookup_tactic s args in abstract_extended_tactic s args tacfun *) (* Interpretation of extra generic arguments *) type glob_sign = { ltacvars : identifier list * identifier list; (* ltac variables and the subset of vars introduced by Intro/Let/... *) ltacrecvars : (identifier * ltac_constant) list; (* ltac recursive names *) gsigma : Evd.evar_map; genv : Environ.env } type interp_genarg_type = (glob_sign -> raw_generic_argument -> glob_generic_argument) * (interp_sign -> goal sigma -> glob_generic_argument -> Evd.evar_map * typed_generic_argument) * (substitution -> glob_generic_argument -> glob_generic_argument) let extragenargtab = ref (Gmap.empty : (string,interp_genarg_type) Gmap.t) let add_interp_genarg id f = extragenargtab := Gmap.add id f !extragenargtab let lookup_genarg id = try Gmap.find id !extragenargtab with Not_found -> let msg = "No interpretation function found for entry " ^ id in warning msg; let f = (fun _ _ -> failwith msg), (fun _ _ _ -> failwith msg), (fun _ a -> a) in add_interp_genarg id f; f let lookup_genarg_glob id = let (f,_,_) = lookup_genarg id in f let lookup_interp_genarg id = let (_,f,_) = lookup_genarg id in f let lookup_genarg_subst id = let (_,_,f) = lookup_genarg id in f let push_trace (loc,ck) = function | (n,loc',ck')::trl when ck=ck' -> (n+1,loc,ck)::trl | trl -> (1,loc,ck)::trl let propagate_trace ist loc id = function | VFun (_,lfun,it,b) -> let t = if it=[] then b else TacFun (it,b) in VFun (push_trace(loc,LtacVarCall (id,t)) ist.trace,lfun,it,b) | x -> x (* Dynamically check that an argument is a tactic *) let coerce_to_tactic loc id = function | VFun _ | VRTactic _ as a -> a | _ -> user_err_loc (loc, "", str "Variable " ++ pr_id id ++ str " should be bound to a tactic.") (*****************) (* Globalization *) (*****************) (* We have identifier <| global_reference <| constr *) let find_ident id ist = List.mem id (fst ist.ltacvars) or List.mem id (ids_of_named_context (Environ.named_context ist.genv)) let find_recvar qid ist = List.assoc qid ist.ltacrecvars (* a "var" is a ltac var or a var introduced by an intro tactic *) let find_var id ist = List.mem id (fst ist.ltacvars) (* a "ctxvar" is a var introduced by an intro tactic (Intro/LetTac/...) *) let find_ctxvar id ist = List.mem id (snd ist.ltacvars) (* a "ltacvar" is an ltac var (Let-In/Fun/...) *) let find_ltacvar id ist = find_var id ist & not (find_ctxvar id ist) let find_hyp id ist = List.mem id (ids_of_named_context (Environ.named_context ist.genv)) (* Globalize a name introduced by Intro/LetTac/... 
; it is allowed to *) (* be fresh in which case it is binding later on *) let intern_ident l ist id = (* We use identifier both for variables and new names; thus nothing to do *) if not (find_ident id ist) then l:=(id::fst !l,id::snd !l); id let intern_name l ist = function | Anonymous -> Anonymous | Name id -> Name (intern_ident l ist id) let strict_check = ref false let adjust_loc loc = if !strict_check then dloc else loc (* Globalize a name which must be bound -- actually just check it is bound *) let intern_hyp ist (loc,id as locid) = if not !strict_check then locid else if find_ident id ist then (dloc,id) else Pretype_errors.error_var_not_found_loc loc id let intern_hyp_or_metaid ist id = intern_hyp ist (skip_metaid id) let intern_or_var ist = function | ArgVar locid -> ArgVar (intern_hyp ist locid) | ArgArg _ as x -> x let intern_inductive_or_by_notation = smart_global_inductive let intern_inductive ist = function | AN (Ident (loc,id)) when find_var id ist -> ArgVar (loc,id) | r -> ArgArg (intern_inductive_or_by_notation r) let intern_global_reference ist = function | Ident (loc,id) when find_var id ist -> ArgVar (loc,id) | r -> let loc,_ as lqid = qualid_of_reference r in try ArgArg (loc,locate_global_with_alias lqid) with Not_found -> error_global_not_found_loc lqid let intern_ltac_variable ist = function | Ident (loc,id) -> if find_ltacvar id ist then (* A local variable of any type *) ArgVar (loc,id) else (* A recursive variable *) ArgArg (loc,find_recvar id ist) | _ -> raise Not_found let intern_constr_reference strict ist = function | Ident (_,id) as r when not strict & find_hyp id ist -> GVar (dloc,id), Some (CRef r) | Ident (_,id) as r when find_ctxvar id ist -> GVar (dloc,id), if strict then None else Some (CRef r) | r -> let loc,_ as lqid = qualid_of_reference r in GRef (loc,locate_global_with_alias lqid), if strict then None else Some (CRef r) let intern_move_location ist = function | MoveAfter id -> MoveAfter (intern_hyp_or_metaid ist id) | MoveBefore id -> MoveBefore (intern_hyp_or_metaid ist id) | MoveToEnd toleft as x -> x (* Internalize an isolated reference in position of tactic *) let intern_isolated_global_tactic_reference r = let (loc,qid) = qualid_of_reference r in try TacCall (loc,ArgArg (loc,locate_tactic qid),[]) with Not_found -> match r with | Ident (_,id) -> Tacexp (lookup_atomic id) | _ -> raise Not_found let intern_isolated_tactic_reference strict ist r = (* An ltac reference *) try Reference (intern_ltac_variable ist r) with Not_found -> (* A global tactic *) try intern_isolated_global_tactic_reference r with Not_found -> (* Tolerance for compatibility, allow not to use "constr:" *) try ConstrMayEval (ConstrTerm (intern_constr_reference strict ist r)) with Not_found -> (* Reference not found *) error_global_not_found_loc (qualid_of_reference r) (* Internalize an applied tactic reference *) let intern_applied_global_tactic_reference r = let (loc,qid) = qualid_of_reference r in ArgArg (loc,locate_tactic qid) let intern_applied_tactic_reference ist r = (* An ltac reference *) try intern_ltac_variable ist r with Not_found -> (* A global tactic *) try intern_applied_global_tactic_reference r with Not_found -> (* Reference not found *) error_global_not_found_loc (qualid_of_reference r) (* Intern a reference parsed in a non-tactic entry *) let intern_non_tactic_reference strict ist r = (* An ltac reference *) try Reference (intern_ltac_variable ist r) with Not_found -> (* A constr reference *) try ConstrMayEval (ConstrTerm (intern_constr_reference strict ist 
r)) with Not_found -> (* Tolerance for compatibility, allow not to use "ltac:" *) try intern_isolated_global_tactic_reference r with Not_found -> (* By convention, use IntroIdentifier for unbound ident, when not in a def *) match r with | Ident (loc,id) when not strict -> IntroPattern (loc,IntroIdentifier id) | _ -> (* Reference not found *) error_global_not_found_loc (qualid_of_reference r) let intern_message_token ist = function | (MsgString _ | MsgInt _ as x) -> x | MsgIdent id -> MsgIdent (intern_hyp_or_metaid ist id) let intern_message ist = List.map (intern_message_token ist) let rec intern_intro_pattern lf ist = function | loc, IntroOrAndPattern l -> loc, IntroOrAndPattern (intern_or_and_intro_pattern lf ist l) | loc, IntroIdentifier id -> loc, IntroIdentifier (intern_ident lf ist id) | loc, IntroFresh id -> loc, IntroFresh (intern_ident lf ist id) | loc, (IntroWildcard | IntroAnonymous | IntroRewrite _ | IntroForthcoming _) as x -> x and intern_or_and_intro_pattern lf ist = List.map (List.map (intern_intro_pattern lf ist)) let intern_quantified_hypothesis ist = function | AnonHyp n -> AnonHyp n | NamedHyp id -> (* Uncomment to disallow "intros until n" in ltac when n is not bound *) NamedHyp ((*snd (intern_hyp ist (dloc,*)id(* ))*)) let intern_binding_name ist x = (* We use identifier both for variables and binding names *) (* Todo: consider the body of the lemma to which the binding refer and if a term w/o ltac vars, check the name is indeed quantified *) x let intern_constr_gen allow_patvar isarity {ltacvars=lfun; gsigma=sigma; genv=env} c = let warn = if !strict_check then fun x -> x else Constrintern.for_grammar in let c' = warn (Constrintern.intern_gen isarity ~allow_patvar ~ltacvars:(fst lfun,[]) sigma env) c in (c',if !strict_check then None else Some c) let intern_constr = intern_constr_gen false false let intern_type = intern_constr_gen false true (* Globalize bindings *) let intern_binding ist (loc,b,c) = (loc,intern_binding_name ist b,intern_constr ist c) let intern_bindings ist = function | NoBindings -> NoBindings | ImplicitBindings l -> ImplicitBindings (List.map (intern_constr ist) l) | ExplicitBindings l -> ExplicitBindings (List.map (intern_binding ist) l) let intern_constr_with_bindings ist (c,bl) = (intern_constr ist c, intern_bindings ist bl) (* TODO: catch ltac vars *) let intern_induction_arg ist = function | ElimOnConstr c -> ElimOnConstr (intern_constr_with_bindings ist c) | ElimOnAnonHyp n as x -> x | ElimOnIdent (loc,id) -> if !strict_check then (* If in a defined tactic, no intros-until *) match intern_constr ist (CRef (Ident (dloc,id))) with | GVar (loc,id),_ -> ElimOnIdent (loc,id) | c -> ElimOnConstr (c,NoBindings) else ElimOnIdent (loc,id) let short_name = function | AN (Ident (loc,id)) when not !strict_check -> Some (loc,id) | _ -> None let intern_evaluable_global_reference ist r = let lqid = qualid_of_reference r in try evaluable_of_global_reference ist.genv (locate_global_with_alias lqid) with Not_found -> match r with | Ident (loc,id) when not !strict_check -> EvalVarRef id | _ -> error_global_not_found_loc lqid let intern_evaluable_reference_or_by_notation ist = function | AN r -> intern_evaluable_global_reference ist r | ByNotation (loc,ntn,sc) -> evaluable_of_global_reference ist.genv (Notation.interp_notation_as_global_reference loc (function ConstRef _ | VarRef _ -> true | _ -> false) ntn sc) (* Globalize a reduction expression *) let intern_evaluable ist = function | AN (Ident (loc,id)) when find_ltacvar id ist -> ArgVar (loc,id) | AN (Ident 
(loc,id)) when not !strict_check & find_hyp id ist -> ArgArg (EvalVarRef id, Some (loc,id)) | AN (Ident (loc,id)) when find_ctxvar id ist -> ArgArg (EvalVarRef id, if !strict_check then None else Some (loc,id)) | r -> let e = intern_evaluable_reference_or_by_notation ist r in let na = short_name r in ArgArg (e,na) let intern_unfold ist (l,qid) = (l,intern_evaluable ist qid) let intern_flag ist red = { red with rConst = List.map (intern_evaluable ist) red.rConst } let intern_constr_with_occurrences ist (l,c) = (l,intern_constr ist c) let intern_constr_pattern ist ltacvars pc = let metas,pat = Constrintern.intern_constr_pattern ist.gsigma ist.genv ~ltacvars pc in let c = intern_constr_gen true false ist pc in metas,(c,pat) let intern_typed_pattern ist p = let dummy_pat = PRel 0 in (* we cannot ensure in non strict mode that the pattern is closed *) (* keeping a constr_expr copy is too complicated and we want anyway to *) (* type it, so we remember the pattern as a glob_constr only *) (intern_constr_gen true false ist p,dummy_pat) let intern_typed_pattern_with_occurrences ist (l,p) = (l,intern_typed_pattern ist p) (* This seems fairly hacky, but it's the first way I've found to get proper globalization of [unfold]. --adamc *) let dump_glob_red_expr = function | Unfold occs -> List.iter (fun (_, r) -> try Dumpglob.add_glob (loc_of_or_by_notation Libnames.loc_of_reference r) (Smartlocate.smart_global r) with e when Errors.noncritical e -> ()) occs | Cbv grf | Lazy grf -> List.iter (fun r -> try Dumpglob.add_glob (loc_of_or_by_notation Libnames.loc_of_reference r) (Smartlocate.smart_global r) with e when Errors.noncritical e -> ()) grf.rConst | _ -> () let intern_red_expr ist = function | Unfold l -> Unfold (List.map (intern_unfold ist) l) | Fold l -> Fold (List.map (intern_constr ist) l) | Cbv f -> Cbv (intern_flag ist f) | Lazy f -> Lazy (intern_flag ist f) | Pattern l -> Pattern (List.map (intern_constr_with_occurrences ist) l) | Simpl o -> Simpl (Option.map (intern_typed_pattern_with_occurrences ist) o) | (Red _ | Hnf | ExtraRedExpr _ | CbvVm as r ) -> r let intern_in_hyp_as ist lf (id,ipat) = (intern_hyp_or_metaid ist id, Option.map (intern_intro_pattern lf ist) ipat) let intern_hyp_list ist = List.map (intern_hyp_or_metaid ist) let intern_inversion_strength lf ist = function | NonDepInversion (k,idl,ids) -> NonDepInversion (k,intern_hyp_list ist idl, Option.map (intern_intro_pattern lf ist) ids) | DepInversion (k,copt,ids) -> DepInversion (k, Option.map (intern_constr ist) copt, Option.map (intern_intro_pattern lf ist) ids) | InversionUsing (c,idl) -> InversionUsing (intern_constr ist c, intern_hyp_list ist idl) (* Interprets an hypothesis name *) let intern_hyp_location ist (((b,occs),id),hl) = (((b,List.map (intern_or_var ist) occs),intern_hyp_or_metaid ist id), hl) (* Reads a pattern *) let intern_pattern ist ?(as_type=false) lfun = function | Subterm (b,ido,pc) -> let ltacvars = (lfun,[]) in let (metas,pc) = intern_constr_pattern ist ltacvars pc in ido, metas, Subterm (b,ido,pc) | Term pc -> let ltacvars = (lfun,[]) in let (metas,pc) = intern_constr_pattern ist ltacvars pc in None, metas, Term pc let intern_constr_may_eval ist = function | ConstrEval (r,c) -> ConstrEval (intern_red_expr ist r,intern_constr ist c) | ConstrContext (locid,c) -> ConstrContext (intern_hyp ist locid,intern_constr ist c) | ConstrTypeOf c -> ConstrTypeOf (intern_constr ist c) | ConstrTerm c -> ConstrTerm (intern_constr ist c) (* External tactics *) let print_xml_term = ref (fun _ -> failwith "print_xml_term 
unset") let declare_xml_printer f = print_xml_term := f let internalise_tacarg ch = G_xml.parse_tactic_arg ch let extern_tacarg ch env sigma = function | VConstr ([],c) -> !print_xml_term ch env sigma c | VRTactic _ | VFun _ | VVoid | VInteger _ | VConstr_context _ | VIntroPattern _ | VRec _ | VList _ | VConstr _ -> error "Only externing of closed terms is implemented." let extern_request ch req gl la = output_string ch "\n"; List.iter (pf_apply (extern_tacarg ch) gl) la; output_string ch "\n" let value_of_ident id = VIntroPattern (IntroIdentifier id) let extend_values_with_bindings (ln,lm) lfun = let lnames = List.map (fun (id,id') ->(id,value_of_ident id')) ln in let lmatch = List.map (fun (id,(ids,c)) -> (id,VConstr (ids,c))) lm in (* For compatibility, bound variables are visible only if no other binding of the same name exists *) lmatch@lfun@lnames (* Reads the hypotheses of a "match goal" rule *) let rec intern_match_goal_hyps ist lfun = function | (Hyp ((_,na) as locna,mp))::tl -> let ido, metas1, pat = intern_pattern ist ~as_type:true lfun mp in let lfun, metas2, hyps = intern_match_goal_hyps ist lfun tl in let lfun' = name_cons na (Option.List.cons ido lfun) in lfun', metas1@metas2, Hyp (locna,pat)::hyps | (Def ((_,na) as locna,mv,mp))::tl -> let ido, metas1, patv = intern_pattern ist ~as_type:false lfun mv in let ido', metas2, patt = intern_pattern ist ~as_type:true lfun mp in let lfun, metas3, hyps = intern_match_goal_hyps ist lfun tl in let lfun' = name_cons na (Option.List.cons ido' (Option.List.cons ido lfun)) in lfun', metas1@metas2@metas3, Def (locna,patv,patt)::hyps | [] -> lfun, [], [] (* Utilities *) let extract_let_names lrc = List.fold_right (fun ((loc,name),_) l -> if List.mem name l then user_err_loc (loc, "glob_tactic", str "This variable is bound several times."); name::l) lrc [] let clause_app f = function { onhyps=None; concl_occs=nl } -> { onhyps=None; concl_occs=nl } | { onhyps=Some l; concl_occs=nl } -> { onhyps=Some(List.map f l); concl_occs=nl} (* Globalizes tactics : raw_tactic_expr -> glob_tactic_expr *) let rec intern_atomic lf ist x = match (x:raw_atomic_tactic_expr) with (* Basic tactics *) | TacIntroPattern l -> TacIntroPattern (List.map (intern_intro_pattern lf ist) l) | TacIntrosUntil hyp -> TacIntrosUntil (intern_quantified_hypothesis ist hyp) | TacIntroMove (ido,hto) -> TacIntroMove (Option.map (intern_ident lf ist) ido, intern_move_location ist hto) | TacAssumption -> TacAssumption | TacExact c -> TacExact (intern_constr ist c) | TacExactNoCheck c -> TacExactNoCheck (intern_constr ist c) | TacVmCastNoCheck c -> TacVmCastNoCheck (intern_constr ist c) | TacApply (a,ev,cb,inhyp) -> TacApply (a,ev,List.map (intern_constr_with_bindings ist) cb, Option.map (intern_in_hyp_as ist lf) inhyp) | TacElim (ev,cb,cbo) -> TacElim (ev,intern_constr_with_bindings ist cb, Option.map (intern_constr_with_bindings ist) cbo) | TacElimType c -> TacElimType (intern_type ist c) | TacCase (ev,cb) -> TacCase (ev,intern_constr_with_bindings ist cb) | TacCaseType c -> TacCaseType (intern_type ist c) | TacFix (idopt,n) -> TacFix (Option.map (intern_ident lf ist) idopt,n) | TacMutualFix (b,id,n,l) -> let f (id,n,c) = (intern_ident lf ist id,n,intern_type ist c) in TacMutualFix (b,intern_ident lf ist id, n, List.map f l) | TacCofix idopt -> TacCofix (Option.map (intern_ident lf ist) idopt) | TacMutualCofix (b,id,l) -> let f (id,c) = (intern_ident lf ist id,intern_type ist c) in TacMutualCofix (b,intern_ident lf ist id, List.map f l) | TacCut c -> TacCut (intern_type ist c) | 
TacAssert (otac,ipat,c) -> TacAssert (Option.map (intern_pure_tactic ist) otac, Option.map (intern_intro_pattern lf ist) ipat, intern_constr_gen false (otac<>None) ist c) | TacGeneralize cl -> TacGeneralize (List.map (fun (c,na) -> intern_constr_with_occurrences ist c, intern_name lf ist na) cl) | TacGeneralizeDep c -> TacGeneralizeDep (intern_constr ist c) | TacLetTac (na,c,cls,b,eqpat) -> let na = intern_name lf ist na in TacLetTac (na,intern_constr ist c, (clause_app (intern_hyp_location ist) cls),b, (Option.map (intern_intro_pattern lf ist) eqpat)) (* Automation tactics *) | TacTrivial (d,lems,l) -> TacTrivial (d,List.map (intern_constr ist) lems,l) | TacAuto (d,n,lems,l) -> TacAuto (d,Option.map (intern_or_var ist) n, List.map (intern_constr ist) lems,l) (* Derived basic tactics *) | TacSimpleInductionDestruct (isrec,h) -> TacSimpleInductionDestruct (isrec,intern_quantified_hypothesis ist h) | TacInductionDestruct (ev,isrec,(l,el,cls)) -> TacInductionDestruct (ev,isrec,(List.map (fun (c,(ipato,ipats)) -> (intern_induction_arg ist c, (Option.map (intern_intro_pattern lf ist) ipato, Option.map (intern_intro_pattern lf ist) ipats))) l, Option.map (intern_constr_with_bindings ist) el, Option.map (clause_app (intern_hyp_location ist)) cls)) | TacDoubleInduction (h1,h2) -> let h1 = intern_quantified_hypothesis ist h1 in let h2 = intern_quantified_hypothesis ist h2 in TacDoubleInduction (h1,h2) | TacDecomposeAnd c -> TacDecomposeAnd (intern_constr ist c) | TacDecomposeOr c -> TacDecomposeOr (intern_constr ist c) | TacDecompose (l,c) -> let l = List.map (intern_inductive ist) l in TacDecompose (l,intern_constr ist c) | TacSpecialize (n,l) -> TacSpecialize (n,intern_constr_with_bindings ist l) | TacLApply c -> TacLApply (intern_constr ist c) (* Context management *) | TacClear (b,l) -> TacClear (b,List.map (intern_hyp_or_metaid ist) l) | TacClearBody l -> TacClearBody (List.map (intern_hyp_or_metaid ist) l) | TacMove (dep,id1,id2) -> TacMove (dep,intern_hyp_or_metaid ist id1,intern_move_location ist id2) | TacRename l -> TacRename (List.map (fun (id1,id2) -> intern_hyp_or_metaid ist id1, intern_hyp_or_metaid ist id2) l) | TacRevert l -> TacRevert (List.map (intern_hyp_or_metaid ist) l) (* Constructors *) | TacLeft (ev,bl) -> TacLeft (ev,intern_bindings ist bl) | TacRight (ev,bl) -> TacRight (ev,intern_bindings ist bl) | TacSplit (ev,b,bll) -> TacSplit (ev,b,List.map (intern_bindings ist) bll) | TacAnyConstructor (ev,t) -> TacAnyConstructor (ev,Option.map (intern_pure_tactic ist) t) | TacConstructor (ev,n,bl) -> TacConstructor (ev,intern_or_var ist n,intern_bindings ist bl) (* Conversion *) | TacReduce (r,cl) -> dump_glob_red_expr r; TacReduce (intern_red_expr ist r, clause_app (intern_hyp_location ist) cl) | TacChange (None,c,cl) -> TacChange (None, (if (cl.onhyps = None or cl.onhyps = Some []) & (cl.concl_occs = all_occurrences_expr or cl.concl_occs = no_occurrences_expr) then intern_type ist c else intern_constr ist c), clause_app (intern_hyp_location ist) cl) | TacChange (Some p,c,cl) -> TacChange (Some (intern_typed_pattern ist p),intern_constr ist c, clause_app (intern_hyp_location ist) cl) (* Equivalence relations *) | TacReflexivity -> TacReflexivity | TacSymmetry idopt -> TacSymmetry (clause_app (intern_hyp_location ist) idopt) | TacTransitivity c -> TacTransitivity (Option.map (intern_constr ist) c) (* Equality and inversion *) | TacRewrite (ev,l,cl,by) -> TacRewrite (ev, List.map (fun (b,m,c) -> (b,m,intern_constr_with_bindings ist c)) l, clause_app (intern_hyp_location ist) cl, 
Option.map (intern_pure_tactic ist) by) | TacInversion (inv,hyp) -> TacInversion (intern_inversion_strength lf ist inv, intern_quantified_hypothesis ist hyp) (* For extensions *) | TacExtend (loc,opn,l) -> let _ = lookup_tactic opn in TacExtend (adjust_loc loc,opn,List.map (intern_genarg ist) l) | TacAlias (loc,s,l,(dir,body)) -> let l = List.map (fun (id,a) -> (id,intern_genarg ist a)) l in TacAlias (loc,s,l,(dir,body)) and intern_tactic onlytac ist tac = snd (intern_tactic_seq onlytac ist tac) and intern_tactic_seq onlytac ist = function | TacAtom (loc,t) -> let lf = ref ist.ltacvars in let t = intern_atomic lf ist t in !lf, TacAtom (adjust_loc loc, t) | TacFun tacfun -> ist.ltacvars, TacFun (intern_tactic_fun ist tacfun) | TacLetIn (isrec,l,u) -> let (l1,l2) = ist.ltacvars in let ist' = { ist with ltacvars = (extract_let_names l @ l1, l2) } in let l = List.map (fun (n,b) -> (n,intern_tacarg !strict_check false (if isrec then ist' else ist) b)) l in ist.ltacvars, TacLetIn (isrec,l,intern_tactic onlytac ist' u) | TacMatchGoal (lz,lr,lmr) -> ist.ltacvars, TacMatchGoal(lz,lr, intern_match_rule onlytac ist lmr) | TacMatch (lz,c,lmr) -> ist.ltacvars, TacMatch (lz,intern_tactic_or_tacarg ist c,intern_match_rule onlytac ist lmr) | TacId l -> ist.ltacvars, TacId (intern_message ist l) | TacFail (n,l) -> ist.ltacvars, TacFail (intern_or_var ist n,intern_message ist l) | TacProgress tac -> ist.ltacvars, TacProgress (intern_pure_tactic ist tac) | TacAbstract (tac,s) -> ist.ltacvars, TacAbstract (intern_pure_tactic ist tac,s) | TacThen (t1,[||],t2,[||]) -> let lfun', t1 = intern_tactic_seq onlytac ist t1 in let lfun'', t2 = intern_tactic_seq onlytac { ist with ltacvars = lfun' } t2 in lfun'', TacThen (t1,[||],t2,[||]) | TacThen (t1,tf,t2,tl) -> let lfun', t1 = intern_tactic_seq onlytac ist t1 in let ist' = { ist with ltacvars = lfun' } in (* What to do in the case of (a complex tactic with Match and Thens; tac2)? *) lfun', TacThen (t1,Array.map (intern_pure_tactic ist') tf,intern_pure_tactic ist' t2, Array.map (intern_pure_tactic ist') tl) | TacThens (t,tl) -> let lfun', t = intern_tactic_seq true ist t in let ist' = { ist with ltacvars = lfun' } in (* What to do in the case of (a complex tactic with Match and Thens; tac2)? 
*) lfun', TacThens (t, List.map (intern_pure_tactic ist') tl) | TacDo (n,tac) -> ist.ltacvars, TacDo (intern_or_var ist n,intern_pure_tactic ist tac) | TacTry tac -> ist.ltacvars, TacTry (intern_pure_tactic ist tac) | TacInfo tac -> ist.ltacvars, TacInfo (intern_pure_tactic ist tac) | TacRepeat tac -> ist.ltacvars, TacRepeat (intern_pure_tactic ist tac) | TacTimeout (n,tac) -> ist.ltacvars, TacTimeout (intern_or_var ist n,intern_tactic onlytac ist tac) | TacOrelse (tac1,tac2) -> ist.ltacvars, TacOrelse (intern_pure_tactic ist tac1,intern_pure_tactic ist tac2) | TacFirst l -> ist.ltacvars, TacFirst (List.map (intern_pure_tactic ist) l) | TacSolve l -> ist.ltacvars, TacSolve (List.map (intern_pure_tactic ist) l) | TacComplete tac -> ist.ltacvars, TacComplete (intern_pure_tactic ist tac) | TacArg (loc,a) -> ist.ltacvars, intern_tactic_as_arg loc onlytac ist a and intern_tactic_as_arg loc onlytac ist a = match intern_tacarg !strict_check onlytac ist a with | TacCall _ | TacExternal _ | Reference _ | TacDynamic _ as a -> TacArg (loc,a) | Tacexp a -> a | TacVoid | IntroPattern _ | Integer _ | ConstrMayEval _ | TacFreshId _ as a -> if onlytac then error_tactic_expected loc else TacArg (loc,a) | MetaIdArg _ -> assert false and intern_tactic_or_tacarg ist = intern_tactic false ist and intern_pure_tactic ist = intern_tactic true ist and intern_tactic_fun ist (var,body) = let (l1,l2) = ist.ltacvars in let lfun' = List.rev_append (Option.List.flatten var) l1 in (var,intern_tactic_or_tacarg { ist with ltacvars = (lfun',l2) } body) and intern_tacarg strict onlytac ist = function | TacVoid -> TacVoid | Reference r -> intern_non_tactic_reference strict ist r | IntroPattern ipat -> let lf = ref([],[]) in (*How to know what names the intropattern binds?*) IntroPattern (intern_intro_pattern lf ist ipat) | Integer n -> Integer n | ConstrMayEval c -> ConstrMayEval (intern_constr_may_eval ist c) | MetaIdArg (loc,istac,s) -> (* $id can occur in Grammar tactic... 
*) let id = id_of_string s in if find_ltacvar id ist then if istac then Reference (ArgVar (adjust_loc loc,id)) else ConstrMayEval (ConstrTerm (GVar (adjust_loc loc,id), None)) else error_syntactic_metavariables_not_allowed loc | TacCall (loc,f,[]) -> intern_isolated_tactic_reference strict ist f | TacCall (loc,f,l) -> TacCall (loc, intern_applied_tactic_reference ist f, List.map (intern_tacarg !strict_check false ist) l) | TacExternal (loc,com,req,la) -> TacExternal (loc,com,req,List.map (intern_tacarg !strict_check false ist) la) | TacFreshId x -> TacFreshId (List.map (intern_or_var ist) x) | Tacexp t -> Tacexp (intern_tactic onlytac ist t) | TacDynamic(loc,t) as x -> (match Dyn.tag t with | "tactic" | "value" -> x | "constr" -> if onlytac then error_tactic_expected loc else x | s -> anomaly_loc (loc, "", str "Unknown dynamic: <" ++ str s ++ str ">")) (* Reads the rules of a Match Context or a Match *) and intern_match_rule onlytac ist = function | (All tc)::tl -> All (intern_tactic onlytac ist tc) :: (intern_match_rule onlytac ist tl) | (Pat (rl,mp,tc))::tl -> let {ltacvars=(lfun,l2); gsigma=sigma; genv=env} = ist in let lfun',metas1,hyps = intern_match_goal_hyps ist lfun rl in let ido,metas2,pat = intern_pattern ist lfun mp in let metas = list_uniquize (metas1@metas2) in let ist' = { ist with ltacvars = (metas@(Option.List.cons ido lfun'),l2) } in Pat (hyps,pat,intern_tactic onlytac ist' tc) :: (intern_match_rule onlytac ist tl) | [] -> [] and intern_genarg ist x = match genarg_tag x with | BoolArgType -> in_gen globwit_bool (out_gen rawwit_bool x) | IntArgType -> in_gen globwit_int (out_gen rawwit_int x) | IntOrVarArgType -> in_gen globwit_int_or_var (intern_or_var ist (out_gen rawwit_int_or_var x)) | StringArgType -> in_gen globwit_string (out_gen rawwit_string x) | PreIdentArgType -> in_gen globwit_pre_ident (out_gen rawwit_pre_ident x) | IntroPatternArgType -> let lf = ref ([],[]) in (* how to know which names are bound by the intropattern *) in_gen globwit_intro_pattern (intern_intro_pattern lf ist (out_gen rawwit_intro_pattern x)) | IdentArgType b -> let lf = ref ([],[]) in in_gen (globwit_ident_gen b) (intern_ident lf ist (out_gen (rawwit_ident_gen b) x)) | VarArgType -> in_gen globwit_var (intern_hyp ist (out_gen rawwit_var x)) | RefArgType -> in_gen globwit_ref (intern_global_reference ist (out_gen rawwit_ref x)) | SortArgType -> in_gen globwit_sort (out_gen rawwit_sort x) | ConstrArgType -> in_gen globwit_constr (intern_constr ist (out_gen rawwit_constr x)) | ConstrMayEvalArgType -> in_gen globwit_constr_may_eval (intern_constr_may_eval ist (out_gen rawwit_constr_may_eval x)) | QuantHypArgType -> in_gen globwit_quant_hyp (intern_quantified_hypothesis ist (out_gen rawwit_quant_hyp x)) | RedExprArgType -> in_gen globwit_red_expr (intern_red_expr ist (out_gen rawwit_red_expr x)) | OpenConstrArgType b -> in_gen (globwit_open_constr_gen b) ((),intern_constr ist (snd (out_gen (rawwit_open_constr_gen b) x))) | ConstrWithBindingsArgType -> in_gen globwit_constr_with_bindings (intern_constr_with_bindings ist (out_gen rawwit_constr_with_bindings x)) | BindingsArgType -> in_gen globwit_bindings (intern_bindings ist (out_gen rawwit_bindings x)) | List0ArgType _ -> app_list0 (intern_genarg ist) x | List1ArgType _ -> app_list1 (intern_genarg ist) x | OptArgType _ -> app_opt (intern_genarg ist) x | PairArgType _ -> app_pair (intern_genarg ist) (intern_genarg ist) x | ExtraArgType s -> match tactic_genarg_level s with | Some n -> (* Special treatment of tactic arguments *) in_gen 
(globwit_tactic n) (intern_tactic_or_tacarg ist (out_gen (rawwit_tactic n) x)) | None -> lookup_genarg_glob s ist x (************* End globalization ************) (***************************************************************************) (* Evaluation/interpretation *) let is_variable env id = List.mem id (ids_of_named_context (Environ.named_context env)) (* Debug reference *) let debug = ref DebugOff (* Sets the debugger mode *) let set_debug pos = debug := pos (* Gives the state of debug *) let get_debug () = !debug let debugging_step ist pp = match ist.debug with | DebugOn lev -> safe_msgnl (str "Level " ++ int lev ++ str": " ++ pp () ++ fnl()) | _ -> () let debugging_exception_step ist signal_anomaly e pp = let explain_exc = if signal_anomaly then explain_logic_error else explain_logic_error_no_anomaly in debugging_step ist (fun () -> pp() ++ spc() ++ str "raised the exception" ++ fnl() ++ !explain_exc e) let error_ltac_variable loc id env v s = user_err_loc (loc, "", str "Ltac variable " ++ pr_id id ++ strbrk " is bound to" ++ spc () ++ pr_value env v ++ spc () ++ strbrk "which cannot be coerced to " ++ str s ++ str".") exception CannotCoerceTo of string (* Raise Not_found if not in interpretation sign *) let try_interp_ltac_var coerce ist env (loc,id) = let v = List.assoc id ist.lfun in try coerce v with CannotCoerceTo s -> error_ltac_variable loc id env v s let interp_ltac_var coerce ist env locid = try try_interp_ltac_var coerce ist env locid with Not_found -> anomaly ("Detected '" ^ (string_of_id (snd locid)) ^ "' as ltac var at interning time") (* Interprets an identifier which must be fresh *) let coerce_to_ident fresh env = function | VIntroPattern (IntroIdentifier id) -> id | VConstr ([],c) when isVar c & not (fresh & is_variable env (destVar c)) -> (* We need it fresh for intro e.g. in "Tac H = clear H; intro H" *) destVar c | v -> raise (CannotCoerceTo "a fresh identifier") let interp_ident_gen fresh ist env id = try try_interp_ltac_var (coerce_to_ident fresh env) ist (Some env) (dloc,id) with Not_found -> id let interp_ident = interp_ident_gen false let interp_fresh_ident = interp_ident_gen true let pf_interp_ident id gl = interp_ident_gen false id (pf_env gl) let pf_interp_fresh_ident id gl = interp_ident_gen true id (pf_env gl) (* Interprets an optional identifier which must be fresh *) let interp_fresh_name ist env = function | Anonymous -> Anonymous | Name id -> Name (interp_fresh_ident ist env id) let coerce_to_intro_pattern env = function | VIntroPattern ipat -> ipat | VConstr ([],c) when isVar c -> (* This happens e.g. 
in definitions like "Tac H = clear H; intro H" *) (* but also in "destruct H as (H,H')" *) IntroIdentifier (destVar c) | v -> raise (CannotCoerceTo "an introduction pattern") let interp_intro_pattern_var loc ist env id = try try_interp_ltac_var (coerce_to_intro_pattern env) ist (Some env) (loc,id) with Not_found -> IntroIdentifier id let coerce_to_hint_base = function | VIntroPattern (IntroIdentifier id) -> string_of_id id | _ -> raise (CannotCoerceTo "a hint base name") let interp_hint_base ist s = try try_interp_ltac_var coerce_to_hint_base ist None (dloc,id_of_string s) with Not_found -> s let coerce_to_int = function | VInteger n -> n | v -> raise (CannotCoerceTo "an integer") let interp_int ist locid = try try_interp_ltac_var coerce_to_int ist None locid with Not_found -> user_err_loc(fst locid,"interp_int", str "Unbound variable " ++ pr_id (snd locid) ++ str".") let interp_int_or_var ist = function | ArgVar locid -> interp_int ist locid | ArgArg n -> n let int_or_var_list_of_VList = function | VList l -> List.map (fun n -> ArgArg (coerce_to_int n)) l | _ -> raise Not_found let interp_int_or_var_as_list ist = function | ArgVar (_,id as locid) -> (try int_or_var_list_of_VList (List.assoc id ist.lfun) with Not_found | CannotCoerceTo _ -> [ArgArg (interp_int ist locid)]) | ArgArg n as x -> [x] let interp_int_or_var_list ist l = List.flatten (List.map (interp_int_or_var_as_list ist) l) let constr_of_value env = function | VConstr csr -> csr | VIntroPattern (IntroIdentifier id) -> ([],constr_of_id env id) | _ -> raise Not_found let closed_constr_of_value env v = let ids,c = constr_of_value env v in if ids <> [] then raise Not_found; c let coerce_to_hyp env = function | VConstr ([],c) when isVar c -> destVar c | VIntroPattern (IntroIdentifier id) when is_variable env id -> id | _ -> raise (CannotCoerceTo "a variable") (* Interprets a bound variable (especially an existing hypothesis) *) let interp_hyp ist gl (loc,id as locid) = let env = pf_env gl in (* Look first in lfun for a value coercible to a variable *) try try_interp_ltac_var (coerce_to_hyp env) ist (Some env) locid with Not_found -> (* Then look if bound in the proof context at calling time *) if is_variable env id then id else user_err_loc (loc,"eval_variable", str "No such hypothesis: " ++ pr_id id ++ str ".") let hyp_list_of_VList env = function | VList l -> List.map (coerce_to_hyp env) l | _ -> raise Not_found let interp_hyp_list_as_list ist gl (loc,id as x) = try hyp_list_of_VList (pf_env gl) (List.assoc id ist.lfun) with Not_found | CannotCoerceTo _ -> [interp_hyp ist gl x] let interp_hyp_list ist gl l = List.flatten (List.map (interp_hyp_list_as_list ist gl) l) let interp_move_location ist gl = function | MoveAfter id -> MoveAfter (interp_hyp ist gl id) | MoveBefore id -> MoveBefore (interp_hyp ist gl id) | MoveToEnd toleft as x -> x (* Interprets a qualified name *) let coerce_to_reference env v = try match v with | VConstr ([],c) -> global_of_constr c (* may raise Not_found *) | _ -> raise Not_found with Not_found -> raise (CannotCoerceTo "a reference") let interp_reference ist env = function | ArgArg (_,r) -> r | ArgVar locid -> interp_ltac_var (coerce_to_reference env) ist (Some env) locid let pf_interp_reference ist gl = interp_reference ist (pf_env gl) let coerce_to_inductive = function | VConstr ([],c) when isInd c -> destInd c | _ -> raise (CannotCoerceTo "an inductive type") let interp_inductive ist = function | ArgArg r -> r | ArgVar locid -> interp_ltac_var coerce_to_inductive ist None locid let 
coerce_to_evaluable_ref env v = let ev = match v with | VConstr ([],c) when isConst c -> EvalConstRef (destConst c) | VConstr ([],c) when isVar c -> EvalVarRef (destVar c) | VIntroPattern (IntroIdentifier id) when List.mem id (ids_of_context env) -> EvalVarRef id | _ -> raise (CannotCoerceTo "an evaluable reference") in if not (Tacred.is_evaluable env ev) then raise (CannotCoerceTo "an evaluable reference") else ev let interp_evaluable ist env = function | ArgArg (r,Some (loc,id)) -> (* Maybe [id] has been introduced by Intro-like tactics *) (try match Environ.lookup_named id env with | (_,Some _,_) -> EvalVarRef id | _ -> error_not_evaluable (VarRef id) with Not_found -> match r with | EvalConstRef _ -> r | _ -> error_global_not_found_loc (loc,qualid_of_ident id)) | ArgArg (r,None) -> r | ArgVar locid -> interp_ltac_var (coerce_to_evaluable_ref env) ist (Some env) locid (* Interprets an hypothesis name *) let interp_occurrences ist (b,occs) = (b,interp_int_or_var_list ist occs) let interp_hyp_location ist gl ((occs,id),hl) = ((interp_occurrences ist occs,interp_hyp ist gl id),hl) let interp_clause ist gl { onhyps=ol; concl_occs=occs } = { onhyps=Option.map(List.map (interp_hyp_location ist gl)) ol; concl_occs=interp_occurrences ist occs } (* Interpretation of constructions *) (* Extract the constr list from lfun *) let extract_ltac_constr_values ist env = let rec aux = function | (id,v)::tl -> let (l1,l2) = aux tl in (try ((id,constr_of_value env v)::l1,l2) with Not_found -> let ido = match v with | VIntroPattern (IntroIdentifier id0) -> Some id0 | _ -> None in (l1,(id,ido)::l2)) | [] -> ([],[]) in aux ist.lfun (* Extract the identifier list from lfun: join all branches (what to do else?)*) let rec intropattern_ids (loc,pat) = match pat with | IntroIdentifier id -> [id] | IntroOrAndPattern ll -> List.flatten (List.map intropattern_ids (List.flatten ll)) | IntroWildcard | IntroAnonymous | IntroFresh _ | IntroRewrite _ | IntroForthcoming _ -> [] let rec extract_ids ids = function | (id,VIntroPattern ipat)::tl when not (List.mem id ids) -> intropattern_ids (dloc,ipat) @ extract_ids ids tl | _::tl -> extract_ids ids tl | [] -> [] let default_fresh_id = id_of_string "H" let interp_fresh_id ist env l = let ids = map_succeed (function ArgVar(_,id) -> id | _ -> failwith "") l in let avoid = (extract_ids ids ist.lfun) @ ist.avoid_ids in let id = if l = [] then default_fresh_id else let s = String.concat "" (List.map (function | ArgArg s -> s | ArgVar (_,id) -> string_of_id (interp_ident ist env id)) l) in let s = if Lexer.is_keyword s then s^"0" else s in id_of_string s in Tactics.fresh_id_in_env avoid id env let pf_interp_fresh_id ist gl = interp_fresh_id ist (pf_env gl) let interp_gen kind ist allow_patvar expand_evar fail_evar use_classes env sigma (c,ce) = let (ltacvars,unbndltacvars as vars) = extract_ltac_constr_values ist env in let c = match ce with | None -> c (* If at toplevel (ce<>None), the error can be due to an incorrect context at globalization time: we retype with the now known intros/lettac/inversion hypothesis names *) | Some c -> let ltacdata = (List.map fst ltacvars,unbndltacvars) in intern_gen (kind = IsType) ~allow_patvar ~ltacvars:ltacdata sigma env c in let trace = push_trace (dloc,LtacConstrInterp (c,vars)) ist.trace in let evdc = catch_error trace (understand_ltac ~resolve_classes:use_classes expand_evar sigma env vars kind) c in let (evd,c) = if expand_evar then solve_remaining_evars fail_evar use_classes solve_by_implicit_tactic env sigma evdc else evdc in db_constr 
ist.debug env c; (evd,c) (* Interprets a constr; expects evars to be solved *) let interp_constr_gen kind ist env sigma c = interp_gen kind ist false true true true env sigma c let interp_constr = interp_constr_gen (OfType None) let interp_type = interp_constr_gen IsType (* Interprets an open constr *) let interp_open_constr_gen kind ist = interp_gen kind ist false true false false let interp_open_constr ccl ist = interp_gen (OfType ccl) ist false true false (ccl<>None) let interp_pure_open_constr ist = interp_gen (OfType None) ist false false false false let interp_typed_pattern ist env sigma (c,_) = let sigma, c = interp_gen (OfType None) ist true false false false env sigma c in pattern_of_constr sigma c (* Interprets a constr expression casted by the current goal *) let pf_interp_casted_constr ist gl c = interp_constr_gen (OfType (Some (pf_concl gl))) ist (pf_env gl) (project gl) c (* Interprets a constr expression *) let pf_interp_constr ist gl = interp_constr ist (pf_env gl) (project gl) let constr_list_of_VList env = function | VList l -> List.map (closed_constr_of_value env) l | _ -> raise Not_found let interp_constr_in_compound_list inj_fun dest_fun interp_fun ist env sigma l = let try_expand_ltac_var sigma x = try match dest_fun x with | GVar (_,id), _ -> sigma, List.map inj_fun (constr_list_of_VList env (List.assoc id ist.lfun)) | _ -> raise Not_found with Not_found -> (*all of dest_fun, List.assoc, constr_list_of_VList may raise Not_found*) let sigma, c = interp_fun ist env sigma x in sigma, [c] in let sigma, l = list_fold_map try_expand_ltac_var sigma l in sigma, List.flatten l let interp_constr_list ist env sigma c = interp_constr_in_compound_list (fun x -> x) (fun x -> x) interp_constr ist env sigma c let interp_open_constr_list = interp_constr_in_compound_list (fun x -> x) (fun x -> x) (interp_open_constr None) let interp_auto_lemmas ist env sigma lems = let local_sigma, lems = interp_open_constr_list ist env sigma lems in List.map (fun lem -> (local_sigma,lem)) lems (* Interprets a type expression *) let pf_interp_type ist gl = interp_type ist (pf_env gl) (project gl) (* Interprets a reduction expression *) let interp_unfold ist env (occs,qid) = (interp_occurrences ist occs,interp_evaluable ist env qid) let interp_flag ist env red = { red with rConst = List.map (interp_evaluable ist env) red.rConst } let interp_constr_with_occurrences ist sigma env (occs,c) = let (sigma,c_interp) = interp_constr ist sigma env c in sigma , (interp_occurrences ist occs, c_interp) let interp_typed_pattern_with_occurrences ist env sigma (occs,c) = let sign,p = interp_typed_pattern ist env sigma c in sign, (interp_occurrences ist occs, p) let interp_closed_typed_pattern_with_occurrences ist env sigma occl = snd (interp_typed_pattern_with_occurrences ist env sigma occl) let interp_constr_with_occurrences_and_name_as_list = interp_constr_in_compound_list (fun c -> ((all_occurrences_expr,c),Anonymous)) (function ((occs,c),Anonymous) when occs = all_occurrences_expr -> c | _ -> raise Not_found) (fun ist env sigma (occ_c,na) -> let (sigma,c_interp) = interp_constr_with_occurrences ist env sigma occ_c in sigma, (c_interp, interp_fresh_name ist env na)) let interp_red_expr ist sigma env = function | Unfold l -> sigma , Unfold (List.map (interp_unfold ist env) l) | Fold l -> let (sigma,l_interp) = interp_constr_list ist env sigma l in sigma , Fold l_interp | Cbv f -> sigma , Cbv (interp_flag ist env f) | Lazy f -> sigma , Lazy (interp_flag ist env f) | Pattern l -> let (sigma,l_interp) = 
List.fold_right begin fun c (sigma,acc) -> let (sigma,c_interp) = interp_constr_with_occurrences ist env sigma c in sigma , c_interp :: acc end l (sigma,[]) in sigma , Pattern l_interp | Simpl o -> sigma , Simpl(Option.map (interp_closed_typed_pattern_with_occurrences ist env sigma) o) | (Red _ | Hnf | ExtraRedExpr _ | CbvVm as r) -> sigma , r let pf_interp_red_expr ist gl = interp_red_expr ist (project gl) (pf_env gl) let interp_may_eval f ist gl = function | ConstrEval (r,c) -> let (sigma,redexp) = pf_interp_red_expr ist gl r in let (sigma,c_interp) = f ist { gl with sigma=sigma } c in sigma , pf_reduction_of_red_expr gl redexp c_interp | ConstrContext ((loc,s),c) -> (try let (sigma,ic) = f ist gl c and ctxt = constr_of_VConstr_context (List.assoc s ist.lfun) in sigma , subst_meta [special_meta,ic] ctxt with | Not_found -> user_err_loc (loc, "interp_may_eval", str "Unbound context identifier" ++ pr_id s ++ str".")) | ConstrTypeOf c -> let (sigma,c_interp) = f ist gl c in sigma , pf_type_of gl c_interp | ConstrTerm c -> try f ist gl c with reraise -> debugging_exception_step ist false reraise (fun () -> str"interpretation of term " ++ pr_glob_constr_env (pf_env gl) (fst c)); raise reraise (* Interprets a constr expression possibly to first evaluate *) let interp_constr_may_eval ist gl c = let (sigma,csr) = try interp_may_eval pf_interp_constr ist gl c with reraise -> debugging_exception_step ist false reraise (fun () -> str"evaluation of term"); raise reraise in begin db_constr ist.debug (pf_env gl) csr; sigma , csr end let rec message_of_value gl = function | VVoid -> str "()" | VInteger n -> int n | VIntroPattern ipat -> pr_intro_pattern (dloc,ipat) | VConstr_context c -> pr_constr_env (pf_env gl) c | VConstr c -> pr_constr_under_binders_env (pf_env gl) c | VRec _ | VRTactic _ | VFun _ -> str "" | VList l -> prlist_with_sep spc (message_of_value gl) l let rec interp_message_token ist gl = function | MsgString s -> str s | MsgInt n -> int n | MsgIdent (loc,id) -> let v = try List.assoc id ist.lfun with Not_found -> user_err_loc (loc,"",pr_id id ++ str" not found.") in message_of_value gl v let rec interp_message_nl ist gl = function | [] -> mt() | l -> prlist_with_sep spc (interp_message_token ist gl) l ++ fnl() let interp_message ist gl l = (* Force evaluation of interp_message_token so that potential errors are raised now and not at printing time *) prlist (fun x -> spc () ++ x) (List.map (interp_message_token ist gl) l) let intro_pattern_list_of_Vlist loc env = function | VList l -> List.map (fun a -> loc,coerce_to_intro_pattern env a) l | _ -> raise Not_found let rec interp_intro_pattern ist gl = function | loc, IntroOrAndPattern l -> loc, IntroOrAndPattern (interp_or_and_intro_pattern ist gl l) | loc, IntroIdentifier id -> loc, interp_intro_pattern_var loc ist (pf_env gl) id | loc, IntroFresh id -> loc, IntroFresh (interp_fresh_ident ist (pf_env gl) id) | loc, (IntroWildcard | IntroAnonymous | IntroRewrite _ | IntroForthcoming _) as x -> x and interp_or_and_intro_pattern ist gl = List.map (interp_intro_pattern_list_as_list ist gl) and interp_intro_pattern_list_as_list ist gl = function | [loc,IntroIdentifier id] as l -> (try intro_pattern_list_of_Vlist loc (pf_env gl) (List.assoc id ist.lfun) with Not_found | CannotCoerceTo _ -> List.map (interp_intro_pattern ist gl) l) | l -> List.map (interp_intro_pattern ist gl) l let interp_in_hyp_as ist gl (id,ipat) = (interp_hyp ist gl id,Option.map (interp_intro_pattern ist gl) ipat) (* Quantified named or numbered hypothesis or hypothesis in 
context *) (* (as in Inversion) *) let coerce_to_quantified_hypothesis = function | VInteger n -> AnonHyp n | VIntroPattern (IntroIdentifier id) -> NamedHyp id | v -> raise (CannotCoerceTo "a quantified hypothesis") let interp_quantified_hypothesis ist = function | AnonHyp n -> AnonHyp n | NamedHyp id -> try try_interp_ltac_var coerce_to_quantified_hypothesis ist None(dloc,id) with Not_found -> NamedHyp id let interp_binding_name ist = function | AnonHyp n -> AnonHyp n | NamedHyp id -> (* If a name is bound, it has to be a quantified hypothesis *) (* user has to use other names for variables if these ones clash with *) (* a name intended to be used as a (non-variable) identifier *) try try_interp_ltac_var coerce_to_quantified_hypothesis ist None(dloc,id) with Not_found -> NamedHyp id (* Quantified named or numbered hypothesis or hypothesis in context *) (* (as in Inversion) *) let coerce_to_decl_or_quant_hyp env = function | VInteger n -> AnonHyp n | v -> try NamedHyp (coerce_to_hyp env v) with CannotCoerceTo _ -> raise (CannotCoerceTo "a declared or quantified hypothesis") let interp_declared_or_quantified_hypothesis ist gl = function | AnonHyp n -> AnonHyp n | NamedHyp id -> let env = pf_env gl in try try_interp_ltac_var (coerce_to_decl_or_quant_hyp env) ist (Some env) (dloc,id) with Not_found -> NamedHyp id let interp_binding ist env sigma (loc,b,c) = let sigma, c = interp_open_constr None ist env sigma c in sigma, (loc,interp_binding_name ist b,c) let interp_bindings ist env sigma = function | NoBindings -> sigma, NoBindings | ImplicitBindings l -> let sigma, l = interp_open_constr_list ist env sigma l in sigma, ImplicitBindings l | ExplicitBindings l -> let sigma, l = list_fold_map (interp_binding ist env) sigma l in sigma, ExplicitBindings l let interp_constr_with_bindings ist env sigma (c,bl) = let sigma, bl = interp_bindings ist env sigma bl in let sigma, c = interp_open_constr None ist env sigma c in sigma, (c,bl) let interp_open_constr_with_bindings ist env sigma (c,bl) = let sigma, bl = interp_bindings ist env sigma bl in let sigma, c = interp_open_constr None ist env sigma c in sigma, (c, bl) let loc_of_bindings = function | NoBindings -> dummy_loc | ImplicitBindings l -> loc_of_glob_constr (fst (list_last l)) | ExplicitBindings l -> pi1 (list_last l) let interp_open_constr_with_bindings_loc ist env sigma ((c,_),bl as cb) = let loc1 = loc_of_glob_constr c in let loc2 = loc_of_bindings bl in let loc = if loc2 = dummy_loc then loc1 else join_loc loc1 loc2 in let sigma, cb = interp_open_constr_with_bindings ist env sigma cb in sigma, (loc,cb) let interp_induction_arg ist gl arg = let env = pf_env gl and sigma = project gl in match arg with | ElimOnConstr c -> ElimOnConstr (interp_constr_with_bindings ist env sigma c) | ElimOnAnonHyp n as x -> x | ElimOnIdent (loc,id) -> try match List.assoc id ist.lfun with | VInteger n -> ElimOnAnonHyp n | VIntroPattern (IntroIdentifier id') -> if Tactics.is_quantified_hypothesis id' gl then ElimOnIdent (loc,id') else (try ElimOnConstr (sigma,(constr_of_id env id',NoBindings)) with Not_found -> user_err_loc (loc,"", pr_id id ++ strbrk " binds to " ++ pr_id id' ++ strbrk " which is neither a declared nor a quantified hypothesis.")) | VConstr ([],c) -> ElimOnConstr (sigma,(c,NoBindings)) | _ -> user_err_loc (loc,"", strbrk "Cannot coerce " ++ pr_id id ++ strbrk " either to a quantified hypothesis or to a term.") with Not_found -> (* We were in non strict (interactive) mode *) if Tactics.is_quantified_hypothesis id gl then ElimOnIdent (loc,id) else
let c = (GVar (loc,id),Some (CRef (Ident (loc,id)))) in let (sigma,c) = interp_constr ist env sigma c in ElimOnConstr (sigma,(c,NoBindings)) (* Associates variables with values and gives the remaining variables and values *) let head_with_value (lvar,lval) = let rec head_with_value_rec lacc = function | ([],[]) -> (lacc,[],[]) | (vr::tvr,ve::tve) -> (match vr with | None -> head_with_value_rec lacc (tvr,tve) | Some v -> head_with_value_rec ((v,ve)::lacc) (tvr,tve)) | (vr,[]) -> (lacc,vr,[]) | ([],ve) -> (lacc,[],ve) in head_with_value_rec [] (lvar,lval) (* Gives a context couple if there is a context identifier *) let give_context ctxt = function | None -> [] | Some id -> [id,VConstr_context ctxt] (* Reads a pattern by substituting vars of lfun *) let use_types = false let eval_pattern lfun ist env sigma (_,pat as c) = if use_types then snd (interp_typed_pattern ist env sigma c) else instantiate_pattern sigma lfun pat let read_pattern lfun ist env sigma = function | Subterm (b,ido,c) -> Subterm (b,ido,eval_pattern lfun ist env sigma c) | Term c -> Term (eval_pattern lfun ist env sigma c) (* Reads the hypotheses of a Match Context rule *) let cons_and_check_name id l = if List.mem id l then user_err_loc (dloc,"read_match_goal_hyps", strbrk ("Hypothesis pattern-matching variable "^(string_of_id id)^ " used twice in the same pattern.")) else id::l let rec read_match_goal_hyps lfun ist env sigma lidh = function | (Hyp ((loc,na) as locna,mp))::tl -> let lidh' = name_fold cons_and_check_name na lidh in Hyp (locna,read_pattern lfun ist env sigma mp):: (read_match_goal_hyps lfun ist env sigma lidh' tl) | (Def ((loc,na) as locna,mv,mp))::tl -> let lidh' = name_fold cons_and_check_name na lidh in Def (locna,read_pattern lfun ist env sigma mv, read_pattern lfun ist env sigma mp):: (read_match_goal_hyps lfun ist env sigma lidh' tl) | [] -> [] (* Reads the rules of a Match Context or a Match *) let rec read_match_rule lfun ist env sigma = function | (All tc)::tl -> (All tc)::(read_match_rule lfun ist env sigma tl) | (Pat (rl,mp,tc))::tl -> Pat (read_match_goal_hyps lfun ist env sigma [] rl, read_pattern lfun ist env sigma mp,tc) :: read_match_rule lfun ist env sigma tl | [] -> [] (* For Match Context and Match *) exception Not_coherent_metas exception Eval_fail of std_ppcmds let is_match_catchable = function | PatternMatchingFailure | Eval_fail _ -> true | e -> Logic.catchable_exception e let equal_instances gl (ctx',c') (ctx,c) = (* How to compare instances? Do we want the terms to be convertible? unifiable? Do we want the universe levels to be relevant? (historically, conv_x is used) *) ctx = ctx' & pf_conv_x gl c' c (* Verifies if the matched list is coherent with respect to lcm *) (* While non-linear matching is modulo eq_constr in matches, merge of *) (* different instances of the same metavars is here modulo conversion... 
*) let verify_metas_coherence gl (ln1,lcm) (ln,lm) = let rec aux = function | (id,c as x)::tl -> if List.for_all (fun (id',c') -> id'<>id or equal_instances gl c' c) lcm then x :: aux tl else raise Not_coherent_metas | [] -> lcm in (ln@ln1,aux lm) let adjust (l,lc) = (l,List.map (fun (id,c) -> (id,([],c))) lc) (* Tries to match one hypothesis pattern with a list of hypotheses *) let apply_one_mhyp_context ist env gl lmatch (hypname,patv,pat) lhyps = let get_id_couple id = function | Name idpat -> [idpat,VConstr ([],mkVar id)] | Anonymous -> [] in let match_pat lmatch hyp pat = match pat with | Term t -> let lmeta = extended_matches t hyp in (try let lmeta = verify_metas_coherence gl lmatch lmeta in ([],lmeta,(fun () -> raise PatternMatchingFailure)) with | Not_coherent_metas -> raise PatternMatchingFailure); | Subterm (b,ic,t) -> let rec match_next_pattern find_next () = let (lmeta,ctxt,find_next') = find_next () in try let lmeta = verify_metas_coherence gl lmatch (adjust lmeta) in (give_context ctxt ic,lmeta,match_next_pattern find_next') with | Not_coherent_metas -> match_next_pattern find_next' () in match_next_pattern (fun () -> match_subterm_gen b t hyp) () in let rec apply_one_mhyp_context_rec = function | (id,b,hyp as hd)::tl -> (match patv with | None -> let rec match_next_pattern find_next () = try let (ids, lmeta, find_next') = find_next () in (get_id_couple id hypname@ids, lmeta, hd, match_next_pattern find_next') with | PatternMatchingFailure -> apply_one_mhyp_context_rec tl in match_next_pattern (fun () -> let hyp = if b<>None then refresh_universes_strict hyp else hyp in match_pat lmatch hyp pat) () | Some patv -> match b with | Some body -> let rec match_next_pattern_in_body next_in_body () = try let (ids,lmeta,next_in_body') = next_in_body() in let rec match_next_pattern_in_typ next_in_typ () = try let (ids',lmeta',next_in_typ') = next_in_typ() in (get_id_couple id hypname@ids@ids', lmeta', hd, match_next_pattern_in_typ next_in_typ') with | PatternMatchingFailure -> match_next_pattern_in_body next_in_body' () in match_next_pattern_in_typ (fun () -> let hyp = refresh_universes_strict hyp in match_pat lmeta hyp pat) () with PatternMatchingFailure -> apply_one_mhyp_context_rec tl in match_next_pattern_in_body (fun () -> match_pat lmatch body patv) () | None -> apply_one_mhyp_context_rec tl) | [] -> db_hyp_pattern_failure ist.debug env (hypname,pat); raise PatternMatchingFailure in apply_one_mhyp_context_rec lhyps (* misc *) let mk_constr_value ist gl c = let (sigma,c_interp) = pf_interp_constr ist gl c in sigma,VConstr ([],c_interp) let mk_open_constr_value ist gl c = let (sigma,c_interp) = pf_apply (interp_open_constr None ist) gl c in sigma,VConstr ([],c_interp) let mk_hyp_value ist gl c = VConstr ([],mkVar (interp_hyp ist gl c)) let mk_int_or_var_value ist c = VInteger (interp_int_or_var ist c) let pack_sigma (sigma,c) = {it=c;sigma=sigma} let extend_gl_hyps { it=gl ; sigma=sigma } sign = Goal.V82.new_goal_with sigma gl sign (* Interprets an l-tac expression into a value *) let rec val_interp ist gl (tac:glob_tactic_expr) = let value_interp ist = match tac with (* Immediate evaluation *) | TacFun (it,body) -> project gl , VFun (ist.trace,ist.lfun,it,body) | TacLetIn (true,l,u) -> interp_letrec ist gl l u | TacLetIn (false,l,u) -> interp_letin ist gl l u | TacMatchGoal (lz,lr,lmr) -> interp_match_goal ist gl lz lr lmr | TacMatch (lz,c,lmr) -> interp_match ist gl lz c lmr | TacArg (loc,a) -> interp_tacarg ist gl a (* Delayed evaluation *) | t -> project gl , VFun 
(ist.trace,ist.lfun,[],t) in check_for_interrupt (); match ist.debug with | DebugOn lev -> debug_prompt lev gl tac (fun v -> value_interp {ist with debug=v}) | _ -> value_interp ist and eval_tactic ist = function | TacAtom (loc,t) -> fun gl -> let box = ref None in abstract_tactic_box := box; let call = LtacAtomCall (t,box) in let tac = (* catch error in the interpretation *) catch_error (push_trace(dloc,call)ist.trace) (interp_atomic ist gl) t in (* catch error in the evaluation *) catch_error (push_trace(loc,call)ist.trace) tac gl | TacFun _ | TacLetIn _ -> assert false | TacMatchGoal _ | TacMatch _ -> assert false | TacId s -> fun gl -> let res = tclIDTAC_MESSAGE (interp_message_nl ist gl s) gl in db_breakpoint ist.debug s; res | TacFail (n,s) -> fun gl -> tclFAIL (interp_int_or_var ist n) (interp_message ist gl s) gl | TacProgress tac -> tclPROGRESS (interp_tactic ist tac) | TacAbstract (tac,ido) -> fun gl -> Tactics.tclABSTRACT (Option.map (pf_interp_ident ist gl) ido) (interp_tactic ist tac) gl | TacThen (t1,tf,t,tl) -> tclTHENS3PARTS (interp_tactic ist t1) (Array.map (interp_tactic ist) tf) (interp_tactic ist t) (Array.map (interp_tactic ist) tl) | TacThens (t1,tl) -> tclTHENS (interp_tactic ist t1) (List.map (interp_tactic ist) tl) | TacDo (n,tac) -> tclDO (interp_int_or_var ist n) (interp_tactic ist tac) | TacTimeout (n,tac) -> tclTIMEOUT (interp_int_or_var ist n) (interp_tactic ist tac) | TacTry tac -> tclTRY (interp_tactic ist tac) | TacRepeat tac -> tclREPEAT (interp_tactic ist tac) | TacOrelse (tac1,tac2) -> tclORELSE (interp_tactic ist tac1) (interp_tactic ist tac2) | TacFirst l -> tclFIRST (List.map (interp_tactic ist) l) | TacSolve l -> tclSOLVE (List.map (interp_tactic ist) l) | TacComplete tac -> tclCOMPLETE (interp_tactic ist tac) | TacArg a -> interp_tactic ist (TacArg a) | TacInfo tac -> msg_warning (str "The general \"info\" tactic is currently not working.\n" ++ str "Some specific verbose tactics may exist instead, such as\n" ++ str "info_trivial, info_auto, info_eauto."); eval_tactic ist tac and force_vrec ist gl = function | VRec (lfun,body) -> val_interp {ist with lfun = !lfun} gl body | v -> project gl , v and interp_ltac_reference loc' mustbetac ist gl = function | ArgVar (loc,id) -> let v = List.assoc id ist.lfun in let (sigma,v) = force_vrec ist gl v in let v = propagate_trace ist loc id v in sigma , if mustbetac then coerce_to_tactic loc id v else v | ArgArg (loc,r) -> let ids = extract_ids [] ist.lfun in let loc_info = ((if loc' = dloc then loc else loc'),LtacNameCall r) in let ist = { lfun=[]; debug=ist.debug; avoid_ids=ids; trace = push_trace loc_info ist.trace } in val_interp ist gl (lookup r) and interp_tacarg ist gl arg = let evdref = ref (project gl) in let v = match arg with | TacVoid -> VVoid | Reference r -> let (sigma,v) = interp_ltac_reference dloc false ist gl r in evdref := sigma; v | Integer n -> VInteger n | IntroPattern ipat -> VIntroPattern (snd (interp_intro_pattern ist gl ipat)) | ConstrMayEval c -> let (sigma,c_interp) = interp_constr_may_eval ist gl c in evdref := sigma; VConstr ([],c_interp) | MetaIdArg (loc,_,id) -> assert false | TacCall (loc,r,[]) -> let (sigma,v) = interp_ltac_reference loc true ist gl r in evdref := sigma; v | TacCall (loc,f,l) -> let (sigma,fv) = interp_ltac_reference loc true ist gl f in let (sigma,largs) = List.fold_right begin fun a (sigma',acc) -> let (sigma', a_interp) = interp_tacarg ist gl a in sigma' , a_interp::acc end l (sigma,[]) in List.iter check_is_value largs; let (sigma,v) = interp_app loc ist { gl 
with sigma=sigma } fv largs in evdref:= sigma; v | TacExternal (loc,com,req,la) -> let (sigma,la_interp) = List.fold_right begin fun a (sigma,acc) -> let (sigma,a_interp) = interp_tacarg ist {gl with sigma=sigma} a in sigma , a_interp::acc end la (project gl,[]) in let (sigma,v) = interp_external loc ist { gl with sigma=sigma } com req la_interp in evdref := sigma; v | TacFreshId l -> let id = pf_interp_fresh_id ist gl l in VIntroPattern (IntroIdentifier id) | Tacexp t -> let (sigma,v) = val_interp ist gl t in evdref := sigma; v | TacDynamic(_,t) -> let tg = (Dyn.tag t) in if tg = "tactic" then let (sigma,v) = val_interp ist gl (tactic_out t ist) in evdref := sigma; v else if tg = "value" then value_out t else if tg = "constr" then VConstr ([],constr_out t) else anomaly_loc (dloc, "Tacinterp.val_interp", (str "Unknown dynamic: <" ++ str (Dyn.tag t) ++ str ">")) in !evdref , v (* Interprets an application node *) and interp_app loc ist gl fv largs = match fv with (* if var=[] and body has been delayed by val_interp, then body is not a tactic that expects arguments. Otherwise Ltac goes into an infinite loop (val_interp puts a VFun back on body, and then interp_app is called again...) *) | (VFun(trace,olfun,(_::_ as var),body) |VFun(trace,olfun,([] as var), (TacFun _|TacLetIn _|TacMatchGoal _|TacMatch _| TacArg _ as body))) -> let (newlfun,lvar,lval)=head_with_value (var,largs) in if lvar=[] then let (sigma,v) = try catch_error trace (val_interp {ist with lfun=newlfun@olfun; trace=trace} gl) body with reraise -> debugging_exception_step ist false reraise (fun () -> str "evaluation"); raise reraise in let gl = { gl with sigma=sigma } in debugging_step ist (fun () -> str"evaluation returns"++fnl()++pr_value (Some (pf_env gl)) v); if lval=[] then sigma,v else interp_app loc ist gl v lval else project gl , VFun(trace,newlfun@olfun,lvar,body) | _ -> user_err_loc (loc, "Tacinterp.interp_app", (str"Illegal tactic application.")) (* Gives the tactic corresponding to the tactic value *) and tactic_of_value ist vle g = match vle with | VRTactic res -> res | VFun (trace,lfun,[],t) -> let tac = eval_tactic {ist with lfun=lfun; trace=trace} t in catch_error trace tac g | (VFun _|VRec _) -> error "A fully applied tactic is expected." | VConstr _ -> errorlabstrm "" (str"Value is a term. Expected a tactic.") | VConstr_context _ -> errorlabstrm "" (str"Value is a term context. Expected a tactic.") | VIntroPattern _ -> errorlabstrm "" (str"Value is an intro pattern. 
Expected a tactic.") | _ -> errorlabstrm "" (str"Expression does not evaluate to a tactic.") (* Evaluation with FailError catching *) and eval_with_fail ist is_lazy goal tac = try let (sigma,v) = val_interp ist goal tac in sigma , (match v with | VFun (trace,lfun,[],t) when not is_lazy -> let tac = eval_tactic {ist with lfun=lfun; trace=trace} t in VRTactic (catch_error trace tac { goal with sigma=sigma }) | a -> a) with | FailError (0,s) | Loc.Exc_located(_, FailError (0,s)) | Loc.Exc_located(_,LtacLocated (_,FailError (0,s))) -> raise (Eval_fail (Lazy.force s)) | FailError (lvl,s) -> raise (FailError (lvl - 1, s)) | Loc.Exc_located(s,FailError (lvl,s')) -> raise (Loc.Exc_located(s,FailError (lvl - 1, s'))) | Loc.Exc_located(s,LtacLocated (s'',FailError (lvl,s'))) -> raise (Loc.Exc_located(s,LtacLocated (s'',FailError (lvl - 1, s')))) (* Interprets the clauses of a recursive LetIn *) and interp_letrec ist gl llc u = let lref = ref ist.lfun in let lve = list_map_left (fun ((_,id),b) -> (id,VRec (lref,TacArg (dloc,b)))) llc in lref := lve@ist.lfun; let ist = { ist with lfun = lve@ist.lfun } in val_interp ist gl u (* Interprets the clauses of a LetIn *) and interp_letin ist gl llc u = let (sigma,lve) = List.fold_right begin fun ((_,id),body) (sigma,acc) -> let (sigma,v) = interp_tacarg ist { gl with sigma=sigma } body in check_is_value v; sigma, (id,v)::acc end llc (project gl,[]) in let ist = { ist with lfun = lve@ist.lfun } in val_interp ist { gl with sigma=sigma } u (* Interprets the Match Context expressions *) and interp_match_goal ist goal lz lr lmr = let (gl,sigma) = Goal.V82.nf_evar (project goal) (sig_it goal) in let goal = { it = gl ; sigma = sigma } in let hyps = pf_hyps goal in let hyps = if lr then List.rev hyps else hyps in let concl = pf_concl goal in let env = pf_env goal in let rec apply_goal_sub app ist (id,c) csr mt mhyps hyps = let rec match_next_pattern find_next () = let (lgoal,ctxt,find_next') = find_next () in let lctxt = give_context ctxt id in try apply_hyps_context ist env lz goal mt lctxt (adjust lgoal) mhyps hyps with e when is_match_catchable e -> match_next_pattern find_next' () in match_next_pattern (fun () -> match_subterm_gen app c csr) () in let rec apply_match_goal ist env goal nrs lex lpt = begin if lex<>[] then db_pattern_rule ist.debug nrs (List.hd lex); match lpt with | (All t)::tl -> begin db_mc_pattern_success ist.debug; try eval_with_fail ist lz goal t with e when is_match_catchable e -> apply_match_goal ist env goal (nrs+1) (List.tl lex) tl end | (Pat (mhyps,mgoal,mt))::tl -> let mhyps = List.rev mhyps (* Sens naturel *) in (match mgoal with | Term mg -> (try let lmatch = extended_matches mg concl in db_matched_concl ist.debug env concl; apply_hyps_context ist env lz goal mt [] lmatch mhyps hyps with e when is_match_catchable e -> (match e with | PatternMatchingFailure -> db_matching_failure ist.debug | Eval_fail s -> db_eval_failure ist.debug s | _ -> db_logic_failure ist.debug e); apply_match_goal ist env goal (nrs+1) (List.tl lex) tl) | Subterm (b,id,mg) -> (try apply_goal_sub b ist (id,mg) concl mt mhyps hyps with | PatternMatchingFailure -> apply_match_goal ist env goal (nrs+1) (List.tl lex) tl)) | _ -> errorlabstrm "Tacinterp.apply_match_goal" (v 0 (str "No matching clauses for match goal" ++ (if ist.debug=DebugOff then fnl() ++ str "(use \"Set Ltac Debug\" for more info)" else mt()) ++ str".")) end in apply_match_goal ist env goal 0 lmr (read_match_rule (fst (extract_ltac_constr_values ist env)) ist env (project goal) lmr) (* Tries to match 
the hypotheses in a Match Context *) and apply_hyps_context ist env lz goal mt lctxt lgmatch mhyps hyps = let rec apply_hyps_context_rec lfun lmatch lhyps_rest = function | hyp_pat::tl -> let (hypname, _, _ as hyp_pat) = match hyp_pat with | Hyp ((_,hypname),mhyp) -> hypname, None, mhyp | Def ((_,hypname),mbod,mhyp) -> hypname, Some mbod, mhyp in let rec match_next_pattern find_next = let (lids,lm,hyp_match,find_next') = find_next () in db_matched_hyp ist.debug (pf_env goal) hyp_match hypname; try let id_match = pi1 hyp_match in let nextlhyps = list_remove_assoc_in_triple id_match lhyps_rest in apply_hyps_context_rec (lfun@lids) lm nextlhyps tl with e when is_match_catchable e -> match_next_pattern find_next' in let init_match_pattern () = apply_one_mhyp_context ist env goal lmatch hyp_pat lhyps_rest in match_next_pattern init_match_pattern | [] -> let lfun = extend_values_with_bindings lmatch (lfun@ist.lfun) in db_mc_pattern_success ist.debug; eval_with_fail {ist with lfun=lfun} lz goal mt in apply_hyps_context_rec lctxt lgmatch hyps mhyps and interp_external loc ist gl com req la = let f ch = extern_request ch req gl la in let g ch = internalise_tacarg ch in interp_tacarg ist gl (System.connect f g com) (* Interprets extended tactic generic arguments *) and interp_genarg ist gl x = let evdref = ref (project gl) in let rec interp_genarg ist gl x = let gl = { gl with sigma = !evdref } in match genarg_tag x with | BoolArgType -> in_gen wit_bool (out_gen globwit_bool x) | IntArgType -> in_gen wit_int (out_gen globwit_int x) | IntOrVarArgType -> in_gen wit_int_or_var (ArgArg (interp_int_or_var ist (out_gen globwit_int_or_var x))) | StringArgType -> in_gen wit_string (out_gen globwit_string x) | PreIdentArgType -> in_gen wit_pre_ident (out_gen globwit_pre_ident x) | IntroPatternArgType -> in_gen wit_intro_pattern (interp_intro_pattern ist gl (out_gen globwit_intro_pattern x)) | IdentArgType b -> in_gen (wit_ident_gen b) (pf_interp_fresh_ident ist gl (out_gen (globwit_ident_gen b) x)) | VarArgType -> in_gen wit_var (interp_hyp ist gl (out_gen globwit_var x)) | RefArgType -> in_gen wit_ref (pf_interp_reference ist gl (out_gen globwit_ref x)) | SortArgType -> let (sigma,c_interp) = pf_interp_constr ist gl (GSort (dloc,out_gen globwit_sort x), None) in evdref := sigma; in_gen wit_sort (destSort c_interp) | ConstrArgType -> let (sigma,c_interp) = pf_interp_constr ist gl (out_gen globwit_constr x) in evdref := sigma; in_gen wit_constr c_interp | ConstrMayEvalArgType -> let (sigma,c_interp) = interp_constr_may_eval ist gl (out_gen globwit_constr_may_eval x) in evdref := sigma; in_gen wit_constr_may_eval c_interp | QuantHypArgType -> in_gen wit_quant_hyp (interp_declared_or_quantified_hypothesis ist gl (out_gen globwit_quant_hyp x)) | RedExprArgType -> let (sigma,r_interp) = pf_interp_red_expr ist gl (out_gen globwit_red_expr x) in evdref := sigma; in_gen wit_red_expr r_interp | OpenConstrArgType casted -> in_gen (wit_open_constr_gen casted) (interp_open_constr (if casted then Some (pf_concl gl) else None) ist (pf_env gl) (project gl) (snd (out_gen (globwit_open_constr_gen casted) x))) | ConstrWithBindingsArgType -> in_gen wit_constr_with_bindings (pack_sigma (interp_constr_with_bindings ist (pf_env gl) (project gl) (out_gen globwit_constr_with_bindings x))) | BindingsArgType -> in_gen wit_bindings (pack_sigma (interp_bindings ist (pf_env gl) (project gl) (out_gen globwit_bindings x))) | List0ArgType ConstrArgType -> let (sigma,v) = interp_genarg_constr_list0 ist gl x in evdref := sigma; v | 
List1ArgType ConstrArgType -> let (sigma,v) = interp_genarg_constr_list1 ist gl x in evdref := sigma; v | List0ArgType VarArgType -> interp_genarg_var_list0 ist gl x | List1ArgType VarArgType -> interp_genarg_var_list1 ist gl x | List0ArgType _ -> app_list0 (interp_genarg ist gl) x | List1ArgType _ -> app_list1 (interp_genarg ist gl) x | OptArgType _ -> app_opt (interp_genarg ist gl) x | PairArgType _ -> app_pair (interp_genarg ist gl) (interp_genarg ist gl) x | ExtraArgType s -> match tactic_genarg_level s with | Some n -> (* Special treatment of tactic arguments *) in_gen (wit_tactic n) (TacArg(dloc,valueIn(VFun(ist.trace,ist.lfun,[], out_gen (globwit_tactic n) x)))) | None -> let (sigma,v) = lookup_interp_genarg s ist gl x in evdref:=sigma; v in let v = interp_genarg ist gl x in !evdref , v and interp_genarg_constr_list0 ist gl x = let lc = out_gen (wit_list0 globwit_constr) x in let (sigma,lc) = pf_apply (interp_constr_list ist) gl lc in sigma , in_gen (wit_list0 wit_constr) lc and interp_genarg_constr_list1 ist gl x = let lc = out_gen (wit_list1 globwit_constr) x in let (sigma,lc) = pf_apply (interp_constr_list ist) gl lc in sigma , in_gen (wit_list1 wit_constr) lc and interp_genarg_var_list0 ist gl x = let lc = out_gen (wit_list0 globwit_var) x in let lc = interp_hyp_list ist gl lc in in_gen (wit_list0 wit_var) lc and interp_genarg_var_list1 ist gl x = let lc = out_gen (wit_list1 globwit_var) x in let lc = interp_hyp_list ist gl lc in in_gen (wit_list1 wit_var) lc (* Interprets the Match expressions *) and interp_match ist g lz constr lmr = let rec apply_match_subterm app ist (id,c) csr mt = let rec match_next_pattern find_next () = let (lmatch,ctxt,find_next') = find_next () in let lctxt = give_context ctxt id in let lfun = extend_values_with_bindings (adjust lmatch) (lctxt@ist.lfun) in try eval_with_fail {ist with lfun=lfun} lz g mt with e when is_match_catchable e -> match_next_pattern find_next' () in match_next_pattern (fun () -> match_subterm_gen app c csr) () in let rec apply_match ist sigma csr = let g = { g with sigma=sigma } in function | (All t)::tl -> (try eval_with_fail ist lz g t with e when is_match_catchable e -> apply_match ist sigma csr tl) | (Pat ([],Term c,mt))::tl -> (try let lmatch = try extended_matches c csr with reraise -> debugging_exception_step ist false reraise (fun () -> str "matching with pattern" ++ fnl () ++ pr_constr_pattern_env (pf_env g) c); raise reraise in try let lfun = extend_values_with_bindings lmatch ist.lfun in eval_with_fail { ist with lfun=lfun } lz g mt with reraise -> debugging_exception_step ist false reraise (fun () -> str "rule body for pattern" ++ pr_constr_pattern_env (pf_env g) c); raise reraise with e when is_match_catchable e -> debugging_step ist (fun () -> str "switching to the next rule"); apply_match ist sigma csr tl) | (Pat ([],Subterm (b,id,c),mt))::tl -> (try apply_match_subterm b ist (id,c) csr mt with PatternMatchingFailure -> apply_match ist sigma csr tl) | _ -> errorlabstrm "Tacinterp.apply_match" (str "No matching clauses for match.") in let (sigma,csr) = try interp_ltac_constr ist g constr with reraise -> debugging_exception_step ist true reraise (fun () -> str "evaluation of the matched expression"); raise reraise in let ilr = read_match_rule (fst (extract_ltac_constr_values ist (pf_env g))) ist (pf_env g) sigma lmr in let res = try apply_match ist sigma csr ilr with reraise -> debugging_exception_step ist true reraise (fun () -> str "match expression"); raise reraise in debugging_step ist (fun () -> str "match 
expression returns " ++ pr_value (Some (pf_env g)) (snd res)); res (* Interprets tactic expressions : returns a "constr" *) and interp_ltac_constr ist gl e = let (sigma, result) = try val_interp ist gl e with Not_found -> debugging_step ist (fun () -> str "evaluation failed for" ++ fnl() ++ Pptactic.pr_glob_tactic (pf_env gl) e); raise Not_found in try let cresult = constr_of_value (pf_env gl) result in debugging_step ist (fun () -> Pptactic.pr_glob_tactic (pf_env gl) e ++ fnl() ++ str " has value " ++ fnl() ++ pr_constr_under_binders_env (pf_env gl) cresult); if fst cresult <> [] then raise Not_found; sigma , snd cresult with Not_found -> errorlabstrm "" (str "Must evaluate to a closed term" ++ fnl() ++ str "offending expression: " ++ fnl() ++ Pptactic.pr_glob_tactic (pf_env gl) e ++ fnl() ++ str "this is a " ++ (match result with | VRTactic _ -> str "VRTactic" | VFun (_,il,ul,b) -> (str "VFun with body " ++ fnl() ++ Pptactic.pr_glob_tactic (pf_env gl) b ++ fnl() ++ str "instantiated arguments " ++ fnl() ++ List.fold_right (fun p s -> let (i,v) = p in str (string_of_id i) ++ str ", " ++ s) il (str "") ++ str "uninstantiated arguments " ++ fnl() ++ List.fold_right (fun opt_id s -> (match opt_id with Some id -> str (string_of_id id) | None -> str "_") ++ str ", " ++ s) ul (mt())) | VVoid -> str "VVoid" | VInteger _ -> str "VInteger" | VConstr _ -> str "VConstr" | VIntroPattern _ -> str "VIntroPattern" | VConstr_context _ -> str "VConstrr_context" | VRec _ -> str "VRec" | VList _ -> str "VList") ++ str".") (* Interprets tactic expressions : returns a "tactic" *) and interp_tactic ist tac gl = let (sigma,v) = val_interp ist gl tac in tactic_of_value ist v { gl with sigma=sigma } (* Interprets a primitive tactic *) and interp_atomic ist gl tac = let env = pf_env gl and sigma = project gl in match tac with (* Basic tactics *) | TacIntroPattern l -> h_intro_patterns (interp_intro_pattern_list_as_list ist gl l) | TacIntrosUntil hyp -> h_intros_until (interp_quantified_hypothesis ist hyp) | TacIntroMove (ido,hto) -> h_intro_move (Option.map (interp_fresh_ident ist env) ido) (interp_move_location ist gl hto) | TacAssumption -> h_assumption | TacExact c -> let (sigma,c_interp) = pf_interp_casted_constr ist gl c in tclTHEN (tclEVARS sigma) (h_exact c_interp) | TacExactNoCheck c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_exact_no_check c_interp) | TacVmCastNoCheck c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_vm_cast_no_check c_interp) | TacApply (a,ev,cb,cl) -> let sigma, l = list_fold_map (interp_open_constr_with_bindings_loc ist env) sigma cb in let tac = match cl with | None -> h_apply a ev | Some cl -> (fun l -> h_apply_in a ev l (interp_in_hyp_as ist gl cl)) in tclWITHHOLES ev tac sigma l | TacElim (ev,cb,cbo) -> let sigma, cb = interp_constr_with_bindings ist env sigma cb in let sigma, cbo = Option.fold_map (interp_constr_with_bindings ist env) sigma cbo in tclWITHHOLES ev (h_elim ev cb) sigma cbo | TacElimType c -> let (sigma,c_interp) = pf_interp_type ist gl c in tclTHEN (tclEVARS sigma) (h_elim_type c_interp) | TacCase (ev,cb) -> let sigma, cb = interp_constr_with_bindings ist env sigma cb in tclWITHHOLES ev (h_case ev) sigma cb | TacCaseType c -> let (sigma,c_interp) = pf_interp_type ist gl c in tclTHEN (tclEVARS sigma) (h_case_type c_interp) | TacFix (idopt,n) -> h_fix (Option.map (interp_fresh_ident ist env) idopt) n | TacMutualFix (b,id,n,l) -> let f sigma (id,n,c) = let (sigma,c_interp) = pf_interp_type ist { 
gl with sigma=sigma } c in sigma , (interp_fresh_ident ist env id,n,c_interp) in let (sigma,l_interp) = List.fold_right begin fun c (sigma,acc) -> let (sigma,c_interp) = f sigma c in sigma , c_interp::acc end l (project gl,[]) in tclTHEN (tclEVARS sigma) (h_mutual_fix b (interp_fresh_ident ist env id) n l_interp) | TacCofix idopt -> h_cofix (Option.map (interp_fresh_ident ist env) idopt) | TacMutualCofix (b,id,l) -> let f sigma (id,c) = let (sigma,c_interp) = pf_interp_type ist { gl with sigma=sigma } c in sigma , (interp_fresh_ident ist env id,c_interp) in let (sigma,l_interp) = List.fold_right begin fun c (sigma,acc) -> let (sigma,c_interp) = f sigma c in sigma , c_interp::acc end l (project gl,[]) in tclTHEN (tclEVARS sigma) (h_mutual_cofix b (interp_fresh_ident ist env id) l_interp) | TacCut c -> let (sigma,c_interp) = pf_interp_type ist gl c in tclTHEN (tclEVARS sigma) (h_cut c_interp) | TacAssert (t,ipat,c) -> let (sigma,c) = (if t=None then interp_constr else interp_type) ist env sigma c in tclTHEN (tclEVARS sigma) (abstract_tactic (TacAssert (t,ipat,c)) (Tactics.forward (Option.map (interp_tactic ist) t) (Option.map (interp_intro_pattern ist gl) ipat) c)) | TacGeneralize cl -> let sigma, cl = interp_constr_with_occurrences_and_name_as_list ist env sigma cl in tclWITHHOLES false (h_generalize_gen) sigma cl | TacGeneralizeDep c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_generalize_dep c_interp) | TacLetTac (na,c,clp,b,eqpat) -> let clp = interp_clause ist gl clp in let eqpat = Option.map (interp_intro_pattern ist gl) eqpat in if clp = nowhere then (* We try to fully-typecheck the term *) let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_let_tac b (interp_fresh_name ist env na) c_interp clp eqpat) else (* We try to keep the pattern structure as much as possible *) h_let_pat_tac b (interp_fresh_name ist env na) (interp_pure_open_constr ist env sigma c) clp eqpat (* Automation tactics *) | TacTrivial (debug,lems,l) -> Auto.h_trivial ~debug (interp_auto_lemmas ist env sigma lems) (Option.map (List.map (interp_hint_base ist)) l) | TacAuto (debug,n,lems,l) -> Auto.h_auto ~debug (Option.map (interp_int_or_var ist) n) (interp_auto_lemmas ist env sigma lems) (Option.map (List.map (interp_hint_base ist)) l) (* Derived basic tactics *) | TacSimpleInductionDestruct (isrec,h) -> h_simple_induction_destruct isrec (interp_quantified_hypothesis ist h) | TacInductionDestruct (isrec,ev,(l,el,cls)) -> let sigma, l = list_fold_map (fun sigma (c,(ipato,ipats)) -> let c = interp_induction_arg ist gl c in (sigma,(c, (Option.map (interp_intro_pattern ist gl) ipato, Option.map (interp_intro_pattern ist gl) ipats)))) sigma l in let sigma,el = Option.fold_map (interp_constr_with_bindings ist env) sigma el in let cls = Option.map (interp_clause ist gl) cls in tclWITHHOLES ev (h_induction_destruct isrec ev) sigma (l,el,cls) | TacDoubleInduction (h1,h2) -> let h1 = interp_quantified_hypothesis ist h1 in let h2 = interp_quantified_hypothesis ist h2 in Elim.h_double_induction h1 h2 | TacDecomposeAnd c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (Elim.h_decompose_and c_interp) | TacDecomposeOr c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (Elim.h_decompose_or c_interp) | TacDecompose (l,c) -> let l = List.map (interp_inductive ist) l in let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (Elim.h_decompose l c_interp) | TacSpecialize (n,cb) -> let 
sigma, cb = interp_constr_with_bindings ist env sigma cb in tclWITHHOLES false (h_specialize n) sigma cb | TacLApply c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_lapply c_interp) (* Context management *) | TacClear (b,l) -> h_clear b (interp_hyp_list ist gl l) | TacClearBody l -> h_clear_body (interp_hyp_list ist gl l) | TacMove (dep,id1,id2) -> h_move dep (interp_hyp ist gl id1) (interp_move_location ist gl id2) | TacRename l -> h_rename (List.map (fun (id1,id2) -> interp_hyp ist gl id1, interp_fresh_ident ist env (snd id2)) l) | TacRevert l -> h_revert (interp_hyp_list ist gl l) (* Constructors *) | TacLeft (ev,bl) -> let sigma, bl = interp_bindings ist env sigma bl in tclWITHHOLES ev (h_left ev) sigma bl | TacRight (ev,bl) -> let sigma, bl = interp_bindings ist env sigma bl in tclWITHHOLES ev (h_right ev) sigma bl | TacSplit (ev,_,bll) -> let sigma, bll = list_fold_map (interp_bindings ist env) sigma bll in tclWITHHOLES ev (h_split ev) sigma bll | TacAnyConstructor (ev,t) -> abstract_tactic (TacAnyConstructor (ev,t)) (Tactics.any_constructor ev (Option.map (interp_tactic ist) t)) | TacConstructor (ev,n,bl) -> let sigma, bl = interp_bindings ist env sigma bl in tclWITHHOLES ev (h_constructor ev (interp_int_or_var ist n)) sigma bl (* Conversion *) | TacReduce (r,cl) -> let (sigma,r_interp) = pf_interp_red_expr ist gl r in tclTHEN (tclEVARS sigma) (h_reduce r_interp (interp_clause ist gl cl)) | TacChange (None,c,cl) -> let (sigma,c_interp) = if (cl.onhyps = None or cl.onhyps = Some []) & (cl.concl_occs = all_occurrences_expr or cl.concl_occs = no_occurrences_expr) then pf_interp_type ist gl c else pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_change None c_interp (interp_clause ist gl cl)) | TacChange (Some op,c,cl) -> let sign,op = interp_typed_pattern ist env sigma op in (* spiwack: (2012/04/18) the evar_map output by pf_interp_constr is dropped as the evar_map taken as input (from extend_gl_hyps) is incorrect. This means that evar instantiated by pf_interp_constr may be lost, there. 
*) let (_,c_interp) = try pf_interp_constr ist (extend_gl_hyps gl sign) c with Not_found | Anomaly _ (* Hack *) -> errorlabstrm "" (strbrk "Failed to get enough information from the left-hand side to type the right-hand side.") in tclTHEN (tclEVARS sigma) (h_change (Some op) c_interp (interp_clause ist { gl with sigma=sigma } cl)) (* Equivalence relations *) | TacReflexivity -> h_reflexivity | TacSymmetry c -> h_symmetry (interp_clause ist gl c) | TacTransitivity c -> begin match c with | None -> h_transitivity None | Some c -> let (sigma,c_interp) = pf_interp_constr ist gl c in tclTHEN (tclEVARS sigma) (h_transitivity (Some c_interp)) end (* Equality and inversion *) | TacRewrite (ev,l,cl,by) -> let l = List.map (fun (b,m,c) -> let f env sigma = interp_open_constr_with_bindings ist env sigma c in (b,m,f)) l in let cl = interp_clause ist gl cl in Equality.general_multi_multi_rewrite ev l cl (Option.map (fun by -> tclCOMPLETE (interp_tactic ist by), Equality.Naive) by) | TacInversion (DepInversion (k,c,ids),hyp) -> let (sigma,c_interp) = match c with | None -> sigma , None | Some c -> let (sigma,c_interp) = pf_interp_constr ist gl c in sigma , Some c_interp in Inv.dinv k c_interp (Option.map (interp_intro_pattern ist gl) ids) (interp_declared_or_quantified_hypothesis ist gl hyp) | TacInversion (NonDepInversion (k,idl,ids),hyp) -> Inv.inv_clause k (Option.map (interp_intro_pattern ist gl) ids) (interp_hyp_list ist gl idl) (interp_declared_or_quantified_hypothesis ist gl hyp) | TacInversion (InversionUsing (c,idl),hyp) -> let (sigma,c_interp) = pf_interp_constr ist gl c in Leminv.lemInv_clause (interp_declared_or_quantified_hypothesis ist gl hyp) c_interp (interp_hyp_list ist gl idl) (* For extensions *) | TacExtend (loc,opn,l) -> let tac = lookup_tactic opn in let (sigma,args) = List.fold_right begin fun a (sigma,acc) -> let (sigma,a_interp) = interp_genarg ist { gl with sigma=sigma } a in sigma , a_interp::acc end l (project gl,[]) in abstract_extended_tactic opn args (tac args) | TacAlias (loc,s,l,(_,body)) -> fun gl -> let evdref = ref gl.sigma in let rec f x = match genarg_tag x with | IntArgType -> VInteger (out_gen globwit_int x) | IntOrVarArgType -> mk_int_or_var_value ist (out_gen globwit_int_or_var x) | PreIdentArgType -> failwith "pre-identifiers cannot be bound" | IntroPatternArgType -> VIntroPattern (snd (interp_intro_pattern ist gl (out_gen globwit_intro_pattern x))) | IdentArgType b -> value_of_ident (interp_fresh_ident ist env (out_gen (globwit_ident_gen b) x)) | VarArgType -> mk_hyp_value ist gl (out_gen globwit_var x) | RefArgType -> VConstr ([],constr_of_global (pf_interp_reference ist gl (out_gen globwit_ref x))) | SortArgType -> VConstr ([],mkSort (interp_sort (out_gen globwit_sort x))) | ConstrArgType -> let (sigma,v) = mk_constr_value ist gl (out_gen globwit_constr x) in evdref := sigma; v | OpenConstrArgType false -> let (sigma,v) = mk_open_constr_value ist gl (snd (out_gen globwit_open_constr x)) in evdref := sigma; v | ConstrMayEvalArgType -> let (sigma,c_interp) = interp_constr_may_eval ist gl (out_gen globwit_constr_may_eval x) in evdref := sigma; VConstr ([],c_interp) | ExtraArgType s when tactic_genarg_level s <> None -> (* Special treatment of tactic arguments *) let (sigma,v) = val_interp ist gl (out_gen (globwit_tactic (Option.get (tactic_genarg_level s))) x) in evdref := sigma; v | List0ArgType ConstrArgType -> let wit = wit_list0 globwit_constr in let (sigma,l_interp) = List.fold_right begin fun c (sigma,acc) -> let (sigma,c_interp) = mk_constr_value ist { 
gl with sigma=sigma } c in sigma , c_interp::acc end (out_gen wit x) (project gl,[]) in evdref := sigma; VList (l_interp) | List0ArgType VarArgType -> let wit = wit_list0 globwit_var in VList (List.map (mk_hyp_value ist gl) (out_gen wit x)) | List0ArgType IntArgType -> let wit = wit_list0 globwit_int in VList (List.map (fun x -> VInteger x) (out_gen wit x)) | List0ArgType IntOrVarArgType -> let wit = wit_list0 globwit_int_or_var in VList (List.map (mk_int_or_var_value ist) (out_gen wit x)) | List0ArgType (IdentArgType b) -> let wit = wit_list0 (globwit_ident_gen b) in let mk_ident x = value_of_ident (interp_fresh_ident ist env x) in VList (List.map mk_ident (out_gen wit x)) | List0ArgType IntroPatternArgType -> let wit = wit_list0 globwit_intro_pattern in let mk_ipat x = VIntroPattern (snd (interp_intro_pattern ist gl x)) in VList (List.map mk_ipat (out_gen wit x)) | List1ArgType ConstrArgType -> let wit = wit_list1 globwit_constr in let (sigma, l_interp) = List.fold_right begin fun c (sigma,acc) -> let (sigma,c_interp) = mk_constr_value ist { gl with sigma=sigma } c in sigma , c_interp::acc end (out_gen wit x) (project gl,[]) in evdref:=sigma; VList l_interp | List1ArgType VarArgType -> let wit = wit_list1 globwit_var in VList (List.map (mk_hyp_value ist gl) (out_gen wit x)) | List1ArgType IntArgType -> let wit = wit_list1 globwit_int in VList (List.map (fun x -> VInteger x) (out_gen wit x)) | List1ArgType IntOrVarArgType -> let wit = wit_list1 globwit_int_or_var in VList (List.map (mk_int_or_var_value ist) (out_gen wit x)) | List1ArgType (IdentArgType b) -> let wit = wit_list1 (globwit_ident_gen b) in let mk_ident x = value_of_ident (interp_fresh_ident ist env x) in VList (List.map mk_ident (out_gen wit x)) | List1ArgType IntroPatternArgType -> let wit = wit_list1 globwit_intro_pattern in let mk_ipat x = VIntroPattern (snd (interp_intro_pattern ist gl x)) in VList (List.map mk_ipat (out_gen wit x)) | StringArgType | BoolArgType | QuantHypArgType | RedExprArgType | OpenConstrArgType _ | ConstrWithBindingsArgType | ExtraArgType _ | BindingsArgType | OptArgType _ | PairArgType _ | List0ArgType _ | List1ArgType _ -> error "This argument type is not supported in tactic notations." 
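(* [f] above interprets each globalized argument of the tactic notation into an Ltac value (VInteger, VIntroPattern, VConstr, VList, ...), threading the updated evar map through [evdref]; the resulting bindings are joined with [ist.lfun] below before the alias body is interpreted. *)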
in let lfun = (List.map (fun (x,c) -> (x,f c)) l)@ist.lfun in let trace = push_trace (loc,LtacNotationCall s) ist.trace in let gl = { gl with sigma = !evdref } in interp_tactic { ist with lfun=lfun; trace=trace } body gl let make_empty_glob_sign () = { ltacvars = ([],[]); ltacrecvars = []; gsigma = Evd.empty; genv = Global.env() } let fully_empty_glob_sign = { ltacvars = ([],[]); ltacrecvars = []; gsigma = Evd.empty; genv = Environ.empty_env } (* Initial call for interpretation *) let interp_tac_gen lfun avoid_ids debug t gl = interp_tactic { lfun=lfun; avoid_ids=avoid_ids; debug=debug; trace=[] } (intern_tactic true { ltacvars = (List.map fst lfun, []); ltacrecvars = []; gsigma = project gl; genv = pf_env gl } t) gl let eval_tactic t gls = db_initialize (); interp_tactic { lfun=[]; avoid_ids=[]; debug=get_debug(); trace=[] } t gls let interp t = interp_tac_gen [] [] (get_debug()) t let eval_ltac_constr gl t = interp_ltac_constr { lfun=[]; avoid_ids=[]; debug=get_debug(); trace=[] } gl (intern_tactic_or_tacarg (make_empty_glob_sign ()) t ) (* Hides interpretation for pretty-print *) let hide_interp t ot gl = let ist = { ltacvars = ([],[]); ltacrecvars = []; gsigma = project gl; genv = pf_env gl } in let te = intern_tactic true ist t in let t = eval_tactic te in match ot with | None -> abstract_tactic_expr (TacArg (dloc,Tacexp te)) t gl | Some t' -> abstract_tactic_expr ~dflt:true (TacArg (dloc,Tacexp te)) (tclTHEN t t') gl (***************************************************************************) (* Substitution at module closing time *) let subst_quantified_hypothesis _ x = x let subst_declared_or_quantified_hypothesis _ x = x let subst_glob_constr_and_expr subst (c,e) = assert (e=None); (* e<>None only for toplevel tactics *) (Detyping.subst_glob_constr subst c,None) let subst_glob_constr = subst_glob_constr_and_expr (* shortening *) let subst_binding subst (loc,b,c) = (loc,subst_quantified_hypothesis subst b,subst_glob_constr subst c) let subst_bindings subst = function | NoBindings -> NoBindings | ImplicitBindings l -> ImplicitBindings (List.map (subst_glob_constr subst) l) | ExplicitBindings l -> ExplicitBindings (List.map (subst_binding subst) l) let subst_glob_with_bindings subst (c,bl) = (subst_glob_constr subst c, subst_bindings subst bl) let subst_induction_arg subst = function | ElimOnConstr c -> ElimOnConstr (subst_glob_with_bindings subst c) | ElimOnAnonHyp n as x -> x | ElimOnIdent id as x -> x let subst_and_short_name f (c,n) = (* assert (n=None); *)(* since tacdef are strictly globalized *) (f c,None) let subst_or_var f = function | ArgVar _ as x -> x | ArgArg x -> ArgArg (f x) let subst_located f (_loc,id) = (dloc,f id) let subst_reference subst = subst_or_var (subst_located (subst_kn subst)) (*CSC: subst_global_reference is used "only" for RefArgType, that propagates to the syntactic non-terminals "global", used in commands such as Print. It is also used for non-evaluable references. 
*) let subst_global_reference subst = let subst_global ref = let ref',t' = subst_global subst ref in if not (eq_constr (constr_of_global ref') t') then ppnl (str "Warning: The reference " ++ pr_global ref ++ str " is not " ++ str " expanded to \"" ++ pr_lconstr t' ++ str "\", but to " ++ pr_global ref') ; ref' in subst_or_var (subst_located subst_global) let subst_evaluable subst = let subst_eval_ref = subst_evaluable_reference subst in subst_or_var (subst_and_short_name subst_eval_ref) let subst_unfold subst (l,e) = (l,subst_evaluable subst e) let subst_flag subst red = { red with rConst = List.map (subst_evaluable subst) red.rConst } let subst_constr_with_occurrences subst (l,c) = (l,subst_glob_constr subst c) let subst_glob_constr_or_pattern subst (c,p) = (subst_glob_constr subst c,subst_pattern subst p) let subst_pattern_with_occurrences subst (l,p) = (l,subst_glob_constr_or_pattern subst p) let subst_redexp subst = function | Unfold l -> Unfold (List.map (subst_unfold subst) l) | Fold l -> Fold (List.map (subst_glob_constr subst) l) | Cbv f -> Cbv (subst_flag subst f) | Lazy f -> Lazy (subst_flag subst f) | Pattern l -> Pattern (List.map (subst_constr_with_occurrences subst) l) | Simpl o -> Simpl (Option.map (subst_pattern_with_occurrences subst) o) | (Red _ | Hnf | ExtraRedExpr _ | CbvVm as r) -> r let subst_raw_may_eval subst = function | ConstrEval (r,c) -> ConstrEval (subst_redexp subst r,subst_glob_constr subst c) | ConstrContext (locid,c) -> ConstrContext (locid,subst_glob_constr subst c) | ConstrTypeOf c -> ConstrTypeOf (subst_glob_constr subst c) | ConstrTerm c -> ConstrTerm (subst_glob_constr subst c) let subst_match_pattern subst = function | Subterm (b,ido,pc) -> Subterm (b,ido,(subst_glob_constr_or_pattern subst pc)) | Term pc -> Term (subst_glob_constr_or_pattern subst pc) let rec subst_match_goal_hyps subst = function | Hyp (locs,mp) :: tl -> Hyp (locs,subst_match_pattern subst mp) :: subst_match_goal_hyps subst tl | Def (locs,mv,mp) :: tl -> Def (locs,subst_match_pattern subst mv, subst_match_pattern subst mp) :: subst_match_goal_hyps subst tl | [] -> [] let rec subst_atomic subst (t:glob_atomic_tactic_expr) = match t with (* Basic tactics *) | TacIntroPattern _ | TacIntrosUntil _ | TacIntroMove _ as x -> x | TacAssumption as x -> x | TacExact c -> TacExact (subst_glob_constr subst c) | TacExactNoCheck c -> TacExactNoCheck (subst_glob_constr subst c) | TacVmCastNoCheck c -> TacVmCastNoCheck (subst_glob_constr subst c) | TacApply (a,ev,cb,cl) -> TacApply (a,ev,List.map (subst_glob_with_bindings subst) cb,cl) | TacElim (ev,cb,cbo) -> TacElim (ev,subst_glob_with_bindings subst cb, Option.map (subst_glob_with_bindings subst) cbo) | TacElimType c -> TacElimType (subst_glob_constr subst c) | TacCase (ev,cb) -> TacCase (ev,subst_glob_with_bindings subst cb) | TacCaseType c -> TacCaseType (subst_glob_constr subst c) | TacFix (idopt,n) as x -> x | TacMutualFix (b,id,n,l) -> TacMutualFix(b,id,n,List.map (fun (id,n,c) -> (id,n,subst_glob_constr subst c)) l) | TacCofix idopt as x -> x | TacMutualCofix (b,id,l) -> TacMutualCofix (b,id, List.map (fun (id,c) -> (id,subst_glob_constr subst c)) l) | TacCut c -> TacCut (subst_glob_constr subst c) | TacAssert (b,na,c) -> TacAssert (Option.map (subst_tactic subst) b,na,subst_glob_constr subst c) | TacGeneralize cl -> TacGeneralize (List.map (on_fst (subst_constr_with_occurrences subst))cl) | TacGeneralizeDep c -> TacGeneralizeDep (subst_glob_constr subst c) | TacLetTac (id,c,clp,b,eqpat) -> TacLetTac (id,subst_glob_constr subst 
c,clp,b,eqpat) (* Automation tactics *) | TacTrivial (d,lems,l) -> TacTrivial (d,List.map (subst_glob_constr subst) lems,l) | TacAuto (d,n,lems,l) -> TacAuto (d,n,List.map (subst_glob_constr subst) lems,l) (* Derived basic tactics *) | TacSimpleInductionDestruct (isrec,h) as x -> x | TacInductionDestruct (isrec,ev,(l,el,cls)) -> let l' = List.map (fun (c,ids) -> subst_induction_arg subst c, ids) l in let el' = Option.map (subst_glob_with_bindings subst) el in TacInductionDestruct (isrec,ev,(l',el',cls)) | TacDoubleInduction (h1,h2) as x -> x | TacDecomposeAnd c -> TacDecomposeAnd (subst_glob_constr subst c) | TacDecomposeOr c -> TacDecomposeOr (subst_glob_constr subst c) | TacDecompose (l,c) -> let l = List.map (subst_or_var (subst_inductive subst)) l in TacDecompose (l,subst_glob_constr subst c) | TacSpecialize (n,l) -> TacSpecialize (n,subst_glob_with_bindings subst l) | TacLApply c -> TacLApply (subst_glob_constr subst c) (* Context management *) | TacClear _ as x -> x | TacClearBody l as x -> x | TacMove (dep,id1,id2) as x -> x | TacRename l as x -> x | TacRevert _ as x -> x (* Constructors *) | TacLeft (ev,bl) -> TacLeft (ev,subst_bindings subst bl) | TacRight (ev,bl) -> TacRight (ev,subst_bindings subst bl) | TacSplit (ev,b,bll) -> TacSplit (ev,b,List.map (subst_bindings subst) bll) | TacAnyConstructor (ev,t) -> TacAnyConstructor (ev,Option.map (subst_tactic subst) t) | TacConstructor (ev,n,bl) -> TacConstructor (ev,n,subst_bindings subst bl) (* Conversion *) | TacReduce (r,cl) -> TacReduce (subst_redexp subst r, cl) | TacChange (op,c,cl) -> TacChange (Option.map (subst_glob_constr_or_pattern subst) op, subst_glob_constr subst c, cl) (* Equivalence relations *) | TacReflexivity | TacSymmetry _ as x -> x | TacTransitivity c -> TacTransitivity (Option.map (subst_glob_constr subst) c) (* Equality and inversion *) | TacRewrite (ev,l,cl,by) -> TacRewrite (ev, List.map (fun (b,m,c) -> b,m,subst_glob_with_bindings subst c) l, cl,Option.map (subst_tactic subst) by) | TacInversion (DepInversion (k,c,l),hyp) -> TacInversion (DepInversion (k,Option.map (subst_glob_constr subst) c,l),hyp) | TacInversion (NonDepInversion _,_) as x -> x | TacInversion (InversionUsing (c,cl),hyp) -> TacInversion (InversionUsing (subst_glob_constr subst c,cl),hyp) (* For extensions *) | TacExtend (_loc,opn,l) -> TacExtend (dloc,opn,List.map (subst_genarg subst) l) | TacAlias (_,s,l,(dir,body)) -> TacAlias (dloc,s,List.map (fun (id,a) -> (id,subst_genarg subst a)) l, (dir,subst_tactic subst body)) and subst_tactic subst (t:glob_tactic_expr) = match t with | TacAtom (_loc,t) -> TacAtom (dloc, subst_atomic subst t) | TacFun tacfun -> TacFun (subst_tactic_fun subst tacfun) | TacLetIn (r,l,u) -> let l = List.map (fun (n,b) -> (n,subst_tacarg subst b)) l in TacLetIn (r,l,subst_tactic subst u) | TacMatchGoal (lz,lr,lmr) -> TacMatchGoal(lz,lr, subst_match_rule subst lmr) | TacMatch (lz,c,lmr) -> TacMatch (lz,subst_tactic subst c,subst_match_rule subst lmr) | TacId _ | TacFail _ as x -> x | TacProgress tac -> TacProgress (subst_tactic subst tac:glob_tactic_expr) | TacAbstract (tac,s) -> TacAbstract (subst_tactic subst tac,s) | TacThen (t1,tf,t2,tl) -> TacThen (subst_tactic subst t1,Array.map (subst_tactic subst) tf, subst_tactic subst t2,Array.map (subst_tactic subst) tl) | TacThens (t,tl) -> TacThens (subst_tactic subst t, List.map (subst_tactic subst) tl) | TacDo (n,tac) -> TacDo (n,subst_tactic subst tac) | TacTimeout (n,tac) -> TacTimeout (n,subst_tactic subst tac) | TacTry tac -> TacTry (subst_tactic subst tac) | 
TacInfo tac -> TacInfo (subst_tactic subst tac) | TacRepeat tac -> TacRepeat (subst_tactic subst tac) | TacOrelse (tac1,tac2) -> TacOrelse (subst_tactic subst tac1,subst_tactic subst tac2) | TacFirst l -> TacFirst (List.map (subst_tactic subst) l) | TacSolve l -> TacSolve (List.map (subst_tactic subst) l) | TacComplete tac -> TacComplete (subst_tactic subst tac) | TacArg (_,a) -> TacArg (dloc,subst_tacarg subst a) and subst_tactic_fun subst (var,body) = (var,subst_tactic subst body) and subst_tacarg subst = function | Reference r -> Reference (subst_reference subst r) | ConstrMayEval c -> ConstrMayEval (subst_raw_may_eval subst c) | MetaIdArg (_loc,_,_) -> assert false | TacCall (_loc,f,l) -> TacCall (_loc, subst_reference subst f, List.map (subst_tacarg subst) l) | TacExternal (_loc,com,req,la) -> TacExternal (_loc,com,req,List.map (subst_tacarg subst) la) | (TacVoid | IntroPattern _ | Integer _ | TacFreshId _) as x -> x | Tacexp t -> Tacexp (subst_tactic subst t) | TacDynamic(the_loc,t) as x -> (match Dyn.tag t with | "tactic" | "value" -> x | "constr" -> TacDynamic(the_loc, constr_in (subst_mps subst (constr_out t))) | s -> anomaly_loc (dloc, "Tacinterp.val_interp", str "Unknown dynamic: <" ++ str s ++ str ">")) (* Reads the rules of a Match Context or a Match *) and subst_match_rule subst = function | (All tc)::tl -> (All (subst_tactic subst tc))::(subst_match_rule subst tl) | (Pat (rl,mp,tc))::tl -> let hyps = subst_match_goal_hyps subst rl in let pat = subst_match_pattern subst mp in Pat (hyps,pat,subst_tactic subst tc) ::(subst_match_rule subst tl) | [] -> [] and subst_genarg subst (x:glob_generic_argument) = match genarg_tag x with | BoolArgType -> in_gen globwit_bool (out_gen globwit_bool x) | IntArgType -> in_gen globwit_int (out_gen globwit_int x) | IntOrVarArgType -> in_gen globwit_int_or_var (out_gen globwit_int_or_var x) | StringArgType -> in_gen globwit_string (out_gen globwit_string x) | PreIdentArgType -> in_gen globwit_pre_ident (out_gen globwit_pre_ident x) | IntroPatternArgType -> in_gen globwit_intro_pattern (out_gen globwit_intro_pattern x) | IdentArgType b -> in_gen (globwit_ident_gen b) (out_gen (globwit_ident_gen b) x) | VarArgType -> in_gen globwit_var (out_gen globwit_var x) | RefArgType -> in_gen globwit_ref (subst_global_reference subst (out_gen globwit_ref x)) | SortArgType -> in_gen globwit_sort (out_gen globwit_sort x) | ConstrArgType -> in_gen globwit_constr (subst_glob_constr subst (out_gen globwit_constr x)) | ConstrMayEvalArgType -> in_gen globwit_constr_may_eval (subst_raw_may_eval subst (out_gen globwit_constr_may_eval x)) | QuantHypArgType -> in_gen globwit_quant_hyp (subst_declared_or_quantified_hypothesis subst (out_gen globwit_quant_hyp x)) | RedExprArgType -> in_gen globwit_red_expr (subst_redexp subst (out_gen globwit_red_expr x)) | OpenConstrArgType b -> in_gen (globwit_open_constr_gen b) ((),subst_glob_constr subst (snd (out_gen (globwit_open_constr_gen b) x))) | ConstrWithBindingsArgType -> in_gen globwit_constr_with_bindings (subst_glob_with_bindings subst (out_gen globwit_constr_with_bindings x)) | BindingsArgType -> in_gen globwit_bindings (subst_bindings subst (out_gen globwit_bindings x)) | List0ArgType _ -> app_list0 (subst_genarg subst) x | List1ArgType _ -> app_list1 (subst_genarg subst) x | OptArgType _ -> app_opt (subst_genarg subst) x | PairArgType _ -> app_pair (subst_genarg subst) (subst_genarg subst) x | ExtraArgType s -> match tactic_genarg_level s with | Some n -> (* Special treatment of tactic arguments *) in_gen 
(globwit_tactic n) (subst_tactic subst (out_gen (globwit_tactic n) x)) | None -> lookup_genarg_subst s subst x (***************************************************************************) (* Tactic registration *) (* Declaration of the TAC-DEFINITION object *) let add (kn,td) = mactab := Gmap.add kn td !mactab let replace (kn,td) = mactab := Gmap.add kn td (Gmap.remove kn !mactab) type tacdef_kind = | NewTac of identifier | UpdateTac of ltac_constant let load_md i ((sp,kn),(local,defs)) = let dp,_ = repr_path sp in let mp,dir,_ = repr_kn kn in List.iter (fun (id,t) -> match id with NewTac id -> let sp = Libnames.make_path dp id in let kn = Names.make_kn mp dir (label_of_id id) in Nametab.push_tactic (Until i) sp kn; add (kn,t) | UpdateTac kn -> replace (kn,t)) defs let open_md i ((sp,kn),(local,defs)) = let dp,_ = repr_path sp in let mp,dir,_ = repr_kn kn in List.iter (fun (id,t) -> match id with NewTac id -> let sp = Libnames.make_path dp id in let kn = Names.make_kn mp dir (label_of_id id) in Nametab.push_tactic (Exactly i) sp kn | UpdateTac kn -> ()) defs let cache_md x = load_md 1 x let subst_kind subst id = match id with | NewTac _ -> id | UpdateTac kn -> UpdateTac (subst_kn subst kn) let subst_md (subst,(local,defs)) = (local, List.map (fun (id,t) -> (subst_kind subst id,subst_tactic subst t)) defs) let classify_md (local,defs as o) = if local then Dispose else Substitute o let inMD : bool * (tacdef_kind * glob_tactic_expr) list -> obj = declare_object {(default_object "TAC-DEFINITION") with cache_function = cache_md; load_function = load_md; open_function = open_md; subst_function = subst_md; classify_function = classify_md} let rec split_ltac_fun = function | TacFun (l,t) -> (l,t) | t -> ([],t) let pr_ltac_fun_arg = function | None -> spc () ++ str "_" | Some id -> spc () ++ pr_id id let print_ltac id = try let kn = Nametab.locate_tactic id in let l,t = split_ltac_fun (lookup kn) in hv 2 ( hov 2 (str "Ltac" ++ spc() ++ pr_qualid id ++ prlist pr_ltac_fun_arg l ++ spc () ++ str ":=") ++ spc() ++ Pptactic.pr_glob_tactic (Global.env ()) t) with Not_found -> errorlabstrm "print_ltac" (pr_qualid id ++ spc() ++ str "is not a user defined tactic.") open Libnames (* Adds a definition for tactics in the table *) let make_absolute_name ident repl = let loc = loc_of_reference ident in try let id, kn = if repl then None, Nametab.locate_tactic (snd (qualid_of_reference ident)) else let id = coerce_reference_to_id ident in Some id, Lib.make_kn id in if Gmap.mem kn !mactab then if repl then id, kn else user_err_loc (loc,"Tacinterp.add_tacdef", str "There is already an Ltac named " ++ pr_reference ident ++ str".") else if is_atomic_kn kn then user_err_loc (loc,"Tacinterp.add_tacdef", str "Reserved Ltac name " ++ pr_reference ident ++ str".") else id, kn with Not_found -> user_err_loc (loc,"Tacinterp.add_tacdef", str "There is no Ltac named " ++ pr_reference ident ++ str".") let add_tacdef local isrec tacl = let rfun = List.map (fun (ident, b, _) -> make_absolute_name ident b) tacl in let ist = {(make_empty_glob_sign()) with ltacrecvars = if isrec then list_map_filter (function (Some id, qid) -> Some (id, qid) | (None, _) -> None) rfun else []} in let gtacl = List.map2 (fun (_,b,def) (id, qid) -> let k = if b then UpdateTac qid else NewTac (Option.get id) in let t = Flags.with_option strict_check (intern_tactic_or_tacarg ist) def in (k, t)) tacl rfun in let id0 = fst (List.hd rfun) in let _ = match id0 with | Some id0 -> ignore(Lib.add_leaf id0 (inMD (local,gtacl))) | _ -> Lib.add_anonymous_leaf 
(inMD (local,gtacl)) in List.iter (fun (id,b,_) -> Flags.if_verbose msgnl (Libnames.pr_reference id ++ (if b then str " is redefined" else str " is defined"))) tacl (***************************************************************************) (* Other entry points *) let glob_tactic x = Flags.with_option strict_check (intern_tactic true (make_empty_glob_sign ())) x let glob_tactic_env l env x = Flags.with_option strict_check (intern_pure_tactic { ltacvars = (l,[]); ltacrecvars = []; gsigma = Evd.empty; genv = env }) x let interp_redexp env sigma r = let ist = { lfun=[]; avoid_ids=[]; debug=get_debug (); trace=[] } in let gist = {(make_empty_glob_sign ()) with genv = env; gsigma = sigma } in interp_red_expr ist sigma env (intern_red_expr gist r) (***************************************************************************) (* Embed tactics in raw or glob tactic expr *) let globTacticIn t = TacArg (dloc,TacDynamic (dloc,tactic_in t)) let tacticIn t = globTacticIn (fun ist -> try glob_tactic (t ist) with e when Errors.noncritical e -> anomalylabstrm "tacticIn" (str "Incorrect tactic expression. Received exception is:" ++ Errors.print e)) let tacticOut = function | TacArg (_,TacDynamic (_,d)) -> if (Dyn.tag d) = "tactic" then tactic_out d else anomalylabstrm "tacticOut" (str "Dynamic tag should be tactic") | ast -> anomalylabstrm "tacticOut" (str "Not a Dynamic ast: " (* ++ print_ast ast*) ) (***************************************************************************) (* Backwarding recursive needs of tactic glob/interp/eval functions *) let _ = Auto.set_extern_interp (fun l -> let l = List.map (fun (id,c) -> (id,VConstr ([],c))) l in interp_tactic {lfun=l;avoid_ids=[];debug=get_debug(); trace=[]}) let _ = Auto.set_extern_intern_tac (fun l -> Flags.with_option strict_check (intern_pure_tactic {(make_empty_glob_sign()) with ltacvars=(l,[])})) let _ = Auto.set_extern_subst_tactic subst_tactic coq-8.4pl2/tactics/termdn.ml0000640000175000001440000000764212010532755015115 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* struct module X = struct type t = constr_pattern let compare = Pervasives.compare end type term_label = | GRLabel of global_reference | ProdLabel | LambdaLabel | SortLabel module Y = struct type t = term_label let compare x y = let make_name n = match n with | GRLabel(ConstRef con) -> GRLabel(ConstRef(constant_of_kn(canonical_con con))) | GRLabel(IndRef (kn,i)) -> GRLabel(IndRef(mind_of_kn(canonical_mind kn),i)) | GRLabel(ConstructRef ((kn,i),j ))-> GRLabel(ConstructRef((mind_of_kn(canonical_mind kn),i),j)) | k -> k in Pervasives.compare (make_name x) (make_name y) end module Dn = Dn.Make(X)(Y)(Z) type t = Dn.t type 'a lookup_res = 'a Dn.lookup_res (*If we have: f a b c ..., decomp gives: (f,[a;b;c;...])*) let decomp = let rec decrec acc c = match kind_of_term c with | App (f,l) -> decrec (Array.fold_right (fun a l -> a::l) l acc) f | Cast (c1,_,_) -> decrec acc c1 | _ -> (c,acc) in decrec [] let decomp_pat = let rec decrec acc = function | PApp (f,args) -> decrec (Array.to_list args @ acc) f | c -> (c,acc) in decrec [] let constr_pat_discr t = if not (occur_meta_pattern t) then None else match decomp_pat t with | PRef ((IndRef _) as ref), args | PRef ((ConstructRef _ ) as ref), args -> Some (GRLabel ref,args) | PRef ((VarRef v) as ref), args -> Some(GRLabel ref,args) | _ -> None let constr_pat_discr_st (idpred,cpred) t = match decomp_pat t with | PRef ((IndRef _) as ref), args 
| PRef ((ConstructRef _ ) as ref), args -> Some (GRLabel ref,args) | PRef ((VarRef v) as ref), args when not (Idpred.mem v idpred) -> Some(GRLabel ref,args) | PVar v, args when not (Idpred.mem v idpred) -> Some(GRLabel (VarRef v),args) | PRef ((ConstRef c) as ref), args when not (Cpred.mem c cpred) -> Some (GRLabel ref, args) | PProd (_, d, c), [] -> Some (ProdLabel, [d ; c]) | PLambda (_, d, c), l -> Some (LambdaLabel, [d ; c] @ l) | PSort s, [] -> Some (SortLabel, []) | _ -> None open Dn let constr_val_discr t = let c, l = decomp t in match kind_of_term c with | Ind ind_sp -> Label(GRLabel (IndRef ind_sp),l) | Construct cstr_sp -> Label(GRLabel (ConstructRef cstr_sp),l) | Var id -> Label(GRLabel (VarRef id),l) | Const _ -> Everything | _ -> Nothing let constr_val_discr_st (idpred,cpred) t = let c, l = decomp t in match kind_of_term c with | Const c -> if Cpred.mem c cpred then Everything else Label(GRLabel (ConstRef c),l) | Ind ind_sp -> Label(GRLabel (IndRef ind_sp),l) | Construct cstr_sp -> Label(GRLabel (ConstructRef cstr_sp),l) | Var id when not (Idpred.mem id idpred) -> Label(GRLabel (VarRef id),l) | Prod (n, d, c) -> Label(ProdLabel, [d; c]) | Lambda (n, d, c) -> Label(LambdaLabel, [d; c] @ l) | Sort _ -> Label (SortLabel, []) | Evar _ -> Everything | _ -> Nothing let create = Dn.create let add dn st = Dn.add dn (constr_pat_discr_st st) let rmv dn st = Dn.rmv dn (constr_pat_discr_st st) let lookup dn st t = Dn.lookup dn (constr_val_discr_st st) t let app f dn = Dn.app f dn end coq-8.4pl2/tactics/leminv.ml0000640000175000001440000002343212010532755015111 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (*

      \n")) in let () = (List.iter (fun x -> Buffer.add_string b ("\n")) l) in let () = Buffer.add_string b"\n" in b in let () = List.iter (fun b -> Buffer.add_buffer res_buf (tactic_item b)) li in res_buf let init () = let theui = Printf.sprintf " %s %s %s " (if Coq_config.gtk_platform <> `QUARTZ then "" else "") (Buffer.contents (list_items "Tactic" Coq_commands.tactics)) (Buffer.contents (list_items "Template" Coq_commands.commands)) in ignore (ui_m#add_ui_from_string theui); coq-8.4pl2/ide/command_windows.mli0000640000175000001440000000142612010532755016266 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Preferences.pref ref -> object method new_command : ?command:string -> ?term:string -> unit -> unit method frame : GBin.frame method refresh_font : unit -> unit method refresh_color : unit -> unit end coq-8.4pl2/ide/ideproof.ml0000640000175000001440000001360412010532755014535 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* let iter = new GText.iter it in let start = iter#backward_to_tag_toggle (Some tag) in let stop = iter#forward_to_tag_toggle (Some tag) in match GdkEvent.get_type evt with | `BUTTON_PRESS -> let ev = GdkEvent.Button.cast evt in if (GdkEvent.Button.button ev) <> 3 then false else begin let ctxt_menu = GMenu.menu () in let factory = new GMenu.factory ctxt_menu in List.iter (fun (text,cmd) -> ignore (factory#add_item text ~callback:(sel_cb cmd))) menu_content; ctxt_menu#popup ~button:3 ~time:(GdkEvent.Button.time ev); true end | `MOTION_NOTIFY -> hover_cb start stop; false | _ -> false)) let mode_tactic sel_cb (proof:GText.view) goals hints = match goals with | [] -> assert false | { Interface.goal_hyp = hyps; Interface.goal_ccl = cur_goal; } :: rem_goals -> let on_hover sel_start sel_stop = proof#buffer#remove_tag ~start:proof#buffer#start_iter ~stop:sel_start Tags.Proof.highlight; proof#buffer#remove_tag ~start:sel_stop ~stop:proof#buffer#end_iter Tags.Proof.highlight; proof#buffer#apply_tag ~start:sel_start ~stop:sel_stop Tags.Proof.highlight in let goals_cnt = List.length rem_goals + 1 in let head_str = Printf.sprintf "%d subgoal%s\n" goals_cnt (if 1 < goals_cnt then "" else "s") in let goal_str index total = Printf.sprintf "______________________________________(%d/%d)\n" index total in (* Insert current goal and its hypotheses *) let hyps_hints, goal_hints = match hints with | None -> [], [] | Some (hl, h) -> (hl, h) in let rec insert_hyp hints hs = match hs with | [] -> () | hyp :: hs -> let tags, rem_hints = match hints with | [] -> [], [] | hint :: hints -> let tag = proof#buffer#create_tag [] in let () = hook_tag_cb tag hint sel_cb on_hover in [tag], hints in let () = proof#buffer#insert ~tags (hyp ^ "\n") in insert_hyp rem_hints hs in let () = proof#buffer#insert head_str in let () = insert_hyp hyps_hints hyps in let () = let tags = Tags.Proof.goal :: if goal_hints <> [] then let tag = proof#buffer#create_tag [] in let () = hook_tag_cb tag goal_hints sel_cb on_hover in [tag] else [] in proof#buffer#insert (goal_str 1 goals_cnt); proof#buffer#insert ~tags cur_goal; proof#buffer#insert "\n" in (* Insert remaining goals (no hypotheses) *) let fold_goal i _ { Interface.goal_ccl = g } = proof#buffer#insert (goal_str i goals_cnt); proof#buffer#insert (g ^ "\n") in let () = Minilib.list_fold_left_i fold_goal 2 () rem_goals in 
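(* Once all goals are printed, move the insertion point back to the current goal's conclusion (the last span tagged with Tags.Proof.goal) and scroll the proof view so that it stays visible. *)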
ignore(proof#buffer#place_cursor ~where:(proof#buffer#end_iter#backward_to_tag_toggle (Some Tags.Proof.goal))); ignore(proof#scroll_to_mark ~use_align:true ~yalign:0.95 `INSERT) let mode_cesar (proof:GText.view) = function | [] -> assert false | { Interface.goal_hyp = hyps; Interface.goal_ccl = cur_goal; } :: _ -> proof#buffer#insert " *** Declarative Mode ***\n"; List.iter (fun hyp -> proof#buffer#insert (hyp^"\n")) hyps; proof#buffer#insert "______________________________________\n"; proof#buffer#insert ("thesis := \n "^cur_goal^"\n"); ignore (proof#scroll_to_iter (proof#buffer#get_iter_at_mark `INSERT)) let rec flatten = function | [] -> [] | (lg, rg) :: l -> let inner = flatten l in List.rev_append lg inner @ rg let display mode (view:GText.view) goals hints evars = let () = view#buffer#set_text "" in match goals with | None -> () (* No proof in progress *) | Some { Interface.fg_goals = []; Interface.bg_goals = bg } -> let bg = flatten (List.rev bg) in let evars = match evars with None -> [] | Some evs -> evs in begin match (bg, evars) with | [], [] -> view#buffer#insert "No more subgoals." | [], _ :: _ -> (* A proof has been finished, but not concluded *) view#buffer#insert "No more subgoals but non-instantiated existential variables:\n\n"; let iter evar = let msg = Printf.sprintf "%s\n" evar.Interface.evar_info in view#buffer#insert msg in List.iter iter evars | _, _ -> (* No foreground proofs, but still unfocused ones *) view#buffer#insert "This subproof is complete, but there are still unfocused goals:\n\n"; let iter goal = let msg = Printf.sprintf "%s\n" goal.Interface.goal_ccl in view#buffer#insert msg in List.iter iter bg end | Some { Interface.fg_goals = fg } -> mode view fg hints coq-8.4pl2/ide/preferences.mli0000640000175000001440000000512012010532755015372 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val load_pref : unit -> unit val current : pref ref val configure : ?apply:(unit -> unit) -> unit -> unit (* Hooks *) val refresh_font_hook : (unit -> unit) ref val refresh_background_color_hook : (unit -> unit) ref val refresh_toolbar_hook : (unit -> unit) ref val resize_window_hook : (unit -> unit) ref val refresh_tabs_hook : (unit -> unit) ref val use_default_doc_url : string coq-8.4pl2/ide/coqide.mli0000640000175000001440000000307612057161033014343 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string list (** Prepare the widgets, load the given files in tabs *) val main : string list -> unit (** Function to save anything and kill all coqtops @return [false] if you're allowed to quit. *) val forbid_quit_to_save : unit -> bool (** Function to load of a file. *) val do_load : string -> unit (** Set coqide to ignore Ctrl-C, while launching [crash_save] and exiting for others received signals *) val ignore_break : unit -> unit (** Emergency saving of opened files as "foo.v.crashcoqide", and exit (if the integer isn't 127). *) val crash_save : int -> unit val check_for_geoproof_input : unit -> unit coq-8.4pl2/ide/ide_win32_stubs.c0000640000175000001440000000307512122347656015554 0ustar notinusers#define _WIN32_WINNT 0x0501 /* Cf below, we restrict to */ #include #include #include /* Win32 emulation of kill -9 */ /* The pid returned by Unix.create_process is actually a pseudo-pid, made via a cast of the obtained HANDLE, (cf. 
win32unix/createprocess.c in the sources of ocaml). Since we're still in the caller process, we simply cast back to get an handle... The 0 is the exit code we want for the terminated process. */ CAMLprim value win32_kill(value pseudopid) { CAMLparam1(pseudopid); TerminateProcess((HANDLE)(Long_val(pseudopid)), 0); CAMLreturn(Val_unit); } /* Win32 emulation of a kill -2 (SIGINT) */ /* For simplicity, we signal all processes sharing a console with coqide. This shouldn't be an issue since currently at most one coqtop is busy at a given time. Earlier, we tried to be more precise via FreeConsole and AttachConsole before generating the Ctrl-C, but that wasn't working so well (see #2869). This code rely now on the fact that coqide is a console app, and that coqide itself ignores Ctrl-C. */ CAMLprim value win32_interrupt_all(value unit) { CAMLparam1(unit); GenerateConsoleCtrlEvent(CTRL_C_EVENT,0); CAMLreturn(Val_unit); } /* Get rid of the nasty console window (only if we created it) */ CAMLprim value win32_hide_console (value unit) { CAMLparam1(unit); DWORD pid; HWND hw = GetConsoleWindow(); if (hw != NULL) { GetWindowThreadProcessId(hw, &pid); if (pid == GetCurrentProcessId()) ShowWindow(hw, SW_HIDE); } CAMLreturn(Val_unit); } coq-8.4pl2/ide/undo_lablgtk_ge212.mli0000640000175000001440000000210512010532755016436 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Gtk.text_view] as 'a) Gtk.obj -> object inherit GText.view val obj : 'a Gtk.obj method undo : bool method redo : bool method clear_undo : unit end val undoable_view : ?buffer:GText.buffer -> ?editable:bool -> ?cursor_visible:bool -> ?justification:GtkEnums.justification -> ?wrap_mode:GtkEnums.wrap_mode -> ?accepts_tab:bool -> ?border_width:int -> ?width:int -> ?height:int -> ?packing:(GObj.widget -> unit) -> ?show:bool -> unit -> undoable_view coq-8.4pl2/ide/coq.ml0000640000175000001440000002543612052731210013507 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* " in try (* the following makes sense only when running with local layout *) let coqroot = Filename.concat (Filename.dirname Sys.executable_name) Filename.parent_dir_name in let ch = open_in (Filename.concat coqroot "revision") in let ver = input_line ch in let rev = input_line ch in (ver,rev) with _ -> (Coq_config.version,date) let short_version () = let (ver,date) = get_version_date () in Printf.sprintf "The Coq Proof Assistant, version %s (%s)\n" ver date let version () = let (ver,date) = get_version_date () in Printf.sprintf "The Coq Proof Assistant, version %s (%s)\ \nArchitecture %s running %s operating system\ \nGtk version is %s\ \nThis is %s (%s is the best one for this architecture and OS)\ \n" ver date Coq_config.arch Sys.os_type (let x,y,z = GMain.Main.version in Printf.sprintf "%d.%d.%d" x y z) (Filename.basename Sys.executable_name) Coq_config.best (** * Initial checks by launching test coqtop processes *) let rec read_all_lines in_chan = try let arg = input_line in_chan in arg::(read_all_lines in_chan) with End_of_file -> [] let fatal_error_popup msg = let popup = GWindow.message_dialog ~buttons:GWindow.Buttons.ok ~message_type:`ERROR ~message:msg () in ignore (popup#run ()); exit 1 let final_info_popup small msg = if small then let popup = GWindow.message_dialog ~buttons:GWindow.Buttons.ok ~message_type:`INFO ~message:msg () in let _ = popup#run () in exit 0 
else let popup = GWindow.dialog () in let button = GButton.button ~label:"ok" ~packing:popup#action_area#add () in let scroll = GBin.scrolled_window ~hpolicy:`NEVER ~vpolicy:`AUTOMATIC ~packing:popup#vbox#add ~height:500 () in let _ = GMisc.label ~text:msg ~packing:scroll#add_with_viewport () in let _ = popup#connect#destroy ~callback:(fun _ -> exit 0) in let _ = button#connect#clicked ~callback:(fun _ -> exit 0) in let _ = popup#run () in exit 0 let connection_error cmd lines exn = fatal_error_popup ("Connection with coqtop failed!\n"^ "Command was: "^cmd^"\n"^ "Answer was: "^(String.concat "\n " lines)^"\n"^ "Exception was: "^Printexc.to_string exn) let display_coqtop_answer cmd lines = final_info_popup (List.length lines < 30) ("Coqtop exited\n"^ "Command was: "^cmd^"\n"^ "Answer was: "^(String.concat "\n " lines)) let check_remaining_opt arg = if arg <> "" && arg.[0] = '-' then fatal_error_popup ("Illegal option: "^arg) let rec filter_coq_opts args = let argstr = String.concat " " (List.map Filename.quote args) in let cmd = Filename.quote (coqtop_path ()) ^" -nois -filteropts " ^ argstr in let cmd = requote cmd in let filtered_args = ref [] in let errlines = ref [] in try let oc,ic,ec = Unix.open_process_full cmd (Unix.environment ()) in filtered_args := read_all_lines oc; errlines := read_all_lines ec; match Unix.close_process_full (oc,ic,ec) with | Unix.WEXITED 0 -> List.iter check_remaining_opt !filtered_args; !filtered_args | Unix.WEXITED 127 -> asks_for_coqtop args | _ -> display_coqtop_answer cmd (!filtered_args @ !errlines) with Sys_error _ -> asks_for_coqtop args | e -> connection_error cmd (!filtered_args @ !errlines) e and asks_for_coqtop args = let pb_mes = GWindow.message_dialog ~message:"Failed to load coqtop. Reset the preference to default ?" 
~message_type:`QUESTION ~buttons:GWindow.Buttons.yes_no () in match pb_mes#run () with | `YES -> let () = !Preferences.current.Preferences.cmd_coqtop <- None in let () = custom_coqtop := None in let () = pb_mes#destroy () in filter_coq_opts args | `DELETE_EVENT | `NO -> let () = pb_mes#destroy () in let cmd_sel = GWindow.file_selection ~title:"Coqtop to execute (edit your preference then)" ~filename:(coqtop_path ()) ~urgency_hint:true () in match cmd_sel#run () with | `OK -> let () = custom_coqtop := (Some cmd_sel#filename) in let () = cmd_sel#destroy () in filter_coq_opts args | `CANCEL | `DELETE_EVENT | `HELP -> exit 0 exception WrongExitStatus of string let print_status = function | Unix.WEXITED n -> "WEXITED "^string_of_int n | Unix.WSIGNALED n -> "WSIGNALED "^string_of_int n | Unix.WSTOPPED n -> "WSTOPPED "^string_of_int n let check_connection args = let lines = ref [] in let argstr = String.concat " " (List.map Filename.quote args) in let cmd = Filename.quote (coqtop_path ()) ^ " -batch " ^ argstr in let cmd = requote cmd in try let ic = Unix.open_process_in cmd in lines := read_all_lines ic; match Unix.close_process_in ic with | Unix.WEXITED 0 -> () (* coqtop seems ok *) | st -> raise (WrongExitStatus (print_status st)) with e -> connection_error cmd !lines e (** * The structure describing a coqtop sub-process *) type coqtop = { pid : int; (* Unix process id *) cout : in_channel ; cin : out_channel ; sup_args : string list; } (** * Count of all active coqtops *) let toplvl_ctr = ref 0 let toplvl_ctr_mtx = Mutex.create () let coqtop_zombies () = Mutex.lock toplvl_ctr_mtx; let res = !toplvl_ctr in Mutex.unlock toplvl_ctr_mtx; res (** * Starting / signaling / ending a real coqtop sub-process *) (** We simulate a Unix.open_process that also returns the pid of the created process. Note: this uses Unix.create_process, which doesn't call bin/sh, so args shouldn't be quoted. The process cannot be terminated by a Unix.close_process, but rather by a kill of the pid. >--ide2top_w--[pipe]--ide2top_r--> coqide coqtop <--top2ide_r--[pipe]--top2ide_w--< Note: we use Unix.stderr in Unix.create_process to get debug messages from the coqtop's Ide_slave loop. NB: it's important to close coqide's descriptors (ide2top_w and top2ide_r) in coqtop. We do this indirectly via [Unix.set_close_on_exec]. This way, coqide has the only remaining copies of these descriptors, and closing them later will have visible effects in coqtop. Cf man 7 pipe : - If all file descriptors referring to the write end of a pipe have been closed, then an attempt to read(2) from the pipe will see end-of-file (read(2) will return 0). - If all file descriptors referring to the read end of a pipe have been closed, then a write(2) will cause a SIGPIPE signal to be generated for the calling process. If the calling process is ignoring this signal, then write(2) fails with the error EPIPE. Symmetrically, coqtop's descriptors (ide2top_r and top2ide_w) should be closed in coqide. 
*) let open_process_pid prog args = let (ide2top_r,ide2top_w) = Unix.pipe () in let (top2ide_r,top2ide_w) = Unix.pipe () in Unix.set_close_on_exec ide2top_w; Unix.set_close_on_exec top2ide_r; let pid = Unix.create_process prog args ide2top_r top2ide_w Unix.stderr in assert (pid <> 0); Unix.close ide2top_r; Unix.close top2ide_w; let oc = Unix.out_channel_of_descr ide2top_w in let ic = Unix.in_channel_of_descr top2ide_r in set_binary_mode_out oc true; set_binary_mode_in ic true; (pid,ic,oc) let spawn_coqtop sup_args = Mutex.lock toplvl_ctr_mtx; try let prog = coqtop_path () in let args = Array.of_list (prog :: "-ideslave" :: sup_args) in let (pid,ic,oc) = open_process_pid prog args in incr toplvl_ctr; Mutex.unlock toplvl_ctr_mtx; { pid = pid; cin = oc; cout = ic ; sup_args = sup_args } with e -> Mutex.unlock toplvl_ctr_mtx; raise e let respawn_coqtop coqtop = spawn_coqtop coqtop.sup_args let interrupter = ref (fun pid -> Unix.kill pid Sys.sigint) let killer = ref (fun pid -> Unix.kill pid Sys.sigkill) let break_coqtop coqtop = try !interrupter coqtop.pid with _ -> prerr_endline "Error while sending Ctrl-C" let kill_coqtop coqtop = let pid = coqtop.pid in begin try !killer pid with _ -> prerr_endline "Kill -9 failed. Process already terminated ?" end; try ignore (Unix.waitpid [] pid); Mutex.lock toplvl_ctr_mtx; decr toplvl_ctr; Mutex.unlock toplvl_ctr_mtx with _ -> prerr_endline "Error while waiting for child" (** * Calls to coqtop *) (** Cf [Ide_intf] for more details *) let p = Xml_parser.make () let () = Xml_parser.check_eof p false let eval_call coqtop (c:'a Ide_intf.call) = Xml_utils.print_xml coqtop.cin (Ide_intf.of_call c); flush coqtop.cin; let xml = Xml_parser.parse p (Xml_parser.SChannel coqtop.cout) in (Ide_intf.to_answer xml c : 'a Interface.value) let interp coqtop ?(raw=false) ?(verbose=true) s = eval_call coqtop (Ide_intf.interp (raw,verbose,s)) let rewind coqtop i = eval_call coqtop (Ide_intf.rewind i) let inloadpath coqtop s = eval_call coqtop (Ide_intf.inloadpath s) let mkcases coqtop s = eval_call coqtop (Ide_intf.mkcases s) let status coqtop = eval_call coqtop Ide_intf.status let hints coqtop = eval_call coqtop Ide_intf.hints module PrintOpt = struct type t = string list let implicit = ["Printing"; "Implicit"] let coercions = ["Printing"; "Coercions"] let raw_matching = ["Printing"; "Matching"; "Synth"] let notations = ["Printing"; "Notations"] let all_basic = ["Printing"; "All"] let existential = ["Printing"; "Existential"; "Instances"] let universes = ["Printing"; "Universes"] let state_hack = Hashtbl.create 11 let _ = List.iter (fun opt -> Hashtbl.add state_hack opt false) [ implicit; coercions; raw_matching; notations; all_basic; existential; universes ] let set coqtop options = let () = List.iter (fun (name, v) -> Hashtbl.replace state_hack name v) options in let options = List.map (fun (name, v) -> (name, Interface.BoolValue v)) options in match eval_call coqtop (Ide_intf.set_options options) with | Interface.Good () -> () | _ -> raise (Failure "Cannot set options.") let enforce_hack coqtop = let elements = Hashtbl.fold (fun opt v acc -> (opt, v) :: acc) state_hack [] in set coqtop elements end let goals coqtop = let () = PrintOpt.enforce_hack coqtop in eval_call coqtop Ide_intf.goals let evars coqtop = let () = PrintOpt.enforce_hack coqtop in eval_call coqtop Ide_intf.evars coq-8.4pl2/ide/ideutils.ml0000640000175000001440000002570612010532755014556 0ustar notinusers(************************************************************************) (* v * The Coq Proof 
Assistant / The Coq Development Team *) (* ignore (status_context#push s)),status_context#pop let flash_info = let flash_context = status#new_context ~name:"Flash" in (fun ?(delay=5000) s -> flash_context#flash ~delay s) let set_location = ref (function s -> failwith "not ready") let pbar = GRange.progress_bar ~pulse_step:0.2 () let debug = ref (false) let prerr_endline s = if !debug then try prerr_endline s;flush stderr with _ -> () let get_insert input_buffer = input_buffer#get_iter_at_mark `INSERT let is_char_start c = let code = Char.code c in code < 0x80 || code >= 0xc0 let byte_offset_to_char_offset s byte_offset = if (byte_offset < String.length s) then begin let count_delta = ref 0 in for i = 0 to byte_offset do let code = Char.code s.[i] in if code >= 0x80 && code < 0xc0 then incr count_delta done; byte_offset - !count_delta end else begin let count_delta = ref 0 in for i = 0 to String.length s - 1 do let code = Char.code s.[i] in if code >= 0x80 && code < 0xc0 then incr count_delta done; byte_offset - !count_delta end let print_id id = prerr_endline ("GOT sig id :"^(string_of_int (Obj.magic id))) let do_convert s = Utf8_convert.f (if Glib.Utf8.validate s then begin prerr_endline "Input is UTF-8";s end else let from_loc () = let _,char_set = Glib.Convert.get_charset () in flash_info ("Converting from locale ("^char_set^")"); Glib.Convert.convert_with_fallback ~to_codeset:"UTF-8" ~from_codeset:char_set s in let from_manual enc = flash_info ("Converting from "^ enc); Glib.Convert.convert s ~to_codeset:"UTF-8" ~from_codeset:enc in match !current.encoding with |Preferences.Eutf8 | Preferences.Elocale -> from_loc () |Emanual enc -> try from_manual enc with _ -> from_loc ()) let try_convert s = try do_convert s with _ -> "(* Fatal error: wrong encoding in input. 
\ Please choose a correct encoding in the preference panel.*)";; let try_export file_name s = try let s = try match !current.encoding with |Eutf8 -> begin (prerr_endline "UTF-8 is enforced" ;s) end |Elocale -> begin let is_unicode,char_set = Glib.Convert.get_charset () in if is_unicode then (prerr_endline "Locale is UTF-8" ;s) else (prerr_endline ("Locale is "^char_set); Glib.Convert.convert_with_fallback ~from_codeset:"UTF-8" ~to_codeset:char_set s) end |Emanual enc -> (prerr_endline ("Manual charset is "^ enc); Glib.Convert.convert_with_fallback ~from_codeset:"UTF-8" ~to_codeset:enc s) with e -> (prerr_endline ("Error ("^(Printexc.to_string e)^") in transcoding: falling back to UTF-8") ;s) in let oc = open_out file_name in output_string oc s; close_out oc; true with e -> prerr_endline (Printexc.to_string e);false let my_stat f = try Some (Unix.stat f) with _ -> None let revert_timer = ref None let disconnect_revert_timer () = match !revert_timer with | None -> () | Some id -> GMain.Timeout.remove id; revert_timer := None let auto_save_timer = ref None let disconnect_auto_save_timer () = match !auto_save_timer with | None -> () | Some id -> GMain.Timeout.remove id; auto_save_timer := None let highlight_timer = ref None let set_highlight_timer f = match !highlight_timer with | None -> revert_timer := Some (GMain.Timeout.add ~ms:2000 ~callback:(fun () -> f (); highlight_timer := None; true)) | Some id -> GMain.Timeout.remove id; revert_timer := Some (GMain.Timeout.add ~ms:2000 ~callback:(fun () -> f (); highlight_timer := None; true)) let last_dir = ref "" let filter_all_files () = GFile.filter ~name:"All" ~patterns:["*"] () let filter_coq_files () = GFile.filter ~name:"Coq source code" ~patterns:[ "*.v"] () let select_file_for_open ~title ?(dir = last_dir) ?(filename="") () = let file = ref None in let file_chooser = GWindow.file_chooser_dialog ~action:`OPEN ~modal:true ~title () in file_chooser#add_button_stock `CANCEL `CANCEL ; file_chooser#add_select_button_stock `OPEN `OPEN ; file_chooser#add_filter (filter_coq_files ()); file_chooser#add_filter (filter_all_files ()); file_chooser#set_default_response `OPEN; ignore (file_chooser#set_current_folder !dir); begin match file_chooser#run () with | `OPEN -> begin file := file_chooser#filename; match !file with None -> () | Some s -> dir := Filename.dirname s; end | `DELETE_EVENT | `CANCEL -> () end ; file_chooser#destroy (); !file let select_file_for_save ~title ?(dir = last_dir) ?(filename="") () = let file = ref None in let file_chooser = GWindow.file_chooser_dialog ~action:`SAVE ~modal:true ~title () in file_chooser#add_button_stock `CANCEL `CANCEL ; file_chooser#add_select_button_stock `SAVE `SAVE ; file_chooser#add_filter (filter_coq_files ()); file_chooser#add_filter (filter_all_files ()); (* this line will be used when a lablgtk >= 2.10.0 is the default on most distributions file_chooser#set_do_overwrite_confirmation true; *) file_chooser#set_default_response `SAVE; ignore (file_chooser#set_current_folder !dir); ignore (file_chooser#set_current_name filename); begin match file_chooser#run () with | `SAVE -> begin file := file_chooser#filename; match !file with None -> () | Some s -> dir := Filename.dirname s; end | `DELETE_EVENT | `CANCEL -> () end ; file_chooser#destroy (); !file let find_tag_start (tag :GText.tag) (it:GText.iter) = let it = it#copy in let tag = Some tag in while not (it#begins_tag tag) && it#nocopy#backward_char do () done; it let find_tag_stop (tag :GText.tag) (it:GText.iter) = let it = it#copy in let tag = Some 
tag in while not (it#ends_tag tag) && it#nocopy#forward_char do () done; it let find_tag_limits (tag :GText.tag) (it:GText.iter) = (find_tag_start tag it , find_tag_stop tag it) (* explanations: Win32 threads won't work if events are produced in a thread different from the thread of the Gtk loop. In this case we must use GtkThread.async to push a callback in the main thread. Beware that the synchronus version may produce deadlocks. *) let async = if Sys.os_type = "Win32" then GtkThread.async else (fun x -> x) let sync = if Sys.os_type = "Win32" then GtkThread.sync else (fun x -> x) let mutex text f = let m = Mutex.create() in fun x -> if Mutex.try_lock m then (try prerr_endline ("Got lock on "^text); f x; Mutex.unlock m; prerr_endline ("Released lock on "^text) with e -> Mutex.unlock m; prerr_endline ("Released lock on "^text^" (on error)"); raise e) else prerr_endline ("Discarded call for "^text^": computations ongoing") let stock_to_widget ?(size=`DIALOG) s = let img = GMisc.image () in img#set_stock s; img#coerce let custom_coqtop = ref None let coqtop_path () = let file = match !custom_coqtop with | Some s -> s | None -> match !current.cmd_coqtop with | Some s -> s | None -> let prog = String.copy Sys.executable_name in try let pos = String.length prog - 6 in let i = Str.search_backward (Str.regexp_string "coqide") prog pos in String.blit "coqtop" 0 prog i 6; prog with Not_found -> "coqtop" in file let rec print_list print fmt = function | [] -> () | [x] -> print fmt x | x :: r -> print fmt x; print_list print fmt r (* In win32, when a command-line is to be executed via cmd.exe (i.e. Sys.command, Unix.open_process, ...), it cannot contain several quoted "..." zones otherwise some quotes are lost. Solution: we re-quote everything. Reference: http://ss64.com/nt/cmd.html *) let requote cmd = if Sys.os_type = "Win32" then "\""^cmd^"\"" else cmd (* TODO: allow to report output as soon as it comes (user-fiendlier for long commands like make...) *) let run_command f c = let c = requote c in let result = Buffer.create 127 in let cin,cout,cerr = Unix.open_process_full c (Unix.environment ()) in let buff = String.make 127 ' ' in let buffe = String.make 127 ' ' in let n = ref 0 in let ne = ref 0 in while n:= input cin buff 0 127 ; ne := input cerr buffe 0 127 ; !n+ !ne <> 0 do let r = try_convert (String.sub buff 0 !n) in f r; Buffer.add_string result r; let r = try_convert (String.sub buffe 0 !ne) in f r; Buffer.add_string result r done; (Unix.close_process_full (cin,cout,cerr), Buffer.contents result) let browse f url = let com = Minilib.subst_command_placeholder !current.cmd_browse url in let _ = Unix.open_process_out com in () (* This beautiful message will wait for twt ... 
if s = 127 then f ("Could not execute\n\""^com^ "\"\ncheck your preferences for setting a valid browser command\n") *) let doc_url () = if !current.doc_url = use_default_doc_url || !current.doc_url = "" then let addr = List.fold_left Filename.concat (Coq_config.docdir) ["html";"refman";"index.html"] in if Sys.file_exists addr then "file://"^addr else Coq_config.wwwrefman else !current.doc_url let url_for_keyword = let ht = Hashtbl.create 97 in lazy ( begin try let cin = try let index_urls = Filename.concat (List.find (fun x -> Sys.file_exists (Filename.concat x "index_urls.txt")) Minilib.xdg_config_dirs) "index_urls.txt" in open_in index_urls with Not_found -> let doc_url = doc_url () in let n = String.length doc_url in if n > 8 && String.sub doc_url 0 7 = "file://" then open_in (String.sub doc_url 7 (n-7) ^ "index_urls.txt") else raise Exit in try while true do let s = input_line cin in try let i = String.index s ',' in let k = String.sub s 0 i in let u = String.sub s (i + 1) (String.length s - i - 1) in Hashtbl.add ht k u with _ -> Minilib.safe_prerr_endline "Warning: Cannot parse documentation index file." done with End_of_file -> close_in cin with _ -> Minilib.safe_prerr_endline "Warning: Cannot find documentation index file." end; Hashtbl.find ht : string -> string) let browse_keyword f text = try let u = Lazy.force url_for_keyword text in browse f (doc_url() ^ u) with Not_found -> f ("No documentation found for \""^text^"\".\n") let absolute_filename f = Minilib.correct_path f (Sys.getcwd ()) coq-8.4pl2/ide/uim/0000750000175000001440000000000012127276554013174 5ustar notinuserscoq-8.4pl2/ide/minilib.ml0000640000175000001440000001313311756735231014360 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* a | b::l -> it_list_f (i+1) (f i a b) l in it_list_f (* [list_chop i l] splits [l] into two lists [(l1,l2)] such that [l1++l2=l] and [l1] has length [i]. It raises [Failure] when [i] is negative or greater than the length of [l] *) let list_chop n l = let rec chop_aux i acc = function | tl when i=0 -> (List.rev acc, tl) | h::t -> chop_aux (pred i) (h::acc) t | [] -> failwith "list_chop" in chop_aux n [] l let list_map_i f = let rec map_i_rec i = function | [] -> [] | x::l -> let v = f i x in v :: map_i_rec (i+1) l in map_i_rec let list_index x = let rec index_x n = function | y::l -> if x = y then n else index_x (succ n) l | [] -> raise Not_found in index_x 1 let list_index0 x l = list_index x l - 1 let list_filter_i p = let rec filter_i_rec i = function | [] -> [] | x::l -> let l' = filter_i_rec (succ i) l in if p i x then x::l' else l' in filter_i_rec 0 let string_map f s = let l = String.length s in let r = String.create l in for i= 0 to (l - 1) do r.[i] <- f (s.[i]) done; r let subst_command_placeholder s t = Str.global_replace (Str.regexp_string "%s") t s (* Split the content of a variable such as $PATH in a list of directories. 
The separators are either ":" in unix or ";" in win32 *) let path_to_list = Str.split (Str.regexp "[:;]") (* On win32, the home directory is probably not in $HOME, but in some other environment variable *) let home = try Sys.getenv "HOME" with Not_found -> try (Sys.getenv "HOMEDRIVE")^(Sys.getenv "HOMEPATH") with Not_found -> try Sys.getenv "USERPROFILE" with Not_found -> Filename.current_dir_name let opt2list = function None -> [] | Some x -> [x] let (/) = Filename.concat let coqify d = d / "coq" let xdg_config_home = coqify (try Sys.getenv "XDG_CONFIG_HOME" with Not_found -> home / ".config") let relative_base = Filename.dirname (Filename.dirname Sys.executable_name) let xdg_config_dirs = let sys_dirs = try List.map coqify (path_to_list (Sys.getenv "XDG_CONFIG_DIRS")) with | Not_found when Sys.os_type = "Win32" -> [relative_base / "config"] | Not_found -> ["/etc/xdg/coq"] in xdg_config_home :: sys_dirs @ opt2list Coq_config.configdir let xdg_data_home = coqify (try Sys.getenv "XDG_DATA_HOME" with Not_found -> home / ".local" / "share") let xdg_data_dirs = let sys_dirs = try List.map coqify (path_to_list (Sys.getenv "XDG_DATA_DIRS")) with | Not_found when Sys.os_type = "Win32" -> [relative_base / "share"] | Not_found -> ["/usr/local/share/coq";"/usr/share/coq"] in xdg_data_home :: sys_dirs @ opt2list Coq_config.datadir let coqtop_path = ref "" (* On a Win32 application with no console, writing to stderr raise a Sys_error "bad file descriptor", hence the "try" below. Ideally, we should re-route message to a log file somewhere, or print in the response buffer. *) let safe_prerr_endline s = try prerr_endline s;flush stderr with _ -> () (* Hints to partially detects if two paths refer to the same repertory *) let rec remove_path_dot p = let curdir = Filename.concat Filename.current_dir_name "" in (* Unix: "./" *) let n = String.length curdir in let l = String.length p in if l > n && String.sub p 0 n = curdir then let n' = let sl = String.length Filename.dir_sep in let i = ref n in while !i <= l - sl && String.sub p !i sl = Filename.dir_sep do i := !i + sl done; !i in remove_path_dot (String.sub p n' (l - n')) else p let strip_path p = let cwd = Filename.concat (Sys.getcwd ()) "" in (* Unix: "`pwd`/" *) let n = String.length cwd in let l = String.length p in if l > n && String.sub p 0 n = cwd then let n' = let sl = String.length Filename.dir_sep in let i = ref n in while !i <= l - sl && String.sub p !i sl = Filename.dir_sep do i := !i + sl done; !i in remove_path_dot (String.sub p n' (l - n')) else remove_path_dot p let canonical_path_name p = let current = Sys.getcwd () in try Sys.chdir p; let p' = Sys.getcwd () in Sys.chdir current; p' with Sys_error _ -> (* We give up to find a canonical name and just simplify it... *) strip_path p let correct_path f dir = if Filename.is_relative f then Filename.concat dir f else f (* checks if two file names refer to the same (existing) file by comparing their device and inode. It seems that under Windows, inode is always 0, so we cannot accurately check if *) (* Optimised for partial application (in case many candidates must be compared to f1). 
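A typical call is therefore (sketch only): let same_as_f1 = same_file f1 in List.exists same_as_f1 candidates, so that Unix.stat is run on f1 once, whatever the number of candidates.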
*) let same_file f1 = try let s1 = Unix.stat f1 in (fun f2 -> try let s2 = Unix.stat f2 in s1.Unix.st_dev = s2.Unix.st_dev && if Sys.os_type = "Win32" then f1 = f2 else s1.Unix.st_ino = s2.Unix.st_ino with Unix.Unix_error _ -> false) with Unix.Unix_error _ -> (fun _ -> false) coq-8.4pl2/ide/coq.ico0000640000175000001440000002607611144561426013665 0ustar notinusers*@ (,(*   &,........-( (0/&&D`mpqqqqqqqocE |bvolkjjjjjjkkf#++p-  u#++4  ~}}}}}}}}~t!))5 ~||||||||}t"*+}1  t~|{{{{{{|}v%./r* -}|{zzz{|}u%-.URej i~}{zzz{}~ہNad~ 2 g}~}{z{|~܀gEy)~|{|}x.9;0 BNS\}||~ms' [nu~||~d t+  ~}|~b| 9u}{zyz{q!*.Z+ ,pvutttuuG_oT6+&   .Ngwmuýuttsttup"-3 vhd\R?- " >CXfqruttsssttumhe`YyBYg%19iK0  ; ',^Rnruttsssssttuokvvvtb;P\iD!   :#-3]Ql}nuuttsssssssttt^~buuuusd2CN ~N(  )/HRm~kvuuttssssssssttupI`ojvutuvp;O[P  MfvTouuuuttsssssssssttuv[zBVcvýutttutG`o G ]z1vvuutttssssssssssttuvD[i(16kuuutttuo4FQm8*49xPwƾutttssssssssssssttuo&17!(,tsuuttttuo#(\""(+{,zǣvutttsssssssssssttuk )/(3:suutsstuvKev z83>Ez9{ƖwvuttsssssssssssturSp^uutsssttul!,2Q:JUp}^zƠwuutsssssstsssstuvuutssssttuuE]li, 4@Hs&z{xǺuutsssstttsssstuutsssssstuug ;XpzRxƯuutssttuttssssttsssssssttuq(-K@Q\ xYwƼuttttuvuttssssssssssssttut6HTXQftzvuttuugwuttsssssssssssstuvOk} e%pAxƺuttuh6GRsvttsssssssssssstuvSr l+ _}"yģvtuv\~$*ulvuttsssssssssssttuap. EYeuvuuvSr dc|vuttsssssssssssttubq. 9HQtvuuuHapXUpWwŽuttsssssssssssttucm+ 6EOsvuut@VdQ6CK2yâvutsssssssssssttucd% 5FQ-svtutAWeLxpwuttsssssssssstuvZ| UCWdTtutut:MYFm4yƬuttsssssssssttuuKfv= Ldr>pvutur)5<DQgt|lvuttttttttttttut2AKg'  #mvuttur'4;D]v{Ǟvutttttttttuuvc 2 3$)Otutttur'3:H  wByƤvuuuuuuuuuuvg(4<<o1ekD]n{6GRxŽutorur&29U8* r/{Əxǿvuuuuuvwugn%*(v3y˰tquutosur%17'3G1@W{ ^A  g|L|ňxşxġxáxáwv~iVG\i-#( QnRtvutsstur+8?Wvb0?To4\p|_xWn}SiwSiv@NW  .-;Pecotuyzuur;L_ah^#.>W% #I`zahiou{{wum.M`f2lƇhhhhhhhgcE[}M Udo f+fYg{feebrXrL-6C #*/7EW3AS 0=N .;L %, ?@@@```ppxx|~???coq-8.4pl2/ide/undo_lablgtk_lt26.mli0000640000175000001440000000201112010532755016401 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* object inherit GText.view method undo : bool method redo : bool method clear_undo : unit end val undoable_view : ?buffer:GText.buffer -> ?editable:bool -> ?cursor_visible:bool -> ?justification:GtkEnums.justification -> ?wrap_mode:GtkEnums.wrap_mode -> ?border_width:int -> ?width:int -> ?height:int -> ?packing:(GObj.widget -> unit) -> ?show:bool -> unit -> undoable_view coq-8.4pl2/ide/typed_notebook.ml0000640000175000001440000000514612010532755015755 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* if i = real_pos then term else x) 0 term_list; super#set_page ?tab_label ?menu_label page method get_nth_term i = List.nth term_list i method term_num p = Minilib.list_index0 p term_list method pages = term_list method remove_page index = term_list <- Minilib.list_filter_i (fun i x -> if i = index then kill_page x; i <> index) term_list; super#remove_page index method current_term = List.nth term_list super#current_page end let create make kill = GtkPack.Notebook.make_params [] ~cont:(GContainer.pack_container ~create:(fun pl -> let nb = GtkPack.Notebook.create pl in (new typed_notebook make kill nb))) coq-8.4pl2/ide/coq_icon.rc0000640000175000001440000000003411554053542014511 0ustar notinuserslarge ICON ide/coq.ico coq-8.4pl2/ide/command_windows.ml0000640000175000001440000001210212010532755016106 0ustar 
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* i <> index) !views in let _ = toolbar#insert_button ~tooltip:"Delete Page" ~text:"Delete Page" ~icon:(Ideutils.stock_to_widget `DELETE) ~callback:remove_cb () in object(self) val frame = frame val new_page_menu = new_page_menu val notebook = notebook method frame = frame method new_command ?command ?term () = let frame = GBin.frame ~shadow_type:`ETCHED_OUT () in let _ = notebook#append_page frame#coerce in notebook#goto_page (notebook#page_num frame#coerce); let vbox = GPack.vbox ~homogeneous:false ~packing:frame#add () in let hbox = GPack.hbox ~homogeneous:false ~packing:vbox#pack () in let (combo,_) = GEdit.combo_box_entry_text ~strings:Coq_commands.state_preserving ~packing:hbox#pack () in let on_activate c () = if List.mem combo#entry#text Coq_commands.state_preserving then c () else prerr_endline "Not a state preserving command" in let entry = GEdit.entry ~packing:(hbox#pack ~expand:true) () in entry#misc#set_can_default true; let r_bin = GBin.scrolled_window ~vpolicy:`AUTOMATIC ~hpolicy:`AUTOMATIC ~packing:(vbox#pack ~fill:true ~expand:true) () in let ok_b = GButton.button ~label:"Ok" ~packing:(hbox#pack ~expand:false) () in let result = GText.view ~packing:r_bin#add () in let () = views := !views @ [result] in result#misc#modify_font !current.Preferences.text_font; let clr = Tags.color_of_string !current.Preferences.background_color in result#misc#modify_base [`NORMAL, `COLOR clr]; result#misc#set_can_focus true; (* false causes problems for selection *) result#set_editable false; let callback () = let com = combo#entry#text in let phrase = if String.get com (String.length com - 1) = '.' then com ^ " " else com ^ " " ^ entry#text ^" . " in try result#buffer#set_text (match Coq.interp !coqtop ~raw:true phrase with | Interface.Fail (l,str) -> ("Error while interpreting "^phrase^":\n"^str) | Interface.Good results -> ("Result for command " ^ phrase ^ ":\n" ^ results)) with e -> let s = Printexc.to_string e in assert (Glib.Utf8.validate s); result#buffer#set_text s in ignore (combo#entry#connect#activate ~callback:(on_activate callback)); ignore (ok_b#connect#clicked ~callback:(on_activate callback)); begin match command,term with | None,None -> () | Some c, None -> combo#entry#set_text c; | Some c, Some t -> combo#entry#set_text c; entry#set_text t | None , Some t -> entry#set_text t end; on_activate callback (); entry#misc#grab_focus (); entry#misc#grab_default (); ignore (entry#connect#activate ~callback); ignore (combo#entry#connect#activate ~callback); self#frame#misc#show () method refresh_font () = let iter view = view#misc#modify_font !current.Preferences.text_font in List.iter iter !views method refresh_color () = let clr = Tags.color_of_string !current.Preferences.background_color in let iter view = view#misc#modify_base [`NORMAL, `COLOR clr] in List.iter iter !views initializer ignore (new_page_menu#connect#clicked ~callback:self#new_command); (* ignore (window#event#connect#delete (fun _ -> window#misc#hide(); true));*) end coq-8.4pl2/ide/utils/0000750000175000001440000000000012127276554013542 5ustar notinuserscoq-8.4pl2/ide/utils/configwin_messages.ml0000640000175000001440000000476311554053562017754 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. 
All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** Module containing the messages of Configwin.*) let software = "Configwin";; let version = "1.2";; let html_config = "Configwin bindings configurator for html parameters" let home = Minilib.home let mCapture = "Capture";; let mType_key = "Type key" ;; let mAdd = "Add";; let mRemove = "Remove";; let mUp = "Up";; let mEdit = "Edit";; let mOk = "Ok";; let mCancel = "Cancel";; let mApply = "Apply";; let mValue = "Value" let mKey = "Key" let shortcuts = "Shortcuts" let html_end = "End with" let html_begin = "Begin with" coq-8.4pl2/ide/utils/editable_cells.ml0000640000175000001440000000667011254456226017035 0ustar notinusersopen GTree open Gobject let create l = let hbox = GPack.hbox () in let scw = GBin.scrolled_window ~hpolicy:`AUTOMATIC ~vpolicy:`AUTOMATIC ~packing:(hbox#pack ~expand:true) () in let columns = new GTree.column_list in let command_col = columns#add Data.string in let coq_col = columns#add Data.string in let store = GTree.list_store columns in (* populate the store *) let _ = List.iter (fun (x,y) -> let row = store#append () in store#set ~row ~column:command_col x; store#set ~row ~column:coq_col y) l in let view = GTree.view ~model:store ~packing:scw#add_with_viewport () in (* Alternate colors for the rows *) view#set_rules_hint true; let renderer_comm = GTree.cell_renderer_text [`EDITABLE true] in ignore (renderer_comm#connect#edited ~callback:(fun (path:Gtk.tree_path) (s:string) -> store#set ~row:(store#get_iter path) ~column:command_col s)); let first = GTree.view_column ~title:"Coq Command to try" ~renderer:(renderer_comm,["text",command_col]) () in ignore (view#append_column first); let renderer_coq = GTree.cell_renderer_text [`EDITABLE true] in ignore(renderer_coq#connect#edited ~callback:(fun (path:Gtk.tree_path) (s:string) -> store#set ~row:(store#get_iter path) ~column:coq_col s)); let second = GTree.view_column ~title:"Coq Command to insert" ~renderer:(renderer_coq,["text",coq_col]) () in ignore (view#append_column second); let vbox = GPack.button_box `VERTICAL ~packing:hbox#pack ~layout:`SPREAD () in let up = GButton.button ~stock:`GO_UP ~label:"Up" ~packing:(vbox#pack ~expand:true ~fill:false) () in let down = GButton.button ~stock:`GO_DOWN ~label:"Down" ~packing:(vbox#pack ~expand:true ~fill:false) () in let add = GButton.button ~stock:`ADD ~label:"Add" ~packing:(vbox#pack ~expand:true ~fill:false) () in let remove = GButton.button ~stock:`REMOVE ~label:"Remove" ~packing:(vbox#pack ~expand:true ~fill:false) () in ignore (add#connect#clicked ~callback:(fun b -> let n = store#append () in view#selection#select_iter n)); ignore (remove#connect#clicked 
~callback:(fun b -> match view#selection#get_selected_rows with | [] -> () | path::_ -> let iter = store#get_iter path in ignore (store#remove iter); )); ignore (up#connect#clicked ~callback:(fun b -> match view#selection#get_selected_rows with | [] -> () | path::_ -> let iter = store#get_iter path in ignore (GtkTree.TreePath.prev path); let upiter = store#get_iter path in ignore (store#swap iter upiter); )); ignore (down#connect#clicked ~callback:(fun b -> match view#selection#get_selected_rows with | [] -> () | path::_ -> let iter = store#get_iter path in GtkTree.TreePath.next path; try let upiter = store#get_iter path in ignore (store#swap iter upiter) with _ -> () )); let get_data () = let start_path = GtkTree.TreePath.from_string "0" in let start_iter = store#get_iter start_path in let rec all acc = let new_acc = (store#get ~row:start_iter ~column:command_col, store#get ~row:start_iter ~column:coq_col)::acc in if store#iter_next start_iter then all new_acc else List.rev new_acc in all [] in (hbox,get_data) coq-8.4pl2/ide/utils/configwin.mli0000640000175000001440000003355111651507213016226 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** This module is the interface of the Configwin library. *) (** {2 Types} *) (** This type represents the different kinds of parameters. *) type parameter_kind;; (** This type represents the structure of the configuration window. *) type configuration_structure = | Section of string * GtkStock.id option * parameter_kind list (** label of the section, icon, parameters *) | Section_list of string * GtkStock.id option * configuration_structure list (** label of the section, icon, list of the sub sections *) ;; (** To indicate what button pushed the user when the window is closed. *) type return_button = Return_apply (** The user clicked on Apply at least once before closing the window with Cancel or the window manager. *) | Return_ok (** The user closed the window with the ok button. 
*) | Return_cancel (** The user closed the window with the cancel button or the window manager but never clicked on the apply button.*) (** {2 The key option class (to use with the {!Config_file} library)} *) val string_to_key : string -> Gdk.Tags.modifier list * int val key_to_string : Gdk.Tags.modifier list * int -> string val key_cp_wrapper : (Gdk.Tags.modifier list * int) Config_file.wrappers class key_cp : ?group:Config_file.group -> string list -> ?short_name:string -> Gdk.Tags.modifier list * int -> string -> [Gdk.Tags.modifier list * int] Config_file.cp_custom_type (** {2 Functions to create parameters} *) (** [string label value] creates a string parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val string : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> string -> string -> parameter_kind (** [bool label value] creates a boolean parameter. @param editable indicate if the value is editable (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val bool : ?editable: bool -> ?help: string -> ?f: (bool -> unit) -> string -> bool -> parameter_kind (** [strings label value] creates a string list parameter. @param editable indicate if the value is editable (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). @param add the function returning a list of strings when the user wants to add strings (default returns an empty list). @param eq the comparison function, used not to have doubles in list. Default is [Pervasives.(=)]. If you want to allow doubles in the list, give a function always returning false. *) val strings : ?editable: bool -> ?help: string -> ?f: (string list -> unit) -> ?eq: (string -> string -> bool) -> ?add: (unit -> string list) -> string -> string list -> parameter_kind (** [list label f_strings value] creates a list parameter. [f_strings] is a function taking a value and returning a list of strings to display it. The list length should be the same for any value, and the same as the titles list length. The [value] is the initial list. @param editable indicate if the value is editable (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). @param eq the comparison function, used not to have doubles in list. Default is [Pervasives.(=)]. If you want to allow doubles in the list, give a function always returning false. @param edit an optional function to use to edit an element of the list. The function returns an element, no matter if element was changed or not. When this function is given, a "Edit" button appears next to the list. @param add the function returning a list of values when the user wants to add values (default returns an empty list). @param titles an optional list of titles for the list. If the [f_strings] function returns a list with more than one element, then you must give a list of titles. @param color an optional function returning the optional color for a given element. This color is used to display the element in the list. The default function returns no color for any element. 
*) val list : ?editable: bool -> ?help: string -> ?f: ('a list -> unit) -> ?eq: ('a -> 'a -> bool) -> ?edit: ('a -> 'a) -> ?add: (unit -> 'a list) -> ?titles: string list -> ?color: ('a -> string option) -> string -> ('a -> string list) -> 'a list -> parameter_kind (** [color label value] creates a color parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val color : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> string -> string -> parameter_kind (** [font label value] creates a font parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val font : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> string -> string -> parameter_kind (** [combo label choices value] creates a combo parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). @param new_allowed indicate if a entry not in the list of choices is accepted (default is [false]). @param blank_allowed indicate if the empty selection [""] is accepted (default is [false]). *) val combo : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> ?new_allowed: bool -> ?blank_allowed: bool -> string -> string list -> string -> parameter_kind (** [text label value] creates a text parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the box for the text must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val text : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> string -> string -> parameter_kind (** Same as {!Configwin.text} but html bindings are available in the text widget. Use the [configwin_html_config] utility to edit your bindings. *) val html : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> string -> string -> parameter_kind (** [filename label value] creates a filename parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val filename : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: (string -> unit) -> string -> string -> parameter_kind (** [filenames label value] creates a filename list parameter. @param editable indicate if the value is editable (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). @param eq the comparison function, used not to have doubles in list. Default is [Pervasives.(=)]. If you want to allow doubles in the list, give a function always returning false. 
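For instance, to accept duplicates one may write (label and initial value are illustrative):
{[ filenames ~eq:(fun _ _ -> false) "Attached files" [] ]}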
*) val filenames : ?editable: bool -> ?help: string -> ?f: (string list -> unit) -> ?eq: (string -> string -> bool) -> string -> string list -> parameter_kind (** [date label value] creates a date parameter. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). @param f_string the function used to display the date as a string. The parameter is a tupe [(day,month,year)], where [month] is between [0] and [11]. The default function creates the string [year/month/day]. *) val date : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: ((int * int * int) -> unit) -> ?f_string: ((int * int * int -> string)) -> string -> (int * int * int) -> parameter_kind (** [hotkey label value] creates a hot key parameter. A hot key is defined by a list of modifiers and a key code. @param editable indicate if the value is editable (default is [true]). @param expand indicate if the entry widget must expand or not (default is [true]). @param help an optional help message. @param f the function called to apply the value (default function does nothing). *) val hotkey : ?editable: bool -> ?expand: bool -> ?help: string -> ?f: ((Gdk.Tags.modifier list * int) -> unit) -> string -> (Gdk.Tags.modifier list * int) -> parameter_kind val modifiers : ?editable: bool -> ?expand: bool -> ?help: string -> ?allow:(Gdk.Tags.modifier list) -> ?f: (Gdk.Tags.modifier list -> unit) -> string -> Gdk.Tags.modifier list -> parameter_kind (** [custom box f expand] creates a custom parameter, with the given [box], the [f] function is called when the user wants to apply his changes, and [expand] indicates if the box must expand in its father. @param label if a value is specified, a the box is packed into a frame. *) val custom : ?label: string -> GPack.box -> (unit -> unit) -> bool -> parameter_kind (** {2 Functions creating configuration windows and boxes} *) (** This function takes a configuration structure and creates a window to configure the various parameters. @param apply this function is called when the apply button is clicked, after giving new values to parameters. *) val edit : ?apply: (unit -> unit) -> string -> ?width:int -> ?height:int -> configuration_structure list -> return_button (** This function takes a configuration structure and creates a window used to get the various parameters from the user. It is the same window as edit but there is no apply button.*) val get : string -> ?width:int -> ?height:int -> configuration_structure list -> return_button (** This function takes a list of parameter specifications and creates a window to configure the various parameters. @param apply this function is called when the apply button is clicked, after giving new values to parameters.*) val simple_edit : ?apply: (unit -> unit) -> string -> ?width:int -> ?height:int -> parameter_kind list -> return_button (** This function takes a list of parameter specifications and creates a window to configure the various parameters, without Apply button.*) val simple_get : string -> ?width:int -> ?height:int -> parameter_kind list -> return_button (** Create a [GPack.box] with the list of given parameters, Return the box and the function to call to apply new values to parameters. 
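A minimal sketch, where [container] stands for any GTK container of your own:
{[ let bbox, apply = box [string "Name" "coq"] (GData.tooltips ()) in
   container#add bbox#coerce;
   (* ... once the user validates: *) apply () ]}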
*) val box : parameter_kind list -> GData.tooltips -> GPack.box * (unit -> unit) (** Create a [GPack.box] with the list of given configuration structure list, and the given list of buttons (defined by their label and callback). Before calling the callback of a button, the [apply] function of each parameter is called. *) val tabbed_box : configuration_structure list -> (string * (unit -> unit)) list -> GData.tooltips -> GPack.box coq-8.4pl2/ide/utils/config_file.mli0000640000175000001440000003556710403541657016524 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** This module implements a mechanism to handle configuration files. A configuration file is defined as a set of [variable = value] lines, where value can be a simple string (types int, string, bool...), a list of values between brackets (lists) or parentheses (tuples), or a set of [variable = value] lines between braces. The configuration file is automatically loaded and saved, and configuration parameters are manipulated inside the program as easily as references. Object implementation by Jean-Baptiste Rouquier. *) (** {1:lowlevelinterface Low level interface} *) (** Skip this section on a first reading... *) (** The type of cp freshly parsed from configuration file, not yet wrapped in their proper type. *) module Raw : sig type cp = | String of string (** base types, reproducing the tokens of Genlex *) | Int of int | Float of float | List of cp list (** compound types *) | Tuple of cp list | Section of (string * cp) list (** A parser. *) val of_string : string -> cp (** Used to print the values into a log file for instance. *) val to_channel : out_channel -> cp -> unit end (** A type used to specialize polymorphics classes and define new classes. {!Config_file.predefinedwrappers} are provided. *) type 'a wrappers = { to_raw : 'a -> Raw.cp; of_raw : Raw.cp -> 'a; } (** An exception raised by {!Config_file.cp.set_raw} when the argument doesn't have a suitable {!Config_file.Raw.cp} type. The function explains the problem and flush the output.*) exception Wrong_type of (out_channel -> unit) (* (\** {2 Miscellaneous functions} *\) *) (* val bool_of_string : string -> bool *) (** {1 High level interface} *) (** {2 The two main classes} *) (** A Configuration Parameter, in short cp, ie a value we can store in and read from a configuration file. 
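For instance (names chosen only for illustration), a boolean cp is used much like a reference:
{[ let verbose = new bool_cp ["verbose"] false "Print more messages" in
   verbose#set true;
   if verbose#get then prerr_endline "verbose mode on" ]}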
*) class type ['a] cp = object (** {1 Accessing methods} *) method get : 'a method set : 'a -> unit method get_default : 'a method get_help : string method get_name : string list (** Resets to the default value. *) method reset : unit (** {1 Miscellaneous} *) (** All the hooks are executed each time the method set is called, just after setting the new value.*) method add_hook : ('a -> 'a -> unit) -> unit (** Used to generate command line arguments in {!Config_file.group.command_line_args} *) method set_short_name : string -> unit (** [None] if no optional short_name was provided during object creation and [set_short_name] was never called.*) method get_short_name : string option (** {1 Methods for internal use} *) method get_formatted : Format.formatter -> unit method get_default_formatted : Format.formatter -> unit method get_help_formatted : Format.formatter -> unit method get_spec : Arg.spec method set_raw : Raw.cp -> unit end (** Unification over all possible ['a cp]: contains the main methods of ['a cp] except the methods using the type ['a]. A [group] manipulates only [groupable_cp] for homogeneity. *) type groupable_cp = < get_name : string list; get_short_name : string option; get_help : string; get_formatted : Format.formatter -> unit; get_default_formatted : Format.formatter -> unit; get_help_formatted : Format.formatter -> unit; get_spec : Arg.spec; reset : unit; set_raw : Raw.cp -> unit; > (** Raised in case a name is already used. See {!Config_file.group.add} *) exception Double_name (** An exception possibly raised if we want to check that every cp is defined in a configuration file. See {!Config_file.group.read}. *) exception Missing_cp of groupable_cp (** A group of cps, that can be loaded and saved, or used to generate command line arguments. The basic usage is to have only one group and one configuration file, but this mechanism allows to have more, for instance to have another smaller group for the options to pass on the command line. *) class group : object (** Adds a cp to the group. Note that the type ['a] must be lost to allow cps of different types to belong to the same group. @raise Double_name if [cp#get_name] is already used. *) (* method add : 'a cp -> 'a cp *) method add : 'a cp -> unit (**[write filename] saves all the cps into the configuration file [filename].*) method write : ?with_help:bool -> string -> unit (** [read filename] reads [filename] and stores the values it specifies into the cps belonging to this group. The file is created (and not read) if it doesn't exists. In the default behaviour, no warning is issued if not all cps are updated or if some values of [filename] aren't used. If [obsoletes] is specified, then prints in this file all the values that are in [filename] but not in this group. Those cps are likely to be erroneous or obsolete. Opens this file only if there is something to write in it. If [no_default] is [true], then raises [Missing_cp foo] if the cp [foo] isn't defined in [filename] but belongs to this group. [on_type_error groupable_cp value output filename in_channel] is called if the file doesn't give suitable value (string instead of int for instance, or a string not belonging to the expected enumeration) for the cp [groupable_cp]. [value] is the value read from the file, [output] is the argument of {!Config_file.Wrong_type}, [filename] is the same argument as the one given to read, and [in_channel] refers to [filename] to allow a function to close it if needed. Default behaviour is to print an error message and call [exit 1]. 
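As an illustrative sketch (the cp names, values and file name are not prescribed by the library):
{[ let group = new group in
   let port = new int_cp ~group ["server"; "port"] 8080 "listening port" in
   group#read "app.conf"; (* port#get now reflects app.conf when the file defines it *)
   group#write "app.conf" ]}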
*) method read : ?obsoletes:string -> ?no_default:bool -> ?on_type_error : (groupable_cp -> Raw.cp -> (out_channel -> unit) -> string -> in_channel -> unit) -> string -> unit (** Interface with module Arg. @param section_separator the string used to concatenate the name of a cp, to get the command line option name. ["-"] is a good default. @return a list that can be used with [Arg.parse] and [Arg.usage].*) method command_line_args : section_separator:string -> (string * Arg.spec * string) list end (** {2 Predefined cp classes} *) (** The last three non-optional arguments are always [name] (of type string list), [default_value] and [help] (of type string). [name] is the path to the cp: [["section";"subsection"; ...; "foo"]]. It can consists of a single element but must not be empty. [short_name] will be added a "-" and used in {!Config_file.group.command_line_args}. [group], if provided, adds the freshly defined option to it (something like [initializer group#add self]). [help] needs not contain newlines, it will be automatically truncated where needed. It is mandatory but can be [""]. *) class int_cp : ?group:group -> string list -> ?short_name:string -> int -> string -> [int] cp class float_cp : ?group:group -> string list -> ?short_name:string -> float -> string -> [float] cp class bool_cp : ?group:group -> string list -> ?short_name:string -> bool -> string -> [bool] cp class string_cp : ?group:group -> string list -> ?short_name:string -> string -> string -> [string] cp class ['a] list_cp : 'a wrappers -> ?group:group -> string list -> ?short_name:string -> 'a list -> string -> ['a list] cp class ['a] option_cp : 'a wrappers -> ?group:group -> string list -> ?short_name:string -> 'a option -> string -> ['a option] cp class ['a] enumeration_cp : (string * 'a) list -> ?group:group -> string list -> ?short_name:string -> 'a -> string -> ['a] cp class ['a, 'b] tuple2_cp : 'a wrappers -> 'b wrappers -> ?group:group -> string list -> ?short_name:string -> 'a * 'b -> string -> ['a * 'b] cp class ['a, 'b, 'c] tuple3_cp : 'a wrappers -> 'b wrappers -> 'c wrappers -> ?group:group -> string list -> ?short_name:string -> 'a * 'b * 'c -> string -> ['a * 'b * 'c] cp class ['a, 'b, 'c, 'd] tuple4_cp : 'a wrappers -> 'b wrappers -> 'c wrappers -> 'd wrappers -> ?group:group -> string list -> ?short_name:string -> 'a * 'b * 'c * 'd -> string -> ['a * 'b * 'c * 'd] cp class string2_cp : ?group:group -> string list -> ?short_name:string -> string * string -> string -> [string, string] tuple2_cp (* class color_cp : ?group:group -> string list -> ?short_name:string -> string -> string -> string_cp *) class font_cp : ?group:group -> string list -> ?short_name:string -> string -> string -> string_cp class filename_cp : ?group:group -> string list -> ?short_name:string -> string -> string -> string_cp (** {2:predefinedwrappers Predefined wrappers} *) val int_wrappers : int wrappers val float_wrappers : float wrappers val bool_wrappers : bool wrappers val string_wrappers : string wrappers val list_wrappers : 'a wrappers -> 'a list wrappers val option_wrappers : 'a wrappers -> 'a option wrappers (** If you have a [type suit = Spades | Hearts | Diamond | Clubs], then {[enumeration_wrappers ["spades",Spades; "hearts",Hearts; "diamond",Diamond; "clubs",Clubs]]} will allow you to use cp of this type. For sum types with not only constant constructors, you will need to define your own cp class. 
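Combined with {!Config_file.cp_custom_type} (see below), this gives, as a sketch:
{[ class suit_cp = [suit] cp_custom_type
     (enumeration_wrappers
        ["spades",Spades; "hearts",Hearts; "diamond",Diamond; "clubs",Clubs]) ]}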
*) val enumeration_wrappers : (string * 'a) list -> 'a wrappers val tuple2_wrappers : 'a wrappers -> 'b wrappers -> ('a * 'b) wrappers val tuple3_wrappers : 'a wrappers -> 'b wrappers -> 'c wrappers -> ('a * 'b * 'c) wrappers val tuple4_wrappers : 'a wrappers -> 'b wrappers -> 'c wrappers -> 'd wrappers -> ('a * 'b * 'c * 'd) wrappers (** {2 Defining new cp classes} *) (** To define a new cp class, you just have to provide an implementation for the wrappers between your type [foo] and the type [Raw.cp]. Once you have your wrappers [w], write {[class foo_cp = [foo] cp_custom_type w]} For further details, have a look at the commented .ml file, section "predefined cp classes". *) class ['a] cp_custom_type : 'a wrappers -> ?group:group -> string list -> ?short_name:string -> 'a -> string -> ['a] cp (** {1 Backward compatibility} Deprecated. All the functions from the module Options are available, except: - [prune_file]: use [group#write ?obsoletes:"foo.ml"]. - [smalllist_to_value], [smalllist_option]: use lists or tuples. - [get_class]. - [class_hook]: hooks are local to a cp. If you want hooks global to a class, define a new class that inherit from {!Config_file.cp_custom_type}. - [set_simple_option], [get_simple_option], [simple_options], [simple_args]: use {!Config_file.group.write}. - [set_option_hook]: use {!Config_file.cp.add_hook}. - [set_string_wrappers]: define a new class with {!Config_file.cp_custom_type}. The old configurations files are readable by this module. *) (**/**) type 'a option_class type 'a option_record type options_file val create_options_file : string -> options_file val set_options_file : options_file -> string -> unit val load : options_file -> unit val append : options_file -> string -> unit val save : options_file -> unit val save_with_help : options_file -> unit (* val define_option : options_file -> *) (* string list -> string -> 'a option_class -> 'a -> 'a option_record *) val option_hook : 'a option_record -> (unit -> unit) -> unit val string_option : string option_class val color_option : string option_class val font_option : string option_class val int_option : int option_class val bool_option : bool option_class val float_option : float option_class val string2_option : (string * string) option_class val option_option : 'a option_class -> 'a option option_class val list_option : 'a option_class -> 'a list option_class val sum_option : (string * 'a) list -> 'a option_class val tuple2_option : 'a option_class * 'b option_class -> ('a * 'b) option_class val tuple3_option : 'a option_class * 'b option_class * 'c option_class -> ('a * 'b * 'c) option_class val tuple4_option : 'a option_class * 'b option_class * 'c option_class * 'd option_class -> ('a * 'b * 'c * 'd) option_class val ( !! 
) : 'a option_record -> 'a val ( =:= ) : 'a option_record -> 'a -> unit val shortname : 'a option_record -> string val get_help : 'a option_record -> string type option_value = Module of option_module | StringValue of string | IntValue of int | FloatValue of float | List of option_value list | SmallList of option_value list and option_module = (string * option_value) list val define_option_class : string -> (option_value -> 'a) -> ('a -> option_value) -> 'a option_class val to_value : 'a option_class -> 'a -> option_value val from_value : 'a option_class -> option_value -> 'a val value_to_string : option_value -> string val string_to_value : string -> option_value val value_to_int : option_value -> int val int_to_value : int -> option_value val bool_of_string : string -> bool val value_to_bool : option_value -> bool val bool_to_value : bool -> option_value val value_to_float : option_value -> float val float_to_value : float -> option_value val value_to_string2 : option_value -> string * string val string2_to_value : string * string -> option_value val value_to_list : (option_value -> 'a) -> option_value -> 'a list val list_to_value : ('a -> option_value) -> 'a list -> option_value coq-8.4pl2/ide/utils/configwin_keys.ml0000640000175000001440000034230011254456226017111 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** Key codes Ce fichier provient de X11/keysymdef.h les noms des symboles deviennent : XK_ -> xk_ Thanks to Fabrice Le Fessant. *) let xk_VoidSymbol = 0xFFFFFF (** void symbol *) (** TTY Functions, cleverly chosen to map to ascii, for convenience of programming, but could have been arbitrary (at the cost of lookup tables in client code. 
*) let xk_BackSpace = 0xFF08 (** back space, back char *) let xk_Tab = 0xFF09 let xk_Linefeed = 0xFF0A (** Linefeed, LF *) let xk_Clear = 0xFF0B let xk_Return = 0xFF0D (** Return, enter *) let xk_Pause = 0xFF13 (** Pause, hold *) let xk_Scroll_Lock = 0xFF14 let xk_Sys_Req = 0xFF15 let xk_Escape = 0xFF1B let xk_Delete = 0xFFFF (** Delete, rubout *) (** International & multi-key character composition *) let xk_Multi_key = 0xFF20 (** Multi-key character compose *) (** Japanese keyboard support *) let xk_Kanji = 0xFF21 (** Kanji, Kanji convert *) let xk_Muhenkan = 0xFF22 (** Cancel Conversion *) let xk_Henkan_Mode = 0xFF23 (** Start/Stop Conversion *) let xk_Henkan = 0xFF23 (** Alias for Henkan_Mode *) let xk_Romaji = 0xFF24 (** to Romaji *) let xk_Hiragana = 0xFF25 (** to Hiragana *) let xk_Katakana = 0xFF26 (** to Katakana *) let xk_Hiragana_Katakana = 0xFF27 (** Hiragana/Katakana toggle *) let xk_Zenkaku = 0xFF28 (** to Zenkaku *) let xk_Hankaku = 0xFF29 (** to Hankaku *) let xk_Zenkaku_Hankaku = 0xFF2A (** Zenkaku/Hankaku toggle *) let xk_Touroku = 0xFF2B (** Add to Dictionary *) let xk_Massyo = 0xFF2C (** Delete from Dictionary *) let xk_Kana_Lock = 0xFF2D (** Kana Lock *) let xk_Kana_Shift = 0xFF2E (** Kana Shift *) let xk_Eisu_Shift = 0xFF2F (** Alphanumeric Shift *) let xk_Eisu_toggle = 0xFF30 (** Alphanumeric toggle *) (** = 0xFF31 thru = 0xFF3F are under xk_KOREAN *) (** Cursor control & motion *) let xk_Home = 0xFF50 let xk_Left = 0xFF51 (** Move left, left arrow *) let xk_Up = 0xFF52 (** Move up, up arrow *) let xk_Right = 0xFF53 (** Move right, right arrow *) let xk_Down = 0xFF54 (** Move down, down arrow *) let xk_Prior = 0xFF55 (** Prior, previous *) let xk_Page_Up = 0xFF55 let xk_Next = 0xFF56 (** Next *) let xk_Page_Down = 0xFF56 let xk_End = 0xFF57 (** EOL *) let xk_Begin = 0xFF58 (** BOL *) (** Misc Functions *) let xk_Select = 0xFF60 (** Select, mark *) let xk_Print = 0xFF61 let xk_Execute = 0xFF62 (** Execute, run, do *) let xk_Insert = 0xFF63 (** Insert, insert here *) let xk_Undo = 0xFF65 (** Undo, oops *) let xk_Redo = 0xFF66 (** redo, again *) let xk_Menu = 0xFF67 let xk_Find = 0xFF68 (** Find, search *) let xk_Cancel = 0xFF69 (** Cancel, stop, abort, exit *) let xk_Help = 0xFF6A (** Help *) let xk_Break = 0xFF6B let xk_Mode_switch = 0xFF7E (** Character set switch *) let xk_script_switch = 0xFF7E (** Alias for mode_switch *) let xk_Num_Lock = 0xFF7F (** Keypad Functions, keypad numbers cleverly chosen to map to ascii *) let xk_KP_Space = 0xFF80 (** space *) let xk_KP_Tab = 0xFF89 let xk_KP_Enter = 0xFF8D (** enter *) let xk_KP_F1 = 0xFF91 (** PF1, KP_A, ... 
*) let xk_KP_F2 = 0xFF92 let xk_KP_F3 = 0xFF93 let xk_KP_F4 = 0xFF94 let xk_KP_Home = 0xFF95 let xk_KP_Left = 0xFF96 let xk_KP_Up = 0xFF97 let xk_KP_Right = 0xFF98 let xk_KP_Down = 0xFF99 let xk_KP_Prior = 0xFF9A let xk_KP_Page_Up = 0xFF9A let xk_KP_Next = 0xFF9B let xk_KP_Page_Down = 0xFF9B let xk_KP_End = 0xFF9C let xk_KP_Begin = 0xFF9D let xk_KP_Insert = 0xFF9E let xk_KP_Delete = 0xFF9F let xk_KP_Equal = 0xFFBD (** equals *) let xk_KP_Multiply = 0xFFAA let xk_KP_Add = 0xFFAB let xk_KP_Separator = 0xFFAC (** separator, often comma *) let xk_KP_Subtract = 0xFFAD let xk_KP_Decimal = 0xFFAE let xk_KP_Divide = 0xFFAF let xk_KP_0 = 0xFFB0 let xk_KP_1 = 0xFFB1 let xk_KP_2 = 0xFFB2 let xk_KP_3 = 0xFFB3 let xk_KP_4 = 0xFFB4 let xk_KP_5 = 0xFFB5 let xk_KP_6 = 0xFFB6 let xk_KP_7 = 0xFFB7 let xk_KP_8 = 0xFFB8 let xk_KP_9 = 0xFFB9 (* * Auxilliary Functions; note the duplicate definitions for left and right * function keys; Sun keyboards and a few other manufactures have such * function key groups on the left and/or right sides of the keyboard. * We've not found a keyboard with more than 35 function keys total. *) let xk_F1 = 0xFFBE let xk_F2 = 0xFFBF let xk_F3 = 0xFFC0 let xk_F4 = 0xFFC1 let xk_F5 = 0xFFC2 let xk_F6 = 0xFFC3 let xk_F7 = 0xFFC4 let xk_F8 = 0xFFC5 let xk_F9 = 0xFFC6 let xk_F10 = 0xFFC7 let xk_F11 = 0xFFC8 let xk_L1 = 0xFFC8 let xk_F12 = 0xFFC9 let xk_L2 = 0xFFC9 let xk_F13 = 0xFFCA let xk_L3 = 0xFFCA let xk_F14 = 0xFFCB let xk_L4 = 0xFFCB let xk_F15 = 0xFFCC let xk_L5 = 0xFFCC let xk_F16 = 0xFFCD let xk_L6 = 0xFFCD let xk_F17 = 0xFFCE let xk_L7 = 0xFFCE let xk_F18 = 0xFFCF let xk_L8 = 0xFFCF let xk_F19 = 0xFFD0 let xk_L9 = 0xFFD0 let xk_F20 = 0xFFD1 let xk_L10 = 0xFFD1 let xk_F21 = 0xFFD2 let xk_R1 = 0xFFD2 let xk_F22 = 0xFFD3 let xk_R2 = 0xFFD3 let xk_F23 = 0xFFD4 let xk_R3 = 0xFFD4 let xk_F24 = 0xFFD5 let xk_R4 = 0xFFD5 let xk_F25 = 0xFFD6 let xk_R5 = 0xFFD6 let xk_F26 = 0xFFD7 let xk_R6 = 0xFFD7 let xk_F27 = 0xFFD8 let xk_R7 = 0xFFD8 let xk_F28 = 0xFFD9 let xk_R8 = 0xFFD9 let xk_F29 = 0xFFDA let xk_R9 = 0xFFDA let xk_F30 = 0xFFDB let xk_R10 = 0xFFDB let xk_F31 = 0xFFDC let xk_R11 = 0xFFDC let xk_F32 = 0xFFDD let xk_R12 = 0xFFDD let xk_F33 = 0xFFDE let xk_R13 = 0xFFDE let xk_F34 = 0xFFDF let xk_R14 = 0xFFDF let xk_F35 = 0xFFE0 let xk_R15 = 0xFFE0 (** Modifiers *) let xk_Shift_L = 0xFFE1 (** Left shift *) let xk_Shift_R = 0xFFE2 (** Right shift *) let xk_Control_L = 0xFFE3 (** Left control *) let xk_Control_R = 0xFFE4 (** Right control *) let xk_Caps_Lock = 0xFFE5 (** Caps lock *) let xk_Shift_Lock = 0xFFE6 (** Shift lock *) let xk_Meta_L = 0xFFE7 (** Left meta *) let xk_Meta_R = 0xFFE8 (** Right meta *) let xk_Alt_L = 0xFFE9 (** Left alt *) let xk_Alt_R = 0xFFEA (** Right alt *) let xk_Super_L = 0xFFEB (** Left super *) let xk_Super_R = 0xFFEC (** Right super *) let xk_Hyper_L = 0xFFED (** Left hyper *) let xk_Hyper_R = 0xFFEE (** Right hyper *) (* * ISO 9995 Function and Modifier Keys * Byte 3 = = 0xFE *) let xk_ISO_Lock = 0xFE01 let xk_ISO_Level2_Latch = 0xFE02 let xk_ISO_Level3_Shift = 0xFE03 let xk_ISO_Level3_Latch = 0xFE04 let xk_ISO_Level3_Lock = 0xFE05 let xk_ISO_Group_Shift = 0xFF7E (** Alias for mode_switch *) let xk_ISO_Group_Latch = 0xFE06 let xk_ISO_Group_Lock = 0xFE07 let xk_ISO_Next_Group = 0xFE08 let xk_ISO_Next_Group_Lock = 0xFE09 let xk_ISO_Prev_Group = 0xFE0A let xk_ISO_Prev_Group_Lock = 0xFE0B let xk_ISO_First_Group = 0xFE0C let xk_ISO_First_Group_Lock = 0xFE0D let xk_ISO_Last_Group = 0xFE0E let xk_ISO_Last_Group_Lock = 0xFE0F let xk_ISO_Left_Tab = 0xFE20 let 
xk_ISO_Move_Line_Up = 0xFE21 let xk_ISO_Move_Line_Down = 0xFE22 let xk_ISO_Partial_Line_Up = 0xFE23 let xk_ISO_Partial_Line_Down = 0xFE24 let xk_ISO_Partial_Space_Left = 0xFE25 let xk_ISO_Partial_Space_Right = 0xFE26 let xk_ISO_Set_Margin_Left = 0xFE27 let xk_ISO_Set_Margin_Right = 0xFE28 let xk_ISO_Release_Margin_Left = 0xFE29 let xk_ISO_Release_Margin_Right = 0xFE2A let xk_ISO_Release_Both_Margins = 0xFE2B let xk_ISO_Fast_Cursor_Left = 0xFE2C let xk_ISO_Fast_Cursor_Right = 0xFE2D let xk_ISO_Fast_Cursor_Up = 0xFE2E let xk_ISO_Fast_Cursor_Down = 0xFE2F let xk_ISO_Continuous_Underline = 0xFE30 let xk_ISO_Discontinuous_Underline = 0xFE31 let xk_ISO_Emphasize = 0xFE32 let xk_ISO_Center_Object = 0xFE33 let xk_ISO_Enter = 0xFE34 let xk_dead_grave = 0xFE50 let xk_dead_acute = 0xFE51 let xk_dead_circumflex = 0xFE52 let xk_dead_tilde = 0xFE53 let xk_dead_macron = 0xFE54 let xk_dead_breve = 0xFE55 let xk_dead_abovedot = 0xFE56 let xk_dead_diaeresis = 0xFE57 let xk_dead_abovering = 0xFE58 let xk_dead_doubleacute = 0xFE59 let xk_dead_caron = 0xFE5A let xk_dead_cedilla = 0xFE5B let xk_dead_ogonek = 0xFE5C let xk_dead_iota = 0xFE5D let xk_dead_voiced_sound = 0xFE5E let xk_dead_semivoiced_sound = 0xFE5F let xk_dead_belowdot = 0xFE60 let xk_First_Virtual_Screen = 0xFED0 let xk_Prev_Virtual_Screen = 0xFED1 let xk_Next_Virtual_Screen = 0xFED2 let xk_Last_Virtual_Screen = 0xFED4 let xk_Terminate_Server = 0xFED5 let xk_AccessX_Enable = 0xFE70 let xk_AccessX_Feedback_Enable = 0xFE71 let xk_RepeatKeys_Enable = 0xFE72 let xk_SlowKeys_Enable = 0xFE73 let xk_BounceKeys_Enable = 0xFE74 let xk_StickyKeys_Enable = 0xFE75 let xk_MouseKeys_Enable = 0xFE76 let xk_MouseKeys_Accel_Enable = 0xFE77 let xk_Overlay1_Enable = 0xFE78 let xk_Overlay2_Enable = 0xFE79 let xk_AudibleBell_Enable = 0xFE7A let xk_Pointer_Left = 0xFEE0 let xk_Pointer_Right = 0xFEE1 let xk_Pointer_Up = 0xFEE2 let xk_Pointer_Down = 0xFEE3 let xk_Pointer_UpLeft = 0xFEE4 let xk_Pointer_UpRight = 0xFEE5 let xk_Pointer_DownLeft = 0xFEE6 let xk_Pointer_DownRight = 0xFEE7 let xk_Pointer_Button_Dflt = 0xFEE8 let xk_Pointer_Button1 = 0xFEE9 let xk_Pointer_Button2 = 0xFEEA let xk_Pointer_Button3 = 0xFEEB let xk_Pointer_Button4 = 0xFEEC let xk_Pointer_Button5 = 0xFEED let xk_Pointer_DblClick_Dflt = 0xFEEE let xk_Pointer_DblClick1 = 0xFEEF let xk_Pointer_DblClick2 = 0xFEF0 let xk_Pointer_DblClick3 = 0xFEF1 let xk_Pointer_DblClick4 = 0xFEF2 let xk_Pointer_DblClick5 = 0xFEF3 let xk_Pointer_Drag_Dflt = 0xFEF4 let xk_Pointer_Drag1 = 0xFEF5 let xk_Pointer_Drag2 = 0xFEF6 let xk_Pointer_Drag3 = 0xFEF7 let xk_Pointer_Drag4 = 0xFEF8 let xk_Pointer_Drag5 = 0xFEFD let xk_Pointer_EnableKeys = 0xFEF9 let xk_Pointer_Accelerate = 0xFEFA let xk_Pointer_DfltBtnNext = 0xFEFB let xk_Pointer_DfltBtnPrev = 0xFEFC (* * 3270 Terminal Keys * Byte 3 = = 0xFD *) let xk_3270_Duplicate = 0xFD01 let xk_3270_FieldMark = 0xFD02 let xk_3270_Right2 = 0xFD03 let xk_3270_Left2 = 0xFD04 let xk_3270_BackTab = 0xFD05 let xk_3270_EraseEOF = 0xFD06 let xk_3270_EraseInput = 0xFD07 let xk_3270_Reset = 0xFD08 let xk_3270_Quit = 0xFD09 let xk_3270_PA1 = 0xFD0A let xk_3270_PA2 = 0xFD0B let xk_3270_PA3 = 0xFD0C let xk_3270_Test = 0xFD0D let xk_3270_Attn = 0xFD0E let xk_3270_CursorBlink = 0xFD0F let xk_3270_AltCursor = 0xFD10 let xk_3270_KeyClick = 0xFD11 let xk_3270_Jump = 0xFD12 let xk_3270_Ident = 0xFD13 let xk_3270_Rule = 0xFD14 let xk_3270_Copy = 0xFD15 let xk_3270_Play = 0xFD16 let xk_3270_Setup = 0xFD17 let xk_3270_Record = 0xFD18 let xk_3270_ChangeScreen = 0xFD19 let xk_3270_DeleteWord = 0xFD1A let 
xk_3270_ExSelect = 0xFD1B let xk_3270_CursorSelect = 0xFD1C let xk_3270_PrintScreen = 0xFD1D let xk_3270_Enter = 0xFD1E (* * Latin 1 * Byte 3 = 0 *) let xk_space = 0x020 let xk_exclam = 0x021 let xk_quotedbl = 0x022 let xk_numbersign = 0x023 let xk_dollar = 0x024 let xk_percent = 0x025 let xk_ampersand = 0x026 let xk_apostrophe = 0x027 let xk_quoteright = 0x027 (** deprecated *) let xk_parenleft = 0x028 let xk_parenright = 0x029 let xk_asterisk = 0x02a let xk_plus = 0x02b let xk_comma = 0x02c let xk_minus = 0x02d let xk_period = 0x02e let xk_slash = 0x02f let xk_0 = 0x030 let xk_1 = 0x031 let xk_2 = 0x032 let xk_3 = 0x033 let xk_4 = 0x034 let xk_5 = 0x035 let xk_6 = 0x036 let xk_7 = 0x037 let xk_8 = 0x038 let xk_9 = 0x039 let xk_colon = 0x03a let xk_semicolon = 0x03b let xk_less = 0x03c let xk_equal = 0x03d let xk_greater = 0x03e let xk_question = 0x03f let xk_at = 0x040 let xk_A = 0x041 let xk_B = 0x042 let xk_C = 0x043 let xk_D = 0x044 let xk_E = 0x045 let xk_F = 0x046 let xk_G = 0x047 let xk_H = 0x048 let xk_I = 0x049 let xk_J = 0x04a let xk_K = 0x04b let xk_L = 0x04c let xk_M = 0x04d let xk_N = 0x04e let xk_O = 0x04f let xk_P = 0x050 let xk_Q = 0x051 let xk_R = 0x052 let xk_S = 0x053 let xk_T = 0x054 let xk_U = 0x055 let xk_V = 0x056 let xk_W = 0x057 let xk_X = 0x058 let xk_Y = 0x059 let xk_Z = 0x05a let xk_bracketleft = 0x05b let xk_backslash = 0x05c let xk_bracketright = 0x05d let xk_asciicircum = 0x05e let xk_underscore = 0x05f let xk_grave = 0x060 let xk_quoteleft = 0x060 (** deprecated *) let xk_a = 0x061 let xk_b = 0x062 let xk_c = 0x063 let xk_d = 0x064 let xk_e = 0x065 let xk_f = 0x066 let xk_g = 0x067 let xk_h = 0x068 let xk_i = 0x069 let xk_j = 0x06a let xk_k = 0x06b let xk_l = 0x06c let xk_m = 0x06d let xk_n = 0x06e let xk_o = 0x06f let xk_p = 0x070 let xk_q = 0x071 let xk_r = 0x072 let xk_s = 0x073 let xk_t = 0x074 let xk_u = 0x075 let xk_v = 0x076 let xk_w = 0x077 let xk_x = 0x078 let xk_y = 0x079 let xk_z = 0x07a let xk_braceleft = 0x07b let xk_bar = 0x07c let xk_braceright = 0x07d let xk_asciitilde = 0x07e let xk_nobreakspace = 0x0a0 let xk_exclamdown = 0x0a1 let xk_cent = 0x0a2 let xk_sterling = 0x0a3 let xk_currency = 0x0a4 let xk_yen = 0x0a5 let xk_brokenbar = 0x0a6 let xk_section = 0x0a7 let xk_diaeresis = 0x0a8 let xk_copyright = 0x0a9 let xk_ordfeminine = 0x0aa let xk_guillemotleft = 0x0ab (** left angle quotation mark *) let xk_notsign = 0x0ac let xk_hyphen = 0x0ad let xk_registered = 0x0ae let xk_macron = 0x0af let xk_degree = 0x0b0 let xk_plusminus = 0x0b1 let xk_twosuperior = 0x0b2 let xk_threesuperior = 0x0b3 let xk_acute = 0x0b4 let xk_mu = 0x0b5 let xk_paragraph = 0x0b6 let xk_periodcentered = 0x0b7 let xk_cedilla = 0x0b8 let xk_onesuperior = 0x0b9 let xk_masculine = 0x0ba let xk_guillemotright = 0x0bb (** right angle quotation mark *) let xk_onequarter = 0x0bc let xk_onehalf = 0x0bd let xk_threequarters = 0x0be let xk_questiondown = 0x0bf let xk_Agrave = 0x0c0 let xk_Aacute = 0x0c1 let xk_Acircumflex = 0x0c2 let xk_Atilde = 0x0c3 let xk_Adiaeresis = 0x0c4 let xk_Aring = 0x0c5 let xk_AE = 0x0c6 let xk_Ccedilla = 0x0c7 let xk_Egrave = 0x0c8 let xk_Eacute = 0x0c9 let xk_Ecircumflex = 0x0ca let xk_Ediaeresis = 0x0cb let xk_Igrave = 0x0cc let xk_Iacute = 0x0cd let xk_Icircumflex = 0x0ce let xk_Idiaeresis = 0x0cf let xk_ETH = 0x0d0 let xk_Eth = 0x0d0 (** deprecated *) let xk_Ntilde = 0x0d1 let xk_Ograve = 0x0d2 let xk_Oacute = 0x0d3 let xk_Ocircumflex = 0x0d4 let xk_Otilde = 0x0d5 let xk_Odiaeresis = 0x0d6 let xk_multiply = 0x0d7 let xk_Ooblique = 0x0d8 let 
xk_Ugrave = 0x0d9 let xk_Uacute = 0x0da let xk_Ucircumflex = 0x0db let xk_Udiaeresis = 0x0dc let xk_Yacute = 0x0dd let xk_THORN = 0x0de let xk_Thorn = 0x0de (** deprecated *) let xk_ssharp = 0x0df let xk_agrave = 0x0e0 let xk_aacute = 0x0e1 let xk_acircumflex = 0x0e2 let xk_atilde = 0x0e3 let xk_adiaeresis = 0x0e4 let xk_aring = 0x0e5 let xk_ae = 0x0e6 let xk_ccedilla = 0x0e7 let xk_egrave = 0x0e8 let xk_eacute = 0x0e9 let xk_ecircumflex = 0x0ea let xk_ediaeresis = 0x0eb let xk_igrave = 0x0ec let xk_iacute = 0x0ed let xk_icircumflex = 0x0ee let xk_idiaeresis = 0x0ef let xk_eth = 0x0f0 let xk_ntilde = 0x0f1 let xk_ograve = 0x0f2 let xk_oacute = 0x0f3 let xk_ocircumflex = 0x0f4 let xk_otilde = 0x0f5 let xk_odiaeresis = 0x0f6 let xk_division = 0x0f7 let xk_oslash = 0x0f8 let xk_ugrave = 0x0f9 let xk_uacute = 0x0fa let xk_ucircumflex = 0x0fb let xk_udiaeresis = 0x0fc let xk_yacute = 0x0fd let xk_thorn = 0x0fe let xk_ydiaeresis = 0x0ff (* * Latin 2 * Byte 3 = 1 *) let xk_Aogonek = 0x1a1 let xk_breve = 0x1a2 let xk_Lstroke = 0x1a3 let xk_Lcaron = 0x1a5 let xk_Sacute = 0x1a6 let xk_Scaron = 0x1a9 let xk_Scedilla = 0x1aa let xk_Tcaron = 0x1ab let xk_Zacute = 0x1ac let xk_Zcaron = 0x1ae let xk_Zabovedot = 0x1af let xk_aogonek = 0x1b1 let xk_ogonek = 0x1b2 let xk_lstroke = 0x1b3 let xk_lcaron = 0x1b5 let xk_sacute = 0x1b6 let xk_caron = 0x1b7 let xk_scaron = 0x1b9 let xk_scedilla = 0x1ba let xk_tcaron = 0x1bb let xk_zacute = 0x1bc let xk_doubleacute = 0x1bd let xk_zcaron = 0x1be let xk_zabovedot = 0x1bf let xk_Racute = 0x1c0 let xk_Abreve = 0x1c3 let xk_Lacute = 0x1c5 let xk_Cacute = 0x1c6 let xk_Ccaron = 0x1c8 let xk_Eogonek = 0x1ca let xk_Ecaron = 0x1cc let xk_Dcaron = 0x1cf let xk_Dstroke = 0x1d0 let xk_Nacute = 0x1d1 let xk_Ncaron = 0x1d2 let xk_Odoubleacute = 0x1d5 let xk_Rcaron = 0x1d8 let xk_Uring = 0x1d9 let xk_Udoubleacute = 0x1db let xk_Tcedilla = 0x1de let xk_racute = 0x1e0 let xk_abreve = 0x1e3 let xk_lacute = 0x1e5 let xk_cacute = 0x1e6 let xk_ccaron = 0x1e8 let xk_eogonek = 0x1ea let xk_ecaron = 0x1ec let xk_dcaron = 0x1ef let xk_dstroke = 0x1f0 let xk_nacute = 0x1f1 let xk_ncaron = 0x1f2 let xk_odoubleacute = 0x1f5 let xk_udoubleacute = 0x1fb let xk_rcaron = 0x1f8 let xk_uring = 0x1f9 let xk_tcedilla = 0x1fe let xk_abovedot = 0x1ff (* * Latin 3 * Byte 3 = 2 *) let xk_Hstroke = 0x2a1 let xk_Hcircumflex = 0x2a6 let xk_Iabovedot = 0x2a9 let xk_Gbreve = 0x2ab let xk_Jcircumflex = 0x2ac let xk_hstroke = 0x2b1 let xk_hcircumflex = 0x2b6 let xk_idotless = 0x2b9 let xk_gbreve = 0x2bb let xk_jcircumflex = 0x2bc let xk_Cabovedot = 0x2c5 let xk_Ccircumflex = 0x2c6 let xk_Gabovedot = 0x2d5 let xk_Gcircumflex = 0x2d8 let xk_Ubreve = 0x2dd let xk_Scircumflex = 0x2de let xk_cabovedot = 0x2e5 let xk_ccircumflex = 0x2e6 let xk_gabovedot = 0x2f5 let xk_gcircumflex = 0x2f8 let xk_ubreve = 0x2fd let xk_scircumflex = 0x2fe (* * Latin 4 * Byte 3 = 3 *) let xk_kra = 0x3a2 let xk_kappa = 0x3a2 (** deprecated *) let xk_Rcedilla = 0x3a3 let xk_Itilde = 0x3a5 let xk_Lcedilla = 0x3a6 let xk_Emacron = 0x3aa let xk_Gcedilla = 0x3ab let xk_Tslash = 0x3ac let xk_rcedilla = 0x3b3 let xk_itilde = 0x3b5 let xk_lcedilla = 0x3b6 let xk_emacron = 0x3ba let xk_gcedilla = 0x3bb let xk_tslash = 0x3bc let xk_ENG = 0x3bd let xk_eng = 0x3bf let xk_Amacron = 0x3c0 let xk_Iogonek = 0x3c7 let xk_Eabovedot = 0x3cc let xk_Imacron = 0x3cf let xk_Ncedilla = 0x3d1 let xk_Omacron = 0x3d2 let xk_Kcedilla = 0x3d3 let xk_Uogonek = 0x3d9 let xk_Utilde = 0x3dd let xk_Umacron = 0x3de let xk_amacron = 0x3e0 let xk_iogonek = 0x3e7 let 
xk_eabovedot = 0x3ec let xk_imacron = 0x3ef let xk_ncedilla = 0x3f1 let xk_omacron = 0x3f2 let xk_kcedilla = 0x3f3 let xk_uogonek = 0x3f9 let xk_utilde = 0x3fd let xk_umacron = 0x3fe (* * Katakana * Byte 3 = 4 *) let xk_overline = 0x47e let xk_kana_fullstop = 0x4a1 let xk_kana_openingbracket = 0x4a2 let xk_kana_closingbracket = 0x4a3 let xk_kana_comma = 0x4a4 let xk_kana_conjunctive = 0x4a5 let xk_kana_middledot = 0x4a5 (** deprecated *) let xk_kana_WO = 0x4a6 let xk_kana_a = 0x4a7 let xk_kana_i = 0x4a8 let xk_kana_u = 0x4a9 let xk_kana_e = 0x4aa let xk_kana_o = 0x4ab let xk_kana_ya = 0x4ac let xk_kana_yu = 0x4ad let xk_kana_yo = 0x4ae let xk_kana_tsu = 0x4af let xk_kana_tu = 0x4af (** deprecated *) let xk_prolongedsound = 0x4b0 let xk_kana_A = 0x4b1 let xk_kana_I = 0x4b2 let xk_kana_U = 0x4b3 let xk_kana_E = 0x4b4 let xk_kana_O = 0x4b5 let xk_kana_KA = 0x4b6 let xk_kana_KI = 0x4b7 let xk_kana_KU = 0x4b8 let xk_kana_KE = 0x4b9 let xk_kana_KO = 0x4ba let xk_kana_SA = 0x4bb let xk_kana_SHI = 0x4bc let xk_kana_SU = 0x4bd let xk_kana_SE = 0x4be let xk_kana_SO = 0x4bf let xk_kana_TA = 0x4c0 let xk_kana_CHI = 0x4c1 let xk_kana_TI = 0x4c1 (** deprecated *) let xk_kana_TSU = 0x4c2 let xk_kana_TU = 0x4c2 (** deprecated *) let xk_kana_TE = 0x4c3 let xk_kana_TO = 0x4c4 let xk_kana_NA = 0x4c5 let xk_kana_NI = 0x4c6 let xk_kana_NU = 0x4c7 let xk_kana_NE = 0x4c8 let xk_kana_NO = 0x4c9 let xk_kana_HA = 0x4ca let xk_kana_HI = 0x4cb let xk_kana_FU = 0x4cc let xk_kana_HU = 0x4cc (** deprecated *) let xk_kana_HE = 0x4cd let xk_kana_HO = 0x4ce let xk_kana_MA = 0x4cf let xk_kana_MI = 0x4d0 let xk_kana_MU = 0x4d1 let xk_kana_ME = 0x4d2 let xk_kana_MO = 0x4d3 let xk_kana_YA = 0x4d4 let xk_kana_YU = 0x4d5 let xk_kana_YO = 0x4d6 let xk_kana_RA = 0x4d7 let xk_kana_RI = 0x4d8 let xk_kana_RU = 0x4d9 let xk_kana_RE = 0x4da let xk_kana_RO = 0x4db let xk_kana_WA = 0x4dc let xk_kana_N = 0x4dd let xk_voicedsound = 0x4de let xk_semivoicedsound = 0x4df let xk_kana_switch = 0xFF7E (** Alias for mode_switch *) (* * Arabic * Byte 3 = 5 *) let xk_Arabic_comma = 0x5ac let xk_Arabic_semicolon = 0x5bb let xk_Arabic_question_mark = 0x5bf let xk_Arabic_hamza = 0x5c1 let xk_Arabic_maddaonalef = 0x5c2 let xk_Arabic_hamzaonalef = 0x5c3 let xk_Arabic_hamzaonwaw = 0x5c4 let xk_Arabic_hamzaunderalef = 0x5c5 let xk_Arabic_hamzaonyeh = 0x5c6 let xk_Arabic_alef = 0x5c7 let xk_Arabic_beh = 0x5c8 let xk_Arabic_tehmarbuta = 0x5c9 let xk_Arabic_teh = 0x5ca let xk_Arabic_theh = 0x5cb let xk_Arabic_jeem = 0x5cc let xk_Arabic_hah = 0x5cd let xk_Arabic_khah = 0x5ce let xk_Arabic_dal = 0x5cf let xk_Arabic_thal = 0x5d0 let xk_Arabic_ra = 0x5d1 let xk_Arabic_zain = 0x5d2 let xk_Arabic_seen = 0x5d3 let xk_Arabic_sheen = 0x5d4 let xk_Arabic_sad = 0x5d5 let xk_Arabic_dad = 0x5d6 let xk_Arabic_tah = 0x5d7 let xk_Arabic_zah = 0x5d8 let xk_Arabic_ain = 0x5d9 let xk_Arabic_ghain = 0x5da let xk_Arabic_tatweel = 0x5e0 let xk_Arabic_feh = 0x5e1 let xk_Arabic_qaf = 0x5e2 let xk_Arabic_kaf = 0x5e3 let xk_Arabic_lam = 0x5e4 let xk_Arabic_meem = 0x5e5 let xk_Arabic_noon = 0x5e6 let xk_Arabic_ha = 0x5e7 let xk_Arabic_heh = 0x5e7 (** deprecated *) let xk_Arabic_waw = 0x5e8 let xk_Arabic_alefmaksura = 0x5e9 let xk_Arabic_yeh = 0x5ea let xk_Arabic_fathatan = 0x5eb let xk_Arabic_dammatan = 0x5ec let xk_Arabic_kasratan = 0x5ed let xk_Arabic_fatha = 0x5ee let xk_Arabic_damma = 0x5ef let xk_Arabic_kasra = 0x5f0 let xk_Arabic_shadda = 0x5f1 let xk_Arabic_sukun = 0x5f2 let xk_Arabic_switch = 0xFF7E (** Alias for mode_switch *) (* * Cyrillic * Byte 3 = 6 *) let xk_Serbian_dje 
= 0x6a1 let xk_Macedonia_gje = 0x6a2 let xk_Cyrillic_io = 0x6a3 let xk_Ukrainian_ie = 0x6a4 let xk_Ukranian_je = 0x6a4 (** deprecated *) let xk_Macedonia_dse = 0x6a5 let xk_Ukrainian_i = 0x6a6 let xk_Ukranian_i = 0x6a6 (** deprecated *) let xk_Ukrainian_yi = 0x6a7 let xk_Ukranian_yi = 0x6a7 (** deprecated *) let xk_Cyrillic_je = 0x6a8 let xk_Serbian_je = 0x6a8 (** deprecated *) let xk_Cyrillic_lje = 0x6a9 let xk_Serbian_lje = 0x6a9 (** deprecated *) let xk_Cyrillic_nje = 0x6aa let xk_Serbian_nje = 0x6aa (** deprecated *) let xk_Serbian_tshe = 0x6ab let xk_Macedonia_kje = 0x6ac let xk_Byelorussian_shortu = 0x6ae let xk_Cyrillic_dzhe = 0x6af let xk_Serbian_dze = 0x6af (** deprecated *) let xk_numerosign = 0x6b0 let xk_Serbian_DJE = 0x6b1 let xk_Macedonia_GJE = 0x6b2 let xk_Cyrillic_IO = 0x6b3 let xk_Ukrainian_IE = 0x6b4 let xk_Ukranian_JE = 0x6b4 (** deprecated *) let xk_Macedonia_DSE = 0x6b5 let xk_Ukrainian_I = 0x6b6 let xk_Ukranian_I = 0x6b6 (** deprecated *) let xk_Ukrainian_YI = 0x6b7 let xk_Ukranian_YI = 0x6b7 (** deprecated *) let xk_Cyrillic_JE = 0x6b8 let xk_Serbian_JE = 0x6b8 (** deprecated *) let xk_Cyrillic_LJE = 0x6b9 let xk_Serbian_LJE = 0x6b9 (** deprecated *) let xk_Cyrillic_NJE = 0x6ba let xk_Serbian_NJE = 0x6ba (** deprecated *) let xk_Serbian_TSHE = 0x6bb let xk_Macedonia_KJE = 0x6bc let xk_Byelorussian_SHORTU = 0x6be let xk_Cyrillic_DZHE = 0x6bf let xk_Serbian_DZE = 0x6bf (** deprecated *) let xk_Cyrillic_yu = 0x6c0 let xk_Cyrillic_a = 0x6c1 let xk_Cyrillic_be = 0x6c2 let xk_Cyrillic_tse = 0x6c3 let xk_Cyrillic_de = 0x6c4 let xk_Cyrillic_ie = 0x6c5 let xk_Cyrillic_ef = 0x6c6 let xk_Cyrillic_ghe = 0x6c7 let xk_Cyrillic_ha = 0x6c8 let xk_Cyrillic_i = 0x6c9 let xk_Cyrillic_shorti = 0x6ca let xk_Cyrillic_ka = 0x6cb let xk_Cyrillic_el = 0x6cc let xk_Cyrillic_em = 0x6cd let xk_Cyrillic_en = 0x6ce let xk_Cyrillic_o = 0x6cf let xk_Cyrillic_pe = 0x6d0 let xk_Cyrillic_ya = 0x6d1 let xk_Cyrillic_er = 0x6d2 let xk_Cyrillic_es = 0x6d3 let xk_Cyrillic_te = 0x6d4 let xk_Cyrillic_u = 0x6d5 let xk_Cyrillic_zhe = 0x6d6 let xk_Cyrillic_ve = 0x6d7 let xk_Cyrillic_softsign = 0x6d8 let xk_Cyrillic_yeru = 0x6d9 let xk_Cyrillic_ze = 0x6da let xk_Cyrillic_sha = 0x6db let xk_Cyrillic_e = 0x6dc let xk_Cyrillic_shcha = 0x6dd let xk_Cyrillic_che = 0x6de let xk_Cyrillic_hardsign = 0x6df let xk_Cyrillic_YU = 0x6e0 let xk_Cyrillic_A = 0x6e1 let xk_Cyrillic_BE = 0x6e2 let xk_Cyrillic_TSE = 0x6e3 let xk_Cyrillic_DE = 0x6e4 let xk_Cyrillic_IE = 0x6e5 let xk_Cyrillic_EF = 0x6e6 let xk_Cyrillic_GHE = 0x6e7 let xk_Cyrillic_HA = 0x6e8 let xk_Cyrillic_I = 0x6e9 let xk_Cyrillic_SHORTI = 0x6ea let xk_Cyrillic_KA = 0x6eb let xk_Cyrillic_EL = 0x6ec let xk_Cyrillic_EM = 0x6ed let xk_Cyrillic_EN = 0x6ee let xk_Cyrillic_O = 0x6ef let xk_Cyrillic_PE = 0x6f0 let xk_Cyrillic_YA = 0x6f1 let xk_Cyrillic_ER = 0x6f2 let xk_Cyrillic_ES = 0x6f3 let xk_Cyrillic_TE = 0x6f4 let xk_Cyrillic_U = 0x6f5 let xk_Cyrillic_ZHE = 0x6f6 let xk_Cyrillic_VE = 0x6f7 let xk_Cyrillic_SOFTSIGN = 0x6f8 let xk_Cyrillic_YERU = 0x6f9 let xk_Cyrillic_ZE = 0x6fa let xk_Cyrillic_SHA = 0x6fb let xk_Cyrillic_E = 0x6fc let xk_Cyrillic_SHCHA = 0x6fd let xk_Cyrillic_CHE = 0x6fe let xk_Cyrillic_HARDSIGN = 0x6ff (* * Greek * Byte 3 = 7 *) let xk_Greek_ALPHAaccent = 0x7a1 let xk_Greek_EPSILONaccent = 0x7a2 let xk_Greek_ETAaccent = 0x7a3 let xk_Greek_IOTAaccent = 0x7a4 let xk_Greek_IOTAdiaeresis = 0x7a5 let xk_Greek_OMICRONaccent = 0x7a7 let xk_Greek_UPSILONaccent = 0x7a8 let xk_Greek_UPSILONdieresis = 0x7a9 let xk_Greek_OMEGAaccent = 0x7ab let 
xk_Greek_accentdieresis = 0x7ae let xk_Greek_horizbar = 0x7af let xk_Greek_alphaaccent = 0x7b1 let xk_Greek_epsilonaccent = 0x7b2 let xk_Greek_etaaccent = 0x7b3 let xk_Greek_iotaaccent = 0x7b4 let xk_Greek_iotadieresis = 0x7b5 let xk_Greek_iotaaccentdieresis = 0x7b6 let xk_Greek_omicronaccent = 0x7b7 let xk_Greek_upsilonaccent = 0x7b8 let xk_Greek_upsilondieresis = 0x7b9 let xk_Greek_upsilonaccentdieresis = 0x7ba let xk_Greek_omegaaccent = 0x7bb let xk_Greek_ALPHA = 0x7c1 let xk_Greek_BETA = 0x7c2 let xk_Greek_GAMMA = 0x7c3 let xk_Greek_DELTA = 0x7c4 let xk_Greek_EPSILON = 0x7c5 let xk_Greek_ZETA = 0x7c6 let xk_Greek_ETA = 0x7c7 let xk_Greek_THETA = 0x7c8 let xk_Greek_IOTA = 0x7c9 let xk_Greek_KAPPA = 0x7ca let xk_Greek_LAMDA = 0x7cb let xk_Greek_LAMBDA = 0x7cb let xk_Greek_MU = 0x7cc let xk_Greek_NU = 0x7cd let xk_Greek_XI = 0x7ce let xk_Greek_OMICRON = 0x7cf let xk_Greek_PI = 0x7d0 let xk_Greek_RHO = 0x7d1 let xk_Greek_SIGMA = 0x7d2 let xk_Greek_TAU = 0x7d4 let xk_Greek_UPSILON = 0x7d5 let xk_Greek_PHI = 0x7d6 let xk_Greek_CHI = 0x7d7 let xk_Greek_PSI = 0x7d8 let xk_Greek_OMEGA = 0x7d9 let xk_Greek_alpha = 0x7e1 let xk_Greek_beta = 0x7e2 let xk_Greek_gamma = 0x7e3 let xk_Greek_delta = 0x7e4 let xk_Greek_epsilon = 0x7e5 let xk_Greek_zeta = 0x7e6 let xk_Greek_eta = 0x7e7 let xk_Greek_theta = 0x7e8 let xk_Greek_iota = 0x7e9 let xk_Greek_kappa = 0x7ea let xk_Greek_lamda = 0x7eb let xk_Greek_lambda = 0x7eb let xk_Greek_mu = 0x7ec let xk_Greek_nu = 0x7ed let xk_Greek_xi = 0x7ee let xk_Greek_omicron = 0x7ef let xk_Greek_pi = 0x7f0 let xk_Greek_rho = 0x7f1 let xk_Greek_sigma = 0x7f2 let xk_Greek_finalsmallsigma = 0x7f3 let xk_Greek_tau = 0x7f4 let xk_Greek_upsilon = 0x7f5 let xk_Greek_phi = 0x7f6 let xk_Greek_chi = 0x7f7 let xk_Greek_psi = 0x7f8 let xk_Greek_omega = 0x7f9 let xk_Greek_switch = 0xFF7E (** Alias for mode_switch *) (* * Technical * Byte 3 = 8 *) let xk_leftradical = 0x8a1 let xk_topleftradical = 0x8a2 let xk_horizconnector = 0x8a3 let xk_topintegral = 0x8a4 let xk_botintegral = 0x8a5 let xk_vertconnector = 0x8a6 let xk_topleftsqbracket = 0x8a7 let xk_botleftsqbracket = 0x8a8 let xk_toprightsqbracket = 0x8a9 let xk_botrightsqbracket = 0x8aa let xk_topleftparens = 0x8ab let xk_botleftparens = 0x8ac let xk_toprightparens = 0x8ad let xk_botrightparens = 0x8ae let xk_leftmiddlecurlybrace = 0x8af let xk_rightmiddlecurlybrace = 0x8b0 let xk_topleftsummation = 0x8b1 let xk_botleftsummation = 0x8b2 let xk_topvertsummationconnector = 0x8b3 let xk_botvertsummationconnector = 0x8b4 let xk_toprightsummation = 0x8b5 let xk_botrightsummation = 0x8b6 let xk_rightmiddlesummation = 0x8b7 let xk_lessthanequal = 0x8bc let xk_notequal = 0x8bd let xk_greaterthanequal = 0x8be let xk_integral = 0x8bf let xk_therefore = 0x8c0 let xk_variation = 0x8c1 let xk_infinity = 0x8c2 let xk_nabla = 0x8c5 let xk_approximate = 0x8c8 let xk_similarequal = 0x8c9 let xk_ifonlyif = 0x8cd let xk_implies = 0x8ce let xk_identical = 0x8cf let xk_radical = 0x8d6 let xk_includedin = 0x8da let xk_includes = 0x8db let xk_intersection = 0x8dc let xk_union = 0x8dd let xk_logicaland = 0x8de let xk_logicalor = 0x8df let xk_partialderivative = 0x8ef let xk_function = 0x8f6 let xk_leftarrow = 0x8fb let xk_uparrow = 0x8fc let xk_rightarrow = 0x8fd let xk_downarrow = 0x8fe (* * Special * Byte 3 = 9 *) let xk_blank = 0x9df let xk_soliddiamond = 0x9e0 let xk_checkerboard = 0x9e1 let xk_ht = 0x9e2 let xk_ff = 0x9e3 let xk_cr = 0x9e4 let xk_lf = 0x9e5 let xk_nl = 0x9e8 let xk_vt = 0x9e9 let xk_lowrightcorner = 0x9ea let xk_uprightcorner = 
0x9eb let xk_upleftcorner = 0x9ec let xk_lowleftcorner = 0x9ed let xk_crossinglines = 0x9ee let xk_horizlinescan1 = 0x9ef let xk_horizlinescan3 = 0x9f0 let xk_horizlinescan5 = 0x9f1 let xk_horizlinescan7 = 0x9f2 let xk_horizlinescan9 = 0x9f3 let xk_leftt = 0x9f4 let xk_rightt = 0x9f5 let xk_bott = 0x9f6 let xk_topt = 0x9f7 let xk_vertbar = 0x9f8 (* * Publishing * Byte 3 = a *) let xk_emspace = 0xaa1 let xk_enspace = 0xaa2 let xk_em3space = 0xaa3 let xk_em4space = 0xaa4 let xk_digitspace = 0xaa5 let xk_punctspace = 0xaa6 let xk_thinspace = 0xaa7 let xk_hairspace = 0xaa8 let xk_emdash = 0xaa9 let xk_endash = 0xaaa let xk_signifblank = 0xaac let xk_ellipsis = 0xaae let xk_doubbaselinedot = 0xaaf let xk_onethird = 0xab0 let xk_twothirds = 0xab1 let xk_onefifth = 0xab2 let xk_twofifths = 0xab3 let xk_threefifths = 0xab4 let xk_fourfifths = 0xab5 let xk_onesixth = 0xab6 let xk_fivesixths = 0xab7 let xk_careof = 0xab8 let xk_figdash = 0xabb let xk_leftanglebracket = 0xabc let xk_decimalpoint = 0xabd let xk_rightanglebracket = 0xabe let xk_marker = 0xabf let xk_oneeighth = 0xac3 let xk_threeeighths = 0xac4 let xk_fiveeighths = 0xac5 let xk_seveneighths = 0xac6 let xk_trademark = 0xac9 let xk_signaturemark = 0xaca let xk_trademarkincircle = 0xacb let xk_leftopentriangle = 0xacc let xk_rightopentriangle = 0xacd let xk_emopencircle = 0xace let xk_emopenrectangle = 0xacf let xk_leftsinglequotemark = 0xad0 let xk_rightsinglequotemark = 0xad1 let xk_leftdoublequotemark = 0xad2 let xk_rightdoublequotemark = 0xad3 let xk_prescription = 0xad4 let xk_minutes = 0xad6 let xk_seconds = 0xad7 let xk_latincross = 0xad9 let xk_hexagram = 0xada let xk_filledrectbullet = 0xadb let xk_filledlefttribullet = 0xadc let xk_filledrighttribullet = 0xadd let xk_emfilledcircle = 0xade let xk_emfilledrect = 0xadf let xk_enopencircbullet = 0xae0 let xk_enopensquarebullet = 0xae1 let xk_openrectbullet = 0xae2 let xk_opentribulletup = 0xae3 let xk_opentribulletdown = 0xae4 let xk_openstar = 0xae5 let xk_enfilledcircbullet = 0xae6 let xk_enfilledsqbullet = 0xae7 let xk_filledtribulletup = 0xae8 let xk_filledtribulletdown = 0xae9 let xk_leftpointer = 0xaea let xk_rightpointer = 0xaeb let xk_club = 0xaec let xk_diamond = 0xaed let xk_heart = 0xaee let xk_maltesecross = 0xaf0 let xk_dagger = 0xaf1 let xk_doubledagger = 0xaf2 let xk_checkmark = 0xaf3 let xk_ballotcross = 0xaf4 let xk_musicalsharp = 0xaf5 let xk_musicalflat = 0xaf6 let xk_malesymbol = 0xaf7 let xk_femalesymbol = 0xaf8 let xk_telephone = 0xaf9 let xk_telephonerecorder = 0xafa let xk_phonographcopyright = 0xafb let xk_caret = 0xafc let xk_singlelowquotemark = 0xafd let xk_doublelowquotemark = 0xafe let xk_cursor = 0xaff (* * APL * Byte 3 = b *) let xk_leftcaret = 0xba3 let xk_rightcaret = 0xba6 let xk_downcaret = 0xba8 let xk_upcaret = 0xba9 let xk_overbar = 0xbc0 let xk_downtack = 0xbc2 let xk_upshoe = 0xbc3 let xk_downstile = 0xbc4 let xk_underbar = 0xbc6 let xk_jot = 0xbca let xk_quad = 0xbcc let xk_uptack = 0xbce let xk_circle = 0xbcf let xk_upstile = 0xbd3 let xk_downshoe = 0xbd6 let xk_rightshoe = 0xbd8 let xk_leftshoe = 0xbda let xk_lefttack = 0xbdc let xk_righttack = 0xbfc (* * Hebrew * Byte 3 = c *) let xk_hebrew_doublelowline = 0xcdf let xk_hebrew_aleph = 0xce0 let xk_hebrew_bet = 0xce1 let xk_hebrew_beth = 0xce1 (** deprecated *) let xk_hebrew_gimel = 0xce2 let xk_hebrew_gimmel = 0xce2 (** deprecated *) let xk_hebrew_dalet = 0xce3 let xk_hebrew_daleth = 0xce3 (** deprecated *) let xk_hebrew_he = 0xce4 let xk_hebrew_waw = 0xce5 let xk_hebrew_zain = 0xce6 let 
xk_hebrew_zayin = 0xce6 (** deprecated *) let xk_hebrew_chet = 0xce7 let xk_hebrew_het = 0xce7 (** deprecated *) let xk_hebrew_tet = 0xce8 let xk_hebrew_teth = 0xce8 (** deprecated *) let xk_hebrew_yod = 0xce9 let xk_hebrew_finalkaph = 0xcea let xk_hebrew_kaph = 0xceb let xk_hebrew_lamed = 0xcec let xk_hebrew_finalmem = 0xced let xk_hebrew_mem = 0xcee let xk_hebrew_finalnun = 0xcef let xk_hebrew_nun = 0xcf0 let xk_hebrew_samech = 0xcf1 let xk_hebrew_samekh = 0xcf1 (** deprecated *) let xk_hebrew_ayin = 0xcf2 let xk_hebrew_finalpe = 0xcf3 let xk_hebrew_pe = 0xcf4 let xk_hebrew_finalzade = 0xcf5 let xk_hebrew_finalzadi = 0xcf5 (** deprecated *) let xk_hebrew_zade = 0xcf6 let xk_hebrew_zadi = 0xcf6 (** deprecated *) let xk_hebrew_qoph = 0xcf7 let xk_hebrew_kuf = 0xcf7 (** deprecated *) let xk_hebrew_resh = 0xcf8 let xk_hebrew_shin = 0xcf9 let xk_hebrew_taw = 0xcfa let xk_hebrew_taf = 0xcfa (** deprecated *) let xk_Hebrew_switch = 0xFF7E (** Alias for mode_switch *) (* * Thai * Byte 3 = d *) let xk_Thai_kokai = 0xda1 let xk_Thai_khokhai = 0xda2 let xk_Thai_khokhuat = 0xda3 let xk_Thai_khokhwai = 0xda4 let xk_Thai_khokhon = 0xda5 let xk_Thai_khorakhang = 0xda6 let xk_Thai_ngongu = 0xda7 let xk_Thai_chochan = 0xda8 let xk_Thai_choching = 0xda9 let xk_Thai_chochang = 0xdaa let xk_Thai_soso = 0xdab let xk_Thai_chochoe = 0xdac let xk_Thai_yoying = 0xdad let xk_Thai_dochada = 0xdae let xk_Thai_topatak = 0xdaf let xk_Thai_thothan = 0xdb0 let xk_Thai_thonangmontho = 0xdb1 let xk_Thai_thophuthao = 0xdb2 let xk_Thai_nonen = 0xdb3 let xk_Thai_dodek = 0xdb4 let xk_Thai_totao = 0xdb5 let xk_Thai_thothung = 0xdb6 let xk_Thai_thothahan = 0xdb7 let xk_Thai_thothong = 0xdb8 let xk_Thai_nonu = 0xdb9 let xk_Thai_bobaimai = 0xdba let xk_Thai_popla = 0xdbb let xk_Thai_phophung = 0xdbc let xk_Thai_fofa = 0xdbd let xk_Thai_phophan = 0xdbe let xk_Thai_fofan = 0xdbf let xk_Thai_phosamphao = 0xdc0 let xk_Thai_moma = 0xdc1 let xk_Thai_yoyak = 0xdc2 let xk_Thai_rorua = 0xdc3 let xk_Thai_ru = 0xdc4 let xk_Thai_loling = 0xdc5 let xk_Thai_lu = 0xdc6 let xk_Thai_wowaen = 0xdc7 let xk_Thai_sosala = 0xdc8 let xk_Thai_sorusi = 0xdc9 let xk_Thai_sosua = 0xdca let xk_Thai_hohip = 0xdcb let xk_Thai_lochula = 0xdcc let xk_Thai_oang = 0xdcd let xk_Thai_honokhuk = 0xdce let xk_Thai_paiyannoi = 0xdcf let xk_Thai_saraa = 0xdd0 let xk_Thai_maihanakat = 0xdd1 let xk_Thai_saraaa = 0xdd2 let xk_Thai_saraam = 0xdd3 let xk_Thai_sarai = 0xdd4 let xk_Thai_saraii = 0xdd5 let xk_Thai_saraue = 0xdd6 let xk_Thai_sarauee = 0xdd7 let xk_Thai_sarau = 0xdd8 let xk_Thai_sarauu = 0xdd9 let xk_Thai_phinthu = 0xdda let xk_Thai_maihanakat_maitho = 0xdde let xk_Thai_baht = 0xddf let xk_Thai_sarae = 0xde0 let xk_Thai_saraae = 0xde1 let xk_Thai_sarao = 0xde2 let xk_Thai_saraaimaimuan = 0xde3 let xk_Thai_saraaimaimalai = 0xde4 let xk_Thai_lakkhangyao = 0xde5 let xk_Thai_maiyamok = 0xde6 let xk_Thai_maitaikhu = 0xde7 let xk_Thai_maiek = 0xde8 let xk_Thai_maitho = 0xde9 let xk_Thai_maitri = 0xdea let xk_Thai_maichattawa = 0xdeb let xk_Thai_thanthakhat = 0xdec let xk_Thai_nikhahit = 0xded let xk_Thai_leksun = 0xdf0 let xk_Thai_leknung = 0xdf1 let xk_Thai_leksong = 0xdf2 let xk_Thai_leksam = 0xdf3 let xk_Thai_leksi = 0xdf4 let xk_Thai_lekha = 0xdf5 let xk_Thai_lekhok = 0xdf6 let xk_Thai_lekchet = 0xdf7 let xk_Thai_lekpaet = 0xdf8 let xk_Thai_lekkao = 0xdf9 (* * Korean * Byte 3 = e *) let xk_Hangul = 0xff31 (** Hangul start/stop(toggle) *) let xk_Hangul_Start = 0xff32 (** Hangul start *) let xk_Hangul_End = 0xff33 (** Hangul end, English start *) let 
xk_Hangul_Hanja = 0xff34 (** Start Hangul->Hanja Conversion *) let xk_Hangul_Jamo = 0xff35 (** Hangul Jamo mode *) let xk_Hangul_Romaja = 0xff36 (** Hangul Romaja mode *) let xk_Hangul_Codeinput = 0xff37 (** Hangul code input mode *) let xk_Hangul_Jeonja = 0xff38 (** Jeonja mode *) let xk_Hangul_Banja = 0xff39 (** Banja mode *) let xk_Hangul_PreHanja = 0xff3a (** Pre Hanja conversion *) let xk_Hangul_PostHanja = 0xff3b (** Post Hanja conversion *) let xk_Hangul_SingleCandidate = 0xff3c (** Single candidate *) let xk_Hangul_MultipleCandidate = 0xff3d (** Multiple candidate *) let xk_Hangul_PreviousCandidate = 0xff3e (** Previous candidate *) let xk_Hangul_Special = 0xff3f (** Special symbols *) let xk_Hangul_switch = 0xFF7E (** Alias for mode_switch *) (** Hangul Consonant Characters *) let xk_Hangul_Kiyeog = 0xea1 let xk_Hangul_SsangKiyeog = 0xea2 let xk_Hangul_KiyeogSios = 0xea3 let xk_Hangul_Nieun = 0xea4 let xk_Hangul_NieunJieuj = 0xea5 let xk_Hangul_NieunHieuh = 0xea6 let xk_Hangul_Dikeud = 0xea7 let xk_Hangul_SsangDikeud = 0xea8 let xk_Hangul_Rieul = 0xea9 let xk_Hangul_RieulKiyeog = 0xeaa let xk_Hangul_RieulMieum = 0xeab let xk_Hangul_RieulPieub = 0xeac let xk_Hangul_RieulSios = 0xead let xk_Hangul_RieulTieut = 0xeae let xk_Hangul_RieulPhieuf = 0xeaf let xk_Hangul_RieulHieuh = 0xeb0 let xk_Hangul_Mieum = 0xeb1 let xk_Hangul_Pieub = 0xeb2 let xk_Hangul_SsangPieub = 0xeb3 let xk_Hangul_PieubSios = 0xeb4 let xk_Hangul_Sios = 0xeb5 let xk_Hangul_SsangSios = 0xeb6 let xk_Hangul_Ieung = 0xeb7 let xk_Hangul_Jieuj = 0xeb8 let xk_Hangul_SsangJieuj = 0xeb9 let xk_Hangul_Cieuc = 0xeba let xk_Hangul_Khieuq = 0xebb let xk_Hangul_Tieut = 0xebc let xk_Hangul_Phieuf = 0xebd let xk_Hangul_Hieuh = 0xebe (** Hangul Vowel Characters *) let xk_Hangul_A = 0xebf let xk_Hangul_AE = 0xec0 let xk_Hangul_YA = 0xec1 let xk_Hangul_YAE = 0xec2 let xk_Hangul_EO = 0xec3 let xk_Hangul_E = 0xec4 let xk_Hangul_YEO = 0xec5 let xk_Hangul_YE = 0xec6 let xk_Hangul_O = 0xec7 let xk_Hangul_WA = 0xec8 let xk_Hangul_WAE = 0xec9 let xk_Hangul_OE = 0xeca let xk_Hangul_YO = 0xecb let xk_Hangul_U = 0xecc let xk_Hangul_WEO = 0xecd let xk_Hangul_WE = 0xece let xk_Hangul_WI = 0xecf let xk_Hangul_YU = 0xed0 let xk_Hangul_EU = 0xed1 let xk_Hangul_YI = 0xed2 let xk_Hangul_I = 0xed3 (** Hangul syllable-final (JongSeong) Characters *) let xk_Hangul_J_Kiyeog = 0xed4 let xk_Hangul_J_SsangKiyeog = 0xed5 let xk_Hangul_J_KiyeogSios = 0xed6 let xk_Hangul_J_Nieun = 0xed7 let xk_Hangul_J_NieunJieuj = 0xed8 let xk_Hangul_J_NieunHieuh = 0xed9 let xk_Hangul_J_Dikeud = 0xeda let xk_Hangul_J_Rieul = 0xedb let xk_Hangul_J_RieulKiyeog = 0xedc let xk_Hangul_J_RieulMieum = 0xedd let xk_Hangul_J_RieulPieub = 0xede let xk_Hangul_J_RieulSios = 0xedf let xk_Hangul_J_RieulTieut = 0xee0 let xk_Hangul_J_RieulPhieuf = 0xee1 let xk_Hangul_J_RieulHieuh = 0xee2 let xk_Hangul_J_Mieum = 0xee3 let xk_Hangul_J_Pieub = 0xee4 let xk_Hangul_J_PieubSios = 0xee5 let xk_Hangul_J_Sios = 0xee6 let xk_Hangul_J_SsangSios = 0xee7 let xk_Hangul_J_Ieung = 0xee8 let xk_Hangul_J_Jieuj = 0xee9 let xk_Hangul_J_Cieuc = 0xeea let xk_Hangul_J_Khieuq = 0xeeb let xk_Hangul_J_Tieut = 0xeec let xk_Hangul_J_Phieuf = 0xeed let xk_Hangul_J_Hieuh = 0xeee (** Ancient Hangul Consonant Characters *) let xk_Hangul_RieulYeorinHieuh = 0xeef let xk_Hangul_SunkyeongeumMieum = 0xef0 let xk_Hangul_SunkyeongeumPieub = 0xef1 let xk_Hangul_PanSios = 0xef2 let xk_Hangul_KkogjiDalrinIeung = 0xef3 let xk_Hangul_SunkyeongeumPhieuf = 0xef4 let xk_Hangul_YeorinHieuh = 0xef5 (** Ancient Hangul Vowel Characters *) 
let xk_Hangul_AraeA = 0xef6 let xk_Hangul_AraeAE = 0xef7 (** Ancient Hangul syllable-final (JongSeong) Characters *) let xk_Hangul_J_PanSios = 0xef8 let xk_Hangul_J_KkogjiDalrinIeung = 0xef9 let xk_Hangul_J_YeorinHieuh = 0xefa (** Korean currency symbol *) let xk_Korean_Won = 0xeff let name_to_keysym = [ "VoidSymbol",0xFFFFFF; "BackSpace",0xFF08; "Tab",0xFF09; "Linefeed",0xFF0A; "Clear",0xFF0B; "Return",0xFF0D; "Pause",0xFF13; "Scroll_Lock",0xFF14; "Sys_Req",0xFF15; "Escape",0xFF1B; "Delete",0xFFFF; "Multi_key",0xFF20; "Kanji",0xFF21; "Muhenkan",0xFF22; "Henkan_Mode",0xFF23; "Henkan",0xFF23; "Romaji",0xFF24; "Hiragana",0xFF25; "Katakana",0xFF26; "Hiragana_Katakana",0xFF27; "Zenkaku",0xFF28; "Hankaku",0xFF29; "Zenkaku_Hankaku",0xFF2A; "Touroku",0xFF2B; "Massyo",0xFF2C; "Kana_Lock",0xFF2D; "Kana_Shift",0xFF2E; "Eisu_Shift",0xFF2F; "Eisu_toggle",0xFF30; "Home",0xFF50; "Left",0xFF51; "Up",0xFF52; "Right",0xFF53; "Down",0xFF54; "Prior",0xFF55; "Page_Up",0xFF55; "Next",0xFF56; "Page_Down",0xFF56; "End",0xFF57; "Begin",0xFF58; "Select",0xFF60; "Print",0xFF61; "Execute",0xFF62; "Insert",0xFF63; "Undo",0xFF65; "Redo",0xFF66; "Menu",0xFF67; "Find",0xFF68; "Cancel",0xFF69; "Help",0xFF6A; "Break",0xFF6B; "Mode_switch",0xFF7E; "script_switch",0xFF7E; "Num_Lock",0xFF7F; "KP_Space",0xFF80; "KP_Tab",0xFF89; "KP_Enter",0xFF8D; "KP_F1",0xFF91; "KP_F2",0xFF92; "KP_F3",0xFF93; "KP_F4",0xFF94; "KP_Home",0xFF95; "KP_Left",0xFF96; "KP_Up",0xFF97; "KP_Right",0xFF98; "KP_Down",0xFF99; "KP_Prior",0xFF9A; "KP_Page_Up",0xFF9A; "KP_Next",0xFF9B; "KP_Page_Down",0xFF9B; "KP_End",0xFF9C; "KP_Begin",0xFF9D; "KP_Insert",0xFF9E; "KP_Delete",0xFF9F; "KP_Equal",0xFFBD; "KP_Multiply",0xFFAA; "KP_Add",0xFFAB; "KP_Separator",0xFFAC; "KP_Subtract",0xFFAD; "KP_Decimal",0xFFAE; "KP_Divide",0xFFAF; "KP_0",0xFFB0; "KP_1",0xFFB1; "KP_2",0xFFB2; "KP_3",0xFFB3; "KP_4",0xFFB4; "KP_5",0xFFB5; "KP_6",0xFFB6; "KP_7",0xFFB7; "KP_8",0xFFB8; "KP_9",0xFFB9; "F1",0xFFBE; "F2",0xFFBF; "F3",0xFFC0; "F4",0xFFC1; "F5",0xFFC2; "F6",0xFFC3; "F7",0xFFC4; "F8",0xFFC5; "F9",0xFFC6; "F10",0xFFC7; "F11",0xFFC8; "L1",0xFFC8; "F12",0xFFC9; "L2",0xFFC9; "F13",0xFFCA; "L3",0xFFCA; "F14",0xFFCB; "L4",0xFFCB; "F15",0xFFCC; "L5",0xFFCC; "F16",0xFFCD; "L6",0xFFCD; "F17",0xFFCE; "L7",0xFFCE; "F18",0xFFCF; "L8",0xFFCF; "F19",0xFFD0; "L9",0xFFD0; "F20",0xFFD1; "L10",0xFFD1; "F21",0xFFD2; "R1",0xFFD2; "F22",0xFFD3; "R2",0xFFD3; "F23",0xFFD4; "R3",0xFFD4; "F24",0xFFD5; "R4",0xFFD5; "F25",0xFFD6; "R5",0xFFD6; "F26",0xFFD7; "R6",0xFFD7; "F27",0xFFD8; "R7",0xFFD8; "F28",0xFFD9; "R8",0xFFD9; "F29",0xFFDA; "R9",0xFFDA; "F30",0xFFDB; "R10",0xFFDB; "F31",0xFFDC; "R11",0xFFDC; "F32",0xFFDD; "R12",0xFFDD; "F33",0xFFDE; "R13",0xFFDE; "F34",0xFFDF; "R14",0xFFDF; "F35",0xFFE0; "R15",0xFFE0; "Shift_L",0xFFE1; "Shift_R",0xFFE2; "Control_L",0xFFE3; "Control_R",0xFFE4; "Caps_Lock",0xFFE5; "Shift_Lock",0xFFE6; "Meta_L",0xFFE7; "Meta_R",0xFFE8; "Alt_L",0xFFE9; "Alt_R",0xFFEA; "Super_L",0xFFEB; "Super_R",0xFFEC; "Hyper_L",0xFFED; "Hyper_R",0xFFEE; "ISO_Lock",0xFE01; "ISO_Level2_Latch",0xFE02; "ISO_Level3_Shift",0xFE03; "ISO_Level3_Latch",0xFE04; "ISO_Level3_Lock",0xFE05; "ISO_Group_Shift",0xFF7E; "ISO_Group_Latch",0xFE06; "ISO_Group_Lock",0xFE07; "ISO_Next_Group",0xFE08; "ISO_Next_Group_Lock",0xFE09; "ISO_Prev_Group",0xFE0A; "ISO_Prev_Group_Lock",0xFE0B; "ISO_First_Group",0xFE0C; "ISO_First_Group_Lock",0xFE0D; "ISO_Last_Group",0xFE0E; "ISO_Last_Group_Lock",0xFE0F; "ISO_Left_Tab",0xFE20; "ISO_Move_Line_Up",0xFE21; "ISO_Move_Line_Down",0xFE22; "ISO_Partial_Line_Up",0xFE23; 
"ISO_Partial_Line_Down",0xFE24; "ISO_Partial_Space_Left",0xFE25; "ISO_Partial_Space_Right",0xFE26; "ISO_Set_Margin_Left",0xFE27; "ISO_Set_Margin_Right",0xFE28; "ISO_Release_Margin_Left",0xFE29; "ISO_Release_Margin_Right",0xFE2A; "ISO_Release_Both_Margins",0xFE2B; "ISO_Fast_Cursor_Left",0xFE2C; "ISO_Fast_Cursor_Right",0xFE2D; "ISO_Fast_Cursor_Up",0xFE2E; "ISO_Fast_Cursor_Down",0xFE2F; "ISO_Continuous_Underline",0xFE30; "ISO_Discontinuous_Underline",0xFE31; "ISO_Emphasize",0xFE32; "ISO_Center_Object",0xFE33; "ISO_Enter",0xFE34; "dead_grave",0xFE50; "dead_acute",0xFE51; "dead_circumflex",0xFE52; "dead_tilde",0xFE53; "dead_macron",0xFE54; "dead_breve",0xFE55; "dead_abovedot",0xFE56; "dead_diaeresis",0xFE57; "dead_abovering",0xFE58; "dead_doubleacute",0xFE59; "dead_caron",0xFE5A; "dead_cedilla",0xFE5B; "dead_ogonek",0xFE5C; "dead_iota",0xFE5D; "dead_voiced_sound",0xFE5E; "dead_semivoiced_sound",0xFE5F; "dead_belowdot",0xFE60; "First_Virtual_Screen",0xFED0; "Prev_Virtual_Screen",0xFED1; "Next_Virtual_Screen",0xFED2; "Last_Virtual_Screen",0xFED4; "Terminate_Server",0xFED5; "AccessX_Enable",0xFE70; "AccessX_Feedback_Enable",0xFE71; "RepeatKeys_Enable",0xFE72; "SlowKeys_Enable",0xFE73; "BounceKeys_Enable",0xFE74; "StickyKeys_Enable",0xFE75; "MouseKeys_Enable",0xFE76; "MouseKeys_Accel_Enable",0xFE77; "Overlay1_Enable",0xFE78; "Overlay2_Enable",0xFE79; "AudibleBell_Enable",0xFE7A; "Pointer_Left",0xFEE0; "Pointer_Right",0xFEE1; "Pointer_Up",0xFEE2; "Pointer_Down",0xFEE3; "Pointer_UpLeft",0xFEE4; "Pointer_UpRight",0xFEE5; "Pointer_DownLeft",0xFEE6; "Pointer_DownRight",0xFEE7; "Pointer_Button_Dflt",0xFEE8; "Pointer_Button1",0xFEE9; "Pointer_Button2",0xFEEA; "Pointer_Button3",0xFEEB; "Pointer_Button4",0xFEEC; "Pointer_Button5",0xFEED; "Pointer_DblClick_Dflt",0xFEEE; "Pointer_DblClick1",0xFEEF; "Pointer_DblClick2",0xFEF0; "Pointer_DblClick3",0xFEF1; "Pointer_DblClick4",0xFEF2; "Pointer_DblClick5",0xFEF3; "Pointer_Drag_Dflt",0xFEF4; "Pointer_Drag1",0xFEF5; "Pointer_Drag2",0xFEF6; "Pointer_Drag3",0xFEF7; "Pointer_Drag4",0xFEF8; "Pointer_Drag5",0xFEFD; "Pointer_EnableKeys",0xFEF9; "Pointer_Accelerate",0xFEFA; "Pointer_DfltBtnNext",0xFEFB; "Pointer_DfltBtnPrev",0xFEFC; "3270_Duplicate",0xFD01; "3270_FieldMark",0xFD02; "3270_Right2",0xFD03; "3270_Left2",0xFD04; "3270_BackTab",0xFD05; "3270_EraseEOF",0xFD06; "3270_EraseInput",0xFD07; "3270_Reset",0xFD08; "3270_Quit",0xFD09; "3270_PA1",0xFD0A; "3270_PA2",0xFD0B; "3270_PA3",0xFD0C; "3270_Test",0xFD0D; "3270_Attn",0xFD0E; "3270_CursorBlink",0xFD0F; "3270_AltCursor",0xFD10; "3270_KeyClick",0xFD11; "3270_Jump",0xFD12; "3270_Ident",0xFD13; "3270_Rule",0xFD14; "3270_Copy",0xFD15; "3270_Play",0xFD16; "3270_Setup",0xFD17; "3270_Record",0xFD18; "3270_ChangeScreen",0xFD19; "3270_DeleteWord",0xFD1A; "3270_ExSelect",0xFD1B; "3270_CursorSelect",0xFD1C; "3270_PrintScreen",0xFD1D; "3270_Enter",0xFD1E; "space",0x020; "exclam",0x021; "quotedbl",0x022; "numbersign",0x023; "dollar",0x024; "percent",0x025; "ampersand",0x026; "apostrophe",0x027; "quoteright",0x027; "parenleft",0x028; "parenright",0x029; "asterisk",0x02a; "plus",0x02b; "comma",0x02c; "minus",0x02d; "period",0x02e; "slash",0x02f; "0",0x030; "1",0x031; "2",0x032; "3",0x033; "4",0x034; "5",0x035; "6",0x036; "7",0x037; "8",0x038; "9",0x039; "colon",0x03a; "semicolon",0x03b; "less",0x03c; "equal",0x03d; "greater",0x03e; "question",0x03f; "at",0x040; "A",0x041; "B",0x042; "C",0x043; "D",0x044; "E",0x045; "F",0x046; "G",0x047; "H",0x048; "I",0x049; "J",0x04a; "K",0x04b; "L",0x04c; "M",0x04d; "N",0x04e; "O",0x04f; "P",0x050; 
"Q",0x051; "R",0x052; "S",0x053; "T",0x054; "U",0x055; "V",0x056; "W",0x057; "X",0x058; "Y",0x059; "Z",0x05a; "bracketleft",0x05b; "backslash",0x05c; "bracketright",0x05d; "asciicircum",0x05e; "underscore",0x05f; "grave",0x060; "quoteleft",0x060; "a",0x061; "b",0x062; "c",0x063; "d",0x064; "e",0x065; "f",0x066; "g",0x067; "h",0x068; "i",0x069; "j",0x06a; "k",0x06b; "l",0x06c; "m",0x06d; "n",0x06e; "o",0x06f; "p",0x070; "q",0x071; "r",0x072; "s",0x073; "t",0x074; "u",0x075; "v",0x076; "w",0x077; "x",0x078; "y",0x079; "z",0x07a; "braceleft",0x07b; "bar",0x07c; "braceright",0x07d; "asciitilde",0x07e; "nobreakspace",0x0a0; "exclamdown",0x0a1; "cent",0x0a2; "sterling",0x0a3; "currency",0x0a4; "yen",0x0a5; "brokenbar",0x0a6; "section",0x0a7; "diaeresis",0x0a8; "copyright",0x0a9; "ordfeminine",0x0aa; "guillemotleft",0x0ab; "notsign",0x0ac; "hyphen",0x0ad; "registered",0x0ae; "macron",0x0af; "degree",0x0b0; "plusminus",0x0b1; "twosuperior",0x0b2; "threesuperior",0x0b3; "acute",0x0b4; "mu",0x0b5; "paragraph",0x0b6; "periodcentered",0x0b7; "cedilla",0x0b8; "onesuperior",0x0b9; "masculine",0x0ba; "guillemotright",0x0bb; "onequarter",0x0bc; "onehalf",0x0bd; "threequarters",0x0be; "questiondown",0x0bf; "Agrave",0x0c0; "Aacute",0x0c1; "Acircumflex",0x0c2; "Atilde",0x0c3; "Adiaeresis",0x0c4; "Aring",0x0c5; "AE",0x0c6; "Ccedilla",0x0c7; "Egrave",0x0c8; "Eacute",0x0c9; "Ecircumflex",0x0ca; "Ediaeresis",0x0cb; "Igrave",0x0cc; "Iacute",0x0cd; "Icircumflex",0x0ce; "Idiaeresis",0x0cf; "ETH",0x0d0; "Eth",0x0d0; "Ntilde",0x0d1; "Ograve",0x0d2; "Oacute",0x0d3; "Ocircumflex",0x0d4; "Otilde",0x0d5; "Odiaeresis",0x0d6; "multiply",0x0d7; "Ooblique",0x0d8; "Ugrave",0x0d9; "Uacute",0x0da; "Ucircumflex",0x0db; "Udiaeresis",0x0dc; "Yacute",0x0dd; "THORN",0x0de; "Thorn",0x0de; "ssharp",0x0df; "agrave",0x0e0; "aacute",0x0e1; "acircumflex",0x0e2; "atilde",0x0e3; "adiaeresis",0x0e4; "aring",0x0e5; "ae",0x0e6; "ccedilla",0x0e7; "egrave",0x0e8; "eacute",0x0e9; "ecircumflex",0x0ea; "ediaeresis",0x0eb; "igrave",0x0ec; "iacute",0x0ed; "icircumflex",0x0ee; "idiaeresis",0x0ef; "eth",0x0f0; "ntilde",0x0f1; "ograve",0x0f2; "oacute",0x0f3; "ocircumflex",0x0f4; "otilde",0x0f5; "odiaeresis",0x0f6; "division",0x0f7; "oslash",0x0f8; "ugrave",0x0f9; "uacute",0x0fa; "ucircumflex",0x0fb; "udiaeresis",0x0fc; "yacute",0x0fd; "thorn",0x0fe; "ydiaeresis",0x0ff; "Aogonek",0x1a1; "breve",0x1a2; "Lstroke",0x1a3; "Lcaron",0x1a5; "Sacute",0x1a6; "Scaron",0x1a9; "Scedilla",0x1aa; "Tcaron",0x1ab; "Zacute",0x1ac; "Zcaron",0x1ae; "Zabovedot",0x1af; "aogonek",0x1b1; "ogonek",0x1b2; "lstroke",0x1b3; "lcaron",0x1b5; "sacute",0x1b6; "caron",0x1b7; "scaron",0x1b9; "scedilla",0x1ba; "tcaron",0x1bb; "zacute",0x1bc; "doubleacute",0x1bd; "zcaron",0x1be; "zabovedot",0x1bf; "Racute",0x1c0; "Abreve",0x1c3; "Lacute",0x1c5; "Cacute",0x1c6; "Ccaron",0x1c8; "Eogonek",0x1ca; "Ecaron",0x1cc; "Dcaron",0x1cf; "Dstroke",0x1d0; "Nacute",0x1d1; "Ncaron",0x1d2; "Odoubleacute",0x1d5; "Rcaron",0x1d8; "Uring",0x1d9; "Udoubleacute",0x1db; "Tcedilla",0x1de; "racute",0x1e0; "abreve",0x1e3; "lacute",0x1e5; "cacute",0x1e6; "ccaron",0x1e8; "eogonek",0x1ea; "ecaron",0x1ec; "dcaron",0x1ef; "dstroke",0x1f0; "nacute",0x1f1; "ncaron",0x1f2; "odoubleacute",0x1f5; "udoubleacute",0x1fb; "rcaron",0x1f8; "uring",0x1f9; "tcedilla",0x1fe; "abovedot",0x1ff; "Hstroke",0x2a1; "Hcircumflex",0x2a6; "Iabovedot",0x2a9; "Gbreve",0x2ab; "Jcircumflex",0x2ac; "hstroke",0x2b1; "hcircumflex",0x2b6; "idotless",0x2b9; "gbreve",0x2bb; "jcircumflex",0x2bc; "Cabovedot",0x2c5; "Ccircumflex",0x2c6; "Gabovedot",0x2d5; 
"Gcircumflex",0x2d8; "Ubreve",0x2dd; "Scircumflex",0x2de; "cabovedot",0x2e5; "ccircumflex",0x2e6; "gabovedot",0x2f5; "gcircumflex",0x2f8; "ubreve",0x2fd; "scircumflex",0x2fe; "kra",0x3a2; "kappa",0x3a2; "Rcedilla",0x3a3; "Itilde",0x3a5; "Lcedilla",0x3a6; "Emacron",0x3aa; "Gcedilla",0x3ab; "Tslash",0x3ac; "rcedilla",0x3b3; "itilde",0x3b5; "lcedilla",0x3b6; "emacron",0x3ba; "gcedilla",0x3bb; "tslash",0x3bc; "ENG",0x3bd; "eng",0x3bf; "Amacron",0x3c0; "Iogonek",0x3c7; "Eabovedot",0x3cc; "Imacron",0x3cf; "Ncedilla",0x3d1; "Omacron",0x3d2; "Kcedilla",0x3d3; "Uogonek",0x3d9; "Utilde",0x3dd; "Umacron",0x3de; "amacron",0x3e0; "iogonek",0x3e7; "eabovedot",0x3ec; "imacron",0x3ef; "ncedilla",0x3f1; "omacron",0x3f2; "kcedilla",0x3f3; "uogonek",0x3f9; "utilde",0x3fd; "umacron",0x3fe; "overline",0x47e; "kana_fullstop",0x4a1; "kana_openingbracket",0x4a2; "kana_closingbracket",0x4a3; "kana_comma",0x4a4; "kana_conjunctive",0x4a5; "kana_middledot",0x4a5; "kana_WO",0x4a6; "kana_a",0x4a7; "kana_i",0x4a8; "kana_u",0x4a9; "kana_e",0x4aa; "kana_o",0x4ab; "kana_ya",0x4ac; "kana_yu",0x4ad; "kana_yo",0x4ae; "kana_tsu",0x4af; "kana_tu",0x4af; "prolongedsound",0x4b0; "kana_A",0x4b1; "kana_I",0x4b2; "kana_U",0x4b3; "kana_E",0x4b4; "kana_O",0x4b5; "kana_KA",0x4b6; "kana_KI",0x4b7; "kana_KU",0x4b8; "kana_KE",0x4b9; "kana_KO",0x4ba; "kana_SA",0x4bb; "kana_SHI",0x4bc; "kana_SU",0x4bd; "kana_SE",0x4be; "kana_SO",0x4bf; "kana_TA",0x4c0; "kana_CHI",0x4c1; "kana_TI",0x4c1; "kana_TSU",0x4c2; "kana_TU",0x4c2; "kana_TE",0x4c3; "kana_TO",0x4c4; "kana_NA",0x4c5; "kana_NI",0x4c6; "kana_NU",0x4c7; "kana_NE",0x4c8; "kana_NO",0x4c9; "kana_HA",0x4ca; "kana_HI",0x4cb; "kana_FU",0x4cc; "kana_HU",0x4cc; "kana_HE",0x4cd; "kana_HO",0x4ce; "kana_MA",0x4cf; "kana_MI",0x4d0; "kana_MU",0x4d1; "kana_ME",0x4d2; "kana_MO",0x4d3; "kana_YA",0x4d4; "kana_YU",0x4d5; "kana_YO",0x4d6; "kana_RA",0x4d7; "kana_RI",0x4d8; "kana_RU",0x4d9; "kana_RE",0x4da; "kana_RO",0x4db; "kana_WA",0x4dc; "kana_N",0x4dd; "voicedsound",0x4de; "semivoicedsound",0x4df; "kana_switch",0xFF7E; "Arabic_comma",0x5ac; "Arabic_semicolon",0x5bb; "Arabic_question_mark",0x5bf; "Arabic_hamza",0x5c1; "Arabic_maddaonalef",0x5c2; "Arabic_hamzaonalef",0x5c3; "Arabic_hamzaonwaw",0x5c4; "Arabic_hamzaunderalef",0x5c5; "Arabic_hamzaonyeh",0x5c6; "Arabic_alef",0x5c7; "Arabic_beh",0x5c8; "Arabic_tehmarbuta",0x5c9; "Arabic_teh",0x5ca; "Arabic_theh",0x5cb; "Arabic_jeem",0x5cc; "Arabic_hah",0x5cd; "Arabic_khah",0x5ce; "Arabic_dal",0x5cf; "Arabic_thal",0x5d0; "Arabic_ra",0x5d1; "Arabic_zain",0x5d2; "Arabic_seen",0x5d3; "Arabic_sheen",0x5d4; "Arabic_sad",0x5d5; "Arabic_dad",0x5d6; "Arabic_tah",0x5d7; "Arabic_zah",0x5d8; "Arabic_ain",0x5d9; "Arabic_ghain",0x5da; "Arabic_tatweel",0x5e0; "Arabic_feh",0x5e1; "Arabic_qaf",0x5e2; "Arabic_kaf",0x5e3; "Arabic_lam",0x5e4; "Arabic_meem",0x5e5; "Arabic_noon",0x5e6; "Arabic_ha",0x5e7; "Arabic_heh",0x5e7; "Arabic_waw",0x5e8; "Arabic_alefmaksura",0x5e9; "Arabic_yeh",0x5ea; "Arabic_fathatan",0x5eb; "Arabic_dammatan",0x5ec; "Arabic_kasratan",0x5ed; "Arabic_fatha",0x5ee; "Arabic_damma",0x5ef; "Arabic_kasra",0x5f0; "Arabic_shadda",0x5f1; "Arabic_sukun",0x5f2; "Arabic_switch",0xFF7E; "Serbian_dje",0x6a1; "Macedonia_gje",0x6a2; "Cyrillic_io",0x6a3; "Ukrainian_ie",0x6a4; "Ukranian_je",0x6a4; "Macedonia_dse",0x6a5; "Ukrainian_i",0x6a6; "Ukranian_i",0x6a6; "Ukrainian_yi",0x6a7; "Ukranian_yi",0x6a7; "Cyrillic_je",0x6a8; "Serbian_je",0x6a8; "Cyrillic_lje",0x6a9; "Serbian_lje",0x6a9; "Cyrillic_nje",0x6aa; "Serbian_nje",0x6aa; "Serbian_tshe",0x6ab; "Macedonia_kje",0x6ac; 
"Byelorussian_shortu",0x6ae; "Cyrillic_dzhe",0x6af; "Serbian_dze",0x6af; "numerosign",0x6b0; "Serbian_DJE",0x6b1; "Macedonia_GJE",0x6b2; "Cyrillic_IO",0x6b3; "Ukrainian_IE",0x6b4; "Ukranian_JE",0x6b4; "Macedonia_DSE",0x6b5; "Ukrainian_I",0x6b6; "Ukranian_I",0x6b6; "Ukrainian_YI",0x6b7; "Ukranian_YI",0x6b7; "Cyrillic_JE",0x6b8; "Serbian_JE",0x6b8; "Cyrillic_LJE",0x6b9; "Serbian_LJE",0x6b9; "Cyrillic_NJE",0x6ba; "Serbian_NJE",0x6ba; "Serbian_TSHE",0x6bb; "Macedonia_KJE",0x6bc; "Byelorussian_SHORTU",0x6be; "Cyrillic_DZHE",0x6bf; "Serbian_DZE",0x6bf; "Cyrillic_yu",0x6c0; "Cyrillic_a",0x6c1; "Cyrillic_be",0x6c2; "Cyrillic_tse",0x6c3; "Cyrillic_de",0x6c4; "Cyrillic_ie",0x6c5; "Cyrillic_ef",0x6c6; "Cyrillic_ghe",0x6c7; "Cyrillic_ha",0x6c8; "Cyrillic_i",0x6c9; "Cyrillic_shorti",0x6ca; "Cyrillic_ka",0x6cb; "Cyrillic_el",0x6cc; "Cyrillic_em",0x6cd; "Cyrillic_en",0x6ce; "Cyrillic_o",0x6cf; "Cyrillic_pe",0x6d0; "Cyrillic_ya",0x6d1; "Cyrillic_er",0x6d2; "Cyrillic_es",0x6d3; "Cyrillic_te",0x6d4; "Cyrillic_u",0x6d5; "Cyrillic_zhe",0x6d6; "Cyrillic_ve",0x6d7; "Cyrillic_softsign",0x6d8; "Cyrillic_yeru",0x6d9; "Cyrillic_ze",0x6da; "Cyrillic_sha",0x6db; "Cyrillic_e",0x6dc; "Cyrillic_shcha",0x6dd; "Cyrillic_che",0x6de; "Cyrillic_hardsign",0x6df; "Cyrillic_YU",0x6e0; "Cyrillic_A",0x6e1; "Cyrillic_BE",0x6e2; "Cyrillic_TSE",0x6e3; "Cyrillic_DE",0x6e4; "Cyrillic_IE",0x6e5; "Cyrillic_EF",0x6e6; "Cyrillic_GHE",0x6e7; "Cyrillic_HA",0x6e8; "Cyrillic_I",0x6e9; "Cyrillic_SHORTI",0x6ea; "Cyrillic_KA",0x6eb; "Cyrillic_EL",0x6ec; "Cyrillic_EM",0x6ed; "Cyrillic_EN",0x6ee; "Cyrillic_O",0x6ef; "Cyrillic_PE",0x6f0; "Cyrillic_YA",0x6f1; "Cyrillic_ER",0x6f2; "Cyrillic_ES",0x6f3; "Cyrillic_TE",0x6f4; "Cyrillic_U",0x6f5; "Cyrillic_ZHE",0x6f6; "Cyrillic_VE",0x6f7; "Cyrillic_SOFTSIGN",0x6f8; "Cyrillic_YERU",0x6f9; "Cyrillic_ZE",0x6fa; "Cyrillic_SHA",0x6fb; "Cyrillic_E",0x6fc; "Cyrillic_SHCHA",0x6fd; "Cyrillic_CHE",0x6fe; "Cyrillic_HARDSIGN",0x6ff; "Greek_ALPHAaccent",0x7a1; "Greek_EPSILONaccent",0x7a2; "Greek_ETAaccent",0x7a3; "Greek_IOTAaccent",0x7a4; "Greek_IOTAdiaeresis",0x7a5; "Greek_OMICRONaccent",0x7a7; "Greek_UPSILONaccent",0x7a8; "Greek_UPSILONdieresis",0x7a9; "Greek_OMEGAaccent",0x7ab; "Greek_accentdieresis",0x7ae; "Greek_horizbar",0x7af; "Greek_alphaaccent",0x7b1; "Greek_epsilonaccent",0x7b2; "Greek_etaaccent",0x7b3; "Greek_iotaaccent",0x7b4; "Greek_iotadieresis",0x7b5; "Greek_iotaaccentdieresis",0x7b6; "Greek_omicronaccent",0x7b7; "Greek_upsilonaccent",0x7b8; "Greek_upsilondieresis",0x7b9; "Greek_upsilonaccentdieresis",0x7ba; "Greek_omegaaccent",0x7bb; "Greek_ALPHA",0x7c1; "Greek_BETA",0x7c2; "Greek_GAMMA",0x7c3; "Greek_DELTA",0x7c4; "Greek_EPSILON",0x7c5; "Greek_ZETA",0x7c6; "Greek_ETA",0x7c7; "Greek_THETA",0x7c8; "Greek_IOTA",0x7c9; "Greek_KAPPA",0x7ca; "Greek_LAMDA",0x7cb; "Greek_LAMBDA",0x7cb; "Greek_MU",0x7cc; "Greek_NU",0x7cd; "Greek_XI",0x7ce; "Greek_OMICRON",0x7cf; "Greek_PI",0x7d0; "Greek_RHO",0x7d1; "Greek_SIGMA",0x7d2; "Greek_TAU",0x7d4; "Greek_UPSILON",0x7d5; "Greek_PHI",0x7d6; "Greek_CHI",0x7d7; "Greek_PSI",0x7d8; "Greek_OMEGA",0x7d9; "Greek_alpha",0x7e1; "Greek_beta",0x7e2; "Greek_gamma",0x7e3; "Greek_delta",0x7e4; "Greek_epsilon",0x7e5; "Greek_zeta",0x7e6; "Greek_eta",0x7e7; "Greek_theta",0x7e8; "Greek_iota",0x7e9; "Greek_kappa",0x7ea; "Greek_lamda",0x7eb; "Greek_lambda",0x7eb; "Greek_mu",0x7ec; "Greek_nu",0x7ed; "Greek_xi",0x7ee; "Greek_omicron",0x7ef; "Greek_pi",0x7f0; "Greek_rho",0x7f1; "Greek_sigma",0x7f2; "Greek_finalsmallsigma",0x7f3; "Greek_tau",0x7f4; "Greek_upsilon",0x7f5; "Greek_phi",0x7f6; 
"Greek_chi",0x7f7; "Greek_psi",0x7f8; "Greek_omega",0x7f9; "Greek_switch",0xFF7E; "leftradical",0x8a1; "topleftradical",0x8a2; "horizconnector",0x8a3; "topintegral",0x8a4; "botintegral",0x8a5; "vertconnector",0x8a6; "topleftsqbracket",0x8a7; "botleftsqbracket",0x8a8; "toprightsqbracket",0x8a9; "botrightsqbracket",0x8aa; "topleftparens",0x8ab; "botleftparens",0x8ac; "toprightparens",0x8ad; "botrightparens",0x8ae; "leftmiddlecurlybrace",0x8af; "rightmiddlecurlybrace",0x8b0; "topleftsummation",0x8b1; "botleftsummation",0x8b2; "topvertsummationconnector",0x8b3; "botvertsummationconnector",0x8b4; "toprightsummation",0x8b5; "botrightsummation",0x8b6; "rightmiddlesummation",0x8b7; "lessthanequal",0x8bc; "notequal",0x8bd; "greaterthanequal",0x8be; "integral",0x8bf; "therefore",0x8c0; "variation",0x8c1; "infinity",0x8c2; "nabla",0x8c5; "approximate",0x8c8; "similarequal",0x8c9; "ifonlyif",0x8cd; "implies",0x8ce; "identical",0x8cf; "radical",0x8d6; "includedin",0x8da; "includes",0x8db; "intersection",0x8dc; "union",0x8dd; "logicaland",0x8de; "logicalor",0x8df; "partialderivative",0x8ef; "function",0x8f6; "leftarrow",0x8fb; "uparrow",0x8fc; "rightarrow",0x8fd; "downarrow",0x8fe; "blank",0x9df; "soliddiamond",0x9e0; "checkerboard",0x9e1; "ht",0x9e2; "ff",0x9e3; "cr",0x9e4; "lf",0x9e5; "nl",0x9e8; "vt",0x9e9; "lowrightcorner",0x9ea; "uprightcorner",0x9eb; "upleftcorner",0x9ec; "lowleftcorner",0x9ed; "crossinglines",0x9ee; "horizlinescan1",0x9ef; "horizlinescan3",0x9f0; "horizlinescan5",0x9f1; "horizlinescan7",0x9f2; "horizlinescan9",0x9f3; "leftt",0x9f4; "rightt",0x9f5; "bott",0x9f6; "topt",0x9f7; "vertbar",0x9f8; "emspace",0xaa1; "enspace",0xaa2; "em3space",0xaa3; "em4space",0xaa4; "digitspace",0xaa5; "punctspace",0xaa6; "thinspace",0xaa7; "hairspace",0xaa8; "emdash",0xaa9; "endash",0xaaa; "signifblank",0xaac; "ellipsis",0xaae; "doubbaselinedot",0xaaf; "onethird",0xab0; "twothirds",0xab1; "onefifth",0xab2; "twofifths",0xab3; "threefifths",0xab4; "fourfifths",0xab5; "onesixth",0xab6; "fivesixths",0xab7; "careof",0xab8; "figdash",0xabb; "leftanglebracket",0xabc; "decimalpoint",0xabd; "rightanglebracket",0xabe; "marker",0xabf; "oneeighth",0xac3; "threeeighths",0xac4; "fiveeighths",0xac5; "seveneighths",0xac6; "trademark",0xac9; "signaturemark",0xaca; "trademarkincircle",0xacb; "leftopentriangle",0xacc; "rightopentriangle",0xacd; "emopencircle",0xace; "emopenrectangle",0xacf; "leftsinglequotemark",0xad0; "rightsinglequotemark",0xad1; "leftdoublequotemark",0xad2; "rightdoublequotemark",0xad3; "prescription",0xad4; "minutes",0xad6; "seconds",0xad7; "latincross",0xad9; "hexagram",0xada; "filledrectbullet",0xadb; "filledlefttribullet",0xadc; "filledrighttribullet",0xadd; "emfilledcircle",0xade; "emfilledrect",0xadf; "enopencircbullet",0xae0; "enopensquarebullet",0xae1; "openrectbullet",0xae2; "opentribulletup",0xae3; "opentribulletdown",0xae4; "openstar",0xae5; "enfilledcircbullet",0xae6; "enfilledsqbullet",0xae7; "filledtribulletup",0xae8; "filledtribulletdown",0xae9; "leftpointer",0xaea; "rightpointer",0xaeb; "club",0xaec; "diamond",0xaed; "heart",0xaee; "maltesecross",0xaf0; "dagger",0xaf1; "doubledagger",0xaf2; "checkmark",0xaf3; "ballotcross",0xaf4; "musicalsharp",0xaf5; "musicalflat",0xaf6; "malesymbol",0xaf7; "femalesymbol",0xaf8; "telephone",0xaf9; "telephonerecorder",0xafa; "phonographcopyright",0xafb; "caret",0xafc; "singlelowquotemark",0xafd; "doublelowquotemark",0xafe; "cursor",0xaff; "leftcaret",0xba3; "rightcaret",0xba6; "downcaret",0xba8; "upcaret",0xba9; "overbar",0xbc0; "downtack",0xbc2; 
"upshoe",0xbc3; "downstile",0xbc4; "underbar",0xbc6; "jot",0xbca; "quad",0xbcc; "uptack",0xbce; "circle",0xbcf; "upstile",0xbd3; "downshoe",0xbd6; "rightshoe",0xbd8; "leftshoe",0xbda; "lefttack",0xbdc; "righttack",0xbfc; "hebrew_doublelowline",0xcdf; "hebrew_aleph",0xce0; "hebrew_bet",0xce1; "hebrew_beth",0xce1; "hebrew_gimel",0xce2; "hebrew_gimmel",0xce2; "hebrew_dalet",0xce3; "hebrew_daleth",0xce3; "hebrew_he",0xce4; "hebrew_waw",0xce5; "hebrew_zain",0xce6; "hebrew_zayin",0xce6; "hebrew_chet",0xce7; "hebrew_het",0xce7; "hebrew_tet",0xce8; "hebrew_teth",0xce8; "hebrew_yod",0xce9; "hebrew_finalkaph",0xcea; "hebrew_kaph",0xceb; "hebrew_lamed",0xcec; "hebrew_finalmem",0xced; "hebrew_mem",0xcee; "hebrew_finalnun",0xcef; "hebrew_nun",0xcf0; "hebrew_samech",0xcf1; "hebrew_samekh",0xcf1; "hebrew_ayin",0xcf2; "hebrew_finalpe",0xcf3; "hebrew_pe",0xcf4; "hebrew_finalzade",0xcf5; "hebrew_finalzadi",0xcf5; "hebrew_zade",0xcf6; "hebrew_zadi",0xcf6; "hebrew_qoph",0xcf7; "hebrew_kuf",0xcf7; "hebrew_resh",0xcf8; "hebrew_shin",0xcf9; "hebrew_taw",0xcfa; "hebrew_taf",0xcfa; "Hebrew_switch",0xFF7E; "Thai_kokai",0xda1; "Thai_khokhai",0xda2; "Thai_khokhuat",0xda3; "Thai_khokhwai",0xda4; "Thai_khokhon",0xda5; "Thai_khorakhang",0xda6; "Thai_ngongu",0xda7; "Thai_chochan",0xda8; "Thai_choching",0xda9; "Thai_chochang",0xdaa; "Thai_soso",0xdab; "Thai_chochoe",0xdac; "Thai_yoying",0xdad; "Thai_dochada",0xdae; "Thai_topatak",0xdaf; "Thai_thothan",0xdb0; "Thai_thonangmontho",0xdb1; "Thai_thophuthao",0xdb2; "Thai_nonen",0xdb3; "Thai_dodek",0xdb4; "Thai_totao",0xdb5; "Thai_thothung",0xdb6; "Thai_thothahan",0xdb7; "Thai_thothong",0xdb8; "Thai_nonu",0xdb9; "Thai_bobaimai",0xdba; "Thai_popla",0xdbb; "Thai_phophung",0xdbc; "Thai_fofa",0xdbd; "Thai_phophan",0xdbe; "Thai_fofan",0xdbf; "Thai_phosamphao",0xdc0; "Thai_moma",0xdc1; "Thai_yoyak",0xdc2; "Thai_rorua",0xdc3; "Thai_ru",0xdc4; "Thai_loling",0xdc5; "Thai_lu",0xdc6; "Thai_wowaen",0xdc7; "Thai_sosala",0xdc8; "Thai_sorusi",0xdc9; "Thai_sosua",0xdca; "Thai_hohip",0xdcb; "Thai_lochula",0xdcc; "Thai_oang",0xdcd; "Thai_honokhuk",0xdce; "Thai_paiyannoi",0xdcf; "Thai_saraa",0xdd0; "Thai_maihanakat",0xdd1; "Thai_saraaa",0xdd2; "Thai_saraam",0xdd3; "Thai_sarai",0xdd4; "Thai_saraii",0xdd5; "Thai_saraue",0xdd6; "Thai_sarauee",0xdd7; "Thai_sarau",0xdd8; "Thai_sarauu",0xdd9; "Thai_phinthu",0xdda; "Thai_maihanakat_maitho",0xdde; "Thai_baht",0xddf; "Thai_sarae",0xde0; "Thai_saraae",0xde1; "Thai_sarao",0xde2; "Thai_saraaimaimuan",0xde3; "Thai_saraaimaimalai",0xde4; "Thai_lakkhangyao",0xde5; "Thai_maiyamok",0xde6; "Thai_maitaikhu",0xde7; "Thai_maiek",0xde8; "Thai_maitho",0xde9; "Thai_maitri",0xdea; "Thai_maichattawa",0xdeb; "Thai_thanthakhat",0xdec; "Thai_nikhahit",0xded; "Thai_leksun",0xdf0; "Thai_leknung",0xdf1; "Thai_leksong",0xdf2; "Thai_leksam",0xdf3; "Thai_leksi",0xdf4; "Thai_lekha",0xdf5; "Thai_lekhok",0xdf6; "Thai_lekchet",0xdf7; "Thai_lekpaet",0xdf8; "Thai_lekkao",0xdf9; "Hangul",0xff31; "Hangul_Start",0xff32; "Hangul_End",0xff33; "Hangul_Hanja",0xff34; "Hangul_Jamo",0xff35; "Hangul_Romaja",0xff36; "Hangul_Codeinput",0xff37; "Hangul_Jeonja",0xff38; "Hangul_Banja",0xff39; "Hangul_PreHanja",0xff3a; "Hangul_PostHanja",0xff3b; "Hangul_SingleCandidate",0xff3c; "Hangul_MultipleCandidate",0xff3d; "Hangul_PreviousCandidate",0xff3e; "Hangul_Special",0xff3f; "Hangul_switch",0xFF7E; "Hangul_Kiyeog",0xea1; "Hangul_SsangKiyeog",0xea2; "Hangul_KiyeogSios",0xea3; "Hangul_Nieun",0xea4; "Hangul_NieunJieuj",0xea5; "Hangul_NieunHieuh",0xea6; "Hangul_Dikeud",0xea7; "Hangul_SsangDikeud",0xea8; 
"Hangul_Rieul",0xea9; "Hangul_RieulKiyeog",0xeaa; "Hangul_RieulMieum",0xeab; "Hangul_RieulPieub",0xeac; "Hangul_RieulSios",0xead; "Hangul_RieulTieut",0xeae; "Hangul_RieulPhieuf",0xeaf; "Hangul_RieulHieuh",0xeb0; "Hangul_Mieum",0xeb1; "Hangul_Pieub",0xeb2; "Hangul_SsangPieub",0xeb3; "Hangul_PieubSios",0xeb4; "Hangul_Sios",0xeb5; "Hangul_SsangSios",0xeb6; "Hangul_Ieung",0xeb7; "Hangul_Jieuj",0xeb8; "Hangul_SsangJieuj",0xeb9; "Hangul_Cieuc",0xeba; "Hangul_Khieuq",0xebb; "Hangul_Tieut",0xebc; "Hangul_Phieuf",0xebd; "Hangul_Hieuh",0xebe; "Hangul_A",0xebf; "Hangul_AE",0xec0; "Hangul_YA",0xec1; "Hangul_YAE",0xec2; "Hangul_EO",0xec3; "Hangul_E",0xec4; "Hangul_YEO",0xec5; "Hangul_YE",0xec6; "Hangul_O",0xec7; "Hangul_WA",0xec8; "Hangul_WAE",0xec9; "Hangul_OE",0xeca; "Hangul_YO",0xecb; "Hangul_U",0xecc; "Hangul_WEO",0xecd; "Hangul_WE",0xece; "Hangul_WI",0xecf; "Hangul_YU",0xed0; "Hangul_EU",0xed1; "Hangul_YI",0xed2; "Hangul_I",0xed3; "Hangul_J_Kiyeog",0xed4; "Hangul_J_SsangKiyeog",0xed5; "Hangul_J_KiyeogSios",0xed6; "Hangul_J_Nieun",0xed7; "Hangul_J_NieunJieuj",0xed8; "Hangul_J_NieunHieuh",0xed9; "Hangul_J_Dikeud",0xeda; "Hangul_J_Rieul",0xedb; "Hangul_J_RieulKiyeog",0xedc; "Hangul_J_RieulMieum",0xedd; "Hangul_J_RieulPieub",0xede; "Hangul_J_RieulSios",0xedf; "Hangul_J_RieulTieut",0xee0; "Hangul_J_RieulPhieuf",0xee1; "Hangul_J_RieulHieuh",0xee2; "Hangul_J_Mieum",0xee3; "Hangul_J_Pieub",0xee4; "Hangul_J_PieubSios",0xee5; "Hangul_J_Sios",0xee6; "Hangul_J_SsangSios",0xee7; "Hangul_J_Ieung",0xee8; "Hangul_J_Jieuj",0xee9; "Hangul_J_Cieuc",0xeea; "Hangul_J_Khieuq",0xeeb; "Hangul_J_Tieut",0xeec; "Hangul_J_Phieuf",0xeed; "Hangul_J_Hieuh",0xeee; "Hangul_RieulYeorinHieuh",0xeef; "Hangul_SunkyeongeumMieum",0xef0; "Hangul_SunkyeongeumPieub",0xef1; "Hangul_PanSios",0xef2; "Hangul_KkogjiDalrinIeung",0xef3; "Hangul_SunkyeongeumPhieuf",0xef4; "Hangul_YeorinHieuh",0xef5; "Hangul_AraeA",0xef6; "Hangul_AraeAE",0xef7; "Hangul_J_PanSios",0xef8; "Hangul_J_KkogjiDalrinIeung",0xef9; "Hangul_J_YeorinHieuh",0xefa; "Korean_Won",0xeff; ] let keysym_to_name = [ 0xFFFFFF,"VoidSymbol"; 0xFF08,"BackSpace"; 0xFF09,"Tab"; 0xFF0A,"Linefeed"; 0xFF0B,"Clear"; 0xFF0D,"Return"; 0xFF13,"Pause"; 0xFF14,"Scroll_Lock"; 0xFF15,"Sys_Req"; 0xFF1B,"Escape"; 0xFFFF,"Delete"; 0xFF20,"Multi_key"; 0xFF21,"Kanji"; 0xFF22,"Muhenkan"; 0xFF23,"Henkan_Mode"; 0xFF23,"Henkan"; 0xFF24,"Romaji"; 0xFF25,"Hiragana"; 0xFF26,"Katakana"; 0xFF27,"Hiragana_Katakana"; 0xFF28,"Zenkaku"; 0xFF29,"Hankaku"; 0xFF2A,"Zenkaku_Hankaku"; 0xFF2B,"Touroku"; 0xFF2C,"Massyo"; 0xFF2D,"Kana_Lock"; 0xFF2E,"Kana_Shift"; 0xFF2F,"Eisu_Shift"; 0xFF30,"Eisu_toggle"; 0xFF50,"Home"; 0xFF51,"Left"; 0xFF52,"Up"; 0xFF53,"Right"; 0xFF54,"Down"; 0xFF55,"Prior"; 0xFF55,"Page_Up"; 0xFF56,"Next"; 0xFF56,"Page_Down"; 0xFF57,"End"; 0xFF58,"Begin"; 0xFF60,"Select"; 0xFF61,"Print"; 0xFF62,"Execute"; 0xFF63,"Insert"; 0xFF65,"Undo"; 0xFF66,"Redo"; 0xFF67,"Menu"; 0xFF68,"Find"; 0xFF69,"Cancel"; 0xFF6A,"Help"; 0xFF6B,"Break"; 0xFF7E,"Mode_switch"; 0xFF7E,"script_switch"; 0xFF7F,"Num_Lock"; 0xFF80,"KP_Space"; 0xFF89,"KP_Tab"; 0xFF8D,"KP_Enter"; 0xFF91,"KP_F1"; 0xFF92,"KP_F2"; 0xFF93,"KP_F3"; 0xFF94,"KP_F4"; 0xFF95,"KP_Home"; 0xFF96,"KP_Left"; 0xFF97,"KP_Up"; 0xFF98,"KP_Right"; 0xFF99,"KP_Down"; 0xFF9A,"KP_Prior"; 0xFF9A,"KP_Page_Up"; 0xFF9B,"KP_Next"; 0xFF9B,"KP_Page_Down"; 0xFF9C,"KP_End"; 0xFF9D,"KP_Begin"; 0xFF9E,"KP_Insert"; 0xFF9F,"KP_Delete"; 0xFFBD,"KP_Equal"; 0xFFAA,"KP_Multiply"; 0xFFAB,"KP_Add"; 0xFFAC,"KP_Separator"; 0xFFAD,"KP_Subtract"; 0xFFAE,"KP_Decimal"; 0xFFAF,"KP_Divide"; 0xFFB0,"KP_0"; 
0xFFB1,"KP_1"; 0xFFB2,"KP_2"; 0xFFB3,"KP_3"; 0xFFB4,"KP_4"; 0xFFB5,"KP_5"; 0xFFB6,"KP_6"; 0xFFB7,"KP_7"; 0xFFB8,"KP_8"; 0xFFB9,"KP_9"; 0xFFBE,"F1"; 0xFFBF,"F2"; 0xFFC0,"F3"; 0xFFC1,"F4"; 0xFFC2,"F5"; 0xFFC3,"F6"; 0xFFC4,"F7"; 0xFFC5,"F8"; 0xFFC6,"F9"; 0xFFC7,"F10"; 0xFFC8,"F11"; 0xFFC8,"L1"; 0xFFC9,"F12"; 0xFFC9,"L2"; 0xFFCA,"F13"; 0xFFCA,"L3"; 0xFFCB,"F14"; 0xFFCB,"L4"; 0xFFCC,"F15"; 0xFFCC,"L5"; 0xFFCD,"F16"; 0xFFCD,"L6"; 0xFFCE,"F17"; 0xFFCE,"L7"; 0xFFCF,"F18"; 0xFFCF,"L8"; 0xFFD0,"F19"; 0xFFD0,"L9"; 0xFFD1,"F20"; 0xFFD1,"L10"; 0xFFD2,"F21"; 0xFFD2,"R1"; 0xFFD3,"F22"; 0xFFD3,"R2"; 0xFFD4,"F23"; 0xFFD4,"R3"; 0xFFD5,"F24"; 0xFFD5,"R4"; 0xFFD6,"F25"; 0xFFD6,"R5"; 0xFFD7,"F26"; 0xFFD7,"R6"; 0xFFD8,"F27"; 0xFFD8,"R7"; 0xFFD9,"F28"; 0xFFD9,"R8"; 0xFFDA,"F29"; 0xFFDA,"R9"; 0xFFDB,"F30"; 0xFFDB,"R10"; 0xFFDC,"F31"; 0xFFDC,"R11"; 0xFFDD,"F32"; 0xFFDD,"R12"; 0xFFDE,"F33"; 0xFFDE,"R13"; 0xFFDF,"F34"; 0xFFDF,"R14"; 0xFFE0,"F35"; 0xFFE0,"R15"; 0xFFE1,"Shift_L"; 0xFFE2,"Shift_R"; 0xFFE3,"Control_L"; 0xFFE4,"Control_R"; 0xFFE5,"Caps_Lock"; 0xFFE6,"Shift_Lock"; 0xFFE7,"Meta_L"; 0xFFE8,"Meta_R"; 0xFFE9,"Alt_L"; 0xFFEA,"Alt_R"; 0xFFEB,"Super_L"; 0xFFEC,"Super_R"; 0xFFED,"Hyper_L"; 0xFFEE,"Hyper_R"; 0xFE01,"ISO_Lock"; 0xFE02,"ISO_Level2_Latch"; 0xFE03,"ISO_Level3_Shift"; 0xFE04,"ISO_Level3_Latch"; 0xFE05,"ISO_Level3_Lock"; 0xFF7E,"ISO_Group_Shift"; 0xFE06,"ISO_Group_Latch"; 0xFE07,"ISO_Group_Lock"; 0xFE08,"ISO_Next_Group"; 0xFE09,"ISO_Next_Group_Lock"; 0xFE0A,"ISO_Prev_Group"; 0xFE0B,"ISO_Prev_Group_Lock"; 0xFE0C,"ISO_First_Group"; 0xFE0D,"ISO_First_Group_Lock"; 0xFE0E,"ISO_Last_Group"; 0xFE0F,"ISO_Last_Group_Lock"; 0xFE20,"ISO_Left_Tab"; 0xFE21,"ISO_Move_Line_Up"; 0xFE22,"ISO_Move_Line_Down"; 0xFE23,"ISO_Partial_Line_Up"; 0xFE24,"ISO_Partial_Line_Down"; 0xFE25,"ISO_Partial_Space_Left"; 0xFE26,"ISO_Partial_Space_Right"; 0xFE27,"ISO_Set_Margin_Left"; 0xFE28,"ISO_Set_Margin_Right"; 0xFE29,"ISO_Release_Margin_Left"; 0xFE2A,"ISO_Release_Margin_Right"; 0xFE2B,"ISO_Release_Both_Margins"; 0xFE2C,"ISO_Fast_Cursor_Left"; 0xFE2D,"ISO_Fast_Cursor_Right"; 0xFE2E,"ISO_Fast_Cursor_Up"; 0xFE2F,"ISO_Fast_Cursor_Down"; 0xFE30,"ISO_Continuous_Underline"; 0xFE31,"ISO_Discontinuous_Underline"; 0xFE32,"ISO_Emphasize"; 0xFE33,"ISO_Center_Object"; 0xFE34,"ISO_Enter"; 0xFE50,"dead_grave"; 0xFE51,"dead_acute"; 0xFE52,"dead_circumflex"; 0xFE53,"dead_tilde"; 0xFE54,"dead_macron"; 0xFE55,"dead_breve"; 0xFE56,"dead_abovedot"; 0xFE57,"dead_diaeresis"; 0xFE58,"dead_abovering"; 0xFE59,"dead_doubleacute"; 0xFE5A,"dead_caron"; 0xFE5B,"dead_cedilla"; 0xFE5C,"dead_ogonek"; 0xFE5D,"dead_iota"; 0xFE5E,"dead_voiced_sound"; 0xFE5F,"dead_semivoiced_sound"; 0xFE60,"dead_belowdot"; 0xFED0,"First_Virtual_Screen"; 0xFED1,"Prev_Virtual_Screen"; 0xFED2,"Next_Virtual_Screen"; 0xFED4,"Last_Virtual_Screen"; 0xFED5,"Terminate_Server"; 0xFE70,"AccessX_Enable"; 0xFE71,"AccessX_Feedback_Enable"; 0xFE72,"RepeatKeys_Enable"; 0xFE73,"SlowKeys_Enable"; 0xFE74,"BounceKeys_Enable"; 0xFE75,"StickyKeys_Enable"; 0xFE76,"MouseKeys_Enable"; 0xFE77,"MouseKeys_Accel_Enable"; 0xFE78,"Overlay1_Enable"; 0xFE79,"Overlay2_Enable"; 0xFE7A,"AudibleBell_Enable"; 0xFEE0,"Pointer_Left"; 0xFEE1,"Pointer_Right"; 0xFEE2,"Pointer_Up"; 0xFEE3,"Pointer_Down"; 0xFEE4,"Pointer_UpLeft"; 0xFEE5,"Pointer_UpRight"; 0xFEE6,"Pointer_DownLeft"; 0xFEE7,"Pointer_DownRight"; 0xFEE8,"Pointer_Button_Dflt"; 0xFEE9,"Pointer_Button1"; 0xFEEA,"Pointer_Button2"; 0xFEEB,"Pointer_Button3"; 0xFEEC,"Pointer_Button4"; 0xFEED,"Pointer_Button5"; 0xFEEE,"Pointer_DblClick_Dflt"; 0xFEEF,"Pointer_DblClick1"; 
0xFEF0,"Pointer_DblClick2"; 0xFEF1,"Pointer_DblClick3"; 0xFEF2,"Pointer_DblClick4"; 0xFEF3,"Pointer_DblClick5"; 0xFEF4,"Pointer_Drag_Dflt"; 0xFEF5,"Pointer_Drag1"; 0xFEF6,"Pointer_Drag2"; 0xFEF7,"Pointer_Drag3"; 0xFEF8,"Pointer_Drag4"; 0xFEFD,"Pointer_Drag5"; 0xFEF9,"Pointer_EnableKeys"; 0xFEFA,"Pointer_Accelerate"; 0xFEFB,"Pointer_DfltBtnNext"; 0xFEFC,"Pointer_DfltBtnPrev"; 0xFD01,"3270_Duplicate"; 0xFD02,"3270_FieldMark"; 0xFD03,"3270_Right2"; 0xFD04,"3270_Left2"; 0xFD05,"3270_BackTab"; 0xFD06,"3270_EraseEOF"; 0xFD07,"3270_EraseInput"; 0xFD08,"3270_Reset"; 0xFD09,"3270_Quit"; 0xFD0A,"3270_PA1"; 0xFD0B,"3270_PA2"; 0xFD0C,"3270_PA3"; 0xFD0D,"3270_Test"; 0xFD0E,"3270_Attn"; 0xFD0F,"3270_CursorBlink"; 0xFD10,"3270_AltCursor"; 0xFD11,"3270_KeyClick"; 0xFD12,"3270_Jump"; 0xFD13,"3270_Ident"; 0xFD14,"3270_Rule"; 0xFD15,"3270_Copy"; 0xFD16,"3270_Play"; 0xFD17,"3270_Setup"; 0xFD18,"3270_Record"; 0xFD19,"3270_ChangeScreen"; 0xFD1A,"3270_DeleteWord"; 0xFD1B,"3270_ExSelect"; 0xFD1C,"3270_CursorSelect"; 0xFD1D,"3270_PrintScreen"; 0xFD1E,"3270_Enter"; 0x020,"space"; 0x021,"exclam"; 0x022,"quotedbl"; 0x023,"numbersign"; 0x024,"dollar"; 0x025,"percent"; 0x026,"ampersand"; 0x027,"apostrophe"; 0x027,"quoteright"; 0x028,"parenleft"; 0x029,"parenright"; 0x02a,"asterisk"; 0x02b,"plus"; 0x02c,"comma"; 0x02d,"minus"; 0x02e,"period"; 0x02f,"slash"; 0x030,"0"; 0x031,"1"; 0x032,"2"; 0x033,"3"; 0x034,"4"; 0x035,"5"; 0x036,"6"; 0x037,"7"; 0x038,"8"; 0x039,"9"; 0x03a,"colon"; 0x03b,"semicolon"; 0x03c,"less"; 0x03d,"equal"; 0x03e,"greater"; 0x03f,"question"; 0x040,"at"; 0x041,"A"; 0x042,"B"; 0x043,"C"; 0x044,"D"; 0x045,"E"; 0x046,"F"; 0x047,"G"; 0x048,"H"; 0x049,"I"; 0x04a,"J"; 0x04b,"K"; 0x04c,"L"; 0x04d,"M"; 0x04e,"N"; 0x04f,"O"; 0x050,"P"; 0x051,"Q"; 0x052,"R"; 0x053,"S"; 0x054,"T"; 0x055,"U"; 0x056,"V"; 0x057,"W"; 0x058,"X"; 0x059,"Y"; 0x05a,"Z"; 0x05b,"bracketleft"; 0x05c,"backslash"; 0x05d,"bracketright"; 0x05e,"asciicircum"; 0x05f,"underscore"; 0x060,"grave"; 0x060,"quoteleft"; 0x061,"a"; 0x062,"b"; 0x063,"c"; 0x064,"d"; 0x065,"e"; 0x066,"f"; 0x067,"g"; 0x068,"h"; 0x069,"i"; 0x06a,"j"; 0x06b,"k"; 0x06c,"l"; 0x06d,"m"; 0x06e,"n"; 0x06f,"o"; 0x070,"p"; 0x071,"q"; 0x072,"r"; 0x073,"s"; 0x074,"t"; 0x075,"u"; 0x076,"v"; 0x077,"w"; 0x078,"x"; 0x079,"y"; 0x07a,"z"; 0x07b,"braceleft"; 0x07c,"bar"; 0x07d,"braceright"; 0x07e,"asciitilde"; 0x0a0,"nobreakspace"; 0x0a1,"exclamdown"; 0x0a2,"cent"; 0x0a3,"sterling"; 0x0a4,"currency"; 0x0a5,"yen"; 0x0a6,"brokenbar"; 0x0a7,"section"; 0x0a8,"diaeresis"; 0x0a9,"copyright"; 0x0aa,"ordfeminine"; 0x0ab,"guillemotleft"; 0x0ac,"notsign"; 0x0ad,"hyphen"; 0x0ae,"registered"; 0x0af,"macron"; 0x0b0,"degree"; 0x0b1,"plusminus"; 0x0b2,"twosuperior"; 0x0b3,"threesuperior"; 0x0b4,"acute"; 0x0b5,"mu"; 0x0b6,"paragraph"; 0x0b7,"periodcentered"; 0x0b8,"cedilla"; 0x0b9,"onesuperior"; 0x0ba,"masculine"; 0x0bb,"guillemotright"; 0x0bc,"onequarter"; 0x0bd,"onehalf"; 0x0be,"threequarters"; 0x0bf,"questiondown"; 0x0c0,"Agrave"; 0x0c1,"Aacute"; 0x0c2,"Acircumflex"; 0x0c3,"Atilde"; 0x0c4,"Adiaeresis"; 0x0c5,"Aring"; 0x0c6,"AE"; 0x0c7,"Ccedilla"; 0x0c8,"Egrave"; 0x0c9,"Eacute"; 0x0ca,"Ecircumflex"; 0x0cb,"Ediaeresis"; 0x0cc,"Igrave"; 0x0cd,"Iacute"; 0x0ce,"Icircumflex"; 0x0cf,"Idiaeresis"; 0x0d0,"ETH"; 0x0d0,"Eth"; 0x0d1,"Ntilde"; 0x0d2,"Ograve"; 0x0d3,"Oacute"; 0x0d4,"Ocircumflex"; 0x0d5,"Otilde"; 0x0d6,"Odiaeresis"; 0x0d7,"multiply"; 0x0d8,"Ooblique"; 0x0d9,"Ugrave"; 0x0da,"Uacute"; 0x0db,"Ucircumflex"; 0x0dc,"Udiaeresis"; 0x0dd,"Yacute"; 0x0de,"THORN"; 0x0de,"Thorn"; 0x0df,"ssharp"; 0x0e0,"agrave"; 
0x0e1,"aacute"; 0x0e2,"acircumflex"; 0x0e3,"atilde"; 0x0e4,"adiaeresis"; 0x0e5,"aring"; 0x0e6,"ae"; 0x0e7,"ccedilla"; 0x0e8,"egrave"; 0x0e9,"eacute"; 0x0ea,"ecircumflex"; 0x0eb,"ediaeresis"; 0x0ec,"igrave"; 0x0ed,"iacute"; 0x0ee,"icircumflex"; 0x0ef,"idiaeresis"; 0x0f0,"eth"; 0x0f1,"ntilde"; 0x0f2,"ograve"; 0x0f3,"oacute"; 0x0f4,"ocircumflex"; 0x0f5,"otilde"; 0x0f6,"odiaeresis"; 0x0f7,"division"; 0x0f8,"oslash"; 0x0f9,"ugrave"; 0x0fa,"uacute"; 0x0fb,"ucircumflex"; 0x0fc,"udiaeresis"; 0x0fd,"yacute"; 0x0fe,"thorn"; 0x0ff,"ydiaeresis"; 0x1a1,"Aogonek"; 0x1a2,"breve"; 0x1a3,"Lstroke"; 0x1a5,"Lcaron"; 0x1a6,"Sacute"; 0x1a9,"Scaron"; 0x1aa,"Scedilla"; 0x1ab,"Tcaron"; 0x1ac,"Zacute"; 0x1ae,"Zcaron"; 0x1af,"Zabovedot"; 0x1b1,"aogonek"; 0x1b2,"ogonek"; 0x1b3,"lstroke"; 0x1b5,"lcaron"; 0x1b6,"sacute"; 0x1b7,"caron"; 0x1b9,"scaron"; 0x1ba,"scedilla"; 0x1bb,"tcaron"; 0x1bc,"zacute"; 0x1bd,"doubleacute"; 0x1be,"zcaron"; 0x1bf,"zabovedot"; 0x1c0,"Racute"; 0x1c3,"Abreve"; 0x1c5,"Lacute"; 0x1c6,"Cacute"; 0x1c8,"Ccaron"; 0x1ca,"Eogonek"; 0x1cc,"Ecaron"; 0x1cf,"Dcaron"; 0x1d0,"Dstroke"; 0x1d1,"Nacute"; 0x1d2,"Ncaron"; 0x1d5,"Odoubleacute"; 0x1d8,"Rcaron"; 0x1d9,"Uring"; 0x1db,"Udoubleacute"; 0x1de,"Tcedilla"; 0x1e0,"racute"; 0x1e3,"abreve"; 0x1e5,"lacute"; 0x1e6,"cacute"; 0x1e8,"ccaron"; 0x1ea,"eogonek"; 0x1ec,"ecaron"; 0x1ef,"dcaron"; 0x1f0,"dstroke"; 0x1f1,"nacute"; 0x1f2,"ncaron"; 0x1f5,"odoubleacute"; 0x1fb,"udoubleacute"; 0x1f8,"rcaron"; 0x1f9,"uring"; 0x1fe,"tcedilla"; 0x1ff,"abovedot"; 0x2a1,"Hstroke"; 0x2a6,"Hcircumflex"; 0x2a9,"Iabovedot"; 0x2ab,"Gbreve"; 0x2ac,"Jcircumflex"; 0x2b1,"hstroke"; 0x2b6,"hcircumflex"; 0x2b9,"idotless"; 0x2bb,"gbreve"; 0x2bc,"jcircumflex"; 0x2c5,"Cabovedot"; 0x2c6,"Ccircumflex"; 0x2d5,"Gabovedot"; 0x2d8,"Gcircumflex"; 0x2dd,"Ubreve"; 0x2de,"Scircumflex"; 0x2e5,"cabovedot"; 0x2e6,"ccircumflex"; 0x2f5,"gabovedot"; 0x2f8,"gcircumflex"; 0x2fd,"ubreve"; 0x2fe,"scircumflex"; 0x3a2,"kra"; 0x3a2,"kappa"; 0x3a3,"Rcedilla"; 0x3a5,"Itilde"; 0x3a6,"Lcedilla"; 0x3aa,"Emacron"; 0x3ab,"Gcedilla"; 0x3ac,"Tslash"; 0x3b3,"rcedilla"; 0x3b5,"itilde"; 0x3b6,"lcedilla"; 0x3ba,"emacron"; 0x3bb,"gcedilla"; 0x3bc,"tslash"; 0x3bd,"ENG"; 0x3bf,"eng"; 0x3c0,"Amacron"; 0x3c7,"Iogonek"; 0x3cc,"Eabovedot"; 0x3cf,"Imacron"; 0x3d1,"Ncedilla"; 0x3d2,"Omacron"; 0x3d3,"Kcedilla"; 0x3d9,"Uogonek"; 0x3dd,"Utilde"; 0x3de,"Umacron"; 0x3e0,"amacron"; 0x3e7,"iogonek"; 0x3ec,"eabovedot"; 0x3ef,"imacron"; 0x3f1,"ncedilla"; 0x3f2,"omacron"; 0x3f3,"kcedilla"; 0x3f9,"uogonek"; 0x3fd,"utilde"; 0x3fe,"umacron"; 0x47e,"overline"; 0x4a1,"kana_fullstop"; 0x4a2,"kana_openingbracket"; 0x4a3,"kana_closingbracket"; 0x4a4,"kana_comma"; 0x4a5,"kana_conjunctive"; 0x4a5,"kana_middledot"; 0x4a6,"kana_WO"; 0x4a7,"kana_a"; 0x4a8,"kana_i"; 0x4a9,"kana_u"; 0x4aa,"kana_e"; 0x4ab,"kana_o"; 0x4ac,"kana_ya"; 0x4ad,"kana_yu"; 0x4ae,"kana_yo"; 0x4af,"kana_tsu"; 0x4af,"kana_tu"; 0x4b0,"prolongedsound"; 0x4b1,"kana_A"; 0x4b2,"kana_I"; 0x4b3,"kana_U"; 0x4b4,"kana_E"; 0x4b5,"kana_O"; 0x4b6,"kana_KA"; 0x4b7,"kana_KI"; 0x4b8,"kana_KU"; 0x4b9,"kana_KE"; 0x4ba,"kana_KO"; 0x4bb,"kana_SA"; 0x4bc,"kana_SHI"; 0x4bd,"kana_SU"; 0x4be,"kana_SE"; 0x4bf,"kana_SO"; 0x4c0,"kana_TA"; 0x4c1,"kana_CHI"; 0x4c1,"kana_TI"; 0x4c2,"kana_TSU"; 0x4c2,"kana_TU"; 0x4c3,"kana_TE"; 0x4c4,"kana_TO"; 0x4c5,"kana_NA"; 0x4c6,"kana_NI"; 0x4c7,"kana_NU"; 0x4c8,"kana_NE"; 0x4c9,"kana_NO"; 0x4ca,"kana_HA"; 0x4cb,"kana_HI"; 0x4cc,"kana_FU"; 0x4cc,"kana_HU"; 0x4cd,"kana_HE"; 0x4ce,"kana_HO"; 0x4cf,"kana_MA"; 0x4d0,"kana_MI"; 0x4d1,"kana_MU"; 0x4d2,"kana_ME"; 0x4d3,"kana_MO"; 
0x4d4,"kana_YA"; 0x4d5,"kana_YU"; 0x4d6,"kana_YO"; 0x4d7,"kana_RA"; 0x4d8,"kana_RI"; 0x4d9,"kana_RU"; 0x4da,"kana_RE"; 0x4db,"kana_RO"; 0x4dc,"kana_WA"; 0x4dd,"kana_N"; 0x4de,"voicedsound"; 0x4df,"semivoicedsound"; 0xFF7E,"kana_switch"; 0x5ac,"Arabic_comma"; 0x5bb,"Arabic_semicolon"; 0x5bf,"Arabic_question_mark"; 0x5c1,"Arabic_hamza"; 0x5c2,"Arabic_maddaonalef"; 0x5c3,"Arabic_hamzaonalef"; 0x5c4,"Arabic_hamzaonwaw"; 0x5c5,"Arabic_hamzaunderalef"; 0x5c6,"Arabic_hamzaonyeh"; 0x5c7,"Arabic_alef"; 0x5c8,"Arabic_beh"; 0x5c9,"Arabic_tehmarbuta"; 0x5ca,"Arabic_teh"; 0x5cb,"Arabic_theh"; 0x5cc,"Arabic_jeem"; 0x5cd,"Arabic_hah"; 0x5ce,"Arabic_khah"; 0x5cf,"Arabic_dal"; 0x5d0,"Arabic_thal"; 0x5d1,"Arabic_ra"; 0x5d2,"Arabic_zain"; 0x5d3,"Arabic_seen"; 0x5d4,"Arabic_sheen"; 0x5d5,"Arabic_sad"; 0x5d6,"Arabic_dad"; 0x5d7,"Arabic_tah"; 0x5d8,"Arabic_zah"; 0x5d9,"Arabic_ain"; 0x5da,"Arabic_ghain"; 0x5e0,"Arabic_tatweel"; 0x5e1,"Arabic_feh"; 0x5e2,"Arabic_qaf"; 0x5e3,"Arabic_kaf"; 0x5e4,"Arabic_lam"; 0x5e5,"Arabic_meem"; 0x5e6,"Arabic_noon"; 0x5e7,"Arabic_ha"; 0x5e7,"Arabic_heh"; 0x5e8,"Arabic_waw"; 0x5e9,"Arabic_alefmaksura"; 0x5ea,"Arabic_yeh"; 0x5eb,"Arabic_fathatan"; 0x5ec,"Arabic_dammatan"; 0x5ed,"Arabic_kasratan"; 0x5ee,"Arabic_fatha"; 0x5ef,"Arabic_damma"; 0x5f0,"Arabic_kasra"; 0x5f1,"Arabic_shadda"; 0x5f2,"Arabic_sukun"; 0xFF7E,"Arabic_switch"; 0x6a1,"Serbian_dje"; 0x6a2,"Macedonia_gje"; 0x6a3,"Cyrillic_io"; 0x6a4,"Ukrainian_ie"; 0x6a4,"Ukranian_je"; 0x6a5,"Macedonia_dse"; 0x6a6,"Ukrainian_i"; 0x6a6,"Ukranian_i"; 0x6a7,"Ukrainian_yi"; 0x6a7,"Ukranian_yi"; 0x6a8,"Cyrillic_je"; 0x6a8,"Serbian_je"; 0x6a9,"Cyrillic_lje"; 0x6a9,"Serbian_lje"; 0x6aa,"Cyrillic_nje"; 0x6aa,"Serbian_nje"; 0x6ab,"Serbian_tshe"; 0x6ac,"Macedonia_kje"; 0x6ae,"Byelorussian_shortu"; 0x6af,"Cyrillic_dzhe"; 0x6af,"Serbian_dze"; 0x6b0,"numerosign"; 0x6b1,"Serbian_DJE"; 0x6b2,"Macedonia_GJE"; 0x6b3,"Cyrillic_IO"; 0x6b4,"Ukrainian_IE"; 0x6b4,"Ukranian_JE"; 0x6b5,"Macedonia_DSE"; 0x6b6,"Ukrainian_I"; 0x6b6,"Ukranian_I"; 0x6b7,"Ukrainian_YI"; 0x6b7,"Ukranian_YI"; 0x6b8,"Cyrillic_JE"; 0x6b8,"Serbian_JE"; 0x6b9,"Cyrillic_LJE"; 0x6b9,"Serbian_LJE"; 0x6ba,"Cyrillic_NJE"; 0x6ba,"Serbian_NJE"; 0x6bb,"Serbian_TSHE"; 0x6bc,"Macedonia_KJE"; 0x6be,"Byelorussian_SHORTU"; 0x6bf,"Cyrillic_DZHE"; 0x6bf,"Serbian_DZE"; 0x6c0,"Cyrillic_yu"; 0x6c1,"Cyrillic_a"; 0x6c2,"Cyrillic_be"; 0x6c3,"Cyrillic_tse"; 0x6c4,"Cyrillic_de"; 0x6c5,"Cyrillic_ie"; 0x6c6,"Cyrillic_ef"; 0x6c7,"Cyrillic_ghe"; 0x6c8,"Cyrillic_ha"; 0x6c9,"Cyrillic_i"; 0x6ca,"Cyrillic_shorti"; 0x6cb,"Cyrillic_ka"; 0x6cc,"Cyrillic_el"; 0x6cd,"Cyrillic_em"; 0x6ce,"Cyrillic_en"; 0x6cf,"Cyrillic_o"; 0x6d0,"Cyrillic_pe"; 0x6d1,"Cyrillic_ya"; 0x6d2,"Cyrillic_er"; 0x6d3,"Cyrillic_es"; 0x6d4,"Cyrillic_te"; 0x6d5,"Cyrillic_u"; 0x6d6,"Cyrillic_zhe"; 0x6d7,"Cyrillic_ve"; 0x6d8,"Cyrillic_softsign"; 0x6d9,"Cyrillic_yeru"; 0x6da,"Cyrillic_ze"; 0x6db,"Cyrillic_sha"; 0x6dc,"Cyrillic_e"; 0x6dd,"Cyrillic_shcha"; 0x6de,"Cyrillic_che"; 0x6df,"Cyrillic_hardsign"; 0x6e0,"Cyrillic_YU"; 0x6e1,"Cyrillic_A"; 0x6e2,"Cyrillic_BE"; 0x6e3,"Cyrillic_TSE"; 0x6e4,"Cyrillic_DE"; 0x6e5,"Cyrillic_IE"; 0x6e6,"Cyrillic_EF"; 0x6e7,"Cyrillic_GHE"; 0x6e8,"Cyrillic_HA"; 0x6e9,"Cyrillic_I"; 0x6ea,"Cyrillic_SHORTI"; 0x6eb,"Cyrillic_KA"; 0x6ec,"Cyrillic_EL"; 0x6ed,"Cyrillic_EM"; 0x6ee,"Cyrillic_EN"; 0x6ef,"Cyrillic_O"; 0x6f0,"Cyrillic_PE"; 0x6f1,"Cyrillic_YA"; 0x6f2,"Cyrillic_ER"; 0x6f3,"Cyrillic_ES"; 0x6f4,"Cyrillic_TE"; 0x6f5,"Cyrillic_U"; 0x6f6,"Cyrillic_ZHE"; 0x6f7,"Cyrillic_VE"; 0x6f8,"Cyrillic_SOFTSIGN"; 0x6f9,"Cyrillic_YERU"; 
0x6fa,"Cyrillic_ZE"; 0x6fb,"Cyrillic_SHA"; 0x6fc,"Cyrillic_E"; 0x6fd,"Cyrillic_SHCHA"; 0x6fe,"Cyrillic_CHE"; 0x6ff,"Cyrillic_HARDSIGN"; 0x7a1,"Greek_ALPHAaccent"; 0x7a2,"Greek_EPSILONaccent"; 0x7a3,"Greek_ETAaccent"; 0x7a4,"Greek_IOTAaccent"; 0x7a5,"Greek_IOTAdiaeresis"; 0x7a7,"Greek_OMICRONaccent"; 0x7a8,"Greek_UPSILONaccent"; 0x7a9,"Greek_UPSILONdieresis"; 0x7ab,"Greek_OMEGAaccent"; 0x7ae,"Greek_accentdieresis"; 0x7af,"Greek_horizbar"; 0x7b1,"Greek_alphaaccent"; 0x7b2,"Greek_epsilonaccent"; 0x7b3,"Greek_etaaccent"; 0x7b4,"Greek_iotaaccent"; 0x7b5,"Greek_iotadieresis"; 0x7b6,"Greek_iotaaccentdieresis"; 0x7b7,"Greek_omicronaccent"; 0x7b8,"Greek_upsilonaccent"; 0x7b9,"Greek_upsilondieresis"; 0x7ba,"Greek_upsilonaccentdieresis"; 0x7bb,"Greek_omegaaccent"; 0x7c1,"Greek_ALPHA"; 0x7c2,"Greek_BETA"; 0x7c3,"Greek_GAMMA"; 0x7c4,"Greek_DELTA"; 0x7c5,"Greek_EPSILON"; 0x7c6,"Greek_ZETA"; 0x7c7,"Greek_ETA"; 0x7c8,"Greek_THETA"; 0x7c9,"Greek_IOTA"; 0x7ca,"Greek_KAPPA"; 0x7cb,"Greek_LAMDA"; 0x7cb,"Greek_LAMBDA"; 0x7cc,"Greek_MU"; 0x7cd,"Greek_NU"; 0x7ce,"Greek_XI"; 0x7cf,"Greek_OMICRON"; 0x7d0,"Greek_PI"; 0x7d1,"Greek_RHO"; 0x7d2,"Greek_SIGMA"; 0x7d4,"Greek_TAU"; 0x7d5,"Greek_UPSILON"; 0x7d6,"Greek_PHI"; 0x7d7,"Greek_CHI"; 0x7d8,"Greek_PSI"; 0x7d9,"Greek_OMEGA"; 0x7e1,"Greek_alpha"; 0x7e2,"Greek_beta"; 0x7e3,"Greek_gamma"; 0x7e4,"Greek_delta"; 0x7e5,"Greek_epsilon"; 0x7e6,"Greek_zeta"; 0x7e7,"Greek_eta"; 0x7e8,"Greek_theta"; 0x7e9,"Greek_iota"; 0x7ea,"Greek_kappa"; 0x7eb,"Greek_lamda"; 0x7eb,"Greek_lambda"; 0x7ec,"Greek_mu"; 0x7ed,"Greek_nu"; 0x7ee,"Greek_xi"; 0x7ef,"Greek_omicron"; 0x7f0,"Greek_pi"; 0x7f1,"Greek_rho"; 0x7f2,"Greek_sigma"; 0x7f3,"Greek_finalsmallsigma"; 0x7f4,"Greek_tau"; 0x7f5,"Greek_upsilon"; 0x7f6,"Greek_phi"; 0x7f7,"Greek_chi"; 0x7f8,"Greek_psi"; 0x7f9,"Greek_omega"; 0xFF7E,"Greek_switch"; 0x8a1,"leftradical"; 0x8a2,"topleftradical"; 0x8a3,"horizconnector"; 0x8a4,"topintegral"; 0x8a5,"botintegral"; 0x8a6,"vertconnector"; 0x8a7,"topleftsqbracket"; 0x8a8,"botleftsqbracket"; 0x8a9,"toprightsqbracket"; 0x8aa,"botrightsqbracket"; 0x8ab,"topleftparens"; 0x8ac,"botleftparens"; 0x8ad,"toprightparens"; 0x8ae,"botrightparens"; 0x8af,"leftmiddlecurlybrace"; 0x8b0,"rightmiddlecurlybrace"; 0x8b1,"topleftsummation"; 0x8b2,"botleftsummation"; 0x8b3,"topvertsummationconnector"; 0x8b4,"botvertsummationconnector"; 0x8b5,"toprightsummation"; 0x8b6,"botrightsummation"; 0x8b7,"rightmiddlesummation"; 0x8bc,"lessthanequal"; 0x8bd,"notequal"; 0x8be,"greaterthanequal"; 0x8bf,"integral"; 0x8c0,"therefore"; 0x8c1,"variation"; 0x8c2,"infinity"; 0x8c5,"nabla"; 0x8c8,"approximate"; 0x8c9,"similarequal"; 0x8cd,"ifonlyif"; 0x8ce,"implies"; 0x8cf,"identical"; 0x8d6,"radical"; 0x8da,"includedin"; 0x8db,"includes"; 0x8dc,"intersection"; 0x8dd,"union"; 0x8de,"logicaland"; 0x8df,"logicalor"; 0x8ef,"partialderivative"; 0x8f6,"function"; 0x8fb,"leftarrow"; 0x8fc,"uparrow"; 0x8fd,"rightarrow"; 0x8fe,"downarrow"; 0x9df,"blank"; 0x9e0,"soliddiamond"; 0x9e1,"checkerboard"; 0x9e2,"ht"; 0x9e3,"ff"; 0x9e4,"cr"; 0x9e5,"lf"; 0x9e8,"nl"; 0x9e9,"vt"; 0x9ea,"lowrightcorner"; 0x9eb,"uprightcorner"; 0x9ec,"upleftcorner"; 0x9ed,"lowleftcorner"; 0x9ee,"crossinglines"; 0x9ef,"horizlinescan1"; 0x9f0,"horizlinescan3"; 0x9f1,"horizlinescan5"; 0x9f2,"horizlinescan7"; 0x9f3,"horizlinescan9"; 0x9f4,"leftt"; 0x9f5,"rightt"; 0x9f6,"bott"; 0x9f7,"topt"; 0x9f8,"vertbar"; 0xaa1,"emspace"; 0xaa2,"enspace"; 0xaa3,"em3space"; 0xaa4,"em4space"; 0xaa5,"digitspace"; 0xaa6,"punctspace"; 0xaa7,"thinspace"; 0xaa8,"hairspace"; 0xaa9,"emdash"; 0xaaa,"endash"; 
0xaac,"signifblank"; 0xaae,"ellipsis"; 0xaaf,"doubbaselinedot"; 0xab0,"onethird"; 0xab1,"twothirds"; 0xab2,"onefifth"; 0xab3,"twofifths"; 0xab4,"threefifths"; 0xab5,"fourfifths"; 0xab6,"onesixth"; 0xab7,"fivesixths"; 0xab8,"careof"; 0xabb,"figdash"; 0xabc,"leftanglebracket"; 0xabd,"decimalpoint"; 0xabe,"rightanglebracket"; 0xabf,"marker"; 0xac3,"oneeighth"; 0xac4,"threeeighths"; 0xac5,"fiveeighths"; 0xac6,"seveneighths"; 0xac9,"trademark"; 0xaca,"signaturemark"; 0xacb,"trademarkincircle"; 0xacc,"leftopentriangle"; 0xacd,"rightopentriangle"; 0xace,"emopencircle"; 0xacf,"emopenrectangle"; 0xad0,"leftsinglequotemark"; 0xad1,"rightsinglequotemark"; 0xad2,"leftdoublequotemark"; 0xad3,"rightdoublequotemark"; 0xad4,"prescription"; 0xad6,"minutes"; 0xad7,"seconds"; 0xad9,"latincross"; 0xada,"hexagram"; 0xadb,"filledrectbullet"; 0xadc,"filledlefttribullet"; 0xadd,"filledrighttribullet"; 0xade,"emfilledcircle"; 0xadf,"emfilledrect"; 0xae0,"enopencircbullet"; 0xae1,"enopensquarebullet"; 0xae2,"openrectbullet"; 0xae3,"opentribulletup"; 0xae4,"opentribulletdown"; 0xae5,"openstar"; 0xae6,"enfilledcircbullet"; 0xae7,"enfilledsqbullet"; 0xae8,"filledtribulletup"; 0xae9,"filledtribulletdown"; 0xaea,"leftpointer"; 0xaeb,"rightpointer"; 0xaec,"club"; 0xaed,"diamond"; 0xaee,"heart"; 0xaf0,"maltesecross"; 0xaf1,"dagger"; 0xaf2,"doubledagger"; 0xaf3,"checkmark"; 0xaf4,"ballotcross"; 0xaf5,"musicalsharp"; 0xaf6,"musicalflat"; 0xaf7,"malesymbol"; 0xaf8,"femalesymbol"; 0xaf9,"telephone"; 0xafa,"telephonerecorder"; 0xafb,"phonographcopyright"; 0xafc,"caret"; 0xafd,"singlelowquotemark"; 0xafe,"doublelowquotemark"; 0xaff,"cursor"; 0xba3,"leftcaret"; 0xba6,"rightcaret"; 0xba8,"downcaret"; 0xba9,"upcaret"; 0xbc0,"overbar"; 0xbc2,"downtack"; 0xbc3,"upshoe"; 0xbc4,"downstile"; 0xbc6,"underbar"; 0xbca,"jot"; 0xbcc,"quad"; 0xbce,"uptack"; 0xbcf,"circle"; 0xbd3,"upstile"; 0xbd6,"downshoe"; 0xbd8,"rightshoe"; 0xbda,"leftshoe"; 0xbdc,"lefttack"; 0xbfc,"righttack"; 0xcdf,"hebrew_doublelowline"; 0xce0,"hebrew_aleph"; 0xce1,"hebrew_bet"; 0xce1,"hebrew_beth"; 0xce2,"hebrew_gimel"; 0xce2,"hebrew_gimmel"; 0xce3,"hebrew_dalet"; 0xce3,"hebrew_daleth"; 0xce4,"hebrew_he"; 0xce5,"hebrew_waw"; 0xce6,"hebrew_zain"; 0xce6,"hebrew_zayin"; 0xce7,"hebrew_chet"; 0xce7,"hebrew_het"; 0xce8,"hebrew_tet"; 0xce8,"hebrew_teth"; 0xce9,"hebrew_yod"; 0xcea,"hebrew_finalkaph"; 0xceb,"hebrew_kaph"; 0xcec,"hebrew_lamed"; 0xced,"hebrew_finalmem"; 0xcee,"hebrew_mem"; 0xcef,"hebrew_finalnun"; 0xcf0,"hebrew_nun"; 0xcf1,"hebrew_samech"; 0xcf1,"hebrew_samekh"; 0xcf2,"hebrew_ayin"; 0xcf3,"hebrew_finalpe"; 0xcf4,"hebrew_pe"; 0xcf5,"hebrew_finalzade"; 0xcf5,"hebrew_finalzadi"; 0xcf6,"hebrew_zade"; 0xcf6,"hebrew_zadi"; 0xcf7,"hebrew_qoph"; 0xcf7,"hebrew_kuf"; 0xcf8,"hebrew_resh"; 0xcf9,"hebrew_shin"; 0xcfa,"hebrew_taw"; 0xcfa,"hebrew_taf"; 0xFF7E,"Hebrew_switch"; 0xda1,"Thai_kokai"; 0xda2,"Thai_khokhai"; 0xda3,"Thai_khokhuat"; 0xda4,"Thai_khokhwai"; 0xda5,"Thai_khokhon"; 0xda6,"Thai_khorakhang"; 0xda7,"Thai_ngongu"; 0xda8,"Thai_chochan"; 0xda9,"Thai_choching"; 0xdaa,"Thai_chochang"; 0xdab,"Thai_soso"; 0xdac,"Thai_chochoe"; 0xdad,"Thai_yoying"; 0xdae,"Thai_dochada"; 0xdaf,"Thai_topatak"; 0xdb0,"Thai_thothan"; 0xdb1,"Thai_thonangmontho"; 0xdb2,"Thai_thophuthao"; 0xdb3,"Thai_nonen"; 0xdb4,"Thai_dodek"; 0xdb5,"Thai_totao"; 0xdb6,"Thai_thothung"; 0xdb7,"Thai_thothahan"; 0xdb8,"Thai_thothong"; 0xdb9,"Thai_nonu"; 0xdba,"Thai_bobaimai"; 0xdbb,"Thai_popla"; 0xdbc,"Thai_phophung"; 0xdbd,"Thai_fofa"; 0xdbe,"Thai_phophan"; 0xdbf,"Thai_fofan"; 0xdc0,"Thai_phosamphao"; 
0xdc1,"Thai_moma"; 0xdc2,"Thai_yoyak"; 0xdc3,"Thai_rorua"; 0xdc4,"Thai_ru"; 0xdc5,"Thai_loling"; 0xdc6,"Thai_lu"; 0xdc7,"Thai_wowaen"; 0xdc8,"Thai_sosala"; 0xdc9,"Thai_sorusi"; 0xdca,"Thai_sosua"; 0xdcb,"Thai_hohip"; 0xdcc,"Thai_lochula"; 0xdcd,"Thai_oang"; 0xdce,"Thai_honokhuk"; 0xdcf,"Thai_paiyannoi"; 0xdd0,"Thai_saraa"; 0xdd1,"Thai_maihanakat"; 0xdd2,"Thai_saraaa"; 0xdd3,"Thai_saraam"; 0xdd4,"Thai_sarai"; 0xdd5,"Thai_saraii"; 0xdd6,"Thai_saraue"; 0xdd7,"Thai_sarauee"; 0xdd8,"Thai_sarau"; 0xdd9,"Thai_sarauu"; 0xdda,"Thai_phinthu"; 0xdde,"Thai_maihanakat_maitho"; 0xddf,"Thai_baht"; 0xde0,"Thai_sarae"; 0xde1,"Thai_saraae"; 0xde2,"Thai_sarao"; 0xde3,"Thai_saraaimaimuan"; 0xde4,"Thai_saraaimaimalai"; 0xde5,"Thai_lakkhangyao"; 0xde6,"Thai_maiyamok"; 0xde7,"Thai_maitaikhu"; 0xde8,"Thai_maiek"; 0xde9,"Thai_maitho"; 0xdea,"Thai_maitri"; 0xdeb,"Thai_maichattawa"; 0xdec,"Thai_thanthakhat"; 0xded,"Thai_nikhahit"; 0xdf0,"Thai_leksun"; 0xdf1,"Thai_leknung"; 0xdf2,"Thai_leksong"; 0xdf3,"Thai_leksam"; 0xdf4,"Thai_leksi"; 0xdf5,"Thai_lekha"; 0xdf6,"Thai_lekhok"; 0xdf7,"Thai_lekchet"; 0xdf8,"Thai_lekpaet"; 0xdf9,"Thai_lekkao"; 0xff31,"Hangul"; 0xff32,"Hangul_Start"; 0xff33,"Hangul_End"; 0xff34,"Hangul_Hanja"; 0xff35,"Hangul_Jamo"; 0xff36,"Hangul_Romaja"; 0xff37,"Hangul_Codeinput"; 0xff38,"Hangul_Jeonja"; 0xff39,"Hangul_Banja"; 0xff3a,"Hangul_PreHanja"; 0xff3b,"Hangul_PostHanja"; 0xff3c,"Hangul_SingleCandidate"; 0xff3d,"Hangul_MultipleCandidate"; 0xff3e,"Hangul_PreviousCandidate"; 0xff3f,"Hangul_Special"; 0xFF7E,"Hangul_switch"; 0xea1,"Hangul_Kiyeog"; 0xea2,"Hangul_SsangKiyeog"; 0xea3,"Hangul_KiyeogSios"; 0xea4,"Hangul_Nieun"; 0xea5,"Hangul_NieunJieuj"; 0xea6,"Hangul_NieunHieuh"; 0xea7,"Hangul_Dikeud"; 0xea8,"Hangul_SsangDikeud"; 0xea9,"Hangul_Rieul"; 0xeaa,"Hangul_RieulKiyeog"; 0xeab,"Hangul_RieulMieum"; 0xeac,"Hangul_RieulPieub"; 0xead,"Hangul_RieulSios"; 0xeae,"Hangul_RieulTieut"; 0xeaf,"Hangul_RieulPhieuf"; 0xeb0,"Hangul_RieulHieuh"; 0xeb1,"Hangul_Mieum"; 0xeb2,"Hangul_Pieub"; 0xeb3,"Hangul_SsangPieub"; 0xeb4,"Hangul_PieubSios"; 0xeb5,"Hangul_Sios"; 0xeb6,"Hangul_SsangSios"; 0xeb7,"Hangul_Ieung"; 0xeb8,"Hangul_Jieuj"; 0xeb9,"Hangul_SsangJieuj"; 0xeba,"Hangul_Cieuc"; 0xebb,"Hangul_Khieuq"; 0xebc,"Hangul_Tieut"; 0xebd,"Hangul_Phieuf"; 0xebe,"Hangul_Hieuh"; 0xebf,"Hangul_A"; 0xec0,"Hangul_AE"; 0xec1,"Hangul_YA"; 0xec2,"Hangul_YAE"; 0xec3,"Hangul_EO"; 0xec4,"Hangul_E"; 0xec5,"Hangul_YEO"; 0xec6,"Hangul_YE"; 0xec7,"Hangul_O"; 0xec8,"Hangul_WA"; 0xec9,"Hangul_WAE"; 0xeca,"Hangul_OE"; 0xecb,"Hangul_YO"; 0xecc,"Hangul_U"; 0xecd,"Hangul_WEO"; 0xece,"Hangul_WE"; 0xecf,"Hangul_WI"; 0xed0,"Hangul_YU"; 0xed1,"Hangul_EU"; 0xed2,"Hangul_YI"; 0xed3,"Hangul_I"; 0xed4,"Hangul_J_Kiyeog"; 0xed5,"Hangul_J_SsangKiyeog"; 0xed6,"Hangul_J_KiyeogSios"; 0xed7,"Hangul_J_Nieun"; 0xed8,"Hangul_J_NieunJieuj"; 0xed9,"Hangul_J_NieunHieuh"; 0xeda,"Hangul_J_Dikeud"; 0xedb,"Hangul_J_Rieul"; 0xedc,"Hangul_J_RieulKiyeog"; 0xedd,"Hangul_J_RieulMieum"; 0xede,"Hangul_J_RieulPieub"; 0xedf,"Hangul_J_RieulSios"; 0xee0,"Hangul_J_RieulTieut"; 0xee1,"Hangul_J_RieulPhieuf"; 0xee2,"Hangul_J_RieulHieuh"; 0xee3,"Hangul_J_Mieum"; 0xee4,"Hangul_J_Pieub"; 0xee5,"Hangul_J_PieubSios"; 0xee6,"Hangul_J_Sios"; 0xee7,"Hangul_J_SsangSios"; 0xee8,"Hangul_J_Ieung"; 0xee9,"Hangul_J_Jieuj"; 0xeea,"Hangul_J_Cieuc"; 0xeeb,"Hangul_J_Khieuq"; 0xeec,"Hangul_J_Tieut"; 0xeed,"Hangul_J_Phieuf"; 0xeee,"Hangul_J_Hieuh"; 0xeef,"Hangul_RieulYeorinHieuh"; 0xef0,"Hangul_SunkyeongeumMieum"; 0xef1,"Hangul_SunkyeongeumPieub"; 0xef2,"Hangul_PanSios"; 
0xef3,"Hangul_KkogjiDalrinIeung"; 0xef4,"Hangul_SunkyeongeumPhieuf"; 0xef5,"Hangul_YeorinHieuh"; 0xef6,"Hangul_AraeA"; 0xef7,"Hangul_AraeAE"; 0xef8,"Hangul_J_PanSios"; 0xef9,"Hangul_J_KkogjiDalrinIeung"; 0xefa,"Hangul_J_YeorinHieuh"; 0xeff,"Korean_Won"; ] coq-8.4pl2/ide/utils/configwin_ihm.ml0000640000175000001440000012714711744020214016711 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** This module contains the gui functions of Configwin.*) open Configwin_types module O = Config_file class type widget = object method box : GObj.widget method apply : unit -> unit end let file_html_config = Filename.concat Configwin_messages.home ".configwin_html" let debug = false let dbg = if debug then prerr_endline else (fun _ -> ()) (** Return the config group for the html config file, and the option for bindings. *) let html_config_file_and_option () = let ini = new O.group in let bindings = new O.list_cp Configwin_types.htmlbinding_cp_wrapper ~group: ini ["bindings"] ~short_name: "bd" [ { html_key = Configwin_types.string_to_key "A-b" ; html_begin = ""; html_end = "" ; } ; { html_key = Configwin_types.string_to_key "A-i" ; html_begin = ""; html_end = "" ; } ] "" in ini#read file_html_config ; (ini, bindings) (** This variable contains the last directory where the user selected a file.*) let last_dir = ref "";; (** This function allows the user to select a file and returns the selected file name. An optional function allows to change the behaviour of the ok button. A VOIR : mutli-selection ? *) let select_files ?dir ?(fok : (string -> unit) option) the_title = let files = ref ([] : string list) in let fs = GWindow.file_selection ~modal:true ~title: the_title () in (* we set the previous directory, if no directory is given *) ( match dir with None -> if !last_dir <> "" then let _ = fs#set_filename !last_dir in () else () | Some dir -> let _ = fs#set_filename !last_dir in () ); let _ = fs # connect#destroy ~callback: GMain.Main.quit in let _ = fs # ok_button # connect#clicked ~callback: (match fok with None -> (fun () -> files := [fs#filename] ; fs#destroy ()) | Some f -> (fun () -> f fs#filename) ) in let _ = fs # cancel_button # connect#clicked ~callback:fs#destroy in fs # show (); GMain.Main.main (); match !files with | [] -> [] | [""] -> [] | l -> (* we keep the directory in last_dir *) last_dir := Filename.dirname (List.hd l); l ;; (** Make the user select a date. 
*) let select_date title (day,mon,year) = let v_opt = ref None in let window = GWindow.dialog ~modal:true ~title () in let hbox = GPack.hbox ~border_width:10 ~packing:window#vbox#add () in let cal = GMisc.calendar ~packing: (hbox#pack ~expand: true) () in cal#select_month ~month: mon ~year: year ; cal#select_day day; let bbox = window#action_area in let bok = GButton.button ~label: Configwin_messages.mOk ~packing:(bbox#pack ~expand:true ~padding:4) () in let bcancel = GButton.button ~label: Configwin_messages.mCancel ~packing:(bbox#pack ~expand:true ~padding:4) () in ignore (bok#connect#clicked ~callback: (fun () -> v_opt := Some (cal#date); window#destroy ())); ignore(bcancel#connect#clicked ~callback: window#destroy); bok#grab_default (); ignore(window#connect#destroy ~callback: GMain.Main.quit); window#set_position `CENTER; window#show (); GMain.Main.main (); !v_opt (** This class builds a frame with a clist and two buttons : one to add items and one to remove the selected items. The class takes in parameter a function used to add items and a string list ref which is used to store the content of the clist. At last, a title for the frame is also in parameter, so that each instance of the class creates a frame. *) class ['a] list_selection_box (listref : 'a list ref) titles_opt help_opt f_edit_opt f_strings f_color (eq : 'a -> 'a -> bool) add_function title editable (tt:GData.tooltips) = let _ = dbg "list_selection_box" in let wev = GBin.event_box () in let wf = GBin.frame ~label: title ~packing: wev#add () in let hbox = GPack.hbox ~packing: wf#add () in (* the scroll window and the clist *) let wscroll = GBin.scrolled_window ~vpolicy: `AUTOMATIC ~hpolicy: `AUTOMATIC ~packing: (hbox#pack ~expand: true) () in let wlist = match titles_opt with None -> GList.clist ~selection_mode: `MULTIPLE ~titles_show: false ~packing: wscroll#add () | Some l -> GList.clist ~selection_mode: `MULTIPLE ~titles: l ~titles_show: true ~packing: wscroll#add () in let _ = match help_opt with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wev#coerce in (* the vbox for the buttons *) let vbox_buttons = GPack.vbox () in let _ = if editable then let _ = hbox#pack ~expand: false vbox_buttons#coerce in () else () in let _ = dbg "list_selection_box: wb_add" in let wb_add = GButton.button ~label: Configwin_messages.mAdd ~packing: (vbox_buttons#pack ~expand:false ~padding:2) () in let wb_edit = GButton.button ~label: Configwin_messages.mEdit () in let _ = match f_edit_opt with None -> () | Some _ -> vbox_buttons#pack ~expand:false ~padding:2 wb_edit#coerce in let wb_up = GButton.button ~label: Configwin_messages.mUp ~packing: (vbox_buttons#pack ~expand:false ~padding:2) () in let wb_remove = GButton.button ~label: Configwin_messages.mRemove ~packing: (vbox_buttons#pack ~expand:false ~padding:2) () in let _ = dbg "list_selection_box: object(self)" in object (self) (** the list of selected rows *) val mutable list_select = [] (** This method returns the frame created. 
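(* Construction sketch for this [list_selection_box] class (not from the
   original source; [container] and the callbacks are hypothetical, and [tt]
   stands for the surrounding GData.tooltips). With 'a = string, a single
   untitled column, structural equality and [select_files] as the add action:

     let files = ref ([] : string list) in
     let fbox =
       new list_selection_box
         files None None None            (* listref, titles, help, edit  *)
         (fun f -> [f]) (fun _ -> None)  (* cells per row, default color *)
         (=)                             (* duplicates detected with =   *)
         (fun () -> select_files "Add a file")
         "Files" true tt
     in
     container#pack ~expand:true fbox#box#coerce
*)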
*) method box = wev method update l = (* set the new list in the provided listref *) listref := l; (* insert the elements in the clist *) wlist#freeze (); wlist#clear (); List.iter (fun ele -> ignore (wlist#append (f_strings ele)); match f_color ele with None -> () | Some c -> try wlist#set_row ~foreground: (`NAME c) (wlist#rows - 1) with _ -> () ) !listref; (match titles_opt with None -> wlist#columns_autosize () | Some _ -> GToolbox.autosize_clist wlist); wlist#thaw (); (* the list of selectd elements is now empty *) list_select <- [] (** Move up the selected rows. *) method up_selected = let rec iter n selrows l = match selrows with [] -> (l, []) | m :: qrows -> match l with [] -> ([],[]) | [_] -> (l,[]) | e1 :: e2 :: q when m = n + 1 -> let newl, newrows = iter (n+1) qrows (e1 :: q) in (e2 :: newl, n :: newrows) | e1 :: q -> let newl, newrows = iter (n+1) selrows q in (e1 :: newl, newrows) in let sorted_select = List.sort compare list_select in let new_list, new_rows = iter 0 sorted_select !listref in self#update new_list; List.iter (fun n -> wlist#select n 0) new_rows (** Make the user edit the first selected row. *) method edit_selected f_edit = let sorted_select = List.sort compare list_select in match sorted_select with [] -> () | n :: _ -> try let ele = List.nth !listref n in let ele2 = f_edit ele in let rec iter m = function [] -> [] | e :: q -> if n = m then ele2 :: q else e :: (iter (m+1) q) in self#update (iter 0 !listref); wlist#select n 0 with Not_found -> () initializer (** create the functions called when the buttons are clicked *) let f_add () = (* get the files to add with the function provided *) let l = add_function () in (* remove from the list the ones which are already in the listref, using the eq predicate *) let l2 = List.fold_left (fun acc -> fun ele -> if List.exists (eq ele) acc then acc else acc @ [ele]) !listref l in self#update l2 in let f_remove () = (* remove the selected items from the listref and the clist *) let rec iter n = function [] -> [] | h :: q -> if List.mem n list_select then iter (n+1) q else h :: (iter (n+1) q) in let new_list = iter 0 !listref in self#update new_list in let _ = dbg "list_selection_box: connecting wb_add" in (* connect the functions to the buttons *) ignore (wb_add#connect#clicked ~callback:f_add); let _ = dbg "list_selection_box: connecting wb_remove" in ignore (wb_remove#connect#clicked ~callback:f_remove); let _ = dbg "list_selection_box: connecting wb_up" in ignore (wb_up#connect#clicked ~callback:(fun () -> self#up_selected)); ( match f_edit_opt with None -> () | Some f -> let _ = dbg "list_selection_box: connecting wb_edit" in ignore (wb_edit#connect#clicked ~callback:(fun () -> self#edit_selected f)) ); (* connect the selection and deselection of items in the clist *) let f_select ~row ~column ~event = try list_select <- row :: list_select with Failure _ -> () in let f_unselect ~row ~column ~event = try let new_list_select = List.filter (fun n -> n <> row) list_select in list_select <- new_list_select with Failure _ -> () in (* connect the select and deselect events *) let _ = dbg "list_selection_box: connecting select_row" in ignore(wlist#connect#select_row ~callback:f_select); let _ = dbg "list_selection_box: connecting unselect_row" in ignore(wlist#connect#unselect_row ~callback:f_unselect); (* initialize the clist with the listref *) self#update !listref end;; (** This class is used to build a box for a string parameter.*) class string_param_box param (tt:GData.tooltips) = let _ = dbg "string_param_box" in let hbox 
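(* Worked example for [list_selection_box#up_selected] above (values are
   illustrative). With [!listref = ["a"; "b"; "c"; "d"]] and rows 2 and 3
   selected, [iter 0 [2; 3]] swaps each selected element with the element just
   before it, giving

     new_list = ["a"; "c"; "d"; "b"]      new_rows = [1; 2]

   so the selected block moves up one position and is re-selected. If row 0
   belongs to the selection, the [m = n + 1] guard never fires and the list
   comes back unchanged. *)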
= GPack.hbox () in let wev = GBin.event_box ~packing: (hbox#pack ~expand: false ~padding: 2) () in let _wl = GMisc.label ~text: param.string_label ~packing: wev#add () in let we = GEdit.entry ~editable: param.string_editable ~packing: (hbox#pack ~expand: param.string_expand ~padding: 2) () in let _ = match param.string_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wev#coerce in let _ = we#set_text (param.string_to_string param.string_value) in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. *) method apply = let new_value = param.string_of_string we#text in if new_value <> param.string_value then let _ = param.string_f_apply new_value in param.string_value <- new_value else () end ;; (** This class is used to build a box for a combo parameter.*) class combo_param_box param (tt:GData.tooltips) = let _ = dbg "combo_param_box" in let hbox = GPack.hbox () in let wev = GBin.event_box ~packing: (hbox#pack ~expand: false ~padding: 2) () in let _wl = GMisc.label ~text: param.combo_label ~packing: wev#add () in let _ = match param.combo_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wev#coerce in let get_value = if not param.combo_new_allowed then let wc = GEdit.combo_box_text ~strings: param.combo_choices ?active:(let rec aux i = function |[] -> None |h::_ when h = param.combo_value -> Some i |_::t -> aux (succ i) t in aux 0 param.combo_choices) ~packing: (hbox#pack ~expand: param.combo_expand ~padding: 2) () in fun () -> match GEdit.text_combo_get_active wc with |None -> "" |Some s -> s else let (wc,_) = GEdit.combo_box_entry_text ~strings: param.combo_choices ~packing: (hbox#pack ~expand: param.combo_expand ~padding: 2) () in let _ = wc#entry#set_editable param.combo_editable in let _ = wc#entry#set_text param.combo_value in fun () -> wc#entry#text in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. *) method apply = let new_value = get_value () in if new_value <> param.combo_value then let _ = param.combo_f_apply new_value in param.combo_value <- new_value else () end ;; (** Class used to pack a custom box. 
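(* The various param-box classes in this file all expose the same two-method
   contract: [box] returns the widget to pack into the dialog, and [apply]
   commits the current widget state back to the parameter (most boxes only
   call the user callback when the value actually changed). A hypothetical
   call site (the record [p] and [container] are not from this file):

     let b = new string_param_box p tt in
     container#pack ~expand:false ~padding:2 b#box;
     (* later, when the user validates the dialog: *)
     b#apply
*)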
*) class custom_param_box param (tt:GData.tooltips) = let _ = dbg "custom_param_box" in let top = match param.custom_framed with None -> param.custom_box#coerce | Some l -> let wf = GBin.frame ~label: l () in wf#add param.custom_box#coerce; wf#coerce in object (self) method box = top method apply = param.custom_f_apply () end (** This class is used to build a box for a color parameter.*) class color_param_box param (tt:GData.tooltips) = let _ = dbg "color_param_box" in let v = ref param.color_value in let hbox = GPack.hbox () in let wb = GButton.button ~label: param.color_label ~packing: (hbox#pack ~expand: false ~padding: 2) () in let w_test = GMisc.arrow ~kind: `RIGHT ~shadow: `OUT ~width: 20 ~height: 20 ~packing: (hbox#pack ~expand: false ~padding: 2 ) () in let we = GEdit.entry ~editable: param.color_editable ~packing: (hbox#pack ~expand: param.color_expand ~padding: 2) () in let _ = match param.color_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wb#coerce in let set_color s = let style = w_test#misc#style#copy in ( try style#set_fg [ (`NORMAL, `NAME s) ; ] with _ -> () ); w_test#misc#set_style style; in let _ = set_color !v in let _ = we#set_text !v in let f_sel () = let dialog = GWindow.color_selection_dialog ~title: param.color_label ~modal: true ~show: true () in let wb_ok = dialog#ok_button in let wb_cancel = dialog#cancel_button in let _ = dialog#connect#destroy ~callback:GMain.Main.quit in let _ = wb_ok#connect#clicked ~callback:(fun () -> (* let color = dialog#colorsel#color in let r = (Gdk.Color.red color) in let g = (Gdk.Color.green color)in let b = (Gdk.Color.blue color) in let s = Printf.sprintf "#%4X%4X%4X" r g b in let _ = for i = 1 to (String.length s) - 1 do if s.[i] = ' ' then s.[i] <- '0' done in we#set_text s ; *) dialog#destroy () ) in let _ = wb_cancel#connect#clicked ~callback:dialog#destroy in GMain.Main.main () in let _ = if param.color_editable then ignore (wb#connect#clicked ~callback:f_sel) in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. 
*) method apply = let new_value = we#text in if new_value <> param.color_value then let _ = param.color_f_apply new_value in param.color_value <- new_value else () initializer ignore (we#connect#changed ~callback:(fun () -> set_color we#text)); end ;; (** This class is used to build a box for a font parameter.*) class font_param_box param (tt:GData.tooltips) = let _ = dbg "font_param_box" in let v = ref param.font_value in let hbox = GPack.hbox () in let wb = GButton.button ~label: param.font_label ~packing: (hbox#pack ~expand: false ~padding: 2) () in let we = GEdit.entry ~editable: false ~packing: (hbox#pack ~expand: param.font_expand ~padding: 2) () in let _ = match param.font_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wb#coerce in let set_entry_font font_opt = match font_opt with None -> () | Some s -> let style = we#misc#style#copy in ( try let font = Gdk.Font.load_fontset s in style#set_font font with _ -> () ); we#misc#set_style style in let _ = set_entry_font (Some !v) in let _ = we#set_text !v in let f_sel () = let dialog = GWindow.font_selection_dialog ~title: param.font_label ~modal: true ~show: true () in dialog#selection#set_font_name !v; let wb_ok = dialog#ok_button in let wb_cancel = dialog#cancel_button in let _ = dialog#connect#destroy ~callback:GMain.Main.quit in let _ = wb_ok#connect#clicked ~callback:(fun () -> let font = dialog#selection#font_name in we#set_text font ; set_entry_font (Some font); dialog#destroy () ) in let _ = wb_cancel#connect#clicked ~callback:dialog#destroy in GMain.Main.main () in let _ = if param.font_editable then ignore (wb#connect#clicked ~callback:f_sel) in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. *) method apply = let new_value = we#text in if new_value <> param.font_value then let _ = param.font_f_apply new_value in param.font_value <- new_value else () end ;; (** This class is used to build a box for a text parameter.*) class text_param_box param (tt:GData.tooltips) = let _ = dbg "text_param_box" in let wf = GBin.frame ~label: param.string_label ~height: 100 () in let wev = GBin.event_box ~packing: wf#add () in let wscroll = GBin.scrolled_window ~vpolicy: `AUTOMATIC ~hpolicy: `AUTOMATIC ~packing: wev#add () in let wview = GText.view ~editable: param.string_editable ~packing: wscroll#add () in let _ = match param.string_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wev#coerce in let _ = dbg "text_param_box: buffer creation" in let buffer = GText.buffer () in let _ = wview#set_buffer buffer in let _ = buffer#insert (param.string_to_string param.string_value) in let _ = dbg "text_param_box: object(self)" in object (self) val wview = wview (** This method returns the main box ready to be packed. *) method box = wf#coerce (** This method applies the new value of the parameter. *) method apply = let v = param.string_of_string (buffer#get_text ()) in if v <> param.string_value then ( dbg "apply new value !"; let _ = param.string_f_apply v in param.string_value <- v ) else () end ;; (** This class is used to build a box a html parameter. 
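(* Sketch of a binding record as used by [html_param_box] below (field values
   are hypothetical; the actual bindings are read from ~/.configwin_html by
   [html_config_file_and_option] at the top of this file):

     { html_key   = Configwin_types.string_to_key "A-b";
       html_begin = "<b>";
       html_end   = "</b>"; }

   In the initializer below, [Okey.add] registers one shortcut per binding;
   pressing the key wraps the current selection in the two fragments or, when
   nothing is selected, inserts them side by side at the cursor. *)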
*) class html_param_box param (tt:GData.tooltips) = let _ = dbg "html_param_box" in object (self) inherit text_param_box param tt method private exec html_start html_end () = let (i1,i2) = wview#buffer#selection_bounds in let s = i1#get_text ~stop: i2 in match s with "" -> wview#buffer#insert (html_start^html_end) | _ -> ignore (wview#buffer#insert ~iter: i2 html_end); ignore (wview#buffer#insert ~iter: i1 html_start); wview#buffer#place_cursor ~where: i2 initializer dbg "html_param_box:initializer"; let (_,html_bindings) = html_config_file_and_option () in dbg "html_param_box:connecting key press events"; let add_shortcut hb = let (mods, k) = hb.html_key in Okey.add wview ~mods k (self#exec hb.html_begin hb.html_end) in List.iter add_shortcut html_bindings#get; dbg "html_param_box:end" end (** This class is used to build a box for a boolean parameter.*) class bool_param_box param (tt:GData.tooltips) = let _ = dbg "bool_param_box" in let wchk = GButton.check_button ~label: param.bool_label () in let _ = match param.bool_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wchk#coerce in let _ = wchk#set_active param.bool_value in let _ = wchk#misc#set_sensitive param.bool_editable in object (self) (** This method returns the check button ready to be packed. *) method box = wchk#coerce (** This method applies the new value of the parameter. *) method apply = let new_value = wchk#active in if new_value <> param.bool_value then let _ = param.bool_f_apply new_value in param.bool_value <- new_value else () end ;; (** This class is used to build a box for a file name parameter.*) class filename_param_box param (tt:GData.tooltips) = let _ = dbg "filename_param_box" in let hbox = GPack.hbox () in let wb = GButton.button ~label: param.string_label ~packing: (hbox#pack ~expand: false ~padding: 2) () in let we = GEdit.entry ~editable: param.string_editable ~packing: (hbox#pack ~expand: param.string_expand ~padding: 2) () in let _ = match param.string_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wb#coerce in let _ = we#set_text (param.string_to_string param.string_value) in let f_click () = match select_files param.string_label with [] -> () | f :: _ -> we#set_text f in let _ = if param.string_editable then let _ = wb#connect#clicked ~callback:f_click in () else () in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. 
*) method apply = let new_value = param.string_of_string we#text in if new_value <> param.string_value then let _ = param.string_f_apply new_value in param.string_value <- new_value else () end ;; (** This class is used to build a box for a hot key parameter.*) class hotkey_param_box param (tt:GData.tooltips) = let _ = dbg "hotkey_param_box" in let hbox = GPack.hbox () in let wev = GBin.event_box ~packing: (hbox#pack ~expand: false ~padding: 2) () in let _wl = GMisc.label ~text: param.hk_label ~packing: wev#add () in let we = GEdit.entry ~editable: false ~packing: (hbox#pack ~expand: param.hk_expand ~padding: 2) () in let value = ref param.hk_value in let _ = match param.hk_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wev#coerce in let _ = we#set_text (Configwin_types.key_to_string param.hk_value) in let mods_we_dont_care = [`MOD2 ; `MOD3 ; `MOD4 ; `MOD5 ; `LOCK] in let capture ev = let key = GdkEvent.Key.keyval ev in let modifiers = GdkEvent.Key.state ev in let mods = List.filter (fun m -> not (List.mem m mods_we_dont_care)) modifiers in value := (mods, key); we#set_text (Glib.Convert.locale_to_utf8 (Configwin_types.key_to_string !value)); false in let _ = if param.hk_editable then ignore (we#event#connect#key_press ~callback:capture) else () in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. *) method apply = let new_value = !value in if new_value <> param.hk_value then let _ = param.hk_f_apply new_value in param.hk_value <- new_value else () end ;; class modifiers_param_box param = let hbox = GPack.hbox () in let wev = GBin.event_box ~packing: (hbox#pack ~expand:true ~fill:true ~padding: 2) () in let _wl = GMisc.label ~text: param.md_label ~packing: wev#add () in let value = ref param.md_value in let _ = List.map (fun modifier -> let but = GButton.toggle_button ~label:(Configwin_types.modifiers_to_string [modifier]) ~active:(List.mem modifier param.md_value) ~packing:(hbox#pack ~expand:false) () in ignore (but#connect#toggled ~callback:(fun _ -> if but#active then value := modifier::!value else value := List.filter ((<>) modifier) !value))) param.md_allow in let _ = match param.md_help with None -> () | Some help -> let tooltips = GData.tooltips () in ignore (hbox#connect#destroy ~callback: tooltips#destroy); tooltips#set_tip wev#coerce ~text: help ~privat: help in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. 
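(* Note on [hotkey_param_box] above: its key-press handler filters the event
   state against [mods_we_dont_care] before storing the binding, so the
   recorded shortcut does not depend on lock or numeric modifiers. For
   example, pressing Ctrl-S while NumLock is active typically reports the
   state [`CONTROL; `MOD2], but only [`CONTROL] is kept alongside the
   keyval. *)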
*) method apply = let new_value = !value in if new_value <> param.md_value then let _ = param.md_f_apply new_value in param.md_value <- new_value else () end ;; (** This class is used to build a box for a date parameter.*) class date_param_box param (tt:GData.tooltips) = let _ = dbg "date_param_box" in let v = ref param.date_value in let hbox = GPack.hbox () in let wb = GButton.button ~label: param.date_label ~packing: (hbox#pack ~expand: false ~padding: 2) () in let we = GEdit.entry ~editable: false ~packing: (hbox#pack ~expand: param.date_expand ~padding: 2) () in let _ = match param.date_help with None -> () | Some help -> tt#set_tip ~text: help ~privat: help wb#coerce in let _ = we#set_text (param.date_f_string param.date_value) in let f_click () = match select_date param.date_label !v with None -> () | Some (y,m,d) -> v := (d,m,y) ; we#set_text (param.date_f_string (d,m,y)) in let _ = if param.date_editable then let _ = wb#connect#clicked ~callback:f_click in () else () in object (self) (** This method returns the main box ready to be packed. *) method box = hbox#coerce (** This method applies the new value of the parameter. *) method apply = if !v <> param.date_value then let _ = param.date_f_apply !v in param.date_value <- !v else () end ;; (** This class is used to build a box for a parameter whose values are a list.*) class ['a] list_param_box (param : 'a list_param) (tt:GData.tooltips) = let _ = dbg "list_param_box" in let listref = ref param.list_value in let frame_selection = new list_selection_box listref param.list_titles param.list_help param.list_f_edit param.list_strings param.list_color param.list_eq param.list_f_add param.list_label param.list_editable tt in object (self) (** This method returns the main box ready to be packed. *) method box = frame_selection#box#coerce (** This method applies the new value of the parameter. 
*) method apply = param.list_f_apply !listref ; param.list_value <- !listref end ;; (** This class creates a configuration box from a configuration structure *) class configuration_box (tt : GData.tooltips) conf_struct = let main_box = GPack.hbox () in let columns = new GTree.column_list in let icon_col = columns#add GtkStock.conv in let label_col = columns#add Gobject.Data.string in let box_col = columns#add Gobject.Data.caml in let () = columns#lock () in let pane = GPack.paned `HORIZONTAL ~packing:main_box#add () in (* Tree view part *) let scroll = GBin.scrolled_window ~hpolicy:`NEVER ~packing:pane#pack1 () in let tree = GTree.tree_store columns in let view = GTree.view ~model:tree ~headers_visible:false ~packing:scroll#add_with_viewport () in let selection = view#selection in let _ = selection#set_mode `SINGLE in let menu_box = GPack.vbox ~packing:pane#pack2 () in let renderer = (GTree.cell_renderer_pixbuf [], ["stock-id", icon_col]) in let col = GTree.view_column ~renderer () in let _ = view#append_column col in let renderer = (GTree.cell_renderer_text [], ["text", label_col]) in let col = GTree.view_column ~renderer () in let _ = view#append_column col in let make_param (main_box : #GPack.box) = function | String_param p -> let box = new string_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Combo_param p -> let box = new combo_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Text_param p -> let box = new text_param_box p tt in let _ = main_box#pack ~expand: p.string_expand ~padding: 2 box#box in box | Bool_param p -> let box = new bool_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Filename_param p -> let box = new filename_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | List_param f -> let box = f tt in let _ = main_box#pack ~expand: true ~padding: 2 box#box in box | Custom_param p -> let box = new custom_param_box p tt in let _ = main_box#pack ~expand: p.custom_expand ~padding: 2 box#box in box | Color_param p -> let box = new color_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Font_param p -> let box = new font_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Date_param p -> let box = new date_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Hotkey_param p -> let box = new hotkey_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Modifiers_param p -> let box = new modifiers_param_box p in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Html_param p -> let box = new html_param_box p tt in let _ = main_box#pack ~expand: p.string_expand ~padding: 2 box#box in box in let set_icon iter = function | None -> () | Some icon -> tree#set iter icon_col icon in (* Populate the tree *) let rec make_tree iter conf_struct = (* box is not shown at first *) let box = GPack.vbox ~packing:(menu_box#pack ~expand:true) ~show:false () in let new_iter = match iter with | None -> tree#append () | Some parent -> tree#append ~parent () in match conf_struct with | Section (label, icon, param_list) -> let params = List.map (make_param box) param_list in let widget = object method box = box#coerce method apply () = List.iter (fun param -> param#apply) params end in let () = tree#set new_iter label_col label in let () = set_icon new_iter icon in let () = tree#set new_iter box_col widget in () | Section_list 
(label, icon, struct_list) -> let widget = object (* Section_list does not contain any effect widget, so we do not have to apply anything. *) method apply () = () method box = box#coerce end in let () = tree#set new_iter label_col label in let () = set_icon new_iter icon in let () = tree#set new_iter box_col widget in List.iter (make_tree (Some new_iter)) struct_list in let () = List.iter (make_tree None) conf_struct in (* Dealing with signals *) let current_prop : widget option ref = ref None in let select_iter iter = let () = match !current_prop with | None -> () | Some box -> box#box#misc#hide () in let box = tree#get ~row:iter ~column:box_col in let () = box#box#misc#show () in current_prop := Some box in let when_selected () = let rows = selection#get_selected_rows in match rows with | [] -> () | row :: _ -> let iter = tree#get_iter row in select_iter iter in (* Focus on a box when selected *) let _ = selection#connect#changed ~callback:when_selected in let _ = match tree#get_iter_first with | None -> () | Some iter -> select_iter iter in object method box = main_box method apply = let foreach _ iter = let widget = tree#get ~row:iter ~column:box_col in widget#apply(); false in tree#foreach foreach end (** Create a vbox with the list of given configuration structure list, and the given list of buttons (defined by their label and callback). Before calling the callback of a button, the [apply] function of each parameter is called. *) let tabbed_box conf_struct_list buttons tooltips = let param_box = new configuration_box tooltips conf_struct_list in let f_apply () = param_box#apply in let hbox_buttons = GPack.hbox ~packing: (param_box#box#pack ~expand: false ~padding: 4) () in let rec iter_buttons ?(grab=false) = function [] -> () | (label, callb) :: q -> let b = GButton.button ~label: label ~packing:(hbox_buttons#pack ~expand:true ~fill: true ~padding:4) () in ignore (b#connect#clicked ~callback: (fun () -> f_apply (); callb ())); (* If it's the first button then give it the focus *) if grab then b#grab_default (); iter_buttons q in iter_buttons ~grab: true buttons; param_box#box (** This function takes a configuration structure list and creates a window to configure the various parameters. *) let edit ?(with_apply=true) ?(apply=(fun () -> ())) title ?width ?height conf_struct = let dialog = GWindow.dialog ~position:`CENTER ~modal: true ~title: title ?height ?width () in let tooltips = GData.tooltips () in let config_box = new configuration_box tooltips conf_struct in let _ = dialog#vbox#add config_box#box#coerce in if with_apply then dialog#add_button Configwin_messages.mApply `APPLY; dialog#add_button Configwin_messages.mOk `OK; dialog#add_button Configwin_messages.mCancel `CANCEL; let destroy () = tooltips#destroy () ; dialog#destroy (); in let rec iter rep = try match dialog#run () with | `APPLY -> config_box#apply; iter Return_apply | `OK -> config_box#apply; destroy (); Return_ok | _ -> destroy (); rep with Failure s -> GToolbox.message_box ~title:"Error" s; iter rep | e -> GToolbox.message_box ~title:"Error" (Printexc.to_string e); iter rep in iter Return_cancel (** Create a vbox with the list of given parameters. 
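   The pair returned is the container plus a function that applies every
   parameter. A minimal usage sketch ([my_container] stands for any existing
   [GPack.box]; the labels and values are only illustrative):
   {[
     let params = [ string "Name" "coq" ; bool "Verbose" false ] in
     let (pbox, apply_all) = box params (GData.tooltips ()) in
     my_container#pack pbox#coerce ;
     (* later, once the user validates: *)
     apply_all ()
   ]}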
*) let box param_list tt = let main_box = GPack.vbox () in let f parameter = match parameter with String_param p -> let box = new string_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Combo_param p -> let box = new combo_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Text_param p -> let box = new text_param_box p tt in let _ = main_box#pack ~expand: p.string_expand ~padding: 2 box#box in box | Bool_param p -> let box = new bool_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Filename_param p -> let box = new filename_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | List_param f -> let box = f tt in let _ = main_box#pack ~expand: true ~padding: 2 box#box in box | Custom_param p -> let box = new custom_param_box p tt in let _ = main_box#pack ~expand: p.custom_expand ~padding: 2 box#box in box | Color_param p -> let box = new color_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Font_param p -> let box = new font_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Date_param p -> let box = new date_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Hotkey_param p -> let box = new hotkey_param_box p tt in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Modifiers_param p -> let box = new modifiers_param_box p in let _ = main_box#pack ~expand: false ~padding: 2 box#box in box | Html_param p -> let box = new html_param_box p tt in let _ = main_box#pack ~expand: p.string_expand ~padding: 2 box#box in box in let list_param_box = List.map f param_list in let f_apply () = List.iter (fun param_box -> param_box#apply) list_param_box in (main_box, f_apply) (** This function takes a list of parameter specifications and creates a window to configure the various parameters.*) let simple_edit ?(with_apply=true) ?(apply=(fun () -> ())) title ?width ?height param_list = let dialog = GWindow.dialog ~modal: true ~title: title ?height ?width () in let tooltips = GData.tooltips () in if with_apply then dialog#add_button Configwin_messages.mApply `APPLY; dialog#add_button Configwin_messages.mOk `OK; dialog#add_button Configwin_messages.mCancel `CANCEL; let (box, f_apply) = box param_list tooltips in dialog#vbox#pack ~expand: true ~fill: true box#coerce; let destroy () = tooltips#destroy () ; dialog#destroy (); in let rec iter rep = try match dialog#run () with | `APPLY -> f_apply (); apply (); iter Return_apply | `OK -> f_apply () ; destroy () ; Return_ok | _ -> destroy (); rep with Failure s -> GToolbox.message_box ~title:"Error" s; iter rep | e -> GToolbox.message_box ~title:"Error" (Printexc.to_string e); iter rep in iter Return_cancel let edit_string l s = match GToolbox.input_string ~title: l ~text: s Configwin_messages.mValue with None -> s | Some s2 -> s2 (** Create a string param. *) let string ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) label v = String_param { string_label = label ; string_help = help ; string_value = v ; string_editable = editable ; string_f_apply = f ; string_expand = expand ; string_to_string = (fun x -> x) ; string_of_string = (fun x -> x) ; } (** Create a bool param. *) let bool ?(editable=true) ?help ?(f=(fun _ -> ())) label v = Bool_param { bool_label = label ; bool_help = help ; bool_value = v ; bool_editable = editable ; bool_f_apply = f ; } (** Create a list param. 
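   [f_strings] turns one element into the strings displayed in the columns
   (typically one string per title when [titles] is given). A minimal sketch
   for a list of (host, port) pairs; the labels, titles and default values
   are only illustrative:
   {[
     list
       ~titles: ["Host" ; "Port"]
       ~add: (fun () -> [ ("localhost", 8080) ])
       "Servers"
       (fun (h, p) -> [ h ; string_of_int p ])
       [ ("example.org", 80) ]
   ]}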
*) let list ?(editable=true) ?help ?(f=(fun (_:'a list) -> ())) ?(eq=Pervasives.(=)) ?(edit:('a -> 'a) option) ?(add=(fun () -> ([] : 'a list))) ?titles ?(color=(fun (_:'a) -> (None : string option))) label (f_strings : 'a -> string list) v = List_param (fun tt -> new list_param_box { list_label = label ; list_help = help ; list_value = v ; list_editable = editable ; list_titles = titles; list_eq = eq ; list_strings = f_strings ; list_color = color ; list_f_edit = edit ; list_f_add = add ; list_f_apply = f ; } tt ) (** Create a strings param. *) let strings ?(editable=true) ?help ?(f=(fun _ -> ())) ?(eq=Pervasives.(=)) ?(add=(fun () -> [])) label v = list ~editable ?help ~f ~eq ~edit: (edit_string label) ~add label (fun s -> [s]) v (** Create a color param. *) let color ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) label v = Color_param { color_label = label ; color_help = help ; color_value = v ; color_editable = editable ; color_f_apply = f ; color_expand = expand ; } (** Create a font param. *) let font ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) label v = Font_param { font_label = label ; font_help = help ; font_value = v ; font_editable = editable ; font_f_apply = f ; font_expand = expand ; } (** Create a combo param. *) let combo ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) ?(new_allowed=false) ?(blank_allowed=false) label choices v = Combo_param { combo_label = label ; combo_help = help ; combo_value = v ; combo_editable = editable ; combo_choices = choices ; combo_new_allowed = new_allowed ; combo_blank_allowed = blank_allowed ; combo_f_apply = f ; combo_expand = expand ; } (** Create a text param. *) let text ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) label v = Text_param { string_label = label ; string_help = help ; string_value = v ; string_editable = editable ; string_f_apply = f ; string_expand = expand ; string_to_string = (fun x -> x) ; string_of_string = (fun x -> x) ; } (** Create a html param. *) let html ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) label v = Html_param { string_label = label ; string_help = help ; string_value = v ; string_editable = editable ; string_f_apply = f ; string_expand = expand ; string_to_string = (fun x -> x) ; string_of_string = (fun x -> x) ; } (** Create a filename param. *) let filename ?(editable=true) ?(expand=true)?help ?(f=(fun _ -> ())) label v = Filename_param { string_label = label ; string_help = help ; string_value = v ; string_editable = editable ; string_f_apply = f ; string_expand = expand ; string_to_string = (fun x -> x) ; string_of_string = (fun x -> x) ; } (** Create a filenames param.*) let filenames ?(editable=true) ?help ?(f=(fun _ -> ())) ?(eq=Pervasives.(=)) label v = let add () = select_files label in list ~editable ?help ~f ~eq ~add label (fun s -> [Glib.Convert.locale_to_utf8 s]) v (** Create a date param. *) let date ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) ?(f_string=(fun(d,m,y)-> Printf.sprintf "%d/%d/%d" y (m+1) d)) label v = Date_param { date_label = label ; date_help = help ; date_value = v ; date_editable = editable ; date_f_string = f_string ; date_f_apply = f ; date_expand = expand ; } (** Create a hot key param. 
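   The value is a pair (modifier list, keysym). A minimal sketch binding
   Ctrl-F5; the label and the apply callback are only illustrative:
   {[
     hotkey
       ~f: (fun hk -> prerr_endline (Configwin_types.key_to_string hk))
       "Refresh"
       ([`CONTROL], GdkKeysyms._F5)
   ]}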
*) let hotkey ?(editable=true) ?(expand=true) ?help ?(f=(fun _ -> ())) label v = Hotkey_param { hk_label = label ; hk_help = help ; hk_value = v ; hk_editable = editable ; hk_f_apply = f ; hk_expand = expand ; } let modifiers ?(editable=true) ?(expand=true) ?help ?(allow=[`CONTROL;`SHIFT;`LOCK;`MOD1;`MOD2;`MOD3;`MOD4;`MOD5]) ?(f=(fun _ -> ())) label v = Modifiers_param { md_label = label ; md_help = help ; md_value = v ; md_editable = editable ; md_f_apply = f ; md_expand = expand ; md_allow = allow ; } (** Create a custom param.*) let custom ?label box f expand = Custom_param { custom_box = box ; custom_f_apply = f ; custom_expand = expand ; custom_framed = label ; } coq-8.4pl2/ide/utils/okey.mli0000640000175000001440000001202011254456226015204 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** Okey interface. Once the lib is compiled and installed, you can use it by referencing it with the [Okey] module. You must add [okey.cmo] or [okey.cmx] on the commande line when you link. *) type modifier = Gdk.Tags.modifier (** Set the default modifier list. The first default value is [[]].*) val set_default_modifiers : modifier list -> unit (** Set the default modifier mask. The first default value is [[`MOD2 ; `MOD3 ; `MOD4 ; `MOD5 ; `LOCK]]. The mask defines the modifiers not taken into account when looking for the handler of a key press event. *) val set_default_mask : modifier list -> unit (** [add widget key callback] associates the [callback] function to the event "key_press" with the given [key] for the given [widget]. @param remove when true, the previous handlers for the given key and modifier list are not kept. @param cond this function is a guard: the [callback] function is not called if the [cond] function returns [false]. The default [cond] function always returns [true]. @param mods the list of modifiers. If not given, the default modifiers are used. You can set the default modifiers with function {!Okey.set_default_modifiers}. @param mask the list of modifiers which must not be taken into account to trigger the given handler. [mods] and [mask] must not have common modifiers. If not given, the default mask is used. You can set the default modifiers mask with function {!Okey.set_default_mask}. *) val add : < connect : < destroy : callback: (unit -> unit) -> GtkSignal.id; .. >; event : GObj.event_ops; get_oid : int; .. 
> -> ?cond: (unit -> bool) -> ?mods: modifier list -> ?mask: modifier list -> Gdk.keysym -> (unit -> unit) -> unit (** It calls {!Okey.add} for each given key.*) val add_list : < connect : < destroy : callback: (unit -> unit) -> GtkSignal.id; .. >; event : GObj.event_ops; get_oid : int; .. > -> ?cond: (unit -> bool) -> ?mods: modifier list -> ?mask: modifier list -> Gdk.keysym list -> (unit -> unit) -> unit (** Like {!Okey.add} but the previous handlers for the given modifiers and key are not kept.*) val set : < connect : < destroy : callback: (unit -> unit) -> GtkSignal.id; .. >; event : GObj.event_ops; get_oid : int; .. > -> ?cond: (unit -> bool) -> ?mods: modifier list -> ?mask: modifier list -> Gdk.keysym -> (unit -> unit) -> unit (** It calls {!Okey.set} for each given key.*) val set_list : < connect : < destroy : callback: (unit -> unit) -> GtkSignal.id; .. >; event : GObj.event_ops; get_oid : int; .. > -> ?cond: (unit -> bool) -> ?mods: modifier list -> ?mask: modifier list -> Gdk.keysym list -> (unit -> unit) -> unit (** Remove the handlers associated to the given widget. This is automatically done when a widget is destroyed but you can do it yourself. *) val remove_widget : < connect : < destroy : callback: (unit -> unit) -> GtkSignal.id; .. >; event : GObj.event_ops; get_oid : int; .. > -> unit -> unit coq-8.4pl2/ide/utils/configwin.ml0000640000175000001440000000712211744020214016042 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. 
*) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) type parameter_kind = Configwin_types.parameter_kind type configuration_structure = Configwin_types.configuration_structure = Section of string * GtkStock.id option * parameter_kind list | Section_list of string * GtkStock.id option * configuration_structure list type return_button = Configwin_types.return_button = Return_apply | Return_ok | Return_cancel let string_to_key = Configwin_types.string_to_key let key_to_string = Configwin_types.key_to_string let key_cp_wrapper = Configwin_types.key_cp_wrapper class key_cp = Configwin_types.key_cp let string = Configwin_ihm.string let text = Configwin_ihm.text let strings = Configwin_ihm.strings let list = Configwin_ihm.list let bool = Configwin_ihm.bool let filename = Configwin_ihm.filename let filenames = Configwin_ihm.filenames let color = Configwin_ihm.color let font = Configwin_ihm.font let combo = Configwin_ihm.combo let custom = Configwin_ihm.custom let date = Configwin_ihm.date let hotkey = Configwin_ihm.hotkey let modifiers = Configwin_ihm.modifiers let html = Configwin_ihm.html let edit ?(apply=(fun () -> ())) title ?width ?height conf_struct_list = Configwin_ihm.edit ~with_apply: true ~apply title ?width ?height conf_struct_list let get = Configwin_ihm.edit ~with_apply: false ~apply: (fun () -> ()) let simple_edit ?(apply=(fun () -> ())) title ?width ?height param_list = Configwin_ihm.simple_edit ~with_apply: true ~apply title ?width ?height param_list let simple_get = Configwin_ihm.simple_edit ~with_apply: false ~apply: (fun () -> ()) let box = Configwin_ihm.box let tabbed_box = Configwin_ihm.tabbed_box coq-8.4pl2/ide/utils/config_file.ml0000640000175000001440000007050411366307247016345 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (* TODO *) (* section comments *) (* better obsoletes: no "{}", line cuts *) (* possible improvements: *) (* use lex/yacc instead of genlex to be more robust, efficient, allow arrays and other types, read comments. *) (* description and help, level (beginner/advanced/...) 
for each cp *) (* find an option from its name and group *) (* class hooks *) (* get the sections of a group / of a file *) (* read file format from inifiles and ConfigParser *) (* Read the mli before reading this file! *) (* ******************************************************************************** *) (* ******************************** misc utilities ******************************** *) (* ******************************************************************************** *) (* This code is intended to be usable without any dependencies. *) (* pipeline style, see for instance Raw.of_channel. *) let (|>) x f = f x (* as List.assoc, but applies f to the element matching [key] and returns the list where this element has been replaced by the result of f. *) let rec list_assoc_remove key f = function | [] -> raise Not_found | (key',value) as elt :: tail -> if key <> key' then elt :: list_assoc_remove key f tail else match f value with | None -> tail | Some a -> (key',a) :: tail (* reminiscent of String.concat. Same as [Queue.iter f1 queue] but calls [f2 ()] between each calls to f1. Does not call f2 before the first call nor after the last call to f2. Could be more efficient with a richer module interface of Queue. *) let queue_iter_between f1 f2 queue = (* let f flag elt = if flag then f2 (); (f1 elt:unit); true in *) let f flag elt = if flag then f2 (); f1 elt; true in ignore (Queue.fold f false queue) let list_iter_between f1 f2 = function [] -> () | a::[] -> f1 a | a::tail -> f1 a; List.iter (fun elt -> (f2 ():unit); f1 elt) tail (* | a::tail -> f1 a; List.iter (fun elt -> f2 (); f1 elt) tail *) (* !! types ??? *) (* to ensure that strings will be parsed correctly by Genlex. It's more comfortable not to have quotes around the string, but sometimes it's necessary. *) exception Unsafe_string let safe_string s = if s = "" then "\"\"" else if ( try match s.[0] with | 'a'..'z' | 'A'..'Z' -> for i = 1 to String.length s - 1 do match s.[i] with 'a'..'z' | 'A'..'Z' | '_' | '0'..'9' -> () | _ -> raise Unsafe_string done; false | _ -> try string_of_int (int_of_string s) <> s || string_of_float (float_of_string s) <> s with Failure "int_of_string" | Failure "float_of_string" -> true with Unsafe_string -> true) then Printf.sprintf "\"%s\"" (String.escaped s) else s (* ******************************************************************************** *) (* ************************************* core ************************************* *) (* ******************************************************************************** *) module Raw = struct type cp = | String of string | Int of int | Float of float | List of cp list | Tuple of cp list | Section of (string * cp) list (* code generated by camlp4 pa_o.cmo pa_op.cmo pr_o.cmo -- -o config_file_parser.ml -impl config_file_parser.ml4 Unreadable on purpose, edit the file config_file_parser.ml4 rather than editing this (huge) lines. Then manually copy-paste here the content of config_file_parser.ml. 
Could be one day rewritten with ocamllex/yacc to be more robust, efficient, allow arrays, read comments...*) module Parse = struct let lexer = Genlex.make_lexer ["="; "{"; "}"; "["; "]"; ";"; "("; ")"; ","] let rec file l (strm__ : _ Stream.t) = match try Some (ident strm__) with Stream.Failure -> None with Some id -> begin match Stream.peek strm__ with Some (Genlex.Kwd "=") -> Stream.junk strm__; let v = try value strm__ with Stream.Failure -> raise (Stream.Error "") in begin try file ((id, v) :: l) strm__ with Stream.Failure -> raise (Stream.Error "") end | _ -> raise (Stream.Error "") end | _ -> List.rev l and value (strm__ : _ Stream.t) = match Stream.peek strm__ with Some (Genlex.Kwd "{") -> Stream.junk strm__; let v = try file [] strm__ with Stream.Failure -> raise (Stream.Error "") in begin match Stream.peek strm__ with Some (Genlex.Kwd "}") -> Stream.junk strm__; Section v | _ -> raise (Stream.Error "") end | Some (Genlex.Ident s) -> Stream.junk strm__; String s | Some (Genlex.String s) -> Stream.junk strm__; String s | Some (Genlex.Int i) -> Stream.junk strm__; Int i | Some (Genlex.Float f) -> Stream.junk strm__; Float f | Some (Genlex.Char c) -> Stream.junk strm__; String (String.make 1 c) | Some (Genlex.Kwd "[") -> Stream.junk strm__; let v = try list [] strm__ with Stream.Failure -> raise (Stream.Error "") in List v | Some (Genlex.Kwd "(") -> Stream.junk strm__; let v = try list [] strm__ with Stream.Failure -> raise (Stream.Error "") in Tuple v | _ -> raise Stream.Failure and ident (strm__ : _ Stream.t) = match Stream.peek strm__ with Some (Genlex.Ident s) -> Stream.junk strm__; s | Some (Genlex.String s) -> Stream.junk strm__; s | _ -> raise Stream.Failure and list l (strm__ : _ Stream.t) = match Stream.peek strm__ with Some (Genlex.Kwd ";") -> Stream.junk strm__; begin try list l strm__ with Stream.Failure -> raise (Stream.Error "") end | Some (Genlex.Kwd ",") -> Stream.junk strm__; begin try list l strm__ with Stream.Failure -> raise (Stream.Error "") end | _ -> match try Some (value strm__) with Stream.Failure -> None with Some v -> begin try list (v :: l) strm__ with Stream.Failure -> raise (Stream.Error "") end | _ -> match Stream.peek strm__ with Some (Genlex.Kwd "]") -> Stream.junk strm__; List.rev l | Some (Genlex.Kwd ")") -> Stream.junk strm__; List.rev l | _ -> raise Stream.Failure end open Format (* formating convention: the caller has to open the box, close it and flush the output *) (* remarks on Format: set_margin impose un appel set_max_indent sprintf et bprintf sont flushes chaque appel*) (* pretty print a Raw.cp *) let rec save formatter = function | String s -> fprintf formatter "%s" (safe_string s) (* How can I cut lines and *) | Int i -> fprintf formatter "%d" i (* print backslashes just before the \n? 
*) | Float f -> fprintf formatter "%g" f | List l -> fprintf formatter "[@["; list_iter_between (fun v -> fprintf formatter "@["; save formatter v; fprintf formatter "@]") (fun () -> fprintf formatter ";@ ") l; fprintf formatter "@]]" | Tuple l -> fprintf formatter "(@["; list_iter_between (fun v -> fprintf formatter "@["; save formatter v; fprintf formatter "@]") (fun () -> fprintf formatter ",@ ") l; fprintf formatter "@])" | Section l -> fprintf formatter "{@;<0 2>@["; list_iter_between (fun (name,value) -> fprintf formatter "@[%s =@ @[" name; save formatter value; fprintf formatter "@]@]";) (fun () -> fprintf formatter "@;<2 0>") l; fprintf formatter "@]}" (* let to_string r = save str_formatter r; flush_str_formatter () *) let to_channel out_channel r = let f = formatter_of_out_channel out_channel in fprintf f "@["; save f r; fprintf f "@]@?" let of_string s = s |> Stream.of_string |> Parse.lexer |> Parse.value let of_channel in_channel = let result = in_channel |> Stream.of_channel |> Parse.lexer |> Parse.file [] in close_in in_channel; result end (* print the given string in a way compatible with Format. Truncate the lines when needed, indent the newlines.*) let print_help formatter = String.iter (function | ' ' -> Format.pp_print_space formatter () | '\n' -> Format.pp_force_newline formatter () | c -> Format.pp_print_char formatter c) type 'a wrappers = { to_raw : 'a -> Raw.cp; of_raw : Raw.cp -> 'a} class type ['a] cp = object (* method private to_raw = wrappers.to_raw *) (* method private of_raw = wrappers.of_raw *) (* method private set_string s = s |> Raw.of_string |> self#of_raw |> self#set *) method add_hook : ('a -> 'a -> unit) -> unit method get : 'a method get_default : 'a method set : 'a -> unit method reset : unit method get_formatted : Format.formatter -> unit method get_default_formatted : Format.formatter -> unit method get_help_formatted : Format.formatter -> unit method get_name : string list method get_short_name : string option method set_short_name : string -> unit method get_help : string method get_spec : Arg.spec method set_raw : Raw.cp -> unit end type groupable_cp = < get_name : string list; get_short_name : string option; get_help : string; get_formatted : Format.formatter -> unit; get_default_formatted : Format.formatter -> unit; get_help_formatted : Format.formatter -> unit; get_spec : Arg.spec; reset : unit; set_raw : Raw.cp -> unit; > exception Double_name exception Missing_cp of groupable_cp exception Wrong_type of (out_channel -> unit) (* Two exceptions to stop the iteration on queues. *) exception Found exception Found_cp of groupable_cp (* The data structure to store the cps. It's a tree, each node is a section, and a queue of sons with their name. Each leaf contains a cp. *) type 'a nametree = | Immediate of 'a | Subsection of ((string * 'a nametree) Queue.t) (* this Queue must be nonempty for group.read.choose *) class group = object (self) val mutable cps = Queue.create () (* hold all the added cps, in a nametree. *) method add : 'a. 'a cp -> unit = fun original_cp -> let cp = (original_cp :> groupable_cp) in (* function called when we reach the end of the list cp#get_name. *) let add_immediate name cp queue = Queue.iter (fun (name',_) -> if name = name' then raise Double_name) queue; Queue.push (name, Immediate cp) queue in (* adds the cp with name [first_name::last_name] in section [section]. 
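      For instance, a cp whose [get_name] is ["network"; "proxy"; "port"] reaches
      this function as [add_in_section "network" "proxy" ["port"] cp cps]: the
      "network" and "proxy" subsections are created on the fly when absent, and
      the cp is finally registered under the key "port".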
*) let rec add_in_section section first_name last_name cp queue = let sub_add = match last_name with (* what to do once we have find the correct section *) | [] -> add_immediate first_name | middle_name :: last_name -> add_in_section first_name middle_name last_name in try Queue.iter (function | name, Subsection subsection when name = section -> sub_add cp subsection; raise Found | _ -> ()) queue; let sub_queue = Queue.create () in sub_add cp sub_queue; Queue.push (section, Subsection sub_queue) queue with Found -> () in (match cp#get_name with | [] -> failwith "empty name" | first_name :: [] -> add_immediate first_name cp cps | first_name :: middle_name :: last_name -> add_in_section first_name middle_name last_name cp cps) method write ?(with_help=true) filename = let out_channel = open_out filename in let formatter = Format.formatter_of_out_channel out_channel in let print = Format.fprintf formatter in print "@["; let rec save_queue formatter = queue_iter_between (fun (name,nametree) -> save_nametree name nametree) (Format.pp_print_cut formatter) and save_nametree name = function | Immediate cp -> if with_help && cp#get_help <> "" then (print "@[(* "; cp#get_help_formatted formatter; print "@ *)@]@,"); Format.fprintf formatter "@[%s =@ @[" (safe_string name); cp#get_formatted formatter; print "@]@]" | Subsection queue -> Format.fprintf formatter "%s = {@;<0 2>@[" (safe_string name); save_queue formatter queue; print "@]@,}" in save_queue formatter cps; print "@]@."; close_out out_channel method read ?obsoletes ?(no_default=false) ?(on_type_error = fun groupable_cp raw_cp output filename in_channel -> close_in in_channel; Printf.eprintf "Type error while loading configuration parameter %s from file %s.\n%!" (String.concat "." groupable_cp#get_name) filename; output stderr; exit 1) filename = (* [filename] is created if it doesn't exist. In this case there is no need to read it. *) match Sys.file_exists filename with false -> self#write filename | true -> let in_channel = open_in filename in (* what to do when a cp is missing: *) let missing cp default = if no_default then raise (Missing_cp cp) else default in (* returns a cp contained in the nametree queue, which must be nonempty *) let choose queue = let rec iter q = Queue.iter (function | _, Immediate cp -> raise (Found_cp cp) | _, Subsection q -> iter q) q in try iter queue; failwith "choose" with Found_cp cp -> cp in (* [set_and_remove raw_cps nametree] sets the cp of [nametree] to their value defined in [raw_cps] and returns the remaining raw_cps. *) let set_cp cp value = try cp#set_raw value with Wrong_type output -> on_type_error cp value output filename in_channel in let rec set_and_remove raw_cps = function | name, Immediate cp -> (try list_assoc_remove name (fun value -> set_cp cp value; None) raw_cps with Not_found -> missing cp raw_cps) | name, Subsection queue -> (try list_assoc_remove name (function | Raw.Section l -> (match remainings l queue with | [] -> None | l -> Some (Raw.Section l)) | r -> missing (choose queue) (Some r)) raw_cps with Not_found -> missing (choose queue) raw_cps) and remainings raw_cps queue = Queue.fold set_and_remove raw_cps queue in let remainings = remainings (Raw.of_channel in_channel) cps in (* Handling of cps defined in filename but not belonging to self. 
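      When an [obsoletes] file was given to [read], they are dumped there as a
      raw section, so that the values of options which no longer exist in the
      program are not lost; otherwise they are simply ignored.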
*) if remainings <> [] then match obsoletes with | Some filename -> let out_channel = open_out filename in (* open_out_gen [Open_wronly; Open_creat; Open_append; Open_text] 0o666 filename in *) let formatter = Format.formatter_of_out_channel out_channel in Format.fprintf formatter "@["; Raw.save formatter (Raw.Section remainings); Format.fprintf formatter "@]@."; close_out out_channel | None -> () method command_line_args ~section_separator = let print = Format.fprintf Format.str_formatter in (* shortcut *) let result = ref [] in let push x = result := x :: !result in let rec iter = function | _, Immediate cp -> let key = "-" ^ String.concat section_separator cp#get_name in let spec = cp#get_spec in let doc = ( print "@["; Format.pp_print_as Format.str_formatter (String.length key +3) ""; if cp#get_help <> "" then (print "@,@["; cp#get_help_formatted Format.str_formatter; print "@]@ ") else print "@,"; print "@[@[current:@;<1 2>@["; cp#get_formatted Format.str_formatter; print "@]@],@ @[default:@;<1 2>@["; cp#get_default_formatted Format.str_formatter; print "@]@]@]@]"; Format.flush_str_formatter ()) in (match cp#get_short_name with | None -> () | Some short_name -> push ("-" ^ short_name,spec,"")); push (key,spec,doc) | _, Subsection queue -> Queue.iter iter queue in Queue.iter iter cps; List.rev !result end (* Given wrappers for the type 'a, cp_custom_type defines a class 'a cp. *) class ['a] cp_custom_type wrappers ?group:(group:group option) name ?short_name default help = object (self) method private to_raw = wrappers.to_raw method private of_raw = wrappers.of_raw val mutable value = default (* output *) method get = value method get_default = default method get_formatted formatter = self#get |> self#to_raw |> Raw.save formatter method get_default_formatted formatter = self#get_default |> self#to_raw |> Raw.save formatter (* input *) method set v = let v' = value in value <- v; self#exec_hooks v' v method set_raw v = self#of_raw v |> self#set method private set_string s = s |> Raw.of_string |> self#of_raw |> self#set method reset = self#set self#get_default (* name *) val mutable shortname = short_name method get_name = name method get_short_name = shortname method set_short_name s = shortname <- Some s (* help *) method get_help = help method get_help_formatted formatter = print_help formatter self#get_help method get_spec = Arg.String self#set_string (* hooks *) val mutable hooks = [] method add_hook f = hooks <- (f:'a->'a->unit) :: hooks method private exec_hooks v' v = List.iter (fun f -> f v' v) hooks initializer match group with Some g -> g#add (self :> 'a cp) | None -> () end (* ******************************************************************************** *) (* ****************************** predefined classes ****************************** *) (* ******************************************************************************** *) let int_wrappers = { to_raw = (fun v -> Raw.Int v); of_raw = function | Raw.Int v -> v | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Int expected, got %a\n%!" Raw.to_channel r))} class int_cp ?group name ?short_name default help = object (self) inherit [int] cp_custom_type int_wrappers ?group name ?short_name default help method get_spec = Arg.Int self#set end let float_wrappers = { to_raw = (fun v -> Raw.Float v); of_raw = function | Raw.Float v -> v | Raw.Int v -> float v | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Float expected, got %a\n%!" 
Raw.to_channel r)) } class float_cp ?group name ?short_name default help = object (self) inherit [float] cp_custom_type float_wrappers ?group name ?short_name default help method get_spec = Arg.Float self#set end (* The Pervasives version is too restrictive *) let bool_of_string s = match String.lowercase s with | "false" | "no" | "n" | "0" -> false (* "0" and "1" aren't used. *) | "true" | "yes" | "y" | "1" -> true | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Bool expected, got %s\n%!" r)) let bool_wrappers = { to_raw = (fun v -> Raw.String (string_of_bool v)); of_raw = function | Raw.String v -> bool_of_string v | Raw.Int v -> v <> 0 | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Bool expected, got %a\n%!" Raw.to_channel r)) } class bool_cp ?group name ?short_name default help = object (self) inherit [bool] cp_custom_type bool_wrappers ?group name ?short_name default help method get_spec = Arg.Bool self#set end let string_wrappers = { to_raw = (fun v -> Raw.String v); of_raw = function | Raw.String v -> v | Raw.Int v -> string_of_int v | Raw.Float v -> string_of_float v | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.String expected, got %a\n%!" Raw.to_channel r)) } class string_cp ?group name ?short_name default help = object (self) inherit [string] cp_custom_type string_wrappers ?group name ?short_name default help method private of_string s = s method get_spec = Arg.String self#set end let list_wrappers wrappers = { to_raw = (fun l -> Raw.List (List.map wrappers.to_raw l)); of_raw = function | Raw.List l -> List.map wrappers.of_raw l | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.List expected, got %a\n%!" Raw.to_channel r)) } class ['a] list_cp wrappers = ['a list] cp_custom_type (list_wrappers wrappers) let option_wrappers wrappers = { to_raw = (function | Some v -> wrappers.to_raw v | None -> Raw.String ""); of_raw = function | Raw.String s as v -> ( if s = "" || s = "None" then None else if String.length s >= 5 && String.sub s 0 5 = "Some " then Some (wrappers.of_raw (Raw.String (String.sub s 5 (String.length s -5)))) else Some (wrappers.of_raw v)) | r -> Some (wrappers.of_raw r)} class ['a] option_cp wrappers = ['a option] cp_custom_type (option_wrappers wrappers) let enumeration_wrappers enum = let switched = List.map (fun (string,cons) -> cons,string) enum in {to_raw = (fun v -> Raw.String (List.assq v switched)); of_raw = function | Raw.String s -> (try List.assoc s enum with Not_found -> failwith (Printf.sprintf "%s isn't a known constructor" s)) | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw enumeration expected, got %a\n%!" Raw.to_channel r)) } class ['a] enumeration_cp enum ?group name ?short_name default help = object (self) inherit ['a] cp_custom_type (enumeration_wrappers enum) ?group name ?short_name default help method get_spec = Arg.Symbol (List.map fst enum, (fun s -> self#set (List.assoc s enum))) end let tuple2_wrappers wrapa wrapb = { to_raw = (fun (a,b) -> Raw.Tuple [wrapa.to_raw a; wrapb.to_raw b]); of_raw = function | Raw.Tuple [a;b] -> wrapa.of_raw a, wrapb.of_raw b | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Tuple 2 expected, got %a\n%!" 
Raw.to_channel r)) } class ['a, 'b] tuple2_cp wrapa wrapb = ['a*'b] cp_custom_type (tuple2_wrappers wrapa wrapb) let tuple3_wrappers wrapa wrapb wrapc = { to_raw = (fun (a,b,c) -> Raw.Tuple[wrapa.to_raw a; wrapb.to_raw b; wrapc.to_raw c]); of_raw = function | Raw.Tuple [a;b;c] -> wrapa.of_raw a, wrapb.of_raw b, wrapc.of_raw c | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Tuple 3 expected, got %a\n%!" Raw.to_channel r)) } class ['a,'b,'c] tuple3_cp wrapa wrapb wrapc = ['a*'b*'c] cp_custom_type (tuple3_wrappers wrapa wrapb wrapc) let tuple4_wrappers wrapa wrapb wrapc wrapd = { to_raw=(fun (a,b,c,d)->Raw.Tuple[wrapa.to_raw a;wrapb.to_raw b;wrapc.to_raw c;wrapd.to_raw d]); of_raw = function | Raw.Tuple [a;b;c;d] -> wrapa.of_raw a, wrapb.of_raw b, wrapc.of_raw c, wrapd.of_raw d | r -> raise (Wrong_type (fun outchan -> Printf.fprintf outchan "Raw.Tuple 4 expected, got %a\n%!" Raw.to_channel r)) } class ['a,'b,'c,'d] tuple4_cp wrapa wrapb wrapc wrapd = ['a*'b*'c*'d] cp_custom_type (tuple4_wrappers wrapa wrapb wrapc wrapd) class string2_cp = [string,string] tuple2_cp string_wrappers string_wrappers (* class color_cp = string_cp *) class font_cp = string_cp class filename_cp = string_cp (* ******************************************************************************** *) (******************** Backward compatibility with module Flags.****************** *) (* ******************************************************************************** *) type 'a option_class = 'a wrappers type 'a option_record = 'a cp type options_file = {mutable filename:string; group:group} let create_options_file filename = {filename = filename; group = new group} let set_options_file options_file filename = options_file.filename <- filename let load {filename=f; group = g} = g#read f let append {group=g} filename = g#read filename let save {filename=f; group = g} = g#write ~with_help:false f let save_with_help {filename=f; group = g} = g#write ~with_help:true f let define_option {group=group} name help option_class default = (new cp_custom_type option_class ~group name default help) let option_hook cp f = cp#add_hook (fun _ _ -> f ()) let string_option = string_wrappers let color_option = string_wrappers let font_option = string_wrappers let int_option = int_wrappers let bool_option = bool_wrappers let float_option = float_wrappers let string2_option = tuple2_wrappers string_wrappers string_wrappers let option_option = option_wrappers let list_option = list_wrappers let sum_option = enumeration_wrappers let tuple2_option (a,b) = tuple2_wrappers a b let tuple3_option (a,b,c) = tuple3_wrappers a b c let tuple4_option (a,b,c,d) = tuple4_wrappers a b c d let ( !! 
) cp = cp#get let ( =:= ) cp value = cp#set value let shortname cp = String.concat ":" cp#get_name let get_help cp = cp#get_help type option_value = Module of option_module | StringValue of string | IntValue of int | FloatValue of float | List of option_value list | SmallList of option_value list and option_module = (string * option_value) list let rec value_to_raw = function | Module a -> Raw.Section (List.map (fun (name,value) -> name, value_to_raw value) a) | StringValue a -> Raw.String a | IntValue a -> Raw.Int a | FloatValue a -> Raw.Float a | List a -> Raw.List (List.map value_to_raw a) | SmallList a -> Raw.Tuple (List.map value_to_raw a) let rec raw_to_value = function | Raw.String a -> StringValue a | Raw.Int a -> IntValue a | Raw.Float a -> FloatValue a | Raw.List a -> List (List.map raw_to_value a) | Raw.Tuple a -> SmallList (List.map raw_to_value a) | Raw.Section a -> Module (List.map (fun (name,value) -> name, raw_to_value value) a) let define_option_class _ of_option_value to_option_value = {to_raw = (fun a -> a |> to_option_value |> value_to_raw); of_raw = (fun a -> a |> raw_to_value |> of_option_value)} let to_value {to_raw = to_raw} a = a |> to_raw |> raw_to_value let from_value {of_raw = of_raw} a = a |> value_to_raw |> of_raw let of_value_w wrappers a = a |> value_to_raw |> wrappers.of_raw let to_value_w wrappers a = a |> wrappers.to_raw |> raw_to_value (* fancy indentation when finishing this stub code, not good style :-) *) let value_to_string : option_value -> string = of_value_w string_option let string_to_value = to_value_w string_option let value_to_int = of_value_w int_option let int_to_value = to_value_w int_option let value_to_bool = of_value_w bool_option let bool_to_value = to_value_w bool_option let value_to_float = of_value_w float_option let float_to_value = to_value_w float_option let value_to_string2 = of_value_w string2_option let string2_to_value = to_value_w string2_option let value_to_list of_value = let wrapper = define_option_class "" of_value (fun _ -> failwith "value_to_list") in of_value_w (list_option wrapper) let list_to_value to_value = let wrapper = define_option_class "" (fun _ -> failwith "value_to_list") to_value in to_value_w (list_option wrapper) coq-8.4pl2/ide/utils/configwin_types.ml0000640000175000001440000002716611651507213017306 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. *) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) (** This module contains the types used in Configwin. 
*) open Config_file let name_to_keysym = ("Button1", Configwin_keys.xk_Pointer_Button1) :: ("Button2", Configwin_keys.xk_Pointer_Button2) :: ("Button3", Configwin_keys.xk_Pointer_Button3) :: ("Button4", Configwin_keys.xk_Pointer_Button4) :: ("Button5", Configwin_keys.xk_Pointer_Button5) :: Configwin_keys.name_to_keysym let string_to_key s = let mask = ref [] in let key = try let pos = String.rindex s '-' in for i = 0 to pos - 1 do let m = match s.[i] with 'C' -> `CONTROL | 'S' -> `SHIFT | 'L' -> `LOCK | 'M' -> `MOD1 | 'A' -> `MOD1 | '1' -> `MOD1 | '2' -> `MOD2 | '3' -> `MOD3 | '4' -> `MOD4 | '5' -> `MOD5 | _ -> prerr_endline s; raise Not_found in mask := m :: !mask done; String.sub s (pos+1) (String.length s - pos - 1) with _ -> s in try !mask, List.assoc key name_to_keysym with e -> prerr_endline s; raise e let key_to_string (m, k) = let s = List.assoc k Configwin_keys.keysym_to_name in match m with [] -> s | _ -> let rec iter m s = match m with [] -> s | c :: m -> iter m (( match c with `CONTROL -> "C" | `SHIFT -> "S" | `LOCK -> "L" | `MOD1 -> "A" | `MOD2 -> "2" | `MOD3 -> "3" | `MOD4 -> "4" | `MOD5 -> "5" | _ -> raise Not_found ) ^ s) in iter m ("-" ^ s) let modifiers_to_string m = let rec iter m s = match m with [] -> s | c :: m -> iter m (( match c with `CONTROL -> "" | `SHIFT -> "" | `LOCK -> "" | `MOD1 -> "" | `MOD2 -> "" | `MOD3 -> "" | `MOD4 -> "" | `MOD5 -> "" | _ -> raise Not_found ) ^ s) in iter m "" let value_to_key v = match v with Raw.String s -> string_to_key s | _ -> prerr_endline "value_to_key"; raise Not_found let key_to_value k = Raw.String (key_to_string k) let key_cp_wrapper = { to_raw = key_to_value ; of_raw = value_to_key ; } (** A class to define key options, with the {!Config_file} module. *) class key_cp = [(Gdk.Tags.modifier list * int)] Config_file.cp_custom_type key_cp_wrapper (** This type represents a string or filename parameter, or any other type, depending on the given conversion functions. *) type 'a string_param = { string_label : string; (** the label of the parameter *) mutable string_value : 'a; (** the current value of the parameter *) string_editable : bool ; (** indicates if the value can be changed *) string_f_apply : ('a -> unit) ; (** the function to call to apply the new value of the parameter *) string_help : string option ; (** optional help string *) string_expand : bool ; (** expand or not *) string_to_string : 'a -> string ; string_of_string : string -> 'a ; } ;; (** This type represents a boolean parameter. *) type bool_param = { bool_label : string; (** the label of the parameter *) mutable bool_value : bool; (** the current value of the parameter *) bool_editable : bool ; (** indicates if the value can be changed *) bool_f_apply : (bool -> unit) ; (** the function to call to apply the new value of the parameter *) bool_help : string option ; (** optional help string *) } ;; (** This type represents a parameter whose value is a list of ['a]. *) type 'a list_param = { list_label : string; (** the label of the parameter *) mutable list_value : 'a list; (** the current value of the parameter *) list_titles : string list option; (** the titles of columns, if they must be displayed *) list_f_edit : ('a -> 'a) option; (** optional edition function *) list_eq : ('a -> 'a -> bool) ; (** the comparison function used to get list without doubles *) list_strings : ('a -> string list); (** the function to get a string list from a ['a]. 
*) list_color : ('a -> string option) ; (** a function to get the optional color of an element *) list_editable : bool ; (** indicates if the value can be changed *) list_f_add : unit -> 'a list ; (** the function returning new elements to add to the list *) list_f_apply : ('a list -> unit) ; (** the function to call to apply the new value of the parameter *) list_help : string option ; (** optional help string *) } ;; type combo_param = { combo_label : string ; mutable combo_value : string ; combo_choices : string list ; combo_editable : bool ; combo_blank_allowed : bool ; combo_new_allowed : bool ; combo_f_apply : (string -> unit); combo_help : string option ; (** optional help string *) combo_expand : bool ; (** expand the entry widget or not *) } ;; type custom_param = { custom_box : GPack.box ; custom_f_apply : (unit -> unit) ; custom_expand : bool ; custom_framed : string option ; (** optional label for an optional frame *) } ;; type color_param = { color_label : string; (** the label of the parameter *) mutable color_value : string; (** the current value of the parameter *) color_editable : bool ; (** indicates if the value can be changed *) color_f_apply : (string -> unit) ; (** the function to call to apply the new value of the parameter *) color_help : string option ; (** optional help string *) color_expand : bool ; (** expand the entry widget or not *) } ;; type date_param = { date_label : string ; (** the label of the parameter *) mutable date_value : int * int * int ; (** day, month, year *) date_editable : bool ; (** indicates if the value can be changed *) date_f_string : (int * int * int) -> string ; (** the function used to display the current value (day, month, year) *) date_f_apply : ((int * int * int) -> unit) ; (** the function to call to apply the new value (day, month, year) of the parameter *) date_help : string option ; (** optional help string *) date_expand : bool ; (** expand the entry widget or not *) } ;; type font_param = { font_label : string ; (** the label of the parameter *) mutable font_value : string ; (** the font name *) font_editable : bool ; (** indicates if the value can be changed *) font_f_apply : (string -> unit) ; (** the function to call to apply the new value of the parameter *) font_help : string option ; (** optional help string *) font_expand : bool ; (** expand the entry widget or not *) } ;; type hotkey_param = { hk_label : string ; (** the label of the parameter *) mutable hk_value : (Gdk.Tags.modifier list * int) ; (** The value, as a list of modifiers and a key code *) hk_editable : bool ; (** indicates if the value can be changed *) hk_f_apply : ((Gdk.Tags.modifier list * int) -> unit) ; (** the function to call to apply the new value of the parameter *) hk_help : string option ; (** optional help string *) hk_expand : bool ; (** expand or not *) } type modifiers_param = { md_label : string ; (** the label of the parameter *) mutable md_value : Gdk.Tags.modifier list ; (** The value, as a list of modifiers *) md_editable : bool ; (** indicates if the value can be changed *) md_f_apply : Gdk.Tags.modifier list -> unit ; (** the function to call to apply the new value of the parameter *) md_help : string option ; (** optional help string *) md_expand : bool ; (** expand or not *) md_allow : Gdk.Tags.modifier list } (** This type represents the different kinds of parameters. 
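   Values of this type are normally obtained through the helper functions of
   [Configwin_ihm] (re-exported by [Configwin]) rather than by applying the
   constructors directly. A minimal sketch, with an illustrative label and
   default value:
   {[
     Configwin.string ~help: "IRC nickname" "nick" "coqtail"
   ]}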
*) type parameter_kind = String_param of string string_param | List_param of (GData.tooltips -> <box: GObj.widget ; apply: unit>) | Filename_param of string string_param | Bool_param of bool_param | Text_param of string string_param | Combo_param of combo_param | Custom_param of custom_param | Color_param of color_param | Date_param of date_param | Font_param of font_param | Hotkey_param of hotkey_param | Modifiers_param of modifiers_param | Html_param of string string_param ;; (** This type represents the structure of the configuration window. *) type configuration_structure = | Section of string * GtkStock.id option * parameter_kind list (** label of the section, icon, parameters *) | Section_list of string * GtkStock.id option * configuration_structure list (** label of the section, list of the sub sections *) ;; (** To indicate what button was pushed by the user when the window is closed. *) type return_button = Return_apply (** The user clicked on Apply at least once before closing the window with Cancel or the window manager. *) | Return_ok (** The user closed the window with the ok button. *) | Return_cancel (** The user closed the window with the cancel button or the window manager but never clicked on the apply button.*) (** {2 Bindings in the html editor} *) type html_binding = { mutable html_key : (Gdk.Tags.modifier list * int) ; mutable html_begin : string ; mutable html_end : string ; } let htmlbinding_cp_wrapper = let w = Config_file.tuple3_wrappers key_cp_wrapper Config_file.string_wrappers Config_file.string_wrappers in { to_raw = (fun v -> w.to_raw (v.html_key, v.html_begin, v.html_end)) ; of_raw = (fun r -> let (k,b,e) = w.of_raw r in { html_key = k ; html_begin = b ; html_end = e } ) ; } class htmlbinding_cp = [html_binding] Config_file.option_cp htmlbinding_cp_wrapper coq-8.4pl2/ide/utils/okey.ml0000640000175000001440000001423512026043504015032 0ustar notinusers(*********************************************************************************) (* Cameleon *) (* *) (* Copyright (C) 2005 Institut National de Recherche en Informatique et *) (* en Automatique. All rights reserved. *) (* *) (* This program is free software; you can redistribute it and/or modify *) (* it under the terms of the GNU Library General Public License as *) (* published by the Free Software Foundation; either version 2 of the *) (* License, or any later version. *) (* *) (* This program is distributed in the hope that it will be useful, *) (* but WITHOUT ANY WARRANTY; without even the implied warranty of *) (* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *) (* GNU Library General Public License for more details. 
*) (* *) (* You should have received a copy of the GNU Library General Public *) (* License along with this program; if not, write to the Free Software *) (* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA *) (* 02111-1307 USA *) (* *) (* Contact: Maxence.Guesdon@inria.fr *) (* *) (*********************************************************************************) type modifier = Gdk.Tags.modifier type handler = { cond : (unit -> bool) ; cback : (unit -> unit) ; } type handler_spec = int * int * Gdk.keysym (** mods * mask * key *) let int_of_modifier = function `SHIFT -> 1 | `LOCK -> 2 | `CONTROL -> 4 | `MOD1 -> 8 | `MOD2 -> 16 | `MOD3 -> 32 | `MOD4 -> 64 | `MOD5 -> 128 | `BUTTON1 -> 256 | `BUTTON2 -> 512 | `BUTTON3 -> 1024 | `BUTTON4 -> 2048 | `BUTTON5 -> 4096 | `HYPER -> 1 lsl 22 | `META -> 1 lsl 20 | `RELEASE -> 1 lsl 30 | `SUPER -> 1 lsl 21 let print_modifier l = List.iter (fun m -> print_string (((function `SHIFT -> "SHIFT" | `LOCK -> "LOCK" | `CONTROL -> "CONTROL" | `MOD1 -> "MOD1" | `MOD2 -> "MOD2" | `MOD3 -> "MOD3" | `MOD4 -> "MOD4" | `MOD5 -> "MOD5" | `BUTTON1 -> "B1" | `BUTTON2 -> "B2" | `BUTTON3 -> "B3" | `BUTTON4 -> "B4" | `BUTTON5 -> "B5" | `HYPER -> "HYPER" | `META -> "META" | `RELEASE -> "" | `SUPER -> "SUPER") m)^" ") ) l; print_newline () let int_of_modifiers l = List.fold_left (fun acc -> fun m -> acc + (int_of_modifier m)) 0 l module H = struct type t = handler_spec * handler let equal (m,k) (mods, mask, key) = (k = key) && ((m land mask) = mods) let filter_with_mask mods mask key l = List.filter (fun a -> (fst a) <> (mods, mask, key)) l let find_handlers mods key l = List.map snd (List.filter (fun ((m,ma,k),_) -> equal (mods,key) (m,ma,k)) l ) end let (table : (int, H.t list ref) Hashtbl.t) = Hashtbl.create 13 let key_press w ev = let key = GdkEvent.Key.keyval ev in let modifiers = GdkEvent.Key.state ev in try let (r : H.t list ref) = Hashtbl.find table (Oo.id w) in let l = H.find_handlers (int_of_modifiers modifiers) key !r in match l with [] -> false | _ -> List.iter (fun h -> if h.cond () then try h.cback () with e -> prerr_endline (Printexc.to_string e) else () ) l; true with Not_found -> false let associate_key_press w = ignore ((w#event#connect#key_press ~callback: (key_press w)) : GtkSignal.id) let default_modifiers = ref ([] : modifier list) let default_mask = ref ([`MOD2 ; `MOD3 ; `MOD4 ; `MOD5 ; `LOCK] : modifier list) let set_default_modifiers l = default_modifiers := l let set_default_mask l = default_mask := l let remove_widget (w : < event : GObj.event_ops ; ..>) () = try let r = Hashtbl.find table (Oo.id w) in r := [] with Not_found -> () let add1 ?(remove=false) w ?(cond=(fun () -> true)) ?(mods= !default_modifiers) ?(mask= !default_mask) k callback = let r = try Hashtbl.find table (Oo.id w) with Not_found -> let r = ref [] in Hashtbl.add table (Oo.id w) r; ignore (w#connect#destroy ~callback: (remove_widget w)); associate_key_press w; r in let n_mods = int_of_modifiers mods in let n_mask = lnot (int_of_modifiers mask) in let new_h = { cond = cond ; cback = callback } in if remove then ( let l = H.filter_with_mask n_mods n_mask k !r in r := ((n_mods, n_mask, k), new_h) :: l ) else r := ((n_mods, n_mask, k), new_h) :: !r let add w ?(cond=(fun () -> true)) ?(mods= !default_modifiers) ?(mask= !default_mask) k callback = add1 w ~cond ~mods ~mask k callback let add_list w ?(cond=(fun () -> true)) ?(mods= !default_modifiers) ?(mask= !default_mask) k_list callback = List.iter (fun k -> add w ~cond ~mods ~mask k callback) k_list let set w ?(cond=(fun () 
-> true)) ?(mods= !default_modifiers) ?(mask= !default_mask) k callback = add1 ~remove: true w ~cond ~mods ~mask k callback let set_list w ?(cond=(fun () -> true)) ?(mods= !default_modifiers) ?(mask= !default_mask) k_list callback = List.iter (fun k -> set w ~cond ~mods ~mask k callback) k_list coq-8.4pl2/ide/project_file.ml40000640000175000001440000001725711770563543015502 0ustar notinuserstype target = | ML of string (* ML file : foo.ml -> (ML "foo.ml") *) | MLI of string (* MLI file : foo.mli -> (MLI "foo.mli") *) | ML4 of string (* ML4 file : foo.ml4 -> (ML4 "foo.ml4") *) | MLLIB of string (* MLLIB file : foo.mllib -> (MLLIB "foo.mllib") *) | MLPACK of string (* MLLIB file : foo.mlpack -> (MLLIB "foo.mlpack") *) | V of string (* V file : foo.v -> (V "foo") *) | Arg of string | Special of string * string * string (* file, dependencies, command *) | Subdir of string | Def of string * string (* X=foo -> Def ("X","foo") *) | Include of string | RInclude of string * string (* -R physicalpath logicalpath *) type install = | NoInstall | TraditionalInstall | UserInstall | UnspecInstall exception Parsing_error let rec parse_string = parser | [< '' ' | '\n' | '\t' >] -> "" | [< 'c; s >] -> (String.make 1 c)^(parse_string s) | [< >] -> "" and parse_string2 = parser | [< ''"' >] -> "" | [< 'c; s >] -> (String.make 1 c)^(parse_string2 s) and parse_skip_comment = parser | [< ''\n'; s >] -> s | [< 'c; s >] -> parse_skip_comment s | [< >] -> [< >] and parse_args = parser | [< '' ' | '\n' | '\t'; s >] -> parse_args s | [< ''#'; s >] -> parse_args (parse_skip_comment s) | [< ''"'; str = parse_string2; s >] -> ("" ^ str) :: parse_args s | [< 'c; str = parse_string; s >] -> ((String.make 1 c) ^ str) :: (parse_args s) | [< >] -> [] let parse f = let c = open_in f in let res = parse_args (Stream.of_channel c) in close_in c; res let rec process_cmd_line orig_dir ((project_file,makefile,install,opt) as opts) l = function | [] -> opts,List.rev l | ("-h"|"--help") :: _ -> raise Parsing_error | ("-no-opt"|"-byte") :: r -> process_cmd_line orig_dir (project_file,makefile,install,false) l r | ("-full"|"-opt") :: r -> process_cmd_line orig_dir (project_file,makefile,install,true) l r | "-impredicative-set" :: r -> Minilib.safe_prerr_endline "Please now use \"-arg -impredicative-set\" instead of \"-impredicative-set\" alone to be more uniform."; process_cmd_line orig_dir opts (Arg "-impredicative-set" :: l) r | "-no-install" :: r -> Minilib.safe_prerr_endline "Option -no-install is deprecated. 
Use \"-install none\" instead"; process_cmd_line orig_dir (project_file,makefile,NoInstall,opt) l r | "-install" :: d :: r -> if install <> UnspecInstall then Minilib.safe_prerr_endline "Warning: -install sets more than once."; let install = match d with | "user" -> UserInstall | "none" -> NoInstall | "global" -> TraditionalInstall | _ -> Minilib.safe_prerr_endline (String.concat "" ["Warning: invalid option '"; d; "' passed to -install."]); install in process_cmd_line orig_dir (project_file,makefile,install,opt) l r | "-custom" :: com :: dependencies :: file :: r -> process_cmd_line orig_dir opts (Special (file,dependencies,com) :: l) r | "-I" :: d :: r -> process_cmd_line orig_dir opts ((Include (Minilib.correct_path d orig_dir)) :: l) r | "-R" :: p :: lp :: r -> process_cmd_line orig_dir opts (RInclude (Minilib.correct_path p orig_dir,lp) :: l) r | ("-I"|"-custom") :: _ -> raise Parsing_error | "-f" :: file :: r -> let file = Minilib.remove_path_dot (Minilib.correct_path file orig_dir) in let () = match project_file with | None -> () | Some _ -> Minilib.safe_prerr_endline "Warning: Several features will not work with multiple project files." in let (opts',l') = process_cmd_line (Filename.dirname file) (Some file,makefile,install,opt) l (parse file) in process_cmd_line orig_dir opts' l' r | ["-f"] -> raise Parsing_error | "-o" :: file :: r -> begin try let _ = String.index file '/' in raise Parsing_error with Not_found -> let () = match makefile with |None -> () |Some f -> Minilib.safe_prerr_endline ("Warning: Only one output file is genererated. "^f^" will not be.") in process_cmd_line orig_dir (project_file,Some file,install,opt) l r end | v :: "=" :: def :: r -> process_cmd_line orig_dir opts (Def (v,def) :: l) r | "-arg" :: a :: r -> process_cmd_line orig_dir opts (Arg a :: l) r | f :: r -> let f = Minilib.correct_path f orig_dir in process_cmd_line orig_dir opts (( if Filename.check_suffix f ".v" then V f else if (Filename.check_suffix f ".ml") then ML f else if (Filename.check_suffix f ".ml4") then ML4 f else if (Filename.check_suffix f ".mli") then MLI f else if (Filename.check_suffix f ".mllib") then MLLIB f else if (Filename.check_suffix f ".mlpack") then MLPACK f else Subdir f) :: l) r let rec post_canonize f = if Filename.basename f = Filename.current_dir_name then let dir = Filename.dirname f in if dir = Filename.current_dir_name then f else post_canonize dir else f (* Return: ((v,(mli,ml4,ml,mllib,mlpack),special,subdir),(i_inc,r_inc),(args,defs)) *) let split_arguments = let rec aux = function | V n :: r -> let (v,m,o,s),i,d = aux r in ((Minilib.remove_path_dot n::v,m,o,s),i,d) | ML n :: r -> let (v,(mli,ml4,ml,mllib,mlpack),o,s),i,d = aux r in ((v,(mli,ml4,Minilib.remove_path_dot n::ml,mllib,mlpack),o,s),i,d) | MLI n :: r -> let (v,(mli,ml4,ml,mllib,mlpack),o,s),i,d = aux r in ((v,(Minilib.remove_path_dot n::mli,ml4,ml,mllib,mlpack),o,s),i,d) | ML4 n :: r -> let (v,(mli,ml4,ml,mllib,mlpack),o,s),i,d = aux r in ((v,(mli,Minilib.remove_path_dot n::ml4,ml,mllib,mlpack),o,s),i,d) | MLLIB n :: r -> let (v,(mli,ml4,ml,mllib,mlpack),o,s),i,d = aux r in ((v,(mli,ml4,ml,Minilib.remove_path_dot n::mllib,mlpack),o,s),i,d) | MLPACK n :: r -> let (v,(mli,ml4,ml,mllib,mlpack),o,s),i,d = aux r in ((v,(mli,ml4,ml,mllib,Minilib.remove_path_dot n::mlpack),o,s),i,d) | Special (n,dep,c) :: r -> let (v,m,o,s),i,d = aux r in ((v,m,(n,dep,c)::o,s),i,d) | Subdir n :: r -> let (v,m,o,s),i,d = aux r in ((v,m,o,n::s),i,d) | Include p :: r -> let t,(i,r),d = aux r in (t,((Minilib.remove_path_dot 
(post_canonize p), Minilib.canonical_path_name p)::i,r),d) | RInclude (p,l) :: r -> let t,(i,r),d = aux r in (t,(i,(Minilib.remove_path_dot (post_canonize p),l, Minilib.canonical_path_name p)::r),d) | Def (v,def) :: r -> let t,i,(args,defs) = aux r in (t,i,(args,(v,def)::defs)) | Arg a :: r -> let t,i,(args,defs) = aux r in (t,i,(a::args,defs)) | [] -> ([],([],[],[],[],[]),[],[]),([],[]),([],[]) in aux let read_project_file f = split_arguments (snd (process_cmd_line (Filename.dirname f) (Some f, None, NoInstall, true) [] (parse f))) let args_from_project file project_files default_name = let is_f = Minilib.same_file file in let contains_file dir = List.exists (fun x -> is_f (Minilib.correct_path x dir)) in let build_cmd_line i_inc r_inc args = List.fold_right (fun (_,i) o -> "-I" :: i :: o) i_inc (List.fold_right (fun (_,l,p) o -> "-R" :: p :: l :: o) r_inc (List.fold_right (fun a o -> parse_args (Stream.of_string a) @ o) args [])) in try let (_,(_,(i_inc,r_inc),(args,_))) = List.find (fun (dir,((v_files,_,_,_),_,_)) -> contains_file dir v_files) project_files in build_cmd_line i_inc r_inc args with Not_found -> let rec find_project_file dir = try let ((v_files,_,_,_),(i_inc,r_inc),(args,_)) = read_project_file (Filename.concat dir default_name) in if contains_file dir v_files then build_cmd_line i_inc r_inc args else let newdir = Filename.dirname dir in Minilib.safe_prerr_endline newdir; if dir = newdir then [] else find_project_file newdir with Sys_error s -> let newdir = Filename.dirname dir in if dir = newdir then [] else find_project_file newdir in find_project_file (Filename.dirname file) coq-8.4pl2/ide/minilib.mli0000640000175000001440000000326311673467042014534 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a -> 'b -> 'a) -> int -> 'a -> 'b list -> 'a val list_map_i : (int -> 'a -> 'b) -> int -> 'a list -> 'b list val list_filter_i : (int -> 'a -> bool) -> 'a list -> 'a list val list_chop : int -> 'a list -> 'a list * 'a list val list_index0 : 'a -> 'a list -> int val string_map : (char -> char) -> string -> string val subst_command_placeholder : string -> string -> string val home : string val xdg_config_home : string val xdg_config_dirs : string list val xdg_data_home : string val xdg_data_dirs : string list val coqtop_path : string ref (** safe version of Pervasives.prerr_endline (avoid exception in win32 without console) *) val safe_prerr_endline : string -> unit val remove_path_dot : string -> string val strip_path : string -> string val canonical_path_name : string -> string (** correct_path f dir = dir/f if f is relative *) val correct_path : string -> string -> string (** checks if two file names refer to the same (existing) file *) val same_file : string -> string -> bool coq-8.4pl2/ide/FAQ0000640000175000001440000000571411662256365012745 0ustar notinusers CoqIde FAQ Q0) What is CoqIde? R0: A powerful graphical interface for Coq. See http://coq.inria.fr for more information. Q1) How to enable Emacs keybindings? R1: Insert gtk-key-theme-name = "Emacs" in your "coqide-gtk2rc" file. It should be in the $XDG_CONFIG_DIRS/coq dir. This is done by default. Q2) How to enable antialiased fonts? R2) Set the GDK_USE_XFT variable to 1. This is the default with Gtk >= 2.2. If some of your fonts are not available, set GDK_USE_XFT to 0. Q4) How to use those Forall and Exists pretty symbols? 
R4) Thanks to the Notation features in Coq, you just need to insert these lines in your Coq buffer: ====================================================================== Notation "∀ x : t, P" := (forall x:t, P) (at level 200, x ident). Notation "∃ x : t, P" := (exists x:t, P) (at level 200, x ident). ====================================================================== Copy/Paste of these lines from this file will not work outside of CoqIde. You need to load a file containing these lines or to enter the "∀" using an input method (see Q5). To try it, just use "Require utf8" from inside CoqIde. To enable these notations automatically, start coqide with coqide -l utf8 In the ide subdir of the Coq library, you will find a sample utf8.v with some pretty simple notations. Q5) How to define an input method for non-ASCII symbols? R5) -First solution : type "2200" to enter a forall in the script window. 2200 is the hexadecimal code for forall in the Unicode charts and is encoded as "∀" in UTF-8. 2203 is for exists. See http://www.unicode.org for more codes. -Second solution : rebind "a" to forall and "e" to exists. Under X11, you need to use something like xmodmap -e "keycode 24 = a A F13 F13" xmodmap -e "keycode 26 = e E F14 F14" and then to add bind "F13" {"insert-at-cursor" ("∀")} bind "F14" {"insert-at-cursor" ("∃")} to your "binding "text"" section in coqide-gtk2rc. The strange ("∀") argument is the UTF-8 encoding for 0x2200. You can compute these encodings using the lablgtk2 toplevel with Glib.Utf8.from_unichar 0x2200;; Further symbols can be bound on higher Fxx keys or even on other keys you do not need. Q6) How to customize the shortcuts for menus? R6) Two solutions are offered: - Edit $XDG_CONFIG_HOME/coq/coqide.keys by hand or - Add "gtk-can-change-accels = 1" in your coqide-gtk2rc file. Then from CoqIde, you may select a menu entry and press the desired shortcut. Q7) What encoding should I use? What is this \x{iiii} in my file? R7) The encoding option is related to the way files are saved. Keep it as UTF-8 until it becomes important for you to exchange files with non UTF-8 aware applications. If you choose something other than UTF-8, then missing characters will be encoded by \x{....} or \x{........} where each dot is a hex digit. The number between braces is the hexadecimal Unicode index for the missing character. 
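For example (an illustration of the rule above, using the forall symbol from Q4/Q5): if a buffer contains the line ∀ x, x = x and you save it as ISO-8859-1, the saved file would contain \x{2200} x, x = x, since 0x2200 is the Unicode index of "∀" and that character has no ISO-8859-1 encoding. Saving the same buffer as UTF-8 keeps the symbol unchanged. 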
coq-8.4pl2/ide/undo_lablgtk_ge26.mli0000640000175000001440000000201512010532755016361 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Gtk.text_view] Gtk.obj -> object inherit GText.view method undo : bool method redo : bool method clear_undo : unit end val undoable_view : ?buffer:GText.buffer -> ?editable:bool -> ?cursor_visible:bool -> ?justification:GtkEnums.justification -> ?wrap_mode:GtkEnums.wrap_mode -> ?border_width:int -> ?width:int -> ?height:int -> ?packing:(GObj.widget -> unit) -> ?show:bool -> unit -> undoable_view coq-8.4pl2/ide/utf8_convert.mll0000640000175000001440000000243212010532755015525 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* s in let c = if Glib.Utf8.validate code then code else s in Buffer.add_string b c; entry lexbuf } | _ { let s = lexeme lexbuf in Buffer.add_string b s; entry lexbuf} | eof { let s = Buffer.contents b in Buffer.reset b ; s } { let f s = let lb = from_string s in Buffer.reset b; entry lb } coq-8.4pl2/ide/coq_commands.ml0000640000175000001440000001701412010532755015370 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* "; "dependent rewrite <-"; "destruct"; "discriminate"; "do"; "double induction"; ]; [ "eapply"; "eauto"; "eauto with"; "eexact"; "elim"; "elim __ using"; "elim __ with"; "elimtype"; "exact"; "exists"; ]; [ "fail"; "field"; "first"; "firstorder"; "firstorder using"; "firstorder with"; "fix"; "fix __ with"; "fold"; "fold __ in"; "fourier"; "functional induction"; ]; [ "generalize"; "generalize dependent"; ]; [ "hnf"; ]; [ "idtac"; "induction"; "info"; "injection"; "instantiate (__:=__)"; "intro"; "intro after"; "intro __ after"; "intros"; "intros until"; "intuition"; "inversion"; "inversion __ in"; "inversion __ using"; "inversion __ using __ in"; "inversion__clear"; "inversion__clear __ in"; ]; [ "jp "; "jp"; ]; [ "lapply"; "lazy"; "lazy in"; "left"; ]; [ "move __ after"; ]; [ "omega"; ]; [ "pattern"; "pose"; "pose __:=__)"; "progress"; ]; [ "quote"; ]; [ "red"; "red in"; "refine"; "reflexivity"; "rename __ into"; "repeat"; "replace __ with"; "rewrite"; "rewrite __ in"; "rewrite <-"; "rewrite <- __ in"; "right"; "ring"; ]; [ "set"; "set (__:=__)"; "setoid__replace"; "setoid__rewrite"; "simpl"; "simpl __ in"; "simple destruct"; "simple induction"; "simple inversion"; "simplify__eq"; "solve"; "split"; (* "split__Rabs"; "split__Rmult"; *) "subst"; "symmetry"; "symmetry in"; ]; [ "tauto"; "transitivity"; "trivial"; "try"; ]; [ "unfold"; "unfold __ in"; ]; ] coq-8.4pl2/ide/coqide.ml0000640000175000001440000032642612122347656014213 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Tags.Script.processed | Unsafe -> Tags.Script.unjustified class type analyzed_views= object val mutable act_id : GtkSignal.id option val input_buffer : GText.buffer val input_view : Undo.undoable_view val last_array : string array val mutable last_index : bool val message_buffer : GText.buffer val message_view : GText.view val proof_buffer : GText.buffer val proof_view : GText.view val cmd_stack : ide_info Stack.t val mycoqtop : Coq.coqtop ref val mutable is_active : bool val mutable read_only : bool val mutable filename : string option val 
mutable stats : Unix.stats option method without_auto_complete : 'a 'b. ('a -> 'b) -> 'a -> 'b method set_auto_complete : bool -> unit method filename : string option method stats : Unix.stats option method update_stats : unit method revert : unit method auto_save : unit method save : string -> bool method save_as : string -> bool method read_only : bool method set_read_only : bool -> unit method is_active : bool method activate : unit -> unit method active_keypress_handler : GdkEvent.Key.t -> bool method backtrack_to : GText.iter -> unit method backtrack_to_no_lock : GText.iter -> unit method clear_message : unit method find_phrase_starting_at : GText.iter -> (GText.iter * GText.iter) option method get_insert : GText.iter method get_start_of_input : GText.iter method go_to_insert : unit method indent_current_line : unit method go_to_next_occ_of_cur_word : unit method go_to_prev_occ_of_cur_word : unit method insert_command : string -> string -> unit method tactic_wizard : string list -> unit method insert_message : string -> unit method process_next_phrase : bool -> unit method process_until_iter_or_error : GText.iter -> unit method process_until_end_or_error : unit method recenter_insert : unit method reset_initial : unit method force_reset_initial : unit method set_message : string -> unit method raw_coq_query : string -> unit method show_goals : unit method show_goals_full : unit method undo_last_step : unit method help_for_keyword : unit -> unit method complete_at_offset : int -> bool end type viewable_script = {script : Undo.undoable_view; tab_label : GMisc.label; mutable filename : string; mutable encoding : string; proof_view : GText.view; message_view : GText.view; analyzed_view : analyzed_views; toplvl : Coq.coqtop ref; command : Command_windows.command_window; } let kill_session s = (* To close the detached views of this script, we call manually [destroy] on it, triggering some callbacks in [detach_view]. In a more modern lablgtk, rather use the page-removed signal ? 
*) s.script#destroy (); Coq.kill_coqtop !(s.toplvl) let build_session s = let session_paned = GPack.paned `VERTICAL () in let eval_paned = GPack.paned `HORIZONTAL ~border_width:5 ~packing:(session_paned#pack1 ~shrink:false ~resize:true) () in let script_frame = GBin.frame ~shadow_type:`IN ~packing:eval_paned#add1 () in let script_scroll = GBin.scrolled_window ~vpolicy:`AUTOMATIC ~hpolicy:`AUTOMATIC ~packing:script_frame#add () in let state_paned = GPack.paned `VERTICAL ~packing:eval_paned#add2 () in let proof_frame = GBin.frame ~shadow_type:`IN ~packing:state_paned#add1 () in let proof_scroll = GBin.scrolled_window ~vpolicy:`AUTOMATIC ~hpolicy:`AUTOMATIC ~packing:proof_frame#add () in let message_frame = GBin.frame ~shadow_type:`IN ~packing:state_paned#add2 () in let message_scroll = GBin.scrolled_window ~vpolicy:`AUTOMATIC ~hpolicy:`AUTOMATIC ~packing:message_frame#add () in let session_tab = GPack.hbox ~homogeneous:false () in let img = GMisc.image ~icon_size:`SMALL_TOOLBAR ~packing:session_tab#pack () in let _ = s.script#buffer#connect#modified_changed ~callback:(fun () -> if s.script#buffer#modified then img#set_stock `SAVE else img#set_stock `YES) in let _ = eval_paned#misc#connect#size_allocate ~callback: (let old_paned_width = ref 2 in let old_paned_height = ref 2 in (fun {Gtk.width=paned_width;Gtk.height=paned_height} -> if !old_paned_width <> paned_width || !old_paned_height <> paned_height then ( eval_paned#set_position (eval_paned#position * paned_width / !old_paned_width); state_paned#set_position (state_paned#position * paned_height / !old_paned_height); old_paned_width := paned_width; old_paned_height := paned_height; ))) in session_paned#pack2 ~shrink:false ~resize:false (s.command#frame#coerce); script_scroll#add s.script#coerce; proof_scroll#add s.proof_view#coerce; message_scroll#add s.message_view#coerce; session_tab#pack s.tab_label#coerce; img#set_stock `YES; eval_paned#set_position 1; state_paned#set_position 1; (Some session_tab#coerce,None,session_paned#coerce) let session_notebook = Typed_notebook.create build_session kill_session ~border_width:2 ~show_border:false ~scrollable:true () let cb = GData.clipboard Gdk.Atom.primary let last_cb_content = ref "" let update_notebook_pos () = let pos = match !current.vertical_tabs, !current.opposite_tabs with | false, false -> `TOP | false, true -> `BOTTOM | true , false -> `LEFT | true , true -> `RIGHT in session_notebook#set_tab_pos pos let to_do_on_page_switch = ref [] (** * Coqide's handling of signals *) (** We ignore Ctrl-C, and for most of the other catchable signals we launch an emergency save of opened files and then exit *) let signals_to_crash = [Sys.sigabrt; Sys.sigalrm; Sys.sigfpe; Sys.sighup; Sys.sigill; Sys.sigpipe; Sys.sigquit; (* Sys.sigsegv; Sys.sigterm;*) Sys.sigusr2] let crash_save i = (* ignore (Unix.sigprocmask Unix.SIG_BLOCK signals_to_crash);*) Minilib.safe_prerr_endline "Trying to save all buffers in .crashcoqide files"; let count = ref 0 in List.iter (function {script=view; analyzed_view = av } -> (let filename = match av#filename with | None -> incr count; "Unnamed_coqscript_"^(string_of_int !count)^".crashcoqide" | Some f -> f^".crashcoqide" in try if try_export filename (view#buffer#get_text ()) then Minilib.safe_prerr_endline ("Saved "^filename) else Minilib.safe_prerr_endline ("Could not save "^filename) with _ -> Minilib.safe_prerr_endline ("Could not save "^filename)) ) session_notebook#pages; Minilib.safe_prerr_endline "Done. 
Please report."; if i <> 127 then exit i let ignore_break () = List.iter (fun i -> try Sys.set_signal i (Sys.Signal_handle crash_save) with _ -> prerr_endline "Signal ignored (normal if Win32)") signals_to_crash; (* We ignore the Ctrl-C, this is required for the Stop button to work, since we will actually send Ctrl-C to all processes sharing our console (including us) *) Sys.set_signal Sys.sigint Sys.Signal_ignore (** * Locks *) (* Locking machinery for Coq kernel *) let coq_computing = Mutex.create () (* To prevent Coq from interrupting during undoing...*) let coq_may_stop = Mutex.create () (* To prevent a force_reset_initial during a force_reset_initial *) let resetting = Mutex.create () exception RestartCoqtop exception Unsuccessful let force_reset_initial () = prerr_endline "Reset Initial"; session_notebook#current_term.analyzed_view#force_reset_initial let break () = prerr_endline "User break received"; Coq.break_coqtop !(session_notebook#current_term.toplvl) let do_if_not_computing text f x = let threaded_task () = if Mutex.try_lock coq_computing then begin prerr_endline "Getting lock"; List.iter (fun elt -> try f elt with | RestartCoqtop -> elt.analyzed_view#reset_initial | Sys_error str -> elt.analyzed_view#reset_initial; elt.analyzed_view#set_message ("Unable to communicate with coqtop, restarting coqtop.\n"^ "Error was: "^str) | e -> Mutex.unlock coq_computing; elt.analyzed_view#set_message ("Unknown error, please report:\n"^(Printexc.to_string e))) x; prerr_endline "Releasing lock"; Mutex.unlock coq_computing; end else prerr_endline "Discarded order (computations are ongoing)" in prerr_endline ("Launching thread " ^ text); ignore (Glib.Timeout.add ~ms:300 ~callback: (fun () -> if Mutex.try_lock coq_computing then (Mutex.unlock coq_computing; false) else (pbar#pulse (); true))); ignore (Thread.create threaded_task ()) let warning msg = GToolbox.message_box ~title:"Warning" ~icon:(let img = GMisc.image () in img#set_stock `DIALOG_WARNING; img#set_icon_size `DIALOG; img#coerce) msg let remove_current_view_page () = let do_remove () = let c = session_notebook#current_page in session_notebook#remove_page c in let current = session_notebook#current_term in if not current.script#buffer#modified then do_remove () else match GToolbox.question_box ~title:"Close" ~buttons:["Save Buffer and Close"; "Close without Saving"; "Don't Close"] ~default:0 ~icon:(let img = GMisc.image () in img#set_stock `DIALOG_WARNING; img#set_icon_size `DIALOG; img#coerce) "This buffer has unsaved modifications" with | 1 -> begin match current.analyzed_view#filename with | None -> begin match select_file_for_save ~title:"Save file" () with | None -> () | Some f -> if current.analyzed_view#save_as f then begin flash_info ("File " ^ f ^ " saved") ; do_remove () end else warning ("Save Failed (check if " ^ f ^ " is writable)") end | Some f -> if current.analyzed_view#save f then begin flash_info ("File " ^ f ^ " saved") ; do_remove () end else warning ("Save Failed (check if " ^ f ^ " is writable)") end | 2 -> do_remove () | _ -> () module Opt = Coq.PrintOpt let print_items = [ ([Opt.implicit],"Display implicit arguments","Display _implicit arguments", "i",false); ([Opt.coercions],"Display coercions","Display _coercions","c",false); ([Opt.raw_matching],"Display raw matching expressions", "Display raw _matching expressions","m",true); ([Opt.notations],"Display notations","Display _notations","n",true); ([Opt.all_basic],"Display all basic low-level contents", "Display _all basic low-level contents","a",false); 
([Opt.existential],"Display existential variable instances", "Display _existential variable instances","e",false); ([Opt.universes],"Display universe levels","Display _universe levels", "u",false); ([Opt.all_basic;Opt.existential;Opt.universes], "Display all low-level contents", "Display all _low-level contents","l",false) ] let setopts ct opts v = let opts = List.map (fun o -> (o, v)) opts in Coq.PrintOpt.set ct opts (* Reset this to None on page change ! *) let (last_completion:(string*int*int*bool) option ref) = ref None let () = to_do_on_page_switch := (fun i -> last_completion := None)::!to_do_on_page_switch let rec complete input_buffer w (offset:int) = match !last_completion with | Some (lw,loffset,lpos,backward) when lw=w && loffset=offset -> begin let iter = input_buffer#get_iter (`OFFSET lpos) in if backward then match complete_backward w iter with | None -> last_completion := Some (lw,loffset, (find_word_end (input_buffer#get_iter (`OFFSET loffset)))#offset , false); None | Some (ss,start,stop) as result -> last_completion := Some (w,offset,ss#offset,true); result else match complete_forward w iter with | None -> last_completion := None; None | Some (ss,start,stop) as result -> last_completion := Some (w,offset,ss#offset,false); result end | _ -> begin match complete_backward w (input_buffer#get_iter (`OFFSET offset)) with | None -> last_completion := Some (w,offset,(find_word_end (input_buffer#get_iter (`OFFSET offset)))#offset,false); complete input_buffer w offset | Some (ss,start,stop) as result -> last_completion := Some (w,offset,ss#offset,true); result end let get_current_word () = match session_notebook#current_term,cb#text with | {script=script; analyzed_view=av;},None -> prerr_endline "None selected"; let it = av#get_insert in let start = find_word_start it in let stop = find_word_end start in script#buffer#move_mark `SEL_BOUND ~where:start; script#buffer#move_mark `INSERT ~where:stop; script#buffer#get_text ~slice:true ~start ~stop () | _,Some t -> prerr_endline "Some selected"; prerr_endline t; t let input_channel b ic = let buf = String.create 1024 and len = ref 0 in while len := input ic buf 0 1024; !len > 0 do Buffer.add_substring b buf 0 !len done let with_file handler name ~f = try let ic = open_in_gen [Open_rdonly;Open_creat] 0o644 name in try f ic; close_in ic with e -> close_in ic; raise e with Sys_error s -> handler s (* For find_phrase_starting_at *) exception Stop of int let tag_of_sort = function | Coq_lex.Comment -> Tags.Script.comment | Coq_lex.Keyword -> Tags.Script.kwd | Coq_lex.Declaration -> Tags.Script.decl | Coq_lex.ProofDeclaration -> Tags.Script.proof_decl | Coq_lex.Qed -> Tags.Script.qed | Coq_lex.String -> failwith "No tag" let apply_tag (buffer:GText.buffer) orig off_conv from upto sort = try let tag = tag_of_sort sort in let start = orig#forward_chars (off_conv from) in let stop = orig#forward_chars (off_conv upto) in buffer#apply_tag ~start ~stop tag with _ -> () let remove_tags (buffer:GText.buffer) from upto = List.iter (buffer#remove_tag ~start:from ~stop:upto) [ Tags.Script.comment; Tags.Script.kwd; Tags.Script.decl; Tags.Script.proof_decl; Tags.Script.qed ] (** Cut a part of the buffer in sentences and tag them. Invariant: either this slice ends the buffer, or it ends with ".". May raise [Coq_lex.Unterminated] when the zone ends with an unterminated sentence. 
*) let split_slice_lax (buffer:GText.buffer) from upto = remove_tags buffer from upto; buffer#remove_tag ~start:from ~stop:upto Tags.Script.sentence; let slice = buffer#get_text ~start:from ~stop:upto () in let rec split_substring str = let off_conv = byte_offset_to_char_offset str in let slice_len = String.length str in let end_off = Coq_lex.delimit_sentence (apply_tag buffer from off_conv) str in let start = from#forward_chars (off_conv end_off) in let stop = start#forward_char in buffer#apply_tag ~start ~stop Tags.Script.sentence; let next = end_off + 1 in if next < slice_len then begin ignore (from#nocopy#forward_chars (off_conv next)); split_substring (String.sub str next (slice_len - next)) end in split_substring slice (** Searching forward and backward a position fulfilling some condition *) let rec forward_search cond (iter:GText.iter) = if iter#is_end || cond iter then iter else forward_search cond iter#forward_char let rec backward_search cond (iter:GText.iter) = if iter#is_start || cond iter then iter else backward_search cond iter#backward_char let is_sentence_end s = s#has_tag Tags.Script.sentence let is_char s c = s#char = Char.code c (** Search backward the first character of a sentence, starting at [iter] and going at most up to [soi] (meant to be the end of the locked zone). Raise [StartError] when no proper sentence start has been found. A character following a ending "." is considered as a sentence start only if this character is a blank. In particular, when a final "." at the end of the locked zone isn't followed by a blank, then this non-blank character will be signaled as erroneous in [tag_on_insert] below. *) exception StartError let grab_sentence_start (iter:GText.iter) soi = let cond iter = if iter#compare soi < 0 then raise StartError; let prev = iter#backward_char in is_sentence_end prev && (not (is_char prev '.') || List.exists (is_char iter) [' ';'\n';'\r';'\t']) in backward_search cond iter (** Search forward the first character immediately after a sentence end *) let rec grab_sentence_stop (start:GText.iter) = (forward_search is_sentence_end start)#forward_char (** Search forward the first character immediately after a "." sentence end (and not just a "{" or "}" or comment end *) let rec grab_ending_dot (start:GText.iter) = let is_ending_dot s = is_sentence_end s && s#char = Char.code '.' in (forward_search is_ending_dot start)#forward_char (** Retag a zone that has been edited *) let tag_on_insert buffer = (* the start of the non-locked zone *) let soi = buffer#get_iter_at_mark (`NAME "start_of_input") in (* the inserted zone is between [prev_insert] and [insert] *) let insert = buffer#get_iter_at_mark `INSERT in let prev = buffer#get_iter_at_mark (`NAME "prev_insert") in (* [prev] is normally always before [insert] even when deleting. Let's check this nonetheless *) let prev, insert = if insert#compare prev < 0 then insert, prev else prev, insert in try let start = grab_sentence_start prev soi in (** The status of "{" "}" as sentence delimiters is too fragile. We retag up to the next "." instead. *) let stop = grab_ending_dot insert in try split_slice_lax buffer start stop with Coq_lex.Unterminated -> (* This shouldn't happen frequently. Either: - we are at eof, with indeed an unfinished sentence. - we have just inserted an opening of comment or string. - the inserted text ends with a "." that interacts with the "." found by [grab_ending_dot] to form a non-ending "..". In any case, we retag up to eof, since this isn't that costly. 
*) if not stop#is_end then try split_slice_lax buffer start buffer#end_iter with Coq_lex.Unterminated -> () with StartError -> buffer#apply_tag Tags.Script.error ~start:soi ~stop:soi#forward_char let force_retag buffer = try split_slice_lax buffer buffer#start_iter buffer#end_iter with Coq_lex.Unterminated -> () let toggle_proof_visibility (buffer:GText.buffer) (cursor:GText.iter) = (* move back twice if not into proof_decl, * once if into proof_decl and back_char into_proof_decl, * don't move if into proof_decl and back_char not into proof_decl *) if not (cursor#has_tag Tags.Script.proof_decl) then ignore (cursor#nocopy#backward_to_tag_toggle (Some Tags.Script.proof_decl)); if cursor#backward_char#has_tag Tags.Script.proof_decl then ignore (cursor#nocopy#backward_to_tag_toggle (Some Tags.Script.proof_decl)); let decl_start = cursor in let prf_end = decl_start#forward_to_tag_toggle (Some Tags.Script.qed) in let decl_end = grab_ending_dot decl_start in let prf_end = grab_ending_dot prf_end in let prf_end = prf_end#forward_char in if decl_start#has_tag Tags.Script.folded then ( buffer#remove_tag ~start:decl_start ~stop:decl_end Tags.Script.folded; buffer#remove_tag ~start:decl_end ~stop:prf_end Tags.Script.hidden) else ( buffer#apply_tag ~start:decl_start ~stop:decl_end Tags.Script.folded; buffer#apply_tag ~start:decl_end ~stop:prf_end Tags.Script.hidden) (** The arguments that will be passed to coqtop. No quoting here, since no /bin/sh when using create_process instead of open_process. *) let custom_project_files = ref [] let sup_args = ref [] class analyzed_view (_script:Undo.undoable_view) (_pv:GText.view) (_mv:GText.view) _cs _ct _fn = object(self) val input_view = _script val input_buffer = _script#buffer val proof_view = _pv val proof_buffer = _pv#buffer val message_view = _mv val message_buffer = _mv#buffer val cmd_stack = _cs val mycoqtop = _ct val mutable is_active = false val mutable read_only = false val mutable filename = _fn val mutable stats = None val mutable last_modification_time = 0. val mutable last_auto_save_time = 0. val mutable find_forward_instead_of_backward = false val mutable auto_complete_on = !current.auto_complete val hidden_proofs = Hashtbl.create 32 method private toggle_auto_complete = auto_complete_on <- not auto_complete_on method set_auto_complete t = auto_complete_on <- t method without_auto_complete : 'a 'b. 
('a -> 'b) -> 'a -> 'b = fun f x -> let old = auto_complete_on in self#set_auto_complete false; let y = f x in self#set_auto_complete old; y method filename = filename method stats = stats method update_stats = match filename with | Some f -> stats <- my_stat f | _ -> () method revert = match filename with | Some f -> begin let do_revert () = begin push_info "Reverting buffer"; try if is_active then self#force_reset_initial; let b = Buffer.create 1024 in with_file flash_info f ~f:(input_channel b); let s = try_convert (Buffer.contents b) in input_buffer#set_text s; self#update_stats; input_buffer#place_cursor ~where:input_buffer#start_iter; input_buffer#set_modified false; pop_info (); flash_info "Buffer reverted"; force_retag input_buffer; with _ -> pop_info (); flash_info "Warning: could not revert buffer"; end in if input_buffer#modified then match (GToolbox.question_box ~title:"Modified buffer changed on disk" ~buttons:["Revert from File"; "Overwrite File"; "Disable Auto Revert"] ~default:0 ~icon:(stock_to_widget `DIALOG_WARNING) "Some unsaved buffers changed on disk" ) with 1 -> do_revert () | 2 -> if self#save f then flash_info "Overwritten" else flash_info "Could not overwrite file" | _ -> prerr_endline "Auto revert set to false"; !current.global_auto_revert <- false; disconnect_revert_timer () else do_revert () end | None -> () method save f = if try_export f (input_buffer#get_text ()) then begin filename <- Some f; input_buffer#set_modified false; stats <- my_stat f; (match self#auto_save_name with | None -> () | Some fn -> try Sys.remove fn with _ -> ()); true end else false method private auto_save_name = match filename with | None -> None | Some f -> let dir = Filename.dirname f in let base = (fst !current.auto_save_name) ^ (Filename.basename f) ^ (snd !current.auto_save_name) in Some (Filename.concat dir base) method private need_auto_save = input_buffer#modified && last_modification_time > last_auto_save_time method auto_save = if self#need_auto_save then begin match self#auto_save_name with | None -> () | Some fn -> try last_auto_save_time <- Unix.time(); prerr_endline ("Autosave time : "^(string_of_float (Unix.time()))); if try_export fn (input_buffer#get_text ()) then begin flash_info ~delay:1000 "Autosaved" end else warning ("Autosave failed (check if " ^ fn ^ " is writable)") with _ -> warning ("Autosave: unexpected error while writing "^fn) end method save_as f = if Sys.file_exists f then match (GToolbox.question_box ~title:"File exists on disk" ~buttons:["Overwrite"; "Cancel";] ~default:1 ~icon: (let img = GMisc.image () in img#set_stock `DIALOG_WARNING; img#set_icon_size `DIALOG; img#coerce) ("File "^f^" already exists") ) with 1 -> self#save f | _ -> false else self#save f method set_read_only b = read_only<-b method read_only = read_only method is_active = is_active method insert_message s = message_buffer#insert s; message_view#misc#draw None method set_message s = message_buffer#set_text s; message_view#misc#draw None method clear_message = message_buffer#set_text "" val mutable last_index = true val last_array = [|"";""|] method get_start_of_input = input_buffer#get_iter_at_mark (`NAME "start_of_input") method get_insert = get_insert input_buffer method recenter_insert = (* BUG : to investigate further: FIXED : Never call GMain.* in thread ! PLUS : GTK BUG ??? Cannot be called from a thread... 
ADDITION: using sync instead of async causes deadlock...*) ignore (GtkThread.async ( input_view#scroll_to_mark ~use_align:false ~yalign:0.75 ~within_margin:0.25) `INSERT) method indent_current_line = let get_nb_space it = let it = it#copy in let nb_sep = ref 0 in let continue = ref true in while !continue do if it#char = space then begin incr nb_sep; if not it#nocopy#forward_char then continue := false; end else continue := false done; !nb_sep in let previous_line = self#get_insert in if previous_line#nocopy#backward_line then begin let previous_line_spaces = get_nb_space previous_line in let current_line_start = self#get_insert#set_line_offset 0 in let current_line_spaces = get_nb_space current_line_start in if input_buffer#delete_interactive ~start:current_line_start ~stop:(current_line_start#forward_chars current_line_spaces) () then let current_line_start = self#get_insert#set_line_offset 0 in input_buffer#insert ~iter:current_line_start (String.make previous_line_spaces ' ') end method go_to_next_occ_of_cur_word = let cv = session_notebook#current_term in let av = cv.analyzed_view in let b = (cv.script)#buffer in let start = find_word_start (av#get_insert) in let stop = find_word_end start in let text = b#get_text ~start ~stop () in match stop#forward_search text with | None -> () | Some(start, _) -> (b#place_cursor start; self#recenter_insert) method go_to_prev_occ_of_cur_word = let cv = session_notebook#current_term in let av = cv.analyzed_view in let b = (cv.script)#buffer in let start = find_word_start (av#get_insert) in let stop = find_word_end start in let text = b#get_text ~start ~stop () in match start#backward_search text with | None -> () | Some(start, _) -> (b#place_cursor start; self#recenter_insert) val mutable full_goal_done = true method show_goals_full = if not full_goal_done then proof_view#buffer#set_text ""; begin let menu_callback = if !current.contextual_menus_on_goal then (fun s () -> ignore (self#insert_this_phrase_on_success true true false ("progress "^s) s)) else (fun _ _ -> ()) in try begin match Coq.goals !mycoqtop with | Interface.Fail (l, str) -> self#set_message ("Error in coqtop :\n"^str) | Interface.Good goals -> begin match Coq.evars !mycoqtop with | Interface.Fail (l, str) -> self#set_message ("Error in coqtop :\n"^str) | Interface.Good evs -> let hints = match Coq.hints !mycoqtop with | Interface.Fail (_, _) -> None | Interface.Good hints -> hints in Ideproof.display (Ideproof.mode_tactic menu_callback) proof_view goals hints evs end end with | e -> prerr_endline (Printexc.to_string e) end method show_goals = self#show_goals_full method private send_to_coq ct verbose phrase show_output show_error localize = let display_output msg = self#insert_message (if show_output then msg else "") in let display_error (loc,s) = if show_error then begin if not (Glib.Utf8.validate s) then flash_info "This error is so nasty that I can't even display it." 
else begin self#insert_message s; message_view#misc#draw None; if localize then (match loc with | None -> () | Some (start,stop) -> let convert_pos = byte_offset_to_char_offset phrase in let start = convert_pos start in let stop = convert_pos stop in let i = self#get_start_of_input in let starti = i#forward_chars start in let stopi = i#forward_chars stop in input_buffer#apply_tag Tags.Script.error ~start:starti ~stop:stopi; input_buffer#place_cursor ~where:starti) end end in try full_goal_done <- false; prerr_endline "Send_to_coq starting now"; (* It's important here to work with [ct] and not [!mycoqtop], otherwise we could miss a restart of coqtop and continue sending it orders. *) match Coq.interp ct ~verbose phrase with | Interface.Fail (l,str) -> sync display_error (l,str); None | Interface.Good msg -> sync display_output msg; Some Safe (* TODO: Restore someday the access to Decl_mode.get_damon_flag, and also detect the use of admit, and then return Unsafe *) with | End_of_file -> (* Coqtop has died, let's trigger a reset_initial. *) raise RestartCoqtop | e -> sync display_error (None, Printexc.to_string e); None (* This method is intended to perform stateless commands *) method raw_coq_query phrase = let () = prerr_endline "raw_coq_query starting now" in let display_error s = if not (Glib.Utf8.validate s) then flash_info "This error is so nasty that I can't even display it." else begin self#insert_message s; message_view#misc#draw None end in try match Coq.interp !mycoqtop ~raw:true ~verbose:false phrase with | Interface.Fail (_, err) -> sync display_error err | Interface.Good msg -> sync self#insert_message msg with | End_of_file -> raise RestartCoqtop | e -> sync display_error (Printexc.to_string e) method find_phrase_starting_at (start:GText.iter) = try let start = grab_sentence_start start self#get_start_of_input in let stop = grab_sentence_stop start in (* Is this phrase non-empty and complete ? 
*) if stop#compare start > 0 && is_sentence_end stop#backward_char then Some (start,stop) else None with StartError -> None method complete_at_offset (offset:int) = prerr_endline ("Completion at offset : " ^ string_of_int offset); let it () = input_buffer#get_iter (`OFFSET offset) in let iit = it () in let start = find_word_start iit in if ends_word iit then let w = input_buffer#get_text ~start ~stop:iit () in if String.length w <> 0 then begin prerr_endline ("Completion of prefix : '" ^ w^"'"); match complete input_buffer w start#offset with | None -> false | Some (ss,start,stop) -> let completion = input_buffer#get_text ~start ~stop () in ignore (input_buffer#delete_selection ()); ignore (input_buffer#insert_interactive completion); input_buffer#move_mark `SEL_BOUND ~where:(it())#backward_char; true end else false else false method private process_one_phrase ct verbosely display_goals do_highlight = let get_next_phrase () = self#clear_message; prerr_endline "process_one_phrase starting now"; if do_highlight then begin push_info "Coq is computing"; input_view#set_editable false; end; match self#find_phrase_starting_at self#get_start_of_input with | None -> if do_highlight then begin input_view#set_editable true; pop_info (); end; None | Some(start,stop) -> prerr_endline "process_one_phrase : to_process highlight"; if do_highlight then begin input_buffer#apply_tag Tags.Script.to_process ~start ~stop; prerr_endline "process_one_phrase : to_process applied"; end; prerr_endline "process_one_phrase : getting phrase"; Some((start,stop),start#get_slice ~stop) in let remove_tag (start,stop) = if do_highlight then begin input_buffer#remove_tag Tags.Script.to_process ~start ~stop; input_view#set_editable true; pop_info (); end in let mark_processed safe (start,stop) = let b = input_buffer in b#move_mark ~where:stop (`NAME "start_of_input"); b#apply_tag (safety_tag safe) ~start ~stop; if (self#get_insert#compare) stop <= 0 then begin b#place_cursor ~where:stop; self#recenter_insert end; let ide_payload = { start = `MARK (b#create_mark start); stop = `MARK (b#create_mark stop); } in Stack.push ide_payload cmd_stack; if display_goals then self#show_goals; remove_tag (start,stop) in match sync get_next_phrase () with | None -> raise Unsuccessful | Some ((_,stop) as loc,phrase) -> if stop#backward_char#has_tag Tags.Script.comment then sync mark_processed Safe loc else try match self#send_to_coq ct verbosely phrase true true true with | Some safe -> sync mark_processed safe loc | None -> sync remove_tag loc; raise Unsuccessful with | RestartCoqtop -> sync remove_tag loc; raise RestartCoqtop method process_next_phrase verbosely = try self#process_one_phrase !mycoqtop verbosely true true with Unsuccessful -> () method private insert_this_phrase_on_success show_output show_msg localize coqphrase insertphrase = let mark_processed safe = let stop = self#get_start_of_input in if stop#starts_line then input_buffer#insert ~iter:stop insertphrase else input_buffer#insert ~iter:stop ("\n"^insertphrase); tag_on_insert input_buffer; let start = self#get_start_of_input in input_buffer#move_mark ~where:stop (`NAME "start_of_input"); input_buffer#apply_tag (safety_tag safe) ~start ~stop; if (self#get_insert#compare) stop <= 0 then input_buffer#place_cursor ~where:stop; let ide_payload = { start = `MARK (input_buffer#create_mark start); stop = `MARK (input_buffer#create_mark stop); } in Stack.push ide_payload cmd_stack; self#show_goals; (*Auto insert save on success... 
try (match Coq.get_current_goals () with | [] -> (match self#send_to_coq "Save.\n" true true true with | Some ast -> begin let stop = self#get_start_of_input in if stop#starts_line then input_buffer#insert ~iter:stop "Save.\n" else input_buffer#insert ~iter:stop "\nSave.\n"; let start = self#get_start_of_input in input_buffer#move_mark ~where:stop (`NAME"start_of_input"); input_buffer#apply_tag_by_name "processed" ~start ~stop; if (self#get_insert#compare) stop <= 0 then input_buffer#place_cursor stop; let start_of_phrase_mark = `MARK (input_buffer#create_mark start) in let end_of_phrase_mark = `MARK (input_buffer#create_mark stop) in push_phrase reset_info start_of_phrase_mark end_of_phrase_mark ast end | None -> ()) | _ -> ()) with _ -> ()*) in match self#send_to_coq !mycoqtop false coqphrase show_output show_msg localize with | Some safe -> sync mark_processed safe; true | None -> sync (fun _ -> self#insert_message ("Unsuccessfully tried: "^coqphrase)) (); false method process_until_iter_or_error stop = let stop' = `OFFSET stop#offset in let start = self#get_start_of_input#copy in let start' = `OFFSET start#offset in sync (fun _ -> input_buffer#apply_tag Tags.Script.to_process ~start ~stop; input_view#set_editable false) (); push_info "Coq is computing"; let get_current () = if !current.stop_before then match self#find_phrase_starting_at self#get_start_of_input with | None -> self#get_start_of_input | Some (_, stop2) -> stop2 else begin self#get_start_of_input end in let unlock () = sync (fun _ -> self#show_goals; (* Start and stop might be invalid if an eol was added at eof *) let start = input_buffer#get_iter start' in let stop = input_buffer#get_iter stop' in input_buffer#remove_tag Tags.Script.to_process ~start ~stop; input_view#set_editable true) () in (* All the [process_one_phrase] below should be done with the same [ct] instead of accessing multiple time [mycoqtop]. Otherwise a restart of coqtop could go unnoticed, and the new coqtop could receive strange things. *) let ct = !mycoqtop in (try while stop#compare (get_current()) >= 0 do self#process_one_phrase ct false false false done with | Unsuccessful -> () | RestartCoqtop -> unlock (); raise RestartCoqtop); unlock (); pop_info() method process_until_end_or_error = self#process_until_iter_or_error input_buffer#end_iter method reset_initial = mycoqtop := Coq.respawn_coqtop !mycoqtop; sync (fun () -> Stack.iter (function inf -> let start = input_buffer#get_iter_at_mark inf.start in let stop = input_buffer#get_iter_at_mark inf.stop in input_buffer#move_mark ~where:start (`NAME "start_of_input"); input_buffer#remove_tag Tags.Script.processed ~start ~stop; input_buffer#remove_tag Tags.Script.unjustified ~start ~stop; input_buffer#delete_mark inf.start; input_buffer#delete_mark inf.stop; ) cmd_stack; Stack.clear cmd_stack; self#clear_message) () method force_reset_initial = (* Do nothing if a force_reset_initial is already ongoing *) if Mutex.try_lock resetting then begin Coq.kill_coqtop !mycoqtop; (* If a computation is ongoing, an exception will trigger the reset_initial in do_if_not_computing, not here. *) if Mutex.try_lock coq_computing then begin self#reset_initial; Mutex.unlock coq_computing end; Mutex.unlock resetting end (* Internal method for dialoging with coqtop about a backtrack. The ide's cmd_stack has already been cleared up to the desired point. 
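For instance (illustrative): when the popped phrases closed a proof, [Coq.rewind] may answer [Interface.Good 2], meaning that two more phrases must also be popped from [cmd_stack] (phrases that are pure comments are popped without being counted); this is what [n_pop] below does. 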
The [finish] function is used to handle minor differences between [go_to_insert] and [undo_last_step] *) method private do_backtrack finish n = (* pop n more commands if coqtop has said so (e.g. for undoing a proof) *) let rec n_pop n = if n = 0 then () else let phrase = Stack.pop cmd_stack in let stop = input_buffer#get_iter_at_mark phrase.stop in if stop#backward_char#has_tag Tags.Script.comment then n_pop n else n_pop (pred n) in match Coq.rewind !mycoqtop n with | Interface.Good n -> n_pop n; sync (fun _ -> let start = if Stack.is_empty cmd_stack then input_buffer#start_iter else input_buffer#get_iter_at_mark (Stack.top cmd_stack).stop in let stop = self#get_start_of_input in input_buffer#remove_tag Tags.Script.processed ~start ~stop; input_buffer#remove_tag Tags.Script.unjustified ~start ~stop; input_buffer#move_mark ~where:start (`NAME "start_of_input"); self#show_goals; self#clear_message; finish start) () | Interface.Fail (l,str) -> sync self#set_message ("Error while backtracking :\n" ^ str ^ "\n" ^ "CoqIDE and coqtop may be out of sync, you may want to use Restart.") (* backtrack Coq to the phrase preceding iterator [i] *) method backtrack_to_no_lock i = prerr_endline "Backtracking_to iter starts now."; full_goal_done <- false; (* pop Coq commands until we reach iterator [i] *) let rec n_step n = if Stack.is_empty cmd_stack then n else let phrase = Stack.top cmd_stack in let stop = input_buffer#get_iter_at_mark phrase.stop in if i#compare stop >= 0 then n else begin ignore (Stack.pop cmd_stack); if stop#backward_char#has_tag Tags.Script.comment then n_step n else n_step (succ n) end in begin try self#do_backtrack (fun _ -> ()) (n_step 0); (* We may have backtracked too much: let's replay *) self#process_until_iter_or_error i with _ -> push_info ("WARNING: undo failed badly.\n" ^ "Coq might be in an inconsistent state.\n" ^ "Please restart and report."); end method backtrack_to i = if Mutex.try_lock coq_may_stop then (push_info "Undoing..."; self#backtrack_to_no_lock i; Mutex.unlock coq_may_stop; pop_info ()) else prerr_endline "backtrack_to : discarded (lock is busy)" method go_to_insert = let point = self#get_insert in if point#compare self#get_start_of_input>=0 then self#process_until_iter_or_error point else self#backtrack_to point method undo_last_step = full_goal_done <- false; if Mutex.try_lock coq_may_stop then (push_info "Undoing last step..."; (try let phrase = Stack.pop cmd_stack in let stop = input_buffer#get_iter_at_mark phrase.stop in let count = if stop#backward_char#has_tag Tags.Script.comment then 0 else 1 in let finish where = input_buffer#place_cursor ~where; self#recenter_insert; in self#do_backtrack finish count with Stack.Empty -> () ); pop_info (); Mutex.unlock coq_may_stop) else prerr_endline "undo_last_step discarded" method insert_command cp ip = async(fun _ -> self#clear_message)(); ignore (self#insert_this_phrase_on_success true false false cp ip) method tactic_wizard l = async(fun _ -> self#clear_message)(); ignore (List.exists (fun p -> self#insert_this_phrase_on_success true false false ("progress "^p^".") (p^".")) l) method active_keypress_handler k = let state = GdkEvent.Key.state k in begin match state with | l -> if GdkEvent.Key.keyval k = GdkKeysyms._Tab then begin prerr_endline "active_kp_handler for Tab"; self#indent_current_line; true end else false end val mutable act_id = None method activate () = if not is_active then begin is_active <- true; act_id <- Some (input_view#event#connect#key_press ~callback:self#active_keypress_handler); 
prerr_endline "CONNECTED active : "; print_id (match act_id with Some x -> x | None -> assert false); match filename with | None -> () | Some f -> let dir = Filename.dirname f in let ct = !mycoqtop in match Coq.inloadpath ct dir with | Interface.Fail (_,str) -> self#set_message ("Could not determine lodpath, this might lead to problems:\n"^str) | Interface.Good true -> () | Interface.Good false -> let cmd = Printf.sprintf "Add LoadPath \"%s\". " dir in match Coq.interp ct cmd with | Interface.Fail (l,str) -> self#set_message ("Couln't add loadpath:\n"^str) | Interface.Good _ -> () end method private electric_paren tag = let oparen_code = Glib.Utf8.to_unichar "(" ~pos:(ref 0) in let cparen_code = Glib.Utf8.to_unichar ")" ~pos:(ref 0) in ignore (input_buffer#connect#insert_text ~callback: (fun it x -> input_buffer#remove_tag ~start:input_buffer#start_iter ~stop:input_buffer#end_iter tag; if x = "" then () else match x.[String.length x - 1] with | ')' -> let hit = self#get_insert in let count = ref 0 in if hit#nocopy#backward_find_char (fun c -> if c = oparen_code && !count = 0 then true else if c = cparen_code then (incr count;false) else if c = oparen_code then (decr count;false) else false ) then begin prerr_endline "Found matching parenthesis"; input_buffer#apply_tag tag ~start:hit ~stop:hit#forward_char end else () | _ -> ()) ) method help_for_keyword () = browse_keyword (self#insert_message) (get_current_word ()) (** NB: Events during text edition: - [begin_user_action] - [insert_text] (or [delete_range] when deleting) - [changed] - [end_user_action] When pasting a text containing tags (e.g. the sentence terminators), there is actually many [insert_text] and [changed]. For instance, for "a. b.": - [begin_user_action] - [insert_text] (for "a") - [changed] - [insert_text] (for ".") - [changed] - [apply_tag] (for the tag of ".") - [insert_text] (for " b") - [changed] - [insert_text] (for ".") - [changed] - [apply_tag] (for the tag of ".") - [end_user_action] Since these copy-pasted tags may interact badly with the retag mechanism, we now don't monitor the "changed" event, but rather the "begin_user_action" and "end_user_action". We begin by setting a mark at the initial cursor point. At the end, the zone between the mark and the cursor is to be untagged and then retagged. 
*) initializer ignore (message_buffer#connect#insert_text ~callback:(fun it s -> ignore (message_view#scroll_to_mark ~use_align:false ~within_margin:0.49 `INSERT))); ignore (input_buffer#connect#insert_text ~callback:(fun it s -> if (it#compare self#get_start_of_input)<0 then GtkSignal.stop_emit (); if String.length s > 1 then (prerr_endline "insert_text: Placing cursor";input_buffer#place_cursor ~where:it))); ignore (input_buffer#connect#after#apply_tag ~callback:(fun tag ~start ~stop -> if (start#compare self#get_start_of_input)>=0 then begin input_buffer#remove_tag Tags.Script.processed ~start ~stop; input_buffer#remove_tag Tags.Script.unjustified ~start ~stop end ) ); ignore (input_buffer#connect#after#insert_text ~callback:(fun it s -> if auto_complete_on && String.length s = 1 && s <> " " && s <> "\n" then let v = session_notebook#current_term.analyzed_view in let has_completed = v#complete_at_offset ((input_view#buffer#get_iter `SEL_BOUND)#offset) in if has_completed then input_buffer#move_mark `SEL_BOUND ~where:(input_buffer#get_iter `SEL_BOUND)#forward_char; ) ); ignore (input_buffer#connect#begin_user_action ~callback:(fun () -> let where = self#get_insert in input_buffer#move_mark (`NAME "prev_insert") ~where; let start = self#get_start_of_input in let stop = input_buffer#end_iter in input_buffer#remove_tag Tags.Script.error ~start ~stop) ); ignore (input_buffer#connect#end_user_action ~callback:(fun () -> last_modification_time <- Unix.time (); tag_on_insert input_buffer ) ); ignore (input_buffer#add_selection_clipboard cb); ignore (proof_buffer#add_selection_clipboard cb); ignore (message_buffer#add_selection_clipboard cb); self#electric_paren Tags.Script.paren; ignore (input_buffer#connect#after#mark_set ~callback:(fun it (m:Gtk.text_mark) -> !set_location (Printf.sprintf "Line: %5d Char: %3d" (self#get_insert#line + 1) (self#get_insert#line_offset + 1)); match GtkText.Mark.get_name m with | Some "insert" -> input_buffer#remove_tag ~start:input_buffer#start_iter ~stop:input_buffer#end_iter Tags.Script.paren; | Some s -> prerr_endline (s^" moved") | None -> () ) ); ignore (input_buffer#connect#insert_text ~callback:(fun it s -> prerr_endline "Should recenter ?"; if String.contains s '\n' then begin prerr_endline "Should recenter : yes"; self#recenter_insert end)); end let last_make = ref "";; let last_make_index = ref 0;; let search_compile_error_regexp = Str.regexp "File \"\\([^\"]+\\)\", line \\([0-9]+\\), characters \\([0-9]+\\)-\\([0-9]+\\)";; let search_next_error () = let _ = Str.search_forward search_compile_error_regexp !last_make !last_make_index in let f = Str.matched_group 1 !last_make and l = int_of_string (Str.matched_group 2 !last_make) and b = int_of_string (Str.matched_group 3 !last_make) and e = int_of_string (Str.matched_group 4 !last_make) and msg_index = Str.match_beginning () in last_make_index := Str.group_end 4; (f,l,b,e, String.sub !last_make msg_index (String.length !last_make - msg_index)) (**********************************************************************) (* session creation and primitive handling *) (**********************************************************************) let create_session file = let script = Undo.undoable_view ~buffer:(GText.buffer ~tag_table:Tags.Script.table ()) ~wrap_mode:`NONE () in let proof = GText.view ~buffer:(GText.buffer ~tag_table:Tags.Proof.table ()) ~editable:false ~wrap_mode:`CHAR () in let message = GText.view ~buffer:(GText.buffer ~tag_table:Tags.Message.table ()) ~editable:false ~wrap_mode:`WORD () in let 
basename = GMisc.label ~text:(match file with |None -> "*scratch*" |Some f -> (Glib.Convert.filename_to_utf8 (Filename.basename f)) ) () in let stack = Stack.create () in let coqtop_args = match file with |None -> !sup_args |Some the_file -> match !current.read_project with |Ignore_args -> !sup_args |Append_args -> (Project_file.args_from_project the_file !custom_project_files !current.project_file_name) @(!sup_args) |Subst_args -> Project_file.args_from_project the_file !custom_project_files !current.project_file_name in let ct = ref (Coq.spawn_coqtop coqtop_args) in let command = new Command_windows.command_window ct current in let legacy_av = new analyzed_view script proof message stack ct file in let () = legacy_av#update_stats in let _ = script#buffer#create_mark ~name:"start_of_input" script#buffer#start_iter in let _ = script#buffer#create_mark ~name:"prev_insert" script#buffer#start_iter in let _ = proof#buffer#create_mark ~name:"end_of_conclusion" proof#buffer#start_iter in let _ = GtkBase.Widget.add_events proof#as_widget [`ENTER_NOTIFY;`POINTER_MOTION] in let () = List.iter (fun (opts,_,_,_,dflt) -> setopts !ct opts dflt) print_items in let _ = legacy_av#activate () in let _ = proof#event#connect#motion_notify ~callback: (fun e -> let win = match proof#get_window `WIDGET with | None -> assert false | Some w -> w in let x,y = Gdk.Window.get_pointer_location win in let b_x,b_y = proof#window_to_buffer_coords ~tag:`WIDGET ~x ~y in let it = proof#get_iter_at_location ~x:b_x ~y:b_y in let tags = it#tags in List.iter (fun t -> ignore (GtkText.Tag.event t#as_tag proof#as_widget e it#as_iter)) tags; false) in script#misc#set_name "ScriptWindow"; script#buffer#place_cursor ~where:(script#buffer#start_iter); proof#misc#set_can_focus true; message#misc#set_can_focus true; (* setting fonts *) script#misc#modify_font !current.text_font; proof#misc#modify_font !current.text_font; message#misc#modify_font !current.text_font; (* setting colors *) script#misc#modify_base [`NORMAL, `NAME !current.background_color]; proof#misc#modify_base [`NORMAL, `NAME !current.background_color]; message#misc#modify_base [`NORMAL, `NAME !current.background_color]; { tab_label=basename; filename=begin match file with None -> "" |Some f -> f end; script=script; proof_view=proof; message_view=message; analyzed_view=legacy_av; encoding=""; toplvl=ct; command=command } (* XXX - to be used later let load_session session filename encs = session.encoding <- List.find (IdeIO.load filename session.script#buffer) encs; session.tab_label#set_text (Glib.Convert.filename_to_utf8 (Filename.basename filename)); session.filename <- filename; session.script#buffer#set_modified false let save_session session filename encs = session.encoding <- List.find (IdeIO.save session.script#buffer filename) encs; session.tab_label#set_text (Glib.Convert.filename_to_utf8 (Filename.basename filename)); session.filename <- filename; session.script#buffer#set_modified false let init_session session = session.script#buffer#set_modified false; session.script#clear_undo; session.script#buffer#place_cursor session.script#buffer#start_iter *) (*********************************************************************) (* functions called by the user interface *) (*********************************************************************) (* XXX - to be used later let do_open session filename = try load_session session filename ["UTF-8";"ISO-8859-1";"ISO-8859-15"]; init_session session; ignore (session_notebook#append_term session) with _ -> () let do_save 
session = try if session.script#buffer#modified then save_session session session.filename [session.encoding] with _ -> () let choose_open = let last_filename = ref "" in fun session -> let open_dialog = GWindow.file_chooser_dialog ~action:`OPEN ~title:"Open file" ~modal:true () in let enc_frame = GBin.frame ~label:"File encoding" ~packing:(open_dialog#vbox#pack ~fill:false) () in let enc_entry = GEdit.entry ~text:(String.concat " " ["UTF-8";"ISO-8859-1";"ISO-8859-15"]) ~packing:enc_frame#add () in let error_dialog = GWindow.message_dialog ~message_type:`ERROR ~modal:true ~buttons:GWindow.Buttons.ok ~message:"Invalid encoding, please indicate the encoding to use." () in let open_response = function | `OPEN -> begin match open_dialog#filename with | Some fn -> begin try load_session session fn (Util.split_string_at ' ' enc_entry#text); session.analyzed_view <- Some (new analyzed_view session); init_session session; session_notebook#goto_page (session_notebook#append_term session); last_filename := fn with | Not_found -> open_dialog#misc#hide (); error_dialog#show () | _ -> error_dialog#set_markup "Unknown error while loading file, aborting."; open_dialog#destroy (); error_dialog#destroy () end | None -> () end | `DELETE_EVENT -> open_dialog#destroy (); error_dialog#destroy () in let _ = open_dialog#connect#response open_response in let _ = error_dialog#connect#response (fun x -> error_dialog#misc#hide (); open_dialog#show ()) in let filter_any = GFile.filter ~name:"Any" ~patterns:["*"] () in let filter_coq = GFile.filter ~name:"Coq source" ~patterns:["*.v"] () in open_dialog#add_select_button_stock `OPEN `OPEN; open_dialog#add_button_stock `CANCEL `DELETE_EVENT; open_dialog#add_filter filter_any; open_dialog#add_filter filter_coq; ignore(open_dialog#set_filename !last_filename); open_dialog#show () let choose_save session = let save_dialog = GWindow.file_chooser_dialog ~action:`SAVE ~title:"Save file" ~modal:true () in let enc_frame = GBin.frame ~label:"File encoding" ~packing:(save_dialog#vbox#pack ~fill:false) () in let enc_entry = GEdit.entry ~text:(String.concat " " [session.encoding;"UTF-8";"ISO-8859-1";"ISO-8859-15"]) ~packing:enc_frame#add () in let error_dialog = GWindow.message_dialog ~message_type:`ERROR ~modal:true ~buttons:GWindow.Buttons.ok ~message:"Invalid encoding, please indicate the encoding to use." () in let save_response = function | `SAVE -> begin match save_dialog#filename with | Some fn -> begin try save_session session fn (Util.split_string_at ' ' enc_entry#text) with | Not_found -> save_dialog#misc#hide (); error_dialog#show () | _ -> error_dialog#set_markup "Unknown error while saving file, aborting."; save_dialog#destroy (); error_dialog#destroy () end | None -> () end | `DELETE_EVENT -> save_dialog#destroy (); error_dialog#destroy () in let _ = save_dialog#connect#response save_response in let _ = error_dialog#connect#response (fun x -> error_dialog#misc#hide (); save_dialog#show ()) in let filter_any = GFile.filter ~name:"Any" ~patterns:["*"] () in let filter_coq = GFile.filter ~name:"Coq source" ~patterns:["*.v"] () in save_dialog#add_select_button_stock `SAVE `SAVE; save_dialog#add_button_stock `CANCEL `DELETE_EVENT; save_dialog#add_filter filter_any; save_dialog#add_filter filter_coq; ignore(save_dialog#set_filename session.filename); save_dialog#show () *) (* Nota: using && here has the advantage of working both under win32 and unix. 
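For instance (an illustration added here, not part of the original comment): on unix, [local_cd "/home/user/dev/Foo.v"] evaluates to roughly the prefix "cd '/home/user/dev' && ", so the coqdoc, make or coqc command appended to it runs in the file's directory, and is not run at all when the "cd" itself fails.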
If someday we want the main command to be tried even if the "cd" has failed, then we should use " ; " under unix but " & " under win32 (cf. #2363). *) let local_cd file = "cd " ^ Filename.quote (Filename.dirname file) ^ " && " let do_print session = let av = session.analyzed_view in match av#filename with |None -> flash_info "Cannot print: this buffer has no name" |Some f_name -> begin let cmd = local_cd f_name ^ !current.cmd_coqdoc ^ " -ps " ^ Filename.quote (Filename.basename f_name) ^ " | " ^ !current.cmd_print in let print_window = GWindow.window ~title:"Print" ~modal:true ~position:`CENTER ~wm_class:"CoqIDE" ~wm_name: "CoqIDE" () in let vbox_print = GPack.vbox ~spacing:10 ~border_width:10 ~packing:print_window#add () in let _ = GMisc.label ~justify:`LEFT ~text:"Print using the following command:" ~packing:vbox_print#add () in let print_entry = GEdit.entry ~text:cmd ~editable:true ~width_chars:80 ~packing:vbox_print#add () in let hbox_print = GPack.hbox ~spacing:10 ~packing:vbox_print#add () in let print_cancel_button = GButton.button ~stock:`CANCEL ~label:"Cancel" ~packing:hbox_print#add () in let print_button = GButton.button ~stock:`PRINT ~label:"Print" ~packing:hbox_print#add () in let callback_print () = let cmd = print_entry#text in let s,_ = run_command av#insert_message cmd in flash_info (cmd ^ if s = Unix.WEXITED 0 then " succeeded" else " failed"); print_window#destroy () in ignore (print_cancel_button#connect#clicked ~callback:print_window#destroy) ; ignore (print_button#connect#clicked ~callback:callback_print); print_window#misc#show () end let load_file handler f = let f = absolute_filename f in try prerr_endline "Loading file starts"; let is_f = Minilib.same_file f in if not (Minilib.list_fold_left_i (fun i found x -> if found then found else let {analyzed_view=av} = x in (match av#filename with | None -> false | Some fn -> if is_f fn then (session_notebook#goto_page i; true) else false)) 0 false session_notebook#pages) then begin prerr_endline "Loading: must open"; let b = Buffer.create 1024 in prerr_endline "Loading: get raw content"; with_file handler f ~f:(input_channel b); prerr_endline "Loading: convert content"; let s = do_convert (Buffer.contents b) in prerr_endline "Loading: create view"; let session = create_session (Some f) in prerr_endline "Loading: adding view"; let index = session_notebook#append_term session in let av = session.analyzed_view in prerr_endline "Loading: stats"; av#update_stats; let input_buffer = session.script#buffer in prerr_endline "Loading: fill buffer"; input_buffer#set_text s; input_buffer#place_cursor ~where:input_buffer#start_iter; force_retag input_buffer; prerr_endline ("Loading: switch to view "^ string_of_int index); session_notebook#goto_page index; prerr_endline "Loading: highlight"; input_buffer#set_modified false; prerr_endline "Loading: clear undo"; session.script#clear_undo; prerr_endline "Loading: success" end with | e -> handler ("Load failed: "^(Printexc.to_string e)) let do_load = load_file flash_info let saveall_f () = List.iter (function | {script = view ; analyzed_view = av} -> begin match av#filename with | None -> () | Some f -> ignore (av#save f) end ) session_notebook#pages let forbid_quit_to_save () = begin try save_pref() with e -> flash_info "Cannot save preferences" end; (if List.exists (function | {script=view} -> view#buffer#modified ) session_notebook#pages then match (GToolbox.question_box ~title:"Quit" ~buttons:["Save Named Buffers and Quit"; "Quit without Saving"; "Don't Quit"] ~default:0 ~icon: (let 
img = GMisc.image () in img#set_stock `DIALOG_WARNING; img#set_icon_size `DIALOG; img#coerce) "There are unsaved buffers" ) with 1 -> saveall_f () ; false | 2 -> false | _ -> true else false)|| (let wait_window = GWindow.window ~modal:true ~wm_class:"CoqIde" ~wm_name:"CoqIde" ~kind:`POPUP ~title:"Terminating coqtops" () in let _ = GMisc.label ~text:"Terminating coqtops processes, please wait ..." ~packing:wait_window#add () in let warning_window = GWindow.message_dialog ~message_type:`WARNING ~buttons:GWindow.Buttons.yes_no ~message: ("Some coqtops processes are still running.\n" ^ "If you quit CoqIDE right now, you may have to kill them manually.\n" ^ "Do you want to wait for those processes to terminate ?") () in let () = List.iter (fun _ -> session_notebook#remove_page 0) session_notebook#pages in let nb_try=ref (0) in let () = wait_window#show () in let () = while (Coq.coqtop_zombies () <> 0)&&(!nb_try <= 50) do incr nb_try; Thread.delay 0.1 ; done in if (!nb_try = 50) then begin wait_window#misc#hide (); match warning_window#run () with | `YES -> warning_window#misc#hide (); true | `NO | `DELETE_EVENT -> false end else false) let logfile = ref None let main files = (* Main window *) let w = GWindow.window ~wm_class:"CoqIde" ~wm_name:"CoqIde" ~allow_grow:true ~allow_shrink:true ~width:!current.window_width ~height:!current.window_height ~title:"CoqIde" () in (try let icon_image = Filename.concat (List.find (fun x -> Sys.file_exists (Filename.concat x "coq.png")) Minilib.xdg_data_dirs) "coq.png" in let icon = GdkPixbuf.from_file icon_image in w#set_icon (Some icon) with _ -> ()); let vbox = GPack.vbox ~homogeneous:false ~packing:w#add () in let new_f _ = let session = create_session None in let index = session_notebook#append_term session in session_notebook#goto_page index in let load_f _ = match select_file_for_open ~title:"Load file" () with | None -> () | Some f -> do_load f in let save_f _ = let current = session_notebook#current_term in try (match current.analyzed_view#filename with | None -> begin match select_file_for_save ~title:"Save file" () with | None -> () | Some f -> if current.analyzed_view#save_as f then begin current.tab_label#set_text (Filename.basename f); flash_info ("File " ^ f ^ " saved") end else warning ("Save Failed (check if " ^ f ^ " is writable)") end | Some f -> if current.analyzed_view#save f then flash_info ("File " ^ f ^ " saved") else warning ("Save Failed (check if " ^ f ^ " is writable)") ) with | e -> warning "Save: unexpected error" in let saveas_f _ = let current = session_notebook#current_term in try (match current.analyzed_view#filename with | None -> begin match select_file_for_save ~title:"Save file as" () with | None -> () | Some f -> if current.analyzed_view#save_as f then begin current.tab_label#set_text (Filename.basename f); flash_info "Saved" end else flash_info "Save Failed" end | Some f -> begin match select_file_for_save ~dir:(ref (Filename.dirname f)) ~filename:(Filename.basename f) ~title:"Save file as" () with | None -> () | Some f -> if current.analyzed_view#save_as f then begin current.tab_label#set_text (Filename.basename f); flash_info "Saved" end else flash_info "Save Failed" end); with e -> flash_info "Save Failed" in let revert_f {analyzed_view = av} = (try match av#filename,av#stats with | Some f,Some stats -> let new_stats = Unix.stat f in if new_stats.Unix.st_mtime > stats.Unix.st_mtime then av#revert | Some _, None -> av#revert | _ -> () with _ -> av#revert) in let export_f kind _ = let v = session_notebook#current_term 
in let av = v.analyzed_view in match av#filename with | None -> flash_info "Cannot print: this buffer has no name" | Some f -> let basef = Filename.basename f in let output = let basef_we = try Filename.chop_extension basef with _ -> basef in match kind with | "latex" -> basef_we ^ ".tex" | "dvi" | "ps" | "pdf" | "html" -> basef_we ^ "." ^ kind | _ -> assert false in let cmd = local_cd f ^ !current.cmd_coqdoc ^ " --" ^ kind ^ " -o " ^ (Filename.quote output) ^ " " ^ (Filename.quote basef) in let s,_ = run_command av#insert_message cmd in flash_info (cmd ^ if s = Unix.WEXITED 0 then " succeeded" else " failed") in let quit_f _ = if not (forbid_quit_to_save ()) then exit 0 in let get_active_view_for_cp () = let has_sel (i0,i1) = i0#compare i1 <> 0 in let current = session_notebook#current_term in if has_sel current.script#buffer#selection_bounds then current.script#as_view else if has_sel current.proof_view#buffer#selection_bounds then current.proof_view#as_view else current.message_view#as_view in (* let toggle_auto_complete_i = edit_f#add_check_item "_Auto Completion" ~active:!current.auto_complete ~callback: in *) (* auto_complete := (fun b -> match session_notebook#current_term.analyzed_view with | Some av -> av#set_auto_complete b | None -> ()); *) (* begin of find/replace mechanism *) let last_found = ref None in let search_backward = ref false in let find_w = GWindow.window (* ~wm_class:"CoqIde" ~wm_name:"CoqIde" *) (* ~allow_grow:true ~allow_shrink:true *) (* ~width:!current.window_width ~height:!current.window_height *) ~position:`CENTER ~title:"CoqIde search/replace" () in let find_box = GPack.table ~columns:3 ~rows:5 ~col_spacings:10 ~row_spacings:10 ~border_width:10 ~homogeneous:false ~packing:find_w#add () in let _ = GMisc.label ~text:"Find:" ~xalign:1.0 ~packing:(find_box#attach ~left:0 ~top:0 ~fill:`X) () in let find_entry = GEdit.entry ~editable: true ~packing: (find_box#attach ~left:1 ~top:0 ~expand:`X) () in let _ = GMisc.label ~text:"Replace with:" ~xalign:1.0 ~packing:(find_box#attach ~left:0 ~top:1 ~fill:`X) () in let replace_entry = GEdit.entry ~editable: true ~packing: (find_box#attach ~left:1 ~top:1 ~expand:`X) () in (* let _ = GButton.check_button ~label:"case sensitive" ~active:true ~packing: (find_box#attach ~left:1 ~top:2) () in *) let find_backwards_check = GButton.check_button ~label:"search backwards" ~active:!search_backward ~packing: (find_box#attach ~left:1 ~top:3) () in let close_find_button = GButton.button ~label:"Close" ~packing: (find_box#attach ~left:2 ~top:2) () in let replace_find_button = GButton.button ~label:"Replace and find" ~packing: (find_box#attach ~left:2 ~top:1) () in let find_again_button = GButton.button ~label:"_Find again" ~packing: (find_box#attach ~left:2 ~top:0) () in let last_find () = let v = session_notebook#current_term.script in let b = v#buffer in let start,stop = match !last_found with | None -> let i = b#get_iter_at_mark `INSERT in (i,i) | Some(start,stop) -> let start = b#get_iter_at_mark start and stop = b#get_iter_at_mark stop in b#remove_tag Tags.Script.found ~start ~stop; last_found:=None; start,stop in (v,b,start,stop) in let do_replace () = let v = session_notebook#current_term.script in let b = v#buffer in match !last_found with | None -> () | Some(start,stop) -> let start = b#get_iter_at_mark start and stop = b#get_iter_at_mark stop in b#delete ~start ~stop; b#insert ~iter:start replace_entry#text; last_found:=None in let find_from (v : Undo.undoable_view) (b : GText.buffer) (starti : GText.iter) text = prerr_endline 
("Searching for " ^ text); match (if !search_backward then starti#backward_search text else starti#forward_search text) with | None -> () | Some(start,stop) -> b#apply_tag Tags.Script.found ~start ~stop; let start = `MARK (b#create_mark start) and stop = `MARK (b#create_mark stop) in v#scroll_to_mark ~use_align:false ~yalign:0.75 ~within_margin:0.25 stop; last_found := Some(start,stop) in let do_find () = let (v,b,starti,_) = last_find () in find_from v b starti find_entry#text in let do_replace_find () = do_replace(); do_find() in let close_find () = let (v,b,_,stop) = last_find () in b#place_cursor ~where:stop; find_w#misc#hide(); v#coerce#misc#grab_focus() in to_do_on_page_switch := (fun i -> if find_w#misc#visible then close_find()):: !to_do_on_page_switch; let find_again () = let (v,b,start,_) = last_find () in let start = if !search_backward then start#backward_chars 1 else start#forward_chars 1 in find_from v b start find_entry#text in let click_on_backward () = search_backward := not !search_backward in let key_find ev = let s = GdkEvent.Key.state ev and k = GdkEvent.Key.keyval ev in if k = GdkKeysyms._Escape then begin let (v,b,_,stop) = last_find () in find_w#misc#hide(); v#coerce#misc#grab_focus(); true end else if k = GdkKeysyms._Escape then begin close_find(); true end else if k = GdkKeysyms._Return || List.mem `CONTROL s && k = GdkKeysyms._f then begin find_again (); true end else if List.mem `CONTROL s && k = GdkKeysyms._b then begin find_backwards_check#set_active (not !search_backward); true end else false (* to let default callback execute *) in let find_f ~backward () = let save_dir = !search_backward in search_backward := backward; find_w#show (); find_w#present (); find_entry#misc#grab_focus (); search_backward := save_dir in let _ = find_again_button#connect#clicked find_again in let _ = close_find_button#connect#clicked close_find in let _ = replace_find_button#connect#clicked do_replace_find in let _ = find_backwards_check#connect#clicked click_on_backward in let _ = find_entry#connect#changed do_find in let _ = find_entry#event#connect#key_press ~callback:key_find in let _ = find_w#event#connect#delete ~callback:(fun _ -> find_w#misc#hide(); true) in (* let search_if = edit_f#add_item "Search _forward" ~key:GdkKeysyms._greater in let search_ib = edit_f#add_item "Search _backward" ~key:GdkKeysyms._less in *) (* let complete_i = edit_f#add_item "_Complete" ~key:GdkKeysyms._comma ~callback: (do_if_not_computing (fun b -> let v = session_notebook#current_term.analyzed_view in v#complete_at_offset ((v#view#buffer#get_iter `SEL_BOUND)#offset) )) in complete_i#misc#set_state `INSENSITIVE; *) (* end of find/replace mechanism *) (* begin Preferences *) let reset_revert_timer () = disconnect_revert_timer (); if !current.global_auto_revert then revert_timer := Some (GMain.Timeout.add ~ms:!current.global_auto_revert_delay ~callback: (fun () -> do_if_not_computing "revert" (sync revert_f) session_notebook#pages; true)) in reset_revert_timer (); (* to enable statup preferences timer *) (* XXX *) let auto_save_f {analyzed_view = av} = (try av#auto_save with _ -> ()) in let reset_auto_save_timer () = disconnect_auto_save_timer (); if !current.auto_save then auto_save_timer := Some (GMain.Timeout.add ~ms:!current.auto_save_delay ~callback: (fun () -> do_if_not_computing "autosave" (sync auto_save_f) session_notebook#pages; true)) in reset_auto_save_timer (); (* to enable statup preferences timer *) (* end Preferences *) let do_or_activate f () = do_if_not_computing "do_or_activate" 
(fun current -> let av = current.analyzed_view in ignore (f av); pop_info (); let msg = match Coq.status !(current.toplvl) with | Interface.Fail (l, str) -> "Oops, problem while fetching coq status." | Interface.Good status -> let path = match status.Interface.status_path with | [] | _ :: [] -> "" (* Drop the topmost level, usually "Top" *) | _ :: l -> " in " ^ String.concat "." l in let name = match status.Interface.status_proofname with | None -> "" | Some n -> ", proving " ^ n in "Ready" ^ path ^ name in push_info msg ) [session_notebook#current_term] in let do_if_active f _ = do_if_not_computing "do_if_active" (fun sess -> ignore (f sess.analyzed_view)) [session_notebook#current_term] in let match_callback _ = let w = get_current_word () in let cur_ct = !(session_notebook#current_term.toplvl) in try match Coq.mkcases cur_ct w with | Interface.Fail _ -> raise Not_found | Interface.Good cases -> let print_branch c l = Format.fprintf c " | @[%a@]=> _@\n" (print_list (fun c s -> Format.fprintf c "%s@ " s)) l in let b = Buffer.create 1024 in let fmt = Format.formatter_of_buffer b in Format.fprintf fmt "@[match var with@\n%aend@]@." (print_list print_branch) cases; let s = Buffer.contents b in prerr_endline s; let {script = view } = session_notebook#current_term in ignore (view#buffer#delete_selection ()); let m = view#buffer#create_mark (view#buffer#get_iter `INSERT) in if view#buffer#insert_interactive s then let i = view#buffer#get_iter (`MARK m) in let _ = i#nocopy#forward_chars 9 in view#buffer#place_cursor ~where:i; view#buffer#move_mark ~where:(i#backward_chars 3) `SEL_BOUND with Not_found -> flash_info "Not an inductive type" in (* External command callback *) let compile_f _ = let v = session_notebook#current_term in let av = v.analyzed_view in save_f (); match av#filename with | None -> flash_info "Active buffer has no name" | Some f -> let cmd = !current.cmd_coqc ^ " -I " ^ (Filename.quote (Filename.dirname f)) ^ " " ^ (Filename.quote f) in let s,res = run_command av#insert_message cmd in if s = Unix.WEXITED 0 then flash_info (f ^ " successfully compiled") else begin flash_info (f ^ " failed to compile"); av#process_until_end_or_error; av#insert_message "Compilation output:\n"; av#insert_message res end in let make_f _ = let v = session_notebook#current_term in let av = v.analyzed_view in match av#filename with | None -> flash_info "Cannot make: this buffer has no name" | Some f -> let cmd = local_cd f ^ !current.cmd_make in (* save_f (); *) av#insert_message "Command output:\n"; let s,res = run_command av#insert_message cmd in last_make := res; last_make_index := 0; flash_info (!current.cmd_make ^ if s = Unix.WEXITED 0 then " succeeded" else " failed") in let next_error _ = try let file,line,start,stop,error_msg = search_next_error () in do_load file; let v = session_notebook#current_term in let av = v.analyzed_view in let input_buffer = v.script#buffer in (* let init = input_buffer#start_iter in let i = init#forward_lines (line-1) in *) (* let convert_pos = byte_offset_to_char_offset phrase in let start = convert_pos start in let stop = convert_pos stop in *) (* let starti = i#forward_chars start in let stopi = i#forward_chars stop in *) let starti = input_buffer#get_iter_at_byte ~line:(line-1) start in let stopi = input_buffer#get_iter_at_byte ~line:(line-1) stop in input_buffer#apply_tag Tags.Script.error ~start:starti ~stop:stopi; input_buffer#place_cursor ~where:starti; av#set_message error_msg; v.script#misc#grab_focus () with Not_found -> last_make_index := 0; let v = 
session_notebook#current_term in let av = v.analyzed_view in av#set_message "No more errors.\n" in let coq_makefile_f _ = let v = session_notebook#current_term in let av = v.analyzed_view in match av#filename with | None -> flash_info "Cannot make makefile: this buffer has no name" | Some f -> let cmd = local_cd f ^ !current.cmd_coqmakefile in let s,res = run_command av#insert_message cmd in flash_info (!current.cmd_coqmakefile ^ if s = Unix.WEXITED 0 then " succeeded" else " failed") in let file_actions = GAction.action_group ~name:"File" () in let edit_actions = GAction.action_group ~name:"Edit" () in let view_actions = GAction.action_group ~name:"View" () in let export_actions = GAction.action_group ~name:"Export" () in let navigation_actions = GAction.action_group ~name:"Navigation" () in let tactics_actions = GAction.action_group ~name:"Tactics" () in let templates_actions = GAction.action_group ~name:"Templates" () in let queries_actions = GAction.action_group ~name:"Queries" () in let compile_actions = GAction.action_group ~name:"Compile" () in let windows_actions = GAction.action_group ~name:"Windows" () in let help_actions = GAction.action_group ~name:"Help" () in let add_gen_actions menu_name act_grp l = let no_under = Minilib.string_map (fun x -> if x = '_' then '-' else x) in let add_simple_template menu_name act_grp text = let text' = let l = String.length text - 1 in if String.get text l = '.' then text ^"\n" else text ^" " in GAction.add_action (menu_name^" "^(no_under text)) ~label:text ~callback:(fun _ -> let {script = view } = session_notebook#current_term in ignore (view#buffer#insert_interactive text')) act_grp in List.iter (function | [] -> () | [s] -> add_simple_template menu_name act_grp s | s::_ as ll -> let label = "_@..." in label.[1] <- s.[0]; GAction.add_action (menu_name^" "^(String.make 1 s.[0])) ~label act_grp; List.iter (add_simple_template menu_name act_grp) ll ) l in let tactic_shortcut s sc = GAction.add_action s ~label:("_"^s) ~accel:(!current.modifier_for_tactics^sc) ~callback:(do_if_active (fun a -> a#insert_command ("progress "^s^".") (s^"."))) in let query_callback command _ = let word = get_current_word () in if not (word = "") then let term = session_notebook#current_term in let query = command ^ " " ^ word ^ "." 
in term.message_view#buffer#set_text ""; term.analyzed_view#raw_coq_query query in let query_shortcut s accel = GAction.add_action s ~label:("_"^s) ?accel ~callback:(query_callback s) in let add_complex_template (name, label, text, offset, len, key) = (* Templates/Lemma *) let callback _ = let {script = view } = session_notebook#current_term in if view#buffer#insert_interactive text then begin let iter = view#buffer#get_iter_at_mark `INSERT in ignore (iter#nocopy#backward_chars offset); view#buffer#move_mark `INSERT ~where:iter; ignore (iter#nocopy#backward_chars len); view#buffer#move_mark `SEL_BOUND ~where:iter; end in match key with |Some ac -> GAction.add_action name ~label ~callback ~accel:(!current.modifier_for_templates^ac) |None -> GAction.add_action name ~label ~callback ?accel:None in let detach_view _ = (* Open a separate window containing the current buffer *) let trm = session_notebook#current_term in let w = GWindow.window ~show:true ~width:(!current.window_width*2/3) ~height:(!current.window_height*2/3) ~position:`CENTER ~title:(if trm.filename = "" then "*scratch*" else trm.filename) () in let sb = GBin.scrolled_window ~packing:w#add () in let nv = GText.view ~buffer:trm.script#buffer ~packing:sb#add () in nv#misc#modify_font !current.text_font; (* If the buffer in the main window is closed, destroy this detached view *) ignore (trm.script#connect#destroy ~callback:w#destroy) in GAction.add_actions file_actions [ GAction.add_action "File" ~label:"_File"; GAction.add_action "New" ~callback:new_f ~stock:`NEW; GAction.add_action "Open" ~callback:load_f ~stock:`OPEN; GAction.add_action "Save" ~callback:save_f ~stock:`SAVE ~tooltip:"Save current buffer"; GAction.add_action "Save as" ~label:"S_ave as" ~callback:saveas_f ~stock:`SAVE_AS; GAction.add_action "Save all" ~label:"Sa_ve all" ~callback:(fun _ -> saveall_f ()); GAction.add_action "Revert all buffers" ~label:"_Revert all buffers" ~callback:(fun _ -> List.iter revert_f session_notebook#pages) ~stock:`REVERT_TO_SAVED; GAction.add_action "Close buffer" ~label:"_Close buffer" ~callback:(fun _ -> remove_current_view_page ()) ~stock:`CLOSE ~tooltip:"Close current buffer"; GAction.add_action "Print..." ~label:"_Print..." 
~callback:(fun _ -> do_print session_notebook#current_term) ~stock:`PRINT ~accel:"p"; GAction.add_action "Rehighlight" ~label:"Reh_ighlight" ~accel:"l" ~callback:(fun _ -> force_retag session_notebook#current_term.script#buffer; session_notebook#current_term.analyzed_view#recenter_insert) ~stock:`REFRESH; GAction.add_action "Quit" ~callback:quit_f ~stock:`QUIT; ]; GAction.add_actions export_actions [ GAction.add_action "Export to" ~label:"E_xport to"; GAction.add_action "Html" ~label:"_Html" ~callback:(export_f "html"); GAction.add_action "Latex" ~label:"_LaTeX" ~callback:(export_f "latex"); GAction.add_action "Dvi" ~label:"_Dvi" ~callback:(export_f "dvi"); GAction.add_action "Pdf" ~label:"_Pdf" ~callback:(export_f "pdf"); GAction.add_action "Ps" ~label:"_Ps" ~callback:(export_f "ps"); ]; GAction.add_actions edit_actions [ GAction.add_action "Edit" ~label:"_Edit"; GAction.add_action "Undo" ~accel:"u" ~callback:(fun _ -> do_if_not_computing "undo" (fun sess -> ignore (sess.analyzed_view#without_auto_complete (fun () -> session_notebook#current_term.script#undo) ())) [session_notebook#current_term]) ~stock:`UNDO; GAction.add_action "Clear Undo Stack" ~label:"_Clear Undo Stack" ~callback:(fun _ -> ignore session_notebook#current_term.script#clear_undo); GAction.add_action "Cut" ~callback:(fun _ -> GtkSignal.emit_unit (get_active_view_for_cp ()) ~sgn:GtkText.View.S.cut_clipboard ) ~stock:`CUT; GAction.add_action "Copy" ~callback:(fun _ -> GtkSignal.emit_unit (get_active_view_for_cp ()) ~sgn:GtkText.View.S.copy_clipboard) ~stock:`COPY; GAction.add_action "Paste" ~callback:(fun _ -> try GtkSignal.emit_unit session_notebook#current_term.script#as_view ~sgn:GtkText.View.S.paste_clipboard with _ -> prerr_endline "EMIT PASTE FAILED") ~stock:`PASTE; GAction.add_action "Find in buffer" ~label:"_Find in buffer" ~callback:(fun _ -> find_f ~backward:false ()) ~stock:`FIND; GAction.add_action "Find backwards" ~label:"Find _backwards" ~callback:(fun _ -> find_f ~backward:true ()) ~accel:"b"; GAction.add_action "Complete Word" ~label:"Complete Word" ~callback:(fun _ -> ignore ( let av = session_notebook#current_term.analyzed_view in av#complete_at_offset (av#get_insert)#offset )) ~accel:"slash"; GAction.add_action "External editor" ~label:"External editor" ~callback:(fun _ -> let av = session_notebook#current_term.analyzed_view in match av#filename with | None -> warning "Call to external editor available only on named files" | Some f -> save_f (); let com = Minilib.subst_command_placeholder !current.cmd_editor (Filename.quote f) in let _ = run_command av#insert_message com in av#revert) ~stock:`EDIT; GAction.add_action "Preferences" ~callback:(fun _ -> begin try configure ~apply:update_notebook_pos () with _ -> flash_info "Cannot save preferences" end; reset_revert_timer ()) ~accel:"comma" ~stock:`PREFERENCES; (* GAction.add_action "Save preferences" ~label:"_Save preferences" ~callback:(fun _ -> save_pref ()); *) ]; GAction.add_actions view_actions [ GAction.add_action "View" ~label:"_View"; GAction.add_action "Previous tab" ~label:"_Previous tab" ~accel:("Left") ~stock:`GO_BACK ~callback:(fun _ -> session_notebook#previous_page ()); GAction.add_action "Next tab" ~label:"_Next tab" ~accel:("Right") ~stock:`GO_FORWARD ~callback:(fun _ -> session_notebook#next_page ()); GAction.add_toggle_action "Show Toolbar" ~label:"Show _Toolbar" ~active:(!current.show_toolbar) ~callback: (fun _ -> !current.show_toolbar <- not !current.show_toolbar; !refresh_toolbar_hook ()); GAction.add_toggle_action "Show Query Pane" 
~label:"Show _Query Pane" ~callback:(fun _ -> let ccw = session_notebook#current_term.command in if ccw#frame#misc#visible then ccw#frame#misc#hide () else ccw#frame#misc#show ()) ~accel:"Escape"; ]; List.iter (fun (opts,name,label,key,dflt) -> GAction.add_toggle_action name ~active:dflt ~label ~accel:(!current.modifier_for_display^key) ~callback:(fun v -> do_or_activate (fun a -> let () = setopts !(session_notebook#current_term.toplvl) opts v#get_active in a#show_goals) ()) view_actions) print_items; GAction.add_actions navigation_actions [ GAction.add_action "Navigation" ~label:"_Navigation"; GAction.add_action "Forward" ~label:"_Forward" ~stock:`GO_DOWN ~callback:(fun _ -> do_or_activate (fun a -> a#process_next_phrase true) ()) ~tooltip:"Forward one command" ~accel:(!current.modifier_for_navigation^"Down"); GAction.add_action "Backward" ~label:"_Backward" ~stock:`GO_UP ~callback:(fun _ -> do_or_activate (fun a -> a#undo_last_step) ()) ~tooltip:"Backward one command" ~accel:(!current.modifier_for_navigation^"Up"); GAction.add_action "Go to" ~label:"_Go to" ~stock:`JUMP_TO ~callback:(fun _ -> do_or_activate (fun a -> a#go_to_insert) ()) ~tooltip:"Go to cursor" ~accel:(!current.modifier_for_navigation^"Right"); GAction.add_action "Start" ~label:"_Start" ~stock:`GOTO_TOP ~callback:(fun _ -> force_reset_initial ()) ~tooltip:"Restart coq" ~accel:(!current.modifier_for_navigation^"Home"); GAction.add_action "End" ~label:"_End" ~stock:`GOTO_BOTTOM ~callback:(fun _ -> do_or_activate (fun a -> a#process_until_end_or_error) ()) ~tooltip:"Go to end" ~accel:(!current.modifier_for_navigation^"End"); GAction.add_action "Interrupt" ~label:"_Interrupt" ~stock:`STOP ~callback:(fun _ -> break ()) ~tooltip:"Interrupt computations" ~accel:(!current.modifier_for_navigation^"Break"); GAction.add_action "Hide" ~label:"_Hide" ~stock:`MISSING_IMAGE ~callback:(fun _ -> let sess = session_notebook#current_term in toggle_proof_visibility sess.script#buffer sess.analyzed_view#get_insert) ~tooltip:"Hide proof" ~accel:(!current.modifier_for_navigation^"h"); GAction.add_action "Previous" ~label:"_Previous" ~stock:`GO_BACK ~callback:(fun _ -> do_or_activate (fun a -> a#go_to_prev_occ_of_cur_word) ()) ~tooltip:"Previous occurence" ~accel:(!current.modifier_for_navigation^"less"); GAction.add_action "Next" ~label:"_Next" ~stock:`GO_FORWARD ~callback:(fun _ -> do_or_activate (fun a -> a#go_to_next_occ_of_cur_word) ()) ~tooltip:"Next occurence" ~accel:(!current.modifier_for_navigation^"greater"); ]; GAction.add_actions tactics_actions [ GAction.add_action "Try Tactics" ~label:"_Try Tactics"; GAction.add_action "Wizard" ~tooltip:"Proof Wizard" ~label:"" ~stock:`DIALOG_INFO ~callback:(do_if_active (fun a -> a#tactic_wizard !current.automatic_tactics)) ~accel:(!current.modifier_for_tactics^"dollar"); tactic_shortcut "auto" "a"; tactic_shortcut "auto with *" "asterisk"; tactic_shortcut "eauto" "e"; tactic_shortcut "eauto with *" "ampersand"; tactic_shortcut "intuition" "i"; tactic_shortcut "omega" "o"; tactic_shortcut "simpl" "s"; tactic_shortcut "tauto" "p"; tactic_shortcut "trivial" "v"; ]; add_gen_actions "Tactic" tactics_actions Coq_commands.tactics; GAction.add_actions templates_actions [ GAction.add_action "Templates" ~label:"Te_mplates"; add_complex_template ("Lemma", "_Lemma __", "Lemma new_lemma : .\nIdeproof.\n\nSave.\n", 19, 9, Some "L"); add_complex_template ("Theorem", "_Theorem __", "Theorem new_theorem : .\nIdeproof.\n\nSave.\n", 19, 11, Some "T"); add_complex_template ("Definition", "_Definition __", 
"Definition ident := .\n", 6, 5, Some "D"); add_complex_template ("Inductive", "_Inductive __", "Inductive ident : :=\n | : .\n", 14, 5, Some "I"); add_complex_template ("Fixpoint", "_Fixpoint __", "Fixpoint ident (_ : _) {struct _} : _ :=\n.\n", 29, 5, Some "F"); add_complex_template ("Scheme", "_Scheme __", "Scheme new_scheme := Induction for _ Sort _\ \nwith _ := Induction for _ Sort _.\n",61,10, Some "S"); GAction.add_action "match" ~label:"match ..." ~callback:match_callback ~accel:(!current.modifier_for_templates^"C"); ]; add_gen_actions "Template" templates_actions Coq_commands.commands; GAction.add_actions queries_actions [ GAction.add_action "Queries" ~label:"_Queries"; query_shortcut "SearchAbout" (Some "F2"); query_shortcut "Check" (Some "F3"); query_shortcut "Print" (Some "F4"); query_shortcut "About" (Some "F5"); query_shortcut "Locate" None; query_shortcut "Whelp Locate" None; ]; GAction.add_actions compile_actions [ GAction.add_action "Compile" ~label:"_Compile"; GAction.add_action "Compile buffer" ~label:"_Compile buffer" ~callback:compile_f; GAction.add_action "Make" ~label:"_Make" ~callback:make_f ~accel:"F6"; GAction.add_action "Next error" ~label:"_Next error" ~callback:next_error ~accel:"F7"; GAction.add_action "Make makefile" ~label:"Make makefile" ~callback:coq_makefile_f; ]; GAction.add_actions windows_actions [ GAction.add_action "Windows" ~label:"_Windows"; GAction.add_action "Detach View" ~label:"Detach _View" ~callback:detach_view ]; GAction.add_actions help_actions [ GAction.add_action "Help" ~label:"_Help"; GAction.add_action "Browse Coq Manual" ~label:"Browse Coq _Manual" ~callback:(fun _ -> let av = session_notebook#current_term.analyzed_view in browse av#insert_message (doc_url ())); GAction.add_action "Browse Coq Library" ~label:"Browse Coq _Library" ~callback:(fun _ -> let av = session_notebook#current_term.analyzed_view in browse av#insert_message !current.library_url); GAction.add_action "Help for keyword" ~label:"Help for _keyword" ~callback:(fun _ -> let av = session_notebook#current_term.analyzed_view in av#help_for_keyword ()) ~stock:`HELP; GAction.add_action "About Coq" ~label:"_About" ~stock:`ABOUT; ]; Coqide_ui.init (); Coqide_ui.ui_m#insert_action_group file_actions 0; Coqide_ui.ui_m#insert_action_group export_actions 0; Coqide_ui.ui_m#insert_action_group edit_actions 0; Coqide_ui.ui_m#insert_action_group view_actions 0; Coqide_ui.ui_m#insert_action_group navigation_actions 0; Coqide_ui.ui_m#insert_action_group tactics_actions 0; Coqide_ui.ui_m#insert_action_group templates_actions 0; Coqide_ui.ui_m#insert_action_group queries_actions 0; Coqide_ui.ui_m#insert_action_group compile_actions 0; Coqide_ui.ui_m#insert_action_group windows_actions 0; Coqide_ui.ui_m#insert_action_group help_actions 0; w#add_accel_group Coqide_ui.ui_m#get_accel_group ; GtkMain.Rc.parse_string "gtk-can-change-accels = 1"; if Coq_config.gtk_platform <> `QUARTZ then vbox#pack (Coqide_ui.ui_m#get_widget "/CoqIde MenuBar"); let tbar = GtkButton.Toolbar.cast ((Coqide_ui.ui_m#get_widget "/CoqIde ToolBar")#as_widget) in let () = GtkButton.Toolbar.set ~orientation:`HORIZONTAL ~style:`ICONS ~tooltips:true tbar in let toolbar = new GObj.widget tbar in vbox#pack toolbar; ignore (w#event#connect#delete ~callback:(fun _ -> quit_f (); true)); (* The vertical Separator between Scripts and Goals *) vbox#pack ~expand:true session_notebook#coerce; update_notebook_pos (); let nb = session_notebook in let lower_hbox = GPack.hbox ~homogeneous:false ~packing:vbox#pack () in lower_hbox#pack 
~expand:true status#coerce; let search_lbl = GMisc.label ~text:"Search:" ~show:false ~packing:(lower_hbox#pack ~expand:false) () in let search_history = ref [] in let (search_input,_) = GEdit.combo_box_entry_text ~strings:!search_history ~show:false ~packing:(lower_hbox#pack ~expand:false) () in let ready_to_wrap_search = ref false in let start_of_search = ref None in let start_of_found = ref None in let end_of_found = ref None in let search_forward = ref true in let matched_word = ref None in let memo_search () = matched_word := Some search_input#entry#text in let end_search () = prerr_endline "End Search"; memo_search (); let v = session_notebook#current_term.script in v#buffer#move_mark `SEL_BOUND ~where:(v#buffer#get_iter_at_mark `INSERT); v#coerce#misc#grab_focus (); search_input#entry#set_text ""; search_lbl#misc#hide (); search_input#misc#hide () in let end_search_focus_out () = prerr_endline "End Search(focus out)"; memo_search (); let v = session_notebook#current_term.script in v#buffer#move_mark `SEL_BOUND ~where:(v#buffer#get_iter_at_mark `INSERT); search_input#entry#set_text ""; search_lbl#misc#hide (); search_input#misc#hide () in ignore (search_input#entry#connect#activate ~callback:end_search); ignore (search_input#entry#event#connect#key_press ~callback:(fun k -> let kv = GdkEvent.Key.keyval k in if kv = GdkKeysyms._Right || kv = GdkKeysyms._Up || kv = GdkKeysyms._Left || (kv = GdkKeysyms._g && (List.mem `CONTROL (GdkEvent.Key.state k))) then end_search (); false)); ignore (search_input#entry#event#connect#focus_out ~callback:(fun _ -> end_search_focus_out (); false)); to_do_on_page_switch := (fun i -> start_of_search := None; ready_to_wrap_search:=false)::!to_do_on_page_switch; (* TODO : make it work !!! *) let rec search_f () = search_lbl#misc#show (); search_input#misc#show (); prerr_endline "search_f called"; if !start_of_search = None then begin (* A full new search is starting *) start_of_search := Some (session_notebook#current_term.script#buffer#create_mark (session_notebook#current_term.script#buffer#get_iter_at_mark `INSERT)); start_of_found := !start_of_search; end_of_found := !start_of_search; matched_word := Some ""; end; let txt = search_input#entry#text in let v = session_notebook#current_term.script in let iit = v#buffer#get_iter_at_mark `SEL_BOUND and insert_iter = v#buffer#get_iter_at_mark `INSERT in prerr_endline ("SELBOUND="^(string_of_int iit#offset)); prerr_endline ("INSERT="^(string_of_int insert_iter#offset)); (match if !search_forward then iit#forward_search txt else let npi = iit#forward_chars (Glib.Utf8.length txt) in match (npi#offset = (v#buffer#get_iter_at_mark `INSERT)#offset), (let t = iit#get_text ~stop:npi in flash_info (t^"\n"^txt); t = txt) with | true,true -> (flash_info "T,T";iit#backward_search txt) | false,true -> flash_info "F,T";Some (iit,npi) | _,false -> (iit#backward_search txt) with | None -> if !ready_to_wrap_search then begin ready_to_wrap_search := false; flash_info "Search wrapped"; v#buffer#place_cursor ~where:(if !search_forward then v#buffer#start_iter else v#buffer#end_iter); search_f () end else begin if !search_forward then flash_info "Search at end" else flash_info "Search at start"; ready_to_wrap_search := true end | Some (start,stop) -> prerr_endline "search: before moving marks"; prerr_endline ("SELBOUND="^(string_of_int (v#buffer#get_iter_at_mark `SEL_BOUND)#offset)); prerr_endline ("INSERT="^(string_of_int (v#buffer#get_iter_at_mark `INSERT)#offset)); v#buffer#move_mark `SEL_BOUND ~where:start; v#buffer#move_mark 
`INSERT ~where:stop; prerr_endline "search: after moving marks"; prerr_endline ("SELBOUND="^(string_of_int (v#buffer#get_iter_at_mark `SEL_BOUND)#offset)); prerr_endline ("INSERT="^(string_of_int (v#buffer#get_iter_at_mark `INSERT)#offset)); v#scroll_to_mark `SEL_BOUND ) in ignore (search_input#entry#event#connect#key_release ~callback: (fun ev -> if GdkEvent.Key.keyval ev = GdkKeysyms._Escape then begin let v = session_notebook#current_term.script in (match !start_of_search with | None -> prerr_endline "search_key_rel: Placing sel_bound"; v#buffer#move_mark `SEL_BOUND ~where:(v#buffer#get_iter_at_mark `INSERT) | Some mk -> let it = v#buffer#get_iter_at_mark (`MARK mk) in prerr_endline "search_key_rel: Placing cursor"; v#buffer#place_cursor ~where:it; start_of_search := None ); search_input#entry#set_text ""; v#coerce#misc#grab_focus (); end; false )); ignore (search_input#entry#connect#changed ~callback:search_f); push_info "Ready"; (* Location display *) let l = GMisc.label ~text:"Line: 1 Char: 1" ~packing:lower_hbox#pack () in l#coerce#misc#set_name "location"; set_location := l#set_text; (* Progress Bar *) lower_hbox#pack pbar#coerce; pbar#set_text "CoqIde started"; (* Initializing hooks *) refresh_toolbar_hook := (fun () -> if !current.show_toolbar then toolbar#misc#show () else toolbar#misc#hide ()); refresh_font_hook := (fun () -> let fd = !current.text_font in let iter_page p = p.script#misc#modify_font fd; p.proof_view#misc#modify_font fd; p.message_view#misc#modify_font fd; p.command#refresh_font () in List.iter iter_page session_notebook#pages; ); refresh_background_color_hook := (fun () -> let clr = Tags.color_of_string !current.background_color in let iter_page p = p.script#misc#modify_base [`NORMAL, `COLOR clr]; p.proof_view#misc#modify_base [`NORMAL, `COLOR clr]; p.message_view#misc#modify_base [`NORMAL, `COLOR clr]; p.command#refresh_color () in List.iter iter_page session_notebook#pages; ); resize_window_hook := (fun () -> w#resize ~width:!current.window_width ~height:!current.window_height); refresh_tabs_hook := update_notebook_pos; let about_full_string = "\nCoq is developed by the Coq Development Team\ \n(INRIA - CNRS - LIX - LRI - PPS)\ \nWeb site: " ^ Coq_config.wwwcoq ^ "\nFeature wish or bug report: http://coq.inria.fr/bugs/\ \n\ \nCredits for CoqIDE, the Integrated Development Environment for Coq:\ \n\ \nMain author : Benjamin Monate\ \nContributors : Jean-Christophe Filliâtre\ \n Pierre Letouzey, Claude Marché\ \n Bruno Barras, Pierre Corbineau\ \n Julien Narboux, Hugo Herbelin, ... 
\ \n\ \nVersion information\ \n-------------------\ \n" in let display_log_file (b:GText.buffer) = if !debug then let file = match !logfile with None -> "stderr" | Some f -> f in b#insert ("Debug mode is on, log file is "^file) in let initial_about (b:GText.buffer) = let initial_string = "Welcome to CoqIDE, an Integrated Development Environment for Coq\n" in let coq_version = Coq.short_version () in display_log_file b; if Glib.Utf8.validate ("You are running " ^ coq_version) then b#insert ~iter:b#start_iter ("You are running " ^ coq_version); if Glib.Utf8.validate initial_string then b#insert ~iter:b#start_iter initial_string; (try let image = Filename.concat (List.find (fun x -> Sys.file_exists (Filename.concat x "coq.png")) Minilib.xdg_data_dirs) "coq.png" in let startup_image = GdkPixbuf.from_file image in b#insert ~iter:b#start_iter "\n\n"; b#insert_pixbuf ~iter:b#start_iter ~pixbuf:startup_image; b#insert ~iter:b#start_iter "\n\n\t\t " with _ -> ()) in let about (b:GText.buffer) = (try let image = Filename.concat (List.find (fun x -> Sys.file_exists (Filename.concat x "coq.png")) Minilib.xdg_data_dirs) "coq.png" in let startup_image = GdkPixbuf.from_file image in b#insert ~iter:b#start_iter "\n\n"; b#insert_pixbuf ~iter:b#start_iter ~pixbuf:startup_image; b#insert ~iter:b#start_iter "\n\n\t\t " with _ -> ()); if Glib.Utf8.validate about_full_string then b#insert about_full_string; let coq_version = Coq.version () in if Glib.Utf8.validate coq_version then b#insert coq_version; display_log_file b; in (* Remove default pango menu for textviews *) w#show (); ignore ((help_actions#get_action "About Coq")#connect#activate ~callback:(fun _ -> let prf_v = session_notebook#current_term.proof_view in prf_v#buffer#set_text ""; about prf_v#buffer)); (* *) (* Begin Color configuration *) Tags.set_processing_color (Tags.color_of_string !current.processing_color); Tags.set_processed_color (Tags.color_of_string !current.processed_color); (* End of color configuration *) ignore(nb#connect#switch_page ~callback: (fun i -> prerr_endline ("switch_page: starts " ^ string_of_int i); List.iter (function f -> f i) !to_do_on_page_switch; prerr_endline "switch_page: success") ); if List.length files >=1 then begin List.iter (fun f -> if Sys.file_exists f then do_load f else let f = if Filename.check_suffix f ".v" then f else f^".v" in load_file (fun s -> print_endline s; exit 1) f) files; session_notebook#goto_page 0; end else begin let session = create_session None in let index = session_notebook#append_term session in session_notebook#goto_page index; end; initial_about session_notebook#current_term.proof_view#buffer; !refresh_toolbar_hook (); session_notebook#current_term.script#misc#grab_focus ();; (* This function periodically checks whether GeoProof has sent something on its private clipboard *) let rec check_for_geoproof_input () = let cb_Dr = GData.clipboard (Gdk.Atom.intern "_GeoProof") in while true do Thread.delay 0.1; let s = cb_Dr#text in (match s with Some s -> if s <> "Ack" then session_notebook#current_term.script#buffer#insert (s^"\n"); cb_Dr#set_text "Ack" | None -> () ); (* cb_Dr#clear does not work, so I use: *) (* cb_Dr#set_text "Ack" *) done (** By default, the coqtop we try to launch is exactly the current coqide full name, with the last occurrence of "coqide" replaced by "coqtop". This should correctly handle the ".opt", ".byte", ".exe" situations. If the replacement fails, we default to "coqtop", hoping it's somewhere in the path.
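A minimal sketch of that renaming (illustrative only: the helper name below is invented for this comment and Str is used for brevity; the real lookup lives elsewhere in the sources):

      let default_coqtop_name ide_path =
        try
          (* locate the last occurrence of "coqide" and splice "coqtop" in its place *)
          let i = Str.search_backward (Str.regexp_string "coqide") ide_path (String.length ide_path) in
          String.sub ide_path 0 i ^ "coqtop" ^
          String.sub ide_path (i + 6) (String.length ide_path - i - 6)
        with Not_found -> "coqtop"

so that e.g. "/usr/local/bin/coqide.opt" is turned into "/usr/local/bin/coqtop.opt".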
Note that the -coqtop option to coqide allows to override this default coqtop path *) let read_coqide_args argv = let rec filter_coqtop coqtop project_files out = function | "-coqtop" :: prog :: args -> if coqtop = None then filter_coqtop (Some prog) project_files out args else (output_string stderr "Error: multiple -coqtop options"; exit 1) | "-f" :: file :: args -> filter_coqtop coqtop ((Minilib.canonical_path_name (Filename.dirname file), Project_file.read_project_file file) :: project_files) out args | "-f" :: [] -> output_string stderr "Error: missing project file name"; exit 1 | "-coqtop" :: [] -> output_string stderr "Error: missing argument after -coqtop"; exit 1 | "-debug"::args -> Ideutils.debug := true; filter_coqtop coqtop project_files ("-debug"::out) args | arg::args -> filter_coqtop coqtop project_files (arg::out) args | [] -> (coqtop,List.rev project_files,List.rev out) in let coqtop,project_files,argv = filter_coqtop None [] [] argv in Ideutils.custom_coqtop := coqtop; custom_project_files := project_files; argv coq-8.4pl2/ide/undo.ml0000640000175000001440000001234712010532755013676 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Delete (s,i,l) | Delete (s,i,l) -> Insert (s,i,l) class undoable_view (tv:[>Gtk.text_view] Gtk.obj) = let undo_lock = ref true in object(self) inherit GText.view tv as super val history = (Stack.create () : action Stack.t) val redo = (Queue.create () : action Queue.t) val nredo = (Stack.create () : action Stack.t) method private dump_debug = if false (* !debug *) then begin prerr_endline "==========Stack top============="; Stack.iter (fun e -> match e with | Insert(s,p,l) -> Printf.eprintf "Insert of '%s' at %d (length %d)\n" s p l | Delete(s,p,l) -> Printf.eprintf "Delete '%s' from %d (length %d)\n" s p l) history; Printf.eprintf "Stack size %d\n" (Stack.length history); prerr_endline "==========Stack Bottom=========="; prerr_endline "==========Queue start============="; Queue.iter (fun e -> match e with | Insert(s,p,l) -> Printf.eprintf "Insert of '%s' at %d (length %d)\n" s p l | Delete(s,p,l) -> Printf.eprintf "Delete '%s' from %d (length %d)\n" s p l) redo; Printf.eprintf "Stack size %d\n" (Queue.length redo); prerr_endline "==========Queue End==========" end method clear_undo = Stack.clear history; Stack.clear nredo; Queue.clear redo method undo = if !undo_lock then begin undo_lock := false; prerr_endline "UNDO"; try begin let r = match Stack.pop history with | Insert(s,p,l) as act -> let start = self#buffer#get_iter_at_char p in (self#buffer#delete_interactive ~start ~stop:(start#forward_chars l) ()) or (Stack.push act history; false) | Delete(s,p,l) as act -> let iter = self#buffer#get_iter_at_char p in (self#buffer#insert_interactive ~iter s) or (Stack.push act history; false) in if r then begin let act = Stack.pop history in Queue.push act redo; Stack.push act nredo end; undo_lock := true; r end with Stack.Empty -> undo_lock := true; false end else (prerr_endline "UNDO DISCARDED"; true) method redo = prerr_endline "REDO"; true initializer (* INCORRECT: is called even while undoing... 
ignore (self#buffer#connect#mark_set ~callback: (fun it tm -> if !undo_lock && not (Queue.is_empty redo) then begin Stack.iter (fun e -> Stack.push (neg e) history) nredo; Stack.clear nredo; Queue.iter (fun e -> Stack.push e history) redo; Queue.clear redo; end) ); *) ignore (self#buffer#connect#insert_text ~callback: (fun it s -> if !undo_lock && not (Queue.is_empty redo) then begin Stack.iter (fun e -> Stack.push (neg e) history) nredo; Stack.clear nredo; Queue.iter (fun e -> Stack.push e history) redo; Queue.clear redo; end; (* let pos = it#offset in if Stack.is_empty history or s=" " or s="\t" or s="\n" or (match Stack.top history with | Insert(old,opos,olen) -> opos + olen <> pos | _ -> true) then *) Stack.push (Insert(s,it#offset,Glib.Utf8.length s)) history (*else begin match Stack.pop history with | Insert(olds,offset,len) -> Stack.push (Insert(olds^s, offset, len+(Glib.Utf8.length s))) history | _ -> assert false end*); self#dump_debug )); ignore (self#buffer#connect#delete_range ~callback: (fun ~start ~stop -> if !undo_lock && not (Queue.is_empty redo) then begin Queue.iter (fun e -> Stack.push e history) redo; Queue.clear redo; end; let start_offset = start#offset in let stop_offset = stop#offset in let s = self#buffer#get_text ~start ~stop () in (* if Stack.is_empty history or (match Stack.top history with | Delete(old,opos,olen) -> olen=1 or opos <> start_offset | _ -> true ) then *) Stack.push (Delete(s, start_offset, stop_offset - start_offset )) history (* else begin match Stack.pop history with | Delete(olds,offset,len) -> Stack.push (Delete(olds^s, offset, len+(Glib.Utf8.length s))) history | _ -> assert false end*); self#dump_debug )) end let undoable_view ?(buffer:GText.buffer option) = GtkText.View.make_params [] ~cont:(GContainer.pack_container ~create: (fun pl -> let w = match buffer with | None -> GtkText.View.create [] | Some b -> GtkText.View.create_with_buffer b#as_buffer in Gobject.set_params w pl; ((new undoable_view w):undoable_view))) coq-8.4pl2/ide/coq2.ico0000750000175000001440000001114610667265771013756 0ustar notinusers(Fhn   (  p ( @ʦ """)))UUUMMMBBB999|PP3f3333f333ff3fffff3f3f̙f3333f3333333333f3333333f3f33ff3f3f3f3333f3333333f3̙33333f333ff3ffffff3f33f3ff3f3f3ffff3fffffffff3fffffff3f̙ffff3ff333f3ff33fff33f3ff̙3f3f3333f333ff3fffff̙̙3̙f̙̙̙3f̙3f3f3333f333ff3fffff3f3f̙3ffffffffff!___www*       ( @ wwww www www ww wxxx x x p pp? 
pp   p pp( @ʦ """)))UUUMMMBBB999|PP3f3333f333ff3fffff3f3f̙f3333f3333333333f3333333f3f33ff3f3f3f3333f3333333f3̙33333f333ff3ffffff3f33f3ff3f3f3ffff3fffffffff3fffffff3f̙ffff3ff333f3ff33fff33f3ff̙3f3f3333f333ff3fffff̙̙3̙f̙̙̙3f̙3f3f3333f333ff3fffff3f3f̙3ffffffffff!___www      22222222222222222222222222**                  pcoq-8.4pl2/ide/coq_lex.mll0000640000175000001440000001532212010532755014533 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Hashtbl.add h s Keyword) one_word_commands; List.iter (fun s -> Hashtbl.add h s Declaration) one_word_declarations; List.iter (fun s -> Hashtbl.add h s ProofDeclaration) proof_declarations; List.iter (fun s -> Hashtbl.add h s Qed) proof_ends; List.iter (fun s -> Hashtbl.add h' s Keyword) constr_keywords; (fun initial id -> Hashtbl.find (if initial then h else h') id) exception Unterminated let here f lexbuf = f (Lexing.lexeme_start lexbuf) (Lexing.lexeme_end lexbuf) } let space = [' ' '\n' '\r' '\t' '\012'] (* '\012' is form-feed *) let firstchar = ['$' 'A'-'Z' 'a'-'z' '_' '\192'-'\214' '\216'-'\246' '\248'-'\255'] let identchar = ['$' 'A'-'Z' 'a'-'z' '_' '\192'-'\214' '\216'-'\246' '\248'-'\255' '\'' '0'-'9'] let ident = firstchar identchar* let undotted_sep = [ '{' '}' '-' '+' '*' ] let dot_sep = '.' (space | eof) let multiword_declaration = "Module" (space+ "Type")? | "Program" space+ ident | "Existing" space+ "Instance" "s"? | "Canonical" space+ "Structure" let locality = (space+ "Local")? let multiword_command = ("Uns" | "S")" et" (space+ ident)* | (("Open" | "Close") locality | "Bind" | " Delimit" ) space+ "Scope" | (("Reserved" space+)? "Notation" | "Infix") locality space+ | "Next" space+ "Obligation" | "Solve" space+ "Obligations" | "Require" space+ ("Import"|"Export")? | "Hint" locality space+ ident | "Reset" (space+ "Initial")? | "Tactic" space+ "Notation" | "Implicit" space+ "Type" "s"? | "Combined" space+ "Scheme" | "Extraction" space+ (("Language" space+ ("Ocaml"|"Haskell"|"Scheme"|"Toplevel"))| ("Library"|"Inline"|"NoInline"|"Blacklist")) | "Recursive" space+ "Extraction" (space+ "Library")? | ("Print"|"Reset") space+ "Extraction" space+ ("Inline"|"Blacklist") | "Extract" space+ (("Inlined" space+) "Constant"| "Inductive") | "Typeclasses" space+ ("eauto" | "Transparent" | "Opaque") | ("Generalizable" space+) ("All" | "No")? "Variable" "s"? (* At least still missing: "Inline" + decl, variants of "Identity Coercion", variants of Print, Add, ... *) rule coq_string = parse | "\"\"" { coq_string lexbuf } | "\"" { Lexing.lexeme_end lexbuf } | eof { Lexing.lexeme_end lexbuf } | _ { coq_string lexbuf } and comment = parse | "(*" { ignore (comment lexbuf); comment lexbuf } | "\"" { ignore (coq_string lexbuf); comment lexbuf } | "*)" { (true, Lexing.lexeme_start lexbuf + 2) } | eof { (false, Lexing.lexeme_end lexbuf) } | _ { comment lexbuf } and sentence initial stamp = parse | "(*" { let comm_start = Lexing.lexeme_start lexbuf in let trully_terminated,comm_end = comment lexbuf in stamp comm_start comm_end Comment; if not trully_terminated then raise Unterminated; (* A comment alone is a sentence. A comment in a sentence doesn't terminate the sentence. Note: comm_end is the first position _after_ the comment, as required when tagging a zone, hence the -1 to locate the ")" terminating the comment. 
*) if initial then comm_end - 1 else sentence false stamp lexbuf } | "\"" { let str_start = Lexing.lexeme_start lexbuf in let str_end = coq_string lexbuf in stamp str_start str_end String; sentence false stamp lexbuf } | multiword_declaration { if initial then here stamp lexbuf Declaration; sentence false stamp lexbuf } | multiword_command { if initial then here stamp lexbuf Keyword; sentence false stamp lexbuf } | ident as id { (try here stamp lexbuf (tag_of_ident initial id) with Not_found -> ()); sentence false stamp lexbuf } | ".." { (* We must have a particular rule for parsing "..", where no dot is a terminator, even if we have a blank afterwards (cf. for instance the syntax for recursive notation). This rule and the following one also allow to treat the "..." special case, where the third dot is a terminator. *) sentence false stamp lexbuf } | dot_sep { Lexing.lexeme_start lexbuf } (* The usual "." terminator *) | undotted_sep { (* Separators like { or } and bullets * - + are only active at the start of a sentence *) if initial then Lexing.lexeme_start lexbuf else sentence false stamp lexbuf } | space+ { (* Parsing spaces is the only situation preserving initiality *) sentence initial stamp lexbuf } | _ { (* Any other characters *) sentence false stamp lexbuf } | eof { raise Unterminated } { (** Parse a sentence in string [slice], tagging relevant parts with function [stamp], and returning the position of the first sentence delimitor (either "." or "{" or "}" or the end of a comment). It will raise [Unterminated] when no end of sentence is found. *) let delimit_sentence stamp slice = sentence true stamp (Lexing.from_string slice) } coq-8.4pl2/ide/tags.mli0000640000175000001440000000264412010532755014037 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (*

      Case c of c1 ... cn end *) let n_lhs_rhs = ref [] and v_lhs = ref (None : constr option) and c_lhs = ref (None : constr option) in Array.iteri (fun i ci -> let argsi, bodyi = decompose_lam ci in let nargsi = List.length argsi in (* REL (narg3 + nargsi + 1) is f *) (* REL nargsi+1 to REL nargsi + nargs3 are arguments of f *) (* REL 1 to REL nargsi are argsi (reverse order) *) (* First we test if the RHS is the RHS for constants *) if isRel bodyi && destRel bodyi = 1 then c_lhs := Some (compute_lhs (snd (List.hd args3)) i nargsi) (* Then we test if the RHS is the RHS for variables *) else begin match decompose_app bodyi with | vmf, [_; _; a3; a4 ] when isRel a3 & isRel a4 & pf_conv_x gl vmf (Lazy.force coq_varmap_find)-> v_lhs := Some (compute_lhs (snd (List.hd args3)) i nargsi) (* Third case: this is a normal LHS-RHS *) | _ -> n_lhs_rhs := (compute_lhs (snd (List.hd args3)) i nargsi, compute_rhs bodyi (nargs3 + nargsi + 1)) :: !n_lhs_rhs end) lci; if !c_lhs = None & !v_lhs = None then i_can't_do_that (); (* The Cases predicate is a lambda; we assume no dependency *) let p = match kind_of_term p with | Lambda (_,_,p) -> Termops.pop p | _ -> p in { normal_lhs_rhs = List.rev !n_lhs_rhs; variable_lhs = !v_lhs; return_type = p; constants = List.fold_right ConstrSet.add cs ConstrSet.empty; constant_lhs = !c_lhs } | _ -> i_can't_do_that () end |_ -> i_can't_do_that () (* TODO for that function: \begin{itemize} \item handle the case where the return type is an argument of the function \item handle the case of simple mutual inductive (for example terms and lists of terms) formulas with the corresponding mutual recursvive interpretation functions. \end{itemize} *) (*s Stuff to build variables map, currently implemented as complete binary search trees (see file \texttt{Quote.v}) *) (* First the function to distinghish between constants (closed terms) and variables (open terms) *) let rec closed_under cset t = (ConstrSet.mem t cset) or (match (kind_of_term t) with | Cast(c,_,_) -> closed_under cset c | App(f,l) -> closed_under cset f && array_for_all (closed_under cset) l | _ -> false) (*s [btree_of_array [| c1; c2; c3; c4; c5 |]] builds the complete binary search tree containing the [ci], that is: \begin{verbatim} c1 / \ c2 c3 / \ c4 c5 \end{verbatim} The second argument is a constr (the common type of the [ci]) *) let btree_of_array a ty = let size_of_a = Array.length a in let semi_size_of_a = size_of_a lsr 1 in let node = Lazy.force coq_Node_vm and empty = mkApp (Lazy.force coq_Empty_vm, [| ty |]) in let rec aux n = if n > size_of_a then empty else if n > semi_size_of_a then mkApp (node, [| ty; a.(n-1); empty; empty |]) else mkApp (node, [| ty; a.(n-1); aux (2*n); aux (2*n+1) |]) in aux 1 (*s [btree_of_array] and [path_of_int] verify the following invariant:\\ {\tt (varmap\_find A dv }[(path_of_int n)] [(btree_of_array a ty)] = [a.(n)]\\ [n] must be [> 0] *) let path_of_int n = (* returns the list of digits of n in reverse order with initial 1 removed *) let rec digits_of_int n = if n=1 then [] else (n mod 2 = 1)::(digits_of_int (n lsr 1)) in List.fold_right (fun b c -> mkApp ((if b then Lazy.force coq_Right_idx else Lazy.force coq_Left_idx), [| c |])) (List.rev (digits_of_int n)) (Lazy.force coq_End_idx) (*s The tactic works with a list of subterms sharing the same variables map. We need to sort terms in order to avoid than strange things happen during replacement of terms by their 'abstract' counterparties. 
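   For instance, if both [f x] and [x] are to be abstracted, [f x] must be
   replaced first: abstracting the smaller term [x] first would also rewrite
   its occurrence inside [f x], so the larger pattern would no longer be
   recognized.  Hence the reverse subterm order computed by [sort_subterm]
   below.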
*) (* [subterm t t'] tests if constr [t'] occurs in [t] *) (* This function does not descend under binders (lambda and Cases) *) let rec subterm gl (t : constr) (t' : constr) = (pf_conv_x gl t t') or (match (kind_of_term t) with | App (f,args) -> array_exists (fun t -> subterm gl t t') args | Cast(t,_,_) -> (subterm gl t t') | _ -> false) (*s We want to sort the list according to reverse subterm order. *) (* Since it's a partial order the algoritm of Sort.list won't work !! *) let rec sort_subterm gl l = let rec insert c = function | [] -> [c] | (h::t as l) when eq_constr c h -> l (* Avoid doing the same work twice *) | h::t -> if subterm gl c h then c::h::t else h::(insert c t) in match l with | [] -> [] | h::t -> insert h (sort_subterm gl t) module Constrhash = Hashtbl.Make (struct type t = constr let equal = eq_constr let hash = hash_constr end) (*s Now we are able to do the inversion itself. We destructurate the term and use an imperative hashtable to store leafs that are already encountered. The type of arguments is:\\ [ivs : inversion_scheme]\\ [lc: constr list]\\ [gl: goal sigma]\\ *) let quote_terms ivs lc gl = Coqlib.check_required_library ["Coq";"quote";"Quote"]; let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) let rec aux c = let rec auxl l = match l with | (lhs, rhs)::tail -> begin try let s1 = matches rhs c in let s2 = List.map (fun (i,c_i) -> (coerce_meta_out i,aux c_i)) s1 in Termops.subst_meta s2 lhs with PatternMatchingFailure -> auxl tail end | [] -> begin match ivs.variable_lhs with | None -> begin match ivs.constant_lhs with | Some c_lhs -> Termops.subst_meta [1, c] c_lhs | None -> anomaly "invalid inversion scheme for quote" end | Some var_lhs -> begin match ivs.constant_lhs with | Some c_lhs when closed_under ivs.constants c -> Termops.subst_meta [1, c] c_lhs | _ -> begin try Constrhash.find varhash c with Not_found -> let newvar = Termops.subst_meta [1, (path_of_int !counter)] var_lhs in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end end end end in auxl ivs.normal_lhs_rhs in let lp = List.map aux lc in (lp, (btree_of_array (Array.of_list (List.rev !varlist)) ivs.return_type )) (*s actually we could "quote" a list of terms instead of a single term. Ring for example needs that, but Ring doesn't use Quote yet. *) let quote f lid gl = let f = pf_global gl f in let cl = List.map (pf_global gl) lid in let ivs = compute_ivs gl f cl in let (p, vm) = match quote_terms ivs [(pf_concl gl)] gl with | [p], vm -> (p,vm) | _ -> assert false in match ivs.variable_lhs with | None -> Tactics.convert_concl (mkApp (f, [| p |])) DEFAULTcast gl | Some _ -> Tactics.convert_concl (mkApp (f, [| vm; p |])) DEFAULTcast gl let gen_quote cont c f lid gl = let f = pf_global gl f in let cl = List.map (pf_global gl) lid in let ivs = compute_ivs gl f cl in let (p, vm) = match quote_terms ivs [c] gl with | [p], vm -> (p,vm) | _ -> assert false in match ivs.variable_lhs with | None -> cont (mkApp (f, [| p |])) gl | Some _ -> cont (mkApp (f, [| vm; p |])) gl (*i Just testing ... 
#use "include.ml";; open Quote;; let r = glob_constr_of_string;; let ivs = { normal_lhs_rhs = [ r "(f_and ?1 ?2)", r "?1/\?2"; r "(f_not ?1)", r "~?1"]; variable_lhs = Some (r "(f_atom ?1)"); return_type = r "Prop"; constants = ConstrSet.empty; constant_lhs = (r "nat") };; let t1 = r "True/\(True /\ ~False)";; let t2 = r "True/\~~False";; quote_term ivs () t1;; quote_term ivs () t2;; let ivs2 = normal_lhs_rhs = [ r "(f_and ?1 ?2)", r "?1/\?2"; r "(f_not ?1)", r "~?1" r "True", r "f_true"]; variable_lhs = Some (r "(f_atom ?1)"); return_type = r "Prop"; constants = ConstrSet.empty; constant_lhs = (r "nat") i*) coq-8.4pl2/plugins/quote/quote_plugin.mllib0000640000175000001440000000003711161000644020172 0ustar notinusersQuote G_quote Quote_plugin_mod coq-8.4pl2/plugins/quote/g_quote.ml40000640000175000001440000000223412010532755016527 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* k)) in let x = TacDynamic(dummy_loc, Pretyping.constr_in x) in let tac = <:tactic> in Tacinterp.interp tac TACTIC EXTEND quote [ "quote" ident(f) ] -> [ quote f [] ] | [ "quote" ident(f) "[" ne_ident_list(lc) "]"] -> [ quote f lc ] | [ "quote" ident(f) "in" constr(c) "using" tactic(k) ] -> [ gen_quote (make_cont k) c f [] ] | [ "quote" ident(f) "[" ne_ident_list(lc) "]" "in" constr(c) "using" tactic(k) ] -> [ gen_quote (make_cont k) c f lc ] END coq-8.4pl2/plugins/ring/0000750000175000001440000000000012127276541014255 5ustar notinuserscoq-8.4pl2/plugins/ring/LegacyArithRing.v0000640000175000001440000000477112010532755017463 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | S n', S m' => nateq n' m' | _, _ => false end. Lemma nateq_prop : forall n m:nat, Is_true (nateq n m) -> n = m. Proof. simple induction n; simple induction m; intros; try contradiction. trivial. unfold Is_true in H1. rewrite (H n1 H1). trivial. Qed. Hint Resolve nateq_prop: arithring. Definition NatTheory : Semi_Ring_Theory plus mult 1 0 nateq. split; intros; auto with arith arithring. (* apply (fun n m p:nat => plus_reg_l m p n) with (n := n). trivial.*) Defined. Add Legacy Semi Ring nat plus mult 1 0 nateq NatTheory [ 0 S ]. Goal forall n:nat, S n = 1 + n. intro; reflexivity. Save S_to_plus_one. (* Replace all occurrences of (S exp) by (plus (S O) exp), except when exp is already O and only for those occurrences than can be reached by going down plus and mult operations *) Ltac rewrite_S_to_plus_term t := match constr:t with | 1 => constr:1 | (S ?X1) => let t1 := rewrite_S_to_plus_term X1 in constr:(1 + t1) | (?X1 + ?X2) => let t1 := rewrite_S_to_plus_term X1 with t2 := rewrite_S_to_plus_term X2 in constr:(t1 + t2) | (?X1 * ?X2) => let t1 := rewrite_S_to_plus_term X1 with t2 := rewrite_S_to_plus_term X2 in constr:(t1 * t2) | _ => constr:t end. (* Apply S_to_plus on both sides of an equality *) Ltac rewrite_S_to_plus := match goal with | |- (?X1 = ?X2) => try let t1 := (**) (**) rewrite_S_to_plus_term X1 with t2 := rewrite_S_to_plus_term X2 in change (t1 = t2) | |- (?X1 = ?X2) => try let t1 := (**) (**) rewrite_S_to_plus_term X1 with t2 := rewrite_S_to_plus_term X2 in change (t1 = t2) end. Ltac ring_nat := rewrite_S_to_plus; ring. 
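(* Illustration (added comment): [ring_nat] first turns successors into
   [1 + _] so that the goal only mentions the operations of the [nat]
   semi-ring declared above, then calls [ring].  A typical goal it should
   close (hypothetical example, not part of the library):

     Goal forall n m : nat, S n * m = m + n * m.
     Proof. intros; ring_nat. Qed.
*)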
coq-8.4pl2/plugins/ring/g_ring.ml40000640000175000001440000000730712010532755016141 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ polynom l ] END (* The vernac commands "Add Ring" and co *) let cset_of_constrarg_list l = List.fold_right ConstrSet.add (List.map constr_of l) ConstrSet.empty VERNAC COMMAND EXTEND AddRing [ "Add" "Legacy" "Ring" constr(a) constr(aplus) constr(amult) constr(aone) constr(azero) constr(aopp) constr(aeq) constr(t) "[" ne_constr_list(l) "]" ] -> [ add_theory true false false (constr_of a) None None None (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) (Some (constr_of aopp)) (constr_of aeq) (constr_of t) (cset_of_constrarg_list l) ] | [ "Add" "Legacy" "Semi" "Ring" constr(a) constr(aplus) constr(amult) constr(aone) constr(azero) constr(aeq) constr(t) "[" ne_constr_list(l) "]" ] -> [ add_theory false false false (constr_of a) None None None (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) None (constr_of aeq) (constr_of t) (cset_of_constrarg_list l) ] | [ "Add" "Legacy" "Abstract" "Ring" constr(a) constr(aplus) constr(amult) constr(aone) constr(azero) constr(aopp) constr(aeq) constr(t) ] -> [ add_theory true true false (constr_of a) None None None (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) (Some (constr_of aopp)) (constr_of aeq) (constr_of t) ConstrSet.empty ] | [ "Add" "Legacy" "Abstract" "Semi" "Ring" constr(a) constr(aplus) constr(amult) constr(aone) constr(azero) constr(aeq) constr(t) ] -> [ add_theory false true false (constr_of a) None None None (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) None (constr_of aeq) (constr_of t) ConstrSet.empty ] | [ "Add" "Legacy" "Setoid" "Ring" constr(a) constr(aequiv) constr(asetth) constr(aplus) constr(amult) constr(aone) constr(azero) constr(aopp) constr(aeq) constr(pm) constr(mm) constr(om) constr(t) "[" ne_constr_list(l) "]" ] -> [ add_theory true false true (constr_of a) (Some (constr_of aequiv)) (Some (constr_of asetth)) (Some { plusm = (constr_of pm); multm = (constr_of mm); oppm = Some (constr_of om) }) (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) (Some (constr_of aopp)) (constr_of aeq) (constr_of t) (cset_of_constrarg_list l) ] | [ "Add" "Legacy" "Semi" "Setoid" "Ring" constr(a) constr(aequiv) constr(asetth) constr(aplus) constr(amult) constr(aone) constr(azero) constr(aeq) constr(pm) constr(mm) constr(t) "[" ne_constr_list(l) "]" ] -> [ add_theory false false true (constr_of a) (Some (constr_of aequiv)) (Some (constr_of asetth)) (Some { plusm = (constr_of pm); multm = (constr_of mm); oppm = None }) (constr_of aplus) (constr_of amult) (constr_of aone) (constr_of azero) None (constr_of aeq) (constr_of t) (cset_of_constrarg_list l) ] END coq-8.4pl2/plugins/ring/LegacyRing_theory.v0000640000175000001440000002370612010532755020064 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. (* There is also a "weakly decidable" equality on A. That means that if (A_eq x y)=true then x=y but x=y can arise when (A_eq x y)=false. On an abstract ring the function [x,y:A]false is a good choice. The proof of A_eq_prop is in this case easy. *) Variable Aeq : A -> A -> bool. Infix "+" := Aplus (at level 50, left associativity). 
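(* Illustration of the remark above (added comment; the name [Aeq_trivial]
   is ours): on an abstract carrier the everywhere-false test is a valid
   choice for [Aeq], and its correctness obligation is vacuous because
   [Is_true false] is [False]:

     Definition Aeq_trivial (x y:A) : bool := false.
     Lemma Aeq_trivial_prop :
       forall x y:A, Is_true (Aeq_trivial x y) -> x = y.
     Proof. intros x y H; contradiction. Qed.
*)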
Infix "*" := Amult (at level 40, left associativity). Notation "0" := Azero. Notation "1" := Aone. Record Semi_Ring_Theory : Prop := {SR_plus_comm : forall n m:A, n + m = m + n; SR_plus_assoc : forall n m p:A, n + (m + p) = n + m + p; SR_mult_comm : forall n m:A, n * m = m * n; SR_mult_assoc : forall n m p:A, n * (m * p) = n * m * p; SR_plus_zero_left : forall n:A, 0 + n = n; SR_mult_one_left : forall n:A, 1 * n = n; SR_mult_zero_left : forall n:A, 0 * n = 0; SR_distr_left : forall n m p:A, (n + m) * p = n * p + m * p; (* SR_plus_reg_left : forall n m p:A, n + m = n + p -> m = p;*) SR_eq_prop : forall x y:A, Is_true (Aeq x y) -> x = y}. Variable T : Semi_Ring_Theory. Let plus_comm := SR_plus_comm T. Let plus_assoc := SR_plus_assoc T. Let mult_comm := SR_mult_comm T. Let mult_assoc := SR_mult_assoc T. Let plus_zero_left := SR_plus_zero_left T. Let mult_one_left := SR_mult_one_left T. Let mult_zero_left := SR_mult_zero_left T. Let distr_left := SR_distr_left T. (*Let plus_reg_left := SR_plus_reg_left T.*) Hint Resolve plus_comm plus_assoc mult_comm mult_assoc plus_zero_left mult_one_left mult_zero_left distr_left (*plus_reg_left*). (* Lemmas whose form is x=y are also provided in form y=x because Auto does not symmetry *) Lemma SR_mult_assoc2 : forall n m p:A, n * m * p = n * (m * p). symmetry ; eauto. Qed. Lemma SR_plus_assoc2 : forall n m p:A, n + m + p = n + (m + p). symmetry ; eauto. Qed. Lemma SR_plus_zero_left2 : forall n:A, n = 0 + n. symmetry ; eauto. Qed. Lemma SR_mult_one_left2 : forall n:A, n = 1 * n. symmetry ; eauto. Qed. Lemma SR_mult_zero_left2 : forall n:A, 0 = 0 * n. symmetry ; eauto. Qed. Lemma SR_distr_left2 : forall n m p:A, n * p + m * p = (n + m) * p. symmetry ; eauto. Qed. Lemma SR_plus_permute : forall n m p:A, n + (m + p) = m + (n + p). intros. rewrite plus_assoc. elim (plus_comm m n). rewrite <- plus_assoc. reflexivity. Qed. Lemma SR_mult_permute : forall n m p:A, n * (m * p) = m * (n * p). intros. rewrite mult_assoc. elim (mult_comm m n). rewrite <- mult_assoc. reflexivity. Qed. Hint Resolve SR_plus_permute SR_mult_permute. Lemma SR_distr_right : forall n m p:A, n * (m + p) = n * m + n * p. intros. repeat rewrite (mult_comm n). eauto. Qed. Lemma SR_distr_right2 : forall n m p:A, n * m + n * p = n * (m + p). symmetry ; apply SR_distr_right. Qed. Lemma SR_mult_zero_right : forall n:A, n * 0 = 0. intro; rewrite mult_comm; eauto. Qed. Lemma SR_mult_zero_right2 : forall n:A, 0 = n * 0. intro; rewrite mult_comm; eauto. Qed. Lemma SR_plus_zero_right : forall n:A, n + 0 = n. intro; rewrite plus_comm; eauto. Qed. Lemma SR_plus_zero_right2 : forall n:A, n = n + 0. intro; rewrite plus_comm; eauto. Qed. Lemma SR_mult_one_right : forall n:A, n * 1 = n. intro; elim mult_comm; auto. Qed. Lemma SR_mult_one_right2 : forall n:A, n = n * 1. intro; elim mult_comm; auto. Qed. (* Lemma SR_plus_reg_right : forall n m p:A, m + n = p + n -> m = p. intros n m p; rewrite (plus_comm m n); rewrite (plus_comm p n); eauto. Qed. *) End Theory_of_semi_rings. Section Theory_of_rings. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Infix "+" := Aplus (at level 50, left associativity). Infix "*" := Amult (at level 40, left associativity). Notation "0" := Azero. Notation "1" := Aone. Notation "- x" := (Aopp x). 
Record Ring_Theory : Prop := {Th_plus_comm : forall n m:A, n + m = m + n; Th_plus_assoc : forall n m p:A, n + (m + p) = n + m + p; Th_mult_comm : forall n m:A, n * m = m * n; Th_mult_assoc : forall n m p:A, n * (m * p) = n * m * p; Th_plus_zero_left : forall n:A, 0 + n = n; Th_mult_one_left : forall n:A, 1 * n = n; Th_opp_def : forall n:A, n + - n = 0; Th_distr_left : forall n m p:A, (n + m) * p = n * p + m * p; Th_eq_prop : forall x y:A, Is_true (Aeq x y) -> x = y}. Variable T : Ring_Theory. Let plus_comm := Th_plus_comm T. Let plus_assoc := Th_plus_assoc T. Let mult_comm := Th_mult_comm T. Let mult_assoc := Th_mult_assoc T. Let plus_zero_left := Th_plus_zero_left T. Let mult_one_left := Th_mult_one_left T. Let opp_def := Th_opp_def T. Let distr_left := Th_distr_left T. Hint Resolve plus_comm plus_assoc mult_comm mult_assoc plus_zero_left mult_one_left opp_def distr_left. (* Lemmas whose form is x=y are also provided in form y=x because Auto does not symmetry *) Lemma Th_mult_assoc2 : forall n m p:A, n * m * p = n * (m * p). symmetry ; eauto. Qed. Lemma Th_plus_assoc2 : forall n m p:A, n + m + p = n + (m + p). symmetry ; eauto. Qed. Lemma Th_plus_zero_left2 : forall n:A, n = 0 + n. symmetry ; eauto. Qed. Lemma Th_mult_one_left2 : forall n:A, n = 1 * n. symmetry ; eauto. Qed. Lemma Th_distr_left2 : forall n m p:A, n * p + m * p = (n + m) * p. symmetry ; eauto. Qed. Lemma Th_opp_def2 : forall n:A, 0 = n + - n. symmetry ; eauto. Qed. Lemma Th_plus_permute : forall n m p:A, n + (m + p) = m + (n + p). intros. rewrite plus_assoc. elim (plus_comm m n). rewrite <- plus_assoc. reflexivity. Qed. Lemma Th_mult_permute : forall n m p:A, n * (m * p) = m * (n * p). intros. rewrite mult_assoc. elim (mult_comm m n). rewrite <- mult_assoc. reflexivity. Qed. Hint Resolve Th_plus_permute Th_mult_permute. Lemma aux1 : forall a:A, a + a = a -> a = 0. intros. generalize (opp_def a). pattern a at 1. rewrite <- H. rewrite <- plus_assoc. rewrite opp_def. elim plus_comm. rewrite plus_zero_left. trivial. Qed. Lemma Th_mult_zero_left : forall n:A, 0 * n = 0. intros. apply aux1. rewrite <- distr_left. rewrite plus_zero_left. reflexivity. Qed. Hint Resolve Th_mult_zero_left. Lemma Th_mult_zero_left2 : forall n:A, 0 = 0 * n. symmetry ; eauto. Qed. Lemma aux2 : forall x y z:A, x + y = 0 -> x + z = 0 -> y = z. intros. rewrite <- (plus_zero_left y). elim H0. elim plus_assoc. elim (plus_comm y z). rewrite plus_assoc. rewrite H. rewrite plus_zero_left. reflexivity. Qed. Lemma Th_opp_mult_left : forall x y:A, - (x * y) = - x * y. intros. apply (aux2 (x:=(x * y))); [ apply opp_def | rewrite <- distr_left; rewrite opp_def; auto ]. Qed. Hint Resolve Th_opp_mult_left. Lemma Th_opp_mult_left2 : forall x y:A, - x * y = - (x * y). symmetry ; eauto. Qed. Lemma Th_mult_zero_right : forall n:A, n * 0 = 0. intro; elim mult_comm; eauto. Qed. Lemma Th_mult_zero_right2 : forall n:A, 0 = n * 0. intro; elim mult_comm; eauto. Qed. Lemma Th_plus_zero_right : forall n:A, n + 0 = n. intro; rewrite plus_comm; eauto. Qed. Lemma Th_plus_zero_right2 : forall n:A, n = n + 0. intro; rewrite plus_comm; eauto. Qed. Lemma Th_mult_one_right : forall n:A, n * 1 = n. intro; elim mult_comm; eauto. Qed. Lemma Th_mult_one_right2 : forall n:A, n = n * 1. intro; elim mult_comm; eauto. Qed. Lemma Th_opp_mult_right : forall x y:A, - (x * y) = x * - y. intros; do 2 rewrite (mult_comm x); auto. Qed. Lemma Th_opp_mult_right2 : forall x y:A, x * - y = - (x * y). intros; do 2 rewrite (mult_comm x); auto. Qed. 
Lemma Th_plus_opp_opp : forall x y:A, - x + - y = - (x + y). intros. apply (aux2 (x:=(x + y))); [ elim plus_assoc; rewrite (Th_plus_permute y (- x)); rewrite plus_assoc; rewrite opp_def; rewrite plus_zero_left; auto | auto ]. Qed. Lemma Th_plus_permute_opp : forall n m p:A, - m + (n + p) = n + (- m + p). eauto. Qed. Lemma Th_opp_opp : forall n:A, - - n = n. intro; apply (aux2 (x:=(- n))); [ auto | elim plus_comm; auto ]. Qed. Hint Resolve Th_opp_opp. Lemma Th_opp_opp2 : forall n:A, n = - - n. symmetry ; eauto. Qed. Lemma Th_mult_opp_opp : forall x y:A, - x * - y = x * y. intros; rewrite <- Th_opp_mult_left; rewrite <- Th_opp_mult_right; auto. Qed. Lemma Th_mult_opp_opp2 : forall x y:A, x * y = - x * - y. symmetry ; apply Th_mult_opp_opp. Qed. Lemma Th_opp_zero : - 0 = 0. rewrite <- (plus_zero_left (- 0)). auto. Qed. (* Lemma Th_plus_reg_left : forall n m p:A, n + m = n + p -> m = p. intros; generalize (f_equal (fun z => - n + z) H). repeat rewrite plus_assoc. rewrite (plus_comm (- n) n). rewrite opp_def. repeat rewrite Th_plus_zero_left; eauto. Qed. Lemma Th_plus_reg_right : forall n m p:A, m + n = p + n -> m = p. intros. eapply Th_plus_reg_left with n. rewrite (plus_comm n m). rewrite (plus_comm n p). auto. Qed. *) Lemma Th_distr_right : forall n m p:A, n * (m + p) = n * m + n * p. intros. repeat rewrite (mult_comm n). eauto. Qed. Lemma Th_distr_right2 : forall n m p:A, n * m + n * p = n * (m + p). symmetry ; apply Th_distr_right. Qed. End Theory_of_rings. Hint Resolve Th_mult_zero_left (*Th_plus_reg_left*): core. Unset Implicit Arguments. Definition Semi_Ring_Theory_of : forall (A:Type) (Aplus Amult:A -> A -> A) (Aone Azero:A) (Aopp:A -> A) (Aeq:A -> A -> bool), Ring_Theory Aplus Amult Aone Azero Aopp Aeq -> Semi_Ring_Theory Aplus Amult Aone Azero Aeq. intros until 1; case H. split; intros; simpl; eauto. Defined. (* Every ring can be viewed as a semi-ring : this property will be used in Abstract_polynom. *) Coercion Semi_Ring_Theory_of : Ring_Theory >-> Semi_Ring_Theory. Section product_ring. End product_ring. Section power_ring. End power_ring. coq-8.4pl2/plugins/ring/vo.itarget0000640000175000001440000000027711307752066016271 0ustar notinusersLegacyArithRing.vo LegacyNArithRing.vo LegacyRing_theory.vo LegacyRing.vo LegacyZArithRing.vo Ring_abstract.vo Ring_normalize.vo Setoid_ring_normalize.vo Setoid_ring_theory.vo Setoid_ring.vo coq-8.4pl2/plugins/ring/Ring_normalize.v0000640000175000001440000006272712010532755017433 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n = m. Proof. intros. apply index_eq_prop. generalize H. case (index_eq n m); simpl; trivial; intros. contradiction. Qed. Section semi_rings. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aeq : A -> A -> bool. (* Section definitions. *) (******************************************) (* Normal abtract Polynomials *) (******************************************) (* DEFINITIONS : - A varlist is a sorted product of one or more variables : x, x*y*z - A monom is a constant, a varlist or the product of a constant by a varlist variables. 2*x*y, x*y*z, 3 are monoms : 2*3, x*3*y, 4*x*3 are NOT. 
- A canonical sum is either a monom or an ordered sum of monoms (the order on monoms is defined later) - A normal polynomial it either a constant or a canonical sum or a constant plus a canonical sum *) (* varlist is isomorphic to (list var), but we built a special inductive for efficiency *) Inductive varlist : Type := | Nil_var : varlist | Cons_var : index -> varlist -> varlist. Inductive canonical_sum : Type := | Nil_monom : canonical_sum | Cons_monom : A -> varlist -> canonical_sum -> canonical_sum | Cons_varlist : varlist -> canonical_sum -> canonical_sum. (* Order on monoms *) (* That's the lexicographic order on varlist, extended by : - A constant is less than every monom - The relation between two varlist is preserved by multiplication by a constant. Examples : 3 < x < y x*y < x*y*y*z 2*x*y < x*y*y*z x*y < 54*x*y*y*z 4*x*y < 59*x*y*y*z *) Fixpoint varlist_eq (x y:varlist) {struct y} : bool := match x, y with | Nil_var, Nil_var => true | Cons_var i xrest, Cons_var j yrest => andb (index_eq i j) (varlist_eq xrest yrest) | _, _ => false end. Fixpoint varlist_lt (x y:varlist) {struct y} : bool := match x, y with | Nil_var, Cons_var _ _ => true | Cons_var i xrest, Cons_var j yrest => if index_lt i j then true else andb (index_eq i j) (varlist_lt xrest yrest) | _, _ => false end. (* merges two variables lists *) Fixpoint varlist_merge (l1:varlist) : varlist -> varlist := match l1 with | Cons_var v1 t1 => (fix vm_aux (l2:varlist) : varlist := match l2 with | Cons_var v2 t2 => if index_lt v1 v2 then Cons_var v1 (varlist_merge t1 l2) else Cons_var v2 (vm_aux t2) | Nil_var => l1 end) | Nil_var => fun l2 => l2 end. (* returns the sum of two canonical sums *) Fixpoint canonical_sum_merge (s1:canonical_sum) : canonical_sum -> canonical_sum := match s1 with | Cons_monom c1 l1 t1 => (fix csm_aux (s2:canonical_sum) : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 c2) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_monom c1 l1 (canonical_sum_merge t1 s2) else Cons_monom c2 l2 (csm_aux t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 Aone) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_monom c1 l1 (canonical_sum_merge t1 s2) else Cons_varlist l2 (csm_aux t2) | Nil_monom => s1 end) | Cons_varlist l1 t1 => (fix csm_aux2 (s2:canonical_sum) : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone c2) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_varlist l1 (canonical_sum_merge t1 s2) else Cons_monom c2 l2 (csm_aux2 t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone Aone) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_varlist l1 (canonical_sum_merge t1 s2) else Cons_varlist l2 (csm_aux2 t2) | Nil_monom => s1 end) | Nil_monom => fun s2 => s2 end. (* Insertion of a monom in a canonical sum *) Fixpoint monom_insert (c1:A) (l1:varlist) (s2:canonical_sum) {struct s2} : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 c2) l1 t2 else if varlist_lt l1 l2 then Cons_monom c1 l1 s2 else Cons_monom c2 l2 (monom_insert c1 l1 t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 Aone) l1 t2 else if varlist_lt l1 l2 then Cons_monom c1 l1 s2 else Cons_varlist l2 (monom_insert c1 l1 t2) | Nil_monom => Cons_monom c1 l1 Nil_monom end. 
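(* Worked example of the representation (added comment): write x for the
   variable of index i and y for the variable of index j, with i < j in the
   index order, and let a, b be constants of type A.  The polynomial
   a + b*x*y + y is represented by the canonical sum

     Cons_monom a Nil_var
       (Cons_monom b (Cons_var i (Cons_var j Nil_var))
          (Cons_varlist (Cons_var j Nil_var) Nil_monom))

   the constant comes first since its varlist [Nil_var] is the smallest,
   monoms are kept sorted by their varlists, and a coefficient equal to 1
   is encoded with [Cons_varlist] instead of [Cons_monom]. *)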
Fixpoint varlist_insert (l1:varlist) (s2:canonical_sum) {struct s2} : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone c2) l1 t2 else if varlist_lt l1 l2 then Cons_varlist l1 s2 else Cons_monom c2 l2 (varlist_insert l1 t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone Aone) l1 t2 else if varlist_lt l1 l2 then Cons_varlist l1 s2 else Cons_varlist l2 (varlist_insert l1 t2) | Nil_monom => Cons_varlist l1 Nil_monom end. (* Computes c0*s *) Fixpoint canonical_sum_scalar (c0:A) (s:canonical_sum) {struct s} : canonical_sum := match s with | Cons_monom c l t => Cons_monom (Amult c0 c) l (canonical_sum_scalar c0 t) | Cons_varlist l t => Cons_monom c0 l (canonical_sum_scalar c0 t) | Nil_monom => Nil_monom end. (* Computes l0*s *) Fixpoint canonical_sum_scalar2 (l0:varlist) (s:canonical_sum) {struct s} : canonical_sum := match s with | Cons_monom c l t => monom_insert c (varlist_merge l0 l) (canonical_sum_scalar2 l0 t) | Cons_varlist l t => varlist_insert (varlist_merge l0 l) (canonical_sum_scalar2 l0 t) | Nil_monom => Nil_monom end. (* Computes c0*l0*s *) Fixpoint canonical_sum_scalar3 (c0:A) (l0:varlist) (s:canonical_sum) {struct s} : canonical_sum := match s with | Cons_monom c l t => monom_insert (Amult c0 c) (varlist_merge l0 l) (canonical_sum_scalar3 c0 l0 t) | Cons_varlist l t => monom_insert c0 (varlist_merge l0 l) (canonical_sum_scalar3 c0 l0 t) | Nil_monom => Nil_monom end. (* returns the product of two canonical sums *) Fixpoint canonical_sum_prod (s1 s2:canonical_sum) {struct s1} : canonical_sum := match s1 with | Cons_monom c1 l1 t1 => canonical_sum_merge (canonical_sum_scalar3 c1 l1 s2) (canonical_sum_prod t1 s2) | Cons_varlist l1 t1 => canonical_sum_merge (canonical_sum_scalar2 l1 s2) (canonical_sum_prod t1 s2) | Nil_monom => Nil_monom end. (* The type to represent concrete semi-ring polynomials *) Inductive spolynomial : Type := | SPvar : index -> spolynomial | SPconst : A -> spolynomial | SPplus : spolynomial -> spolynomial -> spolynomial | SPmult : spolynomial -> spolynomial -> spolynomial. Fixpoint spolynomial_normalize (p:spolynomial) : canonical_sum := match p with | SPvar i => Cons_varlist (Cons_var i Nil_var) Nil_monom | SPconst c => Cons_monom c Nil_var Nil_monom | SPplus l r => canonical_sum_merge (spolynomial_normalize l) (spolynomial_normalize r) | SPmult l r => canonical_sum_prod (spolynomial_normalize l) (spolynomial_normalize r) end. (* Deletion of useless 0 and 1 in canonical sums *) Fixpoint canonical_sum_simplify (s:canonical_sum) : canonical_sum := match s with | Cons_monom c l t => if Aeq c Azero then canonical_sum_simplify t else if Aeq c Aone then Cons_varlist l (canonical_sum_simplify t) else Cons_monom c l (canonical_sum_simplify t) | Cons_varlist l t => Cons_varlist l (canonical_sum_simplify t) | Nil_monom => Nil_monom end. Definition spolynomial_simplify (x:spolynomial) := canonical_sum_simplify (spolynomial_normalize x). (* End definitions. *) (* Section interpretation. *) (*** Here a variable map is defined and the interpetation of a spolynom acording to a certain variables map. Once again the choosen definition is generic and could be changed ****) Variable vm : varmap A. (* Interpretation of list of variables * [x1; ... ; xn ] is interpreted as (find v x1)* ... *(find v xn) * The unbound variables are mapped to 0. Normally this case sould * never occur. Since we want only to prove correctness theorems, which form * is : for any varmap and any spolynom ... 
this is a safe and pain-saving * choice *) Definition interp_var (i:index) := varmap_find Azero i vm. (* Local *) Definition ivl_aux := (fix ivl_aux (x:index) (t:varlist) {struct t} : A := match t with | Nil_var => interp_var x | Cons_var x' t' => Amult (interp_var x) (ivl_aux x' t') end). Definition interp_vl (l:varlist) := match l with | Nil_var => Aone | Cons_var x t => ivl_aux x t end. (* Local *) Definition interp_m (c:A) (l:varlist) := match l with | Nil_var => c | Cons_var x t => Amult c (ivl_aux x t) end. (* Local *) Definition ics_aux := (fix ics_aux (a:A) (s:canonical_sum) {struct s} : A := match s with | Nil_monom => a | Cons_varlist l t => Aplus a (ics_aux (interp_vl l) t) | Cons_monom c l t => Aplus a (ics_aux (interp_m c l) t) end). (* Interpretation of a canonical sum *) Definition interp_cs (s:canonical_sum) : A := match s with | Nil_monom => Azero | Cons_varlist l t => ics_aux (interp_vl l) t | Cons_monom c l t => ics_aux (interp_m c l) t end. Fixpoint interp_sp (p:spolynomial) : A := match p with | SPconst c => c | SPvar i => interp_var i | SPplus p1 p2 => Aplus (interp_sp p1) (interp_sp p2) | SPmult p1 p2 => Amult (interp_sp p1) (interp_sp p2) end. (* End interpretation. *) Unset Implicit Arguments. (* Section properties. *) Variable T : Semi_Ring_Theory Aplus Amult Aone Azero Aeq. Hint Resolve (SR_plus_comm T). Hint Resolve (SR_plus_assoc T). Hint Resolve (SR_plus_assoc2 T). Hint Resolve (SR_mult_comm T). Hint Resolve (SR_mult_assoc T). Hint Resolve (SR_mult_assoc2 T). Hint Resolve (SR_plus_zero_left T). Hint Resolve (SR_plus_zero_left2 T). Hint Resolve (SR_mult_one_left T). Hint Resolve (SR_mult_one_left2 T). Hint Resolve (SR_mult_zero_left T). Hint Resolve (SR_mult_zero_left2 T). Hint Resolve (SR_distr_left T). Hint Resolve (SR_distr_left2 T). (*Hint Resolve (SR_plus_reg_left T).*) Hint Resolve (SR_plus_permute T). Hint Resolve (SR_mult_permute T). Hint Resolve (SR_distr_right T). Hint Resolve (SR_distr_right2 T). Hint Resolve (SR_mult_zero_right T). Hint Resolve (SR_mult_zero_right2 T). Hint Resolve (SR_plus_zero_right T). Hint Resolve (SR_plus_zero_right2 T). Hint Resolve (SR_mult_one_right T). Hint Resolve (SR_mult_one_right2 T). (*Hint Resolve (SR_plus_reg_right T).*) Hint Resolve eq_refl eq_sym eq_trans. Hint Immediate T. Lemma varlist_eq_prop : forall x y:varlist, Is_true (varlist_eq x y) -> x = y. Proof. simple induction x; simple induction y; contradiction || (try reflexivity). simpl; intros. generalize (andb_prop2 _ _ H1); intros; elim H2; intros. rewrite (index_eq_prop _ _ H3); rewrite (H v0 H4); reflexivity. Qed. Remark ivl_aux_ok : forall (v:varlist) (i:index), ivl_aux i v = Amult (interp_var i) (interp_vl v). Proof. simple induction v; simpl; intros. trivial. rewrite H; trivial. Qed. Lemma varlist_merge_ok : forall x y:varlist, interp_vl (varlist_merge x y) = Amult (interp_vl x) (interp_vl y). Proof. simple induction x. simpl; trivial. simple induction y. simpl; trivial. simpl; intros. elim (index_lt i i0); simpl; intros. repeat rewrite ivl_aux_ok. rewrite H. simpl. rewrite ivl_aux_ok. eauto. repeat rewrite ivl_aux_ok. rewrite H0. rewrite ivl_aux_ok. eauto. Qed. Remark ics_aux_ok : forall (x:A) (s:canonical_sum), ics_aux x s = Aplus x (interp_cs s). Proof. simple induction s; simpl; intros. trivial. reflexivity. reflexivity. Qed. Remark interp_m_ok : forall (x:A) (l:varlist), interp_m x l = Amult x (interp_vl l). Proof. destruct l as [| i v]. simpl; trivial. reflexivity. Qed. 
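(* Added illustration: continuing the example given after [monom_insert],
   if [vm] binds index i to a value xi and index j to xj, then the
   canonical sum representing a + b*x*y + y is interpreted as

     interp_cs (Cons_monom a Nil_var
                 (Cons_monom b (Cons_var i (Cons_var j Nil_var))
                    (Cons_varlist (Cons_var j Nil_var) Nil_monom)))
     = Aplus a (Aplus (Amult b (Amult xi xj)) xj)

   by unfolding [interp_m], [ivl_aux] and [ics_aux], which is the intended
   reading of the sum. *)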
Lemma canonical_sum_merge_ok : forall x y:canonical_sum, interp_cs (canonical_sum_merge x y) = Aplus (interp_cs x) (interp_cs y). simple induction x; simpl. trivial. simple induction y; simpl; intros. (* monom and nil *) eauto. (* monom and monom *) generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl; repeat rewrite ics_aux_ok; rewrite H. repeat rewrite interp_m_ok. rewrite (SR_distr_left T). repeat rewrite <- (SR_plus_assoc T). apply f_equal with (f := Aplus (Amult a (interp_vl v0))). trivial. elim (varlist_lt v v0); simpl. repeat rewrite ics_aux_ok. rewrite H; simpl; rewrite ics_aux_ok; eauto. rewrite ics_aux_ok; rewrite H0; repeat rewrite ics_aux_ok; simpl; eauto. (* monom and varlist *) generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl; repeat rewrite ics_aux_ok; rewrite H. repeat rewrite interp_m_ok. rewrite (SR_distr_left T). repeat rewrite <- (SR_plus_assoc T). apply f_equal with (f := Aplus (Amult a (interp_vl v0))). rewrite (SR_mult_one_left T). trivial. elim (varlist_lt v v0); simpl. repeat rewrite ics_aux_ok. rewrite H; simpl; rewrite ics_aux_ok; eauto. rewrite ics_aux_ok; rewrite H0; repeat rewrite ics_aux_ok; simpl; eauto. simple induction y; simpl; intros. (* varlist and nil *) trivial. (* varlist and monom *) generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl; repeat rewrite ics_aux_ok; rewrite H. repeat rewrite interp_m_ok. rewrite (SR_distr_left T). repeat rewrite <- (SR_plus_assoc T). rewrite (SR_mult_one_left T). apply f_equal with (f := Aplus (interp_vl v0)). trivial. elim (varlist_lt v v0); simpl. repeat rewrite ics_aux_ok. rewrite H; simpl; rewrite ics_aux_ok; eauto. rewrite ics_aux_ok; rewrite H0; repeat rewrite ics_aux_ok; simpl; eauto. (* varlist and varlist *) generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl; repeat rewrite ics_aux_ok; rewrite H. repeat rewrite interp_m_ok. rewrite (SR_distr_left T). repeat rewrite <- (SR_plus_assoc T). rewrite (SR_mult_one_left T). apply f_equal with (f := Aplus (interp_vl v0)). trivial. elim (varlist_lt v v0); simpl. repeat rewrite ics_aux_ok. rewrite H; simpl; rewrite ics_aux_ok; eauto. rewrite ics_aux_ok; rewrite H0; repeat rewrite ics_aux_ok; simpl; eauto. Qed. Lemma monom_insert_ok : forall (a:A) (l:varlist) (s:canonical_sum), interp_cs (monom_insert a l s) = Aplus (Amult a (interp_vl l)) (interp_cs s). intros; generalize s; simple induction s0. simpl; rewrite interp_m_ok; trivial. simpl; intros. generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl; rewrite interp_m_ok; repeat rewrite ics_aux_ok; rewrite interp_m_ok; rewrite (SR_distr_left T); eauto. elim (varlist_lt l v); simpl; [ repeat rewrite interp_m_ok; rewrite ics_aux_ok; eauto | repeat rewrite interp_m_ok; rewrite ics_aux_ok; rewrite H; rewrite ics_aux_ok; eauto ]. simpl; intros. generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl; rewrite interp_m_ok; repeat rewrite ics_aux_ok; rewrite (SR_distr_left T); rewrite (SR_mult_one_left T); eauto. elim (varlist_lt l v); simpl; [ repeat rewrite interp_m_ok; rewrite ics_aux_ok; eauto | repeat rewrite interp_m_ok; rewrite ics_aux_ok; rewrite H; rewrite ics_aux_ok; eauto ]. Qed. Lemma varlist_insert_ok : forall (l:varlist) (s:canonical_sum), interp_cs (varlist_insert l s) = Aplus (interp_vl l) (interp_cs s). intros; generalize s; simple induction s0. simpl; trivial. simpl; intros. 
generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl; rewrite interp_m_ok; repeat rewrite ics_aux_ok; rewrite interp_m_ok; rewrite (SR_distr_left T); rewrite (SR_mult_one_left T); eauto. elim (varlist_lt l v); simpl; [ repeat rewrite interp_m_ok; rewrite ics_aux_ok; eauto | repeat rewrite interp_m_ok; rewrite ics_aux_ok; rewrite H; rewrite ics_aux_ok; eauto ]. simpl; intros. generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl; rewrite interp_m_ok; repeat rewrite ics_aux_ok; rewrite (SR_distr_left T); rewrite (SR_mult_one_left T); eauto. elim (varlist_lt l v); simpl; [ repeat rewrite interp_m_ok; rewrite ics_aux_ok; eauto | repeat rewrite interp_m_ok; rewrite ics_aux_ok; rewrite H; rewrite ics_aux_ok; eauto ]. Qed. Lemma canonical_sum_scalar_ok : forall (a:A) (s:canonical_sum), interp_cs (canonical_sum_scalar a s) = Amult a (interp_cs s). simple induction s. simpl; eauto. simpl; intros. repeat rewrite ics_aux_ok. repeat rewrite interp_m_ok. rewrite H. rewrite (SR_distr_right T). repeat rewrite <- (SR_mult_assoc T). reflexivity. simpl; intros. repeat rewrite ics_aux_ok. repeat rewrite interp_m_ok. rewrite H. rewrite (SR_distr_right T). repeat rewrite <- (SR_mult_assoc T). reflexivity. Qed. Lemma canonical_sum_scalar2_ok : forall (l:varlist) (s:canonical_sum), interp_cs (canonical_sum_scalar2 l s) = Amult (interp_vl l) (interp_cs s). simple induction s. simpl; trivial. simpl; intros. rewrite monom_insert_ok. repeat rewrite ics_aux_ok. repeat rewrite interp_m_ok. rewrite H. rewrite varlist_merge_ok. repeat rewrite (SR_distr_right T). repeat rewrite <- (SR_mult_assoc T). repeat rewrite <- (SR_plus_assoc T). rewrite (SR_mult_permute T a (interp_vl l) (interp_vl v)). reflexivity. simpl; intros. rewrite varlist_insert_ok. repeat rewrite ics_aux_ok. repeat rewrite interp_m_ok. rewrite H. rewrite varlist_merge_ok. repeat rewrite (SR_distr_right T). repeat rewrite <- (SR_mult_assoc T). repeat rewrite <- (SR_plus_assoc T). reflexivity. Qed. Lemma canonical_sum_scalar3_ok : forall (c:A) (l:varlist) (s:canonical_sum), interp_cs (canonical_sum_scalar3 c l s) = Amult c (Amult (interp_vl l) (interp_cs s)). simple induction s. simpl; repeat rewrite (SR_mult_zero_right T); reflexivity. simpl; intros. rewrite monom_insert_ok. repeat rewrite ics_aux_ok. repeat rewrite interp_m_ok. rewrite H. rewrite varlist_merge_ok. repeat rewrite (SR_distr_right T). repeat rewrite <- (SR_mult_assoc T). repeat rewrite <- (SR_plus_assoc T). rewrite (SR_mult_permute T a (interp_vl l) (interp_vl v)). reflexivity. simpl; intros. rewrite monom_insert_ok. repeat rewrite ics_aux_ok. repeat rewrite interp_m_ok. rewrite H. rewrite varlist_merge_ok. repeat rewrite (SR_distr_right T). repeat rewrite <- (SR_mult_assoc T). repeat rewrite <- (SR_plus_assoc T). rewrite (SR_mult_permute T c (interp_vl l) (interp_vl v)). reflexivity. Qed. Lemma canonical_sum_prod_ok : forall x y:canonical_sum, interp_cs (canonical_sum_prod x y) = Amult (interp_cs x) (interp_cs y). simple induction x; simpl; intros. trivial. rewrite canonical_sum_merge_ok. rewrite canonical_sum_scalar3_ok. rewrite ics_aux_ok. rewrite interp_m_ok. rewrite H. rewrite (SR_mult_assoc T a (interp_vl v) (interp_cs y)). symmetry . eauto. rewrite canonical_sum_merge_ok. rewrite canonical_sum_scalar2_ok. rewrite ics_aux_ok. rewrite H. trivial. Qed. Theorem spolynomial_normalize_ok : forall p:spolynomial, interp_cs (spolynomial_normalize p) = interp_sp p. simple induction p; simpl; intros. reflexivity. reflexivity. 
rewrite canonical_sum_merge_ok. rewrite H; rewrite H0. reflexivity. rewrite canonical_sum_prod_ok. rewrite H; rewrite H0. reflexivity. Qed. Lemma canonical_sum_simplify_ok : forall s:canonical_sum, interp_cs (canonical_sum_simplify s) = interp_cs s. simple induction s. reflexivity. (* cons_monom *) simpl; intros. generalize (SR_eq_prop T a Azero). elim (Aeq a Azero). intro Heq; rewrite (Heq I). rewrite H. rewrite ics_aux_ok. rewrite interp_m_ok. rewrite (SR_mult_zero_left T). trivial. intros; simpl. generalize (SR_eq_prop T a Aone). elim (Aeq a Aone). intro Heq; rewrite (Heq I). simpl. repeat rewrite ics_aux_ok. rewrite interp_m_ok. rewrite H. rewrite (SR_mult_one_left T). reflexivity. simpl. repeat rewrite ics_aux_ok. rewrite interp_m_ok. rewrite H. reflexivity. (* cons_varlist *) simpl; intros. repeat rewrite ics_aux_ok. rewrite H. reflexivity. Qed. Theorem spolynomial_simplify_ok : forall p:spolynomial, interp_cs (spolynomial_simplify p) = interp_sp p. intro. unfold spolynomial_simplify. rewrite canonical_sum_simplify_ok. apply spolynomial_normalize_ok. Qed. (* End properties. *) End semi_rings. Arguments Cons_varlist : default implicits. Arguments Cons_monom : default implicits. Arguments SPconst : default implicits. Arguments SPplus : default implicits. Arguments SPmult : default implicits. Section rings. (* Here the coercion between Ring and Semi-Ring will be useful *) Set Implicit Arguments. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Variable vm : varmap A. Variable T : Ring_Theory Aplus Amult Aone Azero Aopp Aeq. Hint Resolve (Th_plus_comm T). Hint Resolve (Th_plus_assoc T). Hint Resolve (Th_plus_assoc2 T). Hint Resolve (Th_mult_comm T). Hint Resolve (Th_mult_assoc T). Hint Resolve (Th_mult_assoc2 T). Hint Resolve (Th_plus_zero_left T). Hint Resolve (Th_plus_zero_left2 T). Hint Resolve (Th_mult_one_left T). Hint Resolve (Th_mult_one_left2 T). Hint Resolve (Th_mult_zero_left T). Hint Resolve (Th_mult_zero_left2 T). Hint Resolve (Th_distr_left T). Hint Resolve (Th_distr_left2 T). (*Hint Resolve (Th_plus_reg_left T).*) Hint Resolve (Th_plus_permute T). Hint Resolve (Th_mult_permute T). Hint Resolve (Th_distr_right T). Hint Resolve (Th_distr_right2 T). Hint Resolve (Th_mult_zero_right T). Hint Resolve (Th_mult_zero_right2 T). Hint Resolve (Th_plus_zero_right T). Hint Resolve (Th_plus_zero_right2 T). Hint Resolve (Th_mult_one_right T). Hint Resolve (Th_mult_one_right2 T). (*Hint Resolve (Th_plus_reg_right T).*) Hint Resolve eq_refl eq_sym eq_trans. Hint Immediate T. (*** Definitions *) Inductive polynomial : Type := | Pvar : index -> polynomial | Pconst : A -> polynomial | Pplus : polynomial -> polynomial -> polynomial | Pmult : polynomial -> polynomial -> polynomial | Popp : polynomial -> polynomial. Fixpoint polynomial_normalize (x:polynomial) : canonical_sum A := match x with | Pplus l r => canonical_sum_merge Aplus Aone (polynomial_normalize l) (polynomial_normalize r) | Pmult l r => canonical_sum_prod Aplus Amult Aone (polynomial_normalize l) (polynomial_normalize r) | Pconst c => Cons_monom c Nil_var (Nil_monom A) | Pvar i => Cons_varlist (Cons_var i Nil_var) (Nil_monom A) | Popp p => canonical_sum_scalar3 Aplus Amult Aone (Aopp Aone) Nil_var (polynomial_normalize p) end. Definition polynomial_simplify (x:polynomial) := canonical_sum_simplify Aone Azero Aeq (polynomial_normalize x). 
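(* Added worked example: reify x as [Pvar i], y as [Pvar j] and z as
   [Pvar k], with i < j < k in the index order.  The two sides of the ring
   identity (x + y) * z = x*z + y*z are reflected as

     Pmult (Pplus (Pvar i) (Pvar j)) (Pvar k)
     Pplus (Pmult (Pvar i) (Pvar k)) (Pmult (Pvar j) (Pvar k))

   and both normalize to the same canonical sum

     Cons_varlist (Cons_var i (Cons_var k Nil_var))
       (Cons_varlist (Cons_var j (Cons_var k Nil_var)) Nil_monom)

   so [polynomial_simplify_ok] below lets the tactic replace each side by
   the interpretation of this common normal form and close the goal by
   reflexivity. *)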
Fixpoint spolynomial_of (x:polynomial) : spolynomial A := match x with | Pplus l r => SPplus (spolynomial_of l) (spolynomial_of r) | Pmult l r => SPmult (spolynomial_of l) (spolynomial_of r) | Pconst c => SPconst c | Pvar i => SPvar A i | Popp p => SPmult (SPconst (Aopp Aone)) (spolynomial_of p) end. (*** Interpretation *) Fixpoint interp_p (p:polynomial) : A := match p with | Pconst c => c | Pvar i => varmap_find Azero i vm | Pplus p1 p2 => Aplus (interp_p p1) (interp_p p2) | Pmult p1 p2 => Amult (interp_p p1) (interp_p p2) | Popp p1 => Aopp (interp_p p1) end. (*** Properties *) Unset Implicit Arguments. Lemma spolynomial_of_ok : forall p:polynomial, interp_p p = interp_sp Aplus Amult Azero vm (spolynomial_of p). simple induction p; reflexivity || (simpl; intros). rewrite H; rewrite H0; reflexivity. rewrite H; rewrite H0; reflexivity. rewrite H. rewrite (Th_opp_mult_left2 T). rewrite (Th_mult_one_left T). reflexivity. Qed. Theorem polynomial_normalize_ok : forall p:polynomial, polynomial_normalize p = spolynomial_normalize Aplus Amult Aone (spolynomial_of p). simple induction p; reflexivity || (simpl; intros). rewrite H; rewrite H0; reflexivity. rewrite H; rewrite H0; reflexivity. rewrite H; simpl. elim (canonical_sum_scalar3 Aplus Amult Aone (Aopp Aone) Nil_var (spolynomial_normalize Aplus Amult Aone (spolynomial_of p0))); [ reflexivity | simpl; intros; rewrite H0; reflexivity | simpl; intros; rewrite H0; reflexivity ]. Qed. Theorem polynomial_simplify_ok : forall p:polynomial, interp_cs Aplus Amult Aone Azero vm (polynomial_simplify p) = interp_p p. intro. unfold polynomial_simplify. rewrite spolynomial_of_ok. rewrite polynomial_normalize_ok. rewrite (canonical_sum_simplify_ok A Aplus Amult Aone Azero Aeq vm T). rewrite (spolynomial_normalize_ok A Aplus Amult Aone Azero Aeq vm T). reflexivity. Qed. End rings. Infix "+" := Pplus : ring_scope. Infix "*" := Pmult : ring_scope. Notation "- x" := (Popp x) : ring_scope. Notation "[ x ]" := (Pvar x) (at level 0) : ring_scope. Delimit Scope ring_scope with ring. coq-8.4pl2/plugins/ring/Setoid_ring_theory.v0000640000175000001440000002530212010532755020300 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* A -> Prop. Infix Local "==" := Aequiv (at level 70, no associativity). Variable S : Setoid_Theory A Aequiv. Add Setoid A Aequiv S as Asetoid. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Infix "+" := Aplus (at level 50, left associativity). Infix "*" := Amult (at level 40, left associativity). Notation "0" := Azero. Notation "1" := Aone. Notation "- x" := (Aopp x). Variable plus_morph : forall a a0:A, a == a0 -> forall a1 a2:A, a1 == a2 -> a + a1 == a0 + a2. Variable mult_morph : forall a a0:A, a == a0 -> forall a1 a2:A, a1 == a2 -> a * a1 == a0 * a2. Variable opp_morph : forall a a0:A, a == a0 -> - a == - a0. Add Morphism Aplus : Aplus_ext. intros; apply plus_morph; assumption. Qed. Add Morphism Amult : Amult_ext. intros; apply mult_morph; assumption. Qed. Add Morphism Aopp : Aopp_ext. exact opp_morph. Qed. Section Theory_of_semi_setoid_rings. 
Record Semi_Setoid_Ring_Theory : Prop := {SSR_plus_comm : forall n m:A, n + m == m + n; SSR_plus_assoc : forall n m p:A, n + (m + p) == n + m + p; SSR_mult_comm : forall n m:A, n * m == m * n; SSR_mult_assoc : forall n m p:A, n * (m * p) == n * m * p; SSR_plus_zero_left : forall n:A, 0 + n == n; SSR_mult_one_left : forall n:A, 1 * n == n; SSR_mult_zero_left : forall n:A, 0 * n == 0; SSR_distr_left : forall n m p:A, (n + m) * p == n * p + m * p; SSR_plus_reg_left : forall n m p:A, n + m == n + p -> m == p; SSR_eq_prop : forall x y:A, Is_true (Aeq x y) -> x == y}. Variable T : Semi_Setoid_Ring_Theory. Let plus_comm := SSR_plus_comm T. Let plus_assoc := SSR_plus_assoc T. Let mult_comm := SSR_mult_comm T. Let mult_assoc := SSR_mult_assoc T. Let plus_zero_left := SSR_plus_zero_left T. Let mult_one_left := SSR_mult_one_left T. Let mult_zero_left := SSR_mult_zero_left T. Let distr_left := SSR_distr_left T. Let plus_reg_left := SSR_plus_reg_left T. Let equiv_refl := Seq_refl A Aequiv S. Let equiv_sym := Seq_sym A Aequiv S. Let equiv_trans := Seq_trans A Aequiv S. Hint Resolve plus_comm plus_assoc mult_comm mult_assoc plus_zero_left mult_one_left mult_zero_left distr_left plus_reg_left equiv_refl (*equiv_sym*). Hint Immediate equiv_sym. (* Lemmas whose form is x=y are also provided in form y=x because Auto does not symmetry *) Lemma SSR_mult_assoc2 : forall n m p:A, n * m * p == n * (m * p). auto. Qed. Lemma SSR_plus_assoc2 : forall n m p:A, n + m + p == n + (m + p). auto. Qed. Lemma SSR_plus_zero_left2 : forall n:A, n == 0 + n. auto. Qed. Lemma SSR_mult_one_left2 : forall n:A, n == 1 * n. auto. Qed. Lemma SSR_mult_zero_left2 : forall n:A, 0 == 0 * n. auto. Qed. Lemma SSR_distr_left2 : forall n m p:A, n * p + m * p == (n + m) * p. auto. Qed. Lemma SSR_plus_permute : forall n m p:A, n + (m + p) == m + (n + p). intros. rewrite (plus_assoc n m p). rewrite (plus_comm n m). rewrite <- (plus_assoc m n p). trivial. Qed. Lemma SSR_mult_permute : forall n m p:A, n * (m * p) == m * (n * p). intros. rewrite (mult_assoc n m p). rewrite (mult_comm n m). rewrite <- (mult_assoc m n p). trivial. Qed. Hint Resolve SSR_plus_permute SSR_mult_permute. Lemma SSR_distr_right : forall n m p:A, n * (m + p) == n * m + n * p. intros. rewrite (mult_comm n (m + p)). rewrite (mult_comm n m). rewrite (mult_comm n p). auto. Qed. Lemma SSR_distr_right2 : forall n m p:A, n * m + n * p == n * (m + p). intros. apply equiv_sym. apply SSR_distr_right. Qed. Lemma SSR_mult_zero_right : forall n:A, n * 0 == 0. intro; rewrite (mult_comm n 0); auto. Qed. Lemma SSR_mult_zero_right2 : forall n:A, 0 == n * 0. intro; rewrite (mult_comm n 0); auto. Qed. Lemma SSR_plus_zero_right : forall n:A, n + 0 == n. intro; rewrite (plus_comm n 0); auto. Qed. Lemma SSR_plus_zero_right2 : forall n:A, n == n + 0. intro; rewrite (plus_comm n 0); auto. Qed. Lemma SSR_mult_one_right : forall n:A, n * 1 == n. intro; rewrite (mult_comm n 1); auto. Qed. Lemma SSR_mult_one_right2 : forall n:A, n == n * 1. intro; rewrite (mult_comm n 1); auto. Qed. Lemma SSR_plus_reg_right : forall n m p:A, m + n == p + n -> m == p. intros n m p; rewrite (plus_comm m n); rewrite (plus_comm p n). intro; apply plus_reg_left with n; trivial. Qed. End Theory_of_semi_setoid_rings. Section Theory_of_setoid_rings. 
Record Setoid_Ring_Theory : Prop := {STh_plus_comm : forall n m:A, n + m == m + n; STh_plus_assoc : forall n m p:A, n + (m + p) == n + m + p; STh_mult_comm : forall n m:A, n * m == m * n; STh_mult_assoc : forall n m p:A, n * (m * p) == n * m * p; STh_plus_zero_left : forall n:A, 0 + n == n; STh_mult_one_left : forall n:A, 1 * n == n; STh_opp_def : forall n:A, n + - n == 0; STh_distr_left : forall n m p:A, (n + m) * p == n * p + m * p; STh_eq_prop : forall x y:A, Is_true (Aeq x y) -> x == y}. Variable T : Setoid_Ring_Theory. Let plus_comm := STh_plus_comm T. Let plus_assoc := STh_plus_assoc T. Let mult_comm := STh_mult_comm T. Let mult_assoc := STh_mult_assoc T. Let plus_zero_left := STh_plus_zero_left T. Let mult_one_left := STh_mult_one_left T. Let opp_def := STh_opp_def T. Let distr_left := STh_distr_left T. Let equiv_refl := Seq_refl A Aequiv S. Let equiv_sym := Seq_sym A Aequiv S. Let equiv_trans := Seq_trans A Aequiv S. Hint Resolve plus_comm plus_assoc mult_comm mult_assoc plus_zero_left mult_one_left opp_def distr_left equiv_refl equiv_sym. (* Lemmas whose form is x=y are also provided in form y=x because Auto does not symmetry *) Lemma STh_mult_assoc2 : forall n m p:A, n * m * p == n * (m * p). auto. Qed. Lemma STh_plus_assoc2 : forall n m p:A, n + m + p == n + (m + p). auto. Qed. Lemma STh_plus_zero_left2 : forall n:A, n == 0 + n. auto. Qed. Lemma STh_mult_one_left2 : forall n:A, n == 1 * n. auto. Qed. Lemma STh_distr_left2 : forall n m p:A, n * p + m * p == (n + m) * p. auto. Qed. Lemma STh_opp_def2 : forall n:A, 0 == n + - n. auto. Qed. Lemma STh_plus_permute : forall n m p:A, n + (m + p) == m + (n + p). intros. rewrite (plus_assoc n m p). rewrite (plus_comm n m). rewrite <- (plus_assoc m n p). trivial. Qed. Lemma STh_mult_permute : forall n m p:A, n * (m * p) == m * (n * p). intros. rewrite (mult_assoc n m p). rewrite (mult_comm n m). rewrite <- (mult_assoc m n p). trivial. Qed. Hint Resolve STh_plus_permute STh_mult_permute. Lemma Saux1 : forall a:A, a + a == a -> a == 0. intros. rewrite <- (plus_zero_left a). rewrite (plus_comm 0 a). setoid_replace (a + 0) with (a + (a + - a)) by auto. rewrite (plus_assoc a a (- a)). rewrite H. apply opp_def. Qed. Lemma STh_mult_zero_left : forall n:A, 0 * n == 0. intros. apply Saux1. rewrite <- (distr_left 0 0 n). rewrite (plus_zero_left 0). trivial. Qed. Hint Resolve STh_mult_zero_left. Lemma STh_mult_zero_left2 : forall n:A, 0 == 0 * n. auto. Qed. Lemma Saux2 : forall x y z:A, x + y == 0 -> x + z == 0 -> y == z. intros. rewrite <- (plus_zero_left y). rewrite <- H0. rewrite <- (plus_assoc x z y). rewrite (plus_comm z y). rewrite (plus_assoc x y z). rewrite H. auto. Qed. Lemma STh_opp_mult_left : forall x y:A, - (x * y) == - x * y. intros. apply Saux2 with (x * y); auto. rewrite <- (distr_left x (- x) y). rewrite (opp_def x). auto. Qed. Hint Resolve STh_opp_mult_left. Lemma STh_opp_mult_left2 : forall x y:A, - x * y == - (x * y). auto. Qed. Lemma STh_mult_zero_right : forall n:A, n * 0 == 0. intro; rewrite (mult_comm n 0); auto. Qed. Lemma STh_mult_zero_right2 : forall n:A, 0 == n * 0. intro; rewrite (mult_comm n 0); auto. Qed. Lemma STh_plus_zero_right : forall n:A, n + 0 == n. intro; rewrite (plus_comm n 0); auto. Qed. Lemma STh_plus_zero_right2 : forall n:A, n == n + 0. intro; rewrite (plus_comm n 0); auto. Qed. Lemma STh_mult_one_right : forall n:A, n * 1 == n. intro; rewrite (mult_comm n 1); auto. Qed. Lemma STh_mult_one_right2 : forall n:A, n == n * 1. intro; rewrite (mult_comm n 1); auto. Qed. 
Lemma STh_opp_mult_right : forall x y:A, - (x * y) == x * - y. intros. rewrite (mult_comm x y). rewrite (mult_comm x (- y)). auto. Qed. Lemma STh_opp_mult_right2 : forall x y:A, x * - y == - (x * y). intros. rewrite (mult_comm x y). rewrite (mult_comm x (- y)). auto. Qed. Lemma STh_plus_opp_opp : forall x y:A, - x + - y == - (x + y). intros. apply Saux2 with (x + y); auto. rewrite (STh_plus_permute (x + y) (- x) (- y)). rewrite <- (plus_assoc x y (- y)). rewrite (opp_def y); rewrite (STh_plus_zero_right x). rewrite (STh_opp_def2 x); trivial. Qed. Lemma STh_plus_permute_opp : forall n m p:A, - m + (n + p) == n + (- m + p). auto. Qed. Lemma STh_opp_opp : forall n:A, - - n == n. intro. apply Saux2 with (- n); auto. rewrite (plus_comm (- n) n); auto. Qed. Hint Resolve STh_opp_opp. Lemma STh_opp_opp2 : forall n:A, n == - - n. auto. Qed. Lemma STh_mult_opp_opp : forall x y:A, - x * - y == x * y. intros. rewrite (STh_opp_mult_left2 x (- y)). rewrite (STh_opp_mult_right2 x y). trivial. Qed. Lemma STh_mult_opp_opp2 : forall x y:A, x * y == - x * - y. intros. apply equiv_sym. apply STh_mult_opp_opp. Qed. Lemma STh_opp_zero : - 0 == 0. rewrite <- (plus_zero_left (- 0)). trivial. Qed. Lemma STh_plus_reg_left : forall n m p:A, n + m == n + p -> m == p. intros. rewrite <- (plus_zero_left m). rewrite <- (plus_zero_left p). rewrite <- (opp_def n). rewrite (plus_comm n (- n)). rewrite <- (plus_assoc (- n) n m). rewrite <- (plus_assoc (- n) n p). auto. Qed. Lemma STh_plus_reg_right : forall n m p:A, m + n == p + n -> m == p. intros. apply STh_plus_reg_left with n. rewrite (plus_comm n m); rewrite (plus_comm n p); assumption. Qed. Lemma STh_distr_right : forall n m p:A, n * (m + p) == n * m + n * p. intros. rewrite (mult_comm n (m + p)). rewrite (mult_comm n m). rewrite (mult_comm n p). trivial. Qed. Lemma STh_distr_right2 : forall n m p:A, n * m + n * p == n * (m + p). intros. apply equiv_sym. apply STh_distr_right. Qed. End Theory_of_setoid_rings. Hint Resolve STh_mult_zero_left STh_plus_reg_left: core. Unset Implicit Arguments. Definition Semi_Setoid_Ring_Theory_of : Setoid_Ring_Theory -> Semi_Setoid_Ring_Theory. intros until 1; case H. split; intros; simpl; eauto. Defined. Coercion Semi_Setoid_Ring_Theory_of : Setoid_Ring_Theory >-> Semi_Setoid_Ring_Theory. Section product_ring. End product_ring. Section power_ring. End power_ring. End Setoid_rings. coq-8.4pl2/plugins/ring/Ring_abstract.v0000640000175000001440000004562012010532755017227 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* aspolynomial | ASP0 : aspolynomial | ASP1 : aspolynomial | ASPplus : aspolynomial -> aspolynomial -> aspolynomial | ASPmult : aspolynomial -> aspolynomial -> aspolynomial. Inductive abstract_sum : Type := | Nil_acs : abstract_sum | Cons_acs : varlist -> abstract_sum -> abstract_sum. Fixpoint abstract_sum_merge (s1:abstract_sum) : abstract_sum -> abstract_sum := match s1 with | Cons_acs l1 t1 => (fix asm_aux (s2:abstract_sum) : abstract_sum := match s2 with | Cons_acs l2 t2 => if varlist_lt l1 l2 then Cons_acs l1 (abstract_sum_merge t1 s2) else Cons_acs l2 (asm_aux t2) | Nil_acs => s1 end) | Nil_acs => fun s2 => s2 end. Fixpoint abstract_varlist_insert (l1:varlist) (s2:abstract_sum) {struct s2} : abstract_sum := match s2 with | Cons_acs l2 t2 => if varlist_lt l1 l2 then Cons_acs l1 s2 else Cons_acs l2 (abstract_varlist_insert l1 t2) | Nil_acs => Cons_acs l1 Nil_acs end. 
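(* Illustrative note: an abstract_sum is a list of varlists kept sorted by varlist_lt, each varlist standing for a monomial with an implicit coefficient of 1. For instance, assuming two distinct Quote indices i and j with index_lt i j = true, the sum x + y (x and y being the variables at i and j) is represented as Cons_acs (Cons_var i Nil_var) (Cons_acs (Cons_var j Nil_var) Nil_acs), and abstract_sum_merge applied to the singleton sums for x and for y, in either argument order, computes exactly this sorted representation. *)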
Fixpoint abstract_sum_scalar (l1:varlist) (s2:abstract_sum) {struct s2} : abstract_sum := match s2 with | Cons_acs l2 t2 => abstract_varlist_insert (varlist_merge l1 l2) (abstract_sum_scalar l1 t2) | Nil_acs => Nil_acs end. Fixpoint abstract_sum_prod (s1 s2:abstract_sum) {struct s1} : abstract_sum := match s1 with | Cons_acs l1 t1 => abstract_sum_merge (abstract_sum_scalar l1 s2) (abstract_sum_prod t1 s2) | Nil_acs => Nil_acs end. Fixpoint aspolynomial_normalize (p:aspolynomial) : abstract_sum := match p with | ASPvar i => Cons_acs (Cons_var i Nil_var) Nil_acs | ASP1 => Cons_acs Nil_var Nil_acs | ASP0 => Nil_acs | ASPplus l r => abstract_sum_merge (aspolynomial_normalize l) (aspolynomial_normalize r) | ASPmult l r => abstract_sum_prod (aspolynomial_normalize l) (aspolynomial_normalize r) end. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aeq : A -> A -> bool. Variable vm : varmap A. Variable T : Semi_Ring_Theory Aplus Amult Aone Azero Aeq. Fixpoint interp_asp (p:aspolynomial) : A := match p with | ASPvar i => interp_var Azero vm i | ASP0 => Azero | ASP1 => Aone | ASPplus l r => Aplus (interp_asp l) (interp_asp r) | ASPmult l r => Amult (interp_asp l) (interp_asp r) end. (* Local *) Definition iacs_aux := (fix iacs_aux (a:A) (s:abstract_sum) {struct s} : A := match s with | Nil_acs => a | Cons_acs l t => Aplus a (iacs_aux (interp_vl Amult Aone Azero vm l) t) end). Definition interp_acs (s:abstract_sum) : A := match s with | Cons_acs l t => iacs_aux (interp_vl Amult Aone Azero vm l) t | Nil_acs => Azero end. Hint Resolve (SR_plus_comm T). Hint Resolve (SR_plus_assoc T). Hint Resolve (SR_plus_assoc2 T). Hint Resolve (SR_mult_comm T). Hint Resolve (SR_mult_assoc T). Hint Resolve (SR_mult_assoc2 T). Hint Resolve (SR_plus_zero_left T). Hint Resolve (SR_plus_zero_left2 T). Hint Resolve (SR_mult_one_left T). Hint Resolve (SR_mult_one_left2 T). Hint Resolve (SR_mult_zero_left T). Hint Resolve (SR_mult_zero_left2 T). Hint Resolve (SR_distr_left T). Hint Resolve (SR_distr_left2 T). (*Hint Resolve (SR_plus_reg_left T).*) Hint Resolve (SR_plus_permute T). Hint Resolve (SR_mult_permute T). Hint Resolve (SR_distr_right T). Hint Resolve (SR_distr_right2 T). Hint Resolve (SR_mult_zero_right T). Hint Resolve (SR_mult_zero_right2 T). Hint Resolve (SR_plus_zero_right T). Hint Resolve (SR_plus_zero_right2 T). Hint Resolve (SR_mult_one_right T). Hint Resolve (SR_mult_one_right2 T). (*Hint Resolve (SR_plus_reg_right T).*) Hint Resolve eq_refl eq_sym eq_trans. Hint Immediate T. Remark iacs_aux_ok : forall (x:A) (s:abstract_sum), iacs_aux x s = Aplus x (interp_acs s). Proof. simple induction s; simpl; intros. trivial. reflexivity. Qed. Hint Extern 10 (_ = _ :>A) => rewrite iacs_aux_ok: core. Lemma abstract_varlist_insert_ok : forall (l:varlist) (s:abstract_sum), interp_acs (abstract_varlist_insert l s) = Aplus (interp_vl Amult Aone Azero vm l) (interp_acs s). simple induction s. trivial. simpl; intros. elim (varlist_lt l v); simpl. eauto. rewrite iacs_aux_ok. rewrite H; auto. Qed. Lemma abstract_sum_merge_ok : forall x y:abstract_sum, interp_acs (abstract_sum_merge x y) = Aplus (interp_acs x) (interp_acs y). Proof. simple induction x. trivial. simple induction y; intros. auto. simpl; elim (varlist_lt v v0); simpl. repeat rewrite iacs_aux_ok. rewrite H; simpl; auto. simpl in H0. repeat rewrite iacs_aux_ok. rewrite H0. simpl; auto. Qed. 
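(* Worked example: for any index i, aspolynomial_normalize (ASPplus (ASPvar i) (ASPmult ASP1 (ASPvar i))) computes to Cons_acs (Cons_var i Nil_var) (Cons_acs (Cons_var i Nil_var) Nil_acs), i.e. the monomial for i is simply listed twice: abstract sums carry no coefficients, so duplicates are kept rather than collected. interp_acs maps this sum to Aplus x x, where x stands for interp_var Azero vm i, and aspolynomial_normalize_ok below relates it to the original interpretation Aplus x (Amult Aone x). *)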
Lemma abstract_sum_scalar_ok : forall (l:varlist) (s:abstract_sum), interp_acs (abstract_sum_scalar l s) = Amult (interp_vl Amult Aone Azero vm l) (interp_acs s). Proof. simple induction s. simpl; eauto. simpl; intros. rewrite iacs_aux_ok. rewrite abstract_varlist_insert_ok. rewrite H. rewrite (varlist_merge_ok A Aplus Amult Aone Azero Aeq vm T). auto. Qed. Lemma abstract_sum_prod_ok : forall x y:abstract_sum, interp_acs (abstract_sum_prod x y) = Amult (interp_acs x) (interp_acs y). Proof. simple induction x. intros; simpl; eauto. destruct y as [| v0 a0]; intros. simpl; rewrite H; eauto. unfold abstract_sum_prod; fold abstract_sum_prod. rewrite abstract_sum_merge_ok. rewrite abstract_sum_scalar_ok. rewrite H; simpl; auto. Qed. Theorem aspolynomial_normalize_ok : forall x:aspolynomial, interp_asp x = interp_acs (aspolynomial_normalize x). Proof. simple induction x; simpl; intros; trivial. rewrite abstract_sum_merge_ok. rewrite H; rewrite H0; eauto. rewrite abstract_sum_prod_ok. rewrite H; rewrite H0; eauto. Qed. End abstract_semi_rings. Section abstract_rings. (* In abstract polynomials there are no constants other than 0 and 1. An abstract ring is a ring whose operations plus and mult are not functions but constructors. In other words, when c1 and c2 are closed, (plus c1 c2) doesn't reduce to a closed term. "closed" means here "without plus and mult". *) (* This section is not parametrized by a (semi-)ring. Nevertheless, there are two different types for semi-rings and rings, and there will be 2 correctness theorems *) Inductive apolynomial : Type := | APvar : index -> apolynomial | AP0 : apolynomial | AP1 : apolynomial | APplus : apolynomial -> apolynomial -> apolynomial | APmult : apolynomial -> apolynomial -> apolynomial | APopp : apolynomial -> apolynomial. (* A canonical "abstract" sum is a list of varlists, each with a sign "+" or "-". Invariant : the list is sorted and no varlist is present with both signs. +x +x +x -x is forbidden => the canonical form is +x +x *) Inductive signed_sum : Type := | Nil_varlist : signed_sum | Plus_varlist : varlist -> signed_sum -> signed_sum | Minus_varlist : varlist -> signed_sum -> signed_sum. Fixpoint signed_sum_merge (s1:signed_sum) : signed_sum -> signed_sum := match s1 with | Plus_varlist l1 t1 => (fix ssm_aux (s2:signed_sum) : signed_sum := match s2 with | Plus_varlist l2 t2 => if varlist_lt l1 l2 then Plus_varlist l1 (signed_sum_merge t1 s2) else Plus_varlist l2 (ssm_aux t2) | Minus_varlist l2 t2 => if varlist_eq l1 l2 then signed_sum_merge t1 t2 else if varlist_lt l1 l2 then Plus_varlist l1 (signed_sum_merge t1 s2) else Minus_varlist l2 (ssm_aux t2) | Nil_varlist => s1 end) | Minus_varlist l1 t1 => (fix ssm_aux2 (s2:signed_sum) : signed_sum := match s2 with | Plus_varlist l2 t2 => if varlist_eq l1 l2 then signed_sum_merge t1 t2 else if varlist_lt l1 l2 then Minus_varlist l1 (signed_sum_merge t1 s2) else Plus_varlist l2 (ssm_aux2 t2) | Minus_varlist l2 t2 => if varlist_lt l1 l2 then Minus_varlist l1 (signed_sum_merge t1 s2) else Minus_varlist l2 (ssm_aux2 t2) | Nil_varlist => s1 end) | Nil_varlist => fun s2 => s2 end. Fixpoint plus_varlist_insert (l1:varlist) (s2:signed_sum) {struct s2} : signed_sum := match s2 with | Plus_varlist l2 t2 => if varlist_lt l1 l2 then Plus_varlist l1 s2 else Plus_varlist l2 (plus_varlist_insert l1 t2) | Minus_varlist l2 t2 => if varlist_eq l1 l2 then t2 else if varlist_lt l1 l2 then Plus_varlist l1 s2 else Minus_varlist l2 (plus_varlist_insert l1 t2) | Nil_varlist => Plus_varlist l1 Nil_varlist end.
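(* Illustrative example of the cancellation performed by the merge: taking the concrete Quote index End_idx, signed_sum_merge (Plus_varlist (Cons_var End_idx Nil_var) Nil_varlist) (Minus_varlist (Cons_var End_idx Nil_var) Nil_varlist) computes to Nil_varlist, since varlist_eq recognises the two occurrences of the same varlist and the +x and -x summands cancel; this is how the invariant above (no varlist with both signs) is maintained. *)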
Fixpoint minus_varlist_insert (l1:varlist) (s2:signed_sum) {struct s2} : signed_sum := match s2 with | Plus_varlist l2 t2 => if varlist_eq l1 l2 then t2 else if varlist_lt l1 l2 then Minus_varlist l1 s2 else Plus_varlist l2 (minus_varlist_insert l1 t2) | Minus_varlist l2 t2 => if varlist_lt l1 l2 then Minus_varlist l1 s2 else Minus_varlist l2 (minus_varlist_insert l1 t2) | Nil_varlist => Minus_varlist l1 Nil_varlist end. Fixpoint signed_sum_opp (s:signed_sum) : signed_sum := match s with | Plus_varlist l2 t2 => Minus_varlist l2 (signed_sum_opp t2) | Minus_varlist l2 t2 => Plus_varlist l2 (signed_sum_opp t2) | Nil_varlist => Nil_varlist end. Fixpoint plus_sum_scalar (l1:varlist) (s2:signed_sum) {struct s2} : signed_sum := match s2 with | Plus_varlist l2 t2 => plus_varlist_insert (varlist_merge l1 l2) (plus_sum_scalar l1 t2) | Minus_varlist l2 t2 => minus_varlist_insert (varlist_merge l1 l2) (plus_sum_scalar l1 t2) | Nil_varlist => Nil_varlist end. Fixpoint minus_sum_scalar (l1:varlist) (s2:signed_sum) {struct s2} : signed_sum := match s2 with | Plus_varlist l2 t2 => minus_varlist_insert (varlist_merge l1 l2) (minus_sum_scalar l1 t2) | Minus_varlist l2 t2 => plus_varlist_insert (varlist_merge l1 l2) (minus_sum_scalar l1 t2) | Nil_varlist => Nil_varlist end. Fixpoint signed_sum_prod (s1 s2:signed_sum) {struct s1} : signed_sum := match s1 with | Plus_varlist l1 t1 => signed_sum_merge (plus_sum_scalar l1 s2) (signed_sum_prod t1 s2) | Minus_varlist l1 t1 => signed_sum_merge (minus_sum_scalar l1 s2) (signed_sum_prod t1 s2) | Nil_varlist => Nil_varlist end. Fixpoint apolynomial_normalize (p:apolynomial) : signed_sum := match p with | APvar i => Plus_varlist (Cons_var i Nil_var) Nil_varlist | AP1 => Plus_varlist Nil_var Nil_varlist | AP0 => Nil_varlist | APplus l r => signed_sum_merge (apolynomial_normalize l) (apolynomial_normalize r) | APmult l r => signed_sum_prod (apolynomial_normalize l) (apolynomial_normalize r) | APopp q => signed_sum_opp (apolynomial_normalize q) end. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Variable vm : varmap A. Variable T : Ring_Theory Aplus Amult Aone Azero Aopp Aeq. (* Local *) Definition isacs_aux := (fix isacs_aux (a:A) (s:signed_sum) {struct s} : A := match s with | Nil_varlist => a | Plus_varlist l t => Aplus a (isacs_aux (interp_vl Amult Aone Azero vm l) t) | Minus_varlist l t => Aplus a (isacs_aux (Aopp (interp_vl Amult Aone Azero vm l)) t) end). Definition interp_sacs (s:signed_sum) : A := match s with | Plus_varlist l t => isacs_aux (interp_vl Amult Aone Azero vm l) t | Minus_varlist l t => isacs_aux (Aopp (interp_vl Amult Aone Azero vm l)) t | Nil_varlist => Azero end. Fixpoint interp_ap (p:apolynomial) : A := match p with | APvar i => interp_var Azero vm i | AP0 => Azero | AP1 => Aone | APplus l r => Aplus (interp_ap l) (interp_ap r) | APmult l r => Amult (interp_ap l) (interp_ap r) | APopp q => Aopp (interp_ap q) end. Hint Resolve (Th_plus_comm T). Hint Resolve (Th_plus_assoc T). Hint Resolve (Th_plus_assoc2 T). Hint Resolve (Th_mult_comm T). Hint Resolve (Th_mult_assoc T). Hint Resolve (Th_mult_assoc2 T). Hint Resolve (Th_plus_zero_left T). Hint Resolve (Th_plus_zero_left2 T). Hint Resolve (Th_mult_one_left T). Hint Resolve (Th_mult_one_left2 T). Hint Resolve (Th_mult_zero_left T). Hint Resolve (Th_mult_zero_left2 T). Hint Resolve (Th_distr_left T). Hint Resolve (Th_distr_left2 T). 
(*Hint Resolve (Th_plus_reg_left T).*) Hint Resolve (Th_plus_permute T). Hint Resolve (Th_mult_permute T). Hint Resolve (Th_distr_right T). Hint Resolve (Th_distr_right2 T). Hint Resolve (Th_mult_zero_right2 T). Hint Resolve (Th_plus_zero_right T). Hint Resolve (Th_plus_zero_right2 T). Hint Resolve (Th_mult_one_right T). Hint Resolve (Th_mult_one_right2 T). (*Hint Resolve (Th_plus_reg_right T).*) Hint Resolve eq_refl eq_sym eq_trans. Hint Immediate T. Lemma isacs_aux_ok : forall (x:A) (s:signed_sum), isacs_aux x s = Aplus x (interp_sacs s). Proof. simple induction s; simpl; intros. trivial. reflexivity. reflexivity. Qed. Hint Extern 10 (_ = _ :>A) => rewrite isacs_aux_ok: core. Ltac solve1 v v0 H H0 := simpl; elim (varlist_lt v v0); simpl; rewrite isacs_aux_ok; [ rewrite H; simpl; auto | simpl in H0; rewrite H0; auto ]. Lemma signed_sum_merge_ok : forall x y:signed_sum, interp_sacs (signed_sum_merge x y) = Aplus (interp_sacs x) (interp_sacs y). simple induction x. intro; simpl; auto. simple induction y; intros. auto. solve1 v v0 H H0. simpl; generalize (varlist_eq_prop v v0). elim (varlist_eq v v0); simpl. intro Heq; rewrite (Heq I). rewrite H. repeat rewrite isacs_aux_ok. rewrite (Th_plus_permute T). repeat rewrite (Th_plus_assoc T). rewrite (Th_plus_comm T (Aopp (interp_vl Amult Aone Azero vm v0)) (interp_vl Amult Aone Azero vm v0)). rewrite (Th_opp_def T). rewrite (Th_plus_zero_left T). reflexivity. solve1 v v0 H H0. simple induction y; intros. auto. simpl; generalize (varlist_eq_prop v v0). elim (varlist_eq v v0); simpl. intro Heq; rewrite (Heq I). rewrite H. repeat rewrite isacs_aux_ok. rewrite (Th_plus_permute T). repeat rewrite (Th_plus_assoc T). rewrite (Th_opp_def T). rewrite (Th_plus_zero_left T). reflexivity. solve1 v v0 H H0. solve1 v v0 H H0. Qed. Ltac solve2 l v H := elim (varlist_lt l v); simpl; rewrite isacs_aux_ok; [ auto | rewrite H; auto ]. Lemma plus_varlist_insert_ok : forall (l:varlist) (s:signed_sum), interp_sacs (plus_varlist_insert l s) = Aplus (interp_vl Amult Aone Azero vm l) (interp_sacs s). Proof. simple induction s. trivial. simpl; intros. solve2 l v H. simpl; intros. generalize (varlist_eq_prop l v). elim (varlist_eq l v); simpl. intro Heq; rewrite (Heq I). repeat rewrite isacs_aux_ok. repeat rewrite (Th_plus_assoc T). rewrite (Th_opp_def T). rewrite (Th_plus_zero_left T). reflexivity. solve2 l v H. Qed. Lemma minus_varlist_insert_ok : forall (l:varlist) (s:signed_sum), interp_sacs (minus_varlist_insert l s) = Aplus (Aopp (interp_vl Amult Aone Azero vm l)) (interp_sacs s). Proof. simple induction s. trivial. simpl; intros. generalize (varlist_eq_prop l v). elim (varlist_eq l v); simpl. intro Heq; rewrite (Heq I). repeat rewrite isacs_aux_ok. repeat rewrite (Th_plus_assoc T). rewrite (Th_plus_comm T (Aopp (interp_vl Amult Aone Azero vm v)) (interp_vl Amult Aone Azero vm v)). rewrite (Th_opp_def T). auto. simpl; intros. solve2 l v H. simpl; intros; solve2 l v H. Qed. Lemma signed_sum_opp_ok : forall s:signed_sum, interp_sacs (signed_sum_opp s) = Aopp (interp_sacs s). Proof. simple induction s; simpl; intros. symmetry ; apply (Th_opp_zero T). repeat rewrite isacs_aux_ok. rewrite H. rewrite (Th_plus_opp_opp T). reflexivity. repeat rewrite isacs_aux_ok. rewrite H. rewrite <- (Th_plus_opp_opp T). rewrite (Th_opp_opp T). reflexivity. Qed. Lemma plus_sum_scalar_ok : forall (l:varlist) (s:signed_sum), interp_sacs (plus_sum_scalar l s) = Amult (interp_vl Amult Aone Azero vm l) (interp_sacs s). Proof. simple induction s. trivial. simpl; intros. 
rewrite plus_varlist_insert_ok. rewrite (varlist_merge_ok A Aplus Amult Aone Azero Aeq vm T). repeat rewrite isacs_aux_ok. rewrite H. auto. simpl; intros. rewrite minus_varlist_insert_ok. repeat rewrite isacs_aux_ok. rewrite (varlist_merge_ok A Aplus Amult Aone Azero Aeq vm T). rewrite H. rewrite (Th_distr_right T). rewrite <- (Th_opp_mult_right T). reflexivity. Qed. Lemma minus_sum_scalar_ok : forall (l:varlist) (s:signed_sum), interp_sacs (minus_sum_scalar l s) = Aopp (Amult (interp_vl Amult Aone Azero vm l) (interp_sacs s)). Proof. simple induction s; simpl; intros. rewrite (Th_mult_zero_right T); symmetry ; apply (Th_opp_zero T). simpl; intros. rewrite minus_varlist_insert_ok. rewrite (varlist_merge_ok A Aplus Amult Aone Azero Aeq vm T). repeat rewrite isacs_aux_ok. rewrite H. rewrite (Th_distr_right T). rewrite (Th_plus_opp_opp T). reflexivity. simpl; intros. rewrite plus_varlist_insert_ok. repeat rewrite isacs_aux_ok. rewrite (varlist_merge_ok A Aplus Amult Aone Azero Aeq vm T). rewrite H. rewrite (Th_distr_right T). rewrite <- (Th_opp_mult_right T). rewrite <- (Th_plus_opp_opp T). rewrite (Th_opp_opp T). reflexivity. Qed. Lemma signed_sum_prod_ok : forall x y:signed_sum, interp_sacs (signed_sum_prod x y) = Amult (interp_sacs x) (interp_sacs y). Proof. simple induction x. simpl; eauto 1. intros; simpl. rewrite signed_sum_merge_ok. rewrite plus_sum_scalar_ok. repeat rewrite isacs_aux_ok. rewrite H. auto. intros; simpl. repeat rewrite isacs_aux_ok. rewrite signed_sum_merge_ok. rewrite minus_sum_scalar_ok. rewrite H. rewrite (Th_distr_left T). rewrite (Th_opp_mult_left T). reflexivity. Qed. Theorem apolynomial_normalize_ok : forall p:apolynomial, interp_sacs (apolynomial_normalize p) = interp_ap p. Proof. simple induction p; simpl; auto 1. intros. rewrite signed_sum_merge_ok. rewrite H; rewrite H0; reflexivity. intros. rewrite signed_sum_prod_ok. rewrite H; rewrite H0; reflexivity. intros. rewrite signed_sum_opp_ok. rewrite H; reflexivity. Qed. End abstract_rings. coq-8.4pl2/plugins/ring/ring.ml0000640000175000001440000007757212121620060015550 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* to be found in Coqlib *) open Coqlib let mkLApp(fc,v) = mkApp(Lazy.force fc, v) (*********** Useful types and functions ************) module OperSet = Set.Make (struct type t = global_reference let compare = (RefOrdered.compare : t->t->int) end) type morph = { plusm : constr; multm : constr; oppm : constr option; } type theory = { th_ring : bool; (* false for a semi-ring *) th_abstract : bool; th_setoid : bool; (* true for a setoid ring *) th_equiv : constr option; th_setoid_th : constr option; th_morph : morph option; th_a : constr; (* e.g. nat *) th_plus : constr; th_mult : constr; th_one : constr; th_zero : constr; th_opp : constr option; (* None if semi-ring *) th_eq : constr; th_t : constr; (* e.g. NatTheory *) th_closed : ConstrSet.t; (* e.g. [S; O] *) (* Must be empty for an abstract ring *) } (* Theories are stored in a table which is synchronised with the Reset mechanism. 
*) module Cmap = Map.Make(struct type t = constr let compare = constr_ord end) let theories_map = ref Cmap.empty let theories_map_add (c,t) = theories_map := Cmap.add c t !theories_map let theories_map_find c = Cmap.find c !theories_map let theories_map_mem c = Cmap.mem c !theories_map let _ = Summary.declare_summary "tactic-ring-table" { Summary.freeze_function = (fun () -> !theories_map); Summary.unfreeze_function = (fun t -> theories_map := t); Summary.init_function = (fun () -> theories_map := Cmap.empty) } (* declare a new type of object in the environment, "tactic-ring-theory" The functions theory_to_obj and obj_to_theory do the conversions between theories and environement objects. *) let subst_morph subst morph = let plusm' = subst_mps subst morph.plusm in let multm' = subst_mps subst morph.multm in let oppm' = Option.smartmap (subst_mps subst) morph.oppm in if plusm' == morph.plusm && multm' == morph.multm && oppm' == morph.oppm then morph else { plusm = plusm' ; multm = multm' ; oppm = oppm' ; } let subst_set subst cset = let same = ref true in let copy_subst c newset = let c' = subst_mps subst c in if not (c' == c) then same := false; ConstrSet.add c' newset in let cset' = ConstrSet.fold copy_subst cset ConstrSet.empty in if !same then cset else cset' let subst_theory subst th = let th_equiv' = Option.smartmap (subst_mps subst) th.th_equiv in let th_setoid_th' = Option.smartmap (subst_mps subst) th.th_setoid_th in let th_morph' = Option.smartmap (subst_morph subst) th.th_morph in let th_a' = subst_mps subst th.th_a in let th_plus' = subst_mps subst th.th_plus in let th_mult' = subst_mps subst th.th_mult in let th_one' = subst_mps subst th.th_one in let th_zero' = subst_mps subst th.th_zero in let th_opp' = Option.smartmap (subst_mps subst) th.th_opp in let th_eq' = subst_mps subst th.th_eq in let th_t' = subst_mps subst th.th_t in let th_closed' = subst_set subst th.th_closed in if th_equiv' == th.th_equiv && th_setoid_th' == th.th_setoid_th && th_morph' == th.th_morph && th_a' == th.th_a && th_plus' == th.th_plus && th_mult' == th.th_mult && th_one' == th.th_one && th_zero' == th.th_zero && th_opp' == th.th_opp && th_eq' == th.th_eq && th_t' == th.th_t && th_closed' == th.th_closed then th else { th_ring = th.th_ring ; th_abstract = th.th_abstract ; th_setoid = th.th_setoid ; th_equiv = th_equiv' ; th_setoid_th = th_setoid_th' ; th_morph = th_morph' ; th_a = th_a' ; th_plus = th_plus' ; th_mult = th_mult' ; th_one = th_one' ; th_zero = th_zero' ; th_opp = th_opp' ; th_eq = th_eq' ; th_t = th_t' ; th_closed = th_closed' ; } let subst_th (subst,(c,th as obj)) = let c' = subst_mps subst c in let th' = subst_theory subst th in if c' == c && th' == th then obj else (c',th') let theory_to_obj : constr * theory -> obj = let cache_th (_,(c, th)) = theories_map_add (c,th) in declare_object {(default_object "tactic-ring-theory") with open_function = (fun i o -> if i=1 then cache_th o); cache_function = cache_th; subst_function = subst_th; classify_function = (fun x -> Substitute x) } (* from the set A, guess the associated theory *) (* With this simple solution, the theory to use is automatically guessed *) (* But only one theory can be declared for a given Set *) let guess_theory a = try theories_map_find a with Not_found -> errorlabstrm "Ring" (str "No Declared Ring Theory for " ++ pr_lconstr a ++ fnl () ++ str "Use Add [Semi] Ring to declare it") (* Looks up an option *) let unbox = function | Some w -> w | None -> anomaly "Ring : Not in case of a setoid ring." 
(* Protects the convertibility test against undue exceptions when using it with untyped terms *) let safe_pf_conv_x gl c1 c2 = try pf_conv_x gl c1 c2 with e when Errors.noncritical e -> false (* Add a Ring or a Semi-Ring to the database after a type verification *) let implement_theory env t th args = is_conv env Evd.empty (Typing.type_of env Evd.empty t) (mkLApp (th, args)) (* (\* The following test checks whether the provided morphism is the default *) (* one for the given operation. In principle the test is too strict, since *) (* it should possible to provide another proof for the same fact (proof *) (* irrelevance). In particular, the error message is be not very explicative. *\) *) let states_compatibility_for env plus mult opp morphs = let check op compat = true in (* is_conv env Evd.empty (Setoid_replace.default_morphism op).Setoid_replace.lem *) (* compat in *) check plus morphs.plusm && check mult morphs.multm && (match (opp,morphs.oppm) with None, None -> true | Some opp, Some compat -> check opp compat | _,_ -> assert false) let add_theory want_ring want_abstract want_setoid a aequiv asetth amorph aplus amult aone azero aopp aeq t cset = if theories_map_mem a then errorlabstrm "Add Semi Ring" (str "A (Semi-)(Setoid-)Ring Structure is already declared for " ++ pr_lconstr a); let env = Global.env () in if (want_ring & want_setoid & ( not (implement_theory env t coq_Setoid_Ring_Theory [| a; (unbox aequiv); aplus; amult; aone; azero; (unbox aopp); aeq|]) || not (implement_theory env (unbox asetth) coq_Setoid_Theory [| a; (unbox aequiv) |]) || not (states_compatibility_for env aplus amult aopp (unbox amorph)) )) then errorlabstrm "addring" (str "Not a valid Setoid-Ring theory"); if (not want_ring & want_setoid & ( not (implement_theory env t coq_Semi_Setoid_Ring_Theory [| a; (unbox aequiv); aplus; amult; aone; azero; aeq|]) || not (implement_theory env (unbox asetth) coq_Setoid_Theory [| a; (unbox aequiv) |]) || not (states_compatibility_for env aplus amult aopp (unbox amorph)))) then errorlabstrm "addring" (str "Not a valid Semi-Setoid-Ring theory"); if (want_ring & not want_setoid & not (implement_theory env t coq_Ring_Theory [| a; aplus; amult; aone; azero; (unbox aopp); aeq |])) then errorlabstrm "addring" (str "Not a valid Ring theory"); if (not want_ring & not want_setoid & not (implement_theory env t coq_Semi_Ring_Theory [| a; aplus; amult; aone; azero; aeq |])) then errorlabstrm "addring" (str "Not a valid Semi-Ring theory"); Lib.add_anonymous_leaf (theory_to_obj (a, { th_ring = want_ring; th_abstract = want_abstract; th_setoid = want_setoid; th_equiv = aequiv; th_setoid_th = asetth; th_morph = amorph; th_a = a; th_plus = aplus; th_mult = amult; th_one = aone; th_zero = azero; th_opp = aopp; th_eq = aeq; th_t = t; th_closed = cset })) (******** The tactic itself *********) (* gl : goal sigma th : semi-ring theory (concrete) cl : constr list [c1; c2; ...] Builds - a list of tuples [(c1, c'1, c''1, c'1_eq_c''1); ... 
] where c'i is convertible with ci and c'i_eq_c''i is a proof of equality of c'i and c''i *) module Constrhash = Hashtbl.Make (struct type t = constr let equal = eq_constr let hash = hash_constr end) let build_spolynom gl th lc = let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) (* aux creates the spolynom p by a recursive destructuration of c and builds the varmap with side-effects *) let rec aux c = match (kind_of_term (strip_outer_cast c)) with | App (binop,[|c1; c2|]) when safe_pf_conv_x gl binop th.th_plus -> mkLApp(coq_SPplus, [|th.th_a; aux c1; aux c2 |]) | App (binop,[|c1; c2|]) when safe_pf_conv_x gl binop th.th_mult -> mkLApp(coq_SPmult, [|th.th_a; aux c1; aux c2 |]) | _ when closed_under th.th_closed c -> mkLApp(coq_SPconst, [|th.th_a; c |]) | _ -> try Constrhash.find varhash c with Not_found -> let newvar = mkLApp(coq_SPvar, [|th.th_a; (path_of_int !counter) |]) in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end in let lp = List.map aux lc in let v = btree_of_array (Array.of_list (List.rev !varlist)) th.th_a in List.map (fun p -> (mkLApp (coq_interp_sp, [|th.th_a; th.th_plus; th.th_mult; th.th_zero; v; p |]), mkLApp (coq_interp_cs, [|th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; v; pf_reduce cbv_betadeltaiota gl (mkLApp (coq_spolynomial_simplify, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; th.th_eq; p|])) |]), mkLApp (coq_spolynomial_simplify_ok, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; th.th_eq; v; th.th_t; p |]))) lp (* gl : goal sigma th : ring theory (concrete) cl : constr list [c1; c2; ...] Builds - a list of tuples [(c1, c'1, c''1, c'1_eq_c''1); ... 
] where c'i is convertible with ci and c'i_eq_c''i is a proof of equality of c'i and c''i *) let build_polynom gl th lc = let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) let rec aux c = match (kind_of_term (strip_outer_cast c)) with | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_plus -> mkLApp(coq_Pplus, [|th.th_a; aux c1; aux c2 |]) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_mult -> mkLApp(coq_Pmult, [|th.th_a; aux c1; aux c2 |]) (* The special case of Z.sub *) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl c (mkApp (th.th_plus, [|c1; mkApp(unbox th.th_opp, [|c2|])|])) -> mkLApp(coq_Pplus, [|th.th_a; aux c1; mkLApp(coq_Popp, [|th.th_a; aux c2|]) |]) | App (unop, [|c1|]) when safe_pf_conv_x gl unop (unbox th.th_opp) -> mkLApp(coq_Popp, [|th.th_a; aux c1|]) | _ when closed_under th.th_closed c -> mkLApp(coq_Pconst, [|th.th_a; c |]) | _ -> try Constrhash.find varhash c with Not_found -> let newvar = mkLApp(coq_Pvar, [|th.th_a; (path_of_int !counter) |]) in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end in let lp = List.map aux lc in let v = (btree_of_array (Array.of_list (List.rev !varlist)) th.th_a) in List.map (fun p -> (mkLApp(coq_interp_p, [| th.th_a; th.th_plus; th.th_mult; th.th_zero; (unbox th.th_opp); v; p |])), mkLApp(coq_interp_cs, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; v; pf_reduce cbv_betadeltaiota gl (mkLApp(coq_polynomial_simplify, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; (unbox th.th_opp); th.th_eq; p |])) |]), mkLApp(coq_polynomial_simplify_ok, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; (unbox th.th_opp); th.th_eq; v; th.th_t; p |])) lp (* gl : goal sigma th : semi-ring theory (abstract) cl : constr list [c1; c2; ...] Builds - a list of tuples [(c1, c'1, c''1, c'1_eq_c''1); ... 
] where c'i is convertible with ci and c'i_eq_c''i is a proof of equality of c'i and c''i *) let build_aspolynom gl th lc = let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) (* aux creates the aspolynom p by a recursive destructuration of c and builds the varmap with side-effects *) let rec aux c = match (kind_of_term (strip_outer_cast c)) with | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_plus -> mkLApp(coq_ASPplus, [| aux c1; aux c2 |]) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_mult -> mkLApp(coq_ASPmult, [| aux c1; aux c2 |]) | _ when safe_pf_conv_x gl c th.th_zero -> Lazy.force coq_ASP0 | _ when safe_pf_conv_x gl c th.th_one -> Lazy.force coq_ASP1 | _ -> try Constrhash.find varhash c with Not_found -> let newvar = mkLApp(coq_ASPvar, [|(path_of_int !counter) |]) in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end in let lp = List.map aux lc in let v = btree_of_array (Array.of_list (List.rev !varlist)) th.th_a in List.map (fun p -> (mkLApp(coq_interp_asp, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; v; p |]), mkLApp(coq_interp_acs, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; v; pf_reduce cbv_betadeltaiota gl (mkLApp(coq_aspolynomial_normalize,[|p|])) |]), mkLApp(coq_spolynomial_simplify_ok, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; th.th_eq; v; th.th_t; p |]))) lp (* gl : goal sigma th : ring theory (abstract) cl : constr list [c1; c2; ...] Builds - a list of tuples [(c1, c'1, c''1, c'1_eq_c''1); ... ] where c'i is convertible with ci and c'i_eq_c''i is a proof of equality of c'i and c''i *) let build_apolynom gl th lc = let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) let rec aux c = match (kind_of_term (strip_outer_cast c)) with | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_plus -> mkLApp(coq_APplus, [| aux c1; aux c2 |]) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_mult -> mkLApp(coq_APmult, [| aux c1; aux c2 |]) (* The special case of Z.sub *) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl c (mkApp(th.th_plus, [|c1; mkApp(unbox th.th_opp,[|c2|]) |])) -> mkLApp(coq_APplus, [|aux c1; mkLApp(coq_APopp,[|aux c2|]) |]) | App (unop, [|c1|]) when safe_pf_conv_x gl unop (unbox th.th_opp) -> mkLApp(coq_APopp, [| aux c1 |]) | _ when safe_pf_conv_x gl c th.th_zero -> Lazy.force coq_AP0 | _ when safe_pf_conv_x gl c th.th_one -> Lazy.force coq_AP1 | _ -> try Constrhash.find varhash c with Not_found -> let newvar = mkLApp(coq_APvar, [| path_of_int !counter |]) in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end in let lp = List.map aux lc in let v = (btree_of_array (Array.of_list (List.rev !varlist)) th.th_a) in List.map (fun p -> (mkLApp(coq_interp_ap, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; (unbox th.th_opp); v; p |]), mkLApp(coq_interp_sacs, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; (unbox th.th_opp); v; pf_reduce cbv_betadeltaiota gl (mkLApp(coq_apolynomial_normalize, [|p|])) |]), mkLApp(coq_apolynomial_normalize_ok, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; (unbox th.th_opp); th.th_eq; v; th.th_t; p |]))) lp (* gl : goal sigma th : setoid ring theory (concrete) cl : constr list 
[c1; c2; ...] Builds - a list of tuples [(c1, c'1, c''1, c'1_eq_c''1); ... ] where c'i is convertible with ci and c'i_eq_c''i is a proof of equality of c'i and c''i *) let build_setpolynom gl th lc = let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) let rec aux c = match (kind_of_term (strip_outer_cast c)) with | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_plus -> mkLApp(coq_SetPplus, [|th.th_a; aux c1; aux c2 |]) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_mult -> mkLApp(coq_SetPmult, [|th.th_a; aux c1; aux c2 |]) (* The special case of Z.sub *) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl c (mkApp(th.th_plus, [|c1; mkApp(unbox th.th_opp,[|c2|])|])) -> mkLApp(coq_SetPplus, [| th.th_a; aux c1; mkLApp(coq_SetPopp, [|th.th_a; aux c2|]) |]) | App (unop, [|c1|]) when safe_pf_conv_x gl unop (unbox th.th_opp) -> mkLApp(coq_SetPopp, [| th.th_a; aux c1 |]) | _ when closed_under th.th_closed c -> mkLApp(coq_SetPconst, [| th.th_a; c |]) | _ -> try Constrhash.find varhash c with Not_found -> let newvar = mkLApp(coq_SetPvar, [| th.th_a; path_of_int !counter |]) in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end in let lp = List.map aux lc in let v = (btree_of_array (Array.of_list (List.rev !varlist)) th.th_a) in List.map (fun p -> (mkLApp(coq_interp_setp, [| th.th_a; th.th_plus; th.th_mult; th.th_zero; (unbox th.th_opp); v; p |]), mkLApp(coq_interp_setcs, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; v; pf_reduce cbv_betadeltaiota gl (mkLApp(coq_setpolynomial_simplify, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; (unbox th.th_opp); th.th_eq; p |])) |]), mkLApp(coq_setpolynomial_simplify_ok, [| th.th_a; (unbox th.th_equiv); th.th_plus; th.th_mult; th.th_one; th.th_zero;(unbox th.th_opp); th.th_eq; (unbox th.th_setoid_th); (unbox th.th_morph).plusm; (unbox th.th_morph).multm; (unbox (unbox th.th_morph).oppm); v; th.th_t; p |]))) lp (* gl : goal sigma th : semi setoid ring theory (concrete) cl : constr list [c1; c2; ...] Builds - a list of tuples [(c1, c'1, c''1, c'1_eq_c''1); ... 
] where c'i is convertible with ci and c'i_eq_c''i is a proof of equality of c'i and c''i *) let build_setspolynom gl th lc = let varhash = (Constrhash.create 17 : constr Constrhash.t) in let varlist = ref ([] : constr list) in (* list of variables *) let counter = ref 1 in (* number of variables created + 1 *) let rec aux c = match (kind_of_term (strip_outer_cast c)) with | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_plus -> mkLApp(coq_SetSPplus, [|th.th_a; aux c1; aux c2 |]) | App (binop, [|c1; c2|]) when safe_pf_conv_x gl binop th.th_mult -> mkLApp(coq_SetSPmult, [| th.th_a; aux c1; aux c2 |]) | _ when closed_under th.th_closed c -> mkLApp(coq_SetSPconst, [| th.th_a; c |]) | _ -> try Constrhash.find varhash c with Not_found -> let newvar = mkLApp(coq_SetSPvar, [|th.th_a; path_of_int !counter |]) in begin incr counter; varlist := c :: !varlist; Constrhash.add varhash c newvar; newvar end in let lp = List.map aux lc in let v = (btree_of_array (Array.of_list (List.rev !varlist)) th.th_a) in List.map (fun p -> (mkLApp(coq_interp_setsp, [| th.th_a; th.th_plus; th.th_mult; th.th_zero; v; p |]), mkLApp(coq_interp_setcs, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; v; pf_reduce cbv_betadeltaiota gl (mkLApp(coq_setspolynomial_simplify, [| th.th_a; th.th_plus; th.th_mult; th.th_one; th.th_zero; th.th_eq; p |])) |]), mkLApp(coq_setspolynomial_simplify_ok, [| th.th_a; (unbox th.th_equiv); th.th_plus; th.th_mult; th.th_one; th.th_zero; th.th_eq; (unbox th.th_setoid_th); (unbox th.th_morph).plusm; (unbox th.th_morph).multm; v; th.th_t; p |]))) lp module SectionPathSet = Set.Make(struct type t = full_path let compare = Pervasives.compare end) (* Avec l'uniformisation des red_kind, on perd ici sur la structure SectionPathSet; peut-tre faudra-t-il la dplacer dans Closure *) let constants_to_unfold = (* List.fold_right SectionPathSet.add *) let transform s = let sp = path_of_string s in let dir, id = repr_path sp in Libnames.encode_con dir id in List.map transform [ "Coq.ring.Ring_normalize.interp_cs"; "Coq.ring.Ring_normalize.interp_var"; "Coq.ring.Ring_normalize.interp_vl"; "Coq.ring.Ring_abstract.interp_acs"; "Coq.ring.Ring_abstract.interp_sacs"; "Coq.quote.Quote.varmap_find"; (* anciennement des Local devenus Definition *) "Coq.ring.Ring_normalize.ics_aux"; "Coq.ring.Ring_normalize.ivl_aux"; "Coq.ring.Ring_normalize.interp_m"; "Coq.ring.Ring_abstract.iacs_aux"; "Coq.ring.Ring_abstract.isacs_aux"; "Coq.ring.Setoid_ring_normalize.interp_cs"; "Coq.ring.Setoid_ring_normalize.interp_var"; "Coq.ring.Setoid_ring_normalize.interp_vl"; "Coq.ring.Setoid_ring_normalize.ics_aux"; "Coq.ring.Setoid_ring_normalize.ivl_aux"; "Coq.ring.Setoid_ring_normalize.interp_m"; ] (* SectionPathSet.empty *) (* Unfolds the functions interp and find_btree in the term c of goal gl *) open RedFlags let polynom_unfold_tac = let flags = (mkflags(fBETA::fIOTA::(List.map fCONST constants_to_unfold))) in reduct_in_concl (cbv_norm_flags flags,DEFAULTcast) let polynom_unfold_tac_in_term gl = let flags = (mkflags(fBETA::fIOTA::fZETA::(List.map fCONST constants_to_unfold))) in cbv_norm_flags flags (pf_env gl) (project gl) (* lc : constr list *) (* th : theory associated to t *) (* op : clause (None for conclusion or Some id for hypothesis id) *) (* gl : goal *) (* Does the rewriting c_i -> (interp R RC v (polynomial_simplify p_i)) where the ring R, the Ring theory RC, the varmap v and the polynomials p_i are guessed and such that c_i = (interp R RC v p_i) *) let raw_polynom th op lc gl = (* first we sort the 
terms : if t' is a subterm of t it must appear after t in the list. This is to avoid that the normalization of t' modifies t in a non-desired way *) let lc = sort_subterm gl lc in let ltriplets = if th.th_setoid then if th.th_ring then build_setpolynom gl th lc else build_setspolynom gl th lc else if th.th_ring then if th.th_abstract then build_apolynom gl th lc else build_polynom gl th lc else if th.th_abstract then build_aspolynom gl th lc else build_spolynom gl th lc in let polynom_tac = List.fold_right2 (fun ci (c'i, c''i, c'i_eq_c''i) tac -> let c'''i = if !term_quality then polynom_unfold_tac_in_term gl c''i else c''i in if !term_quality && safe_pf_conv_x gl c'''i ci then tac (* convertible terms *) else if th.th_setoid then (tclORELSE (tclORELSE (h_exact c'i_eq_c''i) (h_exact (mkLApp(coq_seq_sym, [| th.th_a; (unbox th.th_equiv); (unbox th.th_setoid_th); c'''i; ci; c'i_eq_c''i |])))) (tclTHENS (tclORELSE (Equality.general_rewrite true Termops.all_occurrences true false c'i_eq_c''i) (Equality.general_rewrite false Termops.all_occurrences true false c'i_eq_c''i)) [tac])) else (tclORELSE (tclORELSE (h_exact c'i_eq_c''i) (h_exact (mkApp(build_coq_eq_sym (), [|th.th_a; c'''i; ci; c'i_eq_c''i |])))) (tclTHENS (elim_type (mkApp(build_coq_eq (), [|th.th_a; c'''i; ci |]))) [ tac; h_exact c'i_eq_c''i ])) ) lc ltriplets polynom_unfold_tac in polynom_tac gl let guess_eq_tac th = (tclORELSE reflexivity (tclTHEN polynom_unfold_tac (tclTHEN (* Normalized sums associate on the right *) (tclREPEAT (tclTHENFIRST (apply (mkApp(build_coq_f_equal2 (), [| th.th_a; th.th_a; th.th_a; th.th_plus |]))) reflexivity)) (tclTRY (tclTHENLAST (apply (mkApp(build_coq_f_equal2 (), [| th.th_a; th.th_a; th.th_a; th.th_plus |]))) reflexivity))))) let guess_equiv_tac th = (tclORELSE (apply (mkLApp(coq_seq_refl, [| th.th_a; (unbox th.th_equiv); (unbox th.th_setoid_th)|]))) (tclTHEN polynom_unfold_tac (tclREPEAT (tclORELSE (apply (unbox th.th_morph).plusm) (apply (unbox th.th_morph).multm))))) let match_with_equiv c = match (kind_of_term c) with | App (e,a) -> if (List.mem e []) (* (Setoid_replace.equiv_list ())) *) then Some (decompose_app c) else None | _ -> None let polynom lc gl = Coqlib.check_required_library ["Coq";"ring";"LegacyRing"]; match lc with (* If no argument is given, try to recognize either an equality or a declared relation with arguments c1 ... cn, do "Ring c1 c2 ... 
cn" and then try to apply the simplification theorems declared for the relation *) | [] -> (try match Hipattern.match_with_equation (pf_concl gl) with | _,_,Hipattern.PolymorphicLeibnizEq (t,c1,c2) -> let th = guess_theory t in (tclTHEN (raw_polynom th None [c1;c2]) (guess_eq_tac th)) gl | _,_,Hipattern.HeterogenousEq (t1,c1,t2,c2) when safe_pf_conv_x gl t1 t2 -> let th = guess_theory t1 in (tclTHEN (raw_polynom th None [c1;c2]) (guess_eq_tac th)) gl | _ -> raise Exit with Hipattern.NoEquationFound | Exit -> (match match_with_equiv (pf_concl gl) with | Some (equiv, c1::args) -> let t = (pf_type_of gl c1) in let th = (guess_theory t) in if List.exists (fun c2 -> not (safe_pf_conv_x gl t (pf_type_of gl c2))) args then errorlabstrm "Ring :" (str" All terms must have the same type"); (tclTHEN (raw_polynom th None (c1::args)) (guess_equiv_tac th)) gl | _ -> errorlabstrm "polynom :" (str" This goal is not an equality nor a setoid equivalence"))) (* Elsewhere, guess the theory, check that all terms have the same type and apply raw_polynom *) | c :: lc' -> let t = pf_type_of gl c in let th = guess_theory t in if List.exists (fun c1 -> not (safe_pf_conv_x gl t (pf_type_of gl c1))) lc' then errorlabstrm "Ring :" (str" All terms must have the same type"); (tclTHEN (raw_polynom th None lc) polynom_unfold_tac) gl coq-8.4pl2/plugins/ring/LegacyRing.v0000640000175000001440000000256112010532755016466 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* b) eqb. split; simpl. destruct n; destruct m; reflexivity. destruct n; destruct m; destruct p; reflexivity. destruct n; destruct m; reflexivity. destruct n; destruct m; destruct p; reflexivity. destruct n; reflexivity. destruct n; reflexivity. destruct n; reflexivity. destruct n; destruct m; destruct p; reflexivity. destruct x; destruct y; reflexivity || simpl; tauto. Defined. Add Legacy Ring bool xorb andb true false (fun b:bool => b) eqb BoolTheory [ true false ]. coq-8.4pl2/plugins/ring/LegacyZArithRing.v0000640000175000001440000000241712010532755017610 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ => false end. Lemma Zeq_prop : forall x y:Z, Is_true (Zeq x y) -> x = y. intros x y H; unfold Zeq in H. apply Z.compare_eq. destruct (x ?= y)%Z; [ reflexivity | contradiction | contradiction ]. Qed. Definition ZTheory : Ring_Theory Z.add Z.mul 1%Z 0%Z Z.opp Zeq. split; intros; eauto with zarith. apply Zeq_prop; assumption. Qed. (* NatConstants and NatTheory are defined in Ring_theory.v *) Add Legacy Ring Z Z.add Z.mul 1%Z 0%Z Z.opp Zeq ZTheory [ Zpos Zneg 0%Z xO xI 1%positive ]. coq-8.4pl2/plugins/ring/Setoid_ring.v0000640000175000001440000000121612010532755016704 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* n = m. Proof. simple induction n; simple induction m; simpl; try reflexivity || contradiction. intros; rewrite (H i0); trivial. intros; rewrite (H i0); trivial. Qed. Section setoid. Variable A : Type. Variable Aequiv : A -> A -> Prop. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Variable S : Setoid_Theory A Aequiv. Add Setoid A Aequiv S as Asetoid. 
Variable plus_morph : forall a a0:A, Aequiv a a0 -> forall a1 a2:A, Aequiv a1 a2 -> Aequiv (Aplus a a1) (Aplus a0 a2). Variable mult_morph : forall a a0:A, Aequiv a a0 -> forall a1 a2:A, Aequiv a1 a2 -> Aequiv (Amult a a1) (Amult a0 a2). Variable opp_morph : forall a a0:A, Aequiv a a0 -> Aequiv (Aopp a) (Aopp a0). Add Morphism Aplus : Aplus_ext. intros; apply plus_morph; assumption. Qed. Add Morphism Amult : Amult_ext. intros; apply mult_morph; assumption. Qed. Add Morphism Aopp : Aopp_ext. exact opp_morph. Qed. Let equiv_refl := Seq_refl A Aequiv S. Let equiv_sym := Seq_sym A Aequiv S. Let equiv_trans := Seq_trans A Aequiv S. Hint Resolve equiv_refl equiv_trans. Hint Immediate equiv_sym. Section semi_setoid_rings. (* Section definitions. *) (******************************************) (* Normal abstract Polynomials *) (******************************************) (* DEFINITIONS : - A varlist is a sorted product of one or more variables : x, x*y*z - A monom is a constant, a varlist, or the product of a constant by a varlist. 2*x*y, x*y*z, 3 are monoms; 2*3, x*3*y, 4*x*3 are NOT. - A canonical sum is either a monom or an ordered sum of monoms (the order on monoms is defined later) - A normal polynomial is either a constant, a canonical sum, or a constant plus a canonical sum *) (* varlist is isomorphic to (list var), but we build a special inductive type for efficiency *) Inductive varlist : Type := | Nil_var : varlist | Cons_var : index -> varlist -> varlist. Inductive canonical_sum : Type := | Nil_monom : canonical_sum | Cons_monom : A -> varlist -> canonical_sum -> canonical_sum | Cons_varlist : varlist -> canonical_sum -> canonical_sum. (* Order on monoms *) (* That's the lexicographic order on varlist, extended by : - A constant is less than every monom - The relation between two varlists is preserved by multiplication by a constant. Examples : 3 < x < y x*y < x*y*y*z 2*x*y < x*y*y*z x*y < 54*x*y*y*z 4*x*y < 59*x*y*y*z *) Fixpoint varlist_eq (x y:varlist) {struct y} : bool := match x, y with | Nil_var, Nil_var => true | Cons_var i xrest, Cons_var j yrest => andb (index_eq i j) (varlist_eq xrest yrest) | _, _ => false end. Fixpoint varlist_lt (x y:varlist) {struct y} : bool := match x, y with | Nil_var, Cons_var _ _ => true | Cons_var i xrest, Cons_var j yrest => if index_lt i j then true else andb (index_eq i j) (varlist_lt xrest yrest) | _, _ => false end. (* merges two variable lists *) Fixpoint varlist_merge (l1:varlist) : varlist -> varlist := match l1 with | Cons_var v1 t1 => (fix vm_aux (l2:varlist) : varlist := match l2 with | Cons_var v2 t2 => if index_lt v1 v2 then Cons_var v1 (varlist_merge t1 l2) else Cons_var v2 (vm_aux t2) | Nil_var => l1 end) | Nil_var => fun l2 => l2 end.
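(* Worked example: varlists behave as sorted multisets of variables, so products keep duplicate factors. Assuming two distinct indices i and j with index_lt i j = true, varlist_merge (Cons_var i Nil_var) (Cons_var j Nil_var) computes to Cons_var i (Cons_var j Nil_var), i.e. the monomial x*y; merging that result again with Cons_var i Nil_var yields Cons_var i (Cons_var i (Cons_var j Nil_var)), i.e. x*x*y (for concrete indices index_lt i i computes to false, so the duplicate factor is simply inserted in place). *)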
(* returns the sum of two canonical sums *) Fixpoint canonical_sum_merge (s1:canonical_sum) : canonical_sum -> canonical_sum := match s1 with | Cons_monom c1 l1 t1 => (fix csm_aux (s2:canonical_sum) : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 c2) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_monom c1 l1 (canonical_sum_merge t1 s2) else Cons_monom c2 l2 (csm_aux t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 Aone) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_monom c1 l1 (canonical_sum_merge t1 s2) else Cons_varlist l2 (csm_aux t2) | Nil_monom => s1 end) | Cons_varlist l1 t1 => (fix csm_aux2 (s2:canonical_sum) : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone c2) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_varlist l1 (canonical_sum_merge t1 s2) else Cons_monom c2 l2 (csm_aux2 t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone Aone) l1 (canonical_sum_merge t1 t2) else if varlist_lt l1 l2 then Cons_varlist l1 (canonical_sum_merge t1 s2) else Cons_varlist l2 (csm_aux2 t2) | Nil_monom => s1 end) | Nil_monom => fun s2 => s2 end. (* Insertion of a monom in a canonical sum *) Fixpoint monom_insert (c1:A) (l1:varlist) (s2:canonical_sum) {struct s2} : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 c2) l1 t2 else if varlist_lt l1 l2 then Cons_monom c1 l1 s2 else Cons_monom c2 l2 (monom_insert c1 l1 t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus c1 Aone) l1 t2 else if varlist_lt l1 l2 then Cons_monom c1 l1 s2 else Cons_varlist l2 (monom_insert c1 l1 t2) | Nil_monom => Cons_monom c1 l1 Nil_monom end. Fixpoint varlist_insert (l1:varlist) (s2:canonical_sum) {struct s2} : canonical_sum := match s2 with | Cons_monom c2 l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone c2) l1 t2 else if varlist_lt l1 l2 then Cons_varlist l1 s2 else Cons_monom c2 l2 (varlist_insert l1 t2) | Cons_varlist l2 t2 => if varlist_eq l1 l2 then Cons_monom (Aplus Aone Aone) l1 t2 else if varlist_lt l1 l2 then Cons_varlist l1 s2 else Cons_varlist l2 (varlist_insert l1 t2) | Nil_monom => Cons_varlist l1 Nil_monom end. (* Computes c0*s *) Fixpoint canonical_sum_scalar (c0:A) (s:canonical_sum) {struct s} : canonical_sum := match s with | Cons_monom c l t => Cons_monom (Amult c0 c) l (canonical_sum_scalar c0 t) | Cons_varlist l t => Cons_monom c0 l (canonical_sum_scalar c0 t) | Nil_monom => Nil_monom end. (* Computes l0*s *) Fixpoint canonical_sum_scalar2 (l0:varlist) (s:canonical_sum) {struct s} : canonical_sum := match s with | Cons_monom c l t => monom_insert c (varlist_merge l0 l) (canonical_sum_scalar2 l0 t) | Cons_varlist l t => varlist_insert (varlist_merge l0 l) (canonical_sum_scalar2 l0 t) | Nil_monom => Nil_monom end. (* Computes c0*l0*s *) Fixpoint canonical_sum_scalar3 (c0:A) (l0:varlist) (s:canonical_sum) {struct s} : canonical_sum := match s with | Cons_monom c l t => monom_insert (Amult c0 c) (varlist_merge l0 l) (canonical_sum_scalar3 c0 l0 t) | Cons_varlist l t => monom_insert c0 (varlist_merge l0 l) (canonical_sum_scalar3 c0 l0 t) | Nil_monom => Nil_monom end. 
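(* Illustrative note: unlike the abstract case, canonical sums carry coefficients, so merging collects like monomials by adding their coefficients syntactically. For any a b : A and any varlist l such that varlist_eq l l computes to true (as it does for any concrete varlist), canonical_sum_merge (Cons_monom a l Nil_monom) (Cons_monom b l Nil_monom) computes to Cons_monom (Aplus a b) l Nil_monom; a Cons_varlist node is treated as a monomial with coefficient Aone, so merging Cons_varlist l Nil_monom with Cons_monom b l Nil_monom gives Cons_monom (Aplus Aone b) l Nil_monom. *)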
(* returns the product of two canonical sums *) Fixpoint canonical_sum_prod (s1 s2:canonical_sum) {struct s1} : canonical_sum := match s1 with | Cons_monom c1 l1 t1 => canonical_sum_merge (canonical_sum_scalar3 c1 l1 s2) (canonical_sum_prod t1 s2) | Cons_varlist l1 t1 => canonical_sum_merge (canonical_sum_scalar2 l1 s2) (canonical_sum_prod t1 s2) | Nil_monom => Nil_monom end. (* The type to represent concrete semi-setoid-ring polynomials *) Inductive setspolynomial : Type := | SetSPvar : index -> setspolynomial | SetSPconst : A -> setspolynomial | SetSPplus : setspolynomial -> setspolynomial -> setspolynomial | SetSPmult : setspolynomial -> setspolynomial -> setspolynomial. Fixpoint setspolynomial_normalize (p:setspolynomial) : canonical_sum := match p with | SetSPplus l r => canonical_sum_merge (setspolynomial_normalize l) (setspolynomial_normalize r) | SetSPmult l r => canonical_sum_prod (setspolynomial_normalize l) (setspolynomial_normalize r) | SetSPconst c => Cons_monom c Nil_var Nil_monom | SetSPvar i => Cons_varlist (Cons_var i Nil_var) Nil_monom end. Fixpoint canonical_sum_simplify (s:canonical_sum) : canonical_sum := match s with | Cons_monom c l t => if Aeq c Azero then canonical_sum_simplify t else if Aeq c Aone then Cons_varlist l (canonical_sum_simplify t) else Cons_monom c l (canonical_sum_simplify t) | Cons_varlist l t => Cons_varlist l (canonical_sum_simplify t) | Nil_monom => Nil_monom end. Definition setspolynomial_simplify (x:setspolynomial) := canonical_sum_simplify (setspolynomial_normalize x). Variable vm : varmap A. Definition interp_var (i:index) := varmap_find Azero i vm. Definition ivl_aux := (fix ivl_aux (x:index) (t:varlist) {struct t} : A := match t with | Nil_var => interp_var x | Cons_var x' t' => Amult (interp_var x) (ivl_aux x' t') end). Definition interp_vl (l:varlist) := match l with | Nil_var => Aone | Cons_var x t => ivl_aux x t end. Definition interp_m (c:A) (l:varlist) := match l with | Nil_var => c | Cons_var x t => Amult c (ivl_aux x t) end. Definition ics_aux := (fix ics_aux (a:A) (s:canonical_sum) {struct s} : A := match s with | Nil_monom => a | Cons_varlist l t => Aplus a (ics_aux (interp_vl l) t) | Cons_monom c l t => Aplus a (ics_aux (interp_m c l) t) end). Definition interp_setcs (s:canonical_sum) : A := match s with | Nil_monom => Azero | Cons_varlist l t => ics_aux (interp_vl l) t | Cons_monom c l t => ics_aux (interp_m c l) t end. Fixpoint interp_setsp (p:setspolynomial) : A := match p with | SetSPconst c => c | SetSPvar i => interp_var i | SetSPplus p1 p2 => Aplus (interp_setsp p1) (interp_setsp p2) | SetSPmult p1 p2 => Amult (interp_setsp p1) (interp_setsp p2) end. (* End interpretation. *) Unset Implicit Arguments. (* Section properties. *) Variable T : Semi_Setoid_Ring_Theory Aequiv Aplus Amult Aone Azero Aeq. Hint Resolve (SSR_plus_comm T). Hint Resolve (SSR_plus_assoc T). Hint Resolve (SSR_plus_assoc2 S T). Hint Resolve (SSR_mult_comm T). Hint Resolve (SSR_mult_assoc T). Hint Resolve (SSR_mult_assoc2 S T). Hint Resolve (SSR_plus_zero_left T). Hint Resolve (SSR_plus_zero_left2 S T). Hint Resolve (SSR_mult_one_left T). Hint Resolve (SSR_mult_one_left2 S T). Hint Resolve (SSR_mult_zero_left T). Hint Resolve (SSR_mult_zero_left2 S T). Hint Resolve (SSR_distr_left T). Hint Resolve (SSR_distr_left2 S T). Hint Resolve (SSR_plus_reg_left T). Hint Resolve (SSR_plus_permute S plus_morph T). Hint Resolve (SSR_mult_permute S mult_morph T). Hint Resolve (SSR_distr_right S plus_morph T). Hint Resolve (SSR_distr_right2 S plus_morph T). 
Hint Resolve (SSR_mult_zero_right S T). Hint Resolve (SSR_mult_zero_right2 S T). Hint Resolve (SSR_plus_zero_right S T). Hint Resolve (SSR_plus_zero_right2 S T). Hint Resolve (SSR_mult_one_right S T). Hint Resolve (SSR_mult_one_right2 S T). Hint Resolve (SSR_plus_reg_right S T). Hint Resolve eq_refl eq_sym eq_trans. Hint Immediate T. Lemma varlist_eq_prop : forall x y:varlist, Is_true (varlist_eq x y) -> x = y. Proof. simple induction x; simple induction y; contradiction || (try reflexivity). simpl; intros. generalize (andb_prop2 _ _ H1); intros; elim H2; intros. rewrite (index_eq_prop _ _ H3); rewrite (H v0 H4); reflexivity. Qed. Remark ivl_aux_ok : forall (v:varlist) (i:index), Aequiv (ivl_aux i v) (Amult (interp_var i) (interp_vl v)). Proof. simple induction v; simpl; intros. trivial. rewrite (H i); trivial. Qed. Lemma varlist_merge_ok : forall x y:varlist, Aequiv (interp_vl (varlist_merge x y)) (Amult (interp_vl x) (interp_vl y)). Proof. simple induction x. simpl; trivial. simple induction y. simpl; trivial. simpl; intros. elim (index_lt i i0); simpl; intros. rewrite (ivl_aux_ok v i). rewrite (ivl_aux_ok v0 i0). rewrite (ivl_aux_ok (varlist_merge v (Cons_var i0 v0)) i). rewrite (H (Cons_var i0 v0)). simpl. rewrite (ivl_aux_ok v0 i0). eauto. rewrite (ivl_aux_ok v i). rewrite (ivl_aux_ok v0 i0). rewrite (ivl_aux_ok ((fix vm_aux (l2:varlist) : varlist := match l2 with | Nil_var => Cons_var i v | Cons_var v2 t2 => if index_lt i v2 then Cons_var i (varlist_merge v l2) else Cons_var v2 (vm_aux t2) end) v0) i0). rewrite H0. rewrite (ivl_aux_ok v i). eauto. Qed. Remark ics_aux_ok : forall (x:A) (s:canonical_sum), Aequiv (ics_aux x s) (Aplus x (interp_setcs s)). Proof. simple induction s; simpl; intros; trivial. Qed. Remark interp_m_ok : forall (x:A) (l:varlist), Aequiv (interp_m x l) (Amult x (interp_vl l)). Proof. destruct l as [| i v]; trivial. Qed. Hint Resolve ivl_aux_ok. Hint Resolve ics_aux_ok. Hint Resolve interp_m_ok. (* Hints Resolve ivl_aux_ok ics_aux_ok interp_m_ok. *) Lemma canonical_sum_merge_ok : forall x y:canonical_sum, Aequiv (interp_setcs (canonical_sum_merge x y)) (Aplus (interp_setcs x) (interp_setcs y)). Proof. simple induction x; simpl. trivial. simple induction y; simpl; intros. eauto. generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl. rewrite (ics_aux_ok (interp_m a v0) c). rewrite (ics_aux_ok (interp_m a0 v0) c0). rewrite (ics_aux_ok (interp_m (Aplus a a0) v0) (canonical_sum_merge c c0)). rewrite (H c0). rewrite (interp_m_ok (Aplus a a0) v0). rewrite (interp_m_ok a v0). rewrite (interp_m_ok a0 v0). setoid_replace (Amult (Aplus a a0) (interp_vl v0)) with (Aplus (Amult a (interp_vl v0)) (Amult a0 (interp_vl v0))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (Amult a (interp_vl v0)) (Amult a0 (interp_vl v0))) (Aplus (interp_setcs c) (interp_setcs c0))) with (Aplus (Amult a (interp_vl v0)) (Aplus (Amult a0 (interp_vl v0)) (Aplus (interp_setcs c) (interp_setcs c0)))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (Amult a (interp_vl v0)) (interp_setcs c)) (Aplus (Amult a0 (interp_vl v0)) (interp_setcs c0))) with (Aplus (Amult a (interp_vl v0)) (Aplus (interp_setcs c) (Aplus (Amult a0 (interp_vl v0)) (interp_setcs c0)))); [ idtac | trivial ]. auto. elim (varlist_lt v v0); simpl. intro. rewrite (ics_aux_ok (interp_m a v) (canonical_sum_merge c (Cons_monom a0 v0 c0))) . rewrite (ics_aux_ok (interp_m a v) c). rewrite (ics_aux_ok (interp_m a0 v0) c0). rewrite (H (Cons_monom a0 v0 c0)); simpl. 
rewrite (ics_aux_ok (interp_m a0 v0) c0); auto. intro. rewrite (ics_aux_ok (interp_m a0 v0) ((fix csm_aux (s2:canonical_sum) : canonical_sum := match s2 with | Nil_monom => Cons_monom a v c | Cons_monom c2 l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus a c2) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_monom a v (canonical_sum_merge c s2) else Cons_monom c2 l2 (csm_aux t2) | Cons_varlist l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus a Aone) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_monom a v (canonical_sum_merge c s2) else Cons_varlist l2 (csm_aux t2) end) c0)). rewrite H0. rewrite (ics_aux_ok (interp_m a v) c); rewrite (ics_aux_ok (interp_m a0 v0) c0); simpl; auto. generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl. rewrite (ics_aux_ok (interp_m (Aplus a Aone) v0) (canonical_sum_merge c c0)); rewrite (ics_aux_ok (interp_m a v0) c); rewrite (ics_aux_ok (interp_vl v0) c0). rewrite (H c0). rewrite (interp_m_ok (Aplus a Aone) v0). rewrite (interp_m_ok a v0). setoid_replace (Amult (Aplus a Aone) (interp_vl v0)) with (Aplus (Amult a (interp_vl v0)) (Amult Aone (interp_vl v0))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (Amult a (interp_vl v0)) (Amult Aone (interp_vl v0))) (Aplus (interp_setcs c) (interp_setcs c0))) with (Aplus (Amult a (interp_vl v0)) (Aplus (Amult Aone (interp_vl v0)) (Aplus (interp_setcs c) (interp_setcs c0)))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (Amult a (interp_vl v0)) (interp_setcs c)) (Aplus (interp_vl v0) (interp_setcs c0))) with (Aplus (Amult a (interp_vl v0)) (Aplus (interp_setcs c) (Aplus (interp_vl v0) (interp_setcs c0)))); [ idtac | trivial ]. setoid_replace (Amult Aone (interp_vl v0)) with (interp_vl v0); [ idtac | trivial ]. auto. elim (varlist_lt v v0); simpl. intro. rewrite (ics_aux_ok (interp_m a v) (canonical_sum_merge c (Cons_varlist v0 c0))) ; rewrite (ics_aux_ok (interp_m a v) c); rewrite (ics_aux_ok (interp_vl v0) c0). rewrite (H (Cons_varlist v0 c0)); simpl. rewrite (ics_aux_ok (interp_vl v0) c0). auto. intro. rewrite (ics_aux_ok (interp_vl v0) ((fix csm_aux (s2:canonical_sum) : canonical_sum := match s2 with | Nil_monom => Cons_monom a v c | Cons_monom c2 l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus a c2) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_monom a v (canonical_sum_merge c s2) else Cons_monom c2 l2 (csm_aux t2) | Cons_varlist l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus a Aone) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_monom a v (canonical_sum_merge c s2) else Cons_varlist l2 (csm_aux t2) end) c0)); rewrite H0. rewrite (ics_aux_ok (interp_m a v) c); rewrite (ics_aux_ok (interp_vl v0) c0); simpl. auto. simple induction y; simpl; intros. trivial. generalize (varlist_eq_prop v v0). elim (varlist_eq v v0). intros; rewrite (H1 I). simpl. rewrite (ics_aux_ok (interp_m (Aplus Aone a) v0) (canonical_sum_merge c c0)); rewrite (ics_aux_ok (interp_vl v0) c); rewrite (ics_aux_ok (interp_m a v0) c0); rewrite (H c0). rewrite (interp_m_ok (Aplus Aone a) v0); rewrite (interp_m_ok a v0). setoid_replace (Amult (Aplus Aone a) (interp_vl v0)) with (Aplus (Amult Aone (interp_vl v0)) (Amult a (interp_vl v0))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (Amult Aone (interp_vl v0)) (Amult a (interp_vl v0))) (Aplus (interp_setcs c) (interp_setcs c0))) with (Aplus (Amult Aone (interp_vl v0)) (Aplus (Amult a (interp_vl v0)) (Aplus (interp_setcs c) (interp_setcs c0)))); [ idtac | trivial ]. 
setoid_replace (Aplus (Aplus (interp_vl v0) (interp_setcs c)) (Aplus (Amult a (interp_vl v0)) (interp_setcs c0))) with (Aplus (interp_vl v0) (Aplus (interp_setcs c) (Aplus (Amult a (interp_vl v0)) (interp_setcs c0)))); [ idtac | trivial ]. auto. elim (varlist_lt v v0); simpl; intros. rewrite (ics_aux_ok (interp_vl v) (canonical_sum_merge c (Cons_monom a v0 c0))) ; rewrite (ics_aux_ok (interp_vl v) c); rewrite (ics_aux_ok (interp_m a v0) c0). rewrite (H (Cons_monom a v0 c0)); simpl. rewrite (ics_aux_ok (interp_m a v0) c0); auto. rewrite (ics_aux_ok (interp_m a v0) ((fix csm_aux2 (s2:canonical_sum) : canonical_sum := match s2 with | Nil_monom => Cons_varlist v c | Cons_monom c2 l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus Aone c2) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_varlist v (canonical_sum_merge c s2) else Cons_monom c2 l2 (csm_aux2 t2) | Cons_varlist l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus Aone Aone) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_varlist v (canonical_sum_merge c s2) else Cons_varlist l2 (csm_aux2 t2) end) c0)); rewrite H0. rewrite (ics_aux_ok (interp_vl v) c); rewrite (ics_aux_ok (interp_m a v0) c0); simpl; auto. generalize (varlist_eq_prop v v0). elim (varlist_eq v v0); intros. rewrite (H1 I); simpl. rewrite (ics_aux_ok (interp_m (Aplus Aone Aone) v0) (canonical_sum_merge c c0)) ; rewrite (ics_aux_ok (interp_vl v0) c); rewrite (ics_aux_ok (interp_vl v0) c0); rewrite (H c0). rewrite (interp_m_ok (Aplus Aone Aone) v0). setoid_replace (Amult (Aplus Aone Aone) (interp_vl v0)) with (Aplus (Amult Aone (interp_vl v0)) (Amult Aone (interp_vl v0))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (Amult Aone (interp_vl v0)) (Amult Aone (interp_vl v0))) (Aplus (interp_setcs c) (interp_setcs c0))) with (Aplus (Amult Aone (interp_vl v0)) (Aplus (Amult Aone (interp_vl v0)) (Aplus (interp_setcs c) (interp_setcs c0)))); [ idtac | trivial ]. setoid_replace (Aplus (Aplus (interp_vl v0) (interp_setcs c)) (Aplus (interp_vl v0) (interp_setcs c0))) with (Aplus (interp_vl v0) (Aplus (interp_setcs c) (Aplus (interp_vl v0) (interp_setcs c0)))); [ idtac | trivial ]. setoid_replace (Amult Aone (interp_vl v0)) with (interp_vl v0); auto. elim (varlist_lt v v0); simpl. rewrite (ics_aux_ok (interp_vl v) (canonical_sum_merge c (Cons_varlist v0 c0))) ; rewrite (ics_aux_ok (interp_vl v) c); rewrite (ics_aux_ok (interp_vl v0) c0); rewrite (H (Cons_varlist v0 c0)); simpl. rewrite (ics_aux_ok (interp_vl v0) c0); auto. rewrite (ics_aux_ok (interp_vl v0) ((fix csm_aux2 (s2:canonical_sum) : canonical_sum := match s2 with | Nil_monom => Cons_varlist v c | Cons_monom c2 l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus Aone c2) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_varlist v (canonical_sum_merge c s2) else Cons_monom c2 l2 (csm_aux2 t2) | Cons_varlist l2 t2 => if varlist_eq v l2 then Cons_monom (Aplus Aone Aone) v (canonical_sum_merge c t2) else if varlist_lt v l2 then Cons_varlist v (canonical_sum_merge c s2) else Cons_varlist l2 (csm_aux2 t2) end) c0)); rewrite H0. rewrite (ics_aux_ok (interp_vl v) c); rewrite (ics_aux_ok (interp_vl v0) c0); simpl; auto. Qed. Lemma monom_insert_ok : forall (a:A) (l:varlist) (s:canonical_sum), Aequiv (interp_setcs (monom_insert a l s)) (Aplus (Amult a (interp_vl l)) (interp_setcs s)). Proof. simple induction s; intros. simpl; rewrite (interp_m_ok a l); trivial. simpl; generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl. 
rewrite (ics_aux_ok (interp_m (Aplus a a0) v) c); rewrite (ics_aux_ok (interp_m a0 v) c). rewrite (interp_m_ok (Aplus a a0) v); rewrite (interp_m_ok a0 v). setoid_replace (Amult (Aplus a a0) (interp_vl v)) with (Aplus (Amult a (interp_vl v)) (Amult a0 (interp_vl v))); [ idtac | trivial ]. auto. elim (varlist_lt l v); simpl; intros. rewrite (ics_aux_ok (interp_m a0 v) c). rewrite (interp_m_ok a0 v); rewrite (interp_m_ok a l). auto. rewrite (ics_aux_ok (interp_m a0 v) (monom_insert a l c)); rewrite (ics_aux_ok (interp_m a0 v) c); rewrite H. auto. simpl. generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl. rewrite (ics_aux_ok (interp_m (Aplus a Aone) v) c); rewrite (ics_aux_ok (interp_vl v) c). rewrite (interp_m_ok (Aplus a Aone) v). setoid_replace (Amult (Aplus a Aone) (interp_vl v)) with (Aplus (Amult a (interp_vl v)) (Amult Aone (interp_vl v))); [ idtac | trivial ]. setoid_replace (Amult Aone (interp_vl v)) with (interp_vl v); [ idtac | trivial ]. auto. elim (varlist_lt l v); simpl; intros; auto. rewrite (ics_aux_ok (interp_vl v) (monom_insert a l c)); rewrite H. rewrite (ics_aux_ok (interp_vl v) c); auto. Qed. Lemma varlist_insert_ok : forall (l:varlist) (s:canonical_sum), Aequiv (interp_setcs (varlist_insert l s)) (Aplus (interp_vl l) (interp_setcs s)). Proof. simple induction s; simpl; intros. trivial. generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl. rewrite (ics_aux_ok (interp_m (Aplus Aone a) v) c); rewrite (ics_aux_ok (interp_m a v) c). rewrite (interp_m_ok (Aplus Aone a) v); rewrite (interp_m_ok a v). setoid_replace (Amult (Aplus Aone a) (interp_vl v)) with (Aplus (Amult Aone (interp_vl v)) (Amult a (interp_vl v))); [ idtac | trivial ]. setoid_replace (Amult Aone (interp_vl v)) with (interp_vl v); auto. elim (varlist_lt l v); simpl; intros; auto. rewrite (ics_aux_ok (interp_m a v) (varlist_insert l c)); rewrite (ics_aux_ok (interp_m a v) c). rewrite (interp_m_ok a v). rewrite H; auto. generalize (varlist_eq_prop l v); elim (varlist_eq l v). intro Hr; rewrite (Hr I); simpl. rewrite (ics_aux_ok (interp_m (Aplus Aone Aone) v) c); rewrite (ics_aux_ok (interp_vl v) c). rewrite (interp_m_ok (Aplus Aone Aone) v). setoid_replace (Amult (Aplus Aone Aone) (interp_vl v)) with (Aplus (Amult Aone (interp_vl v)) (Amult Aone (interp_vl v))); [ idtac | trivial ]. setoid_replace (Amult Aone (interp_vl v)) with (interp_vl v); auto. elim (varlist_lt l v); simpl; intros; auto. rewrite (ics_aux_ok (interp_vl v) (varlist_insert l c)). rewrite H. rewrite (ics_aux_ok (interp_vl v) c); auto. Qed. Lemma canonical_sum_scalar_ok : forall (a:A) (s:canonical_sum), Aequiv (interp_setcs (canonical_sum_scalar a s)) (Amult a (interp_setcs s)). Proof. simple induction s; simpl; intros. trivial. rewrite (ics_aux_ok (interp_m (Amult a a0) v) (canonical_sum_scalar a c)); rewrite (ics_aux_ok (interp_m a0 v) c). rewrite (interp_m_ok (Amult a a0) v); rewrite (interp_m_ok a0 v). rewrite H. setoid_replace (Amult a (Aplus (Amult a0 (interp_vl v)) (interp_setcs c))) with (Aplus (Amult a (Amult a0 (interp_vl v))) (Amult a (interp_setcs c))); [ idtac | trivial ]. auto. rewrite (ics_aux_ok (interp_m a v) (canonical_sum_scalar a c)); rewrite (ics_aux_ok (interp_vl v) c); rewrite H. rewrite (interp_m_ok a v). auto. Qed. Lemma canonical_sum_scalar2_ok : forall (l:varlist) (s:canonical_sum), Aequiv (interp_setcs (canonical_sum_scalar2 l s)) (Amult (interp_vl l) (interp_setcs s)). Proof. simple induction s; simpl; intros; auto. 
rewrite (monom_insert_ok a (varlist_merge l v) (canonical_sum_scalar2 l c)). rewrite (ics_aux_ok (interp_m a v) c). rewrite (interp_m_ok a v). rewrite H. rewrite (varlist_merge_ok l v). setoid_replace (Amult (interp_vl l) (Aplus (Amult a (interp_vl v)) (interp_setcs c))) with (Aplus (Amult (interp_vl l) (Amult a (interp_vl v))) (Amult (interp_vl l) (interp_setcs c))); [ idtac | trivial ]. auto. rewrite (varlist_insert_ok (varlist_merge l v) (canonical_sum_scalar2 l c)). rewrite (ics_aux_ok (interp_vl v) c). rewrite H. rewrite (varlist_merge_ok l v). auto. Qed. Lemma canonical_sum_scalar3_ok : forall (c:A) (l:varlist) (s:canonical_sum), Aequiv (interp_setcs (canonical_sum_scalar3 c l s)) (Amult c (Amult (interp_vl l) (interp_setcs s))). Proof. simple induction s; simpl; intros. rewrite (SSR_mult_zero_right S T (interp_vl l)). auto. rewrite (monom_insert_ok (Amult c a) (varlist_merge l v) (canonical_sum_scalar3 c l c0)). rewrite (ics_aux_ok (interp_m a v) c0). rewrite (interp_m_ok a v). rewrite H. rewrite (varlist_merge_ok l v). setoid_replace (Amult (interp_vl l) (Aplus (Amult a (interp_vl v)) (interp_setcs c0))) with (Aplus (Amult (interp_vl l) (Amult a (interp_vl v))) (Amult (interp_vl l) (interp_setcs c0))); [ idtac | trivial ]. setoid_replace (Amult c (Aplus (Amult (interp_vl l) (Amult a (interp_vl v))) (Amult (interp_vl l) (interp_setcs c0)))) with (Aplus (Amult c (Amult (interp_vl l) (Amult a (interp_vl v)))) (Amult c (Amult (interp_vl l) (interp_setcs c0)))); [ idtac | trivial ]. setoid_replace (Amult (Amult c a) (Amult (interp_vl l) (interp_vl v))) with (Amult c (Amult a (Amult (interp_vl l) (interp_vl v)))); [ idtac | trivial ]. auto. rewrite (monom_insert_ok c (varlist_merge l v) (canonical_sum_scalar3 c l c0)) . rewrite (ics_aux_ok (interp_vl v) c0). rewrite H. rewrite (varlist_merge_ok l v). setoid_replace (Aplus (Amult c (Amult (interp_vl l) (interp_vl v))) (Amult c (Amult (interp_vl l) (interp_setcs c0)))) with (Amult c (Aplus (Amult (interp_vl l) (interp_vl v)) (Amult (interp_vl l) (interp_setcs c0)))); [ idtac | trivial ]. auto. Qed. Lemma canonical_sum_prod_ok : forall x y:canonical_sum, Aequiv (interp_setcs (canonical_sum_prod x y)) (Amult (interp_setcs x) (interp_setcs y)). Proof. simple induction x; simpl; intros. trivial. rewrite (canonical_sum_merge_ok (canonical_sum_scalar3 a v y) (canonical_sum_prod c y)). rewrite (canonical_sum_scalar3_ok a v y). rewrite (ics_aux_ok (interp_m a v) c). rewrite (interp_m_ok a v). rewrite (H y). setoid_replace (Amult a (Amult (interp_vl v) (interp_setcs y))) with (Amult (Amult a (interp_vl v)) (interp_setcs y)); [ idtac | trivial ]. setoid_replace (Amult (Aplus (Amult a (interp_vl v)) (interp_setcs c)) (interp_setcs y)) with (Aplus (Amult (Amult a (interp_vl v)) (interp_setcs y)) (Amult (interp_setcs c) (interp_setcs y))); [ idtac | trivial ]. trivial. rewrite (canonical_sum_merge_ok (canonical_sum_scalar2 v y) (canonical_sum_prod c y)) . rewrite (canonical_sum_scalar2_ok v y). rewrite (ics_aux_ok (interp_vl v) c). rewrite (H y). trivial. Qed. Theorem setspolynomial_normalize_ok : forall p:setspolynomial, Aequiv (interp_setcs (setspolynomial_normalize p)) (interp_setsp p). Proof. simple induction p; simpl; intros; trivial. rewrite (canonical_sum_merge_ok (setspolynomial_normalize s) (setspolynomial_normalize s0)). rewrite H; rewrite H0; trivial. rewrite (canonical_sum_prod_ok (setspolynomial_normalize s) (setspolynomial_normalize s0)). rewrite H; rewrite H0; trivial. Qed. 
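(* Added illustration (not part of the original source; stated as a comment
   so the development itself is unchanged).  On a tiny concrete input the
   theorem just proved specializes, for instance, to

     Goal forall (i:index) (c:A),
       Aequiv (interp_setcs (setspolynomial_normalize
                               (SetSPplus (SetSPvar i) (SetSPconst c))))
              (Aplus (interp_var i) c).
     Proof. intros; apply setspolynomial_normalize_ok. Qed.

   since interp_setsp (SetSPplus (SetSPvar i) (SetSPconst c)) unfolds to
   Aplus (interp_var i) c.  The apply step is only a sketch and may need a
   preliminary simpl, depending on how unification unfolds interp_setsp. *)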
Lemma canonical_sum_simplify_ok : forall s:canonical_sum, Aequiv (interp_setcs (canonical_sum_simplify s)) (interp_setcs s). Proof. simple induction s; simpl; intros. trivial. generalize (SSR_eq_prop T a Azero). elim (Aeq a Azero). simpl. intros. rewrite (ics_aux_ok (interp_m a v) c). rewrite (interp_m_ok a v). rewrite (H0 I). setoid_replace (Amult Azero (interp_vl v)) with Azero; [ idtac | trivial ]. rewrite H. trivial. intros; simpl. generalize (SSR_eq_prop T a Aone). elim (Aeq a Aone). intros. rewrite (ics_aux_ok (interp_m a v) c). rewrite (interp_m_ok a v). rewrite (H1 I). simpl. rewrite (ics_aux_ok (interp_vl v) (canonical_sum_simplify c)). rewrite H. auto. simpl. intros. rewrite (ics_aux_ok (interp_m a v) (canonical_sum_simplify c)). rewrite (ics_aux_ok (interp_m a v) c). rewrite H; trivial. rewrite (ics_aux_ok (interp_vl v) (canonical_sum_simplify c)). rewrite H. auto. Qed. Theorem setspolynomial_simplify_ok : forall p:setspolynomial, Aequiv (interp_setcs (setspolynomial_simplify p)) (interp_setsp p). Proof. intro. unfold setspolynomial_simplify. rewrite (canonical_sum_simplify_ok (setspolynomial_normalize p)). exact (setspolynomial_normalize_ok p). Qed. End semi_setoid_rings. Arguments Cons_varlist : default implicits. Arguments Cons_monom : default implicits. Arguments SetSPconst : default implicits. Arguments SetSPplus : default implicits. Arguments SetSPmult : default implicits. Section setoid_rings. Set Implicit Arguments. Variable vm : varmap A. Variable T : Setoid_Ring_Theory Aequiv Aplus Amult Aone Azero Aopp Aeq. Hint Resolve (STh_plus_comm T). Hint Resolve (STh_plus_assoc T). Hint Resolve (STh_plus_assoc2 S T). Hint Resolve (STh_mult_comm T). Hint Resolve (STh_mult_assoc T). Hint Resolve (STh_mult_assoc2 S T). Hint Resolve (STh_plus_zero_left T). Hint Resolve (STh_plus_zero_left2 S T). Hint Resolve (STh_mult_one_left T). Hint Resolve (STh_mult_one_left2 S T). Hint Resolve (STh_mult_zero_left S plus_morph mult_morph T). Hint Resolve (STh_mult_zero_left2 S plus_morph mult_morph T). Hint Resolve (STh_distr_left T). Hint Resolve (STh_distr_left2 S T). Hint Resolve (STh_plus_reg_left S plus_morph T). Hint Resolve (STh_plus_permute S plus_morph T). Hint Resolve (STh_mult_permute S mult_morph T). Hint Resolve (STh_distr_right S plus_morph T). Hint Resolve (STh_distr_right2 S plus_morph T). Hint Resolve (STh_mult_zero_right S plus_morph mult_morph T). Hint Resolve (STh_mult_zero_right2 S plus_morph mult_morph T). Hint Resolve (STh_plus_zero_right S T). Hint Resolve (STh_plus_zero_right2 S T). Hint Resolve (STh_mult_one_right S T). Hint Resolve (STh_mult_one_right2 S T). Hint Resolve (STh_plus_reg_right S plus_morph T). Hint Resolve eq_refl eq_sym eq_trans. Hint Immediate T. (*** Definitions *) Inductive setpolynomial : Type := | SetPvar : index -> setpolynomial | SetPconst : A -> setpolynomial | SetPplus : setpolynomial -> setpolynomial -> setpolynomial | SetPmult : setpolynomial -> setpolynomial -> setpolynomial | SetPopp : setpolynomial -> setpolynomial. Fixpoint setpolynomial_normalize (x:setpolynomial) : canonical_sum := match x with | SetPplus l r => canonical_sum_merge (setpolynomial_normalize l) (setpolynomial_normalize r) | SetPmult l r => canonical_sum_prod (setpolynomial_normalize l) (setpolynomial_normalize r) | SetPconst c => Cons_monom c Nil_var Nil_monom | SetPvar i => Cons_varlist (Cons_var i Nil_var) Nil_monom | SetPopp p => canonical_sum_scalar3 (Aopp Aone) Nil_var (setpolynomial_normalize p) end. 
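(* Added note (illustrative, not in the original source): canonical sums
   have no constructor for negation, so SetPopp p is normalized by scaling
   the normal form of p with the constant (Aopp Aone) through
   canonical_sum_scalar3.  The translation setspolynomial_of defined below
   makes the same choice, encoding SetPopp p as
   SetSPmult (SetSPconst (Aopp Aone)) (setspolynomial_of p), which is what
   the correctness proof setpolynomial_normalize_ok relies on. *)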
Definition setpolynomial_simplify (x:setpolynomial) := canonical_sum_simplify (setpolynomial_normalize x). Fixpoint setspolynomial_of (x:setpolynomial) : setspolynomial := match x with | SetPplus l r => SetSPplus (setspolynomial_of l) (setspolynomial_of r) | SetPmult l r => SetSPmult (setspolynomial_of l) (setspolynomial_of r) | SetPconst c => SetSPconst c | SetPvar i => SetSPvar i | SetPopp p => SetSPmult (SetSPconst (Aopp Aone)) (setspolynomial_of p) end. (*** Interpretation *) Fixpoint interp_setp (p:setpolynomial) : A := match p with | SetPconst c => c | SetPvar i => varmap_find Azero i vm | SetPplus p1 p2 => Aplus (interp_setp p1) (interp_setp p2) | SetPmult p1 p2 => Amult (interp_setp p1) (interp_setp p2) | SetPopp p1 => Aopp (interp_setp p1) end. (*** Properties *) Unset Implicit Arguments. Lemma setspolynomial_of_ok : forall p:setpolynomial, Aequiv (interp_setp p) (interp_setsp vm (setspolynomial_of p)). simple induction p; trivial; simpl; intros. rewrite H; rewrite H0; trivial. rewrite H; rewrite H0; trivial. rewrite H. rewrite (STh_opp_mult_left2 S plus_morph mult_morph T Aone (interp_setsp vm (setspolynomial_of s))). rewrite (STh_mult_one_left T (interp_setsp vm (setspolynomial_of s))). trivial. Qed. Theorem setpolynomial_normalize_ok : forall p:setpolynomial, setpolynomial_normalize p = setspolynomial_normalize (setspolynomial_of p). simple induction p; trivial; simpl; intros. rewrite H; rewrite H0; reflexivity. rewrite H; rewrite H0; reflexivity. rewrite H; simpl. elim (canonical_sum_scalar3 (Aopp Aone) Nil_var (setspolynomial_normalize (setspolynomial_of s))); [ reflexivity | simpl; intros; rewrite H0; reflexivity | simpl; intros; rewrite H0; reflexivity ]. Qed. Theorem setpolynomial_simplify_ok : forall p:setpolynomial, Aequiv (interp_setcs vm (setpolynomial_simplify p)) (interp_setp p). intro. unfold setpolynomial_simplify. rewrite (setspolynomial_of_ok p). rewrite setpolynomial_normalize_ok. rewrite (canonical_sum_simplify_ok vm (Semi_Setoid_Ring_Theory_of A Aequiv S Aplus Amult Aone Azero Aopp Aeq plus_morph mult_morph T) (setspolynomial_normalize (setspolynomial_of p))) . rewrite (setspolynomial_normalize_ok vm (Semi_Setoid_Ring_Theory_of A Aequiv S Aplus Amult Aone Azero Aopp Aeq plus_morph mult_morph T) (setspolynomial_of p)) . trivial. Qed. End setoid_rings. End setoid. coq-8.4pl2/plugins/ring/ring_plugin.mllib0000640000175000001440000000003411161000644017573 0ustar notinusersRing G_ring Ring_plugin_mod coq-8.4pl2/plugins/ring/LegacyNArithRing.v0000640000175000001440000000254512010532755017576 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* true | _ => false end. Lemma Neq_prop : forall n m:N, Is_true (Neq n m) -> n = m. intros n m H; unfold Neq in H. apply N.compare_eq. destruct (n ?= m)%N; [ reflexivity | contradiction | contradiction ]. Qed. Definition NTheory : Semi_Ring_Theory N.add N.mul 1%N 0%N Neq. split. apply N.add_comm. apply N.add_assoc. apply N.mul_comm. apply N.mul_assoc. apply N.add_0_l. apply N.mul_1_l. apply N.mul_0_l. apply N.mul_add_distr_r. apply Neq_prop. Qed. Add Legacy Semi Ring N N.add N.mul 1%N 0%N Neq NTheory [ Npos 0%N xO xI 1%positive ]. 
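(* Added note (not part of the original file): the declaration above
   registers N, with N.add, N.mul, 1%N, 0%N, the boolean test Neq and the
   proof NTheory, for the legacy ring tactic; the trailing list
   [ Npos 0%N xO xI 1%positive ] lists the constructors the tactic may
   treat as closed numerals.  A goal it should then decide, for instance:

     Goal forall n m : N, ((n + m) * 1 = m + n)%N.
     Proof. intros; legacy ring. Qed.

   The exact invocation (legacy ring) and the reading of the constructor
   list are recalled from the 8.4 documentation, not stated in this file,
   and should be checked against it. *)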
coq-8.4pl2/plugins/nsatz/0000750000175000001440000000000012127276541014455 5ustar notinuserscoq-8.4pl2/plugins/nsatz/utile.mli0000640000175000001440000000111111401704410016256 0ustar notinusers (* Printing *) val pr : string -> unit val prn : string -> unit val prt0 : 'a -> unit val prt : string -> unit val info : string -> unit (* Listes *) val list_mem_eq : ('a -> 'b -> bool) -> 'a -> 'b list -> bool val set_of_list_eq : ('a -> 'a -> bool) -> 'a list -> 'a list (* Memoization *) val memos : string -> ('a, 'b) Hashtbl.t -> ('c -> 'a) -> ('c -> 'b) -> 'c -> 'b val facteurs_liste : ('a -> 'a -> 'a) -> ('a -> bool) -> 'a list -> 'a list val factorise_tableau : ('a -> 'b -> 'a) -> ('a -> bool) -> 'a -> 'a array -> 'b array -> 'b array * ('a * int list) array coq-8.4pl2/plugins/nsatz/vo.itarget0000640000175000001440000000001111424041451016440 0ustar notinusersNsatz.vo coq-8.4pl2/plugins/nsatz/Nsatz.v0000640000175000001440000003407112010532755015742 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* x == y. intros x y H; setoid_replace x with ((x - y) + y); simpl; [setoid_rewrite H | idtac]; simpl. cring. cring. Qed. Lemma psos_r1: forall x y, x == y -> x - y == 0. intros x y H; simpl; setoid_rewrite H; simpl; cring. Qed. Lemma nsatzR_diff: forall x y:R, not (x == y) -> not (x - y == 0). intros. intro; apply H. simpl; setoid_replace x with ((x - y) + y). simpl. setoid_rewrite H0. simpl; cring. simpl. simpl; cring. Qed. (* adpatation du code de Benjamin aux setoides *) Require Import ZArith. Require Export Ring_polynom. Require Export InitialRing. Definition PolZ := Pol Z. Definition PEZ := PExpr Z. Definition P0Z : PolZ := P0 (C:=Z) 0%Z. Definition PolZadd : PolZ -> PolZ -> PolZ := @Padd Z 0%Z Z.add Zeq_bool. Definition PolZmul : PolZ -> PolZ -> PolZ := @Pmul Z 0%Z 1%Z Z.add Z.mul Zeq_bool. Definition PolZeq := @Peq Z Zeq_bool. Definition norm := @norm_aux Z 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool. Fixpoint mult_l (la : list PEZ) (lp: list PolZ) : PolZ := match la, lp with | a::la, p::lp => PolZadd (PolZmul (norm a) p) (mult_l la lp) | _, _ => P0Z end. Fixpoint compute_list (lla: list (list PEZ)) (lp:list PolZ) := match lla with | List.nil => lp | la::lla => compute_list lla ((mult_l la lp)::lp) end. Definition check (lpe:list PEZ) (qe:PEZ) (certif: list (list PEZ) * list PEZ) := let (lla, lq) := certif in let lp := List.map norm lpe in PolZeq (norm qe) (mult_l lq (compute_list lla lp)). (* Correction *) Definition PhiR : list R -> PolZ -> R := (Pphi ring0 add mul (InitialRing.gen_phiZ ring0 ring1 add mul opp)). Definition PEevalR : list R -> PEZ -> R := PEeval ring0 add mul sub opp (gen_phiZ ring0 ring1 add mul opp) N.to_nat pow. Lemma P0Z_correct : forall l, PhiR l P0Z = 0. Proof. trivial. Qed. Lemma Rext: ring_eq_ext add mul opp _==_. Proof. constructor; solve_proper. Qed. Lemma Rset : Setoid_Theory R _==_. apply ring_setoid. Qed. Definition Rtheory:ring_theory ring0 ring1 add mul sub opp _==_. apply mk_rt. apply ring_add_0_l. apply ring_add_comm. apply ring_add_assoc. apply ring_mul_1_l. apply cring_mul_comm. apply ring_mul_assoc. apply ring_distr_l. apply ring_sub_def. apply ring_opp_def. Defined. Lemma PolZadd_correct : forall P' P l, PhiR l (PolZadd P P') == ((PhiR l P) + (PhiR l P')). Proof. unfold PolZadd, PhiR. intros. simpl. refine (Padd_ok Rset Rext (Rth_ARth Rset Rext Rtheory) (gen_phiZ_morph Rset Rext Rtheory) _ _ _). Qed. 
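(* Added reading aid (not part of the original file): a certificate
   (lla, lq) is checked, by the function check defined above, as follows.
   The hypotheses lpe are normalized into lp; compute_list then extends lp
   with one intermediate combination mult_l la lp per element la of lla;
   finally the certificate is accepted when norm qe is equal, as a
   normalized polynomial, to the combination mult_l lq of that extended
   list.  Semantically (see check_correct below) this expresses qe as a
   polynomial combination of the hypotheses, so qe evaluates to 0 whenever
   all of them do. *)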
Lemma PolZmul_correct : forall P P' l, PhiR l (PolZmul P P') == ((PhiR l P) * (PhiR l P')). Proof. unfold PolZmul, PhiR. intros. refine (Pmul_ok Rset Rext (Rth_ARth Rset Rext Rtheory) (gen_phiZ_morph Rset Rext Rtheory) _ _ _). Qed. Lemma R_power_theory : Ring_theory.power_theory ring1 mul _==_ N.to_nat pow. apply Ring_theory.mkpow_th. unfold pow. intros. rewrite Nnat.N2Nat.id. reflexivity. Qed. Lemma norm_correct : forall (l : list R) (pe : PEZ), PEevalR l pe == PhiR l (norm pe). Proof. intros;apply (norm_aux_spec Rset Rext (Rth_ARth Rset Rext Rtheory) (gen_phiZ_morph Rset Rext Rtheory) R_power_theory). Qed. Lemma PolZeq_correct : forall P P' l, PolZeq P P' = true -> PhiR l P == PhiR l P'. Proof. intros;apply (Peq_ok Rset Rext (gen_phiZ_morph Rset Rext Rtheory));trivial. Qed. Fixpoint Cond0 (A:Type) (Interp:A->R) (l:list A) : Prop := match l with | List.nil => True | a::l => Interp a == 0 /\ Cond0 A Interp l end. Lemma mult_l_correct : forall l la lp, Cond0 PolZ (PhiR l) lp -> PhiR l (mult_l la lp) == 0. Proof. induction la;simpl;intros. cring. destruct lp;trivial. simpl. cring. simpl in H;destruct H. rewrite PolZadd_correct. simpl. rewrite PolZmul_correct. simpl. rewrite H. rewrite IHla. cring. trivial. Qed. Lemma compute_list_correct : forall l lla lp, Cond0 PolZ (PhiR l) lp -> Cond0 PolZ (PhiR l) (compute_list lla lp). Proof. induction lla;simpl;intros;trivial. apply IHlla;simpl;split;trivial. apply mult_l_correct;trivial. Qed. Lemma check_correct : forall l lpe qe certif, check lpe qe certif = true -> Cond0 PEZ (PEevalR l) lpe -> PEevalR l qe == 0. Proof. unfold check;intros l lpe qe (lla, lq) H2 H1. apply PolZeq_correct with (l:=l) in H2. rewrite norm_correct, H2. apply mult_l_correct. apply compute_list_correct. clear H2 lq lla qe;induction lpe;simpl;trivial. simpl in H1;destruct H1. rewrite <- norm_correct;auto. Qed. (* fin *) Definition R2:= 1 + 1. Fixpoint IPR p {struct p}: R := match p with xH => ring1 | xO xH => 1+1 | xO p1 => R2*(IPR p1) | xI xH => 1+(1+1) | xI p1 => 1+(R2*(IPR p1)) end. Definition IZR1 z := match z with Z0 => 0 | Zpos p => IPR p | Zneg p => -(IPR p) end. Fixpoint interpret3 t fv {struct t}: R := match t with | (PEadd t1 t2) => let v1 := interpret3 t1 fv in let v2 := interpret3 t2 fv in (v1 + v2) | (PEmul t1 t2) => let v1 := interpret3 t1 fv in let v2 := interpret3 t2 fv in (v1 * v2) | (PEsub t1 t2) => let v1 := interpret3 t1 fv in let v2 := interpret3 t2 fv in (v1 - v2) | (PEopp t1) => let v1 := interpret3 t1 fv in (-v1) | (PEpow t1 t2) => let v1 := interpret3 t1 fv in pow v1 (N.to_nat t2) | (PEc t1) => (IZR1 t1) | (PEX n) => List.nth (pred (Pos.to_nat n)) fv 0 end. End nsatz1. Ltac equality_to_goal H x y:= let h := fresh "nH" in (* eliminate trivial hypotheses, but it takes time!: (assert (h:equality x y); [solve [cring] | clear H; clear h]) || *) (try generalize (@psos_r1 _ _ _ _ _ _ _ _ _ _ _ x y H); clear H) . Ltac equalities_to_goal := lazymatch goal with | H: (_ ?x ?y) |- _ => equality_to_goal H x y | H: (_ _ ?x ?y) |- _ => equality_to_goal H x y | H: (_ _ _ ?x ?y) |- _ => equality_to_goal H x y | H: (_ _ _ _ ?x ?y) |- _ => equality_to_goal H x y (* extension possible :-) *) | H: (?x == ?y) |- _ => equality_to_goal H x y end. (* lp est incluse dans fv. La met en tete. *) Ltac parametres_en_tete fv lp := match fv with | (@nil _) => lp | (@cons _ ?x ?fv1) => let res := AddFvTail x lp in parametres_en_tete fv1 res end. Ltac append1 a l := match l with | (@nil _) => constr:(cons a l) | (cons ?x ?l) => let l' := append1 a l in constr:(cons x l') end. 
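(* Added illustration (not part of the original file): interpret3 above
   maps a reified expression back into the ring, looking variables up in
   the list fv.  For instance, with fv := x :: y :: nil (and the implicit
   coefficient argument of the constructors omitted, as elsewhere in this
   file), interpret3 (PEadd (PEc 2%Z) (PEX xH)) fv unfolds to (1 + 1) + x,
   because PEc 2%Z is interpreted through IZR1/IPR and PEX xH is the first
   element of fv.  The helper append1 above and rev below implement
   reversal of a constr-level list in Ltac, e.g. rev (a::b::c::nil)
   produces c::b::a::nil. *)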
Ltac rev l := match l with |(@nil _) => l | (cons ?x ?l) => let l' := rev l in append1 x l' end. Ltac nsatz_call_n info nparam p rr lp kont := (* idtac "Trying power: " rr;*) let ll := constr:(PEc info :: PEc nparam :: PEpow p rr :: lp) in (* idtac "calcul...";*) nsatz_compute ll; (* idtac "done";*) match goal with | |- (?c::PEpow _ ?r::?lq0)::?lci0 = _ -> _ => intros _; set (lci:=lci0); set (lq:=lq0); kont c rr lq lci end. Ltac nsatz_call radicalmax info nparam p lp kont := let rec try_n n := lazymatch n with | 0%N => fail | _ => (let r := eval compute in (N.sub radicalmax (N.pred n)) in nsatz_call_n info nparam p r lp kont) || let n' := eval compute in (N.pred n) in try_n n' end in try_n radicalmax. Ltac lterm_goal g := match g with ?b1 == ?b2 => constr:(b1::b2::nil) | ?b1 == ?b2 -> ?g => let l := lterm_goal g in constr:(b1::b2::l) end. Ltac reify_goal l le lb:= match le with nil => idtac | ?e::?le1 => match lb with ?b::?lb1 => (* idtac "b="; idtac b;*) let x := fresh "B" in set (x:= b) at 1; change x with (interpret3 e l); clear x; reify_goal l le1 lb1 end end. Ltac get_lpol g := match g with (interpret3 ?p _) == _ => constr:(p::nil) | (interpret3 ?p _) == _ -> ?g => let l := get_lpol g in constr:(p::l) end. Ltac nsatz_generic radicalmax info lparam lvar := let nparam := eval compute in (Z.of_nat (List.length lparam)) in match goal with |- ?g => let lb := lterm_goal g in match (match lvar with |(@nil _) => match lparam with |(@nil _) => let r := eval red in (list_reifyl (lterm:=lb)) in r |_ => match eval red in (list_reifyl (lterm:=lb)) with |(?fv, ?le) => let fv := parametres_en_tete fv lparam in (* we reify a second time, with the good order for variables *) let r := eval red in (list_reifyl (lterm:=lb) (lvar:=fv)) in r end end |_ => let fv := parametres_en_tete lvar lparam in let r := eval red in (list_reifyl (lterm:=lb) (lvar:=fv)) in r end) with |(?fv, ?le) => reify_goal fv le lb ; match goal with |- ?g => let lp := get_lpol g in let lpol := eval compute in (List.rev lp) in intros; let SplitPolyList kont := match lpol with | ?p2::?lp2 => kont p2 lp2 | _ => idtac "polynomial not in the ideal" end in SplitPolyList ltac:(fun p lp => set (p21:=p) ; set (lp21:=lp); (* idtac "nparam:"; idtac nparam; idtac "p:"; idtac p; idtac "lp:"; idtac lp; *) nsatz_call radicalmax info nparam p lp ltac:(fun c r lq lci => set (q := PEmul c (PEpow p21 r)); let Hg := fresh "Hg" in assert (Hg:check lp21 q (lci,lq) = true); [ (vm_compute;reflexivity) || idtac "invalid nsatz certificate" | let Hg2 := fresh "Hg" in assert (Hg2: (interpret3 q fv) == 0); [ (*simpl*) idtac; generalize (@check_correct _ _ _ _ _ _ _ _ _ _ _ fv lp21 q (lci,lq) Hg); let cc := fresh "H" in (*simpl*) idtac; intro cc; apply cc; clear cc; (*simpl*) idtac; repeat (split;[assumption|idtac]); exact I | (*simpl in Hg2;*) (*simpl*) idtac; apply Rintegral_domain_pow with (interpret3 c fv) (N.to_nat r); (*simpl*) idtac; try apply integral_domain_one_zero; try apply integral_domain_minus_one_zero; try trivial; try exact integral_domain_one_zero; try exact integral_domain_minus_one_zero || (solve [simpl; unfold R2, equality, eq_notation, addition, add_notation, one, one_notation, multiplication, mul_notation, zero, zero_notation; discrR || omega]) || ((*simpl*) idtac) || idtac "could not prove discrimination result" ] ] ) ) end end end . Ltac nsatz_default:= intros; try apply (@psos_r1b _ _ _ _ _ _ _ _ _ _ _); match goal with |- (@equality ?r _ _ _) => repeat equalities_to_goal; nsatz_generic 6%N 1%Z (@nil r) (@nil r) end. 
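(* Added note (not part of the original file): nsatz_default calls the
   generic tactic with radicalmax := 6, strategy info := 1 and empty
   parameter and variable lists, i.e. it looks for a certificate writing
   some power p^r of the goal polynomial, with 1 <= r <= 6, as a
   combination of the hypothesis polynomials.  The Tactic Notation declared
   next exposes these knobs explicitly, along the lines of
     nsatz with radicalmax := 6%N strategy := 1%Z
                parameters := (@nil R) variables := (@nil R). *)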
Tactic Notation "nsatz" := nsatz_default. Tactic Notation "nsatz" "with" "radicalmax" ":=" constr(radicalmax) "strategy" ":=" constr(info) "parameters" ":=" constr(lparam) "variables" ":=" constr(lvar):= intros; try apply (@psos_r1b _ _ _ _ _ _ _ _ _ _ _); match goal with |- (@equality ?r _ _ _) => repeat equalities_to_goal; nsatz_generic radicalmax info lparam lvar end. (* Real numbers *) Require Import Reals. Require Import RealField. Lemma Rsth : Setoid_Theory R (@eq R). constructor;red;intros;subst;trivial. Qed. Instance Rops: (@Ring_ops R 0%R 1%R Rplus Rmult Rminus Ropp (@eq R)). Instance Rri : (Ring (Ro:=Rops)). constructor; try (try apply Rsth; try (unfold respectful, Proper; unfold equality; unfold eq_notation in *; intros; try rewrite H; try rewrite H0; reflexivity)). exact Rplus_0_l. exact Rplus_comm. symmetry. apply Rplus_assoc. exact Rmult_1_l. exact Rmult_1_r. symmetry. apply Rmult_assoc. exact Rmult_plus_distr_r. intros; apply Rmult_plus_distr_l. exact Rplus_opp_r. Defined. Lemma R_one_zero: 1%R <> 0%R. discrR. Qed. Instance Rcri: (Cring (Rr:=Rri)). red. exact Rmult_comm. Defined. Instance Rdi : (Integral_domain (Rcr:=Rcri)). constructor. exact Rmult_integral. exact R_one_zero. Defined. (* Rational numbers *) Require Import QArith. Instance Qops: (@Ring_ops Q 0%Q 1%Q Qplus Qmult Qminus Qopp Qeq). Instance Qri : (Ring (Ro:=Qops)). constructor. try apply Q_Setoid. apply Qplus_comp. apply Qmult_comp. apply Qminus_comp. apply Qopp_comp. exact Qplus_0_l. exact Qplus_comm. apply Qplus_assoc. exact Qmult_1_l. exact Qmult_1_r. apply Qmult_assoc. apply Qmult_plus_distr_l. intros. apply Qmult_plus_distr_r. reflexivity. exact Qplus_opp_r. Defined. Lemma Q_one_zero: not (Qeq 1%Q 0%Q). unfold Qeq. simpl. auto with *. Qed. Instance Qcri: (Cring (Rr:=Qri)). red. exact Qmult_comm. Defined. Instance Qdi : (Integral_domain (Rcr:=Qcri)). constructor. exact Qmult_integral. exact Q_one_zero. Defined. (* Integers *) Lemma Z_one_zero: 1%Z <> 0%Z. omega. Qed. Instance Zcri: (Cring (Rr:=Zr)). red. exact Z.mul_comm. Defined. Instance Zdi : (Integral_domain (Rcr:=Zcri)). constructor. exact Zmult_integral. exact Z_one_zero. Defined. 
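(* Added usage sketch (not part of the original file): with the instances
   above, nsatz is available over R, Q and Z.  A goal it should close once
   this file is loaded, for instance:

     Goal forall x y : Z, x = y -> (x * x = y * y)%Z.
     Proof. intros; nsatz. Qed.

   since x*x - y*y = (x + y) * (x - y) lies in the ideal generated by the
   hypothesis x - y, a degree-1 certificate well within the default
   radicalmax of 6. *)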
coq-8.4pl2/plugins/nsatz/polynom.ml0000640000175000001440000004145712121620060016477 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> bool val lt : t -> t -> bool val le : t -> t -> bool val abs : t -> t val plus : t -> t -> t val mult : t -> t -> t val sub : t -> t -> t val opp : t -> t val div : t -> t -> t val modulo : t -> t -> t val puis : t -> int -> t val pgcd : t -> t -> t val hash : t -> int val of_num : Num.num -> t val to_string : t -> string end module type S = sig type coef type variable = int type t = Pint of coef | Prec of variable * t array val of_num : Num.num -> t val x : variable -> t val monome : variable -> int -> t val is_constantP : t -> bool val is_zero : t -> bool val max_var_pol : t -> variable val max_var_pol2 : t -> variable val max_var : t array -> variable val equal : t -> t -> bool val norm : t -> t val deg : variable -> t -> int val deg_total : t -> int val copyP : t -> t val coef : variable -> int -> t -> t val plusP : t -> t -> t val content : t -> coef val div_int : t -> coef -> t val vire_contenu : t -> t val vars : t -> variable list val int_of_Pint : t -> coef val multx : int -> variable -> t -> t val multP : t -> t -> t val deriv : variable -> t -> t val oppP : t -> t val moinsP : t -> t -> t val puisP : t -> int -> t val ( @@ ) : t -> t -> t val ( -- ) : t -> t -> t val ( ^^ ) : t -> int -> t val coefDom : variable -> t -> t val coefConst : variable -> t -> t val remP : variable -> t -> t val coef_int_tete : t -> coef val normc : t -> t val coef_constant : t -> coef val univ : bool ref val string_of_var : int -> string val nsP : int ref val to_string : t -> string val printP : t -> unit val print_tpoly : t array -> unit val print_lpoly : t list -> unit val quo_rem_pol : t -> t -> variable -> t * t val div_pol : t -> t -> variable -> t val divP : t -> t -> t val div_pol_rat : t -> t -> bool val pseudo_div : t -> t -> variable -> t * t * int * t val pgcdP : t -> t -> t val pgcd_pol : t -> t -> variable -> t val content_pol : t -> variable -> t val pgcd_coef_pol : t -> t -> variable -> t val pgcd_pol_rec : t -> t -> variable -> t val gcd_sub_res : t -> t -> variable -> t val gcd_sub_res_rec : t -> t -> t -> t -> int -> variable -> t val lazard_power : t -> t -> int -> variable -> t val hash : t -> int module Hashpol : Hashtbl.S with type key=t end (*********************************************************************** 2. Type of polynomials, operations. 
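   Added illustration (not in the original source): the type defined below
   represents a polynomial recursively, as a polynomial in its greatest
   variable whose coefficients are polynomials in the smaller variables.
   Writing Pint k for the constant k, the normalized form of 3 + 2*x1*x2 is
     Prec (2, [| Pint 3; Prec (1, [| Pint 0; Pint 2 |]) |])
   i.e. a polynomial in x2 whose degree-0 coefficient is 3 and whose
   degree-1 coefficient is the polynomial 2*x1; coefficients are stored in
   increasing degree, and trailing (highest-degree) zero coefficients are
   removed by norm.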
*) module Make (C:Coef) = struct type coef = C.t let coef_of_int i = C.of_num (Num.Int i) let coef0 = coef_of_int 0 let coef1 = coef_of_int 1 type variable = int type t = Pint of coef (* constant polynomial *) | Prec of variable * (t array) (* coefficients, increasing degree *) (* by default, operations work with normalized polynomials: - variables are positive integers - coefficients of a polynomial in x only use variables < x - no zero coefficient at beginning - no Prec(x,a) where a is constant in x *) (* constant polynomials *) let of_num x = Pint (C.of_num x) let cf0 = of_num (Num.Int 0) let cf1 = of_num (Num.Int 1) (* nth variable *) let x n = Prec (n,[|cf0;cf1|]) (* create v^n *) let monome v n = match n with 0->Pint coef1; |_->let tmp = Array.create (n+1) (Pint coef0) in tmp.(n)<-(Pint coef1); Prec (v, tmp) let is_constantP = function Pint _ -> true | Prec _ -> false let int_of_Pint = function Pint x -> x | _ -> failwith "non" let is_zero p = match p with Pint n -> if C.equal n coef0 then true else false |_-> false let max_var_pol p = match p with Pint _ -> 0 |Prec(x,_) -> x (* p not normalized *) let rec max_var_pol2 p = match p with Pint _ -> 0 |Prec(v,c)-> Array.fold_right (fun q m -> max (max_var_pol2 q) m) c v let rec max_var l = Array.fold_right (fun p m -> max (max_var_pol2 p) m) l 0 (* equality between polynomials *) let rec equal p q = match (p,q) with (Pint a,Pint b) -> C.equal a b |(Prec(x,p1),Prec(y,q1)) -> if x<>y then false else if (Array.length p1)<>(Array.length q1) then false else (try (Array.iteri (fun i a -> if not (equal a q1.(i)) then failwith "rat") p1; true) with e when Errors.noncritical e -> false) | (_,_) -> false (* normalize polynomial: remove head zeros, coefficients are normalized if constant, returns the coefficient *) let rec norm p = match p with Pint _ -> p |Prec (x,a)-> let d = (Array.length a -1) in let n = ref d in while !n>0 && (equal a.(!n) (Pint coef0)) do n:=!n-1; done; if !n<0 then Pint coef0 else if !n=0 then a.(0) else if !n=d then p else (let b=Array.create (!n+1) (Pint coef0) in for i=0 to !n do b.(i)<-a.(i);done; Prec(x,b)) (* degree in v, v >= max var of p *) let rec deg v p = match p with Prec(x,p1) when x=v -> Array.length p1 -1 |_ -> 0 (* total degree *) let rec deg_total p = match p with Prec (x,p1) -> let d = ref 0 in Array.iteri (fun i q -> d:= (max !d (i+(deg_total q)))) p1; !d |_ -> 0 let rec copyP p = match p with Pint i -> Pint i |Prec(x,q) -> Prec(x,Array.map copyP q) (* coefficient of degree i in v, v >= max var of p *) let coef v i p = match p with Prec (x,p1) when x=v -> if i<(Array.length p1) then p1.(i) else Pint coef0 |_ -> if i=0 then p else Pint coef0 (* addition *) let rec plusP p q = let res = (match (p,q) with (Pint a,Pint b) -> Pint (C.plus a b) |(Pint a, Prec (y,q1)) -> let q2=Array.map copyP q1 in q2.(0)<- plusP p q1.(0); Prec (y,q2) |(Prec (x,p1),Pint b) -> let p2=Array.map copyP p1 in p2.(0)<- plusP p1.(0) q; Prec (x,p2) |(Prec (x,p1),Prec (y,q1)) -> if xy then (let p2=Array.map copyP p1 in p2.(0)<- plusP p1.(0) q; Prec (x,p2)) else (let n=max (deg x p) (deg x q) in let r=Array.create (n+1) (Pint coef0) in for i=0 to n do r.(i)<- plusP (coef x i p) (coef x i q); done; Prec(x,r))) in norm res (* content, positive integer *) let rec content p = match p with Pint a -> C.abs a | Prec (x ,p1) -> Array.fold_left C.pgcd coef0 (Array.map content p1) let rec div_int p a= match p with Pint b -> Pint (C.div b a) | Prec(x,p1) -> Prec(x,Array.map (fun x -> div_int x a) p1) let vire_contenu p = let c = content p in if 
C.equal c coef0 then p else div_int p c (* sorted list of variables of a polynomial *) let rec vars=function Pint _->[] | Prec (x,l)->(List.flatten ([x]::(List.map vars (Array.to_list l)))) (* multiply p by v^n, v >= max_var p *) let rec multx n v p = match p with Prec (x,p1) when x=v -> let p2= Array.create ((Array.length p1)+n) (Pint coef0) in for i=0 to (Array.length p1)-1 do p2.(i+n)<-p1.(i); done; Prec (x,p2) |_ -> if equal p (Pint coef0) then (Pint coef0) else (let p2=Array.create (n+1) (Pint coef0) in p2.(n)<-p; Prec (v,p2)) (* product *) let rec multP p q = match (p,q) with (Pint a,Pint b) -> Pint (C.mult a b) |(Pint a, Prec (y,q1)) -> if C.equal a coef0 then Pint coef0 else let q2 = Array.map (fun z-> multP p z) q1 in Prec (y,q2) |(Prec (x,p1), Pint b) -> if C.equal b coef0 then Pint coef0 else let p2 = Array.map (fun z-> multP z q) p1 in Prec (x,p2) |(Prec (x,p1), Prec(y,q1)) -> if x multP p z) q1 in Prec (y,q2)) else if x>y then (let p2 = Array.map (fun z-> multP z q) p1 in Prec (x,p2)) else Array.fold_left plusP (Pint coef0) (Array.mapi (fun i z-> (multx i x (multP z q))) p1) (* derive p with variable v, v >= max_var p *) let rec deriv v p = match p with Pint a -> Pint coef0 | Prec(x,p1) when x=v -> let d = Array.length p1 -1 in if d=1 then p1.(1) else (let p2 = Array.create d (Pint coef0) in for i=0 to d-1 do p2.(i)<- multP (Pint (coef_of_int (i+1))) p1.(i+1); done; Prec (x,p2)) | Prec(x,p1)-> Pint coef0 (* opposite *) let rec oppP p = match p with Pint a -> Pint (C.opp a) |Prec(x,p1) -> Prec(x,Array.map oppP p1) let moinsP p q=plusP p (oppP q) let rec puisP p n = match n with 0 -> cf1 |_ -> (multP p (puisP p (n-1))) (* infix notations *) (*let (++) a b = plusP a b *) let (@@) a b = multP a b let (--) a b = moinsP a b let (^^) a b = puisP a b (* leading coefficient in v, v>= max_var p *) let coefDom v p= coef v (deg v p) p let coefConst v p = coef v 0 p (* tail of a polynomial *) let remP v p = moinsP p (multP (coefDom v p) (puisP (x v) (deg v p))) (* first interger coefficient of p *) let rec coef_int_tete p = let v = max_var_pol p in if v>0 then coef_int_tete (coefDom v p) else (match p with | Pint a -> a |_ -> assert false) (* divide by the content and make the head int coef positive *) let normc p = let p = vire_contenu p in let a = coef_int_tete p in if C.le coef0 a then p else oppP p (* constant coef of normalized polynomial *) let rec coef_constant p = match p with Pint a->a |Prec(_,q)->coef_constant q.(0) (*********************************************************************** 3. Printing polynomials. *) (* if univ = false, we use x,y,z,a,b,c,d... as variables, else x1,x2,... *) let univ=ref true let string_of_var x= if !univ then "u"^(string_of_int x) else if x<=3 then String.make 1 (Char.chr(x+(Char.code 'w'))) else String.make 1 (Char.chr(x-4+(Char.code 'a'))) let nsP = ref 0 let rec string_of_Pcut p = if (!nsP)<=0 then "..." 
else match p with |Pint a-> nsP:=(!nsP)-1; if C.le coef0 a then C.to_string a else "("^(C.to_string a)^")" |Prec (x,t)-> let v=string_of_var x and s=ref "" and sp=ref "" in let st0 = string_of_Pcut t.(0) in if st0<>"0" then s:=st0; let fin = ref false in for i=(Array.length t)-1 downto 1 do if (!nsP)<0 then (sp:="..."; if not (!fin) then s:=(!s)^"+"^(!sp); fin:=true) else ( let si=string_of_Pcut t.(i) in sp:=""; if i=1 then ( if si<>"0" then (nsP:=(!nsP)-1; if si="1" then sp:=v else (if (String.contains si '+') then sp:="("^si^")*"^v else sp:=si^"*"^v))) else ( if si<>"0" then (nsP:=(!nsP)-1; if si="1" then sp:=v^"^"^(string_of_int i) else (if (String.contains si '+') then sp:="("^si^")*"^v^"^"^(string_of_int i) else sp:=si^"*"^v^"^"^(string_of_int i)))); if !sp<>"" && not (!fin) then (nsP:=(!nsP)-1; if !s="" then s:=!sp else s:=(!s)^"+"^(!sp))); done; if !s="" then (nsP:=(!nsP)-1; (s:="0")); !s let to_string p = nsP:=20; string_of_Pcut p let printP p = Format.printf "@[%s@]" (to_string p) let print_tpoly lp = let s = ref "\n{ " in Array.iter (fun p -> s:=(!s)^(to_string p)^"\n") lp; prt0 ((!s)^"}") let print_lpoly lp = print_tpoly (Array.of_list lp) (*********************************************************************** 4. Exact division of polynomials. *) (* return (s,r) s.t. p = s*q+r *) let rec quo_rem_pol p q x = if x=0 then (match (p,q) with |(Pint a, Pint b) -> if C.equal (C.modulo a b) coef0 then (Pint (C.div a b), cf0) else failwith "div_pol1" |_ -> assert false) else let m = deg x q in let b = coefDom x q in let q1 = remP x q in (* q = b*x^m+q1 *) let r = ref p in let s = ref cf0 in let continue =ref true in while (!continue) && (not (equal !r cf0)) do let n = deg x !r in if n false (*********************************************************************** 5. Pseudo-division and gcd with subresultants. *) (* pseudo division : q = c*x^m+q1 retruns (r,c,d,s) s.t. c^d*p = s*q + r. 
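   Added worked example (not in the original source): over the integers,
   pseudo-dividing p = x^2 + 1 by q = 2x + 1 in the variable x performs two
   reduction steps and returns r = 5, c = 2, d = 2 and s = 2x - 1; indeed
   c^d * p = 4x^2 + 4 = (2x - 1)*(2x + 1) + 5 = s*q + r, as stated above.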
*) let pseudo_div p q x = match q with Pint _ -> (cf0, q,1, p) | Prec (v,q1) when x<>v -> (cf0, q,1, p) | Prec (v,q1) -> ( (* pr "pseudo_division: c^d*p = s*q + r";*) let delta = ref 0 in let r = ref p in let c = coefDom x q in let q1 = remP x q in let d' = deg x q in let s = ref cf0 in while (deg x !r)>=(deg x q) do let d = deg x !r in let a = coefDom x !r in let r1=remP x !r in let u = a @@ ((monome x (d-d'))) in r:=(c @@ r1) -- (u @@ q1); s:=plusP (c @@ (!s)) u; delta := (!delta) + 1; done; (* pr ("deg d: "^(string_of_int (!delta))^", deg c: "^(string_of_int (deg_total c))); pr ("deg r:"^(string_of_int (deg_total !r))); *) (!r,c,!delta, !s) ) (* gcd with subresultants *) let rec pgcdP p q = let x = max (max_var_pol p) (max_var_pol q) in pgcd_pol p q x and pgcd_pol p q x = pgcd_pol_rec p q x and content_pol p x = match p with Prec(v,p1) when v=x -> Array.fold_left (fun a b -> pgcd_pol_rec a b (x-1)) cf0 p1 | _ -> p and pgcd_coef_pol c p x = match p with Prec(v,p1) when x=v -> Array.fold_left (fun a b -> pgcd_pol_rec a b (x-1)) c p1 |_ -> pgcd_pol_rec c p (x-1) and pgcd_pol_rec p q x = match (p,q) with (Pint a,Pint b) -> Pint (C.pgcd (C.abs a) (C.abs b)) |_ -> if equal p cf0 then q else if equal q cf0 then p else if (deg x q) = 0 then pgcd_coef_pol q p x else if (deg x p) = 0 then pgcd_coef_pol p q x else ( let a = content_pol p x in let b = content_pol q x in let c = pgcd_pol_rec a b (x-1) in pr (string_of_int x); let p1 = div_pol p c x in let q1 = div_pol q c x in let r = gcd_sub_res p1 q1 x in let cr = content_pol r x in let res = c @@ (div_pol r cr x) in res ) (* Sub-rsultants: ai*Ai = Qi*Ai+1 + bi*Ai+2 deg Ai+2 < deg Ai+1 Ai = ci*X^ni + ... di = ni - ni+1 ai = (- ci+1)^(di + 1) b1 = 1 bi = ci*si^di si i>1 s1 = 1 si+1 = ((ci+1)^di*si)/si^di *) and gcd_sub_res p q x = if equal q cf0 then p else let d = deg x p in let d' = deg x q in if d (C.hash a) | Prec (v,p) -> Array.fold_right (fun q h -> h + hash q) p 0 module Hashpol = Hashtbl.Make( struct type poly = t type t = poly let equal = equal let hash = hash end) end coq-8.4pl2/plugins/nsatz/ideal.ml0000640000175000001440000006160012121620060016050 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mon -> mon val deg : mon -> int val compare_mon : mon -> mon -> int val div_mon : mon -> mon -> mon val div_mon_test : mon -> mon -> bool val ppcm_mon : mon -> mon -> mon (* Polynomials *) type deg = int type coef type poly type polynom val repr : poly -> (coef * mon) list val polconst : coef -> poly val zeroP : poly val gen : int -> poly val equal : poly -> poly -> bool val name_var : string list ref val getvar : string list -> int -> string val lstringP : poly list -> string val printP : poly -> unit val lprintP : poly list -> unit val div_pol_coef : poly -> coef -> poly val plusP : poly -> poly -> poly val mult_t_pol : coef -> mon -> poly -> poly val selectdiv : mon -> poly list -> poly val oppP : poly -> poly val emultP : coef -> poly -> poly val multP : poly -> poly -> poly val puisP : poly -> int -> poly val contentP : poly -> coef val contentPlist : poly list -> coef val pgcdpos : coef -> coef -> coef val div_pol : poly -> poly -> coef -> coef -> mon -> poly val reduce2 : poly -> poly list -> coef * poly val poldepcontent : coef list ref val coefpoldep_find : poly -> poly -> poly val coefpoldep_set : poly -> poly -> poly -> unit val initcoefpoldep : poly list -> unit val reduce2_trace : poly -> poly list -> poly list -> poly list * 
poly val spol : poly -> poly -> poly val etrangers : poly -> poly -> bool val div_ppcm : poly -> poly -> poly -> bool val genPcPf : poly -> poly list -> poly list -> poly list val genOCPf : poly list -> poly list val is_homogeneous : poly -> bool type certificate = { coef : coef; power : int; gb_comb : poly list list; last_comb : poly list } val test_dans_ideal : poly -> poly list -> poly list -> poly list * poly * certificate val in_ideal : deg -> poly list -> poly -> poly list * poly * certificate end (*********************************************************************** Global options *) let lexico = ref false let use_hmon = ref false (* division of tail monomials *) let reduire_les_queues = false (* divide first with new polynomials *) let nouveaux_pol_en_tete = false (*********************************************************************** Functor *) module Make (P:Polynom.S) = struct type coef = P.t let coef0 = P.of_num (Num.Int 0) let coef1 = P.of_num (Num.Int 1) let coefm1 = P.of_num (Num.Int (-1)) let string_of_coef c = "["^(P.to_string c)^"]" (*********************************************************************** Monomials array of integers, first is the degree *) type mon = int array type deg = int type poly = (coef * mon) list type polynom = {pol : poly ref; num : int; sugar : int} let nvar m = Array.length m - 1 let deg m = m.(0) let mult_mon m m' = let d = nvar m in let m'' = Array.create (d+1) 0 in for i=0 to d do m''.(i)<- (m.(i)+m'.(i)); done; m'' let compare_mon m m' = let d = nvar m in if !lexico then ( (* Comparaison de monomes avec ordre du degre lexicographique = on commence par regarder la 1ere variable*) let res=ref 0 in let i=ref 1 in (* 1 si lexico pur 0 si degre*) while (!res=0) && (!i<=d) do res:= (compare m.(!i) m'.(!i)); i:=!i+1; done; !res) else ( (* degre lexicographique inverse *) match compare m.(0) m'.(0) with | 0 -> (* meme degre total *) let res=ref 0 in let i=ref d in while (!res=0) && (!i>=1) do res:= - (compare m.(!i) m'.(!i)); i:=!i-1; done; !res | x -> x) let div_mon m m' = let d = nvar m in let m'' = Array.create (d+1) 0 in for i=0 to d do m''.(i)<- (m.(i)-m'.(i)); done; m'' let div_pol_coef p c = List.map (fun (a,m) -> (P.divP a c,m)) p (* m' divides m *) let div_mon_test m m' = let d = nvar m in let res=ref true in let i=ref 0 in (*il faut que le degre total soit bien mis sinon, i=ref 1*) while (!res) && (!i<=d) do res:= (m.(!i) >= m'.(!i)); i:=succ !i; done; !res let set_deg m = let d = nvar m in m.(0)<-0; for i=1 to d do m.(0)<- m.(i)+m.(0); done; m (* lcm *) let ppcm_mon m m' = let d = nvar m in let m'' = Array.create (d+1) 0 in for i=1 to d do m''.(i)<- (max m.(i) m'.(i)); done; set_deg m'' (********************************************************************** Polynomials list of (coefficient, monomial) decreasing order *) let repr p = p let equal = Util.list_for_all2eq (fun (c1,m1) (c2,m2) -> P.equal c1 c2 && m1=m2) let hash p = let c = map fst p in let m = map snd p in fold_left (fun h p -> h * 17 + P.hash p) (Hashtbl.hash m) c module Hashpol = Hashtbl.Make( struct type t = poly let equal = equal let hash = hash end) (* A pretty printer for polynomials, with Maple-like syntax. 
*) open Format let getvar lv i = try (nth lv i) with e when Errors.noncritical e -> (fold_left (fun r x -> r^" "^x) "lv= " lv) ^" i="^(string_of_int i) let string_of_pol zeroP hdP tlP coefterm monterm string_of_coef dimmon string_of_exp lvar p = let rec string_of_mon m coefone = let s=ref [] in for i=1 to (dimmon m) do (match (string_of_exp m i) with "0" -> () | "1" -> s:= (!s) @ [(getvar !lvar (i-1))] | e -> s:= (!s) @ [((getvar !lvar (i-1)) ^ "^" ^ e)]); done; (match !s with [] -> if coefone then "1" else "" | l -> if coefone then (String.concat "*" l) else ( "*" ^ (String.concat "*" l))) and string_of_term t start = let a = coefterm t and m = monterm t in match (string_of_coef a) with "0" -> "" | "1" ->(match start with true -> string_of_mon m true |false -> ( "+ "^ (string_of_mon m true))) | "-1" ->( "-" ^" "^(string_of_mon m true)) | c -> if (String.get c 0)='-' then ( "- "^ (String.sub c 1 ((String.length c)-1))^ (string_of_mon m false)) else (match start with true -> ( c^(string_of_mon m false)) |false -> ( "+ "^ c^(string_of_mon m false))) and stringP p start = if (zeroP p) then (if start then ("0") else "") else ((string_of_term (hdP p) start)^ " "^ (stringP (tlP p) false)) in (stringP p true) let print_pol zeroP hdP tlP coefterm monterm string_of_coef dimmon string_of_exp lvar p = let rec print_mon m coefone = let s=ref [] in for i=1 to (dimmon m) do (match (string_of_exp m i) with "0" -> () | "1" -> s:= (!s) @ [(getvar !lvar (i-1))] | e -> s:= (!s) @ [((getvar !lvar (i-1)) ^ "^" ^ e)]); done; (match !s with [] -> if coefone then print_string "1" else () | l -> if coefone then print_string (String.concat "*" l) else (print_string "*"; print_string (String.concat "*" l))) and print_term t start = let a = coefterm t and m = monterm t in match (string_of_coef a) with "0" -> () | "1" ->(match start with true -> print_mon m true |false -> (print_string "+ "; print_mon m true)) | "-1" ->(print_string "-";print_space();print_mon m true) | c -> if (String.get c 0)='-' then (print_string "- "; print_string (String.sub c 1 ((String.length c)-1)); print_mon m false) else (match start with true -> (print_string c;print_mon m false) |false -> (print_string "+ "; print_string c;print_mon m false)) and printP p start = if (zeroP p) then (if start then print_string("0") else ()) else (print_term (hdP p) start; if start then open_hovbox 0; print_space(); print_cut(); printP (tlP p) false) in open_hovbox 3; printP p true; print_flush() let name_var= ref [] let stringP p = string_of_pol (fun p -> match p with [] -> true | _ -> false) (fun p -> match p with (t::p) -> t |_ -> failwith "print_pol dans dansideal") (fun p -> match p with (t::p) -> p |_ -> failwith "print_pol dans dansideal") (fun (a,m) -> a) (fun (a,m) -> m) string_of_coef (fun m -> (Array.length m)-1) (fun m i -> (string_of_int (m.(i)))) name_var p let nsP2 = ref max_int let stringPcut p = (*Polynomesrec.nsP1:=20;*) nsP2:=10; let res = if (length p)> !nsP2 then (stringP [hd p])^" + "^(string_of_int (length p))^" terms" else stringP p in (*Polynomesrec.nsP1:= max_int;*) nsP2:= max_int; res let rec lstringP l = match l with [] -> "" |p::l -> (stringP p)^("\n")^(lstringP l) let printP = print_pol (fun p -> match p with [] -> true | _ -> false) (fun p -> match p with (t::p) -> t |_ -> failwith "print_pol dans dansideal") (fun p -> match p with (t::p) -> p |_ -> failwith "print_pol dans dansideal") (fun (a,m) -> a) (fun (a,m) -> m) string_of_coef (fun m -> (Array.length m)-1) (fun m i -> (string_of_int (m.(i)))) name_var let rec lprintP l = 
match l with [] -> () |p::l -> printP p;print_string "\n"; lprintP l (* Operations *) let zeroP = [] (* returns a constant polynom ial with d variables *) let polconst d c = let m = Array.create (d+1) 0 in let m = set_deg m in [(c,m)] let plusP p q = let rec plusP p q = match p with [] -> q |t::p' -> match q with [] -> p |t'::q' -> match compare_mon (snd t) (snd t') with 1 -> t::(plusP p' q) |(-1) -> t'::(plusP p q') |_ -> let c=P.plusP (fst t) (fst t') in match P.equal c coef0 with true -> (plusP p' q') |false -> (c,(snd t))::(plusP p' q') in plusP p q (* multiplication by (a,monomial) *) let mult_t_pol a m p = let rec mult_t_pol p = match p with [] -> [] |(b,m')::p -> ((P.multP a b),(mult_mon m m'))::(mult_t_pol p) in mult_t_pol p let coef_of_int x = P.of_num (Num.Int x) (* variable i *) let gen d i = let m = Array.create (d+1) 0 in m.(i) <- 1; let m = set_deg m in [((coef_of_int 1),m)] let oppP p = let rec oppP p = match p with [] -> [] |(b,m')::p -> ((P.oppP b),m')::(oppP p) in oppP p (* multiplication by a coefficient *) let emultP a p = let rec emultP p = match p with [] -> [] |(b,m')::p -> ((P.multP a b),m')::(emultP p) in emultP p let multP p q = let rec aux p = match p with [] -> [] |(a,m)::p' -> plusP (mult_t_pol a m q) (aux p') in aux p let puisP p n= match p with [] -> [] |_ -> let d = nvar (snd (hd p)) in let rec puisP n = match n with 0 -> [coef1, Array.create (d+1) 0] | 1 -> p |_ -> multP p (puisP (n-1)) in puisP n let rec contentP p = match p with |[] -> coef1 |[a,m] -> a |(a,m)::p1 -> if P.equal a coef1 || P.equal a coefm1 then a else P.pgcdP a (contentP p1) let contentPlist lp = match lp with |[] -> coef1 |p::l1 -> fold_left (fun r q -> if P.equal r coef1 || P.equal r coefm1 then r else P.pgcdP r (contentP q)) (contentP p) l1 (*********************************************************************** Division of polynomials *) let pgcdpos a b = P.pgcdP a b let polynom0 = {pol = ref []; num = 0; sugar = 0} let ppol p = !(p.pol) let lm p = snd (hd (ppol p)) let nallpol = ref 0 let allpol = ref (Array.create 1000 polynom0) let new_allpol p s = nallpol := !nallpol + 1; if !nallpol >= Array.length !allpol then allpol := Array.append !allpol (Array.create !nallpol polynom0); let p = {pol = ref p; num = !nallpol; sugar = s} in !allpol.(!nallpol)<- p; p (* returns a polynomial of l whose head monomial divides m, else [] *) let rec selectdiv m l = match l with [] -> polynom0 |q::r -> let m'= snd (hd (ppol q)) in match (div_mon_test m m') with true -> q |false -> selectdiv m r let div_pol p q a b m = (* info ".";*) plusP (emultP a p) (mult_t_pol b m q) let hmon = Hashtbl.create 1000 let use_hmon = ref false let find_hmon m = if !use_hmon then Hashtbl.find hmon m else raise Not_found let add_hmon m q = if !use_hmon then Hashtbl.add hmon m q else () let div_coef a b = P.divP a b (* remainder r of the division of p by polynomials of l, returns (c,r) where c is the coefficient for pseudo-division : c p = sum_i q_i p_i + r *) let reduce2 p l = let l = if nouveaux_pol_en_tete then rev l else l in let rec reduce p = match p with [] -> (coef1,[]) |t::p' -> let (a,m)=t in let q = (try find_hmon m with Not_found -> let q = selectdiv m l in match (ppol q) with t'::q' -> (add_hmon m q; q) |[] -> q) in match (ppol q) with [] -> if reduire_les_queues then let (c,r)=(reduce p') in (c,((P.multP a c,m)::r)) else (coef1,p) |(b,m')::q' -> let c=(pgcdpos a b) in let a'= (div_coef b c) in let b'=(P.oppP (div_coef a c)) in let (e,r)=reduce (div_pol p' q' a' b' (div_mon m m')) in (P.multP a' e,r) in let 
(c,r) = reduce p in (c,r) (* trace of divisions *) (* list of initial polynomials *) let poldep = ref [] let poldepcontent = ref [] (* coefficients of polynomials when written with initial polynomials *) let coefpoldep = Hashtbl.create 51 (* coef of q in p = sum_i c_i*q_i *) let coefpoldep_find p q = try (Hashtbl.find coefpoldep (p.num,q.num)) with Not_found -> [] let coefpoldep_remove p q = Hashtbl.remove coefpoldep (p.num,q.num) let coefpoldep_set p q c = Hashtbl.add coefpoldep (p.num,q.num) c let initcoefpoldep d lp = poldep:=lp; poldepcontent:= map (fun p -> contentP (ppol p)) lp; iter (fun p -> coefpoldep_set p p (polconst d (coef_of_int 1))) lp (* keeps trace in coefpoldep divides without pseudodivisions *) let reduce2_trace p l lcp = let l = if nouveaux_pol_en_tete then rev l else l in (* rend (lq,r), ou r = p + sum(lq) *) let rec reduce p = match p with [] -> ([],[]) |t::p' -> let (a,m)=t in let q = (try find_hmon m with Not_found -> let q = selectdiv m l in match (ppol q) with t'::q' -> (add_hmon m q; q) |[] -> q) in match (ppol q) with [] -> if reduire_les_queues then let (lq,r)=(reduce p') in (lq,((a,m)::r)) else ([],p) |(b,m')::q' -> let b'=(P.oppP (div_coef a b)) in let m''= div_mon m m' in let p1=plusP p' (mult_t_pol b' m'' q') in let (lq,r)=reduce p1 in ((b',m'',q)::lq, r) in let (lq,r) = reduce p in (*info "reduce2_trace:\n"; iter (fun (a,m,s) -> let x = mult_t_pol a m s in info ((stringP x)^"\n")) lq; info "ok\n";*) (map2 (fun c0 q -> let c = fold_left (fun x (a,m,s) -> if equal (ppol s) (ppol q) then plusP x (mult_t_pol a m (polconst (nvar m) (coef_of_int 1))) else x) c0 lq in c) lcp !poldep, r) let homogeneous = ref false let pol_courant = ref polynom0 (*********************************************************************** Completion *) let sugar_flag = ref true let compute_sugar p = fold_left (fun s (a,m) -> max s m.(0)) 0 p let mk_polynom p = new_allpol p (compute_sugar p) let spol ps qs= let p = ppol ps in let q = ppol qs in let m = snd (hd p) in let m'= snd (hd q) in let a = fst (hd p) in let b = fst (hd q) in let p'= tl p in let q'= tl q in let c = (pgcdpos a b) in let m''=(ppcm_mon m m') in let m1 = div_mon m'' m in let m2 = div_mon m'' m' in let fsp p' q' = plusP (mult_t_pol (div_coef b c) m1 p') (mult_t_pol (P.oppP (div_coef a c)) m2 q') in let sp = fsp p' q' in let sps = new_allpol sp (max (m1.(0) + ps.sugar) (m2.(0) + qs.sugar)) in coefpoldep_set sps ps (fsp (polconst (nvar m) (coef_of_int 1)) []); coefpoldep_set sps qs (fsp [] (polconst (nvar m) (coef_of_int 1))); sps let etrangers p p'= let m = snd (hd p) in let m'= snd (hd p') in let d = nvar m in let res=ref true in let i=ref 1 in while (!res) && (!i<=d) do res:= (m.(!i) = 0) || (m'.(!i)=0); i:=!i+1; done; !res (* teste if head monomial of p'' divides lcm of lhead monomials of p and p' *) let div_ppcm p p' p'' = let m = snd (hd p) in let m'= snd (hd p') in let m''= snd (hd p'') in let d = nvar m in let res=ref true in let i=ref 1 in while (!res) && (!i<=d) do res:= ((max m.(!i) m'.(!i)) >= m''.(!i)); i:=!i+1; done; !res (* code from extraction of Laurent Théry Coq program *) type 'poly cpRes = Keep of ('poly list) | DontKeep of ('poly list) let list_rec f0 f1 = let rec f2 = function [] -> f0 | a0::l0 -> f1 a0 l0 (f2 l0) in f2 let addRes i = function Keep h'0 -> Keep (i::h'0) | DontKeep h'0 -> DontKeep (i::h'0) let slice i a q = list_rec (match etrangers (ppol i) (ppol a) with true -> DontKeep [] | false -> Keep []) (fun b q1 rec_ren -> match div_ppcm (ppol i) (ppol a) (ppol b) with true -> DontKeep (b::q1) 
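(* Added illustration of the pair-selection machinery around this point.
   spol p q multiplies p and q by suitable monomials so that their leading
   terms cancel: for p = x^2 + 1 and q = x*y + 1 the least common multiple of
   the leading monomials is x^2*y and the S-polynomial is
   y*(x^2 + 1) - x*(x*y + 1) = y - x.  etrangers tests whether the two
   leading monomials are coprime; by Buchberger's first criterion such a pair
   reduces to zero and slice answers DontKeep for it, while div_ppcm provides
   the lcm-divisibility test behind the second (chain) criterion. *)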
| false -> (match div_ppcm (ppol i) (ppol b) (ppol a) with true -> rec_ren | false -> addRes b rec_ren)) q (* sugar strategy *) let rec addS x l = l @ [x] (* oblige de mettre en queue sinon le certificat deconne *) let addSsugar x l = if !sugar_flag then let sx = x.sugar in let rec insere l = match l with | [] -> [x] | y::l1 -> if sx <= y.sugar then x::l else y::(insere l1) in insere l else addS x l (* ajoute les spolynomes de i avec la liste de polynomes aP, a la liste q *) let genPcPf i aP q = (let rec genPc aP0 = match aP0 with [] -> (fun r -> r) | a::l1 -> (fun l -> (match slice i a l1 with Keep l2 -> addSsugar (spol i a) (genPc l2 l) | DontKeep l2 -> genPc l2 l)) in genPc aP) q let genOCPf h' = list_rec [] (fun a l rec_ren -> genPcPf a l rec_ren) h' (*********************************************************************** critical pairs/s-polynomials *) let ordcpair ((i1,j1),m1) ((i2,j2),m2) = (* let s1 = (max (!allpol.(i1).sugar + m1.(0) - (snd (hd (ppol !allpol.(i1)))).(0)) (!allpol.(j1).sugar + m1.(0) - (snd (hd (ppol !allpol.(j1)))).(0))) in let s2 = (max (!allpol.(i2).sugar + m2.(0) - (snd (hd (ppol !allpol.(i2)))).(0)) (!allpol.(j2).sugar + m2.(0) - (snd (hd (ppol !allpol.(j2)))).(0))) in match compare s1 s2 with | 1 -> 1 |(-1) -> -1 |0 -> compare_mon m1 m2*) compare_mon m1 m2 let sortcpairs lcp = sort ordcpair lcp let mergecpairs l1 l2 = merge ordcpair l1 l2 let ord i j = if i r @ (cpair p q)) [] lq) let cpairs lp = let rec aux l = match l with []|[_] -> [] |p::l1 -> mergecpairs (cpairs1 p l1) (aux l1) in aux lp let critere2 ((i,j),m) lp lcp = exists (fun h -> h.num <> i && h.num <> j && (div_mon_test m (lm h)) && (let c1 = ord i h.num in not (exists (fun (c,_) -> c1 = c) lcp)) && (let c1 = ord j h.num in not (exists (fun (c,_) -> c1 = c) lcp))) lp let critere3 ((i,j),m) lp lcp = exists (fun h -> h.num <> i && h.num <> j && (div_mon_test m (lm h)) && (h.num < j || not (m = ppcm_mon (lm (!allpol.(i))) (lm h))) && (h.num < i || not (m = ppcm_mon (lm (!allpol.(j))) (lm h)))) lp let add_cpairs p lp lcp = mergecpairs (cpairs1 p lp) lcp let step = ref 0 let infobuch p q = if !step = 0 then (info ("[" ^ (string_of_int (length p)) ^ "," ^ (string_of_int (length q)) ^ "]")) (* in lp new polynomials are at the end *) let coef_courant = ref coef1 type certificate = { coef : coef; power : int; gb_comb : poly list list; last_comb : poly list } let test_dans_ideal p lp lp0 = let (c,r) = reduce2 (ppol !pol_courant) lp in info ("remainder: "^(stringPcut r)^"\n"); coef_courant:= P.multP !coef_courant c; pol_courant:= mk_polynom r; if r=[] then (info "polynomial reduced to 0\n"; let lcp = map (fun q -> []) !poldep in let c = !coef_courant in let (lcq,r) = reduce2_trace (emultP c p) lp lcp in info "r ok\n"; info ("r: "^(stringP r)^"\n"); let res=ref (emultP c p) in iter2 (fun cq q -> res:=plusP (!res) (multP cq (ppol q)); ) lcq !poldep; info ("verif sum: "^(stringP (!res))^"\n"); info ("coefficient: "^(stringP (polconst 1 c))^"\n"); let rec aux lp = match lp with |[] -> [] |p::lp -> (map (fun q -> coefpoldep_find p q) lp)::(aux lp) in let coefficient_multiplicateur = c in let liste_polynomes_de_depart = rev lp0 in let polynome_a_tester = p in let liste_des_coefficients_intermediaires = (let lci = rev (aux (rev lp)) in let lci = ref lci (* (map rev lci) *) in iter (fun x -> lci := tl (!lci)) lp0; !lci) in let liste_des_coefficients = map (fun cq -> emultP (coef_of_int (-1)) cq) (rev lcq) in (liste_polynomes_de_depart, polynome_a_tester, {coef = coefficient_multiplicateur; power = 1; gb_comb = 
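(* Reader's summary of what this record is meant to witness: a
   Nullstellensatz certificate stating that coef * p^power is a polynomial
   combination of the input polynomials.  gb_comb lists, for each new
   polynomial produced during the Groebner-basis computation, its expression
   in terms of the previous ones, and last_comb gives the final combination
   for p itself.  For instance, for inputs x and y and p = x + y, a valid
   certificate is coef = 1, power = 1, an empty gb_comb and a last_comb made
   of the two constant polynomials 1, since 1*(x + y) = 1*x + 1*y. *)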
liste_des_coefficients_intermediaires; last_comb = liste_des_coefficients}) ) else ((*info "polynomial not reduced to 0\n"; info ("\nremainder: "^(stringPcut r)^"\n");*) raise NotInIdeal) let divide_rem_with_critical_pair = ref false let list_diff l x = filter (fun y -> y <> x) l let deg_hom p = match p with | [] -> -1 | (a,m)::_ -> m.(0) let pbuchf pq p lp0= info "computation of the Groebner basis\n"; step:=0; Hashtbl.clear hmon; let rec pbuchf (lp, lpc) = infobuch lp lpc; (* step:=(!step+1)mod 10;*) match lpc with [] -> (* info ("List of polynomials:\n"^(fold_left (fun r p -> r^(stringP p)^"\n") "" lp)); info "--------------------\n";*) test_dans_ideal (ppol p) lp lp0 | ((i,j),m) :: lpc2 -> (* info "choosen pair\n";*) if critere3 ((i,j),m) lp lpc2 then (info "c"; pbuchf (lp, lpc2)) else let a = spol !allpol.(i) !allpol.(j) in if !homogeneous && (ppol a)<>[] && deg_hom (ppol a) > deg_hom (ppol !pol_courant) then (info "h"; pbuchf (lp, lpc2)) else (* let sa = a.sugar in*) let (ca,a0)= reduce2 (ppol a) lp in match a0 with [] -> info "0";pbuchf (lp, lpc2) | _ -> (* info "pair reduced\n";*) a.pol := emultP ca (ppol a); let (lca,a0) = reduce2_trace (ppol a) lp (map (fun q -> emultP ca (coefpoldep_find a q)) !poldep) in (* info "paire re-reduced";*) a.pol := a0; (* let a0 = new_allpol a0 sa in*) iter2 (fun c q -> coefpoldep_remove a q; coefpoldep_set a q c) lca !poldep; let a0 = a in info ("\nnew polynomial: "^(stringPcut (ppol a0))^"\n"); let ct = coef1 (* contentP a0 *) in (*info ("content: "^(string_of_coef ct)^"\n");*) poldep:=addS a0 lp; poldepcontent:=addS ct (!poldepcontent); try test_dans_ideal (ppol p) (addS a0 lp) lp0 with NotInIdeal -> let newlpc = add_cpairs a0 lp lpc2 in pbuchf (((addS a0 lp), newlpc)) in pbuchf pq let is_homogeneous p = match p with | [] -> true | (a,m)::p1 -> let d = m.(0) in for_all (fun (b,m') -> m'.(0)=d) p1 (* returns c lp = [pn;...;p1] p lci = [[a(n+1,n);...;a(n+1,1)]; [a(n+2,n+1);...;a(n+2,1)]; ... [a(n+m,n+m-1);...;a(n+m,1)]] lc = [qn+m; ... q1] such that c*p = sum qi*pi where pn+k = a(n+k,n+k-1)*pn+k-1 + ... 
+ a(n+k,1)* p1 *) let in_ideal d lp p = Hashtbl.clear hmon; Hashtbl.clear coefpoldep; nallpol := 0; allpol := Array.create 1000 polynom0; homogeneous := for_all is_homogeneous (p::lp); if !homogeneous then info "homogeneous polynomials\n"; info ("p: "^(stringPcut p)^"\n"); info ("lp:\n"^(fold_left (fun r p -> r^(stringPcut p)^"\n") "" lp)); (*info ("p: "^(stringP p)^"\n"); info ("lp:\n"^(fold_left (fun r p -> r^(stringP p)^"\n") "" lp));*) let lp = map mk_polynom lp in let p = mk_polynom p in initcoefpoldep d lp; coef_courant:=coef1; pol_courant:=p; let (lp1,p1,cert) = try test_dans_ideal (ppol p) lp lp with NotInIdeal -> pbuchf (lp, (cpairs lp)) p lp in info "computed\n"; (map ppol lp1, p1, cert) (* *) end coq-8.4pl2/plugins/nsatz/utile.ml0000640000175000001440000000737512121620060016125 0ustar notinusers(* Printing *) let pr x = if !Flags.debug then (Format.printf "@[%s@]" x; flush(stdout);)else () let prn x = if !Flags.debug then (Format.printf "@[%s\n@]" x; flush(stdout);) else () let prt0 s = () (* print_string s;flush(stdout)*) let prt s = if !Flags.debug then (print_string (s^"\n");flush(stdout)) else () let info s = Flags.if_verbose prerr_string s (* Lists *) let rec list_mem_eq eq x l = match l with [] -> false |y::l1 -> if (eq x y) then true else (list_mem_eq eq x l1) let set_of_list_eq eq l = let res = ref [] in List.iter (fun x -> if not (list_mem_eq eq x (!res)) then res:=x::(!res)) l; List.rev !res (* Memoization f is compatible with nf: f(nf(x)) = f(x) *) let memos s memoire nf f x = try (let v = Hashtbl.find memoire (nf x) in pr s;v) with e when Errors.noncritical e -> (pr "#"; let v = f x in Hashtbl.add memoire (nf x) v; v) (********************************************************************** Eléments minimaux pour un ordre partiel de division. E est un ensemble, avec une multiplication et une division partielle div (la fonction div peut échouer), constant est un prédicat qui définit un sous-ensemble C de E. *) (* Etant donnée une partie A de E, on calcule une partie B de E disjointe de C telle que: - les éléments de A sont des produits d'éléments de B et d'un de C. - B est minimale pour cette propriété. *) let facteurs_liste div constant lp = let lp = List.filter (fun x -> not (constant x)) lp in let rec factor lmin lp = (* lmin: ne se divisent pas entre eux *) match lp with [] -> lmin |p::lp1 -> (let l1 = ref [] in let p_dans_lmin = ref false in List.iter (fun q -> try (let r = div p q in if not (constant r) then l1:=r::(!l1) else p_dans_lmin:=true) with e when Errors.noncritical e -> ()) lmin; if !p_dans_lmin then factor lmin lp1 else if (!l1)=[] (* aucun q de lmin ne divise p *) then (let l1=ref lp1 in let lmin1=ref [] in List.iter (fun q -> try (let r = div q p in if not (constant r) then l1:=r::(!l1)) with e when Errors.noncritical e -> lmin1:=q::(!lmin1)) lmin; factor (List.rev (p::(!lmin1))) !l1) (* au moins un q de lmin divise p non trivialement *) else factor lmin ((!l1)@lp1)) in factor [] lp (* On suppose que tout élément de A est produit d'éléments de B et d'un de C: A et B sont deux tableaux, rend un tableau de couples (élément de C, listes d'indices l) tels que A.(i) = l.(i)_1*Produit(B.(j), j dans l.(i)_2) zero est un prédicat sur E tel que (zero x) => (constant x): si (zero x) est vrai on ne decompose pas x c est un élément quelconque de E. 
*) let factorise_tableau div zero c f l1 = let res = Array.create (Array.length f) (c,[]) in Array.iteri (fun i p -> let r = ref p in let li = ref [] in if not (zero p) then Array.iteri (fun j q -> try (while true do let rr = div !r q in li:=j::(!li); r:=rr; done) with e when Errors.noncritical e -> ()) l1; res.(i)<-(!r,!li)) f; (l1,res) (* exemples: let l = [1;2;6;24;720] and div1 = (fun a b -> if a mod b =0 then a/b else failwith "div") and constant = (fun x -> x<2) and zero = (fun x -> x=0) let f = facteurs_liste div1 constant l factorise_tableau div1 zero 0 (Array.of_list l) (Array.of_list f) *) coq-8.4pl2/plugins/nsatz/nsatz.ml40000640000175000001440000004055212010532755016232 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1 let puis = power_big_int_positive_int (* a et b positifs, résultat positif *) let rec pgcd a b = if equal b coef0 then a else if lt a b then pgcd b a else pgcd b (modulo a b) (* signe du pgcd = signe(a)*signe(b) si non nuls. *) let pgcd2 a b = if equal a coef0 then b else if equal b coef0 then a else let c = pgcd (abs a) (abs b) in if ((lt coef0 a)&&(lt b coef0)) ||((lt coef0 b)&&(lt a coef0)) then opp c else c end (* module Ent = struct type t = Entiers.entiers let of_int = Entiers.ent_of_int let of_num x = Entiers.ent_of_string(Num.string_of_num x) let to_num x = Num.num_of_string (Entiers.string_of_ent x) let equal = Entiers.eq_ent let lt = Entiers.lt_ent let le = Entiers.le_ent let abs = Entiers.abs_ent let plus =Entiers.add_ent let mult = Entiers.mult_ent let sub = Entiers.moins_ent let opp = Entiers.opp_ent let div = Entiers.div_ent let modulo = Entiers.mod_ent let coef0 = Entiers.ent0 let coef1 = Entiers.ent1 let to_string = Entiers.string_of_ent let to_int x = Entiers.int_of_ent x let hash x =Entiers.hash_ent x let signe = Entiers.signe_ent let rec puis p n = match n with 0 -> coef1 |_ -> (mult p (puis p (n-1))) (* a et b positifs, résultat positif *) let rec pgcd a b = if equal b coef0 then a else if lt a b then pgcd b a else pgcd b (modulo a b) (* signe du pgcd = signe(a)*signe(b) si non nuls. 
*) let pgcd2 a b = if equal a coef0 then b else if equal b coef0 then a else let c = pgcd (abs a) (abs b) in if ((lt coef0 a)&&(lt b coef0)) ||((lt coef0 b)&&(lt a coef0)) then opp c else c end *) (* ------------------------------------------------------------------------- *) (* ------------------------------------------------------------------------- *) type vname = string type term = | Zero | Const of Num.num | Var of vname | Opp of term | Add of term * term | Sub of term * term | Mul of term * term | Pow of term * int let const n = if eq_num n num_0 then Zero else Const n let pow(p,i) = if i=1 then p else Pow(p,i) let add = function (Zero,q) -> q | (p,Zero) -> p | (p,q) -> Add(p,q) let mul = function (Zero,_) -> Zero | (_,Zero) -> Zero | (p,Const n) when eq_num n num_1 -> p | (Const n,q) when eq_num n num_1 -> q | (p,q) -> Mul(p,q) let unconstr = mkRel 1 let tpexpr = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PExpr") let ttconst = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEc") let ttvar = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEX") let ttadd = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEadd") let ttsub = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEsub") let ttmul = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEmul") let ttopp = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEopp") let ttpow = lazy (gen_constant "CC" ["setoid_ring";"Ring_polynom"] "PEpow") let datatypes = ["Init";"Datatypes"] let binnums = ["Numbers";"BinNums"] let tlist = lazy (gen_constant "CC" datatypes "list") let lnil = lazy (gen_constant "CC" datatypes "nil") let lcons = lazy (gen_constant "CC" datatypes "cons") let tz = lazy (gen_constant "CC" binnums "Z") let z0 = lazy (gen_constant "CC" binnums "Z0") let zpos = lazy (gen_constant "CC" binnums "Zpos") let zneg = lazy(gen_constant "CC" binnums "Zneg") let pxI = lazy(gen_constant "CC" binnums "xI") let pxO = lazy(gen_constant "CC" binnums "xO") let pxH = lazy(gen_constant "CC" binnums "xH") let nN0 = lazy (gen_constant "CC" binnums "N0") let nNpos = lazy(gen_constant "CC" binnums "Npos") let mkt_app name l = mkApp (Lazy.force name, Array.of_list l) let tlp () = mkt_app tlist [mkt_app tpexpr [Lazy.force tz]] let tllp () = mkt_app tlist [tlp()] let rec mkt_pos n = if n =/ num_1 then Lazy.force pxH else if mod_num n num_2 =/ num_0 then mkt_app pxO [mkt_pos (quo_num n num_2)] else mkt_app pxI [mkt_pos (quo_num n num_2)] let mkt_n n = if n=num_0 then Lazy.force nN0 else mkt_app nNpos [mkt_pos n] let mkt_z z = if z =/ num_0 then Lazy.force z0 else if z >/ num_0 then mkt_app zpos [mkt_pos z] else mkt_app zneg [mkt_pos ((Int 0) -/ z)] let rec mkt_term t = match t with | Zero -> mkt_term (Const num_0) | Const r -> let (n,d) = numdom r in mkt_app ttconst [Lazy.force tz; mkt_z n] | Var v -> mkt_app ttvar [Lazy.force tz; mkt_pos (num_of_string v)] | Opp t1 -> mkt_app ttopp [Lazy.force tz; mkt_term t1] | Add (t1,t2) -> mkt_app ttadd [Lazy.force tz; mkt_term t1; mkt_term t2] | Sub (t1,t2) -> mkt_app ttsub [Lazy.force tz; mkt_term t1; mkt_term t2] | Mul (t1,t2) -> mkt_app ttmul [Lazy.force tz; mkt_term t1; mkt_term t2] | Pow (t1,n) -> if (n = 0) then mkt_app ttconst [Lazy.force tz; mkt_z num_1] else mkt_app ttpow [Lazy.force tz; mkt_term t1; mkt_n (num_of_int n)] let rec parse_pos p = match kind_of_term p with | App (a,[|p2|]) -> if eq_constr a (Lazy.force pxO) then num_2 */ (parse_pos p2) else num_1 +/ (num_2 */ (parse_pos p2)) | _ -> num_1 let parse_z z = match kind_of_term z with | 
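(* Added reminder on the binary encoding handled by mkt_pos and parse_pos
   above: xH is 1, xO p is 2p and xI p is 2p+1, so 6 is represented as
   xO (xI xH) and 13 as xI (xO (xI xH)).  mkt_pos builds these Coq terms
   from a Num.num and parse_pos inverts the construction. *)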
App (a,[|p2|]) -> if eq_constr a (Lazy.force zpos) then parse_pos p2 else (num_0 -/ (parse_pos p2)) | _ -> num_0 let parse_n z = match kind_of_term z with | App (a,[|p2|]) -> parse_pos p2 | _ -> num_0 let rec parse_term p = match kind_of_term p with | App (a,[|_;p2|]) -> if eq_constr a (Lazy.force ttvar) then Var (string_of_num (parse_pos p2)) else if eq_constr a (Lazy.force ttconst) then Const (parse_z p2) else if eq_constr a (Lazy.force ttopp) then Opp (parse_term p2) else Zero | App (a,[|_;p2;p3|]) -> if eq_constr a (Lazy.force ttadd) then Add (parse_term p2, parse_term p3) else if eq_constr a (Lazy.force ttsub) then Sub (parse_term p2, parse_term p3) else if eq_constr a (Lazy.force ttmul) then Mul (parse_term p2, parse_term p3) else if eq_constr a (Lazy.force ttpow) then Pow (parse_term p2, int_of_num (parse_n p3)) else Zero | _ -> Zero let rec parse_request lp = match kind_of_term lp with | App (_,[|_|]) -> [] | App (_,[|_;p;lp1|]) -> (parse_term p)::(parse_request lp1) |_-> assert false let nvars = ref 0 let set_nvars_term t = let rec aux t = match t with | Zero -> () | Const r -> () | Var v -> let n = int_of_string v in nvars:= max (!nvars) n | Opp t1 -> aux t1 | Add (t1,t2) -> aux t1; aux t2 | Sub (t1,t2) -> aux t1; aux t2 | Mul (t1,t2) -> aux t1; aux t2 | Pow (t1,n) -> aux t1 in aux t let string_of_term p = let rec aux p = match p with | Zero -> "0" | Const r -> string_of_num r | Var v -> "x"^v | Opp t1 -> "(-"^(aux t1)^")" | Add (t1,t2) -> "("^(aux t1)^"+"^(aux t2)^")" | Sub (t1,t2) -> "("^(aux t1)^"-"^(aux t2)^")" | Mul (t1,t2) -> "("^(aux t1)^"*"^(aux t2)^")" | Pow (t1,n) -> (aux t1)^"^"^(string_of_int n) in aux p (*********************************************************************** Coefficients: recursive polynomials *) module Coef = BigInt (*module Coef = Ent*) module Poly = Polynom.Make(Coef) module PIdeal = Ideal.Make(Poly) open PIdeal (* term to sparse polynomial varaibles <=np are in the coefficients *) let term_pol_sparse np t= let d = !nvars in let rec aux t = (* info ("conversion de: "^(string_of_term t)^"\n");*) let res = match t with | Zero -> zeroP | Const r -> if r = num_0 then zeroP else polconst d (Poly.Pint (Coef.of_num r)) | Var v -> let v = int_of_string v in if v <= np then polconst d (Poly.x v) else gen d v | Opp t1 -> oppP (aux t1) | Add (t1,t2) -> plusP (aux t1) (aux t2) | Sub (t1,t2) -> plusP (aux t1) (oppP (aux t2)) | Mul (t1,t2) -> multP (aux t1) (aux t2) | Pow (t1,n) -> puisP (aux t1) n in (* info ("donne: "^(stringP res)^"\n");*) res in let res= aux t in res (* sparse polynomial to term *) let polrec_to_term p = let rec aux p = match p with |Poly.Pint n -> const (Coef.to_num n) |Poly.Prec (v,coefs) -> let res = ref Zero in Array.iteri (fun i c -> res:=add(!res, mul(aux c, pow (Var (string_of_int v), i)))) coefs; !res in aux p (* approximation of the Horner form used in the tactic ring *) let pol_sparse_to_term n2 p = (* info "pol_sparse_to_term ->\n";*) let p = PIdeal.repr p in let rec aux p = match p with [] -> const (num_of_string "0") | (a,m)::p1 -> let n = (Array.length m)-1 in let (i0,e0) = List.fold_left (fun (r,d) (a,m) -> let i0= ref 0 in for k=1 to n do if m.(k)>0 then i0:=k done; if !i0 = 0 then (r,d) else if !i0 > r then (!i0, m.(!i0)) else if !i0 = r && m.(!i0) if m.(i0)>=e0 then (m.(i0)<-m.(i0)-e0; p1:=(a,m)::(!p1)) else p2:=(a,m)::(!p2)) p; let vm = if e0=1 then Var (string_of_int (i0)) else pow (Var (string_of_int (i0)),e0) in add(mul(vm, aux (List.rev (!p1))), aux (List.rev (!p2)))) in (*info "-> pol_sparse_to_term\n";*) aux p let 
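(* Added note on the data flow around term_pol_sparse and pol_sparse_to_term
   (a reading of the code, not original documentation): a sparse polynomial
   is a list of pairs (coefficient, exponent array), leading term first,
   where slot 0 of the array apparently caches the total degree (this is how
   compute_sugar and deg_hom read it) and slot i the exponent of variable i.
   Variables with index at most np, the number of parameters, are pushed
   into the recursive Poly coefficients by term_pol_sparse, and
   pol_sparse_to_term converts back to an approximate Horner form for the
   ring tactic. *)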
rec remove_list_tail l i = let rec aux l i = if l=[] then [] else if i<0 then l else if i=0 then List.tl l else match l with |(a::l1) -> a::(aux l1 (i-1)) |_ -> assert false in List.rev (aux (List.rev l) i) (* lq = [cn+m+1 n+m ...cn+m+1 1] lci=[[cn+1 n,...,cn1 1] ... [cn+m n+m-1,...,cn+m 1]] removes intermediate polynomials not useful to compute the last one. *) let remove_zeros zero lci = let n = List.length (List.hd lci) in let m=List.length lci in let u = Array.create m false in let rec utiles k = if k>=m then () else ( u.(k)<-true; let lc = List.nth lci k in for i=0 to List.length lc - 1 do if not (zero (List.nth lc i)) then utiles (i+k+1); done) in utiles 0; let lr = ref [] in for i=0 to m-1 do if u.(i) then lr:=(List.nth lci i)::(!lr) done; let lr=List.rev !lr in let lr = List.map (fun lc -> let lcr=ref lc in for i=0 to m-1 do if not u.(i) then lcr:=remove_list_tail !lcr (m-i+(n-m)) done; !lcr) lr in info ("useless spolynomials: " ^string_of_int (m-List.length lr)^"\n"); info ("useful spolynomials: " ^string_of_int (List.length lr)^"\n"); lr let theoremedeszeros lpol p = let t1 = Unix.gettimeofday() in let m = !nvars in let (lp0,p,cert) = in_ideal m lpol p in let lpc = List.rev !poldepcontent in info ("time: "^Format.sprintf "@[%10.3f@]s\n" (Unix.gettimeofday ()-.t1)); (cert,lp0,p,lpc) open Ideal let theoremedeszeros_termes lp = nvars:=0;(* mise a jour par term_pol_sparse *) List.iter set_nvars_term lp; match lp with | Const (Int sugarparam)::Const (Int nparam)::lp -> ((match sugarparam with |0 -> info "computation without sugar\n"; lexico:=false; sugar_flag := false; divide_rem_with_critical_pair := false |1 -> info "computation with sugar\n"; lexico:=false; sugar_flag := true; divide_rem_with_critical_pair := false |2 -> info "ordre lexico computation without sugar\n"; lexico:=true; sugar_flag := false; divide_rem_with_critical_pair := false |3 -> info "ordre lexico computation with sugar\n"; lexico:=true; sugar_flag := true; divide_rem_with_critical_pair := false |4 -> info "computation without sugar, division by pairs\n"; lexico:=false; sugar_flag := false; divide_rem_with_critical_pair := true |5 -> info "computation with sugar, division by pairs\n"; lexico:=false; sugar_flag := true; divide_rem_with_critical_pair := true |6 -> info "ordre lexico computation without sugar, division by pairs\n"; lexico:=true; sugar_flag := false; divide_rem_with_critical_pair := true |7 -> info "ordre lexico computation with sugar, division by pairs\n"; lexico:=true; sugar_flag := true; divide_rem_with_critical_pair := true | _ -> error "nsatz: bad parameter" ); let m= !nvars in let lvar=ref [] in for i=m downto 1 do lvar:=["x"^(string_of_int i)^""]@(!lvar); done; lvar:=["a";"b";"c";"d";"e";"f";"g";"h";"i";"j";"k";"l";"m";"n";"o";"p";"q";"r";"s";"t";"u";"v";"w";"x";"y";"z"] @ (!lvar); (* pour macaulay *) name_var:=!lvar; let lp = List.map (term_pol_sparse nparam) lp in match lp with | [] -> assert false | p::lp1 -> let lpol = List.rev lp1 in let (cert,lp0,p,_lct) = theoremedeszeros lpol p in info "cert ok\n"; let lc = cert.last_comb::List.rev cert.gb_comb in match remove_zeros (fun x -> x=zeroP) lc with | [] -> assert false | (lq::lci) -> (* lci commence par les nouveaux polynomes *) let m= !nvars in let c = pol_sparse_to_term m (polconst m cert.coef) in let r = Pow(Zero,cert.power) in let lci = List.rev lci in let lci = List.map (List.map (pol_sparse_to_term m)) lci in let lq = List.map (pol_sparse_to_term m) lq in info ("number of parametres: "^string_of_int nparam^"\n"); info "term 
computed\n"; (c,r,lci,lq) ) |_ -> assert false (* version avec hash-consing du certificat: let nsatz lpol = Hashtbl.clear Dansideal.hmon; Hashtbl.clear Dansideal.coefpoldep; Hashtbl.clear Dansideal.sugartbl; Hashtbl.clear Polynomesrec.hcontentP; init_constants (); let lp= parse_request lpol in let (_lp0,_p,c,r,_lci,_lq as rthz) = theoremedeszeros_termes lp in let certif = certificat_vers_polynome_creux rthz in let certif = hash_certif certif in let certif = certif_term certif in let c = mkt_term c in info "constr computed\n"; (c, certif) *) let nsatz lpol = let lp= parse_request lpol in let (c,r,lci,lq) = theoremedeszeros_termes lp in let res = [c::r::lq]@lci in let res = List.map (fun lx -> List.map mkt_term lx) res in let res = List.fold_right (fun lt r -> let ltterm = List.fold_right (fun t r -> mkt_app lcons [mkt_app tpexpr [Lazy.force tz];t;r]) lt (mkt_app lnil [mkt_app tpexpr [Lazy.force tz]]) in mkt_app lcons [tlp ();ltterm;r]) res (mkt_app lnil [tlp ()]) in info "term computed\n"; res let return_term t = let a = mkApp(gen_constant "CC" ["Init";"Logic"] "refl_equal",[|tllp ();t|]) in generalize [a] let nsatz_compute t = let lpol = try nsatz t with Ideal.NotInIdeal -> error "nsatz cannot solve this problem" in return_term lpol TACTIC EXTEND nsatz_compute | [ "nsatz_compute" constr(lt) ] -> [ nsatz_compute lt ] END coq-8.4pl2/plugins/nsatz/nsatz_plugin.mllib0000640000175000001440000000005311401704410020173 0ustar notinusersUtile Polynom Ideal Nsatz Nsatz_plugin_mod coq-8.4pl2/plugins/nsatz/polynom.mli0000640000175000001440000000574612010532755016663 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> bool val lt : t -> t -> bool val le : t -> t -> bool val abs : t -> t val plus : t -> t -> t val mult : t -> t -> t val sub : t -> t -> t val opp : t -> t val div : t -> t -> t val modulo : t -> t -> t val puis : t -> int -> t val pgcd : t -> t -> t val hash : t -> int val of_num : Num.num -> t val to_string : t -> string end module type S = sig type coef type variable = int type t = Pint of coef | Prec of variable * t array val of_num : Num.num -> t val x : variable -> t val monome : variable -> int -> t val is_constantP : t -> bool val is_zero : t -> bool val max_var_pol : t -> variable val max_var_pol2 : t -> variable val max_var : t array -> variable val equal : t -> t -> bool val norm : t -> t val deg : variable -> t -> int val deg_total : t -> int val copyP : t -> t val coef : variable -> int -> t -> t val plusP : t -> t -> t val content : t -> coef val div_int : t -> coef -> t val vire_contenu : t -> t val vars : t -> variable list val int_of_Pint : t -> coef val multx : int -> variable -> t -> t val multP : t -> t -> t val deriv : variable -> t -> t val oppP : t -> t val moinsP : t -> t -> t val puisP : t -> int -> t val ( @@ ) : t -> t -> t val ( -- ) : t -> t -> t val ( ^^ ) : t -> int -> t val coefDom : variable -> t -> t val coefConst : variable -> t -> t val remP : variable -> t -> t val coef_int_tete : t -> coef val normc : t -> t val coef_constant : t -> coef val univ : bool ref val string_of_var : int -> string val nsP : int ref val to_string : t -> string val printP : t -> unit val print_tpoly : t array -> unit val print_lpoly : t list -> unit val quo_rem_pol : t -> t -> variable -> t * t val div_pol : t -> t -> variable -> t val divP : t -> t -> t val div_pol_rat : t -> t -> bool val pseudo_div : t -> t -> variable -> t * t * int * t val pgcdP : t -> t -> t val 
pgcd_pol : t -> t -> variable -> t val content_pol : t -> variable -> t val pgcd_coef_pol : t -> t -> variable -> t val pgcd_pol_rec : t -> t -> variable -> t val gcd_sub_res : t -> t -> variable -> t val gcd_sub_res_rec : t -> t -> t -> t -> int -> variable -> t val lazard_power : t -> t -> int -> variable -> t val hash : t -> int module Hashpol : Hashtbl.S with type key=t end module Make (C:Coef) : S with type coef = C.t coq-8.4pl2/plugins/xml/0000750000175000001440000000000012127276541014116 5ustar notinuserscoq-8.4pl2/plugins/xml/xmlcommand.mli0000640000175000001440000000434511422606552016763 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string option -> unit (* show dest *) (* where dest is either None (for stdout) or (Some filename) *) (* pretty prints via Xml.pp the proof in progress on dest *) val show : string option -> unit (* set_print_proof_tree f *) (* sets a callback function f to export the proof_tree to XML *) val set_print_proof_tree : (string -> Evd.evar_map -> Proof_type.proof_tree -> Term.constr Proof2aproof.ProofTreeHash.t -> Proof_type.proof_tree Proof2aproof.ProofTreeHash.t -> string Acic.CicHash.t -> Xml.token Stream.t) -> unit coq-8.4pl2/plugins/xml/xml.mli0000640000175000001440000000433011422606552015416 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (string * string) list -> token Stream.t val xml_nempty : string -> (string * string) list -> token Stream.t -> token Stream.t val xml_cdata : string -> token Stream.t val pp_ch : token Stream.t -> out_channel -> unit (* The pretty printer for streams of token *) (* Usage: *) (* pp tokens None pretty prints the output on stdout *) (* pp tokens (Some filename) pretty prints the output on the file filename *) val pp : token Stream.t -> string option -> unit coq-8.4pl2/plugins/xml/dumptree.ml40000640000175000001440000001024512010532755016355 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* try if Sign.lookup_named id osign = (id,c,ty) then sign else raise Different with Not_found | Different -> Environ.push_named_context_val d sign) sign ~init:Environ.empty_named_context_val ;; let pr_tactic_xml = function | TacArg (_,Tacexp t) -> str "" | t -> str "" ;; let pr_proof_instr_xml instr = Ppdecl_proof.pr_proof_instr (Global.env()) instr ;; let pr_rule_xml pr = function | Prim r -> str "" | Nested(cmpd, subtree) -> hov 2 (str "" ++ fnl () ++ begin match cmpd with Tactic (texp, _) -> pr_tactic_xml texp end ++ fnl () ++ pr subtree ) ++ fnl () ++ str "" | Daimon -> str "" | Decl_proof _ -> str "" ;; let pr_var_decl_xml env (id,c,typ) = let ptyp = print_constr_env env typ in match c with | None -> (str "") | Some c -> (* Force evaluation *) let pb = print_constr_env env c in (str "") ;; let pr_rel_decl_xml env (na,c,typ) = let pbody = match c with | None -> mt () | Some c -> (* Force evaluation *) let pb = print_constr_env env c in (str" body=\"" ++ xmlstream pb ++ str "\"") in let ptyp = print_constr_env env typ in let pid = match na with | Anonymous -> mt () | Name id -> str " id=\"" ++ pr_id id ++ str "\"" in (str "") ;; let pr_context_xml env = let sign_env = fold_named_context (fun env d pp -> pp ++ pr_var_decl_xml env d) env ~init:(mt ()) in let db_env = fold_rel_context (fun env d pp -> pp ++ 
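(* Added comment: the goal context is serialised by folding pr_var_decl_xml
   over the named hypotheses and pr_rel_decl_xml over the rel (de Bruijn)
   context; each declaration pretty-prints its type, and its body when there
   is one, with print_constr_env and feeds the result through xmlstream. *)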
pr_rel_decl_xml env d) env ~init:(mt ()) in (sign_env ++ db_env) ;; let pr_subgoal_metas_xml metas env= let pr_one (meta, typ) = fnl () ++ str "" in List.fold_left (++) (mt ()) (List.map pr_one metas) ;; let pr_goal_xml sigma g = let env = try Goal.V82.unfiltered_env sigma g with _ -> empty_env in if Decl_mode.try_get_info sigma g = None then (hov 2 (str "" ++ fnl () ++ str "" ++ (pr_context_xml env)) ++ fnl () ++ str "") else (hov 2 (str "" ++ (pr_context_xml env)) ++ fnl () ++ str "") ;; let print_proof_xml () = Util.anomaly "Dump Tree command not supported in this version." VERNAC COMMAND EXTEND DumpTree [ "Dump" "Tree" ] -> [ print_proof_xml () ] END coq-8.4pl2/plugins/xml/xmlentries.ml40000640000175000001440000000261511422606552016727 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ Some fn ] | [ ] -> [ None ] END (* Print XML and Show XML *) VERNAC COMMAND EXTEND Xml | [ "Print" "XML" filename(fn) global(qid) ] -> [ Xmlcommand.print_ref qid fn ] | [ "Show" "XML" filename(fn) "Proof" ] -> [ Xmlcommand.show fn ] END coq-8.4pl2/plugins/xml/proof2aproof.ml0000640000175000001440000000571611453034060017065 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t | T.Cast (c1,k,c2) -> T.mkCast (aux c1, k, aux c2) | T.Prod (na,c1,c2) -> T.mkProd (na, aux c1, aux c2) | T.Lambda (na,t,c) -> T.mkLambda (na, aux t, aux c) | T.LetIn (na,b,t,c) -> T.mkLetIn (na, aux b, aux t, aux c) | T.App (c,l) -> let c' = aux c in let l' = Array.map aux l in (match T.kind_of_term c' with T.App (c'',l'') -> T.mkApp (c'', Array.append l'' l') | T.Cast (he,_,_) -> (match T.kind_of_term he with T.App (c'',l'') -> T.mkApp (c'', Array.append l'' l') | _ -> T.mkApp (c', l') ) | _ -> T.mkApp (c', l')) | T.Evar (e,l) when Evd.mem sigma e & Evd.is_defined sigma e -> aux (Evd.existential_value sigma (e,l)) | T.Evar (e,l) -> T.mkEvar (e, Array.map aux l) | T.Case (ci,p,c,bl) -> T.mkCase (ci, aux p, aux c, Array.map aux bl) | T.Fix (ln,(lna,tl,bl)) -> T.mkFix (ln,(lna,Array.map aux tl,Array.map aux bl)) | T.CoFix(ln,(lna,tl,bl)) -> T.mkCoFix (ln,(lna,Array.map aux tl,Array.map aux bl)) in aux ;; module ProofTreeHash = Hashtbl.Make (struct type t = Proof_type.proof_tree let equal = (==) let hash = Hashtbl.hash end) ;; let extract_open_proof sigma pf = (* Deactivated and candidate for removal. (Apr. 2010) *) () let extract_open_pftreestate pts = (* Deactivated and candidate for removal. (Apr. 
2010) *) () coq-8.4pl2/plugins/xml/unshare.mli0000640000175000001440000000253311422606552016266 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool) -> 'a -> 'a coq-8.4pl2/plugins/xml/xml.ml40000640000175000001440000000615411422606552015337 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ] let xml_nempty name attrs content = [< 'NEmpty(name,attrs,content) >] let xml_cdata str = [< 'Str str >] (* Usage: *) (* pp tokens None pretty prints the output on stdout *) (* pp tokens (Some filename) pretty prints the output on the file filename *) let pp_ch strm channel = let rec pp_r m = parser [< 'Str a ; s >] -> print_spaces m ; fprint_string (a ^ "\n") ; pp_r m s | [< 'Empty(n,l) ; s >] -> print_spaces m ; fprint_string ("<" ^ n) ; List.iter (function (n,v) -> fprint_string (" " ^ n ^ "=\"" ^ v ^ "\"")) l; fprint_string "/>\n" ; pp_r m s | [< 'NEmpty(n,l,c) ; s >] -> print_spaces m ; fprint_string ("<" ^ n) ; List.iter (function (n,v) -> fprint_string (" " ^ n ^ "=\"" ^ v ^ "\"")) l; fprint_string ">\n" ; pp_r (m+1) c ; print_spaces m ; fprint_string ("\n") ; pp_r m s | [< >] -> () and print_spaces m = for i = 1 to m do fprint_string " " done and fprint_string str = output_string channel str in pp_r 0 strm ;; let pp strm fn = match fn with Some filename -> let filename = filename ^ ".xml" in let ch = open_out filename in pp_ch strm ch; close_out ch ; print_string ("\nWriting on file \"" ^ filename ^ "\" was successful\n"); flush stdout | None -> pp_ch strm stdout ;; coq-8.4pl2/plugins/xml/README0000640000175000001440000002630011160567762015004 0ustar notinusers(******************************************************************************) (* Copyright (C) 2000-2004, Claudio Sacerdoti Coen *) (* Project Helm (http://helm.cs.unibo.it) *) (* Project MoWGLI (http://mowgli.cs.unibo.it) *) (* *) (* Coq Exportation to XML *) (* *) (******************************************************************************) This module provides commands to export a piece of Coq library in XML format. Only the information relevant to proof-checking and proof-rendering is exported, i.e. only the CIC proof objects (lambda-terms). This document is tructured in the following way: 1. User documentation 1.1. New vernacular commands available 1.2. New coqc/coqtop flags and suggested usage 1.3. How to exploit the XML files 2. Technical informations 2.1. Inner-types 2.2. CIC with Explicit Named Substitutions 2.3. The CIC with Explicit Named Substitutions XML DTD ================================================================================ USER DOCUMENTATION ================================================================================ ======================================= 1.1. New vernacular commands available: ======================================= The new commands are: Print XML qualid. It prints in XML (to standard output) the object whose qualified name is qualid and its inner-types (see Sect. 2.1). The inner-types are always printed in their own XML file. If the object is a constant, its type and body are also printed as two distinct XML files. The object printed is always the most discharged form of the object (see the Section command of the Coq manual). Print XML File "filename" qualid. Similar to "Print XML qualid". 
The generated files are stored on the hard-disk using the base file name "filename". Show XML Proof. It prints in XML the current proof in progress. Its inner-types are also printed. Show XML File "filename" Proof. Similar to "Show XML Proof". The generated files are stored on the hard-disk using the base file name "filename". The verbosity of the previous commands is raised if the configuration parameter verbose of xmlcommand.ml is set to true at compile time. ============================================== 1.2. New coqc/coqtop flags and suggested usage ============================================== The following flag has been added to coqc and coqtop: -xml export XML files either to the hierarchy rooted in the directory $COQ_XML_LIBRARY_ROOT (if the environment variable is set) or to stdout (if unset) If the flag is set, every definition or declaration is immediately exported to XML. The XML files describe the user-provided non-discharged form of the definition or declaration. The coq_makefile utility has also been modified to easily allow XML exportation: make COQ_XML=-xml (or, equivalently, setting the environment variable COQ_XML) The suggested usage of the module is the following: 1. add to your own contribution a valid Make file and use coq_makefile to generate the Makefile from the Make file. *WARNING:* Since logical names are used to structure the XML hierarchy, always add to the Make file at least one "-R" option to map physical file names to logical module paths. See the Coq manual for further informations on the -R flag. 2. set $COQ_XML_LIBRARY_ROOT to the directory where the XML file hierarchy must be physically rooted. 3. compile your contribution with "make COQ_XML=-xml" ================================= 1.3. How to exploit the XML files ================================= Once the information is exported to XML, it becomes possible to implement services that are completely Coq-independent. Projects HELM and MoWGLI already provide rendering, searching and data mining functionalities. In particular, the standard library and contributions of Coq can be browsed and searched on the HELM web site: http://helm.cs.unibo.it/library.html If you want to publish your own contribution so that it is included in the HELM library, use the MoWGLI prototype upload form: http://mowgli.cs.unibo.it ================================================================================ TECHNICAL INFORMATIONS ================================================================================ ========================== 2.1. Inner-types ========================== In order to do proof-rendering (for example in natural language), some redundant typing information is required, i.e. the type of at least some of the subterms of the bodies and types. So, each new command described in section 1.1 print not only the object, but also another XML file in which you can find the type of all the subterms of the terms of the printed object which respect the following conditions: 1. It's sort is Prop or CProp (the "sort"-like definition used in CoRN to type computationally relevant predicative propositions). 2. It is not a cast or an atomic term, i.e. it's root is not a CAST, REL, VAR, MUTCONSTR or CONST. 3. If it's root is a LAMBDA, then the root's parent node is not a LAMBDA, i.e. only the type of the outer LAMBDA of a block of nested LAMBDAs is printed. 
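For instance (example added for clarity), if a proof term contains
(fun (a:A) (b:B) => t) and its sort is Prop, only the type of the whole
two-abstraction block, i.e. A -> B -> T where T is the type of t, is
exported; the type of the inner abstraction (fun (b:B) => t) is not.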
The rationale for the 3rd condition is that the type of the inner LAMBDAs could be easily computed starting from the type of the outer LAMBDA; moreover, the types of the inner LAMBDAs requires a lot of disk/memory space: removing the 3rd condition leads to XML file that are two times as big as the ones exported appling the 3rd condition. ========================================== 2.2. CIC with Explicit Named Substitutions ========================================== The exported files are and XML encoding of the lambda-terms used by the Coq system. The implementative details of the Coq system are hidden as much as possible, so that the XML DTD is a straightforward encoding of the Calculus of (Co)Inductive Constructions. Nevertheless, there is a feature of the Coq system that can not be hidden in a completely satisfactory way: discharging. In Coq it is possible to open a section, declare variables and use them in the rest of the section as if they were axiom declarations. Once the section is closed, every definition and theorem in the section is discharged by abstracting it over the section variables. Variable declarations as well as section declarations are entirely dropped. Since we are interested in an XML encoding of definitions and theorems as close as possible to those directly provided the user, we do not want to export discharged forms. Exporting non-discharged theorem and definitions together with theorems that rely on the discharged forms obliges the tools that work on the XML encoding to implement discharging to achieve logical consistency. Moreover, the rendering of the files can be misleading, since hyperlinks can be shown between occurrences of the discharge form of a definition and the non-discharged definition, that are different objects. To overcome the previous limitations, Claudio Sacerdoti Coen developed in his PhD. thesis an extension of CIC, called Calculus of (Co)Inductive Constructions with Explicit Named Substitutions, that is a slight extension of CIC where discharging is not necessary. The DTD of the exported XML files describes constants, inductive types and variables of the Calculus of (Co)Inductive Constructions with Explicit Named Substitions. The conversion to the new calculus is performed during the exportation phase. The following example shows a very small Coq development together with its version in CIC with Explicit Named Substitutions. # CIC version: # Section S. Variable A : Prop. Definition impl := A -> A. Theorem t : impl. (* uses the undischarged form of impl *) Proof. exact (fun (a:A) => a). Qed. End S. Theorem t' : (impl False). (* uses the discharged form of impl *) Proof. exact (t False). (* uses the discharged form of t *) Qed. # Corresponding CIC with Explicit Named Substitutions version: # Section S. Variable A : Prop. Definition impl(A) := A -> A. (* theorems and definitions are explicitly abstracted over the variables. The name is sufficient to completely describe the abstraction *) Theorem t(A) : impl. (* impl where A is not instantiated *) Proof. exact (fun (a:A) => a). Qed. End S. Theorem t'() : impl{False/A}. (* impl where A is instantiated with False Notice that t' does not depend on A *) Proof. exact t{False/A}. (* t where A is instantiated with False *) Qed. Further details on the typing and reduction rules of the calculus can be found in Claudio Sacerdoti Coen PhD. dissertation, where the consistency of the calculus is also proved. ====================================================== 2.3. 
The CIC with Explicit Named Substitutions XML DTD ====================================================== A copy of the DTD can be found in the file "cic.dtd". is the root element of the files that correspond to constant types. is the root element of the files that correspond to constant bodies. It is used only for closed definitions and theorems (i.e. when no metavariable occurs in the body or type of the constant) is the root element of the file that correspond to the body of a constant that depends on metavariables (e.g. unfinished proofs) is the root element of the files that correspond to variables is the root element of the files that correspond to blocks of mutually defined inductive definitions The elements ,,,,,,,, ,, ,,,, and are used to encode the constructors of CIC. The sort or type attribute of the element, if present, is respectively the sort or the type of the term, that is a sort because of the typing rules of CIC. The element correspond to the application of an explicit named substitution to its first argument, that is a reference to a definition or declaration in the environment. All the other elements are just syntactic sugar. coq-8.4pl2/plugins/xml/acic2Xml.ml40000640000175000001440000003432611422606552016203 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Util.anomaly "find_last_id: empty list" | [id,_,_] -> id | _::tl -> find_last_id tl ;; let export_existential = string_of_int let print_term ids_to_inner_sorts = let rec aux = let module A = Acic in let module N = Names in let module X = Xml in function A.ARel (id,n,idref,b) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_empty "REL" ["value",(string_of_int n) ; "binder",(N.string_of_id b) ; "id",id ; "idref",idref; "sort",sort] | A.AVar (id,uri) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_empty "VAR" ["uri", uri ; "id",id ; "sort",sort] | A.AEvar (id,n,l) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_nempty "META" ["no",(export_existential n) ; "id",id ; "sort",sort] (List.fold_left (fun i t -> [< i ; X.xml_nempty "substitution" [] (aux t) >] ) [< >] (List.rev l)) | A.ASort (id,s) -> let string_of_sort = match Term.family_of_sort s with Term.InProp -> "Prop" | Term.InSet -> "Set" | Term.InType -> "Type" in X.xml_empty "SORT" ["value",string_of_sort ; "id",id] | A.AProds (prods,t) -> let last_id = find_last_id prods in let sort = Hashtbl.find ids_to_inner_sorts last_id in X.xml_nempty "PROD" ["type",sort] [< List.fold_left (fun i (id,binder,s) -> let sort = Hashtbl.find ids_to_inner_sorts (Cic2acic.source_id_of_id id) in let attrs = ("id",id)::("type",sort):: match binder with Names.Anonymous -> [] | Names.Name b -> ["binder",Names.string_of_id b] in [< X.xml_nempty "decl" attrs (aux s) ; i >] ) [< >] prods ; X.xml_nempty "target" [] (aux t) >] | A.ACast (id,v,t) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_nempty "CAST" ["id",id ; "sort",sort] [< X.xml_nempty "term" [] (aux v) ; X.xml_nempty "type" [] (aux t) >] | A.ALambdas (lambdas,t) -> let last_id = find_last_id lambdas in let sort = Hashtbl.find ids_to_inner_sorts last_id in X.xml_nempty "LAMBDA" ["sort",sort] [< List.fold_left (fun i (id,binder,s) -> let sort = Hashtbl.find ids_to_inner_sorts (Cic2acic.source_id_of_id id) in let attrs = ("id",id)::("type",sort):: match binder with Names.Anonymous -> [] | Names.Name b -> ["binder",Names.string_of_id b] in [< X.xml_nempty "decl" attrs (aux s) ; i >] ) [< >] 
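(* Added comment: the PROD and LAMBDA cases emit one decl element per binder
   of the block (with its id, the sort recorded in ids_to_inner_sorts and,
   when the binder is named, its name), followed by a single target element
   for the body.  The LETIN case below follows the same scheme with def
   elements instead of decl. *)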
lambdas ; X.xml_nempty "target" [] (aux t) >] | A.ALetIns (letins,t) -> let last_id = find_last_id letins in let sort = Hashtbl.find ids_to_inner_sorts last_id in X.xml_nempty "LETIN" ["sort",sort] [< List.fold_left (fun i (id,binder,s) -> let sort = Hashtbl.find ids_to_inner_sorts (Cic2acic.source_id_of_id id) in let attrs = ("id",id)::("sort",sort):: match binder with Names.Anonymous -> assert false | Names.Name b -> ["binder",Names.string_of_id b] in [< X.xml_nempty "def" attrs (aux s) ; i >] ) [< >] letins ; X.xml_nempty "target" [] (aux t) >] | A.AApp (id,li) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_nempty "APPLY" ["id",id ; "sort",sort] [< (List.fold_left (fun i x -> [< i ; (aux x) >]) [<>] li) >] | A.AConst (id,subst,uri) -> let sort = Hashtbl.find ids_to_inner_sorts id in let attrs = ["uri", uri ; "id",id ; "sort",sort] in aux_subst (X.xml_empty "CONST" attrs) subst | A.AInd (id,subst,uri,i) -> let attrs = ["uri", uri ; "noType",(string_of_int i) ; "id",id] in aux_subst (X.xml_empty "MUTIND" attrs) subst | A.AConstruct (id,subst,uri,i,j) -> let sort = Hashtbl.find ids_to_inner_sorts id in let attrs = ["uri", uri ; "noType",(string_of_int i) ; "noConstr",(string_of_int j) ; "id",id ; "sort",sort] in aux_subst (X.xml_empty "MUTCONSTRUCT" attrs) subst | A.ACase (id,uri,typeno,ty,te,patterns) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_nempty "MUTCASE" ["uriType", uri ; "noType", (string_of_int typeno) ; "id", id ; "sort",sort] [< X.xml_nempty "patternsType" [] [< (aux ty) >] ; X.xml_nempty "inductiveTerm" [] [< (aux te) >] ; List.fold_left (fun i x -> [< i ; X.xml_nempty "pattern" [] [< aux x >] >]) [<>] patterns >] | A.AFix (id, no, funs) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_nempty "FIX" ["noFun", (string_of_int no) ; "id",id ; "sort",sort] [< List.fold_left (fun i (id,fi,ai,ti,bi) -> [< i ; X.xml_nempty "FixFunction" ["id",id ; "name", (Names.string_of_id fi) ; "recIndex", (string_of_int ai)] [< X.xml_nempty "type" [] [< aux ti >] ; X.xml_nempty "body" [] [< aux bi >] >] >] ) [<>] funs >] | A.ACoFix (id,no,funs) -> let sort = Hashtbl.find ids_to_inner_sorts id in X.xml_nempty "COFIX" ["noFun", (string_of_int no) ; "id",id ; "sort",sort] [< List.fold_left (fun i (id,fi,ti,bi) -> [< i ; X.xml_nempty "CofixFunction" ["id",id ; "name", Names.string_of_id fi] [< X.xml_nempty "type" [] [< aux ti >] ; X.xml_nempty "body" [] [< aux bi >] >] >] ) [<>] funs >] and aux_subst target (id,subst) = if subst = [] then target else Xml.xml_nempty "instantiate" (match id with None -> [] | Some id -> ["id",id]) [< target ; List.fold_left (fun i (uri,arg) -> [< i ; Xml.xml_nempty "arg" ["relUri", uri] (aux arg) >] ) [<>] subst >] in aux ;; let param_attribute_of_params params = List.fold_right (fun (path,l) i -> List.fold_right (fun x i ->path ^ "/" ^ x ^ ".var" ^ match i with "" -> "" | i' -> " " ^ i' ) l "" ^ match i with "" -> "" | i' -> " " ^ i' ) params "" ;; let print_object uri ids_to_inner_sorts = let rec aux = let module A = Acic in let module X = Xml in function A.ACurrentProof (id,n,conjectures,bo,ty) -> let xml_for_current_proof_body = (*CSC: Should the CurrentProof also have the list of variables it depends on? *) (*CSC: I think so. Not implemented yet. 
*) X.xml_nempty "CurrentProof" ["of",uri ; "id", id] [< List.fold_left (fun i (cid,n,canonical_context,t) -> [< i ; X.xml_nempty "Conjecture" ["id", cid ; "no",export_existential n] [< List.fold_left (fun i (hid,t) -> [< (match t with n,A.Decl t -> X.xml_nempty "Decl" ["id",hid;"name",Names.string_of_id n] (print_term ids_to_inner_sorts t) | n,A.Def (t,_) -> X.xml_nempty "Def" ["id",hid;"name",Names.string_of_id n] (print_term ids_to_inner_sorts t) ) ; i >] ) [< >] canonical_context ; X.xml_nempty "Goal" [] (print_term ids_to_inner_sorts t) >] >]) [<>] (List.rev conjectures) ; X.xml_nempty "body" [] (print_term ids_to_inner_sorts bo) >] in let xml_for_current_proof_type = X.xml_nempty "ConstantType" ["name",n ; "id", id] (print_term ids_to_inner_sorts ty) in let xmlbo = [< X.xml_cdata "\n" ; X.xml_cdata ("\n"); xml_for_current_proof_body >] in let xmlty = [< X.xml_cdata "\n" ; X.xml_cdata ("\n"); xml_for_current_proof_type >] in xmlty, Some xmlbo | A.AConstant (id,n,bo,ty,params) -> let params' = param_attribute_of_params params in let xmlbo = match bo with None -> None | Some bo -> Some [< X.xml_cdata "\n" ; X.xml_cdata ("\n") ; X.xml_nempty "ConstantBody" ["for",uri ; "params",params' ; "id", id] [< print_term ids_to_inner_sorts bo >] >] in let xmlty = [< X.xml_cdata "\n" ; X.xml_cdata ("\n"); X.xml_nempty "ConstantType" ["name",n ; "params",params' ; "id", id] [< print_term ids_to_inner_sorts ty >] >] in xmlty, xmlbo | A.AVariable (id,n,bo,ty,params) -> let params' = param_attribute_of_params params in [< X.xml_cdata "\n" ; X.xml_cdata ("\n") ; X.xml_nempty "Variable" ["name",n ; "params",params' ; "id", id] [< (match bo with None -> [<>] | Some bo -> X.xml_nempty "body" [] (print_term ids_to_inner_sorts bo) ) ; X.xml_nempty "type" [] (print_term ids_to_inner_sorts ty) >] >], None | A.AInductiveDefinition (id,tys,params,nparams) -> let params' = param_attribute_of_params params in [< X.xml_cdata "\n" ; X.xml_cdata ("\n") ; X.xml_nempty "InductiveDefinition" ["noParams",string_of_int nparams ; "id",id ; "params",params'] [< (List.fold_left (fun i (id,typename,finite,arity,cons) -> [< i ; X.xml_nempty "InductiveType" ["id",id ; "name",Names.string_of_id typename ; "inductive",(string_of_bool finite) ] [< X.xml_nempty "arity" [] (print_term ids_to_inner_sorts arity) ; (List.fold_left (fun i (name,lc) -> [< i ; X.xml_nempty "Constructor" ["name",Names.string_of_id name] (print_term ids_to_inner_sorts lc) >]) [<>] cons ) >] >] ) [< >] tys ) >] >], None in aux ;; let print_inner_types curi ids_to_inner_sorts ids_to_inner_types = let module C2A = Cic2acic in let module X = Xml in [< X.xml_cdata "\n" ; X.xml_cdata ("\n"); X.xml_nempty "InnerTypes" ["of",curi] (Hashtbl.fold (fun id {C2A.annsynthesized = synty ; C2A.annexpected = expty} x -> [< x ; X.xml_nempty "TYPE" ["of",id] [< X.xml_nempty "synthesized" [] (print_term ids_to_inner_sorts synty) ; match expty with None -> [<>] | Some expty' -> X.xml_nempty "expected" [] (print_term ids_to_inner_sorts expty') >] >] ) ids_to_inner_types [<>] ) >] ;; coq-8.4pl2/plugins/xml/doubleTypeInference.ml0000640000175000001440000002511412121620060020366 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Some {Environ.utj_val = j.Environ.uj_val; Environ.utj_type = s } | _ -> None (* None means the CProp constant *) ;; let double_type_of env sigma cstr expectedty subterms_to_types = (*CSC: the code is inefficient because judgments are created just to be *) 
(*CSC: destroyed using Environ.j_type. Moreover I am pretty sure that the *) (*CSC: functions used do checks that we do not need *) let rec execute env sigma cstr expectedty = let module T = Term in let module E = Environ in (* the type part is the synthesized type *) let judgement = match T.kind_of_term cstr with T.Meta n -> Util.error "DoubleTypeInference.double_type_of: found a non-instanciated goal" | T.Evar ((n,l) as ev) -> let ty = Unshare.unshare (Evd.existential_type sigma ev) in let jty = execute env sigma ty None in let jty = assumption_of_judgment env sigma jty in let evar_context = E.named_context_of_val (Evd.find sigma n).Evd.evar_hyps in let rec iter actual_args evar_context = match actual_args,evar_context with [],[] -> () | he1::tl1,(n,_,ty)::tl2 -> (* for side-effects *) let _ = execute env sigma he1 (Some ty) in let tl2' = List.map (function (m,bo,ty) -> (* Warning: the substitution should be performed also on bo *) (* This is not done since bo is not used later yet *) (m,bo,Unshare.unshare (T.replace_vars [n,he1] ty)) ) tl2 in iter tl1 tl2' | _,_ -> assert false in (* for side effects only *) iter (List.rev (Array.to_list l)) (List.rev evar_context) ; E.make_judge cstr jty | T.Rel n -> Typeops.judge_of_relative env n | T.Var id -> Typeops.judge_of_variable env id | T.Const c -> E.make_judge cstr (Typeops.type_of_constant env c) | T.Ind ind -> E.make_judge cstr (Inductiveops.type_of_inductive env ind) | T.Construct cstruct -> E.make_judge cstr (Inductiveops.type_of_constructor env cstruct) | T.Case (ci,p,c,lf) -> let expectedtype = Reduction.whd_betadeltaiota env (Retyping.get_type_of env sigma c) in let cj = execute env sigma c (Some expectedtype) in let pj = execute env sigma p None in let (expectedtypes,_,_) = let indspec = Inductive.find_rectype env cj.Environ.uj_type in Inductive.type_case_branches env indspec pj cj.Environ.uj_val in let lfj = execute_array env sigma lf (Array.map (function x -> Some x) expectedtypes) in let (j,_) = Typeops.judge_of_case env ci pj cj lfj in j | T.Fix ((vn,i as vni),recdef) -> let (_,tys,_ as recdef') = execute_recdef env sigma recdef in let fix = (vni,recdef') in E.make_judge (T.mkFix fix) tys.(i) | T.CoFix (i,recdef) -> let (_,tys,_ as recdef') = execute_recdef env sigma recdef in let cofix = (i,recdef') in E.make_judge (T.mkCoFix cofix) tys.(i) | T.Sort (T.Prop c) -> Typeops.judge_of_prop_contents c | T.Sort (T.Type u) -> (*CSC: In case of need, I refresh the universe. But exportation of the *) (*CSC: right universe level information is destroyed. It must be changed *) (*CSC: again once Judicael will introduce his non-bugged algebraic *) (*CSC: universes. 
*) (try Typeops.judge_of_type u with e when e <> Sys.Break -> (* Successor of a non universe-variable universe anomaly *) (Pp.ppnl (Pp.str "Warning: universe refresh performed!!!") ; flush stdout ) ; Typeops.judge_of_type (Termops.new_univ ()) ) | T.App (f,args) -> let expected_head = Reduction.whd_betadeltaiota env (Retyping.get_type_of env sigma f) in let j = execute env sigma f (Some expected_head) in let expected_args = let rec aux typ = function [] -> [] | hj::restjl -> match T.kind_of_term (Reduction.whd_betadeltaiota env typ) with T.Prod (_,c1,c2) -> (Some (Reductionops.nf_beta sigma c1)) :: (aux (T.subst1 hj c2) restjl) | _ -> assert false in Array.of_list (aux j.Environ.uj_type (Array.to_list args)) in let jl = execute_array env sigma args expected_args in let (j,_) = Typeops.judge_of_apply env j jl in j | T.Lambda (name,c1,c2) -> let j = execute env sigma c1 None in let var = type_judgment env sigma j in let env1 = E.push_rel (name,None,var.E.utj_val) env in let expectedc2type = match expectedty with None -> None | Some ety -> match T.kind_of_term (Reduction.whd_betadeltaiota env ety) with T.Prod (_,_,expected_target_type) -> Some (Reductionops.nf_beta sigma expected_target_type) | _ -> assert false in let j' = execute env1 sigma c2 expectedc2type in Typeops.judge_of_abstraction env1 name var j' | T.Prod (name,c1,c2) -> let j = execute env sigma c1 None in let varj = type_judgment env sigma j in let env1 = E.push_rel (name,None,varj.E.utj_val) env in let j' = execute env1 sigma c2 None in (match type_judgment_cprop env1 sigma j' with Some varj' -> Typeops.judge_of_product env name varj varj' | None -> (* CProp found *) { Environ.uj_val = T.mkProd (name, j.Environ.uj_val, j'.Environ.uj_val); Environ.uj_type = T.mkConst cprop }) | T.LetIn (name,c1,c2,c3) -> (*CSC: What are the right expected types for the source and *) (*CSC: target of a LetIn? None used. *) let j1 = execute env sigma c1 None in let j2 = execute env sigma c2 None in let j2 = type_judgment env sigma j2 in let env1 = E.push_rel (name,Some j1.E.uj_val,j2.E.utj_val) env in let j3 = execute env1 sigma c3 None in Typeops.judge_of_letin env name j1 j2 j3 | T.Cast (c,k,t) -> let cj = execute env sigma c (Some (Reductionops.nf_beta sigma t)) in let tj = execute env sigma t None in let tj = type_judgment env sigma tj in let j, _ = Typeops.judge_of_cast env cj k tj in j in let synthesized = E.j_type judgement in let synthesized' = Reductionops.nf_beta sigma synthesized in let types,res = match expectedty with None -> (* No expected type *) {synthesized = synthesized' ; expected = None}, synthesized | Some ty when Term.eq_constr synthesized' ty -> (* The expected type is synthactically equal to the *) (* synthesized type. Let's forget it. 
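(* Sketch of the rule applied just below (toy code, hypothetical names): the
   expected type is kept only when it differs syntactically from the
   synthesized one.

     let pair_types eq synthesized expected =
       match expected with
       | None -> (synthesized, None)
       | Some ty when eq synthesized ty -> (ty, None)
       | Some ty -> (synthesized, Some ty)
*)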
*) (* Note: since eq_constr is up to casts, it is better *) (* to keep the expected type, since it can bears casts *) (* that change the innersort to CProp *) {synthesized = ty ; expected = None}, ty | Some expectedty' -> {synthesized = synthesized' ; expected = Some expectedty'}, expectedty' in (*CSC: debugging stuff to be removed *) if Acic.CicHash.mem subterms_to_types cstr then (Pp.ppnl (Pp.(++) (Pp.str "DUPLICATE INSERTION: ") (Printer.pr_lconstr cstr)) ; flush stdout ) ; Acic.CicHash.add subterms_to_types cstr types ; E.make_judge cstr res and execute_recdef env sigma (names,lar,vdef) = let length = Array.length lar in let larj = execute_array env sigma lar (Array.make length None) in let lara = Array.map (assumption_of_judgment env sigma) larj in let env1 = Environ.push_rec_types (names,lara,vdef) env in let expectedtypes = Array.map (function i -> Some (Term.lift length i)) lar in let vdefj = execute_array env1 sigma vdef expectedtypes in let vdefv = Array.map Environ.j_val vdefj in (names,lara,vdefv) and execute_array env sigma v expectedtypes = let jl = execute_list env sigma (Array.to_list v) (Array.to_list expectedtypes) in Array.of_list jl and execute_list env sigma = List.map2 (execute env sigma) in ignore (execute env sigma cstr expectedty) ;; coq-8.4pl2/plugins/xml/proofTree2Xml.ml40000640000175000001440000001717111661304170017244 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* not (List.mem n real_named_context)) named_context in let idrefs = List.map (function x,_,_ -> idref_of_id (Names.string_of_id x)) named_context' in let rel_context = Sign.push_named_to_rel_context named_context' [] in let rel_env = Environ.push_rel_context rel_context (Environ.reset_with_named_context (Environ.val_of_named_context real_named_context) env) in let obj' = Term.subst_vars (List.map (function (i,_,_) -> i) named_context') obj in let seed = ref 0 in try let annobj = Cic2acic.acic_of_cic_context' false seed ids_to_terms constr_to_ids ids_to_father_ids ids_to_inner_sorts ids_to_inner_types rel_env idrefs sigma (Unshare.unshare obj') None in Acic2Xml.print_term ids_to_inner_sorts annobj with e -> Util.anomaly ("Problem during the conversion of constr into XML: " ^ Printexc.to_string e) (* CSC: debugging stuff Pp.ppnl (Pp.str "Problem during the conversion of constr into XML") ; Pp.ppnl (Pp.str "ENVIRONMENT:") ; Pp.ppnl (Printer.pr_context_of rel_env) ; Pp.ppnl (Pp.str "TERM:") ; Pp.ppnl (Printer.pr_lconstr_env rel_env obj') ; Pp.ppnl (Pp.str "RAW-TERM:") ; Pp.ppnl (Printer.pr_lconstr obj') ; Xml.xml_empty "MISSING TERM" [] (*; raise e*) *) ;; let first_word s = try let i = String.index s ' ' in String.sub s 0 i with _ -> s ;; let string_of_prim_rule x = match x with | Proof_type.Intro _-> "Intro" | Proof_type.Cut _ -> "Cut" | Proof_type.FixRule _ -> "FixRule" | Proof_type.Cofix _ -> "Cofix" | Proof_type.Refine _ -> "Refine" | Proof_type.Convert_concl _ -> "Convert_concl" | Proof_type.Convert_hyp _->"Convert_hyp" | Proof_type.Thin _ -> "Thin" | Proof_type.ThinBody _-> "ThinBody" | Proof_type.Move (_,_,_) -> "Move" | Proof_type.Order _ -> "Order" | Proof_type.Rename (_,_) -> "Rename" | Proof_type.Change_evars -> "Change_evars" let print_proof_tree curi sigma pf proof_tree_to_constr proof_tree_to_flattened_proof_tree constr_to_ids = let module PT = Proof_type in let module L = Logic in let module X = Xml in let module T = Tacexpr in let ids_of_node node = let constr = 
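(* The lookup completed just after this comment goes through two tables built
   during the export: the proof-tree node is first mapped to the lambda-term
   fragment it generated (proof_tree_to_constr), and that term is then mapped
   to the id it received in the XML (constr_to_ids); when the second lookup
   fails the node is emitted without an "of" attribute. A minimal sketch of
   such a chained lookup, with hypothetical table names:

     let id_of_node node_to_term term_to_id node =
       match Hashtbl.find_opt node_to_term node with
       | None -> None
       | Some t -> Hashtbl.find_opt term_to_id t
*)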
Proof2aproof.ProofTreeHash.find proof_tree_to_constr node in (* let constr = try Proof2aproof.ProofTreeHash.find proof_tree_to_constr node with _ -> Pp.ppnl (Pp.(++) (Pp.str "Node of the proof-tree that generated no lambda-term: ") (Refiner.print_script true (Evd.empty) (Global.named_context ()) node)) ; assert false (* Closed bug, should not happen any more *) in *) try Some (Acic.CicHash.find constr_to_ids constr) with _ -> Pp.ppnl (Pp.(++) (Pp.str "The_generated_term_is_not_a_subterm_of_the_final_lambda_term") (Printer.pr_lconstr constr)) ; None in let rec aux node old_hyps = let of_attribute = match ids_of_node node with None -> [] | Some id -> ["of",id] in match node with {PT.ref=Some(PT.Prim tactic_expr,nodes)} -> let tac = string_of_prim_rule tactic_expr in let of_attribute = ("name",tac)::of_attribute in if nodes = [] then X.xml_empty "Prim" of_attribute else X.xml_nempty "Prim" of_attribute (List.fold_left (fun i n -> [< i ; (aux n old_hyps) >]) [<>] nodes) | {PT.goal=goal; PT.ref=Some(PT.Nested (PT.Tactic(tactic_expr,_),hidden_proof),nodes)} -> (* [hidden_proof] is the proof of the tactic; *) (* [nodes] are the proof of the subgoals generated by the tactic; *) (* [flat_proof] if the proof-tree obtained substituting [nodes] *) (* for the holes in [hidden_proof] *) let flat_proof = Proof2aproof.ProofTreeHash.find proof_tree_to_flattened_proof_tree node in begin match tactic_expr with | T.TacArg (_,T.Tacexp _) -> (* We don't need to keep the level of abstraction introduced at *) (* user-level invocation of tactic... (see Tacinterp.hide_interp)*) aux flat_proof old_hyps | _ -> (****** la tactique employee *) let prtac = Pptactic.pr_tactic (Global.env()) in let tac = Pp.string_of_ppcmds (prtac tactic_expr) in let tacname= first_word tac in let of_attribute = ("name",tacname)::("script",tac)::of_attribute in (****** le but *) let concl = Goal.V82.concl sigma goal in let hyps = Goal.V82.hyps sigma goal in let env = Global.env_of_context hyps in let xgoal = X.xml_nempty "Goal" [] (constr_to_xml concl sigma env) in let rec build_hyps = function | [] -> xgoal | (id,c,tid)::hyps1 -> let id' = Names.string_of_id id in [< build_hyps hyps1; (X.xml_nempty "Hypothesis" ["id",idref_of_id id' ; "name",id'] (constr_to_xml tid sigma env)) >] in let old_names = List.map (fun (id,c,tid)->id) old_hyps in let nhyps = Environ.named_context_of_val hyps in let new_hyps = List.filter (fun (id,c,tid)-> not (List.mem id old_names)) nhyps in X.xml_nempty "Tactic" of_attribute [<(build_hyps new_hyps) ; (aux flat_proof nhyps)>] end | {PT.ref=Some(PT.Daimon,_)} -> X.xml_empty "Hidden_open_goal" of_attribute | {PT.ref=None;PT.goal=goal} -> X.xml_empty "Open_goal" of_attribute | {PT.ref=Some(PT.Decl_proof _, _)} -> failwith "TODO: xml and decl_proof" in [< X.xml_cdata "\n" ; X.xml_cdata ("\n\n"); X.xml_nempty "ProofTree" ["of",curi] (aux pf []) >] ;; (* Hook registration *) (* CSC: debranched since it is bugged Xmlcommand.set_print_proof_tree print_proof_tree;; *) coq-8.4pl2/plugins/xml/xmlcommand.ml0000640000175000001440000006122612121620060016576 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* None) in (fun () -> !print_proof_tree), (fun f -> print_proof_tree := fun curi sigma0 pf proof_tree_to_constr proof_tree_to_flattened_proof_tree constr_to_ids -> Some (f curi sigma0 pf proof_tree_to_constr proof_tree_to_flattened_proof_tree constr_to_ids)) ;; (* UTILITY FUNCTIONS *) let print_if_verbose s = if 
!verbose then print_string s;; (* Next exception is used only inside print_coq_object and tag_of_string_tag *) exception Uninteresting;; (* NOT USED anymore, we back to the V6 point of view with global parameters (* Internally, for Coq V7, params of inductive types are associated *) (* not to the whole block of mutual inductive (as it was in V6) but to *) (* each member of the block; but externally, all params are required *) (* to be the same; the following function checks that the parameters *) (* of each inductive of a same block are all the same, then returns *) (* this number; it fails otherwise *) let extract_nparams pack = let module D = Declarations in let module U = Util in let module S = Sign in let {D.mind_nparams=nparams0} = pack.(0) in let arity0 = pack.(0).D.mind_user_arity in let params0, _ = S.decompose_prod_n_assum nparams0 arity0 in for i = 1 to Array.length pack - 1 do let {D.mind_nparams=nparamsi} = pack.(i) in let arityi = pack.(i).D.mind_user_arity in let paramsi, _ = S.decompose_prod_n_assum nparamsi arityi in if params0 <> paramsi then U.error "Cannot convert a block of inductive definitions with parameters specific to each inductive to a block of mutual inductive definitions with parameters global to the whole block" done; nparams0 *) (* could_have_namesakes sp = true iff o is an object that could be cooked and *) (* than that could exists in cooked form with the same name in a super *) (* section of the actual section *) let could_have_namesakes o sp = (* namesake = omonimo in italian *) let module DK = Decl_kinds in let module D = Declare in let tag = Libobject.object_tag o in print_if_verbose ("Object tag: " ^ tag ^ "\n") ; match tag with "CONSTANT" -> true (* constants/parameters are non global *) | "INDUCTIVE" -> true (* mutual inductive types are never local *) | "VARIABLE" -> false (* variables are local, so no namesakes *) | _ -> false (* uninteresting thing that won't be printed*) ;; (* filter_params pvars hyps *) (* filters out from pvars (which is a list of lists) all the variables *) (* that does not belong to hyps (which is a simple list) *) (* It returns a list of couples relative section path -- list of *) (* variable names. *) let filter_params pvars hyps = let rec aux ids = function [] -> [] | (id,he)::tl -> let ids' = id::ids in let ids'' = "cic:/" ^ String.concat "/" (List.rev (List.map Names.string_of_id ids')) in let he' = ids'', List.rev (List.filter (function x -> List.mem x hyps) he) in let tl' = aux ids' tl in match he' with _,[] -> tl' | _,_ -> he'::tl' in let cwd = Lib.cwd () in let cwdsp = Libnames.make_path cwd (Names.id_of_string "dummy") in let modulepath = Cic2acic.get_module_path_of_full_path cwdsp in aux (Names.repr_dirpath modulepath) (List.rev pvars) ;; type variables_type = Definition of string * Term.constr * Term.types | Assumption of string * Term.constr ;; (* The computation is very inefficient, but we can't do anything *) (* better unless this function is reimplemented in the Declare *) (* module. 
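(* Worked example, with hypothetical data, of how [search_variables] below and
   [filter_params] above combine: if the open sections provide
   pvars = [ (A, [x; y]) ; (B, [z]) ] and the object being exported actually
   uses hyps = [x; z], then the result keeps only the used hypotheses, grouped
   per section and paired with the section URI, e.g.
   [ ("cic:/Top/A", [x]) ; ("cic:/Top/A/B", [z]) ]. Sections whose filtered
   list becomes empty are dropped, and the exact URIs depend on the current
   module path. *)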
*) let search_variables () = let module N = Names in let cwd = Lib.cwd () in let cwdsp = Libnames.make_path cwd (Names.id_of_string "dummy") in let modulepath = Cic2acic.get_module_path_of_full_path cwdsp in let rec aux = function [] -> [] | he::tl as modules -> let one_section_variables = let dirpath = N.make_dirpath (modules @ N.repr_dirpath modulepath) in let t = List.map N.string_of_id (Decls.last_section_hyps dirpath) in [he,t] in one_section_variables @ aux tl in aux (Cic2acic.remove_module_dirpath_from_dirpath ~basedir:modulepath cwd) ;; (* FUNCTIONS TO PRINT A SINGLE OBJECT OF COQ *) let rec join_dirs cwd = function [] -> cwd | he::tail -> (try Unix.mkdir cwd 0o775 with e when e <> Sys.Break -> () (* Let's ignore the errors on mkdir *) ) ; let newcwd = cwd ^ "/" ^ he in join_dirs newcwd tail ;; let filename_of_path xml_library_root tag = let module N = Names in match xml_library_root with None -> None (* stdout *) | Some xml_library_root' -> let tokens = Cic2acic.token_list_of_kernel_name tag in Some (join_dirs xml_library_root' tokens) ;; let body_filename_of_filename = function Some f -> Some (f ^ ".body") | None -> None ;; let types_filename_of_filename = function Some f -> Some (f ^ ".types") | None -> None ;; let prooftree_filename_of_filename = function Some f -> Some (f ^ ".proof_tree") | None -> None ;; let theory_filename xml_library_root = let module N = Names in match xml_library_root with None -> None (* stdout *) | Some xml_library_root' -> let toks = List.map N.string_of_id (N.repr_dirpath (Lib.library_dp ())) in (* theory from A/B/C/F.v goes into A/B/C/F.theory *) let alltoks = List.rev toks in Some (join_dirs xml_library_root' alltoks ^ ".theory") let print_object uri obj sigma proof_tree_infos filename = (* function to pretty print and compress an XML file *) (*CSC: Unix.system "gzip ..." is an horrible non-portable solution. *) let pp xml filename = Xml.pp xml filename ; match filename with None -> () | Some fn -> let fn' = let rec escape s n = try let p = String.index_from s n '\'' in String.sub s n (p - n) ^ "\\'" ^ escape s (p+1) with Not_found -> String.sub s n (String.length s - n) in escape fn 0 in ignore (Unix.system ("gzip " ^ fn' ^ ".xml")) in let (annobj,_,constr_to_ids,_,ids_to_inner_sorts,ids_to_inner_types,_,_) = Cic2acic.acic_object_of_cic_object sigma obj in let (xml, xml') = Acic2Xml.print_object uri ids_to_inner_sorts annobj in let xmltypes = Acic2Xml.print_inner_types uri ids_to_inner_sorts ids_to_inner_types in pp xml filename ; begin match xml' with None -> () | Some xml' -> pp xml' (body_filename_of_filename filename) end ; pp xmltypes (types_filename_of_filename filename) ; match proof_tree_infos with None -> () | Some (sigma0,proof_tree,proof_tree_to_constr, proof_tree_to_flattened_proof_tree) -> let xmlprooftree = print_proof_tree () uri sigma0 proof_tree proof_tree_to_constr proof_tree_to_flattened_proof_tree constr_to_ids in match xmlprooftree with None -> () | Some xmlprooftree -> pp xmlprooftree (prooftree_filename_of_filename filename) ;; let string_list_of_named_context_list = List.map (function (n,_,_) -> Names.string_of_id n) ;; (* Function to collect the variables that occur in a term. *) (* Used only for variables (since for constants and mutual *) (* inductive types this information is already available. 
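(* Sketch of the transitive collection performed by [find_hyps] below (toy
   term type and hypothetical [lookup], not from the original source): a
   variable contributes itself plus whatever occurs in its body and its type.

     type t = Var of string | App of t * t

     let rec vars lookup acc = function
       | App (f, a) -> vars lookup (vars lookup acc f) a
       | Var id when List.mem id acc -> acc
       | Var id ->
           let (body, ty) = lookup id in
           let acc = match body with Some b -> vars lookup acc b | None -> acc in
           id :: vars lookup acc ty
*)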
*) let find_hyps t = let module T = Term in let rec aux l t = match T.kind_of_term t with T.Var id when not (List.mem id l) -> let (_,bo,ty) = Global.lookup_named id in let boids = match bo with Some bo' -> aux l bo' | None -> l in id::(aux boids ty) | T.Var _ | T.Rel _ | T.Meta _ | T.Evar _ | T.Sort _ -> l | T.Cast (te,_, ty) -> aux (aux l te) ty | T.Prod (_,s,t) -> aux (aux l s) t | T.Lambda (_,s,t) -> aux (aux l s) t | T.LetIn (_,s,_,t) -> aux (aux l s) t | T.App (he,tl) -> Array.fold_left (fun i x -> aux i x) (aux l he) tl | T.Const con -> let hyps = (Global.lookup_constant con).Declarations.const_hyps in map_and_filter l hyps @ l | T.Ind ind | T.Construct (ind,_) -> let hyps = (fst (Global.lookup_inductive ind)).Declarations.mind_hyps in map_and_filter l hyps @ l | T.Case (_,t1,t2,b) -> Array.fold_left (fun i x -> aux i x) (aux (aux l t1) t2) b | T.Fix (_,(_,tys,bodies)) | T.CoFix (_,(_,tys,bodies)) -> let r = Array.fold_left (fun i x -> aux i x) l tys in Array.fold_left (fun i x -> aux i x) r bodies and map_and_filter l = function [] -> [] | (n,_,_)::tl when not (List.mem n l) -> n::(map_and_filter l tl) | _::tl -> map_and_filter l tl in aux [] t ;; (* Functions to construct an object *) let mk_variable_obj id body typ = let hyps,unsharedbody = match body with None -> [],None | Some bo -> find_hyps bo, Some (Unshare.unshare bo) in let hyps' = find_hyps typ @ hyps in let hyps'' = List.map Names.string_of_id hyps' in let variables = search_variables () in let params = filter_params variables hyps'' in Acic.Variable (Names.string_of_id id, unsharedbody, Unshare.unshare typ, params) ;; (* Unsharing is not performed on the body, that must be already unshared. *) (* The evar map and the type, instead, are unshared by this function. *) let mk_current_proof_obj is_a_variable id bo ty evar_map env = let unshared_ty = Unshare.unshare ty in let metasenv = List.map (function (n, {Evd.evar_concl = evar_concl ; Evd.evar_hyps = evar_hyps} ) -> (* We map the named context to a rel context and every Var to a Rel *) let final_var_ids,context = let rec aux var_ids = function [] -> var_ids,[] | (n,None,t)::tl -> let final_var_ids,tl' = aux (n::var_ids) tl in let t' = Term.subst_vars var_ids t in final_var_ids,(n, Acic.Decl (Unshare.unshare t'))::tl' | (n,Some b,t)::tl -> let final_var_ids,tl' = aux (n::var_ids) tl in let b' = Term.subst_vars var_ids b in (* t will not be exported to XML. 
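(* Note on the step performed here (illustration, not from the original
   source): the named context of each open goal is turned into a de Bruijn
   (rel) context by accumulating the hypothesis names in [var_ids] and calling
   [Term.subst_vars var_ids], which replaces the j-th name of the list by
   [Rel j]; for instance, with var_ids = [y; x], an occurrence of y becomes
   Rel 1 and an occurrence of x becomes Rel 2. *)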
Thus no unsharing performed *) final_var_ids,(n, Acic.Def (Unshare.unshare b',t))::tl' in aux [] (List.rev (Environ.named_context_of_val evar_hyps)) in (* We map the named context to a rel context and every Var to a Rel *) (n,context,Unshare.unshare (Term.subst_vars final_var_ids evar_concl)) ) (Evarutil.non_instantiated evar_map) in let id' = Names.string_of_id id in if metasenv = [] then let ids = Names.Idset.union (Environ.global_vars_set env bo) (Environ.global_vars_set env ty) in let hyps0 = Environ.keep_hyps env ids in let hyps = string_list_of_named_context_list hyps0 in (* Variables are the identifiers of the variables in scope *) let variables = search_variables () in let params = filter_params variables hyps in if is_a_variable then Acic.Variable (id',Some bo,unshared_ty,params) else Acic.Constant (id',Some bo,unshared_ty,params) else Acic.CurrentProof (id',metasenv,bo,unshared_ty) ;; let mk_constant_obj id bo ty variables hyps = let hyps = string_list_of_named_context_list hyps in let ty = Unshare.unshare ty in let params = filter_params variables hyps in match bo with None -> Acic.Constant (Names.string_of_id id,None,ty,params) | Some c -> Acic.Constant (Names.string_of_id id, Some (Unshare.unshare (Declarations.force c)), ty,params) ;; let mk_inductive_obj sp mib packs variables nparams hyps finite = let module D = Declarations in let hyps = string_list_of_named_context_list hyps in let params = filter_params variables hyps in (* let nparams = extract_nparams packs in *) let tys = let tyno = ref (Array.length packs) in Array.fold_right (fun p i -> decr tyno ; let {D.mind_consnames=consnames ; D.mind_typename=typename } = p in let arity = Inductive.type_of_inductive (Global.env()) (mib,p) in let lc = Inductiveops.arities_of_constructors (Global.env ()) (sp,!tyno) in let cons = (Array.fold_right (fun (name,lc) i -> (name,lc)::i) (Array.mapi (fun j x ->(x,Unshare.unshare lc.(j))) consnames) [] ) in (typename,finite,Unshare.unshare arity,cons)::i ) packs [] in Acic.InductiveDefinition (tys,params,nparams) ;; (* The current channel for .theory files *) let theory_buffer = Buffer.create 4000;; let theory_output_string ?(do_not_quote = false) s = (* prepare for coqdoc post-processing *) let s = if do_not_quote then s else "(** #"^s^"\n#*)\n" in print_if_verbose s; Buffer.add_string theory_buffer s ;; let kind_of_global_goal = function | Decl_kinds.Global, Decl_kinds.DefinitionBody _ -> "DEFINITION","InteractiveDefinition" | Decl_kinds.Global, (Decl_kinds.Proof k) -> "THEOREM",Decl_kinds.string_of_theorem_kind k | Decl_kinds.Local, _ -> assert false let kind_of_inductive isrecord kn = "DEFINITION", if (fst (Global.lookup_inductive (kn,0))).Declarations.mind_finite then begin match isrecord with | Declare.KernelSilent -> "Record" | _ -> "Inductive" end else "CoInductive" ;; let kind_of_variable id = let module DK = Decl_kinds in match Decls.variable_kind id with | DK.IsAssumption DK.Definitional -> "VARIABLE","Assumption" | DK.IsAssumption DK.Logical -> "VARIABLE","Hypothesis" | DK.IsAssumption DK.Conjectural -> "VARIABLE","Conjecture" | DK.IsDefinition DK.Definition -> "VARIABLE","LocalDefinition" | DK.IsProof _ -> "VARIABLE","LocalFact" | _ -> Util.anomaly "Unsupported variable kind" ;; let kind_of_constant kn = let module DK = Decl_kinds in match Decls.constant_kind kn with | DK.IsAssumption DK.Definitional -> "AXIOM","Declaration" | DK.IsAssumption DK.Logical -> "AXIOM","Axiom" | DK.IsAssumption DK.Conjectural -> Pp.warning "Conjecture not supported in dtd (used Declaration 
instead)"; "AXIOM","Declaration" | DK.IsDefinition DK.Definition -> "DEFINITION","Definition" | DK.IsDefinition DK.Example -> Pp.warning "Example not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.Coercion -> Pp.warning "Coercion not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.SubClass -> Pp.warning "SubClass not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.CanonicalStructure -> Pp.warning "CanonicalStructure not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.Fixpoint -> Pp.warning "Fixpoint not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.CoFixpoint -> Pp.warning "CoFixpoint not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.Scheme -> Pp.warning "Scheme not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.StructureComponent -> Pp.warning "StructureComponent not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.IdentityCoercion -> Pp.warning "IdentityCoercion not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.Instance -> Pp.warning "Instance not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsDefinition DK.Method -> Pp.warning "Method not supported in dtd (used Definition instead)"; "DEFINITION","Definition" | DK.IsProof (DK.Theorem|DK.Lemma|DK.Corollary|DK.Fact|DK.Remark as thm) -> "THEOREM",DK.string_of_theorem_kind thm | DK.IsProof _ -> Pp.warning "Unsupported theorem kind (used Theorem instead)"; "THEOREM",DK.string_of_theorem_kind DK.Theorem ;; let kind_of_global r = let module Ln = Libnames in let module DK = Decl_kinds in match r with | Ln.IndRef kn | Ln.ConstructRef (kn,_) -> let isrecord = try let _ = Recordops.lookup_projections kn in Declare.KernelSilent with Not_found -> Declare.KernelVerbose in kind_of_inductive isrecord (fst kn) | Ln.VarRef id -> kind_of_variable id | Ln.ConstRef kn -> kind_of_constant kn ;; let print_object_kind uri (xmltag,variation) = let s = Printf.sprintf "\n" xmltag uri variation in theory_output_string s ;; (* print id dest *) (* where sp is the qualified identifier (section path) of a *) (* definition/theorem, variable or inductive definition *) (* and dest is either None (for stdout) or (Some filename) *) (* pretty prints via Xml.pp the object whose identifier is id on dest *) (* Note: it is printed only (and directly) the most cooked available *) (* form of the definition (all the parameters are *) (* lambda-abstracted, but the object can still refer to variables) *) let print internal glob_ref kind xml_library_root = let module D = Declarations in let module De = Declare in let module G = Global in let module N = Names in let module Nt = Nametab in let module T = Term in let module X = Xml in let module Ln = Libnames in (* Variables are the identifiers of the variables in scope *) let variables = search_variables () in let tag,obj = match glob_ref with Ln.VarRef id -> (* this kn is fake since it is not provided by Coq *) let kn = let (mod_path,dir_path) = Lib.current_prefix () in N.make_kn mod_path dir_path (N.label_of_id id) in let (_,body,typ) = G.lookup_named id in Cic2acic.Variable kn,mk_variable_obj id body typ | Ln.ConstRef kn -> let id = N.id_of_label (N.con_label kn) in let cb = G.lookup_constant kn in let val0 
= D.body_of_constant cb in let typ = cb.D.const_type in let hyps = cb.D.const_hyps in let typ = Typeops.type_of_constant_type (Global.env()) typ in Cic2acic.Constant kn,mk_constant_obj id val0 typ variables hyps | Ln.IndRef (kn,_) -> let mib = G.lookup_mind kn in let {D.mind_nparams=nparams; D.mind_packets=packs ; D.mind_hyps=hyps; D.mind_finite=finite} = mib in Cic2acic.Inductive kn,mk_inductive_obj kn mib packs variables nparams hyps finite | Ln.ConstructRef _ -> Util.error ("a single constructor cannot be printed in XML") in let fn = filename_of_path xml_library_root tag in let uri = Cic2acic.uri_of_kernel_name tag in (match internal with | Declare.KernelSilent -> () | _ -> print_object_kind uri kind); print_object uri obj Evd.empty None fn ;; let print_ref qid fn = let ref = Nametab.global qid in print Declare.UserVerbose ref (kind_of_global ref) fn (* show dest *) (* where dest is either None (for stdout) or (Some filename) *) (* pretty prints via Xml.pp the proof in progress on dest *) let show_pftreestate internal fn (kind,pftst) id = if true then Util.anomaly "Xmlcommand.show_pftreestate is not supported in this version." let show fn = let pftst = Pfedit.get_pftreestate () in let (id,kind,_,_) = Pfedit.current_proof_statement () in show_pftreestate false fn (kind,pftst) id ;; (* Let's register the callbacks *) let xml_library_root = try Some (Sys.getenv "COQ_XML_LIBRARY_ROOT") with Not_found -> None ;; let proof_to_export = ref None (* holds the proof-tree to export *) ;; let _ = Pfedit.set_xml_cook_proof (function pftreestate -> proof_to_export := Some pftreestate) ;; let _ = Declare.set_xml_declare_variable (function (sp,kn) -> let id = Libnames.basename sp in print Declare.UserVerbose (Libnames.VarRef id) (kind_of_variable id) xml_library_root ; proof_to_export := None) ;; let _ = Declare.set_xml_declare_constant (function (internal,kn) -> match !proof_to_export with None -> print internal (Libnames.ConstRef kn) (kind_of_constant kn) xml_library_root | Some pftreestate -> (* It is a proof. Let's export it starting from the proof-tree *) (* I saved in the Pfedit.set_xml_cook_proof callback. 
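(* The interactive-proof export relies on a one-slot buffer shared between two
   callbacks: the prover stores the finished proof tree, and the constant
   declaration hook consumes it. Minimal sketch of the pattern, with
   hypothetical names:

     let pending = ref None
     let on_proof_done pt = pending := Some pt
     let on_constant_declared export_constant export_proof kn =
       match !pending with
       | None -> export_constant kn
       | Some pt -> export_proof kn pt; pending := None
*)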
*) let fn = filename_of_path xml_library_root (Cic2acic.Constant kn) in show_pftreestate internal fn pftreestate (Names.id_of_label (Names.con_label kn)) ; proof_to_export := None) ;; let _ = Declare.set_xml_declare_inductive (function (isrecord,(sp,kn)) -> print Declare.UserVerbose (Libnames.IndRef (Names.mind_of_kn kn,0)) (kind_of_inductive isrecord (Names.mind_of_kn kn)) xml_library_root) ;; let _ = Vernac.set_xml_start_library (function () -> Buffer.reset theory_buffer; theory_output_string "\n"; theory_output_string ("\n" ^ "\n" ^ "\n\n" ^ "%xhtml-lat1.ent;\n" ^ "%xhtml-special.ent;\n" ^ "%xhtml-symbol.ent;\n" ^ "]>\n\n"); theory_output_string "\n"; theory_output_string "\n\n") ;; let _ = Vernac.set_xml_end_library (function () -> theory_output_string "\n\n"; let ofn = theory_filename xml_library_root in begin match ofn with None -> Buffer.output_buffer stdout theory_buffer ; | Some fn -> let ch = open_out (fn ^ ".v") in Buffer.output_buffer ch theory_buffer ; close_out ch; (* dummy glob file *) let ch = open_out (fn ^ ".glob") in close_out ch end ; Option.iter (fun fn -> let coqdoc = Filename.concat Envars.coqbin ("coqdoc" ^ Coq_config.exec_extension) in let options = " --html -s --body-only --no-index --latin1 --raw-comments" in let command cmd = if Sys.command cmd <> 0 then Util.anomaly ("Error executing \"" ^ cmd ^ "\"") in command (coqdoc^options^" -o "^fn^".xml "^fn^".v"); command ("rm "^fn^".v "^fn^".glob"); print_string("\nWriting on file \"" ^ fn ^ ".xml\" was successful\n")) ofn) ;; let _ = Lexer.set_xml_output_comment (theory_output_string ~do_not_quote:true) ;; let uri_of_dirpath dir = "/" ^ String.concat "/" (List.map Names.string_of_id (List.rev (Names.repr_dirpath dir))) ;; let _ = Lib.set_xml_open_section (fun _ -> let s = "cic:" ^ uri_of_dirpath (Lib.cwd ()) in theory_output_string ("")) ;; let _ = Lib.set_xml_close_section (fun _ -> theory_output_string "") ;; let _ = Library.set_xml_require (fun d -> theory_output_string (Printf.sprintf "Require %s.
      " (uri_of_dirpath d) (Names.string_of_dirpath d))) ;; coq-8.4pl2/plugins/xml/acic.ml0000640000175000001440000001161411422606552015347 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Libnames.is_dirpath_prefix_of modul dirpath) modules with [] -> Pp.warning ("Modules not supported: reference to "^ Libnames.string_of_path path^" will be wrong"); dirpath | [modul] -> modul | _ -> raise TwoModulesWhoseDirPathIsOneAPrefixOfTheOther ;; (*CSC: Problem: here we are using the wrong (???) hypothesis that there do *) (*CSC: not exist two modules whose dir_paths are one a prefix of the other *) let remove_module_dirpath_from_dirpath ~basedir dir = let module Ln = Libnames in if Ln.is_dirpath_prefix_of basedir dir then let ids = Names.repr_dirpath dir in let rec remove_firsts n l = match n,l with (0,l) -> l | (n,he::tl) -> remove_firsts (n-1) tl | _ -> assert false in let ids' = List.rev (remove_firsts (List.length (Names.repr_dirpath basedir)) (List.rev ids)) in ids' else Names.repr_dirpath dir ;; let get_uri_of_var v pvars = let module D = Decls in let module N = Names in let rec search_in_open_sections = function [] -> Util.error ("Variable "^v^" not found") | he::tl as modules -> let dirpath = N.make_dirpath modules in if List.mem (N.id_of_string v) (D.last_section_hyps dirpath) then modules else search_in_open_sections tl in let path = if List.mem v pvars then [] else search_in_open_sections (N.repr_dirpath (Lib.cwd ())) in "cic:" ^ List.fold_left (fun i x -> "/" ^ N.string_of_id x ^ i) "" path ;; type tag = Constant of Names.constant | Inductive of Names.mutual_inductive | Variable of Names.kernel_name ;; type etag = TConstant | TInductive | TVariable ;; let etag_of_tag = function Constant _ -> TConstant | Inductive _ -> TInductive | Variable _ -> TVariable let ext_of_tag = function TConstant -> "con" | TInductive -> "ind" | TVariable -> "var" ;; exception FunctorsXMLExportationNotImplementedYet;; let subtract l1 l2 = let l1' = List.rev (Names.repr_dirpath l1) in let l2' = List.rev (Names.repr_dirpath l2) in let rec aux = function he::tl when tl = l2' -> [he] | he::tl -> he::(aux tl) | [] -> assert (l2' = []) ; [] in Names.make_dirpath (List.rev (aux l1')) ;; let token_list_of_path dir id tag = let module N = Names in let token_list_of_dirpath dirpath = List.rev_map N.string_of_id (N.repr_dirpath dirpath) in token_list_of_dirpath dir @ [N.string_of_id id ^ "." 
^ (ext_of_tag tag)] let token_list_of_kernel_name tag = let module N = Names in let module LN = Libnames in let id,dir = match tag with | Variable kn -> N.id_of_label (N.label kn), Lib.cwd () | Constant con -> N.id_of_label (N.con_label con), Lib.remove_section_part (LN.ConstRef con) | Inductive kn -> N.id_of_label (N.mind_label kn), Lib.remove_section_part (LN.IndRef (kn,0)) in token_list_of_path dir id (etag_of_tag tag) ;; let uri_of_kernel_name tag = let tokens = token_list_of_kernel_name tag in "cic:/" ^ String.concat "/" tokens let uri_of_declaration id tag = let module LN = Libnames in let dir = LN.pop_dirpath_n (Lib.sections_depth ()) (Lib.cwd ()) in let tokens = token_list_of_path dir id tag in "cic:/" ^ String.concat "/" tokens (* Special functions for handling of CCorn's CProp "sort" *) type sort = Coq_sort of Term.sorts_family | CProp ;; let prerr_endline _ = ();; let family_of_term ty = match Term.kind_of_term ty with Term.Sort s -> Coq_sort (Term.family_of_sort s) | Term.Const _ -> CProp (* I could check that the constant is CProp *) | _ -> Util.anomaly "family_of_term" ;; module CPropRetyping = struct module T = Term let outsort env sigma t = family_of_term (DoubleTypeInference.whd_betadeltaiotacprop env sigma t) let rec subst_type env sigma typ = function | [] -> typ | h::rest -> match T.kind_of_term (DoubleTypeInference.whd_betadeltaiotacprop env sigma typ) with | T.Prod (na,c1,c2) -> subst_type env sigma (T.subst1 h c2) rest | _ -> Util.anomaly "Non-functional construction" let sort_of_atomic_type env sigma ft args = let rec concl_of_arity env ar = match T.kind_of_term (DoubleTypeInference.whd_betadeltaiotacprop env sigma ar) with | T.Prod (na, t, b) -> concl_of_arity (Environ.push_rel (na,None,t) env) b | T.Sort s -> Coq_sort (T.family_of_sort s) | _ -> outsort env sigma (subst_type env sigma ft (Array.to_list args)) in concl_of_arity env ft let typeur sigma metamap = let rec type_of env cstr= match Term.kind_of_term cstr with | T.Meta n -> (try T.strip_outer_cast (List.assoc n metamap) with Not_found -> Util.anomaly "type_of: this is not a well-typed term") | T.Rel n -> let (_,_,ty) = Environ.lookup_rel n env in T.lift n ty | T.Var id -> (try let (_,_,ty) = Environ.lookup_named id env in ty with Not_found -> Util.anomaly ("type_of: variable "^(Names.string_of_id id)^" unbound")) | T.Const c -> let cb = Environ.lookup_constant c env in Typeops.type_of_constant_type env (cb.Declarations.const_type) | T.Evar ev -> Evd.existential_type sigma ev | T.Ind ind -> Inductiveops.type_of_inductive env ind | T.Construct cstr -> Inductiveops.type_of_constructor env cstr | T.Case (_,p,c,lf) -> let Inductiveops.IndType(_,realargs) = try Inductiveops.find_rectype env sigma (type_of env c) with Not_found -> Util.anomaly "type_of: Bad recursive type" in let t = Reductionops.whd_beta sigma (T.applist (p, realargs)) in (match Term.kind_of_term (DoubleTypeInference.whd_betadeltaiotacprop env sigma (type_of env t)) with | T.Prod _ -> Reductionops.whd_beta sigma (T.applist (t, [c])) | _ -> t) | T.Lambda (name,c1,c2) -> T.mkProd (name, c1, type_of (Environ.push_rel (name,None,c1) env) c2) | T.LetIn (name,b,c1,c2) -> T.subst1 b (type_of (Environ.push_rel (name,Some b,c1) env) c2) | T.Fix ((_,i),(_,tys,_)) -> tys.(i) | T.CoFix (i,(_,tys,_)) -> tys.(i) | T.App(f,args)-> T.strip_outer_cast (subst_type env sigma (type_of env f) (Array.to_list args)) | T.Cast (c,_, t) -> t | T.Sort _ | T.Prod _ -> match sort_of env cstr with Coq_sort T.InProp -> T.mkProp | Coq_sort T.InSet -> T.mkSet | Coq_sort 
T.InType -> T.mkType Univ.type1_univ (* ERROR HERE *) | CProp -> T.mkConst DoubleTypeInference.cprop and sort_of env t = match Term.kind_of_term t with | T.Cast (c,_, s) when T.isSort s -> family_of_term s | T.Sort (T.Prop c) -> Coq_sort T.InType | T.Sort (T.Type u) -> Coq_sort T.InType | T.Prod (name,t,c2) -> (match sort_of env t,sort_of (Environ.push_rel (name,None,t) env) c2 with | _, (Coq_sort T.InProp as s) -> s | Coq_sort T.InProp, (Coq_sort T.InSet as s) | Coq_sort T.InSet, (Coq_sort T.InSet as s) -> s | Coq_sort T.InType, (Coq_sort T.InSet as s) | CProp, (Coq_sort T.InSet as s) when Environ.engagement env = Some Declarations.ImpredicativeSet -> s | Coq_sort T.InType, Coq_sort T.InSet | CProp, Coq_sort T.InSet -> Coq_sort T.InType | _, (Coq_sort T.InType as s) -> s (*Type Univ.dummy_univ*) | _, (CProp as s) -> s) | T.App(f,args) -> sort_of_atomic_type env sigma (type_of env f) args | T.Lambda _ | T.Fix _ | T.Construct _ -> Util.anomaly "sort_of: Not a type (1)" | _ -> outsort env sigma (type_of env t) and sort_family_of env t = match T.kind_of_term t with | T.Cast (c,_, s) when T.isSort s -> family_of_term s | T.Sort (T.Prop c) -> Coq_sort T.InType | T.Sort (T.Type u) -> Coq_sort T.InType | T.Prod (name,t,c2) -> sort_family_of (Environ.push_rel (name,None,t) env) c2 | T.App(f,args) -> sort_of_atomic_type env sigma (type_of env f) args | T.Lambda _ | T.Fix _ | T.Construct _ -> Util.anomaly "sort_of: Not a type (1)" | _ -> outsort env sigma (type_of env t) in type_of, sort_of, sort_family_of let get_type_of env sigma c = let f,_,_ = typeur sigma [] in f env c let get_sort_family_of env sigma c = let _,_,f = typeur sigma [] in f env c end ;; let get_sort_family_of env evar_map ty = CPropRetyping.get_sort_family_of env evar_map ty ;; let type_as_sort env evar_map ty = (* CCorn code *) family_of_term (DoubleTypeInference.whd_betadeltaiotacprop env evar_map ty) ;; let is_a_Prop = function "Prop" | "CProp" -> true | _ -> false ;; (* Main Functions *) type anntypes = {annsynthesized : Acic.aconstr ; annexpected : Acic.aconstr option} ;; let gen_id seed = let res = "i" ^ string_of_int !seed in incr seed ; res ;; let fresh_id seed ids_to_terms constr_to_ids ids_to_father_ids = fun father t -> let res = gen_id seed in Hashtbl.add ids_to_father_ids res father ; Hashtbl.add ids_to_terms res t ; Acic.CicHash.add constr_to_ids t res ; res ;; let source_id_of_id id = "#source#" ^ id;; let acic_of_cic_context' computeinnertypes seed ids_to_terms constr_to_ids ids_to_father_ids ids_to_inner_sorts ids_to_inner_types ?(fake_dependent_products=false) env idrefs evar_map t expectedty = let module D = DoubleTypeInference in let module E = Environ in let module N = Names in let module A = Acic in let module T = Term in let fresh_id' = fresh_id seed ids_to_terms constr_to_ids ids_to_father_ids in (* CSC: do you have any reasonable substitute for 503? 
*) let terms_to_types = Acic.CicHash.create 503 in D.double_type_of env evar_map t expectedty terms_to_types ; let rec aux computeinnertypes father passed_lambdas_or_prods_or_letins env idrefs ?(subst=None,[]) tt = let fresh_id'' = fresh_id' father tt in let aux' = aux computeinnertypes (Some fresh_id'') [] in let string_of_sort_family = function Coq_sort T.InProp -> "Prop" | Coq_sort T.InSet -> "Set" | Coq_sort T.InType -> "Type" | CProp -> "CProp" in let string_of_sort t = string_of_sort_family (type_as_sort env evar_map t) in let ainnertypes,innertype,innersort,expected_available = let {D.synthesized = synthesized; D.expected = expected} = if computeinnertypes then try Acic.CicHash.find terms_to_types tt with e when e <> Sys.Break -> (*CSC: Warning: it really happens, for example in Ring_theory!!! *) Pp.ppnl (Pp.(++) (Pp.str "BUG: this subterm was not visited during the double-type-inference: ") (Printer.pr_lconstr tt)) ; assert false else (* We are already in an inner-type and Coscoy's double *) (* type inference algorithm has not been applied. *) (* We need to refresh the universes because we are doing *) (* type inference on an already inferred type. *) {D.synthesized = Reductionops.nf_beta evar_map (CPropRetyping.get_type_of env evar_map (Termops.refresh_universes tt)) ; D.expected = None} in (* Debugging only: print_endline "TERMINE:" ; flush stdout ; Pp.ppnl (Printer.pr_lconstr tt) ; flush stdout ; print_endline "TIPO:" ; flush stdout ; Pp.ppnl (Printer.pr_lconstr synthesized) ; flush stdout ; print_endline "ENVIRONMENT:" ; flush stdout ; Pp.ppnl (Printer.pr_context_of env) ; flush stdout ; print_endline "FINE_ENVIRONMENT" ; flush stdout ; *) let innersort = let synthesized_innersort = get_sort_family_of env evar_map synthesized in match expected with None -> synthesized_innersort | Some ty -> let expected_innersort = get_sort_family_of env evar_map ty in match expected_innersort, synthesized_innersort with CProp, _ | _, CProp -> CProp | _, _ -> expected_innersort in (* Debugging only: print_endline "PASSATO" ; flush stdout ; *) let ainnertypes,expected_available = if computeinnertypes then let annexpected,expected_available = match expected with None -> None,false | Some expectedty' -> Some (aux false (Some fresh_id'') [] env idrefs expectedty'), true in Some {annsynthesized = aux false (Some fresh_id'') [] env idrefs synthesized ; annexpected = annexpected }, expected_available else None,false in ainnertypes,synthesized, string_of_sort_family innersort, expected_available in let add_inner_type id = match ainnertypes with None -> () | Some ainnertypes -> Hashtbl.add ids_to_inner_types id ainnertypes in (* explicit_substitute_and_eta_expand_if_required h t t' *) (* where [t] = [] and [tt] = [h]{[t']} ("{.}" denotes explicit *) (* named substitution) or [tt] = (App [h]::[t]) (and [t'] = []) *) (* check if [h] is a term that requires an explicit named *) (* substitution and, in that case, uses the first arguments of *) (* [t] as the actual arguments of the substitution. If there *) (* are not enough parameters in the list [t], then eta-expansion *) (* is performed. 
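(* Hypothetical illustration of the eta-expansion described here: if the head
   [h] still expects two variables of its discharged section context that are
   not among the supplied arguments, the exported term becomes
     fun x1 x2 => h a1 ... an x1 x2
   that is, the arguments already at hand are lifted under the new binders and
   the missing ones are taken from the freshly introduced Rels, so that the
   explicit named substitution can be completed. *)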
*) let explicit_substitute_and_eta_expand_if_required h t t' compute_result_if_eta_expansion_not_required = let subst,residual_args,uninst_vars = let variables,basedir = try let g = Libnames.global_of_constr h in let sp = match g with Libnames.ConstructRef ((induri,_),_) | Libnames.IndRef (induri,_) -> Nametab.path_of_global (Libnames.IndRef (induri,0)) | Libnames.VarRef id -> (* Invariant: variables are never cooked in Coq *) raise Not_found | _ -> Nametab.path_of_global g in Dischargedhypsmap.get_discharged_hyps sp, get_module_path_of_full_path sp with Not_found -> (* no explicit substitution *) [], Libnames.dirpath_of_string "dummy" in (* returns a triple whose first element is *) (* an explicit named substitution of "type" *) (* (variable * argument) list, whose *) (* second element is the list of residual *) (* arguments and whose third argument is *) (* the list of uninstantiated variables *) let rec get_explicit_subst variables arguments = match variables,arguments with [],_ -> [],arguments,[] | _,[] -> [],[],variables | he1::tl1,he2::tl2 -> let subst,extra_args,uninst = get_explicit_subst tl1 tl2 in let (he1_sp, he1_id) = Libnames.repr_path he1 in let he1' = remove_module_dirpath_from_dirpath ~basedir he1_sp in let he1'' = String.concat "/" (List.map Names.string_of_id (List.rev he1')) ^ "/" ^ (Names.string_of_id he1_id) ^ ".var" in (he1'',he2)::subst, extra_args, uninst in get_explicit_subst variables t' in let uninst_vars_length = List.length uninst_vars in if uninst_vars_length > 0 then (* Not enough arguments provided. We must eta-expand! *) let un_args,_ = T.decompose_prod_n uninst_vars_length (CPropRetyping.get_type_of env evar_map tt) in let eta_expanded = let arguments = List.map (T.lift uninst_vars_length) t @ Termops.rel_list 0 uninst_vars_length in Unshare.unshare (T.lamn uninst_vars_length un_args (T.applistc h arguments)) in D.double_type_of env evar_map eta_expanded None terms_to_types ; Hashtbl.remove ids_to_inner_types fresh_id'' ; aux' env idrefs eta_expanded else compute_result_if_eta_expansion_not_required subst residual_args in (* Now that we have all the auxiliary functions we *) (* can finally proceed with the main case analysis. 
*) match T.kind_of_term tt with T.Rel n -> let id = match List.nth (E.rel_context env) (n - 1) with (N.Name id,_,_) -> id | (N.Anonymous,_,_) -> Nameops.make_ident "_" None in Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort && expected_available then add_inner_type fresh_id'' ; A.ARel (fresh_id'', n, List.nth idrefs (n-1), id) | T.Var id -> let pvars = Termops.ids_of_named_context (E.named_context env) in let pvars = List.map N.string_of_id pvars in let path = get_uri_of_var (N.string_of_id id) pvars in Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort && expected_available then add_inner_type fresh_id'' ; A.AVar (fresh_id'', path ^ "/" ^ (N.string_of_id id) ^ ".var") | T.Evar (n,l) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort && expected_available then add_inner_type fresh_id'' ; A.AEvar (fresh_id'', n, Array.to_list (Array.map (aux' env idrefs) l)) | T.Meta _ -> Util.anomaly "Meta met during exporting to XML" | T.Sort s -> A.ASort (fresh_id'', s) | T.Cast (v,_, t) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort then add_inner_type fresh_id'' ; A.ACast (fresh_id'', aux' env idrefs v, aux' env idrefs t) | T.Prod (n,s,t) -> let n' = match n with N.Anonymous -> N.Anonymous | _ -> if not fake_dependent_products && T.noccurn 1 t then N.Anonymous else N.Name (Namegen.next_name_away n (Termops.ids_of_context env)) in Hashtbl.add ids_to_inner_sorts fresh_id'' (string_of_sort innertype) ; let sourcetype = CPropRetyping.get_type_of env evar_map s in Hashtbl.add ids_to_inner_sorts (source_id_of_id fresh_id'') (string_of_sort sourcetype) ; let new_passed_prods = let father_is_prod = match father with None -> false | Some father' -> match Term.kind_of_term (Hashtbl.find ids_to_terms father') with T.Prod _ -> true | _ -> false in (fresh_id'', n', aux' env idrefs s):: (if father_is_prod then passed_lambdas_or_prods_or_letins else []) in let new_env = E.push_rel (n', None, s) env in let new_idrefs = fresh_id''::idrefs in (match Term.kind_of_term t with T.Prod _ -> aux computeinnertypes (Some fresh_id'') new_passed_prods new_env new_idrefs t | _ -> A.AProds (new_passed_prods, aux' new_env new_idrefs t)) | T.Lambda (n,s,t) -> let n' = match n with N.Anonymous -> N.Anonymous | _ -> N.Name (Namegen.next_name_away n (Termops.ids_of_context env)) in Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; let sourcetype = CPropRetyping.get_type_of env evar_map s in Hashtbl.add ids_to_inner_sorts (source_id_of_id fresh_id'') (string_of_sort sourcetype) ; let father_is_lambda = match father with None -> false | Some father' -> match Term.kind_of_term (Hashtbl.find ids_to_terms father') with T.Lambda _ -> true | _ -> false in if is_a_Prop innersort && ((not father_is_lambda) || expected_available) then add_inner_type fresh_id'' ; let new_passed_lambdas = (fresh_id'',n', aux' env idrefs s):: (if father_is_lambda then passed_lambdas_or_prods_or_letins else []) in let new_env = E.push_rel (n', None, s) env in let new_idrefs = fresh_id''::idrefs in (match Term.kind_of_term t with T.Lambda _ -> aux computeinnertypes (Some fresh_id'') new_passed_lambdas new_env new_idrefs t | _ -> let t' = aux' new_env new_idrefs t in (* eta-expansion for explicit named substitutions *) (* can create nested Lambdas. Here we perform the *) (* flattening. 
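(* Sketch of the flattening done just below (constructors as in the
   surrounding code): when the recursive call already returned an
   [A.ALambdas (inner, body)] node, the binders accumulated so far are
   appended to [inner] instead of wrapping a second [ALambdas] node, so a
   whole block of consecutive lambdas always reaches the output as a single
   node. *)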
*) match t' with A.ALambdas (lambdas, t'') -> A.ALambdas (lambdas@new_passed_lambdas, t'') | _ -> A.ALambdas (new_passed_lambdas, t') ) | T.LetIn (n,s,t,d) -> let id = match n with N.Anonymous -> N.id_of_string "_X" | N.Name id -> id in let n' = N.Name (Namegen.next_ident_away id (Termops.ids_of_context env)) in Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; let sourcesort = get_sort_family_of env evar_map (CPropRetyping.get_type_of env evar_map s) in Hashtbl.add ids_to_inner_sorts (source_id_of_id fresh_id'') (string_of_sort_family sourcesort) ; let father_is_letin = match father with None -> false | Some father' -> match Term.kind_of_term (Hashtbl.find ids_to_terms father') with T.LetIn _ -> true | _ -> false in if is_a_Prop innersort then add_inner_type fresh_id'' ; let new_passed_letins = (fresh_id'',n', aux' env idrefs s):: (if father_is_letin then passed_lambdas_or_prods_or_letins else []) in let new_env = E.push_rel (n', Some s, t) env in let new_idrefs = fresh_id''::idrefs in (match Term.kind_of_term d with T.LetIn _ -> aux computeinnertypes (Some fresh_id'') new_passed_letins new_env new_idrefs d | _ -> A.ALetIns (new_passed_letins, aux' new_env new_idrefs d)) | T.App (h,t) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort then add_inner_type fresh_id'' ; let compute_result_if_eta_expansion_not_required subst residual_args = let residual_args_not_empty = residual_args <> [] in let h' = if residual_args_not_empty then aux' env idrefs ~subst:(None,subst) h else aux' env idrefs ~subst:(Some fresh_id'',subst) h in (* maybe all the arguments were used for the explicit *) (* named substitution *) if residual_args_not_empty then A.AApp (fresh_id'', h'::residual_args) else h' in let t' = Array.fold_right (fun x i -> (aux' env idrefs x)::i) t [] in explicit_substitute_and_eta_expand_if_required h (Array.to_list t) t' compute_result_if_eta_expansion_not_required | T.Const kn -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort && expected_available then add_inner_type fresh_id'' ; let compute_result_if_eta_expansion_not_required _ _ = A.AConst (fresh_id'', subst, (uri_of_kernel_name (Constant kn))) in let (_,subst') = subst in explicit_substitute_and_eta_expand_if_required tt [] (List.map snd subst') compute_result_if_eta_expansion_not_required | T.Ind (kn,i) -> let compute_result_if_eta_expansion_not_required _ _ = A.AInd (fresh_id'', subst, (uri_of_kernel_name (Inductive kn)), i) in let (_,subst') = subst in explicit_substitute_and_eta_expand_if_required tt [] (List.map snd subst') compute_result_if_eta_expansion_not_required | T.Construct ((kn,i),j) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort && expected_available then add_inner_type fresh_id'' ; let compute_result_if_eta_expansion_not_required _ _ = A.AConstruct (fresh_id'', subst, (uri_of_kernel_name (Inductive kn)), i, j) in let (_,subst') = subst in explicit_substitute_and_eta_expand_if_required tt [] (List.map snd subst') compute_result_if_eta_expansion_not_required | T.Case ({T.ci_ind=(kn,i)},ty,term,a) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort then add_inner_type fresh_id'' ; let a' = Array.fold_right (fun x i -> (aux' env idrefs x)::i) a [] in A.ACase (fresh_id'', (uri_of_kernel_name (Inductive kn)), i, aux' env idrefs ty, aux' env idrefs term, a') | T.Fix ((ai,i),(f,t,b)) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort then add_inner_type fresh_id'' ; let 
fresh_idrefs = Array.init (Array.length t) (function _ -> gen_id seed) in let new_idrefs = (List.rev (Array.to_list fresh_idrefs)) @ idrefs in let f' = let ids = ref (Termops.ids_of_context env) in Array.map (function N.Anonymous -> Util.error "Anonymous fix function met" | N.Name id as n -> let res = N.Name (Namegen.next_name_away n !ids) in ids := id::!ids ; res ) f in A.AFix (fresh_id'', i, Array.fold_right (fun (id,fi,ti,bi,ai) i -> let fi' = match fi with N.Name fi -> fi | N.Anonymous -> Util.error "Anonymous fix function met" in (id, fi', ai, aux' env idrefs ti, aux' (E.push_rec_types (f',t,b) env) new_idrefs bi)::i) (Array.mapi (fun j x -> (fresh_idrefs.(j),x,t.(j),b.(j),ai.(j))) f' ) [] ) | T.CoFix (i,(f,t,b)) -> Hashtbl.add ids_to_inner_sorts fresh_id'' innersort ; if is_a_Prop innersort then add_inner_type fresh_id'' ; let fresh_idrefs = Array.init (Array.length t) (function _ -> gen_id seed) in let new_idrefs = (List.rev (Array.to_list fresh_idrefs)) @ idrefs in let f' = let ids = ref (Termops.ids_of_context env) in Array.map (function N.Anonymous -> Util.error "Anonymous fix function met" | N.Name id as n -> let res = N.Name (Namegen.next_name_away n !ids) in ids := id::!ids ; res ) f in A.ACoFix (fresh_id'', i, Array.fold_right (fun (id,fi,ti,bi) i -> let fi' = match fi with N.Name fi -> fi | N.Anonymous -> Util.error "Anonymous fix function met" in (id, fi', aux' env idrefs ti, aux' (E.push_rec_types (f',t,b) env) new_idrefs bi)::i) (Array.mapi (fun j x -> (fresh_idrefs.(j),x,t.(j),b.(j)) ) f' ) [] ) in aux computeinnertypes None [] env idrefs t ;; (* Obsolete [HH 1/2009] let acic_of_cic_context metasenv context t = let ids_to_terms = Hashtbl.create 503 in let constr_to_ids = Acic.CicHash.create 503 in let ids_to_father_ids = Hashtbl.create 503 in let ids_to_inner_sorts = Hashtbl.create 503 in let ids_to_inner_types = Hashtbl.create 503 in let seed = ref 0 in acic_of_cic_context' true seed ids_to_terms constr_to_ids ids_to_father_ids ids_to_inner_sorts ids_to_inner_types metasenv context t, ids_to_terms, ids_to_father_ids, ids_to_inner_sorts, ids_to_inner_types ;; *) let acic_object_of_cic_object sigma obj = let module A = Acic in let ids_to_terms = Hashtbl.create 503 in let constr_to_ids = Acic.CicHash.create 503 in let ids_to_father_ids = Hashtbl.create 503 in let ids_to_inner_sorts = Hashtbl.create 503 in let ids_to_inner_types = Hashtbl.create 503 in let ids_to_conjectures = Hashtbl.create 11 in let ids_to_hypotheses = Hashtbl.create 127 in let hypotheses_seed = ref 0 in let conjectures_seed = ref 0 in let seed = ref 0 in let acic_term_of_cic_term_context' = acic_of_cic_context' true seed ids_to_terms constr_to_ids ids_to_father_ids ids_to_inner_sorts ids_to_inner_types in (*CSC: is this the right env to use? Hhmmm. There is a problem: in *) (*CSC: Global.env () the object we are exporting is already defined, *) (*CSC: either in the environment or in the named context (in the case *) (*CSC: of variables. Is this a problem? *) let env = Global.env () in let acic_term_of_cic_term' ?fake_dependent_products = acic_term_of_cic_term_context' ?fake_dependent_products env [] sigma in (*CSC: the fresh_id is not stored anywhere. 
This _MUST_ be fixed using *) (*CSC: a modified version of the already existent fresh_id function *) let fresh_id () = let res = "i" ^ string_of_int !seed in incr seed ; res in let aobj = match obj with A.Constant (id,bo,ty,params) -> let abo = match bo with None -> None | Some bo' -> Some (acic_term_of_cic_term' bo' (Some ty)) in let aty = acic_term_of_cic_term' ty None in A.AConstant (fresh_id (),id,abo,aty,params) | A.Variable (id,bo,ty,params) -> let abo = match bo with Some bo -> Some (acic_term_of_cic_term' bo (Some ty)) | None -> None in let aty = acic_term_of_cic_term' ty None in A.AVariable (fresh_id (),id,abo,aty,params) | A.CurrentProof (id,conjectures,bo,ty) -> let aconjectures = List.map (function (i,canonical_context,term) as conjecture -> let cid = "c" ^ string_of_int !conjectures_seed in Hashtbl.add ids_to_conjectures cid conjecture ; incr conjectures_seed ; let canonical_env,idrefs',acanonical_context = let rec aux env idrefs = function [] -> env,idrefs,[] | ((n,decl_or_def) as hyp)::tl -> let hid = "h" ^ string_of_int !hypotheses_seed in let new_idrefs = hid::idrefs in Hashtbl.add ids_to_hypotheses hid hyp ; incr hypotheses_seed ; match decl_or_def with A.Decl t -> let final_env,final_idrefs,atl = aux (Environ.push_rel (Names.Name n,None,t) env) new_idrefs tl in let at = acic_term_of_cic_term_context' env idrefs sigma t None in final_env,final_idrefs,(hid,(n,A.Decl at))::atl | A.Def (t,ty) -> let final_env,final_idrefs,atl = aux (Environ.push_rel (Names.Name n,Some t,ty) env) new_idrefs tl in let at = acic_term_of_cic_term_context' env idrefs sigma t None in let dummy_never_used = let s = "dummy_never_used" in A.ARel (s,99,s,Names.id_of_string s) in final_env,final_idrefs, (hid,(n,A.Def (at,dummy_never_used)))::atl in aux env [] canonical_context in let aterm = acic_term_of_cic_term_context' canonical_env idrefs' sigma term None in (cid,i,List.rev acanonical_context,aterm) ) conjectures in let abo = acic_term_of_cic_term_context' env [] sigma bo (Some ty) in let aty = acic_term_of_cic_term_context' env [] sigma ty None in A.ACurrentProof (fresh_id (),id,aconjectures,abo,aty) | A.InductiveDefinition (tys,params,paramsno) -> let env' = List.fold_right (fun (name,_,arity,_) env -> Environ.push_rel (Names.Name name, None, arity) env ) (List.rev tys) env in let idrefs = List.map (function _ -> gen_id seed) tys in let atys = List.map2 (fun id (name,inductive,ty,cons) -> let acons = List.map (function (name,ty) -> (name, acic_term_of_cic_term_context' ~fake_dependent_products:true env' idrefs Evd.empty ty None) ) cons in let aty = acic_term_of_cic_term' ~fake_dependent_products:true ty None in (id,name,inductive,aty,acons) ) (List.rev idrefs) tys in A.AInductiveDefinition (fresh_id (),atys,params,paramsno) in aobj,ids_to_terms,constr_to_ids,ids_to_father_ids,ids_to_inner_sorts, ids_to_inner_types,ids_to_conjectures,ids_to_hypotheses ;; coq-8.4pl2/plugins/xml/cic.dtd0000640000175000001440000001644011160567762015363 0ustar notinusers coq-8.4pl2/plugins/xml/cic2Xml.ml0000640000175000001440000000115211254456226015751 0ustar notinuserslet print_xml_term ch env sigma cic = let ids_to_terms = Hashtbl.create 503 in let constr_to_ids = Acic.CicHash.create 503 in let ids_to_father_ids = Hashtbl.create 503 in let ids_to_inner_sorts = Hashtbl.create 503 in let ids_to_inner_types = Hashtbl.create 503 in let seed = ref 0 in let acic = Cic2acic.acic_of_cic_context' true seed ids_to_terms constr_to_ids ids_to_father_ids ids_to_inner_sorts ids_to_inner_types env [] sigma (Unshare.unshare cic) 
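(* Why the term is unshared first (note, not from the original source): the
   annotation tables used by the exporter, such as Acic.CicHash above, are
   keyed on the term nodes themselves, so physically shared subterms would be
   conflated and could not carry one id per occurrence; [Unshare.unshare],
   whose implementation appears later in this archive, deep-copies the term so
   that every occurrence becomes a distinct node. *)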
None in let xml = Acic2Xml.print_term ids_to_inner_sorts acic in Xml.pp_ch xml ch ;; Tacinterp.declare_xml_printer print_xml_term ;; coq-8.4pl2/plugins/xml/COPYRIGHT0000640000175000001440000000247011160567762015421 0ustar notinusers(******************************************************************************) (* Copyright (C) 2000-2004, Claudio Sacerdoti Coen *) (* Project Helm (http://helm.cs.unibo.it) *) (* Project MoWGLI (http://mowgli.cs.unibo.it) *) (* *) (* Coq Exportation to XML *) (* *) (******************************************************************************) This Coq module has been developed by Claudio Sacerdoti Coen as a developer of projects HELM and MoWGLI. Project HELM (for Hypertextual Electronic Library of Mathematics) is a project developed at the Department of Computer Science, University of Bologna; http://helm.cs.unibo.it Project MoWGLI (Mathematics on the Web: Get It by Logics and Interfaces) is a UE IST project that generalizes and extends the HELM project; http://mowgli.cs.unibo.it The author is interested in any possible usage of the module. So, if you plan to use the module, please send him an e-mail. The licensing policy applied to the module is the same as for the whole Coq distribution. coq-8.4pl2/plugins/xml/theoryobject.dtd0000640000175000001440000000540611160567762017326 0ustar notinusers coq-8.4pl2/plugins/xml/doubleTypeInference.mli0000640000175000001440000000241311422606552020551 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Evd.evar_map -> Term.constr -> Term.constr val double_type_of : Environ.env -> Evd.evar_map -> Term.constr -> Term.constr option -> types Acic.CicHash.t -> unit coq-8.4pl2/plugins/xml/unshare.ml0000640000175000001440000000406211422606552016114 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* false) t = let obj = Obj.repr t in let rec aux obj = if already_unshared (Obj.obj obj) then obj else (if Obj.is_int obj then obj else if Obj.is_block obj then begin let tag = Obj.tag obj in if tag < Obj.no_scan_tag then begin let size = Obj.size obj in let new_obj = Obj.new_block 0 size in Obj.set_tag new_obj tag ; for i = 0 to size - 1 do Obj.set_field new_obj i (aux (Obj.field obj i)) done ; new_obj end else if tag = Obj.string_tag then obj else raise CanNotUnshare end else raise CanNotUnshare ) in Obj.obj (aux obj) ;; coq-8.4pl2/plugins/xml/xml_plugin.mllib0000640000175000001440000000021011161000644017271 0ustar notinusersUnshare Xml Acic DoubleTypeInference Cic2acic Acic2Xml Proof2aproof Xmlcommand ProofTree2Xml Xmlentries Cic2Xml Dumptree Xml_plugin_mod coq-8.4pl2/plugins/interface/0000750000175000001440000000000012127276541015256 5ustar notinuserscoq-8.4pl2/plugins/pluginsopt.itarget0000640000175000001440000000117111743265053017103 0ustar notinusersfield/field_plugin.cmxa setoid_ring/newring_plugin.cmxa extraction/extraction_plugin.cmxa decl_mode/decl_mode_plugin.cmxa firstorder/ground_plugin.cmxa rtauto/rtauto_plugin.cmxa fourier/fourier_plugin.cmxa romega/romega_plugin.cmxa omega/omega_plugin.cmxa micromega/micromega_plugin.cmxa xml/xml_plugin.cmxa subtac/subtac_plugin.cmxa ring/ring_plugin.cmxa cc/cc_plugin.cmxa nsatz/nsatz_plugin.cmxa funind/recdef_plugin.cmxa syntax/ascii_syntax_plugin.cmxa syntax/nat_syntax_plugin.cmxa syntax/numbers_syntax_plugin.cmxa syntax/r_syntax_plugin.cmxa syntax/string_syntax_plugin.cmxa 
syntax/z_syntax_plugin.cmxa quote/quote_plugin.cmxa coq-8.4pl2/plugins/pluginsbyte.itarget0000640000175000001440000000114211743265053017242 0ustar notinusersfield/field_plugin.cma setoid_ring/newring_plugin.cma extraction/extraction_plugin.cma decl_mode/decl_mode_plugin.cma firstorder/ground_plugin.cma rtauto/rtauto_plugin.cma fourier/fourier_plugin.cma romega/romega_plugin.cma omega/omega_plugin.cma micromega/micromega_plugin.cma xml/xml_plugin.cma subtac/subtac_plugin.cma ring/ring_plugin.cma cc/cc_plugin.cma nsatz/nsatz_plugin.cma funind/recdef_plugin.cma syntax/ascii_syntax_plugin.cma syntax/nat_syntax_plugin.cma syntax/numbers_syntax_plugin.cma syntax/r_syntax_plugin.cma syntax/string_syntax_plugin.cma syntax/z_syntax_plugin.cma quote/quote_plugin.cma coq-8.4pl2/plugins/micromega/0000750000175000001440000000000012127276541015261 5ustar notinuserscoq-8.4pl2/plugins/micromega/micromega_plugin.mllib0000640000175000001440000000017311561721013021613 0ustar notinusersSos_types Mutils Micromega Polynomial Mfourier Certificate Persistent_cache Coq_micromega G_micromega Micromega_plugin_mod coq-8.4pl2/plugins/micromega/ZCoeff.v0000640000175000001440000001216512010532755016623 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R. Variable ropp : R -> R. Variables req rle rlt : R -> R -> Prop. Variable sor : SOR rO rI rplus rtimes rminus ropp req rle rlt. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (rplus x y). Notation "x * y " := (rtimes x y). Notation "x - y " := (rminus x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). Notation "x ~= y" := (~ req x y). Notation "x <= y" := (rle x y). Notation "x < y" := (rlt x y). Lemma req_refl : forall x, req x x. Proof. destruct sor.(SORsetoid). apply Equivalence_Reflexive. Qed. Lemma req_sym : forall x y, req x y -> req y x. Proof. destruct sor.(SORsetoid). apply Equivalence_Symmetric. Qed. Lemma req_trans : forall x y z, req x y -> req y z -> req x z. Proof. destruct sor.(SORsetoid). apply Equivalence_Transitive. Qed. Add Relation R req reflexivity proved by sor.(SORsetoid).(@Equivalence_Reflexive _ _) symmetry proved by sor.(SORsetoid).(@Equivalence_Symmetric _ _) transitivity proved by sor.(SORsetoid).(@Equivalence_Transitive _ _) as sor_setoid. Add Morphism rplus with signature req ==> req ==> req as rplus_morph. Proof. exact sor.(SORplus_wd). Qed. Add Morphism rtimes with signature req ==> req ==> req as rtimes_morph. Proof. exact sor.(SORtimes_wd). Qed. Add Morphism ropp with signature req ==> req as ropp_morph. Proof. exact sor.(SORopp_wd). Qed. Add Morphism rle with signature req ==> req ==> iff as rle_morph. Proof. exact sor.(SORle_wd). Qed. Add Morphism rlt with signature req ==> req ==> iff as rlt_morph. Proof. exact sor.(SORlt_wd). Qed. Add Morphism rminus with signature req ==> req ==> req as rminus_morph. Proof. exact (rminus_morph sor). Qed. Ltac le_less := rewrite (Rle_lt_eq sor); left; try assumption. Ltac le_equal := rewrite (Rle_lt_eq sor); right; try reflexivity; try assumption. Definition gen_order_phi_Z : Z -> R := gen_phiZ 0 1 rplus rtimes ropp. Notation phi_pos := (gen_phiPOS 1 rplus rtimes). Notation phi_pos1 := (gen_phiPOS1 1 rplus rtimes). Notation "[ x ]" := (gen_order_phi_Z x). Lemma ring_ops_wd : ring_eq_ext rplus rtimes ropp req. Proof. constructor. exact rplus_morph. exact rtimes_morph. exact ropp_morph. Qed. 
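(* Hedged illustration -- NOT part of the original ZCoeff.v: the bracket
   notation [ x ] defined just above injects an integer constant into the
   abstract ordered ring R via gen_phiZ.  Two definitional sanity checks,
   assuming only the section context already in scope at this point. *)
Lemma gen_order_phi_Z_0_example : [Z0] == 0.
Proof. exact (req_refl 0). Qed.

Lemma gen_order_phi_Z_1_example : [Zpos xH] == 1.
Proof. exact (req_refl 1). Qed.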
Lemma Zring_morph : ring_morph 0 1 rplus rtimes rminus ropp req 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool gen_order_phi_Z. Proof. exact (gen_phiZ_morph sor.(SORsetoid) ring_ops_wd sor.(SORrt)). Qed. Lemma phi_pos1_pos : forall x : positive, 0 < phi_pos1 x. Proof. induction x as [x IH | x IH |]; simpl; try apply (Rplus_pos_pos sor); try apply (Rtimes_pos_pos sor); try apply (Rplus_pos_pos sor); try apply (Rlt_0_1 sor); assumption. Qed. Lemma phi_pos1_succ : forall x : positive, phi_pos1 (Pos.succ x) == 1 + phi_pos1 x. Proof. exact (ARgen_phiPOS_Psucc sor.(SORsetoid) ring_ops_wd (Rth_ARth sor.(SORsetoid) ring_ops_wd sor.(SORrt))). Qed. Lemma clt_pos_morph : forall x y : positive, (x < y)%positive -> phi_pos1 x < phi_pos1 y. Proof. intros x y H. pattern y; apply Pos.lt_ind with x. rewrite phi_pos1_succ; apply (Rlt_succ_r sor). clear y H; intros y _ H. rewrite phi_pos1_succ. now apply (Rlt_lt_succ sor). assumption. Qed. Lemma clt_morph : forall x y : Z, (x < y)%Z -> [x] < [y]. Proof. intros x y H. do 2 rewrite (same_genZ sor.(SORsetoid) ring_ops_wd sor.(SORrt)); destruct x; destruct y; simpl in *; try discriminate. apply phi_pos1_pos. now apply clt_pos_morph. apply <- (Ropp_neg_pos sor); apply phi_pos1_pos. apply (Rlt_trans sor) with 0. apply <- (Ropp_neg_pos sor); apply phi_pos1_pos. apply phi_pos1_pos. apply -> (Ropp_lt_mono sor); apply clt_pos_morph. red. now rewrite Pos.compare_antisym. Qed. Lemma Zcleb_morph : forall x y : Z, Z.leb x y = true -> [x] <= [y]. Proof. unfold Z.leb; intros x y H. case_eq (x ?= y)%Z; intro H1; rewrite H1 in H. le_equal. apply Zring_morph.(morph_eq). unfold Zeq_bool; now rewrite H1. le_less. now apply clt_morph. discriminate. Qed. Lemma Zcneqb_morph : forall x y : Z, Zeq_bool x y = false -> [x] ~= [y]. Proof. intros x y H. unfold Zeq_bool in H. case_eq (Z.compare x y); intro H1; rewrite H1 in *; (discriminate || clear H). apply (Rlt_neq sor). now apply clt_morph. fold (x > y)%Z in H1. rewrite Z.gt_lt_iff in H1. apply (Rneq_symm sor). apply (Rlt_neq sor). now apply clt_morph. Qed. End InitialMorphism. 
coq-8.4pl2/plugins/micromega/vo.itarget0000640000175000001440000000023211307752066017264 0ustar notinusersCheckerMaker.vo EnvRing.vo Env.vo OrderedRing.vo Psatz.vo QMicromega.vo Refl.vo RingMicromega.vo RMicromega.vo Tauto.vo VarMap.vo ZCoeff.vo ZMicromega.vo coq-8.4pl2/plugins/micromega/micromega.mli0000640000175000001440000006014111565517202017727 0ustar notinuserstype __ = Obj.t val negb : bool -> bool type nat = | O | S of nat val fst : ('a1 * 'a2) -> 'a1 val snd : ('a1 * 'a2) -> 'a2 val app : 'a1 list -> 'a1 list -> 'a1 list type comparison = | Eq | Lt | Gt val compOpp : comparison -> comparison type compareSpecT = | CompEqT | CompLtT | CompGtT val compareSpec2Type : comparison -> compareSpecT type 'a compSpecT = compareSpecT val compSpec2Type : 'a1 -> 'a1 -> comparison -> 'a1 compSpecT type 'a sig0 = 'a (* singleton inductive, whose constructor was exist *) val plus : nat -> nat -> nat val nat_iter : nat -> ('a1 -> 'a1) -> 'a1 -> 'a1 type positive = | XI of positive | XO of positive | XH type n = | N0 | Npos of positive type z = | Z0 | Zpos of positive | Zneg of positive module type TotalOrder' = sig type t end module MakeOrderTac : functor (O:TotalOrder') -> sig end module MaxLogicalProperties : functor (O:TotalOrder') -> functor (M:sig val max : O.t -> O.t -> O.t end) -> sig module T : sig end end module Pos : sig type t = positive val succ : positive -> positive val add : positive -> positive -> positive val add_carry : positive -> positive -> positive val pred_double : positive -> positive val pred : positive -> positive val pred_N : positive -> n type mask = | IsNul | IsPos of positive | IsNeg val mask_rect : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 val mask_rec : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 val succ_double_mask : mask -> mask val double_mask : mask -> mask val double_pred_mask : positive -> mask val pred_mask : mask -> mask val sub_mask : positive -> positive -> mask val sub_mask_carry : positive -> positive -> mask val sub : positive -> positive -> positive val mul : positive -> positive -> positive val iter : positive -> ('a1 -> 'a1) -> 'a1 -> 'a1 val pow : positive -> positive -> positive val div2 : positive -> positive val div2_up : positive -> positive val size_nat : positive -> nat val size : positive -> positive val compare_cont : positive -> positive -> comparison -> comparison val compare : positive -> positive -> comparison val min : positive -> positive -> positive val max : positive -> positive -> positive val eqb : positive -> positive -> bool val leb : positive -> positive -> bool val ltb : positive -> positive -> bool val sqrtrem_step : (positive -> positive) -> (positive -> positive) -> (positive * mask) -> positive * mask val sqrtrem : positive -> positive * mask val sqrt : positive -> positive val gcdn : nat -> positive -> positive -> positive val gcd : positive -> positive -> positive val ggcdn : nat -> positive -> positive -> positive * (positive * positive) val ggcd : positive -> positive -> positive * (positive * positive) val coq_Nsucc_double : n -> n val coq_Ndouble : n -> n val coq_lor : positive -> positive -> positive val coq_land : positive -> positive -> n val ldiff : positive -> positive -> n val coq_lxor : positive -> positive -> n val shiftl_nat : positive -> nat -> positive val shiftr_nat : positive -> nat -> positive val shiftl : positive -> n -> positive val shiftr : positive -> n -> positive val testbit_nat : positive -> nat -> bool val testbit : positive -> n -> bool val iter_op : ('a1 -> 'a1 -> 'a1) -> positive -> 
'a1 -> 'a1 val to_nat : positive -> nat val of_nat : nat -> positive val of_succ_nat : nat -> positive end module Coq_Pos : sig module Coq__1 : sig type t = positive end type t = Coq__1.t val succ : positive -> positive val add : positive -> positive -> positive val add_carry : positive -> positive -> positive val pred_double : positive -> positive val pred : positive -> positive val pred_N : positive -> n type mask = Pos.mask = | IsNul | IsPos of positive | IsNeg val mask_rect : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 val mask_rec : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 val succ_double_mask : mask -> mask val double_mask : mask -> mask val double_pred_mask : positive -> mask val pred_mask : mask -> mask val sub_mask : positive -> positive -> mask val sub_mask_carry : positive -> positive -> mask val sub : positive -> positive -> positive val mul : positive -> positive -> positive val iter : positive -> ('a1 -> 'a1) -> 'a1 -> 'a1 val pow : positive -> positive -> positive val div2 : positive -> positive val div2_up : positive -> positive val size_nat : positive -> nat val size : positive -> positive val compare_cont : positive -> positive -> comparison -> comparison val compare : positive -> positive -> comparison val min : positive -> positive -> positive val max : positive -> positive -> positive val eqb : positive -> positive -> bool val leb : positive -> positive -> bool val ltb : positive -> positive -> bool val sqrtrem_step : (positive -> positive) -> (positive -> positive) -> (positive * mask) -> positive * mask val sqrtrem : positive -> positive * mask val sqrt : positive -> positive val gcdn : nat -> positive -> positive -> positive val gcd : positive -> positive -> positive val ggcdn : nat -> positive -> positive -> positive * (positive * positive) val ggcd : positive -> positive -> positive * (positive * positive) val coq_Nsucc_double : n -> n val coq_Ndouble : n -> n val coq_lor : positive -> positive -> positive val coq_land : positive -> positive -> n val ldiff : positive -> positive -> n val coq_lxor : positive -> positive -> n val shiftl_nat : positive -> nat -> positive val shiftr_nat : positive -> nat -> positive val shiftl : positive -> n -> positive val shiftr : positive -> n -> positive val testbit_nat : positive -> nat -> bool val testbit : positive -> n -> bool val iter_op : ('a1 -> 'a1 -> 'a1) -> positive -> 'a1 -> 'a1 val to_nat : positive -> nat val of_nat : nat -> positive val of_succ_nat : nat -> positive val eq_dec : positive -> positive -> bool val peano_rect : 'a1 -> (positive -> 'a1 -> 'a1) -> positive -> 'a1 val peano_rec : 'a1 -> (positive -> 'a1 -> 'a1) -> positive -> 'a1 type coq_PeanoView = | PeanoOne | PeanoSucc of positive * coq_PeanoView val coq_PeanoView_rect : 'a1 -> (positive -> coq_PeanoView -> 'a1 -> 'a1) -> positive -> coq_PeanoView -> 'a1 val coq_PeanoView_rec : 'a1 -> (positive -> coq_PeanoView -> 'a1 -> 'a1) -> positive -> coq_PeanoView -> 'a1 val peanoView_xO : positive -> coq_PeanoView -> coq_PeanoView val peanoView_xI : positive -> coq_PeanoView -> coq_PeanoView val peanoView : positive -> coq_PeanoView val coq_PeanoView_iter : 'a1 -> (positive -> 'a1 -> 'a1) -> positive -> coq_PeanoView -> 'a1 val switch_Eq : comparison -> comparison -> comparison val mask2cmp : mask -> comparison module T : sig end module ORev : sig type t = Coq__1.t end module MRev : sig val max : t -> t -> t end module MPRev : sig module T : sig end end module P : sig val max_case_strong : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 
'a1) -> 'a1 val max_case : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 val max_dec : t -> t -> bool val min_case_strong : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val min_case : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 val min_dec : t -> t -> bool end val max_case_strong : t -> t -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val max_case : t -> t -> 'a1 -> 'a1 -> 'a1 val max_dec : t -> t -> bool val min_case_strong : t -> t -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val min_case : t -> t -> 'a1 -> 'a1 -> 'a1 val min_dec : t -> t -> bool end module N : sig type t = n val zero : n val one : n val two : n val succ_double : n -> n val double : n -> n val succ : n -> n val pred : n -> n val succ_pos : n -> positive val add : n -> n -> n val sub : n -> n -> n val mul : n -> n -> n val compare : n -> n -> comparison val eqb : n -> n -> bool val leb : n -> n -> bool val ltb : n -> n -> bool val min : n -> n -> n val max : n -> n -> n val div2 : n -> n val even : n -> bool val odd : n -> bool val pow : n -> n -> n val log2 : n -> n val size : n -> n val size_nat : n -> nat val pos_div_eucl : positive -> n -> n * n val div_eucl : n -> n -> n * n val div : n -> n -> n val modulo : n -> n -> n val gcd : n -> n -> n val ggcd : n -> n -> n * (n * n) val sqrtrem : n -> n * n val sqrt : n -> n val coq_lor : n -> n -> n val coq_land : n -> n -> n val ldiff : n -> n -> n val coq_lxor : n -> n -> n val shiftl_nat : n -> nat -> n val shiftr_nat : n -> nat -> n val shiftl : n -> n -> n val shiftr : n -> n -> n val testbit_nat : n -> nat -> bool val testbit : n -> n -> bool val to_nat : n -> nat val of_nat : nat -> n val iter : n -> ('a1 -> 'a1) -> 'a1 -> 'a1 val eq_dec : n -> n -> bool val discr : n -> positive option val binary_rect : 'a1 -> (n -> 'a1 -> 'a1) -> (n -> 'a1 -> 'a1) -> n -> 'a1 val binary_rec : 'a1 -> (n -> 'a1 -> 'a1) -> (n -> 'a1 -> 'a1) -> n -> 'a1 val peano_rect : 'a1 -> (n -> 'a1 -> 'a1) -> n -> 'a1 val peano_rec : 'a1 -> (n -> 'a1 -> 'a1) -> n -> 'a1 module BootStrap : sig end val recursion : 'a1 -> (n -> 'a1 -> 'a1) -> n -> 'a1 module OrderElts : sig type t = n end module OrderTac : sig end module NZPowP : sig end module NZSqrtP : sig end val sqrt_up : n -> n val log2_up : n -> n module NZDivP : sig end val lcm : n -> n -> n val b2n : bool -> n val setbit : n -> n -> n val clearbit : n -> n -> n val ones : n -> n val lnot : n -> n -> n module T : sig end module ORev : sig type t = n end module MRev : sig val max : n -> n -> n end module MPRev : sig module T : sig end end module P : sig val max_case_strong : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val max_case : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 val max_dec : n -> n -> bool val min_case_strong : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val min_case : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 val min_dec : n -> n -> bool end val max_case_strong : n -> n -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val max_case : n -> n -> 'a1 -> 'a1 -> 'a1 val max_dec : n -> n -> bool val min_case_strong : n -> n -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val min_case : n -> n -> 'a1 -> 'a1 -> 'a1 val min_dec : n -> n -> bool end val pow_pos : ('a1 -> 'a1 -> 'a1) -> 'a1 -> positive -> 'a1 val nth : nat -> 'a1 list -> 'a1 -> 'a1 val map : ('a1 -> 'a2) -> 'a1 list -> 'a2 list val fold_right : ('a2 -> 'a1 -> 'a1) -> 'a1 -> 'a2 list -> 'a1 module Z : sig type t = z val zero : z val one : z val two : z val 
double : z -> z val succ_double : z -> z val pred_double : z -> z val pos_sub : positive -> positive -> z val add : z -> z -> z val opp : z -> z val succ : z -> z val pred : z -> z val sub : z -> z -> z val mul : z -> z -> z val pow_pos : z -> positive -> z val pow : z -> z -> z val compare : z -> z -> comparison val sgn : z -> z val leb : z -> z -> bool val geb : z -> z -> bool val ltb : z -> z -> bool val gtb : z -> z -> bool val eqb : z -> z -> bool val max : z -> z -> z val min : z -> z -> z val abs : z -> z val abs_nat : z -> nat val abs_N : z -> n val to_nat : z -> nat val to_N : z -> n val of_nat : nat -> z val of_N : n -> z val iter : z -> ('a1 -> 'a1) -> 'a1 -> 'a1 val pos_div_eucl : positive -> z -> z * z val div_eucl : z -> z -> z * z val div : z -> z -> z val modulo : z -> z -> z val quotrem : z -> z -> z * z val quot : z -> z -> z val rem : z -> z -> z val even : z -> bool val odd : z -> bool val div2 : z -> z val quot2 : z -> z val log2 : z -> z val sqrtrem : z -> z * z val sqrt : z -> z val gcd : z -> z -> z val ggcd : z -> z -> z * (z * z) val testbit : z -> z -> bool val shiftl : z -> z -> z val shiftr : z -> z -> z val coq_lor : z -> z -> z val coq_land : z -> z -> z val ldiff : z -> z -> z val coq_lxor : z -> z -> z val eq_dec : z -> z -> bool module BootStrap : sig end module OrderElts : sig type t = z end module OrderTac : sig end val sqrt_up : z -> z val log2_up : z -> z module NZDivP : sig end module Quot2Div : sig val div : z -> z -> z val modulo : z -> z -> z end module NZQuot : sig end val lcm : z -> z -> z val b2z : bool -> z val setbit : z -> z -> z val clearbit : z -> z -> z val lnot : z -> z val ones : z -> z module T : sig end module ORev : sig type t = z end module MRev : sig val max : z -> z -> z end module MPRev : sig module T : sig end end module P : sig val max_case_strong : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val max_case : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 val max_dec : z -> z -> bool val min_case_strong : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val min_case : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 val min_dec : z -> z -> bool end val max_case_strong : z -> z -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val max_case : z -> z -> 'a1 -> 'a1 -> 'a1 val max_dec : z -> z -> bool val min_case_strong : z -> z -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 val min_case : z -> z -> 'a1 -> 'a1 -> 'a1 val min_dec : z -> z -> bool end val zeq_bool : z -> z -> bool type 'c pol = | Pc of 'c | Pinj of positive * 'c pol | PX of 'c pol * positive * 'c pol val p0 : 'a1 -> 'a1 pol val p1 : 'a1 -> 'a1 pol val peq : ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> bool val mkPinj : positive -> 'a1 pol -> 'a1 pol val mkPinj_pred : positive -> 'a1 pol -> 'a1 pol val mkPX : 'a1 -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol val mkXi : 'a1 -> 'a1 -> positive -> 'a1 pol val mkX : 'a1 -> 'a1 -> 'a1 pol val popp : ('a1 -> 'a1) -> 'a1 pol -> 'a1 pol val paddC : ('a1 -> 'a1 -> 'a1) -> 'a1 pol -> 'a1 -> 'a1 pol val psubC : ('a1 -> 'a1 -> 'a1) -> 'a1 pol -> 'a1 -> 'a1 pol val paddI : ('a1 -> 'a1 -> 'a1) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol val psubI : ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol val paddX : 'a1 -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol val psubX : 'a1 -> ('a1 -> 'a1) -> 
('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol val padd : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol val psub : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol val pmulC_aux : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 -> 'a1 pol val pmulC : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 -> 'a1 pol val pmulI : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol val pmul : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol val psquare : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol type 'c pExpr = | PEc of 'c | PEX of positive | PEadd of 'c pExpr * 'c pExpr | PEsub of 'c pExpr * 'c pExpr | PEmul of 'c pExpr * 'c pExpr | PEopp of 'c pExpr | PEpow of 'c pExpr * n val mk_X : 'a1 -> 'a1 -> positive -> 'a1 pol val ppow_pos : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol) -> 'a1 pol -> 'a1 pol -> positive -> 'a1 pol val ppow_N : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol) -> 'a1 pol -> n -> 'a1 pol val norm_aux : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pExpr -> 'a1 pol type 'a bFormula = | TT | FF | X | A of 'a | Cj of 'a bFormula * 'a bFormula | D of 'a bFormula * 'a bFormula | N of 'a bFormula | I of 'a bFormula * 'a bFormula val map_bformula : ('a1 -> 'a2) -> 'a1 bFormula -> 'a2 bFormula type 'term' clause = 'term' list type 'term' cnf = 'term' clause list val tt : 'a1 cnf val ff : 'a1 cnf val add_term : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 -> 'a1 clause -> 'a1 clause option val or_clause : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 clause -> 'a1 clause -> 'a1 clause option val or_clause_cnf : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 clause -> 'a1 cnf -> 'a1 cnf val or_cnf : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 cnf -> 'a1 cnf -> 'a1 cnf val and_cnf : 'a1 cnf -> 'a1 cnf -> 'a1 cnf val xcnf : ('a2 -> bool) -> ('a2 -> 'a2 -> 'a2 option) -> ('a1 -> 'a2 cnf) -> ('a1 -> 'a2 cnf) -> bool -> 'a1 bFormula -> 'a2 cnf val cnf_checker : ('a1 list -> 'a2 -> bool) -> 'a1 cnf -> 'a2 list -> bool val tauto_checker : ('a2 -> bool) -> ('a2 -> 'a2 -> 'a2 option) -> ('a1 -> 'a2 cnf) -> ('a1 -> 'a2 cnf) -> ('a2 list -> 'a3 -> bool) -> 'a1 bFormula -> 'a3 list -> bool val cneqb : ('a1 -> 'a1 -> bool) -> 'a1 -> 'a1 -> bool val cltb : ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 -> 'a1 -> bool type 'c polC = 'c pol type op1 = | Equal | NonEqual | Strict | NonStrict type 'c nFormula = 'c polC * op1 val opMult : op1 -> op1 -> op1 option val opAdd : op1 -> op1 -> op1 option type 'c psatz = | PsatzIn of nat | PsatzSquare of 'c polC | PsatzMulC of 'c polC * 'c psatz | PsatzMulE of 'c psatz * 'c psatz | PsatzAdd of 'c psatz * 'c psatz | PsatzC of 'c | PsatzZ val map_option : ('a1 -> 'a2 option) -> 'a1 option -> 'a2 option val map_option2 : ('a1 -> 'a2 -> 'a3 option) -> 'a1 option -> 'a2 option -> 'a3 option val pexpr_times_nformula : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 polC -> 'a1 nFormula -> 
'a1 nFormula option val nformula_times_nformula : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula -> 'a1 nFormula -> 'a1 nFormula option val nformula_plus_nformula : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula -> 'a1 nFormula -> 'a1 nFormula option val eval_Psatz : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula list -> 'a1 psatz -> 'a1 nFormula option val check_inconsistent : 'a1 -> ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula -> bool val check_normalised_formulas : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula list -> 'a1 psatz -> bool type op2 = | OpEq | OpNEq | OpLe | OpGe | OpLt | OpGt type 't formula = { flhs : 't pExpr; fop : op2; frhs : 't pExpr } val flhs : 'a1 formula -> 'a1 pExpr val fop : 'a1 formula -> op2 val frhs : 'a1 formula -> 'a1 pExpr val norm : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pExpr -> 'a1 pol val psub0 : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol val padd0 : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol val xnormalise : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula list val cnf_normalise : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula cnf val xnegate : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula list val cnf_negate : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula cnf val xdenorm : positive -> 'a1 pol -> 'a1 pExpr val denorm : 'a1 pol -> 'a1 pExpr val map_PExpr : ('a2 -> 'a1) -> 'a2 pExpr -> 'a1 pExpr val map_Formula : ('a2 -> 'a1) -> 'a2 formula -> 'a1 formula val simpl_cone : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 psatz -> 'a1 psatz type q = { qnum : z; qden : positive } val qnum : q -> z val qden : q -> positive val qeq_bool : q -> q -> bool val qle_bool : q -> q -> bool val qplus : q -> q -> q val qmult : q -> q -> q val qopp : q -> q val qminus : q -> q -> q val qinv : q -> q val qpower_positive : q -> positive -> q val qpower : q -> z -> q type 'a t0 = | Empty | Leaf of 'a | Node of 'a t0 * 'a * 'a t0 val find : 'a1 -> 'a1 t0 -> positive -> 'a1 type zWitness = z psatz val zWeakChecker : z nFormula list -> z psatz -> bool val psub1 : z pol -> z pol -> z pol val padd1 : z pol -> z pol -> z pol val norm0 : z pExpr -> z pol val xnormalise0 : z formula -> z nFormula list val normalise : z formula -> z nFormula cnf val xnegate0 : z formula -> z nFormula list val negate : z formula -> z nFormula cnf val zunsat : z nFormula -> bool val zdeduce : z nFormula -> z nFormula -> z nFormula option val ceiling : z -> z -> z type zArithProof = | DoneProof | RatProof of zWitness * zArithProof | CutProof of zWitness * zArithProof | EnumProof of zWitness * zWitness * zArithProof list val zgcdM : z -> z -> z val zgcd_pol : z polC -> z * z val zdiv_pol : z polC -> z -> z polC val makeCuttingPlane : z polC 
-> z polC * z val genCuttingPlane : z nFormula -> ((z polC * z) * op1) option val nformula_of_cutting_plane : ((z polC * z) * op1) -> z nFormula val is_pol_Z0 : z polC -> bool val eval_Psatz0 : z nFormula list -> zWitness -> z nFormula option val valid_cut_sign : op1 -> bool val zChecker : z nFormula list -> zArithProof -> bool val zTautoChecker : z formula bFormula -> zArithProof list -> bool type qWitness = q psatz val qWeakChecker : q nFormula list -> q psatz -> bool val qnormalise : q formula -> q nFormula cnf val qnegate : q formula -> q nFormula cnf val qunsat : q nFormula -> bool val qdeduce : q nFormula -> q nFormula -> q nFormula option val qTautoChecker : q formula bFormula -> qWitness list -> bool type rcst = | C0 | C1 | CQ of q | CZ of z | CPlus of rcst * rcst | CMinus of rcst * rcst | CMult of rcst * rcst | CInv of rcst | COpp of rcst val q_of_Rcst : rcst -> q type rWitness = q psatz val rWeakChecker : q nFormula list -> q psatz -> bool val rnormalise : q formula -> q nFormula cnf val rnegate : q formula -> q nFormula cnf val runsat : q nFormula -> bool val rdeduce : q nFormula -> q nFormula -> q nFormula option val rTautoChecker : rcst formula bFormula -> rWitness list -> bool coq-8.4pl2/plugins/micromega/sos.mli0000640000175000001440000000221112010532755016556 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* bool val poly_neg : poly -> poly val poly_mul : poly -> poly -> poly val poly_pow : poly -> int -> poly val poly_const : Num.num -> poly val poly_of_term : term -> poly val term_of_poly : poly -> term val term_of_sos : positivstellensatz * (Num.num * poly) list -> positivstellensatz val string_of_poly : poly -> string val real_positivnullstellensatz_general : bool -> int -> poly list -> (poly * positivstellensatz) list -> poly -> poly list * (positivstellensatz * (Num.num * poly) list) list val sumofsquares : poly -> Num.num * ( Num.num * poly) list coq-8.4pl2/plugins/micromega/mutils.ml0000640000175000001440000002534612121620060017122 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* () | e::l -> f o e ; output_string o ";" ; pp_list f o l let finally f rst = try let res = f () in rst () ; res with reraise -> (try rst () with any -> raise reraise ); raise reraise let map_option f x = match x with | None -> None | Some v -> Some (f v) let from_option = function | None -> failwith "from_option" | Some v -> v let rec try_any l x = match l with | [] -> None | (f,s)::l -> match f x with | None -> try_any l x | x -> x let iteri f l = let rec xiter i l = match l with | [] -> () | e::l -> f i e ; xiter (i+1) l in xiter 0 l let all_sym_pairs f l = let pair_with acc e l = List.fold_left (fun acc x -> (f e x) ::acc) acc l in let rec xpairs acc l = match l with | [] -> acc | e::l -> xpairs (pair_with acc e l) l in xpairs [] l let rec map3 f l1 l2 l3 = match l1 , l2 ,l3 with | [] , [] , [] -> [] | e1::l1 , e2::l2 , e3::l3 -> (f e1 e2 e3)::(map3 f l1 l2 l3) | _ -> raise (Invalid_argument "map3") let rec is_sublist l1 l2 = match l1 ,l2 with | [] ,_ -> true | e::l1', [] -> false | e::l1' , e'::l2' -> if e = e' then is_sublist l1' l2' else is_sublist l1 l2' let list_try_find f = let rec try_find_f = function | [] -> failwith "try_find" | h::t -> try f h with Failure _ -> try_find_f t in try_find_f let rec list_fold_right_elements f l = let rec aux = function | [] -> invalid_arg 
"list_fold_right_elements" | [x] -> x | x::l -> f x (aux l) in aux l let interval n m = let rec interval_n (l,m) = if n > m then l else interval_n (m::l,pred m) in interval_n ([],m) let extract pred l = List.fold_left (fun (fd,sys) e -> match fd with | None -> begin match pred e with | None -> fd, e::sys | Some v -> Some(v,e) , sys end | _ -> (fd, e::sys) ) (None,[]) l open Num open Big_int let ppcm x y = let g = gcd_big_int x y in let x' = div_big_int x g in let y' = div_big_int y g in mult_big_int g (mult_big_int x' y') let denominator = function | Int _ | Big_int _ -> unit_big_int | Ratio r -> Ratio.denominator_ratio r let numerator = function | Ratio r -> Ratio.numerator_ratio r | Int i -> Big_int.big_int_of_int i | Big_int i -> i let rec ppcm_list c l = match l with | [] -> c | e::l -> ppcm_list (ppcm c (denominator e)) l let rec rec_gcd_list c l = match l with | [] -> c | e::l -> rec_gcd_list (gcd_big_int c (numerator e)) l let rec gcd_list l = let res = rec_gcd_list zero_big_int l in if compare_big_int res zero_big_int = 0 then unit_big_int else res let rats_to_ints l = let c = ppcm_list unit_big_int l in List.map (fun x -> (div_big_int (mult_big_int (numerator x) c) (denominator x))) l (* Nasty reordering of lists - useful to trim certificate down *) let mapi f l = let rec xmapi i l = match l with | [] -> [] | e::l -> (f e i)::(xmapi (i+1) l) in xmapi 0 l let concatMapi f l = List.rev (mapi (fun e i -> (i,f e)) l) (* assoc_pos j [a0...an] = [j,a0....an,j+n],j+n+1 *) let assoc_pos j l = (mapi (fun e i -> e,i+j) l, j + (List.length l)) let assoc_pos_assoc l = let rec xpos i l = match l with | [] -> [] | (x,l) ::rst -> let (l',j) = assoc_pos i l in (x,l')::(xpos j rst) in xpos 0 l let filter_pos f l = (* Could sort ... take care of duplicates... *) let rec xfilter l = match l with | [] -> [] | (x,e)::l -> if List.exists (fun ee -> List.mem ee f) (List.map snd e) then (x,e)::(xfilter l) else xfilter l in xfilter l let select_pos lpos l = let rec xselect i lpos l = match lpos with | [] -> [] | j::rpos -> match l with | [] -> failwith "select_pos" | e::l -> if i = j then e:: (xselect (i+1) rpos l) else xselect (i+1) lpos l in xselect 0 lpos l (** * MODULE: Coq to Caml data-structure mappings *) module CoqToCaml = struct open Micromega let rec nat = function | O -> 0 | S n -> (nat n) + 1 let rec positive p = match p with | XH -> 1 | XI p -> 1+ 2*(positive p) | XO p -> 2*(positive p) let n nt = match nt with | N0 -> 0 | Npos p -> positive p let rec index i = (* Swap left-right ? 
*) match i with | XH -> 1 | XI i -> 1+(2*(index i)) | XO i -> 2*(index i) let z x = match x with | Z0 -> 0 | Zpos p -> (positive p) | Zneg p -> - (positive p) open Big_int let rec positive_big_int p = match p with | XH -> unit_big_int | XI p -> add_int_big_int 1 (mult_int_big_int 2 (positive_big_int p)) | XO p -> (mult_int_big_int 2 (positive_big_int p)) let z_big_int x = match x with | Z0 -> zero_big_int | Zpos p -> (positive_big_int p) | Zneg p -> minus_big_int (positive_big_int p) let num x = Num.Big_int (z_big_int x) let q_to_num {qnum = x ; qden = y} = Big_int (z_big_int x) // (Big_int (z_big_int (Zpos y))) end (** * MODULE: Caml to Coq data-structure mappings *) module CamlToCoq = struct open Micromega let rec nat = function | 0 -> O | n -> S (nat (n-1)) let rec positive n = if n=1 then XH else if n land 1 = 1 then XI (positive (n lsr 1)) else XO (positive (n lsr 1)) let n nt = if nt < 0 then assert false else if nt = 0 then N0 else Npos (positive nt) let rec index n = if n=1 then XH else if n land 1 = 1 then XI (index (n lsr 1)) else XO (index (n lsr 1)) let idx n = (*a.k.a path_of_int *) (* returns the list of digits of n in reverse order with initial 1 removed *) let rec digits_of_int n = if n=1 then [] else (n mod 2 = 1)::(digits_of_int (n lsr 1)) in List.fold_right (fun b c -> (if b then XI c else XO c)) (List.rev (digits_of_int n)) (XH) let z x = match compare x 0 with | 0 -> Z0 | 1 -> Zpos (positive x) | _ -> (* this should be -1 *) Zneg (positive (-x)) open Big_int let positive_big_int n = let two = big_int_of_int 2 in let rec _pos n = if eq_big_int n unit_big_int then XH else let (q,m) = quomod_big_int n two in if eq_big_int unit_big_int m then XI (_pos q) else XO (_pos q) in _pos n let bigint x = match sign_big_int x with | 0 -> Z0 | 1 -> Zpos (positive_big_int x) | _ -> Zneg (positive_big_int (minus_big_int x)) let q n = {Micromega.qnum = bigint (numerator n) ; Micromega.qden = positive_big_int (denominator n)} end (** * MODULE: Comparisons on lists: by evaluating the elements in a single list, * between two lists given an ordering, and using a hash computation *) module Cmp = struct let rec compare_lexical l = match l with | [] -> 0 (* Equal *) | f::l -> let cmp = f () in if cmp = 0 then compare_lexical l else cmp let rec compare_list cmp l1 l2 = match l1 , l2 with | [] , [] -> 0 | [] , _ -> -1 | _ , [] -> 1 | e1::l1 , e2::l2 -> let c = cmp e1 e2 in if c = 0 then compare_list cmp l1 l2 else c (** * hash_list takes a hash function and a list, and computes an integer which * is the hash value of the list. *) let hash_list hash l = let rec _hash_list l h = match l with | [] -> h lxor (Hashtbl.hash []) | e::l -> _hash_list l ((hash e) lxor h) in _hash_list l 0 end (** * MODULE: Labels for atoms in propositional formulas. * Tags are used to identify unused atoms in CNFs, and propagate them back to * the original formula. The translation back to Coq then ignores these * superfluous items, which speeds the translation up a bit. *) module type Tag = sig type t val from : int -> t val next : t -> t val pp : out_channel -> t -> unit val compare : t -> t -> int end module Tag : Tag = struct type t = int let from i = i let next i = i + 1 let pp o i = output_string o (string_of_int i) let compare : int -> int -> int = Pervasives.compare end (** * MODULE: Ordered sets of tags. *) module TagSet = Set.Make(Tag) (** * Forking routine, plumbing the appropriate pipes where needed. 
*) let command exe_path args vl = (* creating pipes for stdin, stdout, stderr *) let (stdin_read,stdin_write) = Unix.pipe () and (stdout_read,stdout_write) = Unix.pipe () and (stderr_read,stderr_write) = Unix.pipe () in (* Create the process *) let pid = Unix.create_process exe_path args stdin_read stdout_write stderr_write in (* Write the data on the stdin of the created process *) let outch = Unix.out_channel_of_descr stdin_write in output_value outch vl ; flush outch ; (* Wait for its completion *) let _pid,status = Unix.waitpid [] pid in finally (* Recover the result *) (fun () -> match status with | Unix.WEXITED 0 -> let inch = Unix.in_channel_of_descr stdout_read in begin try Marshal.from_channel inch with x when x <> Sys.Break -> failwith (Printf.sprintf "command \"%s\" exited %s" exe_path (Printexc.to_string x)) end | Unix.WEXITED i -> failwith (Printf.sprintf "command \"%s\" exited %i" exe_path i) | Unix.WSIGNALED i -> failwith (Printf.sprintf "command \"%s\" killed %i" exe_path i) | Unix.WSTOPPED i -> failwith (Printf.sprintf "command \"%s\" stopped %i" exe_path i)) (* Cleanup *) (fun () -> List.iter (fun x -> try Unix.close x with e when e <> Sys.Break -> ()) [stdin_read; stdin_write; stdout_read; stdout_write; stderr_read; stderr_write]) (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/LICENSE.sos0000640000175000001440000000305711160567762017103 0ustar notinusers HOL Light copyright notice, licence and disclaimer (c) University of Cambridge 1998 (c) Copyright, John Harrison 1998-2006 HOL Light version 2.20, hereinafter referred to as "the software", is a computer theorem proving system written by John Harrison. Much of the software was developed at the University of Cambridge Computer Laboratory, New Museums Site, Pembroke Street, Cambridge, CB2 3QG, England. The software is copyright, University of Cambridge 1998 and John Harrison 1998-2006. Permission to use, copy, modify, and distribute the software and its documentation for any purpose and without fee is hereby granted. In the case of further distribution of the software the present text, including copyright notice, licence and disclaimer of warranty, must be included in full and unmodified form in any release. Distribution of derivative software obtained by modifying the software, or incorporating it into other software, is permitted, provided the inclusion of the software is acknowledged and that any changes made to the software are clearly documented. John Harrison and the University of Cambridge disclaim all warranties with regard to the software, including all implied warranties of merchantability and fitness. In no event shall John Harrison or the University of Cambridge be liable for any special, indirect, incidental or consequential damages or any damages whatsoever, including, but not limited to, those arising from computer failure or malfunction, work stoppage, loss of profit or loss of contracts. coq-8.4pl2/plugins/micromega/RingMicromega.v0000640000175000001440000007550612010532755020202 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R. Variable ropp : R -> R. Variables req rle rlt : R -> R -> Prop. Variable sor : SOR rO rI rplus rtimes rminus ropp req rle rlt. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (rplus x y). Notation "x * y " := (rtimes x y). Notation "x - y " := (rminus x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). 
Notation "x ~= y" := (~ req x y). Notation "x <= y" := (rle x y). Notation "x < y" := (rlt x y). (* Assume we have a type of coefficients C and a morphism from C to R *) Variable C : Type. Variables cO cI : C. Variables cplus ctimes cminus: C -> C -> C. Variable copp : C -> C. Variables ceqb cleb : C -> C -> bool. Variable phi : C -> R. (* Power coefficients *) Variable E : Set. (* the type of exponents *) Variable pow_phi : N -> E. Variable rpow : R -> E -> R. Notation "[ x ]" := (phi x). Notation "x [=] y" := (ceqb x y). Notation "x [<=] y" := (cleb x y). (* Let's collect all hypotheses in addition to the ordered ring axioms into one structure *) Record SORaddon := mk_SOR_addon { SORrm : ring_morph 0 1 rplus rtimes rminus ropp req cO cI cplus ctimes cminus copp ceqb phi; SORpower : power_theory rI rtimes req pow_phi rpow; SORcneqb_morph : forall x y : C, x [=] y = false -> [x] ~= [y]; SORcleb_morph : forall x y : C, x [<=] y = true -> [x] <= [y] }. Variable addon : SORaddon. Add Relation R req reflexivity proved by sor.(SORsetoid).(@Equivalence_Reflexive _ _ ) symmetry proved by sor.(SORsetoid).(@Equivalence_Symmetric _ _ ) transitivity proved by sor.(SORsetoid).(@Equivalence_Transitive _ _ ) as micomega_sor_setoid. Add Morphism rplus with signature req ==> req ==> req as rplus_morph. Proof. exact sor.(SORplus_wd). Qed. Add Morphism rtimes with signature req ==> req ==> req as rtimes_morph. Proof. exact sor.(SORtimes_wd). Qed. Add Morphism ropp with signature req ==> req as ropp_morph. Proof. exact sor.(SORopp_wd). Qed. Add Morphism rle with signature req ==> req ==> iff as rle_morph. Proof. exact sor.(SORle_wd). Qed. Add Morphism rlt with signature req ==> req ==> iff as rlt_morph. Proof. exact sor.(SORlt_wd). Qed. Add Morphism rminus with signature req ==> req ==> req as rminus_morph. Proof. exact (rminus_morph sor). (* We already proved that minus is a morphism in OrderedRing.v *) Qed. Definition cneqb (x y : C) := negb (ceqb x y). Definition cltb (x y : C) := (cleb x y) && (cneqb x y). Notation "x [~=] y" := (cneqb x y). Notation "x [<] y" := (cltb x y). Ltac le_less := rewrite (Rle_lt_eq sor); left; try assumption. Ltac le_equal := rewrite (Rle_lt_eq sor); right; try reflexivity; try assumption. Ltac le_elim H := rewrite (Rle_lt_eq sor) in H; destruct H as [H | H]. Lemma cleb_sound : forall x y : C, x [<=] y = true -> [x] <= [y]. Proof. exact addon.(SORcleb_morph). Qed. Lemma cneqb_sound : forall x y : C, x [~=] y = true -> [x] ~= [y]. Proof. intros x y H1. apply addon.(SORcneqb_morph). unfold cneqb, negb in H1. destruct (ceqb x y); now try discriminate. Qed. Lemma cltb_sound : forall x y : C, x [<] y = true -> [x] < [y]. Proof. intros x y H. unfold cltb in H. apply andb_prop in H. destruct H as [H1 H2]. apply cleb_sound in H1. apply cneqb_sound in H2. apply <- (Rlt_le_neq sor). now split. Qed. (* Begin Micromega *) Definition PolC := Pol C. (* polynomials in generalized Horner form, defined in Ring_polynom or EnvRing *) Definition PolEnv := Env R. (* For interpreting PolC *) Definition eval_pol (env : PolEnv) (p:PolC) : R := Pphi rplus rtimes phi env p. Inductive Op1 : Set := (* relations with 0 *) | Equal (* == 0 *) | NonEqual (* ~= 0 *) | Strict (* > 0 *) | NonStrict (* >= 0 *). Definition NFormula := (PolC * Op1)%type. (* normalized formula *) Definition eval_op1 (o : Op1) : R -> Prop := match o with | Equal => fun x => x == 0 | NonEqual => fun x : R => x ~= 0 | Strict => fun x : R => 0 < x | NonStrict => fun x : R => 0 <= x end. 
Definition eval_nformula (env : PolEnv) (f : NFormula) : Prop := let (p, op) := f in eval_op1 op (eval_pol env p). (** Rule of "signs" for addition and multiplication. An arbitrary result is coded buy None. *) Definition OpMult (o o' : Op1) : option Op1 := match o with | Equal => Some Equal | NonStrict => match o' with | Equal => Some Equal | NonEqual => None | Strict => Some NonStrict | NonStrict => Some NonStrict end | Strict => match o' with | NonEqual => None | _ => Some o' end | NonEqual => match o' with | Equal => Some Equal | NonEqual => Some NonEqual | _ => None end end. Definition OpAdd (o o': Op1) : option Op1 := match o with | Equal => Some o' | NonStrict => match o' with | Strict => Some Strict | NonEqual => None | _ => Some NonStrict end | Strict => match o' with | NonEqual => None | _ => Some Strict end | NonEqual => match o' with | Equal => Some NonEqual | _ => None end end. Lemma OpMult_sound : forall (o o' om: Op1) (x y : R), eval_op1 o x -> eval_op1 o' y -> OpMult o o' = Some om -> eval_op1 om (x * y). Proof. unfold eval_op1; destruct o; simpl; intros o' om x y H1 H2 H3. (* x == 0 *) inversion H3. rewrite H1. now rewrite (Rtimes_0_l sor). (* x ~= 0 *) destruct o' ; inversion H3. (* y == 0 *) rewrite H2. now rewrite (Rtimes_0_r sor). (* y ~= 0 *) apply (Rtimes_neq_0 sor) ; auto. (* 0 < x *) destruct o' ; inversion H3. (* y == 0 *) rewrite H2; now rewrite (Rtimes_0_r sor). (* 0 < y *) now apply (Rtimes_pos_pos sor). (* 0 <= y *) apply (Rtimes_nonneg_nonneg sor); [le_less | assumption]. (* 0 <= x *) destruct o' ; inversion H3. (* y == 0 *) rewrite H2; now rewrite (Rtimes_0_r sor). (* 0 < y *) apply (Rtimes_nonneg_nonneg sor); [assumption | le_less ]. (* 0 <= y *) now apply (Rtimes_nonneg_nonneg sor). Qed. Lemma OpAdd_sound : forall (o o' oa : Op1) (e e' : R), eval_op1 o e -> eval_op1 o' e' -> OpAdd o o' = Some oa -> eval_op1 oa (e + e'). Proof. unfold eval_op1; destruct o; simpl; intros o' oa e e' H1 H2 Hoa. (* e == 0 *) inversion Hoa. rewrite <- H0. destruct o' ; rewrite H1 ; now rewrite (Rplus_0_l sor). (* e ~= 0 *) destruct o'. (* e' == 0 *) inversion Hoa. rewrite H2. now rewrite (Rplus_0_r sor). (* e' ~= 0 *) discriminate. (* 0 < e' *) discriminate. (* 0 <= e' *) discriminate. (* 0 < e *) destruct o'. (* e' == 0 *) inversion Hoa. rewrite H2. now rewrite (Rplus_0_r sor). (* e' ~= 0 *) discriminate. (* 0 < e' *) inversion Hoa. now apply (Rplus_pos_pos sor). (* 0 <= e' *) inversion Hoa. now apply (Rplus_pos_nonneg sor). (* 0 <= e *) destruct o'. (* e' == 0 *) inversion Hoa. now rewrite H2, (Rplus_0_r sor). (* e' ~= 0 *) discriminate. (* 0 < e' *) inversion Hoa. now apply (Rplus_nonneg_pos sor). (* 0 <= e' *) inversion Hoa. now apply (Rplus_nonneg_nonneg sor). Qed. Inductive Psatz : Type := | PsatzIn : nat -> Psatz | PsatzSquare : PolC -> Psatz | PsatzMulC : PolC -> Psatz -> Psatz | PsatzMulE : Psatz -> Psatz -> Psatz | PsatzAdd : Psatz -> Psatz -> Psatz | PsatzC : C -> Psatz | PsatzZ : Psatz. (** Given a list [l] of NFormula and an extended polynomial expression [e], if [eval_Psatz l e] succeeds (= Some f) then [f] is a logic consequence of the conjunction of the formulae in l. Moreover, the polynomial expression is obtained by replacing the (PsatzIn n) by the nth polynomial expression in [l] and the sign is computed by the "rule of sign" *) (* Might be defined elsewhere *) Definition map_option (A B:Type) (f : A -> option B) (o : option A) : option B := match o with | None => None | Some x => f x end. Arguments map_option [A B] f o. 
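(* Hedged illustration -- NOT part of the original RingMicromega.v: two
   definitional checks of the "rule of signs" encoded by OpMult and OpAdd
   above.  Multiplying two strictly positive facts yields a strictly
   positive fact; adding a nonnegative fact to a strict one stays strict. *)
Example OpMult_Strict_Strict_example : OpMult Strict Strict = Some Strict.
Proof. reflexivity. Qed.

Example OpAdd_Strict_NonStrict_example : OpAdd Strict NonStrict = Some Strict.
Proof. reflexivity. Qed.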
Definition map_option2 (A B C : Type) (f : A -> B -> option C) (o: option A) (o': option B) : option C := match o , o' with | None , _ => None | _ , None => None | Some x , Some x' => f x x' end. Arguments map_option2 [A B C] f o o'. Definition Rops_wd := mk_reqe (*rplus rtimes ropp req*) sor.(SORplus_wd) sor.(SORtimes_wd) sor.(SORopp_wd). Definition pexpr_times_nformula (e: PolC) (f : NFormula) : option NFormula := let (ef,o) := f in match o with | Equal => Some (Pmul cO cI cplus ctimes ceqb e ef , Equal) | _ => None end. Definition nformula_times_nformula (f1 f2 : NFormula) : option NFormula := let (e1,o1) := f1 in let (e2,o2) := f2 in map_option (fun x => (Some (Pmul cO cI cplus ctimes ceqb e1 e2,x))) (OpMult o1 o2). Definition nformula_plus_nformula (f1 f2 : NFormula) : option NFormula := let (e1,o1) := f1 in let (e2,o2) := f2 in map_option (fun x => (Some (Padd cO cplus ceqb e1 e2,x))) (OpAdd o1 o2). Fixpoint eval_Psatz (l : list NFormula) (e : Psatz) {struct e} : option NFormula := match e with | PsatzIn n => Some (nth n l (Pc cO, Equal)) | PsatzSquare e => Some (Psquare cO cI cplus ctimes ceqb e , NonStrict) | PsatzMulC re e => map_option (pexpr_times_nformula re) (eval_Psatz l e) | PsatzMulE f1 f2 => map_option2 nformula_times_nformula (eval_Psatz l f1) (eval_Psatz l f2) | PsatzAdd f1 f2 => map_option2 nformula_plus_nformula (eval_Psatz l f1) (eval_Psatz l f2) | PsatzC c => if cltb cO c then Some (Pc c, Strict) else None (* This could be 0, or <> 0 -- but these cases are useless *) | PsatzZ => Some (Pc cO, Equal) (* Just to make life easier *) end. Lemma pexpr_times_nformula_correct : forall (env: PolEnv) (e: PolC) (f f' : NFormula), eval_nformula env f -> pexpr_times_nformula e f = Some f' -> eval_nformula env f'. Proof. unfold pexpr_times_nformula. destruct f. intros. destruct o ; inversion H0 ; try discriminate. simpl in *. unfold eval_pol in *. rewrite (Pmul_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)). rewrite H. apply (Rtimes_0_r sor). Qed. Lemma nformula_times_nformula_correct : forall (env:PolEnv) (f1 f2 f : NFormula), eval_nformula env f1 -> eval_nformula env f2 -> nformula_times_nformula f1 f2 = Some f -> eval_nformula env f. Proof. unfold nformula_times_nformula. destruct f1 ; destruct f2. case_eq (OpMult o o0) ; simpl ; try discriminate. intros. inversion H2 ; simpl. unfold eval_pol. destruct o1; simpl; rewrite (Pmul_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)); apply OpMult_sound with (3:= H);assumption. Qed. Lemma nformula_plus_nformula_correct : forall (env:PolEnv) (f1 f2 f : NFormula), eval_nformula env f1 -> eval_nformula env f2 -> nformula_plus_nformula f1 f2 = Some f -> eval_nformula env f. Proof. unfold nformula_plus_nformula. destruct f1 ; destruct f2. case_eq (OpAdd o o0) ; simpl ; try discriminate. intros. inversion H2 ; simpl. unfold eval_pol. destruct o1; simpl; rewrite (Padd_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)); apply OpAdd_sound with (3:= H);assumption. Qed. Lemma eval_Psatz_Sound : forall (l : list NFormula) (env : PolEnv), (forall (f : NFormula), In f l -> eval_nformula env f) -> forall (e : Psatz) (f : NFormula), eval_Psatz l e = Some f -> eval_nformula env f. Proof. induction e. (* PsatzIn *) simpl ; intros. destruct (nth_in_or_default n l (Pc cO, Equal)). (* index is in bounds *) apply H ; congruence. (* index is out-of-bounds *) inversion H0. rewrite e. simpl. now apply addon.(SORrm).(morph0). 
(* PsatzSquare *) simpl. intros. inversion H0. simpl. unfold eval_pol. rewrite (Psquare_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)); now apply (Rtimes_square_nonneg sor). (* PsatzMulC *) simpl. intro. case_eq (eval_Psatz l e) ; simpl ; intros. apply IHe in H0. apply pexpr_times_nformula_correct with (1:=H0) (2:= H1). discriminate. (* PsatzMulC *) simpl ; intro. case_eq (eval_Psatz l e1) ; simpl ; try discriminate. case_eq (eval_Psatz l e2) ; simpl ; try discriminate. intros. apply IHe1 in H1. apply IHe2 in H0. apply (nformula_times_nformula_correct env n0 n) ; assumption. (* PsatzAdd *) simpl ; intro. case_eq (eval_Psatz l e1) ; simpl ; try discriminate. case_eq (eval_Psatz l e2) ; simpl ; try discriminate. intros. apply IHe1 in H1. apply IHe2 in H0. apply (nformula_plus_nformula_correct env n0 n) ; assumption. (* PsatzC *) simpl. intro. case_eq (cO [<] c). intros. inversion H1. simpl. rewrite <- addon.(SORrm).(morph0). now apply cltb_sound. discriminate. (* PsatzZ *) simpl. intros. inversion H0. simpl. apply addon.(SORrm).(morph0). Qed. Fixpoint ge_bool (n m : nat) : bool := match n with | O => match m with | O => true | S _ => false end | S n => match m with | O => true | S m => ge_bool n m end end. Lemma ge_bool_cases : forall n m, (if ge_bool n m then n >= m else n < m)%nat. Proof. induction n; destruct m ; simpl; auto with arith. specialize (IHn m). destruct (ge_bool); auto with arith. Qed. Fixpoint xhyps_of_psatz (base:nat) (acc : list nat) (prf : Psatz) : list nat := match prf with | PsatzC _ | PsatzZ | PsatzSquare _ => acc | PsatzMulC _ prf => xhyps_of_psatz base acc prf | PsatzAdd e1 e2 | PsatzMulE e1 e2 => xhyps_of_psatz base (xhyps_of_psatz base acc e2) e1 | PsatzIn n => if ge_bool n base then (n::acc) else acc end. Fixpoint nhyps_of_psatz (prf : Psatz) : list nat := match prf with | PsatzC _ | PsatzZ | PsatzSquare _ => nil | PsatzMulC _ prf => nhyps_of_psatz prf | PsatzAdd e1 e2 | PsatzMulE e1 e2 => nhyps_of_psatz e1 ++ nhyps_of_psatz e2 | PsatzIn n => n :: nil end. Fixpoint extract_hyps (l: list NFormula) (ln : list nat) : list NFormula := match ln with | nil => nil | n::ln => nth n l (Pc cO, Equal) :: extract_hyps l ln end. Lemma extract_hyps_app : forall l ln1 ln2, extract_hyps l (ln1 ++ ln2) = (extract_hyps l ln1) ++ (extract_hyps l ln2). Proof. induction ln1. reflexivity. simpl. intros. rewrite IHln1. reflexivity. Qed. Ltac inv H := inversion H ; try subst ; clear H. Lemma nhyps_of_psatz_correct : forall (env : PolEnv) (e:Psatz) (l : list NFormula) (f: NFormula), eval_Psatz l e = Some f -> ((forall f', In f' (extract_hyps l (nhyps_of_psatz e)) -> eval_nformula env f') -> eval_nformula env f). Proof. induction e ; intros. (*PsatzIn*) simpl in *. apply H0. intuition congruence. (* PsatzSquare *) simpl in *. inv H. simpl. unfold eval_pol. rewrite (Psquare_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)); now apply (Rtimes_square_nonneg sor). (* PsatzMulC *) simpl in *. case_eq (eval_Psatz l e). intros. rewrite H1 in H. simpl in H. apply pexpr_times_nformula_correct with (2:= H). apply IHe with (1:= H1); auto. intros. rewrite H1 in H. simpl in H ; discriminate. (* PsatzMulE *) simpl in *. revert H. case_eq (eval_Psatz l e1). case_eq (eval_Psatz l e2) ; simpl ; intros. apply nformula_times_nformula_correct with (3:= H2). apply IHe1 with (1:= H1) ; auto. intros. apply H0. rewrite extract_hyps_app. apply in_or_app. tauto. apply IHe2 with (1:= H) ; auto. intros. apply H0. rewrite extract_hyps_app. 
apply in_or_app. tauto. discriminate. simpl. discriminate. (* PsatzAdd *) simpl in *. revert H. case_eq (eval_Psatz l e1). case_eq (eval_Psatz l e2) ; simpl ; intros. apply nformula_plus_nformula_correct with (3:= H2). apply IHe1 with (1:= H1) ; auto. intros. apply H0. rewrite extract_hyps_app. apply in_or_app. tauto. apply IHe2 with (1:= H) ; auto. intros. apply H0. rewrite extract_hyps_app. apply in_or_app. tauto. discriminate. simpl. discriminate. (* PsatzC *) simpl in H. case_eq (cO [<] c). intros. rewrite H1 in H. inv H. unfold eval_nformula. simpl. rewrite <- addon.(SORrm).(morph0). now apply cltb_sound. intros. rewrite H1 in H. discriminate. (* PsatzZ *) simpl in *. inv H. unfold eval_nformula. simpl. apply addon.(SORrm).(morph0). Qed. (* roughly speaking, normalise_pexpr_correct is a proof of forall env p, eval_pexpr env p == eval_pol env (normalise_pexpr p) *) (*****) Definition paddC := PaddC cplus. Definition psubC := PsubC cminus. Definition PsubC_ok : forall c P env, eval_pol env (psubC P c) == eval_pol env P - [c] := let Rops_wd := mk_reqe (*rplus rtimes ropp req*) sor.(SORplus_wd) sor.(SORtimes_wd) sor.(SORopp_wd) in PsubC_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm). Definition PaddC_ok : forall c P env, eval_pol env (paddC P c) == eval_pol env P + [c] := let Rops_wd := mk_reqe (*rplus rtimes ropp req*) sor.(SORplus_wd) sor.(SORtimes_wd) sor.(SORopp_wd) in PaddC_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm). (* Check that a formula f is inconsistent by normalizing and comparing the resulting constant with 0 *) Definition check_inconsistent (f : NFormula) : bool := let (e, op) := f in match e with | Pc c => match op with | Equal => cneqb c cO | NonStrict => c [<] cO | Strict => c [<=] cO | NonEqual => c [=] cO end | _ => false (* not a constant *) end. Lemma check_inconsistent_sound : forall (p : PolC) (op : Op1), check_inconsistent (p, op) = true -> forall env, ~ eval_op1 op (eval_pol env p). Proof. intros p op H1 env. unfold check_inconsistent in H1. destruct op; simpl ; (*****) destruct p ; simpl; try discriminate H1; try rewrite <- addon.(SORrm).(morph0); trivial. now apply cneqb_sound. apply addon.(SORrm).(morph_eq) in H1. congruence. apply cleb_sound in H1. now apply -> (Rle_ngt sor). apply cltb_sound in H1. now apply -> (Rlt_nge sor). Qed. Definition check_normalised_formulas : list NFormula -> Psatz -> bool := fun l cm => match eval_Psatz l cm with | None => false | Some f => check_inconsistent f end. Lemma checker_nf_sound : forall (l : list NFormula) (cm : Psatz), check_normalised_formulas l cm = true -> forall env : PolEnv, make_impl (eval_nformula env) l False. Proof. intros l cm H env. unfold check_normalised_formulas in H. revert H. case_eq (eval_Psatz l cm) ; [|discriminate]. intros nf. intros. rewrite <- make_conj_impl. intro. assert (H1' := make_conj_in _ _ H1). assert (Hnf := @eval_Psatz_Sound _ _ H1' _ _ H). destruct nf. apply (@check_inconsistent_sound _ _ H0 env Hnf). Qed. (** Normalisation of formulae **) Inductive Op2 : Set := (* binary relations *) | OpEq | OpNEq | OpLe | OpGe | OpLt | OpGt. Definition eval_op2 (o : Op2) : R -> R -> Prop := match o with | OpEq => req | OpNEq => fun x y : R => x ~= y | OpLe => rle | OpGe => fun x y : R => y <= x | OpLt => fun x y : R => x < y | OpGt => fun x y : R => y < x end. Definition eval_pexpr (l : PolEnv) (pe : PExpr C) : R := PEeval rplus rtimes rminus ropp phi pow_phi rpow l pe. 
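(* Note (informal): in [eval_op2] above, only [rle] and [rlt] come from the
   ordered-ring structure; [OpGe] and [OpGt] are interpreted by swapping the
   arguments, so that e.g. [eval_op2 OpGt x y] unfolds to [y < x]. *)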
Record Formula (T:Type) : Type := { Flhs : PExpr T; Fop : Op2; Frhs : PExpr T }. Definition eval_formula (env : PolEnv) (f : Formula C) : Prop := let (lhs, op, rhs) := f in (eval_op2 op) (eval_pexpr env lhs) (eval_pexpr env rhs). (* We normalize Formulas by moving terms to one side *) Definition norm := norm_aux cO cI cplus ctimes cminus copp ceqb. Definition psub := Psub cO cplus cminus copp ceqb. Definition padd := Padd cO cplus ceqb. Definition normalise (f : Formula C) : NFormula := let (lhs, op, rhs) := f in let lhs := norm lhs in let rhs := norm rhs in match op with | OpEq => (psub lhs rhs, Equal) | OpNEq => (psub lhs rhs, NonEqual) | OpLe => (psub rhs lhs, NonStrict) | OpGe => (psub lhs rhs, NonStrict) | OpGt => (psub lhs rhs, Strict) | OpLt => (psub rhs lhs, Strict) end. Definition negate (f : Formula C) : NFormula := let (lhs, op, rhs) := f in let lhs := norm lhs in let rhs := norm rhs in match op with | OpEq => (psub rhs lhs, NonEqual) | OpNEq => (psub rhs lhs, Equal) | OpLe => (psub lhs rhs, Strict) (* e <= e' == ~ e > e' *) | OpGe => (psub rhs lhs, Strict) | OpGt => (psub rhs lhs, NonStrict) | OpLt => (psub lhs rhs, NonStrict) end. Lemma eval_pol_sub : forall env lhs rhs, eval_pol env (psub lhs rhs) == eval_pol env lhs - eval_pol env rhs. Proof. intros. apply (Psub_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)). Qed. Lemma eval_pol_add : forall env lhs rhs, eval_pol env (padd lhs rhs) == eval_pol env lhs + eval_pol env rhs. Proof. intros. apply (Padd_ok sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm)). Qed. Lemma eval_pol_norm : forall env lhs, eval_pexpr env lhs == eval_pol env (norm lhs). Proof. intros. apply (norm_aux_spec sor.(SORsetoid) Rops_wd (Rth_ARth (SORsetoid sor) Rops_wd sor.(SORrt)) addon.(SORrm) addon.(SORpower) ). Qed. Theorem normalise_sound : forall (env : PolEnv) (f : Formula C), eval_formula env f -> eval_nformula env (normalise f). Proof. intros env f H; destruct f as [lhs op rhs]; simpl in *. destruct op; simpl in *; rewrite eval_pol_sub ; rewrite <- eval_pol_norm ; rewrite <- eval_pol_norm. now apply <- (Rminus_eq_0 sor). intros H1. apply -> (Rminus_eq_0 sor) in H1. now apply H. now apply -> (Rle_le_minus sor). now apply -> (Rle_le_minus sor). now apply -> (Rlt_lt_minus sor). now apply -> (Rlt_lt_minus sor). Qed. Theorem negate_correct : forall (env : PolEnv) (f : Formula C), eval_formula env f <-> ~ (eval_nformula env (negate f)). Proof. intros env f; destruct f as [lhs op rhs]; simpl. destruct op; simpl in *; rewrite eval_pol_sub ; rewrite <- eval_pol_norm ; rewrite <- eval_pol_norm. symmetry. rewrite (Rminus_eq_0 sor). split; intro H; [symmetry; now apply -> (Req_dne sor) | symmetry in H; now apply <- (Req_dne sor)]. rewrite (Rminus_eq_0 sor). split; intro; now apply (Rneq_symm sor). rewrite <- (Rlt_lt_minus sor). now rewrite <- (Rle_ngt sor). rewrite <- (Rlt_lt_minus sor). now rewrite <- (Rle_ngt sor). rewrite <- (Rle_le_minus sor). now rewrite <- (Rlt_nge sor). rewrite <- (Rle_le_minus sor). now rewrite <- (Rlt_nge sor). Qed. 
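(* Illustration (a small sanity check, provable by computation): [normalise]
   moves everything to one side of the comparison, so a formula [lhs <= rhs]
   becomes the sign condition "rhs - lhs is non-negative" on the normalised
   difference. *)
Example normalise_OpLe_sketch : forall lhs rhs : PExpr C,
  normalise (Build_Formula lhs OpLe rhs) = (psub (norm rhs) (norm lhs), NonStrict).
Proof.
  intros; reflexivity.
Qed.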
(** Another normalisation - this is used for cnf conversion **) Definition xnormalise (t:Formula C) : list (NFormula) := let (lhs,o,rhs) := t in let lhs := norm lhs in let rhs := norm rhs in match o with | OpEq => (psub lhs rhs, Strict)::(psub rhs lhs , Strict)::nil | OpNEq => (psub lhs rhs,Equal) :: nil | OpGt => (psub rhs lhs,NonStrict) :: nil | OpLt => (psub lhs rhs,NonStrict) :: nil | OpGe => (psub rhs lhs , Strict) :: nil | OpLe => (psub lhs rhs ,Strict) :: nil end. Require Import Tauto. Definition cnf_normalise (t:Formula C) : cnf (NFormula) := List.map (fun x => x::nil) (xnormalise t). Add Ring SORRing : sor.(SORrt). Lemma cnf_normalise_correct : forall env t, eval_cnf eval_nformula env (cnf_normalise t) -> eval_formula env t. Proof. unfold cnf_normalise, xnormalise ; simpl ; intros env t. unfold eval_cnf, eval_clause. destruct t as [lhs o rhs]; case_eq o ; simpl; repeat rewrite eval_pol_sub ; repeat rewrite <- eval_pol_norm in * ; generalize (eval_pexpr env lhs); generalize (eval_pexpr env rhs) ; intros z1 z2 ; intros. (**) apply sor.(SORle_antisymm). rewrite (Rle_ngt sor). rewrite (Rlt_lt_minus sor). tauto. rewrite (Rle_ngt sor). rewrite (Rlt_lt_minus sor). tauto. now rewrite <- (Rminus_eq_0 sor). rewrite (Rle_ngt sor). rewrite (Rlt_lt_minus sor). auto. rewrite (Rle_ngt sor). rewrite (Rlt_lt_minus sor). auto. rewrite (Rlt_nge sor). rewrite (Rle_le_minus sor). auto. rewrite (Rlt_nge sor). rewrite (Rle_le_minus sor). auto. Qed. Definition xnegate (t:Formula C) : list (NFormula) := let (lhs,o,rhs) := t in let lhs := norm lhs in let rhs := norm rhs in match o with | OpEq => (psub lhs rhs,Equal) :: nil | OpNEq => (psub lhs rhs ,Strict)::(psub rhs lhs,Strict)::nil | OpGt => (psub lhs rhs,Strict) :: nil | OpLt => (psub rhs lhs,Strict) :: nil | OpGe => (psub lhs rhs,NonStrict) :: nil | OpLe => (psub rhs lhs,NonStrict) :: nil end. Definition cnf_negate (t:Formula C) : cnf (NFormula) := List.map (fun x => x::nil) (xnegate t). Lemma cnf_negate_correct : forall env t, eval_cnf eval_nformula env (cnf_negate t) -> ~ eval_formula env t. Proof. unfold cnf_negate, xnegate ; simpl ; intros env t. unfold eval_cnf, eval_clause. destruct t as [lhs o rhs]; case_eq o ; simpl; repeat rewrite eval_pol_sub ; repeat rewrite <- eval_pol_norm in * ; generalize (eval_pexpr env lhs); generalize (eval_pexpr env rhs) ; intros z1 z2 ; intros ; intuition. (**) apply H0. rewrite H1 ; ring. (**) apply H1. apply sor.(SORle_antisymm). rewrite (Rle_ngt sor). rewrite (Rlt_lt_minus sor). tauto. rewrite (Rle_ngt sor). rewrite (Rlt_lt_minus sor). tauto. (**) apply H0. now rewrite (Rle_le_minus sor) in H1. apply H0. now rewrite (Rle_le_minus sor) in H1. apply H0. now rewrite (Rlt_lt_minus sor) in H1. apply H0. now rewrite (Rlt_lt_minus sor) in H1. Qed. Lemma eval_nformula_dec : forall env d, (eval_nformula env d) \/ ~ (eval_nformula env d). Proof. intros. destruct d ; simpl. generalize (eval_pol env p); intros. destruct o ; simpl. apply (Req_em sor r 0). destruct (Req_em sor r 0) ; tauto. rewrite <- (Rle_ngt sor r 0). generalize (Rle_gt_cases sor r 0). tauto. rewrite <- (Rlt_nge sor r 0). generalize (Rle_gt_cases sor 0 r). tauto. Qed. (** Reverse transformation *) Fixpoint xdenorm (jmp : positive) (p: Pol C) : PExpr C := match p with | Pc c => PEc c | Pinj j p => xdenorm (Pos.add j jmp ) p | PX p j q => PEadd (PEmul (xdenorm jmp p) (PEpow (PEX _ jmp) (Npos j))) (xdenorm (Pos.succ jmp) q) end. Lemma xdenorm_correct : forall p i env, eval_pol (jump i env) p == eval_pexpr env (xdenorm (Pos.succ i) p). Proof. unfold eval_pol. 
induction p. simpl. reflexivity. (* Pinj *) simpl. intros. rewrite Pos.add_succ_r. rewrite <- IHp. symmetry. rewrite Pos.add_comm. rewrite Pjump_add. reflexivity. (* PX *) simpl. intros. rewrite <- IHp1, <- IHp2. unfold Env.tail , Env.hd. rewrite <- Pjump_add. rewrite Pos.add_1_r. unfold Env.nth. unfold jump at 2. rewrite <- Pos.add_1_l. rewrite addon.(SORpower).(rpow_pow_N). unfold pow_N. ring. Qed. Definition denorm (p : Pol C) := xdenorm xH p. Lemma denorm_correct : forall p env, eval_pol env p == eval_pexpr env (denorm p). Proof. unfold denorm. induction p. reflexivity. simpl. rewrite Pos.add_1_r. apply xdenorm_correct. simpl. intros. rewrite IHp1. unfold Env.tail. rewrite xdenorm_correct. change (Pos.succ xH) with 2%positive. rewrite addon.(SORpower).(rpow_pow_N). simpl. reflexivity. Qed. (** Sometimes it is convenient to make a distinction between "syntactic" coefficients and "real" coefficients that are used to actually compute *) Variable S : Type. Variable C_of_S : S -> C. Variable phiS : S -> R. Variable phi_C_of_S : forall c, phiS c = phi (C_of_S c). Fixpoint map_PExpr (e : PExpr S) : PExpr C := match e with | PEc c => PEc (C_of_S c) | PEX p => PEX _ p | PEadd e1 e2 => PEadd (map_PExpr e1) (map_PExpr e2) | PEsub e1 e2 => PEsub (map_PExpr e1) (map_PExpr e2) | PEmul e1 e2 => PEmul (map_PExpr e1) (map_PExpr e2) | PEopp e => PEopp (map_PExpr e) | PEpow e n => PEpow (map_PExpr e) n end. Definition map_Formula (f : Formula S) : Formula C := let (l,o,r) := f in Build_Formula (map_PExpr l) o (map_PExpr r). Definition eval_sexpr (env : PolEnv) (e : PExpr S) : R := PEeval rplus rtimes rminus ropp phiS pow_phi rpow env e. Definition eval_sformula (env : PolEnv) (f : Formula S) : Prop := let (lhs, op, rhs) := f in (eval_op2 op) (eval_sexpr env lhs) (eval_sexpr env rhs). Lemma eval_pexprSC : forall env s, eval_sexpr env s = eval_pexpr env (map_PExpr s). Proof. unfold eval_pexpr, eval_sexpr. induction s ; simpl ; try (rewrite IHs1 ; rewrite IHs2) ; try reflexivity. apply phi_C_of_S. rewrite IHs. reflexivity. rewrite IHs. reflexivity. Qed. (** equality migth be (too) strong *) Lemma eval_formulaSC : forall env f, eval_sformula env f = eval_formula env (map_Formula f). Proof. destruct f. simpl. repeat rewrite eval_pexprSC. reflexivity. Qed. (** Some syntactic simplifications of expressions *) Definition simpl_cone (e:Psatz) : Psatz := match e with | PsatzSquare t => match t with | Pc c => if ceqb cO c then PsatzZ else PsatzC (ctimes c c) | _ => PsatzSquare t end | PsatzMulE t1 t2 => match t1 , t2 with | PsatzZ , x => PsatzZ | x , PsatzZ => PsatzZ | PsatzC c , PsatzC c' => PsatzC (ctimes c c') | PsatzC p1 , PsatzMulE (PsatzC p2) x => PsatzMulE (PsatzC (ctimes p1 p2)) x | PsatzC p1 , PsatzMulE x (PsatzC p2) => PsatzMulE (PsatzC (ctimes p1 p2)) x | PsatzMulE (PsatzC p2) x , PsatzC p1 => PsatzMulE (PsatzC (ctimes p1 p2)) x | PsatzMulE x (PsatzC p2) , PsatzC p1 => PsatzMulE (PsatzC (ctimes p1 p2)) x | PsatzC x , PsatzAdd y z => PsatzAdd (PsatzMulE (PsatzC x) y) (PsatzMulE (PsatzC x) z) | PsatzC c , _ => if ceqb cI c then t2 else PsatzMulE t1 t2 | _ , PsatzC c => if ceqb cI c then t1 else PsatzMulE t1 t2 | _ , _ => e end | PsatzAdd t1 t2 => match t1 , t2 with | PsatzZ , x => x | x , PsatzZ => x | x , y => PsatzAdd x y end | _ => e end. End Micromega. 
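(* Informal note on [simpl_cone] above: it performs local, non-recursive
   rewrites on [Psatz] terms, for instance
     PsatzMulE PsatzZ x                 ==>  PsatzZ
     PsatzMulE (PsatzC c) (PsatzC c')   ==>  PsatzC (ctimes c c')
     PsatzSquare (Pc c)                 ==>  PsatzC (ctimes c c)  (PsatzZ if c = cO)
   The certificate generator (plugins/micromega/certificate.ml, functions
   [rec_simpl_cone] and [simplify_cone]) applies it bottom-up and iterates
   until a fixpoint is reached. *)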
(* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/CheckerMaker.v0000640000175000001440000001144012010532755017766 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Formula -> Prop. Variable Formula' : Type. Variable eval' : Env -> Formula' -> Prop. Variable normalise : Formula -> Formula'. Variable negate : Formula -> Formula'. Hypothesis normalise_sound : forall (env : Env) (t : Formula), eval env t -> eval' env (normalise t). Hypothesis negate_correct : forall (env : Env) (t : Formula), eval env t <-> ~ (eval' env (negate t)). Variable Witness : Type. Variable check_formulas' : list Formula' -> Witness -> bool. Hypothesis check_formulas'_sound : forall (l : list Formula') (w : Witness), check_formulas' l w = true -> forall env : Env, make_impl (eval' env) l False. Definition normalise_list : list Formula -> list Formula' := map normalise. Definition negate_list : list Formula -> list Formula' := map negate. Definition check_formulas (l : list Formula) (w : Witness) : bool := check_formulas' (map normalise l) w. (* Contraposition of normalise_sound for lists *) Lemma normalise_sound_contr : forall (env : Env) (l : list Formula), make_impl (eval' env) (map normalise l) False -> make_impl (eval env) l False. Proof. intros env l; induction l as [| t l IH]; simpl in *. trivial. intros H1 H2. apply IH. apply H1. now apply normalise_sound. Qed. Theorem check_formulas_sound : forall (l : list Formula) (w : Witness), check_formulas l w = true -> forall env : Env, make_impl (eval env) l False. Proof. unfold check_formulas; intros l w H env. destruct l as [| t l]; simpl in *. pose proof (check_formulas'_sound H env) as H1; now simpl in H1. intro H1. apply normalise_sound in H1. pose proof (check_formulas'_sound H env) as H2; simpl in H2. apply H2 in H1. now apply normalise_sound_contr. Qed. (* In check_conj_formulas', t2 is supposed to be a list of negations of formulas. If, for example, t1 = [A1, A2] and t2 = [~ B1, ~ B2], then check_conj_formulas' checks that each of [~ B1, A1, A2] and [~ B2, A1, A2] is inconsistent. This means that A1 /\ A2 -> B1 and A1 /\ A2 -> B1, i.e., that A1 /\ A2 -> B1 /\ B2. *) Fixpoint check_conj_formulas' (t1 : list Formula') (wits : list Witness) (t2 : list Formula') {struct wits} : bool := match t2 with | nil => true | t':: rt2 => match wits with | nil => false | w :: rwits => match check_formulas' (t':: t1) w with | true => check_conj_formulas' t1 rwits rt2 | false => false end end end. (* checks whether the conjunction of t1 implies the conjunction of t2 *) Definition check_conj_formulas (t1 : list Formula) (wits : list Witness) (t2 : list Formula) : bool := check_conj_formulas' (normalise_list t1) wits (negate_list t2). Theorem check_conj_formulas_sound : forall (t1 : list Formula) (t2 : list Formula) (wits : list Witness), check_conj_formulas t1 wits t2 = true -> forall env : Env, make_impl (eval env) t1 (make_conj (eval env) t2). Proof. intro t1; induction t2 as [| a2 t2' IH]. intros; apply make_impl_true. intros wits H env. unfold check_conj_formulas in H; simpl in H. destruct wits as [| w ws]; simpl in H. discriminate. case_eq (check_formulas' (negate a2 :: normalise_list t1) w); intro H1; rewrite H1 in H; [| discriminate]. assert (H2 : make_impl (eval' env) (negate a2 :: normalise_list t1) False) by now apply check_formulas'_sound with (w := w). clear H1. pose proof (IH ws H env) as H1. simpl in H2. 
assert (H3 : eval' env (negate a2) -> make_impl (eval env) t1 False) by auto using normalise_sound_contr. clear H2. rewrite <- make_conj_impl in *. rewrite make_conj_cons. intro H2. split. apply <- negate_correct. intro; now elim H3. exact (H1 H2). Qed. End CheckerMaker. *)coq-8.4pl2/plugins/micromega/QMicromega.v0000640000175000001440000001474412010532755017500 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* x) (fun x => x) (pow_N 1 Qmult). Proof. constructor. constructor ; intros ; try reflexivity. apply Qeq_bool_eq; auto. constructor. reflexivity. intros x y. apply Qeq_bool_neq ; auto. apply Qle_bool_imp_le. Qed. (*Definition Zeval_expr := eval_pexpr 0 Z.add Z.mul Z.sub Z.opp (fun x => x) (fun x => Z.of_N x) (Z.pow).*) Require Import EnvRing. Fixpoint Qeval_expr (env: PolEnv Q) (e: PExpr Q) : Q := match e with | PEc c => c | PEX j => env j | PEadd pe1 pe2 => (Qeval_expr env pe1) + (Qeval_expr env pe2) | PEsub pe1 pe2 => (Qeval_expr env pe1) - (Qeval_expr env pe2) | PEmul pe1 pe2 => (Qeval_expr env pe1) * (Qeval_expr env pe2) | PEopp pe1 => - (Qeval_expr env pe1) | PEpow pe1 n => Qpower (Qeval_expr env pe1) (Z.of_N n) end. Lemma Qeval_expr_simpl : forall env e, Qeval_expr env e = match e with | PEc c => c | PEX j => env j | PEadd pe1 pe2 => (Qeval_expr env pe1) + (Qeval_expr env pe2) | PEsub pe1 pe2 => (Qeval_expr env pe1) - (Qeval_expr env pe2) | PEmul pe1 pe2 => (Qeval_expr env pe1) * (Qeval_expr env pe2) | PEopp pe1 => - (Qeval_expr env pe1) | PEpow pe1 n => Qpower (Qeval_expr env pe1) (Z.of_N n) end. Proof. destruct e ; reflexivity. Qed. Definition Qeval_expr' := eval_pexpr Qplus Qmult Qminus Qopp (fun x => x) (fun x => x) (pow_N 1 Qmult). Lemma QNpower : forall r n, r ^ Z.of_N n = pow_N 1 Qmult r n. Proof. destruct n ; reflexivity. Qed. Lemma Qeval_expr_compat : forall env e, Qeval_expr env e = Qeval_expr' env e. Proof. induction e ; simpl ; subst ; try congruence. reflexivity. rewrite IHe. apply QNpower. Qed. Definition Qeval_op2 (o : Op2) : Q -> Q -> Prop := match o with | OpEq => Qeq | OpNEq => fun x y => ~ x == y | OpLe => Qle | OpGe => fun x y => Qle y x | OpLt => Qlt | OpGt => fun x y => Qlt y x end. Definition Qeval_formula (e:PolEnv Q) (ff : Formula Q) := let (lhs,o,rhs) := ff in Qeval_op2 o (Qeval_expr e lhs) (Qeval_expr e rhs). Definition Qeval_formula' := eval_formula Qplus Qmult Qminus Qopp Qeq Qle Qlt (fun x => x) (fun x => x) (pow_N 1 Qmult). Lemma Qeval_formula_compat : forall env f, Qeval_formula env f <-> Qeval_formula' env f. Proof. intros. unfold Qeval_formula. destruct f. repeat rewrite Qeval_expr_compat. unfold Qeval_formula'. unfold Qeval_expr'. split ; destruct Fop ; simpl; auto. Qed. Definition Qeval_nformula := eval_nformula 0 Qplus Qmult Qeq Qle Qlt (fun x => x) . Definition Qeval_op1 (o : Op1) : Q -> Prop := match o with | Equal => fun x : Q => x == 0 | NonEqual => fun x : Q => ~ x == 0 | Strict => fun x : Q => 0 < x | NonStrict => fun x : Q => 0 <= x end. Lemma Qeval_nformula_dec : forall env d, (Qeval_nformula env d) \/ ~ (Qeval_nformula env d). Proof. exact (fun env d =>eval_nformula_dec Qsor (fun x => x) env d). Qed. Definition QWitness := Psatz Q. Definition QWeakChecker := check_normalised_formulas 0 1 Qplus Qmult Qeq_bool Qle_bool. Require Import List. Lemma QWeakChecker_sound : forall (l : list (NFormula Q)) (cm : QWitness), QWeakChecker l cm = true -> forall env, make_impl (Qeval_nformula env) l False. Proof. intros l cm H. intro. 
unfold Qeval_nformula. apply (checker_nf_sound Qsor QSORaddon l cm). unfold QWeakChecker in H. exact H. Qed. Require Import Tauto. Definition Qnormalise := @cnf_normalise Q 0 1 Qplus Qmult Qminus Qopp Qeq_bool. Definition Qnegate := @cnf_negate Q 0 1 Qplus Qmult Qminus Qopp Qeq_bool. Definition qunsat := check_inconsistent 0 Qeq_bool Qle_bool. Definition qdeduce := nformula_plus_nformula 0 Qplus Qeq_bool. Definition QTautoChecker (f : BFormula (Formula Q)) (w: list QWitness) : bool := @tauto_checker (Formula Q) (NFormula Q) qunsat qdeduce Qnormalise Qnegate QWitness QWeakChecker f w. Lemma QTautoChecker_sound : forall f w, QTautoChecker f w = true -> forall env, eval_f (Qeval_formula env) f. Proof. intros f w. unfold QTautoChecker. apply (tauto_checker_sound Qeval_formula Qeval_nformula). apply Qeval_nformula_dec. intros until env. unfold eval_nformula. unfold RingMicromega.eval_nformula. destruct t. apply (check_inconsistent_sound Qsor QSORaddon) ; auto. unfold qdeduce. apply (nformula_plus_nformula_correct Qsor QSORaddon). intros. rewrite Qeval_formula_compat. unfold Qeval_formula'. now apply (cnf_normalise_correct Qsor QSORaddon). intros. rewrite Qeval_formula_compat. unfold Qeval_formula'. now apply (cnf_negate_correct Qsor QSORaddon). intros t w0. apply QWeakChecker_sound. Qed. (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/RMicromega.v0000640000175000001440000003302112010532755017466 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (IZR (Qnum x) * / IZR (' Qden x))%R. Lemma Rinv_elim : forall x y z, y <> 0 -> (z * y = x <-> x * / y = z). Proof. intros. split ; intros. subst. rewrite Rmult_assoc. rewrite Rinv_r; auto. ring. subst. rewrite Rmult_assoc. rewrite (Rmult_comm (/ y)). rewrite Rinv_r ; auto. ring. Qed. Ltac INR_nat_of_P := match goal with | H : context[INR (Pos.to_nat ?X)] |- _ => revert H ; let HH := fresh in assert (HH := pos_INR_nat_of_P X) ; revert HH ; generalize (INR (Pos.to_nat X)) | |- context[INR (Pos.to_nat ?X)] => let HH := fresh in assert (HH := pos_INR_nat_of_P X) ; revert HH ; generalize (INR (Pos.to_nat X)) end. Ltac add_eq expr val := set (temp := expr) ; generalize (eq_refl temp) ; unfold temp at 1 ; generalize temp ; intro val ; clear temp. Ltac Rinv_elim := match goal with | |- context[?x * / ?y] => let z := fresh "v" in add_eq (x * / y) z ; let H := fresh in intro H ; rewrite <- Rinv_elim in H end. Lemma Rlt_neq : forall r , 0 < r -> r <> 0. Proof. red. intros. subst. apply (Rlt_irrefl 0 H). Qed. Lemma Rinv_1 : forall x, x * / 1 = x. Proof. intro. Rinv_elim. subst ; ring. apply R1_neq_R0. Qed. Lemma Qeq_true : forall x y, Qeq_bool x y = true -> IQR x = IQR y. Proof. unfold IQR. simpl. intros. apply Qeq_bool_eq in H. unfold Qeq in H. assert (IZR (Qnum x * ' Qden y) = IZR (Qnum y * ' Qden x))%Z. rewrite H. reflexivity. repeat rewrite mult_IZR in H0. simpl in H0. revert H0. repeat INR_nat_of_P. intros. apply Rinv_elim in H2 ; [| apply Rlt_neq ; auto]. rewrite <- H2. field. split ; apply Rlt_neq ; auto. Qed. Lemma Qeq_false : forall x y, Qeq_bool x y = false -> IQR x <> IQR y. Proof. intros. apply Qeq_bool_neq in H. intro. apply H. clear H. unfold Qeq,IQR in *. simpl in *. revert H0. repeat Rinv_elim. intros. subst. assert (IZR (Qnum x * ' Qden y)%Z = IZR (Qnum y * ' Qden x)%Z). repeat rewrite mult_IZR. simpl. rewrite <- H0. rewrite <- H. ring. apply eq_IZR ; auto. INR_nat_of_P; intros; apply Rlt_neq ; auto. 
INR_nat_of_P; intros ; apply Rlt_neq ; auto. Qed. Lemma Qle_true : forall x y : Q, Qle_bool x y = true -> IQR x <= IQR y. Proof. intros. apply Qle_bool_imp_le in H. unfold Qle in H. unfold IQR. simpl in *. apply IZR_le in H. repeat rewrite mult_IZR in H. simpl in H. repeat INR_nat_of_P; intros. assert (Hr := Rlt_neq r H). assert (Hr0 := Rlt_neq r0 H0). replace (IZR (Qnum x) * / r) with ((IZR (Qnum x) * r0) * (/r * /r0)). replace (IZR (Qnum y) * / r0) with ((IZR (Qnum y) * r) * (/r * /r0)). apply Rmult_le_compat_r ; auto. apply Rmult_le_pos. unfold Rle. left. apply Rinv_0_lt_compat ; auto. unfold Rle. left. apply Rinv_0_lt_compat ; auto. field ; intuition. field ; intuition. Qed. Lemma IQR_0 : IQR 0 = 0. Proof. compute. apply Rinv_1. Qed. Lemma IQR_1 : IQR 1 = 1. Proof. compute. apply Rinv_1. Qed. Lemma IQR_plus : forall x y, IQR (x + y) = IQR x + IQR y. Proof. intros. unfold IQR. simpl in *. rewrite plus_IZR in *. rewrite mult_IZR in *. simpl. rewrite Pos2Nat.inj_mul. rewrite mult_INR. rewrite mult_IZR. simpl. repeat INR_nat_of_P. intros. field. split ; apply Rlt_neq ; auto. Qed. Lemma IQR_opp : forall x, IQR (- x) = - IQR x. Proof. intros. unfold IQR. simpl. rewrite opp_IZR. ring. Qed. Lemma IQR_minus : forall x y, IQR (x - y) = IQR x - IQR y. Proof. intros. unfold Qminus. rewrite IQR_plus. rewrite IQR_opp. ring. Qed. Lemma IQR_mult : forall x y, IQR (x * y) = IQR x * IQR y. Proof. unfold IQR ; intros. simpl. repeat rewrite mult_IZR. simpl. rewrite Pos2Nat.inj_mul. rewrite mult_INR. repeat INR_nat_of_P. intros. field ; split ; apply Rlt_neq ; auto. Qed. Lemma IQR_inv_lt : forall x, (0 < x)%Q -> IQR (/ x) = / IQR x. Proof. unfold IQR ; simpl. intros. unfold Qlt in H. revert H. simpl. intros. unfold Qinv. destruct x ; simpl in *. destruct Qnum ; simpl. exfalso. auto with zarith. clear H. repeat INR_nat_of_P. intros. assert (HH := Rlt_neq _ H). assert (HH0 := Rlt_neq _ H0). rewrite Rinv_mult_distr ; auto. rewrite Rinv_involutive ; auto. ring. apply Rinv_0_lt_compat in H0. apply Rlt_neq ; auto. simpl in H. exfalso. rewrite Pos.mul_comm in H. compute in H. discriminate. Qed. Lemma Qinv_opp : forall x, (- (/ x) = / ( -x))%Q. Proof. destruct x ; destruct Qnum ; reflexivity. Qed. Lemma Qopp_involutive_strong : forall x, (- - x = x)%Q. Proof. intros. destruct x. unfold Qopp. simpl. rewrite Z.opp_involutive. reflexivity. Qed. Lemma Ropp_0 : forall r , - r = 0 -> r = 0. Proof. intros. rewrite <- (Ropp_involutive r). apply Ropp_eq_0_compat ; auto. Qed. Lemma IQR_x_0 : forall x, IQR x = 0 -> x == 0%Q. Proof. destruct x ; simpl. unfold IQR. simpl. INR_nat_of_P. intros. apply Rmult_integral in H0. destruct H0. apply eq_IZR_R0 in H0. subst. reflexivity. exfalso. apply Rinv_0_lt_compat in H. rewrite <- H0 in H. apply Rlt_irrefl in H. auto. Qed. Lemma IQR_inv_gt : forall x, (0 > x)%Q -> IQR (/ x) = / IQR x. Proof. intros. rewrite <- (Qopp_involutive_strong x). rewrite <- Qinv_opp. rewrite IQR_opp. rewrite IQR_inv_lt. repeat rewrite IQR_opp. rewrite Ropp_inv_permute. auto. intro. apply Ropp_0 in H0. apply IQR_x_0 in H0. rewrite H0 in H. compute in H. discriminate. unfold Qlt in *. destruct x ; simpl in *. auto with zarith. Qed. Lemma IQR_inv : forall x, ~ x == 0 -> IQR (/ x) = / IQR x. Proof. intros. assert ( 0 > x \/ 0 < x)%Q. destruct x ; unfold Qlt, Qeq in * ; simpl in *. rewrite Z.mul_1_r in *. destruct Qnum ; simpl in * ; intuition auto. right. reflexivity. left ; reflexivity. destruct H0. apply IQR_inv_gt ; auto. apply IQR_inv_lt ; auto. Qed. 
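(* Note: [Qinv] is total in Coq ([/ 0 == 0] over Q), whereas [IQR (/ x) = / IQR x]
   is only meaningful for [x ~= 0]; this is why [IQR_inv] above carries that
   hypothesis. The next lemma, [IQR_inv_ext], removes it by testing
   [Qeq_bool x 0] and mapping the degenerate case to [0] on the R side -- the
   same convention used later by [R_of_Rcst] to interpret [CInv]. *)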
Lemma IQR_inv_ext : forall x, IQR (/ x) = (if Qeq_bool x 0 then 0 else / IQR x). Proof. intros. case_eq (Qeq_bool x 0). intros. apply Qeq_bool_eq in H. destruct x ; simpl. unfold Qeq in H. simpl in H. replace Qnum with 0%Z. compute. rewrite Rinv_1. reflexivity. rewrite <- H. ring. intros. apply IQR_inv. intro. rewrite <- Qeq_bool_iff in H0. congruence. Qed. Notation to_nat := N.to_nat. Lemma QSORaddon : @SORaddon R R0 R1 Rplus Rmult Rminus Ropp (@eq R) Rle (* ring elements *) Q 0%Q 1%Q Qplus Qmult Qminus Qopp (* coefficients *) Qeq_bool Qle_bool IQR nat to_nat pow. Proof. constructor. constructor ; intros ; try reflexivity. apply IQR_0. apply IQR_1. apply IQR_plus. apply IQR_minus. apply IQR_mult. apply IQR_opp. apply Qeq_true ; auto. apply R_power_theory. apply Qeq_false. apply Qle_true. Qed. (* Syntactic ring coefficients. For computing, we use Q. *) Inductive Rcst := | C0 | C1 | CQ (r : Q) | CZ (r : Z) | CPlus (r1 r2 : Rcst) | CMinus (r1 r2 : Rcst) | CMult (r1 r2 : Rcst) | CInv (r : Rcst) | COpp (r : Rcst). Fixpoint Q_of_Rcst (r : Rcst) : Q := match r with | C0 => 0 # 1 | C1 => 1 # 1 | CZ z => z # 1 | CQ q => q | CPlus r1 r2 => Qplus (Q_of_Rcst r1) (Q_of_Rcst r2) | CMinus r1 r2 => Qminus (Q_of_Rcst r1) (Q_of_Rcst r2) | CMult r1 r2 => Qmult (Q_of_Rcst r1) (Q_of_Rcst r2) | CInv r => Qinv (Q_of_Rcst r) | COpp r => Qopp (Q_of_Rcst r) end. Fixpoint R_of_Rcst (r : Rcst) : R := match r with | C0 => R0 | C1 => R1 | CZ z => IZR z | CQ q => IQR q | CPlus r1 r2 => (R_of_Rcst r1) + (R_of_Rcst r2) | CMinus r1 r2 => (R_of_Rcst r1) - (R_of_Rcst r2) | CMult r1 r2 => (R_of_Rcst r1) * (R_of_Rcst r2) | CInv r => if Qeq_bool (Q_of_Rcst r) (0 # 1) then R0 else Rinv (R_of_Rcst r) | COpp r => - (R_of_Rcst r) end. Lemma Q_of_RcstR : forall c, IQR (Q_of_Rcst c) = R_of_Rcst c. Proof. induction c ; simpl ; try (rewrite <- IHc1 ; rewrite <- IHc2). apply IQR_0. apply IQR_1. reflexivity. unfold IQR. simpl. rewrite Rinv_1. reflexivity. apply IQR_plus. apply IQR_minus. apply IQR_mult. rewrite <- IHc. apply IQR_inv_ext. rewrite <- IHc. apply IQR_opp. Qed. Require Import EnvRing. Definition INZ (n:N) : R := match n with | N0 => IZR 0%Z | Npos p => IZR (Zpos p) end. Definition Reval_expr := eval_pexpr Rplus Rmult Rminus Ropp R_of_Rcst N.to_nat pow. Definition Reval_op2 (o:Op2) : R -> R -> Prop := match o with | OpEq => @eq R | OpNEq => fun x y => ~ x = y | OpLe => Rle | OpGe => Rge | OpLt => Rlt | OpGt => Rgt end. Definition Reval_formula (e: PolEnv R) (ff : Formula Rcst) := let (lhs,o,rhs) := ff in Reval_op2 o (Reval_expr e lhs) (Reval_expr e rhs). Definition Reval_formula' := eval_sformula Rplus Rmult Rminus Ropp (@eq R) Rle Rlt N.to_nat pow R_of_Rcst. Definition QReval_formula := eval_formula Rplus Rmult Rminus Ropp (@eq R) Rle Rlt IQR N.to_nat pow . Lemma Reval_formula_compat : forall env f, Reval_formula env f <-> Reval_formula' env f. Proof. intros. unfold Reval_formula. destruct f. unfold Reval_formula'. unfold Reval_expr. split ; destruct Fop ; simpl ; auto. apply Rge_le. apply Rle_ge. Qed. Definition Qeval_nformula := eval_nformula 0 Rplus Rmult (@eq R) Rle Rlt IQR. Lemma Reval_nformula_dec : forall env d, (Qeval_nformula env d) \/ ~ (Qeval_nformula env d). Proof. exact (fun env d =>eval_nformula_dec Rsor IQR env d). Qed. Definition RWitness := Psatz Q. Definition RWeakChecker := check_normalised_formulas 0%Q 1%Q Qplus Qmult Qeq_bool Qle_bool. Require Import List. 
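(* Design note: the reflexive checker for R never computes with real numbers.
   Certificates ([RWitness = Psatz Q]) and the checker ([RWeakChecker], an
   instance of [check_normalised_formulas] over Q) work purely on rational
   coefficients; soundness over R is then transported through the injection
   [IQR], packaged as the morphism [QSORaddon] used in [RWeakChecker_sound]
   below. *)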
Lemma RWeakChecker_sound : forall (l : list (NFormula Q)) (cm : RWitness), RWeakChecker l cm = true -> forall env, make_impl (Qeval_nformula env) l False. Proof. intros l cm H. intro. unfold Qeval_nformula. apply (checker_nf_sound Rsor QSORaddon l cm). unfold RWeakChecker in H. exact H. Qed. Require Import Tauto. Definition Rnormalise := @cnf_normalise Q 0%Q 1%Q Qplus Qmult Qminus Qopp Qeq_bool. Definition Rnegate := @cnf_negate Q 0%Q 1%Q Qplus Qmult Qminus Qopp Qeq_bool. Definition runsat := check_inconsistent 0%Q Qeq_bool Qle_bool. Definition rdeduce := nformula_plus_nformula 0%Q Qplus Qeq_bool. Definition RTautoChecker (f : BFormula (Formula Rcst)) (w: list RWitness) : bool := @tauto_checker (Formula Q) (NFormula Q) runsat rdeduce Rnormalise Rnegate RWitness RWeakChecker (map_bformula (map_Formula Q_of_Rcst) f) w. Lemma RTautoChecker_sound : forall f w, RTautoChecker f w = true -> forall env, eval_f (Reval_formula env) f. Proof. intros f w. unfold RTautoChecker. intros TC env. apply (tauto_checker_sound QReval_formula Qeval_nformula) with (env := env) in TC. rewrite eval_f_map in TC. rewrite eval_f_morph with (ev':= Reval_formula env) in TC ; auto. intro. unfold QReval_formula. rewrite <- eval_formulaSC with (phiS := R_of_Rcst). rewrite Reval_formula_compat. tauto. intro. rewrite Q_of_RcstR. reflexivity. apply Reval_nformula_dec. destruct t. apply (check_inconsistent_sound Rsor QSORaddon) ; auto. unfold rdeduce. apply (nformula_plus_nformula_correct Rsor QSORaddon). now apply (cnf_normalise_correct Rsor QSORaddon). intros. now apply (cnf_negate_correct Rsor QSORaddon). intros t w0. apply RWeakChecker_sound. Qed. (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/certificate.ml0000640000175000001440000011115512121620060020061 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 'a; number_to_num : 'a -> num; zero : 'a; unit : 'a; mult : 'a -> 'a -> 'a; eqb : 'a -> 'a -> bool } let z_spec = { bigint_to_number = Ml2C.bigint ; number_to_num = (fun x -> Big_int (C2Ml.z_big_int x)); zero = Mc.Z0; unit = Mc.Zpos Mc.XH; mult = Mc.Z.mul; eqb = Mc.zeq_bool } let q_spec = { bigint_to_number = (fun x -> {Mc.qnum = Ml2C.bigint x; Mc.qden = Mc.XH}); number_to_num = C2Ml.q_to_num; zero = {Mc.qnum = Mc.Z0;Mc.qden = Mc.XH}; unit = {Mc.qnum = (Mc.Zpos Mc.XH) ; Mc.qden = Mc.XH}; mult = Mc.qmult; eqb = Mc.qeq_bool } let r_spec = z_spec let dev_form n_spec p = let rec dev_form p = match p with | Mc.PEc z -> Poly.constant (n_spec.number_to_num z) | Mc.PEX v -> Poly.variable (C2Ml.positive v) | Mc.PEmul(p1,p2) -> let p1 = dev_form p1 in let p2 = dev_form p2 in Poly.product p1 p2 | Mc.PEadd(p1,p2) -> Poly.addition (dev_form p1) (dev_form p2) | Mc.PEopp p -> Poly.uminus (dev_form p) | Mc.PEsub(p1,p2) -> Poly.addition (dev_form p1) (Poly.uminus (dev_form p2)) | Mc.PEpow(p,n) -> let p = dev_form p in let n = C2Ml.n n in let rec pow n = if n = 0 then Poly.constant (n_spec.number_to_num n_spec.unit) else Poly.product p (pow (n-1)) in pow n in dev_form p let monomial_to_polynomial mn = Monomial.fold (fun v i acc -> let v = Ml2C.positive v in let mn = if i = 1 then Mc.PEX v else Mc.PEpow (Mc.PEX v ,Ml2C.n i) in if acc = Mc.PEc (Mc.Zpos Mc.XH) then mn else Mc.PEmul(mn,acc)) mn (Mc.PEc (Mc.Zpos Mc.XH)) let list_to_polynomial vars l = assert (List.for_all (fun x -> ceiling_num x =/ x) l); let var x = monomial_to_polynomial (List.nth vars x) in let rec xtopoly p i = function | [] -> p | 
c::l -> if c =/ (Int 0) then xtopoly p (i+1) l else let c = Mc.PEc (Ml2C.bigint (numerator c)) in let mn = if c = Mc.PEc (Mc.Zpos Mc.XH) then var i else Mc.PEmul (c,var i) in let p' = if p = Mc.PEc Mc.Z0 then mn else Mc.PEadd (mn, p) in xtopoly p' (i+1) l in xtopoly (Mc.PEc Mc.Z0) 0 l let rec fixpoint f x = let y' = f x in if y' = x then y' else fixpoint f y' let rec_simpl_cone n_spec e = let simpl_cone = Mc.simpl_cone n_spec.zero n_spec.unit n_spec.mult n_spec.eqb in let rec rec_simpl_cone = function | Mc.PsatzMulE(t1, t2) -> simpl_cone (Mc.PsatzMulE (rec_simpl_cone t1, rec_simpl_cone t2)) | Mc.PsatzAdd(t1,t2) -> simpl_cone (Mc.PsatzAdd (rec_simpl_cone t1, rec_simpl_cone t2)) | x -> simpl_cone x in rec_simpl_cone e let simplify_cone n_spec c = fixpoint (rec_simpl_cone n_spec) c type cone_prod = Const of cone | Ideal of cone *cone | Mult of cone * cone | Other of cone and cone = Mc.zWitness let factorise_linear_cone c = let rec cone_list c l = match c with | Mc.PsatzAdd (x,r) -> cone_list r (x::l) | _ -> c :: l in let factorise c1 c2 = match c1 , c2 with | Mc.PsatzMulC(x,y) , Mc.PsatzMulC(x',y') -> if x = x' then Some (Mc.PsatzMulC(x, Mc.PsatzAdd(y,y'))) else None | Mc.PsatzMulE(x,y) , Mc.PsatzMulE(x',y') -> if x = x' then Some (Mc.PsatzMulE(x, Mc.PsatzAdd(y,y'))) else None | _ -> None in let rec rebuild_cone l pending = match l with | [] -> (match pending with | None -> Mc.PsatzZ | Some p -> p ) | e::l -> (match pending with | None -> rebuild_cone l (Some e) | Some p -> (match factorise p e with | None -> Mc.PsatzAdd(p, rebuild_cone l (Some e)) | Some f -> rebuild_cone l (Some f) ) ) in (rebuild_cone (List.sort Pervasives.compare (cone_list c [])) None) (* The binding with Fourier might be a bit obsolete -- how does it handle equalities ? *) (* Certificates are elements of the cone such that P = 0 *) (* To begin with, we search for certificates of the form: a1.p1 + ... an.pn + b1.q1 +... + bn.qn + c = 0 where pi >= 0 qi > 0 ai >= 0 bi >= 0 Sum bi + c >= 1 This is a linear problem: each monomial is considered as a variable. Hence, we can use fourier. The variable c is at index 0 *) open Mfourier (* fold_left followed by a rev ! 
*) let constrain_monomial mn l = let coeffs = List.fold_left (fun acc p -> (Poly.get mn p)::acc) [] l in if mn = Monomial.const then { coeffs = Vect.from_list ((Big_int unit_big_int):: (List.rev coeffs)) ; op = Eq ; cst = Big_int zero_big_int } else { coeffs = Vect.from_list ((Big_int zero_big_int):: (List.rev coeffs)) ; op = Eq ; cst = Big_int zero_big_int } let positivity l = let rec xpositivity i l = match l with | [] -> [] | (_,Mc.Equal)::l -> xpositivity (i+1) l | (_,_)::l -> {coeffs = Vect.update (i+1) (fun _ -> Int 1) Vect.null ; op = Ge ; cst = Int 0 } :: (xpositivity (i+1) l) in xpositivity 0 l let string_of_op = function | Mc.Strict -> "> 0" | Mc.NonStrict -> ">= 0" | Mc.Equal -> "= 0" | Mc.NonEqual -> "<> 0" (* If the certificate includes at least one strict inequality, the obtained polynomial can also be 0 *) let build_linear_system l = (* Gather the monomials: HINT add up of the polynomials ==> This does not work anymore *) let l' = List.map fst l in let module MonSet = Set.Make(Monomial) in let monomials = List.fold_left (fun acc p -> Poly.fold (fun m _ acc -> MonSet.add m acc) p acc) (MonSet.singleton Monomial.const) l' in (* For each monomial, compute a constraint *) let s0 = MonSet.fold (fun mn res -> (constrain_monomial mn l')::res) monomials [] in (* I need at least something strictly positive *) let strict = { coeffs = Vect.from_list ((Big_int unit_big_int):: (List.map (fun (x,y) -> match y with Mc.Strict -> Big_int unit_big_int | _ -> Big_int zero_big_int) l)); op = Ge ; cst = Big_int unit_big_int } in (* Add the positivity constraint *) {coeffs = Vect.from_list ([Big_int unit_big_int]) ; op = Ge ; cst = Big_int zero_big_int}::(strict::(positivity l)@s0) let big_int_to_z = Ml2C.bigint (* For Q, this is a pity that the certificate has been scaled -- at a lower layer, certificates are using nums... 
*) let make_certificate n_spec (cert,li) = let bint_to_cst = n_spec.bigint_to_number in match cert with | [] -> failwith "empty_certificate" | e::cert' -> (* let cst = match compare_big_int e zero_big_int with | 0 -> Mc.PsatzZ | 1 -> Mc.PsatzC (bint_to_cst e) | _ -> failwith "positivity error" in *) let rec scalar_product cert l = match cert with | [] -> Mc.PsatzZ | c::cert -> match l with | [] -> failwith "make_certificate(1)" | i::l -> let r = scalar_product cert l in match compare_big_int c zero_big_int with | -1 -> Mc.PsatzAdd ( Mc.PsatzMulC (Mc.Pc ( bint_to_cst c), Mc.PsatzIn (Ml2C.nat i)), r) | 0 -> r | _ -> Mc.PsatzAdd ( Mc.PsatzMulE (Mc.PsatzC (bint_to_cst c), Mc.PsatzIn (Ml2C.nat i)), r) in (factorise_linear_cone (simplify_cone n_spec (scalar_product cert' li))) exception Found of Monomial.t exception Strict let primal l = let vr = ref 0 in let module Mmn = Map.Make(Monomial) in let vect_of_poly map p = Poly.fold (fun mn vl (map,vect) -> if mn = Monomial.const then (map,vect) else let (mn,m) = try (Mmn.find mn map,map) with Not_found -> let res = (!vr, Mmn.add mn !vr map) in incr vr ; res in (m,if sign_num vl = 0 then vect else (mn,vl)::vect)) p (map,[]) in let op_op = function Mc.NonStrict -> Ge |Mc.Equal -> Eq | _ -> raise Strict in let cmp x y = Pervasives.compare (fst x) (fst y) in snd (List.fold_right (fun (p,op) (map,l) -> let (mp,vect) = vect_of_poly map p in let cstr = {coeffs = List.sort cmp vect; op = op_op op ; cst = minus_num (Poly.get Monomial.const p)} in (mp,cstr::l)) l (Mmn.empty,[])) let dual_raw_certificate (l: (Poly.t * Mc.op1) list) = (* List.iter (fun (p,op) -> Printf.fprintf stdout "%a %s 0\n" Poly.pp p (string_of_op op) ) l ; *) let sys = build_linear_system l in try match Fourier.find_point sys with | Inr _ -> None | Inl cert -> Some (rats_to_ints (Vect.to_list cert)) (* should not use rats_to_ints *) with x when Errors.noncritical x -> if debug then (Printf.printf "raw certificate %s" (Printexc.to_string x); flush stdout) ; None let raw_certificate l = try let p = primal l in match Fourier.find_point p with | Inr prf -> if debug then Printf.printf "AProof : %a\n" pp_proof prf ; let cert = List.map (fun (x,n) -> x+1,n) (fst (List.hd (Proof.mk_proof p prf))) in if debug then Printf.printf "CProof : %a" Vect.pp_vect cert ; Some (rats_to_ints (Vect.to_list cert)) | Inl _ -> None with Strict -> (* Fourier elimination should handle > *) dual_raw_certificate l let simple_linear_prover l = let (lc,li) = List.split l in match raw_certificate lc with | None -> None (* No certificate *) | Some cert -> Some (cert,li) let linear_prover n_spec l = let build_system n_spec l = let li = List.combine l (interval 0 (List.length l -1)) in let (l1,l') = List.partition (fun (x,_) -> if snd x = Mc.NonEqual then true else false) li in List.map (fun ((x,y),i) -> match y with Mc.NonEqual -> failwith "cannot happen" | y -> ((dev_form n_spec x, y),i)) l' in let l' = build_system n_spec l in simple_linear_prover (*n_spec*) l' let linear_prover n_spec l = try linear_prover n_spec l with x when x <> Sys.Break -> (print_string (Printexc.to_string x); None) let linear_prover_with_cert spec l = match linear_prover spec l with | None -> None | Some cert -> Some (make_certificate spec cert) let make_linear_system l = let l' = List.map fst l in let monomials = List.fold_left (fun acc p -> Poly.addition p acc) (Poly.constant (Int 0)) l' in let monomials = Poly.fold (fun mn _ l -> if mn = Monomial.const then l else mn::l) monomials [] in (List.map (fun (c,op) -> {coeffs = Vect.from_list 
(List.map (fun mn -> (Poly.get mn c)) monomials) ; op = op ; cst = minus_num ( (Poly.get Monomial.const c))}) l ,monomials) let pplus x y = Mc.PEadd(x,y) let pmult x y = Mc.PEmul(x,y) let pconst x = Mc.PEc x let popp x = Mc.PEopp x let debug = false (* keep track of enumerated vectors *) let rec mem p x l = match l with [] -> false | e::l -> if p x e then true else mem p x l let rec remove_assoc p x l = match l with [] -> [] | e::l -> if p x (fst e) then remove_assoc p x l else e::(remove_assoc p x l) let eq x y = Vect.compare x y = 0 let remove e l = List.fold_left (fun l x -> if eq x e then l else x::l) [] l (* The prover is (probably) incomplete -- only searching for naive cutting planes *) let develop_constraint z_spec (e,k) = match k with | Mc.NonStrict -> (dev_form z_spec e , Ge) | Mc.Equal -> (dev_form z_spec e , Eq) | _ -> assert false let op_of_op_compat = function | Ge -> Mc.NonStrict | Eq -> Mc.Equal let integer_vector coeffs = let vars , coeffs = List.split coeffs in List.combine vars (List.map (fun x -> Big_int x) (rats_to_ints coeffs)) let integer_cstr {coeffs = coeffs ; op = op ; cst = cst } = let vars , coeffs = List.split coeffs in match rats_to_ints (cst::coeffs) with | cst :: coeffs -> { coeffs = List.combine vars (List.map (fun x -> Big_int x) coeffs) ; op = op ; cst = Big_int cst} | _ -> assert false let pexpr_of_cstr_compat var cstr = let {coeffs = coeffs ; op = op ; cst = cst } = integer_cstr cstr in try let expr = list_to_polynomial var (Vect.to_list coeffs) in let d = Ml2C.bigint (denominator cst) in let n = Ml2C.bigint (numerator cst) in (pplus (pmult (pconst d) expr) (popp (pconst n)), op_of_op_compat op) with Failure _ -> failwith "pexpr_of_cstr_compat" open Sos_types let rec scale_term t = match t with | Zero -> unit_big_int , Zero | Const n -> (denominator n) , Const (Big_int (numerator n)) | Var n -> unit_big_int , Var n | Inv _ -> failwith "scale_term : not implemented" | Opp t -> let s, t = scale_term t in s, Opp t | Add(t1,t2) -> let s1,y1 = scale_term t1 and s2,y2 = scale_term t2 in let g = gcd_big_int s1 s2 in let s1' = div_big_int s1 g in let s2' = div_big_int s2 g in let e = mult_big_int g (mult_big_int s1' s2') in if (compare_big_int e unit_big_int) = 0 then (unit_big_int, Add (y1,y2)) else e, Add (Mul(Const (Big_int s2'), y1), Mul (Const (Big_int s1'), y2)) | Sub _ -> failwith "scale term: not implemented" | Mul(y,z) -> let s1,y1 = scale_term y and s2,y2 = scale_term z in mult_big_int s1 s2 , Mul (y1, y2) | Pow(t,n) -> let s,t = scale_term t in power_big_int_positive_int s n , Pow(t,n) | _ -> failwith "scale_term : not implemented" let scale_term t = let (s,t') = scale_term t in s,t' let get_index_of_ith_match f i l = let rec get j res l = match l with | [] -> failwith "bad index" | e::l -> if f e then (if j = i then res else get (j+1) (res+1) l ) else get j (res+1) l in get 0 0 l let rec scale_certificate pos = match pos with | Axiom_eq i -> unit_big_int , Axiom_eq i | Axiom_le i -> unit_big_int , Axiom_le i | Axiom_lt i -> unit_big_int , Axiom_lt i | Monoid l -> unit_big_int , Monoid l | Rational_eq n -> (denominator n) , Rational_eq (Big_int (numerator n)) | Rational_le n -> (denominator n) , Rational_le (Big_int (numerator n)) | Rational_lt n -> (denominator n) , Rational_lt (Big_int (numerator n)) | Square t -> let s,t' = scale_term t in mult_big_int s s , Square t' | Eqmul (t, y) -> let s1,y1 = scale_term t and s2,y2 = scale_certificate y in mult_big_int s1 s2 , Eqmul (y1,y2) | Sum (y, z) -> let s1,y1 = scale_certificate y and s2,y2 = 
scale_certificate z in let g = gcd_big_int s1 s2 in let s1' = div_big_int s1 g in let s2' = div_big_int s2 g in mult_big_int g (mult_big_int s1' s2'), Sum (Product(Rational_le (Big_int s2'), y1), Product (Rational_le (Big_int s1'), y2)) | Product (y, z) -> let s1,y1 = scale_certificate y and s2,y2 = scale_certificate z in mult_big_int s1 s2 , Product (y1,y2) open Micromega let rec term_to_q_expr = function | Const n -> PEc (Ml2C.q n) | Zero -> PEc ( Ml2C.q (Int 0)) | Var s -> PEX (Ml2C.index (int_of_string (String.sub s 1 (String.length s - 1)))) | Mul(p1,p2) -> PEmul(term_to_q_expr p1, term_to_q_expr p2) | Add(p1,p2) -> PEadd(term_to_q_expr p1, term_to_q_expr p2) | Opp p -> PEopp (term_to_q_expr p) | Pow(t,n) -> PEpow (term_to_q_expr t,Ml2C.n n) | Sub(t1,t2) -> PEsub (term_to_q_expr t1, term_to_q_expr t2) | _ -> failwith "term_to_q_expr: not implemented" let term_to_q_pol e = Mc.norm_aux (Ml2C.q (Int 0)) (Ml2C.q (Int 1)) Mc.qplus Mc.qmult Mc.qminus Mc.qopp Mc.qeq_bool (term_to_q_expr e) let rec product l = match l with | [] -> Mc.PsatzZ | [i] -> Mc.PsatzIn (Ml2C.nat i) | i ::l -> Mc.PsatzMulE(Mc.PsatzIn (Ml2C.nat i), product l) let q_cert_of_pos pos = let rec _cert_of_pos = function Axiom_eq i -> Mc.PsatzIn (Ml2C.nat i) | Axiom_le i -> Mc.PsatzIn (Ml2C.nat i) | Axiom_lt i -> Mc.PsatzIn (Ml2C.nat i) | Monoid l -> product l | Rational_eq n | Rational_le n | Rational_lt n -> if compare_num n (Int 0) = 0 then Mc.PsatzZ else Mc.PsatzC (Ml2C.q n) | Square t -> Mc.PsatzSquare (term_to_q_pol t) | Eqmul (t, y) -> Mc.PsatzMulC(term_to_q_pol t, _cert_of_pos y) | Sum (y, z) -> Mc.PsatzAdd (_cert_of_pos y, _cert_of_pos z) | Product (y, z) -> Mc.PsatzMulE (_cert_of_pos y, _cert_of_pos z) in simplify_cone q_spec (_cert_of_pos pos) let rec term_to_z_expr = function | Const n -> PEc (Ml2C.bigint (big_int_of_num n)) | Zero -> PEc ( Z0) | Var s -> PEX (Ml2C.index (int_of_string (String.sub s 1 (String.length s - 1)))) | Mul(p1,p2) -> PEmul(term_to_z_expr p1, term_to_z_expr p2) | Add(p1,p2) -> PEadd(term_to_z_expr p1, term_to_z_expr p2) | Opp p -> PEopp (term_to_z_expr p) | Pow(t,n) -> PEpow (term_to_z_expr t,Ml2C.n n) | Sub(t1,t2) -> PEsub (term_to_z_expr t1, term_to_z_expr t2) | _ -> failwith "term_to_z_expr: not implemented" let term_to_z_pol e = Mc.norm_aux (Ml2C.z 0) (Ml2C.z 1) Mc.Z.add Mc.Z.mul Mc.Z.sub Mc.Z.opp Mc.zeq_bool (term_to_z_expr e) let z_cert_of_pos pos = let s,pos = (scale_certificate pos) in let rec _cert_of_pos = function Axiom_eq i -> Mc.PsatzIn (Ml2C.nat i) | Axiom_le i -> Mc.PsatzIn (Ml2C.nat i) | Axiom_lt i -> Mc.PsatzIn (Ml2C.nat i) | Monoid l -> product l | Rational_eq n | Rational_le n | Rational_lt n -> if compare_num n (Int 0) = 0 then Mc.PsatzZ else Mc.PsatzC (Ml2C.bigint (big_int_of_num n)) | Square t -> Mc.PsatzSquare (term_to_z_pol t) | Eqmul (t, y) -> let is_unit = match t with | Const n -> n =/ Int 1 | _ -> false in if is_unit then _cert_of_pos y else Mc.PsatzMulC(term_to_z_pol t, _cert_of_pos y) | Sum (y, z) -> Mc.PsatzAdd (_cert_of_pos y, _cert_of_pos z) | Product (y, z) -> Mc.PsatzMulE (_cert_of_pos y, _cert_of_pos z) in simplify_cone z_spec (_cert_of_pos pos) (** All constraints (initial or derived) have an index and have a justification i.e., proof. Given a constraint, all the coefficients are always integers. 
*) open Mutils open Mfourier open Num open Big_int open Polynomial (*module Mc = Micromega*) (*module Ml2C = Mutils.CamlToCoq module C2Ml = Mutils.CoqToCaml *) let debug = false module Env = struct type t = int list let id_of_hyp hyp l = let rec xid_of_hyp i l = match l with | [] -> failwith "id_of_hyp" | hyp'::l -> if hyp = hyp' then i else xid_of_hyp (i+1) l in xid_of_hyp 0 l end let coq_poly_of_linpol (p,c) = let pol_of_mon m = Monomial.fold (fun x v p -> Mc.PEmul(Mc.PEpow(Mc.PEX(Ml2C.positive x),Ml2C.n v),p)) m (Mc.PEc (Mc.Zpos Mc.XH)) in List.fold_left (fun acc (x,v) -> let mn = LinPoly.MonT.retrieve x in Mc.PEadd(Mc.PEmul(Mc.PEc (Ml2C.bigint (numerator v)), pol_of_mon mn),acc)) (Mc.PEc (Ml2C.bigint (numerator c))) p let rec cmpl_prf_rule env = function | Hyp i | Def i -> Mc.PsatzIn (Ml2C.nat (Env.id_of_hyp i env)) | Cst i -> Mc.PsatzC (Ml2C.bigint i) | Zero -> Mc.PsatzZ | MulPrf(p1,p2) -> Mc.PsatzMulE(cmpl_prf_rule env p1, cmpl_prf_rule env p2) | AddPrf(p1,p2) -> Mc.PsatzAdd(cmpl_prf_rule env p1 , cmpl_prf_rule env p2) | MulC(lp,p) -> let lp = Mc.norm0 (coq_poly_of_linpol lp) in Mc.PsatzMulC(lp,cmpl_prf_rule env p) | Square lp -> Mc.PsatzSquare (Mc.norm0 (coq_poly_of_linpol lp)) | _ -> failwith "Cuts should already be compiled" let rec cmpl_proof env = function | Done -> Mc.DoneProof | Step(i,p,prf) -> begin match p with | CutPrf p' -> Mc.CutProof(cmpl_prf_rule env p', cmpl_proof (i::env) prf) | _ -> Mc.RatProof(cmpl_prf_rule env p,cmpl_proof (i::env) prf) end | Enum(i,p1,_,p2,l) -> Mc.EnumProof(cmpl_prf_rule env p1,cmpl_prf_rule env p2,List.map (cmpl_proof (i::env)) l) let compile_proof env prf = let id = 1 + proof_max_id prf in let _,prf = normalise_proof id prf in if debug then Printf.fprintf stdout "compiled proof %a\n" output_proof prf; cmpl_proof env prf type prf_sys = (cstr_compat * prf_rule) list let xlinear_prover sys = match Fourier.find_point sys with | Inr prf -> if debug then Printf.printf "AProof : %a\n" pp_proof prf ; let cert = (*List.map (fun (x,n) -> x+1,n)*) (fst (List.hd (Proof.mk_proof sys prf))) in if debug then Printf.printf "CProof : %a" Vect.pp_vect cert ; Some (rats_to_ints (Vect.to_list cert)) | Inl _ -> None let output_num o n = output_string o (string_of_num n) let output_bigint o n = output_string o (string_of_big_int n) let proof_of_farkas prf cert = (* Printf.printf "\nproof_of_farkas %a , %a \n" (pp_list output_prf_rule) prf (pp_list output_bigint) cert ; *) let rec mk_farkas acc prf cert = match prf, cert with | _ , [] -> acc | [] , _ -> failwith "proof_of_farkas : not enough hyps" | p::prf,c::cert -> mk_farkas (add_proof (mul_proof c p) acc) prf cert in let res = mk_farkas Zero prf cert in (*Printf.printf "==> %a" output_prf_rule res ; *) res let linear_prover sys = let (sysi,prfi) = List.split sys in match xlinear_prover sysi with | None -> None | Some cert -> Some (proof_of_farkas prfi cert) let linear_prover = if debug then fun sys -> Printf.printf ""; flush stdout ; res else linear_prover (** A single constraint can be unsat for the following reasons: - 0 >= c for c a negative constant - 0 = c for c a non-zero constant - e = c when the coeffs of e are all integers and c is rational *) type checksat = | Tauto (* Tautology *) | Unsat of prf_rule (* Unsatisfiable *) | Cut of cstr_compat * prf_rule (* Cutting plane *) | Normalise of cstr_compat * prf_rule (* coefficients are relatively prime *) (** [check_sat] - detects constraints that are not satisfiable; - normalises constraints and generate cuts. 
*) let check_sat (cstr,prf) = let {coeffs=coeffs ; op=op ; cst=cst} = cstr in match coeffs with | [] -> if eval_op op (Int 0) cst then Tauto else Unsat prf | _ -> let gcdi = (gcd_list (List.map snd coeffs)) in let gcd = Big_int gcdi in if eq_num gcd (Int 1) then Normalise(cstr,prf) else if sign_num (mod_num cst gcd) = 0 then (* We can really normalise *) begin assert (sign_num gcd >=1 ) ; let cstr = { coeffs = List.map (fun (x,v) -> (x, v // gcd)) coeffs; op = op ; cst = cst // gcd } in Normalise(cstr,Gcd(gcdi,prf)) (* Normalise(cstr,CutPrf prf)*) end else match op with | Eq -> Unsat (CutPrf prf) | Ge -> let cstr = { coeffs = List.map (fun (x,v) -> (x, v // gcd)) coeffs; op = op ; cst = ceiling_num (cst // gcd) } in Cut(cstr,CutPrf prf) (** Proof generating pivoting over variable v *) let pivot v (c1,p1) (c2,p2) = let {coeffs = v1 ; op = op1 ; cst = n1} = c1 and {coeffs = v2 ; op = op2 ; cst = n2} = c2 in (* Could factorise gcd... *) let xpivot cv1 cv2 = ( {coeffs = Vect.add (Vect.mul cv1 v1) (Vect.mul cv2 v2) ; op = Proof.add_op op1 op2 ; cst = n1 */ cv1 +/ n2 */ cv2 }, AddPrf(mul_proof (numerator cv1) p1,mul_proof (numerator cv2) p2)) in match Vect.get v v1 , Vect.get v v2 with | None , _ | _ , None -> None | Some a , Some b -> if (sign_num a) * (sign_num b) = -1 then let cv1 = abs_num b and cv2 = abs_num a in Some (xpivot cv1 cv2) else if op1 = Eq then let cv1 = minus_num (b */ (Int (sign_num a))) and cv2 = abs_num a in Some (xpivot cv1 cv2) else if op2 = Eq then let cv1 = abs_num b and cv2 = minus_num (a */ (Int (sign_num b))) in Some (xpivot cv1 cv2) else None (* op2 could be Eq ... this might happen *) exception FoundProof of prf_rule let rec simpl_sys sys = List.fold_left (fun acc (c,p) -> match check_sat (c,p) with | Tauto -> acc | Unsat prf -> raise (FoundProof prf) | Cut(c,p) -> (c,p)::acc | Normalise (c,p) -> (c,p)::acc) [] sys (** [ext_gcd a b] is the extended Euclid algorithm. 
[ext_gcd a b = (x,y,g)] iff [ax+by=g] Source: http://en.wikipedia.org/wiki/Extended_Euclidean_algorithm *) let rec ext_gcd a b = if sign_big_int b = 0 then (unit_big_int,zero_big_int) else let (q,r) = quomod_big_int a b in let (s,t) = ext_gcd b r in (t, sub_big_int s (mult_big_int q t)) let pp_ext_gcd a b = let a' = big_int_of_int a in let b' = big_int_of_int b in let (x,y) = ext_gcd a' b' in Printf.fprintf stdout "%s * %s + %s * %s = %s\n" (string_of_big_int x) (string_of_big_int a') (string_of_big_int y) (string_of_big_int b') (string_of_big_int (add_big_int (mult_big_int x a') (mult_big_int y b'))) exception Result of (int * (proof * cstr_compat)) let split_equations psys = List.partition (fun (c,p) -> c.op = Eq) let extract_coprime (c1,p1) (c2,p2) = let rec exist2 vect1 vect2 = match vect1 , vect2 with | _ , [] | [], _ -> None | (v1,n1)::vect1' , (v2, n2) :: vect2' -> if v1 = v2 then if compare_big_int (gcd_big_int (numerator n1) (numerator n2)) unit_big_int = 0 then Some (v1,n1,n2) else exist2 vect1' vect2' else if v1 < v2 then exist2 vect1' vect2 else exist2 vect1 vect2' in if c1.op = Eq && c2.op = Eq then exist2 c1.coeffs c2.coeffs else None let extract2 pred l = let rec xextract2 rl l = match l with | [] -> (None,rl) (* Did not find *) | e::l -> match extract (pred e) l with | None,_ -> xextract2 (e::rl) l | Some (r,e'),l' -> Some (r,e,e'), List.rev_append rl l' in xextract2 [] l let extract_coprime_equation psys = extract2 extract_coprime psys let apply_and_normalise f psys = List.fold_left (fun acc pc' -> match f pc' with | None -> pc'::acc | Some pc' -> match check_sat pc' with | Tauto -> acc | Unsat prf -> raise (FoundProof prf) | Cut(c,p) -> (c,p)::acc | Normalise (c,p) -> (c,p)::acc ) [] psys let pivot_sys v pc psys = apply_and_normalise (pivot v pc) psys let reduce_coprime psys = let oeq,sys = extract_coprime_equation psys in match oeq with | None -> None (* Nothing to do *) | Some((v,n1,n2),(c1,p1),(c2,p2) ) -> let (l1,l2) = ext_gcd (numerator n1) (numerator n2) in let l1' = Big_int l1 and l2' = Big_int l2 in let cstr = {coeffs = Vect.add (Vect.mul l1' c1.coeffs) (Vect.mul l2' c2.coeffs); op = Eq ; cst = (l1' */ c1.cst) +/ (l2' */ c2.cst) } in let prf = add_proof (mul_proof (numerator l1') p1) (mul_proof (numerator l2') p2) in Some (pivot_sys v (cstr,prf) ((c1,p1)::sys)) (** If there is an equation [eq] of the form 1.x + e = c, do a pivot over x with equation [eq] *) let reduce_unary psys = let is_unary_equation (cstr,prf) = if cstr.op = Eq then try Some (fst (List.find (fun (_,n) -> n =/ (Int 1) || n=/ (Int (-1))) cstr.coeffs)) with Not_found -> None else None in let (oeq,sys) = extract is_unary_equation psys in match oeq with | None -> None (* Nothing to do *) | Some(v,pc) -> Some(pivot_sys v pc sys) let reduce_non_lin_unary psys = let is_unary_equation (cstr,prf) = if cstr.op = Eq then try let x = fst (List.find (fun (x,n) -> (n =/ (Int 1) || n=/ (Int (-1))) && Monomial.is_var (LinPoly.MonT.retrieve x) ) cstr.coeffs) in let x' = LinPoly.MonT.retrieve x in if List.for_all (fun (y,_) -> y = x || snd (Monomial.div (LinPoly.MonT.retrieve y) x') = 0) cstr.coeffs then Some x else None with Not_found -> None else None in let (oeq,sys) = extract is_unary_equation psys in match oeq with | None -> None (* Nothing to do *) | Some(v,pc) -> Some(apply_and_normalise (LinPoly.pivot_eq v pc) sys) let reduce_var_change psys = let rec rel_prime vect = match vect with | [] -> None | (x,v)::vect -> let v = numerator v in try let (x',v') = List.find (fun (_,v') -> let v' = numerator v' in 
eq_big_int (gcd_big_int v v') unit_big_int) vect in Some ((x,v),(x',numerator v')) with Not_found -> rel_prime vect in let rel_prime (cstr,prf) = if cstr.op = Eq then rel_prime cstr.coeffs else None in let (oeq,sys) = extract rel_prime psys in match oeq with | None -> None | Some(((x,v),(x',v')),(c,p)) -> let (l1,l2) = ext_gcd v v' in let l1,l2 = Big_int l1 , Big_int l2 in let get v vect = match Vect.get v vect with | None -> Int 0 | Some n -> n in let pivot_eq (c',p') = let {coeffs = coeffs ; op = op ; cst = cst} = c' in let vx = get x coeffs in let vx' = get x' coeffs in let m = minus_num (vx */ l1 +/ vx' */ l2) in Some ({coeffs = Vect.add (Vect.mul m c.coeffs) coeffs ; op = op ; cst = m */ c.cst +/ cst} , AddPrf(MulC(([], m),p),p')) in Some (apply_and_normalise pivot_eq sys) let reduce_pivot psys = let is_equation (cstr,prf) = if cstr.op = Eq then try Some (fst (List.hd cstr.coeffs)) with Not_found -> None else None in let (oeq,sys) = extract is_equation psys in match oeq with | None -> None (* Nothing to do *) | Some(v,pc) -> if debug then Printf.printf "Bad news : loss of completeness %a=%s" Vect.pp_vect (fst pc).coeffs (string_of_num (fst pc).cst); Some(pivot_sys v pc sys) let iterate_until_stable f x = let rec iter x = match f x with | None -> x | Some x' -> iter x' in iter x let rec app_funs l x = match l with | [] -> None | f::fl -> match f x with | None -> app_funs fl x | Some x' -> Some x' let reduction_equations psys = iterate_until_stable (app_funs [reduce_unary ; reduce_coprime ; reduce_var_change (*; reduce_pivot*)]) psys let reduction_non_lin_equations psys = iterate_until_stable (app_funs [reduce_non_lin_unary (*; reduce_coprime ; reduce_var_change ; reduce_pivot *)]) psys (** [get_bound sys] returns upon success an interval (lb,e,ub) with proofs *) let get_bound sys = let is_small (v,i) = match Itv.range i with | None -> false | Some i -> i <=/ (Int 1) in let select_best (x1,i1) (x2,i2) = if Itv.smaller_itv i1 i2 then (x1,i1) else (x2,i2) in (* For lia, there are no equations => these precautions are not needed *) (* For nlia, there are equations => do not enumerate over equations! 
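Concretely, [all_planes] below keeps only the direction vectors of the inequalities, dropping those whose direction coincides with an equation; [Fourier.optimise] then computes an interval for each remaining direction over the whole system, and the direction with the smallest integer range is retained. For instance, a direction constrained to the interval [0, 3/2] has integer range 2 (candidate values 0 and 1), which is what the enumeration performed by the caller will branch on.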
*) let all_planes sys = let (eq,ineq) = List.partition (fun c -> c.op = Eq) sys in match eq with | [] -> List.rev_map (fun c -> c.coeffs) ineq | _ -> List.fold_left (fun acc c -> if List.exists (fun c' -> Vect.equal c.coeffs c'.coeffs) eq then acc else c.coeffs ::acc) [] ineq in let smallest_interval = List.fold_left (fun acc vect -> if is_small acc then acc else match Fourier.optimise vect sys with | None -> acc | Some i -> if debug then Printf.printf "Found a new bound %a" Vect.pp_vect vect ; select_best (vect,i) acc) (Vect.null, (None,None)) (all_planes sys) in let smallest_interval = match smallest_interval with | (x,(Some i, Some j)) -> Some(i,x,j) | x -> None (* This should not be possible *) in match smallest_interval with | Some (lb,e,ub) -> let (lbn,lbd) = (sub_big_int (numerator lb) unit_big_int, denominator lb) in let (ubn,ubd) = (add_big_int unit_big_int (numerator ub) , denominator ub) in (match (* x <= ub -> x > ub *) xlinear_prover ({coeffs = Vect.mul (Big_int ubd) e ; op = Ge ; cst = Big_int ubn} :: sys), (* lb <= x -> lb > x *) xlinear_prover ({coeffs = Vect.mul (minus_num (Big_int lbd)) e ; op = Ge ; cst = minus_num (Big_int lbn)} :: sys) with | Some cub , Some clb -> Some(List.tl clb,(lb,e,ub), List.tl cub) | _ -> failwith "Interval without proof" ) | None -> None let check_sys sys = List.for_all (fun (c,p) -> List.for_all (fun (_,n) -> sign_num n <> 0) c.coeffs) sys let xlia reduction_equations sys = let rec enum_proof (id:int) (sys:prf_sys) : proof option = if debug then (Printf.printf "enum_proof\n" ; flush stdout) ; assert (check_sys sys) ; let nsys,prf = List.split sys in match get_bound nsys with | None -> None (* Is the systeme really unbounded ? *) | Some(prf1,(lb,e,ub),prf2) -> if debug then Printf.printf "Found interval: %a in [%s;%s] -> " Vect.pp_vect e (string_of_num lb) (string_of_num ub) ; (match start_enum id e (ceiling_num lb) (floor_num ub) sys with | Some prfl -> Some(Enum(id,proof_of_farkas prf prf1,e, proof_of_farkas prf prf2,prfl)) | None -> None ) and start_enum id e clb cub sys = if clb >/ cub then Some [] else let eq = {coeffs = e ; op = Eq ; cst = clb} in match aux_lia (id+1) ((eq, Def id) :: sys) with | None -> None | Some prf -> match start_enum id e (clb +/ (Int 1)) cub sys with | None -> None | Some l -> Some (prf::l) and aux_lia (id:int) (sys:prf_sys) : proof option = assert (check_sys sys) ; if debug then Printf.printf "xlia: %a \n" (pp_list (fun o (c,_) -> output_cstr o c)) sys ; try let sys = reduction_equations sys in if debug then Printf.printf "after reduction: %a \n" (pp_list (fun o (c,_) -> output_cstr o c)) sys ; match linear_prover sys with | Some prf -> Some (Step(id,prf,Done)) | None -> enum_proof id sys with FoundProof prf -> (* [reduction_equations] can find a proof *) Some(Step(id,prf,Done)) in (* let sys' = List.map (fun (p,o) -> Mc.norm0 p , o) sys in*) let id = List.length sys in let orpf = try let sys = simpl_sys sys in aux_lia id sys with FoundProof pr -> Some(Step(id,pr,Done)) in match orpf with | None -> None | Some prf -> (*Printf.printf "direct proof %a\n" output_proof prf ; *) let env = mapi (fun _ i -> i) sys in let prf = compile_proof env prf in (*try if Mc.zChecker sys' prf then Some prf else raise Certificate.BadCertificate with Failure s -> (Printf.printf "%s" s ; Some prf) *) Some prf let cstr_compat_of_poly (p,o) = let (v,c) = LinPoly.linpol_of_pol p in {coeffs = v ; op = o ; cst = minus_num c } let lia sys = LinPoly.MonT.clear (); let sys = List.map (develop_constraint z_spec) sys in let (sys:cstr_compat 
list) = List.map cstr_compat_of_poly sys in let sys = mapi (fun c i -> (c,Hyp i)) sys in xlia reduction_equations sys let nlia sys = LinPoly.MonT.clear (); let sys = List.map (develop_constraint z_spec) sys in let sys = mapi (fun c i -> (c,Hyp i)) sys in let is_linear = List.for_all (fun ((p,_),_) -> Poly.is_linear p) sys in let module MonMap = Map.Make(Monomial) in let collect_square = List.fold_left (fun acc ((p,_),_) -> Poly.fold (fun m _ acc -> match Monomial.sqrt m with | None -> acc | Some s -> MonMap.add s m acc) p acc) MonMap.empty sys in let sys = MonMap.fold (fun s m acc -> let s = LinPoly.linpol_of_pol (Poly.add s (Int 1) (Poly.constant (Int 0))) in let m = Poly.add m (Int 1) (Poly.constant (Int 0)) in ((m, Ge), (Square s))::acc) collect_square sys in (* List.iter (fun ((p,_),_) -> Printf.printf "square %a\n" Poly.pp p) gen_square*) let sys = if is_linear then sys else sys @ (all_sym_pairs (fun ((c,o),p) ((c',o'),p') -> ((Poly.product c c',opMult o o'), MulPrf(p,p'))) sys) in let sys = List.map (fun (c,p) -> cstr_compat_of_poly c,p) sys in assert (check_sys sys) ; xlia (if is_linear then reduction_equations else reduction_non_lin_equations) sys (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/polynomial.ml0000640000175000001440000004573312010532755020004 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ) = add_num let (<->) = minus_num let (<*>) = mult_num module Monomial : sig type t val const : t val is_const : t -> bool val var : var -> t val is_var : t -> bool val find : var -> t -> int val mult : var -> t -> t val prod : t -> t -> t val exp : t -> int -> t val div : t -> t -> t * int val compare : t -> t -> int val pp : out_channel -> t -> unit val fold : (var -> int -> 'a -> 'a) -> t -> 'a -> 'a val sqrt : t -> t option end = struct (* A monomial is represented by a multiset of variables *) module Map = Map.Make(struct type t = var let compare = Pervasives.compare end) open Map type t = int Map.t let pp o m = Map.iter (fun k v -> if v = 1 then Printf.fprintf o "x%i." k else Printf.fprintf o "x%i^%i." 
k v) m (* The monomial that corresponds to a constant *) let const = Map.empty let sum_degree m = Map.fold (fun _ n s -> s + n) m 0 (* Total ordering of monomials *) let compare: t -> t -> int = fun m1 m2 -> let s1 = sum_degree m1 and s2 = sum_degree m2 in if s1 = s2 then Map.compare Pervasives.compare m1 m2 else Pervasives.compare s1 s2 let is_const m = (m = Map.empty) (* The monomial 'x' *) let var x = Map.add x 1 Map.empty let is_var m = try not (Map.fold (fun _ i fk -> if fk = true (* first key *) then if i = 1 then false else raise Not_found else raise Not_found) m true) with Not_found -> false let sqrt m = if is_const m then None else try Some (Map.fold (fun v i acc -> let i' = i / 2 in if i mod 2 = 0 then add v i' m else raise Not_found) m const) with Not_found -> None (* Get the degre of a variable in a monomial *) let find x m = try find x m with Not_found -> 0 (* Multiply a monomial by a variable *) let mult x m = add x ( (find x m) + 1) m (* Product of monomials *) let prod m1 m2 = Map.fold (fun k d m -> add k ((find k m) + d) m) m1 m2 let exp m n = let rec exp acc n = if n = 0 then acc else exp (prod acc m) (n - 1) in exp const n (* [div m1 m2 = mr,n] such that mr * (m2)^n = m1 *) let div m1 m2 = let n = fold (fun x i n -> let i' = find x m1 in let nx = i' / i in min n nx) m2 max_int in let mr = fold (fun x i' m -> let i = find x m2 in let ir = i' - i * n in if ir = 0 then m else add x ir m) m1 empty in (mr,n) let fold = fold end module Poly : (* A polynomial is a map of monomials *) (* This is probably a naive implementation (expected to be fast enough - Coq is probably the bottleneck) *The new ring contribution is using a sparse Horner representation. *) sig type t val get : Monomial.t -> t -> num val variable : var -> t val add : Monomial.t -> num -> t -> t val constant : num -> t val mult : Monomial.t -> num -> t -> t val product : t -> t -> t val addition : t -> t -> t val uminus : t -> t val fold : (Monomial.t -> num -> 'a -> 'a) -> t -> 'a -> 'a val pp : out_channel -> t -> unit val compare : t -> t -> int val is_null : t -> bool val is_linear : t -> bool end = struct (*normalisation bug : 0*x ... *) module P = Map.Make(Monomial) open P type t = num P.t let pp o p = P.iter (fun k v -> if Monomial.compare Monomial.const k = 0 then Printf.fprintf o "%s " (string_of_num v) else Printf.fprintf o "%s*%a " (string_of_num v) Monomial.pp k) p (* Get the coefficient of monomial mn *) let get : Monomial.t -> t -> num = fun mn p -> try find mn p with Not_found -> (Int 0) (* The polynomial 1.x *) let variable : var -> t = fun x -> add (Monomial.var x) (Int 1) empty (*The constant polynomial *) let constant : num -> t = fun c -> add (Monomial.const) c empty (* The addition of a monomial *) let add : Monomial.t -> num -> t -> t = fun mn v p -> if sign_num v = 0 then p else let vl = (get mn p) <+> v in if sign_num vl = 0 then remove mn p else add mn vl p (** Design choice: empty is not a polynomial I do not remember why .... 
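Note also, as an illustration of the invariant that [add] tries to maintain: coefficients stored in the map are intended never to be zero. For instance [add (Monomial.var x) (Int (-1)) (variable x)] removes the binding for [x] altogether rather than storing a zero coefficient, which is what allows [is_null] below to simply check that no non-zero coefficient remains.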
**) (* The product by a monomial *) let mult : Monomial.t -> num -> t -> t = fun mn v p -> if sign_num v = 0 then constant (Int 0) else fold (fun mn' v' res -> P.add (Monomial.prod mn mn') (v<*>v') res) p empty let addition : t -> t -> t = fun p1 p2 -> fold (fun mn v p -> add mn v p) p1 p2 let product : t -> t -> t = fun p1 p2 -> fold (fun mn v res -> addition (mult mn v p2) res ) p1 empty let uminus : t -> t = fun p -> map (fun v -> minus_num v) p let fold = P.fold let is_null p = fold (fun mn vl b -> b & sign_num vl = 0) p true let compare = compare compare_num let is_linear p = P.fold (fun m _ acc -> acc && (Monomial.is_const m || Monomial.is_var m)) p true (* let is_linear p = let res = is_linear p in Printf.printf "is_linear %a = %b\n" pp p res ; res *) end module Vect = struct (** [t] is the type of vectors. A vector [(x1,v1) ; ... ; (xn,vn)] is such that: - variables indexes are ordered (x1 true | [] , _ -> false | _::_ , [] -> false | (i1,n1)::v1 , (i2,n2)::v2 -> (i1 = i2) && n1 =/ n2 && equal v1 v2 let hash v = let rec hash i = function | [] -> i | (vr,vl)::l -> hash (i + (Hashtbl.hash (vr, float_of_num vl))) l in Hashtbl.hash (hash 0 v ) let null = [] let pp_vect o vect = List.iter (fun (v,n) -> Printf.printf "%sx%i + " (string_of_num n) v) vect let from_list (l: num list) = let rec xfrom_list i l = match l with | [] -> [] | e::l -> if e <>/ Int 0 then (i,e)::(xfrom_list (i+1) l) else xfrom_list (i+1) l in xfrom_list 0 l let zero_num = Int 0 let unit_num = Int 1 let to_list m = let rec xto_list i l = match l with | [] -> [] | (x,v)::l' -> if i = x then v::(xto_list (i+1) l') else zero_num ::(xto_list (i+1) l) in xto_list 0 m let cons i v rst = if v =/ Int 0 then rst else (i,v)::rst let rec update i f t = match t with | [] -> cons i (f zero_num) [] | (k,v)::l -> match Pervasives.compare i k with | 0 -> cons k (f v) l | -1 -> cons i (f zero_num) t | 1 -> (k,v) ::(update i f l) | _ -> failwith "compare_num" let rec set i n t = match t with | [] -> cons i n [] | (k,v)::l -> match Pervasives.compare i k with | 0 -> cons k n l | -1 -> cons i n t | 1 -> (k,v) :: (set i n l) | _ -> failwith "compare_num" let gcd m = let res = List.fold_left (fun x (i,e) -> Big_int.gcd_big_int x (Utils.numerator e)) Big_int.zero_big_int m in if Big_int.compare_big_int res Big_int.zero_big_int = 0 then Big_int.unit_big_int else res let rec mul z t = match z with | Int 0 -> [] | Int 1 -> t | _ -> List.map (fun (i,n) -> (i, mult_num z n)) t let rec add v1 v2 = match v1 , v2 with | (x1,n1)::v1' , (x2,n2)::v2' -> if x1 = x2 then let n' = n1 +/ n2 in if n' =/ Int 0 then add v1' v2' else let res = add v1' v2' in (x1,n') ::res else if x1 < x2 then let res = add v1' v2 in (x1, n1)::res else let res = add v1 v2' in (x2, n2)::res | [] , [] -> [] | [] , _ -> v2 | _ , [] -> v1 let compare : t -> t -> int = Utils.Cmp.compare_list (fun x y -> Utils.Cmp.compare_lexical [ (fun () -> Pervasives.compare (fst x) (fst y)); (fun () -> compare_num (snd x) (snd y))]) (** [tail v vect] returns - [None] if [v] is not a variable of the vector [vect] - [Some(vl,rst)] where [vl] is the value of [v] in vector [vect] and [rst] is the remaining of the vector We exploit that vectors are ordered lists *) let rec tail (v:var) (vect:t) = match vect with | [] -> None | (v',vl)::vect' -> match Pervasives.compare v' v with | 0 -> Some (vl,vect) (* Ok, found *) | -1 -> tail v vect' (* Might be in the tail *) | _ -> None (* Hopeless *) let get v vect = match tail v vect with | None -> None | Some(vl,_) -> Some vl let rec fresh v = match v with | 
[] -> 1 | [v,_] -> v + 1 | _::v -> fresh v end type vector = Vect.t type cstr_compat = {coeffs : vector ; op : op ; cst : num} and op = |Eq | Ge let string_of_op = function Eq -> "=" | Ge -> ">=" let output_cstr o {coeffs = coeffs ; op = op ; cst = cst} = Printf.fprintf o "%a %s %s" Vect.pp_vect coeffs (string_of_op op) (string_of_num cst) let opMult o1 o2 = match o1, o2 with | Eq , Eq -> Eq | Eq , Ge | Ge , Eq -> Ge | Ge , Ge -> Ge let opAdd o1 o2 = match o1 , o2 with | Eq , _ | _ , Eq -> Eq | Ge , Ge -> Ge open Big_int type index = int type prf_rule = | Hyp of int | Def of int | Cst of big_int | Zero | Square of (Vect.t * num) | MulC of (Vect.t * num) * prf_rule | Gcd of big_int * prf_rule | MulPrf of prf_rule * prf_rule | AddPrf of prf_rule * prf_rule | CutPrf of prf_rule type proof = | Done | Step of int * prf_rule * proof | Enum of int * prf_rule * Vect.t * prf_rule * proof list let rec output_prf_rule o = function | Hyp i -> Printf.fprintf o "Hyp %i" i | Def i -> Printf.fprintf o "Def %i" i | Cst c -> Printf.fprintf o "Cst %s" (string_of_big_int c) | Zero -> Printf.fprintf o "Zero" | Square _ -> Printf.fprintf o "( )^2" | MulC(p,pr) -> Printf.fprintf o "P * %a" output_prf_rule pr | MulPrf(p1,p2) -> Printf.fprintf o "%a * %a" output_prf_rule p1 output_prf_rule p2 | AddPrf(p1,p2) -> Printf.fprintf o "%a + %a" output_prf_rule p1 output_prf_rule p2 | CutPrf(p) -> Printf.fprintf o "[%a]" output_prf_rule p | Gcd(c,p) -> Printf.fprintf o "(%a)/%s" output_prf_rule p (string_of_big_int c) let rec output_proof o = function | Done -> Printf.fprintf o "." | Step(i,p,pf) -> Printf.fprintf o "%i:= %a ; %a" i output_prf_rule p output_proof pf | Enum(i,p1,v,p2,pl) -> Printf.fprintf o "%i{%a<=%a<=%a}%a" i output_prf_rule p1 Vect.pp_vect v output_prf_rule p2 (pp_list output_proof) pl let rec pr_rule_max_id = function | Hyp i | Def i -> i | Cst _ | Zero | Square _ -> -1 | MulC(_,p) | CutPrf p | Gcd(_,p) -> pr_rule_max_id p | MulPrf(p1,p2)| AddPrf(p1,p2) -> max (pr_rule_max_id p1) (pr_rule_max_id p2) let rec proof_max_id = function | Done -> -1 | Step(i,pr,prf) -> max i (max (pr_rule_max_id pr) (proof_max_id prf)) | Enum(i,p1,_,p2,l) -> let m = max (pr_rule_max_id p1) (pr_rule_max_id p2) in List.fold_left (fun i prf -> max i (proof_max_id prf)) (max i m) l let rec pr_rule_def_cut id = function | MulC(p,prf) -> let (bds,id',prf') = pr_rule_def_cut id prf in (bds, id', MulC(p,prf')) | MulPrf(p1,p2) -> let (bds1,id,p1) = pr_rule_def_cut id p1 in let (bds2,id,p2) = pr_rule_def_cut id p2 in (bds2@bds1,id,MulPrf(p1,p2)) | AddPrf(p1,p2) -> let (bds1,id,p1) = pr_rule_def_cut id p1 in let (bds2,id,p2) = pr_rule_def_cut id p2 in (bds2@bds1,id,AddPrf(p1,p2)) | CutPrf p -> let (bds,id,p) = pr_rule_def_cut id p in ((id,p)::bds,id+1,Def id) | Gcd(c,p) -> let (bds,id,p) = pr_rule_def_cut id p in ((id,p)::bds,id+1,Def id) | Square _|Cst _|Def _|Hyp _|Zero as x -> ([],id,x) (* Do not define top-level cuts *) let pr_rule_def_cut id = function | CutPrf p -> let (bds,ids,p') = pr_rule_def_cut id p in bds,ids, CutPrf p' | p -> pr_rule_def_cut id p let rec implicit_cut p = match p with | CutPrf p -> implicit_cut p | _ -> p let rec normalise_proof id prf = match prf with | Done -> (id,Done) | Step(i,Gcd(c,p),Done) -> normalise_proof id (Step(i,p,Done)) | Step(i,p,prf) -> let bds,id,p' = pr_rule_def_cut id p in let (id,prf) = normalise_proof id prf in let prf = List.fold_left (fun acc (i,p) -> Step(i, CutPrf p,acc)) (Step(i,p',prf)) bds in (id,prf) | Enum(i,p1,v,p2,pl) -> (* Why do I have top-level cuts ? 
*) (* let p1 = implicit_cut p1 in let p2 = implicit_cut p2 in let (ids,prfs) = List.split (List.map (normalise_proof id) pl) in (List.fold_left max 0 ids , Enum(i,p1,v,p2,prfs)) *) let bds1,id,p1' = pr_rule_def_cut id (implicit_cut p1) in let bds2,id,p2' = pr_rule_def_cut id (implicit_cut p2) in let (ids,prfs) = List.split (List.map (normalise_proof id) pl) in (List.fold_left max 0 ids , List.fold_left (fun acc (i,p) -> Step(i, CutPrf p,acc)) (Enum(i,p1',v,p2',prfs)) (bds2@bds1)) let normalise_proof id prf = let res = normalise_proof id prf in if debug then Printf.printf "normalise_proof %a -> %a" output_proof prf output_proof (snd res) ; res let add_proof x y = match x, y with | Zero , p | p , Zero -> p | _ -> AddPrf(x,y) let mul_proof c p = match sign_big_int c with | 0 -> Zero (* This is likely to be a bug *) | -1 -> MulC(([],Big_int c),p) (* [p] should represent an equality *) | 1 -> if eq_big_int c unit_big_int then p else MulPrf(Cst c,p) | _ -> assert false let mul_proof_ext (p,c) prf = match p with | [] -> mul_proof (numerator c) prf | _ -> MulC((p,c),prf) (* let rec scale_prf_rule = function | Hyp i -> (unit_big_int, Hyp i) | Def i -> (unit_big_int, Def i) | Cst c -> (unit_big_int, Cst i) | Zero -> (unit_big_int, Zero) | Square p -> (unit_big_int,Square p) | Div(c,pr) -> let (bi,pr') = scale_prf_rule pr in (mult_big_int c bi , pr') | MulC(p,pr) -> let bi,pr' = scale_prf_rule pr in (bi,MulC p,pr') | MulPrf(p1,p2) -> let b1,p1 = scale_prf_rule p1 in let b2,p2 = scale_prf_rule p2 in | AddPrf(p1,p2) -> let b1,p1 = scale_prf_rule p1 in let b2,p2 = scale_prf_rule p2 in let g = gcd_big_int *) module LinPoly = struct type t = Vect.t * num module MonT = struct module MonoMap = Map.Make(Monomial) module IntMap = Map.Make(struct type t = int let compare = Pervasives.compare end) (** A hash table might be preferable but requires a hash function. 
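The two maps below intern monomials: [register m] returns the index already associated with [m], or allocates a fresh one, and [retrieve] is its inverse. This indexing is what lets the linear machinery of [Vect] handle non-linear monomials: a product such as x1*x2 is simply treated as the fresh variable [register (Monomial.prod (Monomial.var 1) (Monomial.var 2))] (an illustration; see [linpol_of_pol] below).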
*) let (index_of_monomial : int MonoMap.t ref) = ref (MonoMap.empty) let (monomial_of_index : Monomial.t IntMap.t ref) = ref (IntMap.empty) let fresh = ref 0 let clear () = index_of_monomial := MonoMap.empty; monomial_of_index := IntMap.empty ; fresh := 0 let register m = try MonoMap.find m !index_of_monomial with Not_found -> begin let res = !fresh in index_of_monomial := MonoMap.add m res !index_of_monomial ; monomial_of_index := IntMap.add res m !monomial_of_index ; incr fresh ; res end let retrieve i = IntMap.find i !monomial_of_index end let normalise (v,c) = (List.sort (fun x y -> Pervasives.compare (fst x) (fst y)) v , c) let output_mon o (x,v) = Printf.fprintf o "%s.%a +" (string_of_num v) Monomial.pp (MonT.retrieve x) let output_cstr o {coeffs = coeffs ; op = op ; cst = cst} = Printf.fprintf o "%a %s %s" (pp_list output_mon) coeffs (string_of_op op) (string_of_num cst) let linpol_of_pol p = let (v,c) = Poly.fold (fun mon num (vct,cst) -> if Monomial.is_const mon then (vct,num) else let vr = MonT.register mon in ((vr,num)::vct,cst)) p ([], Int 0) in normalise (v,c) let mult v m (vect,c) = if Monomial.is_const m then (Vect.mul v vect, v <*> c) else if sign_num v <> 0 then let hd = if sign_num c <> 0 then [MonT.register m,v <*> c] else [] in let vect = hd @ (List.map (fun (x,n) -> let x = MonT.retrieve x in let x_m = MonT.register (Monomial.prod m x) in (x_m, v <*> n)) vect ) in normalise (vect , Int 0) else ([],Int 0) let mult v m (vect,c) = let (vect',c') = mult v m (vect,c) in if debug then Printf.printf "mult %s %a (%a,%s) -> (%a,%s)\n" (string_of_num v) Monomial.pp m (pp_list output_mon) vect (string_of_num c) (pp_list output_mon) vect' (string_of_num c') ; (vect',c') let make_lin_pol v mon = if Monomial.is_const mon then [] , v else [MonT.register mon, v],Int 0 let xpivot_eq (c,prf) x v (c',prf') = if debug then Printf.printf "xpivot_eq {%a} %a %s {%a}\n" output_cstr c Monomial.pp (MonT.retrieve x) (string_of_num v) output_cstr c' ; let {coeffs = coeffs ; op = op ; cst = cst} = c' in let m = MonT.retrieve x in let apply_pivot (vqn,q,n) (c',prf') = (* Morally, we have (Vect.get (q*x^n) c'.coeffs) = vmn with n >=0 *) let cc' = abs_num v in let cc_num = Int (- (sign_num v)) <*> vqn in let cc_mon = Monomial.prod q (Monomial.exp m (n-1)) in let (c_coeff,c_cst) = mult cc_num cc_mon (c.coeffs, minus_num c.cst) in let c' = {coeffs = Vect.add (Vect.mul cc' c'.coeffs) c_coeff ; op = op ; cst = (minus_num c_cst) <+> (cc' <*> c'.cst)} in let prf' = add_proof (mul_proof_ext (make_lin_pol cc_num cc_mon) prf) (mul_proof (numerator cc') prf') in if debug then Printf.printf "apply_pivot -> {%a}\n" output_cstr c' ; (c',prf') in let cmp (q,n) (q',n') = if n < n' then -1 else if n = n' then Monomial.compare q q' else 1 in let find_pivot (c',prf') = let (v,q,n) = List.fold_left (fun (v,q,n) (x,v') -> let x = MonT.retrieve x in let (q',n') = Monomial.div x m in if cmp (q,n) (q',n') = -1 then (v',q',n') else (v,q,n)) (Int 0, Monomial.const,0) c'.coeffs in if n > 0 then Some (v,q,n) else None in let rec pivot (q,n) (c',prf') = match find_pivot (c',prf') with | None -> (c',prf') | Some(v,q',n') -> if cmp (q',n') (q,n) = -1 then pivot (q',n') (apply_pivot (v,q',n') (c',prf')) else (c',prf') in pivot (Monomial.const,max_int) (c',prf') let pivot_eq x (c,prf) = match Vect.get x c.coeffs with | None -> (fun x -> None) | Some v -> fun cp' -> Some (xpivot_eq (c,prf) x v cp') end coq-8.4pl2/plugins/micromega/persistent_cache.ml0000640000175000001440000001254212121620060021122 0ustar 
notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* string -> 'a t (** [create i f] creates an empty persistent table with initial size i associated with file [f] *) val open_in : string -> 'a t (** [open_in f] rebuilds a table from the records stored in file [f]. As marshaling is not type-safe, it migth segault. *) val find : 'a t -> key -> 'a (** find has the specification of Hashtable.find *) val add : 'a t -> key -> 'a -> unit (** [add tbl key elem] adds the binding [key] [elem] to the table [tbl]. (and writes the binding to the file associated with [tbl].) If [key] is already bound, raises KeyAlreadyBound *) val close : 'a t -> unit (** [close tbl] is closing the table. Once closed, a table cannot be used. i.e, find,add will raise UnboundTable *) val memo : string -> (key -> 'a) -> (key -> 'a) (** [memo cache f] returns a memo function for [f] using file [cache] as persistent table. Note that the cache will only be loaded when the function is used for the first time *) end open Hashtbl module PHashtable(Key:HashedType) : PHashtable with type key = Key.t = struct open Unix type key = Key.t module Table = Hashtbl.Make(Key) exception InvalidTableFormat exception UnboundTable type mode = Closed | Open type 'a t = { outch : out_channel ; mutable status : mode ; htbl : 'a Table.t } let create i f = let flags = [O_WRONLY; O_TRUNC;O_CREAT] in { outch = out_channel_of_descr (openfile f flags 0o666); status = Open ; htbl = Table.create i } let finally f rst = try let res = f () in rst () ; res with reraise -> (try rst () with any -> raise reraise ); raise reraise let read_key_elem inch = try Some (Marshal.from_channel inch) with | End_of_file -> None | e when e <> Sys.Break -> raise InvalidTableFormat (** In win32, it seems that we should unlock the exact zone that has been locked, and not the whole file *) let locked_start = ref 0 let lock fd = locked_start := lseek fd 0 SEEK_CUR; lockf fd F_LOCK 0 let rlock fd = locked_start := lseek fd 0 SEEK_CUR; lockf fd F_RLOCK 0 let unlock fd = let pos = lseek fd 0 SEEK_CUR in ignore (lseek fd !locked_start SEEK_SET); lockf fd F_ULOCK 0; ignore (lseek fd pos SEEK_SET) let open_in f = let flags = [O_RDONLY ; O_CREAT] in let finch = openfile f flags 0o666 in let inch = in_channel_of_descr finch in let htbl = Table.create 100 in let rec xload () = match read_key_elem inch with | None -> () | Some (key,elem) -> Table.add htbl key elem ; xload () in try (* Locking of the (whole) file while reading *) rlock finch; finally (fun () -> xload () ) (fun () -> unlock finch ; close_in_noerr inch ; ) ; { outch = out_channel_of_descr (openfile f [O_WRONLY;O_APPEND;O_CREAT] 0o666) ; status = Open ; htbl = htbl } with InvalidTableFormat -> (* Try to keep as many entries as possible *) begin let flags = [O_WRONLY; O_TRUNC;O_CREAT] in let out = (openfile f flags 0o666) in let outch = out_channel_of_descr out in lock out; (try Table.iter (fun k e -> Marshal.to_channel outch (k,e) [Marshal.No_sharing]) htbl; flush outch ; with e when e <> Sys.Break -> () ) ; unlock out ; { outch = outch ; status = Open ; htbl = htbl } end let close t = let {outch = outch ; status = status ; htbl = tbl} = t in match t.status with | Closed -> () (* don't do it twice *) | Open -> close_out outch ; Table.clear tbl ; t.status <- Closed let add t k e = let {outch = outch ; status = status ; htbl = tbl} = t in if status = Closed then raise UnboundTable else let fd = descr_of_out_channel outch in begin 
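(* Append protocol (description of the steps below): record the binding in the in-memory table, take the file lock, seek to the end of the backing file, marshal the (key, element) pair, flush, and only then release the lock, so that concurrent writers cannot interleave their records. *)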
Table.add tbl k e ; lock fd; ignore (lseek fd 0 SEEK_END); Marshal.to_channel outch (k,e) [Marshal.No_sharing] ; flush outch ; unlock fd end let find t k = let {outch = outch ; status = status ; htbl = tbl} = t in if status = Closed then raise UnboundTable else let res = Table.find tbl k in res let memo cache f = let tbl = lazy (open_in cache) in fun x -> let tbl = Lazy.force tbl in try find tbl x with Not_found -> let res = f x in add tbl x res ; res end (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/VarMap.v0000640000175000001440000000350612010532755016634 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t | Node : t -> A -> t -> t . Fixpoint find (vm : t) (p:positive) {struct vm} : A := match vm with | Empty => default | Leaf i => i | Node l e r => match p with | xH => e | xO p => find l p | xI p => find r p end end. End MakeVarMap. coq-8.4pl2/plugins/micromega/Refl.v0000640000175000001440000000673412010532755016344 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ' '/\': basic properties *) Fixpoint make_impl (A : Type) (eval : A -> Prop) (l : list A) (goal : Prop) {struct l} : Prop := match l with | nil => goal | cons e l => (eval e) -> (make_impl eval l goal) end. Theorem make_impl_true : forall (A : Type) (eval : A -> Prop) (l : list A), make_impl eval l True. Proof. induction l as [| a l IH]; simpl. trivial. intro; apply IH. Qed. Fixpoint make_conj (A : Type) (eval : A -> Prop) (l : list A) {struct l} : Prop := match l with | nil => True | cons e nil => (eval e) | cons e l2 => ((eval e) /\ (make_conj eval l2)) end. Theorem make_conj_cons : forall (A : Type) (eval : A -> Prop) (a : A) (l : list A), make_conj eval (a :: l) <-> eval a /\ make_conj eval l. Proof. intros; destruct l; simpl; tauto. Qed. Lemma make_conj_impl : forall (A : Type) (eval : A -> Prop) (l : list A) (g : Prop), (make_conj eval l -> g) <-> make_impl eval l g. Proof. induction l. simpl. tauto. simpl. intros. destruct l. simpl. tauto. generalize (IHl g). tauto. Qed. Lemma make_conj_in : forall (A : Type) (eval : A -> Prop) (l : list A), make_conj eval l -> (forall p, In p l -> eval p). Proof. induction l. simpl. tauto. simpl. intros. destruct l. simpl in H0. destruct H0. subst; auto. tauto. destruct H. destruct H0. subst;auto. apply IHl; auto. Qed. Lemma make_conj_app : forall A eval l1 l2, @make_conj A eval (l1 ++ l2) <-> @make_conj A eval l1 /\ @make_conj A eval l2. Proof. induction l1. simpl. tauto. intros. change ((a::l1) ++ l2) with (a :: (l1 ++ l2)). rewrite make_conj_cons. rewrite IHl1. rewrite make_conj_cons. tauto. Qed. Lemma not_make_conj_cons : forall (A:Type) (t:A) a eval (no_middle_eval : (eval t) \/ ~ (eval t)), ~ make_conj eval (t ::a) -> ~ (eval t) \/ (~ make_conj eval a). Proof. intros. simpl in H. destruct a. tauto. tauto. Qed. Lemma not_make_conj_app : forall (A:Type) (t:list A) a eval (no_middle_eval : forall d, eval d \/ ~ eval d) , ~ make_conj eval (t ++ a) -> (~ make_conj eval t) \/ (~ make_conj eval a). Proof. induction t. simpl. tauto. intros. simpl ((a::t)++a0)in H. destruct (@not_make_conj_cons _ _ _ _ (no_middle_eval a) H). left ; red ; intros. apply H0. rewrite make_conj_cons in H1. tauto. destruct (IHt _ _ no_middle_eval H0). left ; red ; intros. apply H1. rewrite make_conj_cons in H2. 
tauto. right ; auto. Qed. coq-8.4pl2/plugins/micromega/Env.v0000640000175000001440000000604412010532755016176 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* D. Definition jump (j:positive) (e:Env) := fun x => e (x+j). Definition nth (n:positive) (e:Env) := e n. Definition hd (e:Env) := nth 1 e. Definition tail (e:Env) := jump 1 e. Lemma jump_add i j l x : jump (i + j) l x = jump i (jump j l) x. Proof. unfold jump. f_equal. apply Pos.add_assoc. Qed. Lemma jump_simpl p l x : jump p l x = match p with | xH => tail l x | xO p => jump p (jump p l) x | xI p => jump p (jump p (tail l)) x end. Proof. destruct p; unfold tail; rewrite <- ?jump_add; f_equal; now rewrite Pos.add_diag. Qed. Lemma jump_tl j l x : tail (jump j l) x = jump j (tail l) x. Proof. unfold tail. rewrite <- !jump_add. f_equal. apply Pos.add_comm. Qed. Lemma jump_succ j l x : jump (Pos.succ j) l x = jump 1 (jump j l) x. Proof. rewrite <- jump_add. f_equal. symmetry. apply Pos.add_1_l. Qed. Lemma jump_pred_double i l x : jump (Pos.pred_double i) (tail l) x = jump i (jump i l) x. Proof. unfold tail. rewrite <- !jump_add. f_equal. now rewrite Pos.add_1_r, Pos.succ_pred_double, Pos.add_diag. Qed. Lemma nth_spec p l : nth p l = match p with | xH => hd l | xO p => nth p (jump p l) | xI p => nth p (jump p (tail l)) end. Proof. unfold hd, nth, tail, jump. destruct p; f_equal; now rewrite Pos.add_diag. Qed. Lemma nth_jump p l : nth p (tail l) = hd (jump p l). Proof. unfold hd, nth, tail, jump. f_equal. apply Pos.add_comm. Qed. Lemma nth_pred_double p l : nth (Pos.pred_double p) (tail l) = nth p (jump p l). Proof. unfold nth, tail, jump. f_equal. now rewrite Pos.add_1_r, Pos.succ_pred_double, Pos.add_diag. Qed. End S. Ltac jump_simpl := repeat match goal with | |- appcontext [jump xH] => rewrite (jump_simpl xH) | |- appcontext [jump (xO ?p)] => rewrite (jump_simpl (xO p)) | |- appcontext [jump (xI ?p)] => rewrite (jump_simpl (xI p)) end. coq-8.4pl2/plugins/micromega/mfourier.ml0000640000175000001440000006722712121620060017441 0ustar notinusersopen Num module Utils = Mutils open Polynomial open Vect let map_option = Utils.map_option let from_option = Utils.from_option let debug = false type ('a,'b) lr = Inl of 'a | Inr of 'b (** Implementation of intervals *) module Itv = struct (** The type of intervals is *) type interval = num option * num option (** None models the absence of bound i.e. infinity *) (** As a result, - None , None -> ]-oo,+oo[ - None , Some v -> ]-oo,v] - Some v, None -> [v,+oo[ - Some v, Some v' -> [v,v'] Intervals needs to be explicitely normalised. 
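For instance [(Some (Int 1), Some (Int 0))] denotes an empty interval and [norm_itv] maps it to [None], whereas [inter (None, Some (Int 3)) (Some (Int 1), None)] is [Some (Some (Int 1), Some (Int 3))], i.e. the interval [1,3].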
*) type who = Left | Right (** if then interval [itv] is empty, [norm_itv itv] returns [None] otherwise, it returns [Some itv] *) let norm_itv itv = match itv with | Some a , Some b -> if a <=/ b then Some itv else None | _ -> Some itv (** [opp_itv itv] computes the opposite interval *) let opp_itv itv = let (l,r) = itv in (map_option minus_num r, map_option minus_num l) (** [inter i1 i2 = None] if the intersection of intervals is empty [inter i1 i2 = Some i] if [i] is the intersection of the intervals [i1] and [i2] *) let inter i1 i2 = let (l1,r1) = i1 and (l2,r2) = i2 in let inter f o1 o2 = match o1 , o2 with | None , None -> None | Some _ , None -> o1 | None , Some _ -> o2 | Some n1 , Some n2 -> Some (f n1 n2) in norm_itv (inter max_num l1 l2 , inter min_num r1 r2) let range = function | None,_ | _,None -> None | Some i,Some j -> Some (floor_num j -/ceiling_num i +/ (Int 1)) let smaller_itv i1 i2 = match range i1 , range i2 with | None , _ -> false | _ , None -> true | Some i , Some j -> i <=/ j (** [in_bound bnd v] checks whether [v] is within the bounds [bnd] *) let in_bound bnd v = let (l,r) = bnd in match l , r with | None , None -> true | None , Some a -> v <=/ a | Some a , None -> a <=/ v | Some a , Some b -> a <=/ v && v <=/ b end open Itv type vector = Vect.t (** 'cstr' is the type of constraints. {coeffs = v ; bound = (l,r) } models the constraints l <= v <= r **) module ISet = Set.Make(struct type t = int let compare = Pervasives.compare end) module PSet = ISet module System = Hashtbl.Make(Vect) type proof = | Hyp of int | Elim of var * proof * proof | And of proof * proof type system = { sys : cstr_info ref System.t ; vars : ISet.t } and cstr_info = { bound : interval ; prf : proof ; pos : int ; neg : int ; } (** A system of constraints has the form [{sys = s ; vars = v}]. [s] is a hashtable mapping a normalised vector to a [cstr_info] record where - [bound] is an interval - [prf_idx] is the set of hypothese indexes (i.e. constraints in the initial system) used to obtain the current constraint. In the initial system, each constraint is given an unique singleton proof_idx. When a new constraint c is computed by a function f(c1,...,cn), its proof_idx is ISet.fold union (List.map (fun x -> x.proof_idx) [c1;...;cn] - [pos] is the number of positive values of the vector - [neg] is the number of negative values of the vector ( [neg] + [pos] is therefore the length of the vector) [v] is an upper-bound of the set of variables which appear in [s]. 
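As an illustration of how proofs are built: the i-th input constraint is loaded with proof [Hyp i]; when Fourier elimination combines two rows to eliminate a variable [v], the new row carries [Elim(v, prf1, prf2)]; and when two bounds for the same vector are merged, the result carries [And(prf1, prf2)]. The leaves of a proof are therefore exactly the hypotheses it depends on (see [hyps] below).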
*) (** To be thrown when a system has no solution *) exception SystemContradiction of proof let hyps prf = let rec hyps prf acc = match prf with | Hyp i -> ISet.add i acc | Elim(_,prf1,prf2) | And(prf1,prf2) -> hyps prf1 (hyps prf2 acc) in hyps prf ISet.empty (** Pretty printing *) let rec pp_proof o prf = match prf with | Hyp i -> Printf.fprintf o "H%i" i | Elim(v, prf1,prf2) -> Printf.fprintf o "E(%i,%a,%a)" v pp_proof prf1 pp_proof prf2 | And(prf1,prf2) -> Printf.fprintf o "A(%a,%a)" pp_proof prf1 pp_proof prf2 let pp_bound o = function | None -> output_string o "oo" | Some a -> output_string o (string_of_num a) let pp_itv o (l,r) = Printf.fprintf o "(%a,%a)" pp_bound l pp_bound r let pp_iset o s = output_string o "{" ; ISet.fold (fun i _ -> Printf.fprintf o "%i " i) s (); output_string o "}" let pp_pset o s = output_string o "{" ; PSet.fold (fun i _ -> Printf.fprintf o "%i " i) s (); output_string o "}" let pp_info o i = pp_itv o i.bound let pp_cstr o (vect,bnd) = let (l,r) = bnd in (match l with | None -> () | Some n -> Printf.fprintf o "%s <= " (string_of_num n)) ; pp_vect o vect ; (match r with | None -> output_string o"\n" | Some n -> Printf.fprintf o "<=%s\n" (string_of_num n)) let pp_system o sys= System.iter (fun vect ibnd -> pp_cstr o (vect,(!ibnd).bound)) sys let pp_split_cstr o (vl,v,c,_) = Printf.fprintf o "(val x = %s ,%a,%s)" (string_of_num vl) pp_vect v (string_of_num c) (** [merge_cstr_info] takes: - the intersection of bounds and - the union of proofs - [pos] and [neg] fields should be identical *) let merge_cstr_info i1 i2 = let { pos = p1 ; neg = n1 ; bound = i1 ; prf = prf1 } = i1 and { pos = p2 ; neg = n2 ; bound = i2 ; prf = prf2 } = i2 in assert (p1 = p2 && n1 = n2) ; match inter i1 i2 with | None -> None (* Could directly raise a system contradiction exception *) | Some bnd -> Some { pos = p1 ; neg = n1 ; bound = bnd ; prf = And(prf1,prf2) } (** [xadd_cstr vect cstr_info] loads an constraint into the system. The constraint is neither redundant nor contradictory. @raise SystemContradiction if [cstr_info] returns [None] *) let xadd_cstr vect cstr_info sys = if debug && System.length sys mod 1000 = 0 then (print_string "*" ; flush stdout) ; try let info = System.find sys vect in match merge_cstr_info cstr_info !info with | None -> raise (SystemContradiction (And(cstr_info.prf, (!info).prf))) | Some info' -> info := info' with | Not_found -> System.replace sys vect (ref cstr_info) type cstr_ext = | Contradiction (** The constraint is contradictory. Typically, a [SystemContradiction] exception will be raised. *) | Redundant (** The constrain is redundant. Typically, the constraint will be dropped *) | Cstr of vector * cstr_info (** Taken alone, the constraint is neither contradictory nor redundant. Typically, it will be added to the constraint system. 
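As an illustration of the classification performed by [normalise_cstr] below: a constraint whose bound interval is empty is a [Contradiction]; a constraint with an empty coefficient vector is [Redundant] if its bound contains 0 and a [Contradiction] otherwise; any other constraint is rescaled so that its leading coefficient becomes 1 (swapping the two bounds when that coefficient is negative) and returned as a [Cstr].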
*) (** [normalise_cstr] : vector -> cstr_info -> cstr_ext *) let normalise_cstr vect cinfo = match norm_itv cinfo.bound with | None -> Contradiction | Some (l,r) -> match vect with | [] -> if Itv.in_bound (l,r) (Int 0) then Redundant else Contradiction | (_,n)::_ -> Cstr( (if n <>/ Int 1 then List.map (fun (x,nx) -> (x,nx // n)) vect else vect), let divn x = x // n in if sign_num n = 1 then{cinfo with bound = (map_option divn l , map_option divn r) } else {cinfo with pos = cinfo.neg ; neg = cinfo.pos ; bound = (map_option divn r , map_option divn l)}) (** For compatibility, there is an external representation of constraints *) let eval_op = function | Eq -> (=/) | Ge -> (>=/) let count v = let rec count n p v = match v with | [] -> (n,p) | (_,vl)::v -> let sg = sign_num vl in assert (sg <> 0) ; if sg = 1 then count n (p+1) v else count (n+1) p v in count 0 0 v let norm_cstr {coeffs = v ; op = o ; cst = c} idx = let (n,p) = count v in normalise_cstr v {pos = p ; neg = n ; bound = (match o with | Eq -> Some c , Some c | Ge -> Some c , None) ; prf = Hyp idx } (** [load_system l] takes a list of constraints of type [cstr_compat] @return a system of constraints @raise SystemContradiction if a contradiction is found *) let load_system l = let sys = System.create 1000 in let li = Mutils.mapi (fun e i -> (e,i)) l in let vars = List.fold_left (fun vrs (cstr,i) -> match norm_cstr cstr i with | Contradiction -> raise (SystemContradiction (Hyp i)) | Redundant -> vrs | Cstr(vect,info) -> xadd_cstr vect info sys ; List.fold_left (fun s (v,_) -> ISet.add v s) vrs cstr.coeffs) ISet.empty li in {sys = sys ;vars = vars} let system_list sys = let { sys = s ; vars = v } = sys in System.fold (fun k bi l -> (k, !bi)::l) s [] (** [add (v1,c1) (v2,c2) ] precondition: (c1 <>/ Int 0 && c2 <>/ Int 0) @return a pair [(v,ln)] such that [v] is the sum of vector [v1] divided by [c1] and vector [v2] divided by [c2] Note that the resulting vector is not normalised. *) let add (v1,c1) (v2,c2) = assert (c1 <>/ Int 0 && c2 <>/ Int 0) ; let rec xadd v1 v2 = match v1 , v2 with | (x1,n1)::v1' , (x2,n2)::v2' -> if x1 = x2 then let n' = (n1 // c1) +/ (n2 // c2) in if n' =/ Int 0 then xadd v1' v2' else let res = xadd v1' v2' in (x1,n') ::res else if x1 < x2 then let res = xadd v1' v2 in (x1, n1 // c1)::res else let res = xadd v1 v2' in (x2, n2 // c2)::res | [] , [] -> [] | [] , _ -> List.map (fun (x,vl) -> (x,vl // c2)) v2 | _ , [] -> List.map (fun (x,vl) -> (x,vl // c1)) v1 in let res = xadd v1 v2 in (res, count res) let add (v1,c1) (v2,c2) = let res = add (v1,c1) (v2,c2) in (* Printf.printf "add(%a,%s,%a,%s) -> %a\n" pp_vect v1 (string_of_num c1) pp_vect v2 (string_of_num c2) pp_vect (fst res) ;*) res type tlr = (num * vector * cstr_info) list type tm = (vector * cstr_info ) list (** To perform Fourier elimination, constraints are categorised depending on the sign of the variable to eliminate. 
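For example, to eliminate [x] from [x + y >= 1] (positive coefficient, so the lower bound goes to [l]) and [-x + z >= 0] (negative coefficient, so its lower bound goes to [r]), the [elim] sub-function of [project] divides each row by the absolute value of its [x]-coefficient and adds them, which cancels [x] and yields the projected constraint [y + z >= 1].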
*) (** [split x vect info (l,m,r)] @param v is the variable to eliminate @param l contains constraints such that (e + a*x) // a >= c / a @param r contains constraints such that (e + a*x) // - a >= c / -a @param m contains constraints which do not mention [x] *) let split x (vect: vector) info (l,m,r) = match get x vect with | None -> (* The constraint does not mention [x], store it in m *) (l,(vect,info)::m,r) | Some vl -> (* otherwise *) let cons_bound lst bd = match bd with | None -> lst | Some bnd -> (vl,vect,{info with bound = Some bnd,None})::lst in let lb,rb = info.bound in if sign_num vl = 1 then (cons_bound l lb,m,cons_bound r rb) else (* sign_num vl = -1 *) (cons_bound l rb,m,cons_bound r lb) (** [project vr sys] projects system [sys] over the set of variables [ISet.remove vr sys.vars ]. This is a one step Fourier elimination. *) let project vr sys = let (l,m,r) = System.fold (fun vect rf l_m_r -> split vr vect !rf l_m_r) sys.sys ([],[],[]) in let new_sys = System.create (System.length sys.sys) in (* Constraints in [m] belong to the projection - for those [vr] is already projected out *) List.iter (fun (vect,info) -> System.replace new_sys vect (ref info) ) m ; let elim (v1,vect1,info1) (v2,vect2,info2) = let {neg = n1 ; pos = p1 ; bound = bound1 ; prf = prf1} = info1 and {neg = n2 ; pos = p2 ; bound = bound2 ; prf = prf2} = info2 in let bnd1 = from_option (fst bound1) and bnd2 = from_option (fst bound2) in let bound = (bnd1 // v1) +/ (bnd2 // minus_num v2) in let vres,(n,p) = add (vect1,v1) (vect2,minus_num v2) in (vres,{neg = n ; pos = p ; bound = (Some bound, None); prf = Elim(vr,info1.prf,info2.prf)}) in List.iter(fun l_elem -> List.iter (fun r_elem -> let (vect,info) = elim l_elem r_elem in match normalise_cstr vect info with | Redundant -> () | Contradiction -> raise (SystemContradiction info.prf) | Cstr(vect,info) -> xadd_cstr vect info new_sys) r ) l; {sys = new_sys ; vars = ISet.remove vr sys.vars} (** [project_using_eq] performs elimination by pivoting using an equation. This is the counter_part of the [elim] sub-function of [!project]. 
@param vr is the variable to be used as pivot @param c is the coefficient of variable [vr] in vector [vect] @param len is the length of the equation @param bound is the bound of the equation @param prf is the proof of the equation *) let project_using_eq vr c vect bound prf (vect',info') = match get vr vect' with | Some c2 -> let c1 = if c2 >=/ Int 0 then minus_num c else c in let c2 = abs_num c2 in let (vres,(n,p)) = add (vect,c1) (vect', c2) in let cst = bound // c1 in let bndres = let f x = cst +/ x // c2 in let (l,r) = info'.bound in (map_option f l , map_option f r) in (vres,{neg = n ; pos = p ; bound = bndres ; prf = Elim(vr,prf,info'.prf)}) | None -> (vect',info') let elim_var_using_eq vr vect cst prf sys = let c = from_option (get vr vect) in let elim_var = project_using_eq vr c vect cst prf in let new_sys = System.create (System.length sys.sys) in System.iter(fun vect iref -> let (vect',info') = elim_var (vect,!iref) in match normalise_cstr vect' info' with | Redundant -> () | Contradiction -> raise (SystemContradiction info'.prf) | Cstr(vect,info') -> xadd_cstr vect info' new_sys) sys.sys ; {sys = new_sys ; vars = ISet.remove vr sys.vars} (** [size sys] computes the number of entries in the system of constraints *) let size sys = System.fold (fun v iref s -> s + (!iref).neg + (!iref).pos) sys 0 module IMap = Map.Make(struct type t = int let compare : int -> int -> int = Pervasives.compare end) let pp_map o map = IMap.fold (fun k elt () -> Printf.fprintf o "%i -> %s\n" k (string_of_num elt)) map () (** [eval_vect map vect] evaluates vector [vect] using the values of [map]. If [map] binds all the variables of [vect], we get [eval_vect map [(x1,v1);...;(xn,vn)] = (IMap.find x1 map * v1) + ... + (IMap.find xn map) * vn , []] The function returns as second argument, a sub-vector consisting in the variables that are not in [map]. 
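For instance, with [map] binding variable 1 to [Int 2], [eval_vect map [(1, Int 3); (4, Int 5)]] evaluates to [(Int 6, [(4, Int 5)])].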
*) let eval_vect map vect = let rec xeval_vect vect sum rst = match vect with | [] -> (sum,rst) | (v,vl)::vect -> try let val_v = IMap.find v map in xeval_vect vect (sum +/ (val_v */ vl)) rst with Not_found -> xeval_vect vect sum ((v,vl)::rst) in xeval_vect vect (Int 0) [] (** [restrict_bound n sum itv] returns the interval of [x] given that (fst itv) <= x * n + sum <= (snd itv) *) let restrict_bound n sum (itv:interval) = let f x = (x -/ sum) // n in let l,r = itv in match sign_num n with | 0 -> if in_bound itv sum then (None,None) (* redundant *) else failwith "SystemContradiction" | 1 -> map_option f l , map_option f r | _ -> map_option f r , map_option f l (** [bound_of_variable map v sys] computes the interval of [v] in [sys] given a mapping [map] binding all the other variables *) let bound_of_variable map v sys = System.fold (fun vect iref bnd -> let sum,rst = eval_vect map vect in let vl = match get v rst with | None -> Int 0 | Some v -> v in match inter bnd (restrict_bound vl sum (!iref).bound) with | None -> failwith "bound_of_variable: impossible" | Some itv -> itv) sys (None,None) (** [pick_small_value bnd] picks a value being closed to zero within the interval *) let pick_small_value bnd = match bnd with | None , None -> Int 0 | None , Some i -> if (Int 0) <=/ (floor_num i) then Int 0 else floor_num i | Some i,None -> if i <=/ (Int 0) then Int 0 else ceiling_num i | Some i,Some j -> if i <=/ Int 0 && Int 0 <=/ j then Int 0 else if ceiling_num i <=/ floor_num j then ceiling_num i (* why not *) else i (** [solution s1 sys_l = Some(sn,[(vn-1,sn-1);...; (v1,s1)]@sys_l)] then [sn] is a system which contains only [black_v] -- if it existed in [s1] and [sn+1] is obtained by projecting [vn] out of [sn] @raise SystemContradiction if system [s] has no solution *) let solve_sys black_v choose_eq choose_variable sys sys_l = let rec solve_sys sys sys_l = if debug then Printf.printf "S #%i size %i\n" (System.length sys.sys) (size sys.sys); let eqs = choose_eq sys in try let (v,vect,cst,ln) = fst (List.find (fun ((v,_,_,_),_) -> v <> black_v) eqs) in if debug then (Printf.printf "\nE %a = %s variable %i\n" pp_vect vect (string_of_num cst) v ; flush stdout); let sys' = elim_var_using_eq v vect cst ln sys in solve_sys sys' ((v,sys)::sys_l) with Not_found -> let vars = choose_variable sys in try let (v,est) = (List.find (fun (v,_) -> v <> black_v) vars) in if debug then (Printf.printf "\nV : %i estimate %f\n" v est ; flush stdout) ; let sys' = project v sys in solve_sys sys' ((v,sys)::sys_l) with Not_found -> (* we are done *) Inl (sys,sys_l) in solve_sys sys sys_l let solve black_v choose_eq choose_variable cstrs = try let sys = load_system cstrs in if debug then Printf.printf "solve :\n %a" pp_system sys.sys ; solve_sys black_v choose_eq choose_variable sys [] with SystemContradiction prf -> Inr prf (** The purpose of module [EstimateElimVar] is to try to estimate the cost of eliminating a variable. The output is an ordered list of (variable,cost). 
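The cost of eliminating a variable [v] is estimated from the shape of the current system: Fourier elimination replaces the rows in which [v] occurs positively and those in which it occurs negatively by (roughly) all their pairwise combinations, so the estimate computed by [abstract_partition] grows with the product of the sizes of these two groups; [choose_variable] then sorts the candidate variables by increasing estimated cost.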
*) module EstimateElimVar = struct type sys_list = (vector * cstr_info) list let abstract_partition (v:int) (l: sys_list) = let rec xpart (l:sys_list) (ltl:sys_list) (n:int list) (z:int) (p:int list) = match l with | [] -> (ltl, n,z,p) | (l1,info) ::rl -> match l1 with | [] -> xpart rl (([],info)::ltl) n (info.neg+info.pos+z) p | (vr,vl)::rl1 -> if v = vr then let cons_bound lst bd = match bd with | None -> lst | Some bnd -> info.neg+info.pos::lst in let lb,rb = info.bound in if sign_num vl = 1 then xpart rl ((rl1,info)::ltl) (cons_bound n lb) z (cons_bound p rb) else xpart rl ((rl1,info)::ltl) (cons_bound n rb) z (cons_bound p lb) else (* the variable is greater *) xpart rl ((l1,info)::ltl) n (info.neg+info.pos+z) p in let (sys',n,z,p) = xpart l [] [] 0 [] in let ln = float_of_int (List.length n) in let sn = float_of_int (List.fold_left (+) 0 n) in let lp = float_of_int (List.length p) in let sp = float_of_int (List.fold_left (+) 0 p) in (sys', float_of_int z +. lp *. sn +. ln *. sp -. lp*.ln) let choose_variable sys = let {sys = s ; vars = v} = sys in let sl = system_list sys in let evals = fst (ISet.fold (fun v (eval,s) -> let ts,vl = abstract_partition v s in ((v,vl)::eval, ts)) v ([],sl)) in List.sort (fun x y -> Pervasives.compare (snd x) (snd y) ) evals end open EstimateElimVar (** The module [EstimateElimEq] is similar to [EstimateElimVar] but it orders equations. *) module EstimateElimEq = struct let itv_point bnd = match bnd with |(Some a, Some b) -> a =/ b | _ -> false let eq_bound bnd c = match bnd with |(Some a, Some b) -> a =/ b && c =/ b | _ -> false let rec unroll_until v l = match l with | [] -> (false,[]) | (i,_)::rl -> if i = v then (true,rl) else if i < v then unroll_until v rl else (false,l) let rec choose_simple_equation eqs = match eqs with | [] -> None | (vect,a,prf,ln)::eqs -> match vect with | [i,_] -> Some (i,vect,a,prf,ln) | _ -> choose_simple_equation eqs let choose_primal_equation eqs sys_l = (* Counts the number of equations refering to variable [v] -- It looks like nb_cst is dead... *) let is_primal_equation_var v = List.fold_left (fun nb_eq (vect,info) -> if fst (unroll_until v vect) then if itv_point info.bound then nb_eq + 1 else nb_eq else nb_eq) 0 sys_l in let rec find_var vect = match vect with | [] -> None | (i,_)::vect -> let nb_eq = is_primal_equation_var i in if nb_eq = 2 then Some i else find_var vect in let rec find_eq_var eqs = match eqs with | [] -> None | (vect,a,prf,ln)::l -> match find_var vect with | None -> find_eq_var l | Some r -> Some (r,vect,a,prf,ln) in match choose_simple_equation eqs with | None -> find_eq_var eqs | Some res -> Some res let choose_equality_var sys = let sys_l = system_list sys in let equalities = List.fold_left (fun l (vect,info) -> match info.bound with | Some a , Some b -> if a =/ b then (* This an equation *) (vect,a,info.prf,info.neg+info.pos)::l else l | _ -> l ) [] sys_l in let rec estimate_cost v ct sysl acc tlsys = match sysl with | [] -> (acc,tlsys) | (l,info)::rsys -> let ln = info.pos + info.neg in let (b,l) = unroll_until v l in match b with | true -> if itv_point info.bound then estimate_cost v ct rsys (acc+ln) ((l,info)::tlsys) (* this is free *) else estimate_cost v ct rsys (acc+ln+ct) ((l,info)::tlsys) (* should be more ? 
*) | false -> estimate_cost v ct rsys (acc+ln) ((l,info)::tlsys) in match choose_primal_equation equalities sys_l with | None -> let cost_eq eq const prf ln acc_costs = let rec cost_eq eqr sysl costs = match eqr with | [] -> costs | (v,_) ::eqr -> let (cst,tlsys) = estimate_cost v (ln-1) sysl 0 [] in cost_eq eqr tlsys (((v,eq,const,prf),cst)::costs) in cost_eq eq sys_l acc_costs in let all_costs = List.fold_left (fun all_costs (vect,const,prf,ln) -> cost_eq vect const prf ln all_costs) [] equalities in (* pp_list (fun o ((v,eq,_,_),cst) -> Printf.fprintf o "((%i,%a),%i)\n" v pp_vect eq cst) stdout all_costs ; *) List.sort (fun x y -> Pervasives.compare (snd x) (snd y) ) all_costs | Some (v,vect, const,prf,_) -> [(v,vect,const,prf),0] end open EstimateElimEq module Fourier = struct let optimise vect l = (* We add a dummy (fresh) variable for vector *) let fresh = List.fold_left (fun fr c -> Pervasives.max fr (Vect.fresh c.coeffs)) 0 l in let cstr = { coeffs = Vect.set fresh (Int (-1)) vect ; op = Eq ; cst = (Int 0)} in match solve fresh choose_equality_var choose_variable (cstr::l) with | Inr prf -> None (* This is an unsatisfiability proof *) | Inl (s,_) -> try Some (bound_of_variable IMap.empty fresh s.sys) with x when x <> Sys.Break -> Printf.printf "optimise Exception : %s" (Printexc.to_string x) ; None let find_point cstrs = match solve max_int choose_equality_var choose_variable cstrs with | Inr prf -> Inr prf | Inl (_,l) -> let rec rebuild_solution l map = match l with | [] -> map | (v,e)::l -> let itv = bound_of_variable map v e.sys in let map = IMap.add v (pick_small_value itv) map in rebuild_solution l map in let map = rebuild_solution l IMap.empty in let vect = List.rev (IMap.fold (fun v i vect -> (v,i)::vect) map []) in (* Printf.printf "SOLUTION %a" pp_vect vect ; *) let res = Inl vect in res end module Proof = struct (** A proof term in the sense of a ZMicromega.RatProof is a positive combination of the hypotheses which leads to a contradiction. The proofs constructed by Fourier elimination are more like execution traces: - certain facts are recorded but are useless - certain inferences are implicit. The following code implements proof reconstruction. *) let add x y = fst (add x y) let forall_pairs f l1 l2 = List.fold_left (fun acc e1 -> List.fold_left (fun acc e2 -> match f e1 e2 with | None -> acc | Some v -> v::acc) acc l2) [] l1 let add_op x y = match x , y with | Eq , Eq -> Eq | _ -> Ge let pivot v (p1,c1) (p2,c2) = let {coeffs = v1 ; op = op1 ; cst = n1} = c1 and {coeffs = v2 ; op = op2 ; cst = n2} = c2 in match Vect.get v v1 , Vect.get v v2 with | None , _ | _ , None -> None | Some a , Some b -> if (sign_num a) * (sign_num b) = -1 then Some (add (p1,abs_num a) (p2,abs_num b) , {coeffs = add (v1,abs_num a) (v2,abs_num b) ; op = add_op op1 op2 ; cst = n1 // (abs_num a) +/ n2 // (abs_num b) }) else if op1 = Eq then Some (add (p1,minus_num (a // b)) (p2,Int 1), {coeffs = add (v1,minus_num (a// b)) (v2 ,Int 1) ; op = add_op op1 op2; cst = n1 // (minus_num (a// b)) +/ n2 // (Int 1)}) else if op2 = Eq then Some (add (p2,minus_num (b // a)) (p1,Int 1), {coeffs = add (v2,minus_num (b// a)) (v1 ,Int 1) ; op = add_op op1 op2; cst = n2 // (minus_num (b// a)) +/ n1 // (Int 1)}) else None (* op2 could be Eq ... 
this might happen *) let normalise_proofs l = List.fold_left (fun acc (prf,cstr) -> match acc with | Inr _ -> acc (* I already found a contradiction *) | Inl acc -> match norm_cstr cstr 0 with | Redundant -> Inl acc | Contradiction -> Inr (prf,cstr) | Cstr(v,info) -> Inl ((prf,cstr,v,info)::acc)) (Inl []) l type oproof = (vector * cstr_compat * num) option let merge_proof (oleft:oproof) (prf,cstr,v,info) (oright:oproof) = let (l,r) = info.bound in let keep p ob bd = match ob , bd with | None , None -> None | None , Some b -> Some(prf,cstr,b) | Some _ , None -> ob | Some(prfl,cstrl,bl) , Some b -> if p bl b then Some(prf,cstr, b) else ob in let oleft = keep (<=/) oleft l in let oright = keep (>=/) oright r in (* Now, there might be a contradiction *) match oleft , oright with | None , _ | _ , None -> Inl (oleft,oright) | Some(prfl,cstrl,l) , Some(prfr,cstrr,r) -> if l <=/ r then Inl (oleft,oright) else (* There is a contradiction - it should show up by scaling up the vectors - any pivot should do*) match cstrr.coeffs with | [] -> Inr (add (prfl,Int 1) (prfr,Int 1), cstrr) (* this is wrong *) | (v,_)::_ -> match pivot v (prfl,cstrl) (prfr,cstrr) with | None -> failwith "merge_proof : pivot is not possible" | Some x -> Inr x let mk_proof hyps prf = (* I am keeping list - I might have a proof for the left bound and a proof for the right bound. If I perform aggressive elimination of redundancies, I expect the list to be of length at most 2. For each proof list, all the vectors should be of the form a.v for different constants a. *) let rec mk_proof prf = match prf with | Hyp i -> [ ([i, Int 1] , List.nth hyps i) ] | Elim(v,prf1,prf2) -> let prfsl = mk_proof prf1 and prfsr = mk_proof prf2 in (* I take only the pairs for which the elimination is meaningfull *) forall_pairs (pivot v) prfsl prfsr | And(prf1,prf2) -> let prfsl1 = mk_proof prf1 and prfsl2 = mk_proof prf2 in (* detect trivial redundancies and contradictions *) match normalise_proofs (prfsl1@prfsl2) with | Inr x -> [x] (* This is a contradiction - this should be the end of the proof *) | Inl l -> (* All the vectors are the same *) let prfs = List.fold_left (fun acc e -> match acc with | Inr _ -> acc (* I have a contradiction *) | Inl (oleft,oright) -> merge_proof oleft e oright) (Inl(None,None)) l in match prfs with | Inr x -> [x] | Inl (oleft,oright) -> match oleft , oright with | None , None -> [] | None , Some(prf,cstr,_) | Some(prf,cstr,_) , None -> [prf,cstr] | Some(prf1,cstr1,_) , Some(prf2,cstr2,_) -> [prf1,cstr1;prf2,cstr2] in mk_proof prf end coq-8.4pl2/plugins/micromega/sos_types.ml0000640000175000001440000000535412010532755017644 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* output_string o "0" | Const n -> output_string o (string_of_num n) | Var n -> Printf.fprintf o "v%s" n | Inv t -> Printf.fprintf o "1/(%a)" output_term t | Opp t -> Printf.fprintf o "- (%a)" output_term t | Add(t1,t2) -> Printf.fprintf o "(%a)+(%a)" output_term t1 output_term t2 | Sub(t1,t2) -> Printf.fprintf o "(%a)-(%a)" output_term t1 output_term t2 | Mul(t1,t2) -> Printf.fprintf o "(%a)*(%a)" output_term t1 output_term t2 | Div(t1,t2) -> Printf.fprintf o "(%a)/(%a)" output_term t1 output_term t2 | Pow(t1,i) -> Printf.fprintf o "(%a)^(%i)" output_term t1 i (* ------------------------------------------------------------------------- *) (* Data structure for Positivstellensatz refutations. 
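Informally, a value of this type records how hypotheses (referred to by index through [Axiom_eq], [Axiom_le] and [Axiom_lt]), rational constants, squares and products are combined with [Sum], [Product] and [Eqmul] into a contradictory consequence. For instance (an illustrative value only), [Sum (Axiom_le 0, Product (Rational_lt (Int 2), Axiom_lt 1))] would describe adding hypothesis 0 to twice hypothesis 1.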
*) (* ------------------------------------------------------------------------- *) type positivstellensatz = Axiom_eq of int | Axiom_le of int | Axiom_lt of int | Rational_eq of num | Rational_le of num | Rational_lt of num | Square of term | Monoid of int list | Eqmul of term * positivstellensatz | Sum of positivstellensatz * positivstellensatz | Product of positivstellensatz * positivstellensatz;; let rec output_psatz o = function | Axiom_eq i -> Printf.fprintf o "Aeq(%i)" i | Axiom_le i -> Printf.fprintf o "Ale(%i)" i | Axiom_lt i -> Printf.fprintf o "Alt(%i)" i | Rational_eq n -> Printf.fprintf o "eq(%s)" (string_of_num n) | Rational_le n -> Printf.fprintf o "le(%s)" (string_of_num n) | Rational_lt n -> Printf.fprintf o "lt(%s)" (string_of_num n) | Square t -> Printf.fprintf o "(%a)^2" output_term t | Monoid l -> Printf.fprintf o "monoid" | Eqmul (t,ps) -> Printf.fprintf o "%a * %a" output_term t output_psatz ps | Sum (t1,t2) -> Printf.fprintf o "%a + %a" output_psatz t1 output_psatz t2 | Product (t1,t2) -> Printf.fprintf o "%a * %a" output_psatz t1 output_psatz t2 coq-8.4pl2/plugins/micromega/Tauto.v0000640000175000001440000003353012010532755016542 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* BFormula A | A : A -> BFormula A | Cj : BFormula A -> BFormula A -> BFormula A | D : BFormula A-> BFormula A -> BFormula A | N : BFormula A -> BFormula A | I : BFormula A-> BFormula A-> BFormula A. Fixpoint eval_f (A:Type) (ev:A -> Prop ) (f:BFormula A) {struct f}: Prop := match f with | TT => True | FF => False | A a => ev a | X p => p | Cj e1 e2 => (eval_f ev e1) /\ (eval_f ev e2) | D e1 e2 => (eval_f ev e1) \/ (eval_f ev e2) | N e => ~ (eval_f ev e) | I f1 f2 => (eval_f ev f1) -> (eval_f ev f2) end. Lemma eval_f_morph : forall A (ev ev' : A -> Prop) (f : BFormula A), (forall a, ev a <-> ev' a) -> (eval_f ev f <-> eval_f ev' f). Proof. induction f ; simpl ; try tauto. intros. assert (H' := H a). auto. Qed. Fixpoint map_bformula (T U : Type) (fct : T -> U) (f : BFormula T) : BFormula U := match f with | TT => TT _ | FF => FF _ | X p => X _ p | A a => A (fct a) | Cj f1 f2 => Cj (map_bformula fct f1) (map_bformula fct f2) | D f1 f2 => D (map_bformula fct f1) (map_bformula fct f2) | N f => N (map_bformula fct f) | I f1 f2 => I (map_bformula fct f1) (map_bformula fct f2) end. Lemma eval_f_map : forall T U (fct: T-> U) env f , eval_f env (map_bformula fct f) = eval_f (fun x => env (fct x)) f. Proof. induction f ; simpl ; try (rewrite IHf1 ; rewrite IHf2) ; auto. rewrite <- IHf. auto. Qed. Lemma map_simpl : forall A B f l, @map A B f l = match l with | nil => nil | a :: l=> (f a) :: (@map A B f l) end. Proof. destruct l ; reflexivity. Qed. Section S. Variable Env : Type. Variable Term : Type. Variable eval : Env -> Term -> Prop. Variable Term' : Type. Variable eval' : Env -> Term' -> Prop. Variable no_middle_eval' : forall env d, (eval' env d) \/ ~ (eval' env d). Variable unsat : Term' -> bool. Variable unsat_prop : forall t, unsat t = true -> forall env, eval' env t -> False. Variable deduce : Term' -> Term' -> option Term'. Variable deduce_prop : forall env t t' u, eval' env t -> eval' env t' -> deduce t t' = Some u -> eval' env u. Definition clause := list Term'. Definition cnf := list clause. Variable normalise : Term -> cnf. Variable negate : Term -> cnf. Definition tt : cnf := @nil clause. Definition ff : cnf := cons (@nil Term') nil. 
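(* Reading of the encoding (illustrative note): a [clause] holds in an
   environment when the conjunction of its atoms fails, since [eval_clause]
   below is [~ make_conj (eval' env) cl]; a [cnf] is the conjunction of its
   clauses.  The empty cnf [tt] is thus trivially valid, while [ff] consists
   of the single empty clause, which evaluates to [~ True], i.e. [False].
   For atoms [a b : Term'], the cnf [(a::nil)::(b::nil)::nil] would assert
   [~ eval' env a /\ ~ eval' env b]. *)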
Fixpoint add_term (t: Term') (cl : clause) : option clause := match cl with | nil => match deduce t t with | None => Some (t ::nil) | Some u => if unsat u then None else Some (t::nil) end | t'::cl => match deduce t t' with | None => match add_term t cl with | None => None | Some cl' => Some (t' :: cl') end | Some u => if unsat u then None else match add_term t cl with | None => None | Some cl' => Some (t' :: cl') end end end. Fixpoint or_clause (cl1 cl2 : clause) : option clause := match cl1 with | nil => Some cl2 | t::cl => match add_term t cl2 with | None => None | Some cl' => or_clause cl cl' end end. (* Definition or_clause_cnf (t:clause) (f:cnf) : cnf := List.map (fun x => (t++x)) f. *) Definition or_clause_cnf (t:clause) (f:cnf) : cnf := List.fold_right (fun e acc => match or_clause t e with | None => acc | Some cl => cl :: acc end) nil f. Fixpoint or_cnf (f : cnf) (f' : cnf) {struct f}: cnf := match f with | nil => tt | e :: rst => (or_cnf rst f') ++ (or_clause_cnf e f') end. Definition and_cnf (f1 : cnf) (f2 : cnf) : cnf := f1 ++ f2. Fixpoint xcnf (pol : bool) (f : BFormula Term) {struct f}: cnf := match f with | TT => if pol then tt else ff | FF => if pol then ff else tt | X p => if pol then ff else ff (* This is not complete - cannot negate any proposition *) | A x => if pol then normalise x else negate x | N e => xcnf (negb pol) e | Cj e1 e2 => (if pol then and_cnf else or_cnf) (xcnf pol e1) (xcnf pol e2) | D e1 e2 => (if pol then or_cnf else and_cnf) (xcnf pol e1) (xcnf pol e2) | I e1 e2 => (if pol then or_cnf else and_cnf) (xcnf (negb pol) e1) (xcnf pol e2) end. Definition eval_clause (env : Env) (cl : clause) := ~ make_conj (eval' env) cl. Definition eval_cnf (env : Env) (f:cnf) := make_conj (eval_clause env) f. Lemma eval_cnf_app : forall env x y, eval_cnf env (x++y) -> eval_cnf env x /\ eval_cnf env y. Proof. unfold eval_cnf. intros. rewrite make_conj_app in H ; auto. Qed. Definition eval_opt_clause (env : Env) (cl: option clause) := match cl with | None => True | Some cl => eval_clause env cl end. Lemma add_term_correct : forall env t cl , eval_opt_clause env (add_term t cl) -> eval_clause env (t::cl). Proof. induction cl. (* BC *) simpl. case_eq (deduce t t) ; auto. intros until 0. case_eq (unsat t0) ; auto. unfold eval_clause. rewrite make_conj_cons. intros. intro. apply unsat_prop with (1:= H) (env := env). apply deduce_prop with (3:= H0) ; tauto. (* IC *) simpl. case_eq (deduce t a). intro u. case_eq (unsat u). simpl. intros. unfold eval_clause. intro. apply unsat_prop with (1:= H) (env:= env). repeat rewrite make_conj_cons in H2. apply deduce_prop with (3:= H0); tauto. intro. case_eq (add_term t cl) ; intros. simpl in H2. rewrite H0 in IHcl. simpl in IHcl. unfold eval_clause in *. intros. repeat rewrite make_conj_cons in *. tauto. rewrite H0 in IHcl ; simpl in *. unfold eval_clause in *. intros. repeat rewrite make_conj_cons in *. tauto. case_eq (add_term t cl) ; intros. simpl in H1. unfold eval_clause in *. repeat rewrite make_conj_cons in *. rewrite H in IHcl. simpl in IHcl. tauto. simpl in *. rewrite H in IHcl. simpl in IHcl. unfold eval_clause in *. repeat rewrite make_conj_cons in *. tauto. Qed. Lemma or_clause_correct : forall cl cl' env, eval_opt_clause env (or_clause cl cl') -> eval_clause env cl \/ eval_clause env cl'. Proof. induction cl. simpl. tauto. intros until 0. simpl. assert (HH := add_term_correct env a cl'). case_eq (add_term a cl'). simpl in *. intros. apply IHcl in H0. rewrite H in HH. simpl in HH. unfold eval_clause in *. destruct H0. 
repeat rewrite make_conj_cons in *. tauto. apply HH in H0. apply not_make_conj_cons in H0 ; auto. repeat rewrite make_conj_cons in *. tauto. simpl. intros. rewrite H in HH. simpl in HH. unfold eval_clause in *. assert (HH' := HH Coq.Init.Logic.I). apply not_make_conj_cons in HH'; auto. repeat rewrite make_conj_cons in *. tauto. Qed. Lemma or_clause_cnf_correct : forall env t f, eval_cnf env (or_clause_cnf t f) -> (eval_clause env t) \/ (eval_cnf env f). Proof. unfold eval_cnf. unfold or_clause_cnf. intros until t. set (F := (fun (e : clause) (acc : list clause) => match or_clause t e with | Some cl => cl :: acc | None => acc end)). induction f. auto. (**) simpl. intros. destruct f. simpl in H. simpl in IHf. unfold F in H. revert H. intros. apply or_clause_correct. destruct (or_clause t a) ; simpl in * ; auto. unfold F in H at 1. revert H. assert (HH := or_clause_correct t a env). destruct (or_clause t a); simpl in HH ; rewrite make_conj_cons in * ; intuition. rewrite make_conj_cons in *. tauto. Qed. Lemma eval_cnf_cons : forall env a f, (~ make_conj (eval' env) a) -> eval_cnf env f -> eval_cnf env (a::f). Proof. intros. unfold eval_cnf in *. rewrite make_conj_cons ; eauto. Qed. Lemma or_cnf_correct : forall env f f', eval_cnf env (or_cnf f f') -> (eval_cnf env f) \/ (eval_cnf env f'). Proof. induction f. unfold eval_cnf. simpl. tauto. (**) intros. simpl in H. destruct (eval_cnf_app _ _ _ H). clear H. destruct (IHf _ H0). destruct (or_clause_cnf_correct _ _ _ H1). left. apply eval_cnf_cons ; auto. right ; auto. right ; auto. Qed. Variable normalise_correct : forall env t, eval_cnf env (normalise t) -> eval env t. Variable negate_correct : forall env t, eval_cnf env (negate t) -> ~ eval env t. Lemma xcnf_correct : forall f pol env, eval_cnf env (xcnf pol f) -> eval_f (eval env) (if pol then f else N f). Proof. induction f. (* TT *) unfold eval_cnf. simpl. destruct pol ; simpl ; auto. (* FF *) unfold eval_cnf. destruct pol; simpl ; auto. unfold eval_clause ; simpl. tauto. (* P *) simpl. destruct pol ; intros ;simpl. unfold eval_cnf in H. (* Here I have to drop the proposition *) simpl in H. unfold eval_clause in H ; simpl in H. tauto. (* Here, I could store P in the clause *) unfold eval_cnf in H;simpl in H. unfold eval_clause in H ; simpl in H. tauto. (* A *) simpl. destruct pol ; simpl. intros. apply normalise_correct ; auto. (* A 2 *) intros. apply negate_correct ; auto. auto. (* Cj *) destruct pol ; simpl. (* pol = true *) intros. unfold and_cnf in H. destruct (eval_cnf_app _ _ _ H). clear H. split. apply (IHf1 _ _ H0). apply (IHf2 _ _ H1). (* pol = false *) intros. destruct (or_cnf_correct _ _ _ H). generalize (IHf1 false env H0). simpl. tauto. generalize (IHf2 false env H0). simpl. tauto. (* D *) simpl. destruct pol. (* pol = true *) intros. destruct (or_cnf_correct _ _ _ H). generalize (IHf1 _ env H0). simpl. tauto. generalize (IHf2 _ env H0). simpl. tauto. (* pol = true *) unfold and_cnf. intros. destruct (eval_cnf_app _ _ _ H). clear H. simpl. generalize (IHf1 _ _ H0). generalize (IHf2 _ _ H1). simpl. tauto. (**) simpl. destruct pol ; simpl. intros. apply (IHf false) ; auto. intros. generalize (IHf _ _ H). tauto. (* I *) simpl; intros. destruct pol. simpl. intro. destruct (or_cnf_correct _ _ _ H). generalize (IHf1 _ _ H1). simpl in *. tauto. generalize (IHf2 _ _ H1). auto. (* pol = false *) unfold and_cnf in H. simpl in H. destruct (eval_cnf_app _ _ _ H). generalize (IHf1 _ _ H0). generalize (IHf2 _ _ H1). simpl. tauto. Qed. Variable Witness : Type. 
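(* What follows lifts an untrusted per-clause checker to whole cnf formulas:
   [cnf_checker] below consumes one [Witness] per clause, in order, and
   [tauto_checker f w] simply runs it on [xcnf true f].  Sketch of the
   intended use (hypothetical names): if [checker cl w] certifies that the
   atoms of [cl] are jointly contradictory, then a witness list
   [w1 :: w2 :: nil] certifies a cnf made of two clauses. *)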
Variable checker : list Term' -> Witness -> bool. Variable checker_sound : forall t w, checker t w = true -> forall env, make_impl (eval' env) t False. Fixpoint cnf_checker (f : cnf) (l : list Witness) {struct f}: bool := match f with | nil => true | e::f => match l with | nil => false | c::l => match checker e c with | true => cnf_checker f l | _ => false end end end. Lemma cnf_checker_sound : forall t w, cnf_checker t w = true -> forall env, eval_cnf env t. Proof. unfold eval_cnf. induction t. (* bc *) simpl. auto. (* ic *) simpl. destruct w. intros ; discriminate. case_eq (checker a w) ; intros ; try discriminate. generalize (@checker_sound _ _ H env). generalize (IHt _ H0 env) ; intros. destruct t. red ; intro. rewrite <- make_conj_impl in H2. tauto. rewrite <- make_conj_impl in H2. tauto. Qed. Definition tauto_checker (f:BFormula Term) (w:list Witness) : bool := cnf_checker (xcnf true f) w. Lemma tauto_checker_sound : forall t w, tauto_checker t w = true -> forall env, eval_f (eval env) t. Proof. unfold tauto_checker. intros. change (eval_f (eval env) t) with (eval_f (eval env) (if true then t else TT Term)). apply (xcnf_correct t true). eapply cnf_checker_sound ; eauto. Qed. End S. (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/ZMicromega.v0000640000175000001440000007424112010532755017507 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* destruct (andb_prop _ _ id); clear id | [ id : (_ || _)%bool = true |- _ ] => destruct (orb_prop _ _ id); clear id end. Ltac inv H := inversion H ; try subst ; clear H. Require Import EnvRing. Open Scope Z_scope. Lemma Zsor : SOR 0 1 Z.add Z.mul Z.sub Z.opp (@eq Z) Z.le Z.lt. Proof. constructor ; intros ; subst ; try (intuition (auto with zarith)). apply Zsth. apply Zth. destruct (Z.lt_trichotomy n m) ; intuition. apply Z.mul_pos_pos ; auto. Qed. Lemma ZSORaddon : SORaddon 0 1 Z.add Z.mul Z.sub Z.opp (@eq Z) Z.le (* ring elements *) 0%Z 1%Z Z.add Z.mul Z.sub Z.opp (* coefficients *) Zeq_bool Z.leb (fun x => x) (fun x => x) (pow_N 1 Z.mul). Proof. constructor. constructor ; intros ; try reflexivity. apply Zeq_bool_eq ; auto. constructor. reflexivity. intros x y. apply Zeq_bool_neq ; auto. apply Zle_bool_imp_le. Qed. Fixpoint Zeval_expr (env : PolEnv Z) (e: PExpr Z) : Z := match e with | PEc c => c | PEX x => env x | PEadd e1 e2 => Zeval_expr env e1 + Zeval_expr env e2 | PEmul e1 e2 => Zeval_expr env e1 * Zeval_expr env e2 | PEpow e1 n => Z.pow (Zeval_expr env e1) (Z.of_N n) | PEsub e1 e2 => (Zeval_expr env e1) - (Zeval_expr env e2) | PEopp e => Z.opp (Zeval_expr env e) end. Definition eval_expr := eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x => x) (fun x => x) (pow_N 1 Z.mul). Lemma ZNpower : forall r n, r ^ Z.of_N n = pow_N 1 Z.mul r n. Proof. destruct n. reflexivity. simpl. unfold Z.pow_pos. replace (pow_pos Z.mul r p) with (1 * (pow_pos Z.mul r p)) by ring. generalize 1. induction p; simpl ; intros ; repeat rewrite IHp ; ring. Qed. Lemma Zeval_expr_compat : forall env e, Zeval_expr env e = eval_expr env e. Proof. induction e ; simpl ; try congruence. reflexivity. rewrite ZNpower. congruence. Qed. Definition Zeval_op2 (o : Op2) : Z -> Z -> Prop := match o with | OpEq => @eq Z | OpNEq => fun x y => ~ x = y | OpLe => Z.le | OpGe => Z.ge | OpLt => Z.lt | OpGt => Z.gt end. Definition Zeval_formula (env : PolEnv Z) (f : Formula Z):= let (lhs, op, rhs) := f in (Zeval_op2 op) (Zeval_expr env lhs) (Zeval_expr env rhs). 
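(* Illustrative reading: for an environment [env], a formula whose left-hand
   side is [PEX 1], operator [OpLe] and right-hand side is [PEc 3] evaluates
   to [env 1 <= 3], by the clauses of [Zeval_expr] and [Zeval_op2] above
   (the formula's components are destructured as lhs, op, rhs in the
   definition just given). *)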
Definition Zeval_formula' := eval_formula Z.add Z.mul Z.sub Z.opp (@eq Z) Z.le Z.lt (fun x => x) (fun x => x) (pow_N 1 Z.mul). Lemma Zeval_formula_compat : forall env f, Zeval_formula env f <-> Zeval_formula' env f. Proof. destruct f ; simpl. rewrite Zeval_expr_compat. rewrite Zeval_expr_compat. unfold eval_expr. generalize (eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x : Z => x) (fun x : N => x) (pow_N 1 Z.mul) env Flhs). generalize ((eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x : Z => x) (fun x : N => x) (pow_N 1 Z.mul) env Frhs)). destruct Fop ; simpl; intros ; intuition (auto with zarith). Qed. Definition eval_nformula := eval_nformula 0 Z.add Z.mul (@eq Z) Z.le Z.lt (fun x => x) . Definition Zeval_op1 (o : Op1) : Z -> Prop := match o with | Equal => fun x : Z => x = 0 | NonEqual => fun x : Z => x <> 0 | Strict => fun x : Z => 0 < x | NonStrict => fun x : Z => 0 <= x end. Lemma Zeval_nformula_dec : forall env d, (eval_nformula env d) \/ ~ (eval_nformula env d). Proof. intros. apply (eval_nformula_dec Zsor). Qed. Definition ZWitness := Psatz Z. Definition ZWeakChecker := check_normalised_formulas 0 1 Z.add Z.mul Zeq_bool Z.leb. Lemma ZWeakChecker_sound : forall (l : list (NFormula Z)) (cm : ZWitness), ZWeakChecker l cm = true -> forall env, make_impl (eval_nformula env) l False. Proof. intros l cm H. intro. unfold eval_nformula. apply (checker_nf_sound Zsor ZSORaddon l cm). unfold ZWeakChecker in H. exact H. Qed. Definition psub := psub Z0 Z.add Z.sub Z.opp Zeq_bool. Definition padd := padd Z0 Z.add Zeq_bool. Definition norm := norm 0 1 Z.add Z.mul Z.sub Z.opp Zeq_bool. Definition eval_pol := eval_pol Z.add Z.mul (fun x => x). Lemma eval_pol_sub : forall env lhs rhs, eval_pol env (psub lhs rhs) = eval_pol env lhs - eval_pol env rhs. Proof. intros. apply (eval_pol_sub Zsor ZSORaddon). Qed. Lemma eval_pol_add : forall env lhs rhs, eval_pol env (padd lhs rhs) = eval_pol env lhs + eval_pol env rhs. Proof. intros. apply (eval_pol_add Zsor ZSORaddon). Qed. Lemma eval_pol_norm : forall env e, eval_expr env e = eval_pol env (norm e) . Proof. intros. apply (eval_pol_norm Zsor ZSORaddon). Qed. Definition xnormalise (t:Formula Z) : list (NFormula Z) := let (lhs,o,rhs) := t in let lhs := norm lhs in let rhs := norm rhs in match o with | OpEq => ((psub lhs (padd rhs (Pc 1))),NonStrict)::((psub rhs (padd lhs (Pc 1))),NonStrict)::nil | OpNEq => (psub lhs rhs,Equal) :: nil | OpGt => (psub rhs lhs,NonStrict) :: nil | OpLt => (psub lhs rhs,NonStrict) :: nil | OpGe => (psub rhs (padd lhs (Pc 1)),NonStrict) :: nil | OpLe => (psub lhs (padd rhs (Pc 1)),NonStrict) :: nil end. Require Import Tauto BinNums. Definition normalise (t:Formula Z) : cnf (NFormula Z) := List.map (fun x => x::nil) (xnormalise t). Lemma normalise_correct : forall env t, eval_cnf eval_nformula env (normalise t) <-> Zeval_formula env t. Proof. Opaque padd. unfold normalise, xnormalise ; simpl; intros env t. rewrite Zeval_formula_compat. unfold eval_cnf, eval_clause. destruct t as [lhs o rhs]; case_eq o; simpl; repeat rewrite eval_pol_sub; repeat rewrite eval_pol_add; repeat rewrite <- eval_pol_norm ; simpl in *; unfold eval_expr; generalize ( eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x : Z => x) (fun x : N => x) (pow_N 1 Z.mul) env lhs); generalize (eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x : Z => x) (fun x : N => x) (pow_N 1 Z.mul) env rhs) ; intros z1 z2 ; intros ; subst; intuition (auto with zarith). Transparent padd. Qed. 
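(* Worked example of the normalisation (informal): for [OpEq], [xnormalise]
   returns the two NonStrict constraints [lhs - (rhs + 1)] and
   [rhs - (lhs + 1)], each wrapped in a singleton clause.  Since a clause is
   evaluated negatively, the resulting cnf states
   [~ (lhs - rhs - 1 >= 0) /\ ~ (rhs - lhs - 1 >= 0)], i.e. over Z,
   [lhs <= rhs /\ rhs <= lhs], which is exactly [lhs = rhs], as
   [normalise_correct] establishes. *)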
Definition xnegate (t:RingMicromega.Formula Z) : list (NFormula Z) := let (lhs,o,rhs) := t in let lhs := norm lhs in let rhs := norm rhs in match o with | OpEq => (psub lhs rhs,Equal) :: nil | OpNEq => ((psub lhs (padd rhs (Pc 1))),NonStrict)::((psub rhs (padd lhs (Pc 1))),NonStrict)::nil | OpGt => (psub lhs (padd rhs (Pc 1)),NonStrict) :: nil | OpLt => (psub rhs (padd lhs (Pc 1)),NonStrict) :: nil | OpGe => (psub lhs rhs,NonStrict) :: nil | OpLe => (psub rhs lhs,NonStrict) :: nil end. Definition negate (t:RingMicromega.Formula Z) : cnf (NFormula Z) := List.map (fun x => x::nil) (xnegate t). Lemma negate_correct : forall env t, eval_cnf eval_nformula env (negate t) <-> ~ Zeval_formula env t. Proof. Proof. Opaque padd. intros env t. rewrite Zeval_formula_compat. unfold negate, xnegate ; simpl. unfold eval_cnf,eval_clause. destruct t as [lhs o rhs]; case_eq o; simpl; repeat rewrite eval_pol_sub; repeat rewrite eval_pol_add; repeat rewrite <- eval_pol_norm ; simpl in *; unfold eval_expr; generalize ( eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x : Z => x) (fun x : N => x) (pow_N 1 Z.mul) env lhs); generalize (eval_pexpr Z.add Z.mul Z.sub Z.opp (fun x : Z => x) (fun x : N => x) (pow_N 1 Z.mul) env rhs) ; intros z1 z2 ; intros ; subst; intuition (auto with zarith). Transparent padd. Qed. Definition Zunsat := check_inconsistent 0 Zeq_bool Z.leb. Definition Zdeduce := nformula_plus_nformula 0 Z.add Zeq_bool. Definition ZweakTautoChecker (w: list ZWitness) (f : BFormula (Formula Z)) : bool := @tauto_checker (Formula Z) (NFormula Z) Zunsat Zdeduce normalise negate ZWitness ZWeakChecker f w. (* To get a complete checker, the proof format has to be enriched *) Require Import Zdiv. Open Scope Z_scope. Definition ceiling (a b:Z) : Z := let (q,r) := Z.div_eucl a b in match r with | Z0 => q | _ => q + 1 end. Require Import Znumtheory. Lemma Zdivide_ceiling : forall a b, (b | a) -> ceiling a b = Z.div a b. Proof. unfold ceiling. intros. apply Zdivide_mod in H. case_eq (Z.div_eucl a b). intros. change z with (fst (z,z0)). rewrite <- H0. change (fst (Z.div_eucl a b)) with (Z.div a b). change z0 with (snd (z,z0)). rewrite <- H0. change (snd (Z.div_eucl a b)) with (Z.modulo a b). rewrite H. reflexivity. Qed. Lemma narrow_interval_lower_bound a b x : a > 0 -> a * x >= b -> x >= ceiling b a. Proof. rewrite !Z.ge_le_iff. unfold ceiling. intros Ha H. generalize (Z_div_mod b a Ha). destruct (Z.div_eucl b a) as (q,r). intros (->,(H1,H2)). destruct r as [|r|r]. - rewrite Z.add_0_r in H. apply Z.mul_le_mono_pos_l in H; auto with zarith. - assert (0 < Z.pos r) by easy. rewrite Z.add_1_r, Z.le_succ_l. apply Z.mul_lt_mono_pos_l with a; auto with zarith. - now elim H1. Qed. (** NB: narrow_interval_upper_bound is Zdiv.Zdiv_le_lower_bound *) Require Import QArith. Inductive ZArithProof : Type := | DoneProof | RatProof : ZWitness -> ZArithProof -> ZArithProof | CutProof : ZWitness -> ZArithProof -> ZArithProof | EnumProof : ZWitness -> ZWitness -> list ZArithProof -> ZArithProof (*| SplitProof : PolC Z -> ZArithProof -> ZArithProof -> ZArithProof*). (* n/d <= x -> d*x - n >= 0 *) (* In order to compute the 'cut', we need to express a polynomial P as a * Q + b. - b is the constant - a is the gcd of the other coefficient. *) Require Import Znumtheory. Definition isZ0 (x:Z) := match x with | Z0 => true | _ => false end. Lemma isZ0_0 : forall x, isZ0 x = true <-> x = 0. Proof. destruct x ; simpl ; intuition congruence. Qed. Lemma isZ0_n0 : forall x, isZ0 x = false <-> x <> 0. Proof. destruct x ; simpl ; intuition congruence. Qed. 
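(* Arithmetic sketch of the cut: [ceiling 7 3] computes [Z.div_eucl 7 3 = (2,1)]
   and hence returns [3], while [ceiling 6 3] returns [2].  Combined with
   [narrow_interval_lower_bound] above, a constraint [3 * x >= 7] is thus
   strengthened to the integer cutting plane [x >= 3]; the gcd machinery
   below ([Zgcd_pol], [Zdiv_pol]) extracts such a coefficient [a] from a
   polynomial written as [a * Q + b]. *)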
Definition ZgcdM (x y : Z) := Z.max (Z.gcd x y) 1. Fixpoint Zgcd_pol (p : PolC Z) : (Z * Z) := match p with | Pc c => (0,c) | Pinj _ p => Zgcd_pol p | PX p _ q => let (g1,c1) := Zgcd_pol p in let (g2,c2) := Zgcd_pol q in (ZgcdM (ZgcdM g1 c1) g2 , c2) end. (*Eval compute in (Zgcd_pol ((PX (Pc (-2)) 1 (Pc 4)))).*) Fixpoint Zdiv_pol (p:PolC Z) (x:Z) : PolC Z := match p with | Pc c => Pc (Z.div c x) | Pinj j p => Pinj j (Zdiv_pol p x) | PX p j q => PX (Zdiv_pol p x) j (Zdiv_pol q x) end. Inductive Zdivide_pol (x:Z): PolC Z -> Prop := | Zdiv_Pc : forall c, (x | c) -> Zdivide_pol x (Pc c) | Zdiv_Pinj : forall p, Zdivide_pol x p -> forall j, Zdivide_pol x (Pinj j p) | Zdiv_PX : forall p q, Zdivide_pol x p -> Zdivide_pol x q -> forall j, Zdivide_pol x (PX p j q). Lemma Zdiv_pol_correct : forall a p, 0 < a -> Zdivide_pol a p -> forall env, eval_pol env p = a * eval_pol env (Zdiv_pol p a). Proof. intros until 2. induction H0. (* Pc *) simpl. intros. apply Zdivide_Zdiv_eq ; auto. (* Pinj *) simpl. intros. apply IHZdivide_pol. (* PX *) simpl. intros. rewrite IHZdivide_pol1. rewrite IHZdivide_pol2. ring. Qed. Lemma Zgcd_pol_ge : forall p, fst (Zgcd_pol p) >= 0. Proof. induction p. simpl. auto with zarith. simpl. auto. simpl. case_eq (Zgcd_pol p1). case_eq (Zgcd_pol p3). intros. simpl. unfold ZgcdM. generalize (Z.gcd_nonneg z1 z2). generalize (Zmax_spec (Z.gcd z1 z2) 1). generalize (Z.gcd_nonneg (Z.max (Z.gcd z1 z2) 1) z). generalize (Zmax_spec (Z.gcd (Z.max (Z.gcd z1 z2) 1) z) 1). auto with zarith. Qed. Lemma Zdivide_pol_Zdivide : forall p x y, Zdivide_pol x p -> (y | x) -> Zdivide_pol y p. Proof. intros. induction H. constructor. apply Z.divide_trans with (1:= H0) ; assumption. constructor. auto. constructor ; auto. Qed. Lemma Zdivide_pol_one : forall p, Zdivide_pol 1 p. Proof. induction p ; constructor ; auto. exists c. ring. Qed. Lemma Zgcd_minus : forall a b c, (a | c - b ) -> (Z.gcd a b | c). Proof. intros a b c (q,Hq). destruct (Zgcd_is_gcd a b) as [(a',Ha) (b',Hb) _]. set (g:=Z.gcd a b) in *; clearbody g. exists (q * a' + b'). symmetry in Hq. rewrite <- Z.add_move_r in Hq. rewrite <- Hq, Hb, Ha. ring. Qed. Lemma Zdivide_pol_sub : forall p a b, 0 < Z.gcd a b -> Zdivide_pol a (PsubC Z.sub p b) -> Zdivide_pol (Z.gcd a b) p. Proof. induction p. simpl. intros. inversion H0. constructor. apply Zgcd_minus ; auto. intros. constructor. simpl in H0. inversion H0 ; subst; clear H0. apply IHp ; auto. simpl. intros. inv H0. constructor. apply Zdivide_pol_Zdivide with (1:= H3). destruct (Zgcd_is_gcd a b) ; assumption. apply IHp2 ; assumption. Qed. Lemma Zdivide_pol_sub_0 : forall p a, Zdivide_pol a (PsubC Z.sub p 0) -> Zdivide_pol a p. Proof. induction p. simpl. intros. inversion H. constructor. replace (c - 0) with c in H1 ; auto with zarith. intros. constructor. simpl in H. inversion H ; subst; clear H. apply IHp ; auto. simpl. intros. inv H. constructor. auto. apply IHp2 ; assumption. Qed. Lemma Zgcd_pol_div : forall p g c, Zgcd_pol p = (g, c) -> Zdivide_pol g (PsubC Z.sub p c). Proof. induction p ; simpl. (* Pc *) intros. inv H. constructor. exists 0. now ring. (* Pinj *) intros. constructor. apply IHp ; auto. (* PX *) intros g c. case_eq (Zgcd_pol p1) ; case_eq (Zgcd_pol p3) ; intros. inv H1. unfold ZgcdM at 1. destruct (Zmax_spec (Z.gcd (ZgcdM z1 z2) z) 1) as [HH1 | HH1]; destruct HH1 as [HH1 HH1'] ; rewrite HH1'. constructor. apply Zdivide_pol_Zdivide with (x:= ZgcdM z1 z2). unfold ZgcdM. destruct (Zmax_spec (Z.gcd z1 z2) 1) as [HH2 | HH2]. destruct HH2. rewrite H2. apply Zdivide_pol_sub ; auto. 
auto with zarith. destruct HH2. rewrite H2. apply Zdivide_pol_one. unfold ZgcdM in HH1. unfold ZgcdM. destruct (Zmax_spec (Z.gcd z1 z2) 1) as [HH2 | HH2]. destruct HH2. rewrite H2 in *. destruct (Zgcd_is_gcd (Z.gcd z1 z2) z); auto. destruct HH2. rewrite H2. destruct (Zgcd_is_gcd 1 z); auto. apply Zdivide_pol_Zdivide with (x:= z). apply (IHp2 _ _ H); auto. destruct (Zgcd_is_gcd (ZgcdM z1 z2) z); auto. constructor. apply Zdivide_pol_one. apply Zdivide_pol_one. Qed. Lemma Zgcd_pol_correct_lt : forall p env g c, Zgcd_pol p = (g,c) -> 0 < g -> eval_pol env p = g * (eval_pol env (Zdiv_pol (PsubC Z.sub p c) g)) + c. Proof. intros. rewrite <- Zdiv_pol_correct ; auto. rewrite (RingMicromega.PsubC_ok Zsor ZSORaddon). unfold eval_pol. ring. (**) apply Zgcd_pol_div ; auto. Qed. Definition makeCuttingPlane (p : PolC Z) : PolC Z * Z := let (g,c) := Zgcd_pol p in if Z.gtb g Z0 then (Zdiv_pol (PsubC Z.sub p c) g , Z.opp (ceiling (Z.opp c) g)) else (p,Z0). Definition genCuttingPlane (f : NFormula Z) : option (PolC Z * Z * Op1) := let (e,op) := f in match op with | Equal => let (g,c) := Zgcd_pol e in if andb (Z.gtb g Z0) (andb (negb (Zeq_bool c Z0)) (negb (Zeq_bool (Z.gcd g c) g))) then None (* inconsistent *) else (* Could be optimised Zgcd_pol is recomputed *) let (p,c) := makeCuttingPlane e in Some (p,c,Equal) | NonEqual => Some (e,Z0,op) | Strict => let (p,c) := makeCuttingPlane (PsubC Z.sub e 1) in Some (p,c,NonStrict) | NonStrict => let (p,c) := makeCuttingPlane e in Some (p,c,NonStrict) end. Definition nformula_of_cutting_plane (t : PolC Z * Z * Op1) : NFormula Z := let (e_z, o) := t in let (e,z) := e_z in (padd e (Pc z) , o). Definition is_pol_Z0 (p : PolC Z) : bool := match p with | Pc Z0 => true | _ => false end. Lemma is_pol_Z0_eval_pol : forall p, is_pol_Z0 p = true -> forall env, eval_pol env p = 0. Proof. unfold is_pol_Z0. destruct p ; try discriminate. destruct z ; try discriminate. reflexivity. Qed. Definition eval_Psatz : list (NFormula Z) -> ZWitness -> option (NFormula Z) := eval_Psatz 0 1 Z.add Z.mul Zeq_bool Z.leb. Definition valid_cut_sign (op:Op1) := match op with | Equal => true | NonStrict => true | _ => false end. Fixpoint ZChecker (l:list (NFormula Z)) (pf : ZArithProof) {struct pf} : bool := match pf with | DoneProof => false | RatProof w pf => match eval_Psatz l w with | None => false | Some f => if Zunsat f then true else ZChecker (f::l) pf end | CutProof w pf => match eval_Psatz l w with | None => false | Some f => match genCuttingPlane f with | None => true | Some cp => ZChecker (nformula_of_cutting_plane cp::l) pf end end | EnumProof w1 w2 pf => match eval_Psatz l w1 , eval_Psatz l w2 with | Some f1 , Some f2 => match genCuttingPlane f1 , genCuttingPlane f2 with |Some (e1,z1,op1) , Some (e2,z2,op2) => if (valid_cut_sign op1 && valid_cut_sign op2 && is_pol_Z0 (padd e1 e2)) then (fix label (pfs:list ZArithProof) := fun lb ub => match pfs with | nil => if Z.gtb lb ub then true else false | pf::rsr => andb (ZChecker ((psub e1 (Pc lb), Equal) :: l) pf) (label rsr (Z.add lb 1%Z) ub) end) pf (Z.opp z1) z2 else false | _ , _ => true end | _ , _ => false end end. Fixpoint bdepth (pf : ZArithProof) : nat := match pf with | DoneProof => O | RatProof _ p => S (bdepth p) | CutProof _ p => S (bdepth p) | EnumProof _ _ l => S (List.fold_right (fun pf x => Max.max (bdepth pf) x) O l) end. Require Import Wf_nat. Lemma in_bdepth : forall l a b y, In y l -> ltof ZArithProof bdepth y (EnumProof a b l). Proof. induction l. (* nil *) simpl. tauto. (* cons *) simpl. intros. destruct H. subst. 
unfold ltof. simpl. generalize ( (fold_right (fun (pf : ZArithProof) (x : nat) => Max.max (bdepth pf) x) 0%nat l)). intros. generalize (bdepth y) ; intros. generalize (Max.max_l n0 n) (Max.max_r n0 n). auto with zarith. generalize (IHl a0 b y H). unfold ltof. simpl. generalize ( (fold_right (fun (pf : ZArithProof) (x : nat) => Max.max (bdepth pf) x) 0%nat l)). intros. generalize (Max.max_l (bdepth a) n) (Max.max_r (bdepth a) n). auto with zarith. Qed. Lemma eval_Psatz_sound : forall env w l f', make_conj (eval_nformula env) l -> eval_Psatz l w = Some f' -> eval_nformula env f'. Proof. intros. apply (eval_Psatz_Sound Zsor ZSORaddon) with (l:=l) (e:= w) ; auto. apply make_conj_in ; auto. Qed. Lemma makeCuttingPlane_ns_sound : forall env e e' c, eval_nformula env (e, NonStrict) -> makeCuttingPlane e = (e',c) -> eval_nformula env (nformula_of_cutting_plane (e', c, NonStrict)). Proof. unfold nformula_of_cutting_plane. unfold eval_nformula. unfold RingMicromega.eval_nformula. unfold eval_op1. intros. rewrite (RingMicromega.eval_pol_add Zsor ZSORaddon). simpl. (**) unfold makeCuttingPlane in H0. revert H0. case_eq (Zgcd_pol e) ; intros g c0. generalize (Zgt_cases g 0) ; destruct (Z.gtb g 0). intros. inv H2. change (RingMicromega.eval_pol Z.add Z.mul (fun x : Z => x)) with eval_pol in *. apply Zgcd_pol_correct_lt with (env:=env) in H1. generalize (narrow_interval_lower_bound g (- c0) (eval_pol env (Zdiv_pol (PsubC Z.sub e c0) g)) H0). auto with zarith. auto with zarith. (* g <= 0 *) intros. inv H2. auto with zarith. Qed. Lemma cutting_plane_sound : forall env f p, eval_nformula env f -> genCuttingPlane f = Some p -> eval_nformula env (nformula_of_cutting_plane p). Proof. unfold genCuttingPlane. destruct f as [e op]. destruct op. (* Equal *) destruct p as [[e' z] op]. case_eq (Zgcd_pol e) ; intros g c. case_eq (Z.gtb g 0 && (negb (Zeq_bool c 0) && negb (Zeq_bool (Z.gcd g c) g))) ; [discriminate|]. case_eq (makeCuttingPlane e). intros. inv H3. unfold makeCuttingPlane in H. rewrite H1 in H. revert H. change (eval_pol env e = 0) in H2. case_eq (Z.gtb g 0). intros. rewrite <- Zgt_is_gt_bool in H. rewrite Zgcd_pol_correct_lt with (1:= H1) in H2; auto with zarith. unfold nformula_of_cutting_plane. change (eval_pol env (padd e' (Pc z)) = 0). inv H3. rewrite eval_pol_add. set (x:=eval_pol env (Zdiv_pol (PsubC Z.sub e c) g)) in *; clearbody x. simpl. rewrite andb_false_iff in H0. destruct H0. rewrite Zgt_is_gt_bool in H ; congruence. rewrite andb_false_iff in H0. destruct H0. rewrite negb_false_iff in H0. apply Zeq_bool_eq in H0. subst. simpl. rewrite Z.add_0_r, Z.mul_eq_0 in H2. intuition auto with zarith. rewrite negb_false_iff in H0. apply Zeq_bool_eq in H0. assert (HH := Zgcd_is_gcd g c). rewrite H0 in HH. inv HH. apply Zdivide_opp_r in H4. rewrite Zdivide_ceiling ; auto. apply Z.sub_move_0_r. apply Z.div_unique_exact ; auto with zarith. intros. unfold nformula_of_cutting_plane. inv H3. change (eval_pol env (padd e' (Pc 0)) = 0). rewrite eval_pol_add. simpl. auto with zarith. (* NonEqual *) intros. inv H0. unfold eval_nformula in *. unfold RingMicromega.eval_nformula in *. unfold nformula_of_cutting_plane. unfold eval_op1 in *. rewrite (RingMicromega.eval_pol_add Zsor ZSORaddon). simpl. auto with zarith. (* Strict *) destruct p as [[e' z] op]. case_eq (makeCuttingPlane (PsubC Z.sub e 1)). intros. inv H1. apply makeCuttingPlane_ns_sound with (env:=env) (2:= H). simpl in *. rewrite (RingMicromega.PsubC_ok Zsor ZSORaddon). auto with zarith. (* NonStrict *) destruct p as [[e' z] op]. 
case_eq (makeCuttingPlane e). intros. inv H1. apply makeCuttingPlane_ns_sound with (env:=env) (2:= H). assumption. Qed. Lemma genCuttingPlaneNone : forall env f, genCuttingPlane f = None -> eval_nformula env f -> False. Proof. unfold genCuttingPlane. destruct f. destruct o. case_eq (Zgcd_pol p) ; intros g c. case_eq (Z.gtb g 0 && (negb (Zeq_bool c 0) && negb (Zeq_bool (Z.gcd g c) g))). intros. flatten_bool. rewrite negb_true_iff in H5. apply Zeq_bool_neq in H5. rewrite <- Zgt_is_gt_bool in H3. rewrite negb_true_iff in H. apply Zeq_bool_neq in H. change (eval_pol env p = 0) in H2. rewrite Zgcd_pol_correct_lt with (1:= H0) in H2; auto with zarith. set (x:=eval_pol env (Zdiv_pol (PsubC Z.sub p c) g)) in *; clearbody x. contradict H5. apply Zis_gcd_gcd; auto with zarith. constructor; auto with zarith. exists (-x). rewrite Z.mul_opp_l, Z.mul_comm; auto with zarith. (**) destruct (makeCuttingPlane p); discriminate. discriminate. destruct (makeCuttingPlane (PsubC Z.sub p 1)) ; discriminate. destruct (makeCuttingPlane p) ; discriminate. Qed. Lemma ZChecker_sound : forall w l, ZChecker l w = true -> forall env, make_impl (eval_nformula env) l False. Proof. induction w using (well_founded_ind (well_founded_ltof _ bdepth)). destruct w as [ | w pf | w pf | w1 w2 pf]. (* DoneProof *) simpl. discriminate. (* RatProof *) simpl. intro l. case_eq (eval_Psatz l w) ; [| discriminate]. intros f Hf. case_eq (Zunsat f). intros. apply (checker_nf_sound Zsor ZSORaddon l w). unfold check_normalised_formulas. unfold eval_Psatz in Hf. rewrite Hf. unfold Zunsat in H0. assumption. intros. assert (make_impl (eval_nformula env) (f::l) False). apply H with (2:= H1). unfold ltof. simpl. auto with arith. destruct f. rewrite <- make_conj_impl in H2. rewrite make_conj_cons in H2. rewrite <- make_conj_impl. intro. apply H2. split ; auto. apply eval_Psatz_sound with (2:= Hf) ; assumption. (* CutProof *) simpl. intro l. case_eq (eval_Psatz l w) ; [ | discriminate]. intros f' Hlc. case_eq (genCuttingPlane f'). intros. assert (make_impl (eval_nformula env) (nformula_of_cutting_plane p::l) False). eapply (H pf) ; auto. unfold ltof. simpl. auto with arith. rewrite <- make_conj_impl in H2. rewrite make_conj_cons in H2. rewrite <- make_conj_impl. intro. apply H2. split ; auto. apply eval_Psatz_sound with (env:=env) in Hlc. apply cutting_plane_sound with (1:= Hlc) (2:= H0). auto. (* genCuttingPlane = None *) intros. rewrite <- make_conj_impl. intros. apply eval_Psatz_sound with (2:= Hlc) in H2. apply genCuttingPlaneNone with (2:= H2) ; auto. (* EnumProof *) intro. simpl. case_eq (eval_Psatz l w1) ; [ | discriminate]. case_eq (eval_Psatz l w2) ; [ | discriminate]. intros f1 Hf1 f2 Hf2. case_eq (genCuttingPlane f2). destruct p as [ [p1 z1] op1]. case_eq (genCuttingPlane f1). destruct p as [ [p2 z2] op2]. case_eq (valid_cut_sign op1 && valid_cut_sign op2 && is_pol_Z0 (padd p1 p2)). intros Hcond. flatten_bool. rename H1 into HZ0. rename H2 into Hop1. rename H3 into Hop2. intros HCutL HCutR Hfix env. (* get the bounds of the enum *) rewrite <- make_conj_impl. intro. assert (-z1 <= eval_pol env p1 <= z2). split. apply eval_Psatz_sound with (env:=env) in Hf2 ; auto. apply cutting_plane_sound with (1:= Hf2) in HCutR. unfold nformula_of_cutting_plane in HCutR. unfold eval_nformula in HCutR. unfold RingMicromega.eval_nformula in HCutR. change (RingMicromega.eval_pol Z.add Z.mul (fun x : Z => x)) with eval_pol in HCutR. unfold eval_op1 in HCutR. 
destruct op1 ; simpl in Hop1 ; try discriminate; rewrite eval_pol_add in HCutR; simpl in HCutR; auto with zarith. (**) apply is_pol_Z0_eval_pol with (env := env) in HZ0. rewrite eval_pol_add in HZ0. replace (eval_pol env p1) with (- eval_pol env p2) by omega. apply eval_Psatz_sound with (env:=env) in Hf1 ; auto. apply cutting_plane_sound with (1:= Hf1) in HCutL. unfold nformula_of_cutting_plane in HCutL. unfold eval_nformula in HCutL. unfold RingMicromega.eval_nformula in HCutL. change (RingMicromega.eval_pol Z.add Z.mul (fun x : Z => x)) with eval_pol in HCutL. unfold eval_op1 in HCutL. rewrite eval_pol_add in HCutL. simpl in HCutL. destruct op2 ; simpl in Hop2 ; try discriminate ; omega. revert Hfix. match goal with | |- context[?F pf (-z1) z2 = true] => set (FF := F) end. intros. assert (HH :forall x, -z1 <= x <= z2 -> exists pr, (In pr pf /\ ZChecker ((PsubC Z.sub p1 x,Equal) :: l) pr = true)%Z). clear HZ0 Hop1 Hop2 HCutL HCutR H0 H1. revert Hfix. generalize (-z1). clear z1. intro z1. revert z1 z2. induction pf;simpl ;intros. generalize (Zgt_cases z1 z2). destruct (Z.gtb z1 z2). intros. apply False_ind ; omega. discriminate. flatten_bool. assert (HH:(x = z1 \/ z1 +1 <=x)%Z) by omega. destruct HH. subst. exists a ; auto. assert (z1 + 1 <= x <= z2)%Z by omega. elim IHpf with (2:=H2) (3:= H4). destruct H4. intros. exists x0 ; split;tauto. intros until 1. apply H ; auto. unfold ltof in *. simpl in *. zify. omega. (*/asser *) destruct (HH _ H1) as [pr [Hin Hcheker]]. assert (make_impl (eval_nformula env) ((PsubC Z.sub p1 (eval_pol env p1),Equal) :: l) False). apply (H pr);auto. apply in_bdepth ; auto. rewrite <- make_conj_impl in H2. apply H2. rewrite make_conj_cons. split ;auto. unfold eval_nformula. unfold RingMicromega.eval_nformula. simpl. rewrite (RingMicromega.PsubC_ok Zsor ZSORaddon). unfold eval_pol. ring. discriminate. (* No cutting plane *) intros. rewrite <- make_conj_impl. intros. apply eval_Psatz_sound with (2:= Hf1) in H3. apply genCuttingPlaneNone with (2:= H3) ; auto. (* No Cutting plane (bis) *) intros. rewrite <- make_conj_impl. intros. apply eval_Psatz_sound with (2:= Hf2) in H2. apply genCuttingPlaneNone with (2:= H2) ; auto. Qed. Definition ZTautoChecker (f : BFormula (Formula Z)) (w: list ZArithProof): bool := @tauto_checker (Formula Z) (NFormula Z) Zunsat Zdeduce normalise negate ZArithProof ZChecker f w. Lemma ZTautoChecker_sound : forall f w, ZTautoChecker f w = true -> forall env, eval_f (Zeval_formula env) f. Proof. intros f w. unfold ZTautoChecker. apply (tauto_checker_sound Zeval_formula eval_nformula). apply Zeval_nformula_dec. intros until env. unfold eval_nformula. unfold RingMicromega.eval_nformula. destruct t. apply (check_inconsistent_sound Zsor ZSORaddon) ; auto. unfold Zdeduce. apply (nformula_plus_nformula_correct Zsor ZSORaddon). intros env t. rewrite normalise_correct ; auto. intros env t. rewrite negate_correct ; auto. intros t w0. apply ZChecker_sound. Qed. Fixpoint xhyps_of_pt (base:nat) (acc : list nat) (pt:ZArithProof) : list nat := match pt with | DoneProof => acc | RatProof c pt => xhyps_of_pt (S base ) (xhyps_of_psatz base acc c) pt | CutProof c pt => xhyps_of_pt (S base ) (xhyps_of_psatz base acc c) pt | EnumProof c1 c2 l => let acc := xhyps_of_psatz base (xhyps_of_psatz base acc c2) c1 in List.fold_left (xhyps_of_pt (S base)) l acc end. Definition hyps_of_pt (pt : ZArithProof) : list nat := xhyps_of_pt 0 nil pt. (*Lemma hyps_of_pt_correct : forall pt l, *) Open Scope Z_scope. 
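(* Typical reflexive use of [ZTautoChecker_sound], as performed by the [lia]
   and [psatz] tactics in Psatz.v: the OCaml plugin produces a reified
   formula [__ff] together with a witness list [__wit], and the goal is then
   closed by [apply (ZTautoChecker_sound __ff __wit); vm_compute; reflexivity],
   so that [ZTautoChecker] itself is executed by the kernel's virtual
   machine. *)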
(** To ease bindings from ml code **) (*Definition varmap := Quote.varmap.*) Definition make_impl := Refl.make_impl. Definition make_conj := Refl.make_conj. Require VarMap. (*Definition varmap_type := VarMap.t Z. *) Definition env := PolEnv Z. Definition node := @VarMap.Node Z. Definition empty := @VarMap.Empty Z. Definition leaf := @VarMap.Leaf Z. Definition coneMember := ZWitness. Definition eval := eval_formula. Definition prod_pos_nat := prod positive nat. Notation n_of_Z := Z.to_N (only parsing). (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/MExtraction.v0000640000175000001440000000441412010532755017702 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* "( * )" [ "(,)" ]. Extract Inductive list => list [ "[]" "(::)" ]. Extract Inductive bool => bool [ true false ]. Extract Inductive sumbool => bool [ true false ]. Extract Inductive option => option [ Some None ]. Extract Inductive sumor => option [ Some None ]. (** Then, in a ternary alternative { }+{ }+{ }, - leftmost choice (Inleft Left) is (Some true), - middle choice (Inleft Right) is (Some false), - rightmost choice (Inright) is (None) *) (** To preserve its laziness, andb is normally expansed. Let's rather use the ocaml && *) Extract Inlined Constant andb => "(&&)". Require Import Reals. Extract Constant R => "int". Extract Constant R0 => "0". Extract Constant R1 => "1". Extract Constant Rplus => "( + )". Extract Constant Rmult => "( * )". Extract Constant Ropp => "fun x -> - x". Extract Constant Rinv => "fun x -> 1 / x". Extraction "micromega.ml" List.map simpl_cone (*map_cone indexes*) denorm Qpower n_of_Z N.of_nat ZTautoChecker ZWeakChecker QTautoChecker RTautoChecker find. (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/sos_lib.ml0000640000175000001440000005347711254456226017266 0ustar notinusers(* ========================================================================= *) (* - This code originates from John Harrison's HOL LIGHT 2.30 *) (* (see file LICENSE.sos for license, copyright and disclaimer) *) (* This code is the HOL LIGHT library code used by sos.ml *) (* - Laurent Thry (thery@sophia.inria.fr) has isolated the HOL *) (* independent bits *) (* - Frdric Besson (fbesson@irisa.fr) is using it to feed micromega *) (* ========================================================================= *) open Sos_types open Num open List let debugging = ref false;; (* ------------------------------------------------------------------------- *) (* Comparisons that are reflexive on NaN and also short-circuiting. *) (* ------------------------------------------------------------------------- *) let (=?) = fun x y -> Pervasives.compare x y = 0;; let ( Pervasives.compare x y < 0;; let (<=?) = fun x y -> Pervasives.compare x y <= 0;; let (>?) = fun x y -> Pervasives.compare x y > 0;; let (>=?) = fun x y -> Pervasives.compare x y >= 0;; (* ------------------------------------------------------------------------- *) (* Combinators. *) (* ------------------------------------------------------------------------- *) let (o) = fun f g x -> f(g x);; (* ------------------------------------------------------------------------- *) (* Some useful functions on "num" type. 
*) (* ------------------------------------------------------------------------- *) let num_0 = Int 0 and num_1 = Int 1 and num_2 = Int 2 and num_10 = Int 10;; let pow2 n = power_num num_2 (Int n);; let pow10 n = power_num num_10 (Int n);; let numdom r = let r' = Ratio.normalize_ratio (ratio_of_num r) in num_of_big_int(Ratio.numerator_ratio r'), num_of_big_int(Ratio.denominator_ratio r');; let numerator = (o) fst numdom and denominator = (o) snd numdom;; let gcd_num n1 n2 = num_of_big_int(Big_int.gcd_big_int (big_int_of_num n1) (big_int_of_num n2));; let lcm_num x y = if x =/ num_0 & y =/ num_0 then num_0 else abs_num((x */ y) // gcd_num x y);; (* ------------------------------------------------------------------------- *) (* List basics. *) (* ------------------------------------------------------------------------- *) let rec el n l = if n = 0 then hd l else el (n - 1) (tl l);; (* ------------------------------------------------------------------------- *) (* Various versions of list iteration. *) (* ------------------------------------------------------------------------- *) let rec itlist f l b = match l with [] -> b | (h::t) -> f h (itlist f t b);; let rec end_itlist f l = match l with [] -> failwith "end_itlist" | [x] -> x | (h::t) -> f h (end_itlist f t);; let rec itlist2 f l1 l2 b = match (l1,l2) with ([],[]) -> b | (h1::t1,h2::t2) -> f h1 h2 (itlist2 f t1 t2 b) | _ -> failwith "itlist2";; (* ------------------------------------------------------------------------- *) (* All pairs arising from applying a function over two lists. *) (* ------------------------------------------------------------------------- *) let rec allpairs f l1 l2 = match l1 with h1::t1 -> itlist (fun x a -> f h1 x :: a) l2 (allpairs f t1 l2) | [] -> [];; (* ------------------------------------------------------------------------- *) (* String operations (surely there is a better way...) *) (* ------------------------------------------------------------------------- *) let implode l = itlist (^) l "";; let explode s = let rec exap n l = if n < 0 then l else exap (n - 1) ((String.sub s n 1)::l) in exap (String.length s - 1) [];; (* ------------------------------------------------------------------------- *) (* Attempting function or predicate applications. *) (* ------------------------------------------------------------------------- *) let can f x = try (f x; true) with Failure _ -> false;; (* ------------------------------------------------------------------------- *) (* Repetition of a function. *) (* ------------------------------------------------------------------------- *) let rec funpow n f x = if n < 1 then x else funpow (n-1) f (f x);; (* ------------------------------------------------------------------------- *) (* Replication and sequences. *) (* ------------------------------------------------------------------------- *) let rec replicate x n = if n < 1 then [] else x::(replicate x (n - 1));; let rec (--) = fun m n -> if m > n then [] else m::((m + 1) -- n);; (* ------------------------------------------------------------------------- *) (* Various useful list operations. *) (* ------------------------------------------------------------------------- *) let rec forall p l = match l with [] -> true | h::t -> p(h) & forall p t;; let rec tryfind f l = match l with [] -> failwith "tryfind" | (h::t) -> try f h with Failure _ -> tryfind f t;; let index x = let rec ind n l = match l with [] -> failwith "index" | (h::t) -> if x =? 
h then n else ind (n + 1) t in ind 0;; (* ------------------------------------------------------------------------- *) (* "Set" operations on lists. *) (* ------------------------------------------------------------------------- *) let rec mem x lis = match lis with [] -> false | (h::t) -> x =? h or mem x t;; let insert x l = if mem x l then l else x::l;; let union l1 l2 = itlist insert l1 l2;; let subtract l1 l2 = filter (fun x -> not (mem x l2)) l1;; (* ------------------------------------------------------------------------- *) (* Merging and bottom-up mergesort. *) (* ------------------------------------------------------------------------- *) let rec merge ord l1 l2 = match l1 with [] -> l2 | h1::t1 -> match l2 with [] -> l1 | h2::t2 -> if ord h1 h2 then h1::(merge ord t1 l2) else h2::(merge ord l1 t2);; (* ------------------------------------------------------------------------- *) (* Common measure predicates to use with "sort". *) (* ------------------------------------------------------------------------- *) let increasing f x y = f x ? f y;; (* ------------------------------------------------------------------------- *) (* Zipping, unzipping etc. *) (* ------------------------------------------------------------------------- *) let rec zip l1 l2 = match (l1,l2) with ([],[]) -> [] | (h1::t1,h2::t2) -> (h1,h2)::(zip t1 t2) | _ -> failwith "zip";; let rec unzip = function [] -> [],[] | ((a,b)::rest) -> let alist,blist = unzip rest in (a::alist,b::blist);; (* ------------------------------------------------------------------------- *) (* Iterating functions over lists. *) (* ------------------------------------------------------------------------- *) let rec do_list f l = match l with [] -> () | (h::t) -> (f h; do_list f t);; (* ------------------------------------------------------------------------- *) (* Sorting. *) (* ------------------------------------------------------------------------- *) let rec sort cmp lis = match lis with [] -> [] | piv::rest -> let r,l = partition (cmp piv) rest in (sort cmp l) @ (piv::(sort cmp r));; (* ------------------------------------------------------------------------- *) (* Removing adjacent (NB!) equal elements from list. *) (* ------------------------------------------------------------------------- *) let rec uniq l = match l with x::(y::_ as t) -> let t' = uniq t in if x =? y then t' else if t'==t then l else x::t' | _ -> l;; (* ------------------------------------------------------------------------- *) (* Convert list into set by eliminating duplicates. *) (* ------------------------------------------------------------------------- *) let setify s = uniq (sort (<=?) s);; (* ------------------------------------------------------------------------- *) (* Polymorphic finite partial functions via Patricia trees. *) (* *) (* The point of this strange representation is that it is canonical (equal *) (* functions have the same encoding) yet reasonably efficient on average. *) (* *) (* Idea due to Diego Olivier Fernandez Pons (OCaml list, 2003/11/10). *) (* ------------------------------------------------------------------------- *) type ('a,'b)func = Empty | Leaf of int * ('a*'b)list | Branch of int * int * ('a,'b)func * ('a,'b)func;; (* ------------------------------------------------------------------------- *) (* Undefined function. 
*) (* ------------------------------------------------------------------------- *) let undefined = Empty;; (* ------------------------------------------------------------------------- *) (* In case of equality comparison worries, better use this. *) (* ------------------------------------------------------------------------- *) let is_undefined f = match f with Empty -> true | _ -> false;; (* ------------------------------------------------------------------------- *) (* Operation analagous to "map" for lists. *) (* ------------------------------------------------------------------------- *) let mapf = let rec map_list f l = match l with [] -> [] | (x,y)::t -> (x,f(y))::(map_list f t) in let rec mapf f t = match t with Empty -> Empty | Leaf(h,l) -> Leaf(h,map_list f l) | Branch(p,b,l,r) -> Branch(p,b,mapf f l,mapf f r) in mapf;; (* ------------------------------------------------------------------------- *) (* Operations analogous to "fold" for lists. *) (* ------------------------------------------------------------------------- *) let foldl = let rec foldl_list f a l = match l with [] -> a | (x,y)::t -> foldl_list f (f a x y) t in let rec foldl f a t = match t with Empty -> a | Leaf(h,l) -> foldl_list f a l | Branch(p,b,l,r) -> foldl f (foldl f a l) r in foldl;; let foldr = let rec foldr_list f l a = match l with [] -> a | (x,y)::t -> f x y (foldr_list f t a) in let rec foldr f t a = match t with Empty -> a | Leaf(h,l) -> foldr_list f l a | Branch(p,b,l,r) -> foldr f l (foldr f r a) in foldr;; (* ------------------------------------------------------------------------- *) (* Redefinition and combination. *) (* ------------------------------------------------------------------------- *) let (|->),combine = let ldb x y = let z = x lxor y in z land (-z) in let newbranch p1 t1 p2 t2 = let b = ldb p1 p2 in let p = p1 land (b - 1) in if p1 land b = 0 then Branch(p,b,t1,t2) else Branch(p,b,t2,t1) in let rec define_list (x,y as xy) l = match l with (a,b as ab)::t -> if x =? 
a then xy::t else if x [xy] and combine_list op z l1 l2 = match (l1,l2) with [],_ -> l2 | _,[] -> l1 | ((x1,y1 as xy1)::t1,(x2,y2 as xy2)::t2) -> if x1 ) x y = let k = Hashtbl.hash x in let rec upd t = match t with Empty -> Leaf (k,[x,y]) | Leaf(h,l) -> if h = k then Leaf(h,define_list (x,y) l) else newbranch h t k (Leaf(k,[x,y])) | Branch(p,b,l,r) -> if k land (b - 1) <> p then newbranch p t k (Leaf(k,[x,y])) else if k land b = 0 then Branch(p,b,upd l,r) else Branch(p,b,l,upd r) in upd in let rec combine op z t1 t2 = match (t1,t2) with Empty,_ -> t2 | _,Empty -> t1 | Leaf(h1,l1),Leaf(h2,l2) -> if h1 = h2 then let l = combine_list op z l1 l2 in if l = [] then Empty else Leaf(h1,l) else newbranch h1 t1 h2 t2 | (Leaf(k,lis) as lf),(Branch(p,b,l,r) as br) | (Branch(p,b,l,r) as br),(Leaf(k,lis) as lf) -> if k land (b - 1) = p then if k land b = 0 then let l' = combine op z lf l in if is_undefined l' then r else Branch(p,b,l',r) else let r' = combine op z lf r in if is_undefined r' then l else Branch(p,b,l,r') else newbranch k lf p br | Branch(p1,b1,l1,r1),Branch(p2,b2,l2,r2) -> if b1 < b2 then if p2 land (b1 - 1) <> p1 then newbranch p1 t1 p2 t2 else if p2 land b1 = 0 then let l = combine op z l1 t2 in if is_undefined l then r1 else Branch(p1,b1,l,r1) else let r = combine op z r1 t2 in if is_undefined r then l1 else Branch(p1,b1,l1,r) else if b2 < b1 then if p1 land (b2 - 1) <> p2 then newbranch p1 t1 p2 t2 else if p1 land b2 = 0 then let l = combine op z t1 l2 in if is_undefined l then r2 else Branch(p2,b2,l,r2) else let r = combine op z t1 r2 in if is_undefined r then l2 else Branch(p2,b2,l2,r) else if p1 = p2 then let l = combine op z l1 l2 and r = combine op z r1 r2 in if is_undefined l then r else if is_undefined r then l else Branch(p1,b1,l,r) else newbranch p1 t1 p2 t2 in (|->),combine;; (* ------------------------------------------------------------------------- *) (* Special case of point function. *) (* ------------------------------------------------------------------------- *) let (|=>) = fun x y -> (x |-> y) undefined;; (* ------------------------------------------------------------------------- *) (* Grab an arbitrary element. *) (* ------------------------------------------------------------------------- *) let rec choose t = match t with Empty -> failwith "choose: completely undefined function" | Leaf(h,l) -> hd l | Branch(b,p,t1,t2) -> choose t1;; (* ------------------------------------------------------------------------- *) (* Application. *) (* ------------------------------------------------------------------------- *) let applyd = let rec apply_listd l d x = match l with (a,b)::t -> if x =? a then b else if x >? a then apply_listd t d x else d x | [] -> d x in fun f d x -> let k = Hashtbl.hash x in let rec look t = match t with Leaf(h,l) when h = k -> apply_listd l d x | Branch(p,b,l,r) -> look (if k land b = 0 then l else r) | _ -> d x in look f;; let apply f = applyd f (fun x -> failwith "apply");; let tryapplyd f a d = applyd f (fun x -> d) a;; let defined f x = try apply f x; true with Failure _ -> false;; (* ------------------------------------------------------------------------- *) (* Undefinition. *) (* ------------------------------------------------------------------------- *) let undefine = let rec undefine_list x l = match l with (a,b as ab)::t -> if x =? 
a then t else if x [] in fun x -> let k = Hashtbl.hash x in let rec und t = match t with Leaf(h,l) when h = k -> let l' = undefine_list x l in if l' == l then t else if l' = [] then Empty else Leaf(h,l') | Branch(p,b,l,r) when k land (b - 1) = p -> if k land b = 0 then let l' = und l in if l' == l then t else if is_undefined l' then r else Branch(p,b,l',r) else let r' = und r in if r' == r then t else if is_undefined r' then l else Branch(p,b,l,r') | _ -> t in und;; (* ------------------------------------------------------------------------- *) (* Mapping to sorted-list representation of the graph, domain and range. *) (* ------------------------------------------------------------------------- *) let graph f = setify (foldl (fun a x y -> (x,y)::a) [] f);; let dom f = setify(foldl (fun a x y -> x::a) [] f);; let ran f = setify(foldl (fun a x y -> y::a) [] f);; (* ------------------------------------------------------------------------- *) (* More parser basics. *) (* ------------------------------------------------------------------------- *) exception Noparse;; let isspace,issep,isbra,issymb,isalpha,isnum,isalnum = let charcode s = Char.code(String.get s 0) in let spaces = " \t\n\r" and separators = ",;" and brackets = "()[]{}" and symbs = "\\!@#$%^&*-+|\\<=>/?~.:" and alphas = "'abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ" and nums = "0123456789" in let allchars = spaces^separators^brackets^symbs^alphas^nums in let csetsize = itlist ((o) max charcode) (explode allchars) 256 in let ctable = Array.make csetsize 0 in do_list (fun c -> Array.set ctable (charcode c) 1) (explode spaces); do_list (fun c -> Array.set ctable (charcode c) 2) (explode separators); do_list (fun c -> Array.set ctable (charcode c) 4) (explode brackets); do_list (fun c -> Array.set ctable (charcode c) 8) (explode symbs); do_list (fun c -> Array.set ctable (charcode c) 16) (explode alphas); do_list (fun c -> Array.set ctable (charcode c) 32) (explode nums); let isspace c = Array.get ctable (charcode c) = 1 and issep c = Array.get ctable (charcode c) = 2 and isbra c = Array.get ctable (charcode c) = 4 and issymb c = Array.get ctable (charcode c) = 8 and isalpha c = Array.get ctable (charcode c) = 16 and isnum c = Array.get ctable (charcode c) = 32 and isalnum c = Array.get ctable (charcode c) >= 16 in isspace,issep,isbra,issymb,isalpha,isnum,isalnum;; let (||) parser1 parser2 input = try parser1 input with Noparse -> parser2 input;; let (++) parser1 parser2 input = let result1,rest1 = parser1 input in let result2,rest2 = parser2 rest1 in (result1,result2),rest2;; let rec many prs input = try let result,next = prs input in let results,rest = many prs next in (result::results),rest with Noparse -> [],input;; let (>>) prs treatment input = let result,rest = prs input in treatment(result),rest;; let fix err prs input = try prs input with Noparse -> failwith (err ^ " expected");; let rec listof prs sep err = prs ++ many (sep ++ fix err prs >> snd) >> (fun (h,t) -> h::t);; let possibly prs input = try let x,rest = prs input in [x],rest with Noparse -> [],input;; let some p = function [] -> raise Noparse | (h::t) -> if p h then (h,t) else raise Noparse;; let a tok = some (fun item -> item = tok);; let rec atleast n prs i = (if n <= 0 then many prs else prs ++ atleast (n - 1) prs >> (fun (h,t) -> h::t)) i;; let finished input = if input = [] then 0,input else failwith "Unparsed input";; (* ------------------------------------------------------------------------- *) let temp_path = ref Filename.temp_dir_name;; (* 
------------------------------------------------------------------------- *) (* Convenient conversion between files and (lists of) strings. *) (* ------------------------------------------------------------------------- *) let strings_of_file filename = let fd = try Pervasives.open_in filename with Sys_error _ -> failwith("strings_of_file: can't open "^filename) in let rec suck_lines acc = try let l = Pervasives.input_line fd in suck_lines (l::acc) with End_of_file -> rev acc in let data = suck_lines [] in (Pervasives.close_in fd; data);; let string_of_file filename = end_itlist (fun s t -> s^"\n"^t) (strings_of_file filename);; let file_of_string filename s = let fd = Pervasives.open_out filename in output_string fd s; close_out fd;; (* ------------------------------------------------------------------------- *) (* Iterative deepening. *) (* ------------------------------------------------------------------------- *) let rec deepen f n = try (*print_string "Searching with depth limit "; print_int n; print_newline();*) f n with Failure _ -> deepen f (n + 1);; exception TooDeep let deepen_until limit f n = match compare limit 0 with | 0 -> raise TooDeep | -1 -> deepen f n | _ -> let rec d_until f n = try(* if !debugging then (print_string "Searching with depth limit "; print_int n; print_newline()) ;*) f n with Failure x -> (*if !debugging then (Printf.printf "solver error : %s\n" x) ; *) if n = limit then raise TooDeep else d_until f (n + 1) in d_until f n coq-8.4pl2/plugins/micromega/g_micromega.ml40000640000175000001440000000440312010532755020143 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* anomaly "Unevaluated or_var variable" | ArgArg x -> x TACTIC EXTEND PsatzZ | [ "psatz_Z" int_or_var(i) ] -> [ Coq_micromega.psatz_Z (out_arg i) ] | [ "psatz_Z" ] -> [ Coq_micromega.psatz_Z (-1) ] END TACTIC EXTEND ZOmicron [ "xlia" ] -> [ Coq_micromega.xlia] END TACTIC EXTEND Nlia [ "xnlia" ] -> [ Coq_micromega.xnlia] END TACTIC EXTEND Sos_Z | [ "sos_Z" ] -> [ Coq_micromega.sos_Z] END TACTIC EXTEND Sos_Q | [ "sos_Q" ] -> [ Coq_micromega.sos_Q] END TACTIC EXTEND Sos_R | [ "sos_R" ] -> [ Coq_micromega.sos_R] END TACTIC EXTEND Omicron [ "psatzl_Z" ] -> [ Coq_micromega.psatzl_Z] END TACTIC EXTEND QOmicron [ "psatzl_Q" ] -> [ Coq_micromega.psatzl_Q] END TACTIC EXTEND ROmicron [ "psatzl_R" ] -> [ Coq_micromega.psatzl_R] END TACTIC EXTEND RMicromega | [ "psatz_R" int_or_var(i) ] -> [ Coq_micromega.psatz_R (out_arg i) ] | [ "psatz_R" ] -> [ Coq_micromega.psatz_R (-1) ] END TACTIC EXTEND QMicromega | [ "psatz_Q" int_or_var(i) ] -> [ Coq_micromega.psatz_Q (out_arg i) ] | [ "psatz_Q" ] -> [ Coq_micromega.psatz_Q (-1) ] END coq-8.4pl2/plugins/micromega/Psatz.v0000640000175000001440000001004312010532755016541 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (sos_Z || psatz_Z d) ; intros __wit __varmap __ff ; change (Tauto.eval_f (Zeval_formula (@find Z Z0 __varmap)) __ff) ; apply (ZTautoChecker_sound __ff __wit); vm_compute ; reflexivity | R => (sos_R || psatz_R d) ; (* If csdp is not installed, the previous step might not produce any progress: the rest of the tactical will then fail. Hence the 'try'. 
*) try (intros __wit __varmap __ff ; change (Tauto.eval_f (Reval_formula (@find R 0%R __varmap)) __ff) ; apply (RTautoChecker_sound __ff __wit); vm_compute ; reflexivity) | Q => (sos_Q || psatz_Q d) ; (* If csdp is not installed, the previous step might not produce any progress: the rest of the tactical will then fail. Hence the 'try'. *) try (intros __wit __varmap __ff ; change (Tauto.eval_f (Qeval_formula (@find Q 0%Q __varmap)) __ff) ; apply (QTautoChecker_sound __ff __wit); vm_compute ; reflexivity) | _ => fail "Unsupported domain" end in tac. Tactic Notation "psatz" constr(dom) int_or_var(n) := xpsatz dom n. Tactic Notation "psatz" constr(dom) := xpsatz dom ltac:-1. Ltac psatzl dom := let tac := lazymatch dom with | Z => psatzl_Z ; intros __wit __varmap __ff ; change (Tauto.eval_f (Zeval_formula (@find Z Z0 __varmap)) __ff) ; apply (ZTautoChecker_sound __ff __wit); vm_compute ; reflexivity | Q => psatzl_Q ; (* If csdp is not installed, the previous step might not produce any progress: the rest of the tactical will then fail. Hence the 'try'. *) try (intros __wit __varmap __ff ; change (Tauto.eval_f (Qeval_formula (@find Q 0%Q __varmap)) __ff) ; apply (QTautoChecker_sound __ff __wit); vm_compute ; reflexivity) | R => unfold Rdiv in * ; psatzl_R ; (* If csdp is not installed, the previous step might not produce any progress: the rest of the tactical will then fail. Hence the 'try'. *) try (intros __wit __varmap __ff ; change (Tauto.eval_f (Reval_formula (@find R 0%R __varmap)) __ff) ; apply (RTautoChecker_sound __ff __wit); vm_compute ; reflexivity) | _ => fail "Unsupported domain" end in tac. Ltac lra := first [ psatzl R | psatzl Q ]. Ltac lia := zify ; unfold Z.succ in * ; (*cbv delta - [Z.add Z.sub Z.opp Z.mul Z.pow Z.gt Z.ge Z.le Z.lt iff not] ;*) xlia ; intros __wit __varmap __ff ; change (Tauto.eval_f (Zeval_formula (@find Z Z0 __varmap)) __ff) ; apply (ZTautoChecker_sound __ff __wit); vm_compute ; reflexivity. Ltac nia := zify ; unfold Z.succ in * ; xnlia ; intros __wit __varmap __ff ; change (Tauto.eval_f (Zeval_formula (@find Z Z0 __varmap)) __ff) ; apply (ZTautoChecker_sound __ff __wit); vm_compute ; reflexivity. (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/OrderedRing.v0000640000175000001440000003345212010532755017655 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R) (ropp : R -> R). Variable req rle rlt : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (rplus x y). Notation "x * y " := (rtimes x y). Notation "x - y " := (rminus x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). Notation "x ~= y" := (~ req x y). Notation "x <= y" := (rle x y). Notation "x < y" := (rlt x y). 
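(* Editorial note, not part of the original file: the record below packages
   the assumptions the micromega development makes about an ordered ring over
   the abstract signature (rO, rI, rplus, rtimes, rminus, ropp, req, rle, rlt)
   declared above: a setoid structure for req, compatibility of the operations
   and relations with req, a ring_theory, and the order axioms.  Thanks to the
   notations just introduced, a field such as SORtimes_pos_pos reads
   "0 < n -> 0 < m -> 0 < n * m" instead of
   "rlt rO n -> rlt rO m -> rlt rO (rtimes n m)".  Concrete instances for
   coefficient types such as Z, Q and R are built in the rest of the plugin. *)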
Record SOR : Type := mk_SOR_theory { SORsetoid : Setoid_Theory R req; SORplus_wd : forall x1 x2, x1 == x2 -> forall y1 y2, y1 == y2 -> x1 + y1 == x2 + y2; SORtimes_wd : forall x1 x2, x1 == x2 -> forall y1 y2, y1 == y2 -> x1 * y1 == x2 * y2; SORopp_wd : forall x1 x2, x1 == x2 -> -x1 == -x2; SORle_wd : forall x1 x2, x1 == x2 -> forall y1 y2, y1 == y2 -> (x1 <= y1 <-> x2 <= y2); SORlt_wd : forall x1 x2, x1 == x2 -> forall y1 y2, y1 == y2 -> (x1 < y1 <-> x2 < y2); SORrt : ring_theory rO rI rplus rtimes rminus ropp req; SORle_refl : forall n : R, n <= n; SORle_antisymm : forall n m : R, n <= m -> m <= n -> n == m; SORle_trans : forall n m p : R, n <= m -> m <= p -> n <= p; SORlt_le_neq : forall n m : R, n < m <-> n <= m /\ n ~= m; SORlt_trichotomy : forall n m : R, n < m \/ n == m \/ m < n; SORplus_le_mono_l : forall n m p : R, n <= m -> p + n <= p + m; SORtimes_pos_pos : forall n m : R, 0 < n -> 0 < m -> 0 < n * m; SORneq_0_1 : 0 ~= 1 }. (* We cannot use Relation_Definitions.order.ord_antisym and Relations_1.Antisymmetric because they refer to Leibniz equality *) End DEFINITIONS. Section STRICT_ORDERED_RING. Variable R : Type. Variable (rO rI : R) (rplus rtimes rminus: R -> R -> R) (ropp : R -> R). Variable req rle rlt : R -> R -> Prop. Variable sor : SOR rO rI rplus rtimes rminus ropp req rle rlt. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (rplus x y). Notation "x * y " := (rtimes x y). Notation "x - y " := (rminus x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). Notation "x ~= y" := (~ req x y). Notation "x <= y" := (rle x y). Notation "x < y" := (rlt x y). Add Relation R req reflexivity proved by sor.(SORsetoid).(@Equivalence_Reflexive _ _ ) symmetry proved by sor.(SORsetoid).(@Equivalence_Symmetric _ _ ) transitivity proved by sor.(SORsetoid).(@Equivalence_Transitive _ _ ) as sor_setoid. Add Morphism rplus with signature req ==> req ==> req as rplus_morph. Proof. exact sor.(SORplus_wd). Qed. Add Morphism rtimes with signature req ==> req ==> req as rtimes_morph. Proof. exact sor.(SORtimes_wd). Qed. Add Morphism ropp with signature req ==> req as ropp_morph. Proof. exact sor.(SORopp_wd). Qed. Add Morphism rle with signature req ==> req ==> iff as rle_morph. Proof. exact sor.(SORle_wd). Qed. Add Morphism rlt with signature req ==> req ==> iff as rlt_morph. Proof. exact sor.(SORlt_wd). Qed. Add Ring SOR : sor.(SORrt). Add Morphism rminus with signature req ==> req ==> req as rminus_morph. Proof. intros x1 x2 H1 y1 y2 H2. rewrite (sor.(SORrt).(Rsub_def) x1 y1). rewrite (sor.(SORrt).(Rsub_def) x2 y2). rewrite H1; now rewrite H2. Qed. Theorem Rneq_symm : forall n m : R, n ~= m -> m ~= n. Proof. intros n m H1 H2; rewrite H2 in H1; now apply H1. Qed. (* Propeties of plus, minus and opp *) Theorem Rplus_0_l : forall n : R, 0 + n == n. Proof. intro; ring. Qed. Theorem Rplus_0_r : forall n : R, n + 0 == n. Proof. intro; ring. Qed. Theorem Rtimes_0_r : forall n : R, n * 0 == 0. Proof. intro; ring. Qed. Theorem Rplus_comm : forall n m : R, n + m == m + n. Proof. intros; ring. Qed. Theorem Rtimes_0_l : forall n : R, 0 * n == 0. Proof. intro; ring. Qed. Theorem Rtimes_comm : forall n m : R, n * m == m * n. Proof. intros; ring. Qed. Theorem Rminus_eq_0 : forall n m : R, n - m == 0 <-> n == m. Proof. intros n m. split; intro H. setoid_replace n with ((n - m) + m) by ring. rewrite H. now rewrite Rplus_0_l. rewrite H; ring. Qed. Theorem Rplus_cancel_l : forall n m p : R, p + n == p + m <-> n == m. Proof. intros n m p; split; intro H. 
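(* Editorial comment, not in the original proof script: both directions are
   proved in turn.  For the left-to-right direction, n and m are rewritten as
   - p + (p + n) and - p + (p + m) with [setoid_replace ... by ring], so that
   the hypothesis p + n == p + m closes the goal by rewriting; the converse
   direction follows directly by rewriting with H. *)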
setoid_replace n with (- p + (p + n)) by ring. setoid_replace m with (- p + (p + m)) by ring. now rewrite H. now rewrite H. Qed. (* Relations *) Theorem Rle_refl : forall n : R, n <= n. Proof sor.(SORle_refl). Theorem Rle_antisymm : forall n m : R, n <= m -> m <= n -> n == m. Proof sor.(SORle_antisymm). Theorem Rle_trans : forall n m p : R, n <= m -> m <= p -> n <= p. Proof sor.(SORle_trans). Theorem Rlt_trichotomy : forall n m : R, n < m \/ n == m \/ m < n. Proof sor.(SORlt_trichotomy). Theorem Rlt_le_neq : forall n m : R, n < m <-> n <= m /\ n ~= m. Proof sor.(SORlt_le_neq). Theorem Rneq_0_1 : 0 ~= 1. Proof sor.(SORneq_0_1). Theorem Req_em : forall n m : R, n == m \/ n ~= m. Proof. intros n m. destruct (Rlt_trichotomy n m) as [H | [H | H]]; try rewrite Rlt_le_neq in H. right; now destruct H. now left. right; apply Rneq_symm; now destruct H. Qed. Theorem Req_dne : forall n m : R, ~ ~ n == m <-> n == m. Proof. intros n m; destruct (Req_em n m) as [H | H]. split; auto. split. intro H1; false_hyp H H1. auto. Qed. Theorem Rle_lt_eq : forall n m : R, n <= m <-> n < m \/ n == m. Proof. intros n m; rewrite Rlt_le_neq. split; [intro H | intros [[H1 H2] | H]]. destruct (Req_em n m) as [H1 | H1]. now right. left; now split. assumption. rewrite H; apply Rle_refl. Qed. Ltac le_less := rewrite Rle_lt_eq; left; try assumption. Ltac le_equal := rewrite Rle_lt_eq; right; try reflexivity; try assumption. Ltac le_elim H := rewrite Rle_lt_eq in H; destruct H as [H | H]. Theorem Rlt_trans : forall n m p : R, n < m -> m < p -> n < p. Proof. intros n m p; repeat rewrite Rlt_le_neq; intros [H1 H2] [H3 H4]; split. now apply Rle_trans with m. intro H. rewrite H in H1. pose proof (Rle_antisymm H3 H1). now apply H4. Qed. Theorem Rle_lt_trans : forall n m p : R, n <= m -> m < p -> n < p. Proof. intros n m p H1 H2; le_elim H1. now apply Rlt_trans with (m := m). now rewrite H1. Qed. Theorem Rlt_le_trans : forall n m p : R, n < m -> m <= p -> n < p. Proof. intros n m p H1 H2; le_elim H2. now apply Rlt_trans with (m := m). now rewrite <- H2. Qed. Theorem Rle_gt_cases : forall n m : R, n <= m \/ m < n. Proof. intros n m; destruct (Rlt_trichotomy n m) as [H | [H | H]]. left; now le_less. left; now le_equal. now right. Qed. Theorem Rlt_neq : forall n m : R, n < m -> n ~= m. Proof. intros n m; rewrite Rlt_le_neq; now intros [_ H]. Qed. Theorem Rle_ngt : forall n m : R, n <= m <-> ~ m < n. Proof. intros n m; split. intros H H1; assert (H2 : n < n) by now apply Rle_lt_trans with m. now apply (Rlt_neq H2). intro H. destruct (Rle_gt_cases n m) as [H1 | H1]. assumption. false_hyp H1 H. Qed. Theorem Rlt_nge : forall n m : R, n < m <-> ~ m <= n. Proof. intros n m; split. intros H H1; assert (H2 : n < n) by now apply Rlt_le_trans with m. now apply (Rlt_neq H2). intro H. destruct (Rle_gt_cases m n) as [H1 | H1]. false_hyp H1 H. assumption. Qed. (* Plus, minus and order *) Theorem Rplus_le_mono_l : forall n m p : R, n <= m <-> p + n <= p + m. Proof. intros n m p; split. apply sor.(SORplus_le_mono_l). intro H. apply (sor.(SORplus_le_mono_l) (p + n) (p + m) (- p)) in H. setoid_replace (- p + (p + n)) with n in H by ring. setoid_replace (- p + (p + m)) with m in H by ring. assumption. Qed. Theorem Rplus_le_mono_r : forall n m p : R, n <= m <-> n + p <= m + p. Proof. intros n m p; rewrite (Rplus_comm n p); rewrite (Rplus_comm m p). apply Rplus_le_mono_l. Qed. Theorem Rplus_lt_mono_l : forall n m p : R, n < m <-> p + n < p + m. Proof. intros n m p; do 2 rewrite Rlt_le_neq. rewrite Rplus_cancel_l. now rewrite <- Rplus_le_mono_l. Qed. 
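(* Editorial example, not part of the original file: a typical use of the
   monotonicity lemma proved just above, shifting a strict inequality by the
   constant 1.  It relies only on lemmas already available at this point. *)
Remark Rplus_lt_mono_l_example : forall n m : R, n < m -> 1 + n < 1 + m.
Proof.
intros n m H. now apply -> Rplus_lt_mono_l.
Qed.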
Theorem Rplus_lt_mono_r : forall n m p : R, n < m <-> n + p < m + p. Proof. intros n m p. rewrite (Rplus_comm n p); rewrite (Rplus_comm m p); apply Rplus_lt_mono_l. Qed. Theorem Rplus_lt_mono : forall n m p q : R, n < m -> p < q -> n + p < m + q. Proof. intros n m p q H1 H2. apply Rlt_trans with (m + p); [now apply -> Rplus_lt_mono_r | now apply -> Rplus_lt_mono_l]. Qed. Theorem Rplus_le_mono : forall n m p q : R, n <= m -> p <= q -> n + p <= m + q. Proof. intros n m p q H1 H2. apply Rle_trans with (m + p); [now apply -> Rplus_le_mono_r | now apply -> Rplus_le_mono_l]. Qed. Theorem Rplus_lt_le_mono : forall n m p q : R, n < m -> p <= q -> n + p < m + q. Proof. intros n m p q H1 H2. apply Rlt_le_trans with (m + p); [now apply -> Rplus_lt_mono_r | now apply -> Rplus_le_mono_l]. Qed. Theorem Rplus_le_lt_mono : forall n m p q : R, n <= m -> p < q -> n + p < m + q. Proof. intros n m p q H1 H2. apply Rle_lt_trans with (m + p); [now apply -> Rplus_le_mono_r | now apply -> Rplus_lt_mono_l]. Qed. Theorem Rplus_pos_pos : forall n m : R, 0 < n -> 0 < m -> 0 < n + m. Proof. intros n m H1 H2. rewrite <- (Rplus_0_l 0). now apply Rplus_lt_mono. Qed. Theorem Rplus_pos_nonneg : forall n m : R, 0 < n -> 0 <= m -> 0 < n + m. Proof. intros n m H1 H2. rewrite <- (Rplus_0_l 0). now apply Rplus_lt_le_mono. Qed. Theorem Rplus_nonneg_pos : forall n m : R, 0 <= n -> 0 < m -> 0 < n + m. Proof. intros n m H1 H2. rewrite <- (Rplus_0_l 0). now apply Rplus_le_lt_mono. Qed. Theorem Rplus_nonneg_nonneg : forall n m : R, 0 <= n -> 0 <= m -> 0 <= n + m. Proof. intros n m H1 H2. rewrite <- (Rplus_0_l 0). now apply Rplus_le_mono. Qed. Theorem Rle_le_minus : forall n m : R, n <= m <-> 0 <= m - n. Proof. intros n m. rewrite (@Rplus_le_mono_r n m (- n)). setoid_replace (n + - n) with 0 by ring. now setoid_replace (m + - n) with (m - n) by ring. Qed. Theorem Rlt_lt_minus : forall n m : R, n < m <-> 0 < m - n. Proof. intros n m. rewrite (@Rplus_lt_mono_r n m (- n)). setoid_replace (n + - n) with 0 by ring. now setoid_replace (m + - n) with (m - n) by ring. Qed. Theorem Ropp_lt_mono : forall n m : R, n < m <-> - m < - n. Proof. intros n m. split; intro H. apply -> (@Rplus_lt_mono_l n m (- n - m)) in H. setoid_replace (- n - m + n) with (- m) in H by ring. now setoid_replace (- n - m + m) with (- n) in H by ring. apply -> (@Rplus_lt_mono_l (- m) (- n) (n + m)) in H. setoid_replace (n + m + - m) with n in H by ring. now setoid_replace (n + m + - n) with m in H by ring. Qed. Theorem Ropp_pos_neg : forall n : R, 0 < - n <-> n < 0. Proof. intro n; rewrite (Ropp_lt_mono n 0). now setoid_replace (- 0) with 0 by ring. Qed. (* Times and order *) Theorem Rtimes_pos_pos : forall n m : R, 0 < n -> 0 < m -> 0 < n * m. Proof sor.(SORtimes_pos_pos). Theorem Rtimes_nonneg_nonneg : forall n m : R, 0 <= n -> 0 <= m -> 0 <= n * m. Proof. intros n m H1 H2. le_elim H1. le_elim H2. le_less; now apply Rtimes_pos_pos. rewrite <- H2; rewrite Rtimes_0_r; le_equal. rewrite <- H1; rewrite Rtimes_0_l; le_equal. Qed. Theorem Rtimes_pos_neg : forall n m : R, 0 < n -> m < 0 -> n * m < 0. Proof. intros n m H1 H2. apply -> Ropp_pos_neg. setoid_replace (- (n * m)) with (n * (- m)) by ring. apply Rtimes_pos_pos. assumption. now apply <- Ropp_pos_neg. Qed. Theorem Rtimes_neg_neg : forall n m : R, n < 0 -> m < 0 -> 0 < n * m. Proof. intros n m H1 H2. setoid_replace (n * m) with ((- n) * (- m)) by ring. apply Rtimes_pos_pos; now apply <- Ropp_pos_neg. Qed. Theorem Rtimes_square_nonneg : forall n : R, 0 <= n * n. Proof. 
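(* Editorial comment, not in the original proof script: the proof is by
   trichotomy on the sign of n.  If 0 < n the square is positive by
   Rtimes_pos_pos; if 0 == n the goal becomes 0 <= 0 after rewriting with
   Rtimes_0_l; if n < 0 the product of two negatives is positive by
   Rtimes_neg_neg. *)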
intro n; destruct (Rlt_trichotomy 0 n) as [H | [H | H]]. le_less; now apply Rtimes_pos_pos. rewrite <- H, Rtimes_0_l; le_equal. le_less; now apply Rtimes_neg_neg. Qed. Theorem Rtimes_neq_0 : forall n m : R, n ~= 0 /\ m ~= 0 -> n * m ~= 0. Proof. intros n m [H1 H2]. destruct (Rlt_trichotomy n 0) as [H3 | [H3 | H3]]; destruct (Rlt_trichotomy m 0) as [H4 | [H4 | H4]]; try (false_hyp H3 H1); try (false_hyp H4 H2). apply Rneq_symm. apply Rlt_neq. now apply Rtimes_neg_neg. apply Rlt_neq. rewrite Rtimes_comm. now apply Rtimes_pos_neg. apply Rlt_neq. now apply Rtimes_pos_neg. apply Rneq_symm. apply Rlt_neq. now apply Rtimes_pos_pos. Qed. (* The following theorems are used to build a morphism from Z to R and prove its properties in ZCoeff.v. They are not used in RingMicromega.v. *) (* Surprisingly, multilication is needed to prove the following theorem *) Theorem Ropp_neg_pos : forall n : R, - n < 0 <-> 0 < n. Proof. intro n; setoid_replace n with (- - n) by ring. rewrite Ropp_pos_neg. now setoid_replace (- - n) with n by ring. Qed. Theorem Rlt_0_1 : 0 < 1. Proof. apply <- Rlt_le_neq. split. setoid_replace 1 with (1 * 1) by ring. apply Rtimes_square_nonneg. apply Rneq_0_1. Qed. Theorem Rlt_succ_r : forall n : R, n < 1 + n. Proof. intro n. rewrite <- (Rplus_0_l n); setoid_replace (1 + (0 + n)) with (1 + n) by ring. apply -> Rplus_lt_mono_r. apply Rlt_0_1. Qed. Theorem Rlt_lt_succ : forall n m : R, n < m -> n < 1 + m. Proof. intros n m H; apply Rlt_trans with m. assumption. apply Rlt_succ_r. Qed. (*Theorem Rtimes_lt_mono_pos_l : forall n m p : R, 0 < p -> n < m -> p * n < p * m. Proof. intros n m p H1 H2. apply <- Rlt_lt_minus. setoid_replace (p * m - p * n) with (p * (m - n)) by ring. apply Rtimes_pos_pos. assumption. now apply -> Rlt_lt_minus. Qed.*) End STRICT_ORDERED_RING. 
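(* Editorial addition, not part of the original file: after the section is
   closed, each lemma above is abstracted over the section variables it uses,
   in practice the whole signature together with a proof of SOR; this is the
   form in which RingMicromega.v and ZCoeff.v reuse them for concrete
   coefficient types.  The Check below merely displays one such generalized
   statement and has no effect on the development. *)
Check Rtimes_square_nonneg.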
coq-8.4pl2/plugins/micromega/micromega.ml0000640000175000001440000030245211565517202017562 0ustar notinuserstype __ = Obj.t let __ = let rec f _ = Obj.repr f in Obj.repr f (** val negb : bool -> bool **) let negb = function | true -> false | false -> true type nat = | O | S of nat (** val fst : ('a1 * 'a2) -> 'a1 **) let fst = function | x,y -> x (** val snd : ('a1 * 'a2) -> 'a2 **) let snd = function | x,y -> y (** val app : 'a1 list -> 'a1 list -> 'a1 list **) let rec app l m = match l with | [] -> m | a::l1 -> a::(app l1 m) type comparison = | Eq | Lt | Gt (** val compOpp : comparison -> comparison **) let compOpp = function | Eq -> Eq | Lt -> Gt | Gt -> Lt type compareSpecT = | CompEqT | CompLtT | CompGtT (** val compareSpec2Type : comparison -> compareSpecT **) let compareSpec2Type = function | Eq -> CompEqT | Lt -> CompLtT | Gt -> CompGtT type 'a compSpecT = compareSpecT (** val compSpec2Type : 'a1 -> 'a1 -> comparison -> 'a1 compSpecT **) let compSpec2Type x y c = compareSpec2Type c type 'a sig0 = 'a (* singleton inductive, whose constructor was exist *) (** val plus : nat -> nat -> nat **) let rec plus n0 m = match n0 with | O -> m | S p -> S (plus p m) (** val nat_iter : nat -> ('a1 -> 'a1) -> 'a1 -> 'a1 **) let rec nat_iter n0 f x = match n0 with | O -> x | S n' -> f (nat_iter n' f x) type positive = | XI of positive | XO of positive | XH type n = | N0 | Npos of positive type z = | Z0 | Zpos of positive | Zneg of positive module type TotalOrder' = sig type t end module MakeOrderTac = functor (O:TotalOrder') -> struct end module MaxLogicalProperties = functor (O:TotalOrder') -> functor (M:sig val max : O.t -> O.t -> O.t end) -> struct module T = MakeOrderTac(O) end module Pos = struct type t = positive (** val succ : positive -> positive **) let rec succ = function | XI p -> XO (succ p) | XO p -> XI p | XH -> XO XH (** val add : positive -> positive -> positive **) let rec add x y = match x with | XI p -> (match y with | XI q0 -> XO (add_carry p q0) | XO q0 -> XI (add p q0) | XH -> XO (succ p)) | XO p -> (match y with | XI q0 -> XI (add p q0) | XO q0 -> XO (add p q0) | XH -> XI p) | XH -> (match y with | XI q0 -> XO (succ q0) | XO q0 -> XI q0 | XH -> XO XH) (** val add_carry : positive -> positive -> positive **) and add_carry x y = match x with | XI p -> (match y with | XI q0 -> XI (add_carry p q0) | XO q0 -> XO (add_carry p q0) | XH -> XI (succ p)) | XO p -> (match y with | XI q0 -> XO (add_carry p q0) | XO q0 -> XI (add p q0) | XH -> XO (succ p)) | XH -> (match y with | XI q0 -> XI (succ q0) | XO q0 -> XO (succ q0) | XH -> XI XH) (** val pred_double : positive -> positive **) let rec pred_double = function | XI p -> XI (XO p) | XO p -> XI (pred_double p) | XH -> XH (** val pred : positive -> positive **) let pred = function | XI p -> XO p | XO p -> pred_double p | XH -> XH (** val pred_N : positive -> n **) let pred_N = function | XI p -> Npos (XO p) | XO p -> Npos (pred_double p) | XH -> N0 type mask = | IsNul | IsPos of positive | IsNeg (** val mask_rect : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 **) let mask_rect f f0 f1 = function | IsNul -> f | IsPos x -> f0 x | IsNeg -> f1 (** val mask_rec : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 **) let mask_rec f f0 f1 = function | IsNul -> f | IsPos x -> f0 x | IsNeg -> f1 (** val succ_double_mask : mask -> mask **) let succ_double_mask = function | IsNul -> IsPos XH | IsPos p -> IsPos (XI p) | IsNeg -> IsNeg (** val double_mask : mask -> mask **) let double_mask = function | IsPos p -> IsPos (XO p) | x0 -> x0 (** val 
double_pred_mask : positive -> mask **) let double_pred_mask = function | XI p -> IsPos (XO (XO p)) | XO p -> IsPos (XO (pred_double p)) | XH -> IsNul (** val pred_mask : mask -> mask **) let pred_mask = function | IsPos q0 -> (match q0 with | XH -> IsNul | _ -> IsPos (pred q0)) | _ -> IsNeg (** val sub_mask : positive -> positive -> mask **) let rec sub_mask x y = match x with | XI p -> (match y with | XI q0 -> double_mask (sub_mask p q0) | XO q0 -> succ_double_mask (sub_mask p q0) | XH -> IsPos (XO p)) | XO p -> (match y with | XI q0 -> succ_double_mask (sub_mask_carry p q0) | XO q0 -> double_mask (sub_mask p q0) | XH -> IsPos (pred_double p)) | XH -> (match y with | XH -> IsNul | _ -> IsNeg) (** val sub_mask_carry : positive -> positive -> mask **) and sub_mask_carry x y = match x with | XI p -> (match y with | XI q0 -> succ_double_mask (sub_mask_carry p q0) | XO q0 -> double_mask (sub_mask p q0) | XH -> IsPos (pred_double p)) | XO p -> (match y with | XI q0 -> double_mask (sub_mask_carry p q0) | XO q0 -> succ_double_mask (sub_mask_carry p q0) | XH -> double_pred_mask p) | XH -> IsNeg (** val sub : positive -> positive -> positive **) let sub x y = match sub_mask x y with | IsPos z0 -> z0 | _ -> XH (** val mul : positive -> positive -> positive **) let rec mul x y = match x with | XI p -> add y (XO (mul p y)) | XO p -> XO (mul p y) | XH -> y (** val iter : positive -> ('a1 -> 'a1) -> 'a1 -> 'a1 **) let rec iter n0 f x = match n0 with | XI n' -> f (iter n' f (iter n' f x)) | XO n' -> iter n' f (iter n' f x) | XH -> f x (** val pow : positive -> positive -> positive **) let pow x y = iter y (mul x) XH (** val div2 : positive -> positive **) let div2 = function | XI p2 -> p2 | XO p2 -> p2 | XH -> XH (** val div2_up : positive -> positive **) let div2_up = function | XI p2 -> succ p2 | XO p2 -> p2 | XH -> XH (** val size_nat : positive -> nat **) let rec size_nat = function | XI p2 -> S (size_nat p2) | XO p2 -> S (size_nat p2) | XH -> S O (** val size : positive -> positive **) let rec size = function | XI p2 -> succ (size p2) | XO p2 -> succ (size p2) | XH -> XH (** val compare_cont : positive -> positive -> comparison -> comparison **) let rec compare_cont x y r = match x with | XI p -> (match y with | XI q0 -> compare_cont p q0 r | XO q0 -> compare_cont p q0 Gt | XH -> Gt) | XO p -> (match y with | XI q0 -> compare_cont p q0 Lt | XO q0 -> compare_cont p q0 r | XH -> Gt) | XH -> (match y with | XH -> r | _ -> Lt) (** val compare : positive -> positive -> comparison **) let compare x y = compare_cont x y Eq (** val min : positive -> positive -> positive **) let min p p' = match compare p p' with | Gt -> p' | _ -> p (** val max : positive -> positive -> positive **) let max p p' = match compare p p' with | Gt -> p | _ -> p' (** val eqb : positive -> positive -> bool **) let rec eqb p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> eqb p2 q1 | _ -> false) | XO p2 -> (match q0 with | XO q1 -> eqb p2 q1 | _ -> false) | XH -> (match q0 with | XH -> true | _ -> false) (** val leb : positive -> positive -> bool **) let leb x y = match compare x y with | Gt -> false | _ -> true (** val ltb : positive -> positive -> bool **) let ltb x y = match compare x y with | Lt -> true | _ -> false (** val sqrtrem_step : (positive -> positive) -> (positive -> positive) -> (positive * mask) -> positive * mask **) let sqrtrem_step f g = function | s,y -> (match y with | IsPos r -> let s' = XI (XO s) in let r' = g (f r) in if leb s' r' then (XI s),(sub_mask r' s') else (XO s),(IsPos r') | _ -> (XO 
s),(sub_mask (g (f XH)) (XO (XO XH)))) (** val sqrtrem : positive -> positive * mask **) let rec sqrtrem = function | XI p2 -> (match p2 with | XI p3 -> sqrtrem_step (fun x -> XI x) (fun x -> XI x) (sqrtrem p3) | XO p3 -> sqrtrem_step (fun x -> XO x) (fun x -> XI x) (sqrtrem p3) | XH -> XH,(IsPos (XO XH))) | XO p2 -> (match p2 with | XI p3 -> sqrtrem_step (fun x -> XI x) (fun x -> XO x) (sqrtrem p3) | XO p3 -> sqrtrem_step (fun x -> XO x) (fun x -> XO x) (sqrtrem p3) | XH -> XH,(IsPos XH)) | XH -> XH,IsNul (** val sqrt : positive -> positive **) let sqrt p = fst (sqrtrem p) (** val gcdn : nat -> positive -> positive -> positive **) let rec gcdn n0 a b = match n0 with | O -> XH | S n1 -> (match a with | XI a' -> (match b with | XI b' -> (match compare a' b' with | Eq -> a | Lt -> gcdn n1 (sub b' a') a | Gt -> gcdn n1 (sub a' b') b) | XO b0 -> gcdn n1 a b0 | XH -> XH) | XO a0 -> (match b with | XI p -> gcdn n1 a0 b | XO b0 -> XO (gcdn n1 a0 b0) | XH -> XH) | XH -> XH) (** val gcd : positive -> positive -> positive **) let gcd a b = gcdn (plus (size_nat a) (size_nat b)) a b (** val ggcdn : nat -> positive -> positive -> positive * (positive * positive) **) let rec ggcdn n0 a b = match n0 with | O -> XH,(a,b) | S n1 -> (match a with | XI a' -> (match b with | XI b' -> (match compare a' b' with | Eq -> a,(XH,XH) | Lt -> let g,p = ggcdn n1 (sub b' a') a in let ba,aa = p in g,(aa,(add aa (XO ba))) | Gt -> let g,p = ggcdn n1 (sub a' b') b in let ab,bb = p in g,((add bb (XO ab)),bb)) | XO b0 -> let g,p = ggcdn n1 a b0 in let aa,bb = p in g,(aa,(XO bb)) | XH -> XH,(a,XH)) | XO a0 -> (match b with | XI p -> let g,p2 = ggcdn n1 a0 b in let aa,bb = p2 in g,((XO aa),bb) | XO b0 -> let g,p = ggcdn n1 a0 b0 in (XO g),p | XH -> XH,(a,XH)) | XH -> XH,(XH,b)) (** val ggcd : positive -> positive -> positive * (positive * positive) **) let ggcd a b = ggcdn (plus (size_nat a) (size_nat b)) a b (** val coq_Nsucc_double : n -> n **) let coq_Nsucc_double = function | N0 -> Npos XH | Npos p -> Npos (XI p) (** val coq_Ndouble : n -> n **) let coq_Ndouble = function | N0 -> N0 | Npos p -> Npos (XO p) (** val coq_lor : positive -> positive -> positive **) let rec coq_lor p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> XI (coq_lor p2 q1) | XO q1 -> XI (coq_lor p2 q1) | XH -> p) | XO p2 -> (match q0 with | XI q1 -> XI (coq_lor p2 q1) | XO q1 -> XO (coq_lor p2 q1) | XH -> XI p2) | XH -> (match q0 with | XO q1 -> XI q1 | _ -> q0) (** val coq_land : positive -> positive -> n **) let rec coq_land p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> coq_Nsucc_double (coq_land p2 q1) | XO q1 -> coq_Ndouble (coq_land p2 q1) | XH -> Npos XH) | XO p2 -> (match q0 with | XI q1 -> coq_Ndouble (coq_land p2 q1) | XO q1 -> coq_Ndouble (coq_land p2 q1) | XH -> N0) | XH -> (match q0 with | XO q1 -> N0 | _ -> Npos XH) (** val ldiff : positive -> positive -> n **) let rec ldiff p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> coq_Ndouble (ldiff p2 q1) | XO q1 -> coq_Nsucc_double (ldiff p2 q1) | XH -> Npos (XO p2)) | XO p2 -> (match q0 with | XI q1 -> coq_Ndouble (ldiff p2 q1) | XO q1 -> coq_Ndouble (ldiff p2 q1) | XH -> Npos p) | XH -> (match q0 with | XO q1 -> Npos XH | _ -> N0) (** val coq_lxor : positive -> positive -> n **) let rec coq_lxor p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> coq_Ndouble (coq_lxor p2 q1) | XO q1 -> coq_Nsucc_double (coq_lxor p2 q1) | XH -> Npos (XO p2)) | XO p2 -> (match q0 with | XI q1 -> coq_Nsucc_double (coq_lxor p2 q1) | XO q1 -> coq_Ndouble (coq_lxor p2 q1) | XH -> Npos 
(XI p2)) | XH -> (match q0 with | XI q1 -> Npos (XO q1) | XO q1 -> Npos (XI q1) | XH -> N0) (** val shiftl_nat : positive -> nat -> positive **) let shiftl_nat p n0 = nat_iter n0 (fun x -> XO x) p (** val shiftr_nat : positive -> nat -> positive **) let shiftr_nat p n0 = nat_iter n0 div2 p (** val shiftl : positive -> n -> positive **) let shiftl p = function | N0 -> p | Npos n1 -> iter n1 (fun x -> XO x) p (** val shiftr : positive -> n -> positive **) let shiftr p = function | N0 -> p | Npos n1 -> iter n1 div2 p (** val testbit_nat : positive -> nat -> bool **) let rec testbit_nat p n0 = match p with | XI p2 -> (match n0 with | O -> true | S n' -> testbit_nat p2 n') | XO p2 -> (match n0 with | O -> false | S n' -> testbit_nat p2 n') | XH -> (match n0 with | O -> true | S n1 -> false) (** val testbit : positive -> n -> bool **) let rec testbit p n0 = match p with | XI p2 -> (match n0 with | N0 -> true | Npos n1 -> testbit p2 (pred_N n1)) | XO p2 -> (match n0 with | N0 -> false | Npos n1 -> testbit p2 (pred_N n1)) | XH -> (match n0 with | N0 -> true | Npos p2 -> false) (** val iter_op : ('a1 -> 'a1 -> 'a1) -> positive -> 'a1 -> 'a1 **) let rec iter_op op p a = match p with | XI p2 -> op a (iter_op op p2 (op a a)) | XO p2 -> iter_op op p2 (op a a) | XH -> a (** val to_nat : positive -> nat **) let to_nat x = iter_op plus x (S O) (** val of_nat : nat -> positive **) let rec of_nat = function | O -> XH | S x -> (match x with | O -> XH | S n1 -> succ (of_nat x)) (** val of_succ_nat : nat -> positive **) let rec of_succ_nat = function | O -> XH | S x -> succ (of_succ_nat x) end module Coq_Pos = struct module Coq__1 = struct type t = positive end type t = Coq__1.t (** val succ : positive -> positive **) let rec succ = function | XI p -> XO (succ p) | XO p -> XI p | XH -> XO XH (** val add : positive -> positive -> positive **) let rec add x y = match x with | XI p -> (match y with | XI q0 -> XO (add_carry p q0) | XO q0 -> XI (add p q0) | XH -> XO (succ p)) | XO p -> (match y with | XI q0 -> XI (add p q0) | XO q0 -> XO (add p q0) | XH -> XI p) | XH -> (match y with | XI q0 -> XO (succ q0) | XO q0 -> XI q0 | XH -> XO XH) (** val add_carry : positive -> positive -> positive **) and add_carry x y = match x with | XI p -> (match y with | XI q0 -> XI (add_carry p q0) | XO q0 -> XO (add_carry p q0) | XH -> XI (succ p)) | XO p -> (match y with | XI q0 -> XO (add_carry p q0) | XO q0 -> XI (add p q0) | XH -> XO (succ p)) | XH -> (match y with | XI q0 -> XI (succ q0) | XO q0 -> XO (succ q0) | XH -> XI XH) (** val pred_double : positive -> positive **) let rec pred_double = function | XI p -> XI (XO p) | XO p -> XI (pred_double p) | XH -> XH (** val pred : positive -> positive **) let pred = function | XI p -> XO p | XO p -> pred_double p | XH -> XH (** val pred_N : positive -> n **) let pred_N = function | XI p -> Npos (XO p) | XO p -> Npos (pred_double p) | XH -> N0 type mask = Pos.mask = | IsNul | IsPos of positive | IsNeg (** val mask_rect : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 **) let mask_rect f f0 f1 = function | IsNul -> f | IsPos x -> f0 x | IsNeg -> f1 (** val mask_rec : 'a1 -> (positive -> 'a1) -> 'a1 -> mask -> 'a1 **) let mask_rec f f0 f1 = function | IsNul -> f | IsPos x -> f0 x | IsNeg -> f1 (** val succ_double_mask : mask -> mask **) let succ_double_mask = function | IsNul -> IsPos XH | IsPos p -> IsPos (XI p) | IsNeg -> IsNeg (** val double_mask : mask -> mask **) let double_mask = function | IsPos p -> IsPos (XO p) | x0 -> x0 (** val double_pred_mask : positive -> mask **) let 
double_pred_mask = function | XI p -> IsPos (XO (XO p)) | XO p -> IsPos (XO (pred_double p)) | XH -> IsNul (** val pred_mask : mask -> mask **) let pred_mask = function | IsPos q0 -> (match q0 with | XH -> IsNul | _ -> IsPos (pred q0)) | _ -> IsNeg (** val sub_mask : positive -> positive -> mask **) let rec sub_mask x y = match x with | XI p -> (match y with | XI q0 -> double_mask (sub_mask p q0) | XO q0 -> succ_double_mask (sub_mask p q0) | XH -> IsPos (XO p)) | XO p -> (match y with | XI q0 -> succ_double_mask (sub_mask_carry p q0) | XO q0 -> double_mask (sub_mask p q0) | XH -> IsPos (pred_double p)) | XH -> (match y with | XH -> IsNul | _ -> IsNeg) (** val sub_mask_carry : positive -> positive -> mask **) and sub_mask_carry x y = match x with | XI p -> (match y with | XI q0 -> succ_double_mask (sub_mask_carry p q0) | XO q0 -> double_mask (sub_mask p q0) | XH -> IsPos (pred_double p)) | XO p -> (match y with | XI q0 -> double_mask (sub_mask_carry p q0) | XO q0 -> succ_double_mask (sub_mask_carry p q0) | XH -> double_pred_mask p) | XH -> IsNeg (** val sub : positive -> positive -> positive **) let sub x y = match sub_mask x y with | IsPos z0 -> z0 | _ -> XH (** val mul : positive -> positive -> positive **) let rec mul x y = match x with | XI p -> add y (XO (mul p y)) | XO p -> XO (mul p y) | XH -> y (** val iter : positive -> ('a1 -> 'a1) -> 'a1 -> 'a1 **) let rec iter n0 f x = match n0 with | XI n' -> f (iter n' f (iter n' f x)) | XO n' -> iter n' f (iter n' f x) | XH -> f x (** val pow : positive -> positive -> positive **) let pow x y = iter y (mul x) XH (** val div2 : positive -> positive **) let div2 = function | XI p2 -> p2 | XO p2 -> p2 | XH -> XH (** val div2_up : positive -> positive **) let div2_up = function | XI p2 -> succ p2 | XO p2 -> p2 | XH -> XH (** val size_nat : positive -> nat **) let rec size_nat = function | XI p2 -> S (size_nat p2) | XO p2 -> S (size_nat p2) | XH -> S O (** val size : positive -> positive **) let rec size = function | XI p2 -> succ (size p2) | XO p2 -> succ (size p2) | XH -> XH (** val compare_cont : positive -> positive -> comparison -> comparison **) let rec compare_cont x y r = match x with | XI p -> (match y with | XI q0 -> compare_cont p q0 r | XO q0 -> compare_cont p q0 Gt | XH -> Gt) | XO p -> (match y with | XI q0 -> compare_cont p q0 Lt | XO q0 -> compare_cont p q0 r | XH -> Gt) | XH -> (match y with | XH -> r | _ -> Lt) (** val compare : positive -> positive -> comparison **) let compare x y = compare_cont x y Eq (** val min : positive -> positive -> positive **) let min p p' = match compare p p' with | Gt -> p' | _ -> p (** val max : positive -> positive -> positive **) let max p p' = match compare p p' with | Gt -> p | _ -> p' (** val eqb : positive -> positive -> bool **) let rec eqb p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> eqb p2 q1 | _ -> false) | XO p2 -> (match q0 with | XO q1 -> eqb p2 q1 | _ -> false) | XH -> (match q0 with | XH -> true | _ -> false) (** val leb : positive -> positive -> bool **) let leb x y = match compare x y with | Gt -> false | _ -> true (** val ltb : positive -> positive -> bool **) let ltb x y = match compare x y with | Lt -> true | _ -> false (** val sqrtrem_step : (positive -> positive) -> (positive -> positive) -> (positive * mask) -> positive * mask **) let sqrtrem_step f g = function | s,y -> (match y with | IsPos r -> let s' = XI (XO s) in let r' = g (f r) in if leb s' r' then (XI s),(sub_mask r' s') else (XO s),(IsPos r') | _ -> (XO s),(sub_mask (g (f XH)) (XO (XO XH)))) (** val 
sqrtrem : positive -> positive * mask **) let rec sqrtrem = function | XI p2 -> (match p2 with | XI p3 -> sqrtrem_step (fun x -> XI x) (fun x -> XI x) (sqrtrem p3) | XO p3 -> sqrtrem_step (fun x -> XO x) (fun x -> XI x) (sqrtrem p3) | XH -> XH,(IsPos (XO XH))) | XO p2 -> (match p2 with | XI p3 -> sqrtrem_step (fun x -> XI x) (fun x -> XO x) (sqrtrem p3) | XO p3 -> sqrtrem_step (fun x -> XO x) (fun x -> XO x) (sqrtrem p3) | XH -> XH,(IsPos XH)) | XH -> XH,IsNul (** val sqrt : positive -> positive **) let sqrt p = fst (sqrtrem p) (** val gcdn : nat -> positive -> positive -> positive **) let rec gcdn n0 a b = match n0 with | O -> XH | S n1 -> (match a with | XI a' -> (match b with | XI b' -> (match compare a' b' with | Eq -> a | Lt -> gcdn n1 (sub b' a') a | Gt -> gcdn n1 (sub a' b') b) | XO b0 -> gcdn n1 a b0 | XH -> XH) | XO a0 -> (match b with | XI p -> gcdn n1 a0 b | XO b0 -> XO (gcdn n1 a0 b0) | XH -> XH) | XH -> XH) (** val gcd : positive -> positive -> positive **) let gcd a b = gcdn (plus (size_nat a) (size_nat b)) a b (** val ggcdn : nat -> positive -> positive -> positive * (positive * positive) **) let rec ggcdn n0 a b = match n0 with | O -> XH,(a,b) | S n1 -> (match a with | XI a' -> (match b with | XI b' -> (match compare a' b' with | Eq -> a,(XH,XH) | Lt -> let g,p = ggcdn n1 (sub b' a') a in let ba,aa = p in g,(aa,(add aa (XO ba))) | Gt -> let g,p = ggcdn n1 (sub a' b') b in let ab,bb = p in g,((add bb (XO ab)),bb)) | XO b0 -> let g,p = ggcdn n1 a b0 in let aa,bb = p in g,(aa,(XO bb)) | XH -> XH,(a,XH)) | XO a0 -> (match b with | XI p -> let g,p2 = ggcdn n1 a0 b in let aa,bb = p2 in g,((XO aa),bb) | XO b0 -> let g,p = ggcdn n1 a0 b0 in (XO g),p | XH -> XH,(a,XH)) | XH -> XH,(XH,b)) (** val ggcd : positive -> positive -> positive * (positive * positive) **) let ggcd a b = ggcdn (plus (size_nat a) (size_nat b)) a b (** val coq_Nsucc_double : n -> n **) let coq_Nsucc_double = function | N0 -> Npos XH | Npos p -> Npos (XI p) (** val coq_Ndouble : n -> n **) let coq_Ndouble = function | N0 -> N0 | Npos p -> Npos (XO p) (** val coq_lor : positive -> positive -> positive **) let rec coq_lor p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> XI (coq_lor p2 q1) | XO q1 -> XI (coq_lor p2 q1) | XH -> p) | XO p2 -> (match q0 with | XI q1 -> XI (coq_lor p2 q1) | XO q1 -> XO (coq_lor p2 q1) | XH -> XI p2) | XH -> (match q0 with | XO q1 -> XI q1 | _ -> q0) (** val coq_land : positive -> positive -> n **) let rec coq_land p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> coq_Nsucc_double (coq_land p2 q1) | XO q1 -> coq_Ndouble (coq_land p2 q1) | XH -> Npos XH) | XO p2 -> (match q0 with | XI q1 -> coq_Ndouble (coq_land p2 q1) | XO q1 -> coq_Ndouble (coq_land p2 q1) | XH -> N0) | XH -> (match q0 with | XO q1 -> N0 | _ -> Npos XH) (** val ldiff : positive -> positive -> n **) let rec ldiff p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> coq_Ndouble (ldiff p2 q1) | XO q1 -> coq_Nsucc_double (ldiff p2 q1) | XH -> Npos (XO p2)) | XO p2 -> (match q0 with | XI q1 -> coq_Ndouble (ldiff p2 q1) | XO q1 -> coq_Ndouble (ldiff p2 q1) | XH -> Npos p) | XH -> (match q0 with | XO q1 -> Npos XH | _ -> N0) (** val coq_lxor : positive -> positive -> n **) let rec coq_lxor p q0 = match p with | XI p2 -> (match q0 with | XI q1 -> coq_Ndouble (coq_lxor p2 q1) | XO q1 -> coq_Nsucc_double (coq_lxor p2 q1) | XH -> Npos (XO p2)) | XO p2 -> (match q0 with | XI q1 -> coq_Nsucc_double (coq_lxor p2 q1) | XO q1 -> coq_Ndouble (coq_lxor p2 q1) | XH -> Npos (XI p2)) | XH -> (match q0 with | XI q1 -> Npos 
(XO q1) | XO q1 -> Npos (XI q1) | XH -> N0) (** val shiftl_nat : positive -> nat -> positive **) let shiftl_nat p n0 = nat_iter n0 (fun x -> XO x) p (** val shiftr_nat : positive -> nat -> positive **) let shiftr_nat p n0 = nat_iter n0 div2 p (** val shiftl : positive -> n -> positive **) let shiftl p = function | N0 -> p | Npos n1 -> iter n1 (fun x -> XO x) p (** val shiftr : positive -> n -> positive **) let shiftr p = function | N0 -> p | Npos n1 -> iter n1 div2 p (** val testbit_nat : positive -> nat -> bool **) let rec testbit_nat p n0 = match p with | XI p2 -> (match n0 with | O -> true | S n' -> testbit_nat p2 n') | XO p2 -> (match n0 with | O -> false | S n' -> testbit_nat p2 n') | XH -> (match n0 with | O -> true | S n1 -> false) (** val testbit : positive -> n -> bool **) let rec testbit p n0 = match p with | XI p2 -> (match n0 with | N0 -> true | Npos n1 -> testbit p2 (pred_N n1)) | XO p2 -> (match n0 with | N0 -> false | Npos n1 -> testbit p2 (pred_N n1)) | XH -> (match n0 with | N0 -> true | Npos p2 -> false) (** val iter_op : ('a1 -> 'a1 -> 'a1) -> positive -> 'a1 -> 'a1 **) let rec iter_op op p a = match p with | XI p2 -> op a (iter_op op p2 (op a a)) | XO p2 -> iter_op op p2 (op a a) | XH -> a (** val to_nat : positive -> nat **) let to_nat x = iter_op plus x (S O) (** val of_nat : nat -> positive **) let rec of_nat = function | O -> XH | S x -> (match x with | O -> XH | S n1 -> succ (of_nat x)) (** val of_succ_nat : nat -> positive **) let rec of_succ_nat = function | O -> XH | S x -> succ (of_succ_nat x) (** val eq_dec : positive -> positive -> bool **) let rec eq_dec p y0 = match p with | XI p2 -> (match y0 with | XI p3 -> eq_dec p2 p3 | _ -> false) | XO p2 -> (match y0 with | XO p3 -> eq_dec p2 p3 | _ -> false) | XH -> (match y0 with | XH -> true | _ -> false) (** val peano_rect : 'a1 -> (positive -> 'a1 -> 'a1) -> positive -> 'a1 **) let rec peano_rect a f p = let f2 = peano_rect (f XH a) (fun p2 x -> f (succ (XO p2)) (f (XO p2) x)) in (match p with | XI q0 -> f (XO q0) (f2 q0) | XO q0 -> f2 q0 | XH -> a) (** val peano_rec : 'a1 -> (positive -> 'a1 -> 'a1) -> positive -> 'a1 **) let peano_rec = peano_rect type coq_PeanoView = | PeanoOne | PeanoSucc of positive * coq_PeanoView (** val coq_PeanoView_rect : 'a1 -> (positive -> coq_PeanoView -> 'a1 -> 'a1) -> positive -> coq_PeanoView -> 'a1 **) let rec coq_PeanoView_rect f f0 p = function | PeanoOne -> f | PeanoSucc (p3, p4) -> f0 p3 p4 (coq_PeanoView_rect f f0 p3 p4) (** val coq_PeanoView_rec : 'a1 -> (positive -> coq_PeanoView -> 'a1 -> 'a1) -> positive -> coq_PeanoView -> 'a1 **) let rec coq_PeanoView_rec f f0 p = function | PeanoOne -> f | PeanoSucc (p3, p4) -> f0 p3 p4 (coq_PeanoView_rec f f0 p3 p4) (** val peanoView_xO : positive -> coq_PeanoView -> coq_PeanoView **) let rec peanoView_xO p = function | PeanoOne -> PeanoSucc (XH, PeanoOne) | PeanoSucc (p2, q1) -> PeanoSucc ((succ (XO p2)), (PeanoSucc ((XO p2), (peanoView_xO p2 q1)))) (** val peanoView_xI : positive -> coq_PeanoView -> coq_PeanoView **) let rec peanoView_xI p = function | PeanoOne -> PeanoSucc ((succ XH), (PeanoSucc (XH, PeanoOne))) | PeanoSucc (p2, q1) -> PeanoSucc ((succ (XI p2)), (PeanoSucc ((XI p2), (peanoView_xI p2 q1)))) (** val peanoView : positive -> coq_PeanoView **) let rec peanoView = function | XI p2 -> peanoView_xI p2 (peanoView p2) | XO p2 -> peanoView_xO p2 (peanoView p2) | XH -> PeanoOne (** val coq_PeanoView_iter : 'a1 -> (positive -> 'a1 -> 'a1) -> positive -> coq_PeanoView -> 'a1 **) let rec coq_PeanoView_iter a f p = function | 
PeanoOne -> a | PeanoSucc (p2, q1) -> f p2 (coq_PeanoView_iter a f p2 q1) (** val switch_Eq : comparison -> comparison -> comparison **) let switch_Eq c = function | Eq -> c | x -> x (** val mask2cmp : mask -> comparison **) let mask2cmp = function | IsNul -> Eq | IsPos p2 -> Gt | IsNeg -> Lt module T = struct end module ORev = struct type t = Coq__1.t end module MRev = struct (** val max : t -> t -> t **) let max x y = min y x end module MPRev = MaxLogicalProperties(ORev)(MRev) module P = struct (** val max_case_strong : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let max_case_strong n0 m compat hl hr = let c = compSpec2Type n0 m (compare n0 m) in (match c with | CompGtT -> compat n0 (max n0 m) __ (hl __) | _ -> compat m (max n0 m) __ (hr __)) (** val max_case : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 **) let max_case n0 m x x0 x1 = max_case_strong n0 m x (fun _ -> x0) (fun _ -> x1) (** val max_dec : t -> t -> bool **) let max_dec n0 m = max_case n0 m (fun x y _ h0 -> h0) true false (** val min_case_strong : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let min_case_strong n0 m compat hl hr = let c = compSpec2Type n0 m (compare n0 m) in (match c with | CompGtT -> compat m (min n0 m) __ (hr __) | _ -> compat n0 (min n0 m) __ (hl __)) (** val min_case : t -> t -> (t -> t -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 **) let min_case n0 m x x0 x1 = min_case_strong n0 m x (fun _ -> x0) (fun _ -> x1) (** val min_dec : t -> t -> bool **) let min_dec n0 m = min_case n0 m (fun x y _ h0 -> h0) true false end (** val max_case_strong : t -> t -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let max_case_strong n0 m x x0 = P.max_case_strong n0 m (fun x1 y _ x2 -> x2) x x0 (** val max_case : t -> t -> 'a1 -> 'a1 -> 'a1 **) let max_case n0 m x x0 = max_case_strong n0 m (fun _ -> x) (fun _ -> x0) (** val max_dec : t -> t -> bool **) let max_dec = P.max_dec (** val min_case_strong : t -> t -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let min_case_strong n0 m x x0 = P.min_case_strong n0 m (fun x1 y _ x2 -> x2) x x0 (** val min_case : t -> t -> 'a1 -> 'a1 -> 'a1 **) let min_case n0 m x x0 = min_case_strong n0 m (fun _ -> x) (fun _ -> x0) (** val min_dec : t -> t -> bool **) let min_dec = P.min_dec end module N = struct type t = n (** val zero : n **) let zero = N0 (** val one : n **) let one = Npos XH (** val two : n **) let two = Npos (XO XH) (** val succ_double : n -> n **) let succ_double = function | N0 -> Npos XH | Npos p -> Npos (XI p) (** val double : n -> n **) let double = function | N0 -> N0 | Npos p -> Npos (XO p) (** val succ : n -> n **) let succ = function | N0 -> Npos XH | Npos p -> Npos (Coq_Pos.succ p) (** val pred : n -> n **) let pred = function | N0 -> N0 | Npos p -> Coq_Pos.pred_N p (** val succ_pos : n -> positive **) let succ_pos = function | N0 -> XH | Npos p -> Coq_Pos.succ p (** val add : n -> n -> n **) let add n0 m = match n0 with | N0 -> m | Npos p -> (match m with | N0 -> n0 | Npos q0 -> Npos (Coq_Pos.add p q0)) (** val sub : n -> n -> n **) let sub n0 m = match n0 with | N0 -> N0 | Npos n' -> (match m with | N0 -> n0 | Npos m' -> (match Coq_Pos.sub_mask n' m' with | Coq_Pos.IsPos p -> Npos p | _ -> N0)) (** val mul : n -> n -> n **) let mul n0 m = match n0 with | N0 -> N0 | Npos p -> (match m with | N0 -> N0 | Npos q0 -> Npos (Coq_Pos.mul p q0)) (** val compare : n -> n -> comparison **) let compare n0 m = match n0 with | N0 -> (match m with | N0 -> Eq | Npos m' -> Lt) | Npos n' -> (match m with | N0 -> 
Gt | Npos m' -> Coq_Pos.compare n' m') (** val eqb : n -> n -> bool **) let rec eqb n0 m = match n0 with | N0 -> (match m with | N0 -> true | Npos p -> false) | Npos p -> (match m with | N0 -> false | Npos q0 -> Coq_Pos.eqb p q0) (** val leb : n -> n -> bool **) let leb x y = match compare x y with | Gt -> false | _ -> true (** val ltb : n -> n -> bool **) let ltb x y = match compare x y with | Lt -> true | _ -> false (** val min : n -> n -> n **) let min n0 n' = match compare n0 n' with | Gt -> n' | _ -> n0 (** val max : n -> n -> n **) let max n0 n' = match compare n0 n' with | Gt -> n0 | _ -> n' (** val div2 : n -> n **) let div2 = function | N0 -> N0 | Npos p2 -> (match p2 with | XI p -> Npos p | XO p -> Npos p | XH -> N0) (** val even : n -> bool **) let even = function | N0 -> true | Npos p -> (match p with | XO p2 -> true | _ -> false) (** val odd : n -> bool **) let odd n0 = negb (even n0) (** val pow : n -> n -> n **) let pow n0 = function | N0 -> Npos XH | Npos p2 -> (match n0 with | N0 -> N0 | Npos q0 -> Npos (Coq_Pos.pow q0 p2)) (** val log2 : n -> n **) let log2 = function | N0 -> N0 | Npos p2 -> (match p2 with | XI p -> Npos (Coq_Pos.size p) | XO p -> Npos (Coq_Pos.size p) | XH -> N0) (** val size : n -> n **) let size = function | N0 -> N0 | Npos p -> Npos (Coq_Pos.size p) (** val size_nat : n -> nat **) let size_nat = function | N0 -> O | Npos p -> Coq_Pos.size_nat p (** val pos_div_eucl : positive -> n -> n * n **) let rec pos_div_eucl a b = match a with | XI a' -> let q0,r = pos_div_eucl a' b in let r' = succ_double r in if leb b r' then (succ_double q0),(sub r' b) else (double q0),r' | XO a' -> let q0,r = pos_div_eucl a' b in let r' = double r in if leb b r' then (succ_double q0),(sub r' b) else (double q0),r' | XH -> (match b with | N0 -> N0,(Npos XH) | Npos p -> (match p with | XH -> (Npos XH),N0 | _ -> N0,(Npos XH))) (** val div_eucl : n -> n -> n * n **) let div_eucl a b = match a with | N0 -> N0,N0 | Npos na -> (match b with | N0 -> N0,a | Npos p -> pos_div_eucl na b) (** val div : n -> n -> n **) let div a b = fst (div_eucl a b) (** val modulo : n -> n -> n **) let modulo a b = snd (div_eucl a b) (** val gcd : n -> n -> n **) let gcd a b = match a with | N0 -> b | Npos p -> (match b with | N0 -> a | Npos q0 -> Npos (Coq_Pos.gcd p q0)) (** val ggcd : n -> n -> n * (n * n) **) let ggcd a b = match a with | N0 -> b,(N0,(Npos XH)) | Npos p -> (match b with | N0 -> a,((Npos XH),N0) | Npos q0 -> let g,p2 = Coq_Pos.ggcd p q0 in let aa,bb = p2 in (Npos g),((Npos aa),(Npos bb))) (** val sqrtrem : n -> n * n **) let sqrtrem = function | N0 -> N0,N0 | Npos p -> let s,m = Coq_Pos.sqrtrem p in (match m with | Coq_Pos.IsPos r -> (Npos s),(Npos r) | _ -> (Npos s),N0) (** val sqrt : n -> n **) let sqrt = function | N0 -> N0 | Npos p -> Npos (Coq_Pos.sqrt p) (** val coq_lor : n -> n -> n **) let coq_lor n0 m = match n0 with | N0 -> m | Npos p -> (match m with | N0 -> n0 | Npos q0 -> Npos (Coq_Pos.coq_lor p q0)) (** val coq_land : n -> n -> n **) let coq_land n0 m = match n0 with | N0 -> N0 | Npos p -> (match m with | N0 -> N0 | Npos q0 -> Coq_Pos.coq_land p q0) (** val ldiff : n -> n -> n **) let rec ldiff n0 m = match n0 with | N0 -> N0 | Npos p -> (match m with | N0 -> n0 | Npos q0 -> Coq_Pos.ldiff p q0) (** val coq_lxor : n -> n -> n **) let coq_lxor n0 m = match n0 with | N0 -> m | Npos p -> (match m with | N0 -> n0 | Npos q0 -> Coq_Pos.coq_lxor p q0) (** val shiftl_nat : n -> nat -> n **) let shiftl_nat a n0 = nat_iter n0 double a (** val shiftr_nat : n -> nat -> n **) let 
shiftr_nat a n0 = nat_iter n0 div2 a (** val shiftl : n -> n -> n **) let shiftl a n0 = match a with | N0 -> N0 | Npos a0 -> Npos (Coq_Pos.shiftl a0 n0) (** val shiftr : n -> n -> n **) let shiftr a = function | N0 -> a | Npos p -> Coq_Pos.iter p div2 a (** val testbit_nat : n -> nat -> bool **) let testbit_nat = function | N0 -> (fun x -> false) | Npos p -> Coq_Pos.testbit_nat p (** val testbit : n -> n -> bool **) let testbit a n0 = match a with | N0 -> false | Npos p -> Coq_Pos.testbit p n0 (** val to_nat : n -> nat **) let to_nat = function | N0 -> O | Npos p -> Coq_Pos.to_nat p (** val of_nat : nat -> n **) let of_nat = function | O -> N0 | S n' -> Npos (Coq_Pos.of_succ_nat n') (** val iter : n -> ('a1 -> 'a1) -> 'a1 -> 'a1 **) let iter n0 f x = match n0 with | N0 -> x | Npos p -> Coq_Pos.iter p f x (** val eq_dec : n -> n -> bool **) let eq_dec n0 m = match n0 with | N0 -> (match m with | N0 -> true | Npos p -> false) | Npos x -> (match m with | N0 -> false | Npos p2 -> Coq_Pos.eq_dec x p2) (** val discr : n -> positive option **) let discr = function | N0 -> None | Npos p -> Some p (** val binary_rect : 'a1 -> (n -> 'a1 -> 'a1) -> (n -> 'a1 -> 'a1) -> n -> 'a1 **) let binary_rect f0 f2 fS2 n0 = let f2' = fun p -> f2 (Npos p) in let fS2' = fun p -> fS2 (Npos p) in (match n0 with | N0 -> f0 | Npos p -> let rec f = function | XI p3 -> fS2' p3 (f p3) | XO p3 -> f2' p3 (f p3) | XH -> fS2 N0 f0 in f p) (** val binary_rec : 'a1 -> (n -> 'a1 -> 'a1) -> (n -> 'a1 -> 'a1) -> n -> 'a1 **) let binary_rec = binary_rect (** val peano_rect : 'a1 -> (n -> 'a1 -> 'a1) -> n -> 'a1 **) let peano_rect f0 f n0 = let f' = fun p -> f (Npos p) in (match n0 with | N0 -> f0 | Npos p -> Coq_Pos.peano_rect (f N0 f0) f' p) (** val peano_rec : 'a1 -> (n -> 'a1 -> 'a1) -> n -> 'a1 **) let peano_rec = peano_rect module BootStrap = struct end (** val recursion : 'a1 -> (n -> 'a1 -> 'a1) -> n -> 'a1 **) let recursion x = peano_rect x module OrderElts = struct type t = n end module OrderTac = MakeOrderTac(OrderElts) module NZPowP = struct end module NZSqrtP = struct end (** val sqrt_up : n -> n **) let sqrt_up a = match compare N0 a with | Lt -> succ (sqrt (pred a)) | _ -> N0 (** val log2_up : n -> n **) let log2_up a = match compare (Npos XH) a with | Lt -> succ (log2 (pred a)) | _ -> N0 module NZDivP = struct end (** val lcm : n -> n -> n **) let lcm a b = mul a (div b (gcd a b)) (** val b2n : bool -> n **) let b2n = function | true -> Npos XH | false -> N0 (** val setbit : n -> n -> n **) let setbit a n0 = coq_lor a (shiftl (Npos XH) n0) (** val clearbit : n -> n -> n **) let clearbit a n0 = ldiff a (shiftl (Npos XH) n0) (** val ones : n -> n **) let ones n0 = pred (shiftl (Npos XH) n0) (** val lnot : n -> n -> n **) let lnot a n0 = coq_lxor a (ones n0) module T = struct end module ORev = struct type t = n end module MRev = struct (** val max : n -> n -> n **) let max x y = min y x end module MPRev = MaxLogicalProperties(ORev)(MRev) module P = struct (** val max_case_strong : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let max_case_strong n0 m compat hl hr = let c = compSpec2Type n0 m (compare n0 m) in (match c with | CompGtT -> compat n0 (max n0 m) __ (hl __) | _ -> compat m (max n0 m) __ (hr __)) (** val max_case : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 **) let max_case n0 m x x0 x1 = max_case_strong n0 m x (fun _ -> x0) (fun _ -> x1) (** val max_dec : n -> n -> bool **) let max_dec n0 m = max_case n0 m (fun x y _ h0 -> h0) true false (** val 
min_case_strong : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let min_case_strong n0 m compat hl hr = let c = compSpec2Type n0 m (compare n0 m) in (match c with | CompGtT -> compat m (min n0 m) __ (hr __) | _ -> compat n0 (min n0 m) __ (hl __)) (** val min_case : n -> n -> (n -> n -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 **) let min_case n0 m x x0 x1 = min_case_strong n0 m x (fun _ -> x0) (fun _ -> x1) (** val min_dec : n -> n -> bool **) let min_dec n0 m = min_case n0 m (fun x y _ h0 -> h0) true false end (** val max_case_strong : n -> n -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let max_case_strong n0 m x x0 = P.max_case_strong n0 m (fun x1 y _ x2 -> x2) x x0 (** val max_case : n -> n -> 'a1 -> 'a1 -> 'a1 **) let max_case n0 m x x0 = max_case_strong n0 m (fun _ -> x) (fun _ -> x0) (** val max_dec : n -> n -> bool **) let max_dec = P.max_dec (** val min_case_strong : n -> n -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let min_case_strong n0 m x x0 = P.min_case_strong n0 m (fun x1 y _ x2 -> x2) x x0 (** val min_case : n -> n -> 'a1 -> 'a1 -> 'a1 **) let min_case n0 m x x0 = min_case_strong n0 m (fun _ -> x) (fun _ -> x0) (** val min_dec : n -> n -> bool **) let min_dec = P.min_dec end (** val pow_pos : ('a1 -> 'a1 -> 'a1) -> 'a1 -> positive -> 'a1 **) let rec pow_pos rmul x = function | XI i0 -> let p = pow_pos rmul x i0 in rmul x (rmul p p) | XO i0 -> let p = pow_pos rmul x i0 in rmul p p | XH -> x (** val nth : nat -> 'a1 list -> 'a1 -> 'a1 **) let rec nth n0 l default = match n0 with | O -> (match l with | [] -> default | x::l' -> x) | S m -> (match l with | [] -> default | x::t1 -> nth m t1 default) (** val map : ('a1 -> 'a2) -> 'a1 list -> 'a2 list **) let rec map f = function | [] -> [] | a::t1 -> (f a)::(map f t1) (** val fold_right : ('a2 -> 'a1 -> 'a1) -> 'a1 -> 'a2 list -> 'a1 **) let rec fold_right f a0 = function | [] -> a0 | b::t1 -> f b (fold_right f a0 t1) module Z = struct type t = z (** val zero : z **) let zero = Z0 (** val one : z **) let one = Zpos XH (** val two : z **) let two = Zpos (XO XH) (** val double : z -> z **) let double = function | Z0 -> Z0 | Zpos p -> Zpos (XO p) | Zneg p -> Zneg (XO p) (** val succ_double : z -> z **) let succ_double = function | Z0 -> Zpos XH | Zpos p -> Zpos (XI p) | Zneg p -> Zneg (Coq_Pos.pred_double p) (** val pred_double : z -> z **) let pred_double = function | Z0 -> Zneg XH | Zpos p -> Zpos (Coq_Pos.pred_double p) | Zneg p -> Zneg (XI p) (** val pos_sub : positive -> positive -> z **) let rec pos_sub x y = match x with | XI p -> (match y with | XI q0 -> double (pos_sub p q0) | XO q0 -> succ_double (pos_sub p q0) | XH -> Zpos (XO p)) | XO p -> (match y with | XI q0 -> pred_double (pos_sub p q0) | XO q0 -> double (pos_sub p q0) | XH -> Zpos (Coq_Pos.pred_double p)) | XH -> (match y with | XI q0 -> Zneg (XO q0) | XO q0 -> Zneg (Coq_Pos.pred_double q0) | XH -> Z0) (** val add : z -> z -> z **) let add x y = match x with | Z0 -> y | Zpos x' -> (match y with | Z0 -> x | Zpos y' -> Zpos (Coq_Pos.add x' y') | Zneg y' -> pos_sub x' y') | Zneg x' -> (match y with | Z0 -> x | Zpos y' -> pos_sub y' x' | Zneg y' -> Zneg (Coq_Pos.add x' y')) (** val opp : z -> z **) let opp = function | Z0 -> Z0 | Zpos x0 -> Zneg x0 | Zneg x0 -> Zpos x0 (** val succ : z -> z **) let succ x = add x (Zpos XH) (** val pred : z -> z **) let pred x = add x (Zneg XH) (** val sub : z -> z -> z **) let sub m n0 = add m (opp n0) (** val mul : z -> z -> z **) let mul x y = match x with | Z0 -> Z0 | Zpos x' -> (match y with | Z0 -> Z0 | Zpos 
y' -> Zpos (Coq_Pos.mul x' y') | Zneg y' -> Zneg (Coq_Pos.mul x' y')) | Zneg x' -> (match y with | Z0 -> Z0 | Zpos y' -> Zneg (Coq_Pos.mul x' y') | Zneg y' -> Zpos (Coq_Pos.mul x' y')) (** val pow_pos : z -> positive -> z **) let pow_pos z0 n0 = Coq_Pos.iter n0 (mul z0) (Zpos XH) (** val pow : z -> z -> z **) let pow x = function | Z0 -> Zpos XH | Zpos p -> pow_pos x p | Zneg p -> Z0 (** val compare : z -> z -> comparison **) let compare x y = match x with | Z0 -> (match y with | Z0 -> Eq | Zpos y' -> Lt | Zneg y' -> Gt) | Zpos x' -> (match y with | Zpos y' -> Coq_Pos.compare x' y' | _ -> Gt) | Zneg x' -> (match y with | Zneg y' -> compOpp (Coq_Pos.compare x' y') | _ -> Lt) (** val sgn : z -> z **) let sgn = function | Z0 -> Z0 | Zpos p -> Zpos XH | Zneg p -> Zneg XH (** val leb : z -> z -> bool **) let leb x y = match compare x y with | Gt -> false | _ -> true (** val geb : z -> z -> bool **) let geb x y = match compare x y with | Lt -> false | _ -> true (** val ltb : z -> z -> bool **) let ltb x y = match compare x y with | Lt -> true | _ -> false (** val gtb : z -> z -> bool **) let gtb x y = match compare x y with | Gt -> true | _ -> false (** val eqb : z -> z -> bool **) let rec eqb x y = match x with | Z0 -> (match y with | Z0 -> true | _ -> false) | Zpos p -> (match y with | Zpos q0 -> Coq_Pos.eqb p q0 | _ -> false) | Zneg p -> (match y with | Zneg q0 -> Coq_Pos.eqb p q0 | _ -> false) (** val max : z -> z -> z **) let max n0 m = match compare n0 m with | Lt -> m | _ -> n0 (** val min : z -> z -> z **) let min n0 m = match compare n0 m with | Gt -> m | _ -> n0 (** val abs : z -> z **) let abs = function | Zneg p -> Zpos p | x -> x (** val abs_nat : z -> nat **) let abs_nat = function | Z0 -> O | Zpos p -> Coq_Pos.to_nat p | Zneg p -> Coq_Pos.to_nat p (** val abs_N : z -> n **) let abs_N = function | Z0 -> N0 | Zpos p -> Npos p | Zneg p -> Npos p (** val to_nat : z -> nat **) let to_nat = function | Zpos p -> Coq_Pos.to_nat p | _ -> O (** val to_N : z -> n **) let to_N = function | Zpos p -> Npos p | _ -> N0 (** val of_nat : nat -> z **) let of_nat = function | O -> Z0 | S n1 -> Zpos (Coq_Pos.of_succ_nat n1) (** val of_N : n -> z **) let of_N = function | N0 -> Z0 | Npos p -> Zpos p (** val iter : z -> ('a1 -> 'a1) -> 'a1 -> 'a1 **) let iter n0 f x = match n0 with | Zpos p -> Coq_Pos.iter p f x | _ -> x (** val pos_div_eucl : positive -> z -> z * z **) let rec pos_div_eucl a b = match a with | XI a' -> let q0,r = pos_div_eucl a' b in let r' = add (mul (Zpos (XO XH)) r) (Zpos XH) in if gtb b r' then (mul (Zpos (XO XH)) q0),r' else (add (mul (Zpos (XO XH)) q0) (Zpos XH)),(sub r' b) | XO a' -> let q0,r = pos_div_eucl a' b in let r' = mul (Zpos (XO XH)) r in if gtb b r' then (mul (Zpos (XO XH)) q0),r' else (add (mul (Zpos (XO XH)) q0) (Zpos XH)),(sub r' b) | XH -> if geb b (Zpos (XO XH)) then Z0,(Zpos XH) else (Zpos XH),Z0 (** val div_eucl : z -> z -> z * z **) let div_eucl a b = match a with | Z0 -> Z0,Z0 | Zpos a' -> (match b with | Z0 -> Z0,Z0 | Zpos p -> pos_div_eucl a' b | Zneg b' -> let q0,r = pos_div_eucl a' (Zpos b') in (match r with | Z0 -> (opp q0),Z0 | _ -> (opp (add q0 (Zpos XH))),(add b r))) | Zneg a' -> (match b with | Z0 -> Z0,Z0 | Zpos p -> let q0,r = pos_div_eucl a' b in (match r with | Z0 -> (opp q0),Z0 | _ -> (opp (add q0 (Zpos XH))),(sub b r)) | Zneg b' -> let q0,r = pos_div_eucl a' (Zpos b') in q0,(opp r)) (** val div : z -> z -> z **) let div a b = let q0,x = div_eucl a b in q0 (** val modulo : z -> z -> z **) let modulo a b = let x,r = div_eucl a b in r (** val 
quotrem : z -> z -> z * z **) let quotrem a b = match a with | Z0 -> Z0,Z0 | Zpos a0 -> (match b with | Z0 -> Z0,a | Zpos b0 -> let q0,r = N.pos_div_eucl a0 (Npos b0) in (of_N q0),(of_N r) | Zneg b0 -> let q0,r = N.pos_div_eucl a0 (Npos b0) in (opp (of_N q0)),(of_N r)) | Zneg a0 -> (match b with | Z0 -> Z0,a | Zpos b0 -> let q0,r = N.pos_div_eucl a0 (Npos b0) in (opp (of_N q0)),(opp (of_N r)) | Zneg b0 -> let q0,r = N.pos_div_eucl a0 (Npos b0) in (of_N q0),(opp (of_N r))) (** val quot : z -> z -> z **) let quot a b = fst (quotrem a b) (** val rem : z -> z -> z **) let rem a b = snd (quotrem a b) (** val even : z -> bool **) let even = function | Z0 -> true | Zpos p -> (match p with | XO p2 -> true | _ -> false) | Zneg p -> (match p with | XO p2 -> true | _ -> false) (** val odd : z -> bool **) let odd = function | Z0 -> false | Zpos p -> (match p with | XO p2 -> false | _ -> true) | Zneg p -> (match p with | XO p2 -> false | _ -> true) (** val div2 : z -> z **) let div2 = function | Z0 -> Z0 | Zpos p -> (match p with | XH -> Z0 | _ -> Zpos (Coq_Pos.div2 p)) | Zneg p -> Zneg (Coq_Pos.div2_up p) (** val quot2 : z -> z **) let quot2 = function | Z0 -> Z0 | Zpos p -> (match p with | XH -> Z0 | _ -> Zpos (Coq_Pos.div2 p)) | Zneg p -> (match p with | XH -> Z0 | _ -> Zneg (Coq_Pos.div2 p)) (** val log2 : z -> z **) let log2 = function | Zpos p2 -> (match p2 with | XI p -> Zpos (Coq_Pos.size p) | XO p -> Zpos (Coq_Pos.size p) | XH -> Z0) | _ -> Z0 (** val sqrtrem : z -> z * z **) let sqrtrem = function | Zpos p -> let s,m = Coq_Pos.sqrtrem p in (match m with | Coq_Pos.IsPos r -> (Zpos s),(Zpos r) | _ -> (Zpos s),Z0) | _ -> Z0,Z0 (** val sqrt : z -> z **) let sqrt = function | Zpos p -> Zpos (Coq_Pos.sqrt p) | _ -> Z0 (** val gcd : z -> z -> z **) let gcd a b = match a with | Z0 -> abs b | Zpos a0 -> (match b with | Z0 -> abs a | Zpos b0 -> Zpos (Coq_Pos.gcd a0 b0) | Zneg b0 -> Zpos (Coq_Pos.gcd a0 b0)) | Zneg a0 -> (match b with | Z0 -> abs a | Zpos b0 -> Zpos (Coq_Pos.gcd a0 b0) | Zneg b0 -> Zpos (Coq_Pos.gcd a0 b0)) (** val ggcd : z -> z -> z * (z * z) **) let ggcd a b = match a with | Z0 -> (abs b),(Z0,(sgn b)) | Zpos a0 -> (match b with | Z0 -> (abs a),((sgn a),Z0) | Zpos b0 -> let g,p = Coq_Pos.ggcd a0 b0 in let aa,bb = p in (Zpos g),((Zpos aa),(Zpos bb)) | Zneg b0 -> let g,p = Coq_Pos.ggcd a0 b0 in let aa,bb = p in (Zpos g),((Zpos aa),(Zneg bb))) | Zneg a0 -> (match b with | Z0 -> (abs a),((sgn a),Z0) | Zpos b0 -> let g,p = Coq_Pos.ggcd a0 b0 in let aa,bb = p in (Zpos g),((Zneg aa),(Zpos bb)) | Zneg b0 -> let g,p = Coq_Pos.ggcd a0 b0 in let aa,bb = p in (Zpos g),((Zneg aa),(Zneg bb))) (** val testbit : z -> z -> bool **) let testbit a = function | Z0 -> odd a | Zpos p -> (match a with | Z0 -> false | Zpos a0 -> Coq_Pos.testbit a0 (Npos p) | Zneg a0 -> negb (N.testbit (Coq_Pos.pred_N a0) (Npos p))) | Zneg p -> false (** val shiftl : z -> z -> z **) let shiftl a = function | Z0 -> a | Zpos p -> Coq_Pos.iter p (mul (Zpos (XO XH))) a | Zneg p -> Coq_Pos.iter p div2 a (** val shiftr : z -> z -> z **) let shiftr a n0 = shiftl a (opp n0) (** val coq_lor : z -> z -> z **) let coq_lor a b = match a with | Z0 -> b | Zpos a0 -> (match b with | Z0 -> a | Zpos b0 -> Zpos (Coq_Pos.coq_lor a0 b0) | Zneg b0 -> Zneg (N.succ_pos (N.ldiff (Coq_Pos.pred_N b0) (Npos a0)))) | Zneg a0 -> (match b with | Z0 -> a | Zpos b0 -> Zneg (N.succ_pos (N.ldiff (Coq_Pos.pred_N a0) (Npos b0))) | Zneg b0 -> Zneg (N.succ_pos (N.coq_land (Coq_Pos.pred_N a0) (Coq_Pos.pred_N b0)))) (** val coq_land : z -> z -> z **) let coq_land a 
b = match a with | Z0 -> Z0 | Zpos a0 -> (match b with | Z0 -> Z0 | Zpos b0 -> of_N (Coq_Pos.coq_land a0 b0) | Zneg b0 -> of_N (N.ldiff (Npos a0) (Coq_Pos.pred_N b0))) | Zneg a0 -> (match b with | Z0 -> Z0 | Zpos b0 -> of_N (N.ldiff (Npos b0) (Coq_Pos.pred_N a0)) | Zneg b0 -> Zneg (N.succ_pos (N.coq_lor (Coq_Pos.pred_N a0) (Coq_Pos.pred_N b0)))) (** val ldiff : z -> z -> z **) let ldiff a b = match a with | Z0 -> Z0 | Zpos a0 -> (match b with | Z0 -> a | Zpos b0 -> of_N (Coq_Pos.ldiff a0 b0) | Zneg b0 -> of_N (N.coq_land (Npos a0) (Coq_Pos.pred_N b0))) | Zneg a0 -> (match b with | Z0 -> a | Zpos b0 -> Zneg (N.succ_pos (N.coq_lor (Coq_Pos.pred_N a0) (Npos b0))) | Zneg b0 -> of_N (N.ldiff (Coq_Pos.pred_N b0) (Coq_Pos.pred_N a0))) (** val coq_lxor : z -> z -> z **) let coq_lxor a b = match a with | Z0 -> b | Zpos a0 -> (match b with | Z0 -> a | Zpos b0 -> of_N (Coq_Pos.coq_lxor a0 b0) | Zneg b0 -> Zneg (N.succ_pos (N.coq_lxor (Npos a0) (Coq_Pos.pred_N b0)))) | Zneg a0 -> (match b with | Z0 -> a | Zpos b0 -> Zneg (N.succ_pos (N.coq_lxor (Coq_Pos.pred_N a0) (Npos b0))) | Zneg b0 -> of_N (N.coq_lxor (Coq_Pos.pred_N a0) (Coq_Pos.pred_N b0))) (** val eq_dec : z -> z -> bool **) let eq_dec x y = match x with | Z0 -> (match y with | Z0 -> true | _ -> false) | Zpos x0 -> (match y with | Zpos p2 -> Coq_Pos.eq_dec x0 p2 | _ -> false) | Zneg x0 -> (match y with | Zneg p2 -> Coq_Pos.eq_dec x0 p2 | _ -> false) module BootStrap = struct end module OrderElts = struct type t = z end module OrderTac = MakeOrderTac(OrderElts) (** val sqrt_up : z -> z **) let sqrt_up a = match compare Z0 a with | Lt -> succ (sqrt (pred a)) | _ -> Z0 (** val log2_up : z -> z **) let log2_up a = match compare (Zpos XH) a with | Lt -> succ (log2 (pred a)) | _ -> Z0 module NZDivP = struct end module Quot2Div = struct (** val div : z -> z -> z **) let div = quot (** val modulo : z -> z -> z **) let modulo = rem end module NZQuot = struct end (** val lcm : z -> z -> z **) let lcm a b = abs (mul a (div b (gcd a b))) (** val b2z : bool -> z **) let b2z = function | true -> Zpos XH | false -> Z0 (** val setbit : z -> z -> z **) let setbit a n0 = coq_lor a (shiftl (Zpos XH) n0) (** val clearbit : z -> z -> z **) let clearbit a n0 = ldiff a (shiftl (Zpos XH) n0) (** val lnot : z -> z **) let lnot a = pred (opp a) (** val ones : z -> z **) let ones n0 = pred (shiftl (Zpos XH) n0) module T = struct end module ORev = struct type t = z end module MRev = struct (** val max : z -> z -> z **) let max x y = min y x end module MPRev = MaxLogicalProperties(ORev)(MRev) module P = struct (** val max_case_strong : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let max_case_strong n0 m compat hl hr = let c = compSpec2Type n0 m (compare n0 m) in (match c with | CompGtT -> compat n0 (max n0 m) __ (hl __) | _ -> compat m (max n0 m) __ (hr __)) (** val max_case : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 **) let max_case n0 m x x0 x1 = max_case_strong n0 m x (fun _ -> x0) (fun _ -> x1) (** val max_dec : z -> z -> bool **) let max_dec n0 m = max_case n0 m (fun x y _ h0 -> h0) true false (** val min_case_strong : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let min_case_strong n0 m compat hl hr = let c = compSpec2Type n0 m (compare n0 m) in (match c with | CompGtT -> compat m (min n0 m) __ (hr __) | _ -> compat n0 (min n0 m) __ (hl __)) (** val min_case : z -> z -> (z -> z -> __ -> 'a1 -> 'a1) -> 'a1 -> 'a1 -> 'a1 **) let min_case n0 m x x0 x1 = min_case_strong n0 m x (fun 
_ -> x0) (fun _ -> x1) (** val min_dec : z -> z -> bool **) let min_dec n0 m = min_case n0 m (fun x y _ h0 -> h0) true false end (** val max_case_strong : z -> z -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let max_case_strong n0 m x x0 = P.max_case_strong n0 m (fun x1 y _ x2 -> x2) x x0 (** val max_case : z -> z -> 'a1 -> 'a1 -> 'a1 **) let max_case n0 m x x0 = max_case_strong n0 m (fun _ -> x) (fun _ -> x0) (** val max_dec : z -> z -> bool **) let max_dec = P.max_dec (** val min_case_strong : z -> z -> (__ -> 'a1) -> (__ -> 'a1) -> 'a1 **) let min_case_strong n0 m x x0 = P.min_case_strong n0 m (fun x1 y _ x2 -> x2) x x0 (** val min_case : z -> z -> 'a1 -> 'a1 -> 'a1 **) let min_case n0 m x x0 = min_case_strong n0 m (fun _ -> x) (fun _ -> x0) (** val min_dec : z -> z -> bool **) let min_dec = P.min_dec end (** val zeq_bool : z -> z -> bool **) let zeq_bool x y = match Z.compare x y with | Eq -> true | _ -> false type 'c pol = | Pc of 'c | Pinj of positive * 'c pol | PX of 'c pol * positive * 'c pol (** val p0 : 'a1 -> 'a1 pol **) let p0 cO = Pc cO (** val p1 : 'a1 -> 'a1 pol **) let p1 cI = Pc cI (** val peq : ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> bool **) let rec peq ceqb p p' = match p with | Pc c -> (match p' with | Pc c' -> ceqb c c' | _ -> false) | Pinj (j, q0) -> (match p' with | Pinj (j', q') -> (match Coq_Pos.compare j j' with | Eq -> peq ceqb q0 q' | _ -> false) | _ -> false) | PX (p2, i, q0) -> (match p' with | PX (p'0, i', q') -> (match Coq_Pos.compare i i' with | Eq -> if peq ceqb p2 p'0 then peq ceqb q0 q' else false | _ -> false) | _ -> false) (** val mkPinj : positive -> 'a1 pol -> 'a1 pol **) let mkPinj j p = match p with | Pc c -> p | Pinj (j', q0) -> Pinj ((Coq_Pos.add j j'), q0) | PX (p2, p3, p4) -> Pinj (j, p) (** val mkPinj_pred : positive -> 'a1 pol -> 'a1 pol **) let mkPinj_pred j p = match j with | XI j0 -> Pinj ((XO j0), p) | XO j0 -> Pinj ((Coq_Pos.pred_double j0), p) | XH -> p (** val mkPX : 'a1 -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol **) let mkPX cO ceqb p i q0 = match p with | Pc c -> if ceqb c cO then mkPinj XH q0 else PX (p, i, q0) | Pinj (p2, p3) -> PX (p, i, q0) | PX (p', i', q') -> if peq ceqb q' (p0 cO) then PX (p', (Coq_Pos.add i' i), q0) else PX (p, i, q0) (** val mkXi : 'a1 -> 'a1 -> positive -> 'a1 pol **) let mkXi cO cI i = PX ((p1 cI), i, (p0 cO)) (** val mkX : 'a1 -> 'a1 -> 'a1 pol **) let mkX cO cI = mkXi cO cI XH (** val popp : ('a1 -> 'a1) -> 'a1 pol -> 'a1 pol **) let rec popp copp = function | Pc c -> Pc (copp c) | Pinj (j, q0) -> Pinj (j, (popp copp q0)) | PX (p2, i, q0) -> PX ((popp copp p2), i, (popp copp q0)) (** val paddC : ('a1 -> 'a1 -> 'a1) -> 'a1 pol -> 'a1 -> 'a1 pol **) let rec paddC cadd p c = match p with | Pc c1 -> Pc (cadd c1 c) | Pinj (j, q0) -> Pinj (j, (paddC cadd q0 c)) | PX (p2, i, q0) -> PX (p2, i, (paddC cadd q0 c)) (** val psubC : ('a1 -> 'a1 -> 'a1) -> 'a1 pol -> 'a1 -> 'a1 pol **) let rec psubC csub p c = match p with | Pc c1 -> Pc (csub c1 c) | Pinj (j, q0) -> Pinj (j, (psubC csub q0 c)) | PX (p2, i, q0) -> PX (p2, i, (psubC csub q0 c)) (** val paddI : ('a1 -> 'a1 -> 'a1) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol **) let rec paddI cadd pop q0 j = function | Pc c -> mkPinj j (paddC cadd q0 c) | Pinj (j', q') -> (match Z.pos_sub j' j with | Z0 -> mkPinj j (pop q' q0) | Zpos k -> mkPinj j (pop (Pinj (k, q')) q0) | Zneg k -> mkPinj j' (paddI cadd pop q0 k q')) | PX (p2, i, q') -> (match j with | XI j0 -> PX (p2, i, (paddI cadd pop q0 (XO j0) q')) | XO 
j0 -> PX (p2, i, (paddI cadd pop q0 (Coq_Pos.pred_double j0) q')) | XH -> PX (p2, i, (pop q' q0))) (** val psubI : ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol **) let rec psubI cadd copp pop q0 j = function | Pc c -> mkPinj j (paddC cadd (popp copp q0) c) | Pinj (j', q') -> (match Z.pos_sub j' j with | Z0 -> mkPinj j (pop q' q0) | Zpos k -> mkPinj j (pop (Pinj (k, q')) q0) | Zneg k -> mkPinj j' (psubI cadd copp pop q0 k q')) | PX (p2, i, q') -> (match j with | XI j0 -> PX (p2, i, (psubI cadd copp pop q0 (XO j0) q')) | XO j0 -> PX (p2, i, (psubI cadd copp pop q0 (Coq_Pos.pred_double j0) q')) | XH -> PX (p2, i, (pop q' q0))) (** val paddX : 'a1 -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol **) let rec paddX cO ceqb pop p' i' p = match p with | Pc c -> PX (p', i', p) | Pinj (j, q') -> (match j with | XI j0 -> PX (p', i', (Pinj ((XO j0), q'))) | XO j0 -> PX (p', i', (Pinj ((Coq_Pos.pred_double j0), q'))) | XH -> PX (p', i', q')) | PX (p2, i, q') -> (match Z.pos_sub i i' with | Z0 -> mkPX cO ceqb (pop p2 p') i q' | Zpos k -> mkPX cO ceqb (pop (PX (p2, k, (p0 cO))) p') i' q' | Zneg k -> mkPX cO ceqb (paddX cO ceqb pop p' k p2) i q') (** val psubX : 'a1 -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol **) let rec psubX cO copp ceqb pop p' i' p = match p with | Pc c -> PX ((popp copp p'), i', p) | Pinj (j, q') -> (match j with | XI j0 -> PX ((popp copp p'), i', (Pinj ((XO j0), q'))) | XO j0 -> PX ((popp copp p'), i', (Pinj ((Coq_Pos.pred_double j0), q'))) | XH -> PX ((popp copp p'), i', q')) | PX (p2, i, q') -> (match Z.pos_sub i i' with | Z0 -> mkPX cO ceqb (pop p2 p') i q' | Zpos k -> mkPX cO ceqb (pop (PX (p2, k, (p0 cO))) p') i' q' | Zneg k -> mkPX cO ceqb (psubX cO copp ceqb pop p' k p2) i q') (** val padd : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol **) let rec padd cO cadd ceqb p = function | Pc c' -> paddC cadd p c' | Pinj (j', q') -> paddI cadd (padd cO cadd ceqb) q' j' p | PX (p'0, i', q') -> (match p with | Pc c -> PX (p'0, i', (paddC cadd q' c)) | Pinj (j, q0) -> (match j with | XI j0 -> PX (p'0, i', (padd cO cadd ceqb (Pinj ((XO j0), q0)) q')) | XO j0 -> PX (p'0, i', (padd cO cadd ceqb (Pinj ((Coq_Pos.pred_double j0), q0)) q')) | XH -> PX (p'0, i', (padd cO cadd ceqb q0 q'))) | PX (p2, i, q0) -> (match Z.pos_sub i i' with | Z0 -> mkPX cO ceqb (padd cO cadd ceqb p2 p'0) i (padd cO cadd ceqb q0 q') | Zpos k -> mkPX cO ceqb (padd cO cadd ceqb (PX (p2, k, (p0 cO))) p'0) i' (padd cO cadd ceqb q0 q') | Zneg k -> mkPX cO ceqb (paddX cO ceqb (padd cO cadd ceqb) p'0 k p2) i (padd cO cadd ceqb q0 q'))) (** val psub : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol **) let rec psub cO cadd csub copp ceqb p = function | Pc c' -> psubC csub p c' | Pinj (j', q') -> psubI cadd copp (psub cO cadd csub copp ceqb) q' j' p | PX (p'0, i', q') -> (match p with | Pc c -> PX ((popp copp p'0), i', (paddC cadd (popp copp q') c)) | Pinj (j, q0) -> (match j with | XI j0 -> PX ((popp copp p'0), i', (psub cO cadd csub copp ceqb (Pinj ((XO j0), q0)) q')) | XO j0 -> PX ((popp copp p'0), i', (psub cO cadd csub copp ceqb (Pinj ((Coq_Pos.pred_double j0), q0)) q')) | XH -> PX ((popp copp p'0), i', (psub cO cadd csub copp ceqb q0 q'))) | PX (p2, i, q0) -> (match Z.pos_sub i i' with | Z0 -> mkPX cO ceqb (psub 
cO cadd csub copp ceqb p2 p'0) i (psub cO cadd csub copp ceqb q0 q') | Zpos k -> mkPX cO ceqb (psub cO cadd csub copp ceqb (PX (p2, k, (p0 cO))) p'0) i' (psub cO cadd csub copp ceqb q0 q') | Zneg k -> mkPX cO ceqb (psubX cO copp ceqb (psub cO cadd csub copp ceqb) p'0 k p2) i (psub cO cadd csub copp ceqb q0 q'))) (** val pmulC_aux : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 -> 'a1 pol **) let rec pmulC_aux cO cmul ceqb p c = match p with | Pc c' -> Pc (cmul c' c) | Pinj (j, q0) -> mkPinj j (pmulC_aux cO cmul ceqb q0 c) | PX (p2, i, q0) -> mkPX cO ceqb (pmulC_aux cO cmul ceqb p2 c) i (pmulC_aux cO cmul ceqb q0 c) (** val pmulC : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 -> 'a1 pol **) let pmulC cO cI cmul ceqb p c = if ceqb c cO then p0 cO else if ceqb c cI then p else pmulC_aux cO cmul ceqb p c (** val pmulI : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol -> 'a1 pol) -> 'a1 pol -> positive -> 'a1 pol -> 'a1 pol **) let rec pmulI cO cI cmul ceqb pmul0 q0 j = function | Pc c -> mkPinj j (pmulC cO cI cmul ceqb q0 c) | Pinj (j', q') -> (match Z.pos_sub j' j with | Z0 -> mkPinj j (pmul0 q' q0) | Zpos k -> mkPinj j (pmul0 (Pinj (k, q')) q0) | Zneg k -> mkPinj j' (pmulI cO cI cmul ceqb pmul0 q0 k q')) | PX (p', i', q') -> (match j with | XI j' -> mkPX cO ceqb (pmulI cO cI cmul ceqb pmul0 q0 j p') i' (pmulI cO cI cmul ceqb pmul0 q0 (XO j') q') | XO j' -> mkPX cO ceqb (pmulI cO cI cmul ceqb pmul0 q0 j p') i' (pmulI cO cI cmul ceqb pmul0 q0 (Coq_Pos.pred_double j') q') | XH -> mkPX cO ceqb (pmulI cO cI cmul ceqb pmul0 q0 XH p') i' (pmul0 q' q0)) (** val pmul : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol **) let rec pmul cO cI cadd cmul ceqb p p'' = match p'' with | Pc c -> pmulC cO cI cmul ceqb p c | Pinj (j', q') -> pmulI cO cI cmul ceqb (pmul cO cI cadd cmul ceqb) q' j' p | PX (p', i', q') -> (match p with | Pc c -> pmulC cO cI cmul ceqb p'' c | Pinj (j, q0) -> let qQ' = match j with | XI j0 -> pmul cO cI cadd cmul ceqb (Pinj ((XO j0), q0)) q' | XO j0 -> pmul cO cI cadd cmul ceqb (Pinj ((Coq_Pos.pred_double j0), q0)) q' | XH -> pmul cO cI cadd cmul ceqb q0 q' in mkPX cO ceqb (pmul cO cI cadd cmul ceqb p p') i' qQ' | PX (p2, i, q0) -> let qQ' = pmul cO cI cadd cmul ceqb q0 q' in let pQ' = pmulI cO cI cmul ceqb (pmul cO cI cadd cmul ceqb) q' XH p2 in let qP' = pmul cO cI cadd cmul ceqb (mkPinj XH q0) p' in let pP' = pmul cO cI cadd cmul ceqb p2 p' in padd cO cadd ceqb (mkPX cO ceqb (padd cO cadd ceqb (mkPX cO ceqb pP' i (p0 cO)) qP') i' (p0 cO)) (mkPX cO ceqb pQ' i qQ')) (** val psquare : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol **) let rec psquare cO cI cadd cmul ceqb = function | Pc c -> Pc (cmul c c) | Pinj (j, q0) -> Pinj (j, (psquare cO cI cadd cmul ceqb q0)) | PX (p2, i, q0) -> let twoPQ = pmul cO cI cadd cmul ceqb p2 (mkPinj XH (pmulC cO cI cmul ceqb q0 (cadd cI cI))) in let q2 = psquare cO cI cadd cmul ceqb q0 in let p3 = psquare cO cI cadd cmul ceqb p2 in mkPX cO ceqb (padd cO cadd ceqb (mkPX cO ceqb p3 i (p0 cO)) twoPQ) i q2 type 'c pExpr = | PEc of 'c | PEX of positive | PEadd of 'c pExpr * 'c pExpr | PEsub of 'c pExpr * 'c pExpr | PEmul of 'c pExpr * 'c pExpr | PEopp of 'c pExpr | PEpow of 'c pExpr * n (** val mk_X : 'a1 -> 'a1 -> positive -> 'a1 pol **) let mk_X cO cI j = mkPinj_pred j (mkX cO cI) (** val ppow_pos : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 
-> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol) -> 'a1 pol -> 'a1 pol -> positive -> 'a1 pol **) let rec ppow_pos cO cI cadd cmul ceqb subst_l res p = function | XI p3 -> subst_l (pmul cO cI cadd cmul ceqb (ppow_pos cO cI cadd cmul ceqb subst_l (ppow_pos cO cI cadd cmul ceqb subst_l res p p3) p p3) p) | XO p3 -> ppow_pos cO cI cadd cmul ceqb subst_l (ppow_pos cO cI cadd cmul ceqb subst_l res p p3) p p3 | XH -> subst_l (pmul cO cI cadd cmul ceqb res p) (** val ppow_N : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 pol -> 'a1 pol) -> 'a1 pol -> n -> 'a1 pol **) let ppow_N cO cI cadd cmul ceqb subst_l p = function | N0 -> p1 cI | Npos p2 -> ppow_pos cO cI cadd cmul ceqb subst_l (p1 cI) p p2 (** val norm_aux : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pExpr -> 'a1 pol **) let rec norm_aux cO cI cadd cmul csub copp ceqb = function | PEc c -> Pc c | PEX j -> mk_X cO cI j | PEadd (pe1, pe2) -> (match pe1 with | PEopp pe3 -> psub cO cadd csub copp ceqb (norm_aux cO cI cadd cmul csub copp ceqb pe2) (norm_aux cO cI cadd cmul csub copp ceqb pe3) | _ -> (match pe2 with | PEopp pe3 -> psub cO cadd csub copp ceqb (norm_aux cO cI cadd cmul csub copp ceqb pe1) (norm_aux cO cI cadd cmul csub copp ceqb pe3) | _ -> padd cO cadd ceqb (norm_aux cO cI cadd cmul csub copp ceqb pe1) (norm_aux cO cI cadd cmul csub copp ceqb pe2))) | PEsub (pe1, pe2) -> psub cO cadd csub copp ceqb (norm_aux cO cI cadd cmul csub copp ceqb pe1) (norm_aux cO cI cadd cmul csub copp ceqb pe2) | PEmul (pe1, pe2) -> pmul cO cI cadd cmul ceqb (norm_aux cO cI cadd cmul csub copp ceqb pe1) (norm_aux cO cI cadd cmul csub copp ceqb pe2) | PEopp pe1 -> popp copp (norm_aux cO cI cadd cmul csub copp ceqb pe1) | PEpow (pe1, n0) -> ppow_N cO cI cadd cmul ceqb (fun p -> p) (norm_aux cO cI cadd cmul csub copp ceqb pe1) n0 type 'a bFormula = | TT | FF | X | A of 'a | Cj of 'a bFormula * 'a bFormula | D of 'a bFormula * 'a bFormula | N of 'a bFormula | I of 'a bFormula * 'a bFormula (** val map_bformula : ('a1 -> 'a2) -> 'a1 bFormula -> 'a2 bFormula **) let rec map_bformula fct = function | TT -> TT | FF -> FF | X -> X | A a -> A (fct a) | Cj (f1, f2) -> Cj ((map_bformula fct f1), (map_bformula fct f2)) | D (f1, f2) -> D ((map_bformula fct f1), (map_bformula fct f2)) | N f0 -> N (map_bformula fct f0) | I (f1, f2) -> I ((map_bformula fct f1), (map_bformula fct f2)) type 'term' clause = 'term' list type 'term' cnf = 'term' clause list (** val tt : 'a1 cnf **) let tt = [] (** val ff : 'a1 cnf **) let ff = []::[] (** val add_term : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 -> 'a1 clause -> 'a1 clause option **) let rec add_term unsat deduce t1 = function | [] -> (match deduce t1 t1 with | Some u -> if unsat u then None else Some (t1::[]) | None -> Some (t1::[])) | t'::cl0 -> (match deduce t1 t' with | Some u -> if unsat u then None else (match add_term unsat deduce t1 cl0 with | Some cl' -> Some (t'::cl') | None -> None) | None -> (match add_term unsat deduce t1 cl0 with | Some cl' -> Some (t'::cl') | None -> None)) (** val or_clause : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 clause -> 'a1 clause -> 'a1 clause option **) let rec or_clause unsat deduce cl1 cl2 = match cl1 with | [] -> Some cl2 | t1::cl -> (match add_term unsat deduce t1 cl2 with | Some cl' -> or_clause unsat deduce cl cl' | None -> None) (** val or_clause_cnf : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 clause -> 'a1 cnf -> 'a1 
cnf **) let or_clause_cnf unsat deduce t1 f = fold_right (fun e acc -> match or_clause unsat deduce t1 e with | Some cl -> cl::acc | None -> acc) [] f (** val or_cnf : ('a1 -> bool) -> ('a1 -> 'a1 -> 'a1 option) -> 'a1 cnf -> 'a1 cnf -> 'a1 cnf **) let rec or_cnf unsat deduce f f' = match f with | [] -> tt | e::rst -> app (or_cnf unsat deduce rst f') (or_clause_cnf unsat deduce e f') (** val and_cnf : 'a1 cnf -> 'a1 cnf -> 'a1 cnf **) let and_cnf f1 f2 = app f1 f2 (** val xcnf : ('a2 -> bool) -> ('a2 -> 'a2 -> 'a2 option) -> ('a1 -> 'a2 cnf) -> ('a1 -> 'a2 cnf) -> bool -> 'a1 bFormula -> 'a2 cnf **) let rec xcnf unsat deduce normalise0 negate0 pol0 = function | TT -> if pol0 then tt else ff | FF -> if pol0 then ff else tt | X -> ff | A x -> if pol0 then normalise0 x else negate0 x | Cj (e1, e2) -> if pol0 then and_cnf (xcnf unsat deduce normalise0 negate0 pol0 e1) (xcnf unsat deduce normalise0 negate0 pol0 e2) else or_cnf unsat deduce (xcnf unsat deduce normalise0 negate0 pol0 e1) (xcnf unsat deduce normalise0 negate0 pol0 e2) | D (e1, e2) -> if pol0 then or_cnf unsat deduce (xcnf unsat deduce normalise0 negate0 pol0 e1) (xcnf unsat deduce normalise0 negate0 pol0 e2) else and_cnf (xcnf unsat deduce normalise0 negate0 pol0 e1) (xcnf unsat deduce normalise0 negate0 pol0 e2) | N e -> xcnf unsat deduce normalise0 negate0 (negb pol0) e | I (e1, e2) -> if pol0 then or_cnf unsat deduce (xcnf unsat deduce normalise0 negate0 (negb pol0) e1) (xcnf unsat deduce normalise0 negate0 pol0 e2) else and_cnf (xcnf unsat deduce normalise0 negate0 (negb pol0) e1) (xcnf unsat deduce normalise0 negate0 pol0 e2) (** val cnf_checker : ('a1 list -> 'a2 -> bool) -> 'a1 cnf -> 'a2 list -> bool **) let rec cnf_checker checker f l = match f with | [] -> true | e::f0 -> (match l with | [] -> false | c::l0 -> if checker e c then cnf_checker checker f0 l0 else false) (** val tauto_checker : ('a2 -> bool) -> ('a2 -> 'a2 -> 'a2 option) -> ('a1 -> 'a2 cnf) -> ('a1 -> 'a2 cnf) -> ('a2 list -> 'a3 -> bool) -> 'a1 bFormula -> 'a3 list -> bool **) let tauto_checker unsat deduce normalise0 negate0 checker f w = cnf_checker checker (xcnf unsat deduce normalise0 negate0 true f) w (** val cneqb : ('a1 -> 'a1 -> bool) -> 'a1 -> 'a1 -> bool **) let cneqb ceqb x y = negb (ceqb x y) (** val cltb : ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 -> 'a1 -> bool **) let cltb ceqb cleb x y = (&&) (cleb x y) (cneqb ceqb x y) type 'c polC = 'c pol type op1 = | Equal | NonEqual | Strict | NonStrict type 'c nFormula = 'c polC * op1 (** val opMult : op1 -> op1 -> op1 option **) let opMult o o' = match o with | Equal -> Some Equal | NonEqual -> (match o' with | Strict -> None | NonStrict -> None | x -> Some x) | Strict -> (match o' with | NonEqual -> None | _ -> Some o') | NonStrict -> (match o' with | NonEqual -> None | Strict -> Some NonStrict | x -> Some x) (** val opAdd : op1 -> op1 -> op1 option **) let opAdd o o' = match o with | Equal -> Some o' | NonEqual -> (match o' with | Equal -> Some NonEqual | _ -> None) | Strict -> (match o' with | NonEqual -> None | _ -> Some Strict) | NonStrict -> (match o' with | Equal -> Some NonStrict | NonEqual -> None | x -> Some x) type 'c psatz = | PsatzIn of nat | PsatzSquare of 'c polC | PsatzMulC of 'c polC * 'c psatz | PsatzMulE of 'c psatz * 'c psatz | PsatzAdd of 'c psatz * 'c psatz | PsatzC of 'c | PsatzZ (** val map_option : ('a1 -> 'a2 option) -> 'a1 option -> 'a2 option **) let map_option f = function | Some x -> f x | None -> None (** val map_option2 : ('a1 -> 'a2 -> 'a3 option) -> 'a1 
option -> 'a2 option -> 'a3 option **) let map_option2 f o o' = match o with | Some x -> (match o' with | Some x' -> f x x' | None -> None) | None -> None (** val pexpr_times_nformula : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 polC -> 'a1 nFormula -> 'a1 nFormula option **) let pexpr_times_nformula cO cI cplus ctimes ceqb e = function | ef,o -> (match o with | Equal -> Some ((pmul cO cI cplus ctimes ceqb e ef),Equal) | _ -> None) (** val nformula_times_nformula : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula -> 'a1 nFormula -> 'a1 nFormula option **) let nformula_times_nformula cO cI cplus ctimes ceqb f1 f2 = let e1,o1 = f1 in let e2,o2 = f2 in map_option (fun x -> Some ((pmul cO cI cplus ctimes ceqb e1 e2),x)) (opMult o1 o2) (** val nformula_plus_nformula : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula -> 'a1 nFormula -> 'a1 nFormula option **) let nformula_plus_nformula cO cplus ceqb f1 f2 = let e1,o1 = f1 in let e2,o2 = f2 in map_option (fun x -> Some ((padd cO cplus ceqb e1 e2),x)) (opAdd o1 o2) (** val eval_Psatz : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula list -> 'a1 psatz -> 'a1 nFormula option **) let rec eval_Psatz cO cI cplus ctimes ceqb cleb l = function | PsatzIn n0 -> Some (nth n0 l ((Pc cO),Equal)) | PsatzSquare e0 -> Some ((psquare cO cI cplus ctimes ceqb e0),NonStrict) | PsatzMulC (re, e0) -> map_option (pexpr_times_nformula cO cI cplus ctimes ceqb re) (eval_Psatz cO cI cplus ctimes ceqb cleb l e0) | PsatzMulE (f1, f2) -> map_option2 (nformula_times_nformula cO cI cplus ctimes ceqb) (eval_Psatz cO cI cplus ctimes ceqb cleb l f1) (eval_Psatz cO cI cplus ctimes ceqb cleb l f2) | PsatzAdd (f1, f2) -> map_option2 (nformula_plus_nformula cO cplus ceqb) (eval_Psatz cO cI cplus ctimes ceqb cleb l f1) (eval_Psatz cO cI cplus ctimes ceqb cleb l f2) | PsatzC c -> if cltb ceqb cleb cO c then Some ((Pc c),Strict) else None | PsatzZ -> Some ((Pc cO),Equal) (** val check_inconsistent : 'a1 -> ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula -> bool **) let check_inconsistent cO ceqb cleb = function | e,op -> (match e with | Pc c -> (match op with | Equal -> cneqb ceqb c cO | NonEqual -> ceqb c cO | Strict -> cleb c cO | NonStrict -> cltb ceqb cleb c cO) | _ -> false) (** val check_normalised_formulas : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> ('a1 -> 'a1 -> bool) -> 'a1 nFormula list -> 'a1 psatz -> bool **) let check_normalised_formulas cO cI cplus ctimes ceqb cleb l cm = match eval_Psatz cO cI cplus ctimes ceqb cleb l cm with | Some f -> check_inconsistent cO ceqb cleb f | None -> false type op2 = | OpEq | OpNEq | OpLe | OpGe | OpLt | OpGt type 't formula = { flhs : 't pExpr; fop : op2; frhs : 't pExpr } (** val flhs : 'a1 formula -> 'a1 pExpr **) let flhs x = x.flhs (** val fop : 'a1 formula -> op2 **) let fop x = x.fop (** val frhs : 'a1 formula -> 'a1 pExpr **) let frhs x = x.frhs (** val norm : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pExpr -> 'a1 pol **) let norm cO cI cplus ctimes cminus copp ceqb = norm_aux cO cI cplus ctimes cminus copp ceqb (** val psub0 : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol **) let psub0 cO cplus cminus copp ceqb = psub cO cplus cminus copp 
ceqb (** val padd0 : 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 pol -> 'a1 pol -> 'a1 pol **) let padd0 cO cplus ceqb = padd cO cplus ceqb (** val xnormalise : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula list **) let xnormalise cO cI cplus ctimes cminus copp ceqb t1 = let { flhs = lhs; fop = o; frhs = rhs } = t1 in let lhs0 = norm cO cI cplus ctimes cminus copp ceqb lhs in let rhs0 = norm cO cI cplus ctimes cminus copp ceqb rhs in (match o with | OpEq -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),Strict)::(((psub0 cO cplus cminus copp ceqb rhs0 lhs0),Strict)::[]) | OpNEq -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),Equal)::[] | OpLe -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),Strict)::[] | OpGe -> ((psub0 cO cplus cminus copp ceqb rhs0 lhs0),Strict)::[] | OpLt -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),NonStrict)::[] | OpGt -> ((psub0 cO cplus cminus copp ceqb rhs0 lhs0),NonStrict)::[]) (** val cnf_normalise : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula cnf **) let cnf_normalise cO cI cplus ctimes cminus copp ceqb t1 = map (fun x -> x::[]) (xnormalise cO cI cplus ctimes cminus copp ceqb t1) (** val xnegate : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula list **) let xnegate cO cI cplus ctimes cminus copp ceqb t1 = let { flhs = lhs; fop = o; frhs = rhs } = t1 in let lhs0 = norm cO cI cplus ctimes cminus copp ceqb lhs in let rhs0 = norm cO cI cplus ctimes cminus copp ceqb rhs in (match o with | OpEq -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),Equal)::[] | OpNEq -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),Strict)::(((psub0 cO cplus cminus copp ceqb rhs0 lhs0),Strict)::[]) | OpLe -> ((psub0 cO cplus cminus copp ceqb rhs0 lhs0),NonStrict)::[] | OpGe -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),NonStrict)::[] | OpLt -> ((psub0 cO cplus cminus copp ceqb rhs0 lhs0),Strict)::[] | OpGt -> ((psub0 cO cplus cminus copp ceqb lhs0 rhs0),Strict)::[]) (** val cnf_negate : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 formula -> 'a1 nFormula cnf **) let cnf_negate cO cI cplus ctimes cminus copp ceqb t1 = map (fun x -> x::[]) (xnegate cO cI cplus ctimes cminus copp ceqb t1) (** val xdenorm : positive -> 'a1 pol -> 'a1 pExpr **) let rec xdenorm jmp = function | Pc c -> PEc c | Pinj (j, p2) -> xdenorm (Coq_Pos.add j jmp) p2 | PX (p2, j, q0) -> PEadd ((PEmul ((xdenorm jmp p2), (PEpow ((PEX jmp), (Npos j))))), (xdenorm (Coq_Pos.succ jmp) q0)) (** val denorm : 'a1 pol -> 'a1 pExpr **) let denorm p = xdenorm XH p (** val map_PExpr : ('a2 -> 'a1) -> 'a2 pExpr -> 'a1 pExpr **) let rec map_PExpr c_of_S = function | PEc c -> PEc (c_of_S c) | PEX p -> PEX p | PEadd (e1, e2) -> PEadd ((map_PExpr c_of_S e1), (map_PExpr c_of_S e2)) | PEsub (e1, e2) -> PEsub ((map_PExpr c_of_S e1), (map_PExpr c_of_S e2)) | PEmul (e1, e2) -> PEmul ((map_PExpr c_of_S e1), (map_PExpr c_of_S e2)) | PEopp e0 -> PEopp (map_PExpr c_of_S e0) | PEpow (e0, n0) -> PEpow ((map_PExpr c_of_S e0), n0) (** val map_Formula : ('a2 -> 'a1) -> 'a2 formula -> 'a1 formula **) let map_Formula c_of_S f = let { flhs = l; fop = o; frhs = r } = f in { flhs = (map_PExpr c_of_S l); fop = o; frhs = (map_PExpr c_of_S r) } (** val 
simpl_cone : 'a1 -> 'a1 -> ('a1 -> 'a1 -> 'a1) -> ('a1 -> 'a1 -> bool) -> 'a1 psatz -> 'a1 psatz **) let simpl_cone cO cI ctimes ceqb e = match e with | PsatzSquare t1 -> (match t1 with | Pc c -> if ceqb cO c then PsatzZ else PsatzC (ctimes c c) | _ -> PsatzSquare t1) | PsatzMulE (t1, t2) -> (match t1 with | PsatzMulE (x, x0) -> (match x with | PsatzC p2 -> (match t2 with | PsatzC c -> PsatzMulE ((PsatzC (ctimes c p2)), x0) | PsatzZ -> PsatzZ | _ -> e) | _ -> (match x0 with | PsatzC p2 -> (match t2 with | PsatzC c -> PsatzMulE ((PsatzC (ctimes c p2)), x) | PsatzZ -> PsatzZ | _ -> e) | _ -> (match t2 with | PsatzC c -> if ceqb cI c then t1 else PsatzMulE (t1, t2) | PsatzZ -> PsatzZ | _ -> e))) | PsatzC c -> (match t2 with | PsatzMulE (x, x0) -> (match x with | PsatzC p2 -> PsatzMulE ((PsatzC (ctimes c p2)), x0) | _ -> (match x0 with | PsatzC p2 -> PsatzMulE ((PsatzC (ctimes c p2)), x) | _ -> if ceqb cI c then t2 else PsatzMulE (t1, t2))) | PsatzAdd (y, z0) -> PsatzAdd ((PsatzMulE ((PsatzC c), y)), (PsatzMulE ((PsatzC c), z0))) | PsatzC c0 -> PsatzC (ctimes c c0) | PsatzZ -> PsatzZ | _ -> if ceqb cI c then t2 else PsatzMulE (t1, t2)) | PsatzZ -> PsatzZ | _ -> (match t2 with | PsatzC c -> if ceqb cI c then t1 else PsatzMulE (t1, t2) | PsatzZ -> PsatzZ | _ -> e)) | PsatzAdd (t1, t2) -> (match t1 with | PsatzZ -> t2 | _ -> (match t2 with | PsatzZ -> t1 | _ -> PsatzAdd (t1, t2))) | _ -> e type q = { qnum : z; qden : positive } (** val qnum : q -> z **) let qnum x = x.qnum (** val qden : q -> positive **) let qden x = x.qden (** val qeq_bool : q -> q -> bool **) let qeq_bool x y = zeq_bool (Z.mul x.qnum (Zpos y.qden)) (Z.mul y.qnum (Zpos x.qden)) (** val qle_bool : q -> q -> bool **) let qle_bool x y = Z.leb (Z.mul x.qnum (Zpos y.qden)) (Z.mul y.qnum (Zpos x.qden)) (** val qplus : q -> q -> q **) let qplus x y = { qnum = (Z.add (Z.mul x.qnum (Zpos y.qden)) (Z.mul y.qnum (Zpos x.qden))); qden = (Coq_Pos.mul x.qden y.qden) } (** val qmult : q -> q -> q **) let qmult x y = { qnum = (Z.mul x.qnum y.qnum); qden = (Coq_Pos.mul x.qden y.qden) } (** val qopp : q -> q **) let qopp x = { qnum = (Z.opp x.qnum); qden = x.qden } (** val qminus : q -> q -> q **) let qminus x y = qplus x (qopp y) (** val qinv : q -> q **) let qinv x = match x.qnum with | Z0 -> { qnum = Z0; qden = XH } | Zpos p -> { qnum = (Zpos x.qden); qden = p } | Zneg p -> { qnum = (Zneg x.qden); qden = p } (** val qpower_positive : q -> positive -> q **) let qpower_positive q0 p = pow_pos qmult q0 p (** val qpower : q -> z -> q **) let qpower q0 = function | Z0 -> { qnum = (Zpos XH); qden = XH } | Zpos p -> qpower_positive q0 p | Zneg p -> qinv (qpower_positive q0 p) type 'a t0 = | Empty | Leaf of 'a | Node of 'a t0 * 'a * 'a t0 (** val find : 'a1 -> 'a1 t0 -> positive -> 'a1 **) let rec find default vm p = match vm with | Empty -> default | Leaf i -> i | Node (l, e, r) -> (match p with | XI p2 -> find default r p2 | XO p2 -> find default l p2 | XH -> e) type zWitness = z psatz (** val zWeakChecker : z nFormula list -> z psatz -> bool **) let zWeakChecker = check_normalised_formulas Z0 (Zpos XH) Z.add Z.mul zeq_bool Z.leb (** val psub1 : z pol -> z pol -> z pol **) let psub1 = psub0 Z0 Z.add Z.sub Z.opp zeq_bool (** val padd1 : z pol -> z pol -> z pol **) let padd1 = padd0 Z0 Z.add zeq_bool (** val norm0 : z pExpr -> z pol **) let norm0 = norm Z0 (Zpos XH) Z.add Z.mul Z.sub Z.opp zeq_bool (** val xnormalise0 : z formula -> z nFormula list **) let xnormalise0 t1 = let { flhs = lhs; fop = o; frhs = rhs } = t1 in let lhs0 = norm0 lhs in let 
rhs0 = norm0 rhs in (match o with | OpEq -> ((psub1 lhs0 (padd1 rhs0 (Pc (Zpos XH)))),NonStrict)::(((psub1 rhs0 (padd1 lhs0 (Pc (Zpos XH)))),NonStrict)::[]) | OpNEq -> ((psub1 lhs0 rhs0),Equal)::[] | OpLe -> ((psub1 lhs0 (padd1 rhs0 (Pc (Zpos XH)))),NonStrict)::[] | OpGe -> ((psub1 rhs0 (padd1 lhs0 (Pc (Zpos XH)))),NonStrict)::[] | OpLt -> ((psub1 lhs0 rhs0),NonStrict)::[] | OpGt -> ((psub1 rhs0 lhs0),NonStrict)::[]) (** val normalise : z formula -> z nFormula cnf **) let normalise t1 = map (fun x -> x::[]) (xnormalise0 t1) (** val xnegate0 : z formula -> z nFormula list **) let xnegate0 t1 = let { flhs = lhs; fop = o; frhs = rhs } = t1 in let lhs0 = norm0 lhs in let rhs0 = norm0 rhs in (match o with | OpEq -> ((psub1 lhs0 rhs0),Equal)::[] | OpNEq -> ((psub1 lhs0 (padd1 rhs0 (Pc (Zpos XH)))),NonStrict)::(((psub1 rhs0 (padd1 lhs0 (Pc (Zpos XH)))),NonStrict)::[]) | OpLe -> ((psub1 rhs0 lhs0),NonStrict)::[] | OpGe -> ((psub1 lhs0 rhs0),NonStrict)::[] | OpLt -> ((psub1 rhs0 (padd1 lhs0 (Pc (Zpos XH)))),NonStrict)::[] | OpGt -> ((psub1 lhs0 (padd1 rhs0 (Pc (Zpos XH)))),NonStrict)::[]) (** val negate : z formula -> z nFormula cnf **) let negate t1 = map (fun x -> x::[]) (xnegate0 t1) (** val zunsat : z nFormula -> bool **) let zunsat = check_inconsistent Z0 zeq_bool Z.leb (** val zdeduce : z nFormula -> z nFormula -> z nFormula option **) let zdeduce = nformula_plus_nformula Z0 Z.add zeq_bool (** val ceiling : z -> z -> z **) let ceiling a b = let q0,r = Z.div_eucl a b in (match r with | Z0 -> q0 | _ -> Z.add q0 (Zpos XH)) type zArithProof = | DoneProof | RatProof of zWitness * zArithProof | CutProof of zWitness * zArithProof | EnumProof of zWitness * zWitness * zArithProof list (** val zgcdM : z -> z -> z **) let zgcdM x y = Z.max (Z.gcd x y) (Zpos XH) (** val zgcd_pol : z polC -> z * z **) let rec zgcd_pol = function | Pc c -> Z0,c | Pinj (p2, p3) -> zgcd_pol p3 | PX (p2, p3, q0) -> let g1,c1 = zgcd_pol p2 in let g2,c2 = zgcd_pol q0 in (zgcdM (zgcdM g1 c1) g2),c2 (** val zdiv_pol : z polC -> z -> z polC **) let rec zdiv_pol p x = match p with | Pc c -> Pc (Z.div c x) | Pinj (j, p2) -> Pinj (j, (zdiv_pol p2 x)) | PX (p2, j, q0) -> PX ((zdiv_pol p2 x), j, (zdiv_pol q0 x)) (** val makeCuttingPlane : z polC -> z polC * z **) let makeCuttingPlane p = let g,c = zgcd_pol p in if Z.gtb g Z0 then (zdiv_pol (psubC Z.sub p c) g),(Z.opp (ceiling (Z.opp c) g)) else p,Z0 (** val genCuttingPlane : z nFormula -> ((z polC * z) * op1) option **) let genCuttingPlane = function | e,op -> (match op with | Equal -> let g,c = zgcd_pol e in if (&&) (Z.gtb g Z0) ((&&) (negb (zeq_bool c Z0)) (negb (zeq_bool (Z.gcd g c) g))) then None else Some ((makeCuttingPlane e),Equal) | NonEqual -> Some ((e,Z0),op) | Strict -> Some ((makeCuttingPlane (psubC Z.sub e (Zpos XH))),NonStrict) | NonStrict -> Some ((makeCuttingPlane e),NonStrict)) (** val nformula_of_cutting_plane : ((z polC * z) * op1) -> z nFormula **) let nformula_of_cutting_plane = function | e_z,o -> let e,z0 = e_z in (padd1 e (Pc z0)),o (** val is_pol_Z0 : z polC -> bool **) let is_pol_Z0 = function | Pc z0 -> (match z0 with | Z0 -> true | _ -> false) | _ -> false (** val eval_Psatz0 : z nFormula list -> zWitness -> z nFormula option **) let eval_Psatz0 = eval_Psatz Z0 (Zpos XH) Z.add Z.mul zeq_bool Z.leb (** val valid_cut_sign : op1 -> bool **) let valid_cut_sign = function | Equal -> true | NonStrict -> true | _ -> false (** val zChecker : z nFormula list -> zArithProof -> bool **) let rec zChecker l = function | DoneProof -> false | RatProof (w, pf0) -> (match 
eval_Psatz0 l w with | Some f -> if zunsat f then true else zChecker (f::l) pf0 | None -> false) | CutProof (w, pf0) -> (match eval_Psatz0 l w with | Some f -> (match genCuttingPlane f with | Some cp -> zChecker ((nformula_of_cutting_plane cp)::l) pf0 | None -> true) | None -> false) | EnumProof (w1, w2, pf0) -> (match eval_Psatz0 l w1 with | Some f1 -> (match eval_Psatz0 l w2 with | Some f2 -> (match genCuttingPlane f1 with | Some p -> let p2,op3 = p in let e1,z1 = p2 in (match genCuttingPlane f2 with | Some p3 -> let p4,op4 = p3 in let e2,z2 = p4 in if (&&) ((&&) (valid_cut_sign op3) (valid_cut_sign op4)) (is_pol_Z0 (padd1 e1 e2)) then let rec label pfs lb ub = match pfs with | [] -> Z.gtb lb ub | pf1::rsr -> (&&) (zChecker (((psub1 e1 (Pc lb)),Equal)::l) pf1) (label rsr (Z.add lb (Zpos XH)) ub) in label pf0 (Z.opp z1) z2 else false | None -> true) | None -> true) | None -> false) | None -> false) (** val zTautoChecker : z formula bFormula -> zArithProof list -> bool **) let zTautoChecker f w = tauto_checker zunsat zdeduce normalise negate zChecker f w type qWitness = q psatz (** val qWeakChecker : q nFormula list -> q psatz -> bool **) let qWeakChecker = check_normalised_formulas { qnum = Z0; qden = XH } { qnum = (Zpos XH); qden = XH } qplus qmult qeq_bool qle_bool (** val qnormalise : q formula -> q nFormula cnf **) let qnormalise = cnf_normalise { qnum = Z0; qden = XH } { qnum = (Zpos XH); qden = XH } qplus qmult qminus qopp qeq_bool (** val qnegate : q formula -> q nFormula cnf **) let qnegate = cnf_negate { qnum = Z0; qden = XH } { qnum = (Zpos XH); qden = XH } qplus qmult qminus qopp qeq_bool (** val qunsat : q nFormula -> bool **) let qunsat = check_inconsistent { qnum = Z0; qden = XH } qeq_bool qle_bool (** val qdeduce : q nFormula -> q nFormula -> q nFormula option **) let qdeduce = nformula_plus_nformula { qnum = Z0; qden = XH } qplus qeq_bool (** val qTautoChecker : q formula bFormula -> qWitness list -> bool **) let qTautoChecker f w = tauto_checker qunsat qdeduce qnormalise qnegate qWeakChecker f w type rcst = | C0 | C1 | CQ of q | CZ of z | CPlus of rcst * rcst | CMinus of rcst * rcst | CMult of rcst * rcst | CInv of rcst | COpp of rcst (** val q_of_Rcst : rcst -> q **) let rec q_of_Rcst = function | C0 -> { qnum = Z0; qden = XH } | C1 -> { qnum = (Zpos XH); qden = XH } | CQ q0 -> q0 | CZ z0 -> { qnum = z0; qden = XH } | CPlus (r1, r2) -> qplus (q_of_Rcst r1) (q_of_Rcst r2) | CMinus (r1, r2) -> qminus (q_of_Rcst r1) (q_of_Rcst r2) | CMult (r1, r2) -> qmult (q_of_Rcst r1) (q_of_Rcst r2) | CInv r0 -> qinv (q_of_Rcst r0) | COpp r0 -> qopp (q_of_Rcst r0) type rWitness = q psatz (** val rWeakChecker : q nFormula list -> q psatz -> bool **) let rWeakChecker = check_normalised_formulas { qnum = Z0; qden = XH } { qnum = (Zpos XH); qden = XH } qplus qmult qeq_bool qle_bool (** val rnormalise : q formula -> q nFormula cnf **) let rnormalise = cnf_normalise { qnum = Z0; qden = XH } { qnum = (Zpos XH); qden = XH } qplus qmult qminus qopp qeq_bool (** val rnegate : q formula -> q nFormula cnf **) let rnegate = cnf_negate { qnum = Z0; qden = XH } { qnum = (Zpos XH); qden = XH } qplus qmult qminus qopp qeq_bool (** val runsat : q nFormula -> bool **) let runsat = check_inconsistent { qnum = Z0; qden = XH } qeq_bool qle_bool (** val rdeduce : q nFormula -> q nFormula -> q nFormula option **) let rdeduce = nformula_plus_nformula { qnum = Z0; qden = XH } qplus qeq_bool (** val rTautoChecker : rcst formula bFormula -> rWitness list -> bool **) let rTautoChecker f w = tauto_checker runsat 
rdeduce rnormalise rnegate rWeakChecker (map_bformula (map_Formula q_of_Rcst) f) w coq-8.4pl2/plugins/micromega/sos.ml0000640000175000001440000022125311443525062016420 0ustar notinusers(* ========================================================================= *) (* - This code originates from John Harrison's HOL LIGHT 2.30 *) (* (see file LICENSE.sos for license, copyright and disclaimer) *) (* - Laurent Théry (thery@sophia.inria.fr) has isolated the HOL *) (* independent bits *) (* - Frédéric Besson (fbesson@irisa.fr) is using it to feed micromega *) (* ========================================================================= *) (* ========================================================================= *) (* Nonlinear universal reals procedure using SOS decomposition. *) (* ========================================================================= *) open Num;; open List;; open Sos_types;; open Sos_lib;; (* prioritize_real();; *) let debugging = ref false;; exception Sanity;; exception Unsolvable;; (* ------------------------------------------------------------------------- *) (* Turn a rational into a decimal string with d sig digits. *) (* ------------------------------------------------------------------------- *) let decimalize = let rec normalize y = if abs_num y </ Int 1 then normalize (y */ Int 10) - 1 else if abs_num y >=/ Int 10 then normalize (y // Int 10) + 1 else 0 in fun d x -> if x =/ Int 0 then "0.0" else let y = abs_num x in let e = normalize y in let z = pow10(-e) */ y +/ Int 1 in let k = round_num(pow10 d */ z) in (if x </ Int 0 then "-0." else "0.") ^ implode(tl(explode(string_of_num k))) ^ (if e = 0 then "" else "e"^string_of_int e);; (* ------------------------------------------------------------------------- *) (* Iterations over numbers, and lists indexed by numbers. *) (* ------------------------------------------------------------------------- *) let rec itern k l f a = match l with [] -> a | h::t -> itern (k + 1) t f (f h k a);; let rec iter (m,n) f a = if n < m then a else iter (m+1,n) f (f m a);; (* ------------------------------------------------------------------------- *) (* The main types. *) (* ------------------------------------------------------------------------- *) type vector = int*(int,num)func;; type matrix = (int*int)*(int*int,num)func;; type monomial = (vname,int)func;; type poly = (monomial,num)func;; (* ------------------------------------------------------------------------- *) (* Assignment avoiding zeros. *) (* ------------------------------------------------------------------------- *) let (|-->) x y a = if y =/ Int 0 then a else (x |-> y) a;; (* ------------------------------------------------------------------------- *) (* This can be generic. *) (* ------------------------------------------------------------------------- *) let element (d,v) i = tryapplyd v i (Int 0);; let mapa f (d,v) = d,foldl (fun a i c -> (i |--> f(c)) a) undefined v;; let is_zero (d,v) = match v with Empty -> true | _ -> false;; (* ------------------------------------------------------------------------- *) (* Vectors. Conventionally indexed 1..n.
*) (* ------------------------------------------------------------------------- *) let vector_0 n = (n,undefined:vector);; let dim (v:vector) = fst v;; let vector_const c n = if c =/ Int 0 then vector_0 n else (n,itlist (fun k -> k |-> c) (1--n) undefined :vector);; let vector_1 = vector_const (Int 1);; let vector_cmul c (v:vector) = let n = dim v in if c =/ Int 0 then vector_0 n else n,mapf (fun x -> c */ x) (snd v) let vector_neg (v:vector) = (fst v,mapf minus_num (snd v) :vector);; let vector_add (v1:vector) (v2:vector) = let m = dim v1 and n = dim v2 in if m <> n then failwith "vector_add: incompatible dimensions" else (n,combine (+/) (fun x -> x =/ Int 0) (snd v1) (snd v2) :vector);; let vector_sub v1 v2 = vector_add v1 (vector_neg v2);; let vector_dot (v1:vector) (v2:vector) = let m = dim v1 and n = dim v2 in if m <> n then failwith "vector_add: incompatible dimensions" else foldl (fun a i x -> x +/ a) (Int 0) (combine ( */ ) (fun x -> x =/ Int 0) (snd v1) (snd v2));; let vector_of_list l = let n = length l in (n,itlist2 (|->) (1--n) l undefined :vector);; (* ------------------------------------------------------------------------- *) (* Matrices; again rows and columns indexed from 1. *) (* ------------------------------------------------------------------------- *) let matrix_0 (m,n) = ((m,n),undefined:matrix);; let dimensions (m:matrix) = fst m;; let matrix_const c (m,n as mn) = if m <> n then failwith "matrix_const: needs to be square" else if c =/ Int 0 then matrix_0 mn else (mn,itlist (fun k -> (k,k) |-> c) (1--n) undefined :matrix);; let matrix_1 = matrix_const (Int 1);; let matrix_cmul c (m:matrix) = let (i,j) = dimensions m in if c =/ Int 0 then matrix_0 (i,j) else (i,j),mapf (fun x -> c */ x) (snd m);; let matrix_neg (m:matrix) = (dimensions m,mapf minus_num (snd m) :matrix);; let matrix_add (m1:matrix) (m2:matrix) = let d1 = dimensions m1 and d2 = dimensions m2 in if d1 <> d2 then failwith "matrix_add: incompatible dimensions" else (d1,combine (+/) (fun x -> x =/ Int 0) (snd m1) (snd m2) :matrix);; let matrix_sub m1 m2 = matrix_add m1 (matrix_neg m2);; let row k (m:matrix) = let i,j = dimensions m in (j, foldl (fun a (i,j) c -> if i = k then (j |-> c) a else a) undefined (snd m) : vector);; let column k (m:matrix) = let i,j = dimensions m in (i, foldl (fun a (i,j) c -> if j = k then (i |-> c) a else a) undefined (snd m) : vector);; let transp (m:matrix) = let i,j = dimensions m in ((j,i),foldl (fun a (i,j) c -> ((j,i) |-> c) a) undefined (snd m) :matrix);; let diagonal (v:vector) = let n = dim v in ((n,n),foldl (fun a i c -> ((i,i) |-> c) a) undefined (snd v) : matrix);; let matrix_of_list l = let m = length l in if m = 0 then matrix_0 (0,0) else let n = length (hd l) in (m,n),itern 1 l (fun v i -> itern 1 v (fun c j -> (i,j) |-> c)) undefined;; (* ------------------------------------------------------------------------- *) (* Monomials. 
*) (* ------------------------------------------------------------------------- *) let monomial_eval assig (m:monomial) = foldl (fun a x k -> a */ power_num (apply assig x) (Int k)) (Int 1) m;; let monomial_1 = (undefined:monomial);; let monomial_var x = (x |=> 1 :monomial);; let (monomial_mul:monomial->monomial->monomial) = combine (+) (fun x -> false);; let monomial_pow (m:monomial) k = if k = 0 then monomial_1 else mapf (fun x -> k * x) m;; let monomial_divides (m1:monomial) (m2:monomial) = foldl (fun a x k -> tryapplyd m2 x 0 >= k & a) true m1;; let monomial_div (m1:monomial) (m2:monomial) = let m = combine (+) (fun x -> x = 0) m1 (mapf (fun x -> -x) m2) in if foldl (fun a x k -> k >= 0 & a) true m then m else failwith "monomial_div: non-divisible";; let monomial_degree x (m:monomial) = tryapplyd m x 0;; let monomial_lcm (m1:monomial) (m2:monomial) = (itlist (fun x -> x |-> max (monomial_degree x m1) (monomial_degree x m2)) (union (dom m1) (dom m2)) undefined :monomial);; let monomial_multidegree (m:monomial) = foldl (fun a x k -> k + a) 0 m;; let monomial_variables m = dom m;; (* ------------------------------------------------------------------------- *) (* Polynomials. *) (* ------------------------------------------------------------------------- *) let eval assig (p:poly) = foldl (fun a m c -> a +/ c */ monomial_eval assig m) (Int 0) p;; let poly_0 = (undefined:poly);; let poly_isconst (p:poly) = foldl (fun a m c -> m = monomial_1 & a) true p;; let poly_var x = ((monomial_var x) |=> Int 1 :poly);; let poly_const c = if c =/ Int 0 then poly_0 else (monomial_1 |=> c);; let poly_cmul c (p:poly) = if c =/ Int 0 then poly_0 else mapf (fun x -> c */ x) p;; let poly_neg (p:poly) = (mapf minus_num p :poly);; let poly_add (p1:poly) (p2:poly) = (combine (+/) (fun x -> x =/ Int 0) p1 p2 :poly);; let poly_sub p1 p2 = poly_add p1 (poly_neg p2);; let poly_cmmul (c,m) (p:poly) = if c =/ Int 0 then poly_0 else if m = monomial_1 then mapf (fun d -> c */ d) p else foldl (fun a m' d -> (monomial_mul m m' |-> c */ d) a) poly_0 p;; let poly_mul (p1:poly) (p2:poly) = foldl (fun a m c -> poly_add (poly_cmmul (c,m) p2) a) poly_0 p1;; let poly_div (p1:poly) (p2:poly) = if not(poly_isconst p2) then failwith "poly_div: non-constant" else let c = eval undefined p2 in if c =/ Int 0 then failwith "poly_div: division by zero" else poly_cmul (Int 1 // c) p1;; let poly_square p = poly_mul p p;; let rec poly_pow p k = if k = 0 then poly_const (Int 1) else if k = 1 then p else let q = poly_square(poly_pow p (k / 2)) in if k mod 2 = 1 then poly_mul p q else q;; let poly_exp p1 p2 = if not(poly_isconst p2) then failwith "poly_exp: not a constant" else poly_pow p1 (Num.int_of_num (eval undefined p2));; let degree x (p:poly) = foldl (fun a m c -> max (monomial_degree x m) a) 0 p;; let multidegree (p:poly) = foldl (fun a m c -> max (monomial_multidegree m) a) 0 p;; let poly_variables (p:poly) = foldr (fun m c -> union (monomial_variables m)) p [];; (* ------------------------------------------------------------------------- *) (* Order monomials for human presentation. 
*) (* ------------------------------------------------------------------------- *) let humanorder_varpow (x1,k1) (x2,k2) = x1 < x2 or x1 = x2 & k1 > k2;; let humanorder_monomial = let rec ord l1 l2 = match (l1,l2) with _,[] -> true | [],_ -> false | h1::t1,h2::t2 -> humanorder_varpow h1 h2 or h1 = h2 & ord t1 t2 in fun m1 m2 -> m1 = m2 or ord (sort humanorder_varpow (graph m1)) (sort humanorder_varpow (graph m2));; (* ------------------------------------------------------------------------- *) (* Conversions to strings. *) (* ------------------------------------------------------------------------- *) let string_of_vector min_size max_size (v:vector) = let n_raw = dim v in if n_raw = 0 then "[]" else let n = max min_size (min n_raw max_size) in let xs = map ((o) string_of_num (element v)) (1--n) in "[" ^ end_itlist (fun s t -> s ^ ", " ^ t) xs ^ (if n_raw > max_size then ", ...]" else "]");; let string_of_matrix max_size (m:matrix) = let i_raw,j_raw = dimensions m in let i = min max_size i_raw and j = min max_size j_raw in let rstr = map (fun k -> string_of_vector j j (row k m)) (1--i) in "["^end_itlist(fun s t -> s^";\n "^t) rstr ^ (if j > max_size then "\n ...]" else "]");; let string_of_vname (v:vname): string = (v: string);; let rec string_of_term t = match t with Opp t1 -> "(- " ^ string_of_term t1 ^ ")" | Add (t1, t2) -> "(" ^ (string_of_term t1) ^ " + " ^ (string_of_term t2) ^ ")" | Sub (t1, t2) -> "(" ^ (string_of_term t1) ^ " - " ^ (string_of_term t2) ^ ")" | Mul (t1, t2) -> "(" ^ (string_of_term t1) ^ " * " ^ (string_of_term t2) ^ ")" | Inv t1 -> "(/ " ^ string_of_term t1 ^ ")" | Div (t1, t2) -> "(" ^ (string_of_term t1) ^ " / " ^ (string_of_term t2) ^ ")" | Pow (t1, n1) -> "(" ^ (string_of_term t1) ^ " ^ " ^ (string_of_int n1) ^ ")" | Zero -> "0" | Var v -> "x" ^ (string_of_vname v) | Const x -> string_of_num x;; let string_of_varpow x k = if k = 1 then string_of_vname x else string_of_vname x^"^"^string_of_int k;; let string_of_monomial m = if m = monomial_1 then "1" else let vps = List.fold_right (fun (x,k) a -> string_of_varpow x k :: a) (sort humanorder_varpow (graph m)) [] in end_itlist (fun s t -> s^"*"^t) vps;; let string_of_cmonomial (c,m) = if m = monomial_1 then string_of_num c else if c =/ Int 1 then string_of_monomial m else string_of_num c ^ "*" ^ string_of_monomial m;; let string_of_poly (p:poly) = if p = poly_0 then "<<0>>" else let cms = sort (fun (m1,_) (m2,_) -> humanorder_monomial m1 m2) (graph p) in let s = List.fold_left (fun a (m,c) -> if c </ Int 0 then a ^ " - " ^ string_of_cmonomial (minus_num c,m) else a ^ " + " ^ string_of_cmonomial (c,m)) "" cms in let s1 = String.sub s 0 3 and s2 = String.sub s 3 (String.length s - 3) in let s = if s1 = " + " then s2 else "-"^s2 in "<<" ^ s ^ ">>";; (* ------------------------------------------------------------------------- *) (* Printers. *) (* ------------------------------------------------------------------------- *) let print_vector v = Format.print_string(string_of_vector 0 20 v);; let print_matrix m = Format.print_string(string_of_matrix 20 m);; let print_monomial m = Format.print_string(string_of_monomial m);; let print_poly m = Format.print_string(string_of_poly m);; (* #install_printer print_vector;; #install_printer print_matrix;; #install_printer print_monomial;; #install_printer print_poly;; *) (* ------------------------------------------------------------------------- *) (* Conversion from term.
*) (* ------------------------------------------------------------------------- *) let rec poly_of_term t = match t with Zero -> poly_0 | Const n -> poly_const n | Var x -> poly_var x | Opp t1 -> poly_neg (poly_of_term t1) | Inv t1 -> let p = poly_of_term t1 in if poly_isconst p then poly_const(Int 1 // eval undefined p) else failwith "poly_of_term: inverse of non-constant polyomial" | Add (l, r) -> poly_add (poly_of_term l) (poly_of_term r) | Sub (l, r) -> poly_sub (poly_of_term l) (poly_of_term r) | Mul (l, r) -> poly_mul (poly_of_term l) (poly_of_term r) | Div (l, r) -> let p = poly_of_term l and q = poly_of_term r in if poly_isconst q then poly_cmul (Int 1 // eval undefined q) p else failwith "poly_of_term: division by non-constant polynomial" | Pow (t, n) -> poly_pow (poly_of_term t) n;; (* ------------------------------------------------------------------------- *) (* String of vector (just a list of space-separated numbers). *) (* ------------------------------------------------------------------------- *) let sdpa_of_vector (v:vector) = let n = dim v in let strs = map (o (decimalize 20) (element v)) (1--n) in end_itlist (fun x y -> x ^ " " ^ y) strs ^ "\n";; (* ------------------------------------------------------------------------- *) (* String for block diagonal matrix numbered k. *) (* ------------------------------------------------------------------------- *) let sdpa_of_blockdiagonal k m = let pfx = string_of_int k ^" " in let ents = foldl (fun a (b,i,j) c -> if i > j then a else ((b,i,j),c)::a) [] m in let entss = sort (increasing fst) ents in itlist (fun ((b,i,j),c) a -> pfx ^ string_of_int b ^ " " ^ string_of_int i ^ " " ^ string_of_int j ^ " " ^ decimalize 20 c ^ "\n" ^ a) entss "";; (* ------------------------------------------------------------------------- *) (* String for a matrix numbered k, in SDPA sparse format. *) (* ------------------------------------------------------------------------- *) let sdpa_of_matrix k (m:matrix) = let pfx = string_of_int k ^ " 1 " in let ms = foldr (fun (i,j) c a -> if i > j then a else ((i,j),c)::a) (snd m) [] in let mss = sort (increasing fst) ms in itlist (fun ((i,j),c) a -> pfx ^ string_of_int i ^ " " ^ string_of_int j ^ " " ^ decimalize 20 c ^ "\n" ^ a) mss "";; (* ------------------------------------------------------------------------- *) (* String in SDPA sparse format for standard SDP problem: *) (* *) (* X = v_1 * [M_1] + ... + v_m * [M_m] - [M_0] must be PSD *) (* Minimize obj_1 * v_1 + ... obj_m * v_m *) (* ------------------------------------------------------------------------- *) let sdpa_of_problem comment obj mats = let m = length mats - 1 and n,_ = dimensions (hd mats) in "\"" ^ comment ^ "\"\n" ^ string_of_int m ^ "\n" ^ "1\n" ^ string_of_int n ^ "\n" ^ sdpa_of_vector obj ^ itlist2 (fun k m a -> sdpa_of_matrix (k - 1) m ^ a) (1--length mats) mats "";; (* ------------------------------------------------------------------------- *) (* More parser basics. *) (* ------------------------------------------------------------------------- *) let word s = end_itlist (fun p1 p2 -> (p1 ++ p2) >> (fun (s,t) -> s^t)) (map a (explode s));; let token s = many (some isspace) ++ word s ++ many (some isspace) >> (fun ((_,t),_) -> t);; let decimal = let numeral = some isnum in let decimalint = atleast 1 numeral >> ((o) Num.num_of_string implode) in let decimalfrac = atleast 1 numeral >> (fun s -> Num.num_of_string(implode s) // pow10 (length s)) in let decimalsig = decimalint ++ possibly (a "." 
++ decimalfrac >> snd) >> (function (h,[x]) -> h +/ x | (h,_) -> h) in let signed prs = a "-" ++ prs >> ((o) minus_num snd) || a "+" ++ prs >> snd || prs in let exponent = (a "e" || a "E") ++ signed decimalint >> snd in signed decimalsig ++ possibly exponent >> (function (h,[x]) -> h */ power_num (Int 10) x | (h,_) -> h);; let mkparser p s = let x,rst = p(explode s) in if rst = [] then x else failwith "mkparser: unparsed input";; let parse_decimal = mkparser decimal;; (* ------------------------------------------------------------------------- *) (* Parse back a vector. *) (* ------------------------------------------------------------------------- *) let parse_sdpaoutput,parse_csdpoutput = let vector = token "{" ++ listof decimal (token ",") "decimal" ++ token "}" >> (fun ((_,v),_) -> vector_of_list v) in let rec skipupto dscr prs inp = (dscr ++ prs >> snd || some (fun c -> true) ++ skipupto dscr prs >> snd) inp in let ignore inp = (),[] in let sdpaoutput = skipupto (word "xVec" ++ token "=") (vector ++ ignore >> fst) in let csdpoutput = (decimal ++ many(a " " ++ decimal >> snd) >> (fun (h,t) -> h::t)) ++ (a " " ++ a "\n" ++ ignore) >> ((o) vector_of_list fst) in mkparser sdpaoutput,mkparser csdpoutput;; (* ------------------------------------------------------------------------- *) (* Also parse the SDPA output to test success (CSDP yields a return code). *) (* ------------------------------------------------------------------------- *) let sdpa_run_succeeded = let rec skipupto dscr prs inp = (dscr ++ prs >> snd || some (fun c -> true) ++ skipupto dscr prs >> snd) inp in let prs = skipupto (word "phase.value" ++ token "=") (possibly (a "p") ++ possibly (a "d") ++ (word "OPT" || word "FEAS")) in fun s -> try ignore (prs (explode s)); true with Noparse -> false;; (* ------------------------------------------------------------------------- *) (* The default parameters. Unfortunately this goes to a fixed file. *) (* ------------------------------------------------------------------------- *) let sdpa_default_parameters = "100 unsigned int maxIteration;\ \n1.0E-7 double 0.0 < epsilonStar;\ \n1.0E2 double 0.0 < lambdaStar;\ \n2.0 double 1.0 < omegaStar;\ \n-1.0E5 double lowerBound;\ \n1.0E5 double upperBound;\ \n0.1 double 0.0 <= betaStar < 1.0;\ \n0.2 double 0.0 <= betaBar < 1.0, betaStar <= betaBar;\ \n0.9 double 0.0 < gammaStar < 1.0;\ \n1.0E-7 double 0.0 < epsilonDash;\ \n";; (* ------------------------------------------------------------------------- *) (* These were suggested by Makoto Yamashita for problems where we are *) (* right at the edge of the semidefinite cone, as sometimes happens. *) (* ------------------------------------------------------------------------- *) let sdpa_alt_parameters = "1000 unsigned int maxIteration;\ \n1.0E-7 double 0.0 < epsilonStar;\ \n1.0E4 double 0.0 < lambdaStar;\ \n2.0 double 1.0 < omegaStar;\ \n-1.0E5 double lowerBound;\ \n1.0E5 double upperBound;\ \n0.1 double 0.0 <= betaStar < 1.0;\ \n0.2 double 0.0 <= betaBar < 1.0, betaStar <= betaBar;\ \n0.9 double 0.0 < gammaStar < 1.0;\ \n1.0E-7 double 0.0 < epsilonDash;\ \n";; let sdpa_params = sdpa_alt_parameters;; (* ------------------------------------------------------------------------- *) (* CSDP parameters; so far I'm sticking with the defaults. 
*) (* ------------------------------------------------------------------------- *) let csdp_default_parameters = "axtol=1.0e-8\ \natytol=1.0e-8\ \nobjtol=1.0e-8\ \npinftol=1.0e8\ \ndinftol=1.0e8\ \nmaxiter=100\ \nminstepfrac=0.9\ \nmaxstepfrac=0.97\ \nminstepp=1.0e-8\ \nminstepd=1.0e-8\ \nusexzgap=1\ \ntweakgap=0\ \naffine=0\ \nprintlevel=1\ \n";; let csdp_params = csdp_default_parameters;; (* ------------------------------------------------------------------------- *) (* Now call CSDP on a problem and parse back the output. *) (* ------------------------------------------------------------------------- *) let run_csdp dbg obj mats = let input_file = Filename.temp_file "sos" ".dat-s" in let output_file = String.sub input_file 0 (String.length input_file - 6) ^ ".out" and params_file = Filename.concat (!temp_path) "param.csdp" in file_of_string input_file (sdpa_of_problem "" obj mats); file_of_string params_file csdp_params; let rv = Sys.command("cd "^(!temp_path)^"; csdp "^input_file ^ " " ^ output_file ^ (if dbg then "" else "> /dev/null")) in let op = string_of_file output_file in let res = parse_csdpoutput op in ((if dbg then () else (Sys.remove input_file; Sys.remove output_file)); rv,res);; let csdp obj mats = let rv,res = run_csdp (!debugging) obj mats in (if rv = 1 or rv = 2 then failwith "csdp: Problem is infeasible" else if rv = 3 then () (* Format.print_string "csdp warning: Reduced accuracy"; Format.print_newline() *) else if rv <> 0 then failwith("csdp: error "^string_of_int rv) else ()); res;; (* ------------------------------------------------------------------------- *) (* Try some apparently sensible scaling first. Note that this is purely to *) (* get a cleaner translation to floating-point, and doesn't affect any of *) (* the results, in principle. In practice it seems a lot better when there *) (* are extreme numbers in the original problem. *) (* ------------------------------------------------------------------------- *) let scale_then = let common_denominator amat acc = foldl (fun a m c -> lcm_num (denominator c) a) acc amat and maximal_element amat acc = foldl (fun maxa m c -> max_num maxa (abs_num c)) acc amat in fun solver obj mats -> let cd1 = itlist common_denominator mats (Int 1) and cd2 = common_denominator (snd obj) (Int 1) in let mats' = map (mapf (fun x -> cd1 */ x)) mats and obj' = vector_cmul cd2 obj in let max1 = itlist maximal_element mats' (Int 0) and max2 = maximal_element (snd obj') (Int 0) in let scal1 = pow2 (20-int_of_float(log(float_of_num max1) /. log 2.0)) and scal2 = pow2 (20-int_of_float(log(float_of_num max2) /. log 2.0)) in let mats'' = map (mapf (fun x -> x */ scal1)) mats' and obj'' = vector_cmul scal2 obj' in solver obj'' mats'';; (* ------------------------------------------------------------------------- *) (* Round a vector to "nice" rationals. *) (* ------------------------------------------------------------------------- *) let nice_rational n x = round_num (n */ x) // n;; let nice_vector n = mapa (nice_rational n);; (* ------------------------------------------------------------------------- *) (* Reduce linear program to SDP (diagonal matrices) and test with CSDP. This *) (* one tests A [-1;x1;..;xn] >= 0 (i.e. left column is negated constants). 
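For instance, the constraint x1 >= 2 corresponds to the row [2; 1], since its product with [-1; x1] is x1 - 2 >= 0.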
*) (* ------------------------------------------------------------------------- *) let linear_program_basic a = let m,n = dimensions a in let mats = map (fun j -> diagonal (column j a)) (1--n) and obj = vector_const (Int 1) m in let rv,res = run_csdp false obj mats in if rv = 1 or rv = 2 then false else if rv = 0 then true else failwith "linear_program: An error occurred in the SDP solver";; (* ------------------------------------------------------------------------- *) (* Alternative interface testing A x >= b for matrix A, vector b. *) (* ------------------------------------------------------------------------- *) let linear_program a b = let m,n = dimensions a in if dim b <> m then failwith "linear_program: incompatible dimensions" else let mats = diagonal b :: map (fun j -> diagonal (column j a)) (1--n) and obj = vector_const (Int 1) m in let rv,res = run_csdp false obj mats in if rv = 1 or rv = 2 then false else if rv = 0 then true else failwith "linear_program: An error occurred in the SDP solver";; (* ------------------------------------------------------------------------- *) (* Test whether a point is in the convex hull of others. Rather than use *) (* computational geometry, express as linear inequalities and call CSDP. *) (* This is a bit lazy of me, but it's easy and not such a bottleneck so far. *) (* ------------------------------------------------------------------------- *) let in_convex_hull pts pt = let pts1 = (1::pt) :: map (fun x -> 1::x) pts in let pts2 = map (fun p -> map (fun x -> -x) p @ p) pts1 in let n = length pts + 1 and v = 2 * (length pt + 1) in let m = v + n - 1 in let mat = (m,n), itern 1 pts2 (fun pts j -> itern 1 pts (fun x i -> (i,j) |-> Int x)) (iter (1,n) (fun i -> (v + i,i+1) |-> Int 1) undefined) in linear_program_basic mat;; (* ------------------------------------------------------------------------- *) (* Filter down a set of points to a minimal set with the same convex hull. *) (* ------------------------------------------------------------------------- *) let minimal_convex_hull = let augment1 = function | [] -> assert false | (m::ms) -> if in_convex_hull ms m then ms else ms@[m] in let augment m ms = funpow 3 augment1 (m::ms) in fun mons -> let mons' = itlist augment (tl mons) [hd mons] in funpow (length mons') augment1 mons';; (* ------------------------------------------------------------------------- *) (* Stuff for "equations" (generic A->num functions). *) (* ------------------------------------------------------------------------- *) let equation_cmul c eq = if c =/ Int 0 then Empty else mapf (fun d -> c */ d) eq;; let equation_add eq1 eq2 = combine (+/) (fun x -> x =/ Int 0) eq1 eq2;; let equation_eval assig eq = let value v = apply assig v in foldl (fun a v c -> a +/ value(v) */ c) (Int 0) eq;; (* ------------------------------------------------------------------------- *) (* Eliminate among linear equations: return unconstrained variables and *) (* assignments for the others in terms of them. We give one pseudo-variable *) (* "one" that's used for a constant term. 
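For example, eliminating v from the equation v + 2*w - 3*one = 0 records the assignment v |-> 3*one - 2*w, and w is returned among the unconstrained variables.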
*) (* ------------------------------------------------------------------------- *) let failstore = ref [];; let eliminate_equations = let rec extract_first p l = match l with [] -> failwith "extract_first" | h::t -> if p(h) then h,t else let k,s = extract_first p t in k,h::s in let rec eliminate vars dun eqs = match vars with [] -> if forall is_undefined eqs then dun else (failstore := [vars,dun,eqs]; raise Unsolvable) | v::vs -> try let eq,oeqs = extract_first (fun e -> defined e v) eqs in let a = apply eq v in let eq' = equation_cmul (Int(-1) // a) (undefine v eq) in let elim e = let b = tryapplyd e v (Int 0) in if b =/ Int 0 then e else equation_add e (equation_cmul (minus_num b // a) eq) in eliminate vs ((v |-> eq') (mapf elim dun)) (map elim oeqs) with Failure _ -> eliminate vs dun eqs in fun one vars eqs -> let assig = eliminate vars undefined eqs in let vs = foldl (fun a x f -> subtract (dom f) [one] @ a) [] assig in setify vs,assig;; (* ------------------------------------------------------------------------- *) (* Eliminate all variables, in an essentially arbitrary order. *) (* ------------------------------------------------------------------------- *) let eliminate_all_equations one = let choose_variable eq = let (v,_) = choose eq in if v = one then let eq' = undefine v eq in if is_undefined eq' then failwith "choose_variable" else let (w,_) = choose eq' in w else v in let rec eliminate dun eqs = match eqs with [] -> dun | eq::oeqs -> if is_undefined eq then eliminate dun oeqs else let v = choose_variable eq in let a = apply eq v in let eq' = equation_cmul (Int(-1) // a) (undefine v eq) in let elim e = let b = tryapplyd e v (Int 0) in if b =/ Int 0 then e else equation_add e (equation_cmul (minus_num b // a) eq) in eliminate ((v |-> eq') (mapf elim dun)) (map elim oeqs) in fun eqs -> let assig = eliminate undefined eqs in let vs = foldl (fun a x f -> subtract (dom f) [one] @ a) [] assig in setify vs,assig;; (* ------------------------------------------------------------------------- *) (* Solve equations by assigning arbitrary numbers. *) (* ------------------------------------------------------------------------- *) let solve_equations one eqs = let vars,assigs = eliminate_all_equations one eqs in let vfn = itlist (fun v -> (v |-> Int 0)) vars (one |=> Int(-1)) in let ass = combine (+/) (fun c -> false) (mapf (equation_eval vfn) assigs) vfn in if forall (fun e -> equation_eval ass e =/ Int 0) eqs then undefine one ass else raise Sanity;; (* ------------------------------------------------------------------------- *) (* Hence produce the "relevant" monomials: those whose squares lie in the *) (* Newton polytope of the monomials in the input. (This is enough according *) (* to Reznik: "Extremal PSD forms with few terms", Duke Math. Journal, *) (* vol 45, pp. 363--374, 1978. *) (* *) (* These are ordered in sort of decreasing degree. In particular the *) (* constant monomial is last; this gives an order in diagonalization of the *) (* quadratic form that will tend to display constants. 
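For example, for the univariate input x^4 + 1 the exponent set is {0, 4}, so the candidate monomials are those x^e with 2*e inside its convex hull, namely x^2, x and 1 (in that order).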
*) (* ------------------------------------------------------------------------- *) let newton_polytope pol = let vars = poly_variables pol in let mons = map (fun m -> map (fun x -> monomial_degree x m) vars) (dom pol) and ds = map (fun x -> (degree x pol + 1) / 2) vars in let all = itlist (fun n -> allpairs (fun h t -> h::t) (0--n)) ds [[]] and mons' = minimal_convex_hull mons in let all' = filter (fun m -> in_convex_hull mons' (map (fun x -> 2 * x) m)) all in map (fun m -> itlist2 (fun v i a -> if i = 0 then a else (v |-> i) a) vars m monomial_1) (rev all');; (* ------------------------------------------------------------------------- *) (* Diagonalize (Cholesky/LDU) the matrix corresponding to a quadratic form. *) (* ------------------------------------------------------------------------- *) let diag m = let nn = dimensions m in let n = fst nn in if snd nn <> n then failwith "diagonalize: non-square matrix" else let rec diagonalize i m = if is_zero m then [] else let a11 = element m (i,i) in if a11 </ Int 0 then failwith "diagonalize: not PSD" else if a11 =/ Int 0 then if is_zero (row i m) then diagonalize (i + 1) m else failwith "diagonalize: not PSD" else let v = row i m in let v' = mapa (fun a1k -> a1k // a11) v in let m' = (n,n), iter (i+1,n) (fun j -> iter (i+1,n) (fun k -> ((j,k) |--> (element m (j,k) -/ element v j */ element v' k)))) undefined in (a11,v')::diagonalize (i + 1) m' in diagonalize 1 m;; (* ------------------------------------------------------------------------- *) (* Adjust a diagonalization to collect rationals at the start. *) (* ------------------------------------------------------------------------- *) let deration d = if d = [] then Int 0,d else let adj(c,l) = let a = foldl (fun a i c -> lcm_num a (denominator c)) (Int 1) (snd l) // foldl (fun a i c -> gcd_num a (numerator c)) (Int 0) (snd l) in (c // (a */ a)),mapa (fun x -> a */ x) l in let d' = map adj d in let a = itlist ((o) lcm_num ( (o) denominator fst)) d' (Int 1) // itlist ((o) gcd_num ( (o) numerator fst)) d' (Int 0) in (Int 1 // a),map (fun (c,l) -> (a */ c,l)) d';; (* ------------------------------------------------------------------------- *) (* Enumeration of monomials with given multidegree bound. *) (* ------------------------------------------------------------------------- *) let rec enumerate_monomials d vars = if d < 0 then [] else if d = 0 then [undefined] else if vars = [] then [monomial_1] else let alts = map (fun k -> let oths = enumerate_monomials (d - k) (tl vars) in map (fun ks -> if k = 0 then ks else (hd vars |-> k) ks) oths) (0--d) in end_itlist (@) alts;; (* ------------------------------------------------------------------------- *) (* Enumerate products of distinct input polys with degree <= d. *) (* We ignore any constant input polynomials. *) (* Give the output polynomial and a record of how it was derived. *) (* ------------------------------------------------------------------------- *) let rec enumerate_products d pols = if d = 0 then [poly_const num_1,Rational_lt num_1] else if d < 0 then [] else match pols with [] -> [poly_const num_1,Rational_lt num_1] | (p,b)::ps -> let e = multidegree p in if e = 0 then enumerate_products d ps else enumerate_products d ps @ map (fun (q,c) -> poly_mul p q,Product(b,c)) (enumerate_products (d - e) ps);; (* ------------------------------------------------------------------------- *) (* Multiply equation-parametrized poly by regular poly and add accumulator. 
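Here an equation-parametrized poly maps each monomial to an equation, i.e. a linear combination of the unknown matrix entries; epoly_pmul multiplies such an object by an ordinary polynomial and adds the result to the accumulator.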
*) (* ------------------------------------------------------------------------- *) let epoly_pmul p q acc = foldl (fun a m1 c -> foldl (fun b m2 e -> let m = monomial_mul m1 m2 in let es = tryapplyd b m undefined in (m |-> equation_add (equation_cmul c e) es) b) a q) acc p;; (* ------------------------------------------------------------------------- *) (* Usual operations on equation-parametrized poly. *) (* ------------------------------------------------------------------------- *) let epoly_cmul c l = if c =/ Int 0 then undefined else mapf (equation_cmul c) l;; let epoly_neg = epoly_cmul (Int(-1));; let epoly_add = combine equation_add is_undefined;; let epoly_sub p q = epoly_add p (epoly_neg q);; (* ------------------------------------------------------------------------- *) (* Convert regular polynomial. Note that we treat (0,0,0) as -1. *) (* ------------------------------------------------------------------------- *) let epoly_of_poly p = foldl (fun a m c -> (m |-> ((0,0,0) |=> minus_num c)) a) undefined p;; (* ------------------------------------------------------------------------- *) (* String for block diagonal matrix numbered k. *) (* ------------------------------------------------------------------------- *) let sdpa_of_blockdiagonal k m = let pfx = string_of_int k ^" " in let ents = foldl (fun a (b,i,j) c -> if i > j then a else ((b,i,j),c)::a) [] m in let entss = sort (increasing fst) ents in itlist (fun ((b,i,j),c) a -> pfx ^ string_of_int b ^ " " ^ string_of_int i ^ " " ^ string_of_int j ^ " " ^ decimalize 20 c ^ "\n" ^ a) entss "";; (* ------------------------------------------------------------------------- *) (* SDPA for problem using block diagonal (i.e. multiple SDPs) *) (* ------------------------------------------------------------------------- *) let sdpa_of_blockproblem comment nblocks blocksizes obj mats = let m = length mats - 1 in "\"" ^ comment ^ "\"\n" ^ string_of_int m ^ "\n" ^ string_of_int nblocks ^ "\n" ^ (end_itlist (fun s t -> s^" "^t) (map string_of_int blocksizes)) ^ "\n" ^ sdpa_of_vector obj ^ itlist2 (fun k m a -> sdpa_of_blockdiagonal (k - 1) m ^ a) (1--length mats) mats "";; (* ------------------------------------------------------------------------- *) (* Hence run CSDP on a problem in block diagonal form. *) (* ------------------------------------------------------------------------- *) let run_csdp dbg nblocks blocksizes obj mats = let input_file = Filename.temp_file "sos" ".dat-s" in let output_file = String.sub input_file 0 (String.length input_file - 6) ^ ".out" and params_file = Filename.concat (!temp_path) "param.csdp" in file_of_string input_file (sdpa_of_blockproblem "" nblocks blocksizes obj mats); file_of_string params_file csdp_params; let rv = Sys.command("cd "^(!temp_path)^"; csdp "^input_file ^ " " ^ output_file ^ (if dbg then "" else "> /dev/null")) in let op = string_of_file output_file in let res = parse_csdpoutput op in ((if dbg then () else (Sys.remove input_file; Sys.remove output_file)); rv,res);; let csdp nblocks blocksizes obj mats = let rv,res = run_csdp (!debugging) nblocks blocksizes obj mats in (if rv = 1 or rv = 2 then failwith "csdp: Problem is infeasible" else if rv = 3 then () (*Format.print_string "csdp warning: Reduced accuracy"; Format.print_newline() *) else if rv <> 0 then failwith("csdp: error "^string_of_int rv) else ()); res;; (* ------------------------------------------------------------------------- *) (* 3D versions of matrix operations to consider blocks separately. 
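A block matrix is represented as a finite map from (block, row, column) triples to rationals, so the operations below are just the pointwise analogues of the earlier 2D matrix operations.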
*) (* ------------------------------------------------------------------------- *) let bmatrix_add = combine (+/) (fun x -> x =/ Int 0);; let bmatrix_cmul c bm = if c =/ Int 0 then undefined else mapf (fun x -> c */ x) bm;; let bmatrix_neg = bmatrix_cmul (Int(-1));; let bmatrix_sub m1 m2 = bmatrix_add m1 (bmatrix_neg m2);; (* ------------------------------------------------------------------------- *) (* Smash a block matrix into components. *) (* ------------------------------------------------------------------------- *) let blocks blocksizes bm = map (fun (bs,b0) -> let m = foldl (fun a (b,i,j) c -> if b = b0 then ((i,j) |-> c) a else a) undefined bm in (((bs,bs),m):matrix)) (zip blocksizes (1--length blocksizes));; (* ------------------------------------------------------------------------- *) (* Positiv- and Nullstellensatz. Flag "linf" forces a linear representation. *) (* ------------------------------------------------------------------------- *) let real_positivnullstellensatz_general linf d eqs leqs pol = let vars = itlist ((o) union poly_variables) (pol::eqs @ map fst leqs) [] in let monoid = if linf then (poly_const num_1,Rational_lt num_1):: (filter (fun (p,c) -> multidegree p <= d) leqs) else enumerate_products d leqs in let nblocks = length monoid in let mk_idmultiplier k p = let e = d - multidegree p in let mons = enumerate_monomials e vars in let nons = zip mons (1--length mons) in mons, itlist (fun (m,n) -> (m |-> ((-k,-n,n) |=> Int 1))) nons undefined in let mk_sqmultiplier k (p,c) = let e = (d - multidegree p) / 2 in let mons = enumerate_monomials e vars in let nons = zip mons (1--length mons) in mons, itlist (fun (m1,n1) -> itlist (fun (m2,n2) a -> let m = monomial_mul m1 m2 in if n1 > n2 then a else let c = if n1 = n2 then Int 1 else Int 2 in let e = tryapplyd a m undefined in (m |-> equation_add ((k,n1,n2) |=> c) e) a) nons) nons undefined in let sqmonlist,sqs = unzip(map2 mk_sqmultiplier (1--length monoid) monoid) and idmonlist,ids = unzip(map2 mk_idmultiplier (1--length eqs) eqs) in let blocksizes = map length sqmonlist in let bigsum = itlist2 (fun p q a -> epoly_pmul p q a) eqs ids (itlist2 (fun (p,c) s a -> epoly_pmul p s a) monoid sqs (epoly_of_poly(poly_neg pol))) in let eqns = foldl (fun a m e -> e::a) [] bigsum in let pvs,assig = eliminate_all_equations (0,0,0) eqns in let qvars = (0,0,0)::pvs in let allassig = itlist (fun v -> (v |-> (v |=> Int 1))) pvs assig in let mk_matrix v = foldl (fun m (b,i,j) ass -> if b < 0 then m else let c = tryapplyd ass v (Int 0) in if c =/ Int 0 then m else ((b,j,i) |-> c) (((b,i,j) |-> c) m)) undefined allassig in let diagents = foldl (fun a (b,i,j) e -> if b > 0 & i = j then equation_add e a else a) undefined allassig in let mats = map mk_matrix qvars and obj = length pvs, itern 1 pvs (fun v i -> (i |--> tryapplyd diagents v (Int 0))) undefined in let raw_vec = if pvs = [] then vector_0 0 else scale_then (csdp nblocks blocksizes) obj mats in let find_rounding d = (if !debugging then (Format.print_string("Trying rounding with limit "^string_of_num d); Format.print_newline()) else ()); let vec = nice_vector d raw_vec in let blockmat = iter (1,dim vec) (fun i a -> bmatrix_add (bmatrix_cmul (element vec i) (el i mats)) a) (bmatrix_neg (el 0 mats)) in let allmats = blocks blocksizes blockmat in vec,map diag allmats in let vec,ratdias = if pvs = [] then find_rounding num_1 else tryfind find_rounding (map Num.num_of_int (1--31) @ map pow2 (5--66)) in let newassigs = itlist (fun k -> el (k - 1) pvs |-> element vec k) (1--dim vec) 
((0,0,0) |=> Int(-1)) in let finalassigs = foldl (fun a v e -> (v |-> equation_eval newassigs e) a) newassigs allassig in let poly_of_epoly p = foldl (fun a v e -> (v |--> equation_eval finalassigs e) a) undefined p in let mk_sos mons = let mk_sq (c,m) = c,itlist (fun k a -> (el (k - 1) mons |--> element m k) a) (1--length mons) undefined in map mk_sq in let sqs = map2 mk_sos sqmonlist ratdias and cfs = map poly_of_epoly ids in let msq = filter (fun (a,b) -> b <> []) (map2 (fun a b -> a,b) monoid sqs) in let eval_sq sqs = itlist (fun (c,q) -> poly_add (poly_cmul c (poly_mul q q))) sqs poly_0 in let sanity = itlist (fun ((p,c),s) -> poly_add (poly_mul p (eval_sq s))) msq (itlist2 (fun p q -> poly_add (poly_mul p q)) cfs eqs (poly_neg pol)) in if not(is_undefined sanity) then raise Sanity else cfs,map (fun (a,b) -> snd a,b) msq;; (* ------------------------------------------------------------------------- *) (* Iterative deepening. *) (* ------------------------------------------------------------------------- *) let rec deepen f n = try print_string "Searching with depth limit "; print_int n; print_newline(); f n with Failure _ -> deepen f (n + 1);; (* ------------------------------------------------------------------------- *) (* The ordering so we can create canonical HOL polynomials. *) (* ------------------------------------------------------------------------- *) let dest_monomial mon = sort (increasing fst) (graph mon);; let monomial_order = let rec lexorder l1 l2 = match (l1,l2) with [],[] -> true | vps,[] -> false | [],vps -> true | ((x1,n1)::vs1),((x2,n2)::vs2) -> if x1 < x2 then true else if x2 < x1 then false else if n1 < n2 then false else if n2 < n1 then true else lexorder vs1 vs2 in fun m1 m2 -> if m2 = monomial_1 then true else if m1 = monomial_1 then false else let mon1 = dest_monomial m1 and mon2 = dest_monomial m2 in let deg1 = itlist ((o) (+) snd) mon1 0 and deg2 = itlist ((o) (+) snd) mon2 0 in if deg1 < deg2 then false else if deg1 > deg2 then true else lexorder mon1 mon2;; let dest_poly p = map (fun (m,c) -> c,dest_monomial m) (sort (fun (m1,_) (m2,_) -> monomial_order m1 m2) (graph p));; (* ------------------------------------------------------------------------- *) (* Map back polynomials and their composites to HOL. *) (* ------------------------------------------------------------------------- *) let term_of_varpow = fun x k -> if k = 1 then Var x else Pow (Var x, k);; let term_of_monomial = fun m -> if m = monomial_1 then Const num_1 else let m' = dest_monomial m in let vps = itlist (fun (x,k) a -> term_of_varpow x k :: a) m' [] in end_itlist (fun s t -> Mul (s,t)) vps;; let term_of_cmonomial = fun (m,c) -> if m = monomial_1 then Const c else if c =/ num_1 then term_of_monomial m else Mul (Const c,term_of_monomial m);; let term_of_poly = fun p -> if p = poly_0 then Zero else let cms = map term_of_cmonomial (sort (fun (m1,_) (m2,_) -> monomial_order m1 m2) (graph p)) in end_itlist (fun t1 t2 -> Add (t1,t2)) cms;; let term_of_sqterm (c,p) = Product(Rational_lt c,Square(term_of_poly p));; let term_of_sos (pr,sqs) = if sqs = [] then pr else Product(pr,end_itlist (fun a b -> Sum(a,b)) (map term_of_sqterm sqs));; (* ------------------------------------------------------------------------- *) (* Interface to HOL. 
*) (* ------------------------------------------------------------------------- *) (* let REAL_NONLINEAR_PROVER translator (eqs,les,lts) = let eq0 = map (poly_of_term o lhand o concl) eqs and le0 = map (poly_of_term o lhand o concl) les and lt0 = map (poly_of_term o lhand o concl) lts in let eqp0 = map (fun (t,i) -> t,Axiom_eq i) (zip eq0 (0--(length eq0 - 1))) and lep0 = map (fun (t,i) -> t,Axiom_le i) (zip le0 (0--(length le0 - 1))) and ltp0 = map (fun (t,i) -> t,Axiom_lt i) (zip lt0 (0--(length lt0 - 1))) in let keq,eq = partition (fun (p,_) -> multidegree p = 0) eqp0 and klep,lep = partition (fun (p,_) -> multidegree p = 0) lep0 and kltp,ltp = partition (fun (p,_) -> multidegree p = 0) ltp0 in let trivial_axiom (p,ax) = match ax with Axiom_eq n when eval undefined p <>/ num_0 -> el n eqs | Axiom_le n when eval undefined p </ num_0 -> el n les | Axiom_lt n when eval undefined p <=/ num_0 -> el n lts | _ -> failwith "not a trivial axiom" in try let th = tryfind trivial_axiom (keq @ klep @ kltp) in CONV_RULE (LAND_CONV REAL_POLY_CONV THENC REAL_RAT_RED_CONV) th with Failure _ -> let pol = itlist poly_mul (map fst ltp) (poly_const num_1) in let leq = lep @ ltp in let tryall d = let e = multidegree pol in let k = if e = 0 then 0 else d / e in let eq' = map fst eq in tryfind (fun i -> d,i,real_positivnullstellensatz_general false d eq' leq (poly_neg(poly_pow pol i))) (0--k) in let d,i,(cert_ideal,cert_cone) = deepen tryall 0 in let proofs_ideal = map2 (fun q (p,ax) -> Eqmul(term_of_poly q,ax)) cert_ideal eq and proofs_cone = map term_of_sos cert_cone and proof_ne = if ltp = [] then Rational_lt num_1 else let p = end_itlist (fun s t -> Product(s,t)) (map snd ltp) in funpow i (fun q -> Product(p,q)) (Rational_lt num_1) in let proof = end_itlist (fun s t -> Sum(s,t)) (proof_ne :: proofs_ideal @ proofs_cone) in print_string("Translating proof certificate to HOL"); print_newline(); translator (eqs,les,lts) proof;; *) (* ------------------------------------------------------------------------- *) (* A wrapper that tries to substitute away variables first. 
*) (* ------------------------------------------------------------------------- *) (* let REAL_NONLINEAR_SUBST_PROVER = let zero = `&0:real` and mul_tm = `( * ):real->real->real` and shuffle1 = CONV_RULE(REWR_CONV(REAL_ARITH `a + x = (y:real) <=> x = y - a`)) and shuffle2 = CONV_RULE(REWR_CONV(REAL_ARITH `x + a = (y:real) <=> x = y - a`)) in let rec substitutable_monomial fvs tm = match tm with Var(_,Tyapp("real",[])) when not (mem tm fvs) -> Int 1,tm | Comb(Comb(Const("real_mul",_),c),(Var(_,_) as t)) when is_ratconst c & not (mem t fvs) -> rat_of_term c,t | Comb(Comb(Const("real_add",_),s),t) -> (try substitutable_monomial (union (frees t) fvs) s with Failure _ -> substitutable_monomial (union (frees s) fvs) t) | _ -> failwith "substitutable_monomial" and isolate_variable v th = match lhs(concl th) with x when x = v -> th | Comb(Comb(Const("real_add",_),(Var(_,Tyapp("real",[])) as x)),t) when x = v -> shuffle2 th | Comb(Comb(Const("real_add",_),s),t) -> isolate_variable v(shuffle1 th) in let make_substitution th = let (c,v) = substitutable_monomial [] (lhs(concl th)) in let th1 = AP_TERM (mk_comb(mul_tm,term_of_rat(Int 1 // c))) th in let th2 = CONV_RULE(BINOP_CONV REAL_POLY_MUL_CONV) th1 in CONV_RULE (RAND_CONV REAL_POLY_CONV) (isolate_variable v th2) in fun translator -> let rec substfirst(eqs,les,lts) = try let eth = tryfind make_substitution eqs in let modify = CONV_RULE(LAND_CONV(SUBS_CONV[eth] THENC REAL_POLY_CONV)) in substfirst(filter (fun t -> lhand(concl t) <> zero) (map modify eqs), map modify les,map modify lts) with Failure _ -> REAL_NONLINEAR_PROVER translator (eqs,les,lts) in substfirst;; *) (* ------------------------------------------------------------------------- *) (* Overall function. *) (* ------------------------------------------------------------------------- *) (* let REAL_SOS = let init = GEN_REWRITE_CONV ONCE_DEPTH_CONV [DECIMAL] and pure = GEN_REAL_ARITH REAL_NONLINEAR_SUBST_PROVER in fun tm -> let th = init tm in EQ_MP (SYM th) (pure(rand(concl th)));; *) (* ------------------------------------------------------------------------- *) (* Add hacks for division. 
*) (* ------------------------------------------------------------------------- *) (* let REAL_SOSFIELD = let inv_tm = `inv:real->real` in let prenex_conv = TOP_DEPTH_CONV BETA_CONV THENC PURE_REWRITE_CONV[FORALL_SIMP; EXISTS_SIMP; real_div; REAL_INV_INV; REAL_INV_MUL; GSYM REAL_POW_INV] THENC NNFC_CONV THENC DEPTH_BINOP_CONV `(/\)` CONDS_CELIM_CONV THENC PRENEX_CONV and setup_conv = NNF_CONV THENC WEAK_CNF_CONV THENC CONJ_CANON_CONV and core_rule t = try REAL_ARITH t with Failure _ -> try REAL_RING t with Failure _ -> REAL_SOS t and is_inv = let is_div = is_binop `(/):real->real->real` in fun tm -> (is_div tm or (is_comb tm & rator tm = inv_tm)) & not(is_ratconst(rand tm)) in let BASIC_REAL_FIELD tm = let is_freeinv t = is_inv t & free_in t tm in let itms = setify(map rand (find_terms is_freeinv tm)) in let hyps = map (fun t -> SPEC t REAL_MUL_RINV) itms in let tm' = itlist (fun th t -> mk_imp(concl th,t)) hyps tm in let itms' = map (curry mk_comb inv_tm) itms in let gvs = map (genvar o type_of) itms' in let tm'' = subst (zip gvs itms') tm' in let th1 = setup_conv tm'' in let cjs = conjuncts(rand(concl th1)) in let ths = map core_rule cjs in let th2 = EQ_MP (SYM th1) (end_itlist CONJ ths) in rev_itlist (C MP) hyps (INST (zip itms' gvs) th2) in fun tm -> let th0 = prenex_conv tm in let tm0 = rand(concl th0) in let avs,bod = strip_forall tm0 in let th1 = setup_conv bod in let ths = map BASIC_REAL_FIELD (conjuncts(rand(concl th1))) in EQ_MP (SYM th0) (GENL avs (EQ_MP (SYM th1) (end_itlist CONJ ths)));; *) (* ------------------------------------------------------------------------- *) (* Integer version. *) (* ------------------------------------------------------------------------- *) (* let INT_SOS = let atom_CONV = let pth = prove (`(~(x <= y) <=> y + &1 <= x:int) /\ (~(x < y) <=> y <= x) /\ (~(x = y) <=> x + &1 <= y \/ y + &1 <= x) /\ (x < y <=> x + &1 <= y)`, REWRITE_TAC[INT_NOT_LE; INT_NOT_LT; INT_NOT_EQ; INT_LT_DISCRETE]) in GEN_REWRITE_CONV I [pth] and bub_CONV = GEN_REWRITE_CONV TOP_SWEEP_CONV [int_eq; int_le; int_lt; int_ge; int_gt; int_of_num_th; int_neg_th; int_add_th; int_mul_th; int_sub_th; int_pow_th; int_abs_th; int_max_th; int_min_th] in let base_CONV = TRY_CONV atom_CONV THENC bub_CONV in let NNF_NORM_CONV = GEN_NNF_CONV false (base_CONV,fun t -> base_CONV t,base_CONV(mk_neg t)) in let init_CONV = GEN_REWRITE_CONV DEPTH_CONV [FORALL_SIMP; EXISTS_SIMP] THENC GEN_REWRITE_CONV DEPTH_CONV [INT_GT; INT_GE] THENC CONDS_ELIM_CONV THENC NNF_NORM_CONV in let p_tm = `p:bool` and not_tm = `(~)` in let pth = TAUT(mk_eq(mk_neg(mk_neg p_tm),p_tm)) in fun tm -> let th0 = INST [tm,p_tm] pth and th1 = NNF_NORM_CONV(mk_neg tm) in let th2 = REAL_SOS(mk_neg(rand(concl th1))) in EQ_MP th0 (EQ_MP (AP_TERM not_tm (SYM th1)) th2);; *) (* ------------------------------------------------------------------------- *) (* Natural number version. *) (* ------------------------------------------------------------------------- *) (* let SOS_RULE tm = let avs = frees tm in let tm' = list_mk_forall(avs,tm) in let th1 = NUM_TO_INT_CONV tm' in let th2 = INT_SOS (rand(concl th1)) in SPECL avs (EQ_MP (SYM th1) th2);; *) (* ------------------------------------------------------------------------- *) (* Now pure SOS stuff. *) (* ------------------------------------------------------------------------- *) (*prioritize_real();;*) (* ------------------------------------------------------------------------- *) (* Some combinatorial helper functions. 
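For example, allpermutations [1;2;3] returns all six orderings of the list, and changevariables renames the variables of a polynomial according to an association list.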
*) (* ------------------------------------------------------------------------- *) let rec allpermutations l = if l = [] then [[]] else itlist (fun h acc -> map (fun t -> h::t) (allpermutations (subtract l [h])) @ acc) l [];; let allvarorders l = map (fun vlis x -> index x vlis) (allpermutations l);; let changevariables_monomial zoln (m:monomial) = foldl (fun a x k -> (assoc x zoln |-> k) a) monomial_1 m;; let changevariables zoln pol = foldl (fun a m c -> (changevariables_monomial zoln m |-> c) a) poly_0 pol;; (* ------------------------------------------------------------------------- *) (* Return to original non-block matrices. *) (* ------------------------------------------------------------------------- *) let sdpa_of_vector (v:vector) = let n = dim v in let strs = map (o (decimalize 20) (element v)) (1--n) in end_itlist (fun x y -> x ^ " " ^ y) strs ^ "\n";; let sdpa_of_blockdiagonal k m = let pfx = string_of_int k ^" " in let ents = foldl (fun a (b,i,j) c -> if i > j then a else ((b,i,j),c)::a) [] m in let entss = sort (increasing fst) ents in itlist (fun ((b,i,j),c) a -> pfx ^ string_of_int b ^ " " ^ string_of_int i ^ " " ^ string_of_int j ^ " " ^ decimalize 20 c ^ "\n" ^ a) entss "";; let sdpa_of_matrix k (m:matrix) = let pfx = string_of_int k ^ " 1 " in let ms = foldr (fun (i,j) c a -> if i > j then a else ((i,j),c)::a) (snd m) [] in let mss = sort (increasing fst) ms in itlist (fun ((i,j),c) a -> pfx ^ string_of_int i ^ " " ^ string_of_int j ^ " " ^ decimalize 20 c ^ "\n" ^ a) mss "";; let sdpa_of_problem comment obj mats = let m = length mats - 1 and n,_ = dimensions (hd mats) in "\"" ^ comment ^ "\"\n" ^ string_of_int m ^ "\n" ^ "1\n" ^ string_of_int n ^ "\n" ^ sdpa_of_vector obj ^ itlist2 (fun k m a -> sdpa_of_matrix (k - 1) m ^ a) (1--length mats) mats "";; let run_csdp dbg obj mats = let input_file = Filename.temp_file "sos" ".dat-s" in let output_file = String.sub input_file 0 (String.length input_file - 6) ^ ".out" and params_file = Filename.concat (!temp_path) "param.csdp" in file_of_string input_file (sdpa_of_problem "" obj mats); file_of_string params_file csdp_params; let rv = Sys.command("cd "^(!temp_path)^"; csdp "^input_file ^ " " ^ output_file ^ (if dbg then "" else "> /dev/null")) in let op = string_of_file output_file in let res = parse_csdpoutput op in ((if dbg then () else (Sys.remove input_file; Sys.remove output_file)); rv,res);; let csdp obj mats = let rv,res = run_csdp (!debugging) obj mats in (if rv = 1 or rv = 2 then failwith "csdp: Problem is infeasible" else if rv = 3 then () (* (Format.print_string "csdp warning: Reduced accuracy"; Format.print_newline()) *) else if rv <> 0 then failwith("csdp: error "^string_of_int rv) else ()); res;; (* ------------------------------------------------------------------------- *) (* Sum-of-squares function with some lowbrow symmetry reductions. 
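If the input polynomial is invariant under a permutation of its variables (e.g. swapping x and y), the corresponding entries of the Gram matrix are constrained to be equal, which reduces the number of unknowns sent to the SDP solver.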
*) (* ------------------------------------------------------------------------- *) let sumofsquares_general_symmetry tool pol = let vars = poly_variables pol and lpps = newton_polytope pol in let n = length lpps in let sym_eqs = let invariants = filter (fun vars' -> is_undefined(poly_sub pol (changevariables (zip vars vars') pol))) (allpermutations vars) in let lpns = zip lpps (1--length lpps) in let lppcs = filter (fun (m,(n1,n2)) -> n1 <= n2) (allpairs (fun (m1,n1) (m2,n2) -> (m1,m2),(n1,n2)) lpns lpns) in let clppcs = end_itlist (@) (map (fun ((m1,m2),(n1,n2)) -> map (fun vars' -> (changevariables_monomial (zip vars vars') m1, changevariables_monomial (zip vars vars') m2),(n1,n2)) invariants) lppcs) in let clppcs_dom = setify(map fst clppcs) in let clppcs_cls = map (fun d -> filter (fun (e,_) -> e = d) clppcs) clppcs_dom in let eqvcls = map (o setify (map snd)) clppcs_cls in let mk_eq cls acc = match cls with [] -> raise Sanity | [h] -> acc | h::t -> map (fun k -> (k |-> Int(-1)) (h |=> Int 1)) t @ acc in itlist mk_eq eqvcls [] in let eqs = foldl (fun a x y -> y::a) [] (itern 1 lpps (fun m1 n1 -> itern 1 lpps (fun m2 n2 f -> let m = monomial_mul m1 m2 in if n1 > n2 then f else let c = if n1 = n2 then Int 1 else Int 2 in (m |-> ((n1,n2) |-> c) (tryapplyd f m undefined)) f)) (foldl (fun a m c -> (m |-> ((0,0)|=>c)) a) undefined pol)) @ sym_eqs in let pvs,assig = eliminate_all_equations (0,0) eqs in let allassig = itlist (fun v -> (v |-> (v |=> Int 1))) pvs assig in let qvars = (0,0)::pvs in let diagents = end_itlist equation_add (map (fun i -> apply allassig (i,i)) (1--n)) in let mk_matrix v = ((n,n), foldl (fun m (i,j) ass -> let c = tryapplyd ass v (Int 0) in if c =/ Int 0 then m else ((j,i) |-> c) (((i,j) |-> c) m)) undefined allassig :matrix) in let mats = map mk_matrix qvars and obj = length pvs, itern 1 pvs (fun v i -> (i |--> tryapplyd diagents v (Int 0))) undefined in let raw_vec = if pvs = [] then vector_0 0 else tool obj mats in let find_rounding d = (if !debugging then (Format.print_string("Trying rounding with limit "^string_of_num d); Format.print_newline()) else ()); let vec = nice_vector d raw_vec in let mat = iter (1,dim vec) (fun i a -> matrix_add (matrix_cmul (element vec i) (el i mats)) a) (matrix_neg (el 0 mats)) in deration(diag mat) in let rat,dia = if pvs = [] then let mat = matrix_neg (el 0 mats) in deration(diag mat) else tryfind find_rounding (map Num.num_of_int (1--31) @ map pow2 (5--66)) in let poly_of_lin(d,v) = d,foldl(fun a i c -> (el (i - 1) lpps |-> c) a) undefined (snd v) in let lins = map poly_of_lin dia in let sqs = map (fun (d,l) -> poly_mul (poly_const d) (poly_pow l 2)) lins in let sos = poly_cmul rat (end_itlist poly_add sqs) in if is_undefined(poly_sub sos pol) then rat,lins else raise Sanity;; let sumofsquares = sumofsquares_general_symmetry csdp;; (* ------------------------------------------------------------------------- *) (* Pure HOL SOS conversion. 
*) (* ------------------------------------------------------------------------- *) (* let SOS_CONV = let mk_square = let pow_tm = `(pow)` and two_tm = `2` in fun tm -> mk_comb(mk_comb(pow_tm,tm),two_tm) and mk_prod = mk_binop `( * )` and mk_sum = mk_binop `(+)` in fun tm -> let k,sos = sumofsquares(poly_of_term tm) in let mk_sqtm(c,p) = mk_prod (term_of_rat(k */ c)) (mk_square(term_of_poly p)) in let tm' = end_itlist mk_sum (map mk_sqtm sos) in let th = REAL_POLY_CONV tm and th' = REAL_POLY_CONV tm' in TRANS th (SYM th');; *) (* ------------------------------------------------------------------------- *) (* Attempt to prove &0 <= x by direct SOS decomposition. *) (* ------------------------------------------------------------------------- *) (* let PURE_SOS_TAC = let tac = MATCH_ACCEPT_TAC(REWRITE_RULE[GSYM REAL_POW_2] REAL_LE_SQUARE) ORELSE MATCH_ACCEPT_TAC REAL_LE_SQUARE ORELSE (MATCH_MP_TAC REAL_LE_ADD THEN CONJ_TAC) ORELSE (MATCH_MP_TAC REAL_LE_MUL THEN CONJ_TAC) ORELSE CONV_TAC(RAND_CONV REAL_RAT_REDUCE_CONV THENC REAL_RAT_LE_CONV) in REPEAT GEN_TAC THEN REWRITE_TAC[real_ge] THEN GEN_REWRITE_TAC I [GSYM REAL_SUB_LE] THEN CONV_TAC(RAND_CONV SOS_CONV) THEN REPEAT tac THEN NO_TAC;; let PURE_SOS tm = prove(tm,PURE_SOS_TAC);; *) (* ------------------------------------------------------------------------- *) (* Examples. *) (* ------------------------------------------------------------------------- *) (***** time REAL_SOS `a1 >= &0 /\ a2 >= &0 /\ (a1 * a1 + a2 * a2 = b1 * b1 + b2 * b2 + &2) /\ (a1 * b1 + a2 * b2 = &0) ==> a1 * a2 - b1 * b2 >= &0`;; time REAL_SOS `&3 * x + &7 * a < &4 /\ &3 < &2 * x ==> a < &0`;; time REAL_SOS `b pow 2 < &4 * a * c ==> ~(a * x pow 2 + b * x + c = &0)`;; time REAL_SOS `(a * x pow 2 + b * x + c = &0) ==> b pow 2 >= &4 * a * c`;; time REAL_SOS `&0 <= x /\ x <= &1 /\ &0 <= y /\ y <= &1 ==> x pow 2 + y pow 2 < &1 \/ (x - &1) pow 2 + y pow 2 < &1 \/ x pow 2 + (y - &1) pow 2 < &1 \/ (x - &1) pow 2 + (y - &1) pow 2 < &1`;; time REAL_SOS `&0 <= b /\ &0 <= c /\ &0 <= x /\ &0 <= y /\ (x pow 2 = c) /\ (y pow 2 = a pow 2 * c + b) ==> a * c <= y * x`;; time REAL_SOS `&0 <= x /\ &0 <= y /\ &0 <= z /\ x + y + z <= &3 ==> x * y + x * z + y * z >= &3 * x * y * z`;; time REAL_SOS `(x pow 2 + y pow 2 + z pow 2 = &1) ==> (x + y + z) pow 2 <= &3`;; time REAL_SOS `(w pow 2 + x pow 2 + y pow 2 + z pow 2 = &1) ==> (w + x + y + z) pow 2 <= &4`;; time REAL_SOS `x >= &1 /\ y >= &1 ==> x * y >= x + y - &1`;; time REAL_SOS `x > &1 /\ y > &1 ==> x * y > x + y - &1`;; time REAL_SOS `abs(x) <= &1 ==> abs(&64 * x pow 7 - &112 * x pow 5 + &56 * x pow 3 - &7 * x) <= &1`;; time REAL_SOS `abs(x - z) <= e /\ abs(y - z) <= e /\ &0 <= u /\ &0 <= v /\ (u + v = &1) ==> abs((u * x + v * y) - z) <= e`;; (* ------------------------------------------------------------------------- *) (* One component of denominator in dodecahedral example. *) (* ------------------------------------------------------------------------- *) time REAL_SOS `&2 <= x /\ x <= &125841 / &50000 /\ &2 <= y /\ y <= &125841 / &50000 /\ &2 <= z /\ z <= &125841 / &50000 ==> &2 * (x * z + x * y + y * z) - (x * x + y * y + z * z) >= &0`;; (* ------------------------------------------------------------------------- *) (* Over a larger but simpler interval. 
*) (* ------------------------------------------------------------------------- *) time REAL_SOS `&2 <= x /\ x <= &4 /\ &2 <= y /\ y <= &4 /\ &2 <= z /\ z <= &4 ==> &0 <= &2 * (x * z + x * y + y * z) - (x * x + y * y + z * z)`;; (* ------------------------------------------------------------------------- *) (* We can do 12. I think 12 is a sharp bound; see PP's certificate. *) (* ------------------------------------------------------------------------- *) time REAL_SOS `&2 <= x /\ x <= &4 /\ &2 <= y /\ y <= &4 /\ &2 <= z /\ z <= &4 ==> &12 <= &2 * (x * z + x * y + y * z) - (x * x + y * y + z * z)`;; (* ------------------------------------------------------------------------- *) (* Gloptipoly example. *) (* ------------------------------------------------------------------------- *) (*** This works but normalization takes minutes time REAL_SOS `(x - y - &2 * x pow 4 = &0) /\ &0 <= x /\ x <= &2 /\ &0 <= y /\ y <= &3 ==> y pow 2 - &7 * y - &12 * x + &17 >= &0`;; ***) (* ------------------------------------------------------------------------- *) (* Inequality from sci.math (see "Leon-Sotelo, por favor"). *) (* ------------------------------------------------------------------------- *) time REAL_SOS `&0 <= x /\ &0 <= y /\ (x * y = &1) ==> x + y <= x pow 2 + y pow 2`;; time REAL_SOS `&0 <= x /\ &0 <= y /\ (x * y = &1) ==> x * y * (x + y) <= x pow 2 + y pow 2`;; time REAL_SOS `&0 <= x /\ &0 <= y ==> x * y * (x + y) pow 2 <= (x pow 2 + y pow 2) pow 2`;; (* ------------------------------------------------------------------------- *) (* Some examples over integers and natural numbers. *) (* ------------------------------------------------------------------------- *) time SOS_RULE `!m n. 2 * m + n = (n + m) + m`;; time SOS_RULE `!n. ~(n = 0) ==> (0 MOD n = 0)`;; time SOS_RULE `!m n. m < n ==> (m DIV n = 0)`;; time SOS_RULE `!n:num. n <= n * n`;; time SOS_RULE `!m n. n * (m DIV n) <= m`;; time SOS_RULE `!n. ~(n = 0) ==> (0 DIV n = 0)`;; time SOS_RULE `!m n p. ~(p = 0) /\ m <= n ==> m DIV p <= n DIV p`;; time SOS_RULE `!a b n. ~(a = 0) ==> (n <= b DIV a <=> a * n <= b)`;; (* ------------------------------------------------------------------------- *) (* This is particularly gratifying --- cf hideous manual proof in arith.ml *) (* ------------------------------------------------------------------------- *) (*** This doesn't now seem to work as well as it did; what changed? time SOS_RULE `!a b c d. ~(b = 0) /\ b * c < (a + 1) * d ==> c DIV d <= a DIV b`;; ***) (* ------------------------------------------------------------------------- *) (* Key lemma for injectivity of Cantor-type pairing functions. *) (* ------------------------------------------------------------------------- *) time SOS_RULE `!x1 y1 x2 y2. ((x1 + y1) EXP 2 + x1 + 1 = (x2 + y2) EXP 2 + x2 + 1) ==> (x1 + y1 = x2 + y2)`;; time SOS_RULE `!x1 y1 x2 y2. ((x1 + y1) EXP 2 + x1 + 1 = (x2 + y2) EXP 2 + x2 + 1) /\ (x1 + y1 = x2 + y2) ==> (x1 = x2) /\ (y1 = y2)`;; time SOS_RULE `!x1 y1 x2 y2. (((x1 + y1) EXP 2 + 3 * x1 + y1) DIV 2 = ((x2 + y2) EXP 2 + 3 * x2 + y2) DIV 2) ==> (x1 + y1 = x2 + y2)`;; time SOS_RULE `!x1 y1 x2 y2. (((x1 + y1) EXP 2 + 3 * x1 + y1) DIV 2 = ((x2 + y2) EXP 2 + 3 * x2 + y2) DIV 2) /\ (x1 + y1 = x2 + y2) ==> (x1 = x2) /\ (y1 = y2)`;; (* ------------------------------------------------------------------------- *) (* Reciprocal multiplication (actually just ARITH_RULE does these). 
*) (* ------------------------------------------------------------------------- *) time SOS_RULE `x <= 127 ==> ((86 * x) DIV 256 = x DIV 3)`;; time SOS_RULE `x < 2 EXP 16 ==> ((104858 * x) DIV (2 EXP 20) = x DIV 10)`;; (* ------------------------------------------------------------------------- *) (* This is more impressive since it's really nonlinear. See REMAINDER_DECODE *) (* ------------------------------------------------------------------------- *) time SOS_RULE `0 < m /\ m < n ==> ((m * ((n * x) DIV m + 1)) DIV n = x)`;; (* ------------------------------------------------------------------------- *) (* Some conversion examples. *) (* ------------------------------------------------------------------------- *) time SOS_CONV `&2 * x pow 4 + &2 * x pow 3 * y - x pow 2 * y pow 2 + &5 * y pow 4`;; time SOS_CONV `x pow 4 - (&2 * y * z + &1) * x pow 2 + (y pow 2 * z pow 2 + &2 * y * z + &2)`;; time SOS_CONV `&4 * x pow 4 + &4 * x pow 3 * y - &7 * x pow 2 * y pow 2 - &2 * x * y pow 3 + &10 * y pow 4`;; time SOS_CONV `&4 * x pow 4 * y pow 6 + x pow 2 - x * y pow 2 + y pow 2`;; time SOS_CONV `&4096 * (x pow 4 + x pow 2 + z pow 6 - &3 * x pow 2 * z pow 2) + &729`;; time SOS_CONV `&120 * x pow 2 - &63 * x pow 4 + &10 * x pow 6 + &30 * x * y - &120 * y pow 2 + &120 * y pow 4 + &31`;; time SOS_CONV `&9 * x pow 2 * y pow 4 + &9 * x pow 2 * z pow 4 + &36 * x pow 2 * y pow 3 + &36 * x pow 2 * y pow 2 - &48 * x * y * z pow 2 + &4 * y pow 4 + &4 * z pow 4 - &16 * y pow 3 + &16 * y pow 2`;; time SOS_CONV `(x pow 2 + y pow 2 + z pow 2) * (x pow 4 * y pow 2 + x pow 2 * y pow 4 + z pow 6 - &3 * x pow 2 * y pow 2 * z pow 2)`;; time SOS_CONV `x pow 4 + y pow 4 + z pow 4 - &4 * x * y * z + x + y + z + &3`;; (*** I think this will work, but normalization is slow time SOS_CONV `&100 * (x pow 4 + y pow 4 + z pow 4 - &4 * x * y * z + x + y + z) + &212`;; ***) time SOS_CONV `&100 * ((&2 * x - &2) pow 2 + (x pow 3 - &8 * x - &2) pow 2) - &588`;; time SOS_CONV `x pow 2 * (&120 - &63 * x pow 2 + &10 * x pow 4) + &30 * x * y + &30 * y pow 2 * (&4 * y pow 2 - &4) + &31`;; (* ------------------------------------------------------------------------- *) (* Example of basic rule. *) (* ------------------------------------------------------------------------- *) time PURE_SOS `!x. x pow 4 + y pow 4 + z pow 4 - &4 * x * y * z + x + y + z + &3 >= &1 / &7`;; time PURE_SOS `&0 <= &98 * x pow 12 + -- &980 * x pow 10 + &3038 * x pow 8 + -- &2968 * x pow 6 + &1022 * x pow 4 + -- &84 * x pow 2 + &2`;; time PURE_SOS `!x. &0 <= &2 * x pow 14 + -- &84 * x pow 12 + &1022 * x pow 10 + -- &2968 * x pow 8 + &3038 * x pow 6 + -- &980 * x pow 4 + &98 * x pow 2`;; (* ------------------------------------------------------------------------- *) (* From Zeng et al, JSC vol 37 (2004), p83-99. *) (* All of them work nicely with pure SOS_CONV, except (maybe) the one noted. *) (* ------------------------------------------------------------------------- *) PURE_SOS `x pow 6 + y pow 6 + z pow 6 - &3 * x pow 2 * y pow 2 * z pow 2 >= &0`;; PURE_SOS `x pow 4 + y pow 4 + z pow 4 + &1 - &4*x*y*z >= &0`;; PURE_SOS `x pow 4 + &2*x pow 2*z + x pow 2 - &2*x*y*z + &2*y pow 2*z pow 2 + &2*y*z pow 2 + &2*z pow 2 - &2*x + &2* y*z + &1 >= &0`;; (**** This is harder. Interestingly, this fails the pure SOS test, it seems. Yet only on rounding(!?) Poor Newton polytope optimization or something? But REAL_SOS does finally converge on the second run at level 12! 
REAL_SOS `x pow 4*y pow 4 - &2*x pow 5*y pow 3*z pow 2 + x pow 6*y pow 2*z pow 4 + &2*x pow 2*y pow 3*z - &4* x pow 3*y pow 2*z pow 3 + &2*x pow 4*y*z pow 5 + z pow 2*y pow 2 - &2*z pow 4*y*x + z pow 6*x pow 2 >= &0`;; ****) PURE_SOS `x pow 4 + &4*x pow 2*y pow 2 + &2*x*y*z pow 2 + &2*x*y*w pow 2 + y pow 4 + z pow 4 + w pow 4 + &2*z pow 2*w pow 2 + &2*x pow 2*w + &2*y pow 2*w + &2*x*y + &3*w pow 2 + &2*z pow 2 + &1 >= &0`;; PURE_SOS `w pow 6 + &2*z pow 2*w pow 3 + x pow 4 + y pow 4 + z pow 4 + &2*x pow 2*w + &2*x pow 2*z + &3*x pow 2 + w pow 2 + &2*z*w + z pow 2 + &2*z + &2*w + &1 >= &0`;; *****) coq-8.4pl2/plugins/micromega/EnvRing.v0000640000175000001440000007451412010532755017025 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2012 *) (* \VV/ **************************************************************) (* // * This file is distributed under the terms of the *) (* * GNU Lesser General Public License Version 2.1 *) (************************************************************************) Section MakeRingPol. (* Ring elements *) Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R->R). Variable req : R -> R -> Prop. (* Ring properties *) Variable Rsth : Equivalence req. Variable Reqe : ring_eq_ext radd rmul ropp req. Variable ARth : almost_ring_theory rO rI radd rmul rsub ropp req. (* Coefficients *) Variable C: Type. Variable (cO cI: C) (cadd cmul csub : C->C->C) (copp : C->C). Variable ceqb : C->C->bool. Variable phi : C -> R. Variable CRmorph : ring_morph rO rI radd rmul rsub ropp req cO cI cadd cmul csub copp ceqb phi. (* Power coefficients *) Variable Cpow : Type. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Variable pow_th : power_theory rI rmul req Cp_phi rpow. (* R notations *) Notation "0" := rO. Notation "1" := rI. Infix "+" := radd. Infix "*" := rmul. Infix "-" := rsub. Notation "- x" := (ropp x). Infix "==" := req. Infix "^" := (pow_pos rmul). (* C notations *) Infix "+!" := cadd. Infix "*!" := cmul. Infix "-! " := csub. Notation "-! x" := (copp x). Infix "?=!" := ceqb. Notation "[ x ]" := (phi x). (* Useful tactics *) Add Morphism radd : radd_ext. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext. exact (Ropp_ext Reqe). Qed. Add Morphism rsub : rsub_ext. exact (ARsub_ext Rsth Reqe ARth). Qed. Ltac rsimpl := gen_srewrite Rsth Reqe ARth. Ltac add_push := gen_add_push radd Rsth Reqe ARth. Ltac mul_push := gen_mul_push rmul Rsth Reqe ARth. Ltac add_permut_rec t := match t with | ?x + ?y => add_permut_rec y || add_permut_rec x | _ => add_push t; apply (Radd_ext Reqe); [|reflexivity] end. Ltac add_permut := repeat (reflexivity || match goal with |- ?t == _ => add_permut_rec t end). Ltac mul_permut_rec t := match t with | ?x * ?y => mul_permut_rec y || mul_permut_rec x | _ => mul_push t; apply (Rmul_ext Reqe); [|reflexivity] end. Ltac mul_permut := repeat (reflexivity || match goal with |- ?t == _ => mul_permut_rec t end). (* Definition of multivariable polynomials with coefficients in C : Type [Pol] represents [X1 ... Xn]. The representation is Horner's where an [n] variable polynomial (C[X1..Xn]) is seen as a polynomial on [X1] whose coefficients are polynomials with [n-1] variables (C[X2..Xn]). There are several optimisations to make the representation more compact: - [Pc c] is the constant polynomial of value c == c*X1^0*..*Xn^0 - [Pinj j Q] is a polynomial constant w.r.t the [j] first variables. variable indices are shifted by j in Q. 
== X1^0 *..* Xj^0 * Q{X1 <- Xj+1;..; Xn-j <- Xn} - [PX P i Q] is an optimised Horner form of P*X^i + Q with P not the null polynomial == P * X1^i + Q{X1 <- X2; ..; Xn-1 <- Xn} In addition: - polynomials of the form (PX (PX P i (Pc 0)) j Q) are forbidden since they can be represented by the simpler form (PX P (i+j) Q) - (Pinj i (Pinj j P)) is (Pinj (i+j) P) - (Pinj i (Pc c)) is (Pc c) *) Inductive Pol : Type := | Pc : C -> Pol | Pinj : positive -> Pol -> Pol | PX : Pol -> positive -> Pol -> Pol. Definition P0 := Pc cO. Definition P1 := Pc cI. Fixpoint Peq (P P' : Pol) {struct P'} : bool := match P, P' with | Pc c, Pc c' => c ?=! c' | Pinj j Q, Pinj j' Q' => match j ?= j' with | Eq => Peq Q Q' | _ => false end | PX P i Q, PX P' i' Q' => match i ?= i' with | Eq => if Peq P P' then Peq Q Q' else false | _ => false end | _, _ => false end. Infix "?==" := Peq. Definition mkPinj j P := match P with | Pc _ => P | Pinj j' Q => Pinj (j + j') Q | _ => Pinj j P end. Definition mkPinj_pred j P:= match j with | xH => P | xO j => Pinj (Pos.pred_double j) P | xI j => Pinj (xO j) P end. Definition mkPX P i Q := match P with | Pc c => if c ?=! cO then mkPinj xH Q else PX P i Q | Pinj _ _ => PX P i Q | PX P' i' Q' => if Q' ?== P0 then PX P' (i' + i) Q else PX P i Q end. Definition mkXi i := PX P1 i P0. Definition mkX := mkXi 1. (** Opposite of addition *) Fixpoint Popp (P:Pol) : Pol := match P with | Pc c => Pc (-! c) | Pinj j Q => Pinj j (Popp Q) | PX P i Q => PX (Popp P) i (Popp Q) end. Notation "-- P" := (Popp P). (** Addition et subtraction *) Fixpoint PaddC (P:Pol) (c:C) : Pol := match P with | Pc c1 => Pc (c1 +! c) | Pinj j Q => Pinj j (PaddC Q c) | PX P i Q => PX P i (PaddC Q c) end. Fixpoint PsubC (P:Pol) (c:C) : Pol := match P with | Pc c1 => Pc (c1 -! c) | Pinj j Q => Pinj j (PsubC Q c) | PX P i Q => PX P i (PsubC Q c) end. Section PopI. Variable Pop : Pol -> Pol -> Pol. Variable Q : Pol. Fixpoint PaddI (j:positive) (P:Pol) : Pol := match P with | Pc c => mkPinj j (PaddC Q c) | Pinj j' Q' => match Z.pos_sub j' j with | Zpos k => mkPinj j (Pop (Pinj k Q') Q) | Z0 => mkPinj j (Pop Q' Q) | Zneg k => mkPinj j' (PaddI k Q') end | PX P i Q' => match j with | xH => PX P i (Pop Q' Q) | xO j => PX P i (PaddI (Pos.pred_double j) Q') | xI j => PX P i (PaddI (xO j) Q') end end. Fixpoint PsubI (j:positive) (P:Pol) : Pol := match P with | Pc c => mkPinj j (PaddC (--Q) c) | Pinj j' Q' => match Z.pos_sub j' j with | Zpos k => mkPinj j (Pop (Pinj k Q') Q) | Z0 => mkPinj j (Pop Q' Q) | Zneg k => mkPinj j' (PsubI k Q') end | PX P i Q' => match j with | xH => PX P i (Pop Q' Q) | xO j => PX P i (PsubI (Pos.pred_double j) Q') | xI j => PX P i (PsubI (xO j) Q') end end. Variable P' : Pol. Fixpoint PaddX (i':positive) (P:Pol) : Pol := match P with | Pc c => PX P' i' P | Pinj j Q' => match j with | xH => PX P' i' Q' | xO j => PX P' i' (Pinj (Pos.pred_double j) Q') | xI j => PX P' i' (Pinj (xO j) Q') end | PX P i Q' => match Z.pos_sub i i' with | Zpos k => mkPX (Pop (PX P k P0) P') i' Q' | Z0 => mkPX (Pop P P') i Q' | Zneg k => mkPX (PaddX k P) i Q' end end. Fixpoint PsubX (i':positive) (P:Pol) : Pol := match P with | Pc c => PX (--P') i' P | Pinj j Q' => match j with | xH => PX (--P') i' Q' | xO j => PX (--P') i' (Pinj (Pos.pred_double j) Q') | xI j => PX (--P') i' (Pinj (xO j) Q') end | PX P i Q' => match Z.pos_sub i i' with | Zpos k => mkPX (Pop (PX P k P0) P') i' Q' | Z0 => mkPX (Pop P P') i Q' | Zneg k => mkPX (PsubX k P) i Q' end end. End PopI. 
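(* A small illustration of the representation above (assuming the usual numeric notation for [positive]): [PX P1 2 (Pc c)] denotes X1^2 + c, and [Pinj 2 (PX P1 1 P0)] denotes X3, since [Pinj] shifts the variable indices. *)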
Fixpoint Padd (P P': Pol) {struct P'} : Pol := match P' with | Pc c' => PaddC P c' | Pinj j' Q' => PaddI Padd Q' j' P | PX P' i' Q' => match P with | Pc c => PX P' i' (PaddC Q' c) | Pinj j Q => match j with | xH => PX P' i' (Padd Q Q') | xO j => PX P' i' (Padd (Pinj (Pos.pred_double j) Q) Q') | xI j => PX P' i' (Padd (Pinj (xO j) Q) Q') end | PX P i Q => match Z.pos_sub i i' with | Zpos k => mkPX (Padd (PX P k P0) P') i' (Padd Q Q') | Z0 => mkPX (Padd P P') i (Padd Q Q') | Zneg k => mkPX (PaddX Padd P' k P) i (Padd Q Q') end end end. Infix "++" := Padd. Fixpoint Psub (P P': Pol) {struct P'} : Pol := match P' with | Pc c' => PsubC P c' | Pinj j' Q' => PsubI Psub Q' j' P | PX P' i' Q' => match P with | Pc c => PX (--P') i' (*(--(PsubC Q' c))*) (PaddC (--Q') c) | Pinj j Q => match j with | xH => PX (--P') i' (Psub Q Q') | xO j => PX (--P') i' (Psub (Pinj (Pos.pred_double j) Q) Q') | xI j => PX (--P') i' (Psub (Pinj (xO j) Q) Q') end | PX P i Q => match Z.pos_sub i i' with | Zpos k => mkPX (Psub (PX P k P0) P') i' (Psub Q Q') | Z0 => mkPX (Psub P P') i (Psub Q Q') | Zneg k => mkPX (PsubX Psub P' k P) i (Psub Q Q') end end end. Infix "--" := Psub. (** Multiplication *) Fixpoint PmulC_aux (P:Pol) (c:C) : Pol := match P with | Pc c' => Pc (c' *! c) | Pinj j Q => mkPinj j (PmulC_aux Q c) | PX P i Q => mkPX (PmulC_aux P c) i (PmulC_aux Q c) end. Definition PmulC P c := if c ?=! cO then P0 else if c ?=! cI then P else PmulC_aux P c. Section PmulI. Variable Pmul : Pol -> Pol -> Pol. Variable Q : Pol. Fixpoint PmulI (j:positive) (P:Pol) : Pol := match P with | Pc c => mkPinj j (PmulC Q c) | Pinj j' Q' => match Z.pos_sub j' j with | Zpos k => mkPinj j (Pmul (Pinj k Q') Q) | Z0 => mkPinj j (Pmul Q' Q) | Zneg k => mkPinj j' (PmulI k Q') end | PX P' i' Q' => match j with | xH => mkPX (PmulI xH P') i' (Pmul Q' Q) | xO j' => mkPX (PmulI j P') i' (PmulI (Pos.pred_double j') Q') | xI j' => mkPX (PmulI j P') i' (PmulI (xO j') Q') end end. End PmulI. Fixpoint Pmul (P P'' : Pol) {struct P''} : Pol := match P'' with | Pc c => PmulC P c | Pinj j' Q' => PmulI Pmul Q' j' P | PX P' i' Q' => match P with | Pc c => PmulC P'' c | Pinj j Q => let QQ' := match j with | xH => Pmul Q Q' | xO j => Pmul (Pinj (Pos.pred_double j) Q) Q' | xI j => Pmul (Pinj (xO j) Q) Q' end in mkPX (Pmul P P') i' QQ' | PX P i Q=> let QQ' := Pmul Q Q' in let PQ' := PmulI Pmul Q' xH P in let QP' := Pmul (mkPinj xH Q) P' in let PP' := Pmul P P' in (mkPX (mkPX PP' i P0 ++ QP') i' P0) ++ mkPX PQ' i QQ' end end. Infix "**" := Pmul. Fixpoint Psquare (P:Pol) : Pol := match P with | Pc c => Pc (c *! c) | Pinj j Q => Pinj j (Psquare Q) | PX P i Q => let twoPQ := Pmul P (mkPinj xH (PmulC Q (cI +! cI))) in let Q2 := Psquare Q in let P2 := Psquare P in mkPX (mkPX P2 i P0 ++ twoPQ) i Q2 end. (** Monomial **) (** A monomial is X1^k1...Xi^ki. Its representation is a simplified version of the polynomial representation: - [mon0] correspond to the polynom [P1]. - [(zmon j M)] corresponds to [(Pinj j ...)], i.e. skip j variable indices. - [(vmon i M)] is X^i*M with X the current variable, its corresponds to (PX P1 i ...)] *) Inductive Mon: Set := | mon0: Mon | zmon: positive -> Mon -> Mon | vmon: positive -> Mon -> Mon. Definition mkZmon j M := match M with mon0 => mon0 | _ => zmon j M end. Definition zmon_pred j M := match j with xH => M | _ => mkZmon (Pos.pred j) M end. Definition mkVmon i M := match M with | mon0 => vmon i mon0 | zmon j m => vmon i (zmon_pred j m) | vmon i' m => vmon (i+i') m end. 
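(* Illustration (hypothetical definition, not used elsewhere): the
   monomial X1^2 * X3 can be written [vmon 2 (zmon 1 (vmon 1 mon0))] : an
   X1^2 factor on the head variable, then skip one variable (X2), then an
   X3 factor.  [MFactor P M] below splits a polynomial P into a pair (Q,R)
   such that P == Q + M*R, as stated by [Mphi_ok] further down. *)
Definition example_mon_X1sq_X3 : Mon := vmon 2 (zmon 1 (vmon 1 mon0)).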
Fixpoint MFactor (P: Pol) (M: Mon) : Pol * Pol := match P, M with _, mon0 => (Pc cO, P) | Pc _, _ => (P, Pc cO) | Pinj j1 P1, zmon j2 M1 => match (j1 ?= j2) with Eq => let (R,S) := MFactor P1 M1 in (mkPinj j1 R, mkPinj j1 S) | Lt => let (R,S) := MFactor P1 (zmon (j2 - j1) M1) in (mkPinj j1 R, mkPinj j1 S) | Gt => (P, Pc cO) end | Pinj _ _, vmon _ _ => (P, Pc cO) | PX P1 i Q1, zmon j M1 => let M2 := zmon_pred j M1 in let (R1, S1) := MFactor P1 M in let (R2, S2) := MFactor Q1 M2 in (mkPX R1 i R2, mkPX S1 i S2) | PX P1 i Q1, vmon j M1 => match (i ?= j) with Eq => let (R1,S1) := MFactor P1 (mkZmon xH M1) in (mkPX R1 i Q1, S1) | Lt => let (R1,S1) := MFactor P1 (vmon (j - i) M1) in (mkPX R1 i Q1, S1) | Gt => let (R1,S1) := MFactor P1 (mkZmon xH M1) in (mkPX R1 i Q1, mkPX S1 (i-j) (Pc cO)) end end. Definition POneSubst (P1: Pol) (M1: Mon) (P2: Pol): option Pol := let (Q1,R1) := MFactor P1 M1 in match R1 with (Pc c) => if c ?=! cO then None else Some (Padd Q1 (Pmul P2 R1)) | _ => Some (Padd Q1 (Pmul P2 R1)) end. Fixpoint PNSubst1 (P1: Pol) (M1: Mon) (P2: Pol) (n: nat) : Pol := match POneSubst P1 M1 P2 with Some P3 => match n with S n1 => PNSubst1 P3 M1 P2 n1 | _ => P3 end | _ => P1 end. Definition PNSubst (P1: Pol) (M1: Mon) (P2: Pol) (n: nat): option Pol := match POneSubst P1 M1 P2 with Some P3 => match n with S n1 => Some (PNSubst1 P3 M1 P2 n1) | _ => None end | _ => None end. Fixpoint PSubstL1 (P1: Pol) (LM1: list (Mon * Pol)) (n: nat) : Pol := match LM1 with cons (M1,P2) LM2 => PSubstL1 (PNSubst1 P1 M1 P2 n) LM2 n | _ => P1 end. Fixpoint PSubstL (P1: Pol) (LM1: list (Mon * Pol)) (n: nat) : option Pol := match LM1 with cons (M1,P2) LM2 => match PNSubst P1 M1 P2 n with Some P3 => Some (PSubstL1 P3 LM2 n) | None => PSubstL P1 LM2 n end | _ => None end. Fixpoint PNSubstL (P1: Pol) (LM1: list (Mon * Pol)) (m n: nat) : Pol := match PSubstL P1 LM1 n with Some P3 => match m with S m1 => PNSubstL P3 LM1 m1 n | _ => P3 end | _ => P1 end. (** Evaluation of a polynomial towards R *) Fixpoint Pphi(l:Env R) (P:Pol) : R := match P with | Pc c => [c] | Pinj j Q => Pphi (jump j l) Q | PX P i Q => Pphi l P * (hd l) ^ i + Pphi (tail l) Q end. Reserved Notation "P @ l " (at level 10, no associativity). Notation "P @ l " := (Pphi l P). (** Evaluation of a monomial towards R *) Fixpoint Mphi(l:Env R) (M: Mon) : R := match M with | mon0 => rI | zmon j M1 => Mphi (jump j l) M1 | vmon i M1 => Mphi (tail l) M1 * (hd l) ^ i end. Notation "M @@ l" := (Mphi l M) (at level 10, no associativity). (** Proofs *) Ltac destr_pos_sub := match goal with |- context [Z.pos_sub ?x ?y] => generalize (Z.pos_sub_discr x y); destruct (Z.pos_sub x y) end. Lemma Peq_ok P P' : (P ?== P') = true -> forall l, P@l == P'@ l. Proof. revert P';induction P;destruct P';simpl; intros H l; try easy. - now apply (morph_eq CRmorph). - destruct (Pos.compare_spec p p0); [ subst | easy | easy ]. now rewrite IHP. - specialize (IHP1 P'1); specialize (IHP2 P'2). destruct (Pos.compare_spec p p0); [ subst | easy | easy ]. destruct (P2 ?== P'1); [|easy]. rewrite H in *. now rewrite IHP1, IHP2. Qed. Lemma Peq_spec P P' : BoolSpec (forall l, P@l == P'@l) True (P ?== P'). Proof. generalize (Peq_ok P P'). destruct (P ?== P'); auto. Qed. Lemma Pphi0 l : P0@l == 0. Proof. simpl;apply (morph0 CRmorph). Qed. Lemma Pphi1 l : P1@l == 1. Proof. simpl;apply (morph1 CRmorph). Qed. Lemma env_morph p e1 e2 : (forall x, e1 x = e2 x) -> p @ e1 = p @ e2. Proof. revert e1 e2. induction p ; simpl. - reflexivity. - intros e1 e2 EQ. apply IHp. intros. apply EQ. - intros e1 e2 EQ. 
f_equal; [f_equal|]. + now apply IHp1. + f_equal. apply EQ. + apply IHp2. intros; apply EQ. Qed. Lemma Pjump_add P i j l : P @ (jump (i + j) l) = P @ (jump j (jump i l)). Proof. apply env_morph. intros. rewrite <- jump_add. f_equal. apply Pos.add_comm. Qed. Lemma Pjump_xO_tail P p l : P @ (jump (xO p) (tail l)) = P @ (jump (xI p) l). Proof. apply env_morph. intros. now jump_simpl. Qed. Lemma Pjump_pred_double P p l : P @ (jump (Pos.pred_double p) (tail l)) = P @ (jump (xO p) l). Proof. apply env_morph. intros. rewrite jump_pred_double. now jump_simpl. Qed. Lemma mkPinj_ok j l P : (mkPinj j P)@l == P@(jump j l). Proof. destruct P;simpl;rsimpl. now rewrite Pjump_add. Qed. Lemma pow_pos_add x i j : x^(j + i) == x^i * x^j. Proof. rewrite Pos.add_comm. apply (pow_pos_add Rsth Reqe.(Rmul_ext) ARth.(ARmul_assoc)). Qed. Lemma ceqb_spec c c' : BoolSpec ([c] == [c']) True (c ?=! c'). Proof. generalize (morph_eq CRmorph c c'). destruct (c ?=! c'); auto. Qed. Lemma mkPX_ok l P i Q : (mkPX P i Q)@l == P@l * (hd l)^i + Q@(tail l). Proof. unfold mkPX. destruct P. - case ceqb_spec; intros H; simpl; try reflexivity. rewrite H, (morph0 CRmorph), mkPinj_ok; rsimpl. - reflexivity. - case Peq_spec; intros H; simpl; try reflexivity. rewrite H, Pphi0, Pos.add_comm, pow_pos_add; rsimpl. Qed. Hint Rewrite Pphi0 Pphi1 mkPinj_ok mkPX_ok (morph0 CRmorph) (morph1 CRmorph) (morph0 CRmorph) (morph_add CRmorph) (morph_mul CRmorph) (morph_sub CRmorph) (morph_opp CRmorph) : Esimpl. (* Quicker than autorewrite with Esimpl :-) *) Ltac Esimpl := try rewrite_db Esimpl; rsimpl; simpl. Lemma PaddC_ok c P l : (PaddC P c)@l == P@l + [c]. Proof. revert l;induction P;simpl;intros;Esimpl;trivial. rewrite IHP2;rsimpl. Qed. Lemma PsubC_ok c P l : (PsubC P c)@l == P@l - [c]. Proof. revert l;induction P;simpl;intros. - Esimpl. - rewrite IHP;rsimpl. - rewrite IHP2;rsimpl. Qed. Lemma PmulC_aux_ok c P l : (PmulC_aux P c)@l == P@l * [c]. Proof. revert l;induction P;simpl;intros;Esimpl;trivial. rewrite IHP1, IHP2;rsimpl. add_permut. mul_permut. Qed. Lemma PmulC_ok c P l : (PmulC P c)@l == P@l * [c]. Proof. unfold PmulC. case ceqb_spec; intros H. - rewrite H; Esimpl. - case ceqb_spec; intros H'. + rewrite H'; Esimpl. + apply PmulC_aux_ok. Qed. Lemma Popp_ok P l : (--P)@l == - P@l. Proof. revert l;induction P;simpl;intros. - Esimpl. - apply IHP. - rewrite IHP1, IHP2;rsimpl. Qed. Hint Rewrite PaddC_ok PsubC_ok PmulC_ok Popp_ok : Esimpl. Lemma PaddX_ok P' P k l : (forall P l, (P++P')@l == P@l + P'@l) -> (PaddX Padd P' k P) @ l == P@l + P'@l * (hd l)^k. Proof. intros IHP'. revert k l. induction P;simpl;intros. - add_permut. - destruct p; simpl; rewrite ?Pjump_xO_tail, ?Pjump_pred_double; add_permut. - destr_pos_sub; intros ->;Esimpl. + rewrite IHP';rsimpl. add_permut. + rewrite IHP', pow_pos_add;simpl;Esimpl. add_permut. + rewrite IHP1, pow_pos_add;rsimpl. add_permut. Qed. Lemma Padd_ok P' P l : (P ++ P')@l == P@l + P'@l. Proof. revert P l; induction P';simpl;intros;Esimpl. - revert p l; induction P;simpl;intros. + Esimpl; add_permut. + destr_pos_sub; intros ->;Esimpl. * now rewrite IHP'. * rewrite IHP';Esimpl. now rewrite Pjump_add. * rewrite IHP. now rewrite Pjump_add. + destruct p0;simpl. * rewrite IHP2;simpl. rsimpl. rewrite Pjump_xO_tail. Esimpl. * rewrite IHP2;simpl. rewrite Pjump_pred_double. rsimpl. * rewrite IHP'. rsimpl. - destruct P;simpl. + Esimpl. add_permut. + destruct p0;simpl;Esimpl; rewrite IHP'2; simpl. * rewrite Pjump_xO_tail. rsimpl. add_permut. * rewrite Pjump_pred_double. rsimpl. add_permut. * rsimpl. unfold tail. add_permut. 
+ destr_pos_sub; intros ->; Esimpl. * rewrite IHP'1, IHP'2;rsimpl. add_permut. * rewrite IHP'1, IHP'2;simpl;Esimpl. rewrite pow_pos_add;rsimpl. add_permut. * rewrite PaddX_ok by trivial; rsimpl. rewrite IHP'2, pow_pos_add; rsimpl. add_permut. Qed. Lemma PsubX_ok P' P k l : (forall P l, (P--P')@l == P@l - P'@l) -> (PsubX Psub P' k P) @ l == P@l - P'@l * (hd l)^k. Proof. intros IHP'. revert k l. induction P;simpl;intros. - rewrite Popp_ok;rsimpl; add_permut. - destruct p; simpl; rewrite Popp_ok;rsimpl; rewrite ?Pjump_xO_tail, ?Pjump_pred_double; add_permut. - destr_pos_sub; intros ->; Esimpl. + rewrite IHP';rsimpl. add_permut. + rewrite IHP', pow_pos_add;simpl;Esimpl. add_permut. + rewrite IHP1, pow_pos_add;rsimpl. add_permut. Qed. Lemma Psub_ok P' P l : (P -- P')@l == P@l - P'@l. Proof. revert P l; induction P';simpl;intros;Esimpl. - revert p l; induction P;simpl;intros. + Esimpl; add_permut. + destr_pos_sub; intros ->;Esimpl. * rewrite IHP';rsimpl. * rewrite IHP';Esimpl. now rewrite Pjump_add. * rewrite IHP. now rewrite Pjump_add. + destruct p0;simpl. * rewrite IHP2;simpl. rsimpl. rewrite Pjump_xO_tail. Esimpl. * rewrite IHP2;simpl. rewrite Pjump_pred_double. rsimpl. * rewrite IHP'. rsimpl. - destruct P;simpl. + Esimpl; add_permut. + destruct p0;simpl;Esimpl; rewrite IHP'2; simpl. * rewrite Pjump_xO_tail. rsimpl. add_permut. * rewrite Pjump_pred_double. rsimpl. add_permut. * rsimpl. unfold tail. add_permut. + destr_pos_sub; intros ->; Esimpl. * rewrite IHP'1, IHP'2;rsimpl. add_permut. * rewrite IHP'1, IHP'2;simpl;Esimpl. rewrite pow_pos_add;rsimpl. add_permut. * rewrite PsubX_ok by trivial;rsimpl. rewrite IHP'2, pow_pos_add;rsimpl. add_permut. Qed. Lemma PmulI_ok P' : (forall P l, (Pmul P P') @ l == P @ l * P' @ l) -> forall P p l, (PmulI Pmul P' p P) @ l == P @ l * P' @ (jump p l). Proof. intros IHP'. induction P;simpl;intros. - Esimpl; mul_permut. - destr_pos_sub; intros ->;Esimpl. + now rewrite IHP'. + now rewrite IHP', Pjump_add. + now rewrite IHP, Pjump_add. - destruct p0;Esimpl; rewrite ?IHP1, ?IHP2; rsimpl. + rewrite Pjump_xO_tail. f_equiv. mul_permut. + rewrite Pjump_pred_double. f_equiv. mul_permut. + rewrite IHP'. f_equiv. mul_permut. Qed. Lemma Pmul_ok P P' l : (P**P')@l == P@l * P'@l. Proof. revert P l;induction P';simpl;intros. - apply PmulC_ok. - apply PmulI_ok;trivial. - destruct P. + rewrite (ARmul_comm ARth). Esimpl. + Esimpl. rewrite IHP'1;Esimpl. f_equiv. destruct p0;rewrite IHP'2;Esimpl. * now rewrite Pjump_xO_tail. * rewrite Pjump_pred_double; Esimpl. + rewrite Padd_ok, !mkPX_ok, Padd_ok, !mkPX_ok, !IHP'1, !IHP'2, PmulI_ok; trivial. simpl. Esimpl. unfold tail. add_permut; f_equiv; mul_permut. Qed. Lemma Psquare_ok P l : (Psquare P)@l == P@l * P@l. Proof. revert l;induction P;simpl;intros;Esimpl. - apply IHP. - rewrite Padd_ok, Pmul_ok;Esimpl. rewrite IHP1, IHP2. mul_push ((hd l)^p). now mul_push (P2@l). Qed. Lemma Mphi_morph M e1 e2 : (forall x, e1 x = e2 x) -> M @@ e1 = M @@ e2. Proof. revert e1 e2; induction M; simpl; intros e1 e2 EQ; trivial. - apply IHM. intros; apply EQ. - f_equal. * apply IHM. intros; apply EQ. * f_equal. apply EQ. Qed. Lemma Mjump_xO_tail M p l : M @@ (jump (xO p) (tail l)) = M @@ (jump (xI p) l). Proof. apply Mphi_morph. intros. now jump_simpl. Qed. Lemma Mjump_pred_double M p l : M @@ (jump (Pos.pred_double p) (tail l)) = M @@ (jump (xO p) l). Proof. apply Mphi_morph. intros. rewrite jump_pred_double. now jump_simpl. Qed. Lemma Mjump_add M i j l : M @@ (jump (i + j) l) = M @@ (jump j (jump i l)). Proof. apply Mphi_morph. intros. 
now rewrite <- jump_add, Pos.add_comm. Qed. Lemma mkZmon_ok M j l : (mkZmon j M) @@ l == (zmon j M) @@ l. Proof. destruct M; simpl; rsimpl. Qed. Lemma zmon_pred_ok M j l : (zmon_pred j M) @@ (tail l) == (zmon j M) @@ l. Proof. destruct j; simpl; rewrite ?mkZmon_ok; simpl; rsimpl. - now rewrite Mjump_xO_tail. - rewrite Mjump_pred_double; rsimpl. Qed. Lemma mkVmon_ok M i l : (mkVmon i M)@@l == M@@l * (hd l)^i. Proof. destruct M;simpl;intros;rsimpl. - rewrite zmon_pred_ok;simpl;rsimpl. - rewrite pow_pos_add;rsimpl. Qed. Ltac destr_mfactor R S := match goal with | H : context [MFactor ?P _] |- context [MFactor ?P ?M] => specialize (H M); destruct MFactor as (R,S) end. Lemma Mphi_ok P M l : let (Q,R) := MFactor P M in P@l == Q@l + M@@l * R@l. Proof. revert M l; induction P; destruct M; intros l; simpl; auto; Esimpl. - case Pos.compare_spec; intros He; simpl. * destr_mfactor R1 S1. now rewrite IHP, He, !mkPinj_ok. * destr_mfactor R1 S1. rewrite IHP; simpl. now rewrite !mkPinj_ok, <- Mjump_add, Pos.add_comm, Pos.sub_add. * Esimpl. - destr_mfactor R1 S1. destr_mfactor R2 S2. rewrite IHP1, IHP2, !mkPX_ok, zmon_pred_ok; simpl; rsimpl. add_permut. - case Pos.compare_spec; intros He; simpl; destr_mfactor R1 S1; rewrite ?He, IHP1, mkPX_ok, ?mkZmon_ok; simpl; rsimpl; unfold tail; add_permut; mul_permut. * rewrite <- pow_pos_add, Pos.add_comm, Pos.sub_add by trivial; rsimpl. * rewrite mkPX_ok. simpl. Esimpl. mul_permut. rewrite <- pow_pos_add, Pos.sub_add by trivial; rsimpl. Qed. Lemma POneSubst_ok P1 M1 P2 P3 l : POneSubst P1 M1 P2 = Some P3 -> M1@@l == P2@l -> P1@l == P3@l. Proof. unfold POneSubst. assert (H := Mphi_ok P1). destr_mfactor R1 S1. rewrite H; clear H. intros EQ EQ'. replace P3 with (R1 ++ P2 ** S1). - rewrite EQ', Padd_ok, Pmul_ok; rsimpl. - revert EQ. destruct S1; try now injection 1. case ceqb_spec; now inversion 2. Qed. Lemma PNSubst1_ok n P1 M1 P2 l : M1@@l == P2@l -> P1@l == (PNSubst1 P1 M1 P2 n)@l. Proof. revert P1. induction n; simpl; intros P1; generalize (POneSubst_ok P1 M1 P2); destruct POneSubst; intros; rewrite <- ?IHn; auto; reflexivity. Qed. Lemma PNSubst_ok n P1 M1 P2 l P3 : PNSubst P1 M1 P2 n = Some P3 -> M1@@l == P2@l -> P1@l == P3@l. Proof. unfold PNSubst. assert (H := POneSubst_ok P1 M1 P2); destruct POneSubst; try discriminate. destruct n; inversion_clear 1. intros. rewrite <- PNSubst1_ok; auto. Qed. Fixpoint MPcond (LM1: list (Mon * Pol)) (l: Env R) : Prop := match LM1 with | cons (M1,P2) LM2 => (M1@@l == P2@l) /\ MPcond LM2 l | _ => True end. Lemma PSubstL1_ok n LM1 P1 l : MPcond LM1 l -> P1@l == (PSubstL1 P1 LM1 n)@l. Proof. revert P1; induction LM1 as [|(M2,P2) LM2 IH]; simpl; intros. - reflexivity. - rewrite <- IH by intuition. now apply PNSubst1_ok. Qed. Lemma PSubstL_ok n LM1 P1 P2 l : PSubstL P1 LM1 n = Some P2 -> MPcond LM1 l -> P1@l == P2@l. Proof. revert P1. induction LM1 as [|(M2,P2') LM2 IH]; simpl; intros. - discriminate. - assert (H':=PNSubst_ok n P3 M2 P2'). destruct PNSubst. * injection H; intros <-. rewrite <- PSubstL1_ok; intuition. * now apply IH. Qed. Lemma PNSubstL_ok m n LM1 P1 l : MPcond LM1 l -> P1@l == (PNSubstL P1 LM1 m n)@l. Proof. revert LM1 P1. induction m; simpl; intros; assert (H' := PSubstL_ok n LM1 P2); destruct PSubstL; auto; try reflexivity. rewrite <- IHm; auto. Qed. 
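(* Illustration (hypothetical definition, not used elsewhere): a
   substitution list pairs monomials with polynomials; [MPcond LM l]
   states that every pair (M,P) in LM satisfies M@@l == P@l, and
   [PNSubstL_ok] above then allows rewriting those monomials away inside
   a polynomial without changing its value.  For instance, the single
   equation X1*X2 == 1 is encoded as: *)
Definition example_subst : list (Mon * Pol) :=
  cons (vmon 1 (vmon 1 mon0), P1) nil.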
(** Definition of polynomial expressions *) Inductive PExpr : Type := | PEc : C -> PExpr | PEX : positive -> PExpr | PEadd : PExpr -> PExpr -> PExpr | PEsub : PExpr -> PExpr -> PExpr | PEmul : PExpr -> PExpr -> PExpr | PEopp : PExpr -> PExpr | PEpow : PExpr -> N -> PExpr. (** evaluation of polynomial expressions towards R *) Definition mk_X j := mkPinj_pred j mkX. (** evaluation of polynomial expressions towards R *) Fixpoint PEeval (l:Env R) (pe:PExpr) : R := match pe with | PEc c => phi c | PEX j => nth j l | PEadd pe1 pe2 => (PEeval l pe1) + (PEeval l pe2) | PEsub pe1 pe2 => (PEeval l pe1) - (PEeval l pe2) | PEmul pe1 pe2 => (PEeval l pe1) * (PEeval l pe2) | PEopp pe1 => - (PEeval l pe1) | PEpow pe1 n => rpow (PEeval l pe1) (Cp_phi n) end. (** Correctness proofs *) Lemma mkX_ok p l : nth p l == (mk_X p) @ l. Proof. destruct p;simpl;intros;Esimpl;trivial. rewrite nth_spec ; auto. unfold hd. now rewrite <- nth_pred_double, nth_jump. Qed. Hint Rewrite Padd_ok Psub_ok : Esimpl. Section POWER. Variable subst_l : Pol -> Pol. Fixpoint Ppow_pos (res P:Pol) (p:positive) : Pol := match p with | xH => subst_l (res ** P) | xO p => Ppow_pos (Ppow_pos res P p) P p | xI p => subst_l ((Ppow_pos (Ppow_pos res P p) P p) ** P) end. Definition Ppow_N P n := match n with | N0 => P1 | Npos p => Ppow_pos P1 P p end. Lemma Ppow_pos_ok l : (forall P, subst_l P@l == P@l) -> forall res P p, (Ppow_pos res P p)@l == res@l * (pow_pos Pmul P p)@l. Proof. intros subst_l_ok res P p. revert res. induction p;simpl;intros; rewrite ?subst_l_ok, ?Pmul_ok, ?IHp; mul_permut. Qed. Lemma Ppow_N_ok l : (forall P, subst_l P@l == P@l) -> forall P n, (Ppow_N P n)@l == (pow_N P1 Pmul P n)@l. Proof. destruct n;simpl. - reflexivity. - rewrite Ppow_pos_ok by trivial. Esimpl. Qed. End POWER. (** Normalization and rewriting *) Section NORM_SUBST_REC. Variable n : nat. Variable lmp:list (Mon*Pol). Let subst_l P := PNSubstL P lmp n n. Let Pmul_subst P1 P2 := subst_l (Pmul P1 P2). Let Ppow_subst := Ppow_N subst_l. Fixpoint norm_aux (pe:PExpr) : Pol := match pe with | PEc c => Pc c | PEX j => mk_X j | PEadd (PEopp pe1) pe2 => Psub (norm_aux pe2) (norm_aux pe1) | PEadd pe1 (PEopp pe2) => Psub (norm_aux pe1) (norm_aux pe2) | PEadd pe1 pe2 => Padd (norm_aux pe1) (norm_aux pe2) | PEsub pe1 pe2 => Psub (norm_aux pe1) (norm_aux pe2) | PEmul pe1 pe2 => Pmul (norm_aux pe1) (norm_aux pe2) | PEopp pe1 => Popp (norm_aux pe1) | PEpow pe1 n => Ppow_N (fun p => p) (norm_aux pe1) n end. Definition norm_subst pe := subst_l (norm_aux pe). (** Internally, [norm_aux] is expanded in a large number of cases. To speed-up proofs, we use an alternative definition. *) Definition get_PEopp pe := match pe with | PEopp pe' => Some pe' | _ => None end. Lemma norm_aux_PEadd pe1 pe2 : norm_aux (PEadd pe1 pe2) = match get_PEopp pe1, get_PEopp pe2 with | Some pe1', _ => (norm_aux pe2) -- (norm_aux pe1') | None, Some pe2' => (norm_aux pe1) -- (norm_aux pe2') | None, None => (norm_aux pe1) ++ (norm_aux pe2) end. Proof. simpl (norm_aux (PEadd _ _)). destruct pe1; [ | | | | | reflexivity | ]; destruct pe2; simpl get_PEopp; reflexivity. Qed. Lemma norm_aux_PEopp pe : match get_PEopp pe with | Some pe' => norm_aux pe = -- (norm_aux pe') | None => True end. Proof. now destruct pe. Qed. Lemma norm_aux_spec l pe : PEeval l pe == (norm_aux pe)@l. Proof. intros. induction pe. - reflexivity. - apply mkX_ok. - simpl PEeval. rewrite IHpe1, IHpe2. assert (H1 := norm_aux_PEopp pe1). assert (H2 := norm_aux_PEopp pe2). rewrite norm_aux_PEadd. 
do 2 destruct get_PEopp; rewrite ?H1, ?H2; Esimpl; add_permut. - simpl. rewrite IHpe1, IHpe2. Esimpl. - simpl. rewrite IHpe1, IHpe2. now rewrite Pmul_ok. - simpl. rewrite IHpe. Esimpl. - simpl. rewrite Ppow_N_ok by reflexivity. rewrite pow_th.(rpow_pow_N). destruct n0; simpl; Esimpl. induction p;simpl; now rewrite ?IHp, ?IHpe, ?Pms_ok, ?Pmul_ok. Qed. End NORM_SUBST_REC. End MakeRingPol. coq-8.4pl2/plugins/micromega/coq_micromega.ml0000640000175000001440000017723612121620060020420 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* ,=,<>,<=,>=} pExpr2 * where pExpr1, pExpr2 are polynomial expressions (see Micromega). pExprs are * parametrized by 'cst, which is used as the type of constants. *) type 'cst atom = 'cst Micromega.formula (** * Micromega's encoding of formulas. * By order of appearance: boolean constants, variables, atoms, conjunctions, * disjunctions, negation, implication. *) type 'cst formula = | TT | FF | X of Term.constr | A of 'cst atom * tag * Term.constr | C of 'cst formula * 'cst formula | D of 'cst formula * 'cst formula | N of 'cst formula | I of 'cst formula * Names.identifier option * 'cst formula (** * Formula pretty-printer. *) let rec pp_formula o f = match f with | TT -> output_string o "tt" | FF -> output_string o "ff" | X c -> output_string o "X " | A(_,t,_) -> Printf.fprintf o "A(%a)" Tag.pp t | C(f1,f2) -> Printf.fprintf o "C(%a,%a)" pp_formula f1 pp_formula f2 | D(f1,f2) -> Printf.fprintf o "D(%a,%a)" pp_formula f1 pp_formula f2 | I(f1,n,f2) -> Printf.fprintf o "I(%a%s,%a)" pp_formula f1 (match n with | Some id -> Names.string_of_id id | None -> "") pp_formula f2 | N(f) -> Printf.fprintf o "N(%a)" pp_formula f let rec map_atoms fct f = match f with | TT -> TT | FF -> FF | X x -> X x | A (at,tg,cstr) -> A(fct at,tg,cstr) | C (f1,f2) -> C(map_atoms fct f1, map_atoms fct f2) | D (f1,f2) -> D(map_atoms fct f1, map_atoms fct f2) | N f -> N(map_atoms fct f) | I(f1,o,f2) -> I(map_atoms fct f1, o , map_atoms fct f2) (** * Collect the identifiers of a (string of) implications. Implication labels * are inherited from Coq/CoC's higher order dependent type constructor (Pi). *) let rec ids_of_formula f = match f with | I(f1,Some id,f2) -> id::(ids_of_formula f2) | _ -> [] (** * A clause is a list of (tagged) nFormulas. * nFormulas are normalized formulas, i.e., of the form: * cPol {=,<>,>,>=} 0 * with cPol compact polynomials (see the Pol inductive type in EnvRing.v). *) type 'cst clause = ('cst Micromega.nFormula * tag) list (** * A CNF is a list of clauses. *) type 'cst cnf = ('cst clause) list (** * True and False are empty cnfs and clauses. *) let tt : 'cst cnf = [] let ff : 'cst cnf = [ [] ] (** * A refinement of cnf with tags left out. This is an intermediary form * between the cnf tagged list representation ('cst cnf) used to solve psatz, * and the freeform formulas ('cst formula) that is retrieved from Coq. *) module Mc = Micromega type 'cst mc_cnf = ('cst Mc.nFormula) list list (** * From a freeform formula, build a cnf. * The parametric functions negate and normalize are theory-dependent, and * originate in micromega.ml (extracted, e.g. for rnegate, from RMicromega.v * and RingMicromega.v). 
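*
* For illustration, at polarity true the structural cases of xcnf below
* behave as follows: a conjunction C(f1,f2) yields the concatenation of
* the clause lists of f1 and f2 via and_cnf; a disjunction D(f1,f2)
* yields their clause-wise merge via or_cnf, dropping clauses that
* deduce and unsat already recognise as refutable and recording the tags
* involved; an implication I(f1,_,f2) processes f1 at the opposite
* polarity, so its atoms go through negate, and merges the result with
* the clauses of f2.  Hence a goal of the shape A1 -> A2, with both
* sides atoms, produces clauses that each combine one clause of
* negate A1 with one clause of normalise A2; a witness refuting every
* such clause is what the provers are later asked for.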
*) type 'a tagged_option = T of tag list | S of 'a let cnf (negate: 'cst atom -> 'cst mc_cnf) (normalise:'cst atom -> 'cst mc_cnf) (unsat : 'cst Mc.nFormula -> bool) (deduce : 'cst Mc.nFormula -> 'cst Mc.nFormula -> 'cst Mc.nFormula option) (f:'cst formula) = let negate a t = List.map (fun cl -> List.map (fun x -> (x,t)) cl) (negate a) in let normalise a t = List.map (fun cl -> List.map (fun x -> (x,t)) cl) (normalise a) in let and_cnf x y = x @ y in let rec add_term t0 = function | [] -> (match deduce (fst t0) (fst t0) with | Some u -> if unsat u then T [snd t0] else S (t0::[]) | None -> S (t0::[])) | t'::cl0 -> (match deduce (fst t0) (fst t') with | Some u -> if unsat u then T [snd t0 ; snd t'] else (match add_term t0 cl0 with | S cl' -> S (t'::cl') | T l -> T l) | None -> (match add_term t0 cl0 with | S cl' -> S (t'::cl') | T l -> T l)) in let rec or_clause cl1 cl2 = match cl1 with | [] -> S cl2 | t0::cl -> (match add_term t0 cl2 with | S cl' -> or_clause cl cl' | T l -> T l) in let or_clause_cnf t f = List.fold_right (fun e (acc,tg) -> match or_clause t e with | S cl -> (cl :: acc,tg) | T l -> (acc,tg@l)) f ([],[]) in let rec or_cnf f f' = match f with | [] -> tt,[] | e :: rst -> let (rst_f',t) = or_cnf rst f' in let (e_f', t') = or_clause_cnf e f' in (rst_f' @ e_f', t @ t') in let rec xcnf (polarity : bool) f = match f with | TT -> if polarity then (tt,[]) else (ff,[]) | FF -> if polarity then (ff,[]) else (tt,[]) | X p -> if polarity then (ff,[]) else (ff,[]) | A(x,t,_) -> ((if polarity then normalise x t else negate x t),[]) | N(e) -> xcnf (not polarity) e | C(e1,e2) -> let e1,t1 = xcnf polarity e1 in let e2,t2 = xcnf polarity e2 in if polarity then and_cnf e1 e2, t1 @ t2 else let f',t' = or_cnf e1 e2 in (f', t1 @ t2 @ t') | D(e1,e2) -> let e1,t1 = xcnf polarity e1 in let e2,t2 = xcnf polarity e2 in if polarity then let f',t' = or_cnf e1 e2 in (f', t1 @ t2 @ t') else and_cnf e1 e2, t1 @ t2 | I(e1,_,e2) -> let e1 , t1 = (xcnf (not polarity) e1) in let e2 , t2 = (xcnf polarity e2) in if polarity then let f',t' = or_cnf e1 e2 in (f', t1 @ t2 @ t') else and_cnf e1 e2, t1 @ t2 in xcnf true f (** * MODULE: Ordered set of integers. *) module ISet = Set.Make(struct type t = int let compare : int -> int -> int = Pervasives.compare end) (** * Given a set of integers s={i0,...,iN} and a list m, return the list of * elements of m that are at position i0,...,iN. *) let selecti s m = let rec xselecti i m = match m with | [] -> [] | e::m -> if ISet.mem i s then e::(xselecti (i+1) m) else xselecti (i+1) m in xselecti 0 m (** * MODULE: Mapping of the Coq data-strustures into Caml and Caml extracted * code. This includes initializing Caml variables based on Coq terms, parsing * various Coq expressions into Caml, and dumping Caml expressions into Coq. * * Opened here and in csdpcert.ml. *) module M = struct open Coqlib open Term (** * Location of the Coq libraries. 
*) let logic_dir = ["Coq";"Logic";"Decidable"] let coq_modules = init_modules @ [logic_dir] @ arith_modules @ zarith_base_modules @ [ ["Coq";"Lists";"List"]; ["ZMicromega"]; ["Tauto"]; ["RingMicromega"]; ["EnvRing"]; ["Coq"; "micromega"; "ZMicromega"]; ["Coq"; "micromega"; "RMicromega"]; ["Coq" ; "micromega" ; "Tauto"]; ["Coq" ; "micromega" ; "RingMicromega"]; ["Coq" ; "micromega" ; "EnvRing"]; ["Coq";"QArith"; "QArith_base"]; ["Coq";"Reals" ; "Rdefinitions"]; ["Coq";"Reals" ; "Rpow_def"]; ["LRing_normalise"]] let bin_module = [["Coq";"Numbers";"BinNums"]] let r_modules = [["Coq";"Reals" ; "Rdefinitions"]; ["Coq";"Reals" ; "Rpow_def"] ; ] let z_modules = [["Coq";"ZArith";"BinInt"]] (** * Initialization : a large amount of Caml symbols are derived from * ZMicromega.v *) let init_constant = gen_constant_in_modules "ZMicromega" init_modules let constant = gen_constant_in_modules "ZMicromega" coq_modules let bin_constant = gen_constant_in_modules "ZMicromega" bin_module let r_constant = gen_constant_in_modules "ZMicromega" r_modules let z_constant = gen_constant_in_modules "ZMicromega" z_modules (* let constant = gen_constant_in_modules "Omicron" coq_modules *) let coq_and = lazy (init_constant "and") let coq_or = lazy (init_constant "or") let coq_not = lazy (init_constant "not") let coq_iff = lazy (init_constant "iff") let coq_True = lazy (init_constant "True") let coq_False = lazy (init_constant "False") let coq_cons = lazy (constant "cons") let coq_nil = lazy (constant "nil") let coq_list = lazy (constant "list") let coq_O = lazy (init_constant "O") let coq_S = lazy (init_constant "S") let coq_nat = lazy (init_constant "nat") let coq_N0 = lazy (bin_constant "N0") let coq_Npos = lazy (bin_constant "Npos") let coq_pair = lazy (init_constant "pair") let coq_None = lazy (init_constant "None") let coq_option = lazy (init_constant "option") let coq_positive = lazy (bin_constant "positive") let coq_xH = lazy (bin_constant "xH") let coq_xO = lazy (bin_constant "xO") let coq_xI = lazy (bin_constant "xI") let coq_Z = lazy (bin_constant "Z") let coq_ZERO = lazy (bin_constant "Z0") let coq_POS = lazy (bin_constant "Zpos") let coq_NEG = lazy (bin_constant "Zneg") let coq_Q = lazy (constant "Q") let coq_R = lazy (constant "R") let coq_Build_Witness = lazy (constant "Build_Witness") let coq_Qmake = lazy (constant "Qmake") let coq_Rcst = lazy (constant "Rcst") let coq_C0 = lazy (constant "C0") let coq_C1 = lazy (constant "C1") let coq_CQ = lazy (constant "CQ") let coq_CZ = lazy (constant "CZ") let coq_CPlus = lazy (constant "CPlus") let coq_CMinus = lazy (constant "CMinus") let coq_CMult = lazy (constant "CMult") let coq_CInv = lazy (constant "CInv") let coq_COpp = lazy (constant "COpp") let coq_R0 = lazy (constant "R0") let coq_R1 = lazy (constant "R1") let coq_proofTerm = lazy (constant "ZArithProof") let coq_doneProof = lazy (constant "DoneProof") let coq_ratProof = lazy (constant "RatProof") let coq_cutProof = lazy (constant "CutProof") let coq_enumProof = lazy (constant "EnumProof") let coq_Zgt = lazy (z_constant "Z.gt") let coq_Zge = lazy (z_constant "Z.ge") let coq_Zle = lazy (z_constant "Z.le") let coq_Zlt = lazy (z_constant "Z.lt") let coq_Eq = lazy (init_constant "eq") let coq_Zplus = lazy (z_constant "Z.add") let coq_Zminus = lazy (z_constant "Z.sub") let coq_Zopp = lazy (z_constant "Z.opp") let coq_Zmult = lazy (z_constant "Z.mul") let coq_Zpower = lazy (z_constant "Z.pow") let coq_Qgt = lazy (constant "Qgt") let coq_Qge = lazy (constant "Qge") let coq_Qle = lazy (constant "Qle") let coq_Qlt = 
lazy (constant "Qlt") let coq_Qeq = lazy (constant "Qeq") let coq_Qplus = lazy (constant "Qplus") let coq_Qminus = lazy (constant "Qminus") let coq_Qopp = lazy (constant "Qopp") let coq_Qmult = lazy (constant "Qmult") let coq_Qpower = lazy (constant "Qpower") let coq_Rgt = lazy (r_constant "Rgt") let coq_Rge = lazy (r_constant "Rge") let coq_Rle = lazy (r_constant "Rle") let coq_Rlt = lazy (r_constant "Rlt") let coq_Rplus = lazy (r_constant "Rplus") let coq_Rminus = lazy (r_constant "Rminus") let coq_Ropp = lazy (r_constant "Ropp") let coq_Rmult = lazy (r_constant "Rmult") let coq_Rdiv = lazy (r_constant "Rdiv") let coq_Rinv = lazy (r_constant "Rinv") let coq_Rpower = lazy (r_constant "pow") let coq_IQR = lazy (constant "IQR") let coq_IZR = lazy (constant "IZR") let coq_PEX = lazy (constant "PEX" ) let coq_PEc = lazy (constant"PEc") let coq_PEadd = lazy (constant "PEadd") let coq_PEopp = lazy (constant "PEopp") let coq_PEmul = lazy (constant "PEmul") let coq_PEsub = lazy (constant "PEsub") let coq_PEpow = lazy (constant "PEpow") let coq_PX = lazy (constant "PX" ) let coq_Pc = lazy (constant"Pc") let coq_Pinj = lazy (constant "Pinj") let coq_OpEq = lazy (constant "OpEq") let coq_OpNEq = lazy (constant "OpNEq") let coq_OpLe = lazy (constant "OpLe") let coq_OpLt = lazy (constant "OpLt") let coq_OpGe = lazy (constant "OpGe") let coq_OpGt = lazy (constant "OpGt") let coq_PsatzIn = lazy (constant "PsatzIn") let coq_PsatzSquare = lazy (constant "PsatzSquare") let coq_PsatzMulE = lazy (constant "PsatzMulE") let coq_PsatzMultC = lazy (constant "PsatzMulC") let coq_PsatzAdd = lazy (constant "PsatzAdd") let coq_PsatzC = lazy (constant "PsatzC") let coq_PsatzZ = lazy (constant "PsatzZ") let coq_coneMember = lazy (constant "coneMember") let coq_make_impl = lazy (gen_constant_in_modules "Zmicromega" [["Refl"]] "make_impl") let coq_make_conj = lazy (gen_constant_in_modules "Zmicromega" [["Refl"]] "make_conj") let coq_TT = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "TT") let coq_FF = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "FF") let coq_And = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "Cj") let coq_Or = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "D") let coq_Neg = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "N") let coq_Atom = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "A") let coq_X = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "X") let coq_Impl = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "I") let coq_Formula = lazy (gen_constant_in_modules "ZMicromega" [["Coq" ; "micromega" ; "Tauto"];["Tauto"]] "BFormula") (** * Initialization : a few Caml symbols are derived from other libraries; * QMicromega, ZArithRing, RingMicromega. 
*) let coq_QWitness = lazy (gen_constant_in_modules "QMicromega" [["Coq"; "micromega"; "QMicromega"]] "QWitness") let coq_ZWitness = lazy (gen_constant_in_modules "QMicromega" [["Coq"; "micromega"; "ZMicromega"]] "ZWitness") let coq_N_of_Z = lazy (gen_constant_in_modules "ZArithRing" [["Coq";"setoid_ring";"ZArithRing"]] "N_of_Z") let coq_Build = lazy (gen_constant_in_modules "RingMicromega" [["Coq" ; "micromega" ; "RingMicromega"] ; ["RingMicromega"] ] "Build_Formula") let coq_Cstr = lazy (gen_constant_in_modules "RingMicromega" [["Coq" ; "micromega" ; "RingMicromega"] ; ["RingMicromega"] ] "Formula") (** * Parsing and dumping : transformation functions between Caml and Coq * data-structures. * * dump_* functions go from Micromega to Coq terms * parse_* functions go from Coq to Micromega terms * pp_* functions pretty-print Coq terms. *) (* Error datastructures *) type parse_error = | Ukn | BadStr of string | BadNum of int | BadTerm of Term.constr | Msg of string | Goal of (Term.constr list ) * Term.constr * parse_error let string_of_error = function | Ukn -> "ukn" | BadStr s -> s | BadNum i -> string_of_int i | BadTerm _ -> "BadTerm" | Msg s -> s | Goal _ -> "Goal" exception ParseError (* A simple but useful getter function *) let get_left_construct term = match Term.kind_of_term term with | Term.Construct(_,i) -> (i,[| |]) | Term.App(l,rst) -> (match Term.kind_of_term l with | Term.Construct(_,i) -> (i,rst) | _ -> raise ParseError ) | _ -> raise ParseError (* Access the Micromega module *) (* parse/dump/print from numbers up to expressions and formulas *) let rec parse_nat term = let (i,c) = get_left_construct term in match i with | 1 -> Mc.O | 2 -> Mc.S (parse_nat (c.(0))) | i -> raise ParseError let pp_nat o n = Printf.fprintf o "%i" (CoqToCaml.nat n) let rec dump_nat x = match x with | Mc.O -> Lazy.force coq_O | Mc.S p -> Term.mkApp(Lazy.force coq_S,[| dump_nat p |]) let rec parse_positive term = let (i,c) = get_left_construct term in match i with | 1 -> Mc.XI (parse_positive c.(0)) | 2 -> Mc.XO (parse_positive c.(0)) | 3 -> Mc.XH | i -> raise ParseError let rec dump_positive x = match x with | Mc.XH -> Lazy.force coq_xH | Mc.XO p -> Term.mkApp(Lazy.force coq_xO,[| dump_positive p |]) | Mc.XI p -> Term.mkApp(Lazy.force coq_xI,[| dump_positive p |]) let pp_positive o x = Printf.fprintf o "%i" (CoqToCaml.positive x) let rec dump_n x = match x with | Mc.N0 -> Lazy.force coq_N0 | Mc.Npos p -> Term.mkApp(Lazy.force coq_Npos,[| dump_positive p|]) let rec dump_index x = match x with | Mc.XH -> Lazy.force coq_xH | Mc.XO p -> Term.mkApp(Lazy.force coq_xO,[| dump_index p |]) | Mc.XI p -> Term.mkApp(Lazy.force coq_xI,[| dump_index p |]) let pp_index o x = Printf.fprintf o "%i" (CoqToCaml.index x) let rec pp_n o x = output_string o (string_of_int (CoqToCaml.n x)) let dump_pair t1 t2 dump_t1 dump_t2 (x,y) = Term.mkApp(Lazy.force coq_pair,[| t1 ; t2 ; dump_t1 x ; dump_t2 y|]) let rec parse_z term = let (i,c) = get_left_construct term in match i with | 1 -> Mc.Z0 | 2 -> Mc.Zpos (parse_positive c.(0)) | 3 -> Mc.Zneg (parse_positive c.(0)) | i -> raise ParseError let dump_z x = match x with | Mc.Z0 ->Lazy.force coq_ZERO | Mc.Zpos p -> Term.mkApp(Lazy.force coq_POS,[| dump_positive p|]) | Mc.Zneg p -> Term.mkApp(Lazy.force coq_NEG,[| dump_positive p|]) let pp_z o x = Printf.fprintf o "%s" (Big_int.string_of_big_int (CoqToCaml.z_big_int x)) let dump_num bd1 = Term.mkApp(Lazy.force coq_Qmake, [|dump_z (CamlToCoq.bigint (numerator bd1)) ; dump_positive (CamlToCoq.positive_big_int (denominator bd1)) |]) let 
dump_q q = Term.mkApp(Lazy.force coq_Qmake, [| dump_z q.Micromega.qnum ; dump_positive q.Micromega.qden|]) let parse_q term = match Term.kind_of_term term with | Term.App(c, args) -> if c = Lazy.force coq_Qmake then {Mc.qnum = parse_z args.(0) ; Mc.qden = parse_positive args.(1) } else raise ParseError | _ -> raise ParseError let rec pp_Rcst o cst = match cst with | Mc.C0 -> output_string o "C0" | Mc.C1 -> output_string o "C1" | Mc.CQ q -> output_string o "CQ _" | Mc.CZ z -> pp_z o z | Mc.CPlus(x,y) -> Printf.fprintf o "(%a + %a)" pp_Rcst x pp_Rcst y | Mc.CMinus(x,y) -> Printf.fprintf o "(%a - %a)" pp_Rcst x pp_Rcst y | Mc.CMult(x,y) -> Printf.fprintf o "(%a * %a)" pp_Rcst x pp_Rcst y | Mc.CInv t -> Printf.fprintf o "(/ %a)" pp_Rcst t | Mc.COpp t -> Printf.fprintf o "(- %a)" pp_Rcst t let rec dump_Rcst cst = match cst with | Mc.C0 -> Lazy.force coq_C0 | Mc.C1 -> Lazy.force coq_C1 | Mc.CQ q -> Term.mkApp(Lazy.force coq_CQ, [| dump_q q |]) | Mc.CZ z -> Term.mkApp(Lazy.force coq_CZ, [| dump_z z |]) | Mc.CPlus(x,y) -> Term.mkApp(Lazy.force coq_CPlus, [| dump_Rcst x ; dump_Rcst y |]) | Mc.CMinus(x,y) -> Term.mkApp(Lazy.force coq_CMinus, [| dump_Rcst x ; dump_Rcst y |]) | Mc.CMult(x,y) -> Term.mkApp(Lazy.force coq_CMult, [| dump_Rcst x ; dump_Rcst y |]) | Mc.CInv t -> Term.mkApp(Lazy.force coq_CInv, [| dump_Rcst t |]) | Mc.COpp t -> Term.mkApp(Lazy.force coq_COpp, [| dump_Rcst t |]) let rec parse_Rcst term = let (i,c) = get_left_construct term in match i with | 1 -> Mc.C0 | 2 -> Mc.C1 | 3 -> Mc.CQ (parse_q c.(0)) | 4 -> Mc.CPlus(parse_Rcst c.(0), parse_Rcst c.(1)) | 5 -> Mc.CMinus(parse_Rcst c.(0), parse_Rcst c.(1)) | 6 -> Mc.CMult(parse_Rcst c.(0), parse_Rcst c.(1)) | 7 -> Mc.CInv(parse_Rcst c.(0)) | 8 -> Mc.COpp(parse_Rcst c.(0)) | _ -> raise ParseError let rec parse_list parse_elt term = let (i,c) = get_left_construct term in match i with | 1 -> [] | 2 -> parse_elt c.(1) :: parse_list parse_elt c.(2) | i -> raise ParseError let rec dump_list typ dump_elt l = match l with | [] -> Term.mkApp(Lazy.force coq_nil,[| typ |]) | e :: l -> Term.mkApp(Lazy.force coq_cons, [| typ; dump_elt e;dump_list typ dump_elt l|]) let pp_list op cl elt o l = let rec _pp o l = match l with | [] -> () | [e] -> Printf.fprintf o "%a" elt e | e::l -> Printf.fprintf o "%a ,%a" elt e _pp l in Printf.fprintf o "%s%a%s" op _pp l cl let pp_var = pp_positive let dump_var = dump_positive let pp_expr pp_z o e = let rec pp_expr o e = match e with | Mc.PEX n -> Printf.fprintf o "V %a" pp_var n | Mc.PEc z -> pp_z o z | Mc.PEadd(e1,e2) -> Printf.fprintf o "(%a)+(%a)" pp_expr e1 pp_expr e2 | Mc.PEmul(e1,e2) -> Printf.fprintf o "%a*(%a)" pp_expr e1 pp_expr e2 | Mc.PEopp e -> Printf.fprintf o "-(%a)" pp_expr e | Mc.PEsub(e1,e2) -> Printf.fprintf o "(%a)-(%a)" pp_expr e1 pp_expr e2 | Mc.PEpow(e,n) -> Printf.fprintf o "(%a)^(%a)" pp_expr e pp_n n in pp_expr o e let dump_expr typ dump_z e = let rec dump_expr e = match e with | Mc.PEX n -> mkApp(Lazy.force coq_PEX,[| typ; dump_var n |]) | Mc.PEc z -> mkApp(Lazy.force coq_PEc,[| typ ; dump_z z |]) | Mc.PEadd(e1,e2) -> mkApp(Lazy.force coq_PEadd, [| typ; dump_expr e1;dump_expr e2|]) | Mc.PEsub(e1,e2) -> mkApp(Lazy.force coq_PEsub, [| typ; dump_expr e1;dump_expr e2|]) | Mc.PEopp e -> mkApp(Lazy.force coq_PEopp, [| typ; dump_expr e|]) | Mc.PEmul(e1,e2) -> mkApp(Lazy.force coq_PEmul, [| typ; dump_expr e1;dump_expr e2|]) | Mc.PEpow(e,n) -> mkApp(Lazy.force coq_PEpow, [| typ; dump_expr e; dump_n n|]) in dump_expr e let dump_pol typ dump_c e = let rec dump_pol e = match e with | Mc.Pc n -> 
mkApp(Lazy.force coq_Pc, [|typ ; dump_c n|]) | Mc.Pinj(p,pol) -> mkApp(Lazy.force coq_Pinj , [| typ ; dump_positive p ; dump_pol pol|]) | Mc.PX(pol1,p,pol2) -> mkApp(Lazy.force coq_PX, [| typ ; dump_pol pol1 ; dump_positive p ; dump_pol pol2|]) in dump_pol e let pp_pol pp_c o e = let rec pp_pol o e = match e with | Mc.Pc n -> Printf.fprintf o "Pc %a" pp_c n | Mc.Pinj(p,pol) -> Printf.fprintf o "Pinj(%a,%a)" pp_positive p pp_pol pol | Mc.PX(pol1,p,pol2) -> Printf.fprintf o "PX(%a,%a,%a)" pp_pol pol1 pp_positive p pp_pol pol2 in pp_pol o e let pp_cnf pp_c o f = let pp_clause o l = List.iter (fun ((p,_),t) -> Printf.fprintf o "(%a @%a)" (pp_pol pp_c) p Tag.pp t) l in List.iter (fun l -> Printf.fprintf o "[%a]" pp_clause l) f let dump_psatz typ dump_z e = let z = Lazy.force typ in let rec dump_cone e = match e with | Mc.PsatzIn n -> mkApp(Lazy.force coq_PsatzIn,[| z; dump_nat n |]) | Mc.PsatzMulC(e,c) -> mkApp(Lazy.force coq_PsatzMultC, [| z; dump_pol z dump_z e ; dump_cone c |]) | Mc.PsatzSquare e -> mkApp(Lazy.force coq_PsatzSquare, [| z;dump_pol z dump_z e|]) | Mc.PsatzAdd(e1,e2) -> mkApp(Lazy.force coq_PsatzAdd, [| z; dump_cone e1; dump_cone e2|]) | Mc.PsatzMulE(e1,e2) -> mkApp(Lazy.force coq_PsatzMulE, [| z; dump_cone e1; dump_cone e2|]) | Mc.PsatzC p -> mkApp(Lazy.force coq_PsatzC,[| z; dump_z p|]) | Mc.PsatzZ -> mkApp( Lazy.force coq_PsatzZ,[| z|]) in dump_cone e let pp_psatz pp_z o e = let rec pp_cone o e = match e with | Mc.PsatzIn n -> Printf.fprintf o "(In %a)%%nat" pp_nat n | Mc.PsatzMulC(e,c) -> Printf.fprintf o "( %a [*] %a)" (pp_pol pp_z) e pp_cone c | Mc.PsatzSquare e -> Printf.fprintf o "(%a^2)" (pp_pol pp_z) e | Mc.PsatzAdd(e1,e2) -> Printf.fprintf o "(%a [+] %a)" pp_cone e1 pp_cone e2 | Mc.PsatzMulE(e1,e2) -> Printf.fprintf o "(%a [*] %a)" pp_cone e1 pp_cone e2 | Mc.PsatzC p -> Printf.fprintf o "(%a)%%positive" pp_z p | Mc.PsatzZ -> Printf.fprintf o "0" in pp_cone o e let rec dump_op = function | Mc.OpEq-> Lazy.force coq_OpEq | Mc.OpNEq-> Lazy.force coq_OpNEq | Mc.OpLe -> Lazy.force coq_OpLe | Mc.OpGe -> Lazy.force coq_OpGe | Mc.OpGt-> Lazy.force coq_OpGt | Mc.OpLt-> Lazy.force coq_OpLt let pp_op o e= match e with | Mc.OpEq-> Printf.fprintf o "=" | Mc.OpNEq-> Printf.fprintf o "<>" | Mc.OpLe -> Printf.fprintf o "=<" | Mc.OpGe -> Printf.fprintf o ">=" | Mc.OpGt-> Printf.fprintf o ">" | Mc.OpLt-> Printf.fprintf o "<" let pp_cstr pp_z o {Mc.flhs = l ; Mc.fop = op ; Mc.frhs = r } = Printf.fprintf o"(%a %a %a)" (pp_expr pp_z) l pp_op op (pp_expr pp_z) r let dump_cstr typ dump_constant {Mc.flhs = e1 ; Mc.fop = o ; Mc.frhs = e2} = Term.mkApp(Lazy.force coq_Build, [| typ; dump_expr typ dump_constant e1 ; dump_op o ; dump_expr typ dump_constant e2|]) let assoc_const x l = try snd (List.find (fun (x',y) -> x = Lazy.force x') l) with Not_found -> raise ParseError let zop_table = [ coq_Zgt, Mc.OpGt ; coq_Zge, Mc.OpGe ; coq_Zlt, Mc.OpLt ; coq_Zle, Mc.OpLe ] let rop_table = [ coq_Rgt, Mc.OpGt ; coq_Rge, Mc.OpGe ; coq_Rlt, Mc.OpLt ; coq_Rle, Mc.OpLe ] let qop_table = [ coq_Qlt, Mc.OpLt ; coq_Qle, Mc.OpLe ; coq_Qeq, Mc.OpEq ] let parse_zop (op,args) = match kind_of_term op with | Const x -> (assoc_const op zop_table, args.(0) , args.(1)) | Ind(n,0) -> if op = Lazy.force coq_Eq && args.(0) = Lazy.force coq_Z then (Mc.OpEq, args.(1), args.(2)) else raise ParseError | _ -> failwith "parse_zop" let parse_rop (op,args) = match kind_of_term op with | Const x -> (assoc_const op rop_table, args.(0) , args.(1)) | Ind(n,0) -> if op = Lazy.force coq_Eq && args.(0) = Lazy.force coq_R then (Mc.OpEq, 
args.(1), args.(2)) else raise ParseError | _ -> failwith "parse_zop" let parse_qop (op,args) = (assoc_const op qop_table, args.(0) , args.(1)) let is_constant t = (* This is an approx *) match kind_of_term t with | Construct(i,_) -> true | _ -> false type 'a op = | Binop of ('a Mc.pExpr -> 'a Mc.pExpr -> 'a Mc.pExpr) | Opp | Power | Ukn of string let assoc_ops x l = try snd (List.find (fun (x',y) -> x = Lazy.force x') l) with Not_found -> Ukn "Oups" (** * MODULE: Env is for environment. *) module Env = struct type t = constr list let compute_rank_add env v = let rec _add env n v = match env with | [] -> ([v],n) | e::l -> if eq_constr e v then (env,n) else let (env,n) = _add l ( n+1) v in (e::env,n) in let (env, n) = _add env 1 v in (env, CamlToCoq.idx n) let empty = [] let elements env = env end (* MODULE END: Env *) (** * This is the big generic function for expression parsers. *) let parse_expr parse_constant parse_exp ops_spec env term = if debug then (Pp.pp (Pp.str "parse_expr: "); Pp.pp (Printer.prterm term); Pp.pp (Pp.str "\n"); Pp.pp_flush ()); (* let constant_or_variable env term = try ( Mc.PEc (parse_constant term) , env) with ParseError -> let (env,n) = Env.compute_rank_add env term in (Mc.PEX n , env) in *) let parse_variable env term = let (env,n) = Env.compute_rank_add env term in (Mc.PEX n , env) in let rec parse_expr env term = let combine env op (t1,t2) = let (expr1,env) = parse_expr env t1 in let (expr2,env) = parse_expr env t2 in (op expr1 expr2,env) in try (Mc.PEc (parse_constant term) , env) with ParseError -> match kind_of_term term with | App(t,args) -> ( match kind_of_term t with | Const c -> ( match assoc_ops t ops_spec with | Binop f -> combine env f (args.(0),args.(1)) | Opp -> let (expr,env) = parse_expr env args.(0) in (Mc.PEopp expr, env) | Power -> begin try let (expr,env) = parse_expr env args.(0) in let power = (parse_exp expr args.(1)) in (power , env) with e when e <> Sys.Break -> (* if the exponent is a variable *) let (env,n) = Env.compute_rank_add env term in (Mc.PEX n, env) end | Ukn s -> if debug then (Printf.printf "unknown op: %s\n" s; flush stdout;); let (env,n) = Env.compute_rank_add env term in (Mc.PEX n, env) ) | _ -> parse_variable env term ) | _ -> parse_variable env term in parse_expr env term let zop_spec = [ coq_Zplus , Binop (fun x y -> Mc.PEadd(x,y)) ; coq_Zminus , Binop (fun x y -> Mc.PEsub(x,y)) ; coq_Zmult , Binop (fun x y -> Mc.PEmul (x,y)) ; coq_Zopp , Opp ; coq_Zpower , Power] let qop_spec = [ coq_Qplus , Binop (fun x y -> Mc.PEadd(x,y)) ; coq_Qminus , Binop (fun x y -> Mc.PEsub(x,y)) ; coq_Qmult , Binop (fun x y -> Mc.PEmul (x,y)) ; coq_Qopp , Opp ; coq_Qpower , Power] let rop_spec = [ coq_Rplus , Binop (fun x y -> Mc.PEadd(x,y)) ; coq_Rminus , Binop (fun x y -> Mc.PEsub(x,y)) ; coq_Rmult , Binop (fun x y -> Mc.PEmul (x,y)) ; coq_Ropp , Opp ; coq_Rpower , Power] let zconstant = parse_z let qconstant = parse_q let rconst_assoc = [ coq_Rplus , (fun x y -> Mc.CPlus(x,y)) ; coq_Rminus , (fun x y -> Mc.CMinus(x,y)) ; coq_Rmult , (fun x y -> Mc.CMult(x,y)) ; coq_Rdiv , (fun x y -> Mc.CMult(x,Mc.CInv y)) ; ] let rec rconstant term = match Term.kind_of_term term with | Const x -> if term = Lazy.force coq_R0 then Mc.C0 else if term = Lazy.force coq_R1 then Mc.C1 else raise ParseError | App(op,args) -> begin try (* the evaluation order is important in the following *) let f = assoc_const op rconst_assoc in let a = rconstant args.(0) in let b = rconstant args.(1) in f a b with ParseError -> match op with | op when op = Lazy.force coq_Rinv -> 
Mc.CInv(rconstant args.(0)) | op when op = Lazy.force coq_IQR -> Mc.CQ (parse_q args.(0)) (* | op when op = Lazy.force coq_IZR -> Mc.CZ (parse_z args.(0))*) | _ -> raise ParseError end | _ -> raise ParseError let rconstant term = if debug then (Pp.pp_flush (); Pp.pp (Pp.str "rconstant: "); Pp.pp (Printer.prterm term); Pp.pp (Pp.str "\n"); Pp.pp_flush ()); let res = rconstant term in if debug then (Printf.printf "rconstant -> %a\n" pp_Rcst res ; flush stdout) ; res let parse_zexpr = parse_expr zconstant (fun expr x -> let exp = (parse_z x) in match exp with | Mc.Zneg _ -> Mc.PEc Mc.Z0 | _ -> Mc.PEpow(expr, Mc.Z.to_N exp)) zop_spec let parse_qexpr = parse_expr qconstant (fun expr x -> let exp = parse_z x in match exp with | Mc.Zneg _ -> begin match expr with | Mc.PEc q -> Mc.PEc (Mc.qpower q exp) | _ -> print_string "parse_qexpr parse error" ; flush stdout ; raise ParseError end | _ -> let exp = Mc.Z.to_N exp in Mc.PEpow(expr,exp)) qop_spec let parse_rexpr = parse_expr rconstant (fun expr x -> let exp = Mc.N.of_nat (parse_nat x) in Mc.PEpow(expr,exp)) rop_spec let parse_arith parse_op parse_expr env cstr = if debug then (Pp.pp_flush (); Pp.pp (Pp.str "parse_arith: "); Pp.pp (Printer.prterm cstr); Pp.pp (Pp.str "\n"); Pp.pp_flush ()); match kind_of_term cstr with | App(op,args) -> let (op,lhs,rhs) = parse_op (op,args) in let (e1,env) = parse_expr env lhs in let (e2,env) = parse_expr env rhs in ({Mc.flhs = e1; Mc.fop = op;Mc.frhs = e2},env) | _ -> failwith "error : parse_arith(2)" let parse_zarith = parse_arith parse_zop parse_zexpr let parse_qarith = parse_arith parse_qop parse_qexpr let parse_rarith = parse_arith parse_rop parse_rexpr (* generic parsing of arithmetic expressions *) let rec f2f = function | TT -> Mc.TT | FF -> Mc.FF | X _ -> Mc.X | A (x,_,_) -> Mc.A x | C (a,b) -> Mc.Cj(f2f a,f2f b) | D (a,b) -> Mc.D(f2f a,f2f b) | N (a) -> Mc.N(f2f a) | I(a,_,b) -> Mc.I(f2f a,f2f b) let is_prop t = match t with | Names.Anonymous -> true (* Not quite right *) | Names.Name x -> false let mkC f1 f2 = C(f1,f2) let mkD f1 f2 = D(f1,f2) let mkIff f1 f2 = C(I(f1,None,f2),I(f2,None,f1)) let mkI f1 f2 = I(f1,None,f2) let mkformula_binary g term f1 f2 = match f1 , f2 with | X _ , X _ -> X(term) | _ -> g f1 f2 (** * This is the big generic function for formula parsers. 
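*
* A typical invocation, as performed by parse_goal further below and
* here instantiated with the Z parser, is
*
*   parse_formula parse_zarith env (Tag.from 0) term
*
* where env is the variable environment accumulated so far, e.g.
* Env.empty for an empty one.  The function walks the Coq term, turns
* every recognised arithmetic atom into a tagged A leaf, keeps every
* unrecognised sub-term abstract as an X leaf, and returns the formula
* together with the updated environment and the next fresh tag.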
*) let parse_formula parse_atom env tg term = let parse_atom env tg t = try let (at,env) = parse_atom env t in (A(at,tg,t), env,Tag.next tg) with e when e <> Sys.Break -> (X(t),env,tg) in let rec xparse_formula env tg term = match kind_of_term term with | App(l,rst) -> (match rst with | [|a;b|] when eq_constr l (Lazy.force coq_and) -> let f,env,tg = xparse_formula env tg a in let g,env, tg = xparse_formula env tg b in mkformula_binary mkC term f g,env,tg | [|a;b|] when eq_constr l (Lazy.force coq_or) -> let f,env,tg = xparse_formula env tg a in let g,env,tg = xparse_formula env tg b in mkformula_binary mkD term f g,env,tg | [|a|] when eq_constr l (Lazy.force coq_not) -> let (f,env,tg) = xparse_formula env tg a in (N(f), env,tg) | [|a;b|] when eq_constr l (Lazy.force coq_iff) -> let f,env,tg = xparse_formula env tg a in let g,env,tg = xparse_formula env tg b in mkformula_binary mkIff term f g,env,tg | _ -> parse_atom env tg term) | Prod(typ,a,b) when not (Termops.dependent (mkRel 1) b) -> let f,env,tg = xparse_formula env tg a in let g,env,tg = xparse_formula env tg b in mkformula_binary mkI term f g,env,tg | _ when eq_constr term (Lazy.force coq_True) -> (TT,env,tg) | _ when eq_constr term (Lazy.force coq_False) -> (FF,env,tg) | _ -> X(term),env,tg in xparse_formula env tg ((*Reductionops.whd_zeta*) term) let dump_formula typ dump_atom f = let rec xdump f = match f with | TT -> mkApp(Lazy.force coq_TT,[|typ|]) | FF -> mkApp(Lazy.force coq_FF,[|typ|]) | C(x,y) -> mkApp(Lazy.force coq_And,[|typ ; xdump x ; xdump y|]) | D(x,y) -> mkApp(Lazy.force coq_Or,[|typ ; xdump x ; xdump y|]) | I(x,_,y) -> mkApp(Lazy.force coq_Impl,[|typ ; xdump x ; xdump y|]) | N(x) -> mkApp(Lazy.force coq_Neg,[|typ ; xdump x|]) | A(x,_,_) -> mkApp(Lazy.force coq_Atom,[|typ ; dump_atom x|]) | X(t) -> mkApp(Lazy.force coq_X,[|typ ; t|]) in xdump f (** * Given a conclusion and a list of affectations, rebuild a term prefixed by * the appropriate letins. * TODO: reverse the list of bindings! 
*) let set l concl = let rec xset acc = function | [] -> acc | (e::l) -> let (name,expr,typ) = e in xset (Term.mkNamedLetIn (Names.id_of_string name) expr typ acc) l in xset concl l end (** * MODULE END: M *) open M let rec sig_of_cone = function | Mc.PsatzIn n -> [CoqToCaml.nat n] | Mc.PsatzMulE(w1,w2) -> (sig_of_cone w1)@(sig_of_cone w2) | Mc.PsatzMulC(w1,w2) -> (sig_of_cone w2) | Mc.PsatzAdd(w1,w2) -> (sig_of_cone w1)@(sig_of_cone w2) | _ -> [] let same_proof sg cl1 cl2 = let rec xsame_proof sg = match sg with | [] -> true | n::sg -> (try List.nth cl1 n = List.nth cl2 n with e when e <> Sys.Break -> false) && (xsame_proof sg ) in xsame_proof sg let tags_of_clause tgs wit clause = let rec xtags tgs = function | Mc.PsatzIn n -> Names.Idset.union tgs (snd (List.nth clause (CoqToCaml.nat n) )) | Mc.PsatzMulC(e,w) -> xtags tgs w | Mc.PsatzMulE (w1,w2) | Mc.PsatzAdd(w1,w2) -> xtags (xtags tgs w1) w2 | _ -> tgs in xtags tgs wit (*let tags_of_cnf wits cnf = List.fold_left2 (fun acc w cl -> tags_of_clause acc w cl) Names.Idset.empty wits cnf *) let find_witness prover polys1 = try_any prover polys1 let rec witness prover l1 l2 = match l2 with | [] -> Some [] | e :: l2 -> match find_witness prover (e::l1) with | None -> None | Some w -> (match witness prover l1 l2 with | None -> None | Some l -> Some (w::l) ) let rec apply_ids t ids = match ids with | [] -> t | i::ids -> apply_ids (Term.mkApp(t,[| Term.mkVar i |])) ids let coq_Node = lazy (Coqlib.gen_constant_in_modules "VarMap" [["Coq" ; "micromega" ; "VarMap"];["VarMap"]] "Node") let coq_Leaf = lazy (Coqlib.gen_constant_in_modules "VarMap" [["Coq" ; "micromega" ; "VarMap"];["VarMap"]] "Leaf") let coq_Empty = lazy (Coqlib.gen_constant_in_modules "VarMap" [["Coq" ; "micromega" ;"VarMap"];["VarMap"]] "Empty") let btree_of_array typ a = let size_of_a = Array.length a in let semi_size_of_a = size_of_a lsr 1 in let node = Lazy.force coq_Node and leaf = Lazy.force coq_Leaf and empty = Term.mkApp (Lazy.force coq_Empty, [| typ |]) in let rec aux n = if n > size_of_a then empty else if n > semi_size_of_a then Term.mkApp (leaf, [| typ; a.(n-1) |]) else Term.mkApp (node, [| typ; aux (2*n); a.(n-1); aux (2*n+1) |]) in aux 1 let btree_of_array typ a = try btree_of_array typ a with x when x <> Sys.Break -> failwith (Printf.sprintf "btree of array : %s" (Printexc.to_string x)) let dump_varmap typ env = btree_of_array typ (Array.of_list env) let rec pp_varmap o vm = match vm with | Mc.Empty -> output_string o "[]" | Mc.Leaf z -> Printf.fprintf o "[%a]" pp_z z | Mc.Node(l,z,r) -> Printf.fprintf o "[%a, %a, %a]" pp_varmap l pp_z z pp_varmap r let rec dump_proof_term = function | Micromega.DoneProof -> Lazy.force coq_doneProof | Micromega.RatProof(cone,rst) -> Term.mkApp(Lazy.force coq_ratProof, [| dump_psatz coq_Z dump_z cone; dump_proof_term rst|]) | Micromega.CutProof(cone,prf) -> Term.mkApp(Lazy.force coq_cutProof, [| dump_psatz coq_Z dump_z cone ; dump_proof_term prf|]) | Micromega.EnumProof(c1,c2,prfs) -> Term.mkApp (Lazy.force coq_enumProof, [| dump_psatz coq_Z dump_z c1 ; dump_psatz coq_Z dump_z c2 ; dump_list (Lazy.force coq_proofTerm) dump_proof_term prfs |]) let rec size_of_psatz = function | Micromega.PsatzIn _ -> 1 | Micromega.PsatzSquare _ -> 1 | Micromega.PsatzMulC(_,p) -> 1 + (size_of_psatz p) | Micromega.PsatzMulE(p1,p2) | Micromega.PsatzAdd(p1,p2) -> size_of_psatz p1 + size_of_psatz p2 | Micromega.PsatzC _ -> 1 | Micromega.PsatzZ -> 1 let rec size_of_pf = function | Micromega.DoneProof -> 1 | Micromega.RatProof(p,a) -> (size_of_pf a) + 
(size_of_psatz p) | Micromega.CutProof(p,a) -> (size_of_pf a) + (size_of_psatz p) | Micromega.EnumProof(p1,p2,l) -> (size_of_psatz p1) + (size_of_psatz p2) + (List.fold_left (fun acc p -> size_of_pf p + acc) 0 l) let dump_proof_term t = if debug then Printf.printf "dump_proof_term %i\n" (size_of_pf t) ; dump_proof_term t let pp_q o q = Printf.fprintf o "%a/%a" pp_z q.Micromega.qnum pp_positive q.Micromega.qden let rec pp_proof_term o = function | Micromega.DoneProof -> Printf.fprintf o "D" | Micromega.RatProof(cone,rst) -> Printf.fprintf o "R[%a,%a]" (pp_psatz pp_z) cone pp_proof_term rst | Micromega.CutProof(cone,rst) -> Printf.fprintf o "C[%a,%a]" (pp_psatz pp_z) cone pp_proof_term rst | Micromega.EnumProof(c1,c2,rst) -> Printf.fprintf o "EP[%a,%a,%a]" (pp_psatz pp_z) c1 (pp_psatz pp_z) c2 (pp_list "[" "]" pp_proof_term) rst let rec parse_hyps parse_arith env tg hyps = match hyps with | [] -> ([],env,tg) | (i,t)::l -> let (lhyps,env,tg) = parse_hyps parse_arith env tg l in try let (c,env,tg) = parse_formula parse_arith env tg t in ((i,c)::lhyps, env,tg) with e when e <> Sys.Break -> (lhyps,env,tg) (*(if debug then Printf.printf "parse_arith : %s\n" x);*) (*exception ParseError*) let parse_goal parse_arith env hyps term = (* try*) let (f,env,tg) = parse_formula parse_arith env (Tag.from 0) term in let (lhyps,env,tg) = parse_hyps parse_arith env tg hyps in (lhyps,f,env) (* with Failure x -> raise ParseError*) (** * The datastructures that aggregate theory-dependent proof values. *) type ('synt_c, 'prf) domain_spec = { typ : Term.constr; (* is the type of the interpretation domain - Z, Q, R*) coeff : Term.constr ; (* is the type of the syntactic coeffs - Z , Q , Rcst *) dump_coeff : 'synt_c -> Term.constr ; proof_typ : Term.constr ; dump_proof : 'prf -> Term.constr } let zz_domain_spec = lazy { typ = Lazy.force coq_Z; coeff = Lazy.force coq_Z; dump_coeff = dump_z ; proof_typ = Lazy.force coq_proofTerm ; dump_proof = dump_proof_term } let qq_domain_spec = lazy { typ = Lazy.force coq_Q; coeff = Lazy.force coq_Q; dump_coeff = dump_q ; proof_typ = Lazy.force coq_QWitness ; dump_proof = dump_psatz coq_Q dump_q } let rcst_domain_spec = lazy { typ = Lazy.force coq_R; coeff = Lazy.force coq_Rcst; dump_coeff = dump_Rcst; proof_typ = Lazy.force coq_QWitness ; dump_proof = dump_psatz coq_Q dump_q } (** * Instanciate the current Coq goal with a Micromega formula, a varmap, and a * witness. *) let micromega_order_change spec cert cert_typ env ff gl = let formula_typ = (Term.mkApp (Lazy.force coq_Cstr,[|spec.coeff|])) in let ff = dump_formula formula_typ (dump_cstr spec.coeff spec.dump_coeff) ff in let vm = dump_varmap (spec.typ) env in Tactics.change_in_concl None (set [ ("__ff", ff, Term.mkApp(Lazy.force coq_Formula, [|formula_typ |])); ("__varmap", vm, Term.mkApp (Coqlib.gen_constant_in_modules "VarMap" [["Coq" ; "micromega" ; "VarMap"] ; ["VarMap"]] "t", [|spec.typ|])); ("__wit", cert, cert_typ) ] (Tacmach.pf_concl gl) ) gl (** * The datastructures that aggregate prover attributes. 
*) type ('a,'prf) prover = { name : string ; (* name of the prover *) prover : 'a list -> 'prf option ; (* the prover itself *) hyps : 'prf -> ISet.t ; (* extract the indexes of the hypotheses really used in the proof *) compact : 'prf -> (int -> int) -> 'prf ; (* remap the hyp indexes according to function *) pp_prf : out_channel -> 'prf -> unit ;(* pretting printing of proof *) pp_f : out_channel -> 'a -> unit (* pretty printing of the formulas (polynomials)*) } (** * Given a list of provers and a disjunction of atoms, find a proof of any of * the atoms. Returns an (optional) pair of a proof and a prover * datastructure. *) let find_witness provers polys1 = let provers = List.map (fun p -> (fun l -> match p.prover l with | None -> None | Some prf -> Some(prf,p)) , p.name) provers in try_any provers (List.map fst polys1) (** * Given a list of provers and a CNF, find a proof for each of the clauses. * Return the proofs as a list. *) let witness_list prover l = let rec xwitness_list l = match l with | [] -> Some [] | e :: l -> match find_witness prover e with | None -> None | Some w -> (match xwitness_list l with | None -> None | Some l -> Some (w :: l) ) in xwitness_list l let witness_list_tags = witness_list (* *Deprecated* let is_singleton = function [] -> true | [e] -> true | _ -> false *) let pp_ml_list pp_elt o l = output_string o "[" ; List.iter (fun x -> Printf.fprintf o "%a ;" pp_elt x) l ; output_string o "]" (** * Prune the proof object, according to the 'diff' between two cnf formulas. *) let compact_proofs (cnf_ff: 'cst cnf) res (cnf_ff': 'cst cnf) = let compact_proof (old_cl:'cst clause) (prf,prover) (new_cl:'cst clause) = let new_cl = Mutils.mapi (fun (f,_) i -> (f,i)) new_cl in let remap i = let formula = try fst (List.nth old_cl i) with Failure _ -> failwith "bad old index" in List.assoc formula new_cl in (* if debug then begin Printf.printf "\ncompact_proof : %a %a %a" (pp_ml_list prover.pp_f) (List.map fst old_cl) prover.pp_prf prf (pp_ml_list prover.pp_f) (List.map fst new_cl) ; flush stdout end ; *) let res = try prover.compact prf remap with x when x <> Sys.Break -> if debug then Printf.fprintf stdout "Proof compaction %s" (Printexc.to_string x) ; (* This should not happen -- this is the recovery plan... *) match prover.prover (List.map fst new_cl) with | None -> failwith "proof compaction error" | Some p -> p in if debug then begin Printf.printf " -> %a\n" prover.pp_prf res ; flush stdout end ; res in let is_proof_compatible (old_cl:'cst clause) (prf,prover) (new_cl:'cst clause) = let hyps_idx = prover.hyps prf in let hyps = selecti hyps_idx old_cl in is_sublist hyps new_cl in let cnf_res = List.combine cnf_ff res in (* we get pairs clause * proof *) List.map (fun x -> let (o,p) = List.find (fun (l,p) -> is_proof_compatible l p x) cnf_res in compact_proof o p x) cnf_ff' (** * "Hide out" tagged atoms of a formula by transforming them into generic * variables. See the Tag module in mutils.ml for more. 
*) let abstract_formula hyps f = let rec xabs f = match f with | X c -> X c | A(a,t,term) -> if TagSet.mem t hyps then A(a,t,term) else X(term) | C(f1,f2) -> (match xabs f1 , xabs f2 with | X a1 , X a2 -> X (Term.mkApp(Lazy.force coq_and, [|a1;a2|])) | f1 , f2 -> C(f1,f2) ) | D(f1,f2) -> (match xabs f1 , xabs f2 with | X a1 , X a2 -> X (Term.mkApp(Lazy.force coq_or, [|a1;a2|])) | f1 , f2 -> D(f1,f2) ) | N(f) -> (match xabs f with | X a -> X (Term.mkApp(Lazy.force coq_not, [|a|])) | f -> N f) | I(f1,hyp,f2) -> (match xabs f1 , hyp, xabs f2 with | X a1 , Some _ , af2 -> af2 | X a1 , None , X a2 -> X (Term.mkArrow a1 a2) | af1 , _ , af2 -> I(af1,hyp,af2) ) | FF -> FF | TT -> TT in xabs f (* [abstract_wrt_formula] is used in contexts whre f1 is already an abstraction of f2 *) let rec abstract_wrt_formula f1 f2 = match f1 , f2 with | X c , _ -> X c | A _ , A _ -> f2 | C(a,b) , C(a',b') -> C(abstract_wrt_formula a a', abstract_wrt_formula b b') | D(a,b) , D(a',b') -> D(abstract_wrt_formula a a', abstract_wrt_formula b b') | I(a,_,b) , I(a',x,b') -> I(abstract_wrt_formula a a',x, abstract_wrt_formula b b') | FF , FF -> FF | TT , TT -> TT | N x , N y -> N(abstract_wrt_formula x y) | _ -> failwith "abstract_wrt_formula" (** * This exception is raised by really_call_csdpcert if Coq's configure didn't * find a CSDP executable. *) exception CsdpNotFound (** * This is the core of Micromega: apply the prover, analyze the result and * prune unused fomulas, and finally modify the proof state. *) let formula_hyps_concl hyps concl = List.fold_right (fun (id,f) (cc,ids) -> match f with X _ -> (cc,ids) | _ -> (I(f,Some id,cc), id::ids)) hyps (concl,[]) let micromega_tauto negate normalise unsat deduce spec prover env polys1 polys2 gl = (* Express the goal as one big implication *) let (ff,ids) = formula_hyps_concl polys1 polys2 in (* Convert the aplpication into a (mc_)cnf (a list of lists of formulas) *) let cnf_ff,cnf_ff_tags = cnf negate normalise unsat deduce ff in if debug then begin Pp.pp (Pp.str "Formula....\n") ; let formula_typ = (Term.mkApp(Lazy.force coq_Cstr, [|spec.coeff|])) in let ff = dump_formula formula_typ (dump_cstr spec.typ spec.dump_coeff) ff in Pp.pp (Printer.prterm ff) ; Pp.pp_flush (); Printf.fprintf stdout "cnf : %a\n" (pp_cnf (fun o _ -> ())) cnf_ff end; match witness_list_tags prover cnf_ff with | None -> None | Some res -> (*Printf.printf "\nList %i" (List.length `res); *) let hyps = List.fold_left (fun s (cl,(prf,p)) -> let tags = ISet.fold (fun i s -> let t = snd (List.nth cl i) in if debug then (Printf.fprintf stdout "T : %i -> %a" i Tag.pp t) ; (*try*) TagSet.add t s (* with Invalid_argument _ -> s*)) (p.hyps prf) TagSet.empty in TagSet.union s tags) (List.fold_left (fun s i -> TagSet.add i s) TagSet.empty cnf_ff_tags) (List.combine cnf_ff res) in if debug then (Printf.printf "TForm : %a\n" pp_formula ff ; flush stdout; Printf.printf "Hyps : %a\n" (fun o s -> TagSet.fold (fun i _ -> Printf.fprintf o "%a " Tag.pp i) s ()) hyps) ; let ff' = abstract_formula hyps ff in let cnf_ff',_ = cnf negate normalise unsat deduce ff' in if debug then begin Pp.pp (Pp.str "\nAFormula\n") ; let formula_typ = (Term.mkApp( Lazy.force coq_Cstr,[| spec.coeff|])) in let ff' = dump_formula formula_typ (dump_cstr spec.typ spec.dump_coeff) ff' in Pp.pp (Printer.prterm ff') ; Pp.pp_flush (); Printf.fprintf stdout "cnf : %a\n" (pp_cnf (fun o _ -> ())) cnf_ff' end; (* Even if it does not work, this does not mean it is not provable -- the prover is REALLY incomplete *) (* if debug then begin (* recompute 
the proofs *) match witness_list_tags prover cnf_ff' with | None -> failwith "abstraction is wrong" | Some res -> () end ; *) let res' = compact_proofs cnf_ff res cnf_ff' in let (ff',res',ids) = (ff',res', ids_of_formula ff') in let res' = dump_list (spec.proof_typ) spec.dump_proof res' in Some (ids,ff',res') (** * Parse the proof environment, and call micromega_tauto *) let micromega_gen parse_arith (negate:'cst atom -> 'cst mc_cnf) (normalise:'cst atom -> 'cst mc_cnf) unsat deduce spec prover gl = let concl = Tacmach.pf_concl gl in let hyps = Tacmach.pf_hyps_types gl in try let (hyps,concl,env) = parse_goal parse_arith Env.empty hyps concl in let env = Env.elements env in let spec = Lazy.force spec in match micromega_tauto negate normalise unsat deduce spec prover env hyps concl gl with | None -> Tacticals.tclFAIL 0 (Pp.str " Cannot find witness") gl | Some (ids,ff',res') -> (Tacticals.tclTHENSEQ [ Tactics.generalize (List.map Term.mkVar ids) ; micromega_order_change spec res' (Term.mkApp(Lazy.force coq_list, [|spec.proof_typ|])) env ff' ]) gl with (* | Failure x -> flush stdout ; Pp.pp_flush () ; Tacticals.tclFAIL 0 (Pp.str x) gl *) | ParseError -> Tacticals.tclFAIL 0 (Pp.str "Bad logical fragment") gl | CsdpNotFound -> flush stdout ; Pp.pp_flush () ; Tacticals.tclFAIL 0 (Pp.str (" Skipping what remains of this tactic: the complexity of the goal requires " ^ "the use of a specialized external tool called csdp. \n\n" ^ "Unfortunately Coq isn't aware of the presence of any \"csdp\" executable in the path. \n\n" ^ "Csdp packages are provided by some OS distributions; binaries and source code can be downloaded from https://projects.coin-or.org/Csdp")) gl let micromega_order_changer cert env ff gl = let coeff = Lazy.force coq_Rcst in let dump_coeff = dump_Rcst in let typ = Lazy.force coq_R in let cert_typ = (Term.mkApp(Lazy.force coq_list, [|Lazy.force coq_QWitness |])) in let formula_typ = (Term.mkApp (Lazy.force coq_Cstr,[| coeff|])) in let ff = dump_formula formula_typ (dump_cstr coeff dump_coeff) ff in let vm = dump_varmap (typ) env in Tactics.change_in_concl None (set [ ("__ff", ff, Term.mkApp(Lazy.force coq_Formula, [|formula_typ |])); ("__varmap", vm, Term.mkApp (Coqlib.gen_constant_in_modules "VarMap" [["Coq" ; "micromega" ; "VarMap"] ; ["VarMap"]] "t", [|typ|])); ("__wit", cert, cert_typ) ] (Tacmach.pf_concl gl) ) gl let micromega_genr prover gl = let parse_arith = parse_rarith in let negate = Mc.rnegate in let normalise = Mc.rnormalise in let unsat = Mc.runsat in let deduce = Mc.rdeduce in let spec = lazy { typ = Lazy.force coq_R; coeff = Lazy.force coq_Rcst; dump_coeff = dump_q; proof_typ = Lazy.force coq_QWitness ; dump_proof = dump_psatz coq_Q dump_q } in let concl = Tacmach.pf_concl gl in let hyps = Tacmach.pf_hyps_types gl in try let (hyps,concl,env) = parse_goal parse_arith Env.empty hyps concl in let env = Env.elements env in let spec = Lazy.force spec in let hyps' = List.map (fun (n,f) -> (n, map_atoms (Micromega.map_Formula Micromega.q_of_Rcst) f)) hyps in let concl' = map_atoms (Micromega.map_Formula Micromega.q_of_Rcst) concl in match micromega_tauto negate normalise unsat deduce spec prover env hyps' concl' gl with | None -> Tacticals.tclFAIL 0 (Pp.str " Cannot find witness") gl | Some (ids,ff',res') -> let (ff,ids') = formula_hyps_concl (List.filter (fun (n,_) -> List.mem n ids) hyps) concl in (Tacticals.tclTHENSEQ [ Tactics.generalize (List.map Term.mkVar ids) ; micromega_order_changer res' env (abstract_wrt_formula ff' ff) ]) gl with (* | Failure x -> flush stdout ; 
Pp.pp_flush () ; Tacticals.tclFAIL 0 (Pp.str x) gl *) | ParseError -> Tacticals.tclFAIL 0 (Pp.str "Bad logical fragment") gl | CsdpNotFound -> flush stdout ; Pp.pp_flush () ; Tacticals.tclFAIL 0 (Pp.str (" Skipping what remains of this tactic: the complexity of the goal requires " ^ "the use of a specialized external tool called csdp. \n\n" ^ "Unfortunately Coq isn't aware of the presence of any \"csdp\" executable in the path. \n\n" ^ "Csdp packages are provided by some OS distributions; binaries and source code can be downloaded from https://projects.coin-or.org/Csdp")) gl let lift_ratproof prover l = match prover l with | None -> None | Some c -> Some (Mc.RatProof( c,Mc.DoneProof)) type micromega_polys = (Micromega.q Mc.pol * Mc.op1) list type csdp_certificate = S of Sos_types.positivstellensatz option | F of string type provername = string * int option (** * The caching mechanism. *) open Persistent_cache module Cache = PHashtable(struct type t = (provername * micromega_polys) let equal = (=) let hash = Hashtbl.hash end) let csdp_cache = "csdp.cache" (** * Build the command to call csdpcert, and launch it. This in turn will call * the sos driver to the csdp executable. * Throw CsdpNotFound if Coq isn't aware of any csdp executable. *) let require_csdp = if System.is_in_system_path "csdp" then lazy () else lazy (raise CsdpNotFound) let really_call_csdpcert : provername -> micromega_polys -> Sos_types.positivstellensatz option = fun provername poly -> Lazy.force require_csdp; let cmdname = List.fold_left Filename.concat (Envars.coqlib ()) ["plugins"; "micromega"; "csdpcert" ^ Coq_config.exec_extension] in match ((command cmdname [|cmdname|] (provername,poly)) : csdp_certificate) with | F str -> failwith str | S res -> res (** * Check the cache before calling the prover. *) let xcall_csdpcert = Cache.memo csdp_cache (fun (prover,pb) -> really_call_csdpcert prover pb) (** * Prover callback functions. 
*) let call_csdpcert prover pb = xcall_csdpcert (prover,pb) let rec z_to_q_pol e = match e with | Mc.Pc z -> Mc.Pc {Mc.qnum = z ; Mc.qden = Mc.XH} | Mc.Pinj(p,pol) -> Mc.Pinj(p,z_to_q_pol pol) | Mc.PX(pol1,p,pol2) -> Mc.PX(z_to_q_pol pol1, p, z_to_q_pol pol2) let call_csdpcert_q provername poly = match call_csdpcert provername poly with | None -> None | Some cert -> let cert = Certificate.q_cert_of_pos cert in if Mc.qWeakChecker poly cert then Some cert else ((print_string "buggy certificate" ; flush stdout) ;None) let call_csdpcert_z provername poly = let l = List.map (fun (e,o) -> (z_to_q_pol e,o)) poly in match call_csdpcert provername l with | None -> None | Some cert -> let cert = Certificate.z_cert_of_pos cert in if Mc.zWeakChecker poly cert then Some cert else ((print_string "buggy certificate" ; flush stdout) ;None) let xhyps_of_cone base acc prf = let rec xtract e acc = match e with | Mc.PsatzC _ | Mc.PsatzZ | Mc.PsatzSquare _ -> acc | Mc.PsatzIn n -> let n = (CoqToCaml.nat n) in if n >= base then ISet.add (n-base) acc else acc | Mc.PsatzMulC(_,c) -> xtract c acc | Mc.PsatzAdd(e1,e2) | Mc.PsatzMulE(e1,e2) -> xtract e1 (xtract e2 acc) in xtract prf acc let hyps_of_cone prf = xhyps_of_cone 0 ISet.empty prf let compact_cone prf f = let np n = CamlToCoq.nat (f (CoqToCaml.nat n)) in let rec xinterp prf = match prf with | Mc.PsatzC _ | Mc.PsatzZ | Mc.PsatzSquare _ -> prf | Mc.PsatzIn n -> Mc.PsatzIn (np n) | Mc.PsatzMulC(e,c) -> Mc.PsatzMulC(e,xinterp c) | Mc.PsatzAdd(e1,e2) -> Mc.PsatzAdd(xinterp e1,xinterp e2) | Mc.PsatzMulE(e1,e2) -> Mc.PsatzMulE(xinterp e1,xinterp e2) in xinterp prf let hyps_of_pt pt = let rec xhyps base pt acc = match pt with | Mc.DoneProof -> acc | Mc.RatProof(c,pt) -> xhyps (base+1) pt (xhyps_of_cone base acc c) | Mc.CutProof(c,pt) -> xhyps (base+1) pt (xhyps_of_cone base acc c) | Mc.EnumProof(c1,c2,l) -> let s = xhyps_of_cone base (xhyps_of_cone base acc c2) c1 in List.fold_left (fun s x -> xhyps (base + 1) x s) s l in xhyps 0 pt ISet.empty let hyps_of_pt pt = let res = hyps_of_pt pt in if debug then (Printf.fprintf stdout "\nhyps_of_pt : %a -> " pp_proof_term pt ; ISet.iter (fun i -> Printf.printf "%i " i) res); res let compact_pt pt f = let translate ofset x = if x < ofset then x else (f (x-ofset) + ofset) in let rec compact_pt ofset pt = match pt with | Mc.DoneProof -> Mc.DoneProof | Mc.RatProof(c,pt) -> Mc.RatProof(compact_cone c (translate (ofset)), compact_pt (ofset+1) pt ) | Mc.CutProof(c,pt) -> Mc.CutProof(compact_cone c (translate (ofset)), compact_pt (ofset+1) pt ) | Mc.EnumProof(c1,c2,l) -> Mc.EnumProof(compact_cone c1 (translate (ofset)), compact_cone c2 (translate (ofset)), Mc.map (fun x -> compact_pt (ofset+1) x) l) in compact_pt 0 pt (** * Definition of provers. * Instantiates the type ('a,'prf) prover defined above. 
*) let lift_pexpr_prover p l = p (List.map (fun (e,o) -> Mc.denorm e , o) l) let linear_prover_Z = { name = "linear prover" ; prover = lift_ratproof (lift_pexpr_prover (Certificate.linear_prover_with_cert Certificate.z_spec)) ; hyps = hyps_of_pt ; compact = compact_pt ; pp_prf = pp_proof_term; pp_f = fun o x -> pp_pol pp_z o (fst x) } let linear_prover_Q = { name = "linear prover"; prover = lift_pexpr_prover (Certificate.linear_prover_with_cert Certificate.q_spec) ; hyps = hyps_of_cone ; compact = compact_cone ; pp_prf = pp_psatz pp_q ; pp_f = fun o x -> pp_pol pp_q o (fst x) } let linear_prover_R = { name = "linear prover"; prover = lift_pexpr_prover (Certificate.linear_prover_with_cert Certificate.q_spec) ; hyps = hyps_of_cone ; compact = compact_cone ; pp_prf = pp_psatz pp_q ; pp_f = fun o x -> pp_pol pp_q o (fst x) } let non_linear_prover_Q str o = { name = "real nonlinear prover"; prover = call_csdpcert_q (str, o); hyps = hyps_of_cone; compact = compact_cone ; pp_prf = pp_psatz pp_q ; pp_f = fun o x -> pp_pol pp_q o (fst x) } let non_linear_prover_R str o = { name = "real nonlinear prover"; prover = call_csdpcert_q (str, o); hyps = hyps_of_cone; compact = compact_cone; pp_prf = pp_psatz pp_q; pp_f = fun o x -> pp_pol pp_q o (fst x) } let non_linear_prover_Z str o = { name = "real nonlinear prover"; prover = lift_ratproof (call_csdpcert_z (str, o)); hyps = hyps_of_pt; compact = compact_pt; pp_prf = pp_proof_term; pp_f = fun o x -> pp_pol pp_z o (fst x) } module CacheZ = PHashtable(struct type t = (Mc.z Mc.pol * Mc.op1) list let equal = (=) let hash = Hashtbl.hash end) let memo_zlinear_prover = CacheZ.memo "lia.cache" (lift_pexpr_prover Certificate.lia) let memo_nlia = CacheZ.memo "nlia.cache" (lift_pexpr_prover Certificate.nlia) (*let memo_zlinear_prover = (lift_pexpr_prover Lia.lia)*) (*let memo_zlinear_prover = CacheZ.memo "lia.cache" (lift_pexpr_prover Certificate.zlinear_prover)*) let linear_Z = { name = "lia"; prover = memo_zlinear_prover ; hyps = hyps_of_pt; compact = compact_pt; pp_prf = pp_proof_term; pp_f = fun o x -> pp_pol pp_z o (fst x) } let nlinear_Z = { name = "nlia"; prover = memo_nlia ; hyps = hyps_of_pt; compact = compact_pt; pp_prf = pp_proof_term; pp_f = fun o x -> pp_pol pp_z o (fst x) } let tauto_lia ff = let prover = linear_Z in let cnf_ff,_ = cnf Mc.negate Mc.normalise Mc.zunsat Mc.zdeduce ff in match witness_list_tags [prover] cnf_ff with | None -> None | Some l -> Some (List.map fst l) (** * Functions instantiating micromega_gen with the appropriate theories and * solvers *) let psatzl_Z gl = micromega_gen parse_zarith Mc.negate Mc.normalise Mc.zunsat Mc.zdeduce zz_domain_spec [ linear_prover_Z ] gl let psatzl_Q gl = micromega_gen parse_qarith Mc.qnegate Mc.qnormalise Mc.qunsat Mc.qdeduce qq_domain_spec [ linear_prover_Q ] gl let psatz_Q i gl = micromega_gen parse_qarith Mc.qnegate Mc.qnormalise Mc.qunsat Mc.qdeduce qq_domain_spec [ non_linear_prover_Q "real_nonlinear_prover" (Some i) ] gl let psatzl_R gl = micromega_genr [ linear_prover_R ] gl let psatz_R i gl = micromega_genr [ non_linear_prover_R "real_nonlinear_prover" (Some i) ] gl let psatz_Z i gl = micromega_gen parse_zarith Mc.negate Mc.normalise Mc.zunsat Mc.zdeduce zz_domain_spec [ non_linear_prover_Z "real_nonlinear_prover" (Some i) ] gl let sos_Z gl = micromega_gen parse_zarith Mc.negate Mc.normalise Mc.zunsat Mc.zdeduce zz_domain_spec [ non_linear_prover_Z "pure_sos" None ] gl let sos_Q gl = micromega_gen parse_qarith Mc.qnegate Mc.qnormalise Mc.qunsat Mc.qdeduce qq_domain_spec [ 
non_linear_prover_Q "pure_sos" None ] gl let sos_R gl = micromega_genr [ non_linear_prover_R "pure_sos" None ] gl let xlia gl = try micromega_gen parse_zarith Mc.negate Mc.normalise Mc.zunsat Mc.zdeduce zz_domain_spec [ linear_Z ] gl with reraise -> (*Printexc.print_backtrace stdout ;*) raise reraise let xnlia gl = try micromega_gen parse_zarith Mc.negate Mc.normalise Mc.zunsat Mc.zdeduce zz_domain_spec [ nlinear_Z ] gl with reraise -> (*Printexc.print_backtrace stdout ;*) raise reraise (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/micromega/csdpcert.ml0000640000175000001440000001531012121620060017402 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Const (C2Ml.q_to_num z) | PEX v -> Var ("x"^(string_of_int (C2Ml.index v))) | PEmul(p1,p2) -> let p1 = expr_to_term p1 in let p2 = expr_to_term p2 in let res = Mul(p1,p2) in res | PEadd(p1,p2) -> Add(expr_to_term p1, expr_to_term p2) | PEsub(p1,p2) -> Sub(expr_to_term p1, expr_to_term p2) | PEpow(p,n) -> Pow(expr_to_term p , C2Ml.n n) | PEopp p -> Opp (expr_to_term p) end open M open List open Mutils let rec canonical_sum_to_string = function s -> failwith "not implemented" let print_canonical_sum m = Format.print_string (canonical_sum_to_string m) let print_list_term o l = output_string o "print_list_term\n"; List.iter (fun (e,k) -> Printf.fprintf o "q: %s %s ;" (string_of_poly (poly_of_term (expr_to_term e))) (match k with Mc.Equal -> "= " | Mc.Strict -> "> " | Mc.NonStrict -> ">= " | _ -> failwith "not_implemented")) (List.map (fun (e, o) -> Mc.denorm e , o) l) ; output_string o "\n" let partition_expr l = let rec f i = function | [] -> ([],[],[]) | (e,k)::l -> let (eq,ge,neq) = f (i+1) l in match k with | Mc.Equal -> ((e,i)::eq,ge,neq) | Mc.NonStrict -> (eq,(e,Axiom_le i)::ge,neq) | Mc.Strict -> (* e > 0 == e >= 0 /\ e <> 0 *) (eq, (e,Axiom_lt i)::ge,(e,Axiom_lt i)::neq) | Mc.NonEqual -> (eq,ge,(e,Axiom_eq i)::neq) (* Not quite sure -- Coq interface has changed *) in f 0 l let rec sets_of_list l = match l with | [] -> [[]] | e::l -> let s = sets_of_list l in s@(List.map (fun s0 -> e::s0) s) (* The exploration is probably not complete - for simple cases, it works... 
*) let real_nonlinear_prover d l = let l = List.map (fun (e,op) -> (Mc.denorm e,op)) l in try let (eq,ge,neq) = partition_expr l in let rec elim_const = function [] -> [] | (x,y)::l -> let p = poly_of_term (expr_to_term x) in if poly_isconst p then elim_const l else (p,y)::(elim_const l) in let eq = elim_const eq in let peq = List.map fst eq in let pge = List.map (fun (e,psatz) -> poly_of_term (expr_to_term e),psatz) ge in let monoids = List.map (fun m -> (List.fold_right (fun (p,kd) y -> let p = poly_of_term (expr_to_term p) in match kd with | Axiom_lt i -> poly_mul p y | Axiom_eq i -> poly_mul (poly_pow p 2) y | _ -> failwith "monoids") m (poly_const (Int 1)) , map snd m)) (sets_of_list neq) in let (cert_ideal, cert_cone,monoid) = deepen_until d (fun d -> list_try_find (fun m -> let (ci,cc) = real_positivnullstellensatz_general false d peq pge (poly_neg (fst m) ) in (ci,cc,snd m)) monoids) 0 in let proofs_ideal = map2 (fun q i -> Eqmul(term_of_poly q,Axiom_eq i)) cert_ideal (List.map snd eq) in let proofs_cone = map term_of_sos cert_cone in let proof_ne = let (neq , lt) = List.partition (function Axiom_eq _ -> true | _ -> false ) monoid in let sq = match (List.map (function Axiom_eq i -> i | _ -> failwith "error") neq) with | [] -> Rational_lt (Int 1) | l -> Monoid l in List.fold_right (fun x y -> Product(x,y)) lt sq in let proof = list_fold_right_elements (fun s t -> Sum(s,t)) (proof_ne :: proofs_ideal @ proofs_cone) in S (Some proof) with | Sos_lib.TooDeep -> S None | x when x <> Sys.Break -> F (Printexc.to_string x) (* This is somewhat buggy, over Z, strict inequality vanish... *) let pure_sos l = let l = List.map (fun (e,o) -> Mc.denorm e, o) l in (* If there is no strict inequality, I should nonetheless be able to try something - over Z > is equivalent to -1 >= *) try let l = List.combine l (interval 0 (length l -1)) in let (lt,i) = try (List.find (fun (x,_) -> snd x = Mc.Strict) l) with Not_found -> List.hd l in let plt = poly_neg (poly_of_term (expr_to_term (fst lt))) in let (n,polys) = sumofsquares plt in (* n * (ci * pi^2) *) let pos = Product (Rational_lt n, List.fold_right (fun (c,p) rst -> Sum (Product (Rational_lt c, Square (term_of_poly p)), rst)) polys (Rational_lt (Int 0))) in let proof = Sum(Axiom_lt i, pos) in (* let s,proof' = scale_certificate proof in let cert = snd (cert_of_pos proof') in *) S (Some proof) with (* | Sos.CsdpNotFound -> F "Sos.CsdpNotFound" *) | x when x <> Sys.Break -> (* May be that could be refined *) S None let run_prover prover pb = match prover with | "real_nonlinear_prover", Some d -> real_nonlinear_prover d pb | "pure_sos", None -> pure_sos pb | prover, _ -> (Printf.printf "unknown prover: %s\n" prover; exit 1) let output_csdp_certificate o = function | S None -> output_string o "S None" | S (Some p) -> Printf.fprintf o "S (Some %a)" output_psatz p | F s -> Printf.fprintf o "F %s" s let main () = try let (prover,poly) = (input_value stdin : provername * micromega_polys) in let cert = run_prover prover poly in (* Printf.fprintf chan "%a -> %a" print_list_term poly output_csdp_certificate cert ; close_out chan ; *) output_value stdout (cert:csdp_certificate); flush stdout ; Marshal.to_channel chan (cert:csdp_certificate) [] ; flush chan ; exit 0 with any -> (Printf.fprintf chan "error %s" (Printexc.to_string any) ; exit 1) ;; let _ = main () in () (* Local Variables: *) (* coding: utf-8 *) (* End: *) coq-8.4pl2/plugins/setoid_ring/0000750000175000001440000000000012127276542015625 5ustar 
notinuserscoq-8.4pl2/plugins/setoid_ring/Rings_Z.v0000640000175000001440000000046211776416511017373 0ustar notinusersRequire Export Cring. Require Export Integral_domain. Require Export Ncring_initial. Instance Zcri: (Cring (Rr:=Zr)). red. exact Z.mul_comm. Defined. Lemma Z_one_zero: 1%Z <> 0%Z. omega. Qed. Instance Zdi : (Integral_domain (Rcr:=Zcri)). constructor. exact Zmult_integral. exact Z_one_zero. Defined. coq-8.4pl2/plugins/setoid_ring/Ring_polynom.v0000640000175000001440000012441712010532755020472 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R->R) (ropp : R->R). Variable req : R -> R -> Prop. (* Ring properties *) Variable Rsth : Equivalence req. Variable Reqe : ring_eq_ext radd rmul ropp req. Variable ARth : almost_ring_theory rO rI radd rmul rsub ropp req. (* Coefficients *) Variable C: Type. Variable (cO cI: C) (cadd cmul csub : C->C->C) (copp : C->C). Variable ceqb : C->C->bool. Variable phi : C -> R. Variable CRmorph : ring_morph rO rI radd rmul rsub ropp req cO cI cadd cmul csub copp ceqb phi. (* Power coefficients *) Variable Cpow : Type. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Variable pow_th : power_theory rI rmul req Cp_phi rpow. (* division is ok *) Variable cdiv: C -> C -> C * C. Variable div_th: div_theory req cadd cmul phi cdiv. (* R notations *) Notation "0" := rO. Notation "1" := rI. Infix "+" := radd. Infix "*" := rmul. Infix "-" := rsub. Notation "- x" := (ropp x). Infix "==" := req. Infix "^" := (pow_pos rmul). (* C notations *) Infix "+!" := cadd. Infix "*!" := cmul. Infix "-! " := csub. Notation "-! x" := (copp x). Infix "?=!" := ceqb. Notation "[ x ]" := (phi x). (* Useful tactics *) Add Morphism radd : radd_ext. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext. exact (Ropp_ext Reqe). Qed. Add Morphism rsub : rsub_ext. exact (ARsub_ext Rsth Reqe ARth). Qed. Ltac rsimpl := gen_srewrite Rsth Reqe ARth. Ltac add_push := gen_add_push radd Rsth Reqe ARth. Ltac mul_push := gen_mul_push rmul Rsth Reqe ARth. Ltac add_permut_rec t := match t with | ?x + ?y => add_permut_rec y || add_permut_rec x | _ => add_push t; apply (Radd_ext Reqe); [|reflexivity] end. Ltac add_permut := repeat (reflexivity || match goal with |- ?t == _ => add_permut_rec t end). Ltac mul_permut_rec t := match t with | ?x * ?y => mul_permut_rec y || mul_permut_rec x | _ => mul_push t; apply (Rmul_ext Reqe); [|reflexivity] end. Ltac mul_permut := repeat (reflexivity || match goal with |- ?t == _ => mul_permut_rec t end). (* Definition of multivariable polynomials with coefficients in C : Type [Pol] represents [X1 ... Xn]. The representation is Horner's where a [n] variable polynomial (C[X1..Xn]) is seen as a polynomial on [X1] which coefficients are polynomials with [n-1] variables (C[X2..Xn]). There are several optimisations to make the repr compacter: - [Pc c] is the constant polynomial of value c == c*X1^0*..*Xn^0 - [Pinj j Q] is a polynomial constant w.r.t the [j] first variables. variable indices are shifted of j in Q. 
== X1^0 *..* Xj^0 * Q{X1 <- Xj+1;..; Xn-j <- Xn} - [PX P i Q] is an optimised Horner form of P*X^i + Q with P not the null polynomial == P * X1^i + Q{X1 <- X2; ..; Xn-1 <- Xn} In addition: - polynomials of the form (PX (PX P i (Pc 0)) j Q) are forbidden since they can be represented by the simpler form (PX P (i+j) Q) - (Pinj i (Pinj j P)) is (Pinj (i+j) P) - (Pinj i (Pc c)) is (Pc c) *) Inductive Pol : Type := | Pc : C -> Pol | Pinj : positive -> Pol -> Pol | PX : Pol -> positive -> Pol -> Pol. Definition P0 := Pc cO. Definition P1 := Pc cI. Fixpoint Peq (P P' : Pol) {struct P'} : bool := match P, P' with | Pc c, Pc c' => c ?=! c' | Pinj j Q, Pinj j' Q' => match j ?= j' with | Eq => Peq Q Q' | _ => false end | PX P i Q, PX P' i' Q' => match i ?= i' with | Eq => if Peq P P' then Peq Q Q' else false | _ => false end | _, _ => false end. Infix "?==" := Peq. Definition mkPinj j P := match P with | Pc _ => P | Pinj j' Q => Pinj (j + j') Q | _ => Pinj j P end. Definition mkPinj_pred j P:= match j with | xH => P | xO j => Pinj (Pos.pred_double j) P | xI j => Pinj (xO j) P end. Definition mkPX P i Q := match P with | Pc c => if c ?=! cO then mkPinj xH Q else PX P i Q | Pinj _ _ => PX P i Q | PX P' i' Q' => if Q' ?== P0 then PX P' (i' + i) Q else PX P i Q end. Definition mkXi i := PX P1 i P0. Definition mkX := mkXi 1. (** Opposite of addition *) Fixpoint Popp (P:Pol) : Pol := match P with | Pc c => Pc (-! c) | Pinj j Q => Pinj j (Popp Q) | PX P i Q => PX (Popp P) i (Popp Q) end. Notation "-- P" := (Popp P). (** Addition et subtraction *) Fixpoint PaddC (P:Pol) (c:C) : Pol := match P with | Pc c1 => Pc (c1 +! c) | Pinj j Q => Pinj j (PaddC Q c) | PX P i Q => PX P i (PaddC Q c) end. Fixpoint PsubC (P:Pol) (c:C) : Pol := match P with | Pc c1 => Pc (c1 -! c) | Pinj j Q => Pinj j (PsubC Q c) | PX P i Q => PX P i (PsubC Q c) end. Section PopI. Variable Pop : Pol -> Pol -> Pol. Variable Q : Pol. Fixpoint PaddI (j:positive) (P:Pol) : Pol := match P with | Pc c => mkPinj j (PaddC Q c) | Pinj j' Q' => match Z.pos_sub j' j with | Zpos k => mkPinj j (Pop (Pinj k Q') Q) | Z0 => mkPinj j (Pop Q' Q) | Zneg k => mkPinj j' (PaddI k Q') end | PX P i Q' => match j with | xH => PX P i (Pop Q' Q) | xO j => PX P i (PaddI (Pos.pred_double j) Q') | xI j => PX P i (PaddI (xO j) Q') end end. Fixpoint PsubI (j:positive) (P:Pol) : Pol := match P with | Pc c => mkPinj j (PaddC (--Q) c) | Pinj j' Q' => match Z.pos_sub j' j with | Zpos k => mkPinj j (Pop (Pinj k Q') Q) | Z0 => mkPinj j (Pop Q' Q) | Zneg k => mkPinj j' (PsubI k Q') end | PX P i Q' => match j with | xH => PX P i (Pop Q' Q) | xO j => PX P i (PsubI (Pos.pred_double j) Q') | xI j => PX P i (PsubI (xO j) Q') end end. Variable P' : Pol. Fixpoint PaddX (i':positive) (P:Pol) : Pol := match P with | Pc c => PX P' i' P | Pinj j Q' => match j with | xH => PX P' i' Q' | xO j => PX P' i' (Pinj (Pos.pred_double j) Q') | xI j => PX P' i' (Pinj (xO j) Q') end | PX P i Q' => match Z.pos_sub i i' with | Zpos k => mkPX (Pop (PX P k P0) P') i' Q' | Z0 => mkPX (Pop P P') i Q' | Zneg k => mkPX (PaddX k P) i Q' end end. Fixpoint PsubX (i':positive) (P:Pol) : Pol := match P with | Pc c => PX (--P') i' P | Pinj j Q' => match j with | xH => PX (--P') i' Q' | xO j => PX (--P') i' (Pinj (Pos.pred_double j) Q') | xI j => PX (--P') i' (Pinj (xO j) Q') end | PX P i Q' => match Z.pos_sub i i' with | Zpos k => mkPX (Pop (PX P k P0) P') i' Q' | Z0 => mkPX (Pop P P') i Q' | Zneg k => mkPX (PsubX k P) i Q' end end. End PopI. 
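(* A small worked reading of the representation above, taking C := Z for
   concreteness: the polynomial 2*X1^3 + X2 + 1 is represented as
       PX (Pc 2) 3 (PX (Pc 1) 1 (Pc 1))
   i.e. (Pc 2) times X1^3, plus a tail polynomial whose head variable is X2.
   The smart constructors mkPinj and mkPX re-establish the invariants listed
   above whenever an intermediate result of the operations below would
   otherwise violate them. *)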
Fixpoint Padd (P P': Pol) {struct P'} : Pol := match P' with | Pc c' => PaddC P c' | Pinj j' Q' => PaddI Padd Q' j' P | PX P' i' Q' => match P with | Pc c => PX P' i' (PaddC Q' c) | Pinj j Q => match j with | xH => PX P' i' (Padd Q Q') | xO j => PX P' i' (Padd (Pinj (Pos.pred_double j) Q) Q') | xI j => PX P' i' (Padd (Pinj (xO j) Q) Q') end | PX P i Q => match Z.pos_sub i i' with | Zpos k => mkPX (Padd (PX P k P0) P') i' (Padd Q Q') | Z0 => mkPX (Padd P P') i (Padd Q Q') | Zneg k => mkPX (PaddX Padd P' k P) i (Padd Q Q') end end end. Infix "++" := Padd. Fixpoint Psub (P P': Pol) {struct P'} : Pol := match P' with | Pc c' => PsubC P c' | Pinj j' Q' => PsubI Psub Q' j' P | PX P' i' Q' => match P with | Pc c => PX (--P') i' (*(--(PsubC Q' c))*) (PaddC (--Q') c) | Pinj j Q => match j with | xH => PX (--P') i' (Psub Q Q') | xO j => PX (--P') i' (Psub (Pinj (Pos.pred_double j) Q) Q') | xI j => PX (--P') i' (Psub (Pinj (xO j) Q) Q') end | PX P i Q => match Z.pos_sub i i' with | Zpos k => mkPX (Psub (PX P k P0) P') i' (Psub Q Q') | Z0 => mkPX (Psub P P') i (Psub Q Q') | Zneg k => mkPX (PsubX Psub P' k P) i (Psub Q Q') end end end. Infix "--" := Psub. (** Multiplication *) Fixpoint PmulC_aux (P:Pol) (c:C) : Pol := match P with | Pc c' => Pc (c' *! c) | Pinj j Q => mkPinj j (PmulC_aux Q c) | PX P i Q => mkPX (PmulC_aux P c) i (PmulC_aux Q c) end. Definition PmulC P c := if c ?=! cO then P0 else if c ?=! cI then P else PmulC_aux P c. Section PmulI. Variable Pmul : Pol -> Pol -> Pol. Variable Q : Pol. Fixpoint PmulI (j:positive) (P:Pol) : Pol := match P with | Pc c => mkPinj j (PmulC Q c) | Pinj j' Q' => match Z.pos_sub j' j with | Zpos k => mkPinj j (Pmul (Pinj k Q') Q) | Z0 => mkPinj j (Pmul Q' Q) | Zneg k => mkPinj j' (PmulI k Q') end | PX P' i' Q' => match j with | xH => mkPX (PmulI xH P') i' (Pmul Q' Q) | xO j' => mkPX (PmulI j P') i' (PmulI (Pos.pred_double j') Q') | xI j' => mkPX (PmulI j P') i' (PmulI (xO j') Q') end end. End PmulI. Fixpoint Pmul (P P'' : Pol) {struct P''} : Pol := match P'' with | Pc c => PmulC P c | Pinj j' Q' => PmulI Pmul Q' j' P | PX P' i' Q' => match P with | Pc c => PmulC P'' c | Pinj j Q => let QQ' := match j with | xH => Pmul Q Q' | xO j => Pmul (Pinj (Pos.pred_double j) Q) Q' | xI j => Pmul (Pinj (xO j) Q) Q' end in mkPX (Pmul P P') i' QQ' | PX P i Q=> let QQ' := Pmul Q Q' in let PQ' := PmulI Pmul Q' xH P in let QP' := Pmul (mkPinj xH Q) P' in let PP' := Pmul P P' in (mkPX (mkPX PP' i P0 ++ QP') i' P0) ++ mkPX PQ' i QQ' end end. Infix "**" := Pmul. Fixpoint Psquare (P:Pol) : Pol := match P with | Pc c => Pc (c *! c) | Pinj j Q => Pinj j (Psquare Q) | PX P i Q => let twoPQ := Pmul P (mkPinj xH (PmulC Q (cI +! cI))) in let Q2 := Psquare Q in let P2 := Psquare P in mkPX (mkPX P2 i P0 ++ twoPQ) i Q2 end. (** Monomial **) (** A monomial is X1^k1...Xi^ki. Its representation is a simplified version of the polynomial representation: - [mon0] correspond to the polynom [P1]. - [(zmon j M)] corresponds to [(Pinj j ...)], i.e. skip j variable indices. - [(vmon i M)] is X^i*M with X the current variable, its corresponds to (PX P1 i ...)] *) Inductive Mon: Set := | mon0: Mon | zmon: positive -> Mon -> Mon | vmon: positive -> Mon -> Mon. Definition mkZmon j M := match M with mon0 => mon0 | _ => zmon j M end. Definition zmon_pred j M := match j with xH => M | _ => mkZmon (Pos.pred j) M end. Definition mkVmon i M := match M with | mon0 => vmon i mon0 | zmon j m => vmon i (zmon_pred j m) | vmon i' m => vmon (i+i') m end. 
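(* A concrete monomial, for orientation: X1^2 * X3 is written
       vmon 2 (zmon 1 (vmon 1 mon0))
   read as: power 2 of the current variable, skip one variable index, then
   power 1 of the new current variable. The evaluation function Mphi defined
   further below indeed sends it to X1^2 * X3 in an environment
   X1 :: X2 :: X3 :: ... . The smart constructors mkZmon, zmon_pred and
   mkVmon normalise away trivial skips and merge exponents of the current
   variable. *)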
Fixpoint CFactor (P: Pol) (c: C) {struct P}: Pol * Pol := match P with | Pc c1 => let (q,r) := cdiv c1 c in (Pc r, Pc q) | Pinj j1 P1 => let (R,S) := CFactor P1 c in (mkPinj j1 R, mkPinj j1 S) | PX P1 i Q1 => let (R1, S1) := CFactor P1 c in let (R2, S2) := CFactor Q1 c in (mkPX R1 i R2, mkPX S1 i S2) end. Fixpoint MFactor (P: Pol) (c: C) (M: Mon) {struct P}: Pol * Pol := match P, M with _, mon0 => if (ceqb c cI) then (Pc cO, P) else CFactor P c | Pc _, _ => (P, Pc cO) | Pinj j1 P1, zmon j2 M1 => match j1 ?= j2 with Eq => let (R,S) := MFactor P1 c M1 in (mkPinj j1 R, mkPinj j1 S) | Lt => let (R,S) := MFactor P1 c (zmon (j2 - j1) M1) in (mkPinj j1 R, mkPinj j1 S) | Gt => (P, Pc cO) end | Pinj _ _, vmon _ _ => (P, Pc cO) | PX P1 i Q1, zmon j M1 => let M2 := zmon_pred j M1 in let (R1, S1) := MFactor P1 c M in let (R2, S2) := MFactor Q1 c M2 in (mkPX R1 i R2, mkPX S1 i S2) | PX P1 i Q1, vmon j M1 => match i ?= j with Eq => let (R1,S1) := MFactor P1 c (mkZmon xH M1) in (mkPX R1 i Q1, S1) | Lt => let (R1,S1) := MFactor P1 c (vmon (j - i) M1) in (mkPX R1 i Q1, S1) | Gt => let (R1,S1) := MFactor P1 c (mkZmon xH M1) in (mkPX R1 i Q1, mkPX S1 (i-j) (Pc cO)) end end. Definition POneSubst (P1: Pol) (cM1: C * Mon) (P2: Pol): option Pol := let (c,M1) := cM1 in let (Q1,R1) := MFactor P1 c M1 in match R1 with (Pc c) => if c ?=! cO then None else Some (Padd Q1 (Pmul P2 R1)) | _ => Some (Padd Q1 (Pmul P2 R1)) end. Fixpoint PNSubst1 (P1: Pol) (cM1: C * Mon) (P2: Pol) (n: nat) : Pol := match POneSubst P1 cM1 P2 with Some P3 => match n with S n1 => PNSubst1 P3 cM1 P2 n1 | _ => P3 end | _ => P1 end. Definition PNSubst (P1: Pol) (cM1: C * Mon) (P2: Pol) (n: nat): option Pol := match POneSubst P1 cM1 P2 with Some P3 => match n with S n1 => Some (PNSubst1 P3 cM1 P2 n1) | _ => None end | _ => None end. Fixpoint PSubstL1 (P1: Pol) (LM1: list ((C * Mon) * Pol)) (n: nat) : Pol := match LM1 with cons (M1,P2) LM2 => PSubstL1 (PNSubst1 P1 M1 P2 n) LM2 n | _ => P1 end. Fixpoint PSubstL (P1: Pol) (LM1: list ((C * Mon) * Pol)) (n: nat) : option Pol := match LM1 with cons (M1,P2) LM2 => match PNSubst P1 M1 P2 n with Some P3 => Some (PSubstL1 P3 LM2 n) | None => PSubstL P1 LM2 n end | _ => None end. Fixpoint PNSubstL (P1: Pol) (LM1: list ((C * Mon) * Pol)) (m n: nat) : Pol := match PSubstL P1 LM1 n with Some P3 => match m with S m1 => PNSubstL P3 LM1 m1 n | _ => P3 end | _ => P1 end. (** Evaluation of a polynomial towards R *) Local Notation hd := (List.hd 0). Fixpoint Pphi(l:list R) (P:Pol) : R := match P with | Pc c => [c] | Pinj j Q => Pphi (jump j l) Q | PX P i Q => Pphi l P * (hd l) ^ i + Pphi (tail l) Q end. Reserved Notation "P @ l " (at level 10, no associativity). Notation "P @ l " := (Pphi l P). (** Evaluation of a monomial towards R *) Fixpoint Mphi(l:list R) (M: Mon) : R := match M with | mon0 => rI | zmon j M1 => Mphi (jump j l) M1 | vmon i M1 => Mphi (tail l) M1 * (hd l) ^ i end. Notation "M @@ l" := (Mphi l M) (at level 10, no associativity). (** Proofs *) Ltac destr_pos_sub := match goal with |- context [Z.pos_sub ?x ?y] => generalize (Z.pos_sub_discr x y); destruct (Z.pos_sub x y) end. Lemma jump_add' i j (l:list R) : jump (i + j) l = jump j (jump i l). Proof. rewrite Pos.add_comm. apply jump_add. Qed. Lemma Peq_ok P P' : (P ?== P') = true -> forall l, P@l == P'@ l. Proof. revert P';induction P;destruct P';simpl; intros H l; try easy. - now apply (morph_eq CRmorph). - destruct (Pos.compare_spec p p0); [ subst | easy | easy ]. now rewrite IHP. - specialize (IHP1 P'1); specialize (IHP2 P'2). 
destruct (Pos.compare_spec p p0); [ subst | easy | easy ]. destruct (P2 ?== P'1); [|easy]. rewrite H in *. now rewrite IHP1, IHP2. Qed. Lemma Peq_spec P P' : BoolSpec (forall l, P@l == P'@l) True (P ?== P'). Proof. generalize (Peq_ok P P'). destruct (P ?== P'); auto. Qed. Lemma Pphi0 l : P0@l == 0. Proof. simpl;apply (morph0 CRmorph). Qed. Lemma Pphi1 l : P1@l == 1. Proof. simpl;apply (morph1 CRmorph). Qed. Lemma mkPinj_ok j l P : (mkPinj j P)@l == P@(jump j l). Proof. destruct P;simpl;rsimpl. now rewrite jump_add'. Qed. Lemma pow_pos_add x i j : x^(j + i) == x^i * x^j. Proof. rewrite Pos.add_comm. apply (pow_pos_add Rsth Reqe.(Rmul_ext) ARth.(ARmul_assoc)). Qed. Lemma ceqb_spec c c' : BoolSpec ([c] == [c']) True (c ?=! c'). Proof. generalize (morph_eq CRmorph c c'). destruct (c ?=! c'); auto. Qed. Lemma mkPX_ok l P i Q : (mkPX P i Q)@l == P@l * (hd l)^i + Q@(tail l). Proof. unfold mkPX. destruct P. - case ceqb_spec; intros H; simpl; try reflexivity. rewrite H, (morph0 CRmorph), mkPinj_ok; rsimpl. - reflexivity. - case Peq_spec; intros H; simpl; try reflexivity. rewrite H, Pphi0, Pos.add_comm, pow_pos_add; rsimpl. Qed. Hint Rewrite Pphi0 Pphi1 mkPinj_ok mkPX_ok (morph0 CRmorph) (morph1 CRmorph) (morph0 CRmorph) (morph_add CRmorph) (morph_mul CRmorph) (morph_sub CRmorph) (morph_opp CRmorph) : Esimpl. (* Quicker than autorewrite with Esimpl :-) *) Ltac Esimpl := try rewrite_db Esimpl; rsimpl; simpl. Lemma PaddC_ok c P l : (PaddC P c)@l == P@l + [c]. Proof. revert l;induction P;simpl;intros;Esimpl;trivial. rewrite IHP2;rsimpl. Qed. Lemma PsubC_ok c P l : (PsubC P c)@l == P@l - [c]. Proof. revert l;induction P;simpl;intros. - Esimpl. - rewrite IHP;rsimpl. - rewrite IHP2;rsimpl. Qed. Lemma PmulC_aux_ok c P l : (PmulC_aux P c)@l == P@l * [c]. Proof. revert l;induction P;simpl;intros;Esimpl;trivial. rewrite IHP1, IHP2;rsimpl. add_permut. mul_permut. Qed. Lemma PmulC_ok c P l : (PmulC P c)@l == P@l * [c]. Proof. unfold PmulC. case ceqb_spec; intros H. - rewrite H; Esimpl. - case ceqb_spec; intros H'. + rewrite H'; Esimpl. + apply PmulC_aux_ok. Qed. Lemma Popp_ok P l : (--P)@l == - P@l. Proof. revert l;induction P;simpl;intros. - Esimpl. - apply IHP. - rewrite IHP1, IHP2;rsimpl. Qed. Hint Rewrite PaddC_ok PsubC_ok PmulC_ok Popp_ok : Esimpl. Lemma PaddX_ok P' P k l : (forall P l, (P++P')@l == P@l + P'@l) -> (PaddX Padd P' k P) @ l == P@l + P'@l * (hd l)^k. Proof. intros IHP'. revert k l. induction P;simpl;intros. - add_permut. - destruct p; simpl; rewrite ?jump_pred_double; add_permut. - destr_pos_sub; intros ->;Esimpl. + rewrite IHP';rsimpl. add_permut. + rewrite IHP', pow_pos_add;simpl;Esimpl. add_permut. + rewrite IHP1, pow_pos_add;rsimpl. add_permut. Qed. Lemma Padd_ok P' P l : (P ++ P')@l == P@l + P'@l. Proof. revert P l; induction P';simpl;intros;Esimpl. - revert p l; induction P;simpl;intros. + Esimpl; add_permut. + destr_pos_sub; intros ->;Esimpl. * now rewrite IHP'. * rewrite IHP';Esimpl. now rewrite jump_add'. * rewrite IHP. now rewrite jump_add'. + destruct p0;simpl. * rewrite IHP2;simpl. rsimpl. * rewrite IHP2;simpl. rewrite jump_pred_double. rsimpl. * rewrite IHP'. rsimpl. - destruct P;simpl. + Esimpl. add_permut. + destruct p0;simpl;Esimpl; rewrite IHP'2; simpl. * rsimpl. add_permut. * rewrite jump_pred_double. rsimpl. add_permut. * rsimpl. add_permut. + destr_pos_sub; intros ->; Esimpl. * rewrite IHP'1, IHP'2;rsimpl. add_permut. * rewrite IHP'1, IHP'2;simpl;Esimpl. rewrite pow_pos_add;rsimpl. add_permut. * rewrite PaddX_ok by trivial; rsimpl. rewrite IHP'2, pow_pos_add; rsimpl. 
add_permut. Qed. Lemma PsubX_ok P' P k l : (forall P l, (P--P')@l == P@l - P'@l) -> (PsubX Psub P' k P) @ l == P@l - P'@l * (hd l)^k. Proof. intros IHP'. revert k l. induction P;simpl;intros. - rewrite Popp_ok;rsimpl; add_permut. - destruct p; simpl; rewrite Popp_ok;rsimpl; rewrite ?jump_pred_double; add_permut. - destr_pos_sub; intros ->; Esimpl. + rewrite IHP';rsimpl. add_permut. + rewrite IHP', pow_pos_add;simpl;Esimpl. add_permut. + rewrite IHP1, pow_pos_add;rsimpl. add_permut. Qed. Lemma Psub_ok P' P l : (P -- P')@l == P@l - P'@l. Proof. revert P l; induction P';simpl;intros;Esimpl. - revert p l; induction P;simpl;intros. + Esimpl; add_permut. + destr_pos_sub; intros ->;Esimpl. * rewrite IHP';rsimpl. * rewrite IHP';Esimpl. now rewrite jump_add'. * rewrite IHP. now rewrite jump_add'. + destruct p0;simpl. * rewrite IHP2;simpl. rsimpl. * rewrite IHP2;simpl. rewrite jump_pred_double. rsimpl. * rewrite IHP'. rsimpl. - destruct P;simpl. + Esimpl; add_permut. + destruct p0;simpl;Esimpl; rewrite IHP'2; simpl. * rsimpl. add_permut. * rewrite jump_pred_double. rsimpl. add_permut. * rsimpl. add_permut. + destr_pos_sub; intros ->; Esimpl. * rewrite IHP'1, IHP'2;rsimpl. add_permut. * rewrite IHP'1, IHP'2;simpl;Esimpl. rewrite pow_pos_add;rsimpl. add_permut. * rewrite PsubX_ok by trivial;rsimpl. rewrite IHP'2, pow_pos_add;rsimpl. add_permut. Qed. Lemma PmulI_ok P' : (forall P l, (Pmul P P') @ l == P @ l * P' @ l) -> forall P p l, (PmulI Pmul P' p P) @ l == P @ l * P' @ (jump p l). Proof. intros IHP'. induction P;simpl;intros. - Esimpl; mul_permut. - destr_pos_sub; intros ->;Esimpl. + now rewrite IHP'. + now rewrite IHP', jump_add'. + now rewrite IHP, jump_add'. - destruct p0;Esimpl; rewrite ?IHP1, ?IHP2; rsimpl. + f_equiv. mul_permut. + rewrite jump_pred_double. f_equiv. mul_permut. + rewrite IHP'. f_equiv. mul_permut. Qed. Lemma Pmul_ok P P' l : (P**P')@l == P@l * P'@l. Proof. revert P l;induction P';simpl;intros. - apply PmulC_ok. - apply PmulI_ok;trivial. - destruct P. + rewrite (ARmul_comm ARth). Esimpl. + Esimpl. f_equiv. rewrite IHP'1; Esimpl. destruct p0;rewrite IHP'2;Esimpl. rewrite jump_pred_double; Esimpl. + rewrite Padd_ok, !mkPX_ok, Padd_ok, !mkPX_ok, !IHP'1, !IHP'2, PmulI_ok; trivial. simpl. Esimpl. add_permut; f_equiv; mul_permut. Qed. Lemma Psquare_ok P l : (Psquare P)@l == P@l * P@l. Proof. revert l;induction P;simpl;intros;Esimpl. - apply IHP. - rewrite Padd_ok, Pmul_ok;Esimpl. rewrite IHP1, IHP2. mul_push ((hd l)^p). now mul_push (P2@l). Qed. Lemma mkZmon_ok M j l : (mkZmon j M) @@ l == (zmon j M) @@ l. Proof. destruct M; simpl; rsimpl. Qed. Lemma zmon_pred_ok M j l : (zmon_pred j M) @@ (tail l) == (zmon j M) @@ l. Proof. destruct j; simpl; rewrite ?mkZmon_ok; simpl; rsimpl. rewrite jump_pred_double; rsimpl. Qed. Lemma mkVmon_ok M i l : (mkVmon i M)@@l == M@@l * (hd l)^i. Proof. destruct M;simpl;intros;rsimpl. - rewrite zmon_pred_ok;simpl;rsimpl. - rewrite pow_pos_add;rsimpl. Qed. Ltac destr_factor := match goal with | H : context [CFactor ?P _] |- context [CFactor ?P ?c] => destruct (CFactor P c); destr_factor; rewrite H; clear H | H : context [MFactor ?P _ _] |- context [MFactor ?P ?c ?M] => specialize (H M); destruct (MFactor P c M); destr_factor; rewrite H; clear H | _ => idtac end. Lemma Mcphi_ok P c l : let (Q,R) := CFactor P c in P@l == Q@l + [c] * R@l. Proof. revert l. induction P as [c0 | j P IH | P1 IH1 i P2 IH2]; intros l; Esimpl. - assert (H := div_th.(div_eucl_th) c0 c). destruct cdiv as (q,r). rewrite H; Esimpl. add_permut. - destr_factor. Esimpl. - destr_factor. 
Esimpl. add_permut. Qed. Lemma Mphi_ok P (cM: C * Mon) l : let (c,M) := cM in let (Q,R) := MFactor P c M in P@l == Q@l + [c] * M@@l * R@l. Proof. destruct cM as (c,M). revert M l. induction P; destruct M; intros l; simpl; auto; try (case ceqb_spec; intro He); try (case Pos.compare_spec; intros He); rewrite ?He; destr_factor; simpl; Esimpl. - assert (H := div_th.(div_eucl_th) c0 c). destruct cdiv as (q,r). rewrite H; Esimpl. add_permut. - assert (H := Mcphi_ok P c). destr_factor. Esimpl. - now rewrite <- jump_add, Pos.sub_add. - assert (H2 := Mcphi_ok P2 c). assert (H3 := Mcphi_ok P3 c). destr_factor. Esimpl. add_permut. - rewrite zmon_pred_ok. simpl. add_permut. - rewrite mkZmon_ok. simpl. add_permut. mul_permut. - add_permut. mul_permut. rewrite <- pow_pos_add, Pos.add_comm, Pos.sub_add by trivial; rsimpl. - rewrite mkZmon_ok. simpl. Esimpl. add_permut. mul_permut. rewrite <- pow_pos_add, Pos.sub_add by trivial; rsimpl. Qed. Lemma POneSubst_ok P1 cM1 P2 P3 l : POneSubst P1 cM1 P2 = Some P3 -> [fst cM1] * (snd cM1)@@l == P2@l -> P1@l == P3@l. Proof. destruct cM1 as (cc,M1). unfold POneSubst. assert (H := Mphi_ok P1 (cc, M1) l). simpl in H. destruct MFactor as (R1,S1); simpl. rewrite H. clear H. intros EQ EQ'. replace P3 with (R1 ++ P2 ** S1). - rewrite EQ', Padd_ok, Pmul_ok; rsimpl. - revert EQ. destruct S1; try now injection 1. case ceqb_spec; now inversion 2. Qed. Lemma PNSubst1_ok n P1 cM1 P2 l : [fst cM1] * (snd cM1)@@l == P2@l -> P1@l == (PNSubst1 P1 cM1 P2 n)@l. Proof. revert P1. induction n; simpl; intros P1; generalize (POneSubst_ok P1 cM1 P2); destruct POneSubst; intros; rewrite <- ?IHn; auto; reflexivity. Qed. Lemma PNSubst_ok n P1 cM1 P2 l P3 : PNSubst P1 cM1 P2 n = Some P3 -> [fst cM1] * (snd cM1)@@l == P2@l -> P1@l == P3@l. Proof. unfold PNSubst. assert (H := POneSubst_ok P1 cM1 P2); destruct POneSubst; try discriminate. destruct n; inversion_clear 1. intros. rewrite <- PNSubst1_ok; auto. Qed. Fixpoint MPcond (LM1: list (C * Mon * Pol)) (l: list R) : Prop := match LM1 with | (M1,P2) :: LM2 => ([fst M1] * (snd M1)@@l == P2@l) /\ MPcond LM2 l | _ => True end. Lemma PSubstL1_ok n LM1 P1 l : MPcond LM1 l -> P1@l == (PSubstL1 P1 LM1 n)@l. Proof. revert P1; induction LM1 as [|(M2,P2) LM2 IH]; simpl; intros. - reflexivity. - rewrite <- IH by intuition. now apply PNSubst1_ok. Qed. Lemma PSubstL_ok n LM1 P1 P2 l : PSubstL P1 LM1 n = Some P2 -> MPcond LM1 l -> P1@l == P2@l. Proof. revert P1. induction LM1 as [|(M2,P2') LM2 IH]; simpl; intros. - discriminate. - assert (H':=PNSubst_ok n P3 M2 P2'). destruct PNSubst. * injection H; intros <-. rewrite <- PSubstL1_ok; intuition. * now apply IH. Qed. Lemma PNSubstL_ok m n LM1 P1 l : MPcond LM1 l -> P1@l == (PNSubstL P1 LM1 m n)@l. Proof. revert LM1 P1. induction m; simpl; intros; assert (H' := PSubstL_ok n LM1 P2); destruct PSubstL; auto; try reflexivity. rewrite <- IHm; auto. Qed. (** Definition of polynomial expressions *) Inductive PExpr : Type := | PEc : C -> PExpr | PEX : positive -> PExpr | PEadd : PExpr -> PExpr -> PExpr | PEsub : PExpr -> PExpr -> PExpr | PEmul : PExpr -> PExpr -> PExpr | PEopp : PExpr -> PExpr | PEpow : PExpr -> N -> PExpr. (** evaluation of polynomial expressions towards R *) Definition mk_X j := mkPinj_pred j mkX. 
(** evaluation of polynomial expressions towards R *) Fixpoint PEeval (l:list R) (pe:PExpr) {struct pe} : R := match pe with | PEc c => phi c | PEX j => nth 0 j l | PEadd pe1 pe2 => (PEeval l pe1) + (PEeval l pe2) | PEsub pe1 pe2 => (PEeval l pe1) - (PEeval l pe2) | PEmul pe1 pe2 => (PEeval l pe1) * (PEeval l pe2) | PEopp pe1 => - (PEeval l pe1) | PEpow pe1 n => rpow (PEeval l pe1) (Cp_phi n) end. Strategy expand [PEeval]. (** Correctness proofs *) Lemma mkX_ok p l : nth 0 p l == (mk_X p) @ l. Proof. destruct p;simpl;intros;Esimpl;trivial. - now rewrite <-jump_tl, nth_jump. - now rewrite <- nth_jump, nth_pred_double. Qed. Hint Rewrite Padd_ok Psub_ok : Esimpl. Section POWER. Variable subst_l : Pol -> Pol. Fixpoint Ppow_pos (res P:Pol) (p:positive) : Pol := match p with | xH => subst_l (res ** P) | xO p => Ppow_pos (Ppow_pos res P p) P p | xI p => subst_l ((Ppow_pos (Ppow_pos res P p) P p) ** P) end. Definition Ppow_N P n := match n with | N0 => P1 | Npos p => Ppow_pos P1 P p end. Lemma Ppow_pos_ok l : (forall P, subst_l P@l == P@l) -> forall res P p, (Ppow_pos res P p)@l == res@l * (pow_pos Pmul P p)@l. Proof. intros subst_l_ok res P p. revert res. induction p;simpl;intros; rewrite ?subst_l_ok, ?Pmul_ok, ?IHp; mul_permut. Qed. Lemma Ppow_N_ok l : (forall P, subst_l P@l == P@l) -> forall P n, (Ppow_N P n)@l == (pow_N P1 Pmul P n)@l. Proof. destruct n;simpl. - reflexivity. - rewrite Ppow_pos_ok by trivial. Esimpl. Qed. End POWER. (** Normalization and rewriting *) Section NORM_SUBST_REC. Variable n : nat. Variable lmp:list (C*Mon*Pol). Let subst_l P := PNSubstL P lmp n n. Let Pmul_subst P1 P2 := subst_l (Pmul P1 P2). Let Ppow_subst := Ppow_N subst_l. Fixpoint norm_aux (pe:PExpr) : Pol := match pe with | PEc c => Pc c | PEX j => mk_X j | PEadd (PEopp pe1) pe2 => (norm_aux pe2) -- (norm_aux pe1) | PEadd pe1 (PEopp pe2) => (norm_aux pe1) -- (norm_aux pe2) | PEadd pe1 pe2 => (norm_aux pe1) ++ (norm_aux pe2) | PEsub pe1 pe2 => (norm_aux pe1) -- (norm_aux pe2) | PEmul pe1 pe2 => (norm_aux pe1) ** (norm_aux pe2) | PEopp pe1 => -- (norm_aux pe1) | PEpow pe1 n => Ppow_N (fun p => p) (norm_aux pe1) n end. Definition norm_subst pe := subst_l (norm_aux pe). (** Internally, [norm_aux] is expanded in a large number of cases. To speed-up proofs, we use an alternative definition. *) Definition get_PEopp pe := match pe with | PEopp pe' => Some pe' | _ => None end. Lemma norm_aux_PEadd pe1 pe2 : norm_aux (PEadd pe1 pe2) = match get_PEopp pe1, get_PEopp pe2 with | Some pe1', _ => (norm_aux pe2) -- (norm_aux pe1') | None, Some pe2' => (norm_aux pe1) -- (norm_aux pe2') | None, None => (norm_aux pe1) ++ (norm_aux pe2) end. Proof. simpl (norm_aux (PEadd _ _)). destruct pe1; [ | | | | | reflexivity | ]; destruct pe2; simpl get_PEopp; reflexivity. Qed. Lemma norm_aux_PEopp pe : match get_PEopp pe with | Some pe' => norm_aux pe = -- (norm_aux pe') | None => True end. Proof. now destruct pe. Qed. Lemma norm_aux_spec l pe : PEeval l pe == (norm_aux pe)@l. Proof. intros. induction pe. - reflexivity. - apply mkX_ok. - simpl PEeval. rewrite IHpe1, IHpe2. assert (H1 := norm_aux_PEopp pe1). assert (H2 := norm_aux_PEopp pe2). rewrite norm_aux_PEadd. do 2 destruct get_PEopp; rewrite ?H1, ?H2; Esimpl; add_permut. - simpl. rewrite IHpe1, IHpe2. Esimpl. - simpl. rewrite IHpe1, IHpe2. now rewrite Pmul_ok. - simpl. rewrite IHpe. Esimpl. - simpl. rewrite Ppow_N_ok by reflexivity. rewrite pow_th.(rpow_pow_N). destruct n0; simpl; Esimpl. induction p;simpl; now rewrite ?IHp, ?IHpe, ?Pms_ok, ?Pmul_ok. Qed. 
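(* A worked normalisation, assuming C := Z with the usual operations for
   cO, cI, cadd and ceqb:
       norm_aux (PEadd (PEX 1) (PEX 1))
     = PX (Pc 1) 1 (Pc 0) ++ PX (Pc 1) 1 (Pc 0)    (each summand is X1)
     = PX (Pc 2) 1 (Pc 0)
   which is the canonical form of 2*X1. With an empty substitution list,
   norm_subst returns the same polynomial; the lemma ring_correct further
   below decides ring equalities by comparing such normal forms with Peq. *)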
Lemma norm_subst_spec : forall l pe, MPcond lmp l -> PEeval l pe == (norm_subst pe)@l. Proof. intros;unfold norm_subst. unfold subst_l;rewrite <- PNSubstL_ok;trivial. apply norm_aux_spec. Qed. End NORM_SUBST_REC. Fixpoint interp_PElist (l:list R) (lpe:list (PExpr*PExpr)) {struct lpe} : Prop := match lpe with | nil => True | (me,pe)::lpe => match lpe with | nil => PEeval l me == PEeval l pe | _ => PEeval l me == PEeval l pe /\ interp_PElist l lpe end end. Fixpoint mon_of_pol (P:Pol) : option (C * Mon) := match P with | Pc c => if (c ?=! cO) then None else Some (c, mon0) | Pinj j P => match mon_of_pol P with | None => None | Some (c,m) => Some (c, mkZmon j m) end | PX P i Q => if Peq Q P0 then match mon_of_pol P with | None => None | Some (c,m) => Some (c, mkVmon i m) end else None end. Fixpoint mk_monpol_list (lpe:list (PExpr * PExpr)) : list (C*Mon*Pol) := match lpe with | nil => nil | (me,pe)::lpe => match mon_of_pol (norm_subst 0 nil me) with | None => mk_monpol_list lpe | Some m => (m,norm_subst 0 nil pe):: mk_monpol_list lpe end end. Lemma mon_of_pol_ok : forall P m, mon_of_pol P = Some m -> forall l, [fst m] * Mphi l (snd m) == P@l. Proof. induction P;simpl;intros;Esimpl. assert (H1 := (morph_eq CRmorph) c cO). destruct (c ?=! cO). discriminate. inversion H;trivial;Esimpl. generalize H;clear H;case_eq (mon_of_pol P). intros (c1,P2) H0 H1; inversion H1; Esimpl. generalize (IHP (c1, P2) H0 (jump p l)). rewrite mkZmon_ok;simpl;auto. intros; discriminate. generalize H;clear H;change match P3 with | Pc c => c ?=! cO | Pinj _ _ => false | PX _ _ _ => false end with (P3 ?== P0). assert (H := Peq_ok P3 P0). destruct (P3 ?== P0). case_eq (mon_of_pol P2);try intros (cc, pp); intros. inversion H1. simpl. rewrite mkVmon_ok;simpl. rewrite H;trivial;Esimpl. generalize (IHP1 _ H0); simpl; intros HH; rewrite HH; rsimpl. discriminate. intros;discriminate. Qed. Lemma interp_PElist_ok : forall l lpe, interp_PElist l lpe -> MPcond (mk_monpol_list lpe) l. Proof. induction lpe;simpl. trivial. destruct a;simpl;intros. assert (HH:=mon_of_pol_ok (norm_subst 0 nil p)); destruct (mon_of_pol (norm_subst 0 nil p)). split. rewrite <- norm_subst_spec by exact I. destruct lpe;try destruct H;rewrite <- H; rewrite (norm_subst_spec 0 nil); try exact I;apply HH;trivial. apply IHlpe. destruct lpe;simpl;trivial. destruct H. exact H0. apply IHlpe. destruct lpe;simpl;trivial. destruct H. exact H0. Qed. Lemma norm_subst_ok : forall n l lpe pe, interp_PElist l lpe -> PEeval l pe == (norm_subst n (mk_monpol_list lpe) pe)@l. Proof. intros;apply norm_subst_spec. apply interp_PElist_ok;trivial. Qed. Lemma ring_correct : forall n l lpe pe1 pe2, interp_PElist l lpe -> (let lmp := mk_monpol_list lpe in norm_subst n lmp pe1 ?== norm_subst n lmp pe2) = true -> PEeval l pe1 == PEeval l pe2. Proof. simpl;intros. do 2 (rewrite (norm_subst_ok n l lpe);trivial). apply Peq_ok;trivial. Qed. (** Generic evaluation of polynomial towards R avoiding parenthesis *) Variable get_sign : C -> option C. Variable get_sign_spec : sign_theory copp ceqb get_sign. Section EVALUATION. (* [mkpow x p] = x^p *) Variable mkpow : R -> positive -> R. (* [mkpow x p] = -(x^p) *) Variable mkopp_pow : R -> positive -> R. (* [mkmult_pow r x p] = r * x^p *) Variable mkmult_pow : R -> R -> positive -> R. Fixpoint mkmult_rec (r:R) (lm:list (R*positive)) {struct lm}: R := match lm with | nil => r | cons (x,p) t => mkmult_rec (mkmult_pow r x p) t end. Definition mkmult1 lm := match lm with | nil => 1 | cons (x,p) t => mkmult_rec (mkpow x p) t end. 
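(* On a small factor list, and under the hypotheses mkpow_spec and
   mkmult_pow_spec stated further below,
       mkmult_rec r ((x,2) :: (y,3) :: nil)
     = mkmult_pow (mkmult_pow r x 2) y 3 == (r * x^2) * y^3
   and mkmult1 ((x,2) :: (y,3) :: nil) == x^2 * y^3, so the head factor is
   not multiplied by a spurious 1. *)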
Definition mkmultm1 lm := match lm with | nil => ropp rI | cons (x,p) t => mkmult_rec (mkopp_pow x p) t end. Definition mkmult_c_pos c lm := if c ?=! cI then mkmult1 (rev' lm) else mkmult_rec [c] (rev' lm). Definition mkmult_c c lm := match get_sign c with | None => mkmult_c_pos c lm | Some c' => if c' ?=! cI then mkmultm1 (rev' lm) else mkmult_rec [c] (rev' lm) end. Definition mkadd_mult rP c lm := match get_sign c with | None => rP + mkmult_c_pos c lm | Some c' => rP - mkmult_c_pos c' lm end. Definition add_pow_list (r:R) n l := match n with | N0 => l | Npos p => (r,p)::l end. Fixpoint add_mult_dev (rP:R) (P:Pol) (fv:list R) (n:N) (lm:list (R*positive)) {struct P} : R := match P with | Pc c => let lm := add_pow_list (hd fv) n lm in mkadd_mult rP c lm | Pinj j Q => add_mult_dev rP Q (jump j fv) N0 (add_pow_list (hd fv) n lm) | PX P i Q => let rP := add_mult_dev rP P fv (N.add (Npos i) n) lm in if Q ?== P0 then rP else add_mult_dev rP Q (tail fv) N0 (add_pow_list (hd fv) n lm) end. Fixpoint mult_dev (P:Pol) (fv : list R) (n:N) (lm:list (R*positive)) {struct P} : R := (* P@l * (hd 0 l)^n * lm *) match P with | Pc c => mkmult_c c (add_pow_list (hd fv) n lm) | Pinj j Q => mult_dev Q (jump j fv) N0 (add_pow_list (hd fv) n lm) | PX P i Q => let rP := mult_dev P fv (N.add (Npos i) n) lm in if Q ?== P0 then rP else let lmq := add_pow_list (hd fv) n lm in add_mult_dev rP Q (tail fv) N0 lmq end. Definition Pphi_avoid fv P := mult_dev P fv N0 nil. Fixpoint r_list_pow (l:list (R*positive)) : R := match l with | nil => rI | cons (r,p) l => pow_pos rmul r p * r_list_pow l end. Hypothesis mkpow_spec : forall r p, mkpow r p == pow_pos rmul r p. Hypothesis mkopp_pow_spec : forall r p, mkopp_pow r p == - (pow_pos rmul r p). Hypothesis mkmult_pow_spec : forall r x p, mkmult_pow r x p == r * pow_pos rmul x p. Lemma mkmult_rec_ok : forall lm r, mkmult_rec r lm == r * r_list_pow lm. Proof. induction lm;intros;simpl;Esimpl. destruct a as (x,p);Esimpl. rewrite IHlm. rewrite mkmult_pow_spec. Esimpl. Qed. Lemma mkmult1_ok : forall lm, mkmult1 lm == r_list_pow lm. Proof. destruct lm;simpl;Esimpl. destruct p. rewrite mkmult_rec_ok;rewrite mkpow_spec;Esimpl. Qed. Lemma mkmultm1_ok : forall lm, mkmultm1 lm == - r_list_pow lm. Proof. destruct lm;simpl;Esimpl. destruct p;rewrite mkmult_rec_ok. rewrite mkopp_pow_spec;Esimpl. Qed. Lemma r_list_pow_rev : forall l, r_list_pow (rev' l) == r_list_pow l. Proof. assert (forall l lr : list (R * positive), r_list_pow (rev_append l lr) == r_list_pow lr * r_list_pow l). induction l;intros;simpl;Esimpl. destruct a;rewrite IHl;Esimpl. rewrite (ARmul_comm ARth (pow_pos rmul r p)). reflexivity. intros;unfold rev'. rewrite H;simpl;Esimpl. Qed. Lemma mkmult_c_pos_ok : forall c lm, mkmult_c_pos c lm == [c]* r_list_pow lm. Proof. intros;unfold mkmult_c_pos;simpl. assert (H := (morph_eq CRmorph) c cI). rewrite <- r_list_pow_rev; destruct (c ?=! cI). rewrite H;trivial;Esimpl. apply mkmult1_ok. apply mkmult_rec_ok. Qed. Lemma mkmult_c_ok : forall c lm, mkmult_c c lm == [c] * r_list_pow lm. Proof. intros;unfold mkmult_c;simpl. case_eq (get_sign c);intros. assert (H1 := (morph_eq CRmorph) c0 cI). destruct (c0 ?=! cI). rewrite (CRmorph.(morph_eq) _ _ (get_sign_spec.(sign_spec) _ H)). Esimpl. rewrite H1;trivial. rewrite <- r_list_pow_rev;trivial;Esimpl. apply mkmultm1_ok. rewrite <- r_list_pow_rev; apply mkmult_rec_ok. apply mkmult_c_pos_ok. Qed. Lemma mkadd_mult_ok : forall rP c lm, mkadd_mult rP c lm == rP + [c]*r_list_pow lm. Proof. intros;unfold mkadd_mult. case_eq (get_sign c);intros. 
rewrite (CRmorph.(morph_eq) _ _ (get_sign_spec.(sign_spec) _ H));Esimpl. rewrite mkmult_c_pos_ok;Esimpl. rewrite mkmult_c_pos_ok;Esimpl. Qed. Lemma add_pow_list_ok : forall r n l, r_list_pow (add_pow_list r n l) == pow_N rI rmul r n * r_list_pow l. Proof. destruct n;simpl;intros;Esimpl. Qed. Lemma add_mult_dev_ok : forall P rP fv n lm, add_mult_dev rP P fv n lm == rP + P@fv*pow_N rI rmul (hd fv) n * r_list_pow lm. Proof. induction P;simpl;intros. rewrite mkadd_mult_ok. rewrite add_pow_list_ok; Esimpl. rewrite IHP. simpl. rewrite add_pow_list_ok; Esimpl. change (match P3 with | Pc c => c ?=! cO | Pinj _ _ => false | PX _ _ _ => false end) with (Peq P3 P0). change match n with | N0 => Npos p | Npos q => Npos (p + q) end with (N.add (Npos p) n);trivial. assert (H := Peq_ok P3 P0). destruct (P3 ?== P0). rewrite (H eq_refl). rewrite IHP1. destruct n;simpl;Esimpl;rewrite pow_pos_add;Esimpl. add_permut. mul_permut. rewrite IHP2. rewrite IHP1. destruct n;simpl;Esimpl;rewrite pow_pos_add;Esimpl. add_permut. mul_permut. Qed. Lemma mult_dev_ok : forall P fv n lm, mult_dev P fv n lm == P@fv * pow_N rI rmul (hd fv) n * r_list_pow lm. Proof. induction P;simpl;intros;Esimpl. rewrite mkmult_c_ok;rewrite add_pow_list_ok;Esimpl. rewrite IHP. simpl;rewrite add_pow_list_ok;Esimpl. change (match P3 with | Pc c => c ?=! cO | Pinj _ _ => false | PX _ _ _ => false end) with (Peq P3 P0). change match n with | N0 => Npos p | Npos q => Npos (p + q) end with (N.add (Npos p) n);trivial. assert (H := Peq_ok P3 P0). destruct (P3 ?== P0). rewrite (H eq_refl). rewrite IHP1. destruct n;simpl;Esimpl;rewrite pow_pos_add;Esimpl. mul_permut. rewrite add_mult_dev_ok. rewrite IHP1; rewrite add_pow_list_ok. destruct n;simpl;Esimpl;rewrite pow_pos_add;Esimpl. add_permut; mul_permut. Qed. Lemma Pphi_avoid_ok : forall P fv, Pphi_avoid fv P == P@fv. Proof. unfold Pphi_avoid;intros;rewrite mult_dev_ok;simpl;Esimpl. Qed. End EVALUATION. Definition Pphi_pow := let mkpow x p := match p with xH => x | _ => rpow x (Cp_phi (Npos p)) end in let mkopp_pow x p := ropp (mkpow x p) in let mkmult_pow r x p := rmul r (mkpow x p) in Pphi_avoid mkpow mkopp_pow mkmult_pow. Lemma local_mkpow_ok r p : match p with | xI _ => rpow r (Cp_phi (Npos p)) | xO _ => rpow r (Cp_phi (Npos p)) | 1 => r end == pow_pos rmul r p. Proof. destruct p; now rewrite ?pow_th.(rpow_pow_N). Qed. Lemma Pphi_pow_ok : forall P fv, Pphi_pow fv P == P@fv. Proof. unfold Pphi_pow;intros;apply Pphi_avoid_ok;intros; now rewrite ?local_mkpow_ok. Qed. Lemma ring_rw_pow_correct : forall n lH l, interp_PElist l lH -> forall lmp, mk_monpol_list lH = lmp -> forall pe npe, norm_subst n lmp pe = npe -> PEeval l pe == Pphi_pow l npe. Proof. intros n lH l H1 lmp Heq1 pe npe Heq2. rewrite Pphi_pow_ok, <- Heq2, <- Heq1. apply norm_subst_ok. trivial. Qed. Fixpoint mkmult_pow (r x:R) (p: positive) {struct p} : R := match p with | xH => r*x | xO p => mkmult_pow (mkmult_pow r x p) x p | xI p => mkmult_pow (mkmult_pow (r*x) x p) x p end. Definition mkpow x p := match p with | xH => x | xO p => mkmult_pow x x (Pos.pred_double p) | xI p => mkmult_pow x x (xO p) end. Definition mkopp_pow x p := match p with | xH => -x | xO p => mkmult_pow (-x) x (Pos.pred_double p) | xI p => mkmult_pow (-x) x (xO p) end. Definition Pphi_dev := Pphi_avoid mkpow mkopp_pow mkmult_pow. Lemma mkmult_pow_ok p r x : mkmult_pow r x p == r * x^p. Proof. revert r; induction p;intros;simpl;Esimpl;rewrite !IHp;Esimpl. Qed. Lemma mkpow_ok p x : mkpow x p == x^p. Proof. destruct p;simpl;intros;Esimpl. - rewrite !mkmult_pow_ok;Esimpl. 
- rewrite mkmult_pow_ok;Esimpl.
  change x with (x^1) at 1.
  now rewrite <- pow_pos_add, Pos.add_1_r, Pos.succ_pred_double.
Qed.

Lemma mkopp_pow_ok p x : mkopp_pow x p == - x^p.
Proof.
 destruct p;simpl;intros;Esimpl.
 - rewrite !mkmult_pow_ok;Esimpl.
 - rewrite mkmult_pow_ok;Esimpl.
   change x with (x^1) at 1.
   now rewrite <- pow_pos_add, Pos.add_1_r, Pos.succ_pred_double.
Qed.

Lemma Pphi_dev_ok : forall P fv, Pphi_dev fv P == P@fv.
Proof.
 unfold Pphi_dev;intros;apply Pphi_avoid_ok.
 - intros;apply mkpow_ok.
 - intros;apply mkopp_pow_ok.
 - intros;apply mkmult_pow_ok.
Qed.

Lemma ring_rw_correct : forall n lH l,
      interp_PElist l lH ->
      forall lmp, mk_monpol_list lH = lmp ->
      forall pe npe, norm_subst n lmp pe = npe ->
      PEeval l pe == Pphi_dev l npe.
Proof.
 intros n lH l H1 lmp Heq1 pe npe Heq2.
 rewrite Pphi_dev_ok. rewrite <- Heq2;rewrite <- Heq1.
 apply norm_subst_ok. trivial.
Qed.

End MakeRingPol.
coq-8.4pl2/plugins/setoid_ring/Field_theory.v0000640000175000001440000017623512010532755020440 0ustar notinusers
(************************************************************************)
(*  v      *   The Coq Proof Assistant  /  The Coq Development Team     *)
(* <O___,, *   INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2012     *)
(*   \VV/  **************************************************************)
(*    //   *      This file is distributed under the terms of the       *)
(*         *       GNU Lesser General Public License Version 2.1        *)
(************************************************************************)

Require Ring.
Import Ring_polynom Ring_tac Ring_theory InitialRing Setoid List.
Require Import ZArith_base.
Set Implicit Arguments.

Section MakeFieldPol.

 (* Field elements *)
 Variable R : Type.
 Variable (rO rI : R) (radd rmul rsub : R->R->R) (ropp : R->R).
 Variable (rdiv : R -> R -> R) (rinv : R -> R).
 Variable req : R -> R -> Prop.

 Notation "0" := rO. Notation "1" := rI.
 Notation "x + y" := (radd x y). Notation "x * y " := (rmul x y).
 Notation "x - y " := (rsub x y). Notation "x / y" := (rdiv x y).
 Notation "- x" := (ropp x). Notation "/ x" := (rinv x).
 Notation "x == y" := (req x y) (at level 70, no associativity).

 (* Equality properties *)
 Variable Rsth : Equivalence req.
 Variable Reqe : ring_eq_ext radd rmul ropp req.
 Variable SRinv_ext : forall p q, p == q -> / p == / q.

 (* Field properties *)
 Record almost_field_theory : Prop := mk_afield {
    AF_AR : almost_ring_theory rO rI radd rmul rsub ropp req;
    AF_1_neq_0 : ~ 1 == 0;
    AFdiv_def : forall p q, p / q == p * / q;
    AFinv_l : forall p, ~ p == 0 -> / p * p == 1
 }.

Section AlmostField.

 Variable AFth : almost_field_theory.
 Let ARth := AFth.(AF_AR).
 Let rI_neq_rO := AFth.(AF_1_neq_0).
 Let rdiv_def := AFth.(AFdiv_def).
 Let rinv_l := AFth.(AFinv_l).

 (* Coefficients *)
 Variable C: Type.
 Variable (cO cI: C) (cadd cmul csub : C->C->C) (copp : C->C).
 Variable ceqb : C->C->bool.
 Variable phi : C -> R.
 Variable CRmorph : ring_morph rO rI radd rmul rsub ropp req
                               cO cI cadd cmul csub copp ceqb phi.

Lemma ceqb_rect : forall c1 c2 (A:Type) (x y:A) (P:A->Type),
  (phi c1 == phi c2 -> P x) -> P y -> P (if ceqb c1 c2 then x else y).
Proof.
intros.
generalize (fun h => X (morph_eq CRmorph c1 c2 h)).
case (ceqb c1 c2); auto.
Qed.

 (* C notations *)
 Notation "x +! y" := (cadd x y) (at level 50).
 Notation "x *! y " := (cmul x y) (at level 40).
 Notation "x -! y " := (csub x y) (at level 50).
 Notation "-! x" := (copp x) (at level 35).
 Notation " x ?=! y" := (ceqb x y) (at level 70, no associativity).
 Notation "[ x ]" := (phi x) (at level 0).

 (* Useful tactics *)
 Add Morphism radd : radd_ext. exact (Radd_ext Reqe). Qed.
 Add Morphism rmul : rmul_ext. exact (Rmul_ext Reqe). Qed.
 Add Morphism ropp : ropp_ext. exact (Ropp_ext Reqe). Qed.
 Add Morphism rsub : rsub_ext. exact (ARsub_ext Rsth Reqe ARth). Qed.
 Add Morphism rinv : rinv_ext. exact SRinv_ext. Qed.

Let eq_trans := Setoid.Seq_trans _ _ Rsth.
Let eq_sym := Setoid.Seq_sym _ _ Rsth.
Let eq_refl := Setoid.Seq_refl _ _ Rsth.

Hint Resolve eq_refl rdiv_def rinv_l rI_neq_rO CRmorph.(morph1) .
Hint Resolve (Rmul_ext Reqe) (Rmul_ext Reqe) (Radd_ext Reqe)
 (ARsub_ext Rsth Reqe ARth) (Ropp_ext Reqe) SRinv_ext.
Hint Resolve (ARadd_0_l ARth) (ARadd_comm ARth) (ARadd_assoc ARth) (ARmul_1_l ARth) (ARmul_0_l ARth) (ARmul_comm ARth) (ARmul_assoc ARth) (ARdistr_l ARth) (ARopp_mul_l ARth) (ARopp_add ARth) (ARsub_def ARth) . (* Power coefficients *) Variable Cpow : Type. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Variable pow_th : power_theory rI rmul req Cp_phi rpow. (* sign function *) Variable get_sign : C -> option C. Variable get_sign_spec : sign_theory copp ceqb get_sign. Variable cdiv:C -> C -> C*C. Variable cdiv_th : div_theory req cadd cmul phi cdiv. Notation NPEeval := (PEeval rO radd rmul rsub ropp phi Cp_phi rpow). Notation Nnorm:= (norm_subst cO cI cadd cmul csub copp ceqb cdiv). Notation NPphi_dev := (Pphi_dev rO rI radd rmul rsub ropp cO cI ceqb phi get_sign). Notation NPphi_pow := (Pphi_pow rO rI radd rmul rsub ropp cO cI ceqb phi Cp_phi rpow get_sign). (* add abstract semi-ring to help with some proofs *) Add Ring Rring : (ARth_SRth ARth). Local Hint Extern 2 (_ == _) => f_equiv. (* additional ring properties *) Lemma rsub_0_l : forall r, 0 - r == - r. intros; rewrite (ARsub_def ARth);ring. Qed. Lemma rsub_0_r : forall r, r - 0 == r. intros; rewrite (ARsub_def ARth). rewrite (ARopp_zero Rsth Reqe ARth); ring. Qed. (*************************************************************************** Properties of division ***************************************************************************) Theorem rdiv_simpl: forall p q, ~ q == 0 -> q * (p / q) == p. Proof. intros p q H. rewrite rdiv_def. transitivity (/ q * q * p); [ ring | idtac ]. rewrite rinv_l; auto. Qed. Hint Resolve rdiv_simpl . Instance SRdiv_ext: Proper (req ==> req ==> req) rdiv. Proof. intros p1 p2 Ep q1 q2 Eq. transitivity (p1 * / q1); auto. transitivity (p2 * / q2); auto. Qed. Hint Resolve SRdiv_ext. Lemma rmul_reg_l : forall p q1 q2, ~ p == 0 -> p * q1 == p * q2 -> q1 == q2. Proof. intros p q1 q2 H EQ. rewrite <- (@rdiv_simpl q1 p) by trivial. rewrite <- (@rdiv_simpl q2 p) by trivial. rewrite !rdiv_def, !(ARmul_assoc ARth). now rewrite EQ. Qed. Theorem field_is_integral_domain : forall r1 r2, ~ r1 == 0 -> ~ r2 == 0 -> ~ r1 * r2 == 0. Proof. intros r1 r2 H1 H2. contradict H2. transitivity (1 * r2); auto. transitivity (/ r1 * r1 * r2); auto. rewrite <- (ARmul_assoc ARth). rewrite H2. apply ARmul_0_r with (1 := Rsth) (2 := ARth). Qed. Theorem ropp_neq_0 : forall r, ~ -(1) == 0 -> ~ r == 0 -> ~ -r == 0. intros. setoid_replace (- r) with (- (1) * r). apply field_is_integral_domain; trivial. rewrite <- (ARopp_mul_l ARth). rewrite (ARmul_1_l ARth). reflexivity. Qed. Theorem rdiv_r_r : forall r, ~ r == 0 -> r / r == 1. intros. rewrite (AFdiv_def AFth). rewrite (ARmul_comm ARth). apply (AFinv_l AFth). trivial. Qed. Theorem rdiv1: forall r, r == r / 1. intros r; transitivity (1 * (r / 1)); auto. Qed. Theorem rdiv2: forall r1 r2 r3 r4, ~ r2 == 0 -> ~ r4 == 0 -> r1 / r2 + r3 / r4 == (r1 * r4 + r3 * r2) / (r2 * r4). Proof. intros r1 r2 r3 r4 H H0. assert (~ r2 * r4 == 0) by (apply field_is_integral_domain; trivial). apply rmul_reg_l with (r2 * r4); trivial. rewrite rdiv_simpl; trivial. rewrite (ARdistr_r Rsth Reqe ARth). apply (Radd_ext Reqe). - transitivity (r2 * (r1 / r2) * r4); [ ring | auto ]. - transitivity (r2 * (r4 * (r3 / r4))); auto. transitivity (r2 * r3); auto. Qed. Theorem rdiv2b: forall r1 r2 r3 r4 r5, ~ (r2*r5) == 0 -> ~ (r4*r5) == 0 -> r1 / (r2*r5) + r3 / (r4*r5) == (r1 * r4 + r3 * r2) / (r2 * (r4 * r5)). Proof. intros r1 r2 r3 r4 r5 H H0. assert (HH1: ~ r2 == 0) by (intros HH; case H; rewrite HH; ring). 
assert (HH2: ~ r5 == 0) by (intros HH; case H; rewrite HH; ring). assert (HH3: ~ r4 == 0) by (intros HH; case H0; rewrite HH; ring). assert (HH4: ~ r2 * (r4 * r5) == 0) by (repeat apply field_is_integral_domain; trivial). apply rmul_reg_l with (r2 * (r4 * r5)); trivial. rewrite rdiv_simpl; trivial. rewrite (ARdistr_r Rsth Reqe ARth). apply (Radd_ext Reqe). transitivity ((r2 * r5) * (r1 / (r2 * r5)) * r4); [ ring | auto ]. transitivity ((r4 * r5) * (r3 / (r4 * r5)) * r2); [ ring | auto ]. Qed. Theorem rdiv5: forall r1 r2, - (r1 / r2) == - r1 / r2. Proof. intros r1 r2. transitivity (- (r1 * / r2)); auto. transitivity (- r1 * / r2); auto. Qed. Hint Resolve rdiv5 . Theorem rdiv3 r1 r2 r3 r4 : ~ r2 == 0 -> ~ r4 == 0 -> r1 / r2 - r3 / r4 == (r1 * r4 - r3 * r2) / (r2 * r4). Proof. intros H2 H4. assert (~ r2 * r4 == 0) by (apply field_is_integral_domain; trivial). transitivity (r1 / r2 + - (r3 / r4)); auto. transitivity (r1 / r2 + - r3 / r4); auto. transitivity ((r1 * r4 + - r3 * r2) / (r2 * r4)). apply rdiv2; auto. f_equiv. transitivity (r1 * r4 + - (r3 * r2)); auto. Qed. Theorem rdiv3b: forall r1 r2 r3 r4 r5, ~ (r2 * r5) == 0 -> ~ (r4 * r5) == 0 -> r1 / (r2*r5) - r3 / (r4*r5) == (r1 * r4 - r3 * r2) / (r2 * (r4 * r5)). Proof. intros r1 r2 r3 r4 r5 H H0. transitivity (r1 / (r2 * r5) + - (r3 / (r4 * r5))); auto. transitivity (r1 / (r2 * r5) + - r3 / (r4 * r5)); auto. transitivity ((r1 * r4 + - r3 * r2) / (r2 * (r4 * r5))). apply rdiv2b; auto; try ring. apply (SRdiv_ext); auto. transitivity (r1 * r4 + - (r3 * r2)); symmetry; auto. Qed. Theorem rdiv6: forall r1 r2, ~ r1 == 0 -> ~ r2 == 0 -> / (r1 / r2) == r2 / r1. intros r1 r2 H H0. assert (~ r1 / r2 == 0) as Hk. intros H1; case H. transitivity (r2 * (r1 / r2)); auto. rewrite H1; ring. apply rmul_reg_l with (r1 / r2); auto. transitivity (/ (r1 / r2) * (r1 / r2)); auto. transitivity 1; auto. repeat rewrite rdiv_def. transitivity (/ r1 * r1 * (/ r2 * r2)); [ idtac | ring ]. repeat rewrite rinv_l; auto. Qed. Hint Resolve rdiv6 . Theorem rdiv4: forall r1 r2 r3 r4, ~ r2 == 0 -> ~ r4 == 0 -> (r1 / r2) * (r3 / r4) == (r1 * r3) / (r2 * r4). Proof. intros r1 r2 r3 r4 H H0. assert (~ r2 * r4 == 0) by (apply field_is_integral_domain; trivial). apply rmul_reg_l with (r2 * r4); trivial. rewrite rdiv_simpl; trivial. transitivity (r2 * (r1 / r2) * (r4 * (r3 / r4))); [ ring | idtac ]. repeat rewrite rdiv_simpl; trivial. Qed. Theorem rdiv4b: forall r1 r2 r3 r4 r5 r6, ~ r2 * r5 == 0 -> ~ r4 * r6 == 0 -> ((r1 * r6) / (r2 * r5)) * ((r3 * r5) / (r4 * r6)) == (r1 * r3) / (r2 * r4). Proof. intros r1 r2 r3 r4 r5 r6 H H0. rewrite rdiv4; auto. transitivity ((r5 * r6) * (r1 * r3) / ((r5 * r6) * (r2 * r4))). apply SRdiv_ext; ring. assert (HH: ~ r5*r6 == 0). apply field_is_integral_domain. intros H1; case H; rewrite H1; ring. intros H1; case H0; rewrite H1; ring. rewrite <- rdiv4 ; auto. rewrite rdiv_r_r; auto. apply field_is_integral_domain. intros H1; case H; rewrite H1; ring. intros H1; case H0; rewrite H1; ring. Qed. Theorem rdiv7: forall r1 r2 r3 r4, ~ r2 == 0 -> ~ r3 == 0 -> ~ r4 == 0 -> (r1 / r2) / (r3 / r4) == (r1 * r4) / (r2 * r3). Proof. intros. rewrite (rdiv_def (r1 / r2)). rewrite rdiv6; trivial. apply rdiv4; trivial. Qed. Theorem rdiv7b: forall r1 r2 r3 r4 r5 r6, ~ r2 * r6 == 0 -> ~ r3 * r5 == 0 -> ~ r4 * r6 == 0 -> ((r1 * r5) / (r2 * r6)) / ((r3 * r5) / (r4 * r6)) == (r1 * r4) / (r2 * r3). Proof. intros. rewrite rdiv7; auto. transitivity ((r5 * r6) * (r1 * r4) / ((r5 * r6) * (r2 * r3))). apply SRdiv_ext; ring. assert (HH: ~ r5*r6 == 0). 
apply field_is_integral_domain. intros H2; case H0; rewrite H2; ring. intros H2; case H1; rewrite H2; ring. rewrite <- rdiv4 ; auto. rewrite rdiv_r_r; auto. apply field_is_integral_domain. intros H2; case H; rewrite H2; ring. intros H2; case H0; rewrite H2; ring. Qed. Theorem rdiv8: forall r1 r2, ~ r2 == 0 -> r1 == 0 -> r1 / r2 == 0. intros r1 r2 H H0. transitivity (r1 * / r2); auto. transitivity (0 * / r2); auto. Qed. Theorem cross_product_eq : forall r1 r2 r3 r4, ~ r2 == 0 -> ~ r4 == 0 -> r1 * r4 == r3 * r2 -> r1 / r2 == r3 / r4. intros. transitivity (r1 / r2 * (r4 / r4)). rewrite rdiv_r_r; trivial. symmetry . apply (ARmul_1_r Rsth ARth). rewrite rdiv4; trivial. rewrite H1. rewrite (ARmul_comm ARth r2 r4). rewrite <- rdiv4; trivial. rewrite rdiv_r_r by trivial. apply (ARmul_1_r Rsth ARth). Qed. (*************************************************************************** Some equality test ***************************************************************************) (* equality test *) Fixpoint PExpr_eq (e1 e2 : PExpr C) {struct e1} : bool := match e1, e2 with PEc c1, PEc c2 => ceqb c1 c2 | PEX p1, PEX p2 => Pos.eqb p1 p2 | PEadd e3 e5, PEadd e4 e6 => if PExpr_eq e3 e4 then PExpr_eq e5 e6 else false | PEsub e3 e5, PEsub e4 e6 => if PExpr_eq e3 e4 then PExpr_eq e5 e6 else false | PEmul e3 e5, PEmul e4 e6 => if PExpr_eq e3 e4 then PExpr_eq e5 e6 else false | PEopp e3, PEopp e4 => PExpr_eq e3 e4 | PEpow e3 n3, PEpow e4 n4 => if N.eqb n3 n4 then PExpr_eq e3 e4 else false | _, _ => false end. Add Morphism (pow_pos rmul) with signature req ==> eq ==> req as pow_morph. intros x y H p;induction p as [p IH| p IH|];simpl;auto;ring[IH]. Qed. Add Morphism (pow_N rI rmul) with signature req ==> eq ==> req as pow_N_morph. intros x y H [|p];simpl;auto. apply pow_morph;trivial. Qed. Theorem PExpr_eq_semi_correct: forall l e1 e2, PExpr_eq e1 e2 = true -> NPEeval l e1 == NPEeval l e2. intros l e1; elim e1. intros c1; intros e2; elim e2; simpl; (try (intros; discriminate)). intros c2; apply (morph_eq CRmorph). intros p1; intros e2; elim e2; simpl; (try (intros; discriminate)). intros p2; case Pos.eqb_spec; intros; now subst. intros e3 rec1 e5 rec2 e2; case e2; simpl; (try (intros; discriminate)). intros e4 e6; generalize (rec1 e4); case (PExpr_eq e3 e4); (try (intros; discriminate)); generalize (rec2 e6); case (PExpr_eq e5 e6); (try (intros; discriminate)); auto. intros e3 rec1 e5 rec2 e2; case e2; simpl; (try (intros; discriminate)). intros e4 e6; generalize (rec1 e4); case (PExpr_eq e3 e4); (try (intros; discriminate)); generalize (rec2 e6); case (PExpr_eq e5 e6); (try (intros; discriminate)); auto. intros e3 rec1 e5 rec2 e2; case e2; simpl; (try (intros; discriminate)). intros e4 e6; generalize (rec1 e4); case (PExpr_eq e3 e4); (try (intros; discriminate)); generalize (rec2 e6); case (PExpr_eq e5 e6); (try (intros; discriminate)); auto. intros e3 rec e2; (case e2; simpl; (try (intros; discriminate))). intros e4; generalize (rec e4); case (PExpr_eq e3 e4); (try (intros; discriminate)); auto. intros e3 rec n3 e2;(case e2;simpl;(try (intros;discriminate))). intros e4 n4; case N.eqb_spec; try discriminate; intros EQ H; subst. repeat rewrite pow_th.(rpow_pow_N). rewrite (rec _ H);auto. Qed. (* add *) Definition NPEadd e1 e2 := match e1, e2 with PEc c1, PEc c2 => PEc (cadd c1 c2) | PEc c, _ => if ceqb c cO then e2 else PEadd e1 e2 | _, PEc c => if ceqb c cO then e1 else PEadd e1 e2 (* Peut t'on factoriser ici ??? *) | _, _ => PEadd e1 e2 end. 
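(* [NPEadd] is a smart constructor: it folds constants and drops additions of
   a syntactic 0.  For instance [NPEadd (PEc c1) (PEc c2)] is
   [PEc (cadd c1 c2)], and, provided [ceqb cO cO] computes to [true]
   (true of any concrete coefficient instance, though not forced by the
   morphism), [NPEadd (PEc cO) (PEX C 1)] is just [PEX C 1].
   [NPEadd_correct] below shows these simplifications preserve [NPEeval]. *)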
Theorem NPEadd_correct: forall l e1 e2, NPEeval l (NPEadd e1 e2) == NPEeval l (PEadd e1 e2). Proof. intros l e1 e2. destruct e1; destruct e2; simpl; try reflexivity; try apply ceqb_rect; try (intro eq_c; rewrite eq_c); simpl;try apply eq_refl; try (ring [(morph0 CRmorph)]). apply (morph_add CRmorph). Qed. Definition NPEpow x n := match n with | N0 => PEc cI | Npos p => if Pos.eqb p xH then x else match x with | PEc c => if ceqb c cI then PEc cI else if ceqb c cO then PEc cO else PEc (pow_pos cmul c p) | _ => PEpow x n end end. Theorem NPEpow_correct : forall l e n, NPEeval l (NPEpow e n) == NPEeval l (PEpow e n). Proof. destruct n;simpl. rewrite pow_th.(rpow_pow_N);simpl;auto. fold (p =? 1)%positive. case Pos.eqb_spec; intros H; (rewrite H || clear H). now rewrite pow_th.(rpow_pow_N). destruct e;simpl;auto. repeat apply ceqb_rect;simpl;intros;rewrite pow_th.(rpow_pow_N);simpl. symmetry;induction p;simpl;trivial; ring [IHp H CRmorph.(morph1)]. symmetry; induction p;simpl;trivial;ring [IHp CRmorph.(morph0)]. induction p;simpl;auto;repeat rewrite CRmorph.(morph_mul);ring [IHp]. Qed. (* mul *) Fixpoint NPEmul (x y : PExpr C) {struct x} : PExpr C := match x, y with PEc c1, PEc c2 => PEc (cmul c1 c2) | PEc c, _ => if ceqb c cI then y else if ceqb c cO then PEc cO else PEmul x y | _, PEc c => if ceqb c cI then x else if ceqb c cO then PEc cO else PEmul x y | PEpow e1 n1, PEpow e2 n2 => if N.eqb n1 n2 then NPEpow (NPEmul e1 e2) n1 else PEmul x y | _, _ => PEmul x y end. Lemma pow_pos_mul : forall x y p, pow_pos rmul (x * y) p == pow_pos rmul x p * pow_pos rmul y p. induction p;simpl;auto;try ring [IHp]. Qed. Theorem NPEmul_correct : forall l e1 e2, NPEeval l (NPEmul e1 e2) == NPEeval l (PEmul e1 e2). induction e1;destruct e2; simpl;try reflexivity; repeat apply ceqb_rect; try (intro eq_c; rewrite eq_c); simpl; try reflexivity; try ring [(morph0 CRmorph) (morph1 CRmorph)]. apply (morph_mul CRmorph). case N.eqb_spec; intros H; try rewrite <- H; clear H. rewrite NPEpow_correct. simpl. repeat rewrite pow_th.(rpow_pow_N). rewrite IHe1; destruct n;simpl;try ring. apply pow_pos_mul. simpl;auto. Qed. (* sub *) Definition NPEsub e1 e2 := match e1, e2 with PEc c1, PEc c2 => PEc (csub c1 c2) | PEc c, _ => if ceqb c cO then PEopp e2 else PEsub e1 e2 | _, PEc c => if ceqb c cO then e1 else PEsub e1 e2 (* Peut-on factoriser ici *) | _, _ => PEsub e1 e2 end. Theorem NPEsub_correct: forall l e1 e2, NPEeval l (NPEsub e1 e2) == NPEeval l (PEsub e1 e2). intros l e1 e2. destruct e1; destruct e2; simpl; try reflexivity; try apply ceqb_rect; try (intro eq_c; rewrite eq_c); simpl; try rewrite (morph0 CRmorph); try reflexivity; try (symmetry; apply rsub_0_l); try (symmetry; apply rsub_0_r). apply (morph_sub CRmorph). Qed. (* opp *) Definition NPEopp e1 := match e1 with PEc c1 => PEc (copp c1) | _ => PEopp e1 end. Theorem NPEopp_correct: forall l e1, NPEeval l (NPEopp e1) == NPEeval l (PEopp e1). intros l e1; case e1; simpl; auto. intros; apply (morph_opp CRmorph). Qed. (* simplification *) Fixpoint PExpr_simp (e : PExpr C) : PExpr C := match e with PEadd e1 e2 => NPEadd (PExpr_simp e1) (PExpr_simp e2) | PEmul e1 e2 => NPEmul (PExpr_simp e1) (PExpr_simp e2) | PEsub e1 e2 => NPEsub (PExpr_simp e1) (PExpr_simp e2) | PEopp e1 => NPEopp (PExpr_simp e1) | PEpow e1 n1 => NPEpow (PExpr_simp e1) n1 | _ => e end. Theorem PExpr_simp_correct: forall l e, NPEeval l (PExpr_simp e) == NPEeval l e. intros l e; elim e; simpl; auto. intros e1 He1 e2 He2. transitivity (NPEeval l (PEadd (PExpr_simp e1) (PExpr_simp e2))); auto. 
apply NPEadd_correct. simpl; auto. intros e1 He1 e2 He2. transitivity (NPEeval l (PEsub (PExpr_simp e1) (PExpr_simp e2))); auto. apply NPEsub_correct. simpl; auto. intros e1 He1 e2 He2. transitivity (NPEeval l (PEmul (PExpr_simp e1) (PExpr_simp e2))); auto. apply NPEmul_correct. simpl; auto. intros e1 He1. transitivity (NPEeval l (PEopp (PExpr_simp e1))); auto. apply NPEopp_correct. simpl; auto. intros e1 He1 n;simpl. rewrite NPEpow_correct;simpl. repeat rewrite pow_th.(rpow_pow_N). rewrite He1;auto. Qed. (**************************************************************************** Datastructure ***************************************************************************) (* The input: syntax of a field expression *) Inductive FExpr : Type := FEc: C -> FExpr | FEX: positive -> FExpr | FEadd: FExpr -> FExpr -> FExpr | FEsub: FExpr -> FExpr -> FExpr | FEmul: FExpr -> FExpr -> FExpr | FEopp: FExpr -> FExpr | FEinv: FExpr -> FExpr | FEdiv: FExpr -> FExpr -> FExpr | FEpow: FExpr -> N -> FExpr . Fixpoint FEeval (l : list R) (pe : FExpr) {struct pe} : R := match pe with | FEc c => phi c | FEX x => BinList.nth 0 x l | FEadd x y => FEeval l x + FEeval l y | FEsub x y => FEeval l x - FEeval l y | FEmul x y => FEeval l x * FEeval l y | FEopp x => - FEeval l x | FEinv x => / FEeval l x | FEdiv x y => FEeval l x / FEeval l y | FEpow x n => rpow (FEeval l x) (Cp_phi n) end. Strategy expand [FEeval]. (* The result of the normalisation *) Record linear : Type := mk_linear { num : PExpr C; denum : PExpr C; condition : list (PExpr C) }. (*************************************************************************** Semantics and properties of side condition ***************************************************************************) Fixpoint PCond (l : list R) (le : list (PExpr C)) {struct le} : Prop := match le with | nil => True | e1 :: nil => ~ req (NPEeval l e1) rO | e1 :: l1 => ~ req (NPEeval l e1) rO /\ PCond l l1 end. Theorem PCond_cons_inv_l : forall l a l1, PCond l (a::l1) -> ~ NPEeval l a == 0. intros l a l1 H. destruct l1; simpl in H |- *; trivial. destruct H; trivial. Qed. Theorem PCond_cons_inv_r : forall l a l1, PCond l (a :: l1) -> PCond l l1. intros l a l1 H. destruct l1; simpl in H |- *; trivial. destruct H; trivial. Qed. Theorem PCond_app_inv_l: forall l l1 l2, PCond l (l1 ++ l2) -> PCond l l1. intros l l1 l2; elim l1; simpl app. simpl; auto. destruct l0; simpl in *. destruct l2; firstorder. firstorder. Qed. Theorem PCond_app_inv_r: forall l l1 l2, PCond l (l1 ++ l2) -> PCond l l2. intros l l1 l2; elim l1; simpl app; auto. intros a l0 H H0; apply H; apply PCond_cons_inv_r with ( 1 := H0 ). Qed. (* An unsatisfiable condition: issued when a division by zero is detected *) Definition absurd_PCond := cons (PEc cO) nil. Lemma absurd_PCond_bottom : forall l, ~ PCond l absurd_PCond. unfold absurd_PCond; simpl. red; intros. apply H. apply (morph0 CRmorph). Qed. 
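(* Unfolding the definition above: [PCond l (e1 :: e2 :: nil)] is
   [~ NPEeval l e1 == 0 /\ ~ NPEeval l e2 == 0], while the one-element case
   [PCond l (e :: nil)] is just [~ NPEeval l e == 0], avoiding a trailing
   [/\ True]. *)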
(*************************************************************************** Normalisation ***************************************************************************) Fixpoint isIn (e1:PExpr C) (p1:positive) (e2:PExpr C) (p2:positive) {struct e2}: option (N * PExpr C) := match e2 with | PEmul e3 e4 => match isIn e1 p1 e3 p2 with | Some (N0, e5) => Some (N0, NPEmul e5 (NPEpow e4 (Npos p2))) | Some (Npos p, e5) => match isIn e1 p e4 p2 with | Some (n, e6) => Some (n, NPEmul e5 e6) | None => Some (Npos p, NPEmul e5 (NPEpow e4 (Npos p2))) end | None => match isIn e1 p1 e4 p2 with | Some (n, e5) => Some (n,NPEmul (NPEpow e3 (Npos p2)) e5) | None => None end end | PEpow e3 N0 => None | PEpow e3 (Npos p3) => isIn e1 p1 e3 (Pos.mul p3 p2) | _ => if PExpr_eq e1 e2 then match Z.pos_sub p1 p2 with | Zpos p => Some (Npos p, PEc cI) | Z0 => Some (N0, PEc cI) | Zneg p => Some (N0, NPEpow e2 (Npos p)) end else None end. Definition ZtoN z := match z with Zpos p => Npos p | _ => N0 end. Definition NtoZ n := match n with Npos p => Zpos p | _ => Z0 end. Notation pow_pos_add := (Ring_theory.pow_pos_add Rsth Reqe.(Rmul_ext) ARth.(ARmul_assoc)). Lemma Z_pos_sub_gt p q : (p > q)%positive -> Z.pos_sub p q = Zpos (p - q). Proof. intros; now apply Z.pos_sub_gt, Pos.gt_lt. Qed. Ltac simpl_pos_sub := rewrite ?Z_pos_sub_gt in * by assumption. Lemma isIn_correct_aux : forall l e1 e2 p1 p2, match (if PExpr_eq e1 e2 then match Z.sub (Zpos p1) (Zpos p2) with | Zpos p => Some (Npos p, PEc cI) | Z0 => Some (N0, PEc cI) | Zneg p => Some (N0, NPEpow e2 (Npos p)) end else None) with | Some(n, e3) => NPEeval l (PEpow e2 (Npos p2)) == NPEeval l (PEmul (PEpow e1 (ZtoN (Zpos p1 - NtoZ n))) e3) /\ (Zpos p1 > NtoZ n)%Z | _ => True end. Proof. intros l e1 e2 p1 p2; generalize (PExpr_eq_semi_correct l e1 e2); case (PExpr_eq e1 e2); simpl; auto; intros H. rewrite Z.pos_sub_spec. case Pos.compare_spec;intros;simpl. - repeat rewrite pow_th.(rpow_pow_N);simpl. split. 2:reflexivity. subst. rewrite H by trivial. ring [ (morph1 CRmorph)]. - fold (p2 - p1 =? 1)%positive. fold (NPEpow e2 (Npos (p2 - p1))). rewrite NPEpow_correct;simpl. repeat rewrite pow_th.(rpow_pow_N);simpl. rewrite H;trivial. split. 2:reflexivity. rewrite <- pow_pos_add. now rewrite Pos.add_comm, Pos.sub_add. - repeat rewrite pow_th.(rpow_pow_N);simpl. rewrite H;trivial. rewrite Z.pos_sub_gt by now apply Pos.sub_decr. replace (p1 - (p1 - p2))%positive with p2; [| rewrite Pos.sub_sub_distr, Pos.add_comm; auto using Pos.add_sub, Pos.sub_decr ]. split. simpl. ring [ (morph1 CRmorph)]. now apply Z.lt_gt, Pos.sub_decr. Qed. Lemma pow_pos_pow_pos : forall x p1 p2, pow_pos rmul (pow_pos rmul x p1) p2 == pow_pos rmul x (p1*p2). induction p1;simpl;intros;repeat rewrite pow_pos_mul;repeat rewrite pow_pos_add;simpl. ring [(IHp1 p2)]. ring [(IHp1 p2)]. auto. Qed. Theorem isIn_correct: forall l e1 p1 e2 p2, match isIn e1 p1 e2 p2 with | Some(n, e3) => NPEeval l (PEpow e2 (Npos p2)) == NPEeval l (PEmul (PEpow e1 (ZtoN (Zpos p1 - NtoZ n))) e3) /\ (Zpos p1 > NtoZ n)%Z | _ => True end. Proof. Opaque NPEpow. intros l e1 p1 e2; generalize p1;clear p1;elim e2; intros; try (refine (isIn_correct_aux l e1 _ p1 p2);fail);simpl isIn. generalize (H p1 p2);clear H;destruct (isIn e1 p1 p p2). destruct p3. destruct n. simpl. rewrite NPEmul_correct. simpl; rewrite NPEpow_correct;simpl. repeat rewrite pow_th.(rpow_pow_N);simpl. rewrite pow_pos_mul;intros (H,H1);split;[ring[H]|trivial]. generalize (H0 p4 p2);clear H0;destruct (isIn e1 p4 p0 p2). destruct p5. destruct n;simpl. 
rewrite NPEmul_correct;repeat rewrite pow_th.(rpow_pow_N);simpl. intros (H1,H2) (H3,H4). simpl_pos_sub. simpl in H3. rewrite pow_pos_mul. rewrite H1;rewrite H3. assert (pow_pos rmul (NPEeval l e1) (p1 - p4) * NPEeval l p3 * (pow_pos rmul (NPEeval l e1) p4 * NPEeval l p5) == pow_pos rmul (NPEeval l e1) p4 * pow_pos rmul (NPEeval l e1) (p1 - p4) * NPEeval l p3 *NPEeval l p5) by ring. rewrite H;clear H. rewrite <- pow_pos_add. rewrite Pos.add_comm, Pos.sub_add by (now apply Z.gt_lt in H4). split. symmetry;apply ARth.(ARmul_assoc). reflexivity. repeat rewrite pow_th.(rpow_pow_N);simpl. intros (H1,H2) (H3,H4). simpl_pos_sub. simpl in H1, H3. assert (Zpos p1 > Zpos p6)%Z. apply Zgt_trans with (Zpos p4). exact H4. exact H2. simpl_pos_sub. split. 2:exact H. rewrite pow_pos_mul. simpl;rewrite H1;rewrite H3. assert (pow_pos rmul (NPEeval l e1) (p1 - p4) * NPEeval l p3 * (pow_pos rmul (NPEeval l e1) (p4 - p6) * NPEeval l p5) == pow_pos rmul (NPEeval l e1) (p1 - p4) * pow_pos rmul (NPEeval l e1) (p4 - p6) * NPEeval l p3 * NPEeval l p5) by ring. rewrite H0;clear H0. rewrite <- pow_pos_add. replace (p1 - p4 + (p4 - p6))%positive with (p1 - p6)%positive. rewrite NPEmul_correct. simpl;ring. assert (Zpos p1 - Zpos p6 = Zpos p1 - Zpos p4 + (Zpos p4 - Zpos p6))%Z. change ((Zpos p1 - Zpos p6)%Z = (Zpos p1 + (- Zpos p4) + (Zpos p4 +(- Zpos p6)))%Z). rewrite <- Z.add_assoc. rewrite (Z.add_assoc (- Zpos p4)). simpl. rewrite Z.pos_sub_diag. simpl. reflexivity. unfold Z.sub, Z.opp in H0. simpl in H0. simpl_pos_sub. inversion H0; trivial. simpl. repeat rewrite pow_th.(rpow_pow_N). intros H1 (H2,H3). simpl_pos_sub. rewrite NPEmul_correct;simpl;rewrite NPEpow_correct;simpl. simpl in H2. rewrite pow_th.(rpow_pow_N);simpl. rewrite pow_pos_mul. split. ring [H2]. exact H3. generalize (H0 p1 p2);clear H0;destruct (isIn e1 p1 p0 p2). destruct p3. destruct n;simpl. rewrite NPEmul_correct;simpl;rewrite NPEpow_correct;simpl. repeat rewrite pow_th.(rpow_pow_N);simpl. intros (H1,H2);split;trivial. rewrite pow_pos_mul;ring [H1]. rewrite NPEmul_correct;simpl;rewrite NPEpow_correct;simpl. repeat rewrite pow_th.(rpow_pow_N);simpl. rewrite pow_pos_mul. intros (H1, H2);rewrite H1;split. simpl_pos_sub. simpl in H1;ring [H1]. trivial. trivial. destruct n. trivial. generalize (H p1 (p0*p2)%positive);clear H;destruct (isIn e1 p1 p (p0*p2)). destruct p3. destruct n;simpl. repeat rewrite pow_th.(rpow_pow_N). simpl. intros (H1,H2);split. rewrite pow_pos_pow_pos. trivial. trivial. repeat rewrite pow_th.(rpow_pow_N). simpl. intros (H1,H2);split;trivial. rewrite pow_pos_pow_pos;trivial. trivial. Qed. Record rsplit : Type := mk_rsplit { rsplit_left : PExpr C; rsplit_common : PExpr C; rsplit_right : PExpr C}. (* Stupid name clash *) Notation left := rsplit_left. Notation right := rsplit_right. Notation common := rsplit_common. Fixpoint split_aux (e1: PExpr C) (p:positive) (e2:PExpr C) {struct e1}: rsplit := match e1 with | PEmul e3 e4 => let r1 := split_aux e3 p e2 in let r2 := split_aux e4 p (right r1) in mk_rsplit (NPEmul (left r1) (left r2)) (NPEmul (common r1) (common r2)) (right r2) | PEpow e3 N0 => mk_rsplit (PEc cI) (PEc cI) e2 | PEpow e3 (Npos p3) => split_aux e3 (Pos.mul p3 p) e2 | _ => match isIn e1 p e2 xH with | Some (N0,e3) => mk_rsplit (PEc cI) (NPEpow e1 (Npos p)) e3 | Some (Npos q, e3) => mk_rsplit (NPEpow e1 (Npos q)) (NPEpow e1 (Npos (p - q))) e3 | None => mk_rsplit (NPEpow e1 (Npos p)) (PEc cI) e2 end end. 
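(* Reading of [split_aux e1 p e2]: [left] is what remains of [e1^p],
   [right] what remains of [e2], and [common] their shared factor, so that
   [e1^p == left * common] and [e2 == right * common] under [NPEeval]
   (this is exactly [split_aux_correct] below). *)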
Lemma split_aux_correct_1 : forall l e1 p e2, let res := match isIn e1 p e2 xH with | Some (N0,e3) => mk_rsplit (PEc cI) (NPEpow e1 (Npos p)) e3 | Some (Npos q, e3) => mk_rsplit (NPEpow e1 (Npos q)) (NPEpow e1 (Npos (p - q))) e3 | None => mk_rsplit (NPEpow e1 (Npos p)) (PEc cI) e2 end in NPEeval l (PEpow e1 (Npos p)) == NPEeval l (NPEmul (left res) (common res)) /\ NPEeval l e2 == NPEeval l (NPEmul (right res) (common res)). Proof. intros. unfold res;clear res; generalize (isIn_correct l e1 p e2 xH). destruct (isIn e1 p e2 1). destruct p0. Opaque NPEpow NPEmul. destruct n;simpl; (repeat rewrite NPEmul_correct;simpl; repeat rewrite NPEpow_correct;simpl; repeat rewrite pow_th.(rpow_pow_N);simpl). intros (H, Hgt);split;try ring [H CRmorph.(morph1)]. intros (H, Hgt). simpl_pos_sub. simpl in H;split;try ring [H]. apply Z.gt_lt in Hgt. now rewrite <- pow_pos_add, Pos.add_comm, Pos.sub_add. simpl;intros. repeat rewrite NPEmul_correct;simpl. rewrite NPEpow_correct;simpl. split;ring [CRmorph.(morph1)]. Qed. Theorem split_aux_correct: forall l e1 p e2, NPEeval l (PEpow e1 (Npos p)) == NPEeval l (NPEmul (left (split_aux e1 p e2)) (common (split_aux e1 p e2))) /\ NPEeval l e2 == NPEeval l (NPEmul (right (split_aux e1 p e2)) (common (split_aux e1 p e2))). Proof. intros l; induction e1;intros k e2; try refine (split_aux_correct_1 l _ k e2);simpl. generalize (IHe1_1 k e2); clear IHe1_1. generalize (IHe1_2 k (rsplit_right (split_aux e1_1 k e2))); clear IHe1_2. simpl. repeat (rewrite NPEmul_correct;simpl). repeat rewrite pow_th.(rpow_pow_N);simpl. intros (H1,H2) (H3,H4);split. rewrite pow_pos_mul. rewrite H1;rewrite H3. ring. rewrite H4;rewrite H2;ring. destruct n;simpl. split. repeat rewrite pow_th.(rpow_pow_N);simpl. rewrite NPEmul_correct. simpl. induction k;simpl;try ring [CRmorph.(morph1)]; ring [IHk CRmorph.(morph1)]. rewrite NPEmul_correct;simpl. ring [CRmorph.(morph1)]. generalize (IHe1 (p*k)%positive e2);clear IHe1;simpl. repeat rewrite NPEmul_correct;simpl. repeat rewrite pow_th.(rpow_pow_N);simpl. rewrite pow_pos_pow_pos. intros [H1 H2];split;ring [H1 H2]. Qed. Definition split e1 e2 := split_aux e1 xH e2. Theorem split_correct_l: forall l e1 e2, NPEeval l e1 == NPEeval l (NPEmul (left (split e1 e2)) (common (split e1 e2))). Proof. intros l e1 e2; case (split_aux_correct l e1 xH e2);simpl. rewrite pow_th.(rpow_pow_N);simpl;auto. Qed. Theorem split_correct_r: forall l e1 e2, NPEeval l e2 == NPEeval l (NPEmul (right (split e1 e2)) (common (split e1 e2))). Proof. intros l e1 e2; case (split_aux_correct l e1 xH e2);simpl;auto. Qed. 
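(* For example, [split (PEmul (PEX C 1) (PEX C 2)) (PEmul (PEX C 2) (PEX C 3))]
   returns a triple whose [left], [common] and [right] components evaluate
   like [PEX C 1], [PEX C 2] and [PEX C 3] respectively (syntactically, up to
   trivial factors of 1 left by the smart constructors).  [Fnorm] below splits
   the two denominators of a sum in this way, so that their common part occurs
   only once in the resulting denominator (cf. [rdiv2b] above). *)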
Fixpoint Fnorm (e : FExpr) : linear := match e with | FEc c => mk_linear (PEc c) (PEc cI) nil | FEX x => mk_linear (PEX C x) (PEc cI) nil | FEadd e1 e2 => let x := Fnorm e1 in let y := Fnorm e2 in let s := split (denum x) (denum y) in mk_linear (NPEadd (NPEmul (num x) (right s)) (NPEmul (num y) (left s))) (NPEmul (left s) (NPEmul (right s) (common s))) (condition x ++ condition y) | FEsub e1 e2 => let x := Fnorm e1 in let y := Fnorm e2 in let s := split (denum x) (denum y) in mk_linear (NPEsub (NPEmul (num x) (right s)) (NPEmul (num y) (left s))) (NPEmul (left s) (NPEmul (right s) (common s))) (condition x ++ condition y) | FEmul e1 e2 => let x := Fnorm e1 in let y := Fnorm e2 in let s1 := split (num x) (denum y) in let s2 := split (num y) (denum x) in mk_linear (NPEmul (left s1) (left s2)) (NPEmul (right s2) (right s1)) (condition x ++ condition y) | FEopp e1 => let x := Fnorm e1 in mk_linear (NPEopp (num x)) (denum x) (condition x) | FEinv e1 => let x := Fnorm e1 in mk_linear (denum x) (num x) (num x :: condition x) | FEdiv e1 e2 => let x := Fnorm e1 in let y := Fnorm e2 in let s1 := split (num x) (num y) in let s2 := split (denum x) (denum y) in mk_linear (NPEmul (left s1) (right s2)) (NPEmul (left s2) (right s1)) (num y :: condition x ++ condition y) | FEpow e1 n => let x := Fnorm e1 in mk_linear (NPEpow (num x) n) (NPEpow (denum x) n) (condition x) end. (* Example *) (* Eval compute in (Fnorm (FEdiv (FEc cI) (FEadd (FEinv (FEX xH%positive)) (FEinv (FEX (xO xH)%positive))))). *) Lemma pow_pos_not_0 : forall x, ~x==0 -> forall p, ~pow_pos rmul x p == 0. Proof. induction p;simpl. intro Hp;assert (H1 := @rmul_reg_l _ (pow_pos rmul x p * pow_pos rmul x p) 0 H). apply IHp. rewrite (@rmul_reg_l _ (pow_pos rmul x p) 0 IHp). reflexivity. rewrite H1. ring. rewrite Hp;ring. intro Hp;apply IHp. rewrite (@rmul_reg_l _ (pow_pos rmul x p) 0 IHp). reflexivity. rewrite Hp;ring. trivial. Qed. Theorem Pcond_Fnorm: forall l e, PCond l (condition (Fnorm e)) -> ~ NPEeval l (denum (Fnorm e)) == 0. intros l e; elim e. simpl; intros _ _; rewrite (morph1 CRmorph); exact rI_neq_rO. simpl; intros _ _; rewrite (morph1 CRmorph); exact rI_neq_rO. intros e1 Hrec1 e2 Hrec2 Hcond. simpl condition in Hcond. simpl denum. rewrite NPEmul_correct. simpl. apply field_is_integral_domain. intros HH; case Hrec1; auto. apply PCond_app_inv_l with (1 := Hcond). rewrite (split_correct_l l (denum (Fnorm e1)) (denum (Fnorm e2))). rewrite NPEmul_correct; simpl; rewrite HH; ring. intros HH; case Hrec2; auto. apply PCond_app_inv_r with (1 := Hcond). rewrite (split_correct_r l (denum (Fnorm e1)) (denum (Fnorm e2))); auto. intros e1 Hrec1 e2 Hrec2 Hcond. simpl condition in Hcond. simpl denum. rewrite NPEmul_correct. simpl. apply field_is_integral_domain. intros HH; case Hrec1; auto. apply PCond_app_inv_l with (1 := Hcond). rewrite (split_correct_l l (denum (Fnorm e1)) (denum (Fnorm e2))). rewrite NPEmul_correct; simpl; rewrite HH; ring. intros HH; case Hrec2; auto. apply PCond_app_inv_r with (1 := Hcond). rewrite (split_correct_r l (denum (Fnorm e1)) (denum (Fnorm e2))); auto. intros e1 Hrec1 e2 Hrec2 Hcond. simpl condition in Hcond. simpl denum. rewrite NPEmul_correct. simpl. apply field_is_integral_domain. intros HH; apply Hrec1. apply PCond_app_inv_l with (1 := Hcond). rewrite (split_correct_r l (num (Fnorm e2)) (denum (Fnorm e1))). rewrite NPEmul_correct; simpl; rewrite HH; ring. intros HH; apply Hrec2. apply PCond_app_inv_r with (1 := Hcond). rewrite (split_correct_r l (num (Fnorm e1)) (denum (Fnorm e2))). 
rewrite NPEmul_correct; simpl; rewrite HH; ring. intros e1 Hrec1 Hcond. simpl condition in Hcond. simpl denum. auto. intros e1 Hrec1 Hcond. simpl condition in Hcond. simpl denum. apply PCond_cons_inv_l with (1:=Hcond). intros e1 Hrec1 e2 Hrec2 Hcond. simpl condition in Hcond. simpl denum. rewrite NPEmul_correct. simpl. apply field_is_integral_domain. intros HH; apply Hrec1. specialize PCond_cons_inv_r with (1:=Hcond); intro Hcond1. apply PCond_app_inv_l with (1 := Hcond1). rewrite (split_correct_l l (denum (Fnorm e1)) (denum (Fnorm e2))). rewrite NPEmul_correct; simpl; rewrite HH; ring. intros HH; apply PCond_cons_inv_l with (1:=Hcond). rewrite (split_correct_r l (num (Fnorm e1)) (num (Fnorm e2))). rewrite NPEmul_correct; simpl; rewrite HH; ring. simpl;intros e1 Hrec1 n Hcond. rewrite NPEpow_correct. simpl;rewrite pow_th.(rpow_pow_N). destruct n;simpl;intros. apply AFth.(AF_1_neq_0). apply pow_pos_not_0;auto. Qed. Hint Resolve Pcond_Fnorm. (*************************************************************************** Main theorem ***************************************************************************) Theorem Fnorm_FEeval_PEeval: forall l fe, PCond l (condition (Fnorm fe)) -> FEeval l fe == NPEeval l (num (Fnorm fe)) / NPEeval l (denum (Fnorm fe)). Proof. intros l fe; elim fe; simpl. intros c H; rewrite CRmorph.(morph1); apply rdiv1. intros p H; rewrite CRmorph.(morph1); apply rdiv1. intros e1 He1 e2 He2 HH. assert (HH1: PCond l (condition (Fnorm e1))). apply PCond_app_inv_l with ( 1 := HH ). assert (HH2: PCond l (condition (Fnorm e2))). apply PCond_app_inv_r with ( 1 := HH ). rewrite (He1 HH1); rewrite (He2 HH2). rewrite NPEadd_correct; simpl. repeat rewrite NPEmul_correct; simpl. generalize (split_correct_l l (denum (Fnorm e1)) (denum (Fnorm e2))) (split_correct_r l (denum (Fnorm e1)) (denum (Fnorm e2))). repeat rewrite NPEmul_correct; simpl. intros U1 U2; rewrite U1; rewrite U2. apply rdiv2b; auto. rewrite <- U1; auto. rewrite <- U2; auto. intros e1 He1 e2 He2 HH. assert (HH1: PCond l (condition (Fnorm e1))). apply PCond_app_inv_l with ( 1 := HH ). assert (HH2: PCond l (condition (Fnorm e2))). apply PCond_app_inv_r with ( 1 := HH ). rewrite (He1 HH1); rewrite (He2 HH2). rewrite NPEsub_correct; simpl. repeat rewrite NPEmul_correct; simpl. generalize (split_correct_l l (denum (Fnorm e1)) (denum (Fnorm e2))) (split_correct_r l (denum (Fnorm e1)) (denum (Fnorm e2))). repeat rewrite NPEmul_correct; simpl. intros U1 U2; rewrite U1; rewrite U2. apply rdiv3b; auto. rewrite <- U1; auto. rewrite <- U2; auto. intros e1 He1 e2 He2 HH. assert (HH1: PCond l (condition (Fnorm e1))). apply PCond_app_inv_l with ( 1 := HH ). assert (HH2: PCond l (condition (Fnorm e2))). apply PCond_app_inv_r with ( 1 := HH ). rewrite (He1 HH1); rewrite (He2 HH2). repeat rewrite NPEmul_correct; simpl. generalize (split_correct_l l (num (Fnorm e1)) (denum (Fnorm e2))) (split_correct_r l (num (Fnorm e1)) (denum (Fnorm e2))) (split_correct_l l (num (Fnorm e2)) (denum (Fnorm e1))) (split_correct_r l (num (Fnorm e2)) (denum (Fnorm e1))). repeat rewrite NPEmul_correct; simpl. intros U1 U2 U3 U4; rewrite U1; rewrite U2; rewrite U3; rewrite U4; simpl. apply rdiv4b; auto. rewrite <- U4; auto. rewrite <- U2; auto. intros e1 He1 HH. rewrite NPEopp_correct; simpl; rewrite (He1 HH); apply rdiv5; auto. intros e1 He1 HH. assert (HH1: PCond l (condition (Fnorm e1))). apply PCond_cons_inv_r with ( 1 := HH ). rewrite (He1 HH1); apply rdiv6; auto. apply PCond_cons_inv_l with ( 1 := HH ). intros e1 He1 e2 He2 HH. 
assert (HH1: PCond l (condition (Fnorm e1))). apply PCond_app_inv_l with (condition (Fnorm e2)). apply PCond_cons_inv_r with ( 1 := HH ). assert (HH2: PCond l (condition (Fnorm e2))). apply PCond_app_inv_r with (condition (Fnorm e1)). apply PCond_cons_inv_r with ( 1 := HH ). rewrite (He1 HH1); rewrite (He2 HH2). repeat rewrite NPEmul_correct;simpl. generalize (split_correct_l l (num (Fnorm e1)) (num (Fnorm e2))) (split_correct_r l (num (Fnorm e1)) (num (Fnorm e2))) (split_correct_l l (denum (Fnorm e1)) (denum (Fnorm e2))) (split_correct_r l (denum (Fnorm e1)) (denum (Fnorm e2))). repeat rewrite NPEmul_correct; simpl. intros U1 U2 U3 U4; rewrite U1; rewrite U2; rewrite U3; rewrite U4; simpl. apply rdiv7b; auto. rewrite <- U3; auto. rewrite <- U2; auto. apply PCond_cons_inv_l with ( 1 := HH ). rewrite <- U4; auto. intros e1 He1 n Hcond;assert (He1' := He1 Hcond);clear He1. repeat rewrite NPEpow_correct;simpl;repeat rewrite pow_th.(rpow_pow_N). rewrite He1';clear He1'. destruct n;simpl. apply rdiv1. generalize (NPEeval l (num (Fnorm e1))) (NPEeval l (denum (Fnorm e1))) (Pcond_Fnorm _ _ Hcond). intros r r0 Hdiff;induction p;simpl. repeat (rewrite <- rdiv4;trivial). rewrite IHp. reflexivity. apply pow_pos_not_0;trivial. apply pow_pos_not_0;trivial. intro Hp. apply (pow_pos_not_0 Hdiff p). rewrite (@rmul_reg_l (pow_pos rmul r0 p) (pow_pos rmul r0 p) 0). reflexivity. apply pow_pos_not_0;trivial. ring [Hp]. rewrite <- rdiv4;trivial. rewrite IHp;reflexivity. apply pow_pos_not_0;trivial. apply pow_pos_not_0;trivial. reflexivity. Qed. Theorem Fnorm_crossproduct: forall l fe1 fe2, let nfe1 := Fnorm fe1 in let nfe2 := Fnorm fe2 in NPEeval l (PEmul (num nfe1) (denum nfe2)) == NPEeval l (PEmul (num nfe2) (denum nfe1)) -> PCond l (condition nfe1 ++ condition nfe2) -> FEeval l fe1 == FEeval l fe2. intros l fe1 fe2 nfe1 nfe2 Hcrossprod Hcond; subst nfe1 nfe2. rewrite Fnorm_FEeval_PEeval by apply PCond_app_inv_l with (1 := Hcond). rewrite Fnorm_FEeval_PEeval by apply PCond_app_inv_r with (1 := Hcond). apply cross_product_eq; trivial. apply Pcond_Fnorm. apply PCond_app_inv_l with (1 := Hcond). apply Pcond_Fnorm. apply PCond_app_inv_r with (1 := Hcond). Qed. (* Correctness lemmas of reflexive tactics *) Notation Ninterp_PElist := (interp_PElist rO radd rmul rsub ropp req phi Cp_phi rpow). Notation Nmk_monpol_list := (mk_monpol_list cO cI cadd cmul csub copp ceqb cdiv). Theorem Fnorm_correct: forall n l lpe fe, Ninterp_PElist l lpe -> Peq ceqb (Nnorm n (Nmk_monpol_list lpe) (num (Fnorm fe))) (Pc cO) = true -> PCond l (condition (Fnorm fe)) -> FEeval l fe == 0. intros n l lpe fe Hlpe H H1; apply eq_trans with (1 := Fnorm_FEeval_PEeval l fe H1). apply rdiv8; auto. transitivity (NPEeval l (PEc cO)); auto. rewrite (norm_subst_ok Rsth Reqe ARth CRmorph pow_th cdiv_th n l lpe);auto. change (NPEeval l (PEc cO)) with (Pphi 0 radd rmul phi l (Pc cO)). apply (Peq_ok Rsth Reqe CRmorph);auto. simpl. apply (morph0 CRmorph); auto. Qed. (* simplify a field expression into a fraction *) (* TODO: simplify when den is constant... *) Definition display_linear l num den := NPphi_dev l num / NPphi_dev l den. Definition display_pow_linear l num den := NPphi_pow l num / NPphi_pow l den. Theorem Field_rw_correct : forall n lpe l, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall fe nfe, Fnorm fe = nfe -> PCond l (condition nfe) -> FEeval l fe == display_linear l (Nnorm n lmp (num nfe)) (Nnorm n lmp (denum nfe)). Proof. intros n lpe l Hlpe lmp lmp_eq fe nfe eq_nfe H; subst nfe lmp. 
apply eq_trans with (1 := Fnorm_FEeval_PEeval _ _ H). unfold display_linear; apply SRdiv_ext; eapply (ring_rw_correct Rsth Reqe ARth CRmorph);eauto. Qed. Theorem Field_rw_pow_correct : forall n lpe l, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall fe nfe, Fnorm fe = nfe -> PCond l (condition nfe) -> FEeval l fe == display_pow_linear l (Nnorm n lmp (num nfe)) (Nnorm n lmp (denum nfe)). Proof. intros n lpe l Hlpe lmp lmp_eq fe nfe eq_nfe H; subst nfe lmp. apply eq_trans with (1 := Fnorm_FEeval_PEeval _ _ H). unfold display_pow_linear; apply SRdiv_ext; eapply (ring_rw_pow_correct Rsth Reqe ARth CRmorph);eauto. Qed. Theorem Field_correct : forall n l lpe fe1 fe2, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall nfe1, Fnorm fe1 = nfe1 -> forall nfe2, Fnorm fe2 = nfe2 -> Peq ceqb (Nnorm n lmp (PEmul (num nfe1) (denum nfe2))) (Nnorm n lmp (PEmul (num nfe2) (denum nfe1))) = true -> PCond l (condition nfe1 ++ condition nfe2) -> FEeval l fe1 == FEeval l fe2. Proof. intros n l lpe fe1 fe2 Hlpe lmp eq_lmp nfe1 eq1 nfe2 eq2 Hnorm Hcond; subst nfe1 nfe2 lmp. apply Fnorm_crossproduct; trivial. eapply (ring_correct Rsth Reqe ARth CRmorph); eauto. Qed. (* simplify a field equation : generate the crossproduct and simplify polynomials *) Theorem Field_simplify_eq_old_correct : forall l fe1 fe2 nfe1 nfe2, Fnorm fe1 = nfe1 -> Fnorm fe2 = nfe2 -> NPphi_dev l (Nnorm O nil (PEmul (num nfe1) (denum nfe2))) == NPphi_dev l (Nnorm O nil (PEmul (num nfe2) (denum nfe1))) -> PCond l (condition nfe1 ++ condition nfe2) -> FEeval l fe1 == FEeval l fe2. Proof. intros l fe1 fe2 nfe1 nfe2 eq1 eq2 Hcrossprod Hcond; subst nfe1 nfe2. apply Fnorm_crossproduct; trivial. match goal with [ |- NPEeval l ?x == NPEeval l ?y] => rewrite (ring_rw_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec O nil l I Logic.eq_refl x Logic.eq_refl); rewrite (ring_rw_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec O nil l I Logic.eq_refl y Logic.eq_refl) end. trivial. Qed. Theorem Field_simplify_eq_correct : forall n l lpe fe1 fe2, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall nfe1, Fnorm fe1 = nfe1 -> forall nfe2, Fnorm fe2 = nfe2 -> forall den, split (denum nfe1) (denum nfe2) = den -> NPphi_dev l (Nnorm n lmp (PEmul (num nfe1) (right den))) == NPphi_dev l (Nnorm n lmp (PEmul (num nfe2) (left den))) -> PCond l (condition nfe1 ++ condition nfe2) -> FEeval l fe1 == FEeval l fe2. Proof. intros n l lpe fe1 fe2 Hlpe lmp Hlmp nfe1 eq1 nfe2 eq2 den eq3 Hcrossprod Hcond; subst nfe1 nfe2 den lmp. apply Fnorm_crossproduct; trivial. simpl. rewrite (split_correct_l l (denum (Fnorm fe1)) (denum (Fnorm fe2))). rewrite (split_correct_r l (denum (Fnorm fe1)) (denum (Fnorm fe2))). rewrite NPEmul_correct. rewrite NPEmul_correct. simpl. repeat rewrite (ARmul_assoc ARth). rewrite <-( let x := PEmul (num (Fnorm fe1)) (rsplit_right (split (denum (Fnorm fe1)) (denum (Fnorm fe2)))) in ring_rw_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec n lpe l Hlpe Logic.eq_refl x Logic.eq_refl) in Hcrossprod. rewrite <-( let x := (PEmul (num (Fnorm fe2)) (rsplit_left (split (denum (Fnorm fe1)) (denum (Fnorm fe2))))) in ring_rw_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec n lpe l Hlpe Logic.eq_refl x Logic.eq_refl) in Hcrossprod. simpl in Hcrossprod. rewrite Hcrossprod. reflexivity. Qed. 
Theorem Field_simplify_eq_pow_correct : forall n l lpe fe1 fe2, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall nfe1, Fnorm fe1 = nfe1 -> forall nfe2, Fnorm fe2 = nfe2 -> forall den, split (denum nfe1) (denum nfe2) = den -> NPphi_pow l (Nnorm n lmp (PEmul (num nfe1) (right den))) == NPphi_pow l (Nnorm n lmp (PEmul (num nfe2) (left den))) -> PCond l (condition nfe1 ++ condition nfe2) -> FEeval l fe1 == FEeval l fe2. Proof. intros n l lpe fe1 fe2 Hlpe lmp Hlmp nfe1 eq1 nfe2 eq2 den eq3 Hcrossprod Hcond; subst nfe1 nfe2 den lmp. apply Fnorm_crossproduct; trivial. simpl. rewrite (split_correct_l l (denum (Fnorm fe1)) (denum (Fnorm fe2))). rewrite (split_correct_r l (denum (Fnorm fe1)) (denum (Fnorm fe2))). rewrite NPEmul_correct. rewrite NPEmul_correct. simpl. repeat rewrite (ARmul_assoc ARth). rewrite <-( let x := PEmul (num (Fnorm fe1)) (rsplit_right (split (denum (Fnorm fe1)) (denum (Fnorm fe2)))) in ring_rw_pow_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec n lpe l Hlpe Logic.eq_refl x Logic.eq_refl) in Hcrossprod. rewrite <-( let x := (PEmul (num (Fnorm fe2)) (rsplit_left (split (denum (Fnorm fe1)) (denum (Fnorm fe2))))) in ring_rw_pow_correct Rsth Reqe ARth CRmorph pow_th cdiv_th get_sign_spec n lpe l Hlpe Logic.eq_refl x Logic.eq_refl) in Hcrossprod. simpl in Hcrossprod. rewrite Hcrossprod. reflexivity. Qed. Theorem Field_simplify_eq_pow_in_correct : forall n l lpe fe1 fe2, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall nfe1, Fnorm fe1 = nfe1 -> forall nfe2, Fnorm fe2 = nfe2 -> forall den, split (denum nfe1) (denum nfe2) = den -> forall np1, Nnorm n lmp (PEmul (num nfe1) (right den)) = np1 -> forall np2, Nnorm n lmp (PEmul (num nfe2) (left den)) = np2 -> FEeval l fe1 == FEeval l fe2 -> PCond l (condition nfe1 ++ condition nfe2) -> NPphi_pow l np1 == NPphi_pow l np2. Proof. intros. subst nfe1 nfe2 lmp np1 np2. repeat rewrite (Pphi_pow_ok Rsth Reqe ARth CRmorph pow_th get_sign_spec). repeat (rewrite <- (norm_subst_ok Rsth Reqe ARth CRmorph pow_th);trivial). simpl. assert (N1 := Pcond_Fnorm _ _ (PCond_app_inv_l _ _ _ H7)). assert (N2 := Pcond_Fnorm _ _ (PCond_app_inv_r _ _ _ H7)). apply (@rmul_reg_l (NPEeval l (rsplit_common den))). intro Heq;apply N1. rewrite (split_correct_l l (denum (Fnorm fe1)) (denum (Fnorm fe2))). rewrite H3. rewrite NPEmul_correct. simpl. ring [Heq]. repeat rewrite (ARth.(ARmul_comm) (NPEeval l (rsplit_common den))). repeat rewrite <- ARth.(ARmul_assoc). change (NPEeval l (rsplit_right den) * NPEeval l (rsplit_common den)) with (NPEeval l (PEmul (rsplit_right den) (rsplit_common den))). change (NPEeval l (rsplit_left den) * NPEeval l (rsplit_common den)) with (NPEeval l (PEmul (rsplit_left den) (rsplit_common den))). repeat rewrite <- NPEmul_correct. rewrite <- H3. rewrite <- split_correct_l. rewrite <- split_correct_r. apply (@rmul_reg_l (/NPEeval l (denum (Fnorm fe2)))). intro Heq; apply AFth.(AF_1_neq_0). rewrite <- (@AFinv_l AFth (NPEeval l (denum (Fnorm fe2))));trivial. ring [Heq]. rewrite (ARth.(ARmul_comm) (/ NPEeval l (denum (Fnorm fe2)))). repeat rewrite <- (ARth.(ARmul_assoc)). rewrite <- (AFth.(AFdiv_def)). rewrite rdiv_r_r by trivial. apply (@rmul_reg_l (/NPEeval l (denum (Fnorm fe1)))). intro Heq; apply AFth.(AF_1_neq_0). rewrite <- (@AFinv_l AFth (NPEeval l (denum (Fnorm fe1))));trivial. ring [Heq]. repeat rewrite (ARth.(ARmul_comm) (/ NPEeval l (denum (Fnorm fe1)))). repeat rewrite <- (ARth.(ARmul_assoc)). repeat rewrite <- (AFth.(AFdiv_def)). rewrite rdiv_r_r by trivial. 
rewrite (AFth.(AFdiv_def)). ring_simplify. unfold SRopp. rewrite (ARth.(ARmul_comm) (/ NPEeval l (denum (Fnorm fe2)))). repeat rewrite <- (AFth.(AFdiv_def)). repeat rewrite <- Fnorm_FEeval_PEeval ; trivial. apply (PCond_app_inv_r _ _ _ H7). apply (PCond_app_inv_l _ _ _ H7). Qed. Theorem Field_simplify_eq_in_correct : forall n l lpe fe1 fe2, Ninterp_PElist l lpe -> forall lmp, Nmk_monpol_list lpe = lmp -> forall nfe1, Fnorm fe1 = nfe1 -> forall nfe2, Fnorm fe2 = nfe2 -> forall den, split (denum nfe1) (denum nfe2) = den -> forall np1, Nnorm n lmp (PEmul (num nfe1) (right den)) = np1 -> forall np2, Nnorm n lmp (PEmul (num nfe2) (left den)) = np2 -> FEeval l fe1 == FEeval l fe2 -> PCond l (condition nfe1 ++ condition nfe2) -> NPphi_dev l np1 == NPphi_dev l np2. Proof. intros. subst nfe1 nfe2 lmp np1 np2. repeat rewrite (Pphi_dev_ok Rsth Reqe ARth CRmorph get_sign_spec). repeat (rewrite <- (norm_subst_ok Rsth Reqe ARth CRmorph pow_th);trivial). simpl. assert (N1 := Pcond_Fnorm _ _ (PCond_app_inv_l _ _ _ H7)). assert (N2 := Pcond_Fnorm _ _ (PCond_app_inv_r _ _ _ H7)). apply (@rmul_reg_l (NPEeval l (rsplit_common den))). intro Heq;apply N1. rewrite (split_correct_l l (denum (Fnorm fe1)) (denum (Fnorm fe2))). rewrite H3. rewrite NPEmul_correct. simpl. ring [Heq]. repeat rewrite (ARth.(ARmul_comm) (NPEeval l (rsplit_common den))). repeat rewrite <- ARth.(ARmul_assoc). change (NPEeval l (rsplit_right den) * NPEeval l (rsplit_common den)) with (NPEeval l (PEmul (rsplit_right den) (rsplit_common den))). change (NPEeval l (rsplit_left den) * NPEeval l (rsplit_common den)) with (NPEeval l (PEmul (rsplit_left den) (rsplit_common den))). repeat rewrite <- NPEmul_correct;rewrite <- H3. rewrite <- split_correct_l. rewrite <- split_correct_r. apply (@rmul_reg_l (/NPEeval l (denum (Fnorm fe2)))). intro Heq; apply AFth.(AF_1_neq_0). rewrite <- (@AFinv_l AFth (NPEeval l (denum (Fnorm fe2))));trivial. ring [Heq]. rewrite (ARth.(ARmul_comm) (/ NPEeval l (denum (Fnorm fe2)))). repeat rewrite <- (ARth.(ARmul_assoc)). rewrite <- (AFth.(AFdiv_def)). rewrite rdiv_r_r by trivial. apply (@rmul_reg_l (/NPEeval l (denum (Fnorm fe1)))). intro Heq; apply AFth.(AF_1_neq_0). rewrite <- (@AFinv_l AFth (NPEeval l (denum (Fnorm fe1))));trivial. ring [Heq]. repeat rewrite (ARth.(ARmul_comm) (/ NPEeval l (denum (Fnorm fe1)))). repeat rewrite <- (ARth.(ARmul_assoc)). repeat rewrite <- (AFth.(AFdiv_def)). rewrite rdiv_r_r by trivial. rewrite (AFth.(AFdiv_def)). ring_simplify. unfold SRopp. rewrite (ARth.(ARmul_comm) (/ NPEeval l (denum (Fnorm fe2)))). repeat rewrite <- (AFth.(AFdiv_def)). repeat rewrite <- Fnorm_FEeval_PEeval;trivial. apply (PCond_app_inv_r _ _ _ H7). apply (PCond_app_inv_l _ _ _ H7). Qed. Section Fcons_impl. Variable Fcons : PExpr C -> list (PExpr C) -> list (PExpr C). Hypothesis PCond_fcons_inv : forall l a l1, PCond l (Fcons a l1) -> ~ NPEeval l a == 0 /\ PCond l l1. Fixpoint Fapp (l m:list (PExpr C)) {struct l} : list (PExpr C) := match l with | nil => m | cons a l1 => Fcons a (Fapp l1 m) end. Lemma fcons_correct : forall l l1, PCond l (Fapp l1 nil) -> PCond l l1. induction l1; simpl; intros. trivial. elim PCond_fcons_inv with (1 := H); intros. destruct l1; auto. Qed. End Fcons_impl. Section Fcons_simpl. (* Some general simpifications of the condition: eliminate duplicates, split multiplications *) Fixpoint Fcons (e:PExpr C) (l:list (PExpr C)) {struct l} : list (PExpr C) := match l with nil => cons e nil | cons a l1 => if PExpr_eq e a then l else cons a (Fcons e l1) end. 
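(* For instance [Fcons (PEX C 1) (PEX C 1 :: l)] returns [PEX C 1 :: l]
   unchanged, since [PExpr_eq] holds on syntactically equal variables:
   a side condition already recorded is not duplicated. *)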
Theorem PFcons_fcons_inv: forall l a l1, PCond l (Fcons a l1) -> ~ NPEeval l a == 0 /\ PCond l l1. intros l a l1; elim l1; simpl Fcons; auto. simpl; auto. intros a0 l0. generalize (PExpr_eq_semi_correct l a a0); case (PExpr_eq a a0). intros H H0 H1; split; auto. rewrite H; auto. generalize (PCond_cons_inv_l _ _ _ H1); simpl; auto. intros H H0 H1; assert (Hp: ~ NPEeval l a0 == 0 /\ (~ NPEeval l a == 0 /\ PCond l l0)). split. generalize (PCond_cons_inv_l _ _ _ H1); simpl; auto. apply H0. generalize (PCond_cons_inv_r _ _ _ H1); simpl; auto. generalize Hp; case l0; simpl; intuition. Qed. (* equality of normal forms rather than syntactic equality *) Fixpoint Fcons0 (e:PExpr C) (l:list (PExpr C)) {struct l} : list (PExpr C) := match l with nil => cons e nil | cons a l1 => if Peq ceqb (Nnorm O nil e) (Nnorm O nil a) then l else cons a (Fcons0 e l1) end. Theorem PFcons0_fcons_inv: forall l a l1, PCond l (Fcons0 a l1) -> ~ NPEeval l a == 0 /\ PCond l l1. intros l a l1; elim l1; simpl Fcons0; auto. simpl; auto. intros a0 l0. generalize (ring_correct Rsth Reqe ARth CRmorph pow_th cdiv_th O l nil a a0). simpl. case (Peq ceqb (Nnorm O nil a) (Nnorm O nil a0)). intros H H0 H1; split; auto. rewrite H; auto. generalize (PCond_cons_inv_l _ _ _ H1); simpl; auto. intros H H0 H1; assert (Hp: ~ NPEeval l a0 == 0 /\ (~ NPEeval l a == 0 /\ PCond l l0)). split. generalize (PCond_cons_inv_l _ _ _ H1); simpl; auto. apply H0. generalize (PCond_cons_inv_r _ _ _ H1); simpl; auto. clear get_sign get_sign_spec. generalize Hp; case l0; simpl; intuition. Qed. (* split factorized denominators *) Fixpoint Fcons00 (e:PExpr C) (l:list (PExpr C)) {struct e} : list (PExpr C) := match e with PEmul e1 e2 => Fcons00 e1 (Fcons00 e2 l) | PEpow e1 _ => Fcons00 e1 l | _ => Fcons0 e l end. Theorem PFcons00_fcons_inv: forall l a l1, PCond l (Fcons00 a l1) -> ~ NPEeval l a == 0 /\ PCond l l1. intros l a; elim a; try (intros; apply PFcons0_fcons_inv; auto; fail). intros p H p0 H0 l1 H1. simpl in H1. case (H _ H1); intros H2 H3. case (H0 _ H3); intros H4 H5; split; auto. simpl. apply field_is_integral_domain; trivial. simpl;intros. rewrite pow_th.(rpow_pow_N). destruct (H _ H0);split;auto. destruct n;simpl. apply AFth.(AF_1_neq_0). apply pow_pos_not_0;trivial. Qed. Definition Pcond_simpl_gen := fcons_correct _ PFcons00_fcons_inv. (* Specific case when the equality test of coefs is complete w.r.t. the field equality: non-zero coefs can be eliminated, and opposite can be simplified (if -1 <> 0) *) Hypothesis ceqb_complete : forall c1 c2, phi c1 == phi c2 -> ceqb c1 c2 = true. Lemma ceqb_rect_complete : forall c1 c2 (A:Type) (x y:A) (P:A->Type), (phi c1 == phi c2 -> P x) -> (~ phi c1 == phi c2 -> P y) -> P (if ceqb c1 c2 then x else y). Proof. intros. generalize (fun h => X (morph_eq CRmorph c1 c2 h)). generalize (@ceqb_complete c1 c2). case (c1 ?=! c2); auto; intros. apply X0. red; intro. absurd (false = true); auto; discriminate. Qed. Fixpoint Fcons1 (e:PExpr C) (l:list (PExpr C)) {struct e} : list (PExpr C) := match e with PEmul e1 e2 => Fcons1 e1 (Fcons1 e2 l) | PEpow e _ => Fcons1 e l | PEopp e => if ceqb (copp cI) cO then absurd_PCond else Fcons1 e l | PEc c => if ceqb c cO then absurd_PCond else l | _ => Fcons0 e l end. Theorem PFcons1_fcons_inv: forall l a l1, PCond l (Fcons1 a l1) -> ~ NPEeval l a == 0 /\ PCond l l1. intros l a; elim a; try (intros; apply PFcons0_fcons_inv; auto; fail). simpl; intros c l1. apply ceqb_rect_complete; intros. elim (@absurd_PCond_bottom l H0). split; trivial. rewrite <- (morph0 CRmorph); trivial. 
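(* For instance, if [ceqb c cO] computes to [false] for a constant [c], then
   [Fcons1 (PEmul (PEc c) (PEX C 1)) nil] reduces to [PEX C 1 :: nil]: the
   product is split and the non-zero constant factor is discarded, whereas a
   syntactically zero constant yields [absurd_PCond]. *)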
intros p H p0 H0 l1 H1. simpl in H1. case (H _ H1); intros H2 H3. case (H0 _ H3); intros H4 H5; split; auto. simpl. apply field_is_integral_domain; trivial. simpl; intros p H l1. apply ceqb_rect_complete; intros. elim (@absurd_PCond_bottom l H1). destruct (H _ H1). split; trivial. apply ropp_neq_0; trivial. rewrite (morph_opp CRmorph) in H0. rewrite (morph1 CRmorph) in H0. rewrite (morph0 CRmorph) in H0. trivial. intros;simpl. destruct (H _ H0);split;trivial. rewrite pow_th.(rpow_pow_N). destruct n;simpl. apply AFth.(AF_1_neq_0). apply pow_pos_not_0;trivial. Qed. Definition Fcons2 e l := Fcons1 (PExpr_simp e) l. Theorem PFcons2_fcons_inv: forall l a l1, PCond l (Fcons2 a l1) -> ~ NPEeval l a == 0 /\ PCond l l1. unfold Fcons2; intros l a l1 H; split; case (PFcons1_fcons_inv l (PExpr_simp a) l1); auto. intros H1 H2 H3; case H1. transitivity (NPEeval l a); trivial. apply PExpr_simp_correct. Qed. Definition Pcond_simpl_complete := fcons_correct _ PFcons2_fcons_inv. End Fcons_simpl. End AlmostField. Section FieldAndSemiField. Record field_theory : Prop := mk_field { F_R : ring_theory rO rI radd rmul rsub ropp req; F_1_neq_0 : ~ 1 == 0; Fdiv_def : forall p q, p / q == p * / q; Finv_l : forall p, ~ p == 0 -> / p * p == 1 }. Definition F2AF f := mk_afield (Rth_ARth Rsth Reqe f.(F_R)) f.(F_1_neq_0) f.(Fdiv_def) f.(Finv_l). Record semi_field_theory : Prop := mk_sfield { SF_SR : semi_ring_theory rO rI radd rmul req; SF_1_neq_0 : ~ 1 == 0; SFdiv_def : forall p q, p / q == p * / q; SFinv_l : forall p, ~ p == 0 -> / p * p == 1 }. End FieldAndSemiField. End MakeFieldPol. Definition SF2AF R (rO rI:R) radd rmul rdiv rinv req Rsth (sf:semi_field_theory rO rI radd rmul rdiv rinv req) := mk_afield _ _ (SRth_ARth Rsth sf.(SF_SR)) sf.(SF_1_neq_0) sf.(SFdiv_def) sf.(SFinv_l). Section Complete. Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R). Variable (rdiv : R -> R -> R) (rinv : R -> R). Variable req : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (radd x y). Notation "x * y " := (rmul x y). Notation "x - y " := (rsub x y). Notation "- x" := (ropp x). Notation "x / y " := (rdiv x y). Notation "/ x" := (rinv x). Notation "x == y" := (req x y) (at level 70, no associativity). Variable Rsth : Setoid_Theory R req. Add Setoid R req Rsth as R_setoid3. Variable Reqe : ring_eq_ext radd rmul ropp req. Add Morphism radd : radd_ext3. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext3. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext3. exact (Ropp_ext Reqe). Qed. Section AlmostField. Variable AFth : almost_field_theory rO rI radd rmul rsub ropp rdiv rinv req. Let ARth := AFth.(AF_AR). Let rI_neq_rO := AFth.(AF_1_neq_0). Let rdiv_def := AFth.(AFdiv_def). Let rinv_l := AFth.(AFinv_l). Hypothesis S_inj : forall x y, 1+x==1+y -> x==y. Hypothesis gen_phiPOS_not_0 : forall p, ~ gen_phiPOS1 rI radd rmul p == 0. Lemma add_inj_r : forall p x y, gen_phiPOS1 rI radd rmul p + x == gen_phiPOS1 rI radd rmul p + y -> x==y. intros p x y. elim p using Pos.peano_ind; simpl; intros. apply S_inj; trivial. apply H. apply S_inj. repeat rewrite (ARadd_assoc ARth). rewrite <- (ARgen_phiPOS_Psucc Rsth Reqe ARth); trivial. Qed. Lemma gen_phiPOS_inj : forall x y, gen_phiPOS rI radd rmul x == gen_phiPOS rI radd rmul y -> x = y. intros x y. repeat rewrite <- (same_gen Rsth Reqe ARth). case (Pos.compare_spec x y). intros. trivial. intros. elim gen_phiPOS_not_0 with (y - x)%positive. apply add_inj_r with x. symmetry. rewrite (ARadd_0_r Rsth ARth). 
rewrite <- (ARgen_phiPOS_add Rsth Reqe ARth). now rewrite Pos.add_comm, Pos.sub_add. intros. elim gen_phiPOS_not_0 with (x - y)%positive. apply add_inj_r with y. rewrite (ARadd_0_r Rsth ARth). rewrite <- (ARgen_phiPOS_add Rsth Reqe ARth). now rewrite Pos.add_comm, Pos.sub_add. Qed. Lemma gen_phiN_inj : forall x y, gen_phiN rO rI radd rmul x == gen_phiN rO rI radd rmul y -> x = y. destruct x; destruct y; simpl; intros; trivial. elim gen_phiPOS_not_0 with p. symmetry . rewrite (same_gen Rsth Reqe ARth); trivial. elim gen_phiPOS_not_0 with p. rewrite (same_gen Rsth Reqe ARth); trivial. rewrite gen_phiPOS_inj with (1 := H); trivial. Qed. Lemma gen_phiN_complete : forall x y, gen_phiN rO rI radd rmul x == gen_phiN rO rI radd rmul y -> N.eqb x y = true. Proof. intros. now apply N.eqb_eq, gen_phiN_inj. Qed. End AlmostField. Section Field. Variable Fth : field_theory rO rI radd rmul rsub ropp rdiv rinv req. Let Rth := Fth.(F_R). Let rI_neq_rO := Fth.(F_1_neq_0). Let rdiv_def := Fth.(Fdiv_def). Let rinv_l := Fth.(Finv_l). Let AFth := F2AF Rsth Reqe Fth. Let ARth := Rth_ARth Rsth Reqe Rth. Lemma ring_S_inj : forall x y, 1+x==1+y -> x==y. intros. transitivity (x + (1 + - (1))). rewrite (Ropp_def Rth). symmetry . apply (ARadd_0_r Rsth ARth). transitivity (y + (1 + - (1))). repeat rewrite <- (ARplus_assoc ARth). repeat rewrite (ARadd_assoc ARth). apply (Radd_ext Reqe). repeat rewrite <- (ARadd_comm ARth 1). trivial. reflexivity. rewrite (Ropp_def Rth). apply (ARadd_0_r Rsth ARth). Qed. Hypothesis gen_phiPOS_not_0 : forall p, ~ gen_phiPOS1 rI radd rmul p == 0. Let gen_phiPOS_inject := gen_phiPOS_inj AFth ring_S_inj gen_phiPOS_not_0. Lemma gen_phiPOS_discr_sgn : forall x y, ~ gen_phiPOS rI radd rmul x == - gen_phiPOS rI radd rmul y. red; intros. apply gen_phiPOS_not_0 with (y + x)%positive. rewrite (ARgen_phiPOS_add Rsth Reqe ARth). transitivity (gen_phiPOS1 1 radd rmul y + - gen_phiPOS1 1 radd rmul y). apply (Radd_ext Reqe); trivial. reflexivity. rewrite (same_gen Rsth Reqe ARth). rewrite (same_gen Rsth Reqe ARth). trivial. apply (Ropp_def Rth). Qed. Lemma gen_phiZ_inj : forall x y, gen_phiZ rO rI radd rmul ropp x == gen_phiZ rO rI radd rmul ropp y -> x = y. destruct x; destruct y; simpl; intros. trivial. elim gen_phiPOS_not_0 with p. rewrite (same_gen Rsth Reqe ARth). symmetry ; trivial. elim gen_phiPOS_not_0 with p. rewrite (same_gen Rsth Reqe ARth). rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p)). rewrite <- H. apply (ARopp_zero Rsth Reqe ARth). elim gen_phiPOS_not_0 with p. rewrite (same_gen Rsth Reqe ARth). trivial. rewrite gen_phiPOS_inject with (1 := H); trivial. elim gen_phiPOS_discr_sgn with (1 := H). elim gen_phiPOS_not_0 with p. rewrite (same_gen Rsth Reqe ARth). rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p)). rewrite H. apply (ARopp_zero Rsth Reqe ARth). elim gen_phiPOS_discr_sgn with p0 p. symmetry ; trivial. replace p0 with p; trivial. apply gen_phiPOS_inject. rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p)). rewrite <- (Ropp_opp Rsth Reqe Rth (gen_phiPOS 1 radd rmul p0)). rewrite H; trivial. reflexivity. Qed. Lemma gen_phiZ_complete : forall x y, gen_phiZ rO rI radd rmul ropp x == gen_phiZ rO rI radd rmul ropp y -> Zeq_bool x y = true. intros. replace y with x. unfold Zeq_bool. rewrite Z.compare_refl; trivial. apply gen_phiZ_inj; trivial. Qed. End Field. End Complete. 
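(* Usage sketch (illustrative): [gen_phiZ_complete] is the completeness fact
   that concrete field declarations feed to the [field] machinery; for
   instance RealField.v, later in this archive, instantiates it as
     Lemma Zeq_bool_complete : forall x y,
       InitialRing.gen_phiZ 0%R 1%R Rplus Rmult Ropp x =
       InitialRing.gen_phiZ 0%R 1%R Rplus Rmult Ropp y ->
       Zeq_bool x y = true.
     Proof gen_phiZ_complete Rset Rext Rfield Rgen_phiPOS_not_0. *)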
coq-8.4pl2/plugins/setoid_ring/vo.itarget0000640000175000001440000000051611616534440017631 0ustar notinusersArithRing.vo BinList.vo Field_tac.vo Field_theory.vo Field.vo InitialRing.vo NArithRing.vo RealField.vo Ring_base.vo Ring_equiv.vo Ring_polynom.vo Ring_tac.vo Ring_theory.vo Ring.vo ZArithRing.vo Algebra_syntax.vo Cring.vo Ncring.vo Ncring_polynom.vo Ncring_initial.vo Ncring_tac.vo Rings_Z.vo Rings_R.vo Rings_Q.vo Integral_domain.vocoq-8.4pl2/plugins/setoid_ring/Ncring.v0000640000175000001440000002241012010532755017224 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* T->T} {mul:T->T->T} {sub:T->T->T} {opp:T->T} {ring_eq:T->T->Prop}. Instance zero_notation(T:Type)`{Ring_ops T}:Zero T:= ring0. Instance one_notation(T:Type)`{Ring_ops T}:One T:= ring1. Instance add_notation(T:Type)`{Ring_ops T}:Addition T:= add. Instance mul_notation(T:Type)`{Ring_ops T}:@Multiplication T T:= mul. Instance sub_notation(T:Type)`{Ring_ops T}:Subtraction T:= sub. Instance opp_notation(T:Type)`{Ring_ops T}:Opposite T:= opp. Instance eq_notation(T:Type)`{Ring_ops T}:@Equality T:= ring_eq. Class Ring `{Ro:Ring_ops}:={ ring_setoid: Equivalence _==_; ring_plus_comp: Proper (_==_ ==> _==_ ==>_==_) _+_; ring_mult_comp: Proper (_==_ ==> _==_ ==>_==_) _*_; ring_sub_comp: Proper (_==_ ==> _==_ ==>_==_) _-_; ring_opp_comp: Proper (_==_==>_==_) -_; ring_add_0_l : forall x, 0 + x == x; ring_add_comm : forall x y, x + y == y + x; ring_add_assoc : forall x y z, x + (y + z) == (x + y) + z; ring_mul_1_l : forall x, 1 * x == x; ring_mul_1_r : forall x, x * 1 == x; ring_mul_assoc : forall x y z, x * (y * z) == (x * y) * z; ring_distr_l : forall x y z, (x + y) * z == x * z + y * z; ring_distr_r : forall x y z, z * ( x + y) == z * x + z * y; ring_sub_def : forall x y, x - y == x + -y; ring_opp_def : forall x, x + -x == 0 }. (* inutile! je sais plus pourquoi j'ai mis ca... Instance ring_Ring_ops(R:Type)`{Ring R} :@Ring_ops R 0 1 addition multiplication subtraction opposite equality. *) Existing Instance ring_setoid. Existing Instance ring_plus_comp. Existing Instance ring_mult_comp. Existing Instance ring_sub_comp. Existing Instance ring_opp_comp. Section Ring_power. Context {R:Type}`{Ring R}. Fixpoint pow_pos (x:R) (i:positive) {struct i}: R := match i with | xH => x | xO i => let p := pow_pos x i in p * p | xI i => let p := pow_pos x i in x * (p * p) end. Definition pow_N (x:R) (p:N) := match p with | N0 => 1 | Npos p => pow_pos x p end. End Ring_power. Definition ZN(x:Z):= match x with Z0 => N0 |Zpos p | Zneg p => Npos p end. Instance power_ring {R:Type}`{Ring R} : Power:= {power x y := pow_N x (ZN y)}. (** Interpretation morphisms definition*) Class Ring_morphism (C R:Type)`{Cr:Ring C} `{Rr:Ring R}`{Rh:Bracket C R}:= { ring_morphism0 : [0] == 0; ring_morphism1 : [1] == 1; ring_morphism_add : forall x y, [x + y] == [x] + [y]; ring_morphism_sub : forall x y, [x - y] == [x] - [y]; ring_morphism_mul : forall x y, [x * y] == [x] * [y]; ring_morphism_opp : forall x, [-x] == -[x]; ring_morphism_eq : forall x y, x == y -> [x] == [y]}. Section Ring. Context {R:Type}`{Rr:Ring R}. (* Powers *) Lemma pow_pos_comm : forall x j, x * pow_pos x j == pow_pos x j * x. Proof. induction j; simpl. rewrite <- ring_mul_assoc. rewrite <- ring_mul_assoc. rewrite <- IHj. rewrite (ring_mul_assoc (pow_pos x j) x (pow_pos x j)). rewrite <- IHj. rewrite <- ring_mul_assoc. reflexivity. rewrite <- ring_mul_assoc. rewrite <- IHj. 
rewrite ring_mul_assoc. rewrite IHj. rewrite <- ring_mul_assoc. rewrite IHj. reflexivity. reflexivity. Qed. Lemma pow_pos_succ : forall x j, pow_pos x (Pos.succ j) == x * pow_pos x j. Proof. induction j; simpl. rewrite IHj. rewrite <- (ring_mul_assoc x (pow_pos x j) (x * pow_pos x j)). rewrite (ring_mul_assoc (pow_pos x j) x (pow_pos x j)). rewrite <- pow_pos_comm. rewrite <- ring_mul_assoc. reflexivity. reflexivity. reflexivity. Qed. Lemma pow_pos_add : forall x i j, pow_pos x (i + j) == pow_pos x i * pow_pos x j. Proof. intro x;induction i;intros. rewrite Pos.xI_succ_xO;rewrite <- Pos.add_1_r. rewrite <- Pos.add_diag;repeat rewrite <- Pos.add_assoc. repeat rewrite IHi. rewrite Pos.add_comm;rewrite Pos.add_1_r; rewrite pow_pos_succ. simpl;repeat rewrite ring_mul_assoc. reflexivity. rewrite <- Pos.add_diag;repeat rewrite <- Pos.add_assoc. repeat rewrite IHi. rewrite ring_mul_assoc. reflexivity. rewrite Pos.add_comm;rewrite Pos.add_1_r;rewrite pow_pos_succ. simpl. reflexivity. Qed. Definition id_phi_N (x:N) : N := x. Lemma pow_N_pow_N : forall x n, pow_N x (id_phi_N n) == pow_N x n. Proof. intros; reflexivity. Qed. (** Identity is a morphism *) (* Instance IDmorph : Ring_morphism _ _ _ (fun x => x). Proof. apply (Build_Ring_morphism H6 H6 (fun x => x));intros; try reflexivity. trivial. Qed. *) (** rings are almost rings*) Lemma ring_mul_0_l : forall x, 0 * x == 0. Proof. intro x. setoid_replace (0*x) with ((0+1)*x + -x). rewrite ring_add_0_l. rewrite ring_mul_1_l . rewrite ring_opp_def . fold zero. reflexivity. rewrite ring_distr_l . rewrite ring_mul_1_l . rewrite <- ring_add_assoc ; rewrite ring_opp_def . rewrite ring_add_comm ; rewrite ring_add_0_l ;reflexivity. Qed. Lemma ring_mul_0_r : forall x, x * 0 == 0. Proof. intro x; setoid_replace (x*0) with (x*(0+1) + -x). rewrite ring_add_0_l ; rewrite ring_mul_1_r . rewrite ring_opp_def ; fold zero; reflexivity. rewrite ring_distr_r ;rewrite ring_mul_1_r . rewrite <- ring_add_assoc ; rewrite ring_opp_def . rewrite ring_add_comm ; rewrite ring_add_0_l ;reflexivity. Qed. Lemma ring_opp_mul_l : forall x y, -(x * y) == -x * y. Proof. intros x y;rewrite <- (ring_add_0_l (- x * y)). rewrite ring_add_comm . rewrite <- (ring_opp_def (x*y)). rewrite ring_add_assoc . rewrite <- ring_distr_l. rewrite (ring_add_comm (-x));rewrite ring_opp_def . rewrite ring_mul_0_l;rewrite ring_add_0_l ;reflexivity. Qed. Lemma ring_opp_mul_r : forall x y, -(x * y) == x * -y. Proof. intros x y;rewrite <- (ring_add_0_l (x * - y)). rewrite ring_add_comm . rewrite <- (ring_opp_def (x*y)). rewrite ring_add_assoc . rewrite <- ring_distr_r . rewrite (ring_add_comm (-y));rewrite ring_opp_def . rewrite ring_mul_0_r;rewrite ring_add_0_l ;reflexivity. Qed. Lemma ring_opp_add : forall x y, -(x + y) == -x + -y. Proof. intros x y;rewrite <- (ring_add_0_l (-(x+y))). rewrite <- (ring_opp_def x). rewrite <- (ring_add_0_l (x + - x + - (x + y))). rewrite <- (ring_opp_def y). rewrite (ring_add_comm x). rewrite (ring_add_comm y). rewrite <- (ring_add_assoc (-y)). rewrite <- (ring_add_assoc (- x)). rewrite (ring_add_assoc y). rewrite (ring_add_comm y). rewrite <- (ring_add_assoc (- x)). rewrite (ring_add_assoc y). rewrite (ring_add_comm y);rewrite ring_opp_def . rewrite (ring_add_comm (-x) 0);rewrite ring_add_0_l . rewrite ring_add_comm; reflexivity. Qed. Lemma ring_opp_opp : forall x, - -x == x. Proof. intros x; rewrite <- (ring_add_0_l (- -x)). rewrite <- (ring_opp_def x). rewrite <- ring_add_assoc ; rewrite ring_opp_def . rewrite (ring_add_comm x); rewrite ring_add_0_l . reflexivity. Qed. 
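(* Illustrative consequence (sketch, not part of the original development):
   combining [ring_opp_mul_l], [ring_opp_mul_r] and [ring_opp_opp] gives the
   usual "minus times minus" law:
     - x * - y == - (x * - y) == - - (x * y) == x * y.  *)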
Lemma ring_sub_ext : forall x1 x2, x1 == x2 -> forall y1 y2, y1 == y2 -> x1 - y1 == x2 - y2. Proof. intros. setoid_replace (x1 - y1) with (x1 + -y1). setoid_replace (x2 - y2) with (x2 + -y2). rewrite H;rewrite H0;reflexivity. rewrite ring_sub_def. reflexivity. rewrite ring_sub_def. reflexivity. Qed. Ltac mrewrite := repeat first [ rewrite ring_add_0_l | rewrite <- (ring_add_comm 0) | rewrite ring_mul_1_l | rewrite ring_mul_0_l | rewrite ring_distr_l | reflexivity ]. Lemma ring_add_0_r : forall x, (x + 0) == x. Proof. intros; mrewrite. Qed. Lemma ring_add_assoc1 : forall x y z, (x + y) + z == (y + z) + x. Proof. intros;rewrite <- (ring_add_assoc x). rewrite (ring_add_comm x);reflexivity. Qed. Lemma ring_add_assoc2 : forall x y z, (y + x) + z == (y + z) + x. Proof. intros; repeat rewrite <- ring_add_assoc. rewrite (ring_add_comm x); reflexivity. Qed. Lemma ring_opp_zero : -0 == 0. Proof. rewrite <- (ring_mul_0_r 0). rewrite ring_opp_mul_l. repeat rewrite ring_mul_0_r. reflexivity. Qed. End Ring. (** Some simplification tactics*) Ltac gen_reflexivity := reflexivity. Ltac gen_rewrite := repeat first [ reflexivity | progress rewrite ring_opp_zero | rewrite ring_add_0_l | rewrite ring_add_0_r | rewrite ring_mul_1_l | rewrite ring_mul_1_r | rewrite ring_mul_0_l | rewrite ring_mul_0_r | rewrite ring_distr_l | rewrite ring_distr_r | rewrite ring_add_assoc | rewrite ring_mul_assoc | progress rewrite ring_opp_add | progress rewrite ring_sub_def | progress rewrite <- ring_opp_mul_l | progress rewrite <- ring_opp_mul_r ]. Ltac gen_add_push x := repeat (match goal with | |- context [(?y + x) + ?z] => progress rewrite (ring_add_assoc2 x y z) | |- context [(x + ?y) + ?z] => progress rewrite (ring_add_assoc1 x y z) end). coq-8.4pl2/plugins/setoid_ring/newring_plugin.mllib0000640000175000001440000000003311161000644021653 0ustar notinusersNewring Newring_plugin_mod coq-8.4pl2/plugins/setoid_ring/ZArithRing.v0000640000175000001440000000276612010532755020041 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t | _ => constr:NotConstant end. Ltac isZpow_coef t := match t with | Zpos ?p => isPcst p | Z0 => constr:true | _ => constr:false end. Notation N_of_Z := Z.to_N (only parsing). Ltac Zpow_tac t := match isZpow_coef t with | true => constr:(N_of_Z t) | _ => constr:NotConstant end. Ltac Zpower_neg := repeat match goal with | [|- ?G] => match G with | context c [Z.pow _ (Zneg _)] => let t := context c [Z0] in change t end end. Add Ring Zr : Zth (decidable Zeq_bool_eq, constants [Zcst], preprocess [Zpower_neg;unfold Z.succ], power_tac Zpower_theory [Zpow_tac], (* The two following option are not needed, it is the default chose when the set of coefficiant is usual ring Z *) div (InitialRing.Ztriv_div_th (@Eqsth Z) (@IDphi Z)), sign get_signZ_th). coq-8.4pl2/plugins/setoid_ring/Field.v0000640000175000001440000000110512010532755017025 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* _ => R | _ => fail 1000 "Equality has no relation type" end. Ltac Get_goal := match goal with [|- ?G] => G end. (********************************************************************) (* Tacticals to build reflexive tactics *) Ltac OnEquation req := match goal with | |- req ?lhs ?rhs => (fun f => f lhs rhs) | _ => (fun _ => fail "Goal is not an equation (of expected equality)") end. 
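(* Illustrative use of [OnEquation] (sketch): it is a tactical that, on a
   goal of the form [req ?lhs ?rhs], hands both sides to a continuation,
   e.g.
     OnEquation req ltac:(fun lhs rhs => idtac lhs rhs)
   prints the two sides, and otherwise fails with "Goal is not an equation";
   the [Ring] tactic further down uses it in exactly this way. *)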
Ltac OnEquationHyp req h := match type of h with | req ?lhs ?rhs => fun f => f lhs rhs | _ => (fun _ => fail "Hypothesis is not an equation (of expected equality)") end. (* Note: auxiliary subgoals in reverse order *) Ltac OnMainSubgoal H ty := match ty with | _ -> ?ty' => let subtac := OnMainSubgoal H ty' in fun kont => lapply H; [clear H; intro H; subtac kont | idtac] | _ => (fun kont => kont()) end. (* A generic pattern to have reflexive tactics do some computation: lemmas of the form [forall x', x=x' -> P(x')] are understood as: compute the normal form of x, instantiate x' with it, prove hypothesis x=x' with vm_compute and reflexivity, and pass the instantiated lemma to the continuation. *) Ltac ProveLemmaHyp lemma := match type of lemma with forall x', ?x = x' -> _ => (fun kont => let x' := fresh "res" in let H := fresh "res_eq" in compute_assertion H x' x; let lemma' := constr:(lemma x' H) in kont lemma'; (clear H||idtac"ProveLemmaHyp: cleanup failed"); subst x') | _ => (fun _ => fail "ProveLemmaHyp: lemma not of the expected form") end. Ltac ProveLemmaHyps lemma := match type of lemma with forall x', ?x = x' -> _ => (fun kont => let x' := fresh "res" in let H := fresh "res_eq" in compute_assertion H x' x; let lemma' := constr:(lemma x' H) in ProveLemmaHyps lemma' kont; (clear H||idtac"ProveLemmaHyps: cleanup failed"); subst x') | _ => (fun kont => kont lemma) end. (* Ltac ProveLemmaHyps lemma := (* expects a continuation *) let try_step := ProveLemmaHyp lemma in (fun kont => try_step ltac:(fun lemma' => ProveLemmaHyps lemma' kont) || kont lemma). *) Ltac ApplyLemmaThen lemma expr kont := let lem := constr:(lemma expr) in ProveLemmaHyp lem ltac:(fun lem' => let Heq := fresh "thm" in assert (Heq:=lem'); OnMainSubgoal Heq ltac:(type of Heq) ltac:(fun _ => kont Heq); (clear Heq||idtac"ApplyLemmaThen: cleanup failed")). (* Ltac ApplyLemmaThenAndCont lemma expr tac CONT_tac cont_arg := let pe := match type of (lemma expr) with forall pe', ?pe = pe' -> _ => pe | _ => fail 1 "ApplyLemmaThenAndCont: cannot find norm expression" end in let pe' := fresh "expr_nf" in let nf_pe := fresh "pe_eq" in compute_assertion nf_pe pe' pe; let Heq := fresh "thm" in (assert (Heq:=lemma pe pe' H) || fail "anomaly: failed to apply lemma"); clear nf_pe; OnMainSubgoal Heq ltac:(type of Heq) ltac:(try tac Heq; clear Heq pe';CONT_tac cont_arg)). *) Ltac ApplyLemmaThenAndCont lemma expr tac CONT_tac := ApplyLemmaThen lemma expr ltac:(fun lemma' => try tac lemma'; CONT_tac()). (* General scheme of reflexive tactics using of correctness lemma that involves normalisation of one expression - [FV_tac term fv] is a tactic that adds the atomic expressions of [term] into [fv] - [SYN_tac term fv] reifies [term] given the list of atomic expressions - [LEMMA_tac fv kont] computes the correctness lemma and passes it to continuation kont - [MAIN_tac H] process H which is the conclusion of the correctness lemma instantiated with each reified term - [fv] is the initial value of atomic expressions (to be completed by the reification of the terms - [terms] the list (a constr of type list) of terms to reify and process. 
*) Ltac ReflexiveRewriteTactic FV_tac SYN_tac LEMMA_tac MAIN_tac fv terms := (* extend the atom list *) let fv := list_fold_left FV_tac fv terms in let RW_tac lemma := let fcons term CONT_tac := let expr := SYN_tac term fv in let main H := match type of H with | (?req _ ?rhs) => change (req term rhs) in H end; MAIN_tac H in (ApplyLemmaThenAndCont lemma expr main CONT_tac) in (* rewrite steps *) lazy_list_fold_right fcons ltac:(fun _=>idtac) terms in LEMMA_tac fv RW_tac. (********************************************************) Ltac FV_hypo_tac mkFV req lH := let R := relation_carrier req in let FV_hypo_l_tac h := match h with @mkhypo (req ?pe _) _ => mkFV pe end in let FV_hypo_r_tac h := match h with @mkhypo (req _ ?pe) _ => mkFV pe end in let fv := list_fold_right FV_hypo_l_tac (@nil R) lH in list_fold_right FV_hypo_r_tac fv lH. Ltac mkHyp_tac C req Reify lH := let mkHyp h res := match h with | @mkhypo (req ?r1 ?r2) _ => let pe1 := Reify r1 in let pe2 := Reify r2 in constr:(cons (pe1,pe2) res) | _ => fail 1 "hypothesis is not a ring equality" end in list_fold_right mkHyp (@nil (PExpr C * PExpr C)) lH. Ltac proofHyp_tac lH := let get_proof h := match h with | @mkhypo _ ?p => p end in let rec bh l := match l with | nil => constr:(I) | cons ?h nil => get_proof h | cons ?h ?tl => let l := get_proof h in let r := bh tl in constr:(conj l r) end in bh lH. Ltac get_MonPol lemma := match type of lemma with | context [(mk_monpol_list ?cO ?cI ?cadd ?cmul ?csub ?copp ?cdiv ?ceqb _)] => constr:(mk_monpol_list cO cI cadd cmul csub copp cdiv ceqb) | _ => fail 1 "ring/field anomaly: bad correctness lemma (get_MonPol)" end. (********************************************************) (* Building the atom list of a ring expression *) Ltac FV Cst CstPow add mul sub opp pow t fv := let rec TFV t fv := let f := match Cst t with | NotConstant => match t with | (add ?t1 ?t2) => fun _ => TFV t2 ltac:(TFV t1 fv) | (mul ?t1 ?t2) => fun _ => TFV t2 ltac:(TFV t1 fv) | (sub ?t1 ?t2) => fun _ => TFV t2 ltac:(TFV t1 fv) | (opp ?t1) => fun _ => TFV t1 fv | (pow ?t1 ?n) => match CstPow n with | InitialRing.NotConstant => fun _ => AddFvTail t fv | _ => fun _ => TFV t1 fv end | _ => fun _ => AddFvTail t fv end | _ => fun _ => fv end in f() in TFV t fv. (* syntaxification of ring expressions *) Ltac mkPolexpr C Cst CstPow radd rmul rsub ropp rpow t fv := let rec mkP t := let f := match Cst t with | InitialRing.NotConstant => match t with | (radd ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(PEadd e1 e2) | (rmul ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(PEmul e1 e2) | (rsub ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(PEsub e1 e2) | (ropp ?t1) => fun _ => let e1 := mkP t1 in constr:(PEopp e1) | (rpow ?t1 ?n) => match CstPow n with | InitialRing.NotConstant => fun _ => let p := Find_at t fv in constr:(PEX C p) | ?c => fun _ => let e1 := mkP t1 in constr:(PEpow e1 c) end | _ => fun _ => let p := Find_at t fv in constr:(PEX C p) end | ?c => fun _ => constr:(@PEc C c) end in f () in mkP t. (* packaging the ring structure *) Ltac PackRing F req sth ext morph arth cst_tac pow_tac lemma1 lemma2 pre post := let RNG := match type of lemma1 with | context [@PEeval ?R ?rO ?add ?mul ?sub ?opp ?C ?phi ?Cpow ?powphi ?pow _ _] => (fun proj => proj cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2) | _ => fail 1 "field anomaly: bad correctness lemma (parse)" end in F RNG. 
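(* Encoding sketch (illustrative): [PackRing] builds the ring structure as a
   continuation rather than a record; [RNG] is a function expecting a
   projection, so a component is read back by applying it, e.g.
     RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp
                   C Cpow powphi pow lemma1 lemma2 => R)
   returns the carrier, which is exactly what [get_Carrier] below does. *)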
Ltac get_Carrier RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => R). Ltac get_Eq RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => req). Ltac get_Pre RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => pre). Ltac get_Post RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => post). Ltac get_NormLemma RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => lemma1). Ltac get_SimplifyLemma RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => lemma2). Ltac get_RingFV RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => FV cst_tac pow_tac add mul sub opp pow). Ltac get_RingMeta RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => mkPolexpr C cst_tac pow_tac add mul sub opp pow). Ltac get_RingHypTac RNG := RNG ltac:(fun cst_tac pow_tac pre post R req add mul sub opp C Cpow powphi pow lemma1 lemma2 => let mkPol := mkPolexpr C cst_tac pow_tac add mul sub opp pow in fun fv lH => mkHyp_tac C req ltac:(fun t => mkPol t fv) lH). (* ring tactics *) Definition ring_subst_niter := (10*10*10)%nat. Ltac Ring RNG lemma lH := let req := get_Eq RNG in OnEquation req ltac:(fun lhs rhs => let mkFV := get_RingFV RNG in let mkPol := get_RingMeta RNG in let mkHyp := get_RingHypTac RNG in let fv := FV_hypo_tac mkFV ltac:(get_Eq RNG) lH in let fv := mkFV lhs fv in let fv := mkFV rhs fv in check_fv fv; let pe1 := mkPol lhs fv in let pe2 := mkPol rhs fv in let lpe := mkHyp fv lH in let vlpe := fresh "hyp_list" in let vfv := fresh "fv_list" in pose (vlpe := lpe); pose (vfv := fv); (apply (lemma vfv vlpe pe1 pe2) || fail "typing error while applying ring"); [ ((let prh := proofHyp_tac lH in exact prh) || idtac "can not automatically proof hypothesis :"; idtac " maybe a left member of a hypothesis is not a monomial") | vm_compute; (exact (eq_refl true) || fail "not a valid ring equation")]). Ltac Ring_norm_gen f RNG lemma lH rl := let mkFV := get_RingFV RNG in let mkPol := get_RingMeta RNG in let mkHyp := get_RingHypTac RNG in let mk_monpol := get_MonPol lemma in let fv := FV_hypo_tac mkFV ltac:(get_Eq RNG) lH in let lemma_tac fv kont := let lpe := mkHyp fv lH in let vlpe := fresh "list_hyp" in let vlmp := fresh "list_hyp_norm" in let vlmp_eq := fresh "list_hyp_norm_eq" in let prh := proofHyp_tac lH in pose (vlpe := lpe); compute_assertion vlmp_eq vlmp (mk_monpol vlpe); let H := fresh "ring_lemma" in (assert (H := lemma vlpe fv prh vlmp vlmp_eq) || fail "type error when build the rewriting lemma"); clear vlmp_eq; kont H; (clear H||idtac"Ring_norm_gen: cleanup failed"); subst vlpe vlmp in let simpl_ring H := (protect_fv "ring" in H; f H) in ReflexiveRewriteTactic mkFV mkPol lemma_tac simpl_ring fv rl. Ltac Ring_gen RNG lH rl := let lemma := get_NormLemma RNG in get_Pre RNG (); Ring RNG (lemma ring_subst_niter) lH. Tactic Notation (at level 0) "ring" := let G := Get_goal in ring_lookup (PackRing Ring_gen) [] G. Tactic Notation (at level 0) "ring" "[" constr_list(lH) "]" := let G := Get_goal in ring_lookup (PackRing Ring_gen) [lH] G. 
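(* End-user usage sketch (illustrative, assuming the standard ring on Z
   declared in ZArithRing.v):
     Require Import ZArith.  Open Scope Z_scope.
     Goal forall x y : Z, (x + y) * (x + y) = x*x + 2*x*y + y*y.
     Proof. intros; ring. Qed.
   The bracketed form [ring [H1 H2]] additionally uses the hypotheses H1, H2
   during normalisation; their left-hand sides must be monomials. *)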
(* Simplification *) Ltac Ring_simplify_gen f RNG lH rl := let lemma := get_SimplifyLemma RNG in let l := fresh "to_rewrite" in pose (l:= rl); generalize (eq_refl l); unfold l at 2; get_Pre RNG (); let rl := match goal with | [|- l = ?RL -> _ ] => RL | _ => fail 1 "ring_simplify anomaly: bad goal after pre" end in let Heq := fresh "Heq" in intros Heq;clear Heq l; Ring_norm_gen f RNG (lemma ring_subst_niter) lH rl; get_Post RNG (). Ltac Ring_simplify := Ring_simplify_gen ltac:(fun H => rewrite H). Tactic Notation (at level 0) "ring_simplify" constr_list(rl) := let G := Get_goal in ring_lookup (PackRing Ring_simplify) [] rl G. Tactic Notation (at level 0) "ring_simplify" "[" constr_list(lH) "]" constr_list(rl) := let G := Get_goal in ring_lookup (PackRing Ring_simplify) [lH] rl G. (* MON DIEU QUE C'EST MOCHE !!!!!!!!!!!!! *) Tactic Notation "ring_simplify" constr_list(rl) "in" hyp(H):= let G := Get_goal in let t := type of H in let g := fresh "goal" in set (g:= G); generalize H;clear H; ring_lookup (PackRing Ring_simplify) [] rl t; intro H; unfold g;clear g. Tactic Notation "ring_simplify" "["constr_list(lH)"]" constr_list(rl) "in" hyp(H):= let G := Get_goal in let t := type of H in let g := fresh "goal" in set (g:= G); generalize H;clear H; ring_lookup (PackRing Ring_simplify) [lH] rl t; intro H; unfold g;clear g. coq-8.4pl2/plugins/setoid_ring/ArithRing.v0000640000175000001440000000336312010532755017701 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr:(N.of_nat t) | _ => constr:InitialRing.NotConstant end. Ltac Ss_to_add f acc := match f with | S ?f1 => Ss_to_add f1 (S acc) | _ => constr:(acc + f)%nat end. Ltac natprering := match goal with |- context C [S ?p] => match p with O => fail 1 (* avoid replacing 1 with 1+0 ! *) | p => match isnatcst p with | true => fail 1 | false => let v := Ss_to_add p (S 0) in fold v; natprering end end | _ => idtac end. Add Ring natr : natSRth (morphism nat_morph_N, constants [natcst], preprocess [natprering]). coq-8.4pl2/plugins/setoid_ring/RealField.v0000640000175000001440000000624711776416531017660 0ustar notinusersRequire Import Nnat. Require Import ArithRing. Require Export Ring Field. Require Import Rdefinitions. Require Import Rpow_def. Require Import Raxioms. Local Open Scope R_scope. Lemma RTheory : ring_theory 0 1 Rplus Rmult Rminus Ropp (eq (A:=R)). Proof. constructor. intro; apply Rplus_0_l. exact Rplus_comm. symmetry ; apply Rplus_assoc. intro; apply Rmult_1_l. exact Rmult_comm. symmetry ; apply Rmult_assoc. intros m n p. rewrite Rmult_comm. rewrite (Rmult_comm n p). rewrite (Rmult_comm m p). apply Rmult_plus_distr_l. reflexivity. exact Rplus_opp_r. Qed. Lemma Rfield : field_theory 0 1 Rplus Rmult Rminus Ropp Rdiv Rinv (eq(A:=R)). Proof. constructor. exact RTheory. exact R1_neq_R0. reflexivity. exact Rinv_l. Qed. Lemma Rlt_n_Sn : forall x, x < x + 1. Proof. intro. elim archimed with x; intros. destruct H0. apply Rlt_trans with (IZR (up x)); trivial. replace (IZR (up x)) with (x + (IZR (up x) - x))%R. apply Rplus_lt_compat_l; trivial. unfold Rminus. rewrite (Rplus_comm (IZR (up x)) (- x)). rewrite <- Rplus_assoc. rewrite Rplus_opp_r. apply Rplus_0_l. elim H0. unfold Rminus. rewrite (Rplus_comm (IZR (up x)) (- x)). rewrite <- Rplus_assoc. rewrite Rplus_opp_r. rewrite Rplus_0_l; trivial. Qed. Notation Rset := (Eqsth R). Notation Rext := (Eq_ext Rplus Rmult Ropp). Lemma Rlt_0_2 : 0 < 2. apply Rlt_trans with (0 + 1). apply Rlt_n_Sn. 
rewrite Rplus_comm. apply Rplus_lt_compat_l. replace 1 with (0 + 1). apply Rlt_n_Sn. apply Rplus_0_l. Qed. Lemma Rgen_phiPOS : forall x, InitialRing.gen_phiPOS1 1 Rplus Rmult x > 0. unfold Rgt. induction x; simpl; intros. apply Rlt_trans with (1 + 0). rewrite Rplus_comm. apply Rlt_n_Sn. apply Rplus_lt_compat_l. rewrite <- (Rmul_0_l Rset Rext RTheory 2). rewrite Rmult_comm. apply Rmult_lt_compat_l. apply Rlt_0_2. trivial. rewrite <- (Rmul_0_l Rset Rext RTheory 2). rewrite Rmult_comm. apply Rmult_lt_compat_l. apply Rlt_0_2. trivial. replace 1 with (0 + 1). apply Rlt_n_Sn. apply Rplus_0_l. Qed. Lemma Rgen_phiPOS_not_0 : forall x, InitialRing.gen_phiPOS1 1 Rplus Rmult x <> 0. red; intros. specialize (Rgen_phiPOS x). rewrite H; intro. apply (Rlt_asym 0 0); trivial. Qed. Lemma Zeq_bool_complete : forall x y, InitialRing.gen_phiZ 0%R 1%R Rplus Rmult Ropp x = InitialRing.gen_phiZ 0%R 1%R Rplus Rmult Ropp y -> Zeq_bool x y = true. Proof gen_phiZ_complete Rset Rext Rfield Rgen_phiPOS_not_0. Lemma Rdef_pow_add : forall (x:R) (n m:nat), pow x (n + m) = pow x n * pow x m. Proof. intros x n; elim n; simpl; auto with real. intros n0 H' m; rewrite H'; auto with real. Qed. Lemma R_power_theory : power_theory 1%R Rmult (@eq R) N.to_nat pow. Proof. constructor. destruct n. reflexivity. simpl. induction p. - rewrite Pos2Nat.inj_xI. simpl. now rewrite plus_0_r, Rdef_pow_add, IHp. - rewrite Pos2Nat.inj_xO. simpl. now rewrite plus_0_r, Rdef_pow_add, IHp. - simpl. rewrite Rmult_comm;apply Rmult_1_l. Qed. Ltac Rpow_tac t := match isnatcst t with | false => constr:(InitialRing.NotConstant) | _ => constr:(N.of_nat t) end. Add Field RField : Rfield (completeness Zeq_bool_complete, power_tac R_power_theory [Rpow_tac]). coq-8.4pl2/plugins/setoid_ring/Ring_theory.v0000640000175000001440000004372312010532755020307 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R -> R. Variable req : R -> R -> Prop. Variable Rsth : Equivalence req. Infix "*" := rmul. Infix "==" := req. Hypothesis mul_ext : Proper (req ==> req ==> req) rmul. Hypothesis mul_assoc : forall x y z, x * (y * z) == (x * y) * z. Fixpoint pow_pos (x:R) (i:positive) : R := match i with | xH => x | xO i => let p := pow_pos x i in p * p | xI i => let p := pow_pos x i in x * (p * p) end. Lemma pow_pos_swap x j : pow_pos x j * x == x * pow_pos x j. Proof. induction j; simpl; rewrite <- ?mul_assoc. - f_equiv. now do 2 (rewrite IHj, mul_assoc). - now do 2 (rewrite IHj, mul_assoc). - reflexivity. Qed. Lemma pow_pos_succ x j : pow_pos x (Pos.succ j) == x * pow_pos x j. Proof. induction j; simpl; try reflexivity. rewrite IHj, <- mul_assoc; f_equiv. now rewrite mul_assoc, pow_pos_swap, mul_assoc. Qed. Lemma pow_pos_add x i j : pow_pos x (i + j) == pow_pos x i * pow_pos x j. Proof. induction i using Pos.peano_ind. - now rewrite Pos.add_1_l, pow_pos_succ. - now rewrite Pos.add_succ_l, !pow_pos_succ, IHi, mul_assoc. Qed. Definition pow_N (x:R) (p:N) := match p with | N0 => rI | Npos p => pow_pos x p end. Definition id_phi_N (x:N) : N := x. Lemma pow_N_pow_N x n : pow_N x (id_phi_N n) == pow_N x n. Proof. reflexivity. Qed. End Power. Section DEFINITIONS. Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R). Variable req : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Infix "==" := req. Infix "+" := radd. Infix "*" := rmul. Infix "-" := rsub. Notation "- x" := (ropp x). 
(** Semi Ring *) Record semi_ring_theory : Prop := mk_srt { SRadd_0_l : forall n, 0 + n == n; SRadd_comm : forall n m, n + m == m + n ; SRadd_assoc : forall n m p, n + (m + p) == (n + m) + p; SRmul_1_l : forall n, 1*n == n; SRmul_0_l : forall n, 0*n == 0; SRmul_comm : forall n m, n*m == m*n; SRmul_assoc : forall n m p, n*(m*p) == (n*m)*p; SRdistr_l : forall n m p, (n + m)*p == n*p + m*p }. (** Almost Ring *) (*Almost ring are no ring : Ropp_def is missing **) Record almost_ring_theory : Prop := mk_art { ARadd_0_l : forall x, 0 + x == x; ARadd_comm : forall x y, x + y == y + x; ARadd_assoc : forall x y z, x + (y + z) == (x + y) + z; ARmul_1_l : forall x, 1 * x == x; ARmul_0_l : forall x, 0 * x == 0; ARmul_comm : forall x y, x * y == y * x; ARmul_assoc : forall x y z, x * (y * z) == (x * y) * z; ARdistr_l : forall x y z, (x + y) * z == (x * z) + (y * z); ARopp_mul_l : forall x y, -(x * y) == -x * y; ARopp_add : forall x y, -(x + y) == -x + -y; ARsub_def : forall x y, x - y == x + -y }. (** Ring *) Record ring_theory : Prop := mk_rt { Radd_0_l : forall x, 0 + x == x; Radd_comm : forall x y, x + y == y + x; Radd_assoc : forall x y z, x + (y + z) == (x + y) + z; Rmul_1_l : forall x, 1 * x == x; Rmul_comm : forall x y, x * y == y * x; Rmul_assoc : forall x y z, x * (y * z) == (x * y) * z; Rdistr_l : forall x y z, (x + y) * z == (x * z) + (y * z); Rsub_def : forall x y, x - y == x + -y; Ropp_def : forall x, x + (- x) == 0 }. (** Equality is extensional *) Record sring_eq_ext : Prop := mk_seqe { (* SRing operators are compatible with equality *) SRadd_ext : Proper (req ==> req ==> req) radd; SRmul_ext : Proper (req ==> req ==> req) rmul }. Record ring_eq_ext : Prop := mk_reqe { (* Ring operators are compatible with equality *) Radd_ext : Proper (req ==> req ==> req) radd; Rmul_ext : Proper (req ==> req ==> req) rmul; Ropp_ext : Proper (req ==> req) ropp }. (** Interpretation morphisms definition*) Section MORPHISM. Variable C:Type. Variable (cO cI : C) (cadd cmul csub : C->C->C) (copp : C->C). Variable ceqb : C->C->bool. (* [phi] est un morphisme de [C] dans [R] *) Variable phi : C -> R. Infix "+!" := cadd. Infix "-!" := csub. Infix "*!" := cmul. Notation "-! x" := (copp x). Infix "?=!" := ceqb. Notation "[ x ]" := (phi x). (*for semi rings*) Record semi_morph : Prop := mkRmorph { Smorph0 : [cO] == 0; Smorph1 : [cI] == 1; Smorph_add : forall x y, [x +! y] == [x]+[y]; Smorph_mul : forall x y, [x *! y] == [x]*[y]; Smorph_eq : forall x y, x?=!y = true -> [x] == [y] }. (* for rings*) Record ring_morph : Prop := mkmorph { morph0 : [cO] == 0; morph1 : [cI] == 1; morph_add : forall x y, [x +! y] == [x]+[y]; morph_sub : forall x y, [x -! y] == [x]-[y]; morph_mul : forall x y, [x *! y] == [x]*[y]; morph_opp : forall x, [-!x] == -[x]; morph_eq : forall x y, x?=!y = true -> [x] == [y] }. Section SIGN. Variable get_sign : C -> option C. Record sign_theory : Prop := mksign_th { sign_spec : forall c c', get_sign c = Some c' -> c ?=! -! c' = true }. End SIGN. Definition get_sign_None (c:C) := @None C. Lemma get_sign_None_th : sign_theory get_sign_None. Proof. constructor;intros;discriminate. Qed. Section DIV. Variable cdiv: C -> C -> C*C. Record div_theory : Prop := mkdiv_th { div_eucl_th : forall a b, let (q,r) := cdiv a b in [a] == [b *! q +! r] }. End DIV. End MORPHISM. (** Identity is a morphism *) Variable Rsth : Equivalence req. Variable reqb : R->R->bool. Hypothesis morph_req : forall x y, (reqb x y) = true -> x == y. Definition IDphi (x:R) := x. 
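(* Instance sketch (illustrative): the simplest [ring_morph] is the identity
   morphism [IDphi] just above, shown to be one in [IDmorph] just below; a
   concrete use is the [Zr] ring declared in ZArithRing.v earlier in this
   archive, which takes Z itself as coefficient ring and mentions
   [@IDphi Z] in its [div] argument. *)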
Lemma IDmorph : ring_morph rO rI radd rmul rsub ropp reqb IDphi. Proof. now apply (mkmorph rO rI radd rmul rsub ropp reqb IDphi). Qed. (** Specification of the power function *) Section POWER. Variable Cpow : Type. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Record power_theory : Prop := mkpow_th { rpow_pow_N : forall r n, req (rpow r (Cp_phi n)) (pow_N rI rmul r n) }. End POWER. Definition pow_N_th := mkpow_th id_phi_N (pow_N rI rmul) (pow_N_pow_N rI rmul Rsth). End DEFINITIONS. Section ALMOST_RING. Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R). Variable req : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Infix "==" := req. Infix "+" := radd. Infix "* " := rmul. Infix "-" := rsub. Notation "- x" := (ropp x). (** Leibniz equality leads to a setoid theory and is extensional*) Lemma Eqsth : Equivalence (@eq R). Proof. exact eq_equivalence. Qed. Lemma Eq_s_ext : sring_eq_ext radd rmul (@eq R). Proof. constructor;solve_proper. Qed. Lemma Eq_ext : ring_eq_ext radd rmul ropp (@eq R). Proof. constructor;solve_proper. Qed. Variable Rsth : Equivalence req. Section SEMI_RING. Variable SReqe : sring_eq_ext radd rmul req. Add Morphism radd : radd_ext1. exact (SRadd_ext SReqe). Qed. Add Morphism rmul : rmul_ext1. exact (SRmul_ext SReqe). Qed. Variable SRth : semi_ring_theory 0 1 radd rmul req. (** Every semi ring can be seen as an almost ring, by taking : -x = x and x - y = x + y *) Definition SRopp (x:R) := x. Notation "- x" := (SRopp x). Definition SRsub x y := x + -y. Notation "x - y " := (SRsub x y). Lemma SRopp_ext : forall x y, x == y -> -x == -y. Proof. intros x y H; exact H. Qed. Lemma SReqe_Reqe : ring_eq_ext radd rmul SRopp req. Proof. constructor. - exact (SRadd_ext SReqe). - exact (SRmul_ext SReqe). - exact SRopp_ext. Qed. Lemma SRopp_mul_l : forall x y, -(x * y) == -x * y. Proof. reflexivity. Qed. Lemma SRopp_add : forall x y, -(x + y) == -x + -y. Proof. reflexivity. Qed. Lemma SRsub_def : forall x y, x - y == x + -y. Proof. reflexivity. Qed. Lemma SRth_ARth : almost_ring_theory 0 1 radd rmul SRsub SRopp req. Proof (mk_art 0 1 radd rmul SRsub SRopp req (SRadd_0_l SRth) (SRadd_comm SRth) (SRadd_assoc SRth) (SRmul_1_l SRth) (SRmul_0_l SRth) (SRmul_comm SRth) (SRmul_assoc SRth) (SRdistr_l SRth) SRopp_mul_l SRopp_add SRsub_def). (** Identity morphism for semi-ring equipped with their almost-ring structure*) Variable reqb : R->R->bool. Hypothesis morph_req : forall x y, (reqb x y) = true -> x == y. Definition SRIDmorph : ring_morph 0 1 radd rmul SRsub SRopp req 0 1 radd rmul SRsub SRopp reqb (@IDphi R). Proof. now apply mkmorph. Qed. (* a semi_morph can be extended to a ring_morph for the almost_ring derived from a semi_ring, provided the ring is a setoid (we only need reflexivity) *) Variable C : Type. Variable (cO cI : C) (cadd cmul: C->C->C). Variable (ceqb : C -> C -> bool). Variable phi : C -> R. Variable Smorph : semi_morph rO rI radd rmul req cO cI cadd cmul ceqb phi. Lemma SRmorph_Rmorph : ring_morph rO rI radd rmul SRsub SRopp req cO cI cadd cmul cadd (fun x => x) ceqb phi. Proof. case Smorph; now constructor. Qed. End SEMI_RING. Variable Reqe : ring_eq_ext radd rmul ropp req. Add Morphism radd : radd_ext2. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext2. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext2. exact (Ropp_ext Reqe). Qed. Section RING. Variable Rth : ring_theory 0 1 radd rmul rsub ropp req. (** Rings are almost rings*) Lemma Rmul_0_l x : 0 * x == 0. Proof. 
setoid_replace (0*x) with ((0+1)*x + -x). now rewrite (Radd_0_l Rth), (Rmul_1_l Rth), (Ropp_def Rth). rewrite (Rdistr_l Rth), (Rmul_1_l Rth). rewrite <- (Radd_assoc Rth), (Ropp_def Rth). now rewrite (Radd_comm Rth), (Radd_0_l Rth). Qed. Lemma Ropp_mul_l x y : -(x * y) == -x * y. Proof. rewrite <-(Radd_0_l Rth (- x * y)). rewrite (Radd_comm Rth), <-(Ropp_def Rth (x*y)). rewrite (Radd_assoc Rth), <- (Rdistr_l Rth). rewrite (Rth.(Radd_comm) (-x)), (Ropp_def Rth). now rewrite Rmul_0_l, (Radd_0_l Rth). Qed. Lemma Ropp_add x y : -(x + y) == -x + -y. Proof. rewrite <- ((Radd_0_l Rth) (-(x+y))). rewrite <- ((Ropp_def Rth) x). rewrite <- ((Radd_0_l Rth) (x + - x + - (x + y))). rewrite <- ((Ropp_def Rth) y). rewrite ((Radd_comm Rth) x). rewrite ((Radd_comm Rth) y). rewrite <- ((Radd_assoc Rth) (-y)). rewrite <- ((Radd_assoc Rth) (- x)). rewrite ((Radd_assoc Rth) y). rewrite ((Radd_comm Rth) y). rewrite <- ((Radd_assoc Rth) (- x)). rewrite ((Radd_assoc Rth) y). rewrite ((Radd_comm Rth) y), (Ropp_def Rth). rewrite ((Radd_comm Rth) (-x) 0), (Radd_0_l Rth). now apply (Radd_comm Rth). Qed. Lemma Ropp_opp x : - -x == x. Proof. rewrite <- (Radd_0_l Rth (- -x)). rewrite <- (Ropp_def Rth x). rewrite <- (Radd_assoc Rth), (Ropp_def Rth). rewrite ((Radd_comm Rth) x); now apply (Radd_0_l Rth). Qed. Lemma Rth_ARth : almost_ring_theory 0 1 radd rmul rsub ropp req. Proof (mk_art 0 1 radd rmul rsub ropp req (Radd_0_l Rth) (Radd_comm Rth) (Radd_assoc Rth) (Rmul_1_l Rth) Rmul_0_l (Rmul_comm Rth) (Rmul_assoc Rth) (Rdistr_l Rth) Ropp_mul_l Ropp_add (Rsub_def Rth)). (** Every semi morphism between two rings is a morphism*) Variable C : Type. Variable (cO cI : C) (cadd cmul csub: C->C->C) (copp : C -> C). Variable (ceq : C -> C -> Prop) (ceqb : C -> C -> bool). Variable phi : C -> R. Infix "+!" := cadd. Infix "*!" := cmul. Infix "-!" := csub. Notation "-! x" := (copp x). Notation "?=!" := ceqb. Notation "[ x ]" := (phi x). Variable Csth : Equivalence ceq. Variable Ceqe : ring_eq_ext cadd cmul copp ceq. Add Setoid C ceq Csth as C_setoid. Add Morphism cadd : cadd_ext. exact (Radd_ext Ceqe). Qed. Add Morphism cmul : cmul_ext. exact (Rmul_ext Ceqe). Qed. Add Morphism copp : copp_ext. exact (Ropp_ext Ceqe). Qed. Variable Cth : ring_theory cO cI cadd cmul csub copp ceq. Variable Smorph : semi_morph 0 1 radd rmul req cO cI cadd cmul ceqb phi. Variable phi_ext : forall x y, ceq x y -> [x] == [y]. Add Morphism phi : phi_ext1. exact phi_ext. Qed. Lemma Smorph_opp x : [-!x] == -[x]. Proof. rewrite <- (Rth.(Radd_0_l) [-!x]). rewrite <- ((Ropp_def Rth) [x]). rewrite ((Radd_comm Rth) [x]). rewrite <- (Radd_assoc Rth). rewrite <- (Smorph_add Smorph). rewrite (Ropp_def Cth). rewrite (Smorph0 Smorph). rewrite (Radd_comm Rth (-[x])). now apply (Radd_0_l Rth). Qed. Lemma Smorph_sub x y : [x -! y] == [x] - [y]. Proof. rewrite (Rsub_def Cth), (Rsub_def Rth). now rewrite (Smorph_add Smorph), Smorph_opp. Qed. Lemma Smorph_morph : ring_morph 0 1 radd rmul rsub ropp req cO cI cadd cmul csub copp ceqb phi. Proof (mkmorph 0 1 radd rmul rsub ropp req cO cI cadd cmul csub copp ceqb phi (Smorph0 Smorph) (Smorph1 Smorph) (Smorph_add Smorph) Smorph_sub (Smorph_mul Smorph) Smorph_opp (Smorph_eq Smorph)). End RING. (** Useful lemmas on almost ring *) Variable ARth : almost_ring_theory 0 1 radd rmul rsub ropp req. Lemma ARth_SRth : semi_ring_theory 0 1 radd rmul req. Proof. elim ARth; intros. constructor; trivial. Qed. Instance ARsub_ext : Proper (req ==> req ==> req) rsub. Proof. intros x1 x2 Ex y1 y2 Ey. now rewrite !(ARsub_def ARth), Ex, Ey. Qed. 
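(* Illustrative note (sketch): registering [ARsub_ext] as a [Proper]
   instance is what lets setoid rewriting descend under subtraction, e.g.
   from [H : x == y] one may [rewrite H] inside a goal mentioning [z - x],
   just as [Radd_ext] and [Rmul_ext] allow it under [+] and [*]. *)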
Ltac mrewrite := repeat first [ rewrite (ARadd_0_l ARth) | rewrite <- ((ARadd_comm ARth) 0) | rewrite (ARmul_1_l ARth) | rewrite <- ((ARmul_comm ARth) 1) | rewrite (ARmul_0_l ARth) | rewrite <- ((ARmul_comm ARth) 0) | rewrite (ARdistr_l ARth) | reflexivity | match goal with | |- context [?z * (?x + ?y)] => rewrite ((ARmul_comm ARth) z (x+y)) end]. Lemma ARadd_0_r x : x + 0 == x. Proof. mrewrite. Qed. Lemma ARmul_1_r x : x * 1 == x. Proof. mrewrite. Qed. Lemma ARmul_0_r x : x * 0 == 0. Proof. mrewrite. Qed. Lemma ARdistr_r x y z : z * (x + y) == z*x + z*y. Proof. mrewrite. now rewrite !(ARth.(ARmul_comm) z). Qed. Lemma ARadd_assoc1 x y z : (x + y) + z == (y + z) + x. Proof. now rewrite <-(ARth.(ARadd_assoc) x), (ARth.(ARadd_comm) x). Qed. Lemma ARadd_assoc2 x y z : (y + x) + z == (y + z) + x. Proof. now rewrite <- !(ARadd_assoc ARth), ((ARadd_comm ARth) x). Qed. Lemma ARmul_assoc1 x y z : (x * y) * z == (y * z) * x. Proof. now rewrite <- ((ARmul_assoc ARth) x), ((ARmul_comm ARth) x). Qed. Lemma ARmul_assoc2 x y z : (y * x) * z == (y * z) * x. Proof. now rewrite <- !(ARmul_assoc ARth), ((ARmul_comm ARth) x). Qed. Lemma ARopp_mul_r x y : - (x * y) == x * -y. Proof. rewrite ((ARmul_comm ARth) x y), (ARopp_mul_l ARth). now apply (ARmul_comm ARth). Qed. Lemma ARopp_zero : -0 == 0. Proof. now rewrite <- (ARmul_0_r 0), (ARopp_mul_l ARth), !ARmul_0_r. Qed. End ALMOST_RING. Section AddRing. (* Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R). Variable req : R -> R -> Prop. *) Inductive ring_kind : Type := | Abstract | Computational (R:Type) (req : R -> R -> Prop) (reqb : R -> R -> bool) (_ : forall x y, (reqb x y) = true -> req x y) | Morphism (R : Type) (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R) (req : R -> R -> Prop) (C : Type) (cO cI : C) (cadd cmul csub : C->C->C) (copp : C->C) (ceqb : C->C->bool) phi (_ : ring_morph rO rI radd rmul rsub ropp req cO cI cadd cmul csub copp ceqb phi). End AddRing. (** Some simplification tactics*) Ltac gen_reflexivity Rsth := apply (Seq_refl _ _ Rsth). Ltac gen_srewrite Rsth Reqe ARth := repeat first [ gen_reflexivity Rsth | progress rewrite (ARopp_zero Rsth Reqe ARth) | rewrite (ARadd_0_l ARth) | rewrite (ARadd_0_r Rsth ARth) | rewrite (ARmul_1_l ARth) | rewrite (ARmul_1_r Rsth ARth) | rewrite (ARmul_0_l ARth) | rewrite (ARmul_0_r Rsth ARth) | rewrite (ARdistr_l ARth) | rewrite (ARdistr_r Rsth Reqe ARth) | rewrite (ARadd_assoc ARth) | rewrite (ARmul_assoc ARth) | progress rewrite (ARopp_add ARth) | progress rewrite (ARsub_def ARth) | progress rewrite <- (ARopp_mul_l ARth) | progress rewrite <- (ARopp_mul_r Rsth Reqe ARth) ]. Ltac gen_srewrite_sr Rsth Reqe ARth := repeat first [ gen_reflexivity Rsth | progress rewrite (ARopp_zero Rsth Reqe ARth) | rewrite (ARadd_0_l ARth) | rewrite (ARadd_0_r Rsth ARth) | rewrite (ARmul_1_l ARth) | rewrite (ARmul_1_r Rsth ARth) | rewrite (ARmul_0_l ARth) | rewrite (ARmul_0_r Rsth ARth) | rewrite (ARdistr_l ARth) | rewrite (ARdistr_r Rsth Reqe ARth) | rewrite (ARadd_assoc ARth) | rewrite (ARmul_assoc ARth) ]. Ltac gen_add_push add Rsth Reqe ARth x := repeat (match goal with | |- context [add (add ?y x) ?z] => progress rewrite (ARadd_assoc2 Rsth Reqe ARth x y z) | |- context [add (add x ?y) ?z] => progress rewrite (ARadd_assoc1 Rsth ARth x y z) | |- context [(add x ?y)] => progress rewrite (ARadd_comm ARth x y) end). 
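(* Illustrative example (sketch): [gen_add_push add Rsth Reqe ARth x]
   repeatedly re-associates and commutes so that [x] ends up outermost in a
   sum, e.g. a subterm [(x + a) + b] is rewritten to [(a + b) + x]; the
   normalisation proofs use this to line up matching summands. *)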
Ltac gen_mul_push mul Rsth Reqe ARth x := repeat (match goal with | |- context [mul (mul ?y x) ?z] => progress rewrite (ARmul_assoc2 Rsth Reqe ARth x y z) | |- context [mul (mul x ?y) ?z] => progress rewrite (ARmul_assoc1 Rsth ARth x y z) | |- context [(mul x ?y)] => progress rewrite (ARmul_comm ARth x y) end). coq-8.4pl2/plugins/setoid_ring/Ring_base.v0000640000175000001440000000146112010532755017700 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 1 | xO p => (1 + 1) * (gen_phiPOS1 p) | xI p => 1 + ((1 + 1) * (gen_phiPOS1 p)) end. Fixpoint gen_phiPOS (p:positive) : R := match p with | xH => 1 | xO xH => (1 + 1) | xO p => (1 + 1) * (gen_phiPOS p) | xI xH => 1 + (1 +1) | xI p => 1 + ((1 + 1) * (gen_phiPOS p)) end. Definition gen_phiZ1 z := match z with | Zpos p => gen_phiPOS1 p | Z0 => 0 | Zneg p => -(gen_phiPOS1 p) end. Definition gen_phiZ z := match z with | Zpos p => gen_phiPOS p | Z0 => 0 | Zneg p => -(gen_phiPOS p) end. Notation "[ x ]" := (gen_phiZ x). Definition get_signZ z := match z with | Zneg p => Some (Zpos p) | _ => None end. Ltac norm := gen_rewrite. Ltac add_push := Ncring.gen_add_push. Ltac rsimpl := simpl. Lemma same_gen : forall x, gen_phiPOS1 x == gen_phiPOS x. Proof. induction x;rsimpl. rewrite IHx. destruct x;simpl;norm. rewrite IHx;destruct x;simpl;norm. reflexivity. Qed. Lemma ARgen_phiPOS_Psucc : forall x, gen_phiPOS1 (Pos.succ x) == 1 + (gen_phiPOS1 x). Proof. induction x;rsimpl;norm. rewrite IHx. gen_rewrite. add_push 1. reflexivity. Qed. Lemma ARgen_phiPOS_add : forall x y, gen_phiPOS1 (x + y) == (gen_phiPOS1 x) + (gen_phiPOS1 y). Proof. induction x;destruct y;simpl;norm. rewrite Pos.add_carry_spec. rewrite ARgen_phiPOS_Psucc. rewrite IHx;norm. add_push (gen_phiPOS1 y);add_push 1;reflexivity. rewrite IHx;norm;add_push (gen_phiPOS1 y);reflexivity. rewrite ARgen_phiPOS_Psucc;norm;add_push 1;reflexivity. rewrite IHx;norm;add_push(gen_phiPOS1 y); add_push 1;reflexivity. rewrite IHx;norm;add_push(gen_phiPOS1 y);reflexivity. add_push 1;reflexivity. rewrite ARgen_phiPOS_Psucc;norm;add_push 1;reflexivity. Qed. Lemma ARgen_phiPOS_mult : forall x y, gen_phiPOS1 (x * y) == gen_phiPOS1 x * gen_phiPOS1 y. Proof. induction x;intros;simpl;norm. rewrite ARgen_phiPOS_add;simpl;rewrite IHx;norm. rewrite IHx;reflexivity. Qed. (*morphisms are extensionaly equal*) Lemma same_genZ : forall x, [x] == gen_phiZ1 x. Proof. destruct x;rsimpl; try rewrite same_gen; reflexivity. Qed. Lemma gen_Zeqb_ok : forall x y, Zeq_bool x y = true -> [x] == [y]. Proof. intros x y H7. assert (H10 := Zeq_bool_eq x y H7);unfold IDphi in H10. rewrite H10;reflexivity. Qed. Lemma gen_phiZ1_add_pos_neg : forall x y, gen_phiZ1 (Z.pos_sub x y) == gen_phiPOS1 x + -gen_phiPOS1 y. Proof. intros x y. generalize (Z.pos_sub_discr x y). destruct (Z.pos_sub x y) as [|p|p]; intros; subst. - now rewrite ring_opp_def. - rewrite ARgen_phiPOS_add;simpl;norm. add_push (gen_phiPOS1 p). rewrite ring_opp_def;norm. - rewrite ARgen_phiPOS_add;simpl;norm. rewrite ring_opp_def;norm. Qed. Lemma match_compOpp : forall x (B:Type) (be bl bg:B), match CompOpp x with Eq => be | Lt => bl | Gt => bg end = match x with Eq => be | Lt => bg | Gt => bl end. Proof. destruct x;simpl;intros;trivial. Qed. Lemma gen_phiZ_add : forall x y, [x + y] == [x] + [y]. Proof. intros x y; repeat rewrite same_genZ; generalize x y;clear x y. induction x;destruct y;simpl;norm. apply ARgen_phiPOS_add. apply gen_phiZ1_add_pos_neg. 
rewrite gen_phiZ1_add_pos_neg. rewrite ring_add_comm. reflexivity. rewrite ARgen_phiPOS_add. rewrite ring_opp_add. reflexivity. Qed. Lemma gen_phiZ_opp : forall x, [- x] == - [x]. Proof. intros x. repeat rewrite same_genZ. generalize x ;clear x. induction x;simpl;norm. rewrite ring_opp_opp. reflexivity. Qed. Lemma gen_phiZ_mul : forall x y, [x * y] == [x] * [y]. Proof. intros x y;repeat rewrite same_genZ. destruct x;destruct y;simpl;norm; rewrite ARgen_phiPOS_mult;try (norm;fail). rewrite ring_opp_opp ;reflexivity. Qed. Lemma gen_phiZ_ext : forall x y : Z, x = y -> [x] == [y]. Proof. intros;subst;reflexivity. Qed. (*proof that [.] satisfies morphism specifications*) Global Instance gen_phiZ_morph : (@Ring_morphism (Z:Type) R _ _ _ _ _ _ _ Zops Zr _ _ _ _ _ _ _ _ _ gen_phiZ) . (* beurk!*) apply Build_Ring_morphism; simpl;try reflexivity. apply gen_phiZ_add. intros. rewrite ring_sub_def. replace (x-y)%Z with (x + (-y))%Z. now rewrite gen_phiZ_add, gen_phiZ_opp, ring_sub_def. reflexivity. apply gen_phiZ_mul. apply gen_phiZ_opp. apply gen_phiZ_ext. Defined. End ZMORPHISM. Instance multiplication_phi_ring{R:Type}`{Ring R} : Multiplication := {multiplication x y := (gen_phiZ x) * y}. coq-8.4pl2/plugins/setoid_ring/Integral_domain.v0000640000175000001440000000243011776416511021111 0ustar notinusersRequire Export Cring. (* Definition of integral domains: commutative ring without zero divisor *) Class Integral_domain {R : Type}`{Rcr:Cring R} := { integral_domain_product: forall x y, x * y == 0 -> x == 0 \/ y == 0; integral_domain_one_zero: not (1 == 0)}. Section integral_domain. Context {R:Type}`{Rid:Integral_domain R}. Lemma integral_domain_minus_one_zero: ~ - (1:R) == 0. red;intro. apply integral_domain_one_zero. assert (0 == - (0:R)). cring. rewrite H0. rewrite <- H. cring. Qed. Definition pow (r : R) (n : nat) := Ring_theory.pow_N 1 mul r (N.of_nat n). Lemma pow_not_zero: forall p n, pow p n == 0 -> p == 0. induction n. unfold pow; simpl. intros. absurd (1 == 0). simpl. apply integral_domain_one_zero. trivial. setoid_replace (pow p (S n)) with (p * (pow p n)). intros. case (integral_domain_product p (pow p n) H). trivial. trivial. unfold pow; simpl. clear IHn. induction n; simpl; try cring. rewrite Ring_theory.pow_pos_succ. cring. apply ring_setoid. apply ring_mult_comp. apply ring_mul_assoc. Qed. Lemma Rintegral_domain_pow: forall c p r, ~c == 0 -> c * (pow p r) == ring0 -> p == ring0. intros. case (integral_domain_product c (pow p r) H0). intros; absurd (c == ring0); auto. intros. apply pow_not_zero with r. trivial. Qed. End integral_domain. coq-8.4pl2/plugins/setoid_ring/Ncring_polynom.v0000640000175000001440000004067212010532755021013 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* : non commutative polynomials on a commutative ring A *) Set Implicit Arguments. Require Import Setoid. Require Import BinList. Require Import BinPos. Require Import BinNat. Require Import BinInt. Require Export Ring_polynom. (* n'utilise que PExpr *) Require Export Ncring. Section MakeRingPol. Context (C R:Type) `{Rh:Ring_morphism C R}. Variable phiCR_comm: forall (c:C)(x:R), x * [c] == [c] * x. Ltac rsimpl := repeat (gen_rewrite || rewrite phiCR_comm). Ltac add_push := gen_add_push . (* Definition of non commutative multivariable polynomials with coefficients in C : *) Inductive Pol : Type := | Pc : C -> Pol | PX : Pol -> positive -> positive -> Pol -> Pol. 
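(* Representation sketch (illustrative, see the interpretation stated just
   below): a polynomial is stored in sparse Horner form, e.g. with integer
   coefficients the polynomial 2*X_1^2 + 3 would be written
     PX (Pc 2) 1 2 (Pc 3)
   i.e. a sub-polynomial, a variable index, an exponent and a tail. *)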
(* PX P i n Q represents P * X_i^n + Q *) Definition cO:C . exact ring0. Defined. Definition cI:C . exact ring1. Defined. Definition P0 := Pc 0. Definition P1 := Pc 1. Variable Ceqb:C->C->bool. Class Equalityb (A : Type):= {equalityb : A -> A -> bool}. Notation "x =? y" := (equalityb x y) (at level 70, no associativity). Variable Ceqb_eq: forall x y:C, Ceqb x y = true -> (x == y). Instance equalityb_coef : Equalityb C := {equalityb x y := Ceqb x y}. Fixpoint Peq (P P' : Pol) {struct P'} : bool := match P, P' with | Pc c, Pc c' => c =? c' | PX P i n Q, PX P' i' n' Q' => match Pos.compare i i', Pos.compare n n' with | Eq, Eq => if Peq P P' then Peq Q Q' else false | _,_ => false end | _, _ => false end. Instance equalityb_pol : Equalityb Pol := {equalityb x y := Peq x y}. (* Q a ses variables de queue < i *) Definition mkPX P i n Q := match P with | Pc c => if c =? 0 then Q else PX P i n Q | PX P' i' n' Q' => match Pos.compare i i' with | Eq => if Q' =? P0 then PX P' i (n + n') Q else PX P i n Q | _ => PX P i n Q end end. Definition mkXi i n := PX P1 i n P0. Definition mkX i := mkXi i 1. (** Opposite of addition *) Fixpoint Popp (P:Pol) : Pol := match P with | Pc c => Pc (- c) | PX P i n Q => PX (Popp P) i n (Popp Q) end. Notation "-- P" := (Popp P)(at level 30). (** Addition et subtraction *) Fixpoint PaddCl (c:C)(P:Pol) {struct P} : Pol := match P with | Pc c1 => Pc (c + c1) | PX P i n Q => PX P i n (PaddCl c Q) end. (* Q quelconque *) Section PaddX. Variable Padd:Pol->Pol->Pol. Variable P:Pol. (* Xi^n * P + Q les variables de tete de Q ne sont pas forcement < i mais Q est normalis : variables de tete decroissantes *) Fixpoint PaddX (i n:positive)(Q:Pol){struct Q}:= match Q with | Pc c => mkPX P i n Q | PX P' i' n' Q' => match Pos.compare i i' with | (* i > i' *) Gt => mkPX P i n Q | (* i < i' *) Lt => mkPX P' i' n' (PaddX i n Q') | (* i = i' *) Eq => match Z.pos_sub n n' with | (* n > n' *) Zpos k => mkPX (PaddX i k P') i' n' Q' | (* n = n' *) Z0 => mkPX (Padd P P') i n Q' | (* n < n' *) Zneg k => mkPX (Padd P (mkPX P' i k P0)) i n Q' end end end. End PaddX. Fixpoint Padd (P1 P2: Pol) {struct P1} : Pol := match P1 with | Pc c => PaddCl c P2 | PX P' i' n' Q' => PaddX Padd P' i' n' (Padd Q' P2) end. Notation "P ++ P'" := (Padd P P'). Definition Psub(P P':Pol):= P ++ (--P'). Notation "P -- P'" := (Psub P P')(at level 50). (** Multiplication *) Fixpoint PmulC_aux (P:Pol) (c:C) {struct P} : Pol := match P with | Pc c' => Pc (c' * c) | PX P i n Q => mkPX (PmulC_aux P c) i n (PmulC_aux Q c) end. Definition PmulC P c := if c =? 0 then P0 else if c =? 1 then P else PmulC_aux P c. Fixpoint Pmul (P1 P2 : Pol) {struct P2} : Pol := match P2 with | Pc c => PmulC P1 c | PX P i n Q => PaddX Padd (Pmul P1 P) i n (Pmul P1 Q) end. Notation "P ** P'" := (Pmul P P')(at level 40). Definition Psquare (P:Pol) : Pol := P ** P. (** Evaluation of a polynomial towards R *) Fixpoint Pphi(l:list R) (P:Pol) {struct P} : R := match P with | Pc c => [c] | PX P i n Q => let x := nth 0 i l in let xn := pow_pos x n in (Pphi l P) * xn + (Pphi l Q) end. Reserved Notation "P @ l " (at level 10, no associativity). Notation "P @ l " := (Pphi l P). (** Proofs *) Ltac destr_pos_sub H := match goal with |- context [Z.pos_sub ?x ?y] => assert (H := Z.pos_sub_discr x y); destruct (Z.pos_sub x y) end. Lemma Peq_ok : forall P P', (P =? P') = true -> forall l, P@l == P'@ l. Proof. induction P;destruct P';simpl;intros ;try easy. - now apply ring_morphism_eq, Ceqb_eq. - specialize (IHP1 P'1). specialize (IHP2 P'2). simpl in IHP1, IHP2. 
destruct (Pos.compare_spec p p1); try discriminate; destruct (Pos.compare_spec p0 p2); try discriminate. destruct (Peq P2 P'1); try discriminate. subst; now rewrite IHP1, IHP2. Qed. Lemma Pphi0 : forall l, P0@l == 0. Proof. intros;simpl. rewrite ring_morphism0. reflexivity. Qed. Lemma Pphi1 : forall l, P1@l == 1. Proof. intros;simpl; rewrite ring_morphism1. reflexivity. Qed. Lemma mkPX_ok : forall l P i n Q, (mkPX P i n Q)@l == P@l * (pow_pos (nth 0 i l) n) + Q@l. Proof. intros l P i n Q;unfold mkPX. destruct P;try (simpl;reflexivity). assert (Hh := ring_morphism_eq c 0). simpl; case_eq (Ceqb c 0);simpl;try reflexivity. intros. rewrite Hh. rewrite ring_morphism0. rsimpl. apply Ceqb_eq. trivial. destruct (Pos.compare_spec i p). assert (Hh := @Peq_ok P3 P0). case_eq (P3=? P0). intro. simpl. rewrite Hh. rewrite Pphi0. rsimpl. rewrite Pos.add_comm. rewrite pow_pos_add;rsimpl. subst;trivial. reflexivity. trivial. intros. simpl. reflexivity. simpl. reflexivity. simpl. reflexivity. Qed. Ltac Esimpl := repeat (progress ( match goal with | |- context [?P@?l] => match P with | P0 => rewrite (Pphi0 l) | P1 => rewrite (Pphi1 l) | (mkPX ?P ?i ?n ?Q) => rewrite (mkPX_ok l P i n Q) end | |- context [[?c]] => match c with | 0 => rewrite ring_morphism0 | 1 => rewrite ring_morphism1 | ?x + ?y => rewrite ring_morphism_add | ?x * ?y => rewrite ring_morphism_mul | ?x - ?y => rewrite ring_morphism_sub | - ?x => rewrite ring_morphism_opp end end)); simpl; rsimpl. Lemma PaddCl_ok : forall c P l, (PaddCl c P)@l == [c] + P@l . Proof. induction P; simpl; intros; Esimpl; try reflexivity. rewrite IHP2. rsimpl. rewrite (ring_add_comm (P2 @ l * pow_pos (nth 0 p l) p0) [c]). reflexivity. Qed. Lemma PmulC_aux_ok : forall c P l, (PmulC_aux P c)@l == P@l * [c]. Proof. induction P;simpl;intros. rewrite ring_morphism_mul. try reflexivity. simpl. Esimpl. rewrite IHP1;rewrite IHP2;rsimpl. Qed. Lemma PmulC_ok : forall c P l, (PmulC P c)@l == P@l * [c]. Proof. intros c P l; unfold PmulC. assert (Hh:= ring_morphism_eq c 0);case_eq (c =? 0). intros. rewrite Hh;Esimpl. apply Ceqb_eq;trivial. assert (H1h:= ring_morphism_eq c 1);case_eq (c =? 1);intros. rewrite H1h;Esimpl. apply Ceqb_eq;trivial. apply PmulC_aux_ok. Qed. Lemma Popp_ok : forall P l, (--P)@l == - P@l. Proof. induction P;simpl;intros. Esimpl. rewrite IHP1;rewrite IHP2;rsimpl. Qed. Ltac Esimpl2 := Esimpl; repeat (progress ( match goal with | |- context [(PaddCl ?c ?P)@?l] => rewrite (PaddCl_ok c P l) | |- context [(PmulC ?P ?c)@?l] => rewrite (PmulC_ok c P l) | |- context [(--?P)@?l] => rewrite (Popp_ok P l) end)); Esimpl. Lemma PaddXPX: forall P i n Q, PaddX Padd P i n Q = match Q with | Pc c => mkPX P i n Q | PX P' i' n' Q' => match Pos.compare i i' with | (* i > i' *) Gt => mkPX P i n Q | (* i < i' *) Lt => mkPX P' i' n' (PaddX Padd P i n Q') | (* i = i' *) Eq => match Z.pos_sub n n' with | (* n > n' *) Zpos k => mkPX (PaddX Padd P i k P') i' n' Q' | (* n = n' *) Z0 => mkPX (Padd P P') i n Q' | (* n < n' *) Zneg k => mkPX (Padd P (mkPX P' i k P0)) i n Q' end end end. induction Q; reflexivity. Qed. Lemma PaddX_ok2 : forall P2, (forall P l, (P2 ++ P) @ l == P2 @ l + P @ l) /\ (forall P k n l, (PaddX Padd P2 k n P) @ l == P2 @ l * pow_pos (nth 0 k l) n + P @ l). induction P2;simpl;intros. split. intros. apply PaddCl_ok. induction P. unfold PaddX. intros. rewrite mkPX_ok. simpl. rsimpl. intros. simpl. destruct (Pos.compare_spec k p) as [Hh|Hh|Hh]. destr_pos_sub H1h. Esimpl2. rewrite Hh; trivial. rewrite H1h. reflexivity. simpl. rewrite mkPX_ok. rewrite IHP1. Esimpl2. 
rewrite Pos.add_comm in H1h. rewrite H1h. rewrite pow_pos_add. Esimpl2. rewrite Hh; trivial. reflexivity. rewrite mkPX_ok. rewrite PaddCl_ok. Esimpl2. rewrite Pos.add_comm in H1h. rewrite H1h. Esimpl2. rewrite pow_pos_add. Esimpl2. rewrite Hh; trivial. reflexivity. rewrite mkPX_ok. rewrite IHP2. Esimpl2. rewrite (ring_add_comm (P2 @ l * pow_pos (nth 0 p l) p0) ([c] * pow_pos (nth 0 k l) n)). reflexivity. assert (H1h := ring_morphism_eq c 0);case_eq (Ceqb c 0); intros; simpl. rewrite H1h;trivial. Esimpl2. apply Ceqb_eq; trivial. reflexivity. decompose [and] IHP2_1. decompose [and] IHP2_2. clear IHP2_1 IHP2_2. split. intros. rewrite H0. rewrite H1. Esimpl2. induction P. unfold PaddX. intros. rewrite mkPX_ok. simpl. reflexivity. intros. rewrite PaddXPX. destruct (Pos.compare_spec k p1) as [H3h|H3h|H3h]. destr_pos_sub H4h. rewrite mkPX_ok. simpl. rewrite H0. rewrite H1. Esimpl2. rewrite H4h. rewrite H3h;trivial. reflexivity. rewrite mkPX_ok. rewrite IHP1. Esimpl2. rewrite H3h;trivial. rewrite Pos.add_comm in H4h. rewrite H4h. rewrite pow_pos_add. Esimpl2. rewrite mkPX_ok. simpl. rewrite H0. rewrite H1. rewrite mkPX_ok. Esimpl2. rewrite H3h;trivial. rewrite Pos.add_comm in H4h. rewrite H4h. rewrite pow_pos_add. Esimpl2. rewrite mkPX_ok. simpl. rewrite IHP2. Esimpl2. gen_add_push (P2 @ l * pow_pos (nth 0 p1 l) p2). try reflexivity. rewrite mkPX_ok. simpl. reflexivity. Qed. Lemma Padd_ok : forall P Q l, (P ++ Q) @ l == P @ l + Q @ l. intro P. elim (PaddX_ok2 P); auto. Qed. Lemma PaddX_ok : forall P2 P k n l, (PaddX Padd P2 k n P) @ l == P2 @ l * pow_pos (nth 0 k l) n + P @ l. intro P2. elim (PaddX_ok2 P2); auto. Qed. Lemma Psub_ok : forall P' P l, (P -- P')@l == P@l - P'@l. unfold Psub. intros. rewrite Padd_ok. rewrite Popp_ok. rsimpl. Qed. Lemma Pmul_ok : forall P P' l, (P**P')@l == P@l * P'@l. induction P'; simpl; intros. rewrite PmulC_ok. reflexivity. rewrite PaddX_ok. rewrite IHP'1. rewrite IHP'2. Esimpl2. Qed. Lemma Psquare_ok : forall P l, (Psquare P)@l == P@l * P@l. Proof. intros. unfold Psquare. apply Pmul_ok. Qed. (** Definition of polynomial expressions *) (* Inductive PExpr : Type := | PEc : C -> PExpr | PEX : positive -> PExpr | PEadd : PExpr -> PExpr -> PExpr | PEsub : PExpr -> PExpr -> PExpr | PEmul : PExpr -> PExpr -> PExpr | PEopp : PExpr -> PExpr | PEpow : PExpr -> N -> PExpr. *) (** Specification of the power function *) Section POWER. Variable Cpow : Set. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Record power_theory : Prop := mkpow_th { rpow_pow_N : forall r n, (rpow r (Cp_phi n))== (pow_N r n) }. End POWER. Variable Cpow : Set. Variable Cp_phi : N -> Cpow. Variable rpow : R -> Cpow -> R. Variable pow_th : power_theory Cp_phi rpow. (** evaluation of polynomial expressions towards R *) Fixpoint PEeval (l:list R) (pe:PExpr C) {struct pe} : R := match pe with | PEc c => [c] | PEX j => nth 0 j l | PEadd pe1 pe2 => (PEeval l pe1) + (PEeval l pe2) | PEsub pe1 pe2 => (PEeval l pe1) - (PEeval l pe2) | PEmul pe1 pe2 => (PEeval l pe1) * (PEeval l pe2) | PEopp pe1 => - (PEeval l pe1) | PEpow pe1 n => rpow (PEeval l pe1) (Cp_phi n) end. Strategy expand [PEeval]. Definition mk_X j := mkX j. (** Correctness proofs *) Lemma mkX_ok : forall p l, nth 0 p l == (mk_X p) @ l. Proof. destruct p;simpl;intros;Esimpl;trivial. Qed. Ltac Esimpl3 := repeat match goal with | |- context [(?P1 ++ ?P2)@?l] => rewrite (Padd_ok P1 P2 l) | |- context [(?P1 -- ?P2)@?l] => rewrite (Psub_ok P1 P2 l) end;try Esimpl2;try reflexivity;try apply ring_add_comm. 
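(* A small worked example of PEeval (writing x1 for nth 0 1 l and
   assuming some coefficient c : C): the expression
   PEmul (PEX _ 1) (PEadd (PEX _ 1) (PEc c)) is mapped by PEeval l to
   x1 * (x1 + [c]).  The normalization function norm_aux below turns
   such expressions into Pol normal forms whose evaluation agrees with
   PEeval, which is what norm_aux_spec states. *)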
(* Power using the chinise algorithm *) Section POWER2. Variable subst_l : Pol -> Pol. Fixpoint Ppow_pos (res P:Pol) (p:positive){struct p} : Pol := match p with | xH => subst_l (Pmul P res) | xO p => Ppow_pos (Ppow_pos res P p) P p | xI p => subst_l (Pmul P (Ppow_pos (Ppow_pos res P p) P p)) end. Definition Ppow_N P n := match n with | N0 => P1 | Npos p => Ppow_pos P1 P p end. Fixpoint pow_pos_gen (R:Type)(m:R->R->R)(x:R) (i:positive) {struct i}: R := match i with | xH => x | xO i => let p := pow_pos_gen m x i in m p p | xI i => let p := pow_pos_gen m x i in m x (m p p) end. Lemma Ppow_pos_ok : forall l, (forall P, subst_l P@l == P@l) -> forall res P p, (Ppow_pos res P p)@l == (pow_pos_gen Pmul P p)@l * res@l. Proof. intros l subst_l_ok res P p. generalize res;clear res. induction p;simpl;intros. try rewrite subst_l_ok. repeat rewrite Pmul_ok. repeat rewrite IHp. rsimpl. repeat rewrite Pmul_ok. repeat rewrite IHp. rsimpl. try rewrite subst_l_ok. repeat rewrite Pmul_ok. reflexivity. Qed. Definition pow_N_gen (R:Type)(x1:R)(m:R->R->R)(x:R) (p:N) := match p with | N0 => x1 | Npos p => pow_pos_gen m x p end. Lemma Ppow_N_ok : forall l, (forall P, subst_l P@l == P@l) -> forall P n, (Ppow_N P n)@l == (pow_N_gen P1 Pmul P n)@l. Proof. destruct n;simpl. reflexivity. rewrite Ppow_pos_ok; trivial. Esimpl. Qed. End POWER2. (** Normalization and rewriting *) Section NORM_SUBST_REC. Let subst_l (P:Pol) := P. Let Pmul_subst P1 P2 := subst_l (Pmul P1 P2). Let Ppow_subst := Ppow_N subst_l. Fixpoint norm_aux (pe:PExpr C) : Pol := match pe with | PEc c => Pc c | PEX j => mk_X j | PEadd pe1 (PEopp pe2) => Psub (norm_aux pe1) (norm_aux pe2) | PEadd pe1 pe2 => Padd (norm_aux pe1) (norm_aux pe2) | PEsub pe1 pe2 => Psub (norm_aux pe1) (norm_aux pe2) | PEmul pe1 pe2 => Pmul (norm_aux pe1) (norm_aux pe2) | PEopp pe1 => Popp (norm_aux pe1) | PEpow pe1 n => Ppow_N (fun p => p) (norm_aux pe1) n end. Definition norm_subst pe := subst_l (norm_aux pe). Lemma norm_aux_spec : forall l pe, PEeval l pe == (norm_aux pe)@l. Proof. intros. induction pe. Esimpl3. Esimpl3. simpl. rewrite IHpe1;rewrite IHpe2. destruct pe2; Esimpl3. unfold Psub. destruct pe1; destruct pe2; rewrite Padd_ok; rewrite Popp_ok; reflexivity. simpl. unfold Psub. rewrite IHpe1;rewrite IHpe2. destruct pe1. destruct pe2; rewrite Padd_ok; rewrite Popp_ok; try reflexivity. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. Esimpl3. simpl. rewrite IHpe1;rewrite IHpe2. rewrite Pmul_ok. reflexivity. simpl. rewrite IHpe; Esimpl3. simpl. rewrite Ppow_N_ok; (intros;try reflexivity). rewrite rpow_pow_N. Esimpl3. induction n;simpl. Esimpl3. induction p; simpl. try rewrite IHp;try rewrite IHpe; repeat rewrite Pms_ok; repeat rewrite Pmul_ok;reflexivity. rewrite Pmul_ok. try rewrite IHp;try rewrite IHpe; repeat rewrite Pms_ok; repeat rewrite Pmul_ok;reflexivity. trivial. exact pow_th. Qed. Lemma norm_subst_spec : forall l pe, PEeval l pe == (norm_subst pe)@l. Proof. intros;unfold norm_subst. unfold subst_l. apply norm_aux_spec. Qed. End NORM_SUBST_REC. Fixpoint interp_PElist (l:list R) (lpe:list (PExpr C * PExpr C)) {struct lpe} : Prop := match lpe with | nil => True | (me,pe)::lpe => match lpe with | nil => PEeval l me == PEeval l pe | _ => PEeval l me == PEeval l pe /\ interp_PElist l lpe end end. Lemma norm_subst_ok : forall l pe, PEeval l pe == (norm_subst pe)@l. Proof. intros;apply norm_subst_spec. Qed. Lemma ring_correct : forall l pe1 pe2, (norm_subst pe1 =? 
norm_subst pe2) = true -> PEeval l pe1 == PEeval l pe2. Proof. simpl;intros. do 2 (rewrite (norm_subst_ok l);trivial). apply Peq_ok;trivial. Qed. End MakeRingPol. coq-8.4pl2/plugins/setoid_ring/Ring.v0000640000175000001440000000265012010532755016707 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* b) (eq(A:=bool)). split; simpl. destruct x; reflexivity. destruct x; destruct y; reflexivity. destruct x; destruct y; destruct z; reflexivity. reflexivity. destruct x; destruct y; reflexivity. destruct x; destruct y; reflexivity. destruct x; destruct y; destruct z; reflexivity. reflexivity. destruct x; reflexivity. Qed. Definition bool_eq (b1 b2:bool) := if b1 then b2 else negb b2. Lemma bool_eq_ok : forall b1 b2, bool_eq b1 b2 = true -> b1 = b2. destruct b1; destruct b2; auto. Qed. Ltac bool_cst t := let t := eval hnf in t in match t with true => constr:true | false => constr:false | _ => constr:NotConstant end. Add Ring bool_ring : BoolTheory (decidable bool_eq_ok, constants [bool_cst]). coq-8.4pl2/plugins/setoid_ring/newring.ml40000640000175000001440000012106012010532755017705 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* f i c | _ -> assert false type protect_flag = Eval|Prot|Rec let tag_arg tag_rec map subs i c = match map i with Eval -> mk_clos subs c | Prot -> mk_atom c | Rec -> if i = -1 then mk_clos subs c else tag_rec c let rec mk_clos_but f_map subs t = match f_map t with | Some map -> tag_arg (mk_clos_but f_map subs) map subs (-1) t | None -> (match kind_of_term t with App(f,args) -> mk_clos_app_but f_map subs f args 0 | Prod _ -> mk_clos_deep (mk_clos_but f_map) subs t | _ -> mk_atom t) and mk_clos_app_but f_map subs f args n = if n >= Array.length args then mk_atom(mkApp(f, args)) else let fargs, args' = array_chop n args in let f' = mkApp(f,fargs) in match f_map f' with Some map -> mk_clos_deep (fun s' -> unmark_arg (tag_arg (mk_clos_but f_map s') map s')) subs (mkApp (mark_arg (-1) f', Array.mapi mark_arg args')) | None -> mk_clos_app_but f_map subs f args (n+1) let interp_map l c = try let (im,am) = List.assoc c l in Some(fun i -> if List.mem i im then Eval else if List.mem i am then Prot else if i = -1 then Eval else Rec) with Not_found -> None let interp_map l t = try Some(list_assoc_f eq_constr t l) with Not_found -> None let protect_maps = ref Stringmap.empty let add_map s m = protect_maps := Stringmap.add s m !protect_maps let lookup_map map = try Stringmap.find map !protect_maps with Not_found -> errorlabstrm"lookup_map"(str"map "++qs map++str"not found") let protect_red map env sigma c = kl (create_clos_infos betadeltaiota env) (mk_clos_but (lookup_map map c) (Esubst.subs_id 0) c);; let protect_tac map = Tactics.reduct_option (protect_red map,DEFAULTcast) None ;; let protect_tac_in map id = Tactics.reduct_option (protect_red map,DEFAULTcast) (Some(id, Termops.InHyp));; TACTIC EXTEND protect_fv [ "protect_fv" string(map) "in" ident(id) ] -> [ protect_tac_in map id ] | [ "protect_fv" string(map) ] -> [ protect_tac map ] END;; (****************************************************************************) let closed_term t l = let l = List.map constr_of_global l in let cs = List.fold_right Quote.ConstrSet.add l Quote.ConstrSet.empty in if Quote.closed_under cs t then tclIDTAC else tclFAIL 0 (mt()) ;; TACTIC EXTEND closed_term [ "closed_term" constr(t) "[" 
ne_reference_list(l) "]" ] -> [ closed_term t l ] END ;; TACTIC EXTEND echo | [ "echo" constr(t) ] -> [ Pp.msg (Termops.print_constr t); Tacinterp.eval_tactic (TacId []) ] END;; (* let closed_term_ast l = TacFun([Some(id_of_string"t")], TacAtom(dummy_loc,TacExtend(dummy_loc,"closed_term", [Genarg.in_gen Genarg.wit_constr (mkVar(id_of_string"t")); Genarg.in_gen (Genarg.wit_list1 Genarg.wit_ref) l]))) *) let closed_term_ast l = let l = List.map (fun gr -> ArgArg(dummy_loc,gr)) l in TacFun([Some(id_of_string"t")], TacAtom(dummy_loc,TacExtend(dummy_loc,"closed_term", [Genarg.in_gen Genarg.globwit_constr (GVar(dummy_loc,id_of_string"t"),None); Genarg.in_gen (Genarg.wit_list1 Genarg.globwit_ref) l]))) (* let _ = add_tacdef false ((dummy_loc,id_of_string"ring_closed_term" *) (****************************************************************************) let ic c = let env = Global.env() and sigma = Evd.empty in Constrintern.interp_constr sigma env c let ty c = Typing.type_of (Global.env()) Evd.empty c let decl_constant na c = mkConst(declare_constant (id_of_string na) (DefinitionEntry { const_entry_body = c; const_entry_secctx = None; const_entry_type = None; const_entry_opaque = true }, IsProof Lemma)) (* Calling a global tactic *) let ltac_call tac (args:glob_tactic_arg list) = TacArg(dummy_loc,TacCall(dummy_loc, ArgArg(dummy_loc, Lazy.force tac),args)) (* Calling a locally bound tactic *) let ltac_lcall tac args = TacArg(dummy_loc,TacCall(dummy_loc, ArgVar(dummy_loc, id_of_string tac),args)) let ltac_letin (x, e1) e2 = TacLetIn(false,[(dummy_loc,id_of_string x),e1],e2) let ltac_apply (f:glob_tactic_expr) (args:glob_tactic_arg list) = Tacinterp.eval_tactic (ltac_letin ("F", Tacexp f) (ltac_lcall "F" args)) let ltac_record flds = TacFun([Some(id_of_string"proj")], ltac_lcall "proj" flds) let carg c = TacDynamic(dummy_loc,Pretyping.constr_in c) let dummy_goal env = let (gl,_,sigma) = Goal.V82.mk_goal Evd.empty (named_context_val env) mkProp Store.empty in {Evd.it = gl; Evd.sigma = sigma} let exec_tactic env n f args = let lid = list_tabulate(fun i -> id_of_string("x"^string_of_int i)) n in let res = ref [||] in let get_res ist = let l = List.map (fun id -> List.assoc id ist.lfun) lid in res := Array.of_list l; TacId[] in let getter = Tacexp(TacFun(List.map(fun id -> Some id) lid, glob_tactic(tacticIn get_res))) in let _ = Tacinterp.eval_tactic(ltac_call f (args@[getter])) (dummy_goal env) in !res let constr_of = function | VConstr ([],c) -> c | _ -> failwith "Ring.exec_tactic: anomaly" let stdlib_modules = [["Coq";"Setoids";"Setoid"]; ["Coq";"Lists";"List"]; ["Coq";"Init";"Datatypes"]; ["Coq";"Init";"Logic"]; ] let coq_constant c = lazy (Coqlib.gen_constant_in_modules "Ring" stdlib_modules c) let coq_mk_Setoid = coq_constant "Build_Setoid_Theory" let coq_cons = coq_constant "cons" let coq_nil = coq_constant "nil" let coq_None = coq_constant "None" let coq_Some = coq_constant "Some" let coq_eq = coq_constant "eq" let lapp f args = mkApp(Lazy.force f,args) let dest_rel0 t = match kind_of_term t with | App(f,args) when Array.length args >= 2 -> let rel = mkApp(f,Array.sub args 0 (Array.length args - 2)) in if closed0 rel then (rel,args.(Array.length args - 2),args.(Array.length args - 1)) else error "ring: cannot find relation (not closed)" | _ -> error "ring: cannot find relation" let rec dest_rel t = match kind_of_term t with | Prod(_,_,c) -> dest_rel c | _ -> dest_rel0 t (****************************************************************************) (* Library linking *) let plugin_dir = 
"setoid_ring" let cdir = ["Coq";plugin_dir] let plugin_modules = List.map (fun d -> cdir@d) [["Ring_theory"];["Ring_polynom"]; ["Ring_tac"];["InitialRing"]; ["Field_tac"]; ["Field_theory"] ] let my_constant c = lazy (Coqlib.gen_constant_in_modules "Ring" plugin_modules c) let new_ring_path = make_dirpath (List.map id_of_string ["Ring_tac";plugin_dir;"Coq"]) let ltac s = lazy(make_kn (MPfile new_ring_path) (make_dirpath []) (mk_label s)) let znew_ring_path = make_dirpath (List.map id_of_string ["InitialRing";plugin_dir;"Coq"]) let zltac s = lazy(make_kn (MPfile znew_ring_path) (make_dirpath []) (mk_label s)) let mk_cst l s = lazy (Coqlib.gen_constant "newring" l s);; let pol_cst s = mk_cst [plugin_dir;"Ring_polynom"] s ;; (* Ring theory *) (* almost_ring defs *) let coq_almost_ring_theory = my_constant "almost_ring_theory" (* setoid and morphism utilities *) let coq_eq_setoid = my_constant "Eqsth" let coq_eq_morph = my_constant "Eq_ext" let coq_eq_smorph = my_constant "Eq_s_ext" (* ring -> almost_ring utilities *) let coq_ring_theory = my_constant "ring_theory" let coq_mk_reqe = my_constant "mk_reqe" (* semi_ring -> almost_ring utilities *) let coq_semi_ring_theory = my_constant "semi_ring_theory" let coq_mk_seqe = my_constant "mk_seqe" let ltac_inv_morph_gen = zltac"inv_gen_phi" let ltac_inv_morphZ = zltac"inv_gen_phiZ" let ltac_inv_morphN = zltac"inv_gen_phiN" let ltac_inv_morphNword = zltac"inv_gen_phiNword" let coq_abstract = my_constant"Abstract" let coq_comp = my_constant"Computational" let coq_morph = my_constant"Morphism" (* morphism *) let coq_ring_morph = my_constant "ring_morph" let coq_semi_morph = my_constant "semi_morph" (* power function *) let ltac_inv_morph_nothing = zltac"inv_morph_nothing" let coq_pow_N_pow_N = my_constant "pow_N_pow_N" (* hypothesis *) let coq_mkhypo = my_constant "mkhypo" let coq_hypo = my_constant "hypo" (* Equality: do not evaluate but make recursive call on both sides *) let map_with_eq arg_map c = let (req,_,_) = dest_rel c in interp_map ((req,(function -1->Prot|_->Rec)):: List.map (fun (c,map) -> (Lazy.force c,map)) arg_map) let _ = add_map "ring" (map_with_eq [coq_cons,(function -1->Eval|2->Rec|_->Prot); coq_nil, (function -1->Eval|_ -> Prot); (* Pphi_dev: evaluate polynomial and coef operations, protect ring operations and make recursive call on the var map *) pol_cst "Pphi_dev", (function -1|8|9|10|11|12|14->Eval|13->Rec|_->Prot); pol_cst "Pphi_pow", (function -1|8|9|10|11|13|15|17->Eval|16->Rec|_->Prot); (* PEeval: evaluate morphism and polynomial, protect ring operations and make recursive call on the var map *) pol_cst "PEeval", (function -1|7|9|12->Eval|11->Rec|_->Prot)]) (****************************************************************************) (* Ring database *) type ring_info = { ring_carrier : types; ring_req : constr; ring_setoid : constr; ring_ext : constr; ring_morph : constr; ring_th : constr; ring_cst_tac : glob_tactic_expr; ring_pow_tac : glob_tactic_expr; ring_lemma1 : constr; ring_lemma2 : constr; ring_pre_tac : glob_tactic_expr; ring_post_tac : glob_tactic_expr } module Cmap = Map.Make(struct type t = constr let compare = constr_ord end) let from_carrier = ref Cmap.empty let from_relation = ref Cmap.empty let from_name = ref Spmap.empty let ring_for_carrier r = Cmap.find r !from_carrier let ring_for_relation rel = Cmap.find rel !from_relation let find_ring_structure env sigma l = match l with | t::cl' -> let ty = Retyping.get_type_of env sigma t in let check c = let ty' = Retyping.get_type_of env sigma c in if not 
(Reductionops.is_conv env sigma ty ty') then errorlabstrm "ring" (str"arguments of ring_simplify do not have all the same type") in List.iter check cl'; (try ring_for_carrier ty with Not_found -> errorlabstrm "ring" (str"cannot find a declared ring structure over"++ spc()++str"\""++pr_constr ty++str"\"")) | [] -> assert false (* let (req,_,_) = dest_rel cl in (try ring_for_relation req with Not_found -> errorlabstrm "ring" (str"cannot find a declared ring structure for equality"++ spc()++str"\""++pr_constr req++str"\"")) *) let _ = Summary.declare_summary "tactic-new-ring-table" { Summary.freeze_function = (fun () -> !from_carrier,!from_relation,!from_name); Summary.unfreeze_function = (fun (ct,rt,nt) -> from_carrier := ct; from_relation := rt; from_name := nt); Summary.init_function = (fun () -> from_carrier := Cmap.empty; from_relation := Cmap.empty; from_name := Spmap.empty) } let add_entry (sp,_kn) e = (* let _ = ty e.ring_lemma1 in let _ = ty e.ring_lemma2 in *) from_carrier := Cmap.add e.ring_carrier e !from_carrier; from_relation := Cmap.add e.ring_req e !from_relation; from_name := Spmap.add sp e !from_name let subst_th (subst,th) = let c' = subst_mps subst th.ring_carrier in let eq' = subst_mps subst th.ring_req in let set' = subst_mps subst th.ring_setoid in let ext' = subst_mps subst th.ring_ext in let morph' = subst_mps subst th.ring_morph in let th' = subst_mps subst th.ring_th in let thm1' = subst_mps subst th.ring_lemma1 in let thm2' = subst_mps subst th.ring_lemma2 in let tac'= subst_tactic subst th.ring_cst_tac in let pow_tac'= subst_tactic subst th.ring_pow_tac in let pretac'= subst_tactic subst th.ring_pre_tac in let posttac'= subst_tactic subst th.ring_post_tac in if c' == th.ring_carrier && eq' == th.ring_req && eq_constr set' th.ring_setoid && ext' == th.ring_ext && morph' == th.ring_morph && th' == th.ring_th && thm1' == th.ring_lemma1 && thm2' == th.ring_lemma2 && tac' == th.ring_cst_tac && pow_tac' == th.ring_pow_tac && pretac' == th.ring_pre_tac && posttac' == th.ring_post_tac then th else { ring_carrier = c'; ring_req = eq'; ring_setoid = set'; ring_ext = ext'; ring_morph = morph'; ring_th = th'; ring_cst_tac = tac'; ring_pow_tac = pow_tac'; ring_lemma1 = thm1'; ring_lemma2 = thm2'; ring_pre_tac = pretac'; ring_post_tac = posttac' } let theory_to_obj : ring_info -> obj = let cache_th (name,th) = add_entry name th in declare_object {(default_object "tactic-new-ring-theory") with open_function = (fun i o -> if i=1 then cache_th o); cache_function = cache_th; subst_function = subst_th; classify_function = (fun x -> Substitute x)} let setoid_of_relation env a r = let evm = Evd.empty in try lapp coq_mk_Setoid [|a ; r ; Rewrite.get_reflexive_proof env evm a r ; Rewrite.get_symmetric_proof env evm a r ; Rewrite.get_transitive_proof env evm a r |] with Not_found -> error "cannot find setoid relation" let op_morph r add mul opp req m1 m2 m3 = lapp coq_mk_reqe [| r; add; mul; opp; req; m1; m2; m3 |] let op_smorph r add mul req m1 m2 = lapp coq_mk_seqe [| r; add; mul; req; m1; m2 |] (* let default_ring_equality (r,add,mul,opp,req) = *) (* let is_setoid = function *) (* {rel_refl=Some _; rel_sym=Some _;rel_trans=Some _;rel_aeq=rel} -> *) (* eq_constr req rel (\* Qu: use conversion ? 
*\) *) (* | _ -> false in *) (* match default_relation_for_carrier ~filter:is_setoid r with *) (* Leibniz _ -> *) (* let setoid = lapp coq_eq_setoid [|r|] in *) (* let op_morph = *) (* match opp with *) (* Some opp -> lapp coq_eq_morph [|r;add;mul;opp|] *) (* | None -> lapp coq_eq_smorph [|r;add;mul|] in *) (* (setoid,op_morph) *) (* | Relation rel -> *) (* let setoid = setoid_of_relation rel in *) (* let is_endomorphism = function *) (* { args=args } -> List.for_all *) (* (function (var,Relation rel) -> *) (* var=None && eq_constr req rel *) (* | _ -> false) args in *) (* let add_m = *) (* try default_morphism ~filter:is_endomorphism add *) (* with Not_found -> *) (* error "ring addition should be declared as a morphism" in *) (* let mul_m = *) (* try default_morphism ~filter:is_endomorphism mul *) (* with Not_found -> *) (* error "ring multiplication should be declared as a morphism" in *) (* let op_morph = *) (* match opp with *) (* | Some opp -> *) (* (let opp_m = *) (* try default_morphism ~filter:is_endomorphism opp *) (* with Not_found -> *) (* error "ring opposite should be declared as a morphism" in *) (* let op_morph = *) (* op_morph r add mul opp req add_m.lem mul_m.lem opp_m.lem in *) (* msgnl *) (* (str"Using setoid \""++pr_constr rel.rel_aeq++str"\""++spc()++ *) (* str"and morphisms \""++pr_constr add_m.morphism_theory++ *) (* str"\","++spc()++ str"\""++pr_constr mul_m.morphism_theory++ *) (* str"\""++spc()++str"and \""++pr_constr opp_m.morphism_theory++ *) (* str"\""); *) (* op_morph) *) (* | None -> *) (* (msgnl *) (* (str"Using setoid \""++pr_constr rel.rel_aeq++str"\"" ++ spc() ++ *) (* str"and morphisms \""++pr_constr add_m.morphism_theory++ *) (* str"\""++spc()++str"and \""++ *) (* pr_constr mul_m.morphism_theory++str"\""); *) (* op_smorph r add mul req add_m.lem mul_m.lem) in *) (* (setoid,op_morph) *) let ring_equality (r,add,mul,opp,req) = match kind_of_term req with | App (f, [| _ |]) when eq_constr f (Lazy.force coq_eq) -> let setoid = lapp coq_eq_setoid [|r|] in let op_morph = match opp with Some opp -> lapp coq_eq_morph [|r;add;mul;opp|] | None -> lapp coq_eq_smorph [|r;add;mul|] in (setoid,op_morph) | _ -> let setoid = setoid_of_relation (Global.env ()) r req in let signature = [Some (r,Some req);Some (r,Some req)],Some(r,Some req) in let add_m, add_m_lem = try Rewrite.default_morphism signature add with Not_found -> error "ring addition should be declared as a morphism" in let mul_m, mul_m_lem = try Rewrite.default_morphism signature mul with Not_found -> error "ring multiplication should be declared as a morphism" in let op_morph = match opp with | Some opp -> (let opp_m,opp_m_lem = try Rewrite.default_morphism ([Some(r,Some req)],Some(r,Some req)) opp with Not_found -> error "ring opposite should be declared as a morphism" in let op_morph = op_morph r add mul opp req add_m_lem mul_m_lem opp_m_lem in Flags.if_verbose msgnl (str"Using setoid \""++pr_constr req++str"\""++spc()++ str"and morphisms \""++pr_constr add_m_lem ++ str"\","++spc()++ str"\""++pr_constr mul_m_lem++ str"\""++spc()++str"and \""++pr_constr opp_m_lem++ str"\""); op_morph) | None -> (Flags.if_verbose msgnl (str"Using setoid \""++pr_constr req ++str"\"" ++ spc() ++ str"and morphisms \""++pr_constr add_m_lem ++ str"\""++spc()++str"and \""++ pr_constr mul_m_lem++str"\""); op_smorph r add mul req add_m_lem mul_m_lem) in (setoid,op_morph) let build_setoid_params r add mul opp req eqth = match eqth with Some th -> th | None -> ring_equality (r,add,mul,opp,req) let dest_ring env sigma th_spec = let 
th_typ = Retyping.get_type_of env sigma th_spec in match kind_of_term th_typ with App(f,[|r;zero;one;add;mul;sub;opp;req|]) when eq_constr f (Lazy.force coq_almost_ring_theory) -> (None,r,zero,one,add,mul,Some sub,Some opp,req) | App(f,[|r;zero;one;add;mul;req|]) when eq_constr f (Lazy.force coq_semi_ring_theory) -> (Some true,r,zero,one,add,mul,None,None,req) | App(f,[|r;zero;one;add;mul;sub;opp;req|]) when eq_constr f (Lazy.force coq_ring_theory) -> (Some false,r,zero,one,add,mul,Some sub,Some opp,req) | _ -> error "bad ring structure" let dest_morph env sigma m_spec = let m_typ = Retyping.get_type_of env sigma m_spec in match kind_of_term m_typ with App(f,[|r;zero;one;add;mul;sub;opp;req; c;czero;cone;cadd;cmul;csub;copp;ceqb;phi|]) when eq_constr f (Lazy.force coq_ring_morph) -> (c,czero,cone,cadd,cmul,Some csub,Some copp,ceqb,phi) | App(f,[|r;zero;one;add;mul;req;c;czero;cone;cadd;cmul;ceqb;phi|]) when eq_constr f (Lazy.force coq_semi_morph) -> (c,czero,cone,cadd,cmul,None,None,ceqb,phi) | _ -> error "bad morphism structure" type coeff_spec = Computational of constr (* equality test *) | Abstract (* coeffs = Z *) | Morphism of constr (* general morphism *) let reflect_coeff rkind = (* We build an ill-typed terms on purpose... *) match rkind with Abstract -> Lazy.force coq_abstract | Computational c -> lapp coq_comp [|c|] | Morphism m -> lapp coq_morph [|m|] type cst_tac_spec = CstTac of raw_tactic_expr | Closed of reference list let interp_cst_tac env sigma rk kind (zero,one,add,mul,opp) cst_tac = match cst_tac with Some (CstTac t) -> Tacinterp.glob_tactic t | Some (Closed lc) -> closed_term_ast (List.map Smartlocate.global_with_alias lc) | None -> (match rk, opp, kind with Abstract, None, _ -> let t = ArgArg(dummy_loc,Lazy.force ltac_inv_morphN) in TacArg(dummy_loc,TacCall(dummy_loc,t,List.map carg [zero;one;add;mul])) | Abstract, Some opp, Some _ -> let t = ArgArg(dummy_loc, Lazy.force ltac_inv_morphZ) in TacArg(dummy_loc,TacCall(dummy_loc,t,List.map carg [zero;one;add;mul;opp])) | Abstract, Some opp, None -> let t = ArgArg(dummy_loc, Lazy.force ltac_inv_morphNword) in TacArg (dummy_loc,TacCall(dummy_loc,t,List.map carg [zero;one;add;mul;opp])) | Computational _,_,_ -> let t = ArgArg(dummy_loc, Lazy.force ltac_inv_morph_gen) in TacArg (dummy_loc,TacCall(dummy_loc,t,List.map carg [zero;one;zero;one])) | Morphism mth,_,_ -> let (_,czero,cone,_,_,_,_,_,_) = dest_morph env sigma mth in let t = ArgArg(dummy_loc, Lazy.force ltac_inv_morph_gen) in TacArg (dummy_loc,TacCall(dummy_loc,t,List.map carg [zero;one;czero;cone]))) let make_hyp env c = let t = Retyping.get_type_of env Evd.empty c in lapp coq_mkhypo [|t;c|] let make_hyp_list env lH = let carrier = Lazy.force coq_hypo in List.fold_right (fun c l -> lapp coq_cons [|carrier; (make_hyp env c); l|]) lH (lapp coq_nil [|carrier|]) let interp_power env pow = let carrier = Lazy.force coq_hypo in match pow with | None -> let t = ArgArg(dummy_loc, Lazy.force ltac_inv_morph_nothing) in (TacArg(dummy_loc,TacCall(dummy_loc,t,[])), lapp coq_None [|carrier|]) | Some (tac, spec) -> let tac = match tac with | CstTac t -> Tacinterp.glob_tactic t | Closed lc -> closed_term_ast (List.map Smartlocate.global_with_alias lc) in let spec = make_hyp env (ic spec) in (tac, lapp coq_Some [|carrier; spec|]) let interp_sign env sign = let carrier = Lazy.force coq_hypo in match sign with | None -> lapp coq_None [|carrier|] | Some spec -> let spec = make_hyp env (ic spec) in lapp coq_Some [|carrier;spec|] (* Same remark on ill-typed terms ... 
*) let interp_div env div = let carrier = Lazy.force coq_hypo in match div with | None -> lapp coq_None [|carrier|] | Some spec -> let spec = make_hyp env (ic spec) in lapp coq_Some [|carrier;spec|] (* Same remark on ill-typed terms ... *) let add_theory name rth eqth morphth cst_tac (pre,post) power sign div = check_required_library (cdir@["Ring_base"]); let env = Global.env() in let sigma = Evd.empty in let (kind,r,zero,one,add,mul,sub,opp,req) = dest_ring env sigma rth in let (sth,ext) = build_setoid_params r add mul opp req eqth in let (pow_tac, pspec) = interp_power env power in let sspec = interp_sign env sign in let dspec = interp_div env div in let rk = reflect_coeff morphth in let params = exec_tactic env 5 (zltac "ring_lemmas") (List.map carg[sth;ext;rth;pspec;sspec;dspec;rk]) in let lemma1 = constr_of params.(3) in let lemma2 = constr_of params.(4) in let lemma1 = decl_constant (string_of_id name^"_ring_lemma1") lemma1 in let lemma2 = decl_constant (string_of_id name^"_ring_lemma2") lemma2 in let cst_tac = interp_cst_tac env sigma morphth kind (zero,one,add,mul,opp) cst_tac in let pretac = match pre with Some t -> Tacinterp.glob_tactic t | _ -> TacId [] in let posttac = match post with Some t -> Tacinterp.glob_tactic t | _ -> TacId [] in let _ = Lib.add_leaf name (theory_to_obj { ring_carrier = r; ring_req = req; ring_setoid = sth; ring_ext = constr_of params.(1); ring_morph = constr_of params.(2); ring_th = constr_of params.(0); ring_cst_tac = cst_tac; ring_pow_tac = pow_tac; ring_lemma1 = lemma1; ring_lemma2 = lemma2; ring_pre_tac = pretac; ring_post_tac = posttac }) in () type ring_mod = Ring_kind of coeff_spec | Const_tac of cst_tac_spec | Pre_tac of raw_tactic_expr | Post_tac of raw_tactic_expr | Setoid of Topconstr.constr_expr * Topconstr.constr_expr | Pow_spec of cst_tac_spec * Topconstr.constr_expr (* Syntaxification tactic , correctness lemma *) | Sign_spec of Topconstr.constr_expr | Div_spec of Topconstr.constr_expr VERNAC ARGUMENT EXTEND ring_mod | [ "decidable" constr(eq_test) ] -> [ Ring_kind(Computational (ic eq_test)) ] | [ "abstract" ] -> [ Ring_kind Abstract ] | [ "morphism" constr(morph) ] -> [ Ring_kind(Morphism (ic morph)) ] | [ "constants" "[" tactic(cst_tac) "]" ] -> [ Const_tac(CstTac cst_tac) ] | [ "closed" "[" ne_global_list(l) "]" ] -> [ Const_tac(Closed l) ] | [ "preprocess" "[" tactic(pre) "]" ] -> [ Pre_tac pre ] | [ "postprocess" "[" tactic(post) "]" ] -> [ Post_tac post ] | [ "setoid" constr(sth) constr(ext) ] -> [ Setoid(sth,ext) ] | [ "sign" constr(sign_spec) ] -> [ Sign_spec sign_spec ] | [ "power" constr(pow_spec) "[" ne_global_list(l) "]" ] -> [ Pow_spec (Closed l, pow_spec) ] | [ "power_tac" constr(pow_spec) "[" tactic(cst_tac) "]" ] -> [ Pow_spec (CstTac cst_tac, pow_spec) ] | [ "div" constr(div_spec) ] -> [ Div_spec div_spec ] END let set_once s r v = if !r = None then r := Some v else error (s^" cannot be set twice") let process_ring_mods l = let kind = ref None in let set = ref None in let cst_tac = ref None in let pre = ref None in let post = ref None in let sign = ref None in let power = ref None in let div = ref None in List.iter(function Ring_kind k -> set_once "ring kind" kind k | Const_tac t -> set_once "tactic recognizing constants" cst_tac t | Pre_tac t -> set_once "preprocess tactic" pre t | Post_tac t -> set_once "postprocess tactic" post t | Setoid(sth,ext) -> set_once "setoid" set (ic sth,ic ext) | Pow_spec(t,spec) -> set_once "power" power (t,spec) | Sign_spec t -> set_once "sign" sign t | Div_spec t -> set_once "div" div t) 
l; let k = match !kind with Some k -> k | None -> Abstract in (k, !set, !cst_tac, !pre, !post, !power, !sign, !div) VERNAC COMMAND EXTEND AddSetoidRing | [ "Add" "Ring" ident(id) ":" constr(t) ring_mods(l) ] -> [ let (k,set,cst,pre,post,power,sign, div) = process_ring_mods l in add_theory id (ic t) set k cst (pre,post) power sign div] END (*****************************************************************************) (* The tactics consist then only in a lookup in the ring database and call the appropriate ltac. *) let make_args_list rl t = match rl with | [] -> let (_,t1,t2) = dest_rel0 t in [t1;t2] | _ -> rl let make_term_list carrier rl = List.fold_right (fun x l -> lapp coq_cons [|carrier;x;l|]) rl (lapp coq_nil [|carrier|]) let ltac_ring_structure e = let req = carg e.ring_req in let sth = carg e.ring_setoid in let ext = carg e.ring_ext in let morph = carg e.ring_morph in let th = carg e.ring_th in let cst_tac = Tacexp e.ring_cst_tac in let pow_tac = Tacexp e.ring_pow_tac in let lemma1 = carg e.ring_lemma1 in let lemma2 = carg e.ring_lemma2 in let pretac = Tacexp(TacFun([None],e.ring_pre_tac)) in let posttac = Tacexp(TacFun([None],e.ring_post_tac)) in [req;sth;ext;morph;th;cst_tac;pow_tac; lemma1;lemma2;pretac;posttac] let ring_lookup (f:glob_tactic_expr) lH rl t gl = let env = pf_env gl in let sigma = project gl in let rl = make_args_list rl t in let e = find_ring_structure env sigma rl in let rl = carg (make_term_list e.ring_carrier rl) in let lH = carg (make_hyp_list env lH) in let ring = ltac_ring_structure e in ltac_apply f (ring@[lH;rl]) gl TACTIC EXTEND ring_lookup | [ "ring_lookup" tactic0(f) "[" constr_list(lH) "]" ne_constr_list(lrt) ] -> [ let (t,lr) = list_sep_last lrt in ring_lookup f lH lr t] END (***********************************************************************) let new_field_path = make_dirpath (List.map id_of_string ["Field_tac";plugin_dir;"Coq"]) let field_ltac s = lazy(make_kn (MPfile new_field_path) (make_dirpath []) (mk_label s)) let _ = add_map "field" (map_with_eq [coq_cons,(function -1->Eval|2->Rec|_->Prot); coq_nil, (function -1->Eval|_ -> Prot); (* display_linear: evaluate polynomials and coef operations, protect field operations and make recursive call on the var map *) my_constant "display_linear", (function -1|9|10|11|12|13|15|16->Eval|14->Rec|_->Prot); my_constant "display_pow_linear", (function -1|9|10|11|12|13|14|16|18|19->Eval|17->Rec|_->Prot); (* Pphi_dev: evaluate polynomial and coef operations, protect ring operations and make recursive call on the var map *) pol_cst "Pphi_dev", (function -1|8|9|10|11|12|14->Eval|13->Rec|_->Prot); pol_cst "Pphi_pow", (function -1|8|9|10|11|13|15|17->Eval|16->Rec|_->Prot); (* PEeval: evaluate morphism and polynomial, protect ring operations and make recursive call on the var map *) pol_cst "PEeval", (function -1|7|9|12->Eval|11->Rec|_->Prot); (* FEeval: evaluate morphism, protect field operations and make recursive call on the var map *) my_constant "FEeval", (function -1|8|9|10|11|14->Eval|13->Rec|_->Prot)]);; let _ = add_map "field_cond" (map_with_eq [coq_cons,(function -1->Eval|2->Rec|_->Prot); coq_nil, (function -1->Eval|_ -> Prot); (* PCond: evaluate morphism and denum list, protect ring operations and make recursive call on the var map *) my_constant "PCond", (function -1|8|10|13->Eval|12->Rec|_->Prot)]);; (* (function -1|8|10->Eval|9->Rec|_->Prot)]);;*) let _ = Redexpr.declare_reduction "simpl_field_expr" (protect_red "field") let afield_theory = my_constant "almost_field_theory" let field_theory = 
my_constant "field_theory" let sfield_theory = my_constant "semi_field_theory" let af_ar = my_constant"AF_AR" let f_r = my_constant"F_R" let sf_sr = my_constant"SF_SR" let dest_field env sigma th_spec = let th_typ = Retyping.get_type_of env sigma th_spec in match kind_of_term th_typ with | App(f,[|r;zero;one;add;mul;sub;opp;div;inv;req|]) when eq_constr f (Lazy.force afield_theory) -> let rth = lapp af_ar [|r;zero;one;add;mul;sub;opp;div;inv;req;th_spec|] in (None,r,zero,one,add,mul,Some sub,Some opp,div,inv,req,rth) | App(f,[|r;zero;one;add;mul;sub;opp;div;inv;req|]) when eq_constr f (Lazy.force field_theory) -> let rth = lapp f_r [|r;zero;one;add;mul;sub;opp;div;inv;req;th_spec|] in (Some false,r,zero,one,add,mul,Some sub,Some opp,div,inv,req,rth) | App(f,[|r;zero;one;add;mul;div;inv;req|]) when eq_constr f (Lazy.force sfield_theory) -> let rth = lapp sf_sr [|r;zero;one;add;mul;div;inv;req;th_spec|] in (Some true,r,zero,one,add,mul,None,None,div,inv,req,rth) | _ -> error "bad field structure" type field_info = { field_carrier : types; field_req : constr; field_cst_tac : glob_tactic_expr; field_pow_tac : glob_tactic_expr; field_ok : constr; field_simpl_eq_ok : constr; field_simpl_ok : constr; field_simpl_eq_in_ok : constr; field_cond : constr; field_pre_tac : glob_tactic_expr; field_post_tac : glob_tactic_expr } let field_from_carrier = ref Cmap.empty let field_from_relation = ref Cmap.empty let field_from_name = ref Spmap.empty let field_for_carrier r = Cmap.find r !field_from_carrier let field_for_relation rel = Cmap.find rel !field_from_relation let find_field_structure env sigma l = check_required_library (cdir@["Field_tac"]); match l with | t::cl' -> let ty = Retyping.get_type_of env sigma t in let check c = let ty' = Retyping.get_type_of env sigma c in if not (Reductionops.is_conv env sigma ty ty') then errorlabstrm "field" (str"arguments of field_simplify do not have all the same type") in List.iter check cl'; (try field_for_carrier ty with Not_found -> errorlabstrm "field" (str"cannot find a declared field structure over"++ spc()++str"\""++pr_constr ty++str"\"")) | [] -> assert false (* let (req,_,_) = dest_rel cl in (try field_for_relation req with Not_found -> errorlabstrm "field" (str"cannot find a declared field structure for equality"++ spc()++str"\""++pr_constr req++str"\"")) *) let _ = Summary.declare_summary "tactic-new-field-table" { Summary.freeze_function = (fun () -> !field_from_carrier,!field_from_relation,!field_from_name); Summary.unfreeze_function = (fun (ct,rt,nt) -> field_from_carrier := ct; field_from_relation := rt; field_from_name := nt); Summary.init_function = (fun () -> field_from_carrier := Cmap.empty; field_from_relation := Cmap.empty; field_from_name := Spmap.empty) } let add_field_entry (sp,_kn) e = (* let _ = ty e.field_ok in let _ = ty e.field_simpl_eq_ok in let _ = ty e.field_simpl_ok in let _ = ty e.field_cond in *) field_from_carrier := Cmap.add e.field_carrier e !field_from_carrier; field_from_relation := Cmap.add e.field_req e !field_from_relation; field_from_name := Spmap.add sp e !field_from_name let subst_th (subst,th) = let c' = subst_mps subst th.field_carrier in let eq' = subst_mps subst th.field_req in let thm1' = subst_mps subst th.field_ok in let thm2' = subst_mps subst th.field_simpl_eq_ok in let thm3' = subst_mps subst th.field_simpl_ok in let thm4' = subst_mps subst th.field_simpl_eq_in_ok in let thm5' = subst_mps subst th.field_cond in let tac'= subst_tactic subst th.field_cst_tac in let pow_tac' = subst_tactic subst th.field_pow_tac 
in let pretac'= subst_tactic subst th.field_pre_tac in let posttac'= subst_tactic subst th.field_post_tac in if c' == th.field_carrier && eq' == th.field_req && thm1' == th.field_ok && thm2' == th.field_simpl_eq_ok && thm3' == th.field_simpl_ok && thm4' == th.field_simpl_eq_in_ok && thm5' == th.field_cond && tac' == th.field_cst_tac && pow_tac' == th.field_pow_tac && pretac' == th.field_pre_tac && posttac' == th.field_post_tac then th else { field_carrier = c'; field_req = eq'; field_cst_tac = tac'; field_pow_tac = pow_tac'; field_ok = thm1'; field_simpl_eq_ok = thm2'; field_simpl_ok = thm3'; field_simpl_eq_in_ok = thm4'; field_cond = thm5'; field_pre_tac = pretac'; field_post_tac = posttac' } let ftheory_to_obj : field_info -> obj = let cache_th (name,th) = add_field_entry name th in declare_object {(default_object "tactic-new-field-theory") with open_function = (fun i o -> if i=1 then cache_th o); cache_function = cache_th; subst_function = subst_th; classify_function = (fun x -> Substitute x) } let field_equality r inv req = match kind_of_term req with | App (f, [| _ |]) when eq_constr f (Lazy.force coq_eq) -> mkApp((Coqlib.build_coq_eq_data()).congr,[|r;r;inv|]) | _ -> let _setoid = setoid_of_relation (Global.env ()) r req in let signature = [Some (r,Some req)],Some(r,Some req) in let inv_m, inv_m_lem = try Rewrite.default_morphism signature inv with Not_found -> error "field inverse should be declared as a morphism" in inv_m_lem let add_field_theory name fth eqth morphth cst_tac inj (pre,post) power sign odiv = check_required_library (cdir@["Field_tac"]); let env = Global.env() in let sigma = Evd.empty in let (kind,r,zero,one,add,mul,sub,opp,div,inv,req,rth) = dest_field env sigma fth in let (sth,ext) = build_setoid_params r add mul opp req eqth in let eqth = Some(sth,ext) in let _ = add_theory name rth eqth morphth cst_tac (None,None) power sign odiv in let (pow_tac, pspec) = interp_power env power in let sspec = interp_sign env sign in let dspec = interp_div env odiv in let inv_m = field_equality r inv req in let rk = reflect_coeff morphth in let params = exec_tactic env 9 (field_ltac"field_lemmas") (List.map carg[sth;ext;inv_m;fth;pspec;sspec;dspec;rk]) in let lemma1 = constr_of params.(3) in let lemma2 = constr_of params.(4) in let lemma3 = constr_of params.(5) in let lemma4 = constr_of params.(6) in let cond_lemma = match inj with | Some thm -> mkApp(constr_of params.(8),[|thm|]) | None -> constr_of params.(7) in let lemma1 = decl_constant (string_of_id name^"_field_lemma1") lemma1 in let lemma2 = decl_constant (string_of_id name^"_field_lemma2") lemma2 in let lemma3 = decl_constant (string_of_id name^"_field_lemma3") lemma3 in let lemma4 = decl_constant (string_of_id name^"_field_lemma4") lemma4 in let cond_lemma = decl_constant (string_of_id name^"_lemma5") cond_lemma in let cst_tac = interp_cst_tac env sigma morphth kind (zero,one,add,mul,opp) cst_tac in let pretac = match pre with Some t -> Tacinterp.glob_tactic t | _ -> TacId [] in let posttac = match post with Some t -> Tacinterp.glob_tactic t | _ -> TacId [] in let _ = Lib.add_leaf name (ftheory_to_obj { field_carrier = r; field_req = req; field_cst_tac = cst_tac; field_pow_tac = pow_tac; field_ok = lemma1; field_simpl_eq_ok = lemma2; field_simpl_ok = lemma3; field_simpl_eq_in_ok = lemma4; field_cond = cond_lemma; field_pre_tac = pretac; field_post_tac = posttac }) in () type field_mod = Ring_mod of ring_mod | Inject of Topconstr.constr_expr VERNAC ARGUMENT EXTEND field_mod | [ ring_mod(m) ] -> [ Ring_mod m ] | [ 
"completeness" constr(inj) ] -> [ Inject inj ] END let process_field_mods l = let kind = ref None in let set = ref None in let cst_tac = ref None in let pre = ref None in let post = ref None in let inj = ref None in let sign = ref None in let power = ref None in let div = ref None in List.iter(function Ring_mod(Ring_kind k) -> set_once "field kind" kind k | Ring_mod(Const_tac t) -> set_once "tactic recognizing constants" cst_tac t | Ring_mod(Pre_tac t) -> set_once "preprocess tactic" pre t | Ring_mod(Post_tac t) -> set_once "postprocess tactic" post t | Ring_mod(Setoid(sth,ext)) -> set_once "setoid" set (ic sth,ic ext) | Ring_mod(Pow_spec(t,spec)) -> set_once "power" power (t,spec) | Ring_mod(Sign_spec t) -> set_once "sign" sign t | Ring_mod(Div_spec t) -> set_once "div" div t | Inject i -> set_once "infinite property" inj (ic i)) l; let k = match !kind with Some k -> k | None -> Abstract in (k, !set, !inj, !cst_tac, !pre, !post, !power, !sign, !div) VERNAC COMMAND EXTEND AddSetoidField | [ "Add" "Field" ident(id) ":" constr(t) field_mods(l) ] -> [ let (k,set,inj,cst_tac,pre,post,power,sign,div) = process_field_mods l in add_field_theory id (ic t) set k cst_tac inj (pre,post) power sign div] END let ltac_field_structure e = let req = carg e.field_req in let cst_tac = Tacexp e.field_cst_tac in let pow_tac = Tacexp e.field_pow_tac in let field_ok = carg e.field_ok in let field_simpl_ok = carg e.field_simpl_ok in let field_simpl_eq_ok = carg e.field_simpl_eq_ok in let field_simpl_eq_in_ok = carg e.field_simpl_eq_in_ok in let cond_ok = carg e.field_cond in let pretac = Tacexp(TacFun([None],e.field_pre_tac)) in let posttac = Tacexp(TacFun([None],e.field_post_tac)) in [req;cst_tac;pow_tac;field_ok;field_simpl_ok;field_simpl_eq_ok; field_simpl_eq_in_ok;cond_ok;pretac;posttac] let field_lookup (f:glob_tactic_expr) lH rl t gl = let env = pf_env gl in let sigma = project gl in let rl = make_args_list rl t in let e = find_field_structure env sigma rl in let rl = carg (make_term_list e.field_carrier rl) in let lH = carg (make_hyp_list env lH) in let field = ltac_field_structure e in ltac_apply f (field@[lH;rl]) gl TACTIC EXTEND field_lookup | [ "field_lookup" tactic(f) "[" constr_list(lH) "]" ne_constr_list(lt) ] -> [ let (t,l) = list_sep_last lt in field_lookup f lH l t ] END coq-8.4pl2/plugins/setoid_ring/Cring.v0000640000175000001440000002141512010532755017052 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* idtac | ?e1::?e2::_ => match goal with |- (?op ?u1 ?u2) => change (op (@Ring_polynom.PEeval _ zero _+_ _*_ _-_ -_ Z Ncring_initial.gen_phiZ N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication) lvar e1) (@Ring_polynom.PEeval _ zero _+_ _*_ _-_ -_ Z Ncring_initial.gen_phiZ N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication) lvar e2)) end end. Section cring. Context {R:Type}`{Rr:Cring R}. Lemma cring_eq_ext: ring_eq_ext _+_ _*_ -_ _==_. Proof. intros. apply mk_reqe; solve_proper. Defined. Lemma cring_almost_ring_theory: almost_ring_theory (R:=R) zero one _+_ _*_ _-_ -_ _==_. intros. apply mk_art ;intros. rewrite ring_add_0_l; reflexivity. rewrite ring_add_comm; reflexivity. rewrite ring_add_assoc; reflexivity. rewrite ring_mul_1_l; reflexivity. apply ring_mul_0_l. rewrite cring_mul_comm; reflexivity. rewrite ring_mul_assoc; reflexivity. rewrite ring_distr_l; reflexivity. rewrite ring_opp_mul_l; reflexivity. apply ring_opp_add. rewrite ring_sub_def ; reflexivity. Defined. 
Lemma cring_morph: ring_morph zero one _+_ _*_ _-_ -_ _==_ 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool Ncring_initial.gen_phiZ. intros. apply mkmorph ; intros; simpl; try reflexivity. rewrite Ncring_initial.gen_phiZ_add; reflexivity. rewrite ring_sub_def. unfold Z.sub. rewrite Ncring_initial.gen_phiZ_add. rewrite Ncring_initial.gen_phiZ_opp; reflexivity. rewrite Ncring_initial.gen_phiZ_mul; reflexivity. rewrite Ncring_initial.gen_phiZ_opp; reflexivity. rewrite (Zeqb_ok x y H). reflexivity. Defined. Lemma cring_power_theory : @Ring_theory.power_theory R one _*_ _==_ N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication). intros; apply Ring_theory.mkpow_th. reflexivity. Defined. Lemma cring_div_theory: div_theory _==_ Z.add Z.mul Ncring_initial.gen_phiZ Z.quotrem. intros. apply InitialRing.Ztriv_div_th. unfold Setoid_Theory. simpl. apply ring_setoid. Defined. End cring. Ltac cring_gen := match goal with |- ?g => let lterm := lterm_goal g in match eval red in (list_reifyl (lterm:=lterm)) with | (?fv, ?lexpr) => (*idtac "variables:";idtac fv; idtac "terms:"; idtac lterm; idtac "reifications:"; idtac lexpr; *) reify_goal fv lexpr lterm; match goal with |- ?g => generalize (@Ring_polynom.ring_correct _ 0 1 _+_ _*_ _-_ -_ _==_ ring_setoid cring_eq_ext cring_almost_ring_theory Z 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool Ncring_initial.gen_phiZ cring_morph N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication) cring_power_theory Z.quotrem cring_div_theory O fv nil); let rc := fresh "rc"in intro rc; apply rc end end end. Ltac cring_compute:= vm_compute; reflexivity. Ltac cring:= intros; cring_gen; cring_compute. Instance Zcri: (Cring (Rr:=Zr)). red. exact Z.mul_comm. Defined. (* Cring_simplify *) Ltac cring_simplify_aux lterm fv lexpr hyp := match lterm with | ?t0::?lterm => match lexpr with | ?e::?le => let t := constr:(@Ring_polynom.norm_subst Z 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool Z.quotrem O nil e) in let te := constr:(@Ring_polynom.Pphi_dev _ 0 1 _+_ _*_ _-_ -_ Z 0%Z 1%Z Zeq_bool Ncring_initial.gen_phiZ get_signZ fv t) in let eq1 := fresh "ring" in let nft := eval vm_compute in t in let t':= fresh "t" in pose (t' := nft); assert (eq1 : t = t'); [vm_cast_no_check (eq_refl t')| let eq2 := fresh "ring" in assert (eq2:(@Ring_polynom.PEeval _ zero _+_ _*_ _-_ -_ Z Ncring_initial.gen_phiZ N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication) fv e) == te); [let eq3 := fresh "ring" in generalize (@ring_rw_correct _ 0 1 _+_ _*_ _-_ -_ _==_ ring_setoid cring_eq_ext cring_almost_ring_theory Z 0%Z 1%Z Z.add Z.mul Z.sub Z.opp Zeq_bool Ncring_initial.gen_phiZ cring_morph N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication) cring_power_theory Z.quotrem cring_div_theory get_signZ get_signZ_th O nil fv I nil (eq_refl nil) ); intro eq3; apply eq3; reflexivity| match hyp with | 1%nat => rewrite eq2 | ?H => try rewrite eq2 in H end]; let P:= fresh "P" in match hyp with | 1%nat => rewrite eq1; pattern (@Ring_polynom.Pphi_dev _ 0 1 _+_ _*_ _-_ -_ Z 0%Z 1%Z Zeq_bool Ncring_initial.gen_phiZ get_signZ fv t'); match goal with |- (?p ?t) => set (P:=p) end; unfold t' in *; clear t' eq1 eq2; unfold Pphi_dev, Pphi_avoid; simpl; repeat (unfold mkmult1, mkmultm1, mkmult_c_pos, mkmult_c, mkadd_mult, mkmult_c_pos, mkmult_pow, mkadd_mult, mkpow;simpl) | ?H => rewrite eq1 in H; pattern (@Ring_polynom.Pphi_dev _ 0 1 _+_ _*_ _-_ -_ Z 0%Z 1%Z Zeq_bool Ncring_initial.gen_phiZ get_signZ fv t') in H; match type of H with | (?p ?t) => set (P:=p) in H end; unfold t' in *; clear t' eq1 eq2; unfold Pphi_dev, Pphi_avoid in H; 
simpl in H; repeat (unfold mkmult1, mkmultm1, mkmult_c_pos, mkmult_c, mkadd_mult, mkmult_c_pos, mkmult_pow, mkadd_mult, mkpow in H;simpl in H) end; unfold P in *; clear P ]; cring_simplify_aux lterm fv le hyp | nil => idtac end | nil => idtac end. Ltac set_variables fv := match fv with | nil => idtac | ?t::?fv => let v := fresh "X" in set (v:=t) in *; set_variables fv end. Ltac deset n:= match n with | 0%nat => idtac | S ?n1 => match goal with | h:= ?v : ?t |- ?g => unfold h in *; clear h; deset n1 end end. (* a est soit un terme de l'anneau, soit une liste de termes. J'ai pas réussi à un décomposer les Vlists obtenues avec ne_constr_list dans Tactic Notation *) Ltac cring_simplify_gen a hyp := let lterm := match a with | _::_ => a | _ => constr:(a::nil) end in match eval red in (list_reifyl (lterm:=lterm)) with | (?fv, ?lexpr) => idtac lterm; idtac fv; idtac lexpr; let n := eval compute in (length fv) in idtac n; let lt:=fresh "lt" in set (lt:= lterm); let lv:=fresh "fv" in set (lv:= fv); (* les termes de fv sont remplacés par des variables pour pouvoir utiliser simpl ensuite sans risquer des simplifications indésirables *) set_variables fv; let lterm1 := eval unfold lt in lt in let lv1 := eval unfold lv in lv in idtac lterm1; idtac lv1; cring_simplify_aux lterm1 lv1 lexpr hyp; clear lt lv; (* on remet les termes de fv *) deset n end. Tactic Notation "cring_simplify" constr(lterm):= cring_simplify_gen lterm 1%nat. Tactic Notation "cring_simplify" constr(lterm) "in" ident(H):= cring_simplify_gen lterm H. coq-8.4pl2/plugins/setoid_ring/Rings_Q.v0000640000175000001440000000141311616534440017352 0ustar notinusersRequire Export Cring. Require Export Integral_domain. (* Rational numbers *) Require Import QArith. Instance Qops: (@Ring_ops Q 0%Q 1%Q Qplus Qmult Qminus Qopp Qeq). Instance Qri : (Ring (Ro:=Qops)). constructor. try apply Q_Setoid. apply Qplus_comp. apply Qmult_comp. apply Qminus_comp. apply Qopp_comp. exact Qplus_0_l. exact Qplus_comm. apply Qplus_assoc. exact Qmult_1_l. exact Qmult_1_r. apply Qmult_assoc. apply Qmult_plus_distr_l. intros. apply Qmult_plus_distr_r. reflexivity. exact Qplus_opp_r. Defined. Instance Qcri: (Cring (Rr:=Qri)). red. exact Qmult_comm. Defined. Lemma Q_one_zero: not (Qeq 1%Q 0%Q). unfold Qeq. simpl. auto with *. Qed. Instance Qdi : (Integral_domain (Rcr:=Qcri)). constructor. exact Qmult_integral. exact Q_one_zero. Defined. coq-8.4pl2/plugins/setoid_ring/Algebra_syntax.v0000640000175000001440000000163411616534440020760 0ustar notinusers Class Zero (A : Type) := zero : A. Notation "0" := zero. Class One (A : Type) := one : A. Notation "1" := one. Class Addition (A : Type) := addition : A -> A -> A. Notation "_+_" := addition. Notation "x + y" := (addition x y). Class Multiplication {A B : Type} := multiplication : A -> B -> B. Notation "_*_" := multiplication. Notation "x * y" := (multiplication x y). Class Subtraction (A : Type) := subtraction : A -> A -> A. Notation "_-_" := subtraction. Notation "x - y" := (subtraction x y). Class Opposite (A : Type) := opposite : A -> A. Notation "-_" := opposite. Notation "- x" := (opposite(x)). Class Equality {A : Type}:= equality : A -> A -> Prop. Notation "_==_" := equality. Notation "x == y" := (equality x y) (at level 70, no associativity). Class Bracket (A B: Type):= bracket : A -> B. Notation "[ x ]" := (bracket(x)). Class Power {A B: Type} := power : A -> B -> A. Notation "x ^ y" := (power x y). 
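(* Example of the resulting mixed syntax (a sketch; it assumes instances
   such as Bracket Z R, Addition R, a Multiplication instance on R and an
   Equality on R, as provided for the carriers used below):
     forall (c : Z) (x y : R), [c] * (x + y) == [c] * x + [c] * y
   Every operator occurring in this statement is one of the class-based
   notations declared above. *)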
coq-8.4pl2/plugins/setoid_ring/Rings_R.v0000640000175000001440000000162711616534440017362 0ustar notinusersRequire Export Cring. Require Export Integral_domain. (* Real numbers *) Require Import Reals. Require Import RealField. Lemma Rsth : Setoid_Theory R (@eq R). constructor;red;intros;subst;trivial. Qed. Instance Rops: (@Ring_ops R 0%R 1%R Rplus Rmult Rminus Ropp (@eq R)). Instance Rri : (Ring (Ro:=Rops)). constructor; try (try apply Rsth; try (unfold respectful, Proper; unfold equality; unfold eq_notation in *; intros; try rewrite H; try rewrite H0; reflexivity)). exact Rplus_0_l. exact Rplus_comm. symmetry. apply Rplus_assoc. exact Rmult_1_l. exact Rmult_1_r. symmetry. apply Rmult_assoc. exact Rmult_plus_distr_r. intros; apply Rmult_plus_distr_l. exact Rplus_opp_r. Defined. Instance Rcri: (Cring (Rr:=Rri)). red. exact Rmult_comm. Defined. Lemma R_one_zero: 1%R <> 0%R. discrR. Qed. Instance Rdi : (Integral_domain (Rcr:=Rcri)). constructor. exact Rmult_integral. exact R_one_zero. Defined. coq-8.4pl2/plugins/setoid_ring/BinList.v0000640000175000001440000000424012010532755017351 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* tl l | xO p => jump p (jump p l) | xI p => jump p (jump p (tl l)) end. Fixpoint nth (p:positive) (l:list A) {struct p} : A:= match p with | xH => hd default l | xO p => nth p (jump p l) | xI p => nth p (jump p (tl l)) end. Lemma jump_tl : forall j l, tl (jump j l) = jump j (tl l). Proof. induction j;simpl;intros; now rewrite ?IHj. Qed. Lemma jump_succ : forall j l, jump (Pos.succ j) l = jump 1 (jump j l). Proof. induction j;simpl;intros. - rewrite !IHj; simpl; now rewrite !jump_tl. - now rewrite !jump_tl. - trivial. Qed. Lemma jump_add : forall i j l, jump (i + j) l = jump i (jump j l). Proof. induction i using Pos.peano_ind; intros. - now rewrite Pos.add_1_l, jump_succ. - now rewrite Pos.add_succ_l, !jump_succ, IHi. Qed. Lemma jump_pred_double : forall i l, jump (Pos.pred_double i) (tl l) = jump i (jump i l). Proof. induction i;intros;simpl. - now rewrite !jump_tl. - now rewrite IHi, <- 2 jump_tl, IHi. - trivial. Qed. Lemma nth_jump : forall p l, nth p (tl l) = hd default (jump p l). Proof. induction p;simpl;intros. - now rewrite <-jump_tl, IHp. - now rewrite <-jump_tl, IHp. - trivial. Qed. Lemma nth_pred_double : forall p l, nth (Pos.pred_double p) (tl l) = nth p (jump p l). Proof. induction p;simpl;intros. - now rewrite !jump_tl. - now rewrite jump_pred_double, <- !jump_tl, IHp. - trivial. Qed. End MakeBinList. coq-8.4pl2/plugins/setoid_ring/NArithRing.v0000640000175000001440000000137712010532755020022 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t | _ => constr:NotConstant end. Add Ring Nr : Nth (decidable Neqb_ok, constants [Ncst]). coq-8.4pl2/plugins/setoid_ring/Ncring_tac.v0000640000175000001440000002233412010532755020060 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* constr:(t1::t2::nil) | ?t1 = ?t2 => constr:(t1::t2::nil) | (_ ?t1 ?t2) => constr:(t1::t2::nil) end. Lemma Zeqb_ok: forall x y : Z, Zeq_bool x y = true -> x == y. intros x y H. rewrite (Zeq_bool_eq x y H). reflexivity. Qed. 
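(* For instance, on a goal a * b + c == c + b * a, lterm_goal returns the
   list (a * b + c :: c + b * a :: nil); list_reifyl then computes a
   variable map fv together with reified expressions lexpr, which
   reify_goal (just below) uses to restate the goal in terms of PEeval. *)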
Ltac reify_goal lvar lexpr lterm:= (*idtac lvar; idtac lexpr; idtac lterm;*) match lexpr with nil => idtac | ?e1::?e2::_ => match goal with |- (?op ?u1 ?u2) => change (op (@PEeval Z _ _ _ _ _ _ _ _ _ (@gen_phiZ _ _ _ _ _ _ _ _ _) N (fun n:N => n) (@pow_N _ _ _ _ _ _ _ _ _) lvar e1) (@PEeval Z _ _ _ _ _ _ _ _ _ (@gen_phiZ _ _ _ _ _ _ _ _ _) N (fun n:N => n) (@pow_N _ _ _ _ _ _ _ _ _) lvar e2)) end end. Lemma comm: forall (R:Type)`{Ring R}(c : Z) (x : R), x * (gen_phiZ c) == (gen_phiZ c) * x. induction c. intros. simpl. gen_rewrite. simpl. intros. rewrite <- same_gen. induction p. simpl. gen_rewrite. rewrite IHp. reflexivity. simpl. gen_rewrite. rewrite IHp. reflexivity. simpl. gen_rewrite. simpl. intros. rewrite <- same_gen. induction p. simpl. generalize IHp. clear IHp. gen_rewrite. intro IHp. rewrite IHp. reflexivity. simpl. generalize IHp. clear IHp. gen_rewrite. intro IHp. rewrite IHp. reflexivity. simpl. gen_rewrite. Qed. Ltac ring_gen := match goal with |- ?g => let lterm := lterm_goal g in match eval red in (list_reifyl (lterm:=lterm)) with | (?fv, ?lexpr) => (*idtac "variables:";idtac fv; idtac "terms:"; idtac lterm; idtac "reifications:"; idtac lexpr; *) reify_goal fv lexpr lterm; match goal with |- ?g => apply (@ring_correct Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ (@gen_phiZ _ _ _ _ _ _ _ _ _) _ (@comm _ _ _ _ _ _ _ _ _ _) Zeq_bool Zeqb_ok N (fun n:N => n) (@pow_N _ _ _ _ _ _ _ _ _)); [apply mkpow_th; reflexivity |vm_compute; reflexivity] end end end. Ltac non_commutative_ring:= intros; ring_gen. (* simplification *) Ltac ring_simplify_aux lterm fv lexpr hyp := match lterm with | ?t0::?lterm => match lexpr with | ?e::?le => (* e:PExpr Z est la réification de t0:R *) let t := constr:(@Ncring_polynom.norm_subst Z 0%Z 1%Z Z.add Z.mul Z.sub Z.opp (@eq Z) Zops Zeq_bool e) in (* t:Pol Z *) let te := constr:(@Ncring_polynom.Pphi Z _ 0 1 _+_ _*_ _-_ -_ _==_ _ Ncring_initial.gen_phiZ fv t) in let eq1 := fresh "ring" in let nft := eval vm_compute in t in let t':= fresh "t" in pose (t' := nft); assert (eq1 : t = t'); [vm_cast_no_check (eq_refl t')| let eq2 := fresh "ring" in assert (eq2:(@Ncring_polynom.PEeval Z _ 0 1 _+_ _*_ _-_ -_ _==_ _ Ncring_initial.gen_phiZ N (fun n:N => n) (@Ring_theory.pow_N _ 1 multiplication) fv e) == te); [apply (@Ncring_polynom.norm_subst_ok Z _ 0%Z 1%Z Z.add Z.mul Z.sub Z.opp (@eq Z) _ _ 0 1 _+_ _*_ _-_ -_ _==_ _ _ Ncring_initial.gen_phiZ _ (@comm _ 0 1 _+_ _*_ _-_ -_ _==_ _ _) _ Zeqb_ok); apply mkpow_th; reflexivity | match hyp with | 1%nat => rewrite eq2 | ?H => try rewrite eq2 in H end]; let P:= fresh "P" in match hyp with | 1%nat => idtac "ok"; rewrite eq1; pattern (@Ncring_polynom.Pphi Z _ 0 1 _+_ _*_ _-_ -_ _==_ _ Ncring_initial.gen_phiZ fv t'); match goal with |- (?p ?t) => set (P:=p) end; unfold t' in *; clear t' eq1 eq2; simpl | ?H => rewrite eq1 in H; pattern (@Ncring_polynom.Pphi Z _ 0 1 _+_ _*_ _-_ -_ _==_ _ Ncring_initial.gen_phiZ fv t') in H; match type of H with | (?p ?t) => set (P:=p) in H end; unfold t' in *; clear t' eq1 eq2; simpl in H end; unfold P in *; clear P ]; ring_simplify_aux lterm fv le hyp | nil => idtac end | nil => idtac end. Ltac set_variables fv := match fv with | nil => idtac | ?t::?fv => let v := fresh "X" in set (v:=t) in *; set_variables fv end. Ltac deset n:= match n with | 0%nat => idtac | S ?n1 => match goal with | h:= ?v : ?t |- ?g => unfold h in *; clear h; deset n1 end end. (* a est soit un terme de l'anneau, soit une liste de termes. 
J'ai pas réussi à un décomposer les Vlists obtenues avec ne_constr_list dans Tactic Notation *) Ltac ring_simplify_gen a hyp := let lterm := match a with | _::_ => a | _ => constr:(a::nil) end in match eval red in (list_reifyl (lterm:=lterm)) with | (?fv, ?lexpr) => idtac lterm; idtac fv; idtac lexpr; let n := eval compute in (length fv) in idtac n; let lt:=fresh "lt" in set (lt:= lterm); let lv:=fresh "fv" in set (lv:= fv); (* les termes de fv sont remplacés par des variables pour pouvoir utiliser simpl ensuite sans risquer des simplifications indésirables *) set_variables fv; let lterm1 := eval unfold lt in lt in let lv1 := eval unfold lv in lv in idtac lterm1; idtac lv1; ring_simplify_aux lterm1 lv1 lexpr hyp; clear lt lv; (* on remet les termes de fv *) deset n end. Tactic Notation "non_commutative_ring_simplify" constr(lterm):= ring_simplify_gen lterm 1%nat. Tactic Notation "non_commutative_ring_simplify" constr(lterm) "in" ident(H):= ring_simplify_gen lterm H. coq-8.4pl2/plugins/setoid_ring/Ring_equiv.v0000640000175000001440000000314311160567762020131 0ustar notinusersRequire Import Setoid_ring_theory. Require Import LegacyRing_theory. Require Import Ring_theory. Set Implicit Arguments. Section Old2New. Variable A : Type. Variable Aplus : A -> A -> A. Variable Amult : A -> A -> A. Variable Aone : A. Variable Azero : A. Variable Aopp : A -> A. Variable Aeq : A -> A -> bool. Variable R : Ring_Theory Aplus Amult Aone Azero Aopp Aeq. Let Aminus := fun x y => Aplus x (Aopp y). Lemma ring_equiv1 : ring_theory Azero Aone Aplus Amult Aminus Aopp (eq (A:=A)). Proof. destruct R. split; eauto. Qed. End Old2New. Section New2OldRing. Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R). Variable Rth : ring_theory rO rI radd rmul rsub ropp (eq (A:=R)). Variable reqb : R -> R -> bool. Variable reqb_ok : forall x y, reqb x y = true -> x = y. Lemma ring_equiv2 : Ring_Theory radd rmul rI rO ropp reqb. Proof. elim Rth; intros; constructor; eauto. intros. apply reqb_ok. destruct (reqb x y); trivial; intros. elim H. Qed. Definition default_eqb : R -> R -> bool := fun x y => false. Lemma default_eqb_ok : forall x y, default_eqb x y = true -> x = y. Proof. discriminate 1. Qed. End New2OldRing. Section New2OldSemiRing. Variable R : Type. Variable (rO rI : R) (radd rmul: R->R->R). Variable SRth : semi_ring_theory rO rI radd rmul (eq (A:=R)). Variable reqb : R -> R -> bool. Variable reqb_ok : forall x y, reqb x y = true -> x = y. Lemma sring_equiv2 : Semi_Ring_Theory radd rmul rI rO reqb. Proof. elim SRth; intros; constructor; eauto. intros. apply reqb_ok. destruct (reqb x y); trivial; intros. elim H. Qed. End New2OldSemiRing. coq-8.4pl2/plugins/setoid_ring/InitialRing.v0000640000175000001440000006114612010532755020226 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* R->R) (ropp : R -> R). Variable req : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (radd x y). Notation "x * y " := (rmul x y). Notation "x - y " := (rsub x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). Variable Rsth : Setoid_Theory R req. Add Setoid R req Rsth as R_setoid3. Ltac rrefl := gen_reflexivity Rsth. Variable Reqe : ring_eq_ext radd rmul ropp req. Add Morphism radd : radd_ext3. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext3. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext3. exact (Ropp_ext Reqe). Qed. 
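(* The two fixpoints defined next interpret a binary positive numeral as a
   ring expression built from 1, + and *.  Worked example (illustration
   only): 6%positive is xO (xI xH), hence

     gen_phiPOS1 6%positive = (1 + 1) * (1 + (1 + 1) * 1)

   which indeed denotes 2 * (1 + 2) = 6 in the target ring, while the
   optimized [gen_phiPOS] yields the slightly smaller term
   (1 + 1) * (1 + (1 + 1)); the lemma [same_gen] below shows both agree. *)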
Fixpoint gen_phiPOS1 (p:positive) : R := match p with | xH => 1 | xO p => (1 + 1) * (gen_phiPOS1 p) | xI p => 1 + ((1 + 1) * (gen_phiPOS1 p)) end. Fixpoint gen_phiPOS (p:positive) : R := match p with | xH => 1 | xO xH => (1 + 1) | xO p => (1 + 1) * (gen_phiPOS p) | xI xH => 1 + (1 +1) | xI p => 1 + ((1 + 1) * (gen_phiPOS p)) end. Definition gen_phiZ1 z := match z with | Zpos p => gen_phiPOS1 p | Z0 => 0 | Zneg p => -(gen_phiPOS1 p) end. Definition gen_phiZ z := match z with | Zpos p => gen_phiPOS p | Z0 => 0 | Zneg p => -(gen_phiPOS p) end. Notation "[ x ]" := (gen_phiZ x). Definition get_signZ z := match z with | Zneg p => Some (Zpos p) | _ => None end. Lemma get_signZ_th : sign_theory Z.opp Zeq_bool get_signZ. Proof. constructor. destruct c;intros;try discriminate. injection H;clear H;intros H1;subst c'. simpl. unfold Zeq_bool. rewrite Z.compare_refl. trivial. Qed. Section ALMOST_RING. Variable ARth : almost_ring_theory 0 1 radd rmul rsub ropp req. Add Morphism rsub : rsub_ext3. exact (ARsub_ext Rsth Reqe ARth). Qed. Ltac norm := gen_srewrite Rsth Reqe ARth. Ltac add_push := gen_add_push radd Rsth Reqe ARth. Lemma same_gen : forall x, gen_phiPOS1 x == gen_phiPOS x. Proof. induction x;simpl. rewrite IHx;destruct x;simpl;norm. rewrite IHx;destruct x;simpl;norm. rrefl. Qed. Lemma ARgen_phiPOS_Psucc : forall x, gen_phiPOS1 (Pos.succ x) == 1 + (gen_phiPOS1 x). Proof. induction x;simpl;norm. rewrite IHx;norm. add_push 1;rrefl. Qed. Lemma ARgen_phiPOS_add : forall x y, gen_phiPOS1 (x + y) == (gen_phiPOS1 x) + (gen_phiPOS1 y). Proof. induction x;destruct y;simpl;norm. rewrite Pos.add_carry_spec. rewrite ARgen_phiPOS_Psucc. rewrite IHx;norm. add_push (gen_phiPOS1 y);add_push 1;rrefl. rewrite IHx;norm;add_push (gen_phiPOS1 y);rrefl. rewrite ARgen_phiPOS_Psucc;norm;add_push 1;rrefl. rewrite IHx;norm;add_push(gen_phiPOS1 y); add_push 1;rrefl. rewrite IHx;norm;add_push(gen_phiPOS1 y);rrefl. add_push 1;rrefl. rewrite ARgen_phiPOS_Psucc;norm;add_push 1;rrefl. Qed. Lemma ARgen_phiPOS_mult : forall x y, gen_phiPOS1 (x * y) == gen_phiPOS1 x * gen_phiPOS1 y. Proof. induction x;intros;simpl;norm. rewrite ARgen_phiPOS_add;simpl;rewrite IHx;norm. rewrite IHx;rrefl. Qed. End ALMOST_RING. Variable Rth : ring_theory 0 1 radd rmul rsub ropp req. Let ARth := Rth_ARth Rsth Reqe Rth. Add Morphism rsub : rsub_ext4. exact (ARsub_ext Rsth Reqe ARth). Qed. Ltac norm := gen_srewrite Rsth Reqe ARth. Ltac add_push := gen_add_push radd Rsth Reqe ARth. (*morphisms are extensionaly equal*) Lemma same_genZ : forall x, [x] == gen_phiZ1 x. Proof. destruct x;simpl; try rewrite (same_gen ARth);rrefl. Qed. Lemma gen_Zeqb_ok : forall x y, Zeq_bool x y = true -> [x] == [y]. Proof. intros x y H. assert (H1 := Zeq_bool_eq x y H);unfold IDphi in H1. rewrite H1;rrefl. Qed. Lemma gen_phiZ1_pos_sub : forall x y, gen_phiZ1 (Z.pos_sub x y) == gen_phiPOS1 x + -gen_phiPOS1 y. Proof. intros x y. rewrite Z.pos_sub_spec. case Pos.compare_spec; intros H; simpl. rewrite H. rewrite (Ropp_def Rth);rrefl. rewrite <- (Pos.sub_add y x H) at 2. rewrite Pos.add_comm. rewrite (ARgen_phiPOS_add ARth);simpl;norm. rewrite (Ropp_def Rth);norm. rewrite <- (Pos.sub_add x y H) at 2. rewrite (ARgen_phiPOS_add ARth);simpl;norm. add_push (gen_phiPOS1 (x-y));rewrite (Ropp_def Rth); norm. Qed. Lemma gen_phiZ_add : forall x y, [x + y] == [x] + [y]. Proof. intros x y; repeat rewrite same_genZ; generalize x y;clear x y. destruct x, y; simpl; norm. apply (ARgen_phiPOS_add ARth). apply gen_phiZ1_pos_sub. rewrite gen_phiZ1_pos_sub. apply (Radd_comm Rth). 
rewrite (ARgen_phiPOS_add ARth); norm. Qed. Lemma gen_phiZ_mul : forall x y, [x * y] == [x] * [y]. Proof. intros x y;repeat rewrite same_genZ. destruct x;destruct y;simpl;norm; rewrite (ARgen_phiPOS_mult ARth);try (norm;fail). rewrite (Ropp_opp Rsth Reqe Rth);rrefl. Qed. Lemma gen_phiZ_ext : forall x y : Z, x = y -> [x] == [y]. Proof. intros;subst;rrefl. Qed. (*proof that [.] satisfies morphism specifications*) Lemma gen_phiZ_morph : ring_morph 0 1 radd rmul rsub ropp req Z0 (Zpos xH) Z.add Z.mul Z.sub Z.opp Zeq_bool gen_phiZ. Proof. assert ( SRmorph : semi_morph 0 1 radd rmul req Z0 (Zpos xH) Z.add Z.mul Zeq_bool gen_phiZ). apply mkRmorph;simpl;try rrefl. apply gen_phiZ_add. apply gen_phiZ_mul. apply gen_Zeqb_ok. apply (Smorph_morph Rsth Reqe Rth Zth SRmorph gen_phiZ_ext). Qed. End ZMORPHISM. (** N is a semi-ring and a setoid*) Lemma Nsth : Setoid_Theory N (@eq N). Proof (Eqsth N). Lemma Nseqe : sring_eq_ext N.add N.mul (@eq N). Proof (Eq_s_ext N.add N.mul). Lemma Nth : semi_ring_theory 0%N 1%N N.add N.mul (@eq N). Proof. constructor. exact N.add_0_l. exact N.add_comm. exact N.add_assoc. exact N.mul_1_l. exact N.mul_0_l. exact N.mul_comm. exact N.mul_assoc. exact N.mul_add_distr_r. Qed. Definition Nsub := SRsub N.add. Definition Nopp := (@SRopp N). Lemma Neqe : ring_eq_ext N.add N.mul Nopp (@eq N). Proof (SReqe_Reqe Nseqe). Lemma Nath : almost_ring_theory 0%N 1%N N.add N.mul Nsub Nopp (@eq N). Proof (SRth_ARth Nsth Nth). Lemma Neqb_ok : forall x y, N.eqb x y = true -> x = y. Proof. exact (fun x y => proj1 (N.eqb_eq x y)). Qed. (**Same as above : definition of two,extensionaly equal, generic morphisms *) (**from N to any semi-ring*) Section NMORPHISM. Variable R : Type. Variable (rO rI : R) (radd rmul: R->R->R). Variable req : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (radd x y). Notation "x * y " := (rmul x y). Variable Rsth : Setoid_Theory R req. Add Setoid R req Rsth as R_setoid4. Ltac rrefl := gen_reflexivity Rsth. Variable SReqe : sring_eq_ext radd rmul req. Variable SRth : semi_ring_theory 0 1 radd rmul req. Let ARth := SRth_ARth Rsth SRth. Let Reqe := SReqe_Reqe SReqe. Let ropp := (@SRopp R). Let rsub := (@SRsub R radd). Notation "x - y " := (rsub x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). Add Morphism radd : radd_ext4. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext4. exact (Rmul_ext Reqe). Qed. Ltac norm := gen_srewrite_sr Rsth Reqe ARth. Definition gen_phiN1 x := match x with | N0 => 0 | Npos x => gen_phiPOS1 1 radd rmul x end. Definition gen_phiN x := match x with | N0 => 0 | Npos x => gen_phiPOS 1 radd rmul x end. Notation "[ x ]" := (gen_phiN x). Lemma same_genN : forall x, [x] == gen_phiN1 x. Proof. destruct x;simpl. reflexivity. now rewrite (same_gen Rsth Reqe ARth). Qed. Lemma gen_phiN_add : forall x y, [x + y] == [x] + [y]. Proof. intros x y;repeat rewrite same_genN. destruct x;destruct y;simpl;norm. apply (ARgen_phiPOS_add Rsth Reqe ARth). Qed. Lemma gen_phiN_mult : forall x y, [x * y] == [x] * [y]. Proof. intros x y;repeat rewrite same_genN. destruct x;destruct y;simpl;norm. apply (ARgen_phiPOS_mult Rsth Reqe ARth). Qed. Lemma gen_phiN_sub : forall x y, [Nsub x y] == [x] - [y]. Proof. exact gen_phiN_add. Qed. (*gen_phiN satisfies morphism specifications*) Lemma gen_phiN_morph : ring_morph 0 1 radd rmul rsub ropp req 0%N 1%N N.add N.mul Nsub Nopp N.eqb gen_phiN. Proof. constructor; simpl; try reflexivity. apply gen_phiN_add. apply gen_phiN_sub. apply gen_phiN_mult. intros x y EQ. apply N.eqb_eq in EQ. 
now subst. Qed. End NMORPHISM. (* Words on N : initial structure for almost-rings. *) Definition Nword := list N. Definition NwO : Nword := nil. Definition NwI : Nword := 1%N :: nil. Definition Nwcons n (w : Nword) : Nword := match w, n with | nil, 0%N => nil | _, _ => n :: w end. Fixpoint Nwadd (w1 w2 : Nword) {struct w1} : Nword := match w1, w2 with | n1::w1', n2:: w2' => (n1+n2)%N :: Nwadd w1' w2' | nil, _ => w2 | _, nil => w1 end. Definition Nwopp (w:Nword) : Nword := Nwcons 0%N w. Definition Nwsub w1 w2 := Nwadd w1 (Nwopp w2). Fixpoint Nwscal (n : N) (w : Nword) {struct w} : Nword := match w with | m :: w' => (n*m)%N :: Nwscal n w' | nil => nil end. Fixpoint Nwmul (w1 w2 : Nword) {struct w1} : Nword := match w1 with | 0%N::w1' => Nwopp (Nwmul w1' w2) | n1::w1' => Nwsub (Nwscal n1 w2) (Nwmul w1' w2) | nil => nil end. Fixpoint Nw_is0 (w : Nword) : bool := match w with | nil => true | 0%N :: w' => Nw_is0 w' | _ => false end. Fixpoint Nweq_bool (w1 w2 : Nword) {struct w1} : bool := match w1, w2 with | n1::w1', n2::w2' => if N.eqb n1 n2 then Nweq_bool w1' w2' else false | nil, _ => Nw_is0 w2 | _, nil => Nw_is0 w1 end. Section NWORDMORPHISM. Variable R : Type. Variable (rO rI : R) (radd rmul rsub: R->R->R) (ropp : R -> R). Variable req : R -> R -> Prop. Notation "0" := rO. Notation "1" := rI. Notation "x + y" := (radd x y). Notation "x * y " := (rmul x y). Notation "x - y " := (rsub x y). Notation "- x" := (ropp x). Notation "x == y" := (req x y). Variable Rsth : Setoid_Theory R req. Add Setoid R req Rsth as R_setoid5. Ltac rrefl := gen_reflexivity Rsth. Variable Reqe : ring_eq_ext radd rmul ropp req. Add Morphism radd : radd_ext5. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext5. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext5. exact (Ropp_ext Reqe). Qed. Variable ARth : almost_ring_theory 0 1 radd rmul rsub ropp req. Add Morphism rsub : rsub_ext7. exact (ARsub_ext Rsth Reqe ARth). Qed. Ltac norm := gen_srewrite Rsth Reqe ARth. Ltac add_push := gen_add_push radd Rsth Reqe ARth. Fixpoint gen_phiNword (w : Nword) : R := match w with | nil => 0 | n :: nil => gen_phiN rO rI radd rmul n | N0 :: w' => - gen_phiNword w' | n::w' => gen_phiN rO rI radd rmul n - gen_phiNword w' end. Lemma gen_phiNword0_ok : forall w, Nw_is0 w = true -> gen_phiNword w == 0. Proof. induction w; simpl; intros; auto. reflexivity. destruct a. destruct w. reflexivity. rewrite IHw; trivial. apply (ARopp_zero Rsth Reqe ARth). discriminate. Qed. Lemma gen_phiNword_cons : forall w n, gen_phiNword (n::w) == gen_phiN rO rI radd rmul n - gen_phiNword w. induction w. destruct n; simpl; norm. intros. destruct n; norm. Qed. Lemma gen_phiNword_Nwcons : forall w n, gen_phiNword (Nwcons n w) == gen_phiN rO rI radd rmul n - gen_phiNword w. destruct w; intros. destruct n; norm. unfold Nwcons. rewrite gen_phiNword_cons. reflexivity. Qed. Lemma gen_phiNword_ok : forall w1 w2, Nweq_bool w1 w2 = true -> gen_phiNword w1 == gen_phiNword w2. induction w1; intros. simpl. rewrite (gen_phiNword0_ok _ H). reflexivity. rewrite gen_phiNword_cons. destruct w2. simpl in H. destruct a; try discriminate. rewrite (gen_phiNword0_ok _ H). norm. simpl in H. rewrite gen_phiNword_cons. case_eq (N.eqb a n); intros H0. rewrite H0 in H. apply N.eqb_eq in H0. rewrite <- H0. rewrite (IHw1 _ H). reflexivity. rewrite H0 in H; discriminate H. Qed. Lemma Nwadd_ok : forall x y, gen_phiNword (Nwadd x y) == gen_phiNword x + gen_phiNword y. induction x; intros. simpl. norm. destruct y. simpl Nwadd; norm. simpl Nwadd. repeat rewrite gen_phiNword_cons. 
rewrite (fun sreq => gen_phiN_add Rsth sreq (ARth_SRth ARth)) by (destruct Reqe; constructor; trivial). rewrite IHx. norm. add_push (- gen_phiNword x); reflexivity. Qed. Lemma Nwopp_ok : forall x, gen_phiNword (Nwopp x) == - gen_phiNword x. simpl. unfold Nwopp; simpl. intros. rewrite gen_phiNword_Nwcons; norm. Qed. Lemma Nwscal_ok : forall n x, gen_phiNword (Nwscal n x) == gen_phiN rO rI radd rmul n * gen_phiNword x. induction x; intros. norm. simpl Nwscal. repeat rewrite gen_phiNword_cons. rewrite (fun sreq => gen_phiN_mult Rsth sreq (ARth_SRth ARth)) by (destruct Reqe; constructor; trivial). rewrite IHx. norm. Qed. Lemma Nwmul_ok : forall x y, gen_phiNword (Nwmul x y) == gen_phiNword x * gen_phiNword y. induction x; intros. norm. destruct a. simpl Nwmul. rewrite Nwopp_ok. rewrite IHx. rewrite gen_phiNword_cons. norm. simpl Nwmul. unfold Nwsub. rewrite Nwadd_ok. rewrite Nwscal_ok. rewrite Nwopp_ok. rewrite IHx. rewrite gen_phiNword_cons. norm. Qed. (* Proof that [.] satisfies morphism specifications *) Lemma gen_phiNword_morph : ring_morph 0 1 radd rmul rsub ropp req NwO NwI Nwadd Nwmul Nwsub Nwopp Nweq_bool gen_phiNword. constructor. reflexivity. reflexivity. exact Nwadd_ok. intros. unfold Nwsub. rewrite Nwadd_ok. rewrite Nwopp_ok. norm. exact Nwmul_ok. exact Nwopp_ok. exact gen_phiNword_ok. Qed. End NWORDMORPHISM. Section GEN_DIV. Variables (R : Type) (rO : R) (rI : R) (radd : R -> R -> R) (rmul : R -> R -> R) (rsub : R -> R -> R) (ropp : R -> R) (req : R -> R -> Prop) (C : Type) (cO : C) (cI : C) (cadd : C -> C -> C) (cmul : C -> C -> C) (csub : C -> C -> C) (copp : C -> C) (ceqb : C -> C -> bool) (phi : C -> R). Variable Rsth : Setoid_Theory R req. Variable Reqe : ring_eq_ext radd rmul ropp req. Variable ARth : almost_ring_theory rO rI radd rmul rsub ropp req. Variable morph : ring_morph rO rI radd rmul rsub ropp req cO cI cadd cmul csub copp ceqb phi. (* Useful tactics *) Add Setoid R req Rsth as R_set1. Ltac rrefl := gen_reflexivity Rsth. Add Morphism radd : radd_ext. exact (Radd_ext Reqe). Qed. Add Morphism rmul : rmul_ext. exact (Rmul_ext Reqe). Qed. Add Morphism ropp : ropp_ext. exact (Ropp_ext Reqe). Qed. Add Morphism rsub : rsub_ext. exact (ARsub_ext Rsth Reqe ARth). Qed. Ltac rsimpl := gen_srewrite Rsth Reqe ARth. Definition triv_div x y := if ceqb x y then (cI, cO) else (cO, x). Ltac Esimpl :=repeat (progress ( match goal with | |- context [phi cO] => rewrite (morph0 morph) | |- context [phi cI] => rewrite (morph1 morph) | |- context [phi (cadd ?x ?y)] => rewrite ((morph_add morph) x y) | |- context [phi (cmul ?x ?y)] => rewrite ((morph_mul morph) x y) | |- context [phi (csub ?x ?y)] => rewrite ((morph_sub morph) x y) | |- context [phi (copp ?x)] => rewrite ((morph_opp morph) x) end)). Lemma triv_div_th : Ring_theory.div_theory req cadd cmul phi triv_div. Proof. constructor. intros a b;unfold triv_div. assert (X:= morph.(morph_eq) a b);destruct (ceqb a b). Esimpl. rewrite X; trivial. rsimpl. Esimpl; rsimpl. Qed. Variable zphi : Z -> R. Lemma Ztriv_div_th : div_theory req Z.add Z.mul zphi Z.quotrem. Proof. constructor. intros; generalize (Z.quotrem_eq a b); case Z.quotrem; intros; subst. rewrite Z.mul_comm; rsimpl. Qed. Variable nphi : N -> R. Lemma Ntriv_div_th : div_theory req N.add N.mul nphi N.div_eucl. constructor. intros; generalize (N.div_eucl_spec a b); case N.div_eucl; intros; subst. rewrite N.mul_comm; rsimpl. Qed. End GEN_DIV. 
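(* Worked example (illustration): [triv_div] is the coarsest coefficient
   division.  Dividing c by c', it returns (1, 0) when [ceqb c c'] holds
   (that is, c = c' * 1 + 0) and (0, c) otherwise (that is, c = c' * 0 + c),
   which is enough to satisfy [div_theory].  [Ztriv_div_th] and
   [Ntriv_div_th] use the genuine Euclidean divisions [Z.quotrem] and
   [N.div_eucl] when the coefficient ring is Z or N. *)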
(* syntaxification of constants in an abstract ring: the inverse of gen_phiPOS *) Ltac inv_gen_phi_pos rI add mul t := let rec inv_cst t := match t with rI => constr:1%positive | (add rI rI) => constr:2%positive | (add rI (add rI rI)) => constr:3%positive | (mul (add rI rI) ?p) => (* 2p *) match inv_cst p with NotConstant => constr:NotConstant | 1%positive => constr:NotConstant (* 2*1 is not convertible to 2 *) | ?p => constr:(xO p) end | (add rI (mul (add rI rI) ?p)) => (* 1+2p *) match inv_cst p with NotConstant => constr:NotConstant | 1%positive => constr:NotConstant | ?p => constr:(xI p) end | _ => constr:NotConstant end in inv_cst t. (* The (partial) inverse of gen_phiNword *) Ltac inv_gen_phiNword rO rI add mul opp t := match t with rO => constr:NwO | _ => match inv_gen_phi_pos rI add mul t with NotConstant => constr:NotConstant | ?p => constr:(Npos p::nil) end end. (* The inverse of gen_phiN *) Ltac inv_gen_phiN rO rI add mul t := match t with rO => constr:0%N | _ => match inv_gen_phi_pos rI add mul t with NotConstant => constr:NotConstant | ?p => constr:(Npos p) end end. (* The inverse of gen_phiZ *) Ltac inv_gen_phiZ rO rI add mul opp t := match t with rO => constr:0%Z | (opp ?p) => match inv_gen_phi_pos rI add mul p with NotConstant => constr:NotConstant | ?p => constr:(Zneg p) end | _ => match inv_gen_phi_pos rI add mul t with NotConstant => constr:NotConstant | ?p => constr:(Zpos p) end end. (* A simple tactic recognizing only 0 and 1. The inv_gen_phiX above are only optimisations that directly returns the reifid constant instead of resorting to the constant propagation of the simplification algorithm. *) Ltac inv_gen_phi rO rI cO cI t := match t with | rO => cO | rI => cI end. (* A simple tactic recognizing no constant *) Ltac inv_morph_nothing t := constr:NotConstant. Ltac coerce_to_almost_ring set ext rspec := match type of rspec with | ring_theory _ _ _ _ _ _ _ => constr:(Rth_ARth set ext rspec) | semi_ring_theory _ _ _ _ _ => constr:(SRth_ARth set rspec) | almost_ring_theory _ _ _ _ _ _ _ => rspec | _ => fail 1 "not a valid ring theory" end. Ltac coerce_to_ring_ext ext := match type of ext with | ring_eq_ext _ _ _ _ => ext | sring_eq_ext _ _ _ => constr:(SReqe_Reqe ext) | _ => fail 1 "not a valid ring_eq_ext theory" end. Ltac abstract_ring_morphism set ext rspec := match type of rspec with | ring_theory _ _ _ _ _ _ _ => constr:(gen_phiZ_morph set ext rspec) | semi_ring_theory _ _ _ _ _ => constr:(gen_phiN_morph set ext rspec) | almost_ring_theory _ _ _ _ _ _ _ => constr:(gen_phiNword_morph set ext rspec) | _ => fail 1 "bad ring structure" end. Record hypo : Type := mkhypo { hypo_type : Type; hypo_proof : hypo_type }. Ltac gen_ring_pow set arth pspec := match pspec with | None => match type of arth with | @almost_ring_theory ?R ?rO ?rI ?radd ?rmul ?rsub ?ropp ?req => constr:(mkhypo (@pow_N_th R rI rmul req set)) | _ => fail 1 "gen_ring_pow" end | Some ?t => constr:(t) end. Ltac gen_ring_sign morph sspec := match sspec with | None => match type of morph with | @ring_morph ?R ?r0 ?rI ?radd ?rmul ?rsub ?ropp ?req Z ?c0 ?c1 ?cadd ?cmul ?csub ?copp ?ceqb ?phi => constr:(@mkhypo (sign_theory copp ceqb get_signZ) get_signZ_th) | @ring_morph ?R ?r0 ?rI ?radd ?rmul ?rsub ?ropp ?req ?C ?c0 ?c1 ?cadd ?cmul ?csub ?copp ?ceqb ?phi => constr:(mkhypo (@get_sign_None_th C copp ceqb)) | _ => fail 2 "ring anomaly : default_sign_spec" end | Some ?t => constr:(t) end. 
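(* Packaging sketch (illustration): optional specifications reach these
   tactics as a [hypo] record, i.e. a statement paired with its proof.  A
   user-supplied power specification, for instance, would be passed as
   [Some (mkhypo (my_pow_th : power_theory rI rmul req Cp_phi rpow))]
   (hypothetical names), while [None] selects the defaults: [pow_N_th] for
   powers, [get_signZ_th] or [get_sign_None_th] for signs, and the division
   theories chosen by [default_div_spec] below. *)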
Ltac default_div_spec set reqe arth morph := match type of morph with | @ring_morph ?R ?r0 ?rI ?radd ?rmul ?rsub ?ropp ?req Z ?c0 ?c1 Z.add Z.mul ?csub ?copp ?ceq_b ?phi => constr:(mkhypo (Ztriv_div_th set phi)) | @ring_morph ?R ?r0 ?rI ?radd ?rmul ?rsub ?ropp ?req N ?c0 ?c1 N.add N.mul ?csub ?copp ?ceq_b ?phi => constr:(mkhypo (Ntriv_div_th set phi)) | @ring_morph ?R ?r0 ?rI ?radd ?rmul ?rsub ?ropp ?req ?C ?c0 ?c1 ?cadd ?cmul ?csub ?copp ?ceq_b ?phi => constr:(mkhypo (triv_div_th set reqe arth morph)) | _ => fail 1 "ring anomaly : default_sign_spec" end. Ltac gen_ring_div set reqe arth morph dspec := match dspec with | None => default_div_spec set reqe arth morph | Some ?t => constr:(t) end. Ltac ring_elements set ext rspec pspec sspec dspec rk := let arth := coerce_to_almost_ring set ext rspec in let ext_r := coerce_to_ring_ext ext in let morph := match rk with | Abstract => abstract_ring_morphism set ext rspec | @Computational ?reqb_ok => match type of arth with | almost_ring_theory ?rO ?rI ?add ?mul ?sub ?opp _ => constr:(IDmorph rO rI add mul sub opp set _ reqb_ok) | _ => fail 2 "ring anomaly" end | @Morphism ?m => match type of m with | ring_morph _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ => m | @semi_morph _ _ _ _ _ _ _ _ _ _ _ _ _ => constr:(SRmorph_Rmorph set m) | _ => fail 2 "ring anomaly" end | _ => fail 1 "ill-formed ring kind" end in let p_spec := gen_ring_pow set arth pspec in let s_spec := gen_ring_sign morph sspec in let d_spec := gen_ring_div set ext_r arth morph dspec in fun f => f arth ext_r morph p_spec s_spec d_spec. (* Given a ring structure and the kind of morphism, returns 2 lemmas (one for ring, and one for ring_simplify). *) Ltac ring_lemmas set ext rspec pspec sspec dspec rk := let gen_lemma2 := match pspec with | None => constr:(ring_rw_correct) | Some _ => constr:(ring_rw_pow_correct) end in ring_elements set ext rspec pspec sspec dspec rk ltac:(fun arth ext_r morph p_spec s_spec d_spec => match type of morph with | @ring_morph ?R ?r0 ?rI ?radd ?rmul ?rsub ?ropp ?req ?C ?c0 ?c1 ?cadd ?cmul ?csub ?copp ?ceq_b ?phi => let gen_lemma2_0 := constr:(gen_lemma2 R r0 rI radd rmul rsub ropp req set ext_r arth C c0 c1 cadd cmul csub copp ceq_b phi morph) in match p_spec with | @mkhypo (power_theory _ _ _ ?Cp_phi ?rpow) ?pp_spec => let gen_lemma2_1 := constr:(gen_lemma2_0 _ Cp_phi rpow pp_spec) in match d_spec with | @mkhypo (div_theory _ _ _ _ ?cdiv) ?dd_spec => let gen_lemma2_2 := constr:(gen_lemma2_1 cdiv dd_spec) in match s_spec with | @mkhypo (sign_theory _ _ ?get_sign) ?ss_spec => let lemma2 := constr:(gen_lemma2_2 get_sign ss_spec) in let lemma1 := constr:(ring_correct set ext_r arth morph pp_spec dd_spec) in fun f => f arth ext_r morph lemma1 lemma2 | _ => fail 4 "ring: bad sign specification" end | _ => fail 3 "ring: bad coefficiant division specification" end | _ => fail 2 "ring: bad power specification" end | _ => fail 1 "ring internal error: ring_lemmas, please report" end). (* Tactic for constant *) Ltac isnatcst t := match t with O => constr:true | S ?p => isnatcst p | _ => constr:false end. Ltac isPcst t := match t with | xI ?p => isPcst p | xO ?p => isPcst p | xH => constr:true (* nat -> positive *) | Pos.of_succ_nat ?n => isnatcst n | _ => constr:false end. Ltac isNcst t := match t with N0 => constr:true | Npos ?p => isPcst p | _ => constr:false end. 
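(* Illustration: these recognizers answer [true] only on closed numerals,
   e.g. [isNcst (Npos (xO xH))] (that is, 2%N) evaluates to [true], whereas
   [isNcst (Npos p)] with [p] a context variable evaluates to [false].  They
   serve as default "constants" recognizers when declaring rings over nat,
   positive, N and Z (see e.g. NArithRing above). *)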
Ltac isZcst t := match t with Z0 => constr:true | Zpos ?p => isPcst p | Zneg ?p => isPcst p (* injection nat -> Z *) | Z.of_nat ?n => isnatcst n (* injection N -> Z *) | Z.of_N ?n => isNcst n (* *) | _ => constr:false end. coq-8.4pl2/plugins/setoid_ring/Field_tac.v0000640000175000001440000004446212010532755017671 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* match t with | (radd ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(FEadd e1 e2) | (rmul ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(FEmul e1 e2) | (rsub ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(FEsub e1 e2) | (ropp ?t1) => fun _ => let e1 := mkP t1 in constr:(FEopp e1) | (rdiv ?t1 ?t2) => fun _ => let e1 := mkP t1 in let e2 := mkP t2 in constr:(FEdiv e1 e2) | (rinv ?t1) => fun _ => let e1 := mkP t1 in constr:(FEinv e1) | (rpow ?t1 ?n) => match CstPow n with | InitialRing.NotConstant => fun _ => let p := Find_at t fv in constr:(@FEX C p) | ?c => fun _ => let e1 := mkP t1 in constr:(FEpow e1 c) end | _ => fun _ => let p := Find_at t fv in constr:(@FEX C p) end | ?c => fun _ => constr:(FEc c) end in f () in mkP t. Ltac FFV Cst CstPow add mul sub opp div inv pow t fv := let rec TFV t fv := match Cst t with | InitialRing.NotConstant => match t with | (add ?t1 ?t2) => TFV t2 ltac:(TFV t1 fv) | (mul ?t1 ?t2) => TFV t2 ltac:(TFV t1 fv) | (sub ?t1 ?t2) => TFV t2 ltac:(TFV t1 fv) | (opp ?t1) => TFV t1 fv | (div ?t1 ?t2) => TFV t2 ltac:(TFV t1 fv) | (inv ?t1) => TFV t1 fv | (pow ?t1 ?n) => match CstPow n with | InitialRing.NotConstant => AddFvTail t fv | _ => TFV t1 fv end | _ => AddFvTail t fv end | _ => fv end in TFV t fv. (* packaging the field structure *) (* TODO: inline PackField into field_lookup *) Ltac PackField F req Cst_tac Pow_tac L1 L2 L3 L4 cond_ok pre post := let FLD := match type of L1 with | context [req (@FEeval ?R ?rO ?radd ?rmul ?rsub ?ropp ?rdiv ?rinv ?C ?phi ?Cpow ?Cp_phi ?rpow _ _) _ ] => (fun proj => proj Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok) | _ => fail 1 "field anomaly: bad correctness lemma (parse)" end in F FLD. Ltac get_FldPre FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => pre). Ltac get_FldPost FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => post). Ltac get_L1 FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => L1). Ltac get_SimplifyEqLemma FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => L2). Ltac get_SimplifyLemma FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => L3). Ltac get_L4 FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => L4). Ltac get_CondLemma FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => cond_ok). Ltac get_FldEq FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => req). Ltac get_FldCarrier FLD := let req := get_FldEq FLD in relation_carrier req. 
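(* Accessor pattern (illustration): [FLD] is a continuation expecting a
   projection over all the packaged components (constant and power
   recognizers, pre/post tactics, the field operations, the four
   correctness lemmas and the condition lemma).  Each accessor applies
   [FLD] to a function that keeps one component and drops the rest; a
   hypothetical accessor for the inverse would read

     Ltac get_FldInv FLD :=
       FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp
                      rdiv rinv rpow C L1 L2 L3 L4 cond_ok => rinv).
*)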
Ltac get_RingFV FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => FV Cst_tac Pow_tac radd rmul rsub ropp rpow). Ltac get_FFV FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => FFV Cst_tac Pow_tac radd rmul rsub ropp rdiv rinv rpow). Ltac get_RingMeta FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => mkPolexpr C Cst_tac Pow_tac radd rmul rsub ropp rpow). Ltac get_Meta FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => mkFieldexpr C Cst_tac Pow_tac radd rmul rsub ropp rdiv rinv rpow). Ltac get_Hyp_tac FLD := FLD ltac: (fun Cst_tac Pow_tac pre post req radd rmul rsub ropp rdiv rinv rpow C L1 L2 L3 L4 cond_ok => let mkPol := mkPolexpr C Cst_tac Pow_tac radd rmul rsub ropp rpow in fun fv lH => mkHyp_tac C req ltac:(fun t => mkPol t fv) lH). Ltac get_FEeval FLD := let L1 := get_L1 FLD in match type of L1 with | context [(@FEeval ?R ?r0 ?add ?mul ?sub ?opp ?div ?inv ?C ?phi ?Cpow ?powphi ?pow _ _)] => constr:(@FEeval R r0 add mul sub opp div inv C phi Cpow powphi pow) | _ => fail 1 "field anomaly: bad correctness lemma (get_FEeval)" end. (* simplifying the non-zero condition... *) Ltac fold_field_cond req := let rec fold_concl t := match t with ?x /\ ?y => let fx := fold_concl x in let fy := fold_concl y in constr:(fx/\fy) | req ?x ?y -> False => constr:(~ req x y) | _ => t end in let ft := fold_concl Get_goal in change ft. Ltac simpl_PCond FLD := let req := get_FldEq FLD in let lemma := get_CondLemma FLD in try apply lemma; protect_fv "field_cond"; fold_field_cond req; try exact I. Ltac simpl_PCond_BEURK FLD := let req := get_FldEq FLD in let lemma := get_CondLemma FLD in apply lemma; protect_fv "field_cond"; fold_field_cond req. (* Rewriting (field_simplify) *) Ltac Field_norm_gen f n FLD lH rl := let mkFV := get_RingFV FLD in let mkFFV := get_FFV FLD in let mkFE := get_Meta FLD in let fv0 := FV_hypo_tac mkFV ltac:(get_FldEq FLD) lH in let lemma_tac fv kont := let lemma := get_SimplifyLemma FLD in (* reify equations of the context *) let lpe := get_Hyp_tac FLD fv lH in let vlpe := fresh "hyps" in pose (vlpe := lpe); let prh := proofHyp_tac lH in (* compute the normal form of the reified hyps *) let vlmp := fresh "hyps'" in let vlmp_eq := fresh "hyps_eq" in let mk_monpol := get_MonPol lemma in compute_assertion vlmp_eq vlmp (mk_monpol vlpe); (* partially instantiate the lemma *) let lem := fresh "f_rw_lemma" in (assert (lem := lemma n vlpe fv prh vlmp vlmp_eq) || fail "type error when building the rewriting lemma"); (* continuation will call main_tac for all reified terms *) kont lem; (* at the end, cleanup *) (clear lem vlmp_eq vlmp vlpe||idtac"Field_norm_gen:cleanup failed") in (* each instance of the lemma is simplified then passed to f *) let main_tac H := protect_fv "field" in H; f H in (* generate and use equations for each expression *) ReflexiveRewriteTactic mkFFV mkFE lemma_tac main_tac fv0 rl; try simpl_PCond FLD. Ltac Field_simplify_gen f FLD lH rl := get_FldPre FLD (); Field_norm_gen f ring_subst_niter FLD lH rl; get_FldPost FLD (). Ltac Field_simplify := Field_simplify_gen ltac:(fun H => rewrite H). Tactic Notation (at level 0) "field_simplify" constr_list(rl) := let G := Get_goal in field_lookup (PackField Field_simplify) [] rl G. 
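(* The bracketed variants below take an extra list of context hypotheses,
   each an equation of the field, that the normalizer may use for
   substitution; a typical call is (sketch, hypothetical names)

     field_simplify [Hxy] (x + y)

   where Hxy : x * y == 1 is in the context. *)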
Tactic Notation (at level 0) "field_simplify" "[" constr_list(lH) "]" constr_list(rl) := let G := Get_goal in field_lookup (PackField Field_simplify) [lH] rl G. Tactic Notation "field_simplify" constr_list(rl) "in" hyp(H):= let G := Get_goal in let t := type of H in let g := fresh "goal" in set (g:= G); revert H; field_lookup (PackField Field_simplify) [] rl t; intro H; unfold g;clear g. Tactic Notation "field_simplify" "["constr_list(lH) "]" constr_list(rl) "in" hyp(H):= let G := Get_goal in let t := type of H in let g := fresh "goal" in set (g:= G); revert H; field_lookup (PackField Field_simplify) [lH] rl t; intro H; unfold g;clear g. (* Ltac Field_simplify_in hyp:= Field_simplify_gen ltac:(fun H => rewrite H in hyp). Tactic Notation (at level 0) "field_simplify" constr_list(rl) "in" hyp(h) := let t := type of h in field_lookup (Field_simplify_in h) [] rl t. Tactic Notation (at level 0) "field_simplify" "[" constr_list(lH) "]" constr_list(rl) "in" hyp(h) := let t := type of h in field_lookup (Field_simplify_in h) [lH] rl t. *) (** Generic tactic for solving equations *) Ltac Field_Scheme Simpl_tac n lemma FLD lH := let req := get_FldEq FLD in let mkFV := get_RingFV FLD in let mkFFV := get_FFV FLD in let mkFE := get_Meta FLD in let Main_eq t1 t2 := let fv := FV_hypo_tac mkFV req lH in let fv := mkFFV t1 fv in let fv := mkFFV t2 fv in let lpe := get_Hyp_tac FLD fv lH in let prh := proofHyp_tac lH in let vlpe := fresh "list_hyp" in let fe1 := mkFE t1 fv in let fe2 := mkFE t2 fv in pose (vlpe := lpe); let nlemma := fresh "field_lemma" in (assert (nlemma := lemma n fv vlpe fe1 fe2 prh) || fail "field anomaly:failed to build lemma"); ProveLemmaHyps nlemma ltac:(fun ilemma => apply ilemma || fail "field anomaly: failed in applying lemma"; [ Simpl_tac | simpl_PCond FLD]); clear nlemma; subst vlpe in OnEquation req Main_eq. (* solve completely a field equation, leaving non-zero conditions to be proved (field) *) Ltac FIELD FLD lH rl := let Simpl := vm_compute; reflexivity || fail "not a valid field equation" in let lemma := get_L1 FLD in get_FldPre FLD (); Field_Scheme Simpl Ring_tac.ring_subst_niter lemma FLD lH; try exact I; get_FldPost FLD(). Tactic Notation (at level 0) "field" := let G := Get_goal in field_lookup (PackField FIELD) [] G. Tactic Notation (at level 0) "field" "[" constr_list(lH) "]" := let G := Get_goal in field_lookup (PackField FIELD) [lH] G. (* transforms a field equation to an equivalent (simplified) ring equation, and leaves non-zero conditions to be proved (field_simplify_eq) *) Ltac FIELD_SIMPL FLD lH rl := let Simpl := (protect_fv "field") in let lemma := get_SimplifyEqLemma FLD in get_FldPre FLD (); Field_Scheme Simpl Ring_tac.ring_subst_niter lemma FLD lH; get_FldPost FLD (). Tactic Notation (at level 0) "field_simplify_eq" := let G := Get_goal in field_lookup (PackField FIELD_SIMPL) [] G. Tactic Notation (at level 0) "field_simplify_eq" "[" constr_list(lH) "]" := let G := Get_goal in field_lookup (PackField FIELD_SIMPL) [lH] G. 
(* Same as FIELD_SIMPL but in hypothesis *) Ltac Field_simplify_eq n FLD lH := let req := get_FldEq FLD in let mkFV := get_RingFV FLD in let mkFFV := get_FFV FLD in let mkFE := get_Meta FLD in let lemma := get_L4 FLD in let hyp := fresh "hyp" in intro hyp; OnEquationHyp req hyp ltac:(fun t1 t2 => let fv := FV_hypo_tac mkFV req lH in let fv := mkFFV t1 fv in let fv := mkFFV t2 fv in let lpe := get_Hyp_tac FLD fv lH in let prh := proofHyp_tac lH in let fe1 := mkFE t1 fv in let fe2 := mkFE t2 fv in let vlpe := fresh "vlpe" in ProveLemmaHyps (lemma n fv lpe fe1 fe2 prh) ltac:(fun ilemma => match type of ilemma with | req _ _ -> _ -> ?EQ => let tmp := fresh "tmp" in assert (tmp : EQ); [ apply ilemma; [ exact hyp | simpl_PCond_BEURK FLD] | protect_fv "field" in tmp; revert tmp ]; clear hyp end)). Ltac FIELD_SIMPL_EQ FLD lH rl := get_FldPre FLD (); Field_simplify_eq Ring_tac.ring_subst_niter FLD lH; get_FldPost FLD (). Tactic Notation (at level 0) "field_simplify_eq" "in" hyp(H) := let t := type of H in generalize H; field_lookup (PackField FIELD_SIMPL_EQ) [] t; [ try exact I | clear H;intro H]. Tactic Notation (at level 0) "field_simplify_eq" "[" constr_list(lH) "]" "in" hyp(H) := let t := type of H in generalize H; field_lookup (PackField FIELD_SIMPL_EQ) [lH] t; [ try exact I |clear H;intro H]. (* More generic tactics to build variants of field *) (* This tactic reifies c and pass to F: - the FLD structure gathering all info in the field DB - the atom list - the expression (FExpr) *) Ltac gen_with_field F c := let MetaExpr FLD _ rl := let R := get_FldCarrier FLD in let mkFFV := get_FFV FLD in let mkFE := get_Meta FLD in let csr := match rl with | List.cons ?r _ => r | _ => fail 1 "anomaly: ill-formed list" end in let fv := mkFFV csr (@List.nil R) in let expr := mkFE csr fv in F FLD fv expr in field_lookup (PackField MetaExpr) [] (c=c). (* pushes the equation expr = ope(expr) in the goal, and discharge it with field *) Ltac prove_field_eqn ope FLD fv expr := let res := ope expr in let expr' := fresh "input_expr" in pose (expr' := expr); let res' := fresh "result" in pose (res' := res); let lemma := get_L1 FLD in let lemma := constr:(lemma O fv List.nil expr' res' I List.nil (eq_refl _)) in let ty := type of lemma in let lhs := match ty with forall _, ?lhs=_ -> _ => lhs end in let rhs := match ty with forall _, _=_ -> forall _, ?rhs=_ -> _ => rhs end in let lhs' := fresh "lhs" in let lhs_eq := fresh "lhs_eq" in let rhs' := fresh "rhs" in let rhs_eq := fresh "rhs_eq" in compute_assertion lhs_eq lhs' lhs; compute_assertion rhs_eq rhs' rhs; let H := fresh "fld_eqn" in refine (_ (lemma lhs' lhs_eq rhs' rhs_eq _ _)); (* main goal *) [intro H;protect_fv "field" in H; revert H (* ring-nf(lhs') = ring-nf(rhs') *) | vm_compute; reflexivity || fail "field cannot prove this equality" (* denominator condition *) | simpl_PCond FLD]; clear lhs_eq rhs_eq; subst lhs' rhs'. Ltac prove_with_field ope c := gen_with_field ltac:(prove_field_eqn ope) c. (* Prove an equation x=ope(x) and rewrite with it *) Ltac prove_rw ope x := prove_with_field ope x; [ let H := fresh "Heq_maple" in intro H; rewrite H; clear H |..]. (* Apply ope (FExpr->FExpr) on an expression *) Ltac reduce_field_expr ope kont FLD fv expr := let evfun := get_FEeval FLD in let res := ope expr in let c := (eval simpl_field_expr in (evfun fv res)) in kont c. (* Hack to let a Ltac return a term in the context of a primitive tactic *) Ltac return_term x := generalize (eq_refl x). Ltac get_term := match goal with | |- ?x = _ -> _ => x end. 
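(* Concretely (illustration): [return_term c] turns a goal [G] into
   [c = c -> G] via [generalize (eq_refl c)], and [get_term] reads [c] back
   from that conclusion; this is how a term computed in Ltac survives the
   round trip through [field_lookup], which otherwise only passes tactics
   around. *)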
(* Turn an operation on field expressions (FExpr) into a reduction on terms (in the field carrier). Because of field_lookup, the tactic cannot return a term directly, so it is returned via the conclusion of the goal (return_term). *) Ltac reduce_field_ope ope c := gen_with_field ltac:(reduce_field_expr ope return_term) c. (* Adding a new field *) Ltac ring_of_field f := match type of f with | almost_field_theory _ _ _ _ _ _ _ _ _ => constr:(AF_AR f) | field_theory _ _ _ _ _ _ _ _ _ => constr:(F_R f) | semi_field_theory _ _ _ _ _ _ _ => constr:(SF_SR f) end. Ltac coerce_to_almost_field set ext f := match type of f with | almost_field_theory _ _ _ _ _ _ _ _ _ => f | field_theory _ _ _ _ _ _ _ _ _ => constr:(F2AF set ext f) | semi_field_theory _ _ _ _ _ _ _ => constr:(SF2AF set f) end. Ltac field_elements set ext fspec pspec sspec dspec rk := let afth := coerce_to_almost_field set ext fspec in let rspec := ring_of_field fspec in ring_elements set ext rspec pspec sspec dspec rk ltac:(fun arth ext_r morph p_spec s_spec d_spec f => f afth ext_r morph p_spec s_spec d_spec). Ltac field_lemmas set ext inv_m fspec pspec sspec dspec rk := let get_lemma := match pspec with None => fun x y => x | _ => fun x y => y end in let simpl_eq_lemma := get_lemma Field_simplify_eq_correct Field_simplify_eq_pow_correct in let simpl_eq_in_lemma := get_lemma Field_simplify_eq_in_correct Field_simplify_eq_pow_in_correct in let rw_lemma := get_lemma Field_rw_correct Field_rw_pow_correct in field_elements set ext fspec pspec sspec dspec rk ltac:(fun afth ext_r morph p_spec s_spec d_spec => match morph with | _ => let field_ok1 := constr:(Field_correct set ext_r inv_m afth morph) in match p_spec with | mkhypo ?pp_spec => let field_ok2 := constr:(field_ok1 _ _ _ pp_spec) in match s_spec with | mkhypo ?ss_spec => let field_ok3 := constr:(field_ok2 _ ss_spec) in match d_spec with | mkhypo ?dd_spec => let field_ok := constr:(field_ok3 _ dd_spec) in let mk_lemma lemma := constr:(lemma _ _ _ _ _ _ _ _ _ _ set ext_r inv_m afth _ _ _ _ _ _ _ _ _ morph _ _ _ pp_spec _ ss_spec _ dd_spec) in let field_simpl_eq_ok := mk_lemma simpl_eq_lemma in let field_simpl_ok := mk_lemma rw_lemma in let field_simpl_eq_in := mk_lemma simpl_eq_in_lemma in let cond1_ok := constr:(Pcond_simpl_gen set ext_r afth morph pp_spec dd_spec) in let cond2_ok := constr:(Pcond_simpl_complete set ext_r afth morph pp_spec dd_spec) in (fun f => f afth ext_r morph field_ok field_simpl_ok field_simpl_eq_ok field_simpl_eq_in cond1_ok cond2_ok) | _ => fail 4 "field: bad coefficiant division specification" end | _ => fail 3 "field: bad sign specification" end | _ => fail 2 "field: bad power specification" end | _ => fail 1 "field internal error : field_lemmas, please report" end). 
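(* Usage sketch: the lemmas built here are consumed by the [Add Field]
   vernacular command, e.g. (hypothetical names)

     Add Field my_field : my_field_th (decidable my_eqb_correct).

   where [my_field_th] proves [field_theory] for the carrier and
   [my_eqb_correct] relates a boolean equality test to the setoid equality;
   QArith's Qfield gives a concrete such declaration in the standard
   library.  Afterwards the [field], [field_simplify] and
   [field_simplify_eq] tactics defined above apply to that carrier. *)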
coq-8.4pl2/plugins/romega/0000750000175000001440000000000012127276542014571 5ustar notinuserscoq-8.4pl2/plugins/romega/vo.itarget0000640000175000001440000000003311307752066016572 0ustar notinusersReflOmegaCore.vo ROmega.vo coq-8.4pl2/plugins/romega/const_omega.ml0000640000175000001440000003031512121620060017402 0ustar notinusers(************************************************************************* PROJET RNRT Calife - 2001 Author: Pierre Crgut - France Tlcom R&D Licence : LGPL version 2.1 *************************************************************************) let module_refl_name = "ReflOmegaCore" let module_refl_path = ["Coq"; "romega"; module_refl_name] type result = Kvar of string | Kapp of string * Term.constr list | Kimp of Term.constr * Term.constr | Kufo;; let meaningful_submodule = [ "Z"; "N"; "Pos" ] let string_of_global r = let dp = Nametab.dirpath_of_global r in let prefix = match Names.repr_dirpath dp with | [] -> "" | m::_ -> let s = Names.string_of_id m in if List.mem s meaningful_submodule then s^"." else "" in prefix^(Names.string_of_id (Nametab.basename_of_global r)) let destructurate t = let c, args = Term.decompose_app t in match Term.kind_of_term c, args with | Term.Const sp, args -> Kapp (string_of_global (Libnames.ConstRef sp), args) | Term.Construct csp , args -> Kapp (string_of_global (Libnames.ConstructRef csp), args) | Term.Ind isp, args -> Kapp (string_of_global (Libnames.IndRef isp), args) | Term.Var id,[] -> Kvar(Names.string_of_id id) | Term.Prod (Names.Anonymous,typ,body), [] -> Kimp(typ,body) | Term.Prod (Names.Name _,_,_),[] -> Util.error "Omega: Not a quantifier-free goal" | _ -> Kufo exception Destruct let dest_const_apply t = let f,args = Term.decompose_app t in let ref = match Term.kind_of_term f with | Term.Const sp -> Libnames.ConstRef sp | Term.Construct csp -> Libnames.ConstructRef csp | Term.Ind isp -> Libnames.IndRef isp | _ -> raise Destruct in Nametab.basename_of_global ref, args let logic_dir = ["Coq";"Logic";"Decidable"] let coq_modules = Coqlib.init_modules @ [logic_dir] @ Coqlib.arith_modules @ Coqlib.zarith_base_modules @ [["Coq"; "Lists"; "List"]] @ [module_refl_path] @ [module_refl_path@["ZOmega"]] let bin_module = [["Coq";"Numbers";"BinNums"]] let z_module = [["Coq";"ZArith";"BinInt"]] let init_constant = Coqlib.gen_constant_in_modules "Omega" Coqlib.init_modules let constant = Coqlib.gen_constant_in_modules "Omega" coq_modules let z_constant = Coqlib.gen_constant_in_modules "Omega" z_module let bin_constant = Coqlib.gen_constant_in_modules "Omega" bin_module (* Logic *) let coq_eq = lazy(init_constant "eq") let coq_refl_equal = lazy(init_constant "eq_refl") let coq_and = lazy(init_constant "and") let coq_not = lazy(init_constant "not") let coq_or = lazy(init_constant "or") let coq_True = lazy(init_constant "True") let coq_False = lazy(init_constant "False") let coq_I = lazy(init_constant "I") (* ReflOmegaCore/ZOmega *) let coq_h_step = lazy (constant "h_step") let coq_pair_step = lazy (constant "pair_step") let coq_p_left = lazy (constant "P_LEFT") let coq_p_right = lazy (constant "P_RIGHT") let coq_p_invert = lazy (constant "P_INVERT") let coq_p_step = lazy (constant "P_STEP") let coq_t_int = lazy (constant "Tint") let coq_t_plus = lazy (constant "Tplus") let coq_t_mult = lazy (constant "Tmult") let coq_t_opp = lazy (constant "Topp") let coq_t_minus = lazy (constant "Tminus") let coq_t_var = lazy (constant "Tvar") let coq_proposition = lazy (constant "proposition") let coq_p_eq = lazy (constant "EqTerm") let coq_p_leq = lazy 
(constant "LeqTerm") let coq_p_geq = lazy (constant "GeqTerm") let coq_p_lt = lazy (constant "LtTerm") let coq_p_gt = lazy (constant "GtTerm") let coq_p_neq = lazy (constant "NeqTerm") let coq_p_true = lazy (constant "TrueTerm") let coq_p_false = lazy (constant "FalseTerm") let coq_p_not = lazy (constant "Tnot") let coq_p_or = lazy (constant "Tor") let coq_p_and = lazy (constant "Tand") let coq_p_imp = lazy (constant "Timp") let coq_p_prop = lazy (constant "Tprop") (* Constructors for shuffle tactic *) let coq_t_fusion = lazy (constant "t_fusion") let coq_f_equal = lazy (constant "F_equal") let coq_f_cancel = lazy (constant "F_cancel") let coq_f_left = lazy (constant "F_left") let coq_f_right = lazy (constant "F_right") (* Constructors for reordering tactics *) let coq_c_do_both = lazy (constant "C_DO_BOTH") let coq_c_do_left = lazy (constant "C_LEFT") let coq_c_do_right = lazy (constant "C_RIGHT") let coq_c_do_seq = lazy (constant "C_SEQ") let coq_c_nop = lazy (constant "C_NOP") let coq_c_opp_plus = lazy (constant "C_OPP_PLUS") let coq_c_opp_opp = lazy (constant "C_OPP_OPP") let coq_c_opp_mult_r = lazy (constant "C_OPP_MULT_R") let coq_c_opp_one = lazy (constant "C_OPP_ONE") let coq_c_reduce = lazy (constant "C_REDUCE") let coq_c_mult_plus_distr = lazy (constant "C_MULT_PLUS_DISTR") let coq_c_opp_left = lazy (constant "C_MULT_OPP_LEFT") let coq_c_mult_assoc_r = lazy (constant "C_MULT_ASSOC_R") let coq_c_plus_assoc_r = lazy (constant "C_PLUS_ASSOC_R") let coq_c_plus_assoc_l = lazy (constant "C_PLUS_ASSOC_L") let coq_c_plus_permute = lazy (constant "C_PLUS_PERMUTE") let coq_c_plus_comm = lazy (constant "C_PLUS_COMM") let coq_c_red0 = lazy (constant "C_RED0") let coq_c_red1 = lazy (constant "C_RED1") let coq_c_red2 = lazy (constant "C_RED2") let coq_c_red3 = lazy (constant "C_RED3") let coq_c_red4 = lazy (constant "C_RED4") let coq_c_red5 = lazy (constant "C_RED5") let coq_c_red6 = lazy (constant "C_RED6") let coq_c_mult_opp_left = lazy (constant "C_MULT_OPP_LEFT") let coq_c_mult_assoc_reduced = lazy (constant "C_MULT_ASSOC_REDUCED") let coq_c_minus = lazy (constant "C_MINUS") let coq_c_mult_comm = lazy (constant "C_MULT_COMM") let coq_s_constant_not_nul = lazy (constant "O_CONSTANT_NOT_NUL") let coq_s_constant_neg = lazy (constant "O_CONSTANT_NEG") let coq_s_div_approx = lazy (constant "O_DIV_APPROX") let coq_s_not_exact_divide = lazy (constant "O_NOT_EXACT_DIVIDE") let coq_s_exact_divide = lazy (constant "O_EXACT_DIVIDE") let coq_s_sum = lazy (constant "O_SUM") let coq_s_state = lazy (constant "O_STATE") let coq_s_contradiction = lazy (constant "O_CONTRADICTION") let coq_s_merge_eq = lazy (constant "O_MERGE_EQ") let coq_s_split_ineq =lazy (constant "O_SPLIT_INEQ") let coq_s_constant_nul =lazy (constant "O_CONSTANT_NUL") let coq_s_negate_contradict =lazy (constant "O_NEGATE_CONTRADICT") let coq_s_negate_contradict_inv =lazy (constant "O_NEGATE_CONTRADICT_INV") (* construction for the [extract_hyp] tactic *) let coq_direction = lazy (constant "direction") let coq_d_left = lazy (constant "D_left") let coq_d_right = lazy (constant "D_right") let coq_d_mono = lazy (constant "D_mono") let coq_e_split = lazy (constant "E_SPLIT") let coq_e_extract = lazy (constant "E_EXTRACT") let coq_e_solve = lazy (constant "E_SOLVE") let coq_interp_sequent = lazy (constant "interp_goal_concl") let coq_do_omega = lazy (constant "do_omega") (* \subsection{Construction d'expressions} *) let do_left t = if Term.eq_constr t (Lazy.force coq_c_nop) then Lazy.force coq_c_nop else Term.mkApp (Lazy.force coq_c_do_left, 
[|t |] ) let do_right t = if Term.eq_constr t (Lazy.force coq_c_nop) then Lazy.force coq_c_nop else Term.mkApp (Lazy.force coq_c_do_right, [|t |]) let do_both t1 t2 = if Term.eq_constr t1 (Lazy.force coq_c_nop) then do_right t2 else if Term.eq_constr t2 (Lazy.force coq_c_nop) then do_left t1 else Term.mkApp (Lazy.force coq_c_do_both , [|t1; t2 |]) let do_seq t1 t2 = if Term.eq_constr t1 (Lazy.force coq_c_nop) then t2 else if Term.eq_constr t2 (Lazy.force coq_c_nop) then t1 else Term.mkApp (Lazy.force coq_c_do_seq, [|t1; t2 |]) let rec do_list = function | [] -> Lazy.force coq_c_nop | [x] -> x | (x::l) -> do_seq x (do_list l) (* Nat *) let coq_S = lazy(init_constant "S") let coq_O = lazy(init_constant "O") let rec mk_nat = function | 0 -> Lazy.force coq_O | n -> Term.mkApp (Lazy.force coq_S, [| mk_nat (n-1) |]) (* Lists *) let coq_cons = lazy (constant "cons") let coq_nil = lazy (constant "nil") let mk_list typ l = let rec loop = function | [] -> Term.mkApp (Lazy.force coq_nil, [|typ|]) | (step :: l) -> Term.mkApp (Lazy.force coq_cons, [|typ; step; loop l |]) in loop l let mk_plist l = mk_list Term.mkProp l let mk_shuffle_list l = mk_list (Lazy.force coq_t_fusion) l type parse_term = | Tplus of Term.constr * Term.constr | Tmult of Term.constr * Term.constr | Tminus of Term.constr * Term.constr | Topp of Term.constr | Tsucc of Term.constr | Tnum of Bigint.bigint | Tother type parse_rel = | Req of Term.constr * Term.constr | Rne of Term.constr * Term.constr | Rlt of Term.constr * Term.constr | Rle of Term.constr * Term.constr | Rgt of Term.constr * Term.constr | Rge of Term.constr * Term.constr | Rtrue | Rfalse | Rnot of Term.constr | Ror of Term.constr * Term.constr | Rand of Term.constr * Term.constr | Rimp of Term.constr * Term.constr | Riff of Term.constr * Term.constr | Rother let parse_logic_rel c = try match destructurate c with | Kapp("True",[]) -> Rtrue | Kapp("False",[]) -> Rfalse | Kapp("not",[t]) -> Rnot t | Kapp("or",[t1;t2]) -> Ror (t1,t2) | Kapp("and",[t1;t2]) -> Rand (t1,t2) | Kimp(t1,t2) -> Rimp (t1,t2) | Kapp("iff",[t1;t2]) -> Riff (t1,t2) | _ -> Rother with e when Logic.catchable_exception e -> Rother module type Int = sig val typ : Term.constr Lazy.t val plus : Term.constr Lazy.t val mult : Term.constr Lazy.t val opp : Term.constr Lazy.t val minus : Term.constr Lazy.t val mk : Bigint.bigint -> Term.constr val parse_term : Term.constr -> parse_term val parse_rel : Proof_type.goal Tacmach.sigma -> Term.constr -> parse_rel (* check whether t is built only with numbers and + * - *) val is_scalar : Term.constr -> bool end module Z : Int = struct let typ = lazy (bin_constant "Z") let plus = lazy (z_constant "Z.add") let mult = lazy (z_constant "Z.mul") let opp = lazy (z_constant "Z.opp") let minus = lazy (z_constant "Z.sub") let coq_xH = lazy (bin_constant "xH") let coq_xO = lazy (bin_constant "xO") let coq_xI = lazy (bin_constant "xI") let coq_Z0 = lazy (bin_constant "Z0") let coq_Zpos = lazy (bin_constant "Zpos") let coq_Zneg = lazy (bin_constant "Zneg") let recognize t = let rec loop t = let f,l = dest_const_apply t in match Names.string_of_id f,l with "xI",[t] -> Bigint.add Bigint.one (Bigint.mult Bigint.two (loop t)) | "xO",[t] -> Bigint.mult Bigint.two (loop t) | "xH",[] -> Bigint.one | _ -> failwith "not a number" in let f,l = dest_const_apply t in match Names.string_of_id f,l with "Zpos",[t] -> loop t | "Zneg",[t] -> Bigint.neg (loop t) | "Z0",[] -> Bigint.zero | _ -> failwith "not a number";; let rec mk_positive n = if n=Bigint.one then Lazy.force coq_xH else let (q,r) 
= Bigint.euclid n Bigint.two in Term.mkApp ((if r = Bigint.zero then Lazy.force coq_xO else Lazy.force coq_xI), [| mk_positive q |]) let mk_Z n = if n = Bigint.zero then Lazy.force coq_Z0 else if Bigint.is_strictly_pos n then Term.mkApp (Lazy.force coq_Zpos, [| mk_positive n |]) else Term.mkApp (Lazy.force coq_Zneg, [| mk_positive (Bigint.neg n) |]) let mk = mk_Z let parse_term t = try match destructurate t with | Kapp("Z.add",[t1;t2]) -> Tplus (t1,t2) | Kapp("Z.sub",[t1;t2]) -> Tminus (t1,t2) | Kapp("Z.mul",[t1;t2]) -> Tmult (t1,t2) | Kapp("Z.opp",[t]) -> Topp t | Kapp("Z.succ",[t]) -> Tsucc t | Kapp("Z.pred",[t]) -> Tplus(t, mk_Z (Bigint.neg Bigint.one)) | Kapp(("Zpos"|"Zneg"|"Z0"),_) -> (try Tnum (recognize t) with e when Errors.noncritical e -> Tother) | _ -> Tother with e when Logic.catchable_exception e -> Tother let parse_rel gl t = try match destructurate t with | Kapp("eq",[typ;t1;t2]) when destructurate (Tacmach.pf_nf gl typ) = Kapp("Z",[]) -> Req (t1,t2) | Kapp("Zne",[t1;t2]) -> Rne (t1,t2) | Kapp("Z.le",[t1;t2]) -> Rle (t1,t2) | Kapp("Z.lt",[t1;t2]) -> Rlt (t1,t2) | Kapp("Z.ge",[t1;t2]) -> Rge (t1,t2) | Kapp("Z.gt",[t1;t2]) -> Rgt (t1,t2) | _ -> parse_logic_rel t with e when Logic.catchable_exception e -> Rother let is_scalar t = let rec aux t = match destructurate t with | Kapp(("Z.add"|"Z.sub"|"Z.mul"),[t1;t2]) -> aux t1 & aux t2 | Kapp(("Z.opp"|"Z.succ"|"Z.pred"),[t]) -> aux t | Kapp(("Zpos"|"Zneg"|"Z0"),_) -> let _ = recognize t in true | _ -> false in try aux t with e when Errors.noncritical e -> false end coq-8.4pl2/plugins/romega/romega_plugin.mllib0000640000175000001440000000006211161000644020422 0ustar notinusersConst_omega Refl_omega G_romega Romega_plugin_mod coq-8.4pl2/plugins/romega/refl_omega.ml0000640000175000001440000013574712121620060017223 0ustar notinusers(************************************************************************* PROJET RNRT Calife - 2001 Author: Pierre Crgut - France Tlcom R&D Licence : LGPL version 2.1 *************************************************************************) open Util open Const_omega module OmegaSolver = Omega.MakeOmegaSolver (Bigint) open OmegaSolver (* \section{Useful functions and flags} *) (* Especially useful debugging functions *) let debug = ref false let show_goal gl = if !debug then Pp.ppnl (Tacmach.pr_gls gl); Tacticals.tclIDTAC gl let pp i = print_int i; print_newline (); flush stdout (* More readable than the prefix notation *) let (>>) = Tacticals.tclTHEN let mkApp = Term.mkApp (* \section{Types} \subsection{How to walk in a term} To represent how to get to a proposition. Only choice points are kept (branch to choose in a disjunction and identifier of the disjunctive connector) *) type direction = Left of int | Right of int (* Step to find a proposition (operators are at most binary). 
A list is a path *) type occ_step = O_left | O_right | O_mono type occ_path = occ_step list (* chemin identifiant une proposition sous forme du nom de l'hypothse et d'une liste de pas partir de la racine de l'hypothse *) type occurence = {o_hyp : Names.identifier; o_path : occ_path} (* \subsection{refiable formulas} *) type oformula = (* integer *) | Oint of Bigint.bigint (* recognized binary and unary operations *) | Oplus of oformula * oformula | Omult of oformula * oformula | Ominus of oformula * oformula | Oopp of oformula (* an atome in the environment *) | Oatom of int (* weird expression that cannot be translated *) | Oufo of oformula (* Operators for comparison recognized by Omega *) type comparaison = Eq | Leq | Geq | Gt | Lt | Neq (* Type des prdicats rifis (fragment de calcul propositionnel. Les * quantifications sont externes au langage) *) type oproposition = Pequa of Term.constr * oequation | Ptrue | Pfalse | Pnot of oproposition | Por of int * oproposition * oproposition | Pand of int * oproposition * oproposition | Pimp of int * oproposition * oproposition | Pprop of Term.constr (* Les quations ou proposiitions atomiques utiles du calcul *) and oequation = { e_comp: comparaison; (* comparaison *) e_left: oformula; (* formule brute gauche *) e_right: oformula; (* formule brute droite *) e_trace: Term.constr; (* tactique de normalisation *) e_origin: occurence; (* l'hypothse dont vient le terme *) e_negated: bool; (* vrai si apparait en position ni aprs normalisation *) e_depends: direction list; (* liste des points de disjonction dont dpend l'accs l'quation avec la direction (branche) pour y accder *) e_omega: afine (* la fonction normalise *) } (* \subsection{Proof context} This environment codes \begin{itemize} \item the terms and propositions that are given as parameters of the reified proof (and are represented as variables in the reified goals) \item translation functions linking the decision procedure and the Coq proof \end{itemize} *) type environment = { (* La liste des termes non reifies constituant l'environnement global *) mutable terms : Term.constr list; (* La meme chose pour les propositions *) mutable props : Term.constr list; (* Les variables introduites par omega *) mutable om_vars : (oformula * int) list; (* Traduction des indices utiliss ici en les indices finaux utiliss par * la tactique Omega aprs dnombrement des variables utiles *) real_indices : (int,int) Hashtbl.t; mutable cnt_connectors : int; equations : (int,oequation) Hashtbl.t; constructors : (int, occurence) Hashtbl.t } (* \subsection{Solution tree} Dfinition d'une solution trouve par Omega sous la forme d'un identifiant, d'un ensemble d'quation dont dpend la solution et d'une trace *) (* La liste des dpendances est trie et sans redondance *) type solution = { s_index : int; s_equa_deps : int list; s_trace : action list } (* Arbre de solution rsolvant compltement un ensemble de systmes *) type solution_tree = Leaf of solution (* un noeud interne reprsente un point de branchement correspondant l'limination d'un connecteur gnrant plusieurs buts (typ. disjonction). 
Le premier argument est l'identifiant du connecteur *) | Tree of int * solution_tree * solution_tree (* Reprsentation de l'environnement extrait du but initial sous forme de chemins pour extraire des equations ou d'hypothses *) type context_content = CCHyp of occurence | CCEqua of int (* \section{Specific utility functions to handle base types} *) (* Nom arbitraire de l'hypothse codant la ngation du but final *) let id_concl = Names.id_of_string "__goal__" (* Initialisation de l'environnement de rification de la tactique *) let new_environment () = { terms = []; props = []; om_vars = []; cnt_connectors = 0; real_indices = Hashtbl.create 7; equations = Hashtbl.create 7; constructors = Hashtbl.create 7; } (* Gnration d'un nom d'quation *) let new_connector_id env = env.cnt_connectors <- succ env.cnt_connectors; env.cnt_connectors (* Calcul de la branche complmentaire *) let barre = function Left x -> Right x | Right x -> Left x (* Identifiant associ une branche *) let indice = function Left x | Right x -> x (* Affichage de l'environnement de rification (termes et propositions) *) let print_env_reification env = let rec loop c i = function [] -> Printf.printf " ===============================\n\n" | t :: l -> Printf.printf " (%c%02d) := " c i; Pp.ppnl (Printer.pr_lconstr t); Pp.flush_all (); loop c (succ i) l in print_newline (); Printf.printf " ENVIRONMENT OF PROPOSITIONS :\n\n"; loop 'P' 0 env.props; Printf.printf " ENVIRONMENT OF TERMS :\n\n"; loop 'V' 0 env.terms (* \subsection{Gestion des environnements de variable pour Omega} *) (* generation d'identifiant d'equation pour Omega *) let new_omega_eq, rst_omega_eq = let cpt = ref 0 in (function () -> incr cpt; !cpt), (function () -> cpt:=0) (* generation d'identifiant de variable pour Omega *) let new_omega_var, rst_omega_var = let cpt = ref 0 in (function () -> incr cpt; !cpt), (function () -> cpt:=0) (* Affichage des variables d'un systme *) let display_omega_var i = Printf.sprintf "OV%d" i (* Recherche la variable codant un terme pour Omega et cre la variable dans l'environnement si il n'existe pas. Cas ou la variable dans Omega reprsente le terme d'un monome (le plus souvent un atome) *) let intern_omega env t = begin try List.assoc t env.om_vars with Not_found -> let v = new_omega_var () in env.om_vars <- (t,v) :: env.om_vars; v end (* Ajout forc d'un lien entre un terme et une variable Cas o la variable est cre par Omega et o il faut la lier aprs coup un atome rifi introduit de force *) let intern_omega_force env t v = env.om_vars <- (t,v) :: env.om_vars (* Rcupre le terme associ une variable *) let unintern_omega env id = let rec loop = function [] -> failwith "unintern" | ((t,j)::l) -> if id = j then t else loop l in loop env.om_vars (* \subsection{Gestion des environnements de variable pour la rflexion} Gestion des environnements de traduction entre termes des constructions non rifis et variables des termes reifies. Attention il s'agit de l'environnement initial contenant tout. Il faudra le rduire aprs calcul des variables utiles. 
*) let add_reified_atom t env = try list_index0_f Term.eq_constr t env.terms with Not_found -> let i = List.length env.terms in env.terms <- env.terms @ [t]; i let get_reified_atom env = try List.nth env.terms with e when Errors.noncritical e -> failwith "get_reified_atom" (* \subsection{Gestion de l'environnement de proposition pour Omega} *) (* ajout d'une proposition *) let add_prop env t = try list_index0_f Term.eq_constr t env.props with Not_found -> let i = List.length env.props in env.props <- env.props @ [t]; i (* accs a une proposition *) let get_prop v env = try List.nth v env with e when Errors.noncritical e -> failwith "get_prop" (* \subsection{Gestion du nommage des quations} *) (* Ajout d'une equation dans l'environnement de reification *) let add_equation env e = let id = e.e_omega.id in try let _ = Hashtbl.find env.equations id in () with Not_found -> Hashtbl.add env.equations id e (* accs a une equation *) let get_equation env id = try Hashtbl.find env.equations id with Not_found as e -> Printf.printf "Omega Equation %d non trouve\n" id; raise e (* Affichage des termes rifis *) let rec oprint ch = function | Oint n -> Printf.fprintf ch "%s" (Bigint.to_string n) | Oplus (t1,t2) -> Printf.fprintf ch "(%a + %a)" oprint t1 oprint t2 | Omult (t1,t2) -> Printf.fprintf ch "(%a * %a)" oprint t1 oprint t2 | Ominus(t1,t2) -> Printf.fprintf ch "(%a - %a)" oprint t1 oprint t2 | Oopp t1 ->Printf.fprintf ch "~ %a" oprint t1 | Oatom n -> Printf.fprintf ch "V%02d" n | Oufo x -> Printf.fprintf ch "?" let rec pprint ch = function Pequa (_,{ e_comp=comp; e_left=t1; e_right=t2 }) -> let connector = match comp with Eq -> "=" | Leq -> "<=" | Geq -> ">=" | Gt -> ">" | Lt -> "<" | Neq -> "!=" in Printf.fprintf ch "%a %s %a" oprint t1 connector oprint t2 | Ptrue -> Printf.fprintf ch "TT" | Pfalse -> Printf.fprintf ch "FF" | Pnot t -> Printf.fprintf ch "not(%a)" pprint t | Por (_,t1,t2) -> Printf.fprintf ch "(%a or %a)" pprint t1 pprint t2 | Pand(_,t1,t2) -> Printf.fprintf ch "(%a and %a)" pprint t1 pprint t2 | Pimp(_,t1,t2) -> Printf.fprintf ch "(%a => %a)" pprint t1 pprint t2 | Pprop c -> Printf.fprintf ch "Prop" let rec weight env = function | Oint _ -> -1 | Oopp c -> weight env c | Omult(c,_) -> weight env c | Oplus _ -> failwith "weight" | Ominus _ -> failwith "weight minus" | Oufo _ -> -1 | Oatom _ as c -> (intern_omega env c) (* \section{Passage entre oformules et reprsentation interne de Omega} *) (* \subsection{Oformula vers Omega} *) let omega_of_oformula env kind = let rec loop accu = function | Oplus(Omult(v,Oint n),r) -> loop ({v=intern_omega env v; c=n} :: accu) r | Oint n -> let id = new_omega_eq () in (*i tag_equation name id; i*) {kind = kind; body = List.rev accu; constant = n; id = id} | t -> print_string "CO"; oprint stdout t; failwith "compile_equation" in loop [] (* \subsection{Omega vers Oformula} *) let rec oformula_of_omega env af = let rec loop = function | ({v=v; c=n}::r) -> Oplus(Omult(unintern_omega env v,Oint n),loop r) | [] -> Oint af.constant in loop af.body let app f v = mkApp(Lazy.force f,v) (* \subsection{Oformula vers COQ reel} *) let rec coq_of_formula env t = let rec loop = function | Oplus (t1,t2) -> app Z.plus [| loop t1; loop t2 |] | Oopp t -> app Z.opp [| loop t |] | Omult(t1,t2) -> app Z.mult [| loop t1; loop t2 |] | Oint v -> Z.mk v | Oufo t -> loop t | Oatom var -> (* attention ne traite pas les nouvelles variables si on ne les * met pas dans env.term *) get_reified_atom env var | Ominus(t1,t2) -> app Z.minus [| loop t1; loop t2 |] in loop t (* 
\subsection{Oformula vers COQ reifi} *) let reified_of_atom env i = try Hashtbl.find env.real_indices i with Not_found -> Printf.printf "Atome %d non trouv\n" i; Hashtbl.iter (fun k v -> Printf.printf "%d -> %d\n" k v) env.real_indices; raise Not_found let rec reified_of_formula env = function | Oplus (t1,t2) -> app coq_t_plus [| reified_of_formula env t1; reified_of_formula env t2 |] | Oopp t -> app coq_t_opp [| reified_of_formula env t |] | Omult(t1,t2) -> app coq_t_mult [| reified_of_formula env t1; reified_of_formula env t2 |] | Oint v -> app coq_t_int [| Z.mk v |] | Oufo t -> reified_of_formula env t | Oatom i -> app coq_t_var [| mk_nat (reified_of_atom env i) |] | Ominus(t1,t2) -> app coq_t_minus [| reified_of_formula env t1; reified_of_formula env t2 |] let reified_of_formula env f = try reified_of_formula env f with reraise -> oprint stderr f; raise reraise let rec reified_of_proposition env = function Pequa (_,{ e_comp=Eq; e_left=t1; e_right=t2 }) -> app coq_p_eq [| reified_of_formula env t1; reified_of_formula env t2 |] | Pequa (_,{ e_comp=Leq; e_left=t1; e_right=t2 }) -> app coq_p_leq [| reified_of_formula env t1; reified_of_formula env t2 |] | Pequa(_,{ e_comp=Geq; e_left=t1; e_right=t2 }) -> app coq_p_geq [| reified_of_formula env t1; reified_of_formula env t2 |] | Pequa(_,{ e_comp=Gt; e_left=t1; e_right=t2 }) -> app coq_p_gt [| reified_of_formula env t1; reified_of_formula env t2 |] | Pequa(_,{ e_comp=Lt; e_left=t1; e_right=t2 }) -> app coq_p_lt [| reified_of_formula env t1; reified_of_formula env t2 |] | Pequa(_,{ e_comp=Neq; e_left=t1; e_right=t2 }) -> app coq_p_neq [| reified_of_formula env t1; reified_of_formula env t2 |] | Ptrue -> Lazy.force coq_p_true | Pfalse -> Lazy.force coq_p_false | Pnot t -> app coq_p_not [| reified_of_proposition env t |] | Por (_,t1,t2) -> app coq_p_or [| reified_of_proposition env t1; reified_of_proposition env t2 |] | Pand(_,t1,t2) -> app coq_p_and [| reified_of_proposition env t1; reified_of_proposition env t2 |] | Pimp(_,t1,t2) -> app coq_p_imp [| reified_of_proposition env t1; reified_of_proposition env t2 |] | Pprop t -> app coq_p_prop [| mk_nat (add_prop env t) |] let reified_of_proposition env f = try reified_of_proposition env f with reraise -> pprint stderr f; raise reraise (* \subsection{Omega vers COQ rifi} *) let reified_of_omega env body constant = let coeff_constant = app coq_t_int [| Z.mk constant |] in let mk_coeff {c=c; v=v} t = let coef = app coq_t_mult [| reified_of_formula env (unintern_omega env v); app coq_t_int [| Z.mk c |] |] in app coq_t_plus [|coef; t |] in List.fold_right mk_coeff body coeff_constant let reified_of_omega env body c = try reified_of_omega env body c with reraise -> display_eq display_omega_var (body,c); raise reraise (* \section{Oprations sur les quations} Ces fonctions prparent les traces utilises par la tactique rflchie pour faire des oprations de normalisation sur les quations. *) (* \subsection{Extractions des variables d'une quation} *) (* Extraction des variables d'une quation. 
*) (* Chaque fonction retourne une liste trie sans redondance *) let (@@) = list_merge_uniq compare let rec vars_of_formula = function | Oint _ -> [] | Oplus (e1,e2) -> (vars_of_formula e1) @@ (vars_of_formula e2) | Omult (e1,e2) -> (vars_of_formula e1) @@ (vars_of_formula e2) | Ominus (e1,e2) -> (vars_of_formula e1) @@ (vars_of_formula e2) | Oopp e -> vars_of_formula e | Oatom i -> [i] | Oufo _ -> [] let rec vars_of_equations = function | [] -> [] | e::l -> (vars_of_formula e.e_left) @@ (vars_of_formula e.e_right) @@ (vars_of_equations l) let rec vars_of_prop = function | Pequa(_,e) -> vars_of_equations [e] | Pnot p -> vars_of_prop p | Por(_,p1,p2) -> (vars_of_prop p1) @@ (vars_of_prop p2) | Pand(_,p1,p2) -> (vars_of_prop p1) @@ (vars_of_prop p2) | Pimp(_,p1,p2) -> (vars_of_prop p1) @@ (vars_of_prop p2) | Pprop _ | Ptrue | Pfalse -> [] (* \subsection{Multiplication par un scalaire} *) let rec scalar n = function Oplus(t1,t2) -> let tac1,t1' = scalar n t1 and tac2,t2' = scalar n t2 in do_list [Lazy.force coq_c_mult_plus_distr; do_both tac1 tac2], Oplus(t1',t2') | Oopp t -> do_list [Lazy.force coq_c_mult_opp_left], Omult(t,Oint(Bigint.neg n)) | Omult(t1,Oint x) -> do_list [Lazy.force coq_c_mult_assoc_reduced], Omult(t1,Oint (n*x)) | Omult(t1,t2) -> Util.error "Omega: Can't solve a goal with non-linear products" | (Oatom _ as t) -> do_list [], Omult(t,Oint n) | Oint i -> do_list [Lazy.force coq_c_reduce],Oint(n*i) | (Oufo _ as t)-> do_list [], Oufo (Omult(t,Oint n)) | Ominus _ -> failwith "scalar minus" (* \subsection{Propagation de l'inversion} *) let rec negate = function Oplus(t1,t2) -> let tac1,t1' = negate t1 and tac2,t2' = negate t2 in do_list [Lazy.force coq_c_opp_plus ; (do_both tac1 tac2)], Oplus(t1',t2') | Oopp t -> do_list [Lazy.force coq_c_opp_opp], t | Omult(t1,Oint x) -> do_list [Lazy.force coq_c_opp_mult_r], Omult(t1,Oint (Bigint.neg x)) | Omult(t1,t2) -> Util.error "Omega: Can't solve a goal with non-linear products" | (Oatom _ as t) -> do_list [Lazy.force coq_c_opp_one], Omult(t,Oint(negone)) | Oint i -> do_list [Lazy.force coq_c_reduce] ,Oint(Bigint.neg i) | Oufo c -> do_list [], Oufo (Oopp c) | Ominus _ -> failwith "negate minus" let rec norm l = (List.length l) (* \subsection{Mlange (fusion) de deux quations} *) (* \subsubsection{Version avec coefficients} *) let rec shuffle_path k1 e1 k2 e2 = let rec loop = function (({c=c1;v=v1}::l1) as l1'), (({c=c2;v=v2}::l2) as l2') -> if v1 = v2 then if k1*c1 + k2 * c2 = zero then ( Lazy.force coq_f_cancel :: loop (l1,l2)) else ( Lazy.force coq_f_equal :: loop (l1,l2) ) else if v1 > v2 then ( Lazy.force coq_f_left :: loop(l1,l2')) else ( Lazy.force coq_f_right :: loop(l1',l2)) | ({c=c1;v=v1}::l1), [] -> Lazy.force coq_f_left :: loop(l1,[]) | [],({c=c2;v=v2}::l2) -> Lazy.force coq_f_right :: loop([],l2) | [],[] -> flush stdout; [] in mk_shuffle_list (loop (e1,e2)) (* \subsubsection{Version sans coefficients} *) let rec shuffle env (t1,t2) = match t1,t2 with Oplus(l1,r1), Oplus(l2,r2) -> if weight env l1 > weight env l2 then let l_action,t' = shuffle env (r1,t2) in do_list [Lazy.force coq_c_plus_assoc_r;do_right l_action], Oplus(l1,t') else let l_action,t' = shuffle env (t1,r2) in do_list [Lazy.force coq_c_plus_permute;do_right l_action], Oplus(l2,t') | Oplus(l1,r1), t2 -> if weight env l1 > weight env t2 then let (l_action,t') = shuffle env (r1,t2) in do_list [Lazy.force coq_c_plus_assoc_r;do_right l_action],Oplus(l1, t') else do_list [Lazy.force coq_c_plus_comm], Oplus(t2,t1) | t1,Oplus(l2,r2) -> if weight env l2 > weight env t1 
then let (l_action,t') = shuffle env (t1,r2) in do_list [Lazy.force coq_c_plus_permute;do_right l_action], Oplus(l2,t') else do_list [],Oplus(t1,t2) | Oint t1,Oint t2 -> do_list [Lazy.force coq_c_reduce], Oint(t1+t2) | t1,t2 -> if weight env t1 < weight env t2 then do_list [Lazy.force coq_c_plus_comm], Oplus(t2,t1) else do_list [],Oplus(t1,t2) (* \subsection{Fusion avec rduction} *) let shrink_pair f1 f2 = begin match f1,f2 with Oatom v,Oatom _ -> Lazy.force coq_c_red1, Omult(Oatom v,Oint two) | Oatom v, Omult(_,c2) -> Lazy.force coq_c_red2, Omult(Oatom v,Oplus(c2,Oint one)) | Omult (v1,c1),Oatom v -> Lazy.force coq_c_red3, Omult(Oatom v,Oplus(c1,Oint one)) | Omult (Oatom v,c1),Omult (v2,c2) -> Lazy.force coq_c_red4, Omult(Oatom v,Oplus(c1,c2)) | t1,t2 -> oprint stdout t1; print_newline (); oprint stdout t2; print_newline (); flush Pervasives.stdout; Util.error "shrink.1" end (* \subsection{Calcul d'une sous formule constante} *) let reduce_factor = function Oatom v -> let r = Omult(Oatom v,Oint one) in [Lazy.force coq_c_red0],r | Omult(Oatom v,Oint n) as f -> [],f | Omult(Oatom v,c) -> let rec compute = function Oint n -> n | Oplus(t1,t2) -> compute t1 + compute t2 | _ -> Util.error "condense.1" in [Lazy.force coq_c_reduce], Omult(Oatom v,Oint(compute c)) | t -> Util.error "reduce_factor.1" (* \subsection{Rordonnancement} *) let rec condense env = function Oplus(f1,(Oplus(f2,r) as t)) -> if weight env f1 = weight env f2 then begin let shrink_tac,t = shrink_pair f1 f2 in let assoc_tac = Lazy.force coq_c_plus_assoc_l in let tac_list,t' = condense env (Oplus(t,r)) in assoc_tac :: do_left (do_list [shrink_tac]) :: tac_list, t' end else begin let tac,f = reduce_factor f1 in let tac',t' = condense env t in [do_both (do_list tac) (do_list tac')], Oplus(f,t') end | Oplus(f1,Oint n) -> let tac,f1' = reduce_factor f1 in [do_left (do_list tac)],Oplus(f1',Oint n) | Oplus(f1,f2) -> if weight env f1 = weight env f2 then begin let tac_shrink,t = shrink_pair f1 f2 in let tac,t' = condense env t in tac_shrink :: tac,t' end else begin let tac,f = reduce_factor f1 in let tac',t' = condense env f2 in [do_both (do_list tac) (do_list tac')],Oplus(f,t') end | (Oint _ as t)-> [],t | t -> let tac,t' = reduce_factor t in let final = Oplus(t',Oint zero) in tac @ [Lazy.force coq_c_red6], final (* \subsection{Elimination des zros} *) let rec clear_zero = function Oplus(Omult(Oatom v,Oint n),r) when n=zero -> let tac',t = clear_zero r in Lazy.force coq_c_red5 :: tac',t | Oplus(f,r) -> let tac,t = clear_zero r in (if tac = [] then [] else [do_right (do_list tac)]),Oplus(f,t) | t -> [],t;; (* \subsection{Transformation des hypothses} *) let rec reduce env = function Oplus(t1,t2) -> let t1', trace1 = reduce env t1 in let t2', trace2 = reduce env t2 in let trace3,t' = shuffle env (t1',t2') in t', do_list [do_both trace1 trace2; trace3] | Ominus(t1,t2) -> let t,trace = reduce env (Oplus(t1, Oopp t2)) in t, do_list [Lazy.force coq_c_minus; trace] | Omult(t1,t2) as t -> let t1', trace1 = reduce env t1 in let t2', trace2 = reduce env t2 in begin match t1',t2' with | (_, Oint n) -> let tac,t' = scalar n t1' in t', do_list [do_both trace1 trace2; tac] | (Oint n,_) -> let tac,t' = scalar n t2' in t', do_list [do_both trace1 trace2; Lazy.force coq_c_mult_comm; tac] | _ -> Oufo t, Lazy.force coq_c_nop end | Oopp t -> let t',trace = reduce env t in let trace',t'' = negate t' in t'', do_list [do_left trace; trace'] | (Oint _ | Oatom _ | Oufo _) as t -> t, Lazy.force coq_c_nop let normalize_linear_term env t = let t1,trace1 = reduce 
env t in let trace2,t2 = condense env t1 in let trace3,t3 = clear_zero t2 in do_list [trace1; do_list trace2; do_list trace3], t3 (* Cette fonction reproduit trs exactement le comportement de [p_invert] *) let negate_oper = function Eq -> Neq | Neq -> Eq | Leq -> Gt | Geq -> Lt | Lt -> Geq | Gt -> Leq let normalize_equation env (negated,depends,origin,path) (oper,t1,t2) = let mk_step t1 t2 f kind = let t = f t1 t2 in let trace, oterm = normalize_linear_term env t in let equa = omega_of_oformula env kind oterm in { e_comp = oper; e_left = t1; e_right = t2; e_negated = negated; e_depends = depends; e_origin = { o_hyp = origin; o_path = List.rev path }; e_trace = trace; e_omega = equa } in try match (if negated then (negate_oper oper) else oper) with | Eq -> mk_step t1 t2 (fun o1 o2 -> Oplus (o1,Oopp o2)) EQUA | Neq -> mk_step t1 t2 (fun o1 o2 -> Oplus (o1,Oopp o2)) DISE | Leq -> mk_step t1 t2 (fun o1 o2 -> Oplus (o2,Oopp o1)) INEQ | Geq -> mk_step t1 t2 (fun o1 o2 -> Oplus (o1,Oopp o2)) INEQ | Lt -> mk_step t1 t2 (fun o1 o2 -> Oplus (Oplus(o2,Oint negone),Oopp o1)) INEQ | Gt -> mk_step t1 t2 (fun o1 o2 -> Oplus (Oplus(o1,Oint negone),Oopp o2)) INEQ with e when Logic.catchable_exception e -> raise e (* \section{Compilation des hypothses} *) let rec oformula_of_constr env t = match Z.parse_term t with | Tplus (t1,t2) -> binop env (fun x y -> Oplus(x,y)) t1 t2 | Tminus (t1,t2) -> binop env (fun x y -> Ominus(x,y)) t1 t2 | Tmult (t1,t2) when Z.is_scalar t1 || Z.is_scalar t2 -> binop env (fun x y -> Omult(x,y)) t1 t2 | Topp t -> Oopp(oformula_of_constr env t) | Tsucc t -> Oplus(oformula_of_constr env t, Oint one) | Tnum n -> Oint n | _ -> Oatom (add_reified_atom t env) and binop env c t1 t2 = let t1' = oformula_of_constr env t1 in let t2' = oformula_of_constr env t2 in c t1' t2' and binprop env (neg2,depends,origin,path) add_to_depends neg1 gl c t1 t2 = let i = new_connector_id env in let depends1 = if add_to_depends then Left i::depends else depends in let depends2 = if add_to_depends then Right i::depends else depends in if add_to_depends then Hashtbl.add env.constructors i {o_hyp = origin; o_path = List.rev path}; let t1' = oproposition_of_constr env (neg1,depends1,origin,O_left::path) gl t1 in let t2' = oproposition_of_constr env (neg2,depends2,origin,O_right::path) gl t2 in (* On numrote le connecteur dans l'environnement. *) c i t1' t2' and mk_equation env ctxt c connector t1 t2 = let t1' = oformula_of_constr env t1 in let t2' = oformula_of_constr env t2 in (* On ajoute l'equation dans l'environnement. 
*) let omega = normalize_equation env ctxt (connector,t1',t2') in add_equation env omega; Pequa (c,omega) and oproposition_of_constr env ((negated,depends,origin,path) as ctxt) gl c = match Z.parse_rel gl c with | Req (t1,t2) -> mk_equation env ctxt c Eq t1 t2 | Rne (t1,t2) -> mk_equation env ctxt c Neq t1 t2 | Rle (t1,t2) -> mk_equation env ctxt c Leq t1 t2 | Rlt (t1,t2) -> mk_equation env ctxt c Lt t1 t2 | Rge (t1,t2) -> mk_equation env ctxt c Geq t1 t2 | Rgt (t1,t2) -> mk_equation env ctxt c Gt t1 t2 | Rtrue -> Ptrue | Rfalse -> Pfalse | Rnot t -> let t' = oproposition_of_constr env (not negated, depends, origin,(O_mono::path)) gl t in Pnot t' | Ror (t1,t2) -> binprop env ctxt (not negated) negated gl (fun i x y -> Por(i,x,y)) t1 t2 | Rand (t1,t2) -> binprop env ctxt negated negated gl (fun i x y -> Pand(i,x,y)) t1 t2 | Rimp (t1,t2) -> binprop env ctxt (not negated) (not negated) gl (fun i x y -> Pimp(i,x,y)) t1 t2 | Riff (t1,t2) -> binprop env ctxt negated negated gl (fun i x y -> Pand(i,x,y)) (Term.mkArrow t1 t2) (Term.mkArrow t2 t1) | _ -> Pprop c (* Destructuration des hypothses et de la conclusion *) let reify_gl env gl = let concl = Tacmach.pf_concl gl in let t_concl = Pnot (oproposition_of_constr env (true,[],id_concl,[O_mono]) gl concl) in if !debug then begin Printf.printf "REIFED PROBLEM\n\n"; Printf.printf " CONCL: "; pprint stdout t_concl; Printf.printf "\n" end; let rec loop = function (i,t) :: lhyps -> let t' = oproposition_of_constr env (false,[],i,[]) gl t in if !debug then begin Printf.printf " %s: " (Names.string_of_id i); pprint stdout t'; Printf.printf "\n" end; (i,t') :: loop lhyps | [] -> if !debug then print_env_reification env; [] in let t_lhyps = loop (Tacmach.pf_hyps_types gl) in (id_concl,t_concl) :: t_lhyps let rec destructurate_pos_hyp orig list_equations list_depends = function | Pequa (_,e) -> [e :: list_equations] | Ptrue | Pfalse | Pprop _ -> [list_equations] | Pnot t -> destructurate_neg_hyp orig list_equations list_depends t | Por (i,t1,t2) -> let s1 = destructurate_pos_hyp orig list_equations (i::list_depends) t1 in let s2 = destructurate_pos_hyp orig list_equations (i::list_depends) t2 in s1 @ s2 | Pand(i,t1,t2) -> let list_s1 = destructurate_pos_hyp orig list_equations (list_depends) t1 in let rec loop = function le1 :: ll -> destructurate_pos_hyp orig le1 list_depends t2 @ loop ll | [] -> [] in loop list_s1 | Pimp(i,t1,t2) -> let s1 = destructurate_neg_hyp orig list_equations (i::list_depends) t1 in let s2 = destructurate_pos_hyp orig list_equations (i::list_depends) t2 in s1 @ s2 and destructurate_neg_hyp orig list_equations list_depends = function | Pequa (_,e) -> [e :: list_equations] | Ptrue | Pfalse | Pprop _ -> [list_equations] | Pnot t -> destructurate_pos_hyp orig list_equations list_depends t | Pand (i,t1,t2) -> let s1 = destructurate_neg_hyp orig list_equations (i::list_depends) t1 in let s2 = destructurate_neg_hyp orig list_equations (i::list_depends) t2 in s1 @ s2 | Por(_,t1,t2) -> let list_s1 = destructurate_neg_hyp orig list_equations list_depends t1 in let rec loop = function le1 :: ll -> destructurate_neg_hyp orig le1 list_depends t2 @ loop ll | [] -> [] in loop list_s1 | Pimp(_,t1,t2) -> let list_s1 = destructurate_pos_hyp orig list_equations list_depends t1 in let rec loop = function le1 :: ll -> destructurate_neg_hyp orig le1 list_depends t2 @ loop ll | [] -> [] in loop list_s1 let destructurate_hyps syst = let rec loop = function (i,t) :: l -> let l_syst1 = destructurate_pos_hyp i [] [] t in let l_syst2 = loop l in 
list_cartesian (@) l_syst1 l_syst2 | [] -> [[]] in loop syst (* \subsection{Affichage d'un systme d'quation} *) (* Affichage des dpendances de systme *) let display_depend = function Left i -> Printf.printf " L%d" i | Right i -> Printf.printf " R%d" i let display_systems syst_list = let display_omega om_e = Printf.printf " E%d : %a %s 0\n" om_e.id (fun _ -> display_eq display_omega_var) (om_e.body, om_e.constant) (operator_of_eq om_e.kind) in let display_equation oformula_eq = pprint stdout (Pequa (Lazy.force coq_c_nop,oformula_eq)); print_newline (); display_omega oformula_eq.e_omega; Printf.printf " Depends on:"; List.iter display_depend oformula_eq.e_depends; Printf.printf "\n Path: %s" (String.concat "" (List.map (function O_left -> "L" | O_right -> "R" | O_mono -> "M") oformula_eq.e_origin.o_path)); Printf.printf "\n Origin: %s (negated : %s)\n\n" (Names.string_of_id oformula_eq.e_origin.o_hyp) (if oformula_eq.e_negated then "yes" else "no") in let display_system syst = Printf.printf "=SYSTEM===================================\n"; List.iter display_equation syst in List.iter display_system syst_list (* Extraction des prdicats utilises dans une trace. Permet ensuite le calcul des hypothses *) let rec hyps_used_in_trace = function | act :: l -> begin match act with | HYP e -> [e.id] @@ (hyps_used_in_trace l) | SPLIT_INEQ (_,(_,act1),(_,act2)) -> hyps_used_in_trace act1 @@ hyps_used_in_trace act2 | _ -> hyps_used_in_trace l end | [] -> [] (* Extraction des variables dclares dans une quation. Permet ensuite de les dclarer dans l'environnement de la procdure rflexive et viter les crations de variable au vol *) let rec variable_stated_in_trace = function | act :: l -> begin match act with | STATE action -> (*i nlle_equa: afine, def: afine, eq_orig: afine, i*) (*i coef: int, var:int i*) action :: variable_stated_in_trace l | SPLIT_INEQ (_,(_,act1),(_,act2)) -> variable_stated_in_trace act1 @ variable_stated_in_trace act2 | _ -> variable_stated_in_trace l end | [] -> [] ;; let add_stated_equations env tree = (* Il faut trier les variables par ordre d'introduction pour ne pas risquer de dfinir dans le mauvais ordre *) let stated_equations = let cmpvar x y = Pervasives.(-) x.st_var y.st_var in let rec loop = function | Tree(_,t1,t2) -> List.merge cmpvar (loop t1) (loop t2) | Leaf s -> List.sort cmpvar (variable_stated_in_trace s.s_trace) in loop tree in let add_env st = (* On retransforme la dfinition de v en formule reifie *) let v_def = oformula_of_omega env st.st_def in (* Notez que si l'ordre de cration des variables n'est pas respect, * ca va planter *) let coq_v = coq_of_formula env v_def in let v = add_reified_atom coq_v env in (* Le terme qu'il va falloir introduire *) let term_to_generalize = app coq_refl_equal [|Lazy.force Z.typ; coq_v|] in (* sa reprsentation sous forme d'quation mais non rifi car on n'a pas * l'environnement pour le faire correctement *) let term_to_reify = (v_def,Oatom v) in (* enregistre le lien entre la variable omega et la variable Coq *) intern_omega_force env (Oatom v) st.st_var; (v, term_to_generalize,term_to_reify,st.st_def.id) in List.map add_env stated_equations (* Calcule la liste des clatements raliser sur les hypothses ncessaires pour extraire une liste d'quations donne *) (* PL: experimentally, the result order of the following function seems _very_ crucial for efficiency. No idea why. Do not remove the List.rev or modify the current semantics of Util.list_union (some elements of first arg, then second arg), unless you know what you're doing. 
*) let rec get_eclatement env = function i :: r -> let l = try (get_equation env i).e_depends with Not_found -> [] in list_union (List.rev l) (get_eclatement env r) | [] -> [] let select_smaller l = let comp (_,x) (_,y) = Pervasives.(-) (List.length x) (List.length y) in try List.hd (List.sort comp l) with Failure _ -> failwith "select_smaller" let filter_compatible_systems required systems = let rec select = function (x::l) -> if List.mem x required then select l else if List.mem (barre x) required then failwith "Exit" else x :: select l | [] -> [] in map_succeed (function (sol,splits) -> (sol,select splits)) systems let rec equas_of_solution_tree = function Tree(_,t1,t2) -> (equas_of_solution_tree t1)@@(equas_of_solution_tree t2) | Leaf s -> s.s_equa_deps (* [really_useful_prop] pushes useless props in a new Pprop variable *) (* Things get shorter, but may also get wrong, since a Prop is considered to be undecidable in ReflOmegaCore.concl_to_hyp, whereas for instance Pfalse is decidable. So should not be used on conclusion (??) *) let really_useful_prop l_equa c = let rec real_of = function Pequa(t,_) -> t | Ptrue -> app coq_True [||] | Pfalse -> app coq_False [||] | Pnot t1 -> app coq_not [|real_of t1|] | Por(_,t1,t2) -> app coq_or [|real_of t1; real_of t2|] | Pand(_,t1,t2) -> app coq_and [|real_of t1; real_of t2|] (* Attention : implications sur le lifting des variables comprendre ! *) | Pimp(_,t1,t2) -> Term.mkArrow (real_of t1) (real_of t2) | Pprop t -> t in let rec loop c = match c with Pequa(_,e) -> if List.mem e.e_omega.id l_equa then Some c else None | Ptrue -> None | Pfalse -> None | Pnot t1 -> begin match loop t1 with None -> None | Some t1' -> Some (Pnot t1') end | Por(i,t1,t2) -> binop (fun (t1,t2) -> Por(i,t1,t2)) t1 t2 | Pand(i,t1,t2) -> binop (fun (t1,t2) -> Pand(i,t1,t2)) t1 t2 | Pimp(i,t1,t2) -> binop (fun (t1,t2) -> Pimp(i,t1,t2)) t1 t2 | Pprop t -> None and binop f t1 t2 = begin match loop t1, loop t2 with None, None -> None | Some t1',Some t2' -> Some (f(t1',t2')) | Some t1',None -> Some (f(t1',Pprop (real_of t2))) | None,Some t2' -> Some (f(Pprop (real_of t1),t2')) end in match loop c with None -> Pprop (real_of c) | Some t -> t let rec display_solution_tree ch = function Leaf t -> output_string ch (Printf.sprintf "%d[%s]" t.s_index (String.concat " " (List.map string_of_int t.s_equa_deps))) | Tree(i,t1,t2) -> Printf.fprintf ch "S%d(%a,%a)" i display_solution_tree t1 display_solution_tree t2 let rec solve_with_constraints all_solutions path = let rec build_tree sol buf = function [] -> Leaf sol | (Left i :: remainder) -> Tree(i, build_tree sol (Left i :: buf) remainder, solve_with_constraints all_solutions (List.rev(Right i :: buf))) | (Right i :: remainder) -> Tree(i, solve_with_constraints all_solutions (List.rev (Left i :: buf)), build_tree sol (Right i :: buf) remainder) in let weighted = filter_compatible_systems path all_solutions in let (winner_sol,winner_deps) = try select_smaller weighted with reraise -> Printf.printf "%d - %d\n" (List.length weighted) (List.length all_solutions); List.iter display_depend path; raise reraise in build_tree winner_sol (List.rev path) winner_deps let find_path {o_hyp=id;o_path=p} env = let rec loop_path = function ([],l) -> Some l | (x1::l1,x2::l2) when x1 = x2 -> loop_path (l1,l2) | _ -> None in let rec loop_id i = function CCHyp{o_hyp=id';o_path=p'} :: l when id = id' -> begin match loop_path (p',p) with Some r -> i,r | None -> loop_id (succ i) l end | _ :: l -> loop_id (succ i) l | [] -> failwith "find_path" in loop_id 0 env 
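(* A minimal illustrative sketch, not part of the original plugin: [find_path]
   above matches the recorded path of a context entry against the path of the
   occurrence being looked up, and returns the index of the matching entry
   together with the residual path still to be followed. The helper below
   replays the prefix-matching step of [loop_path] on plain lists; the name
   [_drop_prefix_example] is hypothetical and introduced only for this
   sketch. *)
let rec _drop_prefix_example prefix l =
  match prefix, l with
  | [], rest -> Some rest
  | x :: p', y :: l' when x = y -> _drop_prefix_example p' l'
  | _, _ -> None
(* For instance, [_drop_prefix_example [O_left] [O_left; O_right]] evaluates
   to [Some [O_right]]: the steps already consumed by the hypothesis are
   dropped and only the remaining directions need to be followed. *)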
let mk_direction_list l = let trans = function O_left -> coq_d_left | O_right -> coq_d_right | O_mono -> coq_d_mono in mk_list (Lazy.force coq_direction) (List.map (fun d-> Lazy.force(trans d)) l) (* \section{Rejouer l'historique} *) let get_hyp env_hyp i = try list_index0 (CCEqua i) env_hyp with Not_found -> failwith (Printf.sprintf "get_hyp %d" i) let replay_history env env_hyp = let rec loop env_hyp t = match t with | CONTRADICTION (e1,e2) :: l -> let trace = mk_nat (List.length e1.body) in mkApp (Lazy.force coq_s_contradiction, [| trace ; mk_nat (get_hyp env_hyp e1.id); mk_nat (get_hyp env_hyp e2.id) |]) | DIVIDE_AND_APPROX (e1,e2,k,d) :: l -> mkApp (Lazy.force coq_s_div_approx, [| Z.mk k; Z.mk d; reified_of_omega env e2.body e2.constant; mk_nat (List.length e2.body); loop env_hyp l; mk_nat (get_hyp env_hyp e1.id) |]) | NOT_EXACT_DIVIDE (e1,k) :: l -> let e2_constant = floor_div e1.constant k in let d = e1.constant - e2_constant * k in let e2_body = map_eq_linear (fun c -> c / k) e1.body in mkApp (Lazy.force coq_s_not_exact_divide, [|Z.mk k; Z.mk d; reified_of_omega env e2_body e2_constant; mk_nat (List.length e2_body); mk_nat (get_hyp env_hyp e1.id)|]) | EXACT_DIVIDE (e1,k) :: l -> let e2_body = map_eq_linear (fun c -> c / k) e1.body in let e2_constant = floor_div e1.constant k in mkApp (Lazy.force coq_s_exact_divide, [|Z.mk k; reified_of_omega env e2_body e2_constant; mk_nat (List.length e2_body); loop env_hyp l; mk_nat (get_hyp env_hyp e1.id)|]) | (MERGE_EQ(e3,e1,e2)) :: l -> let n1 = get_hyp env_hyp e1.id and n2 = get_hyp env_hyp e2 in mkApp (Lazy.force coq_s_merge_eq, [| mk_nat (List.length e1.body); mk_nat n1; mk_nat n2; loop (CCEqua e3:: env_hyp) l |]) | SUM(e3,(k1,e1),(k2,e2)) :: l -> let n1 = get_hyp env_hyp e1.id and n2 = get_hyp env_hyp e2.id in let trace = shuffle_path k1 e1.body k2 e2.body in mkApp (Lazy.force coq_s_sum, [| Z.mk k1; mk_nat n1; Z.mk k2; mk_nat n2; trace; (loop (CCEqua e3 :: env_hyp) l) |]) | CONSTANT_NOT_NUL(e,k) :: l -> mkApp (Lazy.force coq_s_constant_not_nul, [| mk_nat (get_hyp env_hyp e) |]) | CONSTANT_NEG(e,k) :: l -> mkApp (Lazy.force coq_s_constant_neg, [| mk_nat (get_hyp env_hyp e) |]) | STATE {st_new_eq=new_eq; st_def =def; st_orig=orig; st_coef=m; st_var=sigma } :: l -> let n1 = get_hyp env_hyp orig.id and n2 = get_hyp env_hyp def.id in let v = unintern_omega env sigma in let o_def = oformula_of_omega env def in let o_orig = oformula_of_omega env orig in let body = Oplus (o_orig,Omult (Oplus (Oopp v,o_def), Oint m)) in let trace,_ = normalize_linear_term env body in mkApp (Lazy.force coq_s_state, [| Z.mk m; trace; mk_nat n1; mk_nat n2; loop (CCEqua new_eq.id :: env_hyp) l |]) | HYP _ :: l -> loop env_hyp l | CONSTANT_NUL e :: l -> mkApp (Lazy.force coq_s_constant_nul, [| mk_nat (get_hyp env_hyp e) |]) | NEGATE_CONTRADICT(e1,e2,true) :: l -> mkApp (Lazy.force coq_s_negate_contradict, [| mk_nat (get_hyp env_hyp e1.id); mk_nat (get_hyp env_hyp e2.id) |]) | NEGATE_CONTRADICT(e1,e2,false) :: l -> mkApp (Lazy.force coq_s_negate_contradict_inv, [| mk_nat (List.length e2.body); mk_nat (get_hyp env_hyp e1.id); mk_nat (get_hyp env_hyp e2.id) |]) | SPLIT_INEQ(e,(e1,l1),(e2,l2)) :: l -> let i = get_hyp env_hyp e.id in let r1 = loop (CCEqua e1 :: env_hyp) l1 in let r2 = loop (CCEqua e2 :: env_hyp) l2 in mkApp (Lazy.force coq_s_split_ineq, [| mk_nat (List.length e.body); mk_nat i; r1 ; r2 |]) | (FORGET_C _ | FORGET _ | FORGET_I _) :: l -> loop env_hyp l | (WEAKEN _ ) :: l -> failwith "not_treated" | [] -> failwith "no contradiction" in loop env_hyp let rec 
decompose_tree env ctxt = function Tree(i,left,right) -> let org = try Hashtbl.find env.constructors i with Not_found -> failwith (Printf.sprintf "Cannot find constructor %d" i) in let (index,path) = find_path org ctxt in let left_hyp = CCHyp{o_hyp=org.o_hyp;o_path=org.o_path @ [O_left]} in let right_hyp = CCHyp{o_hyp=org.o_hyp;o_path=org.o_path @ [O_right]} in app coq_e_split [| mk_nat index; mk_direction_list path; decompose_tree env (left_hyp::ctxt) left; decompose_tree env (right_hyp::ctxt) right |] | Leaf s -> decompose_tree_hyps s.s_trace env ctxt s.s_equa_deps and decompose_tree_hyps trace env ctxt = function [] -> app coq_e_solve [| replay_history env ctxt trace |] | (i::l) -> let equation = try Hashtbl.find env.equations i with Not_found -> failwith (Printf.sprintf "Cannot find equation %d" i) in let (index,path) = find_path equation.e_origin ctxt in let full_path = if equation.e_negated then path @ [O_mono] else path in let cont = decompose_tree_hyps trace env (CCEqua equation.e_omega.id :: ctxt) l in app coq_e_extract [|mk_nat index; mk_direction_list full_path; cont |] (* \section{La fonction principale} *) (* Cette fonction construit la trace pour la procdure de dcision rflexive. A partir des rsultats de l'extraction des systmes, elle lance la rsolution par Omega, puis l'extraction d'un ensemble minimal de solutions permettant la rsolution globale du systme et enfin construit la trace qui permet de faire rejouer cette solution par la tactique rflexive. *) let resolution env full_reified_goal systems_list = let num = ref 0 in let solve_system list_eq = let index = !num in let system = List.map (fun eq -> eq.e_omega) list_eq in let trace = simplify_strong (new_omega_eq,new_omega_var,display_omega_var) system in (* calcule les hypotheses utilises pour la solution *) let vars = hyps_used_in_trace trace in let splits = get_eclatement env vars in if !debug then begin Printf.printf "SYSTEME %d\n" index; display_action display_omega_var trace; print_string "\n Depend :"; List.iter (fun i -> Printf.printf " %d" i) vars; print_string "\n Split points :"; List.iter display_depend splits; Printf.printf "\n------------------------------------\n" end; incr num; {s_index = index; s_trace = trace; s_equa_deps = vars}, splits in if !debug then Printf.printf "\n====================================\n"; let all_solutions = List.map solve_system systems_list in let solution_tree = solve_with_constraints all_solutions [] in if !debug then begin display_solution_tree stdout solution_tree; print_newline() end; (* calcule la liste de toutes les hypothses utilises dans l'arbre de solution *) let useful_equa_id = equas_of_solution_tree solution_tree in (* recupere explicitement ces equations *) let equations = List.map (get_equation env) useful_equa_id in let l_hyps' = list_uniquize (List.map (fun e -> e.e_origin.o_hyp) equations) in let l_hyps = id_concl :: list_remove id_concl l_hyps' in let useful_hyps = List.map (fun id -> List.assoc id full_reified_goal) l_hyps in let useful_vars = let really_useful_vars = vars_of_equations equations in let concl_vars = vars_of_prop (List.assoc id_concl full_reified_goal) in really_useful_vars @@ concl_vars in (* variables a introduire *) let to_introduce = add_stated_equations env solution_tree in let stated_vars = List.map (fun (v,_,_,_) -> v) to_introduce in let l_generalize_arg = List.map (fun (_,t,_,_) -> t) to_introduce in let hyp_stated_vars = List.map (fun (_,_,_,id) -> CCEqua id) to_introduce in (* L'environnement de base se construit en deux morceaux : - 
les variables des quations utiles (et de la conclusion) - les nouvelles variables declares durant les preuves *) let all_vars_env = useful_vars @ stated_vars in let basic_env = let rec loop i = function var :: l -> let t = get_reified_atom env var in Hashtbl.add env.real_indices var i; t :: loop (succ i) l | [] -> [] in loop 0 all_vars_env in let env_terms_reified = mk_list (Lazy.force Z.typ) basic_env in (* On peut maintenant gnraliser le but : env est a jour *) let l_reified_stated = List.map (fun (_,_,(l,r),_) -> app coq_p_eq [| reified_of_formula env l; reified_of_formula env r |]) to_introduce in let reified_concl = match useful_hyps with (Pnot p) :: _ -> reified_of_proposition env p | _ -> reified_of_proposition env Pfalse in let l_reified_terms = (List.map (fun p -> reified_of_proposition env (really_useful_prop useful_equa_id p)) (List.tl useful_hyps)) in let env_props_reified = mk_plist env.props in let reified_goal = mk_list (Lazy.force coq_proposition) (l_reified_stated @ l_reified_terms) in let reified = app coq_interp_sequent [| reified_concl;env_props_reified;env_terms_reified;reified_goal|] in let normalize_equation e = let rec loop = function [] -> app (if e.e_negated then coq_p_invert else coq_p_step) [| e.e_trace |] | ((O_left | O_mono) :: l) -> app coq_p_left [| loop l |] | (O_right :: l) -> app coq_p_right [| loop l |] in let correct_index = let i = list_index0 e.e_origin.o_hyp l_hyps in (* PL: it seems that additionnally introduced hyps are in the way during normalization, hence this index shifting... *) if i=0 then 0 else Pervasives.(+) i (List.length to_introduce) in app coq_pair_step [| mk_nat correct_index; loop e.e_origin.o_path |] in let normalization_trace = mk_list (Lazy.force coq_h_step) (List.map normalize_equation equations) in let initial_context = List.map (fun id -> CCHyp{o_hyp=id;o_path=[]}) (List.tl l_hyps) in let context = CCHyp{o_hyp=id_concl;o_path=[]} :: hyp_stated_vars @ initial_context in let decompose_tactic = decompose_tree env context solution_tree in Tactics.generalize (l_generalize_arg @ List.map Term.mkVar (List.tl l_hyps)) >> Tactics.change_in_concl None reified >> Tactics.apply (app coq_do_omega [|decompose_tactic; normalization_trace|]) >> show_goal >> Tactics.normalise_vm_in_concl >> (*i Alternatives to the previous line: - Normalisation without VM: Tactics.normalise_in_concl - Skip the conversion check and rely directly on the QED: Tacmach.convert_concl_no_check (Lazy.force coq_True) Term.VMcast >> i*) Tactics.apply (Lazy.force coq_I) let total_reflexive_omega_tactic gl = Coqlib.check_required_library ["Coq";"romega";"ROmega"]; rst_omega_eq (); rst_omega_var (); try let env = new_environment () in let full_reified_goal = reify_gl env gl in let systems_list = destructurate_hyps full_reified_goal in if !debug then display_systems systems_list; resolution env full_reified_goal systems_list gl with NO_CONTRADICTION -> Util.error "ROmega can't solve this system" (*i let tester = Tacmach.hide_atomic_tactic "TestOmega" test_tactic i*) coq-8.4pl2/plugins/romega/g_romega.ml40000640000175000001440000000243211254456226016770 0ustar notinusers(************************************************************************* PROJET RNRT Calife - 2001 Author: Pierre Crgut - France Tlcom R&D Licence : LGPL version 2.1 *************************************************************************) (*i camlp4deps: "parsing/grammar.cma" i*) open Refl_omega open Refiner let romega_tactic l = let tacs = List.map (function | "nat" -> Tacinterp.interp <:tactic> | 
"positive" -> Tacinterp.interp <:tactic> | "N" -> Tacinterp.interp <:tactic> | "Z" -> Tacinterp.interp <:tactic> | s -> Util.error ("No ROmega knowledge base for type "^s)) (Util.list_uniquize (List.sort compare l)) in tclTHEN (tclREPEAT (tclPROGRESS (tclTHENLIST tacs))) (tclTHEN (* because of the contradiction process in (r)omega, we'd better leave as little as possible in the conclusion, for an easier decidability argument. *) Tactics.intros total_reflexive_omega_tactic) TACTIC EXTEND romega | [ "romega" ] -> [ romega_tactic [] ] END TACTIC EXTEND romega' | [ "romega" "with" ne_ident_list(l) ] -> [ romega_tactic (List.map Names.string_of_id l) ] | [ "romega" "with" "*" ] -> [ romega_tactic ["nat";"positive";"N";"Z"] ] END coq-8.4pl2/plugins/romega/README0000640000175000001440000000023011160567762015450 0ustar notinusersThis work was done for the RNRT Project Calife. As such it is distributed under the LGPL licence. Report bugs to : pierre.cregut@francetelecom.com coq-8.4pl2/plugins/romega/ROmega.v0000640000175000001440000000065611160567762016145 0ustar notinusers(************************************************************************* PROJET RNRT Calife - 2001 Author: Pierre Crégut - France Télécom R&D Licence : LGPL version 2.1 *************************************************************************) Require Import ReflOmegaCore. Require Export Setoid. Require Export PreOmega. Require Export ZArith_base. Require Import OmegaPlugin. Declare ML Module "romega_plugin".coq-8.4pl2/plugins/romega/ReflOmegaCore.v0000640000175000001440000027424312063736507017450 0ustar notinusers(* -*- coding: utf-8 -*- *) (************************************************************************* PROJET RNRT Calife - 2001 Author: Pierre Crégut - France Télécom R&D Licence du projet : LGPL version 2.1 *************************************************************************) Require Import List Bool Sumbool EqNat Setoid Ring_theory Decidable ZArith_base. Delimit Scope Int_scope with I. (* Abstract Integers. *) Module Type Int. Parameter t : Set. Parameter zero : t. Parameter one : t. Parameter plus : t -> t -> t. Parameter opp : t -> t. Parameter minus : t -> t -> t. Parameter mult : t -> t -> t. Notation "0" := zero : Int_scope. Notation "1" := one : Int_scope. Infix "+" := plus : Int_scope. Infix "-" := minus : Int_scope. Infix "*" := mult : Int_scope. Notation "- x" := (opp x) : Int_scope. Open Scope Int_scope. (* First, int is a ring: *) Axiom ring : @ring_theory t 0 1 plus mult minus opp (@eq t). (* int should also be ordered: *) Parameter le : t -> t -> Prop. Parameter lt : t -> t -> Prop. Parameter ge : t -> t -> Prop. Parameter gt : t -> t -> Prop. Notation "x <= y" := (le x y): Int_scope. Notation "x < y" := (lt x y) : Int_scope. Notation "x >= y" := (ge x y) : Int_scope. Notation "x > y" := (gt x y): Int_scope. Axiom le_lt_iff : forall i j, (i<=j) <-> ~(j=j) <-> (j<=i). Axiom gt_lt_iff : forall i j, (i>j) <-> (j j i i<>j. (* Compatibilities *) Axiom lt_0_1 : 0<1. Axiom plus_le_compat : forall i j k l, i<=j -> k<=l -> i+k<=j+l. Axiom opp_le_compat : forall i j, i<=j -> (-j)<=(-i). Axiom mult_lt_compat_l : forall i j k, 0 < k -> i < j -> k*i t -> comparison. Infix "?=" := compare (at level 70, no associativity) : Int_scope. Axiom compare_Eq : forall i j, compare i j = Eq <-> i=j. Axiom compare_Lt : forall i j, compare i j = Lt <-> i i>j. (* Up to here, these requirements could be fulfilled by any totally ordered ring. Let's now be int-specific: *) Axiom le_lt_int : forall x y, x x<=y+-(1). 
(* Btw, lt_0_1 could be deduced from this last axiom *) End Int. (* Of course, Z is a model for our abstract int *) Module Z_as_Int <: Int. Open Scope Z_scope. Definition t := Z. Definition zero := 0. Definition one := 1. Definition plus := Z.add. Definition opp := Z.opp. Definition minus := Z.sub. Definition mult := Z.mul. Lemma ring : @ring_theory t zero one plus mult minus opp (@eq t). Proof. constructor. exact Z.add_0_l. exact Z.add_comm. exact Z.add_assoc. exact Z.mul_1_l. exact Z.mul_comm. exact Z.mul_assoc. exact Z.mul_add_distr_r. unfold minus, Z.sub; auto. exact Z.add_opp_diag_r. Qed. Definition le := Z.le. Definition lt := Z.lt. Definition ge := Z.ge. Definition gt := Z.gt. Definition le_lt_iff := Z.le_ngt. Definition ge_le_iff := Z.ge_le_iff. Definition gt_lt_iff := Z.gt_lt_iff. Definition lt_trans := Z.lt_trans. Definition lt_not_eq := Z.lt_neq. Definition lt_0_1 := Z.lt_0_1. Definition plus_le_compat := Z.add_le_mono. Definition mult_lt_compat_l := Zmult_lt_compat_l. Lemma opp_le_compat i j : i<=j -> (-j)<=(-i). Proof. apply -> Z.opp_le_mono. Qed. Definition compare := Z.compare. Definition compare_Eq := Z.compare_eq_iff. Lemma compare_Lt i j : compare i j = Lt <-> i i>j. Proof. reflexivity. Qed. Definition le_lt_int := Z.lt_le_pred. End Z_as_Int. Module IntProperties (I:Int). Import I. Local Notation int := I.t. (* Primo, some consequences of being a ring theory... *) Definition two := 1+1. Notation "2" := two : Int_scope. (* Aliases for properties packed in the ring record. *) Definition plus_assoc := ring.(Radd_assoc). Definition plus_comm := ring.(Radd_comm). Definition plus_0_l := ring.(Radd_0_l). Definition mult_assoc := ring.(Rmul_assoc). Definition mult_comm := ring.(Rmul_comm). Definition mult_1_l := ring.(Rmul_1_l). Definition mult_plus_distr_r := ring.(Rdistr_l). Definition opp_def := ring.(Ropp_def). Definition minus_def := ring.(Rsub_def). Opaque plus_assoc plus_comm plus_0_l mult_assoc mult_comm mult_1_l mult_plus_distr_r opp_def minus_def. (* More facts about plus *) Lemma plus_0_r : forall x, x+0 = x. Proof. intros; rewrite plus_comm; apply plus_0_l. Qed. Lemma plus_0_r_reverse : forall x, x = x+0. Proof. intros; symmetry; apply plus_0_r. Qed. Lemma plus_assoc_reverse : forall x y z, x+y+z = x+(y+z). Proof. intros; symmetry; apply plus_assoc. Qed. Lemma plus_permute : forall x y z, x+(y+z) = y+(x+z). Proof. intros; do 2 rewrite plus_assoc; f_equal; apply plus_comm. Qed. Lemma plus_reg_l : forall x y z, x+y = x+z -> y = z. Proof. intros. rewrite (plus_0_r_reverse y), (plus_0_r_reverse z), <-(opp_def x). now rewrite plus_permute, plus_assoc, H, <- plus_assoc, plus_permute. Qed. (* More facts about mult *) Lemma mult_assoc_reverse : forall x y z, x*y*z = x*(y*z). Proof. intros; symmetry; apply mult_assoc. Qed. Lemma mult_plus_distr_l : forall x y z, x*(y+z)=x*y+x*z. Proof. intros. rewrite (mult_comm x (y+z)), (mult_comm x y), (mult_comm x z). apply mult_plus_distr_r. Qed. Lemma mult_0_l : forall x, 0*x = 0. Proof. intros. generalize (mult_plus_distr_r 0 1 x). rewrite plus_0_l, mult_1_l, plus_comm; intros. apply plus_reg_l with x. rewrite <- H. apply plus_0_r_reverse. Qed. (* More facts about opp *) Definition plus_opp_r := opp_def. Lemma plus_opp_l : forall x, -x + x = 0. Proof. intros; now rewrite plus_comm, opp_def. Qed. Lemma mult_opp_comm : forall x y, - x * y = x * - y. Proof. intros. apply plus_reg_l with (x*y). rewrite <- mult_plus_distr_l, <- mult_plus_distr_r. now rewrite opp_def, opp_def, mult_0_l, mult_comm, mult_0_l. Qed. 
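(* An illustrative lemma, not part of the original development: [mult_0_l]
   is proved above from the ring structure, and its right-hand counterpart
   follows by commutativity, in the same way [plus_0_r] is derived from
   [plus_0_l]. The name [mult_0_r_example] is introduced only for this
   sketch. *)
Lemma mult_0_r_example : forall x, x*0 = 0.
Proof.
 intros; rewrite mult_comm; apply mult_0_l.
Qed.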
Lemma opp_eq_mult_neg_1 : forall x, -x = x * -(1). Proof. intros; now rewrite mult_comm, mult_opp_comm, mult_1_l. Qed. Lemma opp_involutive : forall x, -(-x) = x. Proof. intros. apply plus_reg_l with (-x). now rewrite opp_def, plus_comm, opp_def. Qed. Lemma opp_plus_distr : forall x y, -(x+y) = -x + -y. Proof. intros. apply plus_reg_l with (x+y). rewrite opp_def. rewrite plus_permute. do 2 rewrite plus_assoc. now rewrite (plus_comm (-x)), opp_def, plus_0_l, opp_def. Qed. Lemma opp_mult_distr_r : forall x y, -(x*y) = x * -y. Proof. intros. rewrite <- mult_opp_comm. apply plus_reg_l with (x*y). now rewrite opp_def, <-mult_plus_distr_r, opp_def, mult_0_l. Qed. Lemma egal_left : forall n m, n=m -> n+-m = 0. Proof. intros; subst; apply opp_def. Qed. Lemma ne_left_2 : forall x y : int, x<>y -> 0<>(x + - y). Proof. intros; contradict H. apply (plus_reg_l (-y)). now rewrite plus_opp_l, plus_comm, H. Qed. (* Special lemmas for factorisation. *) Lemma red_factor0 : forall n, n = n*1. Proof. symmetry; rewrite mult_comm; apply mult_1_l. Qed. Lemma red_factor1 : forall n, n+n = n*2. Proof. intros; unfold two. now rewrite mult_comm, mult_plus_distr_r, mult_1_l. Qed. Lemma red_factor2 : forall n m, n + n*m = n * (1+m). Proof. intros; rewrite mult_plus_distr_l. f_equal; now rewrite mult_comm, mult_1_l. Qed. Lemma red_factor3 : forall n m, n*m + n = n*(1+m). Proof. intros; now rewrite plus_comm, red_factor2. Qed. Lemma red_factor4 : forall n m p, n*m + n*p = n*(m+p). Proof. intros; now rewrite mult_plus_distr_l. Qed. Lemma red_factor5 : forall n m , n * 0 + m = m. Proof. intros; now rewrite mult_comm, mult_0_l, plus_0_l. Qed. Definition red_factor6 := plus_0_r_reverse. (* Specialized distributivities *) Hint Rewrite mult_plus_distr_l mult_plus_distr_r mult_assoc : int. Hint Rewrite <- plus_assoc : int. Lemma OMEGA10 : forall v c1 c2 l1 l2 k1 k2 : int, (v * c1 + l1) * k1 + (v * c2 + l2) * k2 = v * (c1 * k1 + c2 * k2) + (l1 * k1 + l2 * k2). Proof. intros; autorewrite with int; f_equal; now rewrite plus_permute. Qed. Lemma OMEGA11 : forall v1 c1 l1 l2 k1 : int, (v1 * c1 + l1) * k1 + l2 = v1 * (c1 * k1) + (l1 * k1 + l2). Proof. intros; now autorewrite with int. Qed. Lemma OMEGA12 : forall v2 c2 l1 l2 k2 : int, l1 + (v2 * c2 + l2) * k2 = v2 * (c2 * k2) + (l1 + l2 * k2). Proof. intros; autorewrite with int; now rewrite plus_permute. Qed. Lemma OMEGA13 : forall v l1 l2 x : int, v * -x + l1 + (v * x + l2) = l1 + l2. Proof. intros; autorewrite with int. rewrite plus_permute; f_equal. rewrite plus_assoc. now rewrite <- mult_plus_distr_l, plus_opp_l, mult_comm, mult_0_l, plus_0_l. Qed. Lemma OMEGA15 : forall v c1 c2 l1 l2 k2 : int, v * c1 + l1 + (v * c2 + l2) * k2 = v * (c1 + c2 * k2) + (l1 + l2 * k2). Proof. intros; autorewrite with int; f_equal; now rewrite plus_permute. Qed. Lemma OMEGA16 : forall v c l k : int, (v * c + l) * k = v * (c * k) + l * k. Proof. intros; now autorewrite with int. Qed. Lemma sum1 : forall a b c d : int, 0 = a -> 0 = b -> 0 = a * c + b * d. Proof. intros; elim H; elim H0; simpl; auto. now rewrite mult_0_l, mult_0_l, plus_0_l. Qed. (* Secondo, some results about order (and equality) *) Lemma lt_irrefl : forall n, ~ n m False. Proof. intros; elim (lt_irrefl _ (lt_trans _ _ _ H H0)); auto. Qed. Lemma lt_le_weak : forall n m, n n<=m. Proof. intros; rewrite le_lt_iff; intro H'; eapply lt_antisym; eauto. Qed. Lemma le_refl : forall n, n<=n. Proof. intros; rewrite le_lt_iff; apply lt_irrefl; auto. Qed. Lemma le_antisym : forall n m, n<=m -> m<=n -> n=m. Proof. 
intros n m; do 2 rewrite le_lt_iff; intros. rewrite <- compare_Lt in H0. rewrite <- gt_lt_iff, <- compare_Gt in H. rewrite <- compare_Eq. destruct compare; intuition. Qed. Lemma lt_eq_lt_dec : forall n m, { n ~(m<=n). Proof. intros. rewrite le_lt_iff. destruct (lt_dec n m); intuition. Qed. Lemma le_dec : forall n m: int, { n<=m } + { ~n<=m }. Proof. intros; destruct (lt_dec m n); [right|left]; rewrite le_lt_iff; intuition. Qed. Lemma le_lt_dec : forall n m, { n<=m } + { m true | _ => false end. Lemma beq_iff : forall i j, beq i j = true <-> i=j. Proof. intros; unfold beq; generalize (compare_Eq i j). destruct compare; intuition discriminate. Qed. Lemma beq_true : forall i j, beq i j = true -> i=j. Proof. intros. rewrite <- beq_iff; auto. Qed. Lemma beq_false : forall i j, beq i j = false -> i<>j. Proof. intros. intro H'. rewrite <- beq_iff in H'; rewrite H' in H; discriminate. Qed. Lemma eq_dec : forall n m:int, { n=m } + { n<>m }. Proof. intros; generalize (beq_iff n m); destruct beq; [left|right]; intuition. Qed. Definition bgt i j := match compare i j with Gt => true | _ => false end. Lemma bgt_iff : forall i j, bgt i j = true <-> i>j. Proof. intros; unfold bgt; generalize (compare_Gt i j). destruct compare; intuition discriminate. Qed. Lemma bgt_true : forall i j, bgt i j = true -> i>j. Proof. intros; now rewrite <- bgt_iff. Qed. Lemma bgt_false : forall i j, bgt i j = false -> i<=j. Proof. intros. rewrite le_lt_iff, <-gt_lt_iff, <-bgt_iff; intro H'; now rewrite H' in H. Qed. Lemma le_is_lt_or_eq : forall n m, n<=m -> { n n<>m -> n m<=p -> n<=p. Proof. intros n m p; do 3 rewrite le_lt_iff; intros A B C. destruct (lt_eq_lt_dec p m) as [[H|H]|H]; subst; auto. generalize (lt_trans _ _ _ H C); intuition. Qed. (* order and operations *) Lemma le_0_neg : forall n, 0 <= n <-> -n <= 0. Proof. intros. pattern 0 at 2; rewrite <- (mult_0_l (-(1))). rewrite <- opp_eq_mult_neg_1. split; intros. apply opp_le_compat; auto. rewrite <-(opp_involutive 0), <-(opp_involutive n). apply opp_le_compat; auto. Qed. Lemma le_0_neg' : forall n, n <= 0 <-> 0 <= -n. Proof. intros; rewrite le_0_neg, opp_involutive; intuition. Qed. Lemma plus_le_reg_r : forall n m p, n + p <= m + p -> n <= m. Proof. intros. replace n with ((n+p)+-p). replace m with ((m+p)+-p). apply plus_le_compat; auto. apply le_refl. now rewrite <- plus_assoc, opp_def, plus_0_r. now rewrite <- plus_assoc, opp_def, plus_0_r. Qed. Lemma plus_le_lt_compat : forall n m p q, n<=m -> p n+p p n+p -m < -n. Proof. intros n m; do 2 rewrite lt_le_iff; intros H; contradict H. rewrite <-(opp_involutive m), <-(opp_involutive n). apply opp_le_compat; auto. Qed. Lemma lt_0_neg : forall n, 0 < n <-> -n < 0. Proof. intros. pattern 0 at 2; rewrite <- (mult_0_l (-(1))). rewrite <- opp_eq_mult_neg_1. split; intros. apply opp_lt_compat; auto. rewrite <-(opp_involutive 0), <-(opp_involutive n). apply opp_lt_compat; auto. Qed. Lemma lt_0_neg' : forall n, n < 0 <-> 0 < -n. Proof. intros; rewrite lt_0_neg, opp_involutive; intuition. Qed. Lemma mult_lt_0_compat : forall n m, 0 < n -> 0 < m -> 0 < n*m. Proof. intros. rewrite <- (mult_0_l n), mult_comm. apply mult_lt_compat_l; auto. Qed. Lemma mult_integral : forall n m, n * m = 0 -> n = 0 \/ m = 0. Proof. intros. destruct (lt_eq_lt_dec n 0) as [[Hn|Hn]|Hn]; auto; destruct (lt_eq_lt_dec m 0) as [[Hm|Hm]|Hm]; auto; exfalso. rewrite lt_0_neg' in Hn. rewrite lt_0_neg' in Hm. generalize (mult_lt_0_compat _ _ Hn Hm). rewrite <- opp_mult_distr_r, mult_comm, <- opp_mult_distr_r, opp_involutive. rewrite mult_comm, H. 
exact (lt_irrefl 0). rewrite lt_0_neg' in Hn. generalize (mult_lt_0_compat _ _ Hn Hm). rewrite mult_comm, <- opp_mult_distr_r, mult_comm. rewrite H. rewrite opp_eq_mult_neg_1, mult_0_l. exact (lt_irrefl 0). rewrite lt_0_neg' in Hm. generalize (mult_lt_0_compat _ _ Hn Hm). rewrite <- opp_mult_distr_r. rewrite H. rewrite opp_eq_mult_neg_1, mult_0_l. exact (lt_irrefl 0). generalize (mult_lt_0_compat _ _ Hn Hm). rewrite H. exact (lt_irrefl 0). Qed. Lemma mult_le_compat : forall i j k l, i<=j -> k<=l -> 0<=i -> 0<=k -> i*k<=j*l. Proof. intros. destruct (le_is_lt_or_eq _ _ H1). apply le_trans with (i*l). destruct (le_is_lt_or_eq _ _ H0); [ | subst; apply le_refl]. apply lt_le_weak. apply mult_lt_compat_l; auto. generalize (le_trans _ _ _ H2 H0); clear H0 H1 H2; intros. rewrite (mult_comm i), (mult_comm j). destruct (le_is_lt_or_eq _ _ H0); [ | subst; do 2 rewrite mult_0_l; apply le_refl]. destruct (le_is_lt_or_eq _ _ H); [ | subst; apply le_refl]. apply lt_le_weak. apply mult_lt_compat_l; auto. subst i. rewrite mult_0_l. generalize (le_trans _ _ _ H2 H0); clear H0 H1 H2; intros. destruct (le_is_lt_or_eq _ _ H); [ | subst; rewrite mult_0_l; apply le_refl]. destruct (le_is_lt_or_eq _ _ H0); [ | subst; rewrite mult_comm, mult_0_l; apply le_refl]. apply lt_le_weak. apply mult_lt_0_compat; auto. Qed. Lemma sum5 : forall a b c d : int, c <> 0 -> 0 <> a -> 0 = b -> 0 <> a * c + b * d. Proof. intros. subst b; rewrite mult_0_l, plus_0_r. contradict H. symmetry in H; destruct (mult_integral _ _ H); congruence. Qed. Lemma one_neq_zero : 1 <> 0. Proof. red; intro. symmetry in H. apply (lt_not_eq 0 1); auto. apply lt_0_1. Qed. Lemma minus_one_neq_zero : -(1) <> 0. Proof. apply lt_not_eq. rewrite <- lt_0_neg. apply lt_0_1. Qed. Lemma le_left : forall n m, n <= m -> 0 <= m + - n. Proof. intros. rewrite <- (opp_def m). apply plus_le_compat. apply le_refl. apply opp_le_compat; auto. Qed. Lemma OMEGA2 : forall x y, 0 <= x -> 0 <= y -> 0 <= x + y. Proof. intros. replace 0 with (0+0). apply plus_le_compat; auto. rewrite plus_0_l; auto. Qed. Lemma OMEGA8 : forall x y, 0 <= x -> 0 <= y -> x = - y -> x = 0. Proof. intros. assert (y=-x). subst x; symmetry; apply opp_involutive. clear H1; subst y. destruct (eq_dec 0 x) as [H'|H']; auto. assert (H'':=le_neq_lt _ _ H H'). generalize (plus_le_lt_compat _ _ _ _ H0 H''). rewrite plus_opp_l, plus_0_l. intros. elim (lt_not_eq _ _ H1); auto. Qed. Lemma sum2 : forall a b c d : int, 0 <= d -> 0 = a -> 0 <= b -> 0 <= a * c + b * d. Proof. intros. subst a; rewrite mult_0_l, plus_0_l. rewrite <- (mult_0_l 0). apply mult_le_compat; auto; apply le_refl. Qed. Lemma sum3 : forall a b c d : int, 0 <= c -> 0 <= d -> 0 <= a -> 0 <= b -> 0 <= a * c + b * d. Proof. intros. rewrite <- (plus_0_l 0). apply plus_le_compat; auto. rewrite <- (mult_0_l 0). apply mult_le_compat; auto; apply le_refl. rewrite <- (mult_0_l 0). apply mult_le_compat; auto; apply le_refl. Qed. Lemma sum4 : forall k : int, k>0 -> 0 <= k. Proof. intros k; rewrite gt_lt_iff; apply lt_le_weak. Qed. (* Lemmas specific to integers (they use lt_le_int) *) Lemma lt_left : forall n m, n < m -> 0 <= m + -(1) + - n. Proof. intros; apply le_left. now rewrite <- le_lt_int. Qed. Lemma lt_left_inv : forall x y, 0 <= y + -(1) + - x -> x < y. Proof. intros. generalize (plus_le_compat _ _ _ _ H (le_refl x)); clear H. now rewrite plus_0_l, <-plus_assoc, plus_opp_l, plus_0_r, le_lt_int. Qed. Lemma OMEGA4 : forall x y z, x > 0 -> y > x -> z * y + x <> 0. Proof. intros. intro H'. rewrite gt_lt_iff in H,H0. 
destruct (lt_eq_lt_dec z 0) as [[G|G]|G]. rewrite lt_0_neg' in G. generalize (plus_le_lt_compat _ _ _ _ (le_refl (z*y)) H0). rewrite H'. pattern y at 2; rewrite <-(mult_1_l y), <-mult_plus_distr_r. intros. rewrite le_lt_int in G. rewrite <- opp_plus_distr in G. assert (0 < y) by (apply lt_trans with x; auto). generalize (mult_le_compat _ _ _ _ G (lt_le_weak _ _ H2) (le_refl 0) (le_refl 0)). rewrite mult_0_l, mult_comm, <- opp_mult_distr_r, mult_comm, <-le_0_neg', le_lt_iff. intuition. subst; rewrite mult_0_l, plus_0_l in H'; subst. apply (lt_not_eq _ _ H); auto. apply (lt_not_eq 0 (z*y+x)); auto. rewrite <- (plus_0_l 0). apply plus_lt_compat; auto. apply mult_lt_0_compat; auto. apply lt_trans with x; auto. Qed. Lemma OMEGA19 : forall x, x<>0 -> 0 <= x + -(1) \/ 0 <= x * -(1) + -(1). Proof. intros. do 2 rewrite <- le_lt_int. rewrite <- opp_eq_mult_neg_1. destruct (lt_eq_lt_dec 0 x) as [[H'|H']|H']. auto. congruence. right. rewrite <-(mult_0_l (-(1))), <-(opp_eq_mult_neg_1 0). apply opp_lt_compat; auto. Qed. Lemma mult_le_approx : forall n m p, n > 0 -> n > p -> 0 <= m * n + p -> 0 <= m. Proof. intros n m p. do 2 rewrite gt_lt_iff. do 2 rewrite le_lt_iff; intros. contradict H1. rewrite lt_0_neg' in H1. rewrite lt_0_neg'. rewrite opp_plus_distr. rewrite mult_comm, opp_mult_distr_r. rewrite le_lt_int. rewrite <- plus_assoc, (plus_comm (-p)), plus_assoc. apply lt_left. rewrite le_lt_int. rewrite le_lt_int in H0. apply le_trans with (n+-(1)); auto. apply plus_le_compat; [ | apply le_refl ]. rewrite le_lt_int in H1. generalize (mult_le_compat _ _ _ _ (lt_le_weak _ _ H) H1 (le_refl 0) (le_refl 0)). rewrite mult_0_l. rewrite mult_plus_distr_l. rewrite <- opp_eq_mult_neg_1. intros. generalize (plus_le_compat _ _ _ _ (le_refl n) H2). now rewrite plus_permute, opp_def, plus_0_r, plus_0_r. Qed. (* Some decidabilities *) Lemma dec_eq : forall i j:int, decidable (i=j). Proof. red; intros; destruct (eq_dec i j); auto. Qed. Lemma dec_ne : forall i j:int, decidable (i<>j). Proof. red; intros; destruct (eq_dec i j); auto. Qed. Lemma dec_le : forall i j:int, decidable (i<=j). Proof. red; intros; destruct (le_dec i j); auto. Qed. Lemma dec_lt : forall i j:int, decidable (i<j). Proof. red; intros; destruct (lt_dec i j); auto. Qed. Lemma dec_ge : forall i j:int, decidable (i>=j). Proof. red; intros; rewrite ge_le_iff; destruct (le_dec j i); auto. Qed. Lemma dec_gt : forall i j:int, decidable (i>j). Proof. red; intros; rewrite gt_lt_iff; destruct (lt_dec j i); auto. Qed. End IntProperties. Module IntOmega (I:Int). Import I. Module IP:=IntProperties(I). Import IP. Local Notation int := I.t. (* \subsubsection{Definition of reified integer expressions} Terms are either: \begin{itemize} \item integers [Tint] \item variables [Tvar] \item operation over integers (addition, product, opposite, subtraction) The last two are translated into additions and products. *) Inductive term : Set := | Tint : int -> term | Tplus : term -> term -> term | Tmult : term -> term -> term | Tminus : term -> term -> term | Topp : term -> term | Tvar : nat -> term. Delimit Scope romega_scope with term. Arguments Tint _%I. Arguments Tplus (_ _)%term. Arguments Tmult (_ _)%term. Arguments Tminus (_ _)%term. Arguments Topp _%term. Infix "+" := Tplus : romega_scope. Infix "*" := Tmult : romega_scope. Infix "-" := Tminus : romega_scope. Notation "- x" := (Topp x) : romega_scope. Notation "[ x ]" := (Tvar x) (at level 0) : romega_scope. (* \subsubsection{Definition of reified goals} *) (* Very restricted definition of handled predicates that should be extended to cover a wider set of operations.
Taking care of negations and disequations require solving more than a goal in parallel. This is a major improvement over previous versions. *) Inductive proposition : Set := | EqTerm : term -> term -> proposition (* equality between terms *) | LeqTerm : term -> term -> proposition (* less or equal on terms *) | TrueTerm : proposition (* true *) | FalseTerm : proposition (* false *) | Tnot : proposition -> proposition (* negation *) | GeqTerm : term -> term -> proposition | GtTerm : term -> term -> proposition | LtTerm : term -> term -> proposition | NeqTerm : term -> term -> proposition | Tor : proposition -> proposition -> proposition | Tand : proposition -> proposition -> proposition | Timp : proposition -> proposition -> proposition | Tprop : nat -> proposition. (* Definition of goals as a list of hypothesis *) Notation hyps := (list proposition). (* Definition of lists of subgoals (set of open goals) *) Notation lhyps := (list hyps). (* a single goal packed in a subgoal list *) Notation singleton := (fun a : hyps => a :: nil). (* an absurd goal *) Definition absurd := FalseTerm :: nil. (* \subsubsection{Traces for merging equations} This inductive type describes how the monomial of two equations should be merged when the equations are added. For [F_equal], both equations have the same head variable and coefficient must be added, furthermore if coefficients are opposite, [F_cancel] should be used to collapse the term. [F_left] and [F_right] indicate which monomial should be put first in the result *) Inductive t_fusion : Set := | F_equal : t_fusion | F_cancel : t_fusion | F_left : t_fusion | F_right : t_fusion. (* \subsubsection{Rewriting steps to normalize terms} *) Inductive step : Set := (* apply the rewriting steps to both subterms of an operation *) | C_DO_BOTH : step -> step -> step (* apply the rewriting step to the first branch *) | C_LEFT : step -> step (* apply the rewriting step to the second branch *) | C_RIGHT : step -> step (* apply two steps consecutively to a term *) | C_SEQ : step -> step -> step (* empty step *) | C_NOP : step (* the following operations correspond to actual rewriting *) | C_OPP_PLUS : step | C_OPP_OPP : step | C_OPP_MULT_R : step | C_OPP_ONE : step (* This is a special step that reduces the term (computation) *) | C_REDUCE : step | C_MULT_PLUS_DISTR : step | C_MULT_OPP_LEFT : step | C_MULT_ASSOC_R : step | C_PLUS_ASSOC_R : step | C_PLUS_ASSOC_L : step | C_PLUS_PERMUTE : step | C_PLUS_COMM : step | C_RED0 : step | C_RED1 : step | C_RED2 : step | C_RED3 : step | C_RED4 : step | C_RED5 : step | C_RED6 : step | C_MULT_ASSOC_REDUCED : step | C_MINUS : step | C_MULT_COMM : step. (* \subsubsection{Omega steps} *) (* The following inductive type describes steps as they can be found in the trace coming from the decision procedure Omega. 
*) Inductive t_omega : Set := (* n = 0 and n!= 0 *) | O_CONSTANT_NOT_NUL : nat -> t_omega | O_CONSTANT_NEG : nat -> t_omega (* division and approximation of an equation *) | O_DIV_APPROX : int -> int -> term -> nat -> t_omega -> nat -> t_omega (* no solution because no exact division *) | O_NOT_EXACT_DIVIDE : int -> int -> term -> nat -> nat -> t_omega (* exact division *) | O_EXACT_DIVIDE : int -> term -> nat -> t_omega -> nat -> t_omega | O_SUM : int -> nat -> int -> nat -> list t_fusion -> t_omega -> t_omega | O_CONTRADICTION : nat -> nat -> nat -> t_omega | O_MERGE_EQ : nat -> nat -> nat -> t_omega -> t_omega | O_SPLIT_INEQ : nat -> nat -> t_omega -> t_omega -> t_omega | O_CONSTANT_NUL : nat -> t_omega | O_NEGATE_CONTRADICT : nat -> nat -> t_omega | O_NEGATE_CONTRADICT_INV : nat -> nat -> nat -> t_omega | O_STATE : int -> step -> nat -> nat -> t_omega -> t_omega. (* \subsubsection{Rules for normalizing the hypothesis} *) (* These rules indicate how to normalize useful propositions of each useful hypothesis before the decomposition of hypothesis. The rules include the inversion phase for negation removal. *) Inductive p_step : Set := | P_LEFT : p_step -> p_step | P_RIGHT : p_step -> p_step | P_INVERT : step -> p_step | P_STEP : step -> p_step | P_NOP : p_step. (* List of normalizations to perform : with a constructor of type [p_step] allowing to visit both left and right branches, we would be able to restrict to only one normalization by hypothesis. And since all hypothesis are useful (otherwise they wouldn't be included), we would be able to replace [h_step] by a simple list. *) Inductive h_step : Set := pair_step : nat -> p_step -> h_step. (* \subsubsection{Rules for decomposing the hypothesis} *) (* This type allows to navigate in the logical constructors that form the predicats of the hypothesis in order to decompose them. This allows in particular to extract one hypothesis from a conjunction with possibly the right level of negations. *) Inductive direction : Set := | D_left : direction | D_right : direction | D_mono : direction. (* This type allows to extract useful components from hypothesis, either hypothesis generated by splitting a disjonction, or equations. The last constructor indicates how to solve the obtained system via the use of the trace type of Omega [t_omega] *) Inductive e_step : Set := | E_SPLIT : nat -> list direction -> e_step -> e_step -> e_step | E_EXTRACT : nat -> list direction -> e_step -> e_step | E_SOLVE : t_omega -> e_step. (* \subsection{Efficient decidable equality} *) (* For each reified data-type, we define an efficient equality test. It is not the one produced by [Decide Equality]. Then we prove two theorem allowing to eliminate such equalities : \begin{verbatim} (t1,t2: typ) (eq_typ t1 t2) = true -> t1 = t2. (t1,t2: typ) (eq_typ t1 t2) = false -> ~ t1 = t2. \end{verbatim} *) (* \subsubsection{Reified terms} *) Open Scope romega_scope. Fixpoint eq_term (t1 t2 : term) {struct t2} : bool := match t1, t2 with | Tint st1, Tint st2 => beq st1 st2 | (st11 + st12), (st21 + st22) => eq_term st11 st21 && eq_term st12 st22 | (st11 * st12), (st21 * st22) => eq_term st11 st21 && eq_term st12 st22 | (st11 - st12), (st21 - st22) => eq_term st11 st21 && eq_term st12 st22 | (- st1), (- st2) => eq_term st1 st2 | [st1], [st2] => beq_nat st1 st2 | _, _ => false end. Close Scope romega_scope. Theorem eq_term_true : forall t1 t2 : term, eq_term t1 t2 = true -> t1 = t2. Proof. 
induction t1; destruct t2; simpl in *; try discriminate; (rewrite andb_true_iff; intros (H1,H2)) || intros H; f_equal; auto using beq_true, beq_nat_true. Qed. Theorem eq_term_refl : forall t0 : term, eq_term t0 t0 = true. Proof. induction t0; simpl in *; try (apply andb_true_iff; split); trivial. - now apply beq_iff. - now apply beq_nat_true_iff. Qed. Ltac trivial_case := unfold not; intros; discriminate. Theorem eq_term_false : forall t1 t2 : term, eq_term t1 t2 = false -> t1 <> t2. Proof. intros t1 t2 H E. subst t2. now rewrite eq_term_refl in H. Qed. (* \subsubsection{Tactiques pour éliminer ces tests} Si on se contente de faire un [Case (eq_typ t1 t2)] on perd totalement dans chaque branche le fait que [t1=t2] ou [~t1=t2]. Initialement, les développements avaient été réalisés avec les tests rendus par [Decide Equality], c'est à dire un test rendant des termes du type [{t1=t2}+{~t1=t2}]. Faire une élimination sur un tel test préserve bien l'information voulue mais calculatoirement de telles fonctions sont trop lentes. *) (* Les tactiques définies si après se comportent exactement comme si on avait utilisé le test précédent et fait une elimination dessus. *) Ltac elim_eq_term t1 t2 := pattern (eq_term t1 t2); apply bool_eq_ind; intro Aux; [ generalize (eq_term_true t1 t2 Aux); clear Aux | generalize (eq_term_false t1 t2 Aux); clear Aux ]. Ltac elim_beq t1 t2 := pattern (beq t1 t2); apply bool_eq_ind; intro Aux; [ generalize (beq_true t1 t2 Aux); clear Aux | generalize (beq_false t1 t2 Aux); clear Aux ]. Ltac elim_bgt t1 t2 := pattern (bgt t1 t2); apply bool_eq_ind; intro Aux; [ generalize (bgt_true t1 t2 Aux); clear Aux | generalize (bgt_false t1 t2 Aux); clear Aux ]. (* \subsection{Interprétations} \subsubsection{Interprétation des termes dans Z} *) Fixpoint interp_term (env : list int) (t : term) {struct t} : int := match t with | Tint x => x | (t1 + t2)%term => interp_term env t1 + interp_term env t2 | (t1 * t2)%term => interp_term env t1 * interp_term env t2 | (t1 - t2)%term => interp_term env t1 - interp_term env t2 | (- t)%term => - interp_term env t | [n]%term => nth n env 0 end. (* \subsubsection{Interprétation des prédicats} *) Fixpoint interp_proposition (envp : list Prop) (env : list int) (p : proposition) {struct p} : Prop := match p with | EqTerm t1 t2 => interp_term env t1 = interp_term env t2 | LeqTerm t1 t2 => interp_term env t1 <= interp_term env t2 | TrueTerm => True | FalseTerm => False | Tnot p' => ~ interp_proposition envp env p' | GeqTerm t1 t2 => interp_term env t1 >= interp_term env t2 | GtTerm t1 t2 => interp_term env t1 > interp_term env t2 | LtTerm t1 t2 => interp_term env t1 < interp_term env t2 | NeqTerm t1 t2 => (interp_term env t1)<>(interp_term env t2) | Tor p1 p2 => interp_proposition envp env p1 \/ interp_proposition envp env p2 | Tand p1 p2 => interp_proposition envp env p1 /\ interp_proposition envp env p2 | Timp p1 p2 => interp_proposition envp env p1 -> interp_proposition envp env p2 | Tprop n => nth n envp True end. (* \subsubsection{Inteprétation des listes d'hypothèses} \paragraph{Sous forme de conjonction} Interprétation sous forme d'une conjonction d'hypothèses plus faciles à manipuler individuellement *) Fixpoint interp_hyps (envp : list Prop) (env : list int) (l : hyps) {struct l} : Prop := match l with | nil => True | p' :: l' => interp_proposition envp env p' /\ interp_hyps envp env l' end. 
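(* Illustrative addition, not part of the original development: a tiny
   hypothetical check of how [interp_term] maps a reified expression back to
   an [int] computation; the name [interp_term_example] and the chosen term
   are ours. *)
Example interp_term_example (x y : int) :
  interp_term (x :: y :: nil) (Tplus (Tmult (Tvar O) (Tint 1)) (Tvar (S O))) = x * 1 + y.
Proof. reflexivity. Qed.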
(* \paragraph{sous forme de but} C'est cette interpétation que l'on utilise sur le but (car on utilise [Generalize] et qu'une conjonction est forcément lourde (répétition des types dans les conjonctions intermédiaires) *) Fixpoint interp_goal_concl (c : proposition) (envp : list Prop) (env : list int) (l : hyps) {struct l} : Prop := match l with | nil => interp_proposition envp env c | p' :: l' => interp_proposition envp env p' -> interp_goal_concl c envp env l' end. Notation interp_goal := (interp_goal_concl FalseTerm). (* Les théorèmes qui suivent assurent la correspondance entre les deux interprétations. *) Theorem goal_to_hyps : forall (envp : list Prop) (env : list int) (l : hyps), (interp_hyps envp env l -> False) -> interp_goal envp env l. Proof. simple induction l; [ simpl; auto | simpl; intros a l1 H1 H2 H3; apply H1; intro H4; apply H2; auto ]. Qed. Theorem hyps_to_goal : forall (envp : list Prop) (env : list int) (l : hyps), interp_goal envp env l -> interp_hyps envp env l -> False. Proof. simple induction l; simpl; [ auto | intros; apply H; elim H1; auto ]. Qed. (* \subsection{Manipulations sur les hypothèses} *) (* \subsubsection{Définitions de base de stabilité pour la réflexion} *) (* Une opération laisse un terme stable si l'égalité est préservée *) Definition term_stable (f : term -> term) := forall (e : list int) (t : term), interp_term e t = interp_term e (f t). (* Une opération est valide sur une hypothèse, si l'hypothèse implique le résultat de l'opération. \emph{Attention : cela ne concerne que des opérations sur les hypothèses et non sur les buts (contravariance)}. On définit la validité pour une opération prenant une ou deux propositions en argument (cela suffit pour omega). *) Definition valid1 (f : proposition -> proposition) := forall (ep : list Prop) (e : list int) (p1 : proposition), interp_proposition ep e p1 -> interp_proposition ep e (f p1). Definition valid2 (f : proposition -> proposition -> proposition) := forall (ep : list Prop) (e : list int) (p1 p2 : proposition), interp_proposition ep e p1 -> interp_proposition ep e p2 -> interp_proposition ep e (f p1 p2). (* Dans cette notion de validité, la fonction prend directement une liste de propositions et rend une nouvelle liste de proposition. On reste contravariant *) Definition valid_hyps (f : hyps -> hyps) := forall (ep : list Prop) (e : list int) (lp : hyps), interp_hyps ep e lp -> interp_hyps ep e (f lp). (* Enfin ce théorème élimine la contravariance et nous ramène à une opération sur les buts *) Theorem valid_goal : forall (ep : list Prop) (env : list int) (l : hyps) (a : hyps -> hyps), valid_hyps a -> interp_goal ep env (a l) -> interp_goal ep env l. Proof. intros; simpl; apply goal_to_hyps; intro H1; apply (hyps_to_goal ep env (a l) H0); apply H; assumption. Qed. (* \subsubsection{Généralisation a des listes de buts (disjonctions)} *) Fixpoint interp_list_hyps (envp : list Prop) (env : list int) (l : lhyps) {struct l} : Prop := match l with | nil => False | h :: l' => interp_hyps envp env h \/ interp_list_hyps envp env l' end. Fixpoint interp_list_goal (envp : list Prop) (env : list int) (l : lhyps) {struct l} : Prop := match l with | nil => True | h :: l' => interp_goal envp env h /\ interp_list_goal envp env l' end. Theorem list_goal_to_hyps : forall (envp : list Prop) (env : list int) (l : lhyps), (interp_list_hyps envp env l -> False) -> interp_list_goal envp env l. Proof. 
simple induction l; simpl; [ auto | intros h1 l1 H H1; split; [ apply goal_to_hyps; intro H2; apply H1; auto | apply H; intro H2; apply H1; auto ] ]. Qed. Theorem list_hyps_to_goal : forall (envp : list Prop) (env : list int) (l : lhyps), interp_list_goal envp env l -> interp_list_hyps envp env l -> False. Proof. simple induction l; simpl; [ auto | intros h1 l1 H (H1, H2) H3; elim H3; intro H4; [ apply hyps_to_goal with (1 := H1); assumption | auto ] ]. Qed. Definition valid_list_hyps (f : hyps -> lhyps) := forall (ep : list Prop) (e : list int) (lp : hyps), interp_hyps ep e lp -> interp_list_hyps ep e (f lp). Definition valid_list_goal (f : hyps -> lhyps) := forall (ep : list Prop) (e : list int) (lp : hyps), interp_list_goal ep e (f lp) -> interp_goal ep e lp. Theorem goal_valid : forall f : hyps -> lhyps, valid_list_hyps f -> valid_list_goal f. Proof. unfold valid_list_goal; intros f H ep e lp H1; apply goal_to_hyps; intro H2; apply list_hyps_to_goal with (1 := H1); apply (H ep e lp); assumption. Qed. Theorem append_valid : forall (ep : list Prop) (e : list int) (l1 l2 : lhyps), interp_list_hyps ep e l1 \/ interp_list_hyps ep e l2 -> interp_list_hyps ep e (l1 ++ l2). Proof. intros ep e; simple induction l1; [ simpl; intros l2 [H| H]; [ contradiction | trivial ] | simpl; intros h1 t1 HR l2 [[H| H]| H]; [ auto | right; apply (HR l2); left; trivial | right; apply (HR l2); right; trivial ] ]. Qed. (* \subsubsection{Opérateurs valides sur les hypothèses} *) (* Extraire une hypothèse de la liste *) Definition nth_hyps (n : nat) (l : hyps) := nth n l TrueTerm. Theorem nth_valid : forall (ep : list Prop) (e : list int) (i : nat) (l : hyps), interp_hyps ep e l -> interp_proposition ep e (nth_hyps i l). Proof. unfold nth_hyps; simple induction i; [ simple induction l; simpl; [ auto | intros; elim H0; auto ] | intros n H; simple induction l; [ simpl; trivial | intros; simpl; apply H; elim H1; auto ] ]. Qed. (* Appliquer une opération (valide) sur deux hypothèses extraites de la liste et ajouter le résultat à la liste. *) Definition apply_oper_2 (i j : nat) (f : proposition -> proposition -> proposition) (l : hyps) := f (nth_hyps i l) (nth_hyps j l) :: l. Theorem apply_oper_2_valid : forall (i j : nat) (f : proposition -> proposition -> proposition), valid2 f -> valid_hyps (apply_oper_2 i j f). Proof. intros i j f Hf; unfold apply_oper_2, valid_hyps; simpl; intros lp Hlp; split; [ apply Hf; apply nth_valid; assumption | assumption ]. Qed. (* Modifier une hypothèse par application d'une opération valide *) Fixpoint apply_oper_1 (i : nat) (f : proposition -> proposition) (l : hyps) {struct i} : hyps := match l with | nil => nil (A:=proposition) | p :: l' => match i with | O => f p :: l' | S j => p :: apply_oper_1 j f l' end end. Theorem apply_oper_1_valid : forall (i : nat) (f : proposition -> proposition), valid1 f -> valid_hyps (apply_oper_1 i f). Proof. unfold valid_hyps; intros i f Hf ep e; elim i; [ intro lp; case lp; [ simpl; trivial | simpl; intros p l' (H1, H2); split; [ apply Hf with (1 := H1) | assumption ] ] | intros n Hrec lp; case lp; [ simpl; auto | simpl; intros p l' (H1, H2); split; [ assumption | apply Hrec; assumption ] ] ]. Qed. (* \subsubsection{Manipulations de termes} *) (* Les fonctions suivantes permettent d'appliquer une fonction de réécriture sur un sous terme du terme principal. 
Avec la composition, cela permet de construire des réécritures complexes proches des tactiques de conversion *) Definition apply_left (f : term -> term) (t : term) := match t with | (x + y)%term => (f x + y)%term | (x * y)%term => (f x * y)%term | (- x)%term => (- f x)%term | x => x end. Definition apply_right (f : term -> term) (t : term) := match t with | (x + y)%term => (x + f y)%term | (x * y)%term => (x * f y)%term | x => x end. Definition apply_both (f g : term -> term) (t : term) := match t with | (x + y)%term => (f x + g y)%term | (x * y)%term => (f x * g y)%term | x => x end. (* Les théorèmes suivants montrent la stabilité (conditionnée) des fonctions. *) Theorem apply_left_stable : forall f : term -> term, term_stable f -> term_stable (apply_left f). Proof. unfold term_stable; intros f H e t; case t; auto; simpl; intros; elim H; trivial. Qed. Theorem apply_right_stable : forall f : term -> term, term_stable f -> term_stable (apply_right f). Proof. unfold term_stable; intros f H e t; case t; auto; simpl; intros t0 t1; elim H; trivial. Qed. Theorem apply_both_stable : forall f g : term -> term, term_stable f -> term_stable g -> term_stable (apply_both f g). Proof. unfold term_stable; intros f g H1 H2 e t; case t; auto; simpl; intros t0 t1; elim H1; elim H2; trivial. Qed. Theorem compose_term_stable : forall f g : term -> term, term_stable f -> term_stable g -> term_stable (fun t : term => f (g t)). Proof. unfold term_stable; intros f g Hf Hg e t; elim Hf; apply Hg. Qed. (* \subsection{Les règles de réécriture} *) (* Chacune des règles de réécriture est accompagnée par sa preuve de stabilité. Toutes ces preuves ont la même forme : il faut analyser suivant la forme du terme (élimination de chaque Case). On a besoin d'une élimination uniquement dans les cas d'utilisation d'égalité décidable. Cette tactique itère la décomposition des Case. Elle est constituée de deux fonctions s'appelant mutuellement : \begin{itemize} \item une fonction d'enrobage qui lance la recherche sur le but, \item une fonction récursive qui décompose ce but. Quand elle a trouvé un Case, elle l'élimine. \end{itemize} Les motifs sur les cas sont très imparfaits et dans certains cas, il semble que cela ne marche pas. On aimerait plutot un motif de la forme [ Case (?1 :: T) of _ end ] permettant de s'assurer que l'on utilise le bon type. Chaque élimination introduit correctement exactement le nombre d'hypothèses nécessaires et conserve dans le cas d'une égalité la connaissance du résultat du test en faisant la réécriture. Pour un test de comparaison, on conserve simplement le résultat. Cette fonction déborde très largement la résolution des réécritures simples et fait une bonne partie des preuves des pas de Omega. 
*) (* \subsubsection{La tactique pour prouver la stabilité} *) Ltac loop t := match t with (* Global *) | (?X1 = ?X2) => loop X1 || loop X2 | (_ -> ?X1) => loop X1 (* Interpretations *) | (interp_hyps _ _ ?X1) => loop X1 | (interp_list_hyps _ _ ?X1) => loop X1 | (interp_proposition _ _ ?X1) => loop X1 | (interp_term _ ?X1) => loop X1 (* Propositions *) | (EqTerm ?X1 ?X2) => loop X1 || loop X2 | (LeqTerm ?X1 ?X2) => loop X1 || loop X2 (* Termes *) | (?X1 + ?X2)%term => loop X1 || loop X2 | (?X1 - ?X2)%term => loop X1 || loop X2 | (?X1 * ?X2)%term => loop X1 || loop X2 | (- ?X1)%term => loop X1 | (Tint ?X1) => loop X1 (* Eliminations *) | match ?X1 with | EqTerm x x0 => _ | LeqTerm x x0 => _ | TrueTerm => _ | FalseTerm => _ | Tnot x => _ | GeqTerm x x0 => _ | GtTerm x x0 => _ | LtTerm x x0 => _ | NeqTerm x x0 => _ | Tor x x0 => _ | Tand x x0 => _ | Timp x x0 => _ | Tprop x => _ end => destruct X1; auto; Simplify | match ?X1 with | Tint x => _ | (x + x0)%term => _ | (x * x0)%term => _ | (x - x0)%term => _ | (- x)%term => _ | [x]%term => _ end => destruct X1; auto; Simplify | (if beq ?X1 ?X2 then _ else _) => let H := fresh "H" in elim_beq X1 X2; intro H; try (rewrite H in *; clear H); simpl; auto; Simplify | (if bgt ?X1 ?X2 then _ else _) => let H := fresh "H" in elim_bgt X1 X2; intro H; simpl; auto; Simplify | (if eq_term ?X1 ?X2 then _ else _) => let H := fresh "H" in elim_eq_term X1 X2; intro H; try (rewrite H in *; clear H); simpl; auto; Simplify | (if _ && _ then _ else _) => rewrite andb_if; Simplify | (if negb _ then _ else _) => rewrite negb_if; Simplify | _ => fail end with Simplify := match goal with | |- ?X1 => try loop X1 | _ => idtac end. Ltac prove_stable x th := match constr:x with | ?X1 => unfold term_stable, X1; intros; Simplify; simpl; apply th end. (* \subsubsection{Les règles elle mêmes} *) Definition Tplus_assoc_l (t : term) := match t with | (n + (m + p))%term => (n + m + p)%term | _ => t end. Theorem Tplus_assoc_l_stable : term_stable Tplus_assoc_l. Proof. prove_stable Tplus_assoc_l (ring.(Radd_assoc)). Qed. Definition Tplus_assoc_r (t : term) := match t with | (n + m + p)%term => (n + (m + p))%term | _ => t end. Theorem Tplus_assoc_r_stable : term_stable Tplus_assoc_r. Proof. prove_stable Tplus_assoc_r plus_assoc_reverse. Qed. Definition Tmult_assoc_r (t : term) := match t with | (n * m * p)%term => (n * (m * p))%term | _ => t end. Theorem Tmult_assoc_r_stable : term_stable Tmult_assoc_r. Proof. prove_stable Tmult_assoc_r mult_assoc_reverse. Qed. Definition Tplus_permute (t : term) := match t with | (n + (m + p))%term => (m + (n + p))%term | _ => t end. Theorem Tplus_permute_stable : term_stable Tplus_permute. Proof. prove_stable Tplus_permute plus_permute. Qed. Definition Tplus_comm (t : term) := match t with | (x + y)%term => (y + x)%term | _ => t end. Theorem Tplus_comm_stable : term_stable Tplus_comm. Proof. prove_stable Tplus_comm plus_comm. Qed. Definition Tmult_comm (t : term) := match t with | (x * y)%term => (y * x)%term | _ => t end. Theorem Tmult_comm_stable : term_stable Tmult_comm. Proof. prove_stable Tmult_comm mult_comm. Qed. Definition T_OMEGA10 (t : term) := match t with | ((v * Tint c1 + l1) * Tint k1 + (v' * Tint c2 + l2) * Tint k2)%term => if eq_term v v' then (v * Tint (c1 * k1 + c2 * k2)%I + (l1 * Tint k1 + l2 * Tint k2))%term else t | _ => t end. Theorem T_OMEGA10_stable : term_stable T_OMEGA10. Proof. prove_stable T_OMEGA10 OMEGA10. Qed. 
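(* Illustrative addition, not in the original file: on a toplevel sum, the
   rewriting rule [Tplus_comm] just swaps the two subterms, which is exactly
   the situation covered by [Tplus_comm_stable] above. *)
Example Tplus_comm_example (e : list int) (t u : term) :
  interp_term e (Tplus_comm (t + u)%term) = interp_term e (u + t)%term.
Proof. reflexivity. Qed.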
Definition T_OMEGA11 (t : term) := match t with | ((v1 * Tint c1 + l1) * Tint k1 + l2)%term => (v1 * Tint (c1 * k1) + (l1 * Tint k1 + l2))%term | _ => t end. Theorem T_OMEGA11_stable : term_stable T_OMEGA11. Proof. prove_stable T_OMEGA11 OMEGA11. Qed. Definition T_OMEGA12 (t : term) := match t with | (l1 + (v2 * Tint c2 + l2) * Tint k2)%term => (v2 * Tint (c2 * k2) + (l1 + l2 * Tint k2))%term | _ => t end. Theorem T_OMEGA12_stable : term_stable T_OMEGA12. Proof. prove_stable T_OMEGA12 OMEGA12. Qed. Definition T_OMEGA13 (t : term) := match t with | (v * Tint x + l1 + (v' * Tint x' + l2))%term => if eq_term v v' && beq x (-x') then (l1+l2)%term else t | _ => t end. Theorem T_OMEGA13_stable : term_stable T_OMEGA13. Proof. unfold term_stable, T_OMEGA13; intros; Simplify; simpl; apply OMEGA13. Qed. Definition T_OMEGA15 (t : term) := match t with | (v * Tint c1 + l1 + (v' * Tint c2 + l2) * Tint k2)%term => if eq_term v v' then (v * Tint (c1 + c2 * k2)%I + (l1 + l2 * Tint k2))%term else t | _ => t end. Theorem T_OMEGA15_stable : term_stable T_OMEGA15. Proof. prove_stable T_OMEGA15 OMEGA15. Qed. Definition T_OMEGA16 (t : term) := match t with | ((v * Tint c + l) * Tint k)%term => (v * Tint (c * k) + l * Tint k)%term | _ => t end. Theorem T_OMEGA16_stable : term_stable T_OMEGA16. Proof. prove_stable T_OMEGA16 OMEGA16. Qed. Definition Tred_factor5 (t : term) := match t with | (x * Tint c + y)%term => if beq c 0 then y else t | _ => t end. Theorem Tred_factor5_stable : term_stable Tred_factor5. Proof. prove_stable Tred_factor5 red_factor5. Qed. Definition Topp_plus (t : term) := match t with | (- (x + y))%term => (- x + - y)%term | _ => t end. Theorem Topp_plus_stable : term_stable Topp_plus. Proof. prove_stable Topp_plus opp_plus_distr. Qed. Definition Topp_opp (t : term) := match t with | (- - x)%term => x | _ => t end. Theorem Topp_opp_stable : term_stable Topp_opp. Proof. prove_stable Topp_opp opp_involutive. Qed. Definition Topp_mult_r (t : term) := match t with | (- (x * Tint k))%term => (x * Tint (- k))%term | _ => t end. Theorem Topp_mult_r_stable : term_stable Topp_mult_r. Proof. prove_stable Topp_mult_r opp_mult_distr_r. Qed. Definition Topp_one (t : term) := match t with | (- x)%term => (x * Tint (-(1)))%term | _ => t end. Theorem Topp_one_stable : term_stable Topp_one. Proof. prove_stable Topp_one opp_eq_mult_neg_1. Qed. Definition Tmult_plus_distr (t : term) := match t with | ((n + m) * p)%term => (n * p + m * p)%term | _ => t end. Theorem Tmult_plus_distr_stable : term_stable Tmult_plus_distr. Proof. prove_stable Tmult_plus_distr mult_plus_distr_r. Qed. Definition Tmult_opp_left (t : term) := match t with | (- x * Tint y)%term => (x * Tint (- y))%term | _ => t end. Theorem Tmult_opp_left_stable : term_stable Tmult_opp_left. Proof. prove_stable Tmult_opp_left mult_opp_comm. Qed. Definition Tmult_assoc_reduced (t : term) := match t with | (n * Tint m * Tint p)%term => (n * Tint (m * p))%term | _ => t end. Theorem Tmult_assoc_reduced_stable : term_stable Tmult_assoc_reduced. Proof. prove_stable Tmult_assoc_reduced mult_assoc_reverse. Qed. Definition Tred_factor0 (t : term) := (t * Tint 1)%term. Theorem Tred_factor0_stable : term_stable Tred_factor0. Proof. prove_stable Tred_factor0 red_factor0. Qed. Definition Tred_factor1 (t : term) := match t with | (x + y)%term => if eq_term x y then (x * Tint 2)%term else t | _ => t end. Theorem Tred_factor1_stable : term_stable Tred_factor1. Proof. prove_stable Tred_factor1 red_factor1. Qed. 
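(* Illustrative addition, not in the original file: [Topp_one] turns a
   toplevel opposite into a multiplication by -(1); its effect on the
   interpretation is precisely [opp_eq_mult_neg_1]. *)
Example Topp_one_example (e : list int) (t : term) :
  interp_term e (Topp_one (- t)%term) = - interp_term e t.
Proof. simpl. symmetry. apply opp_eq_mult_neg_1. Qed.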
Definition Tred_factor2 (t : term) := match t with | (x + y * Tint k)%term => if eq_term x y then (x * Tint (1 + k))%term else t | _ => t end. Theorem Tred_factor2_stable : term_stable Tred_factor2. Proof. prove_stable Tred_factor2 red_factor2. Qed. Definition Tred_factor3 (t : term) := match t with | (x * Tint k + y)%term => if eq_term x y then (x * Tint (1 + k))%term else t | _ => t end. Theorem Tred_factor3_stable : term_stable Tred_factor3. Proof. prove_stable Tred_factor3 red_factor3. Qed. Definition Tred_factor4 (t : term) := match t with | (x * Tint k1 + y * Tint k2)%term => if eq_term x y then (x * Tint (k1 + k2))%term else t | _ => t end. Theorem Tred_factor4_stable : term_stable Tred_factor4. Proof. prove_stable Tred_factor4 red_factor4. Qed. Definition Tred_factor6 (t : term) := (t + Tint 0)%term. Theorem Tred_factor6_stable : term_stable Tred_factor6. Proof. prove_stable Tred_factor6 red_factor6. Qed. Definition Tminus_def (t : term) := match t with | (x - y)%term => (x + - y)%term | _ => t end. Theorem Tminus_def_stable : term_stable Tminus_def. Proof. prove_stable Tminus_def minus_def. Qed. (* \subsection{Fonctions de réécriture complexes} *) (* \subsubsection{Fonction de réduction} *) (* Cette fonction réduit un terme dont la forme normale est un entier. Il suffit pour cela d'échanger le constructeur [Tint] avec les opérateurs réifiés. La réduction est ``gratuite''. *) Fixpoint reduce (t : term) : term := match t with | (x + y)%term => match reduce x with | Tint x' => match reduce y with | Tint y' => Tint (x' + y') | y' => (Tint x' + y')%term end | x' => (x' + reduce y)%term end | (x * y)%term => match reduce x with | Tint x' => match reduce y with | Tint y' => Tint (x' * y') | y' => (Tint x' * y')%term end | x' => (x' * reduce y)%term end | (x - y)%term => match reduce x with | Tint x' => match reduce y with | Tint y' => Tint (x' - y') | y' => (Tint x' - y')%term end | x' => (x' - reduce y)%term end | (- x)%term => match reduce x with | Tint x' => Tint (- x') | x' => (- x')%term end | _ => t end. Theorem reduce_stable : term_stable reduce. Proof. unfold term_stable; intros e t; elim t; auto; try (intros t0 H0 t1 H1; simpl; rewrite H0; rewrite H1; (case (reduce t0); [ intro z0; case (reduce t1); intros; auto | intros; auto | intros; auto | intros; auto | intros; auto | intros; auto ])); intros t0 H0; simpl; rewrite H0; case (reduce t0); intros; auto. Qed. (* \subsubsection{Fusions} \paragraph{Fusion de deux équations} *) (* On donne une somme de deux équations qui sont supposées normalisées. Cette fonction prend une trace de fusion en argument et transforme le terme en une équation normalisée. C'est une version très simplifiée du moteur de réécriture [rewrite]. *) Fixpoint fusion (trace : list t_fusion) (t : term) {struct trace} : term := match trace with | nil => reduce t | step :: trace' => match step with | F_equal => apply_right (fusion trace') (T_OMEGA10 t) | F_cancel => fusion trace' (Tred_factor5 (T_OMEGA10 t)) | F_left => apply_right (fusion trace') (T_OMEGA11 t) | F_right => apply_right (fusion trace') (T_OMEGA12 t) end end. Theorem fusion_stable : forall trace : list t_fusion, term_stable (fusion trace). Proof. 
simple induction trace; simpl; [ exact reduce_stable | intros stp l H; case stp; [ apply compose_term_stable; [ apply apply_right_stable; assumption | exact T_OMEGA10_stable ] | unfold term_stable; intros e t1; rewrite T_OMEGA10_stable; rewrite Tred_factor5_stable; apply H | apply compose_term_stable; [ apply apply_right_stable; assumption | exact T_OMEGA11_stable ] | apply compose_term_stable; [ apply apply_right_stable; assumption | exact T_OMEGA12_stable ] ] ]. Qed. (* \paragraph{Fusion de deux équations dont une sans coefficient} *) Definition fusion_right (trace : list t_fusion) (t : term) : term := match trace with | nil => reduce t (* Il faut mettre un compute *) | step :: trace' => match step with | F_equal => apply_right (fusion trace') (T_OMEGA15 t) | F_cancel => fusion trace' (Tred_factor5 (T_OMEGA15 t)) | F_left => apply_right (fusion trace') (Tplus_assoc_r t) | F_right => apply_right (fusion trace') (T_OMEGA12 t) end end. (* \paragraph{Fusion avec annihilation} *) (* Normalement le résultat est une constante *) Fixpoint fusion_cancel (trace : nat) (t : term) {struct trace} : term := match trace with | O => reduce t | S trace' => fusion_cancel trace' (T_OMEGA13 t) end. Theorem fusion_cancel_stable : forall t : nat, term_stable (fusion_cancel t). Proof. unfold term_stable, fusion_cancel; intros trace e; elim trace; [ exact (reduce_stable e) | intros n H t; elim H; exact (T_OMEGA13_stable e t) ]. Qed. (* \subsubsection{Opérations affines sur une équation} *) (* \paragraph{Multiplication scalaire et somme d'une constante} *) Fixpoint scalar_norm_add (trace : nat) (t : term) {struct trace} : term := match trace with | O => reduce t | S trace' => apply_right (scalar_norm_add trace') (T_OMEGA11 t) end. Theorem scalar_norm_add_stable : forall t : nat, term_stable (scalar_norm_add t). Proof. unfold term_stable, scalar_norm_add; intros trace; elim trace; [ exact reduce_stable | intros n H e t; elim apply_right_stable; [ exact (T_OMEGA11_stable e t) | exact H ] ]. Qed. (* \paragraph{Multiplication scalaire} *) Fixpoint scalar_norm (trace : nat) (t : term) {struct trace} : term := match trace with | O => reduce t | S trace' => apply_right (scalar_norm trace') (T_OMEGA16 t) end. Theorem scalar_norm_stable : forall t : nat, term_stable (scalar_norm t). Proof. unfold term_stable, scalar_norm; intros trace; elim trace; [ exact reduce_stable | intros n H e t; elim apply_right_stable; [ exact (T_OMEGA16_stable e t) | exact H ] ]. Qed. (* \paragraph{Somme d'une constante} *) Fixpoint add_norm (trace : nat) (t : term) {struct trace} : term := match trace with | O => reduce t | S trace' => apply_right (add_norm trace') (Tplus_assoc_r t) end. Theorem add_norm_stable : forall t : nat, term_stable (add_norm t). Proof. unfold term_stable, add_norm; intros trace; elim trace; [ exact reduce_stable | intros n H e t; elim apply_right_stable; [ exact (Tplus_assoc_r_stable e t) | exact H ] ]. Qed. 
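(* Illustrative addition, not in the original file: the normalization
   functions above ([fusion], [fusion_cancel], [scalar_norm], [add_norm], ...)
   all eventually hand constant subterms to [reduce], which folds them into a
   single [Tint], as in this small hypothetical example. *)
Example reduce_example :
  reduce ((Tint 1 + Tint 1) * Tint 1)%term = Tint ((1 + 1) * 1).
Proof. reflexivity. Qed.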
(* \subsection{La fonction de normalisation des termes (moteur de réécriture)} *) Fixpoint t_rewrite (s : step) : term -> term := match s with | C_DO_BOTH s1 s2 => apply_both (t_rewrite s1) (t_rewrite s2) | C_LEFT s => apply_left (t_rewrite s) | C_RIGHT s => apply_right (t_rewrite s) | C_SEQ s1 s2 => fun t : term => t_rewrite s2 (t_rewrite s1 t) | C_NOP => fun t : term => t | C_OPP_PLUS => Topp_plus | C_OPP_OPP => Topp_opp | C_OPP_MULT_R => Topp_mult_r | C_OPP_ONE => Topp_one | C_REDUCE => reduce | C_MULT_PLUS_DISTR => Tmult_plus_distr | C_MULT_OPP_LEFT => Tmult_opp_left | C_MULT_ASSOC_R => Tmult_assoc_r | C_PLUS_ASSOC_R => Tplus_assoc_r | C_PLUS_ASSOC_L => Tplus_assoc_l | C_PLUS_PERMUTE => Tplus_permute | C_PLUS_COMM => Tplus_comm | C_RED0 => Tred_factor0 | C_RED1 => Tred_factor1 | C_RED2 => Tred_factor2 | C_RED3 => Tred_factor3 | C_RED4 => Tred_factor4 | C_RED5 => Tred_factor5 | C_RED6 => Tred_factor6 | C_MULT_ASSOC_REDUCED => Tmult_assoc_reduced | C_MINUS => Tminus_def | C_MULT_COMM => Tmult_comm end. Theorem t_rewrite_stable : forall s : step, term_stable (t_rewrite s). Proof. simple induction s; simpl; [ intros; apply apply_both_stable; auto | intros; apply apply_left_stable; auto | intros; apply apply_right_stable; auto | unfold term_stable; intros; elim H0; apply H | unfold term_stable; auto | exact Topp_plus_stable | exact Topp_opp_stable | exact Topp_mult_r_stable | exact Topp_one_stable | exact reduce_stable | exact Tmult_plus_distr_stable | exact Tmult_opp_left_stable | exact Tmult_assoc_r_stable | exact Tplus_assoc_r_stable | exact Tplus_assoc_l_stable | exact Tplus_permute_stable | exact Tplus_comm_stable | exact Tred_factor0_stable | exact Tred_factor1_stable | exact Tred_factor2_stable | exact Tred_factor3_stable | exact Tred_factor4_stable | exact Tred_factor5_stable | exact Tred_factor6_stable | exact Tmult_assoc_reduced_stable | exact Tminus_def_stable | exact Tmult_comm_stable ]. Qed. (* \subsection{tactiques de résolution d'un but omega normalisé} Trace de la procédure \subsubsection{Tactiques générant une contradiction} \paragraph{[O_CONSTANT_NOT_NUL]} *) Definition constant_not_nul (i : nat) (h : hyps) := match nth_hyps i h with | EqTerm (Tint Nul) (Tint n) => if beq n Nul then h else absurd | _ => h end. Theorem constant_not_nul_valid : forall i : nat, valid_hyps (constant_not_nul i). Proof. unfold valid_hyps, constant_not_nul; intros i ep e lp H. generalize (nth_valid ep e i lp H); Simplify. Qed. (* \paragraph{[O_CONSTANT_NEG]} *) Definition constant_neg (i : nat) (h : hyps) := match nth_hyps i h with | LeqTerm (Tint Nul) (Tint Neg) => if bgt Nul Neg then absurd else h | _ => h end. Theorem constant_neg_valid : forall i : nat, valid_hyps (constant_neg i). Proof. unfold valid_hyps, constant_neg; intros; generalize (nth_valid ep e i lp); Simplify; simpl. rewrite gt_lt_iff in H0; rewrite le_lt_iff; intuition. Qed. (* \paragraph{[NOT_EXACT_DIVIDE]} *) Definition not_exact_divide (k1 k2 : int) (body : term) (t i : nat) (l : hyps) := match nth_hyps i l with | EqTerm (Tint Nul) b => if beq Nul 0 && eq_term (scalar_norm_add t (body * Tint k1 + Tint k2)%term) b && bgt k2 0 && bgt k1 k2 then absurd else l | _ => l end. Theorem not_exact_divide_valid : forall (k1 k2 : int) (body : term) (t0 i : nat), valid_hyps (not_exact_divide k1 k2 body t0 i). Proof. unfold valid_hyps, not_exact_divide; intros; generalize (nth_valid ep e i lp); Simplify. rewrite (scalar_norm_add_stable t0 e), <-H1. do 2 rewrite <- scalar_norm_add_stable; simpl in *; intros. 
absurd (interp_term e body * k1 + k2 = 0); [ now apply OMEGA4 | symmetry; auto ]. Qed. (* \paragraph{[O_CONTRADICTION]} *) Definition contradiction (t i j : nat) (l : hyps) := match nth_hyps i l with | LeqTerm (Tint Nul) b1 => match nth_hyps j l with | LeqTerm (Tint Nul') b2 => match fusion_cancel t (b1 + b2)%term with | Tint k => if beq Nul 0 && beq Nul' 0 && bgt 0 k then absurd else l | _ => l end | _ => l end | _ => l end. Theorem contradiction_valid : forall t i j : nat, valid_hyps (contradiction t i j). Proof. unfold valid_hyps, contradiction; intros t i j ep e l H; generalize (nth_valid _ _ i _ H); generalize (nth_valid _ _ j _ H); case (nth_hyps i l); auto; intros t1 t2; case t1; auto; case (nth_hyps j l); auto; intros t3 t4; case t3; auto; simpl; intros z z' H1 H2; generalize (eq_refl (interp_term e (fusion_cancel t (t2 + t4)%term))); pattern (fusion_cancel t (t2 + t4)%term) at 2 3; case (fusion_cancel t (t2 + t4)%term); simpl; auto; intro k; elim (fusion_cancel_stable t); simpl. Simplify; intro H3. generalize (OMEGA2 _ _ H2 H1); rewrite H3. rewrite gt_lt_iff in H0; rewrite le_lt_iff; intuition. Qed. (* \paragraph{[O_NEGATE_CONTRADICT]} *) Definition negate_contradict (i1 i2 : nat) (h : hyps) := match nth_hyps i1 h with | EqTerm (Tint Nul) b1 => match nth_hyps i2 h with | NeqTerm (Tint Nul') b2 => if beq Nul 0 && beq Nul' 0 && eq_term b1 b2 then absurd else h | _ => h end | NeqTerm (Tint Nul) b1 => match nth_hyps i2 h with | EqTerm (Tint Nul') b2 => if beq Nul 0 && beq Nul' 0 && eq_term b1 b2 then absurd else h | _ => h end | _ => h end. Definition negate_contradict_inv (t i1 i2 : nat) (h : hyps) := match nth_hyps i1 h with | EqTerm (Tint Nul) b1 => match nth_hyps i2 h with | NeqTerm (Tint Nul') b2 => if beq Nul 0 && beq Nul' 0 && eq_term b1 (scalar_norm t (b2 * Tint (-(1)))%term) then absurd else h | _ => h end | NeqTerm (Tint Nul) b1 => match nth_hyps i2 h with | EqTerm (Tint Nul') b2 => if beq Nul 0 && beq Nul' 0 && eq_term b1 (scalar_norm t (b2 * Tint (-(1)))%term) then absurd else h | _ => h end | _ => h end. Theorem negate_contradict_valid : forall i j : nat, valid_hyps (negate_contradict i j). Proof. unfold valid_hyps, negate_contradict; intros i j ep e l H; generalize (nth_valid _ _ i _ H); generalize (nth_valid _ _ j _ H); case (nth_hyps i l); auto; intros t1 t2; case t1; auto; intros z; auto; case (nth_hyps j l); auto; intros t3 t4; case t3; auto; intros z'; auto; simpl; intros H1 H2; Simplify. Qed. Theorem negate_contradict_inv_valid : forall t i j : nat, valid_hyps (negate_contradict_inv t i j). Proof. unfold valid_hyps, negate_contradict_inv; intros t i j ep e l H; generalize (nth_valid _ _ i _ H); generalize (nth_valid _ _ j _ H); case (nth_hyps i l); auto; intros t1 t2; case t1; auto; intros z; auto; case (nth_hyps j l); auto; intros t3 t4; case t3; auto; intros z'; auto; simpl; intros H1 H2; Simplify; [ rewrite <- scalar_norm_stable in H2; simpl in *; elim (mult_integral (interp_term e t4) (-(1))); intuition; elim minus_one_neq_zero; auto | elim H2; clear H2; rewrite <- scalar_norm_stable; simpl in *; now rewrite <- H1, mult_0_l ]. Qed. (* \subsubsection{Tactiques générant une nouvelle équation} *) (* \paragraph{[O_SUM]} C'est une oper2 valide mais elle traite plusieurs cas à la fois (suivant les opérateurs de comparaison des deux arguments) d'où une preuve un peu compliquée. On utilise quelques lemmes qui sont des généralisations des théorèmes utilisés par OMEGA. 
*) Definition sum (k1 k2 : int) (trace : list t_fusion) (prop1 prop2 : proposition) := match prop1 with | EqTerm (Tint Null) b1 => match prop2 with | EqTerm (Tint Null') b2 => if beq Null 0 && beq Null' 0 then EqTerm (Tint 0) (fusion trace (b1 * Tint k1 + b2 * Tint k2)%term) else TrueTerm | LeqTerm (Tint Null') b2 => if beq Null 0 && beq Null' 0 && bgt k2 0 then LeqTerm (Tint 0) (fusion trace (b1 * Tint k1 + b2 * Tint k2)%term) else TrueTerm | _ => TrueTerm end | LeqTerm (Tint Null) b1 => if beq Null 0 && bgt k1 0 then match prop2 with | EqTerm (Tint Null') b2 => if beq Null' 0 then LeqTerm (Tint 0) (fusion trace (b1 * Tint k1 + b2 * Tint k2)%term) else TrueTerm | LeqTerm (Tint Null') b2 => if beq Null' 0 && bgt k2 0 then LeqTerm (Tint 0) (fusion trace (b1 * Tint k1 + b2 * Tint k2)%term) else TrueTerm | _ => TrueTerm end else TrueTerm | NeqTerm (Tint Null) b1 => match prop2 with | EqTerm (Tint Null') b2 => if beq Null 0 && beq Null' 0 && (negb (beq k1 0)) then NeqTerm (Tint 0) (fusion trace (b1 * Tint k1 + b2 * Tint k2)%term) else TrueTerm | _ => TrueTerm end | _ => TrueTerm end. Theorem sum_valid : forall (k1 k2 : int) (t : list t_fusion), valid2 (sum k1 k2 t). Proof. unfold valid2; intros k1 k2 t ep e p1 p2; unfold sum; Simplify; simpl; auto; try elim (fusion_stable t); simpl; intros; [ apply sum1; assumption | apply sum2; try assumption; apply sum4; assumption | rewrite plus_comm; apply sum2; try assumption; apply sum4; assumption | apply sum3; try assumption; apply sum4; assumption | apply sum5; auto ]. Qed. (* \paragraph{[O_EXACT_DIVIDE]} c'est une oper1 valide mais on préfère une substitution a ce point la *) Definition exact_divide (k : int) (body : term) (t : nat) (prop : proposition) := match prop with | EqTerm (Tint Null) b => if beq Null 0 && eq_term (scalar_norm t (body * Tint k)%term) b && negb (beq k 0) then EqTerm (Tint 0) body else TrueTerm | NeqTerm (Tint Null) b => if beq Null 0 && eq_term (scalar_norm t (body * Tint k)%term) b && negb (beq k 0) then NeqTerm (Tint 0) body else TrueTerm | _ => TrueTerm end. Theorem exact_divide_valid : forall (k : int) (t : term) (n : nat), valid1 (exact_divide k t n). Proof. unfold valid1, exact_divide; intros k1 k2 t ep e p1; Simplify; simpl; auto; subst; rewrite <- scalar_norm_stable; simpl; intros; [ destruct (mult_integral _ _ (eq_sym H0)); intuition | contradict H0; rewrite <- H0, mult_0_l; auto ]. Qed. (* \paragraph{[O_DIV_APPROX]} La preuve reprend le schéma de la précédente mais on est sur une opération de type valid1 et non sur une opération terminale. *) Definition divide_and_approx (k1 k2 : int) (body : term) (t : nat) (prop : proposition) := match prop with | LeqTerm (Tint Null) b => if beq Null 0 && eq_term (scalar_norm_add t (body * Tint k1 + Tint k2)%term) b && bgt k1 0 && bgt k1 k2 then LeqTerm (Tint 0) body else prop | _ => prop end. Theorem divide_and_approx_valid : forall (k1 k2 : int) (body : term) (t : nat), valid1 (divide_and_approx k1 k2 body t). Proof. unfold valid1, divide_and_approx; intros k1 k2 body t ep e p1; Simplify; simpl; auto; subst; elim (scalar_norm_add_stable t e); simpl. intro H2; apply mult_le_approx with (3 := H2); assumption. Qed. (* \paragraph{[MERGE_EQ]} *) Definition merge_eq (t : nat) (prop1 prop2 : proposition) := match prop1 with | LeqTerm (Tint Null) b1 => match prop2 with | LeqTerm (Tint Null') b2 => if beq Null 0 && beq Null' 0 && eq_term b1 (scalar_norm t (b2 * Tint (-(1)))%term) then EqTerm (Tint 0) b1 else TrueTerm | _ => TrueTerm end | _ => TrueTerm end. 
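(* Illustrative addition, not in the original file: spelled out, the [valid2]
   statement proved by [sum_valid] above says that the proposition built by
   [sum] is a consequence of the two propositions it combines. *)
Example sum_valid_spelled_out (k1 k2 : int) (t : list t_fusion)
  (ep : list Prop) (e : list int) (p1 p2 : proposition) :
  interp_proposition ep e p1 -> interp_proposition ep e p2 ->
  interp_proposition ep e (sum k1 k2 t p1 p2).
Proof. exact (sum_valid k1 k2 t ep e p1 p2). Qed.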
Theorem merge_eq_valid : forall n : nat, valid2 (merge_eq n). Proof. unfold valid2, merge_eq; intros n ep e p1 p2; Simplify; simpl; auto; elim (scalar_norm_stable n e); simpl; intros; symmetry ; apply OMEGA8 with (2 := H0); [ assumption | elim opp_eq_mult_neg_1; trivial ]. Qed. (* \paragraph{[O_CONSTANT_NUL]} *) Definition constant_nul (i : nat) (h : hyps) := match nth_hyps i h with | NeqTerm (Tint Null) (Tint Null') => if beq Null Null' then absurd else h | _ => h end. Theorem constant_nul_valid : forall i : nat, valid_hyps (constant_nul i). Proof. unfold valid_hyps, constant_nul; intros; generalize (nth_valid ep e i lp); Simplify; simpl; intro H1; absurd (0 = 0); intuition. Qed. (* \paragraph{[O_STATE]} *) Definition state (m : int) (s : step) (prop1 prop2 : proposition) := match prop1 with | EqTerm (Tint Null) b1 => match prop2 with | EqTerm b2 b3 => if beq Null 0 then EqTerm (Tint 0) (t_rewrite s (b1 + (- b3 + b2) * Tint m)%term) else TrueTerm | _ => TrueTerm end | _ => TrueTerm end. Theorem state_valid : forall (m : int) (s : step), valid2 (state m s). Proof. unfold valid2; intros m s ep e p1 p2; unfold state; Simplify; simpl; auto; elim (t_rewrite_stable s e); simpl; intros H1 H2; elim H1. now rewrite H2, plus_opp_l, plus_0_l, mult_0_l. Qed. (* \subsubsection{Tactiques générant plusieurs but} \paragraph{[O_SPLIT_INEQ]} La seule pour le moment (tant que la normalisation n'est pas réfléchie). *) Definition split_ineq (i t : nat) (f1 f2 : hyps -> lhyps) (l : hyps) := match nth_hyps i l with | NeqTerm (Tint Null) b1 => if beq Null 0 then f1 (LeqTerm (Tint 0) (add_norm t (b1 + Tint (-(1)))%term) :: l) ++ f2 (LeqTerm (Tint 0) (scalar_norm_add t (b1 * Tint (-(1)) + Tint (-(1)))%term) :: l) else l :: nil | _ => l :: nil end. Theorem split_ineq_valid : forall (i t : nat) (f1 f2 : hyps -> lhyps), valid_list_hyps f1 -> valid_list_hyps f2 -> valid_list_hyps (split_ineq i t f1 f2). Proof. unfold valid_list_hyps, split_ineq; intros i t f1 f2 H1 H2 ep e lp H; generalize (nth_valid _ _ i _ H); case (nth_hyps i lp); simpl; auto; intros t1 t2; case t1; simpl; auto; intros z; simpl; auto; intro H3. Simplify. apply append_valid; elim (OMEGA19 (interp_term e t2)); [ intro H4; left; apply H1; simpl; elim (add_norm_stable t); simpl; auto | intro H4; right; apply H2; simpl; elim (scalar_norm_add_stable t); simpl; auto | generalize H3; unfold not; intros E1 E2; apply E1; symmetry ; trivial ]. Qed. 
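(* Illustrative addition, not in the original file: the contradiction steps
   above all answer with the hypothesis list [absurd]; such a goal is then
   trivially discharged, since [FalseTerm] is interpreted as [False]. *)
Example interp_goal_absurd (ep : list Prop) (e : list int) :
  interp_goal ep e absurd.
Proof. simpl. tauto. Qed.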
(* \subsection{La fonction de rejeu de la trace} *) Fixpoint execute_omega (t : t_omega) (l : hyps) {struct t} : lhyps := match t with | O_CONSTANT_NOT_NUL n => singleton (constant_not_nul n l) | O_CONSTANT_NEG n => singleton (constant_neg n l) | O_DIV_APPROX k1 k2 body t cont n => execute_omega cont (apply_oper_1 n (divide_and_approx k1 k2 body t) l) | O_NOT_EXACT_DIVIDE k1 k2 body t i => singleton (not_exact_divide k1 k2 body t i l) | O_EXACT_DIVIDE k body t cont n => execute_omega cont (apply_oper_1 n (exact_divide k body t) l) | O_SUM k1 i1 k2 i2 t cont => execute_omega cont (apply_oper_2 i1 i2 (sum k1 k2 t) l) | O_CONTRADICTION t i j => singleton (contradiction t i j l) | O_MERGE_EQ t i1 i2 cont => execute_omega cont (apply_oper_2 i1 i2 (merge_eq t) l) | O_SPLIT_INEQ t i cont1 cont2 => split_ineq i t (execute_omega cont1) (execute_omega cont2) l | O_CONSTANT_NUL i => singleton (constant_nul i l) | O_NEGATE_CONTRADICT i j => singleton (negate_contradict i j l) | O_NEGATE_CONTRADICT_INV t i j => singleton (negate_contradict_inv t i j l) | O_STATE m s i1 i2 cont => execute_omega cont (apply_oper_2 i1 i2 (state m s) l) end. Theorem omega_valid : forall tr : t_omega, valid_list_hyps (execute_omega tr). Proof. simple induction tr; simpl; [ unfold valid_list_hyps; simpl; intros; left; apply (constant_not_nul_valid n ep e lp H) | unfold valid_list_hyps; simpl; intros; left; apply (constant_neg_valid n ep e lp H) | unfold valid_list_hyps, valid_hyps; intros k1 k2 body n t' Ht' m ep e lp H; apply Ht'; apply (apply_oper_1_valid m (divide_and_approx k1 k2 body n) (divide_and_approx_valid k1 k2 body n) ep e lp H) | unfold valid_list_hyps; simpl; intros; left; apply (not_exact_divide_valid _ _ _ _ _ ep e lp H) | unfold valid_list_hyps, valid_hyps; intros k body n t' Ht' m ep e lp H; apply Ht'; apply (apply_oper_1_valid m (exact_divide k body n) (exact_divide_valid k body n) ep e lp H) | unfold valid_list_hyps, valid_hyps; intros k1 i1 k2 i2 trace t' Ht' ep e lp H; apply Ht'; apply (apply_oper_2_valid i1 i2 (sum k1 k2 trace) (sum_valid k1 k2 trace) ep e lp H) | unfold valid_list_hyps; simpl; intros; left; apply (contradiction_valid n n0 n1 ep e lp H) | unfold valid_list_hyps, valid_hyps; intros trace i1 i2 t' Ht' ep e lp H; apply Ht'; apply (apply_oper_2_valid i1 i2 (merge_eq trace) (merge_eq_valid trace) ep e lp H) | intros t' i k1 H1 k2 H2; unfold valid_list_hyps; simpl; intros ep e lp H; apply (split_ineq_valid i t' (execute_omega k1) (execute_omega k2) H1 H2 ep e lp H) | unfold valid_list_hyps; simpl; intros i ep e lp H; left; apply (constant_nul_valid i ep e lp H) | unfold valid_list_hyps; simpl; intros i j ep e lp H; left; apply (negate_contradict_valid i j ep e lp H) | unfold valid_list_hyps; simpl; intros n i j ep e lp H; left; apply (negate_contradict_inv_valid n i j ep e lp H) | unfold valid_list_hyps, valid_hyps; intros m s i1 i2 t' Ht' ep e lp H; apply Ht'; apply (apply_oper_2_valid i1 i2 (state m s) (state_valid m s) ep e lp H) ]. Qed. 
(* \subsection{Les opérations globales sur le but} \subsubsection{Normalisation} *) Definition move_right (s : step) (p : proposition) := match p with | EqTerm t1 t2 => EqTerm (Tint 0) (t_rewrite s (t1 + - t2)%term) | LeqTerm t1 t2 => LeqTerm (Tint 0) (t_rewrite s (t2 + - t1)%term) | GeqTerm t1 t2 => LeqTerm (Tint 0) (t_rewrite s (t1 + - t2)%term) | LtTerm t1 t2 => LeqTerm (Tint 0) (t_rewrite s (t2 + Tint (-(1)) + - t1)%term) | GtTerm t1 t2 => LeqTerm (Tint 0) (t_rewrite s (t1 + Tint (-(1)) + - t2)%term) | NeqTerm t1 t2 => NeqTerm (Tint 0) (t_rewrite s (t1 + - t2)%term) | p => p end. Theorem move_right_valid : forall s : step, valid1 (move_right s). Proof. unfold valid1, move_right; intros s ep e p; Simplify; simpl; elim (t_rewrite_stable s e); simpl; [ symmetry ; apply egal_left; assumption | intro; apply le_left; assumption | intro; apply le_left; rewrite <- ge_le_iff; assumption | intro; apply lt_left; rewrite <- gt_lt_iff; assumption | intro; apply lt_left; assumption | intro; apply ne_left_2; assumption ]. Qed. Definition do_normalize (i : nat) (s : step) := apply_oper_1 i (move_right s). Theorem do_normalize_valid : forall (i : nat) (s : step), valid_hyps (do_normalize i s). Proof. intros; unfold do_normalize; apply apply_oper_1_valid; apply move_right_valid. Qed. Fixpoint do_normalize_list (l : list step) (i : nat) (h : hyps) {struct l} : hyps := match l with | s :: l' => do_normalize_list l' (S i) (do_normalize i s h) | nil => h end. Theorem do_normalize_list_valid : forall (l : list step) (i : nat), valid_hyps (do_normalize_list l i). Proof. simple induction l; simpl; unfold valid_hyps; [ auto | intros a l' Hl' i ep e lp H; unfold valid_hyps in Hl'; apply Hl'; apply (do_normalize_valid i a ep e lp); assumption ]. Qed. Theorem normalize_goal : forall (s : list step) (ep : list Prop) (env : list int) (l : hyps), interp_goal ep env (do_normalize_list s 0 l) -> interp_goal ep env l. Proof. intros; apply valid_goal with (2 := H); apply do_normalize_list_valid. Qed. (* \subsubsection{Exécution de la trace} *) Theorem execute_goal : forall (tr : t_omega) (ep : list Prop) (env : list int) (l : hyps), interp_list_goal ep env (execute_omega tr l) -> interp_goal ep env l. Proof. intros; apply (goal_valid (execute_omega tr) (omega_valid tr) ep env l H). Qed. Theorem append_goal : forall (ep : list Prop) (e : list int) (l1 l2 : lhyps), interp_list_goal ep e l1 /\ interp_list_goal ep e l2 -> interp_list_goal ep e (l1 ++ l2). Proof. intros ep e; simple induction l1; [ simpl; intros l2 (H1, H2); assumption | simpl; intros h1 t1 HR l2 ((H1, H2), H3); split; auto ]. Qed. (* A simple decidability checker : if the proposition belongs to the simple grammar describe below then it is decidable. Proof is by induction and uses well known theorem about arithmetic and propositional calculus *) Fixpoint decidability (p : proposition) : bool := match p with | EqTerm _ _ => true | LeqTerm _ _ => true | GeqTerm _ _ => true | GtTerm _ _ => true | LtTerm _ _ => true | NeqTerm _ _ => true | FalseTerm => true | TrueTerm => true | Tnot t => decidability t | Tand t1 t2 => decidability t1 && decidability t2 | Timp t1 t2 => decidability t1 && decidability t2 | Tor t1 t2 => decidability t1 && decidability t2 | Tprop _ => false end. Theorem decidable_correct : forall (ep : list Prop) (e : list int) (p : proposition), decidability p = true -> decidable (interp_proposition ep e p). Proof. 
simple induction p; simpl; intros; [ apply dec_eq | apply dec_le | left; auto | right; unfold not; auto | apply dec_not; auto | apply dec_ge | apply dec_gt | apply dec_lt | apply dec_ne | apply dec_or; elim andb_prop with (1 := H1); auto | apply dec_and; elim andb_prop with (1 := H1); auto | apply dec_imp; elim andb_prop with (1 := H1); auto | discriminate H ]. Qed. (* An interpretation function for a complete goal with an explicit conclusion. We use an intermediate fixpoint. *) Fixpoint interp_full_goal (envp : list Prop) (env : list int) (c : proposition) (l : hyps) {struct l} : Prop := match l with | nil => interp_proposition envp env c | p' :: l' => interp_proposition envp env p' -> interp_full_goal envp env c l' end. Definition interp_full (ep : list Prop) (e : list int) (lc : hyps * proposition) : Prop := match lc with | (l, c) => interp_full_goal ep e c l end. (* Relates the interpretation of a complete goal with the interpretation of its hypothesis and conclusion *) Theorem interp_full_false : forall (ep : list Prop) (e : list int) (l : hyps) (c : proposition), (interp_hyps ep e l -> interp_proposition ep e c) -> interp_full ep e (l, c). Proof. simple induction l; unfold interp_full; simpl; [ auto | intros a l1 H1 c H2 H3; apply H1; auto ]. Qed. (* Push the conclusion in the list of hypothesis using a double negation If the decidability cannot be "proven", then just forget about the conclusion (equivalent of replacing it with false) *) Definition to_contradict (lc : hyps * proposition) := match lc with | (l, c) => if decidability c then Tnot c :: l else l end. (* The previous operation is valid in the sense that the new list of hypothesis implies the original goal *) Theorem to_contradict_valid : forall (ep : list Prop) (e : list int) (lc : hyps * proposition), interp_goal ep e (to_contradict lc) -> interp_full ep e lc. Proof. intros ep e lc; case lc; intros l c; simpl; pattern (decidability c); apply bool_eq_ind; [ simpl; intros H H1; apply interp_full_false; intros H2; apply not_not; [ apply decidable_correct; assumption | unfold not at 1; intro H3; apply hyps_to_goal with (2 := H2); auto ] | intros H1 H2; apply interp_full_false; intro H3; elim hyps_to_goal with (1 := H2); assumption ]. Qed. (* [map_cons x l] adds [x] at the head of each list in [l] (which is a list of lists *) Fixpoint map_cons (A : Set) (x : A) (l : list (list A)) {struct l} : list (list A) := match l with | nil => nil | l :: ll => (x :: l) :: map_cons A x ll end. (* This function breaks up a list of hypothesis in a list of simpler list of hypothesis that together implie the original one. The goal of all this is to transform the goal in a list of solvable problems. Note that : - we need a way to drive the analysis as some hypotheis may not require a split. - this procedure must be perfectly mimicked by the ML part otherwise hypothesis will get desynchronised and this will be a mess. 
*) Fixpoint destructure_hyps (nn : nat) (ll : hyps) {struct nn} : lhyps := match nn with | O => ll :: nil | S n => match ll with | nil => nil :: nil | Tor p1 p2 :: l => destructure_hyps n (p1 :: l) ++ destructure_hyps n (p2 :: l) | Tand p1 p2 :: l => destructure_hyps n (p1 :: p2 :: l) | Timp p1 p2 :: l => if decidability p1 then destructure_hyps n (Tnot p1 :: l) ++ destructure_hyps n (p2 :: l) else map_cons _ (Timp p1 p2) (destructure_hyps n l) | Tnot p :: l => match p with | Tnot p1 => if decidability p1 then destructure_hyps n (p1 :: l) else map_cons _ (Tnot (Tnot p1)) (destructure_hyps n l) | Tor p1 p2 => destructure_hyps n (Tnot p1 :: Tnot p2 :: l) | Tand p1 p2 => if decidability p1 then destructure_hyps n (Tnot p1 :: l) ++ destructure_hyps n (Tnot p2 :: l) else map_cons _ (Tnot p) (destructure_hyps n l) | _ => map_cons _ (Tnot p) (destructure_hyps n l) end | x :: l => map_cons _ x (destructure_hyps n l) end end. Theorem map_cons_val : forall (ep : list Prop) (e : list int) (p : proposition) (l : lhyps), interp_proposition ep e p -> interp_list_hyps ep e l -> interp_list_hyps ep e (map_cons _ p l). Proof. simple induction l; simpl; [ auto | intros; elim H1; intro H2; auto ]. Qed. Hint Resolve map_cons_val append_valid decidable_correct. Theorem destructure_hyps_valid : forall n : nat, valid_list_hyps (destructure_hyps n). Proof. simple induction n; [ unfold valid_list_hyps; simpl; auto | unfold valid_list_hyps at 2; intros n1 H ep e lp; case lp; [ simpl; auto | intros p l; case p; try (simpl; intros; apply map_cons_val; simpl; elim H0; auto); [ intro p'; case p'; try (simpl; intros; apply map_cons_val; simpl; elim H0; auto); [ simpl; intros p1 (H1, H2); pattern (decidability p1); apply bool_eq_ind; intro H3; [ apply H; simpl; split; [ apply not_not; auto | assumption ] | auto ] | simpl; intros p1 p2 (H1, H2); apply H; simpl; elim not_or with (1 := H1); auto | simpl; intros p1 p2 (H1, H2); pattern (decidability p1); apply bool_eq_ind; intro H3; [ apply append_valid; elim not_and with (2 := H1); [ intro; left; apply H; simpl; auto | intro; right; apply H; simpl; auto | auto ] | auto ] ] | simpl; intros p1 p2 (H1, H2); apply append_valid; (elim H1; intro H3; simpl; [ left | right ]); apply H; simpl; auto | simpl; intros; apply H; simpl; tauto | simpl; intros p1 p2 (H1, H2); pattern (decidability p1); apply bool_eq_ind; intro H3; [ apply append_valid; elim imp_simp with (2 := H1); [ intro H4; left; simpl; apply H; simpl; auto | intro H4; right; simpl; apply H; simpl; auto | auto ] | auto ] ] ] ]. Qed. Definition prop_stable (f : proposition -> proposition) := forall (ep : list Prop) (e : list int) (p : proposition), interp_proposition ep e p <-> interp_proposition ep e (f p). Definition p_apply_left (f : proposition -> proposition) (p : proposition) := match p with | Timp x y => Timp (f x) y | Tor x y => Tor (f x) y | Tand x y => Tand (f x) y | Tnot x => Tnot (f x) | x => x end. Theorem p_apply_left_stable : forall f : proposition -> proposition, prop_stable f -> prop_stable (p_apply_left f). Proof. unfold prop_stable; intros f H ep e p; split; (case p; simpl; auto; intros p1; elim (H ep e p1); tauto). Qed. Definition p_apply_right (f : proposition -> proposition) (p : proposition) := match p with | Timp x y => Timp x (f y) | Tor x y => Tor x (f y) | Tand x y => Tand x (f y) | Tnot x => Tnot (f x) | x => x end. Theorem p_apply_right_stable : forall f : proposition -> proposition, prop_stable f -> prop_stable (p_apply_right f). Proof. 
unfold prop_stable; intros f H ep e p; split; (case p; simpl; auto; [ intros p1; elim (H ep e p1); tauto | intros p1 p2; elim (H ep e p2); tauto | intros p1 p2; elim (H ep e p2); tauto | intros p1 p2; elim (H ep e p2); tauto ]). Qed. Definition p_invert (f : proposition -> proposition) (p : proposition) := match p with | EqTerm x y => Tnot (f (NeqTerm x y)) | LeqTerm x y => Tnot (f (GtTerm x y)) | GeqTerm x y => Tnot (f (LtTerm x y)) | GtTerm x y => Tnot (f (LeqTerm x y)) | LtTerm x y => Tnot (f (GeqTerm x y)) | NeqTerm x y => Tnot (f (EqTerm x y)) | x => x end. Theorem p_invert_stable : forall f : proposition -> proposition, prop_stable f -> prop_stable (p_invert f). Proof. unfold prop_stable; intros f H ep e p; split; (case p; simpl; auto; [ intros t1 t2; elim (H ep e (NeqTerm t1 t2)); simpl; generalize (dec_eq (interp_term e t1) (interp_term e t2)); unfold decidable; tauto | intros t1 t2; elim (H ep e (GtTerm t1 t2)); simpl; generalize (dec_gt (interp_term e t1) (interp_term e t2)); unfold decidable; rewrite le_lt_iff, <- gt_lt_iff; tauto | intros t1 t2; elim (H ep e (LtTerm t1 t2)); simpl; generalize (dec_lt (interp_term e t1) (interp_term e t2)); unfold decidable; rewrite ge_le_iff, le_lt_iff; tauto | intros t1 t2; elim (H ep e (LeqTerm t1 t2)); simpl; generalize (dec_gt (interp_term e t1) (interp_term e t2)); unfold decidable; repeat rewrite le_lt_iff; repeat rewrite gt_lt_iff; tauto | intros t1 t2; elim (H ep e (GeqTerm t1 t2)); simpl; generalize (dec_lt (interp_term e t1) (interp_term e t2)); unfold decidable; repeat rewrite ge_le_iff; repeat rewrite le_lt_iff; tauto | intros t1 t2; elim (H ep e (EqTerm t1 t2)); simpl; generalize (dec_eq (interp_term e t1) (interp_term e t2)); unfold decidable; tauto ]). Qed. Theorem move_right_stable : forall s : step, prop_stable (move_right s). Proof. unfold move_right, prop_stable; intros s ep e p; split; [ Simplify; simpl; elim (t_rewrite_stable s e); simpl; [ symmetry ; apply egal_left; assumption | intro; apply le_left; assumption | intro; apply le_left; rewrite <- ge_le_iff; assumption | intro; apply lt_left; rewrite <- gt_lt_iff; assumption | intro; apply lt_left; assumption | intro; apply ne_left_2; assumption ] | case p; simpl; intros; auto; generalize H; elim (t_rewrite_stable s); simpl; intro H1; [ rewrite (plus_0_r_reverse (interp_term e t1)); rewrite H1; rewrite plus_permute; rewrite plus_opp_r; rewrite plus_0_r; trivial | apply (fun a b => plus_le_reg_r a b (- interp_term e t0)); rewrite plus_opp_r; assumption | rewrite ge_le_iff; apply (fun a b => plus_le_reg_r a b (- interp_term e t1)); rewrite plus_opp_r; assumption | rewrite gt_lt_iff; apply lt_left_inv; assumption | apply lt_left_inv; assumption | unfold not; intro H2; apply H1; rewrite H2; rewrite plus_opp_r; trivial ] ]. Qed. Fixpoint p_rewrite (s : p_step) : proposition -> proposition := match s with | P_LEFT s => p_apply_left (p_rewrite s) | P_RIGHT s => p_apply_right (p_rewrite s) | P_STEP s => move_right s | P_INVERT s => p_invert (move_right s) | P_NOP => fun p : proposition => p end. Theorem p_rewrite_stable : forall s : p_step, prop_stable (p_rewrite s). Proof. simple induction s; simpl; [ intros; apply p_apply_left_stable; trivial | intros; apply p_apply_right_stable; trivial | intros; apply p_invert_stable; apply move_right_stable | apply move_right_stable | unfold prop_stable; simpl; intros; split; auto ]. Qed. 
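(* A [p_step] thus describes a path to the subformula being rewritten:
   [P_LEFT]/[P_RIGHT] descend into a connective, [P_STEP] applies
   [move_right] to an atom, and [P_INVERT] first replaces the comparison by
   the negation of its opposite, which is legitimate because the atomic
   comparisons are decidable (cf. the [dec_*] lemmas used in
   [p_invert_stable]).  A small sketch of the unfoldings involved, using
   only the definitions above; each equation holds by [reflexivity]:

   Remark p_rewrite_left_imp :
     forall s x y,
       p_rewrite (P_LEFT s) (Timp x y) = Timp (p_rewrite s x) y.
   Proof. reflexivity. Qed.

   Remark p_rewrite_invert_leq :
     forall s x y,
       p_rewrite (P_INVERT s) (LeqTerm x y)
       = Tnot (move_right s (GtTerm x y)).
   Proof. reflexivity. Qed.
*)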
Fixpoint normalize_hyps (l : list h_step) (lh : hyps) {struct l} : hyps := match l with | nil => lh | pair_step i s :: r => normalize_hyps r (apply_oper_1 i (p_rewrite s) lh) end. Theorem normalize_hyps_valid : forall l : list h_step, valid_hyps (normalize_hyps l). Proof. simple induction l; unfold valid_hyps; simpl; [ auto | intros n_s r; case n_s; intros n s H ep e lp H1; apply H; apply apply_oper_1_valid; [ unfold valid1; intros ep1 e1 p1 H2; elim (p_rewrite_stable s ep1 e1 p1); auto | assumption ] ]. Qed. Theorem normalize_hyps_goal : forall (s : list h_step) (ep : list Prop) (env : list int) (l : hyps), interp_goal ep env (normalize_hyps s l) -> interp_goal ep env l. Proof. intros; apply valid_goal with (2 := H); apply normalize_hyps_valid. Qed. Fixpoint extract_hyp_pos (s : list direction) (p : proposition) {struct s} : proposition := match s with | D_left :: l => match p with | Tand x y => extract_hyp_pos l x | _ => p end | D_right :: l => match p with | Tand x y => extract_hyp_pos l y | _ => p end | D_mono :: l => match p with | Tnot x => extract_hyp_neg l x | _ => p end | _ => p end with extract_hyp_neg (s : list direction) (p : proposition) {struct s} : proposition := match s with | D_left :: l => match p with | Tor x y => extract_hyp_neg l x | Timp x y => if decidability x then extract_hyp_pos l x else Tnot p | _ => Tnot p end | D_right :: l => match p with | Tor x y => extract_hyp_neg l y | Timp x y => extract_hyp_neg l y | _ => Tnot p end | D_mono :: l => match p with | Tnot x => if decidability x then extract_hyp_pos l x else Tnot p | _ => Tnot p end | _ => match p with | Tnot x => if decidability x then x else Tnot p | _ => Tnot p end end. Definition co_valid1 (f : proposition -> proposition) := forall (ep : list Prop) (e : list int) (p1 : proposition), interp_proposition ep e (Tnot p1) -> interp_proposition ep e (f p1). Theorem extract_valid : forall s : list direction, valid1 (extract_hyp_pos s) /\ co_valid1 (extract_hyp_neg s). Proof. unfold valid1, co_valid1; simple induction s; [ split; [ simpl; auto | intros ep e p1; case p1; simpl; auto; intro p; pattern (decidability p); apply bool_eq_ind; [ intro H; generalize (decidable_correct ep e p H); unfold decidable; tauto | simpl; auto ] ] | intros a s' (H1, H2); simpl in H2; split; intros ep e p; case a; auto; case p; auto; simpl; intros; (apply H1; tauto) || (apply H2; tauto) || (pattern (decidability p0); apply bool_eq_ind; [ intro H3; generalize (decidable_correct ep e p0 H3); unfold decidable; intro H4; apply H1; tauto | intro; tauto ]) ]. Qed. Fixpoint decompose_solve (s : e_step) (h : hyps) {struct s} : lhyps := match s with | E_SPLIT i dl s1 s2 => match extract_hyp_pos dl (nth_hyps i h) with | Tor x y => decompose_solve s1 (x :: h) ++ decompose_solve s2 (y :: h) | Tnot (Tand x y) => if decidability x then decompose_solve s1 (Tnot x :: h) ++ decompose_solve s2 (Tnot y :: h) else h :: nil | Timp x y => if decidability x then decompose_solve s1 (Tnot x :: h) ++ decompose_solve s2 (y :: h) else h::nil | _ => h :: nil end | E_EXTRACT i dl s1 => decompose_solve s1 (extract_hyp_pos dl (nth_hyps i h) :: h) | E_SOLVE t => execute_omega t h end. Theorem decompose_solve_valid : forall s : e_step, valid_list_goal (decompose_solve s). Proof. 
intro s; apply goal_valid; unfold valid_list_hyps; elim s; simpl; intros; [ cut (interp_proposition ep e1 (extract_hyp_pos l (nth_hyps n lp))); [ case (extract_hyp_pos l (nth_hyps n lp)); simpl; auto; [ intro p; case p; simpl; auto; intros p1 p2 H2; pattern (decidability p1); apply bool_eq_ind; [ intro H3; generalize (decidable_correct ep e1 p1 H3); intro H4; apply append_valid; elim H4; intro H5; [ right; apply H0; simpl; tauto | left; apply H; simpl; tauto ] | simpl; auto ] | intros p1 p2 H2; apply append_valid; simpl; elim H2; [ intros H3; left; apply H; simpl; auto | intros H3; right; apply H0; simpl; auto ] | intros p1 p2 H2; pattern (decidability p1); apply bool_eq_ind; [ intro H3; generalize (decidable_correct ep e1 p1 H3); intro H4; apply append_valid; elim H4; intro H5; [ right; apply H0; simpl; tauto | left; apply H; simpl; tauto ] | simpl; auto ] ] | elim (extract_valid l); intros H2 H3; apply H2; apply nth_valid; auto ] | intros; apply H; simpl; split; [ elim (extract_valid l); intros H2 H3; apply H2; apply nth_valid; auto | auto ] | apply omega_valid with (1 := H) ]. Qed. (* \subsection{La dernière étape qui élimine tous les séquents inutiles} *) Definition valid_lhyps (f : lhyps -> lhyps) := forall (ep : list Prop) (e : list int) (lp : lhyps), interp_list_hyps ep e lp -> interp_list_hyps ep e (f lp). Fixpoint reduce_lhyps (lp : lhyps) : lhyps := match lp with | (FalseTerm :: nil) :: lp' => reduce_lhyps lp' | x :: lp' => x :: reduce_lhyps lp' | nil => nil (A:=hyps) end. Theorem reduce_lhyps_valid : valid_lhyps reduce_lhyps. Proof. unfold valid_lhyps; intros ep e lp; elim lp; [ simpl; auto | intros a l HR; elim a; [ simpl; tauto | intros a1 l1; case l1; case a1; simpl; try tauto ] ]. Qed. Theorem do_reduce_lhyps : forall (envp : list Prop) (env : list int) (l : lhyps), interp_list_goal envp env (reduce_lhyps l) -> interp_list_goal envp env l. Proof. intros envp env l H; apply list_goal_to_hyps; intro H1; apply list_hyps_to_goal with (1 := H); apply reduce_lhyps_valid; assumption. Qed. Definition concl_to_hyp (p : proposition) := if decidability p then Tnot p else TrueTerm. Definition do_concl_to_hyp : forall (envp : list Prop) (env : list int) (c : proposition) (l : hyps), interp_goal envp env (concl_to_hyp c :: l) -> interp_goal_concl c envp env l. Proof. simpl; intros envp env c l; induction l as [| a l Hrecl]; [ simpl; unfold concl_to_hyp; pattern (decidability c); apply bool_eq_ind; [ intro H; generalize (decidable_correct envp env c H); unfold decidable; simpl; tauto | simpl; intros H1 H2; elim H2; trivial ] | simpl; tauto ]. Qed. Definition omega_tactic (t1 : e_step) (t2 : list h_step) (c : proposition) (l : hyps) := reduce_lhyps (decompose_solve t1 (normalize_hyps t2 (concl_to_hyp c :: l))). Theorem do_omega : forall (t1 : e_step) (t2 : list h_step) (envp : list Prop) (env : list int) (c : proposition) (l : hyps), interp_list_goal envp env (omega_tactic t1 t2 c l) -> interp_goal_concl c envp env l. Proof. unfold omega_tactic; intros; apply do_concl_to_hyp; apply (normalize_hyps_goal t2); apply (decompose_solve_valid t1); apply do_reduce_lhyps; assumption. Qed. End IntOmega. (* For now, the above modular construction is instanciated on Z, in order to retrieve the initial ROmega. *) Module ZOmega := IntOmega(Z_as_Int). 
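(* A hedged end-to-end sketch: the ML side of the plugin (plugins/romega,
   cf. [const_omega.mli]) is expected to reify the current goal into the
   syntax above, compute a normalization trace [t2 : list h_step] and a
   resolution trace [t1 : e_step], and discharge the goal by applying
   [ZOmega.do_omega t1 t2] (cf. [coq_do_omega] in [const_omega.mli]).  From
   the user's point of view, this typically looks like:

   Require Import ZArith ROmega.
   Open Scope Z_scope.

   Goal forall x y : Z, x <= y -> y <= x -> x = y.
   Proof. intros; romega. Qed.
*)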
coq-8.4pl2/plugins/romega/const_omega.mli0000640000175000001440000001341211254456226017573 0ustar notinusers(************************************************************************* PROJET RNRT Calife - 2001 Author: Pierre Crgut - France Tlcom R&D Licence : LGPL version 2.1 *************************************************************************) (** Coq objects used in romega *) (* from Logic *) val coq_refl_equal : Term.constr lazy_t val coq_and : Term.constr lazy_t val coq_not : Term.constr lazy_t val coq_or : Term.constr lazy_t val coq_True : Term.constr lazy_t val coq_False : Term.constr lazy_t val coq_I : Term.constr lazy_t (* from ReflOmegaCore/ZOmega *) val coq_h_step : Term.constr lazy_t val coq_pair_step : Term.constr lazy_t val coq_p_left : Term.constr lazy_t val coq_p_right : Term.constr lazy_t val coq_p_invert : Term.constr lazy_t val coq_p_step : Term.constr lazy_t val coq_t_int : Term.constr lazy_t val coq_t_plus : Term.constr lazy_t val coq_t_mult : Term.constr lazy_t val coq_t_opp : Term.constr lazy_t val coq_t_minus : Term.constr lazy_t val coq_t_var : Term.constr lazy_t val coq_proposition : Term.constr lazy_t val coq_p_eq : Term.constr lazy_t val coq_p_leq : Term.constr lazy_t val coq_p_geq : Term.constr lazy_t val coq_p_lt : Term.constr lazy_t val coq_p_gt : Term.constr lazy_t val coq_p_neq : Term.constr lazy_t val coq_p_true : Term.constr lazy_t val coq_p_false : Term.constr lazy_t val coq_p_not : Term.constr lazy_t val coq_p_or : Term.constr lazy_t val coq_p_and : Term.constr lazy_t val coq_p_imp : Term.constr lazy_t val coq_p_prop : Term.constr lazy_t val coq_f_equal : Term.constr lazy_t val coq_f_cancel : Term.constr lazy_t val coq_f_left : Term.constr lazy_t val coq_f_right : Term.constr lazy_t val coq_c_do_both : Term.constr lazy_t val coq_c_do_left : Term.constr lazy_t val coq_c_do_right : Term.constr lazy_t val coq_c_do_seq : Term.constr lazy_t val coq_c_nop : Term.constr lazy_t val coq_c_opp_plus : Term.constr lazy_t val coq_c_opp_opp : Term.constr lazy_t val coq_c_opp_mult_r : Term.constr lazy_t val coq_c_opp_one : Term.constr lazy_t val coq_c_reduce : Term.constr lazy_t val coq_c_mult_plus_distr : Term.constr lazy_t val coq_c_opp_left : Term.constr lazy_t val coq_c_mult_assoc_r : Term.constr lazy_t val coq_c_plus_assoc_r : Term.constr lazy_t val coq_c_plus_assoc_l : Term.constr lazy_t val coq_c_plus_permute : Term.constr lazy_t val coq_c_plus_comm : Term.constr lazy_t val coq_c_red0 : Term.constr lazy_t val coq_c_red1 : Term.constr lazy_t val coq_c_red2 : Term.constr lazy_t val coq_c_red3 : Term.constr lazy_t val coq_c_red4 : Term.constr lazy_t val coq_c_red5 : Term.constr lazy_t val coq_c_red6 : Term.constr lazy_t val coq_c_mult_opp_left : Term.constr lazy_t val coq_c_mult_assoc_reduced : Term.constr lazy_t val coq_c_minus : Term.constr lazy_t val coq_c_mult_comm : Term.constr lazy_t val coq_s_constant_not_nul : Term.constr lazy_t val coq_s_constant_neg : Term.constr lazy_t val coq_s_div_approx : Term.constr lazy_t val coq_s_not_exact_divide : Term.constr lazy_t val coq_s_exact_divide : Term.constr lazy_t val coq_s_sum : Term.constr lazy_t val coq_s_state : Term.constr lazy_t val coq_s_contradiction : Term.constr lazy_t val coq_s_merge_eq : Term.constr lazy_t val coq_s_split_ineq : Term.constr lazy_t val coq_s_constant_nul : Term.constr lazy_t val coq_s_negate_contradict : Term.constr lazy_t val coq_s_negate_contradict_inv : Term.constr lazy_t val coq_direction : Term.constr lazy_t val coq_d_left : Term.constr lazy_t val coq_d_right : Term.constr lazy_t 
val coq_d_mono : Term.constr lazy_t val coq_e_split : Term.constr lazy_t val coq_e_extract : Term.constr lazy_t val coq_e_solve : Term.constr lazy_t val coq_interp_sequent : Term.constr lazy_t val coq_do_omega : Term.constr lazy_t (** Building expressions *) val do_left : Term.constr -> Term.constr val do_right : Term.constr -> Term.constr val do_both : Term.constr -> Term.constr -> Term.constr val do_seq : Term.constr -> Term.constr -> Term.constr val do_list : Term.constr list -> Term.constr val mk_nat : int -> Term.constr val mk_list : Term.constr -> Term.constr list -> Term.constr val mk_plist : Term.types list -> Term.types val mk_shuffle_list : Term.constr list -> Term.constr (** Analyzing a coq term *) (* The generic result shape of the analysis of a term. One-level depth, except when a number is found *) type parse_term = Tplus of Term.constr * Term.constr | Tmult of Term.constr * Term.constr | Tminus of Term.constr * Term.constr | Topp of Term.constr | Tsucc of Term.constr | Tnum of Bigint.bigint | Tother (* The generic result shape of the analysis of a relation. One-level depth. *) type parse_rel = Req of Term.constr * Term.constr | Rne of Term.constr * Term.constr | Rlt of Term.constr * Term.constr | Rle of Term.constr * Term.constr | Rgt of Term.constr * Term.constr | Rge of Term.constr * Term.constr | Rtrue | Rfalse | Rnot of Term.constr | Ror of Term.constr * Term.constr | Rand of Term.constr * Term.constr | Rimp of Term.constr * Term.constr | Riff of Term.constr * Term.constr | Rother (* A module factorizing what we should now about the number representation *) module type Int = sig (* the coq type of the numbers *) val typ : Term.constr Lazy.t (* the operations on the numbers *) val plus : Term.constr Lazy.t val mult : Term.constr Lazy.t val opp : Term.constr Lazy.t val minus : Term.constr Lazy.t (* building a coq number *) val mk : Bigint.bigint -> Term.constr (* parsing a term (one level, except if a number is found) *) val parse_term : Term.constr -> parse_term (* parsing a relation expression, including = < <= >= > *) val parse_rel : Proof_type.goal Tacmach.sigma -> Term.constr -> parse_rel (* Is a particular term only made of numbers and + * - ? *) val is_scalar : Term.constr -> bool end (* Currently, we only use Z numbers *) module Z : Int coq-8.4pl2/plugins/fourier/0000750000175000001440000000000012127276542014772 5ustar notinuserscoq-8.4pl2/plugins/fourier/Fourier.v0000640000175000001440000000151312010532755016565 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* match ie.coef with [] -> raise (Failure "empty ineq") |(c::r) -> if rinf c r0 then pop ie lneg else if rinf r0 c then pop ie lpos else pop ie lnul) s; [!lneg;!lnul;!lpos] ;; (* initialise les histoires d'une liste d'inquations donnes par leurs listes de coefficients et leurs strictitudes (!): (add_hist [(equation 1, s1);...;(quation n, sn)]) = [{quation 1, [1;0;...;0], s1}; {quation 2, [0;1;...;0], s2}; ... 
{quation n, [0;0;...;1], sn}] *) let add_hist le = let n = List.length le in let i=ref 0 in List.map (fun (ie,s) -> let h =ref [] in for k=1 to (n-(!i)-1) do pop r0 h; done; pop r1 h; for k=1 to !i do pop r0 h; done; i:=!i+1; {coef=ie;hist=(!h);strict=s}) le ;; (* additionne deux inquations *) let ie_add ie1 ie2 = {coef=List.map2 rplus ie1.coef ie2.coef; hist=List.map2 rplus ie1.hist ie2.hist; strict=ie1.strict || ie2.strict} ;; (* multiplication d'une inquation par un rationnel (positif) *) let ie_emult a ie = {coef=List.map (fun x -> rmult a x) ie.coef; hist=List.map (fun x -> rmult a x) ie.hist; strict= ie.strict} ;; (* on enlve le premier coefficient *) let ie_tl ie = {coef=List.tl ie.coef;hist=ie.hist;strict=ie.strict} ;; (* le premier coefficient: "tte" de l'inquation *) let hd_coef ie = List.hd ie.coef ;; (* calcule toutes les combinaisons entre inquations de tte ngative et inquations de tte positive qui annulent le premier coefficient. *) let deduce_add lneg lpos = let res=ref [] in List.iter (fun i1 -> List.iter (fun i2 -> let a = rop (hd_coef i1) in let b = hd_coef i2 in pop (ie_tl (ie_add (ie_emult b i1) (ie_emult a i2))) res) lpos) lneg; !res ;; (* limination de la premire variable partir d'une liste d'inquations: opration qu'on itre dans l'algorithme de Fourier. *) let deduce1 s = match (partitionne s) with [lneg;lnul;lpos] -> let lnew = deduce_add lneg lpos in (List.map ie_tl lnul)@lnew |_->assert false ;; (* algorithme de Fourier: on limine successivement toutes les variables. *) let deduce lie = let n = List.length (fst (List.hd lie)) in let lie=ref (add_hist lie) in for i=1 to n-1 do lie:= deduce1 !lie; done; !lie ;; (* donne [] si le systme a des solutions, sinon donne [c,s,lc] o lc est la combinaison linaire des inquations de dpart qui donne 0 < c si s=true ou 0 <= c sinon cette inquation tant absurde. 
*) let unsolvable lie = let lr = deduce lie in let res = ref [] in (try (List.iter (fun e -> match e with {coef=[c];hist=lc;strict=s} -> if (rinf c r0 && (not s)) || (rinfeq c r0 && s) then (res := [c,s,lc]; raise (Failure "contradiction found")) |_->assert false) lr) with e when Errors.noncritical e -> ()); !res ;; (* Exemples: let test1=[[r1;r1;r0],true;[rop r1;r1;r1],false;[r0;rop r1;rop r1],false];; deduce test1;; unsolvable test1;; let test2=[ [r1;r1;r0;r0;r0],false; [r0;r1;r1;r0;r0],false; [r0;r0;r1;r1;r0],false; [r0;r0;r0;r1;r1],false; [r1;r0;r0;r0;r1],false; [rop r1;rop r1;r0;r0;r0],false; [r0;rop r1;rop r1;r0;r0],false; [r0;r0;rop r1;rop r1;r0],false; [r0;r0;r0;rop r1;rop r1],false; [rop r1;r0;r0;r0;rop r1],false ];; deduce test2;; unsolvable test2;; *) coq-8.4pl2/plugins/fourier/vo.itarget0000640000175000001440000000003311307752066016773 0ustar notinusersFourier_util.vo Fourier.vo coq-8.4pl2/plugins/fourier/fourierR.ml0000640000175000001440000005127512121620060017112 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* r0;; let flin_add f x c = let cx = flin_coef f x in Constrhash.remove f.fhom x; Constrhash.add f.fhom x (rplus cx c); f ;; let flin_add_cste f c = {fhom=f.fhom; fcste=rplus f.fcste c} ;; let flin_one () = flin_add_cste (flin_zero()) r1;; let flin_plus f1 f2 = let f3 = flin_zero() in Constrhash.iter (fun x c -> let _=flin_add f3 x c in ()) f1.fhom; Constrhash.iter (fun x c -> let _=flin_add f3 x c in ()) f2.fhom; flin_add_cste (flin_add_cste f3 f1.fcste) f2.fcste; ;; let flin_minus f1 f2 = let f3 = flin_zero() in Constrhash.iter (fun x c -> let _=flin_add f3 x c in ()) f1.fhom; Constrhash.iter (fun x c -> let _=flin_add f3 x (rop c) in ()) f2.fhom; flin_add_cste (flin_add_cste f3 f1.fcste) (rop f2.fcste); ;; let flin_emult a f = let f2 = flin_zero() in Constrhash.iter (fun x c -> let _=flin_add f2 x (rmult a c) in ()) f.fhom; flin_add_cste f2 (rmult a f.fcste); ;; (*****************************************************************************) open Vernacexpr type ineq = Rlt | Rle | Rgt | Rge let string_of_R_constant kn = match Names.repr_con kn with | MPfile dir, sec_dir, id when sec_dir = empty_dirpath && string_of_dirpath dir = "Coq.Reals.Rdefinitions" -> string_of_label id | _ -> "constant_not_of_R" let rec string_of_R_constr c = match kind_of_term c with Cast (c,_,_) -> string_of_R_constr c |Const c -> string_of_R_constant c | _ -> "not_of_constant" let rec rational_of_constr c = match kind_of_term c with | Cast (c,_,_) -> (rational_of_constr c) | App (c,args) -> (match (string_of_R_constr c) with | "Ropp" -> rop (rational_of_constr args.(0)) | "Rinv" -> rinv (rational_of_constr args.(0)) | "Rmult" -> rmult (rational_of_constr args.(0)) (rational_of_constr args.(1)) | "Rdiv" -> rdiv (rational_of_constr args.(0)) (rational_of_constr args.(1)) | "Rplus" -> rplus (rational_of_constr args.(0)) (rational_of_constr args.(1)) | "Rminus" -> rminus (rational_of_constr args.(0)) (rational_of_constr args.(1)) | _ -> failwith "not a rational") | Const kn -> (match (string_of_R_constant kn) with "R1" -> r1 |"R0" -> r0 | _ -> failwith "not a rational") | _ -> failwith "not a rational" ;; let rec flin_of_constr c = try( match kind_of_term c with | Cast (c,_,_) -> (flin_of_constr c) | App (c,args) -> (match (string_of_R_constr c) with "Ropp" -> flin_emult (rop r1) (flin_of_constr args.(0)) | "Rplus"-> flin_plus (flin_of_constr args.(0)) (flin_of_constr args.(1)) | "Rminus"-> 
flin_minus (flin_of_constr args.(0)) (flin_of_constr args.(1)) | "Rmult"-> (try (let a=(rational_of_constr args.(0)) in try (let b = (rational_of_constr args.(1)) in (flin_add_cste (flin_zero()) (rmult a b))) with e when Errors.noncritical e -> (flin_add (flin_zero()) args.(1) a)) with e when Errors.noncritical e -> (flin_add (flin_zero()) args.(0) (rational_of_constr args.(1)))) | "Rinv"-> let a=(rational_of_constr args.(0)) in flin_add_cste (flin_zero()) (rinv a) | "Rdiv"-> (let b=(rational_of_constr args.(1)) in try (let a = (rational_of_constr args.(0)) in (flin_add_cste (flin_zero()) (rdiv a b))) with e when Errors.noncritical e -> (flin_add (flin_zero()) args.(0) (rinv b))) |_->assert false) | Const c -> (match (string_of_R_constant c) with "R1" -> flin_one () |"R0" -> flin_zero () |_-> assert false) |_-> assert false) with e when Errors.noncritical e -> flin_add (flin_zero()) c r1 ;; let flin_to_alist f = let res=ref [] in Constrhash.iter (fun x c -> res:=(c,x)::(!res)) f; !res ;; (* Reprsentation des hypothses qui sont des inquations ou des quations. *) type hineq={hname:constr; (* le nom de l'hypothse *) htype:string; (* Rlt, Rgt, Rle, Rge, eqTLR ou eqTRL *) hleft:constr; hright:constr; hflin:flin; hstrict:bool} ;; (* Transforme une hypothese h:t en inquation flin<0 ou flin<=0 *) let ineq1_of_constr (h,t) = match (kind_of_term t) with App (f,args) -> (match kind_of_term f with Const c when Array.length args = 2 -> let t1= args.(0) in let t2= args.(1) in (match (string_of_R_constant c) with "Rlt" -> [{hname=h; htype="Rlt"; hleft=t1; hright=t2; hflin= flin_minus (flin_of_constr t1) (flin_of_constr t2); hstrict=true}] |"Rgt" -> [{hname=h; htype="Rgt"; hleft=t2; hright=t1; hflin= flin_minus (flin_of_constr t2) (flin_of_constr t1); hstrict=true}] |"Rle" -> [{hname=h; htype="Rle"; hleft=t1; hright=t2; hflin= flin_minus (flin_of_constr t1) (flin_of_constr t2); hstrict=false}] |"Rge" -> [{hname=h; htype="Rge"; hleft=t2; hright=t1; hflin= flin_minus (flin_of_constr t2) (flin_of_constr t1); hstrict=false}] |_->assert false) | Ind (kn,i) -> if IndRef(kn,i) = Coqlib.glob_eq then let t0= args.(0) in let t1= args.(1) in let t2= args.(2) in (match (kind_of_term t0) with Const c -> (match (string_of_R_constant c) with "R"-> [{hname=h; htype="eqTLR"; hleft=t1; hright=t2; hflin= flin_minus (flin_of_constr t1) (flin_of_constr t2); hstrict=false}; {hname=h; htype="eqTRL"; hleft=t2; hright=t1; hflin= flin_minus (flin_of_constr t2) (flin_of_constr t1); hstrict=false}] |_-> assert false) |_-> assert false) else assert false |_-> assert false) |_-> assert false ;; (* Applique la mthode de Fourier une liste d'hypothses (type hineq) *) let fourier_lineq lineq1 = let nvar=ref (-1) in let hvar=Constrhash.create 50 in (* la table des variables des inquations *) List.iter (fun f -> Constrhash.iter (fun x _ -> if not (Constrhash.mem hvar x) then begin nvar:=(!nvar)+1; Constrhash.add hvar x (!nvar) end) f.hflin.fhom) lineq1; let sys= List.map (fun h-> let v=Array.create ((!nvar)+1) r0 in Constrhash.iter (fun x c -> v.(Constrhash.find hvar x)<-c) h.hflin.fhom; ((Array.to_list v)@[rop h.hflin.fcste],h.hstrict)) lineq1 in unsolvable sys ;; (*********************************************************************) (* Defined constants *) let get = Lazy.force let constant = Coqlib.gen_constant "Fourier" (* Standard library *) open Coqlib let coq_sym_eqT = lazy (build_coq_eq_sym ()) let coq_False = lazy (build_coq_False ()) let coq_not = lazy (build_coq_not ()) let coq_eq = lazy (build_coq_eq ()) (* Rdefinitions *) let 
constant_real = constant ["Reals";"Rdefinitions"] let coq_Rlt = lazy (constant_real "Rlt") let coq_Rgt = lazy (constant_real "Rgt") let coq_Rle = lazy (constant_real "Rle") let coq_Rge = lazy (constant_real "Rge") let coq_R = lazy (constant_real "R") let coq_Rminus = lazy (constant_real "Rminus") let coq_Rmult = lazy (constant_real "Rmult") let coq_Rplus = lazy (constant_real "Rplus") let coq_Ropp = lazy (constant_real "Ropp") let coq_Rinv = lazy (constant_real "Rinv") let coq_R0 = lazy (constant_real "R0") let coq_R1 = lazy (constant_real "R1") (* RIneq *) let coq_Rinv_1 = lazy (constant ["Reals";"RIneq"] "Rinv_1") (* Fourier_util *) let constant_fourier = constant ["fourier";"Fourier_util"] let coq_Rlt_zero_1 = lazy (constant_fourier "Rlt_zero_1") let coq_Rlt_zero_pos_plus1 = lazy (constant_fourier "Rlt_zero_pos_plus1") let coq_Rle_zero_pos_plus1 = lazy (constant_fourier "Rle_zero_pos_plus1") let coq_Rlt_mult_inv_pos = lazy (constant_fourier "Rlt_mult_inv_pos") let coq_Rle_zero_zero = lazy (constant_fourier "Rle_zero_zero") let coq_Rle_zero_1 = lazy (constant_fourier "Rle_zero_1") let coq_Rle_mult_inv_pos = lazy (constant_fourier "Rle_mult_inv_pos") let coq_Rnot_lt0 = lazy (constant_fourier "Rnot_lt0") let coq_Rle_not_lt = lazy (constant_fourier "Rle_not_lt") let coq_Rfourier_gt_to_lt = lazy (constant_fourier "Rfourier_gt_to_lt") let coq_Rfourier_ge_to_le = lazy (constant_fourier "Rfourier_ge_to_le") let coq_Rfourier_eqLR_to_le = lazy (constant_fourier "Rfourier_eqLR_to_le") let coq_Rfourier_eqRL_to_le = lazy (constant_fourier "Rfourier_eqRL_to_le") let coq_Rfourier_not_ge_lt = lazy (constant_fourier "Rfourier_not_ge_lt") let coq_Rfourier_not_gt_le = lazy (constant_fourier "Rfourier_not_gt_le") let coq_Rfourier_not_le_gt = lazy (constant_fourier "Rfourier_not_le_gt") let coq_Rfourier_not_lt_ge = lazy (constant_fourier "Rfourier_not_lt_ge") let coq_Rfourier_lt = lazy (constant_fourier "Rfourier_lt") let coq_Rfourier_le = lazy (constant_fourier "Rfourier_le") let coq_Rfourier_lt_lt = lazy (constant_fourier "Rfourier_lt_lt") let coq_Rfourier_lt_le = lazy (constant_fourier "Rfourier_lt_le") let coq_Rfourier_le_lt = lazy (constant_fourier "Rfourier_le_lt") let coq_Rfourier_le_le = lazy (constant_fourier "Rfourier_le_le") let coq_Rnot_lt_lt = lazy (constant_fourier "Rnot_lt_lt") let coq_Rnot_le_le = lazy (constant_fourier "Rnot_le_le") let coq_Rlt_not_le_frac_opp = lazy (constant_fourier "Rlt_not_le_frac_opp") (****************************************************************************** Construction de la preuve en cas de succs de la mthode de Fourier, i.e. on obtient une contradiction. 
*) let is_int x = (x.den)=1 ;; (* fraction = couple (num,den) *) let rec rational_to_fraction x= (x.num,x.den) ;; (* traduction -3 -> (Ropp (Rplus R1 (Rplus R1 R1))) *) let int_to_real n = let nn=abs n in if nn=0 then get coq_R0 else (let s=ref (get coq_R1) in for i=1 to (nn-1) do s:=mkApp (get coq_Rplus,[|get coq_R1;!s|]) done; if n<0 then mkApp (get coq_Ropp, [|!s|]) else !s) ;; (* -1/2 -> (Rmult (Ropp R1) (Rinv (Rplus R1 R1))) *) let rational_to_real x = let (n,d)=rational_to_fraction x in mkApp (get coq_Rmult, [|int_to_real n;mkApp(get coq_Rinv,[|int_to_real d|])|]) ;; (* preuve que 0 False *) let tac_zero_inf_false gl (n,d) = if n=0 then (apply (get coq_Rnot_lt0)) else (tclTHEN (apply (get coq_Rle_not_lt)) (tac_zero_infeq_pos gl (-n,d))) ;; (* preuve que 0<=(-n)*(1/d) => False *) let tac_zero_infeq_false gl (n,d) = (tclTHEN (apply (get coq_Rlt_not_le_frac_opp)) (tac_zero_inf_pos gl (-n,d))) ;; let create_meta () = mkMeta(Evarutil.new_meta());; let my_cut c gl= let concl = pf_concl gl in apply_type (mkProd(Anonymous,c,concl)) [create_meta()] gl ;; let exact = exact_check;; let tac_use h = match h.htype with "Rlt" -> exact h.hname |"Rle" -> exact h.hname |"Rgt" -> (tclTHEN (apply (get coq_Rfourier_gt_to_lt)) (exact h.hname)) |"Rge" -> (tclTHEN (apply (get coq_Rfourier_ge_to_le)) (exact h.hname)) |"eqTLR" -> (tclTHEN (apply (get coq_Rfourier_eqLR_to_le)) (exact h.hname)) |"eqTRL" -> (tclTHEN (apply (get coq_Rfourier_eqRL_to_le)) (exact h.hname)) |_->assert false ;; (* let is_ineq (h,t) = match (kind_of_term t) with App (f,args) -> (match (string_of_R_constr f) with "Rlt" -> true | "Rgt" -> true | "Rle" -> true | "Rge" -> true (* Wrong:not in Rdefinitions: *) | "eqT" -> (match (string_of_R_constr args.(0)) with "R" -> true | _ -> false) | _ ->false) |_->false ;; *) let list_of_sign s = List.map (fun (x,_,z)->(x,z)) s;; let mkAppL a = let l = Array.to_list a in mkApp(List.hd l, Array.of_list (List.tl l)) ;; (* Rsolution d'inquations linaires dans R *) let rec fourier gl= Coqlib.check_required_library ["Coq";"fourier";"Fourier"]; let goal = strip_outer_cast (pf_concl gl) in let fhyp=id_of_string "new_hyp_for_fourier" in (* si le but est une inquation, on introduit son contraire, et le but prouver devient False *) try (let tac = match (kind_of_term goal) with App (f,args) -> (match (string_of_R_constr f) with "Rlt" -> (tclTHEN (tclTHEN (apply (get coq_Rfourier_not_ge_lt)) (intro_using fhyp)) fourier) |"Rle" -> (tclTHEN (tclTHEN (apply (get coq_Rfourier_not_gt_le)) (intro_using fhyp)) fourier) |"Rgt" -> (tclTHEN (tclTHEN (apply (get coq_Rfourier_not_le_gt)) (intro_using fhyp)) fourier) |"Rge" -> (tclTHEN (tclTHEN (apply (get coq_Rfourier_not_lt_ge)) (intro_using fhyp)) fourier) |_->assert false) |_->assert false in tac gl) with e when Errors.noncritical e -> (* les hypothses *) let hyps = List.map (fun (h,t)-> (mkVar h,t)) (list_of_sign (pf_hyps gl)) in let lineq =ref [] in List.iter (fun h -> try (lineq:=(ineq1_of_constr h)@(!lineq)) with e when Errors.noncritical e -> ()) hyps; (* lineq = les inquations dcoulant des hypothses *) if !lineq=[] then Util.error "No inequalities"; let res=fourier_lineq (!lineq) in let tac=ref tclIDTAC in if res=[] then Util.error "fourier failed" (* l'algorithme de Fourier a russi: on va en tirer une preuve Coq *) else (match res with [(cres,sres,lc)]-> (* lc=coefficients multiplicateurs des inquations qui donnent 0 if c<>r0 then (lutil:=(h,c)::(!lutil)(*; print_rational(c);print_string " "*))) (List.combine (!lineq) lc); (* on construit la combinaison linaire 
des inquation *) (match (!lutil) with (h1,c1)::lutil -> let s=ref (h1.hstrict) in let t1=ref (mkAppL [|get coq_Rmult; rational_to_real c1; h1.hleft|]) in let t2=ref (mkAppL [|get coq_Rmult; rational_to_real c1; h1.hright|]) in List.iter (fun (h,c) -> s:=(!s)||(h.hstrict); t1:=(mkAppL [|get coq_Rplus; !t1; mkAppL [|get coq_Rmult; rational_to_real c; h.hleft|] |]); t2:=(mkAppL [|get coq_Rplus; !t2; mkAppL [|get coq_Rmult; rational_to_real c; h.hright|] |])) lutil; let ineq=mkAppL [|if (!s) then get coq_Rlt else get coq_Rle; !t1; !t2 |] in let tc=rational_to_real cres in (* puis sa preuve *) let tac1=ref (if h1.hstrict then (tclTHENS (apply (get coq_Rfourier_lt)) [tac_use h1; tac_zero_inf_pos gl (rational_to_fraction c1)]) else (tclTHENS (apply (get coq_Rfourier_le)) [tac_use h1; tac_zero_inf_pos gl (rational_to_fraction c1)])) in s:=h1.hstrict; List.iter (fun (h,c)-> (if (!s) then (if h.hstrict then tac1:=(tclTHENS (apply (get coq_Rfourier_lt_lt)) [!tac1;tac_use h; tac_zero_inf_pos gl (rational_to_fraction c)]) else tac1:=(tclTHENS (apply (get coq_Rfourier_lt_le)) [!tac1;tac_use h; tac_zero_inf_pos gl (rational_to_fraction c)])) else (if h.hstrict then tac1:=(tclTHENS (apply (get coq_Rfourier_le_lt)) [!tac1;tac_use h; tac_zero_inf_pos gl (rational_to_fraction c)]) else tac1:=(tclTHENS (apply (get coq_Rfourier_le_le)) [!tac1;tac_use h; tac_zero_inf_pos gl (rational_to_fraction c)]))); s:=(!s)||(h.hstrict)) lutil; let tac2= if sres then tac_zero_inf_false gl (rational_to_fraction cres) else tac_zero_infeq_false gl (rational_to_fraction cres) in tac:=(tclTHENS (my_cut ineq) [tclTHEN (change_in_concl None (mkAppL [| get coq_not; ineq|] )) (tclTHEN (apply (if sres then get coq_Rnot_lt_lt else get coq_Rnot_le_le)) (tclTHENS (Equality.replace (mkAppL [|get coq_Rminus;!t2;!t1|] ) tc) [tac2; (tclTHENS (Equality.replace (mkApp (get coq_Rinv, [|get coq_R1|])) (get coq_R1)) (* en attendant Field, a peut aider Ring de remplacer 1/1 par 1 ... *) [tclORELSE (Ring.polynom []) tclIDTAC; (tclTHEN (apply (get coq_sym_eqT)) (apply (get coq_Rinv_1)))] ) ])); !tac1]); tac:=(tclTHENS (cut (get coq_False)) [tclTHEN intro (contradiction None); !tac]) |_-> assert false) |_-> assert false ); (* ((tclTHEN !tac (tclFAIL 1 (* 1 au hasard... *))) gl) *) (!tac gl) (* ((tclABSTRACT None !tac) gl) *) ;; (* let fourier_tac x gl = fourier gl ;; let v_fourier = add_tactic "Fourier" fourier_tac *) coq-8.4pl2/plugins/fourier/fourier_plugin.mllib0000640000175000001440000000005611161000644021027 0ustar notinusersFourier FourierR G_fourier Fourier_plugin_mod coq-8.4pl2/plugins/fourier/Fourier_util.v0000640000175000001440000001170612010532755017627 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* 0 < a -> a * x1 < a * y1. intros; apply Rmult_lt_compat_l; assumption. Qed. Lemma Rfourier_le : forall x1 y1 a:R, x1 <= y1 -> 0 < a -> a * x1 <= a * y1. red. intros. case H; auto with real. Qed. Lemma Rfourier_lt_lt : forall x1 y1 x2 y2 a:R, x1 < y1 -> x2 < y2 -> 0 < a -> x1 + a * x2 < y1 + a * y2. intros x1 y1 x2 y2 a H H0 H1; try assumption. apply Rplus_lt_compat. try exact H. apply Rfourier_lt. try exact H0. try exact H1. Qed. Lemma Rfourier_lt_le : forall x1 y1 x2 y2 a:R, x1 < y1 -> x2 <= y2 -> 0 < a -> x1 + a * x2 < y1 + a * y2. intros x1 y1 x2 y2 a H H0 H1; try assumption. case H0; intros. apply Rplus_lt_compat. try exact H. apply Rfourier_lt; auto with real. rewrite H2. rewrite (Rplus_comm y1 (a * y2)). 
rewrite (Rplus_comm x1 (a * y2)). apply Rplus_lt_compat_l. try exact H. Qed. Lemma Rfourier_le_lt : forall x1 y1 x2 y2 a:R, x1 <= y1 -> x2 < y2 -> 0 < a -> x1 + a * x2 < y1 + a * y2. intros x1 y1 x2 y2 a H H0 H1; try assumption. case H; intros. apply Rfourier_lt_le; auto with real. rewrite H2. apply Rplus_lt_compat_l. apply Rfourier_lt; auto with real. Qed. Lemma Rfourier_le_le : forall x1 y1 x2 y2 a:R, x1 <= y1 -> x2 <= y2 -> 0 < a -> x1 + a * x2 <= y1 + a * y2. intros x1 y1 x2 y2 a H H0 H1; try assumption. case H0; intros. red. left; try assumption. apply Rfourier_le_lt; auto with real. rewrite H2. case H; intros. red. left; try assumption. rewrite (Rplus_comm x1 (a * y2)). rewrite (Rplus_comm y1 (a * y2)). apply Rplus_lt_compat_l. try exact H3. rewrite H3. red. right; try assumption. auto with real. Qed. Lemma Rlt_zero_pos_plus1 : forall x:R, 0 < x -> 0 < 1 + x. intros x H; try assumption. rewrite Rplus_comm. apply Rle_lt_0_plus_1. red; auto with real. Qed. Lemma Rlt_mult_inv_pos : forall x y:R, 0 < x -> 0 < y -> 0 < x * / y. intros x y H H0; try assumption. replace 0 with (x * 0). apply Rmult_lt_compat_l; auto with real. ring. Qed. Lemma Rlt_zero_1 : 0 < 1. exact Rlt_0_1. Qed. Lemma Rle_zero_pos_plus1 : forall x:R, 0 <= x -> 0 <= 1 + x. intros x H; try assumption. case H; intros. red. left; try assumption. apply Rlt_zero_pos_plus1; auto with real. rewrite <- H0. replace (1 + 0) with 1. red; left. exact Rlt_zero_1. ring. Qed. Lemma Rle_mult_inv_pos : forall x y:R, 0 <= x -> 0 < y -> 0 <= x * / y. intros x y H H0; try assumption. case H; intros. red; left. apply Rlt_mult_inv_pos; auto with real. rewrite <- H1. red; right; ring. Qed. Lemma Rle_zero_1 : 0 <= 1. red; left. exact Rlt_zero_1. Qed. Lemma Rle_not_lt : forall n d:R, 0 <= n * / d -> ~ 0 < - n * / d. intros n d H; red; intros H0; try exact H0. generalize (Rgt_not_le 0 (n * / d)). intros H1; elim H1; try assumption. replace (n * / d) with (- - (n * / d)). replace 0 with (- -0). replace (- (n * / d)) with (- n * / d). replace (-0) with 0. red. apply Ropp_gt_lt_contravar. red. exact H0. ring. ring. ring. ring. Qed. Lemma Rnot_lt0 : forall x:R, ~ 0 < 0 * x. intros x; try assumption. replace (0 * x) with 0. apply Rlt_irrefl. ring. Qed. Lemma Rlt_not_le_frac_opp : forall n d:R, 0 < n * / d -> ~ 0 <= - n * / d. intros n d H; try assumption. apply Rgt_not_le. replace 0 with (-0). replace (- n * / d) with (- (n * / d)). apply Ropp_lt_gt_contravar. try exact H. ring. ring. Qed. Lemma Rnot_lt_lt : forall x y:R, ~ 0 < y - x -> ~ x < y. unfold not; intros. apply H. apply Rplus_lt_reg_r with x. replace (x + 0) with x. replace (x + (y - x)) with y. try exact H0. ring. ring. Qed. Lemma Rnot_le_le : forall x y:R, ~ 0 <= y - x -> ~ x <= y. unfold not; intros. apply H. case H0; intros. left. apply Rplus_lt_reg_r with x. replace (x + 0) with x. replace (x + (y - x)) with y. try exact H1. ring. ring. right. rewrite H1; ring. Qed. Lemma Rfourier_gt_to_lt : forall x y:R, y > x -> x < y. unfold Rgt; intros; assumption. Qed. Lemma Rfourier_ge_to_le : forall x y:R, y >= x -> x <= y. intros x y; exact (Rge_le y x). Qed. Lemma Rfourier_eqLR_to_le : forall x y:R, x = y -> x <= y. exact Req_le. Qed. Lemma Rfourier_eqRL_to_le : forall x y:R, y = x -> x <= y. exact Req_le_sym. Qed. Lemma Rfourier_not_ge_lt : forall x y:R, (x >= y -> False) -> x < y. exact Rnot_ge_lt. Qed. Lemma Rfourier_not_gt_le : forall x y:R, (x > y -> False) -> x <= y. exact Rnot_gt_le. Qed. Lemma Rfourier_not_le_gt : forall x y:R, (x <= y -> False) -> x > y. exact Rnot_le_lt. Qed. 
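(* These lemmas are the elementary steps used by the ML tactic (fourierR.ml)
   to build, hypothesis by hypothesis, the positive linear combination found
   by Fourier elimination: [Rfourier_lt]/[Rfourier_le] start the combination,
   [Rfourier_lt_lt], [Rfourier_lt_le], [Rfourier_le_lt] and [Rfourier_le_le]
   add one scaled hypothesis to it, and the [Rlt_zero_*]/[Rle_zero_*] lemmas
   prove positivity of the scalar coefficients.  A hedged sketch of one such
   step, done by hand with coefficient 1:

   Lemma fourier_step_example :
     forall x y : R, x < 1 -> y < 1 -> x + 1 * y < 1 + 1 * 1.
   Proof.
     intros x y Hx Hy.
     apply Rfourier_lt_lt; [ exact Hx | exact Hy | exact Rlt_zero_1 ].
   Qed.
*)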
Lemma Rfourier_not_lt_ge : forall x y:R, (x < y -> False) -> x >= y. exact Rnot_lt_ge. Qed. coq-8.4pl2/plugins/fourier/g_fourier.ml40000640000175000001440000000120212010532755017355 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ fourier ] END coq-8.4pl2/plugins/decl_mode/0000750000175000001440000000000012127276542015232 5ustar notinuserscoq-8.4pl2/plugins/decl_mode/decl_interp.ml0000640000175000001440000004036612010532755020056 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Thesis n | This c -> This (intern_constr globs c) let add_var id globs= let l1,l2=globs.ltacvars in {globs with ltacvars= (id::l1),(id::l2)} let add_name nam globs= match nam with Anonymous -> globs | Name id -> add_var id globs let intern_hyp iconstr globs = function Hvar (loc,(id,topt)) -> add_var id globs, Hvar (loc,(id,Option.map (intern_constr globs) topt)) | Hprop st -> add_name st.st_label globs, Hprop (intern_statement iconstr globs st) let intern_hyps iconstr globs hyps = snd (list_fold_map (intern_hyp iconstr) globs hyps) let intern_cut intern_it globs cut= let nglobs,nstat=intern_it globs cut.cut_stat in {cut_stat=nstat; cut_by=intern_justification_items nglobs cut.cut_by; cut_using=intern_justification_method nglobs cut.cut_using} let intern_casee globs = function Real c -> Real (intern_constr globs c) | Virtual cut -> Virtual (intern_cut (intern_no_bind (intern_statement intern_constr)) globs cut) let intern_hyp_list args globs = let intern_one globs (loc,(id,opttyp)) = (add_var id globs), (loc,(id,Option.map (intern_constr globs) opttyp)) in list_fold_map intern_one globs args let intern_suffices_clause globs (hyps,c) = let nglobs,nhyps = list_fold_map (intern_hyp intern_constr) globs hyps in nglobs,(nhyps,intern_constr_or_thesis nglobs c) let intern_fundecl args body globs= let nglobs,nargs = intern_hyp_list args globs in nargs,intern_constr nglobs body let rec add_vars_of_simple_pattern globs = function CPatAlias (loc,p,id) -> add_vars_of_simple_pattern (add_var id globs) p (* Loc.raise loc (UserError ("simple_pattern",str "\"as\" is not allowed here"))*) | CPatOr (loc, _)-> Loc.raise loc (UserError ("simple_pattern",str "\"(_ | _)\" is not allowed here")) | CPatDelimiters (_,_,p) -> add_vars_of_simple_pattern globs p | CPatCstr (_,_,pl) | CPatCstrExpl (_,_,pl) -> List.fold_left add_vars_of_simple_pattern globs pl | CPatNotation(_,_,(pl,pll)) -> List.fold_left add_vars_of_simple_pattern globs (List.flatten (pl::pll)) | CPatAtom (_,Some (Libnames.Ident (_,id))) -> add_var id globs | _ -> globs let rec intern_bare_proof_instr globs = function Pthus i -> Pthus (intern_bare_proof_instr globs i) | Pthen i -> Pthen (intern_bare_proof_instr globs i) | Phence i -> Phence (intern_bare_proof_instr globs i) | Pcut c -> Pcut (intern_cut (intern_no_bind (intern_statement intern_constr_or_thesis)) globs c) | Psuffices c -> Psuffices (intern_cut intern_suffices_clause globs c) | Prew (s,c) -> Prew (s,intern_cut (intern_no_bind (intern_statement intern_constr)) globs c) | Psuppose hyps -> Psuppose (intern_hyps intern_constr globs hyps) | Pcase (params,pat,hyps) -> let nglobs,nparams = intern_hyp_list params globs in let nnglobs= add_vars_of_simple_pattern nglobs pat in let nhyps = intern_hyps intern_constr_or_thesis nnglobs hyps in Pcase (nparams,pat,nhyps) | Ptake witl -> Ptake (List.map (intern_constr 
globs) witl) | Pconsider (c,hyps) -> Pconsider (intern_constr globs c, intern_hyps intern_constr globs hyps) | Pper (et,c) -> Pper (et,intern_casee globs c) | Pend bt -> Pend bt | Pescape -> Pescape | Passume hyps -> Passume (intern_hyps intern_constr globs hyps) | Pgiven hyps -> Pgiven (intern_hyps intern_constr globs hyps) | Plet hyps -> Plet (intern_hyps intern_constr globs hyps) | Pclaim st -> Pclaim (intern_statement intern_constr globs st) | Pfocus st -> Pfocus (intern_statement intern_constr globs st) | Pdefine (id,args,body) -> let nargs,nbody = intern_fundecl args body globs in Pdefine (id,nargs,nbody) | Pcast (id,typ) -> Pcast (id,intern_constr globs typ) let rec intern_proof_instr globs instr= {emph = instr.emph; instr = intern_bare_proof_instr globs instr.instr} (* INTERP *) let interp_justification_items sigma env = Option.map (List.map (fun c ->understand sigma env (fst c))) let interp_constr check_sort sigma env c = if check_sort then understand_type sigma env (fst c) else understand sigma env (fst c) let special_whd env = let infos=Closure.create_clos_infos Closure.betadeltaiota env in (fun t -> Closure.whd_val infos (Closure.inject t)) let _eq = Libnames.constr_of_global (Coqlib.glob_eq) let decompose_eq env id = let typ = Environ.named_type id env in let whd = special_whd env typ in match kind_of_term whd with App (f,args)-> if eq_constr f _eq && (Array.length args)=3 then args.(0) else error "Previous step is not an equality." | _ -> error "Previous step is not an equality." let get_eq_typ info env = let typ = decompose_eq env (get_last env) in typ let interp_constr_in_type typ sigma env c = understand sigma env (fst c) ~expected_type:typ let interp_statement interp_it sigma env st = {st_label=st.st_label; st_it=interp_it sigma env st.st_it} let interp_constr_or_thesis check_sort sigma env = function Thesis n -> Thesis n | This c -> This (interp_constr check_sort sigma env c) let abstract_one_hyp inject h glob = match h with Hvar (loc,(id,None)) -> GProd (dummy_loc,Name id, Explicit, GHole (loc,Evd.BinderType (Name id)), glob) | Hvar (loc,(id,Some typ)) -> GProd (dummy_loc,Name id, Explicit, fst typ, glob) | Hprop st -> GProd (dummy_loc,st.st_label, Explicit, inject st.st_it, glob) let glob_constr_of_hyps inject hyps head = List.fold_right (abstract_one_hyp inject) hyps head let glob_prop = GSort (dummy_loc,GProp Null) let rec match_hyps blend names constr = function [] -> [],substl names constr | hyp::q -> let (name,typ,body)=destProd constr in let st= {st_label=name;st_it=substl names typ} in let qnames= match name with Anonymous -> mkMeta 0 :: names | Name id -> mkVar id :: names in let qhyp = match hyp with Hprop st' -> Hprop (blend st st') | Hvar _ -> Hvar st in let rhyps,head = match_hyps blend qnames body q in qhyp::rhyps,head let interp_hyps_gen inject blend sigma env hyps head = let constr=understand sigma env (glob_constr_of_hyps inject hyps head) in match_hyps blend [] constr hyps let interp_hyps sigma env hyps = fst (interp_hyps_gen fst (fun x _ -> x) sigma env hyps glob_prop) let dummy_prefix= id_of_string "__" let rec deanonymize ids = function PatVar (loc,Anonymous) -> let (found,known) = !ids in let new_id=Namegen.next_ident_away dummy_prefix known in let _= ids:= (loc,new_id) :: found , new_id :: known in PatVar (loc,Name new_id) | PatVar (loc,Name id) as pat -> let (found,known) = !ids in let _= ids:= (loc,id) :: found , known in pat | PatCstr(loc,cstr,lpat,nam) -> PatCstr(loc,cstr,List.map (deanonymize ids) lpat,nam) let rec glob_of_pat = function 
PatVar (loc,Anonymous) -> anomaly "Anonymous pattern variable" | PatVar (loc,Name id) -> GVar (loc,id) | PatCstr(loc,((ind,_) as cstr),lpat,_) -> let mind= fst (Global.lookup_inductive ind) in let rec add_params n q = if n<=0 then q else add_params (pred n) (GHole(dummy_loc, Evd.TomatchTypeParameter(ind,n))::q) in let args = List.map glob_of_pat lpat in glob_app(loc,GRef(dummy_loc,Libnames.ConstructRef cstr), add_params mind.Declarations.mind_nparams args) let prod_one_hyp = function (loc,(id,None)) -> (fun glob -> GProd (dummy_loc,Name id, Explicit, GHole (loc,Evd.BinderType (Name id)), glob)) | (loc,(id,Some typ)) -> (fun glob -> GProd (dummy_loc,Name id, Explicit, fst typ, glob)) let prod_one_id (loc,id) glob = GProd (dummy_loc,Name id, Explicit, GHole (loc,Evd.BinderType (Name id)), glob) let let_in_one_alias (id,pat) glob = GLetIn (dummy_loc,Name id, glob_of_pat pat, glob) let rec bind_primary_aliases map pat = match pat with PatVar (_,_) -> map | PatCstr(loc,_,lpat,nam) -> let map1 = match nam with Anonymous -> map | Name id -> (id,pat)::map in List.fold_left bind_primary_aliases map1 lpat let bind_secondary_aliases map subst = List.fold_left (fun map (ids,idp) -> (ids,List.assoc idp map)::map) map subst let bind_aliases patvars subst patt = let map = bind_primary_aliases [] patt in let map1 = bind_secondary_aliases map subst in List.rev map1 let interp_pattern env pat_expr = let patvars,pats = Constrintern.intern_pattern env pat_expr in match pats with [] -> anomaly "empty pattern list" | [subst,patt] -> (patvars,bind_aliases patvars subst patt,patt) | _ -> anomaly "undetected disjunctive pattern" let rec match_args dest names constr = function [] -> [],names,substl names constr | _::q -> let (name,typ,body)=dest constr in let st={st_label=name;st_it=substl names typ} in let qnames= match name with Anonymous -> assert false | Name id -> mkVar id :: names in let args,bnames,body = match_args dest qnames body q in st::args,bnames,body let rec match_aliases names constr = function [] -> [],names,substl names constr | _::q -> let (name,c,typ,body)=destLetIn constr in let st={st_label=name;st_it=(substl names c,substl names typ)} in let qnames= match name with Anonymous -> assert false | Name id -> mkVar id :: names in let args,bnames,body = match_aliases qnames body q in st::args,bnames,body let detype_ground c = Detyping.detype false [] [] c let interp_cases info sigma env params (pat:cases_pattern_expr) hyps = let et,pinfo = match info.pm_stack with Per(et,pi,_,_)::_ -> et,pi | _ -> error "No proof per cases/induction/inversion in progress." 
in let mib,oib=Global.lookup_inductive pinfo.per_ind in let num_params = pinfo.per_nparams in let _ = let expected = mib.Declarations.mind_nparams - num_params in if List.length params <> expected then errorlabstrm "suppose it is" (str "Wrong number of extra arguments: " ++ (if expected = 0 then str "none" else int expected) ++ spc () ++ str "expected.") in let app_ind = let rind = GRef (dummy_loc,Libnames.IndRef pinfo.per_ind) in let rparams = List.map detype_ground pinfo.per_params in let rparams_rec = List.map (fun (loc,(id,_)) -> GVar (loc,id)) params in let dum_args= list_tabulate (fun _ -> GHole (dummy_loc,Evd.QuestionMark (Evd.Define false))) oib.Declarations.mind_nrealargs in glob_app(dummy_loc,rind,rparams@rparams_rec@dum_args) in let pat_vars,aliases,patt = interp_pattern env pat in let inject = function Thesis (Plain) -> Glob_term.GSort(dummy_loc,GProp Null) | Thesis (For rec_occ) -> if not (List.mem rec_occ pat_vars) then errorlabstrm "suppose it is" (str "Variable " ++ Nameops.pr_id rec_occ ++ str " does not occur in pattern."); Glob_term.GSort(dummy_loc,GProp Null) | This (c,_) -> c in let term1 = glob_constr_of_hyps inject hyps glob_prop in let loc_ids,npatt = let rids=ref ([],pat_vars) in let npatt= deanonymize rids patt in List.rev (fst !rids),npatt in let term2 = GLetIn(dummy_loc,Anonymous, GCast(dummy_loc,glob_of_pat npatt, CastConv (DEFAULTcast,app_ind)),term1) in let term3=List.fold_right let_in_one_alias aliases term2 in let term4=List.fold_right prod_one_id loc_ids term3 in let term5=List.fold_right prod_one_hyp params term4 in let constr = understand sigma env term5 in let tparams,nam4,rest4 = match_args destProd [] constr params in let tpatvars,nam3,rest3 = match_args destProd nam4 rest4 loc_ids in let taliases,nam2,rest2 = match_aliases nam3 rest3 aliases in let (_,pat_pat,pat_typ,rest1) = destLetIn rest2 in let blend st st' = match st'.st_it with Thesis nam -> {st_it=Thesis nam;st_label=st'.st_label} | This _ -> {st_it = This st.st_it;st_label=st.st_label} in let thyps = fst (match_hyps blend nam2 (Termops.pop rest1) hyps) in tparams,{pat_vars=tpatvars; pat_aliases=taliases; pat_constr=pat_pat; pat_typ=pat_typ; pat_pat=patt; pat_expr=pat},thyps let interp_cut interp_it sigma env cut= let nenv,nstat = interp_it sigma env cut.cut_stat in {cut with cut_stat=nstat; cut_by=interp_justification_items sigma nenv cut.cut_by} let interp_no_bind interp_it sigma env x = env,interp_it sigma env x let interp_suffices_clause sigma env (hyps,cot)= let (locvars,_) as res = match cot with This (c,_) -> let nhyps,nc = interp_hyps_gen fst (fun x _ -> x) sigma env hyps c in nhyps,This nc | Thesis Plain as th -> interp_hyps sigma env hyps,th | Thesis (For n) -> error "\"thesis for\" is not applicable here." 
in let push_one hyp env0 = match hyp with (Hprop st | Hvar st) -> match st.st_label with Name id -> Environ.push_named (id,None,st.st_it) env0 | _ -> env in let nenv = List.fold_right push_one locvars env in nenv,res let interp_casee sigma env = function Real c -> Real (understand sigma env (fst c)) | Virtual cut -> Virtual (interp_cut (interp_no_bind (interp_statement (interp_constr true))) sigma env cut) let abstract_one_arg = function (loc,(id,None)) -> (fun glob -> GLambda (dummy_loc,Name id, Explicit, GHole (loc,Evd.BinderType (Name id)), glob)) | (loc,(id,Some typ)) -> (fun glob -> GLambda (dummy_loc,Name id, Explicit, fst typ, glob)) let glob_constr_of_fun args body = List.fold_right abstract_one_arg args (fst body) let interp_fun sigma env args body = let constr=understand sigma env (glob_constr_of_fun args body) in match_args destLambda [] constr args let rec interp_bare_proof_instr info (sigma:Evd.evar_map) (env:Environ.env) = function Pthus i -> Pthus (interp_bare_proof_instr info sigma env i) | Pthen i -> Pthen (interp_bare_proof_instr info sigma env i) | Phence i -> Phence (interp_bare_proof_instr info sigma env i) | Pcut c -> Pcut (interp_cut (interp_no_bind (interp_statement (interp_constr_or_thesis true))) sigma env c) | Psuffices c -> Psuffices (interp_cut interp_suffices_clause sigma env c) | Prew (s,c) -> Prew (s,interp_cut (interp_no_bind (interp_statement (interp_constr_in_type (get_eq_typ info env)))) sigma env c) | Psuppose hyps -> Psuppose (interp_hyps sigma env hyps) | Pcase (params,pat,hyps) -> let tparams,tpat,thyps = interp_cases info sigma env params pat hyps in Pcase (tparams,tpat,thyps) | Ptake witl -> Ptake (List.map (fun c -> understand sigma env (fst c)) witl) | Pconsider (c,hyps) -> Pconsider (interp_constr false sigma env c, interp_hyps sigma env hyps) | Pper (et,c) -> Pper (et,interp_casee sigma env c) | Pend bt -> Pend bt | Pescape -> Pescape | Passume hyps -> Passume (interp_hyps sigma env hyps) | Pgiven hyps -> Pgiven (interp_hyps sigma env hyps) | Plet hyps -> Plet (interp_hyps sigma env hyps) | Pclaim st -> Pclaim (interp_statement (interp_constr true) sigma env st) | Pfocus st -> Pfocus (interp_statement (interp_constr true) sigma env st) | Pdefine (id,args,body) -> let nargs,_,nbody = interp_fun sigma env args body in Pdefine (id,nargs,nbody) | Pcast (id,typ) -> Pcast(id,interp_constr true sigma env typ) let rec interp_proof_instr info sigma env instr= {emph = instr.emph; instr = interp_bare_proof_instr info sigma env instr.instr} coq-8.4pl2/plugins/decl_mode/decl_mode.ml0000640000175000001440000000607512121620060017466 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Mode_none let check_not_proof_mode str = if get_current_mode () = Mode_proof then error str let get_info sigma gl= match info.get (Goal.V82.extra sigma gl) with | None -> invalid_arg "get_info" | Some pm -> pm let try_get_info sigma gl = info.get (Goal.V82.extra sigma gl) let get_stack pts = let { it = goals ; sigma = sigma } = Proof.V82.subgoals pts in let info = get_info sigma (List.hd goals) in info.pm_stack let proof_focus = Proof.new_focus_kind () let proof_cond = Proof.no_cond proof_focus let focus p = let inf = get_stack p in Proof.focus proof_cond inf 1 p let unfocus = Proof.unfocus proof_focus let maximal_unfocus = Proof_global.maximal_unfocus proof_focus let get_top_stack pts = try Proof.get_at_focus proof_focus pts with Proof.NoSuchFocus -> let { it = gl ; sigma = 
sigma } = Proof.V82.top_goal pts in let info = get_info sigma gl in info.pm_stack let get_last env = try let (id,_,_) = List.hd (Environ.named_context env) in id with Invalid_argument _ -> error "no previous statement to use" coq-8.4pl2/plugins/decl_mode/decl_proof_instr.ml0000640000175000001440000013065212121620060021105 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (!strictness)),(fun b -> strictness:=b) let _ = declare_bool_option { optsync = true; optdepr = false; optname = "strict mode"; optkey = ["Strict";"Proofs"]; optread = get_strictness; optwrite = set_strictness } let tcl_change_info_gen info_gen = (fun gls -> let concl = pf_concl gls in let hyps = Goal.V82.hyps (project gls) (sig_it gls) in let extra = Goal.V82.extra (project gls) (sig_it gls) in let (gl,ev,sigma) = Goal.V82.mk_goal (project gls) hyps concl (info_gen extra) in let sigma = Goal.V82.partial_solution sigma (sig_it gls) ev in { it = [gl] ; sigma= sigma } ) open Store.Field let tcl_change_info info gls = let info_gen = Decl_mode.info.set info in tcl_change_info_gen info_gen gls let tcl_erase_info gls = tcl_change_info_gen (Decl_mode.info.remove) gls let special_whd gl= let infos=Closure.create_clos_infos Closure.betadeltaiota (pf_env gl) in (fun t -> Closure.whd_val infos (Closure.inject t)) let special_nf gl= let infos=Closure.create_clos_infos Closure.betaiotazeta (pf_env gl) in (fun t -> Closure.norm_val infos (Closure.inject t)) let is_good_inductive env ind = let mib,oib = Inductive.lookup_mind_specif env ind in oib.mind_nrealargs = 0 && not (Inductiveops.mis_is_recursive (ind,mib,oib)) let check_not_per pts = if not (Proof.is_done pts) then match get_stack pts with Per (_,_,_,_)::_ -> error "You are inside a proof per cases/induction.\n\ Please \"suppose\" something or \"end\" it now." | _ -> () let mk_evd metalist gls = let evd0= create_goal_evar_defs (sig_sig gls) in let add_one (meta,typ) evd = meta_declare meta typ evd in List.fold_right add_one metalist evd0 let is_tmp id = (string_of_id id).[0] = '_' let tmp_ids gls = let ctx = pf_hyps gls in match ctx with [] -> [] | _::q -> List.filter is_tmp (ids_of_named_context q) let clean_tmp gls = let clean_id id0 gls0 = tclTRY (clear [id0]) gls0 in let rec clean_all = function [] -> tclIDTAC | id :: rest -> tclTHEN (clean_id id) (clean_all rest) in clean_all (tmp_ids gls) gls let assert_postpone id t = assert_tac (Name id) t (* start a proof *) let start_proof_tac gls= let info={pm_stack=[]} in tcl_change_info info gls let go_to_proof_mode () = Pfedit.by start_proof_tac; let p = Proof_global.give_me_the_proof () in Decl_mode.focus p (* closing gaps *) let daimon_tac gls = set_daimon_flag (); {it=[];sigma=sig_sig gls} (* marking closed blocks *) let rec is_focussing_instr = function Pthus i | Pthen i | Phence i -> is_focussing_instr i | Pescape | Pper _ | Pclaim _ | Pfocus _ | Psuppose _ | Pcase (_,_,_) -> true | _ -> false let mark_rule_as_done = function Decl_proof true -> Decl_proof false | Decl_proof false -> anomaly "already marked as done" | _ -> anomaly "mark_rule_as_done" (* post-instruction focus management *) (* spiwack: This used to fail if there was no focusing command above, but I don't think it ever happened. 
I hope it doesn't mess things up*) let goto_current_focus pts = Decl_mode.maximal_unfocus pts let goto_current_focus_or_top pts = goto_current_focus pts (* return *) let close_tactic_mode pts = try goto_current_focus pts with Not_found -> error "\"return\" cannot be used outside of Declarative Proof Mode." let return_from_tactic_mode () = close_tactic_mode (Proof_global.give_me_the_proof ()) (* end proof/claim *) let close_block bt pts = if Proof.no_focused_goal pts then goto_current_focus pts else let stack = if Proof.is_done pts then get_top_stack pts else get_stack pts in match bt,stack with B_claim, Claim::_ | B_focus, Focus_claim::_ | B_proof, [] -> (goto_current_focus pts) | _, Claim::_ -> error "\"end claim\" expected." | _, Focus_claim::_ -> error "\"end focus\" expected." | _, [] -> error "\"end proof\" expected." | _, (Per (et,_,_,_)::_|Suppose_case::Per (et,_,_,_)::_) -> begin match et with ET_Case_analysis -> error "\"end cases\" expected." | ET_Induction -> error "\"end induction\" expected." end | _,_ -> anomaly "Lonely suppose on stack." (* utility for suppose / suppose it is *) let close_previous_case pts = if Proof.is_done pts then match get_top_stack pts with Per (et,_,_,_) :: _ -> anomaly "Weird case occured ..." | Suppose_case :: Per (et,_,_,_) :: _ -> goto_current_focus (pts) | _ -> error "Not inside a proof per cases or induction." else match get_stack pts with Per (et,_,_,_) :: _ -> () | Suppose_case :: Per (et,_,_,_) :: _ -> goto_current_focus ((pts)) | _ -> error "Not inside a proof per cases or induction." (* Proof instructions *) (* automation *) let filter_hyps f gls = let filter_aux (id,_,_) = if f id then tclIDTAC else tclTRY (clear [id]) in tclMAP filter_aux (pf_hyps gls) gls let local_hyp_prefix = id_of_string "___" let add_justification_hyps keep items gls = let add_aux c gls= match kind_of_term c with Var id -> keep:=Idset.add id !keep; tclIDTAC gls | _ -> let id=pf_get_new_id local_hyp_prefix gls in keep:=Idset.add id !keep; tclTHEN (letin_tac None (Names.Name id) c None Tacexpr.nowhere) (thin_body [id]) gls in tclMAP add_aux items gls let prepare_goal items gls = let tokeep = ref Idset.empty in let auxres = add_justification_hyps tokeep items gls in tclTHENLIST [ (fun _ -> auxres); filter_hyps (let keep = !tokeep in fun id -> Idset.mem id keep)] gls let my_automation_tac = ref (fun gls -> anomaly "No automation registered") let register_automation_tac tac = my_automation_tac:= tac let automation_tac gls = !my_automation_tac gls let justification tac gls= tclORELSE (tclSOLVE [tclTHEN tac assumption]) (fun gls -> if get_strictness () then error "Insufficient justification." 
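(* Justification of a step, roughly: [prepare_goal] above keeps only the facts
   explicitly cited after [by] (every other hypothesis is cleared) and the
   registered automation is then run on that restricted goal.  In strict mode
   a failed justification is an error; otherwise (the branch just below) it
   only warns, and the goal is dropped by [daimon_tac], which records via the
   daimon flag that an unjustified step was taken. *)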
else begin msg_warning (str "Insufficient justification."); daimon_tac gls end) gls let default_justification elems gls= justification (tclTHEN (prepare_goal elems) automation_tac) gls (* code for conclusion refining *) let constant dir s = lazy (Coqlib.gen_constant "Declarative" dir s) let _and = constant ["Init";"Logic"] "and" let _and_rect = constant ["Init";"Logic"] "and_rect" let _prod = constant ["Init";"Datatypes"] "prod" let _prod_rect = constant ["Init";"Datatypes"] "prod_rect" let _ex = constant ["Init";"Logic"] "ex" let _ex_ind = constant ["Init";"Logic"] "ex_ind" let _sig = constant ["Init";"Specif"] "sig" let _sig_rect = constant ["Init";"Specif"] "sig_rect" let _sigT = constant ["Init";"Specif"] "sigT" let _sigT_rect = constant ["Init";"Specif"] "sigT_rect" type stackd_elt = {se_meta:metavariable; se_type:types; se_last_meta:metavariable; se_meta_list:(metavariable*types) list; se_evd: evar_map} let rec replace_in_list m l = function [] -> raise Not_found | c::q -> if m=fst c then l@q else c::replace_in_list m l q let enstack_subsubgoals env se stack gls= let hd,params = decompose_app (special_whd gls se.se_type) in match kind_of_term hd with Ind ind when is_good_inductive env ind -> let mib,oib= Inductive.lookup_mind_specif env ind in let gentypes= Inductive.arities_of_constructors ind (mib,oib) in let process i gentyp = let constructor = mkConstruct(ind,succ i) (* constructors numbering*) in let appterm = applist (constructor,params) in let apptype = Term.prod_applist gentyp params in let rc,_ = Reduction.dest_prod env apptype in let rec meta_aux last lenv = function [] -> (last,lenv,[]) | (nam,_,typ)::q -> let nlast=succ last in let (llast,holes,metas) = meta_aux nlast (mkMeta nlast :: lenv) q in (llast,holes,(nlast,special_nf gls (substl lenv typ))::metas) in let (nlast,holes,nmetas) = meta_aux se.se_last_meta [] (List.rev rc) in let refiner = applist (appterm,List.rev holes) in let evd = meta_assign se.se_meta (refiner,(Conv,TypeProcessed (* ? *))) se.se_evd in let ncreated = replace_in_list se.se_meta nmetas se.se_meta_list in let evd0 = List.fold_left (fun evd (m,typ) -> meta_declare m typ evd) evd nmetas in List.iter (fun (m,typ) -> Stack.push {se_meta=m; se_type=typ; se_evd=evd0; se_meta_list=ncreated; se_last_meta=nlast} stack) (List.rev nmetas) in Array.iteri process gentypes | _ -> () let rec nf_list evd = function [] -> [] | (m,typ)::others -> if meta_defined evd m then nf_list evd others else (m,nf_meta evd typ)::nf_list evd others let find_subsubgoal c ctyp skip submetas gls = let env= pf_env gls in let concl = pf_concl gls in let evd = mk_evd ((0,concl)::submetas) gls in let stack = Stack.create () in let max_meta = List.fold_left (fun a (m,_) -> max a m) 0 submetas in let _ = Stack.push {se_meta=0; se_type=concl; se_last_meta=max_meta; se_meta_list=[0,concl]; se_evd=evd} stack in let rec dfs n = let se = Stack.pop stack in try let unifier = Unification.w_unify env se.se_evd Reduction.CUMUL ~flags:Unification.elim_flags ctyp se.se_type in if n <= 0 then {se with se_evd=meta_assign se.se_meta (c,(Conv,TypeNotProcessed (* ?? 
*))) unifier; se_meta_list=replace_in_list se.se_meta submetas se.se_meta_list} else dfs (pred n) with e when Errors.noncritical e -> begin enstack_subsubgoals env se stack gls; dfs n end in let nse= try dfs skip with Stack.Empty -> raise Not_found in nf_list nse.se_evd nse.se_meta_list,nf_meta nse.se_evd (mkMeta 0) let concl_refiner metas body gls = let concl = pf_concl gls in let evd = sig_sig gls in let env = pf_env gls in let sort = family_of_sort (Typing.sort_of env evd concl) in let rec aux env avoid subst = function [] -> anomaly "concl_refiner: cannot happen" | (n,typ)::rest -> let _A = subst_meta subst typ in let x = id_of_name_using_hdchar env _A Anonymous in let _x = fresh_id avoid x gls in let nenv = Environ.push_named (_x,None,_A) env in let asort = family_of_sort (Typing.sort_of nenv evd _A) in let nsubst = (n,mkVar _x)::subst in if rest = [] then asort,_A,mkNamedLambda _x _A (subst_meta nsubst body) else let bsort,_B,nbody = aux nenv (_x::avoid) ((n,mkVar _x)::subst) rest in let body = mkNamedLambda _x _A nbody in if occur_term (mkVar _x) _B then begin let _P = mkNamedLambda _x _A _B in match bsort,sort with InProp,InProp -> let _AxB = mkApp(Lazy.force _ex,[|_A;_P|]) in InProp,_AxB, mkApp(Lazy.force _ex_ind,[|_A;_P;concl;body|]) | InProp,_ -> let _AxB = mkApp(Lazy.force _sig,[|_A;_P|]) in let _P0 = mkLambda(Anonymous,_AxB,concl) in InType,_AxB, mkApp(Lazy.force _sig_rect,[|_A;_P;_P0;body|]) | _,_ -> let _AxB = mkApp(Lazy.force _sigT,[|_A;_P|]) in let _P0 = mkLambda(Anonymous,_AxB,concl) in InType,_AxB, mkApp(Lazy.force _sigT_rect,[|_A;_P;_P0;body|]) end else begin match asort,bsort with InProp,InProp -> let _AxB = mkApp(Lazy.force _and,[|_A;_B|]) in InProp,_AxB, mkApp(Lazy.force _and_rect,[|_A;_B;concl;body|]) |_,_ -> let _AxB = mkApp(Lazy.force _prod,[|_A;_B|]) in let _P0 = mkLambda(Anonymous,_AxB,concl) in InType,_AxB, mkApp(Lazy.force _prod_rect,[|_A;_B;_P0;body|]) end in let (_,_,prf) = aux env [] [] metas in mkApp(prf,[|mkMeta 1|]) let thus_tac c ctyp submetas gls = let list,proof = try find_subsubgoal c ctyp 0 submetas gls with Not_found -> error "I could not relate this statement to the thesis." in if list = [] then exact_check proof gls else let refiner = concl_refiner list proof gls in Tactics.refine refiner gls (* general forward step *) let mk_stat_or_thesis info gls = function This c -> c | Thesis (For _ ) -> error "\"thesis for ...\" is not applicable here." 
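(* [mk_stat_or_thesis]: a plain thesis stands for the current conclusion
   (next branch), whereas [thesis for x] only makes sense inside a
   [per induction] block, hence the error above.  The forward steps
   (have / then / thus / hence) below all reduce to [instr_cut]: assert the
   stated fact, prove it with [just_tac] (adding the previous fact to the
   cited items when the step is a then/hence), and, for thus/hence, relate
   the new fact to the conclusion with [thus_tac]. *)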
| Thesis Plain -> pf_concl gls let just_tac _then cut info gls0 = let last_item = if _then then let last_id = try get_last (pf_env gls0) with Failure _ -> error "\"then\" and \"hence\" require at least one previous fact" in [mkVar last_id] else [] in let items_tac gls = match cut.cut_by with None -> tclIDTAC gls | Some items -> prepare_goal (last_item@items) gls in let method_tac gls = match cut.cut_using with None -> automation_tac gls | Some tac -> (Tacinterp.eval_tactic tac) gls in justification (tclTHEN items_tac method_tac) gls0 let instr_cut mkstat _thus _then cut gls0 = let info = get_its_info gls0 in let stat = cut.cut_stat in let (c_id,_) = match stat.st_label with Anonymous -> pf_get_new_id (id_of_string "_fact") gls0,false | Name id -> id,true in let c_stat = mkstat info gls0 stat.st_it in let thus_tac gls= if _thus then thus_tac (mkVar c_id) c_stat [] gls else tclIDTAC gls in tclTHENS (assert_postpone c_id c_stat) [tclTHEN tcl_erase_info (just_tac _then cut info); thus_tac] gls0 (* iterated equality *) let _eq = Libnames.constr_of_global (Coqlib.glob_eq) let decompose_eq id gls = let typ = pf_get_hyp_typ gls id in let whd = (special_whd gls typ) in match kind_of_term whd with App (f,args)-> if eq_constr f _eq && (Array.length args)=3 then (args.(0), args.(1), args.(2)) else error "Previous step is not an equality." | _ -> error "Previous step is not an equality." let instr_rew _thus rew_side cut gls0 = let last_id = try get_last (pf_env gls0) with e when Errors.noncritical e -> error "No previous equality." in let typ,lhs,rhs = decompose_eq last_id gls0 in let items_tac gls = match cut.cut_by with None -> tclIDTAC gls | Some items -> prepare_goal items gls in let method_tac gls = match cut.cut_using with None -> automation_tac gls | Some tac -> (Tacinterp.eval_tactic tac) gls in let just_tac gls = justification (tclTHEN items_tac method_tac) gls in let (c_id,_) = match cut.cut_stat.st_label with Anonymous -> pf_get_new_id (id_of_string "_eq") gls0,false | Name id -> id,true in let thus_tac new_eq gls= if _thus then thus_tac (mkVar c_id) new_eq [] gls else tclIDTAC gls in match rew_side with Lhs -> let new_eq = mkApp(_eq,[|typ;cut.cut_stat.st_it;rhs|]) in tclTHENS (assert_postpone c_id new_eq) [tclTHEN tcl_erase_info (tclTHENS (transitivity lhs) [just_tac;exact_check (mkVar last_id)]); thus_tac new_eq] gls0 | Rhs -> let new_eq = mkApp(_eq,[|typ;lhs;cut.cut_stat.st_it|]) in tclTHENS (assert_postpone c_id new_eq) [tclTHEN tcl_erase_info (tclTHENS (transitivity rhs) [exact_check (mkVar last_id);just_tac]); thus_tac new_eq] gls0 (* tactics for claim/focus *) let instr_claim _thus st gls0 = let info = get_its_info gls0 in let (id,_) = match st.st_label with Anonymous -> pf_get_new_id (id_of_string "_claim") gls0,false | Name id -> id,true in let thus_tac gls= if _thus then thus_tac (mkVar id) st.st_it [] gls else tclIDTAC gls in let ninfo1 = {pm_stack= (if _thus then Focus_claim else Claim)::info.pm_stack} in tclTHENS (assert_postpone id st.st_it) [thus_tac; tcl_change_info ninfo1] gls0 (* tactics for assume *) let push_intro_tac coerce nam gls = let (hid,_) = match nam with Anonymous -> pf_get_new_id (id_of_string "_hyp") gls,false | Name id -> id,true in tclTHENLIST [intro_mustbe_force hid; coerce hid] gls let assume_tac hyps gls = List.fold_right (fun (Hvar st | Hprop st) -> tclTHEN (push_intro_tac (fun id -> convert_hyp (id,None,st.st_it)) st.st_label)) hyps tclIDTAC gls let assume_hyps_or_theses hyps gls = List.fold_right (function (Hvar {st_label=nam;st_it=c} | Hprop 
{st_label=nam;st_it=This c}) -> tclTHEN (push_intro_tac (fun id -> convert_hyp (id,None,c)) nam) | Hprop {st_label=nam;st_it=Thesis (tk)} -> tclTHEN (push_intro_tac (fun id -> tclIDTAC) nam)) hyps tclIDTAC gls let assume_st hyps gls = List.fold_right (fun st -> tclTHEN (push_intro_tac (fun id -> convert_hyp (id,None,st.st_it)) st.st_label)) hyps tclIDTAC gls let assume_st_letin hyps gls = List.fold_right (fun st -> tclTHEN (push_intro_tac (fun id -> convert_hyp (id,Some (fst st.st_it),snd st.st_it)) st.st_label)) hyps tclIDTAC gls (* suffices *) let rec metas_from n hyps = match hyps with _ :: q -> n :: metas_from (succ n) q | [] -> [] let rec build_product args body = match args with (Hprop st| Hvar st )::rest -> let pprod= lift 1 (build_product rest body) in let lbody = match st.st_label with Anonymous -> pprod | Name id -> subst_term (mkVar id) pprod in mkProd (st.st_label, st.st_it, lbody) | [] -> body let rec build_applist prod = function [] -> [],prod | n::q -> let (_,typ,_) = destProd prod in let ctx,head = build_applist (Term.prod_applist prod [mkMeta n]) q in (n,typ)::ctx,head let instr_suffices _then cut gls0 = let info = get_its_info gls0 in let c_id = pf_get_new_id (id_of_string "_cofact") gls0 in let ctx,hd = cut.cut_stat in let c_stat = build_product ctx (mk_stat_or_thesis info gls0 hd) in let metas = metas_from 1 ctx in let c_ctx,c_head = build_applist c_stat metas in let c_term = applist (mkVar c_id,List.map mkMeta metas) in let thus_tac gls= thus_tac c_term c_head c_ctx gls in tclTHENS (assert_postpone c_id c_stat) [tclTHENLIST [ assume_tac ctx; tcl_erase_info; just_tac _then cut info]; thus_tac] gls0 (* tactics for consider/given *) let conjunction_arity id gls = let typ = pf_get_hyp_typ gls id in let hd,params = decompose_app (special_whd gls typ) in let env =pf_env gls in match kind_of_term hd with Ind ind when is_good_inductive env ind -> let mib,oib= Inductive.lookup_mind_specif env ind in let gentypes= Inductive.arities_of_constructors ind (mib,oib) in let _ = if Array.length gentypes <> 1 then raise Not_found in let apptype = Term.prod_applist gentypes.(0) params in let rc,_ = Reduction.dest_prod env apptype in List.length rc | _ -> raise Not_found let rec intron_then n ids ltac gls = if n<=0 then ltac ids gls else let id = pf_get_new_id (id_of_string "_tmp") gls in tclTHEN (intro_mustbe_force id) (intron_then (pred n) (id::ids) ltac) gls let rec consider_match may_intro introduced available expected gls = match available,expected with [],[] -> tclIDTAC gls | _,[] -> error "Last statements do not match a complete hypothesis." (* should tell which ones *) | [],hyps -> if may_intro then begin let id = pf_get_new_id (id_of_string "_tmp") gls in tclIFTHENELSE (intro_mustbe_force id) (consider_match true [] [id] hyps) (fun _ -> error "Not enough sub-hypotheses to match statements.") gls end else error "Not enough sub-hypotheses to match statements." (* should tell which ones *) | id::rest_ids,(Hvar st | Hprop st)::rest -> tclIFTHENELSE (convert_hyp (id,None,st.st_it)) begin match st.st_label with Anonymous -> consider_match may_intro ((id,false)::introduced) rest_ids rest | Name hid -> tclTHENLIST [rename_hyp [id,hid]; consider_match may_intro ((hid,true)::introduced) rest_ids rest] end begin (fun gls -> let nhyps = try conjunction_arity id gls with Not_found -> error "Matching hypothesis not found." 
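(* When the current hypothesis does not convert to the expected statement, it
   is assumed to be a non-recursive, single-constructor inductive package
   (a conjunction, an existential, ...): the branch below splits it with
   [general_case_analysis], introduces its [conjunction_arity] components
   under temporary names and resumes matching on them.  Schematic example,
   assuming a hypothetical hypothesis [h : A /\ B] consumed by a
   [consider ... from h] step: [h] is destructed and the given statements are
   matched against its two components. *)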
in tclTHENLIST [general_case_analysis false (mkVar id,NoBindings); intron_then nhyps [] (fun l -> consider_match may_intro introduced (List.rev_append l rest_ids) expected)] gls) end gls let consider_tac c hyps gls = match kind_of_term (strip_outer_cast c) with Var id -> consider_match false [] [id] hyps gls | _ -> let id = pf_get_new_id (id_of_string "_tmp") gls in tclTHEN (forward None (Some (dummy_loc, Genarg.IntroIdentifier id)) c) (consider_match false [] [id] hyps) gls let given_tac hyps gls = consider_match true [] [] hyps gls (* tactics for take *) let rec take_tac wits gls = match wits with [] -> tclIDTAC gls | wit::rest -> let typ = pf_type_of gls wit in tclTHEN (thus_tac wit typ []) (take_tac rest) gls (* tactics for define *) let rec build_function args body = match args with st::rest -> let pfun= lift 1 (build_function rest body) in let id = match st.st_label with Anonymous -> assert false | Name id -> id in mkLambda (Name id, st.st_it, subst_term (mkVar id) pfun) | [] -> body let define_tac id args body gls = let t = build_function args body in letin_tac None (Name id) t None Tacexpr.nowhere gls (* tactics for reconsider *) let cast_tac id_or_thesis typ gls = match id_or_thesis with This id -> let (_,body,_) = pf_get_hyp gls id in convert_hyp (id,body,typ) gls | Thesis (For _ ) -> error "\"thesis for ...\" is not applicable here." | Thesis Plain -> convert_concl typ DEFAULTcast gls (* per cases *) let is_rec_pos (main_ind,wft) = match main_ind with None -> false | Some index -> match fst (Rtree.dest_node wft) with Mrec (_,i) when i = index -> true | _ -> false let rec constr_trees (main_ind,wft) ind = match Rtree.dest_node wft with Norec,_ -> let itree = (snd (Global.lookup_inductive ind)).mind_recargs in constr_trees (None,itree) ind | _,constrs -> main_ind,constrs let ind_args rp ind = let main_ind,constrs = constr_trees rp ind in let args ctree = Array.map (fun t -> main_ind,t) (snd (Rtree.dest_node ctree)) in Array.map args constrs let init_tree ids ind rp nexti = let indargs = ind_args rp ind in let do_i i arp = (Array.map is_rec_pos arp),nexti i arp in Split_patt (ids,ind,Array.mapi do_i indargs) let map_tree_rp rp id_fun mapi = function Split_patt (ids,ind,branches) -> let indargs = ind_args rp ind in let do_i i (recargs,bri) = recargs,mapi i indargs.(i) bri in Split_patt (id_fun ids,ind,Array.mapi do_i branches) | _ -> failwith "map_tree_rp: not a splitting node" let map_tree id_fun mapi = function Split_patt (ids,ind,branches) -> let do_i i (recargs,bri) = recargs,mapi i bri in Split_patt (id_fun ids,ind,Array.mapi do_i branches) | _ -> failwith "map_tree: not a splitting node" let start_tree env ind rp = init_tree Idset.empty ind rp (fun _ _ -> None) let build_per_info etype casee gls = let concl=pf_concl gls in let env=pf_env gls in let ctyp=pf_type_of gls casee in let is_dep = dependent casee concl in let hd,args = decompose_app (special_whd gls ctyp) in let ind = try destInd hd with e when Errors.noncritical e -> error "Case analysis must be done on an inductive object." 
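(* [build_per_info] collects everything needed by a [per cases/induction on c]
   block: the type of [c], its head inductive, the split between parameters
   and real arguments, and an elimination predicate obtained by abstracting
   the current conclusion over the real arguments and over [c] itself.
   For induction, [per_wf] additionally records which body of the mutual
   inductive is concerned together with its recursive-argument tree, so that
   induction hypotheses can be generated by the dependent sub-case engine. *)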
in let mind,oind = Global.lookup_inductive ind in let nparams,index = match etype with ET_Induction -> mind.mind_nparams_rec,Some (snd ind) | _ -> mind.mind_nparams,None in let params,real_args = list_chop nparams args in let abstract_obj c body = let typ=pf_type_of gls c in lambda_create env (typ,subst_term c body) in let pred= List.fold_right abstract_obj real_args (lambda_create env (ctyp,subst_term casee concl)) in is_dep, {per_casee=casee; per_ctype=ctyp; per_ind=ind; per_pred=pred; per_args=real_args; per_params=params; per_nparams=nparams; per_wf=index,oind.mind_recargs} let per_tac etype casee gls= let env=pf_env gls in let info = get_its_info gls in match casee with Real c -> let is_dep,per_info = build_per_info etype c gls in let ek = if is_dep then EK_dep (start_tree env per_info.per_ind per_info.per_wf) else EK_unknown in tcl_change_info {pm_stack= Per(etype,per_info,ek,[])::info.pm_stack} gls | Virtual cut -> assert (cut.cut_stat.st_label=Anonymous); let id = pf_get_new_id (id_of_string "anonymous_matched") gls in let c = mkVar id in let modified_cut = {cut with cut_stat={cut.cut_stat with st_label=Name id}} in tclTHEN (instr_cut (fun _ _ c -> c) false false modified_cut) (fun gls0 -> let is_dep,per_info = build_per_info etype c gls0 in assert (not is_dep); tcl_change_info {pm_stack= Per(etype,per_info,EK_unknown,[])::info.pm_stack} gls0) gls (* suppose *) let register_nodep_subcase id= function Per(et,pi,ek,clauses)::s -> begin match ek with EK_unknown -> clauses,Per(et,pi,EK_nodep,id::clauses)::s | EK_nodep -> clauses,Per(et,pi,EK_nodep,id::clauses)::s | EK_dep _ -> error "Do not mix \"suppose\" with \"suppose it is\"." end | _ -> anomaly "wrong stack state" let suppose_tac hyps gls0 = let info = get_its_info gls0 in let thesis = pf_concl gls0 in let id = pf_get_new_id (id_of_string "subcase_") gls0 in let clause = build_product hyps thesis in let ninfo1 = {pm_stack=Suppose_case::info.pm_stack} in let old_clauses,stack = register_nodep_subcase id info.pm_stack in let ninfo2 = {pm_stack=stack} in tclTHENS (assert_postpone id clause) [tclTHENLIST [tcl_change_info ninfo1; assume_tac hyps; clear old_clauses]; tcl_change_info ninfo2] gls0 (* suppose it is ... 
*) (* pattern matching compiling *) let rec skip_args rest ids n = if n <= 0 then Close_patt rest else Skip_patt (ids,skip_args rest ids (pred n)) let rec tree_of_pats ((id,_) as cpl) pats = match pats with [] -> End_patt cpl | args::stack -> match args with [] -> Close_patt (tree_of_pats cpl stack) | (patt,rp) :: rest_args -> match patt with PatVar (_,v) -> Skip_patt (Idset.singleton id, tree_of_pats cpl (rest_args::stack)) | PatCstr (_,(ind,cnum),args,nam) -> let nexti i ati = if i = pred cnum then let nargs = list_map_i (fun j a -> (a,ati.(j))) 0 args in Some (Idset.singleton id, tree_of_pats cpl (nargs::rest_args::stack)) else None in init_tree Idset.empty ind rp nexti let rec add_branch ((id,_) as cpl) pats tree= match pats with [] -> begin match tree with End_patt cpl0 -> End_patt cpl0 (* this ensures precedence for overlapping patterns *) | _ -> anomaly "tree is expected to end here" end | args::stack -> match args with [] -> begin match tree with Close_patt t -> Close_patt (add_branch cpl stack t) | _ -> anomaly "we should pop here" end | (patt,rp) :: rest_args -> match patt with PatVar (_,v) -> begin match tree with Skip_patt (ids,t) -> Skip_patt (Idset.add id ids, add_branch cpl (rest_args::stack) t) | Split_patt (_,_,_) -> map_tree (Idset.add id) (fun i bri -> append_branch cpl 1 (rest_args::stack) bri) tree | _ -> anomaly "No pop/stop expected here" end | PatCstr (_,(ind,cnum),args,nam) -> match tree with Skip_patt (ids,t) -> let nexti i ati = if i = pred cnum then let nargs = list_map_i (fun j a -> (a,ati.(j))) 0 args in Some (Idset.add id ids, add_branch cpl (nargs::rest_args::stack) (skip_args t ids (Array.length ati))) else Some (ids, skip_args t ids (Array.length ati)) in init_tree ids ind rp nexti | Split_patt (_,ind0,_) -> if (ind <> ind0) then error (* this can happen with coercions *) "Case pattern belongs to wrong inductive type."; let mapi i ati bri = if i = pred cnum then let nargs = list_map_i (fun j a -> (a,ati.(j))) 0 args in append_branch cpl 0 (nargs::rest_args::stack) bri else bri in map_tree_rp rp (fun ids -> ids) mapi tree | _ -> anomaly "No pop/stop expected here" and append_branch ((id,_) as cpl) depth pats = function Some (ids,tree) -> Some (Idset.add id ids,append_tree cpl depth pats tree) | None -> Some (Idset.singleton id,tree_of_pats cpl pats) and append_tree ((id,_) as cpl) depth pats tree = if depth<=0 then add_branch cpl pats tree else match tree with Close_patt t -> Close_patt (append_tree cpl (pred depth) pats t) | Skip_patt (ids,t) -> Skip_patt (Idset.add id ids,append_tree cpl depth pats t) | End_patt _ -> anomaly "Premature end of branch" | Split_patt (_,_,_) -> map_tree (Idset.add id) (fun i bri -> append_branch cpl (succ depth) pats bri) tree (* suppose it is *) let rec st_assoc id = function [] -> raise Not_found | st::_ when st.st_label = id -> st.st_it | _ :: rest -> st_assoc id rest let thesis_for obj typ per_info env= let rc,hd1=decompose_prod typ in let cind,all_args=decompose_app typ in let ind = destInd cind in let _ = if ind <> per_info.per_ind then errorlabstrm "thesis_for" ((Printer.pr_constr_env env obj) ++ spc () ++ str"cannot give an induction hypothesis (wrong inductive type).") in let params,args = list_chop per_info.per_nparams all_args in let _ = if not (List.for_all2 eq_constr params per_info.per_params) then errorlabstrm "thesis_for" ((Printer.pr_constr_env env obj) ++ spc () ++ str "cannot give an induction hypothesis (wrong parameters).") in let hd2 = (applist ((lift (List.length rc) per_info.per_pred),args@[obj])) in 
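(* [hd2] is the elimination predicate of the enclosing [per induction] block
   applied to the index arguments of [typ] and to the object itself;
   generalizing it again over the binders [rc] and beta-reducing yields the
   statement of the induction hypothesis requested by [thesis for obj].
   Schematic sketch, assuming a hypothetical [obj : even n] and an elimination
   predicate [P : forall n, even n -> Prop]: the produced hypothesis is
   [P n obj]. *)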
compose_prod rc (whd_beta Evd.empty hd2) let rec build_product_dep pat_info per_info args body gls = match args with (Hprop {st_label=nam;st_it=This c} | Hvar {st_label=nam;st_it=c})::rest -> let pprod= lift 1 (build_product_dep pat_info per_info rest body gls) in let lbody = match nam with Anonymous -> body | Name id -> subst_var id pprod in mkProd (nam,c,lbody) | Hprop ({st_it=Thesis tk} as st)::rest -> let pprod= lift 1 (build_product_dep pat_info per_info rest body gls) in let lbody = match st.st_label with Anonymous -> body | Name id -> subst_var id pprod in let ptyp = match tk with For id -> let obj = mkVar id in let typ = try st_assoc (Name id) pat_info.pat_vars with Not_found -> snd (st_assoc (Name id) pat_info.pat_aliases) in thesis_for obj typ per_info (pf_env gls) | Plain -> pf_concl gls in mkProd (st.st_label,ptyp,lbody) | [] -> body let build_dep_clause params pat_info per_info hyps gls = let concl= thesis_for pat_info.pat_constr pat_info.pat_typ per_info (pf_env gls) in let open_clause = build_product_dep pat_info per_info hyps concl gls in let prod_one st body = match st.st_label with Anonymous -> mkProd(Anonymous,st.st_it,lift 1 body) | Name id -> mkNamedProd id st.st_it (lift 1 body) in let let_one_in st body = match st.st_label with Anonymous -> mkLetIn(Anonymous,fst st.st_it,snd st.st_it,lift 1 body) | Name id -> mkNamedLetIn id (fst st.st_it) (snd st.st_it) (lift 1 body) in let aliased_clause = List.fold_right let_one_in pat_info.pat_aliases open_clause in List.fold_right prod_one (params@pat_info.pat_vars) aliased_clause let rec register_dep_subcase id env per_info pat = function EK_nodep -> error "Only \"suppose it is\" can be used here." | EK_unknown -> register_dep_subcase id env per_info pat (EK_dep (start_tree env per_info.per_ind per_info.per_wf)) | EK_dep tree -> EK_dep (add_branch id [[pat,per_info.per_wf]] tree) let case_tac params pat_info hyps gls0 = let info = get_its_info gls0 in let id = pf_get_new_id (id_of_string "subcase_") gls0 in let et,per_info,ek,old_clauses,rest = match info.pm_stack with Per (et,pi,ek,old_clauses)::rest -> (et,pi,ek,old_clauses,rest) | _ -> anomaly "wrong place for cases" in let clause = build_dep_clause params pat_info per_info hyps gls0 in let ninfo1 = {pm_stack=Suppose_case::info.pm_stack} in let nek = register_dep_subcase (id,(List.length params,List.length hyps)) (pf_env gls0) per_info pat_info.pat_pat ek in let ninfo2 = {pm_stack=Per(et,per_info,nek,id::old_clauses)::rest} in tclTHENS (assert_postpone id clause) [tclTHENLIST [tcl_change_info ninfo1; assume_st (params@pat_info.pat_vars); assume_st_letin pat_info.pat_aliases; assume_hyps_or_theses hyps; clear old_clauses]; tcl_change_info ninfo2] gls0 (* end cases *) type instance_stack = (constr option*(constr list) list) list let initial_instance_stack ids = List.map (fun id -> id,[None,[]]) ids let push_one_arg arg = function [] -> anomaly "impossible" | (head,args) :: ctx -> ((head,(arg::args)) :: ctx) let push_arg arg stacks = List.map (fun (id,stack) -> (id,push_one_arg arg stack)) stacks let push_one_head c ids (id,stack) = let head = if Idset.mem id ids then Some c else None in id,(head,[]) :: stack let push_head c ids stacks = List.map (push_one_head c ids) stacks let pop_one (id,stack) = let nstack= match stack with [] -> anomaly "impossible" | [c] as l -> l | (Some head,args)::(head0,args0)::ctx -> let arg = applist (head,(List.rev args)) in (head0,(arg::args0))::ctx | (None,args)::(head0,args0)::ctx -> (head0,(args@args0))::ctx in id,nstack let pop_stacks stacks = 
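(* Instance stacks (used while closing a [per ... end] block): for every
   subcase hypothesis we rebuild, bottom-up, the constructor instance it was
   stated for.  [push_head] opens a new application frame when the tree walk
   enters a [Split_patt] node that binds the hypothesis, [push_arg] appends an
   argument to the innermost frame, and [pop_one]/[pop_stacks] close a frame
   by applying its head to the accumulated arguments, or by flushing the
   arguments into the enclosing frame when the hypothesis ignores this node. *)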
List.map pop_one stacks let hrec_for fix_id per_info gls obj_id = let obj=mkVar obj_id in let typ=pf_get_hyp_typ gls obj_id in let rc,hd1=decompose_prod typ in let cind,all_args=decompose_app typ in let ind = destInd cind in assert (ind=per_info.per_ind); let params,args= list_chop per_info.per_nparams all_args in assert begin try List.for_all2 eq_constr params per_info.per_params with Invalid_argument _ -> false end; let hd2 = applist (mkVar fix_id,args@[obj]) in compose_lam rc (whd_beta gls.sigma hd2) let rec execute_cases fix_name per_info tacnext args objs nhrec tree gls = match tree, objs with Close_patt t,_ -> let args0 = pop_stacks args in execute_cases fix_name per_info tacnext args0 objs nhrec t gls | Skip_patt (_,t),skipped::next_objs -> let args0 = push_arg skipped args in execute_cases fix_name per_info tacnext args0 next_objs nhrec t gls | End_patt (id,(nparams,nhyps)),[] -> begin match List.assoc id args with [None,br_args] -> let all_metas = list_tabulate (fun n -> mkMeta (succ n)) (nparams + nhyps) in let param_metas,hyp_metas = list_chop nparams all_metas in tclTHEN (tclDO nhrec introf) (tacnext (applist (mkVar id, List.append param_metas (List.rev_append br_args hyp_metas)))) gls | _ -> anomaly "wrong stack size" end | Split_patt (ids,ind,br), casee::next_objs -> let (mind,oind) as spec = Global.lookup_inductive ind in let nparams = mind.mind_nparams in let concl=pf_concl gls in let env=pf_env gls in let ctyp=pf_type_of gls casee in let hd,all_args = decompose_app (special_whd gls ctyp) in let _ = assert (destInd hd = ind) in (* just in case *) let params,real_args = list_chop nparams all_args in let abstract_obj c body = let typ=pf_type_of gls c in lambda_create env (typ,subst_term c body) in let elim_pred = List.fold_right abstract_obj real_args (lambda_create env (ctyp,subst_term casee concl)) in let case_info = Inductiveops.make_case_info env ind RegularStyle in let gen_arities = Inductive.arities_of_constructors ind spec in let f_ids typ = let sign = (prod_assum (Term.prod_applist typ params)) in find_intro_names sign gls in let constr_args_ids = Array.map f_ids gen_arities in let case_term = mkCase(case_info,elim_pred,casee, Array.mapi (fun i _ -> mkMeta (succ i)) constr_args_ids) in let branch_tac i (recargs,bro) gls0 = let args_ids = constr_args_ids.(i) in let rec aux n = function [] -> assert (n=Array.length recargs); next_objs,[],nhrec | id :: q -> let objs,recs,nrec = aux (succ n) q in if recargs.(n) then (mkVar id::objs),(id::recs),succ nrec else (mkVar id::objs),recs,nrec in let objs,recs,nhrec = aux 0 args_ids in tclTHENLIST [tclMAP intro_mustbe_force args_ids; begin fun gls1 -> let hrecs = List.map (fun id -> hrec_for (out_name fix_name) per_info gls1 id) recs in generalize hrecs gls1 end; match bro with None -> msg_warning (str "missing case"); tacnext (mkMeta 1) | Some (sub_ids,tree) -> let br_args = List.filter (fun (id,_) -> Idset.mem id sub_ids) args in let construct = applist (mkConstruct(ind,succ i),params) in let p_args = push_head construct ids br_args in execute_cases fix_name per_info tacnext p_args objs nhrec tree] gls0 in tclTHENSV (refine case_term) (Array.mapi branch_tac br) gls | Split_patt (_, _, _) , [] -> anomaly "execute_cases : Nothing to split" | Skip_patt _ , [] -> anomaly "execute_cases : Nothing to skip" | End_patt (_,_) , _ :: _ -> anomaly "execute_cases : End of branch with garbage left" let understand_my_constr c gls = let env = pf_env gls in let nc = names_of_rel_context env in let rawc = Detyping.detype false [] nc c in let rec 
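(* [frob] (just below) replaces every evar node left by detyping the partially
   built term with an explicit hole, so that [understand_tcc] can re-infer it
   while re-elaborating the term against the current goal; [my_refine] then
   hands the result to the refine tactic.  This is, roughly, how the case
   terms assembled by [execute_cases] above, whose branches are still
   unresolved, are re-checked before being installed in the proof. *)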
frob = function GEvar _ -> GHole (dummy_loc,QuestionMark Expand) | rc -> map_glob_constr frob rc in Pretyping.Default.understand_tcc (sig_sig gls) env ~expected_type:(pf_concl gls) (frob rawc) let my_refine c gls = let oc = understand_my_constr c gls in Refine.refine oc gls (* end focus/claim *) let end_tac et2 gls = let info = get_its_info gls in let et1,pi,ek,clauses = match info.pm_stack with Suppose_case::_ -> anomaly "This case should already be trapped" | Claim::_ -> error "\"end claim\" expected." | Focus_claim::_ -> error "\"end focus\" expected." | Per(et',pi,ek,clauses)::_ -> (et',pi,ek,clauses) | [] -> anomaly "This case should already be trapped" in let et = if et1 <> et2 then match et1 with ET_Case_analysis -> error "\"end cases\" expected." | ET_Induction -> error "\"end induction\" expected." else et1 in tclTHEN tcl_erase_info begin match et,ek with _,EK_unknown -> tclSOLVE [simplest_elim pi.per_casee] | ET_Case_analysis,EK_nodep -> tclTHEN (general_case_analysis false (pi.per_casee,NoBindings)) (default_justification (List.map mkVar clauses)) | ET_Induction,EK_nodep -> tclTHENLIST [generalize (pi.per_args@[pi.per_casee]); simple_induct (AnonHyp (succ (List.length pi.per_args))); default_justification (List.map mkVar clauses)] | ET_Case_analysis,EK_dep tree -> execute_cases Anonymous pi (fun c -> tclTHENLIST [my_refine c; clear clauses; justification assumption]) (initial_instance_stack clauses) [pi.per_casee] 0 tree | ET_Induction,EK_dep tree -> let nargs = (List.length pi.per_args) in tclTHEN (generalize (pi.per_args@[pi.per_casee])) begin fun gls0 -> let fix_id = pf_get_new_id (id_of_string "_fix") gls0 in let c_id = pf_get_new_id (id_of_string "_main_arg") gls0 in tclTHENLIST [fix (Some fix_id) (succ nargs); tclDO nargs introf; intro_mustbe_force c_id; execute_cases (Name fix_id) pi (fun c -> tclTHENLIST [clear [fix_id]; my_refine c; clear clauses; justification assumption]) (initial_instance_stack clauses) [mkVar c_id] 0 tree] gls0 end end gls (* escape *) let escape_tac gls = (* spiwack: sets an empty info stack to avoid interferences. We could erase the info altogether, but that doesn't play well with the Decl_mode.focus (used in post_processing). 
*) let info={pm_stack=[]} in tcl_change_info info gls (* General instruction engine *) let rec do_proof_instr_gen _thus _then instr = match instr with Pthus i -> assert (not _thus); do_proof_instr_gen true _then i | Pthen i -> assert (not _then); do_proof_instr_gen _thus true i | Phence i -> assert (not (_then || _thus)); do_proof_instr_gen true true i | Pcut c -> instr_cut mk_stat_or_thesis _thus _then c | Psuffices c -> instr_suffices _then c | Prew (s,c) -> assert (not _then); instr_rew _thus s c | Pconsider (c,hyps) -> consider_tac c hyps | Pgiven hyps -> given_tac hyps | Passume hyps -> assume_tac hyps | Plet hyps -> assume_tac hyps | Pclaim st -> instr_claim false st | Pfocus st -> instr_claim true st | Ptake witl -> take_tac witl | Pdefine (id,args,body) -> define_tac id args body | Pcast (id,typ) -> cast_tac id typ | Pper (et,cs) -> per_tac et cs | Psuppose hyps -> suppose_tac hyps | Pcase (params,pat_info,hyps) -> case_tac params pat_info hyps | Pend (B_elim et) -> end_tac et | Pend _ -> anomaly "Not applicable" | Pescape -> escape_tac let eval_instr {instr=instr} = do_proof_instr_gen false false instr let rec preprocess pts instr = match instr with Phence i |Pthus i | Pthen i -> preprocess pts i | Psuffices _ | Pcut _ | Passume _ | Plet _ | Pclaim _ | Pfocus _ | Pconsider (_,_) | Pcast (_,_) | Pgiven _ | Ptake _ | Pdefine (_,_,_) | Pper _ | Prew _ -> check_not_per pts; true | Pescape -> check_not_per pts; true | Pcase _ | Psuppose _ | Pend (B_elim _) -> close_previous_case pts ; true | Pend bt -> close_block bt pts ; false let rec postprocess pts instr = match instr with Phence i | Pthus i | Pthen i -> postprocess pts i | Pcut _ | Psuffices _ | Passume _ | Plet _ | Pconsider (_,_) | Pcast (_,_) | Pgiven _ | Ptake _ | Pdefine (_,_,_) | Prew (_,_) -> () | Pclaim _ | Pfocus _ | Psuppose _ | Pcase _ | Pper _ -> Decl_mode.focus pts | Pescape -> Decl_mode.focus pts; Proof_global.set_proof_mode "Classic" | Pend (B_elim ET_Induction) -> begin let pfterm = List.hd (Proof.partial_proof pts) in let { it = gls ; sigma = sigma } = Proof.V82.subgoals pts in let env = try Goal.V82.env sigma (List.hd gls) with Failure "hd" -> Global.env () in try Inductiveops.control_only_guard env pfterm; goto_current_focus_or_top pts with Type_errors.TypeError(env, Type_errors.IllFormedRecBody(_,_,_,_,_)) -> anomaly "\"end induction\" generated an ill-formed fixpoint" end | Pend _ -> goto_current_focus_or_top (pts) let do_instr raw_instr pts = let has_tactic = preprocess pts raw_instr.instr in begin if has_tactic then let { it=gls ; sigma=sigma } = Proof.V82.subgoals pts in let gl = { it=List.hd gls ; sigma=sigma } in let env= pf_env gl in let ist = {ltacvars = ([],[]); ltacrecvars = []; gsigma = sigma; genv = env} in let glob_instr = intern_proof_instr ist raw_instr in let instr = interp_proof_instr (get_its_info gl) sigma env glob_instr in Pfedit.by (tclTHEN (eval_instr instr) clean_tmp) else () end; postprocess pts raw_instr.instr; (* spiwack: this should restore a compatible semantics with v8.3 where we never stayed focused on 0 goal. 
*) Decl_mode.maximal_unfocus pts let proof_instr raw_instr = let p = Proof_global.give_me_the_proof () in do_instr raw_instr p (* (* STUFF FOR ITERATED RELATIONS *) let decompose_bin_app t= let hd,args = destApp let identify_transitivity_lemma c = let varx,tx,c1 = destProd c in let vary,ty,c2 = destProd (pop c1) in let varz,tz,c3 = destProd (pop c2) in let _,p1,c4 = destProd (pop c3) in let _,lp2,lp3 = destProd (pop c4) in let p2=pop lp2 in let p3=pop lp3 in *) coq-8.4pl2/plugins/decl_mode/ppdecl_proof.ml0000640000175000001440000001434012010532755020233 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* mt () | Name id -> pr_id id ++ spc () ++ str ":" ++ spc () let pr_justification_items env = function Some [] -> mt () | Some (_::_ as l) -> spc () ++ str "by" ++ spc () ++ prlist_with_sep (fun () -> str ",") (pr_constr env) l | None -> spc () ++ str "by *" let pr_justification_method env = function None -> mt () | Some tac -> spc () ++ str "using" ++ spc () ++ pr_tac env tac let pr_statement pr_it env st = pr_label st.st_label ++ pr_it env st.st_it let pr_or_thesis pr_this env = function Thesis Plain -> str "thesis" | Thesis (For id) -> str "thesis" ++ spc() ++ str "for" ++ spc () ++ pr_id id | This c -> pr_this env c let pr_cut pr_it env c = hov 1 (pr_it env c.cut_stat) ++ pr_justification_items env c.cut_by ++ pr_justification_method env c.cut_using let type_or_thesis = function Thesis _ -> Term.mkProp | This c -> c let _I x = x let rec print_hyps pconstr gtyp env sep _be _have hyps = let pr_sep = if sep then str "and" ++ spc () else mt () in match hyps with (Hvar _ ::_) as rest -> spc () ++ pr_sep ++ str _have ++ print_vars pconstr gtyp env false _be _have rest | Hprop st :: rest -> begin let nenv = match st.st_label with Anonymous -> env | Name id -> Environ.push_named (id,None,gtyp st.st_it) env in spc() ++ pr_sep ++ pr_statement pconstr env st ++ print_hyps pconstr gtyp nenv true _be _have rest end | [] -> mt () and print_vars pconstr gtyp env sep _be _have vars = match vars with Hvar st :: rest -> begin let nenv = match st.st_label with Anonymous -> anomaly "anonymous variable" | Name id -> Environ.push_named (id,None,st.st_it) env in let pr_sep = if sep then pr_comma () else mt () in spc() ++ pr_sep ++ pr_statement pr_constr env st ++ print_vars pconstr gtyp nenv true _be _have rest end | (Hprop _ :: _) as rest -> let _st = if _be then str "be such that" else str "such that" in spc() ++ _st ++ print_hyps pconstr gtyp env false _be _have rest | [] -> mt () let pr_suffices_clause env (hyps,c) = print_hyps pr_constr _I env false false "to have" hyps ++ spc () ++ str "to show" ++ spc () ++ pr_or_thesis pr_constr env c let pr_elim_type = function ET_Case_analysis -> str "cases" | ET_Induction -> str "induction" let pr_casee env =function Real c -> str "on" ++ spc () ++ pr_constr env c | Virtual cut -> str "of" ++ spc () ++ pr_cut (pr_statement pr_constr) env cut let pr_side = function Lhs -> str "=~" | Rhs -> str "~=" let rec pr_bare_proof_instr _then _thus env = function | Pescape -> str "escape" | Pthen i -> pr_bare_proof_instr true _thus env i | Pthus i -> pr_bare_proof_instr _then true env i | Phence i -> pr_bare_proof_instr true true env i | Pcut c -> begin match _then,_thus with false,false -> str "have" ++ spc () ++ pr_cut (pr_statement (pr_or_thesis pr_constr)) env c | false,true -> str "thus" ++ spc () ++ pr_cut (pr_statement (pr_or_thesis pr_constr)) env c | true,false -> 
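(* The four (_then,_thus) combinations print the four forward keywords:
   (false,false) gives have, (false,true) gives thus, (true,false) gives then
   (this branch) and (true,true) gives hence. *)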
str "then" ++ spc () ++ pr_cut (pr_statement (pr_or_thesis pr_constr)) env c | true,true -> str "hence" ++ spc () ++ pr_cut (pr_statement (pr_or_thesis pr_constr)) env c end | Psuffices c -> str "suffices" ++ pr_cut pr_suffices_clause env c | Prew (sid,c) -> (if _thus then str "thus" else str " ") ++ spc () ++ pr_side sid ++ spc () ++ pr_cut (pr_statement pr_constr) env c | Passume hyps -> str "assume" ++ print_hyps pr_constr _I env false false "we have" hyps | Plet hyps -> str "let" ++ print_vars pr_constr _I env false true "let" hyps | Pclaim st -> str "claim" ++ spc () ++ pr_statement pr_constr env st | Pfocus st -> str "focus on" ++ spc () ++ pr_statement pr_constr env st | Pconsider (id,hyps) -> str "consider" ++ print_vars pr_constr _I env false false "consider" hyps ++ spc () ++ str "from " ++ pr_constr env id | Pgiven hyps -> str "given" ++ print_vars pr_constr _I env false false "given" hyps | Ptake witl -> str "take" ++ spc () ++ prlist_with_sep pr_comma (pr_constr env) witl | Pdefine (id,args,body) -> str "define" ++ spc () ++ pr_id id ++ spc () ++ prlist_with_sep spc (fun st -> str "(" ++ pr_statement pr_constr env st ++ str ")") args ++ spc () ++ str "as" ++ (pr_constr env body) | Pcast (id,typ) -> str "reconsider" ++ spc () ++ pr_or_thesis (fun _ -> pr_id) env id ++ spc () ++ str "as" ++ spc () ++ (pr_constr env typ) | Psuppose hyps -> str "suppose" ++ print_hyps pr_constr _I env false false "we have" hyps | Pcase (params,pat,hyps) -> str "suppose it is" ++ spc () ++ pr_pat pat ++ (if params = [] then mt () else (spc () ++ str "with" ++ spc () ++ prlist_with_sep spc (fun st -> str "(" ++ pr_statement pr_constr env st ++ str ")") params ++ spc ())) ++ (if hyps = [] then mt () else (spc () ++ str "and" ++ print_hyps (pr_or_thesis pr_constr) type_or_thesis env false false "we have" hyps)) | Pper (et,c) -> str "per" ++ spc () ++ pr_elim_type et ++ spc () ++ pr_casee env c | Pend (B_elim et) -> str "end" ++ spc () ++ pr_elim_type et | _ -> anomaly "unprintable instruction" let pr_emph = function 0 -> str " " | 1 -> str "* " | 2 -> str "** " | 3 -> str "*** " | _ -> anomaly "unknown emphasis" let pr_proof_instr env instr = pr_emph instr.emph ++ spc () ++ pr_bare_proof_instr false false env instr.instr coq-8.4pl2/plugins/decl_mode/decl_expr.mli0000640000175000001440000000576212010532755017705 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* raw_proof_instr -> glob_proof_instr val interp_proof_instr : Decl_mode.pm_info -> Evd.evar_map -> Environ.env -> glob_proof_instr -> proof_instr coq-8.4pl2/plugins/decl_mode/decl_mode.mli0000640000175000001440000000377012010532755017650 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val clear_daimon_flag : unit -> unit val get_daimon_flag : unit -> bool type command_mode = Mode_tactic | Mode_proof | Mode_none val mode_of_pftreestate : Proof.proof -> command_mode val get_current_mode : unit -> command_mode val check_not_proof_mode : string -> unit type split_tree= Skip_patt of Idset.t * split_tree | Split_patt of Idset.t * inductive * (bool array * (Idset.t * split_tree) option) array | Close_patt of split_tree | End_patt of (identifier * (int * int)) type elim_kind = EK_dep of split_tree | EK_nodep | EK_unknown type recpath = int option*Declarations.wf_paths type per_info = {per_casee:constr; per_ctype:types; per_ind:inductive; 
per_pred:constr; per_args:constr list; per_params:constr list; per_nparams:int; per_wf:recpath} type stack_info = Per of Decl_expr.elim_type * per_info * elim_kind * Names.identifier list | Suppose_case | Claim | Focus_claim type pm_info = {pm_stack : stack_info list } val info : pm_info Store.Field.t val get_info : Evd.evar_map -> Proof_type.goal -> pm_info val try_get_info : Evd.evar_map -> Proof_type.goal -> pm_info option val get_stack : Proof.proof -> stack_info list val get_top_stack : Proof.proof -> stack_info list val get_last: Environ.env -> identifier val focus : Proof.proof -> unit val unfocus : Proof.proof -> unit val maximal_unfocus : Proof.proof -> unit coq-8.4pl2/plugins/decl_mode/decl_proof_instr.mli0000640000175000001440000000676612010532755021300 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* unit val return_from_tactic_mode: unit -> unit val register_automation_tac: tactic -> unit val automation_tac : tactic val concl_refiner: Termops.meta_type_map -> constr -> Proof_type.goal sigma -> constr val do_instr: Decl_expr.raw_proof_instr -> Proof.proof -> unit val proof_instr: Decl_expr.raw_proof_instr -> unit val tcl_change_info : Decl_mode.pm_info -> tactic val execute_cases : Names.name -> Decl_mode.per_info -> (Term.constr -> Proof_type.tactic) -> (Names.Idset.elt * (Term.constr option * Term.constr list) list) list -> Term.constr list -> int -> Decl_mode.split_tree -> Proof_type.tactic val tree_of_pats : identifier * (int * int) -> (Glob_term.cases_pattern*recpath) list list -> split_tree val add_branch : identifier * (int * int) -> (Glob_term.cases_pattern*recpath) list list -> split_tree -> split_tree val append_branch : identifier *(int * int) -> int -> (Glob_term.cases_pattern*recpath) list list -> (Names.Idset.t * Decl_mode.split_tree) option -> (Names.Idset.t * Decl_mode.split_tree) option val append_tree : identifier * (int * int) -> int -> (Glob_term.cases_pattern*recpath) list list -> split_tree -> split_tree val build_dep_clause : Term.types Decl_expr.statement list -> Decl_expr.proof_pattern -> Decl_mode.per_info -> (Term.types Decl_expr.statement, Term.types Decl_expr.or_thesis) Decl_expr.hyp list -> Proof_type.goal Tacmach.sigma -> Term.types val register_dep_subcase : Names.identifier * (int * int) -> Environ.env -> Decl_mode.per_info -> Glob_term.cases_pattern -> Decl_mode.elim_kind -> Decl_mode.elim_kind val thesis_for : Term.constr -> Term.constr -> Decl_mode.per_info -> Environ.env -> Term.constr val close_previous_case : Proof.proof -> unit val pop_stacks : (Names.identifier * (Term.constr option * Term.constr list) list) list -> (Names.identifier * (Term.constr option * Term.constr list) list) list val push_head : Term.constr -> Names.Idset.t -> (Names.identifier * (Term.constr option * Term.constr list) list) list -> (Names.identifier * (Term.constr option * Term.constr list) list) list val push_arg : Term.constr -> (Names.identifier * (Term.constr option * Term.constr list) list) list -> (Names.identifier * (Term.constr option * Term.constr list) list) list val hrec_for: Names.identifier -> Decl_mode.per_info -> Proof_type.goal Tacmach.sigma -> Names.identifier -> Term.constr val consider_match : bool -> (Names.Idset.elt*bool) list -> Names.Idset.elt list -> (Term.types Decl_expr.statement, Term.types) Decl_expr.hyp list -> Proof_type.tactic val init_tree: Names.Idset.t -> Names.inductive -> int option * Declarations.wf_paths -> (int -> (int 
option * Declarations.recarg Rtree.t) array -> (Names.Idset.t * Decl_mode.split_tree) option) -> Decl_mode.split_tree coq-8.4pl2/plugins/decl_mode/g_decl_mode.ml40000640000175000001440000003313012010532755020062 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Decl_proof_instr.go_to_proof_mode () ; Proof_global.set_proof_mode "Declarative" ; Vernacentries.print_subgoals () end (* spiwack: some bureaucracy is not performed here *) let vernac_return () = Proof.transaction (Proof_global.give_me_the_proof ()) begin fun () -> Decl_proof_instr.return_from_tactic_mode () ; Proof_global.set_proof_mode "Declarative" ; Vernacentries.print_subgoals () end let vernac_proof_instr instr = Proof.transaction (Proof_global.give_me_the_proof ()) begin fun () -> Decl_proof_instr.proof_instr instr; Vernacentries.print_subgoals () end (* We create a new parser entry [proof_mode]. The Declarative proof mode will replace the normal parser entry for tactics with this one. *) let proof_mode = Gram.entry_create "vernac:proof_command" (* Auxiliary grammar entry. *) let proof_instr = Gram.entry_create "proofmode:instr" (* Before we can write an new toplevel command (see below) which takes a [proof_instr] as argument, we need to declare how to parse it, print it, globalise it and interprete it. Normally we could do that easily through ARGUMENT EXTEND, but as the parsing is fairly complicated we will do it manually to indirect through the [proof_instr] grammar entry. *) (* spiwack: proposal: doing that directly from argextend.ml4, maybe ? *) (* [Genarg.create_arg] creates a new embedding into Genarg. *) let (wit_proof_instr,globwit_proof_instr,rawwit_proof_instr) = Genarg.create_arg None "proof_instr" let _ = Tacinterp.add_interp_genarg "proof_instr" begin begin fun e x -> (* declares the globalisation function *) Genarg.in_gen globwit_proof_instr (Decl_interp.intern_proof_instr e (Genarg.out_gen rawwit_proof_instr x)) end, begin fun ist gl x -> (* declares the interpretation function *) Tacmach.project gl , Genarg.in_gen wit_proof_instr (interp_proof_instr ist gl (Genarg.out_gen globwit_proof_instr x)) end, begin fun _ x -> x end (* declares the substitution function, irrelevant in our case *) end let _ = Pptactic.declare_extra_genarg_pprule (rawwit_proof_instr, pr_raw_proof_instr) (globwit_proof_instr, pr_glob_proof_instr) (wit_proof_instr, pr_proof_instr) (* We use the VERNAC EXTEND facility with a custom non-terminal to populate [proof_mode] with a new toplevel interpreter. The "-" indicates that the rule does not start with a distinguished string. *) VERNAC proof_mode EXTEND ProofInstr [ - proof_instr(instr) ] -> [ vernac_proof_instr instr ] END (* It is useful to use GEXTEND directly to call grammar entries that have been defined previously VERNAC EXTEND. In this case we allow, in proof mode, the use of commands like Check or Print. VERNAC EXTEND does quite a bit of bureaucracy for us, but it is not needed in this sort of case, and it would require to have an ARGUMENT EXTEND version of the "proof_mode" grammar entry. *) GEXTEND Gram GLOBAL: proof_mode ; proof_mode: LAST [ [ c=G_vernac.subgoal_command -> c (Some 1) ] ] ; END (* We register a new proof mode here *) let _ = Proof_global.register_proof_mode { Proof_global. 
name = "Declarative" ; (* name for identifying and printing *) (* function [set] goes from No Proof Mode to Declarative Proof Mode performing side effects *) set = begin fun () -> (* We set the command non terminal to [proof_mode] (which we just defined). *) G_vernac.set_command_entry proof_mode ; (* We substitute the goal printer, by the one we built for the proof mode. *) Printer.set_printer_pr { Printer.default_printer_pr with Printer.pr_goal = pr_goal } end ; (* function [reset] goes back to No Proof Mode from Declarative Proof Mode *) reset = begin fun () -> (* We restore the command non terminal to [noedit_mode]. *) G_vernac.set_command_entry G_vernac.noedit_mode ; (* We restore the goal printer to default *) Printer.set_printer_pr Printer.default_printer_pr end } (* Two new vernacular commands *) VERNAC COMMAND EXTEND DeclProof [ "proof" ] -> [ vernac_decl_proof () ] END VERNAC COMMAND EXTEND DeclReturn [ "return" ] -> [ vernac_return () ] END let none_is_empty = function None -> [] | Some l -> l GEXTEND Gram GLOBAL: proof_instr; thesis : [[ "thesis" -> Plain | "thesis"; "for"; i=ident -> (For i) ]]; statement : [[ i=ident ; ":" ; c=constr -> {st_label=Name i;st_it=c} | i=ident -> {st_label=Anonymous; st_it=Topconstr.CRef (Libnames.Ident (loc, i))} | c=constr -> {st_label=Anonymous;st_it=c} ]]; constr_or_thesis : [[ t=thesis -> Thesis t ] | [ c=constr -> This c ]]; statement_or_thesis : [ [ t=thesis -> {st_label=Anonymous;st_it=Thesis t} ] | [ i=ident ; ":" ; cot=constr_or_thesis -> {st_label=Name i;st_it=cot} | i=ident -> {st_label=Anonymous; st_it=This (Topconstr.CRef (Libnames.Ident (loc, i)))} | c=constr -> {st_label=Anonymous;st_it=This c} ] ]; justification_items : [[ -> Some [] | "by"; l=LIST1 constr SEP "," -> Some l | "by"; "*" -> None ]] ; justification_method : [[ -> None | "using"; tac = tactic -> Some tac ]] ; simple_cut_or_thesis : [[ ls = statement_or_thesis; j = justification_items; taco = justification_method -> {cut_stat=ls;cut_by=j;cut_using=taco} ]] ; simple_cut : [[ ls = statement; j = justification_items; taco = justification_method -> {cut_stat=ls;cut_by=j;cut_using=taco} ]] ; elim_type: [[ IDENT "induction" -> ET_Induction | IDENT "cases" -> ET_Case_analysis ]] ; block_type : [[ IDENT "claim" -> B_claim | IDENT "focus" -> B_focus | IDENT "proof" -> B_proof | et=elim_type -> B_elim et ]] ; elim_obj: [[ IDENT "on"; c=constr -> Real c | IDENT "of"; c=simple_cut -> Virtual c ]] ; elim_step: [[ IDENT "consider" ; h=consider_vars ; IDENT "from" ; c=constr -> Pconsider (c,h) | IDENT "per"; et=elim_type; obj=elim_obj -> Pper (et,obj) | IDENT "suffices"; ls=suff_clause; j = justification_items; taco = justification_method -> Psuffices {cut_stat=ls;cut_by=j;cut_using=taco} ]] ; rew_step : [[ "~=" ; c=simple_cut -> (Rhs,c) | "=~" ; c=simple_cut -> (Lhs,c)]] ; cut_step: [[ "then"; tt=elim_step -> Pthen tt | "then"; c=simple_cut_or_thesis -> Pthen (Pcut c) | IDENT "thus"; tt=rew_step -> Pthus (let s,c=tt in Prew (s,c)) | IDENT "thus"; c=simple_cut_or_thesis -> Pthus (Pcut c) | IDENT "hence"; c=simple_cut_or_thesis -> Phence (Pcut c) | tt=elim_step -> tt | tt=rew_step -> let s,c=tt in Prew (s,c); | IDENT "have"; c=simple_cut_or_thesis -> Pcut c; | IDENT "claim"; c=statement -> Pclaim c; | IDENT "focus"; IDENT "on"; c=statement -> Pfocus c; | "end"; bt = block_type -> Pend bt; | IDENT "escape" -> Pescape ]] ; (* examiner s'il est possible de faire R _ et _ R pour R une relation qcq*) loc_id: [[ id=ident -> fun x -> (loc,(id,x)) ]]; hyp: [[ id=loc_id -> id None ; | id=loc_id 
; ":" ; c=constr -> id (Some c)]] ; consider_vars: [[ name=hyp -> [Hvar name] | name=hyp; ","; v=consider_vars -> (Hvar name) :: v | name=hyp; IDENT "such"; IDENT "that"; h=consider_hyps -> (Hvar name)::h ]] ; consider_hyps: [[ st=statement; IDENT "and"; h=consider_hyps -> Hprop st::h | st=statement; IDENT "and"; IDENT "consider" ; v=consider_vars -> Hprop st::v | st=statement -> [Hprop st] ]] ; assume_vars: [[ name=hyp -> [Hvar name] | name=hyp; ","; v=assume_vars -> (Hvar name) :: v | name=hyp; IDENT "such"; IDENT "that"; h=assume_hyps -> (Hvar name)::h ]] ; assume_hyps: [[ st=statement; IDENT "and"; h=assume_hyps -> Hprop st::h | st=statement; IDENT "and"; IDENT "we"; IDENT "have" ; v=assume_vars -> Hprop st::v | st=statement -> [Hprop st] ]] ; assume_clause: [[ IDENT "we" ; IDENT "have" ; v=assume_vars -> v | h=assume_hyps -> h ]] ; suff_vars: [[ name=hyp; IDENT "to"; IDENT "show" ; c = constr_or_thesis -> [Hvar name],c | name=hyp; ","; v=suff_vars -> let (q,c) = v in ((Hvar name) :: q),c | name=hyp; IDENT "such"; IDENT "that"; h=suff_hyps -> let (q,c) = h in ((Hvar name) :: q),c ]]; suff_hyps: [[ st=statement; IDENT "and"; h=suff_hyps -> let (q,c) = h in (Hprop st::q),c | st=statement; IDENT "and"; IDENT "to" ; IDENT "have" ; v=suff_vars -> let (q,c) = v in (Hprop st::q),c | st=statement; IDENT "to"; IDENT "show" ; c = constr_or_thesis -> [Hprop st],c ]] ; suff_clause: [[ IDENT "to" ; IDENT "have" ; v=suff_vars -> v | h=suff_hyps -> h ]] ; let_vars: [[ name=hyp -> [Hvar name] | name=hyp; ","; v=let_vars -> (Hvar name) :: v | name=hyp; IDENT "be"; IDENT "such"; IDENT "that"; h=let_hyps -> (Hvar name)::h ]] ; let_hyps: [[ st=statement; IDENT "and"; h=let_hyps -> Hprop st::h | st=statement; IDENT "and"; "let"; v=let_vars -> Hprop st::v | st=statement -> [Hprop st] ]]; given_vars: [[ name=hyp -> [Hvar name] | name=hyp; ","; v=given_vars -> (Hvar name) :: v | name=hyp; IDENT "such"; IDENT "that"; h=given_hyps -> (Hvar name)::h ]] ; given_hyps: [[ st=statement; IDENT "and"; h=given_hyps -> Hprop st::h | st=statement; IDENT "and"; IDENT "given"; v=given_vars -> Hprop st::v | st=statement -> [Hprop st] ]]; suppose_vars: [[name=hyp -> [Hvar name] |name=hyp; ","; v=suppose_vars -> (Hvar name) :: v |name=hyp; OPT[IDENT "be"]; IDENT "such"; IDENT "that"; h=suppose_hyps -> (Hvar name)::h ]] ; suppose_hyps: [[ st=statement_or_thesis; IDENT "and"; h=suppose_hyps -> Hprop st::h | st=statement_or_thesis; IDENT "and"; IDENT "we"; IDENT "have"; v=suppose_vars -> Hprop st::v | st=statement_or_thesis -> [Hprop st] ]] ; suppose_clause: [[ IDENT "we"; IDENT "have"; v=suppose_vars -> v; | h=suppose_hyps -> h ]] ; intro_step: [[ IDENT "suppose" ; h=assume_clause -> Psuppose h | IDENT "suppose" ; IDENT "it"; IDENT "is" ; c=pattern LEVEL "0" ; po=OPT[ "with"; p=LIST1 hyp SEP ","-> p ] ; ho=OPT[ IDENT "and" ; h=suppose_clause -> h ] -> Pcase (none_is_empty po,c,none_is_empty ho) | "let" ; v=let_vars -> Plet v | IDENT "take"; witnesses = LIST1 constr SEP "," -> Ptake witnesses | IDENT "assume"; h=assume_clause -> Passume h | IDENT "given"; h=given_vars -> Pgiven h | IDENT "define"; id=ident; args=LIST0 hyp; "as"; body=constr -> Pdefine(id,args,body) | IDENT "reconsider"; id=ident; "as" ; typ=constr -> Pcast (This id,typ) | IDENT "reconsider"; t=thesis; "as" ; typ=constr -> Pcast (Thesis t ,typ) ]] ; emphasis : [[ -> 0 | "*" -> 1 | "**" -> 2 | "***" -> 3 ]] ; bare_proof_instr: [[ c = cut_step -> c ; | i = intro_step -> i ]] ; proof_instr : [[ e=emphasis;i=bare_proof_instr;"." 
-> {emph=e;instr=i}]] ; END;; coq-8.4pl2/plugins/decl_mode/ppdecl_proof.mli0000640000175000001440000000011411364120540020372 0ustar notinusers val pr_proof_instr : Environ.env -> Decl_expr.proof_instr -> Pp.std_ppcmds coq-8.4pl2/plugins/rtauto/0000750000175000001440000000000012127276542014635 5ustar notinuserscoq-8.4pl2/plugins/rtauto/g_rtauto.ml40000640000175000001440000000120012010532755017061 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* [ Refl_tauto.rtauto_tac ] END coq-8.4pl2/plugins/rtauto/proof_search.ml0000640000175000001440000003417212010532755017641 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* !pruning); optwrite=(fun b -> pruning:=b)} let _ = declare_bool_option opt_pruning type form= Atom of int | Arrow of form * form | Bot | Conjunct of form * form | Disjunct of form * form type tag=int let decomp_form=function Atom i -> Some (i,[]) | Arrow (f1,f2) -> Some (-1,[f1;f2]) | Bot -> Some (-2,[]) | Conjunct (f1,f2) -> Some (-3,[f1;f2]) | Disjunct (f1,f2) -> Some (-4,[f1;f2]) module Fmap=Map.Make(struct type t=form let compare=compare end) type sequent = {rev_hyps: form Intmap.t; norev_hyps: form Intmap.t; size:int; left:int Fmap.t; right:(int*form) list Fmap.t; cnx:(int*int*form*form) list; abs:int option; gl:form} let add_one_arrow i f1 f2 m= try Fmap.add f1 ((i,f2)::(Fmap.find f1 m)) m with Not_found -> Fmap.add f1 [i,f2] m type proof = Ax of int | I_Arrow of proof | E_Arrow of int*int*proof | D_Arrow of int*proof*proof | E_False of int | I_And of proof*proof | E_And of int*proof | D_And of int*proof | I_Or_l of proof | I_Or_r of proof | E_Or of int*proof*proof | D_Or of int*proof | Pop of int*proof type rule = SAx of int | SI_Arrow | SE_Arrow of int*int | SD_Arrow of int | SE_False of int | SI_And | SE_And of int | SD_And of int | SI_Or_l | SI_Or_r | SE_Or of int | SD_Or of int let add_step s sub = match s,sub with SAx i,[] -> Ax i | SI_Arrow,[p] -> I_Arrow p | SE_Arrow(i,j),[p] -> E_Arrow (i,j,p) | SD_Arrow i,[p1;p2] -> D_Arrow (i,p1,p2) | SE_False i,[] -> E_False i | SI_And,[p1;p2] -> I_And(p1,p2) | SE_And i,[p] -> E_And(i,p) | SD_And i,[p] -> D_And(i,p) | SI_Or_l,[p] -> I_Or_l p | SI_Or_r,[p] -> I_Or_r p | SE_Or i,[p1;p2] -> E_Or(i,p1,p2) | SD_Or i,[p] -> D_Or(i,p) | _,_ -> anomaly "add_step: wrong arity" type 'a with_deps = {dep_it:'a; dep_goal:bool; dep_hyps:Intset.t} type slice= {proofs_done:proof list; proofs_todo:sequent with_deps list; step:rule; needs_goal:bool; needs_hyps:Intset.t; changes_goal:bool; creates_hyps:Intset.t} type state = Complete of proof | Incomplete of sequent * slice list let project = function Complete prf -> prf | Incomplete (_,_) -> anomaly "not a successful state" let pop n prf = let nprf= match prf.dep_it with Pop (i,p) -> Pop (i+n,p) | p -> Pop(n,p) in {prf with dep_it = nprf} let rec fill stack proof = match stack with [] -> Complete proof.dep_it | slice::super -> if !pruning && slice.proofs_done=[] && not (slice.changes_goal && proof.dep_goal) && not (Intset.exists (fun i -> Intset.mem i proof.dep_hyps) slice.creates_hyps) then begin s_info.pruned_steps<-s_info.pruned_steps+1; s_info.pruned_branches<- s_info.pruned_branches + List.length slice.proofs_todo; let created_here=Intset.cardinal slice.creates_hyps in s_info.pruned_hyps<-s_info.pruned_hyps+ List.fold_left (fun sum dseq -> sum + Intset.cardinal dseq.dep_hyps) 
created_here slice.proofs_todo; fill super (pop (Intset.cardinal slice.creates_hyps) proof) end else let dep_hyps= Intset.union slice.needs_hyps (Intset.diff proof.dep_hyps slice.creates_hyps) in let dep_goal= slice.needs_goal || ((not slice.changes_goal) && proof.dep_goal) in let proofs_done= proof.dep_it::slice.proofs_done in match slice.proofs_todo with [] -> fill super {dep_it = add_step slice.step (List.rev proofs_done); dep_goal = dep_goal; dep_hyps = dep_hyps} | current::next -> let nslice= {proofs_done=proofs_done; proofs_todo=next; step=slice.step; needs_goal=dep_goal; needs_hyps=dep_hyps; changes_goal=current.dep_goal; creates_hyps=current.dep_hyps} in Incomplete (current.dep_it,nslice::super) let append stack (step,subgoals) = s_info.created_steps<-s_info.created_steps+1; match subgoals with [] -> s_info.branch_successes<-s_info.branch_successes+1; fill stack {dep_it=add_step step.dep_it []; dep_goal=step.dep_goal; dep_hyps=step.dep_hyps} | hd :: next -> s_info.created_branches<- s_info.created_branches+List.length next; let slice= {proofs_done=[]; proofs_todo=next; step=step.dep_it; needs_goal=step.dep_goal; needs_hyps=step.dep_hyps; changes_goal=hd.dep_goal; creates_hyps=hd.dep_hyps} in Incomplete(hd.dep_it,slice::stack) let embed seq= {dep_it=seq; dep_goal=false; dep_hyps=Intset.empty} let change_goal seq gl= {seq with dep_it={seq.dep_it with gl=gl}; dep_goal=true} let add_hyp seqwd f= s_info.created_hyps<-s_info.created_hyps+1; let seq=seqwd.dep_it in let num = seq.size+1 in let left = Fmap.add f num seq.left in let cnx,right= try let l=Fmap.find f seq.right in List.fold_right (fun (i,f0) l0 -> (num,i,f,f0)::l0) l seq.cnx, Fmap.remove f seq.right with Not_found -> seq.cnx,seq.right in let nseq= match f with Bot -> {seq with left=left; right=right; size=num; abs=Some num; cnx=cnx} | Atom _ -> {seq with size=num; left=left; right=right; cnx=cnx} | Conjunct (_,_) | Disjunct (_,_) -> {seq with rev_hyps=Intmap.add num f seq.rev_hyps; size=num; left=left; right=right; cnx=cnx} | Arrow (f1,f2) -> let ncnx,nright= try let i = Fmap.find f1 seq.left in (i,num,f1,f2)::cnx,right with Not_found -> cnx,(add_one_arrow num f1 f2 right) in match f1 with Conjunct (_,_) | Disjunct (_,_) -> {seq with rev_hyps=Intmap.add num f seq.rev_hyps; size=num; left=left; right=nright; cnx=ncnx} | Arrow(_,_) -> {seq with norev_hyps=Intmap.add num f seq.norev_hyps; size=num; left=left; right=nright; cnx=ncnx} | _ -> {seq with size=num; left=left; right=nright; cnx=ncnx} in {seqwd with dep_it=nseq; dep_hyps=Intset.add num seqwd.dep_hyps} exception Here_is of (int*form) let choose m= try Intmap.iter (fun i f -> raise (Here_is (i,f))) m; raise Not_found with Here_is (i,f) -> (i,f) let search_or seq= match seq.gl with Disjunct (f1,f2) -> [{dep_it = SI_Or_l; dep_goal = true; dep_hyps = Intset.empty}, [change_goal (embed seq) f1]; {dep_it = SI_Or_r; dep_goal = true; dep_hyps = Intset.empty}, [change_goal (embed seq) f2]] | _ -> [] let search_norev seq= let goals=ref (search_or seq) in let add_one i f= match f with Arrow (Arrow (f1,f2),f3) -> let nseq = {seq with norev_hyps=Intmap.remove i seq.norev_hyps} in goals:= ({dep_it=SD_Arrow(i); dep_goal=false; dep_hyps=Intset.singleton i}, [add_hyp (add_hyp (change_goal (embed nseq) f2) (Arrow(f2,f3))) f1; add_hyp (embed nseq) f3]):: !goals | _ -> anomaly "search_no_rev: can't happen" in Intmap.iter add_one seq.norev_hyps; List.rev !goals let search_in_rev_hyps seq= try let i,f=choose seq.rev_hyps in let make_step step= {dep_it=step; dep_goal=false; 
dep_hyps=Intset.singleton i} in let nseq={seq with rev_hyps=Intmap.remove i seq.rev_hyps} in match f with Conjunct (f1,f2) -> [make_step (SE_And(i)), [add_hyp (add_hyp (embed nseq) f1) f2]] | Disjunct (f1,f2) -> [make_step (SE_Or(i)), [add_hyp (embed nseq) f1;add_hyp (embed nseq) f2]] | Arrow (Conjunct (f1,f2),f0) -> [make_step (SD_And(i)), [add_hyp (embed nseq) (Arrow (f1,Arrow (f2,f0)))]] | Arrow (Disjunct (f1,f2),f0) -> [make_step (SD_Or(i)), [add_hyp (add_hyp (embed nseq) (Arrow(f1,f0))) (Arrow (f2,f0))]] | _ -> anomaly "search_in_rev_hyps: can't happen" with Not_found -> search_norev seq let search_rev seq= match seq.cnx with (i,j,f1,f2)::next -> let nseq= match f1 with Conjunct (_,_) | Disjunct (_,_) -> {seq with cnx=next; rev_hyps=Intmap.remove j seq.rev_hyps} | Arrow (_,_) -> {seq with cnx=next; norev_hyps=Intmap.remove j seq.norev_hyps} | _ -> {seq with cnx=next} in [{dep_it=SE_Arrow(i,j); dep_goal=false; dep_hyps=Intset.add i (Intset.singleton j)}, [add_hyp (embed nseq) f2]] | [] -> match seq.gl with Arrow (f1,f2) -> [{dep_it=SI_Arrow; dep_goal=true; dep_hyps=Intset.empty}, [add_hyp (change_goal (embed seq) f2) f1]] | Conjunct (f1,f2) -> [{dep_it=SI_And; dep_goal=true; dep_hyps=Intset.empty},[change_goal (embed seq) f1; change_goal (embed seq) f2]] | _ -> search_in_rev_hyps seq let search_all seq= match seq.abs with Some i -> [{dep_it=SE_False (i); dep_goal=false; dep_hyps=Intset.singleton i},[]] | None -> try let ax = Fmap.find seq.gl seq.left in [{dep_it=SAx (ax); dep_goal=true; dep_hyps=Intset.singleton ax},[]] with Not_found -> search_rev seq let bare_sequent = embed {rev_hyps=Intmap.empty; norev_hyps=Intmap.empty; size=0; left=Fmap.empty; right=Fmap.empty; cnx=[]; abs=None; gl=Bot} let init_state hyps gl= let init = change_goal bare_sequent gl in let goal=List.fold_right (fun (_,f,_) seq ->add_hyp seq f) hyps init in Incomplete (goal.dep_it,[]) let success= function Complete _ -> true | Incomplete (_,_) -> false let branching = function Incomplete (seq,stack) -> check_for_interrupt (); let successors = search_all seq in let _ = match successors with [] -> s_info.branch_failures<-s_info.branch_failures+1 | _::next -> s_info.nd_branching<-s_info.nd_branching+List.length next in List.map (append stack) successors | Complete prf -> anomaly "already succeeded" open Pp let rec pp_form = function Arrow(f1,f2) -> (pp_or f1) ++ (str " -> ") ++ (pp_form f2) | f -> pp_or f and pp_or = function Disjunct(f1,f2) -> (pp_or f1) ++ (str " \\/ ") ++ (pp_and f2) | f -> pp_and f and pp_and = function Conjunct(f1,f2) -> (pp_and f1) ++ (str " /\\ ") ++ (pp_atom f2) | f -> pp_atom f and pp_atom= function Bot -> str "#" | Atom n -> int n | f -> str "(" ++ hv 2 (pp_form f) ++ str ")" let pr_form f = msg (pp_form f) let pp_intmap map = let pp=ref (str "") in Intmap.iter (fun i obj -> pp:= (!pp ++ pp_form obj ++ cut ())) map; str "{ " ++ v 0 (!pp) ++ str " }" let pp_list pp_obj l= let pp=ref (str "") in List.iter (fun o -> pp := !pp ++ (pp_obj o) ++ str ", ") l; str "[ " ++ !pp ++ str "]" let pp_mapint map = let pp=ref (str "") in Fmap.iter (fun obj l -> pp:= (!pp ++ pp_form obj ++ str " => " ++ pp_list (fun (i,f) -> pp_form f) l ++ cut ()) ) map; str "{ " ++ vb 0 ++ (!pp) ++ str " }" ++ close () let pp_connect (i,j,f1,f2) = pp_form f1 ++ str " => " ++ pp_form f2 let pp_gl gl= cut () ++ str "{ " ++ vb 0 ++ begin match gl.abs with None -> str "" | Some i -> str "ABSURD" ++ cut () end ++ str "rev =" ++ pp_intmap gl.rev_hyps ++ cut () ++ str "norev =" ++ pp_intmap gl.norev_hyps ++ cut () ++ str "arrows=" 
++ pp_mapint gl.right ++ cut () ++ str "cnx =" ++ pp_list pp_connect gl.cnx ++ cut () ++ str "goal =" ++ pp_form gl.gl ++ str " }" ++ close () let pp = function Incomplete(gl,ctx) -> pp_gl gl ++ fnl () | _ -> str "" let pp_info () = let count_info = if !pruning then str "Proof steps : " ++ int s_info.created_steps ++ str " created / " ++ int s_info.pruned_steps ++ str " pruned" ++ fnl () ++ str "Proof branches : " ++ int s_info.created_branches ++ str " created / " ++ int s_info.pruned_branches ++ str " pruned" ++ fnl () ++ str "Hypotheses : " ++ int s_info.created_hyps ++ str " created / " ++ int s_info.pruned_hyps ++ str " pruned" ++ fnl () else str "Pruning is off" ++ fnl () ++ str "Proof steps : " ++ int s_info.created_steps ++ str " created" ++ fnl () ++ str "Proof branches : " ++ int s_info.created_branches ++ str " created" ++ fnl () ++ str "Hypotheses : " ++ int s_info.created_hyps ++ str " created" ++ fnl () in msgnl ( str "Proof-search statistics :" ++ fnl () ++ count_info ++ str "Branch ends: " ++ int s_info.branch_successes ++ str " successes / " ++ int s_info.branch_failures ++ str " failures" ++ fnl () ++ str "Non-deterministic choices : " ++ int s_info.nd_branching ++ str " branches") coq-8.4pl2/plugins/rtauto/Rtauto.v0000640000175000001440000002421712010532755016301 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* form | Arrow : form -> form -> form | Bot | Conjunct : form -> form -> form | Disjunct : form -> form -> form. Notation "[ n ]":=(Atom n). Notation "A =>> B":= (Arrow A B) (at level 59, right associativity). Notation "#" := Bot. Notation "A //\\ B" := (Conjunct A B) (at level 57, left associativity). Notation "A \\// B" := (Disjunct A B) (at level 58, left associativity). Definition ctx := Store form. Fixpoint pos_eq (m n:positive) {struct m} :bool := match m with xI mm => match n with xI nn => pos_eq mm nn | _ => false end | xO mm => match n with xO nn => pos_eq mm nn | _ => false end | xH => match n with xH => true | _ => false end end. Theorem pos_eq_refl : forall m n, pos_eq m n = true -> m = n. induction m;simpl;destruct n;congruence || (intro e;apply f_equal;auto). Qed. Fixpoint form_eq (p q:form) {struct p} :bool := match p with Atom m => match q with Atom n => pos_eq m n | _ => false end | Arrow p1 p2 => match q with Arrow q1 q2 => form_eq p1 q1 && form_eq p2 q2 | _ => false end | Bot => match q with Bot => true | _ => false end | Conjunct p1 p2 => match q with Conjunct q1 q2 => form_eq p1 q1 && form_eq p2 q2 | _ => false end | Disjunct p1 p2 => match q with Disjunct q1 q2 => form_eq p1 q1 && form_eq p2 q2 | _ => false end end. Theorem form_eq_refl: forall p q, form_eq p q = true -> p = q. induction p;destruct q;simpl;clean. intro h;generalize (pos_eq_refl _ _ h);congruence. case_eq (form_eq p1 q1);clean. intros e1 e2;generalize (IHp1 _ e1) (IHp2 _ e2);congruence. case_eq (form_eq p1 q1);clean. intros e1 e2;generalize (IHp1 _ e1) (IHp2 _ e2);congruence. case_eq (form_eq p1 q1);clean. intros e1 e2;generalize (IHp1 _ e1) (IHp2 _ e2);congruence. Qed. Arguments form_eq_refl [p q] _. Section with_env. Variable env:Store Prop. Fixpoint interp_form (f:form): Prop := match f with [n]=> match get n env with PNone => True | PSome P => P end | A =>> B => (interp_form A) -> (interp_form B) | # => False | A //\\ B => (interp_form A) /\ (interp_form B) | A \\// B => (interp_form A) \/ (interp_form B) end. Notation "[[ A ]]" := (interp_form A). 
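(* Note: [interp_form] maps the reified syntax back to [Prop], looking atoms up in [env]; an atom whose index is absent from [env] is read as [True]. For instance, [[ Atom xH =>> Bot ]] unfolds to [interp_form (Atom xH) -> False], i.e. the negation of whatever proposition [env] stores at index 1. *)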
Fixpoint interp_ctx (hyps:ctx) (F:Full hyps) (G:Prop) {struct F} : Prop := match F with F_empty => G | F_push H hyps0 F0 => interp_ctx hyps0 F0 ([[H]] -> G) end. Require Export BinPos. Ltac wipe := intros;simpl;constructor. Lemma compose0 : forall hyps F (A:Prop), A -> (interp_ctx hyps F A). induction F;intros A H;simpl;auto. Qed. Lemma compose1 : forall hyps F (A B:Prop), (A -> B) -> (interp_ctx hyps F A) -> (interp_ctx hyps F B). induction F;intros A B H;simpl;auto. apply IHF;auto. Qed. Theorem compose2 : forall hyps F (A B C:Prop), (A -> B -> C) -> (interp_ctx hyps F A) -> (interp_ctx hyps F B) -> (interp_ctx hyps F C). induction F;intros A B C H;simpl;auto. apply IHF;auto. Qed. Theorem compose3 : forall hyps F (A B C D:Prop), (A -> B -> C -> D) -> (interp_ctx hyps F A) -> (interp_ctx hyps F B) -> (interp_ctx hyps F C) -> (interp_ctx hyps F D). induction F;intros A B C D H;simpl;auto. apply IHF;auto. Qed. Lemma weaken : forall hyps F f G, (interp_ctx hyps F G) -> (interp_ctx (hyps\f) (F_push f hyps F) G). induction F;simpl;intros;auto. apply compose1 with ([[a]]-> G);auto. Qed. Theorem project_In : forall hyps F g, In g hyps F -> interp_ctx hyps F [[g]]. induction F;simpl. contradiction. intros g H;destruct H. subst;apply compose0;simpl;trivial. apply compose1 with [[g]];auto. Qed. Theorem project : forall hyps F p g, get p hyps = PSome g-> interp_ctx hyps F [[g]]. intros hyps F p g e; apply project_In. apply get_In with p;assumption. Qed. Arguments project [hyps] F [p g] _. Inductive proof:Set := Ax : positive -> proof | I_Arrow : proof -> proof | E_Arrow : positive -> positive -> proof -> proof | D_Arrow : positive -> proof -> proof -> proof | E_False : positive -> proof | I_And: proof -> proof -> proof | E_And: positive -> proof -> proof | D_And: positive -> proof -> proof | I_Or_l: proof -> proof | I_Or_r: proof -> proof | E_Or: positive -> proof -> proof -> proof | D_Or: positive -> proof -> proof | Cut: form -> proof -> proof -> proof. Notation "hyps \ A" := (push A hyps) (at level 72,left associativity). Fixpoint check_proof (hyps:ctx) (gl:form) (P:proof) {struct P}: bool := match P with Ax i => match get i hyps with PSome F => form_eq F gl | _ => false end | I_Arrow p => match gl with A =>> B => check_proof (hyps \ A) B p | _ => false end | E_Arrow i j p => match get i hyps,get j hyps with PSome A,PSome (B =>>C) => form_eq A B && check_proof (hyps \ C) (gl) p | _,_ => false end | D_Arrow i p1 p2 => match get i hyps with PSome ((A =>>B)=>>C) => (check_proof ( hyps \ B =>> C \ A) B p1) && (check_proof (hyps \ C) gl p2) | _ => false end | E_False i => match get i hyps with PSome # => true | _ => false end | I_And p1 p2 => match gl with A //\\ B => check_proof hyps A p1 && check_proof hyps B p2 | _ => false end | E_And i p => match get i hyps with PSome (A //\\ B) => check_proof (hyps \ A \ B) gl p | _=> false end | D_And i p => match get i hyps with PSome (A //\\ B =>> C) => check_proof (hyps \ A=>>B=>>C) gl p | _=> false end | I_Or_l p => match gl with (A \\// B) => check_proof hyps A p | _ => false end | I_Or_r p => match gl with (A \\// B) => check_proof hyps B p | _ => false end | E_Or i p1 p2 => match get i hyps with PSome (A \\// B) => check_proof (hyps \ A) gl p1 && check_proof (hyps \ B) gl p2 | _=> false end | D_Or i p => match get i hyps with PSome (A \\// B =>> C) => (check_proof (hyps \ A=>>C \ B=>>C) gl p) | _=> false end | Cut A p1 p2 => check_proof hyps A p1 && check_proof (hyps \ A) gl p2 end. 
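(* A minimal illustrative check of the proof checker (the name [check_proof_Ax] is only for illustration): the [Ax i] rule succeeds exactly when the hypothesis stored at index [i] is equal, in the sense of [form_eq], to the goal. Here the context holds the goal [Atom xH] at index 1, so the whole test computes to [true]. *)
Example check_proof_Ax :
  check_proof (empty \ Atom xH) (Atom xH) (Ax xH) = true.
Proof. reflexivity. Qed.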
Theorem interp_proof: forall p hyps F gl, check_proof hyps gl p = true -> interp_ctx hyps F [[gl]]. induction p;intros hyps F gl. (* cas Axiom *) Focus 1. simpl;case_eq (get p hyps);clean. intros f nth_f e;rewrite <- (form_eq_refl e). apply project with p;trivial. (* Cas Arrow_Intro *) Focus 1. destruct gl;clean. simpl;intros. change (interp_ctx (hyps\gl1) (F_push gl1 hyps F) [[gl2]]). apply IHp;try constructor;trivial. (* Cas Arrow_Elim *) Focus 1. simpl check_proof;case_eq (get p hyps);clean. intros f ef;case_eq (get p0 hyps);clean. intros f0 ef0;destruct f0;clean. case_eq (form_eq f f0_1);clean. simpl;intros e check_p1. generalize (project F ef) (project F ef0) (IHp (hyps \ f0_2) (F_push f0_2 hyps F) gl check_p1); clear check_p1 IHp p p0 p1 ef ef0. simpl. apply compose3. rewrite (form_eq_refl e). auto. (* cas Arrow_Destruct *) Focus 1. simpl;case_eq (get p1 hyps);clean. intros f ef;destruct f;clean. destruct f1;clean. case_eq (check_proof (hyps \ f1_2 =>> f2 \ f1_1) f1_2 p2);clean. intros check_p1 check_p2. generalize (project F ef) (IHp1 (hyps \ f1_2 =>> f2 \ f1_1) (F_push f1_1 (hyps \ f1_2 =>> f2) (F_push (f1_2 =>> f2) hyps F)) f1_2 check_p1) (IHp2 (hyps \ f2) (F_push f2 hyps F) gl check_p2). simpl;apply compose3;auto. (* Cas False_Elim *) Focus 1. simpl;case_eq (get p hyps);clean. intros f ef;destruct f;clean. intros _; generalize (project F ef). apply compose1;apply False_ind. (* Cas And_Intro *) Focus 1. simpl;destruct gl;clean. case_eq (check_proof hyps gl1 p1);clean. intros Hp1 Hp2;generalize (IHp1 hyps F gl1 Hp1) (IHp2 hyps F gl2 Hp2). apply compose2 ;simpl;auto. (* cas And_Elim *) Focus 1. simpl;case_eq (get p hyps);clean. intros f ef;destruct f;clean. intro check_p;generalize (project F ef) (IHp (hyps \ f1 \ f2) (F_push f2 (hyps \ f1) (F_push f1 hyps F)) gl check_p). simpl;apply compose2;intros [h1 h2];auto. (* cas And_Destruct *) Focus 1. simpl;case_eq (get p hyps);clean. intros f ef;destruct f;clean. destruct f1;clean. intro H;generalize (project F ef) (IHp (hyps \ f1_1 =>> f1_2 =>> f2) (F_push (f1_1 =>> f1_2 =>> f2) hyps F) gl H);clear H;simpl. apply compose2;auto. (* cas Or_Intro_left *) Focus 1. destruct gl;clean. intro Hp;generalize (IHp hyps F gl1 Hp). apply compose1;simpl;auto. (* cas Or_Intro_right *) Focus 1. destruct gl;clean. intro Hp;generalize (IHp hyps F gl2 Hp). apply compose1;simpl;auto. (* cas Or_elim *) Focus 1. simpl;case_eq (get p1 hyps);clean. intros f ef;destruct f;clean. case_eq (check_proof (hyps \ f1) gl p2);clean. intros check_p1 check_p2;generalize (project F ef) (IHp1 (hyps \ f1) (F_push f1 hyps F) gl check_p1) (IHp2 (hyps \ f2) (F_push f2 hyps F) gl check_p2); simpl;apply compose3;simpl;intro h;destruct h;auto. (* cas Or_Destruct *) Focus 1. simpl;case_eq (get p hyps);clean. intros f ef;destruct f;clean. destruct f1;clean. intro check_p0;generalize (project F ef) (IHp (hyps \ f1_1 =>> f2 \ f1_2 =>> f2) (F_push (f1_2 =>> f2) (hyps \ f1_1 =>> f2) (F_push (f1_1 =>> f2) hyps F)) gl check_p0);simpl. apply compose2;auto. (* cas Cut *) Focus 1. simpl;case_eq (check_proof hyps f p1);clean. intros check_p1 check_p2; generalize (IHp1 hyps F f check_p1) (IHp2 (hyps\f) (F_push f hyps F) gl check_p2); simpl; apply compose2;auto. Qed. Theorem Reflect: forall gl prf, if check_proof empty gl prf then [[gl]] else True. intros gl prf;case_eq (check_proof empty gl prf);intro check_prf. change (interp_ctx empty F_empty [[gl]]) ; apply interp_proof with prf;assumption. trivial. Qed. End with_env. (* (* A small example *) Parameters A B C D:Prop. 
Theorem toto:A /\ (B \/ C) -> (A /\ B) \/ (A /\ C). exact (Reflect (empty \ A \ B \ C) ([1] //\\ ([2] \\// [3]) =>> [1] //\\ [2] \\// [1] //\\ [3]) (I_Arrow (E_And 1 (E_Or 3 (I_Or_l (I_And (Ax 2) (Ax 4))) (I_Or_r (I_And (Ax 2) (Ax 4))))))). Qed. Print toto. *) coq-8.4pl2/plugins/rtauto/vo.itarget0000640000175000001440000000002511307752066016637 0ustar notinusersBintree.vo Rtauto.vo coq-8.4pl2/plugins/rtauto/refl_tauto.mli0000640000175000001440000000173412010532755017502 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Proof_type.goal Tacmach.sigma -> Term.types -> Proof_search.form val make_hyps : atom_env -> Proof_type.goal Tacmach.sigma -> Term.types list -> (Names.identifier * Term.types option * Term.types) list -> (Names.identifier * Proof_search.form) list val rtauto_tac : Proof_type.tactic coq-8.4pl2/plugins/rtauto/proof_search.mli0000640000175000001440000000232212010532755020002 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* proof val init_state : ('a * form * 'b) list -> form -> state val branching: state -> state list val success: state -> bool val pp: state -> Pp.std_ppcmds val pr_form : form -> unit val reset_info : unit -> unit val pp_info : unit -> unit coq-8.4pl2/plugins/rtauto/rtauto_plugin.mllib0000640000175000001440000000006311161000644020533 0ustar notinusersProof_search Refl_tauto G_rtauto Rtauto_plugin_mod coq-8.4pl2/plugins/rtauto/refl_tauto.ml0000640000175000001440000002351212010532755017327 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* Closure.whd_val infos (Closure.inject t)) let special_nf gl= let infos=Closure.create_clos_infos Closure.betaiotazeta (pf_env gl) in (fun t -> Closure.norm_val infos (Closure.inject t)) type atom_env= {mutable next:int; mutable env:(constr*int) list} let make_atom atom_env term= try let (_,i)= List.find (fun (t,_)-> eq_constr term t) atom_env.env in Atom i with Not_found -> let i=atom_env.next in atom_env.env <- (term,i)::atom_env.env; atom_env.next<- i + 1; Atom i let rec make_form atom_env gls term = let normalize=special_nf gls in let cciterm=special_whd gls term in match kind_of_term cciterm with Prod(_,a,b) -> if not (Termops.dependent (mkRel 1) b) && Retyping.get_sort_family_of (pf_env gls) (Tacmach.project gls) a = InProp then let fa=make_form atom_env gls a in let fb=make_form atom_env gls b in Arrow (fa,fb) else make_atom atom_env (normalize term) | Cast(a,_,_) -> make_form atom_env gls a | Ind ind -> if ind = Lazy.force li_False then Bot else make_atom atom_env (normalize term) | App(hd,argv) when Array.length argv = 2 -> begin try let ind = destInd hd in if ind = Lazy.force li_and then let fa=make_form atom_env gls argv.(0) in let fb=make_form atom_env gls argv.(1) in Conjunct (fa,fb) else if ind = Lazy.force li_or then let fa=make_form atom_env gls argv.(0) in let fb=make_form atom_env gls argv.(1) in Disjunct (fa,fb) else make_atom atom_env (normalize term) with Invalid_argument _ -> make_atom atom_env (normalize term) end | _ -> make_atom atom_env (normalize term) let rec make_hyps atom_env gls lenv = function [] -> [] | (_,Some body,typ)::rest -> make_hyps atom_env gls (typ::body::lenv) rest | (id,None,typ)::rest -> let hrec= make_hyps atom_env gls (typ::lenv) rest in if List.exists 
(Termops.dependent (mkVar id)) lenv || (Retyping.get_sort_family_of (pf_env gls) (Tacmach.project gls) typ <> InProp) then hrec else (id,make_form atom_env gls typ)::hrec let rec build_pos n = if n<=1 then force node_count l_xH else if n land 1 = 0 then mkApp (force node_count l_xO,[|build_pos (n asr 1)|]) else mkApp (force node_count l_xI,[|build_pos (n asr 1)|]) let rec build_form = function Atom n -> mkApp (force node_count l_Atom,[|build_pos n|]) | Arrow (f1,f2) -> mkApp (force node_count l_Arrow,[|build_form f1;build_form f2|]) | Bot -> force node_count l_Bot | Conjunct (f1,f2) -> mkApp (force node_count l_Conjunct,[|build_form f1;build_form f2|]) | Disjunct (f1,f2) -> mkApp (force node_count l_Disjunct,[|build_form f1;build_form f2|]) let rec decal k = function [] -> k | (start,delta)::rest -> if k>start then k - delta else decal k rest let add_pop size d pops= match pops with [] -> [size+d,d] | (_,sum)::_ -> (size+sum,sum+d)::pops let rec build_proof pops size = function Ax i -> mkApp (force step_count l_Ax, [|build_pos (decal i pops)|]) | I_Arrow p -> mkApp (force step_count l_I_Arrow, [|build_proof pops (size + 1) p|]) | E_Arrow(i,j,p) -> mkApp (force step_count l_E_Arrow, [|build_pos (decal i pops); build_pos (decal j pops); build_proof pops (size + 1) p|]) | D_Arrow(i,p1,p2) -> mkApp (force step_count l_D_Arrow, [|build_pos (decal i pops); build_proof pops (size + 2) p1; build_proof pops (size + 1) p2|]) | E_False i -> mkApp (force step_count l_E_False, [|build_pos (decal i pops)|]) | I_And(p1,p2) -> mkApp (force step_count l_I_And, [|build_proof pops size p1; build_proof pops size p2|]) | E_And(i,p) -> mkApp (force step_count l_E_And, [|build_pos (decal i pops); build_proof pops (size + 2) p|]) | D_And(i,p) -> mkApp (force step_count l_D_And, [|build_pos (decal i pops); build_proof pops (size + 1) p|]) | I_Or_l(p) -> mkApp (force step_count l_I_Or_l, [|build_proof pops size p|]) | I_Or_r(p) -> mkApp (force step_count l_I_Or_r, [|build_proof pops size p|]) | E_Or(i,p1,p2) -> mkApp (force step_count l_E_Or, [|build_pos (decal i pops); build_proof pops (size + 1) p1; build_proof pops (size + 1) p2|]) | D_Or(i,p) -> mkApp (force step_count l_D_Or, [|build_pos (decal i pops); build_proof pops (size + 2) p|]) | Pop(d,p) -> build_proof (add_pop size d pops) size p let build_env gamma= List.fold_right (fun (p,_) e -> mkApp(force node_count l_push,[|mkProp;p;e|])) gamma.env (mkApp (force node_count l_empty,[|mkProp|])) open Goptions let verbose = ref false let opt_verbose= {optsync=true; optdepr=false; optname="Rtauto Verbose"; optkey=["Rtauto";"Verbose"]; optread=(fun () -> !verbose); optwrite=(fun b -> verbose:=b)} let _ = declare_bool_option opt_verbose let check = ref false let opt_check= {optsync=true; optdepr=false; optname="Rtauto Check"; optkey=["Rtauto";"Check"]; optread=(fun () -> !check); optwrite=(fun b -> check:=b)} let _ = declare_bool_option opt_check open Pp let rtauto_tac gls= Coqlib.check_required_library ["Coq";"rtauto";"Rtauto"]; let gamma={next=1;env=[]} in let gl=pf_concl gls in let _= if Retyping.get_sort_family_of (pf_env gls) (Tacmach.project gls) gl <> InProp then errorlabstrm "rtauto" (Pp.str "goal should be in Prop") in let glf=make_form gamma gls gl in let hyps=make_hyps gamma gls [gl] (pf_hyps gls) in let formula= List.fold_left (fun gl (_,f)-> Arrow (f,gl)) glf hyps in let search_fun = if Tacinterp.get_debug()=Tactic_debug.DebugOn 0 then Search.debug_depth_first else Search.depth_first in let _ = begin reset_info (); if !verbose then msgnl (str "Starting 
proof-search ..."); end in let search_start_time = System.get_time () in let prf = try project (search_fun (init_state [] formula)) with Not_found -> errorlabstrm "rtauto" (Pp.str "rtauto couldn't find any proof") in let search_end_time = System.get_time () in let _ = if !verbose then begin msgnl (str "Proof tree found in " ++ System.fmt_time_difference search_start_time search_end_time); pp_info (); msgnl (str "Building proof term ... ") end in let build_start_time=System.get_time () in let _ = step_count := 0; node_count := 0 in let main = mkApp (force node_count l_Reflect, [|build_env gamma; build_form formula; build_proof [] 0 prf|]) in let term= Term.applist (main,List.rev_map (fun (id,_) -> mkVar id) hyps) in let build_end_time=System.get_time () in let _ = if !verbose then begin msgnl (str "Proof term built in " ++ System.fmt_time_difference build_start_time build_end_time ++ fnl () ++ str "Proof size : " ++ int !step_count ++ str " steps" ++ fnl () ++ str "Proof term size : " ++ int (!step_count+ !node_count) ++ str " nodes (constants)" ++ fnl () ++ str "Giving proof term to Coq ... ") end in let tac_start_time = System.get_time () in let result= if !check then Tactics.exact_check term gls else Tactics.exact_no_check term gls in let tac_end_time = System.get_time () in let _ = if !check then msgnl (str "Proof term type-checking is on"); if !verbose then msgnl (str "Internal tactic executed in " ++ System.fmt_time_difference tac_start_time tac_end_time) in result coq-8.4pl2/plugins/rtauto/Bintree.v0000640000175000001440000002226212010532755016411 0ustar notinusers(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* (p ?= q) = Gt. Proof. intros. rewrite <- Pos.compare_succ_succ. now apply Pos.lt_gt, Pos.lt_lt_succ, Pos.gt_lt. Qed. Lemma Psucc_Gt : forall p, (Pos.succ p ?= p) = Gt. Proof. intros. apply Pos.lt_gt, Pos.lt_succ_diag_r. Qed. Fixpoint Lget (A:Set) (n:nat) (l:list A) {struct l}:option A := match l with nil => None | x::q => match n with O => Some x | S m => Lget A m q end end . Arguments Lget [A] n l. Lemma map_app : forall (A B:Set) (f:A -> B) l m, List.map f (l ++ m) = List.map f l ++ List.map f m. induction l. reflexivity. simpl. intro m ; apply f_equal;apply IHl. Qed. Lemma length_map : forall (A B:Set) (f:A -> B) l, length (List.map f l) = length l. induction l. reflexivity. simpl; apply f_equal;apply IHl. Qed. Lemma Lget_map : forall (A B:Set) (f:A -> B) i l, Lget i (List.map f l) = match Lget i l with Some a => Some (f a) | None => None end. induction i;intros [ | x l ] ;trivial. simpl;auto. Qed. Lemma Lget_app : forall (A:Set) (a:A) l i, Lget i (l ++ a :: nil) = if Arith.EqNat.beq_nat i (length l) then Some a else Lget i l. Proof. induction l;simpl Lget;simpl length. intros [ | i];simpl;reflexivity. intros [ | i];simpl. reflexivity. auto. Qed. Lemma Lget_app_Some : forall (A:Set) l delta i (a: A), Lget i l = Some a -> Lget i (l ++ delta) = Some a. induction l;destruct i;simpl;try congruence;auto. Qed. Section Store. Variable A:Type. Inductive Poption : Type:= PSome : A -> Poption | PNone : Poption. Inductive Tree : Type := Tempty : Tree | Branch0 : Tree -> Tree -> Tree | Branch1 : A -> Tree -> Tree -> Tree. 
Fixpoint Tget (p:positive) (T:Tree) {struct p} : Poption := match T with Tempty => PNone | Branch0 T1 T2 => match p with xI pp => Tget pp T2 | xO pp => Tget pp T1 | xH => PNone end | Branch1 a T1 T2 => match p with xI pp => Tget pp T2 | xO pp => Tget pp T1 | xH => PSome a end end. Fixpoint Tadd (p:positive) (a:A) (T:Tree) {struct p}: Tree := match T with | Tempty => match p with | xI pp => Branch0 Tempty (Tadd pp a Tempty) | xO pp => Branch0 (Tadd pp a Tempty) Tempty | xH => Branch1 a Tempty Tempty end | Branch0 T1 T2 => match p with | xI pp => Branch0 T1 (Tadd pp a T2) | xO pp => Branch0 (Tadd pp a T1) T2 | xH => Branch1 a T1 T2 end | Branch1 b T1 T2 => match p with | xI pp => Branch1 b T1 (Tadd pp a T2) | xO pp => Branch1 b (Tadd pp a T1) T2 | xH => Branch1 a T1 T2 end end. Definition mkBranch0 (T1 T2:Tree) := match T1,T2 with Tempty ,Tempty => Tempty | _,_ => Branch0 T1 T2 end. Fixpoint Tremove (p:positive) (T:Tree) {struct p}: Tree := match T with | Tempty => Tempty | Branch0 T1 T2 => match p with | xI pp => mkBranch0 T1 (Tremove pp T2) | xO pp => mkBranch0 (Tremove pp T1) T2 | xH => T end | Branch1 b T1 T2 => match p with | xI pp => Branch1 b T1 (Tremove pp T2) | xO pp => Branch1 b (Tremove pp T1) T2 | xH => mkBranch0 T1 T2 end end. Theorem Tget_Tempty: forall (p : positive), Tget p (Tempty) = PNone. destruct p;reflexivity. Qed. Theorem Tget_Tadd: forall i j a T, Tget i (Tadd j a T) = match (i ?= j) with Eq => PSome a | Lt => Tget i T | Gt => Tget i T end. Proof. intros i j. case_eq (i ?= j). intro H;rewrite (Pos.compare_eq _ _ H);intros a;clear i H. induction j;destruct T;simpl;try (apply IHj);congruence. unfold Pos.compare. generalize i;clear i;induction j;destruct T;simpl in H|-*; destruct i;simpl;try rewrite (IHj _ H);try (destruct i;simpl;congruence);reflexivity|| congruence. unfold Pos.compare. generalize i;clear i;induction j;destruct T;simpl in H|-*; destruct i;simpl;try rewrite (IHj _ H);try (destruct i;simpl;congruence);reflexivity|| congruence. Qed. Record Store : Type := mkStore {index:positive;contents:Tree}. Definition empty := mkStore xH Tempty. Definition push a S := mkStore (Pos.succ (index S)) (Tadd (index S) a (contents S)). Definition get i S := Tget i (contents S). Lemma get_empty : forall i, get i empty = PNone. intro i; case i; unfold empty,get; simpl;reflexivity. Qed. Inductive Full : Store -> Type:= F_empty : Full empty | F_push : forall a S, Full S -> Full (push a S). Theorem get_Full_Gt : forall S, Full S -> forall i, (i ?= index S) = Gt -> get i S = PNone. Proof. intros S W;induction W. unfold empty,index,get,contents;intros;apply Tget_Tempty. unfold index,get,push;simpl contents. intros i e;rewrite Tget_Tadd. rewrite (Gt_Psucc _ _ e). unfold get in IHW. apply IHW;apply Gt_Psucc;assumption. Qed. Theorem get_Full_Eq : forall S, Full S -> get (index S) S = PNone. intros [index0 contents0] F. case F. unfold empty,index,get,contents;intros;apply Tget_Tempty. unfold index,get,push;simpl contents. intros a S. rewrite Tget_Tadd. rewrite Psucc_Gt. intro W. change (get (Pos.succ (index S)) S =PNone). apply get_Full_Gt; auto. apply Psucc_Gt. Qed. Theorem get_push_Full : forall i a S, Full S -> get i (push a S) = match (i ?= index S) with Eq => PSome a | Lt => get i S | Gt => PNone end. Proof. intros i a S F. case_eq (i ?= index S). intro e;rewrite (Pos.compare_eq _ _ e). destruct S;unfold get,push,index;simpl contents;rewrite Tget_Tadd. rewrite Pos.compare_refl;reflexivity. intros;destruct S;unfold get,push,index;simpl contents;rewrite Tget_Tadd. 
simpl index in H;rewrite H;reflexivity. intro H;generalize H;clear H. unfold get,push;simpl index;simpl contents. rewrite Tget_Tadd;intro e;rewrite e. change (get i S=PNone). apply get_Full_Gt;auto. Qed. Lemma Full_push_compat : forall i a S, Full S -> forall x, get i S = PSome x -> get i (push a S) = PSome x. Proof. intros i a S F x H. case_eq (i ?= index S);intro test. rewrite (Pos.compare_eq _ _ test) in H. rewrite (get_Full_Eq _ F) in H;congruence. rewrite <- H. rewrite (get_push_Full i a). rewrite test;reflexivity. assumption. rewrite (get_Full_Gt _ F) in H;congruence. Qed. Lemma Full_index_one_empty : forall S, Full S -> index S = 1 -> S=empty. intros [ind cont] F one; inversion F. reflexivity. simpl index in one;assert (h:=Pos.succ_not_1 (index S)). congruence. Qed. Lemma push_not_empty: forall a S, (push a S) <> empty. intros a [ind cont];unfold push,empty. simpl;intro H;injection H; intros _ ; apply Pos.succ_not_1. Qed. Fixpoint In (x:A) (S:Store) (F:Full S) {struct F}: Prop := match F with F_empty => False | F_push a SS FF => x=a \/ In x SS FF end. Lemma get_In : forall (x:A) (S:Store) (F:Full S) i , get i S = PSome x -> In x S F. induction F. intro i;rewrite get_empty; congruence. intro i;rewrite get_push_Full;trivial. case_eq (i ?= index S);simpl. left;congruence. right;eauto. congruence. Qed. End Store. Arguments PNone [A]. Arguments PSome [A] _. Arguments Tempty [A]. Arguments Branch0 [A] _ _. Arguments Branch1 [A] _ _ _. Arguments Tget [A] p T. Arguments Tadd [A] p a T. Arguments Tget_Tempty [A] p. Arguments Tget_Tadd [A] i j a T. Arguments mkStore [A] index contents. Arguments index [A] s. Arguments contents [A] s. Arguments empty [A]. Arguments get [A] i S. Arguments push [A] a S. Arguments get_empty [A] i. Arguments get_push_Full [A] i a S _. Arguments Full [A] _. Arguments F_empty [A]. Arguments F_push [A] a S _. Arguments In [A] x S F. Section Map. Variables A B:Set. Variable f: A -> B. Fixpoint Tmap (T: Tree A) : Tree B := match T with Tempty => Tempty | Branch0 t1 t2 => Branch0 (Tmap t1) (Tmap t2) | Branch1 a t1 t2 => Branch1 (f a) (Tmap t1) (Tmap t2) end. Lemma Tget_Tmap: forall T i, Tget i (Tmap T)= match Tget i T with PNone => PNone | PSome a => PSome (f a) end. induction T;intro i;case i;simpl;auto. Defined. Lemma Tmap_Tadd: forall i a T, Tmap (Tadd i a T) = Tadd i (f a) (Tmap T). induction i;intros a T;case T;simpl;intros;try (rewrite IHi);simpl;reflexivity. Defined. Definition map (S:Store A) : Store B := mkStore (index S) (Tmap (contents S)). Lemma get_map: forall i S, get i (map S)= match get i S with PNone => PNone | PSome a => PSome (f a) end. destruct S;unfold get,map,contents,index;apply Tget_Tmap. Defined. Lemma map_push: forall a S, map (push a S) = push (f a) (map S). intros a S. case S. unfold push,map,contents,index. intros;rewrite Tmap_Tadd;reflexivity. Defined. Theorem Full_map : forall S, Full S -> Full (map S). intros S F. induction F. exact F_empty. rewrite map_push;constructor 2;assumption. Defined. End Map. Arguments Tmap [A B] f T. Arguments map [A B] f S. Arguments Full_map [A B f] S _. Notation "hyps \ A" := (push A hyps) (at level 72,left associativity). 
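(* A small illustrative check of [push]/[get] with the notation above (the name [get_push_example] and the boolean payload are only for illustration): stores are indexed from 1 in insertion order, so the second value pushed is found at index 2. *)
Example get_push_example :
  get 2 (empty \ true \ false \ true) = PSome false.
Proof. reflexivity. Qed.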
coq-8.4pl2/Makefile.doc0000640000175000001440000003165112043513320014032 0ustar notinusers# Makefile for the Coq documentation # COQSRC needs to be set to a coq source repository # To compile documentation, you need the following tools: # Dvi: latex (latex2e), bibtex, makeindex # Pdf: pdflatex # Html: hevea (http://hevea.inria.fr) >= 1.05 ###################################################################### ### General rules ###################################################################### .PHONY: doc doc-html doc-pdf doc-ps refman refman-quick tutorial .PHONY: stdlib full-stdlib faq rectutorial refman-html-dir INDEXURLS:=doc/refman/html/index_urls.txt doc: refman faq tutorial rectutorial stdlib $(INDEXURLS) doc-html:\ doc/tutorial/Tutorial.v.html doc/refman/html/index.html \ doc/faq/html/index.html doc/stdlib/html/index.html doc/RecTutorial/RecTutorial.html doc-pdf:\ doc/tutorial/Tutorial.v.pdf doc/refman/Reference-Manual.pdf \ doc/faq/FAQ.v.pdf doc/stdlib/Library.pdf doc/RecTutorial/RecTutorial.pdf doc-ps:\ doc/tutorial/Tutorial.v.ps doc/refman/Reference-Manual.ps \ doc/faq/FAQ.v.ps doc/stdlib/Library.ps doc/RecTutorial/RecTutorial.ps refman: \ doc/refman/html/index.html doc/refman/Reference-Manual.ps doc/refman/Reference-Manual.pdf tutorial: \ doc/tutorial/Tutorial.v.html doc/tutorial/Tutorial.v.ps doc/tutorial/Tutorial.v.pdf stdlib: \ doc/stdlib/html/index.html doc/stdlib/Library.ps doc/stdlib/Library.pdf full-stdlib: \ doc/stdlib/html/index.html doc/stdlib/FullLibrary.ps doc/stdlib/FullLibrary.pdf faq: doc/faq/html/index.html doc/faq/FAQ.v.ps doc/faq/FAQ.v.pdf rectutorial: doc/RecTutorial/RecTutorial.html \ doc/RecTutorial/RecTutorial.ps doc/RecTutorial/RecTutorial.pdf ###################################################################### ### Implicit rules ###################################################################### ifdef QUICK %.v.tex: %.tex $(COQTEX) $(COQTEXOPTS) $< else %.v.tex: %.tex $(COQTEX) $(COQTOPEXE) $(PLUGINSVO) $(THEORIESVO) $(COQTEX) $(COQTEXOPTS) $< endif %.ps: %.dvi (cd `dirname $<`; dvips -q -o `basename $@` `basename $<`) ###################################################################### # Macros for filtering outputs ###################################################################### HIDEBIBTEXINFO=| grep -v "^A level-1 auxiliary file" SHOWMAKEINDEXERROR=egrep '^!! Input index error|^\*\* Input style error|^ --' # Empty subsection levels in faq are on purpose HEVEAFAQFILTER=2>&1 | grep -v "^Warning: List with no item" ###################################################################### # Common ###################################################################### ### Version doc/common/version.tex: config/Makefile printf '\\newcommand{\\coqversion}{$(VERSION)}' > doc/common/version.tex ###################################################################### # Reference Manual ###################################################################### ### Reference Manual (printable format) # The second LATEX compilation is necessary otherwise the pages of the index # are not correct (don't know why...) 
- BB doc/refman/Reference-Manual.dvi: $(REFMANFILES) doc/refman/Reference-Manual.tex @(cd doc/refman;\ $(LATEX) -interaction=batchmode Reference-Manual;\ $(BIBTEX) -terse Reference-Manual $(HIDEBIBTEXINFO);\ $(LATEX) -interaction=batchmode Reference-Manual > /dev/null;\ $(MAKEINDEX) -q Reference-Manual;\ $(SHOWMAKEINDEXERROR) Reference-Manual.ilg;\ $(MAKEINDEX) -q Reference-Manual.tacidx -o Reference-Manual.tacind;\ $(SHOWMAKEINDEXERROR) Reference-Manual.ilg;\ $(MAKEINDEX) -q Reference-Manual.comidx -o Reference-Manual.comind;\ $(SHOWMAKEINDEXERROR) Reference-Manual.ilg;\ $(MAKEINDEX) -q Reference-Manual.erridx -o Reference-Manual.errind;\ $(SHOWMAKEINDEXERROR) Reference-Manual.ilg;\ $(LATEX) -interaction=batchmode Reference-Manual > /dev/null;\ $(LATEX) -interaction=batchmode Reference-Manual > /dev/null;\ ../tools/show_latex_messages -no-overfull Reference-Manual.log) doc/refman/Reference-Manual.pdf: doc/refman/Reference-Manual.dvi (cd doc/refman;\ $(PDFLATEX) -interaction=batchmode Reference-Manual.tex;\ ../tools/show_latex_messages -no-overfull Reference-Manual.log) ### Reference Manual (browsable format) doc/refman/Reference-Manual.html: doc/refman/styles.hva doc/refman/headers.hva doc/refman/Reference-Manual.dvi # to ensure bbl file (cd doc/refman; BIBINPUTS=.: $(HEVEA) $(HEVEAOPTS) ./styles.hva ./Reference-Manual.tex) doc/refman/cover.html: doc/common/styles/html/$(HTMLSTYLE)/cover.html $(INSTALLLIB) $< doc/refman doc/refman/styles.hva: doc/common/styles/html/$(HTMLSTYLE)/styles.hva $(INSTALLLIB) $< doc/refman INDEXES:= doc/refman/html/command-index.html doc/refman/html/tactic-index.html ALLINDEXES:= doc/refman/html/index.html $(INDEXES) $(ALLINDEXES): refman-html-dir refman-html-dir: doc/refman/Reference-Manual.html $(REFMANPNGFILES) \ doc/refman/cover.html doc/refman/styles.hva doc/refman/index.html - rm -rf doc/refman/html $(MKDIR) doc/refman/html $(INSTALLLIB) $(REFMANPNGFILES) doc/refman/html (cd doc/refman/html; hacha -nolinks -tocbis -o toc.html ../styles.hva ../Reference-Manual.html) $(INSTALLLIB) doc/refman/cover.html doc/refman/html/index.html -$(INSTALLLIB) doc/common/styles/html/$(HTMLSTYLE)/*.css doc/refman/html refman-quick: (cd doc/refman;\ $(PDFLATEX) -interaction=batchmode Reference-Manual.tex;\ ../tools/show_latex_messages -no-overfull Reference-Manual.log && \ $(HEVEA) $(HEVEAOPTS) ./Reference-Manual.tex) ###################################################################### # Index file for CoqIDE ###################################################################### $(INDEXURLS): $(INDEXES) cat $< | grep li-indexenv | grep HREF | sed -e 's@.*\(.*\).*, .*@\1,\2@' > $@ ###################################################################### # Tutorial ###################################################################### doc/tutorial/Tutorial.v.dvi: $(DOCCOMMON) doc/tutorial/Tutorial.v.tex (cd doc/tutorial;\ $(LATEX) -interaction=batchmode Tutorial.v;\ ../tools/show_latex_messages Tutorial.v.log) doc/tutorial/Tutorial.v.pdf: $(DOCCOMMON) doc/tutorial/Tutorial.v.tex (cd doc/tutorial;\ $(PDFLATEX) -interaction=batchmode Tutorial.v.tex;\ ../tools/show_latex_messages Tutorial.v.log) doc/tutorial/Tutorial.v.html: $(DOCCOMMON) doc/tutorial/Tutorial.v.tex (cd doc/tutorial; $(HEVEA) $(HEVEAOPTS) Tutorial.v) ###################################################################### # FAQ ###################################################################### doc/faq/FAQ.v.dvi: doc/common/version.tex doc/common/title.tex doc/faq/FAQ.v.tex (cd doc/faq;\ $(LATEX) 
-interaction=batchmode FAQ.v;\ $(BIBTEX) -terse FAQ.v;\ $(LATEX) -interaction=batchmode FAQ.v > /dev/null;\ $(LATEX) -interaction=batchmode FAQ.v > /dev/null;\ ../tools/show_latex_messages FAQ.v.log) doc/faq/FAQ.v.pdf: doc/common/version.tex doc/common/title.tex doc/faq/FAQ.v.dvi doc/faq/axioms.png (cd doc/faq;\ $(PDFLATEX) -interaction=batchmode FAQ.v.tex;\ ../tools/show_latex_messages FAQ.v.log) doc/faq/FAQ.v.html: doc/faq/FAQ.v.dvi # to ensure FAQ.v.bbl (cd doc/faq; ($(HEVEA) $(HEVEAOPTS) FAQ.v.tex $(HEVEAFAQFILTER))) doc/faq/html/index.html: doc/faq/FAQ.v.html - rm -rf doc/faq/html $(MKDIR) doc/faq/html $(INSTALLLIB) doc/faq/interval_discr.v doc/faq/axioms.png doc/faq/html $(INSTALLLIB) doc/faq/FAQ.v.html doc/faq/html/index.html ###################################################################### # Standard library ###################################################################### ### Standard library (browsable html format) ifdef QUICK doc/stdlib/html/genindex.html: else doc/stdlib/html/genindex.html: | $(COQDOC) $(THEORIESVO) endif - rm -rf doc/stdlib/html $(MKDIR) doc/stdlib/html $(COQDOC) -q -d doc/stdlib/html --with-header doc/common/styles/html/$(HTMLSTYLE)/header.html --with-footer doc/common/styles/html/$(HTMLSTYLE)/footer.html --multi-index --html -g \ -R theories Coq $(THEORIESVO:.vo=.v) mv doc/stdlib/html/index.html doc/stdlib/html/genindex.html doc/stdlib/index-list.html: doc/stdlib/index-list.html.template doc/stdlib/make-library-index ./doc/stdlib/make-library-index doc/stdlib/index-list.html doc/stdlib/hidden-files doc/stdlib/html/index.html: doc/stdlib/html/genindex.html doc/stdlib/index-list.html cat doc/common/styles/html/$(HTMLSTYLE)/header.html doc/stdlib/index-list.html > $@ cat doc/common/styles/html/$(HTMLSTYLE)/footer.html >> $@ ### Standard library (light version, full version is definitely too big) ifdef QUICK doc/stdlib/Library.coqdoc.tex: else doc/stdlib/Library.coqdoc.tex: | $(COQDOC) $(THEORIESLIGHTVO) endif $(COQDOC) -q -boot --gallina --body-only --latex --stdout \ -R theories Coq $(THEORIESLIGHTVO:.vo=.v) >> $@ doc/stdlib/Library.dvi: $(DOCCOMMON) doc/stdlib/Library.coqdoc.tex doc/stdlib/Library.tex (cd doc/stdlib;\ $(LATEX) -interaction=batchmode Library;\ $(LATEX) -interaction=batchmode Library > /dev/null;\ ../tools/show_latex_messages -no-overfull Library.log) doc/stdlib/Library.pdf: $(DOCCOMMON) doc/stdlib/Library.coqdoc.tex doc/stdlib/Library.dvi (cd doc/stdlib;\ $(PDFLATEX) -interaction=batchmode Library;\ ../tools/show_latex_messages -no-overfull Library.log) ### Standard library (full version if you're crazy enouth to try) doc/stdlib/FullLibrary.tex: doc/stdlib/Library.tex sed -e 's/Library.coqdoc/FullLibrary.coqdoc/g;s/\\begin{document}/\\newcommand{\\textlambda}{\\ensuremath{\\lambda}}\\newcommand{\\textPi}{\\ensuremath{\\Pi}}\\begin{document}/' $< > $@ ifdef QUICK doc/stdlib/FullLibrary.coqdoc.tex: $(COQDOC) -q -boot --gallina --body-only --latex --stdout --utf8 \ -R theories Coq $(THEORIESVO:.vo=.v) > $@ sed -i.tmp -e 's///g' $@ && rm $@.tmp else doc/stdlib/FullLibrary.coqdoc.tex: $(COQDOC) $(THEORIESVO) $(COQDOC) -q -boot --gallina --body-only --latex --stdout --utf8 \ -R theories Coq $(THEORIESVO:.vo=.v) > $@ sed -i.tmp -e 's///g' $@ && rm $@.tmp endif doc/stdlib/FullLibrary.dvi: $(DOCCOMMON) doc/stdlib/FullLibrary.coqdoc.tex doc/stdlib/FullLibrary.tex (cd doc/stdlib;\ $(LATEX) -interaction=batchmode FullLibrary;\ $(LATEX) -interaction=batchmode FullLibrary > /dev/null;\ ../tools/show_latex_messages -no-overfull FullLibrary.log) 
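# Note: as with Library.pdf above, the pdf target presumably depends on the dvi one so that the single pdflatex pass can reuse the auxiliary and index files generated there.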
doc/stdlib/FullLibrary.pdf: $(DOCCOMMON) doc/stdlib/FullLibrary.coqdoc.tex doc/stdlib/FullLibrary.dvi (cd doc/stdlib;\ $(PDFLATEX) -interaction=batchmode FullLibrary;\ ../tools/show_latex_messages -no-overfull FullLibrary.log) ###################################################################### # Tutorial on inductive types ###################################################################### doc/RecTutorial/RecTutorial.dvi: doc/common/version.tex doc/common/title.tex doc/RecTutorial/RecTutorial.tex (cd doc/RecTutorial;\ $(LATEX) -interaction=batchmode RecTutorial;\ $(BIBTEX) -terse RecTutorial;\ $(LATEX) -interaction=batchmode RecTutorial > /dev/null;\ $(LATEX) -interaction=batchmode RecTutorial > /dev/null;\ ../tools/show_latex_messages RecTutorial.log) doc/RecTutorial/RecTutorial.pdf: doc/common/version.tex doc/common/title.tex doc/RecTutorial/RecTutorial.dvi (cd doc/RecTutorial;\ $(PDFLATEX) -interaction=batchmode RecTutorial.tex;\ ../tools/show_latex_messages RecTutorial.log) doc/RecTutorial/RecTutorial.html: doc/RecTutorial/RecTutorial.tex (cd doc/RecTutorial; $(HEVEA) $(HEVEAOPTS) RecTutorial) ###################################################################### # Install all documentation files ###################################################################### .PHONY: install-doc install-doc-meta install-doc-html install-doc-printable install-doc-index-urls install-doc: install-doc-meta install-doc-html install-doc-printable install-doc-index-urls install-doc-meta: $(MKDIR) $(FULLDOCDIR) $(INSTALLLIB) doc/LICENSE $(FULLDOCDIR)/LICENSE.doc install-doc-html: $(MKDIR) $(addprefix $(FULLDOCDIR)/html/, refman stdlib faq) $(INSTALLLIB) doc/refman/html/* $(FULLDOCDIR)/html/refman $(INSTALLLIB) doc/stdlib/html/* $(FULLDOCDIR)/html/stdlib $(INSTALLLIB) doc/RecTutorial/RecTutorial.html $(FULLDOCDIR)/html/RecTutorial.html $(INSTALLLIB) doc/faq/html/* $(FULLDOCDIR)/html/faq $(INSTALLLIB) doc/tutorial/Tutorial.v.html $(FULLDOCDIR)/html/Tutorial.html install-doc-printable: $(MKDIR) $(FULLDOCDIR)/ps $(FULLDOCDIR)/pdf $(INSTALLLIB) doc/refman/Reference-Manual.pdf \ doc/stdlib/Library.pdf $(FULLDOCDIR)/pdf $(INSTALLLIB) doc/refman/Reference-Manual.ps \ doc/stdlib/Library.ps $(FULLDOCDIR)/ps $(INSTALLLIB) doc/tutorial/Tutorial.v.pdf $(FULLDOCDIR)/pdf/Tutorial.pdf $(INSTALLLIB) doc/RecTutorial/RecTutorial.pdf $(FULLDOCDIR)/pdf/RecTutorial.pdf $(INSTALLLIB) doc/faq/FAQ.v.pdf $(FULLDOCDIR)/pdf/FAQ.pdf $(INSTALLLIB) doc/tutorial/Tutorial.v.ps $(FULLDOCDIR)/ps/Tutorial.ps $(INSTALLLIB) doc/RecTutorial/RecTutorial.ps $(FULLDOCDIR)/ps/RecTutorial.ps $(INSTALLLIB) doc/faq/FAQ.v.ps $(FULLDOCDIR)/ps/FAQ.ps install-doc-index-urls: $(MKDIR) $(FULLDOCDIR)/ps $(FULLDOCDIR)/pdf $(INSTALLLIB) $(INDEXURLS) \ $(FULLDOCDIR)/html/refman # For emacs: # Local Variables: # mode: makefile # End: coq-8.4pl2/theories/0000750000175000001440000000000012127276551013460 5ustar notinuserscoq-8.4pl2/theories/theories.itarget0000640000175000001440000000067011500424625016656 0ustar notinusersArith/vo.otarget Bool/vo.otarget Classes/vo.otarget FSets/vo.otarget MSets/vo.otarget Structures/vo.otarget Init/vo.otarget Lists/vo.otarget Vectors/vo.otarget Logic/vo.otarget PArith/vo.otarget NArith/vo.otarget Numbers/vo.otarget Program/vo.otarget QArith/vo.otarget Reals/vo.otarget Relations/vo.otarget Setoids/vo.otarget Sets/vo.otarget Sorting/vo.otarget Strings/vo.otarget Unicode/vo.otarget Wellfounded/vo.otarget ZArith/vo.otarget coq-8.4pl2/theories/Structures/0000750000175000001440000000000012127276543015644 5ustar 
notinuserscoq-8.4pl2/theories/Structures/DecidableTypeEx.v0000640000175000001440000000612311776416511021031 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* eq y x. Proof. intros (x1,x2) (y1,y2); unfold eq; simpl; intuition. Qed. Lemma eq_trans : forall x y z : t, eq x y -> eq y z -> eq x z. Proof. intros (x1,x2) (y1,y2) (z1,z2); unfold eq; simpl; intuition eauto. Qed. Definition eq_dec : forall x y, { eq x y }+{ ~eq x y }. Proof. intros (x1,x2) (y1,y2); unfold eq; simpl. destruct (D1.eq_dec x1 y1); destruct (D2.eq_dec x2 y2); intuition. Defined. End PairDecidableType. (** Similarly for pairs of UsualDecidableType *) Module PairUsualDecidableType(D1 D2:UsualDecidableType) <: UsualDecidableType. Definition t := prod D1.t D2.t. Definition eq := @eq t. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Definition eq_dec : forall x y, { eq x y }+{ ~eq x y }. Proof. intros (x1,x2) (y1,y2); destruct (D1.eq_dec x1 y1); destruct (D2.eq_dec x2 y2); unfold eq, D1.eq, D2.eq in *; simpl; (left; f_equal; auto; fail) || (right; intro H; injection H; auto). Defined. End PairUsualDecidableType. coq-8.4pl2/theories/Structures/OrdersEx.v0000640000175000001440000000541611560537173017574 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* eq==>iff) lt. Proof. compute. intros (x1,x2) (x1',x2') (X1,X2) (y1,y2) (y1',y2') (Y1,Y2). rewrite X1,X2,Y1,Y2; intuition. Qed. Definition compare x y := match O1.compare (fst x) (fst y) with | Eq => O2.compare (snd x) (snd y) | Lt => Lt | Gt => Gt end. Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y). Proof. intros (x1,x2) (y1,y2); unfold compare; simpl. destruct (O1.compare_spec x1 y1); try (constructor; compute; auto). destruct (O2.compare_spec x2 y2); constructor; compute; auto with relations. Qed. End PairOrderedType. coq-8.4pl2/theories/Structures/OrdersLists.v0000640000175000001440000001604711537153605020316 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* In x l -> In y l. Proof. intros. rewrite <- H; auto. Qed. Lemma ListIn_In : forall l x, List.In x l -> In x l. Proof. exact (In_InA eq_equiv). Qed. Lemma Inf_lt : forall l x y, lt x y -> Inf y l -> Inf x l. Proof. exact (InfA_ltA lt_strorder). Qed. Lemma Inf_eq : forall l x y, eq x y -> Inf y l -> Inf x l. Proof. exact (InfA_eqA eq_equiv lt_strorder lt_compat). Qed. Lemma Sort_Inf_In : forall l x a, Sort l -> Inf a l -> In x l -> lt a x. Proof. exact (SortA_InfA_InA eq_equiv lt_strorder lt_compat). Qed. Lemma ListIn_Inf : forall l x, (forall y, List.In y l -> lt x y) -> Inf x l. Proof. exact (@In_InfA t lt). Qed. Lemma In_Inf : forall l x, (forall y, In y l -> lt x y) -> Inf x l. Proof. exact (InA_InfA eq_equiv (ltA:=lt)). Qed. Lemma Inf_alt : forall l x, Sort l -> (Inf x l <-> (forall y, In y l -> lt x y)). Proof. exact (InfA_alt eq_equiv lt_strorder lt_compat). Qed. Lemma Sort_NoDup : forall l, Sort l -> NoDup l. Proof. exact (SortA_NoDupA eq_equiv lt_strorder lt_compat) . Qed. End ForNotations. Hint Resolve ListIn_In Sort_NoDup Inf_lt. Hint Immediate In_eq Inf_lt. End OrderedTypeLists. (** * Results about keys and data as manipulated in FMaps. *) Module KeyOrderedType(Import O:OrderedType). Module Import MO:=OrderedTypeLists(O). 
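(** For illustration only: [KeyOrderedType] views association lists as lists
    of [(key * elt)] pairs compared through their keys.  Taking, purely as an
    example, keys ordered as natural numbers and [elt := bool], the relations
    defined below behave as follows:
<<
    eqk  (1, true) (1, false)     holds: the keys are equal, data is ignored
    eqke (1, true) (1, false)     fails: eqke also compares the data
    ltk  (1, true) (2, false)     holds: comparison on the keys only
>>
    Nothing below depends on this particular choice of key and data types. *)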
Section Elt. Variable elt : Type. Notation key:=t. Local Open Scope signature_scope. Definition eqk : relation (key*elt) := eq @@1. Definition eqke : relation (key*elt) := eq * Logic.eq. Definition ltk : relation (key*elt) := lt @@1. Hint Unfold eqk eqke ltk. (* eqke is stricter than eqk *) Global Instance eqke_eqk : subrelation eqke eqk. Proof. firstorder. Qed. (* eqk, eqke are equalities, ltk is a strict order *) Global Instance eqk_equiv : Equivalence eqk := _. Global Instance eqke_equiv : Equivalence eqke := _. Global Instance ltk_strorder : StrictOrder ltk := _. Global Instance ltk_compat : Proper (eqk==>eqk==>iff) ltk. Proof. unfold eqk, ltk; auto with *. Qed. (* Additionnal facts *) Global Instance pair_compat : Proper (eq==>Logic.eq==>eqke) (@pair key elt). Proof. apply pair_compat. Qed. Lemma ltk_not_eqk : forall e e', ltk e e' -> ~ eqk e e'. Proof. intros e e' LT EQ; rewrite EQ in LT. elim (StrictOrder_Irreflexive _ LT). Qed. Lemma ltk_not_eqke : forall e e', ltk e e' -> ~eqke e e'. Proof. intros e e' LT EQ; rewrite EQ in LT. elim (StrictOrder_Irreflexive _ LT). Qed. Lemma InA_eqke_eqk : forall x m, InA eqke x m -> InA eqk x m. Proof. unfold eqke, RelProd; induction 1; firstorder. Qed. Hint Resolve InA_eqke_eqk. Definition MapsTo (k:key)(e:elt):= InA eqke (k,e). Definition In k m := exists e:elt, MapsTo k e m. Notation Sort := (sort ltk). Notation Inf := (lelistA ltk). Hint Unfold MapsTo In. (* An alternative formulation for [In k l] is [exists e, InA eqk (k,e) l] *) Lemma In_alt : forall k l, In k l <-> exists e, InA eqk (k,e) l. Proof. firstorder. exists x; auto. induction H. destruct y; compute in H. exists e; left; auto. destruct IHInA as [e H0]. exists e; auto. Qed. Lemma In_alt2 : forall k l, In k l <-> Exists (fun p => eq k (fst p)) l. Proof. unfold In, MapsTo. setoid_rewrite Exists_exists; setoid_rewrite InA_alt. firstorder. exists (snd x), x; auto. Qed. Lemma In_nil : forall k, In k nil <-> False. Proof. intros; rewrite In_alt2; apply Exists_nil. Qed. Lemma In_cons : forall k p l, In k (p::l) <-> eq k (fst p) \/ In k l. Proof. intros; rewrite !In_alt2, Exists_cons; intuition. Qed. Global Instance MapsTo_compat : Proper (eq==>Logic.eq==>equivlistA eqke==>iff) MapsTo. Proof. intros x x' Hx e e' He l l' Hl. unfold MapsTo. rewrite Hx, He, Hl; intuition. Qed. Global Instance In_compat : Proper (eq==>equivlistA eqk==>iff) In. Proof. intros x x' Hx l l' Hl. rewrite !In_alt. setoid_rewrite Hl. setoid_rewrite Hx. intuition. Qed. Lemma MapsTo_eq : forall l x y e, eq x y -> MapsTo x e l -> MapsTo y e l. Proof. intros l x y e EQ. rewrite <- EQ; auto. Qed. Lemma In_eq : forall l x y, eq x y -> In x l -> In y l. Proof. intros l x y EQ. rewrite <- EQ; auto. Qed. Lemma Inf_eq : forall l x x', eqk x x' -> Inf x' l -> Inf x l. Proof. intros l x x' H. rewrite H; auto. Qed. Lemma Inf_lt : forall l x x', ltk x x' -> Inf x' l -> Inf x l. Proof. apply InfA_ltA; auto with *. Qed. Hint Immediate Inf_eq. Hint Resolve Inf_lt. Lemma Sort_Inf_In : forall l p q, Sort l -> Inf q l -> InA eqk p l -> ltk q p. Proof. apply SortA_InfA_InA; auto with *. Qed. Lemma Sort_Inf_NotIn : forall l k e, Sort l -> Inf (k,e) l -> ~In k l. Proof. intros; red; intros. destruct H1 as [e' H2]. elim (@ltk_not_eqk (k,e) (k,e')). eapply Sort_Inf_In; eauto. repeat red; reflexivity. Qed. Lemma Sort_NoDupA: forall l, Sort l -> NoDupA eqk l. Proof. apply SortA_NoDupA; auto with *. Qed. Lemma Sort_In_cons_1 : forall e l e', Sort (e::l) -> InA eqk e' l -> ltk e e'. Proof. intros; invlist sort; eapply Sort_Inf_In; eauto. Qed. 
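(** For illustration only, with the definitions above and the same example
    instantiation (keys as natural numbers, [elt := bool]):
<<
    MapsTo 1 true ((1,true) :: (2,false) :: nil)     holds
    In     2      ((1,true) :: (2,false) :: nil)     holds
    In     3      ((1,true) :: (2,false) :: nil)     does not hold
>>
    As stated by [In_alt] above, [In k l] could equivalently have been phrased
    as [exists e, InA eqk (k,e) l], i.e. membership up to the key relation. *)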
Lemma Sort_In_cons_2 : forall l e e', Sort (e::l) -> InA eqk e' (e::l) -> ltk e e' \/ eqk e e'. Proof. intros; invlist InA; auto with relations. left; apply Sort_In_cons_1 with l; auto with relations. Qed. Lemma Sort_In_cons_3 : forall x l k e, Sort ((k,e)::l) -> In x l -> ~eq x k. Proof. intros; invlist sort; red; intros. eapply Sort_Inf_NotIn; eauto using In_eq. Qed. Lemma In_inv : forall k k' e l, In k ((k',e) :: l) -> eq k k' \/ In k l. Proof. intros; invlist In; invlist MapsTo. compute in * |- ; intuition. right; exists x; auto. Qed. Lemma In_inv_2 : forall k k' e e' l, InA eqk (k, e) ((k', e') :: l) -> ~ eq k k' -> InA eqk (k, e) l. Proof. intros; invlist InA; intuition. Qed. Lemma In_inv_3 : forall x x' l, InA eqke x (x' :: l) -> ~ eqk x x' -> InA eqke x l. Proof. intros; invlist InA; compute in * |- ; intuition. Qed. End Elt. Hint Unfold eqk eqke ltk. Hint Extern 2 (eqke ?a ?b) => split. Hint Resolve ltk_not_eqk ltk_not_eqke. Hint Resolve InA_eqke_eqk. Hint Unfold MapsTo In. Hint Immediate Inf_eq. Hint Resolve Inf_lt. Hint Resolve Sort_Inf_NotIn. Hint Resolve In_inv_2 In_inv_3. End KeyOrderedType. coq-8.4pl2/theories/Structures/DecidableType.v0000640000175000001440000001006611366307247020535 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* split. (* eqke is stricter than eqk *) Lemma eqke_eqk : forall x x', eqke x x' -> eqk x x'. Proof. unfold eqk, eqke; intuition. Qed. (* eqk, eqke are equalities *) Lemma eqk_refl : forall e, eqk e e. Proof. auto. Qed. Lemma eqke_refl : forall e, eqke e e. Proof. auto. Qed. Lemma eqk_sym : forall e e', eqk e e' -> eqk e' e. Proof. auto. Qed. Lemma eqke_sym : forall e e', eqke e e' -> eqke e' e. Proof. unfold eqke; intuition. Qed. Lemma eqk_trans : forall e e' e'', eqk e e' -> eqk e' e'' -> eqk e e''. Proof. eauto. Qed. Lemma eqke_trans : forall e e' e'', eqke e e' -> eqke e' e'' -> eqke e e''. Proof. unfold eqke; intuition; [ eauto | congruence ]. Qed. Hint Resolve eqk_trans eqke_trans eqk_refl eqke_refl. Hint Immediate eqk_sym eqke_sym. Global Instance eqk_equiv : Equivalence eqk. Proof. split; eauto. Qed. Global Instance eqke_equiv : Equivalence eqke. Proof. split; eauto. Qed. Lemma InA_eqke_eqk : forall x m, InA eqke x m -> InA eqk x m. Proof. unfold eqke; induction 1; intuition. Qed. Hint Resolve InA_eqke_eqk. Lemma InA_eqk : forall p q m, eqk p q -> InA eqk p m -> InA eqk q m. Proof. intros; apply InA_eqA with p; auto with *. Qed. Definition MapsTo (k:key)(e:elt):= InA eqke (k,e). Definition In k m := exists e:elt, MapsTo k e m. Hint Unfold MapsTo In. (* An alternative formulation for [In k l] is [exists e, InA eqk (k,e) l] *) Lemma In_alt : forall k l, In k l <-> exists e, InA eqk (k,e) l. Proof. firstorder. exists x; auto. induction H. destruct y. exists e; auto. destruct IHInA as [e H0]. exists e; auto. Qed. Lemma MapsTo_eq : forall l x y e, eq x y -> MapsTo x e l -> MapsTo y e l. Proof. intros; unfold MapsTo in *; apply InA_eqA with (x,e); eauto with *. Qed. Lemma In_eq : forall l x y, eq x y -> In x l -> In y l. Proof. destruct 2 as (e,E); exists e; eapply MapsTo_eq; eauto. Qed. Lemma In_inv : forall k k' e l, In k ((k',e) :: l) -> eq k k' \/ In k l. Proof. inversion 1. inversion_clear H0; eauto. destruct H1; simpl in *; intuition. Qed. Lemma In_inv_2 : forall k k' e e' l, InA eqk (k, e) ((k', e') :: l) -> ~ eq k k' -> InA eqk (k, e) l. Proof. inversion_clear 1; compute in H0; intuition. Qed. 
Lemma In_inv_3 : forall x x' l, InA eqke x (x' :: l) -> ~ eqk x x' -> InA eqke x l. Proof. inversion_clear 1; compute in H0; intuition. Qed. End Elt. Hint Unfold eqk eqke. Hint Extern 2 (eqke ?a ?b) => split. Hint Resolve eqk_trans eqke_trans eqk_refl eqke_refl. Hint Immediate eqk_sym eqke_sym. Hint Resolve InA_eqke_eqk. Hint Unfold MapsTo In. Hint Resolve In_inv_2 In_inv_3. End KeyDecidableType. coq-8.4pl2/theories/Structures/Orders.v0000640000175000001440000002505511577700357017304 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> Prop. End HasLt. Module Type HasLe (Import T:Typ). Parameter Inline le : t -> t -> Prop. End HasLe. Module Type EqLt := Typ <+ HasEq <+ HasLt. Module Type EqLe := Typ <+ HasEq <+ HasLe. Module Type EqLtLe := Typ <+ HasEq <+ HasLt <+ HasLe. (** Versions with nice notations *) Module Type LtNotation (E:EqLt). Infix "<" := E.lt. Notation "x > y" := (y= y" := (y<=x) (only parsing). Notation "x <= y <= z" := (x<=y /\ y<=z). End LeNotation. Module Type LtLeNotation (E:EqLtLe). Include LtNotation E <+ LeNotation E. Notation "x <= y < z" := (x<=y /\ yeq==>iff) lt. End IsStrOrder. Module Type LeIsLtEq (Import E:EqLtLe'). Axiom le_lteq : forall x y, x<=y <-> x t -> comparison. End HasCmp. Module Type CmpNotation (T:Typ)(C:HasCmp T). Infix "?=" := C.compare (at level 70, no associativity). End CmpNotation. Module Type CmpSpec (Import E:EqLt')(Import C:HasCmp E). Axiom compare_spec : forall x y, CompareSpec (x==y) (x true | _ => false end. Lemma eqb_eq : forall x y, eqb x y = true <-> x==y. Proof. unfold eqb. intros x y. destruct (compare_spec x y) as [H|H|H]; split; auto; try discriminate. intros EQ; rewrite EQ in H; elim (StrictOrder_Irreflexive _ H). intros EQ; rewrite EQ in H; elim (StrictOrder_Irreflexive _ H). Qed. End Compare2EqBool. Module DSO_to_OT (O:DecStrOrder) <: OrderedType := O <+ Compare2EqBool <+ HasEqBool2Dec. (** From [OrderedType] To [OrderedTypeFull] (adding [<=]) *) Module OT_to_Full (O:OrderedType') <: OrderedTypeFull. Include O. Definition le x y := x x-> Sortclass. Hint Unfold is_true. Module Type HasLeb (Import T:Typ). Parameter Inline leb : t -> t -> bool. End HasLeb. Module Type HasLtb (Import T:Typ). Parameter Inline ltb : t -> t -> bool. End HasLtb. Module Type LebNotation (T:Typ)(E:HasLeb T). Infix "<=?" := E.leb (at level 35). End LebNotation. Module Type LtbNotation (T:Typ)(E:HasLtb T). Infix " X.le x y. End LebSpec. Module Type LtbSpec (T:Typ)(X:HasLt T)(Y:HasLtb T). Parameter ltb_lt : forall x y, Y.ltb x y = true <-> X.lt x y. End LtbSpec. Module Type LeBool := Typ <+ HasLeb. Module Type LtBool := Typ <+ HasLtb. Module Type LeBool' := LeBool <+ LebNotation. Module Type LtBool' := LtBool <+ LtbNotation. Module Type LebIsTotal (Import X:LeBool'). Axiom leb_total : forall x y, (x <=? y) = true \/ (y <=? x) = true. End LebIsTotal. Module Type TotalLeBool := LeBool <+ LebIsTotal. Module Type TotalLeBool' := LeBool' <+ LebIsTotal. Module Type LebIsTransitive (Import X:LeBool'). Axiom leb_trans : Transitive X.leb. End LebIsTransitive. Module Type TotalTransitiveLeBool := TotalLeBool <+ LebIsTransitive. Module Type TotalTransitiveLeBool' := TotalLeBool' <+ LebIsTransitive. (** Grouping all boolean comparison functions *) Module Type HasBoolOrdFuns (T:Typ) := HasEqb T <+ HasLtb T <+ HasLeb T. Module Type HasBoolOrdFuns' (T:Typ) := HasBoolOrdFuns T <+ EqbNotation T <+ LtbNotation T <+ LebNotation T. 
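(** For illustration only: the boolean functions grouped above only become
    meaningful together with the specifications just below ([BoolOrdSpecs]).
    On natural numbers, for instance, an implementation would have to satisfy
    statements such as
<<
    (3 =? 3)  = true  <->  3 == 3
    (3 <? 5)  = true  <->  3 < 5
    (5 <=? 3) = true  <->  5 <= 3      (both sides being false here)
>>
    where the numerals and the concrete functions are only meant as an
    example. *)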
Module Type BoolOrdSpecs (O:EqLtLe)(F:HasBoolOrdFuns O) := EqbSpec O O F <+ LtbSpec O O F <+ LebSpec O O F. Module Type OrderFunctions (E:EqLtLe) := HasCompare E <+ HasBoolOrdFuns E <+ BoolOrdSpecs E. Module Type OrderFunctions' (E:EqLtLe) := HasCompare E <+ CmpNotation E <+ HasBoolOrdFuns' E <+ BoolOrdSpecs E. (** * From [OrderedTypeFull] to [TotalTransitiveLeBool] *) Module OTF_to_TTLB (Import O : OrderedTypeFull') <: TotalTransitiveLeBool. Definition leb x y := match compare x y with Gt => false | _ => true end. Lemma leb_le : forall x y, leb x y <-> x <= y. Proof. intros. unfold leb. rewrite le_lteq. destruct (compare_spec x y) as [EQ|LT|GT]; split; auto. discriminate. intros LE. elim (StrictOrder_Irreflexive x). destruct LE as [LT|EQ]. now transitivity y. now rewrite <- EQ in GT. Qed. Lemma leb_total : forall x y, leb x y \/ leb y x. Proof. intros. rewrite 2 leb_le. rewrite 2 le_lteq. destruct (compare_spec x y); intuition. Qed. Lemma leb_trans : Transitive leb. Proof. intros x y z. rewrite !leb_le, !le_lteq. intros [Hxy|Hxy] [Hyz|Hyz]. left; transitivity y; auto. left; rewrite <- Hyz; auto. left; rewrite Hxy; auto. right; transitivity y; auto. Qed. Definition t := t. End OTF_to_TTLB. (** * From [TotalTransitiveLeBool] to [OrderedTypeFull] [le] is [leb ... = true]. [eq] is [le /\ swap le]. [lt] is [le /\ ~swap le]. *) Local Open Scope bool_scope. Module TTLB_to_OTF (Import O : TotalTransitiveLeBool') <: OrderedTypeFull. Definition t := t. Definition le x y : Prop := x <=? y. Definition eq x y : Prop := le x y /\ le y x. Definition lt x y : Prop := le x y /\ ~le y x. Definition compare x y := if x <=? y then (if y <=? x then Eq else Lt) else Gt. Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y). Proof. intros. unfold compare. case_eq (x <=? y). case_eq (y <=? x). constructor. split; auto. constructor. split; congruence. constructor. destruct (leb_total x y); split; congruence. Qed. Definition eqb x y := (x <=? y) && (y <=? x). Lemma eqb_eq : forall x y, eqb x y <-> eq x y. Proof. intros. unfold eq, eqb, le. case leb; simpl; intuition; discriminate. Qed. Include HasEqBool2Dec. Instance eq_equiv : Equivalence eq. Proof. split. intros x; unfold eq, le. destruct (leb_total x x); auto. intros x y; unfold eq, le. intuition. intros x y z; unfold eq, le. intuition; apply leb_trans with y; auto. Qed. Instance lt_strorder : StrictOrder lt. Proof. split. intros x. unfold lt; red; intuition. intros x y z; unfold lt, le. intuition. apply leb_trans with y; auto. absurd (z <=? y); auto. apply leb_trans with x; auto. Qed. Instance lt_compat : Proper (eq ==> eq ==> iff) lt. Proof. apply proper_sym_impl_iff_2; auto with *. intros x x' Hx y y' Hy' H. unfold eq, lt, le in *. intuition. apply leb_trans with x; auto. apply leb_trans with y; auto. absurd (y <=? x); auto. apply leb_trans with x'; auto. apply leb_trans with y'; auto. Qed. Definition le_lteq : forall x y, le x y <-> lt x y \/ eq x y. Proof. intros. unfold lt, eq, le. split; [ | intuition ]. intros LE. case_eq (y <=? x); [right|left]; intuition; try discriminate. Qed. End TTLB_to_OTF. 
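(** For illustration only: the two functors above are converses of one
    another.  Assuming some module [M] implementing [OrderedTypeFull'] (the
    name [M] is a placeholder for this example), one direction reads
<<
    Module MB := OTF_to_TTLB M.    (a boolean, total, transitive leb)
>>
    while [TTLB_to_OTF] rebuilds a full ordered type from such a boolean
    order, so the propositional and the boolean presentations of a total
    order carry the same information. *)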
coq-8.4pl2/theories/Structures/vo.itarget0000640000175000001440000000032611321377244017646 0ustar notinusersEqualities.vo EqualitiesFacts.vo Orders.vo OrdersEx.vo OrdersFacts.vo OrdersLists.vo OrdersTac.vo OrdersAlt.vo GenericMinMax.vo DecidableType.vo DecidableTypeEx.vo OrderedTypeAlt.vo OrderedTypeEx.vo OrderedType.vo coq-8.4pl2/theories/Structures/OrdersAlt.v0000640000175000001440000001516311776623104017737 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> comparison. Infix "?=" := compare (at level 70, no associativity). Parameter compare_sym : forall x y, (y?=x) = CompOpp (x?=y). Parameter compare_trans : forall c x y z, (x?=y) = c -> (y?=z) = c -> (x?=z) = c. End OrderedTypeAlt. (** ** From OrderedTypeOrig to OrderedType. *) Module Update_OT (O:OrderedTypeOrig) <: OrderedType. Include Update_DT O. (* Provides : t eq eq_equiv eq_dec *) Definition lt := O.lt. Instance lt_strorder : StrictOrder lt. Proof. split. intros x Hx. apply (O.lt_not_eq Hx); auto with *. exact O.lt_trans. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. apply proper_sym_impl_iff_2; auto with *. intros x x' Hx y y' Hy H. assert (H0 : lt x' y). destruct (O.compare x' y) as [H'|H'|H']; auto. elim (O.lt_not_eq H). transitivity x'; auto with *. elim (O.lt_not_eq (O.lt_trans H H')); auto. destruct (O.compare x' y') as [H'|H'|H']; auto. elim (O.lt_not_eq H). transitivity x'; auto with *. transitivity y'; auto with *. elim (O.lt_not_eq (O.lt_trans H' H0)); auto with *. Qed. Definition compare x y := match O.compare x y with | EQ _ => Eq | LT _ => Lt | GT _ => Gt end. Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y). Proof. intros; unfold compare; destruct O.compare; auto. Qed. End Update_OT. (** ** From OrderedType to OrderedTypeOrig. *) Module Backport_OT (O:OrderedType) <: OrderedTypeOrig. Include Backport_DT O. (* Provides : t eq eq_refl eq_sym eq_trans eq_dec *) Definition lt := O.lt. Lemma lt_not_eq : forall x y, lt x y -> ~eq x y. Proof. intros x y L E; rewrite E in L. apply (StrictOrder_Irreflexive y); auto. Qed. Lemma lt_trans : Transitive lt. Proof. apply O.lt_strorder. Qed. Definition compare : forall x y, Compare lt eq x y. Proof. intros x y; destruct (CompSpec2Type (O.compare_spec x y)); [apply EQ|apply LT|apply GT]; auto. Defined. End Backport_OT. (** ** From OrderedTypeAlt to OrderedType. *) Module OT_from_Alt (Import O:OrderedTypeAlt) <: OrderedType. Definition t := t. Definition eq x y := (x?=y) = Eq. Definition lt x y := (x?=y) = Lt. Instance eq_equiv : Equivalence eq. Proof. split; red. (* refl *) unfold eq; intros x. assert (H:=compare_sym x x). destruct (x ?= x); simpl in *; auto; discriminate. (* sym *) unfold eq; intros x y H. rewrite compare_sym, H; simpl; auto. (* trans *) apply compare_trans. Qed. Instance lt_strorder : StrictOrder lt. Proof. split; repeat red; unfold lt; try apply compare_trans. intros x H. assert (eq x x) by reflexivity. unfold eq in *; congruence. Qed. Lemma lt_eq : forall x y z, lt x y -> eq y z -> lt x z. Proof. unfold lt, eq; intros x y z Hxy Hyz. destruct (compare x z) eqn:Hxz; auto. rewrite compare_sym, CompOpp_iff in Hyz. simpl in Hyz. rewrite (compare_trans Hxz Hyz) in Hxy; discriminate. rewrite compare_sym, CompOpp_iff in Hxy. simpl in Hxy. rewrite (compare_trans Hxy Hxz) in Hyz; discriminate. Qed. Lemma eq_lt : forall x y z, eq x y -> lt y z -> lt x z. Proof. unfold lt, eq; intros x y z Hxy Hyz. destruct (compare x z) eqn:Hxz; auto. 
rewrite compare_sym, CompOpp_iff in Hxy. simpl in Hxy. rewrite (compare_trans Hxy Hxz) in Hyz; discriminate. rewrite compare_sym, CompOpp_iff in Hyz. simpl in Hyz. rewrite (compare_trans Hxz Hyz) in Hxy; discriminate. Qed. Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. apply proper_sym_impl_iff_2; auto with *. repeat red; intros. eapply lt_eq; eauto. eapply eq_lt; eauto. symmetry; auto. Qed. Definition compare := O.compare. Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y). Proof. unfold eq, lt, compare; intros. destruct (O.compare x y) eqn:H; auto. apply CompGt. rewrite compare_sym, H; auto. Qed. Definition eq_dec : forall x y, { eq x y } + { ~ eq x y }. Proof. intros; unfold eq. case (x ?= y); [ left | right | right ]; auto; discriminate. Defined. End OT_from_Alt. (** From the original presentation to this alternative one. *) Module OT_to_Alt (Import O:OrderedType) <: OrderedTypeAlt. Definition t := t. Definition compare := compare. Infix "?=" := compare (at level 70, no associativity). Lemma compare_sym : forall x y, (y?=x) = CompOpp (x?=y). Proof. intros x y; unfold compare. destruct (compare_spec x y) as [U|U|U]; destruct (compare_spec y x) as [V|V|V]; auto. rewrite U in V. elim (StrictOrder_Irreflexive y); auto. rewrite U in V. elim (StrictOrder_Irreflexive y); auto. rewrite V in U. elim (StrictOrder_Irreflexive x); auto. rewrite V in U. elim (StrictOrder_Irreflexive x); auto. rewrite V in U. elim (StrictOrder_Irreflexive x); auto. rewrite V in U. elim (StrictOrder_Irreflexive y); auto. Qed. Lemma compare_Eq : forall x y, compare x y = Eq <-> eq x y. Proof. unfold compare. intros x y; destruct (compare_spec x y); intuition; try discriminate. rewrite H0 in H. elim (StrictOrder_Irreflexive y); auto. rewrite H0 in H. elim (StrictOrder_Irreflexive y); auto. Qed. Lemma compare_Lt : forall x y, compare x y = Lt <-> lt x y. Proof. unfold compare. intros x y; destruct (compare_spec x y); intuition; try discriminate. rewrite H in H0. elim (StrictOrder_Irreflexive y); auto. rewrite H in H0. elim (StrictOrder_Irreflexive x); auto. Qed. Lemma compare_Gt : forall x y, compare x y = Gt <-> lt y x. Proof. intros x y. rewrite compare_sym, CompOpp_iff. apply compare_Lt. Qed. Lemma compare_trans : forall c x y z, (x?=y) = c -> (y?=z) = c -> (x?=z) = c. Proof. intros c x y z. destruct c; unfold compare; rewrite ?compare_Eq, ?compare_Lt, ?compare_Gt; transitivity y; auto. Qed. End OT_to_Alt. coq-8.4pl2/theories/Structures/OrdersFacts.v0000640000175000001440000003071111600205267020243 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* y==x := Equivalence_Symmetric x y. Definition eq_trans (x y z:t) : x==y -> y==z -> x==z := Equivalence_Transitive x y z. Definition lt_trans (x y z:t) : x y x b | _ => b' end). Proof. intros; destruct eq_dec; elim_compare x y; auto; order. Qed. Lemma eqb_alt : forall x y, eqb x y = match compare x y with Eq => true | _ => false end. Proof. unfold eqb; intros; apply if_eq_dec. Qed. Instance eqb_compat : Proper (eq==>eq==>Logic.eq) eqb. Proof. intros x x' Hxx' y y' Hyy'. rewrite 2 eqb_alt, Hxx', Hyy'; auto. Qed. End OrderedTypeFacts. (** * Tests of the order tactic Is it at least capable of proving some basic properties ? *) Module OrderedTypeTest (Import O:OrderedType'). Module Import MO := OrderedTypeFacts O. Local Open Scope order. Lemma lt_not_eq x y : x ~x==y. Proof. order. Qed. Lemma lt_eq x y z : x y==z -> x y x y==z -> x<=z. Proof. order. 
Qed. Lemma eq_le x y z : x==y -> y<=z -> x<=z. Proof. order. Qed. Lemma neq_eq x y z : ~x==y -> y==z -> ~x==z. Proof. order. Qed. Lemma eq_neq x y z : x==y -> ~y==z -> ~x==z. Proof. order. Qed. Lemma le_lt_trans x y z : x<=y -> y x y<=z -> x y<=z -> x<=z. Proof. order. Qed. Lemma le_antisym x y : x<=y -> y<=x -> x==y. Proof. order. Qed. Lemma le_neq x y : x<=y -> ~x==y -> x ~y==x. Proof. order. Qed. Lemma lt_le x y : x x<=y. Proof. order. Qed. Lemma gt_not_eq x y : y ~x==y. Proof. order. Qed. Lemma eq_not_lt x y : x==y -> ~x ~ y ~ y ~xeq==>iff) lt. Proof. unfold lt; auto with *. Qed. Lemma le_lteq : forall x y, le x y <-> lt x y \/ eq x y. Proof. intros; unfold le, lt, flip. rewrite O.le_lteq; intuition. Qed. Definition compare := flip O.compare. Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y). Proof. intros; unfold compare, eq, lt, flip. destruct (O.compare_spec y x); auto with relations. Qed. End OrderedTypeRev. Unset Implicit Arguments. (** * Order relations derived from a [compare] function. We factorize here some common properties for ZArith, NArith and co, where [lt] and [le] are defined in terms of [compare]. Note that we do not require anything here concerning compatibility of [compare] w.r.t [eq], nor anything concerning transitivity. *) Module Type CompareBasedOrder (Import E:EqLtLe')(Import C:HasCmp E). Include CmpNotation E C. Include IsEq E. Axiom compare_eq_iff : forall x y, (x ?= y) = Eq <-> x == y. Axiom compare_lt_iff : forall x y, (x ?= y) = Lt <-> x < y. Axiom compare_le_iff : forall x y, (x ?= y) <> Gt <-> x <= y. Axiom compare_antisym : forall x y, (y ?= x) = CompOpp (x ?= y). End CompareBasedOrder. Module Type CompareBasedOrderFacts (Import E:EqLtLe') (Import C:HasCmp E) (Import O:CompareBasedOrder E C). Lemma compare_spec x y : CompareSpec (x==y) (x x==y. Proof. apply compare_eq_iff. Qed. Lemma compare_refl x : (x ?= x) = Eq. Proof. now apply compare_eq_iff. Qed. Lemma compare_gt_iff x y : (x ?= y) = Gt <-> y Lt <-> y<=x. Proof. now rewrite <- compare_le_iff, compare_antisym, CompOpp_iff. Qed. Lemma compare_ngt_iff x y : (x ?= y) <> Gt <-> ~(y Lt <-> ~(x ~(x<=y). Proof. rewrite <- compare_le_iff. destruct compare; split; easy || now destruct 1. Qed. Lemma compare_nge_iff x y : (x ?= y) = Lt <-> ~(y<=x). Proof. now rewrite <- compare_nle_iff, compare_antisym, CompOpp_iff. Qed. Lemma lt_irrefl x : ~ (x n < m \/ n==m. Proof. rewrite <- compare_lt_iff, <- compare_le_iff, <- compare_eq_iff. destruct (n ?= m); now intuition. Qed. End CompareBasedOrderFacts. (** Basic facts about boolean comparisons *) Module Type BoolOrderFacts (Import E:EqLtLe') (Import C:HasCmp E) (Import F:HasBoolOrdFuns' E) (Import O:CompareBasedOrder E C) (Import S:BoolOrdSpecs E F). Include CompareBasedOrderFacts E C O. (** Nota : apart from [eqb_compare] below, facts about [eqb] are in BoolEqualityFacts *) (** Alternate specifications based on [BoolSpec] and [reflect] *) Lemma leb_spec0 x y : reflect (x<=y) (x<=?y). Proof. apply iff_reflect. symmetry. apply leb_le. Defined. Lemma leb_spec x y : BoolSpec (x<=y) (y ~ (x <= y). Proof. now rewrite <- not_true_iff_false, leb_le. Qed. Lemma leb_gt x y : x <=? y = false <-> y < x. Proof. now rewrite leb_nle, <- compare_lt_iff, compare_nge_iff. Qed. Lemma ltb_nlt x y : x ~ (x < y). Proof. now rewrite <- not_true_iff_false, ltb_lt. Qed. Lemma ltb_ge x y : x y <= x. Proof. now rewrite ltb_nlt, <- compare_le_iff, compare_ngt_iff. Qed. (** Basic equality laws for boolean tests *) Lemma leb_refl x : x <=? x = true. Proof. apply leb_le. 
apply lt_eq_cases. now right. Qed. Lemma leb_antisym x y : y <=? x = negb (x true | _ => false end. Proof. apply eq_true_iff_eq. rewrite eqb_eq, <- compare_eq_iff. destruct compare; now split. Qed. Lemma ltb_compare x y : (x true | _ => false end. Proof. apply eq_true_iff_eq. rewrite ltb_lt, <- compare_lt_iff. destruct compare; now split. Qed. Lemma leb_compare x y : (x <=? y) = match compare x y with Gt => false | _ => true end. Proof. apply eq_true_iff_eq. rewrite leb_le, <- compare_le_iff. destruct compare; split; try easy. now destruct 1. Qed. End BoolOrderFacts. coq-8.4pl2/theories/Structures/OrderedTypeEx.v0000640000175000001440000002050011776416511020554 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> Prop. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Axiom lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z. Axiom lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Parameter compare : forall x y : t, Compare lt eq x y. Parameter eq_dec : forall x y : t, { eq x y } + { ~ eq x y }. End UsualOrderedType. (** a [UsualOrderedType] is in particular an [OrderedType]. *) Module UOT_to_OT (U:UsualOrderedType) <: OrderedType := U. (** [nat] is an ordered type with respect to the usual order on natural numbers. *) Module Nat_as_OT <: UsualOrderedType. Definition t := nat. Definition eq := @eq nat. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Definition lt := lt. Lemma lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z. Proof. unfold lt; intros; apply lt_trans with y; auto. Qed. Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Proof. unfold lt, eq; intros; omega. Qed. Definition compare x y : Compare lt eq x y. Proof. case_eq (nat_compare x y); intro. - apply EQ. now apply nat_compare_eq. - apply LT. now apply nat_compare_Lt_lt. - apply GT. now apply nat_compare_Gt_gt. Defined. Definition eq_dec := eq_nat_dec. End Nat_as_OT. (** [Z] is an ordered type with respect to the usual order on integers. *) Local Open Scope Z_scope. Module Z_as_OT <: UsualOrderedType. Definition t := Z. Definition eq := @eq Z. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Definition lt (x y:Z) := (x y x ~ x=y. Proof. intros; omega. Qed. Definition compare x y : Compare lt eq x y. Proof. case_eq (x ?= y); intro. - apply EQ. now apply Z.compare_eq. - apply LT. assumption. - apply GT. now apply Z.gt_lt. Defined. Definition eq_dec := Z.eq_dec. End Z_as_OT. (** [positive] is an ordered type with respect to the usual order on natural numbers. *) Local Open Scope positive_scope. Module Positive_as_OT <: UsualOrderedType. Definition t:=positive. Definition eq:=@eq positive. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Definition lt := Pos.lt. Definition lt_trans := Pos.lt_trans. Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Proof. intros x y H. contradict H. rewrite H. apply Pos.lt_irrefl. Qed. Definition compare x y : Compare lt eq x y. Proof. case_eq (x ?= y); intros H. - apply EQ. now apply Pos.compare_eq. - apply LT; assumption. - apply GT. now apply Pos.gt_lt. Defined. Definition eq_dec := Pos.eq_dec. End Positive_as_OT. (** [N] is an ordered type with respect to the usual order on natural numbers. *) Module N_as_OT <: UsualOrderedType. Definition t:=N. 
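(** For illustration only: the numeric instances of this file ([Nat_as_OT],
    [Z_as_OT], [Positive_as_OT] above, [N_as_OT] being completed here) are
    typically what gets plugged into the finite-set and finite-map functors,
    along the lines of
<<
    Module NatSet := FSetAVL.Make(Nat_as_OT).
    Module ZMap   := FMapAVL.Make(Z_as_OT).
>>
    The functor names are only meant as plausible examples; any functor
    expecting an [OrderedType] argument would do. *)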
Definition eq:=@eq N. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Definition lt := N.lt. Definition lt_trans := N.lt_trans. Definition lt_not_eq := N.lt_neq. Definition compare x y : Compare lt eq x y. Proof. case_eq (x ?= y)%N; intro. - apply EQ. now apply N.compare_eq. - apply LT. assumption. - apply GT. now apply N.gt_lt. Defined. Definition eq_dec := N.eq_dec. End N_as_OT. (** From two ordered types, we can build a new OrderedType over their cartesian product, using the lexicographic order. *) Module PairOrderedType(O1 O2:OrderedType) <: OrderedType. Module MO1:=OrderedTypeFacts(O1). Module MO2:=OrderedTypeFacts(O2). Definition t := prod O1.t O2.t. Definition eq x y := O1.eq (fst x) (fst y) /\ O2.eq (snd x) (snd y). Definition lt x y := O1.lt (fst x) (fst y) \/ (O1.eq (fst x) (fst y) /\ O2.lt (snd x) (snd y)). Lemma eq_refl : forall x : t, eq x x. Proof. intros (x1,x2); red; simpl; auto. Qed. Lemma eq_sym : forall x y : t, eq x y -> eq y x. Proof. intros (x1,x2) (y1,y2); unfold eq; simpl; intuition. Qed. Lemma eq_trans : forall x y z : t, eq x y -> eq y z -> eq x z. Proof. intros (x1,x2) (y1,y2) (z1,z2); unfold eq; simpl; intuition eauto. Qed. Lemma lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z. Proof. intros (x1,x2) (y1,y2) (z1,z2); unfold eq, lt; simpl; intuition. left; eauto. left; eapply MO1.lt_eq; eauto. left; eapply MO1.eq_lt; eauto. right; split; eauto. Qed. Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Proof. intros (x1,x2) (y1,y2); unfold eq, lt; simpl; intuition. apply (O1.lt_not_eq H0 H1). apply (O2.lt_not_eq H3 H2). Qed. Definition compare : forall x y : t, Compare lt eq x y. intros (x1,x2) (y1,y2). destruct (O1.compare x1 y1). apply LT; unfold lt; auto. destruct (O2.compare x2 y2). apply LT; unfold lt; auto. apply EQ; unfold eq; auto. apply GT; unfold lt; auto. apply GT; unfold lt; auto. Defined. Definition eq_dec : forall x y : t, {eq x y} + {~ eq x y}. Proof. intros; elim (compare x y); intro H; [ right | left | right ]; auto. auto using lt_not_eq. assert (~ eq y x); auto using lt_not_eq, eq_sym. Defined. End PairOrderedType. (** Even if [positive] can be seen as an ordered type with respect to the usual order (see above), we can also use a lexicographic order over bits (lower bits are considered first). This is more natural when using [positive] as indexes for sets or maps (see FSetPositive and FMapPositive. *) Module PositiveOrderedTypeBits <: UsualOrderedType. Definition t:=positive. Definition eq:=@eq positive. Definition eq_refl := @eq_refl t. Definition eq_sym := @eq_sym t. Definition eq_trans := @eq_trans t. Fixpoint bits_lt (p q:positive) : Prop := match p, q with | xH, xI _ => True | xH, _ => False | xO p, xO q => bits_lt p q | xO _, _ => True | xI p, xI q => bits_lt p q | xI _, _ => False end. Definition lt:=bits_lt. Lemma bits_lt_trans : forall x y z : positive, bits_lt x y -> bits_lt y z -> bits_lt x z. Proof. induction x. induction y; destruct z; simpl; eauto; intuition. induction y; destruct z; simpl; eauto; intuition. induction y; destruct z; simpl; eauto; intuition. Qed. Lemma lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z. Proof. exact bits_lt_trans. Qed. Lemma bits_lt_antirefl : forall x : positive, ~ bits_lt x x. Proof. induction x; simpl; auto. Qed. Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y. Proof. intros; intro. rewrite <- H0 in H; clear H0 y. unfold lt in H. exact (bits_lt_antirefl x H). Qed. Definition compare : forall x y : t, Compare lt eq x y. Proof. 
induction x; destruct y. - (* I I *) destruct (IHx y). apply LT; auto. apply EQ; rewrite e; red; auto. apply GT; auto. - (* I O *) apply GT; simpl; auto. - (* I H *) apply GT; simpl; auto. - (* O I *) apply LT; simpl; auto. - (* O O *) destruct (IHx y). apply LT; auto. apply EQ; rewrite e; red; auto. apply GT; auto. - (* O H *) apply LT; simpl; auto. - (* H I *) apply LT; simpl; auto. - (* H O *) apply GT; simpl; auto. - (* H H *) apply EQ; red; auto. Qed. Lemma eq_dec (x y: positive): {x = y} + {x <> y}. Proof. intros. case_eq (x ?= y); intros. - left. now apply Pos.compare_eq. - right. intro. subst y. now rewrite (Pos.compare_refl x) in *. - right. intro. subst y. now rewrite (Pos.compare_refl x) in *. Qed. End PositiveOrderedTypeBits. coq-8.4pl2/theories/Structures/Equalities.v0000640000175000001440000001774711600205267020147 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> Prop. End HasEq. Module Type Eq := Typ <+ HasEq. Module Type EqNotation (Import E:Eq). Infix "==" := eq (at level 70, no associativity). Notation "x ~= y" := (~eq x y) (at level 70, no associativity). End EqNotation. Module Type Eq' := Eq <+ EqNotation. (** * Specification of the equality via the [Equivalence] type class *) Module Type IsEq (Import E:Eq). Declare Instance eq_equiv : Equivalence eq. End IsEq. (** * Earlier specification of equality by three separate lemmas. *) Module Type IsEqOrig (Import E:Eq'). Axiom eq_refl : forall x : t, x==x. Axiom eq_sym : forall x y : t, x==y -> y==x. Axiom eq_trans : forall x y z : t, x==y -> y==z -> x==z. Hint Immediate eq_sym. Hint Resolve eq_refl eq_trans. End IsEqOrig. (** * Types with decidable equality *) Module Type HasEqDec (Import E:Eq'). Parameter eq_dec : forall x y : t, { x==y } + { ~ x==y }. End HasEqDec. (** * Boolean Equality *) (** Having [eq_dec] is the same as having a boolean equality plus a correctness proof. *) Module Type HasEqb (Import T:Typ). Parameter Inline eqb : t -> t -> bool. End HasEqb. Module Type EqbSpec (T:Typ)(X:HasEq T)(Y:HasEqb T). Parameter eqb_eq : forall x y, Y.eqb x y = true <-> X.eq x y. End EqbSpec. Module Type EqbNotation (T:Typ)(E:HasEqb T). Infix "=?" := E.eqb (at level 70, no associativity). End EqbNotation. Module Type HasEqBool (E:Eq) := HasEqb E <+ EqbSpec E E. (** From these basic blocks, we can build many combinations of static standalone module types. *) Module Type EqualityType := Eq <+ IsEq. Module Type EqualityTypeOrig := Eq <+ IsEqOrig. Module Type EqualityTypeBoth <: EqualityType <: EqualityTypeOrig := Eq <+ IsEq <+ IsEqOrig. Module Type DecidableType <: EqualityType := Eq <+ IsEq <+ HasEqDec. Module Type DecidableTypeOrig <: EqualityTypeOrig := Eq <+ IsEqOrig <+ HasEqDec. Module Type DecidableTypeBoth <: DecidableType <: DecidableTypeOrig := EqualityTypeBoth <+ HasEqDec. Module Type BooleanEqualityType <: EqualityType := Eq <+ IsEq <+ HasEqBool. Module Type BooleanDecidableType <: DecidableType <: BooleanEqualityType := Eq <+ IsEq <+ HasEqDec <+ HasEqBool. Module Type DecidableTypeFull <: DecidableTypeBoth <: BooleanDecidableType := Eq <+ IsEq <+ IsEqOrig <+ HasEqDec <+ HasEqBool. (** Same, with notation for [eq] *) Module Type EqualityType' := EqualityType <+ EqNotation. Module Type EqualityTypeOrig' := EqualityTypeOrig <+ EqNotation. Module Type EqualityTypeBoth' := EqualityTypeBoth <+ EqNotation. Module Type DecidableType' := DecidableType <+ EqNotation. 
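(** For illustration only: the [<+] compositions above assemble large
    interfaces from small bricks.  A hypothetical implementation of
    [BooleanDecidableType] over [bool] itself would have to provide roughly
<<
    t      := bool
    eq     := @Logic.eq bool
    eqb    := Bool.eqb
    eqb_eq : forall x y, eqb x y = true <-> eq x y
    eq_dec : forall x y, {eq x y} + {~ eq x y}
>>
    where [eq_dec] need not be written by hand: it can be derived from [eqb]
    and [eqb_eq] through the [HasEqBool2Dec] functor appearing later in this
    file.  [Bool.eqb] is mentioned only as a plausible candidate. *)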
Module Type DecidableTypeOrig' := DecidableTypeOrig <+ EqNotation. Module Type DecidableTypeBoth' := DecidableTypeBoth <+ EqNotation. Module Type BooleanEqualityType' := BooleanEqualityType <+ EqNotation <+ EqbNotation. Module Type BooleanDecidableType' := BooleanDecidableType <+ EqNotation <+ EqbNotation. Module Type DecidableTypeFull' := DecidableTypeFull <+ EqNotation. (** * Compatibility wrapper from/to the old version of [EqualityType] and [DecidableType] *) Module BackportEq (E:Eq)(F:IsEq E) <: IsEqOrig E. Definition eq_refl := @Equivalence_Reflexive _ _ F.eq_equiv. Definition eq_sym := @Equivalence_Symmetric _ _ F.eq_equiv. Definition eq_trans := @Equivalence_Transitive _ _ F.eq_equiv. End BackportEq. Module UpdateEq (E:Eq)(F:IsEqOrig E) <: IsEq E. Instance eq_equiv : Equivalence E.eq. Proof. exact (Build_Equivalence _ _ F.eq_refl F.eq_sym F.eq_trans). Qed. End UpdateEq. Module Backport_ET (E:EqualityType) <: EqualityTypeBoth := E <+ BackportEq. Module Update_ET (E:EqualityTypeOrig) <: EqualityTypeBoth := E <+ UpdateEq. Module Backport_DT (E:DecidableType) <: DecidableTypeBoth := E <+ BackportEq. Module Update_DT (E:DecidableTypeOrig) <: DecidableTypeBoth := E <+ UpdateEq. (** * Having [eq_dec] is equivalent to having [eqb] and its spec. *) Module HasEqDec2Bool (E:Eq)(F:HasEqDec E) <: HasEqBool E. Definition eqb x y := if F.eq_dec x y then true else false. Lemma eqb_eq : forall x y, eqb x y = true <-> E.eq x y. Proof. intros x y. unfold eqb. destruct F.eq_dec as [EQ|NEQ]. auto with *. split. discriminate. intro EQ; elim NEQ; auto. Qed. End HasEqDec2Bool. Module HasEqBool2Dec (E:Eq)(F:HasEqBool E) <: HasEqDec E. Lemma eq_dec : forall x y, {E.eq x y}+{~E.eq x y}. Proof. intros x y. assert (H:=F.eqb_eq x y). destruct (F.eqb x y); [left|right]. apply -> H; auto. intro EQ. apply H in EQ. discriminate. Defined. End HasEqBool2Dec. Module Dec2Bool (E:DecidableType) <: BooleanDecidableType := E <+ HasEqDec2Bool. Module Bool2Dec (E:BooleanEqualityType) <: BooleanDecidableType := E <+ HasEqBool2Dec. (** Some properties of boolean equality *) Module BoolEqualityFacts (Import E : BooleanEqualityType'). (** [eqb] is compatible with [eq] *) Instance eqb_compat : Proper (E.eq ==> E.eq ==> Logic.eq) eqb. Proof. intros x x' Exx' y y' Eyy'. apply eq_true_iff_eq. now rewrite 2 eqb_eq, Exx', Eyy'. Qed. (** Alternative specification of [eqb] based on [reflect]. *) Lemma eqb_spec x y : reflect (x==y) (x =? y). Proof. apply iff_reflect. symmetry. apply eqb_eq. Defined. (** Negated form of [eqb_eq] *) Lemma eqb_neq x y : (x =? y) = false <-> x ~= y. Proof. now rewrite <- not_true_iff_false, eqb_eq. Qed. (** Basic equality laws for [eqb] *) Lemma eqb_refl x : (x =? x) = true. Proof. now apply eqb_eq. Qed. Lemma eqb_sym x y : (x =? y) = (y =? x). Proof. apply eq_true_iff_eq. now rewrite 2 eqb_eq. Qed. (** Transitivity is a particular case of [eqb_compat] *) End BoolEqualityFacts. (** * UsualDecidableType A particular case of [DecidableType] where the equality is the usual one of Coq. *) Module Type HasUsualEq (Import T:Typ) <: HasEq T. Definition eq := @Logic.eq t. End HasUsualEq. Module Type UsualEq <: Eq := Typ <+ HasUsualEq. Module Type UsualIsEq (E:UsualEq) <: IsEq E. (* No Instance syntax to avoid saturating the Equivalence tables *) Definition eq_equiv : Equivalence E.eq := eq_equivalence. End UsualIsEq. Module Type UsualIsEqOrig (E:UsualEq) <: IsEqOrig E. Definition eq_refl := @Logic.eq_refl E.t. Definition eq_sym := @Logic.eq_sym E.t. Definition eq_trans := @Logic.eq_trans E.t. End UsualIsEqOrig. 
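(** For illustration only: thanks to [HasEqDec2Bool] and [HasEqBool2Dec]
    above, the two styles of decidability are interchangeable, so an
    implementor can pick whichever is more convenient and recover the other
    one for free:
<<
    Module D2B := Dec2Bool(SomeDecidableType).     (gains eqb and eqb_eq)
    Module B2D := Bool2Dec(SomeBooleanEqType).     (gains eq_dec)
>>
    [SomeDecidableType] and [SomeBooleanEqType] are placeholder names used
    only in this example. *)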
Module Type UsualEqualityType <: EqualityType := UsualEq <+ UsualIsEq. Module Type UsualDecidableType <: DecidableType := UsualEq <+ UsualIsEq <+ HasEqDec. Module Type UsualDecidableTypeOrig <: DecidableTypeOrig := UsualEq <+ UsualIsEqOrig <+ HasEqDec. Module Type UsualDecidableTypeBoth <: DecidableTypeBoth := UsualEq <+ UsualIsEq <+ UsualIsEqOrig <+ HasEqDec. Module Type UsualBoolEq := UsualEq <+ HasEqBool. Module Type UsualDecidableTypeFull <: DecidableTypeFull := UsualEq <+ UsualIsEq <+ UsualIsEqOrig <+ HasEqDec <+ HasEqBool. (** Some shortcuts for easily building a [UsualDecidableType] *) Module Type MiniDecidableType. Include Typ. Parameter eq_dec : forall x y : t, {x=y}+{~x=y}. End MiniDecidableType. Module Make_UDT (M:MiniDecidableType) <: UsualDecidableTypeBoth := M <+ HasUsualEq <+ UsualIsEq <+ UsualIsEqOrig. Module Make_UDTF (M:UsualBoolEq) <: UsualDecidableTypeFull := M <+ UsualIsEq <+ UsualIsEqOrig <+ HasEqBool2Dec. coq-8.4pl2/theories/Structures/EqualitiesFacts.v0000640000175000001440000001122511537153605021120 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* InA eqk x m. Proof. unfold eqke, RelProd; induction 1; firstorder. Qed. Hint Resolve InA_eqke_eqk. Lemma InA_eqk : forall p q m, eqk p q -> InA eqk p m -> InA eqk q m. Proof. intros. rewrite <- H; auto. Qed. Definition MapsTo (k:key)(e:elt):= InA eqke (k,e). Definition In k m := exists e:elt, MapsTo k e m. Hint Unfold MapsTo In. (* An alternative formulation for [In k l] is [exists e, InA eqk (k,e) l] *) Lemma In_alt : forall k l, In k l <-> exists e, InA eqk (k,e) l. Proof. firstorder. exists x; auto. induction H. destruct y; compute in H. exists e; left; auto. destruct IHInA as [e H0]. exists e; auto. Qed. Lemma In_alt2 : forall k l, In k l <-> Exists (fun p => eq k (fst p)) l. Proof. unfold In, MapsTo. setoid_rewrite Exists_exists; setoid_rewrite InA_alt. firstorder. exists (snd x), x; auto. Qed. Lemma In_nil : forall k, In k nil <-> False. Proof. intros; rewrite In_alt2; apply Exists_nil. Qed. Lemma In_cons : forall k p l, In k (p::l) <-> eq k (fst p) \/ In k l. Proof. intros; rewrite !In_alt2, Exists_cons; intuition. Qed. Global Instance MapsTo_compat : Proper (eq==>Logic.eq==>equivlistA eqke==>iff) MapsTo. Proof. intros x x' Hx e e' He l l' Hl. unfold MapsTo. rewrite Hx, He, Hl; intuition. Qed. Global Instance In_compat : Proper (eq==>equivlistA eqk==>iff) In. Proof. intros x x' Hx l l' Hl. rewrite !In_alt. setoid_rewrite Hl. setoid_rewrite Hx. intuition. Qed. Lemma MapsTo_eq : forall l x y e, eq x y -> MapsTo x e l -> MapsTo y e l. Proof. intros l x y e EQ. rewrite <- EQ; auto. Qed. Lemma In_eq : forall l x y, eq x y -> In x l -> In y l. Proof. intros l x y EQ. rewrite <- EQ; auto. Qed. Lemma In_inv : forall k k' e l, In k ((k',e) :: l) -> eq k k' \/ In k l. Proof. intros; invlist In; invlist MapsTo. compute in * |- ; intuition. right; exists x; auto. Qed. Lemma In_inv_2 : forall k k' e e' l, InA eqk (k, e) ((k', e') :: l) -> ~ eq k k' -> InA eqk (k, e) l. Proof. intros; invlist InA; intuition. Qed. Lemma In_inv_3 : forall x x' l, InA eqke x (x' :: l) -> ~ eqk x x' -> InA eqke x l. Proof. intros; invlist InA; compute in * |- ; intuition. Qed. End Elt. Hint Unfold eqk eqke. Hint Extern 2 (eqke ?a ?b) => split. Hint Resolve InA_eqke_eqk. Hint Unfold MapsTo In. Hint Resolve In_inv_2 In_inv_3. End KeyDecidableType. 
(** * PairDecidableType From two decidable types, we can build a new DecidableType over their cartesian product. *) Module PairDecidableType(D1 D2:DecidableType) <: DecidableType. Definition t := (D1.t * D2.t)%type. Definition eq := (D1.eq * D2.eq)%signature. Instance eq_equiv : Equivalence eq := _. Definition eq_dec : forall x y, { eq x y }+{ ~eq x y }. Proof. intros (x1,x2) (y1,y2); unfold eq; simpl. destruct (D1.eq_dec x1 y1); destruct (D2.eq_dec x2 y2); compute; intuition. Defined. End PairDecidableType. (** Similarly for pairs of UsualDecidableType *) Module PairUsualDecidableType(D1 D2:UsualDecidableType) <: UsualDecidableType. Definition t := (D1.t * D2.t)%type. Definition eq := @eq t. Instance eq_equiv : Equivalence eq := _. Definition eq_dec : forall x y, { eq x y }+{ ~eq x y }. Proof. intros (x1,x2) (y1,y2); destruct (D1.eq_dec x1 y1); destruct (D2.eq_dec x2 y2); unfold eq, D1.eq, D2.eq in *; simpl; (left; f_equal; auto; fail) || (right; intro H; injection H; auto). Defined. End PairUsualDecidableType. coq-8.4pl2/theories/Structures/GenericMinMax.v0000640000175000001440000004456312064143020020515 0ustar notinusers(***********************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* t -> t. Parameter max_l : forall x y, y<=x -> max x y == x. Parameter max_r : forall x y, x<=y -> max x y == y. End HasMax. Module Type HasMin (Import E:EqLe'). Parameter Inline min : t -> t -> t. Parameter min_l : forall x y, x<=y -> min x y == x. Parameter min_r : forall x y, y<=x -> min x y == y. End HasMin. Module Type HasMinMax (E:EqLe) := HasMax E <+ HasMin E. (** ** Any [OrderedTypeFull] can be equipped by [max] and [min] based on the compare function. *) Definition gmax {A} (cmp : A->A->comparison) x y := match cmp x y with Lt => y | _ => x end. Definition gmin {A} (cmp : A->A->comparison) x y := match cmp x y with Gt => y | _ => x end. Module GenericMinMax (Import O:OrderedTypeFull') <: HasMinMax O. Definition max := gmax O.compare. Definition min := gmin O.compare. Lemma ge_not_lt x y : y<=x -> x False. Proof. intros H H'. apply (StrictOrder_Irreflexive x). rewrite le_lteq in *; destruct H as [H|H]. transitivity y; auto. rewrite H in H'; auto. Qed. Lemma max_l x y : y<=x -> max x y == x. Proof. intros. unfold max, gmax. case compare_spec; auto with relations. intros; elim (ge_not_lt x y); auto. Qed. Lemma max_r x y : x<=y -> max x y == y. Proof. intros. unfold max, gmax. case compare_spec; auto with relations. intros; elim (ge_not_lt y x); auto. Qed. Lemma min_l x y : x<=y -> min x y == x. Proof. intros. unfold min, gmin. case compare_spec; auto with relations. intros; elim (ge_not_lt y x); auto. Qed. Lemma min_r x y : y<=x -> min x y == y. Proof. intros. unfold min, gmin. case compare_spec; auto with relations. intros; elim (ge_not_lt x y); auto. Qed. End GenericMinMax. (** ** Consequences of the minimalist interface: facts about [max] and [min]. *) Module MinMaxLogicalProperties (Import O:TotalOrder')(Import M:HasMinMax O). Module Import Private_Tac := !MakeOrderTac O O. (** An alternative caracterisation of [max], equivalent to [max_l /\ max_r] *) Lemma max_spec n m : (n < m /\ max n m == m) \/ (m <= n /\ max n m == n). Proof. destruct (lt_total n m); [left|right]. - split; auto. apply max_r. rewrite le_lteq; auto. - assert (m <= n) by (rewrite le_lteq; intuition). split; auto. now apply max_l. Qed. (** A more symmetric version of [max_spec], based only on [le]. Beware that left and right alternatives overlap. 
*) Lemma max_spec_le n m : (n <= m /\ max n m == m) \/ (m <= n /\ max n m == n). Proof. destruct (max_spec n m); [left|right]; intuition; order. Qed. Instance : Proper (eq==>eq==>iff) le. Proof. repeat red. intuition order. Qed. Instance max_compat : Proper (eq==>eq==>eq) max. Proof. intros x x' Hx y y' Hy. assert (H1 := max_spec x y). assert (H2 := max_spec x' y'). set (m := max x y) in *; set (m' := max x' y') in *; clearbody m m'. rewrite <- Hx, <- Hy in *. destruct (lt_total x y); intuition order. Qed. (** A function satisfying the same specification is equal to [max]. *) Lemma max_unicity n m p : ((n < m /\ p == m) \/ (m <= n /\ p == n)) -> p == max n m. Proof. assert (Hm := max_spec n m). destruct (lt_total n m); intuition; order. Qed. Lemma max_unicity_ext f : (forall n m, (n < m /\ f n m == m) \/ (m <= n /\ f n m == n)) -> (forall n m, f n m == max n m). Proof. intros. apply max_unicity; auto. Qed. (** [max] commutes with monotone functions. *) Lemma max_mono f : (Proper (eq ==> eq) f) -> (Proper (le ==> le) f) -> forall x y, max (f x) (f y) == f (max x y). Proof. intros Eqf Lef x y. destruct (max_spec x y) as [(H,E)|(H,E)]; rewrite E; destruct (max_spec (f x) (f y)) as [(H',E')|(H',E')]; auto. assert (f x <= f y) by (apply Lef; order). order. assert (f y <= f x) by (apply Lef; order). order. Qed. (** *** Semi-lattice algebraic properties of [max] *) Lemma max_id n : max n n == n. Proof. apply max_l; order. Qed. Notation max_idempotent := max_id (only parsing). Lemma max_assoc m n p : max m (max n p) == max (max m n) p. Proof. destruct (max_spec n p) as [(H,E)|(H,E)]; rewrite E; destruct (max_spec m n) as [(H',E')|(H',E')]; rewrite E', ?E; try easy. - apply max_r; order. - symmetry. apply max_l; order. Qed. Lemma max_comm n m : max n m == max m n. Proof. destruct (max_spec m n) as [(H,E)|(H,E)]; rewrite E; (apply max_r || apply max_l); order. Qed. Ltac solve_max := match goal with |- context [max ?n ?m] => destruct (max_spec n m); intuition; order end. (** *** Least-upper bound properties of [max] *) Lemma le_max_l n m : n <= max n m. Proof. solve_max. Qed. Lemma le_max_r n m : m <= max n m. Proof. solve_max. Qed. Lemma max_l_iff n m : max n m == n <-> m <= n. Proof. solve_max. Qed. Lemma max_r_iff n m : max n m == m <-> n <= m. Proof. solve_max. Qed. Lemma max_le n m p : p <= max n m -> p <= n \/ p <= m. Proof. destruct (max_spec n m); [right|left]; intuition; order. Qed. Lemma max_le_iff n m p : p <= max n m <-> p <= n \/ p <= m. Proof. split. apply max_le. solve_max. Qed. Lemma max_lt_iff n m p : p < max n m <-> p < n \/ p < m. Proof. destruct (max_spec n m); intuition; order || (right; order) || (left; order). Qed. Lemma max_lub_l n m p : max n m <= p -> n <= p. Proof. solve_max. Qed. Lemma max_lub_r n m p : max n m <= p -> m <= p. Proof. solve_max. Qed. Lemma max_lub n m p : n <= p -> m <= p -> max n m <= p. Proof. solve_max. Qed. Lemma max_lub_iff n m p : max n m <= p <-> n <= p /\ m <= p. Proof. solve_max. Qed. Lemma max_lub_lt n m p : n < p -> m < p -> max n m < p. Proof. solve_max. Qed. Lemma max_lub_lt_iff n m p : max n m < p <-> n < p /\ m < p. Proof. solve_max. Qed. Lemma max_le_compat_l n m p : n <= m -> max p n <= max p m. Proof. intros. apply max_lub_iff. solve_max. Qed. Lemma max_le_compat_r n m p : n <= m -> max n p <= max m p. Proof. intros. apply max_lub_iff. solve_max. Qed. Lemma max_le_compat n m p q : n <= m -> p <= q -> max n p <= max m q. Proof. intros Hnm Hpq. assert (LE := max_le_compat_l _ _ m Hpq). assert (LE' := max_le_compat_r _ _ p Hnm). order. Qed. 
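(** For illustration only: instantiated on natural numbers with the usual
    order (a choice made only for this example), the lemmas above read as
    expected, e.g.
<<
    max 3 5 == 5                          by max_r, since 3 <= 5
    3 <= max 3 5    and    5 <= max 3 5   by le_max_l / le_max_r
    max 3 5 <= 7                          by max_lub, from 3 <= 7 and 5 <= 7
>>
*)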
(** Properties of [min] *) Lemma min_spec n m : (n < m /\ min n m == n) \/ (m <= n /\ min n m == m). Proof. destruct (lt_total n m); [left|right]. - split; auto. apply min_l. rewrite le_lteq; auto. - assert (m <= n) by (rewrite le_lteq; intuition). split; auto. now apply min_r. Qed. Lemma min_spec_le n m : (n <= m /\ min n m == n) \/ (m <= n /\ min n m == m). Proof. destruct (min_spec n m); [left|right]; intuition; order. Qed. Instance min_compat : Proper (eq==>eq==>eq) min. Proof. intros x x' Hx y y' Hy. assert (H1 := min_spec x y). assert (H2 := min_spec x' y'). set (m := min x y) in *; set (m' := min x' y') in *; clearbody m m'. rewrite <- Hx, <- Hy in *. destruct (lt_total x y); intuition order. Qed. Lemma min_unicity n m p : ((n < m /\ p == n) \/ (m <= n /\ p == m)) -> p == min n m. Proof. assert (Hm := min_spec n m). destruct (lt_total n m); intuition; order. Qed. Lemma min_unicity_ext f : (forall n m, (n < m /\ f n m == n) \/ (m <= n /\ f n m == m)) -> (forall n m, f n m == min n m). Proof. intros. apply min_unicity; auto. Qed. Lemma min_mono f : (Proper (eq ==> eq) f) -> (Proper (le ==> le) f) -> forall x y, min (f x) (f y) == f (min x y). Proof. intros Eqf Lef x y. destruct (min_spec x y) as [(H,E)|(H,E)]; rewrite E; destruct (min_spec (f x) (f y)) as [(H',E')|(H',E')]; auto. assert (f x <= f y) by (apply Lef; order). order. assert (f y <= f x) by (apply Lef; order). order. Qed. Lemma min_id n : min n n == n. Proof. apply min_l; order. Qed. Notation min_idempotent := min_id (only parsing). Lemma min_assoc m n p : min m (min n p) == min (min m n) p. Proof. destruct (min_spec n p) as [(H,E)|(H,E)]; rewrite E; destruct (min_spec m n) as [(H',E')|(H',E')]; rewrite E', ?E; try easy. - symmetry. apply min_l; order. - apply min_r; order. Qed. Lemma min_comm n m : min n m == min m n. Proof. destruct (min_spec m n) as [(H,E)|(H,E)]; rewrite E; (apply min_r || apply min_l); order. Qed. Ltac solve_min := match goal with |- context [min ?n ?m] => destruct (min_spec n m); intuition; order end. Lemma le_min_r n m : min n m <= m. Proof. solve_min. Qed. Lemma le_min_l n m : min n m <= n. Proof. solve_min. Qed. Lemma min_l_iff n m : min n m == n <-> n <= m. Proof. solve_min. Qed. Lemma min_r_iff n m : min n m == m <-> m <= n. Proof. solve_min. Qed. Lemma min_le n m p : min n m <= p -> n <= p \/ m <= p. Proof. destruct (min_spec n m); [left|right]; intuition; order. Qed. Lemma min_le_iff n m p : min n m <= p <-> n <= p \/ m <= p. Proof. split. apply min_le. solve_min. Qed. Lemma min_lt_iff n m p : min n m < p <-> n < p \/ m < p. Proof. destruct (min_spec n m); intuition; order || (right; order) || (left; order). Qed. Lemma min_glb_l n m p : p <= min n m -> p <= n. Proof. solve_min. Qed. Lemma min_glb_r n m p : p <= min n m -> p <= m. Proof. solve_min. Qed. Lemma min_glb n m p : p <= n -> p <= m -> p <= min n m. Proof. solve_min. Qed. Lemma min_glb_iff n m p : p <= min n m <-> p <= n /\ p <= m. Proof. solve_min. Qed. Lemma min_glb_lt n m p : p < n -> p < m -> p < min n m. Proof. solve_min. Qed. Lemma min_glb_lt_iff n m p : p < min n m <-> p < n /\ p < m. Proof. solve_min. Qed. Lemma min_le_compat_l n m p : n <= m -> min p n <= min p m. Proof. intros. apply min_glb_iff. solve_min. Qed. Lemma min_le_compat_r n m p : n <= m -> min n p <= min m p. Proof. intros. apply min_glb_iff. solve_min. Qed. Lemma min_le_compat n m p q : n <= m -> p <= q -> min n p <= min m q. Proof. intros Hnm Hpq. assert (LE := min_le_compat_l _ _ m Hpq). assert (LE' := min_le_compat_r _ _ p Hnm). order. Qed. 
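(** For illustration only: each statement of the block above is the exact
    order-dual of the corresponding [max] statement, [min] being the greatest
    lower bound where [max] was the least upper bound; e.g. on natural
    numbers [min 3 5 == 3] by [min_l], and [min_glb] mirrors [max_lub]. *)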
(** *** Combined properties of min and max *) Lemma min_max_absorption n m : max n (min n m) == n. Proof. intros. destruct (min_spec n m) as [(C,E)|(C,E)]; rewrite E. apply max_l. order. destruct (max_spec n m); intuition; order. Qed. Lemma max_min_absorption n m : min n (max n m) == n. Proof. intros. destruct (max_spec n m) as [(C,E)|(C,E)]; rewrite E. destruct (min_spec n m) as [(C',E')|(C',E')]; auto. order. apply min_l; auto. order. Qed. (** Distributivity *) Lemma max_min_distr n m p : max n (min m p) == min (max n m) (max n p). Proof. symmetry. apply min_mono. eauto with *. repeat red; intros. apply max_le_compat_l; auto. Qed. Lemma min_max_distr n m p : min n (max m p) == max (min n m) (min n p). Proof. symmetry. apply max_mono. eauto with *. repeat red; intros. apply min_le_compat_l; auto. Qed. (** Modularity *) Lemma max_min_modular n m p : max n (min m (max n p)) == min (max n m) (max n p). Proof. rewrite <- max_min_distr. destruct (max_spec n p) as [(C,E)|(C,E)]; rewrite E; auto with *. destruct (min_spec m n) as [(C',E')|(C',E')]; rewrite E'. rewrite 2 max_l; try order. rewrite min_le_iff; auto. rewrite 2 max_l; try order. rewrite min_le_iff; auto. Qed. Lemma min_max_modular n m p : min n (max m (min n p)) == max (min n m) (min n p). Proof. intros. rewrite <- min_max_distr. destruct (min_spec n p) as [(C,E)|(C,E)]; rewrite E; auto with *. destruct (max_spec m n) as [(C',E')|(C',E')]; rewrite E'. rewrite 2 min_l; try order. rewrite max_le_iff; right; order. rewrite 2 min_l; try order. rewrite max_le_iff; auto. Qed. (** Disassociativity *) Lemma max_min_disassoc n m p : min n (max m p) <= max (min n m) p. Proof. intros. rewrite min_max_distr. auto using max_le_compat_l, le_min_r. Qed. (** Anti-monotonicity swaps the role of [min] and [max] *) Lemma max_min_antimono f : Proper (eq==>eq) f -> Proper (le==>inverse le) f -> forall x y, max (f x) (f y) == f (min x y). Proof. intros Eqf Lef x y. destruct (min_spec x y) as [(H,E)|(H,E)]; rewrite E; destruct (max_spec (f x) (f y)) as [(H',E')|(H',E')]; auto. assert (f y <= f x) by (apply Lef; order). order. assert (f x <= f y) by (apply Lef; order). order. Qed. Lemma min_max_antimono f : Proper (eq==>eq) f -> Proper (le==>inverse le) f -> forall x y, min (f x) (f y) == f (max x y). Proof. intros Eqf Lef x y. destruct (max_spec x y) as [(H,E)|(H,E)]; rewrite E; destruct (min_spec (f x) (f y)) as [(H',E')|(H',E')]; auto. assert (f y <= f x) by (apply Lef; order). order. assert (f x <= f y) by (apply Lef; order). order. Qed. End MinMaxLogicalProperties. (** ** Properties requiring a decidable order *) Module MinMaxDecProperties (Import O:OrderedTypeFull')(Import M:HasMinMax O). (** Induction principles for [max]. *) Lemma max_case_strong n m (P:t -> Type) : (forall x y, x==y -> P x -> P y) -> (m<=n -> P n) -> (n<=m -> P m) -> P (max n m). Proof. intros Compat Hl Hr. destruct (CompSpec2Type (compare_spec n m)) as [EQ|LT|GT]. assert (n<=m) by (rewrite le_lteq; auto). apply (Compat m), Hr; auto. symmetry; apply max_r; auto. assert (n<=m) by (rewrite le_lteq; auto). apply (Compat m), Hr; auto. symmetry; apply max_r; auto. assert (m<=n) by (rewrite le_lteq; auto). apply (Compat n), Hl; auto. symmetry; apply max_l; auto. Defined. Lemma max_case n m (P:t -> Type) : (forall x y, x == y -> P x -> P y) -> P n -> P m -> P (max n m). Proof. intros. apply max_case_strong; auto. Defined. (** [max] returns one of its arguments. *) Lemma max_dec n m : {max n m == n} + {max n m == m}. Proof. apply max_case; auto with relations. 
intros x y H [E|E]; [left|right]; rewrite <-H; auto. Defined. (** Idem for [min] *) Lemma min_case_strong n m (P:O.t -> Type) : (forall x y, x == y -> P x -> P y) -> (n<=m -> P n) -> (m<=n -> P m) -> P (min n m). Proof. intros Compat Hl Hr. destruct (CompSpec2Type (compare_spec n m)) as [EQ|LT|GT]. assert (n<=m) by (rewrite le_lteq; auto). apply (Compat n), Hl; auto. symmetry; apply min_l; auto. assert (n<=m) by (rewrite le_lteq; auto). apply (Compat n), Hl; auto. symmetry; apply min_l; auto. assert (m<=n) by (rewrite le_lteq; auto). apply (Compat m), Hr; auto. symmetry; apply min_r; auto. Defined. Lemma min_case n m (P:O.t -> Type) : (forall x y, x == y -> P x -> P y) -> P n -> P m -> P (min n m). Proof. intros. apply min_case_strong; auto. Defined. Lemma min_dec n m : {min n m == n} + {min n m == m}. Proof. intros. apply min_case; auto with relations. intros x y H [E|E]; [left|right]; rewrite <- E; auto with relations. Defined. End MinMaxDecProperties. Module MinMaxProperties (Import O:OrderedTypeFull')(Import M:HasMinMax O). Module OT := OTF_to_TotalOrder O. Include MinMaxLogicalProperties OT M. Include MinMaxDecProperties O M. Definition max_l := max_l. Definition max_r := max_r. Definition min_l := min_l. Definition min_r := min_r. Notation max_monotone := max_mono. Notation min_monotone := min_mono. Notation max_min_antimonotone := max_min_antimono. Notation min_max_antimonotone := min_max_antimono. End MinMaxProperties. (** ** When the equality is Leibniz, we can skip a few [Proper] precondition. *) Module UsualMinMaxLogicalProperties (Import O:UsualTotalOrder')(Import M:HasMinMax O). Include MinMaxLogicalProperties O M. Lemma max_monotone f : Proper (le ==> le) f -> forall x y, max (f x) (f y) = f (max x y). Proof. intros; apply max_mono; auto. congruence. Qed. Lemma min_monotone f : Proper (le ==> le) f -> forall x y, min (f x) (f y) = f (min x y). Proof. intros; apply min_mono; auto. congruence. Qed. Lemma min_max_antimonotone f : Proper (le ==> inverse le) f -> forall x y, min (f x) (f y) = f (max x y). Proof. intros; apply min_max_antimono; auto. congruence. Qed. Lemma max_min_antimonotone f : Proper (le ==> inverse le) f -> forall x y, max (f x) (f y) = f (min x y). Proof. intros; apply max_min_antimono; auto. congruence. Qed. End UsualMinMaxLogicalProperties. Module UsualMinMaxDecProperties (Import O:UsualOrderedTypeFull')(Import M:HasMinMax O). Module Import Private_Dec := MinMaxDecProperties O M. Lemma max_case_strong : forall n m (P:t -> Type), (m<=n -> P n) -> (n<=m -> P m) -> P (max n m). Proof. intros; apply max_case_strong; auto. congruence. Defined. Lemma max_case : forall n m (P:t -> Type), P n -> P m -> P (max n m). Proof. intros; apply max_case_strong; auto. Defined. Lemma max_dec : forall n m, {max n m = n} + {max n m = m}. Proof. exact max_dec. Defined. Lemma min_case_strong : forall n m (P:O.t -> Type), (n<=m -> P n) -> (m<=n -> P m) -> P (min n m). Proof. intros; apply min_case_strong; auto. congruence. Defined. Lemma min_case : forall n m (P:O.t -> Type), P n -> P m -> P (min n m). Proof. intros. apply min_case_strong; auto. Defined. Lemma min_dec : forall n m, {min n m = n} + {min n m = m}. Proof. exact min_dec. Defined. End UsualMinMaxDecProperties. Module UsualMinMaxProperties (Import O:UsualOrderedTypeFull')(Import M:HasMinMax O). Module OT := OTF_to_TotalOrder O. Include UsualMinMaxLogicalProperties OT M. Include UsualMinMaxDecProperties O M. Definition max_l := max_l. Definition max_r := max_r. Definition min_l := min_l. Definition min_r := min_r. 
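(** Editor's note: an illustrative use, not part of the original file.
    With Leibniz equality, [min_dec] supports direct case analysis on
    which argument [min] returns; [min_dec_example] is a made-up name. *)

Lemma min_dec_example n m : min n m = n \/ min n m = m.
Proof. destruct (min_dec n m); auto. Qed.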
End UsualMinMaxProperties.

(** From [TotalOrder] and [HasMax] and [HasEqDec], we can prove
    that the order is decidable and build an [OrderedTypeFull]. *)

Module TOMaxEqDec_to_Compare
 (Import O:TotalOrder')(Import M:HasMax O)(Import E:HasEqDec O) <: HasCompare O.

 Definition compare x y :=
  if eq_dec x y then Eq
  else if eq_dec (M.max x y) y then Lt else Gt.

 Lemma compare_spec : forall x y, CompSpec eq lt x y (compare x y).
 Proof.
  intros; unfold compare; repeat destruct eq_dec; auto; constructor.
  destruct (lt_total x y); auto.
  absurd (x==y); auto. transitivity (max x y); auto.
  symmetry. apply max_l. rewrite le_lteq; intuition.
  destruct (lt_total y x); auto.
  absurd (max x y == y); auto. apply max_r; rewrite le_lteq; intuition.
 Qed.

End TOMaxEqDec_to_Compare.

Module TOMaxEqDec_to_OTF (O:TotalOrder)(M:HasMax O)(E:HasEqDec O)
 <: OrderedTypeFull := O <+ E <+ TOMaxEqDec_to_Compare O M E.

(** TODO: Some remaining questions...

    --> Compare with a type-classes version?
    --> Are [max_unicity] and [max_unicity_ext] really convenient for
        expressing that any possible definition of max is in fact equivalent?
    --> Is it possible to avoid copy-paste about min even more? *)

coq-8.4pl2/theories/Structures/OrderedType.v

(************************************************************************)
(*  v      *   The Coq Proof Assistant  /  The Coq Development Team     *)
(* <O___,, *   INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2012     *)
(*   \VV/  **************************************************************)
(*    //   *      This file is distributed under the terms of the       *)
(*         *       GNU Lesser General Public License Version 2.1        *)
(************************************************************************)

Require Export SetoidList Morphisms OrdersTac.
Set Implicit Arguments.
Unset Strict Implicit.

(** * Ordered types *)

Inductive Compare (X : Type) (lt eq : X -> X -> Prop) (x y : X) : Type :=
  | LT : lt x y -> Compare lt eq x y
  | EQ : eq x y -> Compare lt eq x y
  | GT : lt y x -> Compare lt eq x y.

Arguments LT [X lt eq x y] _.
Arguments EQ [X lt eq x y] _.
Arguments GT [X lt eq x y] _.

Module Type MiniOrderedType.

  Parameter Inline t : Type.

  Parameter Inline eq : t -> t -> Prop.
  Parameter Inline lt : t -> t -> Prop.

  Axiom eq_refl : forall x : t, eq x x.
  Axiom eq_sym : forall x y : t, eq x y -> eq y x.
  Axiom eq_trans : forall x y z : t, eq x y -> eq y z -> eq x z.

  Axiom lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z.
  Axiom lt_not_eq : forall x y : t, lt x y -> ~ eq x y.

  Parameter compare : forall x y : t, Compare lt eq x y.

  Hint Immediate eq_sym.
  Hint Resolve eq_refl eq_trans lt_not_eq lt_trans.

End MiniOrderedType.

Module Type OrderedType.
  Include MiniOrderedType.

  (** An [eq_dec] can be deduced from [compare] below. But adding this
      redundant field makes it possible to see an [OrderedType] as a
      [DecidableType]. *)
  Parameter eq_dec : forall x y, { eq x y } + { ~ eq x y }.

End OrderedType.

Module MOT_to_OT (Import O : MiniOrderedType) <: OrderedType.
  Include O.

  Definition eq_dec : forall x y : t, {eq x y} + {~ eq x y}.
  Proof.
   intros; elim (compare x y); intro H; [ right | left | right ]; auto.
   assert (~ eq y x); auto.
  Defined.

End MOT_to_OT.

(** * Ordered types properties *)

(** Additional properties that can be derived from signature [OrderedType]. *)

Module OrderedTypeFacts (Import O: OrderedType).

  Instance eq_equiv : Equivalence eq.
  Proof. split; [ exact eq_refl | exact eq_sym | exact eq_trans ]. Qed.

  Lemma lt_antirefl : forall x, ~ lt x x.
  Proof.
   intros; intro; absurd (eq x x); auto.
  Qed.

  Instance lt_strorder : StrictOrder lt.
  Proof. split; [ exact lt_antirefl | exact lt_trans]. Qed.

  Lemma lt_eq : forall x y z, lt x y -> eq y z -> lt x z.
  Proof.
   intros; destruct (compare x z); auto.
   elim (lt_not_eq H); apply eq_trans with z; auto.
   elim (lt_not_eq (lt_trans l H)); auto.
  Qed.

  Lemma eq_lt : forall x y z, eq x y -> lt y z -> lt x z.
  Proof.
   intros; destruct (compare x z); auto.
   elim (lt_not_eq H0); apply eq_trans with x; auto.
   elim (lt_not_eq (lt_trans H0 l)); auto.
  Qed.
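  (** Editor's note: an illustrative lemma, not part of the original file.
      It shows how [eq_lt] and [lt_eq] rewrite both endpoints of a strict
      inequality along [eq]; the name [eq_lt_eq_example] is made up. *)

  Lemma eq_lt_eq_example : forall w x y z,
    eq w x -> lt x y -> eq y z -> lt w z.
  Proof.
   intros w x y z H1 H2 H3.
   apply eq_lt with (y:=x); auto.
   apply lt_eq with (y:=y); auto.
  Qed.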
Instance lt_compat : Proper (eq==>eq==>iff) lt. Proof. apply proper_sym_impl_iff_2; auto with *. intros x x' Hx y y' Hy H. apply eq_lt with x; auto. apply lt_eq with y; auto. Qed. Lemma lt_total : forall x y, lt x y \/ eq x y \/ lt y x. Proof. intros; destruct (compare x y); auto. Qed. Module TO. Definition t := t. Definition eq := eq. Definition lt := lt. Definition le x y := lt x y \/ eq x y. End TO. Module IsTO. Definition eq_equiv := eq_equiv. Definition lt_strorder := lt_strorder. Definition lt_compat := lt_compat. Definition lt_total := lt_total. Lemma le_lteq x y : TO.le x y <-> lt x y \/ eq x y. Proof. reflexivity. Qed. End IsTO. Module OrderTac := !MakeOrderTac TO IsTO. Ltac order := OrderTac.order. Lemma le_eq x y z : ~lt x y -> eq y z -> ~lt x z. Proof. order. Qed. Lemma eq_le x y z : eq x y -> ~lt y z -> ~lt x z. Proof. order. Qed. Lemma neq_eq x y z : ~eq x y -> eq y z -> ~eq x z. Proof. order. Qed. Lemma eq_neq x y z : eq x y -> ~eq y z -> ~eq x z. Proof. order. Qed. Lemma le_lt_trans x y z : ~lt y x -> lt y z -> lt x z. Proof. order. Qed. Lemma lt_le_trans x y z : lt x y -> ~lt z y -> lt x z. Proof. order. Qed. Lemma le_neq x y : ~lt x y -> ~eq x y -> lt y x. Proof. order. Qed. Lemma le_trans x y z : ~lt y x -> ~lt z y -> ~lt z x. Proof. order. Qed. Lemma le_antisym x y : ~lt y x -> ~lt x y -> eq x y. Proof. order. Qed. Lemma neq_sym x y : ~eq x y -> ~eq y x. Proof. order. Qed. Lemma lt_le x y : lt x y -> ~lt y x. Proof. order. Qed. Lemma gt_not_eq x y : lt y x -> ~ eq x y. Proof. order. Qed. Lemma eq_not_lt x y : eq x y -> ~ lt x y. Proof. order. Qed. Lemma eq_not_gt x y : eq x y -> ~ lt y x. Proof. order. Qed. Lemma lt_not_gt x y : lt x y -> ~ lt y x. Proof. order. Qed. Hint Resolve gt_not_eq eq_not_lt. Hint Immediate eq_lt lt_eq le_eq eq_le neq_eq eq_neq. Hint Resolve eq_not_gt lt_antirefl lt_not_gt. Lemma elim_compare_eq : forall x y : t, eq x y -> exists H : eq x y, compare x y = EQ H. Proof. intros; case (compare x y); intros H'; try (exfalso; order). exists H'; auto. Qed. Lemma elim_compare_lt : forall x y : t, lt x y -> exists H : lt x y, compare x y = LT H. Proof. intros; case (compare x y); intros H'; try (exfalso; order). exists H'; auto. Qed. Lemma elim_compare_gt : forall x y : t, lt y x -> exists H : lt y x, compare x y = GT H. Proof. intros; case (compare x y); intros H'; try (exfalso; order). exists H'; auto. Qed. Ltac elim_comp := match goal with | |- ?e => match e with | context ctx [ compare ?a ?b ] => let H := fresh in (destruct (compare a b) as [H|H|H]; try order) end end. Ltac elim_comp_eq x y := elim (elim_compare_eq (x:=x) (y:=y)); [ intros _1 _2; rewrite _2; clear _1 _2 | auto ]. Ltac elim_comp_lt x y := elim (elim_compare_lt (x:=x) (y:=y)); [ intros _1 _2; rewrite _2; clear _1 _2 | auto ]. Ltac elim_comp_gt x y := elim (elim_compare_gt (x:=x) (y:=y)); [ intros _1 _2; rewrite _2; clear _1 _2 | auto ]. (** For compatibility reasons *) Definition eq_dec := eq_dec. Lemma lt_dec : forall x y : t, {lt x y} + {~ lt x y}. Proof. intros; elim (compare x y); [ left | right | right ]; auto. Defined. Definition eqb x y : bool := if eq_dec x y then true else false. Lemma eqb_alt : forall x y, eqb x y = match compare x y with EQ _ => true | _ => false end. Proof. unfold eqb; intros; destruct (eq_dec x y); elim_comp; auto. Qed. (* Specialization of resuts about lists modulo. *) Section ForNotations. Notation In:=(InA eq). Notation Inf:=(lelistA lt). Notation Sort:=(sort lt). Notation NoDup:=(NoDupA eq). Lemma In_eq : forall l x y, eq x y -> In x l -> In y l. 
Proof. exact (InA_eqA eq_equiv). Qed. Lemma ListIn_In : forall l x, List.In x l -> In x l. Proof. exact (In_InA eq_equiv). Qed. Lemma Inf_lt : forall l x y, lt x y -> Inf y l -> Inf x l. Proof. exact (InfA_ltA lt_strorder). Qed. Lemma Inf_eq : forall l x y, eq x y -> Inf y l -> Inf x l. Proof. exact (InfA_eqA eq_equiv lt_strorder lt_compat). Qed. Lemma Sort_Inf_In : forall l x a, Sort l -> Inf a l -> In x l -> lt a x. Proof. exact (SortA_InfA_InA eq_equiv lt_strorder lt_compat). Qed. Lemma ListIn_Inf : forall l x, (forall y, List.In y l -> lt x y) -> Inf x l. Proof. exact (@In_InfA t lt). Qed. Lemma In_Inf : forall l x, (forall y, In y l -> lt x y) -> Inf x l. Proof. exact (InA_InfA eq_equiv (ltA:=lt)). Qed. Lemma Inf_alt : forall l x, Sort l -> (Inf x l <-> (forall y, In y l -> lt x y)). Proof. exact (InfA_alt eq_equiv lt_strorder lt_compat). Qed. Lemma Sort_NoDup : forall l, Sort l -> NoDup l. Proof. exact (SortA_NoDupA eq_equiv lt_strorder lt_compat). Qed. End ForNotations. Hint Resolve ListIn_In Sort_NoDup Inf_lt. Hint Immediate In_eq Inf_lt. End OrderedTypeFacts. Module KeyOrderedType(O:OrderedType). Import O. Module MO:=OrderedTypeFacts(O). Import MO. Section Elt. Variable elt : Type. Notation key:=t. Definition eqk (p p':key*elt) := eq (fst p) (fst p'). Definition eqke (p p':key*elt) := eq (fst p) (fst p') /\ (snd p) = (snd p'). Definition ltk (p p':key*elt) := lt (fst p) (fst p'). Hint Unfold eqk eqke ltk. Hint Extern 2 (eqke ?a ?b) => split. (* eqke is stricter than eqk *) Lemma eqke_eqk : forall x x', eqke x x' -> eqk x x'. Proof. unfold eqk, eqke; intuition. Qed. (* ltk ignore the second components *) Lemma ltk_right_r : forall x k e e', ltk x (k,e) -> ltk x (k,e'). Proof. auto. Qed. Lemma ltk_right_l : forall x k e e', ltk (k,e) x -> ltk (k,e') x. Proof. auto. Qed. Hint Immediate ltk_right_r ltk_right_l. (* eqk, eqke are equalities, ltk is a strict order *) Lemma eqk_refl : forall e, eqk e e. Proof. auto. Qed. Lemma eqke_refl : forall e, eqke e e. Proof. auto. Qed. Lemma eqk_sym : forall e e', eqk e e' -> eqk e' e. Proof. auto. Qed. Lemma eqke_sym : forall e e', eqke e e' -> eqke e' e. Proof. unfold eqke; intuition. Qed. Lemma eqk_trans : forall e e' e'', eqk e e' -> eqk e' e'' -> eqk e e''. Proof. eauto. Qed. Lemma eqke_trans : forall e e' e'', eqke e e' -> eqke e' e'' -> eqke e e''. Proof. unfold eqke; intuition; [ eauto | congruence ]. Qed. Lemma ltk_trans : forall e e' e'', ltk e e' -> ltk e' e'' -> ltk e e''. Proof. eauto. Qed. Lemma ltk_not_eqk : forall e e', ltk e e' -> ~ eqk e e'. Proof. unfold eqk, ltk; auto. Qed. Lemma ltk_not_eqke : forall e e', ltk e e' -> ~eqke e e'. Proof. unfold eqke, ltk; intuition; simpl in *; subst. exact (lt_not_eq H H1). Qed. Hint Resolve eqk_trans eqke_trans eqk_refl eqke_refl. Hint Resolve ltk_trans ltk_not_eqk ltk_not_eqke. Hint Immediate eqk_sym eqke_sym. Global Instance eqk_equiv : Equivalence eqk. Proof. constructor; eauto. Qed. Global Instance eqke_equiv : Equivalence eqke. Proof. split; eauto. Qed. Global Instance ltk_strorder : StrictOrder ltk. Proof. constructor; eauto. intros x; apply (irreflexivity (x:=fst x)). Qed. Global Instance ltk_compat : Proper (eqk==>eqk==>iff) ltk. Proof. intros (x,e) (x',e') Hxx' (y,f) (y',f') Hyy'; compute. compute in Hxx'; compute in Hyy'. rewrite Hxx', Hyy'; auto. Qed. Global Instance ltk_compat' : Proper (eqke==>eqke==>iff) ltk. Proof. intros (x,e) (x',e') (Hxx',_) (y,f) (y',f') (Hyy',_); compute. compute in Hxx'; compute in Hyy'. rewrite Hxx', Hyy'; auto. Qed. 
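(** Editor's note: an illustrative lemma, not part of the original file.
    [eqk] only compares keys, so two bindings of the same key with
    different data are [eqk]-equal; [eqk_fst_example] is a made-up name. *)

Lemma eqk_fst_example : forall k (e e':elt), eqk (k,e) (k,e').
Proof. intros; red; simpl; auto. Qed.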
(* Additionnal facts *) Lemma eqk_not_ltk : forall x x', eqk x x' -> ~ltk x x'. Proof. unfold eqk, ltk; simpl; auto. Qed. Lemma ltk_eqk : forall e e' e'', ltk e e' -> eqk e' e'' -> ltk e e''. Proof. eauto. Qed. Lemma eqk_ltk : forall e e' e'', eqk e e' -> ltk e' e'' -> ltk e e''. Proof. intros (k,e) (k',e') (k'',e''). unfold ltk, eqk; simpl; eauto. Qed. Hint Resolve eqk_not_ltk. Hint Immediate ltk_eqk eqk_ltk. Lemma InA_eqke_eqk : forall x m, InA eqke x m -> InA eqk x m. Proof. unfold eqke; induction 1; intuition. Qed. Hint Resolve InA_eqke_eqk. Definition MapsTo (k:key)(e:elt):= InA eqke (k,e). Definition In k m := exists e:elt, MapsTo k e m. Notation Sort := (sort ltk). Notation Inf := (lelistA ltk). Hint Unfold MapsTo In. (* An alternative formulation for [In k l] is [exists e, InA eqk (k,e) l] *) Lemma In_alt : forall k l, In k l <-> exists e, InA eqk (k,e) l. Proof. firstorder. exists x; auto. induction H. destruct y. exists e; auto. destruct IHInA as [e H0]. exists e; auto. Qed. Lemma MapsTo_eq : forall l x y e, eq x y -> MapsTo x e l -> MapsTo y e l. Proof. intros; unfold MapsTo in *; apply InA_eqA with (x,e); eauto with *. Qed. Lemma In_eq : forall l x y, eq x y -> In x l -> In y l. Proof. destruct 2 as (e,E); exists e; eapply MapsTo_eq; eauto. Qed. Lemma Inf_eq : forall l x x', eqk x x' -> Inf x' l -> Inf x l. Proof. exact (InfA_eqA eqk_equiv ltk_strorder ltk_compat). Qed. Lemma Inf_lt : forall l x x', ltk x x' -> Inf x' l -> Inf x l. Proof. exact (InfA_ltA ltk_strorder). Qed. Hint Immediate Inf_eq. Hint Resolve Inf_lt. Lemma Sort_Inf_In : forall l p q, Sort l -> Inf q l -> InA eqk p l -> ltk q p. Proof. exact (SortA_InfA_InA eqk_equiv ltk_strorder ltk_compat). Qed. Lemma Sort_Inf_NotIn : forall l k e, Sort l -> Inf (k,e) l -> ~In k l. Proof. intros; red; intros. destruct H1 as [e' H2]. elim (@ltk_not_eqk (k,e) (k,e')). eapply Sort_Inf_In; eauto. red; simpl; auto. Qed. Lemma Sort_NoDupA: forall l, Sort l -> NoDupA eqk l. Proof. exact (SortA_NoDupA eqk_equiv ltk_strorder ltk_compat). Qed. Lemma Sort_In_cons_1 : forall e l e', Sort (e::l) -> InA eqk e' l -> ltk e e'. Proof. inversion 1; intros; eapply Sort_Inf_In; eauto. Qed. Lemma Sort_In_cons_2 : forall l e e', Sort (e::l) -> InA eqk e' (e::l) -> ltk e e' \/ eqk e e'. Proof. inversion_clear 2; auto. left; apply Sort_In_cons_1 with l; auto. Qed. Lemma Sort_In_cons_3 : forall x l k e, Sort ((k,e)::l) -> In x l -> ~eq x k. Proof. inversion_clear 1; red; intros. destruct (Sort_Inf_NotIn H0 H1 (In_eq H2 H)). Qed. Lemma In_inv : forall k k' e l, In k ((k',e) :: l) -> eq k k' \/ In k l. Proof. inversion 1. inversion_clear H0; eauto. destruct H1; simpl in *; intuition. Qed. Lemma In_inv_2 : forall k k' e e' l, InA eqk (k, e) ((k', e') :: l) -> ~ eq k k' -> InA eqk (k, e) l. Proof. inversion_clear 1; compute in H0; intuition. Qed. Lemma In_inv_3 : forall x x' l, InA eqke x (x' :: l) -> ~ eqk x x' -> InA eqke x l. Proof. inversion_clear 1; compute in H0; intuition. Qed. End Elt. Hint Unfold eqk eqke ltk. Hint Extern 2 (eqke ?a ?b) => split. Hint Resolve eqk_trans eqke_trans eqk_refl eqke_refl. Hint Resolve ltk_trans ltk_not_eqk ltk_not_eqke. Hint Immediate eqk_sym eqke_sym. Hint Resolve eqk_not_ltk. Hint Immediate ltk_eqk eqk_ltk. Hint Resolve InA_eqke_eqk. Hint Unfold MapsTo In. Hint Immediate Inf_eq. Hint Resolve Inf_lt. Hint Resolve Sort_Inf_NotIn. Hint Resolve In_inv_2 In_inv_3. End KeyOrderedType. 
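(** Editor's note: the following toy instance is not part of the original
    file. It sketches how one implements [MiniOrderedType] for a concrete
    type and then upgrades it to an [OrderedType] via [MOT_to_OT]. The
    module names [BoolMiniOT] and [BoolOT] are made up for illustration. *)

Module BoolMiniOT <: MiniOrderedType.

  Definition t := bool.
  Definition eq (x y : t) := x = y.
  (* [false] is considered smaller than [true]. *)
  Definition lt (x y : t) := x = false /\ y = true.

  Lemma eq_refl : forall x : t, eq x x.
  Proof. intros x; unfold eq; reflexivity. Qed.

  Lemma eq_sym : forall x y : t, eq x y -> eq y x.
  Proof. unfold eq; intros x y H; subst; reflexivity. Qed.

  Lemma eq_trans : forall x y z : t, eq x y -> eq y z -> eq x z.
  Proof. unfold eq; intros x y z Hxy Hyz; subst; reflexivity. Qed.

  Lemma lt_trans : forall x y z : t, lt x y -> lt y z -> lt x z.
  Proof. unfold lt; intros x y z H1 H2; intuition. Qed.

  Lemma lt_not_eq : forall x y : t, lt x y -> ~ eq x y.
  Proof. unfold lt, eq; intros x y (Hx,Hy) E; congruence. Qed.

  (* Three-way comparison, by case analysis on both booleans. *)
  Definition compare : forall x y : t, Compare lt eq x y.
  Proof.
    intros [|] [|];
      [ apply EQ; unfold eq; reflexivity
      | apply GT; unfold lt; split; reflexivity
      | apply LT; unfold lt; split; reflexivity
      | apply EQ; unfold eq; reflexivity ].
  Defined.

End BoolMiniOT.

(** [MOT_to_OT] then provides the missing [eq_dec]. *)
Module BoolOT := MOT_to_OT BoolMiniOT.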
coq-8.4pl2/theories/Structures/OrdersTac.v

(************************************************************************)
(*  v      *   The Coq Proof Assistant  /  The Coq Development Team     *)
(* <O___,, *   INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2012     *)
(*   \VV/  **************************************************************)
(*    //   *      This file is distributed under the terms of the       *)
(*         *       GNU Lesser General Public License Version 2.1        *)
(************************************************************************)

Require Import Setoid Morphisms Basics Equalities Orders.
Set Implicit Arguments.

(** * The order tactic *)

(* ... le y z -> le x z]. *)

Inductive ord := OEQ | OLT | OLE.

Definition trans_ord o o' :=
 match o, o' with
 | OEQ, _ => o'
 | _, OEQ => o
 | OLE, OLE => OLE
 | _, _ => OLT
 end.
Local Infix "+" := trans_ord.

(** ** The tactic requirements: a total order

    We need:
    - an equivalence [eq],
    - a strict order [lt] total and compatible with [eq],
    - a larger order [le] synonym for [lt\/eq].

    This used to be provided here via a [TotalOrder], but for technical
    reasons related to extraction, we now ask for two separate parts:
    relations in a [EqLtLe] + properties in [IsTotalOrder].
    Note that [TotalOrder = EqLtLe <+ IsTotalOrder] *)

Module Type IsTotalOrder (O:EqLtLe) :=
 IsEq O <+ IsStrOrder O <+ LeIsLtEq O <+ LtIsTotal O.

(** ** Properties that will be used by the [order] tactic *)

Module OrderFacts (Import O:EqLtLe)(P:IsTotalOrder O).
Include EqLtLeNotation O.

(** Reflexivity rules *)

Lemma eq_refl : forall x, x==x.
Proof. reflexivity. Qed.

Lemma le_refl : forall x, x<=x.
Proof. intros; rewrite P.le_lteq; right; reflexivity. Qed.

Lemma lt_irrefl : forall x, ~ x<x.
Proof. intros; apply StrictOrder_Irreflexive. Qed.

(** Symmetry rules *)

Lemma eq_sym : forall x y, x==y -> y==x.
Proof. auto with *. Qed.

Lemma le_antisym : forall x y, x<=y -> y<=x -> x==y.
Proof.
 intros x y; rewrite 2 P.le_lteq. intuition.
 elim (StrictOrder_Irreflexive x); transitivity y; auto.
Qed.

Lemma neq_sym : forall x y, ~x==y -> ~y==x.
Proof. auto using eq_sym. Qed.

(** Transitivity rules: first, a generic formulation, then instances *)

Ltac subst_eqns :=
 match goal with
   | H : _==_ |- _ => (rewrite H || rewrite <- H); clear H; subst_eqns
   | _ => idtac
 end.

Definition interp_ord o :=
 match o with OEQ => O.eq | OLT => O.lt | OLE => O.le end.
Local Notation "#" := interp_ord.

Lemma trans : forall o o' x y z, #o x y -> #o' y z -> #(o+o') x z.
Proof.
 destruct o, o'; simpl; intros x y z;
 rewrite ?P.le_lteq; intuition; subst_eqns;
 eauto using (StrictOrder_Transitive x y z) with *.
Qed.

Definition eq_trans x y z : x==y -> y==z -> x==z := @trans OEQ OEQ x y z.
Definition le_trans x y z : x<=y -> y<=z -> x<=z := @trans OLE OLE x y z.
Definition lt_trans x y z : x<y -> y<z -> x<z := @trans OLT OLT x y z.
Definition le_lt_trans x y z : x<=y -> y<z -> x<z := @trans OLE OLT x y z.
Definition lt_le_trans x y z : x<y -> y<=z -> x<z := @trans OLT OLE x y z.
Definition eq_lt x y z : x==y -> y<z -> x<z := @trans OEQ OLT x y z.
Definition lt_eq x y z : x<y -> y==z -> x<z := @trans OLT OEQ x y z.
Definition eq_le x y z : x==y -> y<=z -> x<=z := @trans OEQ OLE x y z.
Definition le_eq x y z : x<=y -> y==z -> x<=z := @trans OLE OEQ x y z.

Lemma eq_neq : forall x y z, x==y -> ~y==z -> ~x==z.
Proof. eauto using eq_trans, eq_sym. Qed.

Lemma neq_eq : forall x y z, ~x==y -> y==z -> ~x==z.
Proof. eauto using eq_trans, eq_sym. Qed.

(** (double) negation rules *)

Lemma not_neq_eq : forall x y, ~~x==y -> x==y.
Proof.
 intros x y H. destruct (P.lt_total x y) as [H'|[H'|H']]; auto;
 destruct H; intro H; rewrite H in H'; eapply lt_irrefl; eauto.
Qed.

Lemma not_ge_lt : forall x y, ~y<=x -> x<y.
Proof.
 intros x y H. destruct (P.lt_total x y) as [LT|[EQ|GT]]; auto.
 destruct H; rewrite P.le_lteq; right; symmetry; auto.
 destruct H; rewrite P.le_lteq; left; auto.
Qed.

Lemma not_gt_le : forall x y, ~y<x -> x<=y.
Proof.
 intros x y H. rewrite P.le_lteq. generalize (P.lt_total x y); intuition.
Qed.

Lemma le_neq_lt : forall x y, x<=y -> ~x==y -> x